[
  {
    "path": ".circleci/config.yml",
    "content": "# Python CircleCI 2.0 configuration file\n#\n# Check https://circleci.com/docs/2.0/language-python/ for more details\n#\nversion: 2\njobs:\n  build:\n    branches:\n      ignore:\n        - dennis-tests\n    docker:\n      # specify the version you desire here\n      # use `-browsers` prefix for selenium tests, e.g. `3.6.1-browsers`\n      #- image: circleci/python:3.6.1\n      #- image: singularityware/singularity:3.1-slim  # would be faster but interacts badly with cuda docker image loading\n      - image : nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04\n\n    # machine: true\n    working_directory: ~/repo\n    # resource_class: xlarge  # not activated for the project\n\n    steps:\n      - checkout\n\n        #- run:\n        #-   name: Install singularity\n        #-   command: |\n        #-       #chmod u+x ~/repo/.circleci/*.sh\n        #-       #/bin/bash ~/repo/.circleci/install_singularity.sh\n        #-       sudo wget -O- http://neuro.debian.net/lists/xenial.us-ca.full | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list && \\\n        #-           sudo apt-key adv --recv-keys --keyserver hkp://pool.sks-keyservers.net:80 0xA5D32F012649A5A9 && \\\n        #-           sudo apt-get update\n        #-       sudo apt-get install -y singularity-container\n\n      - restore_cache:\n          keys:\n          - tag3-conda-{{ checksum \"singularity/environment.yml\" }}-pytorch\n          - tag3-conda-{{ checksum \"singularity/environment.yml\" }}\n          - tag1-conda\n\n      - run:\n          name: Install miniconda and clone pytorch\n          command: |\n              apt-get update\n              apt-get install -y \\\n                  build-essential \\\n                  libzmq3-dev \\\n                  cmake \\\n                  wget \\\n                  vim \\\n                  git \\\n                  ca-certificates \\\n                  libjpeg-dev \\\n                  openjdk-8-jdk \\\n                  libgtest-dev \\\n   
               libpng-dev\n              rm -rf /var/lib/apt/lists/\n              # build gtest (for polygames-tests)\n              cd /usr/src/googletest/googletest\n              mkdir build\n              cd build\n              cmake ..\n              make\n              cp libgtest* /usr/lib/\n              cd ..\n              rm -rf build\n              # conda\n              CHECKPATH=/opt/conda\n              if [ -d \"$CHECKPATH\" ]; then\n                  echo \"$CHECKPATH already exists\"\n              else\n                  wget -O ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh\n                  bash ~/miniconda.sh -b -p /opt/conda\n                  rm ~/miniconda.sh\n                  git clone --recursive https://github.com/pytorch/pytorch --branch=v1.1.0 ~/pytorch\n              fi\n\n      - save_cache:\n          paths:\n            - /opt/conda\n            - ~/pytorch\n          key: tag1-conda\n\n      - run:\n          name: Install conda environment\n          command: |\n              CHECKPATH=/opt/conda/envs/pypg\n              . 
/opt/conda/etc/profile.d/conda.sh\n              if [ -d \"$CHECKPATH\" ]; then\n                  echo \"$CHECKPATH already exists\"\n                  # conda env update -f singularity/environment.yml --name pypg\n              else\n                  conda env create -f singularity/environment.yml --name pypg\n                  conda activate pypg\n                  pip install mypy>=0.630 pytest>=4.3.0 pytest-cov>=2.6.1  # only required for testing\n              fi\n\n      - save_cache:\n          paths:\n            - /opt/conda\n            - ~/pytorch\n          key: tag3-conda-{{ checksum \"singularity/environment.yml\" }}\n\n      - run:\n          name: Install pytorch\n          command: |\n              CHECKPATH=~/pytorch/done\n              if [ -f \"$CHECKPATH\" ]; then\n                      echo \"pytorch already exists\"\n              else\n                  . /opt/conda/etc/profile.d/conda.sh\n                  conda activate pypg\n                  which pip\n                  pip install -U pip\n                  cd ~/pytorch\n                  export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-\"$(dirname $(which conda))/../\"}\n                  # export MAX_JOBS=$(cat /proc/cpuinfo | grep -c processor)\n                  # if (( $MAX_JOBS < 4 ));\n                  #     then MAX_JOBS=4;\n                  # fi;\n                  export MAX_JOBS=4  # calibrate for circleci resources...\n                  echo \"Using $MAX_JOBS jobs for pytorch compilation\"\n                  # # set cuda arch list so that the built binary can be run on both pascal and volta\n                  MAX_JOBS=$MAX_JOBS TORCH_CUDA_ARCH_LIST='6.0;7.0' pip install . 
-v\n                  touch $CHECKPATH\n              fi\n\n      - save_cache:\n          paths:\n            - /opt/conda\n            - ~/pytorch\n          key: tag3-conda-{{ checksum \"singularity/environment.yml\" }}-pytorch\n\n      - run:\n          name: Build polygames\n          command: |\n              . /opt/conda/etc/profile.d/conda.sh\n              conda activate pypg\n              mkdir build\n              cd build\n              cmake ..\n              make -j 2\n\n      - run:\n          name: Build polygames-tests\n          command: |\n              . /opt/conda/etc/profile.d/conda.sh\n              conda activate pypg\n              mkdir ludii\n              wget -P ludii https://ludii.games/downloads/Ludii.jar\n              mkdir tests/build\n              cd tests/build\n              cmake ..\n              make -j 2\n\n      - run:\n          name: Test games\n          command: |\n              ./build/test_state\n\n      - run:\n          name: Test polygames-tests (unit tests)\n          command: |\n              ./tests/build/polygames-tests\n\n      - run:\n          name: Test Mcts\n          command: |\n              ./build/torchRL/mcts/test_mcts 1 100\n              ./build/torchRL/mcts/test_mcts 4 50\n\n      - run:\n          name: Test python\n          command: |\n              . /opt/conda/etc/profile.d/conda.sh\n              conda activate pypg\n              pytest pypolygames --durations=10 --verbose\n\n      - run:\n          name: Run training\n          command: |\n              . 
/opt/conda/etc/profile.d/conda.sh\n              conda activate pypg\n              python -m pypolygames traineval --act_batchsize=2 \\\n                  --batchsize=2 --replay_capacity=16  --replay_warmup=2 \\\n                  --num_epoch=1 --num_game=12 --model_name=NanoFCLogitModel \\\n                  --epoch_len=1 --device=cpu --game_name=TicTacToe --sync_period=1  --device_eval=cpu \\\n                  --num_actor_eval=2 --num_rollouts_opponent=50 --num_game_eval=4\n"
  },
  {
    "path": ".clang-format",
    "content": "AccessModifierOffset: -1\nAllowShortFunctionsOnASingleLine: false\nAllowShortIfStatementsOnASingleLine: false\nAllowShortLoopsOnASingleLine: false\nBinPackParameters: false\nBreakConstructorInitializersBeforeComma: true\nPenaltyBreakBeforeFirstCallParameter: 0\nPenaltyReturnTypeOnItsOwnLine: 200\nPointerBindsToType: true\nSpacesBeforeTrailingComments: 2\n"
  },
  {
    "path": ".github/CONTRIBUTING.md",
    "content": "# Contributing to Polygames\nWe want to make contributing to this project as easy and transparent as\npossible.\n\n## Pull Requests\nWe actively welcome your pull requests.\n\n1. Fork the repo and create your branch from `master`.\n2. If you've added code that should be tested, add tests.\n3. If you've changed APIs, update the documentation.\n4. Ensure the test suite passes.\n5. Make sure your code lints.\n6. If you haven't already, complete the Contributor License Agreement (\"CLA\").\n\n## Our Development Process\n\nAny pull request will trigger continuous integration. Its configuration is\navailable [here](../.circleci/config.yml).\nIn particular it defines tests that you should try to run locally as well:\n- testing mcts and state C++ code\n``` \n./build/test_state\n./build/torchRL/mcts/test_mcts 1 100\n./build/torchRL/mcts/test_mcts 4 50\n```\n- testing the python tools:\n```\npytest pypolygames --durations=10 --verbose\n```\n\n- trying a short training:\n```\npython -m pypolygames traineval --act_batchsize=2 \\\n  --batchsize=2 --replay_capacity=16  --replay_warmup=2 \\\n  --num_epoch=1 --num_game=12 --model_name=NanoFCLogitModel \\\n  --epoch_len=1 --device=cpu --game_name=TicTacToe --sync_period=1  --device_eval=cpu \\\n  --num_actor_eval=2 --num_rollouts_opponent=50 --num_game_eval=4\n```\n\n## Contributor License Agreement (\"CLA\")\nIn order to accept your pull request, we need you to submit a CLA. You only need\nto do this once to work on any of Facebook's open source projects.\n\nComplete your CLA here: <https://code.facebook.com/cla>\n\n## Issues\nWe use GitHub issues to track public bugs. 
Please ensure your description is\nclear and has sufficient instructions to be able to reproduce the issue.\n\n## Coding Style  \n\nThe root contains a ```.clang-format``` file that defines the coding style of\nthis repo, run the following command before submitting a PR or pushing\n```\nclang-format -i path_to_your_cc_files\nclang-format -i path_to_your_h_files\n```\n\n\n## License\nBy contributing to `Polygames`, you agree that your contributions will be licensed\nunder the LICENSE file in the root directory of this source tree.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE.md",
    "content": "## Steps to reproduce\n\n  1. _____\n  2. _____\n  3. _____\n\n## Observed Results\n\n  * What happened?  This could be a description, log output, etc.\n\n## Expected Results\n\n  * What did you expect to happen?\n\n## Relevant Code\n\n  ```\n  // TODO(you): code here to reproduce the problem\n  ```\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "## Types of changes\n\n<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->\n\n- [ ] Docs change / refactoring / dependency upgrade\n- [ ] Bug fix (non-breaking change which fixes an issue)\n- [ ] New feature (non-breaking change which adds functionality)\n- [ ] Breaking change (fix or feature that would cause existing functionality to change)\n\n## Motivation and Context / Related issue\n\n<!--- Why is this change required? What problem does it solve? -->\n<!--- Please link to an existing issue here if one exists. -->\n<!--- (we recommend to have an existing issue for each pull request) -->\n\n## How Has This Been Tested (if it applies)\n\n<!--- Please describe here how your modifications have been tested. -->\n\n## Checklist\n\n<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->\n<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->\n\n- [ ] The documentation is up-to-date with the changes I made.\n- [ ] I have read the **CONTRIBUTING** document and completed the CLA (see **CONTRIBUTING**).\n- [ ] All tests passed, and additional code has been covered with new tests.\n"
  },
  {
    "path": ".gitignore",
    "content": "# Created by https://www.gitignore.io/api/vim,c++,cmake,linux,macos,python,intellij,sublimetext\n# Edit at https://www.gitignore.io/?templates=vim,c++,cmake,linux,macos,python,intellij,sublimetext\n\n### C++ ###\n# Prerequisites\n*.d\n\n# Compiled Object files\n*.slo\n*.lo\n*.o\n*.obj\n\n# Precompiled Headers\n*.gch\n*.pch\n\n# Compiled Dynamic libraries\n*.so\n*.dylib\n*.dll\n\n# Fortran module files\n*.mod\n*.smod\n\n# Compiled Static libraries\n*.lai\n*.la\n*.a\n*.lib\n\n# Executables\n*.exe\n*.out\n*.app\n\n### CMake ###\nCMakeLists.txt.user\nCMakeCache.txt\nCMakeFiles\nCMakeScripts\nTesting\nMakefile\ncmake_install.cmake\ninstall_manifest.txt\ncompile_commands.json\nCTestTestfile.cmake\n_deps\n\n### CMake Patch ###\n# External projects\n*-prefix/\n\n### Intellij ###\n# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm\n# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839\n\n# User-specific stuff\n.idea/**/workspace.xml\n.idea/**/tasks.xml\n.idea/**/usage.statistics.xml\n.idea/**/dictionaries\n.idea/**/shelf\n\n# Generated files\n.idea/**/contentModel.xml\n\n# Sensitive or high-churn files\n.idea/**/dataSources/\n.idea/**/dataSources.ids\n.idea/**/dataSources.local.xml\n.idea/**/sqlDataSources.xml\n.idea/**/dynamic.xml\n.idea/**/uiDesigner.xml\n.idea/**/dbnavigator.xml\n\n# Gradle\n.idea/**/gradle.xml\n.idea/**/libraries\n\n# Gradle and Maven with auto-import\n# When using Gradle or Maven with auto-import, you should exclude module files,\n# since they will be recreated, and may cause churn.  
Uncomment if using\n# auto-import.\n# .idea/modules.xml\n# .idea/*.iml\n# .idea/modules\n\n# CMake\ncmake-build-*/\n\n# Mongo Explorer plugin\n.idea/**/mongoSettings.xml\n\n# File-based project format\n*.iws\n\n# IntelliJ\nout/\n\n# mpeltonen/sbt-idea plugin\n.idea_modules/\n\n# JIRA plugin\natlassian-ide-plugin.xml\n\n# Cursive Clojure plugin\n.idea/replstate.xml\n\n# Crashlytics plugin (for Android Studio and IntelliJ)\ncom_crashlytics_export_strings.xml\ncrashlytics.properties\ncrashlytics-build.properties\nfabric.properties\n\n# Editor-based Rest Client\n.idea/httpRequests\n\n# Android studio 3.1+ serialized cache file\n.idea/caches/build_file_checksums.ser\n\n# JetBrains templates\n**___jb_tmp___\n\n### Intellij Patch ###\n# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721\n\n# *.iml\n# modules.xml\n# .idea/misc.xml\n# *.ipr\n\n# Sonarlint plugin\n.idea/sonarlint\n\n### Linux ###\n*~\n\n# temporary files which can be created if a process still has a handle open of a deleted file\n.fuse_hidden*\n\n# KDE directory preferences\n.directory\n\n# Linux trash folder which might appear on any partition or disk\n.Trash-*\n\n# .nfs files are created when an open file is removed but is still being accessed\n.nfs*\n\n### macOS ###\n# General\n.DS_Store\n.AppleDouble\n.LSOverride\n\n# Icon must end with two \\r\nIcon\n\n# Thumbnails\n._*\n\n# Files that might appear in the root of a volume\n.DocumentRevisions-V100\n.fseventsd\n.Spotlight-V100\n.TemporaryItems\n.Trashes\n.VolumeIcon.icns\n.com.apple.timemachine.donotpresent\n\n# Directories potentially created on remote AFP share\n.AppleDB\n.AppleDesktop\nNetwork Trash Folder\nTemporary Items\n.apdisk\n\n### Python ###\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into 
it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don’t work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n### SublimeText ###\n# Cache files for Sublime Text\n*.tmlanguage.cache\n*.tmPreferences.cache\n*.stTheme.cache\n\n# Workspace files are user-specific\n*.sublime-workspace\n\n# Project files should be checked into the repository, unless a significant\n# proportion of contributors will probably not be using Sublime Text\n# *.sublime-project\n\n# SFTP configuration file\nsftp-config.json\n\n# Package control specific files\nPackage Control.last-run\nPackage Control.ca-list\nPackage Control.ca-bundle\nPackage Control.system-ca-bundle\nPackage Control.cache/\nPackage Control.ca-certs/\nPackage Control.merged-ca-bundle\nPackage 
Control.user-ca-bundle\noscrypto-ca-bundle.crt\nbh_unicode_properties.cache\n\n# Sublime-github package stores a github token in this file\n# https://packagecontrol.io/packages/sublime-github\nGitHub.sublime-settings\n\n### Vim ###\n# Swap\n[._]*.s[a-v][a-z]\n[._]*.sw[a-p]\n[._]s[a-rt-v][a-z]\n[._]ss[a-gi-z]\n[._]sw[a-p]\n\n# Session\nSession.vim\n\n# Temporary\n.netrwhist\n# Auto-generated tag files\ntags\n# Persistent undo\n[._]*.un~\n\n# End of https://www.gitignore.io/api/vim,c++,cmake,linux,macos,python,intellij,sublimetext\n\n*.simg\ncmake-build-debug/\n\n# Visual Studio Code\n.vscode/\n\n# Eclipse projects\n/.cproject\n/.project\n.settings/\n\n# Experiment directory\nexps/\n\n# For some people testing\nrun.sh\n\n# Ludii's JAR file\nludii/Ludii.jar\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "CMAKE_MINIMUM_REQUIRED(VERSION 3.3)\nproject(polygames)\n\n# if(NOT CMAKE_BUILD_TYPE)\n#   set(CMAKE_BUILD_TYPE RelWithDebInfo)\n# endif()\n\nset(CMAKE_CXX_STANDARD 17)\nset(CMAKE_CXX_FLAGS\n    \"${CMAKE_CXX_FLAGS} -fsized-deallocation -O3 -ffast-math\")\n\nset(CMAKE_POSITION_INDEPENDENT_CODE ON)\n\nOPTION(PYTORCH12 \"Is PyTorch >= 1.2\" OFF)\nOPTION(PYTORCH15 \"Is PyTorch >= 1.5\" OFF)\nIF(PYTORCH15)\n    ADD_DEFINITIONS(-DPYTORCH15 -DPYTORCH12)\nELSEIF(PYTORCH12)\n    ADD_DEFINITIONS(-DPYTORCH12)\nENDIF()\n\nexecute_process(\n    COMMAND python -c \"import torch; import os; print(os.path.dirname(torch.__file__), end='')\"\n    OUTPUT_VARIABLE TorchPath\n)\nset(CMAKE_PREFIX_PATH ${TorchPath})\nfind_package(Torch REQUIRED)\n\nfind_package(Boost COMPONENTS system)\nif( Boost_FOUND )\ninclude_directories( ${Boost_INCLUDE_DIRS})\nendif()\n\noption(WITH_LUDII \"Include LUDII support\" ON)\n\nif(WITH_LUDII)\n  find_package(JNI)\n  if (JNI_FOUND)\n      include_directories( ${JNI_INCLUDE_DIRS})\n  else()\n      message(STATUS \"Java not found, LUDII support will not be included\")\n      add_definitions(-DNO_JAVA)\n  endif()\nelse()\n  add_definitions(-DNO_JAVA)\nendif()\n\nmessage(STATUS \"Adding PyTorch compilation flags: ${TORCH_CXX_FLAGS}\")\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}\")\n\nadd_subdirectory(src)\n\n# add Minesweeper benchmarks\nadd_subdirectory(src/games/minesweeper_csp_vkms)\n\n# tests\nadd_executable(test_state src/core/test_state.cc src/core/state.cc)\ntarget_link_libraries(test_state PUBLIC _tube _mcts _games ${JNI_LIBRARIES})\n\nenable_testing()\n\nadd_test(NAME test_replay_buffer\n    COMMAND ${PYTHON_EXECUTABLE} -m test_replay_buffer\n    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/tests/python)\n\nset_tests_properties(test_replay_buffer\n    PROPERTIES ENVIRONMENT \"PYTHONPATH=${PROJECT_SOURCE_DIR}:$ENV{PYTHONPATH}\")\n\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Code of Conduct\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as\ncontributors and maintainers pledge to make participation in our project and\nour community a harassment-free experience for everyone, regardless of age, body\nsize, disability, ethnicity, sex characteristics, gender identity and expression,\nlevel of experience, education, socio-economic status, nationality, personal\nappearance, race, religion, or sexual identity and orientation.\n\n## Our Standards\n\nExamples of behavior that contributes to creating a positive environment\ninclude:\n\n* Using welcoming and inclusive language\n* Being respectful of differing viewpoints and experiences\n* Gracefully accepting constructive criticism\n* Focusing on what is best for the community\n* Showing empathy towards other community members\n\nExamples of unacceptable behavior by participants include:\n\n* The use of sexualized language or imagery and unwelcome sexual attention or\n  advances\n* Trolling, insulting/derogatory comments, and personal or political attacks\n* Public or private harassment\n* Publishing others' private information, such as a physical or electronic\n  address, without explicit permission\n* Other conduct which could reasonably be considered inappropriate in a\n  professional setting\n\n## Our Responsibilities\n\nProject maintainers are responsible for clarifying the standards of acceptable\nbehavior and are expected to take appropriate and fair corrective action in\nresponse to any instances of unacceptable behavior.\n\nProject maintainers have the right and responsibility to remove, edit, or\nreject comments, commits, code, wiki edits, issues, and other contributions\nthat are not aligned to this Code of Conduct, or to ban temporarily or\npermanently any contributor for other behaviors that they deem inappropriate,\nthreatening, offensive, or harmful.\n\n## Scope\n\nThis Code of Conduct applies within all project spaces, and 
it also applies when\nan individual is representing the project or its community in public spaces.\nExamples of representing a project or community include using an official\nproject e-mail address, posting via an official social media account, or acting\nas an appointed representative at an online or offline event. Representation of\na project may be further defined and clarified by project maintainers.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be\nreported by contacting the project team at <opensource-conduct@fb.com>. All\ncomplaints will be reviewed and investigated and will result in a response that\nis deemed necessary and appropriate to the circumstances. The project team is\nobligated to maintain confidentiality with regard to the reporter of an incident.\nFurther details of specific enforcement policies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good\nfaith may face temporary or permanent repercussions as determined by other\nmembers of the project's leadership.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,\navailable at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html\n\n[homepage]: https://www.contributor-covenant.org\n\nFor answers to common questions about this code of conduct, see\nhttps://www.contributor-covenant.org/faq\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) Facebook, Inc. and its affiliates.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "[![CircleCI](https://circleci.com/gh/facebookincubator/Polygames.svg?style=svg)](https://circleci.com/gh/facebookincubator/Polygames)\n\n# Polygames\n\nThis README is a work in progress, please feel very free to post issues - we are happy to help.\nSave up computational power: you can find checkpoints here: http://dl.fbaipublicfiles.com/polygames/checkpoints/list.txt (feel free to open an issue for discussing which checkpoint you should use for which game/problem!).\n\nFor Nix users: see [this doc](./nix/README.md).\n\n## Requirement:\n```\nC++17 compatible compiler\nminiconda3\n```\n\n## Compilation Guide:\n\n### First install conda and pytorch\n\nCreate a fresh conda environment with python3.7, install pytorch and dependencies.\n\n```\n# create a fresh conda environment with python3\n# you will need to have miniconda3 set up\nconda create --name [your env name] python=3.7 pip\n\nconda activate [your env name] # Or source activate [your env name], depending on conda version.\n\nconda install numpy pyyaml mkl mkl-include setuptools cmake cffi typing\nconda install pytorch cudatoolkit=10.1 -c pytorch\nconda install -c conda-forge tensorboardx\nconda install -c conda-forge openjdk  # optional\nconda install -c conda-forge graphviz # optional\n\npip install visdom\npip install torchviz\t\t\t\t  # optional\n\n```\n\n### Clone the repo and build\n\n\n```\ngit clone --recursive https://github.com/facebookincubator/polygames\ncd polygames\n\nmkdir build\ncd build\n\ncmake .. 
-DCMAKE_BUILD_TYPE=relwithdebinfo -DPYTORCH15=ON\nmake -j\n\n```\n\nLudii support can be disabled by appending `-DWITH_LUDII=OFF` to the cmake command (required if you don't have jdk)\n\n## Content\n\nThe repo contains mostly the following folders:\n\n- the `pypolygames` python package, which serves as an entry point for the application\n- the `src` folder, containing all C++ source code and third party libraries\n  - the `src/games` folder, containing the games coded in C++\n\n## How to use the application\n\nThe application is launched from the `pypolygames` python package, in either of the following modes:\n\n- `pypolygames train` (training mode): a game and a model (as well as several other options, see below) are chosen and the model is iteratively trained with MCTS\n- `pypolygames eval` (evaluation mode): the model confronts either a pure MCTS or another neural network powered MCTS. The evaluation of a training can be done either offline (from checkpoints periodically saved) or in real time; in that case, the evaluation considers only the most recent checkpoint in order to follow closely the training, skipping some checkpoints in case the eval computation takes longer than the time between consecutive checkpoints. It is displayed through visdom.\n- `pypolygames traineval` (training + evaluation mode): it mixes the two previous modes and allows launching one command instead of two. 
With the `real_time` option the modes can be launched in parallel instead of sequentially.\n- `pypolygames human` (human mode): a human player plays against the machine\n\nWhen a training is launched, it creates a `game_GAMENAME_model_MODELNAME_feat_FEATURIZATION_GMT_YYYYMMDDHHMMSS` within the `save_dir` where it will log relevant files:\n- `model.pt`\n- `train.log`\n- `stat.tb`\n- `checkpoints_EPOCH.pt` for checkpoints saved each `saving_period` epoch (e.g., if `saving_period == 10`, `checkpoints_0.pt`, `checkpoints_10.pt`, `checkpoints_20.pt`, `checkpoints_30.pt`)\n\nThis directory will be the `checkpoint_save_dir` directory used by evaluation to retrieve the checkpoints to perform eval computation.\n\n### Parameters\n\nThe list of parameters for each mode is available with\n\n```\npython -m pypolygames {train,eval,traineval,human} --help\n```\n\n#### Threads\n\nIn train (resp. eval) mode, `num_game * num_actor` (resp. `num_game * num_actor_eval * num_actor_opponent`) is the total number of threads. The more `num_actor` (and `num_actor_eval`, `num_actor_opponent`), the larger the MCTS is for a given player.\n\nIn human mode, since `num_game` is set to one, for leveraging the computing power available on the platform, a rule-of-thumb is to set `num_actor` to 5 times the number of CPUs available (it is platform-dependent though, and performance tests should be done).\n\n### Model zoo\n\nAll models can be found in `pypolygames/model_zoo`. 
They come with a set of sensible parameters that can be customized as well as default games.\n\nUsually models come in pairs: `MODELNAMEFCLogitModel` and `MODELNAMEConvLogitModel`:\n\n- `FCLogit` models use a fully-connected layer for logit inference and are compatible with all games\n- `ConvLogit` models use a convolutional layer for logit inference and are only compatible with games whose action space is of the same dimensions as their input space (an exception will be raised in case of an attempt to use an incompatible game)\n\nSo far the models being implemented are the following:\n\n- `GenericModel`: generic model compatible with all games, default when no `model_name` is specified\n- `NanoFCLogitModel`: a simple model with a logit-inference fully-connected layer\n- `NanoConvLogitModel`: a simple model with a logit-inference convolutional layer\n- `ResConvFCLogitModel`: resnets with a logit-inference fully-connected layer\n- `ResConvConvLogitModel`: resnets with a logit-inference convolutional layer\n- `UConvFCLogitModel`: unets (direct paths between first and last layers) with a logit-inference fully-connected layer\n- `UConvConvLogitModel`: unets (direct paths between first and last layers) with a logit-inference convolutional layer\n- `AmazonsModel`: only for the Amazons game\n\nDepending on the actual model chosen, some parameters might not have any use.\n\n### Featurization\n\n```\n--out_features=True: the input to the NN includes a channel with 1 on the frontier.\n--turn_features=True: the input to the NN includes a channel with the player index broadcasted.\n--geometric_features=True: the input to the NN includes 4 geometric channels representing the position on the board.\n--random_features=4: the input to the NN includes 4 random features.\n--one_feature=True: the input to the NN includes a channel with 1 everywhere.\n--history=3: the representation from the last 3 steps is added in the featurization.\n```\n### Examples\n\nRun the following command before 
running the code\n```\nexport OMP_NUM_THREADS=1\n```\n\n#### Examples for the training mode\n\n- Launch the game `Connect4` with the `GenericModel`\n```\npython -m pypolygames train --game_name=\"Connect4\"\n```\n\n- Launch a game with a specific model and specific parameters\n```\npython -m pypolygames train --game_name=\"Connect4\" --out_features=True \\\n    --model_name=\"UConvFCLogitModel\" \\\n    --nnsize=16 \\\n    --nnks=3 \\\n    --pooling\n```\n\n- Save checkpoints every 20 epochs in a specific folder\n```\npython -m pypolygames train --game_name=\"Connect4\" --model_name=\"UConvFCLogitModel\" \\\n    --saving_period=20 \\\n    --save_dir=\"/checkpoints\"\n```\n\n- Run training on GPU for a max time\n```\npython -m pypolygames train --game_name=\"Connect4\" --model_name=\"UConvFCLogitModel\" \\\n    --device=\"cuda:0\" \\\n    --max_time=3600\n```\n\n- Resume training from a given epoch\n```\npython -m pypolygames train \\\n    --save_dir=\"/checkpoints/game_Connect4_model_GenericModel_feat..._GMT_20190717103728\" \\\n    --init_epoch=42\n```\n\n- Initiate from a pretrained model\n```\npython -m pypolygames train --init_checkpoint=\"path/to/pretrained_model.pt\" \\\n    --lr=0.001\n```\n\nNote that any checkpoint can serve as a pretrained model\n\n- Train on multiple GPUs\n```\npython -m pypolygames train --init_checkpoint \"path/to/pretrained_model.pt\" \\\n    --device cuda:0 cuda:1 cuda:2 cuda:3 cuda:4\n```\n\nIn this case `cuda:0` will be used for training the model while `cuda:1`, `cuda:2`, `cuda:3` and `cuda:4` will be used for generating games. If there is only one device specified, it will be used for both purposes.\n\nNotes:\n\n- By default, the number of threads used for processing and batch sizes for inference are set automatically. 
These can be overridden with `num_thread` and `per_thread_batchsize` respectively.\n- `num_game` specifies the number of \"master\" threads scheduling games, and the total number of games being run in parallel will be `num_game * per_thread_batchsize`. Since `per_thread_batchsize` is automatically determined by default, this could be a large number in some instances.\n\n#### Examples for the evaluation mode\n\n- Run offline evaluation\n```\npython -m pypolygames eval \\\n    --checkpoint_dir=\"/checkpoints/game_Connect4_model_GenericModel_feat..._GMT_20190717103728\"\n```\n\n- Plot evaluation on `http://localhost:10000` at the same time as training happens (training needs to be run from another process)\n```\npython -m pypolygames eval \\\n    --checkpoint_dir=\"/checkpoints/game_Connect4_model_GenericModel_feat..._GMT_20190717103728\" \\\n    --real_time \\\n    --plot_enabled \\\n    --plot_port=10000\n```\n\n- Run evaluation on cpu with 100 games per evaluation, the pure-MCTS opponent playing 1000 rollouts while the model plays 400 rollouts\n```\npython -m pypolygames eval \\\n    --checkpoint_dir=\"/checkpoints/game_Connect4_model_GenericModel_feat..._GMT_20190717103728\" \\\n    --device_eval=\"cpu\" \\\n    --num_game_eval=100 \\\n    --num_rollouts_eval=400 \\\n    --num_actor_eval=8 \\\n    --num_rollouts_opponent=1000 \\\n    --num_actor_opponent=8\n```\n\n- A specific checkpoint plays against another neural-network-powered MCTS\n```\npython -m pypolygames eval \\\n    --checkpoint=\"/checkpoints/checkpoint_600.zip\" \\\n    --num_rollouts_eval=400 \\\n    --num_actor_eval=8 \\\n    --checkpoint_opponent=\"/checkpoints/checkpoint_200.zip\" \\\n    --num_rollouts_opponent=1000 \\\n    --num_actor_opponent=8\n```\n\n- Four GPUs are used for evaluating the model, all for inference\n```\npython -m pypolygames eval \\\n    --checkpoint=\"/checkpoints/checkpoint_600.zip\" \\\n    --device_eval cuda:0 cuda:1 cuda:2 cuda:3 \\\n    --num_rollouts_eval=400 \\\n    
--num_actor_eval=8 \\\n    --num_rollouts_opponent=1000 \\\n    --num_actor_opponent=8\n```\n\nNotes:\n\n- `num_actor_eval`, `num_rollouts_eval`, `num_actor_opponent` and `num_rollouts_opponent` are independent from the values used during training; in particular for proper benchmarking `num_actor_eval` and `num_rollouts_eval` should be set to the values used in human mode\n- `num_game_eval * num_actor_eval` (resp. `num_game_eval * num_actor_opponent`) is the number of threads used by the model to be evaluated (resp. the opponent)\n- there is no `per_thread_batchsize` in this mode\n- the higher `num_actor_eval` (resp. `num_actor_opponent`), the larger MCTS for a move in a given game will be, up to a limit where overheads between threads lead to decreasing returns. Empirically this limit seems to be around 8. This limit may be game/model/platform dependent and should be tuned for a given instance.\n- against a pure MCTS opponent, `num_rollouts_opponent` should be set significantly higher than `num_rollouts_eval`\n\n#### Examples for the training+evaluation mode\n\n- Run first training then evaluation on the last checkpoint\n```\npython -m pypolygames traineval --game_name=\"Connect4\" \\\n    --save_dir=\"/checkpoints\" \\\n    --num_epoch=1000\n```\n\n- Plot evaluation on `http://localhost:10000` at the same time as training happens\n```\npython -m pypolygames traineval --game_name=\"Connect4\" \\\n    --save_dir=\"/checkpoints\" \\\n    --real_time \\\n    --plot_enabled \\\n    --plot_port=10000\n```\n\n#### Examples for the human mode\n\n- Play Connect4 against a pure MCTS as the second player with 8 threads\n```\npython -m pypolygames human --game_name=\"Connect4\" \\\n    --pure_mcts \\\n    --num_actor 8\n```\n\n- Play Connect4 against a pretrained model as the second player\n```\npython -m pypolygames human \\\n    --init_checkpoint=\"/checkpoints/checkpoint_600.zip\" \\\n    --human_first\n```\n\n- Play with a timer, each side having 1800s in total, 
and the model playing each move with 0.07 of the remaining time\n```\npython -m pypolygames human \\\n    --init_checkpoint=\"/checkpoints/checkpoint_600.zip\" \\\n    --total_time=1800 \\\n    --time_ratio=0.07\n```\n\n- The model uses four GPUs, all for inference\n```\npython -m pypolygames human \\\n    --init_checkpoint \"/checkpoints/checkpoint_600.zip\" \\\n    --device cuda:0 cuda:1 cuda:2 cuda:3\n```\n\n- The model uses four GPUs, all for inference, and uses the text protocol (actions are represented by x y z, each on one line):\n```\npython -m pypolygames tp \\\n    --init_checkpoint \"/checkpoints/checkpoint_600.zip\" \\\n    --device cuda:0 cuda:1 cuda:2 cuda:3\n```\n\nNotes:\n\n- in human mode, the model being fixed, the goal is to maximize performance given the platform running the model\n- the most effective way to improve model performance is to increase the MCTS size\n- as for training and evaluation, but given that there is only one game played, `num_actor` is the total number of threads\n- the higher `num_actor`, the larger the MCTS, up to a limit where overheads between threads lead to decreasing returns. Empirically this limit seems to be around 8. This limit may be game/model/platform dependent and should be tuned for a given instance.\n- in a time-limited game `num_rollouts` should not be specified as it is maximized within each `time_ratio` * remaining time period\n\n### Examples for converting models\n\nSaved checkpoints of models also store details about the game for which they were trained, and can only be used directly for the\ngame in which they were trained. This is why `eval` runs do not require the `--game_name` to be specified; this is inferred from\nthe model. 
The `pypolygames convert` command can be used to convert models to different games.\n\n- Fully automated convert between games:\n\n```\npython -m pypolygames convert \\\n    --init_checkpoint \"/checkpoints/checkpoint_600.pt.gz\" \\\n\t--game_name=\"LudiiGomoku.lud\" \\\n\t--out=\"/checkpoints/converted/XToGomoku.pt.gz\"\n```\n\nThis takes the previously-trained model stored in `\"/checkpoints/checkpoint_600.pt.gz\"`,\nmodifies it such that it can be used to play the Ludii implementation of Gomoku, and stores\nthis modified version of the model in the new file `\"/checkpoints/converted/XToGomoku.pt.gz\"`.\n\nThis works best when using neural network architectures that are compatible with arbitrary\nboard shapes (such as `ResConvConvLogitPoolModel`), and source and target games that have\nidentical numbers of channels for state and move tensors, as well as identical semantics for\nthose channels. For instance, the Ludii implementation of Yavalath has the same number of\nchannels with identical semantics (in the same order) as Gomoku. Therefore, if the source model\nin `\"/checkpoints/checkpoint_600.pt.gz\"` was trained using `--model_name=ResConvConvLogitPoolModel`\nand `--game_name=\"LudiiYavalath.lud\"`, this conversion can be performed directly without having\nto delete any parameters or add any new parameters.\n\n- Fully automated convert between game options:\n\n```\npython -m pypolygames convert \\\n    --init_checkpoint \"/checkpoints/checkpoint_600.pt.gz\" \\\n\t--game_options=\"Board Size/19x19\" \\\n\t--out=\"/checkpoints/converted/Gomoku/15x15_to_19x19.pt.gz\"\n```\n\nThis example will convert the source checkpoint `\"/checkpoints/checkpoint_600.pt.gz\"`\ninto a model that can be used in a game loaded with the additional \n`--game_options=\"Board Size/19x19\"` argument. 
For example, `--game_name=LudiiGomoku.lud`\nis by default played on a 15x15 board, but can be played on a larger 19x19 board with\nthe `--game_options=\"Board Size/19x19\"` argument.\n\nNote that the convert command only takes game options into account if some form of\n`--game_options` is explicitly provided among the command line arguments. This means that, if\na model was first trained for `--game_options=Board Size/19x19`, and the goal is to convert\nit into one for the default board size of 15x15, it is still necessary to provide either \n`--game_options` (without any values after it) or `--game_options=Board Size/15x15`\nto the convert script. This tells it that the goal is indeed to revert to default options,\nrather than just leaving whichever options were baked into the source model.\n\n### Examples for generating figures of models\n\nIf the optional `graphviz` and `torchviz` dependencies are installed, we can use `torchviz`\nto automatically generate figures of our models. This can be done using `draw_model` script:\n\n```\npython -m pypolygames draw_model \\\n\t--game_name=\"Hex5pie\" \\\n\t--model_name=\"ResConvConvLogitPoolModelV2\" \\\n\t--out=\"/private/home/$USER/ImageName\"\n```\n\nThis command will generate an image of the `ResConvConvLogitPoolModelV2`\narchitecture when playing `Hex5pie`, and save it to `/private/home/$USER/ImageName.png`\n(note that the `.png` extension will be automatically appended).\n\nAny arguments that can be used to modify the game, or any aspect of the Neural Network\narchitecture, can be used in this command.\n\n### Running games through Ludii\n\nSee [detailed documentation on the Ludii integration here](./src/games/ludii/).\n\n## Contributing\n\nWe welcome contributions! Please check basic instructions [here](.github/CONTRIBUTING.md)\n\n## Initial contributors\n\nContributors to the early version of Polygames (before open source release) include:\n\nTristan Cazenave, Univ. 
Dauphine; Yen-Chi Chen, National Taiwan Normal University; Guan-Wei Chen, National Dong Hwa University; Shi-Yu Chen, National Dong Hwa University; Xian-Dong Chiu, National Dong Hwa University; Julien Dehos, Univ. Littoral Cote d’Opale; Maria Elsa, National Dong Hwa University; Qucheng Gong, Facebook AI Research; Hengyuan Hu, Facebook AI Research; Vasil Khalidov, Facebook AI Research; Chen-Ling Li, National Dong Hwa University; Hsin-I Lin, National Dong Hwa University; Yu-Jin Lin, National Dong Hwa University; Xavier Martinet, Facebook AI Research; Vegard Mella, Facebook AI Research; Jeremy Rapin, Facebook AI Research; Baptiste Roziere, Facebook AI Research; Gabriel Synnaeve, Facebook AI Research; Fabien Teytaud, Univ. Littoral Cote d’Opale; Olivier Teytaud, Facebook AI Research; Shi-Cheng Ye, National Dong Hwa University; Yi-Jun Ye, National Dong Hwa University; Shi-Jim Yen, National Dong Hwa University; Sergey Zagoruyko, Facebook AI Research\n\n## License\n\n`polygames` is released under the MIT license. See [LICENSE](LICENSE) for additional details about it.\nThird-party libraries are also included under their own license.\n"
  },
  {
    "path": "build.sh",
    "content": "#!/bin/sh\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nexport OMP_NUM_THREADS=1\nmkdir -p build\ncd build\ncmake .. -DCMAKE_EXPORT_COMPILE_COMMANDS=1\nmake -j $(($(nproc) + 1))\ncd ..\n"
  },
  {
    "path": "littlegolem/play_littlegolem.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport requests\nfrom time import sleep \nfrom bs4 import BeautifulSoup\nimport argparse\nimport os\n\n\ndef lg_connect(thelogin, thepassword):\n    ''' Send login request and get session/login cookies. '''\n    myresponse = requests.post(\"https://www.littlegolem.net/jsp/login/index.jsp\",\n            data = {'login': thelogin, 'password': thepassword})\n    if myresponse.status_code != requests.codes.ok:\n        raise ConnectionError(\"failed to request littlegolem (lg_connect)\")\n    if not 'login2' in myresponse.cookies:\n        raise ConnectionError(\"failed to connect as '{}' (lg_connect)\".format(thelogin))\n    return myresponse.cookies\n\ndef lg_clean_str(thestr):\n    ''' Remove \\t, \\n, \\r and # in a string. '''\n    return thestr.translate({ord(c): None for c in '\\t\\n\\r#'})\n\ndef lg_get_onmove_games(thecookies):\n    ''' Get the list of games \"on move\". Return a list [(game id, game name)]. '''\n    myresponse = requests.get(\"http://www.littlegolem.net/jsp/game/\", \n            cookies = thecookies)\n    if myresponse.status_code != requests.codes.ok:\n        raise ConnectionError(\"failed to request littlegolem (lg_get_onmove_games)\")\n    myhtml = BeautifulSoup(myresponse.text, \"html.parser\")\n    mydivs = myhtml.select(\"div.portlet.box.blue-madison\")\n    if not mydivs:\n        return []\n    mytrs = mydivs[0].select(\"tbody\")[0].select(\"tr\")\n    return [ (lg_clean_str(mytds[0].a.text), mytds[4].text) \n             for mytds in (mytr.select(\"td\") for mytr in mytrs) ]\n\ndef lg_get_hsgf(thegid):\n    ''' Get the description of a game, in the hsgf format. 
'''\n    myurl = \"http://www.littlegolem.net/servlet/sgf/{}/game{}.hsgf\".format(thegid, thegid)\n    myresponse = requests.get(myurl)\n    if myresponse.status_code != requests.codes.ok:\n        raise ConnectionError(\"failed to request littlegolem (lg_get_hsgf)\")\n    #import pdb;pdb.set_trace()\n    return myresponse.text\n\ndef lg_play(thecookies, thegid, themove):\n    myurl = \"http://www.littlegolem.net/jsp/game/game.jsp?sendgame={}&sendmove={}\".format(thegid, themove)\n    myresponse = requests.post(myurl, cookies = thecookies)\n    if myresponse.status_code != requests.codes.ok:\n        raise ConnectionError(\"failed to request littlegolem (lg_play)\")\n\ndef einstein_convert_txt_to_polygames(myhsgf, gid):\n    #requests.get(\"http://www.littlegolem.net/jsp/game/game.jsp?gid=2127403\").text\n    myurl = \"http://www.littlegolem.net/jsp/game/game.jsp?gid={}\".format(gid) \n    myresponse = requests.get(myurl)\n    if myresponse.status_code != requests.codes.ok:\n        raise ConnectionError(\"failed to request littlegolem (einstein html)\")\n    myhtml = BeautifulSoup(myresponse.text, \"html.parser\")\n    imgs = myhtml.select(\"img\")\n    assert(len(imgs) == 32)\n    # 2 is dice img\n    # 3-27 is number img\n\n    num_myhsgf = len(myhsgf.split(\"/\"))\n\n    turn = num_myhsgf % 2\n    dice = int(imgs[2]['src'][28])\n    assert(dice >= 1 and dice <= 6)\n    state_str = \"\"\n    for i in range(25):\n      current = None\n      num_img = imgs[i + 3]['src']\n      color = None\n      num = None\n      if len(num_img) < 27:\n        color = num_img[18]\n      else:\n        color = num_img[27]\n        num = num_img[29]\n      if color == 'b':\n        current = chr(ord(\"A\") + int(num) - 1)\n      elif color == 'r':\n        current = chr(ord(\"a\") + int(num) - 1)\n      elif color == '0':\n        current = \"0\"\n      else:\n        print(\"parse image error, unexpected color\")\n        assert(False)\n      state_str += current\n    s = \"\"\n   
 s += str(dice) + \"\\n\" #input dice value\n    s += \"m\\n\"  # we switch to manual mode\n    s += \"singlemovemode\\n\" # make one move, print it, exit\n    s += \"set_\" + state_str + str(turn) + \"\\n\" # set state string\n    if turn == 0:  # By default we assume that we play first.\n        s += \"swap\\n\"  # Please note that this has nothing to do with the pie rule.\n    s += \"c\\n\"  # Resume; this is the genmove.\n    s += str(dice) + \"\\n\" #input dice value (unused)\n    #s += \"exit\\n\" # Safety exit\n    return s, state_str, dice, turn\n\n\n#[Event \"Tournament null\"] \n#[Site \"www.littlegolem.net\"] \n#[White \"luffy_bot\"] \n#[Black \"gzero_bot\"] \n#[Result \"0-1\"] \n#1. h2-g3 e7-e6 2. a2-b3 g7-f6 3. b3-c4 f6-e5 4. f2-e3 b7-c6 5. g3-f4 f7-f6 6. g2-f3 h7-g6 7. e1-f2 a7-b6 8. h1-g2 b6-c5 9. d2-c3 h8-g7 10. c2-d3 e8-f7 11. b2-b3 c7-d6 12. b3-b4 a8-b7 13. d3-d4 c5xd4 14. c3xd4 g6-g5 15. f2-g3 g7-g6 16. b4-a5 c6-c5 17. d4xc5 d6xc5 18. d1-d2 d7-c6 19. f3-e4 g6-f5 20. e2-f3 f7-g6 21. a1-b2 d8-c7 22. b2-c3 g6-h5 23. e4xf5 e6xf5 24. d2-d3 h5-h4 25. g3xh4 e5xf4 26. 
resign 0-1\n\ndef breakthrough_convert_txt_to_polygames(txt):\n                    turn = 0\n                    last_action = None\n                    s = \"m\\n\"  # we switch to manual mode\n                    s += \"singlemovemode\\n\" # make one move, print it, exit\n                    elements = txt.split(\".\")\n                    swapped = False\n                    print(txt)\n                    for e in elements:\n                      if e[0] != \" \":\n                        continue\n                      if e[2:] == \"resign])\":\n                        s += \"exit\\n\"  # We stop everything.\n                        continue\n                      if len(e) < 6 or (e[3] != \"-\" and e[3] != \"x\"):\n                        continue\n                      es = e.split()\n                      e0 = es[0]\n                      e1 = es[1]\n                      #print(e0)\n                      #print(e1)\n                      y = ord(e0[0]) - ord('a')\n                      z = 8 - int(e0[1])\n                      x = ord(e0[3]) - ord('a') - y + 1\n                      last_action = str(x) + str(y) + str(z)\n                      s += last_action + \"\\n\"\n                      turn = 1 - turn\n                      if e1 == \"*\":\n                        continue\n                      else:\n                        y = ord(e1[0]) - ord('a')\n                        z = 8 - int(e1[1])\n                        x = ord(e1[3]) - ord('a') - y + 1\n                        last_action = str(x) + str(y) + str(z)\n                        s += last_action + \"\\n\" \n                        turn = 1 - turn\n                    if turn == 0:  # By default we assume that we play first.\n                        s += \"swap\\n\"  # Please note that this has nothing to do with the pie rule.\n                    s += \"c\\n\"  # Resume; this is the genmove.\n                    s += \"exit\\n\" # Safety exit\n                    #print(\"turn is\" + 
str(turn))\n                    return s, swapped, turn\n    \ndef hex_convert_hsgf_to_polygames(hsgf):\n                    turn = 0\n                    last_action = None\n                    s = \"m\\n\"  # we switch to manual mode\n                    s += \"singlemovemode\\n\" # make one move, print it, exit\n                    elements = hsgf.split(\";\")\n                    swapped = False\n                    print(hsgf)\n                    for e in elements:\n                      if e[2:] == \"resign])\":\n                        s += \"exit\\n\"  # We stop everything.\n                        continue\n                      if len(e) < 5:\n                        continue\n                      if e[2:6] == \"swap\":\n                        # swap is implemented differently in littlegolem and polygames.\n                        # we convert by flipping all remaining moves along the long diagonal.\n                        swapped = True\n                        s += last_action + \"\\n\"\n                        turn = 1 - turn\n                        continue\n                      if (e[0] == \"W\" or e[0] == \"B\") and e[1] == \"[\" and e[4] == \"]\":\n                        x = ord(e[2]) - ord('a')\n                        y = ord(e[3]) - ord('a')\n                        if swapped:\n                          x, y = y, x\n                        last_action = chr(ord('a') + x) + str(1 + y)\n                        s += last_action + \"\\n\"  # Swap is implemented as replaying the last action in Hex.\n                        turn = 1 - turn\n                    if turn == 0:  # By default we assume that we play first.\n                        s += \"swap\\n\"  # Please note that this has nothing to do with the pie rule.\n                    s += \"c\\n\"  # Resume; this is the genmove.\n                    s += \"exit\\n\" # Safety exit\n                    return s, swapped, last_action\n                
#(;FF[4]EV[null]PB[leela_bot]PW[gzero_bot]SZ[13]RE[B]GC[ game #2103276]\n                #SO[http://www.littlegolem.com];W[ma];B[swap];W[jd];B[ej];W[ji];B[if];\n                #W[ck];B[ed];W[di];B[he];W[hf];B[ie];W[dd];B[ec];W[cc];B[cj];W[ef];B[dl];\n                #W[ke];B[jg];W[cl];B[dj];W[le];B[kd];W[je];B[lf];W[ig];B[jf];W[hg];B[db];\n                #W[ff];B[hb];W[cb];B[resign])\n\ndef havannah_convert_hsgf_to_polygames(hsgf, boardsize):\n                    turn = 0\n                    last_action = None\n                    s = \"m\\n\"  # we switch to manual mode\n                    s += \"singlemovemode\\n\" # make one move, print it, exit\n                    elements = hsgf.split(\";\")\n                    swapped = False\n                    print(hsgf)\n                    for e in elements:\n                      if e[2:] == \"resign])\":\n                        s += \"exit\\n\"  # We stop everything.\n                        continue\n                      if len(e) < 5:\n                        continue\n                      if e[2:6] == \"swap\":\n                        # swap is implemented differently in littlegolem and polygames.\n                        # we convert by flipping all remaining moves along the long diagonal.\n                        swapped = True\n                        s += last_action + \"\\n\"\n                        turn = 1 - turn\n                        continue\n                      if (e[0] == \"W\" or e[0] == \"B\") and e[1] == \"[\" and (e[4] == \"]\" or e[5] == \"]\"):\n                        # in littlegolem x, y = y, x in polygames\n                        x = int(e[3]) \n                        if (e[4] != \"]\"):\n                            x = int(e[3:5])\n                        y = ord(e[2]) - ord('A') \n                        v = 1\n                        if (y >= boardsize):\n                            v = y - boardsize + 2\n                        last_action = str((x * -1)+(boardsize*2-v)) + 
\",\" + str(y)\n                        s += last_action + \"\\n\"  # Swap is implemented as replaying the last action in Hex.\n                        turn = 1 - turn\n                    if turn == 0:  # By default we assume that we play first.\n                        s += \"swap\\n\"  # Please note that this has nothing to do with the pie rule.\n                    s += \"c\\n\"  # Resume; this is the genmove.\n                    s += \"exit\\n\" # Safety exit\n                    return s, swapped, last_action\n\nif __name__ == \"__main__\":\n  parser = argparse.ArgumentParser(description='Play polygames on littlegolem.')\n  parser.add_argument('--username', type=str, help='Username for login')\n  parser.add_argument('--password', type=str, help='Password for login')\n  parser.add_argument('--hex11_model', type=str, help='Model to use for playing hex11pie')\n  parser.add_argument('--hex13_model', type=str, help='Model to use for playing hex13pie')\n  parser.add_argument('--havannah8_model', type=str, help='Model to use for playing havannah8pie')\n  parser.add_argument('--breakthrough_model', type=str, help='Model to use for playing breakthrough')\n  parser.add_argument('--havannah10_model', type=str, help='Model to use for playing havannah10pie')\n  parser.add_argument('--einstein_model', type=str, help='Model to use for playing einstein')\n\n  args = parser.parse_args()\n\n  try:\n    mylogin = args.username\n    mypassword = args.password\n    mycookies = lg_connect(mylogin, mypassword)\n    mygames = lg_get_onmove_games(mycookies)\n  except ConnectionError as e:\n    print(\"error:\", e)\n    exit(1)\n\n  if not mygames:\n    print(\"no turn to play\")\n  else:\n    played = []\n    not_played = []\n    for mygame in mygames:\n      try:\n        (mygid, mygname) = mygame\n        print(\"playing game #{} ({})...\".format(mygid, mygname))\n\n        myhsgf = lg_get_hsgf(mygid)\n        #if \"river\" not in myhsgf:    # uncomment this if you want to play 
only against someone with \"river\" in the name\n        #                             # (e.g. rookDriver, a.k.a the other Teytaud)\n        #  print(\"I do not play \", myhsgf)\n        #  not_played.append(mygname)\n        #  continue\n        print(\"I play \", myhsgf)\n\n        resign_score = -0.99\n        model_path = None\n        swapped = False\n        last_action = None\n        turn = None\n        state_str = None\n        dice = None\n        if mygname == \"Hex Size 11\" and args.hex11_model:\n          polygames_commands, swapped, last_action = hex_convert_hsgf_to_polygames(myhsgf)\n          model_path = args.hex11_model\n        elif mygname == \"Hex Size 13\" and args.hex13_model:\n          polygames_commands, swapped, last_action = hex_convert_hsgf_to_polygames(myhsgf)\n          model_path = args.hex13_model\n        elif mygname == \"Havannah Size 8\" and args.havannah8_model:\n          polygames_commands, swapped, last_action = havannah_convert_hsgf_to_polygames(myhsgf, 8)\n          model_path = args.havannah8_model\n        elif mygname == \"Breakthrough Size 8\" and args.breakthrough_model:\n          polygames_commands, swapped, turn = breakthrough_convert_txt_to_polygames(myhsgf)\n          model_path = args.breakthrough_model\n        elif mygname == \"Havannah Size 10\" and args.havannah10_model:\n          polygames_commands, swapped, last_action = havannah_convert_hsgf_to_polygames(myhsgf, 10)\n          model_path = args.havannah10_model\n        elif mygname[:7] == \"havannah\"[:7] and \"ize 8\" in mygname and args.havannah8_model:\n          polygames_commands, swapped, last_action = havannah_convert_hsgf_to_polygames(myhsgf, 8)\n          model_path = args.havannah8_model\n        elif mygname[:7] == \"havannah\"[:7] and \"ize 10\" in mygname and args.havannah10_model:\n          polygames_commands, swapped, last_action = havannah_convert_hsgf_to_polygames(myhsgf, 10)\n          model_path = args.havannah10_model\n        
elif mygname[:8] == \"EinStein würfelt nicht! 3-points match\"[:8] and args.einstein_model:\n          #pass in gid to handle specially for Einstein. e.g. parse board and dice value from html\n          polygames_commands, state_str, dice, turn = einstein_convert_txt_to_polygames(myhsgf, mygid)\n          model_path = args.einstein_model\n        else:\n          not_played.append(mygname)\n          continue\n        played.append(mygname)\n\n        # 60 seconds per move. Human first. 8 threads.\n        # Singularity command line below might be old fashioned ?\n        # command = \"singularity exec --nv --overlay overlay.img /checkpoint/polygames/polygames_190927.simg python -m pypolygames human --init_checkpoint \" + model_path\n        command = \"python -m pypolygames human --init_checkpoint \" + model_path\n        command += \" --total_time 60000 --time_ratio 0.01 --human_first --num_actor 8\"\n        import subprocess\n        command = \"echo -e \\\"\" + polygames_commands.translate({ord(c): '\\\\n' for c in '\\n'}) + \"\\\" | \" + command\n        print(command)\n\n        mcts_value = None\n        move = None\n        if mygname[:8] == \"EinStein würfelt nicht! 
3-points match\"[:8]:\n          # Unfortunately EinStein game needs special handling\n          # Somehow if I put -e here it gets passed to the program, need to investigate\n          command = \"echo\" + command[7:]\n          #print(command)\n          result = subprocess.check_output(command, shell=True)\n          mcts_value = result.splitlines()[-2].decode()\n          move = result.splitlines()[-4].decode()\n          print(move)\n          move_tokens = move.split()\n          origin = move_tokens[-3]\n          origin_num = int(origin[1])\n          target = move_tokens[-1]\n          origin_idx = -1\n          for i in range(5):\n            for j in range(5):\n              idx = i * 5 + j\n              if ord(state_str[idx]) - ord('a') == origin_num - 1 and origin[0] == 'x':\n                 origin_idx = idx\n                 break\n              if ord(state_str[idx]) - ord('A') == origin_num - 1 and origin[0] == 'o':\n                 origin_idx = idx\n                 break\n          #print(origin_idx)\n          char0 = chr(4 - origin_idx % 5 + ord('a'))\n          char1 = chr(4 - origin_idx // 5 + ord('a'))\n          char2 = chr(ord('E') - ord(target[0]) + ord('a'))\n          char3 = chr(5 - int(target[1]) + ord('a'))\n          move = char0 + char1 + char2 + char3\n        \n        else:\n          result = subprocess.check_output(command, shell=True)\n\n                                                                # Maybe \"cwd = '..' \" ? 
not if we assume\n                                                                # run from the root of polygames\n          #print(result)\n          (mcts_value, move) = [i.decode() for i in result.splitlines()[-2:]]\n\n        mcts_value = mcts_value.split(\":\")[-1]\n        print(\"MCTS value: \" + mcts_value)\n        print(\"Making move: \" + move)\n        print(\"in game \" + mygname)\n\n        mymove = None\n\n        if mygname == \"Hex Size 11\" or mygname == \"Hex Size 13\":\n          print(\"playing hex\")\n          x = ord(move[0]) - ord('a')\n          y = int(move[1:]) - 1\n          if swapped:\n            x, y = y, x # in littlegolem, swap is implemented by mirror move and not switching colors\n          mymove = chr(ord('a') + int(x)) + chr(ord('a') + int(y))\n          if last_action != None and last_action.lower() == move.lower():\n            mymove = \"swap\"\n        elif (mygname[:10] == \"Havannah Size 8\"[:10] or mygname[:4] == \"havannah.in\"[:4]) and \"Size 8\" in mygname:\n          print(\"playing havannah 8\")\n          boardsize = 8  # ONLY FOR SIZE 8\n          listmove = move.split(',')\n          x = int(listmove[0])\n          y = int(listmove[1])\n          mymove = chr(ord('a') + y + boardsize - 1) + chr(ord('a') + x + boardsize - 1) # ,11\n          if last_action != None and last_action.lower() == move.lower():\n            mymove = \"swap\"\n        elif (mygname[:4] == \"havannah.in\"[:4] or mygname[:10] == \"Havannah Size 10\"[:10]) and \"Size 10\" in mygname:\n          print(\"playing havannah 10\")\n          boardsize = 10  # ONLY FOR SIZE 10\n          listmove = move.split(',')\n          x = int(listmove[0])\n          y = int(listmove[1])\n          mymove = chr(ord('a') + y + boardsize - 5) + chr(ord('a') + x + boardsize - 5) # ,11\n          if last_action != None and last_action.lower() == move.lower():\n            mymove = \"swap\"\n        elif mygname == \"Breakthrough Size 8\":\n          boardsize 
= 8\n          listmove = move.split(',')\n          x = int(listmove[0])\n          y = int(listmove[1])\n          z = boardsize - int(listmove[2]) - 1\n          z1 = z + 1 if turn == 0 else z - 1\n          mymove = str(y) + str(z) + str(y + x - 1) + str(z1)\n        elif mygname[:8] == \"EinStein würfelt nicht! 3-points match\"[:8]:\n          mymove = move\n        else:\n          print(\"implement mymove for \" + mygname)\n          exit(1)\n\n        mymove = mymove.lower()\n\n        # No resign in Einstein, due to the complications on LittleGolem.\n        # Anyway, in a multigame (i.e. the best of k games), resigning is complicated and can not save up much time.\n        if float(mcts_value) < resign_score and \"nStein w\" not in mygname:\n          print(\"Resigning!\")\n          mymove = \"resign\"\n\n        print(\"Sending move \" + mymove)\n\n        lg_play(mycookies, mygid, mymove)\n\n      except ConnectionError as e:\n        print(\"error:\", e)\n        exit(1)\n    if len(played):\n      print(\"Made a move in \", played)\n    if len(not_played):\n      print(\"Did not make a move in \", not_played)\n\n# requests: https://2.python-requests.org/en/master/\n# beautifulsoup4: https://www.crummy.com/software/BeautifulSoup/bs4/doc/\n\n"
  },
  {
    "path": "nix/Dockerfile",
    "content": "# This Dockerfile configures a Debian system with Nix, builds Polygames and\n# runs some tests. To build this docker image, run:\n# `docker build -t polygames .`\n\n\n###############################################################################\n# Initialize the docker image. You can ignore this when installing on a real\n# system.\n###############################################################################\nFROM debian:buster\nENV DEBIAN_FRONTEND noninteractive\nRUN apt-get update -y\nRUN apt-get install -y git curl sudo xz-utils\nRUN useradd -ms /bin/bash -G sudo myuser\nRUN echo \"myuser ALL=(ALL) NOPASSWD:ALL\" >> /etc/sudoers\nUSER myuser\nENV USER=\"myuser\"\nENV HOME=\"/home/myuser\"\n\n\n###############################################################################\n# Install Nix.\n###############################################################################\nRUN curl https://nixos.org/releases/nix/latest/install | sh\n\n# For docker only:\nENV PATH=\"$HOME/.nix-profile/bin/:$PATH\"\n\n# On a real system, you have to run these two lines instead:\n#RUN echo \"source $HOME/.nix-profile/etc/profile.d/nix.sh\" >> $HOME/.bashrc\n#RUN source $HOME/.bashrc\n\n\n###############################################################################\n# Activate the cachix repo.\n###############################################################################\nRUN nix-env -iA nixpkgs.cachix\nRUN cachix use polygames\n\n\n###############################################################################\n# Get Polygames.\n###############################################################################\nWORKDIR $HOME\nRUN git clone https://github.com/facebookincubator/polygames.git\nWORKDIR $HOME/Polygames\n\n# On a real system with CUDA devices, you have to run `./nix/get-nvidia.sh`\n# here.\n\n\n###############################################################################\n# Build 
Polygames.\n###############################################################################\nRUN mkdir $HOME/Polygames/build\nWORKDIR $HOME/Polygames/build\nRUN nix-shell ../nix/shell-cpu.nix --run \"cmake -DPYTORCH12=ON .. ; make -j4\"\n\n\n###############################################################################\n# Run unit-tests.\n###############################################################################\nRUN mkdir $HOME/Polygames/tests/build\nWORKDIR $HOME/Polygames/tests/build\nRUN nix-shell ../../nix/shell-cpu.nix --run \"cmake .. ; make -j4 ; ./polygames-tests\"\n\n\n###############################################################################\n# Run tests.\n###############################################################################\nWORKDIR $HOME/Polygames/\nRUN nix-shell nix/shell-cpu.nix --run \"pytest pypolygames --durations=10 --verbose\"\n\n\n"
  },
  {
    "path": "nix/Dockerfile-centos7-nix",
    "content": "# docker build -t polygames-centos7-nix -f Dockerfile-centos7-nix .\n# docker run --rm -it polygames-centos7-nix\n# nix-shell nix/shell-cpu.nix --run \"python -m pypolygames train --game_name Hex11 --device=cpu\"\n\nFROM centos:centos7\nRUN yum update -y\nRUN yum install -y git curl sudo xz-utils cacert\nRUN useradd -ms /bin/bash -G wheel myuser\nRUN echo \"myuser ALL=(ALL) NOPASSWD:ALL\" >> /etc/sudoers\nUSER myuser\nENV USER=\"myuser\"\nENV HOME=\"/home/myuser\"\n\nRUN curl https://nixos.org/releases/nix/latest/install | sh\nRUN echo \"source $HOME/.nix-profile/etc/profile.d/nix.sh\" >> $HOME/.bashrc\nRUN source $HOME/.bashrc\nENV PATH=\"$HOME/.nix-profile/bin/:$PATH\"\nENV NIX_SSL_CERT_FILE=\"/etc/ssl/certs/ca-bundle.crt\"\n\nRUN nix-env -iA nixpkgs.cachix\nRUN cachix use polygames\n\nWORKDIR $HOME\nRUN git clone https://github.com/juliendehos/Polygames.git --branch=nix\n\nRUN mkdir $HOME/Polygames/build\nWORKDIR $HOME/Polygames/build\nRUN nix-shell ../nix/shell-cpu.nix --run \"cmake -DPYTORCH12=ON .. ; make -j4\"\nWORKDIR $HOME/Polygames/\n\n"
  },
  {
    "path": "nix/README.md",
    "content": "# Polygames for Nix users\n\n[Nix](https://nixos.org/) is a package manager that can be installed on any\nLinux distribution (you just need root permissions to create the `/nix`\ndirectory). Alternatively, NixOS is a Linux distribution based on Nix.\n\nPolygames is quite easy to use with Nix and NixOS, as explained below. See also\nthe [Dockerfile](./Dockerfile).\n\n\n## Get Polygames\n\n- Clone the repo:\n\n    ```\n    git clone https://github.com/facebookincubator/polygames.git\n    cd Polygames\n    ```\n\n\n## Configure your system (NixOS)\n\n- With NixOS, if you want to run Polygames on CPU, you have nothing to\n  configure.\n\n- If you want to run Polygames on CUDA devices, check that the Nvidia driver is\n  enabled in `/etc/nixos/configuration.nix` (don't forget to rebuild and reboot\n  the system, if necessary):\n\n    ```\n      services.xserver.videoDrivers = [ \"nvidia\" ];\n      nixpkgs.config.allowUnfree = true;\n    ```\n\n\n## Configure your system (Nix + Linux)\n\n- Install Nix:\n\n    ```\n    curl https://nixos.org/releases/nix/latest/install | sh\n    echo \"source $HOME/.nix-profile/etc/profile.d/nix.sh\" >> $HOME/.bashrc\n    source $HOME/.bashrc\n    ```\n\n- If you want to run Polygames on CUDA devices:\n\n    - Install the Nvidia driver using your Linux distribution.\n\n    - Check that `nvidia-smi` is also installed.\n\n    - Get the Nvidia driver version and write the `nvidia.json` config file:\n\n        ```\n        ./nix/get-nvidia.sh\n        ```\n\n\n## Build & run Polygames\n\n- Activate the binary cache (optional if you like compiling for hours):\n\n    ```\n    nix-env -iA nixpkgs.cachix\n    cachix use polygames\n    ```\n\n    > Warning: this cache provides pre-built binaries for CPU and for Nvidia\n    > 418.74 only but you can [build your own binary\n    > cache](README.md#build-your-own-binary-cache).\n\n- Open a nix-shell:\n\n    - CPU:\n\n        ```\n        nix-shell nix/shell-cpu.nix\n        ```\n\n    - 
CUDA:\n\n        ```\n        nix-shell nix/shell-cuda.nix\n        ```\n\n- Build Polygames:\n\n    ```\n    mkdir build\n    cd build\n    cmake -DPYTORCH12=ON ..\n    make -j4\n    cd ..\n    ```\n\n- Run Polygames:\n\n    - CPU:\n\n        ```\n        python -m pypolygames train --game_name=\"Connect4\" --device=cpu\n        ```\n\n    - CUDA:\n\n        ```\n        python -m pypolygames train --game_name=\"Connect4\" --device=cuda:0\n        ```\n\n## Build your own binary cache\n\nWhen you open a nix-shell for the first time, Nix/NixOS fetches and builds all\nthe required dependencies. [This cachix repo](https://polygames.cachix.org/)\nprovides some pre-built dependencies that should greatly speed-up the\ninstallation.\n\nHowever, if your Nvidia driver version is not in the cache, you have to build\nCUDA/Pytorch/etc. You can upload the built binaries to reuse them on other\nmachines:\n\n- Create a (free) account on [cachix](https://cachix.org/).\n\n- Create a repo on cachix.\n\n- Push your binaries to this repo:\n\n    ```\n    find /nix/store -maxdepth 1 -name \"*pytorch*\" -exec cachix push <my-cachix-repo> {} \\;\n    ```\n\n- Use your repo when you install/configure Polygames on a new machine:\n\n    ```\n    cachix use <my-cachix-repo>\n    ```\n\n"
  },
  {
    "path": "nix/get-nvidia.sh",
    "content": "#!/bin/sh\n\nnvidiaVersion=$(nvidia-smi --query-gpu=driver_version --format=csv | tail -n 1)\necho \"detected: ${nvidiaVersion}\"\n\nnvidiaUrl=\"http://download.nvidia.com/XFree86/Linux-x86_64/${nvidiaVersion}/NVIDIA-Linux-x86_64-${nvidiaVersion}.run\"\nnvidiaSha256=$(nix-prefetch-url ${nvidiaUrl})\n\nOUTNAME=\"nvidia.json\"\necho \"{\" > ${OUTNAME}\necho \"    \\\"nvidiaVersion\\\" : \\\"${nvidiaVersion}\\\",\" >> ${OUTNAME}\necho \"    \\\"nvidiaSha256\\\" : \\\"${nvidiaSha256}\\\"\" >> ${OUTNAME}\necho \"}\" >> ${OUTNAME}\necho \"${OUTNAME} written\"\n\n"
  },
  {
    "path": "nix/shell-cpu.nix",
    "content": "let\n  rev = \"dfa8e8b9bc4a18bab8f2c55897b6054d31e2c71b\";\n  channel = fetchTarball \"https://github.com/NixOS/nixpkgs/archive/${rev}.tar.gz\";\n  config = { allowUnfree = true; };\n  pkgs = import channel { inherit config; };\n\n  python = pkgs.python3;\n  pytorch = pkgs.python3Packages.pytorchWithoutCuda;\n  pybind11 = pkgs.pybind11;\n\n  tensorboardX = pkgs.python3Packages.buildPythonPackage rec {\n    pname = \"tensorboardX\";\n    version = \"1.8\";\n    src = fetchTarball \"https://github.com/lanpa/tensorboardX/archive/v1.8.tar.gz\";\n    propagatedBuildInputs = with pkgs.python3Packages; [\n      six\n      protobuf\n      numpy\n    ];\n    doCheck = false;\n  };\n\nin pkgs.mkShell {\n  name = \"Polygames-cpu\";\n  src = ./.;\n\n  buildInputs = [\n    pkgs.cmake\n    pkgs.czmq\n    pkgs.gtest\n    pkgs.python3Packages.pytest\n    pkgs.openjdk\n    pytorch\n    tensorboardX\n  ];\n\n  shellHook = ''\n      export CFLAGS=\"-I${pybind11}/include -I${pytorch}/${python.sitePackages}/torch/include -I${pytorch}/${python.sitePackages}/torch/include/torch/csrc/api/include\"\n      export CXXFLAGS=$CFLAGS\n      export LDFLAGS=\"-L${pytorch}/${python.sitePackages}/torch/lib -L$out/${python.sitePackages}\"\n      export PYTHONPATH=\"$PYTHONPATH:build:build/torchRL/mcts:build/torchRL/tube\"\n      export OMP_NUM_THREADS=1\n  '';\n}\n\n"
  },
  {
    "path": "nix/shell-cuda.nix",
    "content": "let\n\n  jsonPath = ../nvidia.json;\n  hasJson = builtins.pathExists jsonPath;\n  json = builtins.fromJSON (builtins.readFile jsonPath);\n\n  overlay = self: super:\n  {\n    linuxPackages = super.linuxPackages //\n    {\n      nvidia_x11 = (super.linuxPackages.nvidia_x11.override {\n      }).overrideAttrs(oldAttrs: rec {\n        version = json.nvidiaVersion;\n        name = \"nvidia-${version}\";\n        src = super.fetchurl {\n          url = \"http://download.nvidia.com/XFree86/Linux-x86_64/${version}/NVIDIA-Linux-x86_64-${version}.run\";\n          sha256 = json.nvidiaSha256;\n        };\n        useGLVND = true;\n      });\n    };\n  };\n\n  rev = \"dfa8e8b9bc4a18bab8f2c55897b6054d31e2c71b\";\n  channel = fetchTarball \"https://github.com/NixOS/nixpkgs/archive/${rev}.tar.gz\";\n  config = {\n    allowUnfree = true;\n    cudaSupport = true;\n    packageOverrides = pkgs: {\n      cudatoolkit = pkgs.cudatoolkit_10;\n      cudnn = pkgs.cudnn_cudatoolkit_10;\n    };\n  };\n  pkgs = import channel {\n    overlays = if hasJson then [overlay] else [];\n    inherit config;\n  };\n\n  python = pkgs.python3;\n  pytorch = pkgs.python3Packages.pytorchWithCuda;\n  pybind11 = pkgs.pybind11;\n\n  tensorboardX = pkgs.python3Packages.buildPythonPackage rec {\n    pname = \"tensorboardX\";\n    version = \"1.8\";\n    src = fetchTarball \"https://github.com/lanpa/tensorboardX/archive/v1.8.tar.gz\";\n    propagatedBuildInputs = with pkgs.python3Packages; [\n      six\n      protobuf\n      numpy\n    ];\n    doCheck = false;\n  };\n\nin pkgs.mkShell {\n  name = \"Polygames-cuda\";\n  src = ./.;\n\n  buildInputs = [\n    pkgs.boost\n    pkgs.cmake\n    pkgs.cudatoolkit\n    pkgs.cudnn\n    pkgs.czmq\n    pkgs.gtest\n    pkgs.linuxPackages.nvidia_x11\n    pkgs.python3Packages.pytest\n    pkgs.openjdk\n    pytorch\n    tensorboardX\n  ];\n\n  shellHook = ''\n      export CFLAGS=\"-I${pybind11}/include -I${pytorch}/${python.sitePackages}/torch/include 
-I${pytorch}/${python.sitePackages}/torch/include/torch/csrc/api/include\"\n      export CXXFLAGS=$CFLAGS\n      export LDFLAGS=\"-L${pytorch}/${python.sitePackages}/torch/lib -L$out/${python.sitePackages} -L${pkgs.cudatoolkit}/lib\"\n      export LD_LIBRARY_PATH=\"${pkgs.linuxPackages.nvidia_x11}/lib\"\n      export PYTHONPATH=\"$PYTHONPATH:build:build/torchRL/mcts:build/torchRL/tube\"\n      export OMP_NUM_THREADS=1\n  '';\n}\n\n"
  },
  {
    "path": "pypolygames/__init__.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport sys\n\n# disable CUDA cache as it can throw errors when doing distributed\n# training and the cache folder is on NFS\nos.environ['CUDA_CACHE_DISABLE'] = '1'\n\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ngame = os.path.join(root, \"build/src\")\nif game not in sys.path:\n    sys.path.append(game)\n\ntube = os.path.join(root, \"build/src\", \"tube\")\nif tube not in sys.path:\n    sys.path.append(tube)\npytube = os.path.join(root, \"src\", \"tube\")\nif pytube not in sys.path:\n    sys.path.append(pytube)\n\nmcts = os.path.join(root, \"build/src\", \"mcts\")\nif mcts not in sys.path:\n    sys.path.append(mcts)\n"
  },
  {
    "path": "pypolygames/__main__.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom pathlib import Path\nimport sys\nimport warnings\nimport time\nfrom dataclasses import astuple\nimport argparse\nfrom multiprocessing import Process\nfrom typing import Union, List\n\nfrom .params import (\n    GameParams,\n    ModelParams,\n    OptimParams,\n    SimulationParams,\n    ExecutionParams,\n    EvalParams,\n)\nfrom .utils import CommandHistory\nfrom .training import run_training\nfrom .evaluation import run_evaluation\nfrom .human import run_human_played_game\nfrom .human import run_tp_played_game\nfrom .convert import convert_checkpoint\nfrom .draw_model import draw_model\n\nDOC = \"\"\"The python package `pypolygames` can be used in either of the following modes:\n\n- `pypolygames train` (training mode): a game and a model (as well as several other options, see below) are chosen and the model is iteratively trained with MCTS\n- `pypolygames eval` (evaluation mode): the model confronts either a pure MCTS or another neural network powered MCTS. The evaluation of a training can be done either offline (from checkpoints periodically saved) or in real time; in that case, the evaluation considers only the most recent checkpoint in order to follow closely the training, skipping some checkpoints in case the eval computation takes longer than the time becween consecutive checkpoints. It is displayed through visdom.\n- `pypolygames traineval` (training + evaluation mode): it mixes the two previous modes and allow to launch one command instead of two. 
With the `real_time` option the modes can be launched in parallel instead of sequentially.\n- `pypolygames human` (human mode): a human player plays against the machine\n\nTrainings log the following relevant files in the `checkpoint_dir`:\n- `model.pt`\n- `train.log`\n- `stat.tb`\n- `checkpoints_<epoch>.pt` for for checkpoints saved each `saving_period` epoch (e.g., if `saving_period == 10`, `checkpoints_0.pt`, `checkpoints_9.pt`, `checkpoints_19.pt`, `checkpoints_29.pt`)\n\nBy default, the checkpoint_dir is exps/dev/game_<game_name>_model_<model_name>_feat_<featurization>_GMT_<YYYYMMDDHHMMSS>\n\nThis directory will be the `checkpoint_dir` directory used by evaluation to retrieve the checkpoints to perform eval computation.\"\"\"\n\n\ndef _check_arg_consistency(args: argparse.Namespace) -> None:\n    # Most of the consistency is done in the `__post_init__` methods in the params class\n    if (\n        args.command_history.last_command_contains(\"pure_mcts\")\n        and getattr(args, \"game_name\", None) is None\n    ):\n        raise ValueError(\n            \"In '--pure_mcts' the game must be specified with '--game_name'\"\n        )\n    if args.command_history.last_command_contains(\"human\"):\n        if (\n            getattr(args, \"pure_mcts\", None) is False\n            and getattr(args, \"init_checkpoint\", None) is None\n        ):\n            raise ValueError(\n                \"The human player need to play either a '--pure_mcts' \"\n                \"or a '--init_checkpoint' neural network powered MCTS\"\n            )\n    if args.command_history.last_command_contains(\"device_opponent\"):\n        if getattr(args, \"checkpoint_opponent\", None) is None:\n            raise ValueError(\n                \"If the opponent is a pure MCTS player \"\n                \"('--checkpoint_opponent' not set), \"\n                \"all its computation will happen on CPU, \"\n                \"'--device_opponent' should not be set\"\n            )\n    if 
args.command_history.last_command_contains(\n        \"per_thread_batchsize\"\n    ) and args.command_history.last_command_contains(\"act_batchsize\"):\n        raise ValueError(\n            \"When '--per_thread_batchsize' is set, '--act_batchsize' is not used\"\n        )\n\n    if getattr(args, \"total_time\", 0) is not None and getattr(args, \"total_time\", 0) > 0:\n        if args.command_history.last_command_contains(\"num_rollouts\"):\n            raise ValueError(\n                \"When a '--total_time' is set, \"\n                \"the '--num_rollouts' will adapt automatically and should not be set\"\n            )\n\n\ndef parse_args() -> argparse.Namespace:\n    parser = argparse.ArgumentParser(\n        description=DOC, formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False,\n    )\n    parser.set_defaults(func=run_training_and_evaluation_from_args_warning)\n\n    subparsers = parser.add_subparsers(\n        help=\"Modes to be chosen from: `python -m pypolygames MODE`\"\n    )\n\n    # TRAINING\n    parser_train = subparsers.add_parser(\"train\")\n    parser_train.set_defaults(func=run_training_from_args)\n\n    # EVALUATION\n    parser_eval = subparsers.add_parser(\"eval\")\n    parser_eval.set_defaults(func=run_evaluation_from_args)\n\n    # TRAINING + EVALUATION\n    parser_traineval = subparsers.add_parser(\"traineval\")\n    parser_traineval.set_defaults(func=run_training_and_evaluation_from_args)\n\n    # HUMAN-PLAYED GAME\n    parser_human = subparsers.add_parser(\"human\")\n    parser_human.set_defaults(func=run_human_played_game_from_args)\n\n    # TEXT-PROTOCOLE GAME\n    parser_tp = subparsers.add_parser(\"tp\")\n    parser_tp.set_defaults(func=run_tp_played_game_from_args)\n\n    # CONVERT CHECKPOINT COMMAND\n    parser_convert = subparsers.add_parser(\"convert\")\n    parser_convert.set_defaults(func=convert_checkpoint_from_args)\n\n    parser_convert.add_argument('--out', type=str, required=True, help='File name to save 
the converted checkpoint to')\n    parser_convert.add_argument('--skip', type=str, nargs=\"*\", help='List of attributes to not copy, leaving them initialized')\n    parser_convert.add_argument(\n        '--auto_tune_nnsize', action=\"store_true\",\n        help='Tune nnsize automatically such that number of filters in hidden layers remains unchanged.'\n    )\n    parser_convert.add_argument(\n        '--zero_shot', type=bool, default=False, \n        help='Convert for zero-shot evaluation without training; this will initialise any skipped or new params to 0.'\n    )\n    parser_convert.add_argument(\n        '--move_source_channels', type=int, nargs=\"*\", \n        help=('For fully convolutional architectures, for every channel in the destination game\\'s move tensors, '\n              'specify the channel from the original tensor that we should transfer weights from.')\n    )\n    parser_convert.add_argument(\n        '--state_source_channels', type=int, nargs=\"*\", \n        help=('For fully convolutional architectures, for every channel in the destination game\\'s state tensors, '\n              'specify the channel from the original tensor that we should transfer weights from.')\n    )\n    \n    # DRAW MODEL COMMAND\n    parser_draw_model = subparsers.add_parser(\"draw_model\")\n    parser_draw_model.set_defaults(func=draw_model_from_args)\n    parser_draw_model.add_argument('--out', type=str, required=True, help='File name (without extension) to save figure to.')\n\n    # Game params\n    train_game_params_group = parser_train.add_argument_group(\n        \"Game parameters\",\n        \"Not to be specified in case of loading a checkpoint or a pretrained model\",\n    )\n    traineval_game_params_group = parser_traineval.add_argument_group(\n        \"Game parameters\",\n        \"Not to be specified in case of loading a checkpoint or a pretrained model\",\n    )\n    game_params_group = parser.add_argument_group(\n        \"Game parameters\",\n        
\"Not to be specified in case of loading a checkpoint or a pretrained model\",\n    )\n    human_game_params_group = parser_human.add_argument_group(\n        \"Game parameters\",\n        \"Mandatory for pure MTCS, \"\n        \"but not to be specified in case of loading a pretrained model\",\n    )\n    for arg_name, arg_field in GameParams.arg_fields():\n        train_game_params_group.add_argument(arg_field.name, **arg_field.opts)\n        traineval_game_params_group.add_argument(arg_field.name, **arg_field.opts)\n        game_params_group.add_argument(\n            arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}\n        )\n        human_game_params_group.add_argument(arg_field.name, **arg_field.opts)\n        parser_convert.add_argument(arg_field.name, **arg_field.opts)\n        parser_draw_model.add_argument(arg_field.name, **arg_field.opts)\n\n    # Model params\n    train_model_params_group = parser_train.add_argument_group(\n        \"Model parameters\",\n        \"Not to be specified in case of loading a checkpoint or a pretrained model\",\n    )\n    traineval_model_params_group = parser_traineval.add_argument_group(\n        \"Model parameters\",\n        \"Not to be specified in case of loading a checkpoint or a pretrained model\",\n    )\n    model_params_group = parser.add_argument_group(\"Model parameters\")\n    human_model_params_group = parser_human.add_argument_group(\n        \"Model parameters\",\n        \"The machine model can be either a '--pure_mcts' or \"\n        \"a '--init_checkpoint' neural network powered MCTS\",\n    )\n    for arg_name, arg_field in ModelParams.arg_fields():\n        if arg_name != \"pure_mcts\":\n            train_model_params_group.add_argument(arg_field.name, **arg_field.opts)\n            traineval_model_params_group.add_argument(arg_field.name, **arg_field.opts)\n            model_params_group.add_argument(\n                arg_field.name, **{**arg_field.opts, 
**dict(help=argparse.SUPPRESS)}\n            )\n        if arg_name in {\"pure_mcts\", \"init_checkpoint\"}:\n            human_model_params_group.add_argument(arg_field.name, **arg_field.opts)\n        if arg_name != \"pure_mcts\":\n            parser_convert.add_argument(arg_field.name, **arg_field.opts)\n            parser_draw_model.add_argument(arg_field.name, **arg_field.opts)\n\n    # Optimizer params\n    train_optim_params_group = parser_train.add_argument_group(\"Optimizer parameters\")\n    traineval_optim_params_group = parser_traineval.add_argument_group(\n        \"Optimizer parameters\"\n    )\n    optim_params_group = parser.add_argument_group(\"Optimizer parameters\")\n    for _, arg_field in OptimParams.arg_fields():\n        train_optim_params_group.add_argument(arg_field.name, **arg_field.opts)\n        traineval_optim_params_group.add_argument(arg_field.name, **arg_field.opts)\n        optim_params_group.add_argument(\n            arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}\n        )\n\n    # Simulation params\n    train_simulation_params_group = parser_train.add_argument_group(\n        \"Simulation parameters\"\n    )\n    traineval_simulation_params_group = parser_traineval.add_argument_group(\n        \"Simulation parameters\"\n    )\n    simulation_params_group = parser.add_argument_group(\"Simulation parameters\")\n    human_simulation_params_group = parser_human.add_argument_group(\n        \"Simulation parameters\"\n    )\n    for arg_name, arg_field in SimulationParams.arg_fields():\n        if arg_name not in {\n            \"human_first\",\n            \"time_ratio\",\n            \"total_time\",\n        }:  # , \"num_actor\"}:\n            train_simulation_params_group.add_argument(arg_field.name, **arg_field.opts)\n            traineval_simulation_params_group.add_argument(\n                arg_field.name, **arg_field.opts\n            )\n            simulation_params_group.add_argument(\n                
arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}\n            )\n        #if arg_name in {\"num_actor\", \"num_rollouts\"}:\n        if True:\n            human_simulation_params_group.add_argument(arg_field.name, **arg_field.opts)\n\n    # Execution params\n    train_execution_params_group = parser_train.add_argument_group(\n        \"Execution parameters\"\n    )\n    traineval_execution_params_group = parser_traineval.add_argument_group(\n        \"Execution parameters\"\n    )\n    human_execution_params_group = parser_human.add_argument_group(\n        \"Execution parameters\"\n    )\n    execution_params_group = parser.add_argument_group(\"Execution parameters\")\n    for arg_name, arg_field in ExecutionParams.arg_fields():\n        if arg_name not in {\"human_first\", \"time_ratio\", \"total_time\"}:\n            train_execution_params_group.add_argument(arg_field.name, **arg_field.opts)\n            traineval_execution_params_group.add_argument(\n                arg_field.name, **arg_field.opts\n            )\n            execution_params_group.add_argument(\n                arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}\n            )\n        if arg_name in {\"human_first\", \"time_ratio\", \"total_time\", \"device\", \"seed\"}:\n            human_execution_params_group.add_argument(arg_field.name, **arg_field.opts)\n\n    # Evaluation params\n    eval_eval_params_group = parser_eval.add_argument_group(\"Evaluation parameters\")\n    traineval_eval_params_group = parser_traineval.add_argument_group(\n        \"Evaluation parameters\"\n    )\n    eval_params_group = parser.add_argument_group(\"Evaluation parameters\")\n    for arg_name, arg_field in EvalParams.arg_fields():\n        eval_eval_params_group.add_argument(arg_field.name, **arg_field.opts)\n        if arg_name not in {\"checkpoint_dir\", \"checkpoint\"}:\n            traineval_eval_params_group.add_argument(arg_field.name, **arg_field.opts)\n            
eval_params_group.add_argument(\n                arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}\n            )\n\n    args = parser.parse_args()\n    args.command_history = CommandHistory()\n\n    # check arg consistency\n    _check_arg_consistency(args)\n    return args\n\n\ndef _get_game_features(game_params: GameParams) -> str:\n    return \"_\".join(str(x) for x in astuple(game_params))\n\n\ndef _get_timestamp() -> str:\n    return time.strftime(\"%Y%m%d%H%M%S\", time.gmtime())\n\n\ndef update_and_create_checkpoint_dir(\n    game_params: GameParams,\n    model_params: ModelParams,\n    execution_params: ExecutionParams,\n) -> None:\n    # create a dedicated folder if none is provided\n    if execution_params.checkpoint_dir is None:\n        game_name = game_params.game_name\n        model_name = model_params.model_name\n        game_features = _get_game_features(game_params)\n        timestamp = _get_timestamp()\n        subfolder = f\"game_{game_name}_model_{model_name}_feat_{game_features}_GMT_{timestamp}\"\n        execution_params.checkpoint_dir = Path(\"exps\").absolute() / \"dev\" / subfolder\n    execution_params.checkpoint_dir.mkdir(exist_ok=True, parents=True)\n\n\ndef instanciate_params_from_args(\n    Dataclass, args: argparse.Namespace\n) -> Union[\n    GameParams, ModelParams, OptimParams, SimulationParams, ExecutionParams, EvalParams\n]:\n    return Dataclass(\n        **{param: getattr(args, param, None) for param, _ in Dataclass.arg_fields()}\n    )\n\n\ndef run_training_from_args(args: argparse.Namespace):\n    command_history = args.command_history\n    game_params = instanciate_params_from_args(GameParams, args)\n    model_params = instanciate_params_from_args(ModelParams, args)\n    optim_params = instanciate_params_from_args(OptimParams, args)\n    simulation_params = instanciate_params_from_args(SimulationParams, args)\n    execution_params = instanciate_params_from_args(ExecutionParams, args)\n\n    
update_and_create_checkpoint_dir(\n        game_params=game_params,\n        model_params=model_params,\n        execution_params=execution_params,\n    )\n    run_training(\n        command_history=command_history,\n        game_params=game_params,\n        model_params=model_params,\n        optim_params=optim_params,\n        simulation_params=simulation_params,\n        execution_params=execution_params,\n    )\n\n\ndef run_evaluation_from_args(args: argparse.Namespace):\n    eval_params = instanciate_params_from_args(EvalParams, args)\n    execution_params = instanciate_params_from_args(ExecutionParams, args)\n    run_evaluation(eval_params=eval_params, execution_params=execution_params)\n\n\ndef run_training_and_evaluation_from_args(args: argparse.Namespace):\n    command_history = args.command_history\n    game_params = instanciate_params_from_args(GameParams, args)\n    model_params = instanciate_params_from_args(ModelParams, args)\n    optim_params = instanciate_params_from_args(OptimParams, args)\n    simulation_params = instanciate_params_from_args(SimulationParams, args)\n    execution_params = instanciate_params_from_args(ExecutionParams, args)\n    # create the save dir\n    update_and_create_checkpoint_dir(\n        game_params=game_params,\n        model_params=model_params,\n        execution_params=execution_params,\n    )\n    args.checkpoint_dir = execution_params.checkpoint_dir\n    eval_params = instanciate_params_from_args(EvalParams, args)\n    if args.real_time:\n        eval_process = Process(target=run_evaluation, args=(eval_params,))\n        eval_process.start()\n        run_training(\n            command_history=command_history,\n            game_params=game_params,\n            model_params=model_params,\n            optim_params=optim_params,\n            simulation_params=simulation_params,\n            execution_params=execution_params,\n        )\n        eval_process.join()\n    else:\n        run_training(\n            
command_history=command_history,\n            game_params=game_params,\n            model_params=model_params,\n            optim_params=optim_params,\n            simulation_params=simulation_params,\n            execution_params=execution_params,\n        )\n        run_evaluation(eval_params=eval_params, only_last=True)\n\n\ndef run_human_played_game_from_args(args: argparse.Namespace):\n    game_params = instanciate_params_from_args(GameParams, args)\n    model_params = instanciate_params_from_args(ModelParams, args)\n    simulation_params = instanciate_params_from_args(SimulationParams, args)\n    simulation_params.num_game = 1\n    execution_params = instanciate_params_from_args(ExecutionParams, args)\n    run_human_played_game(\n        game_params=game_params,\n        model_params=model_params,\n        simulation_params=simulation_params,\n        execution_params=execution_params,\n    )\n\n\ndef run_tp_played_game_from_args(args: argparse.Namespace):\n    game_params = instanciate_params_from_args(GameParams, args)\n    model_params = instanciate_params_from_args(ModelParams, args)\n    simulation_params = instanciate_params_from_args(SimulationParams, args)\n    simulation_params.num_game = 1\n    execution_params = instanciate_params_from_args(ExecutionParams, args)\n    run_tp_played_game(\n        game_params=game_params,\n        model_params=model_params,\n        simulation_params=simulation_params,\n        execution_params=execution_params,\n    )\n\ndef convert_checkpoint_from_args(args: argparse.Namespace):\n    command_history = args.command_history\n    game_params = instanciate_params_from_args(GameParams, args)\n    model_params = instanciate_params_from_args(ModelParams, args)\n    convert_checkpoint(\n        command_history=command_history,\n        game_params=game_params,\n        model_params=model_params,\n        out=args.out,\n        skip=args.skip,\n        auto_tune_nnsize=args.auto_tune_nnsize,\n        
zero_shot=args.zero_shot,\n        move_source_channels=args.move_source_channels,\n        state_source_channels=args.state_source_channels,\n    )\n    \ndef draw_model_from_args(args: argparse.Namespace):\n    game_params = instanciate_params_from_args(GameParams, args)\n    model_params = instanciate_params_from_args(ModelParams, args)\n    draw_model(\n        game_params=game_params,\n        model_params=model_params,\n        out=args.out,\n    )\n\n\ndef run_training_and_evaluation_from_args_warning(args: argparse.Namespace):\n    # pypolygames called directly\n    if len(sys.argv) == 1:\n        print(DOC)\n    # otherwise default to traineval\n    else:\n        warnings.warn(\n            \"'pypolygames' called with arguments runs as 'pypolygames traineval'\",\n            DeprecationWarning,\n        )\n        run_training_and_evaluation_from_args(args)\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    args.func(args)\n"
  },
  {
    "path": "pypolygames/convert.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\nfrom typing import Iterator, Tuple, Callable, Optional, List, Dict\nfrom pathlib import Path\nimport copy\n\nimport torch\n\nimport polygames\n\nfrom . import utils\nfrom .model_zoo import utils as zutils\nfrom .params import GameParams, ModelParams, SimulationParams, ExecutionParams\nfrom .env_creation_helpers import (\n    sanitize_game_params,\n    create_model,\n    create_game,\n    create_player,\n)\n\n\ndef convert_checkpoint(\n    command_history: utils.CommandHistory,\n    game_params: GameParams,\n    model_params: ModelParams,\n    out: str,\n    skip: List[str],\n    auto_tune_nnsize: bool,\n    zero_shot: bool,\n    move_source_channels: List[int],\n    state_source_channels: List[int],\n):\n    checkpoint = utils.load_checkpoint(\n        checkpoint_path=model_params.init_checkpoint)\n    old_model_params = checkpoint[\"model_params\"]\n    old_game_params = checkpoint[\"game_params\"]\n    sanitize_game_params(old_game_params) # backwards compatibility for models without game_options\n    model_state_dict = checkpoint[\"model_state_dict\"]\n\n    print(old_model_params.model_name)\n    print(getattr(old_model_params, \"model_name\"))\n\n    new_model_params = copy.deepcopy(old_model_params)\n    new_game_params = copy.deepcopy(old_game_params)\n    for k, v in vars(model_params).items():\n        if not command_history.last_command_contains(k) or k == \"init_checkpoint\":\n            continue\n        ov = getattr(new_model_params, k)\n        if v != ov:\n            print(\"Changing %s from %s to %s\" % (k, ov, v))\n            setattr(new_model_params, k, v)\n    for k, v in vars(game_params).items():\n        if not command_history.last_command_contains(k):\n            continue\n        ov = getattr(new_game_params, 
k)\n        if v != ov:\n            print(\"Changing %s from %s to %s\" % (k, ov, v))\n            setattr(new_game_params, k, v)\n\n    fix_global_pooling = old_model_params.model_name == \"ResConvConvLogitPoolModelV2\" and new_model_params.model_name == \"ResConvConvLogitPoolModelV2\" and new_model_params.global_pooling > 0\n    if fix_global_pooling:\n        print(\"Note: attempting to patch global pooling weights to match the new model\")\n        \n    if zero_shot:\n        print(\"Note: converting model for zero-shot evaluation only! Added/reinitialized params will be all 0 and untrainable!\")\n        \n    if auto_tune_nnsize or move_source_channels is not None or state_source_channels is not None:\n        # We'll need to load the game info\n        old_game_info = zutils.get_game_info(old_game_params)\n        new_game_info = zutils.get_game_info(new_game_params)\n        \n    if auto_tune_nnsize:\n        # We want to automatically tune nnsize, such that the number\n        # of filters in hidden layers does not change from source to\n        # target model\n        c_old, _, _ = old_game_info[\"feature_size\"][:3]\n        c_new, _, _ = new_game_info[\"feature_size\"][:3]\n        new_nnsize = float((getattr(old_model_params, 'nnsize') * c_old) / c_new)\n        print(\"Auto-tuning nnsize to:\", new_nnsize)\n        setattr(new_model_params, 'nnsize', new_nnsize)\n        \n    if move_source_channels is not None:\n        c_action_new, _, _ = new_game_info[\"action_size\"][:3]\n        if c_action_new != len(move_source_channels):\n            print(\"ERROR: if --move_source_channels is specified, it must have exactly c_action_new entries!\")\n            print(\"c_action_new = \", c_action_new)\n            print(\"len(move_source_channels) = \", len(move_source_channels))\n            \n    if state_source_channels is not None:\n        c_state_new, _, _ = new_game_info[\"feature_size\"][:3]\n        if c_state_new != 
len(state_source_channels):\n            print(\"ERROR: if --state_source_channels is specified, it must have exactly c_state_new entries!\")\n            print(\"c_state_new = \", c_state_new)\n            print(\"len(state_source_channels) = \", len(state_source_channels))\n\n    m = create_model(game_params=new_game_params,\n                     model_params=new_model_params)\n    s = m.state_dict()\n    params_added = 0\n    params_removed = 0\n    params_reinitialized = 0\n    taken = []\n    for k, src in model_state_dict.items():\n        if not k in s:\n            moved = False\n            for k2, dst in s.items():\n                if not k2 in model_state_dict and src.shape == dst.shape and not k2 in taken:\n                  print(\"%s shape %s moved to %s\" % (k, src.shape, k2))\n                  taken.append(k2)\n                  dst.copy_(src)\n                  moved = True\n                  break\n            if not moved:\n              print(\"%s shape %s removed\" % (k, src.shape))\n              params_removed += src.numel()\n    for k, dst in s.items():\n        if k in taken:\n            continue\n            \n        if zero_shot:\n            dst = dst.fill_(0) \n        \n        if skip is not None and k in skip:\n            print(\"%s shape %s skipped\" % (k, dst.shape))\n            params_reinitialized += dst.numel()\n            continue\n        if not k in model_state_dict:\n            params_added += dst.numel()\n            continue\n        src = model_state_dict[k]\n        \n        if move_source_channels is not None and \"pi_logit.\" in k:\n            # Use manually specified channels to transfer from for\n            # last Conv2D operation that produces pi logits\n            if \"weight\" in k:\n                for i in range(len(move_source_channels)):\n                    if move_source_channels[i] >= 0:\n                        dst_view = dst\n                        src_view = src\n                        for j 
in range(dst_view.dim()):\n                            if j == 0:  # Don't narrow this dim, need original indexing\n                                continue\n                            if src_view.shape[j] > dst_view.shape[j]:\n                                src_view = src_view.narrow(j, 0, dst_view.shape[j])\n                            if dst_view.shape[j] > src_view.shape[j]:\n                                dst_view = dst_view.narrow(j, 0, src_view.shape[j])\n                    \n                        dst_view[i] = src_view[move_source_channels[i]]\n            elif \"bias\" in k:\n                for i in range(len(move_source_channels)):\n                    if move_source_channels[i] >= 0:\n                        dst[i] = src[move_source_channels[i]]\n            continue\n            \n        if state_source_channels is not None and \"mono.0.\" in k:\n            # Use manually specified channels to transfer from for\n            # first Conv2D operation on state tensor\n            if \"weight\" in k:\n                for i in range(len(state_source_channels)):\n                    if state_source_channels[i] >= 0:\n                        dst_view = dst\n                        src_view = src\n                        for j in range(dst_view.dim()):\n                            if j == 1:  # Don't narrow this dim, need original indexing\n                                continue\n                            if src_view.shape[j] > dst_view.shape[j]:\n                                src_view = src_view.narrow(j, 0, dst_view.shape[j])\n                            if dst_view.shape[j] > src_view.shape[j]:\n                                dst_view = dst_view.narrow(j, 0, src_view.shape[j])\n                    \n                        dst_view[:, i] = src_view[:, state_source_channels[i]]\n            elif \"bias\" in k:\n                for i in range(len(state_source_channels)):\n                    if state_source_channels[i] >= 0:\n                   
     dst[i] = src[state_source_channels[i]]\n            continue\n        \n        delta = dst.numel() - src.numel()\n        if delta > 0:\n            params_added += delta\n        else:\n            params_removed -= delta\n        if dst.shape != src.shape:\n            print(\"%s shape %s -> %s\" % (k, src.shape, dst.shape))\n        if fix_global_pooling and \"resnets\" in k and \"0.0\" in k and \"weight\" in k:\n          if src.dim() == 4 and dst.dim() == 4:\n\n            src_c = src.shape[0]\n            dst_c = dst.shape[0]\n\n            src_s = int(src_c * old_model_params.global_pooling)\n            dst_s = int(dst_c * new_model_params.global_pooling)\n\n            src_d = src_c + src_s\n            dst_d = dst_c + src_s\n\n            print(\"Moving global pooling weights from %d:%d to %d:%d\" % (src_c, src_d, dst_c, dst_d))\n\n            min_c = min(src_c, dst_c)\n            dst[:min_c, dst_c:dst_d, :, :] = src[:min_c, src_c:src_d, :, :]\n            #dst.narrow(0, 0, src_c).narrow(1, dst_c, src_s).copy_(src.narrow(1, src_c, src_s))\n\n            print(\"Moving global pooling weights from %d:%d to %d:%d\" % (src_c+src_s, src_d+src_s, dst_c+dst_s, dst_d+dst_s))\n\n            #dst.narrow(0, 0, src_c).narrow(1, dst_c+dst_s, src_s).copy_(src.narrow(1, src_c+src_s, src_s))\n            dst[:min_c, dst_c+dst_s:dst_d+dst_s, :, :] = src[:min_c, src_c+src_s:src_d+src_s, :, :]\n\n            src = src[:, :src_c, :, :]\n            dst = dst[:, :dst_c, :, :]\n            #src = src.narrow(1, 0, src_c)\n            #dst = dst.narrow(1, 0, dst_c)\n\n        while dst.dim() < src.dim():\n            dst = dst.unsqueeze(0)\n        while src.dim() < dst.dim():\n            src = src.unsqueeze(0)\n        for i in range(dst.dim()):\n            if src.shape[i] > dst.shape[i]:\n                src = src.narrow(i, 0, dst.shape[i])\n            if dst.shape[i] > src.shape[i]:\n                dst = dst.narrow(i, 0, src.shape[i])\n        dst.copy_(src)\n    
print(\"Parameters added: %d\" % params_added)\n    print(\"Parameters removed: %d\" % params_removed)\n    print(\"Parameters reinitialized: %d\" % params_reinitialized)\n    checkpoint[\"model_state_dict\"] = s\n    checkpoint[\"model_params\"] = new_model_params\n    checkpoint[\"game_params\"] = new_game_params\n    \n    Path(out).parent.mkdir(parents=True, exist_ok=True)\n    import gzip\n    with gzip.open(out, \"wb\") as f:\n        torch.save(checkpoint, f)\n"
  },
  {
    "path": "pypolygames/draw_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\nfrom typing import Iterator, Tuple, Callable, Optional, List, Dict\nimport copy\n\nimport torch\n\nimport polygames\n\nfrom .model_zoo.utils import get_game_info \nfrom .params import GameParams, ModelParams\nfrom .env_creation_helpers import (\n    create_model,\n)\n\n\ndef draw_model(\n    game_params: GameParams,\n    model_params: ModelParams,\n    out: str,\n):\n    import torchviz\n    m = create_model(game_params=game_params,\n                     model_params=model_params)\n                     \n    info = get_game_info(game_params)\n    m.eval()  # necessary for batch norm as it expects more than 1 ex in training\n    feature_size = info[\"feature_size\"][:3]\n    action_size = info[\"action_size\"][:3]\n    input_data = torch.zeros([1] + feature_size, device=torch.device(\"cpu\"))\n    model_out = m(input_data)\n    dot = torchviz.make_dot((model_out[\"v\"], model_out[\"pi_logit\"]), params=dict(list(m.named_parameters())))\n    dot.format = 'png'\n    dot.render(out)\n"
  },
  {
    "path": "pypolygames/env_creation_helpers.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Optional, Iterator, List\n\nimport torch  # must be loaded before tube\nimport tube\nimport mcts\nimport polygames\n\nfrom . import model_zoo\nfrom .params import GameParams, ModelParams\nfrom .weight_init import WEIGHT_INIT\n\n\ndef sanitize_game_params(game_params: GameParams) -> None:\n    # eval and human modes do not support `per_thread_batchsize` != 0\n    # while in training, the option could have been set to > 0\n    # ideally this should be a simulation parameter, but it would change\n    # the C++ 'polygames.Game' signature\n    # EDIT: now it a simulation parameter, but for retro-compatibility\n    #  we keep that function\n    game_params.per_thread_batchsize = 0\n    \n    # Many old models don't have the game_options attribute\n    if not hasattr(game_params, 'game_options'):\n        game_params.game_options = list()\n\n\ndef create_game(\n    game_params: GameParams,\n    num_episode: int,\n    seed: int,\n    eval_mode: bool,\n    per_thread_batchsize: int = 0,\n    rewind: int = 0,\n    predict_end_state: bool = False,\n    predict_n_states: int = 0,\n) -> polygames.Game:\n    # Many old models don't have the game_options attribute\n    if hasattr(game_params, 'game_options'):\n        game_options = game_params.game_options\n        if game_options is None:\n            game_options = list()\n    else:\n        game_options = list()\n\n    return polygames.Game(\n        game_params.game_name,\n        game_options,\n        num_episode,\n        seed,\n        eval_mode,\n        game_params.out_features,\n        game_params.turn_features,\n        game_params.turn_features_mc,\n        game_params.geometric_features,\n        game_params.history,\n        game_params.random_features,\n        
game_params.one_feature,\n        per_thread_batchsize,\n        rewind,\n        predict_end_state,\n        predict_n_states,\n    )  # cannot use named parameters :(\n\n\ndef create_model(\n    game_params: GameParams,\n    model_params: ModelParams,\n    resume_training: bool = False,\n    model_state_dict: Optional[dict] = None,\n) -> torch.jit.ScriptModule:\n    if model_params.model_name is not None:\n        if model_params.model_name in model_zoo.MODELS:\n            model = model_zoo.MODELS[model_params.model_name](\n                game_params=game_params, model_params=model_params\n            )\n        else:\n            raise RuntimeError(\n                f'The model \"{model_params.model_name}\" has not been implemented '\n                f'in the \"model_zoo\" package'\n            )\n    else:\n        print(\"creating a generic model\")\n        model = model_zoo.GenericModel(\n            game_params=game_params, model_params=model_params\n        )\n    if resume_training:\n        if model_state_dict is not None:\n            print(\"load state dict!\")\n            model.load_state_dict(model_state_dict)\n    else:\n        model.apply(WEIGHT_INIT[model_params.init_method])\n\n    nb_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n    print(f\"total #trainable params = {nb_params}\")\n    return model\n\n\ndef _set_mcts_option(\n    num_rollouts: int,\n    seed: int,\n    human_mode: bool = False,\n    time_ratio: float = 0.7,\n    total_time: float = 0,\n    sample_before_step_idx: int = 0,\n    randomized_rollouts: bool = False,\n    sampling_mcts: bool = False,\n) -> mcts.MctsOption:\n    # TODO: put hardcoded value in conf file\n    mcts_option = mcts.MctsOption()\n    mcts_option.puct = 1.1\n    mcts_option.sample_before_step_idx = sample_before_step_idx\n    mcts_option.num_rollout_per_thread = num_rollouts\n    mcts_option.seed = seed\n    mcts_option.virtual_loss = 1\n    mcts_option.total_time = total_time\n  
  mcts_option.time_ratio = time_ratio\n    mcts_option.randomized_rollouts = randomized_rollouts\n    mcts_option.sampling_mcts = sampling_mcts\n    return mcts_option\n\n\ndef _create_pure_mcts_player(\n    game: polygames.Game, mcts_option: mcts.MctsOption, num_actor: int\n) -> mcts.MctsPlayer:\n    \"\"\"a player that uses only mcts + random rollout, no neural net\"\"\"\n    player = mcts.MctsPlayer(mcts_option)\n    for _ in range(num_actor):\n        actor = polygames.Actor(\n            None, game.get_feat_size(), game.get_action_size(), [], 0, False, False, False, None\n        )\n        player.set_actor(actor)\n    return player\n\n\ndef _create_neural_mcts_player(\n    game: polygames.Game,\n    mcts_option: mcts.MctsOption,\n    num_actor: int,\n    actor_channel: tube.DataChannel,\n    model_manager: Optional[polygames.ModelManager] = None,\n    rnn_state_shape: List[int] = [],\n    rnn_seqlen: int = 0,\n    logit_value: bool = False,\n) -> mcts.MctsPlayer:\n\n    player = mcts.MctsPlayer(mcts_option)\n    for _ in range(num_actor):\n        num_actor += 1\n        actor = polygames.Actor(\n            actor_channel,\n            game.get_feat_size(),\n            game.get_action_size(),\n            rnn_state_shape,\n            rnn_seqlen,\n            logit_value,\n            True,\n            True,\n            model_manager,\n        )\n        player.set_actor(actor)\n    return player\n\ndef _create_forward_player(\n    game: polygames.Game,\n    actor_channel: tube.DataChannel,\n    model_manager: Optional[polygames.ModelManager] = None,\n    rnn_state_shape: List[int] = [],\n    rnn_seqlen: int = 0,\n    logit_value: bool = False,\n) -> mcts.MctsPlayer:\n\n    player = polygames.ForwardPlayer()\n    actor = polygames.Actor(\n        actor_channel,\n        game.get_feat_size(),\n        game.get_action_size(),\n        rnn_state_shape,\n        rnn_seqlen,\n        logit_value,\n        True,\n        True,\n        model_manager,\n    )\n    
player.set_actor(actor)\n    return player\n\n\ndef create_player(\n    seed_generator: Iterator[int],\n    game: polygames.Game,\n    player: str,\n    num_actor: int,\n    num_rollouts: int,\n    pure_mcts: bool,\n    actor_channel: Optional[tube.DataChannel],\n    model_manager: Optional[polygames.ModelManager] = None,\n    human_mode: bool = False,\n    time_ratio: float = 0.07,\n    total_time: float = 0,\n    sample_before_step_idx: int = 0,\n    randomized_rollouts: bool = False,\n    sampling_mcts: bool = False,\n    rnn_state_shape: List[int] = [],\n    rnn_seqlen: int = 0,\n    logit_value: bool = False,\n):\n    if player == \"mcts\":\n      mcts_option = _set_mcts_option(\n          num_rollouts=num_rollouts,\n          seed=next(seed_generator),\n          human_mode=human_mode,\n          time_ratio=time_ratio,\n          total_time=total_time,\n          sample_before_step_idx=sample_before_step_idx,\n          randomized_rollouts=randomized_rollouts,\n          sampling_mcts=sampling_mcts,\n      )\n      if pure_mcts:\n          return _create_pure_mcts_player(\n              game=game, mcts_option=mcts_option, num_actor=num_actor\n          )\n      else:\n          return _create_neural_mcts_player(\n              game=game,\n              mcts_option=mcts_option,\n              num_actor=num_actor,\n              actor_channel=actor_channel,\n              model_manager=model_manager,\n              rnn_state_shape=rnn_state_shape,\n              rnn_seqlen=rnn_seqlen,\n              logit_value=logit_value\n          )\n    elif player == \"forward\":\n        return _create_forward_player(\n              game=game,\n              actor_channel=actor_channel,\n              model_manager=model_manager,\n              rnn_state_shape=rnn_state_shape,\n              rnn_seqlen=rnn_seqlen,\n              logit_value=logit_value\n        )\n    else:\n        raise RuntimeError(\"Unknown player \" + player)\n"
  },
  {
    "path": "pypolygames/evaluation.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport sys\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Iterator, Tuple, List, Callable, Optional, Dict\n\nimport torch\n\nimport tube\nfrom pytube.data_channel_manager import DataChannelManager\n\nfrom .params import GameParams, EvalParams, ExecutionParams\nfrom . import utils\nfrom .env_creation_helpers import (\n    sanitize_game_params,\n    create_model,\n    create_game,\n    create_player,\n)\n\n\n#######################################################################################\n# PLOTTER CREATION\n#######################################################################################\n\n\ndef create_plotter(eval_params: EvalParams) -> utils.Plotter:\n    checkpoint_dir = eval_params.checkpoint_dir\n    if checkpoint_dir[-1] == \"/\":\n        checkpoint_dir = checkpoint_dir[:-1]\n    plot_env = os.path.basename(checkpoint_dir)\n    return utils.Plotter(\n        plot_enabled=eval_params.plot_enabled,\n        env=plot_env,\n        server=eval_params.plot_server,\n        port=eval_params.plot_port,\n    )\n\n\n#######################################################################################\n# CHECKPOINT ITERATOR CREATION\n#######################################################################################\n\n\ndef create_checkpoint_iter(eval_params: EvalParams, only_last: bool = False):\n    if eval_params.checkpoint_dir is not None:\n        return utils.gen_checkpoints(\n            checkpoint_dir=eval_params.checkpoint_dir,\n            real_time=eval_params.real_time and not only_last,\n            only_last=only_last,\n        )\n    else:\n        return 
[utils.load_checkpoint(eval_params.checkpoint)]\n\n\n#######################################################################################\n# OPPONENT MODEL AND DEVICE CREATION\n#######################################################################################\n\n\ndef create_models_and_devices_opponent(\n    eval_params: EvalParams\n) -> Tuple[List[torch.jit.ScriptModule], List[torch.device], GameParams]:\n    devices_opponent = [\n        torch.device(device_opponent) for device_opponent in eval_params.device_opponent\n    ]\n    checkpoint_opponent = utils.load_checkpoint(eval_params.checkpoint_opponent)\n    model_state_dict_opponent = checkpoint_opponent[\"model_state_dict\"]\n    game_params_opponent = checkpoint_opponent[\"game_params\"]\n    sanitize_game_params(game_params_opponent)\n    model_params_opponent = checkpoint_opponent[\"model_params\"]\n    models_opponent = []\n    for device_opponent in devices_opponent:\n        model_opponent = create_model(\n            game_params=game_params_opponent,\n            model_params=model_params_opponent,\n            resume_training=False,\n        ).to(device_opponent)\n        remove = []\n        for k, v in model_state_dict_opponent.items():\n          if \"training\" in k:\n            remove.append(k)\n        for k in remove:\n          model_state_dict_opponent.pop(k)\n        model_opponent.load_state_dict(model_state_dict_opponent)\n        model_opponent.eval()\n        models_opponent.append(model_opponent)\n    return models_opponent, devices_opponent, game_params_opponent\n\n\n#######################################################################################\n# EVALUATION ENVIRONMENT CREATION\n#######################################################################################\n\n\ndef create_evaluation_environment(\n    seed_generator: Iterator[int],\n    game_params: GameParams,\n    eval_params: EvalParams,\n    current_batch_size: int = None,\n    pure_mcts_eval: bool = 
False,\n    pure_mcts_opponent: bool = True,\n    num_evaluated_games: int = 0\n) -> Tuple[\n    tube.Context,\n    Optional[tube.DataChannel],\n    Optional[tube.DataChannel],\n    Callable[[], List[int]],\n]:\n    num_game = eval_params.num_game_eval\n    num_actor_eval = eval_params.num_actor_eval\n    num_rollouts_eval = eval_params.num_rollouts_eval\n    num_actor_opponent = eval_params.num_actor_opponent\n    num_rollouts_opponent = eval_params.num_rollouts_opponent\n    first_hand = []\n    second_hand = []\n    games = []\n\n    context = tube.Context()\n    actor_channel_eval = (\n        None\n        if pure_mcts_eval\n        else tube.DataChannel(\"act_eval\", num_game * num_actor_eval, 1)\n    )\n    actor_channel_opponent = (\n        None\n        if pure_mcts_opponent\n        else tube.DataChannel(\"act_opponent\", num_game * num_actor_opponent, 1)\n    )\n    for game_no in range(current_batch_size if current_batch_size else num_game):\n        game = create_game(\n            game_params, num_episode=1, seed=next(seed_generator), eval_mode=True\n        )\n        player = create_player(\n            seed_generator=seed_generator,\n            game=game,\n            player=\"mcts\",\n            num_actor=num_actor_eval,\n            num_rollouts=num_rollouts_eval,\n            pure_mcts=pure_mcts_eval,\n            actor_channel=actor_channel_eval,\n            model_manager=None,\n            human_mode=False,\n            sample_before_step_idx=8,\n            randomized_rollouts=False,\n            sampling_mcts=False,\n        )\n        if game.is_one_player_game():\n            game.add_eval_player(player)\n            first_hand.append(game)\n        else:\n            opponent = create_player(\n                seed_generator=seed_generator,\n                game=game,\n                player=\"mcts\",\n                num_actor=num_actor_opponent,\n                num_rollouts=num_rollouts_opponent,\n                
pure_mcts=pure_mcts_opponent,\n                actor_channel=actor_channel_opponent,\n                model_manager=None,\n                human_mode=False,\n                sample_before_step_idx=8,\n                randomized_rollouts=False,\n                sampling_mcts=False,\n            )\n            game_id = num_evaluated_games + game_no\n            if player_moves_first(game_id, num_game):\n                game.add_eval_player(player)\n                game.add_eval_player(opponent)\n                first_hand.append(game)\n            else:\n                game.add_eval_player(opponent)\n                game.add_eval_player(player)\n                second_hand.append(game)\n\n        context.push_env_thread(game)\n        games.append(game)\n\n    def get_eval_reward():\n        nonlocal first_hand, second_hand\n        reward = []\n        for hand in first_hand:\n            reward.append(hand.get_result()[0])\n        for hand in second_hand:\n            reward.append(hand.get_result()[1])\n        return reward\n\n    return context, actor_channel_eval, actor_channel_opponent, get_eval_reward\n\n\ndef player_moves_first(game_id, num_games_eval):\n    return game_id < num_games_eval // 2\n\n#######################################################################################\n# EVALUATION\n#######################################################################################\n\n\ndef _forward_pass_on_device(\n    device: torch.device, model: torch.jit.ScriptModule, batch_s: torch.Tensor\n) -> Dict[str, torch.Tensor]:\n    batch_s = utils.to_device(batch_s, device)\n    with torch.no_grad():\n        reply = model(batch_s)\n    return reply\n\n\ndef _play_game_neural_mcts_against_pure_mcts_opponent(\n    context: tube.Context,\n    actor_channel_eval: tube.DataChannel,\n    devices_eval: List[torch.device],\n    models_eval: List[torch.jit.ScriptModule],\n) -> None:\n    nb_devices_eval = len(devices_eval)\n    context.start()\n    dcm = 
DataChannelManager([actor_channel_eval])\n    while not context.terminated():\n        batch = dcm.get_input(max_timeout_s=1)\n        if len(batch) == 0:\n            continue\n\n        assert len(batch) == 1  # only one channel\n\n        # split in as many part as there are devices\n        batches_eval_s = torch.chunk(\n            batch[actor_channel_eval.name][\"s\"], nb_devices_eval, dim=0\n        )\n        futures = []\n        reply_eval = {\"v\": None, \"pi_logit\": None}\n        # multithread\n        with ThreadPoolExecutor(max_workers=nb_devices_eval) as executor:\n            for device, model, batch_s in zip(\n                devices_eval, models_eval, batches_eval_s\n            ):\n                futures.append(\n                    executor.submit(_forward_pass_on_device, device, model, batch_s)\n                )\n            results = [future.result() for future in futures]\n            reply_eval[\"v\"] = torch.cat([result[\"v\"] for result in results], dim=0)\n            reply_eval[\"pi_logit\"] = torch.cat([result[\"pi_logit\"] for result in results], dim=0)\n        dcm.set_reply(actor_channel_eval.name, reply_eval)\n    dcm.terminate()\n\n\ndef _play_game_neural_mcts_against_neural_mcts_opponent(\n    context: tube.Context,\n    actor_channel_eval: tube.DataChannel,\n    actor_channel_opponent: tube.DataChannel,\n    devices_eval: List[torch.device],\n    models_eval: List[torch.jit.ScriptModule],\n    devices_opponent: List[torch.device],\n    models_opponent: List[torch.jit.ScriptModule],\n) -> None:\n    nb_devices_eval = len(devices_eval)\n    nb_devices_opponent = len(devices_opponent)\n    context.start()\n    dcm = DataChannelManager([actor_channel_eval, actor_channel_opponent])\n    while not context.terminated():\n        batch = dcm.get_input(max_timeout_s=1)\n        if len(batch) == 0:\n            continue\n\n        assert len(batch) <= 2  # up to two channels\n\n        if actor_channel_eval.name in batch:\n            
# split in as many part as there are devices\n            batches_eval_s = torch.chunk(\n                batch[actor_channel_eval.name][\"s\"], nb_devices_eval, dim=0\n            )\n            futures = []\n            reply_eval = {\"v\": None, \"pi_logit\": None}\n            # multithread\n            with ThreadPoolExecutor(max_workers=nb_devices_eval) as executor:\n                for device, model, batch_s in zip(\n                    devices_eval, models_eval, batches_eval_s\n                ):\n                    futures.append(\n                        executor.submit(_forward_pass_on_device, device, model, batch_s)\n                    )\n                results = [future.result() for future in futures]\n                reply_eval[\"v\"] = torch.cat([result[\"v\"] for result in results], dim=0)\n                reply_eval[\"pi_logit\"] = torch.cat(\n                    [result[\"pi_logit\"] for result in results], dim=0\n                )\n            dcm.set_reply(actor_channel_eval.name, reply_eval)\n\n        if actor_channel_opponent.name in batch:\n            # split in as many part as there are devices\n            batches_opponent_s = torch.chunk(\n                batch[actor_channel_opponent.name][\"s\"], nb_devices_opponent, dim=0\n            )\n            futures = []\n            reply_opponent = {\"v\": None, \"pi_logit\": None}\n            # multithread\n            with ThreadPoolExecutor(max_workers=nb_devices_opponent) as executor:\n                for device, model, batch_s in zip(\n                    devices_opponent, models_opponent, batches_opponent_s\n                ):\n                    futures.append(\n                        executor.submit(_forward_pass_on_device, device, model, batch_s)\n                    )\n                results = [future.result() for future in futures]\n                reply_opponent[\"v\"] = torch.cat(\n                    [result[\"v\"] for result in results], dim=0\n                )\n         
       reply_opponent[\"pi_logit\"] = torch.cat(\n                    [result[\"pi_logit\"] for result in results], dim=0\n                )\n            dcm.set_reply(actor_channel_opponent.name, reply_opponent)\n    dcm.terminate()\n\n\ndef evaluate_on_checkpoint(\n    game_params: GameParams,\n    eval_params: EvalParams,\n    context: tube.Context,\n    actor_channel_eval: Optional[tube.DataChannel],\n    actor_channel_opponent: Optional[tube.DataChannel],\n    get_eval_reward: Callable[[], List[int]],\n    devices_eval: Optional[List[torch.device]],\n    models_eval: Optional[List[torch.jit.ScriptModule]],\n    pure_mcts_eval: bool,\n    devices_opponent: Optional[List[torch.device]],\n    models_opponent: Optional[List[torch.jit.ScriptModule]],\n    pure_mcts_opponent: bool,\n) -> utils.Result:\n    if eval_params.eval_verbosity:\n        print(f\"Playing {eval_params.num_game_eval} games of {game_params.game_name}:\")\n        print(\n            f\"- {'pure MCTS' if pure_mcts_eval else type(models_eval[0]).__name__} \"\n            f\"player uses \"\n            f\"{eval_params.num_rollouts_eval} rollouts per actor \"\n            f\"with {eval_params.num_actor_eval} \"\n            f\"actor{'s' if eval_params.num_actor_eval > 1 else ''}\"\n        )\n        print(\n            f\"- {'pure MCTS' if pure_mcts_opponent else type(models_opponent[0]).__name__} \"\n            f\"opponent uses \"\n            f\"{eval_params.num_rollouts_opponent} rollouts per actor \"\n            f\"with {eval_params.num_actor_opponent} \"\n            f\"actor{'s' if eval_params.num_actor_opponent > 1 else ''}\"\n        )\n    if pure_mcts_eval:\n        pass  # not implemented\n    else:\n        if pure_mcts_opponent:\n            _play_game_neural_mcts_against_pure_mcts_opponent(\n                context=context,\n                actor_channel_eval=actor_channel_eval,\n                devices_eval=devices_eval,\n                models_eval=models_eval,\n            )\n   
     else:\n            _play_game_neural_mcts_against_neural_mcts_opponent(\n                context=context,\n                actor_channel_eval=actor_channel_eval,\n                actor_channel_opponent=actor_channel_opponent,\n                devices_eval=devices_eval,\n                models_eval=models_eval,\n                devices_opponent=devices_opponent,\n                models_opponent=models_opponent,\n            )\n    result = utils.Result(get_eval_reward())\n    if eval_params.eval_verbosity >= 2:\n        print(\"@@@eval: %s\" % result.log())\n    return result\n\n\n#######################################################################################\n# OVERALL EVALUATION WORKFLOW\n#######################################################################################\n\n\ndef run_evaluation(eval_params: EvalParams, execution_params: ExecutionParams, only_last: bool = False) -> None:\n    start_time = time.time()\n    logger_dir = eval_params.checkpoint_dir\n    if eval_params.checkpoint_dir is None:\n        logger_dir = os.path.dirname(eval_params.checkpoint)\n    logger_path = os.path.join(logger_dir, \"eval.log\")\n    sys.stdout = utils.Logger(logger_path)\n\n    print(\"#\" * 70)\n    print(\"#\" + \"EVALUATION\".center(68) + \"#\")\n    print(\"#\" * 70)\n\n    # evaluation is done on a NN-powered MCTS\n    pure_mcts_eval = False\n\n    print(\"setting-up pseudo-random generator...\")\n    seed_generator = utils.generate_random_seeds(seed=eval_params.seed_eval)\n\n    if eval_params.plot_enabled:\n        print(\"creating plotter...\")\n        plotter = create_plotter(eval_params=eval_params)\n\n    print(\"finding checkpoints...\")\n    checkpoint_iter = create_checkpoint_iter(\n        eval_params=eval_params, only_last=only_last\n    )\n\n    models_opponent = []\n    pure_mcts_opponent = True\n    devices_opponent = None\n    game_params_opponent = None\n    if eval_params.checkpoint_opponent is not None:\n        print(\"creating 
opponent model(s) and device(s)...\")\n        pure_mcts_opponent = False\n        (\n            models_opponent,\n            devices_opponent,\n            game_params_opponent,\n        ) = create_models_and_devices_opponent(eval_params=eval_params)\n\n    results = []\n    first_checkpoint = False\n    game_params = None\n    for checkpoint in checkpoint_iter:\n        epoch = checkpoint.get(\"epoch\", 0)  # 0 when checkpoint_dir is None\n        model_state_dict_eval = checkpoint[\"model_state_dict\"]\n        model_params_eval = checkpoint[\"model_params\"]\n        if game_params is None:\n            game_params = checkpoint[\"game_params\"]\n            sanitize_game_params(game_params)\n        # check that game_params are consistent between the model_eval and\n        # the model_opponent\n        if game_params_opponent is not None and game_params != game_params_opponent:\n            raise ValueError(\n                \"The game parameters between the model to be tested\"\n                \"and the opponent model are different\"\n            )\n        # check that game_params are consistent from one epoch to the other\n        checkpoint_game_params = checkpoint[\"game_params\"]\n        sanitize_game_params(checkpoint_game_params)\n        if game_params != checkpoint_game_params:\n            raise ValueError(f\"The game parameters have changed at checkpoint #{epoch}\")\n\n        if not first_checkpoint:\n            print(\"creating model(s) and device(s)...\")\n            devices_eval = [\n                torch.device(device_eval) for device_eval in eval_params.device_eval\n            ]\n            models_eval = []\n            for device_eval in devices_eval:\n                models_eval.append(\n                    create_model(\n                        game_params=game_params,\n                        model_params=model_params_eval,\n                        resume_training=False,\n                    ).to(device_eval)\n                )\n  
          first_checkpoint = True\n\n        print(\"updating model(s)...\")\n        for model_eval in models_eval:\n            model_eval.load_state_dict(model_state_dict_eval)\n            model_eval.eval()\n\n        num_evaluated_games = 0\n        rewards = []\n\n        eval_batch_size = eval_params.num_parallel_games_eval if eval_params.num_parallel_games_eval else eval_params.num_game_eval\n        print(\"evaluating {} games with batches of size {}\".format(eval_params.num_game_eval, eval_batch_size))\n        while num_evaluated_games < eval_params.num_game_eval:\n            if eval_params.eval_verbosity:\n                print(\"creating evaluation environment...\")\n            current_batch_size = min(eval_batch_size, eval_params.num_game_eval - num_evaluated_games)\n            (\n                context,\n                actor_channel_eval,\n                actor_channel_opponent,\n                get_eval_reward,\n            ) = create_evaluation_environment(\n                seed_generator=seed_generator,\n                game_params=game_params,\n                eval_params=eval_params,\n                current_batch_size=current_batch_size,\n                pure_mcts_eval=pure_mcts_eval,\n                pure_mcts_opponent=pure_mcts_opponent,\n                num_evaluated_games=num_evaluated_games,\n            )\n            if eval_params.eval_verbosity:\n                print(\"evaluating...\")\n            partial_result = evaluate_on_checkpoint(\n                game_params=game_params,\n                eval_params=eval_params,\n                context=context,\n                actor_channel_eval=actor_channel_eval,\n                actor_channel_opponent=actor_channel_opponent,\n                get_eval_reward=get_eval_reward,\n                devices_eval=devices_eval,\n                models_eval=models_eval,\n                pure_mcts_eval=pure_mcts_eval,\n                devices_opponent=devices_opponent,\n                
models_opponent=models_opponent,\n                pure_mcts_opponent=pure_mcts_opponent,\n            )\n            num_evaluated_games += current_batch_size\n            rewards += partial_result.reward\n            elapsed_time = time.time() - start_time\n            print(f\"Evaluated on {num_evaluated_games} games in : {elapsed_time} s\")\n\n        result = utils.Result(rewards)\n        print(\"@@@eval: %s\" % result.log())\n        results.append((epoch, result))\n\n        if eval_params.plot_enabled:\n            print(\"plotting...\")\n            plotter.plot_results(results)\n            plotter.save()\n\n    elapsed_time = time.time() - start_time\n    print(f\"total time: {elapsed_time} s\")\n"
  },
  {
    "path": "pypolygames/human.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Iterator, Tuple, Callable, Optional, List, Dict\n\nimport torch\n\nimport tube\nimport polygames\nfrom pytube.data_channel_manager import DataChannelManager\n\nfrom . import utils\nfrom .params import GameParams, ModelParams, SimulationParams, ExecutionParams\nfrom .env_creation_helpers import (\n    sanitize_game_params,\n    create_model,\n    create_game,\n    create_player,\n)\n\n\n#######################################################################################\n# HUMAN-PLAYED ENVIRONMENT CREATION\n#######################################################################################\n\n\ndef create_human_environment(\n    seed_generator: Iterator[int],\n    game_params: GameParams,\n    simulation_params: SimulationParams,\n    execution_params: ExecutionParams,\n    pure_mcts: bool,\n    model\n) -> Tuple[tube.Context, Optional[tube.DataChannel], Callable[[], int]]:\n    human_first = execution_params.human_first\n    time_ratio = execution_params.time_ratio\n    total_time = execution_params.total_time\n    context = tube.Context()\n    actor_channel = (\n        None if pure_mcts else tube.DataChannel(\"act\", simulation_params.num_actor, 1)\n    )\n    rnn_state_shape = []\n    if model is not None and hasattr(model, \"rnn_cells\") and model.rnn_cells > 0:\n      rnn_state_shape = [model.rnn_cells, model.rnn_channels]\n    rnn_state_size = 0\n    if len(rnn_state_shape) >= 2:\n      rnn_state_size = rnn_state_shape[0] * rnn_state_shape[1]\n    logit_value = getattr(model, \"logit_value\", False)\n    game = create_game(\n        game_params,\n        num_episode=1,\n        seed=next(seed_generator),\n        eval_mode=True,\n        
per_thread_batchsize=0,\n        rewind=simulation_params.rewind,\n        predict_end_state=game_params.predict_end_state,\n        predict_n_states=game_params.predict_n_states,\n    )\n    player = create_player(\n        seed_generator=seed_generator,\n        game=game,\n        player=\"mcts\",\n        num_actor=simulation_params.num_actor,\n        num_rollouts=simulation_params.num_rollouts,\n        pure_mcts=pure_mcts,\n        actor_channel=actor_channel,\n        model_manager=None,\n        human_mode=True,\n        total_time=total_time,\n        time_ratio=time_ratio,\n        sample_before_step_idx=80,\n        randomized_rollouts=False,\n        sampling_mcts=False,\n        rnn_state_shape=rnn_state_shape,\n        rnn_seqlen=execution_params.rnn_seqlen,\n        logit_value=logit_value,\n    )\n    human_player = polygames.HumanPlayer()\n    if game.is_one_player_game():\n        game.add_human_player(human_player)\n    else:\n        if human_first:\n            game.add_human_player(human_player)\n            game.add_eval_player(player)\n        else:\n            game.add_eval_player(player)\n            game.add_human_player(human_player)\n\n    context.push_env_thread(game)\n\n    def get_result_for_human_player():\n        nonlocal game, human_first\n        return game.get_result()[not human_first]\n\n    return context, actor_channel, get_result_for_human_player\n\n\ndef create_tp_environment(\n    seed_generator: Iterator[int],\n    game_params: GameParams,\n    simulation_params: SimulationParams,\n    execution_params: ExecutionParams,\n    pure_mcts: bool,\n) -> Tuple[tube.Context, Optional[tube.DataChannel], Callable[[], int]]:\n    human_first = execution_params.human_first\n    time_ratio = execution_params.time_ratio\n    total_time = execution_params.total_time\n    context = tube.Context()\n    actor_channel = (\n        None if pure_mcts else tube.DataChannel(\"act\", simulation_params.num_actor, 1)\n    )\n    game = 
create_game(\n        game_params,\n        num_episode=1,\n        seed=next(seed_generator),\n        eval_mode=True,\n        per_thread_batchsize=0,\n    )\n    player = create_player(\n        seed_generator=seed_generator,\n        game=game,\n        num_actor=simulation_params.num_actor,\n        num_rollouts=simulation_params.num_rollouts,\n        pure_mcts=pure_mcts,\n        actor_channel=actor_channel,\n        model_manager=None,\n        human_mode=True,\n        total_time=total_time,\n        time_ratio=time_ratio,\n    )\n    tp_player = polygames.TPPlayer()\n    if game.is_one_player_game():\n        game.add_tp_player(tp_player)\n    else:\n        if human_first:\n            game.add_tp_player(tp_player)\n            game.add_eval_player(player)\n        else:\n            game.add_eval_player(player)\n            game.add_tp_player(tp_player)\n\n    context.push_env_thread(game)\n\n    def get_result_for_tp_player():\n        nonlocal game, human_first\n        return game.get_result()[not human_first]\n\n    return context, actor_channel, get_result_for_tp_player\n\n\n#######################################################################################\n# HUMAN-PLAYED GAME\n#######################################################################################\n\n\ndef _forward_pass_on_device(\n    device: torch.device, model: torch.jit.ScriptModule, batch_s: torch.Tensor, batch_rnn_state: torch.Tensor = None\n) -> Dict[str, torch.Tensor]:\n    batch_s = utils.to_device(batch_s, device)\n    if batch_rnn_state is not None:\n        batch_rnn_state = utils.to_device(batch_rnn_state, device)\n        with torch.no_grad():\n            reply = model(batch_s, batch_rnn_state)\n    else:\n      with torch.no_grad():\n          reply = model(batch_s)\n    return reply\n\n\ndef _play_game_against_mcts(context: tube.Context) -> None:\n    context.start()\n    while not context.terminated():\n        time.sleep(1)\n\n\ndef 
_play_game_against_neural_mcts(\n    devices: List[torch.device],\n    models: List[torch.jit.ScriptModule],\n    context: tube.Context,\n    actor_channel: tube.DataChannel,\n) -> None:\n    nb_devices = len(devices)\n    context.start()\n    dcm = DataChannelManager([actor_channel])\n    # multithread\n    with ThreadPoolExecutor(max_workers=nb_devices) as executor:\n        while not context.terminated():\n            batch = dcm.get_input(max_timeout_s=1)\n            if len(batch) == 0:\n                continue\n\n            assert len(batch) == 1\n\n            # split in as many part as there are devices\n            batches_s = torch.chunk(\n                batch[actor_channel.name][\"s\"], nb_devices, dim=0\n            )\n            has_rnn = \"rnn_state\" in batch[actor_channel.name]\n            if has_rnn:\n                batches_rnn_state = torch.chunk(\n                    batch[actor_channel.name][\"rnn_state\"], nb_devices, dim=0\n                )\n            futures = []\n            reply_eval = {\"v\": None, \"pi_logit\": None}\n            if has_rnn:\n                for device, model, batch_s, batch_rnn_state in zip(\n                    devices, models, batches_s, batches_rnn_state\n                ):\n                    futures.append(\n                        executor.submit(_forward_pass_on_device, device, model, batch_s, batch_rnn_state)\n                    )\n                results = [future.result() for future in futures]\n                reply_eval[\"v\"] = torch.cat([result[\"v\"] for result in results], dim=0)\n                reply_eval[\"pi_logit\"] = torch.cat([result[\"pi_logit\"] for result in results], dim=0)\n                reply_eval[\"rnn_state_out\"] = torch.cat([result[\"rnn_state\"] for result in results], dim=0)\n            else:\n                for device, model, batch_s in zip(\n                    devices, models, batches_s\n                ):\n                    futures.append(\n                        
executor.submit(_forward_pass_on_device, device, model, batch_s)\n                    )\n                results = [future.result() for future in futures]\n                reply_eval[\"v\"] = torch.cat([result[\"v\"] for result in results], dim=0)\n                reply_eval[\"pi_logit\"] = torch.cat([result[\"pi_logit\"] for result in results], dim=0)\n            dcm.set_reply(actor_channel.name, reply_eval)\n    dcm.terminate()\n\n\ndef play_game(\n    pure_mcts: bool,\n    devices: Optional[List[torch.device]],\n    models: Optional[List[torch.jit.ScriptModule]],\n    context: tube.Context,\n    actor_channel: Optional[tube.DataChannel],\n    get_result_for_human_player: Callable[[], int],\n) -> int:\n    if pure_mcts:\n        _play_game_against_mcts(context)\n    else:\n        _play_game_against_neural_mcts(\n            devices=devices, models=models, context=context, actor_channel=actor_channel\n        )\n    print(\"game over\")\n    return get_result_for_human_player()\n\n\ndef play_tp_game(   #FIXME TODO not sure this helps\n    pure_mcts: bool,\n    devices: Optional[List[torch.device]],\n    models: Optional[List[torch.jit.ScriptModule]],\n    context: tube.Context,\n    actor_channel: Optional[tube.DataChannel],\n    get_result_for_human_player: Callable[[], int],\n) -> int:\n    if pure_mcts:\n        _play_game_against_mcts(context)\n    else:\n        _play_game_against_neural_mcts(\n            devices=devices, models=models, context=context, actor_channel=actor_channel\n        )\n    print(\"#game over\")\n    return get_result_for_human_player()\n\n\n#######################################################################################\n# OVERALL HUMAN-PLAYED GAME WORKFLOW\n#######################################################################################\n\n\ndef run_human_played_game(\n    game_params: GameParams,\n    model_params: ModelParams,\n    simulation_params: SimulationParams,\n    execution_params: ExecutionParams,\n):\n    
print(\"#\" * 70)\n    print(\"#\" + \"HUMAN-PLAYED GAME\".center(68) + \"#\")\n    print(\"#\" * 70)\n\n    print(\"setting-up pseudo-random generator...\")\n    seed_generator = utils.generate_random_seeds(seed=execution_params.seed)\n\n    devices, models = None, None\n    if not model_params.pure_mcts:\n        print(\"loading pretrained model from checkpoint...\")\n        checkpoint = utils.load_checkpoint(checkpoint_path=model_params.init_checkpoint)\n        game_params = checkpoint[\"game_params\"]\n        sanitize_game_params(game_params)\n        model_params = checkpoint[\"model_params\"]\n        model_state_dict = checkpoint[\"model_state_dict\"]\n        del checkpoint\n        print(\"creating model(s) and device(s)...\")\n        models = []\n        devices = [torch.device(device) for device in execution_params.devices]\n        for device in devices:\n            model = create_model(game_params=game_params, model_params=model_params).to(\n                device\n            )\n            print(\"updating model...\")\n            model.load_state_dict(model_state_dict)\n            model.eval()\n            models.append(model)\n\n    print(\"creating human-played environment\")\n    context, actor_channel, get_result_for_human_player = create_human_environment(\n        seed_generator=seed_generator,\n        game_params=game_params,\n        simulation_params=simulation_params,\n        execution_params=execution_params,\n        pure_mcts=model_params.pure_mcts,\n        model=model\n    )\n\n    print(\"playing against a human player...\")\n    human_score = play_game(\n        pure_mcts=model_params.pure_mcts,\n        devices=devices,\n        models=models,\n        context=context,\n        actor_channel=actor_channel,\n        get_result_for_human_player=get_result_for_human_player,\n    )\n\n    print(f\"result for the human human_player: {human_score}\")\n\n\ndef run_tp_played_game(\n    game_params: GameParams,\n    model_params: 
ModelParams,\n    simulation_params: SimulationParams,\n    execution_params: ExecutionParams,\n):\n    print(\"#\" * 70)\n    print(\"#\" + \"HUMAN-PLAYED GAME\".center(68) + \"#\")\n    print(\"#\" * 70)\n\n    print(\"#setting-up pseudo-random generator...\")\n    seed_generator = utils.generate_random_seeds(seed=execution_params.seed)\n\n    devices, models = None, None\n    if not model_params.pure_mcts:\n        print(\"#loading pretrained model from checkpoint...\")\n        checkpoint = utils.load_checkpoint(checkpoint_path=model_params.init_checkpoint)\n        game_params = checkpoint[\"game_params\"]\n        sanitize_game_params(game_params)\n        model_params = checkpoint[\"model_params\"]\n        model_state_dict = checkpoint[\"model_state_dict\"]\n        del checkpoint\n        print(\"#creating model(s) and device(s)...\")\n        models = []\n        devices = [torch.device(device) for device in execution_params.device]\n        for device in devices:\n            model = create_model(game_params=game_params, model_params=model_params).to(\n                device\n            )\n            print(\"#updating model...\")\n            model.load_state_dict(model_state_dict)\n            model.eval()\n            models.append(model)\n\n    print(\"#creating human-played environment\")\n    context, actor_channel, get_result_for_human_player = create_tp_environment(\n        seed_generator=seed_generator,\n        game_params=game_params,\n        simulation_params=simulation_params,\n        execution_params=execution_params,\n        pure_mcts=model_params.pure_mcts,\n    )\n\n    print(\"#playing against a tp player...\")\n    human_score = play_tp_game(\n        pure_mcts=model_params.pure_mcts,\n        devices=devices,\n        models=models,\n        context=context,\n        actor_channel=actor_channel,\n        get_result_for_human_player=get_result_for_human_player,\n    )\n\n    print(f\"#result for the TP_player: {human_score}\")\n"
  },
  {
    "path": "pypolygames/model_zoo/__init__.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom .generic_model import GenericModel\nfrom .amazons_model import AmazonsModel\nfrom .nano_fc_logit_model import NanoFCLogitModel\nfrom .nano_conv_logit_model import NanoConvLogitModel\nfrom .deep_conv_fc_logit_model import DeepConvFCLogitModel\nfrom .deep_conv_conv_logit_model import DeepConvConvLogitModel\nfrom .res_conv_fc_logit_model import ResConvFCLogitModel\nfrom .res_conv_conv_logit_model import ResConvConvLogitModel\nfrom .res_conv_conv_logit_pool_model import ResConvConvLogitPoolModel\nfrom .res_conv_conv_logit_pool_model_v2 import ResConvConvLogitPoolModelV2\nfrom .u_conv_fc_logit_model import UConvFCLogitModel\nfrom .u_conv_conv_logit_model import UConvConvLogitModel\nfrom .connect4_benchmark_model import Connect4BenchModel\n\nfrom .utils import MODELS  # directory where models are registered\n"
  },
  {
    "path": "pypolygames/model_zoo/amazons_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass AmazonsModel(torch.jit.ScriptModule):\n    __constants__ = [\"c_prime\", \"h_prime\", \"w_prime\"]\n\n    DEFAULT_FCSIZE = 1024\n    DEFAULT_NNSIZE = 4\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n\n    default_game_name = \"GameOfTheAmazons\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n\n        # fc size\n        if model_params.fcsize is None:\n            model_params.fcsize = self.DEFAULT_FCSIZE\n        fcsize = model_params.fcsize\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        self.model_params = model_params\n\n        self.net1 = nn.Conv2d(\n 
           c, int(nnsize * c), nnks, stride=stride, padding=padding, dilation=dilation\n        )\n        self.net2 = nn.Conv2d(\n            int(nnsize * c),\n            int(nnsize * c),\n            nnks,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n        )\n        self.net3 = nn.Conv2d(\n            int(nnsize * c),\n            int(nnsize * c),\n            nnks,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n        )\n        self.net4 = nn.Conv2d(\n            int(nnsize * c),\n            int(nnsize * c),\n            nnks,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n        )\n        self.v1 = nn.Linear(int(nnsize * c) * h * w, fcsize)\n        self.v2 = nn.Linear(fcsize, fcsize)\n        self.v3 = nn.Linear(fcsize, 1)\n        self.pi1 = nn.Linear(int(nnsize * c) * h * w, fcsize)\n        self.pi2 = nn.Linear(fcsize, fcsize)\n        self.pi3 = nn.Linear(fcsize, c_prime + h_prime + w_prime)\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime\n        bs = x.shape[0]\n        h1 = nn.functional.relu(self.net1(x))\n        h2 = nn.functional.relu(self.net2(h1)) + h1\n        h3 = nn.functional.relu(self.net3(h2)) + h2\n        h4 = nn.functional.relu(self.net4(h3)) + h3\n        v = nn.functional.relu(self.v1(h4.flatten(1)))\n        v = nn.functional.relu(self.v2(v))\n        v = torch.tanh(self.v3(v))\n        pi_logit = nn.functional.relu(self.pi1(h4.flatten(1)))\n        pi_logit = nn.functional.relu(self.pi2(pi_logit))\n        pi_logit = nn.functional.relu(self.pi3(pi_logit))\n        if return_logit:\n            v1 = pi_logit[:, :c_prime].reshape(-1, c_prime, 1, 1)\n            v2 = pi_logit[:, c_prime : c_prime + h_prime].reshape(-1, 1, h_prime, 1)\n            v3 = pi_logit[:, c_prime + 
h_prime :].reshape(-1, 1, 1, w_prime)\n            # This representation is not sparse, that's a temporary hack\n            # for testing the idea of a cartesian product.\n            pi_logit = v1 + v2 + v3\n            return v, pi_logit\n        # TODO(oteytaud): remove duplicate reshaping.\n        v1 = nn.functional.softmax(pi_logit[:, :c_prime].reshape(-1, c_prime, 1, 1), 1)\n        v2 = nn.functional.softmax(\n            pi_logit[:, c_prime : c_prime + h_prime].reshape(-1, 1, h_prime, 1), 2\n        )\n        v3 = nn.functional.softmax(\n            pi_logit[:, c_prime + h_prime :].reshape(-1, 1, 1, w_prime), 3\n        )\n        pi = v1 * v2 * v3\n        # pi = nn.functional.softmax(pi.view(pi.shape[0], -1), 1).reshape(pi.shape)\n        # This representation is not sparse, that's a temporary hack\n        # for testing the idea of a cartesian product.\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n\n    def loss(\n        self,\n        model,\n        x: torch.Tensor,\n        v: torch.Tensor,\n        pi: torch.Tensor,\n        pi_mask: torch.Tensor,\n        stat: utils.MultiCounter\n    ) -> float:\n        # print(x.size())\n        # print(x[0])\n        # print(v)\n        # print(pi.size())\n        batchsize = pi.shape[0]\n        # pi = pi.view(batchsize, -1)\n        pred_v, pred_logit = self._forward(x, True)\n        utils.assert_eq(v.size(), pred_v.size())\n        utils.assert_eq(pred_logit.size(), pi.size())\n        utils.assert_eq(pred_logit.dim(), 4)\n\n        pred_logit = pred_logit * pi_mask.view(pred_logit.shape)\n\n        # pred_logit = pred_logit.view(batchsize, -1)\n        v_err = 0.5 * (v - pred_v).pow(2).squeeze(1)\n        s = pred_logit.shape\n        bs = x.shape[0]\n       
 pred_log_pi = nn.functional.log_softmax(pred_logit.flatten(1), 1).reshape(s)\n        # pred_log_pi = nn.functional.log_softmax(pred_logit, 1)\n        pi_err = -(pred_log_pi * pi).reshape(bs, -1).sum(1)\n\n        # why would these quantities be equal ?\n        utils.assert_eq(v_err.size(), pi_err.size())\n        err = v_err + pi_err\n\n        stat[\"v_err\"].feed(v_err.detach().mean().item())\n        stat[\"pi_err\"].feed(pi_err.detach().mean().item())\n        return err.mean()\n"
  },
  {
    "path": "pypolygames/model_zoo/connect4_benchmark_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\n\n# import utils\n\n@zutils.register_model\nclass Connect4BenchModel(torch.jit.ScriptModule):\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n    # def __init__(self):\n        super().__init__()\n        self.fc1 = nn.Linear(6 * 7 * 2, 200)\n        self.fc2 = nn.Linear(200, 200)\n        self.fc3 = nn.Linear(200, 200)\n        self.fc_pi = nn.Linear(200, 7)\n        self.fc_val = nn.Linear(200, 1)\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        x = x[:, :2, :, :]\n        # print(x.size())\n        x = x.view(-1, 84)\n        h = nn.functional.relu(self.fc1(x))\n        h = nn.functional.relu(self.fc2(h))\n        h = nn.functional.relu(self.fc3(h))\n        v = self.fc_val(h)\n        pi_logit = self.fc_pi(h)\n        if return_logit:\n            return v, pi_logit\n        pi = nn.functional.softmax(pi_logit, 1)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, 7, 1, 1)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n\n"
  },
  {
    "path": "pypolygames/model_zoo/deep_conv_conv_logit_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass DeepConvConvLogitModel(torch.jit.ScriptModule):\n    __constants__ = [\"c_prime\", \"h_prime\", \"w_prime\", \"mono\", \"conv_nets\"]\n\n    DEFAULT_NB_NETS = 13\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_POOLING = False\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Hex13\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n        if h_prime != h or w_prime != w:\n            raise RuntimeError(\n                f'The game \"{self.game_name}\" is not eligible to a conv-computed logit '\n                f'model such as \"{self.__class__.__name__}\" - try with '\n                f'\"{self.__class__.__name__.replace(\"ConvLogit\", \"FCLogit\")}\" instead'\n            )\n\n        # nb identical hidden layers (first layer excepted)\n        if model_params.nb_nets is None:\n            model_params.nb_nets = self.DEFAULT_NB_NETS\n        nb_nets = model_params.nb_nets\n        # nn size\n        if model_params.nnsize is None:\n  
          model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # pooling\n        if model_params.pooling is None:\n            model_params.pooling = self.DEFAULT_POOLING\n        pooling = model_params.pooling\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n        \n        mono = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        conv_nets = [\n            nn.Conv2d(\n                int(nnsize * c),\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n            for _ in range(nb_nets)\n        ]\n        if pooling:\n            for i in range(nb_nets):\n                conv_nets[i] = nn.Sequential(\n                    conv_nets[i],\n                    nn.MaxPool2d(\n                        kernel_size=nnks,\n                        padding=padding,\n                        stride=stride,\n                        dilation=dilation,\n           
         ),\n                )\n        if bn or bn_affine:\n            mono.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n            for i in range(nb_nets):\n                conv_nets[i] = nn.Sequential(\n                    conv_nets[i],\n                    nn.BatchNorm2d(\n                        int(nnsize * c), track_running_stats=True, affine=bn_affine\n                    ),\n                )\n        self.mono = nn.Sequential(*mono)\n        self.conv_nets = nn.ModuleList(conv_nets)\n        self.v = nn.Linear(int(nnsize * c) * h * w, 1)\n        self.pi_logit = nn.Conv2d(\n            int(nnsize * c), c_prime, nnks, stride=stride, padding=padding, dilation=dilation\n        )\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        bs = x.shape[0]\n        h = F.relu(self.mono(x))\n        for conv_net in self.conv_nets:\n            h = F.relu(conv_net(h))\n        v = torch.tanh(self.v(h.flatten(1)))\n        pi_logit = self.pi_logit(h).flatten(1)\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit.flatten(1), 1).reshape(s)\n\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n\n"
  },
  {
    "path": "pypolygames/model_zoo/deep_conv_fc_logit_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass DeepConvFCLogitModel(torch.jit.ScriptModule):\n    __constants__ = [\"c_prime\", \"h_prime\", \"w_prime\", \"mono\", \"conv_nets\"]\n\n    DEFAULT_NB_NETS = 13\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_POOLING = False\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Connect4\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n\n        # nb identical hidden layers (first layer excepted)\n        if model_params.nb_nets is None:\n            model_params.nb_nets = self.DEFAULT_NB_NETS\n        nb_nets = model_params.nb_nets\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = 
self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # pooling\n        if model_params.pooling is None:\n            model_params.pooling = self.DEFAULT_POOLING\n        pooling = model_params.pooling\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        mono = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        conv_nets = [\n            nn.Conv2d(\n                int(nnsize * c),\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n            for _ in range(nb_nets)\n        ]\n        if pooling:\n            for i in range(nb_nets):\n                conv_nets[i] = nn.Sequential(\n                    conv_nets[i],\n                    nn.MaxPool2d(\n                        kernel_size=nnks,\n                        padding=padding,\n                        stride=stride,\n                        dilation=dilation,\n                    ),\n                )\n        if bn or bn_affine:\n            mono.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n            for i in range(nb_nets):\n                conv_nets[i] = nn.Sequential(\n                    conv_nets[i],\n                    
nn.BatchNorm2d(\n                        int(nnsize * c), track_running_stats=True, affine=bn_affine\n                    ),\n                )\n        self.mono = nn.Sequential(*mono)\n        self.conv_nets = nn.ModuleList(conv_nets)\n        self.v = nn.Linear(int(nnsize * c) * h * w, 1)\n        self.pi_logit = nn.Linear(int(nnsize * c) * h * w, c_prime * h_prime * w_prime)\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        bs = x.shape[0]\n        h = F.relu(self.mono(x))\n        for conv_net in self.conv_nets:\n            h = F.relu(conv_net(h))\n        v = torch.tanh(self.v(h.flatten(1)))\n        pi_logit = self.pi_logit(h.flatten(1))\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit.flatten(1), 1).reshape(s)\n\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/generic_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass GenericModel(torch.jit.ScriptModule):\n    __constants__ = [\n        \"c_prime\",\n        \"h_prime\",\n        \"w_prime\",\n        \"net1\",\n        \"net2\",\n        \"net3\",\n        \"net4\",\n        \"v1\",\n        \"v2\",\n        \"pi1\",\n        \"pi2\",\n    ]\n\n    DEFAULT_FCSIZE = 1024\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Connect4\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n\n        # fc size\n        if model_params.fcsize is None:\n            model_params.fcsize = self.DEFAULT_FCSIZE\n        fcsize = model_params.fcsize\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = 
self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        net1 = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        net2 = [\n            nn.Conv2d(\n                int(nnsize * c),\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        net3 = [\n            nn.Conv2d(\n                int(nnsize * c),\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        net4 = [\n            nn.Conv2d(\n                int(nnsize * c),\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        v1 = [nn.Linear(int(nnsize * c) * h * w, fcsize)]\n        v2 = [nn.Linear(fcsize, fcsize)]\n        pi1 = [nn.Linear(int(nnsize * c) * h * w, fcsize)]\n        pi2 = [nn.Linear(fcsize, fcsize)]\n        if bn or bn_affine:\n        
    net1.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n            net2.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n            net3.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n            net4.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n            v1.append(\n                nn.BatchNorm1d(fcsize, track_running_stats=True, affine=bn_affine)\n            )\n            v2.append(\n                nn.BatchNorm1d(fcsize, track_running_stats=True, affine=bn_affine)\n            )\n            pi1.append(\n                nn.BatchNorm1d(fcsize, track_running_stats=True, affine=bn_affine)\n            )\n            pi2.append(\n                nn.BatchNorm1d(fcsize, track_running_stats=True, affine=bn_affine)\n            )\n        self.net1 = nn.Sequential(*net1)\n        self.net2 = nn.Sequential(*net2)\n        self.net3 = nn.Sequential(*net3)\n        self.net4 = nn.Sequential(*net4)\n        self.v1 = nn.Sequential(*v1)\n        self.v2 = nn.Sequential(*v2)\n        self.pi1 = nn.Sequential(*pi1)\n        self.pi2 = nn.Sequential(*pi2)\n        self.v3 = nn.Linear(fcsize, 1)\n        self.pi3 = nn.Linear(fcsize, c_prime * h_prime * w_prime)\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        h1 = F.relu(self.net1(x))\n        h2 = F.relu(self.net2(h1)) + h1\n        h3 = F.relu(self.net3(h2)) + h2\n        h4 = F.relu(self.net4(h3)) + h3\n        v1 = F.relu(self.v1(h4.flatten(1)))\n        v2 = F.relu(self.v2(v1))\n        v = torch.tanh(self.v3(v2))\n        pi_logit1 = F.relu(self.pi1(h4.flatten(1)))\n        pi_logit2 = F.relu(self.pi2(pi_logit1))\n        pi_logit = self.pi3(pi_logit2)\n        if return_logit:\n           
 return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit.flatten(1), 1).reshape(s)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/loss.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom typing import Tuple\n\ndef mcts_loss(\n   self,\n   model,\n   batch,\n) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n\n    predicts = getattr(self, \"predicts\", 0)\n\n    x = batch[\"s\"]\n    v = batch[\"v\"]\n    pi = batch[\"pi\"]\n    pi_mask = batch[\"pi_mask\"]\n    predict_pi = batch[\"predict_pi\"] if predicts > 0 else None\n    predict_pi_mask = batch[\"predict_pi_mask\"] if predicts > 0 else None\n\n    pi = pi.flatten(1)\n    if predicts > 0:\n        pred_v, pred_logit, pred_predict_logit = model._forward(x, return_logit=True)\n    else:\n        pred_v, pred_logit, *_ = model._forward(x, return_logit=True)\n\n    pi_mask = pi_mask.view(pred_logit.shape);\n    pred_logit = pred_logit * pi_mask - 400 * (1 - pi_mask)\n    if predicts > 0:\n        predict_pi_err = (F.mse_loss(pred_predict_logit, predict_pi, reduction=\"none\") * predict_pi_mask).flatten(2).sum(2).flatten(1).mean(1)\n\n    v_err = F.mse_loss(pred_v, v, reduction=\"none\").squeeze(1)\n    pred_log_pi = nn.functional.log_softmax(pred_logit.flatten(1), dim=1).view_as(pred_logit) * pi_mask\n    pi_err = -(pred_log_pi * pi).sum(1)\n\n    err = v_err * 1.5 + pi_err + (predict_pi_err * 0.1 if predicts > 0 else 0)\n\n    return err.mean(), v_err.detach().mean(), pi_err.detach().mean(), (predict_pi_err.detach().mean() if predicts > 0 else None)\n\n\n"
  },
  {
    "path": "pypolygames/model_zoo/nano_conv_logit_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass NanoConvLogitModel(torch.jit.ScriptModule):\n    __constants__ = [\"c_prime\", \"h_prime\", \"w_prime\", \"net\"]\n\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Hex13\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n        if h_prime != h or w_prime != w:\n            raise RuntimeError(\n                f'The game \"{self.game_name}\" is not eligible to a conv-computed logit '\n                f'model such as \"{self.__class__.__name__}\" - try with '\n                f'\"{self.__class__.__name__.replace(\"ConvLogit\", \"FCLogit\")}\" instead'\n            )\n\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = 
self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        net = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        if bn or bn_affine:\n            net.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n        self.net = nn.Sequential(*net)\n        self.v = nn.Linear(int(nnsize * c) * h * w, 1)\n        self.pi_logit = nn.Conv2d(\n            int(nnsize * c), c_prime, nnks, stride=stride, padding=padding, dilation=dilation\n        )\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        h = F.relu(self.net(x))\n        v = torch.tanh(self.v(h.flatten(1)))\n        pi_logit = self.pi_logit(h).flatten(1)\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit.flatten(1), 1).reshape(s)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/nano_fc_logit_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass NanoFCLogitModel(torch.jit.ScriptModule):\n    __constants__ = [\"c_prime\", \"h_prime\", \"w_prime\", \"net\"]\n\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Connect4\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm 
affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        net = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        if bn or bn_affine:\n            net.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n        self.net = nn.Sequential(*net)\n        self.v = nn.Linear(int(nnsize * c) * h * w, 1)\n        self.pi_logit = nn.Linear(int(nnsize * c) * h * w, c_prime * h_prime * w_prime)\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        h = F.relu(self.net(x))\n        v = torch.tanh(self.v(h.flatten(1)))\n        pi_logit = self.pi_logit(h.flatten(1))\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit.flatten(1), 1).reshape(s)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n\n"
  },
  {
    "path": "pypolygames/model_zoo/res_conv_conv_logit_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass ResConvConvLogitModel(torch.jit.ScriptModule):\n    __constants__ = [\n        \"c_prime\",\n        \"h_prime\",\n        \"w_prime\",\n        \"nb_layers_per_net\",\n        \"mono\",\n        \"resnets\",\n    ]\n\n    DEFAULT_NB_NETS = 5\n    DEFAULT_NB_LAYERS_PER_NET = 3\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_POOLING = False\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Hex13\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n        if h_prime != h or w_prime != w:\n            raise RuntimeError(\n                f'The game \"{self.game_name}\" is not eligible to a conv-computed logit '\n                f'model such as \"{self.__class__.__name__}\" - try with '\n                f'\"{self.__class__.__name__.replace(\"ConvLogit\", \"FCLogit\")}\" instead'\n            )\n\n        # nb resnets\n        if model_params.nb_nets is None:\n            model_params.nb_nets = self.DEFAULT_NB_NETS\n        nb_nets = 
model_params.nb_nets\n        # nb layers per resnet\n        if model_params.nb_layers_per_net is None:\n            model_params.nb_layers_per_net = self.DEFAULT_NB_LAYERS_PER_NET\n        nb_layers_per_net = model_params.nb_layers_per_net\n        self.nb_layers_per_net = nb_layers_per_net\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # pooling\n        if model_params.pooling is None:\n            model_params.pooling = self.DEFAULT_POOLING\n        pooling = model_params.pooling\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        mono = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n\n        resnet_list = []\n        for i in range(nb_nets):\n            nets = [\n                nn.Conv2d(\n                    int(nnsize * c),\n                    int(nnsize * c),\n                    nnks,\n                    stride=stride,\n                    padding=padding,\n                    dilation=dilation,\n                  
  bias=not bn_affine,\n                )\n                for _ in range(nb_layers_per_net)\n            ]\n            if bn or bn_affine:\n                for j in range(nb_layers_per_net):\n                    nets[j] = nn.Sequential(\n                        nets[j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine\n                        ),\n                    )\n            if pooling:\n                for j in range(nb_layers_per_net):\n                    nets[j] = nn.Sequential(\n                        nets[j],\n                        nn.MaxPool2d(\n                            kernel_size=nnks,\n                            padding=padding,\n                            stride=stride,\n                            dilation=dilation,\n                        ),\n                    )\n            resnet_list.append(nets)\n        if bn or bn_affine:\n            mono.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine),\n            )\n            for i in range(nb_nets):\n                for j in range(nb_layers_per_net):\n                    resnet_list[i][j] = nn.Sequential(\n                        resnet_list[i][j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine\n                        ),\n                    )\n        for i in range(nb_nets):\n            resnet_list[i] = nn.ModuleList(resnet_list[i])\n        self.mono = nn.Sequential(*mono)\n        self.resnets = nn.ModuleList(resnet_list)\n        self.v = nn.Linear(int(nnsize * c) * h * w, 1)\n        self.pi_logit = nn.Conv2d(\n            int(nnsize * c), c_prime, nnks, stride=stride, padding=padding, dilation=dilation\n        )\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        previous_block = self.mono(x)  # linear 
transformation only\n        for resnet in self.resnets:\n            sublayer_no = 0\n            h = F.relu(previous_block)  # initial activation\n            for net in resnet:\n                if sublayer_no < self.nb_layers_per_net - 1:\n                    h = F.relu(net(h))\n                else:\n                    h = net(h) + previous_block  #  linear transformation only\n                    previous_block = h\n                sublayer_no = sublayer_no + 1\n        h = F.relu(previous_block)  # final activation\n        v = torch.tanh(self.v(h.flatten(1)))\n        pi_logit = self.pi_logit(h).flatten(1)\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit, 1).reshape(s)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/res_conv_conv_logit_pool_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass ResConvConvLogitPoolModel(torch.jit.ScriptModule):\n    __constants__ = [\n        \"c_prime\",\n        \"h_prime\",\n        \"w_prime\",\n        \"nb_layers_per_net\",\n        \"mono\",\n        \"resnets\",\n    ]\n\n    DEFAULT_NB_NETS = 5\n    DEFAULT_NB_LAYERS_PER_NET = 3\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_POOLING = False\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Hex13\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n        if h_prime != h or w_prime != w:\n            raise RuntimeError(\n                f'The game \"{self.game_name}\" is not eligible to a conv-computed logit '\n                f'model such as \"{self.__class__.__name__}\" - try with '\n                f'\"{self.__class__.__name__.replace(\"ConvLogit\", \"FCLogit\")}\" instead'\n            )\n\n        # nb resnets\n        if model_params.nb_nets is None:\n            model_params.nb_nets = self.DEFAULT_NB_NETS\n        nb_nets = 
model_params.nb_nets\n        # nb layers per resnet\n        if model_params.nb_layers_per_net is None:\n            model_params.nb_layers_per_net = self.DEFAULT_NB_LAYERS_PER_NET\n        nb_layers_per_net = model_params.nb_layers_per_net\n        self.nb_layers_per_net = nb_layers_per_net\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # pooling\n        if model_params.pooling is None:\n            model_params.pooling = self.DEFAULT_POOLING\n        pooling = model_params.pooling\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        batchnorm_momentum = model_params.batchnorm_momentum\n\n        mono = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n\n        resnet_list = []\n        for i in range(nb_nets):\n            nets = [\n                nn.Conv2d(\n                    int(nnsize * c),\n                    int(nnsize * c),\n                    nnks,\n                    stride=stride,\n                    
 padding=padding,\n                    dilation=dilation,\n                    bias=not bn_affine,\n                )\n                for _ in range(nb_layers_per_net)\n            ]\n            if bn or bn_affine:\n                for j in range(nb_layers_per_net):\n                    nets[j] = nn.Sequential(\n                        nets[j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine, momentum=batchnorm_momentum\n                        ),\n                    )\n            if pooling:\n                for j in range(nb_layers_per_net):\n                    nets[j] = nn.Sequential(\n                        nets[j],\n                        nn.MaxPool2d(\n                            kernel_size=nnks,\n                            padding=padding,\n                            stride=stride,\n                            dilation=dilation,\n                        ),\n                    )\n            resnet_list.append(nets)\n        if bn or bn_affine:\n            mono.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine, momentum=batchnorm_momentum),\n            )\n            for i in range(nb_nets):\n                for j in range(nb_layers_per_net):\n                    resnet_list[i][j] = nn.Sequential(\n                        resnet_list[i][j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine, momentum=batchnorm_momentum\n                        ),\n                    )\n        for i in range(nb_nets):\n            resnet_list[i] = nn.ModuleList(resnet_list[i])\n        self.mono = nn.Sequential(*mono)\n        self.resnets = nn.ModuleList(resnet_list)\n        self.v = nn.Linear(2 * int(nnsize * c), 1)\n        self.pi_logit = nn.Conv2d(\n            int(nnsize * c), c_prime, nnks, stride=stride, padding=padding, dilation=dilation\n        )\n\n    
@torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        previous_block = self.mono(x)  # linear transformation only\n        for resnet in self.resnets:\n            sublayer_no = 0\n            h = F.relu(previous_block)  # initial activation\n            for net in resnet:\n                if sublayer_no < self.nb_layers_per_net - 1:\n                    h = F.relu(net(h))\n                else:\n                    h = net(h) + previous_block  #  linear transformation only\n                    previous_block = h\n                sublayer_no = sublayer_no + 1\n        h = F.relu(previous_block)  # final activation\n        pool = torch.cat((F.adaptive_max_pool2d(h, 1), F.adaptive_avg_pool2d(h, 1)), 1)\n        v = torch.tanh(self.v(pool.flatten(1)))\n        pi_logit = self.pi_logit(h).flatten(1)\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit, 1).reshape(s)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, x.size(2), x.size(3))\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/res_conv_conv_logit_pool_model_v2.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom typing import Tuple\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass ResConvConvLogitPoolModelV2(torch.jit.ScriptModule):\n    __constants__ = [\n        \"c_prime\",\n        \"h_prime\",\n        \"w_prime\",\n        \"nb_layers_per_net\",\n        \"mono\",\n        \"resnets\",\n        \"global_pooling\",\n    ]\n\n    DEFAULT_NB_NETS = 5\n    DEFAULT_NB_LAYERS_PER_NET = 3\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_POOLING = False\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Hex13\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        r_c, r_h, r_w = info[\"raw_feature_size\"]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n        if h_prime != h or w_prime != w:\n            raise RuntimeError(\n                f'The game \"{self.game_name}\" is not eligible to a conv-computed logit '\n                f'model such as \"{self.__class__.__name__}\" - try with '\n                f'\"{self.__class__.__name__.replace(\"ConvLogit\", \"FCLogit\")}\" instead'\n            )\n\n        # nb resnets\n        
if model_params.nb_nets is None:\n            model_params.nb_nets = self.DEFAULT_NB_NETS\n        nb_nets = model_params.nb_nets\n        # nb layers per resnet\n        if model_params.nb_layers_per_net is None:\n            model_params.nb_layers_per_net = self.DEFAULT_NB_LAYERS_PER_NET\n        nb_layers_per_net = model_params.nb_layers_per_net\n        self.nb_layers_per_net = nb_layers_per_net\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # pooling\n        if model_params.pooling is None:\n            model_params.pooling = self.DEFAULT_POOLING\n        pooling = model_params.pooling\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n\n        self.global_pooling = model_params.global_pooling\n        if model_params.activation_function == \"relu\":\n          self.af = F.relu\n        elif model_params.activation_function == \"gelu\":\n          self.af = F.gelu\n        elif model_params.activation_function == \"celu\":\n          self.af = F.celu\n        else:\n          raise RuntimeError(\"Unknown activation function\")\n        batchnorm_momentum = model_params.batchnorm_momentum\n\n        self.predict_end_state = 
game_params.predict_end_state\n        self.predict_n_states = game_params.predict_n_states\n\n        self.predicts = self.predict_n_states + (2 if self.predict_end_state else 0)\n\n        print(\"global pooling \", self.global_pooling)\n        print(\"af \", model_params.activation_function)\n\n        mono = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n\n        resnet_list = []\n        for i in range(nb_nets):\n            nets = [\n                nn.Conv2d(\n                    int(nnsize * c) + int(nnsize * c * (self.global_pooling if _ == 0 else 0)) * 2,\n                    int(nnsize * c),\n                    nnks,\n                    stride=stride,\n                    padding=padding,\n                    dilation=dilation,\n                    bias=not bn_affine,\n                )\n                for _ in range(nb_layers_per_net)\n            ]\n            if bn or bn_affine:\n                for j in range(nb_layers_per_net):\n                    nets[j] = nn.Sequential(\n                        nets[j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine, momentum=batchnorm_momentum\n                        ),\n                    )\n            if pooling:\n                for j in range(nb_layers_per_net):\n                    nets[j] = nn.Sequential(\n                        nets[j],\n                        nn.MaxPool2d(\n                            kernel_size=nnks,\n                            padding=padding,\n                            stride=stride,\n                            dilation=dilation,\n                        ),\n                    )\n            resnet_list.append(nets)\n        if bn or bn_affine:\n            
mono.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine, momentum=batchnorm_momentum),\n            )\n        for i in range(nb_nets):\n            resnet_list[i] = nn.ModuleList(resnet_list[i])\n        self.mono = nn.Sequential(*mono)\n        self.resnets = nn.ModuleList(resnet_list)\n        self.v = nn.Linear(2 * int(nnsize * c), 2 * int(nnsize * c))\n        self.v2 = nn.Linear(2 * int(nnsize * c), 1)\n        self.pi_logit = nn.Conv2d(\n            int(nnsize * c), c_prime, nnks, stride=stride, padding=padding, dilation=dilation\n        )\n        if self.predicts > 0:\n          self.predict_pi_logit = nn.Conv2d(\n              int(nnsize * c), r_c * self.predicts, nnks, stride=stride, padding=padding, dilation=dilation\n          )\n        else:\n          self.predict_pi_logit = None\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        af = self.af\n        global_pooling = self.global_pooling\n        previous_block = self.mono(x)  # linear transformation only\n        for resnet in self.resnets:\n            sublayer_no = 0\n            h = previous_block\n            if global_pooling > 0:\n                hpart = h.narrow(1, 0, int(h.size(1) * global_pooling))\n                h = torch.cat((h, F.adaptive_max_pool2d(hpart, 1).expand_as(hpart), F.adaptive_avg_pool2d(hpart, 1).expand_as(hpart)), 1)\n            h = af(h)  # initial activation\n            for net in resnet:\n                if sublayer_no < self.nb_layers_per_net - 1:\n                    h = af(net(h))\n                else:\n                    h = net(h) + previous_block  #  linear transformation only\n                    previous_block = h\n                sublayer_no = sublayer_no + 1\n        h = af(previous_block)  # final activation\n        pool = torch.cat((F.adaptive_max_pool2d(h, 1), F.adaptive_avg_pool2d(h, 1)), 1)\n        pi_logit = self.pi_logit(h).flatten(1)\n        v 
= af(self.v(pool.flatten(1)))\n        v = torch.tanh(self.v2(v))\n        if return_logit:\n            if self.predict_pi_logit is not None:\n                predict_pi_logit = self.predict_pi_logit(h)\n                return v, pi_logit, predict_pi_logit\n            else:\n                return v, pi_logit, torch.empty(0)\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit.float(), 1).reshape(s)\n        return v, pi, torch.empty(0)\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit, _ = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, x.size(2), x.size(3))\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/res_conv_fc_logit_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass ResConvFCLogitModel(torch.jit.ScriptModule):\n    __constants__ = [\n        \"c_prime\",\n        \"h_prime\",\n        \"w_prime\",\n        \"nb_layers_per_net\",\n        \"mono\",\n        \"resnets\",\n    ]\n\n    DEFAULT_NB_NETS = 5\n    DEFAULT_NB_LAYERS_PER_NET = 3\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_POOLING = False\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Connect4\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n\n        # nb resnets\n        if model_params.nb_nets is None:\n            model_params.nb_nets = self.DEFAULT_NB_NETS\n        nb_nets = model_params.nb_nets\n        # nb layers per resnet\n        if model_params.nb_layers_per_net is None:\n            model_params.nb_layers_per_net = self.DEFAULT_NB_LAYERS_PER_NET\n        nb_layers_per_net = model_params.nb_layers_per_net\n        self.nb_layers_per_net = nb_layers_per_net\n        # nn size\n        if model_params.nnsize is None:\n   
         model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # pooling\n        if model_params.pooling is None:\n            model_params.pooling = self.DEFAULT_POOLING\n        pooling = model_params.pooling\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        mono = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n\n        resnet_list = []\n        for i in range(nb_nets):\n            nets = [\n                nn.Conv2d(\n                    int(nnsize * c),\n                    int(nnsize * c),\n                    nnks,\n                    stride=stride,\n                    padding=padding,\n                    dilation=dilation,\n                    bias=not bn_affine,\n                )\n                for _ in range(nb_layers_per_net)\n            ]\n            if bn or bn_affine:\n                for j in range(nb_layers_per_net):\n                    nets[j] = nn.Sequential(\n                        nets[j],\n                        nn.BatchNorm2d(\n                            int(nnsize * 
c), track_running_stats=True, affine=bn_affine\n                        ),\n                    )\n            if pooling:\n                for j in range(nb_layers_per_net):\n                    nets[j] = nn.Sequential(\n                        nets[j],\n                        nn.MaxPool2d(\n                            kernel_size=nnks,\n                            padding=padding,\n                            stride=stride,\n                            dilation=dilation,\n                        ),\n                    )\n            resnet_list.append(nets)\n        if bn or bn_affine:\n            mono.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine),\n            )\n            for i in range(nb_nets):\n                for j in range(nb_layers_per_net):\n                    resnet_list[i][j] = nn.Sequential(\n                        resnet_list[i][j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine\n                        ),\n                    )\n        for i in range(nb_nets):\n            resnet_list[i] = nn.ModuleList(resnet_list[i])\n        self.mono = nn.Sequential(*mono)\n        self.resnets = nn.ModuleList(resnet_list)\n        self.v = nn.Linear(int(nnsize * c) * h * w, 1)\n        self.pi_logit = nn.Linear(int(nnsize * c) * h * w, c_prime * h_prime * w_prime)\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        previous_block = self.mono(x)  # linear transformation only\n        for resnet in self.resnets:\n            sublayer_no = 0\n            h = F.relu(previous_block)  # initial activation\n            for net in resnet:\n                if sublayer_no < self.nb_layers_per_net - 1:\n                    h = F.relu(net(h))\n                else:\n                    h = net(h) + previous_block  #  linear transformation only\n                    previous_block 
= h\n                sublayer_no = sublayer_no + 1\n        h = F.relu(previous_block)  # final activation\n        v = torch.tanh(self.v(h.flatten(1)))\n        pi_logit = self.pi_logit(h.flatten(1))\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit, 1).reshape(s)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/u_conv_conv_logit_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass UConvConvLogitModel(torch.jit.ScriptModule):\n    __constants__ = [\n        \"c_prime\",\n        \"h_prime\",\n        \"w_prime\",\n        \"nb_layers_per_net\",\n        \"mono\",\n        \"nb_unets_div_by_2\",\n        \"unets\",\n    ]\n\n    DEFAULT_NB_NETS = 5\n    DEFAULT_NB_LAYERS_PER_NET = 3\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_POOLING = False\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Hex13\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n        if h_prime != h or w_prime != w:\n            raise RuntimeError(\n                f'The game \"{self.game_name}\" is not eligible to a conv-computed logit '\n                f'model such as \"{self.__class__.__name__}\" - try with '\n                f'\"{self.__class__.__name__.replace(\"ConvLogit\", \"FCLogit\")}\" instead'\n            )\n\n        # nb unets\n        if model_params.nb_nets is None:\n            model_params.nb_nets = 
self.DEFAULT_NB_NETS\n        nb_nets = model_params.nb_nets\n        if nb_nets % 2 == 0:\n            raise RuntimeError(\n                f'The model \"{self.__class__.__name__}\" accepts only odd numbers '\n                f'for \"nb_nets\" while it was set to {nb_nets}'\n            )\n        self.nb_unets_div_by_2 = nb_unets_div_by_2 = nb_nets // 2\n        # nb layers per unet\n        if model_params.nb_layers_per_net is None:\n            model_params.nb_layers_per_net = self.DEFAULT_NB_LAYERS_PER_NET\n        nb_layers_per_net = model_params.nb_layers_per_net\n        self.nb_layers_per_net = nb_layers_per_net\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # pooling\n        if model_params.pooling is None:\n            model_params.pooling = self.DEFAULT_POOLING\n        pooling = model_params.pooling\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        mono = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        if 
bn or bn_affine:\n            mono.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n        self.mono = nn.Sequential(*mono)\n\n        unet_list = [None] * (2 * nb_unets_div_by_2 + 1)\n        for i in range(nb_unets_div_by_2):\n            nets1 = [\n                nn.Conv2d(\n                    int(nnsize * c),\n                    int(nnsize * c),\n                    nnks,\n                    stride=stride,\n                    padding=padding,\n                    dilation=dilation,\n                    bias=not bn_affine,\n                )\n                for _ in range(nb_layers_per_net)\n            ]\n            nets2 = [\n                nn.Conv2d(\n                    int(nnsize * c),\n                    int(nnsize * c),\n                    nnks,\n                    stride=stride,\n                    padding=padding,\n                    dilation=dilation,\n                    bias=not bn_affine,\n                )\n                for _ in range(nb_layers_per_net)\n            ]\n            if bn or bn_affine:\n                for j in range(nb_layers_per_net):\n                    nets1[j] = nn.Sequential(\n                        nets1[j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine\n                        ),\n                    )\n                    nets2[j] = nn.Sequential(\n                        nets2[j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine\n                        ),\n                    )\n            if pooling:\n                for j in range(nb_layers_per_net):\n                    nets1[j] = nn.Sequential(\n                        nets1[j],\n                        nn.MaxPool2d(\n                            kernel_size=nnks,\n                            padding=padding,\n            
                stride=stride,\n                            dilation=dilation,\n                        ),\n                    )\n                    nets2[j] = nn.Sequential(\n                        nets2[j],\n                        nn.MaxPool2d(\n                            kernel_size=nnks,\n                            padding=padding,\n                            stride=stride,\n                            dilation=dilation,\n                        ),\n                    )\n            unet_list[i] = nn.ModuleList(nets1)\n            unet_list[-i - 1] = nn.ModuleList(nets2)\n        middle_nets = [\n            nn.Conv2d(\n                int(nnsize * c),\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n            for _ in range(nb_layers_per_net)\n        ]\n        if bn or bn_affine:\n            for j in range(nb_layers_per_net):\n                middle_nets[j] = nn.Sequential(\n                    middle_nets[j],\n                    nn.BatchNorm2d(\n                        int(nnsize * c), track_running_stats=True, affine=bn_affine\n                    ),\n                )\n        if pooling:\n            for j in range(nb_layers_per_net):\n                middle_nets[j] = nn.Sequential(\n                    middle_nets[j],\n                    nn.MaxPool2d(\n                        kernel_size=nnks,\n                        padding=padding,\n                        stride=stride,\n                        dilation=dilation,\n                    ),\n                )\n        unet_list[nb_unets_div_by_2] = nn.ModuleList(middle_nets)\n        self.unets = nn.ModuleList(unet_list)\n\n        self.v = nn.Linear(int(nnsize * c) * h * w, 1)\n        self.pi_logit = nn.Conv2d(\n            int(nnsize * c), c_prime, nnks, stride=stride, padding=padding, dilation=dilation\n        )\n\n    
@torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        h = self.mono(x)  # linear transformation only\n        saved_h = [\n            h\n        ] * self.nb_unets_div_by_2  # saves output of last linear transformation\n        layer_no = 0\n        for unet in self.unets:\n            sublayer_no = 0\n            for net in unet:\n                h = F.relu(h)  # activation on previous block\n                h = net(h)\n                if sublayer_no == self.nb_layers_per_net - 1:\n                    if layer_no < self.nb_unets_div_by_2:\n                        saved_h[layer_no] = h\n                    elif layer_no > self.nb_unets_div_by_2:\n                        h = h + saved_h[layer_no - self.nb_unets_div_by_2 - 1]\n                sublayer_no = sublayer_no + 1\n            layer_no = layer_no + 1\n        h = F.relu(h)  # final activation\n        v = torch.tanh(self.v(h.flatten(1)))\n        pi_logit = self.pi_logit(h).flatten(1)\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit.flatten(1), 1).reshape(s)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/u_conv_fc_logit_model.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. import utils\n\n\n@zutils.register_model\nclass UConvFCLogitModel(torch.jit.ScriptModule):\n    __constants__ = [\n        \"c_prime\",\n        \"h_prime\",\n        \"w_prime\",\n        \"nb_layers_per_net\",\n        \"mono\",\n        \"nb_unets_div_by_2\",\n        \"unets\",\n    ]\n\n    DEFAULT_NB_NETS = 5\n    DEFAULT_NB_LAYERS_PER_NET = 3\n    DEFAULT_NNSIZE = 2\n    DEFAULT_NNKS = 3\n    DEFAULT_STRIDE = 1\n    DEFAULT_DILATION = 1\n    DEFAULT_POOLING = False\n    DEFAULT_BN = False\n    # DEFAULT_BN_AFFINE = False\n\n    default_game_name = \"Connect4\"\n\n    def __init__(self, game_params: GameParams, model_params: ModelParams):\n        torch.jit.ScriptModule.__init__(self)\n        if game_params.game_name is None:\n            game_params.game_name = self.__class__.default_game_name\n        self.game_name = game_params.game_name\n        self.game_params = game_params\n        info = zutils.get_game_info(game_params)\n        c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n        c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n            \"action_size\"\n        ][:3]\n\n        # nb unets\n        if model_params.nb_nets is None:\n            model_params.nb_nets = self.DEFAULT_NB_NETS\n        nb_nets = model_params.nb_nets\n        if nb_nets % 2 == 0:\n            raise RuntimeError(\n                f'The model \"{self.__class__.__name__}\" accepts only odd numbers '\n                f'for \"nb_nets\" while it was set to {nb_nets}'\n            )\n        self.nb_unets_div_by_2 = nb_unets_div_by_2 = nb_nets // 2\n        # nb 
layers per unet\n        if model_params.nb_layers_per_net is None:\n            model_params.nb_layers_per_net = self.DEFAULT_NB_LAYERS_PER_NET\n        nb_layers_per_net = model_params.nb_layers_per_net\n        self.nb_layers_per_net = nb_layers_per_net\n        # nn size\n        if model_params.nnsize is None:\n            model_params.nnsize = self.DEFAULT_NNSIZE\n        nnsize = model_params.nnsize\n        # kernel size\n        if model_params.nnks is None:\n            model_params.nnks = self.DEFAULT_NNKS\n        nnks = model_params.nnks\n        # stride\n        stride = self.DEFAULT_STRIDE\n        # dilation\n        dilation = self.DEFAULT_DILATION\n        # padding\n        padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n        # pooling\n        if model_params.pooling is None:\n            model_params.pooling = self.DEFAULT_POOLING\n        pooling = model_params.pooling\n        # batch norm\n        if model_params.bn is None:\n            model_params.bn = self.DEFAULT_BN\n        bn = model_params.bn\n        # # batch norm affine\n        # if model_params.bn_affine is None:\n        #     model_params.bn_affine = self.DEFAULT_BN_AFFINE\n        # bn_affine = model_params.bn_affine\n        bn_affine = bn\n        self.model_params = model_params\n\n        mono = [\n            nn.Conv2d(\n                c,\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n        ]\n        if bn or bn_affine:\n            mono.append(\n                nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n            )\n        self.mono = nn.Sequential(*mono)\n\n        unet_list = [None] * (2 * nb_unets_div_by_2 + 1)\n        for i in range(nb_unets_div_by_2):\n            nets1 = [\n                nn.Conv2d(\n                    
int(nnsize * c),\n                    int(nnsize * c),\n                    nnks,\n                    stride=stride,\n                    padding=padding,\n                    dilation=dilation,\n                    bias=not bn_affine,\n                )\n                for _ in range(nb_layers_per_net)\n            ]\n            nets2 = [\n                nn.Conv2d(\n                    int(nnsize * c),\n                    int(nnsize * c),\n                    nnks,\n                    stride=stride,\n                    padding=padding,\n                    dilation=dilation,\n                    bias=not bn_affine,\n                )\n                for _ in range(nb_layers_per_net)\n            ]\n            if bn or bn_affine:\n                for j in range(nb_layers_per_net):\n                    nets1[j] = nn.Sequential(\n                        nets1[j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine\n                        ),\n                    )\n                    nets2[j] = nn.Sequential(\n                        nets2[j],\n                        nn.BatchNorm2d(\n                            int(nnsize * c), track_running_stats=True, affine=bn_affine\n                        ),\n                    )\n            if pooling:\n                for j in range(nb_layers_per_net):\n                    nets1[j] = nn.Sequential(\n                        nets1[j],\n                        nn.MaxPool2d(\n                            kernel_size=nnks,\n                            padding=padding,\n                            stride=stride,\n                            dilation=dilation,\n                        ),\n                    )\n                    nets2[j] = nn.Sequential(\n                        nets2[j],\n                        nn.MaxPool2d(\n                            kernel_size=nnks,\n                            padding=padding,\n                        
    stride=stride,\n                            dilation=dilation,\n                        ),\n                    )\n            unet_list[i] = nn.ModuleList(nets1)\n            unet_list[-i - 1] = nn.ModuleList(nets2)\n        middle_nets = [\n            nn.Conv2d(\n                int(nnsize * c),\n                int(nnsize * c),\n                nnks,\n                stride=stride,\n                padding=padding,\n                dilation=dilation,\n                bias=not bn_affine,\n            )\n            for _ in range(nb_layers_per_net)\n        ]\n        if bn or bn_affine:\n            for j in range(nb_layers_per_net):\n                middle_nets[j] = nn.Sequential(\n                    middle_nets[j],\n                    nn.BatchNorm2d(\n                        int(nnsize * c), track_running_stats=True, affine=bn_affine\n                    ),\n                )\n        if pooling:\n            for j in range(nb_layers_per_net):\n                middle_nets[j] = nn.Sequential(\n                    middle_nets[j],\n                    nn.MaxPool2d(\n                        kernel_size=nnks,\n                        padding=padding,\n                        stride=stride,\n                        dilation=dilation,\n                    ),\n                )\n        unet_list[nb_unets_div_by_2] = nn.ModuleList(middle_nets)\n        self.unets = nn.ModuleList(unet_list)\n\n        self.v = nn.Linear(int(nnsize * c) * h * w, 1)\n        self.pi_logit = nn.Linear(int(nnsize * c) * h * w, c_prime * h_prime * w_prime)\n\n    @torch.jit.script_method\n    def _forward(self, x: torch.Tensor, return_logit: bool):\n        h = self.mono(x)  # linear transformation only\n        saved_h = [\n            h\n        ] * self.nb_unets_div_by_2  # saves output of last linear transformation\n        layer_no = 0\n        for unet in self.unets:\n            sublayer_no = 0\n            for net in unet:\n                h = F.relu(h)  # activation on 
previous block\n                h = net(h)\n                if sublayer_no == self.nb_layers_per_net - 1:\n                    if layer_no < self.nb_unets_div_by_2:\n                        saved_h[layer_no] = h\n                    elif layer_no > self.nb_unets_div_by_2:\n                        h = h + saved_h[layer_no - self.nb_unets_div_by_2 - 1]\n                sublayer_no = sublayer_no + 1\n            layer_no = layer_no + 1\n        h = F.relu(h)  # final activation\n        v = torch.tanh(self.v(h.flatten(1)))\n        pi_logit = self.pi_logit(h.flatten(1))\n        if return_logit:\n            return v, pi_logit\n        s = pi_logit.shape\n        pi = F.softmax(pi_logit.flatten(1), 1).reshape(s)\n        return v, pi\n\n    @torch.jit.script_method\n    def forward(self, x: torch.Tensor):\n        v, pi_logit = self._forward(x, True)\n        pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n        reply = {\"v\": v, \"pi_logit\": pi_logit}\n        return reply\n"
  },
  {
    "path": "pypolygames/model_zoo/utils.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Dict\nfrom .. import params\nfrom .. import env_creation_helpers\n\nimport torch\n\n\nMODELS: Dict[str, torch.jit.ScriptModule] = {}\n\n\ndef register_model(cls):\n    MODELS[cls.__name__] = cls\n    return cls\n\n\ndef get_game_info(game_params: params) -> Dict[str, list]:\n    game = env_creation_helpers.create_game(\n        game_params=game_params,\n        num_episode=-1,\n        seed=0,\n        eval_mode=False,\n        per_thread_batchsize=0,\n    )\n    info = {\"feature_size\": game.get_feat_size(), \"action_size\": game.get_action_size()}\n    info[\"raw_feature_size\"] = game.get_raw_feat_size()\n    return info\n\n\ndef get_consistent_padding_from_nnks(nnks: int, dilation: int = 1) -> int:\n    # the params are such than the output layer of a Conv2d is the same\n    # size as the input layer assuming the stride is one\n    padding = dilation * (nnks - 1) / 2\n    if padding != int(padding):\n        raise ValueError(\n            \"The values of nnks, padding and dilation must be integers \"\n            \"such as 2 * padding == dilation * (nnks - 1) - \"\n            f\"nnks={nnks} dilation={dilation} padding={padding} - \"\n            \"for default values for dilation and padding, nnks should be even\"\n        )\n    return int(padding)\n"
  },
  {
    "path": "pypolygames/params.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import OrderedDict\nfrom dataclasses import dataclass, field\nimport os\nfrom pathlib import Path\nfrom typing import Iterator, Tuple, Union, List, Optional, Dict, Any\n\nfrom .weight_init import WEIGHT_INIT\n\ndef boolarg(x):\n  if str(x).lower() in [\"true\", \"yes\", \"on\", \"1\", \"y\"]:\n    return True\n  if str(x).lower() in [\"false\", \"no\", \"off\", \"0\", \"n\"]:\n    return False\n  raise RuntimeError(\"Unknown bool value \" + str(x))\n\n@dataclass\nclass ArgFields:\n    name: Optional[str] = None\n    opts: Optional[Dict[str, Any]] = None\n\n\n@dataclass\nclass GameParams:\n    game_name: Optional[str] = None\n    game_options: List[str] = None\n    out_features: bool = False\n    turn_features: bool = False\n    turn_features_mc: bool = False\n    geometric_features: bool = False\n    random_features: int = 0\n    one_feature: bool = False\n    history: int = 0\n    predict_end_state: bool = False\n    predict_n_states: int = 0\n    player: str = \"mcts\"\n\n    def __setattr__(self, attr, value):\n        if value is None:\n            value = getattr(self, attr)\n        super().__setattr__(attr, value)\n\n    def __eq__(self, other_game_params):\n        return all(\n            getattr(self, field) == getattr(other_game_params, field)\n            for field in {\n                \"game_name\",\n                \"game_options\",\n                \"out_features\",\n                \"turn_features\",\n                \"turn_features_mc\",\n                \"geometric_features\",\n                \"one_feature\",\n                \"history\",\n                \"predict_end_state\",\n                \"predict_n_states\",\n                \"player\",\n            }\n        )\n\n    @classmethod\n    def 
arg_fields(cls) -> Iterator[Tuple[str, ArgFields]]:\n        params = OrderedDict(\n            game_name=ArgFields(\n                opts=dict(\n                    type=str,\n                    help=\"Game name - if left unspecified it will default to the game \"\n                    \"that the model selected with '--model_name' refers to as default\",\n                )\n            ),\n            game_options=ArgFields(\n                opts=dict(\n                    type=str,\n                    nargs=\"*\",\n                    default=None,\n                    help=\"Optional list of extra options to customise the game.\",\n                )\n            ),\n            out_features=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.out_features else \"store_true\",\n                    help=\"If set, the input to the NN includes a channel \"\n                    \"with 1 on the frontier\",\n                )\n            ),\n            turn_features=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.turn_features else \"store_true\",\n                    help=\"If set, the input to the NN includes a channel \"\n                    \"with the player index broadcasted\",\n                )\n            ),\n            turn_features_mc=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.turn_features_mc else \"store_true\",\n                    help=\"If set, the input to the NN includes one channel \"\n                    \"for each player (color), with the one corresponding to the\"\n                    \" current player set to 1 and the others set to 0\",\n                )\n            ),\n            geometric_features=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.geometric_features else \"store_true\",\n                    help=\"If set, the input to the NN includes \"\n                    
\"4 geometric channels representing the position on the board\",\n                )\n            ),\n            random_features=ArgFields(\n                opts=dict(type=int, help=\"Number of random features the input includes\")\n            ),\n            one_feature=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.one_feature else \"store_true\",\n                    help=\"If set, the input to the NN includes \"\n                    \"a channel with 1 everywhere\",\n                )\n            ),\n            history=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of last steps whose representation is \"\n                    \"added in the featurization\",\n                )\n            ),\n            predict_end_state=ArgFields(\n                opts=dict(\n                    type=boolarg,\n                    help=\"Side learning: predict end state\",\n                )\n            ),\n            predict_n_states=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Side learning: predict N next game states\",\n                )\n            ),\n            player=ArgFields(\n                opts=dict(\n                    type=str,\n                    help=\"Type of player to use. 
One of: mcts, forward\",\n                )\n            )\n        )\n        for param, arg_field in params.items():\n            if arg_field.name is None:\n                arg_field.name = f\"--{param}\"\n            if arg_field.opts is None:\n                arg_field.opts = {}\n            if \"help\" not in arg_field.opts:\n                arg_field.opts[\"help\"] = \"\"\n            arg_field.opts[\"help\"] += f\" (DEFAULT: {getattr(cls(), param)})\"\n            yield param, arg_field\n\n\n@dataclass\nclass ModelParams:\n    \"\"\"Model parameters - all set to 'None' as they have sensible default\n    specified in their definitions\"\"\"\n\n    init_checkpoint: Path = None\n    pure_mcts: bool = False\n    model_name: str = None\n    nb_nets: int = None\n    nb_layers_per_net: int = None\n    nnsize: float = None\n    fcsize: int = None\n    nnks: int = None\n    pooling: bool = False\n    bn: bool = False\n    # bn_affine: bool = False\n    init_method: str = next(iter(WEIGHT_INIT))\n    activation_function: str = \"relu\"\n    global_pooling: float = 0\n    batchnorm_momentum: float = 0.01\n    rnn_interval: float = 0\n\n    def __setattr__(self, attr, value):\n        if value is None:\n            value = getattr(self, attr)\n        super().__setattr__(attr, value)\n\n    def __post_init__(self):\n        if self.init_checkpoint is not None:\n            if self.pure_mcts:\n                raise ValueError(\n                    \"The MCTS can be either assisted with a \"\n                    \"'--init_checkpoint' neural network or be a '--pure_mcts'\"\n                )\n            self.init_checkpoint = self.init_checkpoint.absolute()\n        # if self.bn and self.bn_affine:\n        #     raise ValueError(\n        #         \"At most one of the options '--bn' and '--bn_affine' can be selected\"\n        #     )\n\n    @classmethod\n    def arg_fields(cls) -> Iterator[Tuple[str, ArgFields]]:\n        params = OrderedDict(\n            
init_checkpoint=ArgFields(\n                opts=dict(\n                    type=Path,\n                    help=\"Path to pretrained model (a checkpoint), in case of a \"\n                    \"simulation or for fine-tuning - if specified the game parameters \"\n                    \"and model parameters should be left unspecified as the pretrained \"\n                    \"model contains them\",\n                )\n            ),\n            pure_mcts=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.pure_mcts else \"store_true\",\n                    help=\"If set, the inference will be done with MCTS only \"\n                    \"- no Neural Network\",\n                )\n            ),\n            model_name=ArgFields(\n                opts=dict(\n                    type=str,\n                    help=\"Model name - if left unspecified \"\n                    \"it will default to a generic model\",\n                )\n            ),\n            nb_nets=ArgFields(\n                opts=dict(type=int, help=\"Number of subnets, when applicable\")\n            ),\n            nb_layers_per_net=ArgFields(\n                opts=dict(type=int, help=\"Number of layers per subnet, when applicable\")\n            ),\n            nnsize=ArgFields(\n                opts=dict(\n                    type=float, help=\"Number of units per hidden layer, when applicable\"\n                )\n            ),\n            fcsize=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Size of final fully-connected layers, when applicable\",\n                )\n            ),\n            nnks=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Kernel size for convolutional layers, when applicable \"\n                    \"- dilation and stride are set to one, \"\n                    \"so it must be an odd number\",\n                )\n            ),\n 
           pooling=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.pooling else \"store_true\",\n                    help=\"If set, adds pooling layers following convolutional layers, \"\n                    \"when applicable\",\n                )\n            ),\n            bn=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.bn else \"store_true\",\n                    # help=\"If set, adds batch normalisation with \"\n                    # \"no learnable affine parameters\",\n                    help=\"If set, adds batch normalisation with \"\n                    \"learnable affine parameters\",\n                )\n            ),\n            # bn_affine=ArgFields(\n            #     opts=dict(\n            #         action=\"store_false\" if cls.bn_affine else \"store_true\",\n            #         help=\"If set, adds batch normalisation with \"\n            #         \"learnable affine parameters\",\n            #     )\n            # ),\n            init_method=ArgFields(\n                opts=dict(\n                    type=str,\n                    help=\"Weight initialisation method\",\n                    choices=list(WEIGHT_INIT),\n                )\n            ),\n            activation_function=ArgFields(\n                opts=dict(\n                    type=str,\n                    help=\"Activation function to use\",\n                )\n            ),\n            global_pooling=ArgFields(\n                opts=dict(\n                    type=float,\n                    help=\"Global pooling - this will, for the models that support it, \"\n                    \"add global pooling over some channels after convolutional layers. \"\n                    \"The parameter is the proportion of the channels that should be pooled. \"\n                    \"Eg. 
0.1 will specify that we should pool 10% of the channels\"\n                )\n            ),\n            batchnorm_momentum=ArgFields(\n                opts=dict(\n                    type=float,\n                    help=\"Batch normalization momentum\",\n                )\n            ),\n            rnn_interval=ArgFields(\n                opts=dict(\n                    type=float,\n                    help=\"RNN layer every this many CNN layers\",\n                )\n            ),\n        )\n        for param, arg_field in params.items():\n            if arg_field.name is None:\n                arg_field.name = f\"--{param}\"\n            if arg_field.opts is None:\n                arg_field.opts = {}\n            if \"help\" not in arg_field.opts:\n                arg_field.opts[\"help\"] = \"\"\n            arg_field.opts[\"help\"] += f\" (DEFAULT: {getattr(cls(), param)})\"\n            yield param, arg_field\n\n\n@dataclass\nclass OptimParams:\n    num_epoch: int = 10_000_000  # basically infinity\n    epoch_len: int = 1000\n    batchsize: int = 128\n    lr: float = 1e-3\n    eps: float = 1.5e-4\n    grad_clip: float = 0.25\n    reset_optimizer_state: bool = False\n\n    def __setattr__(self, attr, value):\n        if value is None:\n            value = getattr(self, attr)\n        super().__setattr__(attr, value)\n\n    @classmethod\n    def arg_fields(cls) -> Iterator[Tuple[str, ArgFields]]:\n        params = OrderedDict(\n            num_epoch=ArgFields(opts=dict(type=int, help=f\"Number of epochs\")),\n            epoch_len=ArgFields(\n                opts=dict(type=int, help=f\"Number of train batches per epoch\")\n            ),\n            batchsize=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of training examples in a mini-batch (train batch)\"\n                    \"- also the batchsize in GPU (when enabled) for training\",\n                )\n            ),\n            
lr=ArgFields(opts=dict(type=float, default=cls.lr, help=f\"Learning rate\")),\n            eps=ArgFields(\n                opts=dict(\n                    type=float,\n                    help=\"Term added to the denominator to improve \"\n                    \"numerical stability \",\n                )\n            ),\n            grad_clip=ArgFields(\n                opts=dict(type=float, help=f\"Max norm of the gradients\")\n            ),\n            reset_optimizer_state=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.reset_optimizer_state else \"store_true\",\n                    help=\"If set, any internal state of optimizer from checkpoint will be reset\"\n                )\n            ),\n        )\n        for param, arg_field in params.items():\n            if arg_field.name is None:\n                arg_field.name = f\"--{param}\"\n            if arg_field.opts is None:\n                arg_field.opts = {}\n            if \"help\" not in arg_field.opts:\n                arg_field.opts[\"help\"] = \"\"\n            arg_field.opts[\"help\"] += f\" (DEFAULT: {getattr(cls(), param)})\"\n            yield param, arg_field\n\n\n@dataclass\nclass SimulationParams:\n    num_game: int = 2\n    num_threads: int = 0\n    num_actor: int = 1  # should be 1 at training time\n    num_rollouts: int = 1600\n    replay_capacity: int = 1_000_000\n    replay_warmup: int = 10_000\n    sync_period: int = 100\n    act_batchsize: int = 1\n    per_thread_batchsize: int = 0\n    bsfinder_max_bs: int = 10240\n    bsfinder_max_ms: float = 100\n    rewind: int = 0\n    randomized_rollouts: bool = False\n    sampling_mcts: bool = False\n    sample_before_step_idx: int = 30\n    train_channel_timeout_ms: int = 1000\n    train_channel_num_slots: int = 10000\n\n    def __setattr__(self, attr, value):\n        if value is None:\n            value = getattr(self, attr)\n        super().__setattr__(attr, value)\n\n    def __post_init__(self) -> 
None:\n        if self.per_thread_batchsize == 0 and self.act_batchsize > self.num_game:\n            raise ValueError(\"'act_batchsize' cannot be larger than 'num_games'\")\n\n    @classmethod\n    def arg_fields(cls) -> Iterator[Tuple[str, ArgFields]]:\n        params = OrderedDict(\n            num_game=ArgFields(\n                opts=dict(type=int, help=f\"Number of game-running threads\")\n            ),\n            num_threads=ArgFields(\n                opts=dict(type=int, help=f\"Number of async threads\")\n            ),\n            num_actor=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=f\"Number of actors per non-human player, \"\n                    \"one actor being one thread doing MCTS \"\n                    \"- the more num_actor, the larger the MCTS\",\n                )\n            ),\n            num_rollouts=ArgFields(\n                opts=dict(type=int, help=\"Number of rollouts per actor/thread\")\n            ),\n            replay_capacity=ArgFields(\n                opts=dict(\n                    type=int, help=\"Nb of act_batches the replay buffer can contain\"\n                )\n            ),\n            replay_warmup=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Nb of act_batches the replay buffer needs to buffer \"\n                    \"before the training can start\",\n                )\n            ),\n            sync_period=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of epochs between two consecutive sync \"\n                    \"between the model and the assembler\",\n                )\n            ),\n            act_batchsize=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"When '--per_thread_batchsize' is not set, \"\n                    \"number or requests batched together for inference\",\n                
)\n            ),\n            per_thread_batchsize=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"When non-zero, \"\n                    \"number of games per game-running thread, \"\n                    \"batched together for inference (see '--act_batchsize'). \"\n                    \"This parameter will be automatically tuned if it is <= 0\",\n                )\n            ),\n            bsfinder_max_bs=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"The maximum batch size for the automatic batch size \"\n                    \"finder to use\",\n                )\n            ),\n            bsfinder_max_ms=ArgFields(\n                opts=dict(\n                    type=float,\n                    help=\"The maximum time in milliseconds for a batch size \"\n                    \"found by the automatic batch size finder to use\",\n                )\n            ),\n            rewind=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Use rewind feature for training; number of times to rewind\",\n                )\n            ),\n            randomized_rollouts=ArgFields(\n                opts=dict(\n                    type=boolarg,\n                    help=\"Enable randomized rollouts\",\n                )\n            ),\n            sampling_mcts=ArgFields(\n                opts=dict(\n                    type=boolarg,\n                    help=\"Use sampling MCTS\",\n                )\n            ),\n            sample_before_step_idx=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Before this many steps in the game, sample over moves instead \"\n                    \" of always selecting the best move\",\n                )\n            ),\n            train_channel_timeout_ms=ArgFields(\n                opts=dict(\n                    type=int,\n             
       help=\"Timeout (in milliseconds) to wait for actors to produce \"\n                    \"trajectories\",\n                )\n            ),\n            train_channel_num_slots=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of slots in train channel used to send trajectories\",\n                )\n            ),\n        )\n        for param, arg_field in params.items():\n            if arg_field.name is None:\n                arg_field.name = f\"--{param}\"\n            if arg_field.opts is None:\n                arg_field.opts = {}\n            if \"help\" not in arg_field.opts:\n                arg_field.opts[\"help\"] = \"\"\n            arg_field.opts[\"help\"] += f\" (DEFAULT: {getattr(cls(), param)})\"\n            yield param, arg_field\n\n\n@dataclass\nclass ExecutionParams:\n    checkpoint_dir: Path = None\n    save_dir: str = None  # keep for deprecation warning\n    save_uncompressed: bool = True\n    do_not_save_replay_buffer: bool = False\n    saving_period: int = 100\n    max_time: Optional[int] = None\n    human_first: bool = False\n    time_ratio: float = 0.035\n    total_time: float = 0\n    devices: List[str] = field(default_factory=lambda: [\"cuda:0\"])\n    seed: int = 1\n    listen: str = \"\"\n    connect: str = \"\"\n    opponent_model_path: Path = None\n    tournament_mode: bool = False\n    rnn_seqlen: int = 0\n\n    def __setattr__(self, attr, value):\n        if value is None:\n            try:\n                value = getattr(self, attr)\n            except AttributeError:\n                value = getattr(type(self)(), attr)\n        super().__setattr__(attr, value)\n\n    def __post_init__(self) -> None:\n        if self.checkpoint_dir is not None:\n            self.checkpoint_dir = self.checkpoint_dir.resolve().absolute()\n        if self.save_dir is not None:\n            raise RuntimeError(\"\"\"--save_dir is deprecated, use --checkpoint_dir instead, with slightly 
different behavior:\n    - no subfolder creation.\n    - resumes from latest checkpoint if available in the directory.\"\"\")\n\n    @classmethod\n    def arg_fields(cls) -> Iterator[Tuple[str, ArgFields]]:\n        params = OrderedDict(\n            checkpoint_dir=ArgFields(\n                opts=dict(\n                    type=Path,\n                    help=\"Directory for saving checkpoints. \"\n                         \"If the directory is not empty, the latest checkpoint will be resumed\",\n                )\n            ),\n            save_dir=ArgFields(\n                opts=dict(\n                    type=Path,\n                    help=\"Deprecated, use checkpoint_dir with slightly different behavior instead\"\n                )\n            ),\n            save_uncompressed=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.save_uncompressed else \"store_true\",\n                    help=\"If set, saved checkpoints will be saved uncompressed\",\n                )\n            ),\n            do_not_save_replay_buffer=ArgFields(\n                opts=dict(\n                    action=\"store_false\"\n                    if cls.do_not_save_replay_buffer\n                    else \"store_true\",\n                    help=\"If set, the replay buffer will be not saved \"\n                    \"in the checkpoint\",\n                )\n            ),\n            saving_period=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of epochs between two consecutive checkpoints\",\n                )\n            ),\n            max_time=ArgFields(\n                opts=dict(type=int, help=\"Maximum time allowed for a run (in seconds)\")\n            ),\n            human_first=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.human_first else \"store_true\",\n                    help=\"If set in a two-player game, \" \"the human 
player plays first\",\n                )\n            ),\n            time_ratio=ArgFields(\n                opts=dict(\n                    type=float, help=\"Part of the remaining time for the next move\"\n                )\n            ),\n            total_time=ArgFields(\n                opts=dict(\n                    type=float,\n                    help=\"Total time in seconds for the entire game for one player\",\n                )\n            ),\n            devices=ArgFields(\n                opts=dict(\n                    type=str,\n                    nargs=\"*\",\n                    help=\"List of torch devices where the computation for the model\"\n                    \"will happen \"\n                    '(e.g., \"cpu\", \"cuda:0\") '\n                    \"- in training mode, only one device is allowed\",\n                )\n            ),\n            seed=ArgFields(\n                opts=dict(type=int, help=\"Seed for pseudo-random number generator\")\n            ),\n            listen=ArgFields(\n                opts=dict(\n                    type=str,\n                    help=\"Listen for distributed training, eg. tcp://0.0.0.0:5611\",\n                )\n            ),\n            connect=ArgFields(\n                opts=dict(\n                    type=str,\n                    help=\"Connect to hostname for distributed training, eg. 
tcp://127.0.0.1:5611\",\n                )\n            ),\n            opponent_model_path=ArgFields(\n                opts=dict(\n                    type=Path,\n                    help=\"Load this model as the opponent - will not request model updates\",\n                )\n            ),\n            tournament_mode=ArgFields(\n                opts=dict(\n                    type=boolarg,\n                    help=\"Use tournament mode\",\n                )\n            ),\n            rnn_seqlen=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"RNN sequence length used for training\",\n                )\n            ),\n        )\n        for param, arg_field in params.items():\n            if arg_field.name is None:\n                arg_field.name = f\"--{param}\"\n            if arg_field.opts is None:\n                arg_field.opts = {}\n            if \"help\" not in arg_field.opts:\n                arg_field.opts[\"help\"] = \"\"\n            arg_field.opts[\"help\"] += f\" (DEFAULT: {getattr(cls(), param)})\"\n            yield param, arg_field\n\n\n@dataclass\nclass EvalParams:\n    real_time: bool = False\n    checkpoint_dir: Path = None\n    checkpoint: Path = None\n    device_eval: List[str] = field(default_factory=lambda: [\"cuda:0\"])\n    num_game_eval: int = 100\n    num_parallel_games_eval: int = None\n    num_actor_eval: int = 1\n    num_rollouts_eval: int = 400\n    checkpoint_opponent: Path = None\n    device_opponent: List[str] = field(default_factory=lambda: [\"cuda:0\"])\n    num_actor_opponent: int = 1\n    num_rollouts_opponent: int = 2000\n    seed_eval: int = 2\n    plot_enabled: bool = False\n    plot_server: str = \"http://localhost\"\n    plot_port: int = 8097\n    eval_verbosity: int = 1\n\n    def __setattr__(self, attr, value):\n        if value is None:\n            try:\n                value = getattr(self, attr)\n            except AttributeError:\n                # TODO: this 
part may be buggy (infinite recursion), is it needed?\n                # cannot create with both None for checkpoint_dir and checkpoint\n                defaults = self.__class__(checkpoint_dir=Path(\"blublu\"))\n                if attr != \"checkpoint_dir\":\n                    value = getattr(defaults, attr)\n        super().__setattr__(attr, value)\n\n    def __post_init__(self) -> None:\n        if self.real_time and self.checkpoint is not None:\n            raise ValueError(\n                \"In '--real_time' the evaluation follow the training \"\n                \"so '--checkpoint' should not be set\"\n            )\n        if self.checkpoint_dir is None and self.checkpoint is None:\n            raise ValueError(\n                \"Either a '--checkpoint_dir' or a path to a '--checkpoint' \"\n                \"must be specified\"\n            )\n        if self.checkpoint_dir is not None and self.checkpoint is not None:\n            raise ValueError(\n                \"Either a '--checkpoint_dir' or a path to a '--checkpoint' \"\n                \"must be specified, but not both\"\n            )\n        if self.checkpoint is not None and self.plot_enabled:\n            raise ValueError(\n                \"Plotting is not available if the evaluation is performed \"\n                \"only on one checkpoint\"\n            )\n        if self.checkpoint_dir is not None:\n            self.checkpoint_dir = self.checkpoint_dir.absolute()\n        if self.checkpoint is not None:\n            self.checkpoint = self.checkpoint.absolute()\n\n    @classmethod\n    def arg_fields(cls) -> Iterator[Tuple[str, ArgFields]]:\n        params = OrderedDict(\n            real_time=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.real_time else \"store_true\",\n                    help=\"In 'real_time' the evaluation follows the training \"\n                    \"as it goes, \"\n                    \"taking the last available 
checkpoints and \"\n                    \"skipping some previous checkpoints \"\n                    \"if they are taking too much time to compute\",\n                )\n            ),\n            checkpoint_dir=ArgFields(\n                opts=dict(\n                    type=Path,\n                    help=\"Directory storing the checkpoints \"\n                    \"- if set, '--checkpoint' should not be set\",\n                )\n            ),\n            checkpoint=ArgFields(\n                opts=dict(\n                    type=Path,\n                    help=\"Path to the individual checkpoint to be evaluated \"\n                    \"- if set, '--checkpoint_dir' should not be set\",\n                )\n            ),\n            device_eval=ArgFields(\n                opts=dict(\n                    type=str,\n                    nargs=\"*\",\n                    help=\"List of torch devices where the computation for the model\"\n                    \"to be tested will happen \"\n                    '(e.g., \"cpu\", \"cuda:0\")',\n                )\n            ),\n            num_game_eval=ArgFields(\n                opts=dict(\n                    type=int, help=\"Number of games played against a pure MCTS opponent\"\n                )\n            ),\n            num_parallel_games_eval=ArgFields(\n                opts=dict(\n                    type=int, help=\"Number of evaluation games to be played in parallel. 
\"\n                                   \"If set to None, all games are played in parallel\"\n                )\n            ),\n            num_actor_eval=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of actors per player for the model to be tested, \"\n                    \"one actor being one thread doing MCTS \"\n                    \"- the more num_actor_eval, the larger the MCTS \"\n                    \"- when the model plays against another model as opponent, \"\n                    \"it needs to be set to a number > 1\",\n                )\n            ),\n            num_rollouts_eval=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of rollouts per actor/thread for \"\n                    \"the model to be tested\",\n                )\n            ),\n            checkpoint_opponent=ArgFields(\n                opts=dict(\n                    type=Path,\n                    help=\"Path to the checkpoint the opponent will use as model\"\n                    \" - if not set, the opponent will be a pure MCTS\",\n                )\n            ),\n            device_opponent=ArgFields(\n                opts=dict(\n                    type=str,\n                    nargs=\"*\",\n                    help=\"List of torch devices where the computation for the opponent\"\n                    \"will happen \"\n                    '(e.g., \"cpu\", \"cuda:0\")',\n                )\n            ),\n            num_actor_opponent=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of MCTS actor threads for the opponent\",\n                )\n            ),\n            num_rollouts_opponent=ArgFields(\n                opts=dict(\n                    type=int,\n                    help=\"Number of rollouts per actor/thread for the opponent\",\n                )\n            ),\n            
seed_eval=ArgFields(\n                opts=dict(type=int, help=\"Seed for pseudo-random number generator\")\n            ),\n            plot_enabled=ArgFields(\n                opts=dict(\n                    action=\"store_false\" if cls.plot_enabled else \"store_true\",\n                    help=\"If set, visdom plots the evaluation as it is computed\",\n                )\n            ),\n            plot_server=ArgFields(opts=dict(type=str, help=\"Visdom server url\")),\n            plot_port=ArgFields(opts=dict(type=int, help=\"Visdom server port\")),\n            eval_verbosity=ArgFields(opts=dict(type=int, help=\"Verbosity during the evaluation\")),\n        )\n        defaults = cls(checkpoint_dir=Path(\"blublu\"))  # cannot create with both None for checkpoint_dir and checkpoint\n        defaults.checkpoint_dir = None  # revert\n        for param, arg_field in params.items():\n            if arg_field.name is None:\n                arg_field.name = f\"--{param}\"\n            if arg_field.opts is None:\n                arg_field.opts = {}\n            if \"help\" not in arg_field.opts:\n                arg_field.opts[\"help\"] = \"\"\n            arg_field.opts[\n                \"help\"\n            ] += f\" (DEFAULT: {getattr(defaults, param)})\"\n            yield param, arg_field\n\n\nGenericParams = Union[\n    GameParams, ModelParams, OptimParams, SimulationParams, ExecutionParams\n]\n"
  },
  {
    "path": "pypolygames/tests/README.md",
    "content": "\n## how to use test_interactions.py\n\n- remove old ground-truth file: `rm pypolygames/tests/data/Hex11.txt`\n\n- run tests to generate a new ground-truth file: `pytest pypolygames --durations=10 --verbose -x`\n\n- manually check that the generated file is correct: `cat pypolygames/tests/data/Hex11.txt`\n\n- check that all tests pass: `pytest pypolygames --durations=10 --verbose`\n\n"
  },
  {
    "path": "pypolygames/tests/__init__.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n"
  },
  {
    "path": "pypolygames/tests/data/BlockGo.txt",
    "content": "actions: ['0', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: 35, 8, 9\n\nCurrent board:\n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | |x| | | | \n | | | | | | | |x| | | | \n | | | | | | | |x|x| | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n\nLegal Actions:\nAction 0: 0, 3, 3\nAction 1: 4, 3, 3\nAction 2: 8, 3, 3\nAction 3: 8, 2, 3\nAction 4: 8, 3, 2\nAction 5: 8, 2, 2\nAction 6: 12, 3, 3\nAction 7: 12, 2, 3\nAction 8: 12, 1, 3\nAction 9: 12, 0, 3\nAction 10: 13, 3, 3\nAction 11: 13, 3, 2\nAction 12: 13, 3, 1\nAction 13: 13, 3, 0\nAction 14: 16, 3, 3\nAction 15: 16, 2, 3\nAction 16: 16, 2, 2\nAction 17: 16, 1, 2\nAction 18: 17, 3, 3\nAction 19: 17, 3, 2\nAction 20: 17, 4, 2\nAction 21: 17, 4, 1\nAction 22: 20, 3, 3\nAction 23: 20, 2, 3\nAction 24: 20, 2, 4\nAction 25: 20, 1, 4\nAction 26: 21, 3, 3\nAction 27: 21, 3, 2\nAction 28: 21, 2, 2\nAction 29: 21, 2, 1\nAction 30: 24, 3, 3\nAction 31: 24, 2, 3\nAction 32: 24, 1, 3\nAction 33: 24, 2, 2\nAction 34: 25, 3, 3\nAction 35: 25, 3, 2\nAction 36: 25, 3, 1\nAction 37: 25, 4, 2\nAction 38: 26, 3, 3\nAction 39: 26, 4, 3\nAction 40: 26, 5, 3\nAction 41: 26, 4, 4\nAction 42: 27, 3, 3\nAction 43: 27, 3, 4\nAction 44: 27, 3, 5\nAction 45: 27, 2, 4\nAction 46: 28, 3, 3\nAction 47: 28, 2, 3\nAction 48: 28, 1, 3\nAction 49: 28, 1, 2\nAction 50: 29, 3, 3\nAction 51: 29, 3, 2\nAction 52: 29, 3, 1\nAction 53: 29, 4, 1\nAction 54: 30, 3, 3\nAction 55: 30, 4, 3\nAction 56: 30, 5, 3\nAction 57: 30, 5, 4\nAction 58: 31, 3, 
3\nAction 59: 31, 3, 4\nAction 60: 31, 3, 5\nAction 61: 31, 2, 5\nAction 62: 32, 3, 3\nAction 63: 32, 2, 3\nAction 64: 32, 1, 3\nAction 65: 32, 3, 2\nAction 66: 33, 3, 3\nAction 67: 33, 3, 2\nAction 68: 33, 3, 1\nAction 69: 33, 4, 3\nAction 70: 34, 3, 3\nAction 71: 34, 4, 3\nAction 72: 34, 5, 3\nAction 73: 34, 3, 4\nAction 74: 35, 3, 3\nAction 75: 35, 3, 4\nAction 76: 35, 3, 5\nAction 77: 35, 2, 3\nAction 78: 0, 9, 3\nAction 79: 4, 9, 3\nAction 80: 8, 9, 3\nAction 81: 8, 8, 3\nAction 82: 8, 9, 2\nAction 83: 8, 8, 2\nAction 84: 12, 9, 3\nAction 85: 12, 8, 3\nAction 86: 12, 7, 3\nAction 87: 12, 6, 3\nAction 88: 13, 9, 3\nAction 89: 13, 9, 2\nAction 90: 13, 9, 1\nAction 91: 13, 9, 0\nAction 92: 16, 9, 3\nAction 93: 16, 8, 3\nAction 94: 16, 8, 2\nAction 95: 16, 7, 2\nAction 96: 17, 9, 3\nAction 97: 17, 9, 2\nAction 98: 17, 10, 2\nAction 99: 17, 10, 1\nAction 100: 20, 9, 3\nAction 101: 20, 8, 3\nAction 102: 20, 8, 4\nAction 103: 20, 7, 4\nAction 104: 21, 9, 3\nAction 105: 21, 9, 2\nAction 106: 21, 8, 2\nAction 107: 21, 8, 1\nAction 108: 24, 9, 3\nAction 109: 24, 8, 3\nAction 110: 24, 7, 3\nAction 111: 24, 8, 2\nAction 112: 25, 9, 3\nAction 113: 25, 9, 2\nAction 114: 25, 9, 1\nAction 115: 25, 10, 2\nAction 116: 26, 9, 3\nAction 117: 26, 10, 3\nAction 118: 26, 11, 3\nAction 119: 26, 10, 4\nAction 120: 27, 9, 3\nAction 121: 27, 9, 4\nAction 122: 27, 9, 5\nAction 123: 27, 8, 4\nAction 124: 28, 9, 3\nAction 125: 28, 8, 3\nAction 126: 28, 7, 3\nAction 127: 28, 7, 2\nAction 128: 29, 9, 3\nAction 129: 29, 9, 2\nAction 130: 29, 9, 1\nAction 131: 29, 10, 1\nAction 132: 30, 9, 3\nAction 133: 30, 10, 3\nAction 134: 30, 11, 3\nAction 135: 30, 11, 4\nAction 136: 31, 9, 3\nAction 137: 31, 9, 4\nAction 138: 31, 9, 5\nAction 139: 31, 8, 5\nAction 140: 32, 9, 3\nAction 141: 32, 8, 3\nAction 142: 32, 7, 3\nAction 143: 32, 9, 2\nAction 144: 33, 9, 3\nAction 145: 33, 9, 2\nAction 146: 33, 9, 1\nAction 147: 33, 10, 3\nAction 148: 34, 9, 3\nAction 149: 34, 10, 3\nAction 150: 34, 11, 3\nAction 
151: 34, 9, 4\nAction 152: 35, 9, 3\nAction 153: 35, 9, 4\nAction 154: 35, 9, 5\nAction 155: 35, 8, 3\nAction 156: 0, 3, 9\nAction 157: 4, 3, 9\nAction 158: 8, 3, 9\nAction 159: 8, 2, 9\nAction 160: 8, 3, 8\nAction 161: 8, 2, 8\nAction 162: 12, 3, 9\nAction 163: 12, 2, 9\nAction 164: 12, 1, 9\nAction 165: 12, 0, 9\nAction 166: 13, 3, 9\nAction 167: 13, 3, 8\nAction 168: 13, 3, 7\nAction 169: 13, 3, 6\nAction 170: 16, 3, 9\nAction 171: 16, 2, 9\nAction 172: 16, 2, 8\nAction 173: 16, 1, 8\nAction 174: 17, 3, 9\nAction 175: 17, 3, 8\nAction 176: 17, 4, 8\nAction 177: 17, 4, 7\nAction 178: 20, 3, 9\nAction 179: 20, 2, 9\nAction 180: 20, 2, 10\nAction 181: 20, 1, 10\nAction 182: 21, 3, 9\nAction 183: 21, 3, 8\nAction 184: 21, 2, 8\nAction 185: 21, 2, 7\nAction 186: 24, 3, 9\nAction 187: 24, 2, 9\nAction 188: 24, 1, 9\nAction 189: 24, 2, 8\nAction 190: 25, 3, 9\nAction 191: 25, 3, 8\nAction 192: 25, 3, 7\nAction 193: 25, 4, 8\nAction 194: 26, 3, 9\nAction 195: 26, 4, 9\nAction 196: 26, 5, 9\nAction 197: 26, 4, 10\nAction 198: 27, 3, 9\nAction 199: 27, 3, 10\nAction 200: 27, 3, 11\nAction 201: 27, 2, 10\nAction 202: 28, 3, 9\nAction 203: 28, 2, 9\nAction 204: 28, 1, 9\nAction 205: 28, 1, 8\nAction 206: 29, 3, 9\nAction 207: 29, 3, 8\nAction 208: 29, 3, 7\nAction 209: 29, 4, 7\nAction 210: 30, 3, 9\nAction 211: 30, 4, 9\nAction 212: 30, 5, 9\nAction 213: 30, 5, 10\nAction 214: 31, 3, 9\nAction 215: 31, 3, 10\nAction 216: 31, 3, 11\nAction 217: 31, 2, 11\nAction 218: 32, 3, 9\nAction 219: 32, 2, 9\nAction 220: 32, 1, 9\nAction 221: 32, 3, 8\nAction 222: 33, 3, 9\nAction 223: 33, 3, 8\nAction 224: 33, 3, 7\nAction 225: 33, 4, 9\nAction 226: 34, 3, 9\nAction 227: 34, 4, 9\nAction 228: 34, 5, 9\nAction 229: 34, 3, 10\nAction 230: 35, 3, 9\nAction 231: 35, 3, 10\nAction 232: 35, 3, 11\nAction 233: 35, 2, 9\n\nInput action:  applying action... 
\n\nLast Action: 31, 2, 11\n\nCurrent board:\n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | |o| | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | | | | | | \n | | | | | | | |x| | | | \n | | | | | | | |x| | | | \n | |x|x| | | | |x|x| | | \n | |x| | | | | | | | | | \n | |x| | | | | | | | | | \n | | | | | | | | | | | | \n\nLegal Actions:\nAction 0: 4, 9, 3\nAction 1: 8, 9, 3\nAction 2: 8, 8, 3\nAction 3: 8, 9, 2\nAction 4: 8, 8, 2\nAction 5: 12, 9, 3\nAction 6: 12, 8, 3\nAction 7: 12, 7, 3\nAction 8: 12, 6, 3\nAction 9: 13, 9, 3\nAction 10: 13, 9, 2\nAction 11: 13, 9, 1\nAction 12: 13, 9, 0\nAction 13: 16, 9, 3\nAction 14: 16, 8, 3\nAction 15: 16, 8, 2\nAction 16: 16, 7, 2\nAction 17: 17, 9, 3\nAction 18: 17, 9, 2\nAction 19: 17, 10, 2\nAction 20: 17, 10, 1\nAction 21: 20, 9, 3\nAction 22: 20, 8, 3\nAction 23: 20, 8, 4\nAction 24: 20, 7, 4\nAction 25: 21, 9, 3\nAction 26: 21, 9, 2\nAction 27: 21, 8, 2\nAction 28: 21, 8, 1\nAction 29: 24, 9, 3\nAction 30: 24, 8, 3\nAction 31: 24, 7, 3\nAction 32: 24, 8, 2\nAction 33: 25, 9, 3\nAction 34: 25, 9, 2\nAction 35: 25, 9, 1\nAction 36: 25, 10, 2\nAction 37: 26, 9, 3\nAction 38: 26, 10, 3\nAction 39: 26, 11, 3\nAction 40: 26, 10, 4\nAction 41: 27, 9, 3\nAction 42: 27, 9, 4\nAction 43: 27, 9, 5\nAction 44: 27, 8, 4\nAction 45: 28, 9, 3\nAction 46: 28, 8, 3\nAction 47: 28, 7, 3\nAction 48: 28, 7, 2\nAction 49: 29, 9, 3\nAction 50: 29, 9, 2\nAction 51: 29, 9, 1\nAction 52: 29, 10, 1\nAction 53: 30, 9, 3\nAction 54: 30, 10, 3\nAction 55: 30, 11, 3\nAction 56: 30, 11, 4\nAction 57: 31, 9, 3\nAction 58: 31, 9, 4\nAction 59: 31, 9, 5\nAction 60: 31, 8, 5\nAction 61: 32, 9, 3\nAction 62: 32, 8, 3\nAction 63: 32, 7, 3\nAction 64: 32, 9, 2\nAction 65: 33, 9, 3\nAction 66: 33, 9, 2\nAction 67: 33, 9, 1\nAction 68: 33, 10, 3\nAction 69: 34, 9, 3\nAction 70: 34, 10, 3\nAction 71: 34, 11, 3\nAction 72: 34, 9, 4\nAction 73: 35, 9, 3\nAction 74: 35, 9, 4\nAction 75: 
35, 9, 5\nAction 76: 35, 8, 3\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/Breakthrough.txt",
    "content": "actions: ['1', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: 1, 7, 6\n\nCurrent board:\nx|x|x|x|x|x|x|x\nx|x|x|x|x|x|x|x\n | | | | | | | \n | | | | | | | \n | | | | | | | \n | | | | | | |o\no|o|o|o|o|o|o| \no|o|o|o|o|o|o|o\n\nLegal Actions:\nAction 0: 2, 0, 1\nAction 1: 1, 0, 1\nAction 2: 2, 1, 1\nAction 3: 0, 1, 1\nAction 4: 1, 1, 1\nAction 5: 2, 2, 1\nAction 6: 0, 2, 1\nAction 7: 1, 2, 1\nAction 8: 2, 3, 1\nAction 9: 0, 3, 1\nAction 10: 1, 3, 1\nAction 11: 2, 4, 1\nAction 12: 0, 4, 1\nAction 13: 1, 4, 1\nAction 14: 2, 5, 1\nAction 15: 0, 5, 1\nAction 16: 1, 5, 1\nAction 17: 2, 6, 1\nAction 18: 0, 6, 1\nAction 19: 1, 6, 1\nAction 20: 0, 7, 1\nAction 21: 1, 7, 1\n\nInput action:  applying action... \n\nLast Action: 1, 7, 7\n\nCurrent board:\nx|x|x|x|x|x|x|x\n |x|x|x|x|x|x|x\nx| | | | | | | \n | | | | | | | \n | | | | | | | \n | | | | | | |o\no|o|o|o|o|o|o|o\no|o|o|o|o|o|o| \n\nLegal Actions:\nAction 0: 1, 0, 0\nAction 1: 2, 0, 2\nAction 2: 1, 0, 2\nAction 3: 0, 1, 0\nAction 4: 2, 1, 1\nAction 5: 1, 1, 1\nAction 6: 2, 2, 1\nAction 7: 0, 2, 1\nAction 8: 1, 2, 1\nAction 9: 2, 3, 1\nAction 10: 0, 3, 1\nAction 11: 1, 3, 1\nAction 12: 2, 4, 1\nAction 13: 0, 4, 1\nAction 14: 1, 4, 1\nAction 15: 2, 5, 1\nAction 16: 0, 5, 1\nAction 17: 1, 5, 1\nAction 18: 2, 6, 1\nAction 19: 0, 6, 1\nAction 20: 1, 6, 1\nAction 21: 0, 7, 1\nAction 22: 1, 7, 1\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/ChineseCheckers.txt",
    "content": "actions: ['C4', 'G35', 'A10', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: move from A10 to G05\n\nhands: 1\nCurrent Board:\n  Empty='   ' White=' ● ' Black=' ○ ' \n                            A\n                            ● \n                           / \\\n                          ● - ● \n                         / \\ / \\\n                        ● - ● - ● \n                       / \\ / \\ / \\\n                      ● - ● - ● -   \n  F                  / \\ / \\ / \\ / \\                  B\n      -   -   -   -   -   -   -   - ● -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   -   -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -   -   \n           \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n              -   -   -   -   -   -   -   -   \n           / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n            -   -   -   -   -   -   -   -   - ○ \n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -   - ○ - ○ \n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   - ○ - ○ - ○ \n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   - ○ - ○ - ○ - ○ \n  E                  \\ / \\ / \\ / \\ /                  C\n                        -   -   -   \n                       \\ / \\ / \\ /\n                          -   -   \n                         \\ / \\ 
/\n                            -   \n                           \\ /\n                              \n                            D\n\nChesses which can move:\n                            A\n                              \n                           / \\\n                            -   \n                         / \\ / \\\n                          -   -   \n                       / \\ / \\ / \\\n                        -   -   -   \n  F                  / \\ / \\ / \\ / \\                  B\n      -   -   -   -   -   -   -   -   -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   -   -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -   -   \n           \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n              -   -   -   -   -   -   -   -   \n           / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n            -   -   -   -   -   -   -   -   -C07\n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -   -C08-C04\n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   -C09-C05-   \n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   -C10-C06-   -   \n  E                  \\ / \\ / \\ / \\ /                  C\n                        -   -   -   \n                       \\ / \\ / \\ /\n                          -   -   \n                         \\ / \\ /\n                            -   \n                           \\ /\n                              \n                            D\n\nPlease choose chess and move to where: (Use \"a01\", \"B5\",\"C10\", etc. 
to correspond to the correct format in chess board)\nChess you choose is:\n>                             A\n                              \n                           / \\\n                            -   \n                         / \\ / \\\n                          -   -   \n                       / \\ / \\ / \\\n                        -   -   -   \n  F                  / \\ / \\ / \\ / \\                  B\n      -   -   -   -   -   -   -   -   -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   -   -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -   -   \n           \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n              -   -   -   -   -   -   -   -G35\n           / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n            -   -   -   -   -   -   -   -   -   \n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -G50-   -   \n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   -   -   -   \n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   -   -   -   -   \n  E                  \\ / \\ / \\ / \\ /                  C\n                        -   -   -   \n                       \\ / \\ / \\ /\n                          -   -   \n                         \\ / \\ /\n                            -   \n                           \\ /\n                              \n                            D\nWhere you wanna go:\n>  applying action... 
\n\nLast Action: move from G05 to G11\n\nhands: 3\nCurrent Board:\n  Empty='   ' White=' ● ' Black=' ○ ' \n                            A\n                            ● \n                           / \\\n                          ● - ● \n                         / \\ / \\\n                        ● - ● - ● \n                       / \\ / \\ / \\\n                      ● - ● - ● -   \n  F                  / \\ / \\ / \\ / \\                  B\n      -   -   -   -   -   -   -   -   -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   - ● -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -   -   \n           \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n              -   -   -   -   -   -   -   - ○ \n           / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n            -   -   -   -   -   -   -   -   - ○ \n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -   - ○ -   \n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   - ○ - ○ - ○ \n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   - ○ - ○ - ○ - ○ \n  E                  \\ / \\ / \\ / \\ /                  C\n                        -   -   -   \n                       \\ / \\ / \\ /\n                          -   -   \n                         \\ / \\ /\n                            -   \n                           \\ /\n                              \n                            D\n\nChesses which can move:\n                            A\n                              \n                           / \\\n                            -   \n                         / \\ / \\\n                          -   -   \n                    
   / \\ / \\ / \\\n                        -   -   -   \n  F                  / \\ / \\ / \\ / \\                  B\n      -   -   -   -   -   -   -   -   -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   -   -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -   -   \n           \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n              -   -   -   -   -   -   -   -G35\n           / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n            -   -   -   -   -   -   -   -   -C07\n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -   -C08-   \n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   -C09-C05-C02\n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   -C10-C06-   -C01\n  E                  \\ / \\ / \\ / \\ /                  C\n                        -   -   -   \n                       \\ / \\ / \\ /\n                          -   -   \n                         \\ / \\ /\n                            -   \n                           \\ /\n                              \n                            D\n\nPlease choose chess and move to where: (Use \"a01\", \"B5\",\"C10\", etc. to correspond to the correct format in chess board)\nChess you choose is:\n> Error! No any legal move.\nThis is invalid Input! 
Please try again.\nCurrent Board:\n  Empty='   ' White=' ● ' Black=' ○ ' \n                            A\n                            ● \n                           / \\\n                          ● - ● \n                         / \\ / \\\n                        ● - ● - ● \n                       / \\ / \\ / \\\n                      ● - ● - ● -   \n  F                  / \\ / \\ / \\ / \\                  B\n      -   -   -   -   -   -   -   -   -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   - ● -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -   -   \n           \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n              -   -   -   -   -   -   -   - ○ \n           / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n            -   -   -   -   -   -   -   -   - ○ \n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -   - ○ -   \n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   - ○ - ○ - ○ \n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   - ○ - ○ - ○ - ○ \n  E                  \\ / \\ / \\ / \\ /                  C\n                        -   -   -   \n                       \\ / \\ / \\ /\n                          -   -   \n                         \\ / \\ /\n                            -   \n                           \\ /\n                              \n                            D\n\nChesses which can move:\n                          A\n                            \n                         / \\\n                          -   \n                       / \\ / \\\n                        -   -   \n                     / \\ / \\ / \\\n                      -   
-   -   \nF                  / \\ / \\ / \\ / \\                  B\n    -   -   -   -   -   -   -   -   -   -   -   -   \n   \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n      -   -   -   -   -   -   -   -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -G35\n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -   -C07\n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   -C08-   \n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   -C09-C05-C02\n   / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n    -   -   -   -   -   -   -   -   -C10-C06-   -C01\nE                  \\ / \\ / \\ / \\ /                  C\n                      -   -   -   \n                     \\ / \\ / \\ /\n                        -   -   \n                       \\ / \\ /\n                          -   \n                         \\ /\n                            \n                          D\n\nChess you choose is:\n> This is invalid Input! 
Please try again.\nCurrent Board:\n  Empty='   ' White=' ● ' Black=' ○ ' \n                            A\n                            ● \n                           / \\\n                          ● - ● \n                         / \\ / \\\n                        ● - ● - ● \n                       / \\ / \\ / \\\n                      ● - ● - ● -   \n  F                  / \\ / \\ / \\ / \\                  B\n      -   -   -   -   -   -   -   -   -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   - ● -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -   -   \n           \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n              -   -   -   -   -   -   -   - ○ \n           / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n            -   -   -   -   -   -   -   -   - ○ \n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -   - ○ -   \n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   - ○ - ○ - ○ \n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   - ○ - ○ - ○ - ○ \n  E                  \\ / \\ / \\ / \\ /                  C\n                        -   -   -   \n                       \\ / \\ / \\ /\n                          -   -   \n                         \\ / \\ /\n                            -   \n                           \\ /\n                              \n                            D\n\nChesses which can move:\n                          A\n                            \n                         / \\\n                          -   \n                       / \\ / \\\n                        -   -   \n                     / \\ / \\ / \\\n                      -   
-   -   \nF                  / \\ / \\ / \\ / \\                  B\n    -   -   -   -   -   -   -   -   -   -   -   -   \n   \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n      -   -   -   -   -   -   -   -   -   -   -   \n     \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n        -   -   -   -   -   -   -   -   -   -   \n       \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n          -   -   -   -   -   -   -   -   -   \n         \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ /\n            -   -   -   -   -   -   -   -G35\n         / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n          -   -   -   -   -   -   -   -   -C07\n       / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n        -   -   -   -   -   -   -   -   -C08-   \n     / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n      -   -   -   -   -   -   -   -   -C09-C05-C02\n   / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\\n    -   -   -   -   -   -   -   -   -C10-C06-   -C01\nE                  \\ / \\ / \\ / \\ /                  C\n                      -   -   -   \n                     \\ / \\ / \\ /\n                        -   -   \n                       \\ / \\ /\n                          -   \n                         \\ /\n                            \n                          D\n\nChess you choose is:\n> "
  },
  {
    "path": "pypolygames/tests/data/DiceShogi.txt",
    "content": "actions: ['1', '1', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action:  b-D3\n\nCurrent board:\n   A| B| C| D| E\n5  r|  | s| g| k\n4   |  |  |  | p\n3   |  |  | b|  \n2  P|  |  |  |  \n1  K| G| S| B| R\n\nLegal Actions:\nAction 0:  B-E2\nAction 1:  R-E2\nAction 2:  R-E3\nAction 3:  R-E4\n\nInput format : action index e.g. 0\n\nInput action:  applying action... \nRandom outcome ?\n\nLast Action:  r-A2\n\nCurrent board:\n   A| B| C| D| E\n5   |  | s| g| k\n4   |  |  |  | p\n3   |  |  | b|  \n2  r|  |  |  | R\n1  K| G| S| B|  \n\nLegal Actions:\nAction 0:  S-D2\nAction 1:  R-D2\n\nInput format : action index e.g. 0\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/Einstein.txt",
    "content": "actions: ['0', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: 0, 0, 2\n\nCurrent board:\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n1 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n\n\n0 0 0 0 0\n0 0 0 0 0\n1 1 1 1 1\n0 0 0 0 0\n0 0 0 0 0\n\n\n1 1 1 1 1\n1 1 1 1 1\n1 1 1 1 1\n1 1 1 1 1\n1 1 1 1 1\n\nLegal Actions:\nAction 0: 0, 2, 4\n\nInput action:  applying action... \nRandom outcome ?\n"
  },
  {
    "path": "pypolygames/tests/data/GameOfTheAmazons.txt",
    "content": "actions: ['A7', 'B6', 'C6', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: moved chess from J04 to J01 and put arrow at J06\n\nCurrent board:\n  Empty=' ' WhiteChess='○' BlackChess='●' WhiteArrow='□' BlackArrow='■'\n       A   B   C   D   E   F   G   H   I   J\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  10 │   │   │   │ ● │   │   │ ● │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │ ● │   │   │   │   │   │   │   │   │ ● │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │   │   │   │   │   │ □ │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │   │   │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │ ○ │   │   │   │   │   │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │ ○ │   │   │ ○ │   │   │ ○ │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J\n\nPosition of chesses which are possible to move: `A07`, `D10`, `G10`, `J07`\n\nInput three positions to play: (uses format <alphabet of x-axis><numbers of y-axis>, e.g. `A1`, `b2`, `C03`...)\nInput the position of the chess which wants to move on:\n> Input the position of selected chess after moved:\nAllowed positions: ('?' 
means an allowed position)\n       A   B   C   D   E   F   G   H   I   J\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  10 │ ? │   │   │ ● │   │   │ ● │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │ ? │   │ ? │   │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │ ? │ ? │   │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │ ● │ ? │ ? │ ? │ ? │ ? │ ? │ ? │ ? │ ● │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │ ? │ ? │   │   │   │   │   │   │   │ □ │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │ ? │   │ ? │   │   │   │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │ ○ │   │   │ ? │   │   │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │ ? │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │ ? │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │ ○ │   │   │ ○ │   │   │ ○ │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J\n> Input the position of the arrow that wants to put:\nAllowed positions: ('?' means an allowed position)\n       A   B   C   D   E   F   G   H   I   J\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  10 │   │ ? │   │ ● │   │ ? │ ● │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │ ? │   │   │ ? │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │ ? │   │ ? │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │ ? │ ? │ ? │   │   │   │   │   │   │ ● │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │ ? │   │ ? │ ? │ ? │ ? │ ? │ ? │ ? │ □ │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │ ? │ ? │ ? │   │   │   │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │ ○ │ ? │   │ ? 
│   │   │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │ ? │   │   │ ? │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │ ? │   │   │   │ ? │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │ ? │   │ ○ │   │   │ ○ │   │   │ ○ │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J\n>  applying action... \n\nLast Action: moved chess from J01 to J05 and put arrow at J01\n\nCurrent board:\n  Empty=' ' WhiteChess='○' BlackChess='●' WhiteArrow='□' BlackArrow='■'\n       A   B   C   D   E   F   G   H   I   J\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  10 │   │   │   │ ● │   │   │ ● │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │   │   │   │ ● │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │ ● │ ■ │   │   │   │   │   │   │ □ │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │   │   │   │   │   │ ○ │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │ ○ │   │   │   │   │   │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │ ○ │   │   │ ○ │   │   │ □ │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J\n\nPosition of chesses which are possible to move: `B06`, `D10`, `G10`, `J07`\n\nInput three positions to play: (uses format <alphabet of x-axis><numbers of y-axis>, e.g. 
`A1`, `b2`, `C03`...)\nInput the position of the chess which wants to move on:\n> invalid input, try again.\nInput the position of the chess which wants to move on:\n> "
  },
  {
    "path": "pypolygames/tests/data/Havannah5.txt",
    "content": "actions: ['0,4', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: 8,4\n\nCurrent board:\nHavannah\n    0   1   2   3   4   5   6   7   8 \n                   --------------------\n 0                 \\ . \\ . \\ . \\ . \\ . \\ \n                 ------------------------\n 1               \\ . \\ . \\ . \\ . \\ . \\ . \\ \n               ----------------------------\n 2             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n             --------------------------------\n 3           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n           ------------------------------------\n 4         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n             ------------------------------------\n 5           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n               --------------------------------\n 6             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                 ----------------------------\n 7               \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                   ------------------------\n 8                 \\ . \\ . \\ . \\ . \\ X \\ \n                    --------------------\n                      0   1   2   3   4   5   6   7   8 \n\nLegal Actions:\n0,4 0,5 0,6 0,7 0,8 1,3 1,4 1,5 1,6 1,7 1,8 2,2 2,3 2,4 2,5 2,6 2,7 2,8 3,1 3,2 3,3 3,4 3,5 3,6 3,7 3,8 4,0 4,1 4,2 4,3 4,4 4,5 4,6 4,7 4,8 5,0 5,1 5,2 5,3 5,4 5,5 5,6 5,7 6,0 6,1 6,2 6,3 6,4 6,5 6,6 7,0 7,1 7,2 7,3 7,4 7,5 8,0 8,1 8,2 8,3 \n\nInput action:  applying action... \n\nLast Action: 8,3\n\nCurrent board:\nHavannah\n    0   1   2   3   4   5   6   7   8 \n                   --------------------\n 0                 \\ O \\ . \\ . \\ . \\ . 
\\ \n                 ------------------------\n 1               \\ . \\ . \\ . \\ . \\ . \\ . \\ \n               ----------------------------\n 2             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n             --------------------------------\n 3           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n           ------------------------------------\n 4         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n             ------------------------------------\n 5           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n               --------------------------------\n 6             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                 ----------------------------\n 7               \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                   ------------------------\n 8                 \\ . \\ . \\ . \\ X \\ X \\ \n                    --------------------\n                      0   1   2   3   4   5   6   7   8 \n\nLegal Actions:\n0,5 0,6 0,7 0,8 1,3 1,4 1,5 1,6 1,7 1,8 2,2 2,3 2,4 2,5 2,6 2,7 2,8 3,1 3,2 3,3 3,4 3,5 3,6 3,7 3,8 4,0 4,1 4,2 4,3 4,4 4,5 4,6 4,7 4,8 5,0 5,1 5,2 5,3 5,4 5,5 5,6 5,7 6,0 6,1 6,2 6,3 6,4 6,5 6,6 7,0 7,1 7,2 7,3 7,4 7,5 8,0 8,1 8,2 \n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/Havannah8.txt",
    "content": "actions: ['0,7', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: 14,7\n\nCurrent board:\nHavannah\n    0   1   2   3   4   5   6   7   8   9  10  11  12  13  14 \n                               --------------------------------\n 0                             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                             ------------------------------------\n 1                           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                           ----------------------------------------\n 2                         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                         --------------------------------------------\n 3                       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                       ------------------------------------------------\n 4                     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                     ----------------------------------------------------\n 5                   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                   --------------------------------------------------------\n 6                 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                 ------------------------------------------------------------\n 7               \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                   ------------------------------------------------------------\n 8                 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . 
\\ \n                     --------------------------------------------------------\n 9                   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                       ----------------------------------------------------\n10                     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                         ------------------------------------------------\n11                       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                           --------------------------------------------\n12                         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                             ----------------------------------------\n13                           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                               ------------------------------------\n14                             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ X \\ \n                                --------------------------------\n                                  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14 \n\nLegal Actions:\n0,7 0,8 0,9 0,10 0,11 0,12 0,13 0,14 1,6 1,7 1,8 1,9 1,10 1,11 1,12 1,13 1,14 2,5 2,6 2,7 2,8 2,9 2,10 2,11 2,12 2,13 2,14 3,4 3,5 3,6 3,7 3,8 3,9 3,10 3,11 3,12 3,13 3,14 4,3 4,4 4,5 4,6 4,7 4,8 4,9 4,10 4,11 4,12 4,13 4,14 5,2 5,3 5,4 5,5 5,6 5,7 5,8 5,9 5,10 5,11 5,12 5,13 5,14 6,1 6,2 6,3 6,4 6,5 6,6 6,7 6,8 6,9 6,10 6,11 6,12 6,13 6,14 7,0 7,1 7,2 7,3 7,4 7,5 7,6 7,7 7,8 7,9 7,10 7,11 7,12 7,13 7,14 8,0 8,1 8,2 8,3 8,4 8,5 8,6 8,7 8,8 8,9 8,10 8,11 8,12 8,13 9,0 9,1 9,2 9,3 9,4 9,5 9,6 9,7 9,8 9,9 9,10 9,11 9,12 10,0 10,1 10,2 10,3 10,4 10,5 10,6 10,7 10,8 10,9 10,10 10,11 11,0 11,1 11,2 11,3 11,4 11,5 11,6 11,7 11,8 11,9 11,10 12,0 12,1 12,2 12,3 12,4 12,5 12,6 12,7 12,8 12,9 13,0 13,1 13,2 13,3 13,4 13,5 13,6 13,7 13,8 14,0 14,1 14,2 14,3 14,4 14,5 14,6 \n\nInput action:  applying action... 
\n\nLast Action: 14,6\n\nCurrent board:\nHavannah\n    0   1   2   3   4   5   6   7   8   9  10  11  12  13  14 \n                               --------------------------------\n 0                             \\ O \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                             ------------------------------------\n 1                           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                           ----------------------------------------\n 2                         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                         --------------------------------------------\n 3                       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                       ------------------------------------------------\n 4                     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                     ----------------------------------------------------\n 5                   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                   --------------------------------------------------------\n 6                 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                 ------------------------------------------------------------\n 7               \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                   ------------------------------------------------------------\n 8                 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                     --------------------------------------------------------\n 9                   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                       ----------------------------------------------------\n10                     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                         ------------------------------------------------\n11                       \\ . 
\\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                           --------------------------------------------\n12                         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                             ----------------------------------------\n13                           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                               ------------------------------------\n14                             \\ . \\ . \\ . \\ . \\ . \\ . \\ X \\ X \\ \n                                --------------------------------\n                                  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14 \n\nLegal Actions:\n0,8 0,9 0,10 0,11 0,12 0,13 0,14 1,6 1,7 1,8 1,9 1,10 1,11 1,12 1,13 1,14 2,5 2,6 2,7 2,8 2,9 2,10 2,11 2,12 2,13 2,14 3,4 3,5 3,6 3,7 3,8 3,9 3,10 3,11 3,12 3,13 3,14 4,3 4,4 4,5 4,6 4,7 4,8 4,9 4,10 4,11 4,12 4,13 4,14 5,2 5,3 5,4 5,5 5,6 5,7 5,8 5,9 5,10 5,11 5,12 5,13 5,14 6,1 6,2 6,3 6,4 6,5 6,6 6,7 6,8 6,9 6,10 6,11 6,12 6,13 6,14 7,0 7,1 7,2 7,3 7,4 7,5 7,6 7,7 7,8 7,9 7,10 7,11 7,12 7,13 7,14 8,0 8,1 8,2 8,3 8,4 8,5 8,6 8,7 8,8 8,9 8,10 8,11 8,12 8,13 9,0 9,1 9,2 9,3 9,4 9,5 9,6 9,7 9,8 9,9 9,10 9,11 9,12 10,0 10,1 10,2 10,3 10,4 10,5 10,6 10,7 10,8 10,9 10,10 10,11 11,0 11,1 11,2 11,3 11,4 11,5 11,6 11,7 11,8 11,9 11,10 12,0 12,1 12,2 12,3 12,4 12,5 12,6 12,7 12,8 12,9 13,0 13,1 13,2 13,3 13,4 13,5 13,6 13,7 13,8 14,0 14,1 14,2 14,3 14,4 14,5 \n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/Hex11.txt",
    "content": "actions: ['a1', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: k11\n\nCurrent board:\nHex\n    a   b   c   d   e   f   g   h   i   j   k\n  ---------------------------------------------\n 1 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n    ---------------------------------------------\n 2   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n      ---------------------------------------------\n 3     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n        ---------------------------------------------\n 4       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n          ---------------------------------------------\n 5         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n            ---------------------------------------------\n 6           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n              ---------------------------------------------\n 7             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                ---------------------------------------------\n 8               \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                  ---------------------------------------------\n 9                 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                    ---------------------------------------------\n10                   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                      ---------------------------------------------\n11                     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . 
\\ B \\ \n                        --------------------------------------------\n                          a   b   c   d   e   f   g   h   i   j   k\n\nLegal Actions:\na1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 \n\nInput action:  applying action... \n\nLast Action: k10\n\nCurrent board:\nHex\n    a   b   c   d   e   f   g   h   i   j   k\n  ---------------------------------------------\n 1 \\ W \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n    ---------------------------------------------\n 2   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n      ---------------------------------------------\n 3     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n        ---------------------------------------------\n 4       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n          ---------------------------------------------\n 5         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n            ---------------------------------------------\n 6           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n              ---------------------------------------------\n 7             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                ---------------------------------------------\n 8               \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                  ---------------------------------------------\n 9                 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                    ---------------------------------------------\n10                   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . 
\\ B \\ \n                      ---------------------------------------------\n11                     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ B \\ \n                        --------------------------------------------\n                          a   b   c   d   e   f   g   h   i   j   k\n\nLegal Actions:\na2 a3 a4 a5 a6 a7 a8 a9 a10 a11 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 k1 k2 k3 k4 k5 k6 k7 k8 k9 \n\nInput action: failed to parse action\nInput action: "
  },
  {
    "path": "pypolygames/tests/data/Hex13.txt",
    "content": "actions: ['a1', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: m13\n\nCurrent board:\nHex\n    a   b   c   d   e   f   g   h   i   j   k   l   m\n  -----------------------------------------------------\n 1 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n    -----------------------------------------------------\n 2   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n      -----------------------------------------------------\n 3     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n        -----------------------------------------------------\n 4       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n          -----------------------------------------------------\n 5         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n            -----------------------------------------------------\n 6           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n              -----------------------------------------------------\n 7             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                -----------------------------------------------------\n 8               \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                  -----------------------------------------------------\n 9                 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                    -----------------------------------------------------\n10                   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . 
\\ \n                      -----------------------------------------------------\n11                     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                        -----------------------------------------------------\n12                       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                          -----------------------------------------------------\n13                         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ B \\ \n                            ----------------------------------------------------\n                              a   b   c   d   e   f   g   h   i   j   k   l   m\n\nLegal Actions:\na1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 \n\nInput action:  applying action... \n\nLast Action: m12\n\nCurrent board:\nHex\n    a   b   c   d   e   f   g   h   i   j   k   l   m\n  -----------------------------------------------------\n 1 \\ W \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n    -----------------------------------------------------\n 2   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n      -----------------------------------------------------\n 3     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n        -----------------------------------------------------\n 4       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . 
\\ \n          -----------------------------------------------------\n 5         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n            -----------------------------------------------------\n 6           \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n              -----------------------------------------------------\n 7             \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                -----------------------------------------------------\n 8               \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                  -----------------------------------------------------\n 9                 \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                    -----------------------------------------------------\n10                   \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                      -----------------------------------------------------\n11                     \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ \n                        -----------------------------------------------------\n12                       \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ B \\ \n                          -----------------------------------------------------\n13                         \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . \\ . 
\\ B \\ \n                            ----------------------------------------------------\n                              a   b   c   d   e   f   g   h   i   j   k   l   m\n\nLegal Actions:\na2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 \n\nInput action: failed to parse action\nInput action: "
  },
  {
    "path": "pypolygames/tests/data/KyotoShogi.txt",
    "content": "actions: ['0', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action:  p ->  r-A4\n\nCurrent board:\n   A| B| C| D| E\n5   | g| k| s| t\n4  r|  |  |  |  \n3   |  |  |  |  \n2   |  |  |  |  \n1  T| S| K| G| P\n\nLegal Actions:\nAction 0:  T ->  L-B2\nAction 1:  T ->  L-A2\nAction 2:  S ->  B-C2\nAction 3:  S ->  B-B2\nAction 4:  S ->  B-A2\nAction 5:  K ->  K-D2\nAction 6:  K ->  K-C2\nAction 7:  K ->  K-B2\nAction 8:  G -> KN-E2\nAction 9:  G -> KN-D2\nAction 10:  G -> KN-C2\nAction 11:  P ->  R-E2\n\nInput format : action index e.g. 0\n\nInput action:  applying action... \n\nLast Action:  r ->  p-A1\n\nCurrent board:\n   A| B| C| D| E\n5   | g| k| s| t\n4   |  |  |  |  \n3   |  |  |  |  \n2   | L|  |  |  \n1  p| S| K| G| P\n\nLegal Actions:\nAction 0:  L ->  T-B3\nAction 1:  L ->  T-B4\nAction 2:  L ->  T-B5\nAction 3:  S ->  B-C2\nAction 4:  S ->  B-A2\nAction 5:  K ->  K-D2\nAction 6:  K ->  K-C2\nAction 7:  G -> KN-E2\nAction 8:  G -> KN-D2\nAction 9:  G -> KN-C2\nAction 10:  P ->  R-E2\n\nInput format : action index e.g. 0\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/Minishogi.txt",
    "content": "actions: ['0', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action:  p-E3\n\nCurrent board:\n   A| B| C| D| E\n5  r| b| s| g| k\n4   |  |  |  |  \n3   |  |  |  | p\n2  P|  |  |  |  \n1  K| G| S| B| R\n\nLegal Actions:\nAction 0:  K-B2\nAction 1:  G-C2\nAction 2:  G-B2\nAction 3:  S-D2\nAction 4:  S-C2\nAction 5:  S-B2\nAction 6:  B-E2\nAction 7:  B-C2\nAction 8:  B-B3\nAction 9:  B-A4\nAction 10:  R-E2\nAction 11:  R-E3\nAction 12:  P-A3\n\nInput format : action index e.g. 0\n\nInput action:  applying action... \n\nLast Action:  p-E2\n\nCurrent board:\n   A| B| C| D| E\n5  r| b| s| g| k\n4   |  |  |  |  \n3   |  |  |  |  \n2  P| K|  |  | p\n1   | G| S| B| R\n\nLegal Actions:\nAction 0:  K-C2\nAction 1:  K-C3\nAction 2:  K-B3\nAction 3:  K-A3\nAction 4:  K-A1\nAction 5:  G-C2\nAction 6:  G-A1\nAction 7:  S-D2\nAction 8:  S-C2\nAction 9:  B-E2\nAction 10:  B-C2\nAction 11:  B-B3\nAction 12:  B-A4\nAction 13:  R-E2\nAction 14:  P-A3\n\nInput format : action index e.g. 0\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/Othello10.txt",
    "content": "actions: ['G6', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: put chess at F04\n\nCurrent board:\n  Empty=' ' Black='●' White='○'\n       A   B   C   D   E   F   G   H   I   J\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  10 │   │   │   │   │   │   │   │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │   │   │   │   │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │ ○ │ ● │   │   │   │   │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │ ● │ ● │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │   │   │   │   │   │ ● │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │   │   │   │   │   │   │   │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J\n\nAllowed positions: ('?' 
means an allowed position)\n       A   B   C   D   E   F   G   H   I   J\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  10 │   │   │   │   │   │   │   │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │   │   │   │   │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │ ○ │ ● │ ? │   │   │   │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │ ● │ ● │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │   │   │   │   │ ? │ ● │ ? │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │   │   │   │   │   │   │   │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J\n\nInput a position to play: (uses format <alphabet of x-axis><numbers of y-axis>, e.g. `A1`, `b2`, `C03`...)\n>  applying action... 
\n\nLast Action: put chess at H07\n\nCurrent board:\n  Empty=' ' Black='●' White='○'\n       A   B   C   D   E   F   G   H   I   J\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  10 │   │   │   │   │   │   │   │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │   │ ● │   │   │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │ ○ │ ○ │ ● │   │   │   │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │ ● │ ● │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │   │   │   │   │   │ ● │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │   │   │   │   │   │   │   │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J\n\nAllowed positions: ('?' means an allowed position)\n       A   B   C   D   E   F   G   H   I   J\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  10 │   │   │   │   │   │   │   │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │   │ ● │   │   │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │ ○ │ ○ │ ● │ ? │   │   │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │ ● │ ● │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │   │   │   │ ? │ ? │ ● │ ? 
│   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │ ? │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │   │   │   │   │   │   │   │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J\n\nInput a position to play: (uses format <alphabet of x-axis><numbers of y-axis>, e.g. `A1`, `b2`, `C03`...)\n> invalid input, try again.\n> "
  },
  {
    "path": "pypolygames/tests/data/Othello16.txt",
    "content": "actions: ['J9', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: put chess at I07\n\nCurrent board:\n  Empty=' ' Black='●' White='○'\n       A   B   C   D   E   F   G   H   I   J   K   L   M   N   O   P\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  16 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 16\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  15 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 15\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  14 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 14\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  13 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 13\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  12 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 12\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  11 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 11\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  10 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │ ○ │ ● │   │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │ ● │ ● │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │   │   │ ● │   │   │   │   │   │   │   │ 07\n     
├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J   K   L   M   N   O   P\n\nAllowed positions: ('?' 
means an allowed position)\n       A   B   C   D   E   F   G   H   I   J   K   L   M   N   O   P\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  16 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 16\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  15 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 15\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  14 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 14\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  13 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 13\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  12 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 12\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  11 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 11\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  10 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │ ○ │ ● │ ? │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │ ● │ ● │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │   │ ? │ ● │ ? 
│   │   │   │   │   │   │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J   K   L   M   N   O   P\n\nInput a position to play: (uses format <alphabet of x-axis><numbers of y-axis>, e.g. `A1`, `b2`, `C03`...)\n>  applying action... 
\n\nLast Action: put chess at K10\n\nCurrent board:\n  Empty=' ' Black='●' White='○'\n       A   B   C   D   E   F   G   H   I   J   K   L   M   N   O   P\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  16 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 16\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  15 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 15\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  14 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 14\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  13 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 13\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  12 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 12\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  11 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 11\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  10 │   │   │   │   │   │   │   │   │   │   │ ● │   │   │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │ ○ │ ○ │ ● │   │   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │ ● │ ● │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │   │   │ ● │   │   │   │   │   │   │   │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │  
 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J   K   L   M   N   O   P\n\nAllowed positions: ('?' means an allowed position)\n       A   B   C   D   E   F   G   H   I   J   K   L   M   N   O   P\n     ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐\n  16 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 16\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  15 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 15\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  14 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 14\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  13 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 13\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  12 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 12\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  11 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 11\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  10 │   │   │   │   │   │   │   │   │   │   │ ● │   │   │   │   │   │ 10\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  09 │   │   │   │   │   │   │   │ ○ │ ○ │ ● │ ? 
│   │   │   │   │   │ 09\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  08 │   │   │   │   │   │   │   │ ● │ ● │   │   │   │   │   │   │   │ 08\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  07 │   │   │   │   │   │   │ ? │ ? │ ● │ ? │   │   │   │   │   │   │ 07\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  06 │   │   │   │   │   │   │   │   │ ? │   │   │   │   │   │   │   │ 06\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  05 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 05\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  04 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 04\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  03 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 03\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  02 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 02\n     ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n  01 │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │   │ 01\n     └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘\n       A   B   C   D   E   F   G   H   I   J   K   L   M   N   O   P\n\nInput a position to play: (uses format <alphabet of x-axis><numbers of y-axis>, e.g. `A1`, `b2`, `C03`...)\n> invalid input, try again.\n> "
  },
  {
    "path": "pypolygames/tests/data/OthelloOpt10.txt",
    "content": "actions: ['0', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: 0, 5, 6\n\nCurrent board:\n | | | | | | | | | \n | | | | | | | | | \n | | | | | | | | | \n | | | | | | | | | \n | | | |o|x| | | | \n | | | |x|x| | | | \n | | | | |x| | | | \n | | | | | | | | | \n | | | | | | | | | \n | | | | | | | | | \n\nLegal Actions:\nAction 0: 0, 6, 4\nAction 1: 0, 4, 6\nAction 2: 0, 6, 6\n\nInput action:  applying action... \n\nLast Action: 0, 7, 3\n\nCurrent board:\n | | | | | | | | | \n | | | | | | | | | \n | | | | | | | | | \n | | | | | | |x| | \n | | | |o|o|x| | | \n | | | |x|x| | | | \n | | | | |x| | | | \n | | | | | | | | | \n | | | | | | | | | \n | | | | | | | | | \n\nLegal Actions:\nAction 0: 0, 7, 4\nAction 1: 0, 3, 6\nAction 2: 0, 4, 6\nAction 3: 0, 6, 6\nAction 4: 0, 5, 7\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/OthelloOpt16.txt",
    "content": "actions: ['0', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: 0, 8, 9\n\nCurrent board:\n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | |o|x| | | | | | | \n | | | | | | |x|x| | | | | | | \n | | | | | | | |x| | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n\nLegal Actions:\nAction 0: 0, 9, 7\nAction 1: 0, 7, 9\nAction 2: 0, 9, 9\n\nInput action:  applying action... \n\nLast Action: 0, 10, 6\n\nCurrent board:\n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | |x| | | | | \n | | | | | | |o|o|x| | | | | | \n | | | | | | |x|x| | | | | | | \n | | | | | | | |x| | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n\nLegal Actions:\nAction 0: 0, 10, 7\nAction 1: 0, 6, 9\nAction 2: 0, 7, 9\nAction 3: 0, 9, 9\nAction 4: 0, 8, 10\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/Surakarta.txt",
    "content": "actions: ['A5-B4', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: F2-E3\n\nCurrent board:\n  A|B|C|D|E|F\n6 x|x|x|x|x|x\n5 x|x|x|x|x|x\n4  | | | | | \n3  | | | |o| \n2 o|o|o|o|o| \n1 o|o|o|o|o|o\n\nLegal Actions:\nAction 0: A5-B4\nAction 1: A5-A4\nAction 2: B5-C4\nAction 3: B5-B4\nAction 4: B5-A4\nAction 5: C5-D4\nAction 6: C5-C4\nAction 7: C5-B4\nAction 8: D5-E4\nAction 9: D5-D4\nAction 10: D5-C4\nAction 11: E5-F4\nAction 12: E5-E4\nAction 13: E5-D4\nAction 14: F5-F4\nAction 15: F5-E4\n\nInput format : <Alphabet><Digit>-<Alphabet><Digit> e.g. A1-A2\n\nInput action:  applying action... \n\nLast Action: F1-F2\n\nCurrent board:\n  A|B|C|D|E|F\n6 x|x|x|x|x|x\n5  |x|x|x|x|x\n4  |x| | | | \n3  | | | |o| \n2 o|o|o|o|o|o\n1 o|o|o|o|o| \n\nLegal Actions:\nAction 0: A6-A5\nAction 1: B4-C4\nAction 2: B4-C3\nAction 3: B4-B3\nAction 4: B4-A3\nAction 5: B4-A4\nAction 6: B4-A5\nAction 7: B5-C4\nAction 8: B5-A4\nAction 9: B5-A5\nAction 10: B6-A5\nAction 11: C5-D4\nAction 12: C5-C4\nAction 13: D5-E4\nAction 14: D5-D4\nAction 15: D5-C4\nAction 16: E5-F4\nAction 17: E5-E4\nAction 18: E5-D4\nAction 19: F5-F4\nAction 20: F5-E4\n\nInput format : <Alphabet><Digit>-<Alphabet><Digit> e.g. A1-A2\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/data/Tristannogo.txt",
    "content": "actions: ['0', 'blublu']\n######################################################################\n#                         HUMAN-PLAYED GAME                          #\n######################################################################\nsetting-up pseudo-random generator...\ncreating human-played environment\nplaying against a human player...\n\nLast Action: 0, 8, 8\n\nCurrent board:\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 1\n\n\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n\n\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n\n\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 0\n\n\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 0\n\nLegal Actions:\nAction 0: 0, 0, 0\nAction 1: 0, 1, 0\nAction 2: 0, 2, 0\nAction 3: 0, 3, 0\nAction 4: 0, 4, 0\nAction 5: 0, 5, 0\nAction 6: 0, 6, 0\nAction 7: 0, 7, 0\nAction 8: 0, 8, 0\nAction 9: 0, 0, 1\nAction 10: 0, 1, 1\nAction 11: 0, 2, 1\nAction 12: 0, 3, 1\nAction 13: 0, 4, 1\nAction 14: 0, 5, 1\nAction 15: 0, 6, 1\nAction 16: 0, 7, 1\nAction 17: 0, 8, 1\nAction 18: 0, 0, 2\nAction 19: 0, 1, 2\nAction 20: 0, 2, 2\nAction 21: 0, 3, 2\nAction 22: 0, 4, 2\nAction 23: 0, 5, 2\nAction 24: 0, 6, 2\nAction 25: 0, 7, 2\nAction 26: 0, 8, 2\nAction 27: 0, 0, 3\nAction 28: 0, 1, 3\nAction 29: 0, 2, 3\nAction 30: 0, 3, 3\nAction 31: 0, 4, 3\nAction 32: 0, 5, 3\nAction 33: 0, 6, 3\nAction 34: 0, 7, 3\nAction 35: 
0, 8, 3\nAction 36: 0, 0, 4\nAction 37: 0, 1, 4\nAction 38: 0, 2, 4\nAction 39: 0, 3, 4\nAction 40: 0, 4, 4\nAction 41: 0, 5, 4\nAction 42: 0, 6, 4\nAction 43: 0, 7, 4\nAction 44: 0, 8, 4\nAction 45: 0, 0, 5\nAction 46: 0, 1, 5\nAction 47: 0, 2, 5\nAction 48: 0, 3, 5\nAction 49: 0, 4, 5\nAction 50: 0, 5, 5\nAction 51: 0, 6, 5\nAction 52: 0, 7, 5\nAction 53: 0, 8, 5\nAction 54: 0, 0, 6\nAction 55: 0, 1, 6\nAction 56: 0, 2, 6\nAction 57: 0, 3, 6\nAction 58: 0, 4, 6\nAction 59: 0, 5, 6\nAction 60: 0, 6, 6\nAction 61: 0, 7, 6\nAction 62: 0, 8, 6\nAction 63: 0, 0, 7\nAction 64: 0, 1, 7\nAction 65: 0, 2, 7\nAction 66: 0, 3, 7\nAction 67: 0, 4, 7\nAction 68: 0, 5, 7\nAction 69: 0, 6, 7\nAction 70: 0, 7, 7\nAction 71: 0, 8, 7\nAction 72: 0, 0, 8\nAction 73: 0, 1, 8\nAction 74: 0, 2, 8\nAction 75: 0, 3, 8\nAction 76: 0, 4, 8\nAction 77: 0, 5, 8\nAction 78: 0, 6, 8\nAction 79: 0, 7, 8\n\nInput action:  applying action... \n\nLast Action: 0, 7, 8\n\nCurrent board:\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 1 1\n\n\n1 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0\n\n\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n\n\n0 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 0 0\n\n\n0 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 1 1\n1 1 1 1 1 1 1 0 0\n\nLegal Actions:\nAction 0: 0, 1, 0\nAction 1: 0, 2, 0\nAction 2: 0, 3, 0\nAction 3: 0, 4, 0\nAction 4: 0, 5, 0\nAction 5: 0, 6, 0\nAction 6: 0, 7, 0\nAction 
7: 0, 8, 0\nAction 8: 0, 0, 1\nAction 9: 0, 1, 1\nAction 10: 0, 2, 1\nAction 11: 0, 3, 1\nAction 12: 0, 4, 1\nAction 13: 0, 5, 1\nAction 14: 0, 6, 1\nAction 15: 0, 7, 1\nAction 16: 0, 8, 1\nAction 17: 0, 0, 2\nAction 18: 0, 1, 2\nAction 19: 0, 2, 2\nAction 20: 0, 3, 2\nAction 21: 0, 4, 2\nAction 22: 0, 5, 2\nAction 23: 0, 6, 2\nAction 24: 0, 7, 2\nAction 25: 0, 8, 2\nAction 26: 0, 0, 3\nAction 27: 0, 1, 3\nAction 28: 0, 2, 3\nAction 29: 0, 3, 3\nAction 30: 0, 4, 3\nAction 31: 0, 5, 3\nAction 32: 0, 6, 3\nAction 33: 0, 7, 3\nAction 34: 0, 8, 3\nAction 35: 0, 0, 4\nAction 36: 0, 1, 4\nAction 37: 0, 2, 4\nAction 38: 0, 3, 4\nAction 39: 0, 4, 4\nAction 40: 0, 5, 4\nAction 41: 0, 6, 4\nAction 42: 0, 7, 4\nAction 43: 0, 8, 4\nAction 44: 0, 0, 5\nAction 45: 0, 1, 5\nAction 46: 0, 2, 5\nAction 47: 0, 3, 5\nAction 48: 0, 4, 5\nAction 49: 0, 5, 5\nAction 50: 0, 6, 5\nAction 51: 0, 7, 5\nAction 52: 0, 8, 5\nAction 53: 0, 0, 6\nAction 54: 0, 1, 6\nAction 55: 0, 2, 6\nAction 56: 0, 3, 6\nAction 57: 0, 4, 6\nAction 58: 0, 5, 6\nAction 59: 0, 6, 6\nAction 60: 0, 7, 6\nAction 61: 0, 8, 6\nAction 62: 0, 0, 7\nAction 63: 0, 1, 7\nAction 64: 0, 2, 7\nAction 65: 0, 3, 7\nAction 66: 0, 4, 7\nAction 67: 0, 5, 7\nAction 68: 0, 6, 7\nAction 69: 0, 7, 7\nAction 70: 0, 8, 7\nAction 71: 0, 0, 8\nAction 72: 0, 1, 8\nAction 73: 0, 2, 8\nAction 74: 0, 3, 8\nAction 75: 0, 4, 8\nAction 76: 0, 5, 8\nAction 77: 0, 6, 8\n\nInput action: Input action: "
  },
  {
    "path": "pypolygames/tests/test_interactions.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\nimport difflib\nimport tempfile\nimport subprocess\nfrom pathlib import Path\nfrom pprint import pprint\nfrom unittest import SkipTest\nimport pytest\nfrom ..utils import listings\n\n\nclass FileStream:\n    \"\"\"Simplifies stdout reading\n    \"\"\"\n\n    def __init__(self) -> None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        path = Path(self.tempdir.name) / \"std_in_out.txt\"\n        self.writer = path.open(\"w\")\n        self.reader = path.open(\"r\")\n\n    def __del__(self) -> None:\n        self.writer.close()\n        self.reader.close()\n\n\n# Specify any specific set of actions for your game\nGAME_ACTIONS = {\"Breakthrough\": [\"1\", \"blublu\"],\n                \"GameOfTheAmazons\": [\"A7\", \"B6\", \"C6\", \"blublu\"],\n                \"Othello10\": [\"G6\", \"blublu\"],\n                \"Othello16\": [\"J9\", \"blublu\"],\n                \"Havannah5\": [\"0,4\", \"blublu\"],\n                \"Havannah8\": [\"0,7\", \"blublu\"],\n                \"Hex11\": [\"a1\", \"blublu\"],\n                \"Hex13\": [\"a1\", \"blublu\"],\n                \"Surakarta\": [\"A5-B4\", \"blublu\"],\n                \"DiceShogi\": [\"1\", \"1\", \"blublu\"],\n                \"ChineseCheckers\": [\"C4\", \"G35\", \"A10\", \"blublu\"],\n                }\n\n\n@pytest.mark.parametrize(\n    \"game_name\", [game_name for game_name in listings.games(olympiads=True)]\n)\n\ndef test_game_interactions(game_name: str):\n    raise SkipTest\n    if game_name in [\"Einstein\", \"DiceShogi\"]:\n        # Feel free to add name here in order to deactivate a test\n        raise SkipTest(f\"Skipping {game_name} for lack of reproducibility\")\n    actions = GAME_ACTIONS.get(game_name, [\"0\", \"blublu\"])\n    # let's play\n    
fsout = FileStream()\n    command = ['timeout', '--signal=SIGTERM', '20', 'python', '-um', 'pypolygames', 'human',\n               \"--pure_mcts\", f'--game_name={game_name}', '--num_rollouts=2', '--seed=12']\n    input_requests = [\"Input\", \"Random outcome\", \"Chess you choose is:\", \"Where you wanna go:\"]  # this may need to be made more robust\n    text = \"\"\n    popen = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=fsout.writer)\n    try:\n        # wait for the process to initialize\n        for _ in range(120):  # wait for input request\n            if any(x in text for x in input_requests):\n                break\n            text = fsout.reader.read()\n            if text:\n                print(text)  # for debugging\n            time.sleep(.1)\n        for action in actions:\n            print(f\"*** PLAYING: {action} ***\")\n            popen.stdin.write((action + \"\\n\").encode())\n            popen.stdin.flush()\n            text = \"\"\n            for _ in range(20):  # wait for input request\n                if any(x in text for x in input_requests):\n                    break\n                text = fsout.reader.read()\n                if text:\n                    print(text)\n                time.sleep(.1)\n    except Exception as e:\n        popen.terminate()  # make sure the process is killed, whatever happens\n        raise e\n    popen.terminate()\n    fsout.reader.seek(0)\n    all_text = f\"actions: {actions}\\n\" + fsout.reader.read()\n    #\n    # compare the outputs to records\n    filepath = Path(__file__).parent / \"data\" / f\"{game_name}.txt\"\n    filepath.parent.mkdir(exist_ok=True)\n    if not filepath.exists():\n        filepath.write_text(all_text)\n        raise AssertionError(\"Logs were written, rerun once again to test reproducibility\")\n    expected = filepath.read_text()\n    if all_text != expected:\n        print(\"\\n\\n\\nHERE IS THE DIFF:\\n\\n\\n\")\n        
pprint(list(difflib.Differ().compare(all_text.splitlines(), expected.splitlines())))\n        raise ValueError(f\"String differ. If the new string is better, delete {filepath}\\n\"\n                         \"and rerun twice (pytest pypolygames/tests/test_interactions)\\n\"\n                         \"Alternatively, feel free to add failing games to the list of skipped\\n\"\n                         \"tests at the top of this function, and notify jrapin or teytaud\\n\"\n                         \"(we'll reactivate it for you later on, since it can be cumbersome).\")\n"
  },
  {
    "path": "pypolygames/tests/test_mcts.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\nimport random\nfrom unittest import SkipTest\nfrom pathlib import Path\nimport pytest\nfrom .. import params\nfrom .. import evaluation\nfrom .. import utils\n\n\n@pytest.mark.parametrize(\n    \"game_name\", [game_name for game_name in utils.listings.games()]\n)\ndef test_mcts(game_name) -> None:\n    #\n    # Important informations in the following block about which games are skipped because they are:\n    # - too slow (more than 1min)\n    # - crashing (segfault or crash with no more information)\n    # - too_bad: not better with larger rollouts (these games are played but not evaluated)\n    # also, for some games, we must add some tolerance (they dont win at 100%)\n    #\n    crashing = []\n    is_one_player_game = any(x in game_name for x in [\"asterm\", \"ineswee\", \"WeakSchur\"])\n    too_slow = [\n        \"GameOfTheAmazons\",\n        \"Connect6\",\n        \"KyotoShogi\",\n        \"Hex19\",\n        \"Hex19pie\",\n        \"Minishogi\",\n        \"DiceShogi\",\n        \"ChineseCheckers\",\n    ]\n    too_bad = [\"Havannah5\", \"Havannah5pie\", \"Surakarta\", \"DiceShogi\", \"Connect6\"]\n    if game_name in crashing + too_slow:  # + one_player_games:\n        raise SkipTest(f\"Skipping {game_name}\")\n    if \"inesweeper\" in game_name and \"4_4_4\" not in game_name:\n        raise SkipTest(f\"Skipping {game_name}\")\n    if \"astermind\" in game_name and \"4_4_6\" not in game_name:\n        raise SkipTest(f\"Skipping {game_name}\")\n    if \"WeakSchur\" in game_name:\n        raise SkipTest(f\"Skipping {game_name} (currently aborts when finished, which kills the CI)\")\n    # for allowing some tolerance to winning all games with larger rollouts, add here:\n    tolerance = {\n        \"TicTacToe\": 4,\n        
\"FreeStyleGomoku\": 4,\n        \"OuterOpenGomoku\": 3,\n        \"Havannah5pieExt\": 2,\n        \"Havannah8\": 5,\n        \"Havannah8pie\": 5,\n        \"Hex13\": 2,\n        \"Hex13pie\": 2,\n        \"Einstein\": 3,\n        \"Othello10\": 2,\n        \"OthelloOpt10\": 2,\n        \"YINSH\": 3,\n        \"Minishogi\": 1,\n        \"GomokuSwap2\": 3,\n        \"BlockGo\": 2,\n    }.get(game_name, 1)\n    #\n    game_params = params.GameParams(game_name=game_name)\n    case = random.randint(0, 2)\n    rollouts = (2, 40)\n    if (\n        not case\n    ):  # In case 0, 0 wins, else 1  (this makes sure results dependent on rollouts)\n        rollouts = tuple(reversed(rollouts))\n    eval_params = params.EvalParams(\n        num_game_eval=10,\n        device_eval=\"cpu\",\n        checkpoint_dir=Path(\"mock/path\"),  # this should not be *required* here! no network\n        num_rollouts_eval=rollouts[0],\n        num_rollouts_opponent=rollouts[1],\n    )  # device eval is actually not used\n\n    def seed_generator():\n        i = 0\n        while True:\n            yield i\n            i += 1\n\n    context, _, _, get_eval_reward = evaluation.create_evaluation_environment(\n        seed_generator=seed_generator(),\n        game_params=game_params,\n        eval_params=eval_params,\n        pure_mcts_eval=True,\n    )\n\n    context.start()\n    while not context.terminated():\n        time.sleep(0.01)\n    # check that the one with most rollouts wins!\n    score = sum(v > 0 for v in get_eval_reward())\n    expected = 0 if case else eval_params.num_game_eval\n    msg = f\"Wrong score for random case {case}, expected {expected} with tol {tolerance} but got {score}.\"\n    if is_one_player_game or game_name in too_bad:\n        raise SkipTest(f\"Skipping evaluation of {game_name} (not very good, or one player)\")\n    assert abs(score - expected) <= tolerance, msg\n"
  },
  {
    "path": "pypolygames/tests/test_params.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport pytest\nfrom pathlib import Path\nfrom dataclasses import asdict\nfrom .. import params\n\n\n@pytest.mark.parametrize(\n    \"cls\",\n    [\n        params.GameParams,\n        params.ModelParams,\n        params.OptimParams,\n        params.SimulationParams,\n        params.EvalParams,\n        params.ExecutionParams,\n    ],\n)\ndef test_dataclass(cls):\n    argfields = dict(cls.arg_fields())\n    for key, field in argfields.items():\n        if key != field.name.strip(\"-\"):\n            raise AssertionError(\n                f\"ArgField key ('{key}') and \"\n                f\"name ('{field.name.strip('-')}') must match!\"\n            )\n        if \"default\" in field.opts:\n            assert field.opts[\"default\"] == getattr(\n                cls, key\n            ), f\"Field default does not match class one for {key}\"\n    field_keys = set(argfields)\n    cls_attrs = set(\n        asdict(cls() if cls != params.EvalParams else cls(checkpoint_dir=Path(\"blublu\")))\n    )\n    additional = field_keys - cls_attrs\n    missing = cls_attrs - field_keys\n    errors = []\n    if additional:\n        errors.append(\n            f\"Found additional fields {additional} in \"\n            f\"{cls.__name__}.arg_fields() compared to its attributes.\"\n        )\n    if missing:\n        errors.append(\n            f\"Found missing fields {missing} in \"\n            f\"{cls.__name__}.arg_fields() compared to its attributes.\"\n        )\n    if errors:\n        raise AssertionError(\"\\n\".join(errors))\n"
  },
  {
    "path": "pypolygames/tests/test_zoo.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom unittest import SkipTest\nimport pytest\nimport torch\nfrom .. import model_zoo\nfrom ..model_zoo.utils import get_game_info\nfrom .. import params\nfrom .. import utils\n\n\n@pytest.mark.parametrize(\"model_name\", [n for n in model_zoo.MODELS])\ndef test_models(model_name) -> None:\n    if model_name in [\"Connect4BenchModel\", \"ResConvConvLogitPoolModelV2\"]:\n        raise SkipTest(f\"Skipping {model_name}\")\n    game_params = params.GameParams(\n        game_name=\"Tristannogo\"\n        if \"GameOfTheAmazons\" not in model_name\n        else \"GameOfTheAmazons\"\n    )\n    model_params = params.ModelParams(model_name=model_name)\n    info = get_game_info(game_params)\n    model = model_zoo.MODELS[model_name](game_params, model_params)\n    model.eval()  # necessary for batch norm as it expects more than 1 ex in training\n    feature_size = info[\"feature_size\"][:3]\n    action_size = info[\"action_size\"][:3]\n    input_data = torch.zeros([1] + feature_size, device=torch.device(\"cpu\"))\n    outputs = model.forward(input_data)\n    assert list(outputs[\"v\"].shape) == [1, 1]\n    assert list(outputs[\"pi\"].shape) == [1] + action_size\n    # loss\n    multi_counter = utils.MultiCounter(root=None)\n    pi_mask = torch.ones(outputs[\"pi\"].shape)\n    model.loss(\n        model, input_data, outputs[\"v\"], outputs[\"pi\"], pi_mask, multi_counter\n    )  # make sure it computes something\n"
  },
  {
    "path": "pypolygames/training.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport sys\nimport time\nimport datetime\nfrom pathlib import Path\nfrom dataclasses import asdict\nfrom typing import Iterator, Tuple, Callable, List, Optional, Dict\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport torch\nfrom torch import nn\n\nimport tube\nimport polygames\n\nfrom .params import (\n    GameParams,\n    ModelParams,\n    OptimParams,\n    SimulationParams,\n    ExecutionParams,\n)\nfrom . import utils\nfrom .env_creation_helpers import create_model, create_game, create_player\n\nfrom .model_zoo import utils as zutils\n\nfrom .model_zoo import loss as model_loss\n\n#######################################################################################\n# OPTIMIZER CREATION\n#######################################################################################\n\n\ndef create_optimizer(\n    model: torch.jit.ScriptModule,\n    optim_params: OptimParams,\n    optim_state_dict: Optional[dict] = None,\n) -> torch.optim.Optimizer:\n    optim = torch.optim.Adam(\n        model.parameters(), lr=optim_params.lr, eps=optim_params.eps\n    )\n    if optim_state_dict is not None and not optim_params.reset_optimizer_state:\n        try:\n            optim.load_state_dict(optim_state_dict)\n        except ValueError:\n            print(\"Optimizer state not compatible... 
skipping.\")\n    return optim\n\n\n#######################################################################################\n# TRAINING ENVIRONMENT CREATION\n#######################################################################################\n\n\ndef create_training_environment(\n    seed_generator: Iterator[int],\n    model_path: Path,\n    device: str,\n    game_params: GameParams,\n    simulation_params: SimulationParams,\n    execution_params: ExecutionParams,\n    model\n) -> Tuple[tube.Context, polygames.ModelManager, Callable[[], List[int]], bool]:\n    games = []\n    context = tube.Context()\n    print(\"Game generation device: {}\".format(device))\n    listen_ep = execution_params.listen\n    connect_ep = execution_params.connect\n    opponent_model_path = execution_params.opponent_model_path\n    is_server = listen_ep != \"\"\n    is_client = connect_ep != \"\"\n    print(\"is_server is \", is_server)\n    print(\"is_client is \", is_client)\n    model_manager = polygames.ModelManager(\n        simulation_params.act_batchsize,\n        str(device),\n        simulation_params.replay_capacity,\n        next(seed_generator),\n        str(model_path),\n        simulation_params.train_channel_timeout_ms,\n        simulation_params.train_channel_num_slots,\n    )\n    model_manager.set_find_batch_size_max_bs(simulation_params.bsfinder_max_bs)\n    model_manager.set_find_batch_size_max_ms(simulation_params.bsfinder_max_ms)\n    if is_server:\n        model_manager.start_server(listen_ep)\n    if is_client:\n        model_manager.start_client(connect_ep)\n    if is_client and is_server:\n        raise RuntimeError(\"Client and server parameters have both been specified\")\n\n    rnn_state_shape = getattr(model, \"rnn_state_shape\", [])\n    logit_value = getattr(model, \"logit_value\", False)\n\n    print(\"rnn_state_shape is \", rnn_state_shape)\n\n    if simulation_params.num_threads != 0:\n        polygames.init_threads(simulation_params.num_threads)\n\n  
  opgame = None\n    op_rnn_state_shape = None\n    op_rnn_seqlen = None\n    op_logit_value = None\n    if not is_server:\n      if opponent_model_path:\n        print(\"loading opponent model\")\n        checkpoint = utils.load_checkpoint(checkpoint_path=opponent_model_path)\n        opmodel = create_model(\n            game_params=checkpoint[\"game_params\"],\n            model_params=checkpoint[\"model_params\"],\n            resume_training=True,\n            model_state_dict=checkpoint[\"model_state_dict\"],\n        )\n        opponent_model_path = execution_params.checkpoint_dir / \"model_opponent.pt\"\n        opmodel.save(str(opponent_model_path))\n        opgame = create_game(\n            checkpoint[\"game_params\"],\n            num_episode=-1,\n            seed=next(seed_generator),\n            eval_mode=False,\n        )\n        op_rnn_state_shape = getattr(opmodel, \"rnn_state_shape\", [])\n        op_rnn_seqlen = 0\n        if hasattr(checkpoint[\"execution_params\"], \"rnn_seqlen\"):\n          op_rnn_seqlen = checkpoint[\"execution_params\"].rnn_seqlen\n        op_logit_value = getattr(opmodel, \"logit_value\", False)\n      model_manager_opponent = polygames.ModelManager(\n          simulation_params.act_batchsize,\n          str(device),\n          simulation_params.replay_capacity,\n          next(seed_generator),\n          str(opponent_model_path) if opponent_model_path else str(model_path),\n          simulation_params.train_channel_timeout_ms,\n          simulation_params.train_channel_num_slots,\n      )\n      model_manager_opponent.set_find_batch_size_max_bs(simulation_params.bsfinder_max_bs)\n      model_manager_opponent.set_find_batch_size_max_ms(simulation_params.bsfinder_max_ms)\n      print(\"tournament_mode is \" + str(execution_params.tournament_mode))\n      if execution_params.tournament_mode:\n        model_manager_opponent.set_is_tournament_opponent(True)\n      if opponent_model_path:\n        
model_manager_opponent.set_dont_request_model_updates(True)\n      if is_client:\n        model_manager_opponent.start_client(connect_ep)\n    if not is_server:\n      train_channel = model_manager.get_train_channel()\n      actor_channel = model_manager.get_act_channel()\n\n      op_actor_channel = actor_channel\n      if model_manager_opponent is not None:\n        op_actor_channel = model_manager_opponent.get_act_channel()\n\n      for i in range(simulation_params.num_game):\n          game = create_game(\n              game_params,\n              num_episode=-1,\n              seed=next(seed_generator),\n              eval_mode=False,\n              per_thread_batchsize=simulation_params.per_thread_batchsize,\n              rewind=simulation_params.rewind,\n              predict_end_state=game_params.predict_end_state,\n              predict_n_states=game_params.predict_n_states,\n          )\n          player_1 = create_player(\n              seed_generator=seed_generator,\n              game=game,\n              player=game_params.player,\n              num_actor=simulation_params.num_actor,\n              num_rollouts=simulation_params.num_rollouts,\n              pure_mcts=False,\n              actor_channel=actor_channel,\n              model_manager=model_manager,\n              human_mode=False,\n              sample_before_step_idx=simulation_params.sample_before_step_idx,\n              randomized_rollouts=simulation_params.randomized_rollouts,\n              sampling_mcts=simulation_params.sampling_mcts,\n              rnn_state_shape=rnn_state_shape,\n              rnn_seqlen=execution_params.rnn_seqlen,\n              logit_value=logit_value\n          )\n          player_1.set_name(\"dev\")\n          if game.is_one_player_game():\n            game.add_player(player_1, train_channel)\n          else:\n            player_2 = create_player(\n                seed_generator=seed_generator,\n                game=opgame if opgame is not None else game,\n 
               player=game_params.player,\n                num_actor=simulation_params.num_actor,\n                num_rollouts=simulation_params.num_rollouts,\n                pure_mcts=False,\n                actor_channel=op_actor_channel,\n                model_manager=model_manager_opponent,\n                human_mode=False,\n                sample_before_step_idx=simulation_params.sample_before_step_idx,\n                randomized_rollouts=simulation_params.randomized_rollouts,\n                sampling_mcts=simulation_params.sampling_mcts,\n                rnn_state_shape=op_rnn_state_shape if op_rnn_state_shape is not None else rnn_state_shape,\n                rnn_seqlen=op_rnn_seqlen if op_rnn_seqlen is not None else execution_params.rnn_seqlen,\n                logit_value=op_logit_value if op_logit_value is not None else logit_value\n            )\n            player_2.set_name(\"opponent\")\n            if next(seed_generator) % 2 == 0:\n              game.add_player(player_1, train_channel, game, player_1)\n              game.add_player(player_2, train_channel, opgame if opgame is not None else game, player_1)\n            else:\n              game.add_player(player_2, train_channel, opgame if opgame is not None else game, player_1)\n              game.add_player(player_1, train_channel, game, player_1)\n\n          context.push_env_thread(game)\n          games.append(game)\n\n    def get_train_reward() -> Callable[[], List[int]]:\n        nonlocal games\n        nonlocal opgame\n        reward = []\n        for game in games:\n            reward.append(game.get_result()[0])\n        if opgame is not None:\n          reward.append(opgame.get_result()[0])\n\n        return reward\n\n    return context, model_manager, get_train_reward, is_client\n\n\n#######################################################################################\n# REPLAY BUFFER 
WARMING-UP\n#######################################################################################\n\n\ndef warm_up_replay_buffer(\n    model_manager: polygames.ModelManager, replay_warmup: int\n) -> None:\n    model_manager.start()\n    prev_buffer_size = -1\n    t = t_init = time.time()\n    t0 = -1\n    size0 = 0\n    while model_manager.buffer_size() < replay_warmup:\n        buffer_size = model_manager.buffer_size()\n        if buffer_size != prev_buffer_size:  # avoid flooding stdout\n            if buffer_size > 10000 and t0 == -1:\n                size0 = buffer_size\n                t0 = time.time()\n            prev_buffer_size = max(prev_buffer_size, 0)\n            frame_rate = (buffer_size - prev_buffer_size) / (time.time() - t)\n            frame_rate = int(frame_rate)\n            prev_buffer_size = buffer_size\n            t = time.time()\n            duration = t - t_init\n            print(\n                f\"warming-up replay buffer: {(buffer_size * 100) // replay_warmup}% \"\n                f\"({buffer_size}/{replay_warmup}) in {duration:.2f}s \"\n                f\"- speed: {frame_rate} frames/s\",\n            )\n        time.sleep(2)\n    print(\n        f\"replay buffer warmed up: 100% \"\n        f\"({model_manager.buffer_size()}/{replay_warmup})\"\n        \"                                                                          \"\n    )\n    print(\n        \"avg speed: %.2f frames/s\"\n        % ((model_manager.buffer_size() - size0) / (time.time() - t0))\n    )\n\n\n#######################################################################################\n# TRAINING\n#######################################################################################\n\nclass ModelWrapperForDDP(nn.Module):\n    def __init__(self, module):\n        super().__init__()\n        self.module = module\n    def forward(self, x: torch.Tensor, rnn_state: torch.Tensor=None, rnn_state_mask: torch.Tensor=None):\n        if rnn_state is None:\n          
return self.module._forward(x, return_logit=True)\n        else:\n          return self.module._forward(x, rnn_state, rnn_state_mask, return_logit=True)\n\nclass DDPWrapperForModel(nn.Module):\n    def __init__(self, module):\n        super().__init__()\n        self.module = module\n    def _forward(self, x: torch.Tensor, rnn_state: torch.Tensor=None, rnn_state_mask: torch.Tensor=None, return_logit: bool=False):\n        if not return_logit:\n            raise RuntimeError(\"DDPWrapperForModel: return_logit is false\")\n        if rnn_state is None:\n          return self.module.forward(x)\n        else:\n          return self.module.forward(x, rnn_state, rnn_state_mask)\n\n_pre_num_add = None\n_pre_num_sample = None\n_running_add_rate = 0\n_running_sample_rate = 0\n_last_train_time = 0\ndef _train_epoch(\n    model: torch.jit.ScriptModule,\n    device: torch.device,\n    ddpmodel: ModelWrapperForDDP,\n    batchsizes,\n    optim: torch.optim.Optimizer,\n    model_manager: polygames.ModelManager,\n    stat: utils.MultiCounter,\n    epoch: int,\n    optim_params: OptimParams,\n    sync_period: int,\n) -> None:\n    global _pre_num_add\n    global _pre_num_sample\n    global _running_add_rate\n    global _running_sample_rate\n    global _last_train_time\n    global _remote_replay_buffer_inited\n    if _pre_num_add is None:\n        pre_num_add = model_manager.buffer_num_add()\n        pre_num_sample = model_manager.buffer_num_sample()\n    else:\n        pre_num_add = _pre_num_add\n        pre_num_sample = _pre_num_sample\n    sync_s = 0.\n    num_sync = 0\n\n    train_start_time = time.time()\n\n    if pre_num_sample > 0:\n        print(\"sample/add ratio \", float(pre_num_sample) / pre_num_add)\n\n    if _last_train_time == 0:\n      _last_train_time = time.time();\n\n    batchsize = optim_params.batchsize\n\n    lossmodel = DDPWrapperForModel(ddpmodel) if ddpmodel is not None else model\n\n    lossmodel.train()\n\n    world_size = 0\n    rank = 0\n    if ddpmodel 
is not None:\n      print(\"DDP is active\")\n      world_size = torch.distributed.get_world_size()\n      rank = torch.distributed.get_rank()\n\n      print(\"World size %d, rank %d. Waiting for all processes\" % (world_size, rank))\n      torch.distributed.barrier()\n      print(\"Synchronizing model\")\n      for p in ddpmodel.parameters():\n        torch.distributed.broadcast(p.data, 0)\n      for p in ddpmodel.buffers():\n        torch.distributed.broadcast(p.data, 0)\n      print(\"Synchronized, start training\")\n\n    has_predict = False\n    cpubatch = {}\n    for k, v in batchsizes.items():\n      sizes = v.copy()\n      sizes.insert(0, batchsize)\n      cpubatch[k] = torch.empty(sizes)\n      if k == \"predict_pi\":\n        has_predict = True\n\n    for eid in range(optim_params.epoch_len):\n        while _running_add_rate * 1.25 < _running_sample_rate:\n          print(\"add rate insufficient, waiting\")\n          time.sleep(5)\n          t = time.time()\n          time_elapsed = t - _last_train_time\n          _last_train_time = t\n          alpha = pow(0.99, time_elapsed)\n          post_num_add = model_manager.buffer_num_add()\n          post_num_sample = model_manager.buffer_num_sample()\n          delta_add = post_num_add - pre_num_add\n          delta_sample = post_num_sample - pre_num_sample\n          _running_add_rate = _running_add_rate * alpha + (delta_add / time_elapsed) * (1 - alpha)\n          _running_sample_rate = _running_sample_rate * alpha + (delta_sample / time_elapsed) * (1 - alpha)\n          pre_num_add = post_num_add\n          pre_num_sample = post_num_sample\n          print(\"running add rate: %.2f / s\" % (_running_add_rate))\n          print(\"running sample rate: %.2f / s\" % (_running_sample_rate))\n          print(\"current add rate: %.2f / s\" % (delta_add / time_elapsed))\n          print(\"current sample rate: %.2f / s\" % (delta_sample / time_elapsed))\n\n        if world_size > 0:\n          batchlist = None\n      
    if rank == 0:\n            batchlist = {}\n            for k in cpubatch.keys():\n              batchlist[k] = []\n            for i in range(world_size):\n              for k,v in model_manager.sample(batchsize).items():\n                batchlist[k].append(v)\n          for k, v in cpubatch.items():\n            torch.distributed.scatter(v, batchlist[k] if rank == 0 else None)\n          batch = utils.to_device(cpubatch, device)\n        else:\n          batch = model_manager.sample(batchsize)\n          batch = utils.to_device(batch, device)\n        for k, v in batch.items():\n          batch[k] = v.detach()\n        loss, v_err, pi_err, predict_err = model_loss.mcts_loss(model, lossmodel, batch)\n        loss.backward()\n\n        grad_norm = nn.utils.clip_grad_norm_(lossmodel.parameters(), optim_params.grad_clip)\n        optim.step()\n        optim.zero_grad()\n\n        stat[\"v_err\"].feed(v_err.item())\n        stat[\"pi_err\"].feed(pi_err.item())\n        if has_predict:\n          stat[\"predict_err\"].feed(predict_err.item())\n        stat[\"loss\"].feed(loss.item())\n        stat[\"grad_norm\"].feed(grad_norm)\n\n        if (epoch * optim_params.epoch_len + eid + 1) % sync_period == 0:\n            sync_t0 = time.time()\n            model_manager.update_model(model.state_dict())\n            sync_s += time.time() - sync_t0\n            num_sync += 1\n\n        t = time.time()\n        time_elapsed = t - _last_train_time\n        _last_train_time = t\n        alpha = pow(0.99, time_elapsed)\n        post_num_add = model_manager.buffer_num_add()\n        post_num_sample = model_manager.buffer_num_sample()\n        delta_add = post_num_add - pre_num_add\n        delta_sample = post_num_sample - pre_num_sample\n        _running_add_rate = _running_add_rate * alpha + (delta_add / time_elapsed) * (1 - alpha)\n        _running_sample_rate = _running_sample_rate * alpha + (delta_sample / time_elapsed) * (1 - alpha)\n        pre_num_add = post_num_add\n    
    pre_num_sample = post_num_sample\n\n    total_time_elapsed = time.time() - train_start_time\n\n    print(\"running add rate: %.2f / s\" % (_running_add_rate))\n    print(\"running sample rate: %.2f / s\" % (_running_sample_rate))\n    print(\"current add rate: %.2f / s\" % (delta_add / time_elapsed))\n    print(\"current sample rate: %.2f / s\" % (delta_sample / time_elapsed))\n    print(f\"syncing duration: {sync_s:2f}s for {num_sync} syncs ({int(100 * sync_s / total_time_elapsed)}% of train time)\")\n\n    _pre_num_add = pre_num_add\n    _pre_num_sample = pre_num_sample\n\n    stat.summary(epoch)\n    stat.reset()\n\ndef train_model(\n    command_history: utils.CommandHistory,\n    start_time: float,\n    model: torch.jit.ScriptModule,\n    device: torch.device,\n    ddpmodel,\n    optim: torch.optim.Optimizer,\n    context: tube.Context,\n    model_manager: polygames.ModelManager,\n    get_train_reward: Callable[[], List[int]],\n    game_params: GameParams,\n    model_params: ModelParams,\n    optim_params: OptimParams,\n    simulation_params: SimulationParams,\n    execution_params: ExecutionParams,\n    epoch: int = 0,\n) -> None:\n\n    info = zutils.get_game_info(game_params)\n    c, h, w = info[\"feature_size\"][:3]\n    rc, rh, rw = info[\"raw_feature_size\"][:3]\n    c_prime, h_prime, w_prime = info[\n        \"action_size\"\n    ][:3]\n\n    predicts = (2 if game_params.predict_end_state else 0) + game_params.predict_n_states\n\n    batchsizes = {\n      \"s\":  [c, h, w],\n      \"v\": [3 if getattr(model, \"logit_value\", False) else 1],\n      \"pred_v\": [1],\n      \"pi\": [c_prime, h_prime, w_prime],\n      \"pi_mask\": [c_prime, h_prime, w_prime]\n    }\n\n    if game_params.player == \"forward\":\n      batchsizes[\"action_pi\"] = [c_prime, h_prime, w_prime]\n\n    if predicts > 0:\n      batchsizes[\"predict_pi\"] = [rc * predicts, rh, rw]\n      batchsizes[\"predict_pi_mask\"] = [rc * predicts, rh, rw]\n\n    if getattr(model, 
\"rnn_state_shape\", None) is not None:\n      batchsizes[\"rnn_state_mask\"] = [1]\n\n    if execution_params.rnn_seqlen > 0:\n      for k, v in batchsizes.items():\n        batchsizes[k] = [execution_params.rnn_seqlen, *v]\n\n    if getattr(model, \"rnn_state_shape\", None) is not None:\n      batchsizes[\"rnn_initial_state\"] = model.rnn_state_shape\n\n    rank = 0\n    if ddpmodel:\n        rank = torch.distributed.get_rank()\n\n    executor = ThreadPoolExecutor(max_workers=1)\n    savefuture = None\n\n    stat = utils.MultiCounter(execution_params.checkpoint_dir)\n    max_time = execution_params.max_time\n    init_epoch = epoch\n    while max_time is None or time.time() < start_time + max_time:\n        if epoch - init_epoch >= optim_params.num_epoch:\n            break\n        epoch += 1\n        if rank == 0 and epoch % execution_params.saving_period == 0:\n            model_manager.add_tournament_model(\"e%d\" % (epoch), model.state_dict())\n            savestart = time.time()\n            if savefuture is not None:\n               savefuture.result()\n            savefuture = utils.save_checkpoint(\n                command_history=command_history,\n                epoch=epoch,\n                model=model,\n                optim=optim,\n                game_params=game_params,\n                model_params=model_params,\n                optim_params=optim_params,\n                simulation_params=simulation_params,\n                execution_params=execution_params,\n                executor=executor\n            )\n            print(\"checkpoint saved in %gs\" % (time.time() - savestart))\n        _train_epoch(\n            model=model,\n            device=device,\n            ddpmodel=ddpmodel,\n            batchsizes=batchsizes,\n            optim=optim,\n            model_manager=model_manager,\n            stat=stat,\n            epoch=epoch,\n            optim_params=optim_params,\n            sync_period=simulation_params.sync_period,\n        )\n 
       # resource usage stats\n        print(\"Resource usage:\")\n        print(utils.get_res_usage_str())\n        print(\"Context stats:\")\n        print(context.get_stats_str())\n        # train result\n        print(\n            \">>>train: epoch: %d, %s\" % (epoch, utils.Result(get_train_reward()).log()),\n            flush=True,\n        )\n    if savefuture is not None:\n        savefuture.result()\n    # checkpoint last state\n    utils.save_checkpoint(\n        command_history=command_history,\n        epoch=epoch,\n        model=model,\n        optim=optim,\n        game_params=game_params,\n        model_params=model_params,\n        optim_params=optim_params,\n        simulation_params=simulation_params,\n        execution_params=execution_params,\n    )\n\ndef client_loop(\n    model_manager: polygames.ModelManager,\n    start_time: float,\n    context: tube.Context,\n    execution_params: ExecutionParams,\n) -> None:\n    model_manager.start()\n    max_time = execution_params.max_time\n    while max_time is None or time.time() < start_time + max_time:\n        time.sleep(60)\n        print(\"Resource usage:\")\n        print(utils.get_res_usage_str())\n        print(\"Context stats:\")\n        print(context.get_stats_str())\n\n#######################################################################################\n# OVERALL TRAINING WORKFLOW\n#######################################################################################\n\n\ndef run_training(\n    command_history: utils.CommandHistory,\n    game_params: GameParams,\n    model_params: ModelParams,\n    optim_params: OptimParams,\n    simulation_params: SimulationParams,\n    execution_params: ExecutionParams,\n) -> None:\n    start_time = time.time()\n    logger_path = os.path.join(execution_params.checkpoint_dir, \"train.log\")\n    sys.stdout = utils.Logger(logger_path)\n\n    print(\"#\" * 70)\n    print(\"#\" + \"TRAINING\".center(68) + \"#\")\n    print(\"#\" * 70)\n\n    if 
execution_params.rnn_seqlen > 0:\n      optim_params.batchsize //= execution_params.rnn_seqlen\n      simulation_params.replay_capacity //= execution_params.rnn_seqlen\n      simulation_params.replay_warmup //= execution_params.rnn_seqlen\n      simulation_params.train_channel_num_slots //= execution_params.rnn_seqlen\n\n\n    print(\"setting-up pseudo-random generator...\")\n    seed_generator = utils.generate_random_seeds(seed=execution_params.seed)\n\n    # checkpoint, resume from where it stops\n    epoch = 0\n    ckpts = list(utils.gen_checkpoints(checkpoint_dir=execution_params.checkpoint_dir, only_last=True, real_time=False))\n    checkpoint = {}\n    if ckpts:\n        checkpoint = ckpts[0]\n        former_command_history = checkpoint[\"command_history\"]\n        command_history.build_history(former_command_history)\n        optim_params = command_history.update_params_from_checkpoint(\n            checkpoint_params=checkpoint[\"optim_params\"], resume_params=optim_params\n        )\n        simulation_params = command_history.update_params_from_checkpoint(\n            checkpoint_params=checkpoint[\"simulation_params\"],\n            resume_params=simulation_params,\n        )\n        execution_params = command_history.update_params_from_checkpoint(\n            checkpoint_params=checkpoint[\"execution_params\"],\n            resume_params=execution_params,\n        )\n    if command_history.last_command_contains(\"init_checkpoint\"):\n        if ckpts:\n            raise RuntimeError(\"Cannot restart from init_checkpoint, already restarting from non-empty checkpoint_dir\")\n        # pretrained model, consider new training from epoch zero\n        print(\"loading pretrained model from checkpoint...\")\n        checkpoint = utils.load_checkpoint(checkpoint_path=model_params.init_checkpoint)\n    if checkpoint:\n        # game_params and model_params cannot change on a checkpoint\n        # either write the same, or don't specify them\n        ignored = 
{\"init_checkpoint\", \"game_name\"}  # this one can change\n        current_params = dict(game_params=game_params, model_params=model_params)\n#        for params_name, params in current_params.items():\n#            for attr, val in asdict(params).items():\n#                if command_history.last_command_contains(attr) and attr not in ignored:\n#                    ckpt_val = getattr(checkpoint[params_name], attr)\n#                    assert val == ckpt_val, f\"When resuming, got '{val}' for {attr} but cannot override from past run with '{ckpt_val}'.\"\n        specified_game_name = game_params.game_name\n        game_params = checkpoint[\"game_params\"]\n        if specified_game_name is not None:\n          game_params.game_name = specified_game_name\n        model_params = checkpoint[\"model_params\"]\n        for params_name, params in current_params.items():\n            for attr, val in asdict(params).items():\n                if command_history.last_command_contains(attr) and attr not in ignored:\n                    ckpt_val = getattr(checkpoint[params_name], attr)\n                    if val != ckpt_val:\n                      print(f\"Note: overriding {attr} from {ckpt_val} to {val}\")\n                      setattr(checkpoint[params_name], attr, val)\n        epoch = checkpoint[\"epoch\"]\n        print(\"reconstructing the model...\")\n    else:\n        print(\"creating and saving the model...\")\n    if len(execution_params.devices) != 1:\n        raise RuntimeError(\"Only one device is supported for training\")\n    device = execution_params.devices[0]\n\n    model = create_model(\n          game_params=game_params,\n          model_params=model_params,\n          resume_training=bool(checkpoint),\n          model_state_dict=checkpoint[\"model_state_dict\"] if checkpoint else None,\n      ).to(device)\n\n    model_path = execution_params.checkpoint_dir / \"model.pt\"\n    model.save(str(model_path))\n\n    ddpmodel = None\n    if 
os.environ.get(\"RANK\") is not None:\n        torch.distributed.init_process_group(backend=\"gloo\", timeout=datetime.timedelta(0, 864000))\n        ddpmodel = nn.parallel.DistributedDataParallel(ModelWrapperForDDP(model), broadcast_buffers=False, find_unused_parameters=False)\n\n    print(\"creating optimizer...\")\n    optim = create_optimizer(\n        model=ddpmodel if ddpmodel is not None else model,\n        optim_params=optim_params,\n        optim_state_dict=checkpoint.get(\"optim_state_dict\", None),\n    )\n\n    print(\"creating training environment...\")\n    context, model_manager, get_train_reward, is_client = create_training_environment(\n        seed_generator=seed_generator,\n        model_path=model_path,\n        device=device,\n        game_params=game_params,\n        simulation_params=simulation_params,\n        execution_params=execution_params,\n        model=model\n    )\n    if not is_client:\n        model_manager.update_model(model.state_dict())\n    model_manager.add_tournament_model(\"init\", model.state_dict())\n    context.start()\n\n    if is_client:\n      client_loop(\n          model_manager=model_manager,\n          start_time=start_time,\n          context=context,\n          execution_params=execution_params\n      )\n    else:\n      if ddpmodel is None or torch.distributed.get_rank() == 0:\n        print(\"warming-up replay buffer...\")\n        warm_up_replay_buffer(\n            model_manager=model_manager,\n            replay_warmup=simulation_params.replay_warmup\n        )\n\n      print(\"training model...\")\n      train_model(\n          command_history=command_history,\n          start_time=start_time,\n          model=model,\n          device=device,\n          ddpmodel=ddpmodel,\n          optim=optim,\n          context=context,\n          model_manager=model_manager,\n          get_train_reward=get_train_reward,\n          game_params=game_params,\n          model_params=model_params,\n          
optim_params=optim_params,\n          simulation_params=simulation_params,\n          execution_params=execution_params,\n          epoch=epoch\n      )\n\n    elapsed_time = time.time() - start_time\n    print(f\"total time: {elapsed_time} s\")\n"
  },
  {
    "path": "pypolygames/utils/__init__.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom .checkpoint import Checkpoint, save_checkpoint, load_checkpoint, gen_checkpoints\nfrom .command_history import CommandHistory\nfrom .logger import Logger\nfrom .plotter import Plotter\nfrom .multi_counter import MultiCounter\nfrom .helpers import *\nfrom .assert_utils import assert_eq\nfrom .result import Result\nfrom .restrack import get_res_usage_str\nfrom . import listings\n"
  },
  {
    "path": "pypolygames/utils/assert_utils.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"Utils for assertions\"\"\"\n\n\ndef assert_eq(real, expected):\n    assert real == expected, '%s (true) vs %s (expected)' % (real, expected)\n\n\ndef assert_neq(real, expected):\n    assert real != expected, '%s (true) vs %s (expected)' % (real, expected)\n\n\ndef assert_lt(real, expected):\n    assert real < expected, '%s (true) vs %s (expected)' % (real, expected)\n\n\ndef assert_lteq(real, expected):\n    assert real <= expected, '%s (true) vs %s (expected)' % (real, expected)\n\n\ndef assert_tensor_eq(t1, t2, eps=1e-6):\n    if t1.size() != t2.size():\n        print('Warning: size mismatch', t1.size(), 'vs', t2.size())\n        return False\n\n    t1 = t1.cpu().numpy()\n    t2 = t2.cpu().numpy()\n    diff = abs(t1 - t2)\n    eq = (diff < eps).all()\n    if not eq:\n        import pdb\n        pdb.set_trace()\n    assert(eq)\n\n\ndef assert_zero_grads(params):\n    for p in params:\n        if p.grad is not None:\n            assert(p.grad.sum().item() == 0)\n"
  },
  {
    "path": "pypolygames/utils/checkpoint.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport glob\nimport time\nimport gzip\nimport zipfile\nimport re\nimport copy\nfrom pathlib import Path\nfrom typing import Iterator, Dict, Union, Any\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport torch\n\nfrom .command_history import CommandHistory\nfrom ..params import (\n    GameParams,\n    ModelParams,\n    OptimParams,\n    SimulationParams,\n    ExecutionParams,\n)\n\nCheckpoint = Dict[\n    str,\n    Union[\n        int,\n        bytes,\n        Dict[str, Any],\n        GameParams,\n        ModelParams,\n        OptimParams,\n        SimulationParams,\n        ExecutionParams,\n    ],\n]\n\n\ndef save_checkpoint(\n    command_history: CommandHistory,\n    epoch: int,\n    model: torch.jit.ScriptModule,\n    optim: torch.optim.Optimizer,\n    game_params: GameParams,\n    model_params: ModelParams,\n    optim_params: OptimParams,\n    simulation_params: SimulationParams,\n    execution_params: ExecutionParams,\n    executor: ThreadPoolExecutor = None,\n) -> None:\n    checkpoint_dir = execution_params.checkpoint_dir\n    save_uncompressed = execution_params.save_uncompressed\n    checkpoint_name = f\"checkpoint_{epoch}\"\n    checkpoint = {\n        \"command_history\": command_history,\n        \"epoch\": epoch,\n        \"model_state_dict\": {k : v.cpu().clone() if isinstance(v, torch.Tensor) else copy.deepcopy(v) for k, v in model.state_dict().items()},\n        \"optim_state_dict\": {k : v.cpu().clone() if isinstance(v, torch.Tensor) else copy.deepcopy(v) for k, v in optim.state_dict().items()},\n        \"game_params\": game_params,\n        \"model_params\": model_params,\n        \"optim_params\": optim_params,\n        \"simulation_params\": simulation_params,\n        \"execution_params\": execution_params,\n    }\n\n  
  def saveit():\n        nonlocal save_uncompressed\n        nonlocal checkpoint\n        nonlocal checkpoint_dir\n        if save_uncompressed:\n            torch.save(checkpoint, checkpoint_dir / f\"{checkpoint_name}.pt\")\n        else:\n            # with zipfile.ZipFile(Path(checkpoint_dir) / f\"{checkpoint_name}.zip\", \"w\", allowZip64=True) as z:\n            #    with z.open(f\"{checkpoint_name}.pt\", \"w\", force_zip64=True) as f:\n            #        torch.save(checkpoint, f)\n            with gzip.open(checkpoint_dir / f\"{checkpoint_name}.pt.gz\", \"wb\") as f:\n                torch.save(checkpoint, f)\n    if executor is not None:\n        return executor.submit(saveit)\n    else:\n        saveit()\n\n\ndef load_checkpoint(checkpoint_path: Path) -> Checkpoint:\n    ext = checkpoint_path.suffix\n    if ext == \".pt\":\n        checkpoint = torch.load(str(checkpoint_path), map_location=torch.device('cpu'))\n    elif ext == \".gz\":\n        with gzip.open(checkpoint_path, \"rb\") as f:\n            checkpoint = torch.load(f, map_location=torch.device('cpu'))\n    elif ext == \".zip\":\n        with zipfile.ZipFile(checkpoint_path, \"r\", allowZip64=True) as z:\n            checkpoint_unzipped_name = z.namelist()[0]\n            with z.open(checkpoint_unzipped_name, \"r\") as f:\n                checkpoint = torch.load(f)\n    else:\n        raise ValueError(\n            \"The checkpoint file extension must be either \"\n            \"'.pt', '.gz', '.pt.gz' or '.zip'\"\n        )\n    return checkpoint\n\n\nEXT_PATTERN = re.compile(r\"(\\.pt|\\.gz|\\.pt\\.gz|\\.zip)$\")\n\n\ndef gen_checkpoints(\n    checkpoint_dir: Path, real_time: bool, only_last: bool = False\n) -> Iterator[Checkpoint]:\n    checkpoint_basepath = str(checkpoint_dir / \"checkpoint_\")\n    epoch_list = set()\n    checkpoint_ext_detected = False\n    first_time = True\n    # infinite loop, could be made elegant with inotify\n    while first_time or real_time:\n        if not 
first_time:\n            time.sleep(2)\n        first_time = False\n        checkpoint_path_list_no_ext = [\n            re.sub(EXT_PATTERN, \"\", checkpoint_path)\n            for checkpoint_path in glob.glob(f\"{checkpoint_basepath}*\")\n        ]\n        new_epoch_list = {\n            int(checkpoint_path_no_ext[len(checkpoint_basepath):])\n            for checkpoint_path_no_ext in checkpoint_path_list_no_ext\n        }\n        if not checkpoint_ext_detected and new_epoch_list:\n            checkpoint_ext = re.search(\n                EXT_PATTERN, next(iter(glob.glob(f\"{checkpoint_basepath}*\")))\n            ).group(0)\n            checkpoint_ext_detected = True\n\n        added_epoch_list = sorted(new_epoch_list - epoch_list)\n        # if the evaluation runs in real time, only consider the latest checkpoint\n        if real_time or only_last:\n            added_epoch_list = added_epoch_list[-1:]\n        epoch_list = new_epoch_list\n        for epoch in added_epoch_list:\n            print(f\"loading checkpoint #{epoch}...\")\n            checkpoint_path = f\"{checkpoint_basepath}{epoch}{checkpoint_ext}\"\n            yield load_checkpoint(checkpoint_path=Path(checkpoint_path))\n"
  },
  {
    "path": "pypolygames/utils/command_history.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport itertools\nfrom dataclasses import fields\nfrom typing import Optional, List\n\nfrom ..params import GenericParams\n\n\nclass CommandHistory:\n    def __init__(self):\n        # remove '='\n        command = [\n            x\n            for x in itertools.chain.from_iterable(\n                map(lambda x: x.split(\"=\"), sys.argv)\n            )\n        ]\n        self._commands = [command]\n\n    def build_history(self, former_command_history: \"CommandHistory\"):\n        self._commands = former_command_history._commands + self._commands\n\n    def former_commands_contain(self, option: str) -> bool:\n        if option[:2] != \"--\":\n            option = f\"--{option}\"\n        for command in self._commands[:-1]:\n            if option in command:\n                return True\n        return False\n\n    def last_command_contains(self, option: str) -> bool:\n        if option[:2] != \"--\":\n            option = f\"--{option}\"\n        if self._commands:\n            if option in self._commands[-1]:\n                return True\n        return False\n\n    def last_command_contains_params(\n        self, DataclassParams: GenericParams, exclude: Optional[List[str]] = None\n    ) -> bool:\n        if exclude is None:\n            exclude = []\n        exclude = [\n            f\"--{option}\" if option[:2] != \"--\" else option for option in exclude\n        ]\n        if self._commands:\n            for _, arg_field in DataclassParams.arg_fields():\n                if arg_field.name not in exclude and self.last_command_contains(\n                    arg_field.name\n                ):\n                    return True\n        return False\n\n    def update_params_from_checkpoint(\n        self, checkpoint_params: 
GenericParams, resume_params: GenericParams\n    ) -> GenericParams:\n        Dataclass = type(checkpoint_params)\n        params = {}\n        for field in fields(Dataclass):\n            formerly_set = self.former_commands_contain(field.name)\n            newly_set = self.last_command_contains(field.name)\n            if not formerly_set:\n                if not newly_set:\n                    params.update({field.name: getattr(resume_params, field.name)})\n                else:\n                    params.update({field.name: getattr(resume_params, field.name)})\n            else:\n                if not newly_set:\n                    params.update({field.name: getattr(checkpoint_params, field.name)})\n                else:\n                    params.update({field.name: getattr(resume_params, field.name)})\n        return Dataclass(**params)\n"
  },
  {
    "path": "pypolygames/utils/helpers.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Iterator\n\nimport torch\nimport random\nimport numpy as np\n\n\ndef generate_random_seeds(seed: int) -> Iterator[int]:\n    # set-up all seeds\n    random.seed(seed)\n    np.random.seed(seed + 1)\n    torch.manual_seed(seed + 2)\n    torch.cuda.manual_seed(seed + 3)\n    # generate random seeds\n    generator = random.Random(seed)\n    while True:\n        yield generator.randint(0, 2 ** 31 - 1)\n\n\ndef to_device(batch, device):\n    if isinstance(batch, torch.Tensor):\n        return batch.to(device).detach()\n    elif isinstance(batch, dict):\n        return {key: to_device(batch[key], device) for key in batch}\n    else:\n        assert False, \"unsupported type: %s\" % type(batch)\n"
  },
  {
    "path": "pypolygames/utils/listings.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport re\nimport typing as tp\nfrom pathlib import Path\n\n\ndef games(olympiads: bool = False) -> tp.List[str]:\n    \"\"\"List games using pattern in core/game.h\n\n    Parameters\n    ----------\n    olympiads: bool\n        only list olympiad games\n    \"\"\"\n    if olympiads:\n        pies = {\"Hex11pie\", \"Hex13pie\", \"Hex19pie\", \"Havannah5pie\", \"Havannah8pie\"} & set(\n            games()\n        )  # to ready yet\n        return [\n            \"BlockGo\",\n            \"Einstein\",\n            \"Othello8\",\n            \"Othello10\",\n            \"Othello16\",\n            \"Minishogi\",\n            \"DiceShogi\",\n            \"Surakarta\",\n            \"Breakthrough\",\n            \"Tristannogo\",\n            \"GameOfTheAmazons\",\n        ] + list(pies)\n    filepath = Path(__file__).parents[2] / \"core\" / \"game.h\"\n    assert filepath.exists()\n    pattern = r\".*if\\s*?\\(\\s*?isGameNameMatched\\s*?\\(\\s*?\\{\\s*?\\\"(?P<name>\\w+)\\\"[^}]*\\}\\s*?\\)\\s*?\\)\\s*?\\{.*\"\n    iterator = re.finditer(pattern, filepath.read_text())\n    return list(\n        x.group(\"name\") for x in iterator if not x.group().strip().startswith(\"//\")\n    )\n"
  },
  {
    "path": "pypolygames/utils/logger.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport sys\n\n\nclass Logger:\n    def __init__(self, path, mode='w'):\n        assert mode in {'w', 'a'}, 'unknown mode for logger %s' % mode\n        self.terminal = sys.stdout\n        if not os.path.exists(os.path.dirname(path)):\n            os.makedirs(os.path.dirname(path))\n        if mode == 'w' or not os.path.exists(path):\n            self.log = open(path, \"w\")\n        else:\n            self.log = open(path, \"a\")\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n        self.log.flush()\n\n    def flush(self):\n        # for python 3 compatibility.\n        pass\n"
  },
  {
    "path": "pypolygames/utils/multi_counter.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom pathlib import Path\nfrom collections import defaultdict, Counter\nfrom datetime import datetime\nfrom tensorboardX import SummaryWriter\n\n\nclass ValueStats:\n    def __init__(self, name=None):\n        self.name = name\n        self.reset()\n\n    def feed(self, v):\n        self.summation += v\n        if v > self.max_value:\n            self.max_value = v\n            self.max_idx = self.counter\n        if v < self.min_value:\n            self.min_value = v\n            self.min_idx = self.counter\n\n        self.counter += 1\n\n    def mean(self):\n        return self.summation / self.counter\n\n    def summary(self, info=None):\n        info = \"\" if info is None else info\n        name = \"\" if self.name is None else self.name\n        if self.counter > 0:\n            # try:\n            return \"%s%s[%4d]: avg: %8.4f, min: %8.4f[%4d], max: %8.4f[%4d]\" % (\n                info,\n                name,\n                self.counter,\n                self.summation / self.counter,\n                self.min_value,\n                self.min_idx,\n                self.max_value,\n                self.max_idx,\n            )\n            # except BaseException:\n            #     return \"%s%s[Err]:\" % (info, name)\n        else:\n            return \"%s%s[0]\" % (info, name)\n\n    def reset(self):\n        self.counter = 0\n        self.summation = 0.0\n        self.max_value = -1e38\n        self.min_value = 1e38\n        self.max_idx = None\n        self.min_idx = None\n\n\nclass MultiCounter:\n    def __init__(self, root: Path, verbose=False):\n        # TODO: rethink counters\n        self.last_time = None\n        self.verbose = verbose\n        self.counts = Counter()\n        self.stats = defaultdict(lambda: 
ValueStats())\n        self.total_count = 0\n        self.max_key_len = 0\n        if root is not None:\n            self.tb_writer = SummaryWriter(str(root / \"stat.tb\"))\n        else:\n            self.tb_writer = None\n\n    def __getitem__(self, key):\n        if len(key) > self.max_key_len:\n            self.max_key_len = len(key)\n\n        if self.last_time is None:\n            self.last_time = datetime.now()\n\n        return self.stats[key]\n\n    def start_timer(self):\n        self.last_time = datetime.now()\n\n    def inc(self, key):\n        if self.verbose:\n            print(\"[MultiCounter]: %s\" % key)\n        self.counts[key] += 1\n        self.total_count += 1\n        if self.last_time is None:\n            self.last_time = datetime.now()\n\n    def reset(self):\n        for k in self.stats.keys():\n            self.stats[k].reset()\n\n        self.counts = Counter()\n        self.total_count = 0\n        self.last_time = datetime.now()\n\n    def summary(self, global_counter):\n        assert self.last_time is not None\n        time_elapsed = (datetime.now() - self.last_time).total_seconds()\n        print(\"[%d] Time spent = %.2f s\" % (global_counter, time_elapsed))\n\n        for key, count in self.counts.items():\n            print(\"%s: %d/%d\" % (key, count, self.total_count))\n\n        for k in sorted(self.stats.keys()):\n            v = self.stats[k]\n            info = str(global_counter) + \":\" + k\n            print(v.summary(info=info.ljust(self.max_key_len + 4)))\n\n            if self.tb_writer is not None:\n                self.tb_writer.add_scalar(k, v.mean(), global_counter)\n"
  },
  {
    "path": "pypolygames/utils/plotter.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import List, Tuple\n\nimport numpy as np\n\nfrom .result import Result\n\n\nclass Plotter:\n    def __init__(self, plot_enabled: bool, env: str, server: str, port: int):\n        self.plot_enabled = plot_enabled\n        self.env = env\n        if plot_enabled:\n            import visdom\n\n            self.vis = visdom.Visdom(env=env, server=server, port=port)\n\n    def plot_results(self, results: List[Tuple[int, Result]]):\n        if self.plot_enabled:\n            epochs, results = list(map(np.array, list(zip(*results))))\n            nb_wins, nb_ties, nb_losses = list(\n                map(\n                    np.array,\n                    zip(\n                        *[\n                            (\n                                result.result[\"win\"],\n                                result.result[\"tie\"],\n                                result.result[\"loss\"],\n                            )\n                            for result in results\n                        ]\n                    ),\n                )\n            )\n            nb_totals = np.array(list(map(sum, zip(nb_wins, nb_ties, nb_losses))))\n            win_percents, tie_percents, loss_percents = list(\n                map(\n                    np.array,\n                    zip(\n                        *[\n                            (100 * w / total, 100 * t / total, 100 * l / total)\n                            for w, t, l, total in zip(\n                                nb_wins, nb_ties, nb_losses, nb_totals\n                            )\n                        ]\n                    ),\n                )\n            )\n            # lines\n            self.vis.line(\n                win=\"eval win-tie-loss\",\n                X=epochs,\n         
       Y=np.array([nb_wins, nb_ties, nb_losses]).T,\n                opts={\n                    \"title\": \"eval win-tie-loss\",\n                    \"xlabel\": \"#epochs\",\n                    \"ylabel\": \"#games\",\n                    \"xtickmin\": 0,\n                    \"ytickmin\": 0,\n                    \"ytickmax\": 100,\n                    \"legend\": [\"wins\", \"ties\", \"losses\"],\n                },\n            )\n            # stacked area\n            self.vis.line(\n                win=\"eval stacked percentages\",\n                X=epochs,\n                Y=np.array(\n                    [\n                        win_percents,\n                        list(map(sum, zip(win_percents, tie_percents))),\n                        [100] * len(epochs),\n                    ]\n                ).T,\n                opts={\n                    \"title\": \"eval stacked percentages\",\n                    \"xlabel\": \"#epochs\",\n                    \"ylabel\": \"%\",\n                    \"xtickmin\": 0,\n                    \"ytickmin\": 0,\n                    \"ytickmax\": 100,\n                    \"fillarea\": True,\n                    \"legend\": [\"wins\", \"ties\", \"losses\"],\n                },\n            )\n\n    def save(self):\n        if self.plot_enabled:\n            self.vis.save([self.env])\n"
  },
  {
    "path": "pypolygames/utils/restrack.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport resource\nimport os\nimport subprocess\n\n\ndef get_gpu_usage_nvidia():\n    try:\n        nvidia_exe = 'nvidia-smi'\n        nvquery = \"index,utilization.gpu,memory.total,memory.used\"\n        nvformat = \"csv,noheader,nounits\"\n        stdout = subprocess.check_output([\n            nvidia_exe,\n            f\"--query-gpu={nvquery}\",\n            f\"--format={nvformat}\"])\n    except subprocess.CalledProcessError as e:\n        return f\"GPU: ({nvidia_exe} error code {e.returncode})\"\n    except FileNotFoundError as e:\n        return \"GPU: (unknown)\"\n    gpustrs_raw = stdout.decode(\"utf-8\").strip().split(os.linesep)\n    gpustrs = []\n    for gpustr_raw in gpustrs_raw:\n        tokens = gpustr_raw.split(',')\n        gpuid = tokens[0].strip()\n        gpuutil = tokens[1].strip()\n        memtotal = float(tokens[2].strip())\n        memused = float(tokens[3].strip())\n        gpustr = f\"GPU{gpuid}: {gpuutil}%, {memused} MB / {memtotal} MB\"\n        gpustrs.append(gpustr)\n    gpustr = os.linesep.join(gpustrs)\n    return gpustr\n\n\ndef get_res_usage_psutil_str():\n    import psutil\n    p = psutil.Process()\n    ru = p.as_dict(attrs=[\n        'cpu_num', 'cpu_percent', 'cpu_times', 'num_threads', 'memory_info',\n        'memory_percent', 'nice', 'ionice'])\n    cpu_num = ru['cpu_num']\n    cpu_nthr = ru['num_threads']\n    cpu_tusr = ru['cpu_times'].user\n    cpu_tsys = ru['cpu_times'].system\n    nice = ru['nice']\n    ionice = ru['ionice']\n    cpustr = f\"CPU: {cpu_nthr} threads, Tusr={cpu_tusr}, \" \\\n        f\"Tsys={cpu_tsys}\"\n    #  f\"Tsys={cpu_tsys}, nice={nice}, ionice={ionice}\"\n    mem_rss = ru['memory_info'].rss / 1024 / 1024\n    mem_vms = ru['memory_info'].vms / 1024 / 1024\n    mem_pcent = 
ru['memory_percent']\n    memstr = f\"Mem: RSS {mem_rss:8.2f} MB,\" \\\n        f\" VMS {mem_vms:8.2f} MB, {mem_pcent:5.2f}%\"\n    gpustr = get_gpu_usage_nvidia()\n    resstr = os.linesep.join([cpustr, gpustr, memstr])\n    return resstr\n\n\ndef get_res_usage_no_psutil_str():\n    ru = resource.getrusage(resource.RUSAGE_SELF)\n    cpustr = f\"CPU User={ru.ru_utime} System={ru.ru_stime}\"\n    gpustr = get_gpu_usage_nvidia()\n    memstr = f\"Mem maxrss={ru.ru_maxrss}\"\n    comment = \"(install psutil for detailed data)\"\n    resstr = f\"{cpustr}, {gpustr}, {memstr}, {comment}\"\n    return resstr\n\n\ndef get_res_usage_str():\n    try:\n        import psutil\n        return get_res_usage_psutil_str()\n    except (ImportError, ModuleNotFoundError) as e:\n        return get_res_usage_no_psutil_str()\n"
  },
  {
    "path": "pypolygames/utils/result.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# helper class for result stats\n\n\ndef parse_reward(reward):\n    result = {\"win\": 0, \"loss\": 0, \"tie\": 0, \"avg\": 0.}\n    for r in reward:\n        if r == -1:\n            result[\"loss\"] += 1\n        elif r == 1:\n            result[\"win\"] += 1\n        else:\n            result[\"tie\"] += 1\n    result[\"total\"] = len(reward)\n    result[\"avg\"] = (sum(reward) / max(len(reward), 1) + 1.) / 2.\n    return result\n\n\nclass Result:\n    def __init__(self, reward):\n        self.reward = reward\n        self.result = parse_reward(reward)\n\n    def log(self):\n        total = max(self.result[\"total\"], 1)\n        s = \"win: %.2f, tie: %.2f, loss: %.2f, avg: %.2f\" % (\n            100 * self.result[\"win\"] / total,\n            100 * self.result[\"tie\"] / total,\n            100 * self.result[\"loss\"] / total,\n            100 * self.result[\"avg\"],\n        )\n        return s\n"
  },
  {
    "path": "pypolygames/utils/test_listings.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import Counter\nfrom . import listings\n\n\ndef test_lists() -> None:\n    includes = {\"Connect4\", \"TicTacToe\", \"Othello8\", \"Othello16\", \"GameOfTheAmazons\",\n                \"Hex5\", \"Hex11\", \"Hex13\", \"Connect6\",\n                \"Havannah5\", \"Havannah8\", \"Breakthrough\", \"Tristannogo\",\n                \"Minishogi\", \"Surakarta\", \"DiceShogi\"}\n    listed_items = listings.games()\n    duplicated = {x: y for x, y in Counter(listed_items).items() if y > 1}\n    assert duplicated == {}\n    missing = includes - set(listed_items)\n    assert not missing, f\"Could not find {missing} (screening through core/game.h or model_zoo/init or main), was it renamed?\"\n"
  },
  {
    "path": "pypolygames/weight_init.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import OrderedDict\n\nimport torch\n\n\ndef _init_weight_from_method(init_method):\n    def wrapped_init_method(net):\n        if getattr(net, \"weight\", None) is not None:\n            # with batch norm affine some weight have dim == 1\n            if net.weight.dim() > 1:\n                return init_method(net)\n\n    return wrapped_init_method\n\n\nWEIGHT_INIT = OrderedDict(\n    xavier_uniform=_init_weight_from_method(\n        lambda net: torch.nn.init.xavier_uniform_(net.weight, gain=1.0)\n    ),\n    xavier_normal=_init_weight_from_method(\n        lambda net: torch.nn.init.xavier_normal_(net.weight, gain=1.0)\n    ),\n    kaiming_uniform=_init_weight_from_method(\n        lambda net: torch.nn.init.kaiming_uniform_(\n            net.weight, a=0, mode=\"fan_in\", nonlinearity=\"relu\"\n        )\n    ),\n    kaiming_normal=_init_weight_from_method(\n        lambda net: torch.nn.init.kaiming_normal_(\n            net.weight, a=0, mode=\"fan_in\", nonlinearity=\"relu\"\n        )\n    ),\n)\n"
  },
  {
    "path": "singularity/README.md",
    "content": "# Polygames singularity image\n\n\nBuild an image with the following command (from polygames root directory):\n```bash\nsingularity build singularity/polygames.simg singularity/polygames.def\n```\nThis can take up to 45min.\n\nOnce the image is built, you can run it with:\n```bash\nsingularity shell --nv /checkpoint/polygames/polygames.simg\n```\nThe `--nv` parameters gives access to the GPUs. Do not worry about the warnings: `awk: warning: escape sequence`.\n\nIn the image, you can compile polygames with (please remove build if it already exists):\n```bash\ncd polygames\nmkdir build\ncd build\ncmake ..\nmake\n```\n\nThe image can be used to run any command:\n```bash\nsingularity run --nv singularity/polygames.simg python -m pypolygames\n```\n"
  },
  {
    "path": "singularity/environment.yml",
    "content": "name: polygames\nchannels:\n  - pytorch\n  - conda-forge\n  - defaults\ndependencies:\n  - ca-certificates=2019.6.16=hecc5488_0\n  - certifi=2019.6.16=py37_0\n  - libprotobuf=3.8.0=h8b12597_0\n  - openssl=1.1.1b=h14c3975_1\n  - protobuf=3.8.0=py37he1b5a44_0\n  - six=1.12.0=py37_1000\n  - tensorboardx=1.7=py_0\n  - blas=1.0=mkl\n  - bzip2=1.0.6=h14c3975_5\n  - cffi=1.12.3=py37h2e261b9_0\n  - cmake=3.14.0=h52cb24c_0\n  - expat=2.2.6=he6710b0_0\n  - intel-openmp=2019.4=243\n  - krb5=1.16.1=h173b8e3_7\n  - libcurl=7.64.1=h20c2e04_0\n  - libedit=3.1.20181209=hc058e9b_0\n  - libffi=3.2.1=hd88cf55_4\n  - libgcc-ng=9.1.0=hdf63c60_0\n  - libgfortran-ng=7.3.0=hdf63c60_0\n  - libssh2=1.8.2=h1ba5d50_0\n  - libstdcxx-ng=9.1.0=hdf63c60_0\n  - mkl=2019.4=243\n  - mkl-include=2019.4=243\n  - mkl_fft=1.0.12=py37ha843d7b_0\n  - mkl_random=1.0.2=py37hd81dba3_0\n  - ncurses=6.1=he6710b0_1\n  - numpy=1.16.4=py37h7e9f1db_0\n  - numpy-base=1.16.4=py37hde5b4d6_0\n  - pip=19.1.1=py37_0\n  - pycparser=2.19=py37_0\n  - python=3.7.3=h0371630_0\n  - pyyaml=5.1=py37h7b6447c_0\n  - readline=7.0=h7b6447c_5\n  - rhash=1.3.8=h1ba5d50_0\n  - setuptools=41.0.1=py37_0\n  - sqlite=3.28.0=h7b6447c_0\n  - tk=8.6.8=hbc83047_0\n  - typing=3.6.4=py37_0\n  - wheel=0.33.4=py37_0\n  - xz=5.2.4=h14c3975_4\n  - yaml=0.1.7=had09818_2\n  - zlib=1.2.11=h7b6447c_3\n  - magma-cuda100=2.5.0=1\n"
  },
  {
    "path": "singularity/polygames.def",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nBootstrap: docker\n\nFrom: nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04\n# chosen among: https://hub.docker.com/r/nvidia/cuda/\n\n%runscript\n\n    # init file loads conda environment\n    #exec bash --init-file /opt/sing_bash_init_file\n    exec $@\n\n%files\n\n    singularity/environment.yml environment.yml\n\n%environment\n\n    SHELL=/bin/bash\n    PATH=\"/opt/conda/bin:$PATH\"\n    export OMP_NUM_THREADS=1\n    # export CUDNN paths to avoid finding the external one\n    export CUDNN_LIB_DIR=/usr/lib/x86_64-linux-gnu/libcudnn.so\n    export CUDNN_INCLUDE_DIR=/usr/include\n    export CUDNN_ROOT_DIR=\"\"\n    . /opt/conda/etc/profile.d/conda.sh\n    conda activate pypg\n\n\n%labels\n\n   AUTHOR Facebook AI Research\n\n%post\n\n    # inspired from https://github.com/pytorch/pytorch/blob/master/docker/pytorch/Dockerfile\n\n    # install depencies\n    apt-get update\n    apt-get install -y \\\n        build-essential \\\n        libzmq3-dev \\\n        cmake \\\n        wget \\\n        vim \\\n        git \\\n        ca-certificates \\\n        libjpeg-dev \\\n        libpng-dev\n    rm -rf /var/lib/apt/lists/\n    mkdir /checkpoint  # for future bindings\n    mkdir /public  # for future bindings\n    mkdir /scratch  # for future bindings\n\n    # install miniconda and the pypg environment\n    wget -O ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh\n    bash ~/miniconda.sh -b -p /opt/conda\n    rm ~/miniconda.sh\n    . /opt/conda/etc/profile.d/conda.sh\n    conda init bash zsh\n    echo '. 
/opt/conda/etc/profile.d/conda.sh; conda activate pypg' > /opt/sing_bash_init_file\n    conda env create -f environment.yml --name pypg \n    # conda create -n pypg pip\n    conda activate pypg\n    pip install pytest pytest-cov mypy ipython psutil nevergrad pyzmq # convenient to have\n\n    # # the environment contains all this\n    # conda create --name pypg python=3.7 pip -y\n    # conda activate pypg\n    # pip install tube\n    # conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing -y\n    # conda install -c pytorch magma-cuda100 -y\n    # conda install -c conda-forge tensorboardx -y\n    # # it was exported with `conda env export > environment.yml`\n\n    # download and install pytorch from source (in circleci: 2 cpus)\n    export MAX_JOBS=$(cat /proc/cpuinfo | grep -c processor)\n    if (( $MAX_JOBS < 4 ));\n        then MAX_JOBS=4;\n    fi;\n    echo \"Using $MAX_JOBS jobs for pytorch compilation\"\n    git clone --recursive https://github.com/pytorch/pytorch --branch=v1.1.0 ~/pytorch\n    cd ~/pytorch\n    export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-\"$(dirname $(which conda))/../\"}\n    # # set cuda arch list so that the built binary can be run on both pascal and volta\n    MAX_JOBS=$MAX_JOBS TORCH_CUDA_ARCH_LIST='6.0;7.0' pip install . -v\n"
  },
  {
    "path": "src/CMakeLists.txt",
    "content": " \n\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(third_party)\ninclude_directories(third_party/fmt/include)\nadd_subdirectory(third_party/fmt)\n\n# add_subdirectory(torchRL)\nadd_subdirectory(third_party/pybind11)\nadd_subdirectory(tube)\nadd_subdirectory(mcts)\n\nfile(GLOB _zstd_SOURCES third_party/zstd/lib/common/*.c third_party/zstd/lib/compress/*.c third_party/zstd/lib/decompress/*.c)\nadd_library(_zstd OBJECT ${_zstd_SOURCES})\n\ntarget_include_directories(_zstd BEFORE PUBLIC third_party/zstd/lib third_party/zstd/lib/common)\n\nfind_path(IBV_INCLUDE_DIR infiniband/verbs.h)\nfind_library(IBV_LIBRARY ibverbs)\n\nadd_library(libpolygames SHARED \"\")\n\nadd_library(_distributed OBJECT\n  distributed/network.cc\n  distributed/distributed.cc\n)\ntarget_include_directories(_distributed SYSTEM PUBLIC ${TORCH_INCLUDE_DIRS})\n\nif (IBV_INCLUDE_DIR AND IBV_LIBRARY)\n  message(STATUS \"Found ibverbs: ${IBV_INCLUDE_DIR}/infiniband/verbs.h ${IBV_LIBRARY}\")\n  target_sources(_distributed PRIVATE\n    distributed/ib.cc)\n  target_include_directories(_distributed SYSTEM PUBLIC ${IBV_INCLUDE_DIR})\n  target_link_libraries(libpolygames PUBLIC ${IBV_LIBRARY})\nelse()\n  message(STATUS \"ibverbs NOT found, InfiniBand support will be disabled!\")\n  target_sources(_distributed PRIVATE\n    distributed/rdma_nop.cc)\nendif()\n\nadd_library(_common OBJECT\n  common/thread_id.cc\n  common/threads.cc\n  )\n\nset(_games_SOURCES\n  games/gomoku_swap2.cc\n  games/othello_opt.cc\n  games/mastermind_state.cc\n  games/amazons.cc\n  games/breakthrough.cc\n  games/chess.cc\n  games/chinesecheckers.cc\n  games/tristan_nogo.cc\n  games/yinsh.cc\n  games/minesweeper.cc\n  games/weakschur/SchurMatrix.cpp\n  games/weakschur/SchurVector.cpp\n  games/weakschur/WeakSchur.cpp)\n\nif (JNI_FOUND)\n  list(APPEND _games_SOURCES\n    games/ludii/jni_utils.cc\n    games/ludii/ludii_game_wrapper.cc\n    
games/ludii/ludii_state_wrapper.cc)\nendif()\n\nadd_library(_games\n  ${_games_SOURCES})\ntarget_include_directories(_games PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/torchRL)\ntarget_include_directories(_games SYSTEM PUBLIC ${TORCH_INCLUDE_DIRS})\ntarget_include_directories(_games PUBLIC ${PYTHON_INCLUDE_DIRS})\n\ntarget_sources(libpolygames PRIVATE\n  core/game.cc\n  core/state.cc\n  core/replay_buffer.cc\n  core/model_manager.cc\n  $<TARGET_OBJECTS:_zstd>\n  $<TARGET_OBJECTS:_distributed>\n  $<TARGET_OBJECTS:_common>\n)\n\ntarget_include_directories(libpolygames PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/torchRL)\ntarget_link_libraries(libpolygames PUBLIC _tube _mcts _games)\nset_target_properties(libpolygames PROPERTIES PREFIX \"\")\n\nif (JNI_FOUND)\n  target_include_directories(_games PUBLIC ${JNI_INCLUDE_DIRS})\n  target_link_libraries(libpolygames PUBLIC ${JNI_LIBRARIES})\nendif()\n\n\npybind11_add_module(polygames\n  core/pybind.cc\n)\ntarget_link_libraries(polygames PUBLIC libpolygames)\n"
  },
  {
    "path": "src/common/async.h",
    "content": "#pragma once\n\n#include <condition_variable>\n#include <deque>\n#include <functional>\n#include <future>\n#include <list>\n#include <mutex>\n#include <thread>\n#include <vector>\n\n#ifdef _POSIX_C_SOURCE\n#include <semaphore.h>\n#endif\n\nnamespace async {\n\n#ifdef _POSIX_C_SOURCE\nclass Semaphore {\n  sem_t sem;\n\n public:\n  Semaphore() {\n    sem_init(&sem, 0, 0);\n  }\n  ~Semaphore() {\n    sem_destroy(&sem);\n  }\n  void post() {\n    sem_post(&sem);\n  }\n  void wait() {\n    sem_wait(&sem);\n  }\n};\n#else\nclass Semaphore {\n  int count_ = 0;\n  std::mutex mut_;\n  std::condition_variable cv_;\n\n public:\n  void post() {\n    std::unique_lock l(mut_);\n    if (++count_ >= 1) {\n      cv_.notify_one();\n    }\n  }\n  void wait() {\n    std::unique_lock l(mut_);\n    while (count_ == 0) {\n      cv_.wait(l);\n    }\n    --count_;\n  }\n};\n#endif\n\nstruct Function {\n  Function* next = nullptr;\n  int priority = 0;\n  void* storage;\n  size_t allocated = 0;\n  void (*dtor)(void*);\n  void (*call)(void*);\n  Function() = default;\n  Function(const Function&) = delete;\n  Function& operator=(const Function&) = delete;\n  Function(Function&& n) {\n    storage = n.storage;\n    allocated = n.allocated;\n    dtor = n.dtor;\n    call = n.call;\n    n.storage = nullptr;\n  }\n  Function& operator=(Function&& n) {\n    std::swap(storage, n.storage);\n    std::swap(allocated, n.allocated);\n    std::swap(dtor, n.dtor);\n    std::swap(call, n.call);\n    return *this;\n  }\n  template <typename F> Function(F&& f) {\n    storage = std::malloc(sizeof(F));\n    if (!storage) {\n      throw std::bad_alloc();\n    }\n    allocated = sizeof(F);\n    try {\n      new (storage) F(std::forward<F>(f));\n    } catch (...) 
{\n      std::free(storage);\n      throw;\n    }\n    dtor = [](void* ptr) noexcept {\n      ((F*)ptr)->~F();\n    };\n    call = [](void* ptr) noexcept {\n      (*(F*)ptr)();\n    };\n  }\n  template <typename F> Function& operator=(F&& f) {\n    if (allocated < sizeof(F)) {\n      void* newStorage = std::malloc(sizeof(F));\n      if (!newStorage) {\n        throw std::bad_alloc();\n      }\n      if (storage) {\n        dtor(storage);\n        std::free(storage);\n      }\n      storage = newStorage;\n      allocated = sizeof(F);\n    } else {\n      if (storage) {\n        dtor(storage);\n      }\n    }\n    try {\n      new (storage) F(std::forward<F>(f));\n    } catch (...) {\n      std::free(storage);\n      throw;\n    }\n    dtor = [](void* ptr) { ((F*)ptr)->~F(); };\n    call = [](void* ptr) { (*(F*)ptr)(); };\n\n    return *this;\n  }\n  ~Function() {\n    if (storage) {\n      dtor(storage);\n      std::free(storage);\n    }\n  }\n  void operator()() {\n    call(storage);\n  }\n};\n\ntemplate <typename Thread> struct HandleT {\n  Function* func = nullptr;\n  Thread* thread = nullptr;\n  HandleT() = default;\n  HandleT(Function* func, Thread* thread)\n      : func(func)\n      , thread(thread) {\n  }\n  HandleT(HandleT&& n) {\n    func = std::exchange(n.func, nullptr);\n    thread = std::exchange(n.thread, nullptr);\n  }\n  HandleT(const HandleT&) = delete;\n  HandleT& operator=(HandleT&& n) {\n    std::swap(func, n.func);\n    std::swap(thread, n.thread);\n    return *this;\n  }\n  HandleT& operator=(const HandleT&) = delete;\n  ~HandleT() {\n    if (func) {\n      Function* ftmp = thread->freelist;\n      do {\n        func->next = ftmp;\n      } while (!thread->freelist.compare_exchange_weak(ftmp, func));\n    }\n  }\n  void setPriority(int value) {\n    func->priority = value;\n  }\n  explicit operator bool() const {\n    return func;\n  }\n};\n\nusing Handle = HandleT<struct Thread>;\n\nstruct Thread {\n\n  std::thread thread;\n  
std::atomic<Function*> queue = nullptr;\n  std::atomic<Function*> freelist = nullptr;\n  Function* internalqueue = nullptr;\n  bool dead = false;\n\n  Semaphore sem;\n\n  Thread() = default;\n\n  void threadEntry() {\n    while (true) {\n      Function* f = queue;\n      while (!f) {\n        if (dead) {\n          return;\n        }\n        sem.wait();\n        f = queue;\n      }\n      while (!queue.compare_exchange_weak(f, f->next))\n        ;\n      if (internalqueue || queue) {\n        do {\n          while (f) {\n            Function** insert = &internalqueue;\n            Function* next = internalqueue;\n            while (next && next->priority <= f->priority) {\n              insert = &next->next;\n              next = next->next;\n            }\n            f->next = next;\n            *insert = f;\n\n            f = queue;\n            while (f && !queue.compare_exchange_weak(f, f->next))\n              ;\n          }\n\n          f = internalqueue;\n          internalqueue = f->next;\n\n          (*f)();\n\n          f = queue;\n          while (f && !queue.compare_exchange_weak(f, f->next))\n            ;\n        } while (f || internalqueue);\n      } else {\n        (*f)();\n      }\n    }\n  }\n\n  void enqueue(Function* func) {\n    Function* qtmp = queue;\n    do {\n      func->next = qtmp;\n    } while (!queue.compare_exchange_weak(qtmp, func));\n    sem.post();\n  }\n\n  template <typename F> Handle getHandle(F&& f) {\n    Function* func = freelist;\n    while (func && !freelist.compare_exchange_weak(func, func->next))\n      ;\n    if (!func) {\n      func = new Function();\n    }\n    Handle h(func, this);\n    *func = std::forward<F>(f);\n    return h;\n  }\n};\n\nstruct Threads {\n\n  std::atomic_size_t nextThread = 0;\n  std::deque<Thread> threads;\n\n  size_t size() const {\n    return threads.size();\n  }\n\n  Thread& getThread() {\n    return threads[nextThread++ % threads.size()];\n  }\n\n  void enqueue(const Handle& h) {\n    
h.thread->enqueue(h.func);\n  }\n\n  Threads() = default;\n  Threads(int nThreads) {\n    start(nThreads);\n  }\n\n  void start(int nThreads) {\n    for (int i = 0; i != nThreads; ++i) {\n      threads.emplace_back();\n      Thread* t = &threads.back();\n      threads.back().thread = std::thread([t]() { t->threadEntry(); });\n    }\n  }\n\n  ~Threads() {\n    for (auto& v : threads) {\n      v.dead = true;\n      v.sem.post();\n    }\n    for (auto& v : threads) {\n      v.thread.join();\n    }\n  }\n};\n\nstruct Task {\n  Semaphore sem;\n  std::atomic_int liveCount{0};\n\n  Threads* threads = nullptr;\n  Task() = default;\n  Task(Threads& threads)\n      : threads(&threads) {\n  }\n  ~Task() {\n    wait();\n  }\n  Task& operator=(const Task& n) {\n    if (liveCount || n.liveCount) {\n      throw std::runtime_error(\"attempt to copy active Task object\");\n    }\n    threads = n.threads;\n    return *this;\n  }\n\n  template <typename F> Handle getHandle(Thread& thread, F&& f) {\n    return thread.getHandle([f = std::forward<F>(f), this]() mutable {\n      f();\n      if (--liveCount == 0) {\n        sem.post();\n      }\n    });\n  }\n\n  void enqueue(const Handle& h) {\n    ++liveCount;\n    threads->enqueue(h);\n  }\n\n  void wait() {\n    while (liveCount.load(std::memory_order_relaxed) != 0) {\n      sem.wait();\n    }\n  }\n};\n\n}  // namespace async\n"
  },
  {
    "path": "src/common/thread_id.cc",
    "content": "\n#include <atomic>\n\nnamespace {\nstd::atomic_int threadIdCounter{0};\nthread_local int threadId = ++threadIdCounter;\n}  // namespace\n\nnamespace common {\n\nint getThreadId() {\n  return threadId;\n}\n\n}  // namespace common\n"
  },
  {
    "path": "src/common/thread_id.h",
    "content": "\nnamespace common {\n\nint getThreadId();\n}\n"
  },
  {
    "path": "src/common/threads.cc",
    "content": "\n#include \"threads.h\"\n\nnamespace threads {\n\nasync::Threads threads;\nstd::once_flag flag;\n\nvoid init(int nThreads) {\n\n  std::call_once(\n      flag,\n      [](int nThreads) {\n        if (nThreads <= 0) {\n          nThreads = std::thread::hardware_concurrency();\n          if (nThreads <= 0) {\n            throw std::runtime_error(\"Could not automatically determine the \"\n                                     \"number of hardware threads :(\");\n          }\n          printf(\"Starting %d threads (automatically configured)\\n\", nThreads);\n        } else {\n          printf(\"Starting %d threads\\n\", nThreads);\n        }\n\n        threads.start(nThreads);\n\n        async::Task task(threads);\n        std::vector<async::Handle> handles;\n\n        for (int i = 0; i != nThreads; ++i) {\n          auto& thread = threads.getThread();\n          auto h = task.getHandle(thread, [i]() {\n            setCurrentThreadName(\"async \" + std::to_string(i));\n          });\n          task.enqueue(h);\n          handles.push_back(std::move(h));\n        }\n\n        task.wait();\n      },\n      nThreads);\n}\n\nvoid setCurrentThreadName(const std::string& name) {\n#ifdef __APPLE__\n  pthread_setname_np(name.c_str());\n#elif __linux__\n  pthread_setname_np(pthread_self(), name.c_str());\n#endif\n}\n\n}  // namespace threads\n"
  },
  {
    "path": "src/common/threads.h",
    "content": "\n#include \"async.h\"\n\n#include <string>\n\nnamespace threads {\n\nextern async::Threads threads;\n\nvoid init(int nThreads);\nvoid setCurrentThreadName(const std::string& name);\n\n}  // namespace threads\n"
  },
  {
    "path": "src/core/actor.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"model_manager.h\"\n#include \"tube/src_cpp/data_block.h\"\n#include \"tube/src_cpp/dispatcher.h\"\n\n#include \"state.h\"\n#include \"utils.h\"\n\n//#define DEBUG_ACTOR\n\nnamespace core {\n\nclass PiVal {\n public:\n  PiVal() {\n    reset();\n  }\n\n  void reset() {\n    playerId = -999;\n    value = 0.0f;\n    logitPolicy.reset();\n    rnnState.reset();\n  }\n\n  int playerId;\n  float value;\n  torch::Tensor logitPolicy;\n  torch::Tensor rnnState;\n};\n\nclass Actor {\n public:\n  Actor(std::shared_ptr<tube::DataChannel> dc,\n        const std::vector<int64_t>& featSize,\n        const std::vector<int64_t>& actionSize,\n        const std::vector<int64_t>& rnnStateSize,\n        int rnnSeqlen,\n        bool logitValue,\n        bool useValue,\n        bool usePolicy,\n        std::shared_ptr<ModelManager> modelManager)\n      : dispatcher_(std::move(dc))\n      , useValue_(useValue)\n      , usePolicy_(usePolicy)\n      , policySize_(actionSize)\n      , uniformPolicy_(1.0 / product(actionSize))\n      , rnnStateSize_(rnnStateSize)\n      , rnnSeqlen_(rnnSeqlen)\n      , logitValue_(logitValue)\n      , modelManager_(modelManager) {\n    if (!useValue && !usePolicy_) {\n      return;\n    }\n\n    feat_ = std::make_shared<tube::DataBlock>(\"s\", featSize, torch::kFloat32);\n    pi_ = std::make_shared<tube::DataBlock>(\n        \"pi_logit\", actionSize, torch::kFloat32);\n    value_ = std::make_shared<tube::DataBlock>(\n        \"v\", std::initializer_list<int64_t>{logitValue ? 
3 : 1},\n        torch::kFloat32);\n\n    if (!rnnStateSize.empty()) {\n      rnnState_ = std::make_shared<tube::DataBlock>(\n          \"rnn_state\", rnnStateSize, torch::kFloat32);\n      rnnStateOut_ = std::make_shared<tube::DataBlock>(\n          \"rnn_state_out\", rnnStateSize, torch::kFloat32);\n    }\n\n    if (rnnStateSize.empty()) {\n      dispatcher_.addDataBlocks({feat_}, {pi_, value_});\n    } else {\n      dispatcher_.addDataBlocks(\n          {feat_, rnnState_}, {pi_, value_, rnnStateOut_});\n    }\n  }\n\n  PiVal& evaluate(const core::State& s, PiVal& pival) {\n    const auto state = dynamic_cast<const State*>(&s);\n    assert(state != nullptr);\n\n    // termination should be handled by mcts\n    assert(!state->terminated());\n\n    bool resultsAreValid = false;\n    if (useValue_ || usePolicy_) {\n      getFeatureInTensor(*state, feat_->data);\n      int errcode = dispatcher_.dispatch();\n      switch (errcode) {\n      case tube::Dispatcher::DISPATCH_ERR_DC_TERM:\n#ifdef DEBUG_ACTOR\n        std::cout << \"actor \" << this << \": attempt to dispatch through\"\n                  << \" a terminated data channel \" << std::endl;\n#endif\n        break;\n      case tube::Dispatcher::DISPATCH_ERR_NO_SLOT:\n#ifdef DEBUG_ACTOR\n        std::cout << \"actor \" << this << \": no slots available to dispatch\"\n                  << std::endl;\n#endif\n        break;\n      case tube::Dispatcher::DISPATCH_NOERR:\n        resultsAreValid = true;\n      }\n    }\n\n    float val;\n    torch::Tensor policy;\n    if (useValue_ && resultsAreValid) {\n      if (logitValue_) {\n        float* begin = value_->data.data_ptr<float>();\n        float* end = begin + 3;\n        softmax_(begin, end);\n      }\n      val = logitValue_\n                ? 
value_->data[0].item<float>() - value_->data[1].item<float>()\n                : value_->data.item<float>();\n    } else {\n      val = state->getRandomRolloutReward(state->getCurrentPlayer());\n    }\n    if (usePolicy_ && resultsAreValid) {\n      policy = pi_->data;\n    } else {\n      policy = torch::zeros(policySize_, torch::kFloat32);\n      policy.fill_(uniformPolicy_);\n    }\n\n    pival.logitPolicy = policy.clone();\n    pival.playerId = state->getCurrentPlayer();\n    pival.value = val;\n    if (rnnStateOut_) {\n      pival.rnnState = rnnStateOut_->data.clone();\n    }\n    return pival;\n  }\n\n  void terminate() {\n    dispatcher_.terminate();\n  }\n\n  void batchResize(size_t n) {\n    if (!modelManager_) {\n      return;\n    }\n    if (!batchFeat_.defined() || batchFeat_[0].sizes() != feat_->data.sizes() ||\n        batchFeat_.size(0) < n) {\n      auto allocBatch = [&](auto&& sizes) {\n        std::vector<int64_t> s1(sizes.begin(), sizes.end());\n        s1.insert(s1.begin(), n);\n        if (modelManager_ && modelManager_->isCuda()) {\n          return torch::empty(\n              s1, at::TensorOptions().pinned_memory(true).requires_grad(false));\n        } else {\n          return torch::empty(s1, at::TensorOptions().requires_grad(false));\n        }\n      };\n      batchFeat_ = allocBatch(feat_->data.sizes());\n      batchPi_ = allocBatch(pi_->data.sizes());\n      batchValue_ = allocBatch(value_->data.sizes());\n\n      valueAcc_ = batchValue_.accessor<float, 2>();\n      piAcc_ = batchPi_.accessor<float, 4>();\n      featAcc_ = batchFeat_.accessor<float, 4>();\n    }\n    if (rnnState_) {\n      rnnStateStack_.resize(n);\n    }\n  }\n  void batchPrepare(size_t index,\n                    const core::State& s,\n                    torch::Tensor rnnState) {\n    if (!modelManager_) {\n      if (rnnState.defined()) {\n        rnnState_->data.copy_(rnnState);\n      }\n      return;\n    }\n    getFeatureInTensor(*dynamic_cast<const State*>(&s), 
featAcc_[index].data());\n    if (!useValue_) {\n      batchValue_[index][0] = s.getRandomRolloutReward(s.getCurrentPlayer());\n    }\n    if (rnnState.defined()) {\n      if (rnnState.device() != device()) {\n        rnnState = rnnState.to(device());\n      }\n      rnnStateStack_.at(index) = rnnState;\n    }\n  }\n  void batchEvaluate(size_t n) {\n    if (!modelManager_) {\n      return;\n    }\n    if (useValue_ || usePolicy_) {\n      if (rnnState_) {\n        modelManager_->batchAct(\n            batchFeat_.narrow(0, 0, n), batchValue_.narrow(0, 0, n),\n            batchPi_.narrow(0, 0, n), torch::stack(rnnStateStack_),\n            &batchRnnStateOut_);\n      } else {\n        modelManager_->batchAct(batchFeat_.narrow(0, 0, n),\n                                batchValue_.narrow(0, 0, n),\n                                batchPi_.narrow(0, 0, n));\n      }\n    }\n  }\n  void batchResult(size_t index, const core::State& s, PiVal& pival) {\n    if (!modelManager_) {\n      evaluate(s, pival);\n      return;\n    }\n    if (logitValue_) {\n      float* begin = &valueAcc_[index][0];\n      float* end = begin + 3;\n      softmax_(begin, end);\n    }\n    float val = logitValue_ ? valueAcc_[index][0] - valueAcc_[index][1]\n                            : valueAcc_[index][0];\n    pival.logitPolicy = batchPi_[index].clone();\n    pival.playerId = s.getCurrentPlayer();\n    pival.value = val;\n    if (rnnState_) {\n      pival.rnnState = batchRnnStateOut_[index];\n    }\n  }\n\n  void recordMove(const core::State* state) {\n    auto id = modelManager_->getTournamentModelId();\n    ++modelTrackers_[state][id];\n  }\n\n  std::string getModelId() const {\n    return modelManager_ ? 
std::string(modelManager_->getTournamentModelId())\n                         : \"dev\";\n  }\n\n  void result(const core::State* state, float reward) {\n    if (modelManager_) {\n      auto i = modelTrackers_.find(state);\n      if (i != modelTrackers_.end()) {\n        auto m = std::move(i->second);\n        float sum = 0.0f;\n        for (auto& v : m) {\n          sum += v.second;\n        }\n        for (auto& v : m) {\n          v.second /= sum;\n        }\n        modelManager_->result(reward, std::move(m));\n        modelTrackers_.erase(i);\n      }\n    }\n  }\n\n  void forget(const core::State* state) {\n    if (modelManager_) {\n      auto i = modelTrackers_.find(state);\n      if (i != modelTrackers_.end()) {\n        modelTrackers_.erase(i);\n      }\n    }\n  }\n\n  bool isTournamentOpponent() const {\n    return modelManager_ ? modelManager_->isTournamentOpponent() : false;\n  }\n\n  bool wantsTournamentResult() const {\n    return modelManager_ ? modelManager_->wantsTournamentResult() : false;\n  }\n\n  std::vector<int64_t> rnnStateSize() const {\n    return rnnStateSize_;\n  }\n\n  int rnnSeqlen() const {\n    return rnnSeqlen_;\n  }\n\n  int vOutputs() const {\n    return logitValue_ ? 3 : 1;\n  }\n\n  int findBatchSize(const core::State& state) const {\n    if (modelManager_) {\n      if (rnnState_) {\n        return modelManager_->findBatchSize(\n            getFeatureInTensor(*dynamic_cast<const State*>(&state)),\n            torch::zeros(rnnStateSize_));\n      } else {\n        return modelManager_->findBatchSize(\n            getFeatureInTensor(*dynamic_cast<const State*>(&state)));\n      }\n    }\n    return 0;\n  }\n\n  bool isCuda() const {\n    return modelManager_ ? modelManager_->isCuda() : false;\n  }\n\n  torch::Device device() const {\n    return modelManager_ ? 
modelManager_->device() : torch::Device(torch::kCPU);\n  }\n\n private:\n  tube::Dispatcher dispatcher_;\n\n  std::shared_ptr<tube::DataBlock> feat_;\n  std::shared_ptr<tube::DataBlock> pi_;\n  std::shared_ptr<tube::DataBlock> value_;\n  std::shared_ptr<tube::DataBlock> rnnState_;\n  std::shared_ptr<tube::DataBlock> rnnStateOut_;\n\n  const bool useValue_;\n  const bool usePolicy_;\n  const std::vector<int64_t> policySize_;\n  const float uniformPolicy_;\n\n  torch::Tensor batchFeat_;\n  torch::Tensor batchPi_;\n  torch::Tensor batchValue_;\n\n  torch::Tensor batchRnnStateOut_;\n\n  torch::TensorAccessor<float, 2> valueAcc_{nullptr, nullptr, nullptr};\n  torch::TensorAccessor<float, 4> piAcc_{nullptr, nullptr, nullptr};\n  torch::TensorAccessor<float, 4> featAcc_{nullptr, nullptr, nullptr};\n\n  std::vector<torch::Tensor> rnnStateStack_;\n  torch::Tensor rnnStateStackResult_;\n\n  std::unordered_map<const core::State*,\n                     std::unordered_map<std::string_view, float>>\n      modelTrackers_;\n\n  const std::vector<int64_t> rnnStateSize_;\n  int rnnSeqlen_ = 0;\n  bool logitValue_ = false;\n  std::shared_ptr<ModelManager> modelManager_;\n};\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/actor_player.h",
    "content": "#pragma once\n\n#include \"actor.h\"\n#include \"player.h\"\n\nnamespace core {\n\nclass ActorPlayer : public Player {\n public:\n  ActorPlayer()\n      : Player(false) {\n  }\n\n  void setActor(std::shared_ptr<Actor> actor) {\n    actor_ = std::move(actor);\n  }\n\n  void recordMove(const core::State* state) {\n    actor_->recordMove(state);\n  }\n\n  void result(const core::State* state, float reward) {\n    actor_->result(state, reward);\n  }\n\n  void forget(const core::State* state) {\n    actor_->forget(state);\n  }\n\n  bool isTournamentOpponent() const {\n    return actor_->isTournamentOpponent();\n  }\n\n  bool wantsTournamentResult() const {\n    return actor_->wantsTournamentResult();\n  }\n\n  std::string getModelId() const {\n    return actor_->getModelId();\n  }\n\n  float calculateValue(const core::State& state) {\n    PiVal result;\n    actor_->evaluate(state, result);\n    return result.value;\n  }\n\n  std::vector<torch::Tensor> nextRnnState(\n      const std::vector<const core::State*>& state,\n      const std::vector<torch::Tensor>& rnnState) {\n    PiVal result;\n    actor_->batchResize(state.size());\n    for (size_t i = 0; i != state.size(); ++i) {\n      actor_->batchPrepare(i, *state[i], rnnState[i]);\n    }\n    actor_->batchEvaluate(state.size());\n    std::vector<torch::Tensor> r;\n    for (size_t i = 0; i != state.size(); ++i) {\n      actor_->batchResult(i, *state[i], result);\n      r.push_back(result.rnnState);\n    }\n    return r;\n  }\n\n  std::vector<int64_t> rnnStateSize() const {\n    return actor_->rnnStateSize();\n  }\n\n  int rnnSeqlen() const {\n    return actor_->rnnSeqlen();\n  }\n\n  int vOutputs() const {\n    return actor_->vOutputs();\n  }\n\n  int findBatchSize(const core::State& state) const {\n    return actor_->findBatchSize(state);\n  }\n\n  virtual void terminate() override {\n    if (actor_) {\n      actor_->terminate();\n    }\n  }\n\n protected:\n  std::shared_ptr<Actor> actor_;\n};\n\n}  // 
namespace core\n"
  },
  {
    "path": "src/core/forward_player.h",
    "content": "#pragma once\n\n#include \"actor_player.h\"\n\nnamespace core {\n\nclass ForwardPlayer : public ActorPlayer {\n public:\n  void batchResize(size_t n) {\n    actor_->batchResize(n);\n  }\n  void batchPrepare(size_t index, const State& s, torch::Tensor rnnState) {\n    actor_->batchPrepare(index, s, rnnState);\n  }\n  void batchEvaluate(size_t n) {\n    actor_->batchEvaluate(n);\n  }\n  void batchResult(size_t index, const State& s, PiVal& pival) {\n    actor_->batchResult(index, s, pival);\n  }\n};\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/game.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"game.h\"\n#include \"common/thread_id.h\"\n#include \"common/threads.h\"\n#include \"forward_player.h\"\n#include \"utils.h\"\n\n#include <fmt/printf.h>\n\nnamespace core {\n\nstruct BatchExecutor {\n\n  struct MoveHistory {\n    int turn = 0;\n    uint64_t move = 0;\n    float value = 0.0f;\n    torch::Tensor shortFeat;\n    bool featurized = false;\n  };\n\n  struct Sequence {\n    std::vector<torch::Tensor> feat;\n    std::vector<torch::Tensor> v;\n    std::vector<torch::Tensor> pi;\n    std::vector<torch::Tensor> piMask;\n    std::vector<torch::Tensor> actionPi;\n    std::vector<torch::Tensor> predV;\n    torch::Tensor rnnInitialState;\n    std::vector<torch::Tensor> rnnStateMask;\n    std::vector<torch::Tensor> predictPi;\n    std::vector<torch::Tensor> predictPiMask;\n  };\n\n  struct GameState {\n    std::unique_ptr<State> state;\n    std::vector<std::unique_ptr<State>> playerState;\n    std::vector<size_t> players;\n    std::vector<size_t> playersReverseMap;\n    std::vector<std::vector<torch::Tensor>> feat;\n    std::vector<std::vector<torch::Tensor>> pi;\n    std::vector<std::vector<torch::Tensor>> piMask;\n    std::vector<std::vector<torch::Tensor>> rnnStates;\n    std::vector<std::vector<torch::Tensor>> actionPi;\n    std::vector<std::vector<torch::Tensor>> predV;\n    std::vector<std::vector<float>> reward;\n    size_t stepindex;\n    std::chrono::steady_clock::time_point start;\n    std::vector<int> resignCounter;\n    int drawCounter = 0;\n    bool canResign = false;\n    int resigned = -1;\n    bool drawn = false;\n    std::chrono::steady_clock::time_point prevMoveTime =\n        std::chrono::steady_clock::now();\n    std::vector<size_t> playerOrder;\n    std::vector<MoveHistory> history;\n    bool 
justRewound = false;\n    bool justRewoundToNegativeValue = false;\n    int rewindCount = 0;\n    std::vector<torch::Tensor> rnnState;\n    std::vector<torch::Tensor> rnnState2;\n\n    std::vector<int> allowRandomMoves;\n    bool validTournamentGame = false;\n\n    std::vector<size_t> startMoves;\n\n    int randMoveCount = 0;\n  };\n\n  Game* game = nullptr;\n  std::vector<Player*> players_;\n  std::unique_ptr<State> basestate;\n  std::minstd_rand rng{std::random_device{}()};\n  std::vector<Sequence> seqs;\n  std::list<GameState> states;\n  std::list<GameState> freeGameList;\n  int64_t startedGameCount = 0;\n  int64_t completedGameCount = 0;\n  float runningAverageGameSteps = 0.0f;\n  ActorPlayer* devPlayer = nullptr;\n  std::vector<ActorPlayer*> actorPlayers;\n  std::vector<mcts::MctsPlayer*> mctsPlayers;\n  std::vector<ForwardPlayer*> forwardPlayers;\n  std::vector<float> result_;\n  std::vector<std::vector<const State*>> actStates;\n  std::vector<std::vector<const State*>> actPlayerStates;\n  std::vector<std::vector<GameState*>> actGameStates;\n  std::vector<const State*> playerActStates;\n  bool alignPlayers = true;\n  std::vector<std::pair<size_t, size_t>> statePlayerSize;\n  std::vector<size_t> remapPlayerIdx;\n  async::Task task;\n  std::vector<torch::Tensor> actRnnState;\n  const mcts::MctsOption* mctsOption = nullptr;\n  std::vector<mcts::MctsResult> mctsResult;\n  mutable std::mutex recordMoveMutex;\n\n  int randint(int n) {\n    return std::uniform_int_distribution<int>(0, n - 1)(rng);\n  }\n\n  std::unique_ptr<State> cloneState(const std::unique_ptr<State>& state) const {\n    return state->clone();\n  }\n\n  void doRandomMoves(GameState& gst, int n) {\n    auto o = cloneState(gst.state);\n    std::vector<size_t> moves;\n    for (; n > 0; --n) {\n      if (gst.state->terminated()) {\n        break;\n      }\n      size_t n = randint(gst.state->GetLegalActions().size());\n      moves.push_back(n);\n      gst.state->forward(n);\n    }\n    if 
(gst.state->terminated()) {\n      gst.state = std::move(o);\n    } else {\n      for (auto m : moves) {\n        for (auto& x : gst.playerState) {\n          if (x) {\n            x->forward(m);\n          }\n        }\n      }\n      // fmt::printf(\"Did %d random moves: '%s'\\n\", gst.state->getStepIdx(),\n      // gst.state->history());\n    }\n    gst.startMoves = std::move(moves);\n  };\n\n  std::list<GameState>::iterator addGame(std::list<GameState>::iterator at) {\n    if (!freeGameList.empty()) {\n      GameState gst = std::move(freeGameList.front());\n      freeGameList.pop_front();\n      return states.insert(at, std::move(gst));\n    }\n    ++startedGameCount;\n    GameState gst;\n    for (size_t i = 0; i != players_.size(); ++i) {\n      gst.players.push_back(i);\n    }\n    std::shuffle(gst.players.begin(), gst.players.end(), rng);\n    gst.playersReverseMap.resize(players_.size());\n    for (size_t i = 0; i != players_.size(); ++i) {\n      gst.playersReverseMap[gst.players[i]] = i;\n    }\n    gst.state = cloneState(basestate);\n    unsigned long seed = rng();\n    gst.state->newGame(seed);\n    gst.playerState.resize(players_.size());\n    for (size_t i = 0; i != players_.size(); ++i) {\n      std::unique_ptr<State> s = nullptr;\n      int index = gst.players[i];\n      if (&*game->playerGame_[index] != game) {\n        s = cloneState(game->playerGame_[index]->state_);\n        s->newGame(seed);\n      }\n      gst.playerState[i] = std::move(s);\n    }\n    gst.feat.resize(players_.size());\n    gst.pi.resize(players_.size());\n    gst.piMask.resize(players_.size());\n    gst.reward.resize(players_.size());\n    gst.rnnState.resize(players_.size());\n    gst.rnnState2.resize(players_.size());\n    gst.rnnStates.resize(players_.size());\n    gst.actionPi.resize(players_.size());\n    gst.predV.resize(players_.size());\n    gst.stepindex = 0;\n    gst.start = std::chrono::steady_clock::now();\n    gst.resignCounter.resize(players_.size());\n    
gst.canResign = !game->evalMode && players_.size() == 2 && randint(3) != 0;\n    gst.validTournamentGame = true;\n    gst.allowRandomMoves.resize(players_.size());\n    for (auto& v : gst.allowRandomMoves) {\n      v = randint(4) == 0;\n    }\n    if (randint(250) == 0) {\n      switch (randint(2)) {\n      case 0:\n        doRandomMoves(gst, randint(std::max((int)runningAverageGameSteps, 1)));\n        break;\n      case 1:\n        doRandomMoves(\n            gst, randint(std::max((int)runningAverageGameSteps / 10, 1)));\n        break;\n      case 2:\n        doRandomMoves(\n            gst, randint(std::max((int)runningAverageGameSteps / 5, 1)));\n        break;\n      }\n      gst.validTournamentGame = false;\n    }\n    return states.insert(at, std::move(gst));\n  }\n\n  bool rewind(GameState* s, int player, bool rewindToNegativeValue) const {\n    if (s->history.size() <= 2) {\n      // fmt::printf(\"refusing to rewind with history size %d\\n\",\n      // s->history.size());\n      return false;\n    }\n    float flip = rewindToNegativeValue ? 
-1 : 1;\n    size_t index = 0;\n    for (index = s->history.size(); index;) {\n      --index;\n      auto& h = s->history[index];\n      if (h.turn == player && h.value * flip > 0) {\n        break;\n      }\n    }\n    if (index <= 2) {\n      // fmt::printf(\"refusing to rewind to index %d\\n\", index);\n      return false;\n    }\n    if (!s->rnnStates.empty() || !s->rnnState.empty() ||\n        !s->rnnState2.empty()) {\n      bool rnn = false;\n      for (auto& v : actorPlayers) {\n        if (v->rnnSeqlen()) {\n          rnn = true;\n        }\n      }\n      if (rnn) {\n        fmt::printf(\"Cannot currently rewind with rnn states, sorry :(\\n\");\n        return false;\n      }\n    }\n    fmt::printf(\"rewinding from %d to index %d\\n\", s->history.size(), index);\n    s->justRewound = true;\n    s->justRewoundToNegativeValue = rewindToNegativeValue;\n\n    auto& gst = *s;\n    gst.state = cloneState(basestate);\n\n    for (size_t i = 0; i != gst.playerState.size(); ++i) {\n      auto& x = gst.playerState[i];\n      if (x) {\n        int player = gst.players.at(i);\n        x = cloneState(game->playerGame_.at(player)->state_);\n      }\n    }\n\n    for (auto m : gst.startMoves) {\n      gst.state->forward(m);\n      for (auto& x : gst.playerState) {\n        if (x) {\n          x->forward(m);\n        }\n      }\n    }\n\n    for (auto& v : gst.feat) {\n      v.clear();\n    }\n    for (auto& v : gst.pi) {\n      v.clear();\n    }\n    for (auto& v : gst.piMask) {\n      v.clear();\n    }\n    for (auto& v : gst.reward) {\n      v.clear();\n    }\n    for (auto& v : gst.actionPi) {\n      v.clear();\n    }\n    for (auto& v : gst.predV) {\n      v.clear();\n    }\n    for (auto& v : gst.resignCounter) {\n      v = 0;\n    }\n    gst.drawCounter = 0;\n    gst.resigned = -1;\n    gst.drawn = false;\n\n    gst.history.resize(index);\n    for (auto& v : gst.history) {\n      v.featurized = false;\n      gst.state->forward(v.move);\n      for (auto& x : 
gst.playerState) {\n        if (x) {\n          x->forward(v.move);\n        }\n      }\n    }\n    return true;\n  }\n\n  using stateCallback = void (BatchExecutor::*)(GameState*,\n                                                int currentPlayerIndex,\n                                                size_t index) const;\n\n  void actPrepareRnn(GameState* gameState,\n                     int currentPlayerIndex,\n                     size_t index) const {\n    size_t slot = gameState->playersReverseMap.at(currentPlayerIndex);\n\n    if (!gameState->rnnState.at(slot).defined()) {\n      auto shape = actorPlayers.at(currentPlayerIndex)->rnnStateSize();\n      gameState->rnnState[slot] = torch::zeros(shape);\n    }\n\n    const_cast<torch::Tensor&>(actRnnState[index]) =\n        std::move(gameState->rnnState[slot]);\n    gameState->rnnState[slot].reset();\n\n    if (&*game->playerGame_.at(currentPlayerIndex) == game) {\n      gameState->rnnStates.at(slot).push_back(actRnnState[index].cpu());\n    }\n  }\n\n  void actPrepareForward(GameState* gameState,\n                         int currentPlayerIndex,\n                         size_t index) const {\n    auto* player = forwardPlayers.at(currentPlayerIndex);\n    if (actRnnState.empty()) {\n      player->batchPrepare(index, *gameState->state, {});\n    } else {\n      player->batchPrepare(index, *gameState->state, actRnnState.at(index));\n    }\n  }\n\n  void actResult(GameState* gameState,\n                 int currentPlayerIndex,\n                 size_t index) const {\n    State* state = &*gameState->state;\n    size_t slot = gameState->playersReverseMap.at(currentPlayerIndex);\n\n    if (gameState->rnnState.at(slot).defined()) {\n      throw std::runtime_error(\"rnnState is not empty error\");\n    }\n    mcts::MctsPlayer* mctsPlayer = mctsPlayers[currentPlayerIndex];\n    ForwardPlayer* forwardPlayer = forwardPlayers[currentPlayerIndex];\n    PiVal pival;\n    int bestAction = -1;\n    float value = 0.0f;\n\n    
thread_local std::mt19937_64 tlrng(std::random_device{}());\n\n    if (forwardPlayer) {\n      forwardPlayer->batchResult(index, *state, pival);\n\n      gameState->rnnState[slot] = std::move(pival.rnnState);\n\n      thread_local std::vector<float> x;\n      getLegalPi(*state, pival.logitPolicy, x);\n      softmax_(x);\n\n      bestAction = std::discrete_distribution(x.begin(), x.end())(tlrng);\n\n      int oa = state->overrideAction();\n      if (oa != -1) {\n        bestAction = oa;\n      }\n\n      value = pival.value;\n\n    } else if (mctsPlayer) {\n      gameState->rnnState[slot] = std::move(mctsResult.at(index).rnnState);\n      bestAction = mctsResult.at(index).bestAction;\n      value = mctsResult.at(index).rootValue;\n    } else {\n      throw std::runtime_error(\"unknown player\");\n    }\n\n    if (gameState->canResign) {\n      if (value < -0.95f) {\n        if (++gameState->resignCounter.at(slot) >= 7) {\n          gameState->resigned = int(slot);\n        }\n      } else {\n        gameState->resignCounter.at(slot) = 0;\n      }\n      int opponent =\n          (gameState->playersReverseMap.at(currentPlayerIndex) + 1) % 2;\n      if (value > 0.95f) {\n        ++gameState->resignCounter.at(opponent);\n      } else {\n        gameState->resignCounter.at(opponent) = 0;\n      }\n\n      // Automatic draw disabled for now; this is not the best way to handle\n      // this\n      // TODO: enable this only for models with logit value outputs, since they\n      // have\n      //       a logit specific for draw - this would work best with MCTS\n      //       modification that allows backpropagating the draw probability\n      //      if (gameState->stepindex >= 40 && value >\n      //      -0.05 && value < 0.05) {\n      //        ++gameState->drawCounter;\n      //        if (gameState->drawCounter >= 7) {\n      //          gameState->drawn = true;\n      //        }\n      //      } else {\n      //        gameState->drawCounter = 0;\n      //      }\n 
   }\n    bool saveForTraining = true;\n    // TODO: improve this randomizedRollouts check, 1.5 is a magic number that\n    //       needs to be synchronized with mcts.cc\n    if (mctsOption && mctsOption->randomizedRollouts &&\n        mctsResult.at(index).rollouts <\n            mctsOption->numRolloutPerThread * 1.5f) {\n      saveForTraining = false;\n    }\n    if (saveForTraining) {\n      torch::Tensor feat = getFeatureInTensor(*state);\n      gameState->feat.at(slot).push_back(feat);\n      if (forwardPlayer) {\n        auto actionPolicy = torch::zeros_like(pival.logitPolicy);\n        auto action = state->GetLegalActions().at(bestAction);\n        actionPolicy[action.GetX()][action.GetY()][action.GetZ()] = 1;\n        gameState->actionPi.at(slot).push_back(actionPolicy);\n        gameState->pi.at(slot).push_back(pival.logitPolicy);\n        gameState->piMask.at(slot).push_back(getPolicyMaskInTensor(*state));\n      } else {\n        auto [policy, policyMask] =\n            getPolicyInTensor(*state, mctsResult.at(index).mctsPolicy);\n        gameState->pi.at(slot).push_back(policy);\n        gameState->piMask.at(slot).push_back(policyMask);\n      }\n      torch::Tensor predV = torch::zeros({1}, torch::kFloat32);\n      predV[0] = value;\n      gameState->predV.at(slot).push_back(predV);\n\n      gameState->reward[slot].push_back(state->getReward(slot));\n    }\n\n    gameState->history.emplace_back();\n    auto& h = gameState->history.back();\n    h.turn = slot;\n    h.move = bestAction;\n    h.value = value;\n    h.featurized = saveForTraining;\n    h.shortFeat = getRawFeatureInTensor(*state);\n\n    if (gameState->rewindCount == 0) {\n      std::lock_guard l(recordMoveMutex);\n      actorPlayers[currentPlayerIndex]->recordMove(state);\n    }\n\n    state->forward(bestAction);\n\n    auto now = std::chrono::steady_clock::now();\n    double elapsed = std::chrono::duration_cast<\n                         std::chrono::duration<double, std::ratio<1, 1>>>(\n    
                     now - gameState->prevMoveTime)\n                         .count();\n    gameState->prevMoveTime = now;\n\n    // fmt::printf(\"Thread %d: move took %gs\\n\", common::getThreadId(),\n    // elapsed);\n\n    {\n      std::unique_lock<std::mutex> lkStats(game->mutexStats_);\n      auto& stats_s = game->stats_[\"Move Duration (seconds)\"];\n      std::get<0>(stats_s) += 1;\n      std::get<1>(stats_s) += elapsed;\n      std::get<2>(stats_s) += elapsed * elapsed;\n    }\n\n    if (gameState->justRewound) {\n      float flip = gameState->justRewoundToNegativeValue ? -1.0f : 1.0f;\n      if (h.value * flip < 0.0f) {\n        // fmt::printf(\"rewound turned negative, rewinding more!\\n\");\n        rewind(gameState, slot, gameState->justRewoundToNegativeValue);\n      } else {\n        gameState->justRewound = false;\n      }\n    }\n\n    // fmt::printf(\"game in progress: %s\\n\", state->history());\n  }\n\n  std::vector<stateCallback> stateCallbacks;\n\n  std::vector<async::Handle> taskHandles;\n\n  void prepareStateCallbacks() {\n    stateCallbacks.clear();\n    taskHandles.clear();\n    size_t offset = 0;\n    for (size_t pi = 0; pi != statePlayerSize.size(); ++pi) {\n      size_t currentPlayerIndex = statePlayerSize[pi].first;\n      size_t currentPlayerStates = statePlayerSize[pi].second;\n      for (size_t i = 0; i != currentPlayerStates; ++i) {\n        GameState* gameState = actGameStates.at(currentPlayerIndex).at(i);\n\n        auto f = [this, gameState, currentPlayerIndex, index = offset + i]() {\n          for (auto& cb : stateCallbacks) {\n            (this->*cb)(gameState, currentPlayerIndex, index);\n          }\n        };\n\n        auto& thread = threads::threads.getThread();\n        taskHandles.push_back(task.getHandle(thread, std::move(f)));\n        taskHandles.back().setPriority(common::getThreadId());\n      }\n      offset += currentPlayerStates;\n    }\n  }\n\n  void pushStateCallback(stateCallback cb) {\n    
stateCallbacks.push_back(cb);\n  }\n\n  void runStateCallbacks() {\n    if (stateCallbacks.empty()) {\n      return;\n    }\n    for (auto& v : taskHandles) {\n      task.enqueue(v);\n    }\n    task.wait();\n  }\n\n  void actForPlayer(size_t playerIndex) {\n    // Merge all identical players so they get batched together\n    auto& states = actStates[playerIndex];\n    if (!states.empty()) {\n      statePlayerSize.clear();\n      statePlayerSize.emplace_back(playerIndex, states.size());\n      for (size_t i = 0; i != players_.size(); ++i) {\n        if (i != playerIndex && remapPlayerIdx[i] == playerIndex) {\n          auto& nstates = actStates[i];\n          if (!nstates.empty()) {\n            states.insert(states.end(), nstates.begin(), nstates.end());\n            statePlayerSize.emplace_back(i, nstates.size());\n            nstates.clear();\n          }\n        }\n      }\n      playerActStates.resize(states.size());\n      size_t offset = 0;\n      for (size_t pi = 0; pi != statePlayerSize.size(); ++pi) {\n        size_t currentPlayerIndex = statePlayerSize[pi].first;\n        size_t currentPlayerStates = statePlayerSize[pi].second;\n        for (size_t i = 0; i != currentPlayerStates; ++i) {\n          GameState* gameState = actGameStates.at(currentPlayerIndex).at(i);\n          int slot = gameState->playersReverseMap.at(currentPlayerIndex);\n          if (gameState->playerState.at(slot)) {\n            playerActStates.at(offset + i) = &*gameState->playerState[slot];\n          } else {\n            playerActStates.at(offset + i) = states.at(offset + i);\n          }\n        }\n        offset += currentPlayerStates;\n      }\n\n      prepareStateCallbacks();\n\n      if (actorPlayers.at(playerIndex)->rnnSeqlen()) {\n        actRnnState.resize(states.size());\n        pushStateCallback(&BatchExecutor::actPrepareRnn);\n      } else {\n        actRnnState.clear();\n      }\n\n      mctsResult.clear();\n\n      if (forwardPlayers.at(playerIndex)) {\n        
forwardPlayers[playerIndex]->batchResize(states.size());\n        pushStateCallback(&BatchExecutor::actPrepareForward);\n        runStateCallbacks();\n        forwardPlayers[playerIndex]->batchEvaluate(states.size());\n      } else {\n\n        runStateCallbacks();\n\n        mctsResult =\n            mctsPlayers.at(playerIndex)->actMcts(playerActStates, actRnnState);\n      }\n\n      stateCallbacks.clear();\n\n      // allowRandomMoves support\n      // TODO: move into a state callback\n      if (mctsPlayers.at(playerIndex)) {\n        offset = 0;\n        for (size_t pi = 0; pi != statePlayerSize.size(); ++pi) {\n          size_t currentPlayerIndex = statePlayerSize[pi].first;\n          size_t currentPlayerStates = statePlayerSize[pi].second;\n          for (size_t i = 0; i != currentPlayerStates; ++i) {\n            State* state = (State*)states.at(offset + i);\n            GameState* gameState = actGameStates.at(currentPlayerIndex).at(i);\n\n            size_t slot = gameState->playersReverseMap.at(currentPlayerIndex);\n\n            if (gameState->allowRandomMoves.at(slot)) {\n              float x = 4.0f / std::pow(state->getStepIdx() + 10, 2.0f);\n              if (std::uniform_real_distribution<float>(0, 1.0f)(rng) < x) {\n                mctsResult.at(offset + i).bestAction =\n                    randint(state->GetLegalActions().size());\n                // fmt::printf(\"at state '%s' - performing random move %s\\n\",\n                // state->history(),\n                // state->actionDescription(state->GetLegalActions().at(mctsResult.at(offset\n                // + i).bestAction)));\n                gameState->validTournamentGame = false;\n              }\n            }\n          }\n          offset += currentPlayerStates;\n        }\n      }\n\n      // Support for running the opponent player in a different game\n      // implementation. 
This is rarely needed or used, but can be useful to\n      // train against a model that was trained on a different game\n      // implementation but with the same action space\n      // TODO: move into a state callback\n      if (&*game->playerGame_.at(playerIndex) != game) {\n        if (devPlayer->rnnSeqlen()) {\n          actRnnState.clear();\n\n          offset = 0;\n          for (size_t pi = 0; pi != statePlayerSize.size(); ++pi) {\n            size_t currentPlayerIndex = statePlayerSize[pi].first;\n            size_t currentPlayerStates = statePlayerSize[pi].second;\n            for (size_t i = 0; i != currentPlayerStates; ++i) {\n              GameState* gameState = actGameStates.at(currentPlayerIndex).at(i);\n\n              size_t slot = gameState->playersReverseMap.at(currentPlayerIndex);\n\n              if (!gameState->rnnState2.at(slot).defined()) {\n                gameState->rnnState2[slot] =\n                    torch::zeros(devPlayer->rnnStateSize());\n              }\n\n              actRnnState.push_back(std::move(gameState->rnnState2[slot]));\n\n              gameState->rnnStates.at(slot).push_back(actRnnState.back().cpu());\n            }\n            offset += currentPlayerStates;\n          }\n\n          std::vector<torch::Tensor> nextRnnState =\n              devPlayer->nextRnnState(states, actRnnState);\n\n          offset = 0;\n          for (size_t pi = 0; pi != statePlayerSize.size(); ++pi) {\n            size_t currentPlayerIndex = statePlayerSize[pi].first;\n            size_t currentPlayerStates = statePlayerSize[pi].second;\n            for (size_t i = 0; i != currentPlayerStates; ++i) {\n              GameState* gameState = actGameStates.at(currentPlayerIndex).at(i);\n\n              size_t slot = gameState->playersReverseMap.at(currentPlayerIndex);\n\n              gameState->rnnState2[slot] =\n                  std::move(nextRnnState.at(offset + i));\n            }\n            offset += currentPlayerStates;\n          }\n      
  }\n\n        offset = 0;\n        for (size_t pi = 0; pi != statePlayerSize.size(); ++pi) {\n          size_t currentPlayerStates = statePlayerSize[pi].second;\n          for (size_t i = 0; i != currentPlayerStates; ++i) {\n            State* state = (State*)playerActStates.at(offset + i);\n\n            auto& res = mctsResult.at(offset + i);\n\n            state->forward(res.bestAction);\n          }\n          offset += currentPlayerStates;\n        }\n\n      } else {\n        offset = 0;\n        for (size_t pi = 0; pi != statePlayerSize.size(); ++pi) {\n          size_t currentPlayerIndex = statePlayerSize[pi].first;\n          size_t currentPlayerStates = statePlayerSize[pi].second;\n          for (size_t i = 0; i != currentPlayerStates; ++i) {\n            GameState* gameState = actGameStates.at(currentPlayerIndex).at(i);\n            auto& res = mctsResult.at(offset + i);\n\n            for (auto& x : gameState->playerState) {\n              if (x) {\n                x->forward(res.bestAction);\n              }\n            }\n          }\n          offset += currentPlayerStates;\n        }\n      }\n\n      mctsOption = mctsPlayers[playerIndex]\n                       ? 
&mctsPlayers[playerIndex]->option()\n                       : nullptr;\n\n      if (mctsPlayers.at(playerIndex)) {\n        double rps = mctsPlayers.at(playerIndex)->rolloutsPerSecond();\n        std::unique_lock<std::mutex> lkStats(game->mutexStats_);\n        auto& stats_s = game->stats_[\"Rollouts per second\"];\n        std::get<0>(stats_s) += 1;\n        std::get<1>(stats_s) += rps;\n        std::get<2>(stats_s) += rps * rps;\n      }\n\n      pushStateCallback(&BatchExecutor::actResult);\n\n      runStateCallbacks();\n\n      states.clear();\n    }\n  }\n\n  void run() {\n\n    task = async::Task(threads::threads);\n\n    for (auto& v : game->players_) {\n      players_.push_back(&*v);\n    }\n\n    result_.resize(players_.size());\n\n    actStates.resize(players_.size());\n    actPlayerStates.resize(players_.size());\n    actGameStates.resize(players_.size());\n    remapPlayerIdx.resize(players_.size());\n\n    basestate = std::move(game->state_);\n\n    seqs.resize(players_.size());\n\n    size_t ngames = size_t(std::max(game->perThreadBatchSize, 1));\n\n    if (game->perThreadBatchSize < 1) {\n      int bs = 102400;\n      int n = 0;\n      for (auto& v : players_) {\n        auto actorPlayer = dynamic_cast<ActorPlayer*>(v);\n        if (actorPlayer) {\n          int v = actorPlayer->findBatchSize(*basestate);\n          if (v > 0) {\n            bs = std::min(bs, v);\n            ++n;\n          }\n        }\n      }\n      if (n) {\n        fmt::printf(\"Using batch size of %d\\n\", bs);\n        ngames = bs;\n      }\n    }\n\n    while (states.size() < ngames &&\n           (game->numEpisode < 0 || startedGameCount < game->numEpisode)) {\n      addGame(states.end());\n    }\n\n    devPlayer = nullptr;\n    for (auto& v : players_) {\n      auto actorPlayer = dynamic_cast<ActorPlayer*>(v);\n      if (!actorPlayer) {\n        throw std::runtime_error(\n            \"Cannot use perThreadBatchSize without ActorPlayer\");\n      }\n      if 
(actorPlayer->getName() == \"dev\") {\n        devPlayer = actorPlayer;\n      }\n      actorPlayers.push_back(std::move(actorPlayer));\n      mctsPlayers.push_back(dynamic_cast<mcts::MctsPlayer*>(v));\n      forwardPlayers.push_back(dynamic_cast<ForwardPlayer*>(v));\n    }\n    if (!devPlayer) {\n      throw std::runtime_error(\"dev player not found\");\n    }\n\n    // If two players are the same (pointer comparison), then they can act\n    // together.\n    for (size_t i = 0; i != players_.size(); ++i) {\n      remapPlayerIdx[i] = i;\n      for (size_t i2 = 0; i2 != i; ++i2) {\n        if (i != i2 && players_[i] == players_[i2]) {\n          remapPlayerIdx[i] = i2;\n        }\n      }\n    }\n\n    while (!states.empty() && !game->terminate_) {\n\n      for (auto& v : actStates) {\n        v.clear();\n      }\n      for (auto& v : actPlayerStates) {\n        v.clear();\n      }\n      for (auto& v : actGameStates) {\n        v.clear();\n      }\n\n      for (auto i = states.begin(); i != states.end();) {\n        auto* state = &*i->state;\n        bool completed = state->terminated() || i->resigned != -1 || i->drawn;\n        if (completed) {\n          const auto end = std::chrono::steady_clock::now();\n          const auto elapsed =\n              std::chrono::duration_cast<std::chrono::seconds>(end - i->start)\n                  .count();\n          const size_t stepindex = i->stepindex;\n          if (i->rewindCount == 0) {\n            std::unique_lock<std::mutex> lkStats(game->mutexStats_);\n            auto& stats_steps = game->stats_[\"Game Duration (steps)\"];\n            std::get<0>(stats_steps) += 1;\n            std::get<1>(stats_steps) += stepindex;\n            std::get<2>(stats_steps) += stepindex * stepindex;\n            auto& stats_s = game->stats_[\"Game Duration (seconds)\"];\n            std::get<0>(stats_s) += 1;\n            std::get<1>(stats_s) += elapsed;\n            std::get<2>(stats_s) += elapsed * elapsed;\n          }\n          if 
(i->drawn) {\n            for (size_t idx = 0; idx != players_.size(); ++idx) {\n              result_.at(i->players.at(idx)) = 0;\n            }\n          }\n          if (i->resigned != -1) {\n            for (size_t idx = 0; idx != players_.size(); ++idx) {\n              result_.at(i->players.at(idx)) = int(idx) == i->resigned ? -1 : 1;\n            }\n            // fmt::printf(\"player %d (%s) resigned : %s\\n\", i->resigned,\n            //            players_.at(i->players.at(i->resigned))->getName(),\n            //            state->history());\n          } else {\n            for (size_t idx = 0; idx != players_.size(); ++idx) {\n              result_.at(i->players.at(idx)) = state->getReward(idx);\n            }\n            // fmt::printf(\"game ended normally: %s\\n\",\n            // state->history().c_str());\n            if (randint(256) == 0) {\n              fmt::printf(\n                  \"game ended normally: %s\\n\", state->history().c_str());\n            }\n          }\n\n          runningAverageGameSteps =\n              runningAverageGameSteps * 0.99f + state->getStepIdx() * 0.01f;\n        }\n\n        bool doRewind = false;\n        int rewindPlayer = 0;\n        bool rewindToNegativeValue = false;\n\n        bool isForward = dynamic_cast<ForwardPlayer*>(devPlayer) != nullptr;\n\n        int seqlen = devPlayer->rnnSeqlen();\n\n        if ((isForward && seqlen > 0) || completed) {\n          for (size_t slot = 0; slot != players_.size(); ++slot) {\n            size_t dstp = i->players.at(slot);\n\n            if (completed) {\n#ifdef OPENBW_UI\n              fmt::printf(\"Result for %s: %g\\n\", players_[dstp]->getName(),\n                          result_[dstp]);\n#endif\n            } else {\n              if ((int)i->pi[slot].size() < seqlen * 16 + 1 ||\n                  i->history.empty() || i->history.back().turn != (int)slot) {\n                continue;\n              }\n              result_[dstp] = i->history.back().value;\n\n 
             i->pi[slot].pop_back();\n              i->piMask[slot].pop_back();\n              i->actionPi[slot].pop_back();\n              i->predV[slot].pop_back();\n              i->feat[slot].pop_back();\n              i->rnnStates[slot].pop_back();\n              i->reward[slot].pop_back();\n            }\n\n            auto addseq = [&](const std::vector<torch::Tensor>& src,\n                              std::vector<torch::Tensor>& dst,\n                              tube::EpisodicTrajectory& traj) {\n              for (auto& x : src) {\n                dst.push_back(x);\n                if ((int)dst.size() > seqlen) {\n                  throw std::runtime_error(\"addseq bad seqlen\");\n                }\n                if ((int)dst.size() == seqlen) {\n                  traj.pushBack(torch::stack(dst));\n                  dst.clear();\n                }\n              }\n            };\n\n            std::vector<float> dReward;\n            dReward.resize(i->feat[slot].size());\n            if (isForward) {\n              float gae = 0.0f;\n              float gamma = 0.997;\n              float gaeLambda = 0.95;\n              float reward = result_[slot];\n              // reward = 0;\n              for (size_t n = dReward.size(); n;) {\n                --n;\n                float predv = i->predV[slot].at(n).item<float>();\n                float npredv = 0.0f;\n                if (n == dReward.size() - 1) {\n                  npredv = result_[dstp];\n                  // npredv = 0;\n                } else {\n                  npredv = i->predV[slot].at(n + 1).item<float>();\n                }\n                float delta = reward + gamma * npredv - predv;\n                gae = delta + gamma * gaeLambda * gae;\n\n                dReward.at(n) = gae + predv;\n\n                reward = i->reward[slot].at(n);\n              }\n            } else {\n              float reward = result_[dstp];\n              for (size_t n = dReward.size(); n;) {\n          
      --n;\n                dReward.at(n) = reward;\n\n                // reward *= 0.99;\n                // reward += i->reward[slot].at(n);\n              }\n            }\n\n            std::vector<torch::Tensor> rewards;\n            for (size_t j = 0; j != i->feat[slot].size(); ++j) {\n              if (devPlayer->vOutputs() == 3) {\n                torch::Tensor reward = torch::zeros({3}, torch::kFloat32);\n                reward[0] = result_[dstp] > 0;\n                reward[1] = result_[dstp] < 0;\n                reward[2] = result_[dstp] == 0;\n                rewards.push_back(std::move(reward));\n              } else {\n                torch::Tensor reward = torch::zeros({1}, torch::kFloat32);\n                reward[0] = dReward.at(j);\n                rewards.push_back(std::move(reward));\n              }\n            }\n\n            std::vector<float> piReward;\n            piReward.resize(i->pi[slot].size());\n            auto& seq = seqs[dstp];\n            if (actorPlayers[dstp]->getModelId() == \"dev\" &&\n                i->feat[slot].size() > 0) {\n              if (seqlen) {\n                for (size_t n = 0; n != i->feat[slot].size(); ++n) {\n                  if ((seq.feat.size() + n) % seqlen == seqlen - 1) {\n                    game->rnnInitialState_[dstp].pushBack(\n                        i->rnnStates[slot].at(n));\n                  }\n                }\n                addseq(i->feat[slot], seq.feat, game->feature_[dstp]);\n                addseq(i->pi[slot], seq.pi, game->pi_[dstp]);\n                addseq(i->piMask[slot], seq.piMask, game->piMask_[dstp]);\n                if (isForward) {\n                  addseq(\n                      i->actionPi[slot], seq.actionPi, game->actionPi_[dstp]);\n                }\n                addseq(i->predV[slot], seq.predV, game->predV_[dstp]);\n                std::vector<torch::Tensor> rnnStateMask;\n                rnnStateMask.resize(i->feat[slot].size());\n                for (auto& v 
: rnnStateMask) {\n                  v = torch::ones({1});\n                }\n                rnnStateMask.at(0).zero_();\n                addseq(\n                    rnnStateMask, seq.rnnStateMask, game->rnnStateMask_[dstp]);\n              } else {\n                for (auto& v : i->feat[slot]) {\n                  game->feature_[dstp].pushBack(v);\n                }\n                for (auto& v : i->pi[slot]) {\n                  game->pi_[dstp].pushBack(v);\n                }\n                for (auto& v : i->piMask[slot]) {\n                  game->piMask_[dstp].pushBack(v);\n                }\n                for (auto& v : i->actionPi[slot]) {\n                  game->actionPi_[dstp].pushBack(v);\n                }\n                for (auto& v : i->predV[slot]) {\n                  game->predV_[dstp].pushBack(v);\n                }\n              }\n\n              if (game->predictEndState || game->predictNStates) {\n                int n = (game->predictEndState ? 2 : 0) + game->predictNStates;\n                auto size = state->GetRawFeatureSize();\n                size.insert(size.begin(), n);\n                auto finalsize = size;\n                finalsize[1] *= finalsize[0];\n                finalsize.erase(finalsize.begin());\n                for (size_t m = 0; m != i->history.size(); ++m) {\n                  if (!i->history[m].featurized ||\n                      i->history[m].turn != (int)slot) {\n                    continue;\n                  }\n                  auto tensor = torch::zeros(size);\n                  auto mask = torch::zeros(size);\n                  size_t offset = 0;\n                  if (game->predictEndState) {\n                    if (state->terminated()) {\n                      tensor[0].copy_(i->history.back().shortFeat);\n                      mask[0].fill_(1.0f);\n                    } else {\n                      tensor[1].copy_(i->history.back().shortFeat);\n                      mask[1].fill_(1.0f);\n        
            }\n                    offset += 2;\n                  }\n                  for (int j = 0; j != game->predictNStates; ++j, ++offset) {\n                    size_t index = m + 1 + j;\n                    if (index < i->history.size()) {\n                      tensor[offset].copy_(i->history[m].shortFeat);\n                      mask[offset].fill_(1.0f);\n                    }\n                  }\n\n                  tensor = tensor.view(finalsize);\n                  mask = mask.view(finalsize);\n\n                  if (seqlen) {\n                    addseq({tensor}, seq.predictPi, game->predictPi_[dstp]);\n                    addseq(\n                        {mask}, seq.predictPiMask, game->predictPiMask_[dstp]);\n                  } else {\n                    game->predictPi_[dstp].pushBack(tensor);\n                    game->predictPiMask_[dstp].pushBack(mask);\n                  }\n                }\n              }\n\n              // fmt::printf(\"result[%d] (%s) is %g\\n\", p,\n              // players_[p]->getName(), result_[p]);\n\n              if (seqlen) {\n                addseq(rewards, seq.v, game->v_[dstp]);\n              } else {\n                for (auto& reward : rewards) {\n                  game->v_[dstp].pushBack(std::move(reward));\n                }\n              }\n            }\n\n            i->pi[slot].clear();\n            i->piMask[slot].clear();\n            i->actionPi[slot].clear();\n            i->predV[slot].clear();\n            i->feat[slot].clear();\n            i->rnnStates[slot].clear();\n            i->reward[slot].clear();\n            for (auto& v : i->history) {\n              if (v.turn == (int)slot) {\n                v.featurized = false;\n              }\n            }\n\n            if (completed) {\n              if (actorPlayers[dstp]->getModelId() == \"dev\") {\n                if (result_[dstp] != 0) {\n                  doRewind = true;\n                  rewindPlayer = slot;\n                  
rewindToNegativeValue = result_[dstp] > 0;\n                }\n              }\n\n              if (i->rewindCount == 0 && i->validTournamentGame) {\n                actorPlayers[dstp]->result(state, result_[dstp]);\n              } else {\n                actorPlayers[dstp]->forget(state);\n              }\n            }\n          }\n          game->sendTrajectory();\n\n          if (doRewind) {\n            for (size_t slot = 0; slot != players_.size(); ++slot) {\n              size_t dstp = i->players.at(slot);\n              if (actorPlayers[dstp]->wantsTournamentResult()) {\n                doRewind = false;\n                break;\n              }\n            }\n          }\n        }\n\n        if (completed) {\n          ++completedGameCount;\n          if (doRewind && i->rewindCount < game->maxRewinds &&\n              rewind(&*i, rewindPlayer, rewindToNegativeValue)) {\n            ++i->rewindCount;\n          } else {\n            i = states.erase(i);\n            if (game->numEpisode < 0 || startedGameCount < game->numEpisode) {\n              i = addGame(i);\n            }\n          }\n        } else {\n          i->stepindex++;\n          int slot = state->getCurrentPlayer();\n          auto playerIdx = i->players.at(slot);\n          actStates.at(playerIdx).push_back(state);\n          actPlayerStates.at(playerIdx).push_back(&*i->playerState[slot]);\n          actGameStates.at(playerIdx).push_back(&*i);\n          ++i;\n        }\n      }\n\n      if (alignPlayers) {\n        size_t bestPlayerIdx = 0;\n        size_t bestPlayerIdxSize = 0;\n        for (size_t playerIdx = 0; playerIdx != actStates.size(); ++playerIdx) {\n          auto& states = actStates[playerIdx];\n          if (states.size() > bestPlayerIdxSize) {\n            bestPlayerIdxSize = states.size();\n            bestPlayerIdx = playerIdx;\n          }\n        }\n        actForPlayer(bestPlayerIdx);\n      } else {\n        for (size_t playerIdx = 0; playerIdx != actStates.size(); 
++playerIdx) {\n          actForPlayer(playerIdx);\n        }\n      }\n    }\n  }\n};\n\nvoid Game::mainLoop() {\n  threads::setCurrentThreadName(\"game thread \" +\n                                std::to_string(common::getThreadId()));\n  threads::init(0);\n  if (players_.size() != (isOnePlayerGame() ? 1 : 2)) {\n    std::cout << \"Error: wrong number of players: \" << players_.size()\n              << std::endl;\n    assert(false);\n  }\n  if (!evalMode) {\n    reset();\n\n    for (auto& v : playerGame_) {\n      if (&*v != this && v->state_) {\n        v->state_->reset();\n      }\n    }\n\n    BatchExecutor batchExecutor;\n    batchExecutor.game = this;\n\n    batchExecutor.run();\n\n  } else {\n\n    // Warm up JIT/model. This can take several seconds, so do it before we\n    // start time counting.\n    for (auto& v : players_) {\n      auto mctsPlayer = std::dynamic_pointer_cast<mcts::MctsPlayer>(v);\n      if (mctsPlayer && mctsPlayer->option().totalTime) {\n        std::cout << \"Warming up model.\\n\";\n        auto opt = mctsPlayer->option();\n        mctsPlayer->option().totalTime = 0;\n        mctsPlayer->option().numRolloutPerThread = 20;\n        mctsPlayer->option().randomizedRollouts = false;\n        mctsPlayer->reset();\n        mctsPlayer->actMcts(*state_);\n        mctsPlayer->actMcts(*state_);\n        mctsPlayer->actMcts(*state_);\n        mctsPlayer->actMcts(*state_);\n\n        mctsPlayer->option() = opt;\n        mctsPlayer->reset();\n      }\n    }\n\n    int64_t gameCount = 0;\n#ifdef DEBUG_GAME\n    std::thread::id thread_id = std::this_thread::get_id();\n#endif\n    while ((numEpisode < 0 || gameCount < numEpisode) && !terminate_) {\n      if (terminate_) {\n#ifdef DEBUG_GAME\n        std::cout << \"Thread \" << thread_id << \": terminating, \"\n                  << \"game \" << this << \", \" << gameCount << \" / \" << numEpisode\n                  << \" games played\" << std::endl;\n#endif\n        break;\n      }\n#ifdef 
DEBUG_GAME\n      std::cout << \"Thread \" << thread_id << \", game \" << this\n                << \": not terminating - run another iteration. \" << std::endl;\n#endif\n      bool aHuman = std::any_of(players_.begin(), players_.end(),\n                                [](const std::shared_ptr<Player>& player) {\n                                  return player->isHuman();\n                                });\n      if (aHuman && state_->stochasticReset()) {\n        std::string line;\n        std::cout << \"Random outcome ?\" << std::endl;\n        std::cin >> line;\n        state_->forcedDice = std::stoul(line, nullptr, 0);\n      }\n      reset();\n      int stepindex = 0;\n      auto start = std::chrono::steady_clock::now();\n      while (!state_->terminated()) {\n        stepindex += 1;\n#ifdef DEBUG_GAME\n        std::cout << \"Thread \" << thread_id << \", game \" << this << \": step \"\n                  << stepindex << std::endl;\n#endif\n        step();\n        if (isInSingleMoveMode_) {\n          std::cout << lastMctsValue_ << \"\\n\";\n          state_->printLastAction();\n          std::exit(0);\n        }\n        if (printMoves_) {\n          std::cout << \"MCTS value: \" << lastMctsValue_ << \"\\n\";\n          std::cout << \"Made move: \" << state_->lastMoveString() << std::endl;\n        }\n      }\n      auto end = std::chrono::steady_clock::now();\n      auto elapsed =\n          std::chrono::duration_cast<std::chrono::seconds>(end - start).count();\n      {\n        std::unique_lock<std::mutex> lkStats(mutexStats_);\n        auto& stats_steps = stats_[\"Game Duration (steps)\"];\n        std::get<0>(stats_steps) += 1;\n        std::get<1>(stats_steps) += stepindex;\n        std::get<2>(stats_steps) += stepindex * stepindex;\n        auto& stats_s = stats_[\"Game Duration (seconds)\"];\n        std::get<0>(stats_s) += 1;\n        std::get<1>(stats_s) += elapsed;\n        std::get<2>(stats_s) += elapsed * elapsed;\n      }\n#ifdef DEBUG_GAME\n    
  std::cout << \"Thread \" << thread_id << \", game \" << this << \": game \"\n                << gameCount << \" / \" << numEpisode << \" ended; \" << stepindex\n                << \" steps, \" << (static_cast<float>(stepindex) / elapsed)\n                << \" steps per second\" << std::endl;\n#endif\n      if (!lastAction_.empty() && aHuman) {\n        std::cout << \"\\n#Last Action: \" << lastAction_ << \"\\n\\n\";\n        state_->printCurrentBoard();\n      }\n      if (std::any_of(players_.begin(), players_.end(),\n                      [](const std::shared_ptr<Player>& player) {\n                        return player->isTP();\n                      })) {\n        state_->errPrintCurrentBoard();\n      }\n\n      result_[0] = state_->getReward(0);\n      if (players_.size() > 1) {\n        result_[1] = state_->getReward(1);\n      }\n\n      ++gameCount;\n    }\n#ifdef DEBUG_GAME\n    std::cout << \"Thread \" << thread_id << \", game \" << this\n              << \": exiting main loop\" << std::endl;\n#endif\n  }\n}\n\nstd::optional<int> Game::parseSpecialAction(const std::string& str) {\n  if (str == \"-1\" || str == \"undo\" || str == \"u\") {\n    std::cout << \"Undoing the last move\\n\";\n    state_->undoLastMoveForPlayer(state_->getCurrentPlayer());\n    return -1;\n  } else if (str == \"exit\") {\n    std::exit(0);\n  } else if (str == \"m\" || str == \"manual\") {\n    bool resume = false;\n    auto playerString = [&](int index) {\n      std::string str;\n      auto& player = players_.at(index);\n      if (std::dynamic_pointer_cast<mcts::MctsPlayer>(player)) {\n        str += \"MctsPlayer\";\n      } else if (std::dynamic_pointer_cast<HumanPlayer>(player)) {\n        str += \"HumanPlayer\";\n      } else {\n        str += typeid(player).name();\n      }\n      return str;\n    };\n    auto specialAction = [&](const std::string& str) -> std::optional<int> {\n      if (str == \"singlemovemode\" || str == \"sm\") {\n        isInSingleMoveMode_ = true;\n  
      return -1;\n      } else if (str.substr(0, 3) == \"set\") {\n        state_->setStateFromStr(str.substr(4));\n        return -1;\n      } else if (str == \"r\" || str == \"reset\") {\n        state_->reset();\n        for (auto& v : players_) {\n          v->reset();\n        }\n        return -1;\n      } else if (str == \"u\" || str == \"undo\") {\n        state_->undoLastMove();\n        return -1;\n      } else if (str == \"c\" || str == \"continue\") {\n        resume = true;\n        return -1;\n      } else if (str == \"swap\") {\n        std::next_permutation(players_.begin(), players_.end());\n        for (size_t i = 0; i != players_.size(); ++i) {\n          std::cout << \"Player \" << i << \" is now \" << playerString(i) << \"\\n\";\n        }\n        return -1;\n      } else if (str == \"printmoves\") {\n        printMoves_ = true;\n        return -1;\n      } else if (str == \"printvalue\") {\n        auto mctsPlayer = std::dynamic_pointer_cast<mcts::MctsPlayer>(\n            players_.at(state_->getCurrentPlayer()));\n        if (!mctsPlayer) {\n          for (auto& v : players_) {\n            mctsPlayer = std::dynamic_pointer_cast<mcts::MctsPlayer>(v);\n            if (mctsPlayer) {\n              break;\n            }\n          }\n        }\n        if (mctsPlayer) {\n          std::cout << \"NN Value: \" << mctsPlayer->calculateValue(*state_)\n                    << \"\\n\";\n        } else {\n          std::cout << \"NN Value: 0\\n\";\n        }\n      }\n      return std::nullopt;\n    };\n    std::cout\n        << \"\\nEntering moves manually. 
Enter 'r' or 'reset' to reset the \"\n           \"board, 'u' or 'undo' to undo the last move, 'c' or 'continue' to \"\n           \"continue play, or 'swap' to swap the turn order of the players\\n\\n\";\n    while (!state_->terminated()) {\n      int index = -1;\n      while (index == -1) {\n        std::cout << \"Enter a move for player \" << state_->getCurrentPlayer()\n                  << \" (\" << playerString(state_->getCurrentPlayer()) << \")\\n\";\n        index = state_->humanInputAction(specialAction);\n        if (resume) {\n          return -1;\n        }\n      }\n\n      if (!lastAction_.empty()) {\n        std::cout << \"\\nLast Action: \" << lastAction_ << \"\\n\\n\";\n      }\n      std::cout << \" applying action... \" << std::endl;\n      auto action = state_->GetLegalActions().at(index);\n      lastAction_ = state_->actionDescription(action);\n      if (!state_->isStochastic()) {\n        state_->forward(action.GetIndex());\n      } else {\n        // auto backup_state = state_->clone();\n        std::string line;\n        std::cout << \"Random outcome ?\" << std::endl;\n        std::cin >> line;\n        state_->forcedDice = std::stoul(line, nullptr, 0);\n        state_->forward(action.GetIndex());\n      }\n    }\n    return -1;\n  }\n  return std::nullopt;\n}\n\n/* virtual */ tube::EnvThread::Stats Game::get_stats() {\n  std::unique_lock<std::mutex> lkStats(mutexStats_);\n  return stats_;\n}\n\nvoid Game::step() {\n  auto playerIdx = state_->getCurrentPlayer();\n  auto& player = players_.at(playerIdx);\n  // std::cout << \"board\" << std::endl;\n  // state_->printCurrentBoard();\n  if (player->isTP()) {\n    // auto TPplayer = std::dynamic_pointer_cast<TPPlayer>(player);\n    assert(!state_->isStochastic());\n    auto index = state_->TPInputAction();\n    auto action = state_->GetLegalActions().at(index);\n    lastAction_ = state_->actionDescription(action);\n    state_->forward(index);\n  } else if (player->isHuman()) {\n    if 
(!hasPrintedHumanHelp_) {\n      std::cout << \"\\nEnter a move for the human player. Enter 'u' or 'undo' \"\n                   \"to undo your previous move, 'm' or 'manual' to enter moves \"\n                   \"manually for all players.\\n\\n\";\n      hasPrintedHumanHelp_ = true;\n    }\n    auto humanPlayer = std::dynamic_pointer_cast<HumanPlayer>(player);\n    if (!lastAction_.empty()) {\n      std::cout << \"\\nLast Action: \" << lastAction_ << \"\\n\\n\";\n    }\n\n    std::cout << \"History: \" << state_->history() << \"\\n\";\n\n    int index = state_->humanInputAction(\n        std::bind(&Game::parseSpecialAction, this, std::placeholders::_1));\n    if (index == -1) {\n      return step();\n    }\n    std::cout << \" applying action... \" << std::endl;\n    auto action = state_->GetLegalActions().at(index);\n    lastAction_ = state_->actionDescription(action);\n    if (!state_->isStochastic()) {\n      state_->forward(action.GetIndex());\n    } else {\n      // auto backup_state = state_->clone();\n      std::string line;\n      std::cout << \"Random outcome ?\" << std::endl;\n      std::cin >> line;\n      state_->forcedDice = std::stoul(line, nullptr, 0);\n      state_->forward(action.GetIndex());\n    }\n  } else {\n    auto mctsPlayer = std::dynamic_pointer_cast<mcts::MctsPlayer>(player);\n    auto rnnShape = mctsPlayer->rnnStateSize();\n    if (!rnnShape.empty()) {\n      if (rnnState_.size() <= (size_t)playerIdx) {\n        rnnState_.resize(playerIdx + 1);\n      }\n      if (!rnnState_.at(playerIdx).defined()) {\n        rnnState_[playerIdx] = torch::zeros(rnnShape);\n      }\n    }\n    mcts::MctsResult result;\n    if (!rnnShape.empty()) {\n      result = mctsPlayer->actMcts(*state_, rnnState_.at(playerIdx));\n      rnnState_.at(playerIdx) = std::move(result.rnnState);\n    } else {\n      result = mctsPlayer->actMcts(*state_);\n    }\n    lastMctsValue_ = result.rootValue;\n\n    // store feature for training\n    if (!evalMode) {\n      
torch::Tensor feat = getFeatureInTensor(*state_);\n      auto [policy, policyMask] = getPolicyInTensor(*state_, result.mctsPolicy);\n      feature_[playerIdx].pushBack(std::move(feat));\n      pi_[playerIdx].pushBack(std::move(policy));\n      piMask_[playerIdx].pushBack(std::move(policyMask));\n    }\n\n    // std::cout << \">>>>actual act\" << std::endl;\n    _Action action = state_->GetLegalActions().at(result.bestAction);\n    lastAction_ = state_->actionDescription(action);\n    bool noHuman = std::none_of(players_.begin(), players_.end(),\n                                [](const std::shared_ptr<Player>& player) {\n                                  return player->isHuman();\n                                });\n    if (!state_->isStochastic()) {\n      if (!noHuman) {\n        std::cout << \"Performing action \"\n                  << state_->actionDescription(\n                         state_->GetLegalActions().at(result.bestAction))\n                  << \"\\n\";\n      }\n    } else if (!noHuman) {\n      std::string line;\n      std::cout << \"Performing action \"\n                << state_->actionDescription(\n                       state_->GetLegalActions().at(result.bestAction))\n                << \"\\n\";\n      std::cout << \"Random outcome ?\" << std::endl;\n      std::cin >> line;\n      state_->forcedDice = std::stoul(line, nullptr, 0);\n    }\n    state_->forward(result.bestAction);\n  }\n}\n\nvoid Game::sendTrajectory() {\n  for (int i = 0; i < (int)players_.size(); ++i) {\n    assert(v_[i].len() == pi_[i].len() && pi_[i].len() == feature_[i].len());\n    assert(pi_[i].len() == piMask_[i].len());\n    int errcode;\n    while (prepareForSend(i)) {\n      // ignore error codes from the dispatcher\n      errcode = dispatchers_[i].dispatchNoReply();\n      switch (errcode) {\n      case tube::Dispatcher::DISPATCH_ERR_DC_TERM:\n#ifdef DEBUG_GAME\n        std::cout << \"game \" << this << \", sendTrajectory: \"\n                  << \"attempt to 
dispatch through\"\n                  << \" a terminated data channel \" << std::endl;\n#endif\n        break;\n      case tube::Dispatcher::DISPATCH_ERR_NO_SLOT:\n#ifdef DEBUG_GAME\n        std::cout << \"game \" << this << \": sendTrajectory: \"\n                  << \"no slots available to dispatch\" << std::endl;\n#endif\n        break;\n      case tube::Dispatcher::DISPATCH_NOERR:\n        break;\n      }\n    }\n    assert(v_[i].len() == 0);\n    assert(pi_[i].len() == 0);\n    assert(piMask_[i].len() == 0);\n    assert(feature_[i].len() == 0);\n  }\n}\n\nbool Game::prepareForSend(int playerId) {\n  int len = feature_[playerId].len();\n#define check(n)                                                               \\\n  if (!n.empty() && n[playerId].len() != len)                                  \\\n  throw std::runtime_error(\"len mismatch in \" #n)\n  check(pi_);\n  check(piMask_);\n  check(actionPi_);\n  check(v_);\n  check(predV_);\n  check(rnnInitialState_);\n  check(rnnStateMask_);\n#undef check\n  if (feature_[playerId].prepareForSend()) {\n    bool b = pi_[playerId].prepareForSend();\n    b &= piMask_[playerId].prepareForSend();\n    if (!actionPi_.empty()) {\n      b &= actionPi_[playerId].prepareForSend();\n    }\n    b &= v_[playerId].prepareForSend();\n    b &= predV_[playerId].prepareForSend();\n    if (predictEndState + predictNStates) {\n      b &= predictPi_[playerId].prepareForSend();\n      b &= predictPiMask_[playerId].prepareForSend();\n    }\n    if (!rnnInitialState_.empty()) {\n      b &= rnnInitialState_[playerId].prepareForSend();\n      b &= rnnStateMask_[playerId].prepareForSend();\n    }\n    if (!b) {\n      throw std::runtime_error(\"prepareForSend mismatch 1\");\n    }\n    return true;\n  }\n  bool b = pi_[playerId].prepareForSend();\n  b |= piMask_[playerId].prepareForSend();\n  b |= v_[playerId].prepareForSend();\n  b |= predV_[playerId].prepareForSend();\n  if (!actionPi_.empty()) {\n    b |= 
actionPi_[playerId].prepareForSend();\n  }\n  if (predictEndState + predictNStates) {\n    b |= predictPi_[playerId].prepareForSend();\n    b |= predictPiMask_[playerId].prepareForSend();\n  }\n  if (!rnnInitialState_.empty()) {\n    b |= rnnInitialState_[playerId].prepareForSend();\n    b |= rnnStateMask_[playerId].prepareForSend();\n  }\n  if (b) {\n    throw std::runtime_error(\"prepareForSend mismatch 2\");\n  }\n  return false;\n}\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/game.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"forward_player.h\"\n#include \"mcts/mcts.h\"\n#include \"tube/src_cpp/data_channel.h\"\n#include \"tube/src_cpp/dispatcher.h\"\n#include \"tube/src_cpp/env_thread.h\"\n\n#ifndef NO_JAVA\n#include \"../games/ludii/jni_utils.h\"\n#include \"../games/ludii/ludii_state_wrapper.h\"\n#endif\n\n#include \"../games/amazons.h\"\n#include \"../games/breakthrough_state.h\"\n#include \"../games/chess.h\"\n#include \"../games/chinesecheckers.h\"\n#include \"../games/connect6_state.h\"\n#include \"../games/connectfour.h\"\n#include \"../games/diceshogi.h\"\n#include \"../games/einstein.h\"\n#include \"../games/havannah_state.h\"\n#include \"../games/hex_state.h\"\n#include \"../games/kyotoshogi_state.h\"\n#include \"../games/mastermind_state.h\"\n#include \"../games/minesweeper_state.h\"\n#include \"../games/minishogi.h\"\n#include \"../games/mnkgame.h\"\n// #include \"../games/nogo_zestate.h\"\n#include \"../games/block_go.h\"\n#include \"../games/gomoku_swap2.h\"\n#include \"../games/othello.h\"\n#include \"../games/othello_opt.h\"\n#include \"../games/outeropengomoku_new.h\"\n#include \"../games/surakarta_state.h\"\n#include \"../games/tristannogo_state.h\"\n#include \"../games/weakschur/weakschur_state.h\"\n#include \"../games/yinsh.h\"\n\n#include \"human_player.h\"\n#include \"utils.h\"\n\n#include <algorithm>\n#include <cctype>\n#include <optional>\n#include <string>\n\n//#define DEBUG_GAME\n\nnamespace core {\n\n// Class for 2player fully observable game.\nclass Game : public tube::EnvThread {\n  friend struct BatchExecutor;\n\n public:\n  Game(std::string gameName,\n       std::vector<std::string> gameOptions,\n       int numEpisode,\n       int seed,\n       bool evalMode,\n       bool outFeatures,\n       bool 
turnFeaturesSingleChannel,\n       bool turnFeaturesMultiChannel,\n       bool geometricFeatures,\n       int history,\n       int randomFeatures,\n       bool oneFeature,\n       int perThreadBatchSize,\n       int maxRewinds,\n       bool predictEndState,\n       int predictNStates)\n      : numEpisode(numEpisode)\n      , evalMode(evalMode)\n      , perThreadBatchSize(perThreadBatchSize)\n      , maxRewinds(maxRewinds)\n      , predictEndState(predictEndState)\n      , predictNStates(predictNStates)\n      , result_(2, 0) {\n    gameName_ = gameName;\n    if (isGameNameMatched({\"Connect6\"})) {\n      state_ = newState<Connect6::StateForConnect6<1>>(seed);\n    } else if (isGameNameMatched({\"Connect6v2\"})) {\n      state_ = newState<Connect6::StateForConnect6<2>>(seed);\n    } else if (isGameNameMatched({\"Connect4\"})) {\n      state_ = newState<StateForConnectFour>(seed);\n      /*\ncombinations with numcolors 2,6,8 and slots 4,5,6,10 are interesting. For time\nhorizon it is more difficult to guess. I’d go with 0.5*slots, 1.0 slots, 1.5\nslots, 2 slots and then check numbers (just send us results in csv if easy to\nshare and we can suggest additional settings). If those experiments are too\nlong, you should start with colors=6, slots=4 and horizon \\in {3,4,5}. With\nhorizon=5 you should get a winning proba of 1. 
This could be a reasonable check.\nAnd we can check (by evaluating the decision tree - maybe we can find a student\nto look into this) if the strategy is identical to knuth’s.\n      */\n      // Mastermind_<size>_<horizon>_<arity>\n    } else if (isGameNameMatched({\"Mastermind_4_5_6\"})) {\n      // should be winning proba 1\n      state_ = newState<Mastermind::State<4, 5, 6>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_4_6_6\"})) {\n      // should be winning proba 1\n      state_ = newState<Mastermind::State<4, 6, 6>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_4_7_6\"})) {\n      // should be winning proba 1\n      state_ = newState<Mastermind::State<4, 7, 6>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_4_3_6\"})) {\n      state_ = newState<Mastermind::State<4, 4, 6>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_4_4_6\"})) {\n      state_ = newState<Mastermind::State<4, 3, 6>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_10_5_2\"})) {\n      state_ = newState<Mastermind::State<10, 5, 2>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_10_6_2\"})) {\n      state_ = newState<Mastermind::State<10, 6, 2>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_10_7_2\"})) {\n      state_ = newState<Mastermind::State<10, 7, 2>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_10_8_2\"})) {\n      state_ = newState<Mastermind::State<10, 8, 2>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_10_9_2\"})) {\n      state_ = newState<Mastermind::State<10, 9, 2>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_10_10_2\"})) {\n      state_ = newState<Mastermind::State<10, 10, 2>>(seed);\n    } else if (isGameNameMatched({\"Mastermind_10_15_2\"})) {\n      state_ = newState<Mastermind::State<10, 15, 2>>(seed);\n    } else if (isGameNameMatched({\"Mastermind\"})) {\n      state_ = newState<Mastermind::State<3, 2, 2>>(seed);\n    } else if (isGameNameMatched(\n                   
{\"Minesweeper_4_4_4\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<4, 4, 4>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_3_1_1\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<3, 1, 1>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_5_2_3\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<5, 2, 3>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_5_5_10\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<5, 5, 10>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_10_1_5\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<10, 1, 5>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_7_3_10\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<7, 3, 10>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_5_5_15\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<5, 5, 15>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_8_8_10\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<8, 8, 10>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_9_9_10\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<9, 9, 10>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_16_16_40\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<16, 16, 40>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Minesweeper_30_16_99\"})) {  // width, height, mines\n      state_ = newState<Minesweeper::State<30, 16, 99>>(seed);\n    } else if (isGameNameMatched({\"TicTacToe\", \"NoughtsAndCrosses\", \"XsAndOs\",\n                                  \"MNKGame_3_3_3\"})) {\n      state_ = 
newState<MNKGame::State<3, 3, 3>>(seed);\n    } else if (isGameNameMatched(\n                   {\"FreeStyleGomoku\", \"GomokuFreeStyle\", \"MNKGame_15_15_5\"})) {\n      state_ = newState<MNKGame::State<15, 15, 5>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Othello4\", \"Reversi4\", \"Othello04\", \"Reversi04\"})) {\n      state_ = newState<Othello::State<6>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Othello6\", \"Reversi6\", \"Othello06\", \"Reversi06\"})) {\n      state_ = newState<Othello::State<6>>(seed);\n    } else if (isGameNameMatched({\"Othello8\", \"Reversi8\", \"Othello08\",\n                                  \"Reversi08\", \"Othello\", \"Reversi\"})) {\n      state_ = newState<Othello::State<8>>(seed);\n    } else if (isGameNameMatched({\"Othello10\", \"Reversi10\"})) {\n      state_ = newState<Othello::State<10>>(seed);\n    } else if (isGameNameMatched({\"Othello12\", \"Reversi12\"})) {\n      state_ = newState<Othello::State<12>>(seed);\n    } else if (isGameNameMatched({\"Othello14\", \"Reversi14\"})) {\n      state_ = newState<Othello::State<14>>(seed);\n    } else if (isGameNameMatched({\"Othello16\", \"Reversi16\"})) {\n      state_ = newState<Othello::State<16>>(seed);\n    } else if (isGameNameMatched({\"OthelloOpt8\", \"OthelloOpt\", \"ReversiOpt8\",\n                                  \"ReversiOpt\"})) {\n      state_ = newState<Othello2::State<8>>(seed);\n    } else if (isGameNameMatched({\"OthelloOpt10\", \"ReversiOpt10\"})) {\n      state_ = newState<Othello2::State<10>>(seed);\n    } else if (isGameNameMatched({\"OthelloOpt16\", \"ReversiOpt16\"})) {\n      state_ = newState<Othello2::State<16>>(seed);\n    } else if (isGameNameMatched({\"GameOfTheAmazons\", \"Amazons\"})) {\n      state_ = newState<Amazons::State>(seed);\n    } else if (isGameNameMatched({\"ChineseCheckers\"})) {\n      state_ = newState<ChineseCheckers::State>(seed);\n    } else if (isGameNameMatched({\"Hex5pie\"})) {\n      
state_ = newState<Hex::State<5, true>>(seed);\n    } else if (isGameNameMatched({\"Hex11pie\"})) {\n      state_ = newState<Hex::State<11, true>>(seed);\n    } else if (isGameNameMatched({\"Hex13pie\"})) {\n      state_ = newState<Hex::State<13, true>>(seed);\n    } else if (isGameNameMatched({\"Hex19pie\"})) {\n      state_ = newState<Hex::State<19, true>>(seed);\n    } else if (isGameNameMatched({\"Hex5\"})) {\n      state_ = newState<Hex::State<5, false>>(seed);\n    } else if (isGameNameMatched({\"Hex11\"})) {\n      state_ = newState<Hex::State<11, false>>(seed);\n    } else if (isGameNameMatched({\"Hex13\"})) {\n      state_ = newState<Hex::State<13, false>>(seed);\n    } else if (isGameNameMatched({\"Hex19\"})) {\n      state_ = newState<Hex::State<19, false>>(seed);\n    } else if (isGameNameMatched(\n                   {\"Havannah5pieExt\"})) {  // ext = borders, corners\n      state_ = newState<Havannah::State<5, true, true>>(seed);\n    } else if (isGameNameMatched({\"Havannah10pieExt\"})) {\n      state_ = newState<Havannah::State<10, true, true>>(seed);\n    } else if (isGameNameMatched({\"Havannah8pieExt\"})) {\n      state_ = newState<Havannah::State<8, true, true>>(seed);\n    } else if (isGameNameMatched({\"Havannah5pie\"})) {\n      state_ = newState<Havannah::State<5, true, false>>(seed);\n    } else if (isGameNameMatched({\"Havannah8pie\"})) {\n      state_ = newState<Havannah::State<8, true, false>>(seed);\n    } else if (isGameNameMatched({\"Havannah10pie\"})) {\n      state_ = newState<Havannah::State<10, true, false>>(seed);\n    } else if (isGameNameMatched({\"Havannah5\"})) {\n      state_ = newState<Havannah::State<5, false, false>>(seed);\n    } else if (isGameNameMatched({\"Havannah8\"})) {\n      state_ = newState<Havannah::State<8, false, false>>(seed);\n    } else if (isGameNameMatched({\"Havannah10\"})) {\n      state_ = newState<Havannah::State<10, false, false>>(seed);\n    } else if (isGameNameMatched({\"Breakthrough\"})) {\n     
 state_ = newState<StateForBreakthrough<false>>(seed);\n    } else if (isGameNameMatched({\"BreakthroughV2\"})) {\n      state_ = newState<StateForBreakthrough<true>>(seed);\n    } else if (gameName.rfind(\"Ludii\", 0) == 0) {\n#ifdef NO_JAVA\n      throw std::runtime_error(\n          \"Java/JNI support has not been built in, but is required for Ludii\");\n#else\n      std::string ludii_name = gameName.substr(5);\n      Ludii::JNIUtils::InitJVM(\"\");  // Use default /ludii/Ludii.jar path\n      JNIEnv* jni_env = Ludii::JNIUtils::GetEnv();\n\n      if (jni_env) {\n        if (gameOptions.size() > 0) {\n          Ludii::LudiiGameWrapper game_wrapper(ludii_name, gameOptions);\n          for (const std::string option : gameOptions) {\n            std::cout << \"Using Game Option: \" << option << std::endl;\n          }\n          state_ =\n              newState<Ludii::LudiiStateWrapper>(seed, std::move(game_wrapper));\n        } else {\n          Ludii::LudiiGameWrapper game_wrapper(ludii_name);\n          state_ =\n              newState<Ludii::LudiiStateWrapper>(seed, std::move(game_wrapper));\n        }\n      } else {\n        // Probably means we couldn't find the Ludii.jar file\n        throw std::runtime_error(\n            \"Failed to create Ludii game due to missing JNI Env!\");\n      }\n#endif\n    } else if (isGameNameMatched({\"Tristannogo\"})) {\n      state_ = newState<StateForTristannogo>(seed);\n    } else if (isGameNameMatched({\"OuterOpenGomoku\", \"OOGomoku\"})) {\n      state_ = newState<StateForOOGomoku>(seed);\n    } else if (isGameNameMatched({\"Minishogi\"})) {\n      state_ = newState<StateForMinishogi<1>>(seed);\n    } else if (isGameNameMatched({\"MinishogiV2\"})) {\n      state_ = newState<StateForMinishogi<2>>(seed);\n    } else if (isGameNameMatched({\"Surakarta\"})) {\n      state_ = newState<StateForSurakarta>(seed);\n    } else if (isGameNameMatched({\"DiceShogi\"})) {\n      state_ = newState<StateForDiceshogi>(seed);\n    } else 
if (isGameNameMatched({\"BlockGo\"})) {\n      state_ = newState<StateForBlockGo>(seed);\n    } else if (isGameNameMatched({\"YINSH\"})) {\n      state_ = newState<StateForYinsh>(seed);\n    } else if (isGameNameMatched({\"GomokuSwap2\", \"Swap2Gomoku\", \"Gomoku\"})) {\n      state_ = newState<GomokuSwap2::State>(seed);\n    } else if (isGameNameMatched({\"KyotoShogi\"})) {\n      state_ = newState<StateForKyotoshogi>(seed);\n    } else if (isGameNameMatched({\"Einstein\"})) {\n      state_ = newState<StateForEinstein>(seed);\n    } else if (isGameNameMatched({\"WeakSchur_3_20\"})) {  // subsets, maxNumber\n      state_ = newState<weakschur::State<3, 20>>(seed);\n\n    } else if (isGameNameMatched({\"WeakSchur_4_66\"})) {  // subsets, maxNumber\n      state_ = newState<weakschur::State<4, 66>>(seed);\n      // } else if (isGameNameMatched(gameName, {\"Nogo\"})) {\n      //   state_ = newState<StateForNogo>();\n    } else if (isGameNameMatched(\n                   {\"WeakSchur_5_197\",\n                    \"WalkerSchur\"})) {  // subsets, maxNumber  // is Walker\n                                        // right ?   (1952! he said 197...)\n      state_ = newState<weakschur::State<5, 197>>(seed);\n    } else if (isGameNameMatched({\"WeakSchur_3_70\", \"ImpossibleSchur\"})) {\n      state_ = newState<weakschur::State<3, 70>>(seed);\n    } else if (isGameNameMatched(\n                   {\"WeakSchur_6_583\",\n                    \"FabienSchur\"})) {  // subsets, maxNumber  // beating F.\n                                        // Teytaud et al\n      state_ = newState<weakschur::State<6, 583>>(seed);\n    } else if (isGameNameMatched({\"WeakSchur_7_1737\",\n                                  \"Arpad7Schur\"})) {  // beating A. Rimmel et al\n      state_ = newState<weakschur::State<7, 1737>>(seed);\n    } else if (isGameNameMatched({\"WeakSchur_8_5197\",\n                                  \"Arpad8Schur\"})) {  // beating A. 
Rimmel et al\n      state_ = newState<weakschur::State<8, 5197>>(seed);\n    } else if (isGameNameMatched({\"WeakSchur_9_15315\",\n                                  \"Arpad9Schur\"})) {  // beating A. Rimmel et al\n      state_ = newState<weakschur::State<9, 15315>>(seed);\n    } else if (isGameNameMatched({\"Chess\"})) {\n      state_ = newState<chess::State>(seed);\n    } else {\n      throw std::runtime_error(\"Unknown game name '\" + gameName + \"'\");\n    }\n\n    setFeatures(outFeatures, turnFeaturesSingleChannel,\n                turnFeaturesMultiChannel, geometricFeatures, history,\n                randomFeatures, oneFeature);\n\n    state_->Initialize();\n  }\n\n  template <typename T, typename... A>\n  std::unique_ptr<State> newState(A&&... args) {\n    auto r = std::make_unique<T>(std::forward<A>(args)...);\n    r->template initializeAs<T>();\n    return r;\n  }\n\n  virtual bool isOnePlayerGame() const {\n    return state_->isOnePlayerGame();\n  }\n\n  void setFeatures(bool outFeatures,\n                   bool turnFeaturesSingleChannel,\n                   bool turnFeaturesMultiChannel,\n                   bool geometricFeatures,\n                   int history,\n                   int randomFeatures,\n                   bool oneFeature) {\n    featopts.emplace_back();\n    FeatureOptions& opt = featopts.back();\n    opt.outFeatures = outFeatures;\n    opt.turnFeaturesSingleChannel = turnFeaturesSingleChannel;\n    opt.turnFeaturesMultiChannel = turnFeaturesMultiChannel;\n    opt.geometricFeatures = geometricFeatures;\n    opt.history = history;\n    opt.randomFeatures = randomFeatures;\n    opt.oneFeature = oneFeature;\n\n    state_->setFeatures(&opt);\n  }\n\n  void addHumanPlayer(std::shared_ptr<HumanPlayer> player) {\n    players_.push_back(std::move(player));\n  }\n\n  void addTPPlayer(std::shared_ptr<TPPlayer> player) {\n    players_.push_back(std::move(player));\n  }\n\n  void addEvalPlayer(std::shared_ptr<mcts::MctsPlayer> player) {\n    
assert(evalMode);\n    players_.push_back(std::move(player));\n  }\n\n  void addPlayer(std::shared_ptr<core::ActorPlayer> player,\n                 std::shared_ptr<tube::DataChannel> dc,\n                 std::shared_ptr<Game> game,\n                 std::shared_ptr<core::ActorPlayer> devplayer) {\n    assert(dc != nullptr && !evalMode);\n\n    players_.push_back(player);\n    playerGame_.push_back(game);\n\n    if (devplayer) {\n      player = devplayer;\n    }\n    int seqlen = player->rnnSeqlen();\n\n    auto addseq = [&](std::vector<int64_t> a) {\n      if (seqlen) {\n        a.insert(a.begin(), seqlen);\n      }\n      return a;\n    };\n\n    auto feat = tube::EpisodicTrajectory(\n        \"s\", addseq(state_->GetFeatureSize()), torch::kFloat32);\n    auto rnnInitialState = tube::EpisodicTrajectory(\n        \"rnn_initial_state\", player->rnnStateSize(), torch::kFloat32);\n    auto rnnStateMask = tube::EpisodicTrajectory(\n        \"rnn_state_mask\", addseq({1}), torch::kFloat32);\n    auto pi = tube::EpisodicTrajectory(\n        \"pi\", addseq(state_->GetActionSize()), torch::kFloat32);\n    auto piMask = tube::EpisodicTrajectory(\n        \"pi_mask\", addseq(state_->GetActionSize()), torch::kFloat32);\n    auto actionPi = tube::EpisodicTrajectory(\n        \"action_pi\", addseq(state_->GetActionSize()), torch::kFloat32);\n    auto v = tube::EpisodicTrajectory(\n        \"v\", addseq({player->vOutputs()}), torch::kFloat32);\n    auto predV = tube::EpisodicTrajectory(\n        \"pred_v\", addseq({player->vOutputs()}), torch::kFloat32);\n    int predicts = (predictEndState ? 
2 : 0) + predictNStates;\n    auto predictSize = state_->GetRawFeatureSize();\n    predictSize[0] *= predicts;\n    auto predictPi = tube::EpisodicTrajectory(\n        \"predict_pi\", addseq(predictSize), torch::kFloat32);\n    auto predictPiMask = tube::EpisodicTrajectory(\n        \"predict_pi_mask\", addseq(predictSize), torch::kFloat32);\n\n    tube::Dispatcher dispatcher(std::move(dc));\n    std::vector<std::shared_ptr<tube::DataBlock>> send;\n    send = {feat.buffer, pi.buffer, piMask.buffer, v.buffer, predV.buffer};\n    if (predictEndState + predictNStates) {\n      send.push_back(predictPi.buffer);\n      send.push_back(predictPiMask.buffer);\n      predictPi_.push_back(predictPi);\n      predictPiMask_.push_back(predictPiMask);\n    }\n    if (seqlen) {\n      send.push_back(rnnInitialState.buffer);\n      rnnInitialState_.push_back(rnnInitialState);\n      send.push_back(rnnStateMask.buffer);\n      rnnStateMask_.push_back(rnnStateMask);\n    }\n    if (dynamic_cast<ForwardPlayer*>(&*player) != nullptr) {\n      send.push_back(actionPi.buffer);\n      actionPi_.push_back(actionPi);\n    }\n    dispatcher.addDataBlocks(send, {});\n\n    feature_.push_back(feat);\n    pi_.push_back(pi);\n    piMask_.push_back(piMask);\n    v_.push_back(v);\n    predV_.push_back(predV);\n    dispatchers_.push_back(dispatcher);\n  }\n\n  const std::vector<int64_t>& getRawFeatSize() {\n    return state_->GetRawFeatureSize();\n  }\n\n  const std::vector<int64_t>& getFeatSize() {\n    return state_->GetFeatureSize();\n  }\n\n  const std::vector<int64_t>& getActionSize() {\n    return state_->GetActionSize();\n  }\n\n  virtual void mainLoop() override;\n\n  std::vector<float> getResult() {\n    return result_;\n  }\n\n  virtual void terminate() override {\n#ifdef DEBUG_GAME\n    std::cout << \"game \" << this << \", setting terminating flag\" << std::endl;\n#endif\n    EnvThread::terminate();\n// std::unique_lock<std::mutex> lk(terminateMutex_);\n#ifdef DEBUG_GAME\n    std::cout 
<< \"game \" << this << \", terminating dispatchers\" << std::endl;\n#endif\n    for (auto& v : dispatchers_) {\n      v.terminate();\n    }\n#ifdef DEBUG_GAME\n    std::cout << \"game \" << this << \", terminating players\" << std::endl;\n#endif\n    for (auto& v : players_) {\n      v->terminate();\n    }\n  }\n\n  virtual EnvThread::Stats get_stats() override;\n\n  const int numEpisode;\n  const bool evalMode;\n  const int perThreadBatchSize;\n  const int maxRewinds;\n  const bool predictEndState;\n  const int predictNStates;\n\n private:\n  bool isGameNameMatched(const std::vector<std::string>&& allowedNames) {\n    auto strToLower = [&](const std::string& str) {\n      std::string s = std::string(str);\n      std::transform(s.begin(), s.end(), s.begin(),\n                     [](unsigned char c) { return std::tolower(c); });\n      return s;\n    };\n\n    std::string nameLower = strToLower(gameName_);\n    for (auto& allowedName : allowedNames) {\n      if (nameLower == strToLower(allowedName))\n        return true;\n    }\n    return false;\n  }\n\n  void reset() {\n    state_->reset();\n  }\n\n  std::optional<int> parseSpecialAction(const std::string& str);\n\n  void step();\n\n  void sendTrajectory();\n\n  bool prepareForSend(int playerId);\n\n  std::unique_ptr<State> state_;\n  std::vector<std::shared_ptr<Player>> players_;\n  std::vector<std::shared_ptr<Game>> playerGame_;\n\n  std::vector<tube::EpisodicTrajectory> feature_;\n  std::vector<tube::EpisodicTrajectory> rnnStateMask_;\n  std::vector<tube::EpisodicTrajectory> rnnInitialState_;\n  std::vector<tube::EpisodicTrajectory> pi_;\n  std::vector<tube::EpisodicTrajectory> piMask_;\n  std::vector<tube::EpisodicTrajectory> actionPi_;\n  std::vector<tube::EpisodicTrajectory> v_;\n  std::vector<tube::EpisodicTrajectory> predV_;\n  std::vector<tube::EpisodicTrajectory> predictPi_;\n  std::vector<tube::EpisodicTrajectory> predictPiMask_;\n\n  std::vector<tube::Dispatcher> dispatchers_;\n\n  
std::list<FeatureOptions> featopts;\n\n  std::vector<float> result_;\n\n  std::mutex mutexStats_;\n  EnvThread::Stats stats_;\n\n  std::string lastAction_;\n  bool hasPrintedHumanHelp_ = false;\n  bool isInSingleMoveMode_ = false;\n  float lastMctsValue_ = 0.0f;\n  bool printMoves_ = false;\n  std::string gameName_;\n  std::vector<torch::Tensor> rnnState_;\n};\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/human_player.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n#include \"player.h\"\n#include \"state.h\"\n\nnamespace core {\n\nclass HumanPlayer : public Player {\n public:\n  HumanPlayer()\n      : Player(true){\n\n        };\n\n  _Action act(State& state) {\n    int index = state.humanInputAction();\n    assert(false);\n    auto& legalActions = state.GetLegalActions();\n    assert(index < (int)legalActions.size());\n    std::cout << \" applying action... \" << std::endl;\n    return legalActions[index];\n    // std::cerr << \" applied action... \" << std::endl;\n  }\n};\n\nclass TPPlayer : public Player {\n public:\n  TPPlayer()\n      : Player(true) {\n    isTP_ = true;\n  };\n\n  _Action act(State& state) {\n    assert(!state.isStochastic());  // TPPlayer is not implemented for\n                                    // stochastic games. Could be done though.\n    assert(false);\n    int index = state.TPInputAction();\n    auto& legalActions = state.GetLegalActions();\n    assert(index < (int)legalActions.size());\n    std::cerr << \" applying action... \" << std::endl;\n    return legalActions[index];\n    // std::cerr << \" applied action... \" << std::endl;\n  }\n};\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/model_manager.cc",
    "content": "\n#include \"model_manager.h\"\n\n#include \"common/async.h\"\n#include \"common/thread_id.h\"\n#include \"distributed/distributed.h\"\n#include \"distributed/rpc.h\"\n#include \"replay_buffer.h\"\n#include \"tube/src_cpp/data_channel.h\"\n\n#include <c10/cuda/CUDAGuard.h>\n#include <c10/cuda/CUDAStream.h>\n#include <fmt/printf.h>\n#include <torch/extension.h>\n#include <torch/script.h>\n\nnamespace core {\n\nstd::unordered_map<std::string, at::Tensor> convertIValueToMap(\n    const c10::IValue& value) {\n  std::unordered_map<std::string, torch::Tensor> map;\n  auto dict = value.toGenericDict();\n\n#ifdef PYTORCH12\n  for (auto& name2tensor : dict) {\n    auto name = name2tensor.key().toString();\n    torch::Tensor tensor = name2tensor.value().toTensor();\n#else\n  auto ivalMap = dict->elements();\n  for (auto& name2tensor : ivalMap) {\n    auto name = name2tensor.first.toString();\n    torch::Tensor tensor = name2tensor.second.toTensor();\n#endif\n\n    tensor = tensor.detach();\n    map.insert({name->string(), tensor});\n  }\n  return map;\n}\n\n// A mutex where threads are strictly ordered by their priority when waiting to\n// acquire the mutex.\n// Threads must call PriorityMutex::setThreadPriority to set their priority.\n// Lower priority values will acquire the mutex first.\nclass PriorityMutex {\n  struct TLData {\n    TLData* next = nullptr;\n    int priority = 0;\n    std::condition_variable cv;\n    bool waiting = false;\n  };\n\n  static inline thread_local std::unique_ptr<TLData> tldata;\n\n  std::mutex mut;\n  TLData* queue = nullptr;\n  TLData* owner = nullptr;\n\n  static TLData& getTLData() {\n    if (!tldata) {\n      tldata = std::make_unique<TLData>();\n    }\n    return *tldata;\n  }\n\n public:\n  static void setThreadPriority(int priority) {\n    getTLData().priority = priority;\n  }\n\n  void lock() {\n    std::unique_lock<std::mutex> l(mut);\n    auto& tld = getTLData();\n    if (!owner) {\n      owner = &tld;\n      
return;\n    } else {\n      if (!queue) {\n        queue = &tld;\n      } else {\n        TLData** insert = &queue;\n        TLData* next = queue;\n        while (next && next->priority <= tld.priority) {\n          insert = &next->next;\n          next = next->next;\n        }\n        tld.next = next;\n        *insert = &tld;\n      }\n    }\n    tld.waiting = true;\n    while (tld.waiting) {\n      tld.cv.wait(l);\n    }\n  }\n  void unlock() {\n    std::unique_lock<std::mutex> l(mut);\n    if (queue) {\n      auto* next = queue;\n      next->waiting = false;\n      queue = next->next;\n      owner = next;\n      next->next = nullptr;\n      l.unlock();\n      next->cv.notify_all();\n    } else {\n      owner = nullptr;\n    }\n  }\n};\n\nnamespace {\ntemplate <typename T> struct StreamBuffer : std::streambuf {\n  T& buf;\n  StreamBuffer(T& buf)\n      : buf(buf) {\n  }\n  virtual std::streamsize xsputn(const char_type* s,\n                                 std::streamsize count) override {\n    if (buf.capacity() < buf.size() + count) {\n      buf.reserve(std::max(buf.size() * 2, buf.size() + count + 16));\n    }\n    size_t prevSize = buf.size();\n    buf.resize(buf.size() + count);\n    char* dst = buf.data() + prevSize;\n    memcpy(dst, s, count);\n    return count;\n  }\n\n  void write(const std::string& str) {\n    size_t len = str.size();\n    xsputn((char*)&len, sizeof(len));\n    xsputn(str.data(), str.size());\n  }\n};\n\ntemplate <typename T> struct StreamReadBuffer : std::streambuf {\n  T& buf;\n  size_t readPos = 0;\n  StreamReadBuffer(T& buf)\n      : buf(buf) {\n  }\n  virtual std::streamsize xsgetn(char_type* s, std::streamsize count) override {\n    count = std::min((size_t)count, buf.size() - readPos);\n    memcpy(s, buf.data() + readPos, count);\n    readPos += count;\n    return count;\n  }\n\n  std::string_view read() {\n    size_t len = 0;\n    size_t n = xsgetn((char*)&len, sizeof(len));\n    if (n != sizeof(len)) {\n      return {};\n    
}\n    if (buf.size() - readPos < len) {\n      return {};\n    }\n    std::string_view r{buf.data() + readPos, len};\n    readPos += len;\n    return r;\n  }\n};\n\nstd::mutex deviceMutexMutex;\nstd::unordered_map<std::string, PriorityMutex> deviceMutex;\nPriorityMutex* getDeviceMutex(std::string device) {\n  std::lock_guard l(deviceMutexMutex);\n  return &deviceMutex[device];\n}\n\n}  // namespace\n\nusing TorchJitModel = torch::jit::script::Module;\n\nclass ModelManagerImpl {\n public:\n  ModelManagerImpl(int actBatchsize,\n                   const std::string& device,\n                   int replayCapacity,\n                   int seed,\n                   const std::string& jitModel,\n                   int trainChannelTimeoutMs,\n                   int trainChannelNumSlots)\n      : jitModel_(jitModel)\n      , device_(device)\n      , replayBuffer_(replayCapacity, seed) {\n    trainChannel_ = std::make_shared<tube::DataChannel>(\n        \"train\", trainChannelNumSlots, trainChannelTimeoutMs);\n    actChannel_ = std::make_shared<tube::DataChannel>(\"act\", actBatchsize, -1);\n\n#ifdef PYTORCH12\n    model_ =\n        std::make_shared<TorchJitModel>(torch::jit::load(jitModel_, device));\n#else\n    model_ = torch::jit::load(jitModel_, device);\n#endif\n    model_->eval();\n\n    dtype_ = at::ScalarType::Float;\n\n    model_->to(dtype_);\n\n    modelMutex_ = getDeviceMutex(device);\n  }\n\n  ~ModelManagerImpl() {\n    terminate_ = true;\n    actChannel_->terminate();\n    trainChannel_->terminate();\n    for (auto& v : threads_) {\n      v.join();\n    }\n    if (modelUpdateThread.joinable()) {\n      modelUpdateThread.join();\n    }\n  }\n\n  void startServer(std::string serverListenEndpoint) {\n    server_.emplace();\n    server_->setOnTrainData(\n        [this](std::unordered_map<std::string, torch::Tensor> batch) {\n          replayBuffer_.add(std::move(batch));\n        });\n    server_->start(serverListenEndpoint);\n    fmt::printf(\"Listening on 
%s\\n\", serverListenEndpoint);\n  }\n\n  void startClient(std::string serverConnectHostname) {\n    auto firstUpdate = std::make_shared<std::promise<bool>>();\n    auto firstUpdateFuture = firstUpdate->get_future();\n    client_.emplace();\n    client_->setOnUpdateModel(\n        [this, firstUpdate](\n            std::string_view id,\n            std::unordered_map<std::string, torch::Tensor> dict) mutable {\n          if (firstUpdate) {\n            firstUpdate->set_value(true);\n            firstUpdate.reset();\n          }\n          if (!dontRequestModelUpdates_) {\n            fmt::printf(\"onUpdateModel '%s'\\n\", id);\n            updateModel(dict);\n          }\n        });\n    client_->connect(serverConnectHostname);\n    fmt::printf(\"Connected to %s\\n\", serverConnectHostname);\n\n    modelUpdateThread = std::thread([this]() {\n      while (!terminate_ && !trainChannel_->terminated()) {\n        if (!dontRequestModelUpdates_) {\n          client_->requestModel(isTournamentOpponent_);\n        }\n        for (int i = 0; i != 2 && !terminate_ && !trainChannel_->terminated();\n             ++i) {\n          std::this_thread::sleep_for(std::chrono::seconds(2));\n        }\n      }\n    });\n\n    if (!dontRequestModelUpdates_) {\n      fmt::printf(\"Waiting for model\\n\");\n      firstUpdateFuture.wait();\n      fmt::printf(\"Received model\\n\");\n    } else {\n      fmt::printf(\"Not requesting model updates\\n\");\n    }\n  }\n\n  std::unique_ptr<rpc::Rpc> replayBufferRpc;\n  std::shared_ptr<rpc::Server> replayBufferRpcServer;\n  std::shared_ptr<rpc::Client> replayBufferRpcClient;\n\n  void startReplayBufferServer(std::string endpoint) {\n    if (endpoint.substr(0, 6) == \"tcp://\") {\n      endpoint.erase(0, 6);\n    }\n    if (!replayBufferRpc) {\n      replayBufferRpc = std::make_unique<rpc::Rpc>();\n      replayBufferRpc->asyncRun(8);\n    }\n\n    replayBufferRpcServer = replayBufferRpc->listen(\"\");\n    
replayBufferRpcServer->define(\"sample\", &ModelManagerImpl::sample, this);\n\n    replayBufferRpcServer->listen(endpoint);\n  }\n\n  void startReplayBufferClient(std::string endpoint) {\n    if (endpoint.substr(0, 6) == \"tcp://\") {\n      endpoint.erase(0, 6);\n    }\n    if (!replayBufferRpc) {\n      replayBufferRpc = std::make_unique<rpc::Rpc>();\n      replayBufferRpc->asyncRun(8);\n    }\n\n    replayBufferRpcClient = replayBufferRpc->connect(endpoint);\n  }\n\n  SampleResult remoteSample(int sampleSize) {\n    return {replayBufferRpcClient\n                ->async<std::unordered_map<std::string, torch::Tensor>>(\n                    \"sample\", sampleSize)};\n  }\n\n  std::shared_ptr<tube::DataChannel> getTrainChannel() {\n    return trainChannel_;\n  }\n\n  std::shared_ptr<tube::DataChannel> getActChannel() {\n    return actChannel_;\n  }\n\n  std::unordered_map<std::string, torch::Tensor> cloneStateDict(\n      const std::unordered_map<std::string, torch::Tensor>& stateDict) {\n    torch::NoGradGuard ng;\n    std::unordered_map<std::string, torch::Tensor> r;\n    for (auto& [name, tensor] : stateDict) {\n      r[name] = tensor.detach().to(\n          torch::TensorOptions().device(torch::kCPU).dtype(dtype_), false,\n          true);\n    }\n    return r;\n  }\n\n  void addTournamentModel(\n      std::string id,\n      const std::unordered_map<std::string, torch::Tensor>& stateDict) {\n    if (server_) {\n      fmt::printf(\" -- ADD MODEL %s --\\n\", id);\n      server_->updateModel(id, cloneStateDict(stateDict));\n    }\n  }\n\n#ifdef PYTORCH15\n  void loadModelStateDict(\n      TorchJitModel& model,\n      const std::unordered_map<std::string, torch::Tensor>& stateDict) {\n    std::unordered_map<std::string, torch::Tensor> dst;\n    for (const auto& v : model.named_parameters()) {\n      dst[v.name] = v.value;\n    }\n    for (const auto& v : model.named_buffers()) {\n      dst[v.name] = v.value;\n    }\n\n    for (auto& [k, v] : stateDict) {\n      auto 
i = dst.find(k);\n      if (i == dst.end()) {\n        throw std::runtime_error(\n            fmt::sprintf(\"key '%s' not found in destination state dict\", k));\n      } else if (i->second.sizes() != v.sizes()) {\n        throw std::runtime_error(\n            fmt::sprintf(\"state dict key '%s' shape mismatch\", k));\n      }\n    }\n\n    for (auto& [k, v] : dst) {\n      auto i = stateDict.find(k);\n      if (i == stateDict.end()) {\n        throw std::runtime_error(\n            fmt::sprintf(\"key '%s' not found in source state dict\", k));\n      }\n    }\n\n    fmt::printf(\"loadModelStateDict: state dicts OK\\n\");\n\n    for (auto& v : stateDict) {\n      auto i = dst.find(v.first);\n      if (i != dst.end()) {\n        dst.at(v.first).copy_(v.second).detach();\n      } else {\n        fmt::printf(\n            \"copyModelStateDict: Unknown state dict entry '%s'\\n\", v.first);\n        std::abort();\n      }\n    }\n    model.eval();\n  }\n#else\n  void loadModelStateDict(\n      TorchJitModel& model,\n      const std::unordered_map<std::string, torch::Tensor>& stateDict) {\n    for (auto& [name, tensor] : stateDict) {\n      const char* ptr = name.c_str();\n      std::string memberNameString;\n      const char* memberNamePtr = ptr;\n      auto* currentModule = &model;\n      decltype(currentModule->find_module(memberNameString)) subModule;\n      while (*ptr) {\n        if (*ptr == '.') {\n          memberNameString.assign(memberNamePtr, ptr - memberNamePtr);\n          subModule = currentModule->find_module(memberNameString);\n          if (!subModule) {\n            fmt::printf(\n                \"copyModelStateDict: Unknown state dict entry '%s' -- could \"\n                \"not find module '%s'\\n\",\n                name, memberNameString);\n            std::abort();\n          }\n          currentModule = &*subModule;\n          ++ptr;\n          memberNamePtr = ptr;\n        } else {\n          ++ptr;\n        }\n      }\n      
memberNameString.assign(memberNamePtr, ptr - memberNamePtr);\n\n      if (auto p = currentModule->find_parameter(memberNameString); p) {\n        p->value().toTensor().copy_(tensor);\n      } else if (auto b = currentModule->find_buffer(memberNameString); b) {\n        b->value().toTensor().copy_(tensor);\n      } else {\n        fmt::printf(\n            \"copyModelStateDict: Unknown state dict entry '%s' -- could not \"\n            \"find parameter/buffer '%s'\\n\",\n            name, memberNameString);\n        std::abort();\n      }\n    }\n    model.eval();\n  }\n#endif\n\n  void updateModel(\n      const std::unordered_map<std::string, torch::Tensor>& stateDict) {\n    torch::NoGradGuard ng;\n    fmt::printf(\" -- UPDATE MODEL --\\n\");\n    if (server_) {\n      server_->updateModel(\"dev\", cloneStateDict(stateDict));\n    }\n    PriorityMutex::setThreadPriority(-9);\n    std::lock_guard<PriorityMutex> lk(*modelMutex_);\n    loadModelStateDict(*model_, stateDict);\n  }\n\n  int bufferSize() const {\n    return replayBuffer_.size();\n  }\n\n  bool bufferFull() const {\n    return replayBuffer_.full();\n  }\n\n  std::unordered_map<std::string, torch::Tensor> sample(int sampleSize) {\n    return replayBuffer_.sample(sampleSize);\n  }\n\n  void start() {\n    threads_.emplace_back(&ModelManagerImpl::trainThread, this);\n\n    threads_.emplace_back(&ModelManagerImpl::actThread, this);\n  }\n\n  void trainThread() {\n    torch::NoGradGuard ng;\n    if (client_) {\n      std::atomic<bool> qdone{false};\n      int qwaiters = 0;\n      std::mutex qmut;\n      std::condition_variable qcv;\n      std::deque<std::unordered_map<std::string, torch::Tensor>> queue;\n      std::vector<std::thread> qthreads;\n      for (int i = 0; i != 4; ++i) {\n        qthreads.emplace_back([&]() {\n          while (true) {\n            std::unique_lock l(qmut);\n            ++qwaiters;\n            while (queue.empty()) {\n              if (qdone) {\n                --qwaiters;\n        
        return;\n              }\n              qcv.wait(l);\n            }\n            --qwaiters;\n            auto batch = std::move(queue.front());\n            queue.pop_front();\n            l.unlock();\n            client_->sendTrainData(batch);\n          }\n        });\n      }\n      while (true) {\n        auto batch = trainChannel_->getInput();\n        if (terminate_ || trainChannel_->terminated()) {\n          break;\n        }\n        trainChannel_->setReply({});\n        std::lock_guard l(qmut);\n        if (queue.size() < 128) {\n          queue.push_back(batch);\n        } else {\n          fmt::printf(\"Warning: train data queue is full, discarding data\\n\");\n        }\n        if (qwaiters) {\n          qcv.notify_one();\n        }\n      }\n      std::lock_guard l(qmut);\n      qdone = true;\n      qcv.notify_all();\n    } else {\n      while (true) {\n        auto batch = trainChannel_->getInput();\n        if (terminate_ || trainChannel_->terminated()) {\n          break;\n        }\n        replayBuffer_.add(batch);\n        trainChannel_->setReply({});\n      }\n    }\n  }\n\n  void actThread() {\n    torch::NoGradGuard ng;\n    while (true) {\n      auto batch = actChannel_->getInput();\n      if (terminate_ || actChannel_->terminated()) {\n        break;\n      }\n      // TODO[hengyuan]: temp hard code\n      auto s = batch[\"s\"].to(device_);\n      std::vector<torch::jit::IValue> input;\n      input.push_back(s);\n      PriorityMutex::setThreadPriority(-1);\n      std::unique_lock<PriorityMutex> lk(*modelMutex_);\n      auto output = model_->forward(input);\n      lk.unlock();\n      auto reply = convertIValueToMap(output);\n      actChannel_->setReply(reply);\n    }\n  }\n\n  void testAct() {\n    torch::NoGradGuard ng;\n    std::vector<torch::jit::IValue> inputs;\n    auto x = torch::ones({1, 6 * 7 * 2}, torch::kFloat32);\n    inputs.push_back(x);\n    auto y = model_->forward(inputs);\n    auto reply = convertIValueToMap(y);\n   
 for (auto& name2tensor : reply) {\n      std::cout << name2tensor.first << \": \" << std::endl;\n      std::cout << name2tensor.second << std::endl;\n    }\n  }\n\n  void batchAct(torch::Tensor input,\n                torch::Tensor v,\n                torch::Tensor pi,\n                torch::Tensor rnnState = {},\n                torch::Tensor* rnnStateOut = nullptr) {\n    torch::NoGradGuard ng;\n    bool isCuda = device_.is_cuda();\n    PriorityMutex::setThreadPriority(common::getThreadId());\n    std::optional<c10::cuda::CUDAStreamGuard> g;\n    if (isCuda) {\n      g.emplace(c10::cuda::getStreamFromPool(false, device_.index()));\n    }\n    std::vector<torch::jit::IValue> inp;\n    inp.push_back(input.to(device_, dtype_, true));\n    if (rnnState.defined()) {\n      inp.push_back(rnnState.to(device_, dtype_, true));\n    }\n    std::unique_lock<PriorityMutex> lk(*modelMutex_);\n    auto output = model_->forward(inp);\n    if (isCuda) {\n      g->current_stream().synchronize();\n    }\n    lk.unlock();\n    auto reply = convertIValueToMap(output);\n    v.copy_(reply[\"v\"], true);\n    pi.copy_(reply[\"pi_logit\"], true);\n    if (rnnStateOut) {\n      *rnnStateOut = reply[\"rnn_state\"];\n    }\n    if (isCuda) {\n      g->current_stream().synchronize();\n    }\n  }\n\n  struct Timer {\n    std::chrono::steady_clock::time_point start;\n    Timer() {\n      reset();\n    }\n    void reset() {\n      start = std::chrono::steady_clock::now();\n    }\n    float elapsedAt(std::chrono::steady_clock::time_point now) {\n      return std::chrono::duration_cast<\n                 std::chrono::duration<float, std::ratio<1, 1>>>(now - start)\n          .count();\n    }\n    float elapsed() {\n      return elapsedAt(std::chrono::steady_clock::now());\n    }\n    float elapsedReset() {\n      auto now = std::chrono::steady_clock::now();\n      float r = elapsedAt(now);\n      start = now;\n      return r;\n    }\n  };\n\n  int findBatchSize(torch::Tensor input, 
torch::Tensor rnnState = {}) {\n    if (hasFoundBatchSize_) {\n      return foundBatchSize_;\n    }\n    torch::NoGradGuard ng;\n    bool isCuda = device_.is_cuda();\n    PriorityMutex::setThreadPriority(common::getThreadId());\n    std::optional<c10::cuda::CUDAStreamGuard> g;\n    if (isCuda) {\n      g.emplace(c10::cuda::getStreamFromPool(false, device_.index()));\n    } else {\n      return 1;\n    }\n    std::vector<torch::jit::IValue> inp;\n    torch::Tensor gpuinput = input.to(device_, dtype_, true);\n    torch::Tensor gpurnnState;\n    if (rnnState.defined()) {\n      gpurnnState = rnnState.to(device_, dtype_, true);\n    }\n    auto prep = [&](int bs) {\n      inp.clear();\n      std::vector<torch::Tensor> batch;\n      for (int i = 0; i != bs; ++i) {\n        batch.push_back(gpuinput);\n      }\n      inp.push_back(torch::stack(batch).to(device_, dtype_, true));\n      if (rnnState.defined()) {\n        batch.clear();\n        for (int i = 0; i != bs; ++i) {\n          batch.push_back(gpurnnState);\n        }\n        inp.push_back(torch::stack(batch).to(device_, dtype_, true));\n      }\n      g->current_stream().synchronize();\n    };\n    std::unique_lock<PriorityMutex> lk(*modelMutex_);\n    if (hasFoundBatchSize_) {\n      return foundBatchSize_;\n    }\n    auto call = [&]() {\n      model_->forward(inp);\n      if (isCuda) {\n        g->current_stream().synchronize();\n      }\n    };\n    fmt::printf(\"Finding batch size\\n\");\n    prep(1);\n    // warm up\n    for (int i = 0; i != 10; ++i) {\n      call();\n    }\n    Timer t;\n    for (int i = 0; i != 10; ++i) {\n      call();\n    }\n    float call1 = t.elapsed() / 10.0f * 1000.0f;\n    fmt::printf(\"Base latency: %gms\\n\", call1);\n\n    float maxms = findBatchSizeMaxMs_;\n    int maxbs = findBatchSizeMaxBs_;\n\n    struct I {\n      float latency = 0.0f;\n      float throughput = 0.0f;\n      int n = 0;\n      float score() {\n        return latency / n / 400 - std::log(throughput / n);\n    
  }\n    };\n\n    std::map<int, I> li;\n\n    int best = 0;\n    float bestScore = std::numeric_limits<float>::infinity();\n\n    auto eval = [&](int i) {\n      prep(i);\n      int badcount = 0;\n      float latency = 0.0f;\n      float throughput = 0.0f;\n      int n = 2;\n      for (int j = 0; j != n; ++j) {\n        call();\n      }\n      for (int j = 0; j != n; ++j) {\n        t.reset();\n        call();\n        float ms = t.elapsed() * 1000;\n        latency += ms;\n        throughput += i / ms;\n        if (ms > maxms || i > maxbs) {\n          ++badcount;\n        }\n      }\n      auto& x = li[i];\n      x.latency += latency;\n      x.throughput += throughput;\n      x.n += n;\n      float score = x.score();\n      if (badcount < n && score < bestScore) {\n        bestScore = score;\n        best = i;\n      }\n      return badcount < n;\n    };\n\n    for (int i = 1;; i += (i + 1) / 2) {\n      if (!eval(i)) {\n        break;\n      }\n    }\n    std::minstd_rand rng(std::random_device{}());\n\n    auto expandNear = [&](int k) {\n      int r = 0;\n      auto i = li.find(k);\n      if (i != li.end()) {\n        auto search = [&](auto begin, auto end) {\n          int b = begin->first;\n          int e;\n          if (end == li.end()) {\n            e = std::prev(end)->first;\n          } else {\n            e = end->first;\n          }\n          b = std::max(b, i->first - 3);\n          e = std::max(b, i->first + 6);\n          for (int i = b; i != e; ++i) {\n            if (li.find(i) != li.end()) {\n              continue;\n            }\n            ++r;\n            if (!eval(i)) {\n              break;\n            }\n          }\n        };\n        search(i, std::next(i));\n        if (i != li.begin()) {\n          search(std::prev(i), i);\n        }\n      }\n      return r;\n    };\n\n    for (int j = 0; j != 4; ++j) {\n      int expands = 12;\n      for (int k = 0; k != 12; ++k) {\n        float sum = 0.0f;\n        
std::vector<std::tuple<float, int, int>> list;\n        float minweight = std::numeric_limits<float>::infinity();\n        for (auto& [k, v] : li) {\n          minweight = std::min(minweight, v.score());\n        }\n        for (auto i = li.begin();;) {\n          auto next = std::next(i);\n          if (next == li.end()) {\n            break;\n          }\n          int from = i->first + 1;\n          int to = next->first;\n          if (to - from > 0) {\n            float weight =\n                std::min(i->second.score(), next->second.score()) - minweight;\n            weight = 1.0f / std::min(std::exp(weight * 4), 1e9f);\n            weight *= to - from;\n            list.emplace_back(weight, from, to);\n            sum += weight;\n          }\n          i = next;\n        }\n        if (list.size() > 0 && sum > 0.0f) {\n          float val = std::uniform_real_distribution<float>(0.0f, sum)(rng);\n          for (auto& [weight, from, to] : list) {\n            val -= weight;\n            if (val <= 0) {\n              int k = std::uniform_int_distribution<int>(from, to - 1)(rng);\n              eval(k);\n              if (expands > 0) {\n                expands -= expandNear(k);\n              }\n              break;\n            }\n          }\n        }\n      }\n      if (best) {\n        expandNear(best);\n      }\n      std::vector<std::tuple<float, int>> sorted;\n      for (auto& [k, v] : li) {\n        sorted.emplace_back(v.score(), k);\n      }\n      std::sort(sorted.begin(), sorted.end());\n      for (size_t i = 0; i != sorted.size() && i < 10; ++i) {\n        int k = std::get<1>(sorted[i]);\n        if (li[k].n < 8) {\n          eval(k);\n        }\n      }\n    }\n    foundBatchSize_ = best;\n    hasFoundBatchSize_ = true;\n\n    for (auto& [k, v] : li) {\n      fmt::printf(\n          \"Batch size %d, evals %d latency %fms throughput %g score %g\\n\", k,\n          v.n, v.latency / v.n, v.throughput / v.n, v.score());\n    }\n\n    
fmt::printf(\"Found best batch size of %d with evals %d latency %fms \"\n                \"throughput %g score %g\\n\",\n                best, li[best].n, li[best].latency / li[best].n,\n                li[best].throughput / li[best].n, li[best].score());\n    return best;\n  }\n\n  bool isCuda() const {\n    return device_.is_cuda();\n  }\n\n  torch::Device device() const {\n    return device_;\n  }\n\n  int64_t bufferNumSample() const {\n    return replayBuffer_.numSample();\n  }\n\n  int64_t bufferNumAdd() const {\n    return replayBuffer_.numAdd();\n  }\n\n  void setIsTournamentOpponent(bool mode) {\n    isTournamentOpponent_ = mode;\n  }\n  bool isTournamentOpponent() const {\n    return isTournamentOpponent_;\n  }\n  void setDontRequestModelUpdates(bool v) {\n    dontRequestModelUpdates_ = v;\n  }\n\n  bool wantsTournamentResult() {\n    return client_ ? client_->wantsTournamentResult() : false;\n  }\n\n  std::string_view getTournamentModelId() {\n    if (client_) {\n      return client_->getModelId();\n    } else {\n      return \"dev\";\n    }\n  }\n\n  void result(float reward,\n              std::unordered_map<std::string_view, float> models) {\n    if (client_ && isTournamentOpponent_ && !dontRequestModelUpdates_) {\n      client_->sendResult(reward, std::move(models));\n    }\n  }\n\n  void setFindBatchSizeMaxMs(float ms) {\n    findBatchSizeMaxMs_ = ms;\n  }\n  void setFindBatchSizeMaxBs(int n) {\n    findBatchSizeMaxBs_ = n;\n  }\n\n private:\n  const std::string jitModel_;\n  torch::Device device_;\n  torch::ScalarType dtype_;\n\n  PriorityMutex* modelMutex_;\n  std::shared_ptr<TorchJitModel> model_;\n  std::shared_ptr<tube::DataChannel> actChannel_;\n  std::shared_ptr<tube::DataChannel> trainChannel_;\n  std::vector<std::thread> threads_;\n  std::atomic_bool terminate_{false};\n\n  ReplayBuffer replayBuffer_;\n\n  std::atomic_size_t nextActIndex_{0};\n  std::optional<distributed::Server> server_;\n  std::optional<distributed::Client> client_;\n  
std::thread modelUpdateThread;\n  bool isTournamentOpponent_ = false;\n  bool dontRequestModelUpdates_ = false;\n\n  std::atomic<bool> hasFoundBatchSize_ = false;\n  std::atomic<int> foundBatchSize_ = 0;\n\n  std::atomic<float> findBatchSizeMaxMs_ = 100.0f;\n  std::atomic<int> findBatchSizeMaxBs_ = 10240;\n};\n\nModelManager::ModelManager() {\n}\n\nModelManager::ModelManager(int actBatchsize,\n                           const std::string& device,\n                           int replayCapacity,\n                           int seed,\n                           const std::string& jitModel,\n                           int trainChannelTimeoutMs,\n                           int trainChannelNumSlots) {\n  impl = std::make_unique<ModelManagerImpl>(\n      actBatchsize, device, replayCapacity, seed, jitModel,\n      trainChannelTimeoutMs, trainChannelNumSlots);\n}\n\nModelManager::~ModelManager() {\n}\n\nstd::shared_ptr<tube::DataChannel> ModelManager::getTrainChannel() {\n  return impl->getTrainChannel();\n}\n\nstd::shared_ptr<tube::DataChannel> ModelManager::getActChannel() {\n  return impl->getActChannel();\n}\n\nvoid ModelManager::updateModel(\n    const std::unordered_map<std::string, at::Tensor>& stateDict) {\n  return impl->updateModel(stateDict);\n}\n\nint ModelManager::bufferSize() const {\n  return impl->bufferSize();\n}\n\nbool ModelManager::bufferFull() const {\n  return impl->bufferFull();\n}\n\nstd::unordered_map<std::string, at::Tensor> ModelManager::sample(\n    int sampleSize) {\n  return impl->sample(sampleSize);\n}\n\nvoid ModelManager::start() {\n  return impl->start();\n}\n\nvoid ModelManager::testAct() {\n  return impl->testAct();\n}\n\nvoid ModelManager::setIsTournamentOpponent(bool mode) {\n  return impl->setIsTournamentOpponent(mode);\n}\n\nvoid ModelManager::addTournamentModel(\n    std::string id,\n    const std::unordered_map<std::string, at::Tensor>& stateDict) {\n  return impl->addTournamentModel(std::move(id), stateDict);\n}\n\nvoid 
ModelManager::setDontRequestModelUpdates(bool v) {\n  return impl->setDontRequestModelUpdates(v);\n}\n\nvoid ModelManager::startServer(std::string serverListenEndpoint) {\n  return impl->startServer(serverListenEndpoint);\n}\n\nvoid ModelManager::startClient(std::string serverConnectHostname) {\n  return impl->startClient(serverConnectHostname);\n}\n\nvoid ModelManager::startReplayBufferServer(std::string endpoint) {\n  return impl->startReplayBufferServer(endpoint);\n}\n\nvoid ModelManager::startReplayBufferClient(std::string endpoint) {\n  return impl->startReplayBufferClient(endpoint);\n}\n\nSampleResult ModelManager::remoteSample(int sampleSize) {\n  return impl->remoteSample(sampleSize);\n}\n\nbool ModelManager::isCuda() const {\n  return impl->isCuda();\n}\n\ntorch::Device ModelManager::device() const {\n  return impl->device();\n}\n\nvoid ModelManager::batchAct(at::Tensor input,\n                            at::Tensor v,\n                            at::Tensor pi,\n                            at::Tensor rnnState,\n                            at::Tensor* rnnStateOut) {\n  return impl->batchAct(std::move(input), std::move(v), std::move(pi),\n                        std::move(rnnState), rnnStateOut);\n}\n\nstd::string_view ModelManager::getTournamentModelId() {\n  return impl->getTournamentModelId();\n}\n\nvoid ModelManager::result(float reward,\n                          std::unordered_map<std::string_view, float> models) {\n  return impl->result(reward, std::move(models));\n}\n\nint ModelManager::findBatchSize(at::Tensor input, at::Tensor rnnState) {\n  return impl->findBatchSize(input, rnnState);\n}\n\nint64_t ModelManager::bufferNumSample() const {\n  return impl->bufferNumSample();\n}\n\nint64_t ModelManager::bufferNumAdd() const {\n  return impl->bufferNumAdd();\n}\n\nbool ModelManager::isTournamentOpponent() const {\n  return impl->isTournamentOpponent();\n}\n\nbool ModelManager::wantsTournamentResult() {\n  return 
impl->wantsTournamentResult();\n}\n\nvoid ModelManager::setFindBatchSizeMaxMs(float ms) {\n  impl->setFindBatchSizeMaxMs(ms);\n}\n\nvoid ModelManager::setFindBatchSizeMaxBs(int n) {\n  impl->setFindBatchSizeMaxBs(n);\n}\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/model_manager.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <tube/src_cpp/data_channel.h>\n\n#include <future>\n#include <memory>\n#include <string>\n#include <torch/torch.h>\n#include <unordered_map>\n\nnamespace core {\n\nstruct SampleResult {\n  std::future<std::unordered_map<std::string, torch::Tensor>> fut;\n\n  std::unordered_map<std::string, torch::Tensor> get() {\n    return fut.get();\n  }\n};\n\nclass ModelManagerImpl;\nclass ModelManager {\n  std::unique_ptr<ModelManagerImpl> impl;\n\n public:\n  ModelManager();\n  ModelManager(int actBatchsize,\n               const std::string& device,\n               int replayCapacity,\n               int seed,\n               const std::string& jitModel,\n               int trainChannelTimeoutMs,\n               int trainChannelNumSlots);\n  ~ModelManager();\n\n  std::shared_ptr<tube::DataChannel> getTrainChannel();\n  std::shared_ptr<tube::DataChannel> getActChannel();\n  void updateModel(\n      const std::unordered_map<std::string, torch::Tensor>& stateDict);\n  int bufferSize() const;\n  bool bufferFull() const;\n  std::unordered_map<std::string, torch::Tensor> sample(int sampleSize);\n  void start();\n  void testAct();\n  void setIsTournamentOpponent(bool mode);\n  void addTournamentModel(\n      std::string id,\n      const std::unordered_map<std::string, torch::Tensor>& stateDict);\n  void setDontRequestModelUpdates(bool v);\n  void startServer(std::string serverListenEndpoint);\n  void startClient(std::string serverConnectHostname);\n  void startReplayBufferServer(std::string endpoint);\n  void startReplayBufferClient(std::string endpoint);\n  SampleResult remoteSample(int sampleSize);\n\n  bool isCuda() const;\n  torch::Device device() const;\n  void batchAct(torch::Tensor input,\n                torch::Tensor 
v,\n                torch::Tensor pi,\n                torch::Tensor rnnState = {},\n                torch::Tensor* rnnStateOut = nullptr);\n  std::string_view getTournamentModelId();\n  void result(float reward, std::unordered_map<std::string_view, float> models);\n  int findBatchSize(torch::Tensor input, torch::Tensor rnnState = {});\n  int64_t bufferNumSample() const;\n  int64_t bufferNumAdd() const;\n  bool isTournamentOpponent() const;\n  bool wantsTournamentResult();\n\n  void setFindBatchSizeMaxMs(float ms);\n  void setFindBatchSizeMaxBs(int n);\n};\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/player.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <string>\n#include <unordered_map>\n\nnamespace core {\n\nclass State;\nclass Player {\n public:\n  Player(bool isHuman)\n      : isHuman_(isHuman) {\n    isTP_ = false;\n  }\n\n  bool isHuman() const {\n    return isHuman_;\n  }\n  bool isTP() const {\n    return isTP_;\n  }\n\n  virtual void setName(std::string name) {\n    name_ = std::move(name);\n  }\n  const std::string& getName() {\n    return name_;\n  }\n\n  virtual void terminate() {\n  }\n  virtual void reset() {\n  }\n\n private:\n  std::string name_ = \"unnamed\";\n  bool isHuman_;\n\n protected:\n  bool isTP_;\n};\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/pybind.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <pybind11/pybind11.h>\n\n#include \"actor.h\"\n#include \"common/threads.h\"\n#include \"forward_player.h\"\n#include \"game.h\"\n#include \"model_manager.h\"\n\nnamespace py = pybind11;\n\nusing namespace core;\n\nPYBIND11_MODULE(polygames, m) {\n\n  m.def(\"init_threads\", &threads::init);\n\n  py::class_<Game, tube::EnvThread, std::shared_ptr<Game>>(m, \"Game\")\n      .def(py::init<std::string, std::vector<std::string>, int, int, bool, bool, bool, bool, bool, int,\n                    int, bool, int, int, bool, int>())\n      .def(\"add_player\", &Game::addPlayer, py::keep_alive<1, 2>())\n      .def(\"add_eval_player\", &Game::addEvalPlayer)\n      .def(\"add_human_player\", &Game::addHumanPlayer)\n      .def(\"add_tp_player\", &Game::addTPPlayer)\n      .def(\"get_raw_feat_size\", &Game::getRawFeatSize)\n      .def(\"get_feat_size\", &Game::getFeatSize)\n      .def(\"is_one_player_game\", &Game::isOnePlayerGame)\n      .def(\"set_features\", &Game::setFeatures)\n      .def(\"get_action_size\", &Game::getActionSize)\n      .def(\"get_result\", &Game::getResult);\n\n  py::class_<Actor, std::shared_ptr<Actor>>(m, \"Actor\")\n      .def(py::init<std::shared_ptr<tube::DataChannel>,\n                    const std::vector<int64_t>&,  // featSize\n                    const std::vector<int64_t>&,  // actionSize\n                    const std::vector<int64_t>&,  // rnnStateSize\n                    int,                          // rnnSeqlen\n                    bool,                         // logitValue\n                    bool,                         // useValue\n                    bool,                         // usePolicy\n                    std::shared_ptr<ModelManager>>());\n\n  py::class_<HumanPlayer, 
std::shared_ptr<HumanPlayer>>(m, \"HumanPlayer\")\n      .def(py::init<>(), py::call_guard<py::gil_scoped_release>());\n\n  py::class_<TPPlayer, std::shared_ptr<TPPlayer>>(m, \"TPPlayer\")\n      .def(py::init<>(), py::call_guard<py::gil_scoped_release>());\n\n  py::class_<ActorPlayer, std::shared_ptr<ActorPlayer>>(m, \"ActorPlayer\")\n      .def(py::init<>(), py::call_guard<py::gil_scoped_release>())\n      .def(\"set_actor\", &ForwardPlayer::setActor, py::keep_alive<1, 2>())\n      .def(\"set_name\", &ForwardPlayer::setName);\n\n  py::class_<ForwardPlayer, ActorPlayer, std::shared_ptr<ForwardPlayer>>(\n      m, \"ForwardPlayer\")\n      .def(py::init<>(), py::call_guard<py::gil_scoped_release>());\n\n  py::class_<ModelManager, std::shared_ptr<ModelManager>>(m, \"ModelManager\")\n      .def(py::init<int, const std::string&, int, int, const std::string&, int,\n                    int>())\n      .def(\"get_train_channel\", &ModelManager::getTrainChannel)\n      .def(\"get_act_channel\", &ModelManager::getActChannel)\n      .def(\"update_model\", &ModelManager::updateModel)\n      .def(\"buffer_size\", &ModelManager::bufferSize)\n      .def(\"buffer_full\", &ModelManager::bufferFull)\n      .def(\"buffer_num_sample\", &ModelManager::bufferNumSample)\n      .def(\"buffer_num_add\", &ModelManager::bufferNumAdd)\n      .def(\"sample\", &ModelManager::sample)\n      .def(\"start\", &ModelManager::start)\n      .def(\"test_act\", &ModelManager::testAct)\n      .def(\"set_is_tournament_opponent\", &ModelManager::setIsTournamentOpponent)\n      .def(\"add_tournament_model\", &ModelManager::addTournamentModel)\n      .def(\"set_dont_request_model_updates\",\n           &ModelManager::setDontRequestModelUpdates)\n      .def(\"start_server\", &ModelManager::startServer)\n      .def(\"start_client\", &ModelManager::startClient)\n      .def(\"start_replay_buffer_server\", &ModelManager::startReplayBufferServer)\n      .def(\"start_replay_buffer_client\", 
&ModelManager::startReplayBufferClient)\n      .def(\"remote_sample\", &ModelManager::remoteSample)\n      .def(\"set_find_batch_size_max_ms\", &ModelManager::setFindBatchSizeMaxMs)\n      .def(\"set_find_batch_size_max_bs\", &ModelManager::setFindBatchSizeMaxBs);\n\n  py::class_<SampleResult>(m, \"SampleResult\").def(\"get\", &SampleResult::get);\n}\n"
  },
  {
    "path": "src/core/replay_buffer.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <sstream>\n\n#include \"replay_buffer.h\"\n\n#define ZSTD_STATIC_LINKING_ONLY\n#include \"zstd/lib/zstd.h\"\n\nnamespace {\nstruct cctx {\n  ZSTD_CCtx* ctx;\n  cctx() {\n    ctx = ZSTD_createCCtx();\n    if (!ctx) {\n      throw std::runtime_error(\"Failed to allocate zstd context\");\n    }\n  }\n  ~cctx() {\n    ZSTD_freeCCtx(ctx);\n  }\n};\n\nstruct dctx {\n  ZSTD_DCtx* ctx;\n  dctx() {\n    ctx = ZSTD_createDCtx();\n    if (!ctx) {\n      throw std::runtime_error(\"Failed to allocate zstd context\");\n    }\n  }\n  ~dctx() {\n    ZSTD_freeDCtx(ctx);\n  }\n};\n}  // namespace\n\nnamespace core {\n\nReplayBuffer::ReplayBuffer(int capacity, int seed)\n    : capacity(capacity)\n    , buffer(capacity) {\n  rng_.seed(seed);\n}\n\nReplayBuffer::~ReplayBuffer() {\n  if (!sampleThreads.empty()) {\n    std::unique_lock l(mut);\n    sampleThreadDie = true;\n    cv.notify_all();\n    l.unlock();\n    for (auto& v : sampleThreads) {\n      v.join();\n    }\n  }\n}\n\nvoid ReplayBuffer::add(std::unordered_map<std::string, at::Tensor> input) {\n  if (input.empty()) {\n    return;\n  }\n  if (!hasKeys) {\n    std::lock_guard l(keyMutex);\n    if (keys.empty()) {\n      for (auto& v : input) {\n        torch::ArrayRef<int64_t> x = v.second.sizes();\n        x = torch::ArrayRef<int64_t>(x.begin() + 1, x.end());\n        keys.push_back({v.first, std::vector<int64_t>(x.begin(), x.end()),\n                        v.second.dtype()});\n      }\n      hasKeys = true;\n\n      for (auto& vx : input) {\n        printf(\"  key '%s' shape %s\\n\", vx.first.c_str(),\n               ss(vx.second.sizes()).c_str());\n      }\n    }\n  }\n\n  std::vector<char> tmpbuf;\n\n  auto n = input.begin()->second.size(0);\n\n  cctx ctx;\n\n  for (int i = 0; i != n; 
++i) {\n    if (input.size() != keys.size()) {\n      throw std::runtime_error(\"replay buffer keys mismatch\");\n    }\n    BufferEntry* newEntry = new BufferEntry[input.size()];\n\n    size_t index = 0;\n    for (const auto& [key, shape, dtype] : keys) {\n      auto t = input.at(key)[i];\n\n      if (!t.is_contiguous()) {\n        throw std::runtime_error(\"replay buffer input is not contiguous\");\n      }\n\n      void* data = t.data_ptr();\n      size_t datasize = dtype.itemsize() * t.numel();\n\n      tmpbuf.resize(sizeof(size_t) + ZSTD_compressBound(datasize));\n      auto n = ZSTD_compressCCtx(\n          ctx.ctx, tmpbuf.data(), tmpbuf.size(), data, datasize, 0);\n      if (ZSTD_isError(n)) {\n        throw std::runtime_error(\"replay buffer compress failed\");\n      }\n\n      auto& e = newEntry[index++];\n      e.datasize = datasize;\n      e.data.assign(tmpbuf.begin(), tmpbuf.begin() + n);\n    }\n\n    auto slot = numAdd_++ % capacity;\n    auto* prev = buffer[slot].exchange(newEntry);\n    if (prev) {\n      delete[] prev;\n    }\n  }\n}\n\nstd::unordered_map<std::string, at::Tensor> ReplayBuffer::sampleImpl(\n    int sampleSize) {\n  if (!hasKeys) {\n    return {};\n  }\n  int siz = size();\n  std::unordered_map<std::string, torch::Tensor> r;\n  std::vector<char*> pointers;\n  for (auto& [k, shape, dtype] : keys) {\n    std::vector<int64_t> sizes;\n    sizes.assign(shape.begin(), shape.end());\n    sizes.insert(sizes.begin(), sampleSize);\n    auto tensor = torch::empty(sizes, dtype);\n    pointers.push_back((char*)tensor.data_ptr());\n    r[k] = tensor;\n  }\n  dctx ctx;\n  size_t nCopies = 0;\n  auto copy = [&](size_t srcIndex) {\n    auto src = buffer[srcIndex].exchange(nullptr);\n    if (!src) {\n      return 0;\n    }\n    if (nCopies >= (size_t)sampleSize) {\n      throw std::runtime_error(\n          \"replay buffer internal error: copied too many samples\");\n    }\n    ++nCopies;\n    for (size_t i = 0; i != keys.size(); ++i) {\n      size_t 
datasize = src[i].datasize;\n      auto n = ZSTD_decompressDCtx(ctx.ctx, pointers[i], datasize,\n                                   src[i].data.data(), src[i].data.size());\n      if (ZSTD_isError(n)) {\n        throw std::runtime_error(\"replay buffer decompress failed\");\n      }\n      pointers[i] += datasize;\n    }\n    BufferEntry* nullref = nullptr;\n    if (!buffer[srcIndex].compare_exchange_strong(nullref, src)) {\n      delete[] src;\n    }\n    return 1;\n  };\n  int64_t seq = std::min(numAdd_ - prevSampleNumAdd_, (int64_t)sampleSize);\n  int64_t i = 0;\n  //    for (;i != seq;) {\n  //      i += copy((prevSampleNumAdd_ + i) % siz);\n  //    }\n  prevSampleNumAdd_ += seq;\n  std::vector<size_t> indices;\n  while (i != sampleSize) {\n    indices.clear();\n    std::unique_lock l(sampleMutex);\n    for (size_t ii = i; ii != (size_t)sampleSize;) {\n      if (sampleOrderIndex >= sampleOrder.size()) {\n        size_t p = sampleOrder.size();\n        if (p != (size_t)capacity) {\n          sampleOrder.resize(siz);\n          for (size_t i = p; i != sampleOrder.size(); ++i) {\n            sampleOrder[i] = i;\n          }\n        }\n        std::shuffle(sampleOrder.begin(), sampleOrder.end(), rng_);\n        sampleOrderIndex = 0;\n      }\n      indices.push_back(sampleOrder.at(sampleOrderIndex++));\n      ++ii;\n    }\n    l.unlock();\n    for (size_t index : indices) {\n      i += copy(index);\n    }\n  }\n  numSample_ += sampleSize;\n  return r;\n}\n\nstd::unordered_map<std::string, at::Tensor> ReplayBuffer::sample(\n    int sampleSize) {\n  // return sampleImpl(sampleSize);\n\n  // TODO\n  // This code currently prefetches 8 samples for efficent sampling when\n  // training on 8 gpus. 
This should be made configurable or training\n  // should be made efficient without requiring prefetch\n  std::unique_lock l(mut);\n  if (sampleThreads.empty()) {\n    for (int i = 0; i != 8; ++i) {\n      sampleThreads.emplace_back([this]() {\n        std::unique_lock l(mut);\n        while (true) {\n          while (results.size() >= 8 || resultsSampleSize == 0) {\n            cv.wait(l);\n            if (sampleThreadDie) {\n              return;\n            }\n          }\n          l.unlock();\n          auto tmp = sampleImpl(resultsSampleSize);\n          l.lock();\n          results.push_back(std::move(tmp));\n          cv2.notify_all();\n        }\n      });\n    }\n  }\n  resultsSampleSize = sampleSize;\n  while (results.empty()) {\n    cv.notify_all();\n    cv2.wait(l);\n  }\n  auto r = std::move(results.front());\n  results.pop_front();\n  cv.notify_all();\n  return r;\n}\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/replay_buffer.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <mutex>\n#include <random>\n#include <string>\n#include <torch/torch.h>\n#include <unordered_map>\n#include <vector>\n\nnamespace core {\n\nclass ReplayBuffer {\n public:\n  ReplayBuffer(int capacity, int seed);\n  ~ReplayBuffer();\n\n  /*\n   * add to the circular array the elements of input\n   * updates input if size is greater than capacity\n   * initializes if the buffer is empty\n   */\n  void add(std::unordered_map<std::string, torch::Tensor> input);\n\n  template <typename T> static std::string ss(T&& sizes) {\n    std::string r = \"[\";\n    for (int64_t v : sizes) {\n      if (r != \"[\")\n        r += \", \";\n      r += std::to_string(v);\n    }\n    return r + \"]\";\n  }\n\n  /*\n   * sample sampleSize elements from the replayBuffer\n   */\n  std::unordered_map<std::string, torch::Tensor> sampleImpl(int sampleSize);\n\n  std::mutex mut;\n  std::condition_variable cv;\n  std::condition_variable cv2;\n  std::deque<std::unordered_map<std::string, torch::Tensor>> results;\n  int resultsSampleSize = 0;\n  bool sampleThreadDie = false;\n\n  std::vector<std::thread> sampleThreads;\n\n  std::unordered_map<std::string, torch::Tensor> sample(int sampleSize);\n\n  int size() const {\n    return (int)std::min((int64_t)numAdd_, (int64_t)capacity);\n  }\n\n  bool full() const {\n    return size() == capacity;\n  }\n\n  int64_t numAdd() const {\n    return numAdd_;\n  }\n\n  int64_t numSample() const {\n    return numSample_;\n  }\n\n  const int capacity;\n\n private:\n  struct Key {\n    std::string name;\n    std::vector<int64_t> shape;\n    caffe2::TypeMeta dtype;\n  };\n\n  struct BufferEntry {\n    size_t datasize;\n    std::vector<char> data;\n  };\n\n  std::vector<std::atomic<BufferEntry*>> buffer;\n\n  
size_t sampleOrderIndex = 0;\n  std::vector<size_t> sampleOrder;\n\n  std::vector<Key> keys;\n  std::mutex keyMutex;\n  std::atomic<bool> hasKeys = false;\n  std::mutex sampleMutex;\n  int64_t prevSampleNumAdd_ = 0;\n  std::atomic_int64_t numAdd_ = 0;\n  std::atomic_int64_t numSample_ = 0;\n\n  std::mt19937 rng_;\n};\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/state.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"state.h\"\n\nnamespace core {\n\nstd::ostream& operator<<(std::ostream& os, const _Action& action) {\n  os << action.GetX() << \", \" << action.GetY() << \", \" << action.GetZ();\n  return os;\n}\n\nvoid State::fillFullFeatures() {\n  if (!_featopts) {\n    return;\n  }\n  size_t offset = 0;\n  auto expand = [&](size_t n) {\n    size_t newOffset = offset + n;\n    if (newOffset > _fullFeatures.size()) {\n      throw std::runtime_error(\"internal error: _fullFeatures is too small\");\n    }\n    return _fullFeatures.data() + std::exchange(offset, newOffset);\n  };\n  const size_t planeSize = _featSize[1] * _featSize[2];\n  auto add_constant_plane = [&](float value) {\n    auto* at = expand(planeSize);\n    std::fill(at, at + planeSize, value);\n  };\n  if (_fullFeatures.empty()) {\n    _outFeatSize = _featSize;\n    _outFeatSize[0] *= (1 + _featopts->history);\n    _outFeatSize[0] +=\n        (_featopts->outFeatures ? 1 : 0) +\n        (_featopts->turnFeaturesSingleChannel ? 1 : 0) +\n        (_featopts->turnFeaturesMultiChannel ? getNumPlayerColors() : 0) +\n        (_featopts->geometricFeatures ? 4 : 0) +\n        (_featopts->oneFeature ? 
1 : 0) + _featopts->randomFeatures;\n    _fullFeatures.resize(_outFeatSize[0] * _outFeatSize[1] * _outFeatSize[2]);\n\n    if (_featopts->history > 0) {\n      expand(_features.size() * (_featopts->history + 1));\n    } else {\n      expand(_features.size());\n    }\n\n    if (_featopts->randomFeatures > 0) {\n      float* dst =\n          expand(_featopts->randomFeatures * _featSize[1] * _featSize[2]);\n      for (int k = 1; k <= _featopts->randomFeatures; k++) {\n        for (int i = 1; i <= _featSize[1]; i++) {\n          for (int j = 1; j <= _featSize[2]; j++) {\n            float x = k * 0.754421f + i * 0.147731f + j * 0.242551f;\n            x +=\n                0.145531f * (i * k) + 0.741431f * (i * j) + 0.134134f * (j * k);\n            x += 0.423423f * (i * j * k);\n            *dst++ = x - std::floor(x);\n          }\n        }\n      }\n    }\n    if (_featopts->geometricFeatures) {\n      float* dst = expand(4 * _featSize[1] * _featSize[2]);\n      for (int k = 0; k < 4; k++) {\n        for (int i = 0; i < _featSize[1]; i++) {\n          for (int j = 0; j < _featSize[2]; j++) {\n            if (k == 0) {\n              *dst++ = float(i) / float(_featSize[1] - 1);\n            } else if (k == 1) {\n              *dst++ = float(j) / float(_featSize[2] - 1);\n            } else if (k == 2) {\n              float x = float(i) / float(_featSize[1] - 1) - 0.5f;\n              float y = float(j) / float(_featSize[2] - 1) - 0.5f;\n              *dst++ = x * x + y * y;\n            } else if (k == 3) {\n              float x1 = float(i) / float(_featSize[1] - 1);\n              float x2 = 1.f - float(i) / float(_featSize[1] - 1);\n              x2 = x2 < x1 ? x2 : x1;\n              float x3 = float(j) / float(_featSize[2] - 1);\n              x2 = x2 < x3 ? x2 : x3;\n              float x4 = 1.f - float(j) / float(_featSize[2] - 1);\n              x2 = x2 < x4 ? 
x2 : x4;\n              *dst++ = x2;\n            }\n          }\n        }\n      }\n    }\n    if (_featopts->oneFeature) {\n      add_constant_plane(1);\n    }\n    if (_featopts->turnFeaturesSingleChannel) {\n      _turnFeaturesSingleChannelOffset = offset;\n      expand(planeSize);\n    }\n    if (_featopts->turnFeaturesMultiChannel) {\n      _turnFeaturesMultiChannelOffset = offset;\n      expand(planeSize * getNumPlayerColors());\n    }\n    if (_featopts->outFeatures) {\n      float* dst = expand(_featSize[1] * _featSize[2]);\n      for (int i = 0; i < _featSize[1]; i++) {\n        for (int j = 0; j < _featSize[2]; j++) {\n          if ((i == 0) || (i == _featSize[1] - 1) || (j == 0) ||\n              (j == _featSize[2] - 1)) {\n            *dst++ = (1.);  // 1 for the frontier\n          } else {\n            *dst++ = (0.);  // 0 for the rest\n          }\n        }\n      }\n    }\n\n    _previousFeatures.resize(_features.size() * (_featopts->history + 1));\n    for (int i = 0; i != _featopts->history + 1; ++i) {\n      std::memcpy(_previousFeatures.data() + _features.size() * i,\n                  _features.data(), sizeof(float) * _features.size());\n    }\n  }\n  if (_featopts->history > 0) {\n    offset = 0;\n    // we check the expected size of features.\n    unsigned int expected_size =\n        (_featopts->history + 1) * _featSize[0] * _featSize[1] * _featSize[2];\n    if (_previousFeatures.size() != expected_size) {\n      throw std::runtime_error(\n          \"internal error: previousFeatures is of incorrect size!\");\n    }\n    std::memcpy(_previousFeatures.data() + _previousFeaturesOffset,\n                _features.data(), sizeof(float) * _features.size());\n\n    _previousFeaturesOffset += _features.size();\n    if (_previousFeaturesOffset == expected_size) {\n      _previousFeaturesOffset = 0;\n    }\n\n    // we add the previous features in the full features.\n    auto* dst = expand(expected_size);\n    std::memcpy(dst, 
_previousFeatures.data() + _previousFeaturesOffset,\n                sizeof(float) * (expected_size - _previousFeaturesOffset));\n    std::memcpy(dst + expected_size - _previousFeaturesOffset,\n                _previousFeatures.data(),\n                sizeof(float) * _previousFeaturesOffset);\n  } else {\n    offset = 0;\n    std::memcpy(expand(_features.size()), _features.data(),\n                sizeof(float) * _features.size());\n  }\n  if (_featopts->turnFeaturesSingleChannel) {\n    offset = _turnFeaturesSingleChannelOffset;\n    add_constant_plane(getCurrentPlayerColor());\n  }\n  if (_featopts->turnFeaturesMultiChannel) {\n    offset = _turnFeaturesMultiChannelOffset;\n    int n = getNumPlayerColors();\n    int myColor = getCurrentPlayerColor();\n    for (int i = 0; i != n; ++i) {\n      add_constant_plane(i == myColor ? 1.0f : 0.0f);\n    }\n  }\n}\n\n}  // namespace core\n"
  },
  {
    "path": "src/core/state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"common/thread_id.h\"\n#include \"mcts/types.h\"\n\n#include <cassert>\n#include <chrono>\n#include <cstring>\n#include <fstream>\n#include <functional>\n#include <iostream>\n#include <optional>\n#include <random>\n#include <sstream>\n#include <string>\n#include <thread>\n\nnamespace core {\n\n/*****\n Action and State are abstract classes.\n Derived classes correspond to various problems.\n\n A difference with the AZ setting  is that several actions can correspond to the\nsame logit\n from the neural net. This is useful for complex action spaces in which the list\nof possible\n actions is tricky: the MCTS then takes care of differentiating the possible\nactions.\n\n In most of our games, we are still in bijection, but in the case of draughts\nthis makes a difference.\n\n******/\n\n// TODO[doc]: ideally Action should just be private class known\n// only to State. the Actor and State represent actions using\n// the mcts::Action (i.e. int64_t)\nclass _Action {\n public:\n  // Get the location of the move in the neural network output.\n  // Several moves might fall in the same location, no pb.\n  _Action() {\n  }\n  _Action(mcts::Action index, int x, int y, int z)\n      : _i(index) {\n    _loc[0] = x;\n    _loc[1] = y;\n    _loc[2] = z;\n  }\n\n  int GetX() const {\n    return _loc[0];\n  }\n\n  int GetY() const {\n    return _loc[1];\n  }\n\n  int GetZ() const {\n    return _loc[2];\n  }\n\n  uint64_t GetHash() const {\n    return _hash;\n  }\n\n  void SetIndex(int i) {\n    _i = i;\n  }\n\n  int GetIndex() const {\n    return _i;\n  }\n\n protected:\n  uint64_t _hash = 0;\n\n  // Warning! Two actions might have the same position _loc.  
position\n  // of the action in {0,...,GetXActionSize()-1} *\n  // {0,...,GetYActionSize()-1} * {0,...,GetZActionSize()-1}\n  std::array<int, 3> _loc;\n\n  // index of the action in the list of legal actions in the\n  // corresponding state.\n  // _i makes sense since an action is never applied to two distinct\n  // states. We could have a pointer to the state this action is\n  // associated to.\n  mcts::Action _i = -1;\n};\n\nstd::ostream& operator<<(std::ostream& os, const _Action& action);\n\nenum class GameStatus {\n  player0Turn = 0,\n  player1Turn,\n  tie,\n  player0Win,\n  player1Win\n};\n\nstruct FeatureOptions {\n  // Data specifying the way we generate generic features.\n  bool outFeatures = false;  // do we add a feature for borders\n  bool turnFeaturesSingleChannel =\n      false;  // do we add a feature for turn (deprecated, use\n              // turnFeaturesMultiChannel instead)\n  bool turnFeaturesMultiChannel =\n      false;  // do we add a feature for turn/player(color) (one channel for\n              // each player, 0 for other players, 1 for current player)\n  bool geometricFeatures = false;  // do we add geometric features\n  int history = 0;  // do we add a feature for history and how long (0 = none)\n  int randomFeatures = 0;   // how many random features (could be 0)\n  bool oneFeature = false;  // do we want a plane of 1s\n};\n\nclass State {\n public:\n  void setSeed(int seed) {\n    _rng.seed(seed);\n  }\n  State(int seed) {\n    _rng.seed(seed);\n    _stochasticReset = false;\n    _hash = 0;\n    _featSize.resize(3);\n    _actionSize.resize(3);\n    _stochastic = false;\n    forcedDice = -1;\n  }\n\n  virtual ~State() {\n  }\n\n  template <typename T> void initializeAs() {\n    _typeId = &typeid(T);\n    _copyImpl = [](State* dst, const State* src) { *(T*)dst = *(T*)src; };\n  }\n\n  virtual void newGame(unsigned long seed) {\n  }\n\n  // -----overriding core::State's virtual functions-----\n\n  static auto threadrng() {\n    static 
thread_local std::minstd_rand rng(std::random_device{}());\n    return rng();\n  }\n\n  std::unique_ptr<State> clone() const {\n    auto state = clone_();\n    state->_rng.seed(threadrng());\n    return state;\n  }\n\n  int getCurrentPlayer() const {\n    if ((_status == GameStatus::player0Turn) ||\n        (_status == GameStatus::player0Win)) {\n      return 0;\n    } else if ((_status == GameStatus::player1Turn) ||\n               (_status == GameStatus::player1Win)) {\n      return 1;\n    } else {\n      // assert(false);    do not assert this! there might be ties :-)\n      return 0;  // the current player does not matter if we have ties.\n    }\n  }\n\n  std::string lastMoveString() {\n    std::string str;\n    auto sc = clone();\n    auto* s = (State*)&*sc;\n    auto moves = _moves;\n    auto rngs = _moveRngs;\n    s->reset();\n    for (size_t i = 0; i != moves.size(); ++i) {\n      if (i == moves.size() - 1) {\n        str = s->actionDescription(s->GetLegalActions().at(moves.at(i)));\n      }\n      std::tie(s->_rng, s->forcedDice) = rngs.at(i);\n      s->forward(moves.at(i));\n    }\n    return str;\n  }\n\n  std::string history() const {\n    std::string str;\n    auto sc = clone();\n    auto* s = (State*)&*sc;\n    auto moves = _moves;\n    auto rngs = _moveRngs;\n    s->reset();\n    for (size_t i = 0; i != moves.size(); ++i) {\n      if (!str.empty()) {\n        str += \" \";\n      }\n      str += s->actionDescription(s->GetLegalActions().at(moves.at(i)));\n      std::tie(s->_rng, s->forcedDice) = rngs.at(i);\n      s->forward(moves.at(i));\n    }\n    return str;\n  }\n\n  virtual int getCurrentPlayerColor() const {\n    return getCurrentPlayer();\n  }\n\n  virtual int getNumPlayerColors() const {\n    throw std::runtime_error(\n        \"getNumPlayerColors is not implemented for this game\");\n    return 0;\n  }\n\n  int getStepIdx() const {\n    return _moves.size();\n  }\n\n  const std::vector<mcts::Action>& getMoves() const {\n    return 
_moves;\n  }\n\n  virtual float getReward(int player) const {\n    assert(player == 0 || player == 1);\n    if (_status == GameStatus::player0Win) {\n      return player == 0 ? 1.0 : -1.0;\n    } else if (_status == GameStatus::player1Win) {\n      return player == 1 ? 1.0 : -1.0;\n    } else {\n      return 0.0;\n    }\n  }\n\n  virtual int overrideAction() const {\n    return -1;\n  }\n\n  bool terminated() const {\n    return (_status == GameStatus::tie || _status == GameStatus::player0Win ||\n            _status == GameStatus::player1Win);\n  };\n\n  virtual float getRandomRolloutReward(int player) const {\n    const int numSimulation = 10;\n    float sumReward = 0.0;\n    for (int i = 0; i < numSimulation; ++i) {\n      auto clonedState = clone();\n      auto s = dynamic_cast<State*>(clonedState.get());\n      while (!s->terminated()) {\n        s->DoRandomAction();\n      }\n      sumReward += s->getReward(player);\n    }\n    return sumReward / numSimulation;\n  }\n\n  void forward(const mcts::Action& action) {\n    assert(action != mcts::InvalidAction);\n    ApplyAction(GetLegalActions().at(action));\n    _moves.push_back(action);\n    _moveRngs.emplace_back(_rng, forcedDice);\n  }\n\n  // -----interface for games to implement-----\n\n  virtual void Initialize() = 0;\n\n  virtual std::unique_ptr<State> clone_() const = 0;\n\n  virtual void ApplyAction(const _Action& action) = 0;\n\n  virtual void DoGoodAction() {\n    DoRandomAction();\n  }\n\n  virtual void printCurrentBoard() const {\n    std::cout << stateDescription() << std::endl;\n  }\n\n  virtual void errPrintCurrentBoard() const {\n    std::cerr << stateDescription() << std::endl;\n  }\n\n  const std::vector<_Action>& GetLegalActions() const {\n    return _legalActions;\n  }\n\n  virtual std::string stateDescription() const {\n    std::string str;\n    auto& feats = GetFeatures();\n    auto& sizes = GetFeatureSize();\n    if (sizes[0] == 2) {\n      bool allOnesOrZero = true;\n      for (auto& v : 
feats) {\n        if (v != 0 && v != 1) {\n          allOnesOrZero = false;\n          break;\n        }\n      }\n      if (allOnesOrZero) {\n        size_t index = 0;\n        size_t offset = sizes[1] * sizes[2];\n        for (int64_t y = 0; y != sizes[1]; ++y) {\n          for (int64_t z = 0; z != sizes[2]; ++z) {\n            if (z) {\n              str += '|';\n            }\n            char c = ' ';\n            if (feats[index] && feats[offset + index]) {\n              c = '!';\n            } else if (feats[index]) {\n              c = 'x';\n            } else if (feats[offset + index]) {\n              c = 'o';\n            }\n            str += c;\n            ++index;\n          }\n          str += '\\n';\n        }\n        return str;\n      }\n    }\n    size_t index = 0;\n    for (int64_t x = 0; x != sizes[0]; ++x) {\n      str += \"Channel \" + std::to_string(x) + \":\\n\";\n      for (int64_t y = 0; y != sizes[1]; ++y) {\n        for (int64_t z = 0; z != sizes[2]; ++z) {\n          if (z) {\n            str += ' ';\n          }\n          str += feats[index] == int(feats[index])\n                     ? 
std::to_string(int(feats[index]))\n                     : std::to_string(feats[index]);\n          ++index;\n        }\n        str += '\\n';\n      }\n      if (x != sizes[0] - 1) {\n        str += '\\n';\n      }\n    }\n    return str;\n  }\n\n  virtual std::string actionDescription(const _Action& action) const {\n    std::stringstream ss;\n    ss << action.GetIndex();\n    return ss.str();\n  }\n\n  virtual std::string actionsDescription() const {\n    std::string str;\n    for (auto& v : _legalActions) {\n      str += actionDescription(v) + \" \";\n    }\n    return str;\n  }\n\n  virtual int parseAction(const std::string& str) const {\n    for (size_t i = 0; i != _legalActions.size(); ++i) {\n      if (str == actionDescription(_legalActions[i])) {\n        return i;\n      }\n    }\n    return -1;\n  }\n\n  int TPInputAction(\n      std::function<std::optional<int>(std::string)> specialAction =\n          [](std::string) { return std::nullopt; }) {\n    /*std::cout << \"Current board:\" << std::endl\n              << stateDescription() << std::endl;\n    std::cout << \"Legal Actions:\" << std::endl\n              << actionsDescription() << std::endl;*/\n    // Second, receive human feedback.\n    std::string line1;\n    std::string line2;\n    std::string line3;\n    auto& legalActions = GetLegalActions();\n    int index = -1;\n    int index1 = -1;\n    int index2 = -1;\n    int index3 = -1;\n    std::cout << \"# Last action\" << std::endl;\n    std::cerr << stateDescription() << std::endl;\n    printLastActionXYZ();\n    while (index < 0 || index >= (int)legalActions.size()) {\n      std::cout << \"#Input action as x y z: \";\n      std::cin >> line1;\n      std::cin >> line2;\n      std::cin >> line3;\n      index1 = parseAction(line1);\n      index2 = parseAction(line2);\n      index3 = parseAction(line3);\n      for (size_t i = 0; i < legalActions.size(); i++) {\n        if ((GetLegalActions().at(i).GetX() == index1) &&\n            
(GetLegalActions().at(i).GetY() == index2) &&\n            (GetLegalActions().at(i).GetZ() == index3)) {\n          index = i;\n          break;\n        }\n        if (i == legalActions.size()) {\n          std::cout << \"# bad answer!\" << std::endl;\n        }\n      }\n      if (index == -1) {\n        if (auto r = specialAction(line1); r) {\n          return *r;\n        }\n      }\n    }\n    return index;\n  }\n\n  virtual int humanInputAction(\n      std::function<std::optional<int>(std::string)> specialAction =\n          [](std::string) { return std::nullopt; }) {\n    std::cout << \"Current board:\" << std::endl\n              << stateDescription() << std::endl;\n    std::cout << \"Legal Actions:\" << std::endl\n              << actionsDescription() << std::endl;\n    // Second, receive human feedback.\n    std::string line;\n    auto& legalActions = GetLegalActions();\n    int index = -1;\n    while (index < 0 || index >= (int)legalActions.size()) {\n      std::cout << \"Input action: \";\n      std::cin.clear();\n      std::cin >> line;\n      if (!std::cin.good()) {\n        std::exit(1);\n      }\n      index = parseAction(line);\n      if (index == -1) {\n        if (auto r = specialAction(line); r) {\n          return *r;\n        }\n      }\n    }\n    return index;\n  }\n\n  void undoLastMove() {\n    if (_moves.empty()) {\n      return;\n    }\n    auto moves = _moves;\n    auto rngs = _moveRngs;\n    reset();\n    for (size_t i = 0; i != moves.size() - 1; ++i) {\n      std::tie(_rng, forcedDice) = rngs.at(i);\n      forward(moves.at(i));\n    }\n  }\n\n  virtual void setStateFromStr(const std::string& /*str*/) {\n  }\n\n  void printLastAction() {\n    if (_moves.empty()) {\n      std::cout << \"no moves\" << std::endl;\n      return;\n    }\n    auto moves = _moves;\n    auto rngs = _moveRngs;\n    reset();\n    for (size_t i = 0; i != moves.size(); ++i) {\n      std::tie(_rng, forcedDice) = rngs.at(i);\n      if (i == moves.size() - 1) {\n     
   std::cout << actionDescription(GetLegalActions().at(moves.at(i)))\n                  << std::endl;\n      }\n      forward(moves.at(i));\n    }\n  }\n\n  void printLastActionXYZ() {\n    if (_moves.empty()) {\n      std::cout << 0 << std::endl;\n      std::cout << 0 << std::endl;\n      std::cout << 0 << std::endl;\n      return;\n    }\n    auto moves = _moves;\n    auto rngs = _moveRngs;\n    reset();\n    for (size_t i = 0; i != moves.size(); ++i) {\n      std::tie(_rng, forcedDice) = rngs.at(i);\n      if (i == moves.size() - 1) {\n        std::cout << GetLegalActions().at(moves.at(i)).GetX() << std::endl;\n        std::cout << GetLegalActions().at(moves.at(i)).GetY() << std::endl;\n        std::cout << GetLegalActions().at(moves.at(i)).GetZ() << std::endl;\n      }\n      forward(moves.at(i));\n    }\n  }\n\n  void undoLastMoveForPlayer(int player) {\n    auto moves = _moves;\n    auto rngs = _moveRngs;\n    reset();\n    size_t resetToIndex = moves.size();\n    // Find the last move that was ours\n    for (size_t i = 0; i != moves.size(); ++i) {\n      std::tie(_rng, forcedDice) = rngs.at(i);\n      auto prevPlayer = getCurrentPlayer();\n      forward(moves[i]);\n      if (prevPlayer == player) {\n        resetToIndex = i;\n      }\n    }\n    // Reset to it\n    reset();\n    for (size_t i = 0; i != resetToIndex; ++i) {\n      std::tie(_rng, forcedDice) = rngs.at(i);\n      forward(moves.at(i));\n    }\n    if (getCurrentPlayer() != player) {\n      throw std::runtime_error(\"Undo error: expected player \" +\n                               std::to_string(player) + \", got \" +\n                               std::to_string(getCurrentPlayer()));\n    }\n  }\n\n  // -----other non-virtual functions-----\n\n  void fillFullFeatures();\n\n  void DoRandomAction() {\n    assert(!_legalActions.empty());\n    std::uniform_int_distribution<size_t> distr(0, _legalActions.size() - 1);\n    size_t i = distr(_rng);\n    _Action a = _legalActions[i];\n    
ApplyAction(a);\n  }\n\n  void doIndexedAction(int j) {\n    int i = j % _legalActions.size();\n    _Action a = _legalActions[i];\n    ApplyAction(a);\n  }\n\n  bool checkMove(const mcts::Action& c) const {\n    return c < (int)_legalActions.size();\n  }\n\n  uint64_t getHash() const {\n    return _hash;\n  }\n\n  const std::vector<float>& GetRawFeatures() const {\n    return _features;\n  }\n  const std::vector<int64_t>& GetRawFeatureSize() const {\n    return _featSize;\n  }\n\n  // Returns GetXSize x GetYSize x GetZSize float input for the NN.\n  const std::vector<float>& GetFeatures() const {\n    return _fullFeatures.empty() ? _features : _fullFeatures;\n  }\n  const std::vector<int64_t>& GetFeatureSize() const {\n    return _outFeatSize.empty() ? _featSize : _outFeatSize;\n  }\n\n  int GetFeatureLength() const {\n    auto featureSize = GetFeatureSize();\n    return featureSize[0] * featureSize[1] * featureSize[2];\n  }\n\n  const std::vector<int64_t>& GetActionSize() const {\n    return _actionSize;\n  }\n\n  void reset() {\n    _moves.clear();\n    _moveRngs.clear();\n    _previousFeatures.clear();\n    _previousFeaturesOffset = 0;\n    _turnFeaturesSingleChannelOffset = 0;\n    _turnFeaturesMultiChannelOffset = 0;\n    _outFeatSize.clear();\n    _fullFeatures.clear();\n    _features.clear();\n    _legalActions.clear();\n    Initialize();\n  }\n\n  void setFeatures(const FeatureOptions* opts) {\n    _featopts = opts;\n  }\n\n  bool stochasticReset() const {\n    return _stochasticReset;\n  }\n\n  virtual bool isStochastic() const {\n    return _stochastic;\n  }\n\n  void copy(const State& src) {\n    _copyImpl(this, &src);\n  }\n\n  const std::type_info& typeId() const {\n    return *_typeId;\n  }\n\n  virtual bool isOnePlayerGame() const {\n    return false;\n  }\n\n  int forcedDice;\n\n protected:\n  void clearActions() {\n    _legalActions.clear();\n  }\n  // Note: x is the channel, y & z are the spartial coordinates\n  void addAction(int x, int y, int z) 
{\n    _legalActions.emplace_back(_legalActions.size(), x, y, z);\n  }\n\n  bool _stochastic;\n  bool _stochasticReset;\n\n  const std::type_info* _typeId = nullptr;\n  void (*_copyImpl)(State* dst, const State* src) = nullptr;\n\n  std::minstd_rand _rng;\n\n  GameStatus _status;\n  uint64_t _hash;\n\n  std::vector<float> _features;  // neural network input\n  std::vector<_Action> _legalActions;\n  std::vector<int64_t> _featSize;    // size of the neural network input\n  std::vector<int64_t> _actionSize;  // size of the neural network output\n\n  std::vector<mcts::Action> _moves;\n  std::vector<std::pair<std::minstd_rand, int>> _moveRngs;\n\n  const FeatureOptions* _featopts = nullptr;\n\n  // Below the std::vector involved in the generic added features.\n  // size of the neural network input if using _outFeature or _history > 0:\n  std::vector<int64_t> _outFeatSize;\n  std::vector<float> _fullFeatures;      // neural network input, completed\n  std::vector<float> _previousFeatures;  // history of features\n  size_t _previousFeaturesOffset = 0;\n  size_t _turnFeaturesSingleChannelOffset = 0;\n  size_t _turnFeaturesMultiChannelOffset = 0;\n};\n\n}  // namespace core\nusing core::_Action;\nusing core::GameStatus;\n"
  },
  {
    "path": "src/core/test_state.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"game.h\"\n#include \"state.h\"\n#include <iostream>\n\nfloat goodEval(core::State& s) {\n  float numWins = 0;\n  int gameCount = 0;\n  while (gameCount < 100) {\n    s.reset();\n    while (!s.terminated()) {\n      s.DoGoodAction();\n    }\n    numWins += 0.5 * (1 + s.getReward(0));\n    ++gameCount;\n  }\n  float winRate = numWins / float(gameCount);\n  std::cout << \"good win rate = \" << winRate << std::endl;\n  if ((winRate <= 0.01) || (winRate >= 0.99)) {\n    throw std::runtime_error(\n        \"this game has a random win rate beyond acceptable.\");\n  }\n  return true;\n}\n\nfloat randEval(core::State& s) {\n  float numWins = 0;\n  int gameCount = 0;\n  while (gameCount < 100) {\n    s.reset();\n    while (!s.terminated()) {\n      s.DoRandomAction();\n    }\n    numWins += 0.5 * (1 + s.getReward(0));\n    ++gameCount;\n  }\n  float winRate = numWins / float(gameCount);\n  std::cout << \"win rate = \" << winRate << std::endl;\n  if ((winRate <= 0.01) || (winRate >= 0.99)) {\n    throw std::runtime_error(\n        \"this game has a random win rate beyond acceptable.\");\n  }\n  return true;\n}\n\nint doSimpleTest(core::State& s) {\n  // goodEval(s);\n  // Test that everything is fine.\n  // win_frequency = 0 or 1 in purely random play is weird.\n  randEval(s);\n\n  // Now testing if the game looks stochastic.\n  bool isStochastic = false;\n  // We will check this for various lengths of simulations, i is the length.\n  // if isStochastic switches to true (i.e. 
a non-determinism is already\n  // detected),\n  // then we stop the loop.\n  bool theoreticallyStochastic = s.isStochastic();\n  for (int umax = 8; ((umax < 70) && (!isStochastic)); umax += 1) {\n    s.Initialize();\n    s.setSeed(5678);\n    if (s.isStochastic()) {\n      theoreticallyStochastic = true;\n    }\n    for (int u = 0; u < umax; u++) {\n      if (!s.terminated())\n        s.doIndexedAction(int(umax * 7.123 + u * 1.35));\n      if (s.isStochastic()) {\n        theoreticallyStochastic = true;\n      }\n      // s.stateDescription();\n      // std::cout << \"old:\" << u << \":\" << s.GetFeatures() << std::endl;\n    }\n    // std::cerr << s.stateDescription() << std::endl;\n    auto oldFeatures = s.GetFeatures();\n    // we play another game of length u.\n    s.Initialize();\n    s.setSeed(1234);\n    for (int u = 0; u < umax; u++) {\n      if (!s.terminated())\n        s.doIndexedAction(int(umax * 7.123 + u * 1.35));\n      // std::cout << \"=====\" << std::endl << s.stateDescription() << std::endl;\n      // std::cout << \"new:\" << u << \":\" << s.GetFeatures() << std::endl;\n    }\n    // std::cerr << s.stateDescription() << std::endl;\n    if ((int)s.GetFeatures().size() != s.GetFeatureLength()) {\n      throw std::runtime_error(\"wrong feature length\");\n    }\n    for (int j = 0; ((!isStochastic) && (j < s.GetFeatureLength())); j++) {\n      if (s.GetFeatures()[j] != oldFeatures[j]) {\n        std::cout << \"#horizon\" << umax << \"+feature\" << j << \"/\"\n                  << s.GetFeatureLength() << \"--\" << s.GetFeatures()[j]\n                  << \" vs \" << oldFeatures[j] << std::endl;\n        isStochastic = true;\n      }\n    }\n    // if (isStochastic && (!theoreticallyStochastic)) {\n    //   std::cout << \"original:\" << oldFeatures << std::endl;\n    //   std::cout << \"current: \" << s.GetFeatures() << std::endl;\n    // }\n  }\n  if (isStochastic != theoreticallyStochastic) {\n    std::cout << s.stateDescription() << std::endl;\n   
 std::cout << \" Theoretically: \" << theoreticallyStochastic << std::endl;\n    std::cout << \" Practically: \" << isStochastic << std::endl;\n    throw std::runtime_error(\"stochasticity violated\");\n  }\n  // s.Initialize();\n  return s.GetFeatureSize()[0];\n}\n\nvoid doTest(core::State& s) {\n  doSimpleTest(s);\n  std::cout << \"testing: fillFullFeatures at the end of ApplyAction and of \"\n               \"Initialize.\"\n            << std::endl;\n  core::FeatureOptions opt;\n  opt.randomFeatures = 3;\n  s.setFeatures(&opt);\n  doSimpleTest(s);\n}\n\n// TODO: there should be better way (using gtest?) than writing a main\n// this is just for demo purpose\n// After compilation, run test_state from build folder to run the test\nint main() {\n  int seed = 999;\n\n  {\n    std::cout << \"testing: tristannogo\" << std::endl;\n    auto state = StateForTristannogo(seed);\n    doTest(state);\n    std::cout << \"test pass: tristannogo\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: BlockGo\" << std::endl;\n    auto state = StateForBlockGo(seed);\n    doTest(state);\n    std::cout << \"test pass: BlockGo\" << std::endl;\n  }\n  {\n#ifdef NO_JAVA\n    std::cout << \"skipping: Ludii Tic-Tac-Toe\" << std::endl;\n#else\n    std::cout << \"testing: Ludii Tic-Tac-Toe\" << std::endl;\n    Ludii::JNIUtils::InitJVM(\"\");  // Use default /ludii/Ludii.jar path\n    JNIEnv* jni_env = Ludii::JNIUtils::GetEnv();\n\n    if (jni_env) {\n      Ludii::LudiiGameWrapper game_wrapper(\"Tic-Tac-Toe.lud\");\n      auto state = std::make_unique<Ludii::LudiiStateWrapper>(\n          seed, std::move(game_wrapper));\n      doTest(*state);\n      Ludii::JNIUtils::CloseJVM();\n      std::cout << \"test pass: Ludii Tic-Tac-Toe\" << std::endl;\n    } else {\n      std::cout << \"skipping: Ludii Tic-Tac-Toe\" << std::endl;\n    }\n#endif\n  }\n\n  {\n    std::cout << \"testing: connect four\" << std::endl;\n    auto state = StateForConnectFour(seed);\n    doTest(state);\n    std::cout << 
\"test pass: connect four\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: breakthrough\" << std::endl;\n    auto state = StateForBreakthrough(seed);\n    doTest(state);\n    std::cout << \"test pass: breakthrough\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Connect6\" << std::endl;\n    auto state = Connect6::StateForConnect6(seed);\n    doTest(state);\n    std::cout << \"test pass: Connect6\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Tic-tac-toe\" << std::endl;\n    auto state = MNKGame::State<3, 3, 3>(seed);\n    doTest(state);\n    std::cout << \"test pass: Tic-tac-toe\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Free-style gomoku\" << std::endl;\n    auto state = MNKGame::State<15, 15, 5>(seed);\n    doTest(state);\n    std::cout << \"test pass: Free-style gomoku\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Othello\" << std::endl;\n    auto state8 = Othello::State<8>(seed);\n    doTest(state8);\n    std::cout << \"test pass: 8×8 Othello\" << std::endl;\n    auto state10 = Othello::State<10>(seed);\n    doTest(state10);\n    std::cout << \"test pass: 10×10 Othello\" << std::endl;\n    auto state16 = Othello::State<16>(seed);\n    doTest(state16);\n    std::cout << \"test pass: 16×16 Othello\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Game of the Amazons\" << std::endl;\n    auto state = Amazons::State(seed);\n    doTest(state);\n    std::cout << \"test pass: Game of the Amazons\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Chinese Checkers\" << std::endl;\n    auto state = ChineseCheckers::State(seed);\n    doTest(state);\n    std::cout << \"test pass: Chinese Checkers\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Gomoku swap2\" << std::endl;\n    auto state = GomokuSwap2::State(seed);\n    doTest(state);\n    std::cout << \"test pass: Gomoku swap2\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: hex5pie\" << std::endl;\n    auto state = Hex::State<5, 
true>(seed);\n    doTest(state);\n    std::cout << \"test pass: hex5pie\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: hex11pie\" << std::endl;\n    auto state = Hex::State<11, true>(seed);\n    doTest(state);\n    std::cout << \"test pass: hex11pie\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: hex13pie\" << std::endl;\n    auto state = Hex::State<13, true>(seed);\n    doTest(state);\n    std::cout << \"test pass: hex13pie\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: hex19pie\" << std::endl;\n    auto state = Hex::State<19, true>(seed);\n    doTest(state);\n    std::cout << \"test pass: hex19pie\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: hex5\" << std::endl;\n    auto state = Hex::State<5, false>(seed);\n    doTest(state);\n    std::cout << \"test pass: hex5\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: hex11\" << std::endl;\n    auto state = Hex::State<11, false>(seed);\n    doTest(state);\n    std::cout << \"test pass: hex11\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: hex13\" << std::endl;\n    auto state = Hex::State<13, false>(seed);\n    doTest(state);\n    std::cout << \"test pass: hex13\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: hex19\" << std::endl;\n    auto state = Hex::State<19, false>(seed);\n    doTest(state);\n    std::cout << \"test pass: hex19\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: havannah5pieExt\" << std::endl;\n    auto state = Havannah::State<5, true, true>(seed);\n    doTest(state);\n    std::cout << \"test pass: havannah5pieExt\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: havannah8pieExt\" << std::endl;\n    auto state = Havannah::State<8, true, true>(seed);\n    doTest(state);\n    std::cout << \"test pass: havannah8pieExt\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: havannah5pie\" << std::endl;\n    auto state = Havannah::State<5, true, false>(seed);\n    doTest(state);\n    std::cout << \"test pass: havannah5pie\" 
<< std::endl;\n  }\n\n  {\n    std::cout << \"testing: havannah8pie\" << std::endl;\n    auto state = Havannah::State<8, true, false>(seed);\n    doTest(state);\n    std::cout << \"test pass: havannah8pie\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: havannah5\" << std::endl;\n    auto state = Havannah::State<5, false, false>(seed);\n    doTest(state);\n    std::cout << \"test pass: havannah5\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: havannah8\" << std::endl;\n    auto state = Havannah::State<8, false, false>(seed);\n    doTest(state);\n    std::cout << \"test pass: havannah8\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Outer Open Gomoku\" << std::endl;\n    auto state = StateForOOGomoku(seed);\n    doTest(state);\n    std::cout << \"test pass: Outer Open Gomoku\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Mastermind\" << std::endl;\n    auto state = Mastermind::State<10, 7, 2>(seed);\n    doTest(state);\n    std::cout << \"test pass: Mastermind\" << std::endl;\n  }\n  {\n    std::cout << \"testing: Minesweeper beginner\" << std::endl;\n    auto state = Minesweeper::State<8, 8, 10>(seed);\n    doTest(state);\n    std::cout << \"test pass: Minesweeper beginner\" << std::endl;\n  }\n\n  /* win rates for intermediate and expert are too low\n     when taking random actions\n  {\n    std::cout << \"testing: Minesweeper intermediate\" << std::endl;\n    auto state = Minesweeper::State<15, 13, 40>(seed);\n    doTest(state);\n    std::cout << \"test pass: Minesweeper intermediate\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Minesweeper expert\" << std::endl;\n    auto state = Minesweeper::State<30, 16, 99>(seed);\n    doTest(state);\n    std::cout << \"test pass: Minesweeper expert\" << std::endl;\n  }\n  */\n\n  {\n    std::cout << \"testing: Outer Open Gomoku\" << std::endl;\n    auto state = StateForOOGomoku(seed);\n    doTest(state);\n    std::cout << \"test pass: Outer Open Gomoku\" << std::endl;\n  }\n\n  
{\n    std::cout << \"testing: Surakarta\" << std::endl;\n    auto state = StateForSurakarta(seed);\n    doTest(state);\n    std::cout << \"test pass: Surakarta\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Einstein\" << std::endl;\n    auto state = StateForEinstein(seed);\n    doTest(state);\n    std::cout << \"test pass: Einstein\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Minishogi\" << std::endl;\n    auto state = StateForMinishogi(seed);\n    doTest(state);\n    std::cout << \"test pass: Minishogi\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Diceshogi\" << std::endl;\n    auto state = StateForDiceshogi(seed);\n    doTest(state);\n    std::cout << \"test pass: Diceshogi\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: YINSH\" << std::endl;\n    auto state = StateForYinsh(seed);\n    doTest(state);\n    std::cout << \"test pass: YINSH\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: Kyotoshogi\" << std::endl;\n    auto state = StateForKyotoshogi(seed);\n    doTest(state);\n    std::cout << \"test pass: Kyotoshogi\" << std::endl;\n  }\n\n  {\n    std::cout << \"testing: chess\" << std::endl;\n    auto state = chess::State(seed);\n    doTest(state);\n    std::cout << \"test pass: chess\" << std::endl;\n  }\n}\n"
  },
  {
    "path": "src/core/utils.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <torch/torch.h>\n\n#include \"state.h\"\n\nnamespace core {\n\ninline void getFeatureInTensor(const State& state, float* dest) {\n  auto& feat = state.GetFeatures();\n  memcpy(dest, feat.data(), sizeof(float) * feat.size());\n}\n\ninline void getFeatureInTensor(const State& state, torch::Tensor dest) {\n  assert(dest.dtype() == torch::kFloat32);\n  auto& feat = state.GetFeatures();\n  torch::Tensor temp = torch::from_blob(\n      (void*)feat.data(), state.GetFeatureSize(), dest.dtype());\n  if (feat.size() != temp.numel()) {\n    throw std::runtime_error(\"getFeatureInTensor size mismatch\");\n  }\n  dest.copy_(temp);\n}\n\ninline torch::Tensor getFeatureInTensor(const State& state) {\n  torch::Tensor t = torch::zeros(state.GetFeatureSize(), torch::kFloat32);\n  getFeatureInTensor(state, t);\n  return t;\n}\n\ninline void getRawFeatureInTensor(const State& state, torch::Tensor dest) {\n  assert(dest.dtype() == torch::kFloat32);\n  auto& feat = state.GetRawFeatures();\n  torch::Tensor temp = torch::from_blob(\n      (void*)feat.data(), state.GetRawFeatureSize(), dest.dtype());\n  if (feat.size() != temp.numel()) {\n    throw std::runtime_error(\"getRawFeatureInTensor size mismatch\");\n  }\n  dest.copy_(temp);\n}\n\ninline torch::Tensor getRawFeatureInTensor(const State& state) {\n  torch::Tensor t = torch::zeros(state.GetRawFeatureSize(), torch::kFloat32);\n  getRawFeatureInTensor(state, t);\n  return t;\n}\n\ninline void getPolicyMaskInTensor(\n    const State& state, torch::TensorAccessor<float, 3> maskaccessor) {\n  for (const auto& action : state.GetLegalActions()) {\n    maskaccessor[action.GetX()][action.GetY()][action.GetZ()] = 1;\n  }\n}\n\ninline void getPolicyMaskInTensor(const State& state, 
torch::Tensor& mask) {\n  assert(state.GetActionSize().size() == 3);\n  auto maskaccessor = mask.accessor<float, 3>();\n  getPolicyMaskInTensor(state, maskaccessor);\n}\n\ninline torch::Tensor getPolicyMaskInTensor(const State& state) {\n  torch::Tensor mask = torch::zeros(state.GetActionSize(), torch::kFloat32);\n  getPolicyMaskInTensor(state, mask);\n  return mask;\n}\n\ninline void getPolicyInTensor(const State& state,\n                              const std::vector<float>& pi,\n                              torch::Tensor& dest,\n                              torch::Tensor& mask) {\n  assert(dest.dtype() == torch::kFloat32);\n  assert(state.GetActionSize().size() == 3);\n  auto accessor = dest.accessor<float, 3>();\n  auto maskaccessor = mask.accessor<float, 3>();\n\n  const auto& legalAction = state.GetLegalActions();\n  for (mcts::Action actionIdx = 0; actionIdx != pi.size(); ++actionIdx) {\n    if (actionIdx >= (int)legalAction.size() || actionIdx < 0) {\n      std::cout << \"Wrong action in getPolicyTargetInTensor, \"\n                << \"action idx: \" << actionIdx\n                << \", num legal: \" << legalAction.size() << std::endl;\n      std::terminate();\n      assert(false);\n    }\n    const auto& action = legalAction[actionIdx];\n    float piVal = pi[actionIdx];\n    int x = action.GetX();\n    int y = action.GetY();\n    int z = action.GetZ();\n    accessor[x][y][z] += piVal;\n    maskaccessor[x][y][z] = 1;\n  }\n}\n\ninline std::pair<torch::Tensor, torch::Tensor> getPolicyInTensor(\n    const State& state, const std::vector<float>& pi) {\n  torch::Tensor t = torch::zeros(state.GetActionSize(), torch::kFloat32);\n  torch::Tensor mask = torch::zeros(state.GetActionSize(), torch::kFloat32);\n  getPolicyInTensor(state, pi, t, mask);\n  return std::make_pair(t, mask);\n}\n\ninline void normalize(std::vector<float>& a2pi) {\n  float sumProb = 0.0f;\n  for (auto& p : a2pi) {\n    sumProb += p;\n  }\n\n  if (sumProb > 1.0f + 1e-3f) {\n    throw 
std::runtime_error(\"sumProb is \" + std::to_string(sumProb));\n  }\n\n  if (sumProb != 0.0f) {\n    for (auto& p : a2pi) {\n      p /= sumProb;\n    }\n  }\n}\n\ninline void getLegalPi(const State& state,\n                       torch::TensorAccessor<float, 3> accessor,\n                       std::vector<float>& out) {\n  const auto& legalActions = state.GetLegalActions();\n  out.resize(legalActions.size());\n  for (size_t i = 0; i != legalActions.size(); ++i) {\n    const auto& action = legalActions[i];\n    float& pi = accessor[action.GetX()][action.GetY()][action.GetZ()];\n    out[i] = std::exchange(pi, -400.0f);\n    // we exchange with -400 (exp(-400) ~ 0) because:\n    //  - two actions A and B can share the same policy output location\n    //  - the NN will then be trained to output the sum of their policy values\n    //  in that location\n    //  - if we give both actions that policy output, there will be a bias in\n    //  the MCTS towards exploring\n    //     A and B, and the sum of all policy values will be > 1\n    //  - instead, we give the sum to one action and 0 to the others, preserving\n    //  the sum of probabilities\n    //  - the MCTS must compensate for this by forcefully visiting both A and B\n    //  whenever A or B is visited\n    //      other algorithms are unlikely to support multiple actions sharing\n    //      the same policy output\n    //  - it would be equivalent to divide pi by the number of actions that\n    //  share this policy value (2 in\n    //      this case), but it would be slower as we don't know beforehand how\n    //      many that is\n    // this will set the source tensor to all -400 (it shouldn't be needed for\n    // anything else)\n  }\n}\n\ninline void getLegalPi(const State& state,\n                       const torch::Tensor& pi,\n                       std::vector<float>& out) {\n  auto accessor = pi.accessor<float, 3>();\n  return getLegalPi(state, accessor, out);\n}\n\ninline int64_t product(const 
std::vector<int64_t>& nums) {\n  int64_t p = 1;\n  for (auto n : nums) {\n    p *= n;\n  }\n  return p;\n}\n\ntemplate <typename T> inline static void softmax_(T begin, T end) {\n  if (begin == end) {\n    return;\n  }\n  float max = *begin;\n  for (auto i = std::next(begin); i != end; ++i) {\n    max = std::max(max, *i);\n  }\n  float sum = 0.0f;\n  for (auto i = begin; i != end; ++i) {\n    *i = std::exp(*i - max);\n    sum += *i;\n  }\n  for (auto i = begin; i != end; ++i) {\n    *i /= sum;\n  }\n}\n\ninline static void softmax_(std::vector<float>& vec) {\n  softmax_(vec.begin(), vec.end());\n}\n\ntemplate <typename T>\ninline static void softmax_(T begin, T end, float temperature) {\n  if (begin == end) {\n    return;\n  }\n  float itemp = 1.0f / temperature;\n  for (auto i = begin; i != end; ++i) {\n    *i *= itemp;\n  }\n  float max = *begin;\n  for (auto i = std::next(begin); i != end; ++i) {\n    max = std::max(max, *i);\n  }\n  float sum = 0.0f;\n  for (auto i = begin; i != end; ++i) {\n    *i = std::exp(*i - max);\n    sum += *i;\n  }\n  for (auto i = begin; i != end; ++i) {\n    *i /= sum;\n  }\n}\n\ninline static void softmax_(std::vector<float>& vec, float temperature) {\n  softmax_(vec.begin(), vec.end(), temperature);\n}\n\n}  // namespace core\n"
  },
  {
    "path": "src/distributed/distributed.cc",
    "content": "\n#include \"distributed.h\"\n\n#include \"rpc.h\"\n\n#include \"rdma.h\"\n\n#define ZSTD_STATIC_LINKING_ONLY\n#include \"zstd/lib/zstd.h\"\n\n#include <fmt/printf.h>\n#include <torch/torch.h>\n\n#include <cstring>\n#include <functional>\n#include <optional>\n#include <random>\n#include <string>\n#include <type_traits>\n#include <unordered_set>\n\nnamespace distributed {\n\nstruct NetStats {\n  bool hasData = false;\n  double sent = 0.0;\n  double received = 0.0;\n  double rpcCalls = 0.0;\n  double latency = 0.0;\n  std::chrono::steady_clock::time_point lastprint{};\n  std::mutex m;\n};\ninline NetStats netstats;\n\nstruct NetStatsCounter {\n  std::chrono::steady_clock::time_point timestamp{};\n  size_t sent = 0;\n  size_t received = 0;\n  size_t rpcCalls = 0;\n};\n\ntemplate <typename T>\nvoid addnetworkstats(const T& obj, NetStatsCounter& counter) {\n  auto now = std::chrono::steady_clock::now();\n  auto elapsed = now - counter.timestamp;\n  if (elapsed <\n      (netstats.hasData ? 
std::chrono::seconds(1) : std::chrono::seconds(10))) {\n    return;\n  }\n  std::unique_lock l(netstats.m, std::try_to_lock);\n  if (!l.owns_lock()) {\n    return;\n  }\n  counter.timestamp = now;\n  double t = std::chrono::duration_cast<\n                 std::chrono::duration<double, std::ratio<1, 1>>>(elapsed)\n                 .count();\n  size_t newSent = obj.bytesSent();\n  size_t newReceived = obj.bytesReceived();\n  size_t newCalls = obj.numRpcCalls();\n  double sent = (newSent - std::exchange(counter.sent, newSent)) / t;\n  double recv =\n      (newReceived - std::exchange(counter.received, newReceived)) / t;\n  double calls = (newCalls - std::exchange(counter.rpcCalls, newCalls)) / t;\n\n  double alpha = std::pow(0.99, t);\n  if (!netstats.hasData) {\n    alpha = 0.0;\n    netstats.hasData = true;\n  }\n  netstats.sent = netstats.sent * alpha + sent * (1.0 - alpha);\n  netstats.received = netstats.received * alpha + recv * (1.0 - alpha);\n  netstats.rpcCalls = netstats.rpcCalls * alpha + calls * (1.0 - alpha);\n\n  constexpr bool haslatency = std::is_same_v<T, rpc::Client>;\n  if constexpr (haslatency) {\n    double ll = std::chrono::duration_cast<\n                    std::chrono::duration<double, std::ratio<1, 1000>>>(\n                    obj.lastLatency())\n                    .count();\n    netstats.latency = netstats.latency * alpha + ll * (1.0 - alpha);\n  }\n\n  if (now - netstats.lastprint >= std::chrono::seconds(60)) {\n    netstats.lastprint = now;\n    if (haslatency) {\n      printf(\"Network stats: in: %.02fM/s out: %.02fM/s  RPC calls: %.02f/s \"\n             \"latency: %.02fms\\n\",\n             netstats.received / 1024 / 1024, netstats.sent / 1024 / 1024,\n             netstats.rpcCalls, netstats.latency);\n    } else {\n      printf(\"Network stats: in: %.02fM/s out: %.02fM/s  RPC calls: %.02f/s\\n\",\n             netstats.received / 1024 / 1024, netstats.sent / 1024 / 1024,\n             netstats.rpcCalls);\n    }\n  }\n}\n\ninline 
rpc::Rpc& getRpc() {\n  static std::unique_ptr<rpc::Rpc> rpc = []() {\n    auto rpc = std::make_unique<rpc::Rpc>();\n    rpc->asyncRun(40);\n    return rpc;\n  }();\n  return *rpc;\n}\n\nstruct RDMAModelInfo {\n  uint32_t checksum;\n  uint32_t key;\n  uintptr_t address;\n  size_t size;\n};\n\nstruct Crc32 {\n  std::array<uint32_t, 256> lut;\n  Crc32() {\n    for (uint32_t i = 0; i != 256; ++i) {\n      uint32_t v = i;\n      for (size_t b = 0; b != 8; ++b) {\n        v = (v >> 1) ^ (v & 1 ? 0xedb88320 : 0);\n      }\n      lut[i] = v;\n    }\n  }\n  uint32_t operator()(const void* ptr, size_t size) {\n    uint32_t r = 0xffffffff;\n    unsigned char* c = (unsigned char*)ptr;\n    unsigned char* end = c + size;\n    while (c != end) {\n      r = (r >> 8) ^ lut[(r ^ *c++) & 0xff];\n    }\n    return r;\n  }\n} crc32;\n\nclass ServerImpl {\n\n  std::shared_ptr<rpc::Server> server;\n\n  std::minstd_rand rng{std::random_device()()};\n\n  float rollChance(std::string_view id) {\n    auto i = models.find(id);\n    if (i == models.end()) {\n      return 0.0f;\n    }\n    float rating = i->second.rating;\n    float max = 0.0f;\n    std::vector<std::pair<float, std::string_view>> sorted;\n    for (auto& [id, m] : models) {\n      sorted.emplace_back(m.rating, id);\n      max = std::max(max, m.rating);\n    }\n    std::sort(sorted.begin(), sorted.end(), std::greater<>());\n    float lo = 1.0f;\n    float ret = 0.0f;\n    for (size_t i = 0; i != sorted.size(); ++i) {\n      auto [r, n] = sorted[i];\n      float x = r - max;\n      float o =\n          x == 0 ? 
1.0f : std::min(std::log(1 - (2.0f * 200) / x) / 4, 1.0f);\n      if (r < rating) {\n        ret += (lo - o) / i;\n      }\n      lo = o;\n    }\n    ret += lo / sorted.size();\n    return ret;\n  }\n\n  std::string_view sampleModelId() {\n    if (models.empty() ||\n        std::uniform_real_distribution<double>(0.0, 1.0)(rng) < 0.5) {\n      return \"dev\";\n    }\n    if (std::uniform_real_distribution<double>(0.0, 1.0)(rng) < 0.01) {\n      auto it = models.begin();\n      std::advance(\n          it, std::uniform_int_distribution<size_t>(0, models.size() - 1)(rng));\n      return it->first;\n    }\n\n    float max = 0.0f;\n    for (auto& [id, m] : models) {\n      max = std::max(max, m.rating);\n    }\n    double x = std::uniform_real_distribution<double>(0.0, 1.0)(rng);\n    double target = -(2.0f / (std::exp(x * 4) - 1)) * 200;\n    std::vector<std::string_view> pool;\n    for (auto& [id, m] : models) {\n      double diff = m.rating - max;\n      if (diff >= target) {\n        pool.push_back(id);\n      }\n    }\n    if (!pool.empty()) {\n      return pool.at(\n          std::uniform_int_distribution<size_t>(0, pool.size() - 1)(rng));\n    }\n    return \"dev\";\n  }\n\n  std::chrono::steady_clock::time_point lastRatingPrint =\n      std::chrono::steady_clock::now();\n\n  void addResult(std::string_view id, float ratio, float reward) {\n    if (ratio < 0.9f) {\n      return;\n    }\n    auto i = models.find(id);\n    if (i == models.end()) {\n      return;\n    }\n    auto di = models.find(\"dev\");\n    if (di == models.end()) {\n      return;\n    }\n\n    if (i == di) {\n      return;\n    }\n\n    float rating = i->second.rating;\n    float devrating = di->second.rating;\n\n    auto calc = [&](float reward, float diff) {\n      float k = 6;\n      float scale = 400;\n      float offset = 0.5f;\n      if (reward > 0) {\n        offset = 1.0f;\n      } else if (reward < 0) {\n        offset = 0.0f;\n      }\n      return k * (offset - 1.0 / (1.0 + 
std::pow(10.0f, diff / scale)));\n    };\n\n    float delta = calc(reward, devrating - rating) * ratio;\n    float delta2 = calc(-reward, rating - devrating) * ratio;\n\n    rating += delta;\n    devrating += delta2;\n\n    i->second.rating = rating;\n    di->second.rating = devrating;\n\n    ++i->second.ngames;\n    ++di->second.ngames;\n\n    i->second.rewardsum += reward;\n    di->second.rewardsum -= reward;\n\n    i->second.avgreward = i->second.rewardsum / i->second.ngames;\n    di->second.avgreward = di->second.rewardsum / di->second.ngames;\n\n    auto now = std::chrono::steady_clock::now();\n    if (now - lastRatingPrint >= std::chrono::seconds(120)) {\n      lastRatingPrint = now;\n      std::vector<std::pair<float, std::string_view>> sorted;\n      for (auto& [id, m] : models) {\n        sorted.emplace_back(m.rating, id);\n\n        m.curgames = m.ngames - m.prevngames;\n        m.curreward = (m.rewardsum - m.prevrewardsum) / m.curgames;\n\n        m.prevngames = m.ngames;\n        m.prevrewardsum = m.rewardsum;\n      }\n      std::sort(sorted.begin(), sorted.end(), std::greater<>());\n      int devrank = 0;\n      float devrating = 0;\n      for (size_t i = 0; i != sorted.size(); ++i) {\n        if (sorted[i].second == \"dev\") {\n          devrank = (int)i + 1;\n          devrating = sorted[i].first;\n          break;\n        }\n      }\n      if (sorted.size() > 20) {\n        sorted.resize(20);\n      }\n      std::string str;\n      int rank = 1;\n      auto stringify = [&](int rank, float rating, std::string_view id) {\n        return fmt::sprintf(\"%d. 
%g %s (roll chance %f) (total %d games, %f \"\n                            \"avg reward) (diff %d games, %f avg reward)\\n\",\n                            rank, rating, id, rollChance(id), models[id].ngames,\n                            models[id].avgreward, models[id].curgames,\n                            models[id].curreward);\n      };\n      for (auto& [rating, id] : sorted) {\n        str += stringify(rank, rating, id);\n        ++rank;\n      }\n      if (devrank > 20) {\n        str += stringify(devrank, devrating, \"dev\");\n      }\n      fmt::printf(\"Top 20:\\n%s\", str);\n    }\n  }\n\n  std::pair<std::string_view, int> requestModel(bool wantsNewModelId,\n                                                std::string_view modelId) {\n    std::unique_lock l(mut);\n    if (wantsNewModelId) {\n      modelId = sampleModelId();\n    }\n    int version = -1;\n    auto i = models.find(modelId);\n    if (i == models.end()) {\n      modelId = \"dev\";\n      i = models.find(modelId);\n    }\n    if (i != models.end()) {\n      version = i->second.version;\n    } else {\n      version = -1;\n    }\n    addnetworkstats(*server, netstatsCounter);\n    return {modelId, version};\n  }\n\n  std::optional<std::unordered_map<std::string, torch::Tensor>>\n  requestStateDict(std::string_view modelId) {\n    std::unique_lock l(mut);\n    auto i = models.find(modelId);\n    if (i == models.end()) {\n      return {};\n    } else {\n      return i->second.stateDict;\n    }\n  }\n\n  std::optional<std::vector<char>> requestCompressedStateDict(\n      std::string_view modelId) {\n    std::unique_lock l(mut);\n    auto i = models.find(modelId);\n    if (i == models.end()) {\n      return {};\n    } else {\n      if (i->second.compressedStateDict.empty()) {\n        for (int n = 0; n != 500 && i->second.compressing.exchange(true); ++n) {\n          l.unlock();\n          std::this_thread::sleep_for(std::chrono::milliseconds(50));\n          l.lock();\n          i = 
models.find(modelId);\n          if (i == models.end()) {\n            return {};\n          }\n          if (!i->second.compressedStateDict.empty()) {\n            return i->second.compressedStateDict;\n          }\n        }\n        auto copy = i->second.stateDict;\n        l.unlock();\n        auto start = std::chrono::steady_clock::now();\n        rpc::Serializer s;\n        rpc::Serialize ser(s);\n        ser(copy);\n        auto now = std::chrono::steady_clock::now();\n        double t1 =\n            std::chrono::duration_cast<\n                std::chrono::duration<double, std::ratio<1, 1000>>>(now - start)\n                .count();\n        start = now;\n        size_t oldsize = s.size();\n        s.compress(15);\n        size_t newsize = s.size();\n        s.buf.shrink_to_fit();\n        now = std::chrono::steady_clock::now();\n        double t2 =\n            std::chrono::duration_cast<\n                std::chrono::duration<double, std::ratio<1, 1000>>>(now - start)\n                .count();\n        start = now;\n\n        fmt::printf(\"State dict serialized in %gms, compressed (from %gM to \"\n                    \"%gM) in %gms\\n\",\n                    t1, oldsize / 1024.0 / 1024.0, newsize / 1024.0 / 1024.0,\n                    t2);\n\n        l.lock();\n        i = models.find(modelId);\n        if (i == models.end()) {\n          return {};\n        }\n        i->second.compressedStateDict = std::move(s.buf);\n        i->second.compressing = false;\n      }\n      return i->second.compressedStateDict;\n    }\n  }\n\n  void trainData(const std::unordered_map<std::string, torch::Tensor> data) {\n    onTrainData(std::move(data));\n  }\n\n  void gameResult(\n      std::vector<std::pair<float, std::unordered_map<std::string_view, float>>>\n          result) {\n    std::lock_guard l(mut);\n    for (auto& [reward, models] : result) {\n      for (auto& [id, ratio] : models) {\n        addResult(id, ratio, reward);\n      }\n    }\n  }\n\n  struct 
rdmaClient {\n    std::chrono::steady_clock::time_point timestamp;\n    std::unique_ptr<rdma::Host> host;\n    rdma::Endpoint localEp;\n    rdma::Endpoint remoteEp;\n  };\n\n  std::unique_ptr<rdma::Context> rdmaContext;\n  std::unique_ptr<rdma::CompletionQueue> rdmaCq;\n  std::list<rdmaClient> rdmaClients;\n  std::mutex rdmaMut;\n\n  rdma::Endpoint rdmaConnect(rdma::Endpoint ep) {\n    auto host = rdmaContext->createHost();\n    auto localEp = host->init(*rdmaCq);\n    host->connect(ep);\n\n    // fmt::printf(\"rdmaConnect, remoteEp %d:%d localEp %d:%d\\n\", ep.lid,\n    // ep.qpnum, localEp.lid, localEp.qpnum);\n\n    std::lock_guard l(rdmaMut);\n    auto now = std::chrono::steady_clock::now();\n    for (auto i = rdmaClients.begin(); i != rdmaClients.end();) {\n      if (now - i->timestamp >= std::chrono::minutes(1)) {\n        // fmt::printf(\"RDMA client %d:%d timed out\\n\", i->remoteEp.lid,\n        // i->remoteEp.qpnum);\n        i = rdmaClients.erase(i);\n      } else {\n        ++i;\n      }\n    }\n    rdmaClients.emplace_back();\n    auto& c = rdmaClients.back();\n    c.host = std::move(host);\n    c.localEp = localEp;\n    c.remoteEp = ep;\n    c.timestamp = now;\n    return localEp;\n  }\n\n  bool rdmaKeepalive(rdma::Endpoint remoteEp) {\n    std::unique_lock rl(rdmaMut);\n    for (auto i = rdmaClients.begin(); i != rdmaClients.end(); ++i) {\n      if (i->remoteEp == remoteEp) {\n        // fmt::printf(\"keepalive: rdma client %d:%d found, timestamp\n        // updated\\n\", remoteEp.lid, remoteEp.qpnum);\n        i->timestamp = std::chrono::steady_clock::now();\n        return true;\n      }\n    }\n    return false;\n  }\n\n  std::optional<RDMAModelInfo> rdmaGetModel(rdma::Endpoint remoteEp,\n                                            std::string_view modelId) {\n    try {\n      std::unique_lock rl(rdmaMut);\n      rdma::Endpoint localEp;\n      for (auto i = rdmaClients.begin(); i != rdmaClients.end(); ++i) {\n        if (i->remoteEp == remoteEp) 
{\n          i->timestamp = std::chrono::steady_clock::now();\n          localEp = i->localEp;\n        }\n      }\n      rl.unlock();\n      std::unique_lock l(mut);\n      auto i = models.find(modelId);\n      if (i == models.end()) {\n        return {};\n      }\n      for (int n = 0;; ++n) {\n        if (!i->second.rdmaBuffer ||\n            i->second.rdmaBufferVersion != i->second.version) {\n          if (n < 500 && i->second.rdmaSerializing.exchange(true)) {\n            l.unlock();\n            std::this_thread::sleep_for(std::chrono::milliseconds(50));\n            l.lock();\n            i = models.find(modelId);\n            if (i == models.end()) {\n              return {};\n            }\n          } else {\n            auto copy = i->second.stateDict;\n            int version = i->second.version;\n            l.unlock();\n            auto start = std::chrono::steady_clock::now();\n            rpc::Serializer s;\n            rpc::Serialize ser(s);\n            ser((uint32_t)0);\n            ser(copy);\n\n            uint32_t checksum =\n                s.size() > 4 ? 
crc32(s.data() + 4, s.size() - 4) : 0;\n            std::memcpy((void*)s.data(), &checksum, 4);\n\n            l.lock();\n            i = models.find(modelId);\n            if (i == models.end()) {\n              return {};\n            }\n\n            auto buffer = std::move(i->second.rdmaBuffer);\n            auto storage = std::move(i->second.rdmaBufferStorage);\n            // if (buffer && storage.size() >= s.size()) {\n            if (false) {\n\n              storage.resize(s.size());\n              std::memcpy(storage.data(), s.data(), s.size());\n\n              auto now = std::chrono::steady_clock::now();\n              double t1 =\n                  std::chrono::duration_cast<\n                      std::chrono::duration<double, std::ratio<1, 1000>>>(now -\n                                                                          start)\n                      .count();\n              fmt::printf(\"State dict serialized and RDMA buffer updated in \"\n                          \"%gms, %gM (checksum %#x)\\n\",\n                          t1, storage.size() / 1024.0 / 1024.0, checksum);\n            } else {\n              i->second.rdmaBuffer_01 = std::move(buffer);\n              i->second.rdmaBufferStorage_01 = std::move(storage);\n\n              storage = std::move(s.buf);\n              buffer = rdmaContext->createBuffer(\n                  (void*)storage.data(), storage.size());\n\n              auto now = std::chrono::steady_clock::now();\n              double t1 =\n                  std::chrono::duration_cast<\n                      std::chrono::duration<double, std::ratio<1, 1000>>>(now -\n                                                                          start)\n                      .count();\n              fmt::printf(\"State dict serialized and RDMA buffer created in \"\n                          \"%gms, %gM (checksum %#x)\\n\",\n                          t1, storage.size() / 1024.0 / 1024.0, checksum);\n            }\n\n            
i->second.rdmaBuffer = std::move(buffer);\n            i->second.rdmaBufferStorage = std::move(storage);\n            i->second.rdmaSerializing = false;\n            i->second.rdmaBufferVersion = version;\n          }\n        } else {\n          RDMAModelInfo r;\n          r.key = i->second.rdmaBuffer->keyFor(localEp);\n          r.checksum = i->second.rdmaBufferChecksum;\n          r.address = (uintptr_t)i->second.rdmaBufferStorage.data();\n          r.size = i->second.rdmaBufferStorage.size();\n          return r;\n        }\n      }\n    } catch (const std::exception& e) {\n      fmt::printf(\"rdmaGetModel error: %s\\n\", e.what());\n      return {};\n    }\n  }\n\n  struct ModelInfo {\n    std::string id;\n    int version = 0;\n    float rating = 0.0f;\n    std::unordered_map<std::string, torch::Tensor> stateDict;\n    std::vector<char> compressedStateDict;\n    std::atomic<bool> compressing{false};\n    uint64_t ngames = 0;\n    double rewardsum = 0.0;\n    float avgreward = 0.0f;\n\n    uint64_t prevngames = 0;\n    double prevrewardsum = 0.0;\n\n    uint64_t curgames = 0;\n    float curreward = 0.0f;\n\n    double rollChance = 0.0;\n\n    std::atomic<bool> rdmaSerializing{false};\n    std::vector<char> rdmaBufferStorage;\n    std::unique_ptr<rdma::Buffer> rdmaBuffer;\n    int rdmaBufferVersion = -1;\n    uint32_t rdmaBufferChecksum = 0;\n    std::vector<char> rdmaBufferStorage_01;\n    std::unique_ptr<rdma::Buffer> rdmaBuffer_01;\n  };\n\n  std::mutex mut;\n  std::unordered_map<std::string_view, ModelInfo> models;\n\n  std::mutex timemut;\n  std::unordered_map<std::string, float> calltimes;\n  std::chrono::steady_clock::time_point lasttimereport;\n\n public:\n  std::function<void(const std::unordered_map<std::string, torch::Tensor>)>\n      onTrainData;\n  NetStatsCounter netstatsCounter;\n\n  template <typename R, typename... 
Args>\n  auto define(std::string name, R (ServerImpl::*f)(Args...)) {\n    server->define(\n        name, std::function<R(Args...)>([this, f, name](Args&&... args) {\n          auto begin = std::chrono::steady_clock::now();\n          auto finish = [&]() {\n            auto end = std::chrono::steady_clock::now();\n            double t = std::chrono::duration_cast<\n                           std::chrono::duration<double, std::ratio<1, 1000>>>(\n                           end - begin)\n                           .count();\n            {\n              std::unique_lock l(timemut);\n              auto i = calltimes.find(name);\n              if (i == calltimes.end()) {\n                i = calltimes.emplace(name, t).first;\n              }\n              float& v = i->second;\n              v = v * 0.99 + t * 0.01;\n\n              if (end - lasttimereport >= std::chrono::seconds(60)) {\n                lasttimereport = end;\n                std::string s = \"RPC call times (running average):\\n\";\n                for (auto& v : calltimes) {\n                  s += fmt::sprintf(\"  %s  %fms\\n\", v.first, v.second);\n                }\n                l.unlock();\n                fmt::printf(\"%s\", s);\n              }\n            }\n          };\n          if constexpr (std::is_same_v<R, void>) {\n            (this->*f)(std::forward<Args>(args)...);\n            finish();\n          } else {\n            auto rv = (this->*f)(std::forward<Args>(args)...);\n            finish();\n            return rv;\n          }\n        }));\n  }\n\n  void start(std::string_view endpoint) {\n    if (endpoint.substr(0, 6) == \"tcp://\") {\n      endpoint.remove_prefix(6);\n    }\n    printf(\"actual listen endpoint is %s\\n\", std::string(endpoint).c_str());\n    server = getRpc().listen(\"\");\n\n    define(\"requestModel\", &ServerImpl::requestModel);\n    define(\"requestStateDict\", &ServerImpl::requestStateDict);\n    define(\n        \"requestCompressedStateDict\", 
&ServerImpl::requestCompressedStateDict);\n    define(\"trainData\", &ServerImpl::trainData);\n    define(\"gameResult\", &ServerImpl::gameResult);\n\n    try {\n      rdmaContext = rdma::create();\n      if (!rdmaContext) {\n        fmt::printf(\"RDMA/IB is not supported\\n\");\n      } else {\n        rdmaCq = rdmaContext->createCQ(4);\n        auto testHost = rdmaContext->createHost();\n\n        define(\"rdmaConnect\", &ServerImpl::rdmaConnect);\n        define(\"rdmaKeepalive\", &ServerImpl::rdmaKeepalive);\n        define(\"rdmaGetModel\", &ServerImpl::rdmaGetModel);\n\n        fmt::printf(\"RDMA over IB supported\\n\");\n      }\n    } catch (const std::exception& e) {\n      fmt::printf(\"RDMA error: %s\\nRDMA/IB will not be used\\n\", e.what());\n    }\n\n    server->listen(endpoint);\n  }\n\n  void updateModel(const std::string& id,\n                   std::unordered_map<std::string, torch::Tensor> stateDict) {\n    std::unique_lock l(mut);\n    auto i = models.try_emplace(id);\n    if (i.second) {\n      i.first->second.version =\n          std::uniform_int_distribution<int>(0, 10000)(rng) * 1000;\n      i.first->second.id = id;\n      (std::string_view&)i.first->first = i.first->second.id;\n      auto idev = models.find(\"dev\");\n      if (idev != models.end()) {\n        i.first->second.rating = idev->second.rating;\n      }\n    }\n    auto& m = i.first->second;\n    m.stateDict = std::move(stateDict);\n    ++m.version;\n    m.compressedStateDict.clear();\n  }\n};\n\nclass ClientImpl {\n\n  std::shared_ptr<rpc::Client> client;\n\n  mutable std::mutex mut;\n  std::unordered_set<std::string> allModelIds;\n  std::string_view currentModelId = *allModelIds.emplace(\"dev\").first;\n  int currentModelVersion = -1;\n  int gamesDoneWithCurrentModel = 0;\n  bool wantsNewModelId = false;\n  bool wantsTournamentResult_ = false;\n\n  std::chrono::steady_clock::time_point lastCheckTournamentResult =\n      std::chrono::steady_clock::now();\n  
std::chrono::steady_clock::time_point lastTournamentResult =\n      std::chrono::steady_clock::now();\n\n  std::vector<std::pair<float, std::unordered_map<std::string_view, float>>>\n      resultQueue;\n\n  NetStatsCounter netstatsCounter;\n\n  std::unique_ptr<rdma::Context> rdmaContext;\n  std::unique_ptr<rdma::Host> rdmaHost;\n  std::unique_ptr<rdma::Buffer> rdmaBuffer;\n  size_t rdmaBufferSize = 0;\n  std::vector<char> rdmaBufferStorage;\n  std::optional<rdma::Endpoint> rdmaEndpoint;\n  std::unique_ptr<rdma::CompletionQueue> rdmaCq;\n\n  std::mutex trainDataMut;\n  std::vector<std::future<void>> trainDataFutures;\n\n  struct Bandit {\n    std::mutex mut;\n    std::unordered_map<std::string, float> value;\n    std::minstd_rand rng{std::random_device{}()};\n    float sample(std::string name, float weight = 1.0f) {\n      std::lock_guard l(mut);\n      return std::uniform_real_distribution<float>(\n          0.0f, std::exp(value[name] * 4) * weight)(rng);\n    }\n    float get(std::string name) {\n      std::lock_guard l(mut);\n      return value[name];\n    }\n  };\n\n  struct BanditResultCounter {\n    Bandit& b;\n    std::string name;\n    bool succeeded_ = false;\n    BanditResultCounter(Bandit& b, std::string name)\n        : b(b)\n        , name(std::move(name)) {\n    }\n    void success(bool succeeded = true) {\n      succeeded_ = succeeded;\n    }\n    ~BanditResultCounter() {\n      std::lock_guard l(b.mut);\n      float& v = b.value[name];\n      v = v * 0.95f + (succeeded_ ? 
1.0f : -1.0f) * 0.05f;\n    }\n  };\n\n  Bandit bandit;\n\n  bool createRdmaHost() {\n    if (!rdmaContext) {\n      return false;\n    }\n    if (rdmaHost) {\n      return true;\n    }\n    try {\n      rdmaHost = rdmaContext->createHost();\n      return true;\n    } catch (const std::exception& e) {\n      fmt::printf(\"RDMA error: %s\\nRDMA/IB will not be used\\n\", e.what());\n      return false;\n    }\n  }\n\n  void requestModelStateDict(std::string modelId, int modelVersion) {\n    try {\n\n      auto start = std::chrono::steady_clock::now();\n\n      float rdmaValue = bandit.get(\"rdma\");\n      float rpcValue = bandit.get(\"rpc\");\n\n      fmt::printf(\"bandit values: rdma %g rpc %g\\n\", rdmaValue, rpcValue);\n\n      if ((rdmaHost || createRdmaHost()) &&\n          (rdmaValue >= 0.75f || (rdmaValue >= 0.0f && rpcValue < 0.5f) ||\n           bandit.sample(\"rdma\", 4.0f) > bandit.sample(\"rpc\"))) {\n\n        BanditResultCounter bc(bandit, \"rdma\");\n\n        try {\n\n          if (!rdmaEndpoint) {\n\n            rdmaCq = rdmaContext->createCQ(4);\n            rdmaEndpoint = rdmaHost->init(*rdmaCq);\n\n            fmt::printf(\"local endpoint is %d:%d\\n\", rdmaEndpoint->lid,\n                        rdmaEndpoint->qpnum);\n\n            auto remoteEp =\n                client->sync<rdma::Endpoint>(\"rdmaConnect\", *rdmaEndpoint);\n            addnetworkstats(*client, netstatsCounter);\n\n            fmt::printf(\n                \"remote endpoint is %d:%d\\n\", remoteEp.lid, remoteEp.qpnum);\n\n            rdmaHost->connect(remoteEp);\n          }\n\n          auto result = client->async<std::optional<RDMAModelInfo>>(\n              \"rdmaGetModel\", *rdmaEndpoint, modelId);\n          auto mi = result.get();\n\n          if (!mi) {\n            std::lock_guard l(mut);\n            currentModelId = \"dev\";\n            currentModelVersion = -1;\n          } else {\n\n            if (!rdmaBuffer || rdmaBufferSize < mi->size) {\n              
rdmaBufferStorage.resize(mi->size);\n              rdmaBuffer =\n                  rdmaContext->createBuffer(rdmaBufferStorage.data(), mi->size);\n            }\n\n            rdmaHost->read(*rdmaBuffer, rdmaBufferStorage.data(), mi->key,\n                           mi->address, mi->size);\n            rdmaHost->wait();\n\n            std::unordered_map<std::string, torch::Tensor> stateDict;\n            rpc::Deserializer d(rdmaBufferStorage.data(), mi->size);\n            rpc::Deserialize des(d);\n            uint32_t checksum = 0;\n            des(checksum);\n            if (mi->size > 4 &&\n                crc32(rdmaBufferStorage.data() + 4, mi->size - 4) == checksum) {\n              fmt::printf(\"RDMA model checksum OK (%#x)\\n\", checksum);\n              des(stateDict);\n              onUpdateModel(modelId, stateDict);\n              std::lock_guard l(mut);\n              if (currentModelId != modelId) {\n                currentModelId = *allModelIds.emplace(modelId).first;\n                gamesDoneWithCurrentModel = 0;\n              }\n              currentModelVersion = modelVersion;\n              fmt::printf(\"Got model '%s' version %d\\n\", modelId, modelVersion);\n              bc.success();\n            } else {\n              fmt::printf(\"RDMA model checksum error\\n\");\n              return;\n            }\n          }\n\n        } catch (const rdma::Error& e) {\n          fmt::printf(\"RDMA error: %s\\n\", e.what());\n\n          rdmaEndpoint.reset();\n          rdmaHost.reset();\n          rdmaCq.reset();\n          return;\n        }\n\n      } else {\n\n        BanditResultCounter bc(bandit, \"rpc\");\n\n        auto result = client->async<std::optional<std::vector<char>>>(\n            \"requestCompressedStateDict\", modelId);\n        auto compressed = result.get();\n        addnetworkstats(*client, netstatsCounter);\n        if (!compressed) {\n          std::unique_lock l(mut);\n          currentModelId = \"dev\";\n          
currentModelVersion = -1;\n        } else {\n          std::unordered_map<std::string, torch::Tensor> stateDict;\n          rpc::Deserializer d(compressed->data(), compressed->size());\n          d.decompress();\n          rpc::Deserialize des(d);\n          des(stateDict);\n          onUpdateModel(modelId, stateDict);\n          std::unique_lock l(mut);\n          if (currentModelId != modelId) {\n            currentModelId = *allModelIds.emplace(modelId).first;\n            gamesDoneWithCurrentModel = 0;\n          }\n          currentModelVersion = modelVersion;\n          fmt::printf(\"Got model '%s' version %d\\n\", modelId, modelVersion);\n          bc.success();\n        }\n      }\n\n      double t = std::chrono::duration_cast<\n                     std::chrono::duration<double, std::ratio<1, 1000>>>(\n                     std::chrono::steady_clock::now() - start)\n                     .count();\n      fmt::printf(\"State dict received and updated in %gms\\n\", t);\n\n    } catch (const rpc::RPCException& e) {\n      fmt::printf(\"RPC exception: %s\\n\", e.what());\n    }\n  }\n\n public:\n  std::function<void(\n      std::string_view, std::unordered_map<std::string, torch::Tensor>)>\n      onUpdateModel;\n\n  ClientImpl() {\n    try {\n      rdmaContext = rdma::create();\n      if (!rdmaContext) {\n        fmt::printf(\"RDMA/IB is not supported\\n\");\n      } else {\n        rdmaHost = rdmaContext->createHost();\n\n        fmt::printf(\"Using RDMA over IB for model transfers\\n\");\n      }\n    } catch (const std::exception& e) {\n      fmt::printf(\"RDMA error: %s\\nRDMA/IB will not be used\\n\", e.what());\n    }\n  }\n\n  void requestModel(bool isTournamentOpponent) {\n    try {\n      std::unique_lock l(mut);\n      if (!resultQueue.empty()) {\n        client->async(\"gameResult\", resultQueue);\n        resultQueue.clear();\n      }\n\n      //      fmt::printf(\n      //          \"Request model, isTournamentOpponent %d, wantsNewModelId\n      //   
       %d\\n\", isTournamentOpponent, wantsNewModelId);\n\n      auto result = client->async<std::pair<std::string, int>>(\n          \"requestModel\",\n          isTournamentOpponent ? std::exchange(wantsNewModelId, false) : false,\n          currentModelId);\n      l.unlock();\n\n      if (rdmaEndpoint) {\n        if (!client->sync<bool>(\"rdmaKeepalive\", *rdmaEndpoint)) {\n          rdmaEndpoint.reset();\n          rdmaHost.reset();\n          rdmaCq.reset();\n        }\n      }\n\n      auto [newId, version] = result.get();\n      addnetworkstats(*client, netstatsCounter);\n\n      // fmt::printf(\"Got model '%s'\\n\", newId);\n\n      l.lock();\n      auto now = std::chrono::steady_clock::now();\n      if (isTournamentOpponent &&\n          now - lastCheckTournamentResult >= std::chrono::minutes(2)) {\n        lastCheckTournamentResult = now;\n        wantsTournamentResult_ =\n            now - lastTournamentResult >= std::chrono::minutes(5);\n        if (!wantsTournamentResult_) {\n          wantsNewModelId = true;\n        }\n        //        fmt::printf(\"wantsTournamentResult_ is %d, wantsNewModelId is\n        //        %d\\n\",\n        //                    wantsTournamentResult_,\n        //                    wantsNewModelId);\n      } else if (!isTournamentOpponent) {\n        wantsTournamentResult_ = false;\n      }\n      if (currentModelId != newId || version != currentModelVersion) {\n        l.unlock();\n        requestModelStateDict(newId, version);\n      } else {\n        l.unlock();\n      }\n    } catch (const rpc::RPCException& e) {\n      fmt::printf(\"RPC exception: %s\\n\", e.what());\n    }\n  }\n\n  void connect(std::string_view endpoint) {\n    if (endpoint.substr(0, 6) == \"tcp://\") {\n      endpoint.remove_prefix(6);\n    }\n    printf(\"actual connect endpoint is %s\\n\", std::string(endpoint).c_str());\n    client = getRpc().connect(endpoint);\n  }\n\n  void sendTrainData(\n      const std::unordered_map<std::string, 
torch::Tensor>& data) {\n    try {\n      std::unique_lock l(trainDataMut);\n      std::future<void> fut;\n      if (trainDataFutures.size() >= 32) {\n        fut = std::move(trainDataFutures.front());\n        trainDataFutures.erase(trainDataFutures.begin());\n      }\n      l.unlock();\n      if (fut.valid()) {\n        fut.get();\n      }\n    } catch (const rpc::RPCException& e) {\n      fmt::printf(\"RPC exception: %s\\n\", e.what());\n    }\n    try {\n      auto fut = client->async<void>(\"trainData\", data);\n      std::lock_guard l(trainDataMut);\n      trainDataFutures.push_back(std::move(fut));\n    } catch (const rpc::RPCException& e) {\n      fmt::printf(\"RPC exception: %s\\n\", e.what());\n    }\n  }\n\n  void sendResult(float reward,\n                  std::unordered_map<std::string_view, float> models) {\n    std::unique_lock l(mut);\n    auto i = models.find(currentModelId);\n    if (i != models.end()) {\n      if (i->second >= 0.9f) {\n        ++gamesDoneWithCurrentModel;\n        if (gamesDoneWithCurrentModel >= 20) {\n          lastTournamentResult = std::chrono::steady_clock::now();\n          wantsNewModelId = true;\n        }\n      }\n    }\n    resultQueue.emplace_back(reward, std::move(models));\n  }\n\n  bool wantsTournamentResult() const {\n    std::unique_lock l(mut);\n    return wantsTournamentResult_;\n  }\n\n  std::string_view getModelId() const {\n    std::unique_lock l(mut);\n    return currentModelId;\n  }\n};\n\nServer::Server() {\n  impl = std::make_unique<ServerImpl>();\n}\nServer::~Server() {\n}\n\nvoid Server::setOnTrainData(\n    std::function<void(std::unordered_map<std::string, torch::Tensor>)>\n        onTrainData) {\n  impl->onTrainData = std::move(onTrainData);\n}\n\nvoid Server::start(std::string endpoint) {\n  impl->start(endpoint);\n}\n\nvoid Server::updateModel(\n    const std::string& id,\n    std::unordered_map<std::string, torch::Tensor> stateDict) {\n  impl->updateModel(id, 
std::move(stateDict));\n}\n\nClient::Client() {\n  impl = std::make_unique<ClientImpl>();\n}\n\nClient::~Client() {\n}\n\nvoid Client::setOnUpdateModel(\n    std::function<void(std::string_view,\n                       std::unordered_map<std::string, torch::Tensor>)>\n        onUpdateModel) {\n  impl->onUpdateModel = std::move(onUpdateModel);\n}\n\nvoid Client::connect(std::string endpoint) {\n  impl->connect(endpoint);\n}\n\nvoid Client::requestModel(bool isTournamentOpponent) {\n  impl->requestModel(isTournamentOpponent);\n}\n\nvoid Client::sendTrainData(\n    const std::unordered_map<std::string, torch::Tensor>& data) {\n  impl->sendTrainData(data);\n}\n\nbool Client::wantsTournamentResult() {\n  return impl->wantsTournamentResult();\n}\n\nstd::string_view Client::getModelId() {\n  return impl->getModelId();\n}\n\nvoid Client::sendResult(float reward,\n                        std::unordered_map<std::string_view, float> models) {\n  impl->sendResult(reward, std::move(models));\n}\n\n}  // namespace distributed\n"
  },
  {
    "path": "src/distributed/distributed.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <functional>\n#include <memory>\n#include <string>\n#include <string_view>\n#include <torch/torch.h>\n#include <unordered_map>\n\nnamespace rpc {\n\ntemplate <typename X, typename A, typename B>\nvoid serialize(X& x, const std::pair<A, B>& v) {\n  x(v.first, v.second);\n}\n\ntemplate <typename X, typename A, typename B>\nvoid serialize(X& x, std::pair<A, B>& v) {\n  x(v.first, v.second);\n}\n\ntemplate <typename X, typename T>\nvoid serialize(X& x, const std::optional<T>& v) {\n  x(v.has_value());\n  if (v.has_value()) {\n    x(v.value());\n  }\n}\n\ntemplate <typename X, typename T> void serialize(X& x, std::optional<T>& v) {\n  if (x.template read<bool>()) {\n    v.emplace();\n    x(v.value());\n  } else {\n    v.reset();\n  }\n}\n\ntemplate <typename X, typename T>\nvoid serialize(X& x, const std::vector<T>& v) {\n  x(v.size());\n  for (auto& v2 : v) {\n    x(v2);\n  }\n}\n\ntemplate <typename X, typename T> void serialize(X& x, std::vector<T>& v) {\n  size_t n = x.template read<size_t>();\n  v.resize(n);\n  for (size_t i = 0; i != n; ++i) {\n    x(v[i]);\n  }\n}\n\ntemplate <typename X, typename Key, typename Value>\nvoid serialize(X& x, const std::unordered_map<Key, Value>& v) {\n  x(v.size());\n  for (auto& v2 : v) {\n    x(v2.first, v2.second);\n  }\n}\n\ntemplate <typename X, typename Key, typename Value>\nvoid serialize(X& x, std::unordered_map<Key, Value>& v) {\n  v.clear();\n  size_t n = x.template read<size_t>();\n  for (; n; --n) {\n    auto k = x.template read<Key>();\n    v.emplace(std::move(k), x.template read<Value>());\n  }\n}\n\ntemplate <typename X> void serialize(X& x, const torch::Tensor& v) {\n  if (!v.is_contiguous()) {\n    serialize(x, v.contiguous());\n    return;\n  }\n  x(v.scalar_type(),\n    
std::basic_string_view<int64_t>(v.sizes().data(), v.sizes().size()));\n  void* data = v.data_ptr();\n  size_t size = v.numel() * v.dtype().itemsize();\n  x(std::string_view((const char*)data, size));\n}\n\ntemplate <typename X> void serialize(X& x, torch::Tensor& v) {\n  torch::ScalarType dtype;\n  std::basic_string_view<int64_t> sizes;\n  x(dtype, sizes);\n  if (v.defined() && v.scalar_type() == dtype) {\n    v.resize_(torch::IntArrayRef(sizes.begin(), sizes.end()));\n  } else {\n    v = torch::empty(torch::IntArrayRef(sizes.begin(), sizes.end()), dtype);\n  }\n  std::string_view data;\n  x(data);\n  if ((size_t)v.numel() != data.size() / v.dtype().itemsize()) {\n    throw std::runtime_error(\"numel mismatch in tensor deserialize\");\n  }\n  std::memcpy(v.data_ptr(), data.data(), data.size());\n}\n\n}  // namespace rpc\n\nnamespace distributed {\n\nclass ServerImpl;\nclass ClientImpl;\n\nclass Server {\n  std::unique_ptr<ServerImpl> impl;\n\n public:\n  Server();\n  ~Server();\n\n  void setOnTrainData(\n      std::function<\n          void(const std::unordered_map<std::string, torch::Tensor>)>);\n  void start(std::string endpoint);\n  void updateModel(const std::string& id,\n                   std::unordered_map<std::string, torch::Tensor> stateDict);\n};\n\nclass Client {\n  std::unique_ptr<ClientImpl> impl;\n\n public:\n  Client();\n  ~Client();\n\n  void setOnUpdateModel(\n      std::function<void(std::string_view,\n                         std::unordered_map<std::string, torch::Tensor>)>);\n  void connect(std::string endpoint);\n  void requestModel(bool isTournamentOpponent);\n  void sendTrainData(\n      const std::unordered_map<std::string, torch::Tensor>& data);\n  bool wantsTournamentResult();\n  std::string_view getModelId();\n  void sendResult(float reward,\n                  std::unordered_map<std::string_view, float> models);\n};\n\n}  // namespace distributed\n"
  },
  {
    "path": "src/distributed/ib.cc",
    "content": "\n#include \"rdma.h\"\n\n#include <infiniband/verbs.h>\n\n#include <deque>\n#include <list>\n#include <optional>\n#include <random>\n#include <stdexcept>\n#include <string>\n#include <thread>\n#include <vector>\n\nnamespace ib {\n\nstruct Device {\n  std::string name;\n  ibv_device* info;\n};\n\nstruct DeviceList {\n\n  std::vector<Device> list;\n  ibv_device** rawlist = nullptr;\n\n  DeviceList() {\n    int num = 0;\n    rawlist = ibv_get_device_list(&num);\n    for (int i = 0; i < num; ++i) {\n      Device d;\n      d.name = rawlist[i]->name;\n      d.info = rawlist[i];\n      list.push_back(d);\n    }\n  }\n  ~DeviceList() {\n    if (rawlist) {\n      ibv_free_device_list(rawlist);\n    }\n  }\n\n  size_t size() const {\n    return list.size();\n  }\n\n  auto begin() const {\n    return list.begin();\n  }\n  auto end() const {\n    return list.end();\n  }\n\n  bool empty() const {\n    return list.empty();\n  }\n\n  decltype(auto) operator[](size_t index) const {\n    return list[index];\n  }\n};\n\nclass Error : public rdma::Error {\n public:\n  using rdma::Error::Error;\n};\n\nstd::string gidstr(std::array<std::byte, 16> gid) {\n  std::string s;\n  for (auto& v : gid) {\n    s += \"0123456789abcdef\"[unsigned(v) >> 4];\n    s += \"0123456789abcdef\"[unsigned(v) & 0xf];\n  }\n  return s;\n}\n\nstruct Port {\n  ibv_context* context = nullptr;\n  int num = 0;\n  ibv_port_attr attr;\n  uint32_t lid() {\n    return attr.lid;\n  }\n  std::array<std::byte, 16> gid() {\n    std::array<std::byte, 16> gid;\n    static_assert(sizeof(gid) == sizeof(ibv_gid));\n    if (ibv_query_gid(context, num, 0, (ibv_gid*)&gid)) {\n      throw Error(\"ibv_query_gid failed\");\n    }\n    return gid;\n  }\n};\n\nstruct NoMove {\n  NoMove() = default;\n  NoMove(const NoMove&) = delete;\n  NoMove(NoMove&&) = delete;\n  NoMove& operator=(const NoMove&) = delete;\n  NoMove& operator=(NoMove&&) = delete;\n};\n\nstruct Context : NoMove {\n  ibv_context* context = nullptr;\n  
ibv_device_attr devattr;\n  std::vector<Port> ports;\n  Context(const Device& dev) {\n    context = ibv_open_device(dev.info);\n    if (!context) {\n      throw Error(\"Failed to open device \" + dev.name);\n    }\n    memset(&devattr, 0, sizeof(devattr));\n    ibv_query_device(context, &devattr);\n\n    for (int i = 1; i <= devattr.phys_port_cnt; ++i) {\n      ports.emplace_back();\n      ports.back().context = context;\n      ports.back().num = i;\n      auto& attr = ports.back().attr;\n      memset(&attr, 0, sizeof(attr));\n      ibv_query_port(context, i, &attr);\n    }\n  }\n  ~Context() {\n    if (context) {\n      ibv_close_device(context);\n      context = nullptr;\n    }\n  }\n};\n\nstruct ProtectionDomain : NoMove {\n  ibv_pd* pd = nullptr;\n  ProtectionDomain(Context& ctx) {\n    pd = ibv_alloc_pd(ctx.context);\n    if (!pd) {\n      throw Error(\"Failed to allocate protection domain\");\n    }\n  }\n  ~ProtectionDomain() {\n    if (pd) {\n      ibv_dealloc_pd(pd);\n      pd = nullptr;\n    }\n  }\n};\n\nstruct MemoryRegion : NoMove {\n  ibv_mr* mr = nullptr;\n  MemoryRegion(const ProtectionDomain& pd,\n               void* address,\n               size_t size,\n               int access) {\n    mr = ibv_reg_mr(pd.pd, address, size, access);\n    if (!mr) {\n      throw Error(\"Failed to register memory region\");\n    }\n  }\n  ~MemoryRegion() {\n    if (mr) {\n      ibv_dereg_mr(mr);\n      mr = nullptr;\n    }\n  }\n  auto lkey() {\n    return mr->lkey;\n  }\n  auto rkey() {\n    return mr->rkey;\n  }\n};\n\nstruct CompletionQueue : NoMove {\n  ibv_cq* cq = nullptr;\n  CompletionQueue(const Context& ctx, int size) {\n    cq = ibv_create_cq(ctx.context, size, nullptr, nullptr, 0);\n    if (!cq) {\n      throw Error(\"Failed to create completion queue\");\n    }\n  }\n  ~CompletionQueue() {\n    if (cq) {\n      int e = ibv_destroy_cq(cq);\n      if (e) {\n        throw std::runtime_error(\"ibv_destroy_cq failed with error \" +\n                         
        std::to_string(e));\n      }\n      cq = nullptr;\n    }\n  }\n\n  void wait() {\n    ibv_wc wc;\n    auto start = std::chrono::steady_clock::now();\n    while (true) {\n      std::this_thread::sleep_for(std::chrono::milliseconds(1));\n      if (std::chrono::steady_clock::now() - start >=\n          std::chrono::seconds(10)) {\n        throw Error(\"wait timed out\");\n      }\n      int r = ibv_poll_cq(cq, 1, &wc);\n      if (r > 0) {\n        if (wc.status != IBV_WC_SUCCESS) {\n          throw Error(ibv_wc_status_str(wc.status));\n        }\n        return;\n      } else if (r < 0) {\n        throw Error(\"Failed to poll the completion queue\");\n      }\n    }\n  }\n};\n\nstruct QueuePair : NoMove {\n  ibv_qp* qp = nullptr;\n  QueuePair(const ProtectionDomain& pd, const CompletionQueue& cq) {\n    ibv_qp_init_attr init;\n    memset(&init, 0, sizeof(init));\n    init.send_cq = cq.cq;\n    init.recv_cq = cq.cq;\n    init.qp_type = IBV_QPT_RC;\n    init.cap.max_send_wr = 2;\n    init.cap.max_recv_wr = 2;\n    init.cap.max_send_sge = 1;\n    init.cap.max_recv_sge = 1;\n\n    qp = ibv_create_qp(pd.pd, &init);\n    if (!qp) {\n      throw Error(\"Failed to create queue pair\");\n    }\n  }\n  ~QueuePair() {\n    if (qp) {\n      ibv_destroy_qp(qp);\n    }\n  }\n\n  uint32_t num() {\n    return qp->qp_num;\n  }\n\n  void init(const Port& port, int accessFlags) {\n    ibv_qp_attr attr;\n    memset(&attr, 0, sizeof(attr));\n    attr.qp_state = IBV_QPS_INIT;\n    attr.pkey_index = 0;\n    attr.port_num = port.num;\n    attr.qp_access_flags = accessFlags;\n    int err = ibv_modify_qp(\n        qp, &attr,\n        IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);\n    if (err) {\n      throw Error(\"Failed to move queue pair to init state; error \" +\n                  std::to_string(err));\n    }\n  }\n\n  void rtr(const Port& port,\n           uint16_t remoteLid,\n           uint32_t remoteQPNum,\n           const std::array<std::byte, 16>& 
gid) {\n    ibv_qp_attr attr;\n    memset(&attr, 0, sizeof(attr));\n    attr.qp_state = IBV_QPS_RTR;\n    attr.path_mtu = port.attr.active_mtu;\n    attr.dest_qp_num = remoteQPNum;\n    attr.rq_psn = 4242;\n    attr.max_dest_rd_atomic = 1;\n    attr.min_rnr_timer = 12;\n    attr.ah_attr.is_global = 0;\n    attr.ah_attr.dlid = remoteLid;\n    attr.ah_attr.sl = 0;\n    attr.ah_attr.src_path_bits = 0;\n    attr.ah_attr.port_num = port.num;\n\n    attr.ah_attr.is_global = 1;\n    attr.ah_attr.grh.hop_limit = 4;\n    attr.ah_attr.grh.dgid = (ibv_gid&)gid;\n    attr.ah_attr.grh.sgid_index = 0;\n    int err = ibv_modify_qp(\n        qp, &attr,\n        IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |\n            IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER);\n    if (err) {\n      throw Error(\"Failed to move queue pair to rtr state; error \" +\n                  std::to_string(err));\n    }\n  }\n\n  void rts() {\n    ibv_qp_attr attr;\n    memset(&attr, 0, sizeof(attr));\n    attr.qp_state = IBV_QPS_RTS;\n    attr.sq_psn = 4242;\n    attr.timeout = 17;  // 0.5s\n    attr.retry_cnt = 7;\n    attr.rnr_retry = 7;\n    attr.max_rd_atomic = 1;\n    int err = ibv_modify_qp(qp, &attr,\n                            IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT |\n                                IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN |\n                                IBV_QP_MAX_QP_RD_ATOMIC);\n    if (err) {\n      throw Error(\"Failed to move queue pair to rts state; error \" +\n                  std::to_string(err));\n    }\n  }\n\n  void read(MemoryRegion& dstmr,\n            void* dstbuf,\n            uint32_t rkey,\n            uintptr_t remoteAddress,\n            size_t length) {\n\n    ibv_sge sg;\n    ibv_send_wr wr;\n    ibv_send_wr* bad_wr;\n\n    memset(&sg, 0, sizeof(sg));\n    sg.addr = (uintptr_t)dstbuf;\n    sg.length = length;\n    sg.lkey = dstmr.lkey();\n\n    memset(&wr, 0, sizeof(wr));\n    wr.wr_id = 0;\n    wr.sg_list = 
&sg;\n    wr.num_sge = 1;\n    wr.opcode = IBV_WR_RDMA_READ;\n    wr.send_flags = IBV_SEND_SIGNALED;\n    wr.wr.rdma.remote_addr = remoteAddress;\n    wr.wr.rdma.rkey = rkey;\n\n    ibv_qp_attr qattr;\n    ibv_qp_init_attr qiattr;\n\n    if (ibv_query_qp(qp, &qattr, IBV_QP_STATE, &qiattr)) {\n      throw Error(\"Failed to query qp\");\n    }\n\n    int err = ibv_post_send(qp, &wr, &bad_wr);\n    if (err) {\n      throw Error(\"RDMA read failed; error \" + std::to_string(err));\n    }\n  }\n};\n\n}  // namespace ib\n\nnamespace rdma {\n\nstruct ibBuffer : Buffer {\n  std::optional<ib::MemoryRegion> mr;\n  virtual ~ibBuffer() override {\n  }\n  virtual uint32_t key() override {\n    return mr->rkey();\n  }\n  virtual uint32_t keyFor(Endpoint ep) override {\n    return mr->rkey();\n  }\n};\n\nstruct ibCompletionQueue : CompletionQueue {\n  ib::CompletionQueue cq;\n  virtual ~ibCompletionQueue() {\n  }\n  ibCompletionQueue(ib::Context& ctx, int size)\n      : cq(ctx, size) {\n  }\n  virtual void wait() override {\n    cq.wait();\n  }\n};\n\nstruct ibMultiBuffer : Buffer {\n  std::deque<ib::MemoryRegion> mrs;\n  std::vector<uint32_t> lids;\n  virtual ~ibMultiBuffer() override {\n  }\n  virtual uint32_t key() override {\n    std::abort();\n  }\n  ib::MemoryRegion& mrFor(Endpoint ep) {\n    for (size_t i = 0; i != lids.size(); ++i) {\n      if (lids[i] == ep.lid) {\n        return mrs.at(i);\n      }\n    }\n    throw Error(\"Endpoint not found for multibuffer\");\n  }\n  virtual uint32_t keyFor(Endpoint ep) override {\n    return mrFor(ep).rkey();\n  }\n};\n\nstruct ibHost : Host {\n  ib::Context* context = nullptr;\n  ib::ProtectionDomain* pd = nullptr;\n  std::optional<ib::Port> port;\n  ib::CompletionQueue* cq = nullptr;\n  std::optional<ib::QueuePair> qp;\n  bool inRts = false;\n  virtual ~ibHost() override {\n  }\n  ibHost(ib::Context* context, ib::ProtectionDomain* pd)\n      : context(context)\n      , pd(pd) {\n    if (context->ports.empty()) {\n      throw 
ib::Error(\"Infiniband device has no ports!\");\n    }\n    port = context->ports.at(0);\n    // printf(\"Host using %d:%d\\n\", port->lid(), port->num);\n  }\n  virtual Endpoint init(CompletionQueue& cqa) override {\n    cq = &((ibCompletionQueue&)cqa).cq;\n    qp.emplace(*pd, *cq);\n    qp->init(*port, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);\n    inRts = false;\n    // printf(\"QP %d:%d\\n\", port->lid(), qp->num());\n    return {port->lid(), qp->num(), port->gid()};\n  }\n  virtual void connect(Endpoint ep) override {\n    qp->rtr(*port, ep.lid, ep.qpnum, ep.gid);\n    inRts = false;\n  }\n  virtual void read(Buffer& localBuffer,\n                    void* localAddress,\n                    uint32_t remoteKey,\n                    uintptr_t remoteAddress,\n                    size_t size) override {\n    if (!inRts) {\n      qp->rts();\n      inRts = true;\n    }\n    if (auto buf = dynamic_cast<ibBuffer*>(&localBuffer)) {\n      qp->read(*buf->mr, localAddress, remoteKey, remoteAddress, size);\n    } else if (auto buf = dynamic_cast<ibMultiBuffer*>(&localBuffer)) {\n      qp->read(buf->mrFor(Endpoint{port->lid(), qp->num(), port->gid()}),\n               localAddress, remoteKey, remoteAddress, size);\n    }\n  }\n  virtual void wait() override {\n    cq->wait();\n  }\n};\n\nstruct ibContext : Context {\n  ib::Device device;\n  std::optional<ib::Context> context;\n  std::optional<ib::ProtectionDomain> pd;\n  ibContext(ib::Device device)\n      : device(device) {\n    context.emplace(device);\n    pd.emplace(*context);\n  }\n  virtual ~ibContext() override {\n  }\n  virtual std::unique_ptr<Host> createHost() override {\n    return std::make_unique<ibHost>(&*context, &*pd);\n  }\n  virtual std::unique_ptr<Buffer> createBuffer(void* address,\n                                               size_t size) override {\n    auto r = std::make_unique<ibBuffer>();\n    r->mr.emplace(\n        *pd, address, size, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);\n  
  return r;\n  }\n  virtual std::unique_ptr<CompletionQueue> createCQ(int size) override {\n    return std::make_unique<ibCompletionQueue>(*context, size);\n  }\n};\n\nstruct ibMultiCompletionQueue : CompletionQueue {\n  std::vector<std::shared_ptr<ibCompletionQueue>> cqs;\n  virtual ~ibMultiCompletionQueue() override {\n  }\n  virtual void wait() override {\n    std::abort();\n  }\n};\n\nstruct ibMultiHost : ibHost {\n  size_t index;\n  ibMultiHost(size_t index, ib::Context* context, ib::ProtectionDomain* pd)\n      : ibHost(context, pd)\n      , index(index) {\n  }\n  virtual ~ibMultiHost() override {\n  }\n  virtual Endpoint init(CompletionQueue& cqa) override {\n    auto cq = (ibMultiCompletionQueue&)cqa;\n    return ibHost::init(*cq.cqs.at(index));\n  }\n};\n\nstruct ibMultiContext : Context {\n  ib::DeviceList devlist;\n  std::deque<ibContext> contexts;\n  std::minstd_rand rng;\n  ibMultiContext() {\n    if (devlist.empty()) {\n      throw ib::Error(\"No infiniband devices found\");\n    }\n    rng.seed(std::random_device{}());\n    for (auto& v : devlist) {\n      contexts.emplace_back(v);\n    }\n  }\n  virtual ~ibMultiContext() override {\n  }\n  virtual std::unique_ptr<Host> createHost() override {\n    size_t index =\n        std::uniform_int_distribution<size_t>(0, contexts.size() - 1)(rng);\n    auto& ctx = contexts[index];\n    return std::make_unique<ibMultiHost>(index, &*ctx.context, &*ctx.pd);\n  }\n  virtual std::unique_ptr<Buffer> createBuffer(void* address,\n                                               size_t size) override {\n    auto r = std::make_unique<ibMultiBuffer>();\n    for (auto& ctx : contexts) {\n      r->mrs.emplace_back(*ctx.pd, address, size,\n                          IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);\n      r->lids.push_back(ctx.context->ports.at(0).lid());\n    }\n    return r;\n  }\n  virtual std::unique_ptr<CompletionQueue> createCQ(int size) override {\n    auto r = 
std::make_unique<ibMultiCompletionQueue>();\n    for (auto& ctx : contexts) {\n      r->cqs.push_back(std::make_unique<ibCompletionQueue>(*ctx.context, size));\n    }\n    return r;\n  }\n};\n\nstd::unique_ptr<Context> create() {\n  return std::make_unique<ibMultiContext>();\n}\n\n}  // namespace rdma\n"
  },
  {
    "path": "src/distributed/network.cc",
    "content": "\n#include \"network.h\"\n\n#include \"asio.hpp\"\n\n#include <deque>\n#include <list>\n#include <vector>\n\nnamespace network {\n\ntemplate <typename T> struct Handle {\n  T* obj = nullptr;\n  Handle() {\n  }\n  Handle(T& obj)\n      : obj(&obj) {\n    ++obj.refcount;\n  }\n  Handle(std::nullptr_t) {\n  }\n  Handle(Handle&& n) {\n    obj = std::exchange(n.obj, nullptr);\n  }\n  Handle(const Handle& n) {\n    acquire(n.obj);\n  }\n  Handle& operator=(Handle&& n) {\n    std::swap(obj, n.obj);\n    return *this;\n  }\n  Handle& operator=(const Handle& n) {\n    acquire(n.obj);\n    return *this;\n  }\n  void acquire(T* newobj) {\n    release();\n    obj = newobj;\n    if (obj)\n      ++obj->refcount;\n  }\n  void release() {\n    if (obj) {\n      if (--obj->refcount == 0) {\n        obj->owner->free(obj);\n      }\n      obj = nullptr;\n    }\n  }\n  ~Handle() {\n    release();\n  }\n  T& operator*() const {\n    return *obj;\n  }\n  T* operator->() const {\n    return obj;\n  }\n  explicit operator bool() const {\n    return obj;\n  }\n};\n\ntemplate <typename T> struct Ref {\n  std::atomic_int refcount = 0;\n  Handle<T> ref() {\n    return *(T*)this;\n  }\n};\n\ntemplate <typename T> struct Cache {\n  struct entry {\n    entry* freenext = nullptr;\n    Cache* owner = nullptr;\n    std::aligned_storage_t<sizeof(T), alignof(T)> buf;\n    entry* storagenext = nullptr;\n  };\n  std::atomic<entry*> storagelist;\n  std::atomic<entry*> freelist;\n  static entry* get(T* ptr) {\n    uintptr_t v = (uintptr_t)(void*)ptr;\n    v -= offsetof(entry, buf);\n    return (entry*)v;\n  }\n  template <typename... A> T* allocate(A&&... 
args) {\n    T* r;\n    entry* e = freelist;\n    while (e && !freelist.compare_exchange_weak(e, e->freenext))\n      ;\n    if (!e) {\n      e = new entry();\n      entry* s = storagelist;\n      do {\n        e->storagenext = s;\n      } while (!storagelist.compare_exchange_weak(s, e));\n    }\n    r = (T*)&e->buf;\n    new (r) T(std::forward<A>(args)...);\n    e->owner = this;\n    return r;\n  }\n  void free(T* obj) {\n    entry* e = get(obj);\n    if (e->owner != this) {\n      std::terminate();\n    }\n    e->owner = nullptr;\n    obj->~T();\n    entry* f = freelist;\n    do {\n      e->freenext = f;\n    } while (!freelist.compare_exchange_weak(f, e));\n  }\n\n  ~Cache() {\n    for (entry* e = storagelist; e; e = e->storagenext) {\n      if (e->owner) {\n        ((T&)e->buf).~T();\n      }\n    }\n  }\n\n  template <typename... A> Handle<T> make(A&&... args) {\n    return (allocate(this, std::forward<A>(args)...))->ref();\n  }\n};\n\ntemplate <typename T> struct Wrapper : Ref<Wrapper<T>> {\n  Cache<Wrapper<T>>* owner = nullptr;\n  T obj;\n  template <typename... A>\n  Wrapper(Cache<Wrapper<T>>* owner, A&&... args)\n      : owner(owner)\n      , obj(std::forward<A>(args)...) 
{\n  }\n  T* operator->() {\n    return &obj;\n  }\n  T& operator*() {\n    return obj;\n  }\n};\n\ntemplate <size_t maxsize> struct Buffer : Ref<Buffer<maxsize>> {\n  Cache<Buffer>* owner = nullptr;\n  std::array<char, maxsize> buf;\n  size_t begin = 0;\n  size_t end = 0;\n  Buffer(Cache<Buffer>* owner)\n      : owner(owner) {\n  }\n  size_t space() {\n    return buf.size() - end;\n  }\n  size_t append(const void* data, size_t n) {\n    n = std::min(n, space());\n    std::memcpy(buf.data() + end, data, n);\n    end += n;\n    return n;\n  }\n  void free(size_t n) {\n    begin += n;\n  }\n  const void* data() const {\n    return buf.data() + begin;\n  }\n  size_t size() const {\n    return end - begin;\n  }\n  bool empty() const {\n    return size() == 0;\n  }\n};\n\nstd::pair<std::string_view, int> decodeEndpoint(std::string_view endpoint) {\n  std::string_view hostname = endpoint;\n  int port = 0;\n  auto bpos = endpoint.find('[');\n  if (bpos != std::string_view::npos) {\n    auto bepos = endpoint.find(']', bpos);\n    if (bepos != std::string_view::npos) {\n      hostname = endpoint.substr(bpos + 1, bepos - (bpos + 1));\n      endpoint = endpoint.substr(bepos + 1);\n    }\n  }\n  auto cpos = endpoint.find(':');\n  if (cpos != std::string_view::npos) {\n    if (hostname == endpoint)\n      hostname = endpoint.substr(0, cpos);\n    ++cpos;\n    while (cpos != endpoint.size()) {\n      char c = endpoint[cpos];\n      if (c < '0' || c > '9')\n        break;\n      port *= 10;\n      port += c - '0';\n      ++cpos;\n    }\n  }\n  return {hostname, port};\n}\n\nclass PeerImpl : public Ref<PeerImpl> {\n public:\n  Cache<PeerImpl>* owner = nullptr;\n  asio::io_context& context;\n  Cache<Wrapper<asio::ip::tcp::resolver>> resolverCache;\n  Cache<Wrapper<asio::steady_timer>> timerCache;\n  Cache<Wrapper<asio::ip::tcp::socket>> socketCache;\n  Cache<Buffer<0x10000>> bufferCache;\n  asio::ip::tcp::socket socket;\n  std::string connectEndpoint;\n  std::string remoteHost;\n  
int remotePort = 0;\n  bool connected = false;\n  bool closed = false;\n  std::vector<char> readBuffer;\n  Handle<Buffer<0x10000>> writeBuffer;\n  std::vector<Handle<Buffer<0x10000>>> writeBufferQueue;\n  std::mutex mutex;\n  PeerImpl(Cache<PeerImpl>* owner, asio::io_context& context)\n      : owner(owner)\n      , context(context)\n      , socket(context) {\n  }\n  void connect(std::string_view endpoint) {\n    auto l = lock();\n    if (connected)\n      return;\n    if (endpoint.empty())\n      return;\n    if (connectEndpoint != endpoint)\n      connectEndpoint = endpoint;\n    auto [hostname, port] = decodeEndpoint(endpoint);\n\n    remoteHost = hostname;\n    remotePort = port;\n\n    asio::error_code ec;\n    asio::ip::address address = asio::ip::make_address(hostname, ec);\n    if (ec) {\n      auto resolver = resolverCache.make(context);\n      (*resolver)->async_resolve(\n          remoteHost, \"\",\n          [this, str = std::string(remoteHost), peer = ref(), resolver,\n           port = port](const asio::error_code& ec,\n                        asio::ip::tcp::resolver::results_type results) mutable {\n            if (!ec) {\n              int n = 0;\n              for (auto ep : results) {\n                auto timer = timerCache.make(context);\n                (*timer)->expires_from_now(std::chrono::seconds(n));\n                (*timer)->async_wait(\n                    [ep, timer, peer, port](const asio::error_code& ec) {\n                      if (!ec) {\n                        peer->connect(\n                            {ep.endpoint().address(), (unsigned short)port});\n                      }\n                    });\n                ++n;\n              }\n            } else {\n              printf(\"resolve(%s): %s\\n\", str.c_str(), ec.message().c_str());\n            }\n          });\n    } else {\n      connect({address, (unsigned short)port});\n    }\n\n    auto timer = timerCache.make(context);\n    
(*timer)->expires_from_now(std::chrono::seconds(30));\n    (*timer)->async_wait(\n        [this, timer, peer = ref()](const asio::error_code& ec) {\n          if (!ec) {\n            connect(connectEndpoint);\n          }\n        });\n  }\n\n  void asyncRead(size_t offset = 0) {\n    socket.async_receive(\n        asio::buffer(readBuffer.data() + offset, readBuffer.size() - offset),\n        [this, peer = ref()](auto&&... args) mutable {\n          onReceive(std::forward<decltype(args)>(args)...);\n        });\n  }\n\n  void setConnected(asio::ip::tcp::socket sock) {\n    if (connected)\n      return;\n    if (closed) {\n      sock.close();\n      return;\n    }\n    connected = true;\n    socket = std::move(sock);\n\n    if (onReceiveCallback || onMessageCallback)\n      asyncRead();\n    flush();\n  }\n\n  void connect(asio::ip::tcp::endpoint ep) {\n    if (connected || closed)\n      return;\n\n    auto h = socketCache.make(context);\n\n    (*h)->async_connect(\n        ep, [this, ep, h, peer = ref()](const asio::error_code& ec) {\n          auto l = lock();\n          if (!ec && !connected) {\n            setConnected(std::move(**h));\n          } else if (ec) {\n            printf(\"connect(%s:%d): %s\\n\", ep.address().to_string().c_str(),\n                   (int)ep.port(), ec.message().c_str());\n          }\n        });\n  }\n\n  void failure() {\n    auto l = lock();\n    if (!connected)\n      return;\n    connected = false;\n    socket.close();\n    writeBuffer = nullptr;\n    CallbackCounter cc(activeCallbacks);\n    l.unlock();\n    if (onConnectionClosed) {\n      onConnectionClosed();\n    }\n    auto timer = timerCache.make(context);\n    (*timer)->expires_from_now(std::chrono::seconds(5));\n    (*timer)->async_wait(\n        [this, timer, peer = ref()](const asio::error_code& ec) {\n          if (!ec) {\n            connect(connectEndpoint);\n          }\n        });\n  }\n\n  std::atomic<bool> sending = false;\n\n  std::vector<char> 
sendBuffer;\n\n  void callSend(std::vector<char> buffer, size_t offset) {\n    auto ab = asio::buffer(buffer.data() + offset, buffer.size() - offset);\n    socket.async_send(\n        ab, [this, peer = ref(), buffer = std::move(buffer), offset](\n                const asio::error_code& ec, size_t n) mutable {\n          if (ec) {\n            sending = false;\n            failure();\n          } else {\n            size_t remaining = buffer.size() - offset - n;\n            if (remaining) {\n              auto l = lock();\n              callSend(std::move(buffer), offset + n);\n            } else {\n              sending = false;\n              auto l = lock();\n              flush();\n            }\n          }\n        });\n  }\n\n  void flush() {\n    if (!connected)\n      return;\n    if (!sendBuffer.empty()) {\n      if (sending.exchange(true))\n        return;\n      callSend(std::move(sendBuffer), 0);\n      sendBuffer.clear();\n    }\n  }\n\n  void sendNoFlush(const void* data, size_t n) {\n    size_t offset = sendBuffer.size();\n    if (offset + n > sendBuffer.capacity()) {\n      sendBuffer.reserve(std::max(offset + n, offset * 2));\n    }\n    sendBuffer.resize(offset + n);\n    std::memcpy(sendBuffer.data() + offset, data, n);\n  }\n\n  //  std::vector<asio::const_buffer> sendBuffers;\n  //\n  //  void flush() {\n  //    if (!connected) return;\n  //    if (sending) return;\n  //    sendBuffers.clear();\n  //    if (!writeBufferQueue.empty()) {\n  //      for (auto& v : writeBufferQueue) {\n  //        sendBuffers.push_back(asio::buffer(v->data(), v->size()));\n  //      }\n  //      writeBufferQueue.clear();\n  //    }\n  //    size_t freeN = 0;\n  //    if (writeBuffer && !writeBuffer->empty()) {\n  //      sendBuffers.push_back(asio::buffer(writeBuffer->data(),\n  //      writeBuffer->size())); freeN = writeBuffer->size();\n  //    }\n  //    if (!sendBuffers.empty()) {\n  //      sending = true;\n  //      socket.async_send(sendBuffers, [this, 
freeSrc = writeBuffer->ref(),\n  //      freeN, peer = ref()](const asio::error_code& ec, size_t n) {\n  //        sending = false;\n  //        freeSrc->free(freeN);\n  //        if (ec) {\n  //          failure();\n  //        } else {\n  //          flush();\n  //        }\n  //      });\n  //    }\n  //  }\n\n  //  void sendNoFlush(const void* data, size_t n) {\n  //    if (!writeBuffer || writeBuffer->space() == 0) {\n  //      writeBuffer = bufferCache.make();\n  //    }\n  //    size_t s = writeBuffer->append(data, n);\n  //    while (writeBuffer->space() == 0) {\n  //      writeBufferQueue.push_back(writeBuffer);\n  //      writeBuffer = bufferCache.make();\n  //      data = (const char*)data + s;\n  //      n -= s;\n  //      s = writeBuffer->append(data, n);\n  //    }\n  //  }\n\n  void send(const void* data, size_t n) {\n    auto l = lock();\n    sendNoFlush(data, n);\n    if (!sending) {\n      flush();\n    }\n  }\n\n  void sendMessage(const void* data, size_t n) {\n    auto l = lock();\n    uint32_t len = n;\n    sendNoFlush(&len, sizeof(len));\n    sendNoFlush(data, n);\n    flush();\n  }\n\n  struct CallbackCounter {\n    std::atomic_int& c;\n    CallbackCounter(std::atomic_int& c)\n        : c(c) {\n      ++c;\n    }\n    ~CallbackCounter() {\n      --c;\n    }\n  };\n  std::atomic_int activeCallbacks;\n\n  std::function<void()> onConnectionClosed;\n  std::function<void(const void*, size_t)> onReceiveCallback;\n  std::function<void(const void*, size_t)> onMessageCallback;\n\n  void setOnReceive(std::function<void(const void*, size_t)> callback,\n                    size_t bufferSize = 0x10000) {\n    auto l = lock();\n    if (!onReceiveCallback && connected) {\n      asyncRead();\n    }\n    readBuffer.resize(bufferSize);\n    onReceiveCallback = std::move(callback);\n  }\n\n  int messageState = -1;\n  size_t messageReceived = 0;\n  size_t messageLength = 0;\n\n  void setOnMessage(std::function<void(const void*, size_t)> callback,\n                
    size_t bufferSize = 0x10000) {\n    auto l = lock();\n    if (!onMessageCallback && callback) {\n      if (connected) {\n        asyncRead();\n      }\n      readBuffer.resize(bufferSize);\n      messageState = 0;\n    }\n    if (!callback) {\n      messageState = -1;\n    }\n    onMessageCallback = std::move(callback);\n  }\n\n  void onReceive(const asio::error_code& ec, size_t n) {\n    auto l = lock();\n    if (closed) {\n      return;\n    }\n    CallbackCounter cc(activeCallbacks);\n    if (!ec) {\n      while (true) {\n        if (messageState == 0) {\n          messageReceived += n;\n          n = 0;\n          if (messageReceived >= 4) {\n            messageLength = *(uint32_t*)readBuffer.data();\n            readBuffer.resize(std::max(readBuffer.size(), 4 + messageLength));\n            messageState = 1;\n            continue;\n          } else {\n            asyncRead(messageReceived);\n          }\n        } else if (messageState == 1) {\n          messageReceived += n;\n          n = 0;\n          if (messageReceived >= 4 + messageLength) {\n            std::vector<char> tmp(\n                readBuffer.data() + 4, readBuffer.data() + 4 + messageLength);\n            if (messageReceived == 4 + messageLength) {\n              messageState = 0;\n              messageReceived = 0;\n              asyncRead(messageReceived);\n              l.unlock();\n            } else {\n              std::memmove(readBuffer.data(),\n                           readBuffer.data() + 4 + messageLength,\n                           messageReceived - (4 + messageLength));\n              messageReceived -= 4 + messageLength;\n              messageState = 0;\n              l.unlock();\n              context.post([this]() { onReceive({}, 0); });\n            }\n            onMessageCallback(tmp.data(), tmp.size());\n            break;\n          } else {\n            if (readBuffer.size() - messageReceived == 0) {\n              readBuffer.resize(readBuffer.size() * 2);\n       
     }\n          }\n          asyncRead(messageReceived);\n        } else if (onReceiveCallback) {\n          asyncRead();\n          if (n) {\n            l.unlock();\n            onReceiveCallback(readBuffer.data(), n);\n          }\n        }\n        break;\n      }\n    } else {\n      l.unlock();\n      failure();\n    }\n  }\n\n  std::unique_lock<std::mutex> lock() {\n    return std::unique_lock(mutex);\n  }\n\n  void close() {\n    auto l = lock();\n    if (connected) {\n      connected = false;\n      socket.close();\n      writeBuffer = nullptr;\n    }\n\n    messageState = -1;\n    closed = true;\n\n    l.unlock();\n    while (activeCallbacks) {\n      std::this_thread::yield();\n    }\n  }\n\n  void post_close() {\n    context.post([peer = ref()] { peer->close(); });\n  }\n\n  void setOnConnectionClosed(std::function<void()> callback) {\n    onConnectionClosed = std::move(callback);\n  }\n};\n\nclass ServerImpl : public Ref<ServerImpl> {\n public:\n  Cache<ServerImpl>* owner = nullptr;\n  asio::io_context& context;\n  Cache<Wrapper<asio::ip::tcp::resolver>> resolverCache;\n  Cache<Wrapper<asio::steady_timer>> timerCache;\n  Cache<Wrapper<asio::ip::tcp::acceptor>> acceptorCache;\n  Cache<Wrapper<asio::ip::tcp::socket>> socketCache;\n  Cache<PeerImpl> peerCache;\n  std::string listenEndpoint;\n  std::function<void(Handle<PeerImpl> peer)> onPeer;\n  bool bound = false;\n  std::vector<Handle<Wrapper<asio::ip::tcp::socket>>> sockets;\n  std::mutex mutex;\n  ServerImpl(Cache<ServerImpl>* owner, asio::io_context& context)\n      : owner(owner)\n      , context(context) {\n  }\n  ~ServerImpl() {\n    close();\n  }\n\n  void asyncAccept(Handle<Wrapper<asio::ip::tcp::acceptor>> h,\n                   Handle<Wrapper<asio::ip::tcp::socket>> socket) {\n    (*h)->async_accept(**socket, [this, server = ref(), h,\n                                  socket](const asio::error_code& ec) {\n      if (!ec) {\n        if (onPeer) {\n          auto peer = 
peerCache.make(context);\n          peer->setConnected(std::move(**socket));\n          onPeer(peer);\n        }\n      }\n      asyncAccept(h, socket);\n    });\n  }\n\n  void bind(asio::ip::tcp::endpoint ep) {\n    auto retry = [&](asio::error_code ec) {\n      printf(\"bind(%s:%d): %s\\n\", ep.address().to_string().c_str(),\n             (int)ep.port(), ec.message().c_str());\n      auto timer = timerCache.make(context);\n      (*timer)->expires_from_now(std::chrono::seconds(30));\n      (*timer)->async_wait(\n          [this, ep, timer, server = ref()](const asio::error_code& ec) {\n            if (!ec) {\n              bind(ep);\n            }\n          });\n    };\n\n    auto h = acceptorCache.make(context);\n\n    asio::error_code ec;\n    (*h)->open(ep.protocol());\n    (*h)->set_option(asio::socket_base::reuse_address(true));\n    (*h)->bind(ep, ec);\n    if (ec)\n      return retry(ec);\n    (*h)->listen(asio::socket_base::max_connections, ec);\n    if (ec)\n      return retry(ec);\n\n    auto socket = socketCache.make(context);\n\n    asyncAccept(h, socket);\n\n    auto l = lock();\n    sockets.push_back(socket);\n  }\n\n  void bind(std::string_view endpoint) {\n    if (endpoint.empty()) {\n      return;\n    }\n    bound = true;\n    auto [hostname, port] = decodeEndpoint(endpoint);\n\n    asio::error_code ec;\n    asio::ip::address address;\n    if (hostname != \"*\") {\n      address = asio::ip::make_address(hostname, ec);\n    }\n    if (ec) {\n      auto resolver = resolverCache.make(context);\n      (*resolver)->async_resolve(\n          hostname, \"\",\n          [this, peer = ref(), resolver, port = port](\n              const asio::error_code& ec,\n              asio::ip::tcp::resolver::results_type results) mutable {\n            if (!ec) {\n              for (auto ep : results) {\n                bind({ep.endpoint().address(), (unsigned short)port});\n              }\n            } else {\n              auto timer = 
timerCache.make(context);\n              (*timer)->expires_from_now(std::chrono::seconds(30));\n              (*timer)->async_wait(\n                  [this, timer, peer = ref()](const asio::error_code& ec) {\n                    if (!ec) {\n                      auto l = lock();\n                      bind(listenEndpoint);\n                    }\n                  });\n            }\n          });\n    } else {\n      context.post([this, address, port = port, server = ref()] {\n        bind({address, (unsigned short)port});\n      });\n    }\n  }\n\n  void listen(std::string_view endpoint) {\n    auto l = lock();\n    listenEndpoint = endpoint;\n\n    if (onPeer)\n      bind(endpoint);\n  }\n\n  void setOnPeer(std::function<void(Handle<PeerImpl>)> callback) {\n    auto l = lock();\n    onPeer = callback;\n    if (!bound)\n      bind(listenEndpoint);\n  }\n\n  std::unique_lock<std::mutex> lock() {\n    return std::unique_lock(mutex);\n  }\n\n  void close() {\n    auto l = lock();\n    onPeer = nullptr;\n    for (auto& v : sockets) {\n      (*v)->close();\n    }\n    sockets.clear();\n  }\n};\n\nclass NetworkImpl {\n public:\n  Cache<PeerImpl> peerCache;\n  Cache<ServerImpl> serverCache;\n  asio::io_context context;\n  asio::executor_work_guard<asio::io_context::executor_type> work{\n      context.get_executor()};\n\n  ~NetworkImpl() {\n    work.reset();\n  }\n\n  Handle<PeerImpl> connect(std::string_view endpoint) {\n    auto h = peerCache.make(context);\n    h->connect(endpoint);\n    return h;\n  }\n\n  Handle<ServerImpl> listen(std::string_view endpoint) {\n    auto h = serverCache.make(context);\n    h->listen(endpoint);\n    return h;\n  }\n\n  template <typename T>\n  static std::unique_ptr<T, std::function<void(T*)>> wrap(Handle<T> h) {\n    auto* ptr = &*h;\n    return std::unique_ptr<T, std::function<void(T*)>>(\n        ptr, [h = std::move(h)](T* ptr) mutable { h = nullptr; });\n  }\n\n  bool run_one() {\n    return context.run_one() != 0;\n  }\n\n  void 
post(std::function<void()> f) {\n    asio::post(std::move(f));\n  }\n};\n\nPeer::Peer(std::unique_ptr<PeerImpl, std::function<void(PeerImpl*)>> impl)\n    : impl(std::move(impl)) {\n}\n\nPeer::~Peer() {\n}\n\nvoid Peer::send(const void* data, size_t n) {\n  return impl->send(data, n);\n}\n\nvoid Peer::send(std::string_view buf) {\n  return send(buf.data(), buf.size());\n}\n\nvoid Peer::setOnReceive(std::function<void(const void*, size_t)> callback) {\n  return impl->setOnReceive(std::move(callback));\n}\n\nvoid Peer::setOnReceive(std::function<void(std::string_view)> callback) {\n  setOnReceive([callback = std::move(callback)](const void* data, size_t n) {\n    return callback(std::string_view((const char*)data, n));\n  });\n}\n\nvoid Peer::sendMessage(const void* data, size_t n) {\n  return impl->sendMessage(data, n);\n}\n\nvoid Peer::sendMessage(std::string_view buf) {\n  return sendMessage(buf.data(), buf.size());\n}\n\nvoid Peer::setOnMessage(std::function<void(const void*, size_t)> callback) {\n  return impl->setOnMessage(std::move(callback));\n}\n\nvoid Peer::setOnMessage(std::function<void(std::string_view)> callback) {\n  if (!callback)\n    setOnMessage(nullptr);\n  else\n    setOnMessage([callback = std::move(callback)](const void* data, size_t n) {\n      return callback(std::string_view((const char*)data, n));\n    });\n}\n\nvoid Peer::setOnMessage(std::nullptr_t) {\n  impl->setOnMessage(nullptr);\n}\n\nvoid Peer::setOnConnectionClosed(std::function<void()> callback) {\n  impl->setOnConnectionClosed(std::move(callback));\n}\n\nbool Peer::connected() const {\n  return impl->connected;\n}\n\nvoid Peer::close() {\n  impl->close();\n}\n\nvoid Peer::post_close() {\n  impl->post_close();\n}\n\nstd::unique_lock<std::mutex> Peer::lock() {\n  return impl->lock();\n}\n\nServer::Server(\n    std::unique_ptr<ServerImpl, std::function<void(ServerImpl*)>> impl)\n    : impl(std::move(impl)) {\n}\n\nServer::~Server() {\n}\n\nvoid 
Server::setOnPeer(std::function<void(Peer)> callback) {\n  impl->setOnPeer([callback = std::move(callback)](Handle<PeerImpl> peer) {\n    callback(NetworkImpl::wrap(peer));\n  });\n}\n\nvoid Server::close() {\n  impl->close();\n}\n\nstd::unique_lock<std::mutex> Server::lock() {\n  return impl->lock();\n}\n\nvoid Server::listen(std::string_view endpoint) {\n  return impl->listen(endpoint);\n}\n\nNetwork::Network() {\n  impl = std::make_unique<NetworkImpl>();\n}\n\nNetwork::Network(\n    std::unique_ptr<NetworkImpl, std::function<void(NetworkImpl*)>> impl)\n    : impl(std::move(impl)) {\n}\n\nNetwork::~Network() {\n}\n\nPeer Network::connect(std::string_view endpoint) {\n  return impl->wrap(impl->connect(endpoint));\n}\n\nServer Network::listen(std::string_view endpoint) {\n  return impl->wrap(impl->listen(endpoint));\n}\n\nbool Network::run_one() {\n  return impl->run_one();\n}\n\nvoid Network::post(std::function<void()> f) {\n  return impl->post(std::move(f));\n}\n\n}  // namespace network\n"
  },
  {
    "path": "src/distributed/network.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n#include <mutex>\n#include <string_view>\n\nnamespace network {\n\nclass PeerImpl;\nclass ServerImpl;\nclass NetworkImpl;\n\nclass Peer {\n public:\n  Peer() = default;\n  Peer(Peer&&) = default;\n  Peer(std::unique_ptr<PeerImpl, std::function<void(PeerImpl*)>> impl);\n  ~Peer();\n  Peer& operator=(Peer&&) = default;\n\n  void send(const void* data, size_t n);\n  void send(std::string_view buf);\n\n  void setOnReceive(std::function<void(const void* data, size_t n)> callback);\n  void setOnReceive(std::function<void(std::string_view)> callback);\n\n  void sendMessage(const void* data, size_t n);\n  void sendMessage(std::string_view buf);\n\n  void setOnMessage(std::function<void(const void* data, size_t n)> callback);\n  void setOnMessage(std::function<void(std::string_view)> callback);\n  void setOnMessage(std::nullptr_t);\n\n  void setOnConnectionClosed(std::function<void()> callback);\n\n  explicit operator bool() const {\n    return impl != nullptr;\n  }\n\n  bool connected() const;\n  void close();\n  void post_close();\n\n  std::unique_lock<std::mutex> lock();\n\n private:\n  std::unique_ptr<PeerImpl, std::function<void(PeerImpl*)>> impl;\n};\n\nclass Server {\n public:\n  Server() = default;\n  Server(Server&&) = default;\n  Server(std::unique_ptr<ServerImpl, std::function<void(ServerImpl*)>> impl);\n  ~Server();\n  Server& operator=(Server&&) = default;\n\n  void setOnPeer(std::function<void(Peer)> callback);\n  void close();\n  std::unique_lock<std::mutex> lock();\n  void listen(std::string_view endpoint);\n\n private:\n  std::unique_ptr<ServerImpl, std::function<void(ServerImpl*)>> impl;\n};\n\nclass Network {\n public:\n  Network();\n  Network(Network&&) = default;\n  Network(std::unique_ptr<NetworkImpl, std::function<void(NetworkImpl*)>> impl);\n  ~Network();\n\n  Peer connect(std::string_view endpoint);\n  Server listen(std::string_view endpoint);\n\n  bool run_one();\n  void 
post(std::function<void()> f);\n\n private:\n  std::unique_ptr<NetworkImpl, std::function<void(NetworkImpl*)>> impl;\n};\n\n}  // namespace network\n"
  },
  {
    "path": "src/distributed/rdma.h",
    "content": "#pragma once\n\n#include <cstddef>\n#include <cstdint>\n#include <memory>\n#include <optional>\n#include <stdexcept>\n\nnamespace rdma {\n\nclass Error : public std::runtime_error {\n  using std::runtime_error::runtime_error;\n};\n\nstruct Endpoint {\n  uint32_t lid;\n  uint32_t qpnum;\n  std::array<std::byte, 16> gid;\n  bool operator==(const Endpoint& n) {\n    return lid == n.lid && qpnum == n.qpnum;\n  }\n  bool operator!=(const Endpoint& n) {\n    return !(*this == n);\n  }\n};\n\nstruct Buffer {\n  virtual ~Buffer() {\n  }\n  virtual uint32_t key() = 0;\n  virtual uint32_t keyFor(Endpoint ep) = 0;\n};\n\nstruct CompletionQueue {\n  virtual ~CompletionQueue() {\n  }\n  virtual void wait() = 0;\n};\n\nstruct Host {\n  virtual ~Host() {\n  }\n  virtual Endpoint init(CompletionQueue& cq) = 0;\n  virtual void connect(Endpoint ep) = 0;\n\n  virtual void read(Buffer& localBuffer,\n                    void* localAddress,\n                    uint32_t remoteKey,\n                    uintptr_t remoteAddress,\n                    size_t size) = 0;\n  virtual void wait() = 0;\n};\n\nstruct Context {\n  virtual ~Context() {\n  }\n  virtual std::unique_ptr<Host> createHost() = 0;\n  virtual std::unique_ptr<Buffer> createBuffer(void* address, size_t size) = 0;\n  virtual std::unique_ptr<CompletionQueue> createCQ(int size) = 0;\n};\n\nstd::unique_ptr<Context> create();\n\n}  // namespace rdma\n"
  },
  {
    "path": "src/distributed/rdma_nop.cc",
    "content": "#include \"rdma.h\"\n\nnamespace rdma {\n\nstd::unique_ptr<Context> create() {\n  return nullptr;\n}\n\n}  // namespace rdma\n"
  },
  {
    "path": "src/distributed/rpc.h",
    "content": "\n#define ZSTD_STATIC_LINKING_ONLY\n#include \"zstd/lib/zstd.h\"\n\n#include \"network.h\"\n\n#include \"string_view\"\n\n#include <atomic>\n#include <chrono>\n#include <condition_variable>\n#include <cstring>\n#include <future>\n#include <mutex>\n#include <thread>\n#include <unordered_map>\n#include <unordered_set>\n\nnamespace rpc {\n\n// This is not a cross platform serializer\nstruct Serializer {\n  std::vector<char> buf;\n  void write(const void* data, size_t len) {\n    size_t offset = buf.size();\n    if (buf.capacity() < offset + len) {\n      buf.reserve(\n          std::max(offset + len, std::max(buf.capacity() * 2, (size_t)16)));\n    }\n    buf.resize(offset + len);\n    std::memcpy(buf.data() + offset, data, len);\n  }\n  template <typename T, std::enable_if_t<std::is_trivial_v<T>>* = nullptr>\n  void write(T v) {\n    write((void*)&v, sizeof(v));\n  }\n\n  void write(std::string_view str) {\n    write(str.size());\n    write(str.data(), str.size());\n  }\n\n  template <typename T> void write(std::basic_string_view<T> str) {\n    write(str.size());\n    write(str.data(), sizeof(T) * str.size());\n  }\n\n  void clear() {\n    buf.clear();\n  }\n  const char* data() const {\n    return buf.data();\n  }\n  size_t size() const {\n    return buf.size();\n  }\n\n  void compress(int level = 0) {\n    std::vector<char> newbuf;\n    newbuf.resize(sizeof(size_t) + ZSTD_compressBound(buf.size()));\n    auto n = ZSTD_compress(newbuf.data() + sizeof(size_t),\n                           newbuf.size() - sizeof(size_t), buf.data(),\n                           buf.size(), level);\n    if (!ZSTD_isError(n)) {\n      size_t sn = buf.size();\n      std::memcpy(newbuf.data(), &sn, sizeof(sn));\n      newbuf.resize(sizeof(size_t) + n);\n      std::swap(buf, newbuf);\n    } else {\n      buf.clear();\n    }\n  }\n};\nstruct Deserializer {\n  std::string_view buf;\n  std::vector<char> ownbuf;\n  Deserializer() = default;\n  Deserializer(std::string_view buf)\n 
     : buf(buf) {\n  }\n  Deserializer(const void* data, size_t len)\n      : buf((const char*)data, len) {\n  }\n  void consume(size_t len) {\n    buf = {buf.data() + len, buf.size() - len};\n  }\n  template <typename T> std::basic_string_view<T> readStringView() {\n    size_t len = read<size_t>();\n    if (buf.size() < sizeof(T) * len) {\n      len = buf.size() / sizeof(T);\n    }\n    T* data = (T*)buf.data();\n    consume(sizeof(T) * len);\n    return {data, len};\n  }\n  std::string_view readString() {\n    size_t len = read<size_t>();\n    if (buf.size() < len) {\n      len = buf.size();\n    }\n    const char* data = buf.data();\n    consume(len);\n    return {data, len};\n  }\n  template <typename T, std::enable_if_t<std::is_trivial_v<T>>* = nullptr>\n  void read(T& r) {\n    if (buf.size() < sizeof(T)) {\n      consume(buf.size());\n      r = {};\n      return;\n    }\n    std::memcpy(&r, buf.data(), sizeof(T));\n    consume(sizeof(T));\n  }\n  void read(std::string_view& r) {\n    r = readString();\n  }\n  void read(std::string& r) {\n    r = readString();\n  }\n  template <typename T> void read(std::basic_string_view<T>& r) {\n    r = readStringView<T>();\n  }\n\n  template <typename T> T read() {\n    T r;\n    read(r);\n    return r;\n  }\n  std::string_view read() {\n    return readString();\n  }\n\n  bool empty() {\n    return buf.empty();\n  }\n\n  void decompress() {\n    size_t sn = read<size_t>();\n    std::vector<char> newbuf;\n    newbuf.resize(sn);\n    auto n =\n        ZSTD_decompress(newbuf.data(), newbuf.size(), buf.data(), buf.size());\n    if (!ZSTD_isError(n)) {\n      std::swap(ownbuf, newbuf);\n      buf = {ownbuf.data(), ownbuf.size()};\n    } else {\n      buf = {};\n    }\n  }\n};\n\nstruct Serialize {\n  Serialize(Serializer& ser)\n      : ser(ser) {\n  }\n  Serializer& ser;\n\n  template <typename T> static std::false_type has_serialize_f(...);\n  template <typename T,\n            typename = decltype(\n                
std::declval<T>().serialize(std::declval<Serialize&>()))>\n  static std::true_type has_serialize_f(int);\n  template <typename T>\n  static const bool has_serialize =\n      decltype(Serialize::has_serialize_f<T>(0))::value;\n  template <typename T> static std::false_type has_builtin_write_f(...);\n  template <\n      typename T,\n      typename = decltype(std::declval<Serializer>().write(std::declval<T>()))>\n  static std::true_type has_builtin_write_f(int);\n  template <typename T>\n  static const bool has_builtin_write =\n      decltype(Serialize::has_builtin_write_f<T>(0))::value;\n  template <typename T> void operator()(const T& v) {\n    if constexpr (has_serialize<const T>) {\n      v.serialize(*this);\n    } else if constexpr (has_builtin_write<const T>) {\n      ser.write(std::forward<const T>(v));\n    } else {\n      serialize(*this, std::forward<const T>(v));\n    }\n  }\n\n  template <typename... T> void operator()(const T&... v) {\n    (int[]){((*this)(std::forward<const T>(v)), 0)...};\n  }\n};\n\nstruct Deserialize {\n  Deserialize(Deserializer& des)\n      : des(des) {\n  }\n  Deserializer& des;\n\n  template <typename T> static std::false_type has_serialize_f(...);\n  template <typename T,\n            typename = decltype(\n                std::declval<T>().serialize(std::declval<Deserialize&>()))>\n  static std::true_type has_serialize_f(int);\n  template <typename T>\n  static const bool has_serialize =\n      decltype(Deserialize::has_serialize_f<T>(0))::value;\n  template <typename T> static std::false_type has_builtin_read_f(...);\n  template <typename T,\n            typename =\n                decltype(std::declval<Deserializer>().read(std::declval<T&>()))>\n  static std::true_type has_builtin_read_f(int);\n  template <typename T>\n  static const bool has_builtin_read =\n      decltype(Deserialize::has_builtin_read_f<T>(0))::value;\n  template <typename T> void operator()(T& v) {\n    if constexpr (has_serialize<T>) {\n      
v.serialize(*this);\n    } else if constexpr (has_builtin_read<T>) {\n      des.read(v);\n    } else {\n      serialize(*this, v);\n    }\n  }\n\n  template <typename... T> void operator()(T&... v) {\n    (int[]){((*this)(v), 0)...};\n  }\n\n  template <typename T> T read() {\n    if constexpr (has_serialize<T>) {\n      T r;\n      r.serialize(*this);\n      return r;\n    } else if constexpr (has_builtin_read<T>) {\n      return des.read<T>();\n    } else {\n      T r;\n      serialize(*this, r);\n      return r;\n    }\n  }\n};\n\nstruct RPCException : std::exception {};\n\nstruct RPCExceptionConnectionError : RPCException {\n  virtual const char* what() const noexcept override {\n    return \"RPC connection error\";\n  }\n};\n\nstruct RPCExceptionFunctionNotFound : RPCException {\n  virtual const char* what() const noexcept override {\n    return \"RPC function not found\";\n  }\n};\n\nstruct RPCExceptionRemoteException : RPCException {\n  virtual const char* what() const noexcept override {\n    return \"RPC remote exception\";\n  }\n};\n\nclass Client : public std::enable_shared_from_this<Client> {\n public:\n  Client() = default;\n  Client(network::Peer peer)\n      : peer(std::move(peer)) {\n    this->peer.setOnMessage([this](std::string_view buf) {\n      bytesReceived_ += buf.size();\n      Deserializer des(buf);\n      des.decompress();\n      Deserialize x(des);\n      uint32_t id;\n      uint8_t status;\n      x(id, status);\n      std::unique_lock l(reqmut);\n      auto i = requests.find(id);\n      if (i != requests.end()) {\n        auto r = std::move(i->second);\n        requests.erase(i);\n        l.unlock();\n        lastLatency_ = std::chrono::steady_clock::now() - r->timestamp;\n        if (status == 0xff) {\n          r->exception(std::make_exception_ptr(RPCExceptionFunctionNotFound()));\n        } else if (status == 0xfe) {\n          r->exception(std::make_exception_ptr(RPCExceptionRemoteException()));\n        } else if (status != 0) {\n    
      r->exception(std::make_exception_ptr(RPCExceptionConnectionError()));\n        } else {\n          r->handle(x);\n        }\n      }\n    });\n    this->peer.setOnConnectionClosed([this]() {\n      std::unique_lock l(reqmut);\n      for (auto& v : requests) {\n        v.second->exception(\n            std::make_exception_ptr(RPCExceptionConnectionError()));\n      }\n      requests.clear();\n    });\n  }\n  ~Client() {\n    peer.close();\n  }\n\n  void close() {\n    peer.close();\n  }\n\n  template <typename... Args>\n  void async(std::string_view funcname, Args&&... args) {\n    Serializer ser;\n    Serialize x(ser);\n    uint32_t id = ++reqcounter;\n    x(id, funcname, std::forward<Args>(args)...);\n    ser.compress();\n    peer.sendMessage(ser.data(), ser.size());\n    bytesSent_ += ser.size();\n    ++numRpcCalls_;\n  }\n\n  struct RequestBase {\n    std::chrono::steady_clock::time_point timestamp;\n    virtual ~RequestBase() {\n    }\n    virtual void handle(Deserialize&) noexcept = 0;\n    virtual void exception(std::exception_ptr e) noexcept = 0;\n  };\n\n  template <typename R> struct RequestImpl : RequestBase {\n    std::promise<R> p;\n    std::atomic_bool handled = false;\n    virtual ~RequestImpl() {\n    }\n    virtual void handle(Deserialize& x) noexcept override {\n      if (handled.exchange(true)) {\n        return;\n      }\n      if constexpr (std::is_same_v<void, R>) {\n        p.set_value();\n      } else {\n        R r;\n        x(r);\n        p.set_value(r);\n      }\n    }\n    virtual void exception(std::exception_ptr e) noexcept override {\n      if (handled.exchange(true)) {\n        return;\n      }\n      p.set_exception(e);\n    }\n  };\n\n  template <typename R, typename... Args>\n  std::future<R> async(std::string_view funcname, Args&&... 
args) {\n    Serializer ser;\n    Serialize x(ser);\n    uint32_t id = ++reqcounter;\n    x(id, funcname, std::forward<Args>(args)...);\n    auto req = std::make_unique<RequestImpl<R>>();\n    auto fut = req->p.get_future();\n    std::unique_lock l(reqmut);\n    req->timestamp = std::chrono::steady_clock::now();\n    requests[id] = std::move(req);\n    l.unlock();\n    ser.compress();\n    peer.sendMessage(ser.data(), ser.size());\n    bytesSent_ += ser.size();\n    ++numRpcCalls_;\n    return fut;\n  }\n\n  template <typename R, typename... Args>\n  R sync(std::string_view funcname, Args&&... args) {\n    auto f = async<R>(funcname, std::forward<Args>(args)...);\n    return f.get();\n  }\n\n  template <typename... Args>\n  void sync(std::string_view funcname, Args&&... args) {\n    return sync<void>(funcname, std::forward<Args>(args)...);\n  }\n\n  size_t bytesSent() const {\n    return bytesSent_;\n  }\n  size_t bytesReceived() const {\n    return bytesReceived_;\n  }\n  size_t numRpcCalls() const {\n    return numRpcCalls_;\n  }\n  std::chrono::steady_clock::duration lastLatency() const {\n    return lastLatency_;\n  }\n\n private:\n  std::mutex reqmut;\n  std::unordered_map<uint32_t, std::unique_ptr<RequestBase>> requests;\n  std::atomic<uint32_t> reqcounter = 0;\n  network::Peer peer;\n\n  std::atomic_size_t bytesSent_ = 0;\n  std::atomic_size_t bytesReceived_ = 0;\n  std::atomic_size_t numRpcCalls_ = 0;\n  std::atomic<std::chrono::steady_clock::duration> lastLatency_{};\n};\n\nclass Server {\n public:\n  Server() = default;\n  Server(network::Server server)\n      : server(std::move(server)) {\n    this->server.setOnPeer([this](network::Peer peer) {\n      auto ref = std::make_shared<Peer>();\n      ref->peer = std::move(peer);\n      auto l = this->server.lock();\n      peers.push_back(ref);\n      l.unlock();\n      ref->peer.setOnMessage(\n          [this, ref](std::string_view buf) { handle(*ref, buf); });\n      ref->peer.setOnConnectionClosed([this, 
ref]() {\n        auto l = this->server.lock();\n        for (auto i = peers.begin(); i != peers.end(); ++i) {\n          if (*i == ref) {\n            peers.erase(i);\n            break;\n          }\n        }\n        ref->peer.post_close();\n      });\n    });\n  }\n  ~Server() {\n    server.close();\n    for (auto& v : peers) {\n      v->peer.close();\n    }\n    peers.clear();\n  }\n\n  Server(Server&&) = delete;\n  Server(Server&) = delete;\n\n  void listen(std::string_view endpoint) {\n    server.listen(endpoint);\n  }\n\n  struct FBase {\n    virtual ~FBase(){};\n    virtual void call(Deserialize& x, Serialize& sx) = 0;\n  };\n\n  template <typename R, typename... Args> struct FImpl : FBase {\n    std::function<R(Args...)> f;\n    FImpl(std::function<R(Args...)> f)\n        : f(std::move(f)) {\n    }\n    virtual ~FImpl(){};\n    virtual void call(Deserialize& x, Serialize& sx) override {\n      std::tuple<Args...> args;\n      unfold<0>(x, args);\n      if constexpr (std::is_same_v<void, R>) {\n        std::apply(f, std::move(args));\n      } else {\n        sx(std::apply(f, std::move(args)));\n      }\n    }\n    template <size_t n, typename T> void unfold(Deserialize& x, T& tuple) {\n      x(std::get<n>(tuple));\n      if constexpr (n + 1 != std::tuple_size_v<T>) {\n        unfold<n + 1>(x, tuple);\n      }\n    }\n  };\n\n  std::unordered_set<std::string> funcnames;\n  std::unordered_map<std::string_view, std::unique_ptr<FBase>> funcs;\n\n  template <typename R, typename... Args>\n  void define(std::string_view name, std::function<R(Args...)> f) {\n    auto ff = std::make_unique<FImpl<R, Args...>>(std::move(f));\n    funcs[*funcnames.emplace(name).first] = std::move(ff);\n  }\n\n  template <typename R, typename... Args>\n  void define(std::string_view name, R (*f)(Args...)) {\n    define(name, std::function<R(Args...)>(f));\n  }\n\n  template <typename R, typename M, typename... 
Args>\n  void define(std::string_view name, R (M::*f)(Args...), M* self) {\n    define(name, std::function<R(Args...)>([self, f](Args&&... args) -> R {\n             return (self->*f)(std::forward<Args>(args)...);\n           }));\n  }\n\n  template <typename R, typename... Args, typename T>\n  void define(std::string_view name, T f) {\n    auto ff = std::make_unique<FImpl<R, Args...>>(std::move(f));\n    funcs[*funcnames.emplace(name).first] = std::move(ff);\n  }\n\n  size_t bytesSent() const {\n    return bytesSent_;\n  }\n  size_t bytesReceived() const {\n    return bytesReceived_;\n  }\n  size_t numRpcCalls() const {\n    return numRpcCalls_;\n  }\n\n private:\n  struct Peer {\n    network::Peer peer;\n  };\n\n  struct Message {\n    Message* next = nullptr;\n    std::vector<char> buf;\n  };\n\n  void handle(Peer& peer, std::string_view buf) {\n    bytesReceived_ += buf.size();\n    Deserializer des(buf.data(), buf.size());\n    des.decompress();\n    Deserialize x(des);\n    uint32_t id;\n    std::string_view name;\n    x(id, name);\n    Serializer ser;\n    Serialize sx(ser);\n    ++numRpcCalls_;\n    auto i = funcs.find(name);\n    if (i != funcs.end()) {\n      sx(id);\n      sx((uint8_t)0);\n      try {\n        i->second->call(x, sx);\n      } catch (...) 
{\n        ser.clear();\n        sx(id);\n        sx((uint8_t)0xfe);\n        ser.compress();\n        peer.peer.sendMessage(ser.data(), ser.size());\n        bytesSent_ += ser.size();\n        throw;\n      }\n    } else {\n      sx(id);\n      sx((uint8_t)0xff);\n    }\n    ser.compress();\n    peer.peer.sendMessage(ser.data(), ser.size());\n    bytesSent_ += ser.size();\n  }\n\n  network::Server server;\n  std::vector<std::shared_ptr<Peer>> peers;\n\n  std::atomic_size_t bytesSent_ = 0;\n  std::atomic_size_t bytesReceived_ = 0;\n  std::atomic_size_t numRpcCalls_ = 0;\n};\n\nclass Rpc {\n public:\n  Rpc() = default;\n  Rpc(Rpc&&) = delete;\n  Rpc(Rpc&) = delete;\n  ~Rpc() {\n    terminate = true;\n    for (auto& _ : threads) {\n      (void)_;\n      net.post([] {});\n    }\n  }\n\n  std::shared_ptr<Server> listen(std::string_view endpoint) {\n    return std::make_shared<Server>(net.listen(endpoint));\n  }\n\n  std::shared_ptr<Client> connect(std::string_view endpoint) {\n    return std::make_shared<Client>(net.connect(endpoint));\n  }\n\n  bool run_one() {\n    return net.run_one();\n  }\n\n  void asyncRun(int nThreads = 1) {\n    for (; nThreads; --nThreads) {\n      threads.emplace_back([this]() {\n        while (!terminate) {\n          net.run_one();\n        }\n      });\n    }\n  }\n\n private:\n  network::Network net;\n\n  std::atomic_bool terminate = false;\n  std::vector<std::thread> threads;\n};\n\n}  // namespace rpc\n"
  },
  {
    "path": "src/games/amazons.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: 林鈺錦 (Yù-Jǐn Lín)\n// - Github: https://github.com/abc1236762\n// - Email:  abc1236762@outlook.com\n// Facilitator: 邱顯棟 (Xiǎn-Dòng Qiū)\n// - Github: https://github.com/YumJelly\n// - Email:  yumjelly@gmail.com\n\n#include \"amazons.h\"\n\nnamespace Amazons {\n\nState::State(int seed)\n    : core::State(seed) {\n  std::call_once(setupCalled, [&] { setupBoard(_rng); });\n}\n\nvoid State::Initialize() {\n  _moves.clear();\n  _featSize = {chessKinds, Board::rows, Board::columns};\n  _features.resize(chessKinds * Board::squares);\n  _actionSize = {Board::squares, Board::squares, Board::squares};\n  _legalActions.reserve(maxLegalActionsCnt);\n  _status = GameStatus::player0Turn;\n\n  board.initialize();\n  setInitialChesses();\n  _hash = board.getHash();\n  findLegalActions(Player::first);\n  fillFeatures();\n}\n\nstd::unique_ptr<core::State> State::clone_() const {\n  return std::make_unique<State>(*this);\n}\n\nvoid State::ApplyAction(const ::_Action& action) {\n  Move move{};\n  std::tie(move.fromX, move.fromY) = Board::posTo2D(action.GetX());\n  std::tie(move.toX, move.toY) = Board::posTo2D(action.GetY());\n  std::tie(move.arrowX, move.arrowY) = Board::posTo2D(action.GetZ());\n  play(move);\n  board.turnHash();\n  _hash = board.getHash();\n  Player nextPlayer = turnPlayer();\n  if (canGoNext(nextPlayer)) {\n    fillFeatures();\n  } else {\n    setTerminatedStatus(nextPlayer);\n    _legalActions.clear();\n  }\n}\n\nvoid State::DoGoodAction() {\n  DoRandomAction();\n}\n\nvoid State::printCurrentBoard() const {\n  std::cout << board.sprint(\"  \");\n}\n\nstd::string State::stateDescription() const {\n  return board.sprint(\"  \");\n}\n\nstd::string State::actionDescription(const ::_Action& action) const {\n  std::ostringstream 
oss;\n  oss << \"moved the chess from \" << board.getPosStr(action.GetX()) << \" to \"\n      << board.getPosStr(action.GetY()) << \" and shooted an arrow at \"\n      << board.getPosStr(action.GetZ());\n  return oss.str();\n}\n\nstd::string State::actionsDescription() const {\n  std::ostringstream oss;\n  oss << \"Position of queen chesses which are able to move: \";\n  std::set<int> xySet;\n  for (auto& legalAction : _legalActions)\n    xySet.insert(legalAction.GetX());\n  std::size_t i = 0;\n  for (int xy : xySet) {\n    oss << \"`\" << board.getPosStr(xy) << \"`\";\n    if (++i < xySet.size())\n      oss << \", \";\n  }\n  oss << std::endl;\n  return oss.str();\n}\n\nint State::parseAction(const std::string& str) const {\n  auto getXY = [&](const std::set<int>& xySet, const std::string& content) {\n    std::cout << content << std::endl\n              << \"Allowed positions: ('\" << Board::getMarkSymbol()\n              << \"' means an allowed position)\";\n    std::set<std::tuple<int, int>> markedPos;\n    for (auto xy : xySet)\n      markedPos.insert(Board::posTo2D(xy));\n    std::cout << std::endl << board.sprintBoard(\"  \", markedPos);\n    std::cout << \"> \";\n    std::string str;\n    std::getline(std::cin, str);\n    auto xyResult = board.parsePosStr(str);\n    if (!xyResult)\n      return -1;\n    auto [x, y] = xyResult.value();\n    int xy = Board::posTo1D(x, y);\n    if (xySet.find(xy) == xySet.end())\n      return -1;\n    return xy;\n  };\n\n  auto result = board.parsePosStr(str);\n  if (!result)\n    return -1;\n  auto [fromX, fromY] = result.value();\n  int fromXY = Board::posTo1D(fromX, fromY);\n  std::set<int> toXYSet;\n  std::unordered_map<int, std::set<int>> arrowXYSet;\n  std::unordered_map<int, std::unordered_map<int, int>> iMap;\n  for (std::size_t i = 0; i < _legalActions.size(); i++) {\n    if (fromXY != _legalActions[i].GetX())\n      continue;\n    int toXY = _legalActions[i].GetY();\n    int arrowXY = _legalActions[i].GetZ();\n    
toXYSet.insert(toXY);\n    arrowXYSet[toXY].insert(arrowXY);\n    iMap[toXY][arrowXY] = i;\n  }\n  if (toXYSet.empty())\n    return -1;\n\n  int toXY =\n      getXY(toXYSet, \"Input the position of selected queen chess after moved:\");\n  if (toXY < 0)\n    return -1;\n  int arrowXY = getXY(\n      arrowXYSet[toXY], \"Input the position of the arrow that wants to shoot:\");\n  if (arrowXY < 0)\n    return -1;\n  return iMap[toXY][arrowXY];\n}\n\nint State::humanInputAction(\n    std::function<std::optional<int>(std::string)> specialAction) {\n  std::cout << \"Current board:\" << std::endl << stateDescription() << std::endl;\n  std::cout << \"Input three positions to play: (uses format <alphabet of x-axi\"\n            << \"s><numbers of y-axis>, e.g. `A1`, `b2`, `C03`...)\" << std::endl;\n  std::string str;\n  int index = -1;\n  while (index < 0) {\n    std::cout << actionsDescription() << std::endl;\n    std::cout << \"Input the position of the queen chess which wants to move:\";\n    std::cout << std::endl << \"> \";\n    std::getline(std::cin, str);\n    index = parseAction(str);\n    if (index < 0) {\n      if (auto r = specialAction(str); r)\n        return *r;\n      std::cout << \"invalid input, try again.\" << std::endl;\n    }\n  }\n  return index;\n}\n\ntemplate <typename R> void State::setupBoard(const R& re) {\n  Board::setup(\n      {\"Empty\", \"WhiteQueen\", \"BlackQueen\", \"WhiteArrow\", \"BlackArrow\"},\n      {\" \", \"○\", \"●\", \"□\", \"■\"}, re);\n}\n\nconstexpr Player State::chessToPlayer(Chess chess) {\n  if (chess == ChessKind::whiteQueen || chess == ChessKind::whiteArrow)\n    return Player::first;\n  else if (chess == ChessKind::blackQueen || chess == ChessKind::blackArrow)\n    return Player::second;\n  assert(chess == ChessKind::whiteQueen || chess == ChessKind::whiteArrow ||\n         chess == ChessKind::blackQueen || chess == ChessKind::blackArrow);\n  return Player::none;\n}\n\nconstexpr Chess State::playerToQueenChess(Player 
player) {\n  if (player == Player::first)\n    return ChessKind::whiteQueen;\n  else if (player == Player::second)\n    return ChessKind::blackQueen;\n  assert(player == Player::first || player == Player::second);\n  return ChessKind::empty;\n}\n\nconstexpr Chess State::playerToArrowChess(Player player) {\n  if (player == Player::first)\n    return ChessKind::whiteArrow;\n  else if (player == Player::second)\n    return ChessKind::blackArrow;\n  assert(player == Player::first || player == Player::second);\n  return ChessKind::empty;\n}\n\nvoid State::setInitialChesses() {\n  for (int p = 0; p < players; p++) {\n    Chess queenChess = playerToQueenChess(Player::set(p));\n    int i = 0;\n    for (auto [x, y] : initialQueenChessesPos[p]) {\n      board.setChess(x, y, queenChess);\n      queenChessesPos[p][i++] = {x, y};\n    }\n  }\n}\n\nvoid State::play(const Move& move) {\n  Chess queenChess = board.getChess(move.fromX, move.fromY);\n  Player player = chessToPlayer(queenChess);\n  assert(queenChess == ChessKind::whiteQueen ||\n         queenChess == ChessKind::blackQueen);\n  for (auto& [x, y] : queenChessesPos[player.index()]) {\n    if (x == move.fromX && y == move.fromY) {\n      x = move.toX, y = move.toY;\n      break;\n    }\n  }\n  board.setChess(move.fromX, move.fromY, ChessKind::empty);\n  board.setChess(move.toX, move.toY, queenChess);\n  board.setChess(move.arrowX, move.arrowY, playerToArrowChess(player));\n}\n\nbool State::canGoNext(Player nextPlayer) {\n  findLegalActions(nextPlayer);\n  return _legalActions.size() != 0;\n}\n\nvoid State::findLegalActions(Player player) {\n  clearActions();\n  int i = 0;\n\n  auto addLegalActions = [&](Move move) {\n    int fromXY = Board::posTo1D(move.fromX, move.fromY);\n    int toXY = Board::posTo1D(move.toX, move.toY);\n    int arrowXY = Board::posTo1D(move.arrowX, move.arrowY);\n    addAction(fromXY, toXY, arrowXY);\n    assert(i <= maxLegalActionsCnt);\n  };\n\n  auto findLegalArrowShoots = [&](int fromX, int 
fromY, int toX, int toY) {\n    for (auto [dx, dy] : directions) {\n      int arrowX = toX + dx, arrowY = toY + dy;\n      while (Board::isPosInBoard(arrowX, arrowY)) {\n        if (board.getChess(arrowX, arrowY) == ChessKind::empty ||\n            (arrowX == fromX && arrowY == fromY)) {\n          addLegalActions(Move{fromX, fromY, toX, toY, arrowX, arrowY});\n        } else {\n          break;\n        }\n        arrowX += dx, arrowY += dy;\n      }\n    }\n  };\n\n  auto findLegalQueenMoves = [&](int fromX, int fromY) {\n    for (auto [dx, dy] : directions) {\n      int toX = fromX + dx, toY = fromY + dy;\n      while (Board::isPosInBoard(toX, toY) &&\n             board.getChess(toX, toY) == ChessKind::empty) {\n        findLegalArrowShoots(fromX, fromY, toX, toY);\n        toX += dx, toY += dy;\n      }\n    }\n  };\n\n  for (auto [fromX, fromY] : queenChessesPos[player.index()])\n    findLegalQueenMoves(fromX, fromY);\n}\n\nPlayer State::turnPlayer() {\n  if (_status == GameStatus::player0Turn) {\n    _status = GameStatus::player1Turn;\n    return Player::second;\n  } else if (_status == GameStatus::player1Turn) {\n    _status = GameStatus::player0Turn;\n    return Player::first;\n  }\n  assert(_status == GameStatus::player0Turn ||\n         _status == GameStatus::player1Turn);\n  return Player::none;\n}\n\nvoid State::setTerminatedStatus(Player loser) {\n  if (loser == Player::first)\n    _status = GameStatus::player1Win;\n  else if (loser == Player::second)\n    _status = GameStatus::player0Win;\n  assert(loser == Player::first || loser == Player::second);\n}\n\nvoid State::fillFeatures() {\n  std::fill(_features.begin(), _features.end(), 0.0);\n  auto* f = _features.data();\n  for (int c = 0; c < chessKinds; c++) {\n    Chess chess = static_cast<Chess>(c + 1);\n    for (int xy = 0; xy < Board::squares; xy++, f++)\n      if (board.getChess(xy) == chess)\n        *f = 1.0;\n  }\n  fillFullFeatures();\n}\n\nAction::Action(int i, int fromXY, int toXY, int 
arrowXY)\n    : ::_Action() {\n  _i = i;\n  _loc = {fromXY, toXY, arrowXY};\n  _hash = State::Board::squares * State::Board::squares * fromXY +\n          State::Board::squares * toXY + arrowXY;\n}\n\n}  // namespace Amazons\n"
  },
  {
    "path": "src/games/amazons.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: 林鈺錦 (Yù-Jǐn Lín)\n// - Github: https://github.com/abc1236762\n// - Email:  abc1236762@outlook.com\n// Facilitator: 邱顯棟 (Xiǎn-Dòng Qiū)\n// - Github: https://github.com/YumJelly\n// - Email:  yumjelly@gmail.com\n\n#pragma once\n\n#include <algorithm>\n#include <array>\n#include <cassert>\n#include <functional>\n#include <iostream>\n#include <memory>\n#include <mutex>\n#include <optional>\n#include <set>\n#include <sstream>\n#include <string>\n#include <tuple>\n\n#include \"../core/state.h\"\n#include \"commons/chessboard.h\"\n#include \"commons/player.h\"\n\nnamespace Amazons {\n\nclass ChessKind {\n public:\n  static constexpr Chess empty = 0;\n  static constexpr Chess whiteQueen = 1;\n  static constexpr Chess blackQueen = 2;\n  static constexpr Chess whiteArrow = 3;\n  static constexpr Chess blackArrow = 4;\n};\n\nstruct Move {\n  int fromX, fromY;\n  int toX, toY;\n  int arrowX, arrowY;\n};\n\nclass State : public core::State {\n public:\n  using Board = Chessboard<10, 10>;\n\n  State(int seed);\n  void Initialize() override;\n  std::unique_ptr<core::State> clone_() const override;\n  void ApplyAction(const ::_Action& action) override;\n  void DoGoodAction() override;\n  void printCurrentBoard() const override;\n  std::string stateDescription() const override;\n  std::string actionDescription(const ::_Action& action) const override;\n  std::string actionsDescription() const override;\n  int parseAction(const std::string& str) const override;\n  int humanInputAction(\n      std::function<std::optional<int>(std::string)> specialAction) override;\n\n private:\n  template <typename R> static void setupBoard(const R& re);\n  static constexpr Player chessToPlayer(Chess chess);\n  static constexpr Chess playerToQueenChess(Player 
player);\n  static constexpr Chess playerToArrowChess(Player player);\n\n  void setInitialChesses();\n  void play(const Move& move);\n  bool canGoNext(Player nextPlayer);\n  void findLegalActions(Player player);\n  inline Player turnPlayer();\n  inline void setTerminatedStatus(Player loser);\n  void fillFeatures();\n\n  static constexpr int players = 2;\n  static constexpr int chessKinds = 4;\n  static constexpr int maxHands = Board::squares - 8;\n  // Maximum count of legal moves in ideal situations: ⌈(((9*4-9)*(10*4-4)+(9*4-\n  // 7)*(8*4-4)+(9*4-5)*(6*4-4)+(9*4-3)*(4*4-4)+(9*4-1)*(2*4-4))/10^2)^2*4⌉=3458\n  static constexpr int maxLegalActionsCnt = 3458;\n  static constexpr std::tuple<int, int> directions[8] = {\n      {-1, -1}, {-1, 0}, {-1, 1}, {0, -1}, {0, 1}, {1, -1}, {1, 0}, {1, 1}};\n  static constexpr std::tuple<int, int>\n      initialQueenChessesPos[players][chessKinds] = {\n          {{0, 6}, {3, 9}, {6, 9}, {9, 6}}, {{0, 3}, {3, 0}, {6, 0}, {9, 3}}};\n  static inline std::once_flag setupCalled;\n\n  Board board;\n  std::array<std::array<std::tuple<int, int>, 4>, 2> queenChessesPos;\n};\n\nclass Action : public ::_Action {\n public:\n  Action(int i, int fromXY, int toXY, int arrowRelRD);\n};\n\n}  // namespace Amazons\n"
  },
  {
    "path": "src/games/block_go.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"../core/state.h\"\n#include <algorithm>\n\nclass StateForBlockGo : public core::State {\n public:\n  class Point {\n   public:\n    int x, y;\n\n    Point(int X, int Y) {\n      x = X;\n      y = Y;\n    }\n\n    Point() {\n      x = y = 0;\n    }\n\n    void setxy(int X, int Y) {\n      x = X, y = Y;\n    }\n\n    void turn90() {\n      int tmp = x;\n      x = -y;\n      y = tmp;\n    }\n  };\n\n  class Piece {\n   public:\n    friend class StateForBlockGo;\n    short count;  // 1~4\n    short turn;\n    bool onboard;\n    Point s;\n    Point tail[4];\n\n    Piece() {\n      count = 1;\n      turn = 4;  // 0 90 180 270\n      onboard = false;\n    }\n\n    void turn90() {\n      for (int i = 1; i < count; ++i)\n        tail[i].turn90();\n    }\n  };\n\n  class Move {\n   public:\n    int x, y;\n    int piece;\n    int dir;\n    Move() {\n      x = y = -1;\n      piece = -1;\n      dir = 0;\n    }\n    Move(int X, int Y, int p, int d) {\n      x = X;\n      y = Y;\n      piece = p;\n      dir = d;\n    }\n  };\n\n public:\n  static const int boardWidth = 13;\n  static const int boardHeight = 13;\n  int board[13][13];\n  int territory[13][13];\n  int round;\n  unsigned long long HashArray[20][13][13];  // 20 different types\n  unsigned long long HashTurn;\n  std::vector<Piece> player0;\n  std::vector<Piece> player1;\n  std::vector<Move> moves;\n\n  StateForBlockGo(int seed)\n      : State(seed) {\n  }\n\n  virtual void Initialize() override {\n    _moves.clear();\n    // _hash = 2166136261u;\n    _hash = 0;\n    _status = GameStatus::player0Turn;\n    _featSize[0] = 2;\n    _featSize[1] = boardHeight;\n    _featSize[2] = boardWidth;\n    _actionSize[0] = 36;  // 9 pieces * 4 directions\n    _actionSize[1] = 
boardHeight;\n    _actionSize[2] = boardWidth;\n    _features.clear();\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n\n    gameInit();\n    initHash();\n    // printCurrentBoard();\n\n    findFeature();\n    findActions();\n    fillFullFeatures();\n  }\n\n  void initHash() {\n    for (int x = 0; x < 20; ++x)\n      for (int y = 0; y < boardHeight; ++y)\n        for (int z = 0; z < boardWidth; ++z) {\n          HashArray[x][y][z] = 0;\n          for (int k = 0; k < 64; ++k)\n            if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n              HashArray[x][y][z] |= (1ULL << k);\n        }\n    HashTurn = 0;\n    for (int k = 0; k < 64; k++)\n      if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n        HashTurn |= (1ULL << k);\n  }\n\n  void gameInit() {\n    round = 1;\n    std::fill(&board[0][0], &board[0][0] + 169, 2);\n    player0.clear();\n    player0.resize(9);\n    player1.clear();\n    player1.resize(9);\n\n    // s\n    player0[0].turn = 1;\n    player0[0].count = 1;\n    // s\n    player0[1].turn = 1;\n    player0[1].count = 1;\n    // s+\n    // ++\n    player0[2].turn = 1;\n    player0[2].count = 4;\n    player0[2].tail[1].setxy(1, 0);\n    player0[2].tail[2].setxy(0, 1);\n    player0[2].tail[3].setxy(1, 1);\n    // s+++\n    player0[3].turn = 2;\n    player0[3].count = 4;\n    player0[3].tail[1].setxy(1, 0);\n    player0[3].tail[2].setxy(2, 0);\n    player0[3].tail[3].setxy(3, 0);\n    // s+\n    //  ++\n    player0[4].turn = 2;\n    player0[4].count = 4;\n    player0[4].tail[1].setxy(1, 0);\n    player0[4].tail[2].setxy(1, 1);\n    player0[4].tail[3].setxy(2, 1);\n    //  ++\n    // s+\n    player0[5].turn = 2;\n    player0[5].count = 4;\n    player0[5].tail[1].setxy(1, 0);\n    player0[5].tail[2].setxy(1, -1);\n    player0[5].tail[3].setxy(2, -1);\n    // s++\n    //  +\n    player0[6].turn = 4;\n    player0[6].count = 4;\n    player0[6].tail[1].setxy(1, 0);\n    player0[6].tail[2].setxy(2, 0);\n    player0[6].tail[3].setxy(1, 1);\n    // 
s++\n    //   +\n    player0[7].turn = 4;\n    player0[7].count = 4;\n    player0[7].tail[1].setxy(1, 0);\n    player0[7].tail[2].setxy(2, 0);\n    player0[7].tail[3].setxy(2, 1);\n    // s++\n    // +\n    player0[8].turn = 4;\n    player0[8].count = 4;\n    player0[8].tail[1].setxy(1, 0);\n    player0[8].tail[2].setxy(2, 0);\n    player0[8].tail[3].setxy(0, 1);\n\n    // ============= //\n\n    // s\n    player1[0].turn = 1;\n    player1[0].count = 1;\n    // s\n    player1[1].turn = 1;\n    player1[1].count = 1;\n    // s+\n    // ++\n    player1[2].turn = 1;\n    player1[2].count = 4;\n    player1[2].tail[1].setxy(1, 0);\n    player1[2].tail[2].setxy(0, 1);\n    player1[2].tail[3].setxy(1, 1);\n    // s+++\n    player1[3].turn = 2;\n    player1[3].count = 4;\n    player1[3].tail[1].setxy(1, 0);\n    player1[3].tail[2].setxy(2, 0);\n    player1[3].tail[3].setxy(3, 0);\n    // s+\n    //  ++\n    player1[4].turn = 2;\n    player1[4].count = 4;\n    player1[4].tail[1].setxy(1, 0);\n    player1[4].tail[2].setxy(1, 1);\n    player1[4].tail[3].setxy(2, 1);\n    //  ++\n    // s+\n    player1[5].turn = 2;\n    player1[5].count = 4;\n    player1[5].tail[1].setxy(1, 0);\n    player1[5].tail[2].setxy(1, -1);\n    player1[5].tail[3].setxy(2, -1);\n    // s++\n    //  +\n    player1[6].turn = 4;\n    player1[6].count = 4;\n    player1[6].tail[1].setxy(1, 0);\n    player1[6].tail[2].setxy(2, 0);\n    player1[6].tail[3].setxy(1, 1);\n    // s++\n    //   +\n    player1[7].turn = 4;\n    player1[7].count = 4;\n    player1[7].tail[1].setxy(1, 0);\n    player1[7].tail[2].setxy(2, 0);\n    player1[7].tail[3].setxy(2, 1);\n    // s++\n    // +\n    player1[8].turn = 4;\n    player1[8].count = 4;\n    player1[8].tail[1].setxy(1, 0);\n    player1[8].tail[2].setxy(2, 0);\n    player1[8].tail[3].setxy(0, 1);\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForBlockGo>(*this);\n  }\n\n  virtual void printCurrentBoard() const 
override {\n    fprintf(stderr, \"   0  1  2  3  4  5  6  7  8  9  10 11 12\\n\");\n    for (int i = 0; i < boardHeight; ++i) {\n      fprintf(stderr, \"%2d\", i);\n      for (int j = 0; j < boardWidth; ++j) {\n        switch (board[i][j]) {\n        case 0:\n          std::cerr << \" O \";\n          break;\n        case 1:\n          std::cerr << \" X \";\n          break;\n        default:\n          std::cerr << \" - \";\n          break;\n        }\n      }\n      std::cerr << std::endl;\n    }\n  }\n\n  void findFeature() {\n    std::fill(_features.begin(), _features.end(), 0);\n    for (int i = 0; i < 169; ++i) {\n      switch (board[i / 13][i % 13]) {\n      case 0:\n        _features[i] = 1;\n        break;\n      case 1:\n        _features[169 + i] = 1;\n        break;\n      default:\n        break;\n      }\n    }\n    // std::fill(_features.begin()+338, _features.end(), getCurrentPlayer());\n  }\n\n  bool canDrop(int x, int y) {\n    return (x >= 0 && y >= 0 && x < boardWidth && y < boardHeight &&\n            board[y][x] == 2);\n  }\n\n  void legalMoves(std::vector<Piece> player, std::vector<Move>& moves) {\n    if (round <= 4) {\n      int dx[] = {3, 9, 3, 9};\n      int dy[] = {3, 3, 9, 9};\n      for (int d = 0; d < 4; ++d) {\n        if (board[dy[d]][dx[d]] == 2) {  // empty\n          for (size_t p = 0; p < player.size(); ++p) {\n            if (!player[p].onboard) {\n              for (int t = 0; t < player[p].turn; ++t) {\n                for (int i = 0; i < player[p].count; ++i) {\n                  Move* m = new Move();\n                  m->dir = t;\n                  m->piece = p;\n                  m->x = dx[d] - player[p].tail[i].x;\n                  m->y = dy[d] - player[p].tail[i].y;\n                  // fprintf(stderr, \"%d %d || %d %d\\n\", m->x, m->y,\n                  // player[p].tail[i].x, player[p].tail[i].y);\n                  moves.push_back(*m);\n                  delete m;\n                }\n                // 
fprintf(stderr, \"\\n\\n\");\n                player[p].turn90();\n              }\n\n              if (p >= 3 && p <= 5) {\n                player[p].turn90();\n                player[p].turn90();\n              } else if (p == 2) {\n                player[p].turn90();\n                player[p].turn90();\n                player[p].turn90();\n              }\n\n            } else\n              continue;\n          }\n        }\n      }\n    } else {\n      bool visited[boardHeight][boardWidth];\n      for (int j = 0; j < boardHeight; ++j)\n        for (int i = 0; i < boardWidth; ++i)\n          visited[j][i] = false;\n\n      for (int j = 0; j < boardHeight; ++j) {\n        for (int i = 0; i < boardWidth; ++i) {\n          if ((int)_status == board[j][i]) {\n            int dx[] = {1, 0, -1, 0};\n            int dy[] = {0, 1, 0, -1};\n\n            for (int k = 0; k < 4; ++k) {\n              int x = i + dx[k];\n              int y = j + dy[k];\n              if (canDrop(x, y))\n                visited[y][x] = true;\n            }\n          }\n        }\n      }\n\n      for (size_t p = 0; p < player.size(); ++p) {\n        if (player[p].onboard)\n          continue;\n        for (int t = 0; t < player[p].turn; ++t) {\n          for (int j = 0; j < boardHeight; ++j) {\n            for (int i = 0; i < boardWidth; ++i) {\n              if (board[j][i] != 2)\n                continue;\n              bool legal = false;\n              for (int c = 0; c < player[p].count; ++c) {\n                int x = i + player[p].tail[c].x;\n                int y = j + player[p].tail[c].y;\n                if (!canDrop(x, y)) {\n                  legal = false;\n                  break;\n                } else if (visited[y][x]) {\n                  legal = true;\n                }\n              }\n              if (legal) {\n                Move* m = new Move();\n                m->dir = t;\n                m->piece = p;\n                m->x = i;\n                m->y = j;\n   
             moves.push_back(*m);\n                delete m;\n              }\n            }\n          }\n          player[p].turn90();\n        }\n        if (p >= 3 && p <= 5) {\n          player[p].turn90();\n          player[p].turn90();\n        } else if (p == 2) {\n          player[p].turn90();\n          player[p].turn90();\n          player[p].turn90();\n        }\n      }\n    }\n  }\n\n  void findActions() {\n    moves.clear();\n    if (_status == GameStatus::player0Turn) {\n      legalMoves(player0, moves);\n\n    } else if (_status == GameStatus::player1Turn) {\n      legalMoves(player1, moves);\n    }\n    // fprintf(stderr, \"moves: %d\\n\", moves.size());\n    clearActions();\n    for (auto m : moves) {\n      int x = m.x;\n      int y = m.y;\n      int p = m.piece * 4 + m.dir;\n\n      addAction(p, x, y);\n    }\n  }\n\n  void track(int x, int y) {\n    if (!canDrop(x, y))\n      return;\n    if (territory[y][x] == 4)\n      return;\n    territory[y][x] = 4;\n    // fprintf(stderr, \"xy: %d %d\\n\", x, y);\n    track(x + 1, y);\n    track(x, y - 1);\n    track(x - 1, y);\n    track(x, y + 1);\n  }\n\n  int edge() {\n    int p[2] = {0};\n    for (int j = 0; j < boardHeight; ++j) {\n      for (int i = 0; i < boardWidth; ++i) {\n        if (territory[j][i] == 4) {\n          if (i + 1 < boardWidth && board[j][i + 1] < 2)\n            p[board[j][i + 1]]++;\n          if (j - 1 >= 0 && board[j - 1][i] < 2)\n            p[board[j - 1][i]]++;\n          if (i - 1 >= 0 && board[j][i - 1] < 2)\n            p[board[j][i - 1]]++;\n          if (j + 1 < boardHeight && board[j + 1][i] < 2)\n            p[board[j + 1][i]]++;\n        }\n      }\n    }\n    // fprintf(stderr, \"p: %d %d\\n\", p[0], p[1]);\n    if (p[0] && p[1])\n      return 2;\n    else if (p[0])\n      return 0;\n    else\n      return 1;\n  }\n\n  void setTerritory(int a) {\n    for (int j = 0; j < boardHeight; ++j)\n      for (int i = 0; i < boardWidth; ++i)\n        if (territory[j][i] == 
4)\n          territory[j][i] = a;\n  }\n\n  void printTerritory() {\n    for (int i = 0; i < 13; ++i) {\n      for (int j = 0; j < 13; ++j)\n        fprintf(stderr, \" %d \", territory[i][j]);\n      fprintf(stderr, \"\\n\");\n    }\n    fprintf(stderr, \"\\n\");\n  }\n\n  void findTerritory() {\n    // fprintf(stderr, \"findTerritory\\n\");\n    for (int j = 0; j < boardHeight; ++j)\n      for (int i = 0; i < boardWidth; ++i)\n        territory[j][i] = 3;\n\n    for (int j = 0; j < boardHeight; ++j) {\n      for (int i = 0; i < boardWidth; ++i) {\n        if (canDrop(i, j) && territory[j][i] == 3) {\n          // fprintf(stderr, \"x: %d y: %d\\n\", i, j);\n          track(i, j);\n          // printTerritory();\n          setTerritory(edge());\n        }\n      }\n    }\n  }\n\n  GameStatus findWinner() {\n    findTerritory();\n    int p[4] = {0};\n    for (int j = 0; j < boardHeight; ++j)\n      for (int i = 0; i < boardWidth; ++i)\n        p[territory[j][i]]++;\n\n    // fprintf(stderr, \"score: %d %d %d %d\\n\", p[0], p[1], p[2], p[3]);\n\n    if (p[0] == p[1])\n      return GameStatus::tie;\n    else if (p[0] > p[1])\n      return GameStatus::player0Win;\n    else\n      return GameStatus::player1Win;\n  }\n\n  int findType(int piece, int dir) {\n    switch (piece) {\n    case 0:\n    case 1:\n      return 0;\n    case 2:\n      return 1;\n    case 3:\n      return 2 + dir;\n    case 4:\n      return 4 + dir;\n    case 5:\n      return 6 + dir;\n    case 6:\n      return 8 + dir;\n    case 7:\n      return 12 + dir;\n    case 8:\n      return 16 + dir;\n\n    default:\n      fprintf(stderr, \"piece error!!!!!\\n\");\n      return -1;\n    }\n  }\n\n  virtual void ApplyAction(const _Action& action) override {\n    _hash ^= HashTurn;\n    // fprintf(stderr, \"ApplyAction round %d\\n\", round);\n    int x = action.GetY();\n    int y = action.GetZ();\n    int piece = action.GetX() >> 2;\n    int dir = action.GetX() & 3;\n    _hash ^= HashArray[findType(piece, 
dir)][x][y];\n    // fprintf(stderr, \"(%d, %d) %d %d\\n\", x, y, piece, dir);\n    // fprintf(stderr, \"hash: %llu\\n\", _hash);\n    if (_status == GameStatus::player0Turn) {\n      for (int i = 0; i < dir; ++i)\n        player0[piece].turn90();\n      for (int i = 0; i < player0[piece].count; ++i)\n        board[y + player0[piece].tail[i].y][x + player0[piece].tail[i].x] = 0;\n      player0[piece].onboard = true;\n      _status = GameStatus::player1Turn;\n\n    } else {\n      for (int i = 0; i < dir; ++i)\n        player1[piece].turn90();\n      for (int i = 0; i < player1[piece].count; ++i)\n        board[y + player1[piece].tail[i].y][x + player1[piece].tail[i].x] = 1;\n      player1[piece].onboard = true;\n      _status = GameStatus::player0Turn;\n    }\n\n    if (round >= 18) {\n      _status = findWinner();\n      // printTerritory();\n    } else {\n      round += 1;\n      findFeature();\n      findActions();\n    }\n\n    // printCurrentBoard();\n    // fprintf(stderr, \"end applyAction =======\\n\\n\");\n    fillFullFeatures();\n  }\n\n  virtual std::string stateDescription() const override {\n    std::stringstream ss;\n    ss << \"    0  1  2  3  4  5  6  7  8  9  10 11 12\\n\";\n    for (int i = 0; i < boardHeight; ++i) {\n      char buff[12];\n      sprintf(buff, \"%2d \", i);\n      ss << buff;\n      for (int j = 0; j < boardWidth; ++j) {\n        switch (board[i][j]) {\n        case 0:\n          ss << \" O \";\n          break;\n        case 1:\n          ss << \" X \";\n          break;\n        default:\n          ss << \" - \";\n          break;\n        }\n      }\n      ss << std::endl;\n    }\n    ss << std::endl;\n    return ss.str();\n  }\n\n  struct Actions {\n    int x, y, z, i;\n  };\n\n  static bool compareAction(Actions a, Actions b) {\n    if (a.z != b.z)\n      return a.z < b.z;\n    if (a.x != b.x)\n      return a.x < b.x;\n    return a.y < b.y;\n  };\n\n  virtual std::string actionsDescription() const override {\n    
std::stringstream ss;\n    Actions a[_legalActions.size()];\n    int i = 0;\n    for (auto action : _legalActions) {\n      a[i].x = action.GetY();\n      a[i].y = action.GetZ();\n      a[i].z = action.GetX();\n      a[i].i = i;\n      i++;\n    }\n    std::sort(a, a + _legalActions.size(), compareAction);\n    for (auto action : a) {\n      char buff[54];\n      sprintf(\n          buff, \"%d: (%d %d) ---%d\\n\", action.z, action.x, action.y, action.i);\n      ss << buff;\n    }\n    ss << \"\\nInput format: action index e.g. 0\\n\";\n    return ss.str();\n  }\n\n  virtual std::string actionDescription(const _Action& action) const {\n    std::stringstream ss;\n    int z = action.GetX();\n    int x = action.GetY();\n    int y = action.GetZ();\n    ss << z << \": \" << '(' << x << ' ' << y << \")\\n\";\n    return ss.str();\n  }\n\n  virtual void DoGoodAction() override {\n    //    std::cerr << \"DoGoodAction\" << std::endl;\n    return DoRandomAction();\n  }\n};\n"
  },
  {
    "path": "src/games/breakthrough.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"breakthrough.h\"\n#include <list>\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/time.h>\n#include <time.h>\n\nusing namespace std;\n\nunsigned long long BTHashArray[2][BTDx][BTDy];\nunsigned long long BTHashTurn;\n\n// int level = 1;\n\n// unsigned long long nbPlay = 0;\n\n// timeval stop, start;\n// unsigned long long previousTime = 0;\n\n// bool BTinitHashCalled = false;\nstd::once_flag BTinitHashCalled;\n\nvoid BTinitHash() {\n  for (int player = 0; player < 2; player++)\n    for (int i = 0; i < BTDx; i++)\n      for (int j = 0; j < BTDy; j++) {\n        BTHashArray[player][i][j] = 0;\n        for (int k = 0; k < 64; k++)\n          if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n            BTHashArray[player][i][j] |= (1ULL << k);\n      }\n  BTHashTurn = 0;\n  for (int k = 0; k < 64; k++)\n    if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n      BTHashTurn |= (1ULL << k);\n}\n"
  },
  {
    "path": "src/games/breakthrough.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <list>\n#include <math.h>\n#include <mutex>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/time.h>\n#include <time.h>\n\nusing namespace std;\n\nconst int White = 0;\nconst int Black = 1;\nconst int Empty = 2;\n\nconst int BTDx = 8;\nconst int BTDy = 8;\n\nconst int BTMaxLegalBTMoves = 3 * BTDx * 2;\nconst int BTMaxPlayoutLength = 1000;\n\n// const int SizeTable = 1048575;  // une puissance de 2 moins 1\n\nextern unsigned long long BTHashArray[2][BTDx][BTDy];\nextern unsigned long long BTHashTurn;\n\n// int level = 1;\n\n// unsigned long long nbPlay = 0;\n\n// timeval stop, start;\n// unsigned long long previousTime = 0;\n\n// extern bool BTinitHashCalled = false;\nextern std::once_flag BTinitHashCalled;\n\nextern void BTinitHash();\n\nclass BTPlayer {\n public:\n  int player;\n\n  bool operator==(BTPlayer p) {\n    return (p.player == player);\n  }\n};\n\n// const int MaxBTMoveNumber = 2 * 2 * (3 * BTDx * BTDy) + 1;\nconst int MaxBTMoveNumber = 80 * 2 * 2 * (3 * BTDx * BTDy) + 1;\n\n// bool BTuseCode = true;\n\nclass BTMove {\n public:\n  int x, y, x1, y1, color;\n  int code;\n  int codePrevious;\n\n  BTMove() {\n    x = -1;\n    y = -1;\n    x1 = -1;\n    y1 = -1;\n    color = -1;\n    code = -1;\n    codePrevious = -1;\n  }\n\n  int numberPrevious() {\n    int c = 0;\n    c = code;\n    if (color == White)\n      return c + 3 * (x + BTDx * y) + x1 - x + 1;\n    else\n      return c + 3 * BTDx * BTDy + 3 * (x + BTDx * y) + x1 - x + 1;\n  }\n\n  int number() {\n    int c = 0;\n    c = code + codePrevious;\n    if (color == White)\n      return c + 3 * (x + BTDx * y) + x1 - x + 1;\n    else\n      return c + 3 * BTDx * BTDy + 3 * (x + BTDx * y) + x1 - x + 1;\n  }\n};\n\nclass 
BTBoard {\n public:\n  int board[BTDx][BTDy];\n  unsigned long long hash;\n  BTMove rollout[BTMaxPlayoutLength];\n  int length, turn;\n  int orderBTMove[BTMaxLegalBTMoves];\n\n  void init() {\n    for (int i = 0; i < BTDx; i++)\n      for (int j = 0; j < BTDy; j++)\n        board[i][j] = Empty;\n    for (int i = 0; i < 2; i++)\n      for (int j = 0; j < BTDx; j++)\n        board[j][i] = Black;\n    for (int i = BTDy - 2; i < BTDy; i++)\n      for (int j = 0; j < BTDx; j++)\n        board[j][i] = White;\n    hash = 0;\n    length = 0;\n    turn = White;\n    /*if (BTinitHashCalled == false) {\n      BTinitHash();\n      BTinitHashCalled = true;\n    }*/\n    std::call_once(BTinitHashCalled, BTinitHash);\n  }\n\n  int countPieces(int color) const {\n    int r = 0;\n    for (int i = 0; i < BTDx; i++) {\n      for (int j = 0; j < BTDy; j++) {\n        if (board[i][j] == color) {\n          ++r;\n        }\n      }\n    }\n    return r;\n  }\n\n  bool won(int color) {\n    if (color == White) {\n      for (int j = 0; j < BTDx; j++)\n        if (board[j][0] == White)\n          return true;\n      BTMove listeCoups[BTMaxLegalBTMoves];\n      int nb = legalBTMoves(Black, listeCoups);\n      if (nb == 0)\n        return true;\n    } else {\n      for (int j = 0; j < BTDx; j++)\n        if (board[j][BTDy - 1] == Black)\n          return true;\n      BTMove listeCoups[BTMaxLegalBTMoves];\n      int nb = legalBTMoves(White, listeCoups);\n      if (nb == 0)\n        return true;\n    }\n    return false;\n  }\n\n  bool terminal() {\n    // return won (Black) || won (White);\n    for (int j = 0; j < BTDx; j++)\n      if (board[j][0] == White)\n        return true;\n    for (int j = 0; j < BTDx; j++)\n      if (board[j][BTDy - 1] == Black)\n        return true;\n    BTMove listeCoups[BTMaxLegalBTMoves];\n    int nb = legalBTMoves(turn, listeCoups);\n    if (nb == 0)\n      return true;\n    return false;\n  }\n\n  int score() {\n    if (won(White))\n      return 1;\n    return 
0;\n  }\n\n  float evaluation(int color) {\n    if (won(color))\n      return 1000000.0;\n    if (won(opponent(color)))\n      return -1000000.0;\n    BTMove moves[BTMaxLegalBTMoves];\n    int nb = legalBTMoves(turn, moves);\n    if (nb == 0) {\n      if (color == turn)\n        return -1000000.0;\n      else\n        return 1000000.0;\n    }\n    int nbOpponent = legalBTMoves(opponent(turn), moves);\n    if (color == turn)\n      return (float)(nb - nbOpponent);\n    return (float)(nbOpponent - nb);\n  }\n\n  int opponent(int joueur) const {\n    if (joueur == White)\n      return Black;\n    return White;\n  }\n\n  bool losingBTMove(BTMove m) {\n    if (m.color == Black) {\n      for (int j = 0; j < BTDx; j++)\n        if (board[j][BTDy - 2] == Black)\n          if (m.y1 != BTDy - 1)\n            return true;\n      for (int j = 0; j < BTDx; j++)\n        if (board[j][1] == White)\n          if ((m.y1 != 1) || (m.x1 != j))\n            return true;\n    }\n    if (m.color == White) {\n      for (int j = 0; j < BTDx; j++)\n        if (board[j][1] == White)\n          if (m.y1 != 0)\n            return true;\n      for (int j = 0; j < BTDx; j++)\n        if (board[j][BTDy - 2] == Black)\n          if ((m.y1 != BTDy - 2) || (m.x1 != j))\n            return true;\n    }\n    return false;\n  }\n\n  int order(BTMove m) {\n    if (m.color == Black) {\n      if (m.y1 == BTDy - 1)\n        return 0;\n    }\n    if (m.color == White) {\n      if (m.y1 == 0)\n        return 0;\n    }\n    if (board[m.x1][m.y1] == opponent(m.color))\n      return 1;\n    return 2;\n  }\n\n  bool legalBTMove(BTMove m) {\n    if (board[m.x][m.y] != m.color)\n      return false;\n    if (board[m.x1][m.y1] == m.color)\n      return false;\n    if (m.color == White)\n      if ((m.y1 == m.y - 1) && (m.x == m.x1))\n        if (board[m.x1][m.y1] == Black)\n          return false;\n    if (m.color == Black)\n      if ((m.y1 == m.y + 1) && (m.x == m.x1))\n        if (board[m.x1][m.y1] == White)\n     
     return false;\n    return true;\n  }\n\n  void play(BTMove m) {\n    board[m.x][m.y] = Empty;\n    hash ^= BTHashArray[m.color][m.x][m.y];\n    if (board[m.x1][m.y1] != Empty)\n      hash ^= BTHashArray[board[m.x1][m.y1]][m.x1][m.y1];\n    board[m.x1][m.y1] = m.color;\n    hash ^= BTHashArray[m.color][m.x1][m.y1];\n    hash ^= BTHashTurn;\n    if (length < BTMaxPlayoutLength) {\n      rollout[length] = m;\n      length++;\n    } else\n      fprintf(stderr, \"Pb play,\");\n    turn = opponent(turn);\n    // nbPlay++;\n  }\n\n  void print(FILE* fp) {\n    for (int i = 0; i < BTDy; i++) {\n      for (int j = 0; j < BTDx; j++)\n        if (board[j][i] == Empty)\n          fprintf(fp, \" +\");\n        else if (board[j][i] == Black)\n          fprintf(fp, \" @\");\n        else\n          fprintf(fp, \" O\");\n      fprintf(fp, \" \\n\");\n    }\n    fprintf(fp, \" \\n\");\n  }\n\n  int legalBTMoves(int color, BTMove moves[BTMaxLegalBTMoves]) {\n    int nb = 0;\n    for (int i = 0; i < BTDx; i++)\n      for (int j = 0; j < BTDy; j++)\n        if (board[i][j] == color) {\n          BTMove m;\n          m.x = i;\n          m.y = j;\n          m.color = color;\n          if (color == White) {\n            if ((j - 1 >= 0) && (i + 1 < BTDx)) {\n              m.x1 = i + 1;\n              m.y1 = j - 1;\n              if (board[m.x1][m.y1] == Empty)\n                m.code = 0;\n              else\n                m.code = 6 * BTDx * BTDy;\n              if (legalBTMove(m)) {\n                moves[nb] = m;\n                nb++;\n              }\n            }\n            if ((j - 1 >= 0) && (i - 1 >= 0)) {\n              m.x1 = i - 1;\n              m.y1 = j - 1;\n              if (board[m.x1][m.y1] == Empty)\n                m.code = 0;\n              else\n                m.code = 6 * BTDx * BTDy;\n              if (legalBTMove(m)) {\n                moves[nb] = m;\n                nb++;\n              }\n            }\n            if ((j - 1 >= 0)) {\n              
m.x1 = i;\n              m.y1 = j - 1;\n              if (board[m.x1][m.y1] == Empty)\n                m.code = 0;\n              else\n                m.code = 6 * BTDx * BTDy;\n              if (legalBTMove(m)) {\n                moves[nb] = m;\n                nb++;\n              }\n            }\n          } else {\n            if ((j + 1 < BTDy) && (i + 1 < BTDx)) {\n              m.x1 = i + 1;\n              m.y1 = j + 1;\n              if (board[m.x1][m.y1] == Empty)\n                m.code = 0;\n              else\n                m.code = 6 * BTDx * BTDy;\n              if (legalBTMove(m)) {\n                moves[nb] = m;\n                nb++;\n              }\n            }\n            if ((j + 1 < BTDy) && (i - 1 >= 0)) {\n              m.x1 = i - 1;\n              m.y1 = j + 1;\n              if (board[m.x1][m.y1] == Empty)\n                m.code = 0;\n              else\n                m.code = 6 * BTDx * BTDy;\n              if (legalBTMove(m)) {\n                moves[nb] = m;\n                nb++;\n              }\n            }\n            if ((j + 1 < BTDy)) {\n              m.x1 = i;\n              m.y1 = j + 1;\n              if (board[m.x1][m.y1] == Empty)\n                m.code = 0;\n              else\n                m.code = 6 * BTDx * BTDy;\n              if (legalBTMove(m)) {\n                moves[nb] = m;\n                nb++;\n              }\n            }\n          }\n        }\n    for (int i = 0; i < nb; i++)\n      orderBTMove[i] = order(moves[i]);\n    for (int i = 0; i < nb; i++) {\n      int imin = i;\n      int o = orderBTMove[i];\n      for (int j = i + 1; j < nb; j++) {\n        int o1 = orderBTMove[j];\n        if (o1 < o) {\n          imin = j;\n          o = o1;\n        }\n      }\n      BTMove m = moves[i];\n      moves[i] = moves[imin];\n      moves[imin] = m;\n      o = orderBTMove[i];\n      orderBTMove[i] = orderBTMove[imin];\n      orderBTMove[imin] = o;\n    }\n    /*\n       for (int i = 0; i < nb; 
i++)\n       if (order (moves [i]) == 1) {\n       BTMove m = moves [0];\n       moves [0] = moves [i];\n       moves [i] = m;\n       }\n       for (int i = 0; i < nb; i++)\n       if (order (moves [i]) == 0) {\n       BTMove m = moves [0];\n       moves [0] = moves [i];\n       moves [i] = m;\n       }\n     */\n    int codePrevious = 0;\n    if (length > 0)\n      // codePrevious = rollout [length].numberPrevious () + 12 * BTDx * BTDy;\n      codePrevious = (rollout[length].x1 + BTDx * rollout[length].y1) * 2 * 2 *\n                     (3 * BTDx * BTDy);\n    for (int i = 0; i < nb; i++) {\n      codePrevious = 0;\n      if (color == White) {\n        if (moves[i].y1 > 0) {\n          if (moves[i].x1 == 0)\n            codePrevious += 4;\n          else\n            codePrevious += board[moves[i].x1 - 1][moves[i].y1 - 1];\n          codePrevious += 4 * board[moves[i].x1][moves[i].y1 - 1];\n          if (moves[i].x1 == BTDx - 1)\n            codePrevious += 16 * 4;\n          else\n            codePrevious += 16 * board[moves[i].x1 + 1][moves[i].y1 - 1];\n        }\n      } else {\n        if (moves[i].y1 < BTDy - 1) {\n          if (moves[i].x1 == 0)\n            codePrevious += 4;\n          else\n            codePrevious += board[moves[i].x1 - 1][moves[i].y1 + 1];\n          codePrevious += 4 * board[moves[i].x1][moves[i].y1 + 1];\n          if (moves[i].x1 == BTDx - 1)\n            codePrevious += 16 * 4;\n          else\n            codePrevious += 16 * board[moves[i].x1 + 1][moves[i].y1 + 1];\n        }\n      }\n      moves[i].codePrevious = codePrevious * 12 * BTDx * BTDy;\n    }\n    return nb;\n  }\n\n  int playout(int joueur) {\n    BTMove listeCoups[BTMaxLegalBTMoves];\n    while (true) {\n      if (terminal())\n        return (score() > 0);\n      int nb = legalBTMoves(joueur, listeCoups);\n      int n = rand() % nb;\n      play(listeCoups[n]);\n      if (length >= BTMaxPlayoutLength - 20) {\n        return 0;\n      }\n      joueur = 
opponent(joueur);\n    }\n  }\n\n  float discountedPlayout(int joueur, int maxLength = BTMaxPlayoutLength - 20) {\n    BTMove listeCoups[BTMaxLegalBTMoves];\n    while (true) {\n      if (terminal()) {\n        if (score() > 0)\n          return 1.0 / (length + 1);\n        else\n          return -1.0 / (length + 1);\n      }\n      int nb = legalBTMoves(joueur, listeCoups);\n      int n = rand() % nb;\n      play(listeCoups[n]);\n      if (length >= maxLength) {\n        return 0;\n      }\n      joueur = opponent(joueur);\n    }\n  }\n};\n"
  },
  {
    "path": "src/games/breakthrough_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"time.h\"\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\n#include \"breakthrough.h\"\n//#include \"game.h\"\n#include \"../core/state.h\"\n\n#include \"fmt/printf.h\"\n\nconst int StateForBreakthroughNumActions = 64 * 3;\nconst int StateForBreakthroughX = 2;\nconst int StateForBreakthroughY = 8;\nconst int StateForBreakthroughZ = 8;\nconst int BTMaxLegalMoves = 48;\n\ntemplate <bool fixedPolicy = true>\nclass StateForBreakthrough : public core::State, BTBoard {\n public:\n  StateForBreakthrough(int seed)\n      : State(seed) {\n  }\n\n  virtual ~StateForBreakthrough() {\n  }\n\n  virtual void Initialize() override {\n    // People implementing classes should not have much to do in _moves; just\n    // _moves.clear().\n    _moves.clear();\n    // std::cout << \"OTGBreakthrough initialize\" << std::endl;\n\n    // the features are just one number between 0 and 1 (the distance,\n    // normalized).\n    _featSize[0] = StateForBreakthroughX;\n    _featSize[1] = StateForBreakthroughY;\n    _featSize[2] = StateForBreakthroughZ;\n\n    // size of the output of the neural network; this should cover the positions\n    // of actions (above).\n    _actionSize[0] = 3;\n    _actionSize[1] = 8;\n    _actionSize[2] = 8;\n\n    // _hash is an unsigned int, it has to be *unique*.\n    _hash = 0;\n    _status = GameStatus::player0Turn;\n    // std::cout << \"restart!\" << std::endl;\n    // _features is a vector representing the current state. It can\n    // (must...) be large for complex games; here just one number\n    // between 0 and 1. 
trivial case in dimension 1.\n    _features.resize(StateForBreakthroughX * StateForBreakthroughY *\n                     StateForBreakthroughZ);\n    /*\n        // _features[:_hash] = 1\n        for (int i = 0; i < DISTANCE; i++) {\n          _features[i] = (float(_hash) > float(i)) ? 1. : 0.;\n        }\n    */\n    init();\n    findFeatures();\n    findActions(White);\n    fillFullFeatures();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForBreakthrough>(*this);\n  }\n\n  std::string actionDescription(const _Action& action) const override {\n    int dir = action.GetX();\n    int x = action.GetY();\n    int y = action.GetZ();\n    int tx = x;\n    int ty = y + (turn == Black ? 1 : -1);\n    if (dir == 0) {\n      --tx;\n    } else if (dir == 2) {\n      ++tx;\n    }\n    return fmt::sprintf(\"%c%d%c%d\", 'a' + x, BTDy - y, 'a' + tx, BTDy - ty);\n  }\n\n  void findActions(int color) {\n    BTMove moves[BTMaxLegalMoves];\n    int nb = legalBTMoves(color, moves);\n\n    _legalActions.clear();\n    for (int i = 0; i < nb; i++) {\n      int x = moves[i].x;\n      int y = moves[i].y;\n      int dir = 2;\n      if (moves[i].x1 == x - 1)\n        dir = 0;\n      else if (moves[i].x1 == x)\n        dir = 1;\n      _legalActions.emplace_back(i, dir, x, y);\n    }\n  }\n\n  void findFeatures() {\n    if ((_status == GameStatus::player0Win) ||\n        (_status == GameStatus::player1Win))\n      return;\n    // init\n    const size_t numFeats =\n        StateForBreakthroughX * StateForBreakthroughY * StateForBreakthroughZ;\n    std::fill(_features.begin(), _features.begin() + numFeats, 0.);\n    for (int i = 0; i < 64; i++) {\n      auto value = fixedPolicy ? 
board[i / 8][i % 8] : board[i % 8][i / 8];\n      if (value == Black)\n        _features[i] = 1;\n      else if (value == White)\n        _features[64 + i] = 1;\n    }\n  }\n  // The action just decreases the distance and swaps the turn to play.\n  virtual void ApplyAction(const _Action& action) override {\n    BTMove m;\n    // print(stdout);\n    if (_status == GameStatus::player0Turn) {  // White\n      m.color = White;\n      m.x = action.GetY();\n      m.y = action.GetZ();\n      if (action.GetX() == 0) {\n        m.x1 = m.x - 1;\n        m.y1 = m.y - 1;\n      } else if (action.GetX() == 1) {\n        m.x1 = m.x;\n        m.y1 = m.y - 1;\n      } else if (action.GetX() == 2) {\n        m.x1 = m.x + 1;\n        m.y1 = m.y - 1;\n      }\n      play(m);\n      findActions(Black);\n      if (won(White))\n        _status = GameStatus::player0Win;\n      else\n        _status = GameStatus::player1Turn;\n    } else {\n      // Black\n      m.color = Black;\n      m.x = action.GetY();\n      m.y = action.GetZ();\n      if (action.GetX() == 0) {\n        m.x1 = m.x - 1;\n        m.y1 = m.y + 1;\n      } else if (action.GetX() == 1) {\n        m.x1 = m.x;\n        m.y1 = m.y + 1;\n      } else if (action.GetX() == 2) {\n        m.x1 = m.x + 1;\n        m.y1 = m.y + 1;\n      }\n      play(m);\n      findActions(White);\n      if (won(Black))\n        _status = GameStatus::player1Win;\n      else\n        _status = GameStatus::player0Turn;\n    }\n    findFeatures();\n    _hash = hash;\n    fillFullFeatures();\n  }\n\n  // For this trivial example we just compare to random play. Ok, this is not\n  // really a good action.\n  // By the way we need a good default DoGoodAction, e.g. 
one-ply at least.\n  // FIXME\n  virtual void DoGoodAction() override {\n\n    int i = rand() % _legalActions.size();\n    _Action a = _legalActions[i];\n    ApplyAction(a);\n  }\n\n  std::string stateDescription() const override {\n    std::string s;\n    for (int i = 0; i < BTDy; i++) {\n      s += std::to_string(BTDy - i);\n      for (int j = 0; j < BTDx; j++)\n        if (board[j][i] == Empty)\n          s += \" +\";\n        else if (board[j][i] == Black)\n          s += \" @\";\n        else\n          s += \" O\";\n      s += \" \\n\";\n    }\n    s += \"  a b c d e f g h \\n\";\n    return s;\n  }\n};\n"
  },
  {
    "path": "src/games/chess.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"chess.h\"\n\nnamespace chess {\n\nstruct ZobrishHash {\n  std::array<std::array<uint64_t, 13 * 2>, 12 * 12> hash;\n  ZobrishHash() {\n    std::mt19937_64 rng(std::random_device{}() + 42);\n    rng.discard(1024);\n    for (auto& v : hash) {\n      for (auto& v2 : v) {\n        v2 = rng();\n      }\n    }\n  }\n};\n\nstatic inline ZobrishHash zhash;\n\nvoid ChessBoard::init() {\n  board.fill(0);\n  for (int i = 0; i != boardDim; ++i) {\n    board[i] = OOB;\n    board[i + boardDim] = OOB;\n    board[boardDim * i] = OOB;\n    board[boardDim * i + 1] = OOB;\n    board[boardDim * (boardDim - 1) + i] = OOB;\n    board[boardDim * (boardDim - 2) + i] = OOB;\n    board[boardDim * i + boardDim - 1] = OOB;\n    board[boardDim * i + boardDim - 2] = OOB;\n  }\n\n  board[boardDim * 2 + 2] = WHITE | ROOK;\n  board[boardDim * 2 + 3] = WHITE | KNIGHT;\n  board[boardDim * 2 + 4] = WHITE | BISHOP;\n  board[boardDim * 2 + 5] = WHITE | QUEEN;\n  board[boardDim * 2 + 6] = WHITE | KING;\n  board[boardDim * 2 + 7] = WHITE | BISHOP;\n  board[boardDim * 2 + 8] = WHITE | KNIGHT;\n  board[boardDim * 2 + 9] = WHITE | ROOK;\n  board[boardDim * (boardDim - 3) + 2] = BLACK | ROOK;\n  board[boardDim * (boardDim - 3) + 3] = BLACK | KNIGHT;\n  board[boardDim * (boardDim - 3) + 4] = BLACK | BISHOP;\n  board[boardDim * (boardDim - 3) + 5] = BLACK | QUEEN;\n  board[boardDim * (boardDim - 3) + 6] = BLACK | KING;\n  board[boardDim * (boardDim - 3) + 7] = BLACK | BISHOP;\n  board[boardDim * (boardDim - 3) + 8] = BLACK | KNIGHT;\n  board[boardDim * (boardDim - 3) + 9] = BLACK | ROOK;\n  for (int i = 0; i != 8; ++i) {\n    board[boardDim * 3 + 2 + i] = WHITE | PAWN;\n    board[boardDim * (boardDim - 4) + 2 + i] = BLACK | PAWN;\n  }\n\n  moveflags = castleleft | 
castleright;\n  moveflags |= (castleleft | castleright) << 1;\n  turn = 0;\n  moves.clear();\n  done = false;\n  winner = -1;\n  fiftyMoveCounter = 100;\n}\n\nvoid ChessBoard::findMoves() {\n\n  int color = turn;\n  const int ahead = color == 0 ? boardDim : -boardDim;\n\n  char colorbit = 1 << (5 + color);\n  char opponentcolorbit = 1 << (5 + (color ^ 1));\n  char occupied = colorbit | OOB;\n\n  moves.clear();\n\n  size_t end = boardDim * (boardDim - 2) - 2;\n  size_t king = 0;\n  for (size_t i = 2 + boardDim * 2; i != end; ++i) {\n    if (board[i] == (colorbit | KING)) {\n      king = i;\n    }\n  }\n  size_t kingx = king % boardDim;\n  size_t kingy = king / boardDim;\n\n  auto checkcheck = [&]() {\n    uint8_t pawn = opponentcolorbit | PAWN;\n    if ((board[king + ahead + 1] & pawn) == pawn)\n      return true;\n    if ((board[king + ahead - 1] & pawn) == pawn)\n      return true;\n    uint8_t rook = opponentcolorbit | ROOK;\n    for (size_t xx = kingx + 1, ii = king + 1; xx != boardDim - 2; ++xx, ++ii) {\n      if ((board[ii] & rook) == rook)\n        return true;\n      else if (board[ii] != EMPTY)\n        break;\n    }\n    for (size_t xx = kingx - 1, ii = king - 1; xx != 1; --xx, --ii) {\n      if ((board[ii] & rook) == rook)\n        return true;\n      else if (board[ii] != EMPTY)\n        break;\n    }\n    for (size_t xx = kingy - 1, ii = king - boardDim; xx != 1;\n         --xx, ii -= boardDim) {\n      if ((board[ii] & rook) == rook)\n        return true;\n      else if (board[ii] != EMPTY)\n        break;\n    }\n    for (size_t xx = kingy + 1, ii = king + boardDim; xx != boardDim - 2;\n         ++xx, ii += boardDim) {\n      if ((board[ii] & rook) == rook)\n        return true;\n      else if (board[ii] != EMPTY)\n        break;\n    }\n    uint8_t bishop = opponentcolorbit | BISHOP;\n    for (size_t xx = std::min(kingx, kingy) - 1, ii = king - 1 - boardDim;\n         xx != 1; --xx, ii += -1 - boardDim) {\n      if ((board[ii] & bishop) == bishop)\n  
      return true;\n      else if (board[ii] != EMPTY)\n        break;\n    }\n    for (size_t xx = std::min(kingx, boardDim - 1 - kingy) - 1,\n                ii = king - 1 + boardDim;\n         xx != 1; --xx, ii += -1 + boardDim) {\n      if ((board[ii] & bishop) == bishop)\n        return true;\n      else if (board[ii] != EMPTY)\n        break;\n    }\n    for (size_t xx = std::min(boardDim - 1 - kingx, boardDim - 1 - kingy) - 1,\n                ii = king + 1 + boardDim;\n         xx != 1; --xx, ii += 1 + boardDim) {\n      if ((board[ii] & bishop) == bishop)\n        return true;\n      else if (board[ii] != EMPTY)\n        break;\n    }\n    for (size_t xx = std::min(boardDim - 1 - kingx, kingy) - 1,\n                ii = king + 1 - boardDim;\n         xx != 1; --xx, ii += 1 - boardDim) {\n      if ((board[ii] & bishop) == bishop)\n        return true;\n      else if (board[ii] != EMPTY)\n        break;\n    }\n    uint8_t knight = opponentcolorbit | KNIGHT;\n    if (board[king + ahead + ahead - 1] == knight)\n      return true;\n    if (board[king + ahead + ahead + 1] == knight)\n      return true;\n    if (board[king - ahead - ahead - 1] == knight)\n      return true;\n    if (board[king - ahead - ahead + 1] == knight)\n      return true;\n    if (board[king - 1 - 1 - ahead] == knight)\n      return true;\n    if (board[king - 1 - 1 + ahead] == knight)\n      return true;\n    if (board[king + 1 + 1 - ahead] == knight)\n      return true;\n    if (board[king + 1 + 1 + ahead] == knight)\n      return true;\n    return false;\n  };\n\n  for (size_t i = 2 + boardDim * 2; i != end; ++i) {\n    if ((board[i] & colorbit) == 0)\n      continue;\n    int piece = board[i] & 0xf;\n\n    unsigned char* relative = &board[i];\n\n    auto check = [&](size_t to) {\n      auto dst = board[to];\n      auto src = board[i];\n      board[to] = src;\n      board[i] = EMPTY;\n      bool r;\n      if (piece == KING) {\n        uint8_t knight = opponentcolorbit | KNIGHT;\n        
uint8_t pawn = opponentcolorbit | PAWN;\n        if ((board[to + ahead] & 0xf) == KING)\n          r = true;\n        else if ((board[to + ahead + 1] & pawn) == pawn)\n          r = true;\n        else if ((board[to + ahead - 1] & pawn) == pawn)\n          r = true;\n        else if ((board[to + 1] & 0xf) == KING)\n          r = true;\n        else if ((board[to - 1] & 0xf) == KING)\n          r = true;\n        else if ((board[to - ahead] & 0xf) == KING)\n          r = true;\n        else if ((board[to - ahead + 1] & 0xf) == KING)\n          r = true;\n        else if ((board[to - ahead - 1] & 0xf) == KING)\n          r = true;\n        else if (board[to + ahead + ahead - 1] == knight)\n          r = true;\n        else if (board[to + ahead + ahead + 1] == knight)\n          r = true;\n        else if (board[to - ahead - ahead - 1] == knight)\n          r = true;\n        else if (board[to - ahead - ahead + 1] == knight)\n          r = true;\n        else if (board[to - 1 - 1 - ahead] == knight)\n          r = true;\n        else if (board[to - 1 - 1 + ahead] == knight)\n          r = true;\n        else if (board[to + 1 + 1 - ahead] == knight)\n          r = true;\n        else if (board[to + 1 + 1 + ahead] == knight)\n          r = true;\n        else {\n          king = to;\n          kingx = to % boardDim;\n          kingy = to / boardDim;\n          r = checkcheck();\n          king = i;\n          kingx = i % boardDim;\n          kingy = i / boardDim;\n        }\n      } else {\n        r = checkcheck();\n      }\n      board[i] = src;\n      board[to] = dst;\n      return r;\n    };\n\n    auto addMove = [&](size_t to) {\n      if (!check(to)) {\n        if (piece == PAWN && relative[ahead + ahead] == OOB) {\n          moves.push_back(i | (to << 17));\n          moves.push_back(i | (to << 17) | 0x8000);\n          moves.push_back(i | (to << 17) | 0x10000);\n          moves.push_back(i | (to << 17) | 0x18000);\n        } else {\n          moves.push_back(i | 
(to << 17));\n        }\n      }\n    };\n\n    auto test = [&](size_t to) {\n      board.at(i + to);\n      if ((relative[to] & occupied) == 0) {\n        addMove(i + to);\n        return true;\n      } else {\n        return false;\n      }\n    };\n\n    auto tryMove = [&](size_t to) {\n      board.at(to);\n      int v = board[to];\n      if (v != EMPTY) {\n        if (v & occupied)\n          return false;\n        addMove(to);\n        return false;\n      }\n      addMove(to);\n      return true;\n    };\n\n    switch (piece) {\n    case PAWN:\n      if (relative[ahead] == EMPTY) {\n        addMove(i + ahead);\n        if (color == 0 ? i - boardDim * 3 < boardDim\n                       : i - boardDim * (boardDim - 4) < boardDim) {\n          if (relative[ahead + ahead] == EMPTY && !check(i + ahead + ahead)) {\n            moves.push_back(i | 0x8000 | ((i + ahead + ahead) << 17));\n          }\n        }\n      }\n      if ((relative[ahead + 1] & opponentcolorbit) == opponentcolorbit) {\n        addMove(i + ahead + 1);\n      }\n      if ((relative[ahead - 1] & opponentcolorbit) == opponentcolorbit) {\n        addMove(i + ahead - 1);\n      }\n      if (moveflags & 0x8000) {\n        size_t x = moveflags & 0x7fff;\n        if (i + 1 == x) {\n          int tmp = board[i + 1];\n          board[i + 1] = EMPTY;\n          if (!check(i + ahead + 1)) {\n            moves.push_back(i | 0x10000 | ((i + ahead + 1) << 17));\n          }\n          board[i + 1] = tmp;\n        } else if (i - 1 == x) {\n          int tmp = board[i - 1];\n          board[i - 1] = EMPTY;\n          if (!check(i + ahead - 1)) {\n            moves.push_back(i | 0x10000 | ((i + ahead - 1) << 17));\n          }\n          board[i - 1] = tmp;\n        }\n      }\n      break;\n    case KNIGHT:\n      test(ahead + ahead + 1);\n      test(ahead + ahead - 1);\n      test(-ahead - ahead - 1);\n      test(-ahead - ahead + 1);\n      test(-1 - 1 - ahead);\n      test(-1 - 1 + ahead);\n      test(+1 + 
1 - ahead);\n      test(+1 + 1 + ahead);\n      break;\n    case KING:\n      test(ahead);\n      test(ahead + 1);\n      test(ahead - 1);\n      test(1);\n      test(-1);\n      test(-ahead);\n      test(-ahead + 1);\n      test(-ahead - 1);\n      if ((moveflags & (castleleft << turn)) && !checkcheck()) {\n        size_t x = i % boardDim;\n        for (size_t xx = x - 1, ii = i - 1;; --xx, --ii) {\n          if (xx == 2) {\n            if (!check(i - 1) && !check(i - 2)) {\n              moves.push_back(i | 0x8000 | ((i - 1 - 1) << 17));\n            }\n            break;\n          }\n          if (board[ii] != EMPTY)\n            break;\n        }\n      }\n      if ((moveflags & (castleright << turn)) && !checkcheck()) {\n        size_t x = i % boardDim;\n        for (size_t xx = x + 1, ii = i + 1;; ++xx, ++ii) {\n          if (xx == boardDim - 3) {\n            if (!check(i + 1) && !check(i + 2)) {\n              moves.push_back(i | 0x8000 | ((i + 1 + 1) << 17));\n            }\n            break;\n          }\n          if (board[ii] != EMPTY)\n            break;\n        }\n      }\n      break;\n    default:\n      size_t x = i % boardDim;\n      size_t y = i / boardDim;\n      switch (piece) {\n      case QUEEN:\n      case ROOK:\n        for (size_t xx = x + 1, ii = i + 1; xx != boardDim - 2 && tryMove(ii);\n             ++xx, ++ii)\n          ;\n        for (size_t xx = x - 1, ii = i - 1; xx != 1 && tryMove(ii); --xx, --ii)\n          ;\n        for (size_t xx = y - 1, ii = i - boardDim; xx != 1 && tryMove(ii);\n             --xx, ii -= boardDim)\n          ;\n        for (size_t xx = y + 1, ii = i + boardDim;\n             xx != boardDim - 2 && tryMove(ii); ++xx, ii += boardDim)\n          ;\n        if (piece != QUEEN)\n          break;\n        [[fallthrough]];\n      case BISHOP:\n        for (size_t xx = std::min(x, y) - 1, ii = i - 1 - boardDim;\n             xx != 1 && tryMove(ii); --xx, ii += -1 - boardDim)\n          ;\n        for (size_t xx = 
std::min(x, boardDim - 1 - y) - 1,\n                    ii = i - 1 + boardDim;\n             xx != 1 && tryMove(ii); --xx, ii += -1 + boardDim)\n          ;\n        for (size_t xx = std::min(boardDim - 1 - x, boardDim - 1 - y) - 1,\n                    ii = i + 1 + boardDim;\n             xx != 1 && tryMove(ii); --xx, ii += 1 + boardDim)\n          ;\n        for (size_t xx = std::min(boardDim - 1 - x, y) - 1,\n                    ii = i + 1 - boardDim;\n             xx != 1 && tryMove(ii); --xx, ii += 1 - boardDim)\n          ;\n        break;\n      }\n    }\n  }\n\n  if (moves.empty()) {\n    done = true;\n    if (checkcheck()) {\n      winner = turn ^ 1;\n    }\n  } else if (fiftyMoveCounter <= 0) {\n    done = true;\n    winner = -1;\n  }\n}\n\nvoid ChessBoard::move(uint_fast32_t move) {\n  size_t to = move >> 17;\n  size_t from = move & 0x7fff;\n\n  int v = board[from];\n  int piece = v & 0xf;\n\n  --fiftyMoveCounter;\n\n  switch (piece) {\n  case KING:\n    moveflags &= ~((castleleft | castleright) << turn);\n    break;\n  case ROOK: {\n    size_t x = from % boardDim;\n    size_t y = from / boardDim;\n    if (x == 2 || x == boardDim - 3) {\n      if (y == (turn == 0 ? 2 : boardDim - 3)) {\n        if (x == 2)\n          moveflags &= ~(castleleft << turn);\n        else\n          moveflags &= ~(castleright << turn);\n      }\n    }\n    break;\n  }\n  case PAWN:\n    fiftyMoveCounter = 100;\n    for (auto& v : repetitions) {\n      v.clear();\n    }\n    if (board[to + (turn == 0 ? 
boardDim : -boardDim)] == OOB) {\n      v &= ~PAWN;\n      switch ((move >> 15) & 3) {\n      case 0:\n        v |= QUEEN;\n        break;\n      case 1:\n        v |= ROOK;\n        break;\n      case 2:\n        v |= BISHOP;\n        break;\n      case 3:\n        v |= KNIGHT;\n        break;\n      }\n      move = 0;\n    }\n    break;\n  }\n\n  moveflags &= ~0xffff;\n  if ((move & 0x8000) != 0) {\n    if (piece == PAWN) {\n      moveflags |= to | 0x8000;\n    } else if (piece == KING) {\n      size_t y = from / boardDim;\n      if (to < from) {\n        std::swap(board[y * boardDim + 2], board[to + 1]);\n      } else {\n        std::swap(board[y * boardDim + boardDim - 3], board[to - 1]);\n      }\n    }\n  } else {\n    if ((move & 0x10000) != 0) {\n      int dx = to % boardDim - from % boardDim;\n      board.at(from + dx) = EMPTY;\n\n      fiftyMoveCounter = 100;\n      for (auto& v : repetitions) {\n        v.clear();\n      }\n    }\n  }\n\n  if (board[to] != EMPTY) {\n    fiftyMoveCounter = 100;\n    for (auto& v : repetitions) {\n      v.clear();\n    }\n  }\n  if ((board[to] & 0xf) == ROOK) {\n    size_t x = to % boardDim;\n    size_t y = to / boardDim;\n    if (x == 2 || x == boardDim - 3) {\n      if (y == (turn == 1 ? 
2 : boardDim - 3)) {\n        if (x == 2)\n          moveflags &= ~(castleleft << (turn ^ 1));\n        else\n          moveflags &= ~(castleright << (turn ^ 1));\n      }\n    }\n  }\n\n  hash ^= zhash.hash.at(from).at(13 * turn + piece);\n  hash ^= zhash.hash.at(to).at(13 * turn + piece);\n\n  board[from] = EMPTY;\n  board[to] = v;\n\n  turn ^= 1;\n\n  uint64_t fullhash = hash ^ moveflags ^ zhash.hash.at(5).at(turn);\n  size_t index = fullhash % repetitions.size();\n  bool found = false;\n  for (auto& v : repetitions[index]) {\n    if (v.first == fullhash) {\n      ++v.second;\n      if (v.second >= 3) {\n        done = true;\n        winner = -1;\n      }\n      found = true;\n      break;\n    }\n  }\n  if (!found) {\n    repetitions[index].emplace_back(fullhash, 1);\n  }\n}\n\nstd::string ChessBoard::moveString(uint_fast32_t move) const {\n  size_t from = move & 0x7fff;\n  size_t fx = from % boardDim - 2;\n  size_t fy = from / boardDim - 2;\n  size_t to = move >> 17;\n  size_t tx = to % boardDim - 2;\n  size_t ty = to / boardDim - 2;\n  int piece = board[from];\n  bool disx = false;\n  bool disy = false;\n  for (auto& v : moves) {\n    size_t vfrom = v & 0x7fff;\n    size_t vto = v >> 17;\n    if (vfrom != from && board[vfrom] == piece && vto == to) {\n      size_t vx = vfrom % boardDim - 2;\n      if (vx == fx) {\n        disy = true;\n      } else {\n        disx = true;\n      }\n    }\n  }\n\n  std::string str;\n  switch (piece & 0xf) {\n  case KNIGHT:\n    str += 'N';\n    break;\n  case BISHOP:\n    str += 'B';\n    break;\n  case ROOK:\n    str += 'R';\n    break;\n  case QUEEN:\n    str += 'Q';\n    break;\n  case KING:\n    str += 'K';\n    break;\n  }\n  bool promotion = false;\n  if ((piece & 0xf) == PAWN) {\n    if (board[to + (turn == 0 ? 
boardDim : -boardDim)] == OOB) {\n      promotion = true;\n    }\n  } else if ((piece & 0xf) == KING) {\n    if ((move & 0x8000) != 0) {\n      if (to < from) {\n        return \"O-O-O\";\n      } else {\n        return \"O-O\";\n      }\n    }\n  }\n  bool capture = board[to] != EMPTY;\n  if (!promotion && (move & 0x10000) != 0) {\n    capture = true;\n  }\n  if (capture && str.empty()) {\n    disx = true;\n  }\n  if (disx) {\n    str += char('a' + fx);\n  }\n  if (disy) {\n    str += char('1' + fy);\n  }\n  if (capture != EMPTY) {\n    str += 'x';\n  }\n  str += char('a' + tx);\n  str += char('1' + ty);\n  if (promotion) {\n    switch ((move >> 15) & 3) {\n    case 0:\n      str += \"=Q\";\n      break;\n    case 1:\n      str += \"=R\";\n      break;\n    case 2:\n      str += \"=B\";\n      break;\n    case 3:\n      str += \"=N\";\n      break;\n    }\n  }\n  return str;\n}\n\n}  // namespace chess\n"
  },
  {
    "path": "src/games/chess.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\n#include \"../core/state.h\"\n\nnamespace chess {\n\nstruct ChessBoard {\n\n  static const size_t boardSize = 8;\n  static const size_t boardDim = boardSize + 4;\n\n  std::array<uint8_t, boardDim * boardDim> board;\n\n  std::vector<uint32_t> moves;\n  uint_fast32_t moveflags = 0;\n  std::array<std::vector<std::pair<uint64_t, char>>, 16> repetitions;\n  uint64_t hash = 0;\n\n  static const uint32_t castleleft = 1u << 28;\n  static const uint32_t castleright = 1u << 30;\n\n  int turn = 0;\n\n  static const uint8_t EMPTY = 0;\n  static const uint8_t PAWN = 1;\n  static const uint8_t KNIGHT = 2;\n  static const uint8_t BISHOP = 4;\n  static const uint8_t ROOK = 8;\n  static const uint8_t QUEEN = 12;\n  static const uint8_t KING = 3;\n  static const uint8_t OOB = 0x80;\n\n  static const uint8_t WHITE = 1 << 5;\n  static const uint8_t BLACK = 2 << 5;\n\n  void init();\n\n  void findMoves();\n\n  void move(uint_fast32_t move);\n\n  std::string moveString(uint_fast32_t move) const;\n\n  bool done = false;\n  int winner = -1;\n  int fiftyMoveCounter = 0;\n};\n\nclass State : public core::State {\n public:\n  State(int seed)\n      : core::State(seed) {\n  }\n\n  ChessBoard board;\n\n  std::vector<size_t> moves;\n\n  static const int boardSize = 8;\n\n  virtual void Initialize() override {\n    _moves.clear();\n    _hash = 2166136261u;\n    _status = GameStatus::player0Turn;\n    _featSize[0] = 12;\n    _featSize[1] = boardSize;\n    _featSize[2] = boardSize;\n    _actionSize[0] = 6;\n    _actionSize[1] = boardSize;\n    _actionSize[2] = boardSize;\n    _features.clear();\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n    
std::fill(_features.begin(), _features.end(), 0.0f);\n    board.init();\n    board.findMoves();\n    featurize();\n    findActions();\n    fillFullFeatures();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<State>(*this);\n  }\n\n  virtual std::string stateDescription() const override {\n    std::string str;\n    const size_t boardDim = 8 + 4;\n    int y = 8;\n    for (size_t iy = 2 + boardDim * 2;;) {\n      str += '0' + y;\n      --y;\n      str += ' ';\n      size_t ii = (boardDim - 1 - iy / boardDim) * boardDim + 2;\n      for (size_t i = 0; i != 8; ++i, ++ii) {\n        int v = board.board[ii];\n        if (v & ChessBoard::WHITE) {\n          switch (v & 0xf) {\n          case ChessBoard::PAWN:\n            str += 'P';\n            break;\n          case ChessBoard::KNIGHT:\n            str += 'N';\n            break;\n          case ChessBoard::BISHOP:\n            str += 'B';\n            break;\n          case ChessBoard::ROOK:\n            str += 'R';\n            break;\n          case ChessBoard::QUEEN:\n            str += 'Q';\n            break;\n          case ChessBoard::KING:\n            str += 'K';\n            break;\n          default:\n            str += '.';\n          }\n        } else {\n          switch (v & 0xf) {\n          case ChessBoard::PAWN:\n            str += 'p';\n            break;\n          case ChessBoard::KNIGHT:\n            str += 'n';\n            break;\n          case ChessBoard::BISHOP:\n            str += 'b';\n            break;\n          case ChessBoard::ROOK:\n            str += 'r';\n            break;\n          case ChessBoard::QUEEN:\n            str += 'q';\n            break;\n          case ChessBoard::KING:\n            str += 'k';\n            break;\n          default:\n            str += '.';\n          }\n        }\n        str += ' ';\n      }\n      str += '\\n';\n      iy += 8 + 2;\n      if (iy == 12 + boardDim * 9) {\n        break;\n      }\n   
   iy += 2;\n    }\n    str += \"  a b c d e f g h\";\n    return str;\n  }\n\n  virtual std::string actionDescription(const _Action& action) const override {\n    auto move = board.moves.at(action.GetIndex());\n    return board.moveString(move);\n  }\n\n  void featurize() {\n    const size_t boardDim = 8 + 4;\n    size_t findex = 0;\n    std::fill(_features.begin(), _features.begin() + boardSize * boardSize * 12,\n              0.0f);\n    for (size_t ii = 2 + boardDim * 2;;) {\n      for (size_t i = 0; i != 8; ++i, ++ii) {\n        int v = board.board[ii];\n        if (v) {\n          size_t offset = 0;\n          switch (v & 0xf) {\n          case ChessBoard::PAWN:\n            offset = 0;\n            break;\n          case ChessBoard::KNIGHT:\n            offset = 1;\n            break;\n          case ChessBoard::BISHOP:\n            offset = 2;\n            break;\n          case ChessBoard::ROOK:\n            offset = 3;\n            break;\n          case ChessBoard::QUEEN:\n            offset = 4;\n            break;\n          case ChessBoard::KING:\n            offset = 5;\n            break;\n          }\n          if (v & ChessBoard::BLACK) {\n            offset += 6;\n          }\n          _features[boardSize * boardSize * offset + findex] = 1.0f;\n        }\n        ++findex;\n      }\n      ii += 2;\n      if (ii == 12 + boardDim * 9) {\n        break;\n      }\n      ii += 2;\n    }\n  }\n\n  void findActions() {\n    clearActions();\n\n    const size_t boardDim = 12;\n\n    for (auto move : board.moves) {\n      size_t to = move >> 17;\n      size_t from = move & 0x7fff;\n\n      int v = board.board[from];\n\n      size_t offset = 0;\n      switch (v & 0xf) {\n      case ChessBoard::PAWN:\n        offset = 0;\n        break;\n      case ChessBoard::KNIGHT:\n        offset = 1;\n        break;\n      case ChessBoard::BISHOP:\n        offset = 2;\n        break;\n      case ChessBoard::ROOK:\n        offset = 3;\n        break;\n      case 
ChessBoard::QUEEN:\n        offset = 4;\n        break;\n      case ChessBoard::KING:\n        offset = 5;\n        break;\n      }\n\n      size_t x = to % boardDim - 2;\n      size_t y = to / boardDim - 2;\n\n      addAction(offset, y, x);\n    }\n  }\n\n  virtual void ApplyAction(const _Action& action) override {\n    auto move = board.moves.at(action.GetIndex());\n\n    board.move(move);\n    board.findMoves();\n    findActions();\n\n    if (board.done) {\n      if (board.winner == 0) {\n        _status = GameStatus::player0Win;\n      } else if (board.winner == 1) {\n        _status = GameStatus::player1Win;\n      } else {\n        _status = GameStatus::tie;\n      }\n    } else {\n      _status =\n          board.turn == 0 ? GameStatus::player0Turn : GameStatus::player1Turn;\n      featurize();\n    }\n    fillFullFeatures();\n  }\n\n  virtual void DoGoodAction() override {\n    return DoRandomAction();\n  }\n};\n\n}  // namespace chess\n"
  },
  {
    "path": "src/games/chinesecheckers.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: 葉士誠 (SHI-CHENG YE)\n// affiliation: National Dong Hwa University(NDHU)\n// email: 410521206@gms.ndhu.edu.tw / 0930164@gmail.com\n\n#include \"chinesecheckers.h\"\n\nnamespace ChineseCheckers {\n\nstring Board::sprintBoard(string_view prefix,\n                          const set<tuple<int, int>>& markedPos) const {\n  prefix.data();                    // ignore compile warning\n  assert(markedPos.size() == 0UL);  // ignore compile warning\n  return showBoard(false);\n}\n\nstring Board::getPosStr(int p) const {\n  assert(isPosInBoard(p));\n  int tp = p % 10 + 1;\n  string curPOS = to_string(tp);\n  if (p / 10 > 5) {\n    tp = (p - 60) % 61 + 1;\n    curPOS = to_string(tp);\n    return tp + 1 > 10 ? \"G\" + curPOS : \"G0\" + curPOS;\n  } else {\n    switch (p / 10) {\n    case 0:\n      return tp + 1 > 10 ? \"A\" + curPOS : \"A0\" + curPOS;\n    case 1:\n      return tp + 1 > 10 ? \"B\" + curPOS : \"B0\" + curPOS;\n    case 2:\n      return tp + 1 > 10 ? \"C\" + curPOS : \"C0\" + curPOS;\n    case 3:\n      return tp + 1 > 10 ? \"D\" + curPOS : \"D0\" + curPOS;\n    case 4:\n      return tp + 1 > 10 ? \"E\" + curPOS : \"E0\" + curPOS;\n    case 5:\n      return tp + 1 > 10 ? \"F\" + curPOS : \"F0\" + curPOS;\n    default:\n      tp = (p - 60) % 61 + 1;\n      curPOS = to_string(tp);\n      return tp + 1 > 10 ? 
\"G\" + curPOS : \"G0\" + curPOS;\n    }\n  }\n}\n\nstring Board::showBoard(bool prePlay) const {\n  ostringstream ostr;\n  auto f = [&](bool prePlay, int p) {\n    string_view sv;\n    ostringstream ostr;\n    if (!prePlay && p != pass) {  // show cur board\n      sv = getChessSymbol(getChess(p));\n    } else {  // show legal move board\n      if (find(legal.begin(), legal.end(), p) != legal.end()) {\n        if (p == pass)\n          sv = \"You can input \\\"p\\\" or \\\"pass\\\"to pass\\n\";\n        else\n          sv = getPosStr(p);\n      } else\n        sv = getChessSymbol(Chesses::empty);\n    }\n    ostr << sv;\n    return ostr.str();\n  };\n\n  ostr << \"                          A\\n\";\n  ostr << \"                         \" << f(prePlay, P_A1).c_str() << endl;\n  ostr << \"                         / \\\\\\n\";\n  ostr << \"                       \" << f(prePlay, P_A2).c_str() << \"-\"\n       << f(prePlay, P_A3).c_str() << endl;\n  ostr << \"                       / \\\\ / \\\\\\n\";\n  ostr << \"                     \" << f(prePlay, P_A4).c_str() << \"-\"\n       << f(prePlay, P_A5).c_str() << \"-\" << f(prePlay, P_A6).c_str() << endl;\n  ostr << \"                     / \\\\ / \\\\ / \\\\\\n\";\n  ostr << \"                   \" << f(prePlay, P_A7).c_str() << \"-\"\n       << f(prePlay, P_A8).c_str() << \"-\" << f(prePlay, P_A9).c_str() << \"-\"\n       << f(prePlay, P_A10).c_str() << endl;\n  ostr << \"F                  / \\\\ / \\\\ / \\\\ / \\\\                  B\\n\";\n  ostr << \" \" << f(prePlay, P_F1).c_str() << \"-\" << f(prePlay, P_F3).c_str()\n       << \"-\" << f(prePlay, P_F6).c_str() << \"-\" << f(prePlay, P_F10).c_str()\n       << \"-\" << f(prePlay, P_G1).c_str() << \"-\" << f(prePlay, P_G2).c_str()\n       << \"-\" << f(prePlay, P_G3).c_str() << \"-\" << f(prePlay, P_G4).c_str()\n       << \"-\" << f(prePlay, P_G5).c_str() << \"-\" << f(prePlay, P_B7).c_str()\n       << \"-\" << f(prePlay, P_B4).c_str() << \"-\" << f(prePlay, 
P_B2).c_str()\n       << \"-\" << f(prePlay, P_B1).c_str() << endl;\n  ostr << \"   \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ /\\n\";\n  ostr << \"   \" << f(prePlay, P_F2).c_str() << \"-\" << f(prePlay, P_F5).c_str()\n       << \"-\" << f(prePlay, P_F9).c_str() << \"-\" << f(prePlay, P_G6).c_str()\n       << \"-\" << f(prePlay, P_G7).c_str() << \"-\" << f(prePlay, P_G8).c_str()\n       << \"-\" << f(prePlay, P_G9).c_str() << \"-\" << f(prePlay, P_G10).c_str()\n       << \"-\" << f(prePlay, P_G11).c_str() << \"-\" << f(prePlay, P_B8).c_str()\n       << \"-\" << f(prePlay, P_B5).c_str() << \"-\" << f(prePlay, P_B3).c_str()\n       << endl;\n  ostr << \"     \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ /\\n\";\n  ostr << \"     \" << f(prePlay, P_F4).c_str() << \"-\" << f(prePlay, P_F8).c_str()\n       << \"-\" << f(prePlay, P_G12).c_str() << \"-\" << f(prePlay, P_G13).c_str()\n       << \"-\" << f(prePlay, P_G14).c_str() << \"-\" << f(prePlay, P_G15).c_str()\n       << \"-\" << f(prePlay, P_G16).c_str() << \"-\" << f(prePlay, P_G17).c_str()\n       << \"-\" << f(prePlay, P_G18).c_str() << \"-\" << f(prePlay, P_B9).c_str()\n       << \"-\" << f(prePlay, P_B6).c_str() << endl;\n  ostr << \"       \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ /\\n\";\n  ostr << \"       \" << f(prePlay, P_F7).c_str() << \"-\"\n       << f(prePlay, P_G19).c_str() << \"-\" << f(prePlay, P_G20).c_str() << \"-\"\n       << f(prePlay, P_G21).c_str() << \"-\" << f(prePlay, P_G22).c_str() << \"-\"\n       << f(prePlay, P_G23).c_str() << \"-\" << f(prePlay, P_G24).c_str() << \"-\"\n       << f(prePlay, P_G25).c_str() << \"-\" << f(prePlay, P_G26).c_str() << \"-\"\n       << f(prePlay, P_B10).c_str() << endl;\n  ostr << \"         \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ /\\n\";\n  ostr << \"         \" << f(prePlay, P_G27).c_str() << \"-\"\n       << f(prePlay, P_G28).c_str() << 
\"-\" << f(prePlay, P_G29).c_str() << \"-\"\n       << f(prePlay, P_G30).c_str() << \"-\" << f(prePlay, P_G31).c_str() << \"-\"\n       << f(prePlay, P_G32).c_str() << \"-\" << f(prePlay, P_G33).c_str() << \"-\"\n       << f(prePlay, P_G34).c_str() << \"-\" << f(prePlay, P_G35).c_str() << endl;\n  ostr << \"         / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\\\n\";\n  ostr << \"       \" << f(prePlay, P_E10).c_str() << \"-\"\n       << f(prePlay, P_G36).c_str() << \"-\" << f(prePlay, P_G37).c_str() << \"-\"\n       << f(prePlay, P_G38).c_str() << \"-\" << f(prePlay, P_G39).c_str() << \"-\"\n       << f(prePlay, P_G40).c_str() << \"-\" << f(prePlay, P_G41).c_str() << \"-\"\n       << f(prePlay, P_G42).c_str() << \"-\" << f(prePlay, P_G43).c_str() << \"-\"\n       << f(prePlay, P_C7).c_str() << endl;\n  ostr << \"       / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\\\n\";\n  ostr << \"     \" << f(prePlay, P_E6).c_str() << \"-\" << f(prePlay, P_E9).c_str()\n       << \"-\" << f(prePlay, P_G44).c_str() << \"-\" << f(prePlay, P_G45).c_str()\n       << \"-\" << f(prePlay, P_G46).c_str() << \"-\" << f(prePlay, P_G47).c_str()\n       << \"-\" << f(prePlay, P_G48).c_str() << \"-\" << f(prePlay, P_G49).c_str()\n       << \"-\" << f(prePlay, P_G50).c_str() << \"-\" << f(prePlay, P_C8).c_str()\n       << \"-\" << f(prePlay, P_C4).c_str() << endl;\n  ostr << \"     / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\\\n\";\n  ostr << \"   \" << f(prePlay, P_E3).c_str() << \"-\" << f(prePlay, P_E5).c_str()\n       << \"-\" << f(prePlay, P_E8).c_str() << \"-\" << f(prePlay, P_G51).c_str()\n       << \"-\" << f(prePlay, P_G52).c_str() << \"-\" << f(prePlay, P_G53).c_str()\n       << \"-\" << f(prePlay, P_G54).c_str() << \"-\" << f(prePlay, P_G55).c_str()\n       << \"-\" << f(prePlay, P_G56).c_str() << \"-\" << f(prePlay, P_C9).c_str()\n       << \"-\" << f(prePlay, P_C5).c_str() << \"-\" << f(prePlay, 
P_C2).c_str()\n       << endl;\n  ostr << \"   / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\ / \\\\\\n\";\n  ostr << \" \" << f(prePlay, P_E1).c_str() << \"-\" << f(prePlay, P_E2).c_str()\n       << \"-\" << f(prePlay, P_E4).c_str() << \"-\" << f(prePlay, P_E7).c_str()\n       << \"-\" << f(prePlay, P_G57).c_str() << \"-\" << f(prePlay, P_G58).c_str()\n       << \"-\" << f(prePlay, P_G59).c_str() << \"-\" << f(prePlay, P_G60).c_str()\n       << \"-\" << f(prePlay, P_G61).c_str() << \"-\" << f(prePlay, P_C10).c_str()\n       << \"-\" << f(prePlay, P_C6).c_str() << \"-\" << f(prePlay, P_C3).c_str()\n       << \"-\" << f(prePlay, P_C1).c_str() << endl;\n  ostr << \"E                  \\\\ / \\\\ / \\\\ / \\\\ /                  C\\n\";\n  ostr << \"                   \" << f(prePlay, P_D10).c_str() << \"-\"\n       << f(prePlay, P_D9).c_str() << \"-\" << f(prePlay, P_D8).c_str() << \"-\"\n       << f(prePlay, P_D7).c_str() << endl;\n  ostr << \"                     \\\\ / \\\\ / \\\\ /\\n\";\n  ostr << \"                     \" << f(prePlay, P_D6).c_str() << \"-\"\n       << f(prePlay, P_D5).c_str() << \"-\" << f(prePlay, P_D4).c_str() << endl;\n  ostr << \"                       \\\\ / \\\\ /\\n\";\n  ostr << \"                       \" << f(prePlay, P_D3).c_str() << \"-\"\n       << f(prePlay, P_D2).c_str() << endl;\n  ostr << \"                         \\\\ /\\n\";\n  ostr << \"                         \" << f(prePlay, P_D1).c_str() << endl;\n  ostr << \"                          D\\n\";\n  ostr << f(prePlay, pass);\n  return ostr.str();\n}\n\nvoid Game::initialize() {\n  board.initialize();\n  setInitialChesses();\n  hands = 0;\n  prePos = -1;\n  preChess = -1;\n  continue_jump = false;\n  winner = -1;\n  legalMoves.clear();\n  way.clear();\n}\n\nvoid Game::setInitialChesses() {\n  Chess chess;\n  for (int i = 0; i < boardWH * boardWH; i++) {\n    if (i >= P_A1 && i <= P_A10)\n      chess = Chesses::White;\n    else if (i >= P_D1 && 
i <= P_D10)\n      chess = Chesses::Black;\n    else\n      chess = Chesses::empty;\n    board.setChess(i, chess);\n  }\n}\n\nvoid Game::play(Move& m) {\n  if (hands >= maxHands)\n    fprintf(stderr, \"hand %d out of range 0~%d\", hands, maxHands - 1);\n  assert(m.chess == board.getChess(m.x));\n\n  board.setChess(m.x, Chesses::empty);\n  board.setChess(m.tx, m.chess);\n  way.push_back(m);\n}\n\nbool Game::Won() {\n  bool win = true;\n  for (int i = P_D1; i <= P_D10; i++)\n    if (board.getChess(i) != Chesses::White) {\n      win = false;\n      break;\n    }\n  if (win) {\n    winner = Players::player0;\n    return true;\n  }\n\n  win = true;\n  for (int i = P_A1; i <= P_A10; i++)\n    if (board.getChess(i) != Chesses::Black) {\n      win = false;\n      break;\n    }\n\n  if (win) {\n    winner = Players::player1;\n    return true;\n  }\n\n  return false;\n}\n\nvoid Game::findLegalMoves(Player player) {\n  legalMoves.clear();\n  Chess chess = playerToChess(player);\n  for (int i = 0; i < boardWH * boardWH; i++)\n    if (board.getChess(i) == chess) {\n      LegalMove(i, chess, true);\n      LegalMove(i, chess, false);\n    }\n}\n\nvoid Game::LegalMove(int cur, Chess chess, bool isJump) {\n  Move m = Move{};\n  m.x = cur;\n  m.chess = chess;\n  m.method = isJump ? 1 : 0;\n  // byJump is decide to whether change method, cur is transform from (x,y):\n  //   2D -> 1D\n  int next, byJump = isJump ? 6 : 0;\n  for (int md = M_LU + byJump; md <= M_RD + byJump; md++) {\n    next = canGo(cur, md, chess);\n    if (next >= 0 && next <= 120) {\n      m.tx = next;\n      legalMoves.push_back(m);\n    }\n  }\n}\n\nint Game::canGo(int cur, int md, Chess chess) {\n  // six direction, two method, refer to define in ChineseCheckers_graph.h\n  // md%6 is take pure direction\n  int dirct = md % 6, next;\n  bool isJump = md / 6 == 1;\n  if (cur >= 0 && cur <= 120 &&\n      board.getChess(cur) == chess) {  // check choose right chess\n    next = isJump ? 
JumpOBS(cur, dirct) : move_table[cur][dirct];\n\n    if (next >= 0 && next <= 120 && board.getChess(next) == Chesses::empty)\n      return next;\n    return -1;\n  }\n  return -1;\n}\n\nint Game::JumpOBS(int cur, int dirct) {\n  for (int step = 1; step <= 6;\n       step++) {  // up to six steps, or it'll jump outside\n    cur = move_table[cur][dirct];\n    if (cur >= 0 && cur <= 120) {\n      if (board.getChess(cur) != Chesses::empty) {  // find one obstacle\n        for (int j = 0; j < step; j++) {\n          cur = move_table[cur][dirct];\n          if (cur >= 0 && cur <= 120) {\n            if (board.getChess(cur) != Chesses::empty) {\n              return -1;  // find >2 obstacles -> can't jump\n            }\n          } else {\n            return -1;  // out of chessboard\n          }\n        }\n        return cur;\n      }\n    } else {\n      return -1;  // out of chessboard\n    }\n  }\n  return -1;  // must out of range\n}\n\ntemplate <typename R> void Game::setupBoard(const R& re) {\n  Board::setup({\"Empty\", \"White\", \"Black\"}, {\"   \", \" ● \", \" ○ \"}, re);\n}\n\nconstexpr Player Game::chessToPlayer(Chess chess) {\n  if (chess == Chesses::White)\n    return Players::player0;\n  else if (chess == Chesses::Black)\n    return Players::player1;\n  assert(chess == Chesses::White || chess == Chesses::Black);\n  return -1;\n}\n\nconstexpr Chess Game::playerToChess(Player player) {\n  if (player == Players::player0)\n    return Chesses::White;\n  else if (player == Players::player1)\n    return Chesses::Black;\n  assert(player == Players::player0 || player == Players::player1);\n  return 0xFF;\n}\n\nState::State(int seed)\n    : core::State(seed)\n    , Game() {\n  // fprintf(stderr,\"State(int seed)\\n\");\n  call_once(setupCalled, [&] { setupBoard(_rng); });\n}\n\nvoid State::Initialize() {\n  // fprintf(stderr,\"Initialize()\\n\");\n  _moves.clear();\n\n  _featSize[0] = featuresSizeX;\n  _featSize[1] = featuresSizeY;\n  _featSize[2] = 
featuresSizeZ;\n\n  _actionSize[0] = 3;  // move + jump + pass\n  _actionSize[1] = boardWH * boardWH;\n  _actionSize[2] = boardWH * boardWH;\n\n  _status = GameStatus::player0Turn;\n  _features.resize(featuresSize);\n\n  initialize();\n  findLegalMoves(Players::player0);\n  findActions();\n  findFeatures();\n  _hash = board.getHash();\n}\n\nunique_ptr<core::State> State::clone_() const {\n  // fprintf(stderr,\"clone_()\\n\");\n  return make_unique<State>(*this);\n}\n\nvoid State::ApplyAction(const _Action& action) {\n  // fprintf(stderr,\"ApplyAction()\\n\");\n  if (!terminated()) {\n    Move m{};\n    Player Player = -1;\n    // player0 plays white (●), player1 plays black (○)\n    if (_status == GameStatus::player0Turn) {\n      m.chess = Chesses::White;\n      Player = Players::player0;\n    } else if (_status == GameStatus::player1Turn) {\n      m.chess = Chesses::Black;\n      Player = Players::player1;\n    }\n    m.x = action.GetY();\n    m.tx = action.GetZ();\n    m.method = action.GetX();\n\n    // check pass hand\n    if (action.GetX() == 2) {\n      if (canChange(Player)) {\n        findActions();\n        findFeatures();\n      }\n    } else {\n      play(m);\n\n      legalMoves.clear();\n\n      if (m.method == 1) {  // jump\n        LegalMove(m.tx, m.chess, m.method);\n      }\n\n      if (legalMoves.empty()) {  // move or no more jump\n        if (canChange(Player)) {\n          continue_jump = false;\n          findActions();\n          findFeatures();\n        }\n      } else {\n        legalMoves.push_back(Pass);\n        findActions();\n        continue_jump = true;\n      }\n    }\n  }\n  _hash = board.getHash();\n}\n\nPlayer State::changeTurn(Player player) {\n  Player nextPlayer = -1;\n  if (player == Players::player1) {\n    nextPlayer = Players::player0;\n    _status = GameStatus::player0Turn;\n  } else if (player == Players::player0) {\n    nextPlayer = Players::player1;\n    _status = GameStatus::player1Turn;\n  }\n\n  hands += 1;\n  
board.turnHash();\n  way.clear();\n  return nextPlayer;\n}\n\nbool State::canChange(Player player) {\n  // fprintf(stderr,\"canGoNext()\\n\");\n  Player nextPlayer = changeTurn(player);\n  if (Won()) {\n    if (winner == Players::player1)\n      _status = GameStatus::player1Win;\n    else if (winner == Players::player0)\n      _status = GameStatus::player0Win;\n    else\n      assert(winner == Players::player1 || winner == Players::player0);\n    return false;\n  }\n  if (hands >= maxHands) {\n    _status = GameStatus::tie;\n    return false;\n  }\n\n  findLegalMoves(nextPlayer);\n  assert(!legalMoves.empty());\n  return true;\n}\n\nvoid State::findActions() {\n  // fprintf(stderr,\"findActions()\\n\");\n  clearActions();\n  for (auto& m : legalMoves) {\n    if (m.x != pass && m.tx != pass) {\n      addAction(m.method, m.x, m.tx);\n    } else {\n      addAction(2, 0, 0);\n    }\n  }\n}\n\nvoid State::findFeatures() {\n  // fprintf(stderr,\"findFeatures()\\n\");\n  std::fill(_features.begin(), _features.end(), 0.0);\n  auto* f = _features.data();\n  for (size_t c = 0; c < chesses; c++) {\n    Chess chess = static_cast<Chess>(c + 1);\n    for (int i = 0; i < boardWH * boardWH; i++) {\n      if (board.getChess(i) == chess)\n        *f = 1.0;\n      f++;\n    }\n  }\n  fillFullFeatures();\n}\n\nvoid State::DoGoodAction() {\n  // fprintf(stderr,\"DoGoodAction()\\n\");\n  // DoRandomAction();\n  assert(!_legalActions.empty());\n  std::uniform_int_distribution<size_t> distr(0, _legalActions.size() - 1);\n  size_t i = distr(_rng);\n  _Action a = _legalActions[i];\n  int cur = a.GetY(), next = a.GetZ();\n  Chess chess = board.getChess(cur);\n  while (prePos == next && preChess == chess) {\n    std::uniform_int_distribution<size_t> distr(0, _legalActions.size() - 1);\n    // size_t i = distr(_rng);\n    //_Action a = _legalActions[i];\n  }\n  prePos = cur;\n  preChess = chess;\n  ApplyAction(a);\n}\n\nvoid State::printCurrentBoard() const {\n  // 
fprintf(stderr,\"printCurrentBoard()\\n\");\n  cout << board.sprint(\"  \");\n}\n\nstring State::stateDescription() const {\n  // fprintf(stderr,\"stateDescription()\\n\");\n  return board.sprint(\"  \");\n}\n\nstring State::actionDescription(const ::_Action& action) const {\n  ostringstream ostr;\n  if (continue_jump)\n    ostr << \"<continue jumps>\\t\";\n\n  int cur = way.front().x, goal = action.GetZ(), method = action.GetX();\n  if (method == 2) {\n    ostr << \" <-Pass-> \";\n    goal = way[way.size() - 2].tx;\n  } else if (method == 0)\n    ostr << \"move from \";\n  else\n    ostr << \"jump from \";\n  ostr << board.getPosStr(cur) << \" to \" << board.getPosStr(goal);\n  return ostr.str();\n}\n\nstring State::actionsDescription() const {\n  // fprintf(stderr,\"actionsDescription()\\n\");\n  ostringstream ostr;\n  auto board = this->board;\n  board.legal.clear();\n\n  for (auto& m : legalMoves) {\n    board.legal.push_back(m.x);\n  }\n  ostr << \"Chesses which can move:\\n\" << board.showBoard(true);\n  return ostr.str();\n}\n\nint State::parseAction(const string& str) const {\n  auto parse = [&](const string& s, int& x) {\n    if (s == \"pass\" || s == \"p\") {\n      x = pass;\n      return true;\n    }\n    int pos = -1, len = 0;\n    for (size_t i = 0; i < s.size(); i++) {\n      if (!isalnum(s[i]) && pos >= 0)\n        break;\n      if (isalnum(s[i])) {\n        if (pos < 0)\n          pos = (int)i;\n        len += 1;\n      }\n    }\n    if (len == 0 || pos < 0)\n      return false;\n    string ts = s.substr(pos, len);\n    if (ts.size() < 2)\n      return false;\n    else if (!isalpha(ts[0]))\n      return false;\n    string num = ts.substr(1, ts.size() - 1);\n    int n = 0;\n    for (char c : num) {\n      if (!isdigit(c))\n        return false;\n      n = c - '0' + n * 10;\n    }\n    char c = toupper(s[0]);\n    if (c >= 'A' && c <= 'F') {\n      if (n <= 0 || n >= 11)\n        return false;\n      n--;\n    }\n    if (c == 'G') {\n      if (n <= 0 
|| n >= 62)\n        return false;\n      n--;\n    }\n    switch (c) {\n    case 'A':\n      n += 0;\n      break;\n    case 'B':\n      n += 10;\n      break;\n    case 'C':\n      n += 20;\n      break;\n    case 'D':\n      n += 30;\n      break;\n    case 'E':\n      n += 40;\n      break;\n    case 'F':\n      n += 50;\n      break;\n    case 'G':\n      n += 60;\n      break;\n    default:\n      return false;\n    }\n    x = n;\n    return true;\n  };\n  int cur = -1, goal = -1;\n  if (!parse(str, cur))\n    return -1;\n\n  auto board = this->board;\n  board.legal.clear();\n  bool found = false;\n  for (auto& m : legalMoves) {\n    if (m.x == cur) {\n      board.legal.push_back(m.tx);\n      found = true;\n    }\n  }\n  if (!found) {\n    cout << \"Error! No any legal move.\" << endl;\n    return -1;\n  }\n\n  if (cur != pass) {\n    cout << board.showBoard(true);\n    cout << \"Where you wanna go:\" << endl << \"> \";\n    string input;\n    cin >> input;\n    if (!parse(input, goal))\n      return -1;\n  }\n\n  auto& legalActions = GetLegalActions();\n  for (size_t i = 0; i < legalActions.size(); i++) {\n    if (cur == pass) {\n      if (legalActions[i].GetX() == 2) {\n        return (int)i;\n      }\n    } else {\n      if (legalActions[i].GetY() == cur && legalActions[i].GetZ() == goal)\n        return (int)i;\n    }\n  }\n  return -1;\n}\n\nint State::humanInputAction(function<optional<int>(string)> specialAction) {\n  cout << \"hands: \" << hands << endl;\n  cout << \"Current Board:\" << endl << stateDescription() << endl;\n  cout << actionsDescription() << endl;\n  cout << \"Please choose chess and move to where: \";\n  cout << \"(Use \\\"a01\\\", \\\"B5\\\",\\\"C10\\\", etc. 
to correspond to the correct \"\n          \"format in \"\n          \"chess board)\"\n       << endl;\n\n  string input;\n  int index = -1;\n  size_t max = GetLegalActions().size();\n  while (index < 0 || index >= (int)max) {\n    cout << \"Chess you choose is:\" << endl << \"> \";\n    cin >> input;\n    index = parseAction(input);\n\n    if (index == -1) {\n      if (auto r = specialAction(input); r) {\n        return *r;\n      }\n    }\n\n    if (index < 0 || index >= (int)max) {\n      cout << \"This is invalid Input! Please try again.\" << endl;\n      cout << \"Current Board:\" << endl << stateDescription() << endl;\n      cout << actionsDescription() << endl;\n    }\n  }\n  return index;\n}\n\n}  // namespace ChineseCheckers\n"
  },
  {
    "path": "src/games/chinesecheckers.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: 葉士誠 (SHI-CHENG YE)\n// affiliation: National Dong Hwa University(NDHU)\n// email: 410521206@gms.ndhu.edu.tw / 0930164@gmail.com\n\n#pragma once\n\n#include <algorithm>\n#include <cassert>\n#include <cstdint>\n#include <cstdio>\n#include <memory>\n#include <mutex>\n\n#include \"../core/state.h\"\n#include \"chinesecheckers_defines.h\"\n#include \"commons/chessboard.h\"\n\nusing namespace std;\n\nnamespace ChineseCheckers {\ntypedef int Player;\nclass Players {\n public:\n  static constexpr Player player0 = 0;\n  static constexpr Player player1 = 1;\n};\n\nclass Chesses {  // define Chess in commons/bitboard.h\n public:\n  static constexpr Chess empty = 0;\n  static constexpr Chess White = 1;\n  static constexpr Chess Black = 2;\n};\n\nclass Board : public ::Chessboard<11, 11> {\n public:\n  string sprintBoard(string_view prefix = \"\",\n                     const set<tuple<int, int>>& markedPos = {}) const override;\n  // string_view getPosStr(int x, int y) const override;\n  string getPosStr(int p) const override;\n\n  string showBoard(bool prePlay) const;\n\n  vector<int> legal;\n};\n\nstruct Move {\n  // int x, y, tx, ty;\n  int x, tx, method;\n  Chess chess;\n};\n\nclass Game {\n public:\n  void initialize();\n  void setInitialChesses();\n  void play(Move& m);\n  bool Won();\n  void findLegalMoves(Player player);\n  // void LegalMove(int x, int y, Chess chess, bool isJump);\n  void LegalMove(int cur, Chess chess, bool isJump);\n  int canGo(int cur, int md, Chess chess);\n\n  template <typename R> static void setupBoard(const R& re);\n  static constexpr Player chessToPlayer(Chess chess);\n  static constexpr Chess playerToChess(Player player);\n\n  static constexpr size_t players = 2;\n  static constexpr size_t chesses = 
2;\n  static constexpr int boardWH = 11;             // width and height in board\n  static constexpr int maxLegalMovesCnt = 5000;  // boardWH * boardWH - 40;\n  static constexpr int maxHands =\n      maxLegalMovesCnt;  // maxLegalMovesCnt * 2 * 10;\n  static inline once_flag setupCalled;\n\n  Board board;\n  int hands, prePos, preChess;\n  bool continue_jump;  // for action description(enable to continue jump )\n  Player winner;\n  vector<Move> legalMoves, way;\n\n  Move Pass = {pass, pass, Chesses::empty, true};\n\n  static constexpr short int move_table[121][6] = {\n      //  position {LU,RU,L,R,LD,RD}\n      {-1, -1, -1, -1, P_A2, P_A3},                // A1\n      {-1, P_A1, -1, P_A3, P_A4, P_A5},            // A2\n      {P_A1, -1, P_A2, -1, P_A5, P_A6},            // A3\n      {-1, P_A2, -1, P_A5, P_A7, P_A8},            // A4\n      {P_A2, P_A3, P_A4, P_A6, P_A8, P_A9},        // A5\n      {P_A3, -1, P_A5, -1, P_A9, P_A10},           // A6\n      {-1, P_A4, -1, P_A8, P_G1, P_G2},            // A7\n      {P_A4, P_A5, P_A7, P_A9, P_G2, P_G3},        // A8\n      {P_A5, P_A6, P_A8, P_A10, P_G3, P_G4},       // A9\n      {P_A6, -1, P_A9, -1, P_G4, P_G5},            // A10\n      {-1, -1, P_B2, -1, P_B3, -1},                // B1\n      {-1, -1, P_B4, P_B1, P_B5, P_B3},            // B2\n      {P_B2, P_B1, P_B5, -1, P_B6, -1},            // B3\n      {-1, -1, P_B7, P_B2, P_B8, P_B5},            // B4\n      {P_B4, P_B2, P_B8, P_B3, P_B9, P_B6},        // B5\n      {P_B5, P_B3, P_B9, -1, P_B10, -1},           // B6\n      {-1, -1, P_G5, P_B4, P_G11, P_B8},           // B7\n      {P_B7, P_B4, P_G11, P_B5, P_G18, P_B9},      // B8\n      {P_B8, P_B5, P_G18, P_B6, P_G26, P_B10},     // B9\n      {P_B9, P_B6, P_G26, -1, P_G35, -1},          // B10\n      {P_C2, -1, P_C3, -1, -1, -1},                // C1\n      {P_C4, -1, P_C5, -1, P_C3, P_C1},            // C2\n      {P_C5, P_C2, P_C6, P_C1, -1, -1},            // C3\n      {P_C7, -1, P_C8, -1, P_C5, P_C2},           
 // C4\n      {P_C8, P_C4, P_C9, P_C2, P_C6, P_C3},        // C5\n      {P_C9, P_C5, P_C10, P_C3, -1, -1},           // C6\n      {P_G35, -1, P_G43, -1, P_C8, P_C4},          // C7\n      {P_G43, P_C7, P_G50, P_C4, P_C9, P_C5},      // C8\n      {P_G50, P_C8, P_G56, P_C5, P_C10, P_C6},     // C9\n      {P_G56, P_C9, P_G61, P_C6, -1, -1},          // C10\n      {P_D3, P_D2, -1, -1, -1, -1},                // D1\n      {P_D5, P_D4, P_D3, -1, P_D1, -1},            // D2\n      {P_D6, P_D5, -1, P_D2, -1, P_D1},            // D3\n      {P_D8, P_D7, P_D5, -1, P_D2, -1},            // D4\n      {P_D9, P_D8, P_D6, P_D4, P_D3, P_D2},        // D5\n      {P_D10, P_D9, -1, P_D5, -1, P_D3},           // D6\n      {P_G60, P_G61, P_D8, -1, P_D4, -1},          // D7\n      {P_G59, P_G60, P_D9, P_D7, P_D5, P_D4},      // D8\n      {P_G58, P_G59, P_D10, P_D8, P_D6, P_D5},     // D9\n      {P_G57, P_G58, -1, P_D9, -1, P_D6},          // D10\n      {-1, P_E3, -1, P_E2, -1, -1},                // E1\n      {P_E3, P_E5, P_E1, P_E4, -1, -1},            // E2\n      {-1, P_E6, -1, P_E5, P_E1, P_E2},            // E3\n      {P_E5, P_E8, P_E2, P_E7, -1, -1},            // E4\n      {P_E6, P_E9, P_E3, P_E8, P_E2, P_E4},        // E5\n      {-1, P_E10, -1, P_E9, P_E3, P_E5},           // E6\n      {P_E8, P_G51, P_E4, P_G57, -1, -1},          // E7\n      {P_E9, P_G44, P_E5, P_G51, P_E4, P_E7},      // E8\n      {P_E10, P_G36, P_E6, P_G44, P_E5, P_E8},     // E9\n      {-1, P_G27, -1, P_G36, P_E6, P_E9},          // E10\n      {-1, -1, -1, P_F3, -1, P_F2},                // F1\n      {P_F1, P_F3, -1, P_F5, -1, P_F4},            // F2\n      {-1, -1, P_F1, P_F6, P_F2, P_F5},            // F3\n      {P_F2, P_F5, -1, P_F8, -1, P_F7},            // F4\n      {P_F3, P_F6, P_F2, P_F9, P_F4, P_F8},        // F5\n      {-1, -1, P_F3, P_F10, P_F5, P_F9},           // F6\n      {P_F4, P_F8, -1, P_G19, -1, P_G27},          // F7\n      {P_F5, P_F9, P_F4, P_G12, P_F7, P_G19},      // F8\n      {P_F6, 
P_F10, P_F5, P_G6, P_F8, P_G12},      // F9\n      {-1, -1, P_F6, P_G1, P_F9, P_G6},            // F10\n      {-1, P_A7, P_F10, P_G2, P_G6, P_G7},         // G1\n      {P_A7, P_A8, P_G1, P_G3, P_G7, P_G8},        // G2\n      {P_A8, P_A9, P_G2, P_G4, P_G8, P_G9},        // G3\n      {P_A9, P_A10, P_G3, P_G5, P_G9, P_G10},      // G4\n      {P_A10, -1, P_G4, P_B7, P_G10, P_G11},       // G5\n      {P_F10, P_G1, P_F9, P_G7, P_G12, P_G13},     // G6\n      {P_G1, P_G2, P_G6, P_G8, P_G13, P_G14},      // G7\n      {P_G2, P_G3, P_G7, P_G9, P_G14, P_G15},      // G8\n      {P_G3, P_G4, P_G8, P_G10, P_G15, P_G16},     // G9\n      {P_G4, P_G5, P_G9, P_G11, P_G16, P_G17},     // G10\n      {P_G5, P_B7, P_G10, P_B8, P_G17, P_G18},     // G11\n      {P_F9, P_G6, P_F8, P_G13, P_G19, P_G20},     // G12\n      {P_G6, P_G7, P_G12, P_G14, P_G20, P_G21},    // G13\n      {P_G7, P_G8, P_G13, P_G15, P_G21, P_G22},    // G14\n      {P_G8, P_G9, P_G14, P_G16, P_G22, P_G23},    // G15\n      {P_G9, P_G10, P_G15, P_G17, P_G23, P_G24},   // G16\n      {P_G10, P_G11, P_G16, P_G18, P_G24, P_G25},  // G17\n      {P_G11, P_B8, P_G17, P_B9, P_G25, P_G26},    // G18\n      {P_F8, P_G12, P_F7, P_G20, P_G27, P_G28},    // G19\n      {P_G12, P_G13, P_G19, P_G21, P_G28, P_G29},  // G20\n      {P_G13, P_G14, P_G20, P_G22, P_G29, P_G30},  // G21\n      {P_G14, P_G15, P_G21, P_G23, P_G30, P_G31},  // G22\n      {P_G15, P_G16, P_G22, P_G24, P_G31, P_G32},  // G23\n      {P_G16, P_G17, P_G23, P_G25, P_G32, P_G33},  // G24\n      {P_G17, P_G18, P_G24, P_G26, P_G33, P_G34},  // G25\n      {P_G18, P_B9, P_G25, P_B10, P_G34, P_G35},   // G26\n      {P_F7, P_G19, -1, P_G28, P_E10, P_G36},      // G27\n      {P_G19, P_G20, P_G27, P_G29, P_G36, P_G37},  // G28\n      {P_G20, P_G21, P_G28, P_G30, P_G37, P_G38},  // G29\n      {P_G21, P_G22, P_G29, P_G31, P_G38, P_G39},  // G30\n      {P_G22, P_G23, P_G30, P_G32, P_G39, P_G40},  // G31\n      {P_G23, P_G24, P_G31, P_G33, P_G40, P_G41},  // G32\n      {P_G24, 
P_G25, P_G32, P_G34, P_G41, P_G42},  // G33\n      {P_G25, P_G26, P_G33, P_G35, P_G42, P_G43},  // G34\n      {P_G26, P_B10, P_G34, -1, P_G43, P_C7},      // G35\n      {P_G27, P_G28, P_E10, P_G37, P_E9, P_G44},   // G36\n      {P_G28, P_G29, P_G36, P_G38, P_G44, P_G45},  // G37\n      {P_G29, P_G30, P_G37, P_G39, P_G45, P_G46},  // G38\n      {P_G30, P_G31, P_G38, P_G40, P_G46, P_G47},  // G39\n      {P_G31, P_G32, P_G39, P_G41, P_G47, P_G48},  // G40\n      {P_G32, P_G33, P_G40, P_G42, P_G48, P_G49},  // G41\n      {P_G33, P_G34, P_G41, P_G43, P_G49, P_G50},  // G42\n      {P_G34, P_G35, P_G42, P_C7, P_G50, P_C8},    // G43\n      {P_G36, P_G37, P_E9, P_G45, P_E8, P_G51},    // G44\n      {P_G37, P_G38, P_G44, P_G46, P_G51, P_G52},  // G45\n      {P_G38, P_G39, P_G45, P_G47, P_G52, P_G53},  // G46\n      {P_G39, P_G40, P_G46, P_G48, P_G53, P_G54},  // G47\n      {P_G40, P_G41, P_G47, P_G49, P_G54, P_G55},  // G48\n      {P_G41, P_G42, P_G48, P_G50, P_G55, P_G56},  // G49\n      {P_G42, P_G43, P_G49, P_C8, P_G56, P_C9},    // G50\n      {P_G44, P_G45, P_E8, P_G52, P_E7, P_G57},    // G51\n      {P_G45, P_G46, P_G51, P_G53, P_G57, P_G58},  // G52\n      {P_G46, P_G47, P_G52, P_G54, P_G58, P_G59},  // G53\n      {P_G47, P_G48, P_G53, P_G55, P_G59, P_G60},  // G54\n      {P_G48, P_G49, P_G54, P_G56, P_G60, P_G61},  // G55\n      {P_G49, P_G50, P_G55, P_C9, P_G61, P_C10},   // G56\n      {P_G51, P_G52, P_E7, P_G58, -1, P_D10},      // G57\n      {P_G52, P_G53, P_G57, P_G59, P_D10, P_D9},   // G58\n      {P_G53, P_G54, P_G58, P_G60, P_D9, P_D8},    // G59\n      {P_G54, P_G55, P_G59, P_G61, P_D8, P_D7},    // G60\n      {P_G55, P_G56, P_G60, P_C10, P_D7, -1}       // G61\n  };\n\n private:\n  int JumpOBS(int cur, int dirct);\n};\n\nclass State : public core::State, public Game {\n public:\n  State(int seed);\n  void Initialize() override;\n  unique_ptr<core::State> clone_() const override;\n  void ApplyAction(const _Action& action) override;\n  void DoGoodAction() 
override;\n  void printCurrentBoard() const override;\n  string stateDescription() const override;\n  string actionsDescription() const override;\n  string actionDescription(const ::_Action& action) const override;\n  int parseAction(const string& str) const override;\n  int humanInputAction(\n      std::function<std::optional<int>(std::string)> specialAction) override;\n\n private:\n  Player changeTurn(Player player);\n  bool canChange(Player Player);\n  void findActions();\n  void findFeatures();\n\n  int seed;\n\n  static constexpr size_t featuresSizeX = chesses;\n  static constexpr size_t featuresSizeY = boardWH;\n  static constexpr size_t featuresSizeZ = boardWH;\n  static constexpr size_t featuresSize =\n      featuresSizeX * featuresSizeY * featuresSizeZ;\n};\n\n}  // namespace ChineseCheckers\n"
  },
  {
    "path": "src/games/chinesecheckers_defines.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#define P_A1 0\n#define P_A2 1\n#define P_A3 2\n#define P_A4 3\n#define P_A5 4\n#define P_A6 5\n#define P_A7 6\n#define P_A8 7\n#define P_A9 8\n#define P_A10 9\n#define P_B1 10\n#define P_B2 11\n#define P_B3 12\n#define P_B4 13\n#define P_B5 14\n#define P_B6 15\n#define P_B7 16\n#define P_B8 17\n#define P_B9 18\n#define P_B10 19\n#define P_C1 20\n#define P_C2 21\n#define P_C3 22\n#define P_C4 23\n#define P_C5 24\n#define P_C6 25\n#define P_C7 26\n#define P_C8 27\n#define P_C9 28\n#define P_C10 29\n#define P_D1 30\n#define P_D2 31\n#define P_D3 32\n#define P_D4 33\n#define P_D5 34\n#define P_D6 35\n#define P_D7 36\n#define P_D8 37\n#define P_D9 38\n#define P_D10 39\n#define P_E1 40\n#define P_E2 41\n#define P_E3 42\n#define P_E4 43\n#define P_E5 44\n#define P_E6 45\n#define P_E7 46\n#define P_E8 47\n#define P_E9 48\n#define P_E10 49\n#define P_F1 50\n#define P_F2 51\n#define P_F3 52\n#define P_F4 53\n#define P_F5 54\n#define P_F6 55\n#define P_F7 56\n#define P_F8 57\n#define P_F9 58\n#define P_F10 59\n#define P_G1 60\n#define P_G2 61\n#define P_G3 62\n#define P_G4 63\n#define P_G5 64\n#define P_G6 65\n#define P_G7 66\n#define P_G8 67\n#define P_G9 68\n#define P_G10 69\n#define P_G11 70\n#define P_G12 71\n#define P_G13 72\n#define P_G14 73\n#define P_G15 74\n#define P_G16 75\n#define P_G17 76\n#define P_G18 77\n#define P_G19 78\n#define P_G20 79\n#define P_G21 80\n#define P_G22 81\n#define P_G23 82\n#define P_G24 83\n#define P_G25 84\n#define P_G26 85\n#define P_G27 86\n#define P_G28 87\n#define P_G29 88\n#define P_G30 89\n#define P_G31 90\n#define P_G32 91\n#define P_G33 92\n#define P_G34 93\n#define P_G35 94\n#define P_G36 95\n#define P_G37 96\n#define P_G38 97\n#define P_G39 98\n#define P_G40 99\n#define P_G41 100\n#define P_G42 
101\n#define P_G43 102\n#define P_G44 103\n#define P_G45 104\n#define P_G46 105\n#define P_G47 106\n#define P_G48 107\n#define P_G49 108\n#define P_G50 109\n#define P_G51 110\n#define P_G52 111\n#define P_G53 112\n#define P_G54 113\n#define P_G55 114\n#define P_G56 115\n#define P_G57 116\n#define P_G58 117\n#define P_G59 118\n#define P_G60 119\n#define P_G61 120\n#define pass 121\n\n// direction\n#define M_LU 0\n#define M_RU 1\n#define M_L 2\n#define M_R 3\n#define M_LD 4\n#define M_RD 5\n\n#define J_LU 6\n#define J_RU 7\n#define J_L 8\n#define J_R 9\n#define J_LD 10\n#define J_RD 11\n"
  },
  {
    "path": "src/games/commons/chessboard.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: 林鈺錦 (Yù-Jǐn Lín)\n// - Github: https://github.com/abc1236762\n// - Email:  abc1236762@outlook.com\n\n#pragma once\n\n#include <array>\n#include <cassert>\n#include <cctype>\n#include <cstddef>\n#include <cstdint>\n#include <limits>\n#include <optional>\n#include <random>\n#include <set>\n#include <sstream>\n#include <string>\n#include <string_view>\n#include <tuple>\n#include <vector>\n\nusing Chess = std::uint8_t;\n\ntemplate <int ROW, int COL, bool INVERTY = true> class Chessboard {\n  static_assert(ROW > 0 && ROW <= 26 && COL > 0 && COL <= 26,\n                \"rows and columns of chessboard must be in range [1,26]\");\n\n public:\n  static constexpr int rows = ROW;\n  static constexpr int columns = COL;\n  static constexpr int squares = rows * columns;\n  using Board = std::array<Chess, squares>;\n\n  template <typename RE>\n  static void setup(const std::vector<std::string_view>& cn,\n                    const std::vector<std::string_view>& cs,\n                    const RE& re);\n  static constexpr bool isPosInBoard(int x, int y);\n  static constexpr bool isPosInBoard(int xy);\n  static constexpr int posTo1D(int x, int y);\n  static constexpr std::tuple<int, int> posTo2D(int xy);\n  static constexpr std::string_view getMarkSymbol();\n  static constexpr std::string_view getChessName(Chess chess);\n  static constexpr std::string_view getChessSymbol(Chess chess);\n\n  void initialize();\n  void initialize(const Board& b);\n  Chess getChess(int x, int y) const;\n  Chess getChess(int xy) const;\n  void setChess(int x, int y, Chess chess);\n  void setChess(int xy, Chess chess);\n  std::vector<int> countChesses() const;\n  void turnHash();\n\n  std::string sprint(std::string_view prefix = \"\") const;\n  virtual std::string 
sprintBoard(\n      std::string_view prefix = \"\",\n      const std::set<std::tuple<int, int>>& markedPos = {}) const;\n  const Board& getBoard() const;\n  std::uint64_t getHash() const;\n  virtual std::string getPosStr(int xy) const;\n  virtual std::string getPosStr(int x, int y) const;\n  virtual std::optional<std::tuple<int, int>> parsePosStr(\n      const std::string& str) const;\n\n  constexpr bool operator==(const Chessboard& cb) const;\n  constexpr bool operator!=(const Chessboard& cb) const;\n\n protected:\n  static inline std::string_view markSymbol = \"?\";\n\n private:\n  void updateHash();\n  void updateHash(int xy, Chess chess);\n\n  static inline std::vector<std::uint64_t> hashList;\n  static inline std::uint64_t hashTurn;\n  static inline std::size_t chessKinds;\n  static inline std::vector<std::string_view> chessesName;\n  static inline std::vector<std::string_view> chessesSymbol;\n\n  Board board;\n  std::uint64_t hash;\n};\n\ntemplate <int ROW, int COL, bool INVERTY>\ntemplate <typename RE>\nvoid Chessboard<ROW, COL, INVERTY>::setup(\n    const std::vector<std::string_view>& cn,\n    const std::vector<std::string_view>& cs,\n    const RE& re) {\n  assert(cn.size() == cs.size());\n  chessKinds = cn.size();\n  chessesName = cn;\n  chessesSymbol = cs;\n\n  std::independent_bits_engine<RE, 64UL, std::uint64_t> genRandomBits(re);\n  hashList = std::vector<std::uint64_t>(chessKinds * squares);\n  for (std::uint64_t& hash : hashList)\n    hash = genRandomBits();\n  hashTurn = genRandomBits();\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr bool Chessboard<ROW, COL, INVERTY>::isPosInBoard(int x, int y) {\n  return x >= 0 && x < rows && y >= 0 && y < columns;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr bool Chessboard<ROW, COL, INVERTY>::isPosInBoard(int xy) {\n  auto [x, y] = posTo2D(xy);\n  return isPosInBoard(x, y);\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr int Chessboard<ROW, COL, INVERTY>::posTo1D(int x, int 
y) {\n  return rows * y + x;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr std::tuple<int, int> Chessboard<ROW, COL, INVERTY>::posTo2D(int xy) {\n  return {xy % rows, xy / rows};\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr std::string_view Chessboard<ROW, COL, INVERTY>::getMarkSymbol() {\n  return markSymbol;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr std::string_view Chessboard<ROW, COL, INVERTY>::getChessName(\n    Chess chess) {\n  return chessesName.at(chess);\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr std::string_view Chessboard<ROW, COL, INVERTY>::getChessSymbol(\n    Chess chess) {\n  return chessesSymbol.at(chess);\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nvoid Chessboard<ROW, COL, INVERTY>::initialize() {\n  board.fill(0U);\n  updateHash();\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nvoid Chessboard<ROW, COL, INVERTY>::initialize(\n    const Chessboard<ROW, COL, INVERTY>::Board& b) {\n  board = b;\n  updateHash();\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nChess Chessboard<ROW, COL, INVERTY>::getChess(int x, int y) const {\n  return getChess(posTo1D(x, y));\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nChess Chessboard<ROW, COL, INVERTY>::getChess(int xy) const {\n  return board[xy];\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nvoid Chessboard<ROW, COL, INVERTY>::setChess(int x, int y, Chess chess) {\n  setChess(posTo1D(x, y), chess);\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nvoid Chessboard<ROW, COL, INVERTY>::setChess(int xy, Chess chess) {\n  updateHash(xy, getChess(xy));\n  board[xy] = chess;\n  updateHash(xy, chess);\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nstd::vector<int> Chessboard<ROW, COL, INVERTY>::countChesses() const {\n  std::vector<int> counts(chessKinds, 0);\n  for (Chess chess : board)\n    counts[chess]++;\n  return counts;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nvoid Chessboard<ROW, COL, INVERTY>::turnHash() {\n  hash ^= 
hashTurn;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nstd::string Chessboard<ROW, COL, INVERTY>::sprint(\n    std::string_view prefix) const {\n  std::ostringstream oss;\n  oss << prefix;\n  for (std::size_t i = 0; i < chessKinds; i++) {\n    oss << getChessName(i) << \"='\" << getChessSymbol(i) << \"'\";\n    if (i < chessKinds - 1)\n      oss << \" \";\n  }\n  oss << std::endl << sprintBoard(prefix);\n  return oss.str();\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nstd::string Chessboard<ROW, COL, INVERTY>::sprintBoard(\n    std::string_view prefix,\n    const std::set<std::tuple<int, int>>& markedPos) const {\n  auto hr = [&](std::string_view l, std::string_view m, std::string_view r) {\n    std::ostringstream ossE;\n    ossE << (columns < 10 ? \"  \" : \"   \") << l;\n    for (int x = 0; x < rows; x++) {\n      ossE << \"───\";\n      if (x < rows - 1)\n        ossE << m;\n    }\n    ossE << r << std::endl;\n    return ossE.str();\n  };\n\n  std::ostringstream oss, ossW;\n  ossW << (columns < 10 ? \" \" : \"  \");\n  for (int x = 0; x < rows; x++)\n    ossW << \"   \" << std::string(1, 'A' + x);\n  ossW << std::endl;\n\n  oss << prefix << ossW.str() << prefix << hr(\"┌\", \"┬\", \"┐\");\n  for (int y = 0; y < columns; y++) {\n    char yStr[4];\n    sprintf(\n        yStr, columns < 10 ? \"%d\" : \"%02d\", (INVERTY ? 
columns - y : y + 1));\n    oss << prefix << yStr << \" │ \";\n    for (int x = 0; x < rows; x++) {\n      if (markedPos.count({x, y}) == 0)\n        oss << getChessSymbol(getChess(x, y)) << \" │ \";\n      else\n        oss << markSymbol << \" │ \";\n    }\n    oss << yStr << std::endl;\n    if (y < columns - 1)\n      oss << prefix << hr(\"├\", \"┼\", \"┤\");\n  }\n  oss << prefix << hr(\"└\", \"┴\", \"┘\") << prefix << ossW.str();\n  return oss.str();\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconst typename Chessboard<ROW, COL, INVERTY>::Board&\nChessboard<ROW, COL, INVERTY>::getBoard() const {\n  return board;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nstd::uint64_t Chessboard<ROW, COL, INVERTY>::getHash() const {\n  return hash;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nstd::string Chessboard<ROW, COL, INVERTY>::getPosStr(int xy) const {\n  auto [x, y] = posTo2D(xy);\n  return getPosStr(x, y);\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nstd::string Chessboard<ROW, COL, INVERTY>::getPosStr(int x, int y) const {\n  char str[4] = {0};\n  str[0] = 'A' + x;\n  y = INVERTY ? 
columns - y : y + 1;\n  if (columns >= 10) {\n    str[1] = '0' + y / 10;\n    str[2] = '0' + y % 10;\n  } else {\n    str[1] = '0' + y;\n  }\n  return std::string(str);\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nstd::optional<std::tuple<int, int>> Chessboard<ROW, COL, INVERTY>::parsePosStr(\n    const std::string& str) const {\n  int x = -1, y = -1, yBegin = -1, yEnd = -1;\n  for (std::size_t i = 0; i < str.size(); i++) {\n    char c = str[i];\n    if (x < 0) {\n      if (std::isspace(c))\n        continue;\n      else if (!std::isalpha(c))\n        return std::nullopt;\n      x = std::toupper(c) - 'A';\n    } else if (yBegin < 0) {\n      if (std::isspace(c))\n        continue;\n      else if (!std::isdigit(c))\n        return std::nullopt;\n      yBegin = i;\n    } else if (yEnd < 0) {\n      if (std::isdigit(c))\n        continue;\n      else if (!std::isspace(c))\n        return std::nullopt;\n      yEnd = i;\n    } else if (!std::isspace(c)) {\n      return std::nullopt;\n    }\n  }\n  if (x < 0 || yBegin < 0)\n    return std::nullopt;\n  if (yEnd < 0)\n    yEnd = str.size();\n  std::string yStr(str, yBegin, yEnd - yBegin);\n  try {\n    y = std::stoul(yStr, nullptr, 10);\n    y = (INVERTY ? columns - y : y - 1);\n  } catch (...) 
{\n    return std::nullopt;\n  }\n  if (isPosInBoard(x, y))\n    return std::tuple(x, y);\n  return std::nullopt;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr bool Chessboard<ROW, COL, INVERTY>::operator==(\n    const Chessboard& cb) const {\n  if (hash == cb.hash && board == cb.board)\n    return true;\n  return false;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nconstexpr bool Chessboard<ROW, COL, INVERTY>::operator!=(\n    const Chessboard& cb) const {\n  if (hash != cb.hash || board != cb.board)\n    return true;\n  return false;\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nvoid Chessboard<ROW, COL, INVERTY>::updateHash() {\n  hash = 0ULL;\n  for (int xy = 0; xy < squares; xy++)\n    updateHash(xy, getChess(xy));\n}\n\ntemplate <int ROW, int COL, bool INVERTY>\nvoid Chessboard<ROW, COL, INVERTY>::updateHash(int xy, Chess chess) {\n  hash ^= hashList[squares * chess + xy];\n}\n"
  },
  {
    "path": "src/games/commons/hash.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\ntemplate <typename T, size_t SIZE> class HashBook {\n public:\n  using Storage = std::array<T, SIZE>;\n\n  HashBook() = default;\n\n  template <typename RngEngine> void setup(RngEngine& rng) {\n    std::independent_bits_engine<RngEngine, sizeof(T) * 8, T> gen(rng);\n    for (size_t i = 0; i < _book.size(); ++i) {\n      _book[i] = gen();\n    }\n  }\n\n  constexpr T operator[](size_t i) const {\n    return _book[i];\n  }\n\n private:\n  HashBook(const HashBook&) = delete;\n  HashBook& operator=(const HashBook&) = delete;\n  Storage _book;\n};\n\ntemplate <typename T, size_t SIZE> class Hasher {\n public:\n  Hasher(const HashBook<T, SIZE>& hashBook)\n      : _hashBook(&hashBook)\n      , _hash(0) {\n  }\n\n  void reset() {\n    _hash = 0;\n  }\n\n  void trigger(size_t i) {\n    _hash ^= (*_hashBook)[i];\n  }\n\n  uint64_t hash() const {\n    return _hash;\n  }\n\n private:\n  const HashBook<T, SIZE>* _hashBook;\n  uint64_t _hash;\n};\n"
  },
  {
    "path": "src/games/commons/player.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: 林鈺錦 (Yù-Jǐn Lín)\n// - Github: https://github.com/abc1236762\n// - Email:  abc1236762@outlook.com\n\n#pragma once\n\nclass Player {\n public:\n  enum Index : int { none = -1, first, second };\n\n  static constexpr Player set(int i) {\n    return Player(static_cast<Index>(i));\n  }\n\n  constexpr Player()\n      : _i(Index::none) {\n  }\n\n  constexpr Player(Index i)\n      : _i(i) {\n  }\n\n  constexpr bool operator==(const Player& p) const {\n    return _i == p._i;\n  }\n\n  constexpr bool operator!=(const Player& p) const {\n    return _i != p._i;\n  }\n\n  constexpr int index() {\n    return static_cast<int>(_i);\n  }\n\n private:\n  Index _i;\n};\n"
  },
  {
    "path": "src/games/connect6.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author 1. CHEN,SHIH-YU leo03164@gmail.com\n// Author 2. CHIU,HSIEN-TUNG yumjelly@gmail.com\n\n#include <list>\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/time.h>\n#include <time.h>\n\nusing namespace std;\nnamespace Connect6 {\n\nconst int C6White = 0;\nconst int C6Black = 1;\nconst int C6Empty = 2;\n\nconst int C6Dx = 19;\nconst int C6Dy = 19;\n\nconst int C6MaxLegalMoves = C6Dy * C6Dx;\nconst int C6MaxPlayoutLength = C6Dx * C6Dy;\n\n// TODO: 原本沒有 BEGIN\n\nclass C6Player {\n public:\n  int player;\n\n  bool operator==(C6Player p) {\n    return (p.player == player);\n  }\n};\n// END TODO\n\n// const int MaxMoveNumber = 2 * 2 * (3 * C6Dx * C6Dy) + 1;\n// const int MaxMoveNumber = 80 * 2 * 2 * (3 * C6Dx * C6Dy) + 1;\nconst int MaxMoveNumber = C6Dx * C6Dy;\n\nclass C6Move {\n public:\n  int x, y, color;\n};\n\nclass C6Board {\n\n public:\n  int nb;\n  char board[C6Dx][C6Dy];\n  unsigned long long hash;\n\n  void init() {\n    for (int i = 0; i < C6Dx; i++)\n      for (int j = 0; j < C6Dy; j++)\n        board[i][j] = C6Empty;\n\n    hash = 0;\n    // printf(\"init\\n\");\n  }\n  bool won(C6Move m) {\n    int Max_connect = 6;\n    int current_coun = 1;\n\n    int x = m.x;\n    int y = m.y;\n    int color = m.color;\n\n    bool opsite = true;\n\n    for (int i = 1; i <= Max_connect; i++) {\n      if (x - i >= 0 && board[x - i][y] == color && opsite) {\n        current_coun++;\n        continue;\n      } else {\n        i = 7;\n        opsite = false;\n        for (int j = 1; j <= Max_connect; j++) {\n          if (x + j < C6Dx && board[x + j][y] == color) {\n            current_coun++;\n            continue;\n          } else\n            break;\n        }\n      }\n    }\n    if 
(current_coun >= Max_connect)\n      return true;\n    current_coun = 1;\n    opsite = true;\n    //------------------------------------------------------------------------------\n    for (int i = 1; i <= Max_connect; i++) {\n      if (y - i >= 0 && board[x][y - i] == color && opsite) {\n        current_coun++;\n        continue;\n      } else {\n        i = 7;\n        opsite = false;\n        for (int j = 1; j <= Max_connect; j++) {\n          if (y + j < C6Dx && board[x][y + j] == color) {\n            current_coun++;\n            continue;\n          } else\n            break;\n        }\n      }\n    }\n    if (current_coun >= Max_connect)\n      return true;\n    current_coun = 1;\n    opsite = true;\n    //------------------------------------------------------------------------------\n    for (int i = 1; i <= Max_connect; i++) {\n      if ((y - i >= 0) && (x + i < C6Dx) && board[x + i][y - i] == color &&\n          opsite) {\n        current_coun++;\n        continue;\n      } else {\n        i = 7;\n        opsite = false;\n        for (int j = 1; j <= Max_connect; j++) {\n          if ((x - j >= 0) && (y + j < C6Dx) && board[x - j][y + j] == color) {\n            current_coun++;\n            continue;\n          } else\n            break;\n        }\n      }\n    }\n    if (current_coun >= Max_connect)\n      return true;\n    current_coun = 1;\n    opsite = true;\n    //------------------------------------------------------------------------------\n    for (int i = 1; i <= Max_connect; i++) {\n      if ((y - i >= 0) && (x - i >= 0) && board[x - i][y - i] == color &&\n          opsite) {\n        current_coun++;\n        continue;\n      } else {\n        i = 7;\n        opsite = false;\n        for (int j = 1; j <= Max_connect; j++) {\n          if ((y + j < C6Dx) && (x + j < C6Dy) &&\n              board[x + j][y + j] == color) {\n            current_coun++;\n            continue;\n          } else\n            break;\n        }\n      }\n    }\n    if 
(current_coun >= Max_connect)\n      return true;\n    else\n      return false;\n  }\n\n  int opponent(int joueur) {\n    // printf(\"opponent\\n\");\n    if (joueur == C6White)\n      return C6Black;\n    return C6White;\n  }\n\n  //找合法步\n  bool legalMove(C6Move m) {\n    // printf(\"legalMove\\n\");\n    if (board[m.x][m.y] != C6Empty)\n      return false;\n    return true;\n  }\n\n  void play(C6Move m) {\n\n    board[m.x][m.y] = m.color;\n  }\n\n  int legalMoves(C6Move moves[C6MaxLegalMoves]) {\n    // printf(\"legalMoves\\n\");\n    nb = 0;\n\n    for (int i = 0; i < C6Dx; i++) {\n      for (int j = 0; j < C6Dy; j++) {\n        if (board[i][j] == C6Empty) {\n          moves[nb].x = i;\n          moves[nb].y = j;\n          nb++;\n        }\n      }\n    }\n    return nb;\n  }\n};\n}  // namespace Connect6\n"
  },
  {
    "path": "src/games/connect6_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author 1. CHEN,SHIH-YU leo03164@gmail.com\n// Author 2. CHIU,HSIEN-TUNG yumjelly@gmail.com\n#pragma once\n\n#include \"../core/state.h\"\n#include \"connect6.h\"\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\n#include <fmt/printf.h>\n\nnamespace Connect6 {\n\nconst int StateForConnect6NumActions = 19 * 19 * 3;\n\n// class ActionForConnect6 : public ::_Action {\n// public:\n//  ActionForConnect6(int x, int y)\n//      : _Action() {\n//    _loc[0] = 0;\n//    _loc[1] = x;\n//    _loc[2] = y;\n//    _hash = x + y * 19;\n//  }  // step is 2 or 3.\n//};\n\ntemplate <int version = 2>\nclass StateForConnect6 : public core::State, C6Board {\n public:\n  int twice;\n  int firhand;\n\n  StateForConnect6(int seed)\n      : State(seed) {\n  }\n\n  virtual void Initialize() override {\n    // printf(\"Initialize\\n\");\n    // People implementing classes should not have much to do in _moves; just\n    // _moves.clear().\n    _moves.clear();\n\n    const int StateForConnect6X = version == 2 ? 
2 + 1 : 2 * 6 + 1;\n    const int StateForConnect6Y = 19;\n    const int StateForConnect6Z = 19;\n\n    _featSize[0] = StateForConnect6X;\n    _featSize[1] = StateForConnect6Y;\n    _featSize[2] = StateForConnect6Z;\n\n    // size of the output of the neural network; this should cover the positions\n    // of actions (above).\n    _actionSize[0] = 1;\n    _actionSize[1] = 19;\n    _actionSize[2] = 19;\n\n    // _hash is an unsigned int, it has to be *unique*.\n    _hash = 0;\n    _status = GameStatus::player1Turn;\n\n    _features.resize(StateForConnect6X * StateForConnect6Y * StateForConnect6Z);\n\n    twice = 0;\n    firhand = 1;\n\n    init();\n    findFeatures();\n    findActions();\n    fillFullFeatures();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForConnect6>(*this);\n  }\n\n  void findActions() {\n    // printf(\"findActions\\n\");\n    C6Move moves[C6MaxLegalMoves];\n    int nb = legalMoves(moves);\n\n    _legalActions.clear();\n    for (int i = 0; i < nb; i++) {\n      int x = moves[i].x;\n      int y = moves[i].y;\n\n      _legalActions.emplace_back(i, 0, x, y);\n    }\n  }\n\n  void findFeatures() {\n\n    // printf(\"findFeatures\\n\");\n    if ((_status == GameStatus::player0Win) ||\n        (_status == GameStatus::player1Win) || (_status == GameStatus::tie)) {\n      return;\n    }\n\n    if (version == 2) {\n      std::fill(_features.begin() + 2 * C6Dx * C6Dy, _features.end(),\n                twice || firhand ? 
1.0f : 0.0f);\n    } else {\n      std::vector<float> old(_features);\n      for (int i = 0; i < C6Dx * C6Dy * 2; i++)\n        _features[i] = 0;\n      for (int i = 0; i < C6Dx * C6Dy; i++)\n        if (board[i % C6Dx][i / C6Dy] == C6Black)\n          _features[i] = 1;\n      for (int i = 0; i < C6Dx * C6Dy; i++)\n        if (board[i % C6Dx][i / C6Dy] == C6White)\n          _features[C6Dx * C6Dy + i] = 1;\n\n      std::copy(old.begin(), old.begin() + 3610, _features.begin() + 722);\n\n      // 4332-4693\n      std::fill(_features.begin() + 4332, _features.end(), getCurrentPlayer());\n    }\n  }\n\n  virtual void ApplyAction(const ::_Action& action) override {\n    // printf(\"ApplyAction\\n\");\n\n    C6Move m;\n    // print(stdout);\n    if (_status == GameStatus::player0Turn) {  // C6White\n      m.color = C6White;\n      m.x = action.GetY();\n      m.y = action.GetZ();\n\n      play(m);\n\n      if (version == 2) {\n        _features[m.x * C6Dy + m.y + C6Dx * C6Dy * 0] = 1.0f;\n      }\n\n      bool hasWon = won(m);\n      if (hasWon) {\n        _status = GameStatus::player0Win;\n      } else {\n        findActions();\n        if (nb == 0) {\n          _status = GameStatus::tie;\n        } else {\n          if (twice == 0) {\n            twice = 1;\n          } else if (twice == 1) {\n            twice = 0;\n            _status = GameStatus::player1Turn;\n          }\n        }\n      }\n    } else if (_status == GameStatus::player1Turn) {\n      // C6Black\n      m.color = C6Black;\n      m.x = action.GetY();\n      m.y = action.GetZ();\n\n      play(m);\n\n      if (version == 2) {\n        _features[m.x * C6Dy + m.y + C6Dx * C6Dy * 1] = 1.0f;\n      }\n\n      bool hasWon = won(m);\n      if (hasWon) {\n        _status = GameStatus::player1Win;\n      } else {\n        findActions();\n        if (nb == 0) {\n          _status = GameStatus::tie;\n        } else {\n          if (firhand) {\n            _status = GameStatus::player0Turn;\n            firhand = 
0;\n          } else {\n            if (twice == 0)\n              twice = 1;\n            else if (twice == 1) {\n              twice = 0;\n              _status = GameStatus::player0Turn;\n            }\n          }\n        }\n      }\n    }\n    findFeatures();\n    _hash = hash;\n    fillFullFeatures();\n  }\n\n  virtual void DoGoodAction() override {\n    return DoRandomAction();\n  }\n\n  std::string stateDescription() const override {\n    std::string s;\n    s += fmt::sprintf(\"   \");\n    for (int k = 65; k < 84; k++)\n      s += fmt::sprintf(\"%c \", k);\n    s += fmt::sprintf(\"\\n\");\n    for (int i = 0; i < C6Dx; i++) {\n      if (C6Dx - i < 10)\n        s += fmt::sprintf(\"%d  \", C6Dx - i);\n      else\n        s += fmt::sprintf(\"%d \", C6Dx - i);\n      for (int j = 0; j < C6Dy; j++) {\n        if (board[C6Dx - 1 - i][j] == C6Black)\n          s += \"X \";\n        else if (board[C6Dx - 1 - i][j] == C6White)\n          s += \"O \";\n        else\n          s += \". \";\n      }\n      s += \"\\n\";\n    }\n    return s;\n  }\n\n  std::string actionDescription(const _Action& action) const override {\n    return std::string(1, 'A' + action.GetZ()) +\n           std::to_string(action.GetY() + 1);\n  }\n\n  int parseAction(const std::string& str) const override {\n    if (str.size() < 2) {\n      return -1;\n    }\n    int z = str[0] - 'A';\n    if (z < 0 || z >= 19) {\n      z = str[0] - 'a';\n      if (z < 0 || z >= 19) {\n        return -1;\n      }\n    }\n    int y = std::atoi(str.data() + 1) - 1;\n    if (y < 0 || y >= 19) {\n      return -1;\n    }\n    for (auto& a : _legalActions) {\n      if (a.GetZ() == z && a.GetY() == y) {\n        return a.GetIndex();\n      }\n    }\n    return -1;\n  }\n\n  int humanInputAction(\n      std::function<std::optional<int>(std::string)> specialAction) {\n    std::cout << \"Current board:\" << std::endl\n              << stateDescription() << std::endl;\n    std::string str;\n    int index = -1;\n    while 
(index < 0) {\n      std::cout << \"Input action: \";\n      std::getline(std::cin, str);\n      index = parseAction(str);\n      if (index < 0) {\n        if (auto r = specialAction(str); r)\n          return *r;\n        std::cout << \"invalid input, try again.\" << std::endl;\n      }\n    }\n    return index;\n  }\n};\n\n}  // namespace Connect6\n"
  },
  {
    "path": "src/games/connectfour.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\n#include \"../core/state.h\"\n\nclass StateForConnectFour : public core::State {\n public:\n  StateForConnectFour(int seed)\n      : State(seed) {\n  }\n\n  virtual void Initialize() override {\n    _moves.clear();\n    _hash = 2166136261u;\n    _status = GameStatus::player0Turn;\n    _featSize[0] = 3;\n    _featSize[1] = boardHeight;\n    _featSize[2] = boardWidth;\n    _actionSize[0] = boardWidth;\n    _actionSize[1] = 1;\n    _actionSize[2] = 1;\n    _features.clear();\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n    std::fill(_features.begin(), _features.end(), 1.0f);\n    board.clear();\n    board.resize(boardWidth * boardHeight, 0);\n    height.clear();\n    height.resize(boardWidth, 0);\n    featurize();\n    findActions();\n    fillFullFeatures();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForConnectFour>(*this);\n  }\n\n  virtual void printCurrentBoard() const override {\n    std::cout << \"printing board\" << std::endl << std::flush;\n    for (int r = boardHeight - 1; r >= 0; --r) {\n      std::cout << \"|\";\n      for (int c = 0; c < boardWidth; ++c) {\n        auto val = board[r * boardWidth + c];\n        if (val == 0) {\n          std::cout << \" \";\n        } else if (val == 1) {\n          std::cout << \"X\";\n        } else if (val == 2) {\n          std::cout << \"O\";\n        } else {\n          assert(false);\n        }\n        std::cout << \"|\";\n      }\n      std::cout << std::endl;\n    }\n  }\n\n  void featurize() {\n    int player = 1 + getCurrentPlayer();\n    int otherPlayer = player == 1 ? 
2 : 1;\n    for (int i = 0; i != (int)board.size(); ++i) {\n      int v = board[i];\n      _features[i] = v == player;\n      _features[board.size() + i] = v == otherPlayer;\n    }\n  }\n\n  void findActions() {\n    clearActions();\n    for (int i = 0; i != boardWidth; ++i) {\n      if (height[i] != boardHeight) {\n        addAction(i, 0, 0);\n      }\n    }\n  }\n\n  virtual void ApplyAction(const _Action& action) override {\n    int x = action.GetX();\n    int y = height.at(x);\n    ++height[x];\n    int player = 1 + getCurrentPlayer();\n    size_t index = x + y * boardWidth;\n    board.at(index) = player;\n    _hash ^= index;\n    _hash *= 16777619u;\n    auto count = [&](int dx, int dy) {\n      int nx = x + dx;\n      int ny = y + dy;\n      int r = 0;\n      int stride = dx + dy * boardWidth;\n      size_t nIndex = index + stride;\n      while (nx >= 0 && nx < boardWidth && ny >= 0 && ny < boardHeight &&\n             board.at(nIndex) == player) {\n        ++r;\n        nIndex += stride;\n        nx += dx;\n        ny += dy;\n      }\n      return r;\n    };\n    bool won = count(-1, 0) + count(1, 0) >= 3;\n    won |= count(0, -1) + count(0, 1) >= 3;\n    won |= count(-1, -1) + count(1, 1) >= 3;\n    won |= count(1, -1) + count(-1, 1) >= 3;\n    if (won) {\n      _status = player == 1 ? GameStatus::player0Win : GameStatus::player1Win;\n    } else {\n      featurize();\n      findActions();\n      if (_legalActions.empty()) {\n        _status = GameStatus::tie;\n      } else {\n        _status =\n            player == 1 ? GameStatus::player1Turn : GameStatus::player0Turn;\n      }\n    }\n    fillFullFeatures();\n  }\n\n  virtual void DoGoodAction() override {\n    return DoRandomAction();\n  }\n\n  int boardWidth = 7;\n  int boardHeight = 6;\n  std::vector<char> board;\n  std::vector<char> height;\n};\n"
  },
  {
    "path": "src/games/diceshogi.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author1: Lin Hsin-I\n// - Github: https://github.com/free00000000000\n// - Email:  410521233@gms.ndhu.edu.tw\n// Facilitator: 邱顯棟 (Xiǎn-Dòng Qiū)\n// - Github: https://github.com/YumJelly\n// - Email:  yumjelly@gmail.com\n\n#pragma once\n#include \"../core/state.h\"\n#include \"shogi.h\"\n#include <queue>\n#include <sstream>\n#include <vector>\n\nclass StateForDiceshogi : public core::State, public Shogi {\n public:\n  unsigned long long HashArray[2][10][Dx][Dy];\n  unsigned long long HashArrayJail[20];\n  unsigned long long HashTurn;\n  unsigned long long hash;\n  int length;\n  short dice;  // 0-5\n  int repeat;\n  std::queue<unsigned long long> situation;\n\n  StateForDiceshogi(int seed)\n      : State(seed)\n      , Shogi() {\n    _stochasticReset = true;\n  }\n\n  virtual void Initialize() override {\n    _stochastic = true;\n    _moves.clear();\n    _hash = 0;\n    _status = GameStatus::player0Turn;\n    _featSize[0] = 225;\n    _featSize[1] = Dy;\n    _featSize[2] = Dx;\n    _actionSize[0] = 19;  // 11 pieces + 8 promoted\n    _actionSize[1] = Dy;\n    _actionSize[2] = Dx;\n    _features.clear();\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n    // setFeatures(false, false, false, 0, 0, false);\n\n    gameInit();\n    initHash();\n    // printCurrentBoard();\n\n    findFeature();\n    findActions();\n    // fixxx\n    fillFullFeatures();\n  }\n\n  void gameInit() {\n    chess.clear();\n    chess.resize(2);\n    for (int i = 0; i < Dx; ++i)\n      for (int j = 0; j < Dy; ++j)\n        board[i][j] = Piece();\n\n    for (int i = 0; i < Dx; ++i) {\n      board[i][0] = Piece(White, PieceType(i + 1), false, Position(i, 0));\n      chess[White].push_back(board[i][0]);\n    }\n    board[0][1] = Piece(White, 
PieceType::Pawn, false, Position(0, 1));\n    chess[White].push_back(board[0][1]);\n\n    for (int i = 1; i <= Dx; ++i) {\n      int x = Dx - i;\n      board[x][4] = Piece(Black, PieceType(i), false, Position(x, 4));\n      chess[Black].push_back(board[x][4]);\n    }\n    board[4][3] = Piece(Black, PieceType::Pawn, false, Position(4, 3));\n    chess[Black].push_back(board[4][3]);\n\n    if (forcedDice > 0) {\n      assert(forcedDice > 0);\n      assert(forcedDice < 7);\n      dice = forcedDice - 1;\n      forcedDice = -1;\n    } else {\n      dice = _rng() % 6;\n    }\n\n    _hash = dice + 1;\n    hash = 0;\n    length = 0;\n    repeat = 0;\n    situation.push(hash);\n  }\n\n  void initHash() {\n    for (int a = 0; a < 2; ++a)\n      for (int b = 0; b < 10; ++b)\n        for (int c = 0; c < 5; ++c)\n          for (int d = 0; d < 5; ++d) {\n            HashArray[a][b][c][d] = 0;\n            for (int k = 0; k < 64; ++k)\n              if ((_rng() / (RAND_MAX + 1.0)) > 0.5)\n                HashArray[a][b][c][d] |= (1ULL << k);\n          }\n    for (int a = 0; a < 20; ++a) {\n      for (int k = 0; k < 64; ++k)\n        if ((_rng() / (RAND_MAX + 1.0)) > 0.5)\n          HashArrayJail[a] |= (1ULL << k);\n    }\n\n    HashTurn = 0;\n    for (int k = 0; k < 64; k++)\n      if ((_rng() / (RAND_MAX + 1.0)) > 0.5)\n        HashTurn |= (1ULL << k);\n  }\n\n  void findFeature() {\n    std::vector<float> old(_features);\n    for (int i = 0; i < 5425; ++i)\n      _features[i] = 0;\n    // 0 ~ 500\n    for (int i = 0; i < 25; ++i) {\n      Piece p = board[i % 5][i / 5];\n      if (p.color == White) {\n        switch (p.type) {\n        case PieceType::King:\n          _features[i] = 1;\n          break;\n\n        case PieceType::Gold:\n        case PieceType::Gold2:\n          _features[25 + i] = 1;\n          break;\n\n        case PieceType::Silver:\n        case PieceType::Silver2:\n          if (p.promoted)\n            _features[50 + i] = 1;\n          else\n            
_features[75 + i] = 1;\n          break;\n\n        case PieceType::Bishop:\n        case PieceType::Bishop2:\n          if (p.promoted)\n            _features[100 + i] = 1;\n          else\n            _features[125 + i] = 1;\n          break;\n\n        case PieceType::Rook:\n        case PieceType::Rook2:\n          if (p.promoted)\n            _features[150 + i] = 1;\n          else\n            _features[175 + i] = 1;\n          break;\n\n        case PieceType::Pawn:\n        case PieceType::Pawn2:\n          if (p.promoted)\n            _features[200 + i] = 1;\n          else\n            _features[225 + i] = 1;\n          break;\n\n        default:\n          break;\n        }\n      } else {\n        switch (p.type) {\n        case PieceType::King:\n          _features[250 + i] = 1;\n          break;\n\n        case PieceType::Gold:\n        case PieceType::Gold2:\n          _features[275 + i] = 1;\n          break;\n\n        case PieceType::Silver:\n        case PieceType::Silver2:\n          if (p.promoted)\n            _features[300 + i] = 1;\n          else\n            _features[325 + i] = 1;\n          break;\n\n        case PieceType::Bishop:\n        case PieceType::Bishop2:\n          if (p.promoted)\n            _features[350 + i] = 1;\n          else\n            _features[375 + i] = 1;\n          break;\n\n        case PieceType::Rook:\n        case PieceType::Rook2:\n          if (p.promoted)\n            _features[400 + i] = 1;\n          else\n            _features[425 + i] = 1;\n          break;\n\n        case PieceType::Pawn:\n        case PieceType::Pawn2:\n          if (p.promoted)\n            _features[450 + i] = 1;\n          else\n            _features[475 + i] = 1;\n          break;\n\n        default:\n          break;\n        }\n      }\n    }\n\n    // 500 ~ 575\n    switch (repeat) {\n    case 1:\n      std::fill(_features.begin() + 500, _features.begin() + 525, 1);\n      break;\n    case 5:\n      
std::fill(_features.begin() + 525, _features.begin() + 550, 1);\n      break;\n    case 9:\n      std::fill(_features.begin() + 550, _features.begin() + 575, 1);\n      break;\n    default:\n      break;\n    }\n\n    // prison w 575 ~ 625\n    // prison b 625 ~ 675\n    int tmp = 575;\n    for (int i = 0; i < 2; ++i) {\n      std::vector<Piece>::iterator it;\n      for (it = chess[i].begin(); it != chess[i].end(); ++it) {\n        if (!(*it).pos.on_board()) {\n          switch ((*it).type) {\n          case PieceType::Gold:\n            std::fill(_features.begin() + tmp, _features.begin() + tmp + 5, 1);\n            break;\n          case PieceType::Silver:\n            std::fill(\n                _features.begin() + tmp + 5, _features.begin() + tmp + 10, 1);\n            break;\n          case PieceType::Bishop:\n            std::fill(\n                _features.begin() + tmp + 10, _features.begin() + tmp + 15, 1);\n            break;\n          case PieceType::Rook:\n            std::fill(\n                _features.begin() + tmp + 15, _features.begin() + tmp + 20, 1);\n            break;\n          case PieceType::Pawn:\n            std::fill(\n                _features.begin() + tmp + 20, _features.begin() + tmp + 25, 1);\n            break;\n          case PieceType::Gold2:\n            std::fill(\n                _features.begin() + tmp + 25, _features.begin() + tmp + 30, 1);\n            break;\n          case PieceType::Silver2:\n            std::fill(\n                _features.begin() + tmp + 30, _features.begin() + tmp + 35, 1);\n            break;\n          case PieceType::Bishop2:\n            std::fill(\n                _features.begin() + tmp + 35, _features.begin() + tmp + 40, 1);\n            break;\n          case PieceType::Rook2:\n            std::fill(\n                _features.begin() + tmp + 40, _features.begin() + tmp + 45, 1);\n            break;\n          case PieceType::Pawn2:\n            std::fill(\n                _features.begin() 
+ tmp + 45, _features.begin() + tmp + 50, 1);\n            break;\n          default:\n            break;\n          }\n        }\n      }\n      tmp += 50;\n    }\n\n    // dice 675 ~ 700\n    if (dice == 5)\n      std::fill(_features.begin() + 675, _features.begin() + 700, 1);\n    else\n      std::fill(\n          _features.begin() + dice * 5, _features.begin() + dice * 5 + 5, 1);\n\n    // history 700 ~ 4900+700\n    std::copy(old.begin(), old.begin() + 4900, _features.begin() + 700);\n\n    // 5600 ~ 5625\n    std::fill(_features.begin() + 5600, _features.end(), (int)_status);\n  }\n\n  void findActions() {\n    std::vector<Move> moves;\n    std::vector<Move> dice_moves;\n    for (auto i : chess[(int)_status]) {\n      legalMoves(i, moves);\n    }\n\n    // dice limit\n    if (dice != 5) {\n      for (auto m : moves)\n        if (m.next.x == dice)\n          dice_moves.push_back(m);\n    }\n    if (dice_moves.empty())\n      dice_moves = moves;\n\n    clearActions();\n    for (auto m : dice_moves) {\n      m.piece.promoted = m.promote;\n\n      int x = m.next.x;\n      int y = m.next.y;\n      int z = type_to_z(m.piece);\n\n      addAction(z, x, y);\n    }\n  }\n\n  virtual void printCurrentBoard() const override {\n    std::cerr << stateDescription();\n    // for(int i=0; i<2; ++i) {\n    //     for(auto j : chess[i]) {\n    //         fprintf(stderr, \"(%c,%d) \", j.pos.x+'A', j.pos.y);\n    //     }\n    //     std::cerr << std::endl;\n    // }\n  }\n\n  std::string print_chess(const int color) const {\n    std::string str;\n    if (color == White)\n      str += \"DiceWhite: \";\n    else\n      str += \"DiceBlack: \";\n    for (auto i : chess[color]) {\n      if (!i.pos.on_board()) {\n        str += '(';\n        str += i.print();\n        str += ')';\n      } else\n        str += i.print();\n      str += ' ';\n    }\n    str += '\\n';\n    return str;\n  }\n\n  virtual std::string stateDescription() const override {\n    std::string str;\n    str += \"   
A| B| C| D| E\\n\";\n    for (int i = Dy - 1; i >= 0; --i) {\n      str += std::to_string(i + 1) + ' ';\n      for (int j = 0; j < Dx; ++j) {\n        if (j > 0)\n          str += '|';\n        str += board[j][i].print();\n      }\n      str += '\\n';\n    }\n    str += print_chess(White);\n    str += print_chess(Black);\n\n    return str;\n  }\n\n  virtual std::string actionsDescription() const override {\n    std::stringstream ss;\n    int i = 0;\n    for (auto action : _legalActions) {\n      int z = action.GetX();\n      int x = action.GetY();\n      int y = action.GetZ();\n\n      Piece p;\n      p.type = z_to_type(z);\n      p.promoted = z_promoted(z);\n      p.color = (int)_status;\n\n      for (auto i : chess[p.color])\n        if (i.type == p.type)\n          p.pos = i.pos;\n\n      ss << p.print();\n      char buff[53];\n      sprintf(buff, \" (%c, %c) to (%c, %c) ---%d\\n\", p.pos.x + 'A',\n              p.pos.y + '1', x + 'A', y + '1', i++);\n      ss << buff;\n    }\n    ss << \"\\nInput format: action index e.g. 
0\\n\";\n    return ss.str();\n  }\n\n  virtual std::string actionDescription(const _Action& action) const {\n    std::stringstream ss;\n    int z = action.GetX();\n    int x = action.GetY();\n    int y = action.GetZ();\n\n    Piece p;\n    p.type = z_to_type(z);\n    p.promoted = z_promoted(z);\n    p.color = opponent((int)_status);\n\n    for (auto i : chess[p.color])\n      if (i.type == p.type)\n        p.pos = i.pos;\n\n    ss << p.print();\n    char buff[21];\n    sprintf(buff, \" to (%c, %c)\\n\", x + 'A', y + '1');\n    ss << buff;\n\n    return ss.str();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForDiceshogi>(*this);\n  }\n\n  int getHashNum(Piece p) {\n    int num = (int)p.type;\n    if (num >= 7)\n      num -= 5;\n    if (p.promoted)\n      num += 5;\n    //   7 8 9 10 11\n    // 1 2 3 4  5  6 | 7 8 9 10\n    num -= 1;\n    return num;\n  }\n\n  int getHashNumjail(Piece p) {\n    // 0~19\n    return (int)p.type - 2 + 10 * p.color;\n  }\n\n  void play(Move m) {\n    // std::cerr << m.piece.print();\n    // fprintf(stderr, \" play (%c, %d) to (%c, %d)\\n\\n\", m.piece.pos.x+'A',\n    // m.piece.pos.y, m.next.x+'A', m.next.y);\n    m.piece.promoted |= m.promote;\n\n    if (m.piece.pos.on_board()) {\n      hash ^= HashArray[m.piece.color][getHashNum(m.piece)][m.piece.pos.x]\n                       [m.piece.pos.y];\n      // eat\n      if (board[m.next.x][m.next.y].color != Empty) {\n        int opp = opponent(m.piece.color);\n        hash ^= HashArray[opp][getHashNum(board[m.next.x][m.next.y])][m.next.x]\n                         [m.next.y];\n        hash ^= HashArrayJail[getHashNumjail(m.piece)];\n\n        Piece tmp(\n            m.piece.color, new_type(board[m.next.x][m.next.y].type), false);\n        chess[m.piece.color].push_back(tmp);\n\n        std::vector<Piece>::iterator it;\n        for (it = chess[opp].begin(); it != chess[opp].end(); ++it) {\n          if ((*it).type == 
board[m.next.x][m.next.y].type) {\n            chess[opp].erase(it);\n            break;\n          }\n        }\n      }\n\n      std::vector<Piece>::iterator it;\n      for (it = chess[m.piece.color].begin(); it != chess[m.piece.color].end();\n           ++it) {\n        if ((*it).type == m.piece.type) {\n          (*it).pos = m.next;\n          // decide promoted\n          if ((m.piece.color == White && m.next.y == Dy - 1) ||\n              (m.piece.color == Black && m.next.y == 0)) {\n            if (m.piece.promoted || (*it).type == PieceType::Pawn ||\n                (*it).type == PieceType::Pawn2)\n              (*it).promoted = true;\n          }\n          board[m.next.x][m.next.y] = (*it);\n          board[m.piece.pos.x][m.piece.pos.y] = Piece();\n          break;\n        }\n      }\n    } else {  // Drop move\n      hash ^= HashArrayJail[getHashNumjail(m.piece)];\n      std::vector<Piece>::iterator it;\n      for (it = chess[m.piece.color].begin(); it != chess[m.piece.color].end();\n           ++it) {\n        if ((*it).type == m.piece.type) {\n          (*it).pos = m.next;\n          board[m.next.x][m.next.y] = (*it);\n          break;\n        }\n      }\n    }\n    hash ^= HashArray[m.piece.color][getHashNum(board[m.next.x][m.next.y])]\n                     [m.next.x][m.next.y];\n    hash ^= HashTurn;\n\n    if (length < MaxPlayoutLength) {\n      // rollout[length] = m;\n      length++;\n    } else {\n      // set draw when the moves bigger than 1000\n      _status = GameStatus::tie;\n    }\n\n    // find repeat\n    if (hash != situation.front())\n      repeat = 0;\n    else\n      repeat += 1;\n    // fprintf(stderr, \"end play\\n\");\n  }\n\n  bool fourfold() {\n    if (repeat < 9)\n      return false;\n    return true;\n  }\n\n  bool won(int color) {\n    // fprintf(stderr, \"won: \");\n    // for(auto i : chess[opponent(color)]) {\n    //     if(i.type == PieceType::King) {\n    //         if(checkmate(i)) return true;\n    //         break;\n 
   //     }\n    // }\n    if (checkmate(opponent(color)))\n      return true;\n\n    if (fourfold() && opponent((int)_status) == opponent(color))\n      return true;\n    return false;\n  }\n\n  virtual void ApplyAction(const _Action& action) override {\n    // fprintf(stderr, \"\\nApply Action %d\\n\", (int)_status);\n\n    Move m;\n    int z = action.GetX();\n    int x = action.GetY();\n    int y = action.GetZ();\n\n    m.next.x = x;\n    m.next.y = y;\n    m.piece.type = z_to_type(z);\n    m.promote = z_promoted(z);\n\n    if (_status == GameStatus::player0Turn) {  // White to move\n      m.piece.color = White;\n      // find original position\n      for (auto i : chess[White]) {\n        if (i.type == m.piece.type)\n          m.piece.pos = i.pos;\n      }\n\n      play(m);\n      // printCurrentBoard();\n      // fprintf(stderr, \"hash: %llu\\n\", hash);\n      // fprintf(stderr, \"repeat: %d\\n\", repeat);\n      if ((GameStatus)_status == GameStatus::tie) {\n      } else if (!won(White))\n        _status = GameStatus::player1Turn;  // Black turn\n      else\n        _status = GameStatus::player0Win;  // White won\n    } else {                               // Black to move\n      m.piece.color = Black;\n      // find original position\n      for (auto i : chess[Black]) {\n        if (i.type == m.piece.type)\n          m.piece.pos = i.pos;\n      }\n\n      play(m);\n      // printCurrentBoard();\n      // fprintf(stderr, \"hash: %llu\\n\", hash);\n      // fprintf(stderr, \"repeat: %d\\n\", repeat);\n\n      if ((GameStatus)_status == GameStatus::tie) {\n      } else if (!won(Black))\n        _status = GameStatus::player0Turn;  // White turn\n      else\n        _status = GameStatus::player1Win;  // Black won\n    }\n    if (_status == GameStatus::player0Turn ||\n        _status == GameStatus::player1Turn) {\n      if (forcedDice >= 0) {\n        assert(forcedDice > 0);\n        assert(forcedDice < 7);\n        dice = forcedDice - 1;\n        forcedDice = 
-1;\n      } else {\n        dice = _rng() % 6;\n      }\n      _hash = dice + 1;\n      findFeature();\n      findActions();\n      fillFullFeatures();\n\n      if (situation.size() == 4) {\n        situation.pop();\n        situation.push(hash);\n      } else\n        situation.push(hash);\n    } else {\n      _legalActions.clear();\n      // if(_status == GameStatus::player0Win)\n      //     fprintf(stderr, \"white win\\n\");\n      // else if(_status == GameStatus::player1Win)\n      //     fprintf(stderr, \"black win\\n\");\n      // else fprintf(stderr, \"tie\\n\");\n    }\n    // fprintf(stderr, \"end apply action\\n\");\n  }\n\n  virtual void DoGoodAction() override {\n    // int i;\n    // printCurrentBoard();\n    // std::cout << actionsDescription();\n    // std::cin >> i;\n    // _Action a = *(_legalActions[i].get());\n    // ApplyAction(a);\n    // std::cout << actionDescription(a);\n\n    return DoRandomAction();\n  }\n};\n"
  },
  {
    "path": "src/games/diceshogi_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: Lin Hsin-I\n// - Github: https://github.com/free00000000000\n// - Email:   410521233@gms.ndhu.edu.tw\n// Facilitator: 邱顯棟 (Xiǎn-Dòng Qiū)\n// - Github: https://github.com/YumJelly\n// - Email:  yumjelly@gmail.com\n\n#include \"../core/state.h\"\n\ntypedef unsigned short Coord;\n\n#include \"time.h\"\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\nconst int StateForDiceshogiX = 225;\nconst int StateForDiceshogiY = 5;\nconst int StateForDiceshogiZ = 5;\n\n#include \"diceshogi.h\"\n\nclass ActionForDiceshogi : public _Action {\n public:\n  // each action has a position (_x[0], _x[1], _x[2])\n  // here for Diceshogi, there is (0, 0, 0) and (1, 0, 0),\n  // corresponding to steps 2 and 3 respectively.\n  ActionForDiceshogi(int x, int y, int piece)\n      : _Action() {\n    _loc[0] = piece;\n    _loc[1] = x;\n    _loc[2] = y;\n    _hash = (x + y * 5) * 19 + piece;\n  }  // step is 2 or 3.\n};\n\nclass StateForDiceshogi : public core::State {\n public:\n  StateForDiceshogi(int seed)\n      : State(seed) {\n    _stochasticReset = true;\n  }\n\n  DSPiece board[DSDx][DSDy];\n  unsigned long long hash;\n\n  DSMove rollout[DSMaxPlayoutLength];\n  int length, turn;\n\n  int repeat;\n  std::queue<unsigned long long> situation;\n\n  // 0 = DiceWhite, 1 = DiceBlack\n  std::vector<std::vector<DSPiece>> chess;\n  // 0~5\n  short dice;\n\n  void init() {\n    chess.clear();\n    chess.resize(2);\n    for (int i = 0; i < DSDx; ++i) {\n      for (int j = 0; j < DSDy; ++j) {\n        board[i][j] = DSPiece(DiceEmpty, DSPieceType::None, false);\n      }\n    }\n\n    for (int i = 1; i <= DSDx; ++i) {\n      board[i - 1][0].addDSPiece(\n          DiceWhite, DSPieceType(i), false, DSPosition(i - 1, 0));\n      
chess[DiceWhite].push_back(board[i - 1][0]);\n    }\n    board[0][1].addDSPiece(\n        DiceWhite, DSPieceType::Pawn, false, DSPosition(0, 1));\n    chess[DiceWhite].push_back(board[0][1]);\n    for (int i = 1; i <= DSDx; ++i) {\n      board[DSDx - i][4].addDSPiece(\n          DiceBlack, DSPieceType(i), false, DSPosition(DSDx - i, 4));\n      chess[DiceBlack].push_back(board[DSDx - i][4]);\n    }\n    board[4][3].addDSPiece(\n        DiceBlack, DSPieceType::Pawn, false, DSPosition(4, 3));\n    chess[DiceBlack].push_back(board[4][3]);\n\n    turn = DiceBlack;  // black first\n    if (forcedDice > 0) {\n      assert(forcedDice > 0);\n      assert(forcedDice < 7);\n      dice = forcedDice - 1;\n      forcedDice = -1;\n    } else {\n      dice = _rng() % 6;\n    }\n    _hash = dice + 1;\n    hash = 0;\n    length = 0;\n    repeat = 0;\n    situation.push(hash);\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForDiceshogi>(*this);\n  }\n\n  bool fourfold() {\n    if (repeat < 9)\n      return false;\n    return true;\n  }\n\n  bool won(int color) {\n    if (chess[color].back().type == DSPieceType::King)\n      return true;\n    if (_legalActions.empty())\n      return true;\n    if (fourfold() && opponent(turn) == color)\n      return true;\n    return false;\n  }\n\n  virtual std::string stateDescription() const override {\n    std::string str;\n    str += \"   A| B| C| D| E\\n\";\n    for (int i = DSDy - 1; i >= 0; --i) {\n      str += to_string(i + 1) + ' ';\n      for (int j = 0; j < DSDx; ++j) {\n        if (j > 0)\n          str += '|';\n        str += board[j][i].print();\n      }\n      str += '\\n';\n    }\n\n    return str;\n  }\n\n  virtual std::string actionsDescription() override {\n    std::stringstream ss;\n    char x1, y1;\n    for (int i = 0; i < (int)_legalActions.size(); i++) {\n      _Action& action = *(_legalActions[i]);\n      int color = (GameStatus)_status == GameStatus::player1Turn ? 
DiceWhite\n                                                                 : DiceBlack;\n      DSPieceType type = z_to_type(action.GetX());\n      bool promote = z_promoted(action.GetX());\n      DSPiece piece = DSPiece(color, type, promote);\n\n      x1 = static_cast<char>(action.GetY() + 'A');\n      y1 = static_cast<char>(action.GetZ() + '1');\n      ss << \"Action \" << i << \": \" << piece.print() << \"-\" << x1 << y1\n         << std::endl;\n    }\n    ss << \"\\nInput format : action index e.g. 0\\n\";\n    return ss.str();\n  }\n\n  virtual std::string actionDescription(const _Action& action) const {\n    std::stringstream ss;\n    char x1, y1;\n    int color = (turn + 1) % 2;\n    DSPieceType type = z_to_type(action.GetX());\n    bool promote = z_promoted(action.GetX());\n    DSPiece piece = DSPiece(color, type, promote);\n\n    x1 = static_cast<char>(action.GetY() + 'A');\n    y1 = static_cast<char>(action.GetZ() + '1');\n    ss << piece.print() << \"-\" << x1 << y1;\n\n    return ss.str();\n  }\n\n  void print_chess(int color, FILE* fp) {\n    if (color == DiceWhite)\n      fprintf(fp, \"DiceWhite \");\n    else\n      fprintf(fp, \"DiceBlack \");\n    fprintf(fp, \"%lu\\n\", chess[color].size());\n    std::vector<DSPiece>::iterator it;\n    for (it = chess[color].begin(); it != chess[color].end(); ++it) {\n      if (!(*it).pos.on_board()) {\n        fprintf(fp, \"(%s)\", (*it).print().c_str());\n      } else\n        fprintf(fp, \"%s\", (*it).print().c_str());\n    }\n    fprintf(fp, \"\\n\");\n  }\n\n  void legal_king_moves(DSMove origin, std::vector<DSMove>& moves) {\n    origin.promote = false;\n    short dx[] = {1, 1, 0, -1, -1, -1, 0, 1};\n    short dy[] = {0, 1, 1, 1, 0, -1, -1, -1};\n    for (int i = 0; i < 8; ++i) {\n      origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n      if (origin.next.on_board() &&\n          board[origin.next.x][origin.next.y].color != origin.piece.color)\n        moves.push_back(origin);\n    }\n  }\n\n  
void legal_gold_moves(DSMove origin, std::vector<DSMove>& moves) {\n    origin.promote = false;\n    if (origin.piece.color == DiceWhite) {\n      short dx[] = {1, 1, 0, -1, -1, 0};\n      short dy[] = {0, 1, 1, 1, 0, -1};\n      for (int i = 0; i < 6; ++i) {\n        origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n        if (origin.next.on_board() &&\n            board[origin.next.x][origin.next.y].color != origin.piece.color)\n          moves.push_back(origin);\n      }\n    } else {\n      short dx[] = {1, 0, -1, -1, 0, 1};\n      short dy[] = {0, 1, 0, -1, -1, -1};\n      for (int i = 0; i < 6; ++i) {\n        origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n        if (origin.next.on_board() &&\n            board[origin.next.x][origin.next.y].color != origin.piece.color)\n          moves.push_back(origin);\n      }\n    }\n  }\n\n  void legal_silver_moves(DSMove origin, std::vector<DSMove>& moves) {\n    origin.promote = false;\n    if (origin.piece.promoted) {\n      legal_gold_moves(origin, moves);\n      return;\n    }\n    if (origin.piece.color == DiceWhite) {\n      short dx[] = {1, 0, -1, -1, 1};\n      short dy[] = {1, 1, 1, -1, -1};\n      for (int i = 0; i < 5; ++i) {\n        origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n        if (origin.next.on_board() &&\n            board[origin.next.x][origin.next.y].color != origin.piece.color) {\n          moves.push_back(origin);\n          if (origin.next.y == 4) {\n            origin.promote = true;\n            moves.push_back(origin);\n            origin.promote = false;\n          }\n        }\n      }\n    } else {\n      short dx[] = {1, -1, -1, 0, 1};\n      short dy[] = {1, 1, -1, -1, -1};\n      for (int i = 0; i < 5; ++i) {\n        origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n        if (origin.next.on_board() &&\n            board[origin.next.x][origin.next.y].color != origin.piece.color) {\n          moves.push_back(origin);\n          if 
(origin.next.y == 0) {\n            origin.promote = true;\n            moves.push_back(origin);\n            origin.promote = false;\n          }\n        }\n      }\n    }\n  }\n\n  void legal_bishop_moves(DSMove origin, std::vector<DSMove>& moves) {\n    origin.promote = false;\n    short dx[] = {1, -1, -1, 1};\n    short dy[] = {1, 1, -1, -1};\n    for (int i = 0; i < 4; ++i) {\n      origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n      while (origin.next.on_board() &&\n             board[origin.next.x][origin.next.y].color != origin.piece.color) {\n        moves.push_back(origin);\n        if (origin.piece.color == DiceWhite) {\n          if (origin.next.y == 4 && !origin.piece.promoted) {\n            origin.promote = true;\n            moves.push_back(origin);\n            origin.promote = false;\n          }\n        } else {  // DiceBlack\n          if (origin.next.y == 0 && !origin.piece.promoted) {\n            origin.promote = true;\n            moves.push_back(origin);\n            origin.promote = false;\n          }\n        }\n        if (board[origin.next.x][origin.next.y].color != DiceEmpty)\n          break;\n        origin.next = origin.next + DSPosition(dx[i], dy[i]);\n      }\n    }\n    if (origin.piece.promoted) {\n      short dx[] = {1, 0, -1, 0};\n      short dy[] = {0, 1, 0, -1};\n      for (int i = 0; i < 4; ++i) {\n        origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n        if (origin.next.on_board() &&\n            board[origin.next.x][origin.next.y].color != origin.piece.color)\n          moves.push_back(origin);\n      }\n    }\n  }\n\n  void legal_rook_moves(DSMove origin, std::vector<DSMove>& moves) {\n    origin.promote = false;\n    short dx[] = {1, 0, -1, 0};\n    short dy[] = {0, 1, 0, -1};\n    for (int i = 0; i < 4; ++i) {\n      origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n      while (origin.next.on_board() &&\n             board[origin.next.x][origin.next.y].color != 
origin.piece.color) {\n        moves.push_back(origin);\n        if (origin.piece.color == DiceWhite) {\n          if (origin.next.y == 4 && !origin.piece.promoted) {\n            origin.promote = true;\n            moves.push_back(origin);\n            origin.promote = false;\n          }\n        } else {  // DiceBlack\n          if (origin.next.y == 0 && !origin.piece.promoted) {\n            origin.promote = true;\n            moves.push_back(origin);\n            origin.promote = false;\n          }\n        }\n        if (board[origin.next.x][origin.next.y].color != DiceEmpty)\n          break;\n        origin.next = origin.next + DSPosition(dx[i], dy[i]);\n      }\n    }\n    if (origin.piece.promoted) {\n      short dx[] = {1, -1, -1, 1};\n      short dy[] = {1, 1, -1, -1};\n      for (int i = 0; i < 4; ++i) {\n        origin.next = origin.piece.pos + DSPosition(dx[i], dy[i]);\n        if (origin.next.on_board() &&\n            board[origin.next.x][origin.next.y].color != origin.piece.color)\n          moves.push_back(origin);\n      }\n    }\n  }\n\n  void legal_pawn_moves(DSMove origin, std::vector<DSMove>& moves) {\n    origin.promote = false;\n    if (origin.piece.promoted) {\n      legal_gold_moves(origin, moves);\n      return;\n    }\n    if (origin.piece.color == DiceWhite) {\n      origin.next = origin.piece.pos + DSPosition(0, 1);\n      if (origin.next.on_board() &&\n          board[origin.next.x][origin.next.y].color != origin.piece.color) {\n        if (origin.next.y != 5)\n          moves.push_back(origin);\n        else {\n          origin.promote = true;\n          moves.push_back(origin);\n          origin.promote = false;\n        }\n      }\n\n    } else {\n      origin.next = origin.piece.pos + DSPosition(0, -1);\n      if (origin.next.on_board() &&\n          board[origin.next.x][origin.next.y].color != origin.piece.color) {\n        if (origin.next.y != 0)\n          moves.push_back(origin);\n        else {\n          origin.promote = 
true;\n          moves.push_back(origin);\n          origin.promote = false;\n        }\n      }\n    }\n  }\n\n  void legal_drop(DSMove origin, std::vector<DSMove>& moves) {\n    origin.promote = false;\n    for (int i = 0; i < DSDx; ++i) {\n      for (int j = 0; j < DSDy; ++j) {\n        if (board[i][j].color == DiceEmpty) {\n          origin.next = DSPosition(i, j);\n          moves.push_back(origin);\n        }\n      }\n    }\n  }\n\n  void legal_drop_pawn(DSMove origin, std::vector<DSMove>& moves) {\n    origin.promote = false;\n    // find another pawn\n    std::vector<DSPiece>::iterator it;\n    int cannotdrop = DSDx;\n    DSPieceType t = new_type(origin.piece.type);\n    for (it = chess[origin.piece.color].begin();\n         it != chess[origin.piece.color].end(); ++it) {\n      if ((*it).type == t)\n        cannotdrop = (*it).pos.x;\n    }\n\n    if (origin.piece.color == DiceWhite) {\n      for (int i = 0; i < DSDx; ++i) {\n        if (i == cannotdrop)\n          continue;\n        for (int j = 0; j < DSDy - 1; ++j) {\n          if (board[i][j].color == DiceEmpty) {\n            if ((j - 1) >= 0 && board[i][j - 1].type == DSPieceType::King &&\n                board[i][j - 1].color != origin.piece.color) {\n              if (checkmate(board[i][j - 1]))\n                continue;\n            }\n            origin.next = DSPosition(i, j);\n            moves.push_back(origin);\n          }\n        }\n      }\n    } else {\n      for (int i = 0; i < DSDx; ++i) {\n        if (i == cannotdrop)\n          continue;\n        for (int j = 1; j < DSDy; ++j) {\n          if (board[i][j].color == DiceEmpty) {\n            if (board[i][j - 1].type == DSPieceType::King &&\n                board[i][j - 1].color != origin.piece.color) {\n              if (checkmate(board[i][j - 1]))\n                continue;\n            }\n            origin.next = DSPosition(i, j);\n            moves.push_back(origin);\n          }\n        }\n      }\n    }\n  }\n\n  bool 
can_eat(DSPosition tar, int color) {\n    std::vector<DSMove> moves;\n    legalDSMoves_onboard(opponent(color), moves);\n\n    std::vector<DSMove>::iterator it;\n    for (it = moves.begin(); it != moves.end(); ++it)\n      if ((*it).next == tar)\n        return true;\n\n    return false;\n  }\n\n  bool checkmate(DSPiece king) {\n    std::vector<DSMove> king_moves;\n    DSMove m;\n    m.piece = king;\n    legal_king_moves(m, king_moves);\n    if (king_moves.empty())\n      return true;\n\n    std::vector<DSMove>::iterator it;\n    for (it = king_moves.begin(); it != king_moves.end(); ++it)\n      if (!can_eat((*it).next, king.color))\n        return false;\n\n    return true;\n  }\n\n  void legalDSMoves(int color, std::vector<DSMove>& moves) {\n    legalDSMoves_onboard(color, moves);\n    std::vector<DSPiece>::iterator it;\n    for (it = chess[color].begin(); it != chess[color].end(); ++it) {\n      DSPiece p = *it;\n\n      if (!p.pos.on_board()) {\n        DSMove m;\n        m.piece = p;\n        switch (m.piece.type) {\n        case DSPieceType::Gold:\n        case DSPieceType::Gold2:\n        case DSPieceType::Silver:\n        case DSPieceType::Silver2:\n        case DSPieceType::Bishop:\n        case DSPieceType::Bishop2:\n        case DSPieceType::Rook:\n        case DSPieceType::Rook2:\n          legal_drop(m, moves);\n          break;\n\n        case DSPieceType::Pawn:\n        case DSPieceType::Pawn2:\n          legal_drop_pawn(m, moves);\n          break;\n\n        default:\n          break;\n        }\n      }\n    }\n  }\n\n  void legalDSMoves_onboard(int color, std::vector<DSMove>& moves) {\n    std::vector<DSPiece>::iterator it;\n    for (it = chess[color].begin(); it != chess[color].end(); ++it) {\n      DSPiece p = *it;\n      DSMove m;\n      m.piece = p;\n      if (m.piece.pos.on_board()) {\n        switch (m.piece.type) {\n        case DSPieceType::King:\n          legal_king_moves(m, moves);\n          break;\n\n        case DSPieceType::Gold:\n 
       case DSPieceType::Gold2:\n          legal_gold_moves(m, moves);\n          break;\n\n        case DSPieceType::Silver:\n        case DSPieceType::Silver2:\n          legal_silver_moves(m, moves);\n          break;\n\n        case DSPieceType::Bishop:\n        case DSPieceType::Bishop2:\n          legal_bishop_moves(m, moves);\n          break;\n\n        case DSPieceType::Rook:\n        case DSPieceType::Rook2:\n          legal_rook_moves(m, moves);\n          break;\n\n        case DSPieceType::Pawn:\n        case DSPieceType::Pawn2:\n          legal_pawn_moves(m, moves);\n          break;\n\n        default:\n          break;\n        }\n      }\n    }\n  }\n\n  int opponent(int player) {\n    if (player == DiceWhite)\n      return DiceBlack;\n    return DiceWhite;\n  }\n\n  DSPieceType new_type(DSPieceType p) {\n    DSPieceType t = p;\n    switch (p) {\n    case DSPieceType::Gold:\n      t = DSPieceType::Gold2;\n      break;\n    case DSPieceType::Gold2:\n      t = DSPieceType::Gold;\n      break;\n    case DSPieceType::Silver:\n      t = DSPieceType::Silver2;\n      break;\n    case DSPieceType::Silver2:\n      t = DSPieceType::Silver;\n      break;\n    case DSPieceType::Bishop:\n      t = DSPieceType::Bishop2;\n      break;\n    case DSPieceType::Bishop2:\n      t = DSPieceType::Bishop;\n      break;\n    case DSPieceType::Rook:\n      t = DSPieceType::Rook2;\n      break;\n    case DSPieceType::Rook2:\n      t = DSPieceType::Rook;\n      break;\n    case DSPieceType::Pawn:\n      t = DSPieceType::Pawn2;\n      break;\n    case DSPieceType::Pawn2:\n      t = DSPieceType::Pawn;\n      break;\n    default:\n      break;\n    }\n    return t;\n  }\n\n  void play(DSMove m) {\n    m.piece.promoted |= m.promote;\n\n    turn = opponent(turn);\n    if (m.piece.pos.on_board()) {\n      hash ^= DSHashArray[m.piece.color][getHashNum(m.piece)][m.piece.pos.x]\n                         [m.piece.pos.y];\n\n      // eat\n      if (board[m.next.x][m.next.y].color != 
DiceEmpty) {\n        hash ^= DSHashArray[turn][getHashNum(board[m.next.x][m.next.y])]\n                           [m.next.x][m.next.y];\n        hash ^= DSHashArrayE[getHashNumE(m.piece)];\n\n        DSPiece tmp(\n            m.piece.color, new_type(board[m.next.x][m.next.y].type), false);\n        chess[m.piece.color].push_back(tmp);\n\n        std::vector<DSPiece>::iterator it;\n        for (it = chess[turn].begin(); it != chess[turn].end(); ++it) {\n          if ((*it).type == board[m.next.x][m.next.y].type) {\n            chess[turn].erase(it);\n            break;\n          }\n        }\n      }\n\n      std::vector<DSPiece>::iterator it;\n      for (it = chess[m.piece.color].begin(); it != chess[m.piece.color].end();\n           ++it) {\n        if ((*it).type == m.piece.type) {\n          (*it).pos = m.next;\n          // decide promoted\n          if ((m.piece.color == DiceWhite && m.next.y == DSDy - 1) ||\n              (m.piece.color == DiceBlack && m.next.y == 0)) {\n            if (m.piece.promoted || (*it).type == DSPieceType::Pawn ||\n                (*it).type == DSPieceType::Pawn2)\n              (*it).promoted = true;\n          }\n          board[m.next.x][m.next.y] = (*it);\n          board[m.piece.pos.x][m.piece.pos.y] =\n              DSPiece(DiceEmpty, DSPieceType::None, false);\n          break;\n        }\n      }\n\n    } else {\n      hash ^= DSHashArrayE[getHashNumE(m.piece)];\n\n      std::vector<DSPiece>::iterator it;\n      for (it = chess[m.piece.color].begin(); it != chess[m.piece.color].end();\n           ++it) {\n        if ((*it).type == m.piece.type) {\n          (*it).pos = m.next;\n          board[m.next.x][m.next.y] = (*it);\n          break;\n        }\n      }\n    }\n\n    hash ^= DSHashArray[m.piece.color][getHashNum(board[m.next.x][m.next.y])]\n                       [m.next.x][m.next.y];\n    hash ^= DSHashTurn;\n\n    if (length < DSMaxPlayoutLength) {\n      rollout[length] = m;\n      length++;\n    } else {\n      
_status = GameStatus::tie;\n    }\n\n    // find repeat\n    if (hash != situation.front())\n      repeat = 0;\n    else\n      repeat += 1;\n  }\n\n  int getHashNum(DSPiece p) {\n    int num = static_cast<int>(p.type);\n    if (num >= 7)\n      num -= 5;\n    if (p.promoted)\n      num += 5;\n    //   7 8 9 10 11\n    // 1 2 3 4  5  6 | 7 8 9 10\n    num -= 1;\n    return num;\n  }\n\n  int getHashNumE(DSPiece p) {\n    // 0~19\n    return static_cast<int>(p.type) - 2 + 10 * p.color;\n  }\n\n  // ############### board\n\n  virtual void Initialize() override {\n    // People implementing classes should not have much to do in _moves; just\n    // _moves.clear().\n    _stochastic = true;\n    _moves.clear();\n    // std::cout << \"OTGDiceshogi initialize\" << std::endl;\n\n    // the features are just one number between 0 and 1 (the distance,\n    // normalized).\n    _featSize[0] = StateForDiceshogiX;\n    _featSize[1] = StateForDiceshogiY;\n    _featSize[2] = StateForDiceshogiZ;\n\n    // size of the output of the neural network; this should cover the positions\n    // of actions (above).\n\n    _actionSize[0] = 19;\n    _actionSize[1] = 5;\n    _actionSize[2] = 5;\n\n    // _hash is an unsigned int, it has to be *unique*.\n    _hash = 0;\n\n    // std::cout << \"restart!\" << std::endl;\n    // _features is a vector representing the current state. It can\n    // (must...) be large for complex games; here just one number\n    // between 0 and 1. trivial case in dimension 1.\n    _features.resize(StateForDiceshogiX * StateForDiceshogiY *\n                     StateForDiceshogiZ);\n    std::fill(_features.begin(), _features.end(), 0);\n    /*\n        // _features[:_hash] = 1\n        for (int i = 0; i < DISTANCE; i++) {\n          _features[i] = (float(_hash) > float(i)) ? 1. 
: 0.;\n        }\n    */\n    init();\n    _status = (GameStatus)opponent(turn);\n    findFeatures();\n    findActions(turn);\n    fillFullFeatures();\n  }\n\n  int type_to_z(DSPiece p) {\n    if (!p.promoted)\n      return (int)p.type - 1;\n    switch (p.type) {\n    case DSPieceType::Silver:\n      return (int)DSPieceZ::PSilver;\n    case DSPieceType::Bishop:\n      return (int)DSPieceZ::PBishop;\n    case DSPieceType::Rook:\n      return (int)DSPieceZ::PRook;\n    case DSPieceType::Pawn:\n      return (int)DSPieceZ::PPawn;\n    case DSPieceType::Silver2:\n      return (int)DSPieceZ::PSilver2;\n    case DSPieceType::Bishop2:\n      return (int)DSPieceZ::PBishop2;\n    case DSPieceType::Rook2:\n      return (int)DSPieceZ::PRook2;\n    case DSPieceType::Pawn2:\n      return (int)DSPieceZ::PPawn2;\n\n    default:\n      // fprintf(stderr);\n      fprintf(\n          stderr, \"%s type to z error %d\\n\", p.print().c_str(), (int)p.type);\n      break;\n    }\n    return -1;\n  }\n\n  DSPieceType z_to_type(int z) const {\n    switch ((DSPieceZ)z) {\n    case DSPieceZ::K:\n      return DSPieceType::King;\n    case DSPieceZ::G:\n      return DSPieceType::Gold;\n    case DSPieceZ::G2:\n      return DSPieceType::Gold2;\n    case DSPieceZ::PSilver:\n    case DSPieceZ::S:\n      return DSPieceType::Silver;\n    case DSPieceZ::PBishop:\n    case DSPieceZ::B:\n      return DSPieceType::Bishop;\n    case DSPieceZ::PRook:\n    case DSPieceZ::R:\n      return DSPieceType::Rook;\n    case DSPieceZ::PPawn:\n    case DSPieceZ::P:\n      return DSPieceType::Pawn;\n    case DSPieceZ::PSilver2:\n    case DSPieceZ::S2:\n      return DSPieceType::Silver2;\n    case DSPieceZ::PBishop2:\n    case DSPieceZ::B2:\n      return DSPieceType::Bishop2;\n    case DSPieceZ::PRook2:\n    case DSPieceZ::R2:\n      return DSPieceType::Rook2;\n    case DSPieceZ::PPawn2:\n    case DSPieceZ::P2:\n      return DSPieceType::Pawn2;\n\n    default:\n      // print(stderr);\n      fprintf(stderr, \"%d\", 
z);\n      fprintf(stderr, \"z to type error\\n\");\n      break;\n    }\n    return DSPieceType::None;\n  }\n\n  bool z_promoted(int z) const {\n    return z >= 11;\n  }\n\n  void findActions(int color) {\n    std::vector<DSMove> moves;\n    std::vector<DSMove> dice_moves;\n    legalDSMoves(color, moves);\n\n    // diece limit\n    if (dice != 5) {\n      std::vector<DSMove>::iterator it;\n      for (it = moves.begin(); it != moves.end(); ++it) {\n        if ((*it).next.x == dice)\n          dice_moves.push_back((*it));\n      }\n    }\n    if (dice_moves.empty())\n      dice_moves = moves;\n\n    int nb = dice_moves.size();\n\n    clearActions();\n    for (int i = 0; i < nb; ++i) {\n      int x = dice_moves[i].next.x;\n      int y = dice_moves[i].next.y;\n      dice_moves[i].piece.promoted |= dice_moves[i].promote;\n      int z = type_to_z(dice_moves[i].piece);\n\n      addAction(z, x, y);\n    }\n  }\n\n  void findFeatures() {\n    // fprintf(stderr, \"rrrrrrrrrrrr%d\\n\", turn);\n    std::vector<float> old(_features);\n    for (int i = 0; i < 5425; ++i)\n      _features[i] = 0;\n    // 0 ~ 500\n    for (int i = 0; i < 25; ++i) {\n      DSPiece p = board[i % 5][i / 5];\n      if (p.color == DiceWhite) {\n        switch (p.type) {\n        case DSPieceType::King:\n          _features[i] = 1;\n          break;\n\n        case DSPieceType::Gold:\n        case DSPieceType::Gold2:\n          _features[25 + i] = 1;\n          break;\n\n        case DSPieceType::Silver:\n        case DSPieceType::Silver2:\n          if (p.promoted)\n            _features[50 + i] = 1;\n          else\n            _features[75 + i] = 1;\n          break;\n\n        case DSPieceType::Bishop:\n        case DSPieceType::Bishop2:\n          if (p.promoted)\n            _features[100 + i] = 1;\n          else\n            _features[125 + i] = 1;\n          break;\n\n        case DSPieceType::Rook:\n        case DSPieceType::Rook2:\n          if (p.promoted)\n            _features[150 + i] = 
1;\n          else\n            _features[175 + i] = 1;\n          break;\n\n        case DSPieceType::Pawn:\n        case DSPieceType::Pawn2:\n          if (p.promoted)\n            _features[200 + i] = 1;\n          else\n            _features[225 + i] = 1;\n          break;\n\n        default:\n          break;\n        }\n      } else {\n        switch (p.type) {\n        case DSPieceType::King:\n          _features[250 + i] = 1;\n          break;\n\n        case DSPieceType::Gold:\n        case DSPieceType::Gold2:\n          _features[275 + i] = 1;\n          break;\n\n        case DSPieceType::Silver:\n        case DSPieceType::Silver2:\n          if (p.promoted)\n            _features[300 + i] = 1;\n          else\n            _features[325 + i] = 1;\n          break;\n\n        case DSPieceType::Bishop:\n        case DSPieceType::Bishop2:\n          if (p.promoted)\n            _features[350 + i] = 1;\n          else\n            _features[375 + i] = 1;\n          break;\n\n        case DSPieceType::Rook:\n        case DSPieceType::Rook2:\n          if (p.promoted)\n            _features[400 + i] = 1;\n          else\n            _features[425 + i] = 1;\n          break;\n\n        case DSPieceType::Pawn:\n        case DSPieceType::Pawn2:\n          if (p.promoted)\n            _features[450 + i] = 1;\n          else\n            _features[475 + i] = 1;\n          break;\n\n        default:\n          break;\n        }\n      }\n    }\n\n    // 500 ~ 575\n    switch (repeat) {\n    case 1:\n      std::fill(_features.begin() + 500, _features.begin() + 525, 1);\n      break;\n    case 5:\n      std::fill(_features.begin() + 525, _features.begin() + 550, 1);\n      break;\n    case 9:\n      std::fill(_features.begin() + 550, _features.begin() + 575, 1);\n      break;\n    default:\n      break;\n    }\n\n    // prison w 575 ~ 625\n    // prison b 625 ~ 675\n    int tmp = 575;\n    for (int i = 0; i < 2; ++i) {\n      std::vector<DSPiece>::iterator it;\n      
for (it = chess[i].begin(); it != chess[i].end(); ++it) {\n        if (!(*it).pos.on_board()) {\n          switch ((*it).type) {\n          case DSPieceType::Gold:\n            std::fill(_features.begin() + tmp, _features.begin() + tmp + 5, 1);\n            break;\n          case DSPieceType::Silver:\n            std::fill(\n                _features.begin() + tmp + 5, _features.begin() + tmp + 10, 1);\n            break;\n          case DSPieceType::Bishop:\n            std::fill(\n                _features.begin() + tmp + 10, _features.begin() + tmp + 15, 1);\n            break;\n          case DSPieceType::Rook:\n            std::fill(\n                _features.begin() + tmp + 15, _features.begin() + tmp + 20, 1);\n            break;\n          case DSPieceType::Pawn:\n            std::fill(\n                _features.begin() + tmp + 20, _features.begin() + tmp + 25, 1);\n            break;\n          case DSPieceType::Gold2:\n            std::fill(\n                _features.begin() + tmp + 25, _features.begin() + tmp + 30, 1);\n            break;\n          case DSPieceType::Silver2:\n            std::fill(\n                _features.begin() + tmp + 30, _features.begin() + tmp + 35, 1);\n            break;\n          case DSPieceType::Bishop2:\n            std::fill(\n                _features.begin() + tmp + 35, _features.begin() + tmp + 40, 1);\n            break;\n          case DSPieceType::Rook2:\n            std::fill(\n                _features.begin() + tmp + 40, _features.begin() + tmp + 45, 1);\n            break;\n          case DSPieceType::Pawn2:\n            std::fill(\n                _features.begin() + tmp + 45, _features.begin() + tmp + 50, 1);\n            break;\n          default:\n            break;\n          }\n        }\n      }\n      tmp += 50;\n    }\n\n    // dice 675 ~ 700\n    if (dice == 5)\n      std::fill(_features.begin() + 675, _features.begin() + 700, 1);\n    else\n      std::fill(\n          _features.begin() + dice * 5, 
_features.begin() + dice * 5 + 5, 1);\n\n    // history 700 ~ 4900+700\n    std::copy(old.begin(), old.begin() + 4900, _features.begin() + 700);\n\n    // 5600 ~ 5625\n    std::fill(_features.begin() + 5600, _features.end(), turn);\n  }\n  // The action just decreases the distance and swaps the turn to play.\n  virtual void ApplyAction(const _Action& action) override {\n    DSMove m;\n    if ((GameStatus)_status ==\n        GameStatus::player1Turn) {  // 1 DiceWhite to move\n      // fprintf(stderr, \"DiceWhite \");\n      m.piece.color = DiceWhite;\n      m.next = DSPosition(action.GetY(), action.GetZ());\n      m.piece.type = z_to_type(action.GetX());\n      m.promote = z_promoted(action.GetX());\n\n      std::vector<DSPiece>::iterator it;\n      for (it = chess[DiceWhite].begin(); it != chess[DiceWhite].end(); ++it) {\n        if ((*it).type == m.piece.type)\n          m.piece.pos = (*it).pos;\n      }\n\n      play(m);\n\n      if ((GameStatus)_status == GameStatus::tie) {\n      }  // fprintf(stderr, \"draw \");  // draw\n      else if (!won(DiceWhite))\n        _status = GameStatus::player0Turn;  // DiceBlack turn\n      else\n        _status = GameStatus::player1Win;  // DiceWhite won\n    } else {                               // DiceBlack\n      // fprintf(stderr, \"DiceBlack \");\n      m.piece.color = DiceBlack;\n      m.next = DSPosition(action.GetY(), action.GetZ());\n      m.piece.type = z_to_type(action.GetX());\n      m.promote = z_promoted(action.GetX());\n\n      std::vector<DSPiece>::iterator it;\n      for (it = chess[DiceBlack].begin(); it != chess[DiceBlack].end(); ++it) {\n        if ((*it).type == m.piece.type)\n          m.piece.pos = (*it).pos;\n      }\n\n      play(m);\n\n      if ((GameStatus)_status == GameStatus::tie)\n        fprintf(stderr, \"draw \");  // draw\n      else if (!won(DiceBlack))\n        _status = GameStatus::player1Turn;  // DiceWhite turn\n      else\n        _status = GameStatus::player0Win;  // DiceBlack won\n    
}\n    findFeatures();\n    if (situation.size() == 4) {\n      situation.pop();\n      situation.push(hash);\n    } else\n      situation.push(hash);\n    if (forcedDice >= 0) {\n      assert(forcedDice > 0);\n      assert(forcedDice < 7);\n      dice = forcedDice - 1;\n      forcedDice = -1;\n    } else {\n      dice = _rng() % 6;\n    }\n    // TODO useless now.\n    _hash = dice + 1;  // This is useful for the interaction with human. From\n                       // now on, _hash represented the random dice.\n    findActions(turn);\n    fillFullFeatures();\n  }\n\n  // For this trivial example we just compare to random play\n  virtual void DoGoodAction() override {\n    DoRandomAction();\n  }\n};\n"
  },
  {
    "path": "src/games/einstein.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"../core/state.h\"\n\nclass StateForEinstein : public core::State {\n public:\n  class Piece {\n   public:\n    int color;\n    int type;  // 1~6\n    bool onboard;\n    int x, y;\n\n    Piece() {\n      color = 2;\n      type = 0;\n      x = y = -1;\n    }\n\n    Piece(int c, int t) {\n      color = c;\n      type = t;\n      onboard = false;\n      x = y = -1;\n    }\n\n    void setPiece(int c, int t, bool isonboard) {\n      color = c;\n      type = t;\n      onboard = isonboard;\n    }\n\n    void setPosition(int X, int Y) {\n      x = X;\n      y = Y;\n    }\n  };\n\n  class Move {\n   public:\n    int x, y;\n    int type;\n    Move() {\n      x = y = -1;\n      type = 0;\n    }\n    Move(int X, int Y, int p) {\n      x = X;\n      y = Y;\n      type = p;\n    }\n  };\n\n public:\n  const static int boardWidth = 5;\n  const static int boardHeight = 5;\n  Move p0_drop;\n  Piece player[2][6];\n  Piece board[5][5];\n  int dice;\n  int round;\n  std::vector<Move> moves;\n  //  unsigned long long HashArray[2][6][5][5];\n  unsigned long long HashTurn;\n\n  StateForEinstein(int seed)\n      : State(seed) {\n    _stochasticReset = true;\n  }\n  /*~StateForEinstein() {\n  }*/\n  virtual void Initialize() override {\n    _moves.clear();\n    // _hash = 2166136261u;\n    _hash = 0;\n    _status = GameStatus::player0Turn;\n    _featSize[0] = 14;  // 2 players * 6 pieces + 1 dice + 1 turn\n    _featSize[1] = boardHeight;\n    _featSize[2] = boardWidth;\n    _actionSize[0] = 6;  // 6 pieces\n    _actionSize[1] = boardHeight;\n    _actionSize[2] = boardWidth;\n    _features.clear();\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n    _stochastic = true;\n    // setFeatures(false, false, false, 0, 0, 
false);\n\n    gameInit();\n    // printCurrentBoard();\n\n    findFeature();\n    findActions();\n  }\n\n  void initHash() {\n    /*    for (int i = 0; i < 2; ++i)\n          for (int j = 0; j < 6; ++j)\n            for (int x = 0; x < boardWidth; ++x)\n              for (int y = 0; y < boardHeight; ++y) {\n                HashArray[i][j][x][y] = 0;\n                for (int k = 0; k < 64; ++k)\n                  if ((_rng() / (RAND_MAX + 1.0)) > 0.5)\n                    HashArray[i][j][x][y] |= (1ULL << k);\n              }\n        HashTurn = 0;\n        for (int k = 0; k < 64; ++k)\n          if ((_rng() / (RAND_MAX + 1.0)) > 0.5)\n            HashTurn |= (1ULL << k);*/\n  }\n\n  void gameInit() {\n    for (int j = 0; j < boardHeight; ++j)\n      for (int i = 0; i < boardWidth; ++i)\n        board[j][i].setPiece(2, 0, false);  // 2 = Empty\n\n    for (int i = 0; i < 6; ++i) {\n      player[0][i].setPiece(0, i + 1, false);\n      player[1][i].setPiece(1, i + 1, false);\n    }\n    if (forcedDice > 0) {\n      dice = forcedDice - 1;\n    } else {\n      dice = _rng() % 6;\n    }\n    _hash = dice;\n    round = 1;\n  }\n\n  virtual void setStateFromStr(const std::string& str) override {\n    // example: ABCDEF0000000000000abcdef\n    /* -> x1 x2 x3 x4 x5\n          x6 0 0 0 0\n          0 0 0 0 0\n          0 0 0 0 o1\n          o2 o3 o4 o5 o6\n    */\n    assert(str.length() == 26);\n    char turn = str[25];\n    _status = turn == '0' ? 
GameStatus::player0Turn : GameStatus::player1Turn;\n    int color = -1;\n    for (int i = 0; i < 25; i++) {\n      int t = -1;\n      int y = i / 5;\n      int x = i % 5;\n      char c = str[i];\n      if (c >= 'A' && c <= 'F') {\n        t = int(c) - int('A');\n        color = 1;\n      }\n      if (c >= 'a' && c <= 'f') {\n        t = int(c) - int('a');\n        color = 0;\n      }\n      if (t == -1) {\n        continue;\n      }\n      player[color][t].onboard = true;\n      player[color][t].setPosition(x, y);\n      board[y][x] = player[color][t];\n    }\n    if (forcedDice > 0) {\n      dice = forcedDice - 1;\n    } else {\n      dice = _rng() % 6;\n    }\n    _hash = dice;\n    round = 13;\n    findActions();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForEinstein>(*this);\n  }\n\n  virtual std::string stateDescription() const override {\n    std::string str;\n    str += \"  A |B |C |D |E \\n\";\n    for (int j = 0; j < boardHeight; j++) {\n      str += to_string(j + 1) + ' ';\n      for (int i = 0; i < boardWidth; i++) {\n        if (i > 0)\n          str += '|';\n        if (board[j][i].color == 0) {\n          str += 'x';\n          str += static_cast<char>(board[j][i].type + '0');\n        } else if (board[j][i].color == 1) {\n          str += 'o';\n          str += static_cast<char>(board[j][i].type + '0');\n        } else\n          str += \"  \";\n      }\n      str += '\\n';\n    }\n\n    return str;\n  }\n\n  virtual std::string actionsDescription() const override {\n    std::stringstream ss;\n    char c, p, x1, y1;\n    for (int i = 0; i < (int)_legalActions.size(); i++) {\n      const _Action& action = _legalActions[i];\n      c = (_status == GameStatus::player0Turn) ? 'x' : 'o';\n      p = static_cast<char>(action.GetX() + ((round <= 12) ? 
'0' : '1'));\n      x1 = static_cast<char>(action.GetY() + 'A');\n      y1 = static_cast<char>(action.GetZ() + '1');\n      ss << \"Action \" << i << \": \" << c << p << \" to \" << x1 << y1 << std::endl;\n    }\n    ss << \"\\nInput format : action index e.g. 0\\n\";\n\n    return ss.str();\n  }\n\n  virtual std::string actionDescription(const _Action& action) const override {\n    std::stringstream ss;\n    char c, p, x1, y1;\n    c = (_status == GameStatus::player0Turn) ? 'o' : 'x';\n    p = static_cast<char>(action.GetX() + ((round <= 12) ? '0' : '1'));\n    x1 = static_cast<char>(action.GetY() + 'A');\n    y1 = static_cast<char>(action.GetZ() + '1');\n    ss << c << p << \" to \" << x1 << y1;\n\n    return ss.str();\n  }\n\n  void findFeature() {\n    std::fill(_features.begin(), _features.end(), 0);\n    if (_status == GameStatus::player1Turn) {\n      // 0 ~ 150\n      for (int i = 0; i < 6; ++i) {\n        Piece p = player[0][i];\n        if (p.onboard)\n          _features[25 * p.type + p.y * 5 + p.x] = 1;\n      }\n      // 150 ~ 300\n      for (int i = 0; i < 6; ++i) {\n        Piece p = player[1][i];\n        if (p.onboard)\n          _features[25 * p.type + p.y * 5 + p.x + 150] = 1;\n      }\n    } else {\n      // 0 ~ 150\n      for (int i = 0; i < 6; ++i) {\n        Piece p = player[0][i];\n        if (p.onboard)\n          _features[25 * p.type + (4 - p.y) * 5 + (4 - p.x)] = 1;\n      }\n      // 150 ~ 300\n      for (int i = 0; i < 6; ++i) {\n        Piece p = player[1][i];\n        if (p.onboard)\n          _features[25 * p.type + (4 - p.y) * 5 + (4 - p.x) + 150] = 1;\n      }\n    }\n    // 300 ~ 325\n    if (dice == 5)\n      std::fill(_features.begin() + 300, _features.begin() + 325, 1);\n    else\n      std::fill(_features.begin() + 300 + dice * 5,\n                _features.begin() + 300 + dice * 5 + 5, 1);\n\n    // 325 ~\n    std::fill(_features.begin() + 325, _features.end(), (float)_status);\n    fillFullFeatures();\n  }\n\n  void 
legalMoves(int color, std::vector<Move>& moves) {\n    // fprintf(stderr, \"dice: %d\\n\", dice+1);\n    if (round <= 12) {\n      if (color == 0) {  // player0\n        int p = round >> 1;\n\n        for (int j = 0; j < 3; ++j) {\n          for (int i = 0; i < 3 - j; ++i) {\n            if (board[j][i].type == 0) {  // if empty\n              Move m(i, j, p + 1);        // p+1 = piece type\n              moves.push_back(m);\n            }\n          }\n        }\n\n      } else {\n        Move m(-p0_drop.y + 4, -p0_drop.x + 4, p0_drop.type);\n        moves.push_back(m);\n      }\n    } else {\n      if (player[color][dice].onboard) {\n        if (color == 0) {\n          short dx[] = {1, 1, 0};\n          short dy[] = {0, 1, 1};\n          for (int i = 0; i < 3; ++i) {\n            int x = dx[i] + player[color][dice].x;\n            int y = dy[i] + player[color][dice].y;\n            if (x < 5 && y < 5) {\n              Move m(x, y, player[color][dice].type);\n              moves.push_back(m);\n            }\n          }\n        } else {\n          short dx[] = {0, -1, -1};\n          short dy[] = {-1, -1, 0};\n          for (int i = 0; i < 3; ++i) {\n            int x = dx[i] + player[color][dice].x;\n            int y = dy[i] + player[color][dice].y;\n            if (x >= 0 && y >= 0) {\n              Move m(x, y, player[color][dice].type);\n              moves.push_back(m);\n            }\n          }\n        }\n      } else {\n        bool find = false;\n        for (int i = 1; i < 6; ++i) {\n          for (int j = 0, k = 1; j < 2; ++j, k *= -1) {\n            int closest = dice + i * k;\n            if (closest < 6 && closest >= 0 && player[color][closest].onboard) {\n              find = true;\n              if (color == 0) {\n                short dx[] = {1, 1, 0};\n                short dy[] = {0, 1, 1};\n                for (int ii = 0; ii < 3; ++ii) {\n                  int x = dx[ii] + player[color][closest].x;\n                  int y = dy[ii] + 
player[color][closest].y;\n                  if (x < 5 && y < 5) {\n                    Move m(x, y, player[color][closest].type);\n                    moves.push_back(m);\n                  }\n                }\n              } else {\n                short dx[] = {0, -1, -1};\n                short dy[] = {-1, -1, 0};\n                for (int ii = 0; ii < 3; ++ii) {\n                  int x = dx[ii] + player[color][closest].x;\n                  int y = dy[ii] + player[color][closest].y;\n                  if (x >= 0 && y >= 0) {\n                    Move m(x, y, player[color][closest].type);\n                    moves.push_back(m);\n                  }\n                }\n              }\n            }\n          }\n          if (find)\n            break;\n        }\n      }\n    }\n  }\n\n  void findActions() {\n    moves.clear();\n    if (_status == GameStatus::player0Turn) {\n      legalMoves(0, moves);\n\n    } else if (_status == GameStatus::player1Turn) {\n      legalMoves(1, moves);\n    }\n    // fprintf(stderr, \"round %d moves: %d\\n\", round, moves.size());\n\n    clearActions();\n    for (auto m : moves) {\n      // fprintf(stderr, \"%d: (%d, %d)\\n\", m.type, m.x, m.y);\n      addAction(m.type - 1, m.x, m.y);\n    }\n  }\n\n  virtual void ApplyAction(const _Action& action) override {\n    // fprintf(stderr, \"Apply action round: %d\\n\", round);\n    int color;\n    if (_status == GameStatus::player0Turn) {\n      color = 0;\n      _status = GameStatus::player1Turn;\n    } else {\n      color = 1;\n      _status = GameStatus::player0Turn;\n    }\n\n    int t = action.GetX();\n    assert(t < 6);\n    int x = action.GetY();\n    int y = action.GetZ();\n    assert(y < 6);\n    assert(x < 6);\n    // fprintf(stderr, \"get: %d %d %d\\n\", t, x, y);\n    if (round <= 12) {\n      p0_drop.type = t + 1;\n      p0_drop.x = x;\n      p0_drop.y = y;\n      player[color][t].onboard = true;\n    } else {\n      board[player[color][t].y][player[color][t].x] = 
Piece();\n    }\n\n    if (board[y][x].type != 0) {  // eat\n      player[board[y][x].color][board[y][x].type - 1].onboard = false;\n      player[board[y][x].color][board[y][x].type - 1].setPosition(-1, -1);\n      //      _hash ^= HashArray[board[y][x].color][board[y][x].type - 1][x][y];\n    }\n    player[color][t].onboard = true;\n    player[color][t].setPosition(x, y);\n    board[y][x] = player[color][t];\n    //    _hash ^= HashArray[color][t][x][y];\n    //    _hash ^= HashTurn;\n    // fprintf(stderr, \"(%d %d) t:%d c:%d\\n\", x, y, board[y][x].type,\n    // board[y][x].color);\n\n    if (color == 0 && x == 4 && y == 4)\n      _status = GameStatus::player0Win;\n    else if (color == 1 && x == 0 && y == 0)\n      _status = GameStatus::player1Win;\n    else {\n      round += 1;\n      if (forcedDice > 0) {\n        assert(forcedDice > 0);\n        assert(forcedDice < 7);\n        dice = forcedDice - 1;\n        forcedDice = -1;\n      } else {\n        dice = _rng() % 6;\n      }\n      _hash = dice;\n      findActions();\n    }\n    findFeature();\n    if (_legalActions.size() <= 0)\n      _status = (GameStatus)(3 + color);\n    // printCurrentBoard();\n    // fprintf(stderr, \"end Apply Action\\n\\n\");\n  }\n\n  virtual void DoGoodAction() override {\n    std::cerr << \"DoGoodAction\" << std::endl;\n    DoRandomAction();\n  }\n};\n"
  },
  {
    "path": "src/games/game_action.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#ifndef CZF_GAME_GAME_ACTION_H_\n#define CZF_GAME_GAME_ACTION_H_\n\nclass GameAction {};\n\n#endif  // CZF_GAME_GAME_ACTION_H_\n"
  },
  {
    "path": "src/games/game_base.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"game_base.h\"\n"
  },
  {
    "path": "src/games/game_base.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#ifndef CZF_GAME_GAME_BASE_H_\n#define CZF_GAME_GAME_BASE_H_\n\n#include \"game_action.h\"\n#include \"game_player.h\"\n#include \"game_state.h\"\n#include <string>\n#include <vector>\n\ntemplate <typename StateType, typename ActionType> class GameBase {\n public:\n  typedef ActionType Action;\n  typedef StateType State;\n  typedef std::vector<Action> History;\n\n protected:\n  State state_;\n  History history_;\n  PLAYER turn_player_;\n  bool is_terminal_;\n  PLAYER win_player_;\n\n public:\n  GameBase() {\n    Reset();\n  }\n  ~GameBase() {\n    ;\n  }\n  void Reset() {\n    state_.Reset();\n    history_.clear();\n    turn_player_ = PLAYER_0;\n    is_terminal_ = false;\n    win_player_ = PLAYER_NULL;\n  }\n  PLAYER GetTurnPlayer() const {\n    return turn_player_;\n  }\n  int GetGameLength() const {\n    return history_.size();\n  }\n  void SetTurnPlayer(PLAYER turn_player) {\n    turn_player_ = turn_player;\n  }\n  void GetHistory(History& history) const {\n    history = history_;\n  }\n  Action GetLastAction() {\n    if (history_.size() > 0)\n      return history_.back();\n    else\n      return Action();\n  }\n  PLAYER GetWinPlayer() {\n    return win_player_;\n  }\n\n  // pure virtual functions\n\n  // need to save win player if game is terminal\n  virtual bool PlayAction(Action action) = 0;\n  virtual bool IsTerminalState() = 0;\n  virtual bool IsLegalAction(Action action) = 0;\n  virtual std::vector<Action> GetLegalActions() = 0;\n  virtual std::vector<bool> GetIsLegalAction() = 0;\n};\n\n#endif  // CZF_GAME_GAME_BASE_H_\n"
  },
  {
    "path": "src/games/game_player.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#ifndef CZF_GAME_GAME_PLAYER_H_\n#define CZF_GAME_GAME_PLAYER_H_\n\nenum PLAYER {\n  PLAYER_0 = 0u,\n  PLAYER_1 = 1u,\n  PLAYER_NULL = 2u,  // if an action's player is null, it is illegal\n  PLAYER_SIZE = 3u\n};\n\ninline PLAYER operator!(PLAYER player) {\n  return (PLAYER)((unsigned int)player ^ 1);\n}\n\ninline PLAYER IntToPlayer(int i) {\n  if (i == 0)\n    return PLAYER_0;\n  else\n    return PLAYER_1;\n}\n\n#endif  // CZF_GAME_GAME_PLAYER_H_\n"
  },
  {
    "path": "src/games/game_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#ifndef CZF_GAME_GAME_STATE_H_\n#define CZF_GAME_GAME_STATE_H_\n\nclass GameState {};\n\n#endif  // CZF_GAME_GAME_STATE_H_\n"
  },
  {
    "path": "src/games/gomoku_swap2.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"gomoku_swap2.h\"\n\nnamespace GomokuSwap2 {\n\nvoid Game::initGame() {\n  board.initialize();\n  hands = 0;\n  winner = -1;\n}\n\nvoid Game::play(Move& m) {\n  assert(hands < maxHands);\n  hands += 1;\n\n  if (!m.isColorChanged) {\n    for (int x = 0; x < boardRadix; x++) {\n      for (int y = 0; y < boardRadix; y++) {\n        Chess chess = board.getChess(x, y);\n        if (chess == Chesses::white) {\n          board.setChess(x, y, Chesses::black);\n        } else if (chess == Chesses::black) {\n          board.setChess(x, y, Chesses::white);\n        }\n      }\n    }\n  }\n\n  board.setChess(m.x, m.y, m.chess);\n  board.turnHash();\n}\n\nbool Game::isWon(Move& m) {\n  Chess chess = board.getChess(m.x, m.y);\n  auto suck = [&](int dx, int dy) {\n    int count = 1;\n    int x = m.x + dx, y = m.y + dy;\n    while (board.getChess(x, y) == chess) {\n      count += 1;\n      x += dx;\n      y += dy;\n      if (Board::isPosInBoard(x, y))\n        break;\n    }\n    x = m.x - dx, y = m.y - dy;\n    while (board.getChess(x, y) == chess) {\n      count += 1;\n      x -= dx;\n      y -= dy;\n      if (Board::isPosInBoard(x, y))\n        break;\n    }\n    return count;\n  };\n\n  if (suck(0, 1) >= 5)\n    return true;\n  if (suck(1, 0) >= 5)\n    return true;\n  if (suck(1, 1) >= 5)\n    return true;\n  if (suck(1, -1) >= 5)\n    return true;\n  return false;\n}\n\nvoid Game::findLegalMoves(Player player) {\n  legalMovesCnt = 0;\n  Chess chess = playerToChess(player);\n  for (int x = 0; x < boardRadix; x++) {\n    for (int y = 0; y < boardRadix; y++) {\n      Move m = Move{x, y, chess, false};\n\n      if (board.getChess(x, y) == Chesses::empty) {\n        assert(legalMovesCnt < maxLegalMovesCnt);\n        legalMoves[legalMovesCnt] 
= m;\n        legalMovesCnt += 1;\n        if (hands == 3 || (hands == 5 && !isTurned)) {\n          m.isColorChanged = true;\n          assert(legalMovesCnt < maxLegalMovesCnt);\n          legalMoves[legalMovesCnt] = m;\n          legalMovesCnt += 1;\n        }\n      }\n    }\n  }\n}\n\nPlayer Game::chessToPlayer(Chess chess) {\n  if (chess == Chesses::black)\n    return isTurned ? Players::player1 : Players::player0;\n  else if (chess == Chesses::white)\n    return isTurned ? Players::player0 : Players::player1;\n  else\n    assert(chess == Chesses::black || chess == Chesses::white);\n  return -1;\n}\n\nChess Game::playerToChess(Player player) {\n  if (player == Players::player0)\n    return isTurned ? Chesses::white : Chesses::black;\n  else if (player == Players::player1)\n    return isTurned ? Chesses::black : Chesses::white;\n  else\n    assert(player == Players::player0 || player == Players::player1);\n  return 0xFF;\n}\n\ntemplate <typename R> void Game::setupBoard(const R& re) {\n  Board::setup({\"Empty\", \"Black\", \"White\"}, {\" \", \"●\", \"○\"}, re);\n}\n\nState::State(int seed)\n    : core::State(seed)\n    , Game() {\n  std::call_once(setupCalled, [&] { setupBoard(_rng); });\n}\n\nvoid State::Initialize() {\n  _moves.clear();\n\n  _featSize[0] = featuresSizeX;\n  _featSize[1] = featuresSizeY;\n  _featSize[2] = featuresSizeZ;\n\n  _actionSize[0] = 2;\n  _actionSize[1] = boardRadix;\n  _actionSize[2] = boardRadix;\n\n  _status = GameStatus::player0Turn;\n  _features.resize(featuresSize);\n\n  initGame();\n  findLegalMoves(Players::player0);\n  findActions();\n  findFeatures();\n  _hash = board.getHash();\n}\n\nunique_ptr<core::State> State::clone_() const {\n  return make_unique<State>(*this);\n}\n\nvoid State::ApplyAction(const _Action& action) {\n  if (!terminated()) {\n    Move m{};\n    Player nextPlayer = -1;\n    m.isColorChanged = action.GetX();\n    if (m.isColorChanged)\n      isTurned = true;\n    if (_status == GameStatus::player0Turn) 
{\n      m.chess = playerToChess(Players::player0);\n      nextPlayer = Players::player1;\n      _status = GameStatus::player1Turn;\n    } else if (_status == GameStatus::player1Turn) {\n      m.chess = playerToChess(Players::player1);\n      nextPlayer = Players::player0;\n      _status = GameStatus::player0Turn;\n    }\n    m.x = action.GetY();\n    m.y = action.GetZ();\n    play(m);\n    if (canGoNext(m)) {\n      findLegalMoves(nextPlayer);\n      findActions();\n      findFeatures();\n    }\n  }\n  _hash = board.getHash();\n}\n\nbool State::canGoNext(Move& m) {\n  if (isWon(m)) {\n    Player winner = chessToPlayer(m.chess);\n    if (winner == Players::player0)\n      _status = GameStatus::player0Win;\n    else if (winner == Players::player1)\n      _status = GameStatus::player1Win;\n    else\n      assert(winner == Players::player0 || winner == Players::player1);\n    return false;\n  }\n  return true;\n}\n\nvoid State::DoGoodAction() {\n  DoRandomAction();\n}\n\nvoid State::findActions() {\n  clearActions();\n  for (int i = 0; i < legalMovesCnt; i++) {\n    Move& m = legalMoves[i];\n    addAction(m.isColorChanged, m.x, m.y);\n  }\n}\n\nvoid State::findFeatures() {\n  std::fill(_features.begin(), _features.end(), 0.0);\n  auto* f = _features.data();\n  for (int c = 0; c < this->chesses; c++) {\n    Chess chess = static_cast<Chess>(c + 1);\n    for (int i = 0; i < this->boardSize; i++) {\n      if (this->board.getChess(i) == chess)\n        *f = 1.0;\n      f++;\n    }\n  }\n  fillFullFeatures();\n}\n\n}  // namespace GomokuSwap2\n"
  },
  {
    "path": "src/games/gomoku_swap2.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n#include <cstdint>\n#include <cstdio>\n#include <memory>\n#include <mutex>\n#include <vector>\n\n#include \"../core/state.h\"\n#include \"commons/chessboard.h\"\n\nusing namespace std;\n\nnamespace GomokuSwap2 {\n\ntypedef int Player;\nclass Players {\n public:\n  static constexpr Player player0 = 0;\n  static constexpr Player player1 = 1;\n};\n\nclass Chesses {\n public:\n  static constexpr Chess empty = 0;\n  static constexpr Chess black = 1;\n  static constexpr Chess white = 2;\n};\n\nclass Move {\n public:\n  int x, y;\n  Chess chess;\n  bool isColorChanged;\n};\n\nclass Game {\n public:\n  void initGame();\n  void play(Move& m);\n  bool isBoardFulled();\n  bool isWon(Move& m);\n  void findLegalMoves(Player player);\n  Player chessToPlayer(Chess chess);\n  Chess playerToChess(Player player);\n\n  template <typename R> static void setupBoard(const R& re);\n\n  static constexpr int chesses = 2;\n  static constexpr int boardRadix = 15;\n  static constexpr int boardSize = boardRadix * boardRadix;\n  static constexpr int maxLegalMovesCnt = 2 * boardRadix * boardRadix;\n  static constexpr int maxHands = maxLegalMovesCnt;\n  static inline std::once_flag setupCalled;\n\n  using Board = Chessboard<boardRadix, boardRadix>;\n  Board board;\n  int hands;\n  bool isTurned;\n  Player winner;\n  Move legalMoves[maxLegalMovesCnt];\n  int legalMovesCnt;\n};\n\nclass State : public core::State, public Game {\n public:\n  State(int seed);\n  void Initialize() override;\n  unique_ptr<core::State> clone_() const override;\n  void ApplyAction(const ::_Action& action) override;\n  void DoGoodAction() override;\n  // void printCurrentBoard() const override;\n  // string stateDescription() const override;\n  // std::string 
actionDescription(const ::_Action& action) const override;\n  // string actionsDescription() override;\n  // int parseAction(const string& str) override;\n\n private:\n  bool canGoNext(Move& m);\n  void findActions();\n  void findFeatures();\n\n  static constexpr int timesteps = 2;\n  static constexpr size_t featuresSizeX = chesses * timesteps + 1;\n  static constexpr size_t featuresSizeY = boardRadix;\n  static constexpr size_t featuresSizeZ = boardRadix;\n  static constexpr size_t featuresSize =\n      featuresSizeX * featuresSizeY * featuresSizeZ;\n};\n\n}  // namespace GomokuSwap2\n"
  },
  {
    "path": "src/games/havannah.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"game_player.h\"\n\n#include <array>\n#include <cassert>\n#include <cstring>\n#include <optional>\n#include <random>\n#include <set>\n\nnamespace Havannah {\n\n// For Havannah, SIZE is the edge size of the board. FULLSIZE is 2*SIZE - 1\n// and the number of playable cells is FULLSIZE - SIZE*(SIZE-1).\n\nenum Color { COLOR_BLACK, COLOR_WHITE, COLOR_NONE };\n\nusing Cell = std::pair<int, int>;  // (i, j) in [0, FULLSIZE) x [0, FULLSIZE)\n\nconstexpr int fullsize(int size) {\n  return 2 * size - 1;\n}\n\ntemplate <int SIZE> class Hash {\n protected:\n  unsigned long long _array[2][fullsize(SIZE)][fullsize(SIZE)];\n  unsigned long long _turn;\n  unsigned long long _value;\n\n public:\n  Hash();\n  void init();\n  void updateArray(int color, int j, int i);\n  void updateTurn();\n  unsigned long long getValue() const;\n};\n\nstruct PathInfo {\n\n  // Minimal index, in a path array, to a connected path.\n  // Can index itself.\n  int _mainPathIndex;\n\n  Color _color;\n\n  unsigned _borders;\n  unsigned _corners;\n\n  PathInfo() = default;\n\n  PathInfo(int index, Color color, unsigned borders, unsigned corners)\n      : _mainPathIndex(index)\n      , _color(color)\n      , _borders(borders)\n      , _corners(corners) {\n  }\n};\n\ntemplate <int SIZE, bool PIE> class Board {\n\n protected:\n  int _nbFullIndices;\n  int _nbIndices;\n\n  Color _currentColor;\n  Color _winnerColor;\n  bool _hasPie;\n\n  std::optional<int> _lastIndex;\n  int _nbEmptyIndices;\n\n  // neighbours of each cell (indices)\n  // end value: -1\n  std::array<std::array<int, 7>, fullsize(SIZE) * fullsize(SIZE)>\n      _neighboursBoard;\n\n  static inline Hash<SIZE> _hash;\n\n public:  // TODO getter ?\n  // PathInfo of the paths indexed from 
_pathBoard\n  int _pathsEnd;\n  std::array<PathInfo, fullsize(SIZE) * fullsize(SIZE)> _paths;\n\n  // path of each cell (index in _paths)\n  std::array<int, fullsize(SIZE) * fullsize(SIZE)> _pathBoard;\n\n public:\n  Board();\n  bool canPie() const;\n\n  Color getCurrentColor() const;\n  Color getWinnerColor() const;\n  PLAYER colorToPlayer(Color color) const;\n  PLAYER getCurrentPlayer() const;\n  PLAYER getWinnerPlayer() const;\n  bool isGameFinished() const;\n  std::optional<int> getLastIndex() const;\n\n  static Cell convertIndexToCell(int index);\n  static int convertCellToIndex(const Cell& refCell);\n\n  unsigned long long getHashValue() const;\n\n  Color getColorAtIndex(int index) const;\n\n protected:\n  void getPathIndexAndColorAtIndex(int index,\n                                   int& pathIndex,\n                                   Color& color) const;\n\n  ////////////////////////////////////////////////////////////\n  // Havannah-specific\n  ////////////////////////////////////////////////////////////\n\n protected:\n  int _winningCycle;\n\n public:\n  void reset();\n  void play(int index);\n\n  bool isValidCell(const Cell& refCell) const;\n  bool isValidIndex(int index) const;\n\n  std::vector<int> findLegalIndices() const;\n  std::vector<int> findWinnerPath() const;\n\n protected:\n  unsigned computeBorders(int index) const;\n  unsigned computeCorners(int index) const;\n\n  bool isWinningPath(const PathInfo& path, int pathIndex, int cellIndex);\n  // isCycle: assume _paths[cellIndex] == pathIndex\n  bool isCycle(int pathIndex, int cellIndex) const;\n  int computeNbOnes(unsigned f) const;\n  std::vector<int> findPathIndices(int pathIndex) const;\n  int computeNbNeighbours(int cellIndex, Color color) const;\n  bool detectHole(const std::vector<int>& indices) const;\n};\n\n}  // namespace Havannah\n\n///////////////////////////////////////////////////////////////////////////////\n// 
Havannah::Hash\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int SIZE> Havannah::Hash<SIZE>::Hash() {\n  for (int color = 0; color < 2; color++)\n    for (int j = 0; j < fullsize(SIZE); j++)\n      for (int i = 0; i < fullsize(SIZE); i++) {\n        _array[color][j][i] = 0;\n        for (int k = 0; k < 64; k++)\n          if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n            _array[color][j][i] |= (1ULL << k);\n      }\n  _turn = 0;\n  for (int k = 0; k < 64; k++)\n    if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n      _turn |= (1ULL << k);\n}\n\ntemplate <int SIZE> void Havannah::Hash<SIZE>::init() {\n  _value = 0;\n}\n\ntemplate <int SIZE>\nvoid Havannah::Hash<SIZE>::updateArray(int color, int j, int i) {\n  _value ^= _array[color][j][i];\n}\n\ntemplate <int SIZE> void Havannah::Hash<SIZE>::updateTurn() {\n  _value ^= _turn;\n}\n\ntemplate <int SIZE> unsigned long long Havannah::Hash<SIZE>::getValue() const {\n  return _value;\n}\n\n///////////////////////////////////////////////////////////////////////////////\n// Havannah::Board\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int SIZE, bool PIE> Havannah::Board<SIZE, PIE>::Board() {\n  _hash.init();\n}\n\ntemplate <int SIZE, bool PIE>\nHavannah::Color Havannah::Board<SIZE, PIE>::getCurrentColor() const {\n  return _currentColor;\n}\n\ntemplate <int SIZE, bool PIE>\nHavannah::Color Havannah::Board<SIZE, PIE>::getWinnerColor() const {\n  return _winnerColor;\n}\n\ntemplate <int SIZE, bool PIE>\nPLAYER Havannah::Board<SIZE, PIE>::colorToPlayer(Color color) const {\n  if (color == COLOR_NONE)\n    return PLAYER_NULL;\n  else if (color == COLOR_BLACK)\n    return _hasPie ? PLAYER_1 : PLAYER_0;\n  else\n    return _hasPie ? 
PLAYER_0 : PLAYER_1;\n}\n\ntemplate <int SIZE, bool PIE>\nPLAYER Havannah::Board<SIZE, PIE>::getCurrentPlayer() const {\n  return colorToPlayer(_currentColor);\n}\n\ntemplate <int SIZE, bool PIE>\nPLAYER Havannah::Board<SIZE, PIE>::getWinnerPlayer() const {\n  return colorToPlayer(_winnerColor);\n}\n\ntemplate <int SIZE, bool PIE>\nbool Havannah::Board<SIZE, PIE>::isGameFinished() const {\n  return _nbEmptyIndices == 0 or _winnerColor != COLOR_NONE;\n}\n\ntemplate <int SIZE, bool PIE>\nstd::optional<int> Havannah::Board<SIZE, PIE>::getLastIndex() const {\n  return _lastIndex;\n}\n\ntemplate <int SIZE, bool PIE> bool Havannah::Board<SIZE, PIE>::canPie() const {\n  return PIE and _nbEmptyIndices == _nbIndices - 1 and not _hasPie;\n}\n\ntemplate <int SIZE, bool PIE>\nHavannah::Cell Havannah::Board<SIZE, PIE>::convertIndexToCell(int index) {\n  int i = index / fullsize(SIZE);\n  int j = index % fullsize(SIZE);\n  return Cell(i, j);\n}\n\ntemplate <int SIZE, bool PIE>\nint Havannah::Board<SIZE, PIE>::convertCellToIndex(const Cell& refCell) {\n  return refCell.first * fullsize(SIZE) + refCell.second;\n}\n\ntemplate <int SIZE, bool PIE>\nunsigned long long Havannah::Board<SIZE, PIE>::getHashValue() const {\n  return _hash.getValue();\n}\n\ntemplate <int SIZE, bool PIE>\nvoid Havannah::Board<SIZE, PIE>::getPathIndexAndColorAtIndex(\n    int index, int& pathIndex, Color& color) const {\n\n  assert(index >= 0);\n  assert(index < _nbFullIndices);\n\n  // get path index from board\n  pathIndex = _pathBoard[index];\n\n  assert(pathIndex >= 0);\n  assert(pathIndex < _pathsEnd);\n\n  // get color from paths\n  color = _paths[pathIndex]._color;\n}\n\ntemplate <int SIZE, bool PIE>\nHavannah::Color Havannah::Board<SIZE, PIE>::getColorAtIndex(int index) const {\n  int pathIndex;\n  Color color;\n  getPathIndexAndColorAtIndex(index, pathIndex, color);\n  return color;\n}\n\n////////////////////////////////////////////////////////////\n// 
Havannah-specific\n////////////////////////////////////////////////////////////\n\ntemplate <int SIZE, bool PIE> void Havannah::Board<SIZE, PIE>::reset() {\n\n  _nbFullIndices =\n      fullsize(SIZE) * fullsize(SIZE);  // nb indices for full hex board\n  _nbIndices = _nbFullIndices - (SIZE - 1) * SIZE;\n  _nbEmptyIndices = _nbIndices;  // nb indices for havannah board\n\n  _winningCycle = 0;\n\n  _currentColor = COLOR_BLACK;\n  _winnerColor = COLOR_NONE;\n  _hasPie = false;\n\n  _lastIndex.reset();\n\n  // _neighboursBoard\n  // precompute all valid neighbours of each cell\n  for (int i = 0; i < fullsize(SIZE); i++) {\n    for (int j = 0; j < fullsize(SIZE); j++) {\n      int index = convertCellToIndex(Cell(i, j));\n      int k = 0;\n      std::array<Cell, 6> neighbours = {{Cell(i - 1, j), Cell(i - 1, j + 1),\n                                         Cell(i, j - 1), Cell(i, j + 1),\n                                         Cell(i + 1, j - 1), Cell(i + 1, j)}};\n      for (const Cell& refCell : neighbours) {\n        if (isValidCell(refCell)) {\n          _neighboursBoard[index][k] = convertCellToIndex(refCell);\n          k++;\n        }\n      }\n      _neighboursBoard[index][k] = -1;\n    }\n  }\n\n  // _paths\n  // no initial path\n  // path 0 for empty cells\n  _paths[0] = PathInfo(0, COLOR_NONE, 0, 0);\n  _pathsEnd = 1;\n\n  // _pathBoard\n  // set all cells to 0 (empty path)\n  _pathBoard.fill(0);\n}\n\ntemplate <int SIZE, bool PIE> void Havannah::Board<SIZE, PIE>::play(int index) {\n\n  assert(isValidIndex(index));\n  assert(not isGameFinished());\n\n  if (_lastIndex and index == *_lastIndex) {\n    assert(canPie());\n    _hasPie = true;\n\n    /*\n    // TODO player or color in hash ?\n    Cell cell = convertIndexToCell(index);\n    _hash.updateArray(_currentPlayer, cell.second, cell.first);\n    _hash.updateTurn();\n     */\n\n  } else {\n    assert(_pathBoard[index] == 0);\n\n    // find previous path & cell at index\n    int boardPathIndex;\n    Color 
boardColor;\n    getPathIndexAndColorAtIndex(index, boardPathIndex, boardColor);\n\n    // if board cell is empty, update board\n    if (boardColor == COLOR_NONE) {\n\n      // update hash\n      int color = getCurrentColor() == COLOR_BLACK ? 0 : 1;\n      Cell cell = convertIndexToCell(index);\n      _hash.updateArray(color, cell.second, cell.first);\n      _hash.updateTurn();\n\n      // cell data\n      int mainPathIndex = _pathsEnd;\n      int borders = computeBorders(index);\n      int corners = computeCorners(index);\n\n      // find all connected paths\n      std::set<int> neighbourMainPathIndices;\n      for (int neighbourIndex : _neighboursBoard[index]) {\n        if (neighbourIndex == -1)\n          break;\n        int neighbourPathIndex;\n        Color neighbourColor;\n        getPathIndexAndColorAtIndex(\n            neighbourIndex, neighbourPathIndex, neighbourColor);\n        if (neighbourColor == _currentColor) {\n          int neighbourMain = _paths[neighbourPathIndex]._mainPathIndex;\n          const PathInfo& neighbourPath = _paths[neighbourMain];\n          // add neigbour in set\n          neighbourMainPathIndices.insert(neighbourMain);\n          // update cell data\n          borders |= neighbourPath._borders;\n          corners |= neighbourPath._corners;\n          mainPathIndex = std::min(mainPathIndex, neighbourMain);\n        }\n      }\n\n      // if the cell is not connected to any existing path, then create a\n      // new path\n      if (neighbourMainPathIndices.empty()) {\n        _paths[_pathsEnd] =\n            PathInfo(_pathsEnd, _currentColor, borders, corners);\n        _pathsEnd++;\n        _pathBoard[index] = mainPathIndex;\n      }\n      // if the cell is connected to an existing path, then update paths\n      // and check end of game\n      else {\n        // update main path\n        PathInfo& mainPath = _paths[mainPathIndex];\n        mainPath._borders |= borders;\n        mainPath._corners |= corners;\n\n        // update 
other paths\n        neighbourMainPathIndices.erase(mainPathIndex);\n        if (not neighbourMainPathIndices.empty()) {\n          for (int k = mainPathIndex + 1; k < _pathsEnd; k++) {\n            int mainK = _paths[k]._mainPathIndex;\n            auto iter = neighbourMainPathIndices.find(mainK);\n            if (iter != neighbourMainPathIndices.end())\n              _paths[k] = mainPath;\n          }\n        }\n        _pathBoard[index] = mainPathIndex;\n\n        // update winner\n        if (isWinningPath(mainPath, mainPathIndex, index))\n          _winnerColor = _currentColor;\n      }\n\n      // end turn and prepare for next one\n      _nbEmptyIndices--;\n      _lastIndex = index;\n      _currentColor = _currentColor == COLOR_BLACK ? COLOR_WHITE : COLOR_BLACK;\n    }\n  }\n}\n\ntemplate <int SIZE, bool PIE>\nbool Havannah::Board<SIZE, PIE>::isValidCell(const Cell& refCell) const {\n  int i = refCell.first;\n  int j = refCell.second;\n  return i >= 0 and i < fullsize(SIZE) and j >= 0 and j < fullsize(SIZE) and\n         i + j >= SIZE - 1 and i + j <= 3 * SIZE - 3;\n}\n\ntemplate <int SIZE, bool PIE>\nbool Havannah::Board<SIZE, PIE>::isValidIndex(int index) const {\n  Cell c = convertIndexToCell(index);\n  return isValidCell(c);\n}\n\ntemplate <int SIZE, bool PIE>\nstd::vector<int> Havannah::Board<SIZE, PIE>::findLegalIndices() const {\n  std::vector<int> emptyIndices;\n  emptyIndices.reserve(_nbEmptyIndices + 1);\n  for (int k = 0; k < _nbFullIndices; k++)\n    if (isValidIndex(k) and _pathBoard[k] == 0)\n      emptyIndices.push_back(k);\n  if (canPie())\n    emptyIndices.push_back(*_lastIndex);\n  return emptyIndices;\n}\n\ntemplate <int SIZE, bool PIE>\nstd::vector<int> Havannah::Board<SIZE, PIE>::findWinnerPath() const {\n\n  assert(_winnerColor != COLOR_NONE);\n\n  // find winning path\n  int winPathIndex;\n  if (_winningCycle != 0)\n    winPathIndex = _winningCycle;\n  else {\n    winPathIndex = 1;\n    while (true) {\n      const PathInfo& path = 
_paths[winPathIndex];\n      if (computeNbOnes(path._borders) >= 3)\n        break;\n      if (computeNbOnes(path._corners) >= 2)\n        break;\n      winPathIndex++;\n    }\n  }\n\n  assert(_paths[winPathIndex]._color == _winnerColor);\n\n  // find all indices connected to winning path\n  return findPathIndices(winPathIndex);\n}\n\ntemplate <int SIZE, bool PIE>\nbool Havannah::Board<SIZE, PIE>::isWinningPath(const PathInfo& path,\n                                               int pathIndex,\n                                               int cellIndex) {\n\n  // test if path is connected to 3 borders\n  if (computeNbOnes(path._borders) >= 3) {\n    return true;\n  }\n\n  // test if path is connected to 2 corners\n  if (computeNbOnes(path._corners) >= 2) {\n    return true;\n  }\n\n  // test if path is a cycle\n  if (isCycle(pathIndex, cellIndex)) {\n    _winningCycle = pathIndex;\n    return true;\n  }\n\n  return false;\n}\n\ntemplate <int SIZE, bool PIE>\nbool Havannah::Board<SIZE, PIE>::isCycle(int pathIndex, int cellIndex) const {\n\n  Color currentColor = _paths[pathIndex]._color;\n\n  // compute full path\n  std::vector<int> indices = findPathIndices(pathIndex);\n\n  // check if full path has 6 cells at least\n  if (indices.size() < 6)\n    return false;\n\n  // check if cell is connected to two previous cells at least\n  if (computeNbNeighbours(cellIndex, currentColor) < 2)\n    return false;\n\n  // detect interior point\n  for (int index : indices)\n    if (computeNbNeighbours(index, currentColor) == 6)\n      return true;\n\n  // detect hole\n  return detectHole(indices);\n}\n\ntemplate <int SIZE, bool PIE>\nunsigned Havannah::Board<SIZE, PIE>::computeBorders(int index) const {\n  unsigned borders = 0;\n  Cell c = convertIndexToCell(index);\n  if (isValidCell(c)) {\n    int i = c.first;\n    int j = c.second;\n    int e1 = SIZE - 1;\n    int s1 = fullsize(SIZE) - 1;\n    if (i == 0 and e1 < j and j < s1)\n      borders += 1;\n    if (0 < i and i < e1 
and j == s1)\n      borders += 2;\n    if (i + j == 3 * e1 and i < s1 and j < s1)\n      borders += 4;\n    if (i == s1 and 0 < j and j < e1)\n      borders += 8;\n    if (e1 < i and i < s1 and j == 0)\n      borders += 16;\n    if (i + j == e1 and i > 0 and j > 0)\n      borders += 32;\n  }\n  return borders;\n}\n\ntemplate <int SIZE, bool PIE>\nunsigned Havannah::Board<SIZE, PIE>::computeCorners(int index) const {\n  unsigned corners = 0;\n  Cell c = convertIndexToCell(index);\n  if (isValidCell(c)) {\n    int i = c.first;\n    int j = c.second;\n    int e1 = SIZE - 1;\n    int s1 = fullsize(SIZE) - 1;\n    if (i == 0 and j == e1)\n      corners += 1;\n    if (i == 0 and j == s1)\n      corners += 2;\n    if (i == e1 and j == s1)\n      corners += 4;\n    if (i == s1 and j == e1)\n      corners += 8;\n    if (i == s1 and j == 0)\n      corners += 16;\n    if (i == e1 and j == 0)\n      corners += 32;\n  }\n  return corners;\n}\n\ntemplate <int SIZE, bool PIE>\nint Havannah::Board<SIZE, PIE>::computeNbOnes(unsigned f) const {\n  int n = f & 1u;\n  f >>= 1;\n  n += f & 1u;\n  f >>= 1;\n  n += f & 1u;\n  f >>= 1;\n  n += f & 1u;\n  f >>= 1;\n  n += f & 1u;\n  f >>= 1;\n  n += f & 1u;\n  return n;\n}\n\ntemplate <int SIZE, bool PIE>\nstd::vector<int> Havannah::Board<SIZE, PIE>::findPathIndices(\n    int pathIndex) const {\n  std::vector<int> indices;\n  indices.reserve(2 * fullsize(SIZE));\n  for (int k = 0; k < _nbFullIndices; k++) {\n    int pathIndexOfK = _pathBoard[k];\n    int mainPathIndexOfK = _paths[pathIndexOfK]._mainPathIndex;\n    if (mainPathIndexOfK == pathIndex)\n      indices.push_back(k);\n  }\n  return indices;\n}\n\ntemplate <int SIZE, bool PIE>\nint Havannah::Board<SIZE, PIE>::computeNbNeighbours(int cellIndex,\n                                                    Color color) const {\n  int nbNeighbours = 0;\n  for (int neighbourIndex : _neighboursBoard[cellIndex]) {\n    if (neighbourIndex == -1)\n      break;\n    if 
(getColorAtIndex(neighbourIndex) == color)\n      nbNeighbours++;\n  }\n  return nbNeighbours;\n}\n\ntemplate <int SIZE, bool PIE>\nbool Havannah::Board<SIZE, PIE>::detectHole(\n    const std::vector<int>& indices) const {\n\n  std::vector<Cell> cells;\n  cells.reserve(indices.size());\n\n  for (int i : indices)\n    cells.emplace_back(convertIndexToCell(i));\n\n  // find bounding box\n  int imin = fullsize(SIZE);\n  int jmin = fullsize(SIZE);\n  int imax = 0;\n  int jmax = 0;\n  for (const Cell& c : cells) {\n    imin = std::min(imin, c.first);\n    imax = std::max(imax, c.first);\n    jmin = std::min(jmin, c.second);\n    jmax = std::max(jmax, c.second);\n  }\n\n  // reset data\n  int data[fullsize(SIZE) + 2][fullsize(SIZE) + 2];\n  std::memset((void*)data, 0,\n              sizeof(int) * (fullsize(SIZE) + 2) * (fullsize(SIZE) + 2));\n  int di = imax - imin + 3;\n  int dj = jmax - jmin + 3;\n  for (int i = 0; i < di; i++) {\n    data[i][0] = 1;\n    data[i][dj - 1] = 1;\n  }\n  for (int j = 0; j < dj; j++) {\n    data[0][j] = 1;\n    data[di - 1][j] = 1;\n  }\n\n  // write object\n  for (const Cell& c : cells) {\n    int i = c.first - imin + 1;\n    int j = c.second - jmin + 1;\n    data[i][j] = -1;\n  }\n\n  // propagate background\n  auto fMaxNeighbour = [&data](int i, int j) {\n    int d = data[i][j];\n    if (d >= 0) {\n      d = std::max(d, data[i - 1][j]);\n      d = std::max(d, data[i - 1][j + 1]);\n      d = std::max(d, data[i][j - 1]);\n      d = std::max(d, data[i][j + 1]);\n      d = std::max(d, data[i + 1][j - 1]);\n      d = std::max(d, data[i + 1][j]);\n    }\n    return d;\n  };\n\n  bool hasChanged = true;\n  while (hasChanged) {\n    hasChanged = false;\n\n    for (int i = 1; i < di - 1; i++) {\n      for (int j = 1; j < dj - 1; j++) {\n        int d = fMaxNeighbour(i, j);\n        if (data[i][j] != d) {\n          data[i][j] = d;\n          hasChanged = true;\n        }\n      }\n    }\n\n    for (int i = di - 2; i > 0; i--) {\n      for (int j 
= dj - 2; j > 0; j--) {\n        int d = fMaxNeighbour(i, j);\n        if (data[i][j] != d) {\n          data[i][j] = d;\n          hasChanged = true;\n        }\n      }\n    }\n  }\n\n  // check initial background\n  for (int i = 0; i < di; i++)\n    for (int j = 0; j < dj; j++)\n      if (data[i][j] == 0)\n        return true;\n\n  return false;\n}\n"
  },
  {
    "path": "src/games/havannah_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"../core/state.h\"\n#include \"havannah.h\"\n\n#include <algorithm>\n#include <chrono>\n\nnamespace Havannah {\n\ntemplate <int SIZE> class Action : public ::_Action {\n public:\n  Action(int i, int j, int indexInActions);\n};\n\ntemplate <int SIZE, bool PIE, bool EXTENDED> class State : public core::State {\n private:\n  Board<SIZE, PIE> _board;\n\n public:\n  State(int seed);\n  // State(int seed, int history, bool turnFeatures);\n  void findActions();\n  void Initialize() override;\n  void ApplyAction(const _Action& action) override;\n  void DoGoodAction() override;\n  std::unique_ptr<core::State> clone_() const override;\n  std::string stateDescription() const override;\n  std::string actionDescription(const _Action& action) const override;\n  std::string actionsDescription() const override;\n  int parseAction(const std::string& str) const override;\n  virtual int getCurrentPlayerColor() const override;\n};\n}  // namespace Havannah\n\n///////////////////////////////////////////////////////////////////////////////\n// Havannah::Action\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int SIZE>\nHavannah::Action<SIZE>::Action(int i, int j, int indexInActions) {\n  _loc[0] = 0;\n  _loc[1] = i;\n  _loc[2] = j;\n  _hash = uint32_t(i * fullsize(SIZE) + j);\n  _i = indexInActions;  // (position in _legalActions)\n}\n\n///////////////////////////////////////////////////////////////////////////////\n// Havannah::State\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nHavannah::State<SIZE, PIE, EXTENDED>::State(int seed)\n    : core::State(seed) {\n}\n\ntemplate <int SIZE, bool PIE, bool 
EXTENDED>\nvoid Havannah::State<SIZE, PIE, EXTENDED>::findActions() {\n  auto legalIndices = _board.findLegalIndices();\n  clearActions();\n  for (unsigned k = 0; k < legalIndices.size(); ++k) {\n    auto c = _board.convertIndexToCell(legalIndices[k]);\n    addAction(0, c.first, c.second);\n  }\n}\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nvoid Havannah::State<SIZE, PIE, EXTENDED>::Initialize() {\n  _board.reset();\n  _moves.clear();\n  _hash = 0;\n  _status = GameStatus::player0Turn;\n\n  // features\n  _featSize = {EXTENDED ? 27 : 3, fullsize(SIZE), fullsize(SIZE)};\n  _features =\n      std::vector<float>(_featSize[0] * _featSize[1] * _featSize[2], 0.f);\n\n  for (int k = 0; k < fullsize(SIZE) * fullsize(SIZE); k++)\n    _features[2 * fullsize(SIZE) * fullsize(SIZE) + k] = _board.isValidIndex(k);\n\n  fillFullFeatures();\n\n  // actions\n  _actionSize = {1, fullsize(SIZE), fullsize(SIZE)};\n  findActions();\n}\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nvoid Havannah::State<SIZE, PIE, EXTENDED>::ApplyAction(const _Action& action) {\n\n  assert(not _board.isGameFinished());\n\n  // find board move from action\n  int i = action.GetY();\n  int j = action.GetZ();\n  int index = _board.convertCellToIndex(Cell(i, j));\n  std::optional<int> lastIndex = _board.getLastIndex();\n\n  // update features\n  if (not lastIndex or *lastIndex != index) {\n    Color currentColor = _board.getCurrentColor();\n    _features[((currentColor * fullsize(SIZE)) + i) * fullsize(SIZE) + j] = 1.f;\n  }\n\n  // add connections to borders/corners\n  if (EXTENDED) {\n    const int fs2 = fullsize(SIZE) * fullsize(SIZE);\n    const unsigned mask = 1;\n    for (int k = 0; k < fs2; k++) {\n      if (_board.isValidIndex(k)) {\n\n        int iPath = _board._pathBoard[k];\n        if (iPath != 0) {\n          assert(iPath < _board._pathsEnd);\n          const auto& pathInfo = _board._paths[iPath];\n\n          unsigned borders = pathInfo._borders;\n          Color color = 
_board.getColorAtIndex(k);\n          for (int iBorder = 0; iBorder < 6; iBorder++) {\n            _features[(2 * iBorder + color + 3) * fs2 + k] =\n                (borders >> iBorder) & mask;\n          }\n\n          unsigned corners = pathInfo._corners;\n          for (int iCorner = 0; iCorner < 6; iCorner++) {\n            _features[(2 * iCorner + 12 + color + 3) * fs2 + k] =\n                (corners >> iCorner) & mask;\n          }\n        }\n      }\n    }\n  }\n\n  // play move\n  _board.play(index);\n\n  // update game status\n  if (_board.isGameFinished()) {\n    PLAYER winner = _board.getWinnerPlayer();\n    if (winner == PLAYER_0)\n      _status = GameStatus::player0Win;\n    else if (winner == PLAYER_1)\n      _status = GameStatus::player1Win;\n    else\n      _status = GameStatus::tie;\n  } else {\n    _status = _board.getCurrentPlayer() == PLAYER_0 ? GameStatus::player0Turn\n                                                    : GameStatus::player1Turn;\n  }\n\n  fillFullFeatures();\n  // update actions\n  findActions();\n\n  // update hash\n  _hash = _board.getHashValue();\n}\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nvoid Havannah::State<SIZE, PIE, EXTENDED>::DoGoodAction() {\n  return DoRandomAction();\n}\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nstd::unique_ptr<core::State> Havannah::State<SIZE, PIE, EXTENDED>::clone_()\n    const {\n  return std::make_unique<Havannah::State<SIZE, PIE, EXTENDED>>(*this);\n}\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nstd::string Havannah::State<SIZE, PIE, EXTENDED>::stateDescription() const {\n\n  const auto& feats = _features;\n  const auto& sizes = _featSize;\n\n  int ni = sizes[1];\n  int nj = sizes[2];\n\n  auto ind = [ni, nj](int i, int j, int k) { return (k * ni + i) * nj + j; };\n\n  std::string str;\n\n  str += \"Havannah\\n\";\n\n  str += \"  \";\n  for (int k = 0; k < ni; k++) {\n    str += \" \";\n    if (k < 10)\n      str += \" \";\n    str += std::to_string(k) + \" \";\n  }\n  str 
+= \"\\n\";\n\n  for (int i = 0; i < ni; i++) {\n\n    str += \"   \";\n    for (int k = 0; k < i; k++)\n      str += \"  \";\n\n    for (int k = 0; k < SIZE - i - 1; k++)\n      str += \"    \";\n    for (int k = 0; k < SIZE + i and k < 3 * SIZE - i - 1; k++)\n      str += \"----\";\n    str += \"\\n\";\n\n    if (i < 10)\n      str += \" \";\n    str += std::to_string(i) + \" \";\n    for (int k = 0; k < i; k++)\n      str += \"  \";\n    for (int j = 0; j < nj; j++) {\n\n      if (_board.isValidCell({i, j}))\n        str += \"\\\\ \";\n      else if (j < SIZE)\n        str += \"  \";\n\n      if (feats[ind(i, j, 0)] && feats[ind(i, j, 1)])\n        str += \"! \";\n      else if (feats[ind(i, j, 0)])\n        str += \"X \";\n      else if (feats[ind(i, j, 1)])\n        str += \"O \";\n      else if (_board.isValidCell({i, j}))\n        str += \". \";\n      else if (j < SIZE)\n        str += \"  \";\n      else\n        continue;\n    }\n\n    str += \"\\\\ \\n\";\n  }\n\n  str += \"  \";\n  for (int k = 0; k < ni; k++)\n    str += \"  \";\n  for (int k = SIZE - 2; _board.isValidCell({SIZE, k}); k++)\n    str += \"----\";\n  str += \"\\n\";\n\n  str += \"    \";\n  for (int k = 0; k < SIZE - 1; k++)\n    str += \"    \";\n  for (int k = 0; k < ni; k++) {\n    str += \" \";\n    if (k < 10)\n      str += \" \";\n    str += std::to_string(k) + \" \";\n  }\n  str += \"\\n\";\n\n  return str;\n}\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nstd::string Havannah::State<SIZE, PIE, EXTENDED>::actionDescription(\n    const _Action& action) const {\n  return std::to_string(action.GetY()) + \",\" + std::to_string(action.GetZ());\n}\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nstd::string Havannah::State<SIZE, PIE, EXTENDED>::actionsDescription() const {\n  std::ostringstream oss;\n  for (const auto& a : _legalActions) {\n    oss << a.GetY() << \",\" << a.GetZ() << \" \";\n  }\n  oss << std::endl;\n  return oss.str();\n}\n\ntemplate <int SIZE, bool PIE, bool 
EXTENDED>\nint Havannah::State<SIZE, PIE, EXTENDED>::parseAction(\n    const std::string& str) const {\n  std::istringstream iss(str);\n  try {\n    std::string token;\n    if (not std::getline(iss, token, ','))\n      throw - 1;\n    int i = std::stoi(token);\n    if (not std::getline(iss, token))\n      throw - 1;\n    int j = std::stoi(token);\n    for (unsigned k = 0; k < _legalActions.size(); k++)\n      if (_legalActions[k].GetY() == i and _legalActions[k].GetZ() == j)\n        return k;\n  } catch (...) {\n    std::cout << \"failed to parse action\" << std::endl;\n  }\n  return -1;\n}\n\ntemplate <int SIZE, bool PIE, bool EXTENDED>\nint Havannah ::State<SIZE, PIE, EXTENDED>::getCurrentPlayerColor() const {\n  return _board.getCurrentColor();\n}\n"
  },
  {
    "path": "src/games/hex.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"game_player.h\"\n\n#include <array>\n#include <cassert>\n#include <optional>\n#include <random>\n#include <set>\n\nnamespace Hex {\n\nenum Color { COLOR_BLACK, COLOR_WHITE, COLOR_NONE };\n\nusing Cell = std::pair<int, int>;  // (i, j) in [0, SIZE) x [0, SIZE)\n\ntemplate <int SIZE> class Hash {\n protected:\n  unsigned long long _array[2][SIZE][SIZE];\n  unsigned long long _turn;\n  unsigned long long _value;\n\n public:\n  Hash();\n  void init();\n  void updateArray(int Color, int j, int i);\n  void updateTurn();\n  unsigned long long getValue() const;\n};\n\nstruct PathInfo {\n\n  // Minimal index, in a path array, to a connected path.\n  // Can index itself.\n  int _mainPathIndex;\n\n  Color _color;\n\n  bool _isConnectedBorder1;\n  bool _isConnectedBorder2;\n\n  PathInfo() = default;\n\n  PathInfo(int index, Color color, bool border1, bool border2)\n      : _mainPathIndex(index)\n      , _color(color)\n      , _isConnectedBorder1(border1)\n      , _isConnectedBorder2(border2) {\n  }\n};\n\ntemplate <int SIZE, bool PIE> class Board {\n\n protected:\n  int _nbFullIndices;\n  int _nbIndices;\n\n  Color _currentColor;\n  Color _winnerColor;\n  bool _hasPie;\n\n  std::optional<int> _lastIndex;\n  int _nbEmptyIndices;\n\n  // neighbours of each cell (indices)\n  // end value: -1\n  std::array<std::array<int, 7>, SIZE * SIZE> _neighboursBoard;\n\n  // PathInfo of the paths indexed from _pathBoard\n  int _pathsEnd;\n  std::array<PathInfo, SIZE * SIZE> _paths;\n\n  // path of each cell (index in _paths)\n  std::array<int, SIZE * SIZE> _pathBoard;\n\n  // static inline Hash<SIZE> _hash;\n\n public:\n  Board();\n  bool canPie() const;\n\n  Color getCurrentColor() const;\n  Color getWinnerColor() const;\n  PLAYER 
colorToPlayer(Color color) const;\n  PLAYER getCurrentPlayer() const;\n  PLAYER getWinnerPlayer() const;\n  bool isGameFinished() const;\n  std::optional<int> getLastIndex() const;\n\n  static Cell convertIndexToCell(int index);\n  static int convertCellToIndex(const Cell& refCell);\n\n  unsigned long long getHashValue() const;\n\n protected:\n  void getPathIndexAndColorAtIndex(int index,\n                                   int& pathIndex,\n                                   Color& color) const;\n\n  ////////////////////////////////////////////////////////////\n  // hex-specific\n  ////////////////////////////////////////////////////////////\n\n public:\n  void reset();\n  void play(int index);\n\n  bool isValidCell(const Cell& refCell) const;\n  bool isValidIndex(int index) const;\n\n  std::vector<int> findLegalIndices() const;\n  std::vector<int> findWinnerPath() const;\n\n protected:\n  void computeBorderConnection(int index,\n                               Color color,\n                               bool& isConnectedBorder1,\n                               bool& isConnectedBorder2) const;\n};\n\n}  // namespace Hex\n\n///////////////////////////////////////////////////////////////////////////////\n// Hex::Hash\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int SIZE> Hex::Hash<SIZE>::Hash() {\n  for (int color = 0; color < 2; color++)\n    for (int j = 0; j < SIZE; j++)\n      for (int i = 0; i < SIZE; i++) {\n        _array[color][j][i] = 0;\n        for (int k = 0; k < 64; k++)\n          if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n            _array[color][j][i] |= (1ULL << k);\n      }\n  _turn = 0;\n  for (int k = 0; k < 64; k++)\n    if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n      _turn |= (1ULL << k);\n}\n\ntemplate <int SIZE> void Hex::Hash<SIZE>::init() {\n  _value = 0;\n}\n\ntemplate <int SIZE> void Hex::Hash<SIZE>::updateArray(int color, int j, int i) {\n  _value ^= _array[color][j][i];\n}\n\ntemplate <int 
SIZE> void Hex::Hash<SIZE>::updateTurn() {\n  _value ^= _turn;\n}\n\ntemplate <int SIZE> unsigned long long Hex::Hash<SIZE>::getValue() const {\n  return _value;\n}\n\n///////////////////////////////////////////////////////////////////////////////\n// Hex::Board\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int SIZE, bool PIE> Hex::Board<SIZE, PIE>::Board() {\n  //_hash.init();\n}\n\ntemplate <int SIZE, bool PIE>\nHex::Color Hex::Board<SIZE, PIE>::getCurrentColor() const {\n  return _currentColor;\n}\n\ntemplate <int SIZE, bool PIE>\nHex::Color Hex::Board<SIZE, PIE>::getWinnerColor() const {\n  return _winnerColor;\n}\n\ntemplate <int SIZE, bool PIE>\nPLAYER Hex::Board<SIZE, PIE>::colorToPlayer(Color color) const {\n  if (color == COLOR_NONE)\n    return PLAYER_NULL;\n  else if (color == COLOR_BLACK)\n    return _hasPie ? PLAYER_1 : PLAYER_0;\n  else\n    return _hasPie ? PLAYER_0 : PLAYER_1;\n}\n\ntemplate <int SIZE, bool PIE>\nPLAYER Hex::Board<SIZE, PIE>::getCurrentPlayer() const {\n  return colorToPlayer(_currentColor);\n}\n\ntemplate <int SIZE, bool PIE>\nPLAYER Hex::Board<SIZE, PIE>::getWinnerPlayer() const {\n  return colorToPlayer(_winnerColor);\n}\n\ntemplate <int SIZE, bool PIE>\nbool Hex::Board<SIZE, PIE>::isGameFinished() const {\n  return _nbEmptyIndices == 0 or _winnerColor != COLOR_NONE;\n}\n\ntemplate <int SIZE, bool PIE>\nstd::optional<int> Hex::Board<SIZE, PIE>::getLastIndex() const {\n  return _lastIndex;\n}\n\ntemplate <int SIZE, bool PIE> bool Hex::Board<SIZE, PIE>::canPie() const {\n  return PIE and _nbEmptyIndices == _nbIndices - 1 and not _hasPie;\n}\n\ntemplate <int SIZE, bool PIE>\nHex::Cell Hex::Board<SIZE, PIE>::convertIndexToCell(int index) {\n  int i = index / SIZE;\n  int j = index % SIZE;\n  return Cell(i, j);\n}\n\ntemplate <int SIZE, bool PIE>\nint Hex::Board<SIZE, PIE>::convertCellToIndex(const Cell& refCell) {\n  return refCell.first * SIZE + refCell.second;\n}\n\ntemplate <int 
SIZE, bool PIE>\nunsigned long long Hex::Board<SIZE, PIE>::getHashValue() const {\n  return 0;\n  // return _hash.getValue();\n}\n\ntemplate <int SIZE, bool PIE>\nvoid Hex::Board<SIZE, PIE>::getPathIndexAndColorAtIndex(int index,\n                                                        int& pathIndex,\n                                                        Color& color) const {\n  assert(index >= 0);\n  assert(index < _nbFullIndices);\n\n  // get path index from board\n  pathIndex = _pathBoard[index];\n\n  assert(pathIndex >= 0);\n  assert(pathIndex < _pathsEnd);\n\n  // get color from paths\n  color = _paths[pathIndex]._color;\n}\n\n////////////////////////////////////////////////////////////\n// hex-specific\n////////////////////////////////////////////////////////////\n\ntemplate <int SIZE, bool PIE> void Hex::Board<SIZE, PIE>::reset() {\n\n  _nbFullIndices = SIZE * SIZE;\n  _nbIndices = _nbFullIndices;\n  _nbEmptyIndices = _nbIndices;\n\n  _currentColor = COLOR_BLACK;\n  _winnerColor = COLOR_NONE;\n  _hasPie = false;\n\n  _lastIndex.reset();\n\n  // _neighboursBoard\n  // precompute all valid neighbours of each cell\n  for (int i = 0; i < SIZE; i++) {\n    for (int j = 0; j < SIZE; j++) {\n      int index = convertCellToIndex(Cell(i, j));\n      int k = 0;\n      std::array<Cell, 6> neighbours = {{Cell(i - 1, j), Cell(i - 1, j + 1),\n                                         Cell(i, j - 1), Cell(i, j + 1),\n                                         Cell(i + 1, j - 1), Cell(i + 1, j)}};\n      for (const Cell& refCell : neighbours) {\n        if (isValidCell(refCell)) {\n          _neighboursBoard[index][k] = convertCellToIndex(refCell);\n          k++;\n        }\n      }\n      _neighboursBoard[index][k] = -1;\n    }\n  }\n\n  // _paths\n  // no initial path\n  // path 0 for empty cells\n  _paths[0] = PathInfo(0, COLOR_NONE, false, false);\n  _pathsEnd = 1;\n\n  // _pathBoard\n  // set all cells to 0 (empty path)\n  _pathBoard.fill(0);\n}\n\ntemplate <int SIZE, 
bool PIE> void Hex::Board<SIZE, PIE>::play(int index) {\n  assert(isValidIndex(index));\n  assert(not isGameFinished());\n\n  if (_lastIndex and index == *_lastIndex) {\n    assert(canPie());\n    _hasPie = true;\n\n    /*\n    // TODO player or color in hash ?\n    Cell cell = convertIndexToCell(index);\n    _hash.updateArray(_currentPlayer, cell.second, cell.first);\n    _hash.updateTurn();\n     */\n\n  } else {\n    assert(_pathBoard[index] == 0);\n\n    // find previous path & cell at index\n    int boardPathIndex;\n    Color boardColor;\n    getPathIndexAndColorAtIndex(index, boardPathIndex, boardColor);\n\n    // if board cell is empty, update board\n    if (boardColor == COLOR_NONE) {\n\n      // update hash\n      // int color = getCurrentColor() == COLOR_BLACK ? 0 : 1;\n      // Cell cell = convertIndexToCell(index);\n      //_hash.updateArray(color, cell.second, cell.first);\n      //_hash.updateTurn();\n\n      // cell data\n      int mainPathIndex = _pathsEnd;\n      bool isConnectedBorder1, isConnectedBorder2;\n      computeBorderConnection(\n          index, _currentColor, isConnectedBorder1, isConnectedBorder2);\n\n      // find all connected paths\n      std::set<int> neighbourMainPathIndices;\n      for (int neighbourIndex : _neighboursBoard[index]) {\n        if (neighbourIndex == -1)\n          break;\n        int neighbourPathIndex;\n        Color neighbourColor;\n        getPathIndexAndColorAtIndex(\n            neighbourIndex, neighbourPathIndex, neighbourColor);\n        if (neighbourColor == _currentColor) {\n          int neighbourMain = _paths[neighbourPathIndex]._mainPathIndex;\n          const PathInfo& neighbourPath = _paths[neighbourMain];\n          // add neigbour in set\n          neighbourMainPathIndices.insert(neighbourMain);\n          // update cell data\n          isConnectedBorder1 |= neighbourPath._isConnectedBorder1;\n          isConnectedBorder2 |= neighbourPath._isConnectedBorder2;\n          mainPathIndex = 
std::min(mainPathIndex, neighbourMain);\n        }\n      }\n\n      // if the cell is not connected to any existing path, then create a\n      // new path\n      if (neighbourMainPathIndices.empty()) {\n        _paths[_pathsEnd] = PathInfo(\n            _pathsEnd, _currentColor, isConnectedBorder1, isConnectedBorder2);\n        _pathsEnd++;\n      }\n      // if the cell is connected to an existing path, then update paths\n      // and check end of game\n      else {\n        // update main path\n        PathInfo& mainPath = _paths[mainPathIndex];\n        mainPath._isConnectedBorder1 |= isConnectedBorder1;\n        mainPath._isConnectedBorder2 |= isConnectedBorder2;\n\n        // update other paths\n        neighbourMainPathIndices.erase(mainPathIndex);\n        if (not neighbourMainPathIndices.empty()) {\n          for (int k = mainPathIndex + 1; k < _pathsEnd; k++) {\n            int mainK = _paths[k]._mainPathIndex;\n            auto iter = neighbourMainPathIndices.find(mainK);\n            if (iter != neighbourMainPathIndices.end())\n              _paths[k] = mainPath;\n          }\n        }\n\n        // update winner\n        if (mainPath._isConnectedBorder1 and mainPath._isConnectedBorder2)\n          _winnerColor = _currentColor;\n      }\n\n      // end turn and prepare for next one\n      _pathBoard[index] = mainPathIndex;\n      _nbEmptyIndices--;\n      _lastIndex = index;\n      _currentColor = _currentColor == COLOR_BLACK ? 
COLOR_WHITE : COLOR_BLACK;\n    }\n  }\n}\n\ntemplate <int SIZE, bool PIE>\nbool Hex::Board<SIZE, PIE>::isValidCell(const Cell& refCell) const {\n  return refCell.first >= 0 and refCell.first < SIZE and refCell.second >= 0 and\n         refCell.second < SIZE;\n}\n\ntemplate <int SIZE, bool PIE>\nbool Hex::Board<SIZE, PIE>::isValidIndex(int index) const {\n  return (index >= 0 and index < _nbFullIndices);\n}\n\ntemplate <int SIZE, bool PIE>\nstd::vector<int> Hex::Board<SIZE, PIE>::findLegalIndices() const {\n  std::vector<int> emptyIndices;\n  emptyIndices.reserve(_nbEmptyIndices + 1);\n  for (int k = 0; k < _nbFullIndices; k++)\n    if (_pathBoard[k] == 0)\n      emptyIndices.push_back(k);\n  if (canPie())\n    emptyIndices.push_back(*_lastIndex);\n  return emptyIndices;\n}\n\ntemplate <int SIZE, bool PIE>\nstd::vector<int> Hex::Board<SIZE, PIE>::findWinnerPath() const {\n  assert(_winnerColor != COLOR_NONE);\n\n  // find winning main path index\n  int winPathIndex = 1;\n  while (not _paths[winPathIndex]._isConnectedBorder1 or\n         not _paths[winPathIndex]._isConnectedBorder2)\n    winPathIndex++;\n\n  assert(_paths[winPathIndex]._color == _winnerColor);\n\n  // find all indices connected to main path\n  std::vector<int> winIndices;\n  winIndices.reserve(2 * SIZE);\n  for (int k = 0; k < _nbFullIndices; k++) {\n    int pathIndexOfK = _pathBoard[k];\n    int mainPathIndexOfK = _paths[pathIndexOfK]._mainPathIndex;\n    if (mainPathIndexOfK == winPathIndex)\n      winIndices.push_back(k);\n  }\n\n  return winIndices;\n}\n\ntemplate <int SIZE, bool PIE>\nvoid Hex::Board<SIZE, PIE>::computeBorderConnection(\n    int index,\n    Color color,\n    bool& isConnectedBorder1,\n    bool& isConnectedBorder2) const {\n\n  if (color == COLOR_BLACK) {\n    isConnectedBorder1 = (index < SIZE);\n    isConnectedBorder2 = (index >= _nbFullIndices - SIZE);\n  } else if (color == COLOR_WHITE) {\n    int j = index % SIZE;\n    isConnectedBorder1 = (j == 0);\n    isConnectedBorder2 
= (j == SIZE - 1);\n  } else {\n    isConnectedBorder1 = false;\n    isConnectedBorder2 = false;\n  }\n}\n"
  },
  {
    "path": "src/games/hex_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"../core/state.h\"\n#include \"hex.h\"\n\n#include <algorithm>\n#include <chrono>\n\nnamespace Hex {\n\ntemplate <int SIZE, bool PIE> class State : public core::State {\n private:\n  Board<SIZE, PIE> _board;\n\n public:\n  State(int seed);\n  // State(int seed, int history, bool turnFeatures);\n  void findActions();\n  void Initialize() override;\n  void ApplyAction(const _Action& action) override;\n  void DoGoodAction() override;\n  std::unique_ptr<core::State> clone_() const override;\n  std::string stateDescription() const override;\n  std::string actionDescription(const _Action& action) const override;\n  std::string actionsDescription() const override;\n  int parseAction(const std::string& str) const override;\n  virtual int getCurrentPlayerColor() const override;\n  virtual int getNumPlayerColors() const override;\n};\n}  // namespace Hex\n\n///////////////////////////////////////////////////////////////////////////////\n// Hex::State\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int SIZE, bool PIE>\nHex::State<SIZE, PIE>::State(int seed)\n    : core::State(seed) {\n}\n\ntemplate <int SIZE, bool PIE> void Hex::State<SIZE, PIE>::findActions() {\n  auto legalIndices = _board.findLegalIndices();\n  clearActions();\n  for (unsigned k = 0; k < legalIndices.size(); ++k) {\n    auto c = _board.convertIndexToCell(legalIndices[k]);\n    addAction(0, c.first, c.second);\n  }\n}\n\ntemplate <int SIZE, bool PIE> void Hex::State<SIZE, PIE>::Initialize() {\n  _board.reset();\n  _moves.clear();\n  _hash = 0;\n  _status = GameStatus::player0Turn;\n\n  // features\n  _featSize = {2, SIZE, SIZE};\n  _features =\n      std::vector<float>(_featSize[0] * _featSize[1] * 
_featSize[2], 0.f);\n  fillFullFeatures();\n\n  // actions\n  _actionSize = {1, SIZE, SIZE};\n  findActions();\n}\n\ntemplate <int SIZE, bool PIE>\nvoid Hex::State<SIZE, PIE>::ApplyAction(const _Action& action) {\n\n  assert(not _board.isGameFinished());\n\n  // find board move from action\n  int i = action.GetY();\n  int j = action.GetZ();\n  int index = _board.convertCellToIndex(Cell(i, j));\n  std::optional<int> lastIndex = _board.getLastIndex();\n\n  // update features\n  // TODO assert action is in legal actions ?\n  if (not lastIndex or *lastIndex != index) {\n    Color currentColor = _board.getCurrentColor();\n    _features[((currentColor * SIZE) + i) * SIZE + j] = 1.f;\n  }\n\n  // play move\n  _board.play(index);\n\n  // update game status\n  if (_board.isGameFinished()) {\n    PLAYER winner = _board.getWinnerPlayer();\n    assert(winner == PLAYER_0 or winner == PLAYER_1);\n    _status =\n        winner == PLAYER_0 ? GameStatus::player0Win : GameStatus::player1Win;\n  } else {\n    PLAYER player = _board.getCurrentPlayer();\n    assert(player == PLAYER_0 or player == PLAYER_1);\n    _status =\n        player == PLAYER_0 ? 
GameStatus::player0Turn : GameStatus::player1Turn;\n  }\n\n  fillFullFeatures();\n  // update actions\n  findActions();\n\n  // update hash\n  _hash = _board.getHashValue();\n}\n\ntemplate <int SIZE, bool PIE> void Hex::State<SIZE, PIE>::DoGoodAction() {\n  return DoRandomAction();\n}\n\ntemplate <int SIZE, bool PIE>\nstd::unique_ptr<core::State> Hex::State<SIZE, PIE>::clone_() const {\n  return std::make_unique<Hex::State<SIZE, PIE>>(*this);\n}\n\ntemplate <int SIZE, bool PIE>\nstd::string Hex::State<SIZE, PIE>::stateDescription() const {\n\n  const auto& feats = _features;\n  const auto& sizes = _featSize;\n  int ni = sizes[1];\n  int nj = sizes[2];\n  assert(ni <= 26);\n\n  auto ind = [ni, nj](int i, int j, int k) { return (k * ni + i) * nj + j; };\n\n  std::string str;\n\n  str += \"Hex\\n\";\n  str += \" \";\n  for (int k = 0; k < nj; k++) {\n    str += \"   \";\n    str += 'a' + k;\n  }\n  str += \"\\n\";\n\n  for (int i = 0; i < ni; i++) {\n\n    str += \"  \";\n    for (int k = 0; k < i; k++)\n      str += \"  \";\n    str += \"-\";\n    for (int k = 0; k < nj; k++)\n      str += \"----\";\n    str += \"\\n\";\n\n    if (i < 9)\n      str += \" \";\n    str += std::to_string(1 + i) + \" \";\n    for (int k = 0; k < i; k++)\n      str += \"  \";\n    for (int j = 0; j < nj; j++) {\n      str += \"\\\\ \";\n      if (feats[ind(i, j, 0)] && feats[ind(i, j, 1)])\n        str += \"! \";\n      else if (feats[ind(i, j, 0)])\n        str += \"B \";\n      else if (feats[ind(i, j, 1)])\n        str += \"W \";\n      else\n        str += \". 
\";\n    }\n    str += \"\\\\ \\n\";\n  }\n\n  str += \"  \";\n  for (int k = 0; k < nj; k++)\n    str += \"  \";\n  for (int k = 0; k < nj; k++)\n    str += \"----\";\n  str += \"\\n\";\n\n  str += \"   \";\n  for (int k = 0; k < SIZE - 1; k++)\n    str += \"  \";\n  for (int k = 0; k < nj; k++) {\n    str += \"   \";\n    str += 'a' + k;\n  }\n  str += \"\\n\";\n\n  return str;\n}\n\ntemplate <int SIZE, bool PIE>\nstd::string Hex::State<SIZE, PIE>::actionDescription(\n    const _Action& action) const {\n  return char('a' + action.GetZ()) + std::to_string(1 + action.GetY());\n}\n\ntemplate <int SIZE, bool PIE>\nstd::string Hex::State<SIZE, PIE>::actionsDescription() const {\n  std::ostringstream oss;\n  for (const auto& a : _legalActions) {\n    oss << actionDescription(a) << \" \";\n  }\n  oss << std::endl;\n  return oss.str();\n}\n\ntemplate <int SIZE, bool PIE>\nint Hex::State<SIZE, PIE>::parseAction(const std::string& str) const {\n  std::istringstream iss(str);\n  try {\n    char c;\n    iss >> c;\n    std::string token;\n    int j = int(c) - 'a';\n    if (not std::getline(iss, token))\n      throw - 1;\n    int i = std::stoi(token) - 1;\n    for (unsigned k = 0; k < _legalActions.size(); k++)\n      if (_legalActions[k].GetY() == i and _legalActions[k].GetZ() == j)\n        return k;\n  } catch (...) {\n    std::cout << \"failed to parse action\" << std::endl;\n  }\n  return -1;\n}\n\ntemplate <int SIZE, bool PIE>\nint Hex::State<SIZE, PIE>::getCurrentPlayerColor() const {\n  return _board.getCurrentColor();\n}\n\ntemplate <int SIZE, bool PIE>\nint Hex::State<SIZE, PIE>::getNumPlayerColors() const {\n  return 2;\n}\n"
  },
  {
    "path": "src/games/kyotoshogi.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <algorithm>\n#include <iostream>\n#include <queue>\n#include <random>\n#include <vector>\n\nconst int KyotoWhite = 0;\nconst int KyotoBlack = 1;\nconst int KyotoEmpty = 2;\n\nconst int KSDx = 5;\nconst int KSDy = 5;\nconst int KSMaxPlayoutLength = 1000;\n\nenum KSPieceType {\n  KSNone,\n  Tokin_Lance,\n  Silver_Bishop,\n  KSKing,\n  Gold_Knight,\n  Pawn_Rook,\n  Tokin_Lance2,\n  Silver_Bishop2,\n  Gold_Knight2,\n  Pawn_Rook2,\n};\n\nenum KSPieceZ {\n  TL,\n  SB,\n  KSK,\n  GK,\n  PR,\n  TL2,\n  SB2,\n  GK2,\n  PR2,\n  PTokin_Lance,\n  PSilver_Bishop,\n  PGold_Knight,\n  PPawn_Rook,\n  PTokin_Lance2,\n  PSilver_Bishop2,\n  PGold_Knight2,\n  PPawn_Rook2\n};\n\n// 2: players, 9: pieces+promoted\nconst unsigned long long KSHashArray[2][10][KSDx][KSDy] = {\n    {{{14514284786278117030ULL, 4620546740167642908ULL, 13109570281517897720ULL,\n       17462938647148434322ULL, 355488278567739596ULL},\n      {7469126240319926998ULL, 4635995468481642529ULL, 418970542659199878ULL,\n       9604170989252516556ULL, 6358044926049913402ULL},\n      {5058016125798318033ULL, 10349215569089701407ULL, 2583272014892537200ULL,\n       10032373690199166667ULL, 9627645531742285868ULL},\n      {15810285301089087632ULL, 9219209713614924562ULL, 7736011505917826031ULL,\n       13729552270962724157ULL, 4596340717661012313ULL},\n      {4413874586873285858ULL, 5904155143473820934ULL, 16795776195466785825ULL,\n       3040631852046752166ULL, 4529279813148173111ULL}},\n     {{3658352497551999605ULL, 13205889818278417278ULL, 17853215078830450730ULL,\n       14193508720503142180ULL, 1488787817663097441ULL},\n      {8484116316263611556ULL, 4745643133208116498ULL, 14333959900198994173ULL,\n       10770733876927207790ULL, 17529942701849009476ULL},\n      
{8081518017574486547ULL, 5945178879512507902ULL, 9821139136195250096ULL,\n       4728986788662773602ULL, 840062144447779464ULL},\n      {9315169977352719788ULL, 12843335216705846126ULL, 1682692516156909696ULL,\n       16733405176195045732ULL, 570275675392078508ULL},\n      {2804578118555336986ULL, 18105853946332827420ULL, 11444576169427052165ULL,\n       5511269538150904327ULL, 6665263661402689669ULL}},\n     {{8872308438533970361ULL, 5494304472256329401ULL, 5260777597240341458ULL,\n       17048363385688465216ULL, 11601203342555724204ULL},\n      {13927871433293278342ULL, 13168989862813642697ULL,\n       13332527631701716084ULL, 1288265801825883165ULL, 8980511589347843149ULL},\n      {1639193574298669424ULL, 14012553476551396225ULL, 7818048564976445173ULL,\n       11012385938523194722ULL, 1594098091654903511ULL},\n      {5035242355473277827ULL, 11507220397369885600ULL, 4097669440061230013ULL,\n       4158775797243890311ULL, 8008476757622511610ULL},\n      {18212599999684195413ULL, 3892070972454396029ULL, 15739033291548026583ULL,\n       5240984520368774617ULL, 15428220128146522508ULL}},\n     {{6764778500174078837ULL, 17250425930626079997ULL, 15862445320841941901ULL,\n       9055707723866709616ULL, 407278260229756649ULL},\n      {6679883267401891436ULL, 13585010976506536654ULL, 9580697194899010248ULL,\n       7802093638911637786ULL, 535562807229422763ULL},\n      {16772549087470588412ULL, 2069348082463192648ULL, 18080878539236249869ULL,\n       12688200000096479737ULL, 8989665349769173357ULL},\n      {13575112928849473200ULL, 10859033464356012248ULL, 9748216112997718693ULL,\n       8405158063935141693ULL, 15279502632583570477ULL},\n      {16055899490125284200ULL, 9066388900883848980ULL, 17884680971936629565ULL,\n       16395391805201036549ULL, 2550532686790805254ULL}},\n     {{8052938288948613298ULL, 6344035301348514175ULL, 2193824757648316037ULL,\n       10113332896580941759ULL, 14001553499759966766ULL},\n      {597702890888347204ULL, 1874324574384293454ULL, 
10826913572691111562ULL,\n       12821185545071087721ULL, 14606566723149387105ULL},\n      {15679487422249894303ULL, 16146086267469614290ULL,\n       11169330698794304272ULL, 17590151747242102595ULL,\n       18278229723818623796ULL},\n      {15994633360516603469ULL, 11881756471423721131ULL,\n       11153906733009525059ULL, 16836145075420168747ULL,\n       8614597919830747987ULL},\n      {1459907787369619658ULL, 16682004712721580156ULL, 15261848763679157527ULL,\n       2717413695111288049ULL, 14889665525641206303ULL}},\n     {{12338480473037317818ULL, 2557597240994564872ULL, 12402353581130313583ULL,\n       15355546302939095474ULL, 17651033590338072704ULL},\n      {11616809212196625943ULL, 6561978461173088746ULL, 5962436378610109024ULL,\n       1168012300494473422ULL, 5175053317267933097ULL},\n      {4740525681678845797ULL, 1614376253554691208ULL, 1358027693590031708ULL,\n       1856992378370522222ULL, 2410813678132517023ULL},\n      {11582456654366157909ULL, 5754940895753314317ULL, 17548218371729667895ULL,\n       17945642044770404276ULL, 3721164045489467070ULL},\n      {13394551493150992827ULL, 12475264300415171883ULL,\n       10462606688633056562ULL, 13251365510693735175ULL,\n       3876338822302790600ULL}},\n     {{13771801863059799470ULL, 13815564444636394855ULL,\n       16495110748802246170ULL, 2156091871580385249ULL,\n       12069080176326280986ULL},\n      {489805578737239572ULL, 5271183164515543116ULL, 11286401144444756863ULL,\n       6746000579485080744ULL, 5186625150343537151ULL},\n      {13119883039086991857ULL, 16025170396082521338ULL, 2259331576759215945ULL,\n       16362343102415556603ULL, 10982898132796723193ULL},\n      {14666888772828547003ULL, 10462483830193419334ULL,\n       18236154274104239589ULL, 17759599582309981676ULL,\n       9339512652453242670ULL},\n      {14635458573977612405ULL, 13273192362623128494ULL, 7419053614262815071ULL,\n       2139880725825605974ULL, 15336265650071823816ULL}},\n     {{6291952205449675957ULL, 
14977329074317573394ULL, 4364768269648744391ULL,\n       17232241565077788317ULL, 8450549923677533764ULL},\n      {15732483035355013039ULL, 13831185231495622915ULL, 6819123640184841760ULL,\n       11886944798543888851ULL, 10879889186777890996ULL},\n      {15555433551230813341ULL, 105259452319848079ULL, 3441909642659419332ULL,\n       5480947869602487239ULL, 6247709904124292706ULL},\n      {13391610271247915041ULL, 18346462037123761313ULL,\n       16636317150577797347ULL, 14149179703416851896ULL,\n       2376171948756359367ULL},\n      {5152472389910152792ULL, 2368047066677070121ULL, 16396163399604156946ULL,\n       14864288050288048653ULL, 7393398358587456124ULL}},\n     {{9728143941576351989ULL, 5481913815176021747ULL, 16927964714362701213ULL,\n       14993236783745363262ULL, 9552302871570670457ULL},\n      {11071069341174528295ULL, 15381321939083200837ULL, 8816171210895558106ULL,\n       6071991122052964372ULL, 10925078611503375837ULL},\n      {15239629154712277871ULL, 8615167154188153180ULL, 4917230293625512515ULL,\n       14895742215835130464ULL, 2359753755290725009ULL},\n      {6783321469015983851ULL, 360705462143558065ULL, 2287732638733919300ULL,\n       2984153050512747353ULL, 8021412450653308816ULL},\n      {12759258587083258672ULL, 1585563973173997547ULL, 18209504305389149669ULL,\n       11416757620121532143ULL, 6846989578536141166ULL}}},\n    {{{4365862612957164362ULL, 2931876801952518067ULL, 680191398818283694ULL,\n       1834352496547951770ULL, 12616538556720116808ULL},\n      {17563613795929063197ULL, 14519515363534791688ULL, 4349527158980778739ULL,\n       6714794984698083967ULL, 6696141578113299617ULL},\n      {17231874453010340947ULL, 18425812703539835928ULL, 3707544366662920973ULL,\n       10197276740411893574ULL, 12864434420502416888ULL},\n      {12767250491273234520ULL, 1588549204908870909ULL, 6610295429674120152ULL,\n       5281895767268096036ULL, 1739897672032589486ULL},\n      {17406469206626426854ULL, 8710378533013875691ULL, 
9587926405039941516ULL,\n       2805299725371867574ULL, 7146901261023555807ULL}},\n     {{1825062423171923931ULL, 3049052876249887095ULL, 10771741767689142181ULL,\n       8733642741329011601ULL, 11979515434717210935ULL},\n      {10043245691272652957ULL, 5830279975302858953ULL, 17190113074333440499ULL,\n       18260575806620923460ULL, 14335648769917655401ULL},\n      {4153816861017702156ULL, 14590500750979768984ULL, 810991542442466488ULL,\n       7089785717813579612ULL, 12357837562747114001ULL},\n      {5554121432788679660ULL, 5931025703748246718ULL, 2097835176693352889ULL,\n       12745618408404359587ULL, 6090924568528767236ULL},\n      {14734637834598564704ULL, 14439652293742648615ULL, 132405348116615733ULL,\n       13869945305505934743ULL, 7372953811704808036ULL}},\n     {{7756437368369298361ULL, 3794582695199039623ULL, 12917619229835701974ULL,\n       14320084076906478671ULL, 2606626751703588462ULL},\n      {3137561743724131360ULL, 13808802441028589896ULL, 14231944027275971054ULL,\n       16852581317945783254ULL, 10323673491841952054ULL},\n      {2313335010769237820ULL, 13955532667350441768ULL, 5747153089934705338ULL,\n       13377135145695875091ULL, 6830230899286657495ULL},\n      {81856298782858401ULL, 1754724887913860152ULL, 13750479713795882912ULL,\n       11120120136303124367ULL, 15046307382468953177ULL},\n      {3696979254055818020ULL, 15352898388246644384ULL, 1024778962410818770ULL,\n       2388728043318081123ULL, 6871857727931721608ULL}},\n     {{17721619206096294273ULL, 10585202864517959301ULL,\n       10898249199547365704ULL, 9663430180652362739ULL, 1737102419936989910ULL},\n      {5117227310201589790ULL, 16884367896390523102ULL, 10498150099412419335ULL,\n       1921007855220546564ULL, 7643484074408755248ULL},\n      {11318429053286342939ULL, 1370093900783164344ULL, 6776537281339823025ULL,\n       3450492372588984223ULL, 9401014545757436331ULL},\n      {7896519943553875907ULL, 14303443932332314010ULL, 281238069833157985ULL,\n       
9628364435514671685ULL, 1035647896705322917ULL},\n      {940113500519447970ULL, 12858978713386075837ULL, 2103046007104782505ULL,\n       1170332608028903179ULL, 6569179731999105361ULL}},\n     {{9795365446060253382ULL, 3663276878692063340ULL, 11746321300354091749ULL,\n       5408361990473950532ULL, 9735653452670998906ULL},\n      {4324195634733601175ULL, 9037136744494003310ULL, 10715330324656609711ULL,\n       3474343689175121886ULL, 5794004792094061662ULL},\n      {13295581273946061060ULL, 7292949743142825837ULL, 10886028626057941279ULL,\n       10688849249577735178ULL, 17297010345160851373ULL},\n      {13658139148821214513ULL, 4468290234101910565ULL, 9583516840381960864ULL,\n       2100818272677130469ULL, 3835407486618772476ULL},\n      {11687972045781987867ULL, 2584265809482868424ULL, 2184370854727222683ULL,\n       17762352308671769689ULL, 10901114407297935135ULL}},\n     {{17932666452350314317ULL, 14800534017102555607ULL,\n       16233839909626358812ULL, 1704089397092793640ULL, 2891239861334407450ULL},\n      {18077585692287687954ULL, 2363047449739120434ULL, 5904357530901606076ULL,\n       16765772907460692007ULL, 8757786729323486734ULL},\n      {3706883612695347371ULL, 14958907430930711064ULL, 9624134580897548276ULL,\n       10298009507777483067ULL, 5667412839234900228ULL},\n      {6828701555684071915ULL, 10482797977665945217ULL, 13440894740881464138ULL,\n       12078258924098889769ULL, 5740761565098658841ULL},\n      {13914375003115830180ULL, 16808960379045776034ULL,\n       18421450170384511575ULL, 16478974619417516521ULL,\n       14381565232287562804ULL}},\n     {{12792472782420522791ULL, 6620422687983566193ULL, 12025299949416885293ULL,\n       6046334025019123ULL, 16769051888439418536ULL},\n      {10312203372653850423ULL, 720028297035890629ULL, 6441255456466558203ULL,\n       9874005816230679263ULL, 15903170012916142038ULL},\n      {7557768652767625223ULL, 17626605079857371651ULL, 9092603716684679963ULL,\n       15518831173015579794ULL, 
300798272301981904ULL},\n      {13762040857722893585ULL, 3117104080838901168ULL, 4702649037537941245ULL,\n       14408238429167682374ULL, 17923200330177894118ULL},\n      {7470538549881440849ULL, 3664543122474851710ULL, 17626200978883719521ULL,\n       15355649603762884691ULL, 4749231114166154448ULL}},\n     {{11220859020615935192ULL, 4740127963151294603ULL, 16616708905207951068ULL,\n       9828299274924872726ULL, 8985762004928355786ULL},\n      {14578866413196595465ULL, 11009044264074492189ULL,\n       16196760954725621137ULL, 10725252972011913420ULL,\n       4601011175737567235ULL},\n      {1441938685024169613ULL, 1896485105672535586ULL, 6635496128279078494ULL,\n       7401072902622950072ULL, 16075245295895555285ULL},\n      {11009539992705810569ULL, 13666961049432909413ULL, 930044899627839572ULL,\n       7899294831116079515ULL, 7830402010660588539ULL},\n      {5485720725031791061ULL, 17051528642209786987ULL, 7280223907880312904ULL,\n       10641556535303807158ULL, 12639056541805784436ULL}},\n     {{12321318600465693220ULL, 10108223508416203621ULL,\n       16243972184205577210ULL, 8544062083712081766ULL,\n       11274622334580836223ULL},\n      {10844017387984539333ULL, 14774228730866078526ULL, 560237794062265107ULL,\n       5844494700804214355ULL, 12270220729021534083ULL},\n      {8560016492134621125ULL, 12198417933760222474ULL, 10133839346494565561ULL,\n       9295901871619786454ULL, 10849442312533122519ULL},\n      {18021432643418872607ULL, 10155396024449547909ULL,\n       10524212640889309144ULL, 16662796689072019468ULL, 965963318619140447ULL},\n      {8887484786999567242ULL, 15714444653107301219ULL, 1678356452623540647ULL,\n       11052117692502964420ULL, 14549914962216724919ULL}}}};\n// eat\n\nconst unsigned long long KSHashArrayE[18] = {\n    2062106447906584711ULL,  9160372737526136799ULL,  408961132483689555ULL,\n    16057982805180036489ULL, 3569128826873655261ULL,  9330490631980133992ULL,\n    1176328083272936519ULL,  11222898184704497134ULL, 
9302091588024171405ULL,\n    10671057562378043302ULL, 4098229850247478874ULL,  8603114141751656125ULL,\n    5095034292565071557ULL,  17972196540767155575ULL, 17052421619317624598ULL,\n    1582078615100434096ULL,  12012345949788712038ULL, 16161371278263065802ULL};\nconst unsigned long long KSHashTurn = 2541771182459136706ULL;\n\nclass KSPosition {\n public:\n  int x, y;\n  KSPosition() {\n    x = y = -1;\n  }\n\n  KSPosition(int X, int Y) {\n    x = X;\n    y = Y;\n  }\n\n  bool on_board() {\n    return (x >= 0 && y >= 0 && x < KSDx && y < KSDy);\n  }\n\n  KSPosition operator+(const KSPosition& p) {\n    KSPosition tmp(x + p.x, y + p.y);\n    return tmp;\n  }\n\n  bool operator==(const KSPosition& p) {\n    return (x == p.x && y == p.y);\n  }\n};\n\nclass KSPiece {\n public:\n  int color;\n  KSPieceType type;\n  bool promoted;\n  KSPosition pos;\n\n  KSPiece() {\n    color = KyotoEmpty;\n    type = KSNone;\n    promoted = false;\n    pos = KSPosition(-1, -1);\n  }\n\n  KSPiece(int c, KSPieceType t, bool p) {\n    color = c;\n    type = t;\n    promoted = p;\n  }\n\n  bool operator==(const KSPiece& p) {\n    return (color == p.color && type == p.type && promoted == p.promoted &&\n            pos == p.pos);\n  }\n\n  void addKSPiece(int c, KSPieceType t, bool p, KSPosition position) {\n    color = c;\n    type = t;\n    promoted = p;\n    pos = position;\n  }\n\n  std::string print() const {\n    std::string str;\n    switch (type) {\n    case KSNone:\n      str += \"  \";\n      break;\n\n    case KSKing:\n      if (color == KyotoBlack)\n        str += \" k\";\n      else\n        str += \" K\";\n      break;\n\n    case Gold_Knight:\n    case Gold_Knight2:\n      if (color == KyotoBlack) {\n        if (promoted) {\n          str += \"kn\";\n        } else {\n          str += \" g\";\n        }\n      } else {\n        if (promoted) {\n          str += \"KN\";\n        } else {\n          str += \" G\";\n        }\n      }\n      break;\n\n    case Silver_Bishop:\n   
 case Silver_Bishop2:\n      if (color == KyotoBlack) {\n        if (promoted) {\n          str += \" b\";\n        } else {\n          str += \" s\";\n        }\n      } else {\n        if (promoted) {\n          str += \" B\";\n        } else {\n          str += \" S\";\n        }\n      }\n      break;\n    case Pawn_Rook:\n    case Pawn_Rook2:\n      if (color == KyotoBlack) {\n        if (promoted) {\n          str += \" r\";\n        } else {\n          str += \" p\";\n        }\n      } else {\n        if (promoted) {\n          str += \" R\";\n        } else {\n          str += \" P\";\n        }\n      }\n      break;\n    case Tokin_Lance:\n    case Tokin_Lance2:\n      if (color == KyotoBlack) {\n        if (promoted) {\n          str += \" l\";\n        } else {\n          str += \" t\";\n        }\n      } else {\n        if (promoted) {\n          str += \" L\";\n        } else {\n          str += \" T\";\n        }\n      }\n      break;\n    default:\n      break;\n    }\n    return str;\n  }\n};\n\nclass KSMove {\n public:\n  KSPiece piece;\n  KSPosition pos1;\n\n  bool operator==(const KSMove& m) {\n    return pos1 == m.pos1 && piece == m.piece;\n  }\n};\n"
  },
  {
    "path": "src/games/kyotoshogi_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"../core/state.h\"\n\ntypedef unsigned short Coord;\n\n#include \"time.h\"\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\nconst int StateForKyotoshogiNumActions = 64 * 3;\nconst int StateForKyotoshogiX = 201;  //(9+9+3+2+2)*8\nconst int StateForKyotoshogiY = 5;\nconst int StateForKyotoshogiZ = 5;\n\n#include \"kyotoshogi.h\"\n\nclass StateForKyotoshogi : public core::State {\n public:\n  StateForKyotoshogi(int seed)\n      : State(seed) {\n  }\n  KSPiece board[KSDx][KSDy];\n  unsigned long long hash;\n  KSMove rollout[KSMaxPlayoutLength];\n  int length, turn;\n\n  int Repetition;\n  std::queue<unsigned long long> situation;\n\n  // 0 = White, 1 = Black\n  std::vector<std::vector<KSPiece>> chess;\n  std::queue<unsigned long long> repet;\n\n  void init() {\n    chess.clear();\n    chess.resize(2);\n    for (int i = 0; i < KSDx; ++i) {\n      for (int j = 0; j < KSDy; ++j) {\n        board[i][j] = KSPiece(KyotoEmpty, KSNone, false);\n      }\n    }\n\n    for (int i = 1; i <= KSDx; ++i) {\n      board[i - 1][0].addKSPiece(\n          KyotoWhite, KSPieceType(i), false, KSPosition(i - 1, 0));\n      chess[KyotoWhite].push_back(board[i - 1][0]);\n    }\n\n    for (int i = 1; i <= KSDx; ++i) {\n      board[KSDx - i][4].addKSPiece(\n          KyotoBlack, KSPieceType(i), false, KSPosition(KSDx - i, 4));\n      chess[KyotoBlack].push_back(board[KSDx - i][4]);\n    }\n\n    turn = KyotoBlack;  // black first\n    hash = 0;\n    length = 0;\n    Repetition = 0;\n    repet.push(_hash);\n  }\n\n  bool fourfold() {\n    if (Repetition < 9)\n      return false;\n    return true;\n  }\n\n  bool won(int color) {\n    if (chess[color].back().type == KSKing)\n      return true;\n    if (_legalActions.empty())\n  
    return true;\n\n    return false;\n  }\n\n  virtual std::string stateDescription() const override {\n    std::string str;\n    str += \"   A| B| C| D| E\\n\";\n    for (int i = KSDy - 1; i >= 0; --i) {\n      str += std::to_string(i + 1) + ' ';\n      for (int j = 0; j < KSDx; ++j) {\n        if (j > 0)\n          str += '|';\n        str += board[j][i].print();\n      }\n      str += '\\n';\n    }\n\n    return str;\n  }\n\n  virtual std::string actionsDescription() const override {\n    std::stringstream ss;\n    char x1, y1;\n    for (int i = 0; i < (int)_legalActions.size(); i++) {\n      const _Action& action = _legalActions[i];\n      int color = (GameStatus)_status == GameStatus::player1Turn ? KyotoWhite\n                                                                 : KyotoBlack;\n      KSPieceType type = z_to_type(action.GetX());\n      bool promote = z_promoted(action.GetX());\n      KSPiece piece = KSPiece(color, type, promote);\n      KSPiece flip = KSPiece(color, type, !promote);\n\n      x1 = static_cast<char>(action.GetY() + 'A');\n      y1 = static_cast<char>(action.GetZ() + '1');\n      ss << \"Action \" << i << \": \" << piece.print() << \" -> \" << flip.print()\n         << \"-\" << x1 << y1 << std::endl;\n    }\n    ss << \"\\nInput format : action index e.g. 
0\\n\";\n    return ss.str();\n  }\n\n  virtual std::string actionDescription(const _Action& action) const {\n    std::stringstream ss;\n    char x1, y1;\n    int color = (turn + 1) % 2;\n    KSPieceType type = z_to_type(action.GetX());\n    bool promote = z_promoted(action.GetX());\n    KSPiece piece = KSPiece(color, type, promote);\n    KSPiece flip = KSPiece(color, type, !promote);\n\n    x1 = static_cast<char>(action.GetY() + 'A');\n    y1 = static_cast<char>(action.GetZ() + '1');\n    ss << piece.print() << \" -> \" << flip.print() << \"-\" << x1 << y1;\n\n    return ss.str();\n  }\n\n  void print_chess(int color, FILE* fp) {\n    if (color == KyotoWhite)\n      fprintf(fp, \"KyotoWhite \");\n    else\n      fprintf(fp, \"KyotoBlack \");\n    fprintf(fp, \"%lu\\n\", chess[color].size());\n    std::vector<KSPiece>::iterator it;\n    for (it = chess[color].begin(); it != chess[color].end(); ++it) {\n      if (!(*it).pos.on_board()) {\n        fprintf(fp, \"(%s)\", (*it).print().c_str());\n      } else\n        fprintf(fp, \"%s\", (*it).print().c_str());\n    }\n    fprintf(fp, \"\\n\");\n  }\n\n  void legal_king_moves(KSMove origin, std::vector<KSMove>& moves) {\n    origin.piece.promoted = false;\n    short dx[] = {1, 1, 0, -1, -1, -1, 0, 1};\n    short dy[] = {0, 1, 1, 1, 0, -1, -1, -1};\n    for (int i = 0; i < 8; ++i) {\n      origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n      if (origin.pos1.on_board() &&\n          board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n        moves.push_back(origin);\n    }\n  }\n\n  void legal_gold_knight_moves(KSMove origin, std::vector<KSMove>& moves) {\n    if (origin.piece.promoted) {\n      if (origin.piece.color == KyotoWhite) {\n        short dx[] = {-1, 1};\n        short dy[] = {2, 2};\n        for (int i = 0; i < 2; ++i) {\n          origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n          if (origin.pos1.on_board() &&\n              
board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n            moves.push_back(origin);\n        }\n      } else {\n        short dx[] = {-1, 1};\n        short dy[] = {-2, -2};\n        for (int i = 0; i < 2; ++i) {\n          origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n          if (origin.pos1.on_board() &&\n              board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n            moves.push_back(origin);\n        }\n      }\n    } else {\n      if (origin.piece.color == KyotoWhite) {\n        short dx[] = {1, 1, 0, -1, -1, 0};\n        short dy[] = {0, 1, 1, 1, 0, -1};\n        for (int i = 0; i < 6; ++i) {\n          origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n          if (origin.pos1.on_board() &&\n              board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n            moves.push_back(origin);\n        }\n      } else {\n        short dx[] = {1, 0, -1, -1, 0, 1};\n        short dy[] = {0, 1, 0, -1, -1, -1};\n        for (int i = 0; i < 6; ++i) {\n          origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n          if (origin.pos1.on_board() &&\n              board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n            moves.push_back(origin);\n        }\n      }\n    }\n  }\n\n  void legal_silver_bishop_moves(KSMove origin, std::vector<KSMove>& moves) {\n    if (origin.piece.promoted) {\n      short dx[] = {1, -1, -1, 1};\n      short dy[] = {1, 1, -1, -1};\n      for (int i = 0; i < 4; ++i) {\n        origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n        while (origin.pos1.on_board() &&\n               board[origin.pos1.x][origin.pos1.y].color !=\n                   origin.piece.color) {\n          moves.push_back(origin);\n          if (board[origin.pos1.x][origin.pos1.y].color != KyotoEmpty)\n            break;\n          origin.pos1 = origin.pos1 + KSPosition(dx[i], dy[i]);\n        }\n      }\n    } else {\n      if 
(origin.piece.color == KyotoWhite) {\n        short dx[] = {1, 0, -1, -1, 1};\n        short dy[] = {1, 1, 1, -1, -1};\n        for (int i = 0; i < 5; ++i) {\n          origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n          if (origin.pos1.on_board() &&\n              board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n            moves.push_back(origin);\n        }\n      } else {\n        short dx[] = {1, -1, -1, 0, 1};\n        short dy[] = {1, 1, -1, -1, -1};\n        for (int i = 0; i < 5; ++i) {\n          origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n          if (origin.pos1.on_board() &&\n              board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n            moves.push_back(origin);\n        }\n      }\n    }\n  }\n\n  void legal_pawn_rook_moves(KSMove origin, std::vector<KSMove>& moves) {\n    if (origin.piece.promoted) {\n      short dx[] = {1, 0, -1, 0};\n      short dy[] = {0, 1, 0, -1};\n      for (int i = 0; i < 4; ++i) {\n        origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n        while (origin.pos1.on_board() &&\n               board[origin.pos1.x][origin.pos1.y].color !=\n                   origin.piece.color) {\n          moves.push_back(origin);\n          if (board[origin.pos1.x][origin.pos1.y].color != KyotoEmpty)\n            break;\n          origin.pos1 = origin.pos1 + KSPosition(dx[i], dy[i]);\n        }\n      }\n    } else {\n      if (origin.piece.color == KyotoWhite) {\n        origin.pos1 = origin.piece.pos + KSPosition(0, 1);\n        if (origin.pos1.on_board() &&\n            board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n          moves.push_back(origin);\n      } else {\n        origin.pos1 = origin.piece.pos + KSPosition(0, -1);\n        if (origin.pos1.on_board() &&\n            board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n          moves.push_back(origin);\n      }\n    }\n  }\n\n  void 
legal_tokin_lance_moves(KSMove origin, std::vector<KSMove>& moves) {\n    if (origin.piece.promoted) {\n      if (origin.piece.color == KyotoWhite) {\n        origin.pos1 = origin.piece.pos + KSPosition(0, 1);\n        while (origin.pos1.on_board() &&\n               board[origin.pos1.x][origin.pos1.y].color !=\n                   origin.piece.color) {\n          moves.push_back(origin);\n          if (board[origin.pos1.x][origin.pos1.y].color != KyotoEmpty)\n            break;\n          origin.pos1 = origin.pos1 + KSPosition(0, 1);\n        }\n      } else {\n        origin.pos1 = origin.piece.pos + KSPosition(0, -1);\n        while (origin.pos1.on_board() &&\n               board[origin.pos1.x][origin.pos1.y].color !=\n                   origin.piece.color) {\n          moves.push_back(origin);\n          if (board[origin.pos1.x][origin.pos1.y].color != KyotoEmpty)\n            break;\n          origin.pos1 = origin.pos1 + KSPosition(0, -1);\n        }\n      }\n    } else {\n      if (origin.piece.color == KyotoWhite) {\n        short dx[] = {1, 1, 0, -1, -1, 0};\n        short dy[] = {0, 1, 1, 1, 0, -1};\n        for (int i = 0; i < 6; ++i) {\n          origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n          if (origin.pos1.on_board() &&\n              board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n            moves.push_back(origin);\n        }\n      } else {\n        short dx[] = {1, 0, -1, -1, 0, 1};\n        short dy[] = {0, 1, 0, -1, -1, -1};\n        for (int i = 0; i < 6; ++i) {\n          origin.pos1 = origin.piece.pos + KSPosition(dx[i], dy[i]);\n          if (origin.pos1.on_board() &&\n              board[origin.pos1.x][origin.pos1.y].color != origin.piece.color)\n            moves.push_back(origin);\n        }\n      }\n    }\n  }\n\n  void legal_drop(KSMove origin, std::vector<KSMove>& moves) {\n    origin.piece.promoted = false;\n    for (int i = 0; i < KSDx; ++i) {\n      for (int j = 0; j < KSDy; ++j) {\n        
if (board[i][j].color == KyotoEmpty) {\n          origin.pos1 = KSPosition(i, j);\n          origin.piece.promoted = false;\n          moves.push_back(origin);\n          origin.piece.promoted = true;\n          moves.push_back(origin);\n        }\n      }\n    }\n  }\n\n  bool can_eat(KSPosition tar, int color) {\n    std::vector<KSMove> moves;\n    legalKSMoves_onboard(opponent(color), moves);\n\n    std::vector<KSMove>::iterator it;\n    for (it = moves.begin(); it != moves.end(); ++it)\n      if ((*it).pos1 == tar)\n        return true;\n\n    return false;\n  }\n\n  bool checkmate(KSPiece king) {\n    std::vector<KSMove> king_moves;\n    KSMove m;\n    m.piece = king;\n    legal_king_moves(m, king_moves);\n    if (king_moves.empty())\n      return true;\n\n    std::vector<KSMove>::iterator it;\n    for (it = king_moves.begin(); it != king_moves.end(); ++it)\n      if (!can_eat((*it).pos1, king.color))\n        return false;\n\n    return true;\n  }\n\n  void legalKSMoves(int color, std::vector<KSMove>& moves) {\n    legalKSMoves_onboard(color, moves);\n    std::vector<KSPiece>::iterator it;\n    for (it = chess[color].begin(); it != chess[color].end(); ++it) {\n      KSPiece p = *it;\n\n      if (!p.pos.on_board()) {\n        KSMove m;\n        m.piece = p;\n        legal_drop(m, moves);\n      }\n    }\n  }\n\n  void legalKSMoves_onboard(int color, std::vector<KSMove>& moves) {\n    std::vector<KSPiece>::iterator it;\n    for (it = chess[color].begin(); it != chess[color].end(); ++it) {\n      KSPiece p = *it;\n      KSMove m;\n      m.piece = p;\n\n      if (m.piece.pos.on_board()) {\n        switch (m.piece.type) {\n        case KSKing:\n          legal_king_moves(m, moves);\n          break;\n\n        case Gold_Knight:\n        case Gold_Knight2:\n          legal_gold_knight_moves(m, moves);\n          break;\n\n        case Silver_Bishop:\n        case Silver_Bishop2:\n          legal_silver_bishop_moves(m, moves);\n          break;\n\n        case 
Tokin_Lance:\n        case Tokin_Lance2:\n          legal_tokin_lance_moves(m, moves);\n          break;\n\n        case Pawn_Rook:\n        case Pawn_Rook2:\n          legal_pawn_rook_moves(m, moves);\n          break;\n\n        default:\n          break;\n        }\n      }\n    }\n  }\n\n  int opponent(int player) {\n    if (player == KyotoWhite)\n      return KyotoBlack;\n    return KyotoWhite;\n  }\n\n  KSPieceType new_type(KSPieceType p) {\n    KSPieceType t = p;\n    switch (p) {\n    case Gold_Knight:\n      t = Gold_Knight2;\n      break;\n    case Gold_Knight2:\n      t = Gold_Knight;\n      break;\n    case Silver_Bishop:\n      t = Silver_Bishop2;\n      break;\n    case Silver_Bishop2:\n      t = Silver_Bishop;\n      break;\n    case Tokin_Lance:\n      t = Tokin_Lance2;\n      break;\n    case Tokin_Lance2:\n      t = Tokin_Lance;\n      break;\n    case Pawn_Rook:\n      t = Pawn_Rook2;\n      break;\n    case Pawn_Rook2:\n      t = Pawn_Rook;\n      break;\n    default:\n      break;\n    }\n    return t;\n  }\n\n  void play(KSMove m) {\n    turn = opponent(turn);\n    if (m.piece.pos.on_board()) {\n      hash ^= KSHashArray[m.piece.color][getHashNum(m.piece)][m.piece.pos.x]\n                         [m.piece.pos.y];\n\n      if (board[m.pos1.x][m.pos1.y].color != KyotoEmpty) {\n        assert(m.pos1.on_board());\n        hash ^= KSHashArray[turn][getHashNum(board[m.pos1.x][m.pos1.y])]\n                           [m.pos1.x][m.pos1.y];\n        hash ^= KSHashArrayE[getHashNumE(m.piece)];\n\n        KSPiece tmp(\n            m.piece.color, new_type(board[m.pos1.x][m.pos1.y].type), false);\n        chess[m.piece.color].push_back(tmp);\n\n        std::vector<KSPiece>::iterator it;\n        for (it = chess[turn].begin(); it != chess[turn].end(); ++it)\n          if ((*it).type == board[m.pos1.x][m.pos1.y].type) {\n            chess[turn].erase(it);\n            break;\n          }\n      }\n\n      board[m.pos1.x][m.pos1.y] = 
board[m.piece.pos.x][m.piece.pos.y];\n      board[m.pos1.x][m.pos1.y].pos = KSPosition(m.pos1.x, m.pos1.y);\n      // decide promoted\n      if (m.piece.type != KSKing) {\n        board[m.pos1.x][m.pos1.y].promoted =\n            !board[m.pos1.x][m.pos1.y].promoted;\n      }\n\n      board[m.piece.pos.x][m.piece.pos.y] = KSPiece(KyotoEmpty, KSNone, false);\n    } else {\n      hash ^= KSHashArrayE[getHashNumE(m.piece)];\n      board[m.pos1.x][m.pos1.y] =\n          KSPiece(m.piece.color, m.piece.type, m.piece.promoted);\n      board[m.pos1.x][m.pos1.y].pos = KSPosition(m.pos1.x, m.pos1.y);\n    }\n\n    std::vector<KSPiece>::iterator it;\n    for (it = chess[m.piece.color].begin(); it != chess[m.piece.color].end();\n         ++it) {\n      if ((*it).type == m.piece.type) {\n        (*it).pos = m.pos1;\n        if (m.piece.pos.on_board()) {\n          if (m.piece.type != KSKing)\n            (*it).promoted = !(*it).promoted;\n        } else {\n          (*it).promoted = m.piece.promoted;\n        }\n        break;\n      }\n    }\n\n    hash ^= KSHashArray[m.piece.color][getHashNum(board[m.pos1.x][m.pos1.y])]\n                       [m.pos1.x][m.pos1.y];\n    hash ^= KSHashTurn;\n\n    if (length < KSMaxPlayoutLength) {\n      rollout[length] = m;\n      length++;\n    } else {\n      _status = GameStatus::tie;\n    }\n\n    if (hash == repet.front()) {\n      Repetition += 1;\n    } else {\n      Repetition = 0;\n    }\n  }\n\n  int getHashNum(KSPiece p) {\n    int num = p.type;\n    if (num == 3) {\n      num = 1;\n    } else if (num == 1 || num == 2) {\n      num = num + 1;\n    } else if (num >= 6) {\n      num -= 4;\n    }\n    if (p.promoted)\n      num += 4;\n    num = num - 1;\n    return num;\n  }\n\n  int getHashNumE(KSPiece p) {\n    int num = p.type;\n    if (num == 3) {\n      num = 1;\n    } else if (num == 1 || num == 2) {\n      num = num + 1;\n    }\n\n    return num - 2 + 10 * p.color;\n  }\n\n  virtual void Initialize() override {\n    
_moves.clear();\n\n    // the features are just one number between 0 and 1 (the distance,\n    // normalized).\n    _featSize[0] = StateForKyotoshogiX;\n    _featSize[1] = StateForKyotoshogiY;\n    _featSize[2] = StateForKyotoshogiZ;\n\n    // size of the output of the neural network; this should cover the positions\n    // of actions (above).\n    _actionSize[0] = 17;\n    _actionSize[1] = 5;\n    _actionSize[2] = 5;\n\n    // _hash is an unsigned int, it has to be *unique*.\n    _hash = 0;\n\n    _features.resize(StateForKyotoshogiX * StateForKyotoshogiY *\n                     StateForKyotoshogiZ);\n    std::fill(_features.begin(), _features.end(), 0);\n\n    init();\n    _status = (GameStatus)opponent(turn);\n    findFeatures();\n    findActions(turn);\n    fillFullFeatures();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForKyotoshogi>(*this);\n  }\n\n  int type_to_z(KSPiece p) {\n    if (!p.promoted)\n      return (int)p.type - 1;\n    switch (p.type) {\n    case Tokin_Lance:\n      return PTokin_Lance;\n    case Silver_Bishop:\n      return PSilver_Bishop;\n    case Gold_Knight:\n      return PGold_Knight;\n    case Pawn_Rook:\n      return PPawn_Rook;\n    case Tokin_Lance2:\n      return PTokin_Lance2;\n    case Silver_Bishop2:\n      return PSilver_Bishop2;\n    case Gold_Knight2:\n      return PGold_Knight2;\n    case Pawn_Rook2:\n      return PPawn_Rook2;\n\n    default:\n      fprintf(\n          stderr, \"%s type to z error %d\\n\", p.print().c_str(), (int)p.type);\n      break;\n    }\n    return -1;\n  }\n\n  KSPieceType z_to_type(int z) const {\n    switch ((KSPieceZ)z) {\n    case KSK:\n      return KSKing;\n    case GK:\n    case PGold_Knight:\n      return Gold_Knight;\n    case GK2:\n    case PGold_Knight2:\n      return Gold_Knight2;\n    case PSilver_Bishop:\n    case SB:\n      return Silver_Bishop;\n    case PSilver_Bishop2:\n    case SB2:\n      return Silver_Bishop2;\n    case 
PPawn_Rook:\n    case PR:\n      return Pawn_Rook;\n    case PPawn_Rook2:\n    case PR2:\n      return Pawn_Rook2;\n    case PTokin_Lance2:\n    case TL2:\n      return Tokin_Lance2;\n    case PTokin_Lance:\n    case TL:\n      return Tokin_Lance;\n\n    default:\n      fprintf(stderr, \"z %d to type error\\n\", z);\n      break;\n    }\n    return KSNone;\n  }\n\n  bool z_promoted(int z) const {\n    return z >= 9;\n  }\n\n  void findActions(int color) {\n    std::vector<KSMove> moves;\n\n    legalKSMoves(color, moves);\n\n    int nb = moves.size();\n    clearActions();\n\n    for (int i = 0; i < nb; ++i) {\n      int x = moves[i].pos1.x;\n      int y = moves[i].pos1.y;\n      int z = type_to_z(moves[i].piece);\n\n      addAction(z, x, y);\n    }\n  }\n\n  void findFeatures() {\n    std::vector<float> old(_features);\n    for (int i = 0; i < 4865; ++i)\n      _features[i] = 0;\n    // 0 ~ 500\n    for (int i = 0; i < 25; ++i) {\n      KSPiece p = board[i % 5][i / 5];\n      if (p.color == KyotoWhite) {\n        switch (p.type) {\n        case KSKing:\n          _features[i] = 1;\n          break;\n\n        case Gold_Knight:\n        case Gold_Knight2:\n          if (p.promoted)\n            _features[25 + i] = 1;\n          else\n            _features[50 + i] = 1;\n          break;\n\n        case Silver_Bishop:\n        case Silver_Bishop2:\n          if (p.promoted)\n            _features[75 + i] = 1;\n          else\n            _features[100 + i] = 1;\n          break;\n\n        case Pawn_Rook:\n        case Pawn_Rook2:\n          if (p.promoted)\n            _features[125 + i] = 1;\n          else\n            _features[150 + i] = 1;\n          break;\n\n        case Tokin_Lance:\n        case Tokin_Lance2:\n          if (p.promoted)\n            _features[175 + i] = 1;\n          else\n            _features[200 + i] = 1;\n          break;\n\n        default:\n          break;\n        }\n      } else {\n        switch (p.type) {\n        case KSKing:\n     
     _features[225 + i] = 1;\n          break;\n\n        case Gold_Knight:\n        case Gold_Knight2:\n          if (p.promoted)\n            _features[250 + i] = 1;\n          else\n            _features[275 + i] = 1;\n          break;\n\n        case Silver_Bishop:\n        case Silver_Bishop2:\n          if (p.promoted)\n            _features[300 + i] = 1;\n          else\n            _features[325 + i] = 1;\n          break;\n\n        case Pawn_Rook:\n        case Pawn_Rook2:\n          if (p.promoted)\n            _features[350 + i] = 1;\n          else\n            _features[375 + i] = 1;\n          break;\n\n        case Tokin_Lance:\n        case Tokin_Lance2:\n          if (p.promoted)\n            _features[400 + i] = 1;\n          else\n            _features[425 + i] = 1;\n          break;\n\n        default:\n          break;\n        }\n      }\n    }\n\n    // 450 ~ 525\n    switch (Repetition) {\n    case 1:\n      std::fill(_features.begin() + 450, _features.begin() + 475, 1);\n      break;\n    case 5:\n      std::fill(_features.begin() + 475, _features.begin() + 500, 1);\n      break;\n    case 9:\n      std::fill(_features.begin() + 500, _features.begin() + 525, 1);\n      break;\n    default:\n      break;\n    }\n\n    int tmp = 525;\n    for (int i = 0; i < 2; ++i) {\n      std::vector<KSPiece>::iterator it;\n      for (it = chess[i].begin(); it != chess[i].end(); ++it) {\n        if (!(*it).pos.on_board()) {\n          switch ((*it).type) {\n          case Gold_Knight:\n            std::fill(_features.begin() + tmp, _features.begin() + tmp + 5, 1);\n            break;\n          case Silver_Bishop:\n            std::fill(\n                _features.begin() + tmp + 5, _features.begin() + tmp + 10, 1);\n            break;\n          case Pawn_Rook:\n            std::fill(\n                _features.begin() + tmp + 10, _features.begin() + tmp + 15, 1);\n            break;\n          case Tokin_Lance:\n            std::fill(\n                
_features.begin() + tmp + 15, _features.begin() + tmp + 20, 1);\n            break;\n          case Gold_Knight2:\n            std::fill(\n                _features.begin() + tmp + 25, _features.begin() + tmp + 30, 1);\n            break;\n          case Silver_Bishop2:\n            std::fill(\n                _features.begin() + tmp + 30, _features.begin() + tmp + 35, 1);\n            break;\n          case Pawn_Rook2:\n            std::fill(\n                _features.begin() + tmp + 35, _features.begin() + tmp + 40, 1);\n            break;\n          case Tokin_Lance2:\n            std::fill(\n                _features.begin() + tmp + 40, _features.begin() + tmp + 45, 1);\n            break;\n          default:\n            break;\n          }\n        }\n      }\n      std::fill(_features.begin() + tmp + 20, _features.begin() + tmp + 25, 0);\n      std::fill(_features.begin() + tmp + 45, _features.begin() + tmp + 50, 0);\n      tmp += 50;\n    }\n\n    // history 625 ~ 4375+625\n    std::copy(old.begin(), old.begin() + 4375, _features.begin() + 625);\n    // 5000~5025\n    std::fill(_features.begin() + 5000, _features.end(), turn);\n  }\n  // The action just decreases the distance and swaps the turn to play.\n  virtual void ApplyAction(const _Action& action) override {\n    KSMove m;\n    if ((GameStatus)_status == GameStatus::player1Turn) {  // KyotoWhite to move\n      m.piece.color = KyotoWhite;\n      m.pos1 = KSPosition(action.GetY(), action.GetZ());\n      m.piece.type = z_to_type(action.GetX());\n      m.piece.promoted = z_promoted(action.GetX());\n\n      std::vector<KSPiece>::iterator it;\n      for (it = chess[KyotoWhite].begin(); it != chess[KyotoWhite].end();\n           ++it) {\n        if ((*it).type == m.piece.type)\n          m.piece.pos = (*it).pos;\n      }\n\n      play(m);\n      findActions(KyotoBlack);\n      if ((GameStatus)_status == GameStatus::tie || fourfold()) {\n        _status = GameStatus::tie;\n        _legalActions.clear();\n    
  } else if (won(KyotoWhite) || _legalActions.empty())\n        _status = GameStatus::player1Win;  // KyotoWhite win\n      else\n        _status = GameStatus::player0Turn;  // KyotoBlack turn\n    } else {                                // KyotoBlack\n      m.piece.color = KyotoBlack;\n      m.pos1 = KSPosition(action.GetY(), action.GetZ());\n      m.piece.type = z_to_type(action.GetX());\n      m.piece.promoted = z_promoted(action.GetX());\n\n      std::vector<KSPiece>::iterator it;\n      for (it = chess[KyotoBlack].begin(); it != chess[KyotoBlack].end();\n           ++it) {\n        if ((*it).type == m.piece.type)\n          m.piece.pos = (*it).pos;\n      }\n\n      play(m);\n      findActions(KyotoWhite);\n      if ((GameStatus)_status == GameStatus::tie || fourfold()) {\n        _status = GameStatus::tie;\n        _legalActions.clear();\n      } else if (won(KyotoBlack) || _legalActions.empty())\n        _status = GameStatus::player0Win;  // KyotoBlack won\n      else\n        _status = GameStatus::player1Turn;  // KyotoWhite turn\n    }\n    findFeatures();\n    _hash = hash;\n\n    if (repet.size() == 4) {\n      repet.pop();\n      repet.push(_hash);\n    } else\n      repet.push(_hash);\n\n    fillFullFeatures();\n  }\n\n  // For this trivial example we just compare to random play.\n  virtual void DoGoodAction() override {\n    DoRandomAction();\n  }\n};\n"
  },
  {
    "path": "src/games/ludii/README.md",
    "content": "# Polygames + Ludii integration\n\nWe have implemented a bridge between Polygames' tree search and learning algorithms, and the large library of games implemented in the\n[Ludii general game system](https://ludii.games/). The game logic is run in Ludii, and training logic / action selection are performed\nby Polygames. In theory, this can work for **any game** that can be run in Ludii. In practice, there may be some games that fail\n(such as extremely large games that run out of memory, or games with a complex state representation for which appropriate support \nfor building tensors has not yet been built into Ludii), but many hundreds of games should work.\n\n## Requirements\n\nSince Ludii uses Java, the Ludii integration of Polygames requires the optional step of installing `openjdk`\nfrom [Polygames' main installation instructions](https://github.com/facebookincubator/Polygames) to be followed.\n\nWhen building Polygames, make sure **not** to use the `-DWITH_LUDII=OFF` argument for `cmake`, because that will\ndisable support for Ludii.\n\n## Installation\n\nAfter installing Polygames as per usual, Ludii itself must also be installed in the correct place such that Polygames\ncan find and run it. More specifically:\n\n1. Download any desired version of the Ludii player from https://ludii.games/download.php (at least versions 1.1.6 and higher\nshould run correctly, some older versions may also still work well).\n2. Rename the downloaded file from `Ludii-X.Y.Z.jar` to `Ludii.jar`, and place it in `<Polygames install directory>/ludii/Ludii.jar`\n(create a new `ludii` directory under `Polygames` if it does not already exist).\n\n## Using Ludii Games\n\nAny command-line option in Polygames that accepts `--game_name` arguments (such as `train`, `eval`, etc.) 
can also run any game\nthrough Ludii by specifying it in the following format:\n\n```\n--game_name=\"Ludii<NAME>.lud\"\n```\n\nThe `<NAME>` part of such an argument must match the name of the game as it is inside Ludii exactly, including whitespaces. This\nworks in the same way as [programmatic loading of games in Java when using Ludii as a library for Java code](https://ludiitutorials.readthedocs.io/en/latest/loading_games.html).\nThe exact game names are also displayed inside the game loader of the GUI of Ludii, which is visible when the Ludii jar is run as\nan executable.\n\nFor example, a training run with otherwise default arguments for Ludii's implementation of Tic-Tac-Toe (as opposed to the built-in C++\nimplementation of the game in Polygames) can be launched using:\n\n```\npython -m pypolygames train --game_name=\"LudiiTic-Tac-Toe.lud\"\n```\n\n## Using Game Options\n\nFor many of its games, Ludii also provides additional *options* that can be used to load different variants of a game, with\ndifferent board sizes, board shapes, different rulesets, etc. Non-default variants of any Ludii game can also be loaded\nin Polygames, through an additional `--game_options` argument followed up by any arbitrary number of Strings, which are\nsubsequently all passed into Ludii as options. These should again be provided in the same format as when options are provided\n[programmatically to Ludii from Java](https://ludiitutorials.readthedocs.io/en/latest/loading_games.html), and the exact\nstrings to be entered can also be found in Ludii's GUI from the options menu after loading a particular game. 
For example,\nwe can launch a training run in Ludii's implementation of Hex, with a board size of 13x13 and an inverted win condition \n(\"Misere\") as follows:\n\n```\npython -m pypolygames train --game_name=\"LudiiHex.lud\" --game_options \"Board Size/13x13\" \"End Rules/Misere\"\n```\n\n## Trained Models\n\nCheckpoints of training runs for some Ludii games have been made [publicly available here](http://dl.fbaipublicfiles.com/polygames/ludii_checkpoints/list.txt).\nEach of these checkpoints was trained on the default variant of its game (no custom options specified), for 20 hours on 8 GPUs and 80 CPU cores."
  },
  {
    "path": "src/games/ludii/jni_utils.cc",
    "content": "// inspired from\n// https://gist.github.com/alexminnaar/90cf1ea3de45e79a1b14081d90d214b7\n\n/*\nCopyright (c) 2020 Alex Minnaar\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n*/\n\n#include \"jni_utils.h\"\n\n#include <cstring>\n#include <iostream>\n#include <stdio.h>\n#include <string>\n\nnamespace Ludii {\n\nJavaVM* JNIUtils::jvm = nullptr;\njint JNIUtils::res = 0;\n\nthread_local JNIEnv* JNIUtils::env = nullptr;\n\nJNIEnv* JNIUtils::GetEnv() {\n  if (jvm == nullptr)\n    return nullptr;\n\n  if (env == nullptr) {\n    JavaVMAttachArgs args = {JNI_VERSION_1_2, 0, 0};\n    jvm->AttachCurrentThread((void**)&env, &args);\n  }\n\n  return env;\n}\n\nvoid JNIUtils::InitJVM(std::string jar_location) {\n  if (jvm != nullptr)\n    return;  // We've already initialised the JVM\n\n  JNIEnv* env = nullptr;\n  std::cout << \"intializing JVM\" << std::endl;\n  if (jar_location.empty())\n    jar_location = \"ludii/Ludii.jar\";\n\n  // Check if we can actually access the JAR file\n  if 
(FILE* file = fopen(jar_location.c_str(), \"rb\")) {\n    // The Ludii.jar file seems to be there, so we're fine!\n    fclose(file);\n  } else {\n    // Can't find the Ludii.jar file\n    return;\n  }\n\n  //#define CHECK_JNI  // Uncomment this to run extra checks for JNI\n\n#ifdef JNI_VERSION_1_2\n  JavaVMInitArgs vm_args;\n\n#ifdef CHECK_JNI\n  const size_t num_jvm_args = 3;\n#else\n  const size_t num_jvm_args = 2;\n#endif\n\n  JavaVMOption options[num_jvm_args];\n  std::string java_classpath = \"-Djava.class.path=\" + jar_location;\n  options[0].optionString = java_classpath.data();\n  std::string heapdump_str = \"-XX:+HeapDumpOnOutOfMemoryError\";\n  options[1].optionString = heapdump_str.data();\n\n#ifdef CHECK_JNI\n  std::string check_jni = \"-Xcheck:jni\";\n  options[2].optionString = check_jni.data();\n#endif\n\n  vm_args.version = 0x00010002;\n  vm_args.options = options;\n  vm_args.nOptions = num_jvm_args;\n  vm_args.ignoreUnrecognized = JNI_TRUE;\n  /* Create the Java VM */\n  res = JNI_CreateJavaVM(&jvm, (void**)&env, &vm_args);\n#else\n  JDK1_1InitArgs vm_args;\n  std::string classpath = vm_args.classpath + \";\" + jar_location;\n  vm_args.version = 0x00010001;\n  JNI_GetDefaultJavaVMInitArgs(&vm_args);\n  /* Append jar location to the default system class path */\n  vm_args.classpath = java_classpath.data();\n  /* Create the Java VM */\n  res = JNI_CreateJavaVM(&jvm, &env, &vm_args);\n#endif /* JNI_VERSION_1_2 */\n\n  // Find our LudiiGameWrapper Java class\n  ludiiGameWrapperClass =\n      (jclass)env->NewGlobalRef(env->FindClass(\"utils/LudiiGameWrapper\"));\n  CheckJniException(env);\n\n  // Find our LudiiStateWrapper Java class\n  ludiiStateWrapperClass =\n      (jclass)env->NewGlobalRef(env->FindClass(\"utils/LudiiStateWrapper\"));\n  CheckJniException(env);\n\n  // Find the method ID for the static method giving us the Ludii version\n  ludiiVersionMethodID = env->GetStaticMethodID(\n      ludiiGameWrapperClass, \"ludiiVersion\", 
\"()Ljava/lang/String;\");\n  CheckJniException(env);\n\n  std::cout << \"Using Ludii version \" << LudiiVersion() << std::endl;\n}\n\nvoid JNIUtils::CloseJVM() {\n  JNIEnv* env = JNIUtils::GetEnv();\n\n  if (env != nullptr) {\n    env->DeleteGlobalRef(ludiiStateWrapperClass);\n    env->DeleteGlobalRef(ludiiGameWrapperClass);\n    jvm->DestroyJavaVM();\n\n    jvm = nullptr;\n    res = 0;\n  }\n}\n\n// These will be assigned proper values by InitJVM() call\njclass JNIUtils::ludiiGameWrapperClass = nullptr;\njclass JNIUtils::ludiiStateWrapperClass = nullptr;\njmethodID JNIUtils::ludiiVersionMethodID = nullptr;\n\njclass JNIUtils::LudiiGameWrapperClass() {\n  return ludiiGameWrapperClass;\n}\n\njclass JNIUtils::LudiiStateWrapperClass() {\n  return ludiiStateWrapperClass;\n}\n\nconst std::string JNIUtils::LudiiVersion() {\n  JNIEnv* env = JNIUtils::GetEnv();\n  jstring jstr = (jstring)(\n      env->CallStaticObjectMethod(ludiiGameWrapperClass, ludiiVersionMethodID));\n  CheckJniException(env);\n  const char* strReturn = env->GetStringUTFChars(jstr, (jboolean*)0);\n  CheckJniException(env);\n  const std::string str = strReturn;\n  env->ReleaseStringUTFChars(jstr, strReturn);\n  CheckJniException(env);\n  env->DeleteLocalRef(jstr);\n  CheckJniException(env);\n  return str;\n}\n\n}  // namespace Ludii\n"
  },
  {
    "path": "src/games/ludii/jni_utils.h",
    "content": "// strongly inspired from\n// https://gist.github.com/alexminnaar/90cf1ea3de45e79a1b14081d90d214b7 might\n// need something like export\n// LD_LIBRARY_PATH=/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/server/ maybe\n// also install jvm\n\n/*\nCopyright (c) 2020 Alex Minnaar\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n*/\n\n#pragma once\n\n#include <cstring>\n#include <jni.h>  // NOLINT\n#include <stdexcept>\n#include <string>\n\nnamespace Ludii {\n\nclass JNIUtils {\n public:\n  static JNIEnv* GetEnv();\n\n  static void InitJVM(std::string jar_location);\n  static void CloseJVM();\n\n  static void CheckJniException(JNIEnv* jenv) {\n    if (jenv->ExceptionCheck()) {\n      jenv->ExceptionDescribe();\n      jenv->ExceptionClear();\n      printf(\"Java Exception at line %d of %s\\n\", __LINE__, __FILE__);\n      throw std::runtime_error(\"Java exception thrown!\");\n    }\n  }\n\n  static jclass LudiiGameWrapperClass();\n  static jclass LudiiStateWrapperClass();\n\n  /**\n   * @return A string description of the version of Ludii that we're working\n   * with.\n   */\n  static const std::string LudiiVersion();\n\n private:\n  static JavaVM* jvm;\n  static jint res;\n\n  thread_local static JNIEnv* env;\n\n  /** Our LudiiGameWrapper class in Java */\n  static jclass ludiiGameWrapperClass;\n\n  /** Our LudiiStateWrapper class in Java */\n  static jclass ludiiStateWrapperClass;\n\n  /** Method ID for the ludiiVersion() method in Java */\n  static jmethodID ludiiVersionMethodID;\n};\n\n}  // namespace Ludii\n"
  },
  {
    "path": "src/games/ludii/ludii_game_wrapper.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: Dennis Soemers\n// - Affiliation: Maastricht University, DKE, Digital Ludeme Project (Ludii\n// developer)\n// - Github: https://github.com/DennisSoemers/\n// - Email: dennis.soemers@maastrichtuniversity.nl (or d.soemers@gmail.com)\n\n#include \"ludii_game_wrapper.h\"\n#include \"jni_utils.h\"\n\nnamespace Ludii {\n\n// NOTE: String descriptions of signatures of Java methods can be found by\n// navigating to directory containing the .class files and using:\n//\n// javap -s <ClassName.class>\n\nLudiiGameWrapper::LudiiGameWrapper(const std::string lud_path) {\n\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  jclass ludiiGameWrapperClass = JNIUtils::LudiiGameWrapperClass();\n\n  // Find the LudiiGameWrapper Java construct method\n  jmethodID ludiiGameWrapperConstruct =\n      jenv->GetStaticMethodID(ludiiGameWrapperClass, \"construct\",\n                              \"(Ljava/lang/String;)Lutils/LudiiGameWrapper;\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Convert our lud path into a Java string\n  jstring java_lud_path = jenv->NewStringUTF(lud_path.c_str());\n  JNIUtils::CheckJniException(jenv);\n\n  // Call our Java construct method to instantiate new object\n  jobject local_ref = jenv->CallStaticObjectMethod(\n      ludiiGameWrapperClass, ludiiGameWrapperConstruct, java_lud_path);\n  JNIUtils::CheckJniException(jenv);\n  ludiiGameWrapperJavaObject = jenv->NewGlobalRef(local_ref);\n  jenv->DeleteLocalRef(local_ref);\n\n  // Find method IDs for the two tensor shape Java methods that we may be\n  // calling frequently\n  stateTensorsShapeMethodID =\n      jenv->GetMethodID(ludiiGameWrapperClass, \"stateTensorsShape\", \"()[I\");\n  JNIUtils::CheckJniException(jenv);\n  moveTensorsShapeMethodID =\n      
jenv->GetMethodID(ludiiGameWrapperClass, \"moveTensorsShape\", \"()[I\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Find the method ID for the stateTensorChannelNames() method in Java\n  stateTensorChannelNamesMethodID =\n      jenv->GetMethodID(ludiiGameWrapperClass, \"stateTensorChannelNames\",\n                        \"()[Ljava/lang/String;\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Find the method ID for the numPlayers() method in Java\n  numPlayersMethodID =\n      jenv->GetMethodID(ludiiGameWrapperClass, \"numPlayers\", \"()I\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Clean up memory\n  jenv->DeleteLocalRef(java_lud_path);\n  JNIUtils::CheckJniException(jenv);\n}\n\nLudiiGameWrapper::LudiiGameWrapper(\n    const std::string lud_path, const std::vector<std::string> game_options) {\n\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  jclass ludiiGameWrapperClass = JNIUtils::LudiiGameWrapperClass();\n\n  // Find the LudiiGameWrapper Java construct method (with extra argument for\n  // options)\n  jmethodID ludiiGameWrapperConstruct = jenv->GetStaticMethodID(\n      ludiiGameWrapperClass, \"construct\",\n      \"(Ljava/lang/String;[Ljava/lang/String;)Lutils/LudiiGameWrapper;\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Convert our lud path into a Java string\n  jstring java_lud_path = jenv->NewStringUTF(lud_path.c_str());\n  JNIUtils::CheckJniException(jenv);\n\n  // Convert vector of game options into array of Java strings\n  const jobjectArray java_game_options = (jobjectArray)jenv->NewObjectArray(\n      game_options.size(), jenv->FindClass(\"java/lang/String\"), nullptr);\n  JNIUtils::CheckJniException(jenv);\n  for (size_t i = 0; i < game_options.size(); ++i) {\n    jstring jstr = jenv->NewStringUTF(game_options[i].c_str());\n    jenv->SetObjectArrayElement(java_game_options, i, jstr);\n    jenv->DeleteLocalRef(jstr);\n  }\n\n  // Call our Java construct method to instantiate new object\n  jobject local_ref = jenv->CallStaticObjectMethod(\n     
 ludiiGameWrapperClass, ludiiGameWrapperConstruct, java_lud_path,\n      java_game_options);\n  JNIUtils::CheckJniException(jenv);\n  ludiiGameWrapperJavaObject = jenv->NewGlobalRef(local_ref);\n  jenv->DeleteLocalRef(local_ref);\n\n  // Find method IDs for the two tensor shape Java methods that we may be\n  // calling frequently\n  stateTensorsShapeMethodID =\n      jenv->GetMethodID(ludiiGameWrapperClass, \"stateTensorsShape\", \"()[I\");\n  JNIUtils::CheckJniException(jenv);\n  moveTensorsShapeMethodID =\n      jenv->GetMethodID(ludiiGameWrapperClass, \"moveTensorsShape\", \"()[I\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Find the method ID for the stateTensorChannelNames() method in Java\n  stateTensorChannelNamesMethodID =\n      jenv->GetMethodID(ludiiGameWrapperClass, \"stateTensorChannelNames\",\n                        \"()[Ljava/lang/String;\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Find the method ID for the numPlayers() method in Java\n  numPlayersMethodID =\n      jenv->GetMethodID(ludiiGameWrapperClass, \"numPlayers\", \"()I\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Clean up memory\n  jenv->DeleteLocalRef(java_lud_path);\n  jenv->DeleteLocalRef(java_game_options);\n}\n\nLudiiGameWrapper::LudiiGameWrapper(LudiiGameWrapper const& other) {\n\n  JNIEnv* jenv = JNIUtils::GetEnv();\n\n  // We can just copy the pointer to the same Java Game object\n  ludiiGameWrapperJavaObject =\n      jenv->NewGlobalRef(other.ludiiGameWrapperJavaObject);\n  JNIUtils::CheckJniException(jenv);\n\n  // We can just copy all the pointers to methods\n  stateTensorsShapeMethodID = other.stateTensorsShapeMethodID;\n  moveTensorsShapeMethodID = other.moveTensorsShapeMethodID;\n  stateTensorChannelNamesMethodID = other.stateTensorChannelNamesMethodID;\n  numPlayersMethodID = other.numPlayersMethodID;\n}\n\nLudiiGameWrapper& LudiiGameWrapper::operator=(LudiiGameWrapper const& other) {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n\n  // We can just copy the pointer to 
the same Java Game object\n  ludiiGameWrapperJavaObject =\n      jenv->NewGlobalRef(other.ludiiGameWrapperJavaObject);\n  JNIUtils::CheckJniException(jenv);\n\n  // We can just copy all the pointers to methods\n  stateTensorsShapeMethodID = other.stateTensorsShapeMethodID;\n  moveTensorsShapeMethodID = other.moveTensorsShapeMethodID;\n  stateTensorChannelNamesMethodID = other.stateTensorChannelNamesMethodID;\n  numPlayersMethodID = other.numPlayersMethodID;\n\n  return *this;\n}\n\nLudiiGameWrapper::~LudiiGameWrapper() {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  if (jenv) {\n    jenv->DeleteGlobalRef(ludiiGameWrapperJavaObject);\n  }\n}\n\nconst std::array<int, 3>& LudiiGameWrapper::StateTensorsShape() {\n  if (not stateTensorsShape) {\n    JNIEnv* jenv = JNIUtils::GetEnv();\n\n    // Get our array of Java ints\n    const jintArray jint_array = static_cast<jintArray>(jenv->CallObjectMethod(\n        ludiiGameWrapperJavaObject, stateTensorsShapeMethodID));\n    JNIUtils::CheckJniException(jenv);\n    jint* jints = jenv->GetIntArrayElements(jint_array, nullptr);\n    JNIUtils::CheckJniException(jenv);\n\n    // Create our C++ array of 3 ints\n    stateTensorsShape = std::make_unique<std::array<int, 3>>(\n        std::array<int, 3>{jints[0], jints[1], jints[2]});\n\n    // Allow JVM to clean up memory now that we have our own ints\n    jenv->ReleaseIntArrayElements(jint_array, jints, 0);\n    jenv->DeleteLocalRef(jint_array);\n  }\n\n  return *stateTensorsShape;\n}\n\nconst std::array<int, 3>& LudiiGameWrapper::MoveTensorsShape() {\n  if (not moveTensorsShape) {\n    JNIEnv* jenv = JNIUtils::GetEnv();\n\n    // Get our array of Java ints\n    const jintArray jint_array = static_cast<jintArray>(jenv->CallObjectMethod(\n        ludiiGameWrapperJavaObject, moveTensorsShapeMethodID));\n    JNIUtils::CheckJniException(jenv);\n    jint* jints = jenv->GetIntArrayElements(jint_array, nullptr);\n    JNIUtils::CheckJniException(jenv);\n\n    // Create our C++ array of 3 ints\n   
 moveTensorsShape = std::make_unique<std::array<int, 3>>(\n        std::array<int, 3>{jints[0], jints[1], jints[2]});\n\n    // Allow JVM to clean up memory now that we have our own ints\n    jenv->ReleaseIntArrayElements(jint_array, jints, 0);\n    jenv->DeleteLocalRef(jint_array);\n  }\n\n  return *moveTensorsShape;\n}\n\nint LudiiGameWrapper::NumPlayers() {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  const int numPlayers =\n      (int)jenv->CallIntMethod(ludiiGameWrapperJavaObject, numPlayersMethodID);\n  JNIUtils::CheckJniException(JNIUtils::GetEnv());\n  return numPlayers;\n}\n\nconst std::vector<std::string> LudiiGameWrapper::stateTensorChannelNames() {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  std::vector<std::string> channelNames;\n\n  const jobjectArray java_arr =\n      static_cast<jobjectArray>(jenv->CallObjectMethod(\n          ludiiGameWrapperJavaObject, stateTensorChannelNamesMethodID));\n  JNIUtils::CheckJniException(jenv);\n  const int len = jenv->GetArrayLength(java_arr);\n  JNIUtils::CheckJniException(jenv);\n\n  for (int i = 0; i < len; ++i) {\n    jstring jstr = (jstring)(jenv->GetObjectArrayElement(java_arr, i));\n    JNIUtils::CheckJniException(jenv);\n\n    // Convert Java string to C++ string\n    const jsize jstr_len = jenv->GetStringUTFLength(jstr);\n    JNIUtils::CheckJniException(jenv);\n    const char* chars = jenv->GetStringUTFChars(jstr, (jboolean*)0);\n    JNIUtils::CheckJniException(jenv);\n    std::string str(chars, jstr_len);\n\n    channelNames.push_back(str);\n\n    // Allow JVM to clean up memory\n    jenv->ReleaseStringUTFChars(jstr, chars);\n    jenv->DeleteLocalRef(jstr);\n  }\n\n  jenv->DeleteLocalRef(java_arr);\n\n  return channelNames;\n}\n\n}  // namespace Ludii\n"
  },
  {
    "path": "src/games/ludii/ludii_game_wrapper.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: Dennis Soemers\n// - Affiliation: Maastricht University, DKE, Digital Ludeme Project (Ludii\n// developer)\n// - Github: https://github.com/DennisSoemers/\n// - Email: dennis.soemers@maastrichtuniversity.nl (or d.soemers@gmail.com)\n\n#pragma once\n\n#include <array>\n#include <jni.h>\n#include <memory>\n#include <string>\n#include <vector>\n\nnamespace Ludii {\n\n/**\n * C++ wrapper around Ludii's \"LudiiGameWrapper\" class.\n *\n * This class takes care of calling all the required Java methods from Ludii\n * games.\n */\nclass LudiiGameWrapper {\n\n public:\n  /**\n   * Constructor; calls the LudiiGameWrapper Java constructor\n   *\n   * @param lud_path String describing the path of the game to load. Should end\n   * in .lud\n   */\n  LudiiGameWrapper(const std::string lud_path);\n\n  /**\n   * Constructor; calls the LudiiGameWrapper Java constructor\n   *\n   * @param lud_path String describing the path of the game to load. Should end\n   * in .lud\n   * @param game_options Vector of additional options to pass into Ludii,\n   * describing variant of game to load.\n   */\n  LudiiGameWrapper(const std::string lud_path,\n                   const std::vector<std::string> game_options);\n\n  /**\n   * Copy constructor. Re-uses the same Java LudiiGameWrapper object.\n   */\n  LudiiGameWrapper(LudiiGameWrapper const&);\n\n  /**\n   * Copy-assignment operator. 
Re-uses the same Java LudiiGameWrapper object.\n   */\n  LudiiGameWrapper& operator=(LudiiGameWrapper const& other);\n\n  /**\n   * Destructor\n   */\n  ~LudiiGameWrapper();\n\n  /**\n   * @return Array of 3 ints describing the shape of state tensors; [channels,\n   * x, y]\n   */\n  const std::array<int, 3>& StateTensorsShape();\n\n  /**\n   * @return Array of 3 ints describing the shape of move tensors; [channels, x,\n   * y]\n   */\n  const std::array<int, 3>& MoveTensorsShape();\n\n  /**\n   * @return The number of players in this game.\n   */\n  int NumPlayers();\n\n  /**\n   * @return Vector with, for every channel in state tensors, a name describing\n   * what data we have in that channel.\n   */\n  const std::vector<std::string> stateTensorChannelNames();\n\n  /** Our object of Java's LudiiGameWrapper type */\n  jobject ludiiGameWrapperJavaObject;\n\n private:\n  /** Method ID for the stateTensorsShape() method in Java */\n  jmethodID stateTensorsShapeMethodID;\n\n  /** Method ID for the moveTensorsShape() method in Java */\n  jmethodID moveTensorsShapeMethodID;\n\n  /** Method ID for the stateTensorChannelNames() method in Java */\n  jmethodID stateTensorChannelNamesMethodID;\n\n  /** Method ID for the numPlayers() method in Java */\n  jmethodID numPlayersMethodID;\n\n  /**\n   * Shape for state tensors.\n   * This remains constant throughout episodes, so can just compute it once and\n   * store\n   */\n  std::unique_ptr<std::array<int, 3>> stateTensorsShape;\n\n  /**\n   * Shape for move tensors.\n   * This remains constant throughout episodes, so can just compute it once and\n   * store\n   */\n  std::unique_ptr<std::array<int, 3>> moveTensorsShape;\n};\n\n}  // namespace Ludii\n"
  },
  {
    "path": "src/games/ludii/ludii_state_wrapper.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: Dennis Soemers\n// - Affiliation: Maastricht University, DKE, Digital Ludeme Project (Ludii\n// developer)\n// - Github: https://github.com/DennisSoemers/\n// - Email: dennis.soemers@maastrichtuniversity.nl (or d.soemers@gmail.com)\n\n#include \"ludii_state_wrapper.h\"\n#include \"jni_utils.h\"\n\nnamespace Ludii {\n\n// Action::Action(int i, int j, int k) {\n//  _loc[0] = i;\n//  _loc[1] = j;\n//  _loc[2] = k;\n//  _hash = uint32_t(0);  // TODO implement hash for stochastic games\n//}\n\nvoid LudiiStateWrapper::Initialize() {\n  Reset();\n\n  _hash = 0;  // TODO implement hash for stochastic games\n  _status = GameStatus::player0Turn;\n\n  // Initializes Features.\n  _featSize.resize(3);\n  const std::array<int, 3>& sts = ludiiGameWrapper->StateTensorsShape();\n  std::copy(sts.begin(), sts.end(), _featSize.begin());\n  _features = std::vector<float>(_featSize[0] * _featSize[1] * _featSize[2]);\n  findFeatures();\n  fillFullFeatures();\n\n  // Initializes Actions.\n  _actionSize.resize(3);\n  const std::array<int, 3>& mts = ludiiGameWrapper->MoveTensorsShape();\n  std::copy(mts.begin(), mts.end(), _actionSize.begin());\n  findActions();\n}\n\nvoid LudiiStateWrapper::findFeatures() {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  const jfloatArray flatTensorArray =\n      static_cast<jfloatArray>(jenv->CallObjectMethod(\n          ludiiStateWrapperJavaObject, toTensorFlatMethodID));\n  JNIUtils::CheckJniException(jenv);\n  const jsize numEntries = jenv->GetArrayLength(flatTensorArray);\n  jfloat* jfloats =\n      (jfloat*)jenv->GetPrimitiveArrayCritical(flatTensorArray, nullptr);\n  std::copy(jfloats, jfloats + numEntries, _features.begin());\n\n  // Allow JVM to clean up memory now that we have our own floats\n  
jenv->ReleasePrimitiveArrayCritical(flatTensorArray, jfloats, JNI_ABORT);\n  jenv->DeleteLocalRef(flatTensorArray);\n}\n\nvoid LudiiStateWrapper::findActions() {\n  const std::vector<std::array<int, 3>> moves = LegalMovesTensors();\n  size_t nbMoves = moves.size();\n  _legalActions.clear();\n  _legalActions.reserve(nbMoves);\n  for (size_t i = 0; i < nbMoves; ++i) {\n    const std::array<int, 3>& move = moves[i];\n    _legalActions.emplace_back(i, move[0], move[1], move[2]);\n  }\n}\n\nstd::unique_ptr<core::State> LudiiStateWrapper::clone_() const {\n  return std::make_unique<LudiiStateWrapper>(*this);\n}\n\nvoid LudiiStateWrapper::ApplyAction(const _Action& action) {\n\n  assert(not IsTerminal());\n\n  // play move\n  ApplyNthMove(action.GetIndex());\n\n  // update game status\n  if (IsTerminal()) {\n\n    if (isOnePlayerGame()) {\n      const double score = Returns(0);\n      if (score >= 0.99)  // Probably just 1.0\n        _status = GameStatus::player0Win;\n      else if (score <= -0.99)  // Probably just -1.0\n        _status = GameStatus::player1Win;\n      else\n        _status = GameStatus::tie;\n    } else {\n      const double score_0 = Returns(0);\n      const double score_1 = Returns(1);\n      if (score_0 > score_1)\n        _status = score_0 > 0.0 ? GameStatus::player0Win : GameStatus::tie;\n      else\n        _status = score_1 > 0.0 ? GameStatus::player1Win : GameStatus::tie;\n    }\n  } else {\n    const int player = CurrentPlayer();\n    _status = player == 0 ? 
GameStatus::player0Turn : GameStatus::player1Turn;\n  }\n\n  // update features\n  findFeatures();\n  fillFullFeatures();\n\n  // update actions\n  findActions();\n\n  // update hash  // TODO\n}\n\nvoid LudiiStateWrapper::DoGoodAction() {\n  return DoRandomAction();\n}\n\n// NOTE: String descriptions of signatures of Java methods can be found by\n// navigating to directory containing the .class files and using:\n//\n// javap -s <ClassName.class>\n\nLudiiStateWrapper::LudiiStateWrapper(int seed,\n                                     LudiiGameWrapper&& inLudiiGameWrapper)\n    : core::State(seed) {\n\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  ludiiGameWrapper =\n      std::make_shared<LudiiGameWrapper>(std::move(inLudiiGameWrapper));\n  jclass ludiiStateWrapperClass = JNIUtils::LudiiStateWrapperClass();\n\n  // Find the LudiiStateWrapper Java constructor\n  jmethodID ludiiStateWrapperConstructor = jenv->GetMethodID(\n      ludiiStateWrapperClass, \"<init>\", \"(Lutils/LudiiGameWrapper;)V\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Call our Java constructor to instantiate new object\n  jobject local_ref =\n      jenv->NewObject(ludiiStateWrapperClass, ludiiStateWrapperConstructor,\n                      ludiiGameWrapper->ludiiGameWrapperJavaObject);\n  JNIUtils::CheckJniException(jenv);\n  ludiiStateWrapperJavaObject = jenv->NewGlobalRef(local_ref);\n  jenv->DeleteLocalRef(local_ref);\n\n  // Find method IDs for all the Java methods we may want to call\n  legalMovesTensorsMethodID =\n      jenv->GetMethodID(ludiiStateWrapperClass, \"legalMovesTensors\", \"()[[I\");\n  JNIUtils::CheckJniException(jenv);\n  numLegalMovesMethodID =\n      jenv->GetMethodID(ludiiStateWrapperClass, \"numLegalMoves\", \"()I\");\n  JNIUtils::CheckJniException(jenv);\n  applyNthMoveMethodID =\n      jenv->GetMethodID(ludiiStateWrapperClass, \"applyNthMove\", \"(I)V\");\n  JNIUtils::CheckJniException(jenv);\n  returnsMethodID =\n      jenv->GetMethodID(ludiiStateWrapperClass, \"returns\", 
\"(I)D\");\n  JNIUtils::CheckJniException(jenv);\n  isTerminalMethodID =\n      jenv->GetMethodID(ludiiStateWrapperClass, \"isTerminal\", \"()Z\");\n  JNIUtils::CheckJniException(jenv);\n  toTensorFlatMethodID =\n      jenv->GetMethodID(ludiiStateWrapperClass, \"toTensorFlat\", \"()[F\");\n  JNIUtils::CheckJniException(jenv);\n  currentPlayerMethodID =\n      jenv->GetMethodID(ludiiStateWrapperClass, \"currentPlayer\", \"()I\");\n  JNIUtils::CheckJniException(jenv);\n  resetMethodID = jenv->GetMethodID(ludiiStateWrapperClass, \"reset\", \"()V\");\n  JNIUtils::CheckJniException(jenv);\n  copyFromMethodID = jenv->GetMethodID(\n      ludiiStateWrapperClass, \"copyFrom\", \"(Lutils/LudiiStateWrapper;)V\");\n  JNIUtils::CheckJniException(jenv);\n  getRandomRolloutsRewardMethodID = jenv->GetMethodID(\n      ludiiStateWrapperClass, \"getRandomRolloutsReward\", \"(III)D\");\n  JNIUtils::CheckJniException(jenv);\n}\n\nLudiiStateWrapper::LudiiStateWrapper(const LudiiStateWrapper& other)\n    : core::State(other)\n    , ludiiGameWrapper(other.ludiiGameWrapper) {\n\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  jclass ludiiStateWrapperClass = JNIUtils::LudiiStateWrapperClass();\n\n  // Find the LudiiStateWrapper Java copy constructor\n  jmethodID ludiiStateWrapperCopyConstructor = jenv->GetMethodID(\n      ludiiStateWrapperClass, \"<init>\", \"(Lutils/LudiiStateWrapper;)V\");\n  JNIUtils::CheckJniException(jenv);\n\n  // Call our Java constructor to instantiate new object\n  jobject local_ref =\n      jenv->NewObject(ludiiStateWrapperClass, ludiiStateWrapperCopyConstructor,\n                      other.ludiiStateWrapperJavaObject);\n  JNIUtils::CheckJniException(jenv);\n  ludiiStateWrapperJavaObject = jenv->NewGlobalRef(local_ref);\n  jenv->DeleteLocalRef(local_ref);\n\n  // We can just copy all the pointers to methods\n  legalMovesTensorsMethodID = other.legalMovesTensorsMethodID;\n  numLegalMovesMethodID = other.numLegalMovesMethodID;\n  applyNthMoveMethodID = 
other.applyNthMoveMethodID;\n  returnsMethodID = other.returnsMethodID;\n  isTerminalMethodID = other.isTerminalMethodID;\n  toTensorFlatMethodID = other.toTensorFlatMethodID;\n  currentPlayerMethodID = other.currentPlayerMethodID;\n  resetMethodID = other.resetMethodID;\n  copyFromMethodID = other.copyFromMethodID;\n  getRandomRolloutsRewardMethodID = other.getRandomRolloutsRewardMethodID;\n}\n\nLudiiStateWrapper& LudiiStateWrapper::operator=(\n    LudiiStateWrapper const& other) {\n  if (&other == this)\n    return *this;\n\n  core::State::operator=(other);\n\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  jenv->CallVoidMethod(ludiiStateWrapperJavaObject, copyFromMethodID,\n                       other.ludiiStateWrapperJavaObject);\n  JNIUtils::CheckJniException(jenv);\n\n  // We can just copy all the pointers to methods\n  legalMovesTensorsMethodID = other.legalMovesTensorsMethodID;\n  numLegalMovesMethodID = other.numLegalMovesMethodID;\n  applyNthMoveMethodID = other.applyNthMoveMethodID;\n  returnsMethodID = other.returnsMethodID;\n  isTerminalMethodID = other.isTerminalMethodID;\n  toTensorFlatMethodID = other.toTensorFlatMethodID;\n  currentPlayerMethodID = other.currentPlayerMethodID;\n  resetMethodID = other.resetMethodID;\n  copyFromMethodID = other.copyFromMethodID;\n  getRandomRolloutsRewardMethodID = other.getRandomRolloutsRewardMethodID;\n\n  return *this;\n}\n\nLudiiStateWrapper::~LudiiStateWrapper() {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  if (jenv) {\n    jenv->DeleteGlobalRef(ludiiStateWrapperJavaObject);\n  }\n}\n\nstd::vector<std::array<int, 3>> LudiiStateWrapper::LegalMovesTensors() const {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  const jobjectArray javaArrOuter =\n      static_cast<jobjectArray>(jenv->CallObjectMethod(\n          ludiiStateWrapperJavaObject, legalMovesTensorsMethodID));\n  JNIUtils::CheckJniException(jenv);\n  const jsize numLegalMoves = jenv->GetArrayLength(javaArrOuter);\n\n  std::vector<std::array<int, 3>> 
matrix(numLegalMoves);\n  for (jsize i = 0; i < numLegalMoves; ++i) {\n    const jintArray inner =\n        static_cast<jintArray>(jenv->GetObjectArrayElement(javaArrOuter, i));\n    jint* jints = (jint*)jenv->GetPrimitiveArrayCritical(inner, nullptr);\n\n    matrix[i] = {jints[0], jints[1], jints[2]};\n\n    // Allow JVM to clean up memory now that we have our own ints\n    jenv->ReleasePrimitiveArrayCritical(inner, jints, JNI_ABORT);\n    jenv->DeleteLocalRef(inner);\n  }\n\n  jenv->DeleteLocalRef(javaArrOuter);\n\n  return matrix;\n}\n\nint LudiiStateWrapper::NumLegalMoves() const {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  const int num_legal_moves = (int)jenv->CallIntMethod(\n      ludiiStateWrapperJavaObject, numLegalMovesMethodID);\n  JNIUtils::CheckJniException(jenv);\n  return num_legal_moves;\n}\n\nvoid LudiiStateWrapper::ApplyNthMove(const int n) const {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  jenv->CallVoidMethod(ludiiStateWrapperJavaObject, applyNthMoveMethodID, n);\n  JNIUtils::CheckJniException(jenv);\n}\n\ndouble LudiiStateWrapper::Returns(const int player) const {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  const double returns = (double)jenv->CallDoubleMethod(\n      ludiiStateWrapperJavaObject, returnsMethodID, player);\n  JNIUtils::CheckJniException(jenv);\n  return returns;\n}\n\nbool LudiiStateWrapper::IsTerminal() const {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  const bool is_terminal = (bool)jenv->CallBooleanMethod(\n      ludiiStateWrapperJavaObject, isTerminalMethodID);\n  JNIUtils::CheckJniException(jenv);\n  return is_terminal;\n}\n\nint LudiiStateWrapper::CurrentPlayer() const {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  const int current_player = (int)jenv->CallIntMethod(\n      ludiiStateWrapperJavaObject, currentPlayerMethodID);\n  JNIUtils::CheckJniException(jenv);\n  return current_player;\n}\n\nvoid LudiiStateWrapper::Reset() const {\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  jenv->CallVoidMethod(ludiiStateWrapperJavaObject, 
resetMethodID);\n  JNIUtils::CheckJniException(jenv);\n}\n\nbool LudiiStateWrapper::isOnePlayerGame() const {\n  return (ludiiGameWrapper->NumPlayers() == 1);\n}\n\nfloat LudiiStateWrapper::getRandomRolloutReward(int player) const {\n  const int numSimulation = 10;\n  const int rolloutRandomMovesCap =\n      200;  // Use -1 for no cap on num moves in rollout\n  JNIEnv* jenv = JNIUtils::GetEnv();\n  const double avgReward = (double)jenv->CallDoubleMethod(\n      ludiiStateWrapperJavaObject, getRandomRolloutsRewardMethodID, player,\n      numSimulation, rolloutRandomMovesCap);\n  JNIUtils::CheckJniException(jenv);\n  return (float)avgReward;\n}\n\n}  // namespace Ludii\n"
  },
  {
    "path": "src/games/ludii/ludii_state_wrapper.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: Dennis Soemers\n// - Affiliation: Maastricht University, DKE, Digital Ludeme Project (Ludii\n// developer)\n// - Github: https://github.com/DennisSoemers/\n// - Email: dennis.soemers@maastrichtuniversity.nl (or d.soemers@gmail.com)\n\n#pragma once\n\n#include <algorithm>\n#include <array>\n#include <jni.h>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"../../core/state.h\"\n#include \"ludii_game_wrapper.h\"\n\nnamespace Ludii {\n\n// class Action : public ::_Action {\n// public:\n//  Action(int i, int j, int k);\n//};\n\n/**\n * C++ wrapper around Ludii's \"LudiiStateWrapper\" class.\n *\n * This class takes care of calling all the required Java methods from Ludii\n * states.\n */\nclass LudiiStateWrapper : public core::State {\n\n public:\n  void Initialize();\n  std::unique_ptr<core::State> clone_() const;\n  void ApplyAction(const _Action& action);\n  void DoGoodAction();\n\n public:\n  /**\n   * Constructor; calls the LudiiStateWrapper Java constructor\n   */\n  LudiiStateWrapper(int seed, LudiiGameWrapper&& inLudiiGameWrapper);\n\n  /**\n   * Copy constructor; calls the Java copy constructor for LudiiStateWrapper\n   *\n   * @param other The LudiiStateWrapper object of which we wish to create a deep\n   * copy\n   */\n  LudiiStateWrapper(const LudiiStateWrapper& other);\n\n  /**\n   * Destructor\n   */\n  ~LudiiStateWrapper();\n\n  /**\n   * @return 2D int array; for every legal move, we have an array of\n   * \tlength 3 containing [channel, x, y]\n   */\n  std::vector<std::array<int, 3>> LegalMovesTensors() const;\n\n  /**\n   * @return Number of legal moves in current state\n   */\n  int NumLegalMoves() const;\n\n  /**\n   * Applies the n'th legal move in current game state\n   */\n  void 
 ApplyNthMove(const int n) const;\n\n  /**\n   * NOTE: The Java method that we call for this actually first computes\n   * the array of scores for all players, and then only returns the score\n   * for the queried player. If we often want to do this inside a loop\n   * through all players, it'd be more efficient to call a Java method\n   * that instantly returns the full array once.\n   *\n   * @return Score in [-1.0, 1.0] for given player index (starting at 0).\n   * Will always return 0.0 for non-terminal game states.\n   */\n  double Returns(const int player) const;\n\n  /**\n   * @return True if and only if the current game state is terminal; false\n   * otherwise.\n   */\n  bool IsTerminal() const;\n\n  /**\n   * @return The current player to move (0 for first, 1 for second, etc.)\n   */\n  int CurrentPlayer() const;\n\n  /**\n   * Calls the Java reset() method on the Java game state object\n   */\n  void Reset() const;\n\n  virtual bool isOnePlayerGame() const override;\n\n  virtual float getRandomRolloutReward(int player) const override;\n\n  LudiiStateWrapper& operator=(LudiiStateWrapper const& other);\n\n private:\n  void findFeatures();\n  void findActions();\n\n  // We don't want to be accidentally copying objects of this class\n  // (without having implemented our own, correct copy constructor or assignment\n  // operator)\n  // LudiiStateWrapper& operator=(LudiiStateWrapper const&) = delete;\n\n  /** Pointer to our Game wrapper */\n  std::shared_ptr<LudiiGameWrapper> ludiiGameWrapper;\n\n  /** Our object of Java's LudiiStateWrapper type */\n  jobject ludiiStateWrapperJavaObject;\n\n  /** Method ID for the legalMovesTensors() method in Java */\n  jmethodID legalMovesTensorsMethodID;\n\n  /** Method ID for the numLegalMoves() method in Java */\n  jmethodID numLegalMovesMethodID;\n\n  /** Method ID for the applyNthMove() method in Java */\n  jmethodID applyNthMoveMethodID;\n\n  /** Method ID for the returns() method in Java */\n  jmethodID 
returnsMethodID;\n\n  /** Method ID for the isTerminal() method in Java */\n  jmethodID isTerminalMethodID;\n\n  /** Method ID for the toTensorFlat() method in Java */\n  jmethodID toTensorFlatMethodID;\n\n  /** Method ID for the currentPlayer() method in Java */\n  jmethodID currentPlayerMethodID;\n\n  /** Method ID for the reset() method in Java */\n  jmethodID resetMethodID;\n\n  /** Method ID for the copyFrom() method in Java */\n  jmethodID copyFromMethodID;\n\n  /** Method ID for the getRandomRolloutsReward() method in Java */\n  jmethodID getRandomRolloutsRewardMethodID;\n};\n\n}  // namespace Ludii\n"
  },
  {
    "path": "src/games/mastermind_state.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"mastermind_state.h\"\n#include <algorithm>\n"
  },
  {
    "path": "src/games/mastermind_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <algorithm>\n#include <chrono>\n#include <functional>\n#include <random>\n\n#include \"../core/state.h\"\n// #include <boost/stacktrace.hpp>\n\n/*****************************\n  Mastermind with black pegs only.\n  Each time steps decides one color.\n  HORIZON is the number of rows that player 1 can fill for trying to find the\n  target.\n  ARITY is the number of colors.\n  SIZE is the number of slots per row (and, equivalently, the number of colors\n  to\n  guess).\n  The number of time steps is HORIZON * SIZE because SIZE is the number of\n  colors\n  to decide\n  at each row.\n                        */\nnamespace Mastermind {\n\ntemplate <int SIZE, int HORIZON, int ARITY> class State : public core::State {\n private:\n  // just for enabling verbosity\n  bool mmverbose;\n\n  // representation\n  int _board[HORIZON][SIZE];\n  int _results[HORIZON];\n  int _currentAction[SIZE];\n  int real[SIZE];\n  int _timeStep;\n\n  // helper functions\n  int mmhamming(int real[], int action[]);\n  void rejection(int real[], int board[][SIZE], int results[], int time);\n  bool unicity(int real[], int board[][SIZE], int results[], int time);\n\n  // representation of the state\n  virtual std::string stateDescription() const override {\n    std::string res;\n    res += \"time\";\n    res += std::to_string(_timeStep);\n    res += \"  corresponding to line \";\n    res += std::to_string(_timeStep / SIZE);\n    res += \" and slot \";\n    res += std::to_string(_timeStep % SIZE);\n    res += \"\\n\";\n    for (int i = 0; i < 1 + (_timeStep / SIZE); i++) {\n      for (int j = 0; j < SIZE; j++) {\n        if (i * SIZE + j < _timeStep) {\n          res += \" \";\n          res += std::to_string(_board[i][j]);\n        }\n      }\n    
  if (i * SIZE + SIZE - 1 < _timeStep) {\n        res += \" ==> score \";\n        res += std::to_string(_results[i]);\n        res += \"     win:\";\n        res += std::to_string(_status == GameStatus::player0Win);\n        res += \"(\";\n        res += std::to_string(GetLegalActions().size());\n        res += \")\";\n        res += \"\\n\";\n      }\n    }\n    return res;\n  }\n\n public:\n  virtual bool isOnePlayerGame() const override {\n    return true;\n  }\n  State(int seed);\n  void findActions();\n  void Initialize() override;\n  void ApplyAction(const _Action& action) override;\n  void DoGoodAction() override;\n  std::unique_ptr<core::State> clone_() const override;\n};\n\n// Hamming distance.\ntemplate <int SIZE, int HORIZON, int ARITY>\nint State<SIZE, HORIZON, ARITY>::mmhamming(int real[], int action[]) {\n  int result = SIZE;\n  for (int i = 0; i < SIZE; i++) {\n    if (real[i] != action[i]) {\n      result--;\n    }\n  }\n  return result;\n}\n\n// Rejection sampling -- we need better than that TODO(oteytaud).\ntemplate <int SIZE, int HORIZON, int ARITY>\nvoid State<SIZE, HORIZON, ARITY>::rejection(int real[],\n                                            int board[][SIZE],\n                                            int results[],\n                                            int time) {\n  assert(time < HORIZON);\n  std::uniform_int_distribution<int> distribution(0, ARITY - 1);\n  if (mmverbose) {\n    std::cerr << \"rejection\" << std::endl;\n  }\n  auto dice = std::bind(distribution, _rng);\n  bool found = false;\n  while (!found) {\n    if (mmverbose) {\n      std::cerr << \" let us try...\" << std::endl;\n    }\n    for (int i = 0; i < SIZE; i++) {\n      real[i] = dice();\n    }\n    found = true;\n    for (int j = 0; j < time; j++) {\n      int localdistance = mmhamming(real, board[j]);\n      if (localdistance != results[j]) {\n        if (mmverbose) {\n          std::cerr << \"fail at time \" << j << std::endl;\n        }\n        found = 
false;\n        break;\n      }\n    }\n    // if found, then it's ok we can proceed\n  }\n  if (mmverbose) {\n    for (int i = 0; i < SIZE; i++)\n      std::cout << real[i];\n    std::cout << \"\\n\";\n  }\n}  // end of rejection\n\n// Checking if the solution is unique\ntemplate <int SIZE, int HORIZON, int ARITY>\nbool State<SIZE, HORIZON, ARITY>::unicity(int real[],\n                                          int board[][SIZE],\n                                          int results[],\n                                          int time) {\n  int num_found = 0;\n  assert(time < HORIZON);\n  std::uniform_int_distribution<int> distribution(0, ARITY - 1);\n  if (mmverbose) {\n    std::cout << \"unicity == == == == == == == == == == == = \" << std::endl;\n  }\n  bool found = false;\n  int index = 0;\n  int maxIndex = 1;\n  for (int i = 0; i < SIZE; i++)\n    maxIndex *= ARITY;\n  while (index < maxIndex) {\n    if (mmverbose) {\n      std::cout << \" let us try ... \";\n    }\n    int tempoIndex = index;\n    for (int i = 0; i < SIZE; i++) {\n      real[i] = tempoIndex % ARITY;\n      if (mmverbose) {\n        std::cout << real[i];\n      }\n      tempoIndex /= ARITY;\n    }\n    found = true;\n    if (mmverbose) {\n      std::cout << std::endl;\n    }\n    for (int j = 0; j < time; j++) {\n      int localdistance = mmhamming(real, board[j]);\n      if (mmverbose) {\n        std::cout << \"distance \" << localdistance << \"/\" << results[j]\n                  << std::endl;\n      }\n      if (localdistance != results[j]) {\n        if (mmverbose) {\n          std::cout << \"fail at time \" << j << std::endl;\n        }\n        found = false;\n      }\n    }\n    if (found) {\n      num_found++;\n      if (mmverbose) {\n        std::cout << \"success\";\n      }\n      if (num_found > 1) {\n        if (mmverbose) {\n          std::cout << \"several sols \" << std::endl;\n        }\n        return false;\n      }\n    }\n    index++;\n  }\n  if (num_found <= 0) {\n    
std::cout << \"State with no solution : \" << stateDescription() << std::endl;\n  }\n  assert(num_found == 1);\n  return true;\n}\n// end of unicity\n}  // namespace Mastermind\n\n///////////////////////////////////////////////////////////////////////////////\n// Mastermind::State\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int SIZE, int HORIZON, int ARITY>\nMastermind::State<SIZE, HORIZON, ARITY>::State(int seed)\n    : core::State(seed) {\n  mmverbose = false;\n  if (mmverbose) {\n    std::cerr << \" creation\" << std::endl;\n  }\n  long s = std::chrono::system_clock::now().time_since_epoch().count();\n  _rng.seed(s);\n  Initialize();\n  if (mmverbose) {\n    std::cerr << \" creation done\" << std::endl;\n  }\n}\n\ntemplate <int SIZE, int HORIZON, int ARITY>\nvoid Mastermind::State<SIZE, HORIZON, ARITY>::findActions() {\n  clearActions();\n  int time = _timeStep / SIZE;\n  int slot = _timeStep % SIZE;\n  if (_status == GameStatus::player0Turn) {\n    assert(time < HORIZON);\n    for (int i = 0; i < ARITY; i++) {\n      addAction(i, time, slot);\n    }\n    if (mmverbose) {\n      std::cerr << \" fa done\" << std::endl;\n    }\n  }\n}\n\ntemplate <int SIZE, int HORIZON, int ARITY>\nvoid Mastermind::State<SIZE, HORIZON, ARITY>::Initialize() {\n  if (mmverbose) {\n    std::cerr << \" initialize\" << std::endl;\n  }\n  _timeStep = 0;\n  memset(_board, 0, sizeof(_board));\n  memset(_currentAction, 0, sizeof(_currentAction));\n  for (int i = 0; i < HORIZON; i++)\n    _results[i] = -1;\n  _hash = 0;\n  _status = GameStatus::player0Turn;\n\n  // features\n  _featSize = {ARITY + 1, HORIZON, SIZE};\n  _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n  for (int i = 0; i < (int)_features.size(); i++)\n    _features[i] = 0.;\n  _actionSize = {(ARITY + 1), (HORIZON), (SIZE)};\n  if (mmverbose) {\n    std::cerr << \" init --> findactions \" << std::endl;\n  }\n  findActions();\n  if (mmverbose) {\n    std::cerr << 
\" init --> fff \" << std::endl;\n  }\n  fillFullFeatures();\n  _stochastic = false;\n  if (mmverbose) {\n    std::cerr << \" init ok \" << std::endl;\n  }\n}\n\ntemplate <int SIZE, int HORIZON, int ARITY>\nvoid Mastermind::State<SIZE, HORIZON, ARITY>::ApplyAction(\n    const _Action& action) {\n  assert(_legalActions.size() > 0);\n  assert(forcedDice <\n         0);  // mastermind does not have a human mode for the moment.\n  if (mmverbose) {\n    std::cout << \"before:\\n\" << stateDescription();\n  }\n  if (_legalActions.size() == 0) {\n    // std::cout << boost::stacktrace::stacktrace();\n    std::cout << \"no legal action \" << stateDescription() << std::endl;\n    std::cout << \"but playing \" << actionDescription(action) << std::endl;\n    std::cout << \"wonstatus=\" << (_status == GameStatus::player0Win)\n              << std::endl;\n    assert(_legalActions.size() > 0);\n  }\n  assert(_status == GameStatus::player0Turn);\n  int time = _timeStep / SIZE;\n  int slot = _timeStep % SIZE;\n\n  if (mmverbose) {\n    std::cerr << \" timestep = \" << _timeStep << std::endl;\n    std::cerr << \" slot=\" << slot << \"/\" << SIZE << std::endl;\n    std::cerr << \" time=\" << time << \"/\" << HORIZON << std::endl;\n  }\n  assert(time == action.GetY());\n  assert(slot == action.GetZ());\n  _currentAction[slot] = action.GetX();\n  assert(time < HORIZON);\n  assert(slot < SIZE);\n  _board[time][slot] = action.GetX();\n  assert(action.GetX() * HORIZON * SIZE + time * SIZE + slot <\n         (int)_features.size());\n  _features[action.GetX() * HORIZON * SIZE + time * SIZE + slot] = 1;\n  if (slot == SIZE - 1) {\n    rejection(real, _board, _results, time);\n    if (mmverbose) {\n      std::cerr << \" now computing distance\" << std::endl;\n    }\n    unsigned int distance = mmhamming(real, _currentAction);\n    if (mmverbose) {\n      std::cerr << \"ARITY=\" << ARITY << std::endl\n                << \"HORIZON=\" << HORIZON << std::endl\n                << \"SIZE=\" << SIZE 
<< std::endl\n                << \"     AHS=\" << ARITY * HORIZON * SIZE << std::endl\n                << \"time=\" << time << std::endl\n                << \"     timeSIZE=\" << time * SIZE << std::endl\n                << \"AHS+timeSIZE+SIZE=\"\n                << ARITY * HORIZON * SIZE + time * SIZE + SIZE << std::endl\n                << \"versus \" << _features.size() << std::endl;\n    }\n    {\n      _results[time] = distance;\n      assert(ARITY * HORIZON * SIZE + time * SIZE + SIZE - 1 <\n             (int)_features.size());\n      for (int i = 0; i < SIZE; i++) {\n        _features[ARITY * HORIZON * SIZE + time * SIZE + i] =\n            float(distance) / float(ARITY);\n      }\n    }\n    _hash = distance;\n    if (distance == SIZE) {\n      if (mmverbose) {\n        std::cout << \" won by found at time \" << time << std::endl;\n      }\n      _status = GameStatus::player0Win;\n    } else if ((time < HORIZON - 1) &&\n               (unicity(real, _board, _results, time))) {\n      if (mmverbose) {\n        std::cout << \" won by unicity of solution and time=\" << time << \"<\"\n                  << HORIZON - 1 << std::endl;\n        std::cerr << \" won!\" << std::endl;\n      }\n      _status = GameStatus::player0Win;\n    } else if (time == HORIZON - 1) {\n      if (mmverbose) {\n        std::cerr << \" lost!\" << std::endl;\n      }\n      _status = GameStatus::player1Win;\n    }\n  }\n\n  // TODO(oteytaud): cartesian product of actions would be better!\n  _timeStep++;\n  // if slot is SIZE-2 then the next step corresponds\n  // to the last slot and therefore the next state is stochastic.\n  if (slot == SIZE - 2) {\n    if (mmverbose) {\n      std::cerr << \" go on\" << std::endl;\n    }\n    _stochastic = true;\n  } else {\n    _stochastic = false;\n  }\n\n  // TODO(oteytaud): cartesian product of actions would be better!\n  if (mmverbose) {\n    std::cerr << \" findactions \" << std::endl;\n  }\n  findActions();\n  if (mmverbose) {\n    std::cerr << 
\" fff \" << std::endl;\n  }\n  fillFullFeatures();\n  if (mmverbose) {\n    std::cerr << \"AA done\" << std::endl;\n  }\n}\n\ntemplate <int SIZE, int HORIZON, int ARITY>\nvoid Mastermind::State<SIZE, HORIZON, ARITY>::DoGoodAction() {\n  if (mmverbose) {\n    std::cerr << \" do random action\" << std::endl;\n  }\n  DoRandomAction();\n}\n\ntemplate <int SIZE, int HORIZON, int ARITY>\nstd::unique_ptr<core::State> Mastermind::State<SIZE, HORIZON, ARITY>::clone_()\n    const {\n  return std::make_unique<Mastermind::State<SIZE, HORIZON, ARITY>>(*this);\n}\n"
  },
  {
    "path": "src/games/minesweeper.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <ctime>\n#include <iostream>\n\n#include \"minesweeper_common.h\"\n\nnamespace Minesweeper {\n\nstatic std::ostream& timestamp(std::ostream& os) {\n  std::time_t result = std::time(nullptr);\n  char buf[100];\n  if (std::strftime(buf, sizeof(buf), \"%c\", std::localtime(&result))) {\n    os << buf;\n  }\n  return os;\n}  // timestamp\n\nstd::ostream& debug(std::ostream& os) {\n  timestamp(os) << \" [DEBUG] [Minesweeper] \";\n  return os;\n}  // debug\n\nstd::string sparseMaskToString(const SparseMask& mask) {\n  std::ostringstream oss;\n  oss << '[';\n  for (const auto& v : mask) {\n    oss << '(' << v.row() << ',' << v.col() << \"), \";\n  }\n  oss << ']';\n  return oss.str();\n}  // sparseMaskToString\n\n}  // namespace Minesweeper\n"
  },
  {
    "path": "src/games/minesweeper_common.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <array>\n#include <cassert>\n#include <iostream>\n#include <sstream>\n#include <string>\n#include <vector>\n\n#define MINESWEEPER_DEBUG_COMMA ,\n// Debug output disabled:\n#define MINESWEEPER_DEBUG(ARG)\n// Debug output enabled:\n//#define MINESWEEPER_DEBUG(ARG) ARG\n#define UNUSED(ARG)\n\nnamespace Minesweeper {\n\ntemplate <typename Offset, size_t STRIDE, size_t NEIGHBORS>\nstruct NeighborOffsets {};\n\ntemplate <typename Offset, size_t STRIDE>\nstruct NeighborOffsets<Offset, STRIDE, 8> {\n  static constexpr std::array<Offset, 8> dindices = {\n      -static_cast<Offset>(STRIDE) - 1,\n      -static_cast<Offset>(STRIDE),\n      -static_cast<Offset>(STRIDE) + 1,\n      -1,\n      1,\n      static_cast<Offset>(STRIDE) - 1,\n      static_cast<Offset>(STRIDE),\n      static_cast<Offset>(STRIDE) + 1};\n  static constexpr std::array<Offset, 8> drow = {-1, -1, -1, 0, 0, 1, 1, 1};\n  static constexpr std::array<Offset, 8> dcol = {-1, 0, 1, -1, 1, -1, 0, 1};\n};  // class NeighborOffsets<Offset, STRIDE, 8>\n\nclass BoardPosition {\n public:\n  constexpr BoardPosition(int row, int col) noexcept\n      : _r(row)\n      , _c(col) {\n  }\n  constexpr int row() const noexcept {\n    return _r;\n  }\n  constexpr int col() const noexcept {\n    return _c;\n  }\n\n private:\n  int _r;\n  int _c;\n};  // class BoardPosition\n\ntemplate <size_t STRIDE> int rowColToIdx(int row, int col) {\n  return row * static_cast<int>(STRIDE) + col;\n}\n\ntemplate <size_t STRIDE> void idxToRowCol(int idx, int& row, int& col) {\n  row = idx / STRIDE;\n  col = idx % STRIDE;\n}\n\ntemplate <size_t WIDTH, size_t HEIGHT>\nconstexpr bool isInBoard(int row, int col) {\n  return (row >= 0) && (row < static_cast<int>(HEIGHT)) && (col >= 0) &&\n         
(col < static_cast<int>(WIDTH));\n}\n\ntemplate <typename T, size_t STRIDE>\ntypename T::const_reference arrGet(const T& arr, int row, int col) {\n  return arr[rowColToIdx<STRIDE>(row, col)];\n}\n\ntemplate <typename T, size_t STRIDE>\ntypename T::reference arrGet(T& arr, int row, int col) {\n  return arr[rowColToIdx<STRIDE>(row, col)];\n}\n\nusing SparseMask = std::vector<BoardPosition>;\n\nstatic constexpr int UNKNOWN = -1;\nstatic constexpr int BOOM = -2;\nstatic constexpr size_t NUM_NEIGHBORS = 8;\n\ntemplate <size_t WIDTH, size_t HEIGHT, size_t MINES> struct GameDefs {\n  using Board = std::array<int, HEIGHT * WIDTH>;\n  using BoardProbas = std::array<float, HEIGHT * WIDTH>;\n  using BoardMask = std::array<bool, HEIGHT * WIDTH>;\n  using Mines = std::array<int, MINES>;\n  using Neighbors = std::array<int, NUM_NEIGHBORS>;\n\n  static std::string boardMaskToString(const BoardMask& mask) {\n    std::ostringstream oss;\n    int k = 0;\n    for (size_t row = 0; row < HEIGHT; ++row) {\n      for (size_t col = 0; col < WIDTH; ++col) {\n        oss << (mask[k++] ? 
1 : 0);\n      }\n      oss << std::endl;\n    }\n    return oss.str();\n  }  // boardMaskToString\n\n  static std::string boardToString(const Board& board) {\n    using BoardChars = std::array<char, WIDTH * HEIGHT>;\n    BoardChars boardChars;\n    int v;\n    char c;\n    int k = 0;\n    for (size_t row = 0; row < HEIGHT; ++row) {\n      for (size_t col = 0; col < WIDTH; ++col) {\n        v = board[k];\n        switch (v) {\n        case UNKNOWN:\n          c = '?';\n          break;\n        case BOOM:\n          c = 'X';\n          break;\n        default:\n          assert(v >= 0);\n          c = '0' + v;\n        }\n        boardChars[k] = c;\n        ++k;\n      }\n    }\n    std::ostringstream oss;\n    for (size_t row = 0; row < HEIGHT; ++row) {\n      for (size_t col = 0; col < WIDTH; ++col) {\n        oss << arrGet<BoardChars, WIDTH>(boardChars, row, col);\n      }\n      oss << std::endl;\n    }\n    return oss.str();\n  }  // boardToString\n\n  static std::string minesToString(const Mines& mines) {\n    std::ostringstream oss;\n    for (size_t i = 0; i < MINES; ++i) {\n      oss << mines[i] << \" \";\n    }\n    return oss.str();\n  }  // minesToString\n\n  template <typename Predicate>\n  static std::vector<BoardPosition> getNeighbors(const Board& board,\n                                                 int row,\n                                                 int col,\n                                                 Predicate predicate) {\n    std::vector<BoardPosition> result;\n    result.reserve(NUM_NEIGHBORS);\n    int row_i, col_i;\n    for (size_t i = 0; i < NUM_NEIGHBORS; ++i) {\n      row_i = row + NeighborOffsets<int, WIDTH, NUM_NEIGHBORS>::drow[i];\n      col_i = col + NeighborOffsets<int, WIDTH, NUM_NEIGHBORS>::dcol[i];\n      if (isInBoard<WIDTH, HEIGHT>(row_i, col_i) &&\n          predicate(arrGet<Board, WIDTH>(board, row_i, col_i), row_i, col_i)) {\n        result.emplace_back(row_i, col_i);\n      }\n    }\n    return result;\n  }  // 
getNeighbors\n\n  template <typename Predicate, typename Mask>\n  static void markNeighbors(\n      const Board& board, int row, int col, Mask& mask, Predicate predicate) {\n    int row_i, col_i;\n    for (size_t i = 0; i < NUM_NEIGHBORS; ++i) {\n      row_i = row + NeighborOffsets<int, WIDTH, NUM_NEIGHBORS>::drow[i];\n      col_i = col + NeighborOffsets<int, WIDTH, NUM_NEIGHBORS>::dcol[i];\n      if (isInBoard<WIDTH, HEIGHT>(row_i, col_i) &&\n          predicate(arrGet<Board, WIDTH>(board, row_i, col_i), row_i, col_i)) {\n        mask.set(row_i, col_i);\n      }\n    }\n  }  // markNeighbors\n\n  template <typename Predicate>\n  static size_t countNeighbors(const Board& board,\n                               int row,\n                               int col,\n                               Predicate predicate) {\n    size_t count = 0;\n    int row_i, col_i;\n    for (size_t i = 0; i < NUM_NEIGHBORS; ++i) {\n      row_i = row + NeighborOffsets<int, WIDTH, NUM_NEIGHBORS>::drow[i];\n      col_i = col + NeighborOffsets<int, WIDTH, NUM_NEIGHBORS>::dcol[i];\n      if (isInBoard<WIDTH, HEIGHT>(row_i, col_i) &&\n          predicate(arrGet<Board, WIDTH>(board, row_i, col_i), row_i, col_i)) {\n        ++count;\n      }\n    }\n    return count;\n  }  // countNeighbors\n};\n\ntemplate <size_t WIDTH, size_t HEIGHT, size_t MINES> class Mask {\n private:\n  using BoardMask = typename GameDefs<WIDTH, HEIGHT, MINES>::BoardMask;\n\n public:\n  explicit Mask(size_t sparseSize = MINES) {\n    _maskSparse.reserve(sparseSize);\n  }\n\n  void zero() {\n    memset(_maskDense.data(), 0,\n           WIDTH * HEIGHT * sizeof(typename BoardMask::value_type));\n    _maskSparse.clear();\n  }\n\n  void set(int row, int col) {\n    if (!_maskDense[rowColToIdx<WIDTH>(row, col)]) {\n      _maskDense[rowColToIdx<WIDTH>(row, col)] = 1;\n      _maskSparse.emplace_back(row, col);\n    }\n  }\n\n  typename BoardMask::value_type get(int row, int col) const {\n    return _maskDense[rowColToIdx<WIDTH>(row, 
col)];\n  }\n\n  const BoardMask& dense() const {\n    return _maskDense;\n  }\n\n  const std::vector<BoardPosition>& sparse() const {\n    return _maskSparse;\n  }\n\n private:\n  BoardMask _maskDense;\n  SparseMask _maskSparse;\n};  // class Mask\n\nstd::ostream& debug(std::ostream& os);\n\nstd::string sparseMaskToString(const SparseMask& mask);\n\n}  // namespace Minesweeper\n"
  },
  {
    "path": "src/games/minesweeper_csp_vkms/CMakeLists.txt",
    "content": "CMAKE_MINIMUM_REQUIRED(VERSION 3.3)\nproject(csp_vkms)\n\nset(CMAKE_CXX_STANDARD 17)\n\nif (${Torch_FOUND})\n  include_directories(${TORCH_INCLUDE_DIRS})\nelse()\n  find_package(Torch REQUIRED)\n  include_directories(${TORCH_INCLUDE_DIRS})\nendif()\n\nfind_package( PythonInterp 3.7 REQUIRED )\nfind_package( PythonLibs 3.7 REQUIRED )\ninclude_directories( ${PYTHON_INCLUDE_DIRS} )\n\nset(SRC_DIR ../..)\n\nadd_executable(benchmark_csp_vkms\n  ${SRC_DIR}/core/state.cc\n  ${SRC_DIR}/games/minesweeper.cc\n  csp_vkms.cc\n)\ntarget_include_directories(benchmark_csp_vkms PUBLIC ${SRC_DIR})\ntarget_link_libraries(benchmark_csp_vkms\n  ${CMAKE_THREAD_LIBS_INIT}\n  ${TORCH_LIBRARIES}\n  fmt\n)\n"
  },
  {
    "path": "src/games/minesweeper_csp_vkms/ConnectedComponent.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"../minesweeper_common.h\"\n\nnamespace csp {\nnamespace vkms {\n\nstruct ConnectedComponent {\n  Minesweeper::SparseMask _constraints;\n  Minesweeper::SparseMask _variables;\n};  // struct ConnectedComponent\n\n}  // namespace vkms\n}  // namespace csp\n"
  },
  {
    "path": "src/games/minesweeper_csp_vkms/CspStrategy.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"SolutionSetSampler.h\"\n#include <chrono>\n#include <thread>\n\nnamespace csp {\nnamespace vkms {\n\ntemplate <size_t WIDTH, size_t HEIGHT, size_t MINES> class CspStrategy {\n\n  using _GameDefs = Minesweeper::GameDefs<WIDTH, HEIGHT, MINES>;\n  using Board = typename _GameDefs::Board;\n  using BoardMask = typename _GameDefs::BoardMask;\n  using Mines = typename _GameDefs::Mines;\n  using _Mask = Minesweeper::Mask<WIDTH, HEIGHT, MINES>;\n  using _SolutionSet = SolutionSet<WIDTH, HEIGHT, MINES>;\n  using _SolutionSetSampler = SolutionSetSampler<WIDTH, HEIGHT, MINES>;\n\n public:\n  using MineProbas = std::array<float, HEIGHT * WIDTH>;\n\n  CspStrategy()\n      : _minesMask(MINES)\n      , _notMinesMask(WIDTH * HEIGHT)\n      , _activeConstraints(WIDTH * HEIGHT)\n      , _unconstrainedVariables(WIDTH * HEIGHT)\n      , _processed(WIDTH * HEIGHT) {\n  }  // CspStrategy\n\n  template <typename RngEngine>\n  void sampleMines(Mines& mines, const Board& board, RngEngine& rng) {\n    initializeMinesMasks(board);\n    initializeActiveConstraints(board);\n    initializeUnconstrainedVariables(board);\n    MINESWEEPER_DEBUG(dumpMasks(std::cout));\n    std::vector<ConnectedComponent> connectedActiveConstraints =\n        connectedConstraints(board);\n    MINESWEEPER_DEBUG(dumpConstraints(std::cout, connectedActiveConstraints));\n    std::vector<_SolutionSet> solutionSets;\n    solutionSets.reserve(connectedActiveConstraints.size());\n    for (const ConnectedComponent& component : connectedActiveConstraints) {\n      solutionSets.emplace_back(component, board, _minesMask);\n    }\n    // sampleFromSolutionSets(solutionSets, mines, rng);\n    _SolutionSetSampler sampler(\n        solutionSets, _unconstrainedVariables, 
_minesMask);\n    // computeMineProbabilities(solutionSets, sampler);\n    sampler.sampleMines(mines, rng);\n    std::sort(mines.begin(), mines.end());\n  }  // sampleMines\n\n  std::vector<int> locateForSureMines(const Board& board) {\n    initializeMinesMasks(board);\n    const auto& minePositions = _minesMask.sparse();\n    std::vector<int> mineIndices;\n    mineIndices.reserve(minePositions.size());\n    for (const auto& minePosition : minePositions) {\n      mineIndices.push_back(\n          rowColToIdx<WIDTH>(minePosition.row(), minePosition.col()));\n    }\n    std::sort(mineIndices.begin(), mineIndices.end());\n    return mineIndices;\n  }  // locateForSureMines\n\n  void computeMineProbabilities(const Board& board) {\n    initializeMinesMasks(board);\n    initializeActiveConstraints(board);\n    initializeUnconstrainedVariables(board);\n    std::vector<ConnectedComponent> connectedActiveConstraints =\n        connectedConstraints(board);\n    std::vector<_SolutionSet> solutionSets;\n    solutionSets.reserve(connectedActiveConstraints.size());\n    for (const ConnectedComponent& component : connectedActiveConstraints) {\n      solutionSets.emplace_back(component, board, _minesMask);\n    }\n    _SolutionSetSampler sampler(\n        solutionSets, _unconstrainedVariables, _minesMask);\n    computeMineProbabilities(solutionSets, sampler);\n  }  // computeMineProbabilities\n\n  template <typename RngEngine>\n  void computeMineProbabilitiesAndSampleMines(Mines& mines,\n                                              const Board& board,\n                                              RngEngine& rng) {\n    initializeMinesMasks(board);\n    initializeActiveConstraints(board);\n    initializeUnconstrainedVariables(board);\n    std::vector<ConnectedComponent> connectedActiveConstraints =\n        connectedConstraints(board);\n    std::vector<_SolutionSet> solutionSets;\n    solutionSets.reserve(connectedActiveConstraints.size());\n    for (const ConnectedComponent& 
component : connectedActiveConstraints) {\n      solutionSets.emplace_back(component, board, _minesMask);\n    }\n    _SolutionSetSampler sampler(\n        solutionSets, _unconstrainedVariables, _minesMask);\n    computeMineProbabilities(solutionSets, sampler);\n    sampler.sampleMines(mines, rng);\n    std::sort(mines.begin(), mines.end());\n  }  // computeMineProbabilitiesAndSampleMines\n\n  const MineProbas& getMineProbabilities() const {\n    return _mineProbas;\n  }  // getMineProbabilities\n\n private:\n  void computeMineProbabilities(const std::vector<_SolutionSet>& solutionSets,\n                                const _SolutionSetSampler& solutionSetSampler) {\n    memset(_mineProbas.data(), 0,\n           WIDTH * HEIGHT * sizeof(typename MineProbas::value_type));\n    // mines are 100%\n    for (const auto& pos : _minesMask.sparse()) {\n      arrGet<MineProbas, WIDTH>(_mineProbas, pos.row(), pos.col()) = 1.0;\n    }\n    // not mines are 0%\n    for (const auto& pos : _notMinesMask.sparse()) {\n      arrGet<MineProbas, WIDTH>(_mineProbas, pos.row(), pos.col()) = 0.0;\n    }\n    auto countsWithProbas = solutionSetSampler.countsWithProbabilities();\n    for (const auto& countsWithProba : countsWithProbas) {\n      const auto& counts = countsWithProba.first;\n      auto proba = countsWithProba.second;\n      MINESWEEPER_DEBUG(debug(std::cout) << \"Counts: \");\n      MINESWEEPER_DEBUG(for (auto n : counts) { std::cout << n << \" \"; });\n      MINESWEEPER_DEBUG(std::cout << \", weight=\" << proba << std::endl);\n      assert(counts.size() == solutionSets.size() + 1);\n      for (size_t j = 0; j < solutionSets.size(); ++j) {\n        MINESWEEPER_DEBUG(debug(std::cout)\n                          << \"Solution set \" << j << \": \" << counts[j]\n                          << \" mines\" << std::endl);\n        auto count = counts[j];\n        if (!count) {\n          continue;\n        }\n        const auto& vars = solutionSets[j].getVariables();\n        const auto& varProbas 
= solutionSets[j].getVarProbas(count);\n        MINESWEEPER_DEBUG(debug(std::cout) << \"Variable probabilities: \");\n        MINESWEEPER_DEBUG(for (auto p : varProbas) { std::cout << p << \" \"; });\n        MINESWEEPER_DEBUG(std::cout << std::endl);\n        assert(vars.size() == varProbas.size());\n        for (size_t i = 0; i < vars.size(); ++i) {\n          arrGet<MineProbas, WIDTH>(_mineProbas, vars[i].row(),\n                                    vars[i].col()) += proba * varProbas[i];\n        }\n      }\n      auto count = counts[solutionSets.size()];\n      if (!count) {\n        continue;\n      }\n      MINESWEEPER_DEBUG(debug(std::cout) << \"Unconstrained solution set: \");\n      MINESWEEPER_DEBUG(std::cout << _unconstrainedVariables.sparse().size());\n      MINESWEEPER_DEBUG(std::cout << \" variables, \" << count << \" mines, \");\n      MINESWEEPER_DEBUG(std::cout << \"proba=\"\n                                  << getUnconstrainedVarProba(count));\n      MINESWEEPER_DEBUG(std::cout << std::endl);\n      for (const auto& pos : _unconstrainedVariables.sparse()) {\n        arrGet<MineProbas, WIDTH>(_mineProbas, pos.row(), pos.col()) +=\n            proba * getUnconstrainedVarProba(count);\n      }\n    }\n  }  // computeMineProbabilities\n\n  float getUnconstrainedVarProba(size_t count) const {\n    assert(count > 0);\n    size_t nUnconstr = _unconstrainedVariables.sparse().size();\n    assert(nUnconstr >= count);\n    if (count == 1) {\n      return 1.0f / nUnconstr;\n    }\n    return static_cast<float>(count) /\n           (static_cast<float>(nUnconstr) * (nUnconstr - count + 1));\n  }  // getUnconstrainedVarProba\n\n  std::ostream& dumpMasks(std::ostream& os) {\n    debug(os) << \"Mines mask:\" << std::endl\n              << _GameDefs::boardMaskToString(_minesMask.dense());\n    debug(os) << \"Not mines mask:\" << std::endl\n              << _GameDefs::boardMaskToString(_notMinesMask.dense());\n    debug(os) << \"Active constraints mask:\" << 
std::endl\n              << _GameDefs::boardMaskToString(_activeConstraints.dense());\n    debug(os) << \"Unconstrained variables mask:\" << std::endl\n              << _GameDefs::boardMaskToString(_unconstrainedVariables.dense());\n    return os;\n  }  // dumpMasks\n\n  template <typename T>\n  std::ostream& dumpConstraints(std::ostream& os, const T& constraints) {\n    debug(std::cout) << \"Active constraints:\" << std::endl;\n    size_t i = 0;\n    for (const auto& component : constraints) {\n      debug(std::cout) << \"Component \" << i++ << std::endl;\n      debug(std::cout) << \"Constraints: \"\n                       << sparseMaskToString(component._constraints)\n                       << std::endl;\n      debug(std::cout) << \"Variables: \"\n                       << sparseMaskToString(component._variables) << std::endl;\n    };\n    return os;\n  }  // dumpConstraints\n\n  std::vector<ConnectedComponent> connectedConstraints(const Board& board) {\n    std::vector<ConnectedComponent> connectedComponents;\n    const Minesweeper::SparseMask& constraintsSparse =\n        _activeConstraints.sparse();\n    connectedComponents.reserve(constraintsSparse.size());\n    BoardMask processed;\n    memset(processed.data(), 0,\n           WIDTH * HEIGHT * sizeof(typename BoardMask::value_type));\n    for (const Minesweeper::BoardPosition& bp : constraintsSparse) {\n      if (arrGet<BoardMask, WIDTH>(processed, bp.row(), bp.col())) {\n        continue;\n      }\n      ConnectedComponent component =\n          connectedConstraintsFromSeed(board, bp.row(), bp.col(), processed);\n      connectedComponents.push_back(component);\n    }\n    return connectedComponents;\n  }  // connectedConstraints\n\n  ConnectedComponent connectedConstraintsFromSeed(const Board& board,\n                                                  int row,\n                                                  int col,\n                                                  BoardMask& processed) {\n    // 
collect together active constraints connected through variables\n    const Minesweeper::SparseMask& constraintsSparse =\n        _activeConstraints.sparse();\n    ConnectedComponent component;\n    component._constraints.reserve(constraintsSparse.size());\n    component._variables.reserve(constraintsSparse.size());\n    auto select_unprocessed_variables = [this, &processed](\n                                            int v, int row, int col) {\n      return (v == Minesweeper::UNKNOWN) &&\n             !this->_notMinesMask.get(row, col) &&\n             !this->_minesMask.get(row, col) &&\n             !arrGet<BoardMask, WIDTH>(processed, row, col);\n    };\n    auto select_unprocessed_constraints = [this, &processed](\n                                              int UNUSED(v), int row, int col) {\n      return this->_activeConstraints.get(row, col) &&\n             !arrGet<BoardMask, WIDTH>(processed, row, col);\n    };\n    std::vector<Minesweeper::BoardPosition> currentVariables =\n        _GameDefs::getNeighbors(board, row, col, select_unprocessed_variables);\n    // add variables to the queue\n    std::list<Minesweeper::BoardPosition> var_queue(\n        currentVariables.begin(), currentVariables.end());\n    // mark variables as processed\n    for (const Minesweeper::BoardPosition& bp : currentVariables) {\n      arrGet<BoardMask, WIDTH>(processed, bp.row(), bp.col()) = 1;\n    }\n    // add constraint to the connected component\n    component._constraints.emplace_back(row, col);\n    // mark constraint as processed\n    arrGet<BoardMask, WIDTH>(processed, row, col) = 1;\n    // process the queue of variables\n    while (!var_queue.empty()) {\n      Minesweeper::BoardPosition currentVar = var_queue.front();\n      component._variables.push_back(currentVar);\n      var_queue.pop_front();\n      std::vector<Minesweeper::BoardPosition> currentConstraints =\n          _GameDefs::getNeighbors(board, currentVar.row(), currentVar.col(),\n                           
       select_unprocessed_constraints);\n      for (const auto& currentConstraint : currentConstraints) {\n        currentVariables = _GameDefs::getNeighbors(\n            board, currentConstraint.row(), currentConstraint.col(),\n            select_unprocessed_variables);\n        // add variables to the queue\n        var_queue.insert(\n            var_queue.end(), currentVariables.begin(), currentVariables.end());\n        // mark variables as processed\n        for (const auto& var : currentVariables) {\n          arrGet<BoardMask, WIDTH>(processed, var.row(), var.col()) = 1;\n        }\n        // add constraint to the connected component\n        component._constraints.emplace_back(\n            currentConstraint.row(), currentConstraint.col());\n        // mark constraint as processed\n        arrGet<BoardMask, WIDTH>(\n            processed, currentConstraint.row(), currentConstraint.col()) = 1;\n      }\n    }\n    return component;\n  }  // connectedComponentFromSeed\n\n  void initializeMinesMasks(const Board& board) {\n    _notMinesMask.zero();\n    _minesMask.zero();\n    _processed.zero();\n    auto select_all = [](int UNUSED(v), int UNUSED(row), int UNUSED(col)) {\n      return true;\n    };\n    auto select_potential_mines = [=](int v, int row, int col) {\n      return (v == Minesweeper::UNKNOWN) && !_notMinesMask.get(row, col) &&\n             !_minesMask.get(row, col);\n    };\n    auto select_not_marked_mines = [=](int UNUSED(v), int row, int col) {\n      return !_minesMask.get(row, col);\n    };\n    auto select_marked_mines = [=](int UNUSED(v), int row, int col) {\n      return _minesMask.get(row, col);\n    };\n    int v;\n    bool notchanged;\n    do {\n      notchanged = true;\n      for (size_t row = 0; row < HEIGHT; ++row) {\n        for (size_t col = 0; col < WIDTH; ++col) {\n          if (_processed.get(row, col)) {\n            continue;\n          }\n          v = arrGet<Board, WIDTH>(board, row, col);\n          switch (v) {\n          
case Minesweeper::UNKNOWN:\n            _processed.set(row, col);\n            break;\n          case Minesweeper::BOOM:\n            _minesMask.set(row, col);\n            _processed.set(row, col);\n            notchanged = false;\n            break;\n          case 0:\n            // none of the neighbors are mines\n            _GameDefs::markNeighbors(\n                board, row, col, _notMinesMask, select_all);\n            _notMinesMask.set(row, col);\n            _processed.set(row, col);\n            notchanged = false;\n            break;\n          default:\n            assert(v > 0);\n            size_t num_mines_total = static_cast<size_t>(v);\n            size_t num_mines =\n                _GameDefs::countNeighbors(board, row, col, select_marked_mines);\n            size_t num_potential_mines = _GameDefs::countNeighbors(\n                board, row, col, select_potential_mines);\n            assert(num_mines <= num_mines_total);\n            assert(num_potential_mines + num_mines >= num_mines_total);\n            if (num_mines == num_mines_total) {\n              // the rest are not mines\n              _GameDefs::markNeighbors(\n                  board, row, col, _notMinesMask, select_not_marked_mines);\n              _processed.set(row, col);\n              notchanged = false;\n            } else if (num_potential_mines + num_mines == num_mines_total) {\n              // all candidates are mines\n              _GameDefs::markNeighbors(\n                  board, row, col, _minesMask, select_potential_mines);\n              // the rest are not mines\n              _GameDefs::markNeighbors(\n                  board, row, col, _notMinesMask, select_not_marked_mines);\n              _processed.set(row, col);\n              notchanged = false;\n            }\n            if (!_notMinesMask.get(row, col)) {\n              _notMinesMask.set(row, col);\n              notchanged = false;\n            }\n          }  // switch(v)\n        }    // for col\n     
 }      // for row\n    } while (!notchanged);\n  }  // initializeMinesMasks\n\n  void initializeActiveConstraints(const Board& board) {\n    _activeConstraints.zero();\n    auto select_potential_mines = [=](int v, int row, int col) {\n      return (v == Minesweeper::UNKNOWN) && !_notMinesMask.get(row, col) &&\n             !_minesMask.get(row, col);\n    };\n    auto select_mines = [=](int UNUSED(v), int row, int col) {\n      return _minesMask.get(row, col);\n    };\n    typename Board::value_type v;\n    for (size_t row = 0; row < HEIGHT; ++row) {\n      for (size_t col = 0; col < WIDTH; ++col) {\n        v = arrGet<Board, WIDTH>(board, row, col);\n        if (v > 0) {\n          size_t num_mines_total = static_cast<size_t>(v);\n          size_t num_mines =\n              _GameDefs::countNeighbors(board, row, col, select_mines);\n          size_t num_potential_mines = _GameDefs::countNeighbors(\n              board, row, col, select_potential_mines);\n          assert(num_mines <= num_mines_total);\n          assert(num_potential_mines + num_mines >= num_mines_total);\n          if (num_potential_mines + num_mines > num_mines_total) {\n            // has unknown neighbors, not all of them are mines\n            _activeConstraints.set(row, col);\n          }\n        }\n      }  // for col\n    }    // for row\n  }      // initializeActiveConstraints\n\n  void initializeUnconstrainedVariables(const Board& board) {\n    _unconstrainedVariables.zero();\n    auto select_active_constraints = [this](int UNUSED(v), int row, int col) {\n      return this->_activeConstraints.get(row, col);\n    };\n    typename Board::value_type v;\n    for (size_t row = 0; row < HEIGHT; ++row) {\n      for (size_t col = 0; col < WIDTH; ++col) {\n        v = arrGet<Board, WIDTH>(board, row, col);\n        if ((v == Minesweeper::UNKNOWN) && !_minesMask.get(row, col) &&\n            !_notMinesMask.get(row, col) &&\n            !_GameDefs::countNeighbors(\n                board, row, col, 
select_active_constraints)) {\n          _unconstrainedVariables.set(row, col);\n        }\n      }  // for col\n    }    // for row\n  }      // initializeUnconstrainedVariables\n\n  _Mask _minesMask;\n  _Mask _notMinesMask;\n  _Mask _activeConstraints;\n  _Mask _unconstrainedVariables;\n  _Mask _processed;\n  MineProbas _mineProbas;\n\n};  // class CspStrategy\n\n}  // namespace vkms\n}  // namespace csp\n"
  },
  {
    "path": "src/games/minesweeper_csp_vkms/SolutionSet.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"ConnectedComponent.h\"\n\nnamespace csp {\nnamespace vkms {\n\n#define debug Minesweeper::debug\n#define rowColToIdx Minesweeper::rowColToIdx\n#define arrGet Minesweeper::arrGet\n\ntemplate <size_t WIDTH, size_t HEIGHT, size_t MINES> class SolutionSet {\n\n  using _GameDefs = Minesweeper::GameDefs<WIDTH, HEIGHT, MINES>;\n  using Board = typename _GameDefs::Board;\n  using _Mask = Minesweeper::Mask<WIDTH, HEIGHT, MINES>;\n  using IdxSizePair = std::pair<size_t, size_t>;\n  using IdxSizePairs = std::vector<IdxSizePair>;\n  using Indices = std::list<size_t>;\n  using Solution = std::list<size_t>;\n  using Solutions = std::list<Solution>;\n\n public:\n  SolutionSet(const ConnectedComponent& cc,\n              const Board& board,\n              const _Mask& mines)\n      : _variables(cc._variables)\n      , _constraints(cc._constraints)\n      , _varToConstr(_variables.size())\n      , _constrToVar(_constraints.size())\n      , _minNumMines(MINES)\n      , _maxNumMines(0)\n      , _varStates(_variables.size(), -1) {\n    initializeVariableIdxMap();\n    initializeMaps();\n    _constrCounts = computeConstraintNminesLeft(board, mines);\n    _constrStates = _constrCounts;\n    _varOrder = variablesOrderedByNconstraintsDescending();\n    enumerateSolutions(board, mines);\n    MINESWEEPER_DEBUG(dumpSolutionStats(debug(std::cout)) << std::endl);\n  }  // SolutionSet\n\n  size_t minNumMines() const {\n    return _minNumMines;\n  }  // min_num_mines\n\n  size_t maxNumMines() const {\n    return _maxNumMines;\n  }  // max_num_mines\n\n  bool hasSamples(size_t nmines) const {\n    return _solutions.find(nmines) != _solutions.end();\n  }  // hasSamples\n\n  size_t numSamples(size_t nmines) const {\n    return 
_solutions.at(nmines).size();\n  }  // hasSamples\n\n  const Minesweeper::SparseMask& getVariables() const {\n    return _variables;\n  }  // getVariables\n\n  std::vector<float> getVarProbas(size_t nmines) const {\n    std::vector<float> probas(_variables.size(), 0);\n    if (!hasSamples(nmines)) {\n      return probas;\n    }\n    const auto& solutions = _solutions.at(nmines);\n    for (const auto& solution : solutions) {\n      for (auto mineIdx : solution) {\n        probas[_mineIdxToVarIdx.at(mineIdx)] += 1;\n      }\n    }\n    for (auto& proba : probas) {\n      proba /= solutions.size();\n    }\n    return probas;\n  }  // getVarProbas\n\n  template <typename RngEngine>\n  std::vector<int> sample(size_t nmines, RngEngine& rng) const {\n    std::vector<int> sample;\n    if (!nmines) {\n      return sample;\n    }\n    sample.reserve(nmines);\n    assert(hasSamples(nmines));\n    const auto& solutions = _solutions.at(nmines);\n    std::uniform_int_distribution<size_t> distribution(0, solutions.size() - 1);\n    size_t sampleIdx = distribution(rng);\n    auto solutionsIt = solutions.begin();\n    for (size_t i = 0; i < sampleIdx; ++i, ++solutionsIt)\n      ;\n    const auto& solution = *solutionsIt;\n    assert(solution.size() == nmines);\n    for (auto mineIdx : solution) {\n      sample.push_back(mineIdx);\n    }\n    return sample;\n  }  // sample\n\n private:\n  void initializeVariableIdxMap() {\n    int mineIdx;\n    for (size_t i = 0; i < _variables.size(); ++i) {\n      mineIdx = rowColToIdx<WIDTH>(_variables[i].row(), _variables[i].col());\n      assert(mineIdx >= 0);\n      _mineIdxToVarIdx[static_cast<size_t>(mineIdx)] = i;\n    }\n  }  // initializeVariableIdxMap\n\n  void initializeMaps() {\n    for (size_t i = 0; i < _variables.size(); ++i) {\n      Minesweeper::BoardPosition bp_i = _variables[i];\n      for (size_t j = 0; j < _constraints.size(); ++j) {\n        Minesweeper::BoardPosition bp_j = _constraints[j];\n        if ((bp_i.col() - 
bp_j.col() >= -1) && (bp_i.col() - bp_j.col() <= 1) &&\n            (bp_i.row() - bp_j.row() >= -1) && (bp_i.row() - bp_j.row() <= 1)) {\n          _varToConstr[i].push_back(j);\n          _constrToVar[j].push_back(i);\n        }\n      }  // for j\n    }    // for i\n  }      // initializeMaps\n\n  std::vector<size_t> computeConstraintNminesLeft(const Board& board,\n                                                  const _Mask& mines) {\n    std::vector<size_t> counts(_constrToVar.size(), 0);\n    auto select_mines = [&](int UNUSED(v), int row, int col) {\n      return mines.get(row, col);\n    };\n    int v, row, col;\n    size_t count, nmines;\n    for (size_t j = 0; j < _constrToVar.size(); ++j) {\n      row = _constraints[j].row();\n      col = _constraints[j].col();\n      v = arrGet<Board, WIDTH>(board, row, col);\n      assert(v > 0);\n      count = static_cast<size_t>(v);\n      nmines = _GameDefs::countNeighbors(board, row, col, select_mines);\n      assert(count > nmines);\n      counts[j] = count - nmines;\n    }\n    return counts;\n  }  // computeConstraintNminesLeft\n\n  std::vector<size_t> variablesOrderedByNconstraintsDescending() {\n    std::vector<size_t> order(_varToConstr.size());\n    for (size_t i = 0; i < order.size(); ++i) {\n      order[i] = i;\n    }\n    std::sort(order.begin(), order.end(), [this](size_t i, size_t j) {\n      return this->_varToConstr[i].size() > this->_varToConstr[j].size();\n    });\n    return order;\n  }  // variablesOrderedByNconstraintsDescending\n\n  void assignMine(size_t i) {\n    _varStates[i] = 1;\n    for (size_t j : _varToConstr[i]) {\n      if (_constrStates[j]) {\n        // IMPORTANT: this conditional constraint state decrease may lead to\n        // inconsistent states, where some cells have more neighbor mines\n        // than declared; consistency check is required after updates\n        _constrStates[j]--;\n      }\n    }\n  }  // assignMine\n\n  void assignNotMine(size_t i) {\n    if (_varStates[i] 
== 1) {\n      for (size_t j : _varToConstr[i]) {\n        _constrStates[j]++;\n      }\n    }\n    _varStates[i] = 0;\n  }  // assignNotMine\n\n  int nextUnassignedVariable() const {\n    for (size_t i = 0; i < _varOrder.size(); ++i) {\n      if (_varStates[_varOrder[i]] == -1) {\n        return static_cast<int>(_varOrder[i]);\n      }\n    }\n    return -1;\n  }  // nextUnassignedVariable\n\n  bool constraintsSatisfied() const {\n    for (size_t constrState : _constrStates) {\n      if (constrState) {\n        return false;\n      }\n    }\n    return true;\n  }  // constraintsSatisfied\n\n  bool updateStates() {\n    bool changed = false;\n    for (size_t j = 0; j < _constrStates.size(); ++j) {\n      if (!_constrStates[j]) {\n        // no more mines for this constraint, mark all free variables as\n        // not mines\n        for (size_t i : _constrToVar[j]) {\n          if (_varStates[i] == -1) {\n            assignNotMine(i);\n            changed = true;\n          }\n        }\n      } else {\n        // check if the number of free variables is equal to the number of\n        // mines left; assign all free variables to mines, if true\n        size_t freeVars = 0;\n        for (size_t i : _constrToVar[j]) {\n          if (_varStates[i] == -1) {\n            ++freeVars;\n          }\n        }\n        if (freeVars == _constrStates[j]) {\n          for (size_t i : _constrToVar[j]) {\n            if (_varStates[i] == -1) {\n              assignMine(i);\n            }\n          }\n          changed = true;\n        }\n      }\n    }  // for j\n    return changed;\n  }  // updateConstrStates\n\n  void updateFromAssignments() {\n    std::copy(\n        _constrCounts.begin(), _constrCounts.end(), _constrStates.begin());\n    std::vector<int> varStates(_varStates);\n    std::fill(_varStates.begin(), _varStates.end(), -1);\n    for (size_t v : _assignedVars) {\n      assert((varStates[v] == 1) || (varStates[v] == 0));\n      if (varStates[v]) {\n        
assignMine(v);\n      } else {\n        assignNotMine(v);\n      }\n    }\n    bool changed;\n    do {\n      changed = updateStates();\n    } while (changed);\n  }  // updateConstraint\n\n  void assignVariable(size_t i) {\n    _assignedVars.push_back(i);\n    _varStates[i] = 1;\n    updateFromAssignments();\n  }  // assignVariable\n\n  void triggerLastAssignment() {\n    if (_assignedVars.empty()) {\n      return;\n    }\n    size_t lastAssigned = _assignedVars.back();\n    while (!_varStates[lastAssigned]) {\n      _assignedVars.pop_back();\n      if (_assignedVars.empty()) {\n        return;\n      }\n      lastAssigned = _assignedVars.back();\n    }\n    assert(_varStates[lastAssigned] == 1);\n    _varStates[lastAssigned] = 0;\n    updateFromAssignments();\n  }  // triggerLastAssignment\n\n  void enumerateSolution() {\n    Solution mines;\n    for (size_t i = 0; i < _varStates.size(); ++i) {\n      if (_varStates[i] == 1) {\n        int idx = rowColToIdx<WIDTH>(_variables[i].row(), _variables[i].col());\n        assert(idx >= 0);\n        mines.push_back(static_cast<size_t>(idx));\n      }\n    }\n    if (_minNumMines > mines.size()) {\n      _minNumMines = mines.size();\n    }\n    if (_maxNumMines < mines.size()) {\n      _maxNumMines = mines.size();\n    }\n    _solutions[mines.size()].push_back(mines);\n  }  // enumerateSolution\n\n  bool checkSolutionAgainstBoard(const Board& board, const _Mask& mines) {\n    for (size_t i = 0; i < _constraints.size(); ++i) {\n      const auto& varIndices = _constrToVar[i];\n      size_t nMines = 0;\n      for (const auto& varIdx : varIndices) {\n        if (_varStates[varIdx] > 0) {\n          ++nMines;\n        }\n      }\n      auto select_mines = [&](int UNUSED(v), int row, int col) {\n        return mines.get(row, col);\n      };\n      size_t nMinesMarked = _GameDefs::countNeighbors(\n          board, _constraints[i].row(), _constraints[i].col(), select_mines);\n      nMines += nMinesMarked;\n      int v = 
arrGet<Board, WIDTH>(\n          board, _constraints[i].row(), _constraints[i].col());\n      assert(v > 0);\n      if (static_cast<size_t>(v) != nMines) {\n        /*MINESWEEPER_DEBUG(debug(std::cout) << \"Constraint \" << i \\\n            << \" (\" << _constraints[i].row() << \", \" \\\n            << _constraints[i].col() << \")=\" << v << \" has \" \\\n            << nMines << \" mines\" << std::endl);\n        MINESWEEPER_DEBUG(debug(std::cout) << \"Board:\" << std::endl; \\\n            std::cout << _GameDefs::boardToString(board) << std::endl);\n        MINESWEEPER_DEBUG(debug(std::cout) << \"State Board:\" << std::endl; \\\n            std::cout << stateToString(board) << std::endl);\n        dumpDebugInfo(board);*/\n        return false;\n      }\n    }\n    return true;\n  }  // checkSolutionAgainstBoard\n\n  std::string stateToString(const Board& board) {\n    using BoardChars = std::array<char, WIDTH * HEIGHT>;\n    BoardChars boardChars;\n    int v;\n    char c;\n    int k = 0;\n    for (size_t row = 0; row < HEIGHT; ++row) {\n      for (size_t col = 0; col < WIDTH; ++col) {\n        v = board[k];\n        switch (v) {\n        case Minesweeper::UNKNOWN:\n          c = '.';\n          break;\n        case Minesweeper::BOOM:\n          c = 'X';\n          break;\n        default:\n          assert(v >= 0);\n          c = '0' + v;\n        }\n        boardChars[k] = c;\n        ++k;\n      }\n    }\n    for (size_t i = 0; i < _varStates.size(); ++i) {\n      if (_varStates[i] == 1) {\n        arrGet<BoardChars, WIDTH>(\n            boardChars, _variables[i].row(), _variables[i].col()) = '*';\n      } else if (_varStates[i] == 0) {\n        arrGet<BoardChars, WIDTH>(\n            boardChars, _variables[i].row(), _variables[i].col()) = '@';\n      } else {\n        assert(_varStates[i] == -1);\n        arrGet<BoardChars, WIDTH>(\n            boardChars, _variables[i].row(), _variables[i].col()) = '?';\n      }\n    }\n    std::ostringstream oss;\n    for 
(size_t row = 0; row < HEIGHT; ++row) {\n      for (size_t col = 0; col < WIDTH; ++col) {\n        oss << arrGet<BoardChars, WIDTH>(boardChars, row, col);\n      }\n      oss << std::endl;\n    }\n    return oss.str();\n  }  // stateToString\n\n  void enumerateSolutions(const Board& board, const _Mask& mines) {\n    _solutions.clear();\n    _minNumMines = MINES;\n    _maxNumMines = 0;\n    std::fill(_varStates.begin(), _varStates.end(), -1);\n    std::copy(\n        _constrCounts.begin(), _constrCounts.end(), _constrStates.begin());\n    _assignedVars.clear();\n    int v;\n    do {\n      if (constraintsSatisfied()) {\n        // some constraints might have more mines than specified,\n        // check more thoroughly\n        if (checkSolutionAgainstBoard(board, mines)) {\n          enumerateSolution();\n        }\n        triggerLastAssignment();\n      } else {\n        v = nextUnassignedVariable();\n        if (v == -1) {\n          triggerLastAssignment();\n        } else {\n          assignVariable(static_cast<size_t>(v));\n          if (!checkConsistency()) {\n            triggerLastAssignment();\n          }\n        }\n      }\n    } while (!_assignedVars.empty());\n    MINESWEEPER_DEBUG(\n        if (_solutions.empty()) { enumerateSolutionsDebug(board); });\n    assert(!_solutions.empty());\n  }  // enumerateSolutions\n\n  void enumerateSolutionsDebug(const Board& board) {\n    debug(std::cout) << \"Debugging session for solutions enumeration!\"\n                     << std::endl;\n    _solutions.clear();\n    _minNumMines = MINES;\n    _maxNumMines = 0;\n    std::fill(_varStates.begin(), _varStates.end(), -1);\n    std::copy(\n        _constrCounts.begin(), _constrCounts.end(), _constrStates.begin());\n    _assignedVars.clear();\n    int v;\n    do {\n      if (constraintsSatisfied()) {\n        debug(std::cout) << \"Constraints satisfied\" << std::endl;\n        dumpDebugInfo(board);\n        enumerateSolution();\n        debug(std::cout) << \"Enumerated 
solution\" << std::endl;\n        triggerLastAssignment();\n        debug(std::cout) << \"Triggered last assignment\" << std::endl;\n        dumpDebugInfo(board);\n      } else {\n        debug(std::cout) << \"Constraints not satisfied\" << std::endl;\n        dumpDebugInfo(board);\n        v = nextUnassignedVariable();\n        debug(std::cout) << \"Next variable: \" << v << std::endl;\n        if (v == -1) {\n          debug(std::cout) << \"No variable to assign\" << std::endl;\n          dumpDebugInfo(board);\n          triggerLastAssignment();\n          debug(std::cout) << \"Triggered last assignment\" << std::endl;\n          dumpDebugInfo(board);\n        } else {\n          debug(std::cout) << \"Assign variable \" << v << std::endl;\n          assignVariable(static_cast<size_t>(v));\n          if (!checkConsistency()) {\n            debug(std::cout) << \"Inconsistency found!\" << std::endl;\n            dumpDebugInfo(board);\n            triggerLastAssignment();\n            debug(std::cout) << \"Triggered last assignment\" << std::endl;\n            dumpDebugInfo(board);\n          }\n        }\n      }\n    } while (!_assignedVars.empty());\n  }  // enumerateSolutionsDebug\n\n  std::ostream& dumpSolutionStats(std::ostream& os) {\n    os << \"Solution set: \";\n    for (const auto& [nmines, solution] : _solutions) {\n      os << nmines << \" mines (\" << solution.size() << \" solutions), \";\n    }\n    return os;\n  }  // printSolutionStats\n\n  bool checkConsistency() {\n    size_t i = 0;\n    size_t nUnassigned;\n    size_t nMines;\n    for (const Indices& indices : _constrToVar) {\n      nUnassigned = 0;\n      nMines = 0;\n      size_t constrCount = _constrCounts[i];\n      size_t constrState = _constrStates[i];\n      for (size_t j : indices) {\n        switch (_varStates[j]) {\n        case -1:\n          nUnassigned++;\n          break;\n        case 1:\n          nMines++;\n          break;\n        default:;\n        }\n      }\n      if 
((constrCount < constrState) || (constrCount < nMines) ||\n          (nUnassigned < constrState) || (nUnassigned + nMines < constrCount)) {\n        // debug(std::cout) << \"Inconsistency for constraint \" << i\n        //    << \": constrCount=\" << constrCount\n        //    << \", constrState=\" << constrState\n        //    << \", varCount=\" << varCount << std::endl;\n        // dumpDebugInfo();\n        return false;\n      }\n      ++i;\n    }\n    return true;\n  }  // SolutionSet::checkConsistency\n\n  void checkCanAssignVar(size_t varIdx) {\n    for (size_t j : _varToConstr[varIdx]) {\n      if (!_constrStates[j]) {\n        /*debug(std::cout) << \"Attempt to assign variable \" << varIdx\n            << \" that must not be assigned: constraint \" << j\n            << \" has 0\" << std::endl;\n        dumpDebugInfo();*/\n        assert(false);\n      }\n    }\n  }  // SolutionSet::checkCanAssignVar\n\n  void dumpDebugInfo(const Board& board) {\n    debug(std::cout) << \"Solution set:\" << std::endl;\n    debug(std::cout) << \"Variables: \" << sparseMaskToString(_variables)\n                     << std::endl;\n    debug(std::cout) << \"Constraints: \" << sparseMaskToString(_constraints)\n                     << std::endl;\n    debug(std::cout) << \"Variables to constraints: \" << std::endl;\n    size_t i = 0;\n    for (const Indices& indices : _varToConstr) {\n      debug(std::cout) << \"  \" << i++ << \": \" << valuesToString(indices)\n                       << std::endl;\n    }\n    debug(std::cout) << \"Constraints to variables: \" << std::endl;\n    i = 0;\n    for (const Indices& indices : _constrToVar) {\n      debug(std::cout) << \"  \" << i++ << \": \" << valuesToString(indices)\n                       << std::endl;\n    }\n    debug(std::cout) << \"Assigned variables: \";\n    for (size_t i : _assignedVars) {\n      std::cout << i << \"=\" << _varStates[i] << \", \";\n    }\n    std::cout << std::endl;\n    debug(std::cout) << \"Variable states: 
\";\n    i = 0;\n    for (int s : _varStates) {\n      std::cout << i++ << \"=\" << s << \", \";\n    }\n    std::cout << std::endl;\n    debug(std::cout) << \"Variables order: \" << valuesToString(_varOrder)\n                     << std::endl;\n    debug(std::cout) << \"Constraint counts: \" << valuesToString(_constrCounts)\n                     << std::endl;\n    debug(std::cout) << \"Constraint states: \" << valuesToString(_constrStates)\n                     << std::endl;\n    debug(std::cout) << \"State Board:\" << std::endl;\n    std::cout << stateToString(board) << std::endl;\n  }  // SolutionSet::dumpDebugInfo\n\n  template <typename T> std::string valuesToString(const T& values) {\n    std::ostringstream oss;\n    for (typename T::value_type v : values) {\n      oss << v << \", \";\n    }\n    return oss.str();\n  }  // SolutionSet::indicesToString\n\n private:\n  const Minesweeper::SparseMask& _variables;\n  const Minesweeper::SparseMask& _constraints;\n  std::vector<Indices> _varToConstr;\n  std::vector<Indices> _constrToVar;\n  std::unordered_map<size_t, size_t> _mineIdxToVarIdx;\n\n  std::unordered_map<size_t, Solutions> _solutions;\n  size_t _minNumMines;\n  size_t _maxNumMines;\n\n  // enumeration fields\n  std::list<size_t> _assignedVars;\n  std::vector<int> _varStates;\n  std::vector<size_t> _varOrder;\n  std::vector<size_t> _constrCounts;\n  std::vector<size_t> _constrStates;\n\n};  // class SolutionSet\n\n}  // namespace vkms\n}  // namespace csp\n"
  },
  {
    "path": "src/games/minesweeper_csp_vkms/SolutionSetSampler.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"SolutionSet.h\"\n\nnamespace csp {\nnamespace vkms {\n\ntemplate <size_t WIDTH, size_t HEIGHT, size_t MINES> class SolutionSetSampler {\n\n  using _GameDefs = Minesweeper::GameDefs<WIDTH, HEIGHT, MINES>;\n  using Mines = typename _GameDefs::Mines;\n  using _SolutionSet = SolutionSet<WIDTH, HEIGHT, MINES>;\n  using _Mask = Minesweeper::Mask<WIDTH, HEIGHT, MINES>;\n  using CountSample = std::vector<size_t>;\n  using CountSampleList = std::list<CountSample>;\n\n public:\n  SolutionSetSampler(const std::vector<_SolutionSet>& solutionSets,\n                     const _Mask& unconstrainedVariables,\n                     const _Mask& mines)\n      : _solutionSets(solutionSets)\n      , _unconstrainedVariables(unconstrainedVariables)\n      , _mines(mines)\n      , _numMinesRemaining(MINES - mines.sparse().size())\n      , _unconstrVarSetIdx(solutionSets.size())\n      , _numUnconstrVars(unconstrainedVariables.sparse().size())\n      , _minMines(solutionSets.size() + 1, 0)\n      , _maxMines(solutionSets.size() + 1, 0)\n      , _firstBranchingSet(0)\n      , _solutionSetsOrder(solutionSets.size() + 1, 0)\n      , _numMinesToSample(solutionSets.size() + 1, 0) {\n    assert(mines.sparse().size() <= MINES);\n    assert(_numMinesRemaining <= MINES);\n    initializeMinMaxMinesStats();\n    prepareSampling();\n  }  // SolutionSetSampler::SolutionSetSampler\n\n  std::vector<std::pair<CountSample, float>> countsWithProbabilities() const {\n    std::vector<std::pair<CountSample, float>> countSamplesWithProbas;\n    countSamplesWithProbas.reserve(_countSamples.size());\n    auto countSamplesIt = _countSamples.begin();\n    for (size_t k = 0; k < _countSamples.size(); ++k) {\n      const auto& countSampleOrdered = 
*countSamplesIt++;\n      CountSample countSample(countSampleOrdered.size());\n      for (size_t i = 0; i < countSample.size(); ++i) {\n        countSample[_solutionSetsOrder[i]] = countSampleOrdered[i];\n      }\n      countSamplesWithProbas.emplace_back(countSample, _countSampleProbas[k]);\n    }\n    return countSamplesWithProbas;\n  }  // countsWithProbabilities\n\n  template <typename RngEngine> void sampleMines(Mines& mines, RngEngine& rng) {\n    auto mineSampleIt = mines.begin();\n    // add all marked mines\n    for (const auto& v : _mines.sparse()) {\n      *mineSampleIt++ = rowColToIdx<WIDTH>(v.row(), v.col());\n    }\n    // sample counts for each solution set\n    CountSample countSample;\n    if (_countSamples.size() > 1) {\n      MINESWEEPER_DEBUG(debug(std::cout) << \"Unnormalized probas: \";\n                        dumpCollection(std::cout, _countSampleProbas)\n                        << std::endl);\n      std::discrete_distribution<size_t> sampleDistribution(\n          _countSampleProbas.begin(), _countSampleProbas.end());\n      size_t countSampleIdx = sampleDistribution(rng);\n      auto countSampleIt = _countSamples.begin();\n      for (size_t i = 0; i < countSampleIdx; ++i, ++countSampleIt)\n        ;\n      countSample = *countSampleIt;\n    } else {\n      countSample = _countSamples.front();\n    }\n    MINESWEEPER_DEBUG(debug(std::cout) << \"Count sample: \";\n                      dumpCollection(std::cout, countSample) << std::endl);\n    // sample according to counts\n    size_t nMinesInSetJ, j;\n    for (size_t i = 0; i < countSample.size(); ++i) {\n      j = _solutionSetsOrder[i];\n      nMinesInSetJ = countSample[i];\n      if (!nMinesInSetJ) {\n        continue;\n      }\n      if (j != _unconstrVarSetIdx) {\n        auto mineSample = _solutionSets[j].sample(nMinesInSetJ, rng);\n        mineSampleIt =\n            std::copy(mineSample.begin(), mineSample.end(), mineSampleIt);\n      } else {\n        auto mineSample = 
sampleUnconstrained(nMinesInSetJ, rng);\n        assert(mineSample.size() == nMinesInSetJ);\n        mineSampleIt =\n            std::copy(mineSample.begin(), mineSample.end(), mineSampleIt);\n      }\n    }\n    assert(mineSampleIt == mines.end());\n    std::sort(mines.begin(), mines.end());\n  }  // olutionSetSampler::sampleMines\n\n private:\n  static constexpr size_t INVALID_COUNT = static_cast<size_t>(-1);\n\n  void prepareSampling() {\n    adjustMinMaxMinesStats();\n    computeSolutionSetsOrder();\n    _countSamples = enumeratePlausibleCountSamples();\n    assert(!_countSamples.empty());\n    MINESWEEPER_DEBUG(dumpCountSamples(std::cout, _countSamples));\n    _countSampleProbas = computeSampleProbabilities(_countSamples);\n  }  // prepareSampling\n\n  template <typename RngEngine>\n  std::vector<int> sampleUnconstrained(size_t n, RngEngine& rng) {\n    std::vector<int> sample;\n    if (!n) {\n      return sample;\n    }\n    sample.reserve(n);\n    std::vector<Minesweeper::BoardPosition> unconstrained(\n        _unconstrainedVariables.sparse());\n    std::shuffle(unconstrained.begin(), unconstrained.end(), rng);\n    for (size_t i = 0; i < n; ++i) {\n      std::uniform_int_distribution<size_t> distribution(\n          i, unconstrained.size() - 1);\n      size_t varIdx = distribution(rng);\n      const auto& boardPosition = unconstrained[varIdx];\n      sample.push_back(\n          rowColToIdx<WIDTH>(boardPosition.row(), boardPosition.col()));\n      std::swap(unconstrained[i], unconstrained[varIdx]);\n    }\n    return sample;\n  }\n\n  std::vector<float> computeSampleProbabilities(\n      const CountSampleList& samples) {\n    std::vector<float> probas;\n    probas.reserve(samples.size());\n    float unnormalizedLogProba;\n    float maxLogProba = 0;\n    size_t nSamples, nMinesInSetJ, j;\n    for (const auto& sample : samples) {\n      MINESWEEPER_DEBUG(debug(std::cout)\n                        << \"Computing probabilities for sample counts:\"\n              
          << std::endl);\n      MINESWEEPER_DEBUG(dumpCollection(debug(std::cout), sample) << std::endl);\n      assert(sample.size() == _solutionSetsOrder.size());\n      unnormalizedLogProba = 0;\n      for (size_t i = 0; i < sample.size(); ++i) {\n        j = _solutionSetsOrder[i];\n        nMinesInSetJ = sample[i];\n        if (nMinesInSetJ > 0) {\n          if (j != _unconstrVarSetIdx) {\n            assert(_solutionSets[j].hasSamples(nMinesInSetJ));\n            nSamples = _solutionSets[j].numSamples(nMinesInSetJ);\n            unnormalizedLogProba += log(nSamples);\n            MINESWEEPER_DEBUG(debug(std::cout)\n                              << \"Set \" << j << \" (constrained)\"\n                              << \", #mines \" << nMinesInSetJ << \", log proba \"\n                              << log(nSamples) << std::endl);\n          } else {\n            assert(nMinesInSetJ <= _numUnconstrVars);\n            MINESWEEPER_DEBUG(debug(std::cout)\n                              << \"Set \" << j << \" (unconstrained)\"\n                              << \", #mines \" << nMinesInSetJ << \", log proba \"\n                              << logCnk(_numUnconstrVars, nMinesInSetJ)\n                              << std::endl);\n            unnormalizedLogProba += logCnk(_numUnconstrVars, nMinesInSetJ);\n          }\n        }\n      }\n      if (maxLogProba < unnormalizedLogProba) {\n        maxLogProba = unnormalizedLogProba;\n      }\n      probas.push_back(unnormalizedLogProba);\n    }\n    // normalize\n    float probaSum = 0;\n    for (auto& proba : probas) {\n      proba = exp(proba - maxLogProba);\n      probaSum += proba;\n    }\n    MINESWEEPER_DEBUG(debug(std::cout) << \"Probas: \");\n    for (auto& proba : probas) {\n      proba /= probaSum;\n      MINESWEEPER_DEBUG(std::cout << probas << \", \");\n    }\n    MINESWEEPER_DEBUG(std::cout << std::endl);\n    return probas;\n  }  // computeSampleProbabilities\n\n  float logCnk(size_t n, size_t k) {\n    // n! 
/ (k! (n-k)!)\n    float result = 0;\n    assert(n >= k);\n    result += sumLog(max(k, n - k) + 1, n);\n    result -= sumLog(2, min(k, n - k));\n    return result;\n  }\n\n  float sumLog(size_t n1, size_t n2) {\n    float result = 0;\n    for (size_t n = n1; n <= n2; ++n) {\n      result += log(n);\n    }\n    return result;\n  }\n\n  template <typename T>\n  std::ostream& dumpCollection(std::ostream& os, const T& collection) {\n    for (const auto& v : collection) {\n      os << v << ' ';\n    }\n    return os;\n  }  // dumpSample\n\n  template <typename T>\n  std::ostream& dumpCountSamples(std::ostream& os, const T& samples) {\n    debug(os) << \"Count samples:\" << std::endl;\n    for (const auto& sample : samples) {\n      debug(os) << \"  \";\n      dumpCollection(os, sample) << std::endl;\n    }\n    return os;\n  }  // dumpCountSamples\n\n  std::list<std::vector<size_t>> enumeratePlausibleCountSamples() {\n    std::list<std::vector<size_t>> samples;\n    // set mine counts for sets with deterministic counts\n    size_t initNumMines = 0;\n    for (size_t i = 0; i < _firstBranchingSet; ++i) {\n      size_t j = _solutionSetsOrder[i];\n      assert(_minMines[j] == _maxMines[j]);\n      _numMinesToSample[i] = _minMines[j];\n      initNumMines += _minMines[j];\n    }\n    // finish if there are only deterministic counts\n    if (_firstBranchingSet == _solutionSetsOrder.size()) {\n      samples.push_back(_numMinesToSample);\n      return samples;\n    }\n    // traverse the rest and select valid combinations\n    std::vector<size_t> currentCounts;\n    assert(_solutionSetsOrder.size() > _firstBranchingSet);\n    const size_t currentCountsCapacity =\n        _solutionSetsOrder.size() - _firstBranchingSet;\n    currentCounts.reserve(currentCountsCapacity);\n    size_t cursor = _firstBranchingSet;\n    size_t setIdx = _solutionSetsOrder[cursor];\n    size_t numMines = nextMinesCount(_minMines[setIdx], setIdx);\n    assert(numMines != INVALID_COUNT);\n    
currentCounts.push_back(numMines);\n    size_t sumNumMines = initNumMines + numMines;\n    while (!currentCounts.empty()) {\n      // fill with min number of mines for each set\n      if (currentCounts.size() < currentCountsCapacity - 1) {\n        ++cursor;\n        setIdx = _solutionSetsOrder[cursor];\n        numMines = nextMinesCount(_minMines[setIdx], setIdx);\n        assert(numMines != INVALID_COUNT);\n        currentCounts.push_back(numMines);\n        sumNumMines += numMines;\n        if (sumNumMines > _numMinesRemaining) {\n          break;\n        }\n        continue;\n      }\n      assert(currentCounts.size() == currentCountsCapacity - 1);\n      assert(cursor == _solutionSetsOrder.size() - 2);\n      assert(sumNumMines ==\n             initNumMines + std::accumulate(currentCounts.begin(),\n                                            currentCounts.end(), 0u));\n      // check if can sample the missing mines from the last set\n      if (sumNumMines <= _numMinesRemaining) {\n        ++cursor;\n        setIdx = _solutionSetsOrder[cursor];\n        numMines = _numMinesRemaining - sumNumMines;\n        if (canSampleNumMinesFromSet(numMines, setIdx)) {\n          currentCounts.push_back(numMines);\n          std::copy(currentCounts.begin(), currentCounts.end(),\n                    &_numMinesToSample[_firstBranchingSet]);\n          samples.push_back(_numMinesToSample);\n          currentCounts.pop_back();\n        }\n        // next counts configuration\n        --cursor;\n        setIdx = _solutionSetsOrder[cursor];\n        numMines = nextMinesCount(currentCounts.back() + 1, setIdx);\n      } else {\n        // next counts configuration\n        numMines = INVALID_COUNT;\n      }\n      while ((numMines == INVALID_COUNT) && !currentCounts.empty()) {\n        sumNumMines -= currentCounts.back();\n        currentCounts.pop_back();\n        if (cursor > _firstBranchingSet) {\n          --cursor;\n          setIdx = _solutionSetsOrder[cursor];\n          
numMines = nextMinesCount(currentCounts.back() + 1, setIdx);\n        } else {\n          assert(currentCounts.empty());\n        }\n      }\n      if (numMines != INVALID_COUNT) {\n        sumNumMines -= currentCounts.back();\n        sumNumMines += numMines;\n        currentCounts.back() = numMines;\n      }\n    }\n    return samples;\n  }  // determineNumberOfSamples\n\n  bool canSampleNumMinesFromSet(size_t numMines, size_t setIdx) {\n    if (setIdx == _unconstrVarSetIdx) {\n      // set of unconstrained variables\n      return numMines <= _numUnconstrVars;\n    } else {\n      return _solutionSets[setIdx].hasSamples(numMines);\n    }\n  }  // canSampleNumMinesFromSet\n\n  size_t nextMinesCount(size_t countHint, size_t setIdx) {\n    assert(setIdx < _maxMines.size());\n    if (countHint > _maxMines[setIdx]) {\n      return INVALID_COUNT;\n    }\n    if (setIdx == _unconstrVarSetIdx) {\n      return countHint;\n    }\n    while (!_solutionSets[setIdx].hasSamples(countHint)) {\n      ++countHint;\n      if (countHint > _maxMines[setIdx]) {\n        return INVALID_COUNT;\n      }\n    }\n    assert(_solutionSets[setIdx].hasSamples(countHint));\n    return countHint;\n  }  // nextMinesCount\n\n  void computeSolutionSetsOrder() {\n    assert(_solutionSetsOrder.size() > 0);\n    assert(_solutionSetsOrder.size() == _minMines.size());\n    assert(_solutionSetsOrder.size() == _maxMines.size());\n    _firstBranchingSet = 0;\n    size_t head = 0;\n    size_t tail = _solutionSetsOrder.size() - 1;\n    for (size_t i = 0; i < _solutionSetsOrder.size(); ++i) {\n      if (_minMines[i] == _maxMines[i]) {\n        _solutionSetsOrder[head++] = i;\n      } else {\n        _solutionSetsOrder[tail--] = i;\n      }\n    }\n    assert(tail + 1 == head);\n    // if unconstrained set does not have a fixed number of mines to sample\n    // swap it with the last one\n    if (_solutionSetsOrder[head] == _unconstrVarSetIdx) {\n      size_t tmp = _solutionSetsOrder.back();\n      
_solutionSetsOrder.back() = _unconstrVarSetIdx;\n      _solutionSetsOrder[head] = tmp;\n    }\n    _firstBranchingSet = head;\n  }  // computeSolutionSetsOrder\n\n  void initializeMinMaxMinesStats() {\n    for (size_t i = 0; i < _solutionSets.size(); ++i) {\n      _minMines[i] = _solutionSets[i].minNumMines();\n      _maxMines[i] = _solutionSets[i].maxNumMines();\n    }\n    _minMines[_solutionSets.size()] = 0;\n    _maxMines[_solutionSets.size()] = _unconstrainedVariables.sparse().size();\n    MINESWEEPER_DEBUG(\n        debug(std::cout) << \"Min mines init: \"; for (size_t n\n                                                     : _minMines) {\n          std::cout << n << \" \";\n        } std::cout << std::endl;);\n    MINESWEEPER_DEBUG(\n        debug(std::cout) << \"Max mines init: \"; for (size_t n\n                                                     : _maxMines) {\n          std::cout << n << \" \";\n        } std::cout << std::endl;);\n  }  // SolutionSetSampler::initializeMinMaxMinesStats\n\n  void adjustMinMaxMinesStats() {\n    size_t sumMax = std::accumulate(_maxMines.begin(), _maxMines.end(), 0u);\n    size_t sumMin = std::accumulate(_minMines.begin(), _minMines.end(), 0u);\n    const size_t nsets = _maxMines.size();\n    size_t delta;\n    bool changed;\n    do {\n      changed = false;\n      for (size_t i = 0; i < nsets; ++i) {\n        if (_maxMines[i] + sumMin > _numMinesRemaining + _minMines[i]) {\n          delta = _maxMines[i] + sumMin - (_numMinesRemaining + _minMines[i]);\n          _maxMines[i] -= delta;\n          sumMax -= delta;\n          changed = true;\n        }\n        if (_minMines[i] + sumMax < _numMinesRemaining + _maxMines[i]) {\n          delta = _numMinesRemaining + _maxMines[i] - (_minMines[i] + sumMax);\n          _minMines[i] += delta;\n          sumMin += delta;\n          changed = true;\n        }\n      }\n    } while (changed);\n    MINESWEEPER_DEBUG(debug(std::cout) << \"Min mines adj: \"; for (size_t n\n              
                                                    : _minMines) {\n      std::cout << n << \" \";\n    } std::cout << std::endl;);\n    MINESWEEPER_DEBUG(debug(std::cout) << \"Max mines adj: \"; for (size_t n\n                                                                  : _maxMines) {\n      std::cout << n << \" \";\n    } std::cout << std::endl;);\n  }  // SolutionSetSampler::adjustMinMaxMinesStats\n\n  const std::vector<_SolutionSet>& _solutionSets;\n  const _Mask& _unconstrainedVariables;\n  const _Mask& _mines;\n  const size_t _numMinesRemaining;\n  const size_t _unconstrVarSetIdx;\n  const size_t _numUnconstrVars;\n\n  std::vector<size_t> _minMines;\n  std::vector<size_t> _maxMines;\n  size_t _firstBranchingSet;\n  std::vector<size_t> _solutionSetsOrder;\n  std::vector<size_t> _numMinesToSample;\n\n  CountSampleList _countSamples;\n  std::vector<float> _countSampleProbas;\n\n};  // class SolutionSetSampler\n\n}  // namespace vkms\n}  // namespace csp\n"
  },
  {
    "path": "src/games/minesweeper_csp_vkms/csp_vkms.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <iostream>\n#include <random>\n#include <sstream>\n\n#include <core/game.h>\n#include <core/state.h>\n\nusing namespace csp::vkms;\nusing namespace std;\n\ntemplate <size_t NTOTAL> float good_action_eval(core::State& s) {\n  size_t game_count{0};\n  float sum_score{0};\n  size_t n_actions_cur_game = 0;\n  while (game_count < NTOTAL) {\n    s.reset();\n    n_actions_cur_game = 0;\n    while (!s.terminated()) {\n      s.DoGoodAction();\n      ++n_actions_cur_game;\n    }\n    sum_score += 0.5 * (1 + s.getReward(0));\n    ++game_count;\n  }\n  const float avg_score = sum_score / float(game_count);\n  return avg_score;\n}  // good_action_eval\n\ntemplate <size_t W, size_t H, size_t N> std::string get_map_str() {\n  ostringstream oss;\n  oss << \"Minesweeper<W=\" << W << \", H=\" << H << \", M=\" << N << \">\";\n  return oss.str();\n}  // get_map_str\n\ntemplate <size_t W, size_t H, size_t N, int SEED, size_t NTOTAL>\nvoid benchmark_vkms() {\n  using namespace std::chrono;\n  high_resolution_clock::time_point t1 = high_resolution_clock::now();\n  auto state = Minesweeper::State<W, H, N>(SEED);\n  float win_rate = good_action_eval<NTOTAL>(state);\n  high_resolution_clock::time_point t2 = high_resolution_clock::now();\n  double duration_s = duration<double>(t2 - t1).count();\n  auto map_str = get_map_str<W, H, N>();\n  std::cout << map_str << \", win rate = \" << win_rate << \", took \" << duration_s\n            << \"s (\" << static_cast<double>(NTOTAL) / duration_s << \" games/s)\"\n            << std::endl;\n}  // benchmark_vkms\n\nint main(int, char**) {\n  static constexpr int master_seed = 999;\n  benchmark_vkms<4, 4, 4, master_seed, 100000>();\n  benchmark_vkms<3, 1, 1, master_seed, 100000>();\n  benchmark_vkms<5, 2, 3, 
master_seed, 100000>();\n  benchmark_vkms<5, 5, 10, master_seed, 100000>();\n  benchmark_vkms<10, 1, 5, master_seed, 100000>();\n  benchmark_vkms<7, 3, 10, master_seed, 100000>();\n  benchmark_vkms<5, 5, 15, master_seed, 100000>();\n  benchmark_vkms<8, 8, 10, master_seed, 100000>();\n  benchmark_vkms<9, 9, 10, master_seed, 100000>();\n  benchmark_vkms<16, 16, 40, master_seed, 10000>();\n  benchmark_vkms<30, 16, 99, master_seed, 10000>();\n}\n"
  },
  {
    "path": "src/games/minesweeper_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <algorithm>\n#include <chrono>\n#include <functional>\n#include <list>\n#include <mutex>\n#include <random>\n#include <sstream>\n\n#include \"../core/state.h\"\n#include \"commons/hash.h\"\n#include \"minesweeper_common.h\"\n#include \"minesweeper_csp_vkms/CspStrategy.h\"\n\n#define EXPAND_ZEROS\n#define FLAG_MINES\n\nnamespace Minesweeper {\n\ntemplate <size_t WIDTH, size_t HEIGHT, size_t MINES>\nclass State : public core::State {\n\n  static constexpr size_t HASHBOOK_SIZE = WIDTH * HEIGHT * 11;\n  using _Mask = Mask<WIDTH, HEIGHT, MINES>;\n  using _HashBook = HashBook<uint64_t, HASHBOOK_SIZE>;\n  using _Hasher = Hasher<uint64_t, HASHBOOK_SIZE>;\n\n public:\n  using Act = ::_Action;\n  using _GameDefs = GameDefs<WIDTH, HEIGHT, MINES>;\n  using Board = typename _GameDefs::Board;\n  using BoardProbas = typename _GameDefs::BoardProbas;\n  using Mines = typename _GameDefs::Mines;\n  using Neighbors = typename _GameDefs::Neighbors;\n\n  State(int seed)\n      : core::State(seed)\n      , _hasher(hashBook) {\n    _board.fill(UNKNOWN);\n    _boardSample.fill(UNKNOWN);\n    _minesSample.fill(-1);\n    std::call_once(hashBookConfigured, [this]() { hashBook.setup(_rng); });\n  }  // State::State\n\n  virtual void Initialize() override {\n    MINESWEEPER_DEBUG(debug(std::cout) << \"Initialize\" << std::endl);\n    _status = GameStatus::player0Turn;\n    _featSize = {2, HEIGHT, WIDTH};\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2], 0);\n    fillFullFeatures();\n    _actionSize = {1, HEIGHT, WIDTH};\n    _stochastic = true;\n    _board.fill(UNKNOWN);\n    _boardSample.fill(UNKNOWN);\n    _legalActions.clear();\n    fillLegalActions(_legalActions, _board, std::vector<int>());\n    
MINESWEEPER_DEBUG(debug(std::cout) << \"Num legal actions: \"\n                                       << _legalActions.size() << std::endl);\n    _hash = boardHash();\n  }  // State::Initialize\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<Minesweeper::State<WIDTH, HEIGHT, MINES>>(*this);\n  }  // State::clone_\n\n  virtual void ApplyAction(const _Action& action) override {\n    MINESWEEPER_DEBUG(debug(std::cout) << \"ApplyAction\" << std::endl);\n    assert(_status == GameStatus::player0Turn);\n    assert(!_legalActions.empty());\n    int row = action.GetY();\n    int col = action.GetZ();\n    assert((isInBoard<WIDTH, HEIGHT>(row, col)));\n    sampleMines(_minesSample, _board, _rng, row, col);\n    minesToBoard(_minesSample, _boardSample);\n    applyActionToSampledBoard(row, col);\n    _hash = boardHash();\n  }  // State::ApplyAction\n\n  virtual void DoGoodAction() override {\n    assert(!_legalActions.empty());\n    if (isFirstMove(_board)) {\n      MINESWEEPER_DEBUG(debug(std::cout)\n                        << \"Apply Random Action\"\n                        << \", rng=\\\"\" << _rng << \"\\\"\" << std::endl);\n      DoRandomAction();\n      return;\n    }\n    using CspStrategy = csp::vkms::CspStrategy<WIDTH, HEIGHT, MINES>;\n    CspStrategy cspStrategy;\n    cspStrategy.computeMineProbabilitiesAndSampleMines(\n        _minesSample, _board, _rng);\n    // greedy choice of the best location to probe\n    using MineProbas = std::array<float, WIDTH * HEIGHT>;\n    const MineProbas& mineProbas = cspStrategy.getMineProbabilities();\n    MINESWEEPER_DEBUG(\n        debug(std::cout) << \"Mine probabilities:\" << std::endl;\n        for (size_t row = 0; row < HEIGHT; ++row) {\n          for (size_t col = 0; col < WIDTH; ++col) {\n            std::cout << setw(10)\n                      << arrGet<MineProbas MINESWEEPER_DEBUG_COMMA WIDTH>(\n                             mineProbas, row, col);\n          }\n          
std::cout << std::endl;\n        });\n    int row = -1, row_i;\n    int col = -1, col_i;\n    float proba = 1.0, probaMin = -1.0;\n    for (size_t i = 0; i < _legalActions.size(); i++) {\n      row_i = _legalActions[i].GetY();\n      col_i = _legalActions[i].GetZ();\n      proba = arrGet<typename CspStrategy::MineProbas, WIDTH>(\n          mineProbas, row_i, col_i);\n      if ((proba < probaMin) || (probaMin < 0)) {\n        probaMin = proba;\n        row = row_i;\n        col = col_i;\n      }\n    }\n    minesToBoard(_minesSample, _boardSample);\n    applyActionToSampledBoard(row, col);\n  }  // State::ApplyAction\n\n  /**\n   * Expects action in format \"r,c\", where r and c are row and column\n   * of a cell to be probed. The coordinates must be non-negative integers\n   * that do not exceed board size. Action validity is performed\n   */\n  virtual int parseAction(const std::string& str) const override {\n    int row = -1;\n    int col = -1;\n    char c = 0;\n    std::istringstream iss(str);\n    iss >> c;\n    if (!iss.good() || (c != '(')) {\n      return -1;\n    }\n    iss >> row;\n    if (!iss.good() || (row < 0) || (row >= static_cast<int>(HEIGHT))) {\n      return -1;\n    }\n    iss >> c;\n    if (!iss.good() || (c != ',')) {\n      return -1;\n    }\n    iss >> col;\n    if (!iss.good() || (col < 0) || (col >= static_cast<int>(WIDTH))) {\n      return -1;\n    }\n    if (!(iss.good() || iss.eof()) || (c != ')')) {\n      return -1;\n    }\n    for (size_t i = 0; i < _legalActions.size(); i++) {\n      if ((_legalActions[i].GetY() == row) &&\n          (_legalActions[i].GetZ() == col)) {\n        return (int)i;\n      }\n    }\n    return -1;\n  }  // State::parseAction\n\n  virtual bool isOnePlayerGame() const override {\n    return true;\n  }\n\n  std::array<float, WIDTH * HEIGHT> getMineProbas() {\n    using CspStrategy = csp::vkms::CspStrategy<WIDTH, HEIGHT, MINES>;\n    CspStrategy cspStrategy;\n    
cspStrategy.computeMineProbabilitiesAndSampleMines(\n        _minesSample, _board, _rng);\n    using MineProbas = std::array<float, WIDTH * HEIGHT>;\n    const MineProbas& mineProbas = cspStrategy.getMineProbabilities();\n    return mineProbas;\n  }\n\n  /*\n   * -1 means \"unknown, could be a mine\"\n   *  k >=0 means \"k mines in the neighborhood\"\n   */\n  virtual std::string stateDescription() const override {\n    std::string boardStr = _GameDefs::boardToString(_board);\n    return boardStr;\n  }  // State::stateDescription\n\n private:\n  void applyActionToSampledBoard(int row, int col) {\n    MINESWEEPER_DEBUG(displayBoard(\"Current board:\", _board));\n\n    MINESWEEPER_DEBUG(displayBoard(\"Sampled board:\", _boardSample));\n    MINESWEEPER_DEBUG(checkConsistency(_boardSample, _board));\n    MINESWEEPER_DEBUG(debug(std::cout)\n                      << \"Probe: row=\" << row << \", col=\" << col << \": \");\n\n    int value = arrGet<Board, WIDTH>(_boardSample, row, col);\n    if (value == BOOM) {\n      MINESWEEPER_DEBUG(std::cout << \"BOOM!\" << std::endl);\n      _status = GameStatus::player1Win;\n      _legalActions.clear();\n      fillFeatures(_features, _board);\n      fillFullFeatures();\n      return;\n    }\n    arrGet<Board, WIDTH>(_board, row, col) = value;\n    MINESWEEPER_DEBUG(std::cout << \"value=\" << value << std::endl);\n#ifdef EXPAND_ZEROS\n    if (!value) {\n      expandZeros(_board, _boardSample, row, col);\n      MINESWEEPER_DEBUG(debug(std::cout) << \"Expanded zeros\" << std::endl);\n      MINESWEEPER_DEBUG(displayBoard(\"Current board:\", _board));\n    }\n#endif\n    if (done()) {\n      MINESWEEPER_DEBUG(debug(std::cout) << \"Done.\" << std::endl);\n      _status = GameStatus::player0Win;\n      _legalActions.clear();\n      fillFeatures(_features, _board);\n      fillFullFeatures();\n      return;\n    }\n    using CspStrategy = csp::vkms::CspStrategy<WIDTH, HEIGHT, MINES>;\n    CspStrategy cspStrategy;\n    auto forSureMines = 
cspStrategy.locateForSureMines(_board);\n    _legalActions.clear();\n    if (_status == GameStatus::player0Turn) {\n      fillLegalActions(_legalActions, _board, forSureMines);\n      MINESWEEPER_DEBUG(debug(std::cout) << \"Num legal actions: \"\n                                         << _legalActions.size() << std::endl);\n    }\n    fillFeatures(_features, _board);\n    fillFullFeatures();\n  }\n\n  void expandZeros(Board& board, const Board& boardSample, int row, int col) {\n    int value = arrGet<Board, WIDTH>(board, row, col);\n    assert(!value);\n    _expandZerosProcessedMask.zero();\n    auto select_unprocessed = [this](int UNUSED(v), int row, int col) {\n      return !this->_expandZerosProcessedMask.get(row, col);\n    };\n    std::list<int> queue;\n    int idx = rowColToIdx<WIDTH>(row, col);\n    queue.push_back(idx);\n    _expandZerosProcessedMask.set(row, col);\n    while (!queue.empty()) {\n      idx = queue.front();\n      queue.pop_front();\n      idxToRowCol<WIDTH>(idx, row, col);\n      auto neighborPositions =\n          _GameDefs::getNeighbors(board, row, col, select_unprocessed);\n      for (const auto& pos : neighborPositions) {\n        value = arrGet<Board, WIDTH>(boardSample, pos.row(), pos.col());\n        arrGet<Board, WIDTH>(board, pos.row(), pos.col()) = value;\n        if (!value) {\n          idx = rowColToIdx<WIDTH>(pos.row(), pos.col());\n          queue.push_back(idx);\n          _expandZerosProcessedMask.set(pos.row(), pos.col());\n        }\n      }\n    }\n  }  // expandZeros\n\n  template <typename RngEngine>\n  void sampleMines(Mines& minesSample,\n                   const Board& board,\n                   RngEngine& rng,\n                   int row,\n                   int col) {\n    int probeIdx = rowColToIdx<WIDTH>(row, col);\n    MINESWEEPER_DEBUG(debug(std::cout)\n                      << \"Is first move? \"\n                      << (isFirstMove(board) ? 
\"yes\" : \"no\") << std::endl);\n    if (isFirstMove(board)) {\n      MINESWEEPER_DEBUG(debug(std::cout) << \"Sample mines uniformly \"\n                                         << \"without duplicates\" << std::endl);\n      std::vector<int> cellIndices(HEIGHT * WIDTH);\n      for (size_t i = 0; i < HEIGHT * WIDTH; ++i) {\n        cellIndices[i] = i;\n      }\n      std::swap(cellIndices[0], cellIndices[probeIdx]);\n      for (size_t i = 0; i < MINES; ++i) {\n        std::uniform_int_distribution<size_t> distribution(\n            i + 1, cellIndices.size() - 1);\n        size_t varIdx = distribution(rng);\n        minesSample[i] = cellIndices[varIdx];\n        std::swap(cellIndices[i + 1], cellIndices[varIdx]);\n      }\n      std::sort(minesSample.begin(), minesSample.end());\n    } else {\n      // do CSP sampling\n      MINESWEEPER_DEBUG(debug(std::cout)\n                        << \"Sample mines with CSP: \" << std::endl);\n      using CspStrategy = csp::vkms::CspStrategy<WIDTH, HEIGHT, MINES>;\n      CspStrategy cspStrategy;\n      cspStrategy.sampleMines(minesSample, board, rng);\n    }\n    MINESWEEPER_DEBUG(displayMines(\"Sampled mines:\", minesSample));\n    checkMinesSample(minesSample);\n  }  // sampleMines\n\n  static bool isFirstMove(const Board& board) {\n    size_t unknown = 0;\n    for (size_t k = 0; k < HEIGHT * WIDTH; ++k) {\n      if (board[k] == UNKNOWN) {\n        ++unknown;\n      }\n    }\n    return (unknown == HEIGHT * WIDTH);\n  }  // shouldDoRejectionSampling\n\n  static bool hasDuplicates(const Mines& mines) {\n    for (size_t k = 1; k < MINES; ++k) {\n      if (mines[k - 1] == mines[k]) {\n        return true;\n      }\n    }\n    return false;\n  }  // hasDuplicates\n\n  /**\n   * Check that mine indices are valid and are in srtrictly ascending order\n   */\n  static void checkMinesSample(Mines& minesSample) {\n#ifndef NDEBUG\n    int prevMineIdx = -1;\n    for (int mineIdx : minesSample) {\n      assert(mineIdx >= 0);\n      
assert(mineIdx < static_cast<int>(HEIGHT * WIDTH));\n      assert(mineIdx > prevMineIdx);\n      prevMineIdx = mineIdx;\n    }\n#endif\n  }  // checkMinesSample\n\n  /**\n   * Check that sampled board complies with the current board\n   */\n  static void checkConsistency(const Board& boardSample, const Board& board) {\n    for (size_t i = 0; i < WIDTH * HEIGHT; ++i) {\n      if (board[i] != UNKNOWN) {\n        assert(boardSample[i] == board[i]);\n      }\n    }\n  }  // checkConsistency\n\n  void minesToBoard(const Mines& mines, Board& board) {\n    memset(board.data(), 0, WIDTH * HEIGHT * sizeof(int));\n    int idx, row, col;\n    for (size_t i = 0; i < NUM_NEIGHBORS; ++i) {\n      for (size_t j = 0; j < MINES; ++j) {\n        idx = mines[j];\n        idxToRowCol<WIDTH>(idx, row, col);\n        idx += _minesToBoardDeltaIdx[i];\n        row += _minesToBoardDeltaRow[i];\n        col += _minesToBoardDeltaCol[i];\n        if ((row >= 0) && (row < static_cast<int>(HEIGHT)) && (col >= 0) &&\n            (col < static_cast<int>(WIDTH))) {\n          board[idx]++;\n        }\n      }\n    }\n    for (size_t j = 0; j < MINES; ++j) {\n      board[mines[j]] = BOOM;\n    }\n  }  // minesToBoard\n\n  static void fillLegalActions(std::vector<::_Action>& legalActions,\n                               const Board& board,\n#ifdef FLAG_MINES\n                               const std::vector<int>& forSureMines\n#else\n                               const std::vector<int>& UNUSED(forSureMines)\n#endif\n  ) {\n    legalActions.reserve(HEIGHT * WIDTH);\n    auto k = 0;\n    auto n = 0;\n    for (size_t row = 0; row < HEIGHT; ++row) {\n      for (size_t col = 0; col < WIDTH; ++col) {\n        if (board[k++] == UNKNOWN) {\n          ::_Action act = Act(0, 0, row, col);\n#ifdef FLAG_MINES\n          if (std::binary_search(\n                  forSureMines.begin(), forSureMines.end(),\n                  /*Minesweeper::template*/ rowColToIdx<WIDTH>(row, col))) {\n            continue;\n       
   }\n#endif\n          act.SetIndex(n++);\n          legalActions.push_back(act);\n        }\n      }\n    }\n  }  // fillLegalActions\n\n  static void fillFeatures(std::vector<float>& features, const Board& board) {\n    for (size_t k = 0; k < HEIGHT * WIDTH; ++k) {\n      features[k] = board[k];\n      features[HEIGHT * WIDTH + k] = (board[k] < 0 ? 1 : 0);\n    }\n  }  // fillFeatures\n\n  static void displayBoard(const std::string& title, const Board& board) {\n    std::cout << title << std::endl;\n    std::string boardStr = _GameDefs::boardToString(board);\n    std::cout << boardStr;\n  }  // displayBoard\n\n  static void displayMines(const std::string& title, const Mines& mines) {\n    std::cout << title << std::endl;\n    std::string minesStr = _GameDefs::minesToString(mines);\n    std::cout << minesStr << std::endl;\n  }  // displayMines\n\n  static bool boardAllUnknown(const Board& board) {\n    for (size_t k = 0; k < HEIGHT * WIDTH; ++k) {\n      if (board[k] != UNKNOWN) {\n        return false;\n      }\n    }\n    return true;\n  }  // boardAllUnknown\n\n  bool done() const {\n    size_t nUnknown = 0;\n    for (size_t k = 0; k < HEIGHT * WIDTH; ++k) {\n      if (_board[k] == UNKNOWN) {\n        ++nUnknown;\n      }\n    }\n    return (nUnknown == MINES);\n  }  // done\n\n  uint64_t boardHash() {\n    _hasher.reset();\n    int v;\n    for (unsigned i = 0; i < _board.size(); ++i) {\n      v = _board[i] + 2;\n      assert(v >= 0);\n      _hasher.trigger(static_cast<size_t>(v) * WIDTH * HEIGHT + i);\n    }\n    return _hasher.hash();\n  }  // boardHash\n\n  Board _board;\n  Board _boardSample;\n  Mines _minesSample;\n  _Mask _expandZerosProcessedMask;\n  static constexpr Neighbors _minesToBoardDeltaIdx =\n      NeighborOffsets<int, WIDTH, NUM_NEIGHBORS>::dindices;\n  static constexpr Neighbors _minesToBoardDeltaRow =\n      NeighborOffsets<int, WIDTH, NUM_NEIGHBORS>::drow;\n  static constexpr Neighbors _minesToBoardDeltaCol =\n      NeighborOffsets<int, 
WIDTH, NUM_NEIGHBORS>::dcol;\n\n  static std::once_flag hashBookConfigured;\n  static _HashBook hashBook;\n  _Hasher _hasher;\n\n};  // class State\n\ntemplate <size_t WIDTH, size_t HEIGHT, size_t MINES>\nstd::once_flag State<WIDTH, HEIGHT, MINES>::hashBookConfigured;\n\ntemplate <size_t WIDTH, size_t HEIGHT, size_t MINES>\ntypename State<WIDTH, HEIGHT, MINES>::_HashBook\n    State<WIDTH, HEIGHT, MINES>::hashBook;\n\n}  // namespace Minesweeper\n"
  },
  {
    "path": "src/games/minishogi.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author1: Maria Elsa\n// - Github: https://github.com/melsaa\n// - Email:  m_elsa@ymail.com\n// Author2: Lin Hsin-I\n// - Github: https://github.com/free00000000000\n// - Email:  410521233@gms.ndhu.edu.tw\n// Facilitator: 邱顯棟 (Xiǎn-Dòng Qiū)\n// - Github: https://github.com/YumJelly\n// - Email:  yumjelly@gmail.com\n\n#pragma once\n#include \"../core/state.h\"\n#include \"shogi.h\"\n#include <mutex>\n#include <queue>\n#include <sstream>\n#include <vector>\n\ntemplate <int version = 2>\nclass StateForMinishogi : public core::State, public Shogi {\n public:\n  static inline uint64_t HashArray[2][10][Dx][Dy];\n  static inline uint64_t HashArrayJail[20];\n  static inline uint64_t HashTurn;\n  int length;\n\n  std::array<int, 2> checkCount;\n  struct Repetition {\n    uint64_t hash;\n    char count;\n    int lastStepIdx;\n  };\n\n  std::array<std::vector<Repetition>, 16> repetitions;\n  int repeatCount;\n\n  StateForMinishogi(int seed)\n      : State(seed)\n      , Shogi() {\n  }\n\n  virtual void Initialize() override {\n    _moves.clear();\n    // _hash = 2166136261u;\n    _hash = 0;\n    _status = GameStatus::player0Turn;\n    _featSize[0] = 217;\n    if (version == 2) {\n      _featSize[0] =\n          (6 + 4 + 6) * 2 + 3;  // (6 pieces + 4 promoted + 6 off board\n                                // (counts)) * 2 + 3 repeat counts\n    }\n    _featSize[1] = Dy;\n    _featSize[2] = Dx;\n    _actionSize[0] = 19;  // 11 pieces + 8 promoted\n    if (version == 2) {\n      _actionSize[0] = 6 + 6;\n    }\n    _actionSize[1] = Dy;\n    _actionSize[2] = Dx;\n    _features.clear();\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n\n    gameInit();\n    static std::once_flag initFlag;\n    std::call_once(initFlag, 
initHash);\n\n    findFeature();\n    findActions();\n    fillFullFeatures();\n  }\n\n  void gameInit() {\n    chess.clear();\n    chess.resize(2);\n    for (int i = 0; i < Dx; ++i)\n      for (int j = 0; j < Dy; ++j)\n        board[i][j] = Piece();\n\n    for (int i = 0; i < Dx; ++i) {\n      board[i][0] = Piece(White, PieceType(i + 1), false, Position(i, 0));\n      chess[White].push_back(board[i][0]);\n    }\n    board[0][1] = Piece(White, PieceType::Pawn, false, Position(0, 1));\n    chess[White].push_back(board[0][1]);\n\n    for (int i = 1; i <= Dx; ++i) {\n      int x = Dx - i;\n      board[x][4] = Piece(Black, PieceType(i), false, Position(x, 4));\n      chess[Black].push_back(board[x][4]);\n    }\n    board[4][3] = Piece(Black, PieceType::Pawn, false, Position(4, 3));\n    chess[Black].push_back(board[4][3]);\n\n    length = 0;\n    _hash = 0;\n    checkCount = {0, 0};\n    for (auto& v : repetitions) {\n      v.clear();\n    }\n    repetitions[_hash % repetitions.size()].push_back({_hash, 1, 0});\n    repeatCount = 0;\n  }\n\n  static void initHash() {\n    std::mt19937_64 rng(\n        std::random_device{}() ^\n        std::chrono::steady_clock::now().time_since_epoch().count());\n    rng.discard(1024);\n    for (int a = 0; a < 2; ++a)\n      for (int b = 0; b < 10; ++b)\n        for (int c = 0; c < 5; ++c)\n          for (int d = 0; d < 5; ++d) {\n            HashArray[a][b][c][d] = rng();\n          }\n    for (int a = 0; a < 20; ++a) {\n      HashArrayJail[a] = rng();\n    }\n\n    HashTurn = rng();\n  }\n\n  void findFeature() {\n    if (version == 2) {\n      std::fill(_features.begin(), _features.end(), 0);\n      for (auto& v : chess) {\n        for (const Piece& p : v) {\n          if (p.pos.on_board()) {\n            int x = p.pos.x;\n            int y = p.pos.y;\n            size_t offset = y * Dx + x;\n            size_t index = (int)p.type - 1;\n            if (p.color == Black) {\n              index += 6 + 4;\n            }\n            
_features[offset + Dx * Dy * index] = 1.0f;\n            if (p.promoted) {\n              index = 6 + (int)p.type - 3;\n              if (p.color == Black) {\n                index += 6 + 4;\n              }\n              _features[offset + Dx * Dy * index] = 1.0f;\n            }\n          } else {\n            size_t index = (6 + 4) * 2 + (int)p.type - 1;\n            if (p.color == Black) {\n              index += 6;\n            }\n            size_t begin = Dx * Dy * index;\n            size_t end = Dx * Dy * (index + 1);\n            for (size_t i = begin; i != end; ++i) {\n              _features[i] += 1.0f;\n            }\n          }\n        }\n      }\n      if (repeatCount) {\n        size_t index = (6 + 4 + 6) * 2;\n        index += std::max(std::min(repeatCount, 3), 1) - 1;\n        size_t begin = Dx * Dy * index;\n        size_t end = Dx * Dy * (index + 1);\n        for (size_t i = begin; i != end; ++i) {\n          _features[i] += 1.0f;\n        }\n      }\n      return;\n    }\n\n    std::vector<float> old(_features);\n    for (int i = 0; i < 5425; ++i)\n      _features[i] = 0;\n    // 0 ~ 500\n    for (int i = 0; i < 25; ++i) {\n      Piece p = board[i % 5][i / 5];\n      if (p.color == White) {\n        switch (p.type) {\n        case PieceType::King:\n          _features[i] = 1;\n          break;\n\n        case PieceType::Gold:\n        case PieceType::Gold2:\n          _features[25 + i] = 1;\n          break;\n\n        case PieceType::Silver:\n        case PieceType::Silver2:\n          if (p.promoted)\n            _features[50 + i] = 1;\n          else\n            _features[75 + i] = 1;\n          break;\n\n        case PieceType::Bishop:\n        case PieceType::Bishop2:\n          if (p.promoted)\n            _features[100 + i] = 1;\n          else\n            _features[125 + i] = 1;\n          break;\n\n        case PieceType::Rook:\n        case PieceType::Rook2:\n          if (p.promoted)\n            _features[150 + i] = 1;\n        
  else\n            _features[175 + i] = 1;\n          break;\n\n        case PieceType::Pawn:\n        case PieceType::Pawn2:\n          if (p.promoted)\n            _features[200 + i] = 1;\n          else\n            _features[225 + i] = 1;\n          break;\n\n        default:\n          break;\n        }\n      } else {\n        switch (p.type) {\n        case PieceType::King:\n          _features[250 + i] = 1;\n          break;\n\n        case PieceType::Gold:\n        case PieceType::Gold2:\n          _features[275 + i] = 1;\n          break;\n\n        case PieceType::Silver:\n        case PieceType::Silver2:\n          if (p.promoted)\n            _features[300 + i] = 1;\n          else\n            _features[325 + i] = 1;\n          break;\n\n        case PieceType::Bishop:\n        case PieceType::Bishop2:\n          if (p.promoted)\n            _features[350 + i] = 1;\n          else\n            _features[375 + i] = 1;\n          break;\n\n        case PieceType::Rook:\n        case PieceType::Rook2:\n          if (p.promoted)\n            _features[400 + i] = 1;\n          else\n            _features[425 + i] = 1;\n          break;\n\n        case PieceType::Pawn:\n        case PieceType::Pawn2:\n          if (p.promoted)\n            _features[450 + i] = 1;\n          else\n            _features[475 + i] = 1;\n          break;\n\n        default:\n          break;\n        }\n      }\n    }\n\n    //    // 500 ~ 575\n    //    switch (repeat) {\n    //    case 1:\n    //      std::fill(_features.begin() + 500, _features.begin() + 525, 1);\n    //      break;\n    //    case 5:\n    //      std::fill(_features.begin() + 525, _features.begin() + 550, 1);\n    //      break;\n    //    case 9:\n    //      std::fill(_features.begin() + 550, _features.begin() + 575, 1);\n    //      break;\n    //    default:\n    //      break;\n    //    }\n\n    // prison w 575 ~ 625\n    // prison b 625 ~ 675\n    int tmp = 575;\n    for (int i = 0; i < 2; ++i) {\n   
   std::vector<Piece>::iterator it;\n      for (it = chess[i].begin(); it != chess[i].end(); ++it) {\n        if (!(*it).pos.on_board()) {\n          switch ((*it).type) {\n          case PieceType::Gold:\n            std::fill(_features.begin() + tmp, _features.begin() + tmp + 5, 1);\n            break;\n          case PieceType::Silver:\n            std::fill(\n                _features.begin() + tmp + 5, _features.begin() + tmp + 10, 1);\n            break;\n          case PieceType::Bishop:\n            std::fill(\n                _features.begin() + tmp + 10, _features.begin() + tmp + 15, 1);\n            break;\n          case PieceType::Rook:\n            std::fill(\n                _features.begin() + tmp + 15, _features.begin() + tmp + 20, 1);\n            break;\n          case PieceType::Pawn:\n            std::fill(\n                _features.begin() + tmp + 20, _features.begin() + tmp + 25, 1);\n            break;\n          case PieceType::Gold2:\n            std::fill(\n                _features.begin() + tmp + 25, _features.begin() + tmp + 30, 1);\n            break;\n          case PieceType::Silver2:\n            std::fill(\n                _features.begin() + tmp + 30, _features.begin() + tmp + 35, 1);\n            break;\n          case PieceType::Bishop2:\n            std::fill(\n                _features.begin() + tmp + 35, _features.begin() + tmp + 40, 1);\n            break;\n          case PieceType::Rook2:\n            std::fill(\n                _features.begin() + tmp + 40, _features.begin() + tmp + 45, 1);\n            break;\n          case PieceType::Pawn2:\n            std::fill(\n                _features.begin() + tmp + 45, _features.begin() + tmp + 50, 1);\n            break;\n          default:\n            break;\n          }\n        }\n      }\n      tmp += 50;\n    }\n\n    // history 675 ~ 4725+675\n    std::copy(old.begin(), old.begin() + 4725, _features.begin() + 675);\n\n    // 5400 ~ 5425\n    std::fill(_features.begin() 
+ 5400, _features.end(), (int)_status);\n  }\n\n  std::vector<Move> moves;\n\n  void findActions() {\n    moves.clear();\n\n    auto& list = chess[(int)_status];\n    for (size_t i = 0; i != list.size(); ++i) {\n      const Piece& p = list[i];\n      if (!p.pos.on_board()) {\n        bool duplicate = false;\n        for (size_t i2 = 0; i2 != i; ++i2) {\n          const Piece& p2 = list[i2];\n          if (p2.type == p.type && !p2.pos.on_board()) {\n            duplicate = true;\n            break;\n          }\n        }\n        if (duplicate) {\n          continue;\n        }\n      }\n      legalMoves(p, moves);\n    }\n\n    int i = 0;\n    clearActions();\n    for (auto m : moves) {\n      m.piece.promoted = m.promote;\n\n      if (version == 2) {\n        addAction((int)m.piece.type - 1 + (m.piece.pos.on_board() ? 0 : 6),\n                  m.next.y, m.next.x);\n      } else {\n\n        int x = m.next.x;\n        int y = m.next.y;\n        int z = type_to_z(m.piece);\n\n        addAction(z, x, y);\n      }\n      i++;\n    }\n  }\n\n  virtual void printCurrentBoard() const override {\n    std::cerr << stateDescription() << \"\\n\";\n  }\n\n  std::string print_chess(const int color) const {\n    std::string str;\n    if (color == White)\n      str += \"MiniWhite: \";\n    else\n      str += \"MiniBlack: \";\n    for (auto i : chess[color]) {\n      if (!i.pos.on_board()) {\n        str += '(';\n        str += i.print();\n        str += ')';\n      } else\n        str += i.print();\n      str += ' ';\n    }\n    str += '\\n';\n    return str;\n  }\n\n  virtual std::string stateDescription() const override {\n    std::string str;\n    str += \"   A| B| C| D| E\\n\";\n    for (int i = Dy - 1; i >= 0; --i) {\n      str += std::to_string(i + 1) + ' ';\n      for (int j = 0; j < Dx; ++j) {\n        if (j > 0) {\n          str += '|';\n        }\n        auto x = board[j][i].print();\n        if (x.size() == 1) {\n          str += ' ';\n        }\n        str += 
x;\n      }\n      str += '\\n';\n    }\n    str += print_chess(White);\n    str += print_chess(Black);\n\n    return str;\n  }\n\n  virtual std::string actionDescription(const _Action& action) const {\n    const Move& move = moves.at(action.GetIndex());\n\n    const Piece& p = move.piece;\n\n    bool disy = false;\n    bool disx = false;\n    if (p.pos.on_board()) {\n      for (const Move& m : moves) {\n        if ((m.piece.type == p.type || new_type(m.piece.type) == p.type) &&\n            m.piece.promoted == p.promoted && m.piece.pos.on_board() &&\n            m.piece.pos != p.pos && m.next == move.next) {\n          if (m.piece.pos.x == p.pos.x) {\n            disy = true;\n          } else {\n            disx = true;\n          }\n        }\n      }\n    }\n\n    std::string s = p.print();\n    for (auto& v : s) {\n      v = std::toupper(v);\n    }\n    if (s == \"P\") {\n      if (p.pos.on_board() && board[move.next.x][move.next.y].color == Empty) {\n        s = \"\";\n      }\n    }\n    if (disx) {\n      s += char('a' + p.pos.x);\n    }\n    if (disy) {\n      s += std::to_string(1 + p.pos.y);\n    }\n    if (board[move.next.x][move.next.y].color != Empty) {\n      s += 'x';\n    } else if (!p.pos.on_board()) {\n      s += '@';\n    }\n    s += 'a' + move.next.x;\n    s += std::to_string(1 + move.next.y);\n    if (move.promote) {\n      s += '+';\n    }\n    return s;\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForMinishogi>(*this);\n  }\n\n  int getHashNum(Piece p) {\n    int num = (int)p.type;\n    if (num >= 7)\n      num -= 5;\n    if (p.promoted)\n      num += 5;\n    //   7 8 9 10 11\n    // 1 2 3 4  5  6 | 7 8 9 10\n    num -= 1;\n    return num;\n  }\n\n  int getHashNumjail(Piece p) {\n    // 0~19\n    return (int)p.type - 2 + 10 * p.color;\n  }\n\n  void play(Move m) {\n\n    if (m.piece.pos.on_board()) {\n      _hash ^= HashArray[m.piece.color][getHashNum(m.piece)][m.piece.pos.x]\n    
                    [m.piece.pos.y];\n      m.piece.promoted |= m.promote;\n      // eat\n      if (board[m.next.x][m.next.y].color != Empty) {\n        int opp = opponent(m.piece.color);\n        _hash ^= HashArray[opp][getHashNum(board[m.next.x][m.next.y])][m.next.x]\n                          [m.next.y];\n\n        auto type = board[m.next.x][m.next.y].type;\n        if (version == 1) {\n          type = new_type(type);\n        }\n        Piece tmp(m.piece.color, type, false);\n        chess[m.piece.color].push_back(tmp);\n\n        _hash ^= HashArrayJail[getHashNumjail(tmp)];\n\n        bool found = false;\n        std::vector<Piece>::iterator it;\n        for (it = chess[opp].begin(); it != chess[opp].end(); ++it) {\n          if (it->pos == m.next) {\n            chess[opp].erase(it);\n            found = true;\n            break;\n          }\n        }\n        if (!found)\n          throw std::runtime_error(\"Could not find piece to erase\");\n      }\n\n      std::vector<Piece>::iterator it;\n      bool found = false;\n      for (it = chess[m.piece.color].begin(); it != chess[m.piece.color].end();\n           ++it) {\n        if ((*it).pos == m.piece.pos) {\n          (*it).pos = m.next;\n          // decide promoted\n          if (m.piece.promoted) {\n            (*it).promoted = true;\n          }\n          found = true;\n\n          board[m.next.x][m.next.y] = (*it);\n          board[m.piece.pos.x][m.piece.pos.y] = Piece();\n          break;\n        }\n      }\n      if (!found) {\n        throw std::runtime_error(\"could not find piece to move\");\n      }\n    } else {  // Drop move\n      _hash ^= HashArrayJail[getHashNumjail(m.piece)];\n      std::vector<Piece>::iterator it;\n      for (it = chess[m.piece.color].begin(); it != chess[m.piece.color].end();\n           ++it) {\n        if ((*it).type == m.piece.type && !it->pos.on_board()) {\n          (*it).pos = m.next;\n          board[m.next.x][m.next.y] = (*it);\n          break;\n        }\n  
    }\n    }\n    _hash ^= HashArray[m.piece.color][getHashNum(board[m.next.x][m.next.y])]\n                      [m.next.x][m.next.y];\n    _hash ^= HashTurn;\n\n    if (length < MaxPlayoutLength) {\n      // rollout[length] = m;\n      length++;\n    } else {\n      // set draw when the moves bigger than 1000\n      _status = GameStatus::tie;\n    }\n\n    for (auto i : chess[opponent(m.piece.color)]) {\n      if (i.type == PieceType::King) {\n        if (check(i.pos, m.piece.color)) {\n          ++checkCount.at(m.piece.color);\n        } else {\n          checkCount.at(m.piece.color) = 0;\n        }\n        break;\n      }\n    }\n\n    // find repeat\n    repeatCount = 0;\n    bool found = false;\n    size_t index = _hash % repetitions.size();\n    for (auto& v : repetitions[index]) {\n      if (v.hash == _hash) {\n        ++v.count;\n        repeatCount = v.count;\n        int repetitionLength = _moves.size() - v.lastStepIdx;\n        v.lastStepIdx = _moves.size();\n        if (v.count >= 4) {\n          // sennichite\n          if (checkCount[Black] * 2 >= repetitionLength) {\n            _status = GameStatus::player0Win;\n          } else {\n            _status = GameStatus::player1Win;\n          }\n        }\n        found = true;\n        break;\n      }\n    }\n    if (!found) {\n      repetitions[index].push_back({_hash, 1, (int)_moves.size()});\n    }\n  }\n\n  virtual void ApplyAction(const _Action& action) override {\n    play(moves.at(action.GetIndex()));\n    if (_status == GameStatus::player0Turn ||\n        _status == GameStatus::player1Turn) {\n      _status = _status == GameStatus::player0Turn ? GameStatus::player1Turn\n                                                   : GameStatus::player0Turn;\n      findActions();\n      if (moves.empty()) {\n        _status = _status == GameStatus::player1Turn ? 
GameStatus::player0Win\n                                                     : GameStatus::player1Win;\n      }\n    }\n    if (_status == GameStatus::player0Turn ||\n        _status == GameStatus::player1Turn) {\n      findFeature();\n      fillFullFeatures();\n    } else {\n      clearActions();\n    }\n  }\n\n  virtual void DoGoodAction() override {\n    return DoRandomAction();\n  }\n};\n"
  },
  {
    "path": "src/games/mnkgame.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: 林鈺錦 (Yù-Jǐn Lín)\n// - Github: https://github.com/abc1236762\n// - Email:  abc1236762@outlook.com\n\n#pragma once\n\n#include <algorithm>\n#include <bitset>\n#include <cassert>\n#include <functional>\n#include <iostream>\n#include <memory>\n#include <mutex>\n#include <optional>\n#include <set>\n#include <sstream>\n#include <string>\n#include <tuple>\n\n#include \"../core/state.h\"\n#include \"commons/chessboard.h\"\n#include \"commons/player.h\"\n\nnamespace MNKGame {\n\nclass ChessKind {\n public:\n  static constexpr Chess empty = 0;\n  static constexpr Chess black = 1;\n  static constexpr Chess white = 2;\n};\n\nstruct Move {\n  Chess chess;\n  int x, y;\n};\n\ntemplate <int M, int N, int K> class State : public core::State {\n  static_assert(M > 0 && N > 0 && K > 0, \"m, n, and k must be greater then 0\");\n  static_assert(K <= M || K <= N, \"k must be less than or equal to m or n\");\n\n public:\n  using Board = Chessboard<M, N>;\n\n  State(int seed);\n  void Initialize() override;\n  std::unique_ptr<core::State> clone_() const override;\n  void ApplyAction(const ::_Action& action) override;\n  void DoGoodAction() override;\n  void printCurrentBoard() const override;\n  std::string stateDescription() const override;\n  std::string actionDescription(const ::_Action& action) const override;\n  std::string actionsDescription() const override;\n  int parseAction(const std::string& str) const override;\n  int humanInputAction(\n      std::function<std::optional<int>(std::string)> specialAction) override;\n\n private:\n  template <typename RE> static void setupBoard(const RE& re);\n  static constexpr Player chessToPlayer(Chess chess);\n  static constexpr Chess playerToChess(Player player);\n\n  virtual void play(const Move& 
move);\n  std::optional<Player> findWinner(const Move& move);\n  bool isConnected(const Move& move);\n  virtual void findLegalActions();\n  inline Player getCurrentPlayer();\n  inline void turnPlayer();\n  inline void setTerminatedStatus(Player winner);\n  void fillFeatures();\n\n  static constexpr int chessKinds = 2;\n  static constexpr int connections = K;\n  static constexpr int maxLegalActionsCnt = Board::squares;\n  static constexpr std::tuple<int, int> directions[4] = {\n      {0, 1}, {1, -1}, {1, 0}, {1, 1}};\n  static inline std::once_flag setupCalled;\n\n  Board board;\n  std::bitset<Board::squares> areEmpty;\n};\n\ntemplate <int M, int N, int K>\nState<M, N, K>::State(int seed)\n    : core::State(seed) {\n  std::call_once(setupCalled, [&] { setupBoard(_rng); });\n}\n\ntemplate <int M, int N, int K> void State<M, N, K>::Initialize() {\n  _moves.clear();\n  _featSize = {chessKinds, Board::rows, Board::columns};\n  _features.resize(chessKinds * Board::squares);\n  _actionSize = {1, Board::rows, Board::columns};\n  _legalActions.reserve(maxLegalActionsCnt);\n  _status = GameStatus::player0Turn;\n\n  board.initialize();\n  areEmpty.set();\n  _hash = board.getHash();\n  findLegalActions();\n  fillFeatures();\n}\n\ntemplate <int M, int N, int K>\nstd::unique_ptr<core::State> State<M, N, K>::clone_() const {\n  return std::make_unique<State>(*this);\n}\n\ntemplate <int M, int N, int K>\nvoid State<M, N, K>::ApplyAction(const ::_Action& action) {\n  Move move{};\n  move.chess = playerToChess(getCurrentPlayer());\n  move.x = action.GetY();\n  move.y = action.GetZ();\n  play(move);\n  board.turnHash();\n  _hash = board.getHash();\n  if (auto hasWinner = findWinner(move); !hasWinner) {\n    turnPlayer();\n    findLegalActions();\n    fillFeatures();\n  } else {\n    setTerminatedStatus(hasWinner.value());\n  }\n}\n\ntemplate <int M, int N, int K> void State<M, N, K>::DoGoodAction() {\n  DoRandomAction();\n}\n\ntemplate <int M, int N, int K> void State<M, N, 
K>::printCurrentBoard() const {\n  std::cout << board.sprint(\"  \");\n}\n\ntemplate <int M, int N, int K>\nstd::string State<M, N, K>::stateDescription() const {\n  return board.sprint(\"  \");\n}\n\ntemplate <int M, int N, int K>\nstd::string State<M, N, K>::actionDescription(const ::_Action& action) const {\n  std::ostringstream oss;\n  oss << \"put a chess at \" << board.getPosStr(action.GetY(), action.GetZ());\n  return oss.str();\n}\n\ntemplate <int M, int N, int K>\nstd::string State<M, N, K>::actionsDescription() const {\n  std::set<std::tuple<int, int>> markedPos;\n  for (auto& legalAction : _legalActions)\n    markedPos.insert({legalAction.GetY(), legalAction.GetZ()});\n  return board.sprintBoard(\"  \", markedPos);\n}\n\ntemplate <int M, int N, int K>\nint State<M, N, K>::parseAction(const std::string& str) const {\n  auto result = board.parsePosStr(str);\n  if (!result)\n    return -1;\n  auto [x, y] = result.value();\n  int i = 0;\n  for (auto& legalAction : _legalActions) {\n    if (legalAction.GetY() == x && legalAction.GetZ() == y)\n      return i;\n    i++;\n  }\n  return -1;\n}\n\ntemplate <int M, int N, int K>\nint State<M, N, K>::humanInputAction(\n    std::function<std::optional<int>(std::string)> specialAction) {\n  std::cout << \"Current board:\" << std::endl << stateDescription() << std::endl;\n  std::cout << \"Allowed positions: ('\" << Board::getMarkSymbol()\n            << \"' means an allowed position)\" << std::endl\n            << actionsDescription() << std::endl;\n  std::cout << \"Input a position to play: (uses format <alphabet of x-axis>\"\n            << \"<numbers of y-axis>, e.g. 
`A1`, `b2`, `C03`...)\" << std::endl;\n  std::string str;\n  int index = -1;\n  while (index < 0) {\n    std::cout << \"> \";\n    std::getline(std::cin, str);\n    index = parseAction(str);\n    if (index < 0) {\n      if (auto r = specialAction(str); r)\n        return *r;\n      std::cout << \"invalid input, try again.\" << std::endl;\n    }\n  }\n  return index;\n}\n\ntemplate <int M, int N, int K>\ntemplate <typename RE>\nvoid State<M, N, K>::setupBoard(const RE& re) {\n  Board::setup({\"Empty\", \"Black\", \"White\"}, {\" \", \"●\", \"○\"}, re);\n}\n\ntemplate <int M, int N, int K>\nconstexpr Player State<M, N, K>::chessToPlayer(Chess chess) {\n  if (chess == ChessKind::black)\n    return Player::first;\n  else if (chess == ChessKind::white)\n    return Player::second;\n  assert(chess == ChessKind::black || chess == ChessKind::white);\n  return Player::none;\n}\n\ntemplate <int M, int N, int K>\nconstexpr Chess State<M, N, K>::playerToChess(Player player) {\n  if (player == Player::first)\n    return ChessKind::black;\n  else if (player == Player::second)\n    return ChessKind::white;\n  assert(player == Player::first || player == Player::second);\n  return ChessKind::empty;\n}\n\ntemplate <int M, int N, int K> void State<M, N, K>::play(const Move& move) {\n  board.setChess(move.x, move.y, move.chess);\n  areEmpty.reset(Board::posTo1D(move.x, move.y));\n}\n\ntemplate <int M, int N, int K>\nstd::optional<Player> State<M, N, K>::findWinner(const Move& move) {\n  if (isConnected(move))\n    return chessToPlayer(move.chess);\n  else if (areEmpty.none())\n    return Player::none;\n  return std::nullopt;\n}\n\ntemplate <int M, int N, int K>\nbool State<M, N, K>::isConnected(const Move& move) {\n  if (connections == 1)\n    return true;\n  auto areChessesEnough = [&](int& count, int dx, int dy) {\n    int x = move.x + dx, y = move.y + dy;\n    while (Board::isPosInBoard(x, y)) {\n      if (board.getChess(x, y) == move.chess) {\n        if (++count == connections)\n  
        return true;\n      } else {\n        return false;\n      }\n      x += dx, y += dy;\n    }\n    return false;\n  };\n  for (auto [dx, dy] : directions) {\n    int count = 1;\n    if (areChessesEnough(count, dx, dy) || areChessesEnough(count, -dx, -dy))\n      return true;\n  }\n  return false;\n}\n\ntemplate <int M, int N, int K> void State<M, N, K>::findLegalActions() {\n  clearActions();\n  for (int xy = 0; xy < Board::squares; xy++) {\n    if (areEmpty[xy]) {\n      auto [x, y] = Board::posTo2D(xy);\n      addAction(0, x, y);\n      assert(_legalActions.size() <= maxLegalActionsCnt);\n    }\n  }\n}\n\ntemplate <int M, int N, int K> Player State<M, N, K>::getCurrentPlayer() {\n  if (_status == GameStatus::player0Turn)\n    return Player::first;\n  else if (_status == GameStatus::player1Turn)\n    return Player::second;\n  assert(_status == GameStatus::player0Turn ||\n         _status == GameStatus::player1Turn);\n  return Player::none;\n}\n\ntemplate <int M, int N, int K> void State<M, N, K>::turnPlayer() {\n  if (_status == GameStatus::player0Turn)\n    _status = GameStatus::player1Turn;\n  else if (_status == GameStatus::player1Turn)\n    _status = GameStatus::player0Turn;\n  assert(_status == GameStatus::player0Turn ||\n         _status == GameStatus::player1Turn);\n}\n\ntemplate <int M, int N, int K>\nvoid State<M, N, K>::setTerminatedStatus(Player winner) {\n  if (winner == Player::first)\n    _status = GameStatus::player0Win;\n  else if (winner == Player::second)\n    _status = GameStatus::player1Win;\n  else\n    _status = GameStatus::tie;\n}\n\ntemplate <int M, int N, int K> void State<M, N, K>::fillFeatures() {\n  std::fill(_features.begin(), _features.end(), 0.0);\n  auto* f = _features.data();\n  for (int c = 0; c < chessKinds; c++) {\n    Chess chess = static_cast<Chess>(c + 1);\n    for (int xy = 0; xy < Board::squares; xy++, f++)\n      if (board.getChess(xy) == chess)\n        *f = 1.0;\n  }\n  fillFullFeatures();\n}\n\n}  // namespace 
MNKGame\n"
  },
  {
    "path": "src/games/nogo_action.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"nogo_action.h\"\n\nNoGoAction::NoGoAction(PLAYER player, Position position) {\n  Set(player, position);\n}\n\nvoid NoGoAction::Set(PLAYER player, Position position) {\n  SetPlayer(player);\n  SetPosition(position);\n}\n\nvoid NoGoAction::SetPosition(Position position) {\n  position_ = position;\n}\n\nPosition NoGoAction::GetPosition() const {\n  return position_;\n}\n\nbool NoGoAction::operator==(const NoGoAction& rhs) {\n  return ((GetPlayer() == rhs.GetPlayer()) && (position_ == rhs.position_));\n}\n\nbool NoGoAction::operator!=(const NoGoAction& rhs) {\n  return !(*this == rhs);\n}\n\nint NoGoAction::x() const {\n  return position_ / kNOGO_BOARD_SIZE;\n}\n\nint NoGoAction::y() const {\n  return position_ % kNOGO_BOARD_SIZE;\n}\n\nstd::string NoGoAction::ToString() {\n  return ToGTPString(true);\n}\n\nstd::string NoGoAction::ToGTPString(bool with_color) const {\n  std::ostringstream oss;\n  if (with_color) {\n    if (GetPlayer() == PLAYER_0) {\n      oss << \"B(\";\n    } else {\n      oss << \"W(\";\n    }\n  }\n  oss << (char)(y() + 'A' + (y() >= 8))\n      << (char)(kNOGO_BOARD_SIZE - x() - 1 + '1');\n  if (with_color) {\n    oss << \")\";\n  }\n\n  return oss.str();\n}\n\nstd::string NoGoAction::ToSgfString(bool with_color) const {\n  std::ostringstream oss;\n  if (with_color) {\n    if (GetPlayer() == PLAYER_0) {\n      oss << \"B[\";\n    } else {\n      oss << \"W[\";\n    }\n  }\n  if (GetPlayer() == PLAYER_NULL || GetPosition() == kNOGO_GRIDS_NUM) {\n    oss << \"tt\";\n  } else {\n    oss << (char)(y() + 'a') << (char)(x() + 'a');\n  }\n  if (with_color) {\n    oss << \"]\";\n  }\n  return oss.str();\n}\n\nvoid NoGoAction::Rotate(SYMMETRYTYPE type) {\n  Point point(position_);\n  point.ToSymmetryOf(type);\n  
position_ = point.GetPosition();\n}\n"
  },
  {
    "path": "src/games/nogo_action.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"game_action.h\"\n#include \"game_player.h\"\n#include \"nogo_position.h\"\n#include <sstream>\n#include <string>\n#include <vector>\n\nclass NoGoAction : public GameAction {\n private:\n  PLAYER player_;\n  Position position_;\n\n public:\n  NoGoAction(PLAYER player = PLAYER_NULL, Position position = kNOGO_GRIDS_NUM);\n  PLAYER GetPlayer() const {\n    return player_;\n  }\n  Position GetPosition() const;\n  void SetPlayer(PLAYER player) {\n    player_ = player;\n  }\n  bool IsIllegalAction() const {\n    return player_ == PLAYER_NULL;\n  }\n  void Set(PLAYER player, Position position);\n  void SetPosition(Position position);\n  bool operator==(const NoGoAction& rhs);\n  bool operator!=(const NoGoAction& rhs);\n  int x() const;\n  int y() const;\n  std::string ToString();\n  std::string ToGTPString(bool with_color = false) const;\n  std::string ToSgfString(bool with_color = false) const;\n  void Rotate(SYMMETRYTYPE type);\n  int GetID() {\n    return position_;\n  }\n  void SetID(int id) {\n    position_ = id;\n  }\n};\n#include \"nogo_action.cc\"\n"
  },
  {
    "path": "src/games/nogo_bitboard.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#ifndef CZF_NOGO_NOGO_BITBOARD_H_\n#define CZF_NOGO_NOGO_BITBOARD_H_\n\n#include \"nogo_position.h\"\nclass NoGoBitBoard {\n  static const long long kMASK_55 = 0x5555555555555555ULL;\n  static const long long kMASK_33 = 0x3333333333333333ULL;\n  static const long long kMASK_0F = 0x0f0f0f0f0f0f0f0fULL;\n  static const long long kMASK_01 = 0x0101010101010101ULL;\n  static const long long kMASK_00FF = 0x00ff00ff00ff00ffULL;\n  static const long long kMASK_0000FFFF = 0x0000ffff0000ffffULL;\n  static const long long kMASK_00000000FFFFFFFF = 0x00000000ffffffffULL;\n  static const long long kMASK_FFFFFFFFFFFFFFFF = 0xffffffffffffffffULL;\n  long long bitboard_[(kNOGO_GRIDS_NUM / 64) + 1];\n\n public:\n  NoGoBitBoard() {\n    Reset();\n  }\n  void Reset() {\n    bitboard_[0] = 0;\n    bitboard_[1] = 0;\n  }\n\n  NoGoBitBoard& operator=(const NoGoBitBoard& rhs) {\n    bitboard_[0] = rhs.bitboard_[0];\n    bitboard_[1] = rhs.bitboard_[1];\n    return *this;\n  }\n\n  int Count() const {\n    unsigned long long v, v1;\n    v = (bitboard_[0] & kMASK_55) + ((bitboard_[0] >> 1) & kMASK_55);\n    v1 = (bitboard_[1] & kMASK_55) + ((bitboard_[1] >> 1) & kMASK_55);\n    v = (v & kMASK_33) + ((v >> 2) & kMASK_33);\n    v1 = (v1 & kMASK_33) + ((v1 >> 2) & kMASK_33);\n    v += v1;\n    v = (v & kMASK_0F) + ((v >> 4) & kMASK_0F);\n    v = (v & kMASK_00FF) + ((v >> 8) & kMASK_00FF);\n    v = (v & kMASK_0000FFFF) + ((v >> 16) & kMASK_0000FFFF);\n    return (int)((v & kMASK_00000000FFFFFFFF) + (v >> 32));\n  }\n\n  bool GetPosition(int i) const {\n    return (bitboard_[i >> 6] & (1LL << (i & 63))) != 0;\n  }\n\n  void DeletePosition(int i) {\n    bitboard_[i >> 6] &= ~(1LL << (i & 63));\n  }\n\n  void AddPosition(int i) {\n    bitboard_[i >> 6] |= (1LL << (i & 
63));\n  }\n\n  void operator|=(NoGoBitBoard rhs) {\n    bitboard_[0] |= rhs.bitboard_[0];\n    bitboard_[1] |= rhs.bitboard_[1];\n    return;\n  }\n\n  bool Isempty() const {\n    return (bitboard_[0] == 0) && (bitboard_[1] == 0);\n  }\n\n  bool CheckIsOne() const {\n    if (bitboard_[0] == 0) {\n      return (bitboard_[1] != 0) &&\n             ((bitboard_[1] ^ ((-bitboard_[1]) & (bitboard_[1]))) == 0);\n    } else if (bitboard_[1] == 0) {\n      return (bitboard_[0] ^ ((-bitboard_[0]) & (bitboard_[0]))) == 0;\n    }\n    return false;\n  }\n};\n\n#endif  // CZF_NOGO_NOGO_BITBOARD_H_\n"
  },
  {
    "path": "src/games/nogo_game.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"nogo_game.h\"\n\nNoGoGame::NoGoGame() {\n  Reset();\n}\nNoGoGame::~NoGoGame() {\n  ;\n}\n\nbool NoGoGame::PlayAction(Action action) {\n  if (is_terminal_)\n    return false;\n  if (!state_.IsLegalAction(action)) {\n    is_terminal_ = true;\n    win_player_ = !turn_player_;\n    return false;\n  }\n  history_.push_back(action);\n  turn_player_ = !action.GetPlayer();\n  state_.PlayAction(action);\n  return true;\n}\n\nNoGoGame& NoGoGame::operator=(const NoGoGame& rhs) {\n  history_ = rhs.history_;\n  turn_player_ = rhs.turn_player_;\n  state_ = rhs.state_;\n  is_terminal_ = rhs.is_terminal_;\n  win_player_ = rhs.win_player_;\n  return *this;\n}\n\nNoGoState NoGoGame::GetNoGoState() {\n  return state_;\n}\n\nstd::vector<NoGoGame::Action> NoGoGame::GetLegalActions() {\n  std::vector<Action> legal_actions;\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    if (state_.IsLegalAction(turn_player_, i)) {\n      legal_actions.push_back(Action(turn_player_, i));\n    }\n  }\n  return legal_actions;\n}\n\nstd::vector<bool> NoGoGame::GetIsLegalAction() {\n  std::vector<bool> is_legal(kNOGO_GRIDS_NUM, false);\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    if (IsLegalAction(Action(turn_player_, i))) {\n      is_legal[i] = true;\n    }\n  }\n  return is_legal;\n}\n\nbool NoGoGame::IsTerminalState() {\n  if (is_terminal_)\n    return true;\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    if (state_.IsLegalAction(turn_player_, i))\n      return false;\n  }\n  win_player_ = !turn_player_;\n  is_terminal_ = true;\n  return true;\n}\n\nNoGoBitBoard NoGoGame::GetIllegalBitBoard() {\n  NoGoBitBoard bitBoard;\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    if (!state_.IsLegalAction(turn_player_, i)) {\n      bitBoard.AddPosition(i);\n  
  }\n  }\n  return bitBoard;\n}\n\nbool NoGoGame::IsLegalAction(Action action) {\n  return state_.IsLegalAction(action);\n}\n\nbool NoGoGame::IsLegalAction(PLAYER player, Position position) {\n  return state_.IsLegalAction(player, position);\n}\n\nbool NoGoGame::IsLegalAction(Position position) {\n  return state_.IsLegalAction(turn_player_, position);\n}\n\nPLAYER NoGoGame::GetPositionPlayer(Position position) {\n  return state_.GetPlayer(position);\n}\n\nvoid NoGoGame::ShowState() {\n  if (GetTurnPlayer() == PLAYER_0)\n    std::cerr << \"PLAYER 0\\n\";\n  if (GetTurnPlayer() == PLAYER_1)\n    std::cerr << \"PLAYER 1\\n\";\n  state_.ShowBoard();\n}\n\nstd::string NoGoGame::ShowBoard() {\n  return state_.ToString();\n}\n\nstd::string NoGoGame::GetGtpResultString() {\n  if (!IsTerminalState()) {\n    return \"0\";\n  }\n  if (GetTurnPlayer() == PLAYER_0) {\n    return \"W+R\";\n  } else {\n    return \"B+R\";\n  }\n}\n\nstd::string NoGoGame::ToSgfFilePrefix(std::string player0,\n                                      std::string player1,\n                                      std::string event_name = \"\") {\n  std::ostringstream oss;\n  oss << \"(;FF[4]CA[UTF-8]SZ[\" << kNOGO_BOARD_SIZE << \"]\"\n      << \"KM[0]\"\n      << \"EV[\" << event_name << \"]\"\n      << \"PB[\" << player0 << \"]\"\n      << \"PW[\" << player1 << \"]\"\n      << \"RE[\" << GetGtpResultString() << \"]\";\n  return oss.str();\n}\n\nstd::string NoGoGame::ToMoveString(bool with_semicolon = false) {\n  std::ostringstream oss;\n  for (size_t i = 0; i < history_.size(); i++) {\n    if (with_semicolon)\n      oss << \";\";\n    oss << history_[i].ToSgfString(with_semicolon);\n  }\n  return oss.str();\n}\n\nstd::string NoGoGame::ToMoveString(bool with_semicolon,\n                                   bool with_comments,\n                                   std::vector<std::string>& comments) {\n  std::ostringstream oss;\n  for (size_t i = 0; i < history_.size(); i++) {\n    if (with_semicolon)\n      
oss << \";\";\n    oss << history_[i].ToSgfString(with_semicolon);\n    if (with_comments)\n      oss << \"C[\" << comments[i] << \"]\";\n  }\n  return oss.str();\n}\n\nstd::string NoGoGame::ToSgfFileString(std::string player0,\n                                      std::string player1,\n                                      std::string event_name,\n                                      bool with_semicolon = false) {\n  std::ostringstream oss;\n  oss << ToSgfFilePrefix(player0, player1, event_name)\n      << ToMoveString(with_semicolon) << \")\";\n  return oss.str();\n}\n\nstd::string NoGoGame::ToSgfFileString(std::string player0,\n                                      std::string player1,\n                                      std::string event_name,\n                                      bool with_semicolon,\n                                      bool with_comments,\n                                      std::vector<std::string>& comments) {\n  std::ostringstream oss;\n  oss << ToSgfFilePrefix(player0, player1, event_name)\n      << ToMoveString(with_semicolon, with_comments, comments) << \")\";\n  return oss.str();\n}\n"
  },
  {
    "path": "src/games/nogo_game.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"game_base.h\"\n#include \"nogo_action.h\"\n#include \"nogo_state.h\"\n#include <fstream>\n#include <iostream>\n\nclass NoGoGame : public GameBase<NoGoState, NoGoAction> {\n public:\n  NoGoGame();\n  ~NoGoGame();\n\n  NoGoGame& operator=(const NoGoGame& rhs);\n  bool IsTerminalState() override;\n  bool IsLegalAction(Action action) override;\n  std::vector<Action> GetLegalActions() override;\n\n  std::vector<bool> GetIsLegalAction() override;\n  bool PlayAction(Action action) override;\n\n  NoGoBitBoard GetIllegalBitBoard();\n  NoGoState GetNoGoState();\n  PLAYER GetPositionPlayer(Position position);\n  bool IsLegalAction(PLAYER player, Position position);\n  bool IsLegalAction(Position position);\n\n  void ShowState();\n  std::string ShowBoard();\n  std::string GetGtpResultString();\n  std::string ToSgfFilePrefix(std::string player0,\n                              std::string player1,\n                              std::string sEventName);\n  std::string ToMoveString(bool with_semicolon);\n  std::string ToMoveString(bool with_semicolon,\n                           bool with_comments,\n                           std::vector<std::string>& comments);\n  std::string ToSgfFileString(std::string player0,\n                              std::string player1,\n                              std::string sEventName,\n                              bool with_semicolon);\n  std::string ToSgfFileString(std::string player0,\n                              std::string player1,\n                              std::string sEventName,\n                              bool with_semicolon,\n                              bool with_comments,\n                              std::vector<std::string>& comments);\n};\n#include 
\"nogo_game.cc\"\n"
  },
  {
    "path": "src/games/nogo_position.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#ifndef CZF_NOGO_NOGO_POSITION_H_\n#define CZF_NOGO_NOGO_POSITION_H_\n\n#include <cassert>\n#include <string>\n#include <vector>\n\nconst int kNOGO_BOARD_SIZE = 9;\nconst int kNOGO_GRIDS_NUM = kNOGO_BOARD_SIZE * kNOGO_BOARD_SIZE;\ntypedef int Position;\n\n// just for represent X and Y and do some symmetric\nenum SYMMETRYTYPE {\n  SYM_NORMAL,\n  SYM_ROTATE_90,\n  SYM_ROTATE_180,\n  SYM_ROTATE_270,\n  SYM_HORIZONTAL_REFLECTION,\n  SYM_HORIZONTAL_REFLECTION_ROTATE_90,\n  SYM_HORIZONTAL_REFLECTION_ROTATE_180,\n  SYM_HORIZONTAL_REFLECTION_ROTATE_270,\n  SYMMETRY_SIZE\n};\n\nnamespace symmetry {\nconst std::vector<SYMMETRYTYPE> kSYMMETRY_LIST{\n    SYM_NORMAL,\n    SYM_ROTATE_90,\n    SYM_ROTATE_180,\n    SYM_ROTATE_270,\n    SYM_HORIZONTAL_REFLECTION,\n    SYM_HORIZONTAL_REFLECTION_ROTATE_90,\n    SYM_HORIZONTAL_REFLECTION_ROTATE_180,\n    SYM_HORIZONTAL_REFLECTION_ROTATE_270};\n\nconst std::string kSYMMETRY_TYPE_STRING[SYMMETRY_SIZE] = {\n    \"SYM_NORMAL\",\n    \"SYM_ROTATE_90\",\n    \"SYM_ROTATE_180\",\n    \"SYM_ROTATE_270\",\n    \"SYM_HORIZONTAL_REFLECTION\",\n    \"SYM_HORIZONTAL_REFLECTION_ROTATE_90\",\n    \"SYM_HORIZONTAL_REFLECTION_ROTATE_180\",\n    \"SYM_HORIZONTAL_REFLECTION_ROTATE_270\"};\n\ninline std::string GetSymmetryTypeString(SYMMETRYTYPE type) {\n  return kSYMMETRY_TYPE_STRING[type];\n}\n\ninline SYMMETRYTYPE GetSymmetryType(std::string sType) {\n  for (int i = 0; i < SYMMETRY_SIZE; i++) {\n    if (sType == kSYMMETRY_TYPE_STRING[i]) {\n      return static_cast<SYMMETRYTYPE>(i);\n    }\n  }\n  return SYMMETRY_SIZE;\n}\n\nconst SYMMETRYTYPE kREVERSE_SYMMETRIC_TYPE[SYMMETRY_SIZE] = {\n    SYM_NORMAL,\n    SYM_ROTATE_270,\n    SYM_ROTATE_180,\n    SYM_ROTATE_90,\n    SYM_HORIZONTAL_REFLECTION,\n    
SYM_HORIZONTAL_REFLECTION_ROTATE_90,\n    SYM_HORIZONTAL_REFLECTION_ROTATE_180,\n    SYM_HORIZONTAL_REFLECTION_ROTATE_270};\n}  // namespace symmetry\n\nclass Point {\n public:\n  int x_;\n  int y_;\n  Point() {\n    ;\n  }\n  Point(int x, int y) {\n    x_ = x;\n    y_ = y;\n  }\n  Point(const Position p) {\n    x_ = p % kNOGO_BOARD_SIZE;\n    y_ = p / kNOGO_BOARD_SIZE;\n  }\n  inline bool operator==(const Point& rhs) const {\n    return (x_ == rhs.x_ && y_ == rhs.y_) ? true : false;\n  }\n  inline bool operator!=(const Point& rhs) const {\n    return !(*this == rhs);\n  }\n  Point& operator=(const Point& rhs) {\n    x_ = rhs.x_;\n    y_ = rhs.y_;\n    return *this;\n  }\n  Point& operator=(const Position& rhs) {\n    *this = Point(rhs);\n    return *this;\n  }\n  inline Position GetPosition() {\n    return y_ * kNOGO_BOARD_SIZE + x_;\n  }\n  inline void ToSymmetryOf(SYMMETRYTYPE type) {\n    /*\n    symmetric radius pattern:           ( ChangeXY   x*(-1)\n    y*(-1) ) 0 NORMAL                 : 1\n    ROTATE_90 : ChangeXY             y*(-1) 2 ROTATE_180 :            x*(-1)\n    y*(-1) 3 ROTATE_270 : ChangeXY   x*(-1) 4 HORIZONTAL_REFLECTION\n    : x*(-1) 5 HORIZONTAL_REFLECTION_ROTATE_90\t: ChangeXY 6\n    HORIZONTAL_REFLECTION_ROTATE_180\t:                     y*(-1) 7\n    HORIZONTAL_REFLECTION_ROTATE_270\t: ChangeXY   x*(-1)   y*(-1)\n    */\n    Shift();\n    switch (type) {\n    case SYM_NORMAL:\n      break;\n    case SYM_ROTATE_90:\n      ChangeXY();\n      MinusY();\n      break;\n    case SYM_ROTATE_180:\n      MinusX();\n      MinusY();\n      break;\n    case SYM_ROTATE_270:\n      ChangeXY();\n      MinusX();\n      break;\n    case SYM_HORIZONTAL_REFLECTION:\n      MinusX();\n      break;\n    case SYM_HORIZONTAL_REFLECTION_ROTATE_90:\n      ChangeXY();\n      break;\n    case SYM_HORIZONTAL_REFLECTION_ROTATE_180:\n      MinusY();\n      break;\n    case SYM_HORIZONTAL_REFLECTION_ROTATE_270:\n      ChangeXY();\n      MinusX();\n      MinusY();\n      
break;\n    default:\n      // should not be here\n      assert(false);\n    }\n    ShiftBack();\n    return;\n  }\n\n private:\n  inline void MinusX() {\n    x_ = -x_;\n  }\n  inline void MinusY() {\n    y_ = -y_;\n  }\n  inline void ChangeXY() {\n    int tmp = x_;\n    x_ = y_;\n    y_ = tmp;\n  }\n  inline void Shift() {\n    int center = kNOGO_BOARD_SIZE / 2;\n    x_ -= center;\n    y_ -= center;\n  }\n  inline void ShiftBack() {\n    int center = kNOGO_BOARD_SIZE / 2;\n    x_ += center;\n    y_ += center;\n  }\n};\n\n#endif  // CZF_NOGO_NOGO_POSITION_H_\n"
  },
  {
    "path": "src/games/nogo_state.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"nogo_state.h\"\n#include <iostream>\n\nNoGoState::NoGoState() {\n  InitNeighborList();\n  Reset();\n}\n\nvoid NoGoState::Reset() {\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    parent_[i] = i;\n    liberty_[i].Reset();\n  }\n\n  bm_board_[PLAYER_0].Reset();\n  illegal_[PLAYER_0].Reset();\n  warning_[PLAYER_0].Reset();\n\n  bm_board_[PLAYER_1].Reset();\n  illegal_[PLAYER_1].Reset();\n  warning_[PLAYER_1].Reset();\n  liberty_is_one_.Reset();\n}\n\nbool NoGoState::PlayAction(NoGoAction action) {\n  PLAYER player = action.GetPlayer();\n  Position position = action.GetPosition();\n  if (!IsLegalAction(player, position)) {\n    return false;\n  }\n  illegal_[PLAYER_0].AddPosition(position);\n  illegal_[PLAYER_1].AddPosition(position);\n  bm_board_[player].AddPosition(position);\n\n  Position parent_position = position;\n  NoGoBitBoard parent_new_liberty;\n\n  for (size_t i = 0; i < neighbor_list_[position].size(); i++) {\n    Position neighbor = neighbor_list_[position][i];\n\n    if (bm_board_[player].GetPosition(neighbor)) {\n      Position parent_of_neighbor = FindParent(neighbor);\n      parent_new_liberty |= liberty_[parent_of_neighbor];\n      if (parent_position < parent_of_neighbor)  // unite\n      {\n        parent_[parent_of_neighbor] = parent_position;\n      } else {\n        parent_[parent_position] = parent_of_neighbor;\n        parent_position = parent_of_neighbor;\n      }\n\n    } else if (bm_board_[!player].GetPosition(neighbor)) {\n      Position parent_of_neighbor = FindParent(neighbor);\n      liberty_[parent_of_neighbor].DeletePosition(position);\n      if (liberty_[parent_of_neighbor].CheckIsOne()) {\n        liberty_is_one_.AddPosition(parent_of_neighbor);\n        illegal_[player] |= 
liberty_[parent_of_neighbor];\n        warning_[!player] |= liberty_[parent_of_neighbor];\n      }\n    } else {\n      warning_[!player].AddPosition(neighbor);\n      parent_new_liberty.AddPosition(neighbor);\n    }\n  }\n  parent_new_liberty.DeletePosition(position);\n  liberty_[parent_position] = parent_new_liberty;\n\n  if (parent_new_liberty.CheckIsOne()) {\n    liberty_is_one_.AddPosition(parent_position);\n    illegal_[!player] |= parent_new_liberty;\n    warning_[player] |= parent_new_liberty;\n  } else {\n    liberty_is_one_.DeletePosition(parent_position);\n  }\n  return true;\n}\n\nNoGoState& NoGoState::operator=(const NoGoState& rhs) {\n  for (int i = 0; i < 2; i++)  // two players\n  {\n    bm_board_[i] = rhs.bm_board_[i];\n    illegal_[i] = rhs.illegal_[i];\n    warning_[i] = rhs.warning_[i];\n  }\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    parent_[i] = rhs.parent_[i];\n    liberty_[i] = rhs.liberty_[i];\n  }\n  liberty_is_one_ = rhs.liberty_is_one_;\n  return *this;\n}\n\nvoid NoGoState::Rotate(SYMMETRYTYPE type) {\n  std::vector<NoGoAction> action_list;\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    PLAYER player = GetPlayer(i);\n    if (player == PLAYER_NULL)\n      continue;\n    Point point((Position)i);\n    point.ToSymmetryOf(type);\n    action_list.push_back(NoGoAction(player, point.GetPosition()));\n  }\n  Reset();\n  for (size_t i = 0; i < action_list.size(); i++) {\n    PlayAction(action_list[i]);\n  }\n}\n\nPLAYER NoGoState::GetPlayer(Position position) const {\n  if (bm_board_[PLAYER_0].GetPosition(position))\n    return PLAYER_0;\n  if (bm_board_[PLAYER_1].GetPosition(position))\n    return PLAYER_1;\n  return PLAYER_NULL;\n}\n\nbool NoGoState::IsLegalAction(PLAYER player, Position position) {\n  if (player == PLAYER_NULL)\n    return false;\n  if (illegal_[player].GetPosition(position))\n    return false;\n  else if (!warning_[player].GetPosition(position))\n    return true;\n  warning_[player].DeletePosition(position);\n\n 
 // start check warning (is the action a suicide action)\n  for (size_t i = 0; i < neighbor_list_[position].size(); i++) {\n    Position neighbor = neighbor_list_[position][i];\n    if (bm_board_[player].GetPosition(neighbor)) {\n      if (!liberty_is_one_.GetPosition(FindParent(neighbor)))\n        return true;\n    } else if (!bm_board_[!player].GetPosition(neighbor))  // have liberty\n    {\n      return true;\n    }\n  }\n  illegal_[player].AddPosition(position);\n  return false;\n}\n\nbool NoGoState::IsLegalAction(NoGoAction action) {\n  return IsLegalAction(action.GetPlayer(), action.GetPosition());\n}\n\nvoid NoGoState::ShowBoard() const {\n  std::cerr << ToString() << std::endl;\n}\n\nvoid NoGoState::ShowLegalMove(PLAYER turn_player) {\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    PLAYER player = GetPlayer(i);\n    if (player == PLAYER_0)\n      std::cerr << '@';\n    if (player == PLAYER_1)\n      std::cerr << 'O';\n    if (player == PLAYER_NULL) {\n      if (IsLegalAction(turn_player, i)) {\n        std::cerr << \"#\";\n      } else {\n        std::cerr << \".\";\n      }\n    }\n    if (i % kNOGO_BOARD_SIZE == kNOGO_BOARD_SIZE - 1)\n      std::cerr << '\\n';\n  }\n}\n\nstd::string NoGoState::ToString() const {\n  std::ostringstream oss;\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    PLAYER player = GetPlayer(i);\n    if (player == PLAYER_0)\n      oss << '@';\n    if (player == PLAYER_1)\n      oss << 'O';\n    if (player == PLAYER_NULL)\n      oss << '.';\n    if (i % kNOGO_BOARD_SIZE == kNOGO_BOARD_SIZE - 1)\n      oss << '\\n';\n  }\n  return oss.str();\n}\n\nvoid NoGoState::PrintNeighborNum() const {\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    std::cerr << neighbor_list_[i].size() << ' ';\n    if (i % kNOGO_BOARD_SIZE == kNOGO_BOARD_SIZE - 1)\n      std::cerr << '\\n';\n  }\n}\n\nvoid NoGoState::PrintLiberty() {\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    if (!bm_board_[0].GetPosition(i) && !bm_board_[1].GetPosition(i)) {\n 
     std::cerr << \".\\t\";\n    } else if (FindParent(i) == i) {\n      std::cerr << liberty_[i].Count() << '\\t';\n    } else {\n      std::cerr << \"0\\t\";\n    }\n    if (i % kNOGO_BOARD_SIZE == kNOGO_BOARD_SIZE - 1)\n      std::cerr << '\\n';\n  }\n}\n\nvoid NoGoState::PrintLibertyIsOne(bool check_again) {\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    if (!bm_board_[0].GetPosition(i) && !bm_board_[1].GetPosition(i)) {\n      std::cerr << \".\\t\";\n    } else if (FindParent(i) == i) {\n      if (check_again) {\n        std::cerr << liberty_[i].CheckIsOne() << '\\t';\n        if (liberty_[i].CheckIsOne())\n          liberty_is_one_.AddPosition(i);\n        else\n          liberty_is_one_.DeletePosition(i);\n      } else {\n        std::cerr << liberty_is_one_.GetPosition(i) << '\\t';\n      }\n    } else {\n      std::cerr << \"0\\t\";\n    }\n    if (i % kNOGO_BOARD_SIZE == kNOGO_BOARD_SIZE - 1)\n      std::cerr << '\\n';\n  }\n}\n\nvoid NoGoState::PrintParent() {\n  for (int i = 0; i < kNOGO_GRIDS_NUM; i++) {\n    if (!bm_board_[0].GetPosition(i) && !bm_board_[1].GetPosition(i)) {\n      std::cerr << \".\\t\";\n    } else {\n      std::cerr << FindParent(i) << '\\t';\n    }\n    if (i % kNOGO_BOARD_SIZE == kNOGO_BOARD_SIZE - 1)\n      std::cerr << '\\n';\n  }\n}\n\nPosition NoGoState::FindParent(Position position) {\n  Position& parent_position = parent_[position];\n  if (parent_position == parent_[parent_position])\n    return parent_position;\n  return parent_position = FindParent(parent_position);\n}\n\nvoid NoGoState::InitNeighborList() {\n  for (int i = 0; i < kNOGO_BOARD_SIZE; i++) {\n    for (int j = 0; j < kNOGO_BOARD_SIZE; j++) {\n      int position = i * kNOGO_BOARD_SIZE + j;\n      if (i > 0)\n        neighbor_list_[position].push_back(position - kNOGO_BOARD_SIZE);\n      if (j > 0)\n        neighbor_list_[position].push_back(position - 1);\n      if (i < kNOGO_BOARD_SIZE - 1)\n        neighbor_list_[position].push_back(position + 
kNOGO_BOARD_SIZE);\n      if (j < kNOGO_BOARD_SIZE - 1)\n        neighbor_list_[position].push_back(position + 1);\n    }\n  }\n}\n"
  },
  {
    "path": "src/games/nogo_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"game_state.h\"\n#include \"nogo_action.h\"\n#include \"nogo_bitboard.h\"\n#include <vector>\n\nclass NoGoState : public GameState {\n private:\n  NoGoBitBoard bm_board_[2];\n  std::vector<Position> neighbor_list_[kNOGO_GRIDS_NUM];\n\n  Position parent_[kNOGO_GRIDS_NUM];\n  NoGoBitBoard liberty_[kNOGO_GRIDS_NUM];\n  NoGoBitBoard illegal_[2];\n  NoGoBitBoard warning_[2];  // might be suicide\n  NoGoBitBoard liberty_is_one_;\n\n public:\n  NoGoState();\n  void Reset();\n  bool PlayAction(NoGoAction action);  // return success or not\n  NoGoState& operator=(const NoGoState& rhs);\n  void Rotate(SYMMETRYTYPE type);\n\n  PLAYER GetPlayer(Position position) const;\n  bool IsLegalAction(NoGoAction action);\n  bool IsLegalAction(PLAYER player, Position position);\n  void ShowBoard() const;\n  void ShowLegalMove(PLAYER turn_player);\n  std::string ToString() const;\n  void PrintNeighborNum() const;\n  void PrintLiberty();\n  void PrintLibertyIsOne(bool check_again = false);\n  void PrintParent();\n\n private:\n  Position FindParent(Position p);\n  void InitNeighborList();\n};\n#include \"nogo_state.cc\"\n"
  },
  {
    "path": "src/games/nogo_zestate.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n#include \"game.h\"\n\ntypedef unsigned short Coord;\n\n#include \"nogo_game.h\"\n#include \"nogo_state.h\"\n#include \"time.h\"\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n//#include \"base/common.h\"\n\n//#include \"breakthrough.h\"\n\nclass StateForNogo : public State {\n public:\n  StateForNogo()\n      : State() {\n    Initialize();\n    _history = 0;\n    _outFeatures = false;\n  }\n\n  virtual ~StateForNogo() {\n  }\n\n  virtual void Initialize() override {\n    // People implementing classes should not have much to do in _moves; just\n    // _moves.clear().\n    _moves.clear();\n\n    _featSize[0] = 3;\n    _featSize[1] = 9;\n    _featSize[2] = 9;\n\n    // size of the output of the neural network; this should cover the positions\n    // of actions (above).\n    _actionSize[0] = 1;\n    _actionSize[1] = 9;\n    _actionSize[2] = 9;\n\n    // _hash is an unsigned int, it has to be *unique*.\n    _hash = 0;\n    _nogoGame.Reset();\n    _status = GameStatus::player0Turn;\n    // std::cout << \"restart!\" << std::endl;\n    // _features is a vector representing the current state. It can\n    // (must...) be large for complex games; here just one number\n    // between 0 and 1. 
trivial case in dimension 1.\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n    for (int i = 0; i < _features.size(); i++)\n      _features[i] = 0.;\n    clearActions();\n    for (int i = 0; i < 9; i++)\n      for (int j = 0; j < 9; j++) {\n        addAction(0, i, j);\n      }\n    fillFullFeatures();\n  }\n\n  virtual std::unique_ptr<tree_search::State> clone() const override {\n    auto newState = std::make_unique<StateForNogo>();\n    *newState = *this;\n    return newState;\n  }\n\n  // The action just decreases the distance and swaps the turn to play.\n  virtual void ApplyAction(const _Action& action) override {\n    assert(_status != GameStatus::player0Win);\n    assert(_status != GameStatus::player1Win);\n    NoGoAction nogoAction(_nogoGame.GetTurnPlayer(), action.GetHash());\n    if (_nogoGame.GetTurnPlayer() == PLAYER_0) {\n      _features[action.GetHash()] = 1.;\n      for (int i = 0; i < 81; i++)\n        _features[81 * 2 + i] = 1.;\n    } else {\n      _features[9 * 9 + action.GetHash()] = 1.;\n      for (int i = 0; i < 81; i++)\n        _features[81 * 2 + i] = 0.;\n    }\n    if (!_nogoGame.IsLegalAction(nogoAction)) {\n      // if (true) {\n      std::cerr << \" before move\" << std::endl;\n      _nogoGame.ShowState();\n      std::cerr << \" the proposed action \" << nogoAction.ToString()\n                << \" is legal ? 
\" << _nogoGame.IsLegalAction(nogoAction)\n                << std::endl;\n      _nogoGame.PlayAction(nogoAction);\n      std::cerr << \" after move\" << std::endl;\n      _nogoGame.ShowState(); /*assert(false);*/\n    } else {\n      _nogoGame.PlayAction(nogoAction);\n    }\n    // let us remove the nogoAction from legal actions\n    clearActions();\n    auto legal_actions = _nogoGame.GetLegalActions();\n    int index = 0;\n    for (const auto& action : legal_actions) {\n      addAction(0, action.GetPosition() % 9, action.GetPosition() / 9);\n    }\n    //   _nogoGame.ShowState();\n    // std::cerr << \" number of legal actions : \" << _legalActions.size() <<\n    // std::endl;\n    // first channel: black stones.\n    // second channel: white stones.\n    // third channel: 0 if player  black to play, 1 otherwise.\n    // assert(false);\n    if (_legalActions.size() == 0) {\n      // if (_nogoGame.IsTerminalState()) {\n      assert(_nogoGame.IsTerminalState());\n      if (_status == GameStatus::player0Turn)\n        _status = GameStatus::player0Win;\n      else\n        _status = GameStatus::player1Win;\n      //     _nogoGame.ShowState();\n\n      assert((_status == GameStatus::player0Win) ==\n             (_nogoGame.GetWinPlayer() == PLAYER_0));\n      assert((_status == GameStatus::player1Win) ==\n             (_nogoGame.GetWinPlayer() == PLAYER_1));\n    } else {\n      if (_status == GameStatus::player0Turn)\n        _status = GameStatus::player1Turn;\n      else\n        _status = GameStatus::player0Turn;\n    }\n    assert(_status == GameStatus::player1Win ||\n           _status == GameStatus::player0Win || _legalActions.size() > 0);\n    //      std::cerr << \" play 0 wins:\" << (_status == GameStatus::player0Win)\n    //      << std::endl;\n    //     std::cerr << \" play 1 wins:\" << (_status == GameStatus::player1Win)\n    //     << std::endl;\n    fillFullFeatures();\n  }\n\n  // For this trivial example we just compare to random play. 
Ok, this is not\n  // really a good action.\n  // By the way we need a good default DoGoodAction, e.g. one-ply at least.\n  // FIXME\n  virtual void DoGoodAction() override {\n\n    int i = rand() % _legalActions.size();\n    _Action a = *(_legalActions[i].get());\n    ApplyAction(a);\n  }\n\n private:\n  NoGoGame _nogoGame;\n};\n"
  },
  {
    "path": "src/games/othello.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: 林鈺錦 (Yù-Jǐn Lín)\n// - Github: https://github.com/abc1236762\n// - Email:  abc1236762@outlook.com\n// Facilitator: 邱顯棟 (Xiǎn-Dòng Qiū)\n// - Github: https://github.com/YumJelly\n// - Email:  yumjelly@gmail.com\n\n#pragma once\n\n#include <algorithm>\n#include <bitset>\n#include <cassert>\n#include <functional>\n#include <iostream>\n#include <memory>\n#include <mutex>\n#include <optional>\n#include <set>\n#include <sstream>\n#include <string>\n#include <tuple>\n\n#include \"../core/state.h\"\n#include \"commons/chessboard.h\"\n#include \"commons/player.h\"\n\nnamespace Othello {\n\nclass ChessKind {\n public:\n  static constexpr Chess empty = 0;\n  static constexpr Chess black = 1;\n  static constexpr Chess white = 2;\n};\n\nstruct Move {\n  Chess chess;\n  int x, y;\n};\n\ntemplate <int BR> class State : public core::State {\n  static_assert(BR >= 4 && BR % 2 == 0,\n                \"radix of board must be greater than or equal to 4 and even\");\n\n public:\n  using Board = Chessboard<BR, BR, false>;\n\n  State(int seed);\n  void Initialize() override;\n  std::unique_ptr<core::State> clone_() const override;\n  void ApplyAction(const ::_Action& action) override;\n  void DoGoodAction() override;\n  void printCurrentBoard() const override;\n  std::string stateDescription() const override;\n  std::string actionDescription(const ::_Action& action) const override;\n  std::string actionsDescription() const override;\n  int parseAction(const std::string& str) const override;\n  int humanInputAction(\n      std::function<std::optional<int>(std::string)> specialAction) override;\n\n private:\n  template <typename R> static void setupBoard(const R& re);\n  static constexpr Player chessToPlayer(Chess chess);\n  static constexpr Chess 
playerToChess(Player player);\n\n  void setInitialChesses();\n  void play(const Move& move);\n  bool canGoNext(Player nextPlayer, bool isPassMove);\n  Player findWinner();\n  void findLegalActions(Player player);\n  int countReverseChesses(const Move& move, int dx, int dy);\n  bool canDoReverse(const Move& move);\n  void doReverse(const Move& move);\n  inline Player getCurrentPlayer();\n  inline Player turnPlayer();\n  inline void setTerminatedStatus(Player winner);\n  void fillFeatures();\n\n  static constexpr int players = 2;\n  static constexpr int chessKinds = 2;\n  static constexpr int maxLegalActionsCnt = Board::squares - 4;\n  static constexpr std::tuple<int, int> directions[8] = {\n      {-1, -1}, {-1, 0}, {-1, 1}, {0, -1}, {0, 1}, {1, -1}, {1, 0}, {1, 1}};\n  static constexpr std::tuple<int, int> initialChessesPos[players][chessKinds] =\n      {{{Board::rows / 2 - 1, Board::columns / 2},\n        {Board::rows / 2, Board::columns / 2 - 1}},\n       {{Board::rows / 2 - 1, Board::columns / 2 - 1},\n        {Board::rows / 2, Board::columns / 2}}};\n  static inline std::once_flag setupCalled;\n\n  Board board;\n  std::bitset<Board::squares> areEmpty, candi;\n};\n\n// template <int BR> class Action : public ::_Action {\n// public:\n//  Action(int i, int x, int y, bool isPassMove);\n//};\n\ntemplate <int BR>\nState<BR>::State(int seed)\n    : core::State(seed) {\n  std::call_once(setupCalled, [&] { setupBoard(_rng); });\n}\n\ntemplate <int BR> void State<BR>::Initialize() {\n  _moves.clear();\n  _featSize = {chessKinds, Board::rows, Board::columns};\n  _features.resize(chessKinds * Board::squares);\n  _actionSize = {2, Board::rows, Board::columns};\n  _legalActions.reserve(maxLegalActionsCnt);\n  _status = GameStatus::player0Turn;\n\n  board.initialize();\n  areEmpty.set();\n  candi.reset();\n  setInitialChesses();\n  _hash = board.getHash();\n  findLegalActions(Player::first);\n  fillFeatures();\n}\n\ntemplate <int BR> std::unique_ptr<core::State> 
State<BR>::clone_() const {\n  return std::make_unique<State>(*this);\n}\n\ntemplate <int BR> void State<BR>::ApplyAction(const ::_Action& action) {\n  bool isPassMove = action.GetX();\n  if (!isPassMove) {\n    Move move{};\n    move.chess = playerToChess(getCurrentPlayer());\n    move.x = action.GetY();\n    move.y = action.GetZ();\n    play(move);\n  }\n  board.turnHash();\n  _hash = board.getHash();\n  Player nextPlayer = turnPlayer();\n  if (canGoNext(nextPlayer, isPassMove)) {\n    if (_legalActions.size() == 0) {\n      _legalActions.emplace_back(\n          _legalActions.size(), 1, Board::rows / 2, Board::columns / 2);\n    }\n    fillFeatures();\n  } else {\n    Player winner = findWinner();\n    setTerminatedStatus(winner);\n    _legalActions.clear();\n  }\n}\n\ntemplate <int BR> void State<BR>::DoGoodAction() {\n  DoRandomAction();\n}\n\ntemplate <int BR> void State<BR>::printCurrentBoard() const {\n  std::cout << board.sprint(\"  \");\n}\n\ntemplate <int BR> std::string State<BR>::stateDescription() const {\n  return board.sprint(\"  \");\n}\n\ntemplate <int BR>\nstd::string State<BR>::actionDescription(const ::_Action& action) const {\n  bool isPassMove = (bool)action.GetX();\n  if (isPassMove)\n    return \"passed\";\n  std::ostringstream oss;\n  oss << board.getPosStr(action.GetY(), action.GetZ());\n  return oss.str();\n}\n\ntemplate <int BR> std::string State<BR>::actionsDescription() const {\n  std::set<std::tuple<int, int>> markedPos;\n  if (_legalActions.size() >= 1 && _legalActions[0].GetX() == 0)\n    for (auto& legalAction : _legalActions)\n      markedPos.insert({legalAction.GetY(), legalAction.GetZ()});\n  return board.sprintBoard(\"  \", markedPos);\n}\n\ntemplate <int BR> int State<BR>::parseAction(const std::string& str) const {\n  if (_legalActions.size() == 1 && _legalActions[0].GetX() == 1)\n    return 0;\n  auto result = board.parsePosStr(str);\n  if (!result)\n    return -1;\n  auto [x, y] = result.value();\n  int i = 0;\n  for 
(auto& legalAction : _legalActions) {\n    if (legalAction.GetY() == x && legalAction.GetZ() == y)\n      return i;\n    i++;\n  }\n  return -1;\n}\n\ntemplate <int BR>\nint State<BR>::humanInputAction(\n    std::function<std::optional<int>(std::string)> specialAction) {\n  std::cout << \"Current board:\" << std::endl << stateDescription() << std::endl;\n  if (_legalActions.size() == 1 && _legalActions[0].GetX() == 1) {\n    std::cout << \"No positions to play.\" << std::endl;\n    std::cout << \"Input nothing to pass.\" << std::endl;\n  } else {\n    std::cout << \"Allowed positions: ('\" << Board::getMarkSymbol()\n              << \"' means an allowed position)\" << std::endl\n              << actionsDescription() << std::endl;\n    std::cout << \"Input a position to play: (uses format <alphabet of x-axis>\"\n              << \"<numbers of y-axis>, e.g. `A1`, `b2`, `C03`...)\" << std::endl;\n  }\n  std::string str;\n  int index = -1;\n  while (index < 0) {\n    std::cout << \"Input action: \";\n    std::getline(std::cin, str);\n    index = parseAction(str);\n    if (index < 0) {\n      if (auto r = specialAction(str); r)\n        return *r;\n      std::cout << \"invalid input, try again.\" << std::endl;\n    }\n  }\n  return index;\n}\n\ntemplate <int BR>\ntemplate <typename R>\nvoid State<BR>::setupBoard(const R& re) {\n  Board::setup({\"Empty\", \"Black\", \"White\"}, {\" \", \"●\", \"○\"}, re);\n}\n\ntemplate <int BR> constexpr Player State<BR>::chessToPlayer(Chess chess) {\n  if (chess == ChessKind::black)\n    return Player::first;\n  else if (chess == ChessKind::white)\n    return Player::second;\n  assert(chess == ChessKind::black || chess == ChessKind::white);\n  return Player::none;\n}\n\ntemplate <int BR> constexpr Chess State<BR>::playerToChess(Player player) {\n  if (player == Player::first)\n    return ChessKind::black;\n  else if (player == Player::second)\n    return ChessKind::white;\n  assert(player == Player::first || player == 
Player::second);\n  return ChessKind::empty;\n}\n\ntemplate <int BR> void State<BR>::setInitialChesses() {\n  for (int p = 0; p < players; p++) {\n    Chess chess = playerToChess(Player::set(p));\n    for (auto [x, y] : initialChessesPos[p]) {\n      board.setChess(x, y, chess);\n      areEmpty.reset(Board::posTo1D(x, y));\n    }\n  }\n  for (int y = Board::columns / 2 - 2; y < Board::columns / 2 + 2; y++)\n    for (int x = Board::rows / 2 - 2; x < Board::rows / 2 + 2; x++)\n      candi.set(Board::posTo1D(x, y));\n}\n\ntemplate <int BR> void State<BR>::play(const Move& move) {\n  board.setChess(move.x, move.y, move.chess);\n  areEmpty.reset(Board::posTo1D(move.x, move.y));\n  doReverse(move);\n}\n\ntemplate <int BR>\nbool State<BR>::canGoNext(Player nextPlayer, bool isPassMove) {\n  if (areEmpty.none())\n    return false;\n  findLegalActions(nextPlayer);\n  return _legalActions.size() != 0 || !isPassMove;\n}\n\ntemplate <int BR> Player State<BR>::findWinner() {\n  auto counts = board.countChesses();\n  if (counts[ChessKind::black] > counts[ChessKind::white])\n    return Player::first;\n  else if (counts[ChessKind::black] < counts[ChessKind::white])\n    return Player::second;\n  return Player::none;\n}\n\ntemplate <int BR> void State<BR>::findLegalActions(Player player) {\n  _legalActions.clear();\n  auto possibles = areEmpty & candi;\n  Chess chess = playerToChess(player);\n  int i = 0;\n  for (int xy = 0; xy < Board::squares; xy++) {\n    if (possibles[xy]) {\n      auto [x, y] = Board::posTo2D(xy);\n      Move legalMove = Move{chess, x, y};\n      if (canDoReverse(legalMove)) {\n        _legalActions.emplace_back(i++, 0, legalMove.x, legalMove.y);\n        assert(i <= maxLegalActionsCnt);\n      }\n    }\n  }\n}\n\ntemplate <int BR>\nint State<BR>::countReverseChesses(const Move& move, int dx, int dy) {\n  int x = move.x + dx, y = move.y + dy, chessCnt = 0;\n  while (Board::isPosInBoard(x, y)) {\n    if (Chess chess = board.getChess(x, y); chess == 
ChessKind::empty)\n      return 0;\n    else if (chess != move.chess)\n      chessCnt++;\n    else\n      return chessCnt;\n    x += dx, y += dy;\n  }\n  return 0;\n}\n\ntemplate <int BR> bool State<BR>::canDoReverse(const Move& move) {\n  for (auto [dx, dy] : directions)\n    if (countReverseChesses(move, dx, dy) > 0)\n      return true;\n  return false;\n}\n\ntemplate <int BR> void State<BR>::doReverse(const Move& move) {\n  for (auto [dx, dy] : directions) {\n    int x = move.x + dx, y = move.y + dy;\n    if (Board::isPosInBoard(x, y)) {\n      candi.set(Board::posTo1D(x, y));\n      int chessCnt = countReverseChesses(move, dx, dy);\n      for (int j = 0; j < chessCnt; j++) {\n        board.setChess(x, y, move.chess);\n        x += dx, y += dy;\n      }\n    }\n  }\n}\n\ntemplate <int BR> Player State<BR>::getCurrentPlayer() {\n  if (_status == GameStatus::player0Turn)\n    return Player::first;\n  else if (_status == GameStatus::player1Turn)\n    return Player::second;\n  assert(_status == GameStatus::player0Turn ||\n         _status == GameStatus::player1Turn);\n  return Player::none;\n}\n\ntemplate <int BR> Player State<BR>::turnPlayer() {\n  if (_status == GameStatus::player0Turn) {\n    _status = GameStatus::player1Turn;\n    return Player::second;\n  } else if (_status == GameStatus::player1Turn) {\n    _status = GameStatus::player0Turn;\n    return Player::first;\n  }\n  assert(_status == GameStatus::player0Turn ||\n         _status == GameStatus::player1Turn);\n  return Player::none;\n}\n\ntemplate <int BR> void State<BR>::setTerminatedStatus(Player winner) {\n  if (winner == Player::first)\n    _status = GameStatus::player0Win;\n  else if (winner == Player::second)\n    _status = GameStatus::player1Win;\n  else\n    _status = GameStatus::tie;\n}\n\ntemplate <int BR> void State<BR>::fillFeatures() {\n  std::fill(_features.begin(), _features.end(), 0.0);\n  auto* f = _features.data();\n  for (int c = 0; c < chessKinds; c++) {\n    Chess chess = 
static_cast<Chess>(c + 1);\n    for (int xy = 0; xy < Board::squares; xy++, f++)\n      if (board.getChess(xy) == chess)\n        *f = 1.0;\n  }\n  fillFullFeatures();\n}\n\n// template <int BR>\n// Action<BR>::Action(int i, int x, int y, bool isPassMove)\n//    : ::_Action() {\n//  _i = i;\n//  _loc = {isPassMove, x, y};\n//  _hash =\n//      isPassMove ? State<BR>::Board::squares : State<BR>::Board::rows * y + x;\n//}\n\n}  // namespace Othello\n"
  },
  {
    "path": "src/games/othello_opt.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <cstring>\n#include <iomanip>\n\n#include \"othello_opt.h\"\n\n//#define OTHELLO_DEBUG(arg)\n#define OTHELLO_DEBUG(arg) arg\n\nnamespace Othello2 {\n\nstatic constexpr size_t NUM_NEIGHBORS = 8;\nstatic constexpr std::array<int, 8> DROW = {-1, -1, -1, 0, 0, 1, 1, 1};\nstatic constexpr std::array<int, 8> DCOL = {-1, 0, 1, -1, 1, -1, 0, 1};\nstatic constexpr char BOARD_COORD_SMALL_LETTERS[] = \"abcdefghijkmnopqrstuvwxyz\";\nstatic constexpr char BOARD_COORD_CAPITAL_LETTERS[] =\n    \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\";\n\ntemplate <size_t SIZE> static constexpr bool isInBoard(int row, int col) {\n  return (row >= 0) && (col >= 0) && (row < static_cast<int>(SIZE)) &&\n         (col < static_cast<int>(SIZE));\n}  // isInBoard\n\ntemplate <typename Array, size_t SIZE>\nstatic typename Array::reference arrGet(Array& arr, size_t row, size_t col) {\n  return arr[row * SIZE + col];\n}  // arrGet\n\ntemplate <typename Array, size_t SIZE>\nstatic typename Array::const_reference arrGet(const Array& arr,\n                                              size_t row,\n                                              size_t col) {\n  return arr[row * SIZE + col];\n}  // arrGet\n\ntemplate <size_t SIZE>\nState<SIZE>::State(int seed)\n    : core::State(seed)\n    , _hasher(hashBook) {\n  std::call_once(hashBookConfigured, [this]() { hashBook.setup(_rng); });\n}  // State<SIZE>::State\n\ntemplate <size_t SIZE>\n/* virtual */ void State<SIZE>::Initialize() {\n  _status = GameStatus::player0Turn;\n\n  initializeBoard();\n  initializeHasher();\n  initializeCache();\n\n  _featSize = {NUM_PIECE_TYPES, SIZE, SIZE};\n  _features.resize(NUM_PIECE_TYPES * SIZE * SIZE, 0);\n  fillFeatures();\n  fillFullFeatures();\n\n  _actionSize = {NUM_PIECE_TYPES, SIZE, SIZE};\n  
_legalActions.reserve(SIZE * SIZE - 4);\n  RefillLegalActions();\n\n  _hash = _hasher.hash();\n}  // State<SIZE>::Initialize\n\ntemplate <size_t SIZE>\n/* virtual */ std::unique_ptr<core::State> State<SIZE>::clone_() const {\n  return std::make_unique<State>(*this);\n}  // State<SIZE>::clone_\n\ntemplate <size_t SIZE>\n/* virtual */ void State<SIZE>::ApplyAction(const ::_Action& action) {\n  assert((_status == GameStatus::player0Turn) ||\n         (_status == GameStatus::player1Turn));\n  auto stone = stoneToPlay();\n  bool skipTurn = (action.GetX() != 0);\n  if (skipTurn) {\n    // assert(!CanPutStone(stone));\n    nextTurn();\n    stone = stoneToPlay();\n    if (!CanPutStone(stone)) {\n      _legalActions.clear();\n      setTerminalStatus();\n    } else {\n      RefillLegalActions();\n      fillFeatures();\n      fillFullFeatures();\n      _hash = _hasher.hash();\n    }\n    return;\n  }\n  int col = action.GetY();\n  int row = action.GetZ();\n  PutStone(stone, row, col);\n  if (boardFilled()) {\n    setTerminalStatus();\n    return;\n  }\n  nextTurn();\n  RefillLegalActions();\n  fillFeatures();\n  fillFullFeatures();\n  _hash = _hasher.hash();\n}  // State<SIZE>::ApplyAction\n\ntemplate <size_t SIZE>\n/* virtual */ void State<SIZE>::DoGoodAction() {\n  DoRandomAction();\n}\n\ntemplate <size_t SIZE>\n/* virtual */ void State<SIZE>::printCurrentBoard() const {\n  std::cout << boardToString() << std::endl;\n}  // State<SIZE>::printCurrentBoard\n\ntemplate <size_t SIZE> std::string State<SIZE>::boardToString() const {\n  static constexpr bool SHOW_BOARD_COORDS =\n      SIZE < sizeof(BOARD_COORD_CAPITAL_LETTERS);\n  // static constexpr bool SHOW_BOARD_COORDS = false;\n  std::ostringstream oss;\n  if (SHOW_BOARD_COORDS) {\n    oss << std::setfill(' ') << std::setw(SIZE / 10 + 1) << ' ' << \"  \";\n    oss << std::string(BOARD_COORD_CAPITAL_LETTERS, SIZE) << std::endl;\n    oss << std::setfill(' ') << std::setw(SIZE / 10 + 1) << ' ' << \"  \";\n    oss << 
std::string(SIZE, '-') << std::endl;\n  }\n  for (size_t row = 0; row < SIZE; ++row) {\n    if (SHOW_BOARD_COORDS) {\n      oss << std::setfill(' ') << std::setw(SIZE / 10 + 1) << SIZE - row\n          << \" |\";\n    }\n    for (size_t col = 0; col < SIZE; ++col) {\n      switch (arrGet<Board, SIZE>(_board, row, col)) {\n      case EMPTY:\n        oss << EMPTY_STR;\n        break;\n      case BLACK:\n        oss << BLACK_STR;\n        break;\n      case WHITE:\n        oss << WHITE_STR;\n        break;\n      default:\n        oss << '?';\n      }\n    }\n    if (SHOW_BOARD_COORDS) {\n      oss << \"| \" << std::setfill(' ') << std::setw(SIZE / 10 + 1)\n          << SIZE - row;\n    }\n    oss << std::endl;\n  }\n  if (SHOW_BOARD_COORDS) {\n    oss << std::setfill(' ') << std::setw(SIZE / 10 + 1) << ' ' << \"  \";\n    oss << std::string(SIZE, '-') << std::endl;\n    oss << std::setfill(' ') << std::setw(SIZE / 10 + 1) << ' ' << \"  \";\n    oss << std::string(BOARD_COORD_CAPITAL_LETTERS, SIZE) << std::endl;\n  }\n  return oss.str();\n}  // State<SIZE>::boardToString\n\ntemplate <size_t SIZE> bool State<SIZE>::CanPutStone(Field stone) const {\n  for (size_t row = 0; row < SIZE; ++row) {\n    for (size_t col = 0; col < SIZE; ++col) {\n      if (CanPutStone(stone, row, col)) {\n        return true;\n      }\n    }\n  }\n  return false;\n}  // State<SIZE>::CanPutStone\n\ntemplate <size_t SIZE>\nbool State<SIZE>::CanPutStone(Field stone, size_t row, size_t col) const {\n  Field field = arrGet<Board, SIZE>(_board, row, col);\n  if ((field != EMPTY) || !arrGet<Cache, SIZE>(_cache, row, col)) {\n    return false;\n  }\n  size_t count;\n  int dr, dc, r, c;\n  for (size_t i = 0; i < NUM_NEIGHBORS; ++i) {\n    count = 0;\n    r = static_cast<int>(row);\n    c = static_cast<int>(col);\n    dr = DROW[i];\n    dc = DCOL[i];\n    while (isInBoard<SIZE>(r + dr, c + dc)) {\n      field = arrGet<Board, SIZE>(_board, r + dr, c + dc);\n      if (field == EMPTY) {\n        break;\n   
   } else if (field != stone) {\n        // opponent piece\n        ++count;\n      } else if (!count) {\n        // our piece straight after the location\n        break;\n      } else {\n        // our piece after a number of opponent pieces\n        return true;\n      }\n      r += dr;\n      c += dc;\n    }\n  }\n  return false;\n}  // State<SIZE>::CanPutStone\n\ntemplate <size_t SIZE>\nvoid State<SIZE>::PutStone(Field stone, size_t row, size_t col) {\n  assert(isInBoard<SIZE>(row, col));\n  // assert(CanPutStone(stone, row, col));\n  const size_t myHashOffset =\n      (stone == BLACK ? HASH_BLACK_OFFSET : HASH_WHITE_OFFSET);\n  const size_t theirHashOffset =\n      (stone == BLACK ? HASH_WHITE_OFFSET : HASH_BLACK_OFFSET);\n  Field field;\n  size_t count;\n  int dr, dc, r, c;\n  bool isInside;\n  arrGet<Board, SIZE>(_board, row, col) = stone;\n  _hasher.trigger(SIZE * row + col);\n  _hasher.trigger(myHashOffset + SIZE * row + col);\n  for (size_t i = 0; i < NUM_NEIGHBORS; ++i) {\n    count = 0;\n    r = static_cast<int>(row);\n    c = static_cast<int>(col);\n    dr = DROW[i];\n    dc = DCOL[i];\n    isInside = isInBoard<SIZE>(r + dr, c + dc);\n    if (isInside) {\n      arrGet<Cache, SIZE>(_cache, r + dr, c + dc) = 1;\n    }\n    while (isInside) {\n      field = arrGet<Board, SIZE>(_board, r + dr, c + dc);\n      if (field == EMPTY) {\n        break;\n      } else if (field != stone) {\n        // opponent piece\n        ++count;\n      } else if (!count) {\n        // our piece straight after the location\n        break;\n      } else {\n        // our piece after a number of opponent pieces\n        // move back and reverse stones\n        for (size_t j = 0; j < count; ++j) {\n          arrGet<Board, SIZE>(_board, r, c) = stone;\n          _hasher.trigger(theirHashOffset + SIZE * r + c);\n          _hasher.trigger(myHashOffset + SIZE * r + c);\n          r -= dr;\n          c -= dc;\n        }\n        break;\n      }\n      r += dr;\n      c += dc;\n      
isInside = isInBoard<SIZE>(r + dr, c + dc);\n    }\n  }\n}  // State<SIZE>::PutStone\n\ntemplate <size_t SIZE> bool State<SIZE>::boardFilled() const {\n  for (size_t i = 0; i < SIZE * SIZE; ++i) {\n    if (_board[i] == EMPTY) {\n      return false;\n    }\n  }\n  return true;\n}  // boardFilled\n\ntemplate <size_t SIZE>\nconstexpr typename State<SIZE>::Field State<SIZE>::stoneToPlay() const {\n  switch (_status) {\n  case GameStatus::player0Turn:\n    return BLACK;\n  case GameStatus::player1Turn:\n    return WHITE;\n  default:\n    return EMPTY;\n  }\n}  // State<SIZE>::stoneByStatus\n\ntemplate <size_t SIZE> void State<SIZE>::nextTurn() {\n  _status = (_status == GameStatus::player0Turn) ? GameStatus::player1Turn\n                                                 : GameStatus::player0Turn;\n  _hasher.trigger(HASHBOOK_SIZE - 1);\n}  // State<SIZE>::nextTurn\n\ntemplate <size_t SIZE> void State<SIZE>::RefillLegalActions() {\n  assert((_status == GameStatus::player0Turn) ||\n         (_status == GameStatus::player1Turn));\n  _legalActions.clear();\n  Field stoneToPlay = (_status == GameStatus::player0Turn ? 
BLACK : WHITE);\n  for (size_t row = 0; row < SIZE; ++row) {\n    for (size_t col = 0; col < SIZE; ++col) {\n      if ((arrGet<Board, SIZE>(_board, row, col) == EMPTY) &&\n          CanPutStone(stoneToPlay, row, col)) {\n        // add action\n        _legalActions.emplace_back(_legalActions.size(), 0, col, row);\n      }\n    }\n  }\n  if (_legalActions.empty() && !boardFilled()) {\n    _legalActions.emplace_back(_legalActions.size(), 1, SIZE / 2, SIZE / 2);\n  }\n}  // State<SIZE>::RefillLegalAction\n\ntemplate <size_t SIZE> void State<SIZE>::fillFeatures() {\n  auto* featuresBlack = _features.data();\n  auto* featuresWhite = featuresBlack + SIZE * SIZE;\n  memset(featuresBlack, 0, NUM_PIECE_TYPES * SIZE * SIZE * sizeof(float));\n  for (size_t i = 0; i < SIZE * SIZE; ++i) {\n    switch (_board[i]) {\n    case BLACK:\n      featuresBlack[i] = 1.0;\n      break;\n    case WHITE:\n      featuresWhite[i] = 1.0;\n      break;\n    default:\n      break;\n    }\n  }\n}  // State<SIZE>::fillFeatures\n\ntemplate <size_t SIZE> void State<SIZE>::initializeBoard() {\n  memset(_board.data(), 0, SIZE * SIZE * sizeof(typename Board::value_type));\n  _board[WHITE_INIT_OFFSET_1] = WHITE;\n  _board[WHITE_INIT_OFFSET_2] = WHITE;\n  _board[BLACK_INIT_OFFSET_1] = BLACK;\n  _board[BLACK_INIT_OFFSET_2] = BLACK;\n}  // State<SIZE>::initializeBoard\n\ntemplate <size_t SIZE> void State<SIZE>::initializeHasher() {\n  _hasher.reset();\n  for (unsigned i = 0; i < _board.size(); ++i) {\n    _hasher.trigger(i);\n  }\n  // black stones\n  _hasher.trigger(BLACK_INIT_OFFSET_1);\n  _hasher.trigger(HASH_BLACK_OFFSET + BLACK_INIT_OFFSET_1);\n  _hasher.trigger(BLACK_INIT_OFFSET_2);\n  _hasher.trigger(HASH_BLACK_OFFSET + BLACK_INIT_OFFSET_2);\n  // white stones\n  _hasher.trigger(WHITE_INIT_OFFSET_1);\n  _hasher.trigger(HASH_WHITE_OFFSET + WHITE_INIT_OFFSET_1);\n  _hasher.trigger(WHITE_INIT_OFFSET_2);\n  _hasher.trigger(HASH_WHITE_OFFSET + WHITE_INIT_OFFSET_2);\n}  // 
State<SIZE>::initializeHasher\n\ntemplate <size_t SIZE> void State<SIZE>::initializeCache() {\n  memset(_cache.data(), 0, SIZE * SIZE * sizeof(typename Cache::value_type));\n  for (size_t row = SIZE / 2 - 2; row < SIZE / 2 + 2; ++row) {\n    for (size_t col = SIZE / 2 - 2; col < SIZE / 2 + 2; ++col) {\n      arrGet<Cache, SIZE>(_cache, row, col) = 1;\n    }\n  }\n}  // State<SIZE>::initializeCache\n\ntemplate <size_t SIZE> void State<SIZE>::setTerminalStatus() {\n  size_t nWhite = 0;\n  size_t nBlack = 0;\n  for (size_t i = 0; i < SIZE * SIZE; ++i) {\n    switch (_board[i]) {\n    case WHITE:\n      ++nWhite;\n      break;\n    case BLACK:\n      ++nBlack;\n      break;\n    default:\n      break;\n    }\n  }\n  if (nWhite > nBlack) {\n    _status = GameStatus::player1Win;\n  } else if (nBlack > nWhite) {\n    _status = GameStatus::player0Win;\n  } else {\n    _status = GameStatus::tie;\n  }\n}  // State<SIZE>::setTerminalStatus\n\ntemplate class State<4>;\ntemplate class State<6>;\ntemplate class State<8>;\ntemplate class State<10>;\ntemplate class State<12>;\ntemplate class State<14>;\ntemplate class State<16>;\n\n}  // namespace Othello2\n"
  },
  {
    "path": "src/games/othello_opt.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"../core/state.h\"\n#include \"commons/hash.h\"\n#include <array>\n#include <mutex>\n\nnamespace Othello2 {\n\ntemplate <size_t SIZE> class State : public core::State {\n\n  static_assert(SIZE >= 4, \"Board too small\");\n  static_assert(SIZE % 2 == 0, \"Board has odd size\");\n  static constexpr size_t NUM_PIECE_TYPES = 2;\n  static constexpr size_t NUM_FIELD_TYPES = NUM_PIECE_TYPES + 1;\n  // using _Action = Action<SIZE>;\n  static constexpr size_t HASHBOOK_SIZE = SIZE * SIZE * NUM_FIELD_TYPES + 1;\n  static constexpr size_t HASH_BLACK_OFFSET = SIZE * SIZE;\n  static constexpr size_t HASH_WHITE_OFFSET = 2 * SIZE * SIZE;\n  static constexpr size_t WHITE_INIT_OFFSET_1 =\n      SIZE * (SIZE / 2 - 1) + SIZE / 2 - 1;\n  static constexpr size_t WHITE_INIT_OFFSET_2 = SIZE * SIZE / 2 + SIZE / 2;\n  static constexpr size_t BLACK_INIT_OFFSET_1 =\n      SIZE * (SIZE / 2 - 1) + SIZE / 2;\n  static constexpr size_t BLACK_INIT_OFFSET_2 = SIZE * SIZE / 2 + SIZE / 2 - 1;\n  using _HashBook = HashBook<uint64_t, HASHBOOK_SIZE>;\n  using _Hasher = Hasher<uint64_t, HASHBOOK_SIZE>;\n  using Cache = std::array<uint8_t, SIZE * SIZE>;\n\n public:\n  using Field = uint8_t;\n  using Board = std::array<Field, SIZE * SIZE>;\n\n  static constexpr Field EMPTY = 0;\n  static constexpr Field BLACK = 1;\n  static constexpr Field WHITE = 2;\n  static constexpr char EMPTY_STR[] = \".\";\n  static constexpr char BLACK_STR[] = \"x\";\n  static constexpr char WHITE_STR[] = \"o\";\n\n  State(int seed);\n  virtual void Initialize() override;\n  virtual std::unique_ptr<core::State> clone_() const override;\n  virtual void ApplyAction(const ::_Action& action) override;\n  virtual void DoGoodAction() override;\n  virtual void printCurrentBoard() 
const override;\n\n  const Board& GetBoard() const {\n    return _board;\n  }\n\n private:\n  std::string boardToString() const;\n  bool CanPutStone(Field stone) const;\n  bool CanPutStone(Field stone, size_t row, size_t col) const;\n  void PutStone(Field stone, size_t row, size_t col);\n  bool boardFilled() const;\n  constexpr Field stoneToPlay() const;\n  void nextTurn();\n\n  void RefillLegalActions();\n  void fillFeatures();\n  void initializeBoard();\n  void initializeHasher();\n  void initializeCache();\n  void setTerminalStatus();\n\n  static std::once_flag hashBookConfigured;\n  static _HashBook hashBook;\n  _Hasher _hasher;\n  Board _board;\n  Cache _cache;\n\n};  // class State\n\ntemplate <size_t SIZE> std::once_flag State<SIZE>::hashBookConfigured;\n\ntemplate <size_t SIZE> typename State<SIZE>::_HashBook State<SIZE>::hashBook;\n\n}  // namespace Othello2\n"
  },
  {
    "path": "src/games/outeropengomoku_new.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\n#include \"../core/state.h\"\n\nclass StateForOOGomoku : public core::State {\n public:\n  StateForOOGomoku(int seed)\n      : State(seed) {\n  }\n\n  virtual void Initialize() override {\n    _moves.clear();\n    _hash = 2166136261u;\n    _status = GameStatus::player0Turn;\n    _featSize[0] = 3;\n    _featSize[1] = boardHeight;\n    _featSize[2] = boardWidth;\n    _actionSize[0] = 1;\n    _actionSize[1] = boardWidth;\n    _actionSize[2] = boardHeight;\n    _features.clear();\n    _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n    std::fill(_features.begin(), _features.end(), 1.0f);\n    board.clear();\n    board.resize(boardWidth * boardHeight, 0);\n    FirstMove = 1;\n    featurize();\n    findActions();\n    fillFullFeatures();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForOOGomoku>(*this);\n  }\n\n  virtual void printCurrentBoard() const override {\n    std::cout << \"printing board\" << std::endl << std::flush;\n    for (int r = boardHeight - 1; r >= 0; --r) {\n      std::cout << \"|\";\n      for (int c = 0; c < boardWidth; ++c) {\n        auto val = board[r * boardWidth + c];\n        if (val == 0) {\n          std::cout << \" \";\n        } else if (val == 1) {\n          std::cout << \"X\";\n        } else if (val == 2) {\n          std::cout << \"O\";\n        } else {\n          assert(false);\n        }\n        std::cout << \"|\";\n      }\n      std::cout << std::endl;\n    }\n  }\n\n  void featurize() {\n    int player = 1 + getCurrentPlayer();\n    int otherPlayer = player == 1 ? 
2 : 1;\n    for (int i = 0; i != (int)board.size(); ++i) {\n      int v = board[i];\n      _features[i] = v == player;\n      _features[board.size() + i] = v == otherPlayer;\n    }\n  }\n\n  void findActions() {\n    clearActions();\n    if (FirstMove) {\n      FirstMove = 0;\n      for (int i = 0; i < boardWidth; ++i) {\n        for (int j = 0; j < boardHeight; ++j) {\n          if ((i == 0 || i == 1 || i == 13 || i == 14 || j == 0 || j == 1 ||\n               j == 13 || j == 14))\n            addAction(0, i, j);\n        }\n      }\n    } else {\n      for (int i = 0; i < boardWidth; ++i) {\n        for (int j = 0; j < boardHeight; ++j) {\n          auto pos = i + j * boardHeight;\n          if (board[pos] == 0)\n            addAction(0, i, j);\n        }\n      }\n    }\n  }\n\n  virtual void ApplyAction(const _Action& action) override {\n    int x = action.GetY();\n    int y = action.GetZ();\n    int player = 1 + getCurrentPlayer();\n    size_t index = x + y * boardWidth;\n    board.at(index) = player;\n    _hash ^= index;\n    _hash *= 16777619u;\n    auto count = [&](int dx, int dy) {\n      int nx = x + dx;\n      int ny = y + dy;\n      int r = 0;\n      int stride = dx + dy * boardWidth;\n      size_t nIndex = index + stride;\n      while (nx >= 0 && nx < boardWidth && ny >= 0 && ny < boardHeight &&\n             board.at(nIndex) == player) {\n        ++r;\n        nIndex += stride;\n        nx += dx;\n        ny += dy;\n      }\n      return r;\n    };\n    bool won = count(-1, 0) + count(1, 0) >= 4;\n    won |= count(0, -1) + count(0, 1) >= 4;\n    won |= count(-1, -1) + count(1, 1) >= 4;\n    won |= count(1, -1) + count(-1, 1) >= 4;\n    if (won) {\n      _status = player == 1 ? GameStatus::player0Win : GameStatus::player1Win;\n    } else {\n      featurize();\n      findActions();\n      if (_legalActions.empty()) {\n        _status = GameStatus::tie;\n      } else {\n        _status =\n            player == 1 ? 
GameStatus::player1Turn : GameStatus::player0Turn;\n      }\n    }\n    fillFullFeatures();\n  }\n\n  virtual void DoGoodAction() override {\n    return DoRandomAction();\n  }\n\n  static const int boardWidth = 15;\n  static const int boardHeight = 15;\n  bool FirstMove;\n  std::vector<char> board;\n};\n"
  },
  {
    "path": "src/games/shogi.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n#include <assert.h>\n#include <string>\n#include <vector>\n\nclass Shogi {\n public:\n  const static int Dx = 5;\n  const static int Dy = 5;\n  const static int White = 0;  // player0\n  const static int Black = 1;  // player1\n  const static int Empty = 2;\n\n  const static int MaxPlayoutLength = 1000;\n\n  enum class PieceType {\n    None = 0,\n    King,\n    Gold,\n    Silver,\n    Bishop,\n    Rook,\n    Pawn,\n    Gold2,\n    Silver2,\n    Bishop2,\n    Rook2,\n    Pawn2\n  };\n\n  class Position {\n   public:\n    int x, y;\n    Position() {\n      x = y = -1;\n    }\n\n    Position(int X, int Y) {\n      x = X;\n      y = Y;\n    }\n\n    bool on_board() const {\n      assert((x == -1 && y == -1) ||\n             (x >= -1 && y >= -1 && x <= Dx && y <= Dy));\n      return (x >= 0 && y >= 0 && x < Dx && y < Dy);\n    }\n\n    Position operator+(const Position& p) {\n      return Position(x + p.x, y + p.y);\n    }\n\n    bool operator==(const Position& p) const {\n      return (x == p.x && y == p.y);\n    }\n    bool operator!=(const Position& p) const {\n      return x != p.x || y != p.y;\n    }\n  };\n\n  class Piece {\n   public:\n    PieceType type;\n    int color;\n    bool promoted;\n    Position pos;\n\n    Piece() {\n      color = Empty;\n      type = PieceType::None;\n      promoted = false;\n    }\n\n    Piece(int c, PieceType t, bool p, Position P = Position(-1, -1)) {\n      color = c;\n      type = t;\n      promoted = p;\n      pos = P;\n    }\n\n    std::string print() const {\n      std::string str;\n      switch (type) {\n      case PieceType::None:\n        str += \"  \";\n        break;\n\n      case PieceType::King:\n        if (color == Black)\n          str += \"k\";\n        else\n          str 
+= \"K\";\n        break;\n\n      case PieceType::Gold:\n      case PieceType::Gold2:\n        if (color == Black)\n          str += \"g\";\n        else\n          str += \"G\";\n        break;\n\n      case PieceType::Silver:\n      case PieceType::Silver2:\n        if (promoted)\n          str += \"+\";\n        if (color == Black)\n          str += \"s\";\n        else\n          str += \"S\";\n        break;\n\n      case PieceType::Bishop:\n      case PieceType::Bishop2:\n        if (promoted)\n          str += \"+\";\n        if (color == Black)\n          str += \"b\";\n        else\n          str += \"B\";\n        break;\n\n      case PieceType::Rook:\n      case PieceType::Rook2:\n        if (promoted)\n          str += \"+\";\n        if (color == Black)\n          str += \"r\";\n        else\n          str += \"R\";\n        break;\n\n      case PieceType::Pawn:\n      case PieceType::Pawn2:\n        if (promoted)\n          str += \"+\";\n        if (color == Black)\n          str += \"p\";\n        else\n          str += \"P\";\n        break;\n\n      default:\n        break;\n      }\n      return str;\n    }\n  };\n\n  class Move {\n   public:\n    Piece piece;\n    Position next;\n    bool promote;\n  };\n\n  Piece board[5][5];\n  std::vector<std::vector<Piece>> chess;  // 0 = White, 1 = Black\n  // Move rollout[1000];\n\n  void king_moves(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    short dx[] = {1, 1, 0, -1, -1, -1, 0, 1};\n    short dy[] = {0, 1, 1, 1, 0, -1, -1, -1};\n    for (int i = 0; i < 8; ++i) {\n      m.next = m.piece.pos + Position(dx[i], dy[i]);\n      // 在棋盤上、是對方的棋或空\n      if (m.next.on_board() && board[m.next.x][m.next.y].color != m.piece.color)\n        moves.push_back(m);\n    }\n  }\n\n  void gold_moves(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    if (m.piece.color == White) {\n      short dx[] = {1, 1, 0, -1, -1, 0};\n      
short dy[] = {0, 1, 1, 1, 0, -1};\n      for (int i = 0; i < 6; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color)\n          moves.push_back(m);\n      }\n    } else {\n      short dx[] = {1, 0, -1, -1, 0, 1};\n      short dy[] = {0, 1, 0, -1, -1, -1};\n      for (int i = 0; i < 6; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color)\n          moves.push_back(m);\n      }\n    }\n  }\n\n  void silver_moves(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    if (m.piece.promoted) {\n      gold_moves(p, moves);\n      return;\n    }\n    if (m.piece.color == White) {\n      short dx[] = {1, 0, -1, -1, 1};\n      short dy[] = {1, 1, 1, -1, -1};\n      for (int i = 0; i < 5; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color) {\n          moves.push_back(m);\n          if (m.next.y == 4) {\n            m.promote = true;\n            moves.push_back(m);\n            m.promote = false;\n          }\n        }\n      }\n    } else {\n      short dx[] = {1, -1, -1, 0, 1};\n      short dy[] = {1, 1, -1, -1, -1};\n      for (int i = 0; i < 5; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color) {\n          moves.push_back(m);\n          if (m.next.y == 0) {\n            m.promote = true;\n            moves.push_back(m);\n            m.promote = false;\n          }\n        }\n      }\n    }\n  }\n\n  void bishop_moves(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    short dx[] = {1, -1, -1, 1};\n    short dy[] = {1, 1, -1, -1};\n    for (int i = 0; i < 4; 
++i) {\n      m.next = m.piece.pos + Position(dx[i], dy[i]);\n      while (m.next.on_board() &&\n             board[m.next.x][m.next.y].color != m.piece.color) {\n        moves.push_back(m);\n        if (m.piece.color == White) {\n          if (m.next.y == 4 && !m.piece.promoted) {\n            m.promote = true;\n            moves.push_back(m);\n            m.promote = false;\n          }\n        } else {  // Black\n          if (m.next.y == 0 && !m.piece.promoted) {\n            m.promote = true;\n            moves.push_back(m);\n            m.promote = false;\n          }\n        }\n        if (board[m.next.x][m.next.y].color != Empty)\n          break;\n        m.next = m.next + Position(dx[i], dy[i]);\n      }\n    }\n    if (m.piece.promoted) {\n      short dx[] = {1, 0, -1, 0};\n      short dy[] = {0, 1, 0, -1};\n      for (int i = 0; i < 4; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color)\n          moves.push_back(m);\n      }\n    }\n  }\n\n  void rook_moves(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    short dx[] = {1, 0, -1, 0};\n    short dy[] = {0, 1, 0, -1};\n    for (int i = 0; i < 4; ++i) {\n      m.next = m.piece.pos + Position(dx[i], dy[i]);\n      while (m.next.on_board() &&\n             board[m.next.x][m.next.y].color != m.piece.color) {\n        moves.push_back(m);\n        if (m.piece.color == White) {\n          if (m.next.y == 4 && !m.piece.promoted) {\n            m.promote = true;\n            moves.push_back(m);\n            m.promote = false;\n          }\n        } else {  // Black\n          if (m.next.y == 0 && !m.piece.promoted) {\n            m.promote = true;\n            moves.push_back(m);\n            m.promote = false;\n          }\n        }\n        if (board[m.next.x][m.next.y].color != Empty)\n          break;\n        m.next = m.next + Position(dx[i], dy[i]);\n 
     }\n    }\n    if (m.piece.promoted) {\n      short dx[] = {1, -1, -1, 1};\n      short dy[] = {1, 1, -1, -1};\n      for (int i = 0; i < 4; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color)\n          moves.push_back(m);\n      }\n    }\n  }\n\n  void pawn_moves(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    if (m.piece.promoted) {\n      gold_moves(p, moves);\n      return;\n    }\n    if (m.piece.color == White) {\n      m.next = m.piece.pos + Position(0, 1);\n      if (m.next.on_board() &&\n          board[m.next.x][m.next.y].color != m.piece.color) {\n        if (m.next.y != 4)\n          moves.push_back(m);\n        else {\n          m.promote = true;\n          moves.push_back(m);\n          m.promote = false;\n        }\n      }\n    } else {\n      m.next = m.piece.pos + Position(0, -1);\n      if (m.next.on_board() &&\n          board[m.next.x][m.next.y].color != m.piece.color) {\n        if (m.next.y != 0)\n          moves.push_back(m);\n        else {\n          m.promote = true;\n          moves.push_back(m);\n          m.promote = false;\n        }\n      }\n    }\n  }\n\n  void legal_king_moves(Piece p, std::vector<Move>& moves) {\n    // fprintf(stderr, \"legal king moves (%c, %d)\\n\", p.pos.x+'A', p.pos.y);\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    short dx[] = {1, 1, 0, -1, -1, -1, 0, 1};\n    short dy[] = {0, 1, 1, 1, 0, -1, -1, -1};\n    for (int i = 0; i < 8; ++i) {\n      m.next = m.piece.pos + Position(dx[i], dy[i]);\n      if (m.next.on_board() &&\n          board[m.next.x][m.next.y].color != m.piece.color) {\n        Piece pp;\n        if (board[m.next.x][m.next.y].color != Empty) {\n          pp = board[m.next.x][m.next.y];\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.pos == pp.pos) {\n              i.pos = Position(-1, -1);\n 
             break;\n            }\n          }\n        }\n        board[m.piece.pos.x][m.piece.pos.y] = Piece();\n        board[m.next.x][m.next.y] = m.piece;\n\n        if (!check(m.next, opponent(m.piece.color))) {\n          moves.push_back(m);\n        }\n\n        board[m.next.x][m.next.y] = pp;\n        board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n        for (auto& i : chess[opponent(m.piece.color)]) {\n          if (i.type == pp.type && !i.pos.on_board()) {\n            i.pos = pp.pos;\n            break;\n          }\n        }\n      }\n    }\n\n    // fprintf(stderr, \"end legal king moves\\n\");\n  }\n\n  void legal_gold_moves(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    Piece king;\n    for (auto i : chess[p.color]) {\n      if (i.type == PieceType::King) {\n        king = i;\n        break;\n      }\n    }\n    if (m.piece.color == White) {\n      short dx[] = {1, 1, 0, -1, -1, 0};\n      short dy[] = {0, 1, 1, 1, 0, -1};\n      for (int i = 0; i < 6; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        // fprintf(stderr, \"g next: %c %d\\n\", m.next.x+'A', m.next.y);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color) {\n          Piece pp;\n          if (board[m.next.x][m.next.y].color != Empty) {\n            pp = board[m.next.x][m.next.y];\n            for (auto& i : chess[opponent(m.piece.color)]) {\n              if (i.pos == pp.pos) {\n                i.pos = Position(-1, -1);\n                break;\n              }\n            }\n          }\n          board[m.piece.pos.x][m.piece.pos.y] = Piece();\n          board[m.next.x][m.next.y] = m.piece;\n\n          // std::string str;\n          // str += \"   A| B| C| D| E\\n\";\n          // for(int i=Dy-1; i>=0; --i) {\n          //     str += std::to_string(i) + ' ';\n          //     for(int j=0; j<Dx; ++j) {\n          //         if(j > 0) str += '|';\n          //         
str += board[j][i].print();\n          //     }\n          //     str += '\\n';\n          // }\n          // std::cerr << str;\n\n          if (!check(king.pos, opponent(king.color)))\n            moves.push_back(m);\n\n          board[m.next.x][m.next.y] = pp;\n          board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.type == pp.type && !i.pos.on_board()) {\n              i.pos = pp.pos;\n              break;\n            }\n          }\n        }\n      }\n    } else {\n      short dx[] = {1, 0, -1, -1, 0, 1};\n      short dy[] = {0, 1, 0, -1, -1, -1};\n      for (int i = 0; i < 6; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        // fprintf(stderr, \"g next: %c %d\\n\", m.next.x+'A', m.next.y);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color) {\n          Piece pp;\n          if (board[m.next.x][m.next.y].color != Empty) {\n            pp = board[m.next.x][m.next.y];\n            for (auto& i : chess[opponent(m.piece.color)]) {\n              if (i.pos == pp.pos) {\n                i.pos = Position(-1, -1);\n                break;\n              }\n            }\n          }\n          board[m.piece.pos.x][m.piece.pos.y] = Piece();\n          board[m.next.x][m.next.y] = m.piece;\n\n          if (!check(king.pos, opponent(king.color)))\n            moves.push_back(m);\n\n          board[m.next.x][m.next.y] = pp;\n          board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.type == pp.type && !i.pos.on_board()) {\n              i.pos = pp.pos;\n              break;\n            }\n          }\n        }\n      }\n    }\n  }\n\n  void legal_silver_moves(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    if (m.piece.promoted) {\n      legal_gold_moves(p, moves);\n      return;\n    }\n    Piece 
king;\n    for (auto i : chess[p.color]) {\n      if (i.type == PieceType::King) {\n        king = i;\n        break;\n      }\n    }\n    if (m.piece.color == White) {\n      short dx[] = {1, 0, -1, -1, 1};\n      short dy[] = {1, 1, 1, -1, -1};\n      for (int i = 0; i < 5; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color) {\n          Piece pp;\n          if (board[m.next.x][m.next.y].color != Empty) {\n            pp = board[m.next.x][m.next.y];\n            for (auto& i : chess[opponent(m.piece.color)]) {\n              if (i.pos == pp.pos) {\n                i.pos = Position(-1, -1);\n                break;\n              }\n            }\n          }\n          board[m.piece.pos.x][m.piece.pos.y] = Piece();\n          board[m.next.x][m.next.y] = m.piece;\n\n          if (!check(king.pos, opponent(king.color))) {\n            moves.push_back(m);\n            if ((m.next.y == 4 || m.piece.pos.y == 4) && !m.piece.promoted) {\n              m.promote = true;\n              moves.push_back(m);\n              m.promote = false;\n            }\n          }\n\n          board[m.next.x][m.next.y] = pp;\n          board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.type == pp.type && !i.pos.on_board()) {\n              i.pos = pp.pos;\n              break;\n            }\n          }\n        }\n      }\n    } else {\n      short dx[] = {1, -1, -1, 0, 1};\n      short dy[] = {1, 1, -1, -1, -1};\n      for (int i = 0; i < 5; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color) {\n          Piece pp;\n          if (board[m.next.x][m.next.y].color != Empty) {\n            pp = board[m.next.x][m.next.y];\n            for (auto& i : chess[opponent(m.piece.color)]) {\n              if 
(i.pos == pp.pos) {\n                i.pos = Position(-1, -1);\n                break;\n              }\n            }\n          }\n          board[m.piece.pos.x][m.piece.pos.y] = Piece();\n          board[m.next.x][m.next.y] = m.piece;\n\n          if (!check(king.pos, opponent(king.color))) {\n            moves.push_back(m);\n            if ((m.next.y == 0 || m.piece.pos.y == 0) && !m.piece.promoted) {\n              m.promote = true;\n              moves.push_back(m);\n              m.promote = false;\n            }\n          }\n\n          board[m.next.x][m.next.y] = pp;\n          board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.type == pp.type && !i.pos.on_board()) {\n              i.pos = pp.pos;\n              break;\n            }\n          }\n        }\n      }\n    }\n  }\n\n  void legal_bishop_moves(Piece p, std::vector<Move>& moves) {\n    // fprintf(stderr, \"innnnnnnnnnnnnnBishop\\n\");\n    // for(int i=0; i<2; ++i) {\n    //     for(auto j : chess[i]) {\n    //         fprintf(stderr, \"(%c,%d) \", j.pos.x+'A', j.pos.y);\n    //     }\n    //     std::cerr << std::endl;\n    // }\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    Piece king;\n    for (auto i : chess[p.color]) {\n      if (i.type == PieceType::King) {\n        king = i;\n        break;\n      }\n    }\n    short dx[] = {1, -1, -1, 1};\n    short dy[] = {1, 1, -1, -1};\n    for (int i = 0; i < 4; ++i) {\n      m.next = m.piece.pos + Position(dx[i], dy[i]);\n      while (m.next.on_board() &&\n             board[m.next.x][m.next.y].color != m.piece.color) {\n        Piece pp;\n        if (board[m.next.x][m.next.y].color != Empty) {\n          pp = board[m.next.x][m.next.y];\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.pos == pp.pos) {\n              i.pos = Position(-1, -1);\n              break;\n            }\n          }\n        }\n        
board[m.piece.pos.x][m.piece.pos.y] = Piece();\n        board[m.next.x][m.next.y] = m.piece;\n\n        if (!check(king.pos, opponent(king.color))) {\n          moves.push_back(m);\n          if (m.piece.color == White) {\n            if ((m.next.y == 4 || m.piece.pos.y == 4) && !m.piece.promoted) {\n              m.promote = true;\n              moves.push_back(m);\n              m.promote = false;\n            }\n          } else {  // Black\n            if ((m.next.y == 0 || m.piece.pos.y == 0) && !m.piece.promoted) {\n              m.promote = true;\n              moves.push_back(m);\n              m.promote = false;\n            }\n          }\n        }\n\n        board[m.next.x][m.next.y] = pp;\n        board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n        for (auto& i : chess[opponent(m.piece.color)]) {\n          if (i.type == pp.type && !i.pos.on_board()) {\n            i.pos = pp.pos;\n            break;\n          }\n        }\n\n        if (board[m.next.x][m.next.y].color != Empty)\n          break;\n        m.next = m.next + Position(dx[i], dy[i]);\n      }\n    }\n    if (m.piece.promoted) {\n      short dx[] = {1, 0, -1, 0};\n      short dy[] = {0, 1, 0, -1};\n      for (int i = 0; i < 4; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color) {\n          Piece pp;\n          if (board[m.next.x][m.next.y].color != Empty) {\n            pp = board[m.next.x][m.next.y];\n            for (auto& i : chess[opponent(m.piece.color)]) {\n              if (i.pos == pp.pos) {\n                i.pos = Position(-1, -1);\n                break;\n              }\n            }\n          }\n          board[m.piece.pos.x][m.piece.pos.y] = Piece();\n          board[m.next.x][m.next.y] = m.piece;\n\n          if (!check(king.pos, opponent(king.color))) {\n            moves.push_back(m);\n          }\n\n          board[m.next.x][m.next.y] = pp;\n          
board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.type == pp.type && !i.pos.on_board()) {\n              i.pos = pp.pos;\n              break;\n            }\n          }\n        }\n      }\n    }\n    // fprintf(stderr, \"innnnnnnnnnnnnnBiiiiiii\\n\");\n    // for(int i=0; i<2; ++i) {\n    //     for(auto j : chess[i]) {\n    //         fprintf(stderr, \"(%c,%d) \", j.pos.x+'A', j.pos.y);\n    //     }\n    //     std::cerr << std::endl;\n    // }\n  }\n\n  void legal_rook_moves(Piece p, std::vector<Move>& moves) {\n    // fprintf(stderr, \"innnnnnnnnnnnnnRook\\n\");\n    // for(int i=0; i<2; ++i) {\n    //     for(auto j : chess[i]) {\n    //         fprintf(stderr, \"(%c,%d) \", j.pos.x+'A', j.pos.y);\n    //     }\n    //     std::cerr << std::endl;\n    // }\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    Piece king;\n    for (auto i : chess[p.color]) {\n      if (i.type == PieceType::King) {\n        king = i;\n        break;\n      }\n    }\n    short dx[] = {1, 0, -1, 0};\n    short dy[] = {0, 1, 0, -1};\n    for (int i = 0; i < 4; ++i) {\n      m.next = m.piece.pos + Position(dx[i], dy[i]);\n      while (m.next.on_board() &&\n             board[m.next.x][m.next.y].color != m.piece.color) {\n        Piece pp;\n        if (board[m.next.x][m.next.y].color != Empty) {\n          pp = board[m.next.x][m.next.y];\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.pos == pp.pos) {\n              i.pos = Position(-1, -1);\n              break;\n            }\n          }\n        }\n        board[m.piece.pos.x][m.piece.pos.y] = Piece();\n        board[m.next.x][m.next.y] = m.piece;\n\n        if (!check(king.pos, opponent(king.color))) {\n          moves.push_back(m);\n          if (m.piece.color == White) {\n            if ((m.next.y == 4 || m.piece.pos.y == 4) && !m.piece.promoted) {\n              m.promote = true;\n              
moves.push_back(m);\n              m.promote = false;\n            }\n          } else {  // Black\n            if ((m.next.y == 0 || m.piece.pos.y == 0) && !m.piece.promoted) {\n              m.promote = true;\n              moves.push_back(m);\n              m.promote = false;\n            }\n          }\n        }\n\n        board[m.next.x][m.next.y] = pp;\n        board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n        for (auto& i : chess[opponent(m.piece.color)]) {\n          if (i.type == pp.type && !i.pos.on_board()) {\n            i.pos = pp.pos;\n            break;\n          }\n        }\n\n        if (board[m.next.x][m.next.y].color != Empty)\n          break;\n        m.next = m.next + Position(dx[i], dy[i]);\n      }\n    }\n    if (m.piece.promoted) {\n      short dx[] = {1, -1, -1, 1};\n      short dy[] = {1, 1, -1, -1};\n      for (int i = 0; i < 4; ++i) {\n        m.next = m.piece.pos + Position(dx[i], dy[i]);\n        if (m.next.on_board() &&\n            board[m.next.x][m.next.y].color != m.piece.color) {\n          Piece pp;\n          if (board[m.next.x][m.next.y].color != Empty) {\n            pp = board[m.next.x][m.next.y];\n            for (auto& i : chess[opponent(m.piece.color)]) {\n              if (i.pos == pp.pos) {\n                i.pos = Position(-1, -1);\n                break;\n              }\n            }\n          }\n          board[m.piece.pos.x][m.piece.pos.y] = Piece();\n          board[m.next.x][m.next.y] = m.piece;\n\n          if (!check(king.pos, opponent(king.color))) {\n            moves.push_back(m);\n          }\n\n          board[m.next.x][m.next.y] = pp;\n          board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.type == pp.type && !i.pos.on_board()) {\n              i.pos = pp.pos;\n              break;\n            }\n          }\n        }\n      }\n    }\n    // fprintf(stderr, \"innnnnnnnnnnnnnRook\\n\");\n    // for(int i=0; i<2; 
++i) {\n    //     for(auto j : chess[i]) {\n    //         fprintf(stderr, \"(%c,%d) \", j.pos.x+'A', j.pos.y);\n    //     }\n    //     std::cerr << std::endl;\n    // }\n  }\n\n  void legal_pawn_moves(Piece p, std::vector<Move>& moves) {\n\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    if (m.piece.promoted) {\n      legal_gold_moves(p, moves);\n      return;\n    }\n    Piece king;\n    for (auto i : chess[p.color]) {\n      if (i.type == PieceType::King) {\n        king = i;\n        break;\n      }\n    }\n    if (m.piece.color == White) {\n      m.next = m.piece.pos + Position(0, 1);\n      if (m.next.on_board() &&\n          board[m.next.x][m.next.y].color != m.piece.color) {\n        Piece pp;\n        if (board[m.next.x][m.next.y].color != Empty) {\n          pp = board[m.next.x][m.next.y];\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.pos == pp.pos) {\n              i.pos = Position(-1, -1);\n              break;\n            }\n          }\n        }\n        board[m.piece.pos.x][m.piece.pos.y] = Piece();\n        board[m.next.x][m.next.y] = m.piece;\n\n        if (!check(king.pos, opponent(king.color))) {\n          if (m.next.y != 4)\n            moves.push_back(m);\n          else if (!m.piece.promoted) {\n            m.promote = true;\n            moves.push_back(m);\n            m.promote = false;\n          }\n        }\n\n        board[m.next.x][m.next.y] = pp;\n        board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n        for (auto& i : chess[opponent(m.piece.color)]) {\n          if (i.type == pp.type && !i.pos.on_board()) {\n            i.pos = pp.pos;\n            break;\n          }\n        }\n      }\n    } else {\n      m.next = m.piece.pos + Position(0, -1);\n      // fprintf(stderr, \"pawn: %d %d\\n\", m.next.x, m.next.y);\n      if (m.next.on_board() &&\n          board[m.next.x][m.next.y].color != m.piece.color) {\n        Piece pp;\n        if (board[m.next.x][m.next.y].color != Empty) 
{\n          pp = board[m.next.x][m.next.y];\n          for (auto& i : chess[opponent(m.piece.color)]) {\n            if (i.pos == pp.pos) {\n              i.pos = Position(-1, -1);\n              break;\n            }\n          }\n        }\n        board[m.piece.pos.x][m.piece.pos.y] = Piece();\n        board[m.next.x][m.next.y] = m.piece;\n\n        if (!check(king.pos, opponent(king.color))) {\n          if (m.next.y != 0)\n            moves.push_back(m);\n          else if (!m.piece.promoted) {\n            m.promote = true;\n            moves.push_back(m);\n            m.promote = false;\n          }\n        }\n\n        board[m.next.x][m.next.y] = pp;\n        board[m.piece.pos.x][m.piece.pos.y] = m.piece;\n        for (auto& i : chess[opponent(m.piece.color)]) {\n          if (i.type == pp.type && !i.pos.on_board()) {\n            i.pos = pp.pos;\n            break;\n          }\n        }\n      }\n    }\n  }\n\n  void legal_pawn_drop(Piece p, std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    Piece king;\n    for (auto i : chess[p.color]) {\n      if (i.type == PieceType::King) {\n        king = i;\n        break;\n      }\n    }\n\n    // find another pawn\n    int cannotdrop = Dx;\n    for (auto c : chess[p.color]) {\n      if ((c.type == PieceType::Pawn || c.type == PieceType::Pawn2) &&\n          c.pos.x != -1)\n        cannotdrop = c.pos.x;\n    }\n\n    if (p.color == White) {\n      for (int i = 0; i < Dx; ++i) {\n        if (i == cannotdrop)\n          continue;\n        for (int j = 0; j < Dy - 1; ++j) {\n          if (board[i][j].color == Empty) {\n            if (board[i][j + 1].type == PieceType::King &&\n                board[i][j + 1].color != p.color) {\n              board[i][j] = p;\n              for (auto& v : chess[m.piece.color]) {\n                if (v.type == p.type && !v.pos.on_board()) {\n                  v.pos = Position(i, j);\n                  break;\n                }\n              
}\n\n              bool cm = checkmate(board[i][j + 1].color);\n              board[i][j] = Piece();\n\n              for (auto& v : chess[m.piece.color]) {\n                if (v.pos == Position(i, j)) {\n                  v.pos = Position(-1, -1);\n                  break;\n                }\n              }\n              if (cm) {\n                continue;\n              }\n            }\n            m.next = Position(i, j);\n\n            Piece pp;\n            board[m.next.x][m.next.y] = m.piece;\n\n            if (!check(king.pos, opponent(king.color))) {\n              moves.push_back(m);\n            }\n\n            board[m.next.x][m.next.y] = pp;\n          }\n        }\n      }\n    } else {\n      for (int i = 0; i < Dx; ++i) {\n        if (i == cannotdrop)\n          continue;\n        for (int j = 1; j < Dy; ++j) {\n          if (board[i][j].color == Empty) {\n            if (board[i][j - 1].type == PieceType::King &&\n                board[i][j - 1].color != p.color) {\n              board[i][j] = p;\n              for (auto& v : chess[m.piece.color]) {\n                if (v.type == p.type && !v.pos.on_board()) {\n                  v.pos = Position(i, j);\n                  break;\n                }\n              }\n\n              bool cm = checkmate(board[i][j - 1].color);\n              board[i][j] = Piece();\n\n              for (auto& v : chess[m.piece.color]) {\n                if (v.pos == Position(i, j)) {\n                  v.pos = Position(-1, -1);\n                  break;\n                }\n              }\n              if (cm) {\n                continue;\n              }\n            }\n            m.next = Position(i, j);\n            Piece pp;\n            board[m.next.x][m.next.y] = m.piece;\n\n            if (!check(king.pos, opponent(king.color))) {\n              moves.push_back(m);\n            }\n\n            board[m.next.x][m.next.y] = pp;\n          }\n        }\n      }\n    }\n  }\n\n  void legal_drop(Piece p, 
std::vector<Move>& moves) {\n    Move m;\n    m.piece = p;\n    m.promote = false;\n    Piece king;\n    for (auto i : chess[p.color]) {\n      if (i.type == PieceType::King) {\n        king = i;\n        break;\n      }\n    }\n    for (int i = 0; i < Dx; ++i) {\n      for (int j = 0; j < Dy; ++j) {\n        if (board[i][j].color == Empty) {\n          m.next = Position(i, j);\n          Piece pp;\n          board[m.next.x][m.next.y] = m.piece;\n\n          if (!check(king.pos, opponent(king.color)))\n            moves.push_back(m);\n\n          board[m.next.x][m.next.y] = pp;\n        }\n      }\n    }\n  }\n\n  int opponent(int player) const {\n    if (player == White)\n      return Black;\n    return White;\n  }\n\n  // pos: king position\n  // opponent can eat king\n  bool check(Position pos, int op) {\n    for (auto i : chess[op]) {\n      std::vector<Move> moves;\n      if (i.pos.on_board()) {\n        switch (i.type) {\n        case PieceType::King:\n          king_moves(i, moves);\n          break;\n\n        case PieceType::Gold:\n        case PieceType::Gold2:\n          gold_moves(i, moves);\n          break;\n\n        case PieceType::Silver:\n        case PieceType::Silver2:\n          silver_moves(i, moves);\n          break;\n\n        case PieceType::Bishop:\n        case PieceType::Bishop2:\n          bishop_moves(i, moves);\n          break;\n\n        case PieceType::Rook:\n        case PieceType::Rook2:\n          rook_moves(i, moves);\n          break;\n\n        case PieceType::Pawn:\n        case PieceType::Pawn2:\n          pawn_moves(i, moves);\n          break;\n\n        default:\n          break;\n        }\n      }\n      for (auto m : moves) {\n        if (m.next == pos) {\n          return true;\n        }\n      }\n    }\n    return false;\n  }\n\n  bool checkmate(int color) {\n    std::vector<Move> moves;\n\n    for (auto i : chess[color]) {\n      legalMoves(i, moves);\n    }\n    return moves.empty();\n  }\n\n  void 
legalMoves(Piece p, std::vector<Move>& moves) {\n    if (p.pos.on_board()) {\n      switch (p.type) {\n      case PieceType::King:\n        legal_king_moves(p, moves);\n        break;\n\n      case PieceType::Gold:\n      case PieceType::Gold2:\n        legal_gold_moves(p, moves);\n        break;\n\n      case PieceType::Silver:\n      case PieceType::Silver2:\n        legal_silver_moves(p, moves);\n        break;\n\n      case PieceType::Bishop:\n      case PieceType::Bishop2:\n        legal_bishop_moves(p, moves);\n        break;\n\n      case PieceType::Rook:\n      case PieceType::Rook2:\n        legal_rook_moves(p, moves);\n        break;\n\n      case PieceType::Pawn:\n      case PieceType::Pawn2:\n        legal_pawn_moves(p, moves);\n        break;\n\n      default:\n        break;\n      }\n    } else {\n      switch (p.type) {\n      case PieceType::King:\n        fprintf(stderr, \"Error: King drop\\n\");\n        break;\n\n      case PieceType::Gold:\n      case PieceType::Gold2:\n      case PieceType::Silver:\n      case PieceType::Silver2:\n      case PieceType::Bishop:\n      case PieceType::Bishop2:\n      case PieceType::Rook:\n      case PieceType::Rook2:\n        legal_drop(p, moves);\n        break;\n\n      case PieceType::Pawn:\n      case PieceType::Pawn2:\n        legal_pawn_drop(p, moves);\n        break;\n\n      default:\n        break;\n      }\n    }\n  }\n\n  int type_to_z(Piece p) {\n    if (!p.promoted)\n      return (int)p.type - 1;\n    switch (p.type) {\n    case PieceType::Silver:\n      return 11;\n    case PieceType::Bishop:\n      return 12;\n    case PieceType::Rook:\n      return 13;\n    case PieceType::Pawn:\n      return 14;\n    case PieceType::Silver2:\n      return 15;\n    case PieceType::Bishop2:\n      return 16;\n    case PieceType::Rook2:\n      return 17;\n    case PieceType::Pawn2:\n      return 18;\n\n    default:\n      fprintf(\n          stderr, \"%s type to z error %d\\n\", p.print().c_str(), (int)p.type);\n  
    return -1;\n    }\n  }\n\n  bool z_promoted(int z) const {\n    return z >= 11;\n  }\n\n  PieceType z_to_type(int z) const {\n    switch (z) {\n    case 0:\n      return PieceType::King;\n    case 1:\n      return PieceType::Gold;\n    case 2:\n      return PieceType::Silver;\n    case 3:\n      return PieceType::Bishop;\n    case 4:\n      return PieceType::Rook;\n    case 5:\n      return PieceType::Pawn;\n    case 6:\n      return PieceType::Gold2;\n    case 7:\n      return PieceType::Silver2;\n    case 8:\n      return PieceType::Bishop2;\n    case 9:\n      return PieceType::Rook2;\n    case 10:\n      return PieceType::Pawn2;\n    case 11:\n      return PieceType::Silver;\n    case 12:\n      return PieceType::Bishop;\n    case 13:\n      return PieceType::Rook;\n    case 14:\n      return PieceType::Pawn;\n    case 15:\n      return PieceType::Silver2;\n    case 16:\n      return PieceType::Bishop2;\n    case 17:\n      return PieceType::Rook2;\n    case 18:\n      return PieceType::Pawn2;\n\n    default:\n      fprintf(stderr, \"z to type error %d\\n\", z);\n      return PieceType::None;\n    }\n  }\n\n  PieceType new_type(PieceType p) const {\n    PieceType t = p;\n    switch (p) {\n    case PieceType::Gold:\n      t = PieceType::Gold2;\n      break;\n    case PieceType::Gold2:\n      t = PieceType::Gold;\n      break;\n    case PieceType::Silver:\n      t = PieceType::Silver2;\n      break;\n    case PieceType::Silver2:\n      t = PieceType::Silver;\n      break;\n    case PieceType::Bishop:\n      t = PieceType::Bishop2;\n      break;\n    case PieceType::Bishop2:\n      t = PieceType::Bishop;\n      break;\n    case PieceType::Rook:\n      t = PieceType::Rook2;\n      break;\n    case PieceType::Rook2:\n      t = PieceType::Rook;\n      break;\n    case PieceType::Pawn:\n      t = PieceType::Pawn2;\n      break;\n    case PieceType::Pawn2:\n      t = PieceType::Pawn;\n      break;\n    default:\n      break;\n    }\n    return t;\n  }\n};\n"
  },
  {
    "path": "src/games/surakarta.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: Maria Elsa\n// - Github: https://github.com/melsaa\n// - Email:  m_elsa@ymail.com\n// Facilitator: 邱顯棟 (Xiǎn-Dòng Qiū)\n// - Github: https://github.com/YumJelly\n// - Email:  yumjelly@gmail.com\n\n#pragma once\n#include <list>\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/time.h>\n#include <time.h>\n#include <vector>\n\nusing namespace std;\n\nconst int SuraWhite = 0;\nconst int SuraBlack = 1;\nconst int SuraEmpty = 2;\n\nconst int SKDx = 6;\nconst int SKDy = 6;\n\nconst int SuraMaxPlayoutLength = 1000;\n\nclass SKHash {\n public:\n  unsigned long long HashArray[2][SKDx][SKDy];\n  unsigned long long HashTurn;\n\n  bool InitHashCalled = false;\n\n  void InitHash() {\n    for (int player = 0; player < 2; player++)\n      for (int i = 0; i < SKDx; i++)\n        for (int j = 0; j < SKDy; j++) {\n          HashArray[player][i][j] = 0;\n          for (int k = 0; k < 36; k++)\n            if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n              HashArray[player][i][j] |= (1ULL << k);\n        }\n    HashTurn = 0;\n    for (int k = 0; k < 36; k++)\n      if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n        HashTurn |= (1ULL << k);\n  }\n};\n\nclass SKPlayer {\n public:\n  int player;\n\n  bool operator==(SKPlayer p) {\n    return (p.player == player);\n  }\n};\n\nclass SKMove {\n public:\n  int x, y, x1, y1, color;\n  bool operator==(const SKMove& m) {\n    return (x == m.x && y == m.y && x1 == m.x1 && y1 == m.y1 &&\n            color == m.color);\n  }\n  bool operator!=(const SKMove& m) {\n    return !(x == m.x && y == m.y && x1 == m.x1 && y1 == m.y1 &&\n             color == m.color);\n  }\n};\n\nclass SKBoard {\n public:\n  int board[SKDx][SKDy];\n  unsigned long long hash;\n  SKMove 
rollout[SuraMaxPlayoutLength];\n  int length, turn, nbPlay, repetition;\n  bool isCapture, draw;\n  vector<unsigned long long> history_move;\n  SKHash Sura;\n\n  void init() {\n    for (int i = 0; i < SKDx; i++)\n      for (int j = 0; j < SKDy; j++)\n        board[i][j] = SuraEmpty;\n    for (int i = 0; i < 2; i++)\n      for (int j = 0; j < SKDx; j++)\n        board[j][i] = SuraWhite;\n    for (int i = SKDy - 2; i < SKDy; i++)\n      for (int j = 0; j < SKDx; j++)\n        board[j][i] = SuraBlack;\n    hash = 0;\n    length = 0;\n    turn = SuraWhite;\n    nbPlay = 0;\n    repetition = 0;\n    isCapture = false;\n    draw = false;\n    history_move.clear();\n    history_move.push_back(hash);\n    if (Sura.InitHashCalled == false) {\n      Sura.InitHash();\n      Sura.InitHashCalled = true;\n    }\n  }\n\n  void print_board(FILE* fp) {\n    fprintf(fp, \"====================\\n\");\n    for (int i = SKDy - 1; i >= 0; i--) {\n      for (int j = 0; j < SKDx; j++)\n        if (board[j][i] == SuraBlack)\n          fprintf(fp, \"x \");\n        else if (board[j][i] == SuraWhite)\n          fprintf(fp, \"o \");\n        else if (board[j][i] == SuraEmpty)\n          fprintf(fp, \"- \");\n      fprintf(fp, \"\\n\");\n    }\n    fprintf(fp, \"====================\\n\");\n  }\n\n  bool won(int color) {\n    if (color == SuraWhite) {\n      for (int i = 0; i < SKDy; i++)\n        for (int j = 0; j < SKDx; j++)\n          if (board[j][i] == SuraBlack)\n            return false;\n      vector<SKMove> moves;\n      int nb = legalMoves(SuraBlack, moves);\n      if (nb == 0)\n        return true;\n    } else if (color == SuraBlack) {\n      for (int i = 0; i < SKDy; i++)\n        for (int j = 0; j < SKDx; j++)\n          if (board[j][i] == SuraWhite)\n            return false;\n      vector<SKMove> moves;\n      int nb = legalMoves(SuraWhite, moves);\n      if (nb == 0)\n        return true;\n    }\n    return false;\n  }\n\n  bool is_draw() {\n    if (repetition == 3)\n      draw 
= true;\n    if (draw == true)\n      return true;\n    return false;\n  }\n\n  int opponent(int color) {\n    if (color == SuraWhite)\n      return SuraBlack;\n    return SuraWhite;\n  }\n\n  bool legalMove(SKMove m, bool capture) {\n    if (m.x1 < 0 || m.y1 < 0 || m.x1 >= SKDx || m.y1 >= SKDy)\n      return false;\n    if (board[m.x][m.y] != m.color)\n      return false;\n    if (board[m.x1][m.y1] == m.color)\n      return false;\n    if (capture == false && board[m.x1][m.y1] == opponent(m.color))\n      return false;\n    return true;\n  }\n\n  void play(SKMove m) {\n    board[m.x][m.y] = SuraEmpty;\n    hash ^= Sura.HashArray[m.color][m.x][m.y];\n    if (board[m.x1][m.y1] != SuraEmpty) {\n      hash ^= Sura.HashArray[board[m.x1][m.y1]][m.x1][m.y1];\n      isCapture = true;\n    }\n    board[m.x1][m.y1] = m.color;\n    hash ^= Sura.HashArray[m.color][m.x1][m.y1];\n    hash ^= Sura.HashTurn;\n    if (length < SuraMaxPlayoutLength) {\n      rollout[length] = m;\n      length++;\n    }\n    check_repetition();\n    turn = opponent(turn);\n    nbPlay++;\n    if (nbPlay == 50 && isCapture == false) {\n      draw = true;\n    } else if (isCapture == true) {\n      nbPlay = 0;\n      isCapture = false;\n    }\n  }\n\n  void check_repetition() {\n    history_move.push_back(hash);\n    int cur_index = history_move.size() - 1;\n    if (cur_index >= 8 &&\n        history_move[cur_index] == history_move[cur_index - 4] &&\n        history_move[cur_index] == history_move[cur_index - 8]) {\n      // std::cout << \"Three fold Repetitions\\n\";\n      repetition = 3;\n    } else if (cur_index >= 4 &&\n               history_move[cur_index] == history_move[cur_index - 4]) {\n      // std::cout << \"Two fold Repetitions\\n\";\n      repetition = 2;\n    }\n  }\n\n  int legalMoves(int color, vector<SKMove>& moves) {\n    int nb = 0;\n    bool capture = false;\n    int dir[8][2] = {\n        {0, 1}, {1, 1}, {1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}};\n    for (int i = 0; 
i < SKDx; i++)\n      for (int j = 0; j < SKDy; j++)\n        if (board[i][j] == color) {\n          SKMove m;\n          m.x = i;\n          m.y = j;\n          m.color = color;\n          for (int k = 0; k < 8; k++) {\n            int mx = dir[k][0];\n            int my = dir[k][1];\n            m.x1 = i + mx;\n            m.y1 = j + my;\n            if (legalMove(m, capture)) {\n              moves.push_back(m);\n              nb++;\n            }\n          }\n        }\n    return nb;\n  }\n\n  int legalCaptures(int nb, int color, vector<SKMove>& moves) {\n    int nc = nb;\n    bool capture = true;\n    int dir[4][2] = {\n        {0, 1}, {1, 0}, {0, -1}, {-1, 0}};  // up, right, down, left\n    for (int i = 0; i < SKDx; i++)\n      for (int j = 0; j < SKDy; j++)\n        if (board[i][j] == color &&\n            ((i >= 1 && i <= 4) || (j >= 1 && j <= 4))) {\n          SKMove m;\n          m.x = i;\n          m.y = j;\n          m.color = color;\n          // std::cout << \"origin = \" << i << \",\" << j << std::endl;\n          for (int k = 0; k < 4; k++) {\n            int curK = k;\n            int curX = i + dir[curK][0];\n            int curY = j + dir[curK][1];\n            bool loop = false;\n            int step = 0;\n            while (curX >= -1 && curY >= -1 && curX <= SKDx && curY <= SKDy) {\n              step++;\n              // std::cout << \"dir = \" << curK << \" to \" << curX << \",\" << curY\n              // << std::endl;\n              if (!(curX == i && curY == j) && curX >= 0 && curY >= 0 &&\n                  curX < SKDx && curY < SKDy && board[curX][curY] == color) {\n                // std::cout << \"same color\\n\";\n                break;\n              } else if (curX >= 0 && curY >= 0 && curX < SKDx && curY < SKDy &&\n                         board[curX][curY] == opponent(color)) {\n                if (loop) {  // already go through a loop\n                  m.x1 = curX;\n                  m.y1 = curY;\n                  if 
(legalMove(m, capture) &&\n                      find(moves.begin(), moves.end(), m) == moves.end()) {\n                    moves.push_back(m);\n                    nc++;\n                  }\n                }  // else std::cout << \"opponent but no loop\\n\";\n                break;\n              } else if (curX == i && curY == j && step >= 28)\n                break;  // return to origin\n              else if ((curX == 0 && curY == 0) ||\n                       (curX == 0 && curY == SKDy - 1) ||\n                       (curX == SKDx - 1 && curY == 0) ||\n                       (curX == SKDx - 1 && curY == SKDy - 1))\n                break;  // corner\n              else {\n                if (curX < 0 || curY < 0 || curX >= SKDx || curY >= SKDy) {\n                  // std::cout << \"go through a loop\\n\";\n                  // std::cout << \"prev \" << curX << \",\" << curY << std::endl;\n                  if (curX == 1 || curX == 2) {\n                    curK = 1;  // right\n                    if (curY > 0)\n                      curY -= (curX + 1);\n                    else\n                      curY += (curX + 1);\n                    curX = 0;\n                    loop = true;\n                  } else if (curX == 3 || curX == 4) {\n                    curK = 3;  // left\n                    if (curY > 0 && curX == 3)\n                      curY -= 3;\n                    else if (curY > 0 && curX == 4)\n                      curY -= 2;\n                    else if (curY < 0 && curX == 3)\n                      curY += 3;\n                    else if (curY < 0 && curX == 4)\n                      curY += 2;\n                    curX = 5;\n                    loop = true;\n                  } else if (curY == 1 || curY == 2) {\n                    curK = 0;  // up\n                    if (curX > 0)\n                      curX -= (curY + 1);\n                    else\n                      curX += (curY + 1);\n                    curY = 0;\n             
       loop = true;\n                  } else if (curY == 3 || curY == 4) {\n                    curK = 2;  // down\n                    if (curX > 0 && curY == 3)\n                      curX -= 3;\n                    else if (curX > 0 && curY == 4)\n                      curX -= 2;\n                    else if (curX < 0 && curY == 3)\n                      curX += 3;\n                    else if (curX < 0 && curY == 4)\n                      curX += 2;\n                    curY = 5;\n                    loop = true;\n                  }\n                  // std::cout << \"cur = \" << curX << \",\" << curY << \" dir = \"\n                  // << curK << std::endl;\n                } else {\n                  // std::cout << \"continue to next one under same direction\\n\";\n                  curX += dir[curK][0];\n                  curY += dir[curK][1];\n                }\n              }\n            }\n          }\n        }\n    return nc;\n  }\n};\n"
  },
  {
    "path": "src/games/surakarta_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Author: Maria Elsa\n// - Github: https://github.com/melsaa\n// - Email:  m_elsa@ymail.com\n// Facilitator: 邱顯棟 (Xiǎn-Dòng Qiū)\n// - Github: https://github.com/YumJelly\n// - Email:  yumjelly@gmail.com\n\n#include \"../core/state.h\"\n\ntypedef unsigned short Coord;\n\n#include \"time.h\"\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\nconst int StateForSurakartaX = 2;\nconst int StateForSurakartaY = 6;\nconst int StateForSurakartaZ = 6;\n\n#include \"surakarta.h\"\n\nclass StateForSurakarta : public core::State, public SKBoard {\n public:\n  StateForSurakarta(int seed)\n      : State(seed) {\n  }\n\n  virtual void Initialize() override {\n    _moves.clear();\n\n    // the features are just one number between 0 and 1 (the distance,\n    // normalized).\n    _featSize[0] = StateForSurakartaX;\n    _featSize[1] = StateForSurakartaY;\n    _featSize[2] = StateForSurakartaZ;\n\n    // size of the output of the neural network; this should cover the positions\n    // of actions (above).\n    _actionSize[0] = 36;\n    _actionSize[1] = 6;\n    _actionSize[2] = 6;\n\n    _hash = 0;\n    _status = GameStatus::player0Turn;\n\n    _features.resize(StateForSurakartaX * StateForSurakartaY *\n                     StateForSurakartaZ);\n\n    init();\n    findFeatures();\n    findActions(SuraWhite);\n    fillFullFeatures();\n  }\n\n  virtual std::string stateDescription() const override {\n    std::string str;\n    str += \"  A|B|C|D|E|F\\n\";\n    for (int i = SKDy - 1; i >= 0; i--) {\n      str += to_string(i + 1) + ' ';\n      for (int j = 0; j < SKDx; j++) {\n        if (j > 0)\n          str += '|';\n        if (board[j][i] == SuraEmpty)\n          str += ' ';\n        else if (board[j][i] == SuraBlack)\n         
 str += 'x';\n        else\n          str += 'o';\n      }\n      str += '\\n';\n    }\n\n    return str;\n  }\n\n  virtual std::string actionsDescription() const override {\n    std::stringstream ss;\n    char x, y, x1, y1;\n    for (int i = 0; i < (int)_legalActions.size(); i++) {\n      const _Action& action = _legalActions[i];\n      x = static_cast<char>(action.GetY() + 'A');\n      y = static_cast<char>(action.GetZ() + '1');\n      int curY = action.GetX() / SKDx;\n      y1 = static_cast<char>(curY + '1');\n      x1 = static_cast<char>(action.GetX() - curY * SKDx + 'A');\n      ss << \"Action \" << i << \": \" << x << y << \"-\" << x1 << y1 << std::endl;\n    }\n    ss << \"\\nInput format : <Alphabet><Digit>-<Alphabet><Digit> e.g. A1-A2\\n\";\n    return ss.str();\n  }\n\n  virtual std::string actionDescription(const _Action& action) const {\n    std::stringstream ss;\n    char x, y, x1, y1;\n    x = static_cast<char>(action.GetY() + 'A');\n    y = static_cast<char>(action.GetZ() + '1');\n    int curY = action.GetX() / SKDx;\n    y1 = static_cast<char>(curY + '1');\n    x1 = static_cast<char>(action.GetX() - curY * SKDx + 'A');\n    ss << x << y << \"-\" << x1 << y1;\n\n    return ss.str();\n  }\n\n  virtual int parseAction(const std::string& str) const override {\n    int x = -1, y = -1, x1 = -1, y1 = -1;\n    if (!isalpha(str[0]) || !isalpha(str[3]))\n      return -1;\n    if (!isdigit(str[1]) || !isdigit(str[4]))\n      return -1;\n\n    x = static_cast<int>(toupper(str[0]) - 'A');\n    y = static_cast<int>(str[1] - '1');\n    x1 = static_cast<int>(toupper(str[3]) - 'A');\n    y1 = static_cast<int>(str[4] - '1');\n\n    if (x < 0 || y < 0 || x1 < 0 || y1 < 0 || x >= SKDx || y >= SKDy ||\n        x1 >= SKDx || y1 >= SKDy)\n      return -1;\n\n    for (int i = 0; i < (int)_legalActions.size(); i++) {\n      if (_legalActions[i].GetX() == (y1 * SKDy + x1) &&\n          _legalActions[i].GetY() == x && _legalActions[i].GetZ() == y)\n        return i;\n    }\n  
  return -1;\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForSurakarta>(*this);\n  }\n\n  void findActions(int color) {\n    vector<SKMove> moves;\n    int nb = legalMoves(color, moves);\n    int nc = legalCaptures(nb, color, moves);\n\n    clearActions();\n    for (int i = 0; i < nc; i++) {\n      int x = moves[i].x;\n      int y = moves[i].y;\n      int final_pos = moves[i].y1 * SKDy + moves[i].x1;\n      addAction(final_pos, x, y);\n    }\n  }\n\n  void findFeatures() {\n    if ((_status == GameStatus::player0Win) ||\n        (_status == GameStatus::player1Win) || (_status == GameStatus::tie))\n      return;\n    if (_status == GameStatus::player0Turn) {  // SuraWhite\n      for (int i = 0; i < 72; i++)\n        _features[i] = 0;\n      for (int i = 0; i < 36; i++)\n        if (board[i % 6][i / 6] == SuraBlack)\n          _features[i] = 1;\n      for (int i = 0; i < 36; i++)\n        if (board[i % 6][i / 6] == SuraWhite)\n          _features[36 + i] = 1;\n    } else if (_status == GameStatus::player1Turn) {  // SuraBlack\n      assert(_status == GameStatus::player1Turn);\n      for (int i = 0; i < 72; i++)\n        _features[i] = 0;\n      for (int i = 0; i < 36; i++)\n        if (board[i % 6][5 - (i / 6)] == SuraWhite)\n          _features[i] = 1;\n      for (int i = 0; i < 36; i++)\n        if (board[i % 6][5 - (i / 6)] == SuraBlack)\n          _features[36 + i] = 1;\n    }\n  }\n  // The action just decreases the distance and swaps the turn to play.\n  virtual void ApplyAction(const _Action& action) override {\n    SKMove m;\n\n    if (_status == GameStatus::player0Turn) {  // SuraWhite\n      m.color = SuraWhite;\n      m.x = action.GetY();\n      m.y = action.GetZ();\n\n      m.y1 = action.GetX() / SKDx;\n      m.x1 = action.GetX() - m.y1 * SKDx;\n\n      play(m);\n      findActions(SuraBlack);\n      if (won(SuraWhite) || _legalActions.size() == 0) {\n        _legalActions.clear();\n        
_status = GameStatus::player0Win;\n      } else if (is_draw()) {\n        _status = GameStatus::tie;\n        _legalActions.clear();\n      } else\n        _status = GameStatus::player1Turn;\n    } else {\n      // SuraBlack\n      m.color = SuraBlack;\n      m.x = action.GetY();\n      m.y = action.GetZ();\n\n      m.y1 = action.GetX() / SKDx;\n      m.x1 = action.GetX() - m.y1 * SKDx;\n\n      play(m);\n      findActions(SuraWhite);\n      if (won(SuraBlack) || _legalActions.size() == 0) {\n        _legalActions.clear();\n        _status = GameStatus::player1Win;\n      } else if (is_draw()) {\n        _legalActions.clear();\n        _status = GameStatus::tie;\n      } else\n        _status = GameStatus::player0Turn;\n    }\n    findFeatures();\n    _hash = hash;\n    fillFullFeatures();\n  }\n\n  // For this trivial example we just compare to random play\n  virtual void DoGoodAction() override {\n    DoRandomAction();\n  }\n};\n"
  },
  {
    "path": "src/games/tristan_nogo.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <list>\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/time.h>\n#include <time.h>\n\n#include \"tristan_nogo.h\"\n\nusing namespace std;\n\nunsigned long long HashArray[3000];\n\nbool useOrderMoves = true;\n\nbool MonteCarloMoveOrdering = true;\n\nbool printGame = false;\n\nunsigned long long nbPlay = 0;\n\nint level = 1;\n\ntimeval stop, start;\nunsigned long long previousTime = 0;\n\nbool useNotLosing = false;\n\nbool useOrderPPAF = false;\n\nvoid initHash() {\n  for (int i = 0; i < 3000; i++) {\n    HashArray[i] = 0;\n    for (int k = 0; k < 64; k++)\n      if ((rand() / (RAND_MAX + 1.0)) > 0.5)\n        HashArray[i] |= (1ULL << k);\n  }\n}\n\nbool useCode = true;\n\ndouble history[MaxMoveNumber];\n\nint interMove[MaxSize], moveInter[MaxSize];\n\nbool ajoute(int* stack, int elt) {\n  for (int i = 1; i <= stack[0]; i++)\n    if (stack[i] == elt)\n      return false;\n  stack[0]++;\n  stack[stack[0]] = elt;\n  return true;\n}\n"
  },
  {
    "path": "src/games/tristan_nogo.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <iostream>\n#include <list>\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/time.h>\n#include <time.h>\n\nusing namespace std;\n\nextern const int White;\nextern const int Black;\nextern const int Empty;\n\nconst int Dx = 9;\nconst int Dy = 9;\n\nconst int MaxLegalMoves = Dx * Dy;\nconst int MaxPlayoutLength = 1000;\n\nconst int SizeTable = 1048575;  // une puissance de 2 moins 1\n\nextern unsigned long long HashArray[3000];\n\nextern bool useOrderMoves;\n\nextern bool MonteCarloMoveOrdering;\n\nextern bool printGame;\n\nextern unsigned long long nbPlay;\n\nextern int level;\n\nextern timeval stop, start;\nextern unsigned long long previousTime;\n\nextern bool useNotLosing;\n\nclass NogoBoard;\n\nextern void initHash();\n\n/*\nclass Player {\n public:\n  int player;\n\n  bool operator==(Player p) {\n    return (p.player == player);\n  }\n};\n*/\n\nconst int MaxMoveNumber = 256 * 2 * (Dx * Dy + 1) + 1;\n\nextern bool useCode;\n\nclass NogoMove {\n public:\n  int inter, color;\n  int code;\n\n  int number() {\n    int c = 0;\n    if (useCode)\n      c = code;\n    if (color == White)\n      return c * 2 * (Dx * Dy + 1) + inter;\n    else\n      return c * 2 * (Dx * Dy + 1) + Dx * Dy + 1 + inter;\n  }\n};\n\nextern double history[MaxMoveNumber];\n\nconst int MaxSize = (Dx + 2) * (Dy + 2);\nconst int MaxIntersections = Dx * Dy;\n\nconst int Exterieur = 3;\n\nconst int Haut = 0;\nconst int Bas = 1;\nconst int Gauche = 2;\nconst int Droite = 3;\n\nextern int interMove[MaxSize], moveInter[MaxSize];\n\nextern bool ajoute(int* stack, int elt);\n\nclass NogoBoard {\n public:\n  int start, end, size, dxNogoBoard, dyNogoBoard;\n  char board[MaxSize];\n  unsigned long long hash;\n  int 
turn;\n  int orderMove[MaxLegalMoves];\n\n  NogoMove rollout[MaxPlayoutLength];\n  int length;\n\n  int nbVides, vides[MaxSize], indiceVide[MaxSize];\n  int nbChaines, chaines[MaxSize], indiceChaine[MaxSize];\n\n  int nbPierres[MaxSize];\n  int premierePierre[MaxSize];\n  int pierreSuivante[MaxSize];\n\n  int nbPseudoLibertes[MaxSize];\n  int premierePseudoLiberte[MaxSize];\n  int pseudoLiberteSuivante[4 * MaxSize];\n  int pseudoLibertePrecedente[4 * MaxSize];\n\n  NogoBoard() {\n    init();\n  }\n\n  void init() {\n    dxNogoBoard = Dx + 2;\n    dyNogoBoard = Dy + 2;\n    start = dxNogoBoard + 1;\n    end = dxNogoBoard * dyNogoBoard - dxNogoBoard - 1;\n    size = Dx * Dy;\n    nbVides = 0;\n    nbChaines = 0;\n    hash = 0;\n    for (int i = 0; i < dxNogoBoard * dyNogoBoard; i++) {\n      if ((i < start) || (i % dxNogoBoard == 0) ||\n          ((i + 1) % dxNogoBoard == 0) || (i >= end))\n        board[i] = Exterieur;\n      else {\n        board[i] = Empty;\n        vides[nbVides] = i;\n        indiceVide[i] = nbVides;\n        interMove[nbVides] = i;\n        moveInter[i] = nbVides;\n        nbVides++;\n      }\n    }\n    turn = White;\n    length = 0;\n  }\n\n  void winningMove(int depth, NogoMove m, NogoMove) {\n    unsigned long long l = 4ULL << (depth - 70);\n    // fprintf (stderr, \"depth = %d, history [%d] += %llu, \", depth - 70,\n    // m.number (), l);\n    history[m.number()] += l;\n  }\n\n  int order(NogoMove m) {\n    return 4;\n\n    // return rand () % 1000;\n    return history[m.number()];\n  }\n\n  int legalNogoMoves(int joueur, NogoMove moves[MaxLegalMoves]) {\n    NogoMove coup;\n    int nb = 0;\n    coup.color = joueur;\n    for (coup.inter = 0; coup.inter < size; coup.inter++)\n      if (board[interMove[coup.inter]] == Empty)\n        if (legal(interMove[coup.inter], joueur)) {\n          // - ajouter joueur ?\n          coup.code = board[interMove[coup.inter] - dxNogoBoard] +\n                      4 * board[interMove[coup.inter] - 1] +\n   
                   16 * board[interMove[coup.inter] + 1] +\n                      64 * board[interMove[coup.inter] + dxNogoBoard];\n          moves[nb] = coup;\n          nb++;\n        }\n    if (useOrderMoves) {\n      for (int i = 0; i < nb; i++)\n        orderMove[i] = order(moves[i]);\n      for (int i = 0; i < nb; i++) {\n        int imin = i;\n        int o = orderMove[i];\n        for (int j = i + 1; j < nb; j++) {\n          int o1 = orderMove[j];\n          if (o1 < o) {\n            imin = j;\n            o = o1;\n          }\n        }\n        NogoMove m = moves[i];\n        moves[i] = moves[imin];\n        moves[imin] = m;\n        o = orderMove[i];\n        orderMove[i] = orderMove[imin];\n        orderMove[imin] = o;\n      }\n    }\n    return nb;\n  }\n\n  bool losingMove(NogoMove) {\n    return false;\n  }\n\n  bool legalMove(NogoMove m) {\n    return legal(interMove[m.inter], m.color);\n  }\n\n  bool legal(int inter, char color) {\n    if (board[inter] != Empty)\n      return false;\n\n    char autre = opponent(color);\n\n    // check if capture\n    if (board[inter - 1] == autre)\n      if (atari(premierePierre[inter - 1]))\n        return false;\n    if (board[inter + 1] == autre)\n      if (atari(premierePierre[inter + 1]))\n        return false;\n    if (board[inter - dxNogoBoard] == autre)\n      if (atari(premierePierre[inter - dxNogoBoard]))\n        return false;\n    if (board[inter + dxNogoBoard] == autre)\n      if (atari(premierePierre[inter + dxNogoBoard]))\n        return false;\n\n    // check if suicide\n    if (board[inter - 1] == Empty)\n      return true;\n    if (board[inter + 1] == Empty)\n      return true;\n    if (board[inter - dxNogoBoard] == Empty)\n      return true;\n    if (board[inter + dxNogoBoard] == Empty)\n      return true;\n\n    int nb = 0;\n\n    if (board[inter - 1] == color) {\n      nb += nbPseudoLibertes[premierePierre[inter - 1]] - 1;\n      if (board[inter + 1] == color) {\n        if 
(premierePierre[inter - 1] != premierePierre[inter + 1])\n          nb += nbPseudoLibertes[premierePierre[inter + 1]] - 1;\n        else\n          nb--;\n        if (board[inter - dxNogoBoard] == color) {\n          if ((premierePierre[inter - 1] !=\n               premierePierre[inter - dxNogoBoard]) &&\n              (premierePierre[inter + 1] !=\n               premierePierre[inter - dxNogoBoard]))\n            nb += nbPseudoLibertes[premierePierre[inter - dxNogoBoard]] - 1;\n          else\n            nb--;\n          if (board[inter + dxNogoBoard] == color) {\n            if ((premierePierre[inter - 1] !=\n                 premierePierre[inter + dxNogoBoard]) &&\n                (premierePierre[inter + 1] !=\n                 premierePierre[inter + dxNogoBoard]) &&\n                (premierePierre[inter - dxNogoBoard] !=\n                 premierePierre[inter + dxNogoBoard]))\n              nb += nbPseudoLibertes[premierePierre[inter + dxNogoBoard]] - 1;\n            else\n              nb--;\n          }\n        } else if (board[inter + dxNogoBoard] == color) {\n          if ((premierePierre[inter - 1] !=\n               premierePierre[inter + dxNogoBoard]) &&\n              (premierePierre[inter + 1] !=\n               premierePierre[inter + dxNogoBoard]))\n            nb += nbPseudoLibertes[premierePierre[inter + dxNogoBoard]] - 1;\n          else\n            nb--;\n        }\n      } else {\n        if (board[inter - dxNogoBoard] == color) {\n          if ((premierePierre[inter - 1] !=\n               premierePierre[inter - dxNogoBoard]))\n            nb += nbPseudoLibertes[premierePierre[inter - dxNogoBoard]] - 1;\n          else\n            nb--;\n          if (board[inter + dxNogoBoard] == color) {\n            if ((premierePierre[inter - 1] !=\n                 premierePierre[inter + dxNogoBoard]) &&\n                (premierePierre[inter - dxNogoBoard] !=\n                 premierePierre[inter + dxNogoBoard]))\n              nb += 
nbPseudoLibertes[premierePierre[inter + dxNogoBoard]] - 1;\n            else\n              nb--;\n          }\n        } else if (board[inter + dxNogoBoard] == color) {\n          if ((premierePierre[inter - 1] !=\n               premierePierre[inter + dxNogoBoard]))\n            nb += nbPseudoLibertes[premierePierre[inter + dxNogoBoard]] - 1;\n          else\n            nb--;\n        }\n      }\n    } else {\n      if (board[inter + 1] == color) {\n        nb += nbPseudoLibertes[premierePierre[inter + 1]] - 1;\n        if (board[inter - dxNogoBoard] == color) {\n          if ((premierePierre[inter + 1] !=\n               premierePierre[inter - dxNogoBoard]))\n            nb += nbPseudoLibertes[premierePierre[inter - dxNogoBoard]] - 1;\n          else\n            nb--;\n          if (board[inter + dxNogoBoard] == color) {\n            if ((premierePierre[inter + 1] !=\n                 premierePierre[inter + dxNogoBoard]) &&\n                (premierePierre[inter - dxNogoBoard] !=\n                 premierePierre[inter + dxNogoBoard]))\n              nb += nbPseudoLibertes[premierePierre[inter + dxNogoBoard]] - 1;\n            else\n              nb--;\n          }\n        } else if (board[inter + dxNogoBoard] == color) {\n          if ((premierePierre[inter + 1] !=\n               premierePierre[inter + dxNogoBoard]))\n            nb += nbPseudoLibertes[premierePierre[inter + dxNogoBoard]] - 1;\n          else\n            nb--;\n        }\n      } else {\n        if (board[inter - dxNogoBoard] == color) {\n          nb += nbPseudoLibertes[premierePierre[inter - dxNogoBoard]] - 1;\n          if (board[inter + dxNogoBoard] == color) {\n            if ((premierePierre[inter - dxNogoBoard] !=\n                 premierePierre[inter + dxNogoBoard]))\n              nb += nbPseudoLibertes[premierePierre[inter + dxNogoBoard]] - 1;\n            else\n              nb--;\n          }\n        } else if (board[inter + dxNogoBoard] == color) {\n          nb += 
nbPseudoLibertes[premierePierre[inter + dxNogoBoard]] - 1;\n        }\n      }\n    }\n\n    if (nb > 0)\n      return true;\n\n    return false;\n  }\n\n  bool atari(int p) {\n    if (nbPseudoLibertes[premierePierre[p]] > 4)\n      return false;\n    int premiere = premierePseudoLiberte[premierePierre[p]];\n    int lib = premiere >> 2;\n    int suivante = pseudoLiberteSuivante[premiere];\n\n    while (suivante != premiere) {\n      if ((suivante >> 2) != lib)\n        return false;\n      suivante = pseudoLiberteSuivante[suivante];\n    }\n\n    return true;\n  }\n\n  void ajouteChaine(int chaine1, int chaine2) {\n    int pierre = chaine1;\n\n    do {\n      premierePierre[pierre] = chaine2;\n      pierre = pierreSuivante[pierre];\n    } while (pierre != chaine1);\n\n    int suivante = pierreSuivante[chaine1];\n    pierreSuivante[chaine1] = pierreSuivante[chaine2];\n    pierreSuivante[chaine2] = suivante;\n\n    nbPierres[chaine2] += nbPierres[chaine1];\n\n    chaines[indiceChaine[chaine1]] = chaines[nbChaines - 1];\n    indiceChaine[chaines[nbChaines - 1]] = indiceChaine[chaine1];\n    nbChaines--;\n\n    if (nbPseudoLibertes[chaine1] > 0) {\n      if (nbPseudoLibertes[chaine2] == 0) {\n        premierePseudoLiberte[chaine2] = premierePseudoLiberte[chaine1];\n      } else {\n        int premiereChaine2 = premierePseudoLiberte[chaine2];\n        int suivanteChaine2 = pseudoLiberteSuivante[premiereChaine2];\n        int premiereChaine1 = premierePseudoLiberte[chaine1];\n        int derniereChaine1 = pseudoLibertePrecedente[premiereChaine1];\n        pseudoLiberteSuivante[premiereChaine2] = premiereChaine1;\n        pseudoLibertePrecedente[premiereChaine1] = premiereChaine2;\n        pseudoLiberteSuivante[derniereChaine1] = suivanteChaine2;\n        pseudoLibertePrecedente[suivanteChaine2] = derniereChaine1;\n      }\n    }\n    nbPseudoLibertes[chaine2] += nbPseudoLibertes[chaine1];\n  }\n\n  void otePseudoLiberte(int chaine, int lib) {\n    
nbPseudoLibertes[chaine]--;\n    if (nbPseudoLibertes[chaine] > 0) {\n      int precedente = pseudoLibertePrecedente[lib];\n      int suivante = pseudoLiberteSuivante[lib];\n      pseudoLiberteSuivante[precedente] = suivante;\n      pseudoLibertePrecedente[suivante] = precedente;\n      if (premierePseudoLiberte[chaine] == lib)\n        premierePseudoLiberte[chaine] = suivante;\n    }\n  }\n\n  void ajoutePseudoLiberte(int chaine, int lib) {\n    if (nbPseudoLibertes[chaine] == 0) {\n      premierePseudoLiberte[chaine] = lib;\n      pseudoLiberteSuivante[lib] = lib;\n      pseudoLibertePrecedente[lib] = lib;\n    } else {\n      int premiere = premierePseudoLiberte[chaine];\n      int suivante = pseudoLiberteSuivante[premiere];\n      pseudoLiberteSuivante[premiere] = lib;\n      pseudoLibertePrecedente[lib] = premiere;\n      pseudoLiberteSuivante[lib] = suivante;\n      pseudoLibertePrecedente[suivante] = lib;\n    }\n    nbPseudoLibertes[chaine]++;\n  }\n\n  void play(NogoMove c) {\n    // std::cerr << \" before:\" << std::endl;\n    // print(stderr);\n    // std::cerr << c.inter % Dx << \",\" << c.inter / Dx << std::endl;\n    joue(interMove[c.inter], c.color);\n    // std::cerr << \" after:\" << std::endl;\n    // print(stderr);\n    turn = opponent(turn);\n    if (length < MaxPlayoutLength) {\n      rollout[length] = c;\n      length++;\n    } else\n      fprintf(stderr, \"Pb play,\");\n  }\n\n  void joue(int inter, char color) {\n    nbPlay++;\n    board[inter] = color;\n    if (color == Black)\n      hash ^= HashArray[moveInter[inter]];\n    else\n      hash ^= HashArray[MaxIntersections + moveInter[inter]];\n\n    nbVides--;\n    indiceVide[vides[nbVides]] = indiceVide[inter];\n    vides[indiceVide[inter]] = vides[nbVides];\n\n    nbPierres[inter] = 1;\n    premierePierre[inter] = inter;\n    pierreSuivante[inter] = inter;\n    nbPseudoLibertes[inter] = 0;\n\n    indiceChaine[inter] = nbChaines;\n    chaines[nbChaines] = inter;\n    nbChaines++;\n\n    if 
(board[inter - 1] == color) {\n      if (premierePierre[inter] != premierePierre[inter - 1])\n        ajouteChaine(premierePierre[inter], premierePierre[inter - 1]);\n      otePseudoLiberte(premierePierre[inter], (inter << 2) | Gauche);\n    } else if (board[inter - 1] == Empty) {\n      ajoutePseudoLiberte(premierePierre[inter], ((inter - 1) << 2) | Droite);\n    } else if (board[inter - 1] != Exterieur) {\n      otePseudoLiberte(premierePierre[inter - 1], (inter << 2) | Gauche);\n    }\n\n    if (board[inter + 1] == color) {\n      if (premierePierre[inter] != premierePierre[inter + 1])\n        ajouteChaine(premierePierre[inter], premierePierre[inter + 1]);\n      otePseudoLiberte(premierePierre[inter], (inter << 2) | Droite);\n    } else if (board[inter + 1] == Empty) {\n      ajoutePseudoLiberte(premierePierre[inter], ((inter + 1) << 2) | Gauche);\n    } else if (board[inter + 1] != Exterieur) {\n      otePseudoLiberte(premierePierre[inter + 1], (inter << 2) | Droite);\n    }\n\n    if (board[inter - dxNogoBoard] == color) {\n      if (premierePierre[inter] != premierePierre[inter - dxNogoBoard])\n        ajouteChaine(\n            premierePierre[inter], premierePierre[inter - dxNogoBoard]);\n      otePseudoLiberte(premierePierre[inter], (inter << 2) | Haut);\n    } else if (board[inter - dxNogoBoard] == Empty) {\n      ajoutePseudoLiberte(\n          premierePierre[inter], ((inter - dxNogoBoard) << 2) | Bas);\n    } else if (board[inter - dxNogoBoard] != Exterieur) {\n      otePseudoLiberte(\n          premierePierre[inter - dxNogoBoard], (inter << 2) | Haut);\n    }\n\n    if (board[inter + dxNogoBoard] == color) {\n      if (premierePierre[inter] != premierePierre[inter + dxNogoBoard])\n        ajouteChaine(\n            premierePierre[inter], premierePierre[inter + dxNogoBoard]);\n      otePseudoLiberte(premierePierre[inter], (inter << 2) | Bas);\n    } else if (board[inter + dxNogoBoard] == Empty) {\n      ajoutePseudoLiberte(\n          
premierePierre[inter], ((inter + dxNogoBoard) << 2) | Haut);\n    } else if (board[inter + dxNogoBoard] != Exterieur) {\n      otePseudoLiberte(premierePierre[inter + dxNogoBoard], (inter << 2) | Bas);\n    }\n  }\n\n  int choisitUnCoup(char color) {\n    int debut = rand() % nbVides;\n\n    for (int i = debut; i < nbVides; i++)\n      if (legal(vides[i], color))\n        return vides[i];\n\n    for (int i = 0; i < debut; i++)\n      if (legal(vides[i], color))\n        return vides[i];\n\n    return -1;\n  }\n\n  /**/\n  int fastPlayout(int color) {\n    for (;;) {\n      int pos = choisitUnCoup(color);\n      if (pos == -1)\n        break;\n      joue(pos, color);\n      color = opponent(color);\n    }\n    if (color == Black)\n      return 1;\n    return 0;\n  }\n  /**/\n\n  void print(FILE* fp) {\n    int i;\n    fprintf(fp, \"       \");\n    for (i = 0; i < dxNogoBoard - 2; i++)\n      fprintf(fp, \"%-3c\", 'A' + i + (i > 7));\n    fprintf(fp, \"\\n\");\n    fprintf(fp, \"    /  \");\n    for (i = 0; i < dxNogoBoard - 2; i++)\n      fprintf(fp, \"-  \");\n    fprintf(fp, \"\\\\ \\n\");\n    for (i = start - 1; i <= end; i++) {\n      if (((i) % dxNogoBoard == 0))\n        fprintf(fp, \"%3d |  \", dyNogoBoard - 1 - (i / dxNogoBoard));\n      else if (((i + 1) % (dxNogoBoard) == 0))\n        fprintf(fp, \"| %2d\\n\", dyNogoBoard - 1 - (i / dxNogoBoard));\n      else if (board[i] == Empty)\n        fprintf(fp, \"+  \");\n      else if (board[i] == Black)\n        fprintf(fp, \"@  \");\n      else if (board[i] == White)\n        fprintf(fp, \"O  \");\n      else\n        fprintf(fp, \"%d  \", board[i]);\n    }\n    fprintf(fp, \"    \\\\  \");\n    for (i = 0; i < dxNogoBoard - 2; i++)\n      fprintf(fp, \"-  \");\n    fprintf(fp, \"/ \\n\");\n    fprintf(fp, \"       \");\n    for (i = 0; i < dxNogoBoard - 2; i++)\n      fprintf(fp, \"%-3c\", 'A' + i + (i > 7));\n    fprintf(fp, \"\\n\");\n    fprintf(fp, \"hash = %llu\\n\", hash);\n  }\n\n  bool won(int color) 
{\n    NogoMove moves[MaxLegalMoves];\n    int nb = legalNogoMoves(opponent(color), moves);\n    return nb == 0;\n  }\n\n  float evaluation(int color) {\n    NogoMove moves[MaxLegalMoves];\n    int nb = legalNogoMoves(turn, moves);\n    if (nb == 0) {\n      if (color == turn)\n        return -1000000.0;\n      else\n        return 1000000.0;\n    }\n    int nbOpponent = legalNogoMoves(opponent(turn), moves);\n    if (color == turn)\n      return (float)(nb - nbOpponent);\n    return (float)(nbOpponent - nb);\n  }\n\n  bool terminal() {\n    NogoMove moves[MaxLegalMoves];\n    int nb = legalNogoMoves(turn, moves);\n    return nb == 0;\n  }\n\n  int score() {\n    if (turn == Black)\n      return 1;\n    return 0;\n  }\n\n  int opponent(int joueur) {\n    if (joueur == White)\n      return Black;\n    return White;\n  }\n\n  int playout(int joueur) {\n    return fastPlayout(joueur);\n    NogoMove listeCoups[MaxLegalMoves];\n    while (true) {\n      int nb = legalNogoMoves(joueur, listeCoups);\n      if (nb == 0) {\n        if (joueur == Black)\n          return 1;\n        else\n          return 0;\n      }\n      int n = rand() % nb;\n      play(listeCoups[n]);\n      if (length >= MaxPlayoutLength - 20) {\n        return 0;\n      }\n      joueur = opponent(joueur);\n    }\n  }\n\n  float discountedPlayout(int joueur, int maxLength = MaxPlayoutLength - 20) {\n    NogoMove listeCoups[MaxLegalMoves];\n    while (true) {\n      int nb = legalNogoMoves(joueur, listeCoups);\n      if (nb == 0) {\n        if (joueur == Black)\n          return 1.0 / (length + 1);\n        else\n          return -1.0 / (length + 1);\n        // if (joueur == Black)\n        //   return 1.0 * (length + 1);\n        // else\n        //   return -1.0 * (length + 1);\n      }\n      int n = rand() % nb;\n      play(listeCoups[n]);\n      if (length >= maxLength) {\n        return 0;\n      }\n      joueur = opponent(joueur);\n    }\n  }\n};\n"
  },
  {
    "path": "src/games/tristannogo_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"time.h\"\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\n#include \"../core/game.h\"\n#include \"tristan_nogo.h\"\n\nconst int StateForTristannogoNumActions = Dx * Dy;\nconst int StateForTristannogoX = 5;\nconst int StateForTristannogoY = Dx;\nconst int StateForTristannogoZ = Dy;\nconst int NogoMaxLegalMoves = Dx * Dy;\n\nclass StateForTristannogo : public core::State, NogoBoard {\n public:\n  StateForTristannogo(int seed)\n      : State(seed) {\n  }\n\n  virtual ~StateForTristannogo() {\n  }\n\n  virtual void Initialize() override {\n    // People implementing classes should not have much to do in _moves; just\n    // _moves.clear().\n    _moves.clear();\n    // std::cout << \"OTGTristannogo initialize\" << std::endl;\n\n    // the features are just one number between 0 and 1 (the distance,\n    // normalized).\n    _featSize[0] = StateForTristannogoX;\n    _featSize[1] = StateForTristannogoY;\n    _featSize[2] = StateForTristannogoZ;\n\n    // size of the output of the neural network; this should cover the positions\n    // of actions (above).\n    _actionSize[0] = 1;\n    _actionSize[1] = Dx;\n    _actionSize[2] = Dy;\n\n    // _hash is an unsigned int, it has to be *unique*.\n    _hash = 0;\n    _status = GameStatus::player0Turn;\n    // std::cout << \"restart!\" << std::endl;\n    // _features is a vector representing the current state. It can\n    // (must...) be large for complex games; here just one number\n    // between 0 and 1. 
trivial case in dimension 1.\n    _features.resize(StateForTristannogoX * StateForTristannogoY *\n                     StateForTristannogoZ);\n    /*\n        // _features[:_hash] = 1\n        for (int i = 0; i < DISTANCE; i++) {\n          _features[i] = (float(_hash) > float(i)) ? 1. : 0.;\n        }\n    */\n    init();\n    findFeatures();\n    findActions(White);\n    fillFullFeatures();\n  }\n\n  virtual std::unique_ptr<core::State> clone_() const override {\n    return std::make_unique<StateForTristannogo>(*this);\n  }\n\n  void findActions(int color) {\n    NogoMove moves[NogoMaxLegalMoves];\n    int nb = legalNogoMoves(color, moves);\n\n    clearActions();\n    for (int i = 0; i < nb; i++) {\n      int x = moves[i].inter % Dx;\n      int y = moves[i].inter / Dx;\n      addAction(0, x, y);\n    }\n  }\n\n  void findFeatures() {\n    if ((_status == GameStatus::player0Win) ||\n        (_status == GameStatus::player1Win))\n      return;\n    if (_status == GameStatus::player0Turn) {  // Black\n      for (int i = 0; i < 5 * Dx * Dy; i++)\n        _features[i] = 0;\n      for (int i = 0; i < Dx * Dy; i++)\n        if (board[interMove[i]] == Black)\n          _features[i] = 1;\n      for (int i = 0; i < Dx * Dy; i++)\n        if (board[interMove[i]] == White)\n          _features[Dx * Dy + i] = 1;\n      for (int i = 0; i < Dx * Dy; i++)\n        if (legal(interMove[i], Black))\n          _features[3 * Dx * Dy + i] = 1;\n      for (int i = 0; i < Dx * Dy; i++)\n        if (legal(interMove[i], White))\n          _features[4 * Dx * Dy + i] = 1;\n    } else {\n      assert(_status == GameStatus::player1Turn);  // White\n      for (int i = 0; i < 5 * Dx * Dy; i++)\n        _features[i] = 0;\n      for (int i = 0; i < Dx * Dy; i++)\n        if (board[interMove[i]] == Black)\n          _features[i] = 1;\n      for (int i = 0; i < Dx * Dy; i++)\n        if (board[interMove[i]] == White)\n          _features[Dx * Dy + i] = 1;\n      for (int i = 0; i < Dx * Dy; i++)\n   
     _features[2 * Dx * Dy + i] = 1;\n      for (int i = 0; i < Dx * Dy; i++)\n        if (legal(interMove[i], Black))\n          _features[3 * Dx * Dy + i] = 1;\n      for (int i = 0; i < Dx * Dy; i++)\n        if (legal(interMove[i], White))\n          _features[4 * Dx * Dy + i] = 1;\n    }\n  }\n  // The action just decreases the distance and swaps the turn to play.\n  virtual void ApplyAction(const _Action& action) override {\n\n    NogoMove m;\n    // print(stdout);\n    if (_status == GameStatus::player0Turn) {  // Black\n      m.color = Black;\n      m.inter = action.GetY() + Dx * action.GetZ();\n      play(m);\n      findActions(White);\n      if (won(Black))\n        _status = GameStatus::player0Win;\n      else\n        _status = GameStatus::player1Turn;\n    } else {  // White\n      m.color = White;\n      m.inter = action.GetY() + Dx * action.GetZ();\n      play(m);\n      findActions(Black);\n      if (won(White))\n        _status = GameStatus::player1Win;\n      else\n        _status = GameStatus::player0Turn;\n    }\n    findFeatures();\n    _hash = hash;\n    fillFullFeatures();\n  }\n\n  // For this trivial example we just compare to random play. Ok, this is not\n  // really a good action.\n  // By the way we need a good default DoGoodAction, e.g. one-ply at least.\n  // FIXME\n  virtual void DoGoodAction() override {\n\n    int i = rand() % _legalActions.size();\n    const _Action& a = _legalActions[i];\n    ApplyAction(a);\n  }\n\n  std::string stateDescription() const override {\n    std::string s(\"  0 1 2 3 4 5 6 7 8\\n\");\n    for (int i = 0; i < Dy; i++) {\n      s += std::to_string(i);\n      for (int j = 0; j < Dx; j++) {\n        if (board[interMove[i * Dy + j]] == Black)\n          s += \" @\";\n        else if (board[interMove[i * Dy + j]] == White)\n          s += \" O\";\n        else if (board[interMove[i * Dy + j]] == Empty)\n          s += \" +\";\n      }\n      s += \" \\n\";\n    }\n    s += \" \\n\";\n    return s;\n  }\n};\n"
  },
  {
    "path": "src/games/weakschur/SchurMatrix.cpp",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"SchurMatrix.hpp\"\n\n#include <cassert>\n\nSchurMatrix::SchurMatrix(int nbSubsets, int maxNumber) :\n    _nbSubsets(nbSubsets),\n    _maxNumber(maxNumber),\n    _data(_nbSubsets*_maxNumber)\n{}\n\nvoid SchurMatrix::reset(bool value) {\n    std::fill(_data.begin(), _data.end(), value);\n}\n\nbool SchurMatrix::get(int i, int j) const {\n    assert(i >= 1);\n    assert(i <= _nbSubsets);\n    assert(j >= 1);\n    assert(j <= _maxNumber);\n    return _data[(i-1) * _maxNumber + (j-1)];\n}\n\nvoid SchurMatrix::set(int i, int j, bool b) {\n    assert(i >= 1);\n    assert(i <= _nbSubsets);\n    assert(j >= 1);\n    assert(j <= _maxNumber);\n    _data[(i-1) * _maxNumber + (j-1)] = b;\n}\n\nconst std::vector<bool> & SchurMatrix::data() const {\n    return _data;\n}\n\n"
  },
  {
    "path": "src/games/weakschur/SchurMatrix.hpp",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <vector>\n\n// A _nbSubsets x _maxNumber matrix storing if subset i can host number j,\n// where (i, j) in [1, _nbSubsets] x [1, _maxNumber]\nclass SchurMatrix {\n    private:\n        int _nbSubsets;\n        int _maxNumber;\n        std::vector<bool> _data;\n    public:\n        SchurMatrix(int nbSubsets, int maxNumber);\n        void reset(bool value);\n        bool get(int i, int j) const;\n        void set(int i, int j, bool b);\n        const std::vector<bool> & data() const;\n};\n\n"
  },
  {
    "path": "src/games/weakschur/SchurVector.cpp",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"SchurVector.hpp\"\n\n#include <cassert>\n\nSchurVector::SchurVector(int maxIndex) :\n    _maxIndex(maxIndex),\n    _data(maxIndex) {\n}\n\nvoid SchurVector::reset(int value) {\n    std::fill(_data.begin(), _data.end(), value);\n}\n\nint SchurVector::get(int i) const {\n    assert(i >= 1);\n    assert(i <= _maxIndex);\n    return _data[i-1];\n}\n\nvoid SchurVector::set(int i, int value) {\n    assert(i >= 1);\n    assert(i <= _maxIndex);\n    _data[i-1] = value;\n}\n\nconst std::vector<int> & SchurVector::data() const {\n    return _data;\n}\n\n"
  },
  {
    "path": "src/games/weakschur/SchurVector.hpp",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <vector>\n\n// 1-based indexed vector of int\nclass SchurVector {\n    private:\n        int _maxIndex;\n        std::vector<int> _data;\n    public:\n        SchurVector(int maxIndex);\n        void reset(int value);\n        int get(int i) const;\n        void set(int i, int value);\n        const std::vector<int> & data() const;\n};\n\n"
  },
  {
    "path": "src/games/weakschur/WeakSchur.cpp",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"WeakSchur.hpp\"\n\n#include <iostream>\n#include <cassert>\n\nWeakSchur::WeakSchur(int nbSubsets, int maxNumber) :\n    _nbSubsets(nbSubsets),\n    _maxNumber(maxNumber),\n    _freeActions(nbSubsets, maxNumber),\n    _nbFreeNumbersOfSubset(nbSubsets),\n    _nbNumbersOfSubset(nbSubsets),\n    _nbFreeSubsetsOfNumber(maxNumber),\n    _subsetOfNumber(maxNumber)\n{\n    reset();\n}\n\nvoid WeakSchur::reset() {\n    _freeActions.reset(true);\n    _nbFreeActions = _nbSubsets * _maxNumber;\n    _nbFreeNumbersOfSubset.reset(_maxNumber);\n    _nbNumbersOfSubset.reset(0);\n    _nbFreeSubsetsOfNumber.reset(_nbSubsets);\n    _subsetOfNumber.reset(0);\n    _score = 0;\n    applyAction({1, 1});\n}\n\nbool WeakSchur::isTerminated() const {\n    return _score == _maxNumber or _nbFreeSubsetsOfNumber.get(_score+1) == 0;\n}\n\nint WeakSchur::getScore() const {\n    return _score;\n}\n\nint WeakSchur::getFirstLegalNumber() const {\n    assert(not isTerminated());\n    return _score + 1;\n}\n\nint WeakSchur::getMostConstrainedNumber() const {\n    assert(not isTerminated());\n    int bestNumber = _score + 1;\n    int nbSubsetsBest = _nbFreeSubsetsOfNumber.get(bestNumber);\n    for (int number = bestNumber + 1; number <  _maxNumber; number++) {\n        const int nbSubsets = _nbFreeSubsetsOfNumber.get(number);\n        if (nbSubsets != 0 and nbSubsets < nbSubsetsBest) {\n            nbSubsetsBest = nbSubsets;\n            bestNumber = number;\n        }\n    }\n    return bestNumber;\n}\n\nstd::vector<int> WeakSchur::getLegalSubsets(int number) const {\n    const int nbSubsetsOfNumber = _nbFreeSubsetsOfNumber.get(number);\n    std::vector<int> legalSubsets;\n    legalSubsets.reserve(nbSubsetsOfNumber);\n    for (int i = 1; i <= _nbSubsets; i++)\n  
      //if ((_freeActions.get(i, number) and i<=2) or \n\t//\t(_freeActions.get(i, number) and i>2 and _nbNumbersOfSubset.get(i-1) > 0))\n        if (_freeActions.get(i, number) and (i<=2 or _nbNumbersOfSubset.get(i-1) > 0))\n            legalSubsets.push_back(i);\n    //assert(int(legalSubsets.size()) == nbSubsetsOfNumber);\n    // TODO\n    return legalSubsets;\n}\n\nstd::pair<int, int> WeakSchur::getLongestSeq(int subset) const {\n    int longest = 0;\n    int nbLongest = 0;\n    int currLongest = 0;\n    for (int n=1; n<=_maxNumber; n++) {\n        if (_subsetOfNumber.get(n) == subset) {\n            currLongest++;\n        }\n        else {\n            if (currLongest > longest) {\n                longest = currLongest;\n                nbLongest = 1;\n            }\n            else if (currLongest == longest) {\n                nbLongest++;\n            }\n            currLongest = 0;\n        }\n    }\n    return {longest, nbLongest};\n}\n\nvoid WeakSchur::applyAction(const Action & action) {\n\n    const int subset = action.first;\n    const int number = action.second;\n\n    // assert action\n    assert(subset >= 1);\n    assert(subset <= _nbSubsets);\t\n    assert(number >= 1);\n    assert(number <= _maxNumber);\t\n    assert(_nbFreeSubsetsOfNumber.get(number) > 0);\n    assert(_subsetOfNumber.get(number) == 0);\n    assert(_freeActions.get(subset, number));\n\n    // update subset data\n    for (int s = 1; s <= _nbSubsets; s++)\n        _freeActions.set(s, number, false);\n    _nbFreeActions -= _nbFreeSubsetsOfNumber.get(number);\n    _nbFreeSubsetsOfNumber.set(number, 0);\n    _nbNumbersOfSubset.set(subset, _nbNumbersOfSubset.get(subset) + 1);\n\n    // update number data\n    for (int n = 1; n <= _maxNumber; n++) {\n        if (subset == _subsetOfNumber.get(n)) {\n            removeAction({subset, n + number});\n            removeAction({subset, n - number});\n            removeAction({subset, number - n});\n\n            
_nbFreeNumbersOfSubset.set(subset, \n                    _nbFreeNumbersOfSubset.get(subset)-1);\n        }\n    }\n\n    // store action\n    _subsetOfNumber.set(number, subset);\n\n    // update score\n    while (_score < _maxNumber and _subsetOfNumber.get(_score+1) != 0)\n        _score++;\n\n}\n\nvoid WeakSchur::removeAction(const Action & action) {\n    const int subset = action.first;\n    const int numberToRemove = action.second;\n    if (numberToRemove >= 1 and numberToRemove <= _maxNumber\n            and _freeActions.get(subset, numberToRemove)) {\n        _freeActions.set(subset, numberToRemove, false);\n        _nbFreeActions--;\n        const int oldNbSubsets = _nbFreeSubsetsOfNumber.get(numberToRemove);\n        _nbFreeSubsetsOfNumber.set(numberToRemove, oldNbSubsets - 1);\n    }\n}\n\nstd::ostream & operator<<(std::ostream & os, const WeakSchur & ws) {\n\n    os << \"freeActions: \" << std::endl;\n    for (int s = 1; s <= ws._nbSubsets; s++) {\n        os << \" \";\n        for (int n = 1; n <= ws._maxNumber; n++) {\n            os << \" \" << ws._freeActions.get(s, n);\n        }\n        os << std::endl;\n    }\n\n    os << \"nbFreeActions: \\n  \" << ws._nbFreeActions << std::endl;\n\n    os << \"nbSubsetsOfNumber:\\n \";\n    for (int n = 1; n <= ws._maxNumber; n++) {\n        os << \" \" << ws._nbFreeSubsetsOfNumber.get(n);\n    }\n    os << std::endl;\n\n    os << \"subsetOfNumber:\\n \";\n    for (int n = 1; n <= ws._maxNumber; n++) {\n        os << \" \" << ws._subsetOfNumber.get(n);\n    }\n    os << std::endl;\n\n    os << \"score: \\n  \" << ws._score << std::endl;\n\n    os << \"subsets: \" << std::endl;\n    for (int s = 1; s <= ws._nbSubsets; s++) {\n        os << \" \";\n        for (int n = 1; n <= ws._maxNumber; n++) {\n            if (ws._subsetOfNumber.get(n) == s)\n                os << \" \" << n;\n        }\n        os << std::endl;\n    }\n    os << std::endl;\n\n    return os;\n}\n\n"
  },
  {
    "path": "src/games/weakschur/WeakSchur.hpp",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"SchurMatrix.hpp\"\n#include \"SchurVector.hpp\"\n\n#include <iostream>\n\nclass WeakSchur {\n\n    // Action = (subset, number)\n    using Action = std::pair<int, int>;\n\n    public:\n        int _nbSubsets;\n        int _maxNumber;\n\n    protected:\n    public:  // todo getters ?\n        SchurMatrix _freeActions;\n\n        int _nbFreeActions;\n\n        // how many possible numbers, for each subset\n        SchurVector _nbFreeNumbersOfSubset; // TODO\n        SchurVector _nbNumbersOfSubset;\n\n        // how many possible subsets, for each number\n        SchurVector _nbFreeSubsetsOfNumber;\n\n        // vector storing, for each number, the selected subset\n        // (or 0 where no subset has been selected)\n        SchurVector _subsetOfNumber;\n\n        // the current score is the max of the successive numbers validly\n        // placed in the subsets, i.e. n where 1, ..., n are validly placed\n        // in the subsets and (n+1) is not\n        int _score;\n\n    public:\n        WeakSchur(int nbSubsets, int maxNumber);\n        void reset();\n\n        void applyAction(const Action & action);\n        bool isTerminated() const;\n        int getScore() const;\n\n        int getFirstLegalNumber() const;\n        int getMostConstrainedNumber() const;\n        std::vector<int> getLegalSubsets(int number) const;\n\n        std::pair<int, int> getLongestSeq(int subset) const;\n\n        friend std::ostream & operator<<(std::ostream & os, const WeakSchur & ws);\n\n    protected:\n        void removeAction(const Action & action);\n\n};\n\nstd::ostream & operator<<(std::ostream & os, const WeakSchur & ws);\n\n"
  },
  {
    "path": "src/games/weakschur/weakschur_state.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"../../core/state.h\"\n#include \"WeakSchur.hpp\"\n// #include <boost/stacktrace.hpp> // TODO #ifdef\n#include <sstream>\n\nnamespace weakschur {\n\ntemplate <int NBSUBSETS, int MAXNUMBER> class State : public core::State {\n private:\n  WeakSchur _weakschur;\n\n public:\n  State(int seed);\n  bool isOnePlayerGame() const override;\n  void Initialize() override;\n  void ApplyAction(const _Action& action) override;\n  void DoGoodAction() override;\n  float getReward(int player) const override final {\n    // if (player != 0)\n    //\tstd::cout << boost::stacktrace::stacktrace();\n    if (_weakschur.getScore() == MAXNUMBER) {\n      std::cout << \"Found Good Schur:\" << _weakschur << std::endl;\n      std::cerr << \"Found Good Schur:\" << _weakschur << std::endl;\n      abort();\n    }\n    float value = float(_weakschur.getScore()) / float(MAXNUMBER);\n    return player == 0 ? 
value : -value;\n  };\n  std::unique_ptr<core::State> clone_() const override;\n\n private:\n  std::string stateDescription() const override;\n  void findActions();\n};\n\n}  // namespace weakschur\n\n///////////////////////////////////////////////////////////////////////////////\n// weakschur::State\n///////////////////////////////////////////////////////////////////////////////\n\ntemplate <int NBSUBSETS, int MAXNUMBER>\nweakschur::State<NBSUBSETS, MAXNUMBER>::State(int seed)\n    : core::State(seed)\n    , _weakschur(NBSUBSETS, MAXNUMBER) {\n}\n\ntemplate <int NBSUBSETS, int MAXNUMBER>\nbool weakschur::State<NBSUBSETS, MAXNUMBER>::isOnePlayerGame() const {\n  return true;\n}\n\ntemplate <int NBSUBSETS, int MAXNUMBER>\nvoid weakschur::State<NBSUBSETS, MAXNUMBER>::DoGoodAction() {\n  DoRandomAction();\n}\n\ntemplate <int NBSUBSETS, int MAXNUMBER>\nstd::unique_ptr<core::State> weakschur::State<NBSUBSETS, MAXNUMBER>::clone_()\n    const {\n  return std::make_unique<weakschur::State<NBSUBSETS, MAXNUMBER>>(*this);\n}\n\ntemplate <int NBSUBSETS, int MAXNUMBER>\nstd::string weakschur::State<NBSUBSETS, MAXNUMBER>::stateDescription() const {\n  std::ostringstream oss;\n  oss << _weakschur;\n  return oss.str();\n}\n\ntemplate <int NBSUBSETS, int MAXNUMBER>\nvoid weakschur::State<NBSUBSETS, MAXNUMBER>::Initialize() {\n\n  // _weakschur\n  _weakschur.reset();\n\n  // state\n  _hash = 0;\n  _status = GameStatus::player0Turn;\n\n  // features\n  // TODO channels\n  _featSize = {9, NBSUBSETS, MAXNUMBER};\n  _features =\n      std::vector<float>(_featSize[0] * _featSize[1] * _featSize[2], 0.f);\n  _features[0] = 1.f;  // _weakschur always does the first action {1, 1}\n\n  const int channelSize = NBSUBSETS * MAXNUMBER;\n\n  // 1 features: i / first(t)\n  // 2 features: longest seq(t) / maxnumber\n  // 3 features: #longest\n\n  // 4 features: #possible for i / nbsubsets\n  for (int i = 0; i < channelSize; i++)\n    _features[channelSize * 4 + i] = 1.f;\n  // 5 features: #possible 
for t / maxnumber\n  for (int i = 0; i < channelSize; i++)\n    _features[channelSize * 5 + i] = (MAXNUMBER - 1) / float(MAXNUMBER);\n\n  // 6 features: board (t, i-1)\n  // 7 features: board (t, i-2)\n  // 8 features: board (t, i-3)\n  fillFullFeatures();\n\n  // actions\n  _actionSize = {1, NBSUBSETS + 1, MAXNUMBER + 1};\n  findActions();\n}\n\ntemplate <int NBSUBSETS, int MAXNUMBER>\nvoid weakschur::State<NBSUBSETS, MAXNUMBER>::ApplyAction(\n    const _Action& action) {\n\n  const int channelSize = NBSUBSETS * MAXNUMBER;\n\n  auto feature = [channelSize, this](int c, int subset, int number) -> float& {\n    return this\n        ->_features[channelSize * c + (subset - 1) * MAXNUMBER + number - 1];\n  };\n\n  // update weakschur\n  assert(not _weakschur.isTerminated());\n  int subset = action.GetY();\n  int number = action.GetZ();\n  _weakschur.applyAction({subset, number});\n\n  // update status\n  if (_weakschur.isTerminated()) {\n    // std::cout << \"WS:\" << _weakschur.getScore() << \",\" << MAXNUMBER <<\n    // std::endl;\n    _status = _weakschur.getScore() == MAXNUMBER ? 
GameStatus::player0Win\n                                                 : GameStatus::player1Win;\n  }\n\n  // update state\n  int k = (subset - 1) * MAXNUMBER + (number - 1);\n  assert(_features[k] == 0.f);\n  _features[k] = 1.f;\n\n  // 1 features: i / first(t)\n  int firstT = MAXNUMBER + 1;\n  for (int n = 1; n <= MAXNUMBER; n++) {\n    if (_weakschur._subsetOfNumber.get(n) == subset) {\n      firstT = n;\n      break;\n    }\n  }\n  for (int n = 1; n <= MAXNUMBER; n++) {\n    feature(1, subset, n) = n / float(firstT);\n  }\n\n  // 2 features: longest seq(t) / maxnumber\n  // 3 features: #longest\n  auto longestAndNb = _weakschur.getLongestSeq(subset);\n  for (int n = 1; n <= MAXNUMBER; n++) {\n    feature(2, subset, n) = longestAndNb.first / float(MAXNUMBER);\n    feature(3, subset, n) = longestAndNb.second / float(MAXNUMBER);\n  }\n\n  // 4 features: #possible for i / nbsubsets\n  for (int n = 1; n <= MAXNUMBER; n++)\n    if (_weakschur._subsetOfNumber.get(n) != 0)\n      feature(4, subset, n) =\n          _weakschur.getLegalSubsets(n).size() / float(NBSUBSETS);\n    else\n      feature(4, subset, n) = 0.f;\n\n  // 5 features: #possible for t / maxnumber\n  for (int n = 1; n <= MAXNUMBER; n++)\n    for (int s = 1; s <= NBSUBSETS; s++)\n      feature(5, s, n) =\n          _weakschur._nbFreeNumbersOfSubset.get(s) / float(MAXNUMBER);\n\n  // 6 features: board (t, i-1)\n  for (int n = 2; n <= MAXNUMBER; n++)\n    for (int s = 1; s <= NBSUBSETS; s++)\n      feature(6, s, n) = _weakschur._subsetOfNumber.get(n - 1) == s ? 1.f : 0.f;\n\n  // 7 features: board (t, i-2)\n  for (int n = 3; n <= MAXNUMBER; n++)\n    for (int s = 1; s <= NBSUBSETS; s++)\n      feature(7, s, n) = _weakschur._subsetOfNumber.get(n - 2) == s ? 1.f : 0.f;\n\n  // 8 features: board (t, i-3)\n  for (int n = 4; n <= MAXNUMBER; n++)\n    for (int s = 1; s <= NBSUBSETS; s++)\n      feature(8, s, n) = _weakschur._subsetOfNumber.get(n - 3) == s ? 
1.f : 0.f;\n\n  fillFullFeatures();\n\n  // update actions\n  findActions();\n}\n\ntemplate <int NBSUBSETS, int MAXNUMBER>\nvoid weakschur::State<NBSUBSETS, MAXNUMBER>::findActions() {\n  // TODO multiple \"most contrained\" numbers ?\n\n  clearActions();\n  if (not _weakschur.isTerminated()) {\n\n    // get possible numbers\n    int number1 = _weakschur.getFirstLegalNumber();\n    int number2 = _weakschur.getMostConstrainedNumber();\n\n    // get subsets and update actions\n    if (number1 == number2) {\n      std::vector<int> subsets1 = _weakschur.getLegalSubsets(number1);\n      _legalActions.reserve(subsets1.size());\n      for (unsigned k = 0; k < subsets1.size(); ++k)\n        addAction(0, subsets1[k], number1);\n    } else {\n      std::vector<int> subsets1 = _weakschur.getLegalSubsets(number1);\n      std::vector<int> subsets2 = _weakschur.getLegalSubsets(number2);\n      _legalActions.reserve(subsets1.size() + subsets2.size());\n      for (unsigned k = 0; k < subsets1.size(); ++k)\n        addAction(0, subsets1[k], number1);\n      for (unsigned k = 0; k < subsets2.size(); ++k)\n        addAction(0, subsets2[k], number2);\n    }\n  }\n}\n"
  },
  {
    "path": "src/games/yinsh.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// testing push\n#include \"yinsh.h\"\n#include <signal.h>\n#include <unistd.h>\n// #include <iostream>\n\n// PAKKA GALTI\n// FIX LATER\n// premature optimisation is the source of all evil\n// relisting the types for reference\n// std::vector<float> _features;  // neural network input\n// std::vector<std::shared_ptr<_Action>> _legalActions;\n// DNU: shared pointer\n\n// draw ka rule is different- no of rings pe aa jaati hai baat\n// namespace Yinsh{\n\nuint64_t StateForYinsh::hash_table[5][BOARD_X][BOARD_Y] = {0};\nstd::once_flag StateForYinsh::table_flag = once_flag();\n\nstring StateForYinsh::stateDescription(void) const {\n  return \"empty_state\";\n}\nstring StateForYinsh::actionsDescription(void) const {\n  return \"empty_act\";\n}\n\nchar StateForYinsh::map_piece_to_char(int p) const {\n  // printf(\"enter:    map_piece_to_char\\n\");\n  char ans;\n  if (p == (int)(piece::invalid))\n    ans = ' ';\n  else if (p == (int)(piece::p0_marker))\n    ans = 'a';\n  else if (p == (int)(piece::p0_ring))\n    ans = 'A';\n  else if (p == (int)(piece::p1_marker))\n    ans = 'b';\n  else if (p == (int)(piece::p1_ring))\n    ans = 'B';\n  else if (p == (int)(piece::empty))\n    ans = '.';\n  else\n    ans = '!';\n  // printf(\"leave:    map_piece_to_char\\n\");\n  return ans;\n}\n\nvoid StateForYinsh::set_vars() {\n  // printf(\"enter:    set_vars\\n\");\n\n  // my_rings starts with p0\n  // cout<<\"player is \"<<player<<endl;\n  if (_status == GameStatus::player0Turn)\n    player = 0;\n  else if (_status == GameStatus::player1Turn)\n    player = 1;\n  else {\n    // means that either the state is someone's win or tie , so it shouldnt have\n    // entered this func\n    // cout<<\"Game already over\"<<endl;//PE\n  }\n  // 2,3,4,5, m, r, m, r\n  
my_marker = 2 * player + 2;\n  my_ring = 2 * player + 3;\n  opp_marker = 6 - my_marker;  // PE\n  opp_ring = 8 - my_ring;\n\n  // player = 1;\n  // if (player==0){\n  //   my_rings = &rings[0];\n  //   opp_rings = &rings[1];\n  // }\n  // else{\n  //   my_rings = &rings[1];\n  //   opp_rings = &rings[0];\n  // }\n  // printf(\"leave:    set_vars\\n\");\n}\nvoid StateForYinsh::fill_hash_table() {\n  std::random_device rd;\n  std::default_random_engine generator(rd());\n  generator.seed(0);\n  std::uniform_int_distribution<long long unsigned> distribution(\n      0, 0xFFFFFFFFFFFFFFFF);\n\n  for (int i = 0; i < 5; i++) {\n    for (int j = 0; j < BOARD_X; j++) {\n      for (int k = 0; k < BOARD_Y; k++) {\n        hash_table[i][j][k] = (uint64_t)(distribution(generator));\n      }\n    }\n  }\n}\nvoid StateForYinsh::Initialize() {\n  // printf(\"enter:    Initialize\\n\");\n\n  _moves.clear();  // DNU\n  _status = GameStatus::player0Turn;\n  _featSize[0] = NUM_PIECES;\n  _featSize[1] = BOARD_X;\n  _featSize[2] = BOARD_Y;\n\n  _actionSize[0] = NUM_ACTIONS;\n  _actionSize[1] = BOARD_X;\n  _actionSize[2] = BOARD_Y;\n\n  _hash = 0ULL;  // DNU\n  _status = GameStatus::player0Turn;\n  _features.clear();\n  _features.resize(_featSize[0] * _featSize[1] * _featSize[2]);\n\n  std::call_once(table_flag, fill_hash_table);\n  // initGame();\n  // hard code your init yahan\n  // TODO\n  // all empty pehle\n  for (int i = 0; i < (BOARD_X + 2); i++) {\n    for (int j = 0; j < (BOARD_Y + 2); j++) {\n      board[i][j] = (int)(piece::empty);\n      // mark invalid\n      if (i == 0 || j == 0 || i == 12 || j == 12 || (i - j) >= 6 ||\n          (j - i) >= 6) {\n        board[i][j] = (int)(piece::invalid);\n      }\n    }\n  }\n  // corners of my hexagon\n  board[1][1] = (int)(piece::invalid);\n  board[1][6] = (int)(piece::invalid);\n  board[6][11] = (int)(piece::invalid);\n  board[11][11] = (int)(piece::invalid);\n  board[6][1] = (int)(piece::invalid);\n  board[11][6] = 
(int)(piece::invalid);\n\n  // rings[0].clear();\n  // // printf(\"LOOK HERE\n  // _____________________________________%d\\n\",rings[0].size() );\n  // rings[1].clear();\n  vector<tuple<int, int>> temp1, temp2;\n  // printf(\"num of rings are %d\\n\",rings.size() );\n  // sleep(10);\n  rings.clear();\n  // printf(\"num of rings are %d\\n\",rings.size() );\n  rings.push_back(temp1);\n  rings.push_back(temp2);\n\n  // printf(\"LOOK HERE\n  // _____________________________________%d\\n\",rings[1].size() );\n  initial_fill = 0;\n  places_filled = 0;\n  // rings[0].resize(5);\n  // rings[1].resize(5);\n  still_have_to_remove_ring = false;\n  still_have_to_remove_marker = false;\n  free_lunch = false;\n  // ended = false;\n\n  set_vars();\n  findActions();\n  findFeatures();\n  fillFullFeatures();\n  // printf(\"leave:    Initialize\\n\");\n}\nbool StateForYinsh::ended() {\n  // printf(\"enter:    ended\\n\");\n\n  if (_status == GameStatus::player0Win || _status == GameStatus::player1Win ||\n      _status == GameStatus::tie) {\n    // printf(\"leave:    ended\\n\");\n    return true;\n  }\n  // printf(\"leave:    ended\\n\");\n  return false;\n}\n\nvoid StateForYinsh::printCurrentBoard() const {\n  // printf(\"enter:    printCurrentBoard\\n\");\n  for (int j = 0; j < (BOARD_Y + 2); j++) {\n    for (int i = 0; i < (BOARD_X + 2); i++) {\n      cout << map_piece_to_char(board[i][BOARD_Y + 1 - j]) << \" \";\n    }\n    cout << endl;\n  }\n  cout << endl;\n  // printf(\"leave:    printCurrentBoard\\n\");\n}\ntuple<int, int> StateForYinsh::find_first_invalid(int x,\n                                                  int y,\n                                                  int d0,\n                                                  int d1) {\n  // printf(\"enter:    find_first_invalid\\n\");\n\n  /// PENDING, shayad zaroorat nhi\n  int ex_x, ex_y;\n  int i = d0;\n  int j = d1;\n  // PE- assumes that x and y are non extremes\n  for (int c = 0; c < 13; c++) {\n    if (board[x + 
c * i][y + c * j] == (int)(piece::invalid)) {\n      ex_x = x + c * i;\n      ex_y = y + c * j;\n      break;\n    }\n    if (c == 10) {\n      cout << \"Debug here\" << endl;\n    }\n  }\n  // printf(\"leave:    find_first_invalid\\n\");\n  return make_tuple(ex_x, ex_y);\n}\nvector<int> StateForYinsh::find_first_5_for_specific_pt(int x, int y) {\n  // printf(\"enter:    find_first_5_for_specific_pt\\n\");\n\n  // returns the start point and direction\n  int start_x, start_y;\n  for (int i = 0; i < 2; i++) {\n    for (int j = 0; j < 2; j++) {\n      if ((i + j) != 0) {\n        // my three directions to check\n        tuple<int, int> temp = find_first_invalid(x, y, -i, -j);\n        // since x,y is included it cant be invlaid and hence my func should\n        // work just fine\n        int count = 0;\n        int type;\n        start_x = get<0>(temp);\n        start_y = get<1>(temp);\n\n        // PE kis extreme se start kar raha hai\n        for (int k = 1; k < 13; k++) {\n          start_x =\n              start_x + i;  // one step in the direction with each iteration\n          start_y = start_y + j;\n          type = board[start_x][start_y];\n          if (type == (int)(piece::invalid)) {\n            break;\n          } else if (type == my_marker) {\n            count++;\n          } else {\n            count = 0;\n          }\n          if (count == 5) {\n            vector<int> answer;\n            answer.push_back(start_x - 4 * i);\n            answer.push_back(start_y - 4 * j);\n            answer.push_back(i);\n            answer.push_back(j);\n            // printf(\"leave:    find_first_5_for_specific_pt\\n\");\n            return answer;\n          }\n        }\n      }\n    }\n  }\n  vector<int> wrong;\n  printf(\"Wrong answer in find_first_5_for_specific_pt\\n\");\n  printf(\"x: %d and y: %d\\n\", x, y);\n  printCurrentBoard();\n  raise(SIGSEGV);\n  // printf(\"leave:    find_first_5_for_specific_pt\\n\");\n  return wrong;\n}\ntuple<int, int> 
StateForYinsh::map_num_to_direction(int n) {\n  // printf(\"enter:    map_num_to_direction\\n\");\n\n  // if else\n  // PENDING\n  tuple<int, int> ans;\n  if (n == 0)\n    ans = make_tuple(0, 1);\n  else if (n == 1)\n    ans = make_tuple(1, 1);\n  else if (n == 2)\n    ans = make_tuple(1, 0);\n  else if (n == 3)\n    ans = make_tuple(0, -1);\n  else if (n == 4)\n    ans = make_tuple(-1, -1);\n  else if (n == 5)\n    ans = make_tuple(-1, 0);\n  else {\n    cout << \"invalid number for direciton\" << endl;\n    // return NULL;\n    ans = make_tuple(-7, -7);\n  }\n  // printf(\"leave:    map_num_to_direction\\n\");\n  return ans;\n}\nint StateForYinsh::map_direction_to_num(int i, int j) {\n  // printf(\"enter:    map_direction_to_num\\n\");\n  int ans;\n  if (i == 0 and j == 1)\n    ans = 0;\n  else if (i == 1 and j == 1)\n    ans = 1;\n  else if (i == 1 and j == 0)\n    ans = 2;\n  else if (i == 0 and j == -1)\n    ans = 3;\n  else if (i == -1 and j == -1)\n    ans = 4;\n  else if (i == -1 and j == 0)\n    ans = 5;\n  else {\n    cout << \"invalid direciton\" << endl;\n    ans = -8;\n  }\n  // printf(\"leave:    map_direction_to_num\\n\");\n  return ans;\n}\nvector<vector<int>> StateForYinsh::find_all_5s(bool my) {\n  // printf(\"enter:    find_all_5s\\n\");\n\n  vector<vector<int>> all_5s;  // empty abhi\n  // 3 baaar likhlo same cheez kya dikat hai\n  // PENDING\n  int local_marker;\n  if (my) {\n    local_marker = my_marker;\n    // local_ring = my_ring;\n  } else {\n    local_marker = opp_marker;\n    // local_ring = opp_ring;\n  }\n  for (int d_x = 0; d_x < 2; d_x++) {\n    for (int d_y = 0; d_y < 2; d_y++) {\n      if ((d_x + d_y) != 0) {\n        // for all axes\n        // all pts on x axis and y\n        int x, y;\n        // bool start = false;\n        int count = 0;\n        for (int types = 0; types < 2; types++) {\n          for (int pt = 0; pt < 13; pt++) {\n            if (types == 0) {\n              x = 0;\n              y = pt;\n            } else 
{\n              if (pt == 0)\n                break;\n              x = pt;\n              y = 0;\n            }\n            // start = false;\n            count = 0;\n            int poi;\n            for (int jump = 0; jump < 13; jump++) {\n              if (x + jump * d_x > 12 || y + jump * d_y > 12)\n                break;\n              // this will definitely go out of bounds\n              // %13 lagaden yahan?\n              // PAKKA GALTI //PE\n              poi = board[(x + jump * d_x)][(y + jump * d_y)];\n              if (poi == local_marker)\n                count++;\n              else\n                count = 0;\n              if (count >= 5) {\n                // store\n                vector<int> temporary_vec;\n                temporary_vec.push_back((x + (jump - 4) * d_x));  // start_x\n                temporary_vec.push_back((y + (jump - 4) * d_y));  // start_y\n                temporary_vec.push_back(d_x);                     // d_x\n                temporary_vec.push_back(d_y);                     // d_y\n                all_5s.push_back(temporary_vec);\n              }\n            }\n          }\n        }\n      }\n    }\n  }\n  // printf(\"leave:    find_all_5s\\n\");\n  return all_5s;\n}\n// vector<vector<int>> StateForYinsh::find_all_5s(bool my){\n//   printf(\"enter:    find_all_5s\\n\");\n\n//   vector<vector<int>> all_5s; //empty abhi\n//   //3 baaar likhlo same cheez kya dikat hai\n//   //PENDING\n//   int local_marker;\n//   if(my){\n//     local_marker = my_marker;\n//     // local_ring = my_ring;\n//   }\n//   else{\n//     local_marker = opp_marker;\n//     // local_ring = opp_ring;\n//   }\n//   for(int d_x = 0; d_x < 2; d_x ++){\n//     for(int d_y = 0; d_y <2; d_y ++){\n//       if((d_x + d_y) != 0){\n//         //for all axes\n//         //all pts on x axis and y\n//         int x, y;\n//         // bool start = false;\n//         int count = 0;\n//         for(int types = 0 ; types<2; types ++){\n//           for(int pt = 
0; pt<13; pt++){\n//             if(types == 0){\n//               x = 0;\n//               y = pt;\n//             }\n//             else{\n//               x = pt;\n//               y = 0;\n//             }\n//             // start = false;\n//             count = 0;\n//             int poi;\n//             for(int jump = 0; jump<13;\n// jump++){\n//               //this will definitely go out of\n// bounds\n//               // %13 lagaden yahan?\n//               //PAKKA GALTI //PE\n//               poi = board[(x + jump*d_x)%13][(y +\n// jump*d_y)%13];               if(poi == local_marker) count++; else count = 0;\n// if (count\n// >=5){\n//                 //store\n//                 vector<int>\n// temporary_vec;                 temporary_vec.push_back((x +\n// (jump-4)*d_x)%13);//start_x\n//                 temporary_vec.push_back((y +\n// (jump-4)*d_y)%13);//start_y temporary_vec.push_back(d_x);//d_x\n//                 temporary_vec.push_back(d_y);//d_y\n//                 all_5s.push_back(temporary_vec);\n//               }\n//             }\n//           }\n//         }\n//       }\n//     }\n//   }\n//   printf(\"leave:    find_all_5s\\n\");\n//   return all_5s;\n// }\nvoid StateForYinsh::ApplyAction(const _Action& action) {\n  // printf(\"enter:    ApplyAction\\n\");\n\n  // fetch args\n  // for every update manage hash\n  set_vars();\n  // printCurrentBoard();\n  // printf(\"following move was played by player %d\\n\", player);\n  int action_num = action.GetX();\n  int x = action.GetY() + 1;  // PE\n  int y = action.GetZ() + 1;  // PE\n  // printf(\"no of rings left %d\\n\", ((rings[player]).size()));\n  // decide player_number\n  // set_vars();\n  // since _status is a protected variable StateForYinsh being a subclass should\n  // be able to access\n\n  // depending on the action num do different things\n  // till 2:30\n  // cout<<\"for p0\";\n  // for(int i=0; i<(rings[0]).size(); i++){\n  // printf(\"(%d, %d)  \", 
get<0>((rings[0])[i]),get<1>((rings[0])[i]));\n  // }\n  // cout<<endl;\n  // cout<<\"for p1\";\n  // for(int i=0; i<(rings[1]).size(); i++){\n  // printf(\"(%d, %d)  \", get<0>((rings[1])[i]),get<1>((rings[1])[i]));\n  // }\n  // cout<<endl;\n\n  // remember to maintain all state variables\n  // cout<<\"on enter\"<<endl;\n  // printCurrentBoard();\n  if (action_num == 56) {\n    // if(initial_fill==0){\n    //   // rings[0].clear();\n    //   // rings[1].clear();\n    // }\n    // simply place the ring on the board\n    //\n    // cout<<\"debug\"<<endl;\n    _hash ^= hash_table[board[x][y] - 1][x - 1][y - 1];\n    board[x][y] = my_ring;\n    _hash ^= hash_table[board[x][y] - 1][x - 1][y - 1];\n\n    // assignment\n    // _hash ^= hash_table[][][];\n    tuple<int, int> placed_ring(x, y);\n    // if(player == 0) rings[0].push_back(placed_ring);\n    // else rings[1].push_back(placed_ring);\n    // cout<<\"placed at \"<<x<<\" \"<<y<<endl;\n    // printf(\"%p\\n\",my_rings );\n\n    if (rings[player].size() >= 5) {\n      printf(\"WRONG NUM OF RINGS\\n\");\n      raise(SIGSEGV);\n    }\n    (rings[player]).push_back(placed_ring);\n    // cout<<\"debug\"<<endl;\n\n    initial_fill++;\n    places_filled++;\n\n    // flip turn\n    if (player == 0)\n      _status = GameStatus::player1Turn;\n    else\n      _status = GameStatus::player0Turn;\n  } else if (action_num == 57) {\n    // ring selection for removal\n    if (board[x][y] != my_ring) {\n      cout << \"trying to remove something which is not my ring\" << endl;\n      raise(SIGSEGV);\n    }\n    _hash ^= hash_table[board[x][y] - 1][x - 1][y - 1];\n    board[x][y] = (int)(piece::empty);\n    _hash ^= hash_table[board[x][y] - 1][x - 1][y - 1];\n\n    // remove it from my vector_list too\n    // cout<<\"ring removed from \"<<x<<\" \"<<y<<endl;\n    for (int i = 0; i < (int)rings[player].size(); i++) {\n      int a, b;\n      a = get<0>((rings[player])[i]);\n      b = get<1>((rings[player])[i]);\n      if (a == x && b 
== y)\n        (rings[player]).erase((rings[player]).begin() + i);\n    }\n\n    still_have_to_remove_ring = false;\n    still_have_to_remove_marker = true;\n    places_filled--;\n    // dont change turn\n\n  } else if (action_num == 58) {\n    // sequence selection\n    // find the 5 and then delete\n    // for all directions '\n    // cout<<\"LOOK HERE\"<<endl;\n    vector<int> temp;\n    // temp = find_first_5_for_specific_pt(x,y);\n    temp = find_first_5_for_specific_pt(x, y);\n    // cout<<\"hey\"<<endl;\n    if (temp.size() == 1) {\n      cout << \"couldnt find 5, shouldnt have happened\" << endl;\n    }\n    int start_x = temp[0];\n    int start_y = temp[1];\n    int dir_x = temp[2];\n    int dir_y = temp[3];\n    // vector<vector<int>> temp;\n    // // temp = find_first_5_for_specific_pt(x,y);\n    // temp = find_all_5s(true);\n    // if (temp.size() == 0 ){\n    //   cout<<\"couldnt find 5, shouldnt have happened\"<<endl;\n    // }\n    // int start_x = temp[0][0];\n    // int start_y = temp[0][1];\n    // int dir_x = temp[0][2];\n    // int dir_y = temp[0][3];\n\n    // cout<<\"sequence_removed, starting and ending were-\";\n    // printf(\"(%d ,%d) and (%d, %d) \",start_x,start_y, start_x + 4*dir_x,\n    // start_y + 4*dir_y);\n\n    for (int i = 0; i < 5; i++) {\n      int x1, y1;\n      x1 = start_x + i * dir_x;\n      y1 = start_y + i * dir_y;\n      _hash ^= hash_table[board[x1][y1] - 1][x1 - 1][y1 - 1];\n      board[x1][y1] = (int)(piece::empty);\n      _hash ^= hash_table[board[x1][y1] - 1][x1 - 1][y1 - 1];\n    }\n    places_filled -= 5;\n\n    still_have_to_remove_marker = false;\n    if ((rings[player]).size() == 2) {\n      // GAME OVER\n      // cout<<\"GAME OVER\"<<endl;\n      // cout<<\"player number \"<<player<<\" wins\"<<endl;\n      if (player == 0)\n        _status = GameStatus::player0Win;\n      else\n        _status = GameStatus::player1Win;\n      // DNU_ASK_SOMEONE\n      // rings[0].clear();\n      // rings[1].clear();\n      // 
raise(SIGSEGV);\n      // PROB in this route\n      fillFullFeatures();\n      // printf(\"leave:    ApplyAction\\n\");\n      return;\n      // NO DRAWS IN THIS GAME UNLESS AND UNTIL you fill all the board\n    }\n    // check for more sequences.\n    vector<vector<int>> temp2 = find_all_5s(true);\n    if (temp2.size() == 0) {\n      // change turn\n      if (!free_lunch) {\n        if (player == 0)\n          _status = GameStatus::player1Turn;\n        else\n          _status = GameStatus::player0Turn;\n      } else {\n        // continue you turn now\n        // cout<<\"FREE\"<<endl;\n        free_lunch = false;\n      }\n\n    } else {\n      still_have_to_remove_ring = true;\n      still_have_to_remove_marker = true;\n    }\n  } else if (action_num >= 0 && action_num < 56) {\n    // normal movement\n    // dont check for legality\n\n    int direction = action_num / 9;\n    int jump = action_num % 9 + 1;\n\n    tuple<int, int> dir = map_num_to_direction(direction);\n    int d_x = get<0>(dir);\n    int d_y = get<1>(dir);\n    // ismei invert kardo bas\n    // ring at x,y\n    // cout<<\"ring pos, direction and jump were-\";\n    // printf(\"(%d ,%d), (%d, %d) and %d respectively\\n\",x,y, d_x, d_y, jump);\n    if (board[x][y] == my_ring) {\n      _hash ^= hash_table[board[x][y] - 1][x - 1][y - 1];\n      board[x][y] = my_marker;\n      _hash ^= hash_table[board[x][y] - 1][x - 1][y - 1];\n    } else {\n      cout << \"trying to move non_ring \" << x << \" \" << y << \", actually found \"\n           << board[x][y] << endl;\n      raise(SIGSEGV);\n    }\n\n    int x1, y1;\n    for (int i = 1; i < jump; i++) {\n      int found = board[x + (i)*d_x][y + (i)*d_y];\n\n      if (found == (int)(piece::empty)) {\n        // do nothing\n      } else if (found == my_marker || found == opp_marker) {\n        // flip it\n\n        x1 = x + (i)*d_x;\n        y1 = y + (i)*d_y;\n        _hash ^= hash_table[board[x1][y1] - 1][x1 - 1][y1 - 1];\n        board[x1][y1] = 6 - found;\n 
       _hash ^= hash_table[board[x1][y1] - 1][x1 - 1][y1 - 1];\n      } else {\n        cout << \"invalid move, dude\" << endl;\n      }\n    }\n\n    x1 = x + jump * d_x;\n    y1 = y + jump * d_y;\n    _hash ^= hash_table[board[x1][y1] - 1][x1 - 1][y1 - 1];\n    board[x1][y1] = my_ring;\n    _hash ^= hash_table[board[x1][y1] - 1][x1 - 1][y1 - 1];\n    // update my_rings\n    for (int i = 0; i < (int)rings[player].size(); i++) {\n      int a, b;\n      a = get<0>((rings[player])[i]);\n      b = get<1>((rings[player])[i]);\n      if (a == x && b == y)\n        (rings[player]).erase((rings[player]).begin() + i);\n    }\n    (rings[player]).push_back(make_tuple(x + jump * d_x, y + jump * d_y));\n\n    // check for 5\n    places_filled++;\n    vector<vector<int>> all_5s = find_all_5s(true);\n    vector<vector<int>> all_5s_other = find_all_5s(false);\n    // cout<<\"places filled are \"<<places_filled<<endl;\n    if (all_5s.size() == 0) {\n      // cout<<\"hi1\"<<endl;\n      if (all_5s_other.size() == 0) {\n        // continue\n        // cout<<\"hi3\"<<endl;\n        if (places_filled == 85) {\n          if (rings[0].size() < rings[1].size())\n            _status = GameStatus::player0Win;\n          else if (rings[0].size() > rings[1].size())\n            _status = GameStatus::player1Win;\n          else\n            _status = GameStatus::tie;\n          // rings[0].clear();\n          // rings[1].clear();\n          // break;\n          // raise(SIGSEGV);\n          fillFullFeatures();\n          // printf(\"leave:    ApplyAction\\n\");\n          return;\n        } else {\n          // CHANGE TURN\n          if (player == 0)\n            _status = GameStatus::player1Turn;\n          else\n            _status = GameStatus::player0Turn;\n        }\n      } else {\n        // cout<<\"hi4\"<<endl;\n        // made a mistake\n        // cout<<\"supposed free seqs for other\"<<endl;\n        vector<int> t = all_5s_other[0];\n\n        // cout<<\"sequence_removed, starting 
and ending were-\";\n        // printf(\"(%d ,%d) and (%d, %d) \",t[0],t[1], t[0] + 4*t[2], t[1] +\n        // 4*t[3]);\n        free_lunch = true;\n        still_have_to_remove_ring = true;\n        still_have_to_remove_marker = true;\n        if (player == 0)\n          _status = GameStatus::player1Turn;\n        else\n          _status = GameStatus::player0Turn;\n      }\n\n    } else {\n      // cout<<\"hi2\"<<endl;\n      // cout<<\"found my sequences\"<<endl;\n      vector<int> t = all_5s[0];\n\n      // cout<<\"sequence_removed, starting and ending were-\";\n      // printf(\"(%d ,%d)/ and (%d, %d) \",t[0],t[1], t[0] + 4*t[2], t[1] +\n      // 4*t[3]);\n\n      still_have_to_remove_ring = true;\n      still_have_to_remove_marker = true;\n      // dont change turn\n    }\n    // you have to set the flags in appply to\n  } else {\n    cout << \"You fucked up something\" << endl;\n  }\n\n  // cout<<\"before calling others\"<<endl;\n  // printCurrentBoard();\n  // cout<<\"debug3\"<<endl;\n\n  set_vars();\n  // cout<<\"debug4\"<<endl;\n  // cout<<\"after set vars\"<<endl;\n  // printCurrentBoard();\n\n  findActions();\n  // cout<<\"debug5\"<<endl;\n  // cout<<\"after finding actions\"<<endl;\n  // printCurrentBoard();\n\n  findFeatures();\n  // cout<<\"debug6\"<<endl;\n  // cout<<\"after finding features\"<<endl;\n  // printCurrentBoard();\n  fillFullFeatures();\n  // check_termination\n  // update turns\n  // printf(\"leave:    ApplyAction\\n\");\n}\n// void StateForYinsh::findActions(){\n\n// }\nvoid StateForYinsh::findActions() {\n  //\n  // printf(\"enter:    findActions\\n\");\n\n  // cout<<\"frequency_check\"<<endl;\n  clearActions();\n\n  if (initial_fill < 10) {\n    // you can only try to fill the empty spaces\n\n    // main line\n    // cout<<\"is it here\"<<endl;\n    for (int i = 0; i < 13; i++) {\n      for (int j = 0; j < 13; j++) {\n        if (board[i][j] == (int)(piece::empty)) {\n          addAction(56, i - 1, j - 1);\n        }\n      }\n    
}\n  } else if (still_have_to_remove_ring) {\n    for (int i = 0; i < (int)rings[player].size(); i++) {\n      addAction(\n          57, get<0>((rings[player])[i]) - 1, get<1>((rings[player])[i]) - 1);\n    }\n  } else if (still_have_to_remove_marker) {\n    vector<vector<int>> all_5s = find_all_5s(true);\n\n    vector<vector<int>> till_now;\n    int m_x, m_y, d_x, d_y, k, j;\n    bool matched = false;\n    //  printf(\"num of 5s are %d\\n\", all_5s.size());\n\n    for (int i = 0; i < (int)all_5s.size(); i++) {\n      // FIX LATER\n      // i just put the first point to make this quick\n      // _legalActions.push_back(make_shared<ActionForYinsh>(\n      //     58, (all_5s[i][0]) - 1, (all_5s[i][1]) - 1,\n      //     _legalActions.size()));\n      m_x = all_5s[i][0];\n      m_y = all_5s[i][1];\n      d_x = all_5s[i][2];\n      d_y = all_5s[i][3];\n      //  printf(\"mx, my, dx, dy are %d %d %d %d \\n\",m_x, m_y, d_x, d_y);\n\n      for (j = 0; j < 5; j++) {\n        matched = false;\n        // for all 5 markers\n        for (k = 0; k < (int)till_now.size(); k++) {\n          if (((till_now[k][0] == m_x + d_x * j) &&\n               (till_now[k][1] == m_y + d_y * j)))\n            matched = true;\n        }\n        if (!matched)\n          break;\n      }\n      // fill the chosen marker\n      //  printf(\"chose %d, %d \\n\", (m_x + d_x*j), (m_y + d_y*j));\n\n      addAction(58, (m_x + d_x * j) - 1, (m_y + d_y * j) - 1);\n      vector<int> temp_pt;\n      temp_pt.push_back(m_x + d_x * j);\n      temp_pt.push_back(m_y + d_y * j);\n      till_now.push_back(temp_pt);\n    }\n\n  } else {\n    // normal move\n    // for all rings\n    // go in all directions\n    // cout<<\"enter\"<<endl;\n    // cout<<rings[0].size()<<endl;\n    // cout<<rings[1].size()<<endl;\n    int x, y, start_x, start_y;\n    bool start = false;\n    for (int k = 0; k < (int)rings[player].size(); k++) {\n\n      // cout<<\"before access\"<<endl;\n      start_x = get<0>((rings[player])[k]);\n   
   start_y = get<1>((rings[player])[k]);\n      // cout<<\"ring \"<<k<<\" at \"<<start_x<<\" \"<<start_y<<endl;\n      for (int i = -1; i < 2; i++) {\n        for (int j = -1; j < 2; j++) {\n          if ((i + j) != 0) {\n            start = false;\n            for (int jump = 1; jump < 13; jump++) {\n              x = start_x + jump * i;\n              y = start_y + jump * j;\n              if (!start) {\n                if (board[x][y] == (int)(piece::empty)) {\n                  // possible move\n                  addAction((jump - 1) + 9 * map_direction_to_num(i, j),\n                            start_x - 1, start_y - 1);\n                } else {\n                  start = true;\n                }\n              }\n              // yahan else nhi ayega\n              if (start) {\n                if (board[x][y] == my_marker || board[x][y] == opp_marker) {\n                  // do nothing\n                  // continur\n                } else if (board[x][y] == (int)(piece::empty)) {\n                  addAction((jump - 1) + 9 * map_direction_to_num(i, j),\n                            start_x - 1, start_y - 1);\n                  break;\n                } else {\n                  // must be invalid or someones ring\n\n                  break;\n                }\n              }\n            }\n          }\n        }\n      }\n    }\n  }\n  if (_legalActions.size() == 0) {\n    // cout<<\"ZERO MOVES TO PLAY\"<<endl;\n    // printf(\"player is %d\" , player);\n    // printCurrentBoard();\n    if (rings[0].size() < rings[1].size())\n      _status = GameStatus::player0Win;\n    else if (rings[0].size() > rings[1].size())\n      _status = GameStatus::player1Win;\n    else\n      _status = GameStatus::tie;\n    fillFullFeatures();\n    // printf(\"leave:    findActions\\n\");\n    return;\n  }\n  // printf(\"leave:    findActions\\n\");\n}\n\nunique_ptr<core::State> StateForYinsh::clone_() const {\n  // printf(\"enter:    clone_\\n\");\n  unique_ptr<core::State> 
temp = std::make_unique<StateForYinsh>(*this);\n  // printf(\"leave:    clone_\\n\");\n  return temp;\n}\n\nvoid StateForYinsh::DoGoodAction() {\n  // printf(\"enter:    DoGoodAction\\n\");\n\n  // return DoRandomAction();\n  int i = rand() % _legalActions.size();\n  if (_legalActions.size() == 0)\n    cout << \"NO MOVES\" << endl;\n  // printf(\"No of legal moves are %d\\n\",_legalActions.size() );\n  _Action a = _legalActions[i];\n  ApplyAction(a);\n  // printf(\"leave:    DoGoodAction\\n\");\n}\nvoid StateForYinsh::findFeatures() {\n  // printf(\"enter:    findFeatures\\n\");\n\n  // yeh toh chill function hai\n  // simply just map the board to the 3 dimensions discussed\n  // NUM_PIECES, BOARD_X, BOARD_Y;\n  // cout<<\"enter\"  <<endl;\n  for (int p = 0; p < NUM_PIECES; p++) {\n    for (int j = 0; j < BOARD_Y; j++) {\n      for (int i = 0; i < BOARD_X; i++) {\n        if (p == 0 && board[i + 1][j + 1] == (int)(piece::empty))\n          _features[p * (BOARD_X * BOARD_Y) + j * BOARD_X + i] = 1;\n        else if (p == 1 && board[i + 1][j + 1] == my_ring)\n          _features[p * (BOARD_X * BOARD_Y) + j * BOARD_X + i] = 1;\n        else if (p == 2 && board[i + 1][j + 1] == my_marker)\n          _features[p * (BOARD_X * BOARD_Y) + j * BOARD_X + i] = 1;\n        else if (p == 3 && board[i + 1][j + 1] == opp_ring)\n          _features[p * (BOARD_X * BOARD_Y) + j * BOARD_X + i] = 1;\n        else if (p == 4 && board[i + 1][j + 1] == opp_marker)\n          _features[p * (BOARD_X * BOARD_Y) + j * BOARD_X + i] = 1;\n        else\n          _features[p * (BOARD_X * BOARD_Y) + j * BOARD_X + i] = 0;\n      }\n    }\n  }\n  // cout<<\"leave\"  <<endl;\n  // printf(\"leave:    findFeatures\\n\");\n}\n// string vala and good action\n// void StateForYinsh:\n\n// }\n// int main(){\n//   cout<<\"hello world\"<<endl;\n//   double a = time(NULL);\n//   srand(a);\n//   StateForYinsh g1(0);\n//   // g1.printCurrentBoard();\n//   g1.Initialize();\n//   g1.printCurrentBoard();\n\n//   
//to debug- print num of legal moves at every turn and count\n//   for(int n = 1; n<150; n++){\n//     cout<<\"move number \"<<n<<endl;\n//     g1.DoGoodAction();\n//     g1.printCurrentBoard();\n//     if(g1.ended()){\n//       break;\n//     }\n\n//     // cout<<g1.initial_fill<<endl;\n//     // if(g1._status == GameStatus::player0Win || g1._status ==\n// GameStatus::player1Win ||g1._status == GameStatus::playerTie ){\n//     //   break;\n//     // }\n//     //floating point error because of division by zero legal moves\n//   }\n//   // printf(\"Seed was %lf\\n\",a );\n\n// }\n"
  },
  {
    "path": "src/games/yinsh.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <iostream>\n#include <mutex>\n#include <random>\n#include <string>\n#include <vector>\n\n#include \"../core/state.h\"\n// declare helper classes and functions\n#define NUM_ACTIONS 59\n// (6 directions * 9 max jump) + 1(initial placement) + 1(ring selection for\n// removal) + 1(seq selection via choosing a marker)\n#define BOARD_X 11\n#define BOARD_Y 11\n#define NUM_PIECES 5\n\nusing namespace std;\n// PE the coordinates returned by the output of the net are actually shifted\n\n// namespace Yinsh{\nenum class piece : int {\n  invalid,\n  empty,\n  p0_marker,\n  p0_ring,\n  p1_marker,\n  p1_ring\n};\n\nclass StateForYinsh : public core::State {\n public:\n  StateForYinsh(int seed)\n      : State(seed) {\n  }\n  void Initialize() override;\n  // virtual unique_ptr<core::State> clone_() const override;\n  unique_ptr<core::State> clone_(void) const override;\n  void ApplyAction(const _Action& action) override;\n  void DoGoodAction(void) override;\n  void printCurrentBoard(void) const override;\n  bool ended();\n  string stateDescription(void) const override;    // DNU\n  string actionsDescription(void) const override;  // DNU\n  static void fill_hash_table();\n  static uint64_t hash_table[5][BOARD_X][BOARD_Y];\n  static std::once_flag table_flag;\n\n private:\n  void findActions(void);  // i have to maintain legal actions by myself\n  void findFeatures(\n      void);  // after every move i have to call these two because the\n              // base(State class expects that it would be filled)\n  char map_piece_to_char(int p) const;\n  // void printCurrentBoard();\n  tuple<int, int> map_num_to_direction(int n);\n  int map_direction_to_num(int i, int j);\n  void set_vars();\n  tuple<int, int> find_first_invalid(int x, 
int y, int d0, int d1);\n  vector<int> find_first_5_for_specific_pt(int x, int y);\n  vector<vector<int>> find_all_5s(bool my);\n\n  int places_filled;\n  int initial_fill;\n  bool still_have_to_remove_ring;\n  bool still_have_to_remove_marker;\n  int board[13][13];\n  // vector <tuple<int,int>> *my_rings;//not necessary but makes it faster\n  // vector <tuple<int,int>> *opp_rings;\n  // vector <tuple<int,int>> p0_rings;\n  // vector <tuple<int,int>> p1_rings;\n  vector<vector<tuple<int, int>>> rings;\n  int player, my_ring, my_marker, opp_ring,\n      opp_marker;  // set all these with set_vars()\n  bool free_lunch;\n};\n\n// }\n"
  },
  {
    "path": "src/mcts/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.0 FATAL_ERROR)\n\n# lib for other c++ programs\nadd_library(_mcts\n  node.cc\n  mcts.cc\n  storage.cc\n)\ntarget_link_libraries(_mcts PUBLIC pthread)\ntarget_include_directories(_mcts PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/..)\n\ntarget_include_directories(_mcts PUBLIC ${TORCH_INCLUDE_DIRS})\ntarget_include_directories(_mcts PUBLIC ${PYTHON_INCLUDE_DIRS})\ntarget_link_libraries(_mcts PUBLIC ${TORCH_LIBRARIES} _tube)\n\n# pybind lib\npybind11_add_module(mcts pybind.cc)\ntarget_link_libraries(mcts PUBLIC libpolygames)\n\n# tests\n#add_executable(test_mcts test.cc)\n#target_link_libraries(test_mcts PUBLIC _mcts)\n\n"
  },
  {
    "path": "src/mcts/actor.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"mcts/state.h\"\n#include \"mcts/utils.h\"\n\n#include <functional>\n\nnamespace mcts {\n\n// this is a minimal interface class,\n// should ONLY keep functions used by mcts\nclass Actor {\n public:\n  Actor() = default;\n\n  Actor(const Actor&) = delete;\n  Actor& operator=(const Actor&) = delete;\n\n  virtual PiVal evaluate(const State& s) = 0;\n\n  virtual ~Actor() {\n  }\n\n  virtual void evaluate(\n      const std::vector<const State*>& s,\n      const std::function<void(size_t, PiVal)>& resultCallback) {\n    for (size_t i = 0; i != s.size(); ++i) {\n      resultCallback(i, evaluate(*s[i]));\n    }\n  };\n\n  virtual void terminate() {\n  }\n\n  virtual void recordMove(const mcts::State* state) {\n  }\n\n  virtual void result(const State* state, float reward) {\n  }\n\n  virtual bool isTournamentOpponent() const {\n    return false;\n  }\n};\n\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/mcts.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"mcts/mcts.h\"\n#include \"common/async.h\"\n#include \"common/thread_id.h\"\n#include \"common/threads.h\"\n#include \"core/state.h\"\n\n#include <chrono>\n\nnamespace mcts {\n\nint forcedRollouts(float piValue, int numVisits, const MctsOption& option) {\n  return (int)std::sqrt(option.forcedRolloutsMultiplier * piValue * numVisits);\n}\n\n// TODO: eliminate duplicate code shared with pickBestAction below\nfloat puctValue(int rootPlayerId,\n                float puct,\n                const Node* node,\n                mcts::Action action) {\n  const auto& pi = node->legalPolicy_;\n  const Node* child = node->getChild(action);\n  auto childNumVisit = child->getMctsStats().getNumVisit();\n  float piValue = pi[action];\n  auto parentNumVisit = node->getMctsStats().getNumVisit();\n  float priorScore =\n      (float)piValue / (1 + childNumVisit) * (float)std::sqrt(parentNumVisit);\n  int flip = (node->getPiVal().playerId == rootPlayerId) ? 1 : -1;\n  float value = child->getMctsStats().getValue();\n  float vloss = child->getMctsStats().getVirtualLoss();\n  float q = (value * flip - vloss) / (childNumVisit + vloss);\n  float score = priorScore * puct + q;\n  return score;\n}\n\ntemplate <bool sample>\nAction pickBestAction(int rootPlayerId,\n                      const Node* const node,\n                      const MctsOption& option,\n                      std::minstd_rand& rng,\n                      int maxNumRollouts) {\n  const auto& pi = node->legalPolicy_;\n  if (pi.empty()) {\n    return InvalidAction;\n  }\n\n  float puct = option.puct;\n  bool useValuePrior = option.useValuePrior;\n  // We need to flip here because at opponent's step, we need to find\n  // opponent's best action which minimizes our value.  
Careful not to\n  // flip the exploration term.\n  int flip = (node->getPiVal().playerId == rootPlayerId) ? 1 : -1;\n  float priorValue = node->getMctsStats().getAvgChildV() * flip;\n\n  auto getScore = [&](Action actionIndex, const Node* child) {\n    float q = 0;\n    int childNumVisit = 0;\n    float vloss = 0;\n    float value = 0;\n\n    float piValue = pi[actionIndex];\n    auto parentNumVisit = node->getMctsStats().getNumVisit();\n\n    if (child) {\n      const MctsStats& mctsStats = child->getMctsStats();\n      childNumVisit += mctsStats.getNumVisit();\n      vloss += mctsStats.getVirtualLoss();\n      value += mctsStats.getValue();\n    }\n    if (childNumVisit != 0) {\n      q = (value * flip - vloss) / (childNumVisit + vloss);\n    } else {\n      // When there are no child nodes under this action, replace the q value\n      // with prior.\n      // This prior is estimated from the values of other explored child.\n      // q = 0 if this is the first child to be explroed. In this case, all q =\n      // 0 and we start with the child with highest policy probability.\n      if (useValuePrior) {\n        q = priorValue;\n      }\n    }\n\n    float priorScore =\n        (float)piValue / (1 + childNumVisit) * (float)std::sqrt(parentNumVisit);\n    return priorScore * puct + q;\n  };\n\n  if (option.forcedRolloutsMultiplier && !node->getParent()) {\n    // Forced rollouts; this only happens at the root node\n    int maxForcedRollouts = forcedRollouts(1.0f, maxNumRollouts, option);\n    for (auto& v : node->getChildren()) {\n      Action actionIndex = v.first;\n      const Node* child = v.second;\n      int childNumVisit = child->getMctsStats().getNumVisit();\n      if (childNumVisit < maxForcedRollouts &&\n          childNumVisit <\n              forcedRollouts(pi[actionIndex], maxNumRollouts, option)) {\n        return actionIndex;\n      }\n    }\n  }\n\n  if (sample) {\n    return sampleDiscreteProbability(pi.size(),\n                                     
[&](size_t index) {\n                                       float score = getScore(\n                                           index, node->getChild(index));\n                                       return std::exp(score * 4);\n                                     },\n                                     rng);\n  } else {\n    float bestScore = -std::numeric_limits<float>::infinity();\n    Action bestAction = InvalidAction;\n\n    for (mcts::Action actionIndex = 0; actionIndex != (mcts::Action)pi.size();\n         ++actionIndex) {\n      const Node* child = node->getChild(actionIndex);\n\n      float score = getScore(actionIndex, child);\n      if (score > bestScore) {\n        bestScore = score;\n        bestAction = actionIndex;\n      }\n    }\n    return bestAction;\n  }\n}\n\nnamespace {\n\nstd::atomic_uint64_t rolloutCount;\n\nstd::chrono::steady_clock::time_point starttime;\nbool started = false;\n\n}  // namespace\n\nint computeRolloutsImpl(const std::vector<Node*>& rootNode,\n                        const std::vector<const core::State*>& rootState,\n                        const std::vector<torch::Tensor>& rnnState,\n                        core::Actor& actor,\n                        const MctsOption& option,\n                        double max_time,\n                        std::minstd_rand& rng) {\n\n  double elapsedTime = 0;\n  auto begin = std::chrono::steady_clock::now();\n\n  struct RolloutState {\n    Node* root = nullptr;\n    Node* node = nullptr;\n    std::unique_ptr<core::State> state;\n    bool terminated = false;\n    Storage* storage = nullptr;\n    torch::Tensor rnnState;\n\n    Node* forcedParent = nullptr;\n    Action forcedAction = InvalidAction;\n  };\n\n  std::vector<RolloutState> states(rootNode.size());\n\n  async::Task task(threads::threads);\n\n  size_t stride =\n      (states.size() + threads::threads.size() - 1) / threads::threads.size();\n\n  std::vector<async::Thread*> reservedThreads(states.size());\n  for (size_t i = 0; i < 
states.size(); i += stride) {\n    reservedThreads[i] = &threads::threads.getThread();\n  }\n\n  int numRollout = 0;\n  std::vector<async::Handle> functionHandles(states.size());\n\n  int rollouts = option.totalTime ? 0 : option.numRolloutPerThread;\n\n  bool keepGoing = false;\n\n  for (size_t i = 0; i < states.size(); i += stride) {\n    rng.discard(1);\n    size_t n = std::min(states.size() - i, stride);\n\n    auto f = [&, ii = i, n, rng]() mutable {\n      size_t i = ii;\n\n      for (size_t s = 0; s != n; ++s, ++i) {\n\n        auto& st = states[i];\n        Node* root = rootNode[i];\n        st.root = root;\n\n        if (!st.storage) {\n          st.storage = Storage::getStorage();\n        }\n        Storage* storage = st.storage;\n\n        if (numRollout != 0) {\n          Node* node = st.node;\n          if (!st.terminated) {\n            auto& state = *st.state;\n            actor.batchResult(i, state, node->piVal_);\n            core::getLegalPi(\n                state, node->piVal_.logitPolicy, node->legalPolicy_);\n            core::softmax_(node->legalPolicy_);\n            node->piVal_.logitPolicy.reset();\n          }\n\n          node->settle(st.root->getPiVal().playerId);\n\n          float value = node->getPiVal().value;\n          int flip = st.root->getPiVal().playerId == node->getPiVal().playerId\n                         ? 1\n                         : -1;\n          value = value * flip;\n          // We need to flip here because at opponent's node, we have opponent's\n          // value. 
We need to sum up our value.\n          while (node != nullptr) {\n            MctsStats& mctsStats = node->getMctsStats();\n            mctsStats.atomicUpdate(value, 0.0f);\n            node = node->getParent();\n          }\n        }\n\n        if (!keepGoing) {\n          continue;\n        }\n\n        Node* node = root;\n        std::unique_ptr<core::State> localState = std::move(st.state);\n        const core::State* src =\n            st.forcedParent ? &*st.forcedParent->localState() : rootState[i];\n        if (!src) {\n          throw std::runtime_error(\"src state is null\");\n        }\n        if (!localState) {\n          localState = src->clone();\n        } else {\n          localState->copy(*src);\n        }\n\n        const torch::Tensor* rsp = nullptr;\n        if (!rnnState.empty()) {\n          rsp = &rnnState[i];\n        }\n\n        // 1. Selection\n\n        thread_local std::vector<Action> queuedActions;\n        queuedActions.clear();\n\n        const core::State* checkpointState = nullptr;\n\n        auto flushActions = [&]() {\n          if (checkpointState) {\n            localState->copy(*checkpointState);\n          }\n          if (!queuedActions.empty()) {\n            for (Action a : queuedActions) {\n              localState->forward(a);\n            }\n            queuedActions.clear();\n          }\n        };\n\n        Node* parent = nullptr;\n        Action action = InvalidAction;\n\n        bool save = false;\n\n        if (st.forcedParent) {\n          parent = st.forcedParent;\n          action = st.forcedAction;\n          st.forcedParent = nullptr;\n\n          node = parent->newChild(storage->newNode(), action);\n\n          auto& state = *localState;\n\n          if ((size_t)action >= state.GetLegalActions().size()) {\n            throw std::runtime_error(\"forced rollout bad action :((\");\n          }\n\n        } else if (node->isVisited()) {\n          while (true) {\n            rsp = &node->piVal_.rnnState;\n\n  
          Action bestAction =\n                (option.samplingMcts\n                     ? pickBestAction<true>\n                     : pickBestAction<false>)(root->getPiVal().playerId, node,\n                                              option, rng, rollouts);\n            // this is a terminal state that has been visited\n            if (bestAction == InvalidAction) {\n              flushActions();\n              break;\n            }\n\n            Node* childNode = node->getChild(bestAction);\n            if (childNode) {\n              node = childNode;\n              if (node->hasState()) {\n                checkpointState = &node->getState();\n                queuedActions.clear();\n              } else {\n                queuedActions.push_back(bestAction);\n              }\n              continue;\n            }\n            save = queuedActions.size() >= (size_t)option.storeStateInterval;\n            flushActions();\n\n            childNode = node->newChild(storage->newNode(), bestAction);\n\n            action = bestAction;\n            parent = node;\n            node = childNode;\n            break;\n          }\n\n          auto& state = *localState;\n\n          if (node->isVisited() && !state.terminated()) {\n            if (state.GetLegalActions().empty()) {\n              throw std::runtime_error(\n                  \"MCTS error - no legal actions in unterminated game state\");\n            }\n            throw std::runtime_error(\"MCTS error - rollout ended on unvisited \"\n                                     \"node with unterminated game state\");\n          }\n        }\n\n        auto& state = *localState;\n\n        auto saveState = [&](Node* saveNode) {\n          if (saveNode->localState() &&\n              saveNode->localState()->typeId() == state.typeId()) {\n            core::State* dst = &*saveNode->localState();\n            dst->copy(state);\n            saveNode->setState(dst);\n          } else {\n            
saveNode->localState() = localState->clone();\n            saveNode->setState(&*saveNode->localState());\n          }\n        };\n\n        if (parent) {\n          // Force visits to any children that share this policy output\n          // location.\n          const _Action& a = state.GetLegalActions().at(action);\n          for (auto& x : state.GetLegalActions()) {\n            if (x.GetIndex() != action && x.GetX() == a.GetX() &&\n                x.GetY() == a.GetY() && x.GetZ() == a.GetZ()) {\n              if (!parent->getChild(x.GetIndex())) {\n                st.forcedParent = parent;\n                st.forcedAction = x.GetIndex();\n\n                if (!parent->hasState()) {\n                  saveState(parent);\n                }\n                break;\n              }\n            }\n          }\n\n          localState->forward(action);\n\n          if (save) {\n            saveState(node);\n          }\n        }\n\n        // 2. Expansion\n\n        if (state.terminated()) {\n          PiVal& piVal = node->piVal_;\n          piVal.value = state.getReward(state.getCurrentPlayer()) * 2.0f;\n          piVal.playerId = state.getCurrentPlayer();\n\n          st.terminated = true;\n        } else {\n          st.terminated = false;\n        }\n\n        st.node = node;\n        st.state = std::move(localState);\n        actor.batchPrepare(i, state, rsp ? *rsp : torch::Tensor());\n      }\n    };\n\n    functionHandles[i] = task.getHandle(*reservedThreads[i], std::move(f));\n    functionHandles[i].setPriority(common::getThreadId());\n  }\n\n  actor.batchResize(states.size());\n\n  if (option.randomizedRollouts && rollouts >= 4) {\n    float mean = std::uniform_int_distribution<int>(0, 3)(rng) != 0\n                     ? 
rollouts / 8.0f\n                     : rollouts * 2.0f;\n    std::normal_distribution<float> r(mean, rollouts / 4.0f);\n    int max = rollouts * 4;\n    do {\n      rollouts = r(rng);\n    } while (rollouts < 1 || rollouts > max);\n  }\n\n  while (true) {\n    keepGoing = (option.totalTime ? elapsedTime < max_time\n                                  : numRollout < option.numRolloutPerThread) ||\n                numRollout < 2;\n\n    for (size_t i = 0; i < states.size(); i += stride) {\n      task.enqueue(functionHandles[i]);\n    }\n\n    task.wait();\n    if (!keepGoing) {\n      break;\n    }\n    actor.batchEvaluate(states.size());\n\n    rolloutCount += states.size();\n\n    ++numRollout;\n    auto end = std::chrono::steady_clock::now();\n    elapsedTime =\n        std::chrono::duration_cast<\n            std::chrono::duration<double, std::ratio<1, 1>>>(end - begin)\n            .count();\n  }\n\n  for (const Node* root : rootNode) {\n    mcts::Action bestAction = -1;\n    int best = 0;\n    for (auto& v : root->getChildren()) {\n      const Node* child = v.second;\n      if (child->getMctsStats().getNumVisit() > best) {\n        best = child->getMctsStats().getNumVisit();\n        bestAction = v.first;\n      }\n    }\n    if (bestAction != -1) {\n      float bestPuct =\n          puctValue(root->getPiVal().playerId, option.puct, root, bestAction);\n      for (auto& v : root->getChildren()) {\n        if (v.first == bestAction) {\n          continue;\n        }\n        Node* child = v.second;\n        int forced =\n            forcedRollouts(root->legalPolicy_[v.first], rollouts, option);\n        for (; forced && child->getMctsStats().getNumVisit(); --forced) {\n          child->getMctsStats().subtractVisit();\n          float pv =\n              puctValue(root->getPiVal().playerId, option.puct, root, v.first);\n          if (pv > bestPuct) {\n            child->getMctsStats().addVisit();\n            break;\n          }\n        }\n      }\n    }\n  }\n\n  
return rollouts;\n}\n\nint computeRollouts(const std::vector<Node*>& rootNode,\n                    const std::vector<const core::State*>& rootState,\n                    const std::vector<torch::Tensor>& rnnState,\n                    core::Actor& actor,\n                    const MctsOption& option,\n                    double max_time,\n                    std::minstd_rand& rng) {\n\n  return computeRolloutsImpl(\n      rootNode, rootState, rnnState, actor, option, max_time, rng);\n}\n\nstd::vector<MctsResult> MctsPlayer::actMcts(\n    const std::vector<const core::State*>& states,\n    const std::vector<torch::Tensor>& rnnState) {\n  std::vector<MctsResult> result(states.size(), &rng_);\n\n  auto begin = std::chrono::steady_clock::now();\n  uint64_t beginRolloutCount = rolloutCount;\n\n  if (!started) {\n    started = true;\n    starttime = begin;\n  }\n\n  std::vector<Node*> roots;\n  Storage* storage = Storage::getStorage();\n  for (auto* state : states) {\n    Node* rootNode = storage->newNode();\n    rootNode->init(nullptr);\n    roots.push_back(rootNode);\n\n    if (state->terminated()) {\n      throw std::runtime_error(\"Attempt to run MCTS from terminated state\");\n    }\n  }\n\n  double thisMoveTime = remaining_time * option_.timeRatio;\n  if (option_.totalTime) {\n    std::cerr << \"Remaining time:\" << remaining_time << std::endl;\n    std::cerr << \"This move time:\" << thisMoveTime << std::endl;\n  }\n  int rollouts = computeRollouts(\n      roots, states, rnnState, *actor_, option_, thisMoveTime, rng_);\n  if (option_.totalTime) {\n    auto end = std::chrono::steady_clock::now();\n    remaining_time -=\n        std::chrono::duration_cast<\n            std::chrono::duration<double, std::ratio<1, 1>>>(end - begin)\n            .count();\n  }\n  for (size_t i = 0; i != states.size(); ++i) {\n    Node* rootNode = roots[i];\n    assert(rootNode->getMctsStats().getVirtualLoss() == 0);\n    if (option_.totalTime > 0) {\n      std::cerr << \"Value : \" << 
rootNode->getMctsStats().getValue()\n                << \" total rollouts : \"\n                << rootNode->getMctsStats().getNumVisit() << std::endl;\n      std::cerr << \"Current value is (-1 to 1): \"\n                << rootNode->getMctsStats().getValue() /\n                       rootNode->getMctsStats().getNumVisit()\n                << std::endl;\n    }\n    result[i].rollouts = rollouts;\n    result[i].rootValue = rootNode->getMctsStats().getAvgValue();\n    for (auto& v : rootNode->getChildren()) {\n      int visits = v.second->getMctsStats().getNumVisit();\n      if (visits > 1) {\n        result[i].add(v.first, visits);\n      }\n    }\n    if (result[i].bestAction == InvalidAction) {\n      for (auto& v : rootNode->getChildren()) {\n        int visits = v.second->getMctsStats().getNumVisit();\n        result[i].add(v.first, visits);\n      }\n    }\n    result[i].normalize();\n  }\n\n  for (size_t i = 0; i != states.size(); ++i) {\n    if (result[i].bestAction == InvalidAction) {\n      throw std::runtime_error(\n          \"MCTS could not find any valid actions at state \" +\n          states[i]->history());\n    }\n    if (states[i]->getStepIdx() < option_.sampleBeforeStepIdx) {\n      // std::cout << \"sample:\" << std::endl;\n      result[i].sample();\n    }\n  }\n\n  for (size_t i = 0; i != states.size(); ++i) {\n    auto* n = roots[i]->getChild(result[i].bestAction);\n    if (n && n->getPiVal().rnnState.defined()) {\n      result[i].rnnState = n->getPiVal().rnnState;\n    }\n    roots[i]->freeTree();\n  }\n\n  bool verbose = false;\n\n  if (verbose) {\n    uint64_t n = rolloutCount - beginRolloutCount;\n    double s = std::chrono::duration_cast<\n                   std::chrono::duration<double, std::ratio<1, 1>>>(\n                   std::chrono::steady_clock::now() - begin)\n                   .count();\n    rolloutsPerSecond_ = n / s;\n\n    printf(\"rollouts per second: %g\\n\", rolloutsPerSecond_);\n\n    double sx = 
std::chrono::duration_cast<\n                    std::chrono::duration<double, std::ratio<1, 1>>>(\n                    std::chrono::steady_clock::now() - starttime)\n                    .count();\n    printf(\"total rollouts per second: %g\\n\", rolloutCount / sx);\n  }\n\n  return result;\n}\n\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/mcts.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <cmath>\n#include <ctime>\n\n#include <chrono>\n#include <future>\n#include <iostream>\n#include <random>\n#include <vector>\n\n#include \"core/actor.h\"\n#include \"core/actor_player.h\"\n#include \"core/state.h\"\n#include \"mcts/node.h\"\n#include \"mcts/storage.h\"\n#include \"mcts/utils.h\"\n\nnamespace mcts {\n\nint computeRollouts(const std::vector<Node*>& rootNode,\n                    const std::vector<const core::State*>& rootState,\n                    const std::vector<std::vector<float>>& rnnState,\n                    core::Actor& actor,\n                    const MctsOption& option,\n                    double thisMoveTime,\n                    std::minstd_rand& rng);\n\nclass MctsPlayer : public core::ActorPlayer {\n public:\n  MctsPlayer(const MctsOption& option)\n      : option_(option)\n      , rng_(option.seed) {\n    reset();\n  }\n\n  std::vector<MctsResult> actMcts(const std::vector<const core::State*>& states,\n                                  const std::vector<torch::Tensor>& rnnState);\n\n  MctsResult actMcts(const core::State& state) {\n    return actMcts({&state}, {}).at(0);\n  }\n\n  MctsResult actMcts(const core::State& state, const torch::Tensor& rnnState) {\n    return actMcts({&state}, {rnnState}).at(0);\n  }\n\n  double rolloutsPerSecond() {\n    return rolloutsPerSecond_;\n  }\n\n  MctsOption& option() {\n    return option_;\n  }\n\n  const MctsOption& option() const {\n    return option_;\n  }\n\n  virtual void reset() override {\n    remaining_time = option_.totalTime;\n  }\n\n private:\n  MctsOption option_;\n  double remaining_time;\n  std::minstd_rand rng_;\n  // Storage storage_;\n  double rolloutsPerSecond_ = 0.0;\n};\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/node.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <iostream>\n\n#include \"mcts/node.h\"\n#include \"mcts/storage.h\"\n\nnamespace mcts {\n\nvoid Node::init(Node* parent) {\n\n  parent_ = nullptr;\n  state_ = nullptr;\n  children_.clear();\n  visited_ = false;\n\n  mctsStats_.reset();\n  piVal_.reset();\n  legalPolicy_.clear();\n\n  parent_ = parent;\n}\n\nvoid Node::acquire() {\n  //  mSelf_.lock();\n  //  holderThreadId_ = std::this_thread::get_id();\n}\n\nvoid Node::release() {\n  //  assert(holderThreadId_ == std::this_thread::get_id());\n  //  mSelf_.unlock();\n}\n\nNode* Node::newChild(Node* child, Action action) {\n  child->init(this);\n  auto i = std::lower_bound(children_.begin(), children_.end(), action,\n                            [](auto& a, Action b) { return a.first < b; });\n  children_.insert(i, std::make_pair(action, child));\n  return child;\n}\n\nnamespace {\nconst std::vector<Node*> emptyList;\n}\n\nNode* Node::getChild(Action action) const {\n  auto i = std::lower_bound(children_.begin(), children_.end(), action,\n                            [](auto& a, Action b) { return a.first < b; });\n  return i == children_.end() || i->first != action ? 
nullptr : i->second;\n}\n\nvoid Node::freeTree() {\n  piVal_.rnnState.reset();\n  for (auto& v : children_) {\n    v.second->freeTree();\n  }\n  storage_->freeNode(this);\n}\n\nvoid Node::printTree(int level, int maxLevel, int action) const {\n  if (level > maxLevel) {\n    return;\n  }\n  for (int i = 0; i < level; ++i) {\n    std::cout << \"    \";\n  }\n  std::cout << action << \" \" << mctsStats_.getValue() << \"/\"\n            << mctsStats_.getNumVisit();\n  std::cout << \"(\" << mctsStats_.getValue() / mctsStats_.getNumVisit() << \")\";\n  std::cout << \", vloss:\" << mctsStats_.getVirtualLoss() << std::endl;\n  for (auto& v : getChildren()) {\n    v.second->printTree(level + 1, maxLevel, v.first);\n  }\n}\n\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/node.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <mutex>\n#include <thread>\n#include <vector>\n\n#include \"core/state.h\"\n#include \"mcts/types.h\"\n#include \"mcts/utils.h\"\n\nnamespace mcts {\n\nclass Storage;\n\nclass Node {\n public:\n  Node()\n      : storage_(nullptr)\n      , id_(0) {\n  }\n\n  void setStorageAndId(Storage* storage, NodeId id) {\n    storage_ = storage;\n    id_ = id;\n  }\n\n  Node(const Node&) = delete;\n  Node& operator=(const Node&) = delete;\n\n  void init(Node* parent);\n\n  void acquire();\n\n  void release();\n\n  // caller is responsible for holding locks in case of multi-threading\n  Node* newChild(Node* childNode, Action action);\n\n  Node* getChild(Action action) const;\n\n  MctsStats& getMctsStats() {\n    return mctsStats_;\n  }\n\n  const MctsStats& getMctsStats() const {\n    return mctsStats_;\n  }\n\n  const core::State& getState() const {\n    return *state_;\n  }\n\n  bool hasState() const {\n    return state_ != nullptr;\n  }\n\n  void setState(core::State* state) {\n    state_ = state;\n  }\n\n  std::unique_ptr<core::State>& localState() {\n    return localState_;\n  }\n\n  NodeId getId() const {\n    return id_;\n  }\n\n  const auto& getChildren() const {\n    return children_;\n  }\n\n  Node* getParent() const {\n    return parent_;\n  }\n\n  const PiVal& getPiVal() const {\n    return piVal_;\n  }\n\n  void settle(int rootPlayerId) {\n    // Only called when the node is locked.\n    if (parent_ != nullptr) {\n      auto& stats = parent_->getMctsStats();\n      float upValue =\n          rootPlayerId == piVal_.playerId ? 
piVal_.value : -piVal_.value;\n      stats.atomicUpdateChildV(upValue);\n    }\n    visited_ = true;\n  }\n\n  // free the entire tree rooted at this node\n  void freeTree();\n\n  bool isVisited() {\n    return visited_;\n  }\n\n  uint64_t getStateHash() {\n    return stateHash_;\n  }\n\n  void printTree(int level, int maxLevel, int action) const;\n\n  // private:\n\n  // std::pair<Node*, Node*> link;\n\n  // set in constructor, should never be changed\n  Storage* storage_;\n  NodeId id_;\n\n  // sync tools\n  // std::mutex mSelf_;\n  // std::thread::id holderThreadId_;\n\n  // actual attributes\n  Node* parent_;\n  std::unique_ptr<core::State> localState_;\n  core::State* state_;\n  uint64_t stateHash_;\n  // std::unordered_map<Action, std::vector<Node*>> children_;\n  std::vector<std::pair<Action, Node*>> children_;\n  // int depth_;\n  bool visited_;\n\n  MctsStats mctsStats_;\n  PiVal piVal_;\n  std::vector<float> legalPolicy_;\n};\n\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/player.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <string>\n\nnamespace mcts {\nclass State;\nclass Player {\n public:\n  Player(bool isHuman)\n      : isHuman_(isHuman) {\n    isTP_ = false;\n  }\n\n  bool isHuman() const {\n    return isHuman_;\n  }\n  bool isTP() const {\n    return isTP_;\n  }\n\n  virtual void terminate() {\n  }\n  virtual void reset() {\n  }\n  virtual void newEpisode() {\n  }\n  virtual void recordMove(const State* state) {\n  }\n  virtual void result(const State*, float reward) {\n  }\n\n  virtual void setName(std::string name) {\n    name_ = std::move(name);\n  }\n  const std::string& getName() {\n    return name_;\n  }\n\n private:\n  std::string name_ = \"unnamed\";\n  bool isHuman_;\n\n protected:\n  bool isTP_;\n};\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/pybind.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <pybind11/pybind11.h>\n\n#include \"mcts/mcts.h\"\n\nnamespace py = pybind11;\n\nPYBIND11_MODULE(mcts, m) {\n  using namespace mcts;\n\n  py::class_<MctsPlayer, core::ActorPlayer, std::shared_ptr<MctsPlayer>>(\n      m, \"MctsPlayer\")\n      .def(py::init<const MctsOption&>(),\n           py::call_guard<py::gil_scoped_release>());\n\n  py::class_<MctsOption>(m, \"MctsOption\")\n      .def(py::init<>())\n      .def(py::init<const MctsOption&>())\n      .def_readwrite(\"puct\", &MctsOption::puct)\n      .def_readwrite(\"sample_before_step_idx\", &MctsOption::sampleBeforeStepIdx)\n      .def_readwrite(\"num_rollout_per_thread\", &MctsOption::numRolloutPerThread)\n      .def_readwrite(\"seed\", &MctsOption::seed)\n      .def_readwrite(\"virtual_loss\", &MctsOption::virtualLoss)\n      .def_readwrite(\"store_state_interval\", &MctsOption::storeStateInterval)\n      .def_readwrite(\"use_value_prior\", &MctsOption::useValuePrior)\n      .def_readwrite(\"time_ratio\", &MctsOption::timeRatio)\n      .def_readwrite(\"total_time\", &MctsOption::totalTime)\n      .def_readwrite(\"randomized_rollouts\", &MctsOption::randomizedRollouts)\n      .def_readwrite(\"sampling_mcts\", &MctsOption::samplingMcts)\n      .def_readwrite(\n          \"forced_rollouts_multiplier\", &MctsOption::forcedRolloutsMultiplier);\n}\n"
  },
  {
    "path": "src/mcts/storage.cc",
    "content": "\n#include \"storage.h\"\n\nnamespace mcts {\n\nstd::mutex freeStoragesMutex;\nstd::list<Storage*> freeStorages;\n\nNode* Storage::newNode() {\n  if (chunkIndex >= chunks.size()) {\n    Node* newChunk = (Node*)std::aligned_alloc(128, sizeof(Node) * chunkSize);\n    new (newChunk) Node[chunkSize];\n    for (size_t i = 0; i != chunkSize; ++i) {\n      newChunk[i].setStorageAndId(this, i);\n    }\n    chunks.push_back(newChunk);\n  }\n  Node* r = chunks[chunkIndex] + subIndex;\n  ++subIndex;\n  if (subIndex == chunkSize) {\n    subIndex = 0;\n    ++chunkIndex;\n  }\n  ++allocated;\n  return r;\n}\n\nvoid Storage::freeNode(Node* node) {\n  --allocated;\n  if (allocated == 0) {\n    chunkIndex = 0;\n    subIndex = 0;\n    std::lock_guard l(freeStoragesMutex);\n    freeStorages.push_back(this);\n  }\n}\n\nStorage* Storage::getStorage() {\n  std::unique_lock l(freeStoragesMutex);\n  if (freeStorages.empty()) {\n    l.unlock();\n    return new Storage();\n  }\n  Storage* r = freeStorages.back();\n  freeStorages.pop_back();\n  return r;\n}\n\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/storage.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"mcts/node.h\"\n\n#include <deque>\n#include <list>\n\nnamespace mcts {\n\nclass Storage {\n  std::vector<Node*> chunks;\n  size_t chunkIndex = 0;\n  size_t subIndex = 0;\n  size_t allocated = 0;\n  const size_t chunkSize = 16;\n\n public:\n  Storage() = default;\n  Storage(const Storage&) = delete;\n  Storage& operator=(const Storage&) = delete;\n\n  Node* newNode();\n  void freeNode(Node* node);\n  static Storage* getStorage();\n};\n\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/test.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"actor.h\"\n#include \"mcts.h\"\n#include \"types.h\"\n#include \"utils.h\"\n\n#include <algorithm>\n#include <atomic>\n#include <iomanip>\n#include <iostream>\n#include <random>\n\nusing namespace mcts;\nstd::atomic<int> seed(0);\n\nclass TicTacToeState : public State {\n public:\n  TicTacToeState()\n      : State()\n      , rng_(seed++) {\n    board.resize(9);\n    std::fill(board.begin(), board.end(), 0);\n    currentPlayer = 1;\n  }\n\n  int getCurrentPlayer() const override {\n    return currentPlayer;\n  }\n\n  uint64_t getHash() const override {\n    return 0;\n  }\n\n  float getReward(int player) const override {\n    int r = winner;\n    if (r == 0)\n      r = checkWinner();\n    return r * player;\n  };\n\n  // bool isStochastic() const override {\n  //   return false;\n  // };\n\n  std::vector<int> getLegalActions() const {\n    std::vector<int> actions;\n    for (int i = 0; i < 9; i++) {\n      if (board[i] == 0) {\n        actions.push_back(i);\n      }\n    }\n    return actions;\n  }\n\n  float getRandomRolloutReward(int player) const override {\n    int numRandomRollout = 100;\n    int totalReward = 0;\n    for (int i = 0; i < numRandomRollout; ++i) {\n      TicTacToeState state;\n      // std::cout << \"start random rollout\" << std::endl;\n      // state.printState();\n      state.board = board;\n      state.currentPlayer = currentPlayer;\n      state.moveIdx = moveIdx;\n      while (!state.terminated()) {\n        // state.printState();\n        auto actions = state.getLegalActions();\n        int idx = state.rng_() % actions.size();\n        // std::cout << idx << \",\" << actions[idx] << \";;  \";\n        state.forward(actions[idx]);\n        // std::cout << \"player \" << player << \", action: \"  << 
actions[idx] <<\n        // std::endl;\n        // state.printState();\n      }\n      // std::cout << \"+++++++end of random rollout +++++++, winner: \"\n      //           << checkWinner() << std::endl;\n      // state.winner = state.checkWinner();\n      totalReward += state.checkWinner() * player;\n    }\n    return totalReward / (float)numRandomRollout;\n  }\n\n  bool operator==(const State&) const override {\n    return false;\n  }\n\n  int getStepIdx() const override {\n    return moveIdx;\n  }\n\n  std::unique_ptr<State> clone() const override {\n    auto other = std::make_unique<TicTacToeState>();\n    other->moveIdx = moveIdx;\n    other->board = board;\n    other->currentPlayer = currentPlayer;\n    return other;\n    // return std::make_unique<TicTacToeState>(other);\n  }\n\n  const std::vector<mcts::Action>& getMoves() const override {\n    return {};\n  }\n\n  bool forward(const Action& a) override {\n    assert(a >= 0 && a <= 8);\n    if (board[a] != 0) {\n      winner = -currentPlayer;\n    }\n    board[a] = currentPlayer;\n    currentPlayer = -currentPlayer;\n    moveIdx += 1;\n    return true;\n  }\n\n  bool terminated() const override {\n    return winner != 0 || checkWinner() != 0 || moveIdx == 9;\n  }\n\n  int at(int i, int j) const {\n    // std::cout << i << j << std::endl;\n    assert(i >= 0 && i < 3 && j >= 0 && j < 3);\n    return board[i * 3 + j];\n  }\n\n  void checkSum(int sum, int* winner) const {\n    if (sum == 3)\n      *winner = 1;\n    if (sum == -3)\n      *winner = -1;\n  }\n\n  int checkWinner() const {\n    int w = 0;\n    int sum = 0;\n    for (int i = 0; i < 3; i++) {\n      sum = 0;\n      for (int j = 0; j < 3; j++) {\n        sum += at(i, j);\n        checkSum(sum, &w);\n      }\n      sum = 0;\n      for (int j = 0; j < 3; j++) {\n        sum += at(j, i);\n        checkSum(sum, &w);\n      }\n    }\n    sum = 0;\n    for (int i = 0; i < 3; i++) {\n      sum += at(i, i);\n      checkSum(sum, &w);\n    }\n    sum = 0;\n    
for (int i = 0; i < 3; i++) {\n      sum += at(i, 2 - i);\n      checkSum(sum, &w);\n    }\n    return w;\n  }\n\n  void printState() {\n    std::cout << \"PRINT STATE===\" << std::endl;\n    std::cout << \"current player is \" << currentPlayer << std::endl;\n    for (int i = 0; i < 9; ++i) {\n      std::cout << std::setw(2);\n      std::cout << board[i] << \" \";\n      if (i % 3 == 2) {\n        std::cout << std::endl;\n      }\n    }\n    // std::cout << std::endl;\n  }\n\n  std::vector<int> board;\n  int currentPlayer = 1;\n  int winner = 0;\n  int moveIdx = 0;\n  std::mt19937 rng_;\n};\n\nclass TestActor : public Actor {\n public:\n  TestActor() {\n  }\n\n  PiVal& evaluate(const State& s, PiVal& pival) override {\n    const auto& state = dynamic_cast<const TicTacToeState*>(&s);\n    const auto& actions = state->getLegalActions();\n    std::vector<float> pi;\n    pi.resize(actions.size());\n\n    for (size_t i = 0; i < actions.size(); ++i) {\n      pi[i] = 1.0 / actions.size();\n    }\n    auto player = state->getCurrentPlayer();\n    float value = state->getRandomRolloutReward(state->getCurrentPlayer());\n    pival = PiVal(player, value, std::move(pi));\n    return pival;\n  }\n};\n\nint main(int argc, char* argv[]) {\n  // args are thread, rollouts\n  assert(argc == 3);\n  TicTacToeState state;\n  MctsOption option;\n  // option.numThread = 2;\n  option.numRolloutPerThread = std::stoi(std::string(argv[2]));\n  option.puct = 1.0;\n  option.virtualLoss = 1.0;\n  std::vector<std::unique_ptr<MctsPlayer>> players;\n\n  for (int i = 0; i < 2; ++i) {\n    players.push_back(std::make_unique<MctsPlayer>(option));\n    for (int j = 0; j < std::stoi(std::string(argv[1])); ++j) {\n      players.at(i)->addActor(std::make_shared<TestActor>());\n    }\n  }\n\n  int i = 0;\n  while (!state.terminated()) {\n    int playerIdx = state.getCurrentPlayer() == 1 ? 
0 : 1;\n    MctsResult result = players.at(playerIdx)->actMcts(state);\n    std::cout << \"best action is \" << result.bestAction << std::endl;\n    state.forward(result.bestAction);\n    state.printState();\n    std::cout << \"-----------\" << std::endl;\n    ++i;\n    // if (i > 1) {\n    //   break;\n    // }\n  }\n  std::cout << \"winner is \" << state.checkWinner() << std::endl;\n  assert(state.checkWinner() == 0);\n  return 0;\n}\n"
  },
  {
    "path": "src/mcts/types.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <cstdint>\n\nnamespace mcts {\n\nusing Action = int64_t;\nusing NodeId = int64_t;\n\nconst int InvalidAction = -1;\n}  // namespace mcts\n"
  },
  {
    "path": "src/mcts/utils.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <torch/torch.h>\n\n#include <algorithm>\n#include <atomic>\n#include <cassert>\n#include <iostream>\n#include <mutex>\n#include <random>\n#include <sstream>\n#include <unordered_map>\n\n#include \"core/actor.h\"\n#include \"mcts/types.h\"\n\nnamespace mcts {\n\nclass MctsOption {\n public:\n  float totalTime = 0;\n  float timeRatio = 0.035;\n  // coefficient of prior score\n  float puct = 0.0;\n\n  // first K steps in the game where we use sample instead of greedily\n  // pick the best action. For example, if K = 6, then each player will\n  // sample action based on mcts probability for their first 3 steps\n  // in a two player game.\n  int sampleBeforeStepIdx = 0;\n\n  // num of rollout for each move\n  int numRolloutPerThread = -1;\n\n  int seed = 123;\n\n  float virtualLoss = 0.0;\n\n  // If true, initialize unvisited node with prior values from siblings\n  bool useValuePrior = true;\n\n  // Store the state in the MCTS node at multiples of this tree depth.\n  int storeStateInterval = 1;\n\n  bool randomizedRollouts = false;\n\n  bool samplingMcts = false;\n\n  float forcedRolloutsMultiplier = 2.0f;\n};\n\nclass MctsStats {\n public:\n  MctsStats() {\n    reset();\n  }\n\n  void reset() {\n    value_ = 0.0;\n    numVisit_ = 0;\n    virtualLoss_ = 0.0;\n    sumChildV_ = 0.0;\n    numChild_ = 0;\n  }\n\n  float getValue() const {\n    // std::lock_guard<std::mutex> lock(mSelf_);\n    return value_;\n  }\n\n  int getNumVisit() const {\n    // std::lock_guard<std::mutex> lock(mSelf_);\n    return numVisit_;\n  }\n\n  // Get prior child value (from the perspective of the current node).\n  //\n  // When a child hasn't been explored yet, we don't know its value and need to\n  // use the \"prior\" from other children 
that has been explored before.\n  // This is very important, otherwise the tree search could be overly-\n  // optimistic and explore all actions once and waste a lot of rollouts (which\n  // is bad for cases with high-branching factor).\n  float getAvgChildV() const {\n    if (numChild_ == 0) {\n      return 0.0;\n    } else {\n      return sumChildV_ / numChild_;\n    }\n  }\n\n  float getAvgValue() const {\n    assert(numVisit_ > 0);\n    return value_ / numVisit_;\n  }\n\n  float getVirtualLoss() const {\n    return virtualLoss_;\n  }\n\n  void addVirtualLoss(float virtualLoss) {\n    // std::lock_guard<std::mutex> lock(mSelf_);\n    virtualLoss_ += virtualLoss;\n  }\n\n  void atomicUpdate(float value, float virtualLoss) {\n    // std::lock_guard<std::mutex> lock(mSelf_);\n    value_ += value;\n    numVisit_++;\n    virtualLoss_ -= virtualLoss;\n  }\n\n  // Update child value estimate with a new obtained child value\n  // (from the perspective of the root node\n  void atomicUpdateChildV(float childV) {\n    // std::lock_guard<std::mutex> lock(mSelf_);\n    sumChildV_ += childV;\n    numChild_++;\n  }\n\n  std::string summary() const {\n    std::stringstream ss;\n    ss << value_ << \"/\" << numVisit_ << \" (\" << value_ / numVisit_\n       << \"), vloss: \" << virtualLoss_;\n    return ss.str();\n  }\n\n  void subtractVisit() {\n    --numVisit_;\n  }\n  void addVisit() {\n    ++numVisit_;\n  }\n\n private:\n  float value_;\n  int numVisit_;\n  float virtualLoss_;\n\n  // Summation of the value prediction from a child\n  float sumChildV_;\n  // # child that has been explored.\n  int numChild_;\n\n  // std::mutex mSelf_;\n};\n\ntemplate <typename F, typename Rng>\nsize_t sampleDiscreteProbability(size_t nElements,\n                                 float maxValue,\n                                 F&& getValue,\n                                 Rng& rng) {\n  if (nElements == 0) {\n    throw std::runtime_error(\"sampleDiscreteProbability was passed 0 elements\");\n 
 }\n  for (size_t i = 0; i != 4; ++i) {\n    size_t index = std::uniform_int_distribution<int>(0.0f, nElements - 1)(rng);\n    if (std::generate_canonical<float, 20>(rng) <= getValue(index) / maxValue) {\n      return index;\n    }\n  }\n  thread_local std::vector<float> probs;\n  probs.resize(nElements);\n  float sum = 0.0f;\n  for (size_t i = 0; i != nElements; ++i) {\n    sum += getValue(i);\n    probs[i] = sum;\n  }\n  float v = std::uniform_real_distribution<float>(0.0f, sum)(rng);\n  return std::lower_bound(probs.begin(), std::prev(probs.end()), v) -\n         probs.begin();\n}\n\ntemplate <typename F, typename Rng>\nsize_t sampleDiscreteProbability(size_t nElements, F&& getValue, Rng& rng) {\n  if (nElements == 0) {\n    throw std::runtime_error(\"sampleDiscreteProbability was passed 0 elements\");\n  }\n  thread_local std::vector<float> probs;\n  probs.resize(nElements);\n  float sum = 0.0f;\n  for (size_t i = 0; i != nElements; ++i) {\n    sum += getValue(i);\n    probs[i] = sum;\n  }\n  float v = std::uniform_real_distribution<float>(0.0f, sum)(rng);\n  return std::lower_bound(probs.begin(), std::prev(probs.end()), v) -\n         probs.begin();\n}\n\nclass MctsResult {\n public:\n  MctsResult() = default;\n  MctsResult(std::minstd_rand* rng)\n      : maxVisits(-1000)\n      , sumVisits(0)\n      , bestAction(InvalidAction)\n      , rng_(rng) {\n  }\n\n  void add(Action a, float visits) {\n    if (mctsPolicy.size() <= (size_t)a) {\n      if (mctsPolicy.capacity() <= (size_t)a) {\n        mctsPolicy.reserve(mctsPolicy.size() * 2);\n      }\n      mctsPolicy.resize(a + 1);\n    }\n    mctsPolicy[a] = visits;\n    sumVisits += visits;\n    if (visits > maxVisits) {\n      maxVisits = visits;\n      bestAction = a;\n    }\n  }\n\n  void normalize() {\n    for (auto& value : mctsPolicy) {\n      value = value / (float)sumVisits;\n    }\n  }\n\n  // assume already normalized\n  void sample() {\n    auto weight = [this](float pival) {\n      return std::exp(pival 
* pival * 2) - (1.0f - 0.5f / mctsPolicy.size());\n    };\n    float maxWeight = 0.0f;\n    for (size_t i = 0; i != mctsPolicy.size(); ++i) {\n      if (mctsPolicy[i] > maxWeight) {\n        maxWeight = mctsPolicy[i];\n      }\n    }\n    maxWeight = weight(maxWeight);\n    bestAction = sampleDiscreteProbability(\n        mctsPolicy.size(), maxWeight,\n        [&](size_t i) { return weight(mctsPolicy[i]); }, *rng_);\n  }\n\n  void setMctsPolicy(std::vector<float> pi) {\n    mctsPolicy = std::move(pi);\n  }\n\n  float maxVisits;\n  float sumVisits;\n  Action bestAction;\n  std::vector<float> mctsPolicy;\n  float rootValue = 0.0f;\n  int rollouts = 0;\n  torch::Tensor rnnState;\n\n private:\n  std::minstd_rand* rng_;\n};\n\nusing core::PiVal;\n\ninline void printPolicy(const std::vector<float>& pi) {\n  for (mcts::Action i = 0; i != (mcts::Action)pi.size(); ++i) {\n    std::cout << i << \":\" << pi[i] << std::endl;\n  }\n}\n\n}  // namespace mcts\n"
  },
  {
    "path": "src/third_party/asio/associated_allocator.hpp",
    "content": "//\n// associated_allocator.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_ASSOCIATED_ALLOCATOR_HPP\n#define ASIO_ASSOCIATED_ALLOCATOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <memory>\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename>\nstruct associated_allocator_check\n{\n  typedef void type;\n};\n\ntemplate <typename T, typename E, typename = void>\nstruct associated_allocator_impl\n{\n  typedef E type;\n\n  static type get(const T&, const E& e) ASIO_NOEXCEPT\n  {\n    return e;\n  }\n};\n\ntemplate <typename T, typename E>\nstruct associated_allocator_impl<T, E,\n  typename associated_allocator_check<typename T::allocator_type>::type>\n{\n  typedef typename T::allocator_type type;\n\n  static type get(const T& t, const E&) ASIO_NOEXCEPT\n  {\n    return t.get_allocator();\n  }\n};\n\n} // namespace detail\n\n/// Traits type used to obtain the allocator associated with an object.\n/**\n * A program may specialise this traits type if the @c T template parameter in\n * the specialisation is a user-defined type. 
The template parameter @c\n * Allocator shall be a type meeting the Allocator requirements.\n *\n * Specialisations shall meet the following requirements, where @c t is a const\n * reference to an object of type @c T, and @c a is an object of type @c\n * Allocator.\n *\n * @li Provide a nested typedef @c type that identifies a type meeting the\n * Allocator requirements.\n *\n * @li Provide a noexcept static member function named @c get, callable as @c\n * get(t) and with return type @c type.\n *\n * @li Provide a noexcept static member function named @c get, callable as @c\n * get(t,a) and with return type @c type.\n */\ntemplate <typename T, typename Allocator = std::allocator<void> >\nstruct associated_allocator\n{\n  /// If @c T has a nested type @c allocator_type, <tt>T::allocator_type</tt>.\n  /// Otherwise @c Allocator.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef see_below type;\n#else // defined(GENERATING_DOCUMENTATION)\n  typedef typename detail::associated_allocator_impl<T, Allocator>::type type;\n#endif // defined(GENERATING_DOCUMENTATION)\n\n  /// If @c T has a nested type @c allocator_type, returns\n  /// <tt>t.get_allocator()</tt>. 
Otherwise returns @c a.\n  static type get(const T& t,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return detail::associated_allocator_impl<T, Allocator>::get(t, a);\n  }\n};\n\n/// Helper function to obtain an object's associated allocator.\n/**\n * @returns <tt>associated_allocator<T>::get(t)</tt>\n */\ntemplate <typename T>\ninline typename associated_allocator<T>::type\nget_associated_allocator(const T& t) ASIO_NOEXCEPT\n{\n  return associated_allocator<T>::get(t);\n}\n\n/// Helper function to obtain an object's associated allocator.\n/**\n * @returns <tt>associated_allocator<T, Allocator>::get(t, a)</tt>\n */\ntemplate <typename T, typename Allocator>\ninline typename associated_allocator<T, Allocator>::type\nget_associated_allocator(const T& t, const Allocator& a) ASIO_NOEXCEPT\n{\n  return associated_allocator<T, Allocator>::get(t, a);\n}\n\n#if defined(ASIO_HAS_ALIAS_TEMPLATES)\n\ntemplate <typename T, typename Allocator = std::allocator<void> >\nusing associated_allocator_t\n  = typename associated_allocator<T, Allocator>::type;\n\n#endif // defined(ASIO_HAS_ALIAS_TEMPLATES)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_ASSOCIATED_ALLOCATOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/associated_executor.hpp",
    "content": "//\n// associated_executor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_ASSOCIATED_EXECUTOR_HPP\n#define ASIO_ASSOCIATED_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/is_executor.hpp\"\n#include \"asio/system_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename>\nstruct associated_executor_check\n{\n  typedef void type;\n};\n\ntemplate <typename T, typename E, typename = void>\nstruct associated_executor_impl\n{\n  typedef E type;\n\n  static type get(const T&, const E& e) ASIO_NOEXCEPT\n  {\n    return e;\n  }\n};\n\ntemplate <typename T, typename E>\nstruct associated_executor_impl<T, E,\n  typename associated_executor_check<typename T::executor_type>::type>\n{\n  typedef typename T::executor_type type;\n\n  static type get(const T& t, const E&) ASIO_NOEXCEPT\n  {\n    return t.get_executor();\n  }\n};\n\n} // namespace detail\n\n/// Traits type used to obtain the executor associated with an object.\n/**\n * A program may specialise this traits type if the @c T template parameter in\n * the specialisation is a user-defined type. 
The template parameter @c\n * Executor shall be a type meeting the Executor requirements.\n *\n * Specialisations shall meet the following requirements, where @c t is a const\n * reference to an object of type @c T, and @c e is an object of type @c\n * Executor.\n *\n * @li Provide a nested typedef @c type that identifies a type meeting the\n * Executor requirements.\n *\n * @li Provide a noexcept static member function named @c get, callable as @c\n * get(t) and with return type @c type.\n *\n * @li Provide a noexcept static member function named @c get, callable as @c\n * get(t,e) and with return type @c type.\n */\ntemplate <typename T, typename Executor = system_executor>\nstruct associated_executor\n{\n  /// If @c T has a nested type @c executor_type, <tt>T::executor_type</tt>.\n  /// Otherwise @c Executor.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef see_below type;\n#else // defined(GENERATING_DOCUMENTATION)\n  typedef typename detail::associated_executor_impl<T, Executor>::type type;\n#endif // defined(GENERATING_DOCUMENTATION)\n\n  /// If @c T has a nested type @c executor_type, returns\n  /// <tt>t.get_executor()</tt>. 
Otherwise returns @c ex.\n  static type get(const T& t,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return detail::associated_executor_impl<T, Executor>::get(t, ex);\n  }\n};\n\n/// Helper function to obtain an object's associated executor.\n/**\n * @returns <tt>associated_executor<T>::get(t)</tt>\n */\ntemplate <typename T>\ninline typename associated_executor<T>::type\nget_associated_executor(const T& t) ASIO_NOEXCEPT\n{\n  return associated_executor<T>::get(t);\n}\n\n/// Helper function to obtain an object's associated executor.\n/**\n * @returns <tt>associated_executor<T, Executor>::get(t, ex)</tt>\n */\ntemplate <typename T, typename Executor>\ninline typename associated_executor<T, Executor>::type\nget_associated_executor(const T& t, const Executor& ex,\n    typename enable_if<is_executor<\n      Executor>::value>::type* = 0) ASIO_NOEXCEPT\n{\n  return associated_executor<T, Executor>::get(t, ex);\n}\n\n/// Helper function to obtain an object's associated executor.\n/**\n * @returns <tt>associated_executor<T, typename\n * ExecutionContext::executor_type>::get(t, ctx.get_executor())</tt>\n */\ntemplate <typename T, typename ExecutionContext>\ninline typename associated_executor<T,\n  typename ExecutionContext::executor_type>::type\nget_associated_executor(const T& t, ExecutionContext& ctx,\n    typename enable_if<is_convertible<ExecutionContext&,\n      execution_context&>::value>::type* = 0) ASIO_NOEXCEPT\n{\n  return associated_executor<T,\n    typename ExecutionContext::executor_type>::get(t, ctx.get_executor());\n}\n\n#if defined(ASIO_HAS_ALIAS_TEMPLATES)\n\ntemplate <typename T, typename Executor = system_executor>\nusing associated_executor_t = typename associated_executor<T, Executor>::type;\n\n#endif // defined(ASIO_HAS_ALIAS_TEMPLATES)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_ASSOCIATED_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/async_result.hpp",
    "content": "//\n// async_result.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_ASYNC_RESULT_HPP\n#define ASIO_ASYNC_RESULT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/detail/variadic_templates.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if defined(ASIO_HAS_CONCEPTS) \\\n  && defined(ASIO_HAS_VARIADIC_TEMPLATES) \\\n  && defined(ASIO_HAS_DECLTYPE)\n\nnamespace detail {\n\ntemplate <typename T>\nstruct is_completion_signature : false_type\n{\n};\n\ntemplate <typename R, typename... Args>\nstruct is_completion_signature<R(Args...)> : true_type\n{\n};\n\ntemplate <typename T, typename... Args>\nASIO_CONCEPT callable_with = requires(T t, Args&&... args)\n{\n  t(static_cast<Args&&>(args)...);\n};\n\ntemplate <typename T, typename Signature>\nstruct is_completion_handler_for : false_type\n{\n};\n\ntemplate <typename T, typename R, typename... 
Args>\nstruct is_completion_handler_for<T, R(Args...)>\n  : integral_constant<bool, (callable_with<T, Args...>)>\n{\n};\n\n} // namespace detail\n\ntemplate <typename T>\nASIO_CONCEPT completion_signature =\n  detail::is_completion_signature<T>::value;\n\n#define ASIO_COMPLETION_SIGNATURE \\\n  ::asio::completion_signature\n\ntemplate <typename T, completion_signature Signature>\nASIO_CONCEPT completion_handler_for =\n  detail::is_completion_handler_for<T, Signature>::value;\n\n#define ASIO_COMPLETION_HANDLER_FOR(s) \\\n  ::asio::completion_handler_for<s>\n\n#else // defined(ASIO_HAS_CONCEPTS)\n      //   && defined(ASIO_HAS_VARIADIC_TEMPLATES)\n      //   && defined(ASIO_HAS_DECLTYPE)\n\n#define ASIO_COMPLETION_SIGNATURE typename\n#define ASIO_COMPLETION_HANDLER_FOR(s) typename\n\n#endif // defined(ASIO_HAS_CONCEPTS)\n       //   && defined(ASIO_HAS_VARIADIC_TEMPLATES)\n       //   && defined(ASIO_HAS_DECLTYPE)\n\n/// An interface for customising the behaviour of an initiating function.\n/**\n * The async_result traits class is used for determining:\n *\n * @li the concrete completion handler type to be called at the end of the\n * asynchronous operation;\n *\n * @li the initiating function return type; and\n *\n * @li how the return value of the initiating function is obtained.\n *\n * The trait allows the handler and return types to be determined at the point\n * where the specific completion handler signature is known.\n *\n * This template may be specialised for user-defined completion token types.\n * The primary template assumes that the CompletionToken is the completion\n * handler.\n */\ntemplate <typename CompletionToken, ASIO_COMPLETION_SIGNATURE Signature>\nclass async_result\n{\npublic:\n  /// The concrete completion handler type for the specific signature.\n  typedef CompletionToken completion_handler_type;\n\n  /// The return type of the initiating function.\n  typedef void return_type;\n\n  /// Construct an async result from a given handler.\n  
/**\n   * When using a specalised async_result, the constructor has an opportunity\n   * to initialise some state associated with the completion handler, which is\n   * then returned from the initiating function.\n   */\n  explicit async_result(completion_handler_type& h)\n  {\n    (void)h;\n  }\n\n  /// Obtain the value to be returned from the initiating function.\n  return_type get()\n  {\n  }\n\n#if defined(GENERATING_DOCUMENTATION)\n\n  /// Initiate the asynchronous operation that will produce the result, and\n  /// obtain the value to be returned from the initiating function.\n  template <typename Initiation, typename RawCompletionToken, typename... Args>\n  static return_type initiate(\n      ASIO_MOVE_ARG(Initiation) initiation,\n      ASIO_MOVE_ARG(RawCompletionToken) token,\n      ASIO_MOVE_ARG(Args)... args);\n\n#elif defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Initiation,\n      ASIO_COMPLETION_HANDLER_FOR(Signature) RawCompletionToken,\n      typename... Args>\n  static return_type initiate(\n      ASIO_MOVE_ARG(Initiation) initiation,\n      ASIO_MOVE_ARG(RawCompletionToken) token,\n      ASIO_MOVE_ARG(Args)... 
args)\n  {\n    ASIO_MOVE_CAST(Initiation)(initiation)(\n        ASIO_MOVE_CAST(RawCompletionToken)(token),\n        ASIO_MOVE_CAST(Args)(args)...);\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Initiation,\n      ASIO_COMPLETION_HANDLER_FOR(Signature) RawCompletionToken>\n  static return_type initiate(\n      ASIO_MOVE_ARG(Initiation) initiation,\n      ASIO_MOVE_ARG(RawCompletionToken) token)\n  {\n    ASIO_MOVE_CAST(Initiation)(initiation)(\n        ASIO_MOVE_CAST(RawCompletionToken)(token));\n  }\n\n#define ASIO_PRIVATE_INITIATE_DEF(n) \\\n  template <typename Initiation, \\\n      ASIO_COMPLETION_HANDLER_FOR(Signature) RawCompletionToken, \\\n      ASIO_VARIADIC_TPARAMS(n)> \\\n  static return_type initiate( \\\n      ASIO_MOVE_ARG(Initiation) initiation, \\\n      ASIO_MOVE_ARG(RawCompletionToken) token, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    ASIO_MOVE_CAST(Initiation)(initiation)( \\\n        ASIO_MOVE_CAST(RawCompletionToken)(token), \\\n        ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INITIATE_DEF)\n#undef ASIO_PRIVATE_INITIATE_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\nprivate:\n  async_result(const async_result&) ASIO_DELETED;\n  async_result& operator=(const async_result&) ASIO_DELETED;\n};\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <ASIO_COMPLETION_SIGNATURE Signature>\nclass async_result<void, Signature>\n{\n  // Empty.\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n/// Helper template to deduce the handler type from a CompletionToken, capture\n/// a local copy of the handler, and then create an async_result for the\n/// handler.\ntemplate <typename CompletionToken, ASIO_COMPLETION_SIGNATURE Signature>\nstruct async_completion\n{\n  /// The real handler type to be used for the asynchronous operation.\n  typedef typename asio::async_result<\n    typename decay<CompletionToken>::type,\n      Signature>::completion_handler_type 
completion_handler_type;\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Constructor.\n  /**\n   * The constructor creates the concrete completion handler and makes the link\n   * between the handler and the asynchronous result.\n   */\n  explicit async_completion(CompletionToken& token)\n    : completion_handler(static_cast<typename conditional<\n        is_same<CompletionToken, completion_handler_type>::value,\n        completion_handler_type&, CompletionToken&&>::type>(token)),\n      result(completion_handler)\n  {\n  }\n#else // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  explicit async_completion(typename decay<CompletionToken>::type& token)\n    : completion_handler(token),\n      result(completion_handler)\n  {\n  }\n\n  explicit async_completion(const typename decay<CompletionToken>::type& token)\n    : completion_handler(token),\n      result(completion_handler)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// A copy of, or reference to, a real handler object.\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  typename conditional<\n    is_same<CompletionToken, completion_handler_type>::value,\n    completion_handler_type&, completion_handler_type>::type completion_handler;\n#else // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  completion_handler_type completion_handler;\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// The result of the asynchronous operation's initiating function.\n  async_result<typename decay<CompletionToken>::type, Signature> result;\n};\n\nnamespace detail {\n\ntemplate <typename CompletionToken, typename Signature>\nstruct async_result_helper\n  : async_result<typename decay<CompletionToken>::type, Signature>\n{\n};\n\nstruct async_result_memfns_base\n{\n  void initiate();\n};\n\ntemplate <typename T>\nstruct async_result_memfns_derived\n  : T, 
async_result_memfns_base\n{\n};\n\ntemplate <typename T, T>\nstruct async_result_memfns_check\n{\n};\n\ntemplate <typename>\nchar (&async_result_initiate_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar async_result_initiate_memfn_helper(\n    async_result_memfns_check<\n      void (async_result_memfns_base::*)(),\n      &async_result_memfns_derived<T>::initiate>*);\n\ntemplate <typename CompletionToken, typename Signature>\nstruct async_result_has_initiate_memfn\n  : integral_constant<bool, sizeof(async_result_initiate_memfn_helper<\n      async_result<typename decay<CompletionToken>::type, Signature>\n    >(0)) != 1>\n{\n};\n\n} // namespace detail\n\n#if defined(GENERATING_DOCUMENTATION)\n# define ASIO_INITFN_RESULT_TYPE(ct, sig) \\\n  void_or_deduced\n#elif defined(_MSC_VER) && (_MSC_VER < 1500)\n# define ASIO_INITFN_RESULT_TYPE(ct, sig) \\\n  typename ::asio::detail::async_result_helper< \\\n    ct, sig>::return_type\n#define ASIO_HANDLER_TYPE(ct, sig) \\\n  typename ::asio::detail::async_result_helper< \\\n    ct, sig>::completion_handler_type\n#else\n# define ASIO_INITFN_RESULT_TYPE(ct, sig) \\\n  typename ::asio::async_result< \\\n    typename ::asio::decay<ct>::type, sig>::return_type\n#define ASIO_HANDLER_TYPE(ct, sig) \\\n  typename ::asio::async_result< \\\n    typename ::asio::decay<ct>::type, sig>::completion_handler_type\n#endif\n\n#if defined(GENERATION_DOCUMENTATION)\n# define ASIO_INITFN_AUTO_RESULT_TYPE(ct, sig) \\\n  auto\n#elif defined(ASIO_HAS_RETURN_TYPE_DEDUCTION)\n# define ASIO_INITFN_AUTO_RESULT_TYPE(ct, sig) \\\n  auto\n#else\n# define ASIO_INITFN_AUTO_RESULT_TYPE(ct, sig) \\\n  ASIO_INITFN_RESULT_TYPE(ct, sig)\n#endif\n\n#if defined(GENERATION_DOCUMENTATION)\n# define ASIO_INITFN_DEDUCED_RESULT_TYPE(ct, sig, expr) \\\n  void_or_deduced\n#elif defined(ASIO_HAS_DECLTYPE)\n# define ASIO_INITFN_DEDUCED_RESULT_TYPE(ct, sig, expr) \\\n  decltype expr\n#else\n# define ASIO_INITFN_DEDUCED_RESULT_TYPE(ct, sig, expr) \\\n  
ASIO_INITFN_RESULT_TYPE(ct, sig)\n#endif\n\n#if defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename CompletionToken,\n    completion_signature Signature,\n    typename Initiation, typename... Args>\nvoid_or_deduced async_initiate(\n    ASIO_MOVE_ARG(Initiation) initiation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken),\n    ASIO_MOVE_ARG(Args)... args);\n\n#elif defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename CompletionToken,\n    ASIO_COMPLETION_SIGNATURE Signature,\n    typename Initiation, typename... Args>\ninline typename enable_if<\n    detail::async_result_has_initiate_memfn<CompletionToken, Signature>::value,\n    ASIO_INITFN_DEDUCED_RESULT_TYPE(CompletionToken, Signature,\n      (async_result<typename decay<CompletionToken>::type,\n        Signature>::initiate(declval<ASIO_MOVE_ARG(Initiation)>(),\n          declval<ASIO_MOVE_ARG(CompletionToken)>(),\n          declval<ASIO_MOVE_ARG(Args)>()...)))>::type\nasync_initiate(ASIO_MOVE_ARG(Initiation) initiation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token,\n    ASIO_MOVE_ARG(Args)... args)\n{\n  return async_result<typename decay<CompletionToken>::type,\n    Signature>::initiate(ASIO_MOVE_CAST(Initiation)(initiation),\n      ASIO_MOVE_CAST(CompletionToken)(token),\n      ASIO_MOVE_CAST(Args)(args)...);\n}\n\ntemplate <typename CompletionToken,\n    ASIO_COMPLETION_SIGNATURE Signature,\n    typename Initiation, typename... Args>\ninline typename enable_if<\n    !detail::async_result_has_initiate_memfn<CompletionToken, Signature>::value,\n    ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type\nasync_initiate(ASIO_MOVE_ARG(Initiation) initiation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token,\n    ASIO_MOVE_ARG(Args)... 
args)\n{\n  async_completion<CompletionToken, Signature> completion(token);\n\n  ASIO_MOVE_CAST(Initiation)(initiation)(\n      ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(CompletionToken,\n        Signature))(completion.completion_handler),\n      ASIO_MOVE_CAST(Args)(args)...);\n\n  return completion.result.get();\n}\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename CompletionToken,\n    ASIO_COMPLETION_SIGNATURE Signature,\n    typename Initiation>\ninline typename enable_if<\n    detail::async_result_has_initiate_memfn<CompletionToken, Signature>::value,\n    ASIO_INITFN_DEDUCED_RESULT_TYPE(CompletionToken, Signature,\n      (async_result<typename decay<CompletionToken>::type,\n        Signature>::initiate(declval<ASIO_MOVE_ARG(Initiation)>(),\n          declval<ASIO_MOVE_ARG(CompletionToken)>())))>::type\nasync_initiate(ASIO_MOVE_ARG(Initiation) initiation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token)\n{\n  return async_result<typename decay<CompletionToken>::type,\n    Signature>::initiate(ASIO_MOVE_CAST(Initiation)(initiation),\n      ASIO_MOVE_CAST(CompletionToken)(token));\n}\n\ntemplate <typename CompletionToken,\n    ASIO_COMPLETION_SIGNATURE Signature,\n    typename Initiation>\ninline typename enable_if<\n    !detail::async_result_has_initiate_memfn<CompletionToken, Signature>::value,\n    ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type\nasync_initiate(ASIO_MOVE_ARG(Initiation) initiation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token)\n{\n  async_completion<CompletionToken, Signature> completion(token);\n\n  ASIO_MOVE_CAST(Initiation)(initiation)(\n      ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(CompletionToken,\n        Signature))(completion.completion_handler));\n\n  return completion.result.get();\n}\n\n#define ASIO_PRIVATE_INITIATE_DEF(n) \\\n  template <typename CompletionToken, \\\n      ASIO_COMPLETION_SIGNATURE Signature, \\\n      typename Initiation, ASIO_VARIADIC_TPARAMS(n)> \\\n  inline typename enable_if< \\\n 
     detail::async_result_has_initiate_memfn< \\\n        CompletionToken, Signature>::value, \\\n      ASIO_INITFN_DEDUCED_RESULT_TYPE(CompletionToken, Signature, \\\n        (async_result<typename decay<CompletionToken>::type, \\\n          Signature>::initiate(declval<ASIO_MOVE_ARG(Initiation)>(), \\\n            declval<ASIO_MOVE_ARG(CompletionToken)>(), \\\n            ASIO_VARIADIC_MOVE_DECLVAL(n))))>::type \\\n  async_initiate(ASIO_MOVE_ARG(Initiation) initiation, \\\n      ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    return async_result<typename decay<CompletionToken>::type, \\\n      Signature>::initiate(ASIO_MOVE_CAST(Initiation)(initiation), \\\n        ASIO_MOVE_CAST(CompletionToken)(token), \\\n        ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  \\\n  template <typename CompletionToken, \\\n      ASIO_COMPLETION_SIGNATURE Signature, \\\n      typename Initiation, ASIO_VARIADIC_TPARAMS(n)> \\\n  inline typename enable_if< \\\n      !detail::async_result_has_initiate_memfn< \\\n        CompletionToken, Signature>::value, \\\n      ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type \\\n  async_initiate(ASIO_MOVE_ARG(Initiation) initiation, \\\n      ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    async_completion<CompletionToken, Signature> completion(token); \\\n  \\\n    ASIO_MOVE_CAST(Initiation)(initiation)( \\\n        ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(CompletionToken, \\\n          Signature))(completion.completion_handler), \\\n        ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  \\\n    return completion.result.get(); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INITIATE_DEF)\n#undef ASIO_PRIVATE_INITIATE_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#if defined(ASIO_HAS_CONCEPTS) \\\n  && defined(ASIO_HAS_VARIADIC_TEMPLATES) \\\n  && defined(ASIO_HAS_DECLTYPE)\n\nnamespace detail {\n\ntemplate <typename 
Signature>\nstruct initiation_archetype\n{\n  template <completion_handler_for<Signature> CompletionHandler>\n  void operator()(CompletionHandler&&) const\n  {\n  }\n};\n\n} // namespace detail\n\ntemplate <typename T, completion_signature Signature>\nASIO_CONCEPT completion_token_for = requires(T&& t)\n{\n  async_initiate<T, Signature>(detail::initiation_archetype<Signature>{}, t);\n};\n\n#define ASIO_COMPLETION_TOKEN_FOR(s) \\\n  ::asio::completion_token_for<s>\n\n#else // defined(ASIO_HAS_CONCEPTS)\n      //   && defined(ASIO_HAS_VARIADIC_TEMPLATES)\n      //   && defined(ASIO_HAS_DECLTYPE)\n\n#define ASIO_COMPLETION_TOKEN_FOR(s) typename\n\n#endif // defined(ASIO_HAS_CONCEPTS)\n       //   && defined(ASIO_HAS_VARIADIC_TEMPLATES)\n       //   && defined(ASIO_HAS_DECLTYPE)\n\nnamespace detail {\n\ntemplate <typename>\nstruct default_completion_token_check\n{\n  typedef void type;\n};\n\ntemplate <typename T, typename = void>\nstruct default_completion_token_impl\n{\n  typedef void type;\n};\n\ntemplate <typename T>\nstruct default_completion_token_impl<T,\n  typename default_completion_token_check<\n    typename T::default_completion_token_type>::type>\n{\n  typedef typename T::default_completion_token_type type;\n};\n\n} // namespace detail\n\n#if defined(GENERATING_DOCUMENTATION)\n\n/// Traits type used to determine the default completion token type associated\n/// with a type (such as an executor).\n/**\n * A program may specialise this traits type if the @c T template parameter in\n * the specialisation is a user-defined type.\n *\n * Specialisations of this trait may provide a nested typedef @c type, which is\n * a default-constructible completion token type.\n */\ntemplate <typename T>\nstruct default_completion_token\n{\n  /// If @c T has a nested type @c default_completion_token_type,\n  /// <tt>T::default_completion_token_type</tt>. 
Otherwise the typedef @c type\n  /// is not defined.\n  typedef see_below type;\n};\n#else\ntemplate <typename T>\nstruct default_completion_token\n  : detail::default_completion_token_impl<T>\n{\n};\n#endif\n\n#if defined(ASIO_HAS_ALIAS_TEMPLATES)\n\ntemplate <typename T>\nusing default_completion_token_t = typename default_completion_token<T>::type;\n\n#endif // defined(ASIO_HAS_ALIAS_TEMPLATES)\n\n#if defined(ASIO_HAS_DEFAULT_FUNCTION_TEMPLATE_ARGUMENTS)\n\n#define ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(e) \\\n  = typename ::asio::default_completion_token<e>::type\n#define ASIO_DEFAULT_COMPLETION_TOKEN(e) \\\n  = typename ::asio::default_completion_token<e>::type()\n\n#else // defined(ASIO_HAS_DEFAULT_FUNCTION_TEMPLATE_ARGUMENTS)\n\n#define ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(e)\n#define ASIO_DEFAULT_COMPLETION_TOKEN(e)\n\n#endif // defined(ASIO_HAS_DEFAULT_FUNCTION_TEMPLATE_ARGUMENTS)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_ASYNC_RESULT_HPP\n"
  },
  {
    "path": "src/third_party/asio/awaitable.hpp",
    "content": "//\n// awaitable.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_AWAITABLE_HPP\n#define ASIO_AWAITABLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION)\n\n#include <experimental/coroutine>\n#include \"asio/executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nusing std::experimental::coroutine_handle;\nusing std::experimental::suspend_always;\n\ntemplate <typename> class awaitable_thread;\ntemplate <typename, typename> class awaitable_frame;\n\n} // namespace detail\n\n/// The return type of a coroutine or asynchronous operation.\ntemplate <typename T, typename Executor = executor>\nclass awaitable\n{\npublic:\n  /// The type of the awaited value.\n  typedef T value_type;\n\n  /// The executor type that will be used for the coroutine.\n  typedef Executor executor_type;\n\n  /// Default constructor.\n  constexpr awaitable() noexcept\n    : frame_(nullptr)\n  {\n  }\n\n  /// Move constructor.\n  awaitable(awaitable&& other) noexcept\n    : frame_(std::exchange(other.frame_, nullptr))\n  {\n  }\n\n  /// Destructor\n  ~awaitable()\n  {\n    if (frame_)\n      frame_->destroy();\n  }\n\n  /// Checks if the awaitable refers to a future result.\n  bool valid() const noexcept\n  {\n    return !!frame_;\n  }\n\n#if !defined(GENERATING_DOCUMENTATION)\n\n  // Support for co_await keyword.\n  bool await_ready() const noexcept\n  {\n    return false;\n  }\n\n  // Support for co_await keyword.\n  template <class U>\n  void await_suspend(\n      detail::coroutine_handle<detail::awaitable_frame<U, Executor>> 
h)\n  {\n    frame_->push_frame(&h.promise());\n  }\n\n  // Support for co_await keyword.\n  T await_resume()\n  {\n    return frame_->get();\n  }\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\nprivate:\n  template <typename> friend class detail::awaitable_thread;\n  template <typename, typename> friend class detail::awaitable_frame;\n\n  // Not copy constructible or copy assignable.\n  awaitable(const awaitable&) = delete;\n  awaitable& operator=(const awaitable&) = delete;\n\n  // Construct the awaitable from a coroutine's frame object.\n  explicit awaitable(detail::awaitable_frame<T, Executor>* a)\n    : frame_(a)\n  {\n  }\n\n  detail::awaitable_frame<T, Executor>* frame_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/awaitable.hpp\"\n\n#endif // defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_AWAITABLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_datagram_socket.hpp",
    "content": "//\n// basic_datagram_socket.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_DATAGRAM_SOCKET_HPP\n#define ASIO_BASIC_DATAGRAM_SOCKET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/basic_socket.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL)\n#define ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_datagram_socket;\n\n#endif // !defined(ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL)\n\n/// Provides datagram-oriented socket functionality.\n/**\n * The basic_datagram_socket class template provides asynchronous and blocking\n * datagram-oriented socket functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Protocol, typename Executor>\nclass basic_datagram_socket\n  : public basic_socket<Protocol, Executor>\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the socket type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The socket type when rebound to the specified executor.\n    typedef basic_datagram_socket<Protocol, Executor1> other;\n  };\n\n  /// The native representation 
of a socket.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#else\n  typedef typename basic_socket<Protocol,\n    Executor>::native_handle_type native_handle_type;\n#endif\n\n  /// The protocol type.\n  typedef Protocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  /// Construct a basic_datagram_socket without opening it.\n  /**\n   * This constructor creates a datagram socket without opening it. The open()\n   * function must be called before data can be sent or received on the socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   */\n  explicit basic_datagram_socket(const executor_type& ex)\n    : basic_socket<Protocol, Executor>(ex)\n  {\n  }\n\n  /// Construct a basic_datagram_socket without opening it.\n  /**\n   * This constructor creates a datagram socket without opening it. 
The open()\n   * function must be called before data can be sent or received on the socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   */\n  template <typename ExecutionContext>\n  explicit basic_datagram_socket(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context)\n  {\n  }\n\n  /// Construct and open a basic_datagram_socket.\n  /**\n   * This constructor creates and opens a datagram socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_datagram_socket(const executor_type& ex, const protocol_type& protocol)\n    : basic_socket<Protocol, Executor>(ex, protocol)\n  {\n  }\n\n  /// Construct and open a basic_datagram_socket.\n  /**\n   * This constructor creates and opens a datagram socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_datagram_socket(ExecutionContext& context,\n      const protocol_type& protocol,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, protocol)\n  {\n  }\n\n  /// Construct a basic_datagram_socket, opening 
it and binding it to the given\n  /// local endpoint.\n  /**\n   * This constructor creates a datagram socket and automatically opens it bound\n   * to the specified endpoint on the local machine. The protocol used is the\n   * protocol associated with the given endpoint.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the datagram\n   * socket will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_datagram_socket(const executor_type& ex, const endpoint_type& endpoint)\n    : basic_socket<Protocol, Executor>(ex, endpoint)\n  {\n  }\n\n  /// Construct a basic_datagram_socket, opening it and binding it to the given\n  /// local endpoint.\n  /**\n   * This constructor creates a datagram socket and automatically opens it bound\n   * to the specified endpoint on the local machine. 
The protocol used is the\n   * protocol associated with the given endpoint.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the datagram\n   * socket will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_datagram_socket(ExecutionContext& context,\n      const endpoint_type& endpoint,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, endpoint)\n  {\n  }\n\n  /// Construct a basic_datagram_socket on an existing native socket.\n  /**\n   * This constructor creates a datagram socket object to hold an existing\n   * native socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param native_socket The new underlying socket implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_datagram_socket(const executor_type& ex,\n      const protocol_type& protocol, const native_handle_type& native_socket)\n    : basic_socket<Protocol, Executor>(ex, protocol, native_socket)\n  {\n  }\n\n  /// Construct a basic_datagram_socket on an existing native socket.\n  /**\n   * This constructor creates a datagram socket object to hold an existing\n   * native socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters 
to be used.\n   *\n   * @param native_socket The new underlying socket implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_datagram_socket(ExecutionContext& context,\n      const protocol_type& protocol, const native_handle_type& native_socket,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, protocol, native_socket)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_datagram_socket from another.\n  /**\n   * This constructor moves a datagram socket from one object to another.\n   *\n   * @param other The other basic_datagram_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_datagram_socket(const executor_type&)\n   * constructor.\n   */\n  basic_datagram_socket(basic_datagram_socket&& other) ASIO_NOEXCEPT\n    : basic_socket<Protocol, Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a basic_datagram_socket from another.\n  /**\n   * This assignment operator moves a datagram socket from one object to\n   * another.\n   *\n   * @param other The other basic_datagram_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_datagram_socket(const executor_type&)\n   * constructor.\n   */\n  basic_datagram_socket& operator=(basic_datagram_socket&& other)\n  {\n    basic_socket<Protocol, Executor>::operator=(std::move(other));\n    return *this;\n  }\n\n  /// Move-construct a basic_datagram_socket from a socket of another protocol\n  /// type.\n  /**\n   * This constructor moves a datagram socket from one object to another.\n   *\n   * @param other The other 
basic_datagram_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_datagram_socket(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  basic_datagram_socket(basic_datagram_socket<Protocol1, Executor1>&& other,\n      typename enable_if<\n        is_convertible<Protocol1, Protocol>::value\n          && is_convertible<Executor1, Executor>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a basic_datagram_socket from a socket of another protocol\n  /// type.\n  /**\n   * This assignment operator moves a datagram socket from one object to\n   * another.\n   *\n   * @param other The other basic_datagram_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_datagram_socket(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  typename enable_if<\n    is_convertible<Protocol1, Protocol>::value\n      && is_convertible<Executor1, Executor>::value,\n    basic_datagram_socket&\n  >::type operator=(basic_datagram_socket<Protocol1, Executor1>&& other)\n  {\n    basic_socket<Protocol, Executor>::operator=(std::move(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the socket.\n  /**\n   * This function destroys the socket, cancelling any outstanding asynchronous\n   * operations associated with the socket as if by calling @c cancel.\n   */\n  ~basic_datagram_socket()\n  {\n  }\n\n  /// Send some data on a connected socket.\n  /**\n   * This function is used to send data on the datagram socket. 
The function\n   * call will block until the data has been sent successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The send operation can only be used with a connected socket. Use\n   * the send_to function to send data on an unconnected datagram socket.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code socket.send(asio::buffer(data, size)); @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, 0, ec);\n    asio::detail::throw_error(ec, \"send\");\n    return s;\n  }\n\n  /// Send some data on a connected socket.\n  /**\n   * This function is used to send data on the datagram socket. The function\n   * call will block until the data has been sent successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The send operation can only be used with a connected socket. 
Use\n   * the send_to function to send data on an unconnected datagram socket.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, flags, ec);\n    asio::detail::throw_error(ec, \"send\");\n    return s;\n  }\n\n  /// Send some data on a connected socket.\n  /**\n   * This function is used to send data on the datagram socket. The function\n   * call will block until the data has been sent successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @note The send operation can only be used with a connected socket. Use\n   * the send_to function to send data on an unconnected datagram socket.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, flags, ec);\n  }\n\n  /// Start an asynchronous send on a connected socket.\n  /**\n   * This function is used to asynchronously send data on the datagram socket.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent on the socket. 
Although\n   * the buffers object may be copied as necessary, ownership of the underlying\n   * memory blocks is retained by the caller, which must guarantee that they\n   * remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The async_send operation can only be used with a connected socket.\n   * Use the async_send_to function to send data on an unconnected datagram\n   * socket.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.async_send(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send(this), handler,\n        buffers, 
socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous send on a connected socket.\n  /**\n   * This function is used to asynchronously send data on the datagram socket.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent on the socket. Although\n   * the buffers object may be copied as necessary, ownership of the underlying\n   * memory blocks is retained by the caller, which must guarantee that they\n   * remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The async_send operation can only be used with a connected socket.\n   * Use the async_send_to function to send data on an unconnected datagram\n   * socket.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send(this), handler, buffers, flags);\n  }\n\n  /// Send a datagram to the specified endpoint.\n  /**\n   * This function is used to send a datagram to the specified remote endpoint.\n   * The function call will block until the data has been sent successfully or\n   * an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * asio::ip::udp::endpoint destination(\n   *     asio::ip::address::from_string(\"1.2.3.4\"), 12345);\n   * socket.send_to(asio::buffer(data, size), destination);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t 
send_to(const ConstBufferSequence& buffers,\n      const endpoint_type& destination)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send_to(\n        this->impl_.get_implementation(), buffers, destination, 0, ec);\n    asio::detail::throw_error(ec, \"send_to\");\n    return s;\n  }\n\n  /// Send a datagram to the specified endpoint.\n  /**\n   * This function is used to send a datagram to the specified remote endpoint.\n   * The function call will block until the data has been sent successfully or\n   * an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send_to(const ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send_to(\n        this->impl_.get_implementation(), buffers, destination, flags, ec);\n    asio::detail::throw_error(ec, \"send_to\");\n    return s;\n  }\n\n  /// Send a datagram to the specified endpoint.\n  /**\n   * This function is used to send a datagram to the specified remote endpoint.\n   * The function call will block until the data has been sent successfully or\n   * an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes sent.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t 
send_to(const ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().send_to(this->impl_.get_implementation(),\n        buffers, destination, flags, ec);\n  }\n\n  /// Start an asynchronous send.\n  /**\n   * This function is used to asynchronously send a datagram to the specified\n   * remote endpoint. The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   * Copies will be made of the endpoint as required.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * asio::ip::udp::endpoint destination(\n   *     asio::ip::address::from_string(\"1.2.3.4\"), 12345);\n   * socket.async_send_to(\n   *     asio::buffer(data, size), destination, handler);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send_to(const ConstBufferSequence& buffers,\n      const endpoint_type& destination,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send_to(this), handler, buffers,\n        destination, socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous send.\n  /**\n   * This function is used to asynchronously send a datagram to the specified\n   * remote endpoint. 
The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   * Copies will be made of the endpoint as required.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send_to(const ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send_to(this), handler, buffers, destination, flags);\n  }\n\n  /// Receive some data on a connected socket.\n  /**\n   * This function is used to receive data on the datagram socket. The function\n   * call will block until data has been received successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The receive operation can only be used with a connected socket. 
Use\n   * the receive_from function to receive data on an unconnected datagram\n   * socket.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code socket.receive(asio::buffer(data, size)); @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, 0, ec);\n    asio::detail::throw_error(ec, \"receive\");\n    return s;\n  }\n\n  /// Receive some data on a connected socket.\n  /**\n   * This function is used to receive data on the datagram socket. The function\n   * call will block until data has been received successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The receive operation can only be used with a connected socket. Use\n   * the receive_from function to receive data on an unconnected datagram\n   * socket.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, flags, ec);\n    asio::detail::throw_error(ec, \"receive\");\n    return s;\n  }\n\n  /// Receive some data on a connected socket.\n  /**\n   * This function is used to receive data on the datagram socket. 
The function\n   * call will block until data has been received successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes received.\n   *\n   * @note The receive operation can only be used with a connected socket. Use\n   * the receive_from function to receive data on an unconnected datagram\n   * socket.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, flags, ec);\n  }\n\n  /// Start an asynchronous receive on a connected socket.\n  /**\n   * This function is used to asynchronously receive data from the datagram\n   * socket. The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The async_receive operation can only be used with a connected socket.\n   * Use the async_receive_from function to receive data on an unconnected\n   * datagram socket.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.async_receive(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive(this), handler,\n        buffers, socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous receive on a connected socket.\n  /**\n   * This function is used to asynchronously receive data from the datagram\n   * socket. 
The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The async_receive operation can only be used with a connected socket.\n   * Use the async_receive_from function to receive data on an unconnected\n   * datagram socket.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive(this), handler, buffers, flags);\n  }\n\n  /// Receive a datagram with the endpoint of the sender.\n  /**\n   * This 
function is used to receive a datagram. The function call will block\n   * until data has been received successfully or an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the datagram.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * asio::ip::udp::endpoint sender_endpoint;\n   * socket.receive_from(\n   *     asio::buffer(data, size), sender_endpoint);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive_from(\n        this->impl_.get_implementation(), buffers, sender_endpoint, 0, ec);\n    asio::detail::throw_error(ec, \"receive_from\");\n    return s;\n  }\n  \n  /// Receive a datagram with the endpoint of the sender.\n  /**\n   * This function is used to receive a datagram. 
The function call will block\n   * until data has been received successfully or an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the datagram.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive_from(\n        this->impl_.get_implementation(), buffers, sender_endpoint, flags, ec);\n    asio::detail::throw_error(ec, \"receive_from\");\n    return s;\n  }\n  \n  /// Receive a datagram with the endpoint of the sender.\n  /**\n   * This function is used to receive a datagram. 
The function call will block\n   * until data has been received successfully or an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the datagram.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes received.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().receive_from(\n        this->impl_.get_implementation(), buffers, sender_endpoint, flags, ec);\n  }\n\n  /// Start an asynchronous receive.\n  /**\n   * This function is used to asynchronously receive a datagram. The function\n   * call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the datagram. Ownership of the sender_endpoint object\n   * is retained by the caller, which must guarantee that it is valid until the\n   * handler is called.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code socket.async_receive_from(\n   *     asio::buffer(data, size), sender_endpoint, handler); @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive_from(this), handler, buffers,\n        &sender_endpoint, socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous receive.\n  /**\n   * This function is used to asynchronously receive a datagram. 
The function\n   * call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the datagram. Ownership of the sender_endpoint object\n   * is retained by the caller, which must guarantee that it is valid until the\n   * handler is called.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive_from(this), handler,\n        buffers, &sender_endpoint, flags);\n  }\n\nprivate:\n  class initiate_async_send\n  { \n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_send(basic_datagram_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_send(\n          self_->impl_.get_implementation(), buffers, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_datagram_socket* self_;\n  };\n\n  class initiate_async_send_to\n  {\n  public:\n    typedef Executor executor_type;\n\n 
   explicit initiate_async_send_to(basic_datagram_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers, const endpoint_type& destination,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_send_to(\n          self_->impl_.get_implementation(), buffers, destination, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_datagram_socket* self_;\n  };\n\n  class initiate_async_receive\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_receive(basic_datagram_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_receive(\n          self_->impl_.get_implementation(), buffers, flags,\n          handler2.value, 
self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_datagram_socket* self_;\n  };\n\n  class initiate_async_receive_from\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_receive_from(basic_datagram_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers, endpoint_type* sender_endpoint,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_receive_from(\n          self_->impl_.get_implementation(), buffers, *sender_endpoint, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_datagram_socket* self_;\n  };\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BASIC_DATAGRAM_SOCKET_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_deadline_timer.hpp",
    "content": "//\n// basic_deadline_timer.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_DEADLINE_TIMER_HPP\n#define ASIO_BASIC_DEADLINE_TIMER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include <cstddef>\n#include \"asio/detail/deadline_timer_service.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/time_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Provides waitable timer functionality.\n/**\n * The basic_deadline_timer class template provides the ability to perform a\n * blocking or asynchronous wait for a timer to expire.\n *\n * A deadline timer is always in one of two states: \"expired\" or \"not expired\".\n * If the wait() or async_wait() function is called on an expired timer, the\n * wait operation will complete immediately.\n *\n * Most applications will use the asio::deadline_timer typedef.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Examples\n * Performing a blocking wait:\n * @code\n * // Construct a timer without setting an expiry time.\n * asio::deadline_timer timer(my_context);\n *\n * // Set an expiry time relative to now.\n * timer.expires_from_now(boost::posix_time::seconds(5));\n *\n * // Wait for the timer to expire.\n * 
timer.wait();\n * @endcode\n *\n * @par \n * Performing an asynchronous wait:\n * @code\n * void handler(const asio::error_code& error)\n * {\n *   if (!error)\n *   {\n *     // Timer expired.\n *   }\n * }\n *\n * ...\n *\n * // Construct a timer with an absolute expiry time.\n * asio::deadline_timer timer(my_context,\n *     boost::posix_time::time_from_string(\"2005-12-07 23:59:59.000\"));\n *\n * // Start an asynchronous wait.\n * timer.async_wait(handler);\n * @endcode\n *\n * @par Changing an active deadline_timer's expiry time\n *\n * Changing the expiry time of a timer while there are pending asynchronous\n * waits causes those wait operations to be cancelled. To ensure that the action\n * associated with the timer is performed only once, use something like this:\n * used:\n *\n * @code\n * void on_some_event()\n * {\n *   if (my_timer.expires_from_now(seconds(5)) > 0)\n *   {\n *     // We managed to cancel the timer. Start new asynchronous wait.\n *     my_timer.async_wait(on_timeout);\n *   }\n *   else\n *   {\n *     // Too late, timer has already expired!\n *   }\n * }\n *\n * void on_timeout(const asio::error_code& e)\n * {\n *   if (e != asio::error::operation_aborted)\n *   {\n *     // Timer was not cancelled, take necessary action.\n *   }\n * }\n * @endcode\n *\n * @li The asio::basic_deadline_timer::expires_from_now() function\n * cancels any pending asynchronous waits, and returns the number of\n * asynchronous waits that were cancelled. If it returns 0 then you were too\n * late and the wait handler has already been executed, or will soon be\n * executed. 
If it returns 1 then the wait handler was successfully cancelled.\n *\n * @li If a wait handler is cancelled, the asio::error_code passed to\n * it contains the value asio::error::operation_aborted.\n */\ntemplate <typename Time,\n    typename TimeTraits = asio::time_traits<Time>,\n    typename Executor = executor>\nclass basic_deadline_timer\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the timer type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The timer type when rebound to the specified executor.\n    typedef basic_deadline_timer<Time, TimeTraits, Executor1> other;\n  };\n\n  /// The time traits type.\n  typedef TimeTraits traits_type;\n\n  /// The time type.\n  typedef typename traits_type::time_type time_type;\n\n  /// The duration type.\n  typedef typename traits_type::duration_type duration_type;\n\n  /// Constructor.\n  /**\n   * This constructor creates a timer without setting an expiry time. The\n   * expires_at() or expires_from_now() functions must be called to set an\n   * expiry time before the timer can be waited on.\n   *\n   * @param ex The I/O executor that the timer will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the timer.\n   */\n  explicit basic_deadline_timer(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Constructor.\n  /**\n   * This constructor creates a timer without setting an expiry time. 
The\n   * expires_at() or expires_from_now() functions must be called to set an\n   * expiry time before the timer can be waited on.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the timer will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the timer.\n   */\n  template <typename ExecutionContext>\n  explicit basic_deadline_timer(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Constructor to set a particular expiry time as an absolute time.\n  /**\n   * This constructor creates a timer and sets the expiry time.\n   *\n   * @param ex The I/O executor that the timer will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the timer.\n   *\n   * @param expiry_time The expiry time to be used for the timer, expressed\n   * as an absolute time.\n   */\n  basic_deadline_timer(const executor_type& ex, const time_type& expiry_time)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().expires_at(impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_at\");\n  }\n\n  /// Constructor to set a particular expiry time as an absolute time.\n  /**\n   * This constructor creates a timer and sets the expiry time.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the timer will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the timer.\n   *\n   * @param expiry_time The expiry time to be used for the timer, expressed\n   * as an absolute time.\n   */\n  template <typename ExecutionContext>\n  basic_deadline_timer(ExecutionContext& context, const time_type& expiry_time,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : 
impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().expires_at(impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_at\");\n  }\n\n  /// Constructor to set a particular expiry time relative to now.\n  /**\n   * This constructor creates a timer and sets the expiry time.\n   *\n   * @param ex The I/O executor that the timer will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the timer.\n   *\n   * @param expiry_time The expiry time to be used for the timer, relative to\n   * now.\n   */\n  basic_deadline_timer(const executor_type& ex,\n      const duration_type& expiry_time)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().expires_from_now(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_from_now\");\n  }\n\n  /// Constructor to set a particular expiry time relative to now.\n  /**\n   * This constructor creates a timer and sets the expiry time.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the timer will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the timer.\n   *\n   * @param expiry_time The expiry time to be used for the timer, relative to\n   * now.\n   */\n  template <typename ExecutionContext>\n  basic_deadline_timer(ExecutionContext& context,\n      const duration_type& expiry_time,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().expires_from_now(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_from_now\");\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_deadline_timer from another.\n  /**\n   * This constructor moves a timer from one object to 
another.\n   *\n   * @param other The other basic_deadline_timer object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_deadline_timer(const executor_type&)\n   * constructor.\n   */\n  basic_deadline_timer(basic_deadline_timer&& other)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a basic_deadline_timer from another.\n  /**\n   * This assignment operator moves a timer from one object to another. Cancels\n   * any outstanding asynchronous operations associated with the target object.\n   *\n   * @param other The other basic_deadline_timer object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_deadline_timer(const executor_type&)\n   * constructor.\n   */\n  basic_deadline_timer& operator=(basic_deadline_timer&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the timer.\n  /**\n   * This function destroys the timer, cancelling any outstanding asynchronous\n   * wait operations associated with the timer as if by calling @c cancel.\n   */\n  ~basic_deadline_timer()\n  {\n  }\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n  /// Cancel any asynchronous operations that are waiting on the timer.\n  /**\n   * This function forces the completion of any pending asynchronous wait\n   * operations against the timer. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * Cancelling the timer does not change the expiry time.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when cancel() is called, then the\n   * handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t cancel()\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n    return s;\n  }\n\n  /// Cancel any asynchronous operations that are waiting on the timer.\n  /**\n   * This function forces the completion of any pending asynchronous wait\n   * operations against the timer. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * Cancelling the timer does not change the expiry time.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @note If the timer has already expired when cancel() is called, then the\n   * handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t cancel(asio::error_code& ec)\n  {\n    return impl_.get_service().cancel(impl_.get_implementation(), ec);\n  }\n\n  /// Cancels one asynchronous operation that is waiting on the timer.\n  /**\n   * This function forces the completion of one pending asynchronous wait\n   * operation against the timer. Handlers are cancelled in FIFO order. The\n   * handler for the cancelled operation will be invoked with the\n   * asio::error::operation_aborted error code.\n   *\n   * Cancelling the timer does not change the expiry time.\n   *\n   * @return The number of asynchronous operations that were cancelled. 
That is,\n   * either 0 or 1.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when cancel_one() is called, then\n   * the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t cancel_one()\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().cancel_one(\n        impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel_one\");\n    return s;\n  }\n\n  /// Cancels one asynchronous operation that is waiting on the timer.\n  /**\n   * This function forces the completion of one pending asynchronous wait\n   * operation against the timer. Handlers are cancelled in FIFO order. The\n   * handler for the cancelled operation will be invoked with the\n   * asio::error::operation_aborted error code.\n   *\n   * Cancelling the timer does not change the expiry time.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of asynchronous operations that were cancelled. 
That is,\n   * either 0 or 1.\n   *\n   * @note If the timer has already expired when cancel_one() is called, then\n   * the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t cancel_one(asio::error_code& ec)\n  {\n    return impl_.get_service().cancel_one(impl_.get_implementation(), ec);\n  }\n\n  /// Get the timer's expiry time as an absolute time.\n  /**\n   * This function may be used to obtain the timer's current expiry time.\n   * Whether the timer has expired or not does not affect this value.\n   */\n  time_type expires_at() const\n  {\n    return impl_.get_service().expires_at(impl_.get_implementation());\n  }\n\n  /// Set the timer's expiry time as an absolute time.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when expires_at() is called, then\n   * the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_at(const time_type& expiry_time)\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().expires_at(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_at\");\n    return s;\n  }\n\n  /// Set the timer's expiry time as an absolute time.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @note If the timer has already expired when expires_at() is called, then\n   * the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_at(const time_type& expiry_time,\n      asio::error_code& ec)\n  {\n    return impl_.get_service().expires_at(\n        impl_.get_implementation(), expiry_time, ec);\n  }\n\n  /// Get the timer's expiry time relative to now.\n  /**\n   * This function may be used to obtain the timer's current expiry time.\n   * Whether the timer has expired or not does not affect this value.\n   */\n  duration_type expires_from_now() const\n  {\n    return impl_.get_service().expires_from_now(impl_.get_implementation());\n  }\n\n  /// Set the timer's expiry time relative to now.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when expires_from_now() is called,\n   * then the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_from_now(const duration_type& expiry_time)\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().expires_from_now(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_from_now\");\n    return s;\n  }\n\n  /// Set the timer's expiry time relative to now.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @note If the timer has already expired when expires_from_now() is called,\n   * then the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_from_now(const duration_type& expiry_time,\n      asio::error_code& ec)\n  {\n    return impl_.get_service().expires_from_now(\n        impl_.get_implementation(), expiry_time, ec);\n  }\n\n  /// Perform a blocking wait on the timer.\n  /**\n   * This function is used to wait for the timer to expire. This function\n   * blocks and does not return until the timer has expired.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void wait()\n  {\n    asio::error_code ec;\n    impl_.get_service().wait(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"wait\");\n  }\n\n  /// Perform a blocking wait on the timer.\n  /**\n   * This function is used to wait for the timer to expire. This function\n   * blocks and does not return until the timer has expired.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  void wait(asio::error_code& ec)\n  {\n    impl_.get_service().wait(impl_.get_implementation(), ec);\n  }\n\n  /// Start an asynchronous wait on the timer.\n  /**\n   * This function may be used to initiate an asynchronous wait against the\n   * timer. 
It always returns immediately.\n   *\n   * For each call to async_wait(), the supplied handler will be called exactly\n   * once. The handler will be called when:\n   *\n   * @li The timer has expired.\n   *\n   * @li The timer was cancelled, in which case the handler is passed the error\n   * code asio::error::operation_aborted.\n   *\n   * @param handler The handler to be called when the timer expires. Copies\n   * will be made of the handler as required. The function signature of the\n   * handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        WaitHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WaitHandler,\n      void (asio::error_code))\n  async_wait(\n      ASIO_MOVE_ARG(WaitHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WaitHandler, void (asio::error_code)>(\n        initiate_async_wait(this), handler);\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  basic_deadline_timer(const basic_deadline_timer&) ASIO_DELETED;\n  basic_deadline_timer& operator=(\n      const basic_deadline_timer&) ASIO_DELETED;\n\n  class initiate_async_wait\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_wait(basic_deadline_timer* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WaitHandler>\n    void operator()(ASIO_MOVE_ARG(WaitHandler) handler) const\n    {\n      // If you get an error on the 
following line it means that your handler\n      // does not meet the documented type requirements for a WaitHandler.\n      ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check;\n\n      detail::non_const_lvalue<WaitHandler> handler2(handler);\n      self_->impl_.get_service().async_wait(\n          self_->impl_.get_implementation(), handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_deadline_timer* self_;\n  };\n\n  detail::io_object_impl<\n    detail::deadline_timer_service<TimeTraits>, Executor> impl_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_BASIC_DEADLINE_TIMER_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_io_object.hpp",
    "content": "//\n// basic_io_object.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_IO_OBJECT_HPP\n#define ASIO_BASIC_IO_OBJECT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if defined(ASIO_HAS_MOVE)\nnamespace detail\n{\n  // Type trait used to determine whether a service supports move.\n  template <typename IoObjectService>\n  class service_has_move\n  {\n  private:\n    typedef IoObjectService service_type;\n    typedef typename service_type::implementation_type implementation_type;\n\n    template <typename T, typename U>\n    static auto asio_service_has_move_eval(T* t, U* u)\n      -> decltype(t->move_construct(*u, *u), char());\n    static char (&asio_service_has_move_eval(...))[2];\n\n  public:\n    static const bool value =\n      sizeof(asio_service_has_move_eval(\n        static_cast<service_type*>(0),\n        static_cast<implementation_type*>(0))) == 1;\n  };\n}\n#endif // defined(ASIO_HAS_MOVE)\n\n/// Base class for all I/O objects.\n/**\n * @note All I/O objects are non-copyable. 
However, when using C++0x, certain\n * I/O objects do support move construction and move assignment.\n */\n#if !defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\ntemplate <typename IoObjectService>\n#else\ntemplate <typename IoObjectService,\n    bool Movable = detail::service_has_move<IoObjectService>::value>\n#endif\nclass basic_io_object\n{\npublic:\n  /// The type of the service that will be used to provide I/O operations.\n  typedef IoObjectService service_type;\n\n  /// The underlying implementation type of I/O object.\n  typedef typename service_type::implementation_type implementation_type;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use get_executor().) Get the io_context associated with the\n  /// object.\n  /**\n   * This function may be used to obtain the io_context object that the I/O\n   * object uses to dispatch handlers for asynchronous operations.\n   *\n   * @return A reference to the io_context object that the I/O object will use\n   * to dispatch handlers. Ownership is not transferred to the caller.\n   */\n  asio::io_context& get_io_context()\n  {\n    return service_.get_io_context();\n  }\n\n  /// (Deprecated: Use get_executor().) Get the io_context associated with the\n  /// object.\n  /**\n   * This function may be used to obtain the io_context object that the I/O\n   * object uses to dispatch handlers for asynchronous operations.\n   *\n   * @return A reference to the io_context object that the I/O object will use\n   * to dispatch handlers. 
Ownership is not transferred to the caller.\n   */\n  asio::io_context& get_io_service()\n  {\n    return service_.get_io_context();\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// The type of the executor associated with the object.\n  typedef asio::io_context::executor_type executor_type;\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return service_.get_io_context().get_executor();\n  }\n\nprotected:\n  /// Construct a basic_io_object.\n  /**\n   * Performs:\n   * @code get_service().construct(get_implementation()); @endcode\n   */\n  explicit basic_io_object(asio::io_context& io_context)\n    : service_(asio::use_service<IoObjectService>(io_context))\n  {\n    service_.construct(implementation_);\n  }\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_io_object.\n  /**\n   * Performs:\n   * @code get_service().move_construct(\n   *     get_implementation(), other.get_implementation()); @endcode\n   *\n   * @note Available only for services that support movability,\n   */\n  basic_io_object(basic_io_object&& other);\n\n  /// Move-assign a basic_io_object.\n  /**\n   * Performs:\n   * @code get_service().move_assign(get_implementation(),\n   *     other.get_service(), other.get_implementation()); @endcode\n   *\n   * @note Available only for services that support movability,\n   */\n  basic_io_object& operator=(basic_io_object&& other);\n\n  /// Perform a converting move-construction of a basic_io_object.\n  template <typename IoObjectService1>\n  basic_io_object(IoObjectService1& other_service,\n      typename IoObjectService1::implementation_type& other_implementation);\n#endif // defined(GENERATING_DOCUMENTATION)\n\n  /// Protected destructor to prevent deletion through this type.\n  /**\n   * Performs:\n   * @code get_service().destroy(get_implementation()); @endcode\n   */\n  ~basic_io_object()\n  {\n    service_.destroy(implementation_);\n  }\n\n  /// Get the service 
associated with the I/O object.\n  service_type& get_service()\n  {\n    return service_;\n  }\n\n  /// Get the service associated with the I/O object.\n  const service_type& get_service() const\n  {\n    return service_;\n  }\n\n  /// Get the underlying implementation of the I/O object.\n  implementation_type& get_implementation()\n  {\n    return implementation_;\n  }\n\n  /// Get the underlying implementation of the I/O object.\n  const implementation_type& get_implementation() const\n  {\n    return implementation_;\n  }\n\nprivate:\n  basic_io_object(const basic_io_object&);\n  basic_io_object& operator=(const basic_io_object&);\n\n  // The service associated with the I/O object.\n  service_type& service_;\n\n  /// The underlying implementation of the I/O object.\n  implementation_type implementation_;\n};\n\n#if defined(ASIO_HAS_MOVE)\n// Specialisation for movable objects.\ntemplate <typename IoObjectService>\nclass basic_io_object<IoObjectService, true>\n{\npublic:\n  typedef IoObjectService service_type;\n  typedef typename service_type::implementation_type implementation_type;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  asio::io_context& get_io_context()\n  {\n    return service_->get_io_context();\n  }\n\n  asio::io_context& get_io_service()\n  {\n    return service_->get_io_context();\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  typedef asio::io_context::executor_type executor_type;\n\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return service_->get_io_context().get_executor();\n  }\n\nprotected:\n  explicit basic_io_object(asio::io_context& io_context)\n    : service_(&asio::use_service<IoObjectService>(io_context))\n  {\n    service_->construct(implementation_);\n  }\n\n  basic_io_object(basic_io_object&& other)\n    : service_(&other.get_service())\n  {\n    service_->move_construct(implementation_, other.implementation_);\n  }\n\n  template <typename IoObjectService1>\n  basic_io_object(IoObjectService1& other_service,\n      typename 
IoObjectService1::implementation_type& other_implementation)\n    : service_(&asio::use_service<IoObjectService>(\n          other_service.get_io_context()))\n  {\n    service_->converting_move_construct(implementation_,\n        other_service, other_implementation);\n  }\n\n  ~basic_io_object()\n  {\n    service_->destroy(implementation_);\n  }\n\n  basic_io_object& operator=(basic_io_object&& other)\n  {\n    service_->move_assign(implementation_,\n        *other.service_, other.implementation_);\n    service_ = other.service_;\n    return *this;\n  }\n\n  service_type& get_service()\n  {\n    return *service_;\n  }\n\n  const service_type& get_service() const\n  {\n    return *service_;\n  }\n\n  implementation_type& get_implementation()\n  {\n    return implementation_;\n  }\n\n  const implementation_type& get_implementation() const\n  {\n    return implementation_;\n  }\n\nprivate:\n  basic_io_object(const basic_io_object&);\n  void operator=(const basic_io_object&);\n\n  IoObjectService* service_;\n  implementation_type implementation_;\n};\n#endif // defined(ASIO_HAS_MOVE)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BASIC_IO_OBJECT_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_raw_socket.hpp",
    "content": "//\n// basic_raw_socket.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_RAW_SOCKET_HPP\n#define ASIO_BASIC_RAW_SOCKET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/basic_socket.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(ASIO_BASIC_RAW_SOCKET_FWD_DECL)\n#define ASIO_BASIC_RAW_SOCKET_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_raw_socket;\n\n#endif // !defined(ASIO_BASIC_RAW_SOCKET_FWD_DECL)\n\n/// Provides raw-oriented socket functionality.\n/**\n * The basic_raw_socket class template provides asynchronous and blocking\n * raw-oriented socket functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Protocol, typename Executor>\nclass basic_raw_socket\n  : public basic_socket<Protocol, Executor>\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the socket type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The socket type when rebound to the specified executor.\n    typedef basic_raw_socket<Protocol, Executor1> other;\n  };\n\n  /// The native representation of a socket.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef 
implementation_defined native_handle_type;\n#else\n  typedef typename basic_socket<Protocol,\n    Executor>::native_handle_type native_handle_type;\n#endif\n\n  /// The protocol type.\n  typedef Protocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  /// Construct a basic_raw_socket without opening it.\n  /**\n   * This constructor creates a raw socket without opening it. The open()\n   * function must be called before data can be sent or received on the socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   */\n  explicit basic_raw_socket(const executor_type& ex)\n    : basic_socket<Protocol, Executor>(ex)\n  {\n  }\n\n  /// Construct a basic_raw_socket without opening it.\n  /**\n   * This constructor creates a raw socket without opening it. The open()\n   * function must be called before data can be sent or received on the socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   */\n  template <typename ExecutionContext>\n  explicit basic_raw_socket(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context)\n  {\n  }\n\n  /// Construct and open a basic_raw_socket.\n  /**\n   * This constructor creates and opens a raw socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_raw_socket(const executor_type& ex, const protocol_type& 
protocol)\n    : basic_socket<Protocol, Executor>(ex, protocol)\n  {\n  }\n\n  /// Construct and open a basic_raw_socket.\n  /**\n   * This constructor creates and opens a raw socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_raw_socket(ExecutionContext& context, const protocol_type& protocol,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, protocol)\n  {\n  }\n\n  /// Construct a basic_raw_socket, opening it and binding it to the given\n  /// local endpoint.\n  /**\n   * This constructor creates a raw socket and automatically opens it bound\n   * to the specified endpoint on the local machine. The protocol used is the\n   * protocol associated with the given endpoint.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the raw\n   * socket will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_raw_socket(const executor_type& ex, const endpoint_type& endpoint)\n    : basic_socket<Protocol, Executor>(ex, endpoint)\n  {\n  }\n\n  /// Construct a basic_raw_socket, opening it and binding it to the given\n  /// local endpoint.\n  /**\n   * This constructor creates a raw socket and automatically opens it bound\n   * to the specified endpoint on the local machine. 
The protocol used is the\n   * protocol associated with the given endpoint.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the raw\n   * socket will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_raw_socket(ExecutionContext& context, const endpoint_type& endpoint,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, endpoint)\n  {\n  }\n\n  /// Construct a basic_raw_socket on an existing native socket.\n  /**\n   * This constructor creates a raw socket object to hold an existing\n   * native socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param native_socket The new underlying socket implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_raw_socket(const executor_type& ex,\n      const protocol_type& protocol, const native_handle_type& native_socket)\n    : basic_socket<Protocol, Executor>(ex, protocol, native_socket)\n  {\n  }\n\n  /// Construct a basic_raw_socket on an existing native socket.\n  /**\n   * This constructor creates a raw socket object to hold an existing\n   * native socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param 
native_socket The new underlying socket implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_raw_socket(ExecutionContext& context,\n      const protocol_type& protocol, const native_handle_type& native_socket,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, protocol, native_socket)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_raw_socket from another.\n  /**\n   * This constructor moves a raw socket from one object to another.\n   *\n   * @param other The other basic_raw_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_raw_socket(const executor_type&)\n   * constructor.\n   */\n  basic_raw_socket(basic_raw_socket&& other) ASIO_NOEXCEPT\n    : basic_socket<Protocol, Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a basic_raw_socket from another.\n  /**\n   * This assignment operator moves a raw socket from one object to another.\n   *\n   * @param other The other basic_raw_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_raw_socket(const executor_type&)\n   * constructor.\n   */\n  basic_raw_socket& operator=(basic_raw_socket&& other)\n  {\n    basic_socket<Protocol, Executor>::operator=(std::move(other));\n    return *this;\n  }\n\n  /// Move-construct a basic_raw_socket from a socket of another protocol\n  /// type.\n  /**\n   * This constructor moves a raw socket from one object to another.\n   *\n   * @param other The other basic_raw_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in 
the same state as if\n   * constructed using the @c basic_raw_socket(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  basic_raw_socket(basic_raw_socket<Protocol1, Executor1>&& other,\n      typename enable_if<\n        is_convertible<Protocol1, Protocol>::value\n          && is_convertible<Executor1, Executor>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a basic_raw_socket from a socket of another protocol type.\n  /**\n   * This assignment operator moves a raw socket from one object to another.\n   *\n   * @param other The other basic_raw_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_raw_socket(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  typename enable_if<\n    is_convertible<Protocol1, Protocol>::value\n      && is_convertible<Executor1, Executor>::value,\n    basic_raw_socket&\n  >::type operator=(basic_raw_socket<Protocol1, Executor1>&& other)\n  {\n    basic_socket<Protocol, Executor>::operator=(std::move(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the socket.\n  /**\n   * This function destroys the socket, cancelling any outstanding asynchronous\n   * operations associated with the socket as if by calling @c cancel.\n   */\n  ~basic_raw_socket()\n  {\n  }\n\n  /// Send some data on a connected socket.\n  /**\n   * This function is used to send data on the raw socket. 
The function call\n   * will block until the data has been sent successfully or an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The send operation can only be used with a connected socket. Use\n   * the send_to function to send data on an unconnected raw socket.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code socket.send(asio::buffer(data, size)); @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, 0, ec);\n    asio::detail::throw_error(ec, \"send\");\n    return s;\n  }\n\n  /// Send some data on a connected socket.\n  /**\n   * This function is used to send data on the raw socket. The function call\n   * will block until the data has been sent successfully or an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The send operation can only be used with a connected socket. 
Use\n   * the send_to function to send data on an unconnected raw socket.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, flags, ec);\n    asio::detail::throw_error(ec, \"send\");\n    return s;\n  }\n\n  /// Send some data on a connected socket.\n  /**\n   * This function is used to send data on the raw socket. The function call\n   * will block until the data has been sent successfully or an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @note The send operation can only be used with a connected socket. Use\n   * the send_to function to send data on an unconnected raw socket.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, flags, ec);\n  }\n\n  /// Start an asynchronous send on a connected socket.\n  /**\n   * This function is used to send data on the raw socket. The function call\n   * will block until the data has been sent successfully or an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket. 
Although\n   * the buffers object may be copied as necessary, ownership of the underlying\n   * memory blocks is retained by the caller, which must guarantee that they\n   * remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The async_send operation can only be used with a connected socket.\n   * Use the async_send_to function to send data on an unconnected raw\n   * socket.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.async_send(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send(this), handler,\n        buffers, 
socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous send on a connected socket.\n  /**\n   * This function is used to send data on the raw socket. The function call\n   * will block until the data has been sent successfully or an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket. Although\n   * the buffers object may be copied as necessary, ownership of the underlying\n   * memory blocks is retained by the caller, which must guarantee that they\n   * remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The async_send operation can only be used with a connected socket.\n   * Use the async_send_to function to send data on an unconnected raw\n   * socket.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send(this), handler, buffers, flags);\n  }\n\n  /// Send raw data to the specified endpoint.\n  /**\n   * This function is used to send raw data to the specified remote endpoint.\n   * The function call will block until the data has been sent successfully or\n   * an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * asio::ip::udp::endpoint destination(\n   *     asio::ip::address::from_string(\"1.2.3.4\"), 12345);\n   * socket.send_to(asio::buffer(data, size), destination);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send_to(const 
ConstBufferSequence& buffers,\n      const endpoint_type& destination)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send_to(\n        this->impl_.get_implementation(), buffers, destination, 0, ec);\n    asio::detail::throw_error(ec, \"send_to\");\n    return s;\n  }\n\n  /// Send raw data to the specified endpoint.\n  /**\n   * This function is used to send raw data to the specified remote endpoint.\n   * The function call will block until the data has been sent successfully or\n   * an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send_to(const ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send_to(\n        this->impl_.get_implementation(), buffers, destination, flags, ec);\n    asio::detail::throw_error(ec, \"send_to\");\n    return s;\n  }\n\n  /// Send raw data to the specified endpoint.\n  /**\n   * This function is used to send raw data to the specified remote endpoint.\n   * The function call will block until the data has been sent successfully or\n   * an error occurs.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes sent.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send_to(const 
ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().send_to(this->impl_.get_implementation(),\n        buffers, destination, flags, ec);\n  }\n\n  /// Start an asynchronous send.\n  /**\n   * This function is used to asynchronously send raw data to the specified\n   * remote endpoint. The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   * Copies will be made of the endpoint as required.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * asio::ip::udp::endpoint destination(\n   *     asio::ip::address::from_string(\"1.2.3.4\"), 12345);\n   * socket.async_send_to(\n   *     asio::buffer(data, size), destination, handler);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send_to(const ConstBufferSequence& buffers,\n      const endpoint_type& destination,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send_to(this), handler, buffers,\n        destination, socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous send.\n  /**\n   * This function is used to asynchronously send raw data to the specified\n   * remote endpoint. 
The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent to the remote endpoint.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param destination The remote endpoint to which the data will be sent.\n   * Copies will be made of the endpoint as required.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send_to(const ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send_to(this), handler, buffers, destination, flags);\n  }\n\n  /// Receive some data on a connected socket.\n  /**\n   * This function is used to receive data on the raw socket. The function\n   * call will block until data has been received successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The receive operation can only be used with a connected socket. 
Use\n   * the receive_from function to receive data on an unconnected raw\n   * socket.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code socket.receive(asio::buffer(data, size)); @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, 0, ec);\n    asio::detail::throw_error(ec, \"receive\");\n    return s;\n  }\n\n  /// Receive some data on a connected socket.\n  /**\n   * This function is used to receive data on the raw socket. The function\n   * call will block until data has been received successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The receive operation can only be used with a connected socket. Use\n   * the receive_from function to receive data on an unconnected raw\n   * socket.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, flags, ec);\n    asio::detail::throw_error(ec, \"receive\");\n    return s;\n  }\n\n  /// Receive some data on a connected socket.\n  /**\n   * This function is used to receive data on the raw socket. 
The function\n   * call will block until data has been received successfully or an error\n   * occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes received.\n   *\n   * @note The receive operation can only be used with a connected socket. Use\n   * the receive_from function to receive data on an unconnected raw\n   * socket.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, flags, ec);\n  }\n\n  /// Start an asynchronous receive on a connected socket.\n  /**\n   * This function is used to asynchronously receive data from the raw\n   * socket. The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The async_receive operation can only be used with a connected socket.\n   * Use the async_receive_from function to receive data on an unconnected\n   * raw socket.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.async_receive(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive(this), handler,\n        buffers, socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous receive on a connected socket.\n  /**\n   * This function is used to asynchronously receive data from the raw\n   * socket. 
The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The async_receive operation can only be used with a connected socket.\n   * Use the async_receive_from function to receive data on an unconnected\n   * raw socket.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive(this), handler, buffers, flags);\n  }\n\n  /// Receive raw data with the endpoint of the sender.\n  /**\n   * This 
function is used to receive raw data. The function call will block\n   * until data has been received successfully or an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the data.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * asio::ip::udp::endpoint sender_endpoint;\n   * socket.receive_from(\n   *     asio::buffer(data, size), sender_endpoint);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive_from(\n        this->impl_.get_implementation(), buffers, sender_endpoint, 0, ec);\n    asio::detail::throw_error(ec, \"receive_from\");\n    return s;\n  }\n  \n  /// Receive raw data with the endpoint of the sender.\n  /**\n   * This function is used to receive raw data. 
The function call will block\n   * until data has been received successfully or an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the data.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive_from(\n        this->impl_.get_implementation(), buffers, sender_endpoint, flags, ec);\n    asio::detail::throw_error(ec, \"receive_from\");\n    return s;\n  }\n  \n  /// Receive raw data with the endpoint of the sender.\n  /**\n   * This function is used to receive raw data. 
The function call will block\n   * until data has been received successfully or an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the data.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes received.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().receive_from(\n        this->impl_.get_implementation(), buffers, sender_endpoint, flags, ec);\n  }\n\n  /// Start an asynchronous receive.\n  /**\n   * This function is used to asynchronously receive raw data. The function\n   * call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the data. Ownership of the sender_endpoint object\n   * is retained by the caller, which must guarantee that it is valid until the\n   * handler is called.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code socket.async_receive_from(\n   *     asio::buffer(data, size), 0, sender_endpoint, handler); @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive_from(this), handler, buffers,\n        &sender_endpoint, socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous receive.\n  /**\n   * This function is used to asynchronously receive raw data. 
The function\n   * call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param sender_endpoint An endpoint object that receives the endpoint of\n   * the remote sender of the data. Ownership of the sender_endpoint object\n   * is retained by the caller, which must guarantee that it is valid until the\n   * handler is called.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive_from(const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive_from(this), handler,\n        buffers, &sender_endpoint, flags);\n  }\n\nprivate:\n  class initiate_async_send\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_send(basic_raw_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_send(\n          self_->impl_.get_implementation(), buffers, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_raw_socket* self_;\n  };\n\n  class initiate_async_send_to\n  {\n  public:\n    typedef Executor executor_type;\n\n    
explicit initiate_async_send_to(basic_raw_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers, const endpoint_type& destination,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_send_to(\n          self_->impl_.get_implementation(), buffers, destination, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_raw_socket* self_;\n  };\n\n  class initiate_async_receive\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_receive(basic_raw_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_receive(\n          self_->impl_.get_implementation(), buffers, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n   
 }\n\n  private:\n    basic_raw_socket* self_;\n  };\n\n  class initiate_async_receive_from\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_receive_from(basic_raw_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers, endpoint_type* sender_endpoint,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_receive_from(\n          self_->impl_.get_implementation(), buffers, *sender_endpoint, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_raw_socket* self_;\n  };\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BASIC_RAW_SOCKET_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_seq_packet_socket.hpp",
    "content": "//\n// basic_seq_packet_socket.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_SEQ_PACKET_SOCKET_HPP\n#define ASIO_BASIC_SEQ_PACKET_SOCKET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/basic_socket.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(ASIO_BASIC_SEQ_PACKET_SOCKET_FWD_DECL)\n#define ASIO_BASIC_SEQ_PACKET_SOCKET_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_seq_packet_socket;\n\n#endif // !defined(ASIO_BASIC_SEQ_PACKET_SOCKET_FWD_DECL)\n\n/// Provides sequenced packet socket functionality.\n/**\n * The basic_seq_packet_socket class template provides asynchronous and blocking\n * sequenced packet socket functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Protocol, typename Executor>\nclass basic_seq_packet_socket\n  : public basic_socket<Protocol, Executor>\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the socket type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The socket type when rebound to the specified executor.\n    typedef basic_seq_packet_socket<Protocol, Executor1> other;\n  };\n\n  /// The native representation of a socket.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef 
implementation_defined native_handle_type;\n#else\n  typedef typename basic_socket<Protocol,\n    Executor>::native_handle_type native_handle_type;\n#endif\n\n  /// The protocol type.\n  typedef Protocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  /// Construct a basic_seq_packet_socket without opening it.\n  /**\n   * This constructor creates a sequenced packet socket without opening it. The\n   * socket needs to be opened and then connected or accepted before data can\n   * be sent or received on it.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   */\n  explicit basic_seq_packet_socket(const executor_type& ex)\n    : basic_socket<Protocol, Executor>(ex)\n  {\n  }\n\n  /// Construct a basic_seq_packet_socket without opening it.\n  /**\n   * This constructor creates a sequenced packet socket without opening it. The\n   * socket needs to be opened and then connected or accepted before data can\n   * be sent or received on it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   */\n  template <typename ExecutionContext>\n  explicit basic_seq_packet_socket(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context)\n  {\n  }\n\n  /// Construct and open a basic_seq_packet_socket.\n  /**\n   * This constructor creates and opens a sequenced_packet socket. 
The socket\n   * needs to be connected or accepted before data can be sent or received on\n   * it.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_seq_packet_socket(const executor_type& ex,\n      const protocol_type& protocol)\n    : basic_socket<Protocol, Executor>(ex, protocol)\n  {\n  }\n\n  /// Construct and open a basic_seq_packet_socket.\n  /**\n   * This constructor creates and opens a sequenced_packet socket. The socket\n   * needs to be connected or accepted before data can be sent or received on\n   * it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_seq_packet_socket(ExecutionContext& context,\n      const protocol_type& protocol,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, protocol)\n  {\n  }\n\n  /// Construct a basic_seq_packet_socket, opening it and binding it to the\n  /// given local endpoint.\n  /**\n   * This constructor creates a sequenced packet socket and automatically opens\n   * it bound to the specified endpoint on the local machine. 
The protocol used\n   * is the protocol associated with the given endpoint.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the sequenced\n   * packet socket will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_seq_packet_socket(const executor_type& ex,\n      const endpoint_type& endpoint)\n    : basic_socket<Protocol, Executor>(ex, endpoint)\n  {\n  }\n\n  /// Construct a basic_seq_packet_socket, opening it and binding it to the\n  /// given local endpoint.\n  /**\n   * This constructor creates a sequenced packet socket and automatically opens\n   * it bound to the specified endpoint on the local machine. The protocol used\n   * is the protocol associated with the given endpoint.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the sequenced\n   * packet socket will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_seq_packet_socket(ExecutionContext& context,\n      const endpoint_type& endpoint,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, endpoint)\n  {\n  }\n\n  /// Construct a basic_seq_packet_socket on an existing native socket.\n  /**\n   * This constructor creates a sequenced packet socket object to hold an\n   * existing native socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An 
object specifying protocol parameters to be used.\n   *\n   * @param native_socket The new underlying socket implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_seq_packet_socket(const executor_type& ex,\n      const protocol_type& protocol, const native_handle_type& native_socket)\n    : basic_socket<Protocol, Executor>(ex, protocol, native_socket)\n  {\n  }\n\n  /// Construct a basic_seq_packet_socket on an existing native socket.\n  /**\n   * This constructor creates a sequenced packet socket object to hold an\n   * existing native socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param native_socket The new underlying socket implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_seq_packet_socket(ExecutionContext& context,\n      const protocol_type& protocol, const native_handle_type& native_socket,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, protocol, native_socket)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_seq_packet_socket from another.\n  /**\n   * This constructor moves a sequenced packet socket from one object to\n   * another.\n   *\n   * @param other The other basic_seq_packet_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_seq_packet_socket(const executor_type&)\n   * constructor.\n   */\n  basic_seq_packet_socket(basic_seq_packet_socket&& other) ASIO_NOEXCEPT\n    : 
basic_socket<Protocol, Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a basic_seq_packet_socket from another.\n  /**\n   * This assignment operator moves a sequenced packet socket from one object to\n   * another.\n   *\n   * @param other The other basic_seq_packet_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_seq_packet_socket(const executor_type&)\n   * constructor.\n   */\n  basic_seq_packet_socket& operator=(basic_seq_packet_socket&& other)\n  {\n    basic_socket<Protocol, Executor>::operator=(std::move(other));\n    return *this;\n  }\n\n  /// Move-construct a basic_seq_packet_socket from a socket of another protocol\n  /// type.\n  /**\n   * This constructor moves a sequenced packet socket from one object to\n   * another.\n   *\n   * @param other The other basic_seq_packet_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_seq_packet_socket(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  basic_seq_packet_socket(basic_seq_packet_socket<Protocol1, Executor1>&& other,\n      typename enable_if<\n        is_convertible<Protocol1, Protocol>::value\n          && is_convertible<Executor1, Executor>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a basic_seq_packet_socket from a socket of another protocol\n  /// type.\n  /**\n   * This assignment operator moves a sequenced packet socket from one object to\n   * another.\n   *\n   * @param other The other basic_seq_packet_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_seq_packet_socket(const 
executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  typename enable_if<\n    is_convertible<Protocol1, Protocol>::value\n      && is_convertible<Executor1, Executor>::value,\n    basic_seq_packet_socket&\n  >::type operator=(basic_seq_packet_socket<Protocol1, Executor1>&& other)\n  {\n    basic_socket<Protocol, Executor>::operator=(std::move(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the socket.\n  /**\n   * This function destroys the socket, cancelling any outstanding asynchronous\n   * operations associated with the socket as if by calling @c cancel.\n   */\n  ~basic_seq_packet_socket()\n  {\n  }\n\n  /// Send some data on the socket.\n  /**\n   * This function is used to send data on the sequenced packet socket. The\n   * function call will block until the data has been sent successfully, or an\n   * until error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.send(asio::buffer(data, size), 0);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, flags, ec);\n    asio::detail::throw_error(ec, \"send\");\n    return s;\n  }\n\n  /// Send some data on the socket.\n  /**\n   * This function is used to 
send data on the sequenced packet socket. The\n   * function call will block the data has been sent successfully, or an until\n   * error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes sent. Returns 0 if an error occurred.\n   *\n   * @note The send operation may not transmit all of the data to the peer.\n   * Consider using the @ref write function if you need to ensure that all data\n   * is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, flags, ec);\n  }\n\n  /// Start an asynchronous send.\n  /**\n   * This function is used to asynchronously send data on the sequenced packet\n   * socket. The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent on the socket. Although\n   * the buffers object may be copied as necessary, ownership of the underlying\n   * memory blocks is retained by the caller, which must guarantee that they\n   * remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. 
The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.async_send(asio::buffer(data, size), 0, handler);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send(this), handler, buffers, flags);\n  }\n\n  /// Receive some data on the socket.\n  /**\n   * This function is used to receive data on the sequenced packet socket. The\n   * function call will block until data has been received successfully, or\n   * until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param out_flags After the receive call completes, contains flags\n   * associated with the received data. 
For example, if the\n   * socket_base::message_end_of_record bit is set then the received data marks\n   * the end of a record.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.receive(asio::buffer(data, size), out_flags);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags& out_flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive_with_flags(\n        this->impl_.get_implementation(), buffers, 0, out_flags, ec);\n    asio::detail::throw_error(ec, \"receive\");\n    return s;\n  }\n\n  /// Receive some data on the socket.\n  /**\n   * This function is used to receive data on the sequenced packet socket. The\n   * function call will block until data has been received successfully, or\n   * until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param in_flags Flags specifying how the receive call is to be made.\n   *\n   * @param out_flags After the receive call completes, contains flags\n   * associated with the received data. For example, if the\n   * socket_base::message_end_of_record bit is set then the received data marks\n   * the end of a record.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure. 
An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The receive operation may not receive all of the requested number of\n   * bytes. Consider using the @ref read function if you need to ensure that the\n   * requested amount of data is read before the blocking operation completes.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.receive(asio::buffer(data, size), 0, out_flags);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive_with_flags(\n        this->impl_.get_implementation(), buffers, in_flags, out_flags, ec);\n    asio::detail::throw_error(ec, \"receive\");\n    return s;\n  }\n\n  /// Receive some data on a connected socket.\n  /**\n   * This function is used to receive data on the sequenced packet socket. The\n   * function call will block until data has been received successfully, or\n   * until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param in_flags Flags specifying how the receive call is to be made.\n   *\n   * @param out_flags After the receive call completes, contains flags\n   * associated with the received data. For example, if the\n   * socket_base::message_end_of_record bit is set then the received data marks\n   * the end of a record.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes received. 
Returns 0 if an error occurred.\n   *\n   * @note The receive operation may not receive all of the requested number of\n   * bytes. Consider using the @ref read function if you need to ensure that the\n   * requested amount of data is read before the blocking operation completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, asio::error_code& ec)\n  {\n    return this->impl_.get_service().receive_with_flags(\n        this->impl_.get_implementation(), buffers, in_flags, out_flags, ec);\n  }\n\n  /// Start an asynchronous receive.\n  /**\n   * This function is used to asynchronously receive data from the sequenced\n   * packet socket. The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param out_flags Once the asynchronous operation completes, contains flags\n   * associated with the received data. For example, if the\n   * socket_base::message_end_of_record bit is set then the received data marks\n   * the end of a record. The caller must guarantee that the referenced\n   * variable remains valid until the handler is called.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.async_receive(asio::buffer(data, size), out_flags, handler);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags& out_flags,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive_with_flags(this), handler,\n        buffers, socket_base::message_flags(0), &out_flags);\n  }\n\n  /// Start an asynchronous receive.\n  /**\n   * This function is used to asynchronously receive data from the sequenced\n   * data socket. 
The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param in_flags Flags specifying how the receive call is to be made.\n   *\n   * @param out_flags Once the asynchronous operation completes, contains flags\n   * associated with the received data. For example, if the\n   * socket_base::message_end_of_record bit is set then the received data marks\n   * the end of a record. The caller must guarantee that the referenced\n   * variable remains valid until the handler is called.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.async_receive(\n   *     asio::buffer(data, size),\n   *     0, out_flags, handler);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive_with_flags(this),\n        handler, buffers, in_flags, &out_flags);\n  }\n\nprivate:\n  class initiate_async_send\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_send(basic_seq_packet_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_send(\n          self_->impl_.get_implementation(), buffers, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_seq_packet_socket* self_;\n  };\n\n  class initiate_async_receive_with_flags\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_receive_with_flags(basic_seq_packet_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers,\n        socket_base::message_flags in_flags,\n        socket_base::message_flags* out_flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_receive_with_flags(\n          self_->impl_.get_implementation(), buffers, in_flags, *out_flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_seq_packet_socket* self_;\n  };\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BASIC_SEQ_PACKET_SOCKET_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_serial_port.hpp",
    "content": "//\n// basic_serial_port.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_SERIAL_PORT_HPP\n#define ASIO_BASIC_SERIAL_PORT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_SERIAL_PORT) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include <string>\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/serial_port_base.hpp\"\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_serial_port_service.hpp\"\n#else\n# include \"asio/detail/reactive_serial_port_service.hpp\"\n#endif\n\n#if defined(ASIO_HAS_MOVE)\n# include <utility>\n#endif // defined(ASIO_HAS_MOVE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Provides serial port functionality.\n/**\n * The basic_serial_port class provides a wrapper over serial port\n * functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Executor = executor>\nclass basic_serial_port\n  : public serial_port_base\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the serial port type to another executor.\n  template <typename Executor1>\n  struct 
rebind_executor\n  {\n    /// The serial port type when rebound to the specified executor.\n    typedef basic_serial_port<Executor1> other;\n  };\n\n  /// The native representation of a serial port.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#elif defined(ASIO_HAS_IOCP)\n  typedef detail::win_iocp_serial_port_service::native_handle_type\n    native_handle_type;\n#else\n  typedef detail::reactive_serial_port_service::native_handle_type\n    native_handle_type;\n#endif\n\n  /// A basic_basic_serial_port is always the lowest layer.\n  typedef basic_serial_port lowest_layer_type;\n\n  /// Construct a basic_serial_port without opening it.\n  /**\n   * This constructor creates a serial port without opening it.\n   *\n   * @param ex The I/O executor that the serial port will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * serial port.\n   */\n  explicit basic_serial_port(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Construct a basic_serial_port without opening it.\n  /**\n   * This constructor creates a serial port without opening it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the serial port will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the serial port.\n   */\n  template <typename ExecutionContext>\n  explicit basic_serial_port(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value,\n        basic_serial_port\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Construct and open a basic_serial_port.\n  /**\n   * This constructor creates and opens a serial port for the specified device\n   * name.\n   *\n   * @param ex The I/O executor that the serial port will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * serial port.\n   *\n   * 
@param device The platform-specific device name for this serial\n   * port.\n   */\n  basic_serial_port(const executor_type& ex, const char* device)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), device, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Construct and open a basic_serial_port.\n  /**\n   * This constructor creates and opens a serial port for the specified device\n   * name.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the serial port will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the serial port.\n   *\n   * @param device The platform-specific device name for this serial\n   * port.\n   */\n  template <typename ExecutionContext>\n  basic_serial_port(ExecutionContext& context, const char* device,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), device, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Construct and open a basic_serial_port.\n  /**\n   * This constructor creates and opens a serial port for the specified device\n   * name.\n   *\n   * @param ex The I/O executor that the serial port will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * serial port.\n   *\n   * @param device The platform-specific device name for this serial\n   * port.\n   */\n  basic_serial_port(const executor_type& ex, const std::string& device)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), device, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Construct and open a basic_serial_port.\n  /**\n   * This constructor creates and opens a serial port for the specified device\n   * name.\n   *\n 
  * @param context An execution context which provides the I/O executor that\n   * the serial port will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the serial port.\n   *\n   * @param device The platform-specific device name for this serial\n   * port.\n   */\n  template <typename ExecutionContext>\n  basic_serial_port(ExecutionContext& context, const std::string& device,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), device, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Construct a basic_serial_port on an existing native serial port.\n  /**\n   * This constructor creates a serial port object to hold an existing native\n   * serial port.\n   *\n   * @param ex The I/O executor that the serial port will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * serial port.\n   *\n   * @param native_serial_port A native serial port.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_serial_port(const executor_type& ex,\n      const native_handle_type& native_serial_port)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        native_serial_port, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Construct a basic_serial_port on an existing native serial port.\n  /**\n   * This constructor creates a serial port object to hold an existing native\n   * serial port.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the serial port will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the serial port.\n   *\n   * @param native_serial_port A native serial port.\n   *\n   * @throws asio::system_error Thrown on 
failure.\n   */\n  template <typename ExecutionContext>\n  basic_serial_port(ExecutionContext& context,\n      const native_handle_type& native_serial_port,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        native_serial_port, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_serial_port from another.\n  /**\n   * This constructor moves a serial port from one object to another.\n   *\n   * @param other The other basic_serial_port object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_serial_port(const executor_type&)\n   * constructor.\n   */\n  basic_serial_port(basic_serial_port&& other)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a basic_serial_port from another.\n  /**\n   * This assignment operator moves a serial port from one object to another.\n   *\n   * @param other The other basic_serial_port object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_serial_port(const executor_type&)\n   * constructor.\n   */\n  basic_serial_port& operator=(basic_serial_port&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the serial port.\n  /**\n   * This function destroys the serial port, cancelling any outstanding\n   * asynchronous wait operations associated with the serial port as if by\n   * calling @c cancel.\n   */\n  ~basic_serial_port()\n  {\n  }\n\n  /// Get the executor associated with the object.\n  executor_type 
get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n  /// Get a reference to the lowest layer.\n  /**\n   * This function returns a reference to the lowest layer in a stack of\n   * layers. Since a basic_serial_port cannot contain any further layers, it\n   * simply returns a reference to itself.\n   *\n   * @return A reference to the lowest layer in the stack of layers. Ownership\n   * is not transferred to the caller.\n   */\n  lowest_layer_type& lowest_layer()\n  {\n    return *this;\n  }\n\n  /// Get a const reference to the lowest layer.\n  /**\n   * This function returns a const reference to the lowest layer in a stack of\n   * layers. Since a basic_serial_port cannot contain any further layers, it\n   * simply returns a reference to itself.\n   *\n   * @return A const reference to the lowest layer in the stack of layers.\n   * Ownership is not transferred to the caller.\n   */\n  const lowest_layer_type& lowest_layer() const\n  {\n    return *this;\n  }\n\n  /// Open the serial port using the specified device name.\n  /**\n   * This function opens the serial port for the specified device name.\n   *\n   * @param device The platform-specific device name.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void open(const std::string& device)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), device, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Open the serial port using the specified device name.\n  /**\n   * This function opens the serial port using the given platform-specific\n   * device name.\n   *\n   * @param device The platform-specific device name.\n   *\n   * @param ec Set the indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID open(const std::string& device,\n      asio::error_code& ec)\n  {\n    impl_.get_service().open(impl_.get_implementation(), device, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Assign an existing native 
serial port to the serial port.\n  /*\n   * This function opens the serial port to hold an existing native serial port.\n   *\n   * @param native_serial_port A native serial port.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void assign(const native_handle_type& native_serial_port)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        native_serial_port, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Assign an existing native serial port to the serial port.\n  /*\n   * This function opens the serial port to hold an existing native serial port.\n   *\n   * @param native_serial_port A native serial port.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID assign(const native_handle_type& native_serial_port,\n      asio::error_code& ec)\n  {\n    impl_.get_service().assign(impl_.get_implementation(),\n        native_serial_port, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Determine whether the serial port is open.\n  bool is_open() const\n  {\n    return impl_.get_service().is_open(impl_.get_implementation());\n  }\n\n  /// Close the serial port.\n  /**\n   * This function is used to close the serial port. Any asynchronous read or\n   * write operations will be cancelled immediately, and will complete with the\n   * asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void close()\n  {\n    asio::error_code ec;\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"close\");\n  }\n\n  /// Close the serial port.\n  /**\n   * This function is used to close the serial port. 
Any asynchronous read or\n   * write operations will be cancelled immediately, and will complete with the\n   * asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get the native serial port representation.\n  /**\n   * This function may be used to obtain the underlying representation of the\n   * serial port. This is intended to allow access to native serial port\n   * functionality that is not otherwise provided.\n   */\n  native_handle_type native_handle()\n  {\n    return impl_.get_service().native_handle(impl_.get_implementation());\n  }\n\n  /// Cancel all asynchronous operations associated with the serial port.\n  /**\n   * This function causes all outstanding asynchronous read or write operations\n   * to finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void cancel()\n  {\n    asio::error_code ec;\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n  }\n\n  /// Cancel all asynchronous operations associated with the serial port.\n  /**\n   * This function causes all outstanding asynchronous read or write operations\n   * to finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID cancel(asio::error_code& ec)\n  {\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Send a break sequence to the serial port.\n  /**\n   * This function causes a break sequence of platform-specific duration to be\n   * sent out the 
serial port.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void send_break()\n  {\n    asio::error_code ec;\n    impl_.get_service().send_break(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"send_break\");\n  }\n\n  /// Send a break sequence to the serial port.\n  /**\n   * This function causes a break sequence of platform-specific duration to be\n   * sent out the serial port.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID send_break(asio::error_code& ec)\n  {\n    impl_.get_service().send_break(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Set an option on the serial port.\n  /**\n   * This function is used to set an option on the serial port.\n   *\n   * @param option The option value to be set on the serial port.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa SettableSerialPortOption @n\n   * asio::serial_port_base::baud_rate @n\n   * asio::serial_port_base::flow_control @n\n   * asio::serial_port_base::parity @n\n   * asio::serial_port_base::stop_bits @n\n   * asio::serial_port_base::character_size\n   */\n  template <typename SettableSerialPortOption>\n  void set_option(const SettableSerialPortOption& option)\n  {\n    asio::error_code ec;\n    impl_.get_service().set_option(impl_.get_implementation(), option, ec);\n    asio::detail::throw_error(ec, \"set_option\");\n  }\n\n  /// Set an option on the serial port.\n  /**\n   * This function is used to set an option on the serial port.\n   *\n   * @param option The option value to be set on the serial port.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa SettableSerialPortOption @n\n   * asio::serial_port_base::baud_rate @n\n   * asio::serial_port_base::flow_control @n\n   * asio::serial_port_base::parity @n\n   * asio::serial_port_base::stop_bits @n\n   * asio::serial_port_base::character_size\n   */\n  template <typename 
SettableSerialPortOption>\n  ASIO_SYNC_OP_VOID set_option(const SettableSerialPortOption& option,\n      asio::error_code& ec)\n  {\n    impl_.get_service().set_option(impl_.get_implementation(), option, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get an option from the serial port.\n  /**\n   * This function is used to get the current value of an option on the serial\n   * port.\n   *\n   * @param option The option value to be obtained from the serial port.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa GettableSerialPortOption @n\n   * asio::serial_port_base::baud_rate @n\n   * asio::serial_port_base::flow_control @n\n   * asio::serial_port_base::parity @n\n   * asio::serial_port_base::stop_bits @n\n   * asio::serial_port_base::character_size\n   */\n  template <typename GettableSerialPortOption>\n  void get_option(GettableSerialPortOption& option) const\n  {\n    asio::error_code ec;\n    impl_.get_service().get_option(impl_.get_implementation(), option, ec);\n    asio::detail::throw_error(ec, \"get_option\");\n  }\n\n  /// Get an option from the serial port.\n  /**\n   * This function is used to get the current value of an option on the serial\n   * port.\n   *\n   * @param option The option value to be obtained from the serial port.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa GettableSerialPortOption @n\n   * asio::serial_port_base::baud_rate @n\n   * asio::serial_port_base::flow_control @n\n   * asio::serial_port_base::parity @n\n   * asio::serial_port_base::stop_bits @n\n   * asio::serial_port_base::character_size\n   */\n  template <typename GettableSerialPortOption>\n  ASIO_SYNC_OP_VOID get_option(GettableSerialPortOption& option,\n      asio::error_code& ec) const\n  {\n    impl_.get_service().get_option(impl_.get_implementation(), option, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Write some data to the serial port.\n  /**\n   * This function is used to write data to 
the serial port. The function call\n   * will block until one or more bytes of the data has been written\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more data buffers to be written to the serial port.\n   *\n   * @returns The number of bytes written.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. Consider using the @ref write function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * basic_serial_port.write_some(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().write_some(\n        impl_.get_implementation(), buffers, ec);\n    asio::detail::throw_error(ec, \"write_some\");\n    return s;\n  }\n\n  /// Write some data to the serial port.\n  /**\n   * This function is used to write data to the serial port. The function call\n   * will block until one or more bytes of the data has been written\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more data buffers to be written to the serial port.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes written. Returns 0 if an error occurred.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. 
Consider using the @ref write function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return impl_.get_service().write_some(\n        impl_.get_implementation(), buffers, ec);\n  }\n\n  /// Start an asynchronous write.\n  /**\n   * This function is used to asynchronously write data to the serial port.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be written to the serial port.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the write operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes written.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The write operation may not transmit all of the data to the peer.\n   * Consider using the @ref async_write function if you need to ensure that all\n   * data is written before the asynchronous operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * basic_serial_port.async_write_some(\n   *     asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_write_some(this), handler, buffers);\n  }\n\n  /// Read some data from the serial port.\n  /**\n   * This function is used to read data from the serial port. The function\n   * call will block until one or more bytes of data has been read successfully,\n   * or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @returns The number of bytes read.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. 
Consider using the @ref read function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * basic_serial_port.read_some(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().read_some(\n        impl_.get_implementation(), buffers, ec);\n    asio::detail::throw_error(ec, \"read_some\");\n    return s;\n  }\n\n  /// Read some data from the serial port.\n  /**\n   * This function is used to read data from the serial port. The function\n   * call will block until one or more bytes of data has been read successfully,\n   * or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes read. Returns 0 if an error occurred.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. 
Consider using the @ref read function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return impl_.get_service().read_some(\n        impl_.get_implementation(), buffers, ec);\n  }\n\n  /// Start an asynchronous read.\n  /**\n   * This function is used to asynchronously read data from the serial port.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the read operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes read.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The read operation may not read all of the requested number of bytes.\n   * Consider using the @ref async_read function if you need to ensure that the\n   * requested amount of data is read before the asynchronous operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * basic_serial_port.async_read_some(\n   *     asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_read_some(this), handler, buffers);\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  basic_serial_port(const basic_serial_port&) ASIO_DELETED;\n  basic_serial_port& operator=(const basic_serial_port&) ASIO_DELETED;\n\n  class initiate_async_write_some\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_write_some(basic_serial_port* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n    
    const ConstBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_write_some(\n          self_->impl_.get_implementation(), buffers, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_serial_port* self_;\n  };\n\n  class initiate_async_read_some\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_read_some(basic_serial_port* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_read_some(\n          self_->impl_.get_implementation(), buffers, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_serial_port* self_;\n  };\n\n#if defined(ASIO_HAS_IOCP)\n  detail::io_object_impl<detail::win_iocp_serial_port_service, Executor> impl_;\n#else\n  detail::io_object_impl<detail::reactive_serial_port_service, Executor> impl_;\n#endif\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_SERIAL_PORT)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_BASIC_SERIAL_PORT_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_signal_set.hpp",
    "content": "//\n// basic_signal_set.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_SIGNAL_SET_HPP\n#define ASIO_BASIC_SIGNAL_SET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/signal_set_service.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n\nnamespace asio {\n\n/// Provides signal functionality.\n/**\n * The basic_signal_set class provides the ability to perform an asynchronous\n * wait for one or more signals to occur.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Example\n * Performing an asynchronous wait:\n * @code\n * void handler(\n *     const asio::error_code& error,\n *     int signal_number)\n * {\n *   if (!error)\n *   {\n *     // A signal occurred.\n *   }\n * }\n *\n * ...\n *\n * // Construct a signal set registered for process termination.\n * asio::signal_set signals(my_context, SIGINT, SIGTERM);\n *\n * // Start an asynchronous wait for one of the signals to occur.\n * signals.async_wait(handler);\n * @endcode\n *\n * @par Queueing of signal notifications\n *\n * If a signal is registered with a signal_set, and the signal occurs when\n * there are no waiting handlers, then the signal notification is queued. 
The\n * next async_wait operation on that signal_set will dequeue the notification.\n * If multiple notifications are queued, subsequent async_wait operations\n * dequeue them one at a time. Signal notifications are dequeued in order of\n * ascending signal number.\n *\n * If a signal number is removed from a signal_set (using the @c remove or @c\n * erase member functions) then any queued notifications for that signal are\n * discarded.\n *\n * @par Multiple registration of signals\n *\n * The same signal number may be registered with different signal_set objects.\n * When the signal occurs, one handler is called for each signal_set object.\n *\n * Note that multiple registration only works for signals that are registered\n * using Asio. The application must not also register a signal handler using\n * functions such as @c signal() or @c sigaction().\n *\n * @par Signal masking on POSIX platforms\n *\n * POSIX allows signals to be blocked using functions such as @c sigprocmask()\n * and @c pthread_sigmask(). 
For signals to be delivered, programs must ensure\n * that any signals registered using signal_set objects are unblocked in at\n * least one thread.\n */\ntemplate <typename Executor = executor>\nclass basic_signal_set\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the signal set type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The signal set type when rebound to the specified executor.\n    typedef basic_signal_set<Executor1> other;\n  };\n\n  /// Construct a signal set without adding any signals.\n  /**\n   * This constructor creates a signal set without registering for any signals.\n   *\n   * @param ex The I/O executor that the signal set will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * signal set.\n   */\n  explicit basic_signal_set(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Construct a signal set without adding any signals.\n  /**\n   * This constructor creates a signal set without registering for any signals.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the signal set will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the signal set.\n   */\n  template <typename ExecutionContext>\n  explicit basic_signal_set(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Construct a signal set and add one signal.\n  /**\n   * This constructor creates a signal set and registers for one signal.\n   *\n   * @param ex The I/O executor that the signal set will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * signal set.\n   *\n   * @param signal_number_1 The signal number to be added.\n   *\n   * @note This 
constructor is equivalent to performing:\n   * @code asio::signal_set signals(ex);\n   * signals.add(signal_number_1); @endcode\n   */\n  basic_signal_set(const executor_type& ex, int signal_number_1)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec);\n    asio::detail::throw_error(ec, \"add\");\n  }\n\n  /// Construct a signal set and add one signal.\n  /**\n   * This constructor creates a signal set and registers for one signal.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the signal set will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the signal set.\n   *\n   * @param signal_number_1 The signal number to be added.\n   *\n   * @note This constructor is equivalent to performing:\n   * @code asio::signal_set signals(context);\n   * signals.add(signal_number_1); @endcode\n   */\n  template <typename ExecutionContext>\n  basic_signal_set(ExecutionContext& context, int signal_number_1,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec);\n    asio::detail::throw_error(ec, \"add\");\n  }\n\n  /// Construct a signal set and add two signals.\n  /**\n   * This constructor creates a signal set and registers for two signals.\n   *\n   * @param ex The I/O executor that the signal set will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * signal set.\n   *\n   * @param signal_number_1 The first signal number to be added.\n   *\n   * @param signal_number_2 The second signal number to be added.\n   *\n   * @note This constructor is equivalent to performing:\n   * @code asio::signal_set signals(ex);\n   * signals.add(signal_number_1);\n   * signals.add(signal_number_2); 
@endcode\n   */\n  basic_signal_set(const executor_type& ex, int signal_number_1,\n      int signal_number_2)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec);\n    asio::detail::throw_error(ec, \"add\");\n    impl_.get_service().add(impl_.get_implementation(), signal_number_2, ec);\n    asio::detail::throw_error(ec, \"add\");\n  }\n\n  /// Construct a signal set and add two signals.\n  /**\n   * This constructor creates a signal set and registers for two signals.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the signal set will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the signal set.\n   *\n   * @param signal_number_1 The first signal number to be added.\n   *\n   * @param signal_number_2 The second signal number to be added.\n   *\n   * @note This constructor is equivalent to performing:\n   * @code asio::signal_set signals(context);\n   * signals.add(signal_number_1);\n   * signals.add(signal_number_2); @endcode\n   */\n  template <typename ExecutionContext>\n  basic_signal_set(ExecutionContext& context, int signal_number_1,\n      int signal_number_2,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec);\n    asio::detail::throw_error(ec, \"add\");\n    impl_.get_service().add(impl_.get_implementation(), signal_number_2, ec);\n    asio::detail::throw_error(ec, \"add\");\n  }\n\n  /// Construct a signal set and add three signals.\n  /**\n   * This constructor creates a signal set and registers for three signals.\n   *\n   * @param ex The I/O executor that the signal set will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * signal set.\n   *\n   * @param 
signal_number_1 The first signal number to be added.\n   *\n   * @param signal_number_2 The second signal number to be added.\n   *\n   * @param signal_number_3 The third signal number to be added.\n   *\n   * @note This constructor is equivalent to performing:\n   * @code asio::signal_set signals(ex);\n   * signals.add(signal_number_1);\n   * signals.add(signal_number_2);\n   * signals.add(signal_number_3); @endcode\n   */\n  basic_signal_set(const executor_type& ex, int signal_number_1,\n      int signal_number_2, int signal_number_3)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec);\n    asio::detail::throw_error(ec, \"add\");\n    impl_.get_service().add(impl_.get_implementation(), signal_number_2, ec);\n    asio::detail::throw_error(ec, \"add\");\n    impl_.get_service().add(impl_.get_implementation(), signal_number_3, ec);\n    asio::detail::throw_error(ec, \"add\");\n  }\n\n  /// Construct a signal set and add three signals.\n  /**\n   * This constructor creates a signal set and registers for three signals.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the signal set will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the signal set.\n   *\n   * @param signal_number_1 The first signal number to be added.\n   *\n   * @param signal_number_2 The second signal number to be added.\n   *\n   * @param signal_number_3 The third signal number to be added.\n   *\n   * @note This constructor is equivalent to performing:\n   * @code asio::signal_set signals(context);\n   * signals.add(signal_number_1);\n   * signals.add(signal_number_2);\n   * signals.add(signal_number_3); @endcode\n   */\n  template <typename ExecutionContext>\n  basic_signal_set(ExecutionContext& context, int signal_number_1,\n      int signal_number_2, int signal_number_3,\n      typename enable_if<\n        
is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec);\n    asio::detail::throw_error(ec, \"add\");\n    impl_.get_service().add(impl_.get_implementation(), signal_number_2, ec);\n    asio::detail::throw_error(ec, \"add\");\n    impl_.get_service().add(impl_.get_implementation(), signal_number_3, ec);\n    asio::detail::throw_error(ec, \"add\");\n  }\n\n  /// Destroys the signal set.\n  /**\n   * This function destroys the signal set, cancelling any outstanding\n   * asynchronous wait operations associated with the signal set as if by\n   * calling @c cancel.\n   */\n  ~basic_signal_set()\n  {\n  }\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n  /// Add a signal to a signal_set.\n  /**\n   * This function adds the specified signal to the set. It has no effect if the\n   * signal is already in the set.\n   *\n   * @param signal_number The signal to be added to the set.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void add(int signal_number)\n  {\n    asio::error_code ec;\n    impl_.get_service().add(impl_.get_implementation(), signal_number, ec);\n    asio::detail::throw_error(ec, \"add\");\n  }\n\n  /// Add a signal to a signal_set.\n  /**\n   * This function adds the specified signal to the set. 
It has no effect if the\n   * signal is already in the set.\n   *\n   * @param signal_number The signal to be added to the set.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID add(int signal_number,\n      asio::error_code& ec)\n  {\n    impl_.get_service().add(impl_.get_implementation(), signal_number, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Remove a signal from a signal_set.\n  /**\n   * This function removes the specified signal from the set. It has no effect\n   * if the signal is not in the set.\n   *\n   * @param signal_number The signal to be removed from the set.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Removes any notifications that have been queued for the specified\n   * signal number.\n   */\n  void remove(int signal_number)\n  {\n    asio::error_code ec;\n    impl_.get_service().remove(impl_.get_implementation(), signal_number, ec);\n    asio::detail::throw_error(ec, \"remove\");\n  }\n\n  /// Remove a signal from a signal_set.\n  /**\n   * This function removes the specified signal from the set. It has no effect\n   * if the signal is not in the set.\n   *\n   * @param signal_number The signal to be removed from the set.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Removes any notifications that have been queued for the specified\n   * signal number.\n   */\n  ASIO_SYNC_OP_VOID remove(int signal_number,\n      asio::error_code& ec)\n  {\n    impl_.get_service().remove(impl_.get_implementation(), signal_number, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Remove all signals from a signal_set.\n  /**\n   * This function removes all signals from the set. 
It has no effect if the set\n   * is already empty.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Removes all queued notifications.\n   */\n  void clear()\n  {\n    asio::error_code ec;\n    impl_.get_service().clear(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"clear\");\n  }\n\n  /// Remove all signals from a signal_set.\n  /**\n   * This function removes all signals from the set. It has no effect if the set\n   * is already empty.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Removes all queued notifications.\n   */\n  ASIO_SYNC_OP_VOID clear(asio::error_code& ec)\n  {\n    impl_.get_service().clear(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Cancel all operations associated with the signal set.\n  /**\n   * This function forces the completion of any pending asynchronous wait\n   * operations against the signal set. The handler for each cancelled\n   * operation will be invoked with the asio::error::operation_aborted\n   * error code.\n   *\n   * Cancellation does not alter the set of registered signals.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If a registered signal occurred before cancel() is called, then the\n   * handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  void cancel()\n  {\n    asio::error_code ec;\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n  }\n\n  /// Cancel all operations associated with the signal set.\n  /**\n   * This function forces the completion of any pending asynchronous wait\n   * operations against the signal set. 
The handler for each cancelled\n   * operation will be invoked with the asio::error::operation_aborted\n   * error code.\n   *\n   * Cancellation does not alter the set of registered signals.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note If a registered signal occurred before cancel() is called, then the\n   * handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  ASIO_SYNC_OP_VOID cancel(asio::error_code& ec)\n  {\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Start an asynchronous operation to wait for a signal to be delivered.\n  /**\n   * This function may be used to initiate an asynchronous wait against the\n   * signal set. It always returns immediately.\n   *\n   * For each call to async_wait(), the supplied handler will be called exactly\n   * once. The handler will be called when:\n   *\n   * @li One of the registered signals in the signal set occurs; or\n   *\n   * @li The signal set was cancelled, in which case the handler is passed the\n   * error code asio::error::operation_aborted.\n   *\n   * @param handler The handler to be called when the signal occurs. Copies\n   * will be made of the handler as required. The function signature of the\n   * handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   int signal_number // Indicates which signal occurred.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code, int))\n      SignalHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(SignalHandler,\n      void (asio::error_code, int))\n  async_wait(\n      ASIO_MOVE_ARG(SignalHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<SignalHandler, void (asio::error_code, int)>(\n        initiate_async_wait(this), handler);\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  basic_signal_set(const basic_signal_set&) ASIO_DELETED;\n  basic_signal_set& operator=(const basic_signal_set&) ASIO_DELETED;\n\n  class initiate_async_wait\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_wait(basic_signal_set* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename SignalHandler>\n    void operator()(ASIO_MOVE_ARG(SignalHandler) handler) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a SignalHandler.\n      ASIO_SIGNAL_HANDLER_CHECK(SignalHandler, handler) type_check;\n\n      detail::non_const_lvalue<SignalHandler> handler2(handler);\n      self_->impl_.get_service().async_wait(\n          self_->impl_.get_implementation(), handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_signal_set* self_;\n  };\n\n  detail::io_object_impl<detail::signal_set_service, Executor> impl_;\n};\n\n} // namespace asio\n\n#endif // ASIO_BASIC_SIGNAL_SET_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_socket.hpp",
    "content": "//\n// basic_socket.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_SOCKET_HPP\n#define ASIO_BASIC_SOCKET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/post.hpp\"\n#include \"asio/socket_base.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include \"asio/detail/null_socket_service.hpp\"\n#elif defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_socket_service.hpp\"\n#else\n# include \"asio/detail/reactive_socket_service.hpp\"\n#endif\n\n#if defined(ASIO_HAS_MOVE)\n# include <utility>\n#endif // defined(ASIO_HAS_MOVE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(ASIO_BASIC_SOCKET_FWD_DECL)\n#define ASIO_BASIC_SOCKET_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_socket;\n\n#endif // !defined(ASIO_BASIC_SOCKET_FWD_DECL)\n\n/// Provides socket functionality.\n/**\n * The basic_socket class template provides functionality that is common to both\n * stream-oriented and datagram-oriented sockets.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Protocol, typename Executor>\nclass basic_socket\n  : public socket_base\n{\npublic:\n  
/// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the socket type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The socket type when rebound to the specified executor.\n    typedef basic_socket<Protocol, Executor1> other;\n  };\n\n  /// The native representation of a socket.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#elif defined(ASIO_WINDOWS_RUNTIME)\n  typedef typename detail::null_socket_service<\n    Protocol>::native_handle_type native_handle_type;\n#elif defined(ASIO_HAS_IOCP)\n  typedef typename detail::win_iocp_socket_service<\n    Protocol>::native_handle_type native_handle_type;\n#else\n  typedef typename detail::reactive_socket_service<\n    Protocol>::native_handle_type native_handle_type;\n#endif\n\n  /// The protocol type.\n  typedef Protocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n#if !defined(ASIO_NO_EXTENSIONS)\n  /// A basic_socket is always the lowest layer.\n  typedef basic_socket<Protocol, Executor> lowest_layer_type;\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n  /// Construct a basic_socket without opening it.\n  /**\n   * This constructor creates a socket without opening it.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   */\n  explicit basic_socket(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Construct a basic_socket without opening it.\n  /**\n   * This constructor creates a socket without opening it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   */\n  template <typename ExecutionContext>\n  explicit basic_socket(ExecutionContext& 
context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Construct and open a basic_socket.\n  /**\n   * This constructor creates and opens a socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_socket(const executor_type& ex, const protocol_type& protocol)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Construct and open a basic_socket.\n  /**\n   * This constructor creates and opens a socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_socket(ExecutionContext& context, const protocol_type& protocol,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Construct a basic_socket, opening it and binding it to the given local\n  /// endpoint.\n  /**\n   * This constructor creates a socket and automatically opens it bound to the\n   * specified endpoint on the local machine. 
The protocol used is the protocol\n   * associated with the given endpoint.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the socket will\n   * be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_socket(const executor_type& ex, const endpoint_type& endpoint)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    const protocol_type protocol = endpoint.protocol();\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n    impl_.get_service().bind(impl_.get_implementation(), endpoint, ec);\n    asio::detail::throw_error(ec, \"bind\");\n  }\n\n  /// Construct a basic_socket, opening it and binding it to the given local\n  /// endpoint.\n  /**\n   * This constructor creates a socket and automatically opens it bound to the\n   * specified endpoint on the local machine. 
The protocol used is the protocol\n   * associated with the given endpoint.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the socket will\n   * be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_socket(ExecutionContext& context, const endpoint_type& endpoint,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    const protocol_type protocol = endpoint.protocol();\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n    impl_.get_service().bind(impl_.get_implementation(), endpoint, ec);\n    asio::detail::throw_error(ec, \"bind\");\n  }\n\n  /// Construct a basic_socket on an existing native socket.\n  /**\n   * This constructor creates a socket object to hold an existing native socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param native_socket A native socket.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_socket(const executor_type& ex, const protocol_type& protocol,\n      const native_handle_type& native_socket)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        protocol, native_socket, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Construct a basic_socket on an existing native socket.\n  /**\n   * This constructor creates a socket object to hold an 
existing native socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param native_socket A native socket.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_socket(ExecutionContext& context, const protocol_type& protocol,\n      const native_handle_type& native_socket,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        protocol, native_socket, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_socket from another.\n  /**\n   * This constructor moves a socket from one object to another.\n   *\n   * @param other The other basic_socket object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_socket(const executor_type&) constructor.\n   */\n  basic_socket(basic_socket&& other) ASIO_NOEXCEPT\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a basic_socket from another.\n  /**\n   * This assignment operator moves a socket from one object to another.\n   *\n   * @param other The other basic_socket object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_socket(const executor_type&) constructor.\n   */\n  basic_socket& operator=(basic_socket&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n\n  // All 
sockets have access to each other's implementations.\n  template <typename Protocol1, typename Executor1>\n  friend class basic_socket;\n\n  /// Move-construct a basic_socket from a socket of another protocol type.\n  /**\n   * This constructor moves a socket from one object to another.\n   *\n   * @param other The other basic_socket object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_socket(const executor_type&) constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  basic_socket(basic_socket<Protocol1, Executor1>&& other,\n      typename enable_if<\n        is_convertible<Protocol1, Protocol>::value\n          && is_convertible<Executor1, Executor>::value\n      >::type* = 0)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a basic_socket from a socket of another protocol type.\n  /**\n   * This assignment operator moves a socket from one object to another.\n   *\n   * @param other The other basic_socket object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_socket(const executor_type&) constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  typename enable_if<\n    is_convertible<Protocol1, Protocol>::value\n      && is_convertible<Executor1, Executor>::value,\n    basic_socket&\n  >::type operator=(basic_socket<Protocol1, Executor1> && other)\n  {\n    basic_socket tmp(std::move(other));\n    impl_ = std::move(tmp.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n#if !defined(ASIO_NO_EXTENSIONS)\n  /// Get a reference to the lowest layer.\n  /**\n   * This function returns a 
reference to the lowest layer in a stack of\n   * layers. Since a basic_socket cannot contain any further layers, it simply\n   * returns a reference to itself.\n   *\n   * @return A reference to the lowest layer in the stack of layers. Ownership\n   * is not transferred to the caller.\n   */\n  lowest_layer_type& lowest_layer()\n  {\n    return *this;\n  }\n\n  /// Get a const reference to the lowest layer.\n  /**\n   * This function returns a const reference to the lowest layer in a stack of\n   * layers. Since a basic_socket cannot contain any further layers, it simply\n   * returns a reference to itself.\n   *\n   * @return A const reference to the lowest layer in the stack of layers.\n   * Ownership is not transferred to the caller.\n   */\n  const lowest_layer_type& lowest_layer() const\n  {\n    return *this;\n  }\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n  /// Open the socket using the specified protocol.\n  /**\n   * This function opens the socket so that it will use the specified protocol.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * socket.open(asio::ip::tcp::v4());\n   * @endcode\n   */\n  void open(const protocol_type& protocol = protocol_type())\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Open the socket using the specified protocol.\n  /**\n   * This function opens the socket so that it will use the specified protocol.\n   *\n   * @param protocol An object specifying which protocol is to be used.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * asio::error_code ec;\n   * socket.open(asio::ip::tcp::v4(), ec);\n   * if (ec)\n   * {\n   *   // An 
error occurred.\n   * }\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID open(const protocol_type& protocol,\n      asio::error_code& ec)\n  {\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Assign an existing native socket to the socket.\n  /*\n   * This function opens the socket to hold an existing native socket.\n   *\n   * @param protocol An object specifying which protocol is to be used.\n   *\n   * @param native_socket A native socket.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void assign(const protocol_type& protocol,\n      const native_handle_type& native_socket)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        protocol, native_socket, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Assign an existing native socket to the socket.\n  /*\n   * This function opens the socket to hold an existing native socket.\n   *\n   * @param protocol An object specifying which protocol is to be used.\n   *\n   * @param native_socket A native socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID assign(const protocol_type& protocol,\n      const native_handle_type& native_socket, asio::error_code& ec)\n  {\n    impl_.get_service().assign(impl_.get_implementation(),\n        protocol, native_socket, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Determine whether the socket is open.\n  bool is_open() const\n  {\n    return impl_.get_service().is_open(impl_.get_implementation());\n  }\n\n  /// Close the socket.\n  /**\n   * This function is used to close the socket. Any asynchronous send, receive\n   * or connect operations will be cancelled immediately, and will complete\n   * with the asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure. 
Note that, even if\n   * the function indicates an error, the underlying descriptor is closed.\n   *\n   * @note For portable behaviour with respect to graceful closure of a\n   * connected socket, call shutdown() before closing the socket.\n   */\n  void close()\n  {\n    asio::error_code ec;\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"close\");\n  }\n\n  /// Close the socket.\n  /**\n   * This function is used to close the socket. Any asynchronous send, receive\n   * or connect operations will be cancelled immediately, and will complete\n   * with the asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any. Note that, even if\n   * the function indicates an error, the underlying descriptor is closed.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::error_code ec;\n   * socket.close(ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   *\n   * @note For portable behaviour with respect to graceful closure of a\n   * connected socket, call shutdown() before closing the socket.\n   */\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Release ownership of the underlying native socket.\n  /**\n   * This function causes all outstanding asynchronous connect, send and receive\n   * operations to finish immediately, and the handlers for cancelled operations\n   * will be passed the asio::error::operation_aborted error. 
Ownership\n   * of the native socket is then transferred to the caller.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note This function is unsupported on Windows versions prior to Windows\n   * 8.1, and will fail with asio::error::operation_not_supported on\n   * these platforms.\n   */\n#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \\\n  && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0603)\n  __declspec(deprecated(\"This function always fails with \"\n        \"operation_not_supported when used on Windows versions \"\n        \"prior to Windows 8.1.\"))\n#endif\n  native_handle_type release()\n  {\n    asio::error_code ec;\n    native_handle_type s = impl_.get_service().release(\n        impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"release\");\n    return s;\n  }\n\n  /// Release ownership of the underlying native socket.\n  /**\n   * This function causes all outstanding asynchronous connect, send and receive\n   * operations to finish immediately, and the handlers for cancelled operations\n   * will be passed the asio::error::operation_aborted error. 
Ownership\n   * of the native socket is then transferred to the caller.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note This function is unsupported on Windows versions prior to Windows\n   * 8.1, and will fail with asio::error::operation_not_supported on\n   * these platforms.\n   */\n#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \\\n  && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0603)\n  __declspec(deprecated(\"This function always fails with \"\n        \"operation_not_supported when used on Windows versions \"\n        \"prior to Windows 8.1.\"))\n#endif\n  native_handle_type release(asio::error_code& ec)\n  {\n    return impl_.get_service().release(impl_.get_implementation(), ec);\n  }\n\n  /// Get the native socket representation.\n  /**\n   * This function may be used to obtain the underlying representation of the\n   * socket. This is intended to allow access to native socket functionality\n   * that is not otherwise provided.\n   */\n  native_handle_type native_handle()\n  {\n    return impl_.get_service().native_handle(impl_.get_implementation());\n  }\n\n  /// Cancel all asynchronous operations associated with the socket.\n  /**\n   * This function causes all outstanding asynchronous connect, send and receive\n   * operations to finish immediately, and the handlers for cancelled operations\n   * will be passed the asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls to cancel() will always fail with\n   * asio::error::operation_not_supported when run on Windows XP, Windows\n   * Server 2003, and earlier versions of Windows, unless\n   * ASIO_ENABLE_CANCELIO is defined. 
However, the CancelIo function has\n   * two issues that should be considered before enabling its use:\n   *\n   * @li It will only cancel asynchronous operations that were initiated in the\n   * current thread.\n   *\n   * @li It can appear to complete without error, but the request to cancel the\n   * unfinished operations may be silently ignored by the operating system.\n   * Whether it works or not seems to depend on the drivers that are installed.\n   *\n   * For portable cancellation, consider using one of the following\n   * alternatives:\n   *\n   * @li Disable asio's I/O completion port backend by defining\n   * ASIO_DISABLE_IOCP.\n   *\n   * @li Use the close() function to simultaneously cancel the outstanding\n   * operations and close the socket.\n   *\n   * When running on Windows Vista, Windows Server 2008, and later, the\n   * CancelIoEx function is always used. This function does not have the\n   * problems described above.\n   */\n#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \\\n  && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \\\n  && !defined(ASIO_ENABLE_CANCELIO)\n  __declspec(deprecated(\"By default, this function always fails with \"\n        \"operation_not_supported when used on Windows XP, Windows Server 2003, \"\n        \"or earlier. 
Consult documentation for details.\"))\n#endif\n  void cancel()\n  {\n    asio::error_code ec;\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n  }\n\n  /// Cancel all asynchronous operations associated with the socket.\n  /**\n   * This function causes all outstanding asynchronous connect, send and receive\n   * operations to finish immediately, and the handlers for cancelled operations\n   * will be passed the asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls to cancel() will always fail with\n   * asio::error::operation_not_supported when run on Windows XP, Windows\n   * Server 2003, and earlier versions of Windows, unless\n   * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has\n   * two issues that should be considered before enabling its use:\n   *\n   * @li It will only cancel asynchronous operations that were initiated in the\n   * current thread.\n   *\n   * @li It can appear to complete without error, but the request to cancel the\n   * unfinished operations may be silently ignored by the operating system.\n   * Whether it works or not seems to depend on the drivers that are installed.\n   *\n   * For portable cancellation, consider using one of the following\n   * alternatives:\n   *\n   * @li Disable asio's I/O completion port backend by defining\n   * ASIO_DISABLE_IOCP.\n   *\n   * @li Use the close() function to simultaneously cancel the outstanding\n   * operations and close the socket.\n   *\n   * When running on Windows Vista, Windows Server 2008, and later, the\n   * CancelIoEx function is always used. 
This function does not have the\n   * problems described above.\n   */\n#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \\\n  && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \\\n  && !defined(ASIO_ENABLE_CANCELIO)\n  __declspec(deprecated(\"By default, this function always fails with \"\n        \"operation_not_supported when used on Windows XP, Windows Server 2003, \"\n        \"or earlier. Consult documentation for details.\"))\n#endif\n  ASIO_SYNC_OP_VOID cancel(asio::error_code& ec)\n  {\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Determine whether the socket is at the out-of-band data mark.\n  /**\n   * This function is used to check whether the socket input is currently\n   * positioned at the out-of-band data mark.\n   *\n   * @return A bool indicating whether the socket is at the out-of-band data\n   * mark.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  bool at_mark() const\n  {\n    asio::error_code ec;\n    bool b = impl_.get_service().at_mark(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"at_mark\");\n    return b;\n  }\n\n  /// Determine whether the socket is at the out-of-band data mark.\n  /**\n   * This function is used to check whether the socket input is currently\n   * positioned at the out-of-band data mark.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return A bool indicating whether the socket is at the out-of-band data\n   * mark.\n   */\n  bool at_mark(asio::error_code& ec) const\n  {\n    return impl_.get_service().at_mark(impl_.get_implementation(), ec);\n  }\n\n  /// Determine the number of bytes available for reading.\n  /**\n   * This function is used to determine the number of bytes that may be read\n   * without blocking.\n   *\n   * @return The number of bytes that may be read without blocking, or 0 if an\n   * error occurs.\n   *\n   * @throws asio::system_error Thrown on failure.\n  
 */\n  std::size_t available() const\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().available(\n        impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"available\");\n    return s;\n  }\n\n  /// Determine the number of bytes available for reading.\n  /**\n   * This function is used to determine the number of bytes that may be read\n   * without blocking.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of bytes that may be read without blocking, or 0 if an\n   * error occurs.\n   */\n  std::size_t available(asio::error_code& ec) const\n  {\n    return impl_.get_service().available(impl_.get_implementation(), ec);\n  }\n\n  /// Bind the socket to the given local endpoint.\n  /**\n   * This function binds the socket to the specified endpoint on the local\n   * machine.\n   *\n   * @param endpoint An endpoint on the local machine to which the socket will\n   * be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * socket.open(asio::ip::tcp::v4());\n   * socket.bind(asio::ip::tcp::endpoint(\n   *       asio::ip::tcp::v4(), 12345));\n   * @endcode\n   */\n  void bind(const endpoint_type& endpoint)\n  {\n    asio::error_code ec;\n    impl_.get_service().bind(impl_.get_implementation(), endpoint, ec);\n    asio::detail::throw_error(ec, \"bind\");\n  }\n\n  /// Bind the socket to the given local endpoint.\n  /**\n   * This function binds the socket to the specified endpoint on the local\n   * machine.\n   *\n   * @param endpoint An endpoint on the local machine to which the socket will\n   * be bound.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * socket.open(asio::ip::tcp::v4());\n   * asio::error_code ec;\n   * socket.bind(asio::ip::tcp::endpoint(\n   *       
asio::ip::tcp::v4(), 12345), ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID bind(const endpoint_type& endpoint,\n      asio::error_code& ec)\n  {\n    impl_.get_service().bind(impl_.get_implementation(), endpoint, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Connect the socket to the specified endpoint.\n  /**\n   * This function is used to connect a socket to the specified remote endpoint.\n   * The function call will block until the connection is successfully made or\n   * an error occurs.\n   *\n   * The socket is automatically opened if it is not already open. If the\n   * connect fails, and the socket was automatically opened, the socket is\n   * not returned to the closed state.\n   *\n   * @param peer_endpoint The remote endpoint to which the socket will be\n   * connected.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * asio::ip::tcp::endpoint endpoint(\n   *     asio::ip::address::from_string(\"1.2.3.4\"), 12345);\n   * socket.connect(endpoint);\n   * @endcode\n   */\n  void connect(const endpoint_type& peer_endpoint)\n  {\n    asio::error_code ec;\n    if (!is_open())\n    {\n      impl_.get_service().open(impl_.get_implementation(),\n          peer_endpoint.protocol(), ec);\n      asio::detail::throw_error(ec, \"connect\");\n    }\n    impl_.get_service().connect(impl_.get_implementation(), peer_endpoint, ec);\n    asio::detail::throw_error(ec, \"connect\");\n  }\n\n  /// Connect the socket to the specified endpoint.\n  /**\n   * This function is used to connect a socket to the specified remote endpoint.\n   * The function call will block until the connection is successfully made or\n   * an error occurs.\n   *\n   * The socket is automatically opened if it is not already open. 
If the\n   * connect fails, and the socket was automatically opened, the socket is\n   * not returned to the closed state.\n   *\n   * @param peer_endpoint The remote endpoint to which the socket will be\n   * connected.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * asio::ip::tcp::endpoint endpoint(\n   *     asio::ip::address::from_string(\"1.2.3.4\"), 12345);\n   * asio::error_code ec;\n   * socket.connect(endpoint, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID connect(const endpoint_type& peer_endpoint,\n      asio::error_code& ec)\n  {\n    if (!is_open())\n    {\n      impl_.get_service().open(impl_.get_implementation(),\n            peer_endpoint.protocol(), ec);\n      if (ec)\n      {\n        ASIO_SYNC_OP_VOID_RETURN(ec);\n      }\n    }\n\n    impl_.get_service().connect(impl_.get_implementation(), peer_endpoint, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Start an asynchronous connect.\n  /**\n   * This function is used to asynchronously connect a socket to the specified\n   * remote endpoint. The function call always returns immediately.\n   *\n   * The socket is automatically opened if it is not already open. If the\n   * connect fails, and the socket was automatically opened, the socket is\n   * not returned to the closed state.\n   *\n   * @param peer_endpoint The remote endpoint to which the socket will be\n   * connected. Copies will be made of the endpoint object as required.\n   *\n   * @param handler The handler to be called when the connection operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void connect_handler(const asio::error_code& error)\n   * {\n   *   if (!error)\n   *   {\n   *     // Connect succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::socket socket(my_context);\n   * asio::ip::tcp::endpoint endpoint(\n   *     asio::ip::address::from_string(\"1.2.3.4\"), 12345);\n   * socket.async_connect(endpoint, connect_handler);\n   * @endcode\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        ConnectHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ConnectHandler,\n      void (asio::error_code))\n  async_connect(const endpoint_type& peer_endpoint,\n      ASIO_MOVE_ARG(ConnectHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    asio::error_code open_ec;\n    if (!is_open())\n    {\n      const protocol_type protocol = peer_endpoint.protocol();\n      impl_.get_service().open(impl_.get_implementation(), protocol, open_ec);\n    }\n\n    return async_initiate<ConnectHandler, void (asio::error_code)>(\n        initiate_async_connect(this), handler, peer_endpoint, open_ec);\n  }\n\n  /// Set an option on the socket.\n  /**\n   * This function is used to set an option on the socket.\n   *\n   * @param option The new option value to be set on the socket.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa SettableSocketOption @n\n   * asio::socket_base::broadcast @n\n   * asio::socket_base::do_not_route @n\n   * 
asio::socket_base::keep_alive @n\n   * asio::socket_base::linger @n\n   * asio::socket_base::receive_buffer_size @n\n   * asio::socket_base::receive_low_watermark @n\n   * asio::socket_base::reuse_address @n\n   * asio::socket_base::send_buffer_size @n\n   * asio::socket_base::send_low_watermark @n\n   * asio::ip::multicast::join_group @n\n   * asio::ip::multicast::leave_group @n\n   * asio::ip::multicast::enable_loopback @n\n   * asio::ip::multicast::outbound_interface @n\n   * asio::ip::multicast::hops @n\n   * asio::ip::tcp::no_delay\n   *\n   * @par Example\n   * Setting the IPPROTO_TCP/TCP_NODELAY option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::no_delay option(true);\n   * socket.set_option(option);\n   * @endcode\n   */\n  template <typename SettableSocketOption>\n  void set_option(const SettableSocketOption& option)\n  {\n    asio::error_code ec;\n    impl_.get_service().set_option(impl_.get_implementation(), option, ec);\n    asio::detail::throw_error(ec, \"set_option\");\n  }\n\n  /// Set an option on the socket.\n  /**\n   * This function is used to set an option on the socket.\n   *\n   * @param option The new option value to be set on the socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa SettableSocketOption @n\n   * asio::socket_base::broadcast @n\n   * asio::socket_base::do_not_route @n\n   * asio::socket_base::keep_alive @n\n   * asio::socket_base::linger @n\n   * asio::socket_base::receive_buffer_size @n\n   * asio::socket_base::receive_low_watermark @n\n   * asio::socket_base::reuse_address @n\n   * asio::socket_base::send_buffer_size @n\n   * asio::socket_base::send_low_watermark @n\n   * asio::ip::multicast::join_group @n\n   * asio::ip::multicast::leave_group @n\n   * asio::ip::multicast::enable_loopback @n\n   * asio::ip::multicast::outbound_interface @n\n   * asio::ip::multicast::hops @n\n   * asio::ip::tcp::no_delay\n   *\n   * @par Example\n   * 
Setting the IPPROTO_TCP/TCP_NODELAY option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::no_delay option(true);\n   * asio::error_code ec;\n   * socket.set_option(option, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename SettableSocketOption>\n  ASIO_SYNC_OP_VOID set_option(const SettableSocketOption& option,\n      asio::error_code& ec)\n  {\n    impl_.get_service().set_option(impl_.get_implementation(), option, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get an option from the socket.\n  /**\n   * This function is used to get the current value of an option on the socket.\n   *\n   * @param option The option value to be obtained from the socket.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa GettableSocketOption @n\n   * asio::socket_base::broadcast @n\n   * asio::socket_base::do_not_route @n\n   * asio::socket_base::keep_alive @n\n   * asio::socket_base::linger @n\n   * asio::socket_base::receive_buffer_size @n\n   * asio::socket_base::receive_low_watermark @n\n   * asio::socket_base::reuse_address @n\n   * asio::socket_base::send_buffer_size @n\n   * asio::socket_base::send_low_watermark @n\n   * asio::ip::multicast::join_group @n\n   * asio::ip::multicast::leave_group @n\n   * asio::ip::multicast::enable_loopback @n\n   * asio::ip::multicast::outbound_interface @n\n   * asio::ip::multicast::hops @n\n   * asio::ip::tcp::no_delay\n   *\n   * @par Example\n   * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::socket::keep_alive option;\n   * socket.get_option(option);\n   * bool is_set = option.value();\n   * @endcode\n   */\n  template <typename GettableSocketOption>\n  void get_option(GettableSocketOption& option) const\n  {\n    asio::error_code ec;\n    impl_.get_service().get_option(impl_.get_implementation(), option, 
ec);\n    asio::detail::throw_error(ec, \"get_option\");\n  }\n\n  /// Get an option from the socket.\n  /**\n   * This function is used to get the current value of an option on the socket.\n   *\n   * @param option The option value to be obtained from the socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa GettableSocketOption @n\n   * asio::socket_base::broadcast @n\n   * asio::socket_base::do_not_route @n\n   * asio::socket_base::keep_alive @n\n   * asio::socket_base::linger @n\n   * asio::socket_base::receive_buffer_size @n\n   * asio::socket_base::receive_low_watermark @n\n   * asio::socket_base::reuse_address @n\n   * asio::socket_base::send_buffer_size @n\n   * asio::socket_base::send_low_watermark @n\n   * asio::ip::multicast::join_group @n\n   * asio::ip::multicast::leave_group @n\n   * asio::ip::multicast::enable_loopback @n\n   * asio::ip::multicast::outbound_interface @n\n   * asio::ip::multicast::hops @n\n   * asio::ip::tcp::no_delay\n   *\n   * @par Example\n   * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::socket::keep_alive option;\n   * asio::error_code ec;\n   * socket.get_option(option, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * bool is_set = option.value();\n   * @endcode\n   */\n  template <typename GettableSocketOption>\n  ASIO_SYNC_OP_VOID get_option(GettableSocketOption& option,\n      asio::error_code& ec) const\n  {\n    impl_.get_service().get_option(impl_.get_implementation(), option, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Perform an IO control command on the socket.\n  /**\n   * This function is used to execute an IO control command on the socket.\n   *\n   * @param command The IO control command to be performed on the socket.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa IoControlCommand @n\n   * 
asio::socket_base::bytes_readable @n\n   * asio::socket_base::non_blocking_io\n   *\n   * @par Example\n   * Getting the number of bytes ready to read:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::socket::bytes_readable command;\n   * socket.io_control(command);\n   * std::size_t bytes_readable = command.get();\n   * @endcode\n   */\n  template <typename IoControlCommand>\n  void io_control(IoControlCommand& command)\n  {\n    asio::error_code ec;\n    impl_.get_service().io_control(impl_.get_implementation(), command, ec);\n    asio::detail::throw_error(ec, \"io_control\");\n  }\n\n  /// Perform an IO control command on the socket.\n  /**\n   * This function is used to execute an IO control command on the socket.\n   *\n   * @param command The IO control command to be performed on the socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa IoControlCommand @n\n   * asio::socket_base::bytes_readable @n\n   * asio::socket_base::non_blocking_io\n   *\n   * @par Example\n   * Getting the number of bytes ready to read:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::socket::bytes_readable command;\n   * asio::error_code ec;\n   * socket.io_control(command, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * std::size_t bytes_readable = command.get();\n   * @endcode\n   */\n  template <typename IoControlCommand>\n  ASIO_SYNC_OP_VOID io_control(IoControlCommand& command,\n      asio::error_code& ec)\n  {\n    impl_.get_service().io_control(impl_.get_implementation(), command, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Gets the non-blocking mode of the socket.\n  /**\n   * @returns @c true if the socket's synchronous operations will fail with\n   * asio::error::would_block if they are unable to perform the requested\n   * operation immediately. 
If @c false, synchronous operations will block\n   * until complete.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  bool non_blocking() const\n  {\n    return impl_.get_service().non_blocking(impl_.get_implementation());\n  }\n\n  /// Sets the non-blocking mode of the socket.\n  /**\n   * @param mode If @c true, the socket's synchronous operations will fail with\n   * asio::error::would_block if they are unable to perform the requested\n   * operation immediately. If @c false, synchronous operations will block\n   * until complete.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  void non_blocking(bool mode)\n  {\n    asio::error_code ec;\n    impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec);\n    asio::detail::throw_error(ec, \"non_blocking\");\n  }\n\n  /// Sets the non-blocking mode of the socket.\n  /**\n   * @param mode If @c true, the socket's synchronous operations will fail with\n   * asio::error::would_block if they are unable to perform the requested\n   * operation immediately. If @c false, synchronous operations will block\n   * until complete.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. 
Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  ASIO_SYNC_OP_VOID non_blocking(\n      bool mode, asio::error_code& ec)\n  {\n    impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Gets the non-blocking mode of the native socket implementation.\n  /**\n   * This function is used to retrieve the non-blocking mode of the underlying\n   * native socket. This mode has no effect on the behaviour of the socket\n   * object's synchronous operations.\n   *\n   * @returns @c true if the underlying socket is in non-blocking mode and\n   * direct system calls may fail with asio::error::would_block (or the\n   * equivalent system error).\n   *\n   * @note The current non-blocking mode is cached by the socket object.\n   * Consequently, the return value may be incorrect if the non-blocking mode\n   * was set directly on the native socket.\n   *\n   * @par Example\n   * This function is intended to allow the encapsulation of arbitrary\n   * non-blocking system calls as asynchronous operations, in a way that is\n   * transparent to the user of the socket object. 
The following example\n   * illustrates how Linux's @c sendfile system call might be encapsulated:\n   * @code template <typename Handler>\n   * struct sendfile_op\n   * {\n   *   tcp::socket& sock_;\n   *   int fd_;\n   *   Handler handler_;\n   *   off_t offset_;\n   *   std::size_t total_bytes_transferred_;\n   *\n   *   // Function call operator meeting WriteHandler requirements.\n   *   // Used as the handler for the async_write_some operation.\n   *   void operator()(asio::error_code ec, std::size_t)\n   *   {\n   *     // Put the underlying socket into non-blocking mode.\n   *     if (!ec)\n   *       if (!sock_.native_non_blocking())\n   *         sock_.native_non_blocking(true, ec);\n   *\n   *     if (!ec)\n   *     {\n   *       for (;;)\n   *       {\n   *         // Try the system call.\n   *         errno = 0;\n   *         int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536);\n   *         ec = asio::error_code(n < 0 ? errno : 0,\n   *             asio::error::get_system_category());\n   *         total_bytes_transferred_ += ec ? 
0 : n;\n   *\n   *         // Retry operation immediately if interrupted by signal.\n   *         if (ec == asio::error::interrupted)\n   *           continue;\n   *\n   *         // Check if we need to run the operation again.\n   *         if (ec == asio::error::would_block\n   *             || ec == asio::error::try_again)\n   *         {\n   *           // We have to wait for the socket to become ready again.\n   *           sock_.async_wait(tcp::socket::wait_write, *this);\n   *           return;\n   *         }\n   *\n   *         if (ec || n == 0)\n   *         {\n   *           // An error occurred, or we have reached the end of the file.\n   *           // Either way we must exit the loop so we can call the handler.\n   *           break;\n   *         }\n   *\n   *         // Loop around to try calling sendfile again.\n   *       }\n   *     }\n   *\n   *     // Pass result back to user's handler.\n   *     handler_(ec, total_bytes_transferred_);\n   *   }\n   * };\n   *\n   * template <typename Handler>\n   * void async_sendfile(tcp::socket& sock, int fd, Handler h)\n   * {\n   *   sendfile_op<Handler> op = { sock, fd, h, 0, 0 };\n   *   sock.async_wait(tcp::socket::wait_write, op);\n   * } @endcode\n   */\n  bool native_non_blocking() const\n  {\n    return impl_.get_service().native_non_blocking(impl_.get_implementation());\n  }\n\n  /// Sets the non-blocking mode of the native socket implementation.\n  /**\n   * This function is used to modify the non-blocking mode of the underlying\n   * native socket. It has no effect on the behaviour of the socket object's\n   * synchronous operations.\n   *\n   * @param mode If @c true, the underlying socket is put into non-blocking\n   * mode and direct system calls may fail with asio::error::would_block\n   * (or the equivalent system error).\n   *\n   * @throws asio::system_error Thrown on failure. 
If the @c mode is\n   * @c false, but the current value of @c non_blocking() is @c true, this\n   * function fails with asio::error::invalid_argument, as the\n   * combination does not make sense.\n   *\n   * @par Example\n   * This function is intended to allow the encapsulation of arbitrary\n   * non-blocking system calls as asynchronous operations, in a way that is\n   * transparent to the user of the socket object. The following example\n   * illustrates how Linux's @c sendfile system call might be encapsulated:\n   * @code template <typename Handler>\n   * struct sendfile_op\n   * {\n   *   tcp::socket& sock_;\n   *   int fd_;\n   *   Handler handler_;\n   *   off_t offset_;\n   *   std::size_t total_bytes_transferred_;\n   *\n   *   // Function call operator meeting WriteHandler requirements.\n   *   // Used as the handler for the async_write_some operation.\n   *   void operator()(asio::error_code ec, std::size_t)\n   *   {\n   *     // Put the underlying socket into non-blocking mode.\n   *     if (!ec)\n   *       if (!sock_.native_non_blocking())\n   *         sock_.native_non_blocking(true, ec);\n   *\n   *     if (!ec)\n   *     {\n   *       for (;;)\n   *       {\n   *         // Try the system call.\n   *         errno = 0;\n   *         int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536);\n   *         ec = asio::error_code(n < 0 ? errno : 0,\n   *             asio::error::get_system_category());\n   *         total_bytes_transferred_ += ec ? 
0 : n;\n   *\n   *         // Retry operation immediately if interrupted by signal.\n   *         if (ec == asio::error::interrupted)\n   *           continue;\n   *\n   *         // Check if we need to run the operation again.\n   *         if (ec == asio::error::would_block\n   *             || ec == asio::error::try_again)\n   *         {\n   *           // We have to wait for the socket to become ready again.\n   *           sock_.async_wait(tcp::socket::wait_write, *this);\n   *           return;\n   *         }\n   *\n   *         if (ec || n == 0)\n   *         {\n   *           // An error occurred, or we have reached the end of the file.\n   *           // Either way we must exit the loop so we can call the handler.\n   *           break;\n   *         }\n   *\n   *         // Loop around to try calling sendfile again.\n   *       }\n   *     }\n   *\n   *     // Pass result back to user's handler.\n   *     handler_(ec, total_bytes_transferred_);\n   *   }\n   * };\n   *\n   * template <typename Handler>\n   * void async_sendfile(tcp::socket& sock, int fd, Handler h)\n   * {\n   *   sendfile_op<Handler> op = { sock, fd, h, 0, 0 };\n   *   sock.async_wait(tcp::socket::wait_write, op);\n   * } @endcode\n   */\n  void native_non_blocking(bool mode)\n  {\n    asio::error_code ec;\n    impl_.get_service().native_non_blocking(\n        impl_.get_implementation(), mode, ec);\n    asio::detail::throw_error(ec, \"native_non_blocking\");\n  }\n\n  /// Sets the non-blocking mode of the native socket implementation.\n  /**\n   * This function is used to modify the non-blocking mode of the underlying\n   * native socket. It has no effect on the behaviour of the socket object's\n   * synchronous operations.\n   *\n   * @param mode If @c true, the underlying socket is put into non-blocking\n   * mode and direct system calls may fail with asio::error::would_block\n   * (or the equivalent system error).\n   *\n   * @param ec Set to indicate what error occurred, if any. 
If the @c mode is\n   * @c false, but the current value of @c non_blocking() is @c true, this\n   * function fails with asio::error::invalid_argument, as the\n   * combination does not make sense.\n   *\n   * @par Example\n   * This function is intended to allow the encapsulation of arbitrary\n   * non-blocking system calls as asynchronous operations, in a way that is\n   * transparent to the user of the socket object. The following example\n   * illustrates how Linux's @c sendfile system call might be encapsulated:\n   * @code template <typename Handler>\n   * struct sendfile_op\n   * {\n   *   tcp::socket& sock_;\n   *   int fd_;\n   *   Handler handler_;\n   *   off_t offset_;\n   *   std::size_t total_bytes_transferred_;\n   *\n   *   // Function call operator meeting WriteHandler requirements.\n   *   // Used as the handler for the async_write_some operation.\n   *   void operator()(asio::error_code ec, std::size_t)\n   *   {\n   *     // Put the underlying socket into non-blocking mode.\n   *     if (!ec)\n   *       if (!sock_.native_non_blocking())\n   *         sock_.native_non_blocking(true, ec);\n   *\n   *     if (!ec)\n   *     {\n   *       for (;;)\n   *       {\n   *         // Try the system call.\n   *         errno = 0;\n   *         int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536);\n   *         ec = asio::error_code(n < 0 ? errno : 0,\n   *             asio::error::get_system_category());\n   *         total_bytes_transferred_ += ec ? 
0 : n;\n   *\n   *         // Retry operation immediately if interrupted by signal.\n   *         if (ec == asio::error::interrupted)\n   *           continue;\n   *\n   *         // Check if we need to run the operation again.\n   *         if (ec == asio::error::would_block\n   *             || ec == asio::error::try_again)\n   *         {\n   *           // We have to wait for the socket to become ready again.\n   *           sock_.async_wait(tcp::socket::wait_write, *this);\n   *           return;\n   *         }\n   *\n   *         if (ec || n == 0)\n   *         {\n   *           // An error occurred, or we have reached the end of the file.\n   *           // Either way we must exit the loop so we can call the handler.\n   *           break;\n   *         }\n   *\n   *         // Loop around to try calling sendfile again.\n   *       }\n   *     }\n   *\n   *     // Pass result back to user's handler.\n   *     handler_(ec, total_bytes_transferred_);\n   *   }\n   * };\n   *\n   * template <typename Handler>\n   * void async_sendfile(tcp::socket& sock, int fd, Handler h)\n   * {\n   *   sendfile_op<Handler> op = { sock, fd, h, 0, 0 };\n   *   sock.async_wait(tcp::socket::wait_write, op);\n   * } @endcode\n   */\n  ASIO_SYNC_OP_VOID native_non_blocking(\n      bool mode, asio::error_code& ec)\n  {\n    impl_.get_service().native_non_blocking(\n        impl_.get_implementation(), mode, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get the local endpoint of the socket.\n  /**\n   * This function is used to obtain the locally bound endpoint of the socket.\n   *\n   * @returns An object that represents the local endpoint of the socket.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint = socket.local_endpoint();\n   * @endcode\n   */\n  endpoint_type local_endpoint() const\n  {\n    asio::error_code ec;\n    
endpoint_type ep = impl_.get_service().local_endpoint(\n        impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"local_endpoint\");\n    return ep;\n  }\n\n  /// Get the local endpoint of the socket.\n  /**\n   * This function is used to obtain the locally bound endpoint of the socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns An object that represents the local endpoint of the socket.\n   * Returns a default-constructed endpoint object if an error occurred.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::error_code ec;\n   * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  endpoint_type local_endpoint(asio::error_code& ec) const\n  {\n    return impl_.get_service().local_endpoint(impl_.get_implementation(), ec);\n  }\n\n  /// Get the remote endpoint of the socket.\n  /**\n   * This function is used to obtain the remote endpoint of the socket.\n   *\n   * @returns An object that represents the remote endpoint of the socket.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint();\n   * @endcode\n   */\n  endpoint_type remote_endpoint() const\n  {\n    asio::error_code ec;\n    endpoint_type ep = impl_.get_service().remote_endpoint(\n        impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"remote_endpoint\");\n    return ep;\n  }\n\n  /// Get the remote endpoint of the socket.\n  /**\n   * This function is used to obtain the remote endpoint of the socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns An object that represents the remote endpoint of the socket.\n   * Returns a default-constructed endpoint object if an error 
occurred.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::error_code ec;\n   * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  endpoint_type remote_endpoint(asio::error_code& ec) const\n  {\n    return impl_.get_service().remote_endpoint(impl_.get_implementation(), ec);\n  }\n\n  /// Disable sends or receives on the socket.\n  /**\n   * This function is used to disable send operations, receive operations, or\n   * both.\n   *\n   * @param what Determines what types of operation will no longer be allowed.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * Shutting down the send side of the socket:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * socket.shutdown(asio::ip::tcp::socket::shutdown_send);\n   * @endcode\n   */\n  void shutdown(shutdown_type what)\n  {\n    asio::error_code ec;\n    impl_.get_service().shutdown(impl_.get_implementation(), what, ec);\n    asio::detail::throw_error(ec, \"shutdown\");\n  }\n\n  /// Disable sends or receives on the socket.\n  /**\n   * This function is used to disable send operations, receive operations, or\n   * both.\n   *\n   * @param what Determines what types of operation will no longer be allowed.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * Shutting down the send side of the socket:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::error_code ec;\n   * socket.shutdown(asio::ip::tcp::socket::shutdown_send, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID shutdown(shutdown_type what,\n      asio::error_code& ec)\n  {\n    impl_.get_service().shutdown(impl_.get_implementation(), what, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Wait for the socket to 
become ready to read, ready to write, or to have\n  /// pending error conditions.\n  /**\n   * This function is used to perform a blocking wait for a socket to enter\n   * a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired socket state.\n   *\n   * @par Example\n   * Waiting for a socket to become readable.\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * socket.wait(asio::ip::tcp::socket::wait_read);\n   * @endcode\n   */\n  void wait(wait_type w)\n  {\n    asio::error_code ec;\n    impl_.get_service().wait(impl_.get_implementation(), w, ec);\n    asio::detail::throw_error(ec, \"wait\");\n  }\n\n  /// Wait for the socket to become ready to read, ready to write, or to have\n  /// pending error conditions.\n  /**\n   * This function is used to perform a blocking wait for a socket to enter\n   * a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired socket state.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * Waiting for a socket to become readable.\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::error_code ec;\n   * socket.wait(asio::ip::tcp::socket::wait_read, ec);\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID wait(wait_type w, asio::error_code& ec)\n  {\n    impl_.get_service().wait(impl_.get_implementation(), w, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Asynchronously wait for the socket to become ready to read, ready to\n  /// write, or to have pending error conditions.\n  /**\n   * This function is used to perform an asynchronous wait for a socket to enter\n   * a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired socket state.\n   *\n   * @param handler The handler to be called when the wait operation completes.\n   * Copies will be made of the handler as required. 
The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void wait_handler(const asio::error_code& error)\n   * {\n   *   if (!error)\n   *   {\n   *     // Wait succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * socket.async_wait(asio::ip::tcp::socket::wait_read, wait_handler);\n   * @endcode\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        WaitHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WaitHandler,\n      void (asio::error_code))\n  async_wait(wait_type w,\n      ASIO_MOVE_ARG(WaitHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WaitHandler, void (asio::error_code)>(\n        initiate_async_wait(this), handler, w);\n  }\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  /**\n   * This function destroys the socket, cancelling any outstanding asynchronous\n   * operations associated with the socket as if by calling @c cancel.\n   */\n  ~basic_socket()\n  {\n  }\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n  detail::io_object_impl<\n    detail::null_socket_service<Protocol>, Executor> impl_;\n#elif defined(ASIO_HAS_IOCP)\n  detail::io_object_impl<\n    detail::win_iocp_socket_service<Protocol>, Executor> impl_;\n#else\n  detail::io_object_impl<\n    detail::reactive_socket_service<Protocol>, Executor> impl_;\n#endif\n\nprivate:\n  // Disallow copying and assignment.\n  basic_socket(const basic_socket&) ASIO_DELETED;\n  
basic_socket& operator=(const basic_socket&) ASIO_DELETED;\n\n  class initiate_async_connect\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_connect(basic_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ConnectHandler>\n    void operator()(ASIO_MOVE_ARG(ConnectHandler) handler,\n        const endpoint_type& peer_endpoint,\n        const asio::error_code& open_ec) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ConnectHandler.\n      ASIO_CONNECT_HANDLER_CHECK(ConnectHandler, handler) type_check;\n\n      if (open_ec)\n      {\n          asio::post(self_->impl_.get_executor(),\n              asio::detail::bind_handler(\n                ASIO_MOVE_CAST(ConnectHandler)(handler), open_ec));\n      }\n      else\n      {\n        detail::non_const_lvalue<ConnectHandler> handler2(handler);\n        self_->impl_.get_service().async_connect(\n            self_->impl_.get_implementation(), peer_endpoint,\n            handler2.value, self_->impl_.get_implementation_executor());\n      }\n    }\n\n  private:\n    basic_socket* self_;\n  };\n\n  class initiate_async_wait\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_wait(basic_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WaitHandler>\n    void operator()(ASIO_MOVE_ARG(WaitHandler) handler, wait_type w) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WaitHandler.\n      ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check;\n\n      detail::non_const_lvalue<WaitHandler> 
handler2(handler);\n      self_->impl_.get_service().async_wait(\n          self_->impl_.get_implementation(), w, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_socket* self_;\n  };\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BASIC_SOCKET_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_socket_acceptor.hpp",
    "content": "//\n// basic_socket_acceptor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_SOCKET_ACCEPTOR_HPP\n#define ASIO_BASIC_SOCKET_ACCEPTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/basic_socket.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/socket_base.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include \"asio/detail/null_socket_service.hpp\"\n#elif defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_socket_service.hpp\"\n#else\n# include \"asio/detail/reactive_socket_service.hpp\"\n#endif\n\n#if defined(ASIO_HAS_MOVE)\n# include <utility>\n#endif // defined(ASIO_HAS_MOVE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL)\n#define ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_socket_acceptor;\n\n#endif // !defined(ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL)\n\n/// Provides the ability to accept new connections.\n/**\n * The basic_socket_acceptor class template is used for accepting new socket\n * connections.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Example\n * Opening a socket acceptor with the SO_REUSEADDR option enabled:\n 
* @code\n * asio::ip::tcp::acceptor acceptor(my_context);\n * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), port);\n * acceptor.open(endpoint.protocol());\n * acceptor.set_option(asio::ip::tcp::acceptor::reuse_address(true));\n * acceptor.bind(endpoint);\n * acceptor.listen();\n * @endcode\n */\ntemplate <typename Protocol, typename Executor>\nclass basic_socket_acceptor\n  : public socket_base\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the acceptor type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The socket type when rebound to the specified executor.\n    typedef basic_socket_acceptor<Protocol, Executor1> other;\n  };\n\n  /// The native representation of an acceptor.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#elif defined(ASIO_WINDOWS_RUNTIME)\n  typedef typename detail::null_socket_service<\n    Protocol>::native_handle_type native_handle_type;\n#elif defined(ASIO_HAS_IOCP)\n  typedef typename detail::win_iocp_socket_service<\n    Protocol>::native_handle_type native_handle_type;\n#else\n  typedef typename detail::reactive_socket_service<\n    Protocol>::native_handle_type native_handle_type;\n#endif\n\n  /// The protocol type.\n  typedef Protocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  /// Construct an acceptor without opening it.\n  /**\n   * This constructor creates an acceptor without opening it to listen for new\n   * connections. 
The open() function must be called before the acceptor can\n   * accept new socket connections.\n   *\n   * @param ex The I/O executor that the acceptor will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * acceptor.\n   */\n  explicit basic_socket_acceptor(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Construct an acceptor without opening it.\n  /**\n   * This constructor creates an acceptor without opening it to listen for new\n   * connections. The open() function must be called before the acceptor can\n   * accept new socket connections.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the acceptor will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the acceptor.\n   */\n  template <typename ExecutionContext>\n  explicit basic_socket_acceptor(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Construct an open acceptor.\n  /**\n   * This constructor creates an acceptor and automatically opens it.\n   *\n   * @param ex The I/O executor that the acceptor will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * acceptor.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_socket_acceptor(const executor_type& ex, const protocol_type& protocol)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Construct an open acceptor.\n  /**\n   * This constructor creates an acceptor and automatically opens it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the acceptor will use, by 
default, to dispatch handlers for any\n   * asynchronous operations performed on the acceptor.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_socket_acceptor(ExecutionContext& context,\n      const protocol_type& protocol,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Construct an acceptor opened on the given endpoint.\n  /**\n   * This constructor creates an acceptor and automatically opens it to listen\n   * for new connections on the specified endpoint.\n   *\n   * @param ex The I/O executor that the acceptor will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * acceptor.\n   *\n   * @param endpoint An endpoint on the local machine on which the acceptor\n   * will listen for new connections.\n   *\n   * @param reuse_addr Whether the constructor should set the socket option\n   * socket_base::reuse_address.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note This constructor is equivalent to the following code:\n   * @code\n   * basic_socket_acceptor<Protocol> acceptor(my_context);\n   * acceptor.open(endpoint.protocol());\n   * if (reuse_addr)\n   *   acceptor.set_option(socket_base::reuse_address(true));\n   * acceptor.bind(endpoint);\n   * acceptor.listen();\n   * @endcode\n   */\n  basic_socket_acceptor(const executor_type& ex,\n      const endpoint_type& endpoint, bool reuse_addr = true)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    const protocol_type protocol = endpoint.protocol();\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    
asio::detail::throw_error(ec, \"open\");\n    if (reuse_addr)\n    {\n      impl_.get_service().set_option(impl_.get_implementation(),\n          socket_base::reuse_address(true), ec);\n      asio::detail::throw_error(ec, \"set_option\");\n    }\n    impl_.get_service().bind(impl_.get_implementation(), endpoint, ec);\n    asio::detail::throw_error(ec, \"bind\");\n    impl_.get_service().listen(impl_.get_implementation(),\n        socket_base::max_listen_connections, ec);\n    asio::detail::throw_error(ec, \"listen\");\n  }\n\n  /// Construct an acceptor opened on the given endpoint.\n  /**\n   * This constructor creates an acceptor and automatically opens it to listen\n   * for new connections on the specified endpoint.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the acceptor will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the acceptor.\n   *\n   * @param endpoint An endpoint on the local machine on which the acceptor\n   * will listen for new connections.\n   *\n   * @param reuse_addr Whether the constructor should set the socket option\n   * socket_base::reuse_address.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note This constructor is equivalent to the following code:\n   * @code\n   * basic_socket_acceptor<Protocol> acceptor(my_context);\n   * acceptor.open(endpoint.protocol());\n   * if (reuse_addr)\n   *   acceptor.set_option(socket_base::reuse_address(true));\n   * acceptor.bind(endpoint);\n   * acceptor.listen();\n   * @endcode\n   */\n  template <typename ExecutionContext>\n  basic_socket_acceptor(ExecutionContext& context,\n      const endpoint_type& endpoint, bool reuse_addr = true,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    const protocol_type protocol = endpoint.protocol();\n    
impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n    if (reuse_addr)\n    {\n      impl_.get_service().set_option(impl_.get_implementation(),\n          socket_base::reuse_address(true), ec);\n      asio::detail::throw_error(ec, \"set_option\");\n    }\n    impl_.get_service().bind(impl_.get_implementation(), endpoint, ec);\n    asio::detail::throw_error(ec, \"bind\");\n    impl_.get_service().listen(impl_.get_implementation(),\n        socket_base::max_listen_connections, ec);\n    asio::detail::throw_error(ec, \"listen\");\n  }\n\n  /// Construct a basic_socket_acceptor on an existing native acceptor.\n  /**\n   * This constructor creates an acceptor object to hold an existing native\n   * acceptor.\n   *\n   * @param ex The I/O executor that the acceptor will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * acceptor.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param native_acceptor A native acceptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_socket_acceptor(const executor_type& ex,\n      const protocol_type& protocol, const native_handle_type& native_acceptor)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        protocol, native_acceptor, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Construct a basic_socket_acceptor on an existing native acceptor.\n  /**\n   * This constructor creates an acceptor object to hold an existing native\n   * acceptor.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the acceptor will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the acceptor.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param native_acceptor A native 
acceptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_socket_acceptor(ExecutionContext& context,\n      const protocol_type& protocol, const native_handle_type& native_acceptor,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        protocol, native_acceptor, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_socket_acceptor from another.\n  /**\n   * This constructor moves an acceptor from one object to another.\n   *\n   * @param other The other basic_socket_acceptor object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_socket_acceptor(const executor_type&)\n   * constructor.\n   */\n  basic_socket_acceptor(basic_socket_acceptor&& other)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a basic_socket_acceptor from another.\n  /**\n   * This assignment operator moves an acceptor from one object to another.\n   *\n   * @param other The other basic_socket_acceptor object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_socket_acceptor(const executor_type&)\n   * constructor.\n   */\n  basic_socket_acceptor& operator=(basic_socket_acceptor&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n\n  // All socket acceptors have access to each other's implementations.\n  template <typename Protocol1, typename Executor1>\n  friend class basic_socket_acceptor;\n\n  /// Move-construct a basic_socket_acceptor from an acceptor of another\n  /// protocol 
type.\n  /**\n   * This constructor moves an acceptor from one object to another.\n   *\n   * @param other The other basic_socket_acceptor object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_socket_acceptor(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  basic_socket_acceptor(basic_socket_acceptor<Protocol1, Executor1>&& other,\n      typename enable_if<\n        is_convertible<Protocol1, Protocol>::value\n          && is_convertible<Executor1, Executor>::value\n      >::type* = 0)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a basic_socket_acceptor from an acceptor of another protocol\n  /// type.\n  /**\n   * This assignment operator moves an acceptor from one object to another.\n   *\n   * @param other The other basic_socket_acceptor object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_socket_acceptor(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  typename enable_if<\n    is_convertible<Protocol1, Protocol>::value\n      && is_convertible<Executor1, Executor>::value,\n    basic_socket_acceptor&\n  >::type operator=(basic_socket_acceptor<Protocol1, Executor1>&& other)\n  {\n    basic_socket_acceptor tmp(std::move(other));\n    impl_ = std::move(tmp.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the acceptor.\n  /**\n   * This function destroys the acceptor, cancelling any outstanding\n   * asynchronous operations associated with the acceptor as if by calling\n   * @c cancel.\n   */\n  ~basic_socket_acceptor()\n  {\n  }\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    
return impl_.get_executor();\n  }\n\n  /// Open the acceptor using the specified protocol.\n  /**\n   * This function opens the socket acceptor so that it will use the specified\n   * protocol.\n   *\n   * @param protocol An object specifying which protocol is to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * acceptor.open(asio::ip::tcp::v4());\n   * @endcode\n   */\n  void open(const protocol_type& protocol = protocol_type())\n  {\n    asio::error_code ec;\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    asio::detail::throw_error(ec, \"open\");\n  }\n\n  /// Open the acceptor using the specified protocol.\n  /**\n   * This function opens the socket acceptor so that it will use the specified\n   * protocol.\n   *\n   * @param protocol An object specifying which protocol is to be used.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * asio::error_code ec;\n   * acceptor.open(asio::ip::tcp::v4(), ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID open(const protocol_type& protocol,\n      asio::error_code& ec)\n  {\n    impl_.get_service().open(impl_.get_implementation(), protocol, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Assigns an existing native acceptor to the acceptor.\n  /*\n   * This function opens the acceptor to hold an existing native acceptor.\n   *\n   * @param protocol An object specifying which protocol is to be used.\n   *\n   * @param native_acceptor A native acceptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void assign(const protocol_type& protocol,\n      const native_handle_type& native_acceptor)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        
protocol, native_acceptor, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Assigns an existing native acceptor to the acceptor.\n  /*\n   * This function opens the acceptor to hold an existing native acceptor.\n   *\n   * @param protocol An object specifying which protocol is to be used.\n   *\n   * @param native_acceptor A native acceptor.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID assign(const protocol_type& protocol,\n      const native_handle_type& native_acceptor, asio::error_code& ec)\n  {\n    impl_.get_service().assign(impl_.get_implementation(),\n        protocol, native_acceptor, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Determine whether the acceptor is open.\n  bool is_open() const\n  {\n    return impl_.get_service().is_open(impl_.get_implementation());\n  }\n\n  /// Bind the acceptor to the given local endpoint.\n  /**\n   * This function binds the socket acceptor to the specified endpoint on the\n   * local machine.\n   *\n   * @param endpoint An endpoint on the local machine to which the socket\n   * acceptor will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 12345);\n   * acceptor.open(endpoint.protocol());\n   * acceptor.bind(endpoint);\n   * @endcode\n   */\n  void bind(const endpoint_type& endpoint)\n  {\n    asio::error_code ec;\n    impl_.get_service().bind(impl_.get_implementation(), endpoint, ec);\n    asio::detail::throw_error(ec, \"bind\");\n  }\n\n  /// Bind the acceptor to the given local endpoint.\n  /**\n   * This function binds the socket acceptor to the specified endpoint on the\n   * local machine.\n   *\n   * @param endpoint An endpoint on the local machine to which the socket\n   * acceptor will be bound.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   
* @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 12345);\n   * acceptor.open(endpoint.protocol());\n   * asio::error_code ec;\n   * acceptor.bind(endpoint, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID bind(const endpoint_type& endpoint,\n      asio::error_code& ec)\n  {\n    impl_.get_service().bind(impl_.get_implementation(), endpoint, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Place the acceptor into the state where it will listen for new\n  /// connections.\n  /**\n   * This function puts the socket acceptor into the state where it may accept\n   * new connections.\n   *\n   * @param backlog The maximum length of the queue of pending connections.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void listen(int backlog = socket_base::max_listen_connections)\n  {\n    asio::error_code ec;\n    impl_.get_service().listen(impl_.get_implementation(), backlog, ec);\n    asio::detail::throw_error(ec, \"listen\");\n  }\n\n  /// Place the acceptor into the state where it will listen for new\n  /// connections.\n  /**\n   * This function puts the socket acceptor into the state where it may accept\n   * new connections.\n   *\n   * @param backlog The maximum length of the queue of pending connections.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::error_code ec;\n   * acceptor.listen(asio::socket_base::max_listen_connections, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID listen(int backlog, asio::error_code& ec)\n  {\n    impl_.get_service().listen(impl_.get_implementation(), backlog, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Close the acceptor.\n  /**\n   * This function is used to close the 
acceptor. Any asynchronous accept\n   * operations will be cancelled immediately.\n   *\n   * A subsequent call to open() is required before the acceptor can again be\n   * used to again perform socket accept operations.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void close()\n  {\n    asio::error_code ec;\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"close\");\n  }\n\n  /// Close the acceptor.\n  /**\n   * This function is used to close the acceptor. Any asynchronous accept\n   * operations will be cancelled immediately.\n   *\n   * A subsequent call to open() is required before the acceptor can again be\n   * used to again perform socket accept operations.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::error_code ec;\n   * acceptor.close(ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Release ownership of the underlying native acceptor.\n  /**\n   * This function causes all outstanding asynchronous accept operations to\n   * finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error. 
Ownership of the\n   * native acceptor is then transferred to the caller.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note This function is unsupported on Windows versions prior to Windows\n   * 8.1, and will fail with asio::error::operation_not_supported on\n   * these platforms.\n   */\n#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \\\n  && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0603)\n  __declspec(deprecated(\"This function always fails with \"\n        \"operation_not_supported when used on Windows versions \"\n        \"prior to Windows 8.1.\"))\n#endif\n  native_handle_type release()\n  {\n    asio::error_code ec;\n    native_handle_type s = impl_.get_service().release(\n        impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"release\");\n    return s;\n  }\n\n  /// Release ownership of the underlying native acceptor.\n  /**\n   * This function causes all outstanding asynchronous accept operations to\n   * finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error. Ownership of the\n   * native acceptor is then transferred to the caller.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note This function is unsupported on Windows versions prior to Windows\n   * 8.1, and will fail with asio::error::operation_not_supported on\n   * these platforms.\n   */\n#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \\\n  && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0603)\n  __declspec(deprecated(\"This function always fails with \"\n        \"operation_not_supported when used on Windows versions \"\n        \"prior to Windows 8.1.\"))\n#endif\n  native_handle_type release(asio::error_code& ec)\n  {\n    return impl_.get_service().release(impl_.get_implementation(), ec);\n  }\n\n  /// Get the native acceptor representation.\n  /**\n   * This function may be used to obtain the underlying representation of the\n   * acceptor. 
This is intended to allow access to native acceptor functionality\n   * that is not otherwise provided.\n   */\n  native_handle_type native_handle()\n  {\n    return impl_.get_service().native_handle(impl_.get_implementation());\n  }\n\n  /// Cancel all asynchronous operations associated with the acceptor.\n  /**\n   * This function causes all outstanding asynchronous connect, send and receive\n   * operations to finish immediately, and the handlers for cancelled operations\n   * will be passed the asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void cancel()\n  {\n    asio::error_code ec;\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n  }\n\n  /// Cancel all asynchronous operations associated with the acceptor.\n  /**\n   * This function causes all outstanding asynchronous connect, send and receive\n   * operations to finish immediately, and the handlers for cancelled operations\n   * will be passed the asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID cancel(asio::error_code& ec)\n  {\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Set an option on the acceptor.\n  /**\n   * This function is used to set an option on the acceptor.\n   *\n   * @param option The new option value to be set on the acceptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa SettableSocketOption @n\n   * asio::socket_base::reuse_address\n   * asio::socket_base::enable_connection_aborted\n   *\n   * @par Example\n   * Setting the SOL_SOCKET/SO_REUSEADDR option:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::acceptor::reuse_address option(true);\n   * acceptor.set_option(option);\n   * @endcode\n   */\n  template <typename SettableSocketOption>\n  
void set_option(const SettableSocketOption& option)\n  {\n    asio::error_code ec;\n    impl_.get_service().set_option(impl_.get_implementation(), option, ec);\n    asio::detail::throw_error(ec, \"set_option\");\n  }\n\n  /// Set an option on the acceptor.\n  /**\n   * This function is used to set an option on the acceptor.\n   *\n   * @param option The new option value to be set on the acceptor.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa SettableSocketOption @n\n   * asio::socket_base::reuse_address\n   * asio::socket_base::enable_connection_aborted\n   *\n   * @par Example\n   * Setting the SOL_SOCKET/SO_REUSEADDR option:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::acceptor::reuse_address option(true);\n   * asio::error_code ec;\n   * acceptor.set_option(option, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename SettableSocketOption>\n  ASIO_SYNC_OP_VOID set_option(const SettableSocketOption& option,\n      asio::error_code& ec)\n  {\n    impl_.get_service().set_option(impl_.get_implementation(), option, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get an option from the acceptor.\n  /**\n   * This function is used to get the current value of an option on the\n   * acceptor.\n   *\n   * @param option The option value to be obtained from the acceptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa GettableSocketOption @n\n   * asio::socket_base::reuse_address\n   *\n   * @par Example\n   * Getting the value of the SOL_SOCKET/SO_REUSEADDR option:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::acceptor::reuse_address option;\n   * acceptor.get_option(option);\n   * bool is_set = option.get();\n   * @endcode\n   */\n  template <typename GettableSocketOption>\n  void get_option(GettableSocketOption& option) const\n  {\n    asio::error_code 
ec;\n    impl_.get_service().get_option(impl_.get_implementation(), option, ec);\n    asio::detail::throw_error(ec, \"get_option\");\n  }\n\n  /// Get an option from the acceptor.\n  /**\n   * This function is used to get the current value of an option on the\n   * acceptor.\n   *\n   * @param option The option value to be obtained from the acceptor.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa GettableSocketOption @n\n   * asio::socket_base::reuse_address\n   *\n   * @par Example\n   * Getting the value of the SOL_SOCKET/SO_REUSEADDR option:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::acceptor::reuse_address option;\n   * asio::error_code ec;\n   * acceptor.get_option(option, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * bool is_set = option.get();\n   * @endcode\n   */\n  template <typename GettableSocketOption>\n  ASIO_SYNC_OP_VOID get_option(GettableSocketOption& option,\n      asio::error_code& ec) const\n  {\n    impl_.get_service().get_option(impl_.get_implementation(), option, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Perform an IO control command on the acceptor.\n  /**\n   * This function is used to execute an IO control command on the acceptor.\n   *\n   * @param command The IO control command to be performed on the acceptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa IoControlCommand @n\n   * asio::socket_base::non_blocking_io\n   *\n   * @par Example\n   * Getting the number of bytes ready to read:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::acceptor::non_blocking_io command(true);\n   * socket.io_control(command);\n   * @endcode\n   */\n  template <typename IoControlCommand>\n  void io_control(IoControlCommand& command)\n  {\n    asio::error_code ec;\n    impl_.get_service().io_control(impl_.get_implementation(), command, ec);\n    
asio::detail::throw_error(ec, \"io_control\");\n  }\n\n  /// Perform an IO control command on the acceptor.\n  /**\n   * This function is used to execute an IO control command on the acceptor.\n   *\n   * @param command The IO control command to be performed on the acceptor.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa IoControlCommand @n\n   * asio::socket_base::non_blocking_io\n   *\n   * @par Example\n   * Getting the number of bytes ready to read:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::acceptor::non_blocking_io command(true);\n   * asio::error_code ec;\n   * socket.io_control(command, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename IoControlCommand>\n  ASIO_SYNC_OP_VOID io_control(IoControlCommand& command,\n      asio::error_code& ec)\n  {\n    impl_.get_service().io_control(impl_.get_implementation(), command, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Gets the non-blocking mode of the acceptor.\n  /**\n   * @returns @c true if the acceptor's synchronous operations will fail with\n   * asio::error::would_block if they are unable to perform the requested\n   * operation immediately. If @c false, synchronous operations will block\n   * until complete.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  bool non_blocking() const\n  {\n    return impl_.get_service().non_blocking(impl_.get_implementation());\n  }\n\n  /// Sets the non-blocking mode of the acceptor.\n  /**\n   * @param mode If @c true, the acceptor's synchronous operations will fail\n   * with asio::error::would_block if they are unable to perform the\n   * requested operation immediately. 
If @c false, synchronous operations will\n   * block until complete.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  void non_blocking(bool mode)\n  {\n    asio::error_code ec;\n    impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec);\n    asio::detail::throw_error(ec, \"non_blocking\");\n  }\n\n  /// Sets the non-blocking mode of the acceptor.\n  /**\n   * @param mode If @c true, the acceptor's synchronous operations will fail\n   * with asio::error::would_block if they are unable to perform the\n   * requested operation immediately. If @c false, synchronous operations will\n   * block until complete.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  ASIO_SYNC_OP_VOID non_blocking(\n      bool mode, asio::error_code& ec)\n  {\n    impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Gets the non-blocking mode of the native acceptor implementation.\n  /**\n   * This function is used to retrieve the non-blocking mode of the underlying\n   * native acceptor. 
This mode has no effect on the behaviour of the acceptor\n   * object's synchronous operations.\n   *\n   * @returns @c true if the underlying acceptor is in non-blocking mode and\n   * direct system calls may fail with asio::error::would_block (or the\n   * equivalent system error).\n   *\n   * @note The current non-blocking mode is cached by the acceptor object.\n   * Consequently, the return value may be incorrect if the non-blocking mode\n   * was set directly on the native acceptor.\n   */\n  bool native_non_blocking() const\n  {\n    return impl_.get_service().native_non_blocking(impl_.get_implementation());\n  }\n\n  /// Sets the non-blocking mode of the native acceptor implementation.\n  /**\n   * This function is used to modify the non-blocking mode of the underlying\n   * native acceptor. It has no effect on the behaviour of the acceptor object's\n   * synchronous operations.\n   *\n   * @param mode If @c true, the underlying acceptor is put into non-blocking\n   * mode and direct system calls may fail with asio::error::would_block\n   * (or the equivalent system error).\n   *\n   * @throws asio::system_error Thrown on failure. If the @c mode is\n   * @c false, but the current value of @c non_blocking() is @c true, this\n   * function fails with asio::error::invalid_argument, as the\n   * combination does not make sense.\n   */\n  void native_non_blocking(bool mode)\n  {\n    asio::error_code ec;\n    impl_.get_service().native_non_blocking(\n        impl_.get_implementation(), mode, ec);\n    asio::detail::throw_error(ec, \"native_non_blocking\");\n  }\n\n  /// Sets the non-blocking mode of the native acceptor implementation.\n  /**\n   * This function is used to modify the non-blocking mode of the underlying\n   * native acceptor. 
It has no effect on the behaviour of the acceptor object's\n   * synchronous operations.\n   *\n   * @param mode If @c true, the underlying acceptor is put into non-blocking\n   * mode and direct system calls may fail with asio::error::would_block\n   * (or the equivalent system error).\n   *\n   * @param ec Set to indicate what error occurred, if any. If the @c mode is\n   * @c false, but the current value of @c non_blocking() is @c true, this\n   * function fails with asio::error::invalid_argument, as the\n   * combination does not make sense.\n   */\n  ASIO_SYNC_OP_VOID native_non_blocking(\n      bool mode, asio::error_code& ec)\n  {\n    impl_.get_service().native_non_blocking(\n        impl_.get_implementation(), mode, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get the local endpoint of the acceptor.\n  /**\n   * This function is used to obtain the locally bound endpoint of the acceptor.\n   *\n   * @returns An object that represents the local endpoint of the acceptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint = acceptor.local_endpoint();\n   * @endcode\n   */\n  endpoint_type local_endpoint() const\n  {\n    asio::error_code ec;\n    endpoint_type ep = impl_.get_service().local_endpoint(\n        impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"local_endpoint\");\n    return ep;\n  }\n\n  /// Get the local endpoint of the acceptor.\n  /**\n   * This function is used to obtain the locally bound endpoint of the acceptor.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns An object that represents the local endpoint of the acceptor.\n   * Returns a default-constructed endpoint object if an error occurred and the\n   * error handler did not throw an exception.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor 
acceptor(my_context);\n   * ...\n   * asio::error_code ec;\n   * asio::ip::tcp::endpoint endpoint = acceptor.local_endpoint(ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  endpoint_type local_endpoint(asio::error_code& ec) const\n  {\n    return impl_.get_service().local_endpoint(impl_.get_implementation(), ec);\n  }\n\n  /// Wait for the acceptor to become ready to read, ready to write, or to have\n  /// pending error conditions.\n  /**\n   * This function is used to perform a blocking wait for an acceptor to enter\n   * a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired acceptor state.\n   *\n   * @par Example\n   * Waiting for an acceptor to become readable.\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * acceptor.wait(asio::ip::tcp::acceptor::wait_read);\n   * @endcode\n   */\n  void wait(wait_type w)\n  {\n    asio::error_code ec;\n    impl_.get_service().wait(impl_.get_implementation(), w, ec);\n    asio::detail::throw_error(ec, \"wait\");\n  }\n\n  /// Wait for the acceptor to become ready to read, ready to write, or to have\n  /// pending error conditions.\n  /**\n   * This function is used to perform a blocking wait for an acceptor to enter\n   * a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired acceptor state.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * Waiting for an acceptor to become readable.\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::error_code ec;\n   * acceptor.wait(asio::ip::tcp::acceptor::wait_read, ec);\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID wait(wait_type w, asio::error_code& ec)\n  {\n    impl_.get_service().wait(impl_.get_implementation(), w, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Asynchronously wait for the acceptor to become ready to read, ready to\n  /// write, or to have 
pending error conditions.\n  /**\n   * This function is used to perform an asynchronous wait for an acceptor to\n   * enter a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired acceptor state.\n   *\n   * @param handler The handler to be called when the wait operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void wait_handler(const asio::error_code& error)\n   * {\n   *   if (!error)\n   *   {\n   *     // Wait succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * acceptor.async_wait(\n   *     asio::ip::tcp::acceptor::wait_read,\n   *     wait_handler);\n   * @endcode\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        WaitHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WaitHandler,\n      void (asio::error_code))\n  async_wait(wait_type w,\n      ASIO_MOVE_ARG(WaitHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WaitHandler, void (asio::error_code)>(\n        initiate_async_wait(this), handler, w);\n  }\n\n#if !defined(ASIO_NO_EXTENSIONS)\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer into the\n   * given socket. 
The function call will block until a new connection has been\n   * accepted successfully or an error occurs.\n   *\n   * @param peer The socket into which the new connection will be accepted.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(my_context);\n   * acceptor.accept(socket);\n   * @endcode\n   */\n  template <typename Protocol1, typename Executor1>\n  void accept(basic_socket<Protocol1, Executor1>& peer,\n      typename enable_if<\n        is_convertible<Protocol, Protocol1>::value\n      >::type* = 0)\n  {\n    asio::error_code ec;\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, static_cast<endpoint_type*>(0), ec);\n    asio::detail::throw_error(ec, \"accept\");\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer into the\n   * given socket. The function call will block until a new connection has been\n   * accepted successfully or an error occurs.\n   *\n   * @param peer The socket into which the new connection will be accepted.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(my_context);\n   * asio::error_code ec;\n   * acceptor.accept(socket, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename Protocol1, typename Executor1>\n  ASIO_SYNC_OP_VOID accept(\n      basic_socket<Protocol1, Executor1>& peer, asio::error_code& ec,\n      typename enable_if<\n        is_convertible<Protocol, Protocol1>::value\n      >::type* = 0)\n  {\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, static_cast<endpoint_type*>(0), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Start an asynchronous accept.\n  
/**\n   * This function is used to asynchronously accept a new connection into a\n   * socket. The function call always returns immediately.\n   *\n   * @param peer The socket into which the new connection will be accepted.\n   * Ownership of the peer object is retained by the caller, which must\n   * guarantee that it is valid until the handler is called.\n   *\n   * @param handler The handler to be called when the accept operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void accept_handler(const asio::error_code& error)\n   * {\n   *   if (!error)\n   *   {\n   *     // Accept succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(my_context);\n   * acceptor.async_accept(socket, accept_handler);\n   * @endcode\n   */\n  template <typename Protocol1, typename Executor1,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        AcceptHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(AcceptHandler,\n      void (asio::error_code))\n  async_accept(basic_socket<Protocol1, Executor1>& peer,\n      ASIO_MOVE_ARG(AcceptHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type),\n      typename enable_if<\n        is_convertible<Protocol, Protocol1>::value\n      >::type* = 0)\n  {\n    return async_initiate<AcceptHandler, void (asio::error_code)>(\n        initiate_async_accept(this), handler,\n        &peer, 
static_cast<endpoint_type*>(0));\n  }\n\n  /// Accept a new connection and obtain the endpoint of the peer\n  /**\n   * This function is used to accept a new connection from a peer into the\n   * given socket, and additionally provide the endpoint of the remote peer.\n   * The function call will block until a new connection has been accepted\n   * successfully or an error occurs.\n   *\n   * @param peer The socket into which the new connection will be accepted.\n   *\n   * @param peer_endpoint An endpoint object which will receive the endpoint of\n   * the remote peer.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(my_context);\n   * asio::ip::tcp::endpoint endpoint;\n   * acceptor.accept(socket, endpoint);\n   * @endcode\n   */\n  template <typename Executor1>\n  void accept(basic_socket<protocol_type, Executor1>& peer,\n      endpoint_type& peer_endpoint)\n  {\n    asio::error_code ec;\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, &peer_endpoint, ec);\n    asio::detail::throw_error(ec, \"accept\");\n  }\n\n  /// Accept a new connection and obtain the endpoint of the peer\n  /**\n   * This function is used to accept a new connection from a peer into the\n   * given socket, and additionally provide the endpoint of the remote peer.\n   * The function call will block until a new connection has been accepted\n   * successfully or an error occurs.\n   *\n   * @param peer The socket into which the new connection will be accepted.\n   *\n   * @param peer_endpoint An endpoint object which will receive the endpoint of\n   * the remote peer.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(my_context);\n   * asio::ip::tcp::endpoint endpoint;\n   * 
asio::error_code ec;\n   * acceptor.accept(socket, endpoint, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename Executor1>\n  ASIO_SYNC_OP_VOID accept(basic_socket<protocol_type, Executor1>& peer,\n      endpoint_type& peer_endpoint, asio::error_code& ec)\n  {\n    impl_.get_service().accept(\n        impl_.get_implementation(), peer, &peer_endpoint, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Start an asynchronous accept.\n  /**\n   * This function is used to asynchronously accept a new connection into a\n   * socket, and additionally obtain the endpoint of the remote peer. The\n   * function call always returns immediately.\n   *\n   * @param peer The socket into which the new connection will be accepted.\n   * Ownership of the peer object is retained by the caller, which must\n   * guarantee that it is valid until the handler is called.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written. Ownership of the peer_endpoint object is\n   * retained by the caller, which must guarantee that it is valid until the\n   * handler is called.\n   *\n   * @param handler The handler to be called when the accept operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <typename Executor1,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        AcceptHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(AcceptHandler,\n      void (asio::error_code))\n  async_accept(basic_socket<protocol_type, Executor1>& peer,\n      endpoint_type& peer_endpoint,\n      ASIO_MOVE_ARG(AcceptHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<AcceptHandler, void (asio::error_code)>(\n        initiate_async_accept(this), handler, &peer, &peer_endpoint);\n  }\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @returns A socket object representing the newly accepted connection.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(acceptor.accept());\n   * @endcode\n   */\n  typename Protocol::socket::template rebind_executor<executor_type>::other\n  accept()\n  {\n    asio::error_code ec;\n    typename Protocol::socket::template rebind_executor<\n      executor_type>::other peer(impl_.get_executor());\n    impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec);\n    asio::detail::throw_error(ec, \"accept\");\n    return peer;\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. 
The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns On success, a socket object representing the newly accepted\n   * connection. On error, a socket object where is_open() is false.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(acceptor.accept(ec));\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  typename Protocol::socket::template rebind_executor<executor_type>::other\n  accept(asio::error_code& ec)\n  {\n    typename Protocol::socket::template rebind_executor<\n      executor_type>::other peer(impl_.get_executor());\n    impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec);\n    return peer;\n  }\n\n  /// Start an asynchronous accept.\n  /**\n   * This function is used to asynchronously accept a new connection. The\n   * function call always returns immediately.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param handler The handler to be called when the accept operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   // Result of operation.\n   *   const asio::error_code& error,\n   *   // On success, the newly accepted socket.\n   *   typename Protocol::socket::template\n   *     rebind_executor<executor_type>::other peer\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void accept_handler(const asio::error_code& error,\n   *     asio::ip::tcp::socket peer)\n   * {\n   *   if (!error)\n   *   {\n   *     // Accept succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * acceptor.async_accept(accept_handler);\n   * @endcode\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          executor_type>::other)) MoveAcceptHandler\n            ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(MoveAcceptHandler,\n      void (asio::error_code,\n        typename Protocol::socket::template\n          rebind_executor<executor_type>::other))\n  async_accept(\n      ASIO_MOVE_ARG(MoveAcceptHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<MoveAcceptHandler,\n      void (asio::error_code, typename Protocol::socket::template\n        rebind_executor<executor_type>::other)>(\n          initiate_async_move_accept(this), handler,\n          impl_.get_executor(), static_cast<endpoint_type*>(0),\n          static_cast<typename Protocol::socket::template\n            rebind_executor<executor_type>::other*>(0));\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. 
The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param ex The I/O executor object to be used for the newly\n   * accepted socket.\n   *\n   * @returns A socket object representing the newly accepted connection.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(acceptor.accept());\n   * @endcode\n   */\n  template <typename Executor1>\n  typename Protocol::socket::template rebind_executor<Executor1>::other\n  accept(const Executor1& ex,\n      typename enable_if<\n        is_executor<Executor1>::value\n      >::type* = 0)\n  {\n    asio::error_code ec;\n    typename Protocol::socket::template\n      rebind_executor<Executor1>::other peer(ex);\n    impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec);\n    asio::detail::throw_error(ec, \"accept\");\n    return peer;\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. 
The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param context The I/O execution context object to be used for the newly\n   * accepted socket.\n   *\n   * @returns A socket object representing the newly accepted connection.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(acceptor.accept());\n   * @endcode\n   */\n  template <typename ExecutionContext>\n  typename Protocol::socket::template rebind_executor<\n      typename ExecutionContext::executor_type>::other\n  accept(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n  {\n    asio::error_code ec;\n    typename Protocol::socket::template rebind_executor<\n        typename ExecutionContext::executor_type>::other peer(context);\n    impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec);\n    asio::detail::throw_error(ec, \"accept\");\n    return peer;\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param ex The I/O executor object to be used for the newly accepted\n   * socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns On success, a socket object representing the newly accepted\n   * connection. 
On error, a socket object where is_open() is false.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(acceptor.accept(my_context2, ec));\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename Executor1>\n  typename Protocol::socket::template rebind_executor<Executor1>::other\n  accept(const Executor1& ex, asio::error_code& ec,\n      typename enable_if<\n        is_executor<Executor1>::value\n      >::type* = 0)\n  {\n    typename Protocol::socket::template\n      rebind_executor<Executor1>::other peer(ex);\n    impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec);\n    return peer;\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param context The I/O execution context object to be used for the newly\n   * accepted socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns On success, a socket object representing the newly accepted\n   * connection. 
On error, a socket object where is_open() is false.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::socket socket(acceptor.accept(my_context2, ec));\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename ExecutionContext>\n  typename Protocol::socket::template rebind_executor<\n      typename ExecutionContext::executor_type>::other\n  accept(ExecutionContext& context, asio::error_code& ec,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n  {\n    typename Protocol::socket::template rebind_executor<\n        typename ExecutionContext::executor_type>::other peer(context);\n    impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec);\n    return peer;\n  }\n\n  /// Start an asynchronous accept.\n  /**\n   * This function is used to asynchronously accept a new connection. The\n   * function call always returns immediately.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param ex The I/O executor object to be used for the newly accepted\n   * socket.\n   *\n   * @param handler The handler to be called when the accept operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   typename Protocol::socket::template rebind_executor<\n   *     Executor1>::other peer // On success, the newly accepted socket.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void accept_handler(const asio::error_code& error,\n   *     asio::ip::tcp::socket peer)\n   * {\n   *   if (!error)\n   *   {\n   *     // Accept succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * acceptor.async_accept(my_context2, accept_handler);\n   * @endcode\n   */\n  template <typename Executor1,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          Executor1>::other)) MoveAcceptHandler\n            ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(MoveAcceptHandler,\n      void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          Executor1>::other))\n  async_accept(const Executor1& ex,\n      ASIO_MOVE_ARG(MoveAcceptHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type),\n      typename enable_if<\n        is_executor<Executor1>::value\n      >::type* = 0)\n  {\n    typedef typename Protocol::socket::template rebind_executor<\n      Executor1>::other other_socket_type;\n\n    return async_initiate<MoveAcceptHandler,\n      void (asio::error_code, other_socket_type)>(\n        initiate_async_move_accept(this), handler,\n        ex, static_cast<endpoint_type*>(0),\n        static_cast<other_socket_type*>(0));\n  }\n\n  /// Start an asynchronous accept.\n  /**\n   * This function is used to asynchronously accept a new connection. 
The\n   * function call always returns immediately.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param context The I/O execution context object to be used for the newly\n   * accepted socket.\n   *\n   * @param handler The handler to be called when the accept operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   typename Protocol::socket::template rebind_executor<\n   *     typename ExecutionContext::executor_type>::other peer\n   *       // On success, the newly accepted socket.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void accept_handler(const asio::error_code& error,\n   *     asio::ip::tcp::socket peer)\n   * {\n   *   if (!error)\n   *   {\n   *     // Accept succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * acceptor.async_accept(my_context2, accept_handler);\n   * @endcode\n   */\n  template <typename ExecutionContext,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          typename ExecutionContext::executor_type>::other)) MoveAcceptHandler\n            ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(MoveAcceptHandler,\n      void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          typename ExecutionContext::executor_type>::other))\n  async_accept(ExecutionContext& context,\n      
ASIO_MOVE_ARG(MoveAcceptHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type),\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n  {\n    typedef typename Protocol::socket::template rebind_executor<\n      typename ExecutionContext::executor_type>::other other_socket_type;\n\n    return async_initiate<MoveAcceptHandler,\n      void (asio::error_code, other_socket_type)>(\n        initiate_async_move_accept(this), handler,\n        context.get_executor(), static_cast<endpoint_type*>(0),\n        static_cast<other_socket_type*>(0));\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written.\n   *\n   * @returns A socket object representing the newly accepted connection.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * asio::ip::tcp::socket socket(acceptor.accept(endpoint));\n   * @endcode\n   */\n  typename Protocol::socket::template rebind_executor<executor_type>::other\n  accept(endpoint_type& peer_endpoint)\n  {\n    asio::error_code ec;\n    typename Protocol::socket::template rebind_executor<\n      executor_type>::other peer(impl_.get_executor());\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, &peer_endpoint, ec);\n    asio::detail::throw_error(ec, \"accept\");\n    return peer;\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. 
The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns On success, a socket object representing the newly accepted\n   * connection. On error, a socket object where is_open() is false.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * asio::ip::tcp::socket socket(acceptor.accept(endpoint, ec));\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  typename Protocol::socket::template rebind_executor<executor_type>::other\n  accept(endpoint_type& peer_endpoint, asio::error_code& ec)\n  {\n    typename Protocol::socket::template rebind_executor<\n      executor_type>::other peer(impl_.get_executor());\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, &peer_endpoint, ec);\n    return peer;\n  }\n\n  /// Start an asynchronous accept.\n  /**\n   * This function is used to asynchronously accept a new connection. The\n   * function call always returns immediately.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written. Ownership of the peer_endpoint object is\n   * retained by the caller, which must guarantee that it is valid until the\n   * handler is called.\n   *\n   * @param handler The handler to be called when the accept operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   // Result of operation.\n   *   const asio::error_code& error,\n   *   // On success, the newly accepted socket.\n   *   typename Protocol::socket::template\n   *     rebind_executor<executor_type>::other peer\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void accept_handler(const asio::error_code& error,\n   *     asio::ip::tcp::socket peer)\n   * {\n   *   if (!error)\n   *   {\n   *     // Accept succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * acceptor.async_accept(endpoint, accept_handler);\n   * @endcode\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          executor_type>::other)) MoveAcceptHandler\n            ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(MoveAcceptHandler,\n      void (asio::error_code,\n        typename Protocol::socket::template\n          rebind_executor<executor_type>::other))\n  async_accept(endpoint_type& peer_endpoint,\n      ASIO_MOVE_ARG(MoveAcceptHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<MoveAcceptHandler,\n      void (asio::error_code, typename Protocol::socket::template\n        rebind_executor<executor_type>::other)>(\n          initiate_async_move_accept(this), handler,\n          impl_.get_executor(), &peer_endpoint,\n          static_cast<typename Protocol::socket::template\n            rebind_executor<executor_type>::other*>(0));\n  }\n\n  /// Accept a 
new connection.\n  /**\n   * This function is used to accept a new connection from a peer. The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param ex The I/O executor object to be used for the newly accepted\n   * socket.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written.\n   *\n   * @returns A socket object representing the newly accepted connection.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * asio::ip::tcp::socket socket(\n   *     acceptor.accept(my_context2, endpoint));\n   * @endcode\n   */\n  template <typename Executor1>\n  typename Protocol::socket::template rebind_executor<Executor1>::other\n  accept(const Executor1& ex, endpoint_type& peer_endpoint,\n      typename enable_if<\n        is_executor<Executor1>::value\n      >::type* = 0)\n  {\n    asio::error_code ec;\n    typename Protocol::socket::template\n        rebind_executor<Executor1>::other peer(ex);\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, &peer_endpoint, ec);\n    asio::detail::throw_error(ec, \"accept\");\n    return peer;\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. 
The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param context The I/O execution context object to be used for the newly\n   * accepted socket.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written.\n   *\n   * @returns A socket object representing the newly accepted connection.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * asio::ip::tcp::socket socket(\n   *     acceptor.accept(my_context2, endpoint));\n   * @endcode\n   */\n  template <typename ExecutionContext>\n  typename Protocol::socket::template rebind_executor<\n      typename ExecutionContext::executor_type>::other\n  accept(ExecutionContext& context, endpoint_type& peer_endpoint,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n  {\n    asio::error_code ec;\n    typename Protocol::socket::template rebind_executor<\n        typename ExecutionContext::executor_type>::other peer(context);\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, &peer_endpoint, ec);\n    asio::detail::throw_error(ec, \"accept\");\n    return peer;\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. 
The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param ex The I/O executor object to be used for the newly accepted\n   * socket.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns On success, a socket object representing the newly accepted\n   * connection. On error, a socket object where is_open() is false.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * asio::ip::tcp::socket socket(\n   *     acceptor.accept(my_context2, endpoint, ec));\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename Executor1>\n  typename Protocol::socket::template rebind_executor<Executor1>::other\n  accept(const Executor1& ex,\n      endpoint_type& peer_endpoint, asio::error_code& ec,\n      typename enable_if<\n        is_executor<Executor1>::value\n      >::type* = 0)\n  {\n    typename Protocol::socket::template\n      rebind_executor<Executor1>::other peer(ex);\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, &peer_endpoint, ec);\n    return peer;\n  }\n\n  /// Accept a new connection.\n  /**\n   * This function is used to accept a new connection from a peer. 
The function\n   * call will block until a new connection has been accepted successfully or\n   * an error occurs.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param context The I/O execution context object to be used for the newly\n   * accepted socket.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns On success, a socket object representing the newly accepted\n   * connection. On error, a socket object where is_open() is false.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * asio::ip::tcp::socket socket(\n   *     acceptor.accept(my_context2, endpoint, ec));\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * @endcode\n   */\n  template <typename ExecutionContext>\n  typename Protocol::socket::template rebind_executor<\n      typename ExecutionContext::executor_type>::other\n  accept(ExecutionContext& context,\n      endpoint_type& peer_endpoint, asio::error_code& ec,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n  {\n    typename Protocol::socket::template rebind_executor<\n        typename ExecutionContext::executor_type>::other peer(context);\n    impl_.get_service().accept(impl_.get_implementation(),\n        peer, &peer_endpoint, ec);\n    return peer;\n  }\n\n  /// Start an asynchronous accept.\n  /**\n   * This function is used to asynchronously accept a new connection. 
The\n   * function call always returns immediately.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param ex The I/O executor object to be used for the newly accepted\n   * socket.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written. Ownership of the peer_endpoint object is\n   * retained by the caller, which must guarantee that it is valid until the\n   * handler is called.\n   *\n   * @param handler The handler to be called when the accept operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   typename Protocol::socket::template rebind_executor<\n   *     Executor1>::other peer // On success, the newly accepted socket.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void accept_handler(const asio::error_code& error,\n   *     asio::ip::tcp::socket peer)\n   * {\n   *   if (!error)\n   *   {\n   *     // Accept succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * acceptor.async_accept(my_context2, endpoint, accept_handler);\n   * @endcode\n   */\n  template <typename Executor1,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          Executor1>::other)) MoveAcceptHandler\n            ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(MoveAcceptHandler,\n      void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          Executor1>::other))\n  async_accept(const Executor1& ex, endpoint_type& peer_endpoint,\n      ASIO_MOVE_ARG(MoveAcceptHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type),\n      typename enable_if<\n        is_executor<Executor1>::value\n      >::type* = 0)\n  {\n    typedef typename Protocol::socket::template rebind_executor<\n      Executor1>::other other_socket_type;\n\n    return async_initiate<MoveAcceptHandler,\n      void (asio::error_code, other_socket_type)>(\n        initiate_async_move_accept(this), handler,\n        ex, &peer_endpoint,\n        static_cast<other_socket_type*>(0));\n  }\n\n  /// Start an asynchronous accept.\n  /**\n   * This function is used to asynchronously accept a new connection. 
The\n   * function call always returns immediately.\n   *\n   * This overload requires that the Protocol template parameter satisfy the\n   * AcceptableProtocol type requirements.\n   *\n   * @param context The I/O execution context object to be used for the newly\n   * accepted socket.\n   *\n   * @param peer_endpoint An endpoint object into which the endpoint of the\n   * remote peer will be written. Ownership of the peer_endpoint object is\n   * retained by the caller, which must guarantee that it is valid until the\n   * handler is called.\n   *\n   * @param handler The handler to be called when the accept operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   typename Protocol::socket::template rebind_executor<\n   *     typename ExecutionContext::executor_type>::other peer\n   *       // On success, the newly accepted socket.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void accept_handler(const asio::error_code& error,\n   *     asio::ip::tcp::socket peer)\n   * {\n   *   if (!error)\n   *   {\n   *     // Accept succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::ip::tcp::endpoint endpoint;\n   * acceptor.async_accept(my_context2, endpoint, accept_handler);\n   * @endcode\n   */\n  template <typename ExecutionContext,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          typename ExecutionContext::executor_type>::other)) MoveAcceptHandler\n            ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(MoveAcceptHandler,\n      void (asio::error_code,\n        typename Protocol::socket::template rebind_executor<\n          typename ExecutionContext::executor_type>::other))\n  async_accept(ExecutionContext& context,\n      endpoint_type& peer_endpoint,\n      ASIO_MOVE_ARG(MoveAcceptHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type),\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n  {\n    typedef typename Protocol::socket::template rebind_executor<\n      typename ExecutionContext::executor_type>::other other_socket_type;\n\n    return async_initiate<MoveAcceptHandler,\n      void (asio::error_code, other_socket_type)>(\n        initiate_async_move_accept(this), handler,\n        context.get_executor(), &peer_endpoint,\n        static_cast<other_socket_type*>(0));\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\nprivate:\n  // Disallow copying and assignment.\n  basic_socket_acceptor(const basic_socket_acceptor&) ASIO_DELETED;\n  basic_socket_acceptor& operator=(\n   
   const basic_socket_acceptor&) ASIO_DELETED;\n\n  class initiate_async_wait\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_wait(basic_socket_acceptor* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WaitHandler>\n    void operator()(ASIO_MOVE_ARG(WaitHandler) handler, wait_type w) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WaitHandler.\n      ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check;\n\n      detail::non_const_lvalue<WaitHandler> handler2(handler);\n      self_->impl_.get_service().async_wait(\n          self_->impl_.get_implementation(), w, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_socket_acceptor* self_;\n  };\n\n  class initiate_async_accept\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_accept(basic_socket_acceptor* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename AcceptHandler, typename Protocol1, typename Executor1>\n    void operator()(ASIO_MOVE_ARG(AcceptHandler) handler,\n        basic_socket<Protocol1, Executor1>* peer,\n        endpoint_type* peer_endpoint) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a AcceptHandler.\n      ASIO_ACCEPT_HANDLER_CHECK(AcceptHandler, handler) type_check;\n\n      detail::non_const_lvalue<AcceptHandler> handler2(handler);\n      self_->impl_.get_service().async_accept(\n          self_->impl_.get_implementation(), *peer, peer_endpoint,\n          handler2.value, self_->impl_.get_implementation_executor());\n    
}\n\n  private:\n    basic_socket_acceptor* self_;\n  };\n\n  class initiate_async_move_accept\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_move_accept(basic_socket_acceptor* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename MoveAcceptHandler, typename Executor1, typename Socket>\n    void operator()(ASIO_MOVE_ARG(MoveAcceptHandler) handler,\n        const Executor1& peer_ex, endpoint_type* peer_endpoint, Socket*) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a MoveAcceptHandler.\n      ASIO_MOVE_ACCEPT_HANDLER_CHECK(\n          MoveAcceptHandler, handler, Socket) type_check;\n\n      detail::non_const_lvalue<MoveAcceptHandler> handler2(handler);\n      self_->impl_.get_service().async_move_accept(\n          self_->impl_.get_implementation(), peer_ex, peer_endpoint,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_socket_acceptor* self_;\n  };\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n  detail::io_object_impl<\n    detail::null_socket_service<Protocol>, Executor> impl_;\n#elif defined(ASIO_HAS_IOCP)\n  detail::io_object_impl<\n    detail::win_iocp_socket_service<Protocol>, Executor> impl_;\n#else\n  detail::io_object_impl<\n    detail::reactive_socket_service<Protocol>, Executor> impl_;\n#endif\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BASIC_SOCKET_ACCEPTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_socket_iostream.hpp",
    "content": "//\n// basic_socket_iostream.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_SOCKET_IOSTREAM_HPP\n#define ASIO_BASIC_SOCKET_IOSTREAM_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include <istream>\n#include <ostream>\n#include \"asio/basic_socket_streambuf.hpp\"\n\n#if !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n# include \"asio/detail/variadic_templates.hpp\"\n\n// A macro that should expand to:\n//   template <typename T1, ..., typename Tn>\n//   explicit basic_socket_iostream(T1 x1, ..., Tn xn)\n//     : std::basic_iostream<char>(\n//         &this->detail::socket_iostream_base<\n//           Protocol, Clock, WaitTraits>::streambuf_)\n//   {\n//     if (rdbuf()->connect(x1, ..., xn) == 0)\n//       this->setstate(std::ios_base::failbit);\n//   }\n// This macro should only persist within this file.\n\n# define ASIO_PRIVATE_CTR_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  explicit basic_socket_iostream(ASIO_VARIADIC_BYVAL_PARAMS(n)) \\\n    : std::basic_iostream<char>( \\\n        &this->detail::socket_iostream_base< \\\n          Protocol, Clock, WaitTraits>::streambuf_) \\\n  { \\\n    this->setf(std::ios_base::unitbuf); \\\n    if (rdbuf()->connect(ASIO_VARIADIC_BYVAL_ARGS(n)) == 0) \\\n      this->setstate(std::ios_base::failbit); \\\n  } \\\n  /**/\n\n// A macro that should expand to:\n//   template <typename T1, ..., typename Tn>\n//   void connect(T1 x1, ..., Tn xn)\n//   {\n//     if (rdbuf()->connect(x1, ..., xn) == 0)\n//       this->setstate(std::ios_base::failbit);\n//   }\n// This macro should only persist within this file.\n\n# 
define ASIO_PRIVATE_CONNECT_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  void connect(ASIO_VARIADIC_BYVAL_PARAMS(n)) \\\n  { \\\n    if (rdbuf()->connect(ASIO_VARIADIC_BYVAL_ARGS(n)) == 0) \\\n      this->setstate(std::ios_base::failbit); \\\n  } \\\n  /**/\n\n#endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// A separate base class is used to ensure that the streambuf is initialised\n// prior to the basic_socket_iostream's basic_iostream base class.\ntemplate <typename Protocol, typename Clock, typename WaitTraits>\nclass socket_iostream_base\n{\nprotected:\n  socket_iostream_base()\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  socket_iostream_base(socket_iostream_base&& other)\n    : streambuf_(std::move(other.streambuf_))\n  {\n  }\n\n  socket_iostream_base(basic_stream_socket<Protocol> s)\n    : streambuf_(std::move(s))\n  {\n  }\n\n  socket_iostream_base& operator=(socket_iostream_base&& other)\n  {\n    streambuf_ = std::move(other.streambuf_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  basic_socket_streambuf<Protocol, Clock, WaitTraits> streambuf_;\n};\n\n} // namespace detail\n\n#if !defined(ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL)\n#define ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol,\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n    typename Clock = boost::posix_time::ptime,\n    typename WaitTraits = time_traits<Clock> >\n#else // defined(ASIO_HAS_BOOST_DATE_TIME)\n      // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n    typename Clock = chrono::steady_clock,\n    typename WaitTraits = wait_traits<Clock> >\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\nclass basic_socket_iostream;\n\n#endif // 
!defined(ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL)\n\n/// Iostream interface for a socket.\n#if defined(GENERATING_DOCUMENTATION)\ntemplate <typename Protocol,\n    typename Clock = chrono::steady_clock,\n    typename WaitTraits = wait_traits<Clock> >\n#else // defined(GENERATING_DOCUMENTATION)\ntemplate <typename Protocol, typename Clock, typename WaitTraits>\n#endif // defined(GENERATING_DOCUMENTATION)\nclass basic_socket_iostream\n  : private detail::socket_iostream_base<Protocol, Clock, WaitTraits>,\n    public std::basic_iostream<char>\n{\nprivate:\n  // These typedefs are intended keep this class's implementation independent\n  // of whether it's using Boost.DateClock, Boost.Chrono or std::chrono.\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n  typedef WaitTraits traits_helper;\n#else // defined(ASIO_HAS_BOOST_DATE_TIME)\n      // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n  typedef detail::chrono_time_traits<Clock, WaitTraits> traits_helper;\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n\npublic:\n  /// The protocol type.\n  typedef Protocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  /// The clock type.\n  typedef Clock clock_type;\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// (Deprecated: Use time_point.) The time type.\n  typedef typename WaitTraits::time_type time_type;\n\n  /// The time type.\n  typedef typename WaitTraits::time_point time_point;\n\n  /// (Deprecated: Use duration.) 
The duration type.\n  typedef typename WaitTraits::duration_type duration_type;\n\n  /// The duration type.\n  typedef typename WaitTraits::duration duration;\n#else\n# if !defined(ASIO_NO_DEPRECATED)\n  typedef typename traits_helper::time_type time_type;\n  typedef typename traits_helper::duration_type duration_type;\n# endif // !defined(ASIO_NO_DEPRECATED)\n  typedef typename traits_helper::time_type time_point;\n  typedef typename traits_helper::duration_type duration;\n#endif\n\n  /// Construct a basic_socket_iostream without establishing a connection.\n  basic_socket_iostream()\n    : std::basic_iostream<char>(\n        &this->detail::socket_iostream_base<\n          Protocol, Clock, WaitTraits>::streambuf_)\n  {\n    this->setf(std::ios_base::unitbuf);\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Construct a basic_socket_iostream from the supplied socket.\n  explicit basic_socket_iostream(basic_stream_socket<protocol_type> s)\n    : detail::socket_iostream_base<\n        Protocol, Clock, WaitTraits>(std::move(s)),\n      std::basic_iostream<char>(\n        &this->detail::socket_iostream_base<\n          Protocol, Clock, WaitTraits>::streambuf_)\n  {\n    this->setf(std::ios_base::unitbuf);\n  }\n\n#if defined(ASIO_HAS_STD_IOSTREAM_MOVE) \\\n  || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_socket_iostream from another.\n  basic_socket_iostream(basic_socket_iostream&& other)\n    : detail::socket_iostream_base<\n        Protocol, Clock, WaitTraits>(std::move(other)),\n      std::basic_iostream<char>(std::move(other))\n  {\n    this->set_rdbuf(&this->detail::socket_iostream_base<\n          Protocol, Clock, WaitTraits>::streambuf_);\n  }\n\n  /// Move-assign a basic_socket_iostream from another.\n  basic_socket_iostream& operator=(basic_socket_iostream&& other)\n  {\n    std::basic_iostream<char>::operator=(std::move(other));\n    detail::socket_iostream_base<\n        Protocol, Clock, 
WaitTraits>::operator=(std::move(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_STD_IOSTREAM_MOVE)\n       //   || defined(GENERATING_DOCUMENTATION)\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// Establish a connection to an endpoint corresponding to a resolver query.\n  /**\n   * This constructor automatically establishes a connection based on the\n   * supplied resolver query parameters. The arguments are used to construct\n   * a resolver query object.\n   */\n  template <typename T1, ..., typename TN>\n  explicit basic_socket_iostream(T1 t1, ..., TN tn);\n#elif defined(ASIO_HAS_VARIADIC_TEMPLATES)\n  template <typename... T>\n  explicit basic_socket_iostream(T... x)\n    : std::basic_iostream<char>(\n        &this->detail::socket_iostream_base<\n          Protocol, Clock, WaitTraits>::streambuf_)\n  {\n    this->setf(std::ios_base::unitbuf);\n    if (rdbuf()->connect(x...) == 0)\n      this->setstate(std::ios_base::failbit);\n  }\n#else\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CTR_DEF)\n#endif\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// Establish a connection to an endpoint corresponding to a resolver query.\n  /**\n   * This function automatically establishes a connection based on the supplied\n   * resolver query parameters. The arguments are used to construct a resolver\n   * query object.\n   */\n  template <typename T1, ..., typename TN>\n  void connect(T1 t1, ..., TN tn);\n#elif defined(ASIO_HAS_VARIADIC_TEMPLATES)\n  template <typename... T>\n  void connect(T... x)\n  {\n    if (rdbuf()->connect(x...) 
== 0)\n      this->setstate(std::ios_base::failbit);\n  }\n#else\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CONNECT_DEF)\n#endif\n\n  /// Close the connection.\n  void close()\n  {\n    if (rdbuf()->close() == 0)\n      this->setstate(std::ios_base::failbit);\n  }\n\n  /// Return a pointer to the underlying streambuf.\n  basic_socket_streambuf<Protocol, Clock, WaitTraits>* rdbuf() const\n  {\n    return const_cast<basic_socket_streambuf<Protocol, Clock, WaitTraits>*>(\n        &this->detail::socket_iostream_base<\n          Protocol, Clock, WaitTraits>::streambuf_);\n  }\n\n  /// Get a reference to the underlying socket.\n  basic_socket<Protocol>& socket()\n  {\n    return rdbuf()->socket();\n  }\n\n  /// Get the last error associated with the stream.\n  /**\n   * @return An \\c error_code corresponding to the last error from the stream.\n   *\n   * @par Example\n   * To print the error associated with a failure to establish a connection:\n   * @code tcp::iostream s(\"www.boost.org\", \"http\");\n   * if (!s)\n   * {\n   *   std::cout << \"Error: \" << s.error().message() << std::endl;\n   * } @endcode\n   */\n  const asio::error_code& error() const\n  {\n    return rdbuf()->error();\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use expiry().) Get the stream's expiry time as an absolute\n  /// time.\n  /**\n   * @return An absolute time value representing the stream's expiry time.\n   */\n  time_point expires_at() const\n  {\n    return rdbuf()->expires_at();\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Get the stream's expiry time as an absolute time.\n  /**\n   * @return An absolute time value representing the stream's expiry time.\n   */\n  time_point expiry() const\n  {\n    return rdbuf()->expiry();\n  }\n\n  /// Set the stream's expiry time as an absolute time.\n  /**\n   * This function sets the expiry time associated with the stream. 
Stream\n   * operations performed after this time (where the operations cannot be\n   * completed using the internal buffers) will fail with the error\n   * asio::error::operation_aborted.\n   *\n   * @param expiry_time The expiry time to be used for the stream.\n   */\n  void expires_at(const time_point& expiry_time)\n  {\n    rdbuf()->expires_at(expiry_time);\n  }\n\n  /// Set the stream's expiry time relative to now.\n  /**\n   * This function sets the expiry time associated with the stream. Stream\n   * operations performed after this time (where the operations cannot be\n   * completed using the internal buffers) will fail with the error\n   * asio::error::operation_aborted.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   */\n  void expires_after(const duration& expiry_time)\n  {\n    rdbuf()->expires_after(expiry_time);\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use expiry().) Get the stream's expiry time relative to now.\n  /**\n   * @return A relative time value representing the stream's expiry time.\n   */\n  duration expires_from_now() const\n  {\n    return rdbuf()->expires_from_now();\n  }\n\n  /// (Deprecated: Use expires_after().) Set the stream's expiry time relative\n  /// to now.\n  /**\n   * This function sets the expiry time associated with the stream. 
Stream\n   * operations performed after this time (where the operations cannot be\n   * completed using the internal buffers) will fail with the error\n   * asio::error::operation_aborted.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   */\n  void expires_from_now(const duration& expiry_time)\n  {\n    rdbuf()->expires_from_now(expiry_time);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nprivate:\n  // Disallow copying and assignment.\n  basic_socket_iostream(const basic_socket_iostream&) ASIO_DELETED;\n  basic_socket_iostream& operator=(\n      const basic_socket_iostream&) ASIO_DELETED;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n# undef ASIO_PRIVATE_CTR_DEF\n# undef ASIO_PRIVATE_CONNECT_DEF\n#endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_BASIC_SOCKET_IOSTREAM_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_socket_streambuf.hpp",
    "content": "//\n// basic_socket_streambuf.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_SOCKET_STREAMBUF_HPP\n#define ASIO_BASIC_SOCKET_STREAMBUF_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include <streambuf>\n#include <vector>\n#include \"asio/basic_socket.hpp\"\n#include \"asio/basic_stream_socket.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/io_context.hpp\"\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n# include \"asio/detail/deadline_timer_service.hpp\"\n#else // defined(ASIO_HAS_BOOST_DATE_TIME)\n      // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n# include \"asio/steady_timer.hpp\"\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n\n#if !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n# include \"asio/detail/variadic_templates.hpp\"\n\n// A macro that should expand to:\n//   template <typename T1, ..., typename Tn>\n//   basic_socket_streambuf* connect(T1 x1, ..., Tn xn)\n//   {\n//     init_buffers();\n//     typedef typename Protocol::resolver resolver_type;\n//     resolver_type resolver(socket().get_executor());\n//     connect_to_endpoints(\n//         resolver.resolve(x1, ..., xn, ec_));\n//     return !ec_ ? 
this : 0;\n//   }\n// This macro should only persist within this file.\n\n# define ASIO_PRIVATE_CONNECT_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  basic_socket_streambuf* connect(ASIO_VARIADIC_BYVAL_PARAMS(n)) \\\n  { \\\n    init_buffers(); \\\n    typedef typename Protocol::resolver resolver_type; \\\n    resolver_type resolver(socket().get_executor()); \\\n    connect_to_endpoints( \\\n        resolver.resolve(ASIO_VARIADIC_BYVAL_ARGS(n), ec_)); \\\n    return !ec_ ? this : 0; \\\n  } \\\n  /**/\n\n#endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// A separate base class is used to ensure that the io_context member is\n// initialised prior to the basic_socket_streambuf's basic_socket base class.\nclass socket_streambuf_io_context\n{\nprotected:\n  socket_streambuf_io_context(io_context* ctx)\n    : default_io_context_(ctx)\n  {\n  }\n\n  shared_ptr<io_context> default_io_context_;\n};\n\n// A separate base class is used to ensure that the dynamically allocated\n// buffers are constructed prior to the basic_socket_streambuf's basic_socket\n// base class. 
This makes moving the socket is the last potentially throwing\n// step in the streambuf's move constructor, giving the constructor a strong\n// exception safety guarantee.\nclass socket_streambuf_buffers\n{\nprotected:\n  socket_streambuf_buffers()\n    : get_buffer_(buffer_size),\n      put_buffer_(buffer_size)\n  {\n  }\n\n  enum { buffer_size = 512 };\n  std::vector<char> get_buffer_;\n  std::vector<char> put_buffer_;\n};\n\n} // namespace detail\n\n#if !defined(ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL)\n#define ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol,\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n    typename Clock = boost::posix_time::ptime,\n    typename WaitTraits = time_traits<Clock> >\n#else // defined(ASIO_HAS_BOOST_DATE_TIME)\n      // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n    typename Clock = chrono::steady_clock,\n    typename WaitTraits = wait_traits<Clock> >\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\nclass basic_socket_streambuf;\n\n#endif // !defined(ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL)\n\n/// Iostream streambuf for a socket.\n#if defined(GENERATING_DOCUMENTATION)\ntemplate <typename Protocol,\n    typename Clock = chrono::steady_clock,\n    typename WaitTraits = wait_traits<Clock> >\n#else // defined(GENERATING_DOCUMENTATION)\ntemplate <typename Protocol, typename Clock, typename WaitTraits>\n#endif // defined(GENERATING_DOCUMENTATION)\nclass basic_socket_streambuf\n  : public std::streambuf,\n    private detail::socket_streambuf_io_context,\n    private detail::socket_streambuf_buffers,\n#if defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION)\n    private basic_socket<Protocol>\n#else // defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION)\n    public basic_socket<Protocol>\n#endif // 
defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION)\n{\nprivate:\n  // These typedefs are intended keep this class's implementation independent\n  // of whether it's using Boost.DateClock, Boost.Chrono or std::chrono.\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n  typedef WaitTraits traits_helper;\n#else // defined(ASIO_HAS_BOOST_DATE_TIME)\n      // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n  typedef detail::chrono_time_traits<Clock, WaitTraits> traits_helper;\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n\npublic:\n  /// The protocol type.\n  typedef Protocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  /// The clock type.\n  typedef Clock clock_type;\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// (Deprecated: Use time_point.) The time type.\n  typedef typename WaitTraits::time_type time_type;\n\n  /// The time type.\n  typedef typename WaitTraits::time_point time_point;\n\n  /// (Deprecated: Use duration.) 
The duration type.\n  typedef typename WaitTraits::duration_type duration_type;\n\n  /// The duration type.\n  typedef typename WaitTraits::duration duration;\n#else\n# if !defined(ASIO_NO_DEPRECATED)\n  typedef typename traits_helper::time_type time_type;\n  typedef typename traits_helper::duration_type duration_type;\n# endif // !defined(ASIO_NO_DEPRECATED)\n  typedef typename traits_helper::time_type time_point;\n  typedef typename traits_helper::duration_type duration;\n#endif\n\n  /// Construct a basic_socket_streambuf without establishing a connection.\n  basic_socket_streambuf()\n    : detail::socket_streambuf_io_context(new io_context),\n      basic_socket<Protocol>(*default_io_context_),\n      expiry_time_(max_expiry_time())\n  {\n    init_buffers();\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Construct a basic_socket_streambuf from the supplied socket.\n  explicit basic_socket_streambuf(basic_stream_socket<protocol_type> s)\n    : detail::socket_streambuf_io_context(0),\n      basic_socket<Protocol>(std::move(s)),\n      expiry_time_(max_expiry_time())\n  {\n    init_buffers();\n  }\n\n  /// Move-construct a basic_socket_streambuf from another.\n  basic_socket_streambuf(basic_socket_streambuf&& other)\n    : detail::socket_streambuf_io_context(other),\n      basic_socket<Protocol>(std::move(other.socket())),\n      ec_(other.ec_),\n      expiry_time_(other.expiry_time_)\n  {\n    get_buffer_.swap(other.get_buffer_);\n    put_buffer_.swap(other.put_buffer_);\n    setg(other.eback(), other.gptr(), other.egptr());\n    setp(other.pptr(), other.epptr());\n    other.ec_ = asio::error_code();\n    other.expiry_time_ = max_expiry_time();\n    other.init_buffers();\n  }\n\n  /// Move-assign a basic_socket_streambuf from another.\n  basic_socket_streambuf& operator=(basic_socket_streambuf&& other)\n  {\n    this->close();\n    socket() = std::move(other.socket());\n    detail::socket_streambuf_io_context::operator=(other);\n    
ec_ = other.ec_;\n    expiry_time_ = other.expiry_time_;\n    get_buffer_.swap(other.get_buffer_);\n    put_buffer_.swap(other.put_buffer_);\n    setg(other.eback(), other.gptr(), other.egptr());\n    setp(other.pptr(), other.epptr());\n    other.ec_ = asio::error_code();\n    other.expiry_time_ = max_expiry_time();\n    other.put_buffer_.resize(buffer_size);\n    other.init_buffers();\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destructor flushes buffered data.\n  virtual ~basic_socket_streambuf()\n  {\n    if (pptr() != pbase())\n      overflow(traits_type::eof());\n  }\n\n  /// Establish a connection.\n  /**\n   * This function establishes a connection to the specified endpoint.\n   *\n   * @return \\c this if a connection was successfully established, a null\n   * pointer otherwise.\n   */\n  basic_socket_streambuf* connect(const endpoint_type& endpoint)\n  {\n    init_buffers();\n    ec_ = asio::error_code();\n    this->connect_to_endpoints(&endpoint, &endpoint + 1);\n    return !ec_ ? this : 0;\n  }\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// Establish a connection.\n  /**\n   * This function automatically establishes a connection based on the supplied\n   * resolver query parameters. The arguments are used to construct a resolver\n   * query object.\n   *\n   * @return \\c this if a connection was successfully established, a null\n   * pointer otherwise.\n   */\n  template <typename T1, ..., typename TN>\n  basic_socket_streambuf* connect(T1 t1, ..., TN tn);\n#elif defined(ASIO_HAS_VARIADIC_TEMPLATES)\n  template <typename... T>\n  basic_socket_streambuf* connect(T... x)\n  {\n    init_buffers();\n    typedef typename Protocol::resolver resolver_type;\n    resolver_type resolver(socket().get_executor());\n    connect_to_endpoints(resolver.resolve(x..., ec_));\n    return !ec_ ? 
this : 0;\n  }\n#else\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CONNECT_DEF)\n#endif\n\n  /// Close the connection.\n  /**\n   * @return \\c this if a connection was successfully established, a null\n   * pointer otherwise.\n   */\n  basic_socket_streambuf* close()\n  {\n    sync();\n    socket().close(ec_);\n    if (!ec_)\n      init_buffers();\n    return !ec_ ? this : 0;\n  }\n\n  /// Get a reference to the underlying socket.\n  basic_socket<Protocol>& socket()\n  {\n    return *this;\n  }\n\n  /// Get the last error associated with the stream buffer.\n  /**\n   * @return An \\c error_code corresponding to the last error from the stream\n   * buffer.\n   */\n  const asio::error_code& error() const\n  {\n    return ec_;\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use error().) Get the last error associated with the stream\n  /// buffer.\n  /**\n   * @return An \\c error_code corresponding to the last error from the stream\n   * buffer.\n   */\n  const asio::error_code& puberror() const\n  {\n    return error();\n  }\n\n  /// (Deprecated: Use expiry().) Get the stream buffer's expiry time as an\n  /// absolute time.\n  /**\n   * @return An absolute time value representing the stream buffer's expiry\n   * time.\n   */\n  time_point expires_at() const\n  {\n    return expiry_time_;\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Get the stream buffer's expiry time as an absolute time.\n  /**\n   * @return An absolute time value representing the stream buffer's expiry\n   * time.\n   */\n  time_point expiry() const\n  {\n    return expiry_time_;\n  }\n\n  /// Set the stream buffer's expiry time as an absolute time.\n  /**\n   * This function sets the expiry time associated with the stream. 
Stream\n   * operations performed after this time (where the operations cannot be\n   * completed using the internal buffers) will fail with the error\n   * asio::error::operation_aborted.\n   *\n   * @param expiry_time The expiry time to be used for the stream.\n   */\n  void expires_at(const time_point& expiry_time)\n  {\n    expiry_time_ = expiry_time;\n  }\n\n  /// Set the stream buffer's expiry time relative to now.\n  /**\n   * This function sets the expiry time associated with the stream. Stream\n   * operations performed after this time (where the operations cannot be\n   * completed using the internal buffers) will fail with the error\n   * asio::error::operation_aborted.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   */\n  void expires_after(const duration& expiry_time)\n  {\n    expiry_time_ = traits_helper::add(traits_helper::now(), expiry_time);\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use expiry().) Get the stream buffer's expiry time relative\n  /// to now.\n  /**\n   * @return A relative time value representing the stream buffer's expiry time.\n   */\n  duration expires_from_now() const\n  {\n    return traits_helper::subtract(expires_at(), traits_helper::now());\n  }\n\n  /// (Deprecated: Use expires_after().) Set the stream buffer's expiry time\n  /// relative to now.\n  /**\n   * This function sets the expiry time associated with the stream. 
Stream\n   * operations performed after this time (where the operations cannot be\n   * completed using the internal buffers) will fail with the error\n   * asio::error::operation_aborted.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   */\n  void expires_from_now(const duration& expiry_time)\n  {\n    expiry_time_ = traits_helper::add(traits_helper::now(), expiry_time);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nprotected:\n  int_type underflow()\n  {\n#if defined(ASIO_WINDOWS_RUNTIME)\n    ec_ = asio::error::operation_not_supported;\n    return traits_type::eof();\n#else // defined(ASIO_WINDOWS_RUNTIME)\n    if (gptr() != egptr())\n      return traits_type::eof();\n\n    for (;;)\n    {\n      // Check if we are past the expiry time.\n      if (traits_helper::less_than(expiry_time_, traits_helper::now()))\n      {\n        ec_ = asio::error::timed_out;\n        return traits_type::eof();\n      }\n\n      // Try to complete the operation without blocking.\n      if (!socket().native_non_blocking())\n        socket().native_non_blocking(true, ec_);\n      detail::buffer_sequence_adapter<mutable_buffer, mutable_buffer>\n        bufs(asio::buffer(get_buffer_) + putback_max);\n      detail::signed_size_type bytes = detail::socket_ops::recv(\n          socket().native_handle(), bufs.buffers(), bufs.count(), 0, ec_);\n\n      // Check if operation succeeded.\n      if (bytes > 0)\n      {\n        setg(&get_buffer_[0], &get_buffer_[0] + putback_max,\n            &get_buffer_[0] + putback_max + bytes);\n        return traits_type::to_int_type(*gptr());\n      }\n\n      // Check for EOF.\n      if (bytes == 0)\n      {\n        ec_ = asio::error::eof;\n        return traits_type::eof();\n      }\n\n      // Operation failed.\n      if (ec_ != asio::error::would_block\n          && ec_ != asio::error::try_again)\n        return traits_type::eof();\n\n      // Wait for socket to become ready.\n      if (detail::socket_ops::poll_read(\n    
        socket().native_handle(), 0, timeout(), ec_) < 0)\n        return traits_type::eof();\n    }\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n  }\n\n  int_type overflow(int_type c)\n  {\n#if defined(ASIO_WINDOWS_RUNTIME)\n    ec_ = asio::error::operation_not_supported;\n    return traits_type::eof();\n#else // defined(ASIO_WINDOWS_RUNTIME)\n    char_type ch = traits_type::to_char_type(c);\n\n    // Determine what needs to be sent.\n    const_buffer output_buffer;\n    if (put_buffer_.empty())\n    {\n      if (traits_type::eq_int_type(c, traits_type::eof()))\n        return traits_type::not_eof(c); // Nothing to do.\n      output_buffer = asio::buffer(&ch, sizeof(char_type));\n    }\n    else\n    {\n      output_buffer = asio::buffer(pbase(),\n          (pptr() - pbase()) * sizeof(char_type));\n    }\n\n    while (output_buffer.size() > 0)\n    {\n      // Check if we are past the expiry time.\n      if (traits_helper::less_than(expiry_time_, traits_helper::now()))\n      {\n        ec_ = asio::error::timed_out;\n        return traits_type::eof();\n      }\n\n      // Try to complete the operation without blocking.\n      if (!socket().native_non_blocking())\n        socket().native_non_blocking(true, ec_);\n      detail::buffer_sequence_adapter<\n        const_buffer, const_buffer> bufs(output_buffer);\n      detail::signed_size_type bytes = detail::socket_ops::send(\n          socket().native_handle(), bufs.buffers(), bufs.count(), 0, ec_);\n\n      // Check if operation succeeded.\n      if (bytes > 0)\n      {\n        output_buffer += static_cast<std::size_t>(bytes);\n        continue;\n      }\n\n      // Operation failed.\n      if (ec_ != asio::error::would_block\n          && ec_ != asio::error::try_again)\n        return traits_type::eof();\n\n      // Wait for socket to become ready.\n      if (detail::socket_ops::poll_write(\n            socket().native_handle(), 0, timeout(), ec_) < 0)\n        return traits_type::eof();\n    }\n\n    if 
(!put_buffer_.empty())\n    {\n      setp(&put_buffer_[0], &put_buffer_[0] + put_buffer_.size());\n\n      // If the new character is eof then our work here is done.\n      if (traits_type::eq_int_type(c, traits_type::eof()))\n        return traits_type::not_eof(c);\n\n      // Add the new character to the output buffer.\n      *pptr() = ch;\n      pbump(1);\n    }\n\n    return c;\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n  }\n\n  int sync()\n  {\n    return overflow(traits_type::eof());\n  }\n\n  std::streambuf* setbuf(char_type* s, std::streamsize n)\n  {\n    if (pptr() == pbase() && s == 0 && n == 0)\n    {\n      put_buffer_.clear();\n      setp(0, 0);\n      sync();\n      return this;\n    }\n\n    return 0;\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  basic_socket_streambuf(const basic_socket_streambuf&) ASIO_DELETED;\n  basic_socket_streambuf& operator=(\n      const basic_socket_streambuf&) ASIO_DELETED;\n\n  void init_buffers()\n  {\n    setg(&get_buffer_[0],\n        &get_buffer_[0] + putback_max,\n        &get_buffer_[0] + putback_max);\n\n    if (put_buffer_.empty())\n      setp(0, 0);\n    else\n      setp(&put_buffer_[0], &put_buffer_[0] + put_buffer_.size());\n  }\n\n  int timeout() const\n  {\n    int64_t msec = traits_helper::to_posix_duration(\n        traits_helper::subtract(expiry_time_,\n          traits_helper::now())).total_milliseconds();\n    if (msec > (std::numeric_limits<int>::max)())\n      msec = (std::numeric_limits<int>::max)();\n    else if (msec < 0)\n      msec = 0;\n    return static_cast<int>(msec);\n  }\n\n  template <typename EndpointSequence>\n  void connect_to_endpoints(const EndpointSequence& endpoints)\n  {\n    this->connect_to_endpoints(endpoints.begin(), endpoints.end());\n  }\n\n  template <typename EndpointIterator>\n  void connect_to_endpoints(EndpointIterator begin, EndpointIterator end)\n  {\n#if defined(ASIO_WINDOWS_RUNTIME)\n    ec_ = asio::error::operation_not_supported;\n#else // 
defined(ASIO_WINDOWS_RUNTIME)\n    if (ec_)\n      return;\n\n    ec_ = asio::error::not_found;\n    for (EndpointIterator i = begin; i != end; ++i)\n    {\n      // Check if we are past the expiry time.\n      if (traits_helper::less_than(expiry_time_, traits_helper::now()))\n      {\n        ec_ = asio::error::timed_out;\n        return;\n      }\n\n      // Close and reopen the socket.\n      typename Protocol::endpoint ep(*i);\n      socket().close(ec_);\n      socket().open(ep.protocol(), ec_);\n      if (ec_)\n        continue;\n\n      // Try to complete the operation without blocking.\n      if (!socket().native_non_blocking())\n        socket().native_non_blocking(true, ec_);\n      detail::socket_ops::connect(socket().native_handle(),\n          ep.data(), ep.size(), ec_);\n\n      // Check if operation succeeded.\n      if (!ec_)\n        return;\n\n      // Operation failed.\n      if (ec_ != asio::error::in_progress\n          && ec_ != asio::error::would_block)\n        continue;\n\n      // Wait for socket to become ready.\n      if (detail::socket_ops::poll_connect(\n            socket().native_handle(), timeout(), ec_) < 0)\n        continue;\n\n      // Get the error code from the connect operation.\n      int connect_error = 0;\n      size_t connect_error_len = sizeof(connect_error);\n      if (detail::socket_ops::getsockopt(socket().native_handle(), 0,\n            SOL_SOCKET, SO_ERROR, &connect_error, &connect_error_len, ec_)\n          == detail::socket_error_retval)\n        return;\n\n      // Check the result of the connect operation.\n      ec_ = asio::error_code(connect_error,\n          asio::error::get_system_category());\n      if (!ec_)\n        return;\n    }\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n  }\n\n  // Helper function to get the maximum expiry time.\n  static time_point max_expiry_time()\n  {\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n    return 
boost::posix_time::pos_infin;\n#else // defined(ASIO_HAS_BOOST_DATE_TIME)\n      // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n    return (time_point::max)();\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM)\n  }\n\n  enum { putback_max = 8 };\n  asio::error_code ec_;\n  time_point expiry_time_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n# undef ASIO_PRIVATE_CONNECT_DEF\n#endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_BASIC_SOCKET_STREAMBUF_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_stream_socket.hpp",
    "content": "//\n// basic_stream_socket.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_STREAM_SOCKET_HPP\n#define ASIO_BASIC_STREAM_SOCKET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/async_result.hpp\"\n#include \"asio/basic_socket.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(ASIO_BASIC_STREAM_SOCKET_FWD_DECL)\n#define ASIO_BASIC_STREAM_SOCKET_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_stream_socket;\n\n#endif // !defined(ASIO_BASIC_STREAM_SOCKET_FWD_DECL)\n\n/// Provides stream-oriented socket functionality.\n/**\n * The basic_stream_socket class template provides asynchronous and blocking\n * stream-oriented socket functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream.\n */\ntemplate <typename Protocol, typename Executor>\nclass basic_stream_socket\n  : public basic_socket<Protocol, Executor>\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the socket type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The socket type when rebound to the specified executor.\n    typedef 
basic_stream_socket<Protocol, Executor1> other;\n  };\n\n  /// The native representation of a socket.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#else\n  typedef typename basic_socket<Protocol,\n    Executor>::native_handle_type native_handle_type;\n#endif\n\n  /// The protocol type.\n  typedef Protocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  /// Construct a basic_stream_socket without opening it.\n  /**\n   * This constructor creates a stream socket without opening it. The socket\n   * needs to be opened and then connected or accepted before data can be sent\n   * or received on it.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   */\n  explicit basic_stream_socket(const executor_type& ex)\n    : basic_socket<Protocol, Executor>(ex)\n  {\n  }\n\n  /// Construct a basic_stream_socket without opening it.\n  /**\n   * This constructor creates a stream socket without opening it. The socket\n   * needs to be opened and then connected or accepted before data can be sent\n   * or received on it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   */\n  template <typename ExecutionContext>\n  explicit basic_stream_socket(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context)\n  {\n  }\n\n  /// Construct and open a basic_stream_socket.\n  /**\n   * This constructor creates and opens a stream socket. 
The socket needs to be\n   * connected or accepted before data can be sent or received on it.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_stream_socket(const executor_type& ex, const protocol_type& protocol)\n    : basic_socket<Protocol, Executor>(ex, protocol)\n  {\n  }\n\n  /// Construct and open a basic_stream_socket.\n  /**\n   * This constructor creates and opens a stream socket. The socket needs to be\n   * connected or accepted before data can be sent or received on it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_stream_socket(ExecutionContext& context, const protocol_type& protocol,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, protocol)\n  {\n  }\n\n  /// Construct a basic_stream_socket, opening it and binding it to the given\n  /// local endpoint.\n  /**\n   * This constructor creates a stream socket and automatically opens it bound\n   * to the specified endpoint on the local machine. 
The protocol used is the\n   * protocol associated with the given endpoint.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the stream\n   * socket will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_stream_socket(const executor_type& ex, const endpoint_type& endpoint)\n    : basic_socket<Protocol, Executor>(ex, endpoint)\n  {\n  }\n\n  /// Construct a basic_stream_socket, opening it and binding it to the given\n  /// local endpoint.\n  /**\n   * This constructor creates a stream socket and automatically opens it bound\n   * to the specified endpoint on the local machine. The protocol used is the\n   * protocol associated with the given endpoint.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param endpoint An endpoint on the local machine to which the stream\n   * socket will be bound.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_stream_socket(ExecutionContext& context, const endpoint_type& endpoint,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, endpoint)\n  {\n  }\n\n  /// Construct a basic_stream_socket on an existing native socket.\n  /**\n   * This constructor creates a stream socket object to hold an existing native\n   * socket.\n   *\n   * @param ex The I/O executor that the socket will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param 
native_socket The new underlying socket implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_stream_socket(const executor_type& ex,\n      const protocol_type& protocol, const native_handle_type& native_socket)\n    : basic_socket<Protocol, Executor>(ex, protocol, native_socket)\n  {\n  }\n\n  /// Construct a basic_stream_socket on an existing native socket.\n  /**\n   * This constructor creates a stream socket object to hold an existing native\n   * socket.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the socket will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the socket.\n   *\n   * @param protocol An object specifying protocol parameters to be used.\n   *\n   * @param native_socket The new underlying socket implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_stream_socket(ExecutionContext& context,\n      const protocol_type& protocol, const native_handle_type& native_socket,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(context, protocol, native_socket)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_stream_socket from another.\n  /**\n   * This constructor moves a stream socket from one object to another.\n   *\n   * @param other The other basic_stream_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_stream_socket(const executor_type&)\n   * constructor.\n   */\n  basic_stream_socket(basic_stream_socket&& other) ASIO_NOEXCEPT\n    : basic_socket<Protocol, Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a basic_stream_socket from another.\n  /**\n   * This 
assignment operator moves a stream socket from one object to another.\n   *\n   * @param other The other basic_stream_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_stream_socket(const executor_type&)\n   * constructor.\n   */\n  basic_stream_socket& operator=(basic_stream_socket&& other)\n  {\n    basic_socket<Protocol, Executor>::operator=(std::move(other));\n    return *this;\n  }\n\n  /// Move-construct a basic_stream_socket from a socket of another protocol\n  /// type.\n  /**\n   * This constructor moves a stream socket from one object to another.\n   *\n   * @param other The other basic_stream_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_stream_socket(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  basic_stream_socket(basic_stream_socket<Protocol1, Executor1>&& other,\n      typename enable_if<\n        is_convertible<Protocol1, Protocol>::value\n          && is_convertible<Executor1, Executor>::value\n      >::type* = 0)\n    : basic_socket<Protocol, Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a basic_stream_socket from a socket of another protocol type.\n  /**\n   * This assignment operator moves a stream socket from one object to another.\n   *\n   * @param other The other basic_stream_socket object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_stream_socket(const executor_type&)\n   * constructor.\n   */\n  template <typename Protocol1, typename Executor1>\n  typename enable_if<\n    is_convertible<Protocol1, Protocol>::value\n      && is_convertible<Executor1, Executor>::value,\n    basic_stream_socket&\n  >::type 
operator=(basic_stream_socket<Protocol1, Executor1>&& other)\n  {\n    basic_socket<Protocol, Executor>::operator=(std::move(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the socket.\n  /**\n   * This function destroys the socket, cancelling any outstanding asynchronous\n   * operations associated with the socket as if by calling @c cancel.\n   */\n  ~basic_stream_socket()\n  {\n  }\n\n  /// Send some data on the socket.\n  /**\n   * This function is used to send data on the stream socket. The function\n   * call will block until one or more bytes of the data has been sent\n   * successfully, or an until error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The send operation may not transmit all of the data to the peer.\n   * Consider using the @ref write function if you need to ensure that all data\n   * is written before the blocking operation completes.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.send(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, 0, ec);\n    asio::detail::throw_error(ec, \"send\");\n    return s;\n  }\n\n  /// Send some data on the socket.\n  /**\n   * This function is used to send data on the stream socket. 
The function\n   * call will block until one or more bytes of the data has been sent\n   * successfully, or an until error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @returns The number of bytes sent.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The send operation may not transmit all of the data to the peer.\n   * Consider using the @ref write function if you need to ensure that all data\n   * is written before the blocking operation completes.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.send(asio::buffer(data, size), 0);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, flags, ec);\n    asio::detail::throw_error(ec, \"send\");\n    return s;\n  }\n\n  /// Send some data on the socket.\n  /**\n   * This function is used to send data on the stream socket. The function\n   * call will block until one or more bytes of the data has been sent\n   * successfully, or an until error occurs.\n   *\n   * @param buffers One or more data buffers to be sent on the socket.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes sent. 
Returns 0 if an error occurred.\n   *\n   * @note The send operation may not transmit all of the data to the peer.\n   * Consider using the @ref write function if you need to ensure that all data\n   * is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, flags, ec);\n  }\n\n  /// Start an asynchronous send.\n  /**\n   * This function is used to asynchronously send data on the stream socket.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent on the socket. Although\n   * the buffers object may be copied as necessary, ownership of the underlying\n   * memory blocks is retained by the caller, which must guarantee that they\n   * remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The send operation may not transmit all of the data to the peer.\n   * Consider using the @ref async_write function if you need to ensure that all\n   * data is written before the asynchronous operation completes.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.async_send(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send(this), handler,\n        buffers, socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous send.\n  /**\n   * This function is used to asynchronously send data on the stream socket.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be sent on the socket. 
Although\n   * the buffers object may be copied as necessary, ownership of the underlying\n   * memory blocks is retained by the caller, which must guarantee that they\n   * remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the send call is to be made.\n   *\n   * @param handler The handler to be called when the send operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes sent.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The send operation may not transmit all of the data to the peer.\n   * Consider using the @ref async_write function if you need to ensure that all\n   * data is written before the asynchronous operation completes.\n   *\n   * @par Example\n   * To send a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.async_send(asio::buffer(data, size), 0, handler);\n   * @endcode\n   * See the @ref buffer documentation for information on sending multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_send(const ConstBufferSequence& buffers,\n      socket_base::message_flags flags,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        
ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send(this), handler, buffers, flags);\n  }\n\n  /// Receive some data on the socket.\n  /**\n   * This function is used to receive data on the stream socket. The function\n   * call will block until one or more bytes of data has been received\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The receive operation may not receive all of the requested number of\n   * bytes. Consider using the @ref read function if you need to ensure that the\n   * requested amount of data is read before the blocking operation completes.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.receive(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, 0, ec);\n    asio::detail::throw_error(ec, \"receive\");\n    return s;\n  }\n\n  /// Receive some data on the socket.\n  /**\n   * This function is used to receive data on the stream socket. 
The function\n   * call will block until one or more bytes of data has been received\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @returns The number of bytes received.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The receive operation may not receive all of the requested number of\n   * bytes. Consider using the @ref read function if you need to ensure that the\n   * requested amount of data is read before the blocking operation completes.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.receive(asio::buffer(data, size), 0);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, flags, ec);\n    asio::detail::throw_error(ec, \"receive\");\n    return s;\n  }\n\n  /// Receive some data on a connected socket.\n  /**\n   * This function is used to receive data on the stream socket. 
The function\n   * call will block until one or more bytes of data has been received\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes received. Returns 0 if an error occurred.\n   *\n   * @note The receive operation may not receive all of the requested number of\n   * bytes. Consider using the @ref read function if you need to ensure that the\n   * requested amount of data is read before the blocking operation completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, flags, ec);\n  }\n\n  /// Start an asynchronous receive.\n  /**\n   * This function is used to asynchronously receive data from the stream\n   * socket. The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The receive operation may not receive all of the requested number of\n   * bytes. Consider using the @ref async_read function if you need to ensure\n   * that the requested amount of data is received before the asynchronous\n   * operation completes.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.async_receive(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive(this), handler,\n        buffers, socket_base::message_flags(0));\n  }\n\n  /// Start an asynchronous receive.\n  /**\n   * This function is used to asynchronously receive data from the stream\n   * socket. 
The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be received.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param flags Flags specifying how the receive call is to be made.\n   *\n   * @param handler The handler to be called when the receive operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes received.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The receive operation may not receive all of the requested number of\n   * bytes. 
Consider using the @ref async_read function if you need to ensure\n   * that the requested amount of data is received before the asynchronous\n   * operation completes.\n   *\n   * @par Example\n   * To receive into a single data buffer use the @ref buffer function as\n   * follows:\n   * @code\n   * socket.async_receive(asio::buffer(data, size), 0, handler);\n   * @endcode\n   * See the @ref buffer documentation for information on receiving into\n   * multiple buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_receive(const MutableBufferSequence& buffers,\n      socket_base::message_flags flags,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive(this), handler, buffers, flags);\n  }\n\n  /// Write some data to the socket.\n  /**\n   * This function is used to write data to the stream socket. The function call\n   * will block until one or more bytes of the data has been written\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more data buffers to be written to the socket.\n   *\n   * @returns The number of bytes written.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. 
Consider using the @ref write function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.write_some(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, 0, ec);\n    asio::detail::throw_error(ec, \"write_some\");\n    return s;\n  }\n\n  /// Write some data to the socket.\n  /**\n   * This function is used to write data to the stream socket. The function call\n   * will block until one or more bytes of the data has been written\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more data buffers to be written to the socket.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes written. Returns 0 if an error occurred.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. 
Consider using the @ref write function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().send(\n        this->impl_.get_implementation(), buffers, 0, ec);\n  }\n\n  /// Start an asynchronous write.\n  /**\n   * This function is used to asynchronously write data to the stream socket.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be written to the socket.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the write operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes written.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The write operation may not transmit all of the data to the peer.\n   * Consider using the @ref async_write function if you need to ensure that all\n   * data is written before the asynchronous operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.async_write_some(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_send(this), handler,\n        buffers, socket_base::message_flags(0));\n  }\n\n  /// Read some data from the socket.\n  /**\n   * This function is used to read data from the stream socket. The function\n   * call will block until one or more bytes of data has been read successfully,\n   * or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @returns The number of bytes read.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. 
Consider using the @ref read function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.read_some(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, 0, ec);\n    asio::detail::throw_error(ec, \"read_some\");\n    return s;\n  }\n\n  /// Read some data from the socket.\n  /**\n   * This function is used to read data from the stream socket. The function\n   * call will block until one or more bytes of data has been read successfully,\n   * or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes read. Returns 0 if an error occurred.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. 
Consider using the @ref read function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().receive(\n        this->impl_.get_implementation(), buffers, 0, ec);\n  }\n\n  /// Start an asynchronous read.\n  /**\n   * This function is used to asynchronously read data from the stream socket.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the read operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes read.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The read operation may not read all of the requested number of bytes.\n   * Consider using the @ref async_read function if you need to ensure that the\n   * requested amount of data is read before the asynchronous operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * socket.async_read_some(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_receive(this), handler,\n        buffers, socket_base::message_flags(0));\n  }\n\nprivate:\n  class initiate_async_send\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_send(basic_stream_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your 
handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_send(\n          self_->impl_.get_implementation(), buffers, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_stream_socket* self_;\n  };\n\n  class initiate_async_receive\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_receive(basic_stream_socket* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers,\n        socket_base::message_flags flags) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_receive(\n          self_->impl_.get_implementation(), buffers, flags,\n          handler2.value, self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_stream_socket* self_;\n  };\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BASIC_STREAM_SOCKET_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_streambuf.hpp",
    "content": "//\n// basic_streambuf.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_STREAMBUF_HPP\n#define ASIO_BASIC_STREAMBUF_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include <algorithm>\n#include <cstring>\n#include <stdexcept>\n#include <streambuf>\n#include <vector>\n#include \"asio/basic_streambuf_fwd.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Automatically resizable buffer class based on std::streambuf.\n/**\n * The @c basic_streambuf class is derived from @c std::streambuf to associate\n * the streambuf's input and output sequences with one or more character\n * arrays. These character arrays are internal to the @c basic_streambuf\n * object, but direct access to the array elements is provided to permit them\n * to be used efficiently with I/O operations. Characters written to the output\n * sequence of a @c basic_streambuf object are appended to the input sequence\n * of the same object.\n *\n * The @c basic_streambuf class's public interface is intended to permit the\n * following implementation strategies:\n *\n * @li A single contiguous character array, which is reallocated as necessary\n * to accommodate changes in the size of the character sequence. This is the\n * implementation approach currently used in Asio.\n *\n * @li A sequence of one or more character arrays, where each array is of the\n * same size. 
Additional character array objects are appended to the sequence\n * to accommodate changes in the size of the character sequence.\n *\n * @li A sequence of one or more character arrays of varying sizes. Additional\n * character array objects are appended to the sequence to accommodate changes\n * in the size of the character sequence.\n *\n * The constructor for basic_streambuf accepts a @c size_t argument specifying\n * the maximum of the sum of the sizes of the input sequence and output\n * sequence. During the lifetime of the @c basic_streambuf object, the following\n * invariant holds:\n * @code size() <= max_size()@endcode\n * Any member function that would, if successful, cause the invariant to be\n * violated shall throw an exception of class @c std::length_error.\n *\n * The constructor for @c basic_streambuf takes an Allocator argument. A copy\n * of this argument is used for any memory allocation performed, by the\n * constructor and by all member functions, during the lifetime of each @c\n * basic_streambuf object.\n *\n * @par Examples\n * Writing directly from an streambuf to a socket:\n * @code\n * asio::streambuf b;\n * std::ostream os(&b);\n * os << \"Hello, World!\\n\";\n *\n * // try sending some data in input sequence\n * size_t n = sock.send(b.data());\n *\n * b.consume(n); // sent data is removed from input sequence\n * @endcode\n *\n * Reading from a socket directly into a streambuf:\n * @code\n * asio::streambuf b;\n *\n * // reserve 512 bytes in output sequence\n * asio::streambuf::mutable_buffers_type bufs = b.prepare(512);\n *\n * size_t n = sock.receive(bufs);\n *\n * // received data is \"committed\" from output sequence to input sequence\n * b.commit(n);\n *\n * std::istream is(&b);\n * std::string s;\n * is >> s;\n * @endcode\n */\n#if defined(GENERATING_DOCUMENTATION)\ntemplate <typename Allocator = std::allocator<char> >\n#else\ntemplate <typename Allocator>\n#endif\nclass basic_streambuf\n  : public std::streambuf,\n    private 
noncopyable\n{\npublic:\n#if defined(GENERATING_DOCUMENTATION)\n  /// The type used to represent the input sequence as a list of buffers.\n  typedef implementation_defined const_buffers_type;\n\n  /// The type used to represent the output sequence as a list of buffers.\n  typedef implementation_defined mutable_buffers_type;\n#else\n  typedef ASIO_CONST_BUFFER const_buffers_type;\n  typedef ASIO_MUTABLE_BUFFER mutable_buffers_type;\n#endif\n\n  /// Construct a basic_streambuf object.\n  /**\n   * Constructs a streambuf with the specified maximum size. The initial size\n   * of the streambuf's input sequence is 0.\n   */\n  explicit basic_streambuf(\n      std::size_t maximum_size = (std::numeric_limits<std::size_t>::max)(),\n      const Allocator& allocator = Allocator())\n    : max_size_(maximum_size),\n      buffer_(allocator)\n  {\n    std::size_t pend = (std::min<std::size_t>)(max_size_, buffer_delta);\n    buffer_.resize((std::max<std::size_t>)(pend, 1));\n    setg(&buffer_[0], &buffer_[0], &buffer_[0]);\n    setp(&buffer_[0], &buffer_[0] + pend);\n  }\n\n  /// Get the size of the input sequence.\n  /**\n   * @returns The size of the input sequence. The value is equal to that\n   * calculated for @c s in the following code:\n   * @code\n   * size_t s = 0;\n   * const_buffers_type bufs = data();\n   * const_buffers_type::const_iterator i = bufs.begin();\n   * while (i != bufs.end())\n   * {\n   *   const_buffer buf(*i++);\n   *   s += buf.size();\n   * }\n   * @endcode\n   */\n  std::size_t size() const ASIO_NOEXCEPT\n  {\n    return pptr() - gptr();\n  }\n\n  /// Get the maximum size of the basic_streambuf.\n  /**\n   * @returns The allowed maximum of the sum of the sizes of the input sequence\n   * and output sequence.\n   */\n  std::size_t max_size() const ASIO_NOEXCEPT\n  {\n    return max_size_;\n  }\n\n  /// Get the current capacity of the basic_streambuf.\n  /**\n   * @returns The current total capacity of the streambuf, i.e. 
for both the\n   * input sequence and output sequence.\n   */\n  std::size_t capacity() const ASIO_NOEXCEPT\n  {\n    return buffer_.capacity();\n  }\n\n  /// Get a list of buffers that represents the input sequence.\n  /**\n   * @returns An object of type @c const_buffers_type that satisfies\n   * ConstBufferSequence requirements, representing all character arrays in the\n   * input sequence.\n   *\n   * @note The returned object is invalidated by any @c basic_streambuf member\n   * function that modifies the input sequence or output sequence.\n   */\n  const_buffers_type data() const ASIO_NOEXCEPT\n  {\n    return asio::buffer(asio::const_buffer(gptr(),\n          (pptr() - gptr()) * sizeof(char_type)));\n  }\n\n  /// Get a list of buffers that represents the output sequence, with the given\n  /// size.\n  /**\n   * Ensures that the output sequence can accommodate @c n characters,\n   * reallocating character array objects as necessary.\n   *\n   * @returns An object of type @c mutable_buffers_type that satisfies\n   * MutableBufferSequence requirements, representing character array objects\n   * at the start of the output sequence such that the sum of the buffer sizes\n   * is @c n.\n   *\n   * @throws std::length_error If <tt>size() + n > max_size()</tt>.\n   *\n   * @note The returned object is invalidated by any @c basic_streambuf member\n   * function that modifies the input sequence or output sequence.\n   */\n  mutable_buffers_type prepare(std::size_t n)\n  {\n    reserve(n);\n    return asio::buffer(asio::mutable_buffer(\n          pptr(), n * sizeof(char_type)));\n  }\n\n  /// Move characters from the output sequence to the input sequence.\n  /**\n   * Appends @c n characters from the start of the output sequence to the input\n   * sequence. 
The beginning of the output sequence is advanced by @c n\n   * characters.\n   *\n   * Requires a preceding call <tt>prepare(x)</tt> where <tt>x >= n</tt>, and\n   * no intervening operations that modify the input or output sequence.\n   *\n   * @note If @c n is greater than the size of the output sequence, the entire\n   * output sequence is moved to the input sequence and no error is issued.\n   */\n  void commit(std::size_t n)\n  {\n    n = std::min<std::size_t>(n, epptr() - pptr());\n    pbump(static_cast<int>(n));\n    setg(eback(), gptr(), pptr());\n  }\n\n  /// Remove characters from the input sequence.\n  /**\n   * Removes @c n characters from the beginning of the input sequence.\n   *\n   * @note If @c n is greater than the size of the input sequence, the entire\n   * input sequence is consumed and no error is issued.\n   */\n  void consume(std::size_t n)\n  {\n    if (egptr() < pptr())\n      setg(&buffer_[0], gptr(), pptr());\n    if (gptr() + n > pptr())\n      n = pptr() - gptr();\n    gbump(static_cast<int>(n));\n  }\n\nprotected:\n  enum { buffer_delta = 128 };\n\n  /// Override std::streambuf behaviour.\n  /**\n   * Behaves according to the specification of @c std::streambuf::underflow().\n   */\n  int_type underflow()\n  {\n    if (gptr() < pptr())\n    {\n      setg(&buffer_[0], gptr(), pptr());\n      return traits_type::to_int_type(*gptr());\n    }\n    else\n    {\n      return traits_type::eof();\n    }\n  }\n\n  /// Override std::streambuf behaviour.\n  /**\n   * Behaves according to the specification of @c std::streambuf::overflow(),\n   * with the specialisation that @c std::length_error is thrown if appending\n   * the character to the input sequence would require the condition\n   * <tt>size() > max_size()</tt> to be true.\n   */\n  int_type overflow(int_type c)\n  {\n    if (!traits_type::eq_int_type(c, traits_type::eof()))\n    {\n      if (pptr() == epptr())\n      {\n        std::size_t buffer_size = pptr() - gptr();\n        if 
(buffer_size < max_size_ && max_size_ - buffer_size < buffer_delta)\n        {\n          reserve(max_size_ - buffer_size);\n        }\n        else\n        {\n          reserve(buffer_delta);\n        }\n      }\n\n      *pptr() = traits_type::to_char_type(c);\n      pbump(1);\n      return c;\n    }\n\n    return traits_type::not_eof(c);\n  }\n\n  void reserve(std::size_t n)\n  {\n    // Get current stream positions as offsets.\n    std::size_t gnext = gptr() - &buffer_[0];\n    std::size_t pnext = pptr() - &buffer_[0];\n    std::size_t pend = epptr() - &buffer_[0];\n\n    // Check if there is already enough space in the put area.\n    if (n <= pend - pnext)\n    {\n      return;\n    }\n\n    // Shift existing contents of get area to start of buffer.\n    if (gnext > 0)\n    {\n      pnext -= gnext;\n      std::memmove(&buffer_[0], &buffer_[0] + gnext, pnext);\n    }\n\n    // Ensure buffer is large enough to hold at least the specified size.\n    if (n > pend - pnext)\n    {\n      if (n <= max_size_ && pnext <= max_size_ - n)\n      {\n        pend = pnext + n;\n        buffer_.resize((std::max<std::size_t>)(pend, 1));\n      }\n      else\n      {\n        std::length_error ex(\"asio::streambuf too long\");\n        asio::detail::throw_exception(ex);\n      }\n    }\n\n    // Update stream positions.\n    setg(&buffer_[0], &buffer_[0], &buffer_[0] + pnext);\n    setp(&buffer_[0] + pnext, &buffer_[0] + pend);\n  }\n\nprivate:\n  std::size_t max_size_;\n  std::vector<char_type, Allocator> buffer_;\n\n  // Helper function to get the preferred size for reading data.\n  friend std::size_t read_size_helper(\n      basic_streambuf& sb, std::size_t max_size)\n  {\n    return std::min<std::size_t>(\n        std::max<std::size_t>(512, sb.buffer_.capacity() - sb.size()),\n        std::min<std::size_t>(max_size, sb.max_size() - sb.size()));\n  }\n};\n\n/// Adapts basic_streambuf to the dynamic buffer sequence type requirements.\n#if 
defined(GENERATING_DOCUMENTATION)\ntemplate <typename Allocator = std::allocator<char> >\n#else\ntemplate <typename Allocator>\n#endif\nclass basic_streambuf_ref\n{\npublic:\n  /// The type used to represent the input sequence as a list of buffers.\n  typedef typename basic_streambuf<Allocator>::const_buffers_type\n    const_buffers_type;\n\n  /// The type used to represent the output sequence as a list of buffers.\n  typedef typename basic_streambuf<Allocator>::mutable_buffers_type\n    mutable_buffers_type;\n\n  /// Construct a basic_streambuf_ref for the given basic_streambuf object.\n  explicit basic_streambuf_ref(basic_streambuf<Allocator>& sb)\n    : sb_(sb)\n  {\n  }\n\n  /// Copy construct a basic_streambuf_ref.\n  basic_streambuf_ref(const basic_streambuf_ref& other) ASIO_NOEXCEPT\n    : sb_(other.sb_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move construct a basic_streambuf_ref.\n  basic_streambuf_ref(basic_streambuf_ref&& other) ASIO_NOEXCEPT\n    : sb_(other.sb_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Get the size of the input sequence.\n  std::size_t size() const ASIO_NOEXCEPT\n  {\n    return sb_.size();\n  }\n\n  /// Get the maximum size of the dynamic buffer.\n  std::size_t max_size() const ASIO_NOEXCEPT\n  {\n    return sb_.max_size();\n  }\n\n  /// Get the current capacity of the dynamic buffer.\n  std::size_t capacity() const ASIO_NOEXCEPT\n  {\n    return sb_.capacity();\n  }\n\n  /// Get a list of buffers that represents the input sequence.\n  const_buffers_type data() const ASIO_NOEXCEPT\n  {\n    return sb_.data();\n  }\n\n  /// Get a list of buffers that represents the output sequence, with the given\n  /// size.\n  mutable_buffers_type prepare(std::size_t n)\n  {\n    return sb_.prepare(n);\n  }\n\n  /// Move bytes from the output sequence to the input sequence.\n  void commit(std::size_t n)\n  {\n    return sb_.commit(n);\n  }\n\n  /// Remove 
characters from the input sequence.\n  void consume(std::size_t n)\n  {\n    return sb_.consume(n);\n  }\n\nprivate:\n  basic_streambuf<Allocator>& sb_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_BASIC_STREAMBUF_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_streambuf_fwd.hpp",
    "content": "//\n// basic_streambuf_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_STREAMBUF_FWD_HPP\n#define ASIO_BASIC_STREAMBUF_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include <memory>\n\nnamespace asio {\n\ntemplate <typename Allocator = std::allocator<char> >\nclass basic_streambuf;\n\ntemplate <typename Allocator = std::allocator<char> >\nclass basic_streambuf_ref;\n\n} // namespace asio\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_BASIC_STREAMBUF_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/basic_waitable_timer.hpp",
    "content": "//\n// basic_waitable_timer.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BASIC_WAITABLE_TIMER_HPP\n#define ASIO_BASIC_WAITABLE_TIMER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/detail/chrono_time_traits.hpp\"\n#include \"asio/detail/deadline_timer_service.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/wait_traits.hpp\"\n\n#if defined(ASIO_HAS_MOVE)\n# include <utility>\n#endif // defined(ASIO_HAS_MOVE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(ASIO_BASIC_WAITABLE_TIMER_FWD_DECL)\n#define ASIO_BASIC_WAITABLE_TIMER_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Clock,\n    typename WaitTraits = asio::wait_traits<Clock>,\n    typename Executor = executor>\nclass basic_waitable_timer;\n\n#endif // !defined(ASIO_BASIC_WAITABLE_TIMER_FWD_DECL)\n\n/// Provides waitable timer functionality.\n/**\n * The basic_waitable_timer class template provides the ability to perform a\n * blocking or asynchronous wait for a timer to expire.\n *\n * A waitable timer is always in one of two states: \"expired\" or \"not expired\".\n * If the wait() or async_wait() function is called on an expired timer, the\n * wait operation will complete immediately.\n *\n * Most applications will use one of the asio::steady_timer,\n * asio::system_timer or asio::high_resolution_timer typedefs.\n *\n 
* @note This waitable timer functionality is for use with the C++11 standard\n * library's @c &lt;chrono&gt; facility, or with the Boost.Chrono library.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Examples\n * Performing a blocking wait (C++11):\n * @code\n * // Construct a timer without setting an expiry time.\n * asio::steady_timer timer(my_context);\n *\n * // Set an expiry time relative to now.\n * timer.expires_after(std::chrono::seconds(5));\n *\n * // Wait for the timer to expire.\n * timer.wait();\n * @endcode\n *\n * @par \n * Performing an asynchronous wait (C++11):\n * @code\n * void handler(const asio::error_code& error)\n * {\n *   if (!error)\n *   {\n *     // Timer expired.\n *   }\n * }\n *\n * ...\n *\n * // Construct a timer with an absolute expiry time.\n * asio::steady_timer timer(my_context,\n *     std::chrono::steady_clock::now() + std::chrono::seconds(60));\n *\n * // Start an asynchronous wait.\n * timer.async_wait(handler);\n * @endcode\n *\n * @par Changing an active waitable timer's expiry time\n *\n * Changing the expiry time of a timer while there are pending asynchronous\n * waits causes those wait operations to be cancelled. To ensure that the action\n * associated with the timer is performed only once, use something like this:\n * used:\n *\n * @code\n * void on_some_event()\n * {\n *   if (my_timer.expires_after(seconds(5)) > 0)\n *   {\n *     // We managed to cancel the timer. 
Start new asynchronous wait.\n *     my_timer.async_wait(on_timeout);\n *   }\n *   else\n *   {\n *     // Too late, timer has already expired!\n *   }\n * }\n *\n * void on_timeout(const asio::error_code& e)\n * {\n *   if (e != asio::error::operation_aborted)\n *   {\n *     // Timer was not cancelled, take necessary action.\n *   }\n * }\n * @endcode\n *\n * @li The asio::basic_waitable_timer::expires_after() function\n * cancels any pending asynchronous waits, and returns the number of\n * asynchronous waits that were cancelled. If it returns 0 then you were too\n * late and the wait handler has already been executed, or will soon be\n * executed. If it returns 1 then the wait handler was successfully cancelled.\n *\n * @li If a wait handler is cancelled, the asio::error_code passed to\n * it contains the value asio::error::operation_aborted.\n */\ntemplate <typename Clock, typename WaitTraits, typename Executor>\nclass basic_waitable_timer\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the timer type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The timer type when rebound to the specified executor.\n    typedef basic_waitable_timer<Clock, WaitTraits, Executor1> other;\n  };\n\n  /// The clock type.\n  typedef Clock clock_type;\n\n  /// The duration type of the clock.\n  typedef typename clock_type::duration duration;\n\n  /// The time point type of the clock.\n  typedef typename clock_type::time_point time_point;\n\n  /// The wait traits type.\n  typedef WaitTraits traits_type;\n\n  /// Constructor.\n  /**\n   * This constructor creates a timer without setting an expiry time. 
The\n   * expires_at() or expires_after() functions must be called to set an expiry\n   * time before the timer can be waited on.\n   *\n   * @param ex The I/O executor that the timer will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the timer.\n   */\n  explicit basic_waitable_timer(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Constructor.\n  /**\n   * This constructor creates a timer without setting an expiry time. The\n   * expires_at() or expires_after() functions must be called to set an expiry\n   * time before the timer can be waited on.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the timer will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the timer.\n   */\n  template <typename ExecutionContext>\n  explicit basic_waitable_timer(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Constructor to set a particular expiry time as an absolute time.\n  /**\n   * This constructor creates a timer and sets the expiry time.\n   *\n   * @param ex The I/O executor object that the timer will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the timer.\n   *\n   * @param expiry_time The expiry time to be used for the timer, expressed\n   * as an absolute time.\n   */\n  basic_waitable_timer(const executor_type& ex, const time_point& expiry_time)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().expires_at(impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_at\");\n  }\n\n  /// Constructor to set a particular expiry time as an absolute time.\n  /**\n   * This constructor creates a timer and sets the expiry time.\n   *\n   * @param context An execution context which provides the I/O executor 
that\n   * the timer will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the timer.\n   *\n   * @param expiry_time The expiry time to be used for the timer, expressed\n   * as an absolute time.\n   */\n  template <typename ExecutionContext>\n  explicit basic_waitable_timer(ExecutionContext& context,\n      const time_point& expiry_time,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().expires_at(impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_at\");\n  }\n\n  /// Constructor to set a particular expiry time relative to now.\n  /**\n   * This constructor creates a timer and sets the expiry time.\n   *\n   * @param ex The I/O executor that the timer will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the timer.\n   *\n   * @param expiry_time The expiry time to be used for the timer, relative to\n   * now.\n   */\n  basic_waitable_timer(const executor_type& ex, const duration& expiry_time)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().expires_after(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_after\");\n  }\n\n  /// Constructor to set a particular expiry time relative to now.\n  /**\n   * This constructor creates a timer and sets the expiry time.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the timer will use, by default, to dispatch handlers for any asynchronous\n   * operations performed on the timer.\n   *\n   * @param expiry_time The expiry time to be used for the timer, relative to\n   * now.\n   */\n  template <typename ExecutionContext>\n  explicit basic_waitable_timer(ExecutionContext& context,\n      const duration& expiry_time,\n      typename enable_if<\n        
is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().expires_after(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_after\");\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a basic_waitable_timer from another.\n  /**\n   * This constructor moves a timer from one object to another.\n   *\n   * @param other The other basic_waitable_timer object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_waitable_timer(const executor_type&)\n   * constructor.\n   */\n  basic_waitable_timer(basic_waitable_timer&& other)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a basic_waitable_timer from another.\n  /**\n   * This assignment operator moves a timer from one object to another. 
Cancels\n   * any outstanding asynchronous operations associated with the target object.\n   *\n   * @param other The other basic_waitable_timer object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_waitable_timer(const executor_type&)\n   * constructor.\n   */\n  basic_waitable_timer& operator=(basic_waitable_timer&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the timer.\n  /**\n   * This function destroys the timer, cancelling any outstanding asynchronous\n   * wait operations associated with the timer as if by calling @c cancel.\n   */\n  ~basic_waitable_timer()\n  {\n  }\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n  /// Cancel any asynchronous operations that are waiting on the timer.\n  /**\n   * This function forces the completion of any pending asynchronous wait\n   * operations against the timer. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * Cancelling the timer does not change the expiry time.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when cancel() is called, then the\n   * handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t cancel()\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n    return s;\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use non-error_code overload.) Cancel any asynchronous\n  /// operations that are waiting on the timer.\n  /**\n   * This function forces the completion of any pending asynchronous wait\n   * operations against the timer. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * Cancelling the timer does not change the expiry time.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @note If the timer has already expired when cancel() is called, then the\n   * handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t cancel(asio::error_code& ec)\n  {\n    return impl_.get_service().cancel(impl_.get_implementation(), ec);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Cancels one asynchronous operation that is waiting on the timer.\n  /**\n   * This function forces the completion of one pending asynchronous wait\n   * operation against the timer. Handlers are cancelled in FIFO order. The\n   * handler for the cancelled operation will be invoked with the\n   * asio::error::operation_aborted error code.\n   *\n   * Cancelling the timer does not change the expiry time.\n   *\n   * @return The number of asynchronous operations that were cancelled. 
That is,\n   * either 0 or 1.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when cancel_one() is called, then\n   * the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t cancel_one()\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().cancel_one(\n        impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel_one\");\n    return s;\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use non-error_code overload.) Cancels one asynchronous\n  /// operation that is waiting on the timer.\n  /**\n   * This function forces the completion of one pending asynchronous wait\n   * operation against the timer. Handlers are cancelled in FIFO order. The\n   * handler for the cancelled operation will be invoked with the\n   * asio::error::operation_aborted error code.\n   *\n   * Cancelling the timer does not change the expiry time.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of asynchronous operations that were cancelled. 
That is,\n   * either 0 or 1.\n   *\n   * @note If the timer has already expired when cancel_one() is called, then\n   * the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t cancel_one(asio::error_code& ec)\n  {\n    return impl_.get_service().cancel_one(impl_.get_implementation(), ec);\n  }\n\n  /// (Deprecated: Use expiry().) Get the timer's expiry time as an absolute\n  /// time.\n  /**\n   * This function may be used to obtain the timer's current expiry time.\n   * Whether the timer has expired or not does not affect this value.\n   */\n  time_point expires_at() const\n  {\n    return impl_.get_service().expires_at(impl_.get_implementation());\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Get the timer's expiry time as an absolute time.\n  /**\n   * This function may be used to obtain the timer's current expiry time.\n   * Whether the timer has expired or not does not affect this value.\n   */\n  time_point expiry() const\n  {\n    return impl_.get_service().expiry(impl_.get_implementation());\n  }\n\n  /// Set the timer's expiry time as an absolute time.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when expires_at() is called, then\n   * the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_at(const time_point& expiry_time)\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().expires_at(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_at\");\n    return s;\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use non-error_code overload.) Set the timer's expiry time as\n  /// an absolute time.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @note If the timer has already expired when expires_at() is called, then\n   * the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_at(const time_point& expiry_time,\n      asio::error_code& ec)\n  {\n    return impl_.get_service().expires_at(\n        impl_.get_implementation(), expiry_time, ec);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Set the timer's expiry time relative to now.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when expires_after() is called,\n   * then the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_after(const duration& expiry_time)\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().expires_after(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_after\");\n    return s;\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use expiry().) Get the timer's expiry time relative to now.\n  /**\n   * This function may be used to obtain the timer's current expiry time.\n   * Whether the timer has expired or not does not affect this value.\n   */\n  duration expires_from_now() const\n  {\n    return impl_.get_service().expires_from_now(impl_.get_implementation());\n  }\n\n  /// (Deprecated: Use expires_after().) Set the timer's expiry time relative\n  /// to now.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note If the timer has already expired when expires_from_now() is called,\n   * then the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_from_now(const duration& expiry_time)\n  {\n    asio::error_code ec;\n    std::size_t s = impl_.get_service().expires_from_now(\n        impl_.get_implementation(), expiry_time, ec);\n    asio::detail::throw_error(ec, \"expires_from_now\");\n    return s;\n  }\n\n  /// (Deprecated: Use expires_after().) Set the timer's expiry time relative\n  /// to now.\n  /**\n   * This function sets the expiry time. Any pending asynchronous wait\n   * operations will be cancelled. 
The handler for each cancelled operation will\n   * be invoked with the asio::error::operation_aborted error code.\n   *\n   * @param expiry_time The expiry time to be used for the timer.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of asynchronous operations that were cancelled.\n   *\n   * @note If the timer has already expired when expires_from_now() is called,\n   * then the handlers for asynchronous wait operations will:\n   *\n   * @li have already been invoked; or\n   *\n   * @li have been queued for invocation in the near future.\n   *\n   * These handlers can no longer be cancelled, and therefore are passed an\n   * error code that indicates the successful completion of the wait operation.\n   */\n  std::size_t expires_from_now(const duration& expiry_time,\n      asio::error_code& ec)\n  {\n    return impl_.get_service().expires_from_now(\n        impl_.get_implementation(), expiry_time, ec);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Perform a blocking wait on the timer.\n  /**\n   * This function is used to wait for the timer to expire. This function\n   * blocks and does not return until the timer has expired.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void wait()\n  {\n    asio::error_code ec;\n    impl_.get_service().wait(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"wait\");\n  }\n\n  /// Perform a blocking wait on the timer.\n  /**\n   * This function is used to wait for the timer to expire. This function\n   * blocks and does not return until the timer has expired.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  void wait(asio::error_code& ec)\n  {\n    impl_.get_service().wait(impl_.get_implementation(), ec);\n  }\n\n  /// Start an asynchronous wait on the timer.\n  /**\n   * This function may be used to initiate an asynchronous wait against the\n   * timer. 
It always returns immediately.\n   *\n   * For each call to async_wait(), the supplied handler will be called exactly\n   * once. The handler will be called when:\n   *\n   * @li The timer has expired.\n   *\n   * @li The timer was cancelled, in which case the handler is passed the error\n   * code asio::error::operation_aborted.\n   *\n   * @param handler The handler to be called when the timer expires. Copies\n   * will be made of the handler as required. The function signature of the\n   * handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        WaitHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WaitHandler,\n      void (asio::error_code))\n  async_wait(\n      ASIO_MOVE_ARG(WaitHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WaitHandler, void (asio::error_code)>(\n        initiate_async_wait(this), handler);\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  basic_waitable_timer(const basic_waitable_timer&) ASIO_DELETED;\n  basic_waitable_timer& operator=(\n      const basic_waitable_timer&) ASIO_DELETED;\n\n  class initiate_async_wait\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_wait(basic_waitable_timer* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WaitHandler>\n    void operator()(ASIO_MOVE_ARG(WaitHandler) handler) const\n    {\n      // If you get an error on the 
following line it means that your handler\n      // does not meet the documented type requirements for a WaitHandler.\n      ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check;\n\n      detail::non_const_lvalue<WaitHandler> handler2(handler);\n      self_->impl_.get_service().async_wait(\n          self_->impl_.get_implementation(), handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_waitable_timer* self_;\n  };\n\n  detail::io_object_impl<\n    detail::deadline_timer_service<\n      detail::chrono_time_traits<Clock, WaitTraits> >,\n    executor_type > impl_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BASIC_WAITABLE_TIMER_HPP\n"
  },
  {
    "path": "src/third_party/asio/bind_executor.hpp",
    "content": "//\n// bind_executor.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BIND_EXECUTOR_HPP\n#define ASIO_BIND_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/detail/variadic_templates.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/is_executor.hpp\"\n#include \"asio/uses_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nstruct executor_binder_check\n{\n  typedef void type;\n};\n\n// Helper to automatically define nested typedef result_type.\n\ntemplate <typename T, typename = void>\nstruct executor_binder_result_type\n{\nprotected:\n  typedef void result_type_or_void;\n};\n\ntemplate <typename T>\nstruct executor_binder_result_type<T,\n  typename executor_binder_check<typename T::result_type>::type>\n{\n  typedef typename T::result_type result_type;\nprotected:\n  typedef result_type result_type_or_void;\n};\n\ntemplate <typename R>\nstruct executor_binder_result_type<R(*)()>\n{\n  typedef R result_type;\nprotected:\n  typedef result_type result_type_or_void;\n};\n\ntemplate <typename R>\nstruct executor_binder_result_type<R(&)()>\n{\n  typedef R result_type;\nprotected:\n  typedef result_type result_type_or_void;\n};\n\ntemplate <typename R, typename A1>\nstruct executor_binder_result_type<R(*)(A1)>\n{\n  typedef R result_type;\nprotected:\n  typedef result_type result_type_or_void;\n};\n\ntemplate <typename R, typename A1>\nstruct 
executor_binder_result_type<R(&)(A1)>\n{\n  typedef R result_type;\nprotected:\n  typedef result_type result_type_or_void;\n};\n\ntemplate <typename R, typename A1, typename A2>\nstruct executor_binder_result_type<R(*)(A1, A2)>\n{\n  typedef R result_type;\nprotected:\n  typedef result_type result_type_or_void;\n};\n\ntemplate <typename R, typename A1, typename A2>\nstruct executor_binder_result_type<R(&)(A1, A2)>\n{\n  typedef R result_type;\nprotected:\n  typedef result_type result_type_or_void;\n};\n\n// Helper to automatically define nested typedef argument_type.\n\ntemplate <typename T, typename = void>\nstruct executor_binder_argument_type {};\n\ntemplate <typename T>\nstruct executor_binder_argument_type<T,\n  typename executor_binder_check<typename T::argument_type>::type>\n{\n  typedef typename T::argument_type argument_type;\n};\n\ntemplate <typename R, typename A1>\nstruct executor_binder_argument_type<R(*)(A1)>\n{\n  typedef A1 argument_type;\n};\n\ntemplate <typename R, typename A1>\nstruct executor_binder_argument_type<R(&)(A1)>\n{\n  typedef A1 argument_type;\n};\n\n// Helper to automatically define nested typedefs first_argument_type and\n// second_argument_type.\n\ntemplate <typename T, typename = void>\nstruct executor_binder_argument_types {};\n\ntemplate <typename T>\nstruct executor_binder_argument_types<T,\n  typename executor_binder_check<typename T::first_argument_type>::type>\n{\n  typedef typename T::first_argument_type first_argument_type;\n  typedef typename T::second_argument_type second_argument_type;\n};\n\ntemplate <typename R, typename A1, typename A2>\nstruct executor_binder_argument_type<R(*)(A1, A2)>\n{\n  typedef A1 first_argument_type;\n  typedef A2 second_argument_type;\n};\n\ntemplate <typename R, typename A1, typename A2>\nstruct executor_binder_argument_type<R(&)(A1, A2)>\n{\n  typedef A1 first_argument_type;\n  typedef A2 second_argument_type;\n};\n\n// Helper to:\n// - Apply the empty base optimisation to the 
executor.\n// - Perform uses_executor construction of the target type, if required.\n\ntemplate <typename T, typename Executor, bool UsesExecutor>\nclass executor_binder_base;\n\ntemplate <typename T, typename Executor>\nclass executor_binder_base<T, Executor, true>\n  : protected Executor\n{\nprotected:\n  template <typename E, typename U>\n  executor_binder_base(ASIO_MOVE_ARG(E) e, ASIO_MOVE_ARG(U) u)\n    : executor_(ASIO_MOVE_CAST(E)(e)),\n      target_(executor_arg_t(), executor_, ASIO_MOVE_CAST(U)(u))\n  {\n  }\n\n  Executor executor_;\n  T target_;\n};\n\ntemplate <typename T, typename Executor>\nclass executor_binder_base<T, Executor, false>\n{\nprotected:\n  template <typename E, typename U>\n  executor_binder_base(ASIO_MOVE_ARG(E) e, ASIO_MOVE_ARG(U) u)\n    : executor_(ASIO_MOVE_CAST(E)(e)),\n      target_(ASIO_MOVE_CAST(U)(u))\n  {\n  }\n\n  Executor executor_;\n  T target_;\n};\n\n// Helper to enable SFINAE on zero-argument operator() below.\n\ntemplate <typename T, typename = void>\nstruct executor_binder_result_of0\n{\n  typedef void type;\n};\n\ntemplate <typename T>\nstruct executor_binder_result_of0<T,\n  typename executor_binder_check<typename result_of<T()>::type>::type>\n{\n  typedef typename result_of<T()>::type type;\n};\n\n} // namespace detail\n\n/// A call wrapper type to bind an executor of type @c Executor to an object of\n/// type @c T.\ntemplate <typename T, typename Executor>\nclass executor_binder\n#if !defined(GENERATING_DOCUMENTATION)\n  : public detail::executor_binder_result_type<T>,\n    public detail::executor_binder_argument_type<T>,\n    public detail::executor_binder_argument_types<T>,\n    private detail::executor_binder_base<\n      T, Executor, uses_executor<T, Executor>::value>\n#endif // !defined(GENERATING_DOCUMENTATION)\n{\npublic:\n  /// The type of the target object.\n  typedef T target_type;\n\n  /// The type of the associated executor.\n  typedef Executor executor_type;\n\n#if defined(GENERATING_DOCUMENTATION)\n  
/// The return type if a function.\n  /**\n   * The type of @c result_type is based on the type @c T of the wrapper's\n   * target object:\n   *\n   * @li if @c T is a pointer to function type, @c result_type is a synonym for\n   * the return type of @c T;\n   *\n   * @li if @c T is a class type with a member type @c result_type, then @c\n   * result_type is a synonym for @c T::result_type;\n   *\n   * @li otherwise @c result_type is not defined.\n   */\n  typedef see_below result_type;\n\n  /// The type of the function's argument.\n  /**\n   * The type of @c argument_type is based on the type @c T of the wrapper's\n   * target object:\n   *\n   * @li if @c T is a pointer to a function type accepting a single argument,\n   * @c argument_type is a synonym for the return type of @c T;\n   *\n   * @li if @c T is a class type with a member type @c argument_type, then @c\n   * argument_type is a synonym for @c T::argument_type;\n   *\n   * @li otherwise @c argument_type is not defined.\n   */\n  typedef see_below argument_type;\n\n  /// The type of the function's first argument.\n  /**\n   * The type of @c first_argument_type is based on the type @c T of the\n   * wrapper's target object:\n   *\n   * @li if @c T is a pointer to a function type accepting two arguments, @c\n   * first_argument_type is a synonym for the return type of @c T;\n   *\n   * @li if @c T is a class type with a member type @c first_argument_type,\n   * then @c first_argument_type is a synonym for @c T::first_argument_type;\n   *\n   * @li otherwise @c first_argument_type is not defined.\n   */\n  typedef see_below first_argument_type;\n\n  /// The type of the function's second argument.\n  /**\n   * The type of @c second_argument_type is based on the type @c T of the\n   * wrapper's target object:\n   *\n   * @li if @c T is a pointer to a function type accepting two arguments, @c\n   * second_argument_type is a synonym for the return type of @c T;\n   *\n   * @li if @c T is a class type with a 
member type @c first_argument_type,\n   * then @c second_argument_type is a synonym for @c T::second_argument_type;\n   *\n   * @li otherwise @c second_argument_type is not defined.\n   */\n  typedef see_below second_argument_type;\n#endif // defined(GENERATING_DOCUMENTATION)\n\n  /// Construct an executor wrapper for the specified object.\n  /**\n   * This constructor is only valid if the type @c T is constructible from type\n   * @c U.\n   */\n  template <typename U>\n  executor_binder(executor_arg_t, const executor_type& e,\n      ASIO_MOVE_ARG(U) u)\n    : base_type(e, ASIO_MOVE_CAST(U)(u))\n  {\n  }\n\n  /// Copy constructor.\n  executor_binder(const executor_binder& other)\n    : base_type(other.get_executor(), other.get())\n  {\n  }\n\n  /// Construct a copy, but specify a different executor.\n  executor_binder(executor_arg_t, const executor_type& e,\n      const executor_binder& other)\n    : base_type(e, other.get())\n  {\n  }\n\n  /// Construct a copy of a different executor wrapper type.\n  /**\n   * This constructor is only valid if the @c Executor type is constructible\n   * from type @c OtherExecutor, and the type @c T is constructible from type\n   * @c U.\n   */\n  template <typename U, typename OtherExecutor>\n  executor_binder(const executor_binder<U, OtherExecutor>& other)\n    : base_type(other.get_executor(), other.get())\n  {\n  }\n\n  /// Construct a copy of a different executor wrapper type, but specify a\n  /// different executor.\n  /**\n   * This constructor is only valid if the type @c T is constructible from type\n   * @c U.\n   */\n  template <typename U, typename OtherExecutor>\n  executor_binder(executor_arg_t, const executor_type& e,\n      const executor_binder<U, OtherExecutor>& other)\n    : base_type(e, other.get())\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Move constructor.\n  executor_binder(executor_binder&& other)\n    : 
base_type(ASIO_MOVE_CAST(executor_type)(other.get_executor()),\n        ASIO_MOVE_CAST(T)(other.get()))\n  {\n  }\n\n  /// Move construct the target object, but specify a different executor.\n  executor_binder(executor_arg_t, const executor_type& e,\n      executor_binder&& other)\n    : base_type(e, ASIO_MOVE_CAST(T)(other.get()))\n  {\n  }\n\n  /// Move construct from a different executor wrapper type.\n  template <typename U, typename OtherExecutor>\n  executor_binder(executor_binder<U, OtherExecutor>&& other)\n    : base_type(ASIO_MOVE_CAST(OtherExecutor)(other.get_executor()),\n        ASIO_MOVE_CAST(U)(other.get()))\n  {\n  }\n\n  /// Move construct from a different executor wrapper type, but specify a\n  /// different executor.\n  template <typename U, typename OtherExecutor>\n  executor_binder(executor_arg_t, const executor_type& e,\n      executor_binder<U, OtherExecutor>&& other)\n    : base_type(e, ASIO_MOVE_CAST(U)(other.get()))\n  {\n  }\n\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destructor.\n  ~executor_binder()\n  {\n  }\n\n  /// Obtain a reference to the target object.\n  target_type& get() ASIO_NOEXCEPT\n  {\n    return this->target_;\n  }\n\n  /// Obtain a reference to the target object.\n  const target_type& get() const ASIO_NOEXCEPT\n  {\n    return this->target_;\n  }\n\n  /// Obtain the associated executor.\n  executor_type get_executor() const ASIO_NOEXCEPT\n  {\n    return this->executor_;\n  }\n\n#if defined(GENERATING_DOCUMENTATION)\n\n  template <typename... Args> auto operator()(Args&& ...);\n  template <typename... Args> auto operator()(Args&& ...) const;\n\n#elif defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  /// Forwarding function call operator.\n  template <typename... Args>\n  typename result_of<T(Args...)>::type operator()(\n      ASIO_MOVE_ARG(Args)... args)\n  {\n    return this->target_(ASIO_MOVE_CAST(Args)(args)...);\n  }\n\n  /// Forwarding function call operator.\n  template <typename... 
Args>\n  typename result_of<T(Args...)>::type operator()(\n      ASIO_MOVE_ARG(Args)... args) const\n  {\n    return this->target_(ASIO_MOVE_CAST(Args)(args)...);\n  }\n\n#elif defined(ASIO_HAS_STD_TYPE_TRAITS) && !defined(_MSC_VER)\n\n  typename detail::executor_binder_result_of0<T>::type operator()()\n  {\n    return this->target_();\n  }\n\n  typename detail::executor_binder_result_of0<T>::type operator()() const\n  {\n    return this->target_();\n  }\n\n#define ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  typename result_of<T(ASIO_VARIADIC_TARGS(n))>::type operator()( \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    return this->target_(ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  typename result_of<T(ASIO_VARIADIC_TARGS(n))>::type operator()( \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) const \\\n  { \\\n    return this->target_(ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF)\n#undef ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF\n\n#else // defined(ASIO_HAS_STD_TYPE_TRAITS) && !defined(_MSC_VER)\n\n  typedef typename detail::executor_binder_result_type<T>::result_type_or_void\n    result_type_or_void;\n\n  result_type_or_void operator()()\n  {\n    return this->target_();\n  }\n\n  result_type_or_void operator()() const\n  {\n    return this->target_();\n  }\n\n#define ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  result_type_or_void operator()( \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    return this->target_(ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  result_type_or_void operator()( \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) const \\\n  { \\\n    return this->target_(ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF)\n#undef 
ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF\n\n#endif // defined(ASIO_HAS_STD_TYPE_TRAITS) && !defined(_MSC_VER)\n\nprivate:\n  typedef detail::executor_binder_base<T, Executor,\n    uses_executor<T, Executor>::value> base_type;\n};\n\n/// Associate an object of type @c T with an executor of type @c Executor.\ntemplate <typename Executor, typename T>\ninline executor_binder<typename decay<T>::type, Executor>\nbind_executor(const Executor& ex, ASIO_MOVE_ARG(T) t,\n    typename enable_if<is_executor<Executor>::value>::type* = 0)\n{\n  return executor_binder<typename decay<T>::type, Executor>(\n      executor_arg_t(), ex, ASIO_MOVE_CAST(T)(t));\n}\n\n/// Associate an object of type @c T with an execution context's executor.\ntemplate <typename ExecutionContext, typename T>\ninline executor_binder<typename decay<T>::type,\n  typename ExecutionContext::executor_type>\nbind_executor(ExecutionContext& ctx, ASIO_MOVE_ARG(T) t,\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type* = 0)\n{\n  return executor_binder<typename decay<T>::type,\n    typename ExecutionContext::executor_type>(\n      executor_arg_t(), ctx.get_executor(), ASIO_MOVE_CAST(T)(t));\n}\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename T, typename Executor>\nstruct uses_executor<executor_binder<T, Executor>, Executor>\n  : true_type {};\n\ntemplate <typename T, typename Executor, typename Signature>\nclass async_result<executor_binder<T, Executor>, Signature>\n{\npublic:\n  typedef executor_binder<\n    typename async_result<T, Signature>::completion_handler_type, Executor>\n      completion_handler_type;\n\n  typedef typename async_result<T, Signature>::return_type return_type;\n\n  explicit async_result(executor_binder<T, Executor>& b)\n    : target_(b.get())\n  {\n  }\n\n  return_type get()\n  {\n    return target_.get();\n  }\n\nprivate:\n  async_result(const async_result&) ASIO_DELETED;\n  async_result& operator=(const async_result&) 
ASIO_DELETED;\n\n  async_result<T, Signature> target_;\n};\n\ntemplate <typename T, typename Executor, typename Allocator>\nstruct associated_allocator<executor_binder<T, Executor>, Allocator>\n{\n  typedef typename associated_allocator<T, Allocator>::type type;\n\n  static type get(const executor_binder<T, Executor>& b,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<T, Allocator>::get(b.get(), a);\n  }\n};\n\ntemplate <typename T, typename Executor, typename Executor1>\nstruct associated_executor<executor_binder<T, Executor>, Executor1>\n{\n  typedef Executor type;\n\n  static type get(const executor_binder<T, Executor>& b,\n      const Executor1& = Executor1()) ASIO_NOEXCEPT\n  {\n    return b.get_executor();\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BIND_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/buffer.hpp",
    "content": "//\n// buffer.hpp\n// ~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BUFFER_HPP\n#define ASIO_BUFFER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <cstring>\n#include <limits>\n#include <stdexcept>\n#include <string>\n#include <vector>\n#include \"asio/detail/array_fwd.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/string_view.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1700)\n# if defined(_HAS_ITERATOR_DEBUGGING) && (_HAS_ITERATOR_DEBUGGING != 0)\n#  if !defined(ASIO_DISABLE_BUFFER_DEBUGGING)\n#   define ASIO_ENABLE_BUFFER_DEBUGGING\n#  endif // !defined(ASIO_DISABLE_BUFFER_DEBUGGING)\n# endif // defined(_HAS_ITERATOR_DEBUGGING)\n#endif // defined(ASIO_MSVC) && (ASIO_MSVC >= 1700)\n\n#if defined(__GNUC__)\n# if defined(_GLIBCXX_DEBUG)\n#  if !defined(ASIO_DISABLE_BUFFER_DEBUGGING)\n#   define ASIO_ENABLE_BUFFER_DEBUGGING\n#  endif // !defined(ASIO_DISABLE_BUFFER_DEBUGGING)\n# endif // defined(_GLIBCXX_DEBUG)\n#endif // defined(__GNUC__)\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n# include \"asio/detail/functional.hpp\"\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n\n#if defined(ASIO_HAS_BOOST_WORKAROUND)\n# include <boost/detail/workaround.hpp>\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x582)) \\\n    || BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x590))\n#  define ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND\n# endif // BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x582))\n        // || BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x590))\n#endif // 
defined(ASIO_HAS_BOOST_WORKAROUND)\n\n#if defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND)\n# include \"asio/detail/type_traits.hpp\"\n#endif // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nclass mutable_buffer;\nclass const_buffer;\n\n/// Holds a buffer that can be modified.\n/**\n * The mutable_buffer class provides a safe representation of a buffer that can\n * be modified. It does not own the underlying data, and so is cheap to copy or\n * assign.\n *\n * @par Accessing Buffer Contents\n *\n * The contents of a buffer may be accessed using the @c data() and @c size()\n * member functions:\n *\n * @code asio::mutable_buffer b1 = ...;\n * std::size_t s1 = b1.size();\n * unsigned char* p1 = static_cast<unsigned char*>(b1.data());\n * @endcode\n *\n * The @c data() member function permits violations of type safety, so uses of\n * it in application code should be carefully considered.\n */\nclass mutable_buffer\n{\npublic:\n  /// Construct an empty buffer.\n  mutable_buffer() ASIO_NOEXCEPT\n    : data_(0),\n      size_(0)\n  {\n  }\n\n  /// Construct a buffer to represent a given memory range.\n  mutable_buffer(void* data, std::size_t size) ASIO_NOEXCEPT\n    : data_(data),\n      size_(size)\n  {\n  }\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n  mutable_buffer(void* data, std::size_t size,\n      asio::detail::function<void()> debug_check)\n    : data_(data),\n      size_(size),\n      debug_check_(debug_check)\n  {\n  }\n\n  const asio::detail::function<void()>& get_debug_check() const\n  {\n    return debug_check_;\n  }\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n\n  /// Get a pointer to the beginning of the memory range.\n  void* data() const ASIO_NOEXCEPT\n  {\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    if (size_ && debug_check_)\n      debug_check_();\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n    return data_;\n  }\n\n  /// Get the size of the memory range.\n  std::size_t size() const 
ASIO_NOEXCEPT\n  {\n    return size_;\n  }\n\n  /// Move the start of the buffer by the specified number of bytes.\n  mutable_buffer& operator+=(std::size_t n) ASIO_NOEXCEPT\n  {\n    std::size_t offset = n < size_ ? n : size_;\n    data_ = static_cast<char*>(data_) + offset;\n    size_ -= offset;\n    return *this;\n  }\n\nprivate:\n  void* data_;\n  std::size_t size_;\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n  asio::detail::function<void()> debug_check_;\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n};\n\n#if !defined(ASIO_NO_DEPRECATED)\n\n/// (Deprecated: Use mutable_buffer.) Adapts a single modifiable buffer so that\n/// it meets the requirements of the MutableBufferSequence concept.\nclass mutable_buffers_1\n  : public mutable_buffer\n{\npublic:\n  /// The type for each element in the list of buffers.\n  typedef mutable_buffer value_type;\n\n  /// A random-access iterator type that may be used to read elements.\n  typedef const mutable_buffer* const_iterator;\n\n  /// Construct to represent a given memory range.\n  mutable_buffers_1(void* data, std::size_t size) ASIO_NOEXCEPT\n    : mutable_buffer(data, size)\n  {\n  }\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n  mutable_buffers_1(void* data, std::size_t size,\n      asio::detail::function<void()> debug_check)\n    : mutable_buffer(data, size, debug_check)\n  {\n  }\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n\n  /// Construct to represent a single modifiable buffer.\n  explicit mutable_buffers_1(const mutable_buffer& b) ASIO_NOEXCEPT\n    : mutable_buffer(b)\n  {\n  }\n\n  /// Get a random-access iterator to the first element.\n  const_iterator begin() const ASIO_NOEXCEPT\n  {\n    return this;\n  }\n\n  /// Get a random-access iterator for one past the last element.\n  const_iterator end() const ASIO_NOEXCEPT\n  {\n    return begin() + 1;\n  }\n};\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n/// Holds a buffer that cannot be modified.\n/**\n * The const_buffer class provides a safe representation of a 
buffer that cannot\n * be modified. It does not own the underlying data, and so is cheap to copy or\n * assign.\n *\n * @par Accessing Buffer Contents\n *\n * The contents of a buffer may be accessed using the @c data() and @c size()\n * member functions:\n *\n * @code asio::const_buffer b1 = ...;\n * std::size_t s1 = b1.size();\n * const unsigned char* p1 = static_cast<const unsigned char*>(b1.data());\n * @endcode\n *\n * The @c data() member function permits violations of type safety, so uses of\n * it in application code should be carefully considered.\n */\nclass const_buffer\n{\npublic:\n  /// Construct an empty buffer.\n  const_buffer() ASIO_NOEXCEPT\n    : data_(0),\n      size_(0)\n  {\n  }\n\n  /// Construct a buffer to represent a given memory range.\n  const_buffer(const void* data, std::size_t size) ASIO_NOEXCEPT\n    : data_(data),\n      size_(size)\n  {\n  }\n\n  /// Construct a non-modifiable buffer from a modifiable one.\n  const_buffer(const mutable_buffer& b) ASIO_NOEXCEPT\n    : data_(b.data()),\n      size_(b.size())\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , debug_check_(b.get_debug_check())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n  {\n  }\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n  const_buffer(const void* data, std::size_t size,\n      asio::detail::function<void()> debug_check)\n    : data_(data),\n      size_(size),\n      debug_check_(debug_check)\n  {\n  }\n\n  const asio::detail::function<void()>& get_debug_check() const\n  {\n    return debug_check_;\n  }\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n\n  /// Get a pointer to the beginning of the memory range.\n  const void* data() const ASIO_NOEXCEPT\n  {\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    if (size_ && debug_check_)\n      debug_check_();\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n    return data_;\n  }\n\n  /// Get the size of the memory range.\n  std::size_t size() const ASIO_NOEXCEPT\n  {\n    return size_;\n  }\n\n  /// Move the start of the buffer by the 
specified number of bytes.\n  const_buffer& operator+=(std::size_t n) ASIO_NOEXCEPT\n  {\n    std::size_t offset = n < size_ ? n : size_;\n    data_ = static_cast<const char*>(data_) + offset;\n    size_ -= offset;\n    return *this;\n  }\n\nprivate:\n  const void* data_;\n  std::size_t size_;\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n  asio::detail::function<void()> debug_check_;\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n};\n\n#if !defined(ASIO_NO_DEPRECATED)\n\n/// (Deprecated: Use const_buffer.) Adapts a single non-modifiable buffer so\n/// that it meets the requirements of the ConstBufferSequence concept.\nclass const_buffers_1\n  : public const_buffer\n{\npublic:\n  /// The type for each element in the list of buffers.\n  typedef const_buffer value_type;\n\n  /// A random-access iterator type that may be used to read elements.\n  typedef const const_buffer* const_iterator;\n\n  /// Construct to represent a given memory range.\n  const_buffers_1(const void* data, std::size_t size) ASIO_NOEXCEPT\n    : const_buffer(data, size)\n  {\n  }\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n  const_buffers_1(const void* data, std::size_t size,\n      asio::detail::function<void()> debug_check)\n    : const_buffer(data, size, debug_check)\n  {\n  }\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n\n  /// Construct to represent a single non-modifiable buffer.\n  explicit const_buffers_1(const const_buffer& b) ASIO_NOEXCEPT\n    : const_buffer(b)\n  {\n  }\n\n  /// Get a random-access iterator to the first element.\n  const_iterator begin() const ASIO_NOEXCEPT\n  {\n    return this;\n  }\n\n  /// Get a random-access iterator for one past the last element.\n  const_iterator end() const ASIO_NOEXCEPT\n  {\n    return begin() + 1;\n  }\n};\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n/// (Deprecated: Use the socket/descriptor wait() and async_wait() member\n/// functions.) 
An implementation of both the ConstBufferSequence and\n/// MutableBufferSequence concepts to represent a null buffer sequence.\nclass null_buffers\n{\npublic:\n  /// The type for each element in the list of buffers.\n  typedef mutable_buffer value_type;\n\n  /// A random-access iterator type that may be used to read elements.\n  typedef const mutable_buffer* const_iterator;\n\n  /// Get a random-access iterator to the first element.\n  const_iterator begin() const ASIO_NOEXCEPT\n  {\n    return &buf_;\n  }\n\n  /// Get a random-access iterator for one past the last element.\n  const_iterator end() const ASIO_NOEXCEPT\n  {\n    return &buf_;\n  }\n\nprivate:\n  mutable_buffer buf_;\n};\n\n/** @defgroup buffer_sequence_begin asio::buffer_sequence_begin\n *\n * @brief The asio::buffer_sequence_begin function returns an iterator\n * pointing to the first element in a buffer sequence.\n */\n/*@{*/\n\n/// Get an iterator to the first element in a buffer sequence.\ntemplate <typename MutableBuffer>\ninline const mutable_buffer* buffer_sequence_begin(const MutableBuffer& b,\n    typename enable_if<\n      is_convertible<const MutableBuffer*, const mutable_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT\n{\n  return static_cast<const mutable_buffer*>(detail::addressof(b));\n}\n\n/// Get an iterator to the first element in a buffer sequence.\ntemplate <typename ConstBuffer>\ninline const const_buffer* buffer_sequence_begin(const ConstBuffer& b,\n    typename enable_if<\n      is_convertible<const ConstBuffer*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT\n{\n  return static_cast<const const_buffer*>(detail::addressof(b));\n}\n\n#if defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION)\n\n/// Get an iterator to the first element in a buffer sequence.\ntemplate <typename C>\ninline auto buffer_sequence_begin(C& c,\n    typename enable_if<\n      !is_convertible<const C*, const mutable_buffer*>::value\n        && !is_convertible<const C*, const 
const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT -> decltype(c.begin())\n{\n  return c.begin();\n}\n\n/// Get an iterator to the first element in a buffer sequence.\ntemplate <typename C>\ninline auto buffer_sequence_begin(const C& c,\n    typename enable_if<\n      !is_convertible<const C*, const mutable_buffer*>::value\n        && !is_convertible<const C*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT -> decltype(c.begin())\n{\n  return c.begin();\n}\n\n#else // defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename C>\ninline typename C::iterator buffer_sequence_begin(C& c,\n    typename enable_if<\n      !is_convertible<const C*, const mutable_buffer*>::value\n        && !is_convertible<const C*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT\n{\n  return c.begin();\n}\n\ntemplate <typename C>\ninline typename C::const_iterator buffer_sequence_begin(const C& c,\n    typename enable_if<\n      !is_convertible<const C*, const mutable_buffer*>::value\n        && !is_convertible<const C*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT\n{\n  return c.begin();\n}\n\n#endif // defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION)\n\n/*@}*/\n\n/** @defgroup buffer_sequence_end asio::buffer_sequence_end\n *\n * @brief The asio::buffer_sequence_end function returns an iterator\n * pointing to one past the end element in a buffer sequence.\n */\n/*@{*/\n\n/// Get an iterator to one past the end element in a buffer sequence.\ntemplate <typename MutableBuffer>\ninline const mutable_buffer* buffer_sequence_end(const MutableBuffer& b,\n    typename enable_if<\n      is_convertible<const MutableBuffer*, const mutable_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT\n{\n  return static_cast<const mutable_buffer*>(detail::addressof(b)) + 1;\n}\n\n/// Get an iterator to one past the end element in a buffer sequence.\ntemplate <typename ConstBuffer>\ninline const const_buffer* 
buffer_sequence_end(const ConstBuffer& b,\n    typename enable_if<\n      is_convertible<const ConstBuffer*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT\n{\n  return static_cast<const const_buffer*>(detail::addressof(b)) + 1;\n}\n\n#if defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION)\n\n/// Get an iterator to one past the end element in a buffer sequence.\ntemplate <typename C>\ninline auto buffer_sequence_end(C& c,\n    typename enable_if<\n      !is_convertible<const C*, const mutable_buffer*>::value\n        && !is_convertible<const C*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT -> decltype(c.end())\n{\n  return c.end();\n}\n\n/// Get an iterator to one past the end element in a buffer sequence.\ntemplate <typename C>\ninline auto buffer_sequence_end(const C& c,\n    typename enable_if<\n      !is_convertible<const C*, const mutable_buffer*>::value\n        && !is_convertible<const C*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT -> decltype(c.end())\n{\n  return c.end();\n}\n\n#else // defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename C>\ninline typename C::iterator buffer_sequence_end(C& c,\n    typename enable_if<\n      !is_convertible<const C*, const mutable_buffer*>::value\n        && !is_convertible<const C*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT\n{\n  return c.end();\n}\n\ntemplate <typename C>\ninline typename C::const_iterator buffer_sequence_end(const C& c,\n    typename enable_if<\n      !is_convertible<const C*, const mutable_buffer*>::value\n        && !is_convertible<const C*, const const_buffer*>::value\n    >::type* = 0) ASIO_NOEXCEPT\n{\n  return c.end();\n}\n\n#endif // defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION)\n\n/*@}*/\n\nnamespace detail {\n\n// Tag types used to select appropriately optimised overloads.\nstruct one_buffer {};\nstruct multiple_buffers {};\n\n// Helper trait to detect single 
buffers.\ntemplate <typename BufferSequence>\nstruct buffer_sequence_cardinality :\n  conditional<\n    is_same<BufferSequence, mutable_buffer>::value\n#if !defined(ASIO_NO_DEPRECATED)\n      || is_same<BufferSequence, mutable_buffers_1>::value\n      || is_same<BufferSequence, const_buffers_1>::value\n#endif // !defined(ASIO_NO_DEPRECATED)\n      || is_same<BufferSequence, const_buffer>::value,\n    one_buffer, multiple_buffers>::type {};\n\ntemplate <typename Iterator>\ninline std::size_t buffer_size(one_buffer,\n    Iterator begin, Iterator) ASIO_NOEXCEPT\n{\n  return const_buffer(*begin).size();\n}\n\ntemplate <typename Iterator>\ninline std::size_t buffer_size(multiple_buffers,\n    Iterator begin, Iterator end) ASIO_NOEXCEPT\n{\n  std::size_t total_buffer_size = 0;\n\n  Iterator iter = begin;\n  for (; iter != end; ++iter)\n  {\n    const_buffer b(*iter);\n    total_buffer_size += b.size();\n  }\n\n  return total_buffer_size;\n}\n\n} // namespace detail\n\n/// Get the total number of bytes in a buffer sequence.\n/**\n * The @c buffer_size function determines the total size of all buffers in the\n * buffer sequence, as if computed as follows:\n *\n * @code size_t total_size = 0;\n * auto i = asio::buffer_sequence_begin(buffers);\n * auto end = asio::buffer_sequence_end(buffers);\n * for (; i != end; ++i)\n * {\n *   const_buffer b(*i);\n *   total_size += b.size();\n * }\n * return total_size; @endcode\n *\n * The @c BufferSequence template parameter may meet either of the @c\n * ConstBufferSequence or @c MutableBufferSequence type requirements.\n */\ntemplate <typename BufferSequence>\ninline std::size_t buffer_size(const BufferSequence& b) ASIO_NOEXCEPT\n{\n  return detail::buffer_size(\n      detail::buffer_sequence_cardinality<BufferSequence>(),\n      asio::buffer_sequence_begin(b),\n      asio::buffer_sequence_end(b));\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\n\n/** @defgroup buffer_cast asio::buffer_cast\n *\n * @brief (Deprecated: Use the @c data() 
member function.) The\n * asio::buffer_cast function is used to obtain a pointer to the\n * underlying memory region associated with a buffer.\n *\n * @par Examples:\n *\n * To access the memory of a non-modifiable buffer, use:\n * @code asio::const_buffer b1 = ...;\n * const unsigned char* p1 = asio::buffer_cast<const unsigned char*>(b1);\n * @endcode\n *\n * To access the memory of a modifiable buffer, use:\n * @code asio::mutable_buffer b2 = ...;\n * unsigned char* p2 = asio::buffer_cast<unsigned char*>(b2);\n * @endcode\n *\n * The asio::buffer_cast function permits violations of type safety, so\n * uses of it in application code should be carefully considered.\n */\n/*@{*/\n\n/// Cast a non-modifiable buffer to a specified pointer to POD type.\ntemplate <typename PointerToPodType>\ninline PointerToPodType buffer_cast(const mutable_buffer& b) ASIO_NOEXCEPT\n{\n  return static_cast<PointerToPodType>(b.data());\n}\n\n/// Cast a non-modifiable buffer to a specified pointer to POD type.\ntemplate <typename PointerToPodType>\ninline PointerToPodType buffer_cast(const const_buffer& b) ASIO_NOEXCEPT\n{\n  return static_cast<PointerToPodType>(b.data());\n}\n\n/*@}*/\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n/// Create a new modifiable buffer that is offset from the start of another.\n/**\n * @relates mutable_buffer\n */\ninline mutable_buffer operator+(const mutable_buffer& b,\n    std::size_t n) ASIO_NOEXCEPT\n{\n  std::size_t offset = n < b.size() ? 
n : b.size();\n  char* new_data = static_cast<char*>(b.data()) + offset;\n  std::size_t new_size = b.size() - offset;\n  return mutable_buffer(new_data, new_size\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , b.get_debug_check()\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new modifiable buffer that is offset from the start of another.\n/**\n * @relates mutable_buffer\n */\ninline mutable_buffer operator+(std::size_t n,\n    const mutable_buffer& b) ASIO_NOEXCEPT\n{\n  return b + n;\n}\n\n/// Create a new non-modifiable buffer that is offset from the start of another.\n/**\n * @relates const_buffer\n */\ninline const_buffer operator+(const const_buffer& b,\n    std::size_t n) ASIO_NOEXCEPT\n{\n  std::size_t offset = n < b.size() ? n : b.size();\n  const char* new_data = static_cast<const char*>(b.data()) + offset;\n  std::size_t new_size = b.size() - offset;\n  return const_buffer(new_data, new_size\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , b.get_debug_check()\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new non-modifiable buffer that is offset from the start of another.\n/**\n * @relates const_buffer\n */\ninline const_buffer operator+(std::size_t n,\n    const const_buffer& b) ASIO_NOEXCEPT\n{\n  return b + n;\n}\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\nnamespace detail {\n\ntemplate <typename Iterator>\nclass buffer_debug_check\n{\npublic:\n  buffer_debug_check(Iterator iter)\n    : iter_(iter)\n  {\n  }\n\n  ~buffer_debug_check()\n  {\n#if defined(ASIO_MSVC) && (ASIO_MSVC == 1400)\n    // MSVC 8's string iterator checking may crash in a std::string::iterator\n    // object's destructor when the iterator points to an already-destroyed\n    // std::string object, unless the iterator is cleared first.\n    iter_ = Iterator();\n#endif // defined(ASIO_MSVC) && (ASIO_MSVC == 1400)\n  }\n\n  void operator()()\n  {\n    (void)*iter_;\n  }\n\nprivate:\n  Iterator iter_;\n};\n\n} // namespace 
detail\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n\n/** @defgroup buffer asio::buffer\n *\n * @brief The asio::buffer function is used to create a buffer object to\n * represent raw memory, an array of POD elements, a vector of POD elements,\n * or a std::string.\n *\n * A buffer object represents a contiguous region of memory as a 2-tuple\n * consisting of a pointer and size in bytes. A tuple of the form <tt>{void*,\n * size_t}</tt> specifies a mutable (modifiable) region of memory. Similarly, a\n * tuple of the form <tt>{const void*, size_t}</tt> specifies a const\n * (non-modifiable) region of memory. These two forms correspond to the classes\n * mutable_buffer and const_buffer, respectively. To mirror C++'s conversion\n * rules, a mutable_buffer is implicitly convertible to a const_buffer, and the\n * opposite conversion is not permitted.\n *\n * The simplest use case involves reading or writing a single buffer of a\n * specified size:\n *\n * @code sock.send(asio::buffer(data, size)); @endcode\n *\n * In the above example, the return value of asio::buffer meets the\n * requirements of the ConstBufferSequence concept so that it may be directly\n * passed to the socket's write function. A buffer created for modifiable\n * memory also meets the requirements of the MutableBufferSequence concept.\n *\n * An individual buffer may be created from a builtin array, std::vector,\n * std::array or boost::array of POD elements. 
This helps prevent buffer\n * overruns by automatically determining the size of the buffer:\n *\n * @code char d1[128];\n * size_t bytes_transferred = sock.receive(asio::buffer(d1));\n *\n * std::vector<char> d2(128);\n * bytes_transferred = sock.receive(asio::buffer(d2));\n *\n * std::array<char, 128> d3;\n * bytes_transferred = sock.receive(asio::buffer(d3));\n *\n * boost::array<char, 128> d4;\n * bytes_transferred = sock.receive(asio::buffer(d4)); @endcode\n *\n * In all three cases above, the buffers created are exactly 128 bytes long.\n * Note that a vector is @e never automatically resized when creating or using\n * a buffer. The buffer size is determined using the vector's <tt>size()</tt>\n * member function, and not its capacity.\n *\n * @par Accessing Buffer Contents\n *\n * The contents of a buffer may be accessed using the @c data() and @c size()\n * member functions:\n *\n * @code asio::mutable_buffer b1 = ...;\n * std::size_t s1 = b1.size();\n * unsigned char* p1 = static_cast<unsigned char*>(b1.data());\n *\n * asio::const_buffer b2 = ...;\n * std::size_t s2 = b2.size();\n * const void* p2 = b2.data(); @endcode\n *\n * The @c data() member function permits violations of type safety, so\n * uses of it in application code should be carefully considered.\n *\n * For convenience, a @ref buffer_size function is provided that works with\n * both buffers and buffer sequences (that is, types meeting the\n * ConstBufferSequence or MutableBufferSequence type requirements). In this\n * case, the function returns the total size of all buffers in the sequence.\n *\n * @par Buffer Copying\n *\n * The @ref buffer_copy function may be used to copy raw bytes between\n * individual buffers and buffer sequences.\n*\n * In particular, when used with the @ref buffer_size function, the @ref\n * buffer_copy function can be used to linearise a sequence of buffers. 
For\n * example:\n *\n * @code vector<const_buffer> buffers = ...;\n *\n * vector<unsigned char> data(asio::buffer_size(buffers));\n * asio::buffer_copy(asio::buffer(data), buffers); @endcode\n *\n * Note that @ref buffer_copy is implemented in terms of @c memcpy, and\n * consequently it cannot be used to copy between overlapping memory regions.\n *\n * @par Buffer Invalidation\n *\n * A buffer object does not have any ownership of the memory it refers to. It\n * is the responsibility of the application to ensure the memory region remains\n * valid until it is no longer required for an I/O operation. When the memory\n * is no longer available, the buffer is said to have been invalidated.\n *\n * For the asio::buffer overloads that accept an argument of type\n * std::vector, the buffer objects returned are invalidated by any vector\n * operation that also invalidates all references, pointers and iterators\n * referring to the elements in the sequence (C++ Std, 23.2.4)\n *\n * For the asio::buffer overloads that accept an argument of type\n * std::basic_string, the buffer objects returned are invalidated according to\n * the rules defined for invalidation of references, pointers and iterators\n * referring to elements of the sequence (C++ Std, 21.3).\n *\n * @par Buffer Arithmetic\n *\n * Buffer objects may be manipulated using simple arithmetic in a safe way\n * which helps prevent buffer overruns. Consider an array initialised as\n * follows:\n *\n * @code boost::array<char, 6> a = { 'a', 'b', 'c', 'd', 'e' }; @endcode\n *\n * A buffer object @c b1 created using:\n *\n * @code b1 = asio::buffer(a); @endcode\n *\n * represents the entire array, <tt>{ 'a', 'b', 'c', 'd', 'e' }</tt>. An\n * optional second argument to the asio::buffer function may be used to\n * limit the size, in bytes, of the buffer:\n *\n * @code b2 = asio::buffer(a, 3); @endcode\n *\n * such that @c b2 represents the data <tt>{ 'a', 'b', 'c' }</tt>. 
Even if the\n * size argument exceeds the actual size of the array, the size of the buffer\n * object created will be limited to the array size.\n *\n * An offset may be applied to an existing buffer to create a new one:\n *\n * @code b3 = b1 + 2; @endcode\n *\n * where @c b3 will set to represent <tt>{ 'c', 'd', 'e' }</tt>. If the offset\n * exceeds the size of the existing buffer, the newly created buffer will be\n * empty.\n *\n * Both an offset and size may be specified to create a buffer that corresponds\n * to a specific range of bytes within an existing buffer:\n *\n * @code b4 = asio::buffer(b1 + 1, 3); @endcode\n *\n * so that @c b4 will refer to the bytes <tt>{ 'b', 'c', 'd' }</tt>.\n *\n * @par Buffers and Scatter-Gather I/O\n *\n * To read or write using multiple buffers (i.e. scatter-gather I/O), multiple\n * buffer objects may be assigned into a container that supports the\n * MutableBufferSequence (for read) or ConstBufferSequence (for write) concepts:\n *\n * @code\n * char d1[128];\n * std::vector<char> d2(128);\n * boost::array<char, 128> d3;\n *\n * boost::array<mutable_buffer, 3> bufs1 = {\n *   asio::buffer(d1),\n *   asio::buffer(d2),\n *   asio::buffer(d3) };\n * bytes_transferred = sock.receive(bufs1);\n *\n * std::vector<const_buffer> bufs2;\n * bufs2.push_back(asio::buffer(d1));\n * bufs2.push_back(asio::buffer(d2));\n * bufs2.push_back(asio::buffer(d3));\n * bytes_transferred = sock.send(bufs2); @endcode\n */\n/*@{*/\n\n#if defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION)\n# define ASIO_MUTABLE_BUFFER mutable_buffer\n# define ASIO_CONST_BUFFER const_buffer\n#else // defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION)\n# define ASIO_MUTABLE_BUFFER mutable_buffers_1\n# define ASIO_CONST_BUFFER const_buffers_1\n#endif // defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION)\n\n/// Create a new modifiable buffer from an existing buffer.\n/**\n * @returns <tt>mutable_buffer(b)</tt>.\n */\ninline 
ASIO_MUTABLE_BUFFER buffer(\n    const mutable_buffer& b) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(b);\n}\n\n/// Create a new modifiable buffer from an existing buffer.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     b.data(),\n *     min(b.size(), max_size_in_bytes)); @endcode\n */\ninline ASIO_MUTABLE_BUFFER buffer(const mutable_buffer& b,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(\n      mutable_buffer(b.data(),\n        b.size() < max_size_in_bytes\n        ? b.size() : max_size_in_bytes\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n        , b.get_debug_check()\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n        ));\n}\n\n/// Create a new non-modifiable buffer from an existing buffer.\n/**\n * @returns <tt>const_buffer(b)</tt>.\n */\ninline ASIO_CONST_BUFFER buffer(\n    const const_buffer& b) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(b);\n}\n\n/// Create a new non-modifiable buffer from an existing buffer.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     b.data(),\n *     min(b.size(), max_size_in_bytes)); @endcode\n */\ninline ASIO_CONST_BUFFER buffer(const const_buffer& b,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(b.data(),\n      b.size() < max_size_in_bytes\n      ? 
b.size() : max_size_in_bytes\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , b.get_debug_check()\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new modifiable buffer that represents the given memory range.\n/**\n * @returns <tt>mutable_buffer(data, size_in_bytes)</tt>.\n */\ninline ASIO_MUTABLE_BUFFER buffer(void* data,\n    std::size_t size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data, size_in_bytes);\n}\n\n/// Create a new non-modifiable buffer that represents the given memory range.\n/**\n * @returns <tt>const_buffer(data, size_in_bytes)</tt>.\n */\ninline ASIO_CONST_BUFFER buffer(const void* data,\n    std::size_t size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data, size_in_bytes);\n}\n\n/// Create a new modifiable buffer that represents the given POD array.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     static_cast<void*>(data),\n *     N * sizeof(PodType)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_MUTABLE_BUFFER buffer(PodType (&data)[N]) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data, N * sizeof(PodType));\n}\n \n/// Create a new modifiable buffer that represents the given POD array.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     static_cast<void*>(data),\n *     min(N * sizeof(PodType), max_size_in_bytes)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_MUTABLE_BUFFER buffer(PodType (&data)[N],\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data,\n      N * sizeof(PodType) < max_size_in_bytes\n      ? 
N * sizeof(PodType) : max_size_in_bytes);\n}\n \n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     static_cast<const void*>(data),\n *     N * sizeof(PodType)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(\n    const PodType (&data)[N]) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data, N * sizeof(PodType));\n}\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     static_cast<const void*>(data),\n *     min(N * sizeof(PodType), max_size_in_bytes)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(const PodType (&data)[N],\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data,\n      N * sizeof(PodType) < max_size_in_bytes\n      ? N * sizeof(PodType) : max_size_in_bytes);\n}\n\n#if defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND)\n\n// Borland C++ and Sun Studio think the overloads:\n//\n//   unspecified buffer(boost::array<PodType, N>& array ...);\n//\n// and\n//\n//   unspecified buffer(boost::array<const PodType, N>& array ...);\n//\n// are ambiguous. 
This will be worked around by using a buffer_types traits\n// class that contains typedefs for the appropriate buffer and container\n// classes, based on whether PodType is const or non-const.\n\nnamespace detail {\n\ntemplate <bool IsConst>\nstruct buffer_types_base;\n\ntemplate <>\nstruct buffer_types_base<false>\n{\n  typedef mutable_buffer buffer_type;\n  typedef ASIO_MUTABLE_BUFFER container_type;\n};\n\ntemplate <>\nstruct buffer_types_base<true>\n{\n  typedef const_buffer buffer_type;\n  typedef ASIO_CONST_BUFFER container_type;\n};\n\ntemplate <typename PodType>\nstruct buffer_types\n  : public buffer_types_base<is_const<PodType>::value>\n{\n};\n\n} // namespace detail\n\ntemplate <typename PodType, std::size_t N>\ninline typename detail::buffer_types<PodType>::container_type\nbuffer(boost::array<PodType, N>& data) ASIO_NOEXCEPT\n{\n  typedef typename asio::detail::buffer_types<PodType>::buffer_type\n    buffer_type;\n  typedef typename asio::detail::buffer_types<PodType>::container_type\n    container_type;\n  return container_type(\n      buffer_type(data.c_array(), data.size() * sizeof(PodType)));\n}\n\ntemplate <typename PodType, std::size_t N>\ninline typename detail::buffer_types<PodType>::container_type\nbuffer(boost::array<PodType, N>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  typedef typename asio::detail::buffer_types<PodType>::buffer_type\n    buffer_type;\n  typedef typename asio::detail::buffer_types<PodType>::container_type\n    container_type;\n  return container_type(\n      buffer_type(data.c_array(),\n        data.size() * sizeof(PodType) < max_size_in_bytes\n        ? 
data.size() * sizeof(PodType) : max_size_in_bytes));\n}\n\n#else // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND)\n\n/// Create a new modifiable buffer that represents the given POD array.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     data.data(),\n *     data.size() * sizeof(PodType)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_MUTABLE_BUFFER buffer(\n    boost::array<PodType, N>& data) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(\n      data.c_array(), data.size() * sizeof(PodType));\n}\n\n/// Create a new modifiable buffer that represents the given POD array.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     data.data(),\n *     min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_MUTABLE_BUFFER buffer(boost::array<PodType, N>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data.c_array(),\n      data.size() * sizeof(PodType) < max_size_in_bytes\n      ? 
data.size() * sizeof(PodType) : max_size_in_bytes);\n}\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     data.size() * sizeof(PodType)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(\n    boost::array<const PodType, N>& data) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType));\n}\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(boost::array<const PodType, N>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(),\n      data.size() * sizeof(PodType) < max_size_in_bytes\n      ? 
data.size() * sizeof(PodType) : max_size_in_bytes);\n}\n\n#endif // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND)\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     data.size() * sizeof(PodType)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(\n    const boost::array<PodType, N>& data) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType));\n}\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(const boost::array<PodType, N>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(),\n      data.size() * sizeof(PodType) < max_size_in_bytes\n      ? 
data.size() * sizeof(PodType) : max_size_in_bytes);\n}\n\n#if defined(ASIO_HAS_STD_ARRAY) || defined(GENERATING_DOCUMENTATION)\n\n/// Create a new modifiable buffer that represents the given POD array.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     data.data(),\n *     data.size() * sizeof(PodType)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_MUTABLE_BUFFER buffer(\n    std::array<PodType, N>& data) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data.data(), data.size() * sizeof(PodType));\n}\n\n/// Create a new modifiable buffer that represents the given POD array.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     data.data(),\n *     min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_MUTABLE_BUFFER buffer(std::array<PodType, N>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data.data(),\n      data.size() * sizeof(PodType) < max_size_in_bytes\n      ? 
data.size() * sizeof(PodType) : max_size_in_bytes);\n}\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     data.size() * sizeof(PodType)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(\n    std::array<const PodType, N>& data) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType));\n}\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(std::array<const PodType, N>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(),\n      data.size() * sizeof(PodType) < max_size_in_bytes\n      ? 
data.size() * sizeof(PodType) : max_size_in_bytes);\n}\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     data.size() * sizeof(PodType)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(\n    const std::array<PodType, N>& data) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType));\n}\n\n/// Create a new non-modifiable buffer that represents the given POD array.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode\n */\ntemplate <typename PodType, std::size_t N>\ninline ASIO_CONST_BUFFER buffer(const std::array<PodType, N>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(),\n      data.size() * sizeof(PodType) < max_size_in_bytes\n      ? data.size() * sizeof(PodType) : max_size_in_bytes);\n}\n\n#endif // defined(ASIO_HAS_STD_ARRAY) || defined(GENERATING_DOCUMENTATION)\n\n/// Create a new modifiable buffer that represents the given POD vector.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     data.size() ? &data[0] : 0,\n *     data.size() * sizeof(PodType)); @endcode\n *\n * @note The buffer is invalidated by any vector operation that would also\n * invalidate iterators.\n */\ntemplate <typename PodType, typename Allocator>\ninline ASIO_MUTABLE_BUFFER buffer(\n    std::vector<PodType, Allocator>& data) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(\n      data.size() ? 
&data[0] : 0, data.size() * sizeof(PodType)\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename std::vector<PodType, Allocator>::iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new modifiable buffer that represents the given POD vector.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     data.size() ? &data[0] : 0,\n *     min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode\n *\n * @note The buffer is invalidated by any vector operation that would also\n * invalidate iterators.\n */\ntemplate <typename PodType, typename Allocator>\ninline ASIO_MUTABLE_BUFFER buffer(std::vector<PodType, Allocator>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data.size() ? &data[0] : 0,\n      data.size() * sizeof(PodType) < max_size_in_bytes\n      ? data.size() * sizeof(PodType) : max_size_in_bytes\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename std::vector<PodType, Allocator>::iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new non-modifiable buffer that represents the given POD vector.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.size() ? &data[0] : 0,\n *     data.size() * sizeof(PodType)); @endcode\n *\n * @note The buffer is invalidated by any vector operation that would also\n * invalidate iterators.\n */\ntemplate <typename PodType, typename Allocator>\ninline ASIO_CONST_BUFFER buffer(\n    const std::vector<PodType, Allocator>& data) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(\n      data.size() ? 
&data[0] : 0, data.size() * sizeof(PodType)\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename std::vector<PodType, Allocator>::const_iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new non-modifiable buffer that represents the given POD vector.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.size() ? &data[0] : 0,\n *     min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode\n *\n * @note The buffer is invalidated by any vector operation that would also\n * invalidate iterators.\n */\ntemplate <typename PodType, typename Allocator>\ninline ASIO_CONST_BUFFER buffer(\n    const std::vector<PodType, Allocator>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.size() ? &data[0] : 0,\n      data.size() * sizeof(PodType) < max_size_in_bytes\n      ? data.size() * sizeof(PodType) : max_size_in_bytes\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename std::vector<PodType, Allocator>::const_iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new modifiable buffer that represents the given string.\n/**\n * @returns <tt>mutable_buffer(data.size() ? &data[0] : 0,\n * data.size() * sizeof(Elem))</tt>.\n *\n * @note The buffer is invalidated by any non-const operation called on the\n * given string object.\n */\ntemplate <typename Elem, typename Traits, typename Allocator>\ninline ASIO_MUTABLE_BUFFER buffer(\n    std::basic_string<Elem, Traits, Allocator>& data) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data.size() ? 
&data[0] : 0,\n      data.size() * sizeof(Elem)\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename std::basic_string<Elem, Traits, Allocator>::iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new modifiable buffer that represents the given string.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     data.size() ? &data[0] : 0,\n *     min(data.size() * sizeof(Elem), max_size_in_bytes)); @endcode\n *\n * @note The buffer is invalidated by any non-const operation called on the\n * given string object.\n */\ntemplate <typename Elem, typename Traits, typename Allocator>\ninline ASIO_MUTABLE_BUFFER buffer(\n    std::basic_string<Elem, Traits, Allocator>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_MUTABLE_BUFFER(data.size() ? &data[0] : 0,\n      data.size() * sizeof(Elem) < max_size_in_bytes\n      ? data.size() * sizeof(Elem) : max_size_in_bytes\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename std::basic_string<Elem, Traits, Allocator>::iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new non-modifiable buffer that represents the given string.\n/**\n * @returns <tt>const_buffer(data.data(), data.size() * sizeof(Elem))</tt>.\n *\n * @note The buffer is invalidated by any non-const operation called on the\n * given string object.\n */\ntemplate <typename Elem, typename Traits, typename Allocator>\ninline ASIO_CONST_BUFFER buffer(\n    const std::basic_string<Elem, Traits, Allocator>& data) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(Elem)\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename std::basic_string<Elem, Traits, Allocator>::const_iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      
);\n}\n\n/// Create a new non-modifiable buffer that represents the given string.\n/**\n * @returns A const_buffer value equivalent to:\n * @code const_buffer(\n *     data.data(),\n *     min(data.size() * sizeof(Elem), max_size_in_bytes)); @endcode\n *\n * @note The buffer is invalidated by any non-const operation called on the\n * given string object.\n */\ntemplate <typename Elem, typename Traits, typename Allocator>\ninline ASIO_CONST_BUFFER buffer(\n    const std::basic_string<Elem, Traits, Allocator>& data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.data(),\n      data.size() * sizeof(Elem) < max_size_in_bytes\n      ? data.size() * sizeof(Elem) : max_size_in_bytes\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename std::basic_string<Elem, Traits, Allocator>::const_iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n#if defined(ASIO_HAS_STRING_VIEW) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Create a new modifiable buffer that represents the given string_view.\n/**\n * @returns <tt>mutable_buffer(data.size() ? &data[0] : 0,\n * data.size() * sizeof(Elem))</tt>.\n */\ntemplate <typename Elem, typename Traits>\ninline ASIO_CONST_BUFFER buffer(\n    basic_string_view<Elem, Traits> data) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.size() ? &data[0] : 0,\n      data.size() * sizeof(Elem)\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename basic_string_view<Elem, Traits>::iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n/// Create a new non-modifiable buffer that represents the given string.\n/**\n * @returns A mutable_buffer value equivalent to:\n * @code mutable_buffer(\n *     data.size() ? 
&data[0] : 0,\n *     min(data.size() * sizeof(Elem), max_size_in_bytes)); @endcode\n */\ntemplate <typename Elem, typename Traits>\ninline ASIO_CONST_BUFFER buffer(\n    basic_string_view<Elem, Traits> data,\n    std::size_t max_size_in_bytes) ASIO_NOEXCEPT\n{\n  return ASIO_CONST_BUFFER(data.size() ? &data[0] : 0,\n      data.size() * sizeof(Elem) < max_size_in_bytes\n      ? data.size() * sizeof(Elem) : max_size_in_bytes\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n      , detail::buffer_debug_check<\n          typename basic_string_view<Elem, Traits>::iterator\n        >(data.begin())\n#endif // ASIO_ENABLE_BUFFER_DEBUGGING\n      );\n}\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n       //  || defined(GENERATING_DOCUMENTATION)\n\n/*@}*/\n\n/// Adapt a basic_string to the DynamicBuffer requirements.\n/**\n * Requires that <tt>sizeof(Elem) == 1</tt>.\n */\ntemplate <typename Elem, typename Traits, typename Allocator>\nclass dynamic_string_buffer\n{\npublic:\n  /// The type used to represent a sequence of constant buffers that refers to\n  /// the underlying memory.\n  typedef ASIO_CONST_BUFFER const_buffers_type;\n\n  /// The type used to represent a sequence of mutable buffers that refers to\n  /// the underlying memory.\n  typedef ASIO_MUTABLE_BUFFER mutable_buffers_type;\n\n  /// Construct a dynamic buffer from a string.\n  /**\n   * @param s The string to be used as backing storage for the dynamic buffer.\n   * The object stores a reference to the string and the user is responsible\n   * for ensuring that the string object remains valid while the\n   * dynamic_string_buffer object, and copies of the object, are in use.\n   *\n   * @b DynamicBuffer_v1: Any existing data in the string is treated as the\n   * dynamic buffer's input sequence.\n   *\n   * @param maximum_size Specifies a maximum size for the buffer, in bytes.\n   */\n  explicit dynamic_string_buffer(std::basic_string<Elem, Traits, Allocator>& s,\n      std::size_t maximum_size =\n        
(std::numeric_limits<std::size_t>::max)()) ASIO_NOEXCEPT\n    : string_(s),\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      size_((std::numeric_limits<std::size_t>::max)()),\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      max_size_(maximum_size)\n  {\n  }\n\n  /// @b DynamicBuffer_v2: Copy construct a dynamic buffer.\n  dynamic_string_buffer(const dynamic_string_buffer& other) ASIO_NOEXCEPT\n    : string_(other.string_),\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      size_(other.size_),\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      max_size_(other.max_size_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move construct a dynamic buffer.\n  dynamic_string_buffer(dynamic_string_buffer&& other) ASIO_NOEXCEPT\n    : string_(other.string_),\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      size_(other.size_),\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      max_size_(other.max_size_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// @b DynamicBuffer_v1: Get the size of the input sequence.\n  /// @b DynamicBuffer_v2: Get the current size of the underlying memory.\n  /**\n   * @returns @b DynamicBuffer_v1 The current size of the input sequence.\n   * @b DynamicBuffer_v2: The current size of the underlying string if less than\n   * max_size(). 
Otherwise returns max_size().\n   */\n  std::size_t size() const ASIO_NOEXCEPT\n  {\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n    if (size_ != (std::numeric_limits<std::size_t>::max)())\n      return size_;\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n    return (std::min)(string_.size(), max_size());\n  }\n\n  /// Get the maximum size of the dynamic buffer.\n  /**\n   * @returns The allowed maximum size of the underlying memory.\n   */\n  std::size_t max_size() const ASIO_NOEXCEPT\n  {\n    return max_size_;\n  }\n\n  /// Get the maximum size that the buffer may grow to without triggering\n  /// reallocation.\n  /**\n   * @returns The current capacity of the underlying string if less than\n   * max_size(). Otherwise returns max_size().\n   */\n  std::size_t capacity() const ASIO_NOEXCEPT\n  {\n    return (std::min)(string_.capacity(), max_size());\n  }\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  /// @b DynamicBuffer_v1: Get a list of buffers that represents the input\n  /// sequence.\n  /**\n   * @returns An object of type @c const_buffers_type that satisfies\n   * ConstBufferSequence requirements, representing the basic_string memory in\n   * the input sequence.\n   *\n   * @note The returned object is invalidated by any @c dynamic_string_buffer\n   * or @c basic_string member function that resizes or erases the string.\n   */\n  const_buffers_type data() const ASIO_NOEXCEPT\n  {\n    return const_buffers_type(asio::buffer(string_, size_));\n  }\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n  /// @b DynamicBuffer_v2: Get a sequence of buffers that represents the\n  /// underlying memory.\n  /**\n   * @param pos Position of the first byte to represent in the buffer sequence\n   *\n   * @param n The number of bytes to return in the buffer sequence. 
If the\n   * underlying memory is shorter, the buffer sequence represents as many bytes\n   * as are available.\n   *\n   * @returns An object of type @c mutable_buffers_type that satisfies\n   * MutableBufferSequence requirements, representing the basic_string memory.\n   *\n   * @note The returned object is invalidated by any @c dynamic_string_buffer\n   * or @c basic_string member function that resizes or erases the string.\n   */\n  mutable_buffers_type data(std::size_t pos, std::size_t n) ASIO_NOEXCEPT\n  {\n    return mutable_buffers_type(asio::buffer(\n          asio::buffer(string_, max_size_) + pos, n));\n  }\n\n  /// @b DynamicBuffer_v2: Get a sequence of buffers that represents the\n  /// underlying memory.\n  /**\n   * @param pos Position of the first byte to represent in the buffer sequence\n   *\n   * @param n The number of bytes to return in the buffer sequence. If the\n   * underlying memory is shorter, the buffer sequence represents as many bytes\n   * as are available.\n   *\n   * @note The returned object is invalidated by any @c dynamic_string_buffer\n   * or @c basic_string member function that resizes or erases the string.\n   */\n  const_buffers_type data(std::size_t pos,\n      std::size_t n) const ASIO_NOEXCEPT\n  {\n    return const_buffers_type(asio::buffer(\n          asio::buffer(string_, max_size_) + pos, n));\n  }\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  /// @b DynamicBuffer_v1: Get a list of buffers that represents the output\n  /// sequence, with the given size.\n  /**\n   * Ensures that the output sequence can accommodate @c n bytes, resizing the\n   * basic_string object as necessary.\n   *\n   * @returns An object of type @c mutable_buffers_type that satisfies\n   * MutableBufferSequence requirements, representing basic_string memory\n   * at the start of the output sequence of size @c n.\n   *\n   * @throws std::length_error If <tt>size() + n > max_size()</tt>.\n   *\n   * @note The returned object is invalidated by any @c 
dynamic_string_buffer\n   * or @c basic_string member function that modifies the input sequence or\n   * output sequence.\n   */\n  mutable_buffers_type prepare(std::size_t n)\n  {\n    if (size() > max_size() || max_size() - size() < n)\n    {\n      std::length_error ex(\"dynamic_string_buffer too long\");\n      asio::detail::throw_exception(ex);\n    }\n\n    if (size_ == (std::numeric_limits<std::size_t>::max)())\n      size_ = string_.size(); // Enable v1 behaviour.\n\n    string_.resize(size_ + n);\n\n    return asio::buffer(asio::buffer(string_) + size_, n);\n  }\n\n  /// @b DynamicBuffer_v1: Move bytes from the output sequence to the input\n  /// sequence.\n  /**\n   * @param n The number of bytes to append from the start of the output\n   * sequence to the end of the input sequence. The remainder of the output\n   * sequence is discarded.\n   *\n   * Requires a preceding call <tt>prepare(x)</tt> where <tt>x >= n</tt>, and\n   * no intervening operations that modify the input or output sequence.\n   *\n   * @note If @c n is greater than the size of the output sequence, the entire\n   * output sequence is moved to the input sequence and no error is issued.\n   */\n  void commit(std::size_t n)\n  {\n    size_ += (std::min)(n, string_.size() - size_);\n    string_.resize(size_);\n  }\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n  /// @b DynamicBuffer_v2: Grow the underlying memory by the specified number of\n  /// bytes.\n  /**\n   * Resizes the string to accommodate an additional @c n bytes at the end.\n   *\n   * @throws std::length_error If <tt>size() + n > max_size()</tt>.\n   */\n  void grow(std::size_t n)\n  {\n    if (size() > max_size() || max_size() - size() < n)\n    {\n      std::length_error ex(\"dynamic_string_buffer too long\");\n      asio::detail::throw_exception(ex);\n    }\n\n    string_.resize(size() + n);\n  }\n\n  /// @b DynamicBuffer_v2: Shrink the underlying memory by the specified number\n  /// of bytes.\n  /**\n   * Erases @c n 
bytes from the end of the string by resizing the basic_string\n   * object. If @c n is greater than the current size of the string, the string\n   * is emptied.\n   */\n  void shrink(std::size_t n)\n  {\n    string_.resize(n > size() ? 0 : size() - n);\n  }\n\n  /// @b DynamicBuffer_v1: Remove characters from the input sequence.\n  /// @b DynamicBuffer_v2: Consume the specified number of bytes from the\n  /// beginning of the underlying memory.\n  /**\n   * @b DynamicBuffer_v1: Removes @c n characters from the beginning of the\n   * input sequence. @note If @c n is greater than the size of the input\n   * sequence, the entire input sequence is consumed and no error is issued.\n   *\n   * @b DynamicBuffer_v2: Erases @c n bytes from the beginning of the string.\n   * If @c n is greater than the current size of the string, the string is\n   * emptied.\n   */\n  void consume(std::size_t n)\n  {\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n    if (size_ != (std::numeric_limits<std::size_t>::max)())\n    {\n      std::size_t consume_length = (std::min)(n, size_);\n      string_.erase(0, consume_length);\n      size_ -= consume_length;\n      return;\n    }\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n    string_.erase(0, n);\n  }\n\nprivate:\n  std::basic_string<Elem, Traits, Allocator>& string_;\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  std::size_t size_;\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  const std::size_t max_size_;\n};\n\n/// Adapt a vector to the DynamicBuffer requirements.\n/**\n * Requires that <tt>sizeof(Elem) == 1</tt>.\n */\ntemplate <typename Elem, typename Allocator>\nclass dynamic_vector_buffer\n{\npublic:\n  /// The type used to represent a sequence of constant buffers that refers to\n  /// the underlying memory.\n  typedef ASIO_CONST_BUFFER const_buffers_type;\n\n  /// The type used to represent a sequence of mutable buffers that refers to\n  /// the underlying memory.\n  typedef ASIO_MUTABLE_BUFFER mutable_buffers_type;\n\n  /// Construct 
a dynamic buffer from a vector.\n  /**\n   * @param v The vector to be used as backing storage for the dynamic buffer.\n   * The object stores a reference to the vector and the user is responsible\n   * for ensuring that the vector object remains valid while the\n   * dynamic_vector_buffer object, and copies of the object, are in use.\n   *\n   * @param maximum_size Specifies a maximum size for the buffer, in bytes.\n   */\n  explicit dynamic_vector_buffer(std::vector<Elem, Allocator>& v,\n      std::size_t maximum_size =\n        (std::numeric_limits<std::size_t>::max)()) ASIO_NOEXCEPT\n    : vector_(v),\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      size_((std::numeric_limits<std::size_t>::max)()),\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      max_size_(maximum_size)\n  {\n  }\n\n  /// @b DynamicBuffer_v2: Copy construct a dynamic buffer.\n  dynamic_vector_buffer(const dynamic_vector_buffer& other) ASIO_NOEXCEPT\n    : vector_(other.vector_),\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      size_(other.size_),\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      max_size_(other.max_size_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move construct a dynamic buffer.\n  dynamic_vector_buffer(dynamic_vector_buffer&& other) ASIO_NOEXCEPT\n    : vector_(other.vector_),\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      size_(other.size_),\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n      max_size_(other.max_size_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// @b DynamicBuffer_v1: Get the size of the input sequence.\n  /// @b DynamicBuffer_v2: Get the current size of the underlying memory.\n  /**\n   * @returns @b DynamicBuffer_v1 The current size of the input sequence.\n   * @b DynamicBuffer_v2: The current size of the underlying vector if less than\n   * max_size(). 
Otherwise returns max_size().\n   */\n  std::size_t size() const ASIO_NOEXCEPT\n  {\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n    if (size_ != (std::numeric_limits<std::size_t>::max)())\n      return size_;\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n    return (std::min)(vector_.size(), max_size());\n  }\n\n  /// Get the maximum size of the dynamic buffer.\n  /**\n   * @returns @b DynamicBuffer_v1: The allowed maximum of the sum of the sizes\n   * of the input sequence and output sequence. @b DynamicBuffer_v2: The allowed\n   * maximum size of the underlying memory.\n   */\n  std::size_t max_size() const ASIO_NOEXCEPT\n  {\n    return max_size_;\n  }\n\n  /// Get the maximum size that the buffer may grow to without triggering\n  /// reallocation.\n  /**\n   * @returns @b DynamicBuffer_v1: The current total capacity of the buffer,\n   * i.e. for both the input sequence and output sequence. @b DynamicBuffer_v2:\n   * The current capacity of the underlying vector if less than max_size().\n   * Otherwise returns max_size().\n   */\n  std::size_t capacity() const ASIO_NOEXCEPT\n  {\n    return (std::min)(vector_.capacity(), max_size());\n  }\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  /// @b DynamicBuffer_v1: Get a list of buffers that represents the input\n  /// sequence.\n  /**\n   * @returns An object of type @c const_buffers_type that satisfies\n   * ConstBufferSequence requirements, representing the vector memory in the\n   * input sequence.\n   *\n   * @note The returned object is invalidated by any @c dynamic_vector_buffer\n   * or @c vector member function that modifies the input sequence or output\n   * sequence.\n   */\n  const_buffers_type data() const ASIO_NOEXCEPT\n  {\n    return const_buffers_type(asio::buffer(vector_, size_));\n  }\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n  /// @b DynamicBuffer_v2: Get a sequence of buffers that represents the\n  /// underlying memory.\n  /**\n   * @param pos Position of the first byte to represent in the 
buffer sequence\n   *\n   * @param n The number of bytes to return in the buffer sequence. If the\n   * underlying memory is shorter, the buffer sequence represents as many bytes\n   * as are available.\n   *\n   * @returns An object of type @c mutable_buffers_type that satisfies\n   * MutableBufferSequence requirements, representing the vector memory.\n   *\n   * @note The returned object is invalidated by any @c dynamic_vector_buffer\n   * or @c vector member function that resizes or erases the vector.\n   */\n  mutable_buffers_type data(std::size_t pos, std::size_t n) ASIO_NOEXCEPT\n  {\n    return mutable_buffers_type(asio::buffer(\n          asio::buffer(vector_, max_size_) + pos, n));\n  }\n\n  /// @b DynamicBuffer_v2: Get a sequence of buffers that represents the\n  /// underlying memory.\n  /**\n   * @param pos Position of the first byte to represent in the buffer sequence\n   *\n   * @param n The number of bytes to return in the buffer sequence. If the\n   * underlying memory is shorter, the buffer sequence represents as many bytes\n   * as are available.\n   *\n   * @note The returned object is invalidated by any @c dynamic_vector_buffer\n   * or @c vector member function that resizes or erases the vector.\n   */\n  const_buffers_type data(std::size_t pos,\n      std::size_t n) const ASIO_NOEXCEPT\n  {\n    return const_buffers_type(asio::buffer(\n          asio::buffer(vector_, max_size_) + pos, n));\n  }\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  /// @b DynamicBuffer_v1: Get a list of buffers that represents the output\n  /// sequence, with the given size.\n  /**\n   * Ensures that the output sequence can accommodate @c n bytes, resizing the\n   * vector object as necessary.\n   *\n   * @returns An object of type @c mutable_buffers_type that satisfies\n   * MutableBufferSequence requirements, representing vector memory at the\n   * start of the output sequence of size @c n.\n   *\n   * @throws std::length_error If <tt>size() + n > max_size()</tt>.\n  
 *\n   * @note The returned object is invalidated by any @c dynamic_vector_buffer\n   * or @c vector member function that modifies the input sequence or output\n   * sequence.\n   */\n  mutable_buffers_type prepare(std::size_t n)\n  {\n    if (size () > max_size() || max_size() - size() < n)\n    {\n      std::length_error ex(\"dynamic_vector_buffer too long\");\n      asio::detail::throw_exception(ex);\n    }\n\n    if (size_ == (std::numeric_limits<std::size_t>::max)())\n      size_ = vector_.size(); // Enable v1 behaviour.\n\n    vector_.resize(size_ + n);\n\n    return asio::buffer(asio::buffer(vector_) + size_, n);\n  }\n\n  /// @b DynamicBuffer_v1: Move bytes from the output sequence to the input\n  /// sequence.\n  /**\n   * @param n The number of bytes to append from the start of the output\n   * sequence to the end of the input sequence. The remainder of the output\n   * sequence is discarded.\n   *\n   * Requires a preceding call <tt>prepare(x)</tt> where <tt>x >= n</tt>, and\n   * no intervening operations that modify the input or output sequence.\n   *\n   * @note If @c n is greater than the size of the output sequence, the entire\n   * output sequence is moved to the input sequence and no error is issued.\n   */\n  void commit(std::size_t n)\n  {\n    size_ += (std::min)(n, vector_.size() - size_);\n    vector_.resize(size_);\n  }\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n  /// @b DynamicBuffer_v2: Grow the underlying memory by the specified number of\n  /// bytes.\n  /**\n   * Resizes the vector to accommodate an additional @c n bytes at the end.\n   *\n   * @throws std::length_error If <tt>size() + n > max_size()</tt>.\n   */\n  void grow(std::size_t n)\n  {\n    if (size() > max_size() || max_size() - size() < n)\n    {\n      std::length_error ex(\"dynamic_vector_buffer too long\");\n      asio::detail::throw_exception(ex);\n    }\n\n    vector_.resize(size() + n);\n  }\n\n  /// @b DynamicBuffer_v2: Shrink the underlying memory by the 
specified number\n  /// of bytes.\n  /**\n   * Erases @c n bytes from the end of the vector by resizing the vector\n   * object. If @c n is greater than the current size of the vector, the vector\n   * is emptied.\n   */\n  void shrink(std::size_t n)\n  {\n    vector_.resize(n > size() ? 0 : size() - n);\n  }\n\n  /// @b DynamicBuffer_v1: Remove characters from the input sequence.\n  /// @b DynamicBuffer_v2: Consume the specified number of bytes from the\n  /// beginning of the underlying memory.\n  /**\n   * @b DynamicBuffer_v1: Removes @c n characters from the beginning of the\n   * input sequence. @note If @c n is greater than the size of the input\n   * sequence, the entire input sequence is consumed and no error is issued.\n   *\n   * @b DynamicBuffer_v2: Erases @c n bytes from the beginning of the vector.\n   * If @c n is greater than the current size of the vector, the vector is\n   * emptied.\n   */\n  void consume(std::size_t n)\n  {\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n    if (size_ != (std::numeric_limits<std::size_t>::max)())\n    {\n      std::size_t consume_length = (std::min)(n, size_);\n      vector_.erase(vector_.begin(), vector_.begin() + consume_length);\n      size_ -= consume_length;\n      return;\n    }\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n    vector_.erase(vector_.begin(), vector_.begin() + (std::min)(size(), n));\n  }\n\nprivate:\n  std::vector<Elem, Allocator>& vector_;\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  std::size_t size_;\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  const std::size_t max_size_;\n};\n\n/** @defgroup dynamic_buffer asio::dynamic_buffer\n *\n * @brief The asio::dynamic_buffer function is used to create a\n * dynamically resized buffer from a @c std::basic_string or @c std::vector.\n */\n/*@{*/\n\n/// Create a new dynamic buffer that represents the given string.\n/**\n * @returns <tt>dynamic_string_buffer<Elem, Traits, Allocator>(data)</tt>.\n */\ntemplate <typename Elem, typename Traits, 
typename Allocator>\ninline dynamic_string_buffer<Elem, Traits, Allocator> dynamic_buffer(\n    std::basic_string<Elem, Traits, Allocator>& data) ASIO_NOEXCEPT\n{\n  return dynamic_string_buffer<Elem, Traits, Allocator>(data);\n}\n\n/// Create a new dynamic buffer that represents the given string.\n/**\n * @returns <tt>dynamic_string_buffer<Elem, Traits, Allocator>(data,\n * max_size)</tt>.\n */\ntemplate <typename Elem, typename Traits, typename Allocator>\ninline dynamic_string_buffer<Elem, Traits, Allocator> dynamic_buffer(\n    std::basic_string<Elem, Traits, Allocator>& data,\n    std::size_t max_size) ASIO_NOEXCEPT\n{\n  return dynamic_string_buffer<Elem, Traits, Allocator>(data, max_size);\n}\n\n/// Create a new dynamic buffer that represents the given vector.\n/**\n * @returns <tt>dynamic_vector_buffer<Elem, Allocator>(data)</tt>.\n */\ntemplate <typename Elem, typename Allocator>\ninline dynamic_vector_buffer<Elem, Allocator> dynamic_buffer(\n    std::vector<Elem, Allocator>& data) ASIO_NOEXCEPT\n{\n  return dynamic_vector_buffer<Elem, Allocator>(data);\n}\n\n/// Create a new dynamic buffer that represents the given vector.\n/**\n * @returns <tt>dynamic_vector_buffer<Elem, Allocator>(data, max_size)</tt>.\n */\ntemplate <typename Elem, typename Allocator>\ninline dynamic_vector_buffer<Elem, Allocator> dynamic_buffer(\n    std::vector<Elem, Allocator>& data,\n    std::size_t max_size) ASIO_NOEXCEPT\n{\n  return dynamic_vector_buffer<Elem, Allocator>(data, max_size);\n}\n\n/*@}*/\n\n/** @defgroup buffer_copy asio::buffer_copy\n *\n * @brief The asio::buffer_copy function is used to copy bytes from a\n * source buffer (or buffer sequence) to a target buffer (or buffer sequence).\n *\n * The @c buffer_copy function is available in two forms:\n *\n * @li A 2-argument form: @c buffer_copy(target, source)\n *\n * @li A 3-argument form: @c buffer_copy(target, source, max_bytes_to_copy)\n *\n * Both forms return the number of bytes actually copied. 
The number of bytes\n * copied is the lesser of:\n *\n * @li @c buffer_size(target)\n *\n * @li @c buffer_size(source)\n *\n * @li @c If specified, @c max_bytes_to_copy.\n *\n * This prevents buffer overflow, regardless of the buffer sizes used in the\n * copy operation.\n *\n * Note that @ref buffer_copy is implemented in terms of @c memcpy, and\n * consequently it cannot be used to copy between overlapping memory regions.\n */\n/*@{*/\n\nnamespace detail {\n\ninline std::size_t buffer_copy_1(const mutable_buffer& target,\n    const const_buffer& source)\n{\n  using namespace std; // For memcpy.\n  std::size_t target_size = target.size();\n  std::size_t source_size = source.size();\n  std::size_t n = target_size < source_size ? target_size : source_size;\n  if (n > 0)\n    memcpy(target.data(), source.data(), n);\n  return n;\n}\n\ntemplate <typename TargetIterator, typename SourceIterator>\ninline std::size_t buffer_copy(one_buffer, one_buffer,\n    TargetIterator target_begin, TargetIterator,\n    SourceIterator source_begin, SourceIterator) ASIO_NOEXCEPT\n{\n  return (buffer_copy_1)(*target_begin, *source_begin);\n}\n\ntemplate <typename TargetIterator, typename SourceIterator>\ninline std::size_t buffer_copy(one_buffer, one_buffer,\n    TargetIterator target_begin, TargetIterator,\n    SourceIterator source_begin, SourceIterator,\n    std::size_t max_bytes_to_copy) ASIO_NOEXCEPT\n{\n  return (buffer_copy_1)(*target_begin,\n      asio::buffer(*source_begin, max_bytes_to_copy));\n}\n\ntemplate <typename TargetIterator, typename SourceIterator>\nstd::size_t buffer_copy(one_buffer, multiple_buffers,\n    TargetIterator target_begin, TargetIterator,\n    SourceIterator source_begin, SourceIterator source_end,\n    std::size_t max_bytes_to_copy\n      = (std::numeric_limits<std::size_t>::max)()) ASIO_NOEXCEPT\n{\n  std::size_t total_bytes_copied = 0;\n  SourceIterator source_iter = source_begin;\n\n  for (mutable_buffer target_buffer(\n        
asio::buffer(*target_begin, max_bytes_to_copy));\n      target_buffer.size() && source_iter != source_end; ++source_iter)\n  {\n    const_buffer source_buffer(*source_iter);\n    std::size_t bytes_copied = (buffer_copy_1)(target_buffer, source_buffer);\n    total_bytes_copied += bytes_copied;\n    target_buffer += bytes_copied;\n  }\n\n  return total_bytes_copied;\n}\n\ntemplate <typename TargetIterator, typename SourceIterator>\nstd::size_t buffer_copy(multiple_buffers, one_buffer,\n    TargetIterator target_begin, TargetIterator target_end,\n    SourceIterator source_begin, SourceIterator,\n    std::size_t max_bytes_to_copy\n      = (std::numeric_limits<std::size_t>::max)()) ASIO_NOEXCEPT\n{\n  std::size_t total_bytes_copied = 0;\n  TargetIterator target_iter = target_begin;\n\n  for (const_buffer source_buffer(\n        asio::buffer(*source_begin, max_bytes_to_copy));\n      source_buffer.size() && target_iter != target_end; ++target_iter)\n  {\n    mutable_buffer target_buffer(*target_iter);\n    std::size_t bytes_copied = (buffer_copy_1)(target_buffer, source_buffer);\n    total_bytes_copied += bytes_copied;\n    source_buffer += bytes_copied;\n  }\n\n  return total_bytes_copied;\n}\n\ntemplate <typename TargetIterator, typename SourceIterator>\nstd::size_t buffer_copy(multiple_buffers, multiple_buffers,\n    TargetIterator target_begin, TargetIterator target_end,\n    SourceIterator source_begin, SourceIterator source_end) ASIO_NOEXCEPT\n{\n  std::size_t total_bytes_copied = 0;\n\n  TargetIterator target_iter = target_begin;\n  std::size_t target_buffer_offset = 0;\n\n  SourceIterator source_iter = source_begin;\n  std::size_t source_buffer_offset = 0;\n\n  while (target_iter != target_end && source_iter != source_end)\n  {\n    mutable_buffer target_buffer =\n      mutable_buffer(*target_iter) + target_buffer_offset;\n\n    const_buffer source_buffer =\n      const_buffer(*source_iter) + source_buffer_offset;\n\n    std::size_t bytes_copied = 
(buffer_copy_1)(target_buffer, source_buffer);\n    total_bytes_copied += bytes_copied;\n\n    if (bytes_copied == target_buffer.size())\n    {\n      ++target_iter;\n      target_buffer_offset = 0;\n    }\n    else\n      target_buffer_offset += bytes_copied;\n\n    if (bytes_copied == source_buffer.size())\n    {\n      ++source_iter;\n      source_buffer_offset = 0;\n    }\n    else\n      source_buffer_offset += bytes_copied;\n  }\n\n  return total_bytes_copied;\n}\n\ntemplate <typename TargetIterator, typename SourceIterator>\nstd::size_t buffer_copy(multiple_buffers, multiple_buffers,\n    TargetIterator target_begin, TargetIterator target_end,\n    SourceIterator source_begin, SourceIterator source_end,\n    std::size_t max_bytes_to_copy) ASIO_NOEXCEPT\n{\n  std::size_t total_bytes_copied = 0;\n\n  TargetIterator target_iter = target_begin;\n  std::size_t target_buffer_offset = 0;\n\n  SourceIterator source_iter = source_begin;\n  std::size_t source_buffer_offset = 0;\n\n  while (total_bytes_copied != max_bytes_to_copy\n      && target_iter != target_end && source_iter != source_end)\n  {\n    mutable_buffer target_buffer =\n      mutable_buffer(*target_iter) + target_buffer_offset;\n\n    const_buffer source_buffer =\n      const_buffer(*source_iter) + source_buffer_offset;\n\n    std::size_t bytes_copied = (buffer_copy_1)(\n        target_buffer, asio::buffer(source_buffer,\n          max_bytes_to_copy - total_bytes_copied));\n    total_bytes_copied += bytes_copied;\n\n    if (bytes_copied == target_buffer.size())\n    {\n      ++target_iter;\n      target_buffer_offset = 0;\n    }\n    else\n      target_buffer_offset += bytes_copied;\n\n    if (bytes_copied == source_buffer.size())\n    {\n      ++source_iter;\n      source_buffer_offset = 0;\n    }\n    else\n      source_buffer_offset += bytes_copied;\n  }\n\n  return total_bytes_copied;\n}\n\n} // namespace detail\n\n/// Copies bytes from a source buffer sequence to a target buffer sequence.\n/**\n * 
@param target A modifiable buffer sequence representing the memory regions to\n * which the bytes will be copied.\n *\n * @param source A non-modifiable buffer sequence representing the memory\n * regions from which the bytes will be copied.\n *\n * @returns The number of bytes copied.\n *\n * @note The number of bytes copied is the lesser of:\n *\n * @li @c buffer_size(target)\n *\n * @li @c buffer_size(source)\n *\n * This function is implemented in terms of @c memcpy, and consequently it\n * cannot be used to copy between overlapping memory regions.\n */\ntemplate <typename MutableBufferSequence, typename ConstBufferSequence>\ninline std::size_t buffer_copy(const MutableBufferSequence& target,\n    const ConstBufferSequence& source) ASIO_NOEXCEPT\n{\n  return detail::buffer_copy(\n      detail::buffer_sequence_cardinality<MutableBufferSequence>(),\n      detail::buffer_sequence_cardinality<ConstBufferSequence>(),\n      asio::buffer_sequence_begin(target),\n      asio::buffer_sequence_end(target),\n      asio::buffer_sequence_begin(source),\n      asio::buffer_sequence_end(source));\n}\n\n/// Copies a limited number of bytes from a source buffer sequence to a target\n/// buffer sequence.\n/**\n * @param target A modifiable buffer sequence representing the memory regions to\n * which the bytes will be copied.\n *\n * @param source A non-modifiable buffer sequence representing the memory\n * regions from which the bytes will be copied.\n *\n * @param max_bytes_to_copy The maximum number of bytes to be copied.\n *\n * @returns The number of bytes copied.\n *\n * @note The number of bytes copied is the lesser of:\n *\n * @li @c buffer_size(target)\n *\n * @li @c buffer_size(source)\n *\n * @li @c max_bytes_to_copy\n *\n * This function is implemented in terms of @c memcpy, and consequently it\n * cannot be used to copy between overlapping memory regions.\n */\ntemplate <typename MutableBufferSequence, typename ConstBufferSequence>\ninline std::size_t 
buffer_copy(const MutableBufferSequence& target,\n    const ConstBufferSequence& source,\n    std::size_t max_bytes_to_copy) ASIO_NOEXCEPT\n{\n  return detail::buffer_copy(\n      detail::buffer_sequence_cardinality<MutableBufferSequence>(),\n      detail::buffer_sequence_cardinality<ConstBufferSequence>(),\n      asio::buffer_sequence_begin(target),\n      asio::buffer_sequence_end(target),\n      asio::buffer_sequence_begin(source),\n      asio::buffer_sequence_end(source), max_bytes_to_copy);\n}\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n#include \"asio/detail/is_buffer_sequence.hpp\"\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Trait to determine whether a type satisfies the MutableBufferSequence\n/// requirements.\ntemplate <typename T>\nstruct is_mutable_buffer_sequence\n#if defined(GENERATING_DOCUMENTATION)\n  : integral_constant<bool, automatically_determined>\n#else // defined(GENERATING_DOCUMENTATION)\n  : asio::detail::is_buffer_sequence<T, mutable_buffer>\n#endif // defined(GENERATING_DOCUMENTATION)\n{\n};\n\n/// Trait to determine whether a type satisfies the ConstBufferSequence\n/// requirements.\ntemplate <typename T>\nstruct is_const_buffer_sequence\n#if defined(GENERATING_DOCUMENTATION)\n  : integral_constant<bool, automatically_determined>\n#else // defined(GENERATING_DOCUMENTATION)\n  : asio::detail::is_buffer_sequence<T, const_buffer>\n#endif // defined(GENERATING_DOCUMENTATION)\n{\n};\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n/// Trait to determine whether a type satisfies the DynamicBuffer_v1\n/// requirements.\ntemplate <typename T>\nstruct is_dynamic_buffer_v1\n#if defined(GENERATING_DOCUMENTATION)\n  : integral_constant<bool, automatically_determined>\n#else // defined(GENERATING_DOCUMENTATION)\n  : asio::detail::is_dynamic_buffer_v1<T>\n#endif // defined(GENERATING_DOCUMENTATION)\n{\n};\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Trait to determine whether a type 
satisfies the DynamicBuffer_v2\n/// requirements.\ntemplate <typename T>\nstruct is_dynamic_buffer_v2\n#if defined(GENERATING_DOCUMENTATION)\n  : integral_constant<bool, automatically_determined>\n#else // defined(GENERATING_DOCUMENTATION)\n  : asio::detail::is_dynamic_buffer_v2<T>\n#endif // defined(GENERATING_DOCUMENTATION)\n{\n};\n\n/// Trait to determine whether a type satisfies the DynamicBuffer requirements.\n/**\n * If @c ASIO_NO_DYNAMIC_BUFFER_V1 is not defined, determines whether the\n * type satisfies the DynamicBuffer_v1 requirements. Otherwise, if @c\n * ASIO_NO_DYNAMIC_BUFFER_V1 is defined, determines whether the type\n * satisfies the DynamicBuffer_v2 requirements.\n */\ntemplate <typename T>\nstruct is_dynamic_buffer\n#if defined(GENERATING_DOCUMENTATION)\n  : integral_constant<bool, automatically_determined>\n#elif defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  : asio::is_dynamic_buffer_v2<T>\n#else // defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n  : asio::is_dynamic_buffer_v1<T>\n#endif // defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n{\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BUFFER_HPP\n"
  },
  {
    "path": "src/third_party/asio/buffered_read_stream.hpp",
    "content": "//\n// buffered_read_stream.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BUFFERED_READ_STREAM_HPP\n#define ASIO_BUFFERED_READ_STREAM_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/async_result.hpp\"\n#include \"asio/buffered_read_stream_fwd.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_resize_guard.hpp\"\n#include \"asio/detail/buffered_stream_storage.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Adds buffering to the read-related operations of a stream.\n/**\n * The buffered_read_stream class template can be used to add buffering to the\n * synchronous and asynchronous read operations of a stream.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream.\n */\ntemplate <typename Stream>\nclass buffered_read_stream\n  : private noncopyable\n{\npublic:\n  /// The type of the next layer.\n  typedef typename remove_reference<Stream>::type next_layer_type;\n\n  /// The type of the lowest layer.\n  typedef typename next_layer_type::lowest_layer_type lowest_layer_type;\n\n  /// The type of the executor associated with the object.\n  typedef typename lowest_layer_type::executor_type executor_type;\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// The default buffer size.\n  static const std::size_t default_buffer_size = 
implementation_defined;\n#else\n  ASIO_STATIC_CONSTANT(std::size_t, default_buffer_size = 1024);\n#endif\n\n  /// Construct, passing the specified argument to initialise the next layer.\n  template <typename Arg>\n  explicit buffered_read_stream(Arg& a)\n    : next_layer_(a),\n      storage_(default_buffer_size)\n  {\n  }\n\n  /// Construct, passing the specified argument to initialise the next layer.\n  template <typename Arg>\n  buffered_read_stream(Arg& a, std::size_t buffer_size)\n    : next_layer_(a),\n      storage_(buffer_size)\n  {\n  }\n\n  /// Get a reference to the next layer.\n  next_layer_type& next_layer()\n  {\n    return next_layer_;\n  }\n\n  /// Get a reference to the lowest layer.\n  lowest_layer_type& lowest_layer()\n  {\n    return next_layer_.lowest_layer();\n  }\n\n  /// Get a const reference to the lowest layer.\n  const lowest_layer_type& lowest_layer() const\n  {\n    return next_layer_.lowest_layer();\n  }\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return next_layer_.lowest_layer().get_executor();\n  }\n\n  /// Close the stream.\n  void close()\n  {\n    next_layer_.close();\n  }\n\n  /// Close the stream.\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    next_layer_.close(ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Write the given data to the stream. Returns the number of bytes written.\n  /// Throws an exception on failure.\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers)\n  {\n    return next_layer_.write_some(buffers);\n  }\n\n  /// Write the given data to the stream. Returns the number of bytes written,\n  /// or 0 if an error occurred.\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return next_layer_.write_some(buffers, ec);\n  }\n\n  /// Start an asynchronous write. 
The data being written must be valid for the\n  /// lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return next_layer_.async_write_some(buffers,\n        ASIO_MOVE_CAST(WriteHandler)(handler));\n  }\n\n  /// Fill the buffer with some data. Returns the number of bytes placed in the\n  /// buffer as a result of the operation. Throws an exception on failure.\n  std::size_t fill();\n\n  /// Fill the buffer with some data. Returns the number of bytes placed in the\n  /// buffer as a result of the operation, or 0 if an error occurred.\n  std::size_t fill(asio::error_code& ec);\n\n  /// Start an asynchronous fill.\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_fill(\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type));\n\n  /// Read some data from the stream. Returns the number of bytes read. Throws\n  /// an exception on failure.\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers);\n\n  /// Read some data from the stream. Returns the number of bytes read or 0 if\n  /// an error occurred.\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers,\n      asio::error_code& ec);\n\n  /// Start an asynchronous read. 
The buffer into which the data will be read\n  /// must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type));\n\n  /// Peek at the incoming data on the stream. Returns the number of bytes read.\n  /// Throws an exception on failure.\n  template <typename MutableBufferSequence>\n  std::size_t peek(const MutableBufferSequence& buffers);\n\n  /// Peek at the incoming data on the stream. Returns the number of bytes read,\n  /// or 0 if an error occurred.\n  template <typename MutableBufferSequence>\n  std::size_t peek(const MutableBufferSequence& buffers,\n      asio::error_code& ec);\n\n  /// Determine the amount of data that may be read without blocking.\n  std::size_t in_avail()\n  {\n    return storage_.size();\n  }\n\n  /// Determine the amount of data that may be read without blocking.\n  std::size_t in_avail(asio::error_code& ec)\n  {\n    ec = asio::error_code();\n    return storage_.size();\n  }\n\nprivate:\n  /// Copy data out of the internal buffer to the specified target buffer.\n  /// Returns the number of bytes copied.\n  template <typename MutableBufferSequence>\n  std::size_t copy(const MutableBufferSequence& buffers)\n  {\n    std::size_t bytes_copied = asio::buffer_copy(\n        buffers, storage_.data(), storage_.size());\n    storage_.consume(bytes_copied);\n    return bytes_copied;\n  }\n\n  /// Copy data from the internal buffer to the specified target buffer, without\n  /// removing the data from the internal buffer. 
Returns the number of bytes\n  /// copied.\n  template <typename MutableBufferSequence>\n  std::size_t peek_copy(const MutableBufferSequence& buffers)\n  {\n    return asio::buffer_copy(buffers, storage_.data(), storage_.size());\n  }\n\n  /// The next layer.\n  Stream next_layer_;\n\n  // The data in the buffer.\n  detail::buffered_stream_storage storage_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/buffered_read_stream.hpp\"\n\n#endif // ASIO_BUFFERED_READ_STREAM_HPP\n"
  },
  {
    "path": "src/third_party/asio/buffered_read_stream_fwd.hpp",
    "content": "//\n// buffered_read_stream_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BUFFERED_READ_STREAM_FWD_HPP\n#define ASIO_BUFFERED_READ_STREAM_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\nnamespace asio {\n\ntemplate <typename Stream>\nclass buffered_read_stream;\n\n} // namespace asio\n\n#endif // ASIO_BUFFERED_READ_STREAM_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/buffered_stream.hpp",
    "content": "//\n// buffered_stream.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BUFFERED_STREAM_HPP\n#define ASIO_BUFFERED_STREAM_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/async_result.hpp\"\n#include \"asio/buffered_read_stream.hpp\"\n#include \"asio/buffered_write_stream.hpp\"\n#include \"asio/buffered_stream_fwd.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Adds buffering to the read- and write-related operations of a stream.\n/**\n * The buffered_stream class template can be used to add buffering to the\n * synchronous and asynchronous read and write operations of a stream.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream.\n */\ntemplate <typename Stream>\nclass buffered_stream\n  : private noncopyable\n{\npublic:\n  /// The type of the next layer.\n  typedef typename remove_reference<Stream>::type next_layer_type;\n\n  /// The type of the lowest layer.\n  typedef typename next_layer_type::lowest_layer_type lowest_layer_type;\n\n  /// The type of the executor associated with the object.\n  typedef typename lowest_layer_type::executor_type executor_type;\n\n  /// Construct, passing the specified argument to initialise the next layer.\n  template <typename Arg>\n  explicit buffered_stream(Arg& a)\n    : inner_stream_impl_(a),\n      stream_impl_(inner_stream_impl_)\n  {\n  }\n\n  /// Construct, passing the specified 
argument to initialise the next layer.\n  template <typename Arg>\n  explicit buffered_stream(Arg& a, std::size_t read_buffer_size,\n      std::size_t write_buffer_size)\n    : inner_stream_impl_(a, write_buffer_size),\n      stream_impl_(inner_stream_impl_, read_buffer_size)\n  {\n  }\n\n  /// Get a reference to the next layer.\n  next_layer_type& next_layer()\n  {\n    return stream_impl_.next_layer().next_layer();\n  }\n\n  /// Get a reference to the lowest layer.\n  lowest_layer_type& lowest_layer()\n  {\n    return stream_impl_.lowest_layer();\n  }\n\n  /// Get a const reference to the lowest layer.\n  const lowest_layer_type& lowest_layer() const\n  {\n    return stream_impl_.lowest_layer();\n  }\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return stream_impl_.lowest_layer().get_executor();\n  }\n\n  /// Close the stream.\n  void close()\n  {\n    stream_impl_.close();\n  }\n\n  /// Close the stream.\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    stream_impl_.close(ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Flush all data from the buffer to the next layer. Returns the number of\n  /// bytes written to the next layer on the last write operation. Throws an\n  /// exception on failure.\n  std::size_t flush()\n  {\n    return stream_impl_.next_layer().flush();\n  }\n\n  /// Flush all data from the buffer to the next layer. 
Returns the number of\n  /// bytes written to the next layer on the last write operation, or 0 if an\n  /// error occurred.\n  std::size_t flush(asio::error_code& ec)\n  {\n    return stream_impl_.next_layer().flush(ec);\n  }\n\n  /// Start an asynchronous flush.\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_flush(\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return stream_impl_.next_layer().async_flush(\n        ASIO_MOVE_CAST(WriteHandler)(handler));\n  }\n\n  /// Write the given data to the stream. Returns the number of bytes written.\n  /// Throws an exception on failure.\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers)\n  {\n    return stream_impl_.write_some(buffers);\n  }\n\n  /// Write the given data to the stream. Returns the number of bytes written,\n  /// or 0 if an error occurred.\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return stream_impl_.write_some(buffers, ec);\n  }\n\n  /// Start an asynchronous write. 
The data being written must be valid for the\n  /// lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return stream_impl_.async_write_some(buffers,\n        ASIO_MOVE_CAST(WriteHandler)(handler));\n  }\n\n  /// Fill the buffer with some data. Returns the number of bytes placed in the\n  /// buffer as a result of the operation. Throws an exception on failure.\n  std::size_t fill()\n  {\n    return stream_impl_.fill();\n  }\n\n  /// Fill the buffer with some data. Returns the number of bytes placed in the\n  /// buffer as a result of the operation, or 0 if an error occurred.\n  std::size_t fill(asio::error_code& ec)\n  {\n    return stream_impl_.fill(ec);\n  }\n\n  /// Start an asynchronous fill.\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_fill(\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return stream_impl_.async_fill(ASIO_MOVE_CAST(ReadHandler)(handler));\n  }\n\n  /// Read some data from the stream. Returns the number of bytes read. Throws\n  /// an exception on failure.\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers)\n  {\n    return stream_impl_.read_some(buffers);\n  }\n\n  /// Read some data from the stream. 
Returns the number of bytes read or 0 if\n  /// an error occurred.\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return stream_impl_.read_some(buffers, ec);\n  }\n\n  /// Start an asynchronous read. The buffer into which the data will be read\n  /// must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return stream_impl_.async_read_some(buffers,\n        ASIO_MOVE_CAST(ReadHandler)(handler));\n  }\n\n  /// Peek at the incoming data on the stream. Returns the number of bytes read.\n  /// Throws an exception on failure.\n  template <typename MutableBufferSequence>\n  std::size_t peek(const MutableBufferSequence& buffers)\n  {\n    return stream_impl_.peek(buffers);\n  }\n\n  /// Peek at the incoming data on the stream. 
Returns the number of bytes read,\n  /// or 0 if an error occurred.\n  template <typename MutableBufferSequence>\n  std::size_t peek(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return stream_impl_.peek(buffers, ec);\n  }\n\n  /// Determine the amount of data that may be read without blocking.\n  std::size_t in_avail()\n  {\n    return stream_impl_.in_avail();\n  }\n\n  /// Determine the amount of data that may be read without blocking.\n  std::size_t in_avail(asio::error_code& ec)\n  {\n    return stream_impl_.in_avail(ec);\n  }\n\nprivate:\n  // The buffered write stream.\n  typedef buffered_write_stream<Stream> write_stream_type;\n  write_stream_type inner_stream_impl_;\n\n  // The buffered read stream.\n  typedef buffered_read_stream<write_stream_type&> read_stream_type;\n  read_stream_type stream_impl_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BUFFERED_STREAM_HPP\n"
  },
  {
    "path": "src/third_party/asio/buffered_stream_fwd.hpp",
    "content": "//\n// buffered_stream_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BUFFERED_STREAM_FWD_HPP\n#define ASIO_BUFFERED_STREAM_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\nnamespace asio {\n\ntemplate <typename Stream>\nclass buffered_stream;\n\n} // namespace asio\n\n#endif // ASIO_BUFFERED_STREAM_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/buffered_write_stream.hpp",
    "content": "//\n// buffered_write_stream.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BUFFERED_WRITE_STREAM_HPP\n#define ASIO_BUFFERED_WRITE_STREAM_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/buffered_write_stream_fwd.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/completion_condition.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffered_stream_storage.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/write.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Adds buffering to the write-related operations of a stream.\n/**\n * The buffered_write_stream class template can be used to add buffering to the\n * synchronous and asynchronous write operations of a stream.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream.\n */\ntemplate <typename Stream>\nclass buffered_write_stream\n  : private noncopyable\n{\npublic:\n  /// The type of the next layer.\n  typedef typename remove_reference<Stream>::type next_layer_type;\n\n  /// The type of the lowest layer.\n  typedef typename next_layer_type::lowest_layer_type lowest_layer_type;\n\n  /// The type of the executor associated with the object.\n  typedef typename lowest_layer_type::executor_type executor_type;\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// The default buffer size.\n  static const std::size_t default_buffer_size = 
implementation_defined;\n#else\n  ASIO_STATIC_CONSTANT(std::size_t, default_buffer_size = 1024);\n#endif\n\n  /// Construct, passing the specified argument to initialise the next layer.\n  template <typename Arg>\n  explicit buffered_write_stream(Arg& a)\n    : next_layer_(a),\n      storage_(default_buffer_size)\n  {\n  }\n\n  /// Construct, passing the specified argument to initialise the next layer.\n  template <typename Arg>\n  buffered_write_stream(Arg& a, std::size_t buffer_size)\n    : next_layer_(a),\n      storage_(buffer_size)\n  {\n  }\n\n  /// Get a reference to the next layer.\n  next_layer_type& next_layer()\n  {\n    return next_layer_;\n  }\n\n  /// Get a reference to the lowest layer.\n  lowest_layer_type& lowest_layer()\n  {\n    return next_layer_.lowest_layer();\n  }\n\n  /// Get a const reference to the lowest layer.\n  const lowest_layer_type& lowest_layer() const\n  {\n    return next_layer_.lowest_layer();\n  }\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return next_layer_.lowest_layer().get_executor();\n  }\n\n  /// Close the stream.\n  void close()\n  {\n    next_layer_.close();\n  }\n\n  /// Close the stream.\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    next_layer_.close(ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Flush all data from the buffer to the next layer. Returns the number of\n  /// bytes written to the next layer on the last write operation. Throws an\n  /// exception on failure.\n  std::size_t flush();\n\n  /// Flush all data from the buffer to the next layer. 
Returns the number of\n  /// bytes written to the next layer on the last write operation, or 0 if an\n  /// error occurred.\n  std::size_t flush(asio::error_code& ec);\n\n  /// Start an asynchronous flush.\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_flush(\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type));\n\n  /// Write the given data to the stream. Returns the number of bytes written.\n  /// Throws an exception on failure.\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers);\n\n  /// Write the given data to the stream. Returns the number of bytes written,\n  /// or 0 if an error occurred and the error handler did not throw.\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers,\n      asio::error_code& ec);\n\n  /// Start an asynchronous write. The data being written must be valid for the\n  /// lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type));\n\n  /// Read some data from the stream. Returns the number of bytes read. Throws\n  /// an exception on failure.\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers)\n  {\n    return next_layer_.read_some(buffers);\n  }\n\n  /// Read some data from the stream. 
Returns the number of bytes read or 0 if\n  /// an error occurred.\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return next_layer_.read_some(buffers, ec);\n  }\n\n  /// Start an asynchronous read. The buffer into which the data will be read\n  /// must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return next_layer_.async_read_some(buffers,\n        ASIO_MOVE_CAST(ReadHandler)(handler));\n  }\n\n  /// Peek at the incoming data on the stream. Returns the number of bytes read.\n  /// Throws an exception on failure.\n  template <typename MutableBufferSequence>\n  std::size_t peek(const MutableBufferSequence& buffers)\n  {\n    return next_layer_.peek(buffers);\n  }\n\n  /// Peek at the incoming data on the stream. 
Returns the number of bytes read,\n  /// or 0 if an error occurred.\n  template <typename MutableBufferSequence>\n  std::size_t peek(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return next_layer_.peek(buffers, ec);\n  }\n\n  /// Determine the amount of data that may be read without blocking.\n  std::size_t in_avail()\n  {\n    return next_layer_.in_avail();\n  }\n\n  /// Determine the amount of data that may be read without blocking.\n  std::size_t in_avail(asio::error_code& ec)\n  {\n    return next_layer_.in_avail(ec);\n  }\n\nprivate:\n  /// Copy data into the internal buffer from the specified source buffer.\n  /// Returns the number of bytes copied.\n  template <typename ConstBufferSequence>\n  std::size_t copy(const ConstBufferSequence& buffers);\n\n  /// The next layer.\n  Stream next_layer_;\n\n  // The data in the buffer.\n  detail::buffered_stream_storage storage_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/buffered_write_stream.hpp\"\n\n#endif // ASIO_BUFFERED_WRITE_STREAM_HPP\n"
  },
  {
    "path": "src/third_party/asio/buffered_write_stream_fwd.hpp",
    "content": "//\n// buffered_write_stream_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BUFFERED_WRITE_STREAM_FWD_HPP\n#define ASIO_BUFFERED_WRITE_STREAM_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\nnamespace asio {\n\ntemplate <typename Stream>\nclass buffered_write_stream;\n\n} // namespace asio\n\n#endif // ASIO_BUFFERED_WRITE_STREAM_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/buffers_iterator.hpp",
    "content": "//\n// buffers_iterator.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_BUFFERS_ITERATOR_HPP\n#define ASIO_BUFFERS_ITERATOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <iterator>\n#include \"asio/buffer.hpp\"\n#include \"asio/detail/assert.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  template <bool IsMutable>\n  struct buffers_iterator_types_helper;\n\n  template <>\n  struct buffers_iterator_types_helper<false>\n  {\n    typedef const_buffer buffer_type;\n    template <typename ByteType>\n    struct byte_type\n    {\n      typedef typename add_const<ByteType>::type type;\n    };\n  };\n\n  template <>\n  struct buffers_iterator_types_helper<true>\n  {\n    typedef mutable_buffer buffer_type;\n    template <typename ByteType>\n    struct byte_type\n    {\n      typedef ByteType type;\n    };\n  };\n\n  template <typename BufferSequence, typename ByteType>\n  struct buffers_iterator_types\n  {\n    enum\n    {\n      is_mutable = is_convertible<\n          typename BufferSequence::value_type,\n          mutable_buffer>::value\n    };\n    typedef buffers_iterator_types_helper<is_mutable> helper;\n    typedef typename helper::buffer_type buffer_type;\n    typedef typename helper::template byte_type<ByteType>::type byte_type;\n    typedef typename BufferSequence::const_iterator const_iterator;\n  };\n\n  template <typename ByteType>\n  struct buffers_iterator_types<mutable_buffer, ByteType>\n  {\n    typedef mutable_buffer buffer_type;\n    typedef ByteType byte_type;\n    typedef 
const mutable_buffer* const_iterator;\n  };\n\n  template <typename ByteType>\n  struct buffers_iterator_types<const_buffer, ByteType>\n  {\n    typedef const_buffer buffer_type;\n    typedef typename add_const<ByteType>::type byte_type;\n    typedef const const_buffer* const_iterator;\n  };\n\n#if !defined(ASIO_NO_DEPRECATED)\n\n  template <typename ByteType>\n  struct buffers_iterator_types<mutable_buffers_1, ByteType>\n  {\n    typedef mutable_buffer buffer_type;\n    typedef ByteType byte_type;\n    typedef const mutable_buffer* const_iterator;\n  };\n\n  template <typename ByteType>\n  struct buffers_iterator_types<const_buffers_1, ByteType>\n  {\n    typedef const_buffer buffer_type;\n    typedef typename add_const<ByteType>::type byte_type;\n    typedef const const_buffer* const_iterator;\n  };\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n}\n\n/// A random access iterator over the bytes in a buffer sequence.\ntemplate <typename BufferSequence, typename ByteType = char>\nclass buffers_iterator\n{\nprivate:\n  typedef typename detail::buffers_iterator_types<\n      BufferSequence, ByteType>::buffer_type buffer_type;\n\n  typedef typename detail::buffers_iterator_types<BufferSequence,\n          ByteType>::const_iterator buffer_sequence_iterator_type;\n\npublic:\n  /// The type used for the distance between two iterators.\n  typedef std::ptrdiff_t difference_type;\n\n  /// The type of the value pointed to by the iterator.\n  typedef ByteType value_type;\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// The type of the result of applying operator->() to the iterator.\n  /**\n   * If the buffer sequence stores buffer objects that are convertible to\n   * mutable_buffer, this is a pointer to a non-const ByteType. 
Otherwise, a\n   * pointer to a const ByteType.\n   */\n  typedef const_or_non_const_ByteType* pointer;\n#else // defined(GENERATING_DOCUMENTATION)\n  typedef typename detail::buffers_iterator_types<\n      BufferSequence, ByteType>::byte_type* pointer;\n#endif // defined(GENERATING_DOCUMENTATION)\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// The type of the result of applying operator*() to the iterator.\n  /**\n   * If the buffer sequence stores buffer objects that are convertible to\n   * mutable_buffer, this is a reference to a non-const ByteType. Otherwise, a\n   * reference to a const ByteType.\n   */\n  typedef const_or_non_const_ByteType& reference;\n#else // defined(GENERATING_DOCUMENTATION)\n  typedef typename detail::buffers_iterator_types<\n      BufferSequence, ByteType>::byte_type& reference;\n#endif // defined(GENERATING_DOCUMENTATION)\n\n  /// The iterator category.\n  typedef std::random_access_iterator_tag iterator_category;\n\n  /// Default constructor. Creates an iterator in an undefined state.\n  buffers_iterator()\n    : current_buffer_(),\n      current_buffer_position_(0),\n      begin_(),\n      current_(),\n      end_(),\n      position_(0)\n  {\n  }\n\n  /// Construct an iterator representing the beginning of the buffers' data.\n  static buffers_iterator begin(const BufferSequence& buffers)\n#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 3)\n    __attribute__ ((__noinline__))\n#endif // defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 3)\n  {\n    buffers_iterator new_iter;\n    new_iter.begin_ = asio::buffer_sequence_begin(buffers);\n    new_iter.current_ = asio::buffer_sequence_begin(buffers);\n    new_iter.end_ = asio::buffer_sequence_end(buffers);\n    while (new_iter.current_ != new_iter.end_)\n    {\n      new_iter.current_buffer_ = *new_iter.current_;\n      if (new_iter.current_buffer_.size() > 0)\n        break;\n      ++new_iter.current_;\n    }\n    return new_iter;\n  }\n\n  /// Construct an 
iterator representing the end of the buffers' data.\n  static buffers_iterator end(const BufferSequence& buffers)\n#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 3)\n    __attribute__ ((__noinline__))\n#endif // defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 3)\n  {\n    buffers_iterator new_iter;\n    new_iter.begin_ = asio::buffer_sequence_begin(buffers);\n    new_iter.current_ = asio::buffer_sequence_begin(buffers);\n    new_iter.end_ = asio::buffer_sequence_end(buffers);\n    while (new_iter.current_ != new_iter.end_)\n    {\n      buffer_type buffer = *new_iter.current_;\n      new_iter.position_ += buffer.size();\n      ++new_iter.current_;\n    }\n    return new_iter;\n  }\n\n  /// Dereference an iterator.\n  reference operator*() const\n  {\n    return dereference();\n  }\n\n  /// Dereference an iterator.\n  pointer operator->() const\n  {\n    return &dereference();\n  }\n\n  /// Access an individual element.\n  reference operator[](std::ptrdiff_t difference) const\n  {\n    buffers_iterator tmp(*this);\n    tmp.advance(difference);\n    return *tmp;\n  }\n\n  /// Increment operator (prefix).\n  buffers_iterator& operator++()\n  {\n    increment();\n    return *this;\n  }\n\n  /// Increment operator (postfix).\n  buffers_iterator operator++(int)\n  {\n    buffers_iterator tmp(*this);\n    ++*this;\n    return tmp;\n  }\n\n  /// Decrement operator (prefix).\n  buffers_iterator& operator--()\n  {\n    decrement();\n    return *this;\n  }\n\n  /// Decrement operator (postfix).\n  buffers_iterator operator--(int)\n  {\n    buffers_iterator tmp(*this);\n    --*this;\n    return tmp;\n  }\n\n  /// Addition operator.\n  buffers_iterator& operator+=(std::ptrdiff_t difference)\n  {\n    advance(difference);\n    return *this;\n  }\n\n  /// Subtraction operator.\n  buffers_iterator& operator-=(std::ptrdiff_t difference)\n  {\n    advance(-difference);\n    return *this;\n  }\n\n  /// Addition operator.\n  friend buffers_iterator 
operator+(const buffers_iterator& iter,\n      std::ptrdiff_t difference)\n  {\n    buffers_iterator tmp(iter);\n    tmp.advance(difference);\n    return tmp;\n  }\n\n  /// Addition operator.\n  friend buffers_iterator operator+(std::ptrdiff_t difference,\n      const buffers_iterator& iter)\n  {\n    buffers_iterator tmp(iter);\n    tmp.advance(difference);\n    return tmp;\n  }\n\n  /// Subtraction operator.\n  friend buffers_iterator operator-(const buffers_iterator& iter,\n      std::ptrdiff_t difference)\n  {\n    buffers_iterator tmp(iter);\n    tmp.advance(-difference);\n    return tmp;\n  }\n\n  /// Subtraction operator.\n  friend std::ptrdiff_t operator-(const buffers_iterator& a,\n      const buffers_iterator& b)\n  {\n    return b.distance_to(a);\n  }\n\n  /// Test two iterators for equality.\n  friend bool operator==(const buffers_iterator& a, const buffers_iterator& b)\n  {\n    return a.equal(b);\n  }\n\n  /// Test two iterators for inequality.\n  friend bool operator!=(const buffers_iterator& a, const buffers_iterator& b)\n  {\n    return !a.equal(b);\n  }\n\n  /// Compare two iterators.\n  friend bool operator<(const buffers_iterator& a, const buffers_iterator& b)\n  {\n    return a.distance_to(b) > 0;\n  }\n\n  /// Compare two iterators.\n  friend bool operator<=(const buffers_iterator& a, const buffers_iterator& b)\n  {\n    return !(b < a);\n  }\n\n  /// Compare two iterators.\n  friend bool operator>(const buffers_iterator& a, const buffers_iterator& b)\n  {\n    return b < a;\n  }\n\n  /// Compare two iterators.\n  friend bool operator>=(const buffers_iterator& a, const buffers_iterator& b)\n  {\n    return !(a < b);\n  }\n\nprivate:\n  // Dereference the iterator.\n  reference dereference() const\n  {\n    return static_cast<pointer>(\n        current_buffer_.data())[current_buffer_position_];\n  }\n\n  // Compare two iterators for equality.\n  bool equal(const buffers_iterator& other) const\n  {\n    return position_ == other.position_;\n  
}\n\n  // Increment the iterator.\n  void increment()\n  {\n    ASIO_ASSERT(current_ != end_ && \"iterator out of bounds\");\n    ++position_;\n\n    // Check if the increment can be satisfied by the current buffer.\n    ++current_buffer_position_;\n    if (current_buffer_position_ != current_buffer_.size())\n      return;\n\n    // Find the next non-empty buffer.\n    ++current_;\n    current_buffer_position_ = 0;\n    while (current_ != end_)\n    {\n      current_buffer_ = *current_;\n      if (current_buffer_.size() > 0)\n        return;\n      ++current_;\n    }\n  }\n\n  // Decrement the iterator.\n  void decrement()\n  {\n    ASIO_ASSERT(position_ > 0 && \"iterator out of bounds\");\n    --position_;\n\n    // Check if the decrement can be satisfied by the current buffer.\n    if (current_buffer_position_ != 0)\n    {\n      --current_buffer_position_;\n      return;\n    }\n\n    // Find the previous non-empty buffer.\n    buffer_sequence_iterator_type iter = current_;\n    while (iter != begin_)\n    {\n      --iter;\n      buffer_type buffer = *iter;\n      std::size_t buffer_size = buffer.size();\n      if (buffer_size > 0)\n      {\n        current_ = iter;\n        current_buffer_ = buffer;\n        current_buffer_position_ = buffer_size - 1;\n        return;\n      }\n    }\n  }\n\n  // Advance the iterator by the specified distance.\n  void advance(std::ptrdiff_t n)\n  {\n    if (n > 0)\n    {\n      ASIO_ASSERT(current_ != end_ && \"iterator out of bounds\");\n      for (;;)\n      {\n        std::ptrdiff_t current_buffer_balance\n          = current_buffer_.size() - current_buffer_position_;\n\n        // Check if the advance can be satisfied by the current buffer.\n        if (current_buffer_balance > n)\n        {\n          position_ += n;\n          current_buffer_position_ += n;\n          return;\n        }\n\n        // Update position.\n        n -= current_buffer_balance;\n        position_ += current_buffer_balance;\n\n        // Move to 
next buffer. If it is empty then it will be skipped on the\n        // next iteration of this loop.\n        if (++current_ == end_)\n        {\n          ASIO_ASSERT(n == 0 && \"iterator out of bounds\");\n          current_buffer_ = buffer_type();\n          current_buffer_position_ = 0;\n          return;\n        }\n        current_buffer_ = *current_;\n        current_buffer_position_ = 0;\n      }\n    }\n    else if (n < 0)\n    {\n      std::size_t abs_n = -n;\n      ASIO_ASSERT(position_ >= abs_n && \"iterator out of bounds\");\n      for (;;)\n      {\n        // Check if the advance can be satisfied by the current buffer.\n        if (current_buffer_position_ >= abs_n)\n        {\n          position_ -= abs_n;\n          current_buffer_position_ -= abs_n;\n          return;\n        }\n\n        // Update position.\n        abs_n -= current_buffer_position_;\n        position_ -= current_buffer_position_;\n\n        // Check if we've reached the beginning of the buffers.\n        if (current_ == begin_)\n        {\n          ASIO_ASSERT(abs_n == 0 && \"iterator out of bounds\");\n          current_buffer_position_ = 0;\n          return;\n        }\n\n        // Find the previous non-empty buffer.\n        buffer_sequence_iterator_type iter = current_;\n        while (iter != begin_)\n        {\n          --iter;\n          buffer_type buffer = *iter;\n          std::size_t buffer_size = buffer.size();\n          if (buffer_size > 0)\n          {\n            current_ = iter;\n            current_buffer_ = buffer;\n            current_buffer_position_ = buffer_size;\n            break;\n          }\n        }\n      }\n    }\n  }\n\n  // Determine the distance between two iterators.\n  std::ptrdiff_t distance_to(const buffers_iterator& other) const\n  {\n    return other.position_ - position_;\n  }\n\n  buffer_type current_buffer_;\n  std::size_t current_buffer_position_;\n  buffer_sequence_iterator_type begin_;\n  buffer_sequence_iterator_type 
current_;\n  buffer_sequence_iterator_type end_;\n  std::size_t position_;\n};\n\n/// Construct an iterator representing the beginning of the buffers' data.\ntemplate <typename BufferSequence>\ninline buffers_iterator<BufferSequence> buffers_begin(\n    const BufferSequence& buffers)\n{\n  return buffers_iterator<BufferSequence>::begin(buffers);\n}\n\n/// Construct an iterator representing the end of the buffers' data.\ntemplate <typename BufferSequence>\ninline buffers_iterator<BufferSequence> buffers_end(\n    const BufferSequence& buffers)\n{\n  return buffers_iterator<BufferSequence>::end(buffers);\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_BUFFERS_ITERATOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/co_spawn.hpp",
    "content": "//\n// co_spawn.hpp\n// ~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_CO_SPAWN_HPP\n#define ASIO_CO_SPAWN_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/awaitable.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/is_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nstruct awaitable_signature;\n\ntemplate <typename T, typename Executor>\nstruct awaitable_signature<awaitable<T, Executor>>\n{\n  typedef void type(std::exception_ptr, T);\n};\n\ntemplate <typename Executor>\nstruct awaitable_signature<awaitable<void, Executor>>\n{\n  typedef void type(std::exception_ptr);\n};\n\n} // namespace detail\n\n/// Spawn a new thread of execution.\n/**\n * The entry point function object @c f must have the signature:\n *\n * @code awaitable<void, E> f(); @endcode\n *\n * where @c E is convertible from @c Executor.\n */\ntemplate <typename Executor, typename F,\n    ASIO_COMPLETION_TOKEN_FOR(typename detail::awaitable_signature<\n      typename result_of<F()>::type>::type) CompletionToken\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken,\n    typename detail::awaitable_signature<typename result_of<F()>::type>::type)\nco_spawn(const Executor& ex, F&& f,\n    CompletionToken&& token\n      ASIO_DEFAULT_COMPLETION_TOKEN(Executor),\n    typename enable_if<\n      is_executor<Executor>::value\n    >::type* = 0);\n\n/// Spawn a new thread of execution.\n/**\n * The entry point function object @c f must have 
the signature:\n *\n * @code awaitable<void, E> f(); @endcode\n *\n * where @c E is convertible from @c ExecutionContext::executor_type.\n */\ntemplate <typename ExecutionContext, typename F,\n    ASIO_COMPLETION_TOKEN_FOR(typename detail::awaitable_signature<\n      typename result_of<F()>::type>::type) CompletionToken\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename ExecutionContext::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken,\n    typename detail::awaitable_signature<typename result_of<F()>::type>::type)\nco_spawn(ExecutionContext& ctx, F&& f,\n    CompletionToken&& token\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename ExecutionContext::executor_type),\n    typename enable_if<\n      is_convertible<ExecutionContext&, execution_context&>::value\n    >::type* = 0);\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/co_spawn.hpp\"\n\n#endif // defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_CO_SPAWN_HPP\n"
  },
  {
    "path": "src/third_party/asio/completion_condition.hpp",
    "content": "//\n// completion_condition.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_COMPLETION_CONDITION_HPP\n#define ASIO_COMPLETION_CONDITION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail {\n\n// The default maximum number of bytes to transfer in a single operation.\nenum default_max_transfer_size_t { default_max_transfer_size = 65536 };\n\n// Adapt result of old-style completion conditions (which had a bool result\n// where true indicated that the operation was complete).\ninline std::size_t adapt_completion_condition_result(bool result)\n{\n  return result ? 0 : default_max_transfer_size;\n}\n\n// Adapt result of current completion conditions (which have a size_t result\n// where 0 means the operation is complete, and otherwise the result is the\n// maximum number of bytes to transfer on the next underlying operation).\ninline std::size_t adapt_completion_condition_result(std::size_t result)\n{\n  return result;\n}\n\nclass transfer_all_t\n{\npublic:\n  typedef std::size_t result_type;\n\n  template <typename Error>\n  std::size_t operator()(const Error& err, std::size_t)\n  {\n    return !!err ? 0 : default_max_transfer_size;\n  }\n};\n\nclass transfer_at_least_t\n{\npublic:\n  typedef std::size_t result_type;\n\n  explicit transfer_at_least_t(std::size_t minimum)\n    : minimum_(minimum)\n  {\n  }\n\n  template <typename Error>\n  std::size_t operator()(const Error& err, std::size_t bytes_transferred)\n  {\n    return (!!err || bytes_transferred >= minimum_)\n      ? 
0 : default_max_transfer_size;\n  }\n\nprivate:\n  std::size_t minimum_;\n};\n\nclass transfer_exactly_t\n{\npublic:\n  typedef std::size_t result_type;\n\n  explicit transfer_exactly_t(std::size_t size)\n    : size_(size)\n  {\n  }\n\n  template <typename Error>\n  std::size_t operator()(const Error& err, std::size_t bytes_transferred)\n  {\n    return (!!err || bytes_transferred >= size_) ? 0 :\n      (size_ - bytes_transferred < default_max_transfer_size\n        ? size_ - bytes_transferred : std::size_t(default_max_transfer_size));\n  }\n\nprivate:\n  std::size_t size_;\n};\n\n} // namespace detail\n\n/**\n * @defgroup completion_condition Completion Condition Function Objects\n *\n * Function objects used for determining when a read or write operation should\n * complete.\n */\n/*@{*/\n\n/// Return a completion condition function object that indicates that a read or\n/// write operation should continue until all of the data has been transferred,\n/// or until an error occurs.\n/**\n * This function is used to create an object, of unspecified type, that meets\n * CompletionCondition requirements.\n *\n * @par Example\n * Reading until a buffer is full:\n * @code\n * boost::array<char, 128> buf;\n * asio::error_code ec;\n * std::size_t n = asio::read(\n *     sock, asio::buffer(buf),\n *     asio::transfer_all(), ec);\n * if (ec)\n * {\n *   // An error occurred.\n * }\n * else\n * {\n *   // n == 128\n * }\n * @endcode\n */\n#if defined(GENERATING_DOCUMENTATION)\nunspecified transfer_all();\n#else\ninline detail::transfer_all_t transfer_all()\n{\n  return detail::transfer_all_t();\n}\n#endif\n\n/// Return a completion condition function object that indicates that a read or\n/// write operation should continue until a minimum number of bytes has been\n/// transferred, or until an error occurs.\n/**\n * This function is used to create an object, of unspecified type, that meets\n * CompletionCondition requirements.\n *\n * @par Example\n * Reading until a buffer 
is full or contains at least 64 bytes:\n * @code\n * boost::array<char, 128> buf;\n * asio::error_code ec;\n * std::size_t n = asio::read(\n *     sock, asio::buffer(buf),\n *     asio::transfer_at_least(64), ec);\n * if (ec)\n * {\n *   // An error occurred.\n * }\n * else\n * {\n *   // n >= 64 && n <= 128\n * }\n * @endcode\n */\n#if defined(GENERATING_DOCUMENTATION)\nunspecified transfer_at_least(std::size_t minimum);\n#else\ninline detail::transfer_at_least_t transfer_at_least(std::size_t minimum)\n{\n  return detail::transfer_at_least_t(minimum);\n}\n#endif\n\n/// Return a completion condition function object that indicates that a read or\n/// write operation should continue until an exact number of bytes has been\n/// transferred, or until an error occurs.\n/**\n * This function is used to create an object, of unspecified type, that meets\n * CompletionCondition requirements.\n *\n * @par Example\n * Reading until a buffer is full or contains exactly 64 bytes:\n * @code\n * boost::array<char, 128> buf;\n * asio::error_code ec;\n * std::size_t n = asio::read(\n *     sock, asio::buffer(buf),\n *     asio::transfer_exactly(64), ec);\n * if (ec)\n * {\n *   // An error occurred.\n * }\n * else\n * {\n *   // n == 64\n * }\n * @endcode\n */\n#if defined(GENERATING_DOCUMENTATION)\nunspecified transfer_exactly(std::size_t size);\n#else\ninline detail::transfer_exactly_t transfer_exactly(std::size_t size)\n{\n  return detail::transfer_exactly_t(size);\n}\n#endif\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_COMPLETION_CONDITION_HPP\n"
  },
  {
    "path": "src/third_party/asio/compose.hpp",
    "content": "//\n// compose.hpp\n// ~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_COMPOSE_HPP\n#define ASIO_COMPOSE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/async_result.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Launch an asynchronous operation with a stateful implementation.\n/**\n * The async_compose function simplifies the implementation of composed\n * asynchronous operations automatically wrapping a stateful function object\n * with a conforming intermediate completion handler.\n *\n * @param implementation A function object that contains the implementation of\n * the composed asynchronous operation. The first argument to the function\n * object is a non-const reference to the enclosing intermediate completion\n * handler. 
The remaining arguments are any arguments that originate from the\n * completion handlers of any asynchronous operations performed by the\n * implementation.\n\n * @param token The completion token.\n *\n * @param io_objects_or_executors Zero or more I/O objects or I/O executors for\n * which outstanding work must be maintained.\n *\n * @par Example:\n *\n * @code struct async_echo_implementation\n * {\n *   tcp::socket& socket_;\n *   asio::mutable_buffer buffer_;\n *   enum { starting, reading, writing } state_;\n *\n *   template <typename Self>\n *   void operator()(Self& self,\n *       asio::error_code error = {},\n *       std::size_t n = 0)\n *   {\n *     switch (state_)\n *     {\n *     case starting:\n *       state_ = reading;\n *       socket_.async_read_some(\n *           buffer_, std::move(self));\n *       break;\n *     case reading:\n *       if (error)\n *       {\n *         self.complete(error, 0);\n *       }\n *       else\n *       {\n *         state_ = writing;\n *         asio::async_write(socket_, buffer_,\n *             asio::transfer_exactly(n),\n *             std::move(self));\n *       }\n *       break;\n *     case writing:\n *       self.complete(error, n);\n *       break;\n *     }\n *   }\n * };\n *\n * template <typename CompletionToken>\n * auto async_echo(tcp::socket& socket,\n *     asio::mutable_buffer buffer,\n *     CompletionToken&& token) ->\n *   typename asio::async_result<\n *     typename std::decay<CompletionToken>::type,\n *       void(asio::error_code, std::size_t)>::return_type\n * {\n *   return asio::async_compose<CompletionToken,\n *     void(asio::error_code, std::size_t)>(\n *       async_echo_implementation{socket, buffer,\n *         async_echo_implementation::starting},\n *       token, socket);\n * } @endcode\n */\ntemplate <typename CompletionToken, typename Signature,\n    typename Implementation, typename... 
IoObjectsOrExecutors>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature)\nasync_compose(ASIO_MOVE_ARG(Implementation) implementation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token,\n    ASIO_MOVE_ARG(IoObjectsOrExecutors)... io_objects_or_executors);\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n      //   || defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename CompletionToken, typename Signature, typename Implementation>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature)\nasync_compose(ASIO_MOVE_ARG(Implementation) implementation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token);\n\n#define ASIO_PRIVATE_ASYNC_COMPOSE_DEF(n) \\\n  template <typename CompletionToken, typename Signature, \\\n      typename Implementation, ASIO_VARIADIC_TPARAMS(n)> \\\n  ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature) \\\n  async_compose(ASIO_MOVE_ARG(Implementation) implementation, \\\n      ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n));\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_ASYNC_COMPOSE_DEF)\n#undef ASIO_PRIVATE_ASYNC_COMPOSE_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/compose.hpp\"\n\n#endif // ASIO_COMPOSE_HPP\n"
  },
  {
    "path": "src/third_party/asio/connect.hpp",
    "content": "//\n// connect.hpp\n// ~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_CONNECT_HPP\n#define ASIO_CONNECT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/basic_socket.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  char (&has_iterator_helper(...))[2];\n\n  template <typename T>\n  char has_iterator_helper(T*, typename T::iterator* = 0);\n\n  template <typename T>\n  struct has_iterator_typedef\n  {\n    enum { value = (sizeof((has_iterator_helper)((T*)(0))) == 1) };\n  };\n} // namespace detail\n\n/// Type trait used to determine whether a type is an endpoint sequence that can\n/// be used with with @c connect and @c async_connect.\ntemplate <typename T>\nstruct is_endpoint_sequence\n{\n#if defined(GENERATING_DOCUMENTATION)\n  /// The value member is true if the type may be used as an endpoint sequence.\n  static const bool value;\n#else\n  enum\n  {\n    value = detail::has_iterator_typedef<T>::value\n  };\n#endif\n};\n\n/**\n * @defgroup connect asio::connect\n *\n * @brief The @c connect function is a composed operation that establishes a\n * socket connection by trying each endpoint in a sequence.\n */\n/*@{*/\n\n/// Establishes a socket connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. 
It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param endpoints A sequence of endpoints.\n *\n * @returns The successfully connected endpoint.\n *\n * @throws asio::system_error Thrown on failure. If the sequence is\n * empty, the associated @c error_code is asio::error::not_found.\n * Otherwise, contains the error from the last connection attempt.\n *\n * @par Example\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::socket s(my_context);\n * asio::connect(s, r.resolve(q)); @endcode\n */\ntemplate <typename Protocol, typename Executor, typename EndpointSequence>\ntypename Protocol::endpoint connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type* = 0);\n\n/// Establishes a socket connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param endpoints A sequence of endpoints.\n *\n * @param ec Set to indicate what error occurred, if any. If the sequence is\n * empty, set to asio::error::not_found. Otherwise, contains the error\n * from the last connection attempt.\n *\n * @returns On success, the successfully connected endpoint. 
Otherwise, a\n * default-constructed endpoint.\n *\n * @par Example\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::socket s(my_context);\n * asio::error_code ec;\n * asio::connect(s, r.resolve(q), ec);\n * if (ec)\n * {\n *   // An error occurred.\n * } @endcode\n */\ntemplate <typename Protocol, typename Executor, typename EndpointSequence>\ntypename Protocol::endpoint connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints, asio::error_code& ec,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type* = 0);\n\n#if !defined(ASIO_NO_DEPRECATED)\n/// (Deprecated: Use range overload.) Establishes a socket connection by trying\n/// each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @returns On success, an iterator denoting the successfully connected\n * endpoint. Otherwise, the end iterator.\n *\n * @throws asio::system_error Thrown on failure. If the sequence is\n * empty, the associated @c error_code is asio::error::not_found.\n * Otherwise, contains the error from the last connection attempt.\n *\n * @note This overload assumes that a default constructed object of type @c\n * Iterator represents the end of the sequence. 
This is a valid assumption for\n * iterator types such as @c asio::ip::tcp::resolver::iterator.\n */\ntemplate <typename Protocol, typename Executor, typename Iterator>\nIterator connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type* = 0);\n\n/// (Deprecated: Use range overload.) Establishes a socket connection by trying\n/// each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param ec Set to indicate what error occurred, if any. If the sequence is\n * empty, set to asio::error::not_found. Otherwise, contains the error\n * from the last connection attempt.\n *\n * @returns On success, an iterator denoting the successfully connected\n * endpoint. Otherwise, the end iterator.\n *\n * @note This overload assumes that a default constructed object of type @c\n * Iterator represents the end of the sequence. This is a valid assumption for\n * iterator types such as @c asio::ip::tcp::resolver::iterator.\n */\ntemplate <typename Protocol, typename Executor, typename Iterator>\nIterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, asio::error_code& ec,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type* = 0);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n/// Establishes a socket connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. 
It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param end An iterator pointing to the end of a sequence of endpoints.\n *\n * @returns An iterator denoting the successfully connected endpoint.\n *\n * @throws asio::system_error Thrown on failure. If the sequence is\n * empty, the associated @c error_code is asio::error::not_found.\n * Otherwise, contains the error from the last connection attempt.\n *\n * @par Example\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::resolver::results_type e = r.resolve(q);\n * tcp::socket s(my_context);\n * asio::connect(s, e.begin(), e.end()); @endcode\n */\ntemplate <typename Protocol, typename Executor, typename Iterator>\nIterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, Iterator end);\n\n/// Establishes a socket connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param end An iterator pointing to the end of a sequence of endpoints.\n *\n * @param ec Set to indicate what error occurred, if any. If the sequence is\n * empty, set to asio::error::not_found. 
Otherwise, contains the error\n * from the last connection attempt.\n *\n * @returns On success, an iterator denoting the successfully connected\n * endpoint. Otherwise, the end iterator.\n *\n * @par Example\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::resolver::results_type e = r.resolve(q);\n * tcp::socket s(my_context);\n * asio::error_code ec;\n * asio::connect(s, e.begin(), e.end(), ec);\n * if (ec)\n * {\n *   // An error occurred.\n * } @endcode\n */\ntemplate <typename Protocol, typename Executor, typename Iterator>\nIterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, Iterator end, asio::error_code& ec);\n\n/// Establishes a socket connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param endpoints A sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @returns The successfully connected endpoint.\n *\n * @throws asio::system_error Thrown on failure. 
If the sequence is\n * empty, the associated @c error_code is asio::error::not_found.\n * Otherwise, contains the error from the last connection attempt.\n *\n * @par Example\n * The following connect condition function object can be used to output\n * information about the individual connection attempts:\n * @code struct my_connect_condition\n * {\n *   bool operator()(\n *       const asio::error_code& ec,\n *       const::tcp::endpoint& next)\n *   {\n *     if (ec) std::cout << \"Error: \" << ec.message() << std::endl;\n *     std::cout << \"Trying: \" << next << std::endl;\n *     return true;\n *   }\n * }; @endcode\n * It would be used with the asio::connect function as follows:\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::socket s(my_context);\n * tcp::endpoint e = asio::connect(s,\n *     r.resolve(q), my_connect_condition());\n * std::cout << \"Connected to: \" << e << std::endl; @endcode\n */\ntemplate <typename Protocol, typename Executor,\n    typename EndpointSequence, typename ConnectCondition>\ntypename Protocol::endpoint connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints, ConnectCondition connect_condition,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type* = 0);\n\n/// Establishes a socket connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param endpoints A sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. 
The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @param ec Set to indicate what error occurred, if any. If the sequence is\n * empty, set to asio::error::not_found. Otherwise, contains the error\n * from the last connection attempt.\n *\n * @returns On success, the successfully connected endpoint. Otherwise, a\n * default-constructed endpoint.\n *\n * @par Example\n * The following connect condition function object can be used to output\n * information about the individual connection attempts:\n * @code struct my_connect_condition\n * {\n *   bool operator()(\n *       const asio::error_code& ec,\n *       const::tcp::endpoint& next)\n *   {\n *     if (ec) std::cout << \"Error: \" << ec.message() << std::endl;\n *     std::cout << \"Trying: \" << next << std::endl;\n *     return true;\n *   }\n * }; @endcode\n * It would be used with the asio::connect function as follows:\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::socket s(my_context);\n * asio::error_code ec;\n * tcp::endpoint e = asio::connect(s,\n *     r.resolve(q), my_connect_condition(), ec);\n * if (ec)\n * {\n *   // An error occurred.\n * }\n * else\n * {\n *   std::cout << \"Connected to: \" << e << std::endl;\n * } @endcode\n */\ntemplate <typename Protocol, typename Executor,\n    typename EndpointSequence, typename ConnectCondition>\ntypename Protocol::endpoint connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints, ConnectCondition 
connect_condition,\n    asio::error_code& ec,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type* = 0);\n\n#if !defined(ASIO_NO_DEPRECATED)\n/// (Deprecated: Use range overload.) Establishes a socket connection by trying\n/// each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @returns On success, an iterator denoting the successfully connected\n * endpoint. Otherwise, the end iterator.\n *\n * @throws asio::system_error Thrown on failure. If the sequence is\n * empty, the associated @c error_code is asio::error::not_found.\n * Otherwise, contains the error from the last connection attempt.\n *\n * @note This overload assumes that a default constructed object of type @c\n * Iterator represents the end of the sequence. 
This is a valid assumption for\n * iterator types such as @c asio::ip::tcp::resolver::iterator.\n */\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition>\nIterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, ConnectCondition connect_condition,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type* = 0);\n\n/// (Deprecated: Use range overload.) Establishes a socket connection by trying\n/// each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @param ec Set to indicate what error occurred, if any. If the sequence is\n * empty, set to asio::error::not_found. Otherwise, contains the error\n * from the last connection attempt.\n *\n * @returns On success, an iterator denoting the successfully connected\n * endpoint. 
Otherwise, the end iterator.\n *\n * @note This overload assumes that a default constructed object of type @c\n * Iterator represents the end of the sequence. This is a valid assumption for\n * iterator types such as @c asio::ip::tcp::resolver::iterator.\n */\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition>\nIterator connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    ConnectCondition connect_condition, asio::error_code& ec,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type* = 0);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n/// Establishes a socket connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param end An iterator pointing to the end of a sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @returns An iterator denoting the successfully connected endpoint.\n *\n * @throws asio::system_error Thrown on failure. 
If the sequence is\n * empty, the associated @c error_code is asio::error::not_found.\n * Otherwise, contains the error from the last connection attempt.\n *\n * @par Example\n * The following connect condition function object can be used to output\n * information about the individual connection attempts:\n * @code struct my_connect_condition\n * {\n *   bool operator()(\n *       const asio::error_code& ec,\n *       const::tcp::endpoint& next)\n *   {\n *     if (ec) std::cout << \"Error: \" << ec.message() << std::endl;\n *     std::cout << \"Trying: \" << next << std::endl;\n *     return true;\n *   }\n * }; @endcode\n * It would be used with the asio::connect function as follows:\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::resolver::results_type e = r.resolve(q);\n * tcp::socket s(my_context);\n * tcp::resolver::results_type::iterator i = asio::connect(\n *     s, e.begin(), e.end(), my_connect_condition());\n * std::cout << \"Connected to: \" << i->endpoint() << std::endl; @endcode\n */\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition>\nIterator connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    Iterator end, ConnectCondition connect_condition);\n\n/// Establishes a socket connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c connect member\n * function, once for each endpoint in the sequence, until a connection is\n * successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param end An iterator pointing to the end of a sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. 
The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @param ec Set to indicate what error occurred, if any. If the sequence is\n * empty, set to asio::error::not_found. Otherwise, contains the error\n * from the last connection attempt.\n *\n * @returns On success, an iterator denoting the successfully connected\n * endpoint. Otherwise, the end iterator.\n *\n * @par Example\n * The following connect condition function object can be used to output\n * information about the individual connection attempts:\n * @code struct my_connect_condition\n * {\n *   bool operator()(\n *       const asio::error_code& ec,\n *       const::tcp::endpoint& next)\n *   {\n *     if (ec) std::cout << \"Error: \" << ec.message() << std::endl;\n *     std::cout << \"Trying: \" << next << std::endl;\n *     return true;\n *   }\n * }; @endcode\n * It would be used with the asio::connect function as follows:\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::resolver::results_type e = r.resolve(q);\n * tcp::socket s(my_context);\n * asio::error_code ec;\n * tcp::resolver::results_type::iterator i = asio::connect(\n *     s, e.begin(), e.end(), my_connect_condition(), ec);\n * if (ec)\n * {\n *   // An error occurred.\n * }\n * else\n * {\n *   std::cout << \"Connected to: \" << i->endpoint() << std::endl;\n * } @endcode\n */\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition>\nIterator connect(basic_socket<Protocol, Executor>& s,\n 
   Iterator begin, Iterator end, ConnectCondition connect_condition,\n    asio::error_code& ec);\n\n/*@}*/\n\n/**\n * @defgroup async_connect asio::async_connect\n *\n * @brief The @c async_connect function is a composed asynchronous operation\n * that establishes a socket connection by trying each endpoint in a sequence.\n */\n/*@{*/\n\n/// Asynchronously establishes a socket connection by trying each endpoint in a\n/// sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c async_connect\n * member function, once for each endpoint in the sequence, until a connection\n * is successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param endpoints A sequence of endpoints.\n *\n * @param handler The handler to be called when the connect operation\n * completes. Copies will be made of the handler as required. The function\n * signature of the handler must be:\n * @code void handler(\n *   // Result of operation. if the sequence is empty, set to\n *   // asio::error::not_found. Otherwise, contains the\n *   // error from the last connection attempt.\n *   const asio::error_code& error,\n *\n *   // On success, the successfully connected endpoint.\n *   // Otherwise, a default-constructed endpoint.\n *   const typename Protocol::endpoint& endpoint\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::socket s(my_context);\n *\n * // ...\n *\n * r.async_resolve(q, resolve_handler);\n *\n * // ...\n *\n * void resolve_handler(\n *     const asio::error_code& ec,\n *     tcp::resolver::results_type results)\n * {\n *   if (!ec)\n *   {\n *     asio::async_connect(s, results, connect_handler);\n *   }\n * }\n *\n * // ...\n *\n * void connect_handler(\n *     const asio::error_code& ec,\n *     const tcp::endpoint& endpoint)\n * {\n *   // ...\n * } @endcode\n */\ntemplate <typename Protocol, typename Executor, typename EndpointSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      typename Protocol::endpoint)) RangeConnectHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(RangeConnectHandler,\n    void (asio::error_code, typename Protocol::endpoint))\nasync_connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints,\n    ASIO_MOVE_ARG(RangeConnectHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(Executor),\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type* = 0);\n\n#if !defined(ASIO_NO_DEPRECATED)\n/// (Deprecated: Use range overload.) Asynchronously establishes a socket\n/// connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c async_connect\n * member function, once for each endpoint in the sequence, until a connection\n * is successfully established.\n *\n * @param s The socket to be connected. 
If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param handler The handler to be called when the connect operation\n * completes. Copies will be made of the handler as required. The function\n * signature of the handler must be:\n * @code void handler(\n *   // Result of operation. if the sequence is empty, set to\n *   // asio::error::not_found. Otherwise, contains the\n *   // error from the last connection attempt.\n *   const asio::error_code& error,\n *\n *   // On success, an iterator denoting the successfully\n *   // connected endpoint. Otherwise, the end iterator.\n *   Iterator iterator\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note This overload assumes that a default constructed object of type @c\n * Iterator represents the end of the sequence. 
This is a valid assumption for\n * iterator types such as @c asio::ip::tcp::resolver::iterator.\n */\ntemplate <typename Protocol, typename Executor, typename Iterator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      Iterator)) IteratorConnectHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(IteratorConnectHandler,\n    void (asio::error_code, Iterator))\nasync_connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    ASIO_MOVE_ARG(IteratorConnectHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(Executor),\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type* = 0);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n/// Asynchronously establishes a socket connection by trying each endpoint in a\n/// sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c async_connect\n * member function, once for each endpoint in the sequence, until a connection\n * is successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param end An iterator pointing to the end of a sequence of endpoints.\n *\n * @param handler The handler to be called when the connect operation\n * completes. Copies will be made of the handler as required. The function\n * signature of the handler must be:\n * @code void handler(\n *   // Result of operation. if the sequence is empty, set to\n *   // asio::error::not_found. Otherwise, contains the\n *   // error from the last connection attempt.\n *   const asio::error_code& error,\n *\n *   // On success, an iterator denoting the successfully\n *   // connected endpoint. 
Otherwise, the end iterator.\n *   Iterator iterator\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * @code std::vector<tcp::endpoint> endpoints = ...;\n * tcp::socket s(my_context);\n * asio::async_connect(s,\n *     endpoints.begin(), endpoints.end(),\n *     connect_handler);\n *\n * // ...\n *\n * void connect_handler(\n *     const asio::error_code& ec,\n *     std::vector<tcp::endpoint>::iterator i)\n * {\n *   // ...\n * } @endcode\n */\ntemplate <typename Protocol, typename Executor, typename Iterator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      Iterator)) IteratorConnectHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(IteratorConnectHandler,\n    void (asio::error_code, Iterator))\nasync_connect(basic_socket<Protocol, Executor>& s, Iterator begin, Iterator end,\n    ASIO_MOVE_ARG(IteratorConnectHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(Executor));\n\n/// Asynchronously establishes a socket connection by trying each endpoint in a\n/// sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c async_connect\n * member function, once for each endpoint in the sequence, until a connection\n * is successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param endpoints A sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. 
The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @param handler The handler to be called when the connect operation\n * completes. Copies will be made of the handler as required. The function\n * signature of the handler must be:\n * @code void handler(\n *   // Result of operation. if the sequence is empty, set to\n *   // asio::error::not_found. Otherwise, contains the\n *   // error from the last connection attempt.\n *   const asio::error_code& error,\n *\n *   // On success, an iterator denoting the successfully\n *   // connected endpoint. Otherwise, the end iterator.\n *   Iterator iterator\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * The following connect condition function object can be used to output\n * information about the individual connection attempts:\n * @code struct my_connect_condition\n * {\n *   bool operator()(\n *       const asio::error_code& ec,\n *       const::tcp::endpoint& next)\n *   {\n *     if (ec) std::cout << \"Error: \" << ec.message() << std::endl;\n *     std::cout << \"Trying: \" << next << std::endl;\n *     return true;\n *   }\n * }; @endcode\n * It would be used with the asio::connect function as follows:\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::socket s(my_context);\n *\n * // ...\n *\n * r.async_resolve(q, resolve_handler);\n *\n * // ...\n *\n * void resolve_handler(\n *     const asio::error_code& ec,\n *     tcp::resolver::results_type results)\n * {\n *   if (!ec)\n *   {\n *     asio::async_connect(s, results,\n *         my_connect_condition(),\n *         connect_handler);\n *   }\n * }\n *\n * // ...\n *\n * void connect_handler(\n *     const asio::error_code& ec,\n *     const tcp::endpoint& endpoint)\n * {\n *   if (ec)\n *   {\n *     // An error occurred.\n *   }\n *   else\n *   {\n *     std::cout << \"Connected to: \" << endpoint << std::endl;\n *   }\n * } @endcode\n */\ntemplate <typename Protocol, typename Executor,\n    typename EndpointSequence, typename ConnectCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      typename Protocol::endpoint)) RangeConnectHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(RangeConnectHandler,\n    void (asio::error_code, typename Protocol::endpoint))\nasync_connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints, ConnectCondition connect_condition,\n    ASIO_MOVE_ARG(RangeConnectHandler) handler\n      
ASIO_DEFAULT_COMPLETION_TOKEN(Executor),\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type* = 0);\n\n#if !defined(ASIO_NO_DEPRECATED)\n/// (Deprecated: Use range overload.) Asynchronously establishes a socket\n/// connection by trying each endpoint in a sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c async_connect\n * member function, once for each endpoint in the sequence, until a connection\n * is successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @param handler The handler to be called when the connect operation\n * completes. Copies will be made of the handler as required. The function\n * signature of the handler must be:\n * @code void handler(\n *   // Result of operation. if the sequence is empty, set to\n *   // asio::error::not_found. Otherwise, contains the\n *   // error from the last connection attempt.\n *   const asio::error_code& error,\n *\n *   // On success, an iterator denoting the successfully\n *   // connected endpoint. 
Otherwise, the end iterator.\n *   Iterator iterator\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note This overload assumes that a default constructed object of type @c\n * Iterator represents the end of the sequence. This is a valid assumption for\n * iterator types such as @c asio::ip::tcp::resolver::iterator.\n */\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      Iterator)) IteratorConnectHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(IteratorConnectHandler,\n    void (asio::error_code, Iterator))\nasync_connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    ConnectCondition connect_condition,\n    ASIO_MOVE_ARG(IteratorConnectHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(Executor),\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type* = 0);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n/// Asynchronously establishes a socket connection by trying each endpoint in a\n/// sequence.\n/**\n * This function attempts to connect a socket to one of a sequence of\n * endpoints. It does this by repeated calls to the socket's @c async_connect\n * member function, once for each endpoint in the sequence, until a connection\n * is successfully established.\n *\n * @param s The socket to be connected. If the socket is already open, it will\n * be closed.\n *\n * @param begin An iterator pointing to the start of a sequence of endpoints.\n *\n * @param end An iterator pointing to the end of a sequence of endpoints.\n *\n * @param connect_condition A function object that is called prior to each\n * connection attempt. 
The signature of the function object must be:\n * @code bool connect_condition(\n *     const asio::error_code& ec,\n *     const typename Protocol::endpoint& next); @endcode\n * The @c ec parameter contains the result from the most recent connect\n * operation. Before the first connection attempt, @c ec is always set to\n * indicate success. The @c next parameter is the next endpoint to be tried.\n * The function object should return true if the next endpoint should be tried,\n * and false if it should be skipped.\n *\n * @param handler The handler to be called when the connect operation\n * completes. Copies will be made of the handler as required. The function\n * signature of the handler must be:\n * @code void handler(\n *   // Result of operation. if the sequence is empty, set to\n *   // asio::error::not_found. Otherwise, contains the\n *   // error from the last connection attempt.\n *   const asio::error_code& error,\n *\n *   // On success, an iterator denoting the successfully\n *   // connected endpoint. Otherwise, the end iterator.\n *   Iterator iterator\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * The following connect condition function object can be used to output\n * information about the individual connection attempts:\n * @code struct my_connect_condition\n * {\n *   bool operator()(\n *       const asio::error_code& ec,\n *       const::tcp::endpoint& next)\n *   {\n *     if (ec) std::cout << \"Error: \" << ec.message() << std::endl;\n *     std::cout << \"Trying: \" << next << std::endl;\n *     return true;\n *   }\n * }; @endcode\n * It would be used with the asio::connect function as follows:\n * @code tcp::resolver r(my_context);\n * tcp::resolver::query q(\"host\", \"service\");\n * tcp::socket s(my_context);\n *\n * // ...\n *\n * r.async_resolve(q, resolve_handler);\n *\n * // ...\n *\n * void resolve_handler(\n *     const asio::error_code& ec,\n *     tcp::resolver::iterator i)\n * {\n *   if (!ec)\n *   {\n *     tcp::resolver::iterator end;\n *     asio::async_connect(s, i, end,\n *         my_connect_condition(),\n *         connect_handler);\n *   }\n * }\n *\n * // ...\n *\n * void connect_handler(\n *     const asio::error_code& ec,\n *     tcp::resolver::iterator i)\n * {\n *   if (ec)\n *   {\n *     // An error occurred.\n *   }\n *   else\n *   {\n *     std::cout << \"Connected to: \" << i->endpoint() << std::endl;\n *   }\n * } @endcode\n */\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      Iterator)) IteratorConnectHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(IteratorConnectHandler,\n    void (asio::error_code, Iterator))\nasync_connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    Iterator end, ConnectCondition connect_condition,\n    ASIO_MOVE_ARG(IteratorConnectHandler) handler\n      
ASIO_DEFAULT_COMPLETION_TOKEN(Executor));\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/connect.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/third_party/asio/coroutine.hpp",
    "content": "//\n// coroutine.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_COROUTINE_HPP\n#define ASIO_COROUTINE_HPP\n\nnamespace asio {\nnamespace detail {\n\nclass coroutine_ref;\n\n} // namespace detail\n\n/// Provides support for implementing stackless coroutines.\n/**\n * The @c coroutine class may be used to implement stackless coroutines. The\n * class itself is used to store the current state of the coroutine.\n *\n * Coroutines are copy-constructible and assignable, and the space overhead is\n * a single int. They can be used as a base class:\n *\n * @code class session : coroutine\n * {\n *   ...\n * }; @endcode\n *\n * or as a data member:\n *\n * @code class session\n * {\n *   ...\n *   coroutine coro_;\n * }; @endcode\n *\n * or even bound in as a function argument using lambdas or @c bind(). The\n * important thing is that as the application maintains a copy of the object\n * for as long as the coroutine must be kept alive.\n *\n * @par Pseudo-keywords\n *\n * A coroutine is used in conjunction with certain \"pseudo-keywords\", which\n * are implemented as macros. These macros are defined by a header file:\n *\n * @code #include <asio/yield.hpp>@endcode\n *\n * and may conversely be undefined as follows:\n *\n * @code #include <asio/unyield.hpp>@endcode\n *\n * <b>reenter</b>\n *\n * The @c reenter macro is used to define the body of a coroutine. It takes a\n * single argument: a pointer or reference to a coroutine object. For example,\n * if the base class is a coroutine object you may write:\n *\n * @code reenter (this)\n * {\n *   ... coroutine body ...\n * } @endcode\n *\n * and if a data member or other variable you can write:\n *\n * @code reenter (coro_)\n * {\n *   ... 
coroutine body ...\n * } @endcode\n *\n * When @c reenter is executed at runtime, control jumps to the location of the\n * last @c yield or @c fork.\n *\n * The coroutine body may also be a single statement, such as:\n *\n * @code reenter (this) for (;;)\n * {\n *   ...\n * } @endcode\n *\n * @b Limitation: The @c reenter macro is implemented using a switch. This\n * means that you must take care when using local variables within the\n * coroutine body. The local variable is not allowed in a position where\n * reentering the coroutine could bypass the variable definition.\n *\n * <b>yield <em>statement</em></b>\n *\n * This form of the @c yield keyword is often used with asynchronous operations:\n *\n * @code yield socket_->async_read_some(buffer(*buffer_), *this); @endcode\n *\n * This divides into four logical steps:\n *\n * @li @c yield saves the current state of the coroutine.\n * @li The statement initiates the asynchronous operation.\n * @li The resume point is defined immediately following the statement.\n * @li Control is transferred to the end of the coroutine body.\n *\n * When the asynchronous operation completes, the function object is invoked\n * and @c reenter causes control to transfer to the resume point. It is\n * important to remember to carry the coroutine state forward with the\n * asynchronous operation. 
In the above snippet, the current class is a\n * function object object with a coroutine object as base class or data member.\n *\n * The statement may also be a compound statement, and this permits us to\n * define local variables with limited scope:\n *\n * @code yield\n * {\n *   mutable_buffers_1 b = buffer(*buffer_);\n *   socket_->async_read_some(b, *this);\n * } @endcode\n *\n * <b>yield return <em>expression</em> ;</b>\n *\n * This form of @c yield is often used in generators or coroutine-based parsers.\n * For example, the function object:\n *\n * @code struct interleave : coroutine\n * {\n *   istream& is1;\n *   istream& is2;\n *   char operator()(char c)\n *   {\n *     reenter (this) for (;;)\n *     {\n *       yield return is1.get();\n *       yield return is2.get();\n *     }\n *   }\n * }; @endcode\n *\n * defines a trivial coroutine that interleaves the characters from two input\n * streams.\n *\n * This type of @c yield divides into three logical steps:\n *\n * @li @c yield saves the current state of the coroutine.\n * @li The resume point is defined immediately following the semicolon.\n * @li The value of the expression is returned from the function.\n *\n * <b>yield ;</b>\n *\n * This form of @c yield is equivalent to the following steps:\n *\n * @li @c yield saves the current state of the coroutine.\n * @li The resume point is defined immediately following the semicolon.\n * @li Control is transferred to the end of the coroutine body.\n *\n * This form might be applied when coroutines are used for cooperative\n * threading and scheduling is explicitly managed. For example:\n *\n * @code struct task : coroutine\n * {\n *   ...\n *   void operator()()\n *   {\n *     reenter (this)\n *     {\n *       while (... not finished ...)\n *       {\n *         ... do something ...\n *         yield;\n *         ... 
do some more ...\n *         yield;\n *       }\n *     }\n *   }\n *   ...\n * };\n * ...\n * task t1, t2;\n * for (;;)\n * {\n *   t1();\n *   t2();\n * } @endcode\n *\n * <b>yield break ;</b>\n *\n * The final form of @c yield is used to explicitly terminate the coroutine.\n * This form is comprised of two steps:\n *\n * @li @c yield sets the coroutine state to indicate termination.\n * @li Control is transferred to the end of the coroutine body.\n *\n * Once terminated, calls to is_complete() return true and the coroutine cannot\n * be reentered.\n *\n * Note that a coroutine may also be implicitly terminated if the coroutine\n * body is exited without a yield, e.g. by return, throw or by running to the\n * end of the body.\n *\n * <b>fork <em>statement</em></b>\n *\n * The @c fork pseudo-keyword is used when \"forking\" a coroutine, i.e. splitting\n * it into two (or more) copies. One use of @c fork is in a server, where a new\n * coroutine is created to handle each client connection:\n * \n * @code reenter (this)\n * {\n *   do\n *   {\n *     socket_.reset(new tcp::socket(my_context_));\n *     yield acceptor->async_accept(*socket_, *this);\n *     fork server(*this)();\n *   } while (is_parent());\n *   ... client-specific handling follows ...\n * } @endcode\n * \n * The logical steps involved in a @c fork are:\n * \n * @li @c fork saves the current state of the coroutine.\n * @li The statement creates a copy of the coroutine and either executes it\n *     immediately or schedules it for later execution.\n * @li The resume point is defined immediately following the semicolon.\n * @li For the \"parent\", control immediately continues from the next line.\n *\n * The functions is_parent() and is_child() can be used to differentiate\n * between parent and child. You would use these functions to alter subsequent\n * control flow.\n *\n * Note that @c fork doesn't do the actual forking by itself. 
It is the\n * application's responsibility to create a clone of the coroutine and call it.\n * The clone can be called immediately, as above, or scheduled for delayed\n * execution using something like asio::post().\n *\n * @par Alternate macro names\n *\n * If preferred, an application can use macro names that follow a more typical\n * naming convention, rather than the pseudo-keywords. These are:\n *\n * @li @c ASIO_CORO_REENTER instead of @c reenter\n * @li @c ASIO_CORO_YIELD instead of @c yield\n * @li @c ASIO_CORO_FORK instead of @c fork\n */\nclass coroutine\n{\npublic:\n  /// Constructs a coroutine in its initial state.\n  coroutine() : value_(0) {}\n\n  /// Returns true if the coroutine is the child of a fork.\n  bool is_child() const { return value_ < 0; }\n\n  /// Returns true if the coroutine is the parent of a fork.\n  bool is_parent() const { return !is_child(); }\n\n  /// Returns true if the coroutine has reached its terminal state.\n  bool is_complete() const { return value_ == -1; }\n\nprivate:\n  friend class detail::coroutine_ref;\n  int value_;\n};\n\n\nnamespace detail {\n\nclass coroutine_ref\n{\npublic:\n  coroutine_ref(coroutine& c) : value_(c.value_), modified_(false) {}\n  coroutine_ref(coroutine* c) : value_(c->value_), modified_(false) {}\n  ~coroutine_ref() { if (!modified_) value_ = -1; }\n  operator int() const { return value_; }\n  int& operator=(int v) { modified_ = true; return value_ = v; }\nprivate:\n  void operator=(const coroutine_ref&);\n  int& value_;\n  bool modified_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#define ASIO_CORO_REENTER(c) \\\n  switch (::asio::detail::coroutine_ref _coro_value = c) \\\n    case -1: if (_coro_value) \\\n    { \\\n      goto terminate_coroutine; \\\n      terminate_coroutine: \\\n      _coro_value = -1; \\\n      goto bail_out_of_coroutine; \\\n      bail_out_of_coroutine: \\\n      break; \\\n    } \\\n    else /* fall-through */ case 0:\n\n#define ASIO_CORO_YIELD_IMPL(n) \\\n  for 
(_coro_value = (n);;) \\\n    if (_coro_value == 0) \\\n    { \\\n      case (n): ; \\\n      break; \\\n    } \\\n    else \\\n      switch (_coro_value ? 0 : 1) \\\n        for (;;) \\\n          /* fall-through */ case -1: if (_coro_value) \\\n            goto terminate_coroutine; \\\n          else for (;;) \\\n            /* fall-through */ case 1: if (_coro_value) \\\n              goto bail_out_of_coroutine; \\\n            else /* fall-through */ case 0:\n\n#define ASIO_CORO_FORK_IMPL(n) \\\n  for (_coro_value = -(n);; _coro_value = (n)) \\\n    if (_coro_value == (n)) \\\n    { \\\n      case -(n): ; \\\n      break; \\\n    } \\\n    else\n\n#if defined(_MSC_VER)\n# define ASIO_CORO_YIELD ASIO_CORO_YIELD_IMPL(__COUNTER__ + 1)\n# define ASIO_CORO_FORK ASIO_CORO_FORK_IMPL(__COUNTER__ + 1)\n#else // defined(_MSC_VER)\n# define ASIO_CORO_YIELD ASIO_CORO_YIELD_IMPL(__LINE__)\n# define ASIO_CORO_FORK ASIO_CORO_FORK_IMPL(__LINE__)\n#endif // defined(_MSC_VER)\n\n#endif // ASIO_COROUTINE_HPP\n"
  },
  {
    "path": "src/third_party/asio/deadline_timer.hpp",
    "content": "//\n// deadline_timer.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DEADLINE_TIMER_HPP\n#define ASIO_DEADLINE_TIMER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/detail/socket_types.hpp\" // Must come before posix_time.\n#include \"asio/basic_deadline_timer.hpp\"\n\n#include <boost/date_time/posix_time/posix_time_types.hpp>\n\nnamespace asio {\n\n/// Typedef for the typical usage of timer. Uses a UTC clock.\ntypedef basic_deadline_timer<boost::posix_time::ptime> deadline_timer;\n\n} // namespace asio\n\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_DEADLINE_TIMER_HPP\n"
  },
  {
    "path": "src/third_party/asio/defer.hpp",
    "content": "//\n// defer.hpp\n// ~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DEFER_HPP\n#define ASIO_DEFER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/is_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Submits a completion token or function object for execution.\n/**\n * This function submits an object for execution using the object's associated\n * executor. The function object is queued for execution, and is never called\n * from the current thread prior to returning from <tt>defer()</tt>.\n *\n * The use of @c defer(), rather than @ref post(), indicates the caller's\n * preference that the executor defer the queueing of the function object. 
This\n * may allow the executor to optimise queueing for cases when the function\n * object represents a continuation of the current call context.\n *\n * This function has the following effects:\n *\n * @li Constructs a function object handler of type @c Handler, initialized\n * with <tt>handler(forward<CompletionToken>(token))</tt>.\n *\n * @li Constructs an object @c result of type <tt>async_result<Handler></tt>,\n * initializing the object as <tt>result(handler)</tt>.\n *\n * @li Obtains the handler's associated executor object @c ex by performing\n * <tt>get_associated_executor(handler)</tt>.\n *\n * @li Obtains the handler's associated allocator object @c alloc by performing\n * <tt>get_associated_allocator(handler)</tt>.\n *\n * @li Performs <tt>ex.defer(std::move(handler), alloc)</tt>.\n *\n * @li Returns <tt>result.get()</tt>.\n */\ntemplate <ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) defer(\n    ASIO_MOVE_ARG(CompletionToken) token);\n\n/// Submits a completion token or function object for execution.\n/**\n * This function submits an object for execution using the specified executor.\n * The function object is queued for execution, and is never called from the\n * current thread prior to returning from <tt>defer()</tt>.\n *\n * The use of @c defer(), rather than @ref post(), indicates the caller's\n * preference that the executor defer the queueing of the function object. 
This\n * may allow the executor to optimise queueing for cases when the function\n * object represents a continuation of the current call context.\n *\n * This function has the following effects:\n *\n * @li Constructs a function object handler of type @c Handler, initialized\n * with <tt>handler(forward<CompletionToken>(token))</tt>.\n *\n * @li Constructs an object @c result of type <tt>async_result<Handler></tt>,\n * initializing the object as <tt>result(handler)</tt>.\n *\n * @li Obtains the handler's associated executor object @c ex1 by performing\n * <tt>get_associated_executor(handler)</tt>.\n *\n * @li Creates a work object @c w by performing <tt>make_work(ex1)</tt>.\n *\n * @li Obtains the handler's associated allocator object @c alloc by performing\n * <tt>get_associated_allocator(handler)</tt>.\n *\n * @li Constructs a function object @c f with a function call operator that\n * performs <tt>ex1.dispatch(std::move(handler), alloc)</tt> followed by\n * <tt>w.reset()</tt>.\n *\n * @li Performs <tt>Executor(ex).defer(std::move(f), alloc)</tt>.\n *\n * @li Returns <tt>result.get()</tt>.\n */\ntemplate <typename Executor,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken\n      ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) defer(\n    const Executor& ex,\n    ASIO_MOVE_ARG(CompletionToken) token\n      ASIO_DEFAULT_COMPLETION_TOKEN(Executor),\n    typename enable_if<is_executor<Executor>::value>::type* = 0);\n\n/// Submits a completion token or function object for execution.\n/**\n * @returns <tt>defer(ctx.get_executor(), forward<CompletionToken>(token))</tt>.\n */\ntemplate <typename ExecutionContext,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken\n      ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n        typename ExecutionContext::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) defer(\n    ExecutionContext& ctx,\n    ASIO_MOVE_ARG(CompletionToken) token\n      
ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename ExecutionContext::executor_type),\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type* = 0);\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/defer.hpp\"\n\n#endif // ASIO_DEFER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detached.hpp",
    "content": "//\n// detached.hpp\n// ~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETACHED_HPP\n#define ASIO_DETACHED_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <memory>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Class used to specify that an asynchronous operation is detached.\n/**\n\n * The detached_t class is used to indicate that an asynchronous operation is\n * detached. That is, there is no completion handler waiting for the\n * operation's result. A detached_t object may be passed as a handler to an\n * asynchronous operation, typically using the special value\n * @c asio::detached. For example:\n\n * @code my_socket.async_send(my_buffer, asio::detached);\n * @endcode\n */\nclass detached_t\n{\npublic:\n  /// Constructor. \n  ASIO_CONSTEXPR detached_t()\n  {\n  }\n};\n\n/// A special value, similar to std::nothrow.\n/**\n * See the documentation for asio::detached_t for a usage example.\n */\n#if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION)\nconstexpr detached_t detached;\n#elif defined(ASIO_MSVC)\n__declspec(selectany) detached_t detached;\n#endif\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/detached.hpp\"\n\n#endif // ASIO_DETACHED_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/array.hpp",
    "content": "//\n// detail/array.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_ARRAY_HPP\n#define ASIO_DETAIL_ARRAY_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_ARRAY)\n# include <array>\n#else // defined(ASIO_HAS_STD_ARRAY)\n# include <boost/array.hpp>\n#endif // defined(ASIO_HAS_STD_ARRAY)\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_HAS_STD_ARRAY)\nusing std::array;\n#else // defined(ASIO_HAS_STD_ARRAY)\nusing boost::array;\n#endif // defined(ASIO_HAS_STD_ARRAY)\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_ARRAY_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/array_fwd.hpp",
    "content": "//\n// detail/array_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_ARRAY_FWD_HPP\n#define ASIO_DETAIL_ARRAY_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\nnamespace boost {\n\ntemplate<class T, std::size_t N>\nclass array;\n\n} // namespace boost\n\n// Standard library components can't be forward declared, so we'll have to\n// include the array header. Fortunately, it's fairly lightweight and doesn't\n// add significantly to the compile time.\n#if defined(ASIO_HAS_STD_ARRAY)\n# include <array>\n#endif // defined(ASIO_HAS_STD_ARRAY)\n\n#endif // ASIO_DETAIL_ARRAY_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/assert.hpp",
    "content": "//\n// detail/assert.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_ASSERT_HPP\n#define ASIO_DETAIL_ASSERT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_ASSERT)\n# include <boost/assert.hpp>\n#else // defined(ASIO_HAS_BOOST_ASSERT)\n# include <cassert>\n#endif // defined(ASIO_HAS_BOOST_ASSERT)\n\n#if defined(ASIO_HAS_BOOST_ASSERT)\n# define ASIO_ASSERT(expr) BOOST_ASSERT(expr)\n#else // defined(ASIO_HAS_BOOST_ASSERT)\n# define ASIO_ASSERT(expr) assert(expr)\n#endif // defined(ASIO_HAS_BOOST_ASSERT)\n\n#endif // ASIO_DETAIL_ASSERT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/atomic_count.hpp",
    "content": "//\n// detail/atomic_count.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_ATOMIC_COUNT_HPP\n#define ASIO_DETAIL_ATOMIC_COUNT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n// Nothing to include.\n#elif defined(ASIO_HAS_STD_ATOMIC)\n# include <atomic>\n#else // defined(ASIO_HAS_STD_ATOMIC)\n# include <boost/detail/atomic_count.hpp>\n#endif // defined(ASIO_HAS_STD_ATOMIC)\n\nnamespace asio {\nnamespace detail {\n\n#if !defined(ASIO_HAS_THREADS)\ntypedef long atomic_count;\ninline void increment(atomic_count& a, long b) { a += b; }\n#elif defined(ASIO_HAS_STD_ATOMIC)\ntypedef std::atomic<long> atomic_count;\ninline void increment(atomic_count& a, long b) { a += b; }\n#else // defined(ASIO_HAS_STD_ATOMIC)\ntypedef boost::detail::atomic_count atomic_count;\ninline void increment(atomic_count& a, long b) { while (b > 0) ++a, --b; }\n#endif // defined(ASIO_HAS_STD_ATOMIC)\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_ATOMIC_COUNT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/base_from_completion_cond.hpp",
    "content": "//\n// detail/base_from_completion_cond.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP\n#define ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/completion_condition.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename CompletionCondition>\nclass base_from_completion_cond\n{\nprotected:\n  explicit base_from_completion_cond(CompletionCondition& completion_condition)\n    : completion_condition_(\n        ASIO_MOVE_CAST(CompletionCondition)(completion_condition))\n  {\n  }\n\n  std::size_t check_for_completion(\n      const asio::error_code& ec,\n      std::size_t total_transferred)\n  {\n    return detail::adapt_completion_condition_result(\n        completion_condition_(ec, total_transferred));\n  }\n\nprivate:\n  CompletionCondition completion_condition_;\n};\n\ntemplate <>\nclass base_from_completion_cond<transfer_all_t>\n{\nprotected:\n  explicit base_from_completion_cond(transfer_all_t)\n  {\n  }\n\n  static std::size_t check_for_completion(\n      const asio::error_code& ec,\n      std::size_t total_transferred)\n  {\n    return transfer_all_t()(ec, total_transferred);\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/bind_handler.hpp",
    "content": "//\n// detail/bind_handler.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_BIND_HANDLER_HPP\n#define ASIO_DETAIL_BIND_HANDLER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename Arg1>\nclass binder1\n{\npublic:\n  template <typename T>\n  binder1(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1)\n    : handler_(ASIO_MOVE_CAST(T)(handler)),\n      arg1_(arg1)\n  {\n  }\n\n  binder1(Handler& handler, const Arg1& arg1)\n    : handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      arg1_(arg1)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  binder1(const binder1& other)\n    : handler_(other.handler_),\n      arg1_(other.arg1_)\n  {\n  }\n\n  binder1(binder1&& other)\n    : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)),\n      arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()()\n  {\n    handler_(static_cast<const Arg1&>(arg1_));\n  }\n\n  void operator()() const\n  {\n    handler_(arg1_);\n  }\n\n//private:\n  Handler handler_;\n  Arg1 arg1_;\n};\n\ntemplate <typename Handler, typename Arg1>\ninline void* asio_handler_allocate(std::size_t size,\n    binder1<Handler, Arg1>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, 
this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    binder1<Handler, Arg1>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1>\ninline bool asio_handler_is_continuation(\n    binder1<Handler, Arg1>* this_handler)\n{\n  return asio_handler_cont_helpers::is_continuation(\n      this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1>\ninline void asio_handler_invoke(Function& function,\n    binder1<Handler, Arg1>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1>\ninline void asio_handler_invoke(const Function& function,\n    binder1<Handler, Arg1>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1>\ninline binder1<typename decay<Handler>::type, Arg1> bind_handler(\n    ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1)\n{\n  return binder1<typename decay<Handler>::type, Arg1>(0,\n      ASIO_MOVE_CAST(Handler)(handler), arg1);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\nclass binder2\n{\npublic:\n  template <typename T>\n  binder2(int, ASIO_MOVE_ARG(T) handler,\n      const Arg1& arg1, const Arg2& arg2)\n    : handler_(ASIO_MOVE_CAST(T)(handler)),\n      arg1_(arg1),\n      arg2_(arg2)\n  {\n  }\n\n  binder2(Handler& handler, const Arg1& arg1, const Arg2& arg2)\n    : handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      arg1_(arg1),\n      arg2_(arg2)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  binder2(const binder2& other)\n    : handler_(other.handler_),\n      arg1_(other.arg1_),\n      arg2_(other.arg2_)\n  {\n  }\n\n  binder2(binder2&& other)\n    : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)),\n  
    arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)),\n      arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()()\n  {\n    handler_(static_cast<const Arg1&>(arg1_),\n        static_cast<const Arg2&>(arg2_));\n  }\n\n  void operator()() const\n  {\n    handler_(arg1_, arg2_);\n  }\n\n//private:\n  Handler handler_;\n  Arg1 arg1_;\n  Arg2 arg2_;\n};\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\ninline void* asio_handler_allocate(std::size_t size,\n    binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\ninline bool asio_handler_is_continuation(\n    binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  return asio_handler_cont_helpers::is_continuation(\n      this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1, typename Arg2>\ninline void asio_handler_invoke(Function& function,\n    binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1, typename Arg2>\ninline void asio_handler_invoke(const Function& function,\n    binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\ninline binder2<typename decay<Handler>::type, Arg1, Arg2> bind_handler(\n    ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1, const Arg2& arg2)\n{\n  return binder2<typename decay<Handler>::type, Arg1, Arg2>(0,\n    
  ASIO_MOVE_CAST(Handler)(handler), arg1, arg2);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2, typename Arg3>\nclass binder3\n{\npublic:\n  template <typename T>\n  binder3(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1,\n      const Arg2& arg2, const Arg3& arg3)\n    : handler_(ASIO_MOVE_CAST(T)(handler)),\n      arg1_(arg1),\n      arg2_(arg2),\n      arg3_(arg3)\n  {\n  }\n\n  binder3(Handler& handler, const Arg1& arg1,\n      const Arg2& arg2, const Arg3& arg3)\n    : handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      arg1_(arg1),\n      arg2_(arg2),\n      arg3_(arg3)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  binder3(const binder3& other)\n    : handler_(other.handler_),\n      arg1_(other.arg1_),\n      arg2_(other.arg2_),\n      arg3_(other.arg3_)\n  {\n  }\n\n  binder3(binder3&& other)\n    : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)),\n      arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)),\n      arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_)),\n      arg3_(ASIO_MOVE_CAST(Arg3)(other.arg3_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()()\n  {\n    handler_(static_cast<const Arg1&>(arg1_),\n        static_cast<const Arg2&>(arg2_), static_cast<const Arg3&>(arg3_));\n  }\n\n  void operator()() const\n  {\n    handler_(arg1_, arg2_, arg3_);\n  }\n\n//private:\n  Handler handler_;\n  Arg1 arg1_;\n  Arg2 arg2_;\n  Arg3 arg3_;\n};\n\ntemplate <typename Handler, typename Arg1, typename Arg2, typename Arg3>\ninline void* asio_handler_allocate(std::size_t size,\n    binder3<Handler, Arg1, Arg2, Arg3>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2, typename Arg3>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    binder3<Handler, Arg1, Arg2, Arg3>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Handler, 
typename Arg1, typename Arg2, typename Arg3>\ninline bool asio_handler_is_continuation(\n    binder3<Handler, Arg1, Arg2, Arg3>* this_handler)\n{\n  return asio_handler_cont_helpers::is_continuation(\n      this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler,\n    typename Arg1, typename Arg2, typename Arg3>\ninline void asio_handler_invoke(Function& function,\n    binder3<Handler, Arg1, Arg2, Arg3>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler,\n    typename Arg1, typename Arg2, typename Arg3>\ninline void asio_handler_invoke(const Function& function,\n    binder3<Handler, Arg1, Arg2, Arg3>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2, typename Arg3>\ninline binder3<typename decay<Handler>::type, Arg1, Arg2, Arg3> bind_handler(\n    ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1, const Arg2& arg2,\n    const Arg3& arg3)\n{\n  return binder3<typename decay<Handler>::type, Arg1, Arg2, Arg3>(0,\n      ASIO_MOVE_CAST(Handler)(handler), arg1, arg2, arg3);\n}\n\ntemplate <typename Handler, typename Arg1,\n    typename Arg2, typename Arg3, typename Arg4>\nclass binder4\n{\npublic:\n  template <typename T>\n  binder4(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1,\n      const Arg2& arg2, const Arg3& arg3, const Arg4& arg4)\n    : handler_(ASIO_MOVE_CAST(T)(handler)),\n      arg1_(arg1),\n      arg2_(arg2),\n      arg3_(arg3),\n      arg4_(arg4)\n  {\n  }\n\n  binder4(Handler& handler, const Arg1& arg1,\n      const Arg2& arg2, const Arg3& arg3, const Arg4& arg4)\n    : handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      arg1_(arg1),\n      arg2_(arg2),\n      arg3_(arg3),\n      arg4_(arg4)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  binder4(const binder4& other)\n    : handler_(other.handler_),\n      arg1_(other.arg1_),\n   
   arg2_(other.arg2_),\n      arg3_(other.arg3_),\n      arg4_(other.arg4_)\n  {\n  }\n\n  binder4(binder4&& other)\n    : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)),\n      arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)),\n      arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_)),\n      arg3_(ASIO_MOVE_CAST(Arg3)(other.arg3_)),\n      arg4_(ASIO_MOVE_CAST(Arg4)(other.arg4_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()()\n  {\n    handler_(static_cast<const Arg1&>(arg1_),\n        static_cast<const Arg2&>(arg2_), static_cast<const Arg3&>(arg3_),\n        static_cast<const Arg4&>(arg4_));\n  }\n\n  void operator()() const\n  {\n    handler_(arg1_, arg2_, arg3_, arg4_);\n  }\n\n//private:\n  Handler handler_;\n  Arg1 arg1_;\n  Arg2 arg2_;\n  Arg3 arg3_;\n  Arg4 arg4_;\n};\n\ntemplate <typename Handler, typename Arg1,\n    typename Arg2, typename Arg3, typename Arg4>\ninline void* asio_handler_allocate(std::size_t size,\n    binder4<Handler, Arg1, Arg2, Arg3, Arg4>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1,\n    typename Arg2, typename Arg3, typename Arg4>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    binder4<Handler, Arg1, Arg2, Arg3, Arg4>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1,\n    typename Arg2, typename Arg3, typename Arg4>\ninline bool asio_handler_is_continuation(\n    binder4<Handler, Arg1, Arg2, Arg3, Arg4>* this_handler)\n{\n  return asio_handler_cont_helpers::is_continuation(\n      this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1,\n    typename Arg2, typename Arg3, typename Arg4>\ninline void asio_handler_invoke(Function& function,\n    binder4<Handler, Arg1, Arg2, Arg3, Arg4>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, 
this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1,\n    typename Arg2, typename Arg3, typename Arg4>\ninline void asio_handler_invoke(const Function& function,\n    binder4<Handler, Arg1, Arg2, Arg3, Arg4>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1,\n    typename Arg2, typename Arg3, typename Arg4>\ninline binder4<typename decay<Handler>::type, Arg1, Arg2, Arg3, Arg4>\nbind_handler(ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1,\n    const Arg2& arg2, const Arg3& arg3, const Arg4& arg4)\n{\n  return binder4<typename decay<Handler>::type, Arg1, Arg2, Arg3, Arg4>(0,\n      ASIO_MOVE_CAST(Handler)(handler), arg1, arg2, arg3, arg4);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2,\n    typename Arg3, typename Arg4, typename Arg5>\nclass binder5\n{\npublic:\n  template <typename T>\n  binder5(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1,\n      const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5)\n    : handler_(ASIO_MOVE_CAST(T)(handler)),\n      arg1_(arg1),\n      arg2_(arg2),\n      arg3_(arg3),\n      arg4_(arg4),\n      arg5_(arg5)\n  {\n  }\n\n  binder5(Handler& handler, const Arg1& arg1, const Arg2& arg2,\n      const Arg3& arg3, const Arg4& arg4, const Arg5& arg5)\n    : handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      arg1_(arg1),\n      arg2_(arg2),\n      arg3_(arg3),\n      arg4_(arg4),\n      arg5_(arg5)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  binder5(const binder5& other)\n    : handler_(other.handler_),\n      arg1_(other.arg1_),\n      arg2_(other.arg2_),\n      arg3_(other.arg3_),\n      arg4_(other.arg4_),\n      arg5_(other.arg5_)\n  {\n  }\n\n  binder5(binder5&& other)\n    : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)),\n      arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)),\n      arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_)),\n      
arg3_(ASIO_MOVE_CAST(Arg3)(other.arg3_)),\n      arg4_(ASIO_MOVE_CAST(Arg4)(other.arg4_)),\n      arg5_(ASIO_MOVE_CAST(Arg5)(other.arg5_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()()\n  {\n    handler_(static_cast<const Arg1&>(arg1_),\n        static_cast<const Arg2&>(arg2_), static_cast<const Arg3&>(arg3_),\n        static_cast<const Arg4&>(arg4_), static_cast<const Arg5&>(arg5_));\n  }\n\n  void operator()() const\n  {\n    handler_(arg1_, arg2_, arg3_, arg4_, arg5_);\n  }\n\n//private:\n  Handler handler_;\n  Arg1 arg1_;\n  Arg2 arg2_;\n  Arg3 arg3_;\n  Arg4 arg4_;\n  Arg5 arg5_;\n};\n\ntemplate <typename Handler, typename Arg1, typename Arg2,\n    typename Arg3, typename Arg4, typename Arg5>\ninline void* asio_handler_allocate(std::size_t size,\n    binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2,\n    typename Arg3, typename Arg4, typename Arg5>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2,\n    typename Arg3, typename Arg4, typename Arg5>\ninline bool asio_handler_is_continuation(\n    binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>* this_handler)\n{\n  return asio_handler_cont_helpers::is_continuation(\n      this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1,\n    typename Arg2, typename Arg3, typename Arg4, typename Arg5>\ninline void asio_handler_invoke(Function& function,\n    binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename 
Arg1,\n    typename Arg2, typename Arg3, typename Arg4, typename Arg5>\ninline void asio_handler_invoke(const Function& function,\n    binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2,\n    typename Arg3, typename Arg4, typename Arg5>\ninline binder5<typename decay<Handler>::type, Arg1, Arg2, Arg3, Arg4, Arg5>\nbind_handler(ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1,\n    const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5)\n{\n  return binder5<typename decay<Handler>::type, Arg1, Arg2, Arg3, Arg4, Arg5>(0,\n      ASIO_MOVE_CAST(Handler)(handler), arg1, arg2, arg3, arg4, arg5);\n}\n\n#if defined(ASIO_HAS_MOVE)\n\ntemplate <typename Handler, typename Arg1>\nclass move_binder1\n{\npublic:\n  move_binder1(int, ASIO_MOVE_ARG(Handler) handler,\n      ASIO_MOVE_ARG(Arg1) arg1)\n    : handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      arg1_(ASIO_MOVE_CAST(Arg1)(arg1))\n  {\n  }\n\n  move_binder1(move_binder1&& other)\n    : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)),\n      arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_))\n  {\n  }\n\n  void operator()()\n  {\n    handler_(ASIO_MOVE_CAST(Arg1)(arg1_));\n  }\n\n//private:\n  Handler handler_;\n  Arg1 arg1_;\n};\n\ntemplate <typename Handler, typename Arg1>\ninline void* asio_handler_allocate(std::size_t size,\n    move_binder1<Handler, Arg1>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    move_binder1<Handler, Arg1>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1>\ninline bool asio_handler_is_continuation(\n    move_binder1<Handler, Arg1>* this_handler)\n{\n  
return asio_handler_cont_helpers::is_continuation(\n      this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1>\ninline void asio_handler_invoke(ASIO_MOVE_ARG(Function) function,\n    move_binder1<Handler, Arg1>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      ASIO_MOVE_CAST(Function)(function), this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\nclass move_binder2\n{\npublic:\n  move_binder2(int, ASIO_MOVE_ARG(Handler) handler,\n      const Arg1& arg1, ASIO_MOVE_ARG(Arg2) arg2)\n    : handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      arg1_(arg1),\n      arg2_(ASIO_MOVE_CAST(Arg2)(arg2))\n  {\n  }\n\n  move_binder2(move_binder2&& other)\n    : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)),\n      arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)),\n      arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_))\n  {\n  }\n\n  void operator()()\n  {\n    handler_(static_cast<const Arg1&>(arg1_),\n        ASIO_MOVE_CAST(Arg2)(arg2_));\n  }\n\n//private:\n  Handler handler_;\n  Arg1 arg1_;\n  Arg2 arg2_;\n};\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\ninline void* asio_handler_allocate(std::size_t size,\n    move_binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    move_binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\ninline bool asio_handler_is_continuation(\n    move_binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  return asio_handler_cont_helpers::is_continuation(\n      this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler, typename Arg1, typename Arg2>\ninline void 
asio_handler_invoke(ASIO_MOVE_ARG(Function) function,\n    move_binder2<Handler, Arg1, Arg2>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      ASIO_MOVE_CAST(Function)(function), this_handler->handler_);\n}\n\n#endif // defined(ASIO_HAS_MOVE)\n\n} // namespace detail\n\ntemplate <typename Handler, typename Arg1, typename Allocator>\nstruct associated_allocator<detail::binder1<Handler, Arg1>, Allocator>\n{\n  typedef typename associated_allocator<Handler, Allocator>::type type;\n\n  static type get(const detail::binder1<Handler, Arg1>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<Handler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Handler, typename Arg1, typename Arg2, typename Allocator>\nstruct associated_allocator<detail::binder2<Handler, Arg1, Arg2>, Allocator>\n{\n  typedef typename associated_allocator<Handler, Allocator>::type type;\n\n  static type get(const detail::binder2<Handler, Arg1, Arg2>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<Handler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Handler, typename Arg1, typename Executor>\nstruct associated_executor<detail::binder1<Handler, Arg1>, Executor>\n{\n  typedef typename associated_executor<Handler, Executor>::type type;\n\n  static type get(const detail::binder1<Handler, Arg1>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<Handler, Executor>::get(h.handler_, ex);\n  }\n};\n\ntemplate <typename Handler, typename Arg1, typename Arg2, typename Executor>\nstruct associated_executor<detail::binder2<Handler, Arg1, Arg2>, Executor>\n{\n  typedef typename associated_executor<Handler, Executor>::type type;\n\n  static type get(const detail::binder2<Handler, Arg1, Arg2>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<Handler, Executor>::get(h.handler_, ex);\n  
}\n};\n\n#if defined(ASIO_HAS_MOVE)\n\ntemplate <typename Handler, typename Arg1, typename Allocator>\nstruct associated_allocator<detail::move_binder1<Handler, Arg1>, Allocator>\n{\n  typedef typename associated_allocator<Handler, Allocator>::type type;\n\n  static type get(const detail::move_binder1<Handler, Arg1>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<Handler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Handler, typename Arg1, typename Arg2, typename Allocator>\nstruct associated_allocator<\n    detail::move_binder2<Handler, Arg1, Arg2>, Allocator>\n{\n  typedef typename associated_allocator<Handler, Allocator>::type type;\n\n  static type get(const detail::move_binder2<Handler, Arg1, Arg2>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<Handler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Handler, typename Arg1, typename Executor>\nstruct associated_executor<detail::move_binder1<Handler, Arg1>, Executor>\n{\n  typedef typename associated_executor<Handler, Executor>::type type;\n\n  static type get(const detail::move_binder1<Handler, Arg1>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<Handler, Executor>::get(h.handler_, ex);\n  }\n};\n\ntemplate <typename Handler, typename Arg1, typename Arg2, typename Executor>\nstruct associated_executor<detail::move_binder2<Handler, Arg1, Arg2>, Executor>\n{\n  typedef typename associated_executor<Handler, Executor>::type type;\n\n  static type get(const detail::move_binder2<Handler, Arg1, Arg2>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<Handler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // defined(ASIO_HAS_MOVE)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_BIND_HANDLER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/buffer_resize_guard.hpp",
    "content": "//\n// detail/buffer_resize_guard.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP\n#define ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/limits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Helper class to manage buffer resizing in an exception safe way.\ntemplate <typename Buffer>\nclass buffer_resize_guard\n{\npublic:\n  // Constructor.\n  buffer_resize_guard(Buffer& buffer)\n    : buffer_(buffer),\n      old_size_(buffer.size())\n  {\n  }\n\n  // Destructor rolls back the buffer resize unless commit was called.\n  ~buffer_resize_guard()\n  {\n    if (old_size_ != (std::numeric_limits<size_t>::max)())\n    {\n      buffer_.resize(old_size_);\n    }\n  }\n\n  // Commit the resize transaction.\n  void commit()\n  {\n    old_size_ = (std::numeric_limits<size_t>::max)();\n  }\n\nprivate:\n  // The buffer being managed.\n  Buffer& buffer_;\n\n  // The size of the buffer at the time the guard was constructed.\n  size_t old_size_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/buffer_sequence_adapter.hpp",
    "content": "//\n// detail/buffer_sequence_adapter.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP\n#define ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/detail/array_fwd.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass buffer_sequence_adapter_base\n{\n#if defined(ASIO_WINDOWS_RUNTIME)\npublic:\n  // The maximum number of buffers to support in a single operation.\n  enum { max_buffers = 1 };\n\nprotected:\n  typedef Windows::Storage::Streams::IBuffer^ native_buffer_type;\n\n  ASIO_DECL static void init_native_buffer(\n      native_buffer_type& buf,\n      const asio::mutable_buffer& buffer);\n\n  ASIO_DECL static void init_native_buffer(\n      native_buffer_type& buf,\n      const asio::const_buffer& buffer);\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\npublic:\n  // The maximum number of buffers to support in a single operation.\n  enum { max_buffers = 64 < max_iov_len ? 
64 : max_iov_len };\n\nprotected:\n  typedef WSABUF native_buffer_type;\n\n  static void init_native_buffer(WSABUF& buf,\n      const asio::mutable_buffer& buffer)\n  {\n    buf.buf = static_cast<char*>(buffer.data());\n    buf.len = static_cast<ULONG>(buffer.size());\n  }\n\n  static void init_native_buffer(WSABUF& buf,\n      const asio::const_buffer& buffer)\n  {\n    buf.buf = const_cast<char*>(static_cast<const char*>(buffer.data()));\n    buf.len = static_cast<ULONG>(buffer.size());\n  }\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\npublic:\n  // The maximum number of buffers to support in a single operation.\n  enum { max_buffers = 64 < max_iov_len ? 64 : max_iov_len };\n\nprotected:\n  typedef iovec native_buffer_type;\n\n  static void init_iov_base(void*& base, void* addr)\n  {\n    base = addr;\n  }\n\n  template <typename T>\n  static void init_iov_base(T& base, void* addr)\n  {\n    base = static_cast<T>(addr);\n  }\n\n  static void init_native_buffer(iovec& iov,\n      const asio::mutable_buffer& buffer)\n  {\n    init_iov_base(iov.iov_base, buffer.data());\n    iov.iov_len = buffer.size();\n  }\n\n  static void init_native_buffer(iovec& iov,\n      const asio::const_buffer& buffer)\n  {\n    init_iov_base(iov.iov_base, const_cast<void*>(buffer.data()));\n    iov.iov_len = buffer.size();\n  }\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n};\n\n// Helper class to translate buffers into the native buffer representation.\ntemplate <typename Buffer, typename Buffers>\nclass buffer_sequence_adapter\n  : buffer_sequence_adapter_base\n{\npublic:\n  explicit buffer_sequence_adapter(const Buffers& buffer_sequence)\n    : count_(0), total_buffer_size_(0)\n  {\n    buffer_sequence_adapter::init(\n        asio::buffer_sequence_begin(buffer_sequence),\n        asio::buffer_sequence_end(buffer_sequence));\n  }\n\n  native_buffer_type* buffers()\n  {\n    return buffers_;\n  }\n\n  std::size_t count() const\n  {\n    return count_;\n  }\n\n  
std::size_t total_size() const\n  {\n    return total_buffer_size_;\n  }\n\n  bool all_empty() const\n  {\n    return total_buffer_size_ == 0;\n  }\n\n  static bool all_empty(const Buffers& buffer_sequence)\n  {\n    return buffer_sequence_adapter::all_empty(\n        asio::buffer_sequence_begin(buffer_sequence),\n        asio::buffer_sequence_end(buffer_sequence));\n  }\n\n  static void validate(const Buffers& buffer_sequence)\n  {\n    buffer_sequence_adapter::validate(\n        asio::buffer_sequence_begin(buffer_sequence),\n        asio::buffer_sequence_end(buffer_sequence));\n  }\n\n  static Buffer first(const Buffers& buffer_sequence)\n  {\n    return buffer_sequence_adapter::first(\n        asio::buffer_sequence_begin(buffer_sequence),\n        asio::buffer_sequence_end(buffer_sequence));\n  }\n\nprivate:\n  template <typename Iterator>\n  void init(Iterator begin, Iterator end)\n  {\n    Iterator iter = begin;\n    for (; iter != end && count_ < max_buffers; ++iter, ++count_)\n    {\n      Buffer buffer(*iter);\n      init_native_buffer(buffers_[count_], buffer);\n      total_buffer_size_ += buffer.size();\n    }\n  }\n\n  template <typename Iterator>\n  static bool all_empty(Iterator begin, Iterator end)\n  {\n    Iterator iter = begin;\n    std::size_t i = 0;\n    for (; iter != end && i < max_buffers; ++iter, ++i)\n      if (Buffer(*iter).size() > 0)\n        return false;\n    return true;\n  }\n\n  template <typename Iterator>\n  static void validate(Iterator begin, Iterator end)\n  {\n    Iterator iter = begin;\n    for (; iter != end; ++iter)\n    {\n      Buffer buffer(*iter);\n      buffer.data();\n    }\n  }\n\n  template <typename Iterator>\n  static Buffer first(Iterator begin, Iterator end)\n  {\n    Iterator iter = begin;\n    for (; iter != end; ++iter)\n    {\n      Buffer buffer(*iter);\n      if (buffer.size() != 0)\n        return buffer;\n    }\n    return Buffer();\n  }\n\n  native_buffer_type buffers_[max_buffers];\n  std::size_t 
count_;\n  std::size_t total_buffer_size_;\n};\n\ntemplate <typename Buffer>\nclass buffer_sequence_adapter<Buffer, asio::mutable_buffer>\n  : buffer_sequence_adapter_base\n{\npublic:\n  explicit buffer_sequence_adapter(\n      const asio::mutable_buffer& buffer_sequence)\n  {\n    init_native_buffer(buffer_, Buffer(buffer_sequence));\n    total_buffer_size_ = buffer_sequence.size();\n  }\n\n  native_buffer_type* buffers()\n  {\n    return &buffer_;\n  }\n\n  std::size_t count() const\n  {\n    return 1;\n  }\n\n  std::size_t total_size() const\n  {\n    return total_buffer_size_;\n  }\n\n  bool all_empty() const\n  {\n    return total_buffer_size_ == 0;\n  }\n\n  static bool all_empty(const asio::mutable_buffer& buffer_sequence)\n  {\n    return buffer_sequence.size() == 0;\n  }\n\n  static void validate(const asio::mutable_buffer& buffer_sequence)\n  {\n    buffer_sequence.data();\n  }\n\n  static Buffer first(const asio::mutable_buffer& buffer_sequence)\n  {\n    return Buffer(buffer_sequence);\n  }\n\nprivate:\n  native_buffer_type buffer_;\n  std::size_t total_buffer_size_;\n};\n\ntemplate <typename Buffer>\nclass buffer_sequence_adapter<Buffer, asio::const_buffer>\n  : buffer_sequence_adapter_base\n{\npublic:\n  explicit buffer_sequence_adapter(\n      const asio::const_buffer& buffer_sequence)\n  {\n    init_native_buffer(buffer_, Buffer(buffer_sequence));\n    total_buffer_size_ = buffer_sequence.size();\n  }\n\n  native_buffer_type* buffers()\n  {\n    return &buffer_;\n  }\n\n  std::size_t count() const\n  {\n    return 1;\n  }\n\n  std::size_t total_size() const\n  {\n    return total_buffer_size_;\n  }\n\n  bool all_empty() const\n  {\n    return total_buffer_size_ == 0;\n  }\n\n  static bool all_empty(const asio::const_buffer& buffer_sequence)\n  {\n    return buffer_sequence.size() == 0;\n  }\n\n  static void validate(const asio::const_buffer& buffer_sequence)\n  {\n    buffer_sequence.data();\n  }\n\n  static Buffer first(const asio::const_buffer& 
buffer_sequence)\n  {\n    return Buffer(buffer_sequence);\n  }\n\nprivate:\n  native_buffer_type buffer_;\n  std::size_t total_buffer_size_;\n};\n\n#if !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Buffer>\nclass buffer_sequence_adapter<Buffer, asio::mutable_buffers_1>\n  : buffer_sequence_adapter_base\n{\npublic:\n  explicit buffer_sequence_adapter(\n      const asio::mutable_buffers_1& buffer_sequence)\n  {\n    init_native_buffer(buffer_, Buffer(buffer_sequence));\n    total_buffer_size_ = buffer_sequence.size();\n  }\n\n  native_buffer_type* buffers()\n  {\n    return &buffer_;\n  }\n\n  std::size_t count() const\n  {\n    return 1;\n  }\n\n  std::size_t total_size() const\n  {\n    return total_buffer_size_;\n  }\n\n  bool all_empty() const\n  {\n    return total_buffer_size_ == 0;\n  }\n\n  static bool all_empty(const asio::mutable_buffers_1& buffer_sequence)\n  {\n    return buffer_sequence.size() == 0;\n  }\n\n  static void validate(const asio::mutable_buffers_1& buffer_sequence)\n  {\n    buffer_sequence.data();\n  }\n\n  static Buffer first(const asio::mutable_buffers_1& buffer_sequence)\n  {\n    return Buffer(buffer_sequence);\n  }\n\nprivate:\n  native_buffer_type buffer_;\n  std::size_t total_buffer_size_;\n};\n\ntemplate <typename Buffer>\nclass buffer_sequence_adapter<Buffer, asio::const_buffers_1>\n  : buffer_sequence_adapter_base\n{\npublic:\n  explicit buffer_sequence_adapter(\n      const asio::const_buffers_1& buffer_sequence)\n  {\n    init_native_buffer(buffer_, Buffer(buffer_sequence));\n    total_buffer_size_ = buffer_sequence.size();\n  }\n\n  native_buffer_type* buffers()\n  {\n    return &buffer_;\n  }\n\n  std::size_t count() const\n  {\n    return 1;\n  }\n\n  std::size_t total_size() const\n  {\n    return total_buffer_size_;\n  }\n\n  bool all_empty() const\n  {\n    return total_buffer_size_ == 0;\n  }\n\n  static bool all_empty(const asio::const_buffers_1& buffer_sequence)\n  {\n    return buffer_sequence.size() == 0;\n  
}\n\n  static void validate(const asio::const_buffers_1& buffer_sequence)\n  {\n    buffer_sequence.data();\n  }\n\n  static Buffer first(const asio::const_buffers_1& buffer_sequence)\n  {\n    return Buffer(buffer_sequence);\n  }\n\nprivate:\n  native_buffer_type buffer_;\n  std::size_t total_buffer_size_;\n};\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Buffer, typename Elem>\nclass buffer_sequence_adapter<Buffer, boost::array<Elem, 2> >\n  : buffer_sequence_adapter_base\n{\npublic:\n  explicit buffer_sequence_adapter(\n      const boost::array<Elem, 2>& buffer_sequence)\n  {\n    init_native_buffer(buffers_[0], Buffer(buffer_sequence[0]));\n    init_native_buffer(buffers_[1], Buffer(buffer_sequence[1]));\n    total_buffer_size_ = buffer_sequence[0].size() + buffer_sequence[1].size();\n  }\n\n  native_buffer_type* buffers()\n  {\n    return buffers_;\n  }\n\n  std::size_t count() const\n  {\n    return 2;\n  }\n\n  std::size_t total_size() const\n  {\n    return total_buffer_size_;\n  }\n\n  bool all_empty() const\n  {\n    return total_buffer_size_ == 0;\n  }\n\n  static bool all_empty(const boost::array<Elem, 2>& buffer_sequence)\n  {\n    return buffer_sequence[0].size() == 0 && buffer_sequence[1].size() == 0;\n  }\n\n  static void validate(const boost::array<Elem, 2>& buffer_sequence)\n  {\n    buffer_sequence[0].data();\n    buffer_sequence[1].data();\n  }\n\n  static Buffer first(const boost::array<Elem, 2>& buffer_sequence)\n  {\n    return Buffer(buffer_sequence[0].size() != 0\n        ? 
buffer_sequence[0] : buffer_sequence[1]);\n  }\n\nprivate:\n  native_buffer_type buffers_[2];\n  std::size_t total_buffer_size_;\n};\n\n#if defined(ASIO_HAS_STD_ARRAY)\n\ntemplate <typename Buffer, typename Elem>\nclass buffer_sequence_adapter<Buffer, std::array<Elem, 2> >\n  : buffer_sequence_adapter_base\n{\npublic:\n  explicit buffer_sequence_adapter(\n      const std::array<Elem, 2>& buffer_sequence)\n  {\n    init_native_buffer(buffers_[0], Buffer(buffer_sequence[0]));\n    init_native_buffer(buffers_[1], Buffer(buffer_sequence[1]));\n    total_buffer_size_ = buffer_sequence[0].size() + buffer_sequence[1].size();\n  }\n\n  native_buffer_type* buffers()\n  {\n    return buffers_;\n  }\n\n  std::size_t count() const\n  {\n    return 2;\n  }\n\n  std::size_t total_size() const\n  {\n    return total_buffer_size_;\n  }\n\n  bool all_empty() const\n  {\n    return total_buffer_size_ == 0;\n  }\n\n  static bool all_empty(const std::array<Elem, 2>& buffer_sequence)\n  {\n    return buffer_sequence[0].size() == 0 && buffer_sequence[1].size() == 0;\n  }\n\n  static void validate(const std::array<Elem, 2>& buffer_sequence)\n  {\n    buffer_sequence[0].data();\n    buffer_sequence[1].data();\n  }\n\n  static Buffer first(const std::array<Elem, 2>& buffer_sequence)\n  {\n    return Buffer(buffer_sequence[0].size() != 0\n        ? buffer_sequence[0] : buffer_sequence[1]);\n  }\n\nprivate:\n  native_buffer_type buffers_[2];\n  std::size_t total_buffer_size_;\n};\n\n#endif // defined(ASIO_HAS_STD_ARRAY)\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/buffer_sequence_adapter.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/buffered_stream_storage.hpp",
    "content": "//\n// detail/buffered_stream_storage.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP\n#define ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/detail/assert.hpp\"\n#include <cstddef>\n#include <cstring>\n#include <vector>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass buffered_stream_storage\n{\npublic:\n  // The type of the bytes stored in the buffer.\n  typedef unsigned char byte_type;\n\n  // The type used for offsets into the buffer.\n  typedef std::size_t size_type;\n\n  // Constructor.\n  explicit buffered_stream_storage(std::size_t buffer_capacity)\n    : begin_offset_(0),\n      end_offset_(0),\n      buffer_(buffer_capacity)\n  {\n  }\n\n  /// Clear the buffer.\n  void clear()\n  {\n    begin_offset_ = 0;\n    end_offset_ = 0;\n  }\n\n  // Return a pointer to the beginning of the unread data.\n  mutable_buffer data()\n  {\n    return asio::buffer(buffer_) + begin_offset_;\n  }\n\n  // Return a pointer to the beginning of the unread data.\n  const_buffer data() const\n  {\n    return asio::buffer(buffer_) + begin_offset_;\n  }\n\n  // Is there no unread data in the buffer.\n  bool empty() const\n  {\n    return begin_offset_ == end_offset_;\n  }\n\n  // Return the amount of unread data the is in the buffer.\n  size_type size() const\n  {\n    return end_offset_ - begin_offset_;\n  }\n\n  // Resize the buffer to the specified length.\n  void resize(size_type length)\n  {\n    ASIO_ASSERT(length <= capacity());\n    if 
(begin_offset_ + length <= capacity())\n    {\n      end_offset_ = begin_offset_ + length;\n    }\n    else\n    {\n      using namespace std; // For memmove.\n      memmove(&buffer_[0], &buffer_[0] + begin_offset_, size());\n      end_offset_ = length;\n      begin_offset_ = 0;\n    }\n  }\n\n  // Return the maximum size for data in the buffer.\n  size_type capacity() const\n  {\n    return buffer_.size();\n  }\n\n  // Consume multiple bytes from the beginning of the buffer.\n  void consume(size_type count)\n  {\n    ASIO_ASSERT(begin_offset_ + count <= end_offset_);\n    begin_offset_ += count;\n    if (empty())\n      clear();\n  }\n\nprivate:\n  // The offset to the beginning of the unread data.\n  size_type begin_offset_;\n\n  // The offset to the end of the unread data.\n  size_type end_offset_;\n  \n  // The data in the buffer.\n  std::vector<byte_type> buffer_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/call_stack.hpp",
    "content": "//\n// detail/call_stack.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CALL_STACK_HPP\n#define ASIO_DETAIL_CALL_STACK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/tss_ptr.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Helper class to determine whether or not the current thread is inside an\n// invocation of io_context::run() for a specified io_context object.\ntemplate <typename Key, typename Value = unsigned char>\nclass call_stack\n{\npublic:\n  // Context class automatically pushes the key/value pair on to the stack.\n  class context\n    : private noncopyable\n  {\n  public:\n    // Push the key on to the stack.\n    explicit context(Key* k)\n      : key_(k),\n        next_(call_stack<Key, Value>::top_)\n    {\n      value_ = reinterpret_cast<unsigned char*>(this);\n      call_stack<Key, Value>::top_ = this;\n    }\n\n    // Push the key/value pair on to the stack.\n    context(Key* k, Value& v)\n      : key_(k),\n        value_(&v),\n        next_(call_stack<Key, Value>::top_)\n    {\n      call_stack<Key, Value>::top_ = this;\n    }\n\n    // Pop the key/value pair from the stack.\n    ~context()\n    {\n      call_stack<Key, Value>::top_ = next_;\n    }\n\n    // Find the next context with the same key.\n    Value* next_by_key() const\n    {\n      context* elem = next_;\n      while (elem)\n      {\n        if (elem->key_ == key_)\n          return elem->value_;\n        elem = elem->next_;\n      }\n      return 0;\n    }\n\n  private:\n    friend class call_stack<Key, 
Value>;\n\n    // The key associated with the context.\n    Key* key_;\n\n    // The value associated with the context.\n    Value* value_;\n\n    // The next element in the stack.\n    context* next_;\n  };\n\n  friend class context;\n\n  // Determine whether the specified owner is on the stack. Returns address of\n  // key if present, 0 otherwise.\n  static Value* contains(Key* k)\n  {\n    context* elem = top_;\n    while (elem)\n    {\n      if (elem->key_ == k)\n        return elem->value_;\n      elem = elem->next_;\n    }\n    return 0;\n  }\n\n  // Obtain the value at the top of the stack.\n  static Value* top()\n  {\n    context* elem = top_;\n    return elem ? elem->value_ : 0;\n  }\n\nprivate:\n  // The top of the stack of calls for the current thread.\n  static tss_ptr<context> top_;\n};\n\ntemplate <typename Key, typename Value>\ntss_ptr<typename call_stack<Key, Value>::context>\ncall_stack<Key, Value>::top_;\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_CALL_STACK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/chrono.hpp",
    "content": "//\n// detail/chrono.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CHRONO_HPP\n#define ASIO_DETAIL_CHRONO_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_CHRONO)\n# include <chrono>\n#elif defined(ASIO_HAS_BOOST_CHRONO)\n# include <boost/chrono/system_clocks.hpp>\n#endif // defined(ASIO_HAS_BOOST_CHRONO)\n\nnamespace asio {\nnamespace chrono {\n\n#if defined(ASIO_HAS_STD_CHRONO)\nusing std::chrono::duration;\nusing std::chrono::time_point;\nusing std::chrono::duration_cast;\nusing std::chrono::nanoseconds;\nusing std::chrono::microseconds;\nusing std::chrono::milliseconds;\nusing std::chrono::seconds;\nusing std::chrono::minutes;\nusing std::chrono::hours;\nusing std::chrono::time_point_cast;\n#if defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK)\ntypedef std::chrono::monotonic_clock steady_clock;\n#else // defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK)\nusing std::chrono::steady_clock;\n#endif // defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK)\nusing std::chrono::system_clock;\nusing std::chrono::high_resolution_clock;\n#elif defined(ASIO_HAS_BOOST_CHRONO)\nusing boost::chrono::duration;\nusing boost::chrono::time_point;\nusing boost::chrono::duration_cast;\nusing boost::chrono::nanoseconds;\nusing boost::chrono::microseconds;\nusing boost::chrono::milliseconds;\nusing boost::chrono::seconds;\nusing boost::chrono::minutes;\nusing boost::chrono::hours;\nusing boost::chrono::time_point_cast;\nusing boost::chrono::system_clock;\nusing boost::chrono::steady_clock;\nusing boost::chrono::high_resolution_clock;\n#endif // defined(ASIO_HAS_BOOST_CHRONO)\n\n} // namespace chrono\n} // 
namespace asio\n\n#endif // ASIO_DETAIL_CHRONO_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/chrono_time_traits.hpp",
    "content": "//\n// detail/chrono_time_traits.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP\n#define ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/cstdint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Helper template to compute the greatest common divisor.\ntemplate <int64_t v1, int64_t v2>\nstruct gcd { enum { value = gcd<v2, v1 % v2>::value }; };\n\ntemplate <int64_t v1>\nstruct gcd<v1, 0> { enum { value = v1 }; };\n\n// Adapts std::chrono clocks for use with a deadline timer.\ntemplate <typename Clock, typename WaitTraits>\nstruct chrono_time_traits\n{\n  // The clock type.\n  typedef Clock clock_type;\n\n  // The duration type of the clock.\n  typedef typename clock_type::duration duration_type;\n\n  // The time point type of the clock.\n  typedef typename clock_type::time_point time_type;\n\n  // The period of the clock.\n  typedef typename duration_type::period period_type;\n\n  // Get the current time.\n  static time_type now()\n  {\n    return clock_type::now();\n  }\n\n  // Add a duration to a time.\n  static time_type add(const time_type& t, const duration_type& d)\n  {\n    const time_type epoch;\n    if (t >= epoch)\n    {\n      if ((time_type::max)() - t < d)\n        return (time_type::max)();\n    }\n    else // t < epoch\n    {\n      if (-(t - (time_type::min)()) > d)\n        return (time_type::min)();\n    }\n\n    return t + d;\n  }\n\n  // Subtract one time from another.\n  static duration_type subtract(const time_type& t1, const time_type& t2)\n  {\n    const time_type epoch;\n    if 
(t1 >= epoch)\n    {\n      if (t2 >= epoch)\n      {\n        return t1 - t2;\n      }\n      else if (t2 == (time_type::min)())\n      {\n        return (duration_type::max)();\n      }\n      else if ((time_type::max)() - t1 < epoch - t2)\n      {\n        return (duration_type::max)();\n      }\n      else\n      {\n        return t1 - t2;\n      }\n    }\n    else // t1 < epoch\n    {\n      if (t2 < epoch)\n      {\n        return t1 - t2;\n      }\n      else if (t1 == (time_type::min)())\n      {\n        return (duration_type::min)();\n      }\n      else if ((time_type::max)() - t2 < epoch - t1)\n      {\n        return (duration_type::min)();\n      }\n      else\n      {\n        return -(t2 - t1);\n      }\n    }\n  }\n\n  // Test whether one time is less than another.\n  static bool less_than(const time_type& t1, const time_type& t2)\n  {\n    return t1 < t2;\n  }\n\n  // Implement just enough of the posix_time::time_duration interface to supply\n  // what the timer_queue requires.\n  class posix_time_duration\n  {\n  public:\n    explicit posix_time_duration(const duration_type& d)\n      : d_(d)\n    {\n    }\n\n    int64_t ticks() const\n    {\n      return d_.count();\n    }\n\n    int64_t total_seconds() const\n    {\n      return duration_cast<1, 1>();\n    }\n\n    int64_t total_milliseconds() const\n    {\n      return duration_cast<1, 1000>();\n    }\n\n    int64_t total_microseconds() const\n    {\n      return duration_cast<1, 1000000>();\n    }\n\n  private:\n    template <int64_t Num, int64_t Den>\n    int64_t duration_cast() const\n    {\n      const int64_t num1 = period_type::num / gcd<period_type::num, Num>::value;\n      const int64_t num2 = Num / gcd<period_type::num, Num>::value;\n\n      const int64_t den1 = period_type::den / gcd<period_type::den, Den>::value;\n      const int64_t den2 = Den / gcd<period_type::den, Den>::value;\n\n      const int64_t num = num1 * den2;\n      const int64_t den = num2 * den1;\n\n      if (num == 1 
&& den == 1)\n        return ticks();\n      else if (num != 1 && den == 1)\n        return ticks() * num;\n      else if (num == 1 && period_type::den != 1)\n        return ticks() / den;\n      else\n        return ticks() * num / den;\n    }\n\n    duration_type d_;\n  };\n\n  // Convert to POSIX duration type.\n  static posix_time_duration to_posix_duration(const duration_type& d)\n  {\n    return posix_time_duration(WaitTraits::to_wait_duration(d));\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/completion_handler.hpp",
    "content": "//\n// detail/completion_handler.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_COMPLETION_HANDLER_HPP\n#define ASIO_DETAIL_COMPLETION_HANDLER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_work.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler>\nclass completion_handler : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(completion_handler);\n\n  completion_handler(Handler& h)\n    : operation(&completion_handler::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(h))\n  {\n    handler_work<Handler>::start(handler_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    completion_handler* h(static_cast<completion_handler*>(base));\n    ptr p = { asio::detail::addressof(h->handler_), h, h };\n    handler_work<Handler> w(h->handler_);\n\n    ASIO_HANDLER_COMPLETION((*h));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    Handler handler(ASIO_MOVE_CAST(Handler)(h->handler_));\n    p.h = asio::detail::addressof(handler);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN(());\n      w.complete(handler, handler);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_COMPLETION_HANDLER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/concurrency_hint.hpp",
    "content": "//\n// detail/concurrency_hint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CONCURRENCY_HINT_HPP\n#define ASIO_DETAIL_CONCURRENCY_HINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n\n// The concurrency hint ID and mask are used to identify when a \"well-known\"\n// concurrency hint value has been passed to the io_context.\n#define ASIO_CONCURRENCY_HINT_ID 0xA5100000u\n#define ASIO_CONCURRENCY_HINT_ID_MASK 0xFFFF0000u\n\n// If set, this bit indicates that the scheduler should perform locking.\n#define ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER 0x1u\n\n// If set, this bit indicates that the reactor should perform locking when\n// managing descriptor registrations.\n#define ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION 0x2u\n\n// If set, this bit indicates that the reactor should perform locking for I/O.\n#define ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_IO 0x4u\n\n// Helper macro to determine if we have a special concurrency hint.\n#define ASIO_CONCURRENCY_HINT_IS_SPECIAL(hint) \\\n  ((static_cast<unsigned>(hint) \\\n    & ASIO_CONCURRENCY_HINT_ID_MASK) \\\n      == ASIO_CONCURRENCY_HINT_ID)\n\n// Helper macro to determine if locking is enabled for a given facility.\n#define ASIO_CONCURRENCY_HINT_IS_LOCKING(facility, hint) \\\n  (((static_cast<unsigned>(hint) \\\n    & (ASIO_CONCURRENCY_HINT_ID_MASK \\\n      | ASIO_CONCURRENCY_HINT_LOCKING_ ## facility)) \\\n        ^ ASIO_CONCURRENCY_HINT_ID) != 0)\n\n// This special concurrency hint disables locking in both the scheduler and\n// reactor I/O. 
This hint has the following restrictions:\n//\n// - Care must be taken to ensure that all operations on the io_context and any\n//   of its associated I/O objects (such as sockets and timers) occur in only\n//   one thread at a time.\n//\n// - Asynchronous resolve operations fail with operation_not_supported.\n//\n// - If a signal_set is used with the io_context, signal_set objects cannot be\n//   used with any other io_context in the program.\n#define ASIO_CONCURRENCY_HINT_UNSAFE \\\n  static_cast<int>(ASIO_CONCURRENCY_HINT_ID)\n\n// This special concurrency hint disables locking in the reactor I/O. This hint\n// has the following restrictions:\n//\n// - Care must be taken to ensure that run functions on the io_context, and all\n//   operations on the io_context's associated I/O objects (such as sockets and\n//   timers), occur in only one thread at a time.\n#define ASIO_CONCURRENCY_HINT_UNSAFE_IO \\\n  static_cast<int>(ASIO_CONCURRENCY_HINT_ID \\\n      | ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER \\\n      | ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION)\n\n// The special concurrency hint provides full thread safety.\n#define ASIO_CONCURRENCY_HINT_SAFE \\\n  static_cast<int>(ASIO_CONCURRENCY_HINT_ID \\\n      | ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER \\\n      | ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION \\\n      | ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_IO)\n\n// This #define may be overridden at compile time to specify a program-wide\n// default concurrency hint, used by the zero-argument io_context constructor.\n#if !defined(ASIO_CONCURRENCY_HINT_DEFAULT)\n# define ASIO_CONCURRENCY_HINT_DEFAULT -1\n#endif // !defined(ASIO_CONCURRENCY_HINT_DEFAULT)\n\n// This #define may be overridden at compile time to specify a program-wide\n// concurrency hint, used by the one-argument io_context constructor when\n// passed a value of 1.\n#if !defined(ASIO_CONCURRENCY_HINT_1)\n# define ASIO_CONCURRENCY_HINT_1 1\n#endif // 
!defined(ASIO_CONCURRENCY_HINT_1)\n\n#endif // ASIO_DETAIL_CONCURRENCY_HINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/conditionally_enabled_event.hpp",
    "content": "//\n// detail/conditionally_enabled_event.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP\n#define ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/conditionally_enabled_mutex.hpp\"\n#include \"asio/detail/event.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/null_event.hpp\"\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Mutex adapter used to conditionally enable or disable locking.\nclass conditionally_enabled_event\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  conditionally_enabled_event()\n  {\n  }\n\n  // Destructor.\n  ~conditionally_enabled_event()\n  {\n  }\n\n  // Signal the event. 
(Retained for backward compatibility.)\n  void signal(conditionally_enabled_mutex::scoped_lock& lock)\n  {\n    if (lock.mutex_.enabled_)\n      event_.signal(lock);\n  }\n\n  // Signal all waiters.\n  void signal_all(conditionally_enabled_mutex::scoped_lock& lock)\n  {\n    if (lock.mutex_.enabled_)\n      event_.signal_all(lock);\n  }\n\n  // Unlock the mutex and signal one waiter.\n  void unlock_and_signal_one(\n      conditionally_enabled_mutex::scoped_lock& lock)\n  {\n    if (lock.mutex_.enabled_)\n      event_.unlock_and_signal_one(lock);\n  }\n\n  // If there's a waiter, unlock the mutex and signal it.\n  bool maybe_unlock_and_signal_one(\n      conditionally_enabled_mutex::scoped_lock& lock)\n  {\n    if (lock.mutex_.enabled_)\n      return event_.maybe_unlock_and_signal_one(lock);\n    else\n      return false;\n  }\n\n  // Reset the event.\n  void clear(conditionally_enabled_mutex::scoped_lock& lock)\n  {\n    if (lock.mutex_.enabled_)\n      event_.clear(lock);\n  }\n\n  // Wait for the event to become signalled.\n  void wait(conditionally_enabled_mutex::scoped_lock& lock)\n  {\n    if (lock.mutex_.enabled_)\n      event_.wait(lock);\n    else\n      null_event().wait(lock);\n  }\n\n  // Timed wait for the event to become signalled.\n  bool wait_for_usec(\n      conditionally_enabled_mutex::scoped_lock& lock, long usec)\n  {\n    if (lock.mutex_.enabled_)\n      return event_.wait_for_usec(lock, usec);\n    else\n      return null_event().wait_for_usec(lock, usec);\n  }\n\nprivate:\n  asio::detail::event event_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/conditionally_enabled_mutex.hpp",
    "content": "//\n// detail/conditionally_enabled_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP\n#define ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Mutex adapter used to conditionally enable or disable locking.\nclass conditionally_enabled_mutex\n  : private noncopyable\n{\npublic:\n  // Helper class to lock and unlock a mutex automatically.\n  class scoped_lock\n    : private noncopyable\n  {\n  public:\n    // Tag type used to distinguish constructors.\n    enum adopt_lock_t { adopt_lock };\n\n    // Constructor adopts a lock that is already held.\n    scoped_lock(conditionally_enabled_mutex& m, adopt_lock_t)\n      : mutex_(m),\n        locked_(m.enabled_)\n    {\n    }\n\n    // Constructor acquires the lock.\n    explicit scoped_lock(conditionally_enabled_mutex& m)\n      : mutex_(m)\n    {\n      if (m.enabled_)\n      {\n        mutex_.mutex_.lock();\n        locked_ = true;\n      }\n      else\n        locked_ = false;\n    }\n\n    // Destructor releases the lock.\n    ~scoped_lock()\n    {\n      if (locked_)\n        mutex_.mutex_.unlock();\n    }\n\n    // Explicitly acquire the lock.\n    void lock()\n    {\n      if (mutex_.enabled_ && !locked_)\n      {\n        mutex_.mutex_.lock();\n        locked_ = true;\n      }\n    }\n\n    // Explicitly release the lock.\n    void unlock()\n    
{\n      if (locked_)\n      {\n        mutex_.unlock();\n        locked_ = false;\n      }\n    }\n\n    // Test whether the lock is held.\n    bool locked() const\n    {\n      return locked_;\n    }\n\n    // Get the underlying mutex.\n    asio::detail::mutex& mutex()\n    {\n      return mutex_.mutex_;\n    }\n\n  private:\n    friend class conditionally_enabled_event;\n    conditionally_enabled_mutex& mutex_;\n    bool locked_;\n  };\n\n  // Constructor.\n  explicit conditionally_enabled_mutex(bool enabled)\n    : enabled_(enabled)\n  {\n  }\n\n  // Destructor.\n  ~conditionally_enabled_mutex()\n  {\n  }\n\n  // Determine whether locking is enabled.\n  bool enabled() const\n  {\n    return enabled_;\n  }\n\n  // Lock the mutex.\n  void lock()\n  {\n    if (enabled_)\n      mutex_.lock();\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n    if (enabled_)\n      mutex_.unlock();\n  }\n\nprivate:\n  friend class scoped_lock;\n  friend class conditionally_enabled_event;\n  asio::detail::mutex mutex_;\n  const bool enabled_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/config.hpp",
    "content": "//\n// detail/config.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CONFIG_HPP\n#define ASIO_DETAIL_CONFIG_HPP\n\n// boostify: non-boost code starts here\n#if !defined(ASIO_STANDALONE)\n# if !defined(ASIO_ENABLE_BOOST)\n#  if (__cplusplus >= 201103)\n#   define ASIO_STANDALONE 1\n#  elif defined(_MSC_VER) && defined(_MSVC_LANG)\n#   if (_MSC_VER >= 1900) && (_MSVC_LANG >= 201103)\n#    define ASIO_STANDALONE 1\n#   endif // (_MSC_VER >= 1900) && (_MSVC_LANG >= 201103)\n#  endif // defined(_MSC_VER) && defined(_MSVC_LANG)\n# endif // !defined(ASIO_ENABLE_BOOST)\n#endif // !defined(ASIO_STANDALONE)\n\n// boostify: non-boost code ends here\n#if defined(ASIO_STANDALONE)\n# define ASIO_DISABLE_BOOST_ARRAY 1\n# define ASIO_DISABLE_BOOST_ASSERT 1\n# define ASIO_DISABLE_BOOST_BIND 1\n# define ASIO_DISABLE_BOOST_CHRONO 1\n# define ASIO_DISABLE_BOOST_DATE_TIME 1\n# define ASIO_DISABLE_BOOST_LIMITS 1\n# define ASIO_DISABLE_BOOST_REGEX 1\n# define ASIO_DISABLE_BOOST_STATIC_CONSTANT 1\n# define ASIO_DISABLE_BOOST_THROW_EXCEPTION 1\n# define ASIO_DISABLE_BOOST_WORKAROUND 1\n#else // defined(ASIO_STANDALONE)\n# include <boost/config.hpp>\n# include <boost/version.hpp>\n# define ASIO_HAS_BOOST_CONFIG 1\n#endif // defined(ASIO_STANDALONE)\n\n// Default to a header-only implementation. 
The user must specifically request\n// separate compilation by defining either ASIO_SEPARATE_COMPILATION or\n// ASIO_DYN_LINK (as a DLL/shared library implies separate compilation).\n#if !defined(ASIO_HEADER_ONLY)\n# if !defined(ASIO_SEPARATE_COMPILATION)\n#  if !defined(ASIO_DYN_LINK)\n#   define ASIO_HEADER_ONLY 1\n#  endif // !defined(ASIO_DYN_LINK)\n# endif // !defined(ASIO_SEPARATE_COMPILATION)\n#endif // !defined(ASIO_HEADER_ONLY)\n\n#if defined(ASIO_HEADER_ONLY)\n# define ASIO_DECL inline\n#else // defined(ASIO_HEADER_ONLY)\n# if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__)\n// We need to import/export our code only if the user has specifically asked\n// for it by defining ASIO_DYN_LINK.\n#  if defined(ASIO_DYN_LINK)\n// Export if this is our own source, otherwise import.\n#   if defined(ASIO_SOURCE)\n#    define ASIO_DECL __declspec(dllexport)\n#   else // defined(ASIO_SOURCE)\n#    define ASIO_DECL __declspec(dllimport)\n#   endif // defined(ASIO_SOURCE)\n#  endif // defined(ASIO_DYN_LINK)\n# endif // defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__)\n#endif // defined(ASIO_HEADER_ONLY)\n\n// If ASIO_DECL isn't defined yet define it now.\n#if !defined(ASIO_DECL)\n# define ASIO_DECL\n#endif // !defined(ASIO_DECL)\n\n// Microsoft Visual C++ detection.\n#if !defined(ASIO_MSVC)\n# if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC)\n#  define ASIO_MSVC BOOST_MSVC\n# elif defined(_MSC_VER) && (defined(__INTELLISENSE__) \\\n      || (!defined(__MWERKS__) && !defined(__EDG_VERSION__)))\n#  define ASIO_MSVC _MSC_VER\n# endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC)\n#endif // !defined(ASIO_MSVC)\n\n// Clang / libc++ detection.\n#if defined(__clang__)\n# if (__cplusplus >= 201103)\n#  if __has_include(<__config>)\n#   include <__config>\n#   if defined(_LIBCPP_VERSION)\n#    define ASIO_HAS_CLANG_LIBCXX 1\n#   endif // defined(_LIBCPP_VERSION)\n#  endif // __has_include(<__config>)\n# endif // 
(__cplusplus >= 201103)\n#endif // defined(__clang__)\n\n// Android platform detection.\n#if defined(__ANDROID__)\n# include <android/api-level.h>\n#endif // defined(__ANDROID__)\n\n// Support move construction and assignment on compilers known to allow it.\n#if !defined(ASIO_HAS_MOVE)\n# if !defined(ASIO_DISABLE_MOVE)\n#  if defined(__clang__)\n#   if __has_feature(__cxx_rvalue_references__)\n#    define ASIO_HAS_MOVE 1\n#   endif // __has_feature(__cxx_rvalue_references__)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_MOVE 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_MOVE 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n#  if defined(__INTEL_CXX11_MODE__)\n#    if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500)\n#      define BOOST_ASIO_HAS_MOVE 1\n#    endif // defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500)\n#    if defined(__ICL) && (__ICL >= 1500)\n#      define BOOST_ASIO_HAS_MOVE 1\n#    endif // defined(__ICL) && (__ICL >= 1500)\n#  endif // defined(__INTEL_CXX11_MODE__)\n# endif // !defined(ASIO_DISABLE_MOVE)\n#endif // !defined(ASIO_HAS_MOVE)\n\n// If ASIO_MOVE_CAST isn't defined, and move support is available, define\n// * ASIO_MOVE_ARG,\n// * ASIO_NONDEDUCED_MOVE_ARG, and\n// * ASIO_MOVE_CAST\n// to take advantage of rvalue references and perfect forwarding.\n#if defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST)\n# define ASIO_MOVE_ARG(type) type&&\n# define ASIO_MOVE_ARG2(type1, type2) type1, type2&&\n# define ASIO_NONDEDUCED_MOVE_ARG(type) type&\n# define ASIO_MOVE_CAST(type) static_cast<type&&>\n# define ASIO_MOVE_CAST2(type1, type2) static_cast<type1, type2&&>\n#endif // 
defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST)\n\n// If ASIO_MOVE_CAST still isn't defined, default to a C++03-compatible\n// implementation. Note that older g++ and MSVC versions don't like it when you\n// pass a non-member function through a const reference, so for most compilers\n// we'll play it safe and stick with the old approach of passing the handler by\n// value.\n#if !defined(ASIO_MOVE_CAST)\n# if defined(__GNUC__)\n#  if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4)\n#   define ASIO_MOVE_ARG(type) const type&\n#  else // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4)\n#   define ASIO_MOVE_ARG(type) type\n#  endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4)\n# elif defined(ASIO_MSVC)\n#  if (_MSC_VER >= 1400)\n#   define ASIO_MOVE_ARG(type) const type&\n#  else // (_MSC_VER >= 1400)\n#   define ASIO_MOVE_ARG(type) type\n#  endif // (_MSC_VER >= 1400)\n# else\n#  define ASIO_MOVE_ARG(type) type\n# endif\n# define ASIO_NONDEDUCED_MOVE_ARG(type) const type&\n# define ASIO_MOVE_CAST(type) static_cast<const type&>\n# define ASIO_MOVE_CAST2(type1, type2) static_cast<const type1, type2&>\n#endif // !defined(ASIO_MOVE_CAST)\n\n// Support variadic templates on compilers known to allow it.\n#if !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n# if !defined(ASIO_DISABLE_VARIADIC_TEMPLATES)\n#  if defined(__clang__)\n#   if __has_feature(__cxx_variadic_templates__)\n#    define ASIO_HAS_VARIADIC_TEMPLATES 1\n#   endif // __has_feature(__cxx_variadic_templates__)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_VARIADIC_TEMPLATES 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1900)\n#    define ASIO_HAS_VARIADIC_TEMPLATES 1\n#  
 endif // (_MSC_VER >= 1900)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_VARIADIC_TEMPLATES)\n#endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n// Support deleted functions on compilers known to allow it.\n#if !defined(ASIO_DELETED)\n# if defined(__GNUC__)\n#  if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#   if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#    define ASIO_DELETED = delete\n#   endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#  endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n# endif // defined(__GNUC__)\n# if defined(__clang__)\n#  if __has_feature(__cxx_deleted_functions__)\n#   define ASIO_DELETED = delete\n#  endif // __has_feature(__cxx_deleted_functions__)\n# endif // defined(__clang__)\n# if defined(ASIO_MSVC)\n#  if (_MSC_VER >= 1900)\n#   define ASIO_DELETED = delete\n#  endif // (_MSC_VER >= 1900)\n# endif // defined(ASIO_MSVC)\n# if !defined(ASIO_DELETED)\n#  define ASIO_DELETED\n# endif // !defined(ASIO_DELETED)\n#endif // !defined(ASIO_DELETED)\n\n// Support constexpr on compilers known to allow it.\n#if !defined(ASIO_HAS_CONSTEXPR)\n# if !defined(ASIO_DISABLE_CONSTEXPR)\n#  if defined(__clang__)\n#   if __has_feature(__cxx_constexpr__)\n#    define ASIO_HAS_CONSTEXPR 1\n#   endif // __has_feature(__cxx_constexpr__)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_CONSTEXPR 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1900)\n#    define ASIO_HAS_CONSTEXPR 1\n#   endif // (_MSC_VER >= 1900)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_CONSTEXPR)\n#endif // !defined(ASIO_HAS_CONSTEXPR)\n#if !defined(ASIO_CONSTEXPR)\n# if defined(ASIO_HAS_CONSTEXPR)\n#  define 
ASIO_CONSTEXPR constexpr\n# else // defined(ASIO_HAS_CONSTEXPR)\n#  define ASIO_CONSTEXPR\n# endif // defined(ASIO_HAS_CONSTEXPR)\n#endif // !defined(ASIO_CONSTEXPR)\n\n// Support noexcept on compilers known to allow it.\n#if !defined(ASIO_NOEXCEPT)\n# if !defined(ASIO_DISABLE_NOEXCEPT)\n#  if defined(ASIO_HAS_BOOST_CONFIG) && (BOOST_VERSION >= 105300)\n#   define ASIO_NOEXCEPT BOOST_NOEXCEPT\n#   define ASIO_NOEXCEPT_OR_NOTHROW BOOST_NOEXCEPT_OR_NOTHROW\n#  elif defined(__clang__)\n#   if __has_feature(__cxx_noexcept__)\n#    define ASIO_NOEXCEPT noexcept(true)\n#    define ASIO_NOEXCEPT_OR_NOTHROW noexcept(true)\n#   endif // __has_feature(__cxx_noexcept__)\n#  elif defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#      define ASIO_NOEXCEPT noexcept(true)\n#      define ASIO_NOEXCEPT_OR_NOTHROW noexcept(true)\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#  elif defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1900)\n#    define ASIO_NOEXCEPT noexcept(true)\n#    define ASIO_NOEXCEPT_OR_NOTHROW noexcept(true)\n#   endif // (_MSC_VER >= 1900)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_NOEXCEPT)\n# if !defined(ASIO_NOEXCEPT)\n#  define ASIO_NOEXCEPT\n# endif // !defined(ASIO_NOEXCEPT)\n# if !defined(ASIO_NOEXCEPT_OR_NOTHROW)\n#  define ASIO_NOEXCEPT_OR_NOTHROW throw()\n# endif // !defined(ASIO_NOEXCEPT_OR_NOTHROW)\n#endif // !defined(ASIO_NOEXCEPT)\n\n// Support automatic type deduction on compilers known to support it.\n#if !defined(ASIO_HAS_DECLTYPE)\n# if !defined(ASIO_DISABLE_DECLTYPE)\n#  if defined(__clang__)\n#   if __has_feature(__cxx_decltype__)\n#    define ASIO_HAS_DECLTYPE 1\n#   endif // __has_feature(__cxx_decltype__)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#    if 
defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_DECLTYPE 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1800)\n#    define ASIO_HAS_DECLTYPE 1\n#   endif // (_MSC_VER >= 1800)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_DECLTYPE)\n#endif // !defined(ASIO_HAS_DECLTYPE)\n\n// Support alias templates on compilers known to allow it.\n#if !defined(ASIO_HAS_ALIAS_TEMPLATES)\n# if !defined(ASIO_DISABLE_ALIAS_TEMPLATES)\n#  if defined(__clang__)\n#   if __has_feature(__cxx_alias_templates__)\n#    define ASIO_HAS_ALIAS_TEMPLATES 1\n#   endif // __has_feature(__cxx_alias_templates__)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_ALIAS_TEMPLATES 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1900)\n#    define ASIO_HAS_ALIAS_TEMPLATES 1\n#   endif // (_MSC_VER >= 1900)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_ALIAS_TEMPLATES)\n#endif // !defined(ASIO_HAS_ALIAS_TEMPLATES)\n\n// Support return type deduction on compilers known to allow it.\n#if !defined(ASIO_HAS_RETURN_TYPE_DEDUCTION)\n# if !defined(ASIO_DISABLE_RETURN_TYPE_DEDUCTION)\n#  if defined(__clang__)\n#   if __has_feature(__cxx_return_type_deduction__)\n#    define ASIO_HAS_RETURN_TYPE_DEDUCTION 1\n#   endif // __has_feature(__cxx_return_type_deduction__)\n#  elif (__cplusplus >= 201402)\n#   define ASIO_HAS_RETURN_TYPE_DEDUCTION 1\n#  endif // (__cplusplus >= 201402)\n# endif // !defined(ASIO_DISABLE_RETURN_TYPE_DEDUCTION)\n#endif // !defined(ASIO_HAS_RETURN_TYPE_DEDUCTION)\n\n// Support default function 
template arguments on compilers known to allow it.\n#if !defined(ASIO_HAS_DEFAULT_FUNCTION_TEMPLATE_ARGUMENTS)\n# if !defined(ASIO_DISABLE_DEFAULT_FUNCTION_TEMPLATE_ARGUMENTS)\n#  if (__cplusplus >= 201103)\n#   define ASIO_HAS_DEFAULT_FUNCTION_TEMPLATE_ARGUMENTS 1\n#  endif // (__cplusplus >= 201103)\n# endif // !defined(ASIO_DISABLE_DEFAULT_FUNCTION_TEMPLATE_ARGUMENTS)\n#endif // !defined(ASIO_HAS_DEFAULT_FUNCTION_TEMPLATE_ARGUMENTS)\n\n// Support concepts on compilers known to allow them.\n#if !defined(ASIO_HAS_CONCEPTS)\n# if !defined(ASIO_DISABLE_CONCEPTS)\n#  if __cpp_concepts\n#   define ASIO_HAS_CONCEPTS 1\n#   define ASIO_CONCEPT concept bool\n#  endif // __cpp_concepts\n# endif // !defined(ASIO_DISABLE_CONCEPTS)\n#endif // !defined(ASIO_HAS_CONCEPTS)\n\n// Standard library support for system errors.\n#if !defined(ASIO_HAS_STD_SYSTEM_ERROR)\n# if !defined(ASIO_DISABLE_STD_SYSTEM_ERROR)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_SYSTEM_ERROR 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<system_error>)\n#     define ASIO_HAS_STD_SYSTEM_ERROR 1\n#    endif // __has_include(<system_error>)\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_SYSTEM_ERROR 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_SYSTEM_ERROR 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_SYSTEM_ERROR)\n#endif // !defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n// Compliant C++11 compilers put noexcept specifiers on error_category members.\n#if !defined(ASIO_ERROR_CATEGORY_NOEXCEPT)\n# if 
defined(ASIO_HAS_BOOST_CONFIG) && (BOOST_VERSION >= 105300)\n#  define ASIO_ERROR_CATEGORY_NOEXCEPT BOOST_NOEXCEPT\n# elif defined(__clang__)\n#  if __has_feature(__cxx_noexcept__)\n#   define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true)\n#  endif // __has_feature(__cxx_noexcept__)\n# elif defined(__GNUC__)\n#  if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#   if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true)\n#   endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#  endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n# elif defined(ASIO_MSVC)\n#  if (_MSC_VER >= 1900)\n#   define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true)\n#  endif // (_MSC_VER >= 1900)\n# endif // defined(ASIO_MSVC)\n# if !defined(ASIO_ERROR_CATEGORY_NOEXCEPT)\n#  define ASIO_ERROR_CATEGORY_NOEXCEPT\n# endif // !defined(ASIO_ERROR_CATEGORY_NOEXCEPT)\n#endif // !defined(ASIO_ERROR_CATEGORY_NOEXCEPT)\n\n// Standard library support for arrays.\n#if !defined(ASIO_HAS_STD_ARRAY)\n# if !defined(ASIO_DISABLE_STD_ARRAY)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_ARRAY 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<array>)\n#     define ASIO_HAS_STD_ARRAY 1\n#    endif // __has_include(<array>)\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_ARRAY 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1600)\n#    define ASIO_HAS_STD_ARRAY 1\n#   endif // (_MSC_VER >= 1600)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_ARRAY)\n#endif // !defined(ASIO_HAS_STD_ARRAY)\n\n// Standard library support for shared_ptr 
and weak_ptr.\n#if !defined(ASIO_HAS_STD_SHARED_PTR)\n# if !defined(ASIO_DISABLE_STD_SHARED_PTR)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_SHARED_PTR 1\n#   elif (__cplusplus >= 201103)\n#    define ASIO_HAS_STD_SHARED_PTR 1\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_SHARED_PTR 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1600)\n#    define ASIO_HAS_STD_SHARED_PTR 1\n#   endif // (_MSC_VER >= 1600)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_SHARED_PTR)\n#endif // !defined(ASIO_HAS_STD_SHARED_PTR)\n\n// Standard library support for allocator_arg_t.\n#if !defined(ASIO_HAS_STD_ALLOCATOR_ARG)\n# if !defined(ASIO_DISABLE_STD_ALLOCATOR_ARG)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_ALLOCATOR_ARG 1\n#   elif (__cplusplus >= 201103)\n#    define ASIO_HAS_STD_ALLOCATOR_ARG 1\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_ALLOCATOR_ARG 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1600)\n#    define ASIO_HAS_STD_ALLOCATOR_ARG 1\n#   endif // (_MSC_VER >= 1600)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_ALLOCATOR_ARG)\n#endif // !defined(ASIO_HAS_STD_ALLOCATOR_ARG)\n\n// Standard library support for atomic operations.\n#if 
!defined(ASIO_HAS_STD_ATOMIC)\n# if !defined(ASIO_DISABLE_STD_ATOMIC)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_ATOMIC 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<atomic>)\n#     define ASIO_HAS_STD_ATOMIC 1\n#    endif // __has_include(<atomic>)\n#   elif defined(__apple_build_version__) && defined(_LIBCPP_VERSION)\n#    if (__clang_major__ >= 10)\n#     if __has_include(<atomic>)\n#      define ASIO_HAS_STD_ATOMIC 1\n#     endif // __has_include(<atomic>)\n#    endif // (__clang_major__ >= 10)\n#   endif // defined(__apple_build_version__) && defined(_LIBCPP_VERSION)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_ATOMIC 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_ATOMIC 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_ATOMIC)\n#endif // !defined(ASIO_HAS_STD_ATOMIC)\n\n// Standard library support for chrono. 
Some standard libraries (such as the\n// libstdc++ shipped with gcc 4.6) provide monotonic_clock as per early C++0x\n// drafts, rather than the eventually standardised name of steady_clock.\n#if !defined(ASIO_HAS_STD_CHRONO)\n# if !defined(ASIO_DISABLE_STD_CHRONO)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_CHRONO 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<chrono>)\n#     define ASIO_HAS_STD_CHRONO 1\n#    endif // __has_include(<chrono>)\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_CHRONO 1\n#     if ((__GNUC__ == 4) && (__GNUC_MINOR__ == 6))\n#      define ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK 1\n#     endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ == 6))\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_CHRONO 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_CHRONO)\n#endif // !defined(ASIO_HAS_STD_CHRONO)\n\n// Boost support for chrono.\n#if !defined(ASIO_HAS_BOOST_CHRONO)\n# if !defined(ASIO_DISABLE_BOOST_CHRONO)\n#  if defined(ASIO_HAS_BOOST_CONFIG) && (BOOST_VERSION >= 104700)\n#   define ASIO_HAS_BOOST_CHRONO 1\n#  endif // defined(ASIO_HAS_BOOST_CONFIG) && (BOOST_VERSION >= 104700)\n# endif // !defined(ASIO_DISABLE_BOOST_CHRONO)\n#endif // !defined(ASIO_HAS_BOOST_CHRONO)\n\n// Some form of chrono library is available.\n#if !defined(ASIO_HAS_CHRONO)\n# if defined(ASIO_HAS_STD_CHRONO) \\\n    || defined(ASIO_HAS_BOOST_CHRONO)\n#  define ASIO_HAS_CHRONO 1\n# endif // defined(ASIO_HAS_STD_CHRONO)\n        // || defined(ASIO_HAS_BOOST_CHRONO)\n#endif // 
!defined(ASIO_HAS_CHRONO)\n\n// Boost support for the DateTime library.\n#if !defined(ASIO_HAS_BOOST_DATE_TIME)\n# if !defined(ASIO_DISABLE_BOOST_DATE_TIME)\n#  define ASIO_HAS_BOOST_DATE_TIME 1\n# endif // !defined(ASIO_DISABLE_BOOST_DATE_TIME)\n#endif // !defined(ASIO_HAS_BOOST_DATE_TIME)\n\n// Standard library support for addressof.\n#if !defined(ASIO_HAS_STD_ADDRESSOF)\n# if !defined(ASIO_DISABLE_STD_ADDRESSOF)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_ADDRESSOF 1\n#   elif (__cplusplus >= 201103)\n#    define ASIO_HAS_STD_ADDRESSOF 1\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_ADDRESSOF 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_ADDRESSOF 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_ADDRESSOF)\n#endif // !defined(ASIO_HAS_STD_ADDRESSOF)\n\n// Standard library support for the function class.\n#if !defined(ASIO_HAS_STD_FUNCTION)\n# if !defined(ASIO_DISABLE_STD_FUNCTION)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_FUNCTION 1\n#   elif (__cplusplus >= 201103)\n#    define ASIO_HAS_STD_FUNCTION 1\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_FUNCTION 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if 
(_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_FUNCTION 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_FUNCTION)\n#endif // !defined(ASIO_HAS_STD_FUNCTION)\n\n// Standard library support for type traits.\n#if !defined(ASIO_HAS_STD_TYPE_TRAITS)\n# if !defined(ASIO_DISABLE_STD_TYPE_TRAITS)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_TYPE_TRAITS 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<type_traits>)\n#     define ASIO_HAS_STD_TYPE_TRAITS 1\n#    endif // __has_include(<type_traits>)\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_TYPE_TRAITS 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_TYPE_TRAITS 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_TYPE_TRAITS)\n#endif // !defined(ASIO_HAS_STD_TYPE_TRAITS)\n\n// Standard library support for the nullptr_t type.\n#if !defined(ASIO_HAS_NULLPTR)\n# if !defined(ASIO_DISABLE_NULLPTR)\n#  if defined(__clang__)\n#   if __has_feature(__cxx_nullptr__)\n#    define ASIO_HAS_NULLPTR 1\n#   endif // __has_feature(__cxx_nullptr__)\n#  elif defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_NULLPTR 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_NULLPTR 1\n#   endif // (_MSC_VER >= 1700)\n#  
endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_NULLPTR)\n#endif // !defined(ASIO_HAS_NULLPTR)\n\n// Standard library support for the C++11 allocator additions.\n#if !defined(ASIO_HAS_CXX11_ALLOCATORS)\n# if !defined(ASIO_DISABLE_CXX11_ALLOCATORS)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_CXX11_ALLOCATORS 1\n#   elif (__cplusplus >= 201103)\n#    define ASIO_HAS_CXX11_ALLOCATORS 1\n#   endif // (__cplusplus >= 201103)\n#  elif defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_CXX11_ALLOCATORS 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1800)\n#    define ASIO_HAS_CXX11_ALLOCATORS 1\n#   endif // (_MSC_VER >= 1800)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_CXX11_ALLOCATORS)\n#endif // !defined(ASIO_HAS_CXX11_ALLOCATORS)\n\n// Standard library support for the cstdint header.\n#if !defined(ASIO_HAS_CSTDINT)\n# if !defined(ASIO_DISABLE_CSTDINT)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_CSTDINT 1\n#   elif (__cplusplus >= 201103)\n#    define ASIO_HAS_CSTDINT 1\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_CSTDINT 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_CSTDINT 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_CSTDINT)\n#endif // 
!defined(ASIO_HAS_CSTDINT)\n\n// Standard library support for the thread class.\n#if !defined(ASIO_HAS_STD_THREAD)\n# if !defined(ASIO_DISABLE_STD_THREAD)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_THREAD 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<thread>)\n#     define ASIO_HAS_STD_THREAD 1\n#    endif // __has_include(<thread>)\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_THREAD 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_THREAD 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_THREAD)\n#endif // !defined(ASIO_HAS_STD_THREAD)\n\n// Standard library support for the mutex and condition variable classes.\n#if !defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n# if !defined(ASIO_DISABLE_STD_MUTEX_AND_CONDVAR)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<mutex>)\n#     define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1\n#    endif // __has_include(<mutex>)\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1\n#   endif 
// (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_MUTEX_AND_CONDVAR)\n#endif // !defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n\n// Standard library support for the call_once function.\n#if !defined(ASIO_HAS_STD_CALL_ONCE)\n# if !defined(ASIO_DISABLE_STD_CALL_ONCE)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_CALL_ONCE 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<mutex>)\n#     define ASIO_HAS_STD_CALL_ONCE 1\n#    endif // __has_include(<mutex>)\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_CALL_ONCE 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_CALL_ONCE 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_CALL_ONCE)\n#endif // !defined(ASIO_HAS_STD_CALL_ONCE)\n\n// Standard library support for futures.\n#if !defined(ASIO_HAS_STD_FUTURE)\n# if !defined(ASIO_DISABLE_STD_FUTURE)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    define ASIO_HAS_STD_FUTURE 1\n#   elif (__cplusplus >= 201103)\n#    if __has_include(<future>)\n#     define ASIO_HAS_STD_FUTURE 1\n#    endif // __has_include(<mutex>)\n#   endif // (__cplusplus >= 201103)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_FUTURE 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if 
(_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_FUTURE 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_FUTURE)\n#endif // !defined(ASIO_HAS_STD_FUTURE)\n\n// Standard library support for std::string_view.\n#if !defined(ASIO_HAS_STD_STRING_VIEW)\n# if !defined(ASIO_DISABLE_STD_STRING_VIEW)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    if (__cplusplus >= 201402)\n#     if __has_include(<string_view>)\n#      define ASIO_HAS_STD_STRING_VIEW 1\n#     endif // __has_include(<string_view>)\n#    endif // (__cplusplus >= 201402)\n#   else // defined(ASIO_HAS_CLANG_LIBCXX)\n#    if (__cplusplus >= 201703)\n#     if __has_include(<string_view>)\n#      define ASIO_HAS_STD_STRING_VIEW 1\n#     endif // __has_include(<string_view>)\n#    endif // (__cplusplus >= 201703)\n#   endif // defined(ASIO_HAS_CLANG_LIBCXX)\n#  elif defined(__GNUC__)\n#   if (__GNUC__ >= 7)\n#    if (__cplusplus >= 201703)\n#     define ASIO_HAS_STD_STRING_VIEW 1\n#    endif // (__cplusplus >= 201703)\n#   endif // (__GNUC__ >= 7)\n#  elif defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1910 && _MSVC_LANG >= 201703)\n#    define ASIO_HAS_STD_STRING_VIEW 1\n#   endif // (_MSC_VER >= 1910 && _MSVC_LANG >= 201703)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_STRING_VIEW)\n#endif // !defined(ASIO_HAS_STD_STRING_VIEW)\n\n// Standard library support for std::experimental::string_view.\n#if !defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\n# if !defined(ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW)\n#  if defined(__clang__)\n#   if defined(ASIO_HAS_CLANG_LIBCXX)\n#    if (_LIBCPP_VERSION < 7000)\n#     if (__cplusplus >= 201402)\n#      if __has_include(<experimental/string_view>)\n#       define ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW 1\n#      endif // __has_include(<experimental/string_view>)\n#     endif // (__cplusplus >= 201402)\n#    endif // (_LIBCPP_VERSION < 7000)\n#   else // defined(ASIO_HAS_CLANG_LIBCXX)\n#  
  if (__cplusplus >= 201402)\n#     if __has_include(<experimental/string_view>)\n#      define ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW 1\n#     endif // __has_include(<experimental/string_view>)\n#    endif // (__cplusplus >= 201402)\n#   endif // // defined(ASIO_HAS_CLANG_LIBCXX)\n#  endif // defined(__clang__)\n#  if defined(__GNUC__)\n#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 9)) || (__GNUC__ > 4)\n#    if (__cplusplus >= 201402)\n#     define ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW 1\n#    endif // (__cplusplus >= 201402)\n#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 9)) || (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n# endif // !defined(ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW)\n#endif // !defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\n\n// Standard library has a string_view that we can use.\n#if !defined(ASIO_HAS_STRING_VIEW)\n# if !defined(ASIO_DISABLE_STRING_VIEW)\n#  if defined(ASIO_HAS_STD_STRING_VIEW)\n#   define ASIO_HAS_STRING_VIEW 1\n#  elif defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\n#   define ASIO_HAS_STRING_VIEW 1\n#  endif // defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\n# endif // !defined(ASIO_DISABLE_STRING_VIEW)\n#endif // !defined(ASIO_HAS_STRING_VIEW)\n\n// Standard library support for iostream move construction and assignment.\n#if !defined(ASIO_HAS_STD_IOSTREAM_MOVE)\n# if !defined(ASIO_DISABLE_STD_IOSTREAM_MOVE)\n#  if defined(__GNUC__)\n#   if (__GNUC__ > 4)\n#    if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#     define ASIO_HAS_STD_IOSTREAM_MOVE 1\n#    endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#   endif // (__GNUC__ > 4)\n#  endif // defined(__GNUC__)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1700)\n#    define ASIO_HAS_STD_IOSTREAM_MOVE 1\n#   endif // (_MSC_VER >= 1700)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_IOSTREAM_MOVE)\n#endif // !defined(ASIO_HAS_STD_IOSTREAM_MOVE)\n\n// Standard library has invoke_result (which supersedes result_of).\n#if 
!defined(ASIO_HAS_STD_INVOKE_RESULT)\n# if !defined(ASIO_DISABLE_STD_INVOKE_RESULT)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_VER >= 1911 && _MSVC_LANG >= 201703)\n#    define ASIO_HAS_STD_INVOKE_RESULT 1\n#   endif // (_MSC_VER >= 1911 && _MSVC_LANG >= 201703)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_STD_INVOKE_RESULT)\n#endif // !defined(ASIO_HAS_STD_INVOKE_RESULT)\n\n// Windows App target. Windows but with a limited API.\n#if !defined(ASIO_WINDOWS_APP)\n# if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0603)\n#  include <winapifamily.h>\n#  if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) \\\n   && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)\n#   define ASIO_WINDOWS_APP 1\n#  endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)\n         // && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)\n# endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0603)\n#endif // !defined(ASIO_WINDOWS_APP)\n\n// Legacy WinRT target. Windows App is preferred.\n#if !defined(ASIO_WINDOWS_RUNTIME)\n# if !defined(ASIO_WINDOWS_APP)\n#  if defined(__cplusplus_winrt)\n#   include <winapifamily.h>\n#   if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) \\\n    && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)\n#    define ASIO_WINDOWS_RUNTIME 1\n#   endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)\n          // && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)\n#  endif // defined(__cplusplus_winrt)\n# endif // !defined(ASIO_WINDOWS_APP)\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\n// Windows target. 
Excludes WinRT but includes Windows App targets.\n#if !defined(ASIO_WINDOWS)\n# if !defined(ASIO_WINDOWS_RUNTIME)\n#  if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_WINDOWS)\n#   define ASIO_WINDOWS 1\n#  elif defined(WIN32) || defined(_WIN32) || defined(__WIN32__)\n#   define ASIO_WINDOWS 1\n#  elif defined(ASIO_WINDOWS_APP)\n#   define ASIO_WINDOWS 1\n#  endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_WINDOWS)\n# endif // !defined(ASIO_WINDOWS_RUNTIME)\n#endif // !defined(ASIO_WINDOWS)\n\n// Windows: target OS version.\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# if !defined(_WIN32_WINNT) && !defined(_WIN32_WINDOWS)\n#  if defined(_MSC_VER) || defined(__BORLANDC__)\n#   pragma message( \\\n  \"Please define _WIN32_WINNT or _WIN32_WINDOWS appropriately. For example:\\n\"\\\n  \"- add -D_WIN32_WINNT=0x0601 to the compiler command line; or\\n\"\\\n  \"- add _WIN32_WINNT=0x0601 to your project's Preprocessor Definitions.\\n\"\\\n  \"Assuming _WIN32_WINNT=0x0601 (i.e. Windows 7 target).\")\n#  else // defined(_MSC_VER) || defined(__BORLANDC__)\n#   warning Please define _WIN32_WINNT or _WIN32_WINDOWS appropriately.\n#   warning For example, add -D_WIN32_WINNT=0x0601 to the compiler command line.\n#   warning Assuming _WIN32_WINNT=0x0601 (i.e. 
Windows 7 target).\n#  endif // defined(_MSC_VER) || defined(__BORLANDC__)\n#  define _WIN32_WINNT 0x0601\n# endif // !defined(_WIN32_WINNT) && !defined(_WIN32_WINDOWS)\n# if defined(_MSC_VER)\n#  if defined(_WIN32) && !defined(WIN32)\n#   if !defined(_WINSOCK2API_)\n#    define WIN32 // Needed for correct types in winsock2.h\n#   else // !defined(_WINSOCK2API_)\n#    error Please define the macro WIN32 in your compiler options\n#   endif // !defined(_WINSOCK2API_)\n#  endif // defined(_WIN32) && !defined(WIN32)\n# endif // defined(_MSC_VER)\n# if defined(__BORLANDC__)\n#  if defined(__WIN32__) && !defined(WIN32)\n#   if !defined(_WINSOCK2API_)\n#    define WIN32 // Needed for correct types in winsock2.h\n#   else // !defined(_WINSOCK2API_)\n#    error Please define the macro WIN32 in your compiler options\n#   endif // !defined(_WINSOCK2API_)\n#  endif // defined(__WIN32__) && !defined(WIN32)\n# endif // defined(__BORLANDC__)\n# if defined(__CYGWIN__)\n#  if !defined(__USE_W32_SOCKETS)\n#   error You must add -D__USE_W32_SOCKETS to your compiler options.\n#  endif // !defined(__USE_W32_SOCKETS)\n# endif // defined(__CYGWIN__)\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n// Windows: minimise header inclusion.\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# if !defined(ASIO_NO_WIN32_LEAN_AND_MEAN)\n#  if !defined(WIN32_LEAN_AND_MEAN)\n#   define WIN32_LEAN_AND_MEAN\n#  endif // !defined(WIN32_LEAN_AND_MEAN)\n# endif // !defined(ASIO_NO_WIN32_LEAN_AND_MEAN)\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n// Windows: suppress definition of \"min\" and \"max\" macros.\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# if !defined(ASIO_NO_NOMINMAX)\n#  if !defined(NOMINMAX)\n#   define NOMINMAX 1\n#  endif // !defined(NOMINMAX)\n# endif // !defined(ASIO_NO_NOMINMAX)\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n// Windows: IO Completion Ports.\n#if !defined(ASIO_HAS_IOCP)\n# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n#  
if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0400)\n#   if !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP)\n#    if !defined(ASIO_DISABLE_IOCP)\n#     define ASIO_HAS_IOCP 1\n#    endif // !defined(ASIO_DISABLE_IOCP)\n#   endif // !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP)\n#  endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0400)\n# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n#endif // !defined(ASIO_HAS_IOCP)\n\n// On POSIX (and POSIX-like) platforms we need to include unistd.h in order to\n// get access to the various platform feature macros, e.g. to be able to test\n// for threads support.\n#if !defined(ASIO_HAS_UNISTD_H)\n# if !defined(ASIO_HAS_BOOST_CONFIG)\n#  if defined(unix) \\\n   || defined(__unix) \\\n   || defined(_XOPEN_SOURCE) \\\n   || defined(_POSIX_SOURCE) \\\n   || (defined(__MACH__) && defined(__APPLE__)) \\\n   || defined(__FreeBSD__) \\\n   || defined(__NetBSD__) \\\n   || defined(__OpenBSD__) \\\n   || defined(__linux__) \\\n   || defined(__HAIKU__)\n#   define ASIO_HAS_UNISTD_H 1\n#  endif\n# endif // !defined(ASIO_HAS_BOOST_CONFIG)\n#endif // !defined(ASIO_HAS_UNISTD_H)\n#if defined(ASIO_HAS_UNISTD_H)\n# include <unistd.h>\n#endif // defined(ASIO_HAS_UNISTD_H)\n\n// Linux: epoll, eventfd and timerfd.\n#if defined(__linux__)\n# include <linux/version.h>\n# if !defined(ASIO_HAS_EPOLL)\n#  if !defined(ASIO_DISABLE_EPOLL)\n#   if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45)\n#    define ASIO_HAS_EPOLL 1\n#   endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45)\n#  endif // !defined(ASIO_DISABLE_EPOLL)\n# endif // !defined(ASIO_HAS_EPOLL)\n# if !defined(ASIO_HAS_EVENTFD)\n#  if !defined(ASIO_DISABLE_EVENTFD)\n#   if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)\n#    define ASIO_HAS_EVENTFD 1\n#   endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)\n#  endif // !defined(ASIO_DISABLE_EVENTFD)\n# endif // !defined(ASIO_HAS_EVENTFD)\n# if !defined(ASIO_HAS_TIMERFD)\n#  if defined(ASIO_HAS_EPOLL)\n#   if (__GLIBC__ > 2) || 
(__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8)\n#    define ASIO_HAS_TIMERFD 1\n#   endif // (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8)\n#  endif // defined(ASIO_HAS_EPOLL)\n# endif // !defined(ASIO_HAS_TIMERFD)\n#endif // defined(__linux__)\n\n// Mac OS X, FreeBSD, NetBSD, OpenBSD: kqueue.\n#if (defined(__MACH__) && defined(__APPLE__)) \\\n  || defined(__FreeBSD__) \\\n  || defined(__NetBSD__) \\\n  || defined(__OpenBSD__)\n# if !defined(ASIO_HAS_KQUEUE)\n#  if !defined(ASIO_DISABLE_KQUEUE)\n#   define ASIO_HAS_KQUEUE 1\n#  endif // !defined(ASIO_DISABLE_KQUEUE)\n# endif // !defined(ASIO_HAS_KQUEUE)\n#endif // (defined(__MACH__) && defined(__APPLE__))\n       //   || defined(__FreeBSD__)\n       //   || defined(__NetBSD__)\n       //   || defined(__OpenBSD__)\n\n// Solaris: /dev/poll.\n#if defined(__sun)\n# if !defined(ASIO_HAS_DEV_POLL)\n#  if !defined(ASIO_DISABLE_DEV_POLL)\n#   define ASIO_HAS_DEV_POLL 1\n#  endif // !defined(ASIO_DISABLE_DEV_POLL)\n# endif // !defined(ASIO_HAS_DEV_POLL)\n#endif // defined(__sun)\n\n// Serial ports.\n#if !defined(ASIO_HAS_SERIAL_PORT)\n# if defined(ASIO_HAS_IOCP) \\\n  || !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n#  if !defined(__SYMBIAN32__)\n#   if !defined(ASIO_DISABLE_SERIAL_PORT)\n#    define ASIO_HAS_SERIAL_PORT 1\n#   endif // !defined(ASIO_DISABLE_SERIAL_PORT)\n#  endif // !defined(__SYMBIAN32__)\n# endif // defined(ASIO_HAS_IOCP)\n        //   || !defined(ASIO_WINDOWS)\n        //   && !defined(ASIO_WINDOWS_RUNTIME)\n        //   && !defined(__CYGWIN__)\n#endif // !defined(ASIO_HAS_SERIAL_PORT)\n\n// Windows: stream handles.\n#if !defined(ASIO_HAS_WINDOWS_STREAM_HANDLE)\n# if !defined(ASIO_DISABLE_WINDOWS_STREAM_HANDLE)\n#  if defined(ASIO_HAS_IOCP)\n#   define ASIO_HAS_WINDOWS_STREAM_HANDLE 1\n#  endif // defined(ASIO_HAS_IOCP)\n# endif // !defined(ASIO_DISABLE_WINDOWS_STREAM_HANDLE)\n#endif // !defined(ASIO_HAS_WINDOWS_STREAM_HANDLE)\n\n// Windows: random 
access handles.\n#if !defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)\n# if !defined(ASIO_DISABLE_WINDOWS_RANDOM_ACCESS_HANDLE)\n#  if defined(ASIO_HAS_IOCP)\n#   define ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE 1\n#  endif // defined(ASIO_HAS_IOCP)\n# endif // !defined(ASIO_DISABLE_WINDOWS_RANDOM_ACCESS_HANDLE)\n#endif // !defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)\n\n// Windows: object handles.\n#if !defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)\n# if !defined(ASIO_DISABLE_WINDOWS_OBJECT_HANDLE)\n#  if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n#   if !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP)\n#    define ASIO_HAS_WINDOWS_OBJECT_HANDLE 1\n#   endif // !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP)\n#  endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# endif // !defined(ASIO_DISABLE_WINDOWS_OBJECT_HANDLE)\n#endif // !defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)\n\n// Windows: OVERLAPPED wrapper.\n#if !defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR)\n# if !defined(ASIO_DISABLE_WINDOWS_OVERLAPPED_PTR)\n#  if defined(ASIO_HAS_IOCP)\n#   define ASIO_HAS_WINDOWS_OVERLAPPED_PTR 1\n#  endif // defined(ASIO_HAS_IOCP)\n# endif // !defined(ASIO_DISABLE_WINDOWS_OVERLAPPED_PTR)\n#endif // !defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR)\n\n// POSIX: stream-oriented file descriptors.\n#if !defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR)\n# if !defined(ASIO_DISABLE_POSIX_STREAM_DESCRIPTOR)\n#  if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n#   define ASIO_HAS_POSIX_STREAM_DESCRIPTOR 1\n#  endif // !defined(ASIO_WINDOWS)\n         //   && !defined(ASIO_WINDOWS_RUNTIME)\n         //   && !defined(__CYGWIN__)\n# endif // !defined(ASIO_DISABLE_POSIX_STREAM_DESCRIPTOR)\n#endif // !defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR)\n\n// UNIX domain sockets.\n#if !defined(ASIO_HAS_LOCAL_SOCKETS)\n# if !defined(ASIO_DISABLE_LOCAL_SOCKETS)\n#  if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n#   define 
ASIO_HAS_LOCAL_SOCKETS 1\n#  endif // !defined(ASIO_WINDOWS)\n         //   && !defined(ASIO_WINDOWS_RUNTIME)\n         //   && !defined(__CYGWIN__)\n# endif // !defined(ASIO_DISABLE_LOCAL_SOCKETS)\n#endif // !defined(ASIO_HAS_LOCAL_SOCKETS)\n\n// Can use sigaction() instead of signal().\n#if !defined(ASIO_HAS_SIGACTION)\n# if !defined(ASIO_DISABLE_SIGACTION)\n#  if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n#   define ASIO_HAS_SIGACTION 1\n#  endif // !defined(ASIO_WINDOWS)\n         //   && !defined(ASIO_WINDOWS_RUNTIME)\n         //   && !defined(__CYGWIN__)\n# endif // !defined(ASIO_DISABLE_SIGACTION)\n#endif // !defined(ASIO_HAS_SIGACTION)\n\n// Can use signal().\n#if !defined(ASIO_HAS_SIGNAL)\n# if !defined(ASIO_DISABLE_SIGNAL)\n#  if !defined(UNDER_CE)\n#   define ASIO_HAS_SIGNAL 1\n#  endif // !defined(UNDER_CE)\n# endif // !defined(ASIO_DISABLE_SIGNAL)\n#endif // !defined(ASIO_HAS_SIGNAL)\n\n// Can use getaddrinfo() and getnameinfo().\n#if !defined(ASIO_HAS_GETADDRINFO)\n# if !defined(ASIO_DISABLE_GETADDRINFO)\n#  if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n#   if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0501)\n#    define ASIO_HAS_GETADDRINFO 1\n#   elif defined(UNDER_CE)\n#    define ASIO_HAS_GETADDRINFO 1\n#   endif // defined(UNDER_CE)\n#  elif defined(__MACH__) && defined(__APPLE__)\n#   if defined(__MAC_OS_X_VERSION_MIN_REQUIRED)\n#    if (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1050)\n#     define ASIO_HAS_GETADDRINFO 1\n#    endif // (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1050)\n#   else // defined(__MAC_OS_X_VERSION_MIN_REQUIRED)\n#    define ASIO_HAS_GETADDRINFO 1\n#   endif // defined(__MAC_OS_X_VERSION_MIN_REQUIRED)\n#  else // defined(__MACH__) && defined(__APPLE__)\n#   define ASIO_HAS_GETADDRINFO 1\n#  endif // defined(__MACH__) && defined(__APPLE__)\n# endif // !defined(ASIO_DISABLE_GETADDRINFO)\n#endif // !defined(ASIO_HAS_GETADDRINFO)\n\n// Whether standard iostreams are disabled.\n#if 
!defined(ASIO_NO_IOSTREAM)\n# if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_IOSTREAM)\n#  define ASIO_NO_IOSTREAM 1\n# endif // !defined(BOOST_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n// Whether exception handling is disabled.\n#if !defined(ASIO_NO_EXCEPTIONS)\n# if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_EXCEPTIONS)\n#  define ASIO_NO_EXCEPTIONS 1\n# endif // !defined(BOOST_NO_EXCEPTIONS)\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n\n// Whether the typeid operator is supported.\n#if !defined(ASIO_NO_TYPEID)\n# if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_TYPEID)\n#  define ASIO_NO_TYPEID 1\n# endif // !defined(BOOST_NO_TYPEID)\n#endif // !defined(ASIO_NO_TYPEID)\n\n// Threads.\n#if !defined(ASIO_HAS_THREADS)\n# if !defined(ASIO_DISABLE_THREADS)\n#  if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_THREADS)\n#   define ASIO_HAS_THREADS 1\n#  elif defined(__GNUC__) && !defined(__MINGW32__) \\\n     && !defined(linux) && !defined(__linux) && !defined(__linux__)\n#   define ASIO_HAS_THREADS 1\n#  elif defined(_MT) || defined(__MT__)\n#   define ASIO_HAS_THREADS 1\n#  elif defined(_REENTRANT)\n#   define ASIO_HAS_THREADS 1\n#  elif defined(__APPLE__)\n#   define ASIO_HAS_THREADS 1\n#  elif defined(__HAIKU__)\n#   define ASIO_HAS_THREADS 1\n#  elif defined(_POSIX_THREADS) && (_POSIX_THREADS + 0 >= 0)\n#   define ASIO_HAS_THREADS 1\n#  elif defined(_PTHREADS)\n#   define ASIO_HAS_THREADS 1\n#  endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_THREADS)\n# endif // !defined(ASIO_DISABLE_THREADS)\n#endif // !defined(ASIO_HAS_THREADS)\n\n// POSIX threads.\n#if !defined(ASIO_HAS_PTHREADS)\n# if defined(ASIO_HAS_THREADS)\n#  if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_PTHREADS)\n#   define ASIO_HAS_PTHREADS 1\n#  elif defined(_POSIX_THREADS) && (_POSIX_THREADS + 0 >= 0)\n#   define ASIO_HAS_PTHREADS 1\n#  elif defined(__HAIKU__)\n#   define ASIO_HAS_PTHREADS 1\n#  endif // defined(ASIO_HAS_BOOST_CONFIG) && 
defined(BOOST_HAS_PTHREADS)\n# endif // defined(ASIO_HAS_THREADS)\n#endif // !defined(ASIO_HAS_PTHREADS)\n\n// Helper to prevent macro expansion.\n#define ASIO_PREVENT_MACRO_SUBSTITUTION\n\n// Helper to define in-class constants.\n#if !defined(ASIO_STATIC_CONSTANT)\n# if !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT)\n#  define ASIO_STATIC_CONSTANT(type, assignment) \\\n    BOOST_STATIC_CONSTANT(type, assignment)\n# else // !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT)\n#  define ASIO_STATIC_CONSTANT(type, assignment) \\\n    static const type assignment\n# endif // !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT)\n#endif // !defined(ASIO_STATIC_CONSTANT)\n\n// Boost array library.\n#if !defined(ASIO_HAS_BOOST_ARRAY)\n# if !defined(ASIO_DISABLE_BOOST_ARRAY)\n#  define ASIO_HAS_BOOST_ARRAY 1\n# endif // !defined(ASIO_DISABLE_BOOST_ARRAY)\n#endif // !defined(ASIO_HAS_BOOST_ARRAY)\n\n// Boost assert macro.\n#if !defined(ASIO_HAS_BOOST_ASSERT)\n# if !defined(ASIO_DISABLE_BOOST_ASSERT)\n#  define ASIO_HAS_BOOST_ASSERT 1\n# endif // !defined(ASIO_DISABLE_BOOST_ASSERT)\n#endif // !defined(ASIO_HAS_BOOST_ASSERT)\n\n// Boost limits header.\n#if !defined(ASIO_HAS_BOOST_LIMITS)\n# if !defined(ASIO_DISABLE_BOOST_LIMITS)\n#  define ASIO_HAS_BOOST_LIMITS 1\n# endif // !defined(ASIO_DISABLE_BOOST_LIMITS)\n#endif // !defined(ASIO_HAS_BOOST_LIMITS)\n\n// Boost throw_exception function.\n#if !defined(ASIO_HAS_BOOST_THROW_EXCEPTION)\n# if !defined(ASIO_DISABLE_BOOST_THROW_EXCEPTION)\n#  define ASIO_HAS_BOOST_THROW_EXCEPTION 1\n# endif // !defined(ASIO_DISABLE_BOOST_THROW_EXCEPTION)\n#endif // !defined(ASIO_HAS_BOOST_THROW_EXCEPTION)\n\n// Boost regex library.\n#if !defined(ASIO_HAS_BOOST_REGEX)\n# if !defined(ASIO_DISABLE_BOOST_REGEX)\n#  define ASIO_HAS_BOOST_REGEX 1\n# endif // !defined(ASIO_DISABLE_BOOST_REGEX)\n#endif // !defined(ASIO_HAS_BOOST_REGEX)\n\n// Boost bind function.\n#if !defined(ASIO_HAS_BOOST_BIND)\n# if !defined(ASIO_DISABLE_BOOST_BIND)\n#  define ASIO_HAS_BOOST_BIND 1\n# 
endif // !defined(ASIO_DISABLE_BOOST_BIND)\n#endif // !defined(ASIO_HAS_BOOST_BIND)\n\n// Boost's BOOST_WORKAROUND macro.\n#if !defined(ASIO_HAS_BOOST_WORKAROUND)\n# if !defined(ASIO_DISABLE_BOOST_WORKAROUND)\n#  define ASIO_HAS_BOOST_WORKAROUND 1\n# endif // !defined(ASIO_DISABLE_BOOST_WORKAROUND)\n#endif // !defined(ASIO_HAS_BOOST_WORKAROUND)\n\n// Microsoft Visual C++'s secure C runtime library.\n#if !defined(ASIO_HAS_SECURE_RTL)\n# if !defined(ASIO_DISABLE_SECURE_RTL)\n#  if defined(ASIO_MSVC) \\\n    && (ASIO_MSVC >= 1400) \\\n    && !defined(UNDER_CE)\n#   define ASIO_HAS_SECURE_RTL 1\n#  endif // defined(ASIO_MSVC)\n         // && (ASIO_MSVC >= 1400)\n         // && !defined(UNDER_CE)\n# endif // !defined(ASIO_DISABLE_SECURE_RTL)\n#endif // !defined(ASIO_HAS_SECURE_RTL)\n\n// Handler hooking. Disabled for ancient Borland C++ and gcc compilers.\n#if !defined(ASIO_HAS_HANDLER_HOOKS)\n# if !defined(ASIO_DISABLE_HANDLER_HOOKS)\n#  if defined(__GNUC__)\n#   if (__GNUC__ >= 3)\n#    define ASIO_HAS_HANDLER_HOOKS 1\n#   endif // (__GNUC__ >= 3)\n#  elif !defined(__BORLANDC__)\n#   define ASIO_HAS_HANDLER_HOOKS 1\n#  endif // !defined(__BORLANDC__)\n# endif // !defined(ASIO_DISABLE_HANDLER_HOOKS)\n#endif // !defined(ASIO_HAS_HANDLER_HOOKS)\n\n// Support for the __thread keyword extension.\n#if !defined(ASIO_DISABLE_THREAD_KEYWORD_EXTENSION)\n# if defined(__linux__)\n#  if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))\n#   if ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)\n#    if !defined(__INTEL_COMPILER) && !defined(__ICL) \\\n       && !(defined(__clang__) && defined(__ANDROID__))\n#     define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1\n#     define ASIO_THREAD_KEYWORD __thread\n#    elif defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100)\n#     define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1\n#    endif // defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100)\n           // && !(defined(__clang__) && defined(__ANDROID__))\n#   endif 
// ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)\n#  endif // defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))\n# endif // defined(__linux__)\n# if defined(ASIO_MSVC) && defined(ASIO_WINDOWS_RUNTIME)\n#  if (_MSC_VER >= 1700)\n#   define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1\n#   define ASIO_THREAD_KEYWORD __declspec(thread)\n#  endif // (_MSC_VER >= 1700)\n# endif // defined(ASIO_MSVC) && defined(ASIO_WINDOWS_RUNTIME)\n#endif // !defined(ASIO_DISABLE_THREAD_KEYWORD_EXTENSION)\n#if !defined(ASIO_THREAD_KEYWORD)\n# define ASIO_THREAD_KEYWORD __thread\n#endif // !defined(ASIO_THREAD_KEYWORD)\n\n// Support for POSIX ssize_t typedef.\n#if !defined(ASIO_DISABLE_SSIZE_T)\n# if defined(__linux__) \\\n   || (defined(__MACH__) && defined(__APPLE__))\n#  define ASIO_HAS_SSIZE_T 1\n# endif // defined(__linux__)\n        //   || (defined(__MACH__) && defined(__APPLE__))\n#endif // !defined(ASIO_DISABLE_SSIZE_T)\n\n// Helper macros to manage transition away from error_code return values.\n#if defined(ASIO_NO_DEPRECATED)\n# define ASIO_SYNC_OP_VOID void\n# define ASIO_SYNC_OP_VOID_RETURN(e) return\n#else // defined(ASIO_NO_DEPRECATED)\n# define ASIO_SYNC_OP_VOID asio::error_code\n# define ASIO_SYNC_OP_VOID_RETURN(e) return e\n#endif // defined(ASIO_NO_DEPRECATED)\n\n// Newer gcc, clang need special treatment to suppress unused typedef warnings.\n#if defined(__clang__)\n# if defined(__apple_build_version__)\n#  if (__clang_major__ >= 7)\n#   define ASIO_UNUSED_TYPEDEF __attribute__((__unused__))\n#  endif // (__clang_major__ >= 7)\n# elif ((__clang_major__ == 3) && (__clang_minor__ >= 6)) \\\n    || (__clang_major__ > 3)\n#  define ASIO_UNUSED_TYPEDEF __attribute__((__unused__))\n# endif // ((__clang_major__ == 3) && (__clang_minor__ >= 6))\n        //   || (__clang_major__ > 3)\n#elif defined(__GNUC__)\n# if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4)\n#  define ASIO_UNUSED_TYPEDEF __attribute__((__unused__))\n# endif // ((__GNUC__ == 
4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4)\n#endif // defined(__GNUC__)\n#if !defined(ASIO_UNUSED_TYPEDEF)\n# define ASIO_UNUSED_TYPEDEF\n#endif // !defined(ASIO_UNUSED_TYPEDEF)\n\n// Some versions of gcc generate spurious warnings about unused variables.\n#if defined(__GNUC__)\n# if (__GNUC__ >= 4)\n#  define ASIO_UNUSED_VARIABLE __attribute__((__unused__))\n# endif // (__GNUC__ >= 4)\n#endif // defined(__GNUC__)\n#if !defined(ASIO_UNUSED_VARIABLE)\n# define ASIO_UNUSED_VARIABLE\n#endif // !defined(ASIO_UNUSED_VARIABLE)\n\n// Support co_await on compilers known to allow it.\n#if !defined(ASIO_HAS_CO_AWAIT)\n# if !defined(ASIO_DISABLE_CO_AWAIT)\n#  if defined(ASIO_MSVC)\n#   if (_MSC_FULL_VER >= 190023506)\n#    if defined(_RESUMABLE_FUNCTIONS_SUPPORTED)\n#     define ASIO_HAS_CO_AWAIT 1\n#    endif // defined(_RESUMABLE_FUNCTIONS_SUPPORTED)\n#   endif // (_MSC_FULL_VER >= 190023506)\n#  endif // defined(ASIO_MSVC)\n# endif // !defined(ASIO_DISABLE_CO_AWAIT)\n# if defined(__clang__)\n#  if (__cplusplus >= 201703) && (__cpp_coroutines >= 201703)\n#   if __has_include(<experimental/coroutine>)\n#    define ASIO_HAS_CO_AWAIT 1\n#   endif // __has_include(<experimental/coroutine>)\n#  endif // (__cplusplus >= 201703) && (__cpp_coroutines >= 201703)\n# endif // defined(__clang__)\n#endif // !defined(ASIO_HAS_CO_AWAIT)\n\n#endif // ASIO_DETAIL_CONFIG_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/consuming_buffers.hpp",
    "content": "//\n// detail/consuming_buffers.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CONSUMING_BUFFERS_HPP\n#define ASIO_DETAIL_CONSUMING_BUFFERS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/buffer.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/limits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Helper template to determine the maximum number of prepared buffers.\ntemplate <typename Buffers>\nstruct prepared_buffers_max\n{\n  enum { value = buffer_sequence_adapter_base::max_buffers };\n};\n\ntemplate <typename Elem, std::size_t N>\nstruct prepared_buffers_max<boost::array<Elem, N> >\n{\n  enum { value = N };\n};\n\n#if defined(ASIO_HAS_STD_ARRAY)\n\ntemplate <typename Elem, std::size_t N>\nstruct prepared_buffers_max<std::array<Elem, N> >\n{\n  enum { value = N };\n};\n\n#endif // defined(ASIO_HAS_STD_ARRAY)\n\n// A buffer sequence used to represent a subsequence of the buffers.\ntemplate <typename Buffer, std::size_t MaxBuffers>\nstruct prepared_buffers\n{\n  typedef Buffer value_type;\n  typedef const Buffer* const_iterator;\n\n  enum { max_buffers = MaxBuffers < 16 ? 
MaxBuffers : 16 };\n\n  prepared_buffers() : count(0) {}\n  const_iterator begin() const { return elems; }\n  const_iterator end() const { return elems + count; }\n\n  Buffer elems[max_buffers];\n  std::size_t count;\n};\n\n// A proxy for a sub-range in a list of buffers.\ntemplate <typename Buffer, typename Buffers, typename Buffer_Iterator>\nclass consuming_buffers\n{\npublic:\n  typedef prepared_buffers<Buffer, prepared_buffers_max<Buffers>::value>\n    prepared_buffers_type;\n\n  // Construct to represent the entire list of buffers.\n  explicit consuming_buffers(const Buffers& buffers)\n    : buffers_(buffers),\n      total_consumed_(0),\n      next_elem_(0),\n      next_elem_offset_(0)\n  {\n    using asio::buffer_size;\n    total_size_ = buffer_size(buffers);\n  }\n\n  // Determine if we are at the end of the buffers.\n  bool empty() const\n  {\n    return total_consumed_ >= total_size_;\n  }\n\n  // Get the buffer for a single transfer, with a size.\n  prepared_buffers_type prepare(std::size_t max_size)\n  {\n    prepared_buffers_type result;\n\n    Buffer_Iterator next = asio::buffer_sequence_begin(buffers_);\n    Buffer_Iterator end = asio::buffer_sequence_end(buffers_);\n\n    std::advance(next, next_elem_);\n    std::size_t elem_offset = next_elem_offset_;\n    while (next != end && max_size > 0 && (result.count) < result.max_buffers)\n    {\n      Buffer next_buf = Buffer(*next) + elem_offset;\n      result.elems[result.count] = asio::buffer(next_buf, max_size);\n      max_size -= result.elems[result.count].size();\n      elem_offset = 0;\n      if (result.elems[result.count].size() > 0)\n        ++result.count;\n      ++next;\n    }\n\n    return result;\n  }\n\n  // Consume the specified number of bytes from the buffers.\n  void consume(std::size_t size)\n  {\n    total_consumed_ += size;\n\n    Buffer_Iterator next = asio::buffer_sequence_begin(buffers_);\n    Buffer_Iterator end = asio::buffer_sequence_end(buffers_);\n\n    std::advance(next, 
next_elem_);\n    while (next != end && size > 0)\n    {\n      Buffer next_buf = Buffer(*next) + next_elem_offset_;\n      if (size < next_buf.size())\n      {\n        next_elem_offset_ += size;\n        size = 0;\n      }\n      else\n      {\n        size -= next_buf.size();\n        next_elem_offset_ = 0;\n        ++next_elem_;\n        ++next;\n      }\n    }\n  }\n\n  // Get the total number of bytes consumed from the buffers.\n  std::size_t total_consumed() const\n  {\n    return total_consumed_;\n  }\n\nprivate:\n  Buffers buffers_;\n  std::size_t total_size_;\n  std::size_t total_consumed_;\n  std::size_t next_elem_;\n  std::size_t next_elem_offset_;\n};\n\n// Base class of all consuming_buffers specialisations for single buffers.\ntemplate <typename Buffer>\nclass consuming_single_buffer\n{\npublic:\n  // Construct to represent the entire list of buffers.\n  template <typename Buffer1>\n  explicit consuming_single_buffer(const Buffer1& buffer)\n    : buffer_(buffer),\n      total_consumed_(0)\n  {\n  }\n\n  // Determine if we are at the end of the buffers.\n  bool empty() const\n  {\n    return total_consumed_ >= buffer_.size();\n  }\n\n  // Get the buffer for a single transfer, with a size.\n  Buffer prepare(std::size_t max_size)\n  {\n    return asio::buffer(buffer_ + total_consumed_, max_size);\n  }\n\n  // Consume the specified number of bytes from the buffers.\n  void consume(std::size_t size)\n  {\n    total_consumed_ += size;\n  }\n\n  // Get the total number of bytes consumed from the buffers.\n  std::size_t total_consumed() const\n  {\n    return total_consumed_;\n  }\n\nprivate:\n  Buffer buffer_;\n  std::size_t total_consumed_;\n};\n\ntemplate <>\nclass consuming_buffers<mutable_buffer, mutable_buffer, const mutable_buffer*>\n  : public consuming_single_buffer<ASIO_MUTABLE_BUFFER>\n{\npublic:\n  explicit consuming_buffers(const mutable_buffer& buffer)\n    : consuming_single_buffer<ASIO_MUTABLE_BUFFER>(buffer)\n  {\n  }\n};\n\ntemplate 
<>\nclass consuming_buffers<const_buffer, mutable_buffer, const mutable_buffer*>\n  : public consuming_single_buffer<ASIO_CONST_BUFFER>\n{\npublic:\n  explicit consuming_buffers(const mutable_buffer& buffer)\n    : consuming_single_buffer<ASIO_CONST_BUFFER>(buffer)\n  {\n  }\n};\n\ntemplate <>\nclass consuming_buffers<const_buffer, const_buffer, const const_buffer*>\n  : public consuming_single_buffer<ASIO_CONST_BUFFER>\n{\npublic:\n  explicit consuming_buffers(const const_buffer& buffer)\n    : consuming_single_buffer<ASIO_CONST_BUFFER>(buffer)\n  {\n  }\n};\n\n#if !defined(ASIO_NO_DEPRECATED)\n\ntemplate <>\nclass consuming_buffers<mutable_buffer,\n    mutable_buffers_1, const mutable_buffer*>\n  : public consuming_single_buffer<ASIO_MUTABLE_BUFFER>\n{\npublic:\n  explicit consuming_buffers(const mutable_buffers_1& buffer)\n    : consuming_single_buffer<ASIO_MUTABLE_BUFFER>(buffer)\n  {\n  }\n};\n\ntemplate <>\nclass consuming_buffers<const_buffer, mutable_buffers_1, const mutable_buffer*>\n  : public consuming_single_buffer<ASIO_CONST_BUFFER>\n{\npublic:\n  explicit consuming_buffers(const mutable_buffers_1& buffer)\n    : consuming_single_buffer<ASIO_CONST_BUFFER>(buffer)\n  {\n  }\n};\n\ntemplate <>\nclass consuming_buffers<const_buffer, const_buffers_1, const const_buffer*>\n  : public consuming_single_buffer<ASIO_CONST_BUFFER>\n{\npublic:\n  explicit consuming_buffers(const const_buffers_1& buffer)\n    : consuming_single_buffer<ASIO_CONST_BUFFER>(buffer)\n  {\n  }\n};\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Buffer, typename Elem>\nclass consuming_buffers<Buffer, boost::array<Elem, 2>,\n    typename boost::array<Elem, 2>::const_iterator>\n{\npublic:\n  // Construct to represent the entire list of buffers.\n  explicit consuming_buffers(const boost::array<Elem, 2>& buffers)\n    : buffers_(buffers),\n      total_consumed_(0)\n  {\n  }\n\n  // Determine if we are at the end of the buffers.\n  bool empty() const\n  {\n    return 
total_consumed_ >=\n      Buffer(buffers_[0]).size() + Buffer(buffers_[1]).size();\n  }\n\n  // Get the buffer for a single transfer, with a size.\n  boost::array<Buffer, 2> prepare(std::size_t max_size)\n  {\n    boost::array<Buffer, 2> result = {{\n      Buffer(buffers_[0]), Buffer(buffers_[1]) }};\n    std::size_t buffer0_size = result[0].size();\n    result[0] = asio::buffer(result[0] + total_consumed_, max_size);\n    result[1] = asio::buffer(\n        result[1] + (total_consumed_ < buffer0_size\n          ? 0 : total_consumed_ - buffer0_size),\n        max_size - result[0].size());\n    return result;\n  }\n\n  // Consume the specified number of bytes from the buffers.\n  void consume(std::size_t size)\n  {\n    total_consumed_ += size;\n  }\n\n  // Get the total number of bytes consumed from the buffers.\n  std::size_t total_consumed() const\n  {\n    return total_consumed_;\n  }\n\nprivate:\n  boost::array<Elem, 2> buffers_;\n  std::size_t total_consumed_;\n};\n\n#if defined(ASIO_HAS_STD_ARRAY)\n\ntemplate <typename Buffer, typename Elem>\nclass consuming_buffers<Buffer, std::array<Elem, 2>,\n    typename std::array<Elem, 2>::const_iterator>\n{\npublic:\n  // Construct to represent the entire list of buffers.\n  explicit consuming_buffers(const std::array<Elem, 2>& buffers)\n    : buffers_(buffers),\n      total_consumed_(0)\n  {\n  }\n\n  // Determine if we are at the end of the buffers.\n  bool empty() const\n  {\n    return total_consumed_ >=\n      Buffer(buffers_[0]).size() + Buffer(buffers_[1]).size();\n  }\n\n  // Get the buffer for a single transfer, with a size.\n  std::array<Buffer, 2> prepare(std::size_t max_size)\n  {\n    std::array<Buffer, 2> result = {{\n      Buffer(buffers_[0]), Buffer(buffers_[1]) }};\n    std::size_t buffer0_size = result[0].size();\n    result[0] = asio::buffer(result[0] + total_consumed_, max_size);\n    result[1] = asio::buffer(\n        result[1] + (total_consumed_ < buffer0_size\n          ? 
0 : total_consumed_ - buffer0_size),\n        max_size - result[0].size());\n    return result;\n  }\n\n  // Consume the specified number of bytes from the buffers.\n  void consume(std::size_t size)\n  {\n    total_consumed_ += size;\n  }\n\n  // Get the total number of bytes consumed from the buffers.\n  std::size_t total_consumed() const\n  {\n    return total_consumed_;\n  }\n\nprivate:\n  std::array<Elem, 2> buffers_;\n  std::size_t total_consumed_;\n};\n\n#endif // defined(ASIO_HAS_STD_ARRAY)\n\n// Specialisation for null_buffers to ensure that the null_buffers type is\n// always passed through to the underlying read or write operation.\ntemplate <typename Buffer>\nclass consuming_buffers<Buffer, null_buffers, const mutable_buffer*>\n  : public asio::null_buffers\n{\npublic:\n  consuming_buffers(const null_buffers&)\n  {\n    // No-op.\n  }\n\n  bool empty()\n  {\n    return false;\n  }\n\n  null_buffers prepare(std::size_t)\n  {\n    return null_buffers();\n  }\n\n  void consume(std::size_t)\n  {\n    // No-op.\n  }\n\n  std::size_t total_consumed() const\n  {\n    return 0;\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_CONSUMING_BUFFERS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/cstddef.hpp",
    "content": "//\n// detail/cstddef.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CSTDDEF_HPP\n#define ASIO_DETAIL_CSTDDEF_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n\nnamespace asio {\n\n#if defined(ASIO_HAS_NULLPTR)\nusing std::nullptr_t;\n#else // defined(ASIO_HAS_NULLPTR)\nstruct nullptr_t {};\n#endif // defined(ASIO_HAS_NULLPTR)\n\n} // namespace asio\n\n#endif // ASIO_DETAIL_CSTDDEF_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/cstdint.hpp",
    "content": "//\n// detail/cstdint.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_CSTDINT_HPP\n#define ASIO_DETAIL_CSTDINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_CSTDINT)\n# include <cstdint>\n#else // defined(ASIO_HAS_CSTDINT)\n# include <boost/cstdint.hpp>\n#endif // defined(ASIO_HAS_CSTDINT)\n\nnamespace asio {\n\n#if defined(ASIO_HAS_CSTDINT)\nusing std::int16_t;\nusing std::int_least16_t;\nusing std::uint16_t;\nusing std::uint_least16_t;\nusing std::int32_t;\nusing std::int_least32_t;\nusing std::uint32_t;\nusing std::uint_least32_t;\nusing std::int64_t;\nusing std::int_least64_t;\nusing std::uint64_t;\nusing std::uint_least64_t;\nusing std::uintmax_t;\n#else // defined(ASIO_HAS_CSTDINT)\nusing boost::int16_t;\nusing boost::int_least16_t;\nusing boost::uint16_t;\nusing boost::uint_least16_t;\nusing boost::int32_t;\nusing boost::int_least32_t;\nusing boost::uint32_t;\nusing boost::uint_least32_t;\nusing boost::int64_t;\nusing boost::int_least64_t;\nusing boost::uint64_t;\nusing boost::uint_least64_t;\nusing boost::uintmax_t;\n#endif // defined(ASIO_HAS_CSTDINT)\n\n} // namespace asio\n\n#endif // ASIO_DETAIL_CSTDINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/date_time_fwd.hpp",
    "content": "//\n// detail/date_time_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_DATE_TIME_FWD_HPP\n#define ASIO_DETAIL_DATE_TIME_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\nnamespace boost {\nnamespace date_time {\n\ntemplate<class T, class TimeSystem>\nclass base_time;\n\n} // namespace date_time\nnamespace posix_time {\n\nclass ptime;\n\n} // namespace posix_time\n} // namespace boost\n\n#endif // ASIO_DETAIL_DATE_TIME_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/deadline_timer_service.hpp",
    "content": "//\n// detail/deadline_timer_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP\n#define ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/timer_queue.hpp\"\n#include \"asio/detail/timer_queue_ptime.hpp\"\n#include \"asio/detail/timer_scheduler.hpp\"\n#include \"asio/detail/wait_handler.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include <chrono>\n# include <thread>\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Time_Traits>\nclass deadline_timer_service\n  : public execution_context_service_base<deadline_timer_service<Time_Traits> >\n{\npublic:\n  // The time type.\n  typedef typename Time_Traits::time_type time_type;\n\n  // The duration type.\n  typedef typename Time_Traits::duration_type duration_type;\n\n  // The implementation type of the timer. 
This type is dependent on the\n  // underlying implementation of the timer service.\n  struct implementation_type\n    : private asio::detail::noncopyable\n  {\n    time_type expiry;\n    bool might_have_pending_waits;\n    typename timer_queue<Time_Traits>::per_timer_data timer_data;\n  };\n\n  // Constructor.\n  deadline_timer_service(execution_context& context)\n    : execution_context_service_base<\n        deadline_timer_service<Time_Traits> >(context),\n      scheduler_(asio::use_service<timer_scheduler>(context))\n  {\n    scheduler_.init_task();\n    scheduler_.add_timer_queue(timer_queue_);\n  }\n\n  // Destructor.\n  ~deadline_timer_service()\n  {\n    scheduler_.remove_timer_queue(timer_queue_);\n  }\n\n  // Destroy all user-defined handler objects owned by the service.\n  void shutdown()\n  {\n  }\n\n  // Construct a new timer implementation.\n  void construct(implementation_type& impl)\n  {\n    impl.expiry = time_type();\n    impl.might_have_pending_waits = false;\n  }\n\n  // Destroy a timer implementation.\n  void destroy(implementation_type& impl)\n  {\n    asio::error_code ec;\n    cancel(impl, ec);\n  }\n\n  // Move-construct a new serial port implementation.\n  void move_construct(implementation_type& impl,\n      implementation_type& other_impl)\n  {\n    scheduler_.move_timer(timer_queue_, impl.timer_data, other_impl.timer_data);\n\n    impl.expiry = other_impl.expiry;\n    other_impl.expiry = time_type();\n\n    impl.might_have_pending_waits = other_impl.might_have_pending_waits;\n    other_impl.might_have_pending_waits = false;\n  }\n\n  // Move-assign from another serial port implementation.\n  void move_assign(implementation_type& impl,\n      deadline_timer_service& other_service,\n      implementation_type& other_impl)\n  {\n    if (this != &other_service)\n      if (impl.might_have_pending_waits)\n        scheduler_.cancel_timer(timer_queue_, impl.timer_data);\n\n    other_service.scheduler_.move_timer(other_service.timer_queue_,\n      
  impl.timer_data, other_impl.timer_data);\n\n    impl.expiry = other_impl.expiry;\n    other_impl.expiry = time_type();\n\n    impl.might_have_pending_waits = other_impl.might_have_pending_waits;\n    other_impl.might_have_pending_waits = false;\n  }\n\n  // Cancel any asynchronous wait operations associated with the timer.\n  std::size_t cancel(implementation_type& impl, asio::error_code& ec)\n  {\n    if (!impl.might_have_pending_waits)\n    {\n      ec = asio::error_code();\n      return 0;\n    }\n\n    ASIO_HANDLER_OPERATION((scheduler_.context(),\n          \"deadline_timer\", &impl, 0, \"cancel\"));\n\n    std::size_t count = scheduler_.cancel_timer(timer_queue_, impl.timer_data);\n    impl.might_have_pending_waits = false;\n    ec = asio::error_code();\n    return count;\n  }\n\n  // Cancels one asynchronous wait operation associated with the timer.\n  std::size_t cancel_one(implementation_type& impl,\n      asio::error_code& ec)\n  {\n    if (!impl.might_have_pending_waits)\n    {\n      ec = asio::error_code();\n      return 0;\n    }\n\n    ASIO_HANDLER_OPERATION((scheduler_.context(),\n          \"deadline_timer\", &impl, 0, \"cancel_one\"));\n\n    std::size_t count = scheduler_.cancel_timer(\n        timer_queue_, impl.timer_data, 1);\n    if (count == 0)\n      impl.might_have_pending_waits = false;\n    ec = asio::error_code();\n    return count;\n  }\n\n  // Get the expiry time for the timer as an absolute time.\n  time_type expiry(const implementation_type& impl) const\n  {\n    return impl.expiry;\n  }\n\n  // Get the expiry time for the timer as an absolute time.\n  time_type expires_at(const implementation_type& impl) const\n  {\n    return impl.expiry;\n  }\n\n  // Get the expiry time for the timer relative to now.\n  duration_type expires_from_now(const implementation_type& impl) const\n  {\n    return Time_Traits::subtract(this->expiry(impl), Time_Traits::now());\n  }\n\n  // Set the expiry time for the timer as an absolute time.\n  
std::size_t expires_at(implementation_type& impl,\n      const time_type& expiry_time, asio::error_code& ec)\n  {\n    std::size_t count = cancel(impl, ec);\n    impl.expiry = expiry_time;\n    ec = asio::error_code();\n    return count;\n  }\n\n  // Set the expiry time for the timer relative to now.\n  std::size_t expires_after(implementation_type& impl,\n      const duration_type& expiry_time, asio::error_code& ec)\n  {\n    return expires_at(impl,\n        Time_Traits::add(Time_Traits::now(), expiry_time), ec);\n  }\n\n  // Set the expiry time for the timer relative to now.\n  std::size_t expires_from_now(implementation_type& impl,\n      const duration_type& expiry_time, asio::error_code& ec)\n  {\n    return expires_at(impl,\n        Time_Traits::add(Time_Traits::now(), expiry_time), ec);\n  }\n\n  // Perform a blocking wait on the timer.\n  void wait(implementation_type& impl, asio::error_code& ec)\n  {\n    time_type now = Time_Traits::now();\n    ec = asio::error_code();\n    while (Time_Traits::less_than(now, impl.expiry) && !ec)\n    {\n      this->do_wait(Time_Traits::to_posix_duration(\n            Time_Traits::subtract(impl.expiry, now)), ec);\n      now = Time_Traits::now();\n    }\n  }\n\n  // Start an asynchronous wait on the timer.\n  template <typename Handler, typename IoExecutor>\n  void async_wait(implementation_type& impl,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef wait_handler<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    impl.might_have_pending_waits = true;\n\n    ASIO_HANDLER_CREATION((scheduler_.context(),\n          *p.p, \"deadline_timer\", &impl, 0, \"async_wait\"));\n\n    scheduler_.schedule_timer(timer_queue_, impl.expiry, impl.timer_data, p.p);\n    p.v = p.p = 0;\n  }\n\nprivate:\n  // Helper function to wait given 
a duration type. The duration type should\n  // either be of type boost::posix_time::time_duration, or implement the\n  // required subset of its interface.\n  template <typename Duration>\n  void do_wait(const Duration& timeout, asio::error_code& ec)\n  {\n#if defined(ASIO_WINDOWS_RUNTIME)\n    std::this_thread::sleep_for(\n        std::chrono::seconds(timeout.total_seconds())\n        + std::chrono::microseconds(timeout.total_microseconds()));\n    ec = asio::error_code();\n#else // defined(ASIO_WINDOWS_RUNTIME)\n    ::timeval tv;\n    tv.tv_sec = timeout.total_seconds();\n    tv.tv_usec = timeout.total_microseconds() % 1000000;\n    socket_ops::select(0, 0, 0, 0, &tv, ec);\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n  }\n\n  // The queue of timers.\n  timer_queue<Time_Traits> timer_queue_;\n\n  // The object that schedules and executes timers. Usually a reactor.\n  timer_scheduler& scheduler_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/dependent_type.hpp",
    "content": "//\n// detail/dependent_type.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_DEPENDENT_TYPE_HPP\n#define ASIO_DETAIL_DEPENDENT_TYPE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename DependsOn, typename T>\nstruct dependent_type\n{\n  typedef T type;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_DEPENDENT_TYPE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/descriptor_ops.hpp",
    "content": "//\n// detail/descriptor_ops.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_DESCRIPTOR_OPS_HPP\n#define ASIO_DETAIL_DESCRIPTOR_OPS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n\n#include <cstddef>\n#include \"asio/error.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\nnamespace descriptor_ops {\n\n// Descriptor state bits.\nenum\n{\n  // The user wants a non-blocking descriptor.\n  user_set_non_blocking = 1,\n\n  // The descriptor has been set non-blocking.\n  internal_non_blocking = 2,\n\n  // Helper \"state\" used to determine whether the descriptor is non-blocking.\n  non_blocking = user_set_non_blocking | internal_non_blocking,\n\n  // The descriptor may have been dup()-ed.\n  possible_dup = 4\n};\n\ntypedef unsigned char state_type;\n\ntemplate <typename ReturnType>\ninline ReturnType error_wrapper(ReturnType return_value,\n    asio::error_code& ec)\n{\n  ec = asio::error_code(errno,\n      asio::error::get_system_category());\n  return return_value;\n}\n\nASIO_DECL int open(const char* path, int flags,\n    asio::error_code& ec);\n\nASIO_DECL int close(int d, state_type& state,\n    asio::error_code& ec);\n\nASIO_DECL bool set_user_non_blocking(int d,\n    state_type& state, bool value, asio::error_code& ec);\n\nASIO_DECL bool set_internal_non_blocking(int d,\n    state_type& state, bool value, asio::error_code& ec);\n\ntypedef iovec buf;\n\nASIO_DECL std::size_t 
sync_read(int d, state_type state, buf* bufs,\n    std::size_t count, bool all_empty, asio::error_code& ec);\n\nASIO_DECL bool non_blocking_read(int d, buf* bufs, std::size_t count,\n    asio::error_code& ec, std::size_t& bytes_transferred);\n\nASIO_DECL std::size_t sync_write(int d, state_type state,\n    const buf* bufs, std::size_t count, bool all_empty,\n    asio::error_code& ec);\n\nASIO_DECL bool non_blocking_write(int d,\n    const buf* bufs, std::size_t count,\n    asio::error_code& ec, std::size_t& bytes_transferred);\n\nASIO_DECL int ioctl(int d, state_type& state, long cmd,\n    ioctl_arg_type* arg, asio::error_code& ec);\n\nASIO_DECL int fcntl(int d, int cmd, asio::error_code& ec);\n\nASIO_DECL int fcntl(int d, int cmd,\n    long arg, asio::error_code& ec);\n\nASIO_DECL int poll_read(int d,\n    state_type state, asio::error_code& ec);\n\nASIO_DECL int poll_write(int d,\n    state_type state, asio::error_code& ec);\n\nASIO_DECL int poll_error(int d,\n    state_type state, asio::error_code& ec);\n\n} // namespace descriptor_ops\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/descriptor_ops.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_DESCRIPTOR_OPS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/descriptor_read_op.hpp",
    "content": "//\n// detail/descriptor_read_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP\n#define ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/descriptor_ops.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_work.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence>\nclass descriptor_read_op_base : public reactor_op\n{\npublic:\n  descriptor_read_op_base(int descriptor,\n      const MutableBufferSequence& buffers, func_type complete_func)\n    : reactor_op(&descriptor_read_op_base::do_perform, complete_func),\n      descriptor_(descriptor),\n      buffers_(buffers)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    descriptor_read_op_base* o(static_cast<descriptor_read_op_base*>(base));\n\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(o->buffers_);\n\n    status result = descriptor_ops::non_blocking_read(o->descriptor_,\n        bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_)\n      ? 
done : not_done;\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_read\",\n          o->ec_, o->bytes_transferred_));\n\n    return result;\n  }\n\nprivate:\n  int descriptor_;\n  MutableBufferSequence buffers_;\n};\n\ntemplate <typename MutableBufferSequence, typename Handler, typename IoExecutor>\nclass descriptor_read_op\n  : public descriptor_read_op_base<MutableBufferSequence>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(descriptor_read_op);\n\n  descriptor_read_op(int descriptor, const MutableBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n    : descriptor_read_op_base<MutableBufferSequence>(\n        descriptor, buffers, &descriptor_read_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    descriptor_read_op* o(static_cast<descriptor_read_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->bytes_transferred_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/descriptor_write_op.hpp",
    "content": "//\n// detail/descriptor_write_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP\n#define ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/descriptor_ops.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_work.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename ConstBufferSequence>\nclass descriptor_write_op_base : public reactor_op\n{\npublic:\n  descriptor_write_op_base(int descriptor,\n      const ConstBufferSequence& buffers, func_type complete_func)\n    : reactor_op(&descriptor_write_op_base::do_perform, complete_func),\n      descriptor_(descriptor),\n      buffers_(buffers)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    descriptor_write_op_base* o(static_cast<descriptor_write_op_base*>(base));\n\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(o->buffers_);\n\n    status result = descriptor_ops::non_blocking_write(o->descriptor_,\n        bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_)\n      ? 
done : not_done;\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_write\",\n          o->ec_, o->bytes_transferred_));\n\n    return result;\n  }\n\nprivate:\n  int descriptor_;\n  ConstBufferSequence buffers_;\n};\n\ntemplate <typename ConstBufferSequence, typename Handler, typename IoExecutor>\nclass descriptor_write_op\n  : public descriptor_write_op_base<ConstBufferSequence>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(descriptor_write_op);\n\n  descriptor_write_op(int descriptor, const ConstBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n    : descriptor_write_op_base<ConstBufferSequence>(\n        descriptor, buffers, &descriptor_write_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    descriptor_write_op* o(static_cast<descriptor_write_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->bytes_transferred_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/dev_poll_reactor.hpp",
    "content": "//\n// detail/dev_poll_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_DEV_POLL_REACTOR_HPP\n#define ASIO_DETAIL_DEV_POLL_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_DEV_POLL)\n\n#include <cstddef>\n#include <vector>\n#include <sys/devpoll.h>\n#include \"asio/detail/hash_map.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/reactor_op_queue.hpp\"\n#include \"asio/detail/select_interrupter.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/timer_queue_base.hpp\"\n#include \"asio/detail/timer_queue_set.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass dev_poll_reactor\n  : public execution_context_service_base<dev_poll_reactor>\n{\npublic:\n  enum op_types { read_op = 0, write_op = 1,\n    connect_op = 1, except_op = 2, max_ops = 3 };\n\n  // Per-descriptor data.\n  struct per_descriptor_data\n  {\n  };\n\n  // Constructor.\n  ASIO_DECL dev_poll_reactor(asio::execution_context& ctx);\n\n  // Destructor.\n  ASIO_DECL ~dev_poll_reactor();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Recreate internal descriptors following a fork.\n  ASIO_DECL void notify_fork(\n      asio::execution_context::fork_event fork_ev);\n\n  // Initialise the task.\n  ASIO_DECL void init_task();\n\n  // Register a socket with the 
reactor. Returns 0 on success, system error\n  // code on failure.\n  ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&);\n\n  // Register a descriptor with an associated single operation. Returns 0 on\n  // success, system error code on failure.\n  ASIO_DECL int register_internal_descriptor(\n      int op_type, socket_type descriptor,\n      per_descriptor_data& descriptor_data, reactor_op* op);\n\n  // Move descriptor registration from one descriptor_data object to another.\n  ASIO_DECL void move_descriptor(socket_type descriptor,\n      per_descriptor_data& target_descriptor_data,\n      per_descriptor_data& source_descriptor_data);\n\n  // Post a reactor operation for immediate completion.\n  void post_immediate_completion(reactor_op* op, bool is_continuation)\n  {\n    scheduler_.post_immediate_completion(op, is_continuation);\n  }\n\n  // Start a new operation. The reactor operation will be performed when the\n  // given descriptor is flagged as ready, or an error has occurred.\n  ASIO_DECL void start_op(int op_type, socket_type descriptor,\n      per_descriptor_data&, reactor_op* op,\n      bool is_continuation, bool allow_speculative);\n\n  // Cancel all operations associated with the given descriptor. The\n  // handlers associated with the descriptor will be invoked with the\n  // operation_aborted error.\n  ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&);\n\n  // Cancel any operations that are running against the descriptor and remove\n  // its registration from the reactor. The reactor resources associated with\n  // the descriptor must be released by calling cleanup_descriptor_data.\n  ASIO_DECL void deregister_descriptor(socket_type descriptor,\n      per_descriptor_data&, bool closing);\n\n  // Remove the descriptor's registration from the reactor. 
The reactor\n  // resources associated with the descriptor must be released by calling\n  // cleanup_descriptor_data.\n  ASIO_DECL void deregister_internal_descriptor(\n      socket_type descriptor, per_descriptor_data&);\n\n  // Perform any post-deregistration cleanup tasks associated with the\n  // descriptor data.\n  ASIO_DECL void cleanup_descriptor_data(per_descriptor_data&);\n\n  // Add a new timer queue to the reactor.\n  template <typename Time_Traits>\n  void add_timer_queue(timer_queue<Time_Traits>& queue);\n\n  // Remove a timer queue from the reactor.\n  template <typename Time_Traits>\n  void remove_timer_queue(timer_queue<Time_Traits>& queue);\n\n  // Schedule a new operation in the given timer queue to expire at the\n  // specified absolute time.\n  template <typename Time_Traits>\n  void schedule_timer(timer_queue<Time_Traits>& queue,\n      const typename Time_Traits::time_type& time,\n      typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op);\n\n  // Cancel the timer operations associated with the given token. Returns the\n  // number of operations that have been posted or dispatched.\n  template <typename Time_Traits>\n  std::size_t cancel_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& timer,\n      std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());\n\n  // Move the timer operations associated with the given timer.\n  template <typename Time_Traits>\n  void move_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& target,\n      typename timer_queue<Time_Traits>::per_timer_data& source);\n\n  // Run /dev/poll once until interrupted or events are ready to be dispatched.\n  ASIO_DECL void run(long usec, op_queue<operation>& ops);\n\n  // Interrupt the select loop.\n  ASIO_DECL void interrupt();\n\nprivate:\n  // Create the /dev/poll file descriptor. 
Throws an exception if the descriptor\n  // cannot be created.\n  ASIO_DECL static int do_dev_poll_create();\n\n  // Helper function to add a new timer queue.\n  ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);\n\n  // Helper function to remove a timer queue.\n  ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);\n\n  // Get the timeout value for the /dev/poll DP_POLL operation. The timeout\n  // value is returned as a number of milliseconds. A return value of -1\n  // indicates that the poll should block indefinitely.\n  ASIO_DECL int get_timeout(int msec);\n\n  // Cancel all operations associated with the given descriptor. The do_cancel\n  // function of the handler objects will be invoked. This function does not\n  // acquire the dev_poll_reactor's mutex.\n  ASIO_DECL void cancel_ops_unlocked(socket_type descriptor,\n      const asio::error_code& ec);\n\n  // Add a pending event entry for the given descriptor.\n  ASIO_DECL ::pollfd& add_pending_event_change(int descriptor);\n\n  // The scheduler implementation used to post completions.\n  scheduler& scheduler_;\n\n  // Mutex to protect access to internal data.\n  asio::detail::mutex mutex_;\n\n  // The /dev/poll file descriptor.\n  int dev_poll_fd_;\n\n  // Vector of /dev/poll events waiting to be written to the descriptor.\n  std::vector< ::pollfd> pending_event_changes_;\n\n  // Hash map to associate a descriptor with a pending event change index.\n  hash_map<int, std::size_t> pending_event_change_index_;\n\n  // The interrupter is used to break a blocking DP_POLL operation.\n  select_interrupter interrupter_;\n\n  // The queues of read, write and except operations.\n  reactor_op_queue<socket_type> op_queue_[max_ops];\n\n  // The timer queues.\n  timer_queue_set timer_queues_;\n\n  // Whether the service has been shut down.\n  bool shutdown_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include 
\"asio/detail/impl/dev_poll_reactor.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/dev_poll_reactor.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_DEV_POLL)\n\n#endif // ASIO_DETAIL_DEV_POLL_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/epoll_reactor.hpp",
    "content": "//\n// detail/epoll_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_EPOLL_REACTOR_HPP\n#define ASIO_DETAIL_EPOLL_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_EPOLL)\n\n#include \"asio/detail/atomic_count.hpp\"\n#include \"asio/detail/conditionally_enabled_mutex.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/object_pool.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/select_interrupter.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/timer_queue_base.hpp\"\n#include \"asio/detail/timer_queue_set.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#if defined(ASIO_HAS_TIMERFD)\n# include <sys/timerfd.h>\n#endif // defined(ASIO_HAS_TIMERFD)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass epoll_reactor\n  : public execution_context_service_base<epoll_reactor>\n{\nprivate:\n  // The mutex type used by this reactor.\n  typedef conditionally_enabled_mutex mutex;\n\npublic:\n  enum op_types { read_op = 0, write_op = 1,\n    connect_op = 1, except_op = 2, max_ops = 3 };\n\n  // Per-descriptor queues.\n  class descriptor_state : operation\n  {\n    friend class epoll_reactor;\n    friend class object_pool_access;\n\n    descriptor_state* next_;\n    descriptor_state* prev_;\n\n    mutex mutex_;\n    epoll_reactor* reactor_;\n    int descriptor_;\n    uint32_t registered_events_;\n    op_queue<reactor_op> op_queue_[max_ops];\n    bool try_speculative_[max_ops];\n    bool 
shutdown_;\n\n    ASIO_DECL descriptor_state(bool locking);\n    void set_ready_events(uint32_t events) { task_result_ = events; }\n    void add_ready_events(uint32_t events) { task_result_ |= events; }\n    ASIO_DECL operation* perform_io(uint32_t events);\n    ASIO_DECL static void do_complete(\n        void* owner, operation* base,\n        const asio::error_code& ec, std::size_t bytes_transferred);\n  };\n\n  // Per-descriptor data.\n  typedef descriptor_state* per_descriptor_data;\n\n  // Constructor.\n  ASIO_DECL epoll_reactor(asio::execution_context& ctx);\n\n  // Destructor.\n  ASIO_DECL ~epoll_reactor();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Recreate internal descriptors following a fork.\n  ASIO_DECL void notify_fork(\n      asio::execution_context::fork_event fork_ev);\n\n  // Initialise the task.\n  ASIO_DECL void init_task();\n\n  // Register a socket with the reactor. Returns 0 on success, system error\n  // code on failure.\n  ASIO_DECL int register_descriptor(socket_type descriptor,\n      per_descriptor_data& descriptor_data);\n\n  // Register a descriptor with an associated single operation. Returns 0 on\n  // success, system error code on failure.\n  ASIO_DECL int register_internal_descriptor(\n      int op_type, socket_type descriptor,\n      per_descriptor_data& descriptor_data, reactor_op* op);\n\n  // Move descriptor registration from one descriptor_data object to another.\n  ASIO_DECL void move_descriptor(socket_type descriptor,\n      per_descriptor_data& target_descriptor_data,\n      per_descriptor_data& source_descriptor_data);\n\n  // Post a reactor operation for immediate completion.\n  void post_immediate_completion(reactor_op* op, bool is_continuation)\n  {\n    scheduler_.post_immediate_completion(op, is_continuation);\n  }\n\n  // Start a new operation. 
The reactor operation will be performed when the\n  // given descriptor is flagged as ready, or an error has occurred.\n  ASIO_DECL void start_op(int op_type, socket_type descriptor,\n      per_descriptor_data& descriptor_data, reactor_op* op,\n      bool is_continuation, bool allow_speculative);\n\n  // Cancel all operations associated with the given descriptor. The\n  // handlers associated with the descriptor will be invoked with the\n  // operation_aborted error.\n  ASIO_DECL void cancel_ops(socket_type descriptor,\n      per_descriptor_data& descriptor_data);\n\n  // Cancel any operations that are running against the descriptor and remove\n  // its registration from the reactor. The reactor resources associated with\n  // the descriptor must be released by calling cleanup_descriptor_data.\n  ASIO_DECL void deregister_descriptor(socket_type descriptor,\n      per_descriptor_data& descriptor_data, bool closing);\n\n  // Remove the descriptor's registration from the reactor. The reactor\n  // resources associated with the descriptor must be released by calling\n  // cleanup_descriptor_data.\n  ASIO_DECL void deregister_internal_descriptor(\n      socket_type descriptor, per_descriptor_data& descriptor_data);\n\n  // Perform any post-deregistration cleanup tasks associated with the\n  // descriptor data.\n  ASIO_DECL void cleanup_descriptor_data(\n      per_descriptor_data& descriptor_data);\n\n  // Add a new timer queue to the reactor.\n  template <typename Time_Traits>\n  void add_timer_queue(timer_queue<Time_Traits>& timer_queue);\n\n  // Remove a timer queue from the reactor.\n  template <typename Time_Traits>\n  void remove_timer_queue(timer_queue<Time_Traits>& timer_queue);\n\n  // Schedule a new operation in the given timer queue to expire at the\n  // specified absolute time.\n  template <typename Time_Traits>\n  void schedule_timer(timer_queue<Time_Traits>& queue,\n      const typename Time_Traits::time_type& time,\n      typename 
timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op);\n\n  // Cancel the timer operations associated with the given token. Returns the\n  // number of operations that have been posted or dispatched.\n  template <typename Time_Traits>\n  std::size_t cancel_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& timer,\n      std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());\n\n  // Move the timer operations associated with the given timer.\n  template <typename Time_Traits>\n  void move_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& target,\n      typename timer_queue<Time_Traits>::per_timer_data& source);\n\n  // Run epoll once until interrupted or events are ready to be dispatched.\n  ASIO_DECL void run(long usec, op_queue<operation>& ops);\n\n  // Interrupt the select loop.\n  ASIO_DECL void interrupt();\n\nprivate:\n  // The hint to pass to epoll_create to size its data structures.\n  enum { epoll_size = 20000 };\n\n  // Create the epoll file descriptor. Throws an exception if the descriptor\n  // cannot be created.\n  ASIO_DECL static int do_epoll_create();\n\n  // Create the timerfd file descriptor. Does not throw.\n  ASIO_DECL static int do_timerfd_create();\n\n  // Allocate a new descriptor state object.\n  ASIO_DECL descriptor_state* allocate_descriptor_state();\n\n  // Free an existing descriptor state object.\n  ASIO_DECL void free_descriptor_state(descriptor_state* s);\n\n  // Helper function to add a new timer queue.\n  ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);\n\n  // Helper function to remove a timer queue.\n  ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);\n\n  // Called to recalculate and update the timeout.\n  ASIO_DECL void update_timeout();\n\n  // Get the timeout value for the epoll_wait call. The timeout value is\n  // returned as a number of milliseconds. 
A return value of -1 indicates\n  // that epoll_wait should block indefinitely.\n  ASIO_DECL int get_timeout(int msec);\n\n#if defined(ASIO_HAS_TIMERFD)\n  // Get the timeout value for the timer descriptor. The return value is the\n  // flag argument to be used when calling timerfd_settime.\n  ASIO_DECL int get_timeout(itimerspec& ts);\n#endif // defined(ASIO_HAS_TIMERFD)\n\n  // The scheduler implementation used to post completions.\n  scheduler& scheduler_;\n\n  // Mutex to protect access to internal data.\n  mutex mutex_;\n\n  // The interrupter is used to break a blocking epoll_wait call.\n  select_interrupter interrupter_;\n\n  // The epoll file descriptor.\n  int epoll_fd_;\n\n  // The timer file descriptor.\n  int timer_fd_;\n\n  // The timer queues.\n  timer_queue_set timer_queues_;\n\n  // Whether the service has been shut down.\n  bool shutdown_;\n\n  // Mutex to protect access to the registered descriptors.\n  mutex registered_descriptors_mutex_;\n\n  // Keep track of all registered descriptors.\n  object_pool<descriptor_state> registered_descriptors_;\n\n  // Helper class to do post-perform_io cleanup.\n  struct perform_io_cleanup_on_block_exit;\n  friend struct perform_io_cleanup_on_block_exit;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/detail/impl/epoll_reactor.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/epoll_reactor.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_EPOLL)\n\n#endif // ASIO_DETAIL_EPOLL_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/event.hpp",
    "content": "//\n// detail/event.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_EVENT_HPP\n#define ASIO_DETAIL_EVENT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n# include \"asio/detail/null_event.hpp\"\n#elif defined(ASIO_WINDOWS)\n# include \"asio/detail/win_event.hpp\"\n#elif defined(ASIO_HAS_PTHREADS)\n# include \"asio/detail/posix_event.hpp\"\n#elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n# include \"asio/detail/std_event.hpp\"\n#else\n# error Only Windows, POSIX and std::condition_variable are supported!\n#endif\n\nnamespace asio {\nnamespace detail {\n\n#if !defined(ASIO_HAS_THREADS)\ntypedef null_event event;\n#elif defined(ASIO_WINDOWS)\ntypedef win_event event;\n#elif defined(ASIO_HAS_PTHREADS)\ntypedef posix_event event;\n#elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\ntypedef std_event event;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_EVENT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/eventfd_select_interrupter.hpp",
    "content": "//\n// detail/eventfd_select_interrupter.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP\n#define ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_EVENTFD)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass eventfd_select_interrupter\n{\npublic:\n  // Constructor.\n  ASIO_DECL eventfd_select_interrupter();\n\n  // Destructor.\n  ASIO_DECL ~eventfd_select_interrupter();\n\n  // Recreate the interrupter's descriptors. Used after a fork.\n  ASIO_DECL void recreate();\n\n  // Interrupt the select call.\n  ASIO_DECL void interrupt();\n\n  // Reset the select interrupt. Returns true if the call was interrupted.\n  ASIO_DECL bool reset();\n\n  // Get the read descriptor to be passed to select.\n  int read_descriptor() const\n  {\n    return read_descriptor_;\n  }\n\nprivate:\n  // Open the descriptors. Throws on error.\n  ASIO_DECL void open_descriptors();\n\n  // Close the descriptors.\n  ASIO_DECL void close_descriptors();\n\n  // The read end of a connection used to interrupt the select call. This file\n  // descriptor is passed to select such that when it is time to stop, a single\n  // 64bit value will be written on the other end of the connection and this\n  // descriptor will become readable.\n  int read_descriptor_;\n\n  // The write end of a connection used to interrupt the select call. 
A single\n  // 64bit non-zero value may be written to this to wake up the select which is\n  // waiting for the other end to become readable. This descriptor will only\n  // differ from the read descriptor when a pipe is used.\n  int write_descriptor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/eventfd_select_interrupter.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_EVENTFD)\n\n#endif // ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/executor_function.hpp",
    "content": "//\n// detail/executor_function.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_EXECUTOR_FUNCTION_HPP\n#define ASIO_DETAIL_EXECUTOR_FUNCTION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass executor_function_base\n{\npublic:\n  void complete()\n  {\n    func_(this, true);\n  }\n\n  void destroy()\n  {\n    func_(this, false);\n  }\n\nprotected:\n  typedef void (*func_type)(executor_function_base*, bool);\n\n  executor_function_base(func_type func)\n    : func_(func)\n  {\n  }\n\n  // Prevents deletion through this type.\n  ~executor_function_base()\n  {\n  }\n\nprivate:\n  func_type func_;\n};\n\ntemplate <typename Function, typename Alloc>\nclass executor_function : public executor_function_base\n{\npublic:\n  ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR(\n      thread_info_base::executor_function_tag, executor_function);\n\n  template <typename F>\n  executor_function(ASIO_MOVE_ARG(F) f, const Alloc& allocator)\n    : executor_function_base(&executor_function::do_complete),\n      function_(ASIO_MOVE_CAST(F)(f)),\n      allocator_(allocator)\n  {\n  }\n\n  static void do_complete(executor_function_base* base, bool call)\n  {\n    // Take ownership of the function object.\n    executor_function* o(static_cast<executor_function*>(base));\n    Alloc allocator(o->allocator_);\n    ptr p = { detail::addressof(allocator), o, o };\n\n    // Make a copy of the function so that the memory can be deallocated before\n    // the upcall is made. 
Even if we're not about to make an upcall, a\n    // sub-object of the function may be the true owner of the memory\n    // associated with the function. Consequently, a local copy of the function\n    // is required to ensure that any owning sub-object remains valid until\n    // after we have deallocated the memory here.\n    Function function(ASIO_MOVE_CAST(Function)(o->function_));\n    p.reset();\n\n    // Make the upcall if required.\n    if (call)\n    {\n      function();\n    }\n  }\n\nprivate:\n  Function function_;\n  Alloc allocator_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_EXECUTOR_FUNCTION_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/executor_op.hpp",
    "content": "//\n// detail/executor_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_EXECUTOR_OP_HPP\n#define ASIO_DETAIL_EXECUTOR_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/scheduler_operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename Alloc,\n    typename Operation = scheduler_operation>\nclass executor_op : public Operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_ALLOCATOR_PTR(executor_op);\n\n  template <typename H>\n  executor_op(ASIO_MOVE_ARG(H) h, const Alloc& allocator)\n    : Operation(&executor_op::do_complete),\n      handler_(ASIO_MOVE_CAST(H)(h)),\n      allocator_(allocator)\n  {\n  }\n\n  static void do_complete(void* owner, Operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    executor_op* o(static_cast<executor_op*>(base));\n    Alloc allocator(o->allocator_);\n    ptr p = { detail::addressof(allocator), o, o };\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    Handler handler(ASIO_MOVE_CAST(Handler)(o->handler_));\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN(());\n      asio_handler_invoke_helpers::invoke(handler, handler);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  Alloc allocator_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_EXECUTOR_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/fd_set_adapter.hpp",
    "content": "//\n// detail/fd_set_adapter.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_FD_SET_ADAPTER_HPP\n#define ASIO_DETAIL_FD_SET_ADAPTER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/posix_fd_set_adapter.hpp\"\n#include \"asio/detail/win_fd_set_adapter.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\ntypedef win_fd_set_adapter fd_set_adapter;\n#else\ntypedef posix_fd_set_adapter fd_set_adapter;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_FD_SET_ADAPTER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/fenced_block.hpp",
    "content": "//\n// detail/fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS) \\\n  || defined(ASIO_DISABLE_FENCED_BLOCK)\n# include \"asio/detail/null_fenced_block.hpp\"\n#elif defined(ASIO_HAS_STD_ATOMIC)\n# include \"asio/detail/std_fenced_block.hpp\"\n#elif defined(__MACH__) && defined(__APPLE__)\n# include \"asio/detail/macos_fenced_block.hpp\"\n#elif defined(__sun)\n# include \"asio/detail/solaris_fenced_block.hpp\"\n#elif defined(__GNUC__) && defined(__arm__) \\\n  && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n# include \"asio/detail/gcc_arm_fenced_block.hpp\"\n#elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__))\n# include \"asio/detail/gcc_hppa_fenced_block.hpp\"\n#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))\n# include \"asio/detail/gcc_x86_fenced_block.hpp\"\n#elif defined(__GNUC__) \\\n  && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \\\n  && !defined(__INTEL_COMPILER) && !defined(__ICL) \\\n  && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__)\n# include \"asio/detail/gcc_sync_fenced_block.hpp\"\n#elif defined(ASIO_WINDOWS) && !defined(UNDER_CE)\n# include \"asio/detail/win_fenced_block.hpp\"\n#else\n# include \"asio/detail/null_fenced_block.hpp\"\n#endif\n\nnamespace asio {\nnamespace detail {\n\n#if !defined(ASIO_HAS_THREADS) \\\n  || defined(ASIO_DISABLE_FENCED_BLOCK)\ntypedef null_fenced_block fenced_block;\n#elif defined(ASIO_HAS_STD_ATOMIC)\ntypedef std_fenced_block 
fenced_block;\n#elif defined(__MACH__) && defined(__APPLE__)\ntypedef macos_fenced_block fenced_block;\n#elif defined(__sun)\ntypedef solaris_fenced_block fenced_block;\n#elif defined(__GNUC__) && defined(__arm__) \\\n  && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\ntypedef gcc_arm_fenced_block fenced_block;\n#elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__))\ntypedef gcc_hppa_fenced_block fenced_block;\n#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))\ntypedef gcc_x86_fenced_block fenced_block;\n#elif defined(__GNUC__) \\\n  && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \\\n  && !defined(__INTEL_COMPILER) && !defined(__ICL) \\\n  && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__)\ntypedef gcc_sync_fenced_block fenced_block;\n#elif defined(ASIO_WINDOWS) && !defined(UNDER_CE)\ntypedef win_fenced_block fenced_block;\n#else\ntypedef null_fenced_block fenced_block;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/functional.hpp",
    "content": "//\n// detail/functional.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_FUNCTIONAL_HPP\n#define ASIO_DETAIL_FUNCTIONAL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <functional>\n\n#if !defined(ASIO_HAS_STD_FUNCTION)\n# include <boost/function.hpp>\n#endif // !defined(ASIO_HAS_STD_FUNCTION)\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_HAS_STD_FUNCTION)\nusing std::function;\n#else // defined(ASIO_HAS_STD_FUNCTION)\nusing boost::function;\n#endif // defined(ASIO_HAS_STD_FUNCTION)\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_FUNCTIONAL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/future.hpp",
    "content": "//\n// detail/future.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_FUTURE_HPP\n#define ASIO_DETAIL_FUTURE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#if defined(ASIO_HAS_STD_FUTURE)\n# include <future>\n// Even though the future header is available, libstdc++ may not implement the\n// std::future class itself. However, we need to have already included the\n// future header to reliably test for _GLIBCXX_HAS_GTHREADS.\n# if defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)\n#  if defined(_GLIBCXX_HAS_GTHREADS)\n#   define ASIO_HAS_STD_FUTURE_CLASS 1\n#  endif // defined(_GLIBCXX_HAS_GTHREADS)\n# else // defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)\n#  define ASIO_HAS_STD_FUTURE_CLASS 1\n# endif // defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)\n#endif // defined(ASIO_HAS_STD_FUTURE)\n\n#endif // ASIO_DETAIL_FUTURE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/gcc_arm_fenced_block.hpp",
    "content": "//\n// detail/gcc_arm_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(__GNUC__) && defined(__arm__)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass gcc_arm_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_t { half };\n  enum full_t { full };\n\n  // Constructor for a half fenced block.\n  explicit gcc_arm_fenced_block(half_t)\n  {\n  }\n\n  // Constructor for a full fenced block.\n  explicit gcc_arm_fenced_block(full_t)\n  {\n    barrier();\n  }\n\n  // Destructor.\n  ~gcc_arm_fenced_block()\n  {\n    barrier();\n  }\n\nprivate:\n  static void barrier()\n  {\n#if defined(__ARM_ARCH_4__) \\\n    || defined(__ARM_ARCH_4T__) \\\n    || defined(__ARM_ARCH_5__) \\\n    || defined(__ARM_ARCH_5E__) \\\n    || defined(__ARM_ARCH_5T__) \\\n    || defined(__ARM_ARCH_5TE__) \\\n    || defined(__ARM_ARCH_5TEJ__) \\\n    || defined(__ARM_ARCH_6__) \\\n    || defined(__ARM_ARCH_6J__) \\\n    || defined(__ARM_ARCH_6K__) \\\n    || defined(__ARM_ARCH_6Z__) \\\n    || defined(__ARM_ARCH_6ZK__) \\\n    || defined(__ARM_ARCH_6T2__)\n# if defined(__thumb__)\n    // This is just a placeholder and almost certainly not sufficient.\n    __asm__ __volatile__ (\"\" : : : \"memory\");\n# else // defined(__thumb__)\n    int a = 0, b = 0;\n    __asm__ __volatile__ (\"swp %0, %1, [%2]\"\n        : \"=&r\"(a) : \"r\"(1), \"r\"(&b) : \"memory\", \"cc\");\n# endif // 
defined(__thumb__)\n#else\n    // ARMv7 and later.\n    __asm__ __volatile__ (\"dmb\" : : : \"memory\");\n#endif\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(__GNUC__) && defined(__arm__)\n\n#endif // ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/gcc_hppa_fenced_block.hpp",
    "content": "//\n// detail/gcc_hppa_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(__GNUC__) && (defined(__hppa) || defined(__hppa__))\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass gcc_hppa_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_t { half };\n  enum full_t { full };\n\n  // Constructor for a half fenced block.\n  explicit gcc_hppa_fenced_block(half_t)\n  {\n  }\n\n  // Constructor for a full fenced block.\n  explicit gcc_hppa_fenced_block(full_t)\n  {\n    barrier();\n  }\n\n  // Destructor.\n  ~gcc_hppa_fenced_block()\n  {\n    barrier();\n  }\n\nprivate:\n  static void barrier()\n  {\n    // This is just a placeholder and almost certainly not sufficient.\n    __asm__ __volatile__ (\"\" : : : \"memory\");\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(__GNUC__) && (defined(__hppa) || defined(__hppa__))\n\n#endif // ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/gcc_sync_fenced_block.hpp",
    "content": "//\n// detail/gcc_sync_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(__GNUC__) \\\n  && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \\\n  && !defined(__INTEL_COMPILER) && !defined(__ICL) \\\n  && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass gcc_sync_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_or_full_t { half, full };\n\n  // Constructor.\n  explicit gcc_sync_fenced_block(half_or_full_t)\n    : value_(0)\n  {\n    __sync_lock_test_and_set(&value_, 1);\n  }\n\n  // Destructor.\n  ~gcc_sync_fenced_block()\n  {\n    __sync_lock_release(&value_);\n  }\n\nprivate:\n  int value_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(__GNUC__)\n       // && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4))\n       // && !defined(__INTEL_COMPILER) && !defined(__ICL)\n       // && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__)\n\n#endif // ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/gcc_x86_fenced_block.hpp",
    "content": "//\n// detail/gcc_x86_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass gcc_x86_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_t { half };\n  enum full_t { full };\n\n  // Constructor for a half fenced block.\n  explicit gcc_x86_fenced_block(half_t)\n  {\n  }\n\n  // Constructor for a full fenced block.\n  explicit gcc_x86_fenced_block(full_t)\n  {\n    lbarrier();\n  }\n\n  // Destructor.\n  ~gcc_x86_fenced_block()\n  {\n    sbarrier();\n  }\n\nprivate:\n  static int barrier()\n  {\n    int r = 0, m = 1;\n    __asm__ __volatile__ (\n        \"xchgl %0, %1\" :\n        \"=r\"(r), \"=m\"(m) :\n        \"0\"(1), \"m\"(m) :\n        \"memory\", \"cc\");\n    return r;\n  }\n\n  static void lbarrier()\n  {\n#if defined(__SSE2__)\n# if (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL)\n    __builtin_ia32_lfence();\n# else // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL)\n    __asm__ __volatile__ (\"lfence\" ::: \"memory\");\n# endif // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL)\n#else // defined(__SSE2__)\n    barrier();\n#endif // defined(__SSE2__)\n  }\n\n  static void sbarrier()\n  {\n#if defined(__SSE2__)\n# if (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL)\n    
__builtin_ia32_sfence();\n# else // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL)\n    __asm__ __volatile__ (\"sfence\" ::: \"memory\");\n# endif // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL)\n#else // defined(__SSE2__)\n    barrier();\n#endif // defined(__SSE2__)\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))\n\n#endif // ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/global.hpp",
    "content": "//\n// detail/global.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_GLOBAL_HPP\n#define ASIO_DETAIL_GLOBAL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n# include \"asio/detail/null_global.hpp\"\n#elif defined(ASIO_WINDOWS)\n# include \"asio/detail/win_global.hpp\"\n#elif defined(ASIO_HAS_PTHREADS)\n# include \"asio/detail/posix_global.hpp\"\n#elif defined(ASIO_HAS_STD_CALL_ONCE)\n# include \"asio/detail/std_global.hpp\"\n#else\n# error Only Windows, POSIX and std::call_once are supported!\n#endif\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\ninline T& global()\n{\n#if !defined(ASIO_HAS_THREADS)\n  return null_global<T>();\n#elif defined(ASIO_WINDOWS)\n  return win_global<T>();\n#elif defined(ASIO_HAS_PTHREADS)\n  return posix_global<T>();\n#elif defined(ASIO_HAS_STD_CALL_ONCE)\n  return std_global<T>();\n#endif\n}\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_GLOBAL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/handler_alloc_helpers.hpp",
    "content": "//\n// detail/handler_alloc_helpers.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP\n#define ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/recycling_allocator.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/handler_alloc_hook.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\n// Calls to asio_handler_allocate and asio_handler_deallocate must be made from\n// a namespace that does not contain any overloads of these functions. 
The\n// asio_handler_alloc_helpers namespace is defined here for that purpose.\nnamespace asio_handler_alloc_helpers {\n\ntemplate <typename Handler>\ninline void* allocate(std::size_t s, Handler& h)\n{\n#if !defined(ASIO_HAS_HANDLER_HOOKS)\n  return ::operator new(s);\n#else\n  using asio::asio_handler_allocate;\n  return asio_handler_allocate(s, asio::detail::addressof(h));\n#endif\n}\n\ntemplate <typename Handler>\ninline void deallocate(void* p, std::size_t s, Handler& h)\n{\n#if !defined(ASIO_HAS_HANDLER_HOOKS)\n  ::operator delete(p);\n#else\n  using asio::asio_handler_deallocate;\n  asio_handler_deallocate(p, s, asio::detail::addressof(h));\n#endif\n}\n\n} // namespace asio_handler_alloc_helpers\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename T>\nclass hook_allocator\n{\npublic:\n  typedef T value_type;\n\n  template <typename U>\n  struct rebind\n  {\n    typedef hook_allocator<Handler, U> other;\n  };\n\n  explicit hook_allocator(Handler& h)\n    : handler_(h)\n  {\n  }\n\n  template <typename U>\n  hook_allocator(const hook_allocator<Handler, U>& a)\n    : handler_(a.handler_)\n  {\n  }\n\n  T* allocate(std::size_t n)\n  {\n    return static_cast<T*>(\n        asio_handler_alloc_helpers::allocate(sizeof(T) * n, handler_));\n  }\n\n  void deallocate(T* p, std::size_t n)\n  {\n    asio_handler_alloc_helpers::deallocate(p, sizeof(T) * n, handler_);\n  }\n\n//private:\n  Handler& handler_;\n};\n\ntemplate <typename Handler>\nclass hook_allocator<Handler, void>\n{\npublic:\n  typedef void value_type;\n\n  template <typename U>\n  struct rebind\n  {\n    typedef hook_allocator<Handler, U> other;\n  };\n\n  explicit hook_allocator(Handler& h)\n    : handler_(h)\n  {\n  }\n\n  template <typename U>\n  hook_allocator(const hook_allocator<Handler, U>& a)\n    : handler_(a.handler_)\n  {\n  }\n\n//private:\n  Handler& handler_;\n};\n\ntemplate <typename Handler, typename Allocator>\nstruct get_hook_allocator\n{\n  typedef Allocator 
type;\n\n  static type get(Handler&, const Allocator& a)\n  {\n    return a;\n  }\n};\n\ntemplate <typename Handler, typename T>\nstruct get_hook_allocator<Handler, std::allocator<T> >\n{\n  typedef hook_allocator<Handler, T> type;\n\n  static type get(Handler& handler, const std::allocator<T>&)\n  {\n    return type(handler);\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#define ASIO_DEFINE_HANDLER_PTR(op) \\\n  struct ptr \\\n  { \\\n    Handler* h; \\\n    op* v; \\\n    op* p; \\\n    ~ptr() \\\n    { \\\n      reset(); \\\n    } \\\n    static op* allocate(Handler& handler) \\\n    { \\\n      typedef typename ::asio::associated_allocator< \\\n        Handler>::type associated_allocator_type; \\\n      typedef typename ::asio::detail::get_hook_allocator< \\\n        Handler, associated_allocator_type>::type hook_allocator_type; \\\n      ASIO_REBIND_ALLOC(hook_allocator_type, op) a( \\\n            ::asio::detail::get_hook_allocator< \\\n              Handler, associated_allocator_type>::get( \\\n                handler, ::asio::get_associated_allocator(handler))); \\\n      return a.allocate(1); \\\n    } \\\n    void reset() \\\n    { \\\n      if (p) \\\n      { \\\n        p->~op(); \\\n        p = 0; \\\n      } \\\n      if (v) \\\n      { \\\n        typedef typename ::asio::associated_allocator< \\\n          Handler>::type associated_allocator_type; \\\n        typedef typename ::asio::detail::get_hook_allocator< \\\n          Handler, associated_allocator_type>::type hook_allocator_type; \\\n        ASIO_REBIND_ALLOC(hook_allocator_type, op) a( \\\n              ::asio::detail::get_hook_allocator< \\\n                Handler, associated_allocator_type>::get( \\\n                  *h, ::asio::get_associated_allocator(*h))); \\\n        a.deallocate(static_cast<op*>(v), 1); \\\n        v = 0; \\\n      } \\\n    } \\\n  } \\\n  /**/\n\n#define ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR(purpose, op) \\\n  struct ptr \\\n  { \\\n    const 
Alloc* a; \\\n    void* v; \\\n    op* p; \\\n    ~ptr() \\\n    { \\\n      reset(); \\\n    } \\\n    static op* allocate(const Alloc& a) \\\n    { \\\n      typedef typename ::asio::detail::get_recycling_allocator< \\\n        Alloc, purpose>::type recycling_allocator_type; \\\n      ASIO_REBIND_ALLOC(recycling_allocator_type, op) a1( \\\n            ::asio::detail::get_recycling_allocator< \\\n              Alloc, purpose>::get(a)); \\\n      return a1.allocate(1); \\\n    } \\\n    void reset() \\\n    { \\\n      if (p) \\\n      { \\\n        p->~op(); \\\n        p = 0; \\\n      } \\\n      if (v) \\\n      { \\\n        typedef typename ::asio::detail::get_recycling_allocator< \\\n          Alloc, purpose>::type recycling_allocator_type; \\\n        ASIO_REBIND_ALLOC(recycling_allocator_type, op) a1( \\\n              ::asio::detail::get_recycling_allocator< \\\n                Alloc, purpose>::get(*a)); \\\n        a1.deallocate(static_cast<op*>(v), 1); \\\n        v = 0; \\\n      } \\\n    } \\\n  } \\\n  /**/\n\n#define ASIO_DEFINE_HANDLER_ALLOCATOR_PTR(op) \\\n  ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR( \\\n      ::asio::detail::thread_info_base::default_tag, op ) \\\n  /**/\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/handler_cont_helpers.hpp",
    "content": "//\n// detail/handler_cont_helpers.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP\n#define ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/handler_continuation_hook.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\n// Calls to asio_handler_is_continuation must be made from a namespace that\n// does not contain overloads of this function. This namespace is defined here\n// for that purpose.\nnamespace asio_handler_cont_helpers {\n\ntemplate <typename Context>\ninline bool is_continuation(Context& context)\n{\n#if !defined(ASIO_HAS_HANDLER_HOOKS)\n  return false;\n#else\n  using asio::asio_handler_is_continuation;\n  return asio_handler_is_continuation(\n      asio::detail::addressof(context));\n#endif\n}\n\n} // namespace asio_handler_cont_helpers\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/handler_invoke_helpers.hpp",
    "content": "//\n// detail/handler_invoke_helpers.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP\n#define ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/handler_invoke_hook.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\n// Calls to asio_handler_invoke must be made from a namespace that does not\n// contain overloads of this function. The asio_handler_invoke_helpers\n// namespace is defined here for that purpose.\nnamespace asio_handler_invoke_helpers {\n\ntemplate <typename Function, typename Context>\ninline void invoke(Function& function, Context& context)\n{\n#if !defined(ASIO_HAS_HANDLER_HOOKS)\n  Function tmp(function);\n  tmp();\n#else\n  using asio::asio_handler_invoke;\n  asio_handler_invoke(function, asio::detail::addressof(context));\n#endif\n}\n\ntemplate <typename Function, typename Context>\ninline void invoke(const Function& function, Context& context)\n{\n#if !defined(ASIO_HAS_HANDLER_HOOKS)\n  Function tmp(function);\n  tmp();\n#else\n  using asio::asio_handler_invoke;\n  asio_handler_invoke(function, asio::detail::addressof(context));\n#endif\n}\n\n} // namespace asio_handler_invoke_helpers\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/handler_tracking.hpp",
    "content": "//\n// detail/handler_tracking.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_HANDLER_TRACKING_HPP\n#define ASIO_DETAIL_HANDLER_TRACKING_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\nnamespace asio {\n\nclass execution_context;\n\n} // namespace asio\n\n#if defined(ASIO_CUSTOM_HANDLER_TRACKING)\n# include ASIO_CUSTOM_HANDLER_TRACKING\n#elif defined(ASIO_ENABLE_HANDLER_TRACKING)\n# include \"asio/error_code.hpp\"\n# include \"asio/detail/cstdint.hpp\"\n# include \"asio/detail/static_mutex.hpp\"\n# include \"asio/detail/tss_ptr.hpp\"\n#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_CUSTOM_HANDLER_TRACKING)\n\n// The user-specified header must define the following macros:\n// - ASIO_INHERIT_TRACKED_HANDLER\n// - ASIO_ALSO_INHERIT_TRACKED_HANDLER\n// - ASIO_HANDLER_TRACKING_INIT\n// - ASIO_HANDLER_CREATION(args)\n// - ASIO_HANDLER_COMPLETION(args)\n// - ASIO_HANDLER_INVOCATION_BEGIN(args)\n// - ASIO_HANDLER_INVOCATION_END\n// - ASIO_HANDLER_OPERATION(args)\n// - ASIO_HANDLER_REACTOR_REGISTRATION(args)\n// - ASIO_HANDLER_REACTOR_DEREGISTRATION(args)\n// - ASIO_HANDLER_REACTOR_READ_EVENT\n// - ASIO_HANDLER_REACTOR_WRITE_EVENT\n// - ASIO_HANDLER_REACTOR_ERROR_EVENT\n// - ASIO_HANDLER_REACTOR_EVENTS(args)\n// - ASIO_HANDLER_REACTOR_OPERATION(args)\n\n# if !defined(ASIO_ENABLE_HANDLER_TRACKING)\n#  define ASIO_ENABLE_HANDLER_TRACKING 1\n# endif /// !defined(ASIO_ENABLE_HANDLER_TRACKING)\n\n#elif defined(ASIO_ENABLE_HANDLER_TRACKING)\n\nclass handler_tracking\n{\npublic:\n  class 
completion;\n\n  // Base class for objects containing tracked handlers.\n  class tracked_handler\n  {\n  private:\n    // Only the handler_tracking class will have access to the id.\n    friend class handler_tracking;\n    friend class completion;\n    uint64_t id_;\n\n  protected:\n    // Constructor initialises with no id.\n    tracked_handler() : id_(0) {}\n\n    // Prevent deletion through this type.\n    ~tracked_handler() {}\n  };\n\n  // Initialise the tracking system.\n  ASIO_DECL static void init();\n\n  // Record the creation of a tracked handler.\n  ASIO_DECL static void creation(\n      execution_context& context, tracked_handler& h,\n      const char* object_type, void* object,\n      uintmax_t native_handle, const char* op_name);\n\n  class completion\n  {\n  public:\n    // Constructor records that handler is to be invoked with no arguments.\n    ASIO_DECL explicit completion(const tracked_handler& h);\n\n    // Destructor records only when an exception is thrown from the handler, or\n    // if the memory is being freed without the handler having been invoked.\n    ASIO_DECL ~completion();\n\n    // Records that handler is to be invoked with no arguments.\n    ASIO_DECL void invocation_begin();\n\n    // Records that handler is to be invoked with one arguments.\n    ASIO_DECL void invocation_begin(const asio::error_code& ec);\n\n    // Constructor records that handler is to be invoked with two arguments.\n    ASIO_DECL void invocation_begin(\n        const asio::error_code& ec, std::size_t bytes_transferred);\n\n    // Constructor records that handler is to be invoked with two arguments.\n    ASIO_DECL void invocation_begin(\n        const asio::error_code& ec, int signal_number);\n\n    // Constructor records that handler is to be invoked with two arguments.\n    ASIO_DECL void invocation_begin(\n        const asio::error_code& ec, const char* arg);\n\n    // Record that handler invocation has ended.\n    ASIO_DECL void invocation_end();\n\n  
private:\n    friend class handler_tracking;\n    uint64_t id_;\n    bool invoked_;\n    completion* next_;\n  };\n\n  // Record an operation that is not directly associated with a handler.\n  ASIO_DECL static void operation(execution_context& context,\n      const char* object_type, void* object,\n      uintmax_t native_handle, const char* op_name);\n\n  // Record that a descriptor has been registered with the reactor.\n  ASIO_DECL static void reactor_registration(execution_context& context,\n      uintmax_t native_handle, uintmax_t registration);\n\n  // Record that a descriptor has been deregistered from the reactor.\n  ASIO_DECL static void reactor_deregistration(execution_context& context,\n      uintmax_t native_handle, uintmax_t registration);\n\n  // Record a reactor-based operation that is associated with a handler.\n  ASIO_DECL static void reactor_events(execution_context& context,\n      uintmax_t registration, unsigned events);\n\n  // Record a reactor-based operation that is associated with a handler.\n  ASIO_DECL static void reactor_operation(\n      const tracked_handler& h, const char* op_name,\n      const asio::error_code& ec);\n\n  // Record a reactor-based operation that is associated with a handler.\n  ASIO_DECL static void reactor_operation(\n      const tracked_handler& h, const char* op_name,\n      const asio::error_code& ec, std::size_t bytes_transferred);\n\n  // Write a line of output.\n  ASIO_DECL static void write_line(const char* format, ...);\n\nprivate:\n  struct tracking_state;\n  ASIO_DECL static tracking_state* get_state();\n};\n\n# define ASIO_INHERIT_TRACKED_HANDLER \\\n  : public asio::detail::handler_tracking::tracked_handler\n\n# define ASIO_ALSO_INHERIT_TRACKED_HANDLER \\\n  , public asio::detail::handler_tracking::tracked_handler\n\n# define ASIO_HANDLER_TRACKING_INIT \\\n  asio::detail::handler_tracking::init()\n\n# define ASIO_HANDLER_CREATION(args) \\\n  asio::detail::handler_tracking::creation args\n\n# define 
ASIO_HANDLER_COMPLETION(args) \\\n  asio::detail::handler_tracking::completion tracked_completion args\n\n# define ASIO_HANDLER_INVOCATION_BEGIN(args) \\\n  tracked_completion.invocation_begin args\n\n# define ASIO_HANDLER_INVOCATION_END \\\n  tracked_completion.invocation_end()\n\n# define ASIO_HANDLER_OPERATION(args) \\\n  asio::detail::handler_tracking::operation args\n\n# define ASIO_HANDLER_REACTOR_REGISTRATION(args) \\\n  asio::detail::handler_tracking::reactor_registration args\n\n# define ASIO_HANDLER_REACTOR_DEREGISTRATION(args) \\\n  asio::detail::handler_tracking::reactor_deregistration args\n\n# define ASIO_HANDLER_REACTOR_READ_EVENT 1\n# define ASIO_HANDLER_REACTOR_WRITE_EVENT 2\n# define ASIO_HANDLER_REACTOR_ERROR_EVENT 4\n\n# define ASIO_HANDLER_REACTOR_EVENTS(args) \\\n  asio::detail::handler_tracking::reactor_events args\n\n# define ASIO_HANDLER_REACTOR_OPERATION(args) \\\n  asio::detail::handler_tracking::reactor_operation args\n\n#else // defined(ASIO_ENABLE_HANDLER_TRACKING)\n\n# define ASIO_INHERIT_TRACKED_HANDLER\n# define ASIO_ALSO_INHERIT_TRACKED_HANDLER\n# define ASIO_HANDLER_TRACKING_INIT (void)0\n# define ASIO_HANDLER_CREATION(args) (void)0\n# define ASIO_HANDLER_COMPLETION(args) (void)0\n# define ASIO_HANDLER_INVOCATION_BEGIN(args) (void)0\n# define ASIO_HANDLER_INVOCATION_END (void)0\n# define ASIO_HANDLER_OPERATION(args) (void)0\n# define ASIO_HANDLER_REACTOR_REGISTRATION(args) (void)0\n# define ASIO_HANDLER_REACTOR_DEREGISTRATION(args) (void)0\n# define ASIO_HANDLER_REACTOR_READ_EVENT 0\n# define ASIO_HANDLER_REACTOR_WRITE_EVENT 0\n# define ASIO_HANDLER_REACTOR_ERROR_EVENT 0\n# define ASIO_HANDLER_REACTOR_EVENTS(args) (void)0\n# define ASIO_HANDLER_REACTOR_OPERATION(args) (void)0\n\n#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/handler_tracking.ipp\"\n#endif // 
defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_HANDLER_TRACKING_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/handler_type_requirements.hpp",
    "content": "//\n// detail/handler_type_requirements.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP\n#define ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n// Older versions of gcc have difficulty compiling the sizeof expressions where\n// we test the handler type requirements. We'll disable checking of handler type\n// requirements for those compilers, but otherwise enable it by default.\n#if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS)\n# if !defined(__GNUC__) || (__GNUC__ >= 4)\n#  define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS 1\n# endif // !defined(__GNUC__) || (__GNUC__ >= 4)\n#endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS)\n\n// With C++0x we can use a combination of enhanced SFINAE and static_assert to\n// generate better template error messages. 
As this technique is not yet widely\n// portable, we'll only enable it for tested compilers.\n#if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)\n# if defined(__GNUC__)\n#  if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n#   if defined(__GXX_EXPERIMENTAL_CXX0X__)\n#    define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1\n#   endif // defined(__GXX_EXPERIMENTAL_CXX0X__)\n#  endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)\n# endif // defined(__GNUC__)\n# if defined(ASIO_MSVC)\n#  if (_MSC_VER >= 1600)\n#   define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1\n#  endif // (_MSC_VER >= 1600)\n# endif // defined(ASIO_MSVC)\n# if defined(__clang__)\n#  if __has_feature(__cxx_static_assert__)\n#   define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1\n#  endif // __has_feature(cxx_static_assert)\n# endif // defined(__clang__)\n#endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS)\n\n#if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)\n# include \"asio/async_result.hpp\"\n#endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)\n\n# if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)\n\ntemplate <typename Handler>\nauto zero_arg_copyable_handler_test(Handler h, void*)\n  -> decltype(\n    sizeof(Handler(static_cast<const Handler&>(h))),\n    ((h)()),\n    char(0));\n\ntemplate <typename Handler>\nchar (&zero_arg_copyable_handler_test(Handler, ...))[2];\n\ntemplate <typename Handler, typename Arg1>\nauto one_arg_handler_test(Handler h, Arg1* a1)\n  -> decltype(\n    sizeof(Handler(ASIO_MOVE_CAST(Handler)(h))),\n    ((h)(*a1)),\n    char(0));\n\ntemplate <typename Handler>\nchar (&one_arg_handler_test(Handler h, ...))[2];\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\nauto two_arg_handler_test(Handler h, Arg1* a1, Arg2* a2)\n  -> decltype(\n    sizeof(Handler(ASIO_MOVE_CAST(Handler)(h))),\n    ((h)(*a1, 
*a2)),\n    char(0));\n\ntemplate <typename Handler>\nchar (&two_arg_handler_test(Handler, ...))[2];\n\ntemplate <typename Handler, typename Arg1, typename Arg2>\nauto two_arg_move_handler_test(Handler h, Arg1* a1, Arg2* a2)\n  -> decltype(\n    sizeof(Handler(ASIO_MOVE_CAST(Handler)(h))),\n    ((h)(*a1, ASIO_MOVE_CAST(Arg2)(*a2))),\n    char(0));\n\ntemplate <typename Handler>\nchar (&two_arg_move_handler_test(Handler, ...))[2];\n\n#  define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg) \\\n     static_assert(expr, msg);\n\n# else // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)\n\n#  define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg)\n\n# endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)\n\ntemplate <typename T> T& lvref();\ntemplate <typename T> T& lvref(T);\ntemplate <typename T> const T& clvref();\ntemplate <typename T> const T& clvref(T);\n#if defined(ASIO_HAS_MOVE)\ntemplate <typename T> T rvref();\ntemplate <typename T> T rvref(T);\n#else // defined(ASIO_HAS_MOVE)\ntemplate <typename T> const T& rvref();\ntemplate <typename T> const T& rvref(T);\n#endif // defined(ASIO_HAS_MOVE)\ntemplate <typename T> char argbyv(T);\n\ntemplate <int>\nstruct handler_type_requirements\n{\n};\n\n#define ASIO_LEGACY_COMPLETION_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void()) asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::zero_arg_copyable_handler_test( \\\n          asio::detail::clvref< \\\n            asio_true_handler_type>(), 0)) == 1, \\\n      \"CompletionHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::clvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()(), \\\n        char(0))> 
ASIO_UNUSED_TYPEDEF\n\n#define ASIO_READ_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code, std::size_t)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::two_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0), \\\n          static_cast<const std::size_t*>(0))) == 1, \\\n      \"ReadHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>(), \\\n            asio::detail::lvref<const std::size_t>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_WRITE_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code, std::size_t)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::two_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0), \\\n          static_cast<const std::size_t*>(0))) == 1, \\\n      \"WriteHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>(), \\\n            asio::detail::lvref<const std::size_t>()), \\\n        char(0))> 
ASIO_UNUSED_TYPEDEF\n\n#define ASIO_ACCEPT_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::one_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0))) == 1, \\\n      \"AcceptHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_MOVE_ACCEPT_HANDLER_CHECK( \\\n    handler_type, handler, socket_type) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code, socket_type)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::two_arg_move_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0), \\\n          static_cast<socket_type*>(0))) == 1, \\\n      \"MoveAcceptHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>(), \\\n            asio::detail::rvref<socket_type>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_CONNECT_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  
typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::one_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0))) == 1, \\\n      \"ConnectHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_RANGE_CONNECT_HANDLER_CHECK( \\\n    handler_type, handler, endpoint_type) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code, endpoint_type)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::two_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0), \\\n          static_cast<const endpoint_type*>(0))) == 1, \\\n      \"RangeConnectHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>(), \\\n            asio::detail::lvref<const endpoint_type>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_ITERATOR_CONNECT_HANDLER_CHECK( \\\n    handler_type, handler, iter_type) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      
void(asio::error_code, iter_type)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::two_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0), \\\n          static_cast<const iter_type*>(0))) == 1, \\\n      \"IteratorConnectHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>(), \\\n            asio::detail::lvref<const iter_type>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_RESOLVE_HANDLER_CHECK( \\\n    handler_type, handler, range_type) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code, range_type)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::two_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0), \\\n          static_cast<const range_type*>(0))) == 1, \\\n      \"ResolveHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>(), \\\n            asio::detail::lvref<const range_type>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_WAIT_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, 
\\\n      void(asio::error_code)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::one_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0))) == 1, \\\n      \"WaitHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_SIGNAL_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code, int)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::two_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0), \\\n          static_cast<const int*>(0))) == 1, \\\n      \"SignalHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>(), \\\n            asio::detail::lvref<const int>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_HANDSHAKE_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      
sizeof(asio::detail::one_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0))) == 1, \\\n      \"HandshakeHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code, std::size_t)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::two_arg_handler_test( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0), \\\n          static_cast<const std::size_t*>(0))) == 1, \\\n      \"BufferedHandshakeHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n          asio::detail::lvref<const asio::error_code>(), \\\n          asio::detail::lvref<const std::size_t>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#define ASIO_SHUTDOWN_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  \\\n  typedef ASIO_HANDLER_TYPE(handler_type, \\\n      void(asio::error_code)) \\\n    asio_true_handler_type; \\\n  \\\n  ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \\\n      sizeof(asio::detail::one_arg_handler_test( \\\n          asio::detail::rvref< \\\n            
asio_true_handler_type>(), \\\n          static_cast<const asio::error_code*>(0))) == 1, \\\n      \"ShutdownHandler type requirements not met\") \\\n  \\\n  typedef asio::detail::handler_type_requirements< \\\n      sizeof( \\\n        asio::detail::argbyv( \\\n          asio::detail::rvref< \\\n            asio_true_handler_type>())) + \\\n      sizeof( \\\n        asio::detail::lvref< \\\n          asio_true_handler_type>()( \\\n            asio::detail::lvref<const asio::error_code>()), \\\n        char(0))> ASIO_UNUSED_TYPEDEF\n\n#else // !defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)\n\n#define ASIO_LEGACY_COMPLETION_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_READ_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_WRITE_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_ACCEPT_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_MOVE_ACCEPT_HANDLER_CHECK( \\\n    handler_type, handler, socket_type) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_CONNECT_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_RANGE_CONNECT_HANDLER_CHECK( \\\n    handler_type, handler, iter_type) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_ITERATOR_CONNECT_HANDLER_CHECK( \\\n    handler_type, handler, iter_type) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_RESOLVE_HANDLER_CHECK( \\\n    handler_type, handler, iter_type) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_WAIT_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_SIGNAL_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_HANDSHAKE_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define 
ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#define ASIO_SHUTDOWN_HANDLER_CHECK( \\\n    handler_type, handler) \\\n  typedef int ASIO_UNUSED_TYPEDEF\n\n#endif // !defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/handler_work.hpp",
    "content": "//\n// detail/handler_work.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_HANDLER_WORK_HPP\n#define ASIO_DETAIL_HANDLER_WORK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// A helper class template to allow completion handlers to be dispatched\n// through either the new executors framework or the old invocaton hook. The\n// primary template uses the new executors framework.\ntemplate <typename Handler,\n    typename IoExecutor = system_executor, typename HandlerExecutor\n      = typename associated_executor<Handler, IoExecutor>::type>\nclass handler_work\n{\npublic:\n  explicit handler_work(Handler& handler) ASIO_NOEXCEPT\n    : io_executor_(),\n      executor_(asio::get_associated_executor(handler, io_executor_))\n  {\n  }\n\n  handler_work(Handler& handler, const IoExecutor& io_ex) ASIO_NOEXCEPT\n    : io_executor_(io_ex),\n      executor_(asio::get_associated_executor(handler, io_executor_))\n  {\n  }\n\n  static void start(Handler& handler) ASIO_NOEXCEPT\n  {\n    HandlerExecutor ex(asio::get_associated_executor(handler));\n    ex.on_work_started();\n  }\n\n  static void start(Handler& handler,\n      const IoExecutor& io_ex) ASIO_NOEXCEPT\n  {\n    HandlerExecutor ex(asio::get_associated_executor(handler, io_ex));\n    ex.on_work_started();\n    io_ex.on_work_started();\n  }\n\n  ~handler_work()\n  {\n    io_executor_.on_work_finished();\n    executor_.on_work_finished();\n  }\n\n  template <typename 
Function>\n  void complete(Function& function, Handler& handler)\n  {\n    executor_.dispatch(ASIO_MOVE_CAST(Function)(function),\n        asio::get_associated_allocator(handler));\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  handler_work(const handler_work&);\n  handler_work& operator=(const handler_work&);\n\n  IoExecutor io_executor_;\n  HandlerExecutor executor_;\n};\n\n// This specialisation dispatches a handler through the old invocation hook.\n// The specialisation is not strictly required for correctness, as the\n// system_executor will dispatch through the hook anyway. However, by doing\n// this we avoid an extra copy of the handler.\ntemplate <typename Handler>\nclass handler_work<Handler, system_executor, system_executor>\n{\npublic:\n  explicit handler_work(Handler&) ASIO_NOEXCEPT {}\n  static void start(Handler&) ASIO_NOEXCEPT {}\n  ~handler_work() {}\n\n  template <typename Function>\n  void complete(Function& function, Handler& handler)\n  {\n    asio_handler_invoke_helpers::invoke(function, handler);\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  handler_work(const handler_work&);\n  handler_work& operator=(const handler_work&);\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_HANDLER_WORK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/hash_map.hpp",
    "content": "//\n// detail/hash_map.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_HASH_MAP_HPP\n#define ASIO_DETAIL_HASH_MAP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <list>\n#include <utility>\n#include \"asio/detail/assert.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# include \"asio/detail/socket_types.hpp\"\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ninline std::size_t calculate_hash_value(int i)\n{\n  return static_cast<std::size_t>(i);\n}\n\ninline std::size_t calculate_hash_value(void* p)\n{\n  return reinterpret_cast<std::size_t>(p)\n    + (reinterpret_cast<std::size_t>(p) >> 3);\n}\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\ninline std::size_t calculate_hash_value(SOCKET s)\n{\n  return static_cast<std::size_t>(s);\n}\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n// Note: assumes K and V are POD types.\ntemplate <typename K, typename V>\nclass hash_map\n  : private noncopyable\n{\npublic:\n  // The type of a value in the map.\n  typedef std::pair<K, V> value_type;\n\n  // The type of a non-const iterator over the hash map.\n  typedef typename std::list<value_type>::iterator iterator;\n\n  // The type of a const iterator over the hash map.\n  typedef typename std::list<value_type>::const_iterator const_iterator;\n\n  // Constructor.\n  hash_map()\n    : size_(0),\n      buckets_(0),\n      num_buckets_(0)\n  {\n  }\n\n  // Destructor.\n  ~hash_map()\n  {\n    delete[] buckets_;\n  }\n\n  // Get an iterator for 
the beginning of the map.\n  iterator begin()\n  {\n    return values_.begin();\n  }\n\n  // Get an iterator for the beginning of the map.\n  const_iterator begin() const\n  {\n    return values_.begin();\n  }\n\n  // Get an iterator for the end of the map.\n  iterator end()\n  {\n    return values_.end();\n  }\n\n  // Get an iterator for the end of the map.\n  const_iterator end() const\n  {\n    return values_.end();\n  }\n\n  // Check whether the map is empty.\n  bool empty() const\n  {\n    return values_.empty();\n  }\n\n  // Find an entry in the map.\n  iterator find(const K& k)\n  {\n    if (num_buckets_)\n    {\n      size_t bucket = calculate_hash_value(k) % num_buckets_;\n      iterator it = buckets_[bucket].first;\n      if (it == values_.end())\n        return values_.end();\n      iterator end_it = buckets_[bucket].last;\n      ++end_it;\n      while (it != end_it)\n      {\n        if (it->first == k)\n          return it;\n        ++it;\n      }\n    }\n    return values_.end();\n  }\n\n  // Find an entry in the map.\n  const_iterator find(const K& k) const\n  {\n    if (num_buckets_)\n    {\n      size_t bucket = calculate_hash_value(k) % num_buckets_;\n      const_iterator it = buckets_[bucket].first;\n      if (it == values_.end())\n        return it;\n      const_iterator end_it = buckets_[bucket].last;\n      ++end_it;\n      while (it != end_it)\n      {\n        if (it->first == k)\n          return it;\n        ++it;\n      }\n    }\n    return values_.end();\n  }\n\n  // Insert a new entry into the map.\n  std::pair<iterator, bool> insert(const value_type& v)\n  {\n    if (size_ + 1 >= num_buckets_)\n      rehash(hash_size(size_ + 1));\n    size_t bucket = calculate_hash_value(v.first) % num_buckets_;\n    iterator it = buckets_[bucket].first;\n    if (it == values_.end())\n    {\n      buckets_[bucket].first = buckets_[bucket].last =\n        values_insert(values_.end(), v);\n      ++size_;\n      return std::pair<iterator, 
bool>(buckets_[bucket].last, true);\n    }\n    iterator end_it = buckets_[bucket].last;\n    ++end_it;\n    while (it != end_it)\n    {\n      if (it->first == v.first)\n        return std::pair<iterator, bool>(it, false);\n      ++it;\n    }\n    buckets_[bucket].last = values_insert(end_it, v);\n    ++size_;\n    return std::pair<iterator, bool>(buckets_[bucket].last, true);\n  }\n\n  // Erase an entry from the map.\n  void erase(iterator it)\n  {\n    ASIO_ASSERT(it != values_.end());\n    ASIO_ASSERT(num_buckets_ != 0);\n\n    size_t bucket = calculate_hash_value(it->first) % num_buckets_;\n    bool is_first = (it == buckets_[bucket].first);\n    bool is_last = (it == buckets_[bucket].last);\n    if (is_first && is_last)\n      buckets_[bucket].first = buckets_[bucket].last = values_.end();\n    else if (is_first)\n      ++buckets_[bucket].first;\n    else if (is_last)\n      --buckets_[bucket].last;\n\n    values_erase(it);\n    --size_;\n  }\n\n  // Erase a key from the map.\n  void erase(const K& k)\n  {\n    iterator it = find(k);\n    if (it != values_.end())\n      erase(it);\n  }\n\n  // Remove all entries from the map.\n  void clear()\n  {\n    // Clear the values.\n    values_.clear();\n    size_ = 0;\n\n    // Initialise all buckets to empty.\n    iterator end_it = values_.end();\n    for (size_t i = 0; i < num_buckets_; ++i)\n      buckets_[i].first = buckets_[i].last = end_it;\n  }\n\nprivate:\n  // Calculate the hash size for the specified number of elements.\n  static std::size_t hash_size(std::size_t num_elems)\n  {\n    static std::size_t sizes[] =\n    {\n#if defined(ASIO_HASH_MAP_BUCKETS)\n      ASIO_HASH_MAP_BUCKETS\n#else // ASIO_HASH_MAP_BUCKETS\n      3, 13, 23, 53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593,\n      49157, 98317, 196613, 393241, 786433, 1572869, 3145739, 6291469,\n      12582917, 25165843\n#endif // ASIO_HASH_MAP_BUCKETS\n    };\n    const std::size_t nth_size = sizeof(sizes) / sizeof(std::size_t) - 1;\n    for 
(std::size_t i = 0; i < nth_size; ++i)\n      if (num_elems < sizes[i])\n        return sizes[i];\n    return sizes[nth_size];\n  }\n\n  // Re-initialise the hash from the values already contained in the list.\n  void rehash(std::size_t num_buckets)\n  {\n    if (num_buckets == num_buckets_)\n      return;\n    ASIO_ASSERT(num_buckets != 0);\n\n    iterator end_iter = values_.end();\n\n    // Update number of buckets and initialise all buckets to empty.\n    bucket_type* tmp = new bucket_type[num_buckets];\n    delete[] buckets_;\n    buckets_ = tmp;\n    num_buckets_ = num_buckets;\n    for (std::size_t i = 0; i < num_buckets_; ++i)\n      buckets_[i].first = buckets_[i].last = end_iter;\n\n    // Put all values back into the hash.\n    iterator iter = values_.begin();\n    while (iter != end_iter)\n    {\n      std::size_t bucket = calculate_hash_value(iter->first) % num_buckets_;\n      if (buckets_[bucket].last == end_iter)\n      {\n        buckets_[bucket].first = buckets_[bucket].last = iter++;\n      }\n      else if (++buckets_[bucket].last == iter)\n      {\n        ++iter;\n      }\n      else\n      {\n        values_.splice(buckets_[bucket].last, values_, iter++);\n        --buckets_[bucket].last;\n      }\n    }\n  }\n\n  // Insert an element into the values list by splicing from the spares list,\n  // if a spare is available, and otherwise by inserting a new element.\n  iterator values_insert(iterator it, const value_type& v)\n  {\n    if (spares_.empty())\n    {\n      return values_.insert(it, v);\n    }\n    else\n    {\n      spares_.front() = v;\n      values_.splice(it, spares_, spares_.begin());\n      return --it;\n    }\n  }\n\n  // Erase an element from the values list by splicing it to the spares list.\n  void values_erase(iterator it)\n  {\n    *it = value_type();\n    spares_.splice(spares_.begin(), values_, it);\n  }\n\n  // The number of elements in the hash.\n  std::size_t size_;\n\n  // The list of all values in the hash map.\n  
std::list<value_type> values_;\n\n  // The list of spare nodes waiting to be recycled. Assumes that POD types only\n  // are stored in the hash map.\n  std::list<value_type> spares_;\n\n  // The type for a bucket in the hash table.\n  struct bucket_type\n  {\n    iterator first;\n    iterator last;\n  };\n\n  // The buckets in the hash.\n  bucket_type* buckets_;\n\n  // The number of buckets in the hash.\n  std::size_t num_buckets_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_HASH_MAP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/buffer_sequence_adapter.ipp",
    "content": "//\n// detail/impl/buffer_sequence_adapter.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP\n#define ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include <robuffer.h>\n#include <windows.storage.streams.h>\n#include <wrl/implements.h>\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass winrt_buffer_impl :\n  public Microsoft::WRL::RuntimeClass<\n    Microsoft::WRL::RuntimeClassFlags<\n      Microsoft::WRL::RuntimeClassType::WinRtClassicComMix>,\n    ABI::Windows::Storage::Streams::IBuffer,\n    Windows::Storage::Streams::IBufferByteAccess>\n{\npublic:\n  explicit winrt_buffer_impl(const asio::const_buffer& b)\n  {\n    bytes_ = const_cast<byte*>(static_cast<const byte*>(b.data()));\n    length_ = b.size();\n    capacity_ = b.size();\n  }\n\n  explicit winrt_buffer_impl(const asio::mutable_buffer& b)\n  {\n    bytes_ = static_cast<byte*>(b.data());\n    length_ = 0;\n    capacity_ = b.size();\n  }\n\n  ~winrt_buffer_impl()\n  {\n  }\n\n  STDMETHODIMP Buffer(byte** value)\n  {\n    *value = bytes_;\n    return S_OK;\n  }\n\n  STDMETHODIMP get_Capacity(UINT32* value)\n  {\n    *value = capacity_;\n    return S_OK;\n  }\n\n  STDMETHODIMP get_Length(UINT32 *value)\n  {\n    *value = length_;\n    return S_OK;\n  }\n\n  STDMETHODIMP put_Length(UINT32 value)\n  {\n    if (value > capacity_)\n      return E_INVALIDARG;\n    length_ = value;\n    return S_OK;\n  }\n\nprivate:\n  
byte* bytes_;\n  UINT32 length_;\n  UINT32 capacity_;\n};\n\nvoid buffer_sequence_adapter_base::init_native_buffer(\n    buffer_sequence_adapter_base::native_buffer_type& buf,\n    const asio::mutable_buffer& buffer)\n{\n  std::memset(&buf, 0, sizeof(native_buffer_type));\n  Microsoft::WRL::ComPtr<IInspectable> insp\n    = Microsoft::WRL::Make<winrt_buffer_impl>(buffer);\n  buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get());\n}\n\nvoid buffer_sequence_adapter_base::init_native_buffer(\n    buffer_sequence_adapter_base::native_buffer_type& buf,\n    const asio::const_buffer& buffer)\n{\n  std::memset(&buf, 0, sizeof(native_buffer_type));\n  Microsoft::WRL::ComPtr<IInspectable> insp\n    = Microsoft::WRL::Make<winrt_buffer_impl>(buffer);\n  Platform::Object^ buf_obj = reinterpret_cast<Platform::Object^>(insp.Get());\n  buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get());\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/descriptor_ops.ipp",
    "content": "//\n// detail/impl/descriptor_ops.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP\n#define ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cerrno>\n#include \"asio/detail/descriptor_ops.hpp\"\n#include \"asio/error.hpp\"\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\nnamespace descriptor_ops {\n\nint open(const char* path, int flags, asio::error_code& ec)\n{\n  errno = 0;\n  int result = error_wrapper(::open(path, flags), ec);\n  if (result >= 0)\n    ec = asio::error_code();\n  return result;\n}\n\nint close(int d, state_type& state, asio::error_code& ec)\n{\n  int result = 0;\n  if (d != -1)\n  {\n    errno = 0;\n    result = error_wrapper(::close(d), ec);\n\n    if (result != 0\n        && (ec == asio::error::would_block\n          || ec == asio::error::try_again))\n    {\n      // According to UNIX Network Programming Vol. 1, it is possible for\n      // close() to fail with EWOULDBLOCK under certain circumstances. What\n      // isn't clear is the state of the descriptor after this error. The one\n      // current OS where this behaviour is seen, Windows, says that the socket\n      // remains open. 
Therefore we'll put the descriptor back into blocking\n      // mode and have another attempt at closing it.\n#if defined(__SYMBIAN32__)\n      int flags = ::fcntl(d, F_GETFL, 0);\n      if (flags >= 0)\n        ::fcntl(d, F_SETFL, flags & ~O_NONBLOCK);\n#else // defined(__SYMBIAN32__)\n      ioctl_arg_type arg = 0;\n      ::ioctl(d, FIONBIO, &arg);\n#endif // defined(__SYMBIAN32__)\n      state &= ~non_blocking;\n\n      errno = 0;\n      result = error_wrapper(::close(d), ec);\n    }\n  }\n\n  if (result == 0)\n    ec = asio::error_code();\n  return result;\n}\n\nbool set_user_non_blocking(int d, state_type& state,\n    bool value, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return false;\n  }\n\n  errno = 0;\n#if defined(__SYMBIAN32__)\n  int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec);\n  if (result >= 0)\n  {\n    errno = 0;\n    int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));\n    result = error_wrapper(::fcntl(d, F_SETFL, flag), ec);\n  }\n#else // defined(__SYMBIAN32__)\n  ioctl_arg_type arg = (value ? 1 : 0);\n  int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec);\n#endif // defined(__SYMBIAN32__)\n\n  if (result >= 0)\n  {\n    ec = asio::error_code();\n    if (value)\n      state |= user_set_non_blocking;\n    else\n    {\n      // Clearing the user-set non-blocking mode always overrides any\n      // internally-set non-blocking flag. 
Any subsequent asynchronous\n      // operations will need to re-enable non-blocking I/O.\n      state &= ~(user_set_non_blocking | internal_non_blocking);\n    }\n    return true;\n  }\n\n  return false;\n}\n\nbool set_internal_non_blocking(int d, state_type& state,\n    bool value, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return false;\n  }\n\n  if (!value && (state & user_set_non_blocking))\n  {\n    // It does not make sense to clear the internal non-blocking flag if the\n    // user still wants non-blocking behaviour. Return an error and let the\n    // caller figure out whether to update the user-set non-blocking flag.\n    ec = asio::error::invalid_argument;\n    return false;\n  }\n\n  errno = 0;\n#if defined(__SYMBIAN32__)\n  int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec);\n  if (result >= 0)\n  {\n    errno = 0;\n    int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));\n    result = error_wrapper(::fcntl(d, F_SETFL, flag), ec);\n  }\n#else // defined(__SYMBIAN32__)\n  ioctl_arg_type arg = (value ? 
1 : 0);\n  int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec);\n#endif // defined(__SYMBIAN32__)\n\n  if (result >= 0)\n  {\n    ec = asio::error_code();\n    if (value)\n      state |= internal_non_blocking;\n    else\n      state &= ~internal_non_blocking;\n    return true;\n  }\n\n  return false;\n}\n\nstd::size_t sync_read(int d, state_type state, buf* bufs,\n    std::size_t count, bool all_empty, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  // A request to read 0 bytes on a stream is a no-op.\n  if (all_empty)\n  {\n    ec = asio::error_code();\n    return 0;\n  }\n\n  // Read some data.\n  for (;;)\n  {\n    // Try to complete the operation without blocking.\n    errno = 0;\n    signed_size_type bytes = error_wrapper(::readv(\n          d, bufs, static_cast<int>(count)), ec);\n\n    // Check if operation succeeded.\n    if (bytes > 0)\n      return bytes;\n\n    // Check for EOF.\n    if (bytes == 0)\n    {\n      ec = asio::error::eof;\n      return 0;\n    }\n\n    // Operation failed.\n    if ((state & user_set_non_blocking)\n        || (ec != asio::error::would_block\n          && ec != asio::error::try_again))\n      return 0;\n\n    // Wait for descriptor to become ready.\n    if (descriptor_ops::poll_read(d, 0, ec) < 0)\n      return 0;\n  }\n}\n\nbool non_blocking_read(int d, buf* bufs, std::size_t count,\n    asio::error_code& ec, std::size_t& bytes_transferred)\n{\n  for (;;)\n  {\n    // Read some data.\n    errno = 0;\n    signed_size_type bytes = error_wrapper(::readv(\n          d, bufs, static_cast<int>(count)), ec);\n\n    // Check for end of stream.\n    if (bytes == 0)\n    {\n      ec = asio::error::eof;\n      return true;\n    }\n\n    // Retry operation if interrupted by signal.\n    if (ec == asio::error::interrupted)\n      continue;\n\n    // Check if we need to run the operation again.\n    if (ec == asio::error::would_block\n        || ec == 
asio::error::try_again)\n      return false;\n\n    // Operation is complete.\n    if (bytes > 0)\n    {\n      ec = asio::error_code();\n      bytes_transferred = bytes;\n    }\n    else\n      bytes_transferred = 0;\n\n    return true;\n  }\n}\n\nstd::size_t sync_write(int d, state_type state, const buf* bufs,\n    std::size_t count, bool all_empty, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  // A request to write 0 bytes on a stream is a no-op.\n  if (all_empty)\n  {\n    ec = asio::error_code();\n    return 0;\n  }\n\n  // Write some data.\n  for (;;)\n  {\n    // Try to complete the operation without blocking.\n    errno = 0;\n    signed_size_type bytes = error_wrapper(::writev(\n          d, bufs, static_cast<int>(count)), ec);\n\n    // Check if operation succeeded.\n    if (bytes > 0)\n      return bytes;\n\n    // Operation failed.\n    if ((state & user_set_non_blocking)\n        || (ec != asio::error::would_block\n          && ec != asio::error::try_again))\n      return 0;\n\n    // Wait for descriptor to become ready.\n    if (descriptor_ops::poll_write(d, 0, ec) < 0)\n      return 0;\n  }\n}\n\nbool non_blocking_write(int d, const buf* bufs, std::size_t count,\n    asio::error_code& ec, std::size_t& bytes_transferred)\n{\n  for (;;)\n  {\n    // Write some data.\n    errno = 0;\n    signed_size_type bytes = error_wrapper(::writev(\n          d, bufs, static_cast<int>(count)), ec);\n\n    // Retry operation if interrupted by signal.\n    if (ec == asio::error::interrupted)\n      continue;\n\n    // Check if we need to run the operation again.\n    if (ec == asio::error::would_block\n        || ec == asio::error::try_again)\n      return false;\n\n    // Operation is complete.\n    if (bytes >= 0)\n    {\n      ec = asio::error_code();\n      bytes_transferred = bytes;\n    }\n    else\n      bytes_transferred = 0;\n\n    return true;\n  }\n}\n\nint ioctl(int d, state_type& state, long 
cmd,\n    ioctl_arg_type* arg, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return -1;\n  }\n\n  errno = 0;\n  int result = error_wrapper(::ioctl(d, cmd, arg), ec);\n\n  if (result >= 0)\n  {\n    ec = asio::error_code();\n\n    // When updating the non-blocking mode we always perform the ioctl syscall,\n    // even if the flags would otherwise indicate that the descriptor is\n    // already in the correct state. This ensures that the underlying\n    // descriptor is put into the state that has been requested by the user. If\n    // the ioctl syscall was successful then we need to update the flags to\n    // match.\n    if (cmd == static_cast<long>(FIONBIO))\n    {\n      if (*arg)\n      {\n        state |= user_set_non_blocking;\n      }\n      else\n      {\n        // Clearing the non-blocking mode always overrides any internally-set\n        // non-blocking flag. Any subsequent asynchronous operations will need\n        // to re-enable non-blocking I/O.\n        state &= ~(user_set_non_blocking | internal_non_blocking);\n      }\n    }\n  }\n\n  return result;\n}\n\nint fcntl(int d, int cmd, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return -1;\n  }\n\n  errno = 0;\n  int result = error_wrapper(::fcntl(d, cmd), ec);\n  if (result != -1)\n    ec = asio::error_code();\n  return result;\n}\n\nint fcntl(int d, int cmd, long arg, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return -1;\n  }\n\n  errno = 0;\n  int result = error_wrapper(::fcntl(d, cmd, arg), ec);\n  if (result != -1)\n    ec = asio::error_code();\n  return result;\n}\n\nint poll_read(int d, state_type state, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return -1;\n  }\n\n  pollfd fds;\n  fds.fd = d;\n  fds.events = POLLIN;\n  fds.revents = 0;\n  int timeout = (state & user_set_non_blocking) ? 
0 : -1;\n  errno = 0;\n  int result = error_wrapper(::poll(&fds, 1, timeout), ec);\n  if (result == 0)\n    ec = (state & user_set_non_blocking)\n      ? asio::error::would_block : asio::error_code();\n  else if (result > 0)\n    ec = asio::error_code();\n  return result;\n}\n\nint poll_write(int d, state_type state, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return -1;\n  }\n\n  pollfd fds;\n  fds.fd = d;\n  fds.events = POLLOUT;\n  fds.revents = 0;\n  int timeout = (state & user_set_non_blocking) ? 0 : -1;\n  errno = 0;\n  int result = error_wrapper(::poll(&fds, 1, timeout), ec);\n  if (result == 0)\n    ec = (state & user_set_non_blocking)\n      ? asio::error::would_block : asio::error_code();\n  else if (result > 0)\n    ec = asio::error_code();\n  return result;\n}\n\nint poll_error(int d, state_type state, asio::error_code& ec)\n{\n  if (d == -1)\n  {\n    ec = asio::error::bad_descriptor;\n    return -1;\n  }\n\n  pollfd fds;\n  fds.fd = d;\n  fds.events = POLLPRI | POLLERR | POLLHUP;\n  fds.revents = 0;\n  int timeout = (state & user_set_non_blocking) ? 0 : -1;\n  errno = 0;\n  int result = error_wrapper(::poll(&fds, 1, timeout), ec);\n  if (result == 0)\n    ec = (state & user_set_non_blocking)\n      ? asio::error::would_block : asio::error_code();\n  else if (result > 0)\n    ec = asio::error_code();\n  return result;\n}\n\n} // namespace descriptor_ops\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/dev_poll_reactor.hpp",
    "content": "//\n// detail/impl/dev_poll_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP\n#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_DEV_POLL)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Time_Traits>\nvoid dev_poll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_add_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid dev_poll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_remove_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid dev_poll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,\n    const typename Time_Traits::time_type& time,\n    typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  if (shutdown_)\n  {\n    scheduler_.post_immediate_completion(op, false);\n    return;\n  }\n\n  bool earliest = queue.enqueue_timer(time, timer, op);\n  scheduler_.work_started();\n  if (earliest)\n    interrupter_.interrupt();\n}\n\ntemplate <typename Time_Traits>\nstd::size_t dev_poll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& timer,\n    std::size_t max_cancelled)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n  return n;\n}\n\ntemplate <typename Time_Traits>\nvoid 
dev_poll_reactor::move_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& target,\n    typename timer_queue<Time_Traits>::per_timer_data& source)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  queue.cancel_timer(target, ops);\n  queue.move_timer(target, source);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_DEV_POLL)\n\n#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/dev_poll_reactor.ipp",
    "content": "//\n// detail/impl/dev_poll_reactor.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP\n#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_DEV_POLL)\n\n#include \"asio/detail/dev_poll_reactor.hpp\"\n#include \"asio/detail/assert.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ndev_poll_reactor::dev_poll_reactor(asio::execution_context& ctx)\n  : asio::detail::execution_context_service_base<dev_poll_reactor>(ctx),\n    scheduler_(use_service<scheduler>(ctx)),\n    mutex_(),\n    dev_poll_fd_(do_dev_poll_create()),\n    interrupter_(),\n    shutdown_(false)\n{\n  // Add the interrupter's descriptor to /dev/poll.\n  ::pollfd ev = { 0, 0, 0 };\n  ev.fd = interrupter_.read_descriptor();\n  ev.events = POLLIN | POLLERR;\n  ev.revents = 0;\n  ::write(dev_poll_fd_, &ev, sizeof(ev));\n}\n\ndev_poll_reactor::~dev_poll_reactor()\n{\n  shutdown();\n  ::close(dev_poll_fd_);\n}\n\nvoid dev_poll_reactor::shutdown()\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  shutdown_ = true;\n  lock.unlock();\n\n  op_queue<operation> ops;\n\n  for (int i = 0; i < max_ops; ++i)\n    op_queue_[i].get_all_operations(ops);\n\n  timer_queues_.get_all_timers(ops);\n\n  scheduler_.abandon_operations(ops);\n} \n\nvoid dev_poll_reactor::notify_fork(\n    asio::execution_context::fork_event fork_ev)\n{\n  if (fork_ev == asio::execution_context::fork_child)\n  {\n    detail::mutex::scoped_lock lock(mutex_);\n\n    if 
(dev_poll_fd_ != -1)\n      ::close(dev_poll_fd_);\n    dev_poll_fd_ = -1;\n    dev_poll_fd_ = do_dev_poll_create();\n\n    interrupter_.recreate();\n\n    // Add the interrupter's descriptor to /dev/poll.\n    ::pollfd ev = { 0, 0, 0 };\n    ev.fd = interrupter_.read_descriptor();\n    ev.events = POLLIN | POLLERR;\n    ev.revents = 0;\n    ::write(dev_poll_fd_, &ev, sizeof(ev));\n\n    // Re-register all descriptors with /dev/poll. The changes will be written\n    // to the /dev/poll descriptor the next time the reactor is run.\n    for (int i = 0; i < max_ops; ++i)\n    {\n      reactor_op_queue<socket_type>::iterator iter = op_queue_[i].begin();\n      reactor_op_queue<socket_type>::iterator end = op_queue_[i].end();\n      for (; iter != end; ++iter)\n      {\n        ::pollfd& pending_ev = add_pending_event_change(iter->first);\n        pending_ev.events |= POLLERR | POLLHUP;\n        switch (i)\n        {\n        case read_op: pending_ev.events |= POLLIN; break;\n        case write_op: pending_ev.events |= POLLOUT; break;\n        case except_op: pending_ev.events |= POLLPRI; break;\n        default: break;\n        }\n      }\n    }\n    interrupter_.interrupt();\n  }\n}\n\nvoid dev_poll_reactor::init_task()\n{\n  scheduler_.init_task();\n}\n\nint dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&)\n{\n  return 0;\n}\n\nint dev_poll_reactor::register_internal_descriptor(int op_type,\n    socket_type descriptor, per_descriptor_data&, reactor_op* op)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  op_queue_[op_type].enqueue_operation(descriptor, op);\n  ::pollfd& ev = add_pending_event_change(descriptor);\n  ev.events = POLLERR | POLLHUP;\n  switch (op_type)\n  {\n  case read_op: ev.events |= POLLIN; break;\n  case write_op: ev.events |= POLLOUT; break;\n  case except_op: ev.events |= POLLPRI; break;\n  default: break;\n  }\n  interrupter_.interrupt();\n\n  return 0;\n}\n\nvoid dev_poll_reactor::move_descriptor(socket_type,\n   
 dev_poll_reactor::per_descriptor_data&,\n    dev_poll_reactor::per_descriptor_data&)\n{\n}\n\nvoid dev_poll_reactor::start_op(int op_type, socket_type descriptor,\n    dev_poll_reactor::per_descriptor_data&, reactor_op* op,\n    bool is_continuation, bool allow_speculative)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  if (shutdown_)\n  {\n    post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  if (allow_speculative)\n  {\n    if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor))\n    {\n      if (!op_queue_[op_type].has_operation(descriptor))\n      {\n        if (op->perform())\n        {\n          lock.unlock();\n          scheduler_.post_immediate_completion(op, is_continuation);\n          return;\n        }\n      }\n    }\n  }\n\n  bool first = op_queue_[op_type].enqueue_operation(descriptor, op);\n  scheduler_.work_started();\n  if (first)\n  {\n    ::pollfd& ev = add_pending_event_change(descriptor);\n    ev.events = POLLERR | POLLHUP;\n    if (op_type == read_op\n        || op_queue_[read_op].has_operation(descriptor))\n      ev.events |= POLLIN;\n    if (op_type == write_op\n        || op_queue_[write_op].has_operation(descriptor))\n      ev.events |= POLLOUT;\n    if (op_type == except_op\n        || op_queue_[except_op].has_operation(descriptor))\n      ev.events |= POLLPRI;\n    interrupter_.interrupt();\n  }\n}\n\nvoid dev_poll_reactor::cancel_ops(socket_type descriptor,\n    dev_poll_reactor::per_descriptor_data&)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  cancel_ops_unlocked(descriptor, asio::error::operation_aborted);\n}\n\nvoid dev_poll_reactor::deregister_descriptor(socket_type descriptor,\n    dev_poll_reactor::per_descriptor_data&, bool)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  // Remove the descriptor from /dev/poll.\n  ::pollfd& ev = add_pending_event_change(descriptor);\n  ev.events = POLLREMOVE;\n  interrupter_.interrupt();\n\n  // Cancel any outstanding 
operations associated with the descriptor.\n  cancel_ops_unlocked(descriptor, asio::error::operation_aborted);\n}\n\nvoid dev_poll_reactor::deregister_internal_descriptor(\n    socket_type descriptor, dev_poll_reactor::per_descriptor_data&)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  // Remove the descriptor from /dev/poll. Since this function is only called\n  // during a fork, we can apply the change immediately.\n  ::pollfd ev = { 0, 0, 0 };\n  ev.fd = descriptor;\n  ev.events = POLLREMOVE;\n  ev.revents = 0;\n  ::write(dev_poll_fd_, &ev, sizeof(ev));\n\n  // Destroy all operations associated with the descriptor.\n  op_queue<operation> ops;\n  asio::error_code ec;\n  for (int i = 0; i < max_ops; ++i)\n    op_queue_[i].cancel_operations(descriptor, ops, ec);\n}\n\nvoid dev_poll_reactor::cleanup_descriptor_data(\n    dev_poll_reactor::per_descriptor_data&)\n{\n}\n\nvoid dev_poll_reactor::run(long usec, op_queue<operation>& ops)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  // We can return immediately if there's no work to do and the reactor is\n  // not supposed to block.\n  if (usec == 0 && op_queue_[read_op].empty() && op_queue_[write_op].empty()\n      && op_queue_[except_op].empty() && timer_queues_.all_empty())\n    return;\n\n  // Write the pending event registration changes to the /dev/poll descriptor.\n  std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size();\n  if (events_size > 0)\n  {\n    errno = 0;\n    int result = ::write(dev_poll_fd_,\n        &pending_event_changes_[0], events_size);\n    if (result != static_cast<int>(events_size))\n    {\n      asio::error_code ec = asio::error_code(\n          errno, asio::error::get_system_category());\n      for (std::size_t i = 0; i < pending_event_changes_.size(); ++i)\n      {\n        int descriptor = pending_event_changes_[i].fd;\n        for (int j = 0; j < max_ops; ++j)\n          op_queue_[j].cancel_operations(descriptor, ops, ec);\n      }\n    }\n    
pending_event_changes_.clear();\n    pending_event_change_index_.clear();\n  }\n\n  // Calculate timeout.\n  int timeout;\n  if (usec == 0)\n    timeout = 0;\n  else\n  {\n    timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);\n    timeout = get_timeout(timeout);\n  }\n  lock.unlock();\n\n  // Block on the /dev/poll descriptor.\n  ::pollfd events[128] = { { 0, 0, 0 } };\n  ::dvpoll dp = { 0, 0, 0 };\n  dp.dp_fds = events;\n  dp.dp_nfds = 128;\n  dp.dp_timeout = timeout;\n  int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp);\n\n  lock.lock();\n\n  // Dispatch the waiting events.\n  for (int i = 0; i < num_events; ++i)\n  {\n    int descriptor = events[i].fd;\n    if (descriptor == interrupter_.read_descriptor())\n    {\n      interrupter_.reset();\n    }\n    else\n    {\n      bool more_reads = false;\n      bool more_writes = false;\n      bool more_except = false;\n\n      // Exception operations must be processed first to ensure that any\n      // out-of-band data is read before normal data.\n      if (events[i].events & (POLLPRI | POLLERR | POLLHUP))\n        more_except =\n          op_queue_[except_op].perform_operations(descriptor, ops);\n      else\n        more_except = op_queue_[except_op].has_operation(descriptor);\n\n      if (events[i].events & (POLLIN | POLLERR | POLLHUP))\n        more_reads = op_queue_[read_op].perform_operations(descriptor, ops);\n      else\n        more_reads = op_queue_[read_op].has_operation(descriptor);\n\n      if (events[i].events & (POLLOUT | POLLERR | POLLHUP))\n        more_writes = op_queue_[write_op].perform_operations(descriptor, ops);\n      else\n        more_writes = op_queue_[write_op].has_operation(descriptor);\n\n      if ((events[i].events & (POLLERR | POLLHUP)) != 0\n            && !more_except && !more_reads && !more_writes)\n      {\n        // If we have an event and no operations associated with the\n        // descriptor then we need to delete the descriptor from /dev/poll.\n        // The poll 
operation can produce POLLHUP or POLLERR events when there\n        // is no operation pending, so if we do not remove the descriptor we\n        // can end up in a tight polling loop.\n        ::pollfd ev = { 0, 0, 0 };\n        ev.fd = descriptor;\n        ev.events = POLLREMOVE;\n        ev.revents = 0;\n        ::write(dev_poll_fd_, &ev, sizeof(ev));\n      }\n      else\n      {\n        ::pollfd ev = { 0, 0, 0 };\n        ev.fd = descriptor;\n        ev.events = POLLERR | POLLHUP;\n        if (more_reads)\n          ev.events |= POLLIN;\n        if (more_writes)\n          ev.events |= POLLOUT;\n        if (more_except)\n          ev.events |= POLLPRI;\n        ev.revents = 0;\n        int result = ::write(dev_poll_fd_, &ev, sizeof(ev));\n        if (result != sizeof(ev))\n        {\n          asio::error_code ec(errno,\n              asio::error::get_system_category());\n          for (int j = 0; j < max_ops; ++j)\n            op_queue_[j].cancel_operations(descriptor, ops, ec);\n        }\n      }\n    }\n  }\n  timer_queues_.get_ready_timers(ops);\n}\n\nvoid dev_poll_reactor::interrupt()\n{\n  interrupter_.interrupt();\n}\n\nint dev_poll_reactor::do_dev_poll_create()\n{\n  int fd = ::open(\"/dev/poll\", O_RDWR);\n  if (fd == -1)\n  {\n    asio::error_code ec(errno,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"/dev/poll\");\n  }\n  return fd;\n}\n\nvoid dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.insert(&queue);\n}\n\nvoid dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.erase(&queue);\n}\n\nint dev_poll_reactor::get_timeout(int msec)\n{\n  // By default we will wait no longer than 5 minutes. 
This will ensure that\n  // any changes to the system clock are detected after no longer than this.\n  const int max_msec = 5 * 60 * 1000;\n  return timer_queues_.wait_duration_msec(\n      (msec < 0 || max_msec < msec) ? max_msec : msec);\n}\n\nvoid dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor,\n    const asio::error_code& ec)\n{\n  bool need_interrupt = false;\n  op_queue<operation> ops;\n  for (int i = 0; i < max_ops; ++i)\n    need_interrupt = op_queue_[i].cancel_operations(\n        descriptor, ops, ec) || need_interrupt;\n  scheduler_.post_deferred_completions(ops);\n  if (need_interrupt)\n    interrupter_.interrupt();\n}\n\n::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor)\n{\n  hash_map<int, std::size_t>::iterator iter\n    = pending_event_change_index_.find(descriptor);\n  if (iter == pending_event_change_index_.end())\n  {\n    std::size_t index = pending_event_changes_.size();\n    pending_event_changes_.reserve(pending_event_changes_.size() + 1);\n    pending_event_change_index_.insert(std::make_pair(descriptor, index));\n    pending_event_changes_.push_back(::pollfd());\n    pending_event_changes_[index].fd = descriptor;\n    pending_event_changes_[index].revents = 0;\n    return pending_event_changes_[index];\n  }\n  else\n  {\n    return pending_event_changes_[iter->second];\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_DEV_POLL)\n\n#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/epoll_reactor.hpp",
    "content": "//\n// detail/impl/epoll_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP\n#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#if defined(ASIO_HAS_EPOLL)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Time_Traits>\nvoid epoll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_add_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid epoll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_remove_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid epoll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,\n    const typename Time_Traits::time_type& time,\n    typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)\n{\n  mutex::scoped_lock lock(mutex_);\n\n  if (shutdown_)\n  {\n    scheduler_.post_immediate_completion(op, false);\n    return;\n  }\n\n  bool earliest = queue.enqueue_timer(time, timer, op);\n  scheduler_.work_started();\n  if (earliest)\n    update_timeout();\n}\n\ntemplate <typename Time_Traits>\nstd::size_t epoll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& timer,\n    std::size_t max_cancelled)\n{\n  mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n  return n;\n}\n\ntemplate <typename Time_Traits>\nvoid epoll_reactor::move_timer(timer_queue<Time_Traits>& queue,\n    typename 
timer_queue<Time_Traits>::per_timer_data& target,\n    typename timer_queue<Time_Traits>::per_timer_data& source)\n{\n  mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  queue.cancel_timer(target, ops);\n  queue.move_timer(target, source);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_EPOLL)\n\n#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/epoll_reactor.ipp",
    "content": "//\n// detail/impl/epoll_reactor.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP\n#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_EPOLL)\n\n#include <cstddef>\n#include <sys/epoll.h>\n#include \"asio/detail/epoll_reactor.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#if defined(ASIO_HAS_TIMERFD)\n# include <sys/timerfd.h>\n#endif // defined(ASIO_HAS_TIMERFD)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nepoll_reactor::epoll_reactor(asio::execution_context& ctx)\n  : execution_context_service_base<epoll_reactor>(ctx),\n    scheduler_(use_service<scheduler>(ctx)),\n    mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(\n          REACTOR_REGISTRATION, scheduler_.concurrency_hint())),\n    interrupter_(),\n    epoll_fd_(do_epoll_create()),\n    timer_fd_(do_timerfd_create()),\n    shutdown_(false),\n    registered_descriptors_mutex_(mutex_.enabled())\n{\n  // Add the interrupter's descriptor to epoll.\n  epoll_event ev = { 0, { 0 } };\n  ev.events = EPOLLIN | EPOLLERR | EPOLLET;\n  ev.data.ptr = &interrupter_;\n  epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);\n  interrupter_.interrupt();\n\n  // Add the timer descriptor to epoll.\n  if (timer_fd_ != -1)\n  {\n    ev.events = EPOLLIN | EPOLLERR;\n    ev.data.ptr = &timer_fd_;\n    epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);\n  }\n}\n\nepoll_reactor::~epoll_reactor()\n{\n  if (epoll_fd_ != -1)\n    close(epoll_fd_);\n  if (timer_fd_ != -1)\n    
close(timer_fd_);\n}\n\nvoid epoll_reactor::shutdown()\n{\n  mutex::scoped_lock lock(mutex_);\n  shutdown_ = true;\n  lock.unlock();\n\n  op_queue<operation> ops;\n\n  while (descriptor_state* state = registered_descriptors_.first())\n  {\n    for (int i = 0; i < max_ops; ++i)\n      ops.push(state->op_queue_[i]);\n    state->shutdown_ = true;\n    registered_descriptors_.free(state);\n  }\n\n  timer_queues_.get_all_timers(ops);\n\n  scheduler_.abandon_operations(ops);\n}\n\nvoid epoll_reactor::notify_fork(\n    asio::execution_context::fork_event fork_ev)\n{\n  if (fork_ev == asio::execution_context::fork_child)\n  {\n    if (epoll_fd_ != -1)\n      ::close(epoll_fd_);\n    epoll_fd_ = -1;\n    epoll_fd_ = do_epoll_create();\n\n    if (timer_fd_ != -1)\n      ::close(timer_fd_);\n    timer_fd_ = -1;\n    timer_fd_ = do_timerfd_create();\n\n    interrupter_.recreate();\n\n    // Add the interrupter's descriptor to epoll.\n    epoll_event ev = { 0, { 0 } };\n    ev.events = EPOLLIN | EPOLLERR | EPOLLET;\n    ev.data.ptr = &interrupter_;\n    epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);\n    interrupter_.interrupt();\n\n    // Add the timer descriptor to epoll.\n    if (timer_fd_ != -1)\n    {\n      ev.events = EPOLLIN | EPOLLERR;\n      ev.data.ptr = &timer_fd_;\n      epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);\n    }\n\n    update_timeout();\n\n    // Re-register all descriptors with epoll.\n    mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);\n    for (descriptor_state* state = registered_descriptors_.first();\n        state != 0; state = state->next_)\n    {\n      ev.events = state->registered_events_;\n      ev.data.ptr = state;\n      int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev);\n      if (result != 0)\n      {\n        asio::error_code ec(errno,\n            asio::error::get_system_category());\n        asio::detail::throw_error(ec, \"epoll re-registration\");\n      }\n  
  }\n  }\n}\n\nvoid epoll_reactor::init_task()\n{\n  scheduler_.init_task();\n}\n\nint epoll_reactor::register_descriptor(socket_type descriptor,\n    epoll_reactor::per_descriptor_data& descriptor_data)\n{\n  descriptor_data = allocate_descriptor_state();\n\n  ASIO_HANDLER_REACTOR_REGISTRATION((\n        context(), static_cast<uintmax_t>(descriptor),\n        reinterpret_cast<uintmax_t>(descriptor_data)));\n\n  {\n    mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n    descriptor_data->reactor_ = this;\n    descriptor_data->descriptor_ = descriptor;\n    descriptor_data->shutdown_ = false;\n    for (int i = 0; i < max_ops; ++i)\n      descriptor_data->try_speculative_[i] = true;\n  }\n\n  epoll_event ev = { 0, { 0 } };\n  ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;\n  descriptor_data->registered_events_ = ev.events;\n  ev.data.ptr = descriptor_data;\n  int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);\n  if (result != 0)\n  {\n    if (errno == EPERM)\n    {\n      // This file descriptor type is not supported by epoll. However, if it is\n      // a regular file then operations on it will not block. 
We will allow\n      // this descriptor to be used and fail later if an operation on it would\n      // otherwise require a trip through the reactor.\n      descriptor_data->registered_events_ = 0;\n      return 0;\n    }\n    return errno;\n  }\n\n  return 0;\n}\n\nint epoll_reactor::register_internal_descriptor(\n    int op_type, socket_type descriptor,\n    epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)\n{\n  descriptor_data = allocate_descriptor_state();\n\n  ASIO_HANDLER_REACTOR_REGISTRATION((\n        context(), static_cast<uintmax_t>(descriptor),\n        reinterpret_cast<uintmax_t>(descriptor_data)));\n\n  {\n    mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n    descriptor_data->reactor_ = this;\n    descriptor_data->descriptor_ = descriptor;\n    descriptor_data->shutdown_ = false;\n    descriptor_data->op_queue_[op_type].push(op);\n    for (int i = 0; i < max_ops; ++i)\n      descriptor_data->try_speculative_[i] = true;\n  }\n\n  epoll_event ev = { 0, { 0 } };\n  ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;\n  descriptor_data->registered_events_ = ev.events;\n  ev.data.ptr = descriptor_data;\n  int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);\n  if (result != 0)\n    return errno;\n\n  return 0;\n}\n\nvoid epoll_reactor::move_descriptor(socket_type,\n    epoll_reactor::per_descriptor_data& target_descriptor_data,\n    epoll_reactor::per_descriptor_data& source_descriptor_data)\n{\n  target_descriptor_data = source_descriptor_data;\n  source_descriptor_data = 0;\n}\n\nvoid epoll_reactor::start_op(int op_type, socket_type descriptor,\n    epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op,\n    bool is_continuation, bool allow_speculative)\n{\n  if (!descriptor_data)\n  {\n    op->ec_ = asio::error::bad_descriptor;\n    post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n  if 
(descriptor_data->shutdown_)\n  {\n    post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  if (descriptor_data->op_queue_[op_type].empty())\n  {\n    if (allow_speculative\n        && (op_type != read_op\n          || descriptor_data->op_queue_[except_op].empty()))\n    {\n      if (descriptor_data->try_speculative_[op_type])\n      {\n        if (reactor_op::status status = op->perform())\n        {\n          if (status == reactor_op::done_and_exhausted)\n            if (descriptor_data->registered_events_ != 0)\n              descriptor_data->try_speculative_[op_type] = false;\n          descriptor_lock.unlock();\n          scheduler_.post_immediate_completion(op, is_continuation);\n          return;\n        }\n      }\n\n      if (descriptor_data->registered_events_ == 0)\n      {\n        op->ec_ = asio::error::operation_not_supported;\n        scheduler_.post_immediate_completion(op, is_continuation);\n        return;\n      }\n\n      if (op_type == write_op)\n      {\n        if ((descriptor_data->registered_events_ & EPOLLOUT) == 0)\n        {\n          epoll_event ev = { 0, { 0 } };\n          ev.events = descriptor_data->registered_events_ | EPOLLOUT;\n          ev.data.ptr = descriptor_data;\n          if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0)\n          {\n            descriptor_data->registered_events_ |= ev.events;\n          }\n          else\n          {\n            op->ec_ = asio::error_code(errno,\n                asio::error::get_system_category());\n            scheduler_.post_immediate_completion(op, is_continuation);\n            return;\n          }\n        }\n      }\n    }\n    else if (descriptor_data->registered_events_ == 0)\n    {\n      op->ec_ = asio::error::operation_not_supported;\n      scheduler_.post_immediate_completion(op, is_continuation);\n      return;\n    }\n    else\n    {\n      if (op_type == write_op)\n      {\n        descriptor_data->registered_events_ |= EPOLLOUT;\n      
}\n\n      epoll_event ev = { 0, { 0 } };\n      ev.events = descriptor_data->registered_events_;\n      ev.data.ptr = descriptor_data;\n      epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);\n    }\n  }\n\n  descriptor_data->op_queue_[op_type].push(op);\n  scheduler_.work_started();\n}\n\nvoid epoll_reactor::cancel_ops(socket_type,\n    epoll_reactor::per_descriptor_data& descriptor_data)\n{\n  if (!descriptor_data)\n    return;\n\n  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n  op_queue<operation> ops;\n  for (int i = 0; i < max_ops; ++i)\n  {\n    while (reactor_op* op = descriptor_data->op_queue_[i].front())\n    {\n      op->ec_ = asio::error::operation_aborted;\n      descriptor_data->op_queue_[i].pop();\n      ops.push(op);\n    }\n  }\n\n  descriptor_lock.unlock();\n\n  scheduler_.post_deferred_completions(ops);\n}\n\nvoid epoll_reactor::deregister_descriptor(socket_type descriptor,\n    epoll_reactor::per_descriptor_data& descriptor_data, bool closing)\n{\n  if (!descriptor_data)\n    return;\n\n  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n  if (!descriptor_data->shutdown_)\n  {\n    if (closing)\n    {\n      // The descriptor will be automatically removed from the epoll set when\n      // it is closed.\n    }\n    else if (descriptor_data->registered_events_ != 0)\n    {\n      epoll_event ev = { 0, { 0 } };\n      epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);\n    }\n\n    op_queue<operation> ops;\n    for (int i = 0; i < max_ops; ++i)\n    {\n      while (reactor_op* op = descriptor_data->op_queue_[i].front())\n      {\n        op->ec_ = asio::error::operation_aborted;\n        descriptor_data->op_queue_[i].pop();\n        ops.push(op);\n      }\n    }\n\n    descriptor_data->descriptor_ = -1;\n    descriptor_data->shutdown_ = true;\n\n    descriptor_lock.unlock();\n\n    ASIO_HANDLER_REACTOR_DEREGISTRATION((\n          context(), static_cast<uintmax_t>(descriptor),\n          
reinterpret_cast<uintmax_t>(descriptor_data)));\n\n    scheduler_.post_deferred_completions(ops);\n\n    // Leave descriptor_data set so that it will be freed by the subsequent\n    // call to cleanup_descriptor_data.\n  }\n  else\n  {\n    // We are shutting down, so prevent cleanup_descriptor_data from freeing\n    // the descriptor_data object and let the destructor free it instead.\n    descriptor_data = 0;\n  }\n}\n\nvoid epoll_reactor::deregister_internal_descriptor(socket_type descriptor,\n    epoll_reactor::per_descriptor_data& descriptor_data)\n{\n  if (!descriptor_data)\n    return;\n\n  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n  if (!descriptor_data->shutdown_)\n  {\n    epoll_event ev = { 0, { 0 } };\n    epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);\n\n    op_queue<operation> ops;\n    for (int i = 0; i < max_ops; ++i)\n      ops.push(descriptor_data->op_queue_[i]);\n\n    descriptor_data->descriptor_ = -1;\n    descriptor_data->shutdown_ = true;\n\n    descriptor_lock.unlock();\n\n    ASIO_HANDLER_REACTOR_DEREGISTRATION((\n          context(), static_cast<uintmax_t>(descriptor),\n          reinterpret_cast<uintmax_t>(descriptor_data)));\n\n    // Leave descriptor_data set so that it will be freed by the subsequent\n    // call to cleanup_descriptor_data.\n  }\n  else\n  {\n    // We are shutting down, so prevent cleanup_descriptor_data from freeing\n    // the descriptor_data object and let the destructor free it instead.\n    descriptor_data = 0;\n  }\n}\n\nvoid epoll_reactor::cleanup_descriptor_data(\n    per_descriptor_data& descriptor_data)\n{\n  if (descriptor_data)\n  {\n    free_descriptor_state(descriptor_data);\n    descriptor_data = 0;\n  }\n}\n\nvoid epoll_reactor::run(long usec, op_queue<operation>& ops)\n{\n  // This code relies on the fact that the scheduler queues the reactor task\n  // behind all descriptor operations generated by this function. 
This means,\n  // that by the time we reach this point, any previously returned descriptor\n  // operations have already been dequeued. Therefore it is now safe for us to\n  // reuse and return them for the scheduler to queue again.\n\n  // Calculate timeout. Check the timer queues only if timerfd is not in use.\n  int timeout;\n  if (usec == 0)\n    timeout = 0;\n  else\n  {\n    timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);\n    if (timer_fd_ == -1)\n    {\n      mutex::scoped_lock lock(mutex_);\n      timeout = get_timeout(timeout);\n    }\n  }\n\n  // Block on the epoll descriptor.\n  epoll_event events[128];\n  int num_events = epoll_wait(epoll_fd_, events, 128, timeout);\n\n#if defined(ASIO_ENABLE_HANDLER_TRACKING)\n  // Trace the waiting events.\n  for (int i = 0; i < num_events; ++i)\n  {\n    void* ptr = events[i].data.ptr;\n    if (ptr == &interrupter_)\n    {\n      // Ignore.\n    }\n# if defined(ASIO_HAS_TIMERFD)\n    else if (ptr == &timer_fd_)\n    {\n      // Ignore.\n    }\n# endif // defined(ASIO_HAS_TIMERFD)\n    else\n    {\n      unsigned event_mask = 0;\n      if ((events[i].events & EPOLLIN) != 0)\n        event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT;\n      if ((events[i].events & EPOLLOUT))\n        event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT;\n      if ((events[i].events & (EPOLLERR | EPOLLHUP)) != 0)\n        event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT;\n      ASIO_HANDLER_REACTOR_EVENTS((context(),\n            reinterpret_cast<uintmax_t>(ptr), event_mask));\n    }\n  }\n#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)\n\n#if defined(ASIO_HAS_TIMERFD)\n  bool check_timers = (timer_fd_ == -1);\n#else // defined(ASIO_HAS_TIMERFD)\n  bool check_timers = true;\n#endif // defined(ASIO_HAS_TIMERFD)\n\n  // Dispatch the waiting events.\n  for (int i = 0; i < num_events; ++i)\n  {\n    void* ptr = events[i].data.ptr;\n    if (ptr == &interrupter_)\n    {\n      // No need to reset the interrupter since we're leaving the descriptor\n 
     // in a ready-to-read state and relying on edge-triggered notifications\n      // to make it so that we only get woken up when the descriptor's epoll\n      // registration is updated.\n\n#if defined(ASIO_HAS_TIMERFD)\n      if (timer_fd_ == -1)\n        check_timers = true;\n#else // defined(ASIO_HAS_TIMERFD)\n      check_timers = true;\n#endif // defined(ASIO_HAS_TIMERFD)\n    }\n#if defined(ASIO_HAS_TIMERFD)\n    else if (ptr == &timer_fd_)\n    {\n      check_timers = true;\n    }\n#endif // defined(ASIO_HAS_TIMERFD)\n    else\n    {\n      // The descriptor operation doesn't count as work in and of itself, so we\n      // don't call work_started() here. This still allows the scheduler to\n      // stop if the only remaining operations are descriptor operations.\n      descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);\n      if (!ops.is_enqueued(descriptor_data))\n      {\n        descriptor_data->set_ready_events(events[i].events);\n        ops.push(descriptor_data);\n      }\n      else\n      {\n        descriptor_data->add_ready_events(events[i].events);\n      }\n    }\n  }\n\n  if (check_timers)\n  {\n    mutex::scoped_lock common_lock(mutex_);\n    timer_queues_.get_ready_timers(ops);\n\n#if defined(ASIO_HAS_TIMERFD)\n    if (timer_fd_ != -1)\n    {\n      itimerspec new_timeout;\n      itimerspec old_timeout;\n      int flags = get_timeout(new_timeout);\n      timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);\n    }\n#endif // defined(ASIO_HAS_TIMERFD)\n  }\n}\n\nvoid epoll_reactor::interrupt()\n{\n  epoll_event ev = { 0, { 0 } };\n  ev.events = EPOLLIN | EPOLLERR | EPOLLET;\n  ev.data.ptr = &interrupter_;\n  epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev);\n}\n\nint epoll_reactor::do_epoll_create()\n{\n#if defined(EPOLL_CLOEXEC)\n  int fd = epoll_create1(EPOLL_CLOEXEC);\n#else // defined(EPOLL_CLOEXEC)\n  int fd = -1;\n  errno = EINVAL;\n#endif // defined(EPOLL_CLOEXEC)\n\n  if (fd == -1 
&& (errno == EINVAL || errno == ENOSYS))\n  {\n    fd = epoll_create(epoll_size);\n    if (fd != -1)\n      ::fcntl(fd, F_SETFD, FD_CLOEXEC);\n  }\n\n  if (fd == -1)\n  {\n    asio::error_code ec(errno,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"epoll\");\n  }\n\n  return fd;\n}\n\nint epoll_reactor::do_timerfd_create()\n{\n#if defined(ASIO_HAS_TIMERFD)\n# if defined(TFD_CLOEXEC)\n  int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);\n# else // defined(TFD_CLOEXEC)\n  int fd = -1;\n  errno = EINVAL;\n# endif // defined(TFD_CLOEXEC)\n\n  if (fd == -1 && errno == EINVAL)\n  {\n    fd = timerfd_create(CLOCK_MONOTONIC, 0);\n    if (fd != -1)\n      ::fcntl(fd, F_SETFD, FD_CLOEXEC);\n  }\n\n  return fd;\n#else // defined(ASIO_HAS_TIMERFD)\n  return -1;\n#endif // defined(ASIO_HAS_TIMERFD)\n}\n\nepoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()\n{\n  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);\n  return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING(\n        REACTOR_IO, scheduler_.concurrency_hint()));\n}\n\nvoid epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)\n{\n  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);\n  registered_descriptors_.free(s);\n}\n\nvoid epoll_reactor::do_add_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.insert(&queue);\n}\n\nvoid epoll_reactor::do_remove_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.erase(&queue);\n}\n\nvoid epoll_reactor::update_timeout()\n{\n#if defined(ASIO_HAS_TIMERFD)\n  if (timer_fd_ != -1)\n  {\n    itimerspec new_timeout;\n    itimerspec old_timeout;\n    int flags = get_timeout(new_timeout);\n    timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);\n    return;\n  }\n#endif // defined(ASIO_HAS_TIMERFD)\n  interrupt();\n}\n\nint epoll_reactor::get_timeout(int msec)\n{\n 
 // By default we will wait no longer than 5 minutes. This will ensure that\n  // any changes to the system clock are detected after no longer than this.\n  const int max_msec = 5 * 60 * 1000;\n  return timer_queues_.wait_duration_msec(\n      (msec < 0 || max_msec < msec) ? max_msec : msec);\n}\n\n#if defined(ASIO_HAS_TIMERFD)\nint epoll_reactor::get_timeout(itimerspec& ts)\n{\n  ts.it_interval.tv_sec = 0;\n  ts.it_interval.tv_nsec = 0;\n\n  long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);\n  ts.it_value.tv_sec = usec / 1000000;\n  ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1;\n\n  return usec ? 0 : TFD_TIMER_ABSTIME;\n}\n#endif // defined(ASIO_HAS_TIMERFD)\n\nstruct epoll_reactor::perform_io_cleanup_on_block_exit\n{\n  explicit perform_io_cleanup_on_block_exit(epoll_reactor* r)\n    : reactor_(r), first_op_(0)\n  {\n  }\n\n  ~perform_io_cleanup_on_block_exit()\n  {\n    if (first_op_)\n    {\n      // Post the remaining completed operations for invocation.\n      if (!ops_.empty())\n        reactor_->scheduler_.post_deferred_completions(ops_);\n\n      // A user-initiated operation has completed, but there's no need to\n      // explicitly call work_finished() here. 
Instead, we'll take advantage of\n      // the fact that the scheduler will call work_finished() once we return.\n    }\n    else\n    {\n      // No user-initiated operations have completed, so we need to compensate\n      // for the work_finished() call that the scheduler will make once this\n      // operation returns.\n      reactor_->scheduler_.compensating_work_started();\n    }\n  }\n\n  epoll_reactor* reactor_;\n  op_queue<operation> ops_;\n  operation* first_op_;\n};\n\nepoll_reactor::descriptor_state::descriptor_state(bool locking)\n  : operation(&epoll_reactor::descriptor_state::do_complete),\n    mutex_(locking)\n{\n}\n\noperation* epoll_reactor::descriptor_state::perform_io(uint32_t events)\n{\n  mutex_.lock();\n  perform_io_cleanup_on_block_exit io_cleanup(reactor_);\n  mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock);\n\n  // Exception operations must be processed first to ensure that any\n  // out-of-band data is read before normal data.\n  static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };\n  for (int j = max_ops - 1; j >= 0; --j)\n  {\n    if (events & (flag[j] | EPOLLERR | EPOLLHUP))\n    {\n      try_speculative_[j] = true;\n      while (reactor_op* op = op_queue_[j].front())\n      {\n        if (reactor_op::status status = op->perform())\n        {\n          op_queue_[j].pop();\n          io_cleanup.ops_.push(op);\n          if (status == reactor_op::done_and_exhausted)\n          {\n            try_speculative_[j] = false;\n            break;\n          }\n        }\n        else\n          break;\n      }\n    }\n  }\n\n  // The first operation will be returned for completion now. 
The others will\n  // be posted for later by the io_cleanup object's destructor.\n  io_cleanup.first_op_ = io_cleanup.ops_.front();\n  io_cleanup.ops_.pop();\n  return io_cleanup.first_op_;\n}\n\nvoid epoll_reactor::descriptor_state::do_complete(\n    void* owner, operation* base,\n    const asio::error_code& ec, std::size_t bytes_transferred)\n{\n  if (owner)\n  {\n    descriptor_state* descriptor_data = static_cast<descriptor_state*>(base);\n    uint32_t events = static_cast<uint32_t>(bytes_transferred);\n    if (operation* op = descriptor_data->perform_io(events))\n    {\n      op->complete(owner, ec, 0);\n    }\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_EPOLL)\n\n#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/eventfd_select_interrupter.ipp",
    "content": "//\n// detail/impl/eventfd_select_interrupter.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP\n#define ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_EVENTFD)\n\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <fcntl.h>\n#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8\n# include <asm/unistd.h>\n#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8\n# include <sys/eventfd.h>\n#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8\n#include \"asio/detail/cstdint.hpp\"\n#include \"asio/detail/eventfd_select_interrupter.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\neventfd_select_interrupter::eventfd_select_interrupter()\n{\n  open_descriptors();\n}\n\nvoid eventfd_select_interrupter::open_descriptors()\n{\n#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8\n  write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0);\n  if (read_descriptor_ != -1)\n  {\n    ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);\n    ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);\n  }\n#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8\n# if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)\n  write_descriptor_ = read_descriptor_ =\n    ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);\n# else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)\n  errno = EINVAL;\n  write_descriptor_ = read_descriptor_ = -1;\n# endif // defined(EFD_CLOEXEC) && 
defined(EFD_NONBLOCK)\n  if (read_descriptor_ == -1 && errno == EINVAL)\n  {\n    write_descriptor_ = read_descriptor_ = ::eventfd(0, 0);\n    if (read_descriptor_ != -1)\n    {\n      ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);\n      ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);\n    }\n  }\n#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8\n\n  if (read_descriptor_ == -1)\n  {\n    int pipe_fds[2];\n    if (pipe(pipe_fds) == 0)\n    {\n      read_descriptor_ = pipe_fds[0];\n      ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);\n      ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);\n      write_descriptor_ = pipe_fds[1];\n      ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);\n      ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);\n    }\n    else\n    {\n      asio::error_code ec(errno,\n          asio::error::get_system_category());\n      asio::detail::throw_error(ec, \"eventfd_select_interrupter\");\n    }\n  }\n}\n\neventfd_select_interrupter::~eventfd_select_interrupter()\n{\n  close_descriptors();\n}\n\nvoid eventfd_select_interrupter::close_descriptors()\n{\n  if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_)\n    ::close(write_descriptor_);\n  if (read_descriptor_ != -1)\n    ::close(read_descriptor_);\n}\n\nvoid eventfd_select_interrupter::recreate()\n{\n  close_descriptors();\n\n  write_descriptor_ = -1;\n  read_descriptor_ = -1;\n\n  open_descriptors();\n}\n\nvoid eventfd_select_interrupter::interrupt()\n{\n  uint64_t counter(1UL);\n  int result = ::write(write_descriptor_, &counter, sizeof(uint64_t));\n  (void)result;\n}\n\nbool eventfd_select_interrupter::reset()\n{\n  if (write_descriptor_ == read_descriptor_)\n  {\n    for (;;)\n    {\n      // Only perform one read. 
The kernel maintains an atomic counter.\n      uint64_t counter(0);\n      errno = 0;\n      int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t));\n      if (bytes_read < 0 && errno == EINTR)\n        continue;\n      bool was_interrupted = (bytes_read > 0);\n      return was_interrupted;\n    }\n  }\n  else\n  {\n    for (;;)\n    {\n      // Clear all data from the pipe.\n      char data[1024];\n      int bytes_read = ::read(read_descriptor_, data, sizeof(data));\n      if (bytes_read < 0 && errno == EINTR)\n        continue;\n      bool was_interrupted = (bytes_read > 0);\n      while (bytes_read == sizeof(data))\n        bytes_read = ::read(read_descriptor_, data, sizeof(data));\n      return was_interrupted;\n    }\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_EVENTFD)\n\n#endif // ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/handler_tracking.ipp",
    "content": "//\n// detail/impl/handler_tracking.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP\n#define ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_CUSTOM_HANDLER_TRACKING)\n\n// The handler tracking implementation is provided by the user-specified header.\n\n#elif defined(ASIO_ENABLE_HANDLER_TRACKING)\n\n#include <cstdarg>\n#include <cstdio>\n#include \"asio/detail/handler_tracking.hpp\"\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME)\n# include \"asio/time_traits.hpp\"\n#elif defined(ASIO_HAS_CHRONO)\n# include \"asio/detail/chrono.hpp\"\n# include \"asio/detail/chrono_time_traits.hpp\"\n# include \"asio/wait_traits.hpp\"\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include \"asio/detail/socket_types.hpp\"\n#elif !defined(ASIO_WINDOWS)\n# include <unistd.h>\n#endif // !defined(ASIO_WINDOWS)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct handler_tracking_timestamp\n{\n  uint64_t seconds;\n  uint64_t microseconds;\n\n  handler_tracking_timestamp()\n  {\n#if defined(ASIO_HAS_BOOST_DATE_TIME)\n    boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));\n    boost::posix_time::time_duration now =\n      boost::posix_time::microsec_clock::universal_time() - epoch;\n#elif defined(ASIO_HAS_CHRONO)\n    typedef chrono_time_traits<chrono::system_clock,\n        asio::wait_traits<chrono::system_clock> > traits_helper;\n    traits_helper::posix_time_duration now(\n        chrono::system_clock::now().time_since_epoch());\n#endif\n    
seconds = static_cast<uint64_t>(now.total_seconds());\n    microseconds = static_cast<uint64_t>(now.total_microseconds() % 1000000);\n  }\n};\n\nstruct handler_tracking::tracking_state\n{\n  static_mutex mutex_;\n  uint64_t next_id_;\n  tss_ptr<completion>* current_completion_;\n};\n\nhandler_tracking::tracking_state* handler_tracking::get_state()\n{\n  static tracking_state state = { ASIO_STATIC_MUTEX_INIT, 1, 0 };\n  return &state;\n}\n\nvoid handler_tracking::init()\n{\n  static tracking_state* state = get_state();\n\n  state->mutex_.init();\n\n  static_mutex::scoped_lock lock(state->mutex_);\n  if (state->current_completion_ == 0)\n    state->current_completion_ = new tss_ptr<completion>;\n}\n\nvoid handler_tracking::creation(execution_context&,\n    handler_tracking::tracked_handler& h,\n    const char* object_type, void* object,\n    uintmax_t /*native_handle*/, const char* op_name)\n{\n  static tracking_state* state = get_state();\n\n  static_mutex::scoped_lock lock(state->mutex_);\n  h.id_ = state->next_id_++;\n  lock.unlock();\n\n  handler_tracking_timestamp timestamp;\n\n  uint64_t current_id = 0;\n  if (completion* current_completion = *state->current_completion_)\n    current_id = current_completion->id_;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      \"@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds,\n      current_id, h.id_, object_type, object, op_name);\n}\n\nhandler_tracking::completion::completion(\n    const handler_tracking::tracked_handler& h)\n  : id_(h.id_),\n    invoked_(false),\n    next_(*get_state()->current_completion_)\n{\n  *get_state()->current_completion_ = this;\n}\n\nhandler_tracking::completion::~completion()\n{\n  if (id_)\n  {\n    handler_tracking_timestamp timestamp;\n\n    write_line(\n#if defined(ASIO_WINDOWS)\n        
\"@asio|%I64u.%06I64u|%c%I64u|\\n\",\n#else // defined(ASIO_WINDOWS)\n        \"@asio|%llu.%06llu|%c%llu|\\n\",\n#endif // defined(ASIO_WINDOWS)\n        timestamp.seconds, timestamp.microseconds,\n        invoked_ ? '!' : '~', id_);\n  }\n\n  *get_state()->current_completion_ = next_;\n}\n\nvoid handler_tracking::completion::invocation_begin()\n{\n  handler_tracking_timestamp timestamp;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      \"@asio|%I64u.%06I64u|>%I64u|\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|>%llu|\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds, id_);\n\n  invoked_ = true;\n}\n\nvoid handler_tracking::completion::invocation_begin(\n    const asio::error_code& ec)\n{\n  handler_tracking_timestamp timestamp;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      \"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|>%llu|ec=%.20s:%d\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds,\n      id_, ec.category().name(), ec.value());\n\n  invoked_ = true;\n}\n\nvoid handler_tracking::completion::invocation_begin(\n    const asio::error_code& ec, std::size_t bytes_transferred)\n{\n  handler_tracking_timestamp timestamp;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      \"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds,\n      id_, ec.category().name(), ec.value(),\n      static_cast<uint64_t>(bytes_transferred));\n\n  invoked_ = true;\n}\n\nvoid handler_tracking::completion::invocation_begin(\n    const asio::error_code& ec, int signal_number)\n{\n  handler_tracking_timestamp timestamp;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      
\"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds,\n      id_, ec.category().name(), ec.value(), signal_number);\n\n  invoked_ = true;\n}\n\nvoid handler_tracking::completion::invocation_begin(\n    const asio::error_code& ec, const char* arg)\n{\n  handler_tracking_timestamp timestamp;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      \"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds,\n      id_, ec.category().name(), ec.value(), arg);\n\n  invoked_ = true;\n}\n\nvoid handler_tracking::completion::invocation_end()\n{\n  if (id_)\n  {\n    handler_tracking_timestamp timestamp;\n\n    write_line(\n#if defined(ASIO_WINDOWS)\n        \"@asio|%I64u.%06I64u|<%I64u|\\n\",\n#else // defined(ASIO_WINDOWS)\n        \"@asio|%llu.%06llu|<%llu|\\n\",\n#endif // defined(ASIO_WINDOWS)\n        timestamp.seconds, timestamp.microseconds, id_);\n\n    id_ = 0;\n  }\n}\n\nvoid handler_tracking::operation(execution_context&,\n    const char* object_type, void* object,\n    uintmax_t /*native_handle*/, const char* op_name)\n{\n  static tracking_state* state = get_state();\n\n  handler_tracking_timestamp timestamp;\n\n  unsigned long long current_id = 0;\n  if (completion* current_completion = *state->current_completion_)\n    current_id = current_completion->id_;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      \"@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds,\n      current_id, object_type, object, op_name);\n}\n\nvoid 
handler_tracking::reactor_registration(execution_context& /*context*/,\n    uintmax_t /*native_handle*/, uintmax_t /*registration*/)\n{\n}\n\nvoid handler_tracking::reactor_deregistration(execution_context& /*context*/,\n    uintmax_t /*native_handle*/, uintmax_t /*registration*/)\n{\n}\n\nvoid handler_tracking::reactor_events(execution_context& /*context*/,\n    uintmax_t /*native_handle*/, unsigned /*events*/)\n{\n}\n\nvoid handler_tracking::reactor_operation(\n    const tracked_handler& h, const char* op_name,\n    const asio::error_code& ec)\n{\n  handler_tracking_timestamp timestamp;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      \"@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds,\n      h.id_, op_name, ec.category().name(), ec.value());\n}\n\nvoid handler_tracking::reactor_operation(\n    const tracked_handler& h, const char* op_name,\n    const asio::error_code& ec, std::size_t bytes_transferred)\n{\n  handler_tracking_timestamp timestamp;\n\n  write_line(\n#if defined(ASIO_WINDOWS)\n      \"@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d,bytes_transferred=%I64u\\n\",\n#else // defined(ASIO_WINDOWS)\n      \"@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d,bytes_transferred=%llu\\n\",\n#endif // defined(ASIO_WINDOWS)\n      timestamp.seconds, timestamp.microseconds,\n      h.id_, op_name, ec.category().name(), ec.value(),\n      static_cast<uint64_t>(bytes_transferred));\n}\n\nvoid handler_tracking::write_line(const char* format, ...)\n{\n  using namespace std; // For sprintf (or equivalent).\n\n  va_list args;\n  va_start(args, format);\n\n  char line[256] = \"\";\n#if defined(ASIO_HAS_SECURE_RTL)\n  int length = vsprintf_s(line, sizeof(line), format, args);\n#else // defined(ASIO_HAS_SECURE_RTL)\n  int length = vsprintf(line, format, args);\n#endif // defined(ASIO_HAS_SECURE_RTL)\n\n  
va_end(args);\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n  wchar_t wline[256] = L\"\";\n  mbstowcs_s(0, wline, sizeof(wline) / sizeof(wchar_t), line, length);\n  ::OutputDebugStringW(wline);\n#elif defined(ASIO_WINDOWS)\n  HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE);\n  DWORD bytes_written = 0;\n  ::WriteFile(stderr_handle, line, length, &bytes_written, 0);\n#else // defined(ASIO_WINDOWS)\n  ::write(STDERR_FILENO, line, length);\n#endif // defined(ASIO_WINDOWS)\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)\n\n#endif // ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/kqueue_reactor.hpp",
    "content": "//\n// detail/impl/kqueue_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP\n#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_KQUEUE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Time_Traits>\nvoid kqueue_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_add_timer_queue(queue);\n}\n\n// Remove a timer queue from the reactor.\ntemplate <typename Time_Traits>\nvoid kqueue_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_remove_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid kqueue_reactor::schedule_timer(timer_queue<Time_Traits>& queue,\n    const typename Time_Traits::time_type& time,\n    typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)\n{\n  mutex::scoped_lock lock(mutex_);\n\n  if (shutdown_)\n  {\n    scheduler_.post_immediate_completion(op, false);\n    return;\n  }\n\n  bool earliest = queue.enqueue_timer(time, timer, op);\n  scheduler_.work_started();\n  if (earliest)\n    interrupt();\n}\n\ntemplate <typename Time_Traits>\nstd::size_t kqueue_reactor::cancel_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& timer,\n    std::size_t max_cancelled)\n{\n  mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n  return 
n;\n}\n\ntemplate <typename Time_Traits>\nvoid kqueue_reactor::move_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& target,\n    typename timer_queue<Time_Traits>::per_timer_data& source)\n{\n  mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  queue.cancel_timer(target, ops);\n  queue.move_timer(target, source);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_KQUEUE)\n\n#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/kqueue_reactor.ipp",
    "content": "//\n// detail/impl/kqueue_reactor.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP\n#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_KQUEUE)\n\n#include \"asio/detail/kqueue_reactor.hpp\"\n#include \"asio/detail/scheduler.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\n#if defined(__NetBSD__)\n# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \\\n    EV_SET(ev, ident, filt, flags, fflags, data, \\\n      reinterpret_cast<intptr_t>(static_cast<void*>(udata)))\n#else\n# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \\\n    EV_SET(ev, ident, filt, flags, fflags, data, udata)\n#endif\n\nnamespace asio {\nnamespace detail {\n\nkqueue_reactor::kqueue_reactor(asio::execution_context& ctx)\n  : execution_context_service_base<kqueue_reactor>(ctx),\n    scheduler_(use_service<scheduler>(ctx)),\n    mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(\n          REACTOR_REGISTRATION, scheduler_.concurrency_hint())),\n    kqueue_fd_(do_kqueue_create()),\n    interrupter_(),\n    shutdown_(false),\n    registered_descriptors_mutex_(mutex_.enabled())\n{\n  struct kevent events[1];\n  ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),\n      EVFILT_READ, EV_ADD, 0, 0, &interrupter_);\n  if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)\n  {\n    asio::error_code error(errno,\n        asio::error::get_system_category());\n    
asio::detail::throw_error(error);\n  }\n}\n\nkqueue_reactor::~kqueue_reactor()\n{\n  close(kqueue_fd_);\n}\n\nvoid kqueue_reactor::shutdown()\n{\n  mutex::scoped_lock lock(mutex_);\n  shutdown_ = true;\n  lock.unlock();\n\n  op_queue<operation> ops;\n\n  while (descriptor_state* state = registered_descriptors_.first())\n  {\n    for (int i = 0; i < max_ops; ++i)\n      ops.push(state->op_queue_[i]);\n    state->shutdown_ = true;\n    registered_descriptors_.free(state);\n  }\n\n  timer_queues_.get_all_timers(ops);\n\n  scheduler_.abandon_operations(ops);\n}\n\nvoid kqueue_reactor::notify_fork(\n    asio::execution_context::fork_event fork_ev)\n{\n  if (fork_ev == asio::execution_context::fork_child)\n  {\n    // The kqueue descriptor is automatically closed in the child.\n    kqueue_fd_ = -1;\n    kqueue_fd_ = do_kqueue_create();\n\n    interrupter_.recreate();\n\n    struct kevent events[2];\n    ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),\n        EVFILT_READ, EV_ADD, 0, 0, &interrupter_);\n    if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)\n    {\n      asio::error_code ec(errno,\n          asio::error::get_system_category());\n      asio::detail::throw_error(ec, \"kqueue interrupter registration\");\n    }\n\n    // Re-register all descriptors with kqueue.\n    mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);\n    for (descriptor_state* state = registered_descriptors_.first();\n        state != 0; state = state->next_)\n    {\n      if (state->num_kevents_ > 0)\n      {\n        ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_,\n            EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);\n        ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_,\n            EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);\n        if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1)\n        {\n          asio::error_code ec(errno,\n              asio::error::get_system_category());\n          
asio::detail::throw_error(ec, \"kqueue re-registration\");\n        }\n      }\n    }\n  }\n}\n\nvoid kqueue_reactor::init_task()\n{\n  scheduler_.init_task();\n}\n\nint kqueue_reactor::register_descriptor(socket_type descriptor,\n    kqueue_reactor::per_descriptor_data& descriptor_data)\n{\n  descriptor_data = allocate_descriptor_state();\n\n  ASIO_HANDLER_REACTOR_REGISTRATION((\n        context(), static_cast<uintmax_t>(descriptor),\n        reinterpret_cast<uintmax_t>(descriptor_data)));\n\n  mutex::scoped_lock lock(descriptor_data->mutex_);\n\n  descriptor_data->descriptor_ = descriptor;\n  descriptor_data->num_kevents_ = 0;\n  descriptor_data->shutdown_ = false;\n\n  return 0;\n}\n\nint kqueue_reactor::register_internal_descriptor(\n    int op_type, socket_type descriptor,\n    kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)\n{\n  descriptor_data = allocate_descriptor_state();\n\n  ASIO_HANDLER_REACTOR_REGISTRATION((\n        context(), static_cast<uintmax_t>(descriptor),\n        reinterpret_cast<uintmax_t>(descriptor_data)));\n\n  mutex::scoped_lock lock(descriptor_data->mutex_);\n\n  descriptor_data->descriptor_ = descriptor;\n  descriptor_data->num_kevents_ = 1;\n  descriptor_data->shutdown_ = false;\n  descriptor_data->op_queue_[op_type].push(op);\n\n  struct kevent events[1];\n  ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,\n      EV_ADD | EV_CLEAR, 0, 0, descriptor_data);\n  if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)\n    return errno;\n\n  return 0;\n}\n\nvoid kqueue_reactor::move_descriptor(socket_type,\n    kqueue_reactor::per_descriptor_data& target_descriptor_data,\n    kqueue_reactor::per_descriptor_data& source_descriptor_data)\n{\n  target_descriptor_data = source_descriptor_data;\n  source_descriptor_data = 0;\n}\n\nvoid kqueue_reactor::start_op(int op_type, socket_type descriptor,\n    kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op,\n    bool is_continuation, bool 
allow_speculative)\n{\n  if (!descriptor_data)\n  {\n    op->ec_ = asio::error::bad_descriptor;\n    post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n  if (descriptor_data->shutdown_)\n  {\n    post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  if (descriptor_data->op_queue_[op_type].empty())\n  {\n    static const int num_kevents[max_ops] = { 1, 2, 1 };\n\n    if (allow_speculative\n        && (op_type != read_op\n          || descriptor_data->op_queue_[except_op].empty()))\n    {\n      if (op->perform())\n      {\n        descriptor_lock.unlock();\n        scheduler_.post_immediate_completion(op, is_continuation);\n        return;\n      }\n\n      if (descriptor_data->num_kevents_ < num_kevents[op_type])\n      {\n        struct kevent events[2];\n        ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,\n            EV_ADD | EV_CLEAR, 0, 0, descriptor_data);\n        ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,\n            EV_ADD | EV_CLEAR, 0, 0, descriptor_data);\n        if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1)\n        {\n          descriptor_data->num_kevents_ = num_kevents[op_type];\n        }\n        else\n        {\n          op->ec_ = asio::error_code(errno,\n              asio::error::get_system_category());\n          scheduler_.post_immediate_completion(op, is_continuation);\n          return;\n        }\n      }\n    }\n    else\n    {\n      if (descriptor_data->num_kevents_ < num_kevents[op_type])\n        descriptor_data->num_kevents_ = num_kevents[op_type];\n\n      struct kevent events[2];\n      ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,\n          EV_ADD | EV_CLEAR, 0, 0, descriptor_data);\n      ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,\n          EV_ADD | EV_CLEAR, 0, 0, descriptor_data);\n      ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 
0, 0);\n    }\n  }\n\n  descriptor_data->op_queue_[op_type].push(op);\n  scheduler_.work_started();\n}\n\nvoid kqueue_reactor::cancel_ops(socket_type,\n    kqueue_reactor::per_descriptor_data& descriptor_data)\n{\n  if (!descriptor_data)\n    return;\n\n  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n  op_queue<operation> ops;\n  for (int i = 0; i < max_ops; ++i)\n  {\n    while (reactor_op* op = descriptor_data->op_queue_[i].front())\n    {\n      op->ec_ = asio::error::operation_aborted;\n      descriptor_data->op_queue_[i].pop();\n      ops.push(op);\n    }\n  }\n\n  descriptor_lock.unlock();\n\n  scheduler_.post_deferred_completions(ops);\n}\n\nvoid kqueue_reactor::deregister_descriptor(socket_type descriptor,\n    kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)\n{\n  if (!descriptor_data)\n    return;\n\n  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n  if (!descriptor_data->shutdown_)\n  {\n    if (closing)\n    {\n      // The descriptor will be automatically removed from the kqueue when it\n      // is closed.\n    }\n    else\n    {\n      struct kevent events[2];\n      ASIO_KQUEUE_EV_SET(&events[0], descriptor,\n          EVFILT_READ, EV_DELETE, 0, 0, 0);\n      ASIO_KQUEUE_EV_SET(&events[1], descriptor,\n          EVFILT_WRITE, EV_DELETE, 0, 0, 0);\n      ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);\n    }\n\n    op_queue<operation> ops;\n    for (int i = 0; i < max_ops; ++i)\n    {\n      while (reactor_op* op = descriptor_data->op_queue_[i].front())\n      {\n        op->ec_ = asio::error::operation_aborted;\n        descriptor_data->op_queue_[i].pop();\n        ops.push(op);\n      }\n    }\n\n    descriptor_data->descriptor_ = -1;\n    descriptor_data->shutdown_ = true;\n\n    descriptor_lock.unlock();\n\n    ASIO_HANDLER_REACTOR_DEREGISTRATION((\n          context(), static_cast<uintmax_t>(descriptor),\n          reinterpret_cast<uintmax_t>(descriptor_data)));\n\n    
scheduler_.post_deferred_completions(ops);\n\n    // Leave descriptor_data set so that it will be freed by the subsequent\n    // call to cleanup_descriptor_data.\n  }\n  else\n  {\n    // We are shutting down, so prevent cleanup_descriptor_data from freeing\n    // the descriptor_data object and let the destructor free it instead.\n    descriptor_data = 0;\n  }\n}\n\nvoid kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,\n    kqueue_reactor::per_descriptor_data& descriptor_data)\n{\n  if (!descriptor_data)\n    return;\n\n  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n  if (!descriptor_data->shutdown_)\n  {\n    struct kevent events[2];\n    ASIO_KQUEUE_EV_SET(&events[0], descriptor,\n        EVFILT_READ, EV_DELETE, 0, 0, 0);\n    ASIO_KQUEUE_EV_SET(&events[1], descriptor,\n        EVFILT_WRITE, EV_DELETE, 0, 0, 0);\n    ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);\n\n    op_queue<operation> ops;\n    for (int i = 0; i < max_ops; ++i)\n      ops.push(descriptor_data->op_queue_[i]);\n\n    descriptor_data->descriptor_ = -1;\n    descriptor_data->shutdown_ = true;\n\n    descriptor_lock.unlock();\n\n    ASIO_HANDLER_REACTOR_DEREGISTRATION((\n          context(), static_cast<uintmax_t>(descriptor),\n          reinterpret_cast<uintmax_t>(descriptor_data)));\n\n    // Leave descriptor_data set so that it will be freed by the subsequent\n    // call to cleanup_descriptor_data.\n  }\n  else\n  {\n    // We are shutting down, so prevent cleanup_descriptor_data from freeing\n    // the descriptor_data object and let the destructor free it instead.\n    descriptor_data = 0;\n  }\n}\n\nvoid kqueue_reactor::cleanup_descriptor_data(\n    per_descriptor_data& descriptor_data)\n{\n  if (descriptor_data)\n  {\n    free_descriptor_state(descriptor_data);\n    descriptor_data = 0;\n  }\n}\n\nvoid kqueue_reactor::run(long usec, op_queue<operation>& ops)\n{\n  mutex::scoped_lock lock(mutex_);\n\n  // Determine how long 
to block while waiting for events.\n  timespec timeout_buf = { 0, 0 };\n  timespec* timeout = usec ? get_timeout(usec, timeout_buf) : &timeout_buf;\n\n  lock.unlock();\n\n  // Block on the kqueue descriptor.\n  struct kevent events[128];\n  int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout);\n\n#if defined(ASIO_ENABLE_HANDLER_TRACKING)\n  // Trace the waiting events.\n  for (int i = 0; i < num_events; ++i)\n  {\n    void* ptr = reinterpret_cast<void*>(events[i].udata);\n    if (ptr != &interrupter_)\n    {\n      unsigned event_mask = 0;\n      switch (events[i].filter)\n      {\n      case EVFILT_READ:\n        event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT;\n        break;\n      case EVFILT_WRITE:\n        event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT;\n        break;\n      }\n      if ((events[i].flags & (EV_ERROR | EV_OOBAND)) != 0)\n        event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT;\n      ASIO_HANDLER_REACTOR_EVENTS((context(),\n            reinterpret_cast<uintmax_t>(ptr), event_mask));\n    }\n  }\n#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)\n\n  // Dispatch the waiting events.\n  for (int i = 0; i < num_events; ++i)\n  {\n    void* ptr = reinterpret_cast<void*>(events[i].udata);\n    if (ptr == &interrupter_)\n    {\n      interrupter_.reset();\n    }\n    else\n    {\n      descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);\n      mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);\n\n      if (events[i].filter == EVFILT_WRITE\n          && descriptor_data->num_kevents_ == 2\n          && descriptor_data->op_queue_[write_op].empty())\n      {\n        // Some descriptor types, like serial ports, don't seem to support\n        // EV_CLEAR with EVFILT_WRITE. 
Since we have no pending write\n        // operations we'll remove the EVFILT_WRITE registration here so that\n        // we don't end up in a tight spin.\n        struct kevent delete_events[1];\n        ASIO_KQUEUE_EV_SET(&delete_events[0],\n            descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0);\n        ::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0);\n        descriptor_data->num_kevents_ = 1;\n      }\n\n      // Exception operations must be processed first to ensure that any\n      // out-of-band data is read before normal data.\n#if defined(__NetBSD__)\n      static const unsigned int filter[max_ops] =\n#else\n      static const int filter[max_ops] =\n#endif\n        { EVFILT_READ, EVFILT_WRITE, EVFILT_READ };\n      for (int j = max_ops - 1; j >= 0; --j)\n      {\n        if (events[i].filter == filter[j])\n        {\n          if (j != except_op || events[i].flags & EV_OOBAND)\n          {\n            while (reactor_op* op = descriptor_data->op_queue_[j].front())\n            {\n              if (events[i].flags & EV_ERROR)\n              {\n                op->ec_ = asio::error_code(\n                    static_cast<int>(events[i].data),\n                    asio::error::get_system_category());\n                descriptor_data->op_queue_[j].pop();\n                ops.push(op);\n              }\n              if (op->perform())\n              {\n                descriptor_data->op_queue_[j].pop();\n                ops.push(op);\n              }\n              else\n                break;\n            }\n          }\n        }\n      }\n    }\n  }\n\n  lock.lock();\n  timer_queues_.get_ready_timers(ops);\n}\n\nvoid kqueue_reactor::interrupt()\n{\n  interrupter_.interrupt();\n}\n\nint kqueue_reactor::do_kqueue_create()\n{\n  int fd = ::kqueue();\n  if (fd == -1)\n  {\n    asio::error_code ec(errno,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"kqueue\");\n  }\n  return 
fd;\n}\n\nkqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()\n{\n  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);\n  return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING(\n        REACTOR_IO, scheduler_.concurrency_hint()));\n}\n\nvoid kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)\n{\n  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);\n  registered_descriptors_.free(s);\n}\n\nvoid kqueue_reactor::do_add_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.insert(&queue);\n}\n\nvoid kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.erase(&queue);\n}\n\ntimespec* kqueue_reactor::get_timeout(long usec, timespec& ts)\n{\n  // By default we will wait no longer than 5 minutes. This will ensure that\n  // any changes to the system clock are detected after no longer than this.\n  const long max_usec = 5 * 60 * 1000 * 1000;\n  usec = timer_queues_.wait_duration_usec(\n      (usec < 0 || max_usec < usec) ? max_usec : usec);\n  ts.tv_sec = usec / 1000000;\n  ts.tv_nsec = (usec % 1000000) * 1000;\n  return &ts;\n}\n\n} // namespace detail\n} // namespace asio\n\n#undef ASIO_KQUEUE_EV_SET\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_KQUEUE)\n\n#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/null_event.ipp",
    "content": "//\n// detail/impl/null_event.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_NULL_EVENT_IPP\n#define ASIO_DETAIL_IMPL_NULL_EVENT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include <thread>\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# include \"asio/detail/socket_types.hpp\"\n#else\n# include <unistd.h>\n# if defined(__hpux)\n#  include <sys/time.h>\n# endif\n# if !defined(__hpux) || defined(__SELECT)\n#  include <sys/select.h>\n# endif\n#endif\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nvoid null_event::do_wait()\n{\n#if defined(ASIO_WINDOWS_RUNTIME)\n  std::this_thread::sleep_until((std::chrono::steady_clock::time_point::max)());\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  ::Sleep(INFINITE);\n#else\n  ::pause();\n#endif\n}\n\nvoid null_event::do_wait_for_usec(long usec)\n{\n#if defined(ASIO_WINDOWS_RUNTIME)\n  std::this_thread::sleep_for(std::chrono::microseconds(usec));\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  ::Sleep(usec / 1000);\n#elif defined(__hpux) && defined(__SELECT)\n  timespec ts;\n  ts.tv_sec = usec / 1000000;\n  ts.tv_nsec = (usec % 1000000) * 1000;\n  ::pselect(0, 0, 0, 0, &ts, 0);\n#else\n  timeval tv;\n  tv.tv_sec = usec / 1000000;\n  tv.tv_usec = usec % 1000000;\n  ::select(0, 0, 0, 0, &tv);\n#endif\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_NULL_EVENT_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/pipe_select_interrupter.ipp",
    "content": "//\n// detail/impl/pipe_select_interrupter.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP\n#define ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n#if !defined(ASIO_WINDOWS)\n#if !defined(__CYGWIN__)\n#if !defined(__SYMBIAN32__)\n#if !defined(ASIO_HAS_EVENTFD)\n\n#include <fcntl.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n#include \"asio/detail/pipe_select_interrupter.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\npipe_select_interrupter::pipe_select_interrupter()\n{\n  open_descriptors();\n}\n\nvoid pipe_select_interrupter::open_descriptors()\n{\n  int pipe_fds[2];\n  if (pipe(pipe_fds) == 0)\n  {\n    read_descriptor_ = pipe_fds[0];\n    ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);\n    write_descriptor_ = pipe_fds[1];\n    ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);\n\n#if defined(FD_CLOEXEC)\n    ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);\n    ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);\n#endif // defined(FD_CLOEXEC)\n  }\n  else\n  {\n    asio::error_code ec(errno,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"pipe_select_interrupter\");\n  }\n}\n\npipe_select_interrupter::~pipe_select_interrupter()\n{\n  close_descriptors();\n}\n\nvoid pipe_select_interrupter::close_descriptors()\n{\n  if (read_descriptor_ != -1)\n    
::close(read_descriptor_);\n  if (write_descriptor_ != -1)\n    ::close(write_descriptor_);\n}\n\nvoid pipe_select_interrupter::recreate()\n{\n  close_descriptors();\n\n  write_descriptor_ = -1;\n  read_descriptor_ = -1;\n\n  open_descriptors();\n}\n\nvoid pipe_select_interrupter::interrupt()\n{\n  char byte = 0;\n  signed_size_type result = ::write(write_descriptor_, &byte, 1);\n  (void)result;\n}\n\nbool pipe_select_interrupter::reset()\n{\n  for (;;)\n  {\n    char data[1024];\n    signed_size_type bytes_read = ::read(read_descriptor_, data, sizeof(data));\n    if (bytes_read < 0 && errno == EINTR)\n      continue;\n    bool was_interrupted = (bytes_read > 0);\n    while (bytes_read == sizeof(data))\n      bytes_read = ::read(read_descriptor_, data, sizeof(data));\n    return was_interrupted;\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_HAS_EVENTFD)\n#endif // !defined(__SYMBIAN32__)\n#endif // !defined(__CYGWIN__)\n#endif // !defined(ASIO_WINDOWS)\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/posix_event.ipp",
    "content": "//\n// detail/impl/posix_event.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_POSIX_EVENT_IPP\n#define ASIO_DETAIL_IMPL_POSIX_EVENT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include \"asio/detail/posix_event.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nposix_event::posix_event()\n  : state_(0)\n{\n#if (defined(__MACH__) && defined(__APPLE__)) \\\n      || (defined(__ANDROID__) && (__ANDROID_API__ < 21))\n  int error = ::pthread_cond_init(&cond_, 0);\n#else // (defined(__MACH__) && defined(__APPLE__))\n      // || (defined(__ANDROID__) && (__ANDROID_API__ < 21))\n  ::pthread_condattr_t attr;\n  ::pthread_condattr_init(&attr);\n  int error = ::pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);\n  if (error == 0)\n    error = ::pthread_cond_init(&cond_, &attr);\n#endif // (defined(__MACH__) && defined(__APPLE__))\n       // || (defined(__ANDROID__) && (__ANDROID_API__ < 21))\n\n  asio::error_code ec(error,\n      asio::error::get_system_category());\n  asio::detail::throw_error(ec, \"event\");\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_IMPL_POSIX_EVENT_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/posix_mutex.ipp",
    "content": "//\n// detail/impl/posix_mutex.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP\n#define ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include \"asio/detail/posix_mutex.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nposix_mutex::posix_mutex()\n{\n  int error = ::pthread_mutex_init(&mutex_, 0);\n  asio::error_code ec(error,\n      asio::error::get_system_category());\n  asio::detail::throw_error(ec, \"mutex\");\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/posix_thread.ipp",
    "content": "//\n// detail/impl/posix_thread.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_POSIX_THREAD_IPP\n#define ASIO_DETAIL_IMPL_POSIX_THREAD_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include \"asio/detail/posix_thread.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nposix_thread::~posix_thread()\n{\n  if (!joined_)\n    ::pthread_detach(thread_);\n}\n\nvoid posix_thread::join()\n{\n  if (!joined_)\n  {\n    ::pthread_join(thread_, 0);\n    joined_ = true;\n  }\n}\n\nstd::size_t posix_thread::hardware_concurrency()\n{\n#if defined(_SC_NPROCESSORS_ONLN)\n  long result = sysconf(_SC_NPROCESSORS_ONLN);\n  if (result > 0)\n    return result;\n#endif // defined(_SC_NPROCESSORS_ONLN)\n  return 0;\n}\n\nvoid posix_thread::start_thread(func_base* arg)\n{\n  int error = ::pthread_create(&thread_, 0,\n        asio_detail_posix_thread_function, arg);\n  if (error != 0)\n  {\n    delete arg;\n    asio::error_code ec(error,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"thread\");\n  }\n}\n\nvoid* asio_detail_posix_thread_function(void* arg)\n{\n  posix_thread::auto_func_base_ptr func = {\n      static_cast<posix_thread::func_base*>(arg) };\n  func.ptr->run();\n  return 0;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_IMPL_POSIX_THREAD_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/posix_tss_ptr.ipp",
    "content": "//\n// detail/impl/posix_tss_ptr.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP\n#define ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include \"asio/detail/posix_tss_ptr.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nvoid posix_tss_ptr_create(pthread_key_t& key)\n{\n  int error = ::pthread_key_create(&key, 0);\n  asio::error_code ec(error,\n      asio::error::get_system_category());\n  asio::detail::throw_error(ec, \"tss\");\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/reactive_descriptor_service.ipp",
    "content": "//\n// detail/impl/reactive_descriptor_service.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP\n#define ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n\n#include \"asio/error.hpp\"\n#include \"asio/detail/reactive_descriptor_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nreactive_descriptor_service::reactive_descriptor_service(\n    execution_context& context)\n  : execution_context_service_base<reactive_descriptor_service>(context),\n    reactor_(asio::use_service<reactor>(context))\n{\n  reactor_.init_task();\n}\n\nvoid reactive_descriptor_service::shutdown()\n{\n}\n\nvoid reactive_descriptor_service::construct(\n    reactive_descriptor_service::implementation_type& impl)\n{\n  impl.descriptor_ = -1;\n  impl.state_ = 0;\n}\n\nvoid reactive_descriptor_service::move_construct(\n    reactive_descriptor_service::implementation_type& impl,\n    reactive_descriptor_service::implementation_type& other_impl)\n{\n  impl.descriptor_ = other_impl.descriptor_;\n  other_impl.descriptor_ = -1;\n\n  impl.state_ = other_impl.state_;\n  other_impl.state_ = 0;\n\n  reactor_.move_descriptor(impl.descriptor_,\n      impl.reactor_data_, other_impl.reactor_data_);\n}\n\nvoid reactive_descriptor_service::move_assign(\n    reactive_descriptor_service::implementation_type& impl,\n    reactive_descriptor_service& other_service,\n    
reactive_descriptor_service::implementation_type& other_impl)\n{\n  destroy(impl);\n\n  impl.descriptor_ = other_impl.descriptor_;\n  other_impl.descriptor_ = -1;\n\n  impl.state_ = other_impl.state_;\n  other_impl.state_ = 0;\n\n  other_service.reactor_.move_descriptor(impl.descriptor_,\n      impl.reactor_data_, other_impl.reactor_data_);\n}\n\nvoid reactive_descriptor_service::destroy(\n    reactive_descriptor_service::implementation_type& impl)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((reactor_.context(),\n          \"descriptor\", &impl, impl.descriptor_, \"close\"));\n\n    reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,\n        (impl.state_ & descriptor_ops::possible_dup) == 0);\n\n    asio::error_code ignored_ec;\n    descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec);\n\n    reactor_.cleanup_descriptor_data(impl.reactor_data_);\n  }\n}\n\nasio::error_code reactive_descriptor_service::assign(\n    reactive_descriptor_service::implementation_type& impl,\n    const native_handle_type& native_descriptor, asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  if (int err = reactor_.register_descriptor(\n        native_descriptor, impl.reactor_data_))\n  {\n    ec = asio::error_code(err,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  impl.descriptor_ = native_descriptor;\n  impl.state_ = descriptor_ops::possible_dup;\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code reactive_descriptor_service::close(\n    reactive_descriptor_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((reactor_.context(),\n          \"descriptor\", &impl, impl.descriptor_, \"close\"));\n\n    reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,\n        (impl.state_ & descriptor_ops::possible_dup) == 0);\n\n    descriptor_ops::close(impl.descriptor_, 
impl.state_, ec);\n\n    reactor_.cleanup_descriptor_data(impl.reactor_data_);\n  }\n  else\n  {\n    ec = asio::error_code();\n  }\n\n  // The descriptor is closed by the OS even if close() returns an error.\n  //\n  // (Actually, POSIX says the state of the descriptor is unspecified. On\n  // Linux the descriptor is apparently closed anyway; e.g. see\n  //   http://lkml.org/lkml/2005/9/10/129\n  // We'll just have to assume that other OSes follow the same behaviour.)\n  construct(impl);\n\n  return ec;\n}\n\nreactive_descriptor_service::native_handle_type\nreactive_descriptor_service::release(\n    reactive_descriptor_service::implementation_type& impl)\n{\n  native_handle_type descriptor = impl.descriptor_;\n\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((reactor_.context(),\n          \"descriptor\", &impl, impl.descriptor_, \"release\"));\n\n    reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false);\n    reactor_.cleanup_descriptor_data(impl.reactor_data_);\n    construct(impl);\n  }\n\n  return descriptor;\n}\n\nasio::error_code reactive_descriptor_service::cancel(\n    reactive_descriptor_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return ec;\n  }\n\n  ASIO_HANDLER_OPERATION((reactor_.context(),\n        \"descriptor\", &impl, impl.descriptor_, \"cancel\"));\n\n  reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_);\n  ec = asio::error_code();\n  return ec;\n}\n\nvoid reactive_descriptor_service::start_op(\n    reactive_descriptor_service::implementation_type& impl,\n    int op_type, reactor_op* op, bool is_continuation,\n    bool is_non_blocking, bool noop)\n{\n  if (!noop)\n  {\n    if ((impl.state_ & descriptor_ops::non_blocking) ||\n        descriptor_ops::set_internal_non_blocking(\n          impl.descriptor_, impl.state_, true, op->ec_))\n    {\n      reactor_.start_op(op_type, impl.descriptor_,\n          
impl.reactor_data_, op, is_continuation, is_non_blocking);\n      return;\n    }\n  }\n\n  reactor_.post_immediate_completion(op, is_continuation);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/reactive_serial_port_service.ipp",
    "content": "//\n// detail/impl/reactive_serial_port_service.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP\n#define ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_SERIAL_PORT)\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#include <cstring>\n#include \"asio/detail/reactive_serial_port_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nreactive_serial_port_service::reactive_serial_port_service(\n    execution_context& context)\n  : execution_context_service_base<reactive_serial_port_service>(context),\n    descriptor_service_(context)\n{\n}\n\nvoid reactive_serial_port_service::shutdown()\n{\n  descriptor_service_.shutdown();\n}\n\nasio::error_code reactive_serial_port_service::open(\n    reactive_serial_port_service::implementation_type& impl,\n    const std::string& device, asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  descriptor_ops::state_type state = 0;\n  int fd = descriptor_ops::open(device.c_str(),\n      O_RDWR | O_NONBLOCK | O_NOCTTY, ec);\n  if (fd < 0)\n    return ec;\n\n  int s = descriptor_ops::fcntl(fd, F_GETFL, ec);\n  if (s >= 0)\n    s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec);\n  if (s < 0)\n  {\n    asio::error_code ignored_ec;\n    descriptor_ops::close(fd, state, ignored_ec);\n    return ec;\n  }\n\n  // Set up default serial port options.\n  
termios ios;\n  errno = 0;\n  s = descriptor_ops::error_wrapper(::tcgetattr(fd, &ios), ec);\n  if (s >= 0)\n  {\n#if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)\n    ::cfmakeraw(&ios);\n#else\n    ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK\n        | ISTRIP | INLCR | IGNCR | ICRNL | IXON);\n    ios.c_oflag &= ~OPOST;\n    ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);\n    ios.c_cflag &= ~(CSIZE | PARENB);\n    ios.c_cflag |= CS8;\n#endif\n    ios.c_iflag |= IGNPAR;\n    ios.c_cflag |= CREAD | CLOCAL;\n    errno = 0;\n    s = descriptor_ops::error_wrapper(::tcsetattr(fd, TCSANOW, &ios), ec);\n  }\n  if (s < 0)\n  {\n    asio::error_code ignored_ec;\n    descriptor_ops::close(fd, state, ignored_ec);\n    return ec;\n  }\n\n  // We're done. Take ownership of the serial port descriptor.\n  if (descriptor_service_.assign(impl, fd, ec))\n  {\n    asio::error_code ignored_ec;\n    descriptor_ops::close(fd, state, ignored_ec);\n  }\n\n  return ec;\n}\n\nasio::error_code reactive_serial_port_service::do_set_option(\n    reactive_serial_port_service::implementation_type& impl,\n    reactive_serial_port_service::store_function_type store,\n    const void* option, asio::error_code& ec)\n{\n  termios ios;\n  errno = 0;\n  descriptor_ops::error_wrapper(::tcgetattr(\n        descriptor_service_.native_handle(impl), &ios), ec);\n  if (ec)\n    return ec;\n\n  if (store(option, ios, ec))\n    return ec;\n\n  errno = 0;\n  descriptor_ops::error_wrapper(::tcsetattr(\n        descriptor_service_.native_handle(impl), TCSANOW, &ios), ec);\n  return ec;\n}\n\nasio::error_code reactive_serial_port_service::do_get_option(\n    const reactive_serial_port_service::implementation_type& impl,\n    reactive_serial_port_service::load_function_type load,\n    void* option, asio::error_code& ec) const\n{\n  termios ios;\n  errno = 0;\n  descriptor_ops::error_wrapper(::tcgetattr(\n        descriptor_service_.native_handle(impl), &ios), ec);\n  if (ec)\n    return ec;\n\n  return 
load(option, ios, ec);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n#endif // defined(ASIO_HAS_SERIAL_PORT)\n\n#endif // ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/reactive_socket_service_base.ipp",
    "content": "//\n// detail/reactive_socket_service_base.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP\n#define ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_IOCP) \\\n  && !defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/reactive_socket_service_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nreactive_socket_service_base::reactive_socket_service_base(\n    execution_context& context)\n  : reactor_(use_service<reactor>(context))\n{\n  reactor_.init_task();\n}\n\nvoid reactive_socket_service_base::base_shutdown()\n{\n}\n\nvoid reactive_socket_service_base::construct(\n    reactive_socket_service_base::base_implementation_type& impl)\n{\n  impl.socket_ = invalid_socket;\n  impl.state_ = 0;\n}\n\nvoid reactive_socket_service_base::base_move_construct(\n    reactive_socket_service_base::base_implementation_type& impl,\n    reactive_socket_service_base::base_implementation_type& other_impl)\n  ASIO_NOEXCEPT\n{\n  impl.socket_ = other_impl.socket_;\n  other_impl.socket_ = invalid_socket;\n\n  impl.state_ = other_impl.state_;\n  other_impl.state_ = 0;\n\n  reactor_.move_descriptor(impl.socket_,\n      impl.reactor_data_, other_impl.reactor_data_);\n}\n\nvoid reactive_socket_service_base::base_move_assign(\n    reactive_socket_service_base::base_implementation_type& impl,\n    reactive_socket_service_base& other_service,\n    reactive_socket_service_base::base_implementation_type& other_impl)\n{\n  destroy(impl);\n\n  impl.socket_ = 
other_impl.socket_;\n  other_impl.socket_ = invalid_socket;\n\n  impl.state_ = other_impl.state_;\n  other_impl.state_ = 0;\n\n  other_service.reactor_.move_descriptor(impl.socket_,\n      impl.reactor_data_, other_impl.reactor_data_);\n}\n\nvoid reactive_socket_service_base::destroy(\n    reactive_socket_service_base::base_implementation_type& impl)\n{\n  if (impl.socket_ != invalid_socket)\n  {\n    ASIO_HANDLER_OPERATION((reactor_.context(),\n          \"socket\", &impl, impl.socket_, \"close\"));\n\n    reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,\n        (impl.state_ & socket_ops::possible_dup) == 0);\n\n    asio::error_code ignored_ec;\n    socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);\n\n    reactor_.cleanup_descriptor_data(impl.reactor_data_);\n  }\n}\n\nasio::error_code reactive_socket_service_base::close(\n    reactive_socket_service_base::base_implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((reactor_.context(),\n          \"socket\", &impl, impl.socket_, \"close\"));\n\n    reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,\n        (impl.state_ & socket_ops::possible_dup) == 0);\n\n    socket_ops::close(impl.socket_, impl.state_, false, ec);\n\n    reactor_.cleanup_descriptor_data(impl.reactor_data_);\n  }\n  else\n  {\n    ec = asio::error_code();\n  }\n\n  // The descriptor is closed by the OS even if close() returns an error.\n  //\n  // (Actually, POSIX says the state of the descriptor is unspecified. On\n  // Linux the descriptor is apparently closed anyway; e.g. see\n  //   http://lkml.org/lkml/2005/9/10/129\n  // We'll just have to assume that other OSes follow the same behaviour. 
The\n  // known exception is when Windows's closesocket() function fails with\n  // WSAEWOULDBLOCK, but this case is handled inside socket_ops::close().\n  construct(impl);\n\n  return ec;\n}\n\nsocket_type reactive_socket_service_base::release(\n    reactive_socket_service_base::base_implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return invalid_socket;\n  }\n\n  ASIO_HANDLER_OPERATION((reactor_.context(),\n        \"socket\", &impl, impl.socket_, \"release\"));\n\n  reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, false);\n  reactor_.cleanup_descriptor_data(impl.reactor_data_);\n  socket_type sock = impl.socket_;\n  construct(impl);\n  ec = asio::error_code();\n  return sock;\n}\n\nasio::error_code reactive_socket_service_base::cancel(\n    reactive_socket_service_base::base_implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return ec;\n  }\n\n  ASIO_HANDLER_OPERATION((reactor_.context(),\n        \"socket\", &impl, impl.socket_, \"cancel\"));\n\n  reactor_.cancel_ops(impl.socket_, impl.reactor_data_);\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code reactive_socket_service_base::do_open(\n    reactive_socket_service_base::base_implementation_type& impl,\n    int af, int type, int protocol, asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  socket_holder sock(socket_ops::socket(af, type, protocol, ec));\n  if (sock.get() == invalid_socket)\n    return ec;\n\n  if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_))\n  {\n    ec = asio::error_code(err,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  impl.socket_ = sock.release();\n  switch (type)\n  {\n  case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;\n  case SOCK_DGRAM: impl.state_ = 
socket_ops::datagram_oriented; break;\n  default: impl.state_ = 0; break;\n  }\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code reactive_socket_service_base::do_assign(\n    reactive_socket_service_base::base_implementation_type& impl, int type,\n    const reactive_socket_service_base::native_handle_type& native_socket,\n    asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  if (int err = reactor_.register_descriptor(\n        native_socket, impl.reactor_data_))\n  {\n    ec = asio::error_code(err,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  impl.socket_ = native_socket;\n  switch (type)\n  {\n  case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;\n  case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;\n  default: impl.state_ = 0; break;\n  }\n  impl.state_ |= socket_ops::possible_dup;\n  ec = asio::error_code();\n  return ec;\n}\n\nvoid reactive_socket_service_base::start_op(\n    reactive_socket_service_base::base_implementation_type& impl,\n    int op_type, reactor_op* op, bool is_continuation,\n    bool is_non_blocking, bool noop)\n{\n  if (!noop)\n  {\n    if ((impl.state_ & socket_ops::non_blocking)\n        || socket_ops::set_internal_non_blocking(\n          impl.socket_, impl.state_, true, op->ec_))\n    {\n      reactor_.start_op(op_type, impl.socket_,\n          impl.reactor_data_, op, is_continuation, is_non_blocking);\n      return;\n    }\n  }\n\n  reactor_.post_immediate_completion(op, is_continuation);\n}\n\nvoid reactive_socket_service_base::start_accept_op(\n    reactive_socket_service_base::base_implementation_type& impl,\n    reactor_op* op, bool is_continuation, bool peer_is_open)\n{\n  if (!peer_is_open)\n    start_op(impl, reactor::read_op, op, is_continuation, true, false);\n  else\n  {\n    op->ec_ = asio::error::already_open;\n    reactor_.post_immediate_completion(op, is_continuation);\n  }\n}\n\nvoid 
reactive_socket_service_base::start_connect_op(\n    reactive_socket_service_base::base_implementation_type& impl,\n    reactor_op* op, bool is_continuation,\n    const socket_addr_type* addr, size_t addrlen)\n{\n  if ((impl.state_ & socket_ops::non_blocking)\n      || socket_ops::set_internal_non_blocking(\n        impl.socket_, impl.state_, true, op->ec_))\n  {\n    if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)\n    {\n      if (op->ec_ == asio::error::in_progress\n          || op->ec_ == asio::error::would_block)\n      {\n        op->ec_ = asio::error_code();\n        reactor_.start_op(reactor::connect_op, impl.socket_,\n            impl.reactor_data_, op, is_continuation, false);\n        return;\n      }\n    }\n  }\n\n  reactor_.post_immediate_completion(op, is_continuation);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_HAS_IOCP)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/resolver_service_base.ipp",
    "content": "//\n// detail/impl/resolver_service_base.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP\n#define ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/resolver_service_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass resolver_service_base::work_scheduler_runner\n{\npublic:\n  work_scheduler_runner(scheduler_impl& work_scheduler)\n    : work_scheduler_(work_scheduler)\n  {\n  }\n\n  void operator()()\n  {\n    asio::error_code ec;\n    work_scheduler_.run(ec);\n  }\n\nprivate:\n  scheduler_impl& work_scheduler_;\n};\n\nresolver_service_base::resolver_service_base(execution_context& context)\n  : scheduler_(asio::use_service<scheduler_impl>(context)),\n    work_scheduler_(new scheduler_impl(context, -1, false)),\n    work_thread_(0)\n{\n  work_scheduler_->work_started();\n}\n\nresolver_service_base::~resolver_service_base()\n{\n  base_shutdown();\n}\n\nvoid resolver_service_base::base_shutdown()\n{\n  if (work_scheduler_.get())\n  {\n    work_scheduler_->work_finished();\n    work_scheduler_->stop();\n    if (work_thread_.get())\n    {\n      work_thread_->join();\n      work_thread_.reset();\n    }\n    work_scheduler_.reset();\n  }\n}\n\nvoid resolver_service_base::base_notify_fork(\n    execution_context::fork_event fork_ev)\n{\n  if (work_thread_.get())\n  {\n    if (fork_ev == execution_context::fork_prepare)\n    {\n      work_scheduler_->stop();\n      work_thread_->join();\n      work_thread_.reset();\n    }\n    else\n    {\n      
work_scheduler_->restart();\n      work_thread_.reset(new asio::detail::thread(\n            work_scheduler_runner(*work_scheduler_)));\n    }\n  }\n}\n\nvoid resolver_service_base::construct(\n    resolver_service_base::implementation_type& impl)\n{\n  impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());\n}\n\nvoid resolver_service_base::destroy(\n    resolver_service_base::implementation_type& impl)\n{\n  ASIO_HANDLER_OPERATION((scheduler_.context(),\n        \"resolver\", &impl, 0, \"cancel\"));\n\n  impl.reset();\n}\n\nvoid resolver_service_base::move_construct(implementation_type& impl,\n    implementation_type& other_impl)\n{\n  impl = ASIO_MOVE_CAST(implementation_type)(other_impl);\n}\n\nvoid resolver_service_base::move_assign(implementation_type& impl,\n    resolver_service_base&, implementation_type& other_impl)\n{\n  destroy(impl);\n  impl = ASIO_MOVE_CAST(implementation_type)(other_impl);\n}\n\nvoid resolver_service_base::cancel(\n    resolver_service_base::implementation_type& impl)\n{\n  ASIO_HANDLER_OPERATION((scheduler_.context(),\n        \"resolver\", &impl, 0, \"cancel\"));\n\n  impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());\n}\n\nvoid resolver_service_base::start_resolve_op(resolve_op* op)\n{\n  if (ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,\n        scheduler_.concurrency_hint()))\n  {\n    start_work_thread();\n    scheduler_.work_started();\n    work_scheduler_->post_immediate_completion(op, false);\n  }\n  else\n  {\n    op->ec_ = asio::error::operation_not_supported;\n    scheduler_.post_immediate_completion(op, false);\n  }\n}\n\nvoid resolver_service_base::start_work_thread()\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  if (!work_thread_.get())\n  {\n    work_thread_.reset(new asio::detail::thread(\n          work_scheduler_runner(*work_scheduler_)));\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // 
ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/scheduler.ipp",
    "content": "//\n// detail/impl/scheduler.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP\n#define ASIO_DETAIL_IMPL_SCHEDULER_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/concurrency_hint.hpp\"\n#include \"asio/detail/event.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/reactor.hpp\"\n#include \"asio/detail/scheduler.hpp\"\n#include \"asio/detail/scheduler_thread_info.hpp\"\n#include \"asio/detail/signal_blocker.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass scheduler::thread_function\n{\npublic:\n  explicit thread_function(scheduler* s)\n    : this_(s)\n  {\n  }\n\n  void operator()()\n  {\n    asio::error_code ec;\n    this_->run(ec);\n  }\n\nprivate:\n  scheduler* this_;\n};\n\nstruct scheduler::task_cleanup\n{\n  ~task_cleanup()\n  {\n    if (this_thread_->private_outstanding_work > 0)\n    {\n      asio::detail::increment(\n          scheduler_->outstanding_work_,\n          this_thread_->private_outstanding_work);\n    }\n    this_thread_->private_outstanding_work = 0;\n\n    // Enqueue the completed operations and reinsert the task at the end of\n    // the operation queue.\n    lock_->lock();\n    scheduler_->task_interrupted_ = true;\n    scheduler_->op_queue_.push(this_thread_->private_op_queue);\n    scheduler_->op_queue_.push(&scheduler_->task_operation_);\n  }\n\n  scheduler* scheduler_;\n  mutex::scoped_lock* lock_;\n  thread_info* this_thread_;\n};\n\nstruct scheduler::work_cleanup\n{\n  ~work_cleanup()\n  {\n    if (this_thread_->private_outstanding_work > 1)\n    {\n    
  asio::detail::increment(\n          scheduler_->outstanding_work_,\n          this_thread_->private_outstanding_work - 1);\n    }\n    else if (this_thread_->private_outstanding_work < 1)\n    {\n      scheduler_->work_finished();\n    }\n    this_thread_->private_outstanding_work = 0;\n\n#if defined(ASIO_HAS_THREADS)\n    if (!this_thread_->private_op_queue.empty())\n    {\n      lock_->lock();\n      scheduler_->op_queue_.push(this_thread_->private_op_queue);\n    }\n#endif // defined(ASIO_HAS_THREADS)\n  }\n\n  scheduler* scheduler_;\n  mutex::scoped_lock* lock_;\n  thread_info* this_thread_;\n};\n\nscheduler::scheduler(asio::execution_context& ctx,\n    int concurrency_hint, bool own_thread)\n  : asio::detail::execution_context_service_base<scheduler>(ctx),\n    one_thread_(concurrency_hint == 1\n        || !ASIO_CONCURRENCY_HINT_IS_LOCKING(\n          SCHEDULER, concurrency_hint)\n        || !ASIO_CONCURRENCY_HINT_IS_LOCKING(\n          REACTOR_IO, concurrency_hint)),\n    mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(\n          SCHEDULER, concurrency_hint)),\n    task_(0),\n    task_interrupted_(true),\n    outstanding_work_(0),\n    stopped_(false),\n    shutdown_(false),\n    concurrency_hint_(concurrency_hint),\n    thread_(0)\n{\n  ASIO_HANDLER_TRACKING_INIT;\n\n  if (own_thread)\n  {\n    ++outstanding_work_;\n    asio::detail::signal_blocker sb;\n    thread_ = new asio::detail::thread(thread_function(this));\n  }\n}\n\nscheduler::~scheduler()\n{\n  if (thread_)\n  {\n    thread_->join();\n    delete thread_;\n  }\n}\n\nvoid scheduler::shutdown()\n{\n  mutex::scoped_lock lock(mutex_);\n  shutdown_ = true;\n  if (thread_)\n    stop_all_threads(lock);\n  lock.unlock();\n\n  // Join thread to ensure task operation is returned to queue.\n  if (thread_)\n  {\n    thread_->join();\n    delete thread_;\n    thread_ = 0;\n  }\n\n  // Destroy handler objects.\n  while (!op_queue_.empty())\n  {\n    operation* o = op_queue_.front();\n    op_queue_.pop();\n    if (o 
!= &task_operation_)\n      o->destroy();\n  }\n\n  // Reset to initial state.\n  task_ = 0;\n}\n\nvoid scheduler::init_task()\n{\n  mutex::scoped_lock lock(mutex_);\n  if (!shutdown_ && !task_)\n  {\n    task_ = &use_service<reactor>(this->context());\n    op_queue_.push(&task_operation_);\n    wake_one_thread_and_unlock(lock);\n  }\n}\n\nstd::size_t scheduler::run(asio::error_code& ec)\n{\n  ec = asio::error_code();\n  if (outstanding_work_ == 0)\n  {\n    stop();\n    return 0;\n  }\n\n  thread_info this_thread;\n  this_thread.private_outstanding_work = 0;\n  thread_call_stack::context ctx(this, this_thread);\n\n  mutex::scoped_lock lock(mutex_);\n\n  std::size_t n = 0;\n  for (; do_run_one(lock, this_thread, ec); lock.lock())\n    if (n != (std::numeric_limits<std::size_t>::max)())\n      ++n;\n  return n;\n}\n\nstd::size_t scheduler::run_one(asio::error_code& ec)\n{\n  ec = asio::error_code();\n  if (outstanding_work_ == 0)\n  {\n    stop();\n    return 0;\n  }\n\n  thread_info this_thread;\n  this_thread.private_outstanding_work = 0;\n  thread_call_stack::context ctx(this, this_thread);\n\n  mutex::scoped_lock lock(mutex_);\n\n  return do_run_one(lock, this_thread, ec);\n}\n\nstd::size_t scheduler::wait_one(long usec, asio::error_code& ec)\n{\n  ec = asio::error_code();\n  if (outstanding_work_ == 0)\n  {\n    stop();\n    return 0;\n  }\n\n  thread_info this_thread;\n  this_thread.private_outstanding_work = 0;\n  thread_call_stack::context ctx(this, this_thread);\n\n  mutex::scoped_lock lock(mutex_);\n\n  return do_wait_one(lock, this_thread, usec, ec);\n}\n\nstd::size_t scheduler::poll(asio::error_code& ec)\n{\n  ec = asio::error_code();\n  if (outstanding_work_ == 0)\n  {\n    stop();\n    return 0;\n  }\n\n  thread_info this_thread;\n  this_thread.private_outstanding_work = 0;\n  thread_call_stack::context ctx(this, this_thread);\n\n  mutex::scoped_lock lock(mutex_);\n\n#if defined(ASIO_HAS_THREADS)\n  // We want to support nested calls to poll() and 
poll_one(), so any handlers\n  // that are already on a thread-private queue need to be put on to the main\n  // queue now.\n  if (one_thread_)\n    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))\n      op_queue_.push(outer_info->private_op_queue);\n#endif // defined(ASIO_HAS_THREADS)\n\n  std::size_t n = 0;\n  for (; do_poll_one(lock, this_thread, ec); lock.lock())\n    if (n != (std::numeric_limits<std::size_t>::max)())\n      ++n;\n  return n;\n}\n\nstd::size_t scheduler::poll_one(asio::error_code& ec)\n{\n  ec = asio::error_code();\n  if (outstanding_work_ == 0)\n  {\n    stop();\n    return 0;\n  }\n\n  thread_info this_thread;\n  this_thread.private_outstanding_work = 0;\n  thread_call_stack::context ctx(this, this_thread);\n\n  mutex::scoped_lock lock(mutex_);\n\n#if defined(ASIO_HAS_THREADS)\n  // We want to support nested calls to poll() and poll_one(), so any handlers\n  // that are already on a thread-private queue need to be put on to the main\n  // queue now.\n  if (one_thread_)\n    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))\n      op_queue_.push(outer_info->private_op_queue);\n#endif // defined(ASIO_HAS_THREADS)\n\n  return do_poll_one(lock, this_thread, ec);\n}\n\nvoid scheduler::stop()\n{\n  mutex::scoped_lock lock(mutex_);\n  stop_all_threads(lock);\n}\n\nbool scheduler::stopped() const\n{\n  mutex::scoped_lock lock(mutex_);\n  return stopped_;\n}\n\nvoid scheduler::restart()\n{\n  mutex::scoped_lock lock(mutex_);\n  stopped_ = false;\n}\n\nvoid scheduler::compensating_work_started()\n{\n  thread_info_base* this_thread = thread_call_stack::contains(this);\n  ++static_cast<thread_info*>(this_thread)->private_outstanding_work;\n}\n\nvoid scheduler::post_immediate_completion(\n    scheduler::operation* op, bool is_continuation)\n{\n#if defined(ASIO_HAS_THREADS)\n  if (one_thread_ || is_continuation)\n  {\n    if (thread_info_base* this_thread = thread_call_stack::contains(this))\n    {\n   
   ++static_cast<thread_info*>(this_thread)->private_outstanding_work;\n      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);\n      return;\n    }\n  }\n#else // defined(ASIO_HAS_THREADS)\n  (void)is_continuation;\n#endif // defined(ASIO_HAS_THREADS)\n\n  work_started();\n  mutex::scoped_lock lock(mutex_);\n  op_queue_.push(op);\n  wake_one_thread_and_unlock(lock);\n}\n\nvoid scheduler::post_deferred_completion(scheduler::operation* op)\n{\n#if defined(ASIO_HAS_THREADS)\n  if (one_thread_)\n  {\n    if (thread_info_base* this_thread = thread_call_stack::contains(this))\n    {\n      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);\n      return;\n    }\n  }\n#endif // defined(ASIO_HAS_THREADS)\n\n  mutex::scoped_lock lock(mutex_);\n  op_queue_.push(op);\n  wake_one_thread_and_unlock(lock);\n}\n\nvoid scheduler::post_deferred_completions(\n    op_queue<scheduler::operation>& ops)\n{\n  if (!ops.empty())\n  {\n#if defined(ASIO_HAS_THREADS)\n    if (one_thread_)\n    {\n      if (thread_info_base* this_thread = thread_call_stack::contains(this))\n      {\n        static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);\n        return;\n      }\n    }\n#endif // defined(ASIO_HAS_THREADS)\n\n    mutex::scoped_lock lock(mutex_);\n    op_queue_.push(ops);\n    wake_one_thread_and_unlock(lock);\n  }\n}\n\nvoid scheduler::do_dispatch(\n    scheduler::operation* op)\n{\n  work_started();\n  mutex::scoped_lock lock(mutex_);\n  op_queue_.push(op);\n  wake_one_thread_and_unlock(lock);\n}\n\nvoid scheduler::abandon_operations(\n    op_queue<scheduler::operation>& ops)\n{\n  op_queue<scheduler::operation> ops2;\n  ops2.push(ops);\n}\n\nstd::size_t scheduler::do_run_one(mutex::scoped_lock& lock,\n    scheduler::thread_info& this_thread,\n    const asio::error_code& ec)\n{\n  while (!stopped_)\n  {\n    if (!op_queue_.empty())\n    {\n      // Prepare to execute first handler from queue.\n      operation* o = op_queue_.front();\n    
  op_queue_.pop();\n      bool more_handlers = (!op_queue_.empty());\n\n      if (o == &task_operation_)\n      {\n        task_interrupted_ = more_handlers;\n\n        if (more_handlers && !one_thread_)\n          wakeup_event_.unlock_and_signal_one(lock);\n        else\n          lock.unlock();\n\n        task_cleanup on_exit = { this, &lock, &this_thread };\n        (void)on_exit;\n\n        // Run the task. May throw an exception. Only block if the operation\n        // queue is empty and we're not polling, otherwise we want to return\n        // as soon as possible.\n        task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);\n      }\n      else\n      {\n        std::size_t task_result = o->task_result_;\n\n        if (more_handlers && !one_thread_)\n          wake_one_thread_and_unlock(lock);\n        else\n          lock.unlock();\n\n        // Ensure the count of outstanding work is decremented on block exit.\n        work_cleanup on_exit = { this, &lock, &this_thread };\n        (void)on_exit;\n\n        // Complete the operation. May throw an exception. 
Deletes the object.\n        o->complete(this, ec, task_result);\n\n        return 1;\n      }\n    }\n    else\n    {\n      wakeup_event_.clear(lock);\n      wakeup_event_.wait(lock);\n    }\n  }\n\n  return 0;\n}\n\nstd::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,\n    scheduler::thread_info& this_thread, long usec,\n    const asio::error_code& ec)\n{\n  if (stopped_)\n    return 0;\n\n  operation* o = op_queue_.front();\n  if (o == 0)\n  {\n    wakeup_event_.clear(lock);\n    wakeup_event_.wait_for_usec(lock, usec);\n    usec = 0; // Wait at most once.\n    o = op_queue_.front();\n  }\n\n  if (o == &task_operation_)\n  {\n    op_queue_.pop();\n    bool more_handlers = (!op_queue_.empty());\n\n    task_interrupted_ = more_handlers;\n\n    if (more_handlers && !one_thread_)\n      wakeup_event_.unlock_and_signal_one(lock);\n    else\n      lock.unlock();\n\n    {\n      task_cleanup on_exit = { this, &lock, &this_thread };\n      (void)on_exit;\n\n      // Run the task. May throw an exception. Only block if the operation\n      // queue is empty and we're not polling, otherwise we want to return\n      // as soon as possible.\n      task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);\n    }\n\n    o = op_queue_.front();\n    if (o == &task_operation_)\n    {\n      if (!one_thread_)\n        wakeup_event_.maybe_unlock_and_signal_one(lock);\n      return 0;\n    }\n  }\n\n  if (o == 0)\n    return 0;\n\n  op_queue_.pop();\n  bool more_handlers = (!op_queue_.empty());\n\n  std::size_t task_result = o->task_result_;\n\n  if (more_handlers && !one_thread_)\n    wake_one_thread_and_unlock(lock);\n  else\n    lock.unlock();\n\n  // Ensure the count of outstanding work is decremented on block exit.\n  work_cleanup on_exit = { this, &lock, &this_thread };\n  (void)on_exit;\n\n  // Complete the operation. May throw an exception. 
Deletes the object.\n  o->complete(this, ec, task_result);\n\n  return 1;\n}\n\nstd::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,\n    scheduler::thread_info& this_thread,\n    const asio::error_code& ec)\n{\n  if (stopped_)\n    return 0;\n\n  operation* o = op_queue_.front();\n  if (o == &task_operation_)\n  {\n    op_queue_.pop();\n    lock.unlock();\n\n    {\n      task_cleanup c = { this, &lock, &this_thread };\n      (void)c;\n\n      // Run the task. May throw an exception. Only block if the operation\n      // queue is empty and we're not polling, otherwise we want to return\n      // as soon as possible.\n      task_->run(0, this_thread.private_op_queue);\n    }\n\n    o = op_queue_.front();\n    if (o == &task_operation_)\n    {\n      wakeup_event_.maybe_unlock_and_signal_one(lock);\n      return 0;\n    }\n  }\n\n  if (o == 0)\n    return 0;\n\n  op_queue_.pop();\n  bool more_handlers = (!op_queue_.empty());\n\n  std::size_t task_result = o->task_result_;\n\n  if (more_handlers && !one_thread_)\n    wake_one_thread_and_unlock(lock);\n  else\n    lock.unlock();\n\n  // Ensure the count of outstanding work is decremented on block exit.\n  work_cleanup on_exit = { this, &lock, &this_thread };\n  (void)on_exit;\n\n  // Complete the operation. May throw an exception. 
Deletes the object.\n  o->complete(this, ec, task_result);\n\n  return 1;\n}\n\nvoid scheduler::stop_all_threads(\n    mutex::scoped_lock& lock)\n{\n  stopped_ = true;\n  wakeup_event_.signal_all(lock);\n\n  if (!task_interrupted_ && task_)\n  {\n    task_interrupted_ = true;\n    task_->interrupt();\n  }\n}\n\nvoid scheduler::wake_one_thread_and_unlock(\n    mutex::scoped_lock& lock)\n{\n  if (!wakeup_event_.maybe_unlock_and_signal_one(lock))\n  {\n    if (!task_interrupted_ && task_)\n    {\n      task_interrupted_ = true;\n      task_->interrupt();\n    }\n    lock.unlock();\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/select_reactor.hpp",
    "content": "//\n// detail/impl/select_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP\n#define ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP) \\\n  || (!defined(ASIO_HAS_DEV_POLL) \\\n      && !defined(ASIO_HAS_EPOLL) \\\n      && !defined(ASIO_HAS_KQUEUE) \\\n      && !defined(ASIO_WINDOWS_RUNTIME))\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Time_Traits>\nvoid select_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_add_timer_queue(queue);\n}\n\n// Remove a timer queue from the reactor.\ntemplate <typename Time_Traits>\nvoid select_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_remove_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid select_reactor::schedule_timer(timer_queue<Time_Traits>& queue,\n    const typename Time_Traits::time_type& time,\n    typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  if (shutdown_)\n  {\n    scheduler_.post_immediate_completion(op, false);\n    return;\n  }\n\n  bool earliest = queue.enqueue_timer(time, timer, op);\n  scheduler_.work_started();\n  if (earliest)\n    interrupter_.interrupt();\n}\n\ntemplate <typename Time_Traits>\nstd::size_t select_reactor::cancel_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& timer,\n    std::size_t max_cancelled)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n  return n;\n}\n\ntemplate <typename Time_Traits>\nvoid select_reactor::move_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& target,\n    typename timer_queue<Time_Traits>::per_timer_data& source)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  queue.cancel_timer(target, ops);\n  queue.move_timer(target, source);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n       //   || (!defined(ASIO_HAS_DEV_POLL)\n       //       && !defined(ASIO_HAS_EPOLL)\n       //       && !defined(ASIO_HAS_KQUEUE)\n       //       && !defined(ASIO_WINDOWS_RUNTIME))\n\n#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/select_reactor.ipp",
    "content": "//\n// detail/impl/select_reactor.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP\n#define ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP) \\\n  || (!defined(ASIO_HAS_DEV_POLL) \\\n      && !defined(ASIO_HAS_EPOLL) \\\n      && !defined(ASIO_HAS_KQUEUE) \\\n      && !defined(ASIO_WINDOWS_RUNTIME))\n\n#include \"asio/detail/fd_set_adapter.hpp\"\n#include \"asio/detail/select_reactor.hpp\"\n#include \"asio/detail/signal_blocker.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_HAS_IOCP)\nclass select_reactor::thread_function\n{\npublic:\n  explicit thread_function(select_reactor* r)\n    : this_(r)\n  {\n  }\n\n  void operator()()\n  {\n    this_->run_thread();\n  }\n\nprivate:\n  select_reactor* this_;\n};\n#endif // defined(ASIO_HAS_IOCP)\n\nselect_reactor::select_reactor(asio::execution_context& ctx)\n  : execution_context_service_base<select_reactor>(ctx),\n    scheduler_(use_service<scheduler_type>(ctx)),\n    mutex_(),\n    interrupter_(),\n#if defined(ASIO_HAS_IOCP)\n    stop_thread_(false),\n    thread_(0),\n#endif // defined(ASIO_HAS_IOCP)\n    shutdown_(false)\n{\n#if defined(ASIO_HAS_IOCP)\n  asio::detail::signal_blocker sb;\n  thread_ = new asio::detail::thread(thread_function(this));\n#endif // defined(ASIO_HAS_IOCP)\n}\n\nselect_reactor::~select_reactor()\n{\n  shutdown();\n}\n\nvoid select_reactor::shutdown()\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  shutdown_ = true;\n#if 
defined(ASIO_HAS_IOCP)\n  stop_thread_ = true;\n#endif // defined(ASIO_HAS_IOCP)\n  lock.unlock();\n\n#if defined(ASIO_HAS_IOCP)\n  if (thread_)\n  {\n    interrupter_.interrupt();\n    thread_->join();\n    delete thread_;\n    thread_ = 0;\n  }\n#endif // defined(ASIO_HAS_IOCP)\n\n  op_queue<operation> ops;\n\n  for (int i = 0; i < max_ops; ++i)\n    op_queue_[i].get_all_operations(ops);\n\n  timer_queues_.get_all_timers(ops);\n\n  scheduler_.abandon_operations(ops);\n}\n\nvoid select_reactor::notify_fork(\n    asio::execution_context::fork_event fork_ev)\n{\n  if (fork_ev == asio::execution_context::fork_child)\n    interrupter_.recreate();\n}\n\nvoid select_reactor::init_task()\n{\n  scheduler_.init_task();\n}\n\nint select_reactor::register_descriptor(socket_type,\n    select_reactor::per_descriptor_data&)\n{\n  return 0;\n}\n\nint select_reactor::register_internal_descriptor(\n    int op_type, socket_type descriptor,\n    select_reactor::per_descriptor_data&, reactor_op* op)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  op_queue_[op_type].enqueue_operation(descriptor, op);\n  interrupter_.interrupt();\n\n  return 0;\n}\n\nvoid select_reactor::move_descriptor(socket_type,\n    select_reactor::per_descriptor_data&,\n    select_reactor::per_descriptor_data&)\n{\n}\n\nvoid select_reactor::start_op(int op_type, socket_type descriptor,\n    select_reactor::per_descriptor_data&, reactor_op* op,\n    bool is_continuation, bool)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  if (shutdown_)\n  {\n    post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  bool first = op_queue_[op_type].enqueue_operation(descriptor, op);\n  scheduler_.work_started();\n  if (first)\n    interrupter_.interrupt();\n}\n\nvoid select_reactor::cancel_ops(socket_type descriptor,\n    select_reactor::per_descriptor_data&)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  cancel_ops_unlocked(descriptor, asio::error::operation_aborted);\n}\n\nvoid 
select_reactor::deregister_descriptor(socket_type descriptor,\n    select_reactor::per_descriptor_data&, bool)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  cancel_ops_unlocked(descriptor, asio::error::operation_aborted);\n}\n\nvoid select_reactor::deregister_internal_descriptor(\n    socket_type descriptor, select_reactor::per_descriptor_data&)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  for (int i = 0; i < max_ops; ++i)\n    op_queue_[i].cancel_operations(descriptor, ops);\n}\n\nvoid select_reactor::cleanup_descriptor_data(\n    select_reactor::per_descriptor_data&)\n{\n}\n\nvoid select_reactor::run(long usec, op_queue<operation>& ops)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n#if defined(ASIO_HAS_IOCP)\n  // Check if the thread is supposed to stop.\n  if (stop_thread_)\n    return;\n#endif // defined(ASIO_HAS_IOCP)\n\n  // Set up the descriptor sets.\n  for (int i = 0; i < max_select_ops; ++i)\n    fd_sets_[i].reset();\n  fd_sets_[read_op].set(interrupter_.read_descriptor());\n  socket_type max_fd = 0;\n  bool have_work_to_do = !timer_queues_.all_empty();\n  for (int i = 0; i < max_select_ops; ++i)\n  {\n    have_work_to_do = have_work_to_do || !op_queue_[i].empty();\n    fd_sets_[i].set(op_queue_[i], ops);\n    if (fd_sets_[i].max_descriptor() > max_fd)\n      max_fd = fd_sets_[i].max_descriptor();\n  }\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  // Connection operations on Windows use both except and write fd_sets.\n  have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty();\n  fd_sets_[write_op].set(op_queue_[connect_op], ops);\n  if (fd_sets_[write_op].max_descriptor() > max_fd)\n    max_fd = fd_sets_[write_op].max_descriptor();\n  fd_sets_[except_op].set(op_queue_[connect_op], ops);\n  if (fd_sets_[except_op].max_descriptor() > max_fd)\n    max_fd = fd_sets_[except_op].max_descriptor();\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n  // We can return 
immediately if there's no work to do and the reactor is\n  // not supposed to block.\n  if (!usec && !have_work_to_do)\n    return;\n\n  // Determine how long to block while waiting for events.\n  timeval tv_buf = { 0, 0 };\n  timeval* tv = usec ? get_timeout(usec, tv_buf) : &tv_buf;\n\n  lock.unlock();\n\n  // Block on the select call until descriptors become ready.\n  asio::error_code ec;\n  int retval = socket_ops::select(static_cast<int>(max_fd + 1),\n      fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec);\n\n  // Reset the interrupter.\n  if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor()))\n  {\n    interrupter_.reset();\n    --retval;\n  }\n\n  lock.lock();\n\n  // Dispatch all ready operations.\n  if (retval > 0)\n  {\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n    // Connection operations on Windows use both except and write fd_sets.\n    fd_sets_[except_op].perform(op_queue_[connect_op], ops);\n    fd_sets_[write_op].perform(op_queue_[connect_op], ops);\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n    // Exception operations must be processed first to ensure that any\n    // out-of-band data is read before normal data.\n    for (int i = max_select_ops - 1; i >= 0; --i)\n      fd_sets_[i].perform(op_queue_[i], ops);\n  }\n  timer_queues_.get_ready_timers(ops);\n}\n\nvoid select_reactor::interrupt()\n{\n  interrupter_.interrupt();\n}\n\n#if defined(ASIO_HAS_IOCP)\nvoid select_reactor::run_thread()\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  while (!stop_thread_)\n  {\n    lock.unlock();\n    op_queue<operation> ops;\n    run(true, ops);\n    scheduler_.post_deferred_completions(ops);\n    lock.lock();\n  }\n}\n#endif // defined(ASIO_HAS_IOCP)\n\nvoid select_reactor::do_add_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.insert(&queue);\n}\n\nvoid select_reactor::do_remove_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock 
lock(mutex_);\n  timer_queues_.erase(&queue);\n}\n\ntimeval* select_reactor::get_timeout(long usec, timeval& tv)\n{\n  // By default we will wait no longer than 5 minutes. This will ensure that\n  // any changes to the system clock are detected after no longer than this.\n  const long max_usec = 5 * 60 * 1000 * 1000;\n  usec = timer_queues_.wait_duration_usec(\n      (usec < 0 || max_usec < usec) ? max_usec : usec);\n  tv.tv_sec = usec / 1000000;\n  tv.tv_usec = usec % 1000000;\n  return &tv;\n}\n\nvoid select_reactor::cancel_ops_unlocked(socket_type descriptor,\n    const asio::error_code& ec)\n{\n  bool need_interrupt = false;\n  op_queue<operation> ops;\n  for (int i = 0; i < max_ops; ++i)\n    need_interrupt = op_queue_[i].cancel_operations(\n        descriptor, ops, ec) || need_interrupt;\n  scheduler_.post_deferred_completions(ops);\n  if (need_interrupt)\n    interrupter_.interrupt();\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n       //   || (!defined(ASIO_HAS_DEV_POLL)\n       //       && !defined(ASIO_HAS_EPOLL)\n       //       && !defined(ASIO_HAS_KQUEUE))\n       //       && !defined(ASIO_WINDOWS_RUNTIME))\n\n#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/service_registry.hpp",
    "content": "//\n// detail/impl/service_registry.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP\n#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Service>\nService& service_registry::use_service()\n{\n  execution_context::service::key key;\n  init_key<Service>(key, 0);\n  factory_type factory = &service_registry::create<Service, execution_context>;\n  return *static_cast<Service*>(do_use_service(key, factory, &owner_));\n}\n\ntemplate <typename Service>\nService& service_registry::use_service(io_context& owner)\n{\n  execution_context::service::key key;\n  init_key<Service>(key, 0);\n  factory_type factory = &service_registry::create<Service, io_context>;\n  return *static_cast<Service*>(do_use_service(key, factory, &owner));\n}\n\ntemplate <typename Service>\nvoid service_registry::add_service(Service* new_service)\n{\n  execution_context::service::key key;\n  init_key<Service>(key, 0);\n  return do_add_service(key, new_service);\n}\n\ntemplate <typename Service>\nbool service_registry::has_service() const\n{\n  execution_context::service::key key;\n  init_key<Service>(key, 0);\n  return do_has_service(key);\n}\n\ntemplate <typename Service>\ninline void service_registry::init_key(\n    execution_context::service::key& key, ...)\n{\n  init_key_from_id(key, Service::id);\n}\n\n#if !defined(ASIO_NO_TYPEID)\ntemplate <typename Service>\nvoid service_registry::init_key(execution_context::service::key& key,\n    typename enable_if<\n      is_base_of<typename 
Service::key_type, Service>::value>::type*)\n{\n  key.type_info_ = &typeid(typeid_wrapper<Service>);\n  key.id_ = 0;\n}\n\ntemplate <typename Service>\nvoid service_registry::init_key_from_id(execution_context::service::key& key,\n    const service_id<Service>& /*id*/)\n{\n  key.type_info_ = &typeid(typeid_wrapper<Service>);\n  key.id_ = 0;\n}\n#endif // !defined(ASIO_NO_TYPEID)\n\ntemplate <typename Service, typename Owner>\nexecution_context::service* service_registry::create(void* owner)\n{\n  return new Service(*static_cast<Owner*>(owner));\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/service_registry.ipp",
    "content": "//\n// detail/impl/service_registry.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP\n#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <vector>\n#include \"asio/detail/service_registry.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nservice_registry::service_registry(execution_context& owner)\n  : owner_(owner),\n    first_service_(0)\n{\n}\n\nservice_registry::~service_registry()\n{\n}\n\nvoid service_registry::shutdown_services()\n{\n  execution_context::service* service = first_service_;\n  while (service)\n  {\n    service->shutdown();\n    service = service->next_;\n  }\n}\n\nvoid service_registry::destroy_services()\n{\n  while (first_service_)\n  {\n    execution_context::service* next_service = first_service_->next_;\n    destroy(first_service_);\n    first_service_ = next_service;\n  }\n}\n\nvoid service_registry::notify_fork(execution_context::fork_event fork_ev)\n{\n  // Make a copy of all of the services while holding the lock. 
We don't want\n  // to hold the lock while calling into each service, as it may try to call\n  // back into this class.\n  std::vector<execution_context::service*> services;\n  {\n    asio::detail::mutex::scoped_lock lock(mutex_);\n    execution_context::service* service = first_service_;\n    while (service)\n    {\n      services.push_back(service);\n      service = service->next_;\n    }\n  }\n\n  // If processing the fork_prepare event, we want to go in reverse order of\n  // service registration, which happens to be the existing order of the\n  // services in the vector. For the other events we want to go in the other\n  // direction.\n  std::size_t num_services = services.size();\n  if (fork_ev == execution_context::fork_prepare)\n    for (std::size_t i = 0; i < num_services; ++i)\n      services[i]->notify_fork(fork_ev);\n  else\n    for (std::size_t i = num_services; i > 0; --i)\n      services[i - 1]->notify_fork(fork_ev);\n}\n\nvoid service_registry::init_key_from_id(execution_context::service::key& key,\n    const execution_context::id& id)\n{\n  key.type_info_ = 0;\n  key.id_ = &id;\n}\n\nbool service_registry::keys_match(\n    const execution_context::service::key& key1,\n    const execution_context::service::key& key2)\n{\n  if (key1.id_ && key2.id_)\n    if (key1.id_ == key2.id_)\n      return true;\n  if (key1.type_info_ && key2.type_info_)\n    if (*key1.type_info_ == *key2.type_info_)\n      return true;\n  return false;\n}\n\nvoid service_registry::destroy(execution_context::service* service)\n{\n  delete service;\n}\n\nexecution_context::service* service_registry::do_use_service(\n    const execution_context::service::key& key,\n    factory_type factory, void* owner)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  // First see if there is an existing service object with the given key.\n  execution_context::service* service = first_service_;\n  while (service)\n  {\n    if (keys_match(service->key_, key))\n      return service;\n    
service = service->next_;\n  }\n\n  // Create a new service object. The service registry's mutex is not locked\n  // at this time to allow for nested calls into this function from the new\n  // service's constructor.\n  lock.unlock();\n  auto_service_ptr new_service = { factory(owner) };\n  new_service.ptr_->key_ = key;\n  lock.lock();\n\n  // Check that nobody else created another service object of the same type\n  // while the lock was released.\n  service = first_service_;\n  while (service)\n  {\n    if (keys_match(service->key_, key))\n      return service;\n    service = service->next_;\n  }\n\n  // Service was successfully initialised, pass ownership to registry.\n  new_service.ptr_->next_ = first_service_;\n  first_service_ = new_service.ptr_;\n  new_service.ptr_ = 0;\n  return first_service_;\n}\n\nvoid service_registry::do_add_service(\n    const execution_context::service::key& key,\n    execution_context::service* new_service)\n{\n  if (&owner_ != &new_service->context())\n    asio::detail::throw_exception(invalid_service_owner());\n\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  // Check if there is an existing service object with the given key.\n  execution_context::service* service = first_service_;\n  while (service)\n  {\n    if (keys_match(service->key_, key))\n      asio::detail::throw_exception(service_already_exists());\n    service = service->next_;\n  }\n\n  // Take ownership of the service object.\n  new_service->key_ = key;\n  new_service->next_ = first_service_;\n  first_service_ = new_service;\n}\n\nbool service_registry::do_has_service(\n    const execution_context::service::key& key) const\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  execution_context::service* service = first_service_;\n  while (service)\n  {\n    if (keys_match(service->key_, key))\n      return true;\n    service = service->next_;\n  }\n\n  return false;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include 
\"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/signal_set_service.ipp",
    "content": "//\n// detail/impl/signal_set_service.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP\n#define ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <cstring>\n#include <stdexcept>\n#include \"asio/detail/reactor.hpp\"\n#include \"asio/detail/signal_blocker.hpp\"\n#include \"asio/detail/signal_set_service.hpp\"\n#include \"asio/detail/static_mutex.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct signal_state\n{\n  // Mutex used for protecting global state.\n  static_mutex mutex_;\n\n  // The read end of the pipe used for signal notifications.\n  int read_descriptor_;\n\n  // The write end of the pipe used for signal notifications.\n  int write_descriptor_;\n\n  // Whether the signal state has been prepared for a fork.\n  bool fork_prepared_;\n\n  // The head of a linked list of all signal_set_service instances.\n  class signal_set_service* service_list_;\n\n  // A count of the number of objects that are registered for each signal.\n  std::size_t registration_count_[max_signal_number];\n};\n\nsignal_state* get_signal_state()\n{\n  static signal_state state = {\n    ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0, { 0 } };\n  return &state;\n}\n\nvoid asio_signal_handler(int signal_number)\n{\n#if defined(ASIO_WINDOWS) \\\n  || defined(ASIO_WINDOWS_RUNTIME) \\\n  || defined(__CYGWIN__)\n  signal_set_service::deliver_signal(signal_number);\n#else // defined(ASIO_WINDOWS)\n      //   || defined(ASIO_WINDOWS_RUNTIME)\n      //   
|| defined(__CYGWIN__)\n  int saved_errno = errno;\n  signal_state* state = get_signal_state();\n  signed_size_type result = ::write(state->write_descriptor_,\n      &signal_number, sizeof(signal_number));\n  (void)result;\n  errno = saved_errno;\n#endif // defined(ASIO_WINDOWS)\n       //   || defined(ASIO_WINDOWS_RUNTIME)\n       //   || defined(__CYGWIN__)\n\n#if defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION)\n  ::signal(signal_number, asio_signal_handler);\n#endif // defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION)\n}\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\nclass signal_set_service::pipe_read_op : public reactor_op\n{\npublic:\n  pipe_read_op()\n    : reactor_op(&pipe_read_op::do_perform, pipe_read_op::do_complete)\n  {\n  }\n\n  static status do_perform(reactor_op*)\n  {\n    signal_state* state = get_signal_state();\n\n    int fd = state->read_descriptor_;\n    int signal_number = 0;\n    while (::read(fd, &signal_number, sizeof(int)) == sizeof(int))\n      if (signal_number >= 0 && signal_number < max_signal_number)\n        signal_set_service::deliver_signal(signal_number);\n\n    return not_done;\n  }\n\n  static void do_complete(void* /*owner*/, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    pipe_read_op* o(static_cast<pipe_read_op*>(base));\n    delete o;\n  }\n};\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n\nsignal_set_service::signal_set_service(execution_context& context)\n  : execution_context_service_base<signal_set_service>(context),\n    scheduler_(asio::use_service<scheduler_impl>(context)),\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n    reactor_(asio::use_service<reactor>(context)),\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && 
!defined(__CYGWIN__)\n    next_(0),\n    prev_(0)\n{\n  get_signal_state()->mutex_.init();\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n  reactor_.init_task();\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n\n  for (int i = 0; i < max_signal_number; ++i)\n    registrations_[i] = 0;\n\n  add_service(this);\n}\n\nsignal_set_service::~signal_set_service()\n{\n  remove_service(this);\n}\n\nvoid signal_set_service::shutdown()\n{\n  remove_service(this);\n\n  op_queue<operation> ops;\n\n  for (int i = 0; i < max_signal_number; ++i)\n  {\n    registration* reg = registrations_[i];\n    while (reg)\n    {\n      ops.push(*reg->queue_);\n      reg = reg->next_in_table_;\n    }\n  }\n\n  scheduler_.abandon_operations(ops);\n}\n\nvoid signal_set_service::notify_fork(execution_context::fork_event fork_ev)\n{\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n  signal_state* state = get_signal_state();\n  static_mutex::scoped_lock lock(state->mutex_);\n\n  switch (fork_ev)\n  {\n  case execution_context::fork_prepare:\n    {\n      int read_descriptor = state->read_descriptor_;\n      state->fork_prepared_ = true;\n      lock.unlock();\n      reactor_.deregister_internal_descriptor(read_descriptor, reactor_data_);\n      reactor_.cleanup_descriptor_data(reactor_data_);\n    }\n    break;\n  case execution_context::fork_parent:\n    if (state->fork_prepared_)\n    {\n      int read_descriptor = state->read_descriptor_;\n      state->fork_prepared_ = false;\n      lock.unlock();\n      reactor_.register_internal_descriptor(reactor::read_op,\n          read_descriptor, reactor_data_, new pipe_read_op);\n    }\n    break;\n  case execution_context::fork_child:\n    if (state->fork_prepared_)\n    {\n      asio::detail::signal_blocker blocker;\n      close_descriptors();\n      open_descriptors();\n      int 
read_descriptor = state->read_descriptor_;\n      state->fork_prepared_ = false;\n      lock.unlock();\n      reactor_.register_internal_descriptor(reactor::read_op,\n          read_descriptor, reactor_data_, new pipe_read_op);\n    }\n    break;\n  default:\n    break;\n  }\n#else // !defined(ASIO_WINDOWS)\n      //   && !defined(ASIO_WINDOWS_RUNTIME)\n      //   && !defined(__CYGWIN__)\n  (void)fork_ev;\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n}\n\nvoid signal_set_service::construct(\n    signal_set_service::implementation_type& impl)\n{\n  impl.signals_ = 0;\n}\n\nvoid signal_set_service::destroy(\n    signal_set_service::implementation_type& impl)\n{\n  asio::error_code ignored_ec;\n  clear(impl, ignored_ec);\n  cancel(impl, ignored_ec);\n}\n\nasio::error_code signal_set_service::add(\n    signal_set_service::implementation_type& impl,\n    int signal_number, asio::error_code& ec)\n{\n  // Check that the signal number is valid.\n  if (signal_number < 0 || signal_number >= max_signal_number)\n  {\n    ec = asio::error::invalid_argument;\n    return ec;\n  }\n\n  signal_state* state = get_signal_state();\n  static_mutex::scoped_lock lock(state->mutex_);\n\n  // Find the appropriate place to insert the registration.\n  registration** insertion_point = &impl.signals_;\n  registration* next = impl.signals_;\n  while (next && next->signal_number_ < signal_number)\n  {\n    insertion_point = &next->next_in_set_;\n    next = next->next_in_set_;\n  }\n\n  // Only do something if the signal is not already registered.\n  if (next == 0 || next->signal_number_ != signal_number)\n  {\n    registration* new_registration = new registration;\n\n#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)\n    // Register for the signal if we're the first.\n    if (state->registration_count_[signal_number] == 0)\n    {\n# if defined(ASIO_HAS_SIGACTION)\n      using namespace std; // For memset.\n      
struct sigaction sa;\n      memset(&sa, 0, sizeof(sa));\n      sa.sa_handler = asio_signal_handler;\n      sigfillset(&sa.sa_mask);\n      if (::sigaction(signal_number, &sa, 0) == -1)\n# else // defined(ASIO_HAS_SIGACTION)\n      if (::signal(signal_number, asio_signal_handler) == SIG_ERR)\n# endif // defined(ASIO_HAS_SIGACTION)\n      {\n# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        ec = asio::error::invalid_argument;\n# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        ec = asio::error_code(errno,\n            asio::error::get_system_category());\n# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        delete new_registration;\n        return ec;\n      }\n    }\n#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)\n\n    // Record the new registration in the set.\n    new_registration->signal_number_ = signal_number;\n    new_registration->queue_ = &impl.queue_;\n    new_registration->next_in_set_ = next;\n    *insertion_point = new_registration;\n\n    // Insert registration into the registration table.\n    new_registration->next_in_table_ = registrations_[signal_number];\n    if (registrations_[signal_number])\n      registrations_[signal_number]->prev_in_table_ = new_registration;\n    registrations_[signal_number] = new_registration;\n\n    ++state->registration_count_[signal_number];\n  }\n\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code signal_set_service::remove(\n    signal_set_service::implementation_type& impl,\n    int signal_number, asio::error_code& ec)\n{\n  // Check that the signal number is valid.\n  if (signal_number < 0 || signal_number >= max_signal_number)\n  {\n    ec = asio::error::invalid_argument;\n    return ec;\n  }\n\n  signal_state* state = get_signal_state();\n  static_mutex::scoped_lock lock(state->mutex_);\n\n  // Find the signal number in the list of registrations.\n  registration** deletion_point = &impl.signals_;\n  registration* reg = impl.signals_;\n  while (reg 
&& reg->signal_number_ < signal_number)\n  {\n    deletion_point = &reg->next_in_set_;\n    reg = reg->next_in_set_;\n  }\n\n  if (reg != 0 && reg->signal_number_ == signal_number)\n  {\n#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)\n    // Set signal handler back to the default if we're the last.\n    if (state->registration_count_[signal_number] == 1)\n    {\n# if defined(ASIO_HAS_SIGACTION)\n      using namespace std; // For memset.\n      struct sigaction sa;\n      memset(&sa, 0, sizeof(sa));\n      sa.sa_handler = SIG_DFL;\n      if (::sigaction(signal_number, &sa, 0) == -1)\n# else // defined(ASIO_HAS_SIGACTION)\n      if (::signal(signal_number, SIG_DFL) == SIG_ERR)\n# endif // defined(ASIO_HAS_SIGACTION)\n      {\n# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        ec = asio::error::invalid_argument;\n# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        ec = asio::error_code(errno,\n            asio::error::get_system_category());\n# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        return ec;\n      }\n    }\n#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)\n\n    // Remove the registration from the set.\n    *deletion_point = reg->next_in_set_;\n\n    // Remove the registration from the registration table.\n    if (registrations_[signal_number] == reg)\n      registrations_[signal_number] = reg->next_in_table_;\n    if (reg->prev_in_table_)\n      reg->prev_in_table_->next_in_table_ = reg->next_in_table_;\n    if (reg->next_in_table_)\n      reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;\n\n    --state->registration_count_[signal_number];\n\n    delete reg;\n  }\n\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code signal_set_service::clear(\n    signal_set_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  signal_state* state = get_signal_state();\n  static_mutex::scoped_lock lock(state->mutex_);\n\n  while (registration* reg = impl.signals_)\n  
{\n#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)\n    // Set signal handler back to the default if we're the last.\n    if (state->registration_count_[reg->signal_number_] == 1)\n    {\n# if defined(ASIO_HAS_SIGACTION)\n      using namespace std; // For memset.\n      struct sigaction sa;\n      memset(&sa, 0, sizeof(sa));\n      sa.sa_handler = SIG_DFL;\n      if (::sigaction(reg->signal_number_, &sa, 0) == -1)\n# else // defined(ASIO_HAS_SIGACTION)\n      if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR)\n# endif // defined(ASIO_HAS_SIGACTION)\n      {\n# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        ec = asio::error::invalid_argument;\n# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        ec = asio::error_code(errno,\n            asio::error::get_system_category());\n# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n        return ec;\n      }\n    }\n#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)\n\n    // Remove the registration from the registration table.\n    if (registrations_[reg->signal_number_] == reg)\n      registrations_[reg->signal_number_] = reg->next_in_table_;\n    if (reg->prev_in_table_)\n      reg->prev_in_table_->next_in_table_ = reg->next_in_table_;\n    if (reg->next_in_table_)\n      reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;\n\n    --state->registration_count_[reg->signal_number_];\n\n    impl.signals_ = reg->next_in_set_;\n    delete reg;\n  }\n\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code signal_set_service::cancel(\n    signal_set_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  ASIO_HANDLER_OPERATION((scheduler_.context(),\n        \"signal_set\", &impl, 0, \"cancel\"));\n\n  op_queue<operation> ops;\n  {\n    signal_state* state = get_signal_state();\n    static_mutex::scoped_lock lock(state->mutex_);\n\n    while (signal_op* op = impl.queue_.front())\n    {\n      op->ec_ = asio::error::operation_aborted;\n      
impl.queue_.pop();\n      ops.push(op);\n    }\n  }\n\n  scheduler_.post_deferred_completions(ops);\n\n  ec = asio::error_code();\n  return ec;\n}\n\nvoid signal_set_service::deliver_signal(int signal_number)\n{\n  signal_state* state = get_signal_state();\n  static_mutex::scoped_lock lock(state->mutex_);\n\n  signal_set_service* service = state->service_list_;\n  while (service)\n  {\n    op_queue<operation> ops;\n\n    registration* reg = service->registrations_[signal_number];\n    while (reg)\n    {\n      if (reg->queue_->empty())\n      {\n        ++reg->undelivered_;\n      }\n      else\n      {\n        while (signal_op* op = reg->queue_->front())\n        {\n          op->signal_number_ = signal_number;\n          reg->queue_->pop();\n          ops.push(op);\n        }\n      }\n\n      reg = reg->next_in_table_;\n    }\n\n    service->scheduler_.post_deferred_completions(ops);\n\n    service = service->next_;\n  }\n}\n\nvoid signal_set_service::add_service(signal_set_service* service)\n{\n  signal_state* state = get_signal_state();\n  static_mutex::scoped_lock lock(state->mutex_);\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n  // If this is the first service to be created, open a new pipe.\n  if (state->service_list_ == 0)\n    open_descriptors();\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n  // If a scheduler_ object is thread-unsafe then it must be the only\n  // scheduler used to create signal_set objects.\n  if (state->service_list_ != 0)\n  {\n    if (!ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,\n          service->scheduler_.concurrency_hint())\n        || !ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,\n          state->service_list_->scheduler_.concurrency_hint()))\n    {\n      std::logic_error ex(\n          \"Thread-unsafe execution context objects require \"\n          \"exclusive access to signal handling.\");\n      asio::detail::throw_exception(ex);\n    }\n  }\n\n  // Insert service into linked list of all 
services.\n  service->next_ = state->service_list_;\n  service->prev_ = 0;\n  if (state->service_list_)\n    state->service_list_->prev_ = service;\n  state->service_list_ = service;\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n  // Register for pipe readiness notifications.\n  int read_descriptor = state->read_descriptor_;\n  lock.unlock();\n  service->reactor_.register_internal_descriptor(reactor::read_op,\n      read_descriptor, service->reactor_data_, new pipe_read_op);\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n}\n\nvoid signal_set_service::remove_service(signal_set_service* service)\n{\n  signal_state* state = get_signal_state();\n  static_mutex::scoped_lock lock(state->mutex_);\n\n  if (service->next_ || service->prev_ || state->service_list_ == service)\n  {\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n    // Disable the pipe readiness notifications.\n    int read_descriptor = state->read_descriptor_;\n    lock.unlock();\n    service->reactor_.deregister_internal_descriptor(\n        read_descriptor, service->reactor_data_);\n    service->reactor_.cleanup_descriptor_data(service->reactor_data_);\n    lock.lock();\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n\n    // Remove service from linked list of all services.\n    if (state->service_list_ == service)\n      state->service_list_ = service->next_;\n    if (service->prev_)\n      service->prev_->next_ = service->next_;\n    if (service->next_)\n      service->next_->prev_= service->prev_;\n    service->next_ = 0;\n    service->prev_ = 0;\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n    // If this is the last service to be removed, close the pipe.\n    if (state->service_list_ == 0)\n      close_descriptors();\n#endif // 
!defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n  }\n}\n\nvoid signal_set_service::open_descriptors()\n{\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n  signal_state* state = get_signal_state();\n\n  int pipe_fds[2];\n  if (::pipe(pipe_fds) == 0)\n  {\n    state->read_descriptor_ = pipe_fds[0];\n    ::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK);\n\n    state->write_descriptor_ = pipe_fds[1];\n    ::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK);\n\n#if defined(FD_CLOEXEC)\n    ::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC);\n    ::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC);\n#endif // defined(FD_CLOEXEC)\n  }\n  else\n  {\n    asio::error_code ec(errno,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"signal_set_service pipe\");\n  }\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n}\n\nvoid signal_set_service::close_descriptors()\n{\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n  signal_state* state = get_signal_state();\n\n  if (state->read_descriptor_ != -1)\n    ::close(state->read_descriptor_);\n  state->read_descriptor_ = -1;\n\n  if (state->write_descriptor_ != -1)\n    ::close(state->write_descriptor_);\n  state->write_descriptor_ = -1;\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n}\n\nvoid signal_set_service::start_wait_op(\n    signal_set_service::implementation_type& impl, signal_op* op)\n{\n  scheduler_.work_started();\n\n  signal_state* state = get_signal_state();\n  static_mutex::scoped_lock lock(state->mutex_);\n\n  registration* reg = impl.signals_;\n  while (reg)\n  {\n    if (reg->undelivered_ > 0)\n    {\n      --reg->undelivered_;\n      op->signal_number_ = reg->signal_number_;\n      scheduler_.post_deferred_completion(op);\n      
return;\n    }\n\n    reg = reg->next_in_set_;\n  }\n\n  impl.queue_.push(op);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/socket_ops.ipp",
    "content": "//\n// detail/impl/socket_ops.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SOCKET_OPS_IPP\n#define ASIO_DETAIL_SOCKET_OPS_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <cctype>\n#include <cstdio>\n#include <cstdlib>\n#include <cstring>\n#include <cerrno>\n#include <new>\n#include \"asio/detail/assert.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/error.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include <codecvt>\n# include <locale>\n# include <string>\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) \\\n  || defined(__MACH__) && defined(__APPLE__)\n# if defined(ASIO_HAS_PTHREADS)\n#  include <pthread.h>\n# endif // defined(ASIO_HAS_PTHREADS)\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n       // || defined(__MACH__) && defined(__APPLE__)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\nnamespace socket_ops {\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\nstruct msghdr { int msg_namelen; };\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#if defined(__hpux)\n// HP-UX doesn't declare these functions extern \"C\", so they are declared again\n// here to avoid linker errors about undefined symbols.\nextern \"C\" char* if_indextoname(unsigned int, char*);\nextern \"C\" unsigned int if_nametoindex(const char*);\n#endif // defined(__hpux)\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\ninline void clear_last_error()\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  WSASetLastError(0);\n#else\n  errno = 
0;\n#endif\n}\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\ntemplate <typename ReturnType>\ninline ReturnType error_wrapper(ReturnType return_value,\n    asio::error_code& ec)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  ec = asio::error_code(WSAGetLastError(),\n      asio::error::get_system_category());\n#else\n  ec = asio::error_code(errno,\n      asio::error::get_system_category());\n#endif\n  return return_value;\n}\n\ntemplate <typename SockLenType>\ninline socket_type call_accept(SockLenType msghdr::*,\n    socket_type s, socket_addr_type* addr, std::size_t* addrlen)\n{\n  SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0;\n  socket_type result = ::accept(s, addr, addrlen ? &tmp_addrlen : 0);\n  if (addrlen)\n    *addrlen = (std::size_t)tmp_addrlen;\n  return result;\n}\n\nsocket_type accept(socket_type s, socket_addr_type* addr,\n    std::size_t* addrlen, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return invalid_socket;\n  }\n\n  clear_last_error();\n\n  socket_type new_s = error_wrapper(call_accept(\n        &msghdr::msg_namelen, s, addr, addrlen), ec);\n  if (new_s == invalid_socket)\n    return new_s;\n\n#if defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__)\n  int optval = 1;\n  int result = error_wrapper(::setsockopt(new_s,\n        SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec);\n  if (result != 0)\n  {\n    ::close(new_s);\n    return invalid_socket;\n  }\n#endif\n\n  ec = asio::error_code();\n  return new_s;\n}\n\nsocket_type sync_accept(socket_type s, state_type state,\n    socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec)\n{\n  // Accept a socket.\n  for (;;)\n  {\n    // Try to complete the operation without blocking.\n    socket_type new_socket = socket_ops::accept(s, addr, addrlen, ec);\n\n    // Check if operation succeeded.\n    if (new_socket != invalid_socket)\n      return new_socket;\n\n    // Operation failed.\n    if (ec 
== asio::error::would_block\n        || ec == asio::error::try_again)\n    {\n      if (state & user_set_non_blocking)\n        return invalid_socket;\n      // Fall through to retry operation.\n    }\n    else if (ec == asio::error::connection_aborted)\n    {\n      if (state & enable_connection_aborted)\n        return invalid_socket;\n      // Fall through to retry operation.\n    }\n#if defined(EPROTO)\n    else if (ec.value() == EPROTO)\n    {\n      if (state & enable_connection_aborted)\n        return invalid_socket;\n      // Fall through to retry operation.\n    }\n#endif // defined(EPROTO)\n    else\n      return invalid_socket;\n\n    // Wait for socket to become ready.\n    if (socket_ops::poll_read(s, 0, -1, ec) < 0)\n      return invalid_socket;\n  }\n}\n\n#if defined(ASIO_HAS_IOCP)\n\nvoid complete_iocp_accept(socket_type s,\n    void* output_buffer, DWORD address_length,\n    socket_addr_type* addr, std::size_t* addrlen,\n    socket_type new_socket, asio::error_code& ec)\n{\n  // Map non-portable errors to their portable counterparts.\n  if (ec.value() == ERROR_NETNAME_DELETED)\n    ec = asio::error::connection_aborted;\n\n  if (!ec)\n  {\n    // Get the address of the peer.\n    if (addr && addrlen)\n    {\n      LPSOCKADDR local_addr = 0;\n      int local_addr_length = 0;\n      LPSOCKADDR remote_addr = 0;\n      int remote_addr_length = 0;\n      GetAcceptExSockaddrs(output_buffer, 0, address_length,\n          address_length, &local_addr, &local_addr_length,\n          &remote_addr, &remote_addr_length);\n      if (static_cast<std::size_t>(remote_addr_length) > *addrlen)\n      {\n        ec = asio::error::invalid_argument;\n      }\n      else\n      {\n        using namespace std; // For memcpy.\n        memcpy(addr, remote_addr, remote_addr_length);\n        *addrlen = static_cast<std::size_t>(remote_addr_length);\n      }\n    }\n\n    // Need to set the SO_UPDATE_ACCEPT_CONTEXT option so that getsockname\n    // and getpeername will work 
on the accepted socket.\n    SOCKET update_ctx_param = s;\n    socket_ops::state_type state = 0;\n    socket_ops::setsockopt(new_socket, state,\n          SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,\n          &update_ctx_param, sizeof(SOCKET), ec);\n  }\n}\n\n#else // defined(ASIO_HAS_IOCP)\n\nbool non_blocking_accept(socket_type s,\n    state_type state, socket_addr_type* addr, std::size_t* addrlen,\n    asio::error_code& ec, socket_type& new_socket)\n{\n  for (;;)\n  {\n    // Accept the waiting connection.\n    new_socket = socket_ops::accept(s, addr, addrlen, ec);\n\n    // Check if operation succeeded.\n    if (new_socket != invalid_socket)\n      return true;\n\n    // Retry operation if interrupted by signal.\n    if (ec == asio::error::interrupted)\n      continue;\n\n    // Operation failed.\n    if (ec == asio::error::would_block\n        || ec == asio::error::try_again)\n    {\n      // Fall through to retry operation.\n    }\n    else if (ec == asio::error::connection_aborted)\n    {\n      if (state & enable_connection_aborted)\n        return true;\n      // Fall through to retry operation.\n    }\n#if defined(EPROTO)\n    else if (ec.value() == EPROTO)\n    {\n      if (state & enable_connection_aborted)\n        return true;\n      // Fall through to retry operation.\n    }\n#endif // defined(EPROTO)\n    else\n      return true;\n\n    return false;\n  }\n}\n\n#endif // defined(ASIO_HAS_IOCP)\n\ntemplate <typename SockLenType>\ninline int call_bind(SockLenType msghdr::*,\n    socket_type s, const socket_addr_type* addr, std::size_t addrlen)\n{\n  return ::bind(s, addr, (SockLenType)addrlen);\n}\n\nint bind(socket_type s, const socket_addr_type* addr,\n    std::size_t addrlen, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n  clear_last_error();\n  int result = error_wrapper(call_bind(\n        &msghdr::msg_namelen, s, addr, addrlen), ec);\n  if (result == 0)\n    
ec = asio::error_code();\n  return result;\n}\n\nint close(socket_type s, state_type& state,\n    bool destruction, asio::error_code& ec)\n{\n  int result = 0;\n  if (s != invalid_socket)\n  {\n    // We don't want the destructor to block, so set the socket to linger in\n    // the background. If the user doesn't like this behaviour then they need\n    // to explicitly close the socket.\n    if (destruction && (state & user_set_linger))\n    {\n      ::linger opt;\n      opt.l_onoff = 0;\n      opt.l_linger = 0;\n      asio::error_code ignored_ec;\n      socket_ops::setsockopt(s, state, SOL_SOCKET,\n          SO_LINGER, &opt, sizeof(opt), ignored_ec);\n    }\n\n    clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n    result = error_wrapper(::closesocket(s), ec);\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n    result = error_wrapper(::close(s), ec);\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n    if (result != 0\n        && (ec == asio::error::would_block\n          || ec == asio::error::try_again))\n    {\n      // According to UNIX Network Programming Vol. 1, it is possible for\n      // close() to fail with EWOULDBLOCK under certain circumstances. What\n      // isn't clear is the state of the descriptor after this error. The one\n      // current OS where this behaviour is seen, Windows, says that the socket\n      // remains open. 
Therefore we'll put the descriptor back into blocking\n      // mode and have another attempt at closing it.\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n      ioctl_arg_type arg = 0;\n      ::ioctlsocket(s, FIONBIO, &arg);\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# if defined(__SYMBIAN32__)\n      int flags = ::fcntl(s, F_GETFL, 0);\n      if (flags >= 0)\n        ::fcntl(s, F_SETFL, flags & ~O_NONBLOCK);\n# else // defined(__SYMBIAN32__)\n      ioctl_arg_type arg = 0;\n      ::ioctl(s, FIONBIO, &arg);\n# endif // defined(__SYMBIAN32__)\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n      state &= ~non_blocking;\n\n      clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n      result = error_wrapper(::closesocket(s), ec);\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n      result = error_wrapper(::close(s), ec);\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n    }\n  }\n\n  if (result == 0)\n    ec = asio::error_code();\n  return result;\n}\n\nbool set_user_non_blocking(socket_type s,\n    state_type& state, bool value, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return false;\n  }\n\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  ioctl_arg_type arg = (value ? 1 : 0);\n  int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec);\n#elif defined(__SYMBIAN32__)\n  int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec);\n  if (result >= 0)\n  {\n    clear_last_error();\n    int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));\n    result = error_wrapper(::fcntl(s, F_SETFL, flag), ec);\n  }\n#else\n  ioctl_arg_type arg = (value ? 
1 : 0);\n  int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec);\n#endif\n\n  if (result >= 0)\n  {\n    ec = asio::error_code();\n    if (value)\n      state |= user_set_non_blocking;\n    else\n    {\n      // Clearing the user-set non-blocking mode always overrides any\n      // internally-set non-blocking flag. Any subsequent asynchronous\n      // operations will need to re-enable non-blocking I/O.\n      state &= ~(user_set_non_blocking | internal_non_blocking);\n    }\n    return true;\n  }\n\n  return false;\n}\n\nbool set_internal_non_blocking(socket_type s,\n    state_type& state, bool value, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return false;\n  }\n\n  if (!value && (state & user_set_non_blocking))\n  {\n    // It does not make sense to clear the internal non-blocking flag if the\n    // user still wants non-blocking behaviour. Return an error and let the\n    // caller figure out whether to update the user-set non-blocking flag.\n    ec = asio::error::invalid_argument;\n    return false;\n  }\n\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  ioctl_arg_type arg = (value ? 1 : 0);\n  int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec);\n#elif defined(__SYMBIAN32__)\n  int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec);\n  if (result >= 0)\n  {\n    clear_last_error();\n    int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));\n    result = error_wrapper(::fcntl(s, F_SETFL, flag), ec);\n  }\n#else\n  ioctl_arg_type arg = (value ? 
1 : 0);\n  int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec);\n#endif\n\n  if (result >= 0)\n  {\n    ec = asio::error_code();\n    if (value)\n      state |= internal_non_blocking;\n    else\n      state &= ~internal_non_blocking;\n    return true;\n  }\n\n  return false;\n}\n\nint shutdown(socket_type s, int what, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n  clear_last_error();\n  int result = error_wrapper(::shutdown(s, what), ec);\n  if (result == 0)\n    ec = asio::error_code();\n  return result;\n}\n\ntemplate <typename SockLenType>\ninline int call_connect(SockLenType msghdr::*,\n    socket_type s, const socket_addr_type* addr, std::size_t addrlen)\n{\n  return ::connect(s, addr, (SockLenType)addrlen);\n}\n\nint connect(socket_type s, const socket_addr_type* addr,\n    std::size_t addrlen, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n  clear_last_error();\n  int result = error_wrapper(call_connect(\n        &msghdr::msg_namelen, s, addr, addrlen), ec);\n  if (result == 0)\n    ec = asio::error_code();\n#if defined(__linux__)\n  else if (ec == asio::error::try_again)\n    ec = asio::error::no_buffer_space;\n#endif // defined(__linux__)\n  return result;\n}\n\nvoid sync_connect(socket_type s, const socket_addr_type* addr,\n    std::size_t addrlen, asio::error_code& ec)\n{\n  // Perform the connect operation.\n  socket_ops::connect(s, addr, addrlen, ec);\n  if (ec != asio::error::in_progress\n      && ec != asio::error::would_block)\n  {\n    // The connect operation finished immediately.\n    return;\n  }\n\n  // Wait for socket to become ready.\n  if (socket_ops::poll_connect(s, -1, ec) < 0)\n    return;\n\n  // Get the error code from the connect operation.\n  int connect_error = 0;\n  size_t connect_error_len = sizeof(connect_error);\n  if 
(socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR,\n        &connect_error, &connect_error_len, ec) == socket_error_retval)\n    return;\n\n  // Return the result of the connect operation.\n  ec = asio::error_code(connect_error,\n      asio::error::get_system_category());\n}\n\n#if defined(ASIO_HAS_IOCP)\n\nvoid complete_iocp_connect(socket_type s, asio::error_code& ec)\n{\n  // Map non-portable errors to their portable counterparts.\n  switch (ec.value())\n  {\n  case ERROR_CONNECTION_REFUSED:\n    ec = asio::error::connection_refused;\n    break;\n  case ERROR_NETWORK_UNREACHABLE:\n    ec = asio::error::network_unreachable;\n    break;\n  case ERROR_HOST_UNREACHABLE:\n    ec = asio::error::host_unreachable;\n    break;\n  case ERROR_SEM_TIMEOUT:\n    ec = asio::error::timed_out;\n    break;\n  default:\n    break;\n  }\n\n  if (!ec)\n  {\n    // Need to set the SO_UPDATE_CONNECT_CONTEXT option so that getsockname\n    // and getpeername will work on the connected socket.\n    socket_ops::state_type state = 0;\n    const int so_update_connect_context = 0x7010;\n    socket_ops::setsockopt(s, state, SOL_SOCKET,\n        so_update_connect_context, 0, 0, ec);\n  }\n}\n\n#endif // defined(ASIO_HAS_IOCP)\n\nbool non_blocking_connect(socket_type s, asio::error_code& ec)\n{\n  // Check if the connect operation has finished. 
This is required since we may\n  // get spurious readiness notifications from the reactor.\n#if defined(ASIO_WINDOWS) \\\n  || defined(__CYGWIN__) \\\n  || defined(__SYMBIAN32__)\n  fd_set write_fds;\n  FD_ZERO(&write_fds);\n  FD_SET(s, &write_fds);\n  fd_set except_fds;\n  FD_ZERO(&except_fds);\n  FD_SET(s, &except_fds);\n  timeval zero_timeout;\n  zero_timeout.tv_sec = 0;\n  zero_timeout.tv_usec = 0;\n  int ready = ::select(s + 1, 0, &write_fds, &except_fds, &zero_timeout);\n#else // defined(ASIO_WINDOWS)\n      // || defined(__CYGWIN__)\n      // || defined(__SYMBIAN32__)\n  pollfd fds;\n  fds.fd = s;\n  fds.events = POLLOUT;\n  fds.revents = 0;\n  int ready = ::poll(&fds, 1, 0);\n#endif // defined(ASIO_WINDOWS)\n       // || defined(__CYGWIN__)\n       // || defined(__SYMBIAN32__)\n  if (ready == 0)\n  {\n    // The asynchronous connect operation is still in progress.\n    return false;\n  }\n\n  // Get the error code from the connect operation.\n  int connect_error = 0;\n  size_t connect_error_len = sizeof(connect_error);\n  if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR,\n        &connect_error, &connect_error_len, ec) == 0)\n  {\n    if (connect_error)\n    {\n      ec = asio::error_code(connect_error,\n          asio::error::get_system_category());\n    }\n    else\n      ec = asio::error_code();\n  }\n\n  return true;\n}\n\nint socketpair(int af, int type, int protocol,\n    socket_type sv[2], asio::error_code& ec)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  (void)(af);\n  (void)(type);\n  (void)(protocol);\n  (void)(sv);\n  ec = asio::error::operation_not_supported;\n  return socket_error_retval;\n#else\n  clear_last_error();\n  int result = error_wrapper(::socketpair(af, type, protocol, sv), ec);\n  if (result == 0)\n    ec = asio::error_code();\n  return result;\n#endif\n}\n\nbool sockatmark(socket_type s, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return false;\n  }\n\n#if 
defined(SIOCATMARK)\n  ioctl_arg_type value = 0;\n# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  int result = error_wrapper(::ioctlsocket(s, SIOCATMARK, &value), ec);\n# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  int result = error_wrapper(::ioctl(s, SIOCATMARK, &value), ec);\n# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  if (result == 0)\n    ec = asio::error_code();\n# if defined(ENOTTY)\n  if (ec.value() == ENOTTY)\n    ec = asio::error::not_socket;\n# endif // defined(ENOTTY)\n#else // defined(SIOCATMARK)\n  int value = error_wrapper(::sockatmark(s), ec);\n  if (value != -1)\n    ec = asio::error_code();\n#endif // defined(SIOCATMARK)\n\n  return ec ? false : value != 0;\n}\n\nsize_t available(socket_type s, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  ioctl_arg_type value = 0;\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  int result = error_wrapper(::ioctlsocket(s, FIONREAD, &value), ec);\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  int result = error_wrapper(::ioctl(s, FIONREAD, &value), ec);\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  if (result == 0)\n    ec = asio::error_code();\n#if defined(ENOTTY)\n  if (ec.value() == ENOTTY)\n    ec = asio::error::not_socket;\n#endif // defined(ENOTTY)\n\n  return ec ? 
static_cast<size_t>(0) : static_cast<size_t>(value);\n}\n\nint listen(socket_type s, int backlog, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n  clear_last_error();\n  int result = error_wrapper(::listen(s, backlog), ec);\n  if (result == 0)\n    ec = asio::error_code();\n  return result;\n}\n\ninline void init_buf_iov_base(void*& base, void* addr)\n{\n  base = addr;\n}\n\ntemplate <typename T>\ninline void init_buf_iov_base(T& base, void* addr)\n{\n  base = static_cast<T>(addr);\n}\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\ntypedef WSABUF buf;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\ntypedef iovec buf;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\nvoid init_buf(buf& b, void* data, size_t size)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  b.buf = static_cast<char*>(data);\n  b.len = static_cast<u_long>(size);\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  init_buf_iov_base(b.iov_base, data);\n  b.iov_len = size;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\nvoid init_buf(buf& b, const void* data, size_t size)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  b.buf = static_cast<char*>(const_cast<void*>(data));\n  b.len = static_cast<u_long>(size);\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  init_buf_iov_base(b.iov_base, const_cast<void*>(data));\n  b.iov_len = size;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\ninline void init_msghdr_msg_name(void*& name, socket_addr_type* addr)\n{\n  name = addr;\n}\n\ninline void init_msghdr_msg_name(void*& name, const socket_addr_type* addr)\n{\n  name = const_cast<socket_addr_type*>(addr);\n}\n\ntemplate <typename T>\ninline void init_msghdr_msg_name(T& name, socket_addr_type* addr)\n{\n  name = reinterpret_cast<T>(addr);\n}\n\ntemplate <typename T>\ninline void init_msghdr_msg_name(T& name, const socket_addr_type* addr)\n{\n 
 name = reinterpret_cast<T>(const_cast<socket_addr_type*>(addr));\n}\n\nsigned_size_type recv(socket_type s, buf* bufs, size_t count,\n    int flags, asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  // Receive some data.\n  DWORD recv_buf_count = static_cast<DWORD>(count);\n  DWORD bytes_transferred = 0;\n  DWORD recv_flags = flags;\n  int result = error_wrapper(::WSARecv(s, bufs,\n        recv_buf_count, &bytes_transferred, &recv_flags, 0, 0), ec);\n  if (ec.value() == ERROR_NETNAME_DELETED)\n    ec = asio::error::connection_reset;\n  else if (ec.value() == ERROR_PORT_UNREACHABLE)\n    ec = asio::error::connection_refused;\n  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)\n    result = 0;\n  if (result != 0)\n    return socket_error_retval;\n  ec = asio::error_code();\n  return bytes_transferred;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  msghdr msg = msghdr();\n  msg.msg_iov = bufs;\n  msg.msg_iovlen = static_cast<int>(count);\n  signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec);\n  if (result >= 0)\n    ec = asio::error_code();\n  return result;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\nsize_t sync_recv(socket_type s, state_type state, buf* bufs,\n    size_t count, int flags, bool all_empty, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  // A request to read 0 bytes on a stream is a no-op.\n  if (all_empty && (state & stream_oriented))\n  {\n    ec = asio::error_code();\n    return 0;\n  }\n\n  // Read some data.\n  for (;;)\n  {\n    // Try to complete the operation without blocking.\n    signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec);\n\n    // Check if operation succeeded.\n    if (bytes > 0)\n      return bytes;\n\n    // Check for EOF.\n    if ((state & stream_oriented) && bytes == 0)\n    {\n      ec = asio::error::eof;\n      return 
0;\n    }\n\n    // Operation failed.\n    if ((state & user_set_non_blocking)\n        || (ec != asio::error::would_block\n          && ec != asio::error::try_again))\n      return 0;\n\n    // Wait for socket to become ready.\n    if (socket_ops::poll_read(s, 0, -1, ec) < 0)\n      return 0;\n  }\n}\n\n#if defined(ASIO_HAS_IOCP)\n\nvoid complete_iocp_recv(state_type state,\n    const weak_cancel_token_type& cancel_token, bool all_empty,\n    asio::error_code& ec, size_t bytes_transferred)\n{\n  // Map non-portable errors to their portable counterparts.\n  if (ec.value() == ERROR_NETNAME_DELETED)\n  {\n    if (cancel_token.expired())\n      ec = asio::error::operation_aborted;\n    else\n      ec = asio::error::connection_reset;\n  }\n  else if (ec.value() == ERROR_PORT_UNREACHABLE)\n  {\n    ec = asio::error::connection_refused;\n  }\n  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)\n  {\n    ec.assign(0, ec.category());\n  }\n\n  // Check for connection closed.\n  else if (!ec && bytes_transferred == 0\n      && (state & stream_oriented) != 0\n      && !all_empty)\n  {\n    ec = asio::error::eof;\n  }\n}\n\n#else // defined(ASIO_HAS_IOCP)\n\nbool non_blocking_recv(socket_type s,\n    buf* bufs, size_t count, int flags, bool is_stream,\n    asio::error_code& ec, size_t& bytes_transferred)\n{\n  for (;;)\n  {\n    // Read some data.\n    signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec);\n\n    // Check for end of stream.\n    if (is_stream && bytes == 0)\n    {\n      ec = asio::error::eof;\n      return true;\n    }\n\n    // Retry operation if interrupted by signal.\n    if (ec == asio::error::interrupted)\n      continue;\n\n    // Check if we need to run the operation again.\n    if (ec == asio::error::would_block\n        || ec == asio::error::try_again)\n      return false;\n\n    // Operation is complete.\n    if (bytes >= 0)\n    {\n      ec = asio::error_code();\n      bytes_transferred = bytes;\n    }\n    else\n 
     bytes_transferred = 0;\n\n    return true;\n  }\n}\n\n#endif // defined(ASIO_HAS_IOCP)\n\nsigned_size_type recvfrom(socket_type s, buf* bufs, size_t count,\n    int flags, socket_addr_type* addr, std::size_t* addrlen,\n    asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  // Receive some data.\n  DWORD recv_buf_count = static_cast<DWORD>(count);\n  DWORD bytes_transferred = 0;\n  DWORD recv_flags = flags;\n  int tmp_addrlen = (int)*addrlen;\n  int result = error_wrapper(::WSARecvFrom(s, bufs, recv_buf_count,\n        &bytes_transferred, &recv_flags, addr, &tmp_addrlen, 0, 0), ec);\n  *addrlen = (std::size_t)tmp_addrlen;\n  if (ec.value() == ERROR_NETNAME_DELETED)\n    ec = asio::error::connection_reset;\n  else if (ec.value() == ERROR_PORT_UNREACHABLE)\n    ec = asio::error::connection_refused;\n  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)\n    result = 0;\n  if (result != 0)\n    return socket_error_retval;\n  ec = asio::error_code();\n  return bytes_transferred;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  msghdr msg = msghdr();\n  init_msghdr_msg_name(msg.msg_name, addr);\n  msg.msg_namelen = static_cast<int>(*addrlen);\n  msg.msg_iov = bufs;\n  msg.msg_iovlen = static_cast<int>(count);\n  signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec);\n  *addrlen = msg.msg_namelen;\n  if (result >= 0)\n    ec = asio::error_code();\n  return result;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\nsize_t sync_recvfrom(socket_type s, state_type state, buf* bufs,\n    size_t count, int flags, socket_addr_type* addr,\n    std::size_t* addrlen, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  // Read some data.\n  for (;;)\n  {\n    // Try to complete the operation without blocking.\n    signed_size_type bytes = socket_ops::recvfrom(\n        s, bufs, count, flags, addr, addrlen, 
ec);\n\n    // Check if operation succeeded.\n    if (bytes >= 0)\n      return bytes;\n\n    // Operation failed.\n    if ((state & user_set_non_blocking)\n        || (ec != asio::error::would_block\n          && ec != asio::error::try_again))\n      return 0;\n\n    // Wait for socket to become ready.\n    if (socket_ops::poll_read(s, 0, -1, ec) < 0)\n      return 0;\n  }\n}\n\n#if defined(ASIO_HAS_IOCP)\n\nvoid complete_iocp_recvfrom(\n    const weak_cancel_token_type& cancel_token,\n    asio::error_code& ec)\n{\n  // Map non-portable errors to their portable counterparts.\n  if (ec.value() == ERROR_NETNAME_DELETED)\n  {\n    if (cancel_token.expired())\n      ec = asio::error::operation_aborted;\n    else\n      ec = asio::error::connection_reset;\n  }\n  else if (ec.value() == ERROR_PORT_UNREACHABLE)\n  {\n    ec = asio::error::connection_refused;\n  }\n  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)\n  {\n    ec.assign(0, ec.category());\n  }\n}\n\n#else // defined(ASIO_HAS_IOCP)\n\nbool non_blocking_recvfrom(socket_type s,\n    buf* bufs, size_t count, int flags,\n    socket_addr_type* addr, std::size_t* addrlen,\n    asio::error_code& ec, size_t& bytes_transferred)\n{\n  for (;;)\n  {\n    // Read some data.\n    signed_size_type bytes = socket_ops::recvfrom(\n        s, bufs, count, flags, addr, addrlen, ec);\n\n    // Retry operation if interrupted by signal.\n    if (ec == asio::error::interrupted)\n      continue;\n\n    // Check if we need to run the operation again.\n    if (ec == asio::error::would_block\n        || ec == asio::error::try_again)\n      return false;\n\n    // Operation is complete.\n    if (bytes >= 0)\n    {\n      ec = asio::error_code();\n      bytes_transferred = bytes;\n    }\n    else\n      bytes_transferred = 0;\n\n    return true;\n  }\n}\n\n#endif // defined(ASIO_HAS_IOCP)\n\nsigned_size_type recvmsg(socket_type s, buf* bufs, size_t count,\n    int in_flags, int& out_flags, asio::error_code& ec)\n{\n  
clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  out_flags = 0;\n  return socket_ops::recv(s, bufs, count, in_flags, ec);\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  msghdr msg = msghdr();\n  msg.msg_iov = bufs;\n  msg.msg_iovlen = static_cast<int>(count);\n  signed_size_type result = error_wrapper(::recvmsg(s, &msg, in_flags), ec);\n  if (result >= 0)\n  {\n    ec = asio::error_code();\n    out_flags = msg.msg_flags;\n  }\n  else\n    out_flags = 0;\n  return result;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\nsize_t sync_recvmsg(socket_type s, state_type state,\n    buf* bufs, size_t count, int in_flags, int& out_flags,\n    asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  // Read some data.\n  for (;;)\n  {\n    // Try to complete the operation without blocking.\n    signed_size_type bytes = socket_ops::recvmsg(\n        s, bufs, count, in_flags, out_flags, ec);\n\n    // Check if operation succeeded.\n    if (bytes >= 0)\n      return bytes;\n\n    // Operation failed.\n    if ((state & user_set_non_blocking)\n        || (ec != asio::error::would_block\n          && ec != asio::error::try_again))\n      return 0;\n\n    // Wait for socket to become ready.\n    if (socket_ops::poll_read(s, 0, -1, ec) < 0)\n      return 0;\n  }\n}\n\n#if defined(ASIO_HAS_IOCP)\n\nvoid complete_iocp_recvmsg(\n    const weak_cancel_token_type& cancel_token,\n    asio::error_code& ec)\n{\n  // Map non-portable errors to their portable counterparts.\n  if (ec.value() == ERROR_NETNAME_DELETED)\n  {\n    if (cancel_token.expired())\n      ec = asio::error::operation_aborted;\n    else\n      ec = asio::error::connection_reset;\n  }\n  else if (ec.value() == ERROR_PORT_UNREACHABLE)\n  {\n    ec = asio::error::connection_refused;\n  }\n  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)\n  {\n    ec.assign(0, ec.category());\n  }\n}\n\n#else 
// defined(ASIO_HAS_IOCP)\n\nbool non_blocking_recvmsg(socket_type s,\n    buf* bufs, size_t count, int in_flags, int& out_flags,\n    asio::error_code& ec, size_t& bytes_transferred)\n{\n  for (;;)\n  {\n    // Read some data.\n    signed_size_type bytes = socket_ops::recvmsg(\n        s, bufs, count, in_flags, out_flags, ec);\n\n    // Retry operation if interrupted by signal.\n    if (ec == asio::error::interrupted)\n      continue;\n\n    // Check if we need to run the operation again.\n    if (ec == asio::error::would_block\n        || ec == asio::error::try_again)\n      return false;\n\n    // Operation is complete.\n    if (bytes >= 0)\n    {\n      ec = asio::error_code();\n      bytes_transferred = bytes;\n    }\n    else\n      bytes_transferred = 0;\n\n    return true;\n  }\n}\n\n#endif // defined(ASIO_HAS_IOCP)\n\nsigned_size_type send(socket_type s, const buf* bufs, size_t count,\n    int flags, asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  // Send the data.\n  DWORD send_buf_count = static_cast<DWORD>(count);\n  DWORD bytes_transferred = 0;\n  DWORD send_flags = flags;\n  int result = error_wrapper(::WSASend(s, const_cast<buf*>(bufs),\n        send_buf_count, &bytes_transferred, send_flags, 0, 0), ec);\n  if (ec.value() == ERROR_NETNAME_DELETED)\n    ec = asio::error::connection_reset;\n  else if (ec.value() == ERROR_PORT_UNREACHABLE)\n    ec = asio::error::connection_refused;\n  if (result != 0)\n    return socket_error_retval;\n  ec = asio::error_code();\n  return bytes_transferred;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  msghdr msg = msghdr();\n  msg.msg_iov = const_cast<buf*>(bufs);\n  msg.msg_iovlen = static_cast<int>(count);\n#if defined(__linux__)\n  flags |= MSG_NOSIGNAL;\n#endif // defined(__linux__)\n  signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec);\n  if (result >= 0)\n    ec = asio::error_code();\n  return result;\n#endif // 
defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\nsize_t sync_send(socket_type s, state_type state, const buf* bufs,\n    size_t count, int flags, bool all_empty, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  // A request to write 0 bytes to a stream is a no-op.\n  if (all_empty && (state & stream_oriented))\n  {\n    ec = asio::error_code();\n    return 0;\n  }\n\n  // Read some data.\n  for (;;)\n  {\n    // Try to complete the operation without blocking.\n    signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec);\n\n    // Check if operation succeeded.\n    if (bytes >= 0)\n      return bytes;\n\n    // Operation failed.\n    if ((state & user_set_non_blocking)\n        || (ec != asio::error::would_block\n          && ec != asio::error::try_again))\n      return 0;\n\n    // Wait for socket to become ready.\n    if (socket_ops::poll_write(s, 0, -1, ec) < 0)\n      return 0;\n  }\n}\n\n#if defined(ASIO_HAS_IOCP)\n\nvoid complete_iocp_send(\n    const weak_cancel_token_type& cancel_token,\n    asio::error_code& ec)\n{\n  // Map non-portable errors to their portable counterparts.\n  if (ec.value() == ERROR_NETNAME_DELETED)\n  {\n    if (cancel_token.expired())\n      ec = asio::error::operation_aborted;\n    else\n      ec = asio::error::connection_reset;\n  }\n  else if (ec.value() == ERROR_PORT_UNREACHABLE)\n  {\n    ec = asio::error::connection_refused;\n  }\n}\n\n#else // defined(ASIO_HAS_IOCP)\n\nbool non_blocking_send(socket_type s,\n    const buf* bufs, size_t count, int flags,\n    asio::error_code& ec, size_t& bytes_transferred)\n{\n  for (;;)\n  {\n    // Write some data.\n    signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec);\n\n    // Retry operation if interrupted by signal.\n    if (ec == asio::error::interrupted)\n      continue;\n\n    // Check if we need to run the operation again.\n    if (ec == asio::error::would_block\n        || ec == 
asio::error::try_again)\n      return false;\n\n    // Operation is complete.\n    if (bytes >= 0)\n    {\n      ec = asio::error_code();\n      bytes_transferred = bytes;\n    }\n    else\n      bytes_transferred = 0;\n\n    return true;\n  }\n}\n\n#endif // defined(ASIO_HAS_IOCP)\n\nsigned_size_type sendto(socket_type s, const buf* bufs, size_t count,\n    int flags, const socket_addr_type* addr, std::size_t addrlen,\n    asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  // Send the data.\n  DWORD send_buf_count = static_cast<DWORD>(count);\n  DWORD bytes_transferred = 0;\n  int result = error_wrapper(::WSASendTo(s, const_cast<buf*>(bufs),\n        send_buf_count, &bytes_transferred, flags, addr,\n        static_cast<int>(addrlen), 0, 0), ec);\n  if (ec.value() == ERROR_NETNAME_DELETED)\n    ec = asio::error::connection_reset;\n  else if (ec.value() == ERROR_PORT_UNREACHABLE)\n    ec = asio::error::connection_refused;\n  if (result != 0)\n    return socket_error_retval;\n  ec = asio::error_code();\n  return bytes_transferred;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  msghdr msg = msghdr();\n  init_msghdr_msg_name(msg.msg_name, addr);\n  msg.msg_namelen = static_cast<int>(addrlen);\n  msg.msg_iov = const_cast<buf*>(bufs);\n  msg.msg_iovlen = static_cast<int>(count);\n#if defined(__linux__)\n  flags |= MSG_NOSIGNAL;\n#endif // defined(__linux__)\n  signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec);\n  if (result >= 0)\n    ec = asio::error_code();\n  return result;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\nsize_t sync_sendto(socket_type s, state_type state, const buf* bufs,\n    size_t count, int flags, const socket_addr_type* addr,\n    std::size_t addrlen, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  // Write some data.\n  for (;;)\n  {\n    // Try to complete the operation without 
blocking.\n    signed_size_type bytes = socket_ops::sendto(\n        s, bufs, count, flags, addr, addrlen, ec);\n\n    // Check if operation succeeded.\n    if (bytes >= 0)\n      return bytes;\n\n    // Operation failed.\n    if ((state & user_set_non_blocking)\n        || (ec != asio::error::would_block\n          && ec != asio::error::try_again))\n      return 0;\n\n    // Wait for socket to become ready.\n    if (socket_ops::poll_write(s, 0, -1, ec) < 0)\n      return 0;\n  }\n}\n\n#if !defined(ASIO_HAS_IOCP)\n\nbool non_blocking_sendto(socket_type s,\n    const buf* bufs, size_t count, int flags,\n    const socket_addr_type* addr, std::size_t addrlen,\n    asio::error_code& ec, size_t& bytes_transferred)\n{\n  for (;;)\n  {\n    // Write some data.\n    signed_size_type bytes = socket_ops::sendto(\n        s, bufs, count, flags, addr, addrlen, ec);\n\n    // Retry operation if interrupted by signal.\n    if (ec == asio::error::interrupted)\n      continue;\n\n    // Check if we need to run the operation again.\n    if (ec == asio::error::would_block\n        || ec == asio::error::try_again)\n      return false;\n\n    // Operation is complete.\n    if (bytes >= 0)\n    {\n      ec = asio::error_code();\n      bytes_transferred = bytes;\n    }\n    else\n      bytes_transferred = 0;\n\n    return true;\n  }\n}\n\n#endif // !defined(ASIO_HAS_IOCP)\n\nsocket_type socket(int af, int type, int protocol,\n    asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  socket_type s = error_wrapper(::WSASocketW(af, type, protocol, 0, 0,\n        WSA_FLAG_OVERLAPPED), ec);\n  if (s == invalid_socket)\n    return s;\n\n  if (af == ASIO_OS_DEF(AF_INET6))\n  {\n    // Try to enable the POSIX default behaviour of having IPV6_V6ONLY set to\n    // false. 
This will only succeed on Windows Vista and later versions of\n    // Windows, where a dual-stack IPv4/v6 implementation is available.\n    DWORD optval = 0;\n    ::setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,\n        reinterpret_cast<const char*>(&optval), sizeof(optval));\n  }\n\n  ec = asio::error_code();\n\n  return s;\n#elif defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__)\n  socket_type s = error_wrapper(::socket(af, type, protocol), ec);\n  if (s == invalid_socket)\n    return s;\n\n  int optval = 1;\n  int result = error_wrapper(::setsockopt(s,\n        SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec);\n  if (result != 0)\n  {\n    ::close(s);\n    return invalid_socket;\n  }\n\n  return s;\n#else\n  int s = error_wrapper(::socket(af, type, protocol), ec);\n  if (s >= 0)\n    ec = asio::error_code();\n  return s;\n#endif\n}\n\ntemplate <typename SockLenType>\ninline int call_setsockopt(SockLenType msghdr::*,\n    socket_type s, int level, int optname,\n    const void* optval, std::size_t optlen)\n{\n  return ::setsockopt(s, level, optname,\n      (const char*)optval, (SockLenType)optlen);\n}\n\nint setsockopt(socket_type s, state_type& state, int level, int optname,\n    const void* optval, std::size_t optlen, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n  if (level == custom_socket_option_level && optname == always_fail_option)\n  {\n    ec = asio::error::invalid_argument;\n    return socket_error_retval;\n  }\n\n  if (level == custom_socket_option_level\n      && optname == enable_connection_aborted_option)\n  {\n    if (optlen != sizeof(int))\n    {\n      ec = asio::error::invalid_argument;\n      return socket_error_retval;\n    }\n\n    if (*static_cast<const int*>(optval))\n      state |= enable_connection_aborted;\n    else\n      state &= ~enable_connection_aborted;\n    ec = asio::error_code();\n    return 0;\n  }\n\n  if (level == 
SOL_SOCKET && optname == SO_LINGER)\n    state |= user_set_linger;\n\n#if defined(__BORLANDC__)\n  // Mysteriously, using the getsockopt and setsockopt functions directly with\n  // Borland C++ results in incorrect values being set and read. The bug can be\n  // worked around by using function addresses resolved with GetProcAddress.\n  if (HMODULE winsock_module = ::GetModuleHandleA(\"ws2_32\"))\n  {\n    typedef int (WSAAPI *sso_t)(SOCKET, int, int, const char*, int);\n    if (sso_t sso = (sso_t)::GetProcAddress(winsock_module, \"setsockopt\"))\n    {\n      clear_last_error();\n      return error_wrapper(sso(s, level, optname,\n            reinterpret_cast<const char*>(optval),\n            static_cast<int>(optlen)), ec);\n    }\n  }\n  ec = asio::error::fault;\n  return socket_error_retval;\n#else // defined(__BORLANDC__)\n  clear_last_error();\n  int result = error_wrapper(call_setsockopt(&msghdr::msg_namelen,\n        s, level, optname, optval, optlen), ec);\n  if (result == 0)\n  {\n    ec = asio::error_code();\n\n#if defined(__MACH__) && defined(__APPLE__) \\\n  || defined(__NetBSD__) || defined(__FreeBSD__) \\\n  || defined(__OpenBSD__) || defined(__QNX__)\n    // To implement portable behaviour for SO_REUSEADDR with UDP sockets we\n    // need to also set SO_REUSEPORT on BSD-based platforms.\n    if ((state & datagram_oriented)\n        && level == SOL_SOCKET && optname == SO_REUSEADDR)\n    {\n      call_setsockopt(&msghdr::msg_namelen, s,\n          SOL_SOCKET, SO_REUSEPORT, optval, optlen);\n    }\n#endif\n  }\n\n  return result;\n#endif // defined(__BORLANDC__)\n}\n\ntemplate <typename SockLenType>\ninline int call_getsockopt(SockLenType msghdr::*,\n    socket_type s, int level, int optname,\n    void* optval, std::size_t* optlen)\n{\n  SockLenType tmp_optlen = (SockLenType)*optlen;\n  int result = ::getsockopt(s, level, optname, (char*)optval, &tmp_optlen);\n  *optlen = (std::size_t)tmp_optlen;\n  return result;\n}\n\nint getsockopt(socket_type s, 
state_type state, int level, int optname,\n    void* optval, size_t* optlen, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n  if (level == custom_socket_option_level && optname == always_fail_option)\n  {\n    ec = asio::error::invalid_argument;\n    return socket_error_retval;\n  }\n\n  if (level == custom_socket_option_level\n      && optname == enable_connection_aborted_option)\n  {\n    if (*optlen != sizeof(int))\n    {\n      ec = asio::error::invalid_argument;\n      return socket_error_retval;\n    }\n\n    *static_cast<int*>(optval) = (state & enable_connection_aborted) ? 1 : 0;\n    ec = asio::error_code();\n    return 0;\n  }\n\n#if defined(__BORLANDC__)\n  // Mysteriously, using the getsockopt and setsockopt functions directly with\n  // Borland C++ results in incorrect values being set and read. The bug can be\n  // worked around by using function addresses resolved with GetProcAddress.\n  if (HMODULE winsock_module = ::GetModuleHandleA(\"ws2_32\"))\n  {\n    typedef int (WSAAPI *gso_t)(SOCKET, int, int, char*, int*);\n    if (gso_t gso = (gso_t)::GetProcAddress(winsock_module, \"getsockopt\"))\n    {\n      clear_last_error();\n      int tmp_optlen = static_cast<int>(*optlen);\n      int result = error_wrapper(gso(s, level, optname,\n            reinterpret_cast<char*>(optval), &tmp_optlen), ec);\n      *optlen = static_cast<size_t>(tmp_optlen);\n      if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY\n          && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD))\n      {\n        // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are\n        // only supported on Windows Vista and later. To simplify program logic\n        // we will fake success of getting this option and specify that the\n        // value is non-zero (i.e. true). 
This corresponds to the behavior of\n        // IPv6 sockets on Windows platforms pre-Vista.\n        *static_cast<DWORD*>(optval) = 1;\n        ec = asio::error_code();\n      }\n      return result;\n    }\n  }\n  ec = asio::error::fault;\n  return socket_error_retval;\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  clear_last_error();\n  int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen,\n        s, level, optname, optval, optlen), ec);\n  if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY\n      && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD))\n  {\n    // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are only\n    // supported on Windows Vista and later. To simplify program logic we will\n    // fake success of getting this option and specify that the value is\n    // non-zero (i.e. true). This corresponds to the behavior of IPv6 sockets\n    // on Windows platforms pre-Vista.\n    *static_cast<DWORD*>(optval) = 1;\n    ec = asio::error_code();\n  }\n  if (result == 0)\n    ec = asio::error_code();\n  return result;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  clear_last_error();\n  int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen,\n        s, level, optname, optval, optlen), ec);\n#if defined(__linux__)\n  if (result == 0 && level == SOL_SOCKET && *optlen == sizeof(int)\n      && (optname == SO_SNDBUF || optname == SO_RCVBUF))\n  {\n    // On Linux, setting SO_SNDBUF or SO_RCVBUF to N actually causes the kernel\n    // to set the buffer size to N*2. 
Linux puts additional stuff into the\n    // buffers so that only about half is actually available to the application.\n    // The retrieved value is divided by 2 here to make it appear as though the\n    // correct value has been set.\n    *static_cast<int*>(optval) /= 2;\n  }\n#endif // defined(__linux__)\n  if (result == 0)\n    ec = asio::error_code();\n  return result;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\ntemplate <typename SockLenType>\ninline int call_getpeername(SockLenType msghdr::*,\n    socket_type s, socket_addr_type* addr, std::size_t* addrlen)\n{\n  SockLenType tmp_addrlen = (SockLenType)*addrlen;\n  int result = ::getpeername(s, addr, &tmp_addrlen);\n  *addrlen = (std::size_t)tmp_addrlen;\n  return result;\n}\n\nint getpeername(socket_type s, socket_addr_type* addr,\n    std::size_t* addrlen, bool cached, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n#if defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) \\\n  || defined(__CYGWIN__)\n  if (cached)\n  {\n    // Check if socket is still connected.\n    DWORD connect_time = 0;\n    size_t connect_time_len = sizeof(connect_time);\n    if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_CONNECT_TIME,\n          &connect_time, &connect_time_len, ec) == socket_error_retval)\n    {\n      return socket_error_retval;\n    }\n    if (connect_time == 0xFFFFFFFF)\n    {\n      ec = asio::error::not_connected;\n      return socket_error_retval;\n    }\n\n    // The cached value is still valid.\n    ec = asio::error_code();\n    return 0;\n  }\n#else // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP)\n      // || defined(__CYGWIN__)\n  (void)cached;\n#endif // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP)\n       // || defined(__CYGWIN__)\n\n  clear_last_error();\n  int result = error_wrapper(call_getpeername(\n        &msghdr::msg_namelen, s, addr, addrlen), ec);\n  if (result == 0)\n    ec 
= asio::error_code();\n  return result;\n}\n\ntemplate <typename SockLenType>\ninline int call_getsockname(SockLenType msghdr::*,\n    socket_type s, socket_addr_type* addr, std::size_t* addrlen)\n{\n  SockLenType tmp_addrlen = (SockLenType)*addrlen;\n  int result = ::getsockname(s, addr, &tmp_addrlen);\n  *addrlen = (std::size_t)tmp_addrlen;\n  return result;\n}\n\nint getsockname(socket_type s, socket_addr_type* addr,\n    std::size_t* addrlen, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n  clear_last_error();\n  int result = error_wrapper(call_getsockname(\n        &msghdr::msg_namelen, s, addr, addrlen), ec);\n  if (result == 0)\n    ec = asio::error_code();\n  return result;\n}\n\nint ioctl(socket_type s, state_type& state, int cmd,\n    ioctl_arg_type* arg, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  int result = error_wrapper(::ioctlsocket(s, cmd, arg), ec);\n#elif defined(__MACH__) && defined(__APPLE__) \\\n  || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)\n  int result = error_wrapper(::ioctl(s,\n        static_cast<unsigned int>(cmd), arg), ec);\n#else\n  int result = error_wrapper(::ioctl(s, cmd, arg), ec);\n#endif\n  if (result >= 0)\n  {\n    ec = asio::error_code();\n\n    // When updating the non-blocking mode we always perform the ioctl syscall,\n    // even if the flags would otherwise indicate that the socket is already in\n    // the correct state. This ensures that the underlying socket is put into\n    // the state that has been requested by the user. 
If the ioctl syscall was\n    // successful then we need to update the flags to match.\n    if (cmd == static_cast<int>(FIONBIO))\n    {\n      if (*arg)\n      {\n        state |= user_set_non_blocking;\n      }\n      else\n      {\n        // Clearing the non-blocking mode always overrides any internally-set\n        // non-blocking flag. Any subsequent asynchronous operations will need\n        // to re-enable non-blocking I/O.\n        state &= ~(user_set_non_blocking | internal_non_blocking);\n      }\n    }\n  }\n\n  return result;\n}\n\nint select(int nfds, fd_set* readfds, fd_set* writefds,\n    fd_set* exceptfds, timeval* timeout, asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  if (!readfds && !writefds && !exceptfds && timeout)\n  {\n    DWORD milliseconds = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;\n    if (milliseconds == 0)\n      milliseconds = 1; // Force context switch.\n    ::Sleep(milliseconds);\n    ec = asio::error_code();\n    return 0;\n  }\n\n  // The select() call allows timeout values measured in microseconds, but the\n  // system clock (as wrapped by boost::posix_time::microsec_clock) typically\n  // has a resolution of 10 milliseconds. This can lead to a spinning select\n  // reactor, meaning increased CPU usage, when waiting for the earliest\n  // scheduled timeout if it's less than 10 milliseconds away. To avoid a tight\n  // spin we'll use a minimum timeout of 1 millisecond.\n  if (timeout && timeout->tv_sec == 0\n      && timeout->tv_usec > 0 && timeout->tv_usec < 1000)\n    timeout->tv_usec = 1000;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#if defined(__hpux) && defined(__SELECT)\n  timespec ts;\n  ts.tv_sec = timeout ? timeout->tv_sec : 0;\n  ts.tv_nsec = timeout ? timeout->tv_usec * 1000 : 0;\n  return error_wrapper(::pselect(nfds, readfds,\n        writefds, exceptfds, timeout ? 
&ts : 0, 0), ec);\n#else\n  int result = error_wrapper(::select(nfds, readfds,\n        writefds, exceptfds, timeout), ec);\n  if (result >= 0)\n    ec = asio::error_code();\n  return result;\n#endif\n}\n\nint poll_read(socket_type s, state_type state,\n    int msec, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n#if defined(ASIO_WINDOWS) \\\n  || defined(__CYGWIN__) \\\n  || defined(__SYMBIAN32__)\n  fd_set fds;\n  FD_ZERO(&fds);\n  FD_SET(s, &fds);\n  timeval timeout_obj;\n  timeval* timeout;\n  if (state & user_set_non_blocking)\n  {\n    timeout_obj.tv_sec = 0;\n    timeout_obj.tv_usec = 0;\n    timeout = &timeout_obj;\n  }\n  else if (msec >= 0)\n  {\n    timeout_obj.tv_sec = msec / 1000;\n    timeout_obj.tv_usec = (msec % 1000) * 1000;\n    timeout = &timeout_obj;\n  }\n  else\n    timeout = 0;\n  clear_last_error();\n  int result = error_wrapper(::select(s + 1, &fds, 0, 0, timeout), ec);\n#else // defined(ASIO_WINDOWS)\n      // || defined(__CYGWIN__)\n      // || defined(__SYMBIAN32__)\n  pollfd fds;\n  fds.fd = s;\n  fds.events = POLLIN;\n  fds.revents = 0;\n  int timeout = (state & user_set_non_blocking) ? 0 : msec;\n  clear_last_error();\n  int result = error_wrapper(::poll(&fds, 1, timeout), ec);\n#endif // defined(ASIO_WINDOWS)\n       // || defined(__CYGWIN__)\n       // || defined(__SYMBIAN32__)\n  if (result == 0)\n    ec = (state & user_set_non_blocking)\n      ? 
asio::error::would_block : asio::error_code();\n  else if (result > 0)\n    ec = asio::error_code();\n  return result;\n}\n\nint poll_write(socket_type s, state_type state,\n    int msec, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n#if defined(ASIO_WINDOWS) \\\n  || defined(__CYGWIN__) \\\n  || defined(__SYMBIAN32__)\n  fd_set fds;\n  FD_ZERO(&fds);\n  FD_SET(s, &fds);\n  timeval timeout_obj;\n  timeval* timeout;\n  if (state & user_set_non_blocking)\n  {\n    timeout_obj.tv_sec = 0;\n    timeout_obj.tv_usec = 0;\n    timeout = &timeout_obj;\n  }\n  else if (msec >= 0)\n  {\n    timeout_obj.tv_sec = msec / 1000;\n    timeout_obj.tv_usec = (msec % 1000) * 1000;\n    timeout = &timeout_obj;\n  }\n  else\n    timeout = 0;\n  clear_last_error();\n  int result = error_wrapper(::select(s + 1, 0, &fds, 0, timeout), ec);\n#else // defined(ASIO_WINDOWS)\n      // || defined(__CYGWIN__)\n      // || defined(__SYMBIAN32__)\n  pollfd fds;\n  fds.fd = s;\n  fds.events = POLLOUT;\n  fds.revents = 0;\n  int timeout = (state & user_set_non_blocking) ? 0 : msec;\n  clear_last_error();\n  int result = error_wrapper(::poll(&fds, 1, timeout), ec);\n#endif // defined(ASIO_WINDOWS)\n       // || defined(__CYGWIN__)\n       // || defined(__SYMBIAN32__)\n  if (result == 0)\n    ec = (state & user_set_non_blocking)\n      ? 
asio::error::would_block : asio::error_code();\n  else if (result > 0)\n    ec = asio::error_code();\n  return result;\n}\n\nint poll_error(socket_type s, state_type state,\n    int msec, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n#if defined(ASIO_WINDOWS) \\\n  || defined(__CYGWIN__) \\\n  || defined(__SYMBIAN32__)\n  fd_set fds;\n  FD_ZERO(&fds);\n  FD_SET(s, &fds);\n  timeval timeout_obj;\n  timeval* timeout;\n  if (state & user_set_non_blocking)\n  {\n    timeout_obj.tv_sec = 0;\n    timeout_obj.tv_usec = 0;\n    timeout = &timeout_obj;\n  }\n  else if (msec >= 0)\n  {\n    timeout_obj.tv_sec = msec / 1000;\n    timeout_obj.tv_usec = (msec % 1000) * 1000;\n    timeout = &timeout_obj;\n  }\n  else\n    timeout = 0;\n  clear_last_error();\n  int result = error_wrapper(::select(s + 1, 0, 0, &fds, timeout), ec);\n#else // defined(ASIO_WINDOWS)\n      // || defined(__CYGWIN__)\n      // || defined(__SYMBIAN32__)\n  pollfd fds;\n  fds.fd = s;\n  fds.events = POLLPRI | POLLERR | POLLHUP;\n  fds.revents = 0;\n  int timeout = (state & user_set_non_blocking) ? 0 : msec;\n  clear_last_error();\n  int result = error_wrapper(::poll(&fds, 1, timeout), ec);\n#endif // defined(ASIO_WINDOWS)\n       // || defined(__CYGWIN__)\n       // || defined(__SYMBIAN32__)\n  if (result == 0)\n    ec = (state & user_set_non_blocking)\n      ? 
asio::error::would_block : asio::error_code();\n  else if (result > 0)\n    ec = asio::error_code();\n  return result;\n}\n\nint poll_connect(socket_type s, int msec, asio::error_code& ec)\n{\n  if (s == invalid_socket)\n  {\n    ec = asio::error::bad_descriptor;\n    return socket_error_retval;\n  }\n\n#if defined(ASIO_WINDOWS) \\\n  || defined(__CYGWIN__) \\\n  || defined(__SYMBIAN32__)\n  fd_set write_fds;\n  FD_ZERO(&write_fds);\n  FD_SET(s, &write_fds);\n  fd_set except_fds;\n  FD_ZERO(&except_fds);\n  FD_SET(s, &except_fds);\n  timeval timeout_obj;\n  timeval* timeout;\n  if (msec >= 0)\n  {\n    timeout_obj.tv_sec = msec / 1000;\n    timeout_obj.tv_usec = (msec % 1000) * 1000;\n    timeout = &timeout_obj;\n  }\n  else\n    timeout = 0;\n  clear_last_error();\n  int result = error_wrapper(::select(\n        s + 1, 0, &write_fds, &except_fds, timeout), ec);\n  if (result >= 0)\n    ec = asio::error_code();\n  return result;\n#else // defined(ASIO_WINDOWS)\n      // || defined(__CYGWIN__)\n      // || defined(__SYMBIAN32__)\n  pollfd fds;\n  fds.fd = s;\n  fds.events = POLLOUT;\n  fds.revents = 0;\n  clear_last_error();\n  int result = error_wrapper(::poll(&fds, 1, msec), ec);\n  if (result >= 0)\n    ec = asio::error_code();\n  return result;\n#endif // defined(ASIO_WINDOWS)\n       // || defined(__CYGWIN__)\n       // || defined(__SYMBIAN32__)\n}\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\nconst char* inet_ntop(int af, const void* src, char* dest, size_t length,\n    unsigned long scope_id, asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS_RUNTIME)\n  using namespace std; // For sprintf.\n  const unsigned char* bytes = static_cast<const unsigned char*>(src);\n  if (af == ASIO_OS_DEF(AF_INET))\n  {\n    sprintf_s(dest, length, \"%u.%u.%u.%u\",\n        bytes[0], bytes[1], bytes[2], bytes[3]);\n    return dest;\n  }\n  else if (af == ASIO_OS_DEF(AF_INET6))\n  {\n    size_t n = 0, b = 0, z = 0;\n    while (n < length && b < 16)\n    
{\n      if (bytes[b] == 0 && bytes[b + 1] == 0 && z == 0)\n      {\n        do b += 2; while (b < 16 && bytes[b] == 0 && bytes[b + 1] == 0);\n        n += sprintf_s(dest + n, length - n, \":%s\", b < 16 ? \"\" : \":\"), ++z;\n      }\n      else\n      {\n        n += sprintf_s(dest + n, length - n, \"%s%x\", b ? \":\" : \"\",\n            (static_cast<u_long_type>(bytes[b]) << 8) | bytes[b + 1]);\n        b += 2;\n      }\n    }\n    if (scope_id)\n      n += sprintf_s(dest + n, length - n, \"%%%lu\", scope_id);\n    return dest;\n  }\n  else\n  {\n    ec = asio::error::address_family_not_supported;\n    return 0;\n  }\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  using namespace std; // For memcpy.\n\n  if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6))\n  {\n    ec = asio::error::address_family_not_supported;\n    return 0;\n  }\n\n  union\n  {\n    socket_addr_type base;\n    sockaddr_storage_type storage;\n    sockaddr_in4_type v4;\n    sockaddr_in6_type v6;\n  } address;\n  DWORD address_length;\n  if (af == ASIO_OS_DEF(AF_INET))\n  {\n    address_length = sizeof(sockaddr_in4_type);\n    address.v4.sin_family = ASIO_OS_DEF(AF_INET);\n    address.v4.sin_port = 0;\n    memcpy(&address.v4.sin_addr, src, sizeof(in4_addr_type));\n  }\n  else // AF_INET6\n  {\n    address_length = sizeof(sockaddr_in6_type);\n    address.v6.sin6_family = ASIO_OS_DEF(AF_INET6);\n    address.v6.sin6_port = 0;\n    address.v6.sin6_flowinfo = 0;\n    address.v6.sin6_scope_id = scope_id;\n    memcpy(&address.v6.sin6_addr, src, sizeof(in6_addr_type));\n  }\n\n  DWORD string_length = static_cast<DWORD>(length);\n#if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800))\n  LPWSTR string_buffer = (LPWSTR)_alloca(length * sizeof(WCHAR));\n  int result = error_wrapper(::WSAAddressToStringW(&address.base,\n        address_length, 0, string_buffer, &string_length), ec);\n  ::WideCharToMultiByte(CP_ACP, 0, string_buffer, -1,\n      dest, 
static_cast<int>(length), 0, 0);\n#else\n  int result = error_wrapper(::WSAAddressToStringA(\n        &address.base, address_length, 0, dest, &string_length), ec);\n#endif\n\n  // Windows may set error code on success.\n  if (result != socket_error_retval)\n    ec = asio::error_code();\n\n  // Windows may not set an error code on failure.\n  else if (result == socket_error_retval && !ec)\n    ec = asio::error::invalid_argument;\n\n  return result == socket_error_retval ? 0 : dest;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  const char* result = error_wrapper(::inet_ntop(\n        af, src, dest, static_cast<int>(length)), ec);\n  if (result == 0 && !ec)\n    ec = asio::error::invalid_argument;\n  if (result != 0 && af == ASIO_OS_DEF(AF_INET6) && scope_id != 0)\n  {\n    using namespace std; // For strcat and sprintf.\n    char if_name[(IF_NAMESIZE > 21 ? IF_NAMESIZE : 21) + 1] = \"%\";\n    const in6_addr_type* ipv6_address = static_cast<const in6_addr_type*>(src);\n    bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe)\n        && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80));\n    bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff)\n        && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02));\n    if ((!is_link_local && !is_multicast_link_local)\n        || if_indextoname(static_cast<unsigned>(scope_id), if_name + 1) == 0)\n      sprintf(if_name + 1, \"%lu\", scope_id);\n    strcat(dest, if_name);\n  }\n  return result;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\nint inet_pton(int af, const char* src, void* dest,\n    unsigned long* scope_id, asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS_RUNTIME)\n  using namespace std; // For sscanf.\n  unsigned char* bytes = static_cast<unsigned char*>(dest);\n  if (af == ASIO_OS_DEF(AF_INET))\n  {\n    unsigned int b0, b1, b2, b3;\n    if (sscanf_s(src, \"%u.%u.%u.%u\", &b0, &b1, &b2, &b3) != 4)\n    {\n      ec = asio::error::invalid_argument;\n      
return -1;\n    }\n    if (b0 > 255 || b1 > 255 || b2 > 255 || b3 > 255)\n    {\n      ec = asio::error::invalid_argument;\n      return -1;\n    }\n    bytes[0] = static_cast<unsigned char>(b0);\n    bytes[1] = static_cast<unsigned char>(b1);\n    bytes[2] = static_cast<unsigned char>(b2);\n    bytes[3] = static_cast<unsigned char>(b3);\n    ec = asio::error_code();\n    return 1;\n  }\n  else if (af == ASIO_OS_DEF(AF_INET6))\n  {\n    unsigned char* bytes = static_cast<unsigned char*>(dest);\n    std::memset(bytes, 0, 16);\n    unsigned char back_bytes[16] = { 0 };\n    int num_front_bytes = 0, num_back_bytes = 0;\n    const char* p = src;\n\n    enum { fword, fcolon, bword, scope, done } state = fword;\n    unsigned long current_word = 0;\n    while (state != done)\n    {\n      if (current_word > 0xFFFF)\n      {\n        ec = asio::error::invalid_argument;\n        return -1;\n      }\n\n      switch (state)\n      {\n      case fword:\n        if (*p >= '0' && *p <= '9')\n          current_word = current_word * 16 + *p++ - '0';\n        else if (*p >= 'a' && *p <= 'f')\n          current_word = current_word * 16 + *p++ - 'a' + 10;\n        else if (*p >= 'A' && *p <= 'F')\n          current_word = current_word * 16 + *p++ - 'A' + 10;\n        else\n        {\n          if (num_front_bytes == 16)\n          {\n            ec = asio::error::invalid_argument;\n            return -1;\n          }\n\n          bytes[num_front_bytes++] = (current_word >> 8) & 0xFF;\n          bytes[num_front_bytes++] = current_word & 0xFF;\n          current_word = 0;\n\n          if (*p == ':')\n            state = fcolon, ++p;\n          else if (*p == '%')\n            state = scope, ++p;\n          else if (*p == 0)\n            state = done;\n          else\n          {\n            ec = asio::error::invalid_argument;\n            return -1;\n          }\n        }\n        break;\n\n      case fcolon:\n        if (*p == ':')\n          state = bword, ++p;\n        else\n      
    state = fword;\n        break;\n\n      case bword:\n        if (*p >= '0' && *p <= '9')\n          current_word = current_word * 16 + *p++ - '0';\n        else if (*p >= 'a' && *p <= 'f')\n          current_word = current_word * 16 + *p++ - 'a' + 10;\n        else if (*p >= 'A' && *p <= 'F')\n          current_word = current_word * 16 + *p++ - 'A' + 10;\n        else\n        {\n          if (num_front_bytes + num_back_bytes == 16)\n          {\n            ec = asio::error::invalid_argument;\n            return -1;\n          }\n\n          back_bytes[num_back_bytes++] = (current_word >> 8) & 0xFF;\n          back_bytes[num_back_bytes++] = current_word & 0xFF;\n          current_word = 0;\n\n          if (*p == ':')\n            state = bword, ++p;\n          else if (*p == '%')\n            state = scope, ++p;\n          else if (*p == 0)\n            state = done;\n          else\n          {\n            ec = asio::error::invalid_argument;\n            return -1;\n          }\n        }\n        break;\n\n      case scope:\n        if (*p >= '0' && *p <= '9')\n          current_word = current_word * 10 + *p++ - '0';\n        else if (*p == 0)\n          *scope_id = current_word, state = done;\n        else\n        {\n          ec = asio::error::invalid_argument;\n          return -1;\n        }\n        break;\n\n      default:\n        break;\n      }\n    }\n\n    for (int i = 0; i < num_back_bytes; ++i)\n      bytes[16 - num_back_bytes + i] = back_bytes[i];\n\n    ec = asio::error_code();\n    return 1;\n  }\n  else\n  {\n    ec = asio::error::address_family_not_supported;\n    return -1;\n  }\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  using namespace std; // For memcpy and strcmp.\n\n  if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6))\n  {\n    ec = asio::error::address_family_not_supported;\n    return -1;\n  }\n\n  union\n  {\n    socket_addr_type base;\n    sockaddr_storage_type storage;\n    sockaddr_in4_type v4;\n    
sockaddr_in6_type v6;\n  } address;\n  int address_length = sizeof(sockaddr_storage_type);\n#if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800))\n  int num_wide_chars = static_cast<int>(strlen(src)) + 1;\n  LPWSTR wide_buffer = (LPWSTR)_alloca(num_wide_chars * sizeof(WCHAR));\n  ::MultiByteToWideChar(CP_ACP, 0, src, -1, wide_buffer, num_wide_chars);\n  int result = error_wrapper(::WSAStringToAddressW(\n        wide_buffer, af, 0, &address.base, &address_length), ec);\n#else\n  int result = error_wrapper(::WSAStringToAddressA(\n        const_cast<char*>(src), af, 0, &address.base, &address_length), ec);\n#endif\n\n  if (af == ASIO_OS_DEF(AF_INET))\n  {\n    if (result != socket_error_retval)\n    {\n      memcpy(dest, &address.v4.sin_addr, sizeof(in4_addr_type));\n      ec = asio::error_code();\n    }\n    else if (strcmp(src, \"255.255.255.255\") == 0)\n    {\n      static_cast<in4_addr_type*>(dest)->s_addr = INADDR_NONE;\n      ec = asio::error_code();\n    }\n  }\n  else // AF_INET6\n  {\n    if (result != socket_error_retval)\n    {\n      memcpy(dest, &address.v6.sin6_addr, sizeof(in6_addr_type));\n      if (scope_id)\n        *scope_id = address.v6.sin6_scope_id;\n      ec = asio::error_code();\n    }\n  }\n\n  // Windows may not set an error code on failure.\n  if (result == socket_error_retval && !ec)\n    ec = asio::error::invalid_argument;\n\n  if (result != socket_error_retval)\n    ec = asio::error_code();\n\n  return result == socket_error_retval ? -1 : 1;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  using namespace std; // For strchr, memcpy and atoi.\n\n  // On some platforms, inet_pton fails if an address string contains a scope\n  // id. Detect and remove the scope id before passing the string to inet_pton.\n  const bool is_v6 = (af == ASIO_OS_DEF(AF_INET6));\n  const char* if_name = is_v6 ? 
strchr(src, '%') : 0;\n  char src_buf[max_addr_v6_str_len + 1];\n  const char* src_ptr = src;\n  if (if_name != 0)\n  {\n    if (if_name - src > max_addr_v6_str_len)\n    {\n      ec = asio::error::invalid_argument;\n      return 0;\n    }\n    memcpy(src_buf, src, if_name - src);\n    src_buf[if_name - src] = 0;\n    src_ptr = src_buf;\n  }\n\n  int result = error_wrapper(::inet_pton(af, src_ptr, dest), ec);\n  if (result <= 0 && !ec)\n    ec = asio::error::invalid_argument;\n  if (result > 0 && is_v6 && scope_id)\n  {\n    using namespace std; // For strchr and atoi.\n    *scope_id = 0;\n    if (if_name != 0)\n    {\n      in6_addr_type* ipv6_address = static_cast<in6_addr_type*>(dest);\n      bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe)\n          && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80));\n      bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff)\n          && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02));\n      if (is_link_local || is_multicast_link_local)\n        *scope_id = if_nametoindex(if_name + 1);\n      if (*scope_id == 0)\n        *scope_id = atoi(if_name + 1);\n    }\n  }\n  return result;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n}\n\nint gethostname(char* name, int namelen, asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS_RUNTIME)\n  try\n  {\n    using namespace Windows::Foundation::Collections;\n    using namespace Windows::Networking;\n    using namespace Windows::Networking::Connectivity;\n    IVectorView<HostName^>^ hostnames = NetworkInformation::GetHostNames();\n    for (unsigned i = 0; i < hostnames->Size; ++i)\n    {\n      HostName^ hostname = hostnames->GetAt(i);\n      if (hostname->Type == HostNameType::DomainName)\n      {\n        std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;\n        std::string raw_name = converter.to_bytes(hostname->RawName->Data());\n        if (namelen > 0 && raw_name.size() < static_cast<std::size_t>(namelen))\n        {\n   
       strcpy_s(name, namelen, raw_name.c_str());\n          return 0;\n        }\n      }\n    }\n    return -1;\n  }\n  catch (Platform::Exception^ e)\n  {\n    ec = asio::error_code(e->HResult,\n        asio::system_category());\n    return -1;\n  }\n#else // defined(ASIO_WINDOWS_RUNTIME)\n  int result = error_wrapper(::gethostname(name, namelen), ec);\n# if defined(ASIO_WINDOWS)\n  if (result == 0)\n    ec = asio::error_code();\n# endif // defined(ASIO_WINDOWS)\n  return result;\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n}\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\n#if !defined(ASIO_HAS_GETADDRINFO)\n\n// The following functions are only needed for emulation of getaddrinfo and\n// getnameinfo.\n\ninline asio::error_code translate_netdb_error(int error)\n{\n  switch (error)\n  {\n  case 0:\n    return asio::error_code();\n  case HOST_NOT_FOUND:\n    return asio::error::host_not_found;\n  case TRY_AGAIN:\n    return asio::error::host_not_found_try_again;\n  case NO_RECOVERY:\n    return asio::error::no_recovery;\n  case NO_DATA:\n    return asio::error::no_data;\n  default:\n    ASIO_ASSERT(false);\n    return asio::error::invalid_argument;\n  }\n}\n\ninline hostent* gethostbyaddr(const char* addr, int length, int af,\n    hostent* result, char* buffer, int buflength, asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  (void)(buffer);\n  (void)(buflength);\n  hostent* retval = error_wrapper(::gethostbyaddr(addr, length, af), ec);\n  if (!retval)\n    return 0;\n  ec = asio::error_code();\n  *result = *retval;\n  return retval;\n#elif defined(__sun) || defined(__QNX__)\n  int error = 0;\n  hostent* retval = error_wrapper(::gethostbyaddr_r(addr, length, af, result,\n        buffer, buflength, &error), ec);\n  if (error)\n    ec = translate_netdb_error(error);\n  return retval;\n#elif defined(__MACH__) && defined(__APPLE__)\n  (void)(buffer);\n  (void)(buflength);\n  int error = 0;\n  hostent* retval = 
error_wrapper(::getipnodebyaddr(\n        addr, length, af, &error), ec);\n  if (error)\n    ec = translate_netdb_error(error);\n  if (!retval)\n    return 0;\n  *result = *retval;\n  return retval;\n#else\n  hostent* retval = 0;\n  int error = 0;\n  error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer,\n        buflength, &retval, &error), ec);\n  if (error)\n    ec = translate_netdb_error(error);\n  return retval;\n#endif\n}\n\ninline hostent* gethostbyname(const char* name, int af, struct hostent* result,\n    char* buffer, int buflength, int ai_flags, asio::error_code& ec)\n{\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  (void)(buffer);\n  (void)(buflength);\n  (void)(ai_flags);\n  if (af != ASIO_OS_DEF(AF_INET))\n  {\n    ec = asio::error::address_family_not_supported;\n    return 0;\n  }\n  hostent* retval = error_wrapper(::gethostbyname(name), ec);\n  if (!retval)\n    return 0;\n  ec = asio::error_code();\n  *result = *retval;\n  return result;\n#elif defined(__sun) || defined(__QNX__)\n  (void)(ai_flags);\n  if (af != ASIO_OS_DEF(AF_INET))\n  {\n    ec = asio::error::address_family_not_supported;\n    return 0;\n  }\n  int error = 0;\n  hostent* retval = error_wrapper(::gethostbyname_r(name, result, buffer,\n        buflength, &error), ec);\n  if (error)\n    ec = translate_netdb_error(error);\n  return retval;\n#elif defined(__MACH__) && defined(__APPLE__)\n  (void)(buffer);\n  (void)(buflength);\n  int error = 0;\n  hostent* retval = error_wrapper(::getipnodebyname(\n        name, af, ai_flags, &error), ec);\n  if (error)\n    ec = translate_netdb_error(error);\n  if (!retval)\n    return 0;\n  *result = *retval;\n  return retval;\n#else\n  (void)(ai_flags);\n  if (af != ASIO_OS_DEF(AF_INET))\n  {\n    ec = asio::error::address_family_not_supported;\n    return 0;\n  }\n  hostent* retval = 0;\n  int error = 0;\n  error_wrapper(::gethostbyname_r(name, result,\n        buffer, buflength, &retval, &error), ec);\n  
if (error)\n    ec = translate_netdb_error(error);\n  return retval;\n#endif\n}\n\ninline void freehostent(hostent* h)\n{\n#if defined(__MACH__) && defined(__APPLE__)\n  if (h)\n    ::freehostent(h);\n#else\n  (void)(h);\n#endif\n}\n\n// Emulation of getaddrinfo based on implementation in:\n// Stevens, W. R., UNIX Network Programming Vol. 1, 2nd Ed., Prentice-Hall 1998.\n\nstruct gai_search\n{\n  const char* host;\n  int family;\n};\n\ninline int gai_nsearch(const char* host,\n    const addrinfo_type* hints, gai_search (&search)[2])\n{\n  int search_count = 0;\n  if (host == 0 || host[0] == '\\0')\n  {\n    if (hints->ai_flags & AI_PASSIVE)\n    {\n      // No host and AI_PASSIVE implies wildcard bind.\n      switch (hints->ai_family)\n      {\n      case ASIO_OS_DEF(AF_INET):\n        search[search_count].host = \"0.0.0.0\";\n        search[search_count].family = ASIO_OS_DEF(AF_INET);\n        ++search_count;\n        break;\n      case ASIO_OS_DEF(AF_INET6):\n        search[search_count].host = \"0::0\";\n        search[search_count].family = ASIO_OS_DEF(AF_INET6);\n        ++search_count;\n        break;\n      case ASIO_OS_DEF(AF_UNSPEC):\n        search[search_count].host = \"0::0\";\n        search[search_count].family = ASIO_OS_DEF(AF_INET6);\n        ++search_count;\n        search[search_count].host = \"0.0.0.0\";\n        search[search_count].family = ASIO_OS_DEF(AF_INET);\n        ++search_count;\n        break;\n      default:\n        break;\n      }\n    }\n    else\n    {\n      // No host and not AI_PASSIVE means connect to local host.\n      switch (hints->ai_family)\n      {\n      case ASIO_OS_DEF(AF_INET):\n        search[search_count].host = \"localhost\";\n        search[search_count].family = ASIO_OS_DEF(AF_INET);\n        ++search_count;\n        break;\n      case ASIO_OS_DEF(AF_INET6):\n        search[search_count].host = \"localhost\";\n        search[search_count].family = ASIO_OS_DEF(AF_INET6);\n        ++search_count;\n        break;\n 
     case ASIO_OS_DEF(AF_UNSPEC):\n        search[search_count].host = \"localhost\";\n        search[search_count].family = ASIO_OS_DEF(AF_INET6);\n        ++search_count;\n        search[search_count].host = \"localhost\";\n        search[search_count].family = ASIO_OS_DEF(AF_INET);\n        ++search_count;\n        break;\n      default:\n        break;\n      }\n    }\n  }\n  else\n  {\n    // Host is specified.\n    switch (hints->ai_family)\n    {\n    case ASIO_OS_DEF(AF_INET):\n      search[search_count].host = host;\n      search[search_count].family = ASIO_OS_DEF(AF_INET);\n      ++search_count;\n      break;\n    case ASIO_OS_DEF(AF_INET6):\n      search[search_count].host = host;\n      search[search_count].family = ASIO_OS_DEF(AF_INET6);\n      ++search_count;\n      break;\n    case ASIO_OS_DEF(AF_UNSPEC):\n      search[search_count].host = host;\n      search[search_count].family = ASIO_OS_DEF(AF_INET6);\n      ++search_count;\n      search[search_count].host = host;\n      search[search_count].family = ASIO_OS_DEF(AF_INET);\n      ++search_count;\n      break;\n    default:\n      break;\n    }\n  }\n  return search_count;\n}\n\ntemplate <typename T>\ninline T* gai_alloc(std::size_t size = sizeof(T))\n{\n  using namespace std;\n  T* p = static_cast<T*>(::operator new(size, std::nothrow));\n  if (p)\n    memset(p, 0, size);\n  return p;\n}\n\ninline void gai_free(void* p)\n{\n  ::operator delete(p);\n}\n\ninline void gai_strcpy(char* target, const char* source, std::size_t max_size)\n{\n  using namespace std;\n#if defined(ASIO_HAS_SECURE_RTL)\n  strcpy_s(target, max_size, source);\n#else // defined(ASIO_HAS_SECURE_RTL)\n  *target = 0;\n  if (max_size > 0)\n    strncat(target, source, max_size - 1);\n#endif // defined(ASIO_HAS_SECURE_RTL)\n}\n\nenum { gai_clone_flag = 1 << 30 };\n\ninline int gai_aistruct(addrinfo_type*** next, const addrinfo_type* hints,\n    const void* addr, int family)\n{\n  using namespace std;\n\n  addrinfo_type* ai = 
gai_alloc<addrinfo_type>();\n  if (ai == 0)\n    return EAI_MEMORY;\n\n  ai->ai_next = 0;\n  **next = ai;\n  *next = &ai->ai_next;\n\n  ai->ai_canonname = 0;\n  ai->ai_socktype = hints->ai_socktype;\n  if (ai->ai_socktype == 0)\n    ai->ai_flags |= gai_clone_flag;\n  ai->ai_protocol = hints->ai_protocol;\n  ai->ai_family = family;\n\n  switch (ai->ai_family)\n  {\n  case ASIO_OS_DEF(AF_INET):\n    {\n      sockaddr_in4_type* sinptr = gai_alloc<sockaddr_in4_type>();\n      if (sinptr == 0)\n        return EAI_MEMORY;\n      sinptr->sin_family = ASIO_OS_DEF(AF_INET);\n      memcpy(&sinptr->sin_addr, addr, sizeof(in4_addr_type));\n      ai->ai_addr = reinterpret_cast<sockaddr*>(sinptr);\n      ai->ai_addrlen = sizeof(sockaddr_in4_type);\n      break;\n    }\n  case ASIO_OS_DEF(AF_INET6):\n    {\n      sockaddr_in6_type* sin6ptr = gai_alloc<sockaddr_in6_type>();\n      if (sin6ptr == 0)\n        return EAI_MEMORY;\n      sin6ptr->sin6_family = ASIO_OS_DEF(AF_INET6);\n      memcpy(&sin6ptr->sin6_addr, addr, sizeof(in6_addr_type));\n      ai->ai_addr = reinterpret_cast<sockaddr*>(sin6ptr);\n      ai->ai_addrlen = sizeof(sockaddr_in6_type);\n      break;\n    }\n  default:\n    break;\n  }\n\n  return 0;\n}\n\ninline addrinfo_type* gai_clone(addrinfo_type* ai)\n{\n  using namespace std;\n\n  addrinfo_type* new_ai = gai_alloc<addrinfo_type>();\n  if (new_ai == 0)\n    return new_ai;\n\n  new_ai->ai_next = ai->ai_next;\n  ai->ai_next = new_ai;\n\n  new_ai->ai_flags = 0;\n  new_ai->ai_family = ai->ai_family;\n  new_ai->ai_socktype = ai->ai_socktype;\n  new_ai->ai_protocol = ai->ai_protocol;\n  new_ai->ai_canonname = 0;\n  new_ai->ai_addrlen = ai->ai_addrlen;\n  new_ai->ai_addr = gai_alloc<sockaddr>(ai->ai_addrlen);\n  memcpy(new_ai->ai_addr, ai->ai_addr, ai->ai_addrlen);\n\n  return new_ai;\n}\n\ninline int gai_port(addrinfo_type* aihead, int port, int socktype)\n{\n  int num_found = 0;\n\n  for (addrinfo_type* ai = aihead; ai; ai = ai->ai_next)\n  {\n    if (ai->ai_flags & 
gai_clone_flag)\n    {\n      if (ai->ai_socktype != 0)\n      {\n        ai = gai_clone(ai);\n        if (ai == 0)\n          return -1;\n        // ai now points to newly cloned entry.\n      }\n    }\n    else if (ai->ai_socktype != socktype)\n    {\n      // Ignore if mismatch on socket type.\n      continue;\n    }\n\n    ai->ai_socktype = socktype;\n\n    switch (ai->ai_family)\n    {\n    case ASIO_OS_DEF(AF_INET):\n      {\n        sockaddr_in4_type* sinptr =\n          reinterpret_cast<sockaddr_in4_type*>(ai->ai_addr);\n        sinptr->sin_port = port;\n        ++num_found;\n        break;\n      }\n    case ASIO_OS_DEF(AF_INET6):\n      {\n        sockaddr_in6_type* sin6ptr =\n          reinterpret_cast<sockaddr_in6_type*>(ai->ai_addr);\n        sin6ptr->sin6_port = port;\n        ++num_found;\n        break;\n      }\n    default:\n      break;\n    }\n  }\n\n  return num_found;\n}\n\ninline int gai_serv(addrinfo_type* aihead,\n    const addrinfo_type* hints, const char* serv)\n{\n  using namespace std;\n\n  int num_found = 0;\n\n  if (\n#if defined(AI_NUMERICSERV)\n      (hints->ai_flags & AI_NUMERICSERV) ||\n#endif\n      isdigit(static_cast<unsigned char>(serv[0])))\n  {\n    int port = htons(atoi(serv));\n    if (hints->ai_socktype)\n    {\n      // Caller specifies socket type.\n      int rc = gai_port(aihead, port, hints->ai_socktype);\n      if (rc < 0)\n        return EAI_MEMORY;\n      num_found += rc;\n    }\n    else\n    {\n      // Caller does not specify socket type.\n      int rc = gai_port(aihead, port, SOCK_STREAM);\n      if (rc < 0)\n        return EAI_MEMORY;\n      num_found += rc;\n      rc = gai_port(aihead, port, SOCK_DGRAM);\n      if (rc < 0)\n        return EAI_MEMORY;\n      num_found += rc;\n    }\n  }\n  else\n  {\n    // Try service name with TCP first, then UDP.\n    if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_STREAM)\n    {\n      servent* sptr = getservbyname(serv, \"tcp\");\n      if (sptr != 0)\n      {\n 
       int rc = gai_port(aihead, sptr->s_port, SOCK_STREAM);\n        if (rc < 0)\n          return EAI_MEMORY;\n        num_found += rc;\n      }\n    }\n    if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_DGRAM)\n    {\n      servent* sptr = getservbyname(serv, \"udp\");\n      if (sptr != 0)\n      {\n        int rc = gai_port(aihead, sptr->s_port, SOCK_DGRAM);\n        if (rc < 0)\n          return EAI_MEMORY;\n        num_found += rc;\n      }\n    }\n  }\n\n  if (num_found == 0)\n  {\n    if (hints->ai_socktype == 0)\n    {\n      // All calls to getservbyname() failed.\n      return EAI_NONAME;\n    }\n    else\n    {\n      // Service not supported for socket type.\n      return EAI_SERVICE;\n    }\n  }\n\n  return 0;\n}\n\ninline int gai_echeck(const char* host, const char* service,\n    int flags, int family, int socktype, int protocol)\n{\n  (void)(flags);\n  (void)(protocol);\n\n  // Host or service must be specified.\n  if (host == 0 || host[0] == '\\0')\n    if (service == 0 || service[0] == '\\0')\n      return EAI_NONAME;\n\n  // Check combination of family and socket type.\n  switch (family)\n  {\n  case ASIO_OS_DEF(AF_UNSPEC):\n    break;\n  case ASIO_OS_DEF(AF_INET):\n  case ASIO_OS_DEF(AF_INET6):\n    if (service != 0 && service[0] != '\\0')\n      if (socktype != 0 && socktype != SOCK_STREAM && socktype != SOCK_DGRAM)\n        return EAI_SOCKTYPE;\n    break;\n  default:\n    return EAI_FAMILY;\n  }\n\n  return 0;\n}\n\ninline void freeaddrinfo_emulation(addrinfo_type* aihead)\n{\n  addrinfo_type* ai = aihead;\n  while (ai)\n  {\n    gai_free(ai->ai_addr);\n    gai_free(ai->ai_canonname);\n    addrinfo_type* ainext = ai->ai_next;\n    gai_free(ai);\n    ai = ainext;\n  }\n}\n\ninline int getaddrinfo_emulation(const char* host, const char* service,\n    const addrinfo_type* hintsp, addrinfo_type** result)\n{\n  // Set up linked list of addrinfo structures.\n  addrinfo_type* aihead = 0;\n  addrinfo_type** ainext = &aihead;\n  char* 
canon = 0;\n\n  // Supply default hints if not specified by caller.\n  addrinfo_type hints = addrinfo_type();\n  hints.ai_family = ASIO_OS_DEF(AF_UNSPEC);\n  if (hintsp)\n    hints = *hintsp;\n\n  // If the resolution is not specifically for AF_INET6, remove the AI_V4MAPPED\n  // and AI_ALL flags.\n#if defined(AI_V4MAPPED)\n  if (hints.ai_family != ASIO_OS_DEF(AF_INET6))\n    hints.ai_flags &= ~AI_V4MAPPED;\n#endif\n#if defined(AI_ALL)\n  if (hints.ai_family != ASIO_OS_DEF(AF_INET6))\n    hints.ai_flags &= ~AI_ALL;\n#endif\n\n  // Basic error checking.\n  int rc = gai_echeck(host, service, hints.ai_flags, hints.ai_family,\n      hints.ai_socktype, hints.ai_protocol);\n  if (rc != 0)\n  {\n    freeaddrinfo_emulation(aihead);\n    return rc;\n  }\n\n  gai_search search[2];\n  int search_count = gai_nsearch(host, &hints, search);\n  for (gai_search* sptr = search; sptr < search + search_count; ++sptr)\n  {\n    // Check for IPv4 dotted decimal string.\n    in4_addr_type inaddr;\n    asio::error_code ec;\n    if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET),\n          sptr->host, &inaddr, 0, ec) == 1)\n    {\n      if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC)\n          && hints.ai_family != ASIO_OS_DEF(AF_INET))\n      {\n        freeaddrinfo_emulation(aihead);\n        gai_free(canon);\n        return EAI_FAMILY;\n      }\n      if (sptr->family == ASIO_OS_DEF(AF_INET))\n      {\n        rc = gai_aistruct(&ainext, &hints, &inaddr, ASIO_OS_DEF(AF_INET));\n        if (rc != 0)\n        {\n          freeaddrinfo_emulation(aihead);\n          gai_free(canon);\n          return rc;\n        }\n      }\n      continue;\n    }\n\n    // Check for IPv6 hex string.\n    in6_addr_type in6addr;\n    if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6),\n          sptr->host, &in6addr, 0, ec) == 1)\n    {\n      if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC)\n          && hints.ai_family != ASIO_OS_DEF(AF_INET6))\n      {\n        freeaddrinfo_emulation(aihead);\n        
gai_free(canon);\n        return EAI_FAMILY;\n      }\n      if (sptr->family == ASIO_OS_DEF(AF_INET6))\n      {\n        rc = gai_aistruct(&ainext, &hints, &in6addr,\n            ASIO_OS_DEF(AF_INET6));\n        if (rc != 0)\n        {\n          freeaddrinfo_emulation(aihead);\n          gai_free(canon);\n          return rc;\n        }\n      }\n      continue;\n    }\n\n    // Look up hostname.\n    hostent hent;\n    char hbuf[8192] = \"\";\n    hostent* hptr = socket_ops::gethostbyname(sptr->host,\n        sptr->family, &hent, hbuf, sizeof(hbuf), hints.ai_flags, ec);\n    if (hptr == 0)\n    {\n      if (search_count == 2)\n      {\n        // Failure is OK if there are multiple searches.\n        continue;\n      }\n      freeaddrinfo_emulation(aihead);\n      gai_free(canon);\n      if (ec == asio::error::host_not_found)\n        return EAI_NONAME;\n      if (ec == asio::error::host_not_found_try_again)\n        return EAI_AGAIN;\n      if (ec == asio::error::no_recovery)\n        return EAI_FAIL;\n      if (ec == asio::error::no_data)\n        return EAI_NONAME;\n      return EAI_NONAME;\n    }\n\n    // Check for address family mismatch if one was specified.\n    if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC)\n        && hints.ai_family != hptr->h_addrtype)\n    {\n      freeaddrinfo_emulation(aihead);\n      gai_free(canon);\n      socket_ops::freehostent(hptr);\n      return EAI_FAMILY;\n    }\n\n    // Save canonical name first time.\n    if (host != 0 && host[0] != '\\0' && hptr->h_name && hptr->h_name[0]\n        && (hints.ai_flags & AI_CANONNAME) && canon == 0)\n    {\n      std::size_t canon_len = strlen(hptr->h_name) + 1;\n      canon = gai_alloc<char>(canon_len);\n      if (canon == 0)\n      {\n        freeaddrinfo_emulation(aihead);\n        socket_ops::freehostent(hptr);\n        return EAI_MEMORY;\n      }\n      gai_strcpy(canon, hptr->h_name, canon_len);\n    }\n\n    // Create an addrinfo structure for each returned address.\n    for 
(char** ap = hptr->h_addr_list; *ap; ++ap)\n    {\n      rc = gai_aistruct(&ainext, &hints, *ap, hptr->h_addrtype);\n      if (rc != 0)\n      {\n        freeaddrinfo_emulation(aihead);\n        gai_free(canon);\n        socket_ops::freehostent(hptr);\n        return EAI_FAMILY;\n      }\n    }\n\n    socket_ops::freehostent(hptr);\n  }\n\n  // Check if we found anything.\n  if (aihead == 0)\n  {\n    gai_free(canon);\n    return EAI_NONAME;\n  }\n\n  // Return canonical name in first entry.\n  if (host != 0 && host[0] != '\\0' && (hints.ai_flags & AI_CANONNAME))\n  {\n    if (canon)\n    {\n      aihead->ai_canonname = canon;\n      canon = 0;\n    }\n    else\n    {\n      std::size_t canonname_len = strlen(search[0].host) + 1;\n      aihead->ai_canonname = gai_alloc<char>(canonname_len);\n      if (aihead->ai_canonname == 0)\n      {\n        freeaddrinfo_emulation(aihead);\n        return EAI_MEMORY;\n      }\n      gai_strcpy(aihead->ai_canonname, search[0].host, canonname_len);\n    }\n  }\n  gai_free(canon);\n\n  // Process the service name.\n  if (service != 0 && service[0] != '\\0')\n  {\n    rc = gai_serv(aihead, &hints, service);\n    if (rc != 0)\n    {\n      freeaddrinfo_emulation(aihead);\n      return rc;\n    }\n  }\n\n  // Return result to caller.\n  *result = aihead;\n  return 0;\n}\n\ninline asio::error_code getnameinfo_emulation(\n    const socket_addr_type* sa, std::size_t salen, char* host,\n    std::size_t hostlen, char* serv, std::size_t servlen, int flags,\n    asio::error_code& ec)\n{\n  using namespace std;\n\n  const char* addr;\n  size_t addr_len;\n  unsigned short port;\n  switch (sa->sa_family)\n  {\n  case ASIO_OS_DEF(AF_INET):\n    if (salen != sizeof(sockaddr_in4_type))\n    {\n      return ec = asio::error::invalid_argument;\n    }\n    addr = reinterpret_cast<const char*>(\n        &reinterpret_cast<const sockaddr_in4_type*>(sa)->sin_addr);\n    addr_len = sizeof(in4_addr_type);\n    port = reinterpret_cast<const 
sockaddr_in4_type*>(sa)->sin_port;\n    break;\n  case ASIO_OS_DEF(AF_INET6):\n    if (salen != sizeof(sockaddr_in6_type))\n    {\n      return ec = asio::error::invalid_argument;\n    }\n    addr = reinterpret_cast<const char*>(\n        &reinterpret_cast<const sockaddr_in6_type*>(sa)->sin6_addr);\n    addr_len = sizeof(in6_addr_type);\n    port = reinterpret_cast<const sockaddr_in6_type*>(sa)->sin6_port;\n    break;\n  default:\n    return ec = asio::error::address_family_not_supported;\n  }\n\n  if (host && hostlen > 0)\n  {\n    if (flags & NI_NUMERICHOST)\n    {\n      if (socket_ops::inet_ntop(sa->sa_family, addr, host, hostlen, 0, ec) == 0)\n      {\n        return ec;\n      }\n    }\n    else\n    {\n      hostent hent;\n      char hbuf[8192] = \"\";\n      hostent* hptr = socket_ops::gethostbyaddr(addr,\n          static_cast<int>(addr_len), sa->sa_family,\n          &hent, hbuf, sizeof(hbuf), ec);\n      if (hptr && hptr->h_name && hptr->h_name[0] != '\\0')\n      {\n        if (flags & NI_NOFQDN)\n        {\n          char* dot = strchr(hptr->h_name, '.');\n          if (dot)\n          {\n            *dot = 0;\n          }\n        }\n        gai_strcpy(host, hptr->h_name, hostlen);\n        socket_ops::freehostent(hptr);\n      }\n      else\n      {\n        socket_ops::freehostent(hptr);\n        if (flags & NI_NAMEREQD)\n        {\n          return ec = asio::error::host_not_found;\n        }\n        if (socket_ops::inet_ntop(sa->sa_family,\n              addr, host, hostlen, 0, ec) == 0)\n        {\n          return ec;\n        }\n      }\n    }\n  }\n\n  if (serv && servlen > 0)\n  {\n    if (flags & NI_NUMERICSERV)\n    {\n      if (servlen < 6)\n      {\n        return ec = asio::error::no_buffer_space;\n      }\n#if defined(ASIO_HAS_SECURE_RTL)\n      sprintf_s(serv, servlen, \"%u\", ntohs(port));\n#else // defined(ASIO_HAS_SECURE_RTL)\n      sprintf(serv, \"%u\", ntohs(port));\n#endif // defined(ASIO_HAS_SECURE_RTL)\n    }\n    else\n    
{\n#if defined(ASIO_HAS_PTHREADS)\n      static ::pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;\n      ::pthread_mutex_lock(&mutex);\n#endif // defined(ASIO_HAS_PTHREADS)\n      servent* sptr = ::getservbyport(port, (flags & NI_DGRAM) ? \"udp\" : 0);\n      if (sptr && sptr->s_name && sptr->s_name[0] != '\\0')\n      {\n        gai_strcpy(serv, sptr->s_name, servlen);\n      }\n      else\n      {\n        if (servlen < 6)\n        {\n          return ec = asio::error::no_buffer_space;\n        }\n#if defined(ASIO_HAS_SECURE_RTL)\n        sprintf_s(serv, servlen, \"%u\", ntohs(port));\n#else // defined(ASIO_HAS_SECURE_RTL)\n        sprintf(serv, \"%u\", ntohs(port));\n#endif // defined(ASIO_HAS_SECURE_RTL)\n      }\n#if defined(ASIO_HAS_PTHREADS)\n      ::pthread_mutex_unlock(&mutex);\n#endif // defined(ASIO_HAS_PTHREADS)\n    }\n  }\n\n  ec = asio::error_code();\n  return ec;\n}\n\n#endif // !defined(ASIO_HAS_GETADDRINFO)\n\ninline asio::error_code translate_addrinfo_error(int error)\n{\n  switch (error)\n  {\n  case 0:\n    return asio::error_code();\n  case EAI_AGAIN:\n    return asio::error::host_not_found_try_again;\n  case EAI_BADFLAGS:\n    return asio::error::invalid_argument;\n  case EAI_FAIL:\n    return asio::error::no_recovery;\n  case EAI_FAMILY:\n    return asio::error::address_family_not_supported;\n  case EAI_MEMORY:\n    return asio::error::no_memory;\n  case EAI_NONAME:\n#if defined(EAI_ADDRFAMILY)\n  case EAI_ADDRFAMILY:\n#endif\n#if defined(EAI_NODATA) && (EAI_NODATA != EAI_NONAME)\n  case EAI_NODATA:\n#endif\n    return asio::error::host_not_found;\n  case EAI_SERVICE:\n    return asio::error::service_not_found;\n  case EAI_SOCKTYPE:\n    return asio::error::socket_type_not_supported;\n  default: // Possibly the non-portable EAI_SYSTEM.\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n    return asio::error_code(\n        WSAGetLastError(), asio::error::get_system_category());\n#else\n    return asio::error_code(\n        errno, 
asio::error::get_system_category());\n#endif\n  }\n}\n\nasio::error_code getaddrinfo(const char* host,\n    const char* service, const addrinfo_type& hints,\n    addrinfo_type** result, asio::error_code& ec)\n{\n  host = (host && *host) ? host : 0;\n  service = (service && *service) ? service : 0;\n  clear_last_error();\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# if defined(ASIO_HAS_GETADDRINFO)\n  // Building for Windows XP, Windows Server 2003, or later.\n  int error = ::getaddrinfo(host, service, &hints, result);\n  return ec = translate_addrinfo_error(error);\n# else\n  // Building for Windows 2000 or earlier.\n  typedef int (WSAAPI *gai_t)(const char*,\n      const char*, const addrinfo_type*, addrinfo_type**);\n  if (HMODULE winsock_module = ::GetModuleHandleA(\"ws2_32\"))\n  {\n    if (gai_t gai = (gai_t)::GetProcAddress(winsock_module, \"getaddrinfo\"))\n    {\n      int error = gai(host, service, &hints, result);\n      return ec = translate_addrinfo_error(error);\n    }\n  }\n  int error = getaddrinfo_emulation(host, service, &hints, result);\n  return ec = translate_addrinfo_error(error);\n# endif\n#elif !defined(ASIO_HAS_GETADDRINFO)\n  int error = getaddrinfo_emulation(host, service, &hints, result);\n  return ec = translate_addrinfo_error(error);\n#else\n  int error = ::getaddrinfo(host, service, &hints, result);\n#if defined(__MACH__) && defined(__APPLE__)\n  using namespace std; // For isdigit and atoi.\n  if (error == 0 && service && isdigit(static_cast<unsigned char>(service[0])))\n  {\n    u_short_type port = host_to_network_short(atoi(service));\n    for (addrinfo_type* ai = *result; ai; ai = ai->ai_next)\n    {\n      switch (ai->ai_family)\n      {\n      case ASIO_OS_DEF(AF_INET):\n        {\n          sockaddr_in4_type* sinptr =\n            reinterpret_cast<sockaddr_in4_type*>(ai->ai_addr);\n          if (sinptr->sin_port == 0)\n            sinptr->sin_port = port;\n          break;\n        }\n      case ASIO_OS_DEF(AF_INET6):\n   
     {\n          sockaddr_in6_type* sin6ptr =\n            reinterpret_cast<sockaddr_in6_type*>(ai->ai_addr);\n          if (sin6ptr->sin6_port == 0)\n            sin6ptr->sin6_port = port;\n          break;\n        }\n      default:\n        break;\n      }\n    }\n  }\n#endif\n  return ec = translate_addrinfo_error(error);\n#endif\n}\n\nasio::error_code background_getaddrinfo(\n    const weak_cancel_token_type& cancel_token, const char* host,\n    const char* service, const addrinfo_type& hints,\n    addrinfo_type** result, asio::error_code& ec)\n{\n  if (cancel_token.expired())\n    ec = asio::error::operation_aborted;\n  else\n    socket_ops::getaddrinfo(host, service, hints, result, ec);\n  return ec;\n}\n\nvoid freeaddrinfo(addrinfo_type* ai)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# if defined(ASIO_HAS_GETADDRINFO)\n  // Building for Windows XP, Windows Server 2003, or later.\n  ::freeaddrinfo(ai);\n# else\n  // Building for Windows 2000 or earlier.\n  typedef int (WSAAPI *fai_t)(addrinfo_type*);\n  if (HMODULE winsock_module = ::GetModuleHandleA(\"ws2_32\"))\n  {\n    if (fai_t fai = (fai_t)::GetProcAddress(winsock_module, \"freeaddrinfo\"))\n    {\n      fai(ai);\n      return;\n    }\n  }\n  freeaddrinfo_emulation(ai);\n# endif\n#elif !defined(ASIO_HAS_GETADDRINFO)\n  freeaddrinfo_emulation(ai);\n#else\n  ::freeaddrinfo(ai);\n#endif\n}\n\nasio::error_code getnameinfo(const socket_addr_type* addr,\n    std::size_t addrlen, char* host, std::size_t hostlen,\n    char* serv, std::size_t servlen, int flags, asio::error_code& ec)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# if defined(ASIO_HAS_GETADDRINFO)\n  // Building for Windows XP, Windows Server 2003, or later.\n  clear_last_error();\n  int error = ::getnameinfo(addr, static_cast<socklen_t>(addrlen),\n      host, static_cast<DWORD>(hostlen),\n      serv, static_cast<DWORD>(servlen), flags);\n  return ec = translate_addrinfo_error(error);\n# else\n  // Building for Windows 2000 
or earlier.\n  typedef int (WSAAPI *gni_t)(const socket_addr_type*,\n      int, char*, DWORD, char*, DWORD, int);\n  if (HMODULE winsock_module = ::GetModuleHandleA(\"ws2_32\"))\n  {\n    if (gni_t gni = (gni_t)::GetProcAddress(winsock_module, \"getnameinfo\"))\n    {\n      clear_last_error();\n      int error = gni(addr, static_cast<int>(addrlen),\n          host, static_cast<DWORD>(hostlen),\n          serv, static_cast<DWORD>(servlen), flags);\n      return ec = translate_addrinfo_error(error);\n    }\n  }\n  clear_last_error();\n  return getnameinfo_emulation(addr, addrlen,\n      host, hostlen, serv, servlen, flags, ec);\n# endif\n#elif !defined(ASIO_HAS_GETADDRINFO)\n  using namespace std; // For memcpy.\n  sockaddr_storage_type tmp_addr;\n  memcpy(&tmp_addr, addr, addrlen);\n  addr = reinterpret_cast<socket_addr_type*>(&tmp_addr);\n  clear_last_error();\n  return getnameinfo_emulation(addr, addrlen,\n      host, hostlen, serv, servlen, flags, ec);\n#else\n  clear_last_error();\n  int error = ::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags);\n  return ec = translate_addrinfo_error(error);\n#endif\n}\n\nasio::error_code sync_getnameinfo(\n    const socket_addr_type* addr, std::size_t addrlen,\n    char* host, std::size_t hostlen, char* serv,\n    std::size_t servlen, int sock_type, asio::error_code& ec)\n{\n  // First try resolving with the service name. If that fails try resolving\n  // but allow the service to be returned as a number.\n  int flags = (sock_type == SOCK_DGRAM) ? 
NI_DGRAM : 0;\n  socket_ops::getnameinfo(addr, addrlen, host,\n      hostlen, serv, servlen, flags, ec);\n  if (ec)\n  {\n    socket_ops::getnameinfo(addr, addrlen, host, hostlen,\n        serv, servlen, flags | NI_NUMERICSERV, ec);\n  }\n\n  return ec;\n}\n\nasio::error_code background_getnameinfo(\n    const weak_cancel_token_type& cancel_token,\n    const socket_addr_type* addr, std::size_t addrlen,\n    char* host, std::size_t hostlen, char* serv,\n    std::size_t servlen, int sock_type, asio::error_code& ec)\n{\n  if (cancel_token.expired())\n  {\n    ec = asio::error::operation_aborted;\n  }\n  else\n  {\n    // First try resolving with the service name. If that fails try resolving\n    // but allow the service to be returned as a number.\n    int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0;\n    socket_ops::getnameinfo(addr, addrlen, host,\n        hostlen, serv, servlen, flags, ec);\n    if (ec)\n    {\n      socket_ops::getnameinfo(addr, addrlen, host, hostlen,\n          serv, servlen, flags | NI_NUMERICSERV, ec);\n    }\n  }\n\n  return ec;\n}\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\nu_long_type network_to_host_long(u_long_type value)\n{\n#if defined(ASIO_WINDOWS_RUNTIME)\n  unsigned char* value_p = reinterpret_cast<unsigned char*>(&value);\n  u_long_type result = (static_cast<u_long_type>(value_p[0]) << 24)\n    | (static_cast<u_long_type>(value_p[1]) << 16)\n    | (static_cast<u_long_type>(value_p[2]) << 8)\n    | static_cast<u_long_type>(value_p[3]);\n  return result;\n#else // defined(ASIO_WINDOWS_RUNTIME)\n  return ntohl(value);\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n}\n\nu_long_type host_to_network_long(u_long_type value)\n{\n#if defined(ASIO_WINDOWS_RUNTIME)\n  u_long_type result;\n  unsigned char* result_p = reinterpret_cast<unsigned char*>(&result);\n  result_p[0] = static_cast<unsigned char>((value >> 24) & 0xFF);\n  result_p[1] = static_cast<unsigned char>((value >> 16) & 0xFF);\n  result_p[2] = static_cast<unsigned char>((value 
>> 8) & 0xFF);\n  result_p[3] = static_cast<unsigned char>(value & 0xFF);\n  return result;\n#else // defined(ASIO_WINDOWS_RUNTIME)\n  return htonl(value);\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n}\n\nu_short_type network_to_host_short(u_short_type value)\n{\n#if defined(ASIO_WINDOWS_RUNTIME)\n  unsigned char* value_p = reinterpret_cast<unsigned char*>(&value);\n  u_short_type result = (static_cast<u_short_type>(value_p[0]) << 8)\n    | static_cast<u_short_type>(value_p[1]);\n  return result;\n#else // defined(ASIO_WINDOWS_RUNTIME)\n  return ntohs(value);\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n}\n\nu_short_type host_to_network_short(u_short_type value)\n{\n#if defined(ASIO_WINDOWS_RUNTIME)\n  u_short_type result;\n  unsigned char* result_p = reinterpret_cast<unsigned char*>(&result);\n  result_p[0] = static_cast<unsigned char>((value >> 8) & 0xFF);\n  result_p[1] = static_cast<unsigned char>(value & 0xFF);\n  return result;\n#else // defined(ASIO_WINDOWS_RUNTIME)\n  return htons(value);\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n}\n\n} // namespace socket_ops\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SOCKET_OPS_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/socket_select_interrupter.ipp",
    "content": "//\n// detail/impl/socket_select_interrupter.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP\n#define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\n#if defined(ASIO_WINDOWS) \\\n  || defined(__CYGWIN__) \\\n  || defined(__SYMBIAN32__)\n\n#include <cstdlib>\n#include \"asio/detail/socket_holder.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_select_interrupter.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nsocket_select_interrupter::socket_select_interrupter()\n{\n  open_descriptors();\n}\n\nvoid socket_select_interrupter::open_descriptors()\n{\n  asio::error_code ec;\n  socket_holder acceptor(socket_ops::socket(\n        AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));\n  if (acceptor.get() == invalid_socket)\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n\n  int opt = 1;\n  socket_ops::state_type acceptor_state = 0;\n  socket_ops::setsockopt(acceptor.get(), acceptor_state,\n      SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec);\n\n  using namespace std; // For memset.\n  sockaddr_in4_type addr;\n  std::size_t addr_len = sizeof(addr);\n  memset(&addr, 0, sizeof(addr));\n  addr.sin_family = AF_INET;\n  addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK);\n  addr.sin_port = 0;\n  if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr,\n        addr_len, ec) == 
socket_error_retval)\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n\n  if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr,\n        &addr_len, ec) == socket_error_retval)\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n\n  // Some broken firewalls on Windows will intermittently cause getsockname to\n  // return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We\n  // explicitly specify the target address here to work around this problem.\n  if (addr.sin_addr.s_addr == socket_ops::host_to_network_long(INADDR_ANY))\n    addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK);\n\n  if (socket_ops::listen(acceptor.get(),\n        SOMAXCONN, ec) == socket_error_retval)\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n\n  socket_holder client(socket_ops::socket(\n        AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));\n  if (client.get() == invalid_socket)\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n\n  if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr,\n        addr_len, ec) == socket_error_retval)\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n\n  socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec));\n  if (server.get() == invalid_socket)\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n  \n  ioctl_arg_type non_blocking = 1;\n  socket_ops::state_type client_state = 0;\n  if (socket_ops::ioctl(client.get(), client_state,\n        FIONBIO, &non_blocking, ec))\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n\n  opt = 1;\n  socket_ops::setsockopt(client.get(), client_state,\n      IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);\n\n  non_blocking = 1;\n  socket_ops::state_type server_state = 0;\n  if (socket_ops::ioctl(server.get(), server_state,\n        FIONBIO, &non_blocking, ec))\n    asio::detail::throw_error(ec, \"socket_select_interrupter\");\n\n  opt = 
1;\n  socket_ops::setsockopt(server.get(), server_state,\n      IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);\n\n  read_descriptor_ = server.release();\n  write_descriptor_ = client.release();\n}\n\nsocket_select_interrupter::~socket_select_interrupter()\n{\n  close_descriptors();\n}\n\nvoid socket_select_interrupter::close_descriptors()\n{\n  asio::error_code ec;\n  socket_ops::state_type state = socket_ops::internal_non_blocking;\n  if (read_descriptor_ != invalid_socket)\n    socket_ops::close(read_descriptor_, state, true, ec);\n  if (write_descriptor_ != invalid_socket)\n    socket_ops::close(write_descriptor_, state, true, ec);\n}\n\nvoid socket_select_interrupter::recreate()\n{\n  close_descriptors();\n\n  write_descriptor_ = invalid_socket;\n  read_descriptor_ = invalid_socket;\n\n  open_descriptors();\n}\n\nvoid socket_select_interrupter::interrupt()\n{\n  char byte = 0;\n  socket_ops::buf b;\n  socket_ops::init_buf(b, &byte, 1);\n  asio::error_code ec;\n  socket_ops::send(write_descriptor_, &b, 1, 0, ec);\n}\n\nbool socket_select_interrupter::reset()\n{\n  char data[1024];\n  socket_ops::buf b;\n  socket_ops::init_buf(b, data, sizeof(data));\n  asio::error_code ec;\n  int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);\n  bool was_interrupted = (bytes_read > 0);\n  while (bytes_read == sizeof(data))\n    bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);\n  return was_interrupted;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS)\n       // || defined(__CYGWIN__)\n       // || defined(__SYMBIAN32__)\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/strand_executor_service.hpp",
    "content": "//\n// detail/impl/strand_executor_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP\n#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/call_stack.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/recycling_allocator.hpp\"\n#include \"asio/executor_work_guard.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Executor>\nclass strand_executor_service::invoker\n{\npublic:\n  invoker(const implementation_type& impl, Executor& ex)\n    : impl_(impl),\n      work_(ex)\n  {\n  }\n\n  invoker(const invoker& other)\n    : impl_(other.impl_),\n      work_(other.work_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  invoker(invoker&& other)\n    : impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_)),\n      work_(ASIO_MOVE_CAST(executor_work_guard<Executor>)(other.work_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  struct on_invoker_exit\n  {\n    invoker* this_;\n\n    ~on_invoker_exit()\n    {\n      this_->impl_->mutex_->lock();\n      this_->impl_->ready_queue_.push(this_->impl_->waiting_queue_);\n      bool more_handlers = this_->impl_->locked_ =\n        !this_->impl_->ready_queue_.empty();\n      this_->impl_->mutex_->unlock();\n\n      if (more_handlers)\n      {\n        Executor ex(this_->work_.get_executor());\n        recycling_allocator<void> allocator;\n        ex.post(ASIO_MOVE_CAST(invoker)(*this_), allocator);\n      }\n    }\n  };\n\n  void operator()()\n  {\n  
  // Indicate that this strand is executing on the current thread.\n    call_stack<strand_impl>::context ctx(impl_.get());\n\n    // Ensure the next handler, if any, is scheduled on block exit.\n    on_invoker_exit on_exit = { this };\n    (void)on_exit;\n\n    // Run all ready handlers. No lock is required since the ready queue is\n    // accessed only within the strand.\n    asio::error_code ec;\n    while (scheduler_operation* o = impl_->ready_queue_.front())\n    {\n      impl_->ready_queue_.pop();\n      o->complete(impl_.get(), ec, 0);\n    }\n  }\n\nprivate:\n  implementation_type impl_;\n  executor_work_guard<Executor> work_;\n};\n\ntemplate <typename Executor, typename Function, typename Allocator>\nvoid strand_executor_service::dispatch(const implementation_type& impl,\n    Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a)\n{\n  typedef typename decay<Function>::type function_type;\n\n  // If we are already in the strand then the function can run immediately.\n  if (call_stack<strand_impl>::contains(impl.get()))\n  {\n    // Make a local, non-const copy of the function.\n    function_type tmp(ASIO_MOVE_CAST(Function)(function));\n\n    fenced_block b(fenced_block::full);\n    asio_handler_invoke_helpers::invoke(tmp, tmp);\n    return;\n  }\n\n  // Allocate and construct an operation to wrap the function.\n  typedef executor_op<function_type, Allocator> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a);\n\n  ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,\n        \"strand_executor\", impl.get(), 0, \"dispatch\"));\n\n  // Add the function to the strand and schedule the strand if required.\n  bool first = enqueue(impl, p.p);\n  p.v = p.p = 0;\n  if (first)\n    ex.dispatch(invoker<Executor>(impl, ex), a);\n}\n\n// Request invocation of the given function and return immediately.\ntemplate <typename Executor, typename Function, typename 
Allocator>\nvoid strand_executor_service::post(const implementation_type& impl,\n    Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a)\n{\n  typedef typename decay<Function>::type function_type;\n\n  // Allocate and construct an operation to wrap the function.\n  typedef executor_op<function_type, Allocator> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a);\n\n  ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,\n        \"strand_executor\", impl.get(), 0, \"post\"));\n\n  // Add the function to the strand and schedule the strand if required.\n  bool first = enqueue(impl, p.p);\n  p.v = p.p = 0;\n  if (first)\n    ex.post(invoker<Executor>(impl, ex), a);\n}\n\n// Request invocation of the given function and return immediately.\ntemplate <typename Executor, typename Function, typename Allocator>\nvoid strand_executor_service::defer(const implementation_type& impl,\n    Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a)\n{\n  typedef typename decay<Function>::type function_type;\n\n  // Allocate and construct an operation to wrap the function.\n  typedef executor_op<function_type, Allocator> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a);\n\n  ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,\n        \"strand_executor\", impl.get(), 0, \"defer\"));\n\n  // Add the function to the strand and schedule the strand if required.\n  bool first = enqueue(impl, p.p);\n  p.v = p.p = 0;\n  if (first)\n    ex.defer(invoker<Executor>(impl, ex), a);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/strand_executor_service.ipp",
    "content": "//\n// detail/impl/strand_executor_service.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP\n#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/strand_executor_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstrand_executor_service::strand_executor_service(execution_context& ctx)\n  : execution_context_service_base<strand_executor_service>(ctx),\n    mutex_(),\n    salt_(0),\n    impl_list_(0)\n{\n}\n\nvoid strand_executor_service::shutdown()\n{\n  op_queue<scheduler_operation> ops;\n\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  strand_impl* impl = impl_list_;\n  while (impl)\n  {\n    impl->mutex_->lock();\n    impl->shutdown_ = true;\n    ops.push(impl->waiting_queue_);\n    ops.push(impl->ready_queue_);\n    impl->mutex_->unlock();\n    impl = impl->next_;\n  }\n}\n\nstrand_executor_service::implementation_type\nstrand_executor_service::create_implementation()\n{\n  implementation_type new_impl(new strand_impl);\n  new_impl->locked_ = false;\n  new_impl->shutdown_ = false;\n\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  // Select a mutex from the pool of shared mutexes.\n  std::size_t salt = salt_++;\n  std::size_t mutex_index = reinterpret_cast<std::size_t>(new_impl.get());\n  mutex_index += (reinterpret_cast<std::size_t>(new_impl.get()) >> 3);\n  mutex_index ^= salt + 0x9e3779b9 + (mutex_index << 6) + (mutex_index >> 2);\n  mutex_index = mutex_index % num_mutexes;\n  if (!mutexes_[mutex_index].get())\n 
   mutexes_[mutex_index].reset(new mutex);\n  new_impl->mutex_ = mutexes_[mutex_index].get();\n\n  // Insert implementation into linked list of all implementations.\n  new_impl->next_ = impl_list_;\n  new_impl->prev_ = 0;\n  if (impl_list_)\n    impl_list_->prev_ = new_impl.get();\n  impl_list_ = new_impl.get();\n  new_impl->service_ = this;\n\n  return new_impl;\n}\n\nstrand_executor_service::strand_impl::~strand_impl()\n{\n  asio::detail::mutex::scoped_lock lock(service_->mutex_);\n\n  // Remove implementation from linked list of all implementations.\n  if (service_->impl_list_ == this)\n    service_->impl_list_ = next_;\n  if (prev_)\n    prev_->next_ = next_;\n  if (next_)\n    next_->prev_= prev_;\n}\n\nbool strand_executor_service::enqueue(const implementation_type& impl,\n    scheduler_operation* op)\n{\n  impl->mutex_->lock();\n  if (impl->shutdown_)\n  {\n    impl->mutex_->unlock();\n    op->destroy();\n    return false;\n  }\n  else if (impl->locked_)\n  {\n    // Some other function already holds the strand lock. Enqueue for later.\n    impl->waiting_queue_.push(op);\n    impl->mutex_->unlock();\n    return false;\n  }\n  else\n  {\n    // The function is acquiring the strand lock and so is responsible for\n    // scheduling the strand.\n    impl->locked_ = true;\n    impl->mutex_->unlock();\n    impl->ready_queue_.push(op);\n    return true;\n  }\n}\n\nbool strand_executor_service::running_in_this_thread(\n    const implementation_type& impl)\n{\n  return !!call_stack<strand_impl>::contains(impl.get());\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/strand_service.hpp",
    "content": "//\n// detail/impl/strand_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP\n#define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/call_stack.hpp\"\n#include \"asio/detail/completion_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ninline strand_service::strand_impl::strand_impl()\n  : operation(&strand_service::do_complete),\n    locked_(false)\n{\n}\n\nstruct strand_service::on_dispatch_exit\n{\n  io_context_impl* io_context_;\n  strand_impl* impl_;\n\n  ~on_dispatch_exit()\n  {\n    impl_->mutex_.lock();\n    impl_->ready_queue_.push(impl_->waiting_queue_);\n    bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();\n    impl_->mutex_.unlock();\n\n    if (more_handlers)\n      io_context_->post_immediate_completion(impl_, false);\n  }\n};\n\ntemplate <typename Handler>\nvoid strand_service::dispatch(strand_service::implementation_type& impl,\n    Handler& handler)\n{\n  // If we are already in the strand then the handler can run immediately.\n  if (call_stack<strand_impl>::contains(impl))\n  {\n    fenced_block b(fenced_block::full);\n    asio_handler_invoke_helpers::invoke(handler, handler);\n    return;\n  }\n\n  // Allocate and construct an operation to wrap the handler.\n  typedef completion_handler<Handler> op;\n  typename op::ptr p = { asio::detail::addressof(handler),\n    
op::ptr::allocate(handler), 0 };\n  p.p = new (p.v) op(handler);\n\n  ASIO_HANDLER_CREATION((this->context(),\n        *p.p, \"strand\", impl, 0, \"dispatch\"));\n\n  bool dispatch_immediately = do_dispatch(impl, p.p);\n  operation* o = p.p;\n  p.v = p.p = 0;\n\n  if (dispatch_immediately)\n  {\n    // Indicate that this strand is executing on the current thread.\n    call_stack<strand_impl>::context ctx(impl);\n\n    // Ensure the next handler, if any, is scheduled on block exit.\n    on_dispatch_exit on_exit = { &io_context_, impl };\n    (void)on_exit;\n\n    completion_handler<Handler>::do_complete(\n        &io_context_, o, asio::error_code(), 0);\n  }\n}\n\n// Request the io_context to invoke the given handler and return immediately.\ntemplate <typename Handler>\nvoid strand_service::post(strand_service::implementation_type& impl,\n    Handler& handler)\n{\n  bool is_continuation =\n    asio_handler_cont_helpers::is_continuation(handler);\n\n  // Allocate and construct an operation to wrap the handler.\n  typedef completion_handler<Handler> op;\n  typename op::ptr p = { asio::detail::addressof(handler),\n    op::ptr::allocate(handler), 0 };\n  p.p = new (p.v) op(handler);\n\n  ASIO_HANDLER_CREATION((this->context(),\n        *p.p, \"strand\", impl, 0, \"post\"));\n\n  do_post(impl, p.p, is_continuation);\n  p.v = p.p = 0;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/strand_service.ipp",
    "content": "//\n// detail/impl/strand_service.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP\n#define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/call_stack.hpp\"\n#include \"asio/detail/strand_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct strand_service::on_do_complete_exit\n{\n  io_context_impl* owner_;\n  strand_impl* impl_;\n\n  ~on_do_complete_exit()\n  {\n    impl_->mutex_.lock();\n    impl_->ready_queue_.push(impl_->waiting_queue_);\n    bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();\n    impl_->mutex_.unlock();\n\n    if (more_handlers)\n      owner_->post_immediate_completion(impl_, true);\n  }\n};\n\nstrand_service::strand_service(asio::io_context& io_context)\n  : asio::detail::service_base<strand_service>(io_context),\n    io_context_(asio::use_service<io_context_impl>(io_context)),\n    mutex_(),\n    salt_(0)\n{\n}\n\nvoid strand_service::shutdown()\n{\n  op_queue<operation> ops;\n\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  for (std::size_t i = 0; i < num_implementations; ++i)\n  {\n    if (strand_impl* impl = implementations_[i].get())\n    {\n      ops.push(impl->waiting_queue_);\n      ops.push(impl->ready_queue_);\n    }\n  }\n}\n\nvoid strand_service::construct(strand_service::implementation_type& impl)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  std::size_t salt = salt_++;\n#if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)\n  std::size_t index = salt;\n#else // 
defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)\n  std::size_t index = reinterpret_cast<std::size_t>(&impl);\n  index += (reinterpret_cast<std::size_t>(&impl) >> 3);\n  index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2);\n#endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)\n  index = index % num_implementations;\n\n  if (!implementations_[index].get())\n    implementations_[index].reset(new strand_impl);\n  impl = implementations_[index].get();\n}\n\nbool strand_service::running_in_this_thread(\n    const implementation_type& impl) const\n{\n  return call_stack<strand_impl>::contains(impl) != 0;\n}\n\nbool strand_service::do_dispatch(implementation_type& impl, operation* op)\n{\n  // If we are running inside the io_context, and no other handler already\n  // holds the strand lock, then the handler can run immediately.\n  bool can_dispatch = io_context_.can_dispatch();\n  impl->mutex_.lock();\n  if (can_dispatch && !impl->locked_)\n  {\n    // Immediate invocation is allowed.\n    impl->locked_ = true;\n    impl->mutex_.unlock();\n    return true;\n  }\n\n  if (impl->locked_)\n  {\n    // Some other handler already holds the strand lock. Enqueue for later.\n    impl->waiting_queue_.push(op);\n    impl->mutex_.unlock();\n  }\n  else\n  {\n    // The handler is acquiring the strand lock and so is responsible for\n    // scheduling the strand.\n    impl->locked_ = true;\n    impl->mutex_.unlock();\n    impl->ready_queue_.push(op);\n    io_context_.post_immediate_completion(impl, false);\n  }\n\n  return false;\n}\n\nvoid strand_service::do_post(implementation_type& impl,\n    operation* op, bool is_continuation)\n{\n  impl->mutex_.lock();\n  if (impl->locked_)\n  {\n    // Some other handler already holds the strand lock. 
Enqueue for later.\n    impl->waiting_queue_.push(op);\n    impl->mutex_.unlock();\n  }\n  else\n  {\n    // The handler is acquiring the strand lock and so is responsible for\n    // scheduling the strand.\n    impl->locked_ = true;\n    impl->mutex_.unlock();\n    impl->ready_queue_.push(op);\n    io_context_.post_immediate_completion(impl, is_continuation);\n  }\n}\n\nvoid strand_service::do_complete(void* owner, operation* base,\n    const asio::error_code& ec, std::size_t /*bytes_transferred*/)\n{\n  if (owner)\n  {\n    strand_impl* impl = static_cast<strand_impl*>(base);\n\n    // Indicate that this strand is executing on the current thread.\n    call_stack<strand_impl>::context ctx(impl);\n\n    // Ensure the next handler, if any, is scheduled on block exit.\n    on_do_complete_exit on_exit;\n    on_exit.owner_ = static_cast<io_context_impl*>(owner);\n    on_exit.impl_ = impl;\n\n    // Run all ready handlers. No lock is required since the ready queue is\n    // accessed only within the strand.\n    while (operation* o = impl->ready_queue_.front())\n    {\n      impl->ready_queue_.pop();\n      o->complete(owner, ec, 0);\n    }\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/throw_error.ipp",
    "content": "//\n// detail/impl/throw_error.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP\n#define ASIO_DETAIL_IMPL_THROW_ERROR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/system_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nvoid do_throw_error(const asio::error_code& err)\n{\n  asio::system_error e(err);\n  asio::detail::throw_exception(e);\n}\n\nvoid do_throw_error(const asio::error_code& err, const char* location)\n{\n  // boostify: non-boost code starts here\n#if defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)\n  // Microsoft's implementation of std::system_error is non-conformant in that\n  // it ignores the error code's message when a \"what\" string is supplied. We'll\n  // work around this by explicitly formatting the \"what\" string.\n  std::string what_msg = location;\n  what_msg += \": \";\n  what_msg += err.message();\n  asio::system_error e(err, what_msg);\n  asio::detail::throw_exception(e);\n#else // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)\n  // boostify: non-boost code ends here\n  asio::system_error e(err, location);\n  asio::detail::throw_exception(e);\n  // boostify: non-boost code starts here\n#endif // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)\n  // boostify: non-boost code ends here\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/timer_queue_ptime.ipp",
    "content": "//\n// detail/impl/timer_queue_ptime.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP\n#define ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME)\n\n#include \"asio/detail/timer_queue_ptime.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntimer_queue<time_traits<boost::posix_time::ptime> >::timer_queue()\n{\n}\n\ntimer_queue<time_traits<boost::posix_time::ptime> >::~timer_queue()\n{\n}\n\nbool timer_queue<time_traits<boost::posix_time::ptime> >::enqueue_timer(\n    const time_type& time, per_timer_data& timer, wait_op* op)\n{\n  return impl_.enqueue_timer(time, timer, op);\n}\n\nbool timer_queue<time_traits<boost::posix_time::ptime> >::empty() const\n{\n  return impl_.empty();\n}\n\nlong timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_msec(\n    long max_duration) const\n{\n  return impl_.wait_duration_msec(max_duration);\n}\n\nlong timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_usec(\n    long max_duration) const\n{\n  return impl_.wait_duration_usec(max_duration);\n}\n\nvoid timer_queue<time_traits<boost::posix_time::ptime> >::get_ready_timers(\n    op_queue<operation>& ops)\n{\n  impl_.get_ready_timers(ops);\n}\n\nvoid timer_queue<time_traits<boost::posix_time::ptime> >::get_all_timers(\n    op_queue<operation>& ops)\n{\n  impl_.get_all_timers(ops);\n}\n\nstd::size_t timer_queue<time_traits<boost::posix_time::ptime> >::cancel_timer(\n    per_timer_data& timer, op_queue<operation>& ops, std::size_t 
max_cancelled)\n{\n  return impl_.cancel_timer(timer, ops, max_cancelled);\n}\n\nvoid timer_queue<time_traits<boost::posix_time::ptime> >::move_timer(\n    per_timer_data& target, per_timer_data& source)\n{\n  impl_.move_timer(target, source);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n\n#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/timer_queue_set.ipp",
    "content": "//\n// detail/impl/timer_queue_set.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP\n#define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/timer_queue_set.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntimer_queue_set::timer_queue_set()\n  : first_(0)\n{\n}\n\nvoid timer_queue_set::insert(timer_queue_base* q)\n{\n  q->next_ = first_;\n  first_ = q;\n}\n\nvoid timer_queue_set::erase(timer_queue_base* q)\n{\n  if (first_)\n  {\n    if (q == first_)\n    {\n      first_ = q->next_;\n      q->next_ = 0;\n      return;\n    }\n\n    for (timer_queue_base* p = first_; p->next_; p = p->next_)\n    {\n      if (p->next_ == q)\n      {\n        p->next_ = q->next_;\n        q->next_ = 0;\n        return;\n      }\n    }\n  }\n}\n\nbool timer_queue_set::all_empty() const\n{\n  for (timer_queue_base* p = first_; p; p = p->next_)\n    if (!p->empty())\n      return false;\n  return true;\n}\n\nlong timer_queue_set::wait_duration_msec(long max_duration) const\n{\n  long min_duration = max_duration;\n  for (timer_queue_base* p = first_; p; p = p->next_)\n    min_duration = p->wait_duration_msec(min_duration);\n  return min_duration;\n}\n\nlong timer_queue_set::wait_duration_usec(long max_duration) const\n{\n  long min_duration = max_duration;\n  for (timer_queue_base* p = first_; p; p = p->next_)\n    min_duration = p->wait_duration_usec(min_duration);\n  return min_duration;\n}\n\nvoid timer_queue_set::get_ready_timers(op_queue<operation>& ops)\n{\n  for 
(timer_queue_base* p = first_; p; p = p->next_)\n    p->get_ready_timers(ops);\n}\n\nvoid timer_queue_set::get_all_timers(op_queue<operation>& ops)\n{\n  for (timer_queue_base* p = first_; p; p = p->next_)\n    p->get_all_timers(ops);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_event.ipp",
    "content": "//\n// detail/win_event.ipp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP\n#define ASIO_DETAIL_IMPL_WIN_EVENT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS)\n\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/win_event.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nwin_event::win_event()\n  : state_(0)\n{\n#if defined(ASIO_WINDOWS_APP)\n  events_[0] = ::CreateEventExW(0, 0,\n      CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);\n#else // defined(ASIO_WINDOWS_APP)\n  events_[0] = ::CreateEventW(0, true, false, 0);\n#endif // defined(ASIO_WINDOWS_APP)\n  if (!events_[0])\n  {\n    DWORD last_error = ::GetLastError();\n    asio::error_code ec(last_error,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"event\");\n  }\n\n#if defined(ASIO_WINDOWS_APP)\n  events_[1] = ::CreateEventExW(0, 0, 0, EVENT_ALL_ACCESS);\n#else // defined(ASIO_WINDOWS_APP)\n  events_[1] = ::CreateEventW(0, false, false, 0);\n#endif // defined(ASIO_WINDOWS_APP)\n  if (!events_[1])\n  {\n    DWORD last_error = ::GetLastError();\n    ::CloseHandle(events_[0]);\n    asio::error_code ec(last_error,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"event\");\n  }\n}\n\nwin_event::~win_event()\n{\n  ::CloseHandle(events_[0]);\n  ::CloseHandle(events_[1]);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_iocp_handle_service.ipp",
    "content": "//\n// detail/impl/win_iocp_handle_service.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP\n#define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/win_iocp_handle_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_iocp_handle_service::overlapped_wrapper\n  : public OVERLAPPED\n{\npublic:\n  explicit overlapped_wrapper(asio::error_code& ec)\n  {\n    Internal = 0;\n    InternalHigh = 0;\n    Offset = 0;\n    OffsetHigh = 0;\n\n    // Create a non-signalled manual-reset event, for GetOverlappedResult.\n    hEvent = ::CreateEventW(0, TRUE, FALSE, 0);\n    if (hEvent)\n    {\n      // As documented in GetQueuedCompletionStatus, setting the low order\n      // bit of this event prevents our synchronous writes from being treated\n      // as completion port events.\n      DWORD_PTR tmp = reinterpret_cast<DWORD_PTR>(hEvent);\n      hEvent = reinterpret_cast<HANDLE>(tmp | 1);\n    }\n    else\n    {\n      DWORD last_error = ::GetLastError();\n      ec = asio::error_code(last_error,\n          asio::error::get_system_category());\n    }\n  }\n\n  ~overlapped_wrapper()\n  {\n    if (hEvent)\n    {\n      ::CloseHandle(hEvent);\n    }\n  }\n};\n\nwin_iocp_handle_service::win_iocp_handle_service(execution_context& context)\n  : execution_context_service_base<win_iocp_handle_service>(context),\n    
iocp_service_(asio::use_service<win_iocp_io_context>(context)),\n    mutex_(),\n    impl_list_(0)\n{\n}\n\nvoid win_iocp_handle_service::shutdown()\n{\n  // Close all implementations, causing all operations to complete.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  implementation_type* impl = impl_list_;\n  while (impl)\n  {\n    close_for_destruction(*impl);\n    impl = impl->next_;\n  }\n}\n\nvoid win_iocp_handle_service::construct(\n    win_iocp_handle_service::implementation_type& impl)\n{\n  impl.handle_ = INVALID_HANDLE_VALUE;\n  impl.safe_cancellation_thread_id_ = 0;\n\n  // Insert implementation into linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  impl.next_ = impl_list_;\n  impl.prev_ = 0;\n  if (impl_list_)\n    impl_list_->prev_ = &impl;\n  impl_list_ = &impl;\n}\n\nvoid win_iocp_handle_service::move_construct(\n    win_iocp_handle_service::implementation_type& impl,\n    win_iocp_handle_service::implementation_type& other_impl)\n{\n  impl.handle_ = other_impl.handle_;\n  other_impl.handle_ = INVALID_HANDLE_VALUE;\n\n  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;\n  other_impl.safe_cancellation_thread_id_ = 0;\n\n  // Insert implementation into linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  impl.next_ = impl_list_;\n  impl.prev_ = 0;\n  if (impl_list_)\n    impl_list_->prev_ = &impl;\n  impl_list_ = &impl;\n}\n\nvoid win_iocp_handle_service::move_assign(\n    win_iocp_handle_service::implementation_type& impl,\n    win_iocp_handle_service& other_service,\n    win_iocp_handle_service::implementation_type& other_impl)\n{\n  close_for_destruction(impl);\n\n  if (this != &other_service)\n  {\n    // Remove implementation from linked list of all implementations.\n    asio::detail::mutex::scoped_lock lock(mutex_);\n    if (impl_list_ == &impl)\n      impl_list_ = impl.next_;\n    if (impl.prev_)\n      impl.prev_->next_ = impl.next_;\n    
if (impl.next_)\n      impl.next_->prev_= impl.prev_;\n    impl.next_ = 0;\n    impl.prev_ = 0;\n  }\n\n  impl.handle_ = other_impl.handle_;\n  other_impl.handle_ = INVALID_HANDLE_VALUE;\n\n  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;\n  other_impl.safe_cancellation_thread_id_ = 0;\n\n  if (this != &other_service)\n  {\n    // Insert implementation into linked list of all implementations.\n    asio::detail::mutex::scoped_lock lock(other_service.mutex_);\n    impl.next_ = other_service.impl_list_;\n    impl.prev_ = 0;\n    if (other_service.impl_list_)\n      other_service.impl_list_->prev_ = &impl;\n    other_service.impl_list_ = &impl;\n  }\n}\n\nvoid win_iocp_handle_service::destroy(\n    win_iocp_handle_service::implementation_type& impl)\n{\n  close_for_destruction(impl);\n  \n  // Remove implementation from linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  if (impl_list_ == &impl)\n    impl_list_ = impl.next_;\n  if (impl.prev_)\n    impl.prev_->next_ = impl.next_;\n  if (impl.next_)\n    impl.next_->prev_= impl.prev_;\n  impl.next_ = 0;\n  impl.prev_ = 0;\n}\n\nasio::error_code win_iocp_handle_service::assign(\n    win_iocp_handle_service::implementation_type& impl,\n    const native_handle_type& handle, asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  if (iocp_service_.register_handle(handle, ec))\n    return ec;\n\n  impl.handle_ = handle;\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code win_iocp_handle_service::close(\n    win_iocp_handle_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((iocp_service_.context(), \"handle\",\n          &impl, reinterpret_cast<uintmax_t>(impl.handle_), \"close\"));\n\n    if (!::CloseHandle(impl.handle_))\n    {\n      DWORD last_error = ::GetLastError();\n      ec = asio::error_code(last_error,\n      
    asio::error::get_system_category());\n    }\n    else\n    {\n      ec = asio::error_code();\n    }\n\n    impl.handle_ = INVALID_HANDLE_VALUE;\n    impl.safe_cancellation_thread_id_ = 0;\n  }\n  else\n  {\n    ec = asio::error_code();\n  }\n\n  return ec;\n}\n\nasio::error_code win_iocp_handle_service::cancel(\n    win_iocp_handle_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return ec;\n  }\n\n  ASIO_HANDLER_OPERATION((iocp_service_.context(), \"handle\",\n        &impl, reinterpret_cast<uintmax_t>(impl.handle_), \"cancel\"));\n\n  if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(\n        ::GetModuleHandleA(\"KERNEL32\"), \"CancelIoEx\"))\n  {\n    // The version of Windows supports cancellation from any thread.\n    typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);\n    cancel_io_ex_t cancel_io_ex = reinterpret_cast<cancel_io_ex_t>(\n        reinterpret_cast<void*>(cancel_io_ex_ptr));\n    if (!cancel_io_ex(impl.handle_, 0))\n    {\n      DWORD last_error = ::GetLastError();\n      if (last_error == ERROR_NOT_FOUND)\n      {\n        // ERROR_NOT_FOUND means that there were no operations to be\n        // cancelled. 
We swallow this error to match the behaviour on other\n        // platforms.\n        ec = asio::error_code();\n      }\n      else\n      {\n        ec = asio::error_code(last_error,\n            asio::error::get_system_category());\n      }\n    }\n    else\n    {\n      ec = asio::error_code();\n    }\n  }\n  else if (impl.safe_cancellation_thread_id_ == 0)\n  {\n    // No operations have been started, so there's nothing to cancel.\n    ec = asio::error_code();\n  }\n  else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())\n  {\n    // Asynchronous operations have been started from the current thread only,\n    // so it is safe to try to cancel them using CancelIo.\n    if (!::CancelIo(impl.handle_))\n    {\n      DWORD last_error = ::GetLastError();\n      ec = asio::error_code(last_error,\n          asio::error::get_system_category());\n    }\n    else\n    {\n      ec = asio::error_code();\n    }\n  }\n  else\n  {\n    // Asynchronous operations have been started from more than one thread,\n    // so cancellation is not safe.\n    ec = asio::error::operation_not_supported;\n  }\n\n  return ec;\n}\n\nsize_t win_iocp_handle_service::do_write(\n    win_iocp_handle_service::implementation_type& impl, uint64_t offset,\n    const asio::const_buffer& buffer, asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  // A request to write 0 bytes on a handle is a no-op.\n  if (buffer.size() == 0)\n  {\n    ec = asio::error_code();\n    return 0;\n  }\n\n  overlapped_wrapper overlapped(ec);\n  if (ec)\n  {\n    return 0;\n  }\n\n  // Write the data. 
\n  overlapped.Offset = offset & 0xFFFFFFFF;\n  overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;\n  BOOL ok = ::WriteFile(impl.handle_, buffer.data(),\n      static_cast<DWORD>(buffer.size()), 0, &overlapped);\n  if (!ok) \n  {\n    DWORD last_error = ::GetLastError();\n    if (last_error != ERROR_IO_PENDING)\n    {\n      ec = asio::error_code(last_error,\n          asio::error::get_system_category());\n      return 0;\n    }\n  }\n\n  // Wait for the operation to complete.\n  DWORD bytes_transferred = 0;\n  ok = ::GetOverlappedResult(impl.handle_,\n      &overlapped, &bytes_transferred, TRUE);\n  if (!ok)\n  {\n    DWORD last_error = ::GetLastError();\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n    return 0;\n  }\n\n  ec = asio::error_code();\n  return bytes_transferred;\n}\n\nvoid win_iocp_handle_service::start_write_op(\n    win_iocp_handle_service::implementation_type& impl, uint64_t offset,\n    const asio::const_buffer& buffer, operation* op)\n{\n  update_cancellation_thread_id(impl);\n  iocp_service_.work_started();\n\n  if (!is_open(impl))\n  {\n    iocp_service_.on_completion(op, asio::error::bad_descriptor);\n  }\n  else if (buffer.size() == 0)\n  {\n    // A request to write 0 bytes on a handle is a no-op.\n    iocp_service_.on_completion(op);\n  }\n  else\n  {\n    DWORD bytes_transferred = 0;\n    op->Offset = offset & 0xFFFFFFFF;\n    op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;\n    BOOL ok = ::WriteFile(impl.handle_, buffer.data(),\n        static_cast<DWORD>(buffer.size()),\n        &bytes_transferred, op);\n    DWORD last_error = ::GetLastError();\n    if (!ok && last_error != ERROR_IO_PENDING\n        && last_error != ERROR_MORE_DATA)\n    {\n      iocp_service_.on_completion(op, last_error, bytes_transferred);\n    }\n    else\n    {\n      iocp_service_.on_pending(op);\n    }\n  }\n}\n\nsize_t win_iocp_handle_service::do_read(\n    win_iocp_handle_service::implementation_type& impl, uint64_t 
offset,\n    const asio::mutable_buffer& buffer, asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n  \n  // A request to read 0 bytes on a stream handle is a no-op.\n  if (buffer.size() == 0)\n  {\n    ec = asio::error_code();\n    return 0;\n  }\n\n  overlapped_wrapper overlapped(ec);\n  if (ec)\n  {\n    return 0;\n  }\n\n  // Read some data.\n  overlapped.Offset = offset & 0xFFFFFFFF;\n  overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;\n  BOOL ok = ::ReadFile(impl.handle_, buffer.data(),\n      static_cast<DWORD>(buffer.size()), 0, &overlapped);\n  if (!ok) \n  {\n    DWORD last_error = ::GetLastError();\n    if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA)\n    {\n      if (last_error == ERROR_HANDLE_EOF)\n      {\n        ec = asio::error::eof;\n      }\n      else\n      {\n        ec = asio::error_code(last_error,\n            asio::error::get_system_category());\n      }\n      return 0;\n    }\n  }\n\n  // Wait for the operation to complete.\n  DWORD bytes_transferred = 0;\n  ok = ::GetOverlappedResult(impl.handle_,\n      &overlapped, &bytes_transferred, TRUE);\n  if (!ok)\n  {\n    DWORD last_error = ::GetLastError();\n    if (last_error == ERROR_HANDLE_EOF)\n    {\n      ec = asio::error::eof;\n    }\n    else\n    {\n      ec = asio::error_code(last_error,\n          asio::error::get_system_category());\n    }\n    return (last_error == ERROR_MORE_DATA) ? 
bytes_transferred : 0;\n  }\n\n  ec = asio::error_code();\n  return bytes_transferred;\n}\n\nvoid win_iocp_handle_service::start_read_op(\n    win_iocp_handle_service::implementation_type& impl, uint64_t offset,\n    const asio::mutable_buffer& buffer, operation* op)\n{\n  update_cancellation_thread_id(impl);\n  iocp_service_.work_started();\n\n  if (!is_open(impl))\n  {\n    iocp_service_.on_completion(op, asio::error::bad_descriptor);\n  }\n  else if (buffer.size() == 0)\n  {\n    // A request to read 0 bytes on a handle is a no-op.\n    iocp_service_.on_completion(op);\n  }\n  else\n  {\n    DWORD bytes_transferred = 0;\n    op->Offset = offset & 0xFFFFFFFF;\n    op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;\n    BOOL ok = ::ReadFile(impl.handle_, buffer.data(),\n        static_cast<DWORD>(buffer.size()),\n        &bytes_transferred, op);\n    DWORD last_error = ::GetLastError();\n    if (!ok && last_error != ERROR_IO_PENDING\n        && last_error != ERROR_MORE_DATA)\n    {\n      iocp_service_.on_completion(op, last_error, bytes_transferred);\n    }\n    else\n    {\n      iocp_service_.on_pending(op);\n    }\n  }\n}\n\nvoid win_iocp_handle_service::update_cancellation_thread_id(\n    win_iocp_handle_service::implementation_type& impl)\n{\n  if (impl.safe_cancellation_thread_id_ == 0)\n    impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();\n  else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())\n    impl.safe_cancellation_thread_id_ = ~DWORD(0);\n}\n\nvoid win_iocp_handle_service::close_for_destruction(implementation_type& impl)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((iocp_service_.context(), \"handle\",\n          &impl, reinterpret_cast<uintmax_t>(impl.handle_), \"close\"));\n\n    ::CloseHandle(impl.handle_);\n    impl.handle_ = INVALID_HANDLE_VALUE;\n    impl.safe_cancellation_thread_id_ = 0;\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // 
defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_iocp_io_context.hpp",
    "content": "//\n// detail/impl/win_iocp_io_context.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP\n#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/completion_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Time_Traits>\nvoid win_iocp_io_context::add_timer_queue(\n    timer_queue<Time_Traits>& queue)\n{\n  do_add_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid win_iocp_io_context::remove_timer_queue(\n    timer_queue<Time_Traits>& queue)\n{\n  do_remove_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid win_iocp_io_context::schedule_timer(timer_queue<Time_Traits>& queue,\n    const typename Time_Traits::time_type& time,\n    typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)\n{\n  // If the service has been shut down we silently discard the timer.\n  if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)\n  {\n    post_immediate_completion(op, false);\n    return;\n  }\n\n  mutex::scoped_lock lock(dispatch_mutex_);\n\n  bool earliest = queue.enqueue_timer(time, timer, op);\n  work_started();\n  if (earliest)\n    update_timeout();\n}\n\ntemplate <typename Time_Traits>\nstd::size_t win_iocp_io_context::cancel_timer(timer_queue<Time_Traits>& queue,\n    typename 
timer_queue<Time_Traits>::per_timer_data& timer,\n    std::size_t max_cancelled)\n{\n  // If the service has been shut down we silently ignore the cancellation.\n  if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)\n    return 0;\n\n  mutex::scoped_lock lock(dispatch_mutex_);\n  op_queue<win_iocp_operation> ops;\n  std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);\n  post_deferred_completions(ops);\n  return n;\n}\n\ntemplate <typename Time_Traits>\nvoid win_iocp_io_context::move_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& to,\n    typename timer_queue<Time_Traits>::per_timer_data& from)\n{\n  asio::detail::mutex::scoped_lock lock(dispatch_mutex_);\n  op_queue<operation> ops;\n  queue.cancel_timer(to, ops);\n  queue.move_timer(to, from);\n  lock.unlock();\n  post_deferred_completions(ops);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_iocp_io_context.ipp",
    "content": "//\n// detail/impl/win_iocp_io_context.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP\n#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/error.hpp\"\n#include \"asio/detail/cstdint.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/thread.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/win_iocp_io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct win_iocp_io_context::thread_function\n{\n  explicit thread_function(win_iocp_io_context* s)\n    : this_(s)\n  {\n  }\n\n  void operator()()\n  {\n    asio::error_code ec;\n    this_->run(ec);\n  }\n\n  win_iocp_io_context* this_;\n};\n\nstruct win_iocp_io_context::work_finished_on_block_exit\n{\n  ~work_finished_on_block_exit()\n  {\n    io_context_->work_finished();\n  }\n\n  win_iocp_io_context* io_context_;\n};\n\nstruct win_iocp_io_context::timer_thread_function\n{\n  void operator()()\n  {\n    while (::InterlockedExchangeAdd(&io_context_->shutdown_, 0) == 0)\n    {\n      if (::WaitForSingleObject(io_context_->waitable_timer_.handle,\n            INFINITE) == WAIT_OBJECT_0)\n      {\n        ::InterlockedExchange(&io_context_->dispatch_required_, 1);\n        ::PostQueuedCompletionStatus(io_context_->iocp_.handle,\n            0, wake_for_dispatch, 0);\n      }\n    }\n  }\n\n  win_iocp_io_context* 
io_context_;\n};\n\nwin_iocp_io_context::win_iocp_io_context(\n    asio::execution_context& ctx, int concurrency_hint, bool own_thread)\n  : execution_context_service_base<win_iocp_io_context>(ctx),\n    iocp_(),\n    outstanding_work_(0),\n    stopped_(0),\n    stop_event_posted_(0),\n    shutdown_(0),\n    gqcs_timeout_(get_gqcs_timeout()),\n    dispatch_required_(0),\n    concurrency_hint_(concurrency_hint)\n{\n  ASIO_HANDLER_TRACKING_INIT;\n\n  iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,\n      static_cast<DWORD>(concurrency_hint >= 0 ? concurrency_hint : DWORD(~0)));\n  if (!iocp_.handle)\n  {\n    DWORD last_error = ::GetLastError();\n    asio::error_code ec(last_error,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"iocp\");\n  }\n\n  if (own_thread)\n  {\n    ::InterlockedIncrement(&outstanding_work_);\n    thread_.reset(new asio::detail::thread(thread_function(this)));\n  }\n}\n\nwin_iocp_io_context::~win_iocp_io_context()\n{\n  if (thread_.get())\n  {\n    thread_->join();\n    thread_.reset();\n  }\n}\n\nvoid win_iocp_io_context::shutdown()\n{\n  ::InterlockedExchange(&shutdown_, 1);\n\n  if (timer_thread_.get())\n  {\n    LARGE_INTEGER timeout;\n    timeout.QuadPart = 1;\n    ::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE);\n  }\n\n  if (thread_.get())\n  {\n    thread_->join();\n    thread_.reset();\n    ::InterlockedDecrement(&outstanding_work_);\n  }\n\n  while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0)\n  {\n    op_queue<win_iocp_operation> ops;\n    timer_queues_.get_all_timers(ops);\n    ops.push(completed_ops_);\n    if (!ops.empty())\n    {\n      while (win_iocp_operation* op = ops.front())\n      {\n        ops.pop();\n        ::InterlockedDecrement(&outstanding_work_);\n        op->destroy();\n      }\n    }\n    else\n    {\n      DWORD bytes_transferred = 0;\n      dword_ptr_t completion_key = 0;\n      LPOVERLAPPED overlapped = 0;\n      
::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,\n          &completion_key, &overlapped, gqcs_timeout_);\n      if (overlapped)\n      {\n        ::InterlockedDecrement(&outstanding_work_);\n        static_cast<win_iocp_operation*>(overlapped)->destroy();\n      }\n    }\n  }\n\n  if (timer_thread_.get())\n    timer_thread_->join();\n}\n\nasio::error_code win_iocp_io_context::register_handle(\n    HANDLE handle, asio::error_code& ec)\n{\n  if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0)\n  {\n    DWORD last_error = ::GetLastError();\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n  }\n  else\n  {\n    ec = asio::error_code();\n  }\n  return ec;\n}\n\nsize_t win_iocp_io_context::run(asio::error_code& ec)\n{\n  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)\n  {\n    stop();\n    ec = asio::error_code();\n    return 0;\n  }\n\n  win_iocp_thread_info this_thread;\n  thread_call_stack::context ctx(this, this_thread);\n\n  size_t n = 0;\n  while (do_one(INFINITE, ec))\n    if (n != (std::numeric_limits<size_t>::max)())\n      ++n;\n  return n;\n}\n\nsize_t win_iocp_io_context::run_one(asio::error_code& ec)\n{\n  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)\n  {\n    stop();\n    ec = asio::error_code();\n    return 0;\n  }\n\n  win_iocp_thread_info this_thread;\n  thread_call_stack::context ctx(this, this_thread);\n\n  return do_one(INFINITE, ec);\n}\n\nsize_t win_iocp_io_context::wait_one(long usec, asio::error_code& ec)\n{\n  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)\n  {\n    stop();\n    ec = asio::error_code();\n    return 0;\n  }\n\n  win_iocp_thread_info this_thread;\n  thread_call_stack::context ctx(this, this_thread);\n\n  return do_one(usec < 0 ? 
INFINITE : ((usec - 1) / 1000 + 1), ec);\n}\n\nsize_t win_iocp_io_context::poll(asio::error_code& ec)\n{\n  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)\n  {\n    stop();\n    ec = asio::error_code();\n    return 0;\n  }\n\n  win_iocp_thread_info this_thread;\n  thread_call_stack::context ctx(this, this_thread);\n\n  size_t n = 0;\n  while (do_one(0, ec))\n    if (n != (std::numeric_limits<size_t>::max)())\n      ++n;\n  return n;\n}\n\nsize_t win_iocp_io_context::poll_one(asio::error_code& ec)\n{\n  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)\n  {\n    stop();\n    ec = asio::error_code();\n    return 0;\n  }\n\n  win_iocp_thread_info this_thread;\n  thread_call_stack::context ctx(this, this_thread);\n\n  return do_one(0, ec);\n}\n\nvoid win_iocp_io_context::stop()\n{\n  if (::InterlockedExchange(&stopped_, 1) == 0)\n  {\n    if (::InterlockedExchange(&stop_event_posted_, 1) == 0)\n    {\n      if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))\n      {\n        DWORD last_error = ::GetLastError();\n        asio::error_code ec(last_error,\n            asio::error::get_system_category());\n        asio::detail::throw_error(ec, \"pqcs\");\n      }\n    }\n  }\n}\n\nvoid win_iocp_io_context::post_deferred_completion(win_iocp_operation* op)\n{\n  // Flag the operation as ready.\n  op->ready_ = 1;\n\n  // Enqueue the operation on the I/O completion port.\n  if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))\n  {\n    // Out of resources. 
Put on completed queue instead.\n    mutex::scoped_lock lock(dispatch_mutex_);\n    completed_ops_.push(op);\n    ::InterlockedExchange(&dispatch_required_, 1);\n  }\n}\n\nvoid win_iocp_io_context::post_deferred_completions(\n    op_queue<win_iocp_operation>& ops)\n{\n  while (win_iocp_operation* op = ops.front())\n  {\n    ops.pop();\n\n    // Flag the operation as ready.\n    op->ready_ = 1;\n\n    // Enqueue the operation on the I/O completion port.\n    if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))\n    {\n      // Out of resources. Put on completed queue instead.\n      mutex::scoped_lock lock(dispatch_mutex_);\n      completed_ops_.push(op);\n      completed_ops_.push(ops);\n      ::InterlockedExchange(&dispatch_required_, 1);\n    }\n  }\n}\n\nvoid win_iocp_io_context::abandon_operations(\n    op_queue<win_iocp_operation>& ops)\n{\n  while (win_iocp_operation* op = ops.front())\n  {\n    ops.pop();\n    ::InterlockedDecrement(&outstanding_work_);\n    op->destroy();\n  }\n}\n\nvoid win_iocp_io_context::on_pending(win_iocp_operation* op)\n{\n  if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)\n  {\n    // Enqueue the operation on the I/O completion port.\n    if (!::PostQueuedCompletionStatus(iocp_.handle,\n          0, overlapped_contains_result, op))\n    {\n      // Out of resources. 
Put on completed queue instead.\n      mutex::scoped_lock lock(dispatch_mutex_);\n      completed_ops_.push(op);\n      ::InterlockedExchange(&dispatch_required_, 1);\n    }\n  }\n}\n\nvoid win_iocp_io_context::on_completion(win_iocp_operation* op,\n    DWORD last_error, DWORD bytes_transferred)\n{\n  // Flag that the operation is ready for invocation.\n  op->ready_ = 1;\n\n  // Store results in the OVERLAPPED structure.\n  op->Internal = reinterpret_cast<ulong_ptr_t>(\n      &asio::error::get_system_category());\n  op->Offset = last_error;\n  op->OffsetHigh = bytes_transferred;\n\n  // Enqueue the operation on the I/O completion port.\n  if (!::PostQueuedCompletionStatus(iocp_.handle,\n        0, overlapped_contains_result, op))\n  {\n    // Out of resources. Put on completed queue instead.\n    mutex::scoped_lock lock(dispatch_mutex_);\n    completed_ops_.push(op);\n    ::InterlockedExchange(&dispatch_required_, 1);\n  }\n}\n\nvoid win_iocp_io_context::on_completion(win_iocp_operation* op,\n    const asio::error_code& ec, DWORD bytes_transferred)\n{\n  // Flag that the operation is ready for invocation.\n  op->ready_ = 1;\n\n  // Store results in the OVERLAPPED structure.\n  op->Internal = reinterpret_cast<ulong_ptr_t>(&ec.category());\n  op->Offset = ec.value();\n  op->OffsetHigh = bytes_transferred;\n\n  // Enqueue the operation on the I/O completion port.\n  if (!::PostQueuedCompletionStatus(iocp_.handle,\n        0, overlapped_contains_result, op))\n  {\n    // Out of resources. 
Put on completed queue instead.\n    mutex::scoped_lock lock(dispatch_mutex_);\n    completed_ops_.push(op);\n    ::InterlockedExchange(&dispatch_required_, 1);\n  }\n}\n\nsize_t win_iocp_io_context::do_one(DWORD msec, asio::error_code& ec)\n{\n  for (;;)\n  {\n    // Try to acquire responsibility for dispatching timers and completed ops.\n    if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1)\n    {\n      mutex::scoped_lock lock(dispatch_mutex_);\n\n      // Dispatch pending timers and operations.\n      op_queue<win_iocp_operation> ops;\n      ops.push(completed_ops_);\n      timer_queues_.get_ready_timers(ops);\n      post_deferred_completions(ops);\n      update_timeout();\n    }\n\n    // Get the next operation from the queue.\n    DWORD bytes_transferred = 0;\n    dword_ptr_t completion_key = 0;\n    LPOVERLAPPED overlapped = 0;\n    ::SetLastError(0);\n    BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle,\n        &bytes_transferred, &completion_key, &overlapped,\n        msec < gqcs_timeout_ ? 
msec : gqcs_timeout_);\n    DWORD last_error = ::GetLastError();\n\n    if (overlapped)\n    {\n      win_iocp_operation* op = static_cast<win_iocp_operation*>(overlapped);\n      asio::error_code result_ec(last_error,\n          asio::error::get_system_category());\n\n      // We may have been passed the last_error and bytes_transferred in the\n      // OVERLAPPED structure itself.\n      if (completion_key == overlapped_contains_result)\n      {\n        result_ec = asio::error_code(static_cast<int>(op->Offset),\n            *reinterpret_cast<asio::error_category*>(op->Internal));\n        bytes_transferred = op->OffsetHigh;\n      }\n\n      // Otherwise ensure any result has been saved into the OVERLAPPED\n      // structure.\n      else\n      {\n        op->Internal = reinterpret_cast<ulong_ptr_t>(&result_ec.category());\n        op->Offset = result_ec.value();\n        op->OffsetHigh = bytes_transferred;\n      }\n\n      // Dispatch the operation only if ready. The operation may not be ready\n      // if the initiating function (e.g. a call to WSARecv) has not yet\n      // returned. 
This is because the initiating function still wants access\n      // to the operation's OVERLAPPED structure.\n      if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)\n      {\n        // Ensure the count of outstanding work is decremented on block exit.\n        work_finished_on_block_exit on_exit = { this };\n        (void)on_exit;\n\n        op->complete(this, result_ec, bytes_transferred);\n        ec = asio::error_code();\n        return 1;\n      }\n    }\n    else if (!ok)\n    {\n      if (last_error != WAIT_TIMEOUT)\n      {\n        ec = asio::error_code(last_error,\n            asio::error::get_system_category());\n        return 0;\n      }\n\n      // If we're waiting indefinitely we need to keep going until we get a\n      // real handler.\n      if (msec == INFINITE)\n        continue;\n\n      ec = asio::error_code();\n      return 0;\n    }\n    else if (completion_key == wake_for_dispatch)\n    {\n      // We have been woken up to try to acquire responsibility for dispatching\n      // timers and completed operations.\n    }\n    else\n    {\n      // Indicate that there is no longer an in-flight stop event.\n      ::InterlockedExchange(&stop_event_posted_, 0);\n\n      // The stopped_ flag is always checked to ensure that any leftover\n      // stop events from a previous run invocation are ignored.\n      if (::InterlockedExchangeAdd(&stopped_, 0) != 0)\n      {\n        // Wake up next thread that is blocked on GetQueuedCompletionStatus.\n        if (::InterlockedExchange(&stop_event_posted_, 1) == 0)\n        {\n          if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))\n          {\n            last_error = ::GetLastError();\n            ec = asio::error_code(last_error,\n                asio::error::get_system_category());\n            return 0;\n          }\n        }\n\n        ec = asio::error_code();\n        return 0;\n      }\n    }\n  }\n}\n\nDWORD win_iocp_io_context::get_gqcs_timeout()\n{\n  OSVERSIONINFOEX osvi;\n  
ZeroMemory(&osvi, sizeof(osvi));\n  osvi.dwOSVersionInfoSize = sizeof(osvi);\n  osvi.dwMajorVersion = 6ul;\n\n  const uint64_t condition_mask = ::VerSetConditionMask(\n      0, VER_MAJORVERSION, VER_GREATER_EQUAL);\n\n  if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask))\n    return INFINITE;\n\n  return default_gqcs_timeout;\n}\n\nvoid win_iocp_io_context::do_add_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(dispatch_mutex_);\n\n  timer_queues_.insert(&queue);\n\n  if (!waitable_timer_.handle)\n  {\n    waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0);\n    if (waitable_timer_.handle == 0)\n    {\n      DWORD last_error = ::GetLastError();\n      asio::error_code ec(last_error,\n          asio::error::get_system_category());\n      asio::detail::throw_error(ec, \"timer\");\n    }\n\n    LARGE_INTEGER timeout;\n    timeout.QuadPart = -max_timeout_usec;\n    timeout.QuadPart *= 10;\n    ::SetWaitableTimer(waitable_timer_.handle,\n        &timeout, max_timeout_msec, 0, 0, FALSE);\n  }\n\n  if (!timer_thread_.get())\n  {\n    timer_thread_function thread_function = { this };\n    timer_thread_.reset(new thread(thread_function, 65536));\n  }\n}\n\nvoid win_iocp_io_context::do_remove_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(dispatch_mutex_);\n\n  timer_queues_.erase(&queue);\n}\n\nvoid win_iocp_io_context::update_timeout()\n{\n  if (timer_thread_.get())\n  {\n    // There's no point updating the waitable timer if the new timeout period\n    // exceeds the maximum timeout. 
In that case, we might as well wait for the\n    // existing period of the timer to expire.\n    long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec);\n    if (timeout_usec < max_timeout_usec)\n    {\n      LARGE_INTEGER timeout;\n      timeout.QuadPart = -timeout_usec;\n      timeout.QuadPart *= 10;\n      ::SetWaitableTimer(waitable_timer_.handle,\n          &timeout, max_timeout_msec, 0, 0, FALSE);\n    }\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_iocp_serial_port_service.ipp",
    "content": "//\n// detail/impl/win_iocp_serial_port_service.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP\n#define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)\n\n#include <cstring>\n#include \"asio/detail/win_iocp_serial_port_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nwin_iocp_serial_port_service::win_iocp_serial_port_service(\n    execution_context& context)\n  : execution_context_service_base<win_iocp_serial_port_service>(context),\n    handle_service_(context)\n{\n}\n\nvoid win_iocp_serial_port_service::shutdown()\n{\n}\n\nasio::error_code win_iocp_serial_port_service::open(\n    win_iocp_serial_port_service::implementation_type& impl,\n    const std::string& device, asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  // For convenience, add a leading \\\\.\\ sequence if not already present.\n  std::string name = (device[0] == '\\\\') ? 
device : \"\\\\\\\\.\\\\\" + device;\n\n  // Open a handle to the serial port.\n  ::HANDLE handle = ::CreateFileA(name.c_str(),\n      GENERIC_READ | GENERIC_WRITE, 0, 0,\n      OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0);\n  if (handle == INVALID_HANDLE_VALUE)\n  {\n    DWORD last_error = ::GetLastError();\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  // Determine the initial serial port parameters.\n  using namespace std; // For memset.\n  ::DCB dcb;\n  memset(&dcb, 0, sizeof(DCB));\n  dcb.DCBlength = sizeof(DCB);\n  if (!::GetCommState(handle, &dcb))\n  {\n    DWORD last_error = ::GetLastError();\n    ::CloseHandle(handle);\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  // Set some default serial port parameters. This implementation does not\n  // support changing all of these, so they might as well be in a known state.\n  dcb.fBinary = TRUE; // Win32 only supports binary mode.\n  dcb.fNull = FALSE; // Do not ignore NULL characters.\n  dcb.fAbortOnError = FALSE; // Ignore serial framing errors.\n  dcb.BaudRate = 0; // 0 baud by default\n  dcb.ByteSize = 8; // 8 bit bytes\n  dcb.fOutxCtsFlow = FALSE; // No flow control\n  dcb.fOutxDsrFlow = FALSE;\n  dcb.fDtrControl = DTR_CONTROL_DISABLE;\n  dcb.fDsrSensitivity = FALSE;\n  dcb.fOutX = FALSE;\n  dcb.fInX = FALSE;\n  dcb.fRtsControl = DTR_CONTROL_DISABLE;\n  dcb.fParity = FALSE; // No parity\n  dcb.Parity = NOPARITY;\n  dcb.StopBits = ONESTOPBIT; // One stop bit\n  if (!::SetCommState(handle, &dcb))\n  {\n    DWORD last_error = ::GetLastError();\n    ::CloseHandle(handle);\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  // Set up timeouts so that the serial port will behave similarly to a\n  // network socket. Reads wait for at least one byte, then return with\n  // whatever they have. 
Writes return once everything is out the door.\n  ::COMMTIMEOUTS timeouts;\n  timeouts.ReadIntervalTimeout = 1;\n  timeouts.ReadTotalTimeoutMultiplier = 0;\n  timeouts.ReadTotalTimeoutConstant = 0;\n  timeouts.WriteTotalTimeoutMultiplier = 0;\n  timeouts.WriteTotalTimeoutConstant = 0;\n  if (!::SetCommTimeouts(handle, &timeouts))\n  {\n    DWORD last_error = ::GetLastError();\n    ::CloseHandle(handle);\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  // We're done. Take ownership of the serial port handle.\n  if (handle_service_.assign(impl, handle, ec))\n    ::CloseHandle(handle);\n  return ec;\n}\n\nasio::error_code win_iocp_serial_port_service::do_set_option(\n    win_iocp_serial_port_service::implementation_type& impl,\n    win_iocp_serial_port_service::store_function_type store,\n    const void* option, asio::error_code& ec)\n{\n  using namespace std; // For memcpy.\n\n  ::DCB dcb;\n  memset(&dcb, 0, sizeof(DCB));\n  dcb.DCBlength = sizeof(DCB);\n  if (!::GetCommState(handle_service_.native_handle(impl), &dcb))\n  {\n    DWORD last_error = ::GetLastError();\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  if (store(option, dcb, ec))\n    return ec;\n\n  if (!::SetCommState(handle_service_.native_handle(impl), &dcb))\n  {\n    DWORD last_error = ::GetLastError();\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code win_iocp_serial_port_service::do_get_option(\n    const win_iocp_serial_port_service::implementation_type& impl,\n    win_iocp_serial_port_service::load_function_type load,\n    void* option, asio::error_code& ec) const\n{\n  using namespace std; // For memset.\n\n  ::DCB dcb;\n  memset(&dcb, 0, sizeof(DCB));\n  dcb.DCBlength = sizeof(DCB);\n  if (!::GetCommState(handle_service_.native_handle(impl), &dcb))\n  
{\n    DWORD last_error = ::GetLastError();\n    ec = asio::error_code(last_error,\n        asio::error::get_system_category());\n    return ec;\n  }\n\n  return load(option, dcb, ec);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)\n\n#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_iocp_socket_service_base.ipp",
    "content": "//\n// detail/impl/win_iocp_socket_service_base.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP\n#define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/win_iocp_socket_service_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nwin_iocp_socket_service_base::win_iocp_socket_service_base(\n    execution_context& context)\n  : context_(context),\n    iocp_service_(use_service<win_iocp_io_context>(context)),\n    reactor_(0),\n    connect_ex_(0),\n    nt_set_info_(0),\n    mutex_(),\n    impl_list_(0)\n{\n}\n\nvoid win_iocp_socket_service_base::base_shutdown()\n{\n  // Close all implementations, causing all operations to complete.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  base_implementation_type* impl = impl_list_;\n  while (impl)\n  {\n    close_for_destruction(*impl);\n    impl = impl->next_;\n  }\n}\n\nvoid win_iocp_socket_service_base::construct(\n    win_iocp_socket_service_base::base_implementation_type& impl)\n{\n  impl.socket_ = invalid_socket;\n  impl.state_ = 0;\n  impl.cancel_token_.reset();\n#if defined(ASIO_ENABLE_CANCELIO)\n  impl.safe_cancellation_thread_id_ = 0;\n#endif // defined(ASIO_ENABLE_CANCELIO)\n\n  // Insert implementation into linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  impl.next_ = impl_list_;\n  impl.prev_ = 0;\n  if (impl_list_)\n    impl_list_->prev_ = &impl;\n  impl_list_ = &impl;\n}\n\nvoid 
win_iocp_socket_service_base::base_move_construct(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    win_iocp_socket_service_base::base_implementation_type& other_impl)\n  ASIO_NOEXCEPT\n{\n  impl.socket_ = other_impl.socket_;\n  other_impl.socket_ = invalid_socket;\n\n  impl.state_ = other_impl.state_;\n  other_impl.state_ = 0;\n\n  impl.cancel_token_ = other_impl.cancel_token_;\n  other_impl.cancel_token_.reset();\n\n#if defined(ASIO_ENABLE_CANCELIO)\n  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;\n  other_impl.safe_cancellation_thread_id_ = 0;\n#endif // defined(ASIO_ENABLE_CANCELIO)\n\n  // Insert implementation into linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  impl.next_ = impl_list_;\n  impl.prev_ = 0;\n  if (impl_list_)\n    impl_list_->prev_ = &impl;\n  impl_list_ = &impl;\n}\n\nvoid win_iocp_socket_service_base::base_move_assign(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    win_iocp_socket_service_base& other_service,\n    win_iocp_socket_service_base::base_implementation_type& other_impl)\n{\n  close_for_destruction(impl);\n\n  if (this != &other_service)\n  {\n    // Remove implementation from linked list of all implementations.\n    asio::detail::mutex::scoped_lock lock(mutex_);\n    if (impl_list_ == &impl)\n      impl_list_ = impl.next_;\n    if (impl.prev_)\n      impl.prev_->next_ = impl.next_;\n    if (impl.next_)\n      impl.next_->prev_= impl.prev_;\n    impl.next_ = 0;\n    impl.prev_ = 0;\n  }\n\n  impl.socket_ = other_impl.socket_;\n  other_impl.socket_ = invalid_socket;\n\n  impl.state_ = other_impl.state_;\n  other_impl.state_ = 0;\n\n  impl.cancel_token_ = other_impl.cancel_token_;\n  other_impl.cancel_token_.reset();\n\n#if defined(ASIO_ENABLE_CANCELIO)\n  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;\n  other_impl.safe_cancellation_thread_id_ = 0;\n#endif // 
defined(ASIO_ENABLE_CANCELIO)\n\n  if (this != &other_service)\n  {\n    // Insert implementation into linked list of all implementations.\n    asio::detail::mutex::scoped_lock lock(other_service.mutex_);\n    impl.next_ = other_service.impl_list_;\n    impl.prev_ = 0;\n    if (other_service.impl_list_)\n      other_service.impl_list_->prev_ = &impl;\n    other_service.impl_list_ = &impl;\n  }\n}\n\nvoid win_iocp_socket_service_base::destroy(\n    win_iocp_socket_service_base::base_implementation_type& impl)\n{\n  close_for_destruction(impl);\n\n  // Remove implementation from linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  if (impl_list_ == &impl)\n    impl_list_ = impl.next_;\n  if (impl.prev_)\n    impl.prev_->next_ = impl.next_;\n  if (impl.next_)\n    impl.next_->prev_= impl.prev_;\n  impl.next_ = 0;\n  impl.prev_ = 0;\n}\n\nasio::error_code win_iocp_socket_service_base::close(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((iocp_service_.context(),\n          \"socket\", &impl, impl.socket_, \"close\"));\n\n    // Check if the reactor was created, in which case we need to close the\n    // socket on the reactor as well to cancel any operations that might be\n    // running there.\n    select_reactor* r = static_cast<select_reactor*>(\n          interlocked_compare_exchange_pointer(\n            reinterpret_cast<void**>(&reactor_), 0, 0));\n    if (r)\n      r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);\n\n    socket_ops::close(impl.socket_, impl.state_, false, ec);\n\n    if (r)\n      r->cleanup_descriptor_data(impl.reactor_data_);\n  }\n  else\n  {\n    ec = asio::error_code();\n  }\n\n  impl.socket_ = invalid_socket;\n  impl.state_ = 0;\n  impl.cancel_token_.reset();\n#if defined(ASIO_ENABLE_CANCELIO)\n  impl.safe_cancellation_thread_id_ = 0;\n#endif // defined(ASIO_ENABLE_CANCELIO)\n\n  
return ec;\n}\n\nsocket_type win_iocp_socket_service_base::release(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (!is_open(impl))\n    return invalid_socket;\n\n  cancel(impl, ec);\n  if (ec)\n    return invalid_socket;\n\n  nt_set_info_fn fn = get_nt_set_info();\n  if (fn == 0)\n  {\n    ec = asio::error::operation_not_supported;\n    return invalid_socket;\n  }\n\n  HANDLE sock_as_handle = reinterpret_cast<HANDLE>(impl.socket_);\n  ULONG_PTR iosb[2] = { 0, 0 };\n  void* info[2] = { 0, 0 };\n  if (fn(sock_as_handle, iosb, &info, sizeof(info),\n        61 /* FileReplaceCompletionInformation */))\n  {\n    ec = asio::error::operation_not_supported;\n    return invalid_socket;\n  }\n\n  socket_type tmp = impl.socket_;\n  impl.socket_ = invalid_socket;\n  return tmp;\n}\n\nasio::error_code win_iocp_socket_service_base::cancel(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return ec;\n  }\n\n  ASIO_HANDLER_OPERATION((iocp_service_.context(),\n        \"socket\", &impl, impl.socket_, \"cancel\"));\n\n  if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(\n        ::GetModuleHandleA(\"KERNEL32\"), \"CancelIoEx\"))\n  {\n    // The version of Windows supports cancellation from any thread.\n    typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);\n    cancel_io_ex_t cancel_io_ex = reinterpret_cast<cancel_io_ex_t>(\n        reinterpret_cast<void*>(cancel_io_ex_ptr));\n    socket_type sock = impl.socket_;\n    HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);\n    if (!cancel_io_ex(sock_as_handle, 0))\n    {\n      DWORD last_error = ::GetLastError();\n      if (last_error == ERROR_NOT_FOUND)\n      {\n        // ERROR_NOT_FOUND means that there were no operations to be\n        // cancelled. 
We swallow this error to match the behaviour on other\n        // platforms.\n        ec = asio::error_code();\n      }\n      else\n      {\n        ec = asio::error_code(last_error,\n            asio::error::get_system_category());\n      }\n    }\n    else\n    {\n      ec = asio::error_code();\n    }\n  }\n#if defined(ASIO_ENABLE_CANCELIO)\n  else if (impl.safe_cancellation_thread_id_ == 0)\n  {\n    // No operations have been started, so there's nothing to cancel.\n    ec = asio::error_code();\n  }\n  else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())\n  {\n    // Asynchronous operations have been started from the current thread only,\n    // so it is safe to try to cancel them using CancelIo.\n    socket_type sock = impl.socket_;\n    HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);\n    if (!::CancelIo(sock_as_handle))\n    {\n      DWORD last_error = ::GetLastError();\n      ec = asio::error_code(last_error,\n          asio::error::get_system_category());\n    }\n    else\n    {\n      ec = asio::error_code();\n    }\n  }\n  else\n  {\n    // Asynchronous operations have been started from more than one thread,\n    // so cancellation is not safe.\n    ec = asio::error::operation_not_supported;\n  }\n#else // defined(ASIO_ENABLE_CANCELIO)\n  else\n  {\n    // Cancellation is not supported as CancelIo may not be used.\n    ec = asio::error::operation_not_supported;\n  }\n#endif // defined(ASIO_ENABLE_CANCELIO)\n\n  // Cancel any operations started via the reactor.\n  if (!ec)\n  {\n    select_reactor* r = static_cast<select_reactor*>(\n          interlocked_compare_exchange_pointer(\n            reinterpret_cast<void**>(&reactor_), 0, 0));\n    if (r)\n      r->cancel_ops(impl.socket_, impl.reactor_data_);\n  }\n\n  return ec;\n}\n\nasio::error_code win_iocp_socket_service_base::do_open(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    int family, int type, int protocol, asio::error_code& ec)\n{\n  if 
(is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  socket_holder sock(socket_ops::socket(family, type, protocol, ec));\n  if (sock.get() == invalid_socket)\n    return ec;\n\n  HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock.get());\n  if (iocp_service_.register_handle(sock_as_handle, ec))\n    return ec;\n\n  impl.socket_ = sock.release();\n  switch (type)\n  {\n  case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;\n  case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;\n  default: impl.state_ = 0; break;\n  }\n  impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code win_iocp_socket_service_base::do_assign(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    int type, socket_type native_socket, asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  HANDLE sock_as_handle = reinterpret_cast<HANDLE>(native_socket);\n  if (iocp_service_.register_handle(sock_as_handle, ec))\n    return ec;\n\n  impl.socket_ = native_socket;\n  switch (type)\n  {\n  case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;\n  case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;\n  default: impl.state_ = 0; break;\n  }\n  impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());\n  ec = asio::error_code();\n  return ec;\n}\n\nvoid win_iocp_socket_service_base::start_send_op(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    WSABUF* buffers, std::size_t buffer_count,\n    socket_base::message_flags flags, bool noop, operation* op)\n{\n  update_cancellation_thread_id(impl);\n  iocp_service_.work_started();\n\n  if (noop)\n    iocp_service_.on_completion(op);\n  else if (!is_open(impl))\n    iocp_service_.on_completion(op, asio::error::bad_descriptor);\n  else\n  {\n    DWORD 
bytes_transferred = 0;\n    int result = ::WSASend(impl.socket_, buffers,\n        static_cast<DWORD>(buffer_count), &bytes_transferred, flags, op, 0);\n    DWORD last_error = ::WSAGetLastError();\n    if (last_error == ERROR_PORT_UNREACHABLE)\n      last_error = WSAECONNREFUSED;\n    if (result != 0 && last_error != WSA_IO_PENDING)\n      iocp_service_.on_completion(op, last_error, bytes_transferred);\n    else\n      iocp_service_.on_pending(op);\n  }\n}\n\nvoid win_iocp_socket_service_base::start_send_to_op(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    WSABUF* buffers, std::size_t buffer_count,\n    const socket_addr_type* addr, int addrlen,\n    socket_base::message_flags flags, operation* op)\n{\n  update_cancellation_thread_id(impl);\n  iocp_service_.work_started();\n\n  if (!is_open(impl))\n    iocp_service_.on_completion(op, asio::error::bad_descriptor);\n  else\n  {\n    DWORD bytes_transferred = 0;\n    int result = ::WSASendTo(impl.socket_, buffers,\n        static_cast<DWORD>(buffer_count),\n        &bytes_transferred, flags, addr, addrlen, op, 0);\n    DWORD last_error = ::WSAGetLastError();\n    if (last_error == ERROR_PORT_UNREACHABLE)\n      last_error = WSAECONNREFUSED;\n    if (result != 0 && last_error != WSA_IO_PENDING)\n      iocp_service_.on_completion(op, last_error, bytes_transferred);\n    else\n      iocp_service_.on_pending(op);\n  }\n}\n\nvoid win_iocp_socket_service_base::start_receive_op(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    WSABUF* buffers, std::size_t buffer_count,\n    socket_base::message_flags flags, bool noop, operation* op)\n{\n  update_cancellation_thread_id(impl);\n  iocp_service_.work_started();\n\n  if (noop)\n    iocp_service_.on_completion(op);\n  else if (!is_open(impl))\n    iocp_service_.on_completion(op, asio::error::bad_descriptor);\n  else\n  {\n    DWORD bytes_transferred = 0;\n    DWORD recv_flags = flags;\n    int result = ::WSARecv(impl.socket_, 
buffers,\n        static_cast<DWORD>(buffer_count),\n        &bytes_transferred, &recv_flags, op, 0);\n    DWORD last_error = ::WSAGetLastError();\n    if (last_error == ERROR_NETNAME_DELETED)\n      last_error = WSAECONNRESET;\n    else if (last_error == ERROR_PORT_UNREACHABLE)\n      last_error = WSAECONNREFUSED;\n    if (result != 0 && last_error != WSA_IO_PENDING)\n      iocp_service_.on_completion(op, last_error, bytes_transferred);\n    else\n      iocp_service_.on_pending(op);\n  }\n}\n\nvoid win_iocp_socket_service_base::start_null_buffers_receive_op(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    socket_base::message_flags flags, reactor_op* op)\n{\n  if ((impl.state_ & socket_ops::stream_oriented) != 0)\n  {\n    // For stream sockets on Windows, we may issue a 0-byte overlapped\n    // WSARecv to wait until there is data available on the socket.\n    ::WSABUF buf = { 0, 0 };\n    start_receive_op(impl, &buf, 1, flags, false, op);\n  }\n  else\n  {\n    start_reactor_op(impl,\n        (flags & socket_base::message_out_of_band)\n          ? 
select_reactor::except_op : select_reactor::read_op,\n        op);\n  }\n}\n\nvoid win_iocp_socket_service_base::start_receive_from_op(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr,\n    socket_base::message_flags flags, int* addrlen, operation* op)\n{\n  update_cancellation_thread_id(impl);\n  iocp_service_.work_started();\n\n  if (!is_open(impl))\n    iocp_service_.on_completion(op, asio::error::bad_descriptor);\n  else\n  {\n    DWORD bytes_transferred = 0;\n    DWORD recv_flags = flags;\n    int result = ::WSARecvFrom(impl.socket_, buffers,\n        static_cast<DWORD>(buffer_count),\n        &bytes_transferred, &recv_flags, addr, addrlen, op, 0);\n    DWORD last_error = ::WSAGetLastError();\n    if (last_error == ERROR_PORT_UNREACHABLE)\n      last_error = WSAECONNREFUSED;\n    if (result != 0 && last_error != WSA_IO_PENDING)\n      iocp_service_.on_completion(op, last_error, bytes_transferred);\n    else\n      iocp_service_.on_pending(op);\n  }\n}\n\nvoid win_iocp_socket_service_base::start_accept_op(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    bool peer_is_open, socket_holder& new_socket, int family, int type,\n    int protocol, void* output_buffer, DWORD address_length, operation* op)\n{\n  update_cancellation_thread_id(impl);\n  iocp_service_.work_started();\n\n  if (!is_open(impl))\n    iocp_service_.on_completion(op, asio::error::bad_descriptor);\n  else if (peer_is_open)\n    iocp_service_.on_completion(op, asio::error::already_open);\n  else\n  {\n    asio::error_code ec;\n    new_socket.reset(socket_ops::socket(family, type, protocol, ec));\n    if (new_socket.get() == invalid_socket)\n      iocp_service_.on_completion(op, ec);\n    else\n    {\n      DWORD bytes_read = 0;\n      BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer,\n          0, address_length, address_length, &bytes_read, op);\n      DWORD 
last_error = ::WSAGetLastError();\n      if (!result && last_error != WSA_IO_PENDING)\n        iocp_service_.on_completion(op, last_error);\n      else\n        iocp_service_.on_pending(op);\n    }\n  }\n}\n\nvoid win_iocp_socket_service_base::restart_accept_op(\n    socket_type s, socket_holder& new_socket, int family, int type,\n    int protocol, void* output_buffer, DWORD address_length, operation* op)\n{\n  new_socket.reset();\n  iocp_service_.work_started();\n\n  asio::error_code ec;\n  new_socket.reset(socket_ops::socket(family, type, protocol, ec));\n  if (new_socket.get() == invalid_socket)\n    iocp_service_.on_completion(op, ec);\n  else\n  {\n    DWORD bytes_read = 0;\n    BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer,\n        0, address_length, address_length, &bytes_read, op);\n    DWORD last_error = ::WSAGetLastError();\n    if (!result && last_error != WSA_IO_PENDING)\n      iocp_service_.on_completion(op, last_error);\n    else\n      iocp_service_.on_pending(op);\n  }\n}\n\nvoid win_iocp_socket_service_base::start_reactor_op(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    int op_type, reactor_op* op)\n{\n  select_reactor& r = get_reactor();\n  update_cancellation_thread_id(impl);\n\n  if (is_open(impl))\n  {\n    r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false, false);\n    return;\n  }\n  else\n    op->ec_ = asio::error::bad_descriptor;\n\n  iocp_service_.post_immediate_completion(op, false);\n}\n\nvoid win_iocp_socket_service_base::start_connect_op(\n    win_iocp_socket_service_base::base_implementation_type& impl,\n    int family, int type, const socket_addr_type* addr,\n    std::size_t addrlen, win_iocp_socket_connect_op_base* op)\n{\n  // If ConnectEx is available, use that.\n  if (family == ASIO_OS_DEF(AF_INET)\n      || family == ASIO_OS_DEF(AF_INET6))\n  {\n    if (connect_ex_fn connect_ex = get_connect_ex(impl, type))\n    {\n      union address_union\n      {\n        
socket_addr_type base;\n        sockaddr_in4_type v4;\n        sockaddr_in6_type v6;\n      } a;\n\n      using namespace std; // For memset.\n      memset(&a, 0, sizeof(a));\n      a.base.sa_family = family;\n\n      socket_ops::bind(impl.socket_, &a.base,\n          family == ASIO_OS_DEF(AF_INET)\n          ? sizeof(a.v4) : sizeof(a.v6), op->ec_);\n      if (op->ec_ && op->ec_ != asio::error::invalid_argument)\n      {\n        iocp_service_.post_immediate_completion(op, false);\n        return;\n      }\n\n      op->connect_ex_ = true;\n      update_cancellation_thread_id(impl);\n      iocp_service_.work_started();\n\n      BOOL result = connect_ex(impl.socket_,\n          addr, static_cast<int>(addrlen), 0, 0, 0, op);\n      DWORD last_error = ::WSAGetLastError();\n      if (!result && last_error != WSA_IO_PENDING)\n        iocp_service_.on_completion(op, last_error);\n      else\n        iocp_service_.on_pending(op);\n      return;\n    }\n  }\n\n  // Otherwise, fall back to a reactor-based implementation.\n  select_reactor& r = get_reactor();\n  update_cancellation_thread_id(impl);\n\n  if ((impl.state_ & socket_ops::non_blocking) != 0\n      || socket_ops::set_internal_non_blocking(\n        impl.socket_, impl.state_, true, op->ec_))\n  {\n    if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)\n    {\n      if (op->ec_ == asio::error::in_progress\n          || op->ec_ == asio::error::would_block)\n      {\n        op->ec_ = asio::error_code();\n        r.start_op(select_reactor::connect_op, impl.socket_,\n            impl.reactor_data_, op, false, false);\n        return;\n      }\n    }\n  }\n\n  r.post_immediate_completion(op, false);\n}\n\nvoid win_iocp_socket_service_base::close_for_destruction(\n    win_iocp_socket_service_base::base_implementation_type& impl)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((iocp_service_.context(),\n          \"socket\", &impl, impl.socket_, \"close\"));\n\n    // Check if the reactor was 
created, in which case we need to close the\n    // socket on the reactor as well to cancel any operations that might be\n    // running there.\n    select_reactor* r = static_cast<select_reactor*>(\n          interlocked_compare_exchange_pointer(\n            reinterpret_cast<void**>(&reactor_), 0, 0));\n    if (r)\n      r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);\n\n    asio::error_code ignored_ec;\n    socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);\n\n    if (r)\n      r->cleanup_descriptor_data(impl.reactor_data_);\n  }\n\n  impl.socket_ = invalid_socket;\n  impl.state_ = 0;\n  impl.cancel_token_.reset();\n#if defined(ASIO_ENABLE_CANCELIO)\n  impl.safe_cancellation_thread_id_ = 0;\n#endif // defined(ASIO_ENABLE_CANCELIO)\n}\n\nvoid win_iocp_socket_service_base::update_cancellation_thread_id(\n    win_iocp_socket_service_base::base_implementation_type& impl)\n{\n#if defined(ASIO_ENABLE_CANCELIO)\n  if (impl.safe_cancellation_thread_id_ == 0)\n    impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();\n  else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())\n    impl.safe_cancellation_thread_id_ = ~DWORD(0);\n#else // defined(ASIO_ENABLE_CANCELIO)\n  (void)impl;\n#endif // defined(ASIO_ENABLE_CANCELIO)\n}\n\nselect_reactor& win_iocp_socket_service_base::get_reactor()\n{\n  select_reactor* r = static_cast<select_reactor*>(\n        interlocked_compare_exchange_pointer(\n          reinterpret_cast<void**>(&reactor_), 0, 0));\n  if (!r)\n  {\n    r = &(use_service<select_reactor>(context_));\n    interlocked_exchange_pointer(reinterpret_cast<void**>(&reactor_), r);\n  }\n  return *r;\n}\n\nwin_iocp_socket_service_base::connect_ex_fn\nwin_iocp_socket_service_base::get_connect_ex(\n    win_iocp_socket_service_base::base_implementation_type& impl, int type)\n{\n#if defined(ASIO_DISABLE_CONNECTEX)\n  (void)impl;\n  (void)type;\n  return 0;\n#else // defined(ASIO_DISABLE_CONNECTEX)\n  if (type != 
ASIO_OS_DEF(SOCK_STREAM)\n      && type != ASIO_OS_DEF(SOCK_SEQPACKET))\n    return 0;\n\n  void* ptr = interlocked_compare_exchange_pointer(&connect_ex_, 0, 0);\n  if (!ptr)\n  {\n    GUID guid = { 0x25a207b9, 0xddf3, 0x4660,\n      { 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e } };\n\n    DWORD bytes = 0;\n    if (::WSAIoctl(impl.socket_, SIO_GET_EXTENSION_FUNCTION_POINTER,\n          &guid, sizeof(guid), &ptr, sizeof(ptr), &bytes, 0, 0) != 0)\n    {\n      // Set connect_ex_ to a special value to indicate that ConnectEx is\n      // unavailable. That way we won't bother trying to look it up again.\n      ptr = this;\n    }\n\n    interlocked_exchange_pointer(&connect_ex_, ptr);\n  }\n\n  return reinterpret_cast<connect_ex_fn>(ptr == this ? 0 : ptr);\n#endif // defined(ASIO_DISABLE_CONNECTEX)\n}\n\nwin_iocp_socket_service_base::nt_set_info_fn\nwin_iocp_socket_service_base::get_nt_set_info()\n{\n  void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0);\n  if (!ptr)\n  {\n    if (HMODULE h = ::GetModuleHandleA(\"NTDLL.DLL\"))\n      ptr = reinterpret_cast<void*>(GetProcAddress(h, \"NtSetInformationFile\"));\n\n    // On failure, set nt_set_info_ to a special value to indicate that the\n    // NtSetInformationFile function is unavailable. That way we won't bother\n    // trying to look it up again.\n    interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this);\n  }\n\n  return reinterpret_cast<nt_set_info_fn>(ptr == this ? 
0 : ptr);\n}\n\nvoid* win_iocp_socket_service_base::interlocked_compare_exchange_pointer(\n    void** dest, void* exch, void* cmp)\n{\n#if defined(_M_IX86)\n  return reinterpret_cast<void*>(InterlockedCompareExchange(\n        reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(exch),\n        reinterpret_cast<LONG>(cmp)));\n#else\n  return InterlockedCompareExchangePointer(dest, exch, cmp);\n#endif\n}\n\nvoid* win_iocp_socket_service_base::interlocked_exchange_pointer(\n    void** dest, void* val)\n{\n#if defined(_M_IX86)\n  return reinterpret_cast<void*>(InterlockedExchange(\n        reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(val)));\n#else\n  return InterlockedExchangePointer(dest, val);\n#endif\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_mutex.ipp",
    "content": "//\n// detail/impl/win_mutex.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_MUTEX_IPP\n#define ASIO_DETAIL_IMPL_WIN_MUTEX_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS)\n\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/win_mutex.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nwin_mutex::win_mutex()\n{\n  int error = do_init();\n  asio::error_code ec(error,\n      asio::error::get_system_category());\n  asio::detail::throw_error(ec, \"mutex\");\n}\n\nint win_mutex::do_init()\n{\n#if defined(__MINGW32__)\n  // Not sure if MinGW supports structured exception handling, so for now\n  // we'll just call the Windows API and hope.\n# if defined(UNDER_CE)\n  ::InitializeCriticalSection(&crit_section_);\n# elif defined(ASIO_WINDOWS_APP)\n  if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))\n    return ::GetLastError();\n# else\n  if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))\n    return ::GetLastError();\n# endif\n  return 0;\n#else\n  __try\n  {\n# if defined(UNDER_CE)\n    ::InitializeCriticalSection(&crit_section_);\n# elif defined(ASIO_WINDOWS_APP)\n    if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))\n      return ::GetLastError();\n# else\n    if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))\n      return ::GetLastError();\n# endif\n  }\n  __except(GetExceptionCode() == STATUS_NO_MEMORY\n      ? 
EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)\n  {\n    return ERROR_OUTOFMEMORY;\n  }\n\n  return 0;\n#endif\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_IMPL_WIN_MUTEX_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_object_handle_service.ipp",
    "content": "//\n// detail/impl/win_object_handle_service.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2011 Boris Schaeling (boris@highscore.de)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP\n#define ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)\n\n#include \"asio/detail/win_object_handle_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nwin_object_handle_service::win_object_handle_service(execution_context& context)\n  : execution_context_service_base<win_object_handle_service>(context),\n    scheduler_(asio::use_service<scheduler_impl>(context)),\n    mutex_(),\n    impl_list_(0),\n    shutdown_(false)\n{\n}\n\nvoid win_object_handle_service::shutdown()\n{\n  mutex::scoped_lock lock(mutex_);\n\n  // Setting this flag to true prevents new objects from being registered, and\n  // new asynchronous wait operations from being started. 
We only need to worry\n  // about cleaning up the operations that are currently in progress.\n  shutdown_ = true;\n\n  op_queue<operation> ops;\n  for (implementation_type* impl = impl_list_; impl; impl = impl->next_)\n    ops.push(impl->op_queue_);\n\n  lock.unlock();\n\n  scheduler_.abandon_operations(ops);\n}\n\nvoid win_object_handle_service::construct(\n    win_object_handle_service::implementation_type& impl)\n{\n  impl.handle_ = INVALID_HANDLE_VALUE;\n  impl.wait_handle_ = INVALID_HANDLE_VALUE;\n  impl.owner_ = this;\n\n  // Insert implementation into linked list of all implementations.\n  mutex::scoped_lock lock(mutex_);\n  if (!shutdown_)\n  {\n    impl.next_ = impl_list_;\n    impl.prev_ = 0;\n    if (impl_list_)\n      impl_list_->prev_ = &impl;\n    impl_list_ = &impl;\n  }\n}\n\nvoid win_object_handle_service::move_construct(\n    win_object_handle_service::implementation_type& impl,\n    win_object_handle_service::implementation_type& other_impl)\n{\n  mutex::scoped_lock lock(mutex_);\n\n  // Insert implementation into linked list of all implementations.\n  if (!shutdown_)\n  {\n    impl.next_ = impl_list_;\n    impl.prev_ = 0;\n    if (impl_list_)\n      impl_list_->prev_ = &impl;\n    impl_list_ = &impl;\n  }\n\n  impl.handle_ = other_impl.handle_;\n  other_impl.handle_ = INVALID_HANDLE_VALUE;\n  impl.wait_handle_ = other_impl.wait_handle_;\n  other_impl.wait_handle_ = INVALID_HANDLE_VALUE;\n  impl.op_queue_.push(other_impl.op_queue_);\n  impl.owner_ = this;\n\n  // We must not hold the lock while calling UnregisterWaitEx. 
This is because\n  // the registered callback function might be invoked while we are waiting for\n  // UnregisterWaitEx to complete.\n  lock.unlock();\n\n  if (impl.wait_handle_ != INVALID_HANDLE_VALUE)\n    ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);\n\n  if (!impl.op_queue_.empty())\n    register_wait_callback(impl, lock);\n}\n\nvoid win_object_handle_service::move_assign(\n    win_object_handle_service::implementation_type& impl,\n    win_object_handle_service& other_service,\n    win_object_handle_service::implementation_type& other_impl)\n{\n  asio::error_code ignored_ec;\n  close(impl, ignored_ec);\n\n  mutex::scoped_lock lock(mutex_);\n\n  if (this != &other_service)\n  {\n    // Remove implementation from linked list of all implementations.\n    if (impl_list_ == &impl)\n      impl_list_ = impl.next_;\n    if (impl.prev_)\n      impl.prev_->next_ = impl.next_;\n    if (impl.next_)\n      impl.next_->prev_= impl.prev_;\n    impl.next_ = 0;\n    impl.prev_ = 0;\n  }\n\n  impl.handle_ = other_impl.handle_;\n  other_impl.handle_ = INVALID_HANDLE_VALUE;\n  impl.wait_handle_ = other_impl.wait_handle_;\n  other_impl.wait_handle_ = INVALID_HANDLE_VALUE;\n  impl.op_queue_.push(other_impl.op_queue_);\n  impl.owner_ = this;\n\n  if (this != &other_service)\n  {\n    // Insert implementation into linked list of all implementations.\n    impl.next_ = other_service.impl_list_;\n    impl.prev_ = 0;\n    if (other_service.impl_list_)\n      other_service.impl_list_->prev_ = &impl;\n    other_service.impl_list_ = &impl;\n  }\n\n  // We must not hold the lock while calling UnregisterWaitEx. 
This is because\n  // the registered callback function might be invoked while we are waiting for\n  // UnregisterWaitEx to complete.\n  lock.unlock();\n\n  if (impl.wait_handle_ != INVALID_HANDLE_VALUE)\n    ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);\n\n  if (!impl.op_queue_.empty())\n    register_wait_callback(impl, lock);\n}\n\nvoid win_object_handle_service::destroy(\n    win_object_handle_service::implementation_type& impl)\n{\n  mutex::scoped_lock lock(mutex_);\n\n  // Remove implementation from linked list of all implementations.\n  if (impl_list_ == &impl)\n    impl_list_ = impl.next_;\n  if (impl.prev_)\n    impl.prev_->next_ = impl.next_;\n  if (impl.next_)\n    impl.next_->prev_= impl.prev_;\n  impl.next_ = 0;\n  impl.prev_ = 0;\n\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((scheduler_.context(), \"object_handle\",\n          &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), \"close\"));\n\n    HANDLE wait_handle = impl.wait_handle_;\n    impl.wait_handle_ = INVALID_HANDLE_VALUE;\n\n    op_queue<operation> ops;\n    while (wait_op* op = impl.op_queue_.front())\n    {\n      op->ec_ = asio::error::operation_aborted;\n      impl.op_queue_.pop();\n      ops.push(op);\n    }\n\n    // We must not hold the lock while calling UnregisterWaitEx. 
This is\n    // because the registered callback function might be invoked while we are\n    // waiting for UnregisterWaitEx to complete.\n    lock.unlock();\n\n    if (wait_handle != INVALID_HANDLE_VALUE)\n      ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);\n\n    ::CloseHandle(impl.handle_);\n    impl.handle_ = INVALID_HANDLE_VALUE;\n\n    scheduler_.post_deferred_completions(ops);\n  }\n}\n\nasio::error_code win_object_handle_service::assign(\n    win_object_handle_service::implementation_type& impl,\n    const native_handle_type& handle, asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ec = asio::error::already_open;\n    return ec;\n  }\n\n  impl.handle_ = handle;\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code win_object_handle_service::close(\n    win_object_handle_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((scheduler_.context(), \"object_handle\",\n          &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), \"close\"));\n\n    mutex::scoped_lock lock(mutex_);\n\n    HANDLE wait_handle = impl.wait_handle_;\n    impl.wait_handle_ = INVALID_HANDLE_VALUE;\n\n    op_queue<operation> completed_ops;\n    while (wait_op* op = impl.op_queue_.front())\n    {\n      impl.op_queue_.pop();\n      op->ec_ = asio::error::operation_aborted;\n      completed_ops.push(op);\n    }\n\n    // We must not hold the lock while calling UnregisterWaitEx. 
This is\n    // because the registered callback function might be invoked while we are\n    // waiting for UnregisterWaitEx to complete.\n    lock.unlock();\n\n    if (wait_handle != INVALID_HANDLE_VALUE)\n      ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);\n\n    if (::CloseHandle(impl.handle_))\n    {\n      impl.handle_ = INVALID_HANDLE_VALUE;\n      ec = asio::error_code();\n    }\n    else\n    {\n      DWORD last_error = ::GetLastError();\n      ec = asio::error_code(last_error,\n          asio::error::get_system_category());\n    }\n\n    scheduler_.post_deferred_completions(completed_ops);\n  }\n  else\n  {\n    ec = asio::error_code();\n  }\n\n  return ec;\n}\n\nasio::error_code win_object_handle_service::cancel(\n    win_object_handle_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (is_open(impl))\n  {\n    ASIO_HANDLER_OPERATION((scheduler_.context(), \"object_handle\",\n          &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), \"cancel\"));\n\n    mutex::scoped_lock lock(mutex_);\n\n    HANDLE wait_handle = impl.wait_handle_;\n    impl.wait_handle_ = INVALID_HANDLE_VALUE;\n\n    op_queue<operation> completed_ops;\n    while (wait_op* op = impl.op_queue_.front())\n    {\n      op->ec_ = asio::error::operation_aborted;\n      impl.op_queue_.pop();\n      completed_ops.push(op);\n    }\n\n    // We must not hold the lock while calling UnregisterWaitEx. 
This is\n    // because the registered callback function might be invoked while we are\n    // waiting for UnregisterWaitEx to complete.\n    lock.unlock();\n\n    if (wait_handle != INVALID_HANDLE_VALUE)\n      ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);\n\n    ec = asio::error_code();\n\n    scheduler_.post_deferred_completions(completed_ops);\n  }\n  else\n  {\n    ec = asio::error::bad_descriptor;\n  }\n\n  return ec;\n}\n\nvoid win_object_handle_service::wait(\n    win_object_handle_service::implementation_type& impl,\n    asio::error_code& ec)\n{\n  switch (::WaitForSingleObject(impl.handle_, INFINITE))\n  {\n  case WAIT_FAILED:\n    {\n      DWORD last_error = ::GetLastError();\n      ec = asio::error_code(last_error,\n          asio::error::get_system_category());\n      break;\n    }\n  case WAIT_OBJECT_0:\n  case WAIT_ABANDONED:\n  default:\n    ec = asio::error_code();\n    break;\n  }\n}\n\nvoid win_object_handle_service::start_wait_op(\n    win_object_handle_service::implementation_type& impl, wait_op* op)\n{\n  scheduler_.work_started();\n\n  if (is_open(impl))\n  {\n    mutex::scoped_lock lock(mutex_);\n\n    if (!shutdown_)\n    {\n      impl.op_queue_.push(op);\n\n      // Only the first operation to be queued gets to register a wait callback.\n      // Subsequent operations have to wait for the first to finish.\n      if (impl.op_queue_.front() == op)\n        register_wait_callback(impl, lock);\n    }\n    else\n    {\n      lock.unlock();\n      scheduler_.post_deferred_completion(op);\n    }\n  }\n  else\n  {\n    op->ec_ = asio::error::bad_descriptor;\n    scheduler_.post_deferred_completion(op);\n  }\n}\n\nvoid win_object_handle_service::register_wait_callback(\n    win_object_handle_service::implementation_type& impl,\n    mutex::scoped_lock& lock)\n{\n  lock.lock();\n\n  if (!RegisterWaitForSingleObject(&impl.wait_handle_,\n        impl.handle_, &win_object_handle_service::wait_callback,\n        &impl, INFINITE, 
WT_EXECUTEONLYONCE))\n  {\n    DWORD last_error = ::GetLastError();\n    asio::error_code ec(last_error,\n        asio::error::get_system_category());\n\n    op_queue<operation> completed_ops;\n    while (wait_op* op = impl.op_queue_.front())\n    {\n      op->ec_ = ec;\n      impl.op_queue_.pop();\n      completed_ops.push(op);\n    }\n\n    lock.unlock();\n    scheduler_.post_deferred_completions(completed_ops);\n  }\n}\n\nvoid win_object_handle_service::wait_callback(PVOID param, BOOLEAN)\n{\n  implementation_type* impl = static_cast<implementation_type*>(param);\n  mutex::scoped_lock lock(impl->owner_->mutex_);\n\n  if (impl->wait_handle_ != INVALID_HANDLE_VALUE)\n  {\n    ::UnregisterWaitEx(impl->wait_handle_, NULL);\n    impl->wait_handle_ = INVALID_HANDLE_VALUE;\n  }\n\n  if (wait_op* op = impl->op_queue_.front())\n  {\n    op_queue<operation> completed_ops;\n\n    op->ec_ = asio::error_code();\n    impl->op_queue_.pop();\n    completed_ops.push(op);\n\n    if (!impl->op_queue_.empty())\n    {\n      if (!RegisterWaitForSingleObject(&impl->wait_handle_,\n            impl->handle_, &win_object_handle_service::wait_callback,\n            param, INFINITE, WT_EXECUTEONLYONCE))\n      {\n        DWORD last_error = ::GetLastError();\n        asio::error_code ec(last_error,\n            asio::error::get_system_category());\n\n        while ((op = impl->op_queue_.front()) != 0)\n        {\n          op->ec_ = ec;\n          impl->op_queue_.pop();\n          completed_ops.push(op);\n        }\n      }\n    }\n\n    scheduler_impl& sched = impl->owner_->scheduler_;\n    lock.unlock();\n    sched.post_deferred_completions(completed_ops);\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)\n\n#endif // ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_static_mutex.ipp",
    "content": "//\n// detail/impl/win_static_mutex.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP\n#define ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS)\n\n#include <cstdio>\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/win_static_mutex.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nvoid win_static_mutex::init()\n{\n  int error = do_init();\n  asio::error_code ec(error,\n      asio::error::get_system_category());\n  asio::detail::throw_error(ec, \"static_mutex\");\n}\n\nint win_static_mutex::do_init()\n{\n  using namespace std; // For sprintf.\n  wchar_t mutex_name[128];\n#if defined(ASIO_HAS_SECURE_RTL)\n  swprintf_s(\n#else // defined(ASIO_HAS_SECURE_RTL)\n  _snwprintf(\n#endif // defined(ASIO_HAS_SECURE_RTL)\n      mutex_name, 128, L\"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p\",\n      static_cast<unsigned int>(::GetCurrentProcessId()), this);\n\n#if defined(ASIO_WINDOWS_APP)\n  HANDLE mutex = ::CreateMutexExW(0, mutex_name, CREATE_MUTEX_INITIAL_OWNER, 0);\n#else // defined(ASIO_WINDOWS_APP)\n  HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name);\n#endif // defined(ASIO_WINDOWS_APP)\n  DWORD last_error = ::GetLastError();\n  if (mutex == 0)\n    return ::GetLastError();\n\n  if (last_error == ERROR_ALREADY_EXISTS)\n  {\n#if defined(ASIO_WINDOWS_APP)\n    ::WaitForSingleObjectEx(mutex, INFINITE, false);\n#else // defined(ASIO_WINDOWS_APP)\n    ::WaitForSingleObject(mutex, INFINITE);\n#endif // 
defined(ASIO_WINDOWS_APP)\n  }\n\n  if (initialised_)\n  {\n    ::ReleaseMutex(mutex);\n    ::CloseHandle(mutex);\n    return 0;\n  }\n\n#if defined(__MINGW32__)\n  // Not sure if MinGW supports structured exception handling, so for now\n  // we'll just call the Windows API and hope.\n# if defined(UNDER_CE)\n  ::InitializeCriticalSection(&crit_section_);\n# else\n  if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))\n  {\n    last_error = ::GetLastError();\n    ::ReleaseMutex(mutex);\n    ::CloseHandle(mutex);\n    return last_error;\n  }\n# endif\n#else\n  __try\n  {\n# if defined(UNDER_CE)\n    ::InitializeCriticalSection(&crit_section_);\n# elif defined(ASIO_WINDOWS_APP)\n    if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))\n    {\n      last_error = ::GetLastError();\n      ::ReleaseMutex(mutex);\n      ::CloseHandle(mutex);\n      return last_error;\n    }\n# else\n    if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))\n    {\n      last_error = ::GetLastError();\n      ::ReleaseMutex(mutex);\n      ::CloseHandle(mutex);\n      return last_error;\n    }\n# endif\n  }\n  __except(GetExceptionCode() == STATUS_NO_MEMORY\n      ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)\n  {\n    ::ReleaseMutex(mutex);\n    ::CloseHandle(mutex);\n    return ERROR_OUTOFMEMORY;\n  }\n#endif\n\n  initialised_ = true;\n  ::ReleaseMutex(mutex);\n  ::CloseHandle(mutex);\n  return 0;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_thread.ipp",
    "content": "//\n// detail/impl/win_thread.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_THREAD_IPP\n#define ASIO_DETAIL_IMPL_WIN_THREAD_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_APP) \\\n  && !defined(UNDER_CE)\n\n#include <process.h>\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/win_thread.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nwin_thread::~win_thread()\n{\n  ::CloseHandle(thread_);\n\n  // The exit_event_ handle is deliberately allowed to leak here since it\n  // is an error for the owner of an internal thread not to join() it.\n}\n\nvoid win_thread::join()\n{\n  HANDLE handles[2] = { exit_event_, thread_ };\n  ::WaitForMultipleObjects(2, handles, FALSE, INFINITE);\n  ::CloseHandle(exit_event_);\n  if (terminate_threads())\n  {\n    ::TerminateThread(thread_, 0);\n  }\n  else\n  {\n    ::QueueUserAPC(apc_function, thread_, 0);\n    ::WaitForSingleObject(thread_, INFINITE);\n  }\n}\n\nstd::size_t win_thread::hardware_concurrency()\n{\n  SYSTEM_INFO system_info;\n  ::GetSystemInfo(&system_info);\n  return system_info.dwNumberOfProcessors;\n}\n\nvoid win_thread::start_thread(func_base* arg, unsigned int stack_size)\n{\n  ::HANDLE entry_event = 0;\n  arg->entry_event_ = entry_event = ::CreateEventW(0, true, false, 0);\n  if (!entry_event)\n  {\n    DWORD last_error = ::GetLastError();\n    delete arg;\n    asio::error_code ec(last_error,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, 
\"thread.entry_event\");\n  }\n\n  arg->exit_event_ = exit_event_ = ::CreateEventW(0, true, false, 0);\n  if (!exit_event_)\n  {\n    DWORD last_error = ::GetLastError();\n    delete arg;\n    asio::error_code ec(last_error,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"thread.exit_event\");\n  }\n\n  unsigned int thread_id = 0;\n  thread_ = reinterpret_cast<HANDLE>(::_beginthreadex(0,\n        stack_size, win_thread_function, arg, 0, &thread_id));\n  if (!thread_)\n  {\n    DWORD last_error = ::GetLastError();\n    delete arg;\n    if (entry_event)\n      ::CloseHandle(entry_event);\n    if (exit_event_)\n      ::CloseHandle(exit_event_);\n    asio::error_code ec(last_error,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"thread\");\n  }\n\n  if (entry_event)\n  {\n    ::WaitForSingleObject(entry_event, INFINITE);\n    ::CloseHandle(entry_event);\n  }\n}\n\nunsigned int __stdcall win_thread_function(void* arg)\n{\n  win_thread::auto_func_base_ptr func = {\n      static_cast<win_thread::func_base*>(arg) };\n\n  ::SetEvent(func.ptr->entry_event_);\n\n  func.ptr->run();\n\n  // Signal that the thread has finished its work, but rather than returning go\n  // to sleep to put the thread into a well known state. If the thread is being\n  // joined during global object destruction then it may be killed using\n  // TerminateThread (to avoid a deadlock in DllMain). 
Otherwise, the SleepEx\n  // call will be interrupted using QueueUserAPC and the thread will shut down\n  // cleanly.\n  HANDLE exit_event = func.ptr->exit_event_;\n  delete func.ptr;\n  func.ptr = 0;\n  ::SetEvent(exit_event);\n  ::SleepEx(INFINITE, TRUE);\n\n  return 0;\n}\n\n#if defined(WINVER) && (WINVER < 0x0500)\nvoid __stdcall apc_function(ULONG) {}\n#else\nvoid __stdcall apc_function(ULONG_PTR) {}\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS)\n       // && !defined(ASIO_WINDOWS_APP)\n       // && !defined(UNDER_CE)\n\n#endif // ASIO_DETAIL_IMPL_WIN_THREAD_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/win_tss_ptr.ipp",
    "content": "//\n// detail/impl/win_tss_ptr.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP\n#define ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS)\n\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/win_tss_ptr.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nDWORD win_tss_ptr_create()\n{\n#if defined(UNDER_CE)\n  const DWORD out_of_indexes = 0xFFFFFFFF;\n#else\n  const DWORD out_of_indexes = TLS_OUT_OF_INDEXES;\n#endif\n\n  DWORD tss_key = ::TlsAlloc();\n  if (tss_key == out_of_indexes)\n  {\n    DWORD last_error = ::GetLastError();\n    asio::error_code ec(last_error,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"tss\");\n  }\n  return tss_key;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/winrt_ssocket_service_base.ipp",
    "content": "//\n// detail/impl/winrt_ssocket_service_base.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP\n#define ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include <cstring>\n#include \"asio/detail/winrt_ssocket_service_base.hpp\"\n#include \"asio/detail/winrt_async_op.hpp\"\n#include \"asio/detail/winrt_utils.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nwinrt_ssocket_service_base::winrt_ssocket_service_base(\n    execution_context& context)\n  : scheduler_(use_service<scheduler_impl>(context)),\n    async_manager_(use_service<winrt_async_manager>(context)),\n    mutex_(),\n    impl_list_(0)\n{\n}\n\nvoid winrt_ssocket_service_base::base_shutdown()\n{\n  // Close all implementations, causing all operations to complete.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  base_implementation_type* impl = impl_list_;\n  while (impl)\n  {\n    asio::error_code ignored_ec;\n    close(*impl, ignored_ec);\n    impl = impl->next_;\n  }\n}\n\nvoid winrt_ssocket_service_base::construct(\n    winrt_ssocket_service_base::base_implementation_type& impl)\n{\n  // Insert implementation into linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  impl.next_ = impl_list_;\n  impl.prev_ = 0;\n  if (impl_list_)\n    impl_list_->prev_ = &impl;\n  impl_list_ = &impl;\n}\n\nvoid winrt_ssocket_service_base::base_move_construct(\n    winrt_ssocket_service_base::base_implementation_type& impl,\n    
winrt_ssocket_service_base::base_implementation_type& other_impl)\n  ASIO_NOEXCEPT\n{\n  impl.socket_ = other_impl.socket_;\n  other_impl.socket_ = nullptr;\n\n  // Insert implementation into linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  impl.next_ = impl_list_;\n  impl.prev_ = 0;\n  if (impl_list_)\n    impl_list_->prev_ = &impl;\n  impl_list_ = &impl;\n}\n\nvoid winrt_ssocket_service_base::base_move_assign(\n    winrt_ssocket_service_base::base_implementation_type& impl,\n    winrt_ssocket_service_base& other_service,\n    winrt_ssocket_service_base::base_implementation_type& other_impl)\n{\n  asio::error_code ignored_ec;\n  close(impl, ignored_ec);\n\n  if (this != &other_service)\n  {\n    // Remove implementation from linked list of all implementations.\n    asio::detail::mutex::scoped_lock lock(mutex_);\n    if (impl_list_ == &impl)\n      impl_list_ = impl.next_;\n    if (impl.prev_)\n      impl.prev_->next_ = impl.next_;\n    if (impl.next_)\n      impl.next_->prev_= impl.prev_;\n    impl.next_ = 0;\n    impl.prev_ = 0;\n  }\n\n  impl.socket_ = other_impl.socket_;\n  other_impl.socket_ = nullptr;\n\n  if (this != &other_service)\n  {\n    // Insert implementation into linked list of all implementations.\n    asio::detail::mutex::scoped_lock lock(other_service.mutex_);\n    impl.next_ = other_service.impl_list_;\n    impl.prev_ = 0;\n    if (other_service.impl_list_)\n      other_service.impl_list_->prev_ = &impl;\n    other_service.impl_list_ = &impl;\n  }\n}\n\nvoid winrt_ssocket_service_base::destroy(\n    winrt_ssocket_service_base::base_implementation_type& impl)\n{\n  asio::error_code ignored_ec;\n  close(impl, ignored_ec);\n\n  // Remove implementation from linked list of all implementations.\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  if (impl_list_ == &impl)\n    impl_list_ = impl.next_;\n  if (impl.prev_)\n    impl.prev_->next_ = impl.next_;\n  if (impl.next_)\n    impl.next_->prev_= impl.prev_;\n 
 impl.next_ = 0;\n  impl.prev_ = 0;\n}\n\nasio::error_code winrt_ssocket_service_base::close(\n    winrt_ssocket_service_base::base_implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (impl.socket_)\n  {\n    delete impl.socket_;\n    impl.socket_ = nullptr;\n  }\n\n  ec = asio::error_code();\n  return ec;\n}\n\nwinrt_ssocket_service_base::native_handle_type\nwinrt_ssocket_service_base::release(\n    winrt_ssocket_service_base::base_implementation_type& impl,\n    asio::error_code& ec)\n{\n  if (!is_open(impl))\n    return nullptr;\n\n  cancel(impl, ec);\n  if (ec)\n    return nullptr;\n\n  native_handle_type tmp = impl.socket_;\n  impl.socket_ = nullptr;\n  return tmp;\n}\n\nstd::size_t winrt_ssocket_service_base::do_get_endpoint(\n    const base_implementation_type& impl, bool local,\n    void* addr, std::size_t addr_len, asio::error_code& ec) const\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return addr_len;\n  }\n\n  try\n  {\n    std::string addr_string = winrt_utils::string(local\n        ? impl.socket_->Information->LocalAddress->CanonicalName\n        : impl.socket_->Information->RemoteAddress->CanonicalName);\n    unsigned short port = winrt_utils::integer(local\n        ? 
impl.socket_->Information->LocalPort\n        : impl.socket_->Information->RemotePort);\n    unsigned long scope = 0;\n\n    switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family)\n    {\n    case ASIO_OS_DEF(AF_INET):\n      if (addr_len < sizeof(sockaddr_in4_type))\n      {\n        ec = asio::error::invalid_argument;\n        return addr_len;\n      }\n      else\n      {\n        socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), addr_string.c_str(),\n            &reinterpret_cast<sockaddr_in4_type*>(addr)->sin_addr, &scope, ec);\n        reinterpret_cast<sockaddr_in4_type*>(addr)->sin_port\n          = socket_ops::host_to_network_short(port);\n        ec = asio::error_code();\n        return sizeof(sockaddr_in4_type);\n      }\n    case ASIO_OS_DEF(AF_INET6):\n      if (addr_len < sizeof(sockaddr_in6_type))\n      {\n        ec = asio::error::invalid_argument;\n        return addr_len;\n      }\n      else\n      {\n        socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), addr_string.c_str(),\n            &reinterpret_cast<sockaddr_in6_type*>(addr)->sin6_addr, &scope, ec);\n        reinterpret_cast<sockaddr_in6_type*>(addr)->sin6_port\n          = socket_ops::host_to_network_short(port);\n        ec = asio::error_code();\n        return sizeof(sockaddr_in6_type);\n      }\n    default:\n      ec = asio::error::address_family_not_supported;\n      return addr_len;\n    }\n  }\n  catch (Platform::Exception^ e)\n  {\n    ec = asio::error_code(e->HResult,\n        asio::system_category());\n    return addr_len;\n  }\n}\n\nasio::error_code winrt_ssocket_service_base::do_set_option(\n    winrt_ssocket_service_base::base_implementation_type& impl,\n    int level, int optname, const void* optval,\n    std::size_t optlen, asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return ec;\n  }\n\n  try\n  {\n    if (level == ASIO_OS_DEF(SOL_SOCKET)\n        && optname == ASIO_OS_DEF(SO_KEEPALIVE))\n    {\n      if (optlen == 
sizeof(int))\n      {\n        int value = 0;\n        std::memcpy(&value, optval, optlen);\n        impl.socket_->Control->KeepAlive = !!value;\n        ec = asio::error_code();\n      }\n      else\n      {\n        ec = asio::error::invalid_argument;\n      }\n    }\n    else if (level == ASIO_OS_DEF(IPPROTO_TCP)\n        && optname == ASIO_OS_DEF(TCP_NODELAY))\n    {\n      if (optlen == sizeof(int))\n      {\n        int value = 0;\n        std::memcpy(&value, optval, optlen);\n        impl.socket_->Control->NoDelay = !!value;\n        ec = asio::error_code();\n      }\n      else\n      {\n        ec = asio::error::invalid_argument;\n      }\n    }\n    else\n    {\n      ec = asio::error::invalid_argument;\n    }\n  }\n  catch (Platform::Exception^ e)\n  {\n    ec = asio::error_code(e->HResult,\n        asio::system_category());\n  }\n\n  return ec;\n}\n\nvoid winrt_ssocket_service_base::do_get_option(\n    const winrt_ssocket_service_base::base_implementation_type& impl,\n    int level, int optname, void* optval,\n    std::size_t* optlen, asio::error_code& ec) const\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return;\n  }\n\n  try\n  {\n    if (level == ASIO_OS_DEF(SOL_SOCKET)\n        && optname == ASIO_OS_DEF(SO_KEEPALIVE))\n    {\n      if (*optlen >= sizeof(int))\n      {\n        int value = impl.socket_->Control->KeepAlive ? 1 : 0;\n        std::memcpy(optval, &value, sizeof(int));\n        *optlen = sizeof(int);\n        ec = asio::error_code();\n      }\n      else\n      {\n        ec = asio::error::invalid_argument;\n      }\n    }\n    else if (level == ASIO_OS_DEF(IPPROTO_TCP)\n        && optname == ASIO_OS_DEF(TCP_NODELAY))\n    {\n      if (*optlen >= sizeof(int))\n      {\n        int value = impl.socket_->Control->NoDelay ? 
1 : 0;\n        std::memcpy(optval, &value, sizeof(int));\n        *optlen = sizeof(int);\n        ec = asio::error_code();\n      }\n      else\n      {\n        ec = asio::error::invalid_argument;\n      }\n    }\n    else\n    {\n      ec = asio::error::invalid_argument;\n    }\n  }\n  catch (Platform::Exception^ e)\n  {\n    ec = asio::error_code(e->HResult,\n        asio::system_category());\n  }\n}\n\nasio::error_code winrt_ssocket_service_base::do_connect(\n    winrt_ssocket_service_base::base_implementation_type& impl,\n    const void* addr, asio::error_code& ec)\n{\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return ec;\n  }\n\n  char addr_string[max_addr_v6_str_len];\n  unsigned short port;\n  switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family)\n  {\n  case ASIO_OS_DEF(AF_INET):\n    socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET),\n        &reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_addr,\n        addr_string, sizeof(addr_string), 0, ec);\n    port = socket_ops::network_to_host_short(\n        reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_port);\n    break;\n  case ASIO_OS_DEF(AF_INET6):\n    socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6),\n        &reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_addr,\n        addr_string, sizeof(addr_string), 0, ec);\n    port = socket_ops::network_to_host_short(\n        reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_port);\n    break;\n  default:\n    ec = asio::error::address_family_not_supported;\n    return ec;\n  }\n\n  if (!ec) try\n  {\n    async_manager_.sync(impl.socket_->ConnectAsync(\n          ref new Windows::Networking::HostName(\n            winrt_utils::string(addr_string)),\n          winrt_utils::string(port)), ec);\n  }\n  catch (Platform::Exception^ e)\n  {\n    ec = asio::error_code(e->HResult,\n        asio::system_category());\n  }\n\n  return ec;\n}\n\nvoid winrt_ssocket_service_base::start_connect_op(\n    
winrt_ssocket_service_base::base_implementation_type& impl,\n    const void* addr, winrt_async_op<void>* op, bool is_continuation)\n{\n  if (!is_open(impl))\n  {\n    op->ec_ = asio::error::bad_descriptor;\n    scheduler_.post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  char addr_string[max_addr_v6_str_len];\n  unsigned short port = 0;\n  switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family)\n  {\n  case ASIO_OS_DEF(AF_INET):\n    socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET),\n        &reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_addr,\n        addr_string, sizeof(addr_string), 0, op->ec_);\n    port = socket_ops::network_to_host_short(\n        reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_port);\n    break;\n  case ASIO_OS_DEF(AF_INET6):\n    socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6),\n        &reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_addr,\n        addr_string, sizeof(addr_string), 0, op->ec_);\n    port = socket_ops::network_to_host_short(\n        reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_port);\n    break;\n  default:\n    op->ec_ = asio::error::address_family_not_supported;\n    break;\n  }\n\n  if (op->ec_)\n  {\n    scheduler_.post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  try\n  {\n    async_manager_.async(impl.socket_->ConnectAsync(\n          ref new Windows::Networking::HostName(\n            winrt_utils::string(addr_string)),\n          winrt_utils::string(port)), op);\n  }\n  catch (Platform::Exception^ e)\n  {\n    op->ec_ = asio::error_code(\n        e->HResult, asio::system_category());\n    scheduler_.post_immediate_completion(op, is_continuation);\n  }\n}\n\nstd::size_t winrt_ssocket_service_base::do_send(\n    winrt_ssocket_service_base::base_implementation_type& impl,\n    const asio::const_buffer& data,\n    socket_base::message_flags flags, asio::error_code& ec)\n{\n  if (flags)\n  {\n    ec = asio::error::operation_not_supported;\n    
return 0;\n  }\n\n  if (!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  try\n  {\n    buffer_sequence_adapter<asio::const_buffer,\n      asio::const_buffer> bufs(asio::buffer(data));\n\n    if (bufs.all_empty())\n    {\n      ec = asio::error_code();\n      return 0;\n    }\n\n    return async_manager_.sync(\n        impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), ec);\n  }\n  catch (Platform::Exception^ e)\n  {\n    ec = asio::error_code(e->HResult,\n        asio::system_category());\n    return 0;\n  }\n}\n\nvoid winrt_ssocket_service_base::start_send_op(\n      winrt_ssocket_service_base::base_implementation_type& impl,\n      const asio::const_buffer& data, socket_base::message_flags flags,\n      winrt_async_op<unsigned int>* op, bool is_continuation)\n{\n  if (flags)\n  {\n    op->ec_ = asio::error::operation_not_supported;\n    scheduler_.post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  if (!is_open(impl))\n  {\n    op->ec_ = asio::error::bad_descriptor;\n    scheduler_.post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  try\n  {\n    buffer_sequence_adapter<asio::const_buffer,\n        asio::const_buffer> bufs(asio::buffer(data));\n\n    if (bufs.all_empty())\n    {\n      scheduler_.post_immediate_completion(op, is_continuation);\n      return;\n    }\n\n    async_manager_.async(\n        impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), op);\n  }\n  catch (Platform::Exception^ e)\n  {\n    op->ec_ = asio::error_code(e->HResult,\n        asio::system_category());\n    scheduler_.post_immediate_completion(op, is_continuation);\n  }\n}\n\nstd::size_t winrt_ssocket_service_base::do_receive(\n    winrt_ssocket_service_base::base_implementation_type& impl,\n    const asio::mutable_buffer& data,\n    socket_base::message_flags flags, asio::error_code& ec)\n{\n  if (flags)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  if 
(!is_open(impl))\n  {\n    ec = asio::error::bad_descriptor;\n    return 0;\n  }\n\n  try\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        asio::mutable_buffer> bufs(asio::buffer(data));\n\n    if (bufs.all_empty())\n    {\n      ec = asio::error_code();\n      return 0;\n    }\n\n    async_manager_.sync(\n        impl.socket_->InputStream->ReadAsync(\n          bufs.buffers()[0], bufs.buffers()[0]->Capacity,\n          Windows::Storage::Streams::InputStreamOptions::Partial), ec);\n\n    std::size_t bytes_transferred = bufs.buffers()[0]->Length;\n    if (bytes_transferred == 0 && !ec)\n    {\n      ec = asio::error::eof;\n    }\n\n    return bytes_transferred;\n  }\n  catch (Platform::Exception^ e)\n  {\n    ec = asio::error_code(e->HResult,\n        asio::system_category());\n    return 0;\n  }\n}\n\nvoid winrt_ssocket_service_base::start_receive_op(\n      winrt_ssocket_service_base::base_implementation_type& impl,\n      const asio::mutable_buffer& data, socket_base::message_flags flags,\n      winrt_async_op<Windows::Storage::Streams::IBuffer^>* op,\n      bool is_continuation)\n{\n  if (flags)\n  {\n    op->ec_ = asio::error::operation_not_supported;\n    scheduler_.post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  if (!is_open(impl))\n  {\n    op->ec_ = asio::error::bad_descriptor;\n    scheduler_.post_immediate_completion(op, is_continuation);\n    return;\n  }\n\n  try\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        asio::mutable_buffer> bufs(asio::buffer(data));\n\n    if (bufs.all_empty())\n    {\n      scheduler_.post_immediate_completion(op, is_continuation);\n      return;\n    }\n\n    async_manager_.async(\n        impl.socket_->InputStream->ReadAsync(\n          bufs.buffers()[0], bufs.buffers()[0]->Capacity,\n          Windows::Storage::Streams::InputStreamOptions::Partial), op);\n  }\n  catch (Platform::Exception^ e)\n  {\n    op->ec_ = asio::error_code(e->HResult,\n        
asio::system_category());\n    scheduler_.post_immediate_completion(op, is_continuation);\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/winrt_timer_scheduler.hpp",
    "content": "//\n// detail/impl/winrt_timer_scheduler.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP\n#define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Time_Traits>\nvoid winrt_timer_scheduler::add_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_add_timer_queue(queue);\n}\n\n// Remove a timer queue from the reactor.\ntemplate <typename Time_Traits>\nvoid winrt_timer_scheduler::remove_timer_queue(timer_queue<Time_Traits>& queue)\n{\n  do_remove_timer_queue(queue);\n}\n\ntemplate <typename Time_Traits>\nvoid winrt_timer_scheduler::schedule_timer(timer_queue<Time_Traits>& queue,\n    const typename Time_Traits::time_type& time,\n    typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n\n  if (shutdown_)\n  {\n    scheduler_.post_immediate_completion(op, false);\n    return;\n  }\n\n  bool earliest = queue.enqueue_timer(time, timer, op);\n  scheduler_.work_started();\n  if (earliest)\n    event_.signal(lock);\n}\n\ntemplate <typename Time_Traits>\nstd::size_t winrt_timer_scheduler::cancel_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& timer,\n    std::size_t max_cancelled)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);\n  lock.unlock();\n  
scheduler_.post_deferred_completions(ops);\n  return n;\n}\n\ntemplate <typename Time_Traits>\nvoid winrt_timer_scheduler::move_timer(timer_queue<Time_Traits>& queue,\n    typename timer_queue<Time_Traits>::per_timer_data& to,\n    typename timer_queue<Time_Traits>::per_timer_data& from)\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  op_queue<operation> ops;\n  queue.cancel_timer(to, ops);\n  queue.move_timer(to, from);\n  lock.unlock();\n  scheduler_.post_deferred_completions(ops);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/winrt_timer_scheduler.ipp",
    "content": "//\n// detail/impl/winrt_timer_scheduler.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP\n#define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/winrt_timer_scheduler.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nwinrt_timer_scheduler::winrt_timer_scheduler(execution_context& context)\n  : execution_context_service_base<winrt_timer_scheduler>(context),\n    scheduler_(use_service<scheduler_impl>(context)),\n    mutex_(),\n    event_(),\n    timer_queues_(),\n    thread_(0),\n    stop_thread_(false),\n    shutdown_(false)\n{\n  thread_ = new asio::detail::thread(\n      bind_handler(&winrt_timer_scheduler::call_run_thread, this));\n}\n\nwinrt_timer_scheduler::~winrt_timer_scheduler()\n{\n  shutdown();\n}\n\nvoid winrt_timer_scheduler::shutdown()\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  shutdown_ = true;\n  stop_thread_ = true;\n  event_.signal(lock);\n  lock.unlock();\n\n  if (thread_)\n  {\n    thread_->join();\n    delete thread_;\n    thread_ = 0;\n  }\n\n  op_queue<operation> ops;\n  timer_queues_.get_all_timers(ops);\n  scheduler_.abandon_operations(ops);\n}\n\nvoid winrt_timer_scheduler::notify_fork(execution_context::fork_event)\n{\n}\n\nvoid winrt_timer_scheduler::init_task()\n{\n}\n\nvoid winrt_timer_scheduler::run_thread()\n{\n  asio::detail::mutex::scoped_lock lock(mutex_);\n  while (!stop_thread_)\n  {\n    const long 
max_wait_duration = 5 * 60 * 1000000;\n    long wait_duration = timer_queues_.wait_duration_usec(max_wait_duration);\n    event_.wait_for_usec(lock, wait_duration);\n    event_.clear(lock);\n    op_queue<operation> ops;\n    timer_queues_.get_ready_timers(ops);\n    if (!ops.empty())\n    {\n      lock.unlock();\n      scheduler_.post_deferred_completions(ops);\n      lock.lock();\n    }\n  }\n}\n\nvoid winrt_timer_scheduler::call_run_thread(winrt_timer_scheduler* scheduler)\n{\n  scheduler->run_thread();\n}\n\nvoid winrt_timer_scheduler::do_add_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.insert(&queue);\n}\n\nvoid winrt_timer_scheduler::do_remove_timer_queue(timer_queue_base& queue)\n{\n  mutex::scoped_lock lock(mutex_);\n  timer_queues_.erase(&queue);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/impl/winsock_init.ipp",
    "content": "//\n// detail/impl/winsock_init.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP\n#define ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/winsock_init.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nvoid winsock_init_base::startup(data& d,\n    unsigned char major, unsigned char minor)\n{\n  if (::InterlockedIncrement(&d.init_count_) == 1)\n  {\n    WSADATA wsa_data;\n    long result = ::WSAStartup(MAKEWORD(major, minor), &wsa_data);\n    ::InterlockedExchange(&d.result_, result);\n  }\n}\n\nvoid winsock_init_base::manual_startup(data& d)\n{\n  if (::InterlockedIncrement(&d.init_count_) == 1)\n  {\n    ::InterlockedExchange(&d.result_, 0);\n  }\n}\n\nvoid winsock_init_base::cleanup(data& d)\n{\n  if (::InterlockedDecrement(&d.init_count_) == 0)\n  {\n    ::WSACleanup();\n  }\n}\n\nvoid winsock_init_base::manual_cleanup(data& d)\n{\n  ::InterlockedDecrement(&d.init_count_);\n}\n\nvoid winsock_init_base::throw_on_error(data& d)\n{\n  long result = ::InterlockedExchangeAdd(&d.result_, 0);\n  if (result != 0)\n  {\n    asio::error_code ec(result,\n        asio::error::get_system_category());\n    asio::detail::throw_error(ec, \"winsock\");\n  }\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#endif // 
ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP\n"
  },
  {
    "path": "src/third_party/asio/detail/io_control.hpp",
    "content": "//\n// detail/io_control.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IO_CONTROL_HPP\n#define ASIO_DETAIL_IO_CONTROL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\nnamespace io_control {\n\n// I/O control command for getting number of bytes available.\nclass bytes_readable\n{\npublic:\n  // Default constructor.\n  bytes_readable()\n    : value_(0)\n  {\n  }\n\n  // Construct with a specific command value.\n  bytes_readable(std::size_t value)\n    : value_(static_cast<detail::ioctl_arg_type>(value))\n  {\n  }\n\n  // Get the name of the IO control command.\n  int name() const\n  {\n    return static_cast<int>(ASIO_OS_DEF(FIONREAD));\n  }\n\n  // Set the value of the I/O control command.\n  void set(std::size_t value)\n  {\n    value_ = static_cast<detail::ioctl_arg_type>(value);\n  }\n\n  // Get the current value of the I/O control command.\n  std::size_t get() const\n  {\n    return static_cast<std::size_t>(value_);\n  }\n\n  // Get the address of the command data.\n  detail::ioctl_arg_type* data()\n  {\n    return &value_;\n  }\n\n  // Get the address of the command data.\n  const detail::ioctl_arg_type* data() const\n  {\n    return &value_;\n  }\n\nprivate:\n  detail::ioctl_arg_type value_;\n};\n\n} // namespace io_control\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IO_CONTROL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/io_object_executor.hpp",
    "content": "//\n// io_object_executor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IO_OBJECT_EXECUTOR_HPP\n#define ASIO_DETAIL_IO_OBJECT_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Wrap the (potentially polymorphic) executor so that we can bypass it when\n// dispatching on a target executor that has a native I/O implementation.\ntemplate <typename Executor>\nclass io_object_executor\n{\npublic:\n  io_object_executor(const Executor& ex,\n      bool native_implementation) ASIO_NOEXCEPT\n    : executor_(ex),\n      has_native_impl_(native_implementation)\n  {\n  }\n\n  io_object_executor(const io_object_executor& other) ASIO_NOEXCEPT\n    : executor_(other.executor_),\n      has_native_impl_(other.has_native_impl_)\n  {\n  }\n\n  template <typename Executor1>\n  io_object_executor(\n      const io_object_executor<Executor1>& other) ASIO_NOEXCEPT\n    : executor_(other.inner_executor()),\n      has_native_impl_(other.has_native_implementation())\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  io_object_executor(io_object_executor&& other) ASIO_NOEXCEPT\n    : executor_(ASIO_MOVE_CAST(Executor)(other.executor_)),\n      has_native_impl_(other.has_native_impl_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  const Executor& inner_executor() const ASIO_NOEXCEPT\n  {\n    return executor_;\n  }\n\n  bool has_native_implementation() const ASIO_NOEXCEPT\n  {\n    return has_native_impl_;\n 
 }\n\n  execution_context& context() const ASIO_NOEXCEPT\n  {\n    return executor_.context();\n  }\n\n  void on_work_started() const ASIO_NOEXCEPT\n  {\n    if (is_same<Executor, io_context::executor_type>::value\n        || has_native_impl_)\n    {\n      // When using a native implementation, work is already counted by the\n      // execution context.\n    }\n    else\n    {\n      executor_.on_work_started();\n    }\n  }\n\n  void on_work_finished() const ASIO_NOEXCEPT\n  {\n    if (is_same<Executor, io_context::executor_type>::value\n        || has_native_impl_)\n    {\n      // When using a native implementation, work is already counted by the\n      // execution context.\n    }\n    else\n    {\n      executor_.on_work_finished();\n    }\n  }\n\n  template <typename F, typename A>\n  void dispatch(ASIO_MOVE_ARG(F) f, const A& a) const\n  {\n    if (is_same<Executor, io_context::executor_type>::value\n        || has_native_impl_)\n    {\n      // When using a native implementation, I/O completion handlers are\n      // already dispatched according to the execution context's executor's\n      // rules. 
We can call the function directly.\n#if defined(ASIO_HAS_MOVE)\n      if (is_same<F, typename decay<F>::type>::value)\n      {\n        asio_handler_invoke_helpers::invoke(f, f);\n        return;\n      }\n#endif // defined(ASIO_HAS_MOVE)\n      typename decay<F>::type function(ASIO_MOVE_CAST(F)(f));\n      asio_handler_invoke_helpers::invoke(function, function);\n    }\n    else\n    {\n      executor_.dispatch(ASIO_MOVE_CAST(F)(f), a);\n    }\n  }\n\n  template <typename F, typename A>\n  void post(ASIO_MOVE_ARG(F) f, const A& a) const\n  {\n    executor_.post(ASIO_MOVE_CAST(F)(f), a);\n  }\n\n  template <typename F, typename A>\n  void defer(ASIO_MOVE_ARG(F) f, const A& a) const\n  {\n    executor_.defer(ASIO_MOVE_CAST(F)(f), a);\n  }\n\n  friend bool operator==(const io_object_executor& a,\n      const io_object_executor& b) ASIO_NOEXCEPT\n  {\n    return a.executor_ == b.executor_\n      && a.has_native_impl_ == b.has_native_impl_;\n  }\n\n  friend bool operator!=(const io_object_executor& a,\n      const io_object_executor& b) ASIO_NOEXCEPT\n  {\n    return a.executor_ != b.executor_\n      || a.has_native_impl_ != b.has_native_impl_;\n  }\n\nprivate:\n  Executor executor_;\n  const bool has_native_impl_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IO_OBJECT_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/io_object_impl.hpp",
    "content": "//\n// io_object_impl.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IO_OBJECT_IMPL_HPP\n#define ASIO_DETAIL_IO_OBJECT_IMPL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include <new>\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/io_object_executor.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nclass executor;\n\nnamespace detail {\n\ninline bool is_native_io_executor(const io_context::executor_type&)\n{\n  return true;\n}\n\ntemplate <typename Executor>\ninline bool is_native_io_executor(const Executor&,\n    typename enable_if<!is_same<Executor, executor>::value>::type* = 0)\n{\n  return false;\n}\n\ntemplate <typename Executor>\ninline bool is_native_io_executor(const Executor& ex,\n    typename enable_if<is_same<Executor, executor>::value>::type* = 0)\n{\n#if !defined (ASIO_NO_TYPEID)\n  return ex.target_type() == typeid(io_context::executor_type);\n#else // !defined (ASIO_NO_TYPEID)\n  return false;\n#endif // !defined (ASIO_NO_TYPEID)\n}\n\ntemplate <typename IoObjectService,\n    typename Executor = io_context::executor_type>\nclass io_object_impl\n{\npublic:\n  // The type of the service that will be used to provide I/O operations.\n  typedef IoObjectService service_type;\n\n  // The underlying implementation type of I/O object.\n  typedef typename service_type::implementation_type implementation_type;\n\n  // The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  // The type of executor to be used when implementing asynchronous operations.\n  typedef 
io_object_executor<Executor> implementation_executor_type;\n\n  // Construct an I/O object using an executor.\n  explicit io_object_impl(const executor_type& ex)\n    : service_(&asio::use_service<IoObjectService>(ex.context())),\n      implementation_executor_(ex, (is_native_io_executor)(ex))\n  {\n    service_->construct(implementation_);\n  }\n\n  // Construct an I/O object using an execution context.\n  template <typename ExecutionContext>\n  explicit io_object_impl(ExecutionContext& context,\n      typename enable_if<is_convertible<\n        ExecutionContext&, execution_context&>::value>::type* = 0)\n    : service_(&asio::use_service<IoObjectService>(context)),\n      implementation_executor_(context.get_executor(),\n        is_same<ExecutionContext, io_context>::value)\n  {\n    service_->construct(implementation_);\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  // Move-construct an I/O object.\n  io_object_impl(io_object_impl&& other)\n    : service_(&other.get_service()),\n      implementation_executor_(other.get_implementation_executor())\n  {\n    service_->move_construct(implementation_, other.implementation_);\n  }\n\n  // Perform a converting move-construction of an I/O object.\n  template <typename IoObjectService1, typename Executor1>\n  io_object_impl(io_object_impl<IoObjectService1, Executor1>&& other)\n    : service_(&asio::use_service<IoObjectService>(\n            other.get_implementation_executor().context())),\n      implementation_executor_(other.get_implementation_executor())\n  {\n    service_->converting_move_construct(implementation_,\n        other.get_service(), other.get_implementation());\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  // Destructor.\n  ~io_object_impl()\n  {\n    service_->destroy(implementation_);\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  // Move-assign an I/O object.\n  io_object_impl& operator=(io_object_impl&& other)\n  {\n    if (this != &other)\n    {\n      service_->move_assign(implementation_,\n          *other.service_, 
other.implementation_);\n      implementation_executor_.~implementation_executor_type();\n      new (&implementation_executor_) implementation_executor_type(\n          std::move(other.implementation_executor_));\n      service_ = other.service_;\n    }\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  // Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return implementation_executor_.inner_executor();\n  }\n\n  // Get the executor to be used when implementing asynchronous operations.\n  const implementation_executor_type& get_implementation_executor()\n    ASIO_NOEXCEPT\n  {\n    return implementation_executor_;\n  }\n\n  // Get the service associated with the I/O object.\n  service_type& get_service()\n  {\n    return *service_;\n  }\n\n  // Get the service associated with the I/O object.\n  const service_type& get_service() const\n  {\n    return *service_;\n  }\n\n  // Get the underlying implementation of the I/O object.\n  implementation_type& get_implementation()\n  {\n    return implementation_;\n  }\n\n  // Get the underlying implementation of the I/O object.\n  const implementation_type& get_implementation() const\n  {\n    return implementation_;\n  }\n\nprivate:\n  // Disallow copying and copy assignment.\n  io_object_impl(const io_object_impl&);\n  io_object_impl& operator=(const io_object_impl&);\n\n  // The service associated with the I/O object.\n  service_type* service_;\n\n  // The underlying implementation of the I/O object.\n  implementation_type implementation_;\n\n  // The associated executor.\n  implementation_executor_type implementation_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IO_OBJECT_IMPL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/is_buffer_sequence.hpp",
    "content": "//\n// detail/is_buffer_sequence.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IS_BUFFER_SEQUENCE_HPP\n#define ASIO_DETAIL_IS_BUFFER_SEQUENCE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nclass mutable_buffer;\nclass const_buffer;\n\nnamespace detail {\n\nstruct buffer_sequence_memfns_base\n{\n  void begin();\n  void end();\n  void size();\n  void max_size();\n  void capacity();\n  void data();\n  void prepare();\n  void commit();\n  void consume();\n  void grow();\n  void shrink();\n};\n\ntemplate <typename T>\nstruct buffer_sequence_memfns_derived\n  : T, buffer_sequence_memfns_base\n{\n};\n\ntemplate <typename T, T>\nstruct buffer_sequence_memfns_check\n{\n};\n\n#if defined(ASIO_HAS_DECLTYPE)\n\ntemplate <typename>\nchar buffer_sequence_begin_helper(...);\n\ntemplate <typename T>\nchar (&buffer_sequence_begin_helper(T* t,\n    typename enable_if<!is_same<\n      decltype(asio::buffer_sequence_begin(*t)),\n        void>::value>::type*))[2];\n\n#else // defined(ASIO_HAS_DECLTYPE)\n\ntemplate <typename>\nchar (&buffer_sequence_begin_helper(...))[2];\n\ntemplate <typename T>\nchar buffer_sequence_begin_helper(T* t,\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::begin>*);\n\n#endif // defined(ASIO_HAS_DECLTYPE)\n\n#if defined(ASIO_HAS_DECLTYPE)\n\ntemplate <typename>\nchar buffer_sequence_end_helper(...);\n\ntemplate <typename T>\nchar (&buffer_sequence_end_helper(T* t,\n    typename 
enable_if<!is_same<\n      decltype(asio::buffer_sequence_end(*t)),\n        void>::value>::type*))[2];\n\n#else // defined(ASIO_HAS_DECLTYPE)\n\ntemplate <typename>\nchar (&buffer_sequence_end_helper(...))[2];\n\ntemplate <typename T>\nchar buffer_sequence_end_helper(T* t,\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::end>*);\n\n#endif // defined(ASIO_HAS_DECLTYPE)\n\ntemplate <typename>\nchar (&size_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar size_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::size>*);\n\ntemplate <typename>\nchar (&max_size_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar max_size_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::max_size>*);\n\ntemplate <typename>\nchar (&capacity_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar capacity_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::capacity>*);\n\ntemplate <typename>\nchar (&data_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar data_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::data>*);\n\ntemplate <typename>\nchar (&prepare_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar prepare_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::prepare>*);\n\ntemplate <typename>\nchar (&commit_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar commit_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::commit>*);\n\ntemplate <typename>\nchar 
(&consume_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar consume_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::consume>*);\n\ntemplate <typename>\nchar (&grow_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar grow_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::grow>*);\n\ntemplate <typename>\nchar (&shrink_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar shrink_memfn_helper(\n    buffer_sequence_memfns_check<\n      void (buffer_sequence_memfns_base::*)(),\n      &buffer_sequence_memfns_derived<T>::shrink>*);\n\ntemplate <typename, typename>\nchar (&buffer_sequence_element_type_helper(...))[2];\n\n#if defined(ASIO_HAS_DECLTYPE)\n\ntemplate <typename T, typename Buffer>\nchar buffer_sequence_element_type_helper(T* t,\n    typename enable_if<is_convertible<\n      decltype(*asio::buffer_sequence_begin(*t)),\n        Buffer>::value>::type*);\n\n#else // defined(ASIO_HAS_DECLTYPE)\n\ntemplate <typename T, typename Buffer>\nchar buffer_sequence_element_type_helper(\n    typename T::const_iterator*,\n    typename enable_if<is_convertible<\n      typename T::value_type, Buffer>::value>::type*);\n\n#endif // defined(ASIO_HAS_DECLTYPE)\n\ntemplate <typename>\nchar (&const_buffers_type_typedef_helper(...))[2];\n\ntemplate <typename T>\nchar const_buffers_type_typedef_helper(\n    typename T::const_buffers_type*);\n\ntemplate <typename>\nchar (&mutable_buffers_type_typedef_helper(...))[2];\n\ntemplate <typename T>\nchar mutable_buffers_type_typedef_helper(\n    typename T::mutable_buffers_type*);\n\ntemplate <typename T, typename Buffer>\nstruct is_buffer_sequence_class\n  : integral_constant<bool,\n      sizeof(buffer_sequence_begin_helper<T>(0, 0)) != 1 &&\n      sizeof(buffer_sequence_end_helper<T>(0, 0)) != 1 &&\n      sizeof(buffer_sequence_element_type_helper<T, 
Buffer>(0, 0)) == 1>\n{\n};\n\ntemplate <typename T, typename Buffer>\nstruct is_buffer_sequence\n  : conditional<is_class<T>::value,\n      is_buffer_sequence_class<T, Buffer>,\n      false_type>::type\n{\n};\n\ntemplate <>\nstruct is_buffer_sequence<mutable_buffer, mutable_buffer>\n  : true_type\n{\n};\n\ntemplate <>\nstruct is_buffer_sequence<mutable_buffer, const_buffer>\n  : true_type\n{\n};\n\ntemplate <>\nstruct is_buffer_sequence<const_buffer, const_buffer>\n  : true_type\n{\n};\n\ntemplate <>\nstruct is_buffer_sequence<const_buffer, mutable_buffer>\n  : false_type\n{\n};\n\ntemplate <typename T>\nstruct is_dynamic_buffer_class_v1\n  : integral_constant<bool,\n      sizeof(size_memfn_helper<T>(0)) != 1 &&\n      sizeof(max_size_memfn_helper<T>(0)) != 1 &&\n      sizeof(capacity_memfn_helper<T>(0)) != 1 &&\n      sizeof(data_memfn_helper<T>(0)) != 1 &&\n      sizeof(consume_memfn_helper<T>(0)) != 1 &&\n      sizeof(prepare_memfn_helper<T>(0)) != 1 &&\n      sizeof(commit_memfn_helper<T>(0)) != 1 &&\n      sizeof(const_buffers_type_typedef_helper<T>(0)) == 1 &&\n      sizeof(mutable_buffers_type_typedef_helper<T>(0)) == 1>\n{\n};\n\ntemplate <typename T>\nstruct is_dynamic_buffer_v1\n  : conditional<is_class<T>::value,\n      is_dynamic_buffer_class_v1<T>,\n      false_type>::type\n{\n};\n\ntemplate <typename T>\nstruct is_dynamic_buffer_class_v2\n  : integral_constant<bool,\n      sizeof(size_memfn_helper<T>(0)) != 1 &&\n      sizeof(max_size_memfn_helper<T>(0)) != 1 &&\n      sizeof(capacity_memfn_helper<T>(0)) != 1 &&\n      sizeof(data_memfn_helper<T>(0)) != 1 &&\n      sizeof(consume_memfn_helper<T>(0)) != 1 &&\n      sizeof(grow_memfn_helper<T>(0)) != 1 &&\n      sizeof(shrink_memfn_helper<T>(0)) != 1 &&\n      sizeof(const_buffers_type_typedef_helper<T>(0)) == 1 &&\n      sizeof(mutable_buffers_type_typedef_helper<T>(0)) == 1>\n{\n};\n\ntemplate <typename T>\nstruct is_dynamic_buffer_v2\n  : conditional<is_class<T>::value,\n      
is_dynamic_buffer_class_v2<T>,\n      false_type>::type\n{\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IS_BUFFER_SEQUENCE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/is_executor.hpp",
    "content": "//\n// detail/is_executor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_IS_EXECUTOR_HPP\n#define ASIO_DETAIL_IS_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct executor_memfns_base\n{\n  void context();\n  void on_work_started();\n  void on_work_finished();\n  void dispatch();\n  void post();\n  void defer();\n};\n\ntemplate <typename T>\nstruct executor_memfns_derived\n  : T, executor_memfns_base\n{\n};\n\ntemplate <typename T, T>\nstruct executor_memfns_check\n{\n};\n\ntemplate <typename>\nchar (&context_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar context_memfn_helper(\n    executor_memfns_check<\n      void (executor_memfns_base::*)(),\n      &executor_memfns_derived<T>::context>*);\n\ntemplate <typename>\nchar (&on_work_started_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar on_work_started_memfn_helper(\n    executor_memfns_check<\n      void (executor_memfns_base::*)(),\n      &executor_memfns_derived<T>::on_work_started>*);\n\ntemplate <typename>\nchar (&on_work_finished_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar on_work_finished_memfn_helper(\n    executor_memfns_check<\n      void (executor_memfns_base::*)(),\n      &executor_memfns_derived<T>::on_work_finished>*);\n\ntemplate <typename>\nchar (&dispatch_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar dispatch_memfn_helper(\n    executor_memfns_check<\n      void (executor_memfns_base::*)(),\n      &executor_memfns_derived<T>::dispatch>*);\n\ntemplate 
<typename>\nchar (&post_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar post_memfn_helper(\n    executor_memfns_check<\n      void (executor_memfns_base::*)(),\n      &executor_memfns_derived<T>::post>*);\n\ntemplate <typename>\nchar (&defer_memfn_helper(...))[2];\n\ntemplate <typename T>\nchar defer_memfn_helper(\n    executor_memfns_check<\n      void (executor_memfns_base::*)(),\n      &executor_memfns_derived<T>::defer>*);\n\ntemplate <typename T>\nstruct is_executor_class\n  : integral_constant<bool,\n      sizeof(context_memfn_helper<T>(0)) != 1 &&\n      sizeof(on_work_started_memfn_helper<T>(0)) != 1 &&\n      sizeof(on_work_finished_memfn_helper<T>(0)) != 1 &&\n      sizeof(dispatch_memfn_helper<T>(0)) != 1 &&\n      sizeof(post_memfn_helper<T>(0)) != 1 &&\n      sizeof(defer_memfn_helper<T>(0)) != 1>\n{\n};\n\ntemplate <typename T>\nstruct is_executor\n  : conditional<is_class<T>::value,\n      is_executor_class<T>,\n      false_type>::type\n{\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_IS_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/keyword_tss_ptr.hpp",
    "content": "//\n// detail/keyword_tss_ptr.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_KEYWORD_TSS_PTR_HPP\n#define ASIO_DETAIL_KEYWORD_TSS_PTR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nclass keyword_tss_ptr\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  keyword_tss_ptr()\n  {\n  }\n\n  // Destructor.\n  ~keyword_tss_ptr()\n  {\n  }\n\n  // Get the value.\n  operator T*() const\n  {\n    return value_;\n  }\n\n  // Set the value.\n  void operator=(T* value)\n  {\n    value_ = value;\n  }\n\nprivate:\n  static ASIO_THREAD_KEYWORD T* value_;\n};\n\ntemplate <typename T>\nASIO_THREAD_KEYWORD T* keyword_tss_ptr<T>::value_;\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION)\n\n#endif // ASIO_DETAIL_KEYWORD_TSS_PTR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/kqueue_reactor.hpp",
    "content": "//\n// detail/kqueue_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_KQUEUE_REACTOR_HPP\n#define ASIO_DETAIL_KQUEUE_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_KQUEUE)\n\n#include <cstddef>\n#include <sys/types.h>\n#include <sys/event.h>\n#include <sys/time.h>\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/object_pool.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/select_interrupter.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/timer_queue_base.hpp\"\n#include \"asio/detail/timer_queue_set.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n\n// Older versions of Mac OS X may not define EV_OOBAND.\n#if !defined(EV_OOBAND)\n# define EV_OOBAND EV_FLAG1\n#endif // !defined(EV_OOBAND)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass scheduler;\n\nclass kqueue_reactor\n  : public execution_context_service_base<kqueue_reactor>\n{\nprivate:\n  // The mutex type used by this reactor.\n  typedef conditionally_enabled_mutex mutex;\n\npublic:\n  enum op_types { read_op = 0, write_op = 1,\n    connect_op = 1, except_op = 2, max_ops = 3 };\n\n  // Per-descriptor queues.\n  struct descriptor_state\n  {\n    descriptor_state(bool locking) : mutex_(locking) {}\n\n    friend class kqueue_reactor;\n    friend class object_pool_access;\n\n    descriptor_state* 
next_;\n    descriptor_state* prev_;\n\n    mutex mutex_;\n    int descriptor_;\n    int num_kevents_; // 1 == read only, 2 == read and write\n    op_queue<reactor_op> op_queue_[max_ops];\n    bool shutdown_;\n  };\n\n  // Per-descriptor data.\n  typedef descriptor_state* per_descriptor_data;\n\n  // Constructor.\n  ASIO_DECL kqueue_reactor(asio::execution_context& ctx);\n\n  // Destructor.\n  ASIO_DECL ~kqueue_reactor();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Recreate internal descriptors following a fork.\n  ASIO_DECL void notify_fork(\n      asio::execution_context::fork_event fork_ev);\n\n  // Initialise the task.\n  ASIO_DECL void init_task();\n\n  // Register a socket with the reactor. Returns 0 on success, system error\n  // code on failure.\n  ASIO_DECL int register_descriptor(socket_type descriptor,\n      per_descriptor_data& descriptor_data);\n\n  // Register a descriptor with an associated single operation. Returns 0 on\n  // success, system error code on failure.\n  ASIO_DECL int register_internal_descriptor(\n      int op_type, socket_type descriptor,\n      per_descriptor_data& descriptor_data, reactor_op* op);\n\n  // Move descriptor registration from one descriptor_data object to another.\n  ASIO_DECL void move_descriptor(socket_type descriptor,\n      per_descriptor_data& target_descriptor_data,\n      per_descriptor_data& source_descriptor_data);\n\n  // Post a reactor operation for immediate completion.\n  void post_immediate_completion(reactor_op* op, bool is_continuation)\n  {\n    scheduler_.post_immediate_completion(op, is_continuation);\n  }\n\n  // Start a new operation. 
The reactor operation will be performed when the\n  // given descriptor is flagged as ready, or an error has occurred.\n  ASIO_DECL void start_op(int op_type, socket_type descriptor,\n      per_descriptor_data& descriptor_data, reactor_op* op,\n      bool is_continuation, bool allow_speculative);\n\n  // Cancel all operations associated with the given descriptor. The\n  // handlers associated with the descriptor will be invoked with the\n  // operation_aborted error.\n  ASIO_DECL void cancel_ops(socket_type descriptor,\n      per_descriptor_data& descriptor_data);\n\n  // Cancel any operations that are running against the descriptor and remove\n  // its registration from the reactor. The reactor resources associated with\n  // the descriptor must be released by calling cleanup_descriptor_data.\n  ASIO_DECL void deregister_descriptor(socket_type descriptor,\n      per_descriptor_data& descriptor_data, bool closing);\n\n  // Remove the descriptor's registration from the reactor. The reactor\n  // resources associated with the descriptor must be released by calling\n  // cleanup_descriptor_data.\n  ASIO_DECL void deregister_internal_descriptor(\n      socket_type descriptor, per_descriptor_data& descriptor_data);\n\n  // Perform any post-deregistration cleanup tasks associated with the\n  // descriptor data.\n  ASIO_DECL void cleanup_descriptor_data(\n      per_descriptor_data& descriptor_data);\n\n  // Add a new timer queue to the reactor.\n  template <typename Time_Traits>\n  void add_timer_queue(timer_queue<Time_Traits>& queue);\n\n  // Remove a timer queue from the reactor.\n  template <typename Time_Traits>\n  void remove_timer_queue(timer_queue<Time_Traits>& queue);\n\n  // Schedule a new operation in the given timer queue to expire at the\n  // specified absolute time.\n  template <typename Time_Traits>\n  void schedule_timer(timer_queue<Time_Traits>& queue,\n      const typename Time_Traits::time_type& time,\n      typename 
timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op);\n\n  // Cancel the timer operations associated with the given token. Returns the\n  // number of operations that have been posted or dispatched.\n  template <typename Time_Traits>\n  std::size_t cancel_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& timer,\n      std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());\n\n  // Move the timer operations associated with the given timer.\n  template <typename Time_Traits>\n  void move_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& target,\n      typename timer_queue<Time_Traits>::per_timer_data& source);\n\n  // Run the kqueue loop.\n  ASIO_DECL void run(long usec, op_queue<operation>& ops);\n\n  // Interrupt the kqueue loop.\n  ASIO_DECL void interrupt();\n\nprivate:\n  // Create the kqueue file descriptor. Throws an exception if the descriptor\n  // cannot be created.\n  ASIO_DECL static int do_kqueue_create();\n\n  // Allocate a new descriptor state object.\n  ASIO_DECL descriptor_state* allocate_descriptor_state();\n\n  // Free an existing descriptor state object.\n  ASIO_DECL void free_descriptor_state(descriptor_state* s);\n\n  // Helper function to add a new timer queue.\n  ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);\n\n  // Helper function to remove a timer queue.\n  ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);\n\n  // Get the timeout value for the kevent call.\n  ASIO_DECL timespec* get_timeout(long usec, timespec& ts);\n\n  // The scheduler used to post completions.\n  scheduler& scheduler_;\n\n  // Mutex to protect access to internal data.\n  mutex mutex_;\n\n  // The kqueue file descriptor.\n  int kqueue_fd_;\n\n  // The interrupter is used to break a blocking kevent call.\n  select_interrupter interrupter_;\n\n  // The timer queues.\n  timer_queue_set timer_queues_;\n\n  // Whether the service has 
been shut down.\n  bool shutdown_;\n\n  // Mutex to protect access to the registered descriptors.\n  mutex registered_descriptors_mutex_;\n\n  // Keep track of all registered descriptors.\n  object_pool<descriptor_state> registered_descriptors_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/detail/impl/kqueue_reactor.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/kqueue_reactor.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_KQUEUE)\n\n#endif // ASIO_DETAIL_KQUEUE_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/limits.hpp",
    "content": "//\n// detail/limits.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_LIMITS_HPP\n#define ASIO_DETAIL_LIMITS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_LIMITS)\n# include <boost/limits.hpp>\n#else // defined(ASIO_HAS_BOOST_LIMITS)\n# include <limits>\n#endif // defined(ASIO_HAS_BOOST_LIMITS)\n\n#endif // ASIO_DETAIL_LIMITS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/local_free_on_block_exit.hpp",
    "content": "//\n// detail/local_free_on_block_exit.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP\n#define ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n#if !defined(ASIO_WINDOWS_APP)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass local_free_on_block_exit\n  : private noncopyable\n{\npublic:\n  // Constructor blocks all signals for the calling thread.\n  explicit local_free_on_block_exit(void* p)\n    : p_(p)\n  {\n  }\n\n  // Destructor restores the previous signal mask.\n  ~local_free_on_block_exit()\n  {\n    ::LocalFree(p_);\n  }\n\nprivate:\n  void* p_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS_APP)\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/macos_fenced_block.hpp",
    "content": "//\n// detail/macos_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(__MACH__) && defined(__APPLE__)\n\n#include <libkern/OSAtomic.h>\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass macos_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_t { half };\n  enum full_t { full };\n\n  // Constructor for a half fenced block.\n  explicit macos_fenced_block(half_t)\n  {\n  }\n\n  // Constructor for a full fenced block.\n  explicit macos_fenced_block(full_t)\n  {\n    OSMemoryBarrier();\n  }\n\n  // Destructor.\n  ~macos_fenced_block()\n  {\n    OSMemoryBarrier();\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(__MACH__) && defined(__APPLE__)\n\n#endif // ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/memory.hpp",
    "content": "//\n// detail/memory.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_MEMORY_HPP\n#define ASIO_DETAIL_MEMORY_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <memory>\n\n#if !defined(ASIO_HAS_STD_SHARED_PTR)\n# include <boost/shared_ptr.hpp>\n# include <boost/weak_ptr.hpp>\n#endif // !defined(ASIO_HAS_STD_SHARED_PTR)\n\n#if !defined(ASIO_HAS_STD_ADDRESSOF)\n# include <boost/utility/addressof.hpp>\n#endif // !defined(ASIO_HAS_STD_ADDRESSOF)\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_HAS_STD_SHARED_PTR)\nusing std::shared_ptr;\nusing std::weak_ptr;\n#else // defined(ASIO_HAS_STD_SHARED_PTR)\nusing boost::shared_ptr;\nusing boost::weak_ptr;\n#endif // defined(ASIO_HAS_STD_SHARED_PTR)\n\n#if defined(ASIO_HAS_STD_ADDRESSOF)\nusing std::addressof;\n#else // defined(ASIO_HAS_STD_ADDRESSOF)\nusing boost::addressof;\n#endif // defined(ASIO_HAS_STD_ADDRESSOF)\n\n} // namespace detail\n\n#if defined(ASIO_HAS_CXX11_ALLOCATORS)\nusing std::allocator_arg_t;\n# define ASIO_USES_ALLOCATOR(t) \\\n  namespace std { \\\n    template <typename Allocator> \\\n    struct uses_allocator<t, Allocator> : true_type {}; \\\n  } \\\n  /**/\n# define ASIO_REBIND_ALLOC(alloc, t) \\\n  typename std::allocator_traits<alloc>::template rebind_alloc<t>\n  /**/\n#else // defined(ASIO_HAS_CXX11_ALLOCATORS)\nstruct allocator_arg_t {};\n# define ASIO_USES_ALLOCATOR(t)\n# define ASIO_REBIND_ALLOC(alloc, t) \\\n  typename alloc::template rebind<t>::other\n  /**/\n#endif // defined(ASIO_HAS_CXX11_ALLOCATORS)\n\n} // namespace asio\n\n#endif // ASIO_DETAIL_MEMORY_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/mutex.hpp",
    "content": "//\n// detail/mutex.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_MUTEX_HPP\n#define ASIO_DETAIL_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n# include \"asio/detail/null_mutex.hpp\"\n#elif defined(ASIO_WINDOWS)\n# include \"asio/detail/win_mutex.hpp\"\n#elif defined(ASIO_HAS_PTHREADS)\n# include \"asio/detail/posix_mutex.hpp\"\n#elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n# include \"asio/detail/std_mutex.hpp\"\n#else\n# error Only Windows, POSIX and std::mutex are supported!\n#endif\n\nnamespace asio {\nnamespace detail {\n\n#if !defined(ASIO_HAS_THREADS)\ntypedef null_mutex mutex;\n#elif defined(ASIO_WINDOWS)\ntypedef win_mutex mutex;\n#elif defined(ASIO_HAS_PTHREADS)\ntypedef posix_mutex mutex;\n#elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\ntypedef std_mutex mutex;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/non_const_lvalue.hpp",
    "content": "//\n// detail/non_const_lvalue.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NON_CONST_LVALUE_HPP\n#define ASIO_DETAIL_NON_CONST_LVALUE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nstruct non_const_lvalue\n{\n#if defined(ASIO_HAS_MOVE)\n  explicit non_const_lvalue(T& t)\n    : value(static_cast<typename conditional<\n        is_same<T, typename decay<T>::type>::value,\n          typename decay<T>::type&, T&&>::type>(t))\n  {\n  }\n\n  typename conditional<is_same<T, typename decay<T>::type>::value,\n      typename decay<T>::type&, typename decay<T>::type>::type value;\n#else // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  explicit non_const_lvalue(const typename decay<T>::type& t)\n    : value(t)\n  {\n  }\n\n  typename decay<T>::type value;\n#endif // defined(ASIO_HAS_MOVE)\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_NON_CONST_LVALUE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/noncopyable.hpp",
    "content": "//\n// detail/noncopyable.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NONCOPYABLE_HPP\n#define ASIO_DETAIL_NONCOPYABLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass noncopyable\n{\nprotected:\n  noncopyable() {}\n  ~noncopyable() {}\nprivate:\n  noncopyable(const noncopyable&);\n  const noncopyable& operator=(const noncopyable&);\n};\n\n} // namespace detail\n\nusing asio::detail::noncopyable;\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_NONCOPYABLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_event.hpp",
    "content": "//\n// detail/null_event.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_EVENT_HPP\n#define ASIO_DETAIL_NULL_EVENT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass null_event\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  null_event()\n  {\n  }\n\n  // Destructor.\n  ~null_event()\n  {\n  }\n\n  // Signal the event. (Retained for backward compatibility.)\n  template <typename Lock>\n  void signal(Lock&)\n  {\n  }\n\n  // Signal all waiters.\n  template <typename Lock>\n  void signal_all(Lock&)\n  {\n  }\n\n  // Unlock the mutex and signal one waiter.\n  template <typename Lock>\n  void unlock_and_signal_one(Lock&)\n  {\n  }\n\n  // If there's a waiter, unlock the mutex and signal it.\n  template <typename Lock>\n  bool maybe_unlock_and_signal_one(Lock&)\n  {\n    return false;\n  }\n\n  // Reset the event.\n  template <typename Lock>\n  void clear(Lock&)\n  {\n  }\n\n  // Wait for the event to become signalled.\n  template <typename Lock>\n  void wait(Lock&)\n  {\n    do_wait();\n  }\n\n  // Timed wait for the event to become signalled.\n  template <typename Lock>\n  bool wait_for_usec(Lock&, long usec)\n  {\n    do_wait_for_usec(usec);\n    return true;\n  }\n\nprivate:\n  ASIO_DECL static void do_wait();\n  ASIO_DECL static void do_wait_for_usec(long usec);\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/null_event.ipp\"\n#endif // 
defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_NULL_EVENT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_fenced_block.hpp",
    "content": "//\n// detail/null_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_NULL_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass null_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_or_full_t { half, full };\n\n  // Constructor.\n  explicit null_fenced_block(half_or_full_t)\n  {\n  }\n\n  // Destructor.\n  ~null_fenced_block()\n  {\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_NULL_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_global.hpp",
    "content": "//\n// detail/null_global.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_GLOBAL_HPP\n#define ASIO_DETAIL_NULL_GLOBAL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nstruct null_global_impl\n{\n  null_global_impl()\n    : ptr_(0)\n  {\n  }\n\n  // Destructor automatically cleans up the global.\n  ~null_global_impl()\n  {\n    delete ptr_;\n  }\n\n  static null_global_impl instance_;\n  T* ptr_;\n};\n\ntemplate <typename T>\nnull_global_impl<T> null_global_impl<T>::instance_;\n\ntemplate <typename T>\nT& null_global()\n{\n  if (null_global_impl<T>::instance_.ptr_ == 0)\n    null_global_impl<T>::instance_.ptr_ = new T;\n  return *null_global_impl<T>::instance_.ptr_;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_NULL_GLOBAL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_mutex.hpp",
    "content": "//\n// detail/null_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_MUTEX_HPP\n#define ASIO_DETAIL_NULL_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass null_mutex\n  : private noncopyable\n{\npublic:\n  typedef asio::detail::scoped_lock<null_mutex> scoped_lock;\n\n  // Constructor.\n  null_mutex()\n  {\n  }\n\n  // Destructor.\n  ~null_mutex()\n  {\n  }\n\n  // Lock the mutex.\n  void lock()\n  {\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_HAS_THREADS)\n\n#endif // ASIO_DETAIL_NULL_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_reactor.hpp",
    "content": "//\n// detail/null_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_REACTOR_HPP\n#define ASIO_DETAIL_NULL_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP) || defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/scheduler_operation.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass null_reactor\n  : public execution_context_service_base<null_reactor>\n{\npublic:\n  // Constructor.\n  null_reactor(asio::execution_context& ctx)\n    : execution_context_service_base<null_reactor>(ctx)\n  {\n  }\n\n  // Destructor.\n  ~null_reactor()\n  {\n  }\n\n  // Destroy all user-defined handler objects owned by the service.\n  void shutdown()\n  {\n  }\n\n  // No-op because should never be called.\n  void run(long /*usec*/, op_queue<scheduler_operation>& /*ops*/)\n  {\n  }\n\n  // No-op.\n  void interrupt()\n  {\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP) || defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_NULL_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_signal_blocker.hpp",
    "content": "//\n// detail/null_signal_blocker.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP\n#define ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS) \\\n  || defined(ASIO_WINDOWS) \\\n  || defined(ASIO_WINDOWS_RUNTIME) \\\n  || defined(__CYGWIN__) \\\n  || defined(__SYMBIAN32__)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass null_signal_blocker\n  : private noncopyable\n{\npublic:\n  // Constructor blocks all signals for the calling thread.\n  null_signal_blocker()\n  {\n  }\n\n  // Destructor restores the previous signal mask.\n  ~null_signal_blocker()\n  {\n  }\n\n  // Block all signals for the calling thread.\n  void block()\n  {\n  }\n\n  // Restore the previous signal mask.\n  void unblock()\n  {\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_HAS_THREADS)\n       // || defined(ASIO_WINDOWS)\n       // || defined(ASIO_WINDOWS_RUNTIME)\n       // || defined(__CYGWIN__)\n       // || defined(__SYMBIAN32__)\n\n#endif // ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_socket_service.hpp",
    "content": "//\n// detail/null_socket_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP\n#define ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/buffer.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/post.hpp\"\n#include \"asio/socket_base.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol>\nclass null_socket_service :\n  public execution_context_service_base<null_socket_service<Protocol> >\n{\npublic:\n  // The protocol type.\n  typedef Protocol protocol_type;\n\n  // The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  // The native type of a socket.\n  typedef int native_handle_type;\n\n  // The implementation type of the socket.\n  struct implementation_type\n  {\n  };\n\n  // Constructor.\n  null_socket_service(execution_context& context)\n    : execution_context_service_base<null_socket_service<Protocol> >(context)\n  {\n  }\n\n  // Destroy all user-defined handler objects owned by the service.\n  void shutdown()\n  {\n  }\n\n  // Construct a new socket implementation.\n  void construct(implementation_type&)\n  {\n  }\n\n  // Move-construct a new socket implementation.\n  void move_construct(implementation_type&, implementation_type&)\n  {\n  }\n\n  // Move-assign from another socket implementation.\n  void move_assign(implementation_type&,\n      null_socket_service&, implementation_type&)\n  {\n 
 }\n\n  // Move-construct a new socket implementation from another protocol type.\n  template <typename Protocol1>\n  void converting_move_construct(implementation_type&,\n      null_socket_service<Protocol1>&,\n      typename null_socket_service<Protocol1>::implementation_type&)\n  {\n  }\n\n  // Destroy a socket implementation.\n  void destroy(implementation_type&)\n  {\n  }\n\n  // Open a new socket implementation.\n  asio::error_code open(implementation_type&,\n      const protocol_type&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Assign a native socket to a socket implementation.\n  asio::error_code assign(implementation_type&, const protocol_type&,\n      const native_handle_type&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Determine whether the socket is open.\n  bool is_open(const implementation_type&) const\n  {\n    return false;\n  }\n\n  // Destroy a socket implementation.\n  asio::error_code close(implementation_type&,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Release ownership of the socket.\n  native_handle_type release(implementation_type&,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Get the native socket representation.\n  native_handle_type native_handle(implementation_type&)\n  {\n    return 0;\n  }\n\n  // Cancel all operations associated with the socket.\n  asio::error_code cancel(implementation_type&,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Determine whether the socket is at the out-of-band data mark.\n  bool at_mark(const implementation_type&,\n      asio::error_code& ec) const\n  {\n    ec = asio::error::operation_not_supported;\n    return false;\n  }\n\n  // Determine the number of bytes available for reading.\n  
std::size_t available(const implementation_type&,\n      asio::error_code& ec) const\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Place the socket into the state where it will listen for new connections.\n  asio::error_code listen(implementation_type&,\n      int, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Perform an IO control command on the socket.\n  template <typename IO_Control_Command>\n  asio::error_code io_control(implementation_type&,\n      IO_Control_Command&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the socket.\n  bool non_blocking(const implementation_type&) const\n  {\n    return false;\n  }\n\n  // Sets the non-blocking mode of the socket.\n  asio::error_code non_blocking(implementation_type&,\n      bool, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the native socket implementation.\n  bool native_non_blocking(const implementation_type&) const\n  {\n    return false;\n  }\n\n  // Sets the non-blocking mode of the native socket implementation.\n  asio::error_code native_non_blocking(implementation_type&,\n      bool, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Disable sends or receives on the socket.\n  asio::error_code shutdown(implementation_type&,\n      socket_base::shutdown_type, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Bind the socket to the specified local endpoint.\n  asio::error_code bind(implementation_type&,\n      const endpoint_type&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Set a socket option.\n  template <typename Option>\n  asio::error_code set_option(implementation_type&,\n    
  const Option&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Set a socket option.\n  template <typename Option>\n  asio::error_code get_option(const implementation_type&,\n      Option&, asio::error_code& ec) const\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Get the local endpoint.\n  endpoint_type local_endpoint(const implementation_type&,\n      asio::error_code& ec) const\n  {\n    ec = asio::error::operation_not_supported;\n    return endpoint_type();\n  }\n\n  // Get the remote endpoint.\n  endpoint_type remote_endpoint(const implementation_type&,\n      asio::error_code& ec) const\n  {\n    ec = asio::error::operation_not_supported;\n    return endpoint_type();\n  }\n\n  // Send the given data to the peer.\n  template <typename ConstBufferSequence>\n  std::size_t send(implementation_type&, const ConstBufferSequence&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Wait until data can be sent without blocking.\n  std::size_t send(implementation_type&, const null_buffers&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Start an asynchronous send. 
The data being sent must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_send(implementation_type&, const ConstBufferSequence&,\n      socket_base::message_flags, Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Start an asynchronous wait until data can be sent without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_send(implementation_type&, const null_buffers&,\n      socket_base::message_flags, Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Receive some data from the peer. Returns the number of bytes received.\n  template <typename MutableBufferSequence>\n  std::size_t receive(implementation_type&, const MutableBufferSequence&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Wait until data can be received without blocking.\n  std::size_t receive(implementation_type&, const null_buffers&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Start an asynchronous receive. 
The buffer for the data being received\n  // must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive(implementation_type&, const MutableBufferSequence&,\n      socket_base::message_flags, Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive(implementation_type&, const null_buffers&,\n      socket_base::message_flags, Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Receive some data with associated flags. Returns the number of bytes\n  // received.\n  template <typename MutableBufferSequence>\n  std::size_t receive_with_flags(implementation_type&,\n      const MutableBufferSequence&, socket_base::message_flags,\n      socket_base::message_flags&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Wait until data can be received without blocking.\n  std::size_t receive_with_flags(implementation_type&,\n      const null_buffers&, socket_base::message_flags,\n      socket_base::message_flags&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Start an asynchronous receive. 
The buffer for the data being received\n  // must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive_with_flags(implementation_type&,\n      const MutableBufferSequence&, socket_base::message_flags,\n      socket_base::message_flags&, Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive_with_flags(implementation_type&, const null_buffers&,\n      socket_base::message_flags, socket_base::message_flags&,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Send a datagram to the specified endpoint. Returns the number of bytes\n  // sent.\n  template <typename ConstBufferSequence>\n  std::size_t send_to(implementation_type&, const ConstBufferSequence&,\n      const endpoint_type&, socket_base::message_flags,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Wait until data can be sent without blocking.\n  std::size_t send_to(implementation_type&, const null_buffers&,\n      const endpoint_type&, socket_base::message_flags,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Start an asynchronous send. 
The data being sent must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_send_to(implementation_type&, const ConstBufferSequence&,\n      const endpoint_type&, socket_base::message_flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Start an asynchronous wait until data can be sent without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_send_to(implementation_type&, const null_buffers&,\n      const endpoint_type&, socket_base::message_flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Receive a datagram with the endpoint of the sender. Returns the number of\n  // bytes received.\n  template <typename MutableBufferSequence>\n  std::size_t receive_from(implementation_type&, const MutableBufferSequence&,\n      endpoint_type&, socket_base::message_flags,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Wait until data can be received without blocking.\n  std::size_t receive_from(implementation_type&, const null_buffers&,\n      endpoint_type&, socket_base::message_flags,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Start an asynchronous receive. 
The buffer for the data being received and\n  // the sender_endpoint object must both be valid for the lifetime of the\n  // asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive_from(implementation_type&, const MutableBufferSequence&,\n      endpoint_type&, socket_base::message_flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive_from(implementation_type&, const null_buffers&,\n      endpoint_type&, socket_base::message_flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex, detail::bind_handler(\n          handler, ec, bytes_transferred));\n  }\n\n  // Accept a new connection.\n  template <typename Socket>\n  asio::error_code accept(implementation_type&,\n      Socket&, endpoint_type*, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Start an asynchronous accept. 
The peer and peer_endpoint objects\n  // must be valid until the accept's handler is invoked.\n  template <typename Socket, typename Handler, typename IoExecutor>\n  void async_accept(implementation_type&, Socket&, endpoint_type*,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    asio::post(io_ex, detail::bind_handler(handler, ec));\n  }\n\n  // Connect the socket to the specified endpoint.\n  asio::error_code connect(implementation_type&,\n      const endpoint_type&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Start an asynchronous connect.\n  template <typename Handler, typename IoExecutor>\n  void async_connect(implementation_type&, const endpoint_type&,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    asio::post(io_ex, detail::bind_handler(handler, ec));\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_static_mutex.hpp",
    "content": "//\n// detail/null_static_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_STATIC_MUTEX_HPP\n#define ASIO_DETAIL_NULL_STATIC_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct null_static_mutex\n{\n  typedef asio::detail::scoped_lock<null_static_mutex> scoped_lock;\n\n  // Initialise the mutex.\n  void init()\n  {\n  }\n\n  // Lock the mutex.\n  void lock()\n  {\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n  }\n\n  int unused_;\n};\n\n#define ASIO_NULL_STATIC_MUTEX_INIT { 0 }\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_HAS_THREADS)\n\n#endif // ASIO_DETAIL_NULL_STATIC_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_thread.hpp",
    "content": "//\n// detail/null_thread.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_THREAD_HPP\n#define ASIO_DETAIL_NULL_THREAD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass null_thread\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  template <typename Function>\n  null_thread(Function, unsigned int = 0)\n  {\n    asio::detail::throw_error(\n        asio::error::operation_not_supported, \"thread\");\n  }\n\n  // Destructor.\n  ~null_thread()\n  {\n  }\n\n  // Wait for the thread to exit.\n  void join()\n  {\n  }\n\n  // Get number of CPUs.\n  static std::size_t hardware_concurrency()\n  {\n    return 1;\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_HAS_THREADS)\n\n#endif // ASIO_DETAIL_NULL_THREAD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/null_tss_ptr.hpp",
    "content": "//\n// detail/null_tss_ptr.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_NULL_TSS_PTR_HPP\n#define ASIO_DETAIL_NULL_TSS_PTR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nclass null_tss_ptr\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  null_tss_ptr()\n    : value_(0)\n  {\n  }\n\n  // Destructor.\n  ~null_tss_ptr()\n  {\n  }\n\n  // Get the value.\n  operator T*() const\n  {\n    return value_;\n  }\n\n  // Set the value.\n  void operator=(T* value)\n  {\n    value_ = value;\n  }\n\nprivate:\n  T* value_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_HAS_THREADS)\n\n#endif // ASIO_DETAIL_NULL_TSS_PTR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/object_pool.hpp",
    "content": "//\n// detail/object_pool.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_OBJECT_POOL_HPP\n#define ASIO_DETAIL_OBJECT_POOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Object>\nclass object_pool;\n\nclass object_pool_access\n{\npublic:\n  template <typename Object>\n  static Object* create()\n  {\n    return new Object;\n  }\n\n  template <typename Object, typename Arg>\n  static Object* create(Arg arg)\n  {\n    return new Object(arg);\n  }\n\n  template <typename Object>\n  static void destroy(Object* o)\n  {\n    delete o;\n  }\n\n  template <typename Object>\n  static Object*& next(Object* o)\n  {\n    return o->next_;\n  }\n\n  template <typename Object>\n  static Object*& prev(Object* o)\n  {\n    return o->prev_;\n  }\n};\n\ntemplate <typename Object>\nclass object_pool\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  object_pool()\n    : live_list_(0),\n      free_list_(0)\n  {\n  }\n\n  // Destructor destroys all objects.\n  ~object_pool()\n  {\n    destroy_list(live_list_);\n    destroy_list(free_list_);\n  }\n\n  // Get the object at the start of the live list.\n  Object* first()\n  {\n    return live_list_;\n  }\n\n  // Allocate a new object.\n  Object* alloc()\n  {\n    Object* o = free_list_;\n    if (o)\n      free_list_ = object_pool_access::next(free_list_);\n    else\n      o = object_pool_access::create<Object>();\n\n    object_pool_access::next(o) = live_list_;\n    object_pool_access::prev(o) = 0;\n    if (live_list_)\n      
object_pool_access::prev(live_list_) = o;\n    live_list_ = o;\n\n    return o;\n  }\n\n  // Allocate a new object with an argument.\n  template <typename Arg>\n  Object* alloc(Arg arg)\n  {\n    Object* o = free_list_;\n    if (o)\n      free_list_ = object_pool_access::next(free_list_);\n    else\n      o = object_pool_access::create<Object>(arg);\n\n    object_pool_access::next(o) = live_list_;\n    object_pool_access::prev(o) = 0;\n    if (live_list_)\n      object_pool_access::prev(live_list_) = o;\n    live_list_ = o;\n\n    return o;\n  }\n\n  // Free an object. Moves it to the free list. No destructors are run.\n  void free(Object* o)\n  {\n    if (live_list_ == o)\n      live_list_ = object_pool_access::next(o);\n\n    if (object_pool_access::prev(o))\n    {\n      object_pool_access::next(object_pool_access::prev(o))\n        = object_pool_access::next(o);\n    }\n\n    if (object_pool_access::next(o))\n    {\n      object_pool_access::prev(object_pool_access::next(o))\n        = object_pool_access::prev(o);\n    }\n\n    object_pool_access::next(o) = free_list_;\n    object_pool_access::prev(o) = 0;\n    free_list_ = o;\n  }\n\nprivate:\n  // Helper function to destroy all elements in a list.\n  void destroy_list(Object* list)\n  {\n    while (list)\n    {\n      Object* o = list;\n      list = object_pool_access::next(o);\n      object_pool_access::destroy(o);\n    }\n  }\n\n  // The list of live objects.\n  Object* live_list_;\n\n  // The free list.\n  Object* free_list_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_OBJECT_POOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/old_win_sdk_compat.hpp",
    "content": "//\n// detail/old_win_sdk_compat.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP\n#define ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n// Guess whether we are building against on old Platform SDK.\n#if !defined(IN6ADDR_ANY_INIT)\n#define ASIO_HAS_OLD_WIN_SDK 1\n#endif // !defined(IN6ADDR_ANY_INIT)\n\n#if defined(ASIO_HAS_OLD_WIN_SDK)\n\n// Emulation of types that are missing from old Platform SDKs.\n//\n// N.B. this emulation is also used if building for a Windows 2000 target with\n// a recent (i.e. Vista or later) SDK, as the SDK does not provide IPv6 support\n// in that case.\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nenum\n{\n  sockaddr_storage_maxsize = 128, // Maximum size.\n  sockaddr_storage_alignsize = (sizeof(__int64)), // Desired alignment.\n  sockaddr_storage_pad1size = (sockaddr_storage_alignsize - sizeof(short)),\n  sockaddr_storage_pad2size = (sockaddr_storage_maxsize -\n      (sizeof(short) + sockaddr_storage_pad1size + sockaddr_storage_alignsize))\n};\n\nstruct sockaddr_storage_emulation\n{\n  short ss_family;\n  char __ss_pad1[sockaddr_storage_pad1size];\n  __int64 __ss_align;\n  char __ss_pad2[sockaddr_storage_pad2size];\n};\n\nstruct in6_addr_emulation\n{\n  union\n  {\n    u_char Byte[16];\n    u_short Word[8];\n  } u;\n};\n\n#if !defined(s6_addr)\n# define _S6_un u\n# define _S6_u8 Byte\n# define s6_addr _S6_un._S6_u8\n#endif // !defined(s6_addr)\n\nstruct sockaddr_in6_emulation\n{\n  short sin6_family;\n  u_short 
sin6_port;\n  u_long sin6_flowinfo;\n  in6_addr_emulation sin6_addr;\n  u_long sin6_scope_id;\n};\n\nstruct ipv6_mreq_emulation\n{\n  in6_addr_emulation ipv6mr_multiaddr;\n  unsigned int ipv6mr_interface;\n};\n\nstruct addrinfo_emulation\n{\n  int ai_flags;\n  int ai_family;\n  int ai_socktype;\n  int ai_protocol;\n  size_t ai_addrlen;\n  char* ai_canonname;\n  sockaddr* ai_addr;\n  addrinfo_emulation* ai_next;\n};\n\n#if !defined(AI_PASSIVE)\n# define AI_PASSIVE 0x1\n#endif\n\n#if !defined(AI_CANONNAME)\n# define AI_CANONNAME 0x2\n#endif\n\n#if !defined(AI_NUMERICHOST)\n# define AI_NUMERICHOST 0x4\n#endif\n\n#if !defined(EAI_AGAIN)\n# define EAI_AGAIN WSATRY_AGAIN\n#endif\n\n#if !defined(EAI_BADFLAGS)\n# define EAI_BADFLAGS WSAEINVAL\n#endif\n\n#if !defined(EAI_FAIL)\n# define EAI_FAIL WSANO_RECOVERY\n#endif\n\n#if !defined(EAI_FAMILY)\n# define EAI_FAMILY WSAEAFNOSUPPORT\n#endif\n\n#if !defined(EAI_MEMORY)\n# define EAI_MEMORY WSA_NOT_ENOUGH_MEMORY\n#endif\n\n#if !defined(EAI_NODATA)\n# define EAI_NODATA WSANO_DATA\n#endif\n\n#if !defined(EAI_NONAME)\n# define EAI_NONAME WSAHOST_NOT_FOUND\n#endif\n\n#if !defined(EAI_SERVICE)\n# define EAI_SERVICE WSATYPE_NOT_FOUND\n#endif\n\n#if !defined(EAI_SOCKTYPE)\n# define EAI_SOCKTYPE WSAESOCKTNOSUPPORT\n#endif\n\n#if !defined(NI_NOFQDN)\n# define NI_NOFQDN 0x01\n#endif\n\n#if !defined(NI_NUMERICHOST)\n# define NI_NUMERICHOST 0x02\n#endif\n\n#if !defined(NI_NAMEREQD)\n# define NI_NAMEREQD 0x04\n#endif\n\n#if !defined(NI_NUMERICSERV)\n# define NI_NUMERICSERV 0x08\n#endif\n\n#if !defined(NI_DGRAM)\n# define NI_DGRAM 0x10\n#endif\n\n#if !defined(IPPROTO_IPV6)\n# define IPPROTO_IPV6 41\n#endif\n\n#if !defined(IPV6_UNICAST_HOPS)\n# define IPV6_UNICAST_HOPS 4\n#endif\n\n#if !defined(IPV6_MULTICAST_IF)\n# define IPV6_MULTICAST_IF 9\n#endif\n\n#if !defined(IPV6_MULTICAST_HOPS)\n# define IPV6_MULTICAST_HOPS 10\n#endif\n\n#if !defined(IPV6_MULTICAST_LOOP)\n# define IPV6_MULTICAST_LOOP 11\n#endif\n\n#if !defined(IPV6_JOIN_GROUP)\n# 
define IPV6_JOIN_GROUP 12\n#endif\n\n#if !defined(IPV6_LEAVE_GROUP)\n# define IPV6_LEAVE_GROUP 13\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_OLD_WIN_SDK)\n\n// Even newer Platform SDKs that support IPv6 may not define IPV6_V6ONLY.\n#if !defined(IPV6_V6ONLY)\n# define IPV6_V6ONLY 27\n#endif\n\n// Some SDKs (e.g. Windows CE) don't define IPPROTO_ICMPV6.\n#if !defined(IPPROTO_ICMPV6)\n# define IPPROTO_ICMPV6 58\n#endif\n\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/op_queue.hpp",
    "content": "//\n// detail/op_queue.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_OP_QUEUE_HPP\n#define ASIO_DETAIL_OP_QUEUE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Operation>\nclass op_queue;\n\nclass op_queue_access\n{\npublic:\n  template <typename Operation>\n  static Operation* next(Operation* o)\n  {\n    return static_cast<Operation*>(o->next_);\n  }\n\n  template <typename Operation1, typename Operation2>\n  static void next(Operation1*& o1, Operation2* o2)\n  {\n    o1->next_ = o2;\n  }\n\n  template <typename Operation>\n  static void destroy(Operation* o)\n  {\n    o->destroy();\n  }\n\n  template <typename Operation>\n  static Operation*& front(op_queue<Operation>& q)\n  {\n    return q.front_;\n  }\n\n  template <typename Operation>\n  static Operation*& back(op_queue<Operation>& q)\n  {\n    return q.back_;\n  }\n};\n\ntemplate <typename Operation>\nclass op_queue\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  op_queue()\n    : front_(0),\n      back_(0)\n  {\n  }\n\n  // Destructor destroys all operations.\n  ~op_queue()\n  {\n    while (Operation* op = front_)\n    {\n      pop();\n      op_queue_access::destroy(op);\n    }\n  }\n\n  // Get the operation at the front of the queue.\n  Operation* front()\n  {\n    return front_;\n  }\n\n  // Pop an operation from the front of the queue.\n  void pop()\n  {\n    if (front_)\n    {\n      Operation* tmp = front_;\n      front_ = op_queue_access::next(front_);\n      if (front_ == 0)\n        back_ = 0;\n      
op_queue_access::next(tmp, static_cast<Operation*>(0));\n    }\n  }\n\n  // Push an operation on to the back of the queue.\n  void push(Operation* h)\n  {\n    op_queue_access::next(h, static_cast<Operation*>(0));\n    if (back_)\n    {\n      op_queue_access::next(back_, h);\n      back_ = h;\n    }\n    else\n    {\n      front_ = back_ = h;\n    }\n  }\n\n  // Push all operations from another queue on to the back of the queue. The\n  // source queue may contain operations of a derived type.\n  template <typename OtherOperation>\n  void push(op_queue<OtherOperation>& q)\n  {\n    if (Operation* other_front = op_queue_access::front(q))\n    {\n      if (back_)\n        op_queue_access::next(back_, other_front);\n      else\n        front_ = other_front;\n      back_ = op_queue_access::back(q);\n      op_queue_access::front(q) = 0;\n      op_queue_access::back(q) = 0;\n    }\n  }\n\n  // Whether the queue is empty.\n  bool empty() const\n  {\n    return front_ == 0;\n  }\n\n  // Test whether an operation is already enqueued.\n  bool is_enqueued(Operation* o) const\n  {\n    return op_queue_access::next(o) != 0 || back_ == o;\n  }\n\nprivate:\n  friend class op_queue_access;\n\n  // The front of the queue.\n  Operation* front_;\n\n  // The back of the queue.\n  Operation* back_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_OP_QUEUE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/operation.hpp",
    "content": "//\n// detail/operation.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_OPERATION_HPP\n#define ASIO_DETAIL_OPERATION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_operation.hpp\"\n#else\n# include \"asio/detail/scheduler_operation.hpp\"\n#endif\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_HAS_IOCP)\ntypedef win_iocp_operation operation;\n#else\ntypedef scheduler_operation operation;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_OPERATION_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/pipe_select_interrupter.hpp",
    "content": "//\n// detail/pipe_select_interrupter.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP\n#define ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS)\n#if !defined(ASIO_WINDOWS_RUNTIME)\n#if !defined(__CYGWIN__)\n#if !defined(__SYMBIAN32__)\n#if !defined(ASIO_HAS_EVENTFD)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass pipe_select_interrupter\n{\npublic:\n  // Constructor.\n  ASIO_DECL pipe_select_interrupter();\n\n  // Destructor.\n  ASIO_DECL ~pipe_select_interrupter();\n\n  // Recreate the interrupter's descriptors. Used after a fork.\n  ASIO_DECL void recreate();\n\n  // Interrupt the select call.\n  ASIO_DECL void interrupt();\n\n  // Reset the select interrupt. Returns true if the call was interrupted.\n  ASIO_DECL bool reset();\n\n  // Get the read descriptor to be passed to select.\n  int read_descriptor() const\n  {\n    return read_descriptor_;\n  }\n\nprivate:\n  // Open the descriptors. Throws on error.\n  ASIO_DECL void open_descriptors();\n\n  // Close the descriptors.\n  ASIO_DECL void close_descriptors();\n\n  // The read end of a connection used to interrupt the select call. This file\n  // descriptor is passed to select such that when it is time to stop, a single\n  // byte will be written on the other end of the connection and this\n  // descriptor will become readable.\n  int read_descriptor_;\n\n  // The write end of a connection used to interrupt the select call. 
A single\n  // byte may be written to this to wake up the select which is waiting for the\n  // other end to become readable.\n  int write_descriptor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/pipe_select_interrupter.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // !defined(ASIO_HAS_EVENTFD)\n#endif // !defined(__SYMBIAN32__)\n#endif // !defined(__CYGWIN__)\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n#endif // !defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/pop_options.hpp",
    "content": "//\n// detail/pop_options.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// No header guard\n\n#if defined(__COMO__)\n\n// Comeau C++\n\n#elif defined(__DMC__)\n\n// Digital Mars C++\n\n#elif defined(__INTEL_COMPILER) || defined(__ICL) \\\n  || defined(__ICC) || defined(__ECC)\n\n// Intel C++\n\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  if !defined(ASIO_DISABLE_VISIBILITY)\n#   pragma GCC visibility pop\n#  endif // !defined(ASIO_DISABLE_VISIBILITY)\n# endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n\n#elif defined(__clang__)\n\n// Clang\n\n# if defined(__OBJC__)\n#  if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1)\n#   if defined(ASIO_OBJC_WORKAROUND)\n#    undef Protocol\n#    undef id\n#    undef ASIO_OBJC_WORKAROUND\n#   endif\n#  endif\n# endif\n\n# if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32)\n#  if !defined(ASIO_DISABLE_VISIBILITY)\n#   pragma GCC visibility pop\n#  endif // !defined(ASIO_DISABLE_VISIBILITY)\n# endif // !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32)\n\n#elif defined(__GNUC__)\n\n// GNU C++\n\n# if defined(__MINGW32__) || defined(__CYGWIN__)\n#  pragma pack (pop)\n# endif\n\n# if defined(__OBJC__)\n#  if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1)\n#   if defined(ASIO_OBJC_WORKAROUND)\n#    undef Protocol\n#    undef id\n#    undef ASIO_OBJC_WORKAROUND\n#   endif\n#  endif\n# endif\n\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  if !defined(ASIO_DISABLE_VISIBILITY)\n#   pragma GCC visibility pop\n#  endif // !defined(ASIO_DISABLE_VISIBILITY)\n# endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n\n# if (__GNUC__ >= 7)\n#  pragma GCC diagnostic pop\n# endif // (__GNUC__ >= 7)\n\n#elif 
defined(__KCC)\n\n// Kai C++\n\n#elif defined(__sgi)\n\n// SGI MIPSpro C++\n\n#elif defined(__DECCXX)\n\n// Compaq Tru64 Unix cxx\n\n#elif defined(__ghs)\n\n// Greenhills C++\n\n#elif defined(__BORLANDC__)\n\n// Borland C++\n\n# pragma option pop\n# pragma nopushoptwarn\n# pragma nopackwarning\n\n#elif defined(__MWERKS__)\n\n// Metrowerks CodeWarrior\n\n#elif defined(__SUNPRO_CC)\n\n// Sun Workshop Compiler C++\n\n#elif defined(__HP_aCC)\n\n// HP aCC\n\n#elif defined(__MRC__) || defined(__SC__)\n\n// MPW MrCpp or SCpp\n\n#elif defined(__IBMCPP__)\n\n// IBM Visual Age\n\n#elif defined(_MSC_VER)\n\n// Microsoft Visual C++\n//\n// Must remain the last #elif since some other vendors (Metrowerks, for example)\n// also #define _MSC_VER\n\n# pragma warning (pop)\n# pragma pack (pop)\n\n# if defined(__cplusplus_cli) || defined(__cplusplus_winrt)\n#  if defined(ASIO_CLR_WORKAROUND)\n#   undef generic\n#   undef ASIO_CLR_WORKAROUND\n#  endif\n# endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/asio/detail/posix_event.hpp",
    "content": "//\n// detail/posix_event.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_POSIX_EVENT_HPP\n#define ASIO_DETAIL_POSIX_EVENT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include <pthread.h>\n#include \"asio/detail/assert.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass posix_event\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  ASIO_DECL posix_event();\n\n  // Destructor.\n  ~posix_event()\n  {\n    ::pthread_cond_destroy(&cond_);\n  }\n\n  // Signal the event. (Retained for backward compatibility.)\n  template <typename Lock>\n  void signal(Lock& lock)\n  {\n    this->signal_all(lock);\n  }\n\n  // Signal all waiters.\n  template <typename Lock>\n  void signal_all(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    (void)lock;\n    state_ |= 1;\n    ::pthread_cond_broadcast(&cond_); // Ignore EINVAL.\n  }\n\n  // Unlock the mutex and signal one waiter.\n  template <typename Lock>\n  void unlock_and_signal_one(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    state_ |= 1;\n    bool have_waiters = (state_ > 1);\n    lock.unlock();\n    if (have_waiters)\n      ::pthread_cond_signal(&cond_); // Ignore EINVAL.\n  }\n\n  // If there's a waiter, unlock the mutex and signal it.\n  template <typename Lock>\n  bool maybe_unlock_and_signal_one(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    state_ |= 1;\n    if (state_ > 1)\n    {\n      lock.unlock();\n      ::pthread_cond_signal(&cond_); // Ignore EINVAL.\n      return true;\n    }\n    return 
false;\n  }\n\n  // Reset the event.\n  template <typename Lock>\n  void clear(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    (void)lock;\n    state_ &= ~std::size_t(1);\n  }\n\n  // Wait for the event to become signalled.\n  template <typename Lock>\n  void wait(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    while ((state_ & 1) == 0)\n    {\n      state_ += 2;\n      ::pthread_cond_wait(&cond_, &lock.mutex().mutex_); // Ignore EINVAL.\n      state_ -= 2;\n    }\n  }\n\n  // Timed wait for the event to become signalled.\n  template <typename Lock>\n  bool wait_for_usec(Lock& lock, long usec)\n  {\n    ASIO_ASSERT(lock.locked());\n    if ((state_ & 1) == 0)\n    {\n      state_ += 2;\n      timespec ts;\n#if (defined(__MACH__) && defined(__APPLE__)) \\\n      || (defined(__ANDROID__) && (__ANDROID_API__ < 21) \\\n          && defined(HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE))\n      ts.tv_sec = usec / 1000000;\n      ts.tv_nsec = (usec % 1000000) * 1000;\n      ::pthread_cond_timedwait_relative_np(\n          &cond_, &lock.mutex().mutex_, &ts); // Ignore EINVAL.\n#else // (defined(__MACH__) && defined(__APPLE__))\n      // || (defined(__ANDROID__) && (__ANDROID_API__ < 21)\n      //     && defined(HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE))\n      if (::clock_gettime(CLOCK_MONOTONIC, &ts) == 0)\n      {\n        ts.tv_sec += usec / 1000000;\n        ts.tv_nsec += (usec % 1000000) * 1000;\n        ts.tv_sec += ts.tv_nsec / 1000000000;\n        ts.tv_nsec = ts.tv_nsec % 1000000000;\n        ::pthread_cond_timedwait(&cond_,\n            &lock.mutex().mutex_, &ts); // Ignore EINVAL.\n      }\n#endif // (defined(__MACH__) && defined(__APPLE__))\n       // || (defined(__ANDROID__) && (__ANDROID_API__ < 21)\n       //     && defined(HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE))\n      state_ -= 2;\n    }\n    return (state_ & 1) != 0;\n  }\n\nprivate:\n  ::pthread_cond_t cond_;\n  std::size_t state_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include 
\"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/posix_event.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_POSIX_EVENT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/posix_fd_set_adapter.hpp",
    "content": "//\n// detail/posix_fd_set_adapter.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP\n#define ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(__CYGWIN__) \\\n  && !defined(ASIO_WINDOWS_RUNTIME)\n\n#include <cstring>\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/reactor_op_queue.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Adapts the FD_SET type to meet the Descriptor_Set concept's requirements.\nclass posix_fd_set_adapter : noncopyable\n{\npublic:\n  posix_fd_set_adapter()\n    : max_descriptor_(invalid_socket)\n  {\n    using namespace std; // Needed for memset on Solaris.\n    FD_ZERO(&fd_set_);\n  }\n\n  void reset()\n  {\n    using namespace std; // Needed for memset on Solaris.\n    FD_ZERO(&fd_set_);\n  }\n\n  bool set(socket_type descriptor)\n  {\n    if (descriptor < (socket_type)FD_SETSIZE)\n    {\n      if (max_descriptor_ == invalid_socket || descriptor > max_descriptor_)\n        max_descriptor_ = descriptor;\n      FD_SET(descriptor, &fd_set_);\n      return true;\n    }\n    return false;\n  }\n\n  void set(reactor_op_queue<socket_type>& operations, op_queue<operation>& ops)\n  {\n    reactor_op_queue<socket_type>::iterator i = operations.begin();\n    while (i != operations.end())\n    {\n      reactor_op_queue<socket_type>::iterator op_iter = i++;\n      if (!set(op_iter->first))\n      {\n        asio::error_code ec(error::fd_set_failure);\n        
operations.cancel_operations(op_iter, ops, ec);\n      }\n    }\n  }\n\n  bool is_set(socket_type descriptor) const\n  {\n    return FD_ISSET(descriptor, &fd_set_) != 0;\n  }\n\n  operator fd_set*()\n  {\n    return &fd_set_;\n  }\n\n  socket_type max_descriptor() const\n  {\n    return max_descriptor_;\n  }\n\n  void perform(reactor_op_queue<socket_type>& operations,\n      op_queue<operation>& ops) const\n  {\n    reactor_op_queue<socket_type>::iterator i = operations.begin();\n    while (i != operations.end())\n    {\n      reactor_op_queue<socket_type>::iterator op_iter = i++;\n      if (is_set(op_iter->first))\n        operations.perform_operations(op_iter, ops);\n    }\n  }\n\nprivate:\n  mutable fd_set fd_set_;\n  socket_type max_descriptor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS)\n       // && !defined(__CYGWIN__)\n       // && !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/posix_global.hpp",
    "content": "//\n// detail/posix_global.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_POSIX_GLOBAL_HPP\n#define ASIO_DETAIL_POSIX_GLOBAL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include <exception>\n#include <pthread.h>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nstruct posix_global_impl\n{\n  // Helper function to perform initialisation.\n  static void do_init()\n  {\n    instance_.static_ptr_ = instance_.ptr_ = new T;\n  }\n\n  // Destructor automatically cleans up the global.\n  ~posix_global_impl()\n  {\n    delete static_ptr_;\n  }\n\n  static ::pthread_once_t init_once_;\n  static T* static_ptr_;\n  static posix_global_impl instance_;\n  T* ptr_;\n};\n\ntemplate <typename T>\n::pthread_once_t posix_global_impl<T>::init_once_ = PTHREAD_ONCE_INIT;\n\ntemplate <typename T>\nT* posix_global_impl<T>::static_ptr_ = 0;\n\ntemplate <typename T>\nposix_global_impl<T> posix_global_impl<T>::instance_;\n\ntemplate <typename T>\nT& posix_global()\n{\n  int result = ::pthread_once(\n      &posix_global_impl<T>::init_once_,\n      &posix_global_impl<T>::do_init);\n\n  if (result != 0)\n    std::terminate();\n\n  return *posix_global_impl<T>::instance_.ptr_;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_POSIX_GLOBAL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/posix_mutex.hpp",
    "content": "//\n// detail/posix_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_POSIX_MUTEX_HPP\n#define ASIO_DETAIL_POSIX_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include <pthread.h>\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass posix_event;\n\nclass posix_mutex\n  : private noncopyable\n{\npublic:\n  typedef asio::detail::scoped_lock<posix_mutex> scoped_lock;\n\n  // Constructor.\n  ASIO_DECL posix_mutex();\n\n  // Destructor.\n  ~posix_mutex()\n  {\n    ::pthread_mutex_destroy(&mutex_); // Ignore EBUSY.\n  }\n\n  // Lock the mutex.\n  void lock()\n  {\n    (void)::pthread_mutex_lock(&mutex_); // Ignore EINVAL.\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n    (void)::pthread_mutex_unlock(&mutex_); // Ignore EINVAL.\n  }\n\nprivate:\n  friend class posix_event;\n  ::pthread_mutex_t mutex_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/posix_mutex.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_POSIX_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/posix_signal_blocker.hpp",
    "content": "//\n// detail/posix_signal_blocker.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP\n#define ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include <csignal>\n#include <pthread.h>\n#include <signal.h>\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass posix_signal_blocker\n  : private noncopyable\n{\npublic:\n  // Constructor blocks all signals for the calling thread.\n  posix_signal_blocker()\n    : blocked_(false)\n  {\n    sigset_t new_mask;\n    sigfillset(&new_mask);\n    blocked_ = (pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask_) == 0);\n  }\n\n  // Destructor restores the previous signal mask.\n  ~posix_signal_blocker()\n  {\n    if (blocked_)\n      pthread_sigmask(SIG_SETMASK, &old_mask_, 0);\n  }\n\n  // Block all signals for the calling thread.\n  void block()\n  {\n    if (!blocked_)\n    {\n      sigset_t new_mask;\n      sigfillset(&new_mask);\n      blocked_ = (pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask_) == 0);\n    }\n  }\n\n  // Restore the previous signal mask.\n  void unblock()\n  {\n    if (blocked_)\n      blocked_ = (pthread_sigmask(SIG_SETMASK, &old_mask_, 0) != 0);\n  }\n\nprivate:\n  // Have signals been blocked.\n  bool blocked_;\n\n  // The previous signal mask.\n  sigset_t old_mask_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/posix_static_mutex.hpp",
    "content": "//\n// detail/posix_static_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP\n#define ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include <pthread.h>\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct posix_static_mutex\n{\n  typedef asio::detail::scoped_lock<posix_static_mutex> scoped_lock;\n\n  // Initialise the mutex.\n  void init()\n  {\n    // Nothing to do.\n  }\n\n  // Lock the mutex.\n  void lock()\n  {\n    (void)::pthread_mutex_lock(&mutex_); // Ignore EINVAL.\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n    (void)::pthread_mutex_unlock(&mutex_); // Ignore EINVAL.\n  }\n\n  ::pthread_mutex_t mutex_;\n};\n\n#define ASIO_POSIX_STATIC_MUTEX_INIT { PTHREAD_MUTEX_INITIALIZER }\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/posix_thread.hpp",
    "content": "//\n// detail/posix_thread.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_POSIX_THREAD_HPP\n#define ASIO_DETAIL_POSIX_THREAD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include <cstddef>\n#include <pthread.h>\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nextern \"C\"\n{\n  ASIO_DECL void* asio_detail_posix_thread_function(void* arg);\n}\n\nclass posix_thread\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  template <typename Function>\n  posix_thread(Function f, unsigned int = 0)\n    : joined_(false)\n  {\n    start_thread(new func<Function>(f));\n  }\n\n  // Destructor.\n  ASIO_DECL ~posix_thread();\n\n  // Wait for the thread to exit.\n  ASIO_DECL void join();\n\n  // Get number of CPUs.\n  ASIO_DECL static std::size_t hardware_concurrency();\n\nprivate:\n  friend void* asio_detail_posix_thread_function(void* arg);\n\n  class func_base\n  {\n  public:\n    virtual ~func_base() {}\n    virtual void run() = 0;\n  };\n\n  struct auto_func_base_ptr\n  {\n    func_base* ptr;\n    ~auto_func_base_ptr() { delete ptr; }\n  };\n\n  template <typename Function>\n  class func\n    : public func_base\n  {\n  public:\n    func(Function f)\n      : f_(f)\n    {\n    }\n\n    virtual void run()\n    {\n      f_();\n    }\n\n  private:\n    Function f_;\n  };\n\n  ASIO_DECL void start_thread(func_base* arg);\n\n  ::pthread_t thread_;\n  bool joined_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if 
defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/posix_thread.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_POSIX_THREAD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/posix_tss_ptr.hpp",
    "content": "//\n// detail/posix_tss_ptr.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_POSIX_TSS_PTR_HPP\n#define ASIO_DETAIL_POSIX_TSS_PTR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_PTHREADS)\n\n#include <pthread.h>\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Helper function to create thread-specific storage.\nASIO_DECL void posix_tss_ptr_create(pthread_key_t& key);\n\ntemplate <typename T>\nclass posix_tss_ptr\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  posix_tss_ptr()\n  {\n    posix_tss_ptr_create(tss_key_);\n  }\n\n  // Destructor.\n  ~posix_tss_ptr()\n  {\n    ::pthread_key_delete(tss_key_);\n  }\n\n  // Get the value.\n  operator T*() const\n  {\n    return static_cast<T*>(::pthread_getspecific(tss_key_));\n  }\n\n  // Set the value.\n  void operator=(T* value)\n  {\n    ::pthread_setspecific(tss_key_, value);\n  }\n\nprivate:\n  // Thread-specific storage to allow unlocked access to determine whether a\n  // thread is a member of the pool.\n  pthread_key_t tss_key_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/posix_tss_ptr.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_PTHREADS)\n\n#endif // ASIO_DETAIL_POSIX_TSS_PTR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/push_options.hpp",
    "content": "//\n// detail/push_options.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// No header guard\n\n#if defined(__COMO__)\n\n// Comeau C++\n\n#elif defined(__DMC__)\n\n// Digital Mars C++\n\n#elif defined(__INTEL_COMPILER) || defined(__ICL) \\\n  || defined(__ICC) || defined(__ECC)\n\n// Intel C++\n\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  if !defined(ASIO_DISABLE_VISIBILITY)\n#   pragma GCC visibility push (default)\n#  endif // !defined(ASIO_DISABLE_VISIBILITY)\n# endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n\n#elif defined(__clang__)\n\n// Clang\n\n# if defined(__OBJC__)\n#  if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1)\n#   if !defined(ASIO_DISABLE_OBJC_WORKAROUND)\n#    if !defined(Protocol) && !defined(id)\n#     define Protocol cpp_Protocol\n#     define id cpp_id\n#     define ASIO_OBJC_WORKAROUND\n#    endif\n#   endif\n#  endif\n# endif\n\n# if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32)\n#  if !defined(ASIO_DISABLE_VISIBILITY)\n#   pragma GCC visibility push (default)\n#  endif // !defined(ASIO_DISABLE_VISIBILITY)\n# endif // !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32)\n\n#elif defined(__GNUC__)\n\n// GNU C++\n\n# if defined(__MINGW32__) || defined(__CYGWIN__)\n#  pragma pack (push, 8)\n# endif\n\n# if defined(__OBJC__)\n#  if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1)\n#   if !defined(ASIO_DISABLE_OBJC_WORKAROUND)\n#    if !defined(Protocol) && !defined(id)\n#     define Protocol cpp_Protocol\n#     define id cpp_id\n#     define ASIO_OBJC_WORKAROUND\n#    endif\n#   endif\n#  endif\n# endif\n\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  if !defined(ASIO_DISABLE_VISIBILITY)\n#   pragma GCC visibility push 
(default)\n#  endif // !defined(ASIO_DISABLE_VISIBILITY)\n# endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n\n# if (__GNUC__ >= 7)\n#  pragma GCC diagnostic push\n#  pragma GCC diagnostic ignored \"-Wimplicit-fallthrough\"\n# endif // (__GNUC__ >= 7)\n\n#elif defined(__KCC)\n\n// Kai C++\n\n#elif defined(__sgi)\n\n// SGI MIPSpro C++\n\n#elif defined(__DECCXX)\n\n// Compaq Tru64 Unix cxx\n\n#elif defined(__ghs)\n\n// Greenhills C++\n\n#elif defined(__BORLANDC__)\n\n// Borland C++\n\n# pragma option push -a8 -b -Ve- -Vx- -w-inl -vi-\n# pragma nopushoptwarn\n# pragma nopackwarning\n# if !defined(__MT__)\n#  error Multithreaded RTL must be selected.\n# endif // !defined(__MT__)\n\n#elif defined(__MWERKS__)\n\n// Metrowerks CodeWarrior\n\n#elif defined(__SUNPRO_CC)\n\n// Sun Workshop Compiler C++\n\n#elif defined(__HP_aCC)\n\n// HP aCC\n\n#elif defined(__MRC__) || defined(__SC__)\n\n// MPW MrCpp or SCpp\n\n#elif defined(__IBMCPP__)\n\n// IBM Visual Age\n\n#elif defined(_MSC_VER)\n\n// Microsoft Visual C++\n//\n// Must remain the last #elif since some other vendors (Metrowerks, for example)\n// also #define _MSC_VER\n\n# pragma warning (disable:4103)\n# pragma warning (push)\n# pragma warning (disable:4127)\n# pragma warning (disable:4180)\n# pragma warning (disable:4244)\n# pragma warning (disable:4355)\n# pragma warning (disable:4510)\n# pragma warning (disable:4512)\n# pragma warning (disable:4610)\n# pragma warning (disable:4675)\n# if (_MSC_VER < 1600)\n// Visual Studio 2008 generates spurious warnings about unused parameters.\n#  pragma warning (disable:4100)\n# endif // (_MSC_VER < 1600)\n# if defined(_M_IX86) && defined(_Wp64)\n// The /Wp64 option is broken. 
If you want to check 64 bit portability, use a\n// 64 bit compiler!\n#  pragma warning (disable:4311)\n#  pragma warning (disable:4312)\n# endif // defined(_M_IX86) && defined(_Wp64)\n# pragma pack (push, 8)\n// Note that if the /Og optimisation flag is enabled with MSVC6, the compiler\n// has a tendency to incorrectly optimise away some calls to member template\n// functions, even though those functions contain code that should not be\n// optimised away! Therefore we will always disable this optimisation option\n// for the MSVC6 compiler.\n# if (_MSC_VER < 1300)\n#  pragma optimize (\"g\", off)\n# endif\n# if !defined(_MT)\n#  error Multithreaded RTL must be selected.\n# endif // !defined(_MT)\n\n# if defined(__cplusplus_cli) || defined(__cplusplus_winrt)\n#  if !defined(ASIO_DISABLE_CLR_WORKAROUND)\n#   if !defined(generic)\n#    define generic cpp_generic\n#    define ASIO_CLR_WORKAROUND\n#   endif\n#  endif\n# endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_descriptor_service.hpp",
    "content": "//\n// detail/reactive_descriptor_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP\n#define ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n\n#include \"asio/buffer.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/descriptor_ops.hpp\"\n#include \"asio/detail/descriptor_read_op.hpp\"\n#include \"asio/detail/descriptor_write_op.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/reactive_null_buffers_op.hpp\"\n#include \"asio/detail/reactive_wait_op.hpp\"\n#include \"asio/detail/reactor.hpp\"\n#include \"asio/posix/descriptor_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass reactive_descriptor_service :\n  public execution_context_service_base<reactive_descriptor_service>\n{\npublic:\n  // The native type of a descriptor.\n  typedef int native_handle_type;\n\n  // The implementation type of the descriptor.\n  class implementation_type\n    : private asio::detail::noncopyable\n  {\n  public:\n    // Default constructor.\n    implementation_type()\n      : descriptor_(-1),\n        state_(0)\n    {\n    }\n\n  private:\n    // Only this service will have access to the internal values.\n    friend class reactive_descriptor_service;\n\n    
// The native descriptor representation.\n    int descriptor_;\n\n    // The current state of the descriptor.\n    descriptor_ops::state_type state_;\n\n    // Per-descriptor data used by the reactor.\n    reactor::per_descriptor_data reactor_data_;\n  };\n\n  // Constructor.\n  ASIO_DECL reactive_descriptor_service(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Construct a new descriptor implementation.\n  ASIO_DECL void construct(implementation_type& impl);\n\n  // Move-construct a new descriptor implementation.\n  ASIO_DECL void move_construct(implementation_type& impl,\n      implementation_type& other_impl);\n\n  // Move-assign from another descriptor implementation.\n  ASIO_DECL void move_assign(implementation_type& impl,\n      reactive_descriptor_service& other_service,\n      implementation_type& other_impl);\n\n  // Destroy a descriptor implementation.\n  ASIO_DECL void destroy(implementation_type& impl);\n\n  // Assign a native descriptor to a descriptor implementation.\n  ASIO_DECL asio::error_code assign(implementation_type& impl,\n      const native_handle_type& native_descriptor,\n      asio::error_code& ec);\n\n  // Determine whether the descriptor is open.\n  bool is_open(const implementation_type& impl) const\n  {\n    return impl.descriptor_ != -1;\n  }\n\n  // Destroy a descriptor implementation.\n  ASIO_DECL asio::error_code close(implementation_type& impl,\n      asio::error_code& ec);\n\n  // Get the native descriptor representation.\n  native_handle_type native_handle(const implementation_type& impl) const\n  {\n    return impl.descriptor_;\n  }\n\n  // Release ownership of the native descriptor representation.\n  ASIO_DECL native_handle_type release(implementation_type& impl);\n\n  // Cancel all operations associated with the descriptor.\n  ASIO_DECL asio::error_code cancel(implementation_type& impl,\n      asio::error_code& ec);\n\n  // Perform an IO 
control command on the descriptor.\n  template <typename IO_Control_Command>\n  asio::error_code io_control(implementation_type& impl,\n      IO_Control_Command& command, asio::error_code& ec)\n  {\n    descriptor_ops::ioctl(impl.descriptor_, impl.state_,\n        command.name(), static_cast<ioctl_arg_type*>(command.data()), ec);\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the descriptor.\n  bool non_blocking(const implementation_type& impl) const\n  {\n    return (impl.state_ & descriptor_ops::user_set_non_blocking) != 0;\n  }\n\n  // Sets the non-blocking mode of the descriptor.\n  asio::error_code non_blocking(implementation_type& impl,\n      bool mode, asio::error_code& ec)\n  {\n    descriptor_ops::set_user_non_blocking(\n        impl.descriptor_, impl.state_, mode, ec);\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the native descriptor implementation.\n  bool native_non_blocking(const implementation_type& impl) const\n  {\n    return (impl.state_ & descriptor_ops::internal_non_blocking) != 0;\n  }\n\n  // Sets the non-blocking mode of the native descriptor implementation.\n  asio::error_code native_non_blocking(implementation_type& impl,\n      bool mode, asio::error_code& ec)\n  {\n    descriptor_ops::set_internal_non_blocking(\n        impl.descriptor_, impl.state_, mode, ec);\n    return ec;\n  }\n\n  // Wait for the descriptor to become ready to read, ready to write, or to have\n  // pending error conditions.\n  asio::error_code wait(implementation_type& impl,\n      posix::descriptor_base::wait_type w, asio::error_code& ec)\n  {\n    switch (w)\n    {\n    case posix::descriptor_base::wait_read:\n      descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec);\n      break;\n    case posix::descriptor_base::wait_write:\n      descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec);\n      break;\n    case posix::descriptor_base::wait_error:\n      descriptor_ops::poll_error(impl.descriptor_, impl.state_, ec);\n      
break;\n    default:\n      ec = asio::error::invalid_argument;\n      break;\n    }\n\n    return ec;\n  }\n\n  // Asynchronously wait for the descriptor to become ready to read, ready to\n  // write, or to have pending error conditions.\n  template <typename Handler, typename IoExecutor>\n  void async_wait(implementation_type& impl,\n      posix::descriptor_base::wait_type w,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_wait_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"descriptor\",\n          &impl, impl.descriptor_, \"async_wait\"));\n\n    int op_type;\n    switch (w)\n    {\n    case posix::descriptor_base::wait_read:\n        op_type = reactor::read_op;\n        break;\n    case posix::descriptor_base::wait_write:\n        op_type = reactor::write_op;\n        break;\n    case posix::descriptor_base::wait_error:\n        op_type = reactor::except_op;\n        break;\n      default:\n        p.p->ec_ = asio::error::invalid_argument;\n        reactor_.post_immediate_completion(p.p, is_continuation);\n        p.v = p.p = 0;\n        return;\n    }\n\n    start_op(impl, op_type, p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\n  // Write some data to the descriptor.\n  template <typename ConstBufferSequence>\n  size_t write_some(implementation_type& impl,\n      const ConstBufferSequence& buffers, asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(buffers);\n\n    return descriptor_ops::sync_write(impl.descriptor_, impl.state_,\n        bufs.buffers(), bufs.count(), bufs.all_empty(), ec);\n  }\n\n  // Wait until data can be 
written without blocking.\n  size_t write_some(implementation_type& impl,\n      const null_buffers&, asio::error_code& ec)\n  {\n    // Wait for descriptor to become ready.\n    descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec);\n\n    return 0;\n  }\n\n  // Start an asynchronous write. The data being sent must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_write_some(implementation_type& impl,\n      const ConstBufferSequence& buffers, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef descriptor_write_op<ConstBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.descriptor_, buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"descriptor\",\n          &impl, impl.descriptor_, \"async_write_some\"));\n\n    start_op(impl, reactor::write_op, p.p, is_continuation, true,\n        buffer_sequence_adapter<asio::const_buffer,\n          ConstBufferSequence>::all_empty(buffers));\n    p.v = p.p = 0;\n  }\n\n  // Start an asynchronous wait until data can be written without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_write_some(implementation_type& impl,\n      const null_buffers&, Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"descriptor\",\n          &impl, impl.descriptor_, \"async_write_some(null_buffers)\"));\n\n    start_op(impl, reactor::write_op, p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\n  // Read some data from the stream. Returns the number of bytes read.\n  template <typename MutableBufferSequence>\n  size_t read_some(implementation_type& impl,\n      const MutableBufferSequence& buffers, asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    return descriptor_ops::sync_read(impl.descriptor_, impl.state_,\n        bufs.buffers(), bufs.count(), bufs.all_empty(), ec);\n  }\n\n  // Wait until data can be read without blocking.\n  size_t read_some(implementation_type& impl,\n      const null_buffers&, asio::error_code& ec)\n  {\n    // Wait for descriptor to become ready.\n    descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec);\n\n    return 0;\n  }\n\n  // Start an asynchronous read. 
The buffer for the data being read must be\n  // valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_read_some(implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef descriptor_read_op<MutableBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.descriptor_, buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"descriptor\",\n          &impl, impl.descriptor_, \"async_read_some\"));\n\n    start_op(impl, reactor::read_op, p.p, is_continuation, true,\n        buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::all_empty(buffers));\n    p.v = p.p = 0;\n  }\n\n  // Wait until data can be read without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_read_some(implementation_type& impl,\n      const null_buffers&, Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"descriptor\",\n          &impl, impl.descriptor_, \"async_read_some(null_buffers)\"));\n\n    start_op(impl, reactor::read_op, p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\nprivate:\n  // Start the asynchronous operation.\n  ASIO_DECL void 
start_op(implementation_type& impl, int op_type,\n      reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop);\n\n  // The selector that performs event demultiplexing for the service.\n  reactor& reactor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/reactive_descriptor_service.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_null_buffers_op.hpp",
    "content": "//\n// detail/reactive_null_buffers_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP\n#define ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename IoExecutor>\nclass reactive_null_buffers_op : public reactor_op\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_null_buffers_op);\n\n  reactive_null_buffers_op(Handler& handler, const IoExecutor& io_ex)\n    : reactor_op(&reactive_null_buffers_op::do_perform,\n        &reactive_null_buffers_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static status do_perform(reactor_op*)\n  {\n    return done;\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_null_buffers_op* o(static_cast<reactive_null_buffers_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be 
deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->bytes_transferred_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_serial_port_service.hpp",
    "content": "//\n// detail/reactive_serial_port_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP\n#define ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_SERIAL_PORT)\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#include <string>\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/serial_port_base.hpp\"\n#include \"asio/detail/descriptor_ops.hpp\"\n#include \"asio/detail/reactive_descriptor_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Extend reactive_descriptor_service to provide serial port support.\nclass reactive_serial_port_service :\n  public execution_context_service_base<reactive_serial_port_service>\n{\npublic:\n  // The native type of a serial port.\n  typedef reactive_descriptor_service::native_handle_type native_handle_type;\n\n  // The implementation type of the serial port.\n  typedef reactive_descriptor_service::implementation_type implementation_type;\n\n  ASIO_DECL reactive_serial_port_service(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Construct a new serial port implementation.\n  void construct(implementation_type& impl)\n  {\n    descriptor_service_.construct(impl);\n  }\n\n  // Move-construct a new serial port implementation.\n  void move_construct(implementation_type& impl,\n      
implementation_type& other_impl)\n  {\n    descriptor_service_.move_construct(impl, other_impl);\n  }\n\n  // Move-assign from another serial port implementation.\n  void move_assign(implementation_type& impl,\n      reactive_serial_port_service& other_service,\n      implementation_type& other_impl)\n  {\n    descriptor_service_.move_assign(impl,\n        other_service.descriptor_service_, other_impl);\n  }\n\n  // Destroy a serial port implementation.\n  void destroy(implementation_type& impl)\n  {\n    descriptor_service_.destroy(impl);\n  }\n\n  // Open the serial port using the specified device name.\n  ASIO_DECL asio::error_code open(implementation_type& impl,\n      const std::string& device, asio::error_code& ec);\n\n  // Assign a native descriptor to a serial port implementation.\n  asio::error_code assign(implementation_type& impl,\n      const native_handle_type& native_descriptor,\n      asio::error_code& ec)\n  {\n    return descriptor_service_.assign(impl, native_descriptor, ec);\n  }\n\n  // Determine whether the serial port is open.\n  bool is_open(const implementation_type& impl) const\n  {\n    return descriptor_service_.is_open(impl);\n  }\n\n  // Destroy a serial port implementation.\n  asio::error_code close(implementation_type& impl,\n      asio::error_code& ec)\n  {\n    return descriptor_service_.close(impl, ec);\n  }\n\n  // Get the native serial port representation.\n  native_handle_type native_handle(implementation_type& impl)\n  {\n    return descriptor_service_.native_handle(impl);\n  }\n\n  // Cancel all operations associated with the serial port.\n  asio::error_code cancel(implementation_type& impl,\n      asio::error_code& ec)\n  {\n    return descriptor_service_.cancel(impl, ec);\n  }\n\n  // Set an option on the serial port.\n  template <typename SettableSerialPortOption>\n  asio::error_code set_option(implementation_type& impl,\n      const SettableSerialPortOption& option, asio::error_code& ec)\n  {\n    return 
do_set_option(impl,\n        &reactive_serial_port_service::store_option<SettableSerialPortOption>,\n        &option, ec);\n  }\n\n  // Get an option from the serial port.\n  template <typename GettableSerialPortOption>\n  asio::error_code get_option(const implementation_type& impl,\n      GettableSerialPortOption& option, asio::error_code& ec) const\n  {\n    return do_get_option(impl,\n        &reactive_serial_port_service::load_option<GettableSerialPortOption>,\n        &option, ec);\n  }\n\n  // Send a break sequence to the serial port.\n  asio::error_code send_break(implementation_type& impl,\n      asio::error_code& ec)\n  {\n    errno = 0;\n    descriptor_ops::error_wrapper(::tcsendbreak(\n          descriptor_service_.native_handle(impl), 0), ec);\n    return ec;\n  }\n\n  // Write the given data. Returns the number of bytes sent.\n  template <typename ConstBufferSequence>\n  size_t write_some(implementation_type& impl,\n      const ConstBufferSequence& buffers, asio::error_code& ec)\n  {\n    return descriptor_service_.write_some(impl, buffers, ec);\n  }\n\n  // Start an asynchronous write. The data being written must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_write_some(implementation_type& impl,\n      const ConstBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    descriptor_service_.async_write_some(impl, buffers, handler, io_ex);\n  }\n\n  // Read some data. Returns the number of bytes received.\n  template <typename MutableBufferSequence>\n  size_t read_some(implementation_type& impl,\n      const MutableBufferSequence& buffers, asio::error_code& ec)\n  {\n    return descriptor_service_.read_some(impl, buffers, ec);\n  }\n\n  // Start an asynchronous read. 
The buffer for the data being received must be\n  // valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_read_some(implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    descriptor_service_.async_read_some(impl, buffers, handler, io_ex);\n  }\n\nprivate:\n  // Function pointer type for storing a serial port option.\n  typedef asio::error_code (*store_function_type)(\n      const void*, termios&, asio::error_code&);\n\n  // Helper function template to store a serial port option.\n  template <typename SettableSerialPortOption>\n  static asio::error_code store_option(const void* option,\n      termios& storage, asio::error_code& ec)\n  {\n    static_cast<const SettableSerialPortOption*>(option)->store(storage, ec);\n    return ec;\n  }\n\n  // Helper function to set a serial port option.\n  ASIO_DECL asio::error_code do_set_option(\n      implementation_type& impl, store_function_type store,\n      const void* option, asio::error_code& ec);\n\n  // Function pointer type for loading a serial port option.\n  typedef asio::error_code (*load_function_type)(\n      void*, const termios&, asio::error_code&);\n\n  // Helper function template to load a serial port option.\n  template <typename GettableSerialPortOption>\n  static asio::error_code load_option(void* option,\n      const termios& storage, asio::error_code& ec)\n  {\n    static_cast<GettableSerialPortOption*>(option)->load(storage, ec);\n    return ec;\n  }\n\n  // Helper function to get a serial port option.\n  ASIO_DECL asio::error_code do_get_option(\n      const implementation_type& impl, load_function_type load,\n      void* option, asio::error_code& ec) const;\n\n  // The implementation used for initiating asynchronous operations.\n  reactive_descriptor_service descriptor_service_;\n};\n\n} // namespace detail\n} // namespace 
asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/reactive_serial_port_service.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n#endif // defined(ASIO_HAS_SERIAL_PORT)\n\n#endif // ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_accept_op.hpp",
    "content": "//\n// detail/reactive_socket_accept_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_holder.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Socket, typename Protocol>\nclass reactive_socket_accept_op_base : public reactor_op\n{\npublic:\n  reactive_socket_accept_op_base(socket_type socket,\n      socket_ops::state_type state, Socket& peer, const Protocol& protocol,\n      typename Protocol::endpoint* peer_endpoint, func_type complete_func)\n    : reactor_op(&reactive_socket_accept_op_base::do_perform, complete_func),\n      socket_(socket),\n      state_(state),\n      peer_(peer),\n      protocol_(protocol),\n      peer_endpoint_(peer_endpoint),\n      addrlen_(peer_endpoint ? peer_endpoint->capacity() : 0)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    reactive_socket_accept_op_base* o(\n        static_cast<reactive_socket_accept_op_base*>(base));\n\n    socket_type new_socket = invalid_socket;\n    status result = socket_ops::non_blocking_accept(o->socket_,\n        o->state_, o->peer_endpoint_ ? o->peer_endpoint_->data() : 0,\n        o->peer_endpoint_ ? 
&o->addrlen_ : 0, o->ec_, new_socket)\n    ? done : not_done;\n    o->new_socket_.reset(new_socket);\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_accept\", o->ec_));\n\n    return result;\n  }\n\n  void do_assign()\n  {\n    if (new_socket_.get() != invalid_socket)\n    {\n      if (peer_endpoint_)\n        peer_endpoint_->resize(addrlen_);\n      peer_.assign(protocol_, new_socket_.get(), ec_);\n      if (!ec_)\n        new_socket_.release();\n    }\n  }\n\nprivate:\n  socket_type socket_;\n  socket_ops::state_type state_;\n  socket_holder new_socket_;\n  Socket& peer_;\n  Protocol protocol_;\n  typename Protocol::endpoint* peer_endpoint_;\n  std::size_t addrlen_;\n};\n\ntemplate <typename Socket, typename Protocol,\n    typename Handler, typename IoExecutor>\nclass reactive_socket_accept_op :\n  public reactive_socket_accept_op_base<Socket, Protocol>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_socket_accept_op);\n\n  reactive_socket_accept_op(socket_type socket,\n      socket_ops::state_type state, Socket& peer, const Protocol& protocol,\n      typename Protocol::endpoint* peer_endpoint, Handler& handler,\n      const IoExecutor& io_ex)\n    : reactive_socket_accept_op_base<Socket, Protocol>(socket, state, peer,\n        protocol, peer_endpoint, &reactive_socket_accept_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_socket_accept_op* o(static_cast<reactive_socket_accept_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    // On success, assign new connection to peer socket object.\n    if (owner)\n      
o->do_assign();\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder1<Handler, asio::error_code>\n      handler(o->handler_, o->ec_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n#if defined(ASIO_HAS_MOVE)\n\ntemplate <typename Protocol, typename PeerIoExecutor,\n    typename Handler, typename IoExecutor>\nclass reactive_socket_move_accept_op :\n  private Protocol::socket::template rebind_executor<PeerIoExecutor>::other,\n  public reactive_socket_accept_op_base<\n    typename Protocol::socket::template rebind_executor<PeerIoExecutor>::other,\n    Protocol>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_socket_move_accept_op);\n\n  reactive_socket_move_accept_op(const PeerIoExecutor& peer_io_ex,\n      socket_type socket, socket_ops::state_type state,\n      const Protocol& protocol, typename Protocol::endpoint* peer_endpoint,\n      Handler& handler, const IoExecutor& io_ex)\n    : peer_socket_type(peer_io_ex),\n      reactive_socket_accept_op_base<peer_socket_type, Protocol>(\n        socket, state, *this, protocol, peer_endpoint,\n        &reactive_socket_move_accept_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, 
IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_socket_move_accept_op* o(\n        static_cast<reactive_socket_move_accept_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    // On success, assign new connection to peer socket object.\n    if (owner)\n      o->do_assign();\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::move_binder2<Handler,\n      asio::error_code, peer_socket_type>\n        handler(0, ASIO_MOVE_CAST(Handler)(o->handler_), o->ec_,\n          ASIO_MOVE_CAST(peer_socket_type)(*o));\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, \"...\"));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  typedef typename Protocol::socket::template\n    rebind_executor<PeerIoExecutor>::other peer_socket_type;\n\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n#endif // defined(ASIO_HAS_MOVE)\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_connect_op.hpp",
    "content": "//\n// detail/reactive_socket_connect_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass reactive_socket_connect_op_base : public reactor_op\n{\npublic:\n  reactive_socket_connect_op_base(socket_type socket, func_type complete_func)\n    : reactor_op(&reactive_socket_connect_op_base::do_perform, complete_func),\n      socket_(socket)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    reactive_socket_connect_op_base* o(\n        static_cast<reactive_socket_connect_op_base*>(base));\n\n    status result = socket_ops::non_blocking_connect(\n        o->socket_, o->ec_) ? 
done : not_done;\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_connect\", o->ec_));\n\n    return result;\n  }\n\nprivate:\n  socket_type socket_;\n};\n\ntemplate <typename Handler, typename IoExecutor>\nclass reactive_socket_connect_op : public reactive_socket_connect_op_base\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_socket_connect_op);\n\n  reactive_socket_connect_op(socket_type socket,\n      Handler& handler, const IoExecutor& io_ex)\n    : reactive_socket_connect_op_base(socket,\n        &reactive_socket_connect_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_socket_connect_op* o\n      (static_cast<reactive_socket_connect_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder1<Handler, asio::error_code>\n      handler(o->handler_, o->ec_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_recv_op.hpp",
    "content": "//\n// detail/reactive_socket_recv_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence>\nclass reactive_socket_recv_op_base : public reactor_op\n{\npublic:\n  reactive_socket_recv_op_base(socket_type socket,\n      socket_ops::state_type state, const MutableBufferSequence& buffers,\n      socket_base::message_flags flags, func_type complete_func)\n    : reactor_op(&reactive_socket_recv_op_base::do_perform, complete_func),\n      socket_(socket),\n      state_(state),\n      buffers_(buffers),\n      flags_(flags)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    reactive_socket_recv_op_base* o(\n        static_cast<reactive_socket_recv_op_base*>(base));\n\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(o->buffers_);\n\n    status result = socket_ops::non_blocking_recv(o->socket_,\n        bufs.buffers(), bufs.count(), o->flags_,\n        (o->state_ & socket_ops::stream_oriented) != 0,\n        o->ec_, o->bytes_transferred_) ? 
done : not_done;\n\n    if (result == done)\n      if ((o->state_ & socket_ops::stream_oriented) != 0)\n        if (o->bytes_transferred_ == 0)\n          result = done_and_exhausted;\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_recv\",\n          o->ec_, o->bytes_transferred_));\n\n    return result;\n  }\n\nprivate:\n  socket_type socket_;\n  socket_ops::state_type state_;\n  MutableBufferSequence buffers_;\n  socket_base::message_flags flags_;\n};\n\ntemplate <typename MutableBufferSequence, typename Handler, typename IoExecutor>\nclass reactive_socket_recv_op :\n  public reactive_socket_recv_op_base<MutableBufferSequence>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_socket_recv_op);\n\n  reactive_socket_recv_op(socket_type socket, socket_ops::state_type state,\n      const MutableBufferSequence& buffers, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n    : reactive_socket_recv_op_base<MutableBufferSequence>(socket, state,\n        buffers, flags, &reactive_socket_recv_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_socket_recv_op* o(static_cast<reactive_socket_recv_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->bytes_transferred_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_recvfrom_op.hpp",
    "content": "//\n// detail/reactive_socket_recvfrom_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence, typename Endpoint>\nclass reactive_socket_recvfrom_op_base : public reactor_op\n{\npublic:\n  reactive_socket_recvfrom_op_base(socket_type socket, int protocol_type,\n      const MutableBufferSequence& buffers, Endpoint& endpoint,\n      socket_base::message_flags flags, func_type complete_func)\n    : reactor_op(&reactive_socket_recvfrom_op_base::do_perform, complete_func),\n      socket_(socket),\n      protocol_type_(protocol_type),\n      buffers_(buffers),\n      sender_endpoint_(endpoint),\n      flags_(flags)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    reactive_socket_recvfrom_op_base* o(\n        static_cast<reactive_socket_recvfrom_op_base*>(base));\n\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(o->buffers_);\n\n    std::size_t addr_len = o->sender_endpoint_.capacity();\n    status result = socket_ops::non_blocking_recvfrom(o->socket_,\n        bufs.buffers(), bufs.count(), o->flags_,\n        
o->sender_endpoint_.data(), &addr_len,\n        o->ec_, o->bytes_transferred_) ? done : not_done;\n\n    if (result && !o->ec_)\n      o->sender_endpoint_.resize(addr_len);\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_recvfrom\",\n          o->ec_, o->bytes_transferred_));\n\n    return result;\n  }\n\nprivate:\n  socket_type socket_;\n  int protocol_type_;\n  MutableBufferSequence buffers_;\n  Endpoint& sender_endpoint_;\n  socket_base::message_flags flags_;\n};\n\ntemplate <typename MutableBufferSequence, typename Endpoint,\n    typename Handler, typename IoExecutor>\nclass reactive_socket_recvfrom_op :\n  public reactive_socket_recvfrom_op_base<MutableBufferSequence, Endpoint>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_socket_recvfrom_op);\n\n  reactive_socket_recvfrom_op(socket_type socket, int protocol_type,\n      const MutableBufferSequence& buffers, Endpoint& endpoint,\n      socket_base::message_flags flags, Handler& handler,\n      const IoExecutor& io_ex)\n    : reactive_socket_recvfrom_op_base<MutableBufferSequence, Endpoint>(\n        socket, protocol_type, buffers, endpoint, flags,\n        &reactive_socket_recvfrom_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_socket_recvfrom_op* o(\n        static_cast<reactive_socket_recvfrom_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. 
Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->bytes_transferred_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_recvmsg_op.hpp",
    "content": "//\n// detail/reactive_socket_recvmsg_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/socket_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence>\nclass reactive_socket_recvmsg_op_base : public reactor_op\n{\npublic:\n  reactive_socket_recvmsg_op_base(socket_type socket,\n      const MutableBufferSequence& buffers, socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, func_type complete_func)\n    : reactor_op(&reactive_socket_recvmsg_op_base::do_perform, complete_func),\n      socket_(socket),\n      buffers_(buffers),\n      in_flags_(in_flags),\n      out_flags_(out_flags)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    reactive_socket_recvmsg_op_base* o(\n        static_cast<reactive_socket_recvmsg_op_base*>(base));\n\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(o->buffers_);\n\n    status result = socket_ops::non_blocking_recvmsg(o->socket_,\n        bufs.buffers(), bufs.count(),\n        o->in_flags_, o->out_flags_,\n        o->ec_, o->bytes_transferred_) ? 
done : not_done;\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_recvmsg\",\n          o->ec_, o->bytes_transferred_));\n\n    return result;\n  }\n\nprivate:\n  socket_type socket_;\n  MutableBufferSequence buffers_;\n  socket_base::message_flags in_flags_;\n  socket_base::message_flags& out_flags_;\n};\n\ntemplate <typename MutableBufferSequence, typename Handler, typename IoExecutor>\nclass reactive_socket_recvmsg_op :\n  public reactive_socket_recvmsg_op_base<MutableBufferSequence>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_socket_recvmsg_op);\n\n  reactive_socket_recvmsg_op(socket_type socket,\n      const MutableBufferSequence& buffers, socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, Handler& handler,\n      const IoExecutor& io_ex)\n    : reactive_socket_recvmsg_op_base<MutableBufferSequence>(socket, buffers,\n        in_flags, out_flags, &reactive_socket_recvmsg_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_socket_recvmsg_op* o(\n        static_cast<reactive_socket_recvmsg_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->bytes_transferred_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_send_op.hpp",
    "content": "//\n// detail/reactive_socket_send_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename ConstBufferSequence>\nclass reactive_socket_send_op_base : public reactor_op\n{\npublic:\n  reactive_socket_send_op_base(socket_type socket,\n      socket_ops::state_type state, const ConstBufferSequence& buffers,\n      socket_base::message_flags flags, func_type complete_func)\n    : reactor_op(&reactive_socket_send_op_base::do_perform, complete_func),\n      socket_(socket),\n      state_(state),\n      buffers_(buffers),\n      flags_(flags)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    reactive_socket_send_op_base* o(\n        static_cast<reactive_socket_send_op_base*>(base));\n\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(o->buffers_);\n\n    status result = socket_ops::non_blocking_send(o->socket_,\n          bufs.buffers(), bufs.count(), o->flags_,\n          o->ec_, o->bytes_transferred_) ? 
done : not_done;\n\n    if (result == done)\n      if ((o->state_ & socket_ops::stream_oriented) != 0)\n        if (o->bytes_transferred_ < bufs.total_size())\n          result = done_and_exhausted;\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_send\",\n          o->ec_, o->bytes_transferred_));\n\n    return result;\n  }\n\nprivate:\n  socket_type socket_;\n  socket_ops::state_type state_;\n  ConstBufferSequence buffers_;\n  socket_base::message_flags flags_;\n};\n\ntemplate <typename ConstBufferSequence, typename Handler, typename IoExecutor>\nclass reactive_socket_send_op :\n  public reactive_socket_send_op_base<ConstBufferSequence>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_socket_send_op);\n\n  reactive_socket_send_op(socket_type socket, socket_ops::state_type state,\n      const ConstBufferSequence& buffers, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n    : reactive_socket_send_op_base<ConstBufferSequence>(socket,\n        state, buffers, flags, &reactive_socket_send_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_socket_send_op* o(static_cast<reactive_socket_send_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->bytes_transferred_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_sendto_op.hpp",
    "content": "//\n// detail/reactive_socket_sendto_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename ConstBufferSequence, typename Endpoint>\nclass reactive_socket_sendto_op_base : public reactor_op\n{\npublic:\n  reactive_socket_sendto_op_base(socket_type socket,\n      const ConstBufferSequence& buffers, const Endpoint& endpoint,\n      socket_base::message_flags flags, func_type complete_func)\n    : reactor_op(&reactive_socket_sendto_op_base::do_perform, complete_func),\n      socket_(socket),\n      buffers_(buffers),\n      destination_(endpoint),\n      flags_(flags)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    reactive_socket_sendto_op_base* o(\n        static_cast<reactive_socket_sendto_op_base*>(base));\n\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(o->buffers_);\n\n    status result = socket_ops::non_blocking_sendto(o->socket_,\n          bufs.buffers(), bufs.count(), o->flags_,\n          o->destination_.data(), o->destination_.size(),\n          o->ec_, o->bytes_transferred_) ? 
done : not_done;\n\n    ASIO_HANDLER_REACTOR_OPERATION((*o, \"non_blocking_sendto\",\n          o->ec_, o->bytes_transferred_));\n\n    return result;\n  }\n\nprivate:\n  socket_type socket_;\n  ConstBufferSequence buffers_;\n  Endpoint destination_;\n  socket_base::message_flags flags_;\n};\n\ntemplate <typename ConstBufferSequence, typename Endpoint,\n    typename Handler, typename IoExecutor>\nclass reactive_socket_sendto_op :\n  public reactive_socket_sendto_op_base<ConstBufferSequence, Endpoint>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_socket_sendto_op);\n\n  reactive_socket_sendto_op(socket_type socket,\n      const ConstBufferSequence& buffers, const Endpoint& endpoint,\n      socket_base::message_flags flags, Handler& handler,\n      const IoExecutor& io_ex)\n    : reactive_socket_sendto_op_base<ConstBufferSequence, Endpoint>(socket,\n        buffers, endpoint, flags, &reactive_socket_sendto_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_socket_sendto_op* o(static_cast<reactive_socket_sendto_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->bytes_transferred_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_service.hpp",
    "content": "//\n// detail/reactive_socket_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_IOCP)\n\n#include \"asio/buffer.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/socket_base.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/reactive_null_buffers_op.hpp\"\n#include \"asio/detail/reactive_socket_accept_op.hpp\"\n#include \"asio/detail/reactive_socket_connect_op.hpp\"\n#include \"asio/detail/reactive_socket_recvfrom_op.hpp\"\n#include \"asio/detail/reactive_socket_sendto_op.hpp\"\n#include \"asio/detail/reactive_socket_service_base.hpp\"\n#include \"asio/detail/reactor.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_holder.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol>\nclass reactive_socket_service :\n  public execution_context_service_base<reactive_socket_service<Protocol> >,\n  public reactive_socket_service_base\n{\npublic:\n  // The protocol type.\n  typedef Protocol protocol_type;\n\n  // The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  // The native type of a socket.\n  typedef socket_type native_handle_type;\n\n  // The implementation type of the socket.\n  
struct implementation_type :\n    reactive_socket_service_base::base_implementation_type\n  {\n    // Default constructor.\n    implementation_type()\n      : protocol_(endpoint_type().protocol())\n    {\n    }\n\n    // The protocol associated with the socket.\n    protocol_type protocol_;\n  };\n\n  // Constructor.\n  reactive_socket_service(execution_context& context)\n    : execution_context_service_base<\n        reactive_socket_service<Protocol> >(context),\n      reactive_socket_service_base(context)\n  {\n  }\n\n  // Destroy all user-defined handler objects owned by the service.\n  void shutdown()\n  {\n    this->base_shutdown();\n  }\n\n  // Move-construct a new socket implementation.\n  void move_construct(implementation_type& impl,\n      implementation_type& other_impl) ASIO_NOEXCEPT\n  {\n    this->base_move_construct(impl, other_impl);\n\n    impl.protocol_ = other_impl.protocol_;\n    other_impl.protocol_ = endpoint_type().protocol();\n  }\n\n  // Move-assign from another socket implementation.\n  void move_assign(implementation_type& impl,\n      reactive_socket_service_base& other_service,\n      implementation_type& other_impl)\n  {\n    this->base_move_assign(impl, other_service, other_impl);\n\n    impl.protocol_ = other_impl.protocol_;\n    other_impl.protocol_ = endpoint_type().protocol();\n  }\n\n  // Move-construct a new socket implementation from another protocol type.\n  template <typename Protocol1>\n  void converting_move_construct(implementation_type& impl,\n      reactive_socket_service<Protocol1>&,\n      typename reactive_socket_service<\n        Protocol1>::implementation_type& other_impl)\n  {\n    this->base_move_construct(impl, other_impl);\n\n    impl.protocol_ = protocol_type(other_impl.protocol_);\n    other_impl.protocol_ = typename Protocol1::endpoint().protocol();\n  }\n\n  // Open a new socket implementation.\n  asio::error_code open(implementation_type& impl,\n      const protocol_type& protocol, asio::error_code& ec)\n  
{\n    if (!do_open(impl, protocol.family(),\n          protocol.type(), protocol.protocol(), ec))\n      impl.protocol_ = protocol;\n    return ec;\n  }\n\n  // Assign a native socket to a socket implementation.\n  asio::error_code assign(implementation_type& impl,\n      const protocol_type& protocol, const native_handle_type& native_socket,\n      asio::error_code& ec)\n  {\n    if (!do_assign(impl, protocol.type(), native_socket, ec))\n      impl.protocol_ = protocol;\n    return ec;\n  }\n\n  // Get the native socket representation.\n  native_handle_type native_handle(implementation_type& impl)\n  {\n    return impl.socket_;\n  }\n\n  // Bind the socket to the specified local endpoint.\n  asio::error_code bind(implementation_type& impl,\n      const endpoint_type& endpoint, asio::error_code& ec)\n  {\n    socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec);\n    return ec;\n  }\n\n  // Set a socket option.\n  template <typename Option>\n  asio::error_code set_option(implementation_type& impl,\n      const Option& option, asio::error_code& ec)\n  {\n    socket_ops::setsockopt(impl.socket_, impl.state_,\n        option.level(impl.protocol_), option.name(impl.protocol_),\n        option.data(impl.protocol_), option.size(impl.protocol_), ec);\n    return ec;\n  }\n\n  // Set a socket option.\n  template <typename Option>\n  asio::error_code get_option(const implementation_type& impl,\n      Option& option, asio::error_code& ec) const\n  {\n    std::size_t size = option.size(impl.protocol_);\n    socket_ops::getsockopt(impl.socket_, impl.state_,\n        option.level(impl.protocol_), option.name(impl.protocol_),\n        option.data(impl.protocol_), &size, ec);\n    if (!ec)\n      option.resize(impl.protocol_, size);\n    return ec;\n  }\n\n  // Get the local endpoint.\n  endpoint_type local_endpoint(const implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    endpoint_type endpoint;\n    std::size_t addr_len = 
endpoint.capacity();\n    if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec))\n      return endpoint_type();\n    endpoint.resize(addr_len);\n    return endpoint;\n  }\n\n  // Get the remote endpoint.\n  endpoint_type remote_endpoint(const implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    endpoint_type endpoint;\n    std::size_t addr_len = endpoint.capacity();\n    if (socket_ops::getpeername(impl.socket_,\n          endpoint.data(), &addr_len, false, ec))\n      return endpoint_type();\n    endpoint.resize(addr_len);\n    return endpoint;\n  }\n\n  // Disable sends or receives on the socket.\n  asio::error_code shutdown(base_implementation_type& impl,\n      socket_base::shutdown_type what, asio::error_code& ec)\n  {\n    socket_ops::shutdown(impl.socket_, what, ec);\n    return ec;\n  }\n\n  // Send a datagram to the specified endpoint. Returns the number of bytes\n  // sent.\n  template <typename ConstBufferSequence>\n  size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags,\n      asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(buffers);\n\n    return socket_ops::sync_sendto(impl.socket_, impl.state_,\n        bufs.buffers(), bufs.count(), flags,\n        destination.data(), destination.size(), ec);\n  }\n\n  // Wait until data can be sent without blocking.\n  size_t send_to(implementation_type& impl, const null_buffers&,\n      const endpoint_type&, socket_base::message_flags,\n      asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_write(impl.socket_, impl.state_, -1, ec);\n\n    return 0;\n  }\n\n  // Start an asynchronous send. 
The data being sent must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_send_to(implementation_type& impl,\n      const ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_socket_sendto_op<ConstBufferSequence,\n        endpoint_type, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.socket_, buffers,\n        destination, flags, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_send_to\"));\n\n    start_op(impl, reactor::write_op, p.p, is_continuation, true, false);\n    p.v = p.p = 0;\n  }\n\n  // Start an asynchronous wait until data can be sent without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_send_to(implementation_type& impl, const null_buffers&,\n      const endpoint_type&, socket_base::message_flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_send_to(null_buffers)\"));\n\n    start_op(impl, reactor::write_op, p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\n  // Receive 
a datagram with the endpoint of the sender. Returns the number of\n  // bytes received.\n  template <typename MutableBufferSequence>\n  size_t receive_from(implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags,\n      asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    std::size_t addr_len = sender_endpoint.capacity();\n    std::size_t bytes_recvd = socket_ops::sync_recvfrom(\n        impl.socket_, impl.state_, bufs.buffers(), bufs.count(),\n        flags, sender_endpoint.data(), &addr_len, ec);\n\n    if (!ec)\n      sender_endpoint.resize(addr_len);\n\n    return bytes_recvd;\n  }\n\n  // Wait until data can be received without blocking.\n  size_t receive_from(implementation_type& impl, const null_buffers&,\n      endpoint_type& sender_endpoint, socket_base::message_flags,\n      asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);\n\n    // Reset endpoint since it can be given no sensible value at this time.\n    sender_endpoint = endpoint_type();\n\n    return 0;\n  }\n\n  // Start an asynchronous receive. 
The buffer for the data being received and\n  // the sender_endpoint object must both be valid for the lifetime of the\n  // asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive_from(implementation_type& impl,\n      const MutableBufferSequence& buffers, endpoint_type& sender_endpoint,\n      socket_base::message_flags flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_socket_recvfrom_op<MutableBufferSequence,\n        endpoint_type, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    int protocol = impl.protocol_.type();\n    p.p = new (p.v) op(impl.socket_, protocol, buffers,\n        sender_endpoint, flags, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive_from\"));\n\n    start_op(impl,\n        (flags & socket_base::message_out_of_band)\n          ? 
reactor::except_op : reactor::read_op,\n        p.p, is_continuation, true, false);\n    p.v = p.p = 0;\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive_from(implementation_type& impl, const null_buffers&,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive_from(null_buffers)\"));\n\n    // Reset endpoint since it can be given no sensible value at this time.\n    sender_endpoint = endpoint_type();\n\n    start_op(impl,\n        (flags & socket_base::message_out_of_band)\n          ? reactor::except_op : reactor::read_op,\n        p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\n  // Accept a new connection.\n  template <typename Socket>\n  asio::error_code accept(implementation_type& impl,\n      Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec)\n  {\n    // We cannot accept a socket that is already open.\n    if (peer.is_open())\n    {\n      ec = asio::error::already_open;\n      return ec;\n    }\n\n    std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0;\n    socket_holder new_socket(socket_ops::sync_accept(impl.socket_,\n          impl.state_, peer_endpoint ? peer_endpoint->data() : 0,\n          peer_endpoint ? 
&addr_len : 0, ec));\n\n    // On success, assign new connection to peer socket object.\n    if (new_socket.get() != invalid_socket)\n    {\n      if (peer_endpoint)\n        peer_endpoint->resize(addr_len);\n      peer.assign(impl.protocol_, new_socket.get(), ec);\n      if (!ec)\n        new_socket.release();\n    }\n\n    return ec;\n  }\n\n  // Start an asynchronous accept. The peer and peer_endpoint objects must be\n  // valid until the accept's handler is invoked.\n  template <typename Socket, typename Handler, typename IoExecutor>\n  void async_accept(implementation_type& impl, Socket& peer,\n      endpoint_type* peer_endpoint, Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_socket_accept_op<Socket, Protocol, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.socket_, impl.state_, peer,\n        impl.protocol_, peer_endpoint, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_accept\"));\n\n    start_accept_op(impl, p.p, is_continuation, peer.is_open());\n    p.v = p.p = 0;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  // Start an asynchronous accept. 
The peer_endpoint object must be valid until\n  // the accept's handler is invoked.\n  template <typename PeerIoExecutor, typename Handler, typename IoExecutor>\n  void async_move_accept(implementation_type& impl,\n      const PeerIoExecutor& peer_io_ex, endpoint_type* peer_endpoint,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_socket_move_accept_op<Protocol,\n        PeerIoExecutor, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(peer_io_ex, impl.socket_, impl.state_,\n        impl.protocol_, peer_endpoint, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_accept\"));\n\n    start_accept_op(impl, p.p, is_continuation, false);\n    p.v = p.p = 0;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  // Connect the socket to the specified endpoint.\n  asio::error_code connect(implementation_type& impl,\n      const endpoint_type& peer_endpoint, asio::error_code& ec)\n  {\n    socket_ops::sync_connect(impl.socket_,\n        peer_endpoint.data(), peer_endpoint.size(), ec);\n    return ec;\n  }\n\n  // Start an asynchronous connect.\n  template <typename Handler, typename IoExecutor>\n  void async_connect(implementation_type& impl,\n      const endpoint_type& peer_endpoint,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_socket_connect_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.socket_, handler, io_ex);\n\n    
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_connect\"));\n\n    start_connect_op(impl, p.p, is_continuation,\n        peer_endpoint.data(), peer_endpoint.size());\n    p.v = p.p = 0;\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_socket_service_base.hpp",
    "content": "//\n// detail/reactive_socket_service_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP\n#define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_IOCP) \\\n  && !defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/buffer.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/socket_base.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactive_null_buffers_op.hpp\"\n#include \"asio/detail/reactive_socket_recv_op.hpp\"\n#include \"asio/detail/reactive_socket_recvmsg_op.hpp\"\n#include \"asio/detail/reactive_socket_send_op.hpp\"\n#include \"asio/detail/reactive_wait_op.hpp\"\n#include \"asio/detail/reactor.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_holder.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass reactive_socket_service_base\n{\npublic:\n  // The native type of a socket.\n  typedef socket_type native_handle_type;\n\n  // The implementation type of the socket.\n  struct base_implementation_type\n  {\n    // The native socket representation.\n    socket_type socket_;\n\n    // The current state of the socket.\n    socket_ops::state_type state_;\n\n    // Per-descriptor data used by the reactor.\n    reactor::per_descriptor_data reactor_data_;\n  };\n\n  // Constructor.\n  ASIO_DECL 
reactive_socket_service_base(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void base_shutdown();\n\n  // Construct a new socket implementation.\n  ASIO_DECL void construct(base_implementation_type& impl);\n\n  // Move-construct a new socket implementation.\n  ASIO_DECL void base_move_construct(base_implementation_type& impl,\n      base_implementation_type& other_impl) ASIO_NOEXCEPT;\n\n  // Move-assign from another socket implementation.\n  ASIO_DECL void base_move_assign(base_implementation_type& impl,\n      reactive_socket_service_base& other_service,\n      base_implementation_type& other_impl);\n\n  // Destroy a socket implementation.\n  ASIO_DECL void destroy(base_implementation_type& impl);\n\n  // Determine whether the socket is open.\n  bool is_open(const base_implementation_type& impl) const\n  {\n    return impl.socket_ != invalid_socket;\n  }\n\n  // Destroy a socket implementation.\n  ASIO_DECL asio::error_code close(\n      base_implementation_type& impl, asio::error_code& ec);\n\n  // Release ownership of the socket.\n  ASIO_DECL socket_type release(\n      base_implementation_type& impl, asio::error_code& ec);\n\n  // Get the native socket representation.\n  native_handle_type native_handle(base_implementation_type& impl)\n  {\n    return impl.socket_;\n  }\n\n  // Cancel all operations associated with the socket.\n  ASIO_DECL asio::error_code cancel(\n      base_implementation_type& impl, asio::error_code& ec);\n\n  // Determine whether the socket is at the out-of-band data mark.\n  bool at_mark(const base_implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    return socket_ops::sockatmark(impl.socket_, ec);\n  }\n\n  // Determine the number of bytes available for reading.\n  std::size_t available(const base_implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    return socket_ops::available(impl.socket_, ec);\n  }\n\n  // Place the socket into the 
state where it will listen for new connections.\n  asio::error_code listen(base_implementation_type& impl,\n      int backlog, asio::error_code& ec)\n  {\n    socket_ops::listen(impl.socket_, backlog, ec);\n    return ec;\n  }\n\n  // Perform an IO control command on the socket.\n  template <typename IO_Control_Command>\n  asio::error_code io_control(base_implementation_type& impl,\n      IO_Control_Command& command, asio::error_code& ec)\n  {\n    socket_ops::ioctl(impl.socket_, impl.state_, command.name(),\n        static_cast<ioctl_arg_type*>(command.data()), ec);\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the socket.\n  bool non_blocking(const base_implementation_type& impl) const\n  {\n    return (impl.state_ & socket_ops::user_set_non_blocking) != 0;\n  }\n\n  // Sets the non-blocking mode of the socket.\n  asio::error_code non_blocking(base_implementation_type& impl,\n      bool mode, asio::error_code& ec)\n  {\n    socket_ops::set_user_non_blocking(impl.socket_, impl.state_, mode, ec);\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the native socket implementation.\n  bool native_non_blocking(const base_implementation_type& impl) const\n  {\n    return (impl.state_ & socket_ops::internal_non_blocking) != 0;\n  }\n\n  // Sets the non-blocking mode of the native socket implementation.\n  asio::error_code native_non_blocking(base_implementation_type& impl,\n      bool mode, asio::error_code& ec)\n  {\n    socket_ops::set_internal_non_blocking(impl.socket_, impl.state_, mode, ec);\n    return ec;\n  }\n\n  // Wait for the socket to become ready to read, ready to write, or to have\n  // pending error conditions.\n  asio::error_code wait(base_implementation_type& impl,\n      socket_base::wait_type w, asio::error_code& ec)\n  {\n    switch (w)\n    {\n    case socket_base::wait_read:\n      socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);\n      break;\n    case socket_base::wait_write:\n      
socket_ops::poll_write(impl.socket_, impl.state_, -1, ec);\n      break;\n    case socket_base::wait_error:\n      socket_ops::poll_error(impl.socket_, impl.state_, -1, ec);\n      break;\n    default:\n      ec = asio::error::invalid_argument;\n      break;\n    }\n\n    return ec;\n  }\n\n  // Asynchronously wait for the socket to become ready to read, ready to\n  // write, or to have pending error conditions.\n  template <typename Handler, typename IoExecutor>\n  void async_wait(base_implementation_type& impl,\n      socket_base::wait_type w, Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_wait_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_wait\"));\n\n    int op_type;\n    switch (w)\n    {\n      case socket_base::wait_read:\n        op_type = reactor::read_op;\n        break;\n      case socket_base::wait_write:\n        op_type = reactor::write_op;\n        break;\n      case socket_base::wait_error:\n        op_type = reactor::except_op;\n        break;\n      default:\n        p.p->ec_ = asio::error::invalid_argument;\n        reactor_.post_immediate_completion(p.p, is_continuation);\n        p.v = p.p = 0;\n        return;\n    }\n\n    start_op(impl, op_type, p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\n  // Send the given data to the peer.\n  template <typename ConstBufferSequence>\n  size_t send(base_implementation_type& impl,\n      const ConstBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> 
bufs(buffers);\n\n    return socket_ops::sync_send(impl.socket_, impl.state_,\n        bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec);\n  }\n\n  // Wait until data can be sent without blocking.\n  size_t send(base_implementation_type& impl, const null_buffers&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_write(impl.socket_, impl.state_, -1, ec);\n\n    return 0;\n  }\n\n  // Start an asynchronous send. The data being sent must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_send(base_implementation_type& impl,\n      const ConstBufferSequence& buffers, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_socket_send_op<\n        ConstBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.socket_, impl.state_,\n        buffers, flags, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_send\"));\n\n    start_op(impl, reactor::write_op, p.p, is_continuation, true,\n        ((impl.state_ & socket_ops::stream_oriented)\n          && buffer_sequence_adapter<asio::const_buffer,\n            ConstBufferSequence>::all_empty(buffers)));\n    p.v = p.p = 0;\n  }\n\n  // Start an asynchronous wait until data can be sent without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_send(base_implementation_type& impl, const null_buffers&,\n      socket_base::message_flags, Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      
asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_send(null_buffers)\"));\n\n    start_op(impl, reactor::write_op, p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\n  // Receive some data from the peer. Returns the number of bytes received.\n  template <typename MutableBufferSequence>\n  size_t receive(base_implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    return socket_ops::sync_recv(impl.socket_, impl.state_,\n        bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec);\n  }\n\n  // Wait until data can be received without blocking.\n  size_t receive(base_implementation_type& impl, const null_buffers&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);\n\n    return 0;\n  }\n\n  // Start an asynchronous receive. 
The buffer for the data being received\n  // must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive(base_implementation_type& impl,\n      const MutableBufferSequence& buffers, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_socket_recv_op<\n        MutableBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.socket_, impl.state_,\n        buffers, flags, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive\"));\n\n    start_op(impl,\n        (flags & socket_base::message_out_of_band)\n          ? 
reactor::except_op : reactor::read_op,\n        p.p, is_continuation,\n        (flags & socket_base::message_out_of_band) == 0,\n        ((impl.state_ & socket_ops::stream_oriented)\n          && buffer_sequence_adapter<asio::mutable_buffer,\n            MutableBufferSequence>::all_empty(buffers)));\n    p.v = p.p = 0;\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive(base_implementation_type& impl,\n      const null_buffers&, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive(null_buffers)\"));\n\n    start_op(impl,\n        (flags & socket_base::message_out_of_band)\n          ? reactor::except_op : reactor::read_op,\n        p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\n  // Receive some data with associated flags. 
Returns the number of bytes\n  // received.\n  template <typename MutableBufferSequence>\n  size_t receive_with_flags(base_implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    return socket_ops::sync_recvmsg(impl.socket_, impl.state_,\n        bufs.buffers(), bufs.count(), in_flags, out_flags, ec);\n  }\n\n  // Wait until data can be received without blocking.\n  size_t receive_with_flags(base_implementation_type& impl,\n      const null_buffers&, socket_base::message_flags,\n      socket_base::message_flags& out_flags, asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);\n\n    // Clear out_flags, since we cannot give it any other sensible value when\n    // performing a null_buffers operation.\n    out_flags = 0;\n\n    return 0;\n  }\n\n  // Start an asynchronous receive. 
The buffer for the data being received\n  // must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive_with_flags(base_implementation_type& impl,\n      const MutableBufferSequence& buffers, socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_socket_recvmsg_op<\n        MutableBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.socket_, buffers,\n        in_flags, out_flags, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive_with_flags\"));\n\n    start_op(impl,\n        (in_flags & socket_base::message_out_of_band)\n          ? 
reactor::except_op : reactor::read_op,\n        p.p, is_continuation,\n        (in_flags & socket_base::message_out_of_band) == 0, false);\n    p.v = p.p = 0;\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive_with_flags(base_implementation_type& impl,\n      const null_buffers&, socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef reactive_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((reactor_.context(), *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive_with_flags(null_buffers)\"));\n\n    // Clear out_flags, since we cannot give it any other sensible value when\n    // performing a null_buffers operation.\n    out_flags = 0;\n\n    start_op(impl,\n        (in_flags & socket_base::message_out_of_band)\n          ? 
reactor::except_op : reactor::read_op,\n        p.p, is_continuation, false, false);\n    p.v = p.p = 0;\n  }\n\nprotected:\n  // Open a new socket implementation.\n  ASIO_DECL asio::error_code do_open(\n      base_implementation_type& impl, int af,\n      int type, int protocol, asio::error_code& ec);\n\n  // Assign a native socket to a socket implementation.\n  ASIO_DECL asio::error_code do_assign(\n      base_implementation_type& impl, int type,\n      const native_handle_type& native_socket, asio::error_code& ec);\n\n  // Start the asynchronous read or write operation.\n  ASIO_DECL void start_op(base_implementation_type& impl, int op_type,\n      reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop);\n\n  // Start the asynchronous accept operation.\n  ASIO_DECL void start_accept_op(base_implementation_type& impl,\n      reactor_op* op, bool is_continuation, bool peer_is_open);\n\n  // Start the asynchronous connect operation.\n  ASIO_DECL void start_connect_op(base_implementation_type& impl,\n      reactor_op* op, bool is_continuation,\n      const socket_addr_type* addr, size_t addrlen);\n\n  // The selector that performs event demultiplexing for the service.\n  reactor& reactor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/reactive_socket_service_base.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // !defined(ASIO_HAS_IOCP)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactive_wait_op.hpp",
    "content": "//\n// detail/reactive_wait_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTIVE_WAIT_OP_HPP\n#define ASIO_DETAIL_REACTIVE_WAIT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename IoExecutor>\nclass reactive_wait_op : public reactor_op\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(reactive_wait_op);\n\n  reactive_wait_op(Handler& handler, const IoExecutor& io_ex)\n    : reactor_op(&reactive_wait_op::do_perform,\n        &reactive_wait_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static status do_perform(reactor_op*)\n  {\n    return done;\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    reactive_wait_op* o(static_cast<reactive_wait_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. 
Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder1<Handler, asio::error_code>\n      handler(o->handler_, o->ec_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTIVE_WAIT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactor.hpp",
    "content": "//\n// detail/reactor.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTOR_HPP\n#define ASIO_DETAIL_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/reactor_fwd.hpp\"\n\n#if defined(ASIO_HAS_EPOLL)\n# include \"asio/detail/epoll_reactor.hpp\"\n#elif defined(ASIO_HAS_KQUEUE)\n# include \"asio/detail/kqueue_reactor.hpp\"\n#elif defined(ASIO_HAS_DEV_POLL)\n# include \"asio/detail/dev_poll_reactor.hpp\"\n#elif defined(ASIO_HAS_IOCP) || defined(ASIO_WINDOWS_RUNTIME)\n# include \"asio/detail/null_reactor.hpp\"\n#else\n# include \"asio/detail/select_reactor.hpp\"\n#endif\n\n#endif // ASIO_DETAIL_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactor_fwd.hpp",
    "content": "//\n// detail/reactor_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTOR_FWD_HPP\n#define ASIO_DETAIL_REACTOR_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_HAS_IOCP) || defined(ASIO_WINDOWS_RUNTIME)\ntypedef class null_reactor reactor;\n#elif defined(ASIO_HAS_IOCP)\ntypedef class select_reactor reactor;\n#elif defined(ASIO_HAS_EPOLL)\ntypedef class epoll_reactor reactor;\n#elif defined(ASIO_HAS_KQUEUE)\ntypedef class kqueue_reactor reactor;\n#elif defined(ASIO_HAS_DEV_POLL)\ntypedef class dev_poll_reactor reactor;\n#else\ntypedef class select_reactor reactor;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_REACTOR_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactor_op.hpp",
    "content": "//\n// detail/reactor_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTOR_OP_HPP\n#define ASIO_DETAIL_REACTOR_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass reactor_op\n  : public operation\n{\npublic:\n  // The error code to be passed to the completion handler.\n  asio::error_code ec_;\n\n  // The number of bytes transferred, to be passed to the completion handler.\n  std::size_t bytes_transferred_;\n\n  // Status returned by perform function. May be used to decide whether it is\n  // worth performing more operations on the descriptor immediately.\n  enum status { not_done, done, done_and_exhausted };\n\n  // Perform the operation. Returns true if it is finished.\n  status perform()\n  {\n    return perform_func_(this);\n  }\n\nprotected:\n  typedef status (*perform_func_type)(reactor_op*);\n\n  reactor_op(perform_func_type perform_func, func_type complete_func)\n    : operation(complete_func),\n      bytes_transferred_(0),\n      perform_func_(perform_func)\n  {\n  }\n\nprivate:\n  perform_func_type perform_func_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTOR_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/reactor_op_queue.hpp",
    "content": "//\n// detail/reactor_op_queue.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REACTOR_OP_QUEUE_HPP\n#define ASIO_DETAIL_REACTOR_OP_QUEUE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/hash_map.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Descriptor>\nclass reactor_op_queue\n  : private noncopyable\n{\npublic:\n  typedef Descriptor key_type;\n\n  struct mapped_type : op_queue<reactor_op>\n  {\n    mapped_type() {}\n    mapped_type(const mapped_type&) {}\n    void operator=(const mapped_type&) {}\n  };\n\n  typedef typename hash_map<key_type, mapped_type>::value_type value_type;\n  typedef typename hash_map<key_type, mapped_type>::iterator iterator;\n\n  // Constructor.\n  reactor_op_queue()\n    : operations_()\n  {\n  }\n\n  // Obtain iterators to all registered descriptors.\n  iterator begin() { return operations_.begin(); }\n  iterator end() { return operations_.end(); }\n\n  // Add a new operation to the queue. 
Returns true if this is the only\n  // operation for the given descriptor, in which case the reactor's event\n  // demultiplexing function call may need to be interrupted and restarted.\n  bool enqueue_operation(Descriptor descriptor, reactor_op* op)\n  {\n    std::pair<iterator, bool> entry =\n      operations_.insert(value_type(descriptor, mapped_type()));\n    entry.first->second.push(op);\n    return entry.second;\n  }\n\n  // Cancel all operations associated with the descriptor identified by the\n  // supplied iterator. Any operations pending for the descriptor will be\n  // cancelled. Returns true if any operations were cancelled, in which case\n  // the reactor's event demultiplexing function may need to be interrupted and\n  // restarted.\n  bool cancel_operations(iterator i, op_queue<operation>& ops,\n      const asio::error_code& ec =\n        asio::error::operation_aborted)\n  {\n    if (i != operations_.end())\n    {\n      while (reactor_op* op = i->second.front())\n      {\n        op->ec_ = ec;\n        i->second.pop();\n        ops.push(op);\n      }\n      operations_.erase(i);\n      return true;\n    }\n\n    return false;\n  }\n\n  // Cancel all operations associated with the descriptor. Any operations\n  // pending for the descriptor will be cancelled. 
Returns true if any\n  // operations were cancelled, in which case the reactor's event\n  // demultiplexing function may need to be interrupted and restarted.\n  bool cancel_operations(Descriptor descriptor, op_queue<operation>& ops,\n      const asio::error_code& ec =\n        asio::error::operation_aborted)\n  {\n    return this->cancel_operations(operations_.find(descriptor), ops, ec);\n  }\n\n  // Whether there are no operations in the queue.\n  bool empty() const\n  {\n    return operations_.empty();\n  }\n\n  // Determine whether there are any operations associated with the descriptor.\n  bool has_operation(Descriptor descriptor) const\n  {\n    return operations_.find(descriptor) != operations_.end();\n  }\n\n  // Perform the operations corresponding to the descriptor identified by the\n  // supplied iterator. Returns true if there are still unfinished operations\n  // queued for the descriptor.\n  bool perform_operations(iterator i, op_queue<operation>& ops)\n  {\n    if (i != operations_.end())\n    {\n      while (reactor_op* op = i->second.front())\n      {\n        if (op->perform())\n        {\n          i->second.pop();\n          ops.push(op);\n        }\n        else\n        {\n          return true;\n        }\n      }\n      operations_.erase(i);\n    }\n    return false;\n  }\n\n  // Perform the operations corresponding to the descriptor. 
Returns true if\n  // there are still unfinished operations queued for the descriptor.\n  bool perform_operations(Descriptor descriptor, op_queue<operation>& ops)\n  {\n    return this->perform_operations(operations_.find(descriptor), ops);\n  }\n\n  // Get all operations owned by the queue.\n  void get_all_operations(op_queue<operation>& ops)\n  {\n    iterator i = operations_.begin();\n    while (i != operations_.end())\n    {\n      iterator op_iter = i++;\n      ops.push(op_iter->second);\n      operations_.erase(op_iter);\n    }\n  }\n\nprivate:\n  // The operations that are currently executing asynchronously.\n  hash_map<key_type, mapped_type> operations_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_REACTOR_OP_QUEUE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/recycling_allocator.hpp",
    "content": "//\n// detail/recycling_allocator.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_RECYCLING_ALLOCATOR_HPP\n#define ASIO_DETAIL_RECYCLING_ALLOCATOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/thread_context.hpp\"\n#include \"asio/detail/thread_info_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T, typename Purpose = thread_info_base::default_tag>\nclass recycling_allocator\n{\npublic:\n  typedef T value_type;\n\n  template <typename U>\n  struct rebind\n  {\n    typedef recycling_allocator<U, Purpose> other;\n  };\n\n  recycling_allocator()\n  {\n  }\n\n  template <typename U>\n  recycling_allocator(const recycling_allocator<U, Purpose>&)\n  {\n  }\n\n  T* allocate(std::size_t n)\n  {\n    typedef thread_context::thread_call_stack call_stack;\n    void* p = thread_info_base::allocate(Purpose(),\n        call_stack::top(), sizeof(T) * n);\n    return static_cast<T*>(p);\n  }\n\n  void deallocate(T* p, std::size_t n)\n  {\n    typedef thread_context::thread_call_stack call_stack;\n    thread_info_base::deallocate(Purpose(),\n        call_stack::top(), p, sizeof(T) * n);\n  }\n};\n\ntemplate <typename Purpose>\nclass recycling_allocator<void, Purpose>\n{\npublic:\n  typedef void value_type;\n\n  template <typename U>\n  struct rebind\n  {\n    typedef recycling_allocator<U, Purpose> other;\n  };\n\n  recycling_allocator()\n  {\n  }\n\n  template <typename U>\n  recycling_allocator(const recycling_allocator<U, Purpose>&)\n  {\n  }\n};\n\ntemplate <typename 
Allocator, typename Purpose>\nstruct get_recycling_allocator\n{\n  typedef Allocator type;\n  static type get(const Allocator& a) { return a; }\n};\n\ntemplate <typename T, typename Purpose>\nstruct get_recycling_allocator<std::allocator<T>, Purpose>\n{\n  typedef recycling_allocator<T, Purpose> type;\n  static type get(const std::allocator<T>&) { return type(); }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_RECYCLING_ALLOCATOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/regex_fwd.hpp",
    "content": "//\n// detail/regex_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_REGEX_FWD_HPP\n#define ASIO_DETAIL_REGEX_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#if defined(ASIO_HAS_BOOST_REGEX)\n\n#include <boost/regex_fwd.hpp>\n#include <boost/regex/v4/match_flags.hpp>\n\nnamespace boost {\n\ntemplate <class BidiIterator>\nstruct sub_match;\n\ntemplate <class BidiIterator, class Allocator>\nclass match_results;\n\n} // namespace boost\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n\n#endif // ASIO_DETAIL_REGEX_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/resolve_endpoint_op.hpp",
    "content": "//\n// detail/resolve_endpoint_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP\n#define ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/ip/basic_resolver_results.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/resolve_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol, typename Handler, typename IoExecutor>\nclass resolve_endpoint_op : public resolve_op\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(resolve_endpoint_op);\n\n  typedef typename Protocol::endpoint endpoint_type;\n  typedef asio::ip::basic_resolver_results<Protocol> results_type;\n\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n\n  resolve_endpoint_op(socket_ops::weak_cancel_token_type cancel_token,\n      const endpoint_type& endpoint, scheduler_impl& sched,\n      Handler& handler, const IoExecutor& io_ex)\n    : resolve_op(&resolve_endpoint_op::do_complete),\n      cancel_token_(cancel_token),\n      endpoint_(endpoint),\n      
scheduler_(sched),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the operation object.\n    resolve_endpoint_op* o(static_cast<resolve_endpoint_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    if (owner && owner != &o->scheduler_)\n    {\n      // The operation is being run on the worker io_context. Time to perform\n      // the resolver operation.\n    \n      // Perform the blocking endpoint resolution operation.\n      char host_name[NI_MAXHOST];\n      char service_name[NI_MAXSERV];\n      socket_ops::background_getnameinfo(o->cancel_token_, o->endpoint_.data(),\n          o->endpoint_.size(), host_name, NI_MAXHOST, service_name, NI_MAXSERV,\n          o->endpoint_.protocol().type(), o->ec_);\n      o->results_ = results_type::create(o->endpoint_, host_name, service_name);\n\n      // Pass operation back to main io_context for completion.\n      o->scheduler_.post_deferred_completion(o);\n      p.v = p.p = 0;\n    }\n    else\n    {\n      // The operation has been returned to the main io_context. The completion\n      // handler is ready to be delivered.\n\n      ASIO_HANDLER_COMPLETION((*o));\n\n      // Make a copy of the handler so that the memory can be deallocated\n      // before the upcall is made. Even if we're not about to make an upcall,\n      // a sub-object of the handler may be the true owner of the memory\n      // associated with the handler. 
Consequently, a local copy of the handler\n      // is required to ensure that any owning sub-object remains valid until\n      // after we have deallocated the memory here.\n      detail::binder2<Handler, asio::error_code, results_type>\n        handler(o->handler_, o->ec_, o->results_);\n      p.h = asio::detail::addressof(handler.handler_);\n      p.reset();\n\n      if (owner)\n      {\n        fenced_block b(fenced_block::half);\n        ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, \"...\"));\n        w.complete(handler, handler.handler_);\n        ASIO_HANDLER_INVOCATION_END;\n      }\n    }\n  }\n\nprivate:\n  socket_ops::weak_cancel_token_type cancel_token_;\n  endpoint_type endpoint_;\n  scheduler_impl& scheduler_;\n  Handler handler_;\n  IoExecutor io_executor_;\n  results_type results_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/resolve_op.hpp",
    "content": "//\n// detail/resolve_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_RESOLVE_OP_HPP\n#define ASIO_DETAIL_RESOLVE_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass resolve_op : public operation\n{\npublic:\n  // The error code to be passed to the completion handler.\n  asio::error_code ec_;\n\nprotected:\n  resolve_op(func_type complete_func)\n    : operation(complete_func)\n  {\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_RESOLVE_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/resolve_query_op.hpp",
    "content": "//\n// detail/resolve_query_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_RESOLVE_QUERY_OP_HPP\n#define ASIO_DETAIL_RESOLVE_QUERY_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/ip/basic_resolver_query.hpp\"\n#include \"asio/ip/basic_resolver_results.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/resolve_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol, typename Handler, typename IoExecutor>\nclass resolve_query_op : public resolve_op\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(resolve_query_op);\n\n  typedef asio::ip::basic_resolver_query<Protocol> query_type;\n  typedef asio::ip::basic_resolver_results<Protocol> results_type;\n\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n\n  resolve_query_op(socket_ops::weak_cancel_token_type cancel_token,\n      const query_type& query, scheduler_impl& sched,\n      Handler& handler, const IoExecutor& io_ex)\n    : resolve_op(&resolve_query_op::do_complete),\n      cancel_token_(cancel_token),\n      
query_(query),\n      scheduler_(sched),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex),\n      addrinfo_(0)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  ~resolve_query_op()\n  {\n    if (addrinfo_)\n      socket_ops::freeaddrinfo(addrinfo_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the operation object.\n    resolve_query_op* o(static_cast<resolve_query_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n\n    if (owner && owner != &o->scheduler_)\n    {\n      // The operation is being run on the worker io_context. Time to perform\n      // the resolver operation.\n    \n      // Perform the blocking host resolution operation.\n      socket_ops::background_getaddrinfo(o->cancel_token_,\n          o->query_.host_name().c_str(), o->query_.service_name().c_str(),\n          o->query_.hints(), &o->addrinfo_, o->ec_);\n\n      // Pass operation back to main io_context for completion.\n      o->scheduler_.post_deferred_completion(o);\n      p.v = p.p = 0;\n    }\n    else\n    {\n      // The operation has been returned to the main io_context. The completion\n      // handler is ready to be delivered.\n\n      // Take ownership of the operation's outstanding work.\n      handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n      ASIO_HANDLER_COMPLETION((*o));\n\n      // Make a copy of the handler so that the memory can be deallocated\n      // before the upcall is made. Even if we're not about to make an upcall,\n      // a sub-object of the handler may be the true owner of the memory\n      // associated with the handler. 
Consequently, a local copy of the handler\n      // is required to ensure that any owning sub-object remains valid until\n      // after we have deallocated the memory here.\n      detail::binder2<Handler, asio::error_code, results_type>\n        handler(o->handler_, o->ec_, results_type());\n      p.h = asio::detail::addressof(handler.handler_);\n      if (o->addrinfo_)\n      {\n        handler.arg2_ = results_type::create(o->addrinfo_,\n            o->query_.host_name(), o->query_.service_name());\n      }\n      p.reset();\n\n      if (owner)\n      {\n        fenced_block b(fenced_block::half);\n        ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, \"...\"));\n        w.complete(handler, handler.handler_);\n        ASIO_HANDLER_INVOCATION_END;\n      }\n    }\n  }\n\nprivate:\n  socket_ops::weak_cancel_token_type cancel_token_;\n  query_type query_;\n  scheduler_impl& scheduler_;\n  Handler handler_;\n  IoExecutor io_executor_;\n  asio::detail::addrinfo_type* addrinfo_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_RESOLVE_QUERY_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/resolver_service.hpp",
    "content": "//\n// detail/resolver_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_RESOLVER_SERVICE_HPP\n#define ASIO_DETAIL_RESOLVER_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/ip/basic_resolver_query.hpp\"\n#include \"asio/ip/basic_resolver_results.hpp\"\n#include \"asio/detail/concurrency_hint.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/resolve_endpoint_op.hpp\"\n#include \"asio/detail/resolve_query_op.hpp\"\n#include \"asio/detail/resolver_service_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol>\nclass resolver_service :\n  public execution_context_service_base<resolver_service<Protocol> >,\n  public resolver_service_base\n{\npublic:\n  // The implementation type of the resolver. 
A cancellation token is used to\n  // indicate to the background thread that the operation has been cancelled.\n  typedef socket_ops::shared_cancel_token_type implementation_type;\n\n  // The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  // The query type.\n  typedef asio::ip::basic_resolver_query<Protocol> query_type;\n\n  // The results type.\n  typedef asio::ip::basic_resolver_results<Protocol> results_type;\n\n  // Constructor.\n  resolver_service(execution_context& context)\n    : execution_context_service_base<resolver_service<Protocol> >(context),\n      resolver_service_base(context)\n  {\n  }\n\n  // Destroy all user-defined handler objects owned by the service.\n  void shutdown()\n  {\n    this->base_shutdown();\n  }\n\n  // Perform any fork-related housekeeping.\n  void notify_fork(execution_context::fork_event fork_ev)\n  {\n    this->base_notify_fork(fork_ev);\n  }\n\n  // Resolve a query to a list of entries.\n  results_type resolve(implementation_type&, const query_type& query,\n      asio::error_code& ec)\n  {\n    asio::detail::addrinfo_type* address_info = 0;\n\n    socket_ops::getaddrinfo(query.host_name().c_str(),\n        query.service_name().c_str(), query.hints(), &address_info, ec);\n    auto_addrinfo auto_address_info(address_info);\n\n    return ec ? 
results_type() : results_type::create(\n        address_info, query.host_name(), query.service_name());\n  }\n\n  // Asynchronously resolve a query to a list of entries.\n  template <typename Handler, typename IoExecutor>\n  void async_resolve(implementation_type& impl, const query_type& query,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef resolve_query_op<Protocol, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl, query, scheduler_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((scheduler_.context(),\n          *p.p, \"resolver\", &impl, 0, \"async_resolve\"));\n\n    start_resolve_op(p.p);\n    p.v = p.p = 0;\n  }\n\n  // Resolve an endpoint to a list of entries.\n  results_type resolve(implementation_type&,\n      const endpoint_type& endpoint, asio::error_code& ec)\n  {\n    char host_name[NI_MAXHOST];\n    char service_name[NI_MAXSERV];\n    socket_ops::sync_getnameinfo(endpoint.data(), endpoint.size(),\n        host_name, NI_MAXHOST, service_name, NI_MAXSERV,\n        endpoint.protocol().type(), ec);\n\n    return ec ? 
results_type() : results_type::create(\n        endpoint, host_name, service_name);\n  }\n\n  // Asynchronously resolve an endpoint to a list of entries.\n  template <typename Handler, typename IoExecutor>\n  void async_resolve(implementation_type& impl, const endpoint_type& endpoint,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef resolve_endpoint_op<Protocol, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl, endpoint, scheduler_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((scheduler_.context(),\n          *p.p, \"resolver\", &impl, 0, \"async_resolve\"));\n\n    start_resolve_op(p.p);\n    p.v = p.p = 0;\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_RESOLVER_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/resolver_service_base.hpp",
    "content": "//\n// detail/resolver_service_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP\n#define ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/resolve_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n#include \"asio/detail/thread.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass resolver_service_base\n{\npublic:\n  // The implementation type of the resolver. 
A cancellation token is used to\n  // indicate to the background thread that the operation has been cancelled.\n  typedef socket_ops::shared_cancel_token_type implementation_type;\n\n  // Constructor.\n  ASIO_DECL resolver_service_base(execution_context& context);\n\n  // Destructor.\n  ASIO_DECL ~resolver_service_base();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void base_shutdown();\n\n  // Perform any fork-related housekeeping.\n  ASIO_DECL void base_notify_fork(\n      execution_context::fork_event fork_ev);\n\n  // Construct a new resolver implementation.\n  ASIO_DECL void construct(implementation_type& impl);\n\n  // Destroy a resolver implementation.\n  ASIO_DECL void destroy(implementation_type&);\n\n  // Move-construct a new resolver implementation.\n  ASIO_DECL void move_construct(implementation_type& impl,\n      implementation_type& other_impl);\n\n  // Move-assign from another resolver implementation.\n  ASIO_DECL void move_assign(implementation_type& impl,\n      resolver_service_base& other_service,\n      implementation_type& other_impl);\n\n  // Cancel pending asynchronous operations.\n  ASIO_DECL void cancel(implementation_type& impl);\n\nprotected:\n  // Helper function to start an asynchronous resolve operation.\n  ASIO_DECL void start_resolve_op(resolve_op* op);\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n  // Helper class to perform exception-safe cleanup of addrinfo objects.\n  class auto_addrinfo\n    : private asio::detail::noncopyable\n  {\n  public:\n    explicit auto_addrinfo(asio::detail::addrinfo_type* ai)\n      : ai_(ai)\n    {\n    }\n\n    ~auto_addrinfo()\n    {\n      if (ai_)\n        socket_ops::freeaddrinfo(ai_);\n    }\n\n    operator asio::detail::addrinfo_type*()\n    {\n      return ai_;\n    }\n\n  private:\n    asio::detail::addrinfo_type* ai_;\n  };\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\n  // Helper class to run the work scheduler in a thread.\n  class 
work_scheduler_runner;\n\n  // Start the work scheduler if it's not already running.\n  ASIO_DECL void start_work_thread();\n\n  // The scheduler implementation used to post completions.\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n  scheduler_impl& scheduler_;\n\nprivate:\n  // Mutex to protect access to internal data.\n  asio::detail::mutex mutex_;\n\n  // Private scheduler used for performing asynchronous host resolution.\n  asio::detail::scoped_ptr<scheduler_impl> work_scheduler_;\n\n  // Thread used for running the work io_context's run loop.\n  asio::detail::scoped_ptr<asio::detail::thread> work_thread_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/resolver_service_base.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/scheduler.hpp",
    "content": "//\n// detail/scheduler.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SCHEDULER_HPP\n#define ASIO_DETAIL_SCHEDULER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/error_code.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/atomic_count.hpp\"\n#include \"asio/detail/conditionally_enabled_event.hpp\"\n#include \"asio/detail/conditionally_enabled_mutex.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/reactor_fwd.hpp\"\n#include \"asio/detail/scheduler_operation.hpp\"\n#include \"asio/detail/thread.hpp\"\n#include \"asio/detail/thread_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct scheduler_thread_info;\n\nclass scheduler\n  : public execution_context_service_base<scheduler>,\n    public thread_context\n{\npublic:\n  typedef scheduler_operation operation;\n\n  // Constructor. Specifies the number of concurrent threads that are likely to\n  // run the scheduler. 
If set to 1 certain optimisations are performed.\n  ASIO_DECL scheduler(asio::execution_context& ctx,\n      int concurrency_hint = 0, bool own_thread = true);\n\n  // Destructor.\n  ASIO_DECL ~scheduler();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Initialise the task, if required.\n  ASIO_DECL void init_task();\n\n  // Run the event loop until interrupted or no more work.\n  ASIO_DECL std::size_t run(asio::error_code& ec);\n\n  // Run until interrupted or one operation is performed.\n  ASIO_DECL std::size_t run_one(asio::error_code& ec);\n\n  // Run until timeout, interrupted, or one operation is performed.\n  ASIO_DECL std::size_t wait_one(\n      long usec, asio::error_code& ec);\n\n  // Poll for operations without blocking.\n  ASIO_DECL std::size_t poll(asio::error_code& ec);\n\n  // Poll for one operation without blocking.\n  ASIO_DECL std::size_t poll_one(asio::error_code& ec);\n\n  // Interrupt the event processing loop.\n  ASIO_DECL void stop();\n\n  // Determine whether the scheduler is stopped.\n  ASIO_DECL bool stopped() const;\n\n  // Restart in preparation for a subsequent run invocation.\n  ASIO_DECL void restart();\n\n  // Notify that some work has started.\n  void work_started()\n  {\n    ++outstanding_work_;\n  }\n\n  // Used to compensate for a forthcoming work_finished call. Must be called\n  // from within a scheduler-owned thread.\n  ASIO_DECL void compensating_work_started();\n\n  // Notify that some work has finished.\n  void work_finished()\n  {\n    if (--outstanding_work_ == 0)\n      stop();\n  }\n\n  // Return whether a handler can be dispatched immediately.\n  bool can_dispatch()\n  {\n    return thread_call_stack::contains(this) != 0;\n  }\n\n  // Request invocation of the given operation and return immediately. 
Assumes\n  // that work_started() has not yet been called for the operation.\n  ASIO_DECL void post_immediate_completion(\n      operation* op, bool is_continuation);\n\n  // Request invocation of the given operation and return immediately. Assumes\n  // that work_started() was previously called for the operation.\n  ASIO_DECL void post_deferred_completion(operation* op);\n\n  // Request invocation of the given operations and return immediately. Assumes\n  // that work_started() was previously called for each operation.\n  ASIO_DECL void post_deferred_completions(op_queue<operation>& ops);\n\n  // Enqueue the given operation following a failed attempt to dispatch the\n  // operation for immediate invocation.\n  ASIO_DECL void do_dispatch(operation* op);\n\n  // Process unfinished operations as part of a shutdown operation. Assumes that\n  // work_started() was previously called for the operations.\n  ASIO_DECL void abandon_operations(op_queue<operation>& ops);\n\n  // Get the concurrency hint that was used to initialise the scheduler.\n  int concurrency_hint() const\n  {\n    return concurrency_hint_;\n  }\n\nprivate:\n  // The mutex type used by this scheduler.\n  typedef conditionally_enabled_mutex mutex;\n\n  // The event type used by this scheduler.\n  typedef conditionally_enabled_event event;\n\n  // Structure containing thread-specific data.\n  typedef scheduler_thread_info thread_info;\n\n  // Run at most one operation. May block.\n  ASIO_DECL std::size_t do_run_one(mutex::scoped_lock& lock,\n      thread_info& this_thread, const asio::error_code& ec);\n\n  // Run at most one operation with a timeout. 
May block.\n  ASIO_DECL std::size_t do_wait_one(mutex::scoped_lock& lock,\n      thread_info& this_thread, long usec, const asio::error_code& ec);\n\n  // Poll for at most one operation.\n  ASIO_DECL std::size_t do_poll_one(mutex::scoped_lock& lock,\n      thread_info& this_thread, const asio::error_code& ec);\n\n  // Stop the task and all idle threads.\n  ASIO_DECL void stop_all_threads(mutex::scoped_lock& lock);\n\n  // Wake a single idle thread, or the task, and always unlock the mutex.\n  ASIO_DECL void wake_one_thread_and_unlock(\n      mutex::scoped_lock& lock);\n\n  // Helper class to run the scheduler in its own thread.\n  class thread_function;\n  friend class thread_function;\n\n  // Helper class to perform task-related operations on block exit.\n  struct task_cleanup;\n  friend struct task_cleanup;\n\n  // Helper class to call work-related operations on block exit.\n  struct work_cleanup;\n  friend struct work_cleanup;\n\n  // Whether to optimise for single-threaded use cases.\n  const bool one_thread_;\n\n  // Mutex to protect access to internal data.\n  mutable mutex mutex_;\n\n  // Event to wake up blocked threads.\n  event wakeup_event_;\n\n  // The task to be run by this service.\n  reactor* task_;\n\n  // Operation object to represent the position of the task in the queue.\n  struct task_operation : operation\n  {\n    task_operation() : operation(0) {}\n  } task_operation_;\n\n  // Whether the task has been interrupted.\n  bool task_interrupted_;\n\n  // The count of unfinished work.\n  atomic_count outstanding_work_;\n\n  // The queue of handlers that are ready to be delivered.\n  op_queue<operation> op_queue_;\n\n  // Flag to indicate that the dispatcher has been stopped.\n  bool stopped_;\n\n  // Flag to indicate that the dispatcher has been shut down.\n  bool shutdown_;\n\n  // The concurrency hint used to initialise the scheduler.\n  const int concurrency_hint_;\n\n  // The thread that is running the scheduler.\n  asio::detail::thread* 
thread_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/scheduler.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_SCHEDULER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/scheduler_operation.hpp",
    "content": "//\n// detail/scheduler_operation.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SCHEDULER_OPERATION_HPP\n#define ASIO_DETAIL_SCHEDULER_OPERATION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/error_code.hpp\"\n#include \"asio/detail/handler_tracking.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass scheduler;\n\n// Base class for all operations. A function pointer is used instead of virtual\n// functions to avoid the associated overhead.\nclass scheduler_operation ASIO_INHERIT_TRACKED_HANDLER\n{\npublic:\n  typedef scheduler_operation operation_type;\n\n  void complete(void* owner, const asio::error_code& ec,\n      std::size_t bytes_transferred)\n  {\n    func_(owner, this, ec, bytes_transferred);\n  }\n\n  void destroy()\n  {\n    func_(0, this, asio::error_code(), 0);\n  }\n\nprotected:\n  typedef void (*func_type)(void*,\n      scheduler_operation*,\n      const asio::error_code&, std::size_t);\n\n  scheduler_operation(func_type func)\n    : next_(0),\n      func_(func),\n      task_result_(0)\n  {\n  }\n\n  // Prevents deletion through this type.\n  ~scheduler_operation()\n  {\n  }\n\nprivate:\n  friend class op_queue_access;\n  scheduler_operation* next_;\n  func_type func_;\nprotected:\n  friend class scheduler;\n  unsigned int task_result_; // Passed into bytes transferred.\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SCHEDULER_OPERATION_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/scheduler_thread_info.hpp",
    "content": "//\n// detail/scheduler_thread_info.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SCHEDULER_THREAD_INFO_HPP\n#define ASIO_DETAIL_SCHEDULER_THREAD_INFO_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/thread_info_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass scheduler;\nclass scheduler_operation;\n\nstruct scheduler_thread_info : public thread_info_base\n{\n  op_queue<scheduler_operation> private_op_queue;\n  long private_outstanding_work;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SCHEDULER_THREAD_INFO_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/scoped_lock.hpp",
    "content": "//\n// detail/scoped_lock.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SCOPED_LOCK_HPP\n#define ASIO_DETAIL_SCOPED_LOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Helper class to lock and unlock a mutex automatically.\ntemplate <typename Mutex>\nclass scoped_lock\n  : private noncopyable\n{\npublic:\n  // Tag type used to distinguish constructors.\n  enum adopt_lock_t { adopt_lock };\n\n  // Constructor adopts a lock that is already held.\n  scoped_lock(Mutex& m, adopt_lock_t)\n    : mutex_(m),\n      locked_(true)\n  {\n  }\n\n  // Constructor acquires the lock.\n  explicit scoped_lock(Mutex& m)\n    : mutex_(m)\n  {\n    mutex_.lock();\n    locked_ = true;\n  }\n\n  // Destructor releases the lock.\n  ~scoped_lock()\n  {\n    if (locked_)\n      mutex_.unlock();\n  }\n\n  // Explicitly acquire the lock.\n  void lock()\n  {\n    if (!locked_)\n    {\n      mutex_.lock();\n      locked_ = true;\n    }\n  }\n\n  // Explicitly release the lock.\n  void unlock()\n  {\n    if (locked_)\n    {\n      mutex_.unlock();\n      locked_ = false;\n    }\n  }\n\n  // Test whether the lock is held.\n  bool locked() const\n  {\n    return locked_;\n  }\n\n  // Get the underlying mutex.\n  Mutex& mutex()\n  {\n    return mutex_;\n  }\n\nprivate:\n  // The underlying mutex.\n  Mutex& mutex_;\n\n  // Whether the mutex is currently locked or unlocked.\n  bool locked_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SCOPED_LOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/scoped_ptr.hpp",
    "content": "//\n// detail/scoped_ptr.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SCOPED_PTR_HPP\n#define ASIO_DETAIL_SCOPED_PTR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nclass scoped_ptr\n{\npublic:\n  // Constructor.\n  explicit scoped_ptr(T* p = 0)\n    : p_(p)\n  {\n  }\n\n  // Destructor.\n  ~scoped_ptr()\n  {\n    delete p_;\n  }\n\n  // Access.\n  T* get()\n  {\n    return p_;\n  }\n\n  // Access.\n  T* operator->()\n  {\n    return p_;\n  }\n\n  // Dereference.\n  T& operator*()\n  {\n    return *p_;\n  }\n\n  // Reset pointer.\n  void reset(T* p = 0)\n  {\n    delete p_;\n    p_ = p;\n  }\n\n  // Release ownership of the pointer.\n  T* release()\n  {\n    T* tmp = p_;\n    p_ = 0;\n    return tmp;\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  scoped_ptr(const scoped_ptr&);\n  scoped_ptr& operator=(const scoped_ptr&);\n\n  T* p_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SCOPED_PTR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/select_interrupter.hpp",
    "content": "//\n// detail/select_interrupter.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SELECT_INTERRUPTER_HPP\n#define ASIO_DETAIL_SELECT_INTERRUPTER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) || defined(__SYMBIAN32__)\n# include \"asio/detail/socket_select_interrupter.hpp\"\n#elif defined(ASIO_HAS_EVENTFD)\n# include \"asio/detail/eventfd_select_interrupter.hpp\"\n#else\n# include \"asio/detail/pipe_select_interrupter.hpp\"\n#endif\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) || defined(__SYMBIAN32__)\ntypedef socket_select_interrupter select_interrupter;\n#elif defined(ASIO_HAS_EVENTFD)\ntypedef eventfd_select_interrupter select_interrupter;\n#else\ntypedef pipe_select_interrupter select_interrupter;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_SELECT_INTERRUPTER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/select_reactor.hpp",
    "content": "//\n// detail/select_reactor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SELECT_REACTOR_HPP\n#define ASIO_DETAIL_SELECT_REACTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP) \\\n  || (!defined(ASIO_HAS_DEV_POLL) \\\n      && !defined(ASIO_HAS_EPOLL) \\\n      && !defined(ASIO_HAS_KQUEUE) \\\n      && !defined(ASIO_WINDOWS_RUNTIME))\n\n#include <cstddef>\n#include \"asio/detail/fd_set_adapter.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/reactor_op_queue.hpp\"\n#include \"asio/detail/select_interrupter.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/timer_queue_base.hpp\"\n#include \"asio/detail/timer_queue_set.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/thread.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass select_reactor\n  : public execution_context_service_base<select_reactor>\n{\npublic:\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  enum op_types { read_op = 0, write_op = 1, except_op = 2,\n    max_select_ops = 3, connect_op = 3, max_ops = 4 };\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  enum op_types { read_op = 0, write_op = 1, except_op = 2,\n    max_select_ops = 3, connect_op = 1, max_ops = 3 };\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n  // Per-descriptor data.\n  struct 
per_descriptor_data\n  {\n  };\n\n  // Constructor.\n  ASIO_DECL select_reactor(asio::execution_context& ctx);\n\n  // Destructor.\n  ASIO_DECL ~select_reactor();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Recreate internal descriptors following a fork.\n  ASIO_DECL void notify_fork(\n      asio::execution_context::fork_event fork_ev);\n\n  // Initialise the task, but only if the reactor is not in its own thread.\n  ASIO_DECL void init_task();\n\n  // Register a socket with the reactor. Returns 0 on success, system error\n  // code on failure.\n  ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&);\n\n  // Register a descriptor with an associated single operation. Returns 0 on\n  // success, system error code on failure.\n  ASIO_DECL int register_internal_descriptor(\n      int op_type, socket_type descriptor,\n      per_descriptor_data& descriptor_data, reactor_op* op);\n\n  // Post a reactor operation for immediate completion.\n  void post_immediate_completion(reactor_op* op, bool is_continuation)\n  {\n    scheduler_.post_immediate_completion(op, is_continuation);\n  }\n\n  // Start a new operation. The reactor operation will be performed when the\n  // given descriptor is flagged as ready, or an error has occurred.\n  ASIO_DECL void start_op(int op_type, socket_type descriptor,\n      per_descriptor_data&, reactor_op* op, bool is_continuation, bool);\n\n  // Cancel all operations associated with the given descriptor. The\n  // handlers associated with the descriptor will be invoked with the\n  // operation_aborted error.\n  ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&);\n\n  // Cancel any operations that are running against the descriptor and remove\n  // its registration from the reactor. 
The reactor resources associated with\n  // the descriptor must be released by calling cleanup_descriptor_data.\n  ASIO_DECL void deregister_descriptor(socket_type descriptor,\n      per_descriptor_data&, bool closing);\n\n  // Remove the descriptor's registration from the reactor. The reactor\n  // resources associated with the descriptor must be released by calling\n  // cleanup_descriptor_data.\n  ASIO_DECL void deregister_internal_descriptor(\n      socket_type descriptor, per_descriptor_data&);\n\n  // Perform any post-deregistration cleanup tasks associated with the\n  // descriptor data.\n  ASIO_DECL void cleanup_descriptor_data(per_descriptor_data&);\n\n  // Move descriptor registration from one descriptor_data object to another.\n  ASIO_DECL void move_descriptor(socket_type descriptor,\n      per_descriptor_data& target_descriptor_data,\n      per_descriptor_data& source_descriptor_data);\n\n  // Add a new timer queue to the reactor.\n  template <typename Time_Traits>\n  void add_timer_queue(timer_queue<Time_Traits>& queue);\n\n  // Remove a timer queue from the reactor.\n  template <typename Time_Traits>\n  void remove_timer_queue(timer_queue<Time_Traits>& queue);\n\n  // Schedule a new operation in the given timer queue to expire at the\n  // specified absolute time.\n  template <typename Time_Traits>\n  void schedule_timer(timer_queue<Time_Traits>& queue,\n      const typename Time_Traits::time_type& time,\n      typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op);\n\n  // Cancel the timer operations associated with the given token. 
Returns the\n  // number of operations that have been posted or dispatched.\n  template <typename Time_Traits>\n  std::size_t cancel_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& timer,\n      std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());\n\n  // Move the timer operations associated with the given timer.\n  template <typename Time_Traits>\n  void move_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& target,\n      typename timer_queue<Time_Traits>::per_timer_data& source);\n\n  // Run select once until interrupted or events are ready to be dispatched.\n  ASIO_DECL void run(long usec, op_queue<operation>& ops);\n\n  // Interrupt the select loop.\n  ASIO_DECL void interrupt();\n\nprivate:\n#if defined(ASIO_HAS_IOCP)\n  // Run the select loop in the thread.\n  ASIO_DECL void run_thread();\n#endif // defined(ASIO_HAS_IOCP)\n\n  // Helper function to add a new timer queue.\n  ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);\n\n  // Helper function to remove a timer queue.\n  ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);\n\n  // Get the timeout value for the select call.\n  ASIO_DECL timeval* get_timeout(long usec, timeval& tv);\n\n  // Cancel all operations associated with the given descriptor. 
This function\n  // does not acquire the select_reactor's mutex.\n  ASIO_DECL void cancel_ops_unlocked(socket_type descriptor,\n      const asio::error_code& ec);\n\n  // The scheduler implementation used to post completions.\n# if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_type;\n# else // defined(ASIO_HAS_IOCP)\n  typedef class scheduler scheduler_type;\n# endif // defined(ASIO_HAS_IOCP)\n  scheduler_type& scheduler_;\n\n  // Mutex to protect access to internal data.\n  asio::detail::mutex mutex_;\n\n  // The interrupter is used to break a blocking select call.\n  select_interrupter interrupter_;\n\n  // The queues of read, write and except operations.\n  reactor_op_queue<socket_type> op_queue_[max_ops];\n\n  // The file descriptor sets to be passed to the select system call.\n  fd_set_adapter fd_sets_[max_select_ops];\n\n  // The timer queues.\n  timer_queue_set timer_queues_;\n\n#if defined(ASIO_HAS_IOCP)\n  // Helper class to run the reactor loop in a thread.\n  class thread_function;\n  friend class thread_function;\n\n  // Does the reactor loop thread need to stop.\n  bool stop_thread_;\n\n  // The thread that is running the reactor loop.\n  asio::detail::thread* thread_;\n#endif // defined(ASIO_HAS_IOCP)\n\n  // Whether the service has been shut down.\n  bool shutdown_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/detail/impl/select_reactor.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/select_reactor.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_IOCP)\n       //   || (!defined(ASIO_HAS_DEV_POLL)\n       //       && !defined(ASIO_HAS_EPOLL)\n       //       && !defined(ASIO_HAS_KQUEUE)\n       //       && !defined(ASIO_WINDOWS_RUNTIME))\n\n#endif // ASIO_DETAIL_SELECT_REACTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/service_registry.hpp",
    "content": "//\n// detail/service_registry.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SERVICE_REGISTRY_HPP\n#define ASIO_DETAIL_SERVICE_REGISTRY_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <typeinfo>\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nclass io_context;\n\nnamespace detail {\n\ntemplate <typename T>\nclass typeid_wrapper {};\n\nclass service_registry\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  ASIO_DECL service_registry(execution_context& owner);\n\n  // Destructor.\n  ASIO_DECL ~service_registry();\n\n  // Shutdown all services.\n  ASIO_DECL void shutdown_services();\n\n  // Destroy all services.\n  ASIO_DECL void destroy_services();\n\n  // Notify all services of a fork event.\n  ASIO_DECL void notify_fork(execution_context::fork_event fork_ev);\n\n  // Get the service object corresponding to the specified service type. Will\n  // create a new service object automatically if no such object already\n  // exists. Ownership of the service object is not transferred to the caller.\n  template <typename Service>\n  Service& use_service();\n\n  // Get the service object corresponding to the specified service type. Will\n  // create a new service object automatically if no such object already\n  // exists. 
Ownership of the service object is not transferred to the caller.\n  // This overload is used for backwards compatibility with services that\n  // inherit from io_context::service.\n  template <typename Service>\n  Service& use_service(io_context& owner);\n\n  // Add a service object. Throws on error, in which case ownership of the\n  // object is retained by the caller.\n  template <typename Service>\n  void add_service(Service* new_service);\n\n  // Check whether a service object of the specified type already exists.\n  template <typename Service>\n  bool has_service() const;\n\nprivate:\n  // Initialise a service's key when the key_type typedef is not available.\n  template <typename Service>\n  static void init_key(execution_context::service::key& key, ...);\n\n#if !defined(ASIO_NO_TYPEID)\n  // Initialise a service's key when the key_type typedef is available.\n  template <typename Service>\n  static void init_key(execution_context::service::key& key,\n      typename enable_if<\n        is_base_of<typename Service::key_type, Service>::value>::type*);\n#endif // !defined(ASIO_NO_TYPEID)\n\n  // Initialise a service's key based on its id.\n  ASIO_DECL static void init_key_from_id(\n      execution_context::service::key& key,\n      const execution_context::id& id);\n\n#if !defined(ASIO_NO_TYPEID)\n  // Initialise a service's key based on its id.\n  template <typename Service>\n  static void init_key_from_id(execution_context::service::key& key,\n      const service_id<Service>& /*id*/);\n#endif // !defined(ASIO_NO_TYPEID)\n\n  // Check if a service matches the given id.\n  ASIO_DECL static bool keys_match(\n      const execution_context::service::key& key1,\n      const execution_context::service::key& key2);\n\n  // The type of a factory function used for creating a service instance.\n  typedef execution_context::service*(*factory_type)(void*);\n\n  // Factory function for creating a service instance.\n  template <typename Service, typename Owner>\n  static 
execution_context::service* create(void* owner);\n\n  // Destroy a service instance.\n  ASIO_DECL static void destroy(execution_context::service* service);\n\n  // Helper class to manage service pointers.\n  struct auto_service_ptr;\n  friend struct auto_service_ptr;\n  struct auto_service_ptr\n  {\n    execution_context::service* ptr_;\n    ~auto_service_ptr() { destroy(ptr_); }\n  };\n\n  // Get the service object corresponding to the specified service key. Will\n  // create a new service object automatically if no such object already\n  // exists. Ownership of the service object is not transferred to the caller.\n  ASIO_DECL execution_context::service* do_use_service(\n      const execution_context::service::key& key,\n      factory_type factory, void* owner);\n\n  // Add a service object. Throws on error, in which case ownership of the\n  // object is retained by the caller.\n  ASIO_DECL void do_add_service(\n      const execution_context::service::key& key,\n      execution_context::service* new_service);\n\n  // Check whether a service object with the specified key already exists.\n  ASIO_DECL bool do_has_service(\n      const execution_context::service::key& key) const;\n\n  // Mutex to protect access to internal data.\n  mutable asio::detail::mutex mutex_;\n\n  // The owner of this service registry and the services it contains.\n  execution_context& owner_;\n\n  // The first service in the list of contained services.\n  execution_context::service* first_service_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/detail/impl/service_registry.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/service_registry.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_SERVICE_REGISTRY_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/signal_blocker.hpp",
    "content": "//\n// detail/signal_blocker.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SIGNAL_BLOCKER_HPP\n#define ASIO_DETAIL_SIGNAL_BLOCKER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS) || defined(ASIO_WINDOWS) \\\n  || defined(ASIO_WINDOWS_RUNTIME) \\\n  || defined(__CYGWIN__) || defined(__SYMBIAN32__)\n# include \"asio/detail/null_signal_blocker.hpp\"\n#elif defined(ASIO_HAS_PTHREADS)\n# include \"asio/detail/posix_signal_blocker.hpp\"\n#else\n# error Only Windows and POSIX are supported!\n#endif\n\nnamespace asio {\nnamespace detail {\n\n#if !defined(ASIO_HAS_THREADS) || defined(ASIO_WINDOWS) \\\n  || defined(ASIO_WINDOWS_RUNTIME) \\\n  || defined(__CYGWIN__) || defined(__SYMBIAN32__)\ntypedef null_signal_blocker signal_blocker;\n#elif defined(ASIO_HAS_PTHREADS)\ntypedef posix_signal_blocker signal_blocker;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_SIGNAL_BLOCKER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/signal_handler.hpp",
    "content": "//\n// detail/signal_handler.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SIGNAL_HANDLER_HPP\n#define ASIO_DETAIL_SIGNAL_HANDLER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_work.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/signal_op.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename IoExecutor>\nclass signal_handler : public signal_op\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(signal_handler);\n\n  signal_handler(Handler& h, const IoExecutor& io_ex)\n    : signal_op(&signal_handler::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(h)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    signal_handler* h(static_cast<signal_handler*>(base));\n    ptr p = { asio::detail::addressof(h->handler_), h, h };\n    handler_work<Handler, IoExecutor> w(h->handler_, h->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*h));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. 
Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, int>\n      handler(h->handler_, h->ec_, h->signal_number_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SIGNAL_HANDLER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/signal_init.hpp",
    "content": "//\n// detail/signal_init.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SIGNAL_INIT_HPP\n#define ASIO_DETAIL_SIGNAL_INIT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#include <csignal>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <int Signal = SIGPIPE>\nclass signal_init\n{\npublic:\n  // Constructor.\n  signal_init()\n  {\n    std::signal(Signal, SIG_IGN);\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_SIGNAL_INIT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/signal_op.hpp",
    "content": "//\n// detail/signal_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SIGNAL_OP_HPP\n#define ASIO_DETAIL_SIGNAL_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass signal_op\n  : public operation\n{\npublic:\n  // The error code to be passed to the completion handler.\n  asio::error_code ec_;\n\n  // The signal number to be passed to the completion handler.\n  int signal_number_;\n\nprotected:\n  signal_op(func_type func)\n    : operation(func),\n      signal_number_(0)\n  {\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SIGNAL_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/signal_set_service.hpp",
    "content": "//\n// detail/signal_set_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP\n#define ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <cstddef>\n#include <signal.h>\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/signal_handler.hpp\"\n#include \"asio/detail/signal_op.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n# include \"asio/detail/reactor.hpp\"\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n#if defined(NSIG) && (NSIG > 0)\nenum { max_signal_number = NSIG };\n#else\nenum { max_signal_number = 128 };\n#endif\n\nextern ASIO_DECL struct signal_state* get_signal_state();\n\nextern \"C\" ASIO_DECL void asio_signal_handler(int signal_number);\n\nclass signal_set_service :\n  public execution_context_service_base<signal_set_service>\n{\npublic:\n  // Type used for tracking an individual signal registration.\n  class registration\n  {\n  public:\n    // Default constructor.\n    registration()\n      : signal_number_(0),\n        queue_(0),\n        undelivered_(0),\n        next_in_table_(0),\n        
prev_in_table_(0),\n        next_in_set_(0)\n    {\n    }\n\n  private:\n    // Only this service will have access to the internal values.\n    friend class signal_set_service;\n\n    // The signal number that is registered.\n    int signal_number_;\n\n    // The waiting signal handlers.\n    op_queue<signal_op>* queue_;\n\n    // The number of undelivered signals.\n    std::size_t undelivered_;\n\n    // Pointers to adjacent registrations in the registrations_ table.\n    registration* next_in_table_;\n    registration* prev_in_table_;\n\n    // Link to next registration in the signal set.\n    registration* next_in_set_;\n  };\n\n  // The implementation type of the signal_set.\n  class implementation_type\n  {\n  public:\n    // Default constructor.\n    implementation_type()\n      : signals_(0)\n    {\n    }\n\n  private:\n    // Only this service will have access to the internal values.\n    friend class signal_set_service;\n\n    // The pending signal handlers.\n    op_queue<signal_op> queue_;\n\n    // Linked list of registered signals.\n    registration* signals_;\n  };\n\n  // Constructor.\n  ASIO_DECL signal_set_service(execution_context& context);\n\n  // Destructor.\n  ASIO_DECL ~signal_set_service();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Perform fork-related housekeeping.\n  ASIO_DECL void notify_fork(\n      asio::execution_context::fork_event fork_ev);\n\n  // Construct a new signal_set implementation.\n  ASIO_DECL void construct(implementation_type& impl);\n\n  // Destroy a signal_set implementation.\n  ASIO_DECL void destroy(implementation_type& impl);\n\n  // Add a signal to a signal_set.\n  ASIO_DECL asio::error_code add(implementation_type& impl,\n      int signal_number, asio::error_code& ec);\n\n  // Remove a signal to a signal_set.\n  ASIO_DECL asio::error_code remove(implementation_type& impl,\n      int signal_number, asio::error_code& ec);\n\n  // Remove all signals 
from a signal_set.\n  ASIO_DECL asio::error_code clear(implementation_type& impl,\n      asio::error_code& ec);\n\n  // Cancel all operations associated with the signal set.\n  ASIO_DECL asio::error_code cancel(implementation_type& impl,\n      asio::error_code& ec);\n\n  // Start an asynchronous operation to wait for a signal to be delivered.\n  template <typename Handler, typename IoExecutor>\n  void async_wait(implementation_type& impl,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef signal_handler<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((scheduler_.context(),\n          *p.p, \"signal_set\", &impl, 0, \"async_wait\"));\n\n    start_wait_op(impl, p.p);\n    p.v = p.p = 0;\n  }\n\n  // Deliver notification that a particular signal occurred.\n  ASIO_DECL static void deliver_signal(int signal_number);\n\nprivate:\n  // Helper function to add a service to the global signal state.\n  ASIO_DECL static void add_service(signal_set_service* service);\n\n  // Helper function to remove a service from the global signal state.\n  ASIO_DECL static void remove_service(signal_set_service* service);\n\n  // Helper function to create the pipe descriptors.\n  ASIO_DECL static void open_descriptors();\n\n  // Helper function to close the pipe descriptors.\n  ASIO_DECL static void close_descriptors();\n\n  // Helper function to start a wait operation.\n  ASIO_DECL void start_wait_op(implementation_type& impl, signal_op* op);\n\n  // The scheduler used for dispatching handlers.\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n  scheduler_impl& scheduler_;\n\n#if !defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_RUNTIME) \\\n  && !defined(__CYGWIN__)\n  
// The type used for registering for pipe reactor notifications.\n  class pipe_read_op;\n\n  // The reactor used for waiting for pipe readiness.\n  reactor& reactor_;\n\n  // The per-descriptor reactor data used for the pipe.\n  reactor::per_descriptor_data reactor_data_;\n#endif // !defined(ASIO_WINDOWS)\n       //   && !defined(ASIO_WINDOWS_RUNTIME)\n       //   && !defined(__CYGWIN__)\n\n  // A mapping from signal number to the registered signal sets.\n  registration* registrations_[max_signal_number];\n\n  // Pointers to adjacent services in linked list.\n  signal_set_service* next_;\n  signal_set_service* prev_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/signal_set_service.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/socket_holder.hpp",
    "content": "//\n// detail/socket_holder.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SOCKET_HOLDER_HPP\n#define ASIO_DETAIL_SOCKET_HOLDER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Implement the resource acquisition is initialisation idiom for sockets.\nclass socket_holder\n  : private noncopyable\n{\npublic:\n  // Construct as an uninitialised socket.\n  socket_holder()\n    : socket_(invalid_socket)\n  {\n  }\n\n  // Construct to take ownership of the specified socket.\n  explicit socket_holder(socket_type s)\n    : socket_(s)\n  {\n  }\n\n  // Destructor.\n  ~socket_holder()\n  {\n    if (socket_ != invalid_socket)\n    {\n      asio::error_code ec;\n      socket_ops::state_type state = 0;\n      socket_ops::close(socket_, state, true, ec);\n    }\n  }\n\n  // Get the underlying socket.\n  socket_type get() const\n  {\n    return socket_;\n  }\n\n  // Reset to an uninitialised socket.\n  void reset()\n  {\n    if (socket_ != invalid_socket)\n    {\n      asio::error_code ec;\n      socket_ops::state_type state = 0;\n      socket_ops::close(socket_, state, true, ec);\n      socket_ = invalid_socket;\n    }\n  }\n\n  // Reset to take ownership of the specified socket.\n  void reset(socket_type s)\n  {\n    reset();\n    socket_ = s;\n  }\n\n  // Release ownership of the socket.\n  socket_type release()\n  {\n    socket_type tmp = socket_;\n    socket_ = invalid_socket;\n    return tmp;\n  }\n\nprivate:\n  // The underlying 
socket.\n  socket_type socket_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SOCKET_HOLDER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/socket_ops.hpp",
    "content": "//\n// detail/socket_ops.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SOCKET_OPS_HPP\n#define ASIO_DETAIL_SOCKET_OPS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/error_code.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\nnamespace socket_ops {\n\n// Socket state bits.\nenum\n{\n  // The user wants a non-blocking socket.\n  user_set_non_blocking = 1,\n\n  // The socket has been set non-blocking.\n  internal_non_blocking = 2,\n\n  // Helper \"state\" used to determine whether the socket is non-blocking.\n  non_blocking = user_set_non_blocking | internal_non_blocking,\n\n  // User wants connection_aborted errors, which are disabled by default.\n  enable_connection_aborted = 4,\n\n  // The user set the linger option. 
Needs to be checked when closing.\n  user_set_linger = 8,\n\n  // The socket is stream-oriented.\n  stream_oriented = 16,\n\n  // The socket is datagram-oriented.\n  datagram_oriented = 32,\n\n  // The socket may have been dup()-ed.\n  possible_dup = 64\n};\n\ntypedef unsigned char state_type;\n\nstruct noop_deleter { void operator()(void*) {} };\ntypedef shared_ptr<void> shared_cancel_token_type;\ntypedef weak_ptr<void> weak_cancel_token_type;\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\nASIO_DECL socket_type accept(socket_type s, socket_addr_type* addr,\n    std::size_t* addrlen, asio::error_code& ec);\n\nASIO_DECL socket_type sync_accept(socket_type s,\n    state_type state, socket_addr_type* addr,\n    std::size_t* addrlen, asio::error_code& ec);\n\n#if defined(ASIO_HAS_IOCP)\n\nASIO_DECL void complete_iocp_accept(socket_type s,\n    void* output_buffer, DWORD address_length,\n    socket_addr_type* addr, std::size_t* addrlen,\n    socket_type new_socket, asio::error_code& ec);\n\n#else // defined(ASIO_HAS_IOCP)\n\nASIO_DECL bool non_blocking_accept(socket_type s,\n    state_type state, socket_addr_type* addr, std::size_t* addrlen,\n    asio::error_code& ec, socket_type& new_socket);\n\n#endif // defined(ASIO_HAS_IOCP)\n\nASIO_DECL int bind(socket_type s, const socket_addr_type* addr,\n    std::size_t addrlen, asio::error_code& ec);\n\nASIO_DECL int close(socket_type s, state_type& state,\n    bool destruction, asio::error_code& ec);\n\nASIO_DECL bool set_user_non_blocking(socket_type s,\n    state_type& state, bool value, asio::error_code& ec);\n\nASIO_DECL bool set_internal_non_blocking(socket_type s,\n    state_type& state, bool value, asio::error_code& ec);\n\nASIO_DECL int shutdown(socket_type s,\n    int what, asio::error_code& ec);\n\nASIO_DECL int connect(socket_type s, const socket_addr_type* addr,\n    std::size_t addrlen, asio::error_code& ec);\n\nASIO_DECL void sync_connect(socket_type s, const socket_addr_type* addr,\n    std::size_t addrlen, 
asio::error_code& ec);\n\n#if defined(ASIO_HAS_IOCP)\n\nASIO_DECL void complete_iocp_connect(socket_type s,\n    asio::error_code& ec);\n\n#endif // defined(ASIO_HAS_IOCP)\n\nASIO_DECL bool non_blocking_connect(socket_type s,\n    asio::error_code& ec);\n\nASIO_DECL int socketpair(int af, int type, int protocol,\n    socket_type sv[2], asio::error_code& ec);\n\nASIO_DECL bool sockatmark(socket_type s, asio::error_code& ec);\n\nASIO_DECL size_t available(socket_type s, asio::error_code& ec);\n\nASIO_DECL int listen(socket_type s,\n    int backlog, asio::error_code& ec);\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\ntypedef WSABUF buf;\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\ntypedef iovec buf;\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\nASIO_DECL void init_buf(buf& b, void* data, size_t size);\n\nASIO_DECL void init_buf(buf& b, const void* data, size_t size);\n\nASIO_DECL signed_size_type recv(socket_type s, buf* bufs,\n    size_t count, int flags, asio::error_code& ec);\n\nASIO_DECL size_t sync_recv(socket_type s, state_type state, buf* bufs,\n    size_t count, int flags, bool all_empty, asio::error_code& ec);\n\n#if defined(ASIO_HAS_IOCP)\n\nASIO_DECL void complete_iocp_recv(state_type state,\n    const weak_cancel_token_type& cancel_token, bool all_empty,\n    asio::error_code& ec, size_t bytes_transferred);\n\n#else // defined(ASIO_HAS_IOCP)\n\nASIO_DECL bool non_blocking_recv(socket_type s,\n    buf* bufs, size_t count, int flags, bool is_stream,\n    asio::error_code& ec, size_t& bytes_transferred);\n\n#endif // defined(ASIO_HAS_IOCP)\n\nASIO_DECL signed_size_type recvfrom(socket_type s, buf* bufs,\n    size_t count, int flags, socket_addr_type* addr,\n    std::size_t* addrlen, asio::error_code& ec);\n\nASIO_DECL size_t sync_recvfrom(socket_type s, state_type state,\n    buf* bufs, size_t count, int flags, socket_addr_type* addr,\n    std::size_t* addrlen, asio::error_code& ec);\n\n#if defined(ASIO_HAS_IOCP)\n\nASIO_DECL 
void complete_iocp_recvfrom(\n    const weak_cancel_token_type& cancel_token,\n    asio::error_code& ec);\n\n#else // defined(ASIO_HAS_IOCP)\n\nASIO_DECL bool non_blocking_recvfrom(socket_type s,\n    buf* bufs, size_t count, int flags,\n    socket_addr_type* addr, std::size_t* addrlen,\n    asio::error_code& ec, size_t& bytes_transferred);\n\n#endif // defined(ASIO_HAS_IOCP)\n\nASIO_DECL signed_size_type recvmsg(socket_type s, buf* bufs,\n    size_t count, int in_flags, int& out_flags,\n    asio::error_code& ec);\n\nASIO_DECL size_t sync_recvmsg(socket_type s, state_type state,\n    buf* bufs, size_t count, int in_flags, int& out_flags,\n    asio::error_code& ec);\n\n#if defined(ASIO_HAS_IOCP)\n\nASIO_DECL void complete_iocp_recvmsg(\n    const weak_cancel_token_type& cancel_token,\n    asio::error_code& ec);\n\n#else // defined(ASIO_HAS_IOCP)\n\nASIO_DECL bool non_blocking_recvmsg(socket_type s,\n    buf* bufs, size_t count, int in_flags, int& out_flags,\n    asio::error_code& ec, size_t& bytes_transferred);\n\n#endif // defined(ASIO_HAS_IOCP)\n\nASIO_DECL signed_size_type send(socket_type s, const buf* bufs,\n    size_t count, int flags, asio::error_code& ec);\n\nASIO_DECL size_t sync_send(socket_type s, state_type state,\n    const buf* bufs, size_t count, int flags,\n    bool all_empty, asio::error_code& ec);\n\n#if defined(ASIO_HAS_IOCP)\n\nASIO_DECL void complete_iocp_send(\n    const weak_cancel_token_type& cancel_token,\n    asio::error_code& ec);\n\n#else // defined(ASIO_HAS_IOCP)\n\nASIO_DECL bool non_blocking_send(socket_type s,\n    const buf* bufs, size_t count, int flags,\n    asio::error_code& ec, size_t& bytes_transferred);\n\n#endif // defined(ASIO_HAS_IOCP)\n\nASIO_DECL signed_size_type sendto(socket_type s, const buf* bufs,\n    size_t count, int flags, const socket_addr_type* addr,\n    std::size_t addrlen, asio::error_code& ec);\n\nASIO_DECL size_t sync_sendto(socket_type s, state_type state,\n    const buf* bufs, size_t count, int flags, 
const socket_addr_type* addr,\n    std::size_t addrlen, asio::error_code& ec);\n\n#if !defined(ASIO_HAS_IOCP)\n\nASIO_DECL bool non_blocking_sendto(socket_type s,\n    const buf* bufs, size_t count, int flags,\n    const socket_addr_type* addr, std::size_t addrlen,\n    asio::error_code& ec, size_t& bytes_transferred);\n\n#endif // !defined(ASIO_HAS_IOCP)\n\nASIO_DECL socket_type socket(int af, int type, int protocol,\n    asio::error_code& ec);\n\nASIO_DECL int setsockopt(socket_type s, state_type& state,\n    int level, int optname, const void* optval,\n    std::size_t optlen, asio::error_code& ec);\n\nASIO_DECL int getsockopt(socket_type s, state_type state,\n    int level, int optname, void* optval,\n    size_t* optlen, asio::error_code& ec);\n\nASIO_DECL int getpeername(socket_type s, socket_addr_type* addr,\n    std::size_t* addrlen, bool cached, asio::error_code& ec);\n\nASIO_DECL int getsockname(socket_type s, socket_addr_type* addr,\n    std::size_t* addrlen, asio::error_code& ec);\n\nASIO_DECL int ioctl(socket_type s, state_type& state,\n    int cmd, ioctl_arg_type* arg, asio::error_code& ec);\n\nASIO_DECL int select(int nfds, fd_set* readfds, fd_set* writefds,\n    fd_set* exceptfds, timeval* timeout, asio::error_code& ec);\n\nASIO_DECL int poll_read(socket_type s,\n    state_type state, int msec, asio::error_code& ec);\n\nASIO_DECL int poll_write(socket_type s,\n    state_type state, int msec, asio::error_code& ec);\n\nASIO_DECL int poll_error(socket_type s,\n    state_type state, int msec, asio::error_code& ec);\n\nASIO_DECL int poll_connect(socket_type s,\n    int msec, asio::error_code& ec);\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\nASIO_DECL const char* inet_ntop(int af, const void* src, char* dest,\n    size_t length, unsigned long scope_id, asio::error_code& ec);\n\nASIO_DECL int inet_pton(int af, const char* src, void* dest,\n    unsigned long* scope_id, asio::error_code& ec);\n\nASIO_DECL int gethostname(char* name,\n    int namelen, 
asio::error_code& ec);\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\nASIO_DECL asio::error_code getaddrinfo(const char* host,\n    const char* service, const addrinfo_type& hints,\n    addrinfo_type** result, asio::error_code& ec);\n\nASIO_DECL asio::error_code background_getaddrinfo(\n    const weak_cancel_token_type& cancel_token, const char* host,\n    const char* service, const addrinfo_type& hints,\n    addrinfo_type** result, asio::error_code& ec);\n\nASIO_DECL void freeaddrinfo(addrinfo_type* ai);\n\nASIO_DECL asio::error_code getnameinfo(\n    const socket_addr_type* addr, std::size_t addrlen,\n    char* host, std::size_t hostlen, char* serv,\n    std::size_t servlen, int flags, asio::error_code& ec);\n\nASIO_DECL asio::error_code sync_getnameinfo(\n    const socket_addr_type* addr, std::size_t addrlen,\n    char* host, std::size_t hostlen, char* serv,\n    std::size_t servlen, int sock_type, asio::error_code& ec);\n\nASIO_DECL asio::error_code background_getnameinfo(\n    const weak_cancel_token_type& cancel_token,\n    const socket_addr_type* addr, std::size_t addrlen,\n    char* host, std::size_t hostlen, char* serv,\n    std::size_t servlen, int sock_type, asio::error_code& ec);\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\nASIO_DECL u_long_type network_to_host_long(u_long_type value);\n\nASIO_DECL u_long_type host_to_network_long(u_long_type value);\n\nASIO_DECL u_short_type network_to_host_short(u_short_type value);\n\nASIO_DECL u_short_type host_to_network_short(u_short_type value);\n\n} // namespace socket_ops\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/socket_ops.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_SOCKET_OPS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/socket_option.hpp",
    "content": "//\n// detail/socket_option.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SOCKET_OPTION_HPP\n#define ASIO_DETAIL_SOCKET_OPTION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <stdexcept>\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\nnamespace socket_option {\n\n// Helper template for implementing boolean-based options.\ntemplate <int Level, int Name>\nclass boolean\n{\npublic:\n  // Default constructor.\n  boolean()\n    : value_(0)\n  {\n  }\n\n  // Construct with a specific option value.\n  explicit boolean(bool v)\n    : value_(v ? 1 : 0)\n  {\n  }\n\n  // Set the current value of the boolean.\n  boolean& operator=(bool v)\n  {\n    value_ = v ? 
1 : 0;\n    return *this;\n  }\n\n  // Get the current value of the boolean.\n  bool value() const\n  {\n    return !!value_;\n  }\n\n  // Convert to bool.\n  operator bool() const\n  {\n    return !!value_;\n  }\n\n  // Test for false.\n  bool operator!() const\n  {\n    return !value_;\n  }\n\n  // Get the level of the socket option.\n  template <typename Protocol>\n  int level(const Protocol&) const\n  {\n    return Level;\n  }\n\n  // Get the name of the socket option.\n  template <typename Protocol>\n  int name(const Protocol&) const\n  {\n    return Name;\n  }\n\n  // Get the address of the boolean data.\n  template <typename Protocol>\n  int* data(const Protocol&)\n  {\n    return &value_;\n  }\n\n  // Get the address of the boolean data.\n  template <typename Protocol>\n  const int* data(const Protocol&) const\n  {\n    return &value_;\n  }\n\n  // Get the size of the boolean data.\n  template <typename Protocol>\n  std::size_t size(const Protocol&) const\n  {\n    return sizeof(value_);\n  }\n\n  // Set the size of the boolean data.\n  template <typename Protocol>\n  void resize(const Protocol&, std::size_t s)\n  {\n    // On some platforms (e.g. Windows Vista), the getsockopt function will\n    // return the size of a boolean socket option as one byte, even though a\n    // four byte integer was passed in.\n    switch (s)\n    {\n    case sizeof(char):\n      value_ = *reinterpret_cast<char*>(&value_) ? 
1 : 0;\n      break;\n    case sizeof(value_):\n      break;\n    default:\n      {\n        std::length_error ex(\"boolean socket option resize\");\n        asio::detail::throw_exception(ex);\n      }\n    }\n  }\n\nprivate:\n  int value_;\n};\n\n// Helper template for implementing integer options.\ntemplate <int Level, int Name>\nclass integer\n{\npublic:\n  // Default constructor.\n  integer()\n    : value_(0)\n  {\n  }\n\n  // Construct with a specific option value.\n  explicit integer(int v)\n    : value_(v)\n  {\n  }\n\n  // Set the value of the int option.\n  integer& operator=(int v)\n  {\n    value_ = v;\n    return *this;\n  }\n\n  // Get the current value of the int option.\n  int value() const\n  {\n    return value_;\n  }\n\n  // Get the level of the socket option.\n  template <typename Protocol>\n  int level(const Protocol&) const\n  {\n    return Level;\n  }\n\n  // Get the name of the socket option.\n  template <typename Protocol>\n  int name(const Protocol&) const\n  {\n    return Name;\n  }\n\n  // Get the address of the int data.\n  template <typename Protocol>\n  int* data(const Protocol&)\n  {\n    return &value_;\n  }\n\n  // Get the address of the int data.\n  template <typename Protocol>\n  const int* data(const Protocol&) const\n  {\n    return &value_;\n  }\n\n  // Get the size of the int data.\n  template <typename Protocol>\n  std::size_t size(const Protocol&) const\n  {\n    return sizeof(value_);\n  }\n\n  // Set the size of the int data.\n  template <typename Protocol>\n  void resize(const Protocol&, std::size_t s)\n  {\n    if (s != sizeof(value_))\n    {\n      std::length_error ex(\"integer socket option resize\");\n      asio::detail::throw_exception(ex);\n    }\n  }\n\nprivate:\n  int value_;\n};\n\n// Helper template for implementing linger options.\ntemplate <int Level, int Name>\nclass linger\n{\npublic:\n  // Default constructor.\n  linger()\n  {\n    value_.l_onoff = 0;\n    value_.l_linger = 0;\n  }\n\n  // Construct with 
specific option values.\n  linger(bool e, int t)\n  {\n    enabled(e);\n    timeout ASIO_PREVENT_MACRO_SUBSTITUTION(t);\n  }\n\n  // Set the value for whether linger is enabled.\n  void enabled(bool value)\n  {\n    value_.l_onoff = value ? 1 : 0;\n  }\n\n  // Get the value for whether linger is enabled.\n  bool enabled() const\n  {\n    return value_.l_onoff != 0;\n  }\n\n  // Set the value for the linger timeout.\n  void timeout ASIO_PREVENT_MACRO_SUBSTITUTION(int value)\n  {\n#if defined(WIN32)\n    value_.l_linger = static_cast<u_short>(value);\n#else\n    value_.l_linger = value;\n#endif\n  }\n\n  // Get the value for the linger timeout.\n  int timeout ASIO_PREVENT_MACRO_SUBSTITUTION() const\n  {\n    return static_cast<int>(value_.l_linger);\n  }\n\n  // Get the level of the socket option.\n  template <typename Protocol>\n  int level(const Protocol&) const\n  {\n    return Level;\n  }\n\n  // Get the name of the socket option.\n  template <typename Protocol>\n  int name(const Protocol&) const\n  {\n    return Name;\n  }\n\n  // Get the address of the linger data.\n  template <typename Protocol>\n  detail::linger_type* data(const Protocol&)\n  {\n    return &value_;\n  }\n\n  // Get the address of the linger data.\n  template <typename Protocol>\n  const detail::linger_type* data(const Protocol&) const\n  {\n    return &value_;\n  }\n\n  // Get the size of the linger data.\n  template <typename Protocol>\n  std::size_t size(const Protocol&) const\n  {\n    return sizeof(value_);\n  }\n\n  // Set the size of the int data.\n  template <typename Protocol>\n  void resize(const Protocol&, std::size_t s)\n  {\n    if (s != sizeof(value_))\n    {\n      std::length_error ex(\"linger socket option resize\");\n      asio::detail::throw_exception(ex);\n    }\n  }\n\nprivate:\n  detail::linger_type value_;\n};\n\n} // namespace socket_option\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // 
ASIO_DETAIL_SOCKET_OPTION_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/socket_select_interrupter.hpp",
    "content": "//\n// detail/socket_select_interrupter.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP\n#define ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_WINDOWS_RUNTIME)\n\n#if defined(ASIO_WINDOWS) \\\n  || defined(__CYGWIN__) \\\n  || defined(__SYMBIAN32__)\n\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass socket_select_interrupter\n{\npublic:\n  // Constructor.\n  ASIO_DECL socket_select_interrupter();\n\n  // Destructor.\n  ASIO_DECL ~socket_select_interrupter();\n\n  // Recreate the interrupter's descriptors. Used after a fork.\n  ASIO_DECL void recreate();\n\n  // Interrupt the select call.\n  ASIO_DECL void interrupt();\n\n  // Reset the select interrupt. Returns true if the call was interrupted.\n  ASIO_DECL bool reset();\n\n  // Get the read descriptor to be passed to select.\n  socket_type read_descriptor() const\n  {\n    return read_descriptor_;\n  }\n\nprivate:\n  // Open the descriptors. Throws on error.\n  ASIO_DECL void open_descriptors();\n\n  // Close the descriptors.\n  ASIO_DECL void close_descriptors();\n\n  // The read end of a connection used to interrupt the select call. This file\n  // descriptor is passed to select such that when it is time to stop, a single\n  // byte will be written on the other end of the connection and this\n  // descriptor will become readable.\n  socket_type read_descriptor_;\n\n  // The write end of a connection used to interrupt the select call. 
A single\n  // byte may be written to this to wake up the select which is waiting for the\n  // other end to become readable.\n  socket_type write_descriptor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/socket_select_interrupter.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS)\n       // || defined(__CYGWIN__)\n       // || defined(__SYMBIAN32__)\n\n#endif // !defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/socket_types.hpp",
    "content": "//\n// detail/socket_types.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SOCKET_TYPES_HPP\n#define ASIO_DETAIL_SOCKET_TYPES_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n// Empty.\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# if defined(_WINSOCKAPI_) && !defined(_WINSOCK2API_)\n#  error WinSock.h has already been included\n# endif // defined(_WINSOCKAPI_) && !defined(_WINSOCK2API_)\n# if defined(__BORLANDC__)\n#  include <stdlib.h> // Needed for __errno\n#  if !defined(_WSPIAPI_H_)\n#   define _WSPIAPI_H_\n#   define ASIO_WSPIAPI_H_DEFINED\n#  endif // !defined(_WSPIAPI_H_)\n# endif // defined(__BORLANDC__)\n# include <winsock2.h>\n# include <ws2tcpip.h>\n# if defined(WINAPI_FAMILY)\n#  if ((WINAPI_FAMILY & WINAPI_PARTITION_DESKTOP) != 0)\n#   include <windows.h>\n#  endif // ((WINAPI_FAMILY & WINAPI_PARTITION_DESKTOP) != 0)\n# endif // defined(WINAPI_FAMILY)\n# if !defined(ASIO_WINDOWS_APP)\n#  include <mswsock.h>\n# endif // !defined(ASIO_WINDOWS_APP)\n# if defined(ASIO_WSPIAPI_H_DEFINED)\n#  undef _WSPIAPI_H_\n#  undef ASIO_WSPIAPI_H_DEFINED\n# endif // defined(ASIO_WSPIAPI_H_DEFINED)\n# if !defined(ASIO_NO_DEFAULT_LINKED_LIBS)\n#  if defined(UNDER_CE)\n#   pragma comment(lib, \"ws2.lib\")\n#  elif defined(_MSC_VER) || defined(__BORLANDC__)\n#   pragma comment(lib, \"ws2_32.lib\")\n#   if !defined(ASIO_WINDOWS_APP)\n#    pragma comment(lib, \"mswsock.lib\")\n#   endif // !defined(ASIO_WINDOWS_APP)\n#  endif // defined(_MSC_VER) || defined(__BORLANDC__)\n# endif // !defined(ASIO_NO_DEFAULT_LINKED_LIBS)\n# include 
\"asio/detail/old_win_sdk_compat.hpp\"\n#else\n# include <sys/ioctl.h>\n# if (defined(__MACH__) && defined(__APPLE__)) \\\n   || defined(__FreeBSD__) || defined(__NetBSD__) \\\n   || defined(__OpenBSD__) || defined(__linux__) \\\n   || defined(__EMSCRIPTEN__)\n#  include <poll.h>\n# elif !defined(__SYMBIAN32__)\n#  include <sys/poll.h>\n# endif\n# include <sys/types.h>\n# include <sys/stat.h>\n# include <fcntl.h>\n# if defined(__hpux)\n#  include <sys/time.h>\n# endif\n# if !defined(__hpux) || defined(__SELECT)\n#  include <sys/select.h>\n# endif\n# include <sys/socket.h>\n# include <sys/uio.h>\n# include <sys/un.h>\n# include <netinet/in.h>\n# if !defined(__SYMBIAN32__)\n#  include <netinet/tcp.h>\n# endif\n# include <arpa/inet.h>\n# include <netdb.h>\n# include <net/if.h>\n# include <limits.h>\n# if defined(__sun)\n#  include <sys/filio.h>\n#  include <sys/sockio.h>\n# endif\n#endif\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_WINDOWS_RUNTIME)\nconst int max_addr_v4_str_len = 256;\nconst int max_addr_v6_str_len = 256;\ntypedef unsigned __int32 u_long_type;\ntypedef unsigned __int16 u_short_type;\nstruct in4_addr_type { u_long_type s_addr; };\nstruct in4_mreq_type { in4_addr_type imr_multiaddr, imr_interface; };\nstruct in6_addr_type { unsigned char s6_addr[16]; };\nstruct in6_mreq_type { in6_addr_type ipv6mr_multiaddr;\n  unsigned long ipv6mr_interface; };\nstruct socket_addr_type { int sa_family; };\nstruct sockaddr_in4_type { int sin_family;\n  in4_addr_type sin_addr; u_short_type sin_port; };\nstruct sockaddr_in6_type { int sin6_family;\n  in6_addr_type sin6_addr; u_short_type sin6_port;\n  u_long_type sin6_flowinfo; u_long_type sin6_scope_id; };\nstruct sockaddr_storage_type { int ss_family;\n  unsigned char ss_bytes[128 - sizeof(int)]; };\nstruct addrinfo_type { int ai_flags;\n  int ai_family, ai_socktype, ai_protocol;\n  int ai_addrlen; const void* ai_addr;\n  const char* ai_canonname; 
addrinfo_type* ai_next; };\nstruct linger_type { u_short_type l_onoff, l_linger; };\ntypedef u_long_type ioctl_arg_type;\ntypedef int signed_size_type;\n# define ASIO_OS_DEF(c) ASIO_OS_DEF_##c\n# define ASIO_OS_DEF_AF_UNSPEC 0\n# define ASIO_OS_DEF_AF_INET 2\n# define ASIO_OS_DEF_AF_INET6 23\n# define ASIO_OS_DEF_SOCK_STREAM 1\n# define ASIO_OS_DEF_SOCK_DGRAM 2\n# define ASIO_OS_DEF_SOCK_RAW 3\n# define ASIO_OS_DEF_SOCK_SEQPACKET 5\n# define ASIO_OS_DEF_IPPROTO_IP 0\n# define ASIO_OS_DEF_IPPROTO_IPV6 41\n# define ASIO_OS_DEF_IPPROTO_TCP 6\n# define ASIO_OS_DEF_IPPROTO_UDP 17\n# define ASIO_OS_DEF_IPPROTO_ICMP 1\n# define ASIO_OS_DEF_IPPROTO_ICMPV6 58\n# define ASIO_OS_DEF_FIONBIO 1\n# define ASIO_OS_DEF_FIONREAD 2\n# define ASIO_OS_DEF_INADDR_ANY 0\n# define ASIO_OS_DEF_MSG_OOB 0x1\n# define ASIO_OS_DEF_MSG_PEEK 0x2\n# define ASIO_OS_DEF_MSG_DONTROUTE 0x4\n# define ASIO_OS_DEF_MSG_EOR 0 // Not supported.\n# define ASIO_OS_DEF_SHUT_RD 0x0\n# define ASIO_OS_DEF_SHUT_WR 0x1\n# define ASIO_OS_DEF_SHUT_RDWR 0x2\n# define ASIO_OS_DEF_SOMAXCONN 0x7fffffff\n# define ASIO_OS_DEF_SOL_SOCKET 0xffff\n# define ASIO_OS_DEF_SO_BROADCAST 0x20\n# define ASIO_OS_DEF_SO_DEBUG 0x1\n# define ASIO_OS_DEF_SO_DONTROUTE 0x10\n# define ASIO_OS_DEF_SO_KEEPALIVE 0x8\n# define ASIO_OS_DEF_SO_LINGER 0x80\n# define ASIO_OS_DEF_SO_OOBINLINE 0x100\n# define ASIO_OS_DEF_SO_SNDBUF 0x1001\n# define ASIO_OS_DEF_SO_RCVBUF 0x1002\n# define ASIO_OS_DEF_SO_SNDLOWAT 0x1003\n# define ASIO_OS_DEF_SO_RCVLOWAT 0x1004\n# define ASIO_OS_DEF_SO_REUSEADDR 0x4\n# define ASIO_OS_DEF_TCP_NODELAY 0x1\n# define ASIO_OS_DEF_IP_MULTICAST_IF 2\n# define ASIO_OS_DEF_IP_MULTICAST_TTL 3\n# define ASIO_OS_DEF_IP_MULTICAST_LOOP 4\n# define ASIO_OS_DEF_IP_ADD_MEMBERSHIP 5\n# define ASIO_OS_DEF_IP_DROP_MEMBERSHIP 6\n# define ASIO_OS_DEF_IP_TTL 7\n# define ASIO_OS_DEF_IPV6_UNICAST_HOPS 4\n# define ASIO_OS_DEF_IPV6_MULTICAST_IF 9\n# define ASIO_OS_DEF_IPV6_MULTICAST_HOPS 10\n# define ASIO_OS_DEF_IPV6_MULTICAST_LOOP 11\n# define 
ASIO_OS_DEF_IPV6_JOIN_GROUP 12\n# define ASIO_OS_DEF_IPV6_LEAVE_GROUP 13\n# define ASIO_OS_DEF_AI_CANONNAME 0x2\n# define ASIO_OS_DEF_AI_PASSIVE 0x1\n# define ASIO_OS_DEF_AI_NUMERICHOST 0x4\n# define ASIO_OS_DEF_AI_NUMERICSERV 0x8\n# define ASIO_OS_DEF_AI_V4MAPPED 0x800\n# define ASIO_OS_DEF_AI_ALL 0x100\n# define ASIO_OS_DEF_AI_ADDRCONFIG 0x400\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\ntypedef SOCKET socket_type;\nconst SOCKET invalid_socket = INVALID_SOCKET;\nconst int socket_error_retval = SOCKET_ERROR;\nconst int max_addr_v4_str_len = 256;\nconst int max_addr_v6_str_len = 256;\ntypedef sockaddr socket_addr_type;\ntypedef in_addr in4_addr_type;\ntypedef ip_mreq in4_mreq_type;\ntypedef sockaddr_in sockaddr_in4_type;\n# if defined(ASIO_HAS_OLD_WIN_SDK)\ntypedef in6_addr_emulation in6_addr_type;\ntypedef ipv6_mreq_emulation in6_mreq_type;\ntypedef sockaddr_in6_emulation sockaddr_in6_type;\ntypedef sockaddr_storage_emulation sockaddr_storage_type;\ntypedef addrinfo_emulation addrinfo_type;\n# else\ntypedef in6_addr in6_addr_type;\ntypedef ipv6_mreq in6_mreq_type;\ntypedef sockaddr_in6 sockaddr_in6_type;\ntypedef sockaddr_storage sockaddr_storage_type;\ntypedef addrinfo addrinfo_type;\n# endif\ntypedef ::linger linger_type;\ntypedef unsigned long ioctl_arg_type;\ntypedef u_long u_long_type;\ntypedef u_short u_short_type;\ntypedef int signed_size_type;\n# define ASIO_OS_DEF(c) ASIO_OS_DEF_##c\n# define ASIO_OS_DEF_AF_UNSPEC AF_UNSPEC\n# define ASIO_OS_DEF_AF_INET AF_INET\n# define ASIO_OS_DEF_AF_INET6 AF_INET6\n# define ASIO_OS_DEF_SOCK_STREAM SOCK_STREAM\n# define ASIO_OS_DEF_SOCK_DGRAM SOCK_DGRAM\n# define ASIO_OS_DEF_SOCK_RAW SOCK_RAW\n# define ASIO_OS_DEF_SOCK_SEQPACKET SOCK_SEQPACKET\n# define ASIO_OS_DEF_IPPROTO_IP IPPROTO_IP\n# define ASIO_OS_DEF_IPPROTO_IPV6 IPPROTO_IPV6\n# define ASIO_OS_DEF_IPPROTO_TCP IPPROTO_TCP\n# define ASIO_OS_DEF_IPPROTO_UDP IPPROTO_UDP\n# define ASIO_OS_DEF_IPPROTO_ICMP IPPROTO_ICMP\n# define ASIO_OS_DEF_IPPROTO_ICMPV6 
IPPROTO_ICMPV6\n# define ASIO_OS_DEF_FIONBIO FIONBIO\n# define ASIO_OS_DEF_FIONREAD FIONREAD\n# define ASIO_OS_DEF_INADDR_ANY INADDR_ANY\n# define ASIO_OS_DEF_MSG_OOB MSG_OOB\n# define ASIO_OS_DEF_MSG_PEEK MSG_PEEK\n# define ASIO_OS_DEF_MSG_DONTROUTE MSG_DONTROUTE\n# define ASIO_OS_DEF_MSG_EOR 0 // Not supported on Windows.\n# define ASIO_OS_DEF_SHUT_RD SD_RECEIVE\n# define ASIO_OS_DEF_SHUT_WR SD_SEND\n# define ASIO_OS_DEF_SHUT_RDWR SD_BOTH\n# define ASIO_OS_DEF_SOMAXCONN SOMAXCONN\n# define ASIO_OS_DEF_SOL_SOCKET SOL_SOCKET\n# define ASIO_OS_DEF_SO_BROADCAST SO_BROADCAST\n# define ASIO_OS_DEF_SO_DEBUG SO_DEBUG\n# define ASIO_OS_DEF_SO_DONTROUTE SO_DONTROUTE\n# define ASIO_OS_DEF_SO_KEEPALIVE SO_KEEPALIVE\n# define ASIO_OS_DEF_SO_LINGER SO_LINGER\n# define ASIO_OS_DEF_SO_OOBINLINE SO_OOBINLINE\n# define ASIO_OS_DEF_SO_SNDBUF SO_SNDBUF\n# define ASIO_OS_DEF_SO_RCVBUF SO_RCVBUF\n# define ASIO_OS_DEF_SO_SNDLOWAT SO_SNDLOWAT\n# define ASIO_OS_DEF_SO_RCVLOWAT SO_RCVLOWAT\n# define ASIO_OS_DEF_SO_REUSEADDR SO_REUSEADDR\n# define ASIO_OS_DEF_TCP_NODELAY TCP_NODELAY\n# define ASIO_OS_DEF_IP_MULTICAST_IF IP_MULTICAST_IF\n# define ASIO_OS_DEF_IP_MULTICAST_TTL IP_MULTICAST_TTL\n# define ASIO_OS_DEF_IP_MULTICAST_LOOP IP_MULTICAST_LOOP\n# define ASIO_OS_DEF_IP_ADD_MEMBERSHIP IP_ADD_MEMBERSHIP\n# define ASIO_OS_DEF_IP_DROP_MEMBERSHIP IP_DROP_MEMBERSHIP\n# define ASIO_OS_DEF_IP_TTL IP_TTL\n# define ASIO_OS_DEF_IPV6_UNICAST_HOPS IPV6_UNICAST_HOPS\n# define ASIO_OS_DEF_IPV6_MULTICAST_IF IPV6_MULTICAST_IF\n# define ASIO_OS_DEF_IPV6_MULTICAST_HOPS IPV6_MULTICAST_HOPS\n# define ASIO_OS_DEF_IPV6_MULTICAST_LOOP IPV6_MULTICAST_LOOP\n# define ASIO_OS_DEF_IPV6_JOIN_GROUP IPV6_JOIN_GROUP\n# define ASIO_OS_DEF_IPV6_LEAVE_GROUP IPV6_LEAVE_GROUP\n# define ASIO_OS_DEF_AI_CANONNAME AI_CANONNAME\n# define ASIO_OS_DEF_AI_PASSIVE AI_PASSIVE\n# define ASIO_OS_DEF_AI_NUMERICHOST AI_NUMERICHOST\n# if defined(AI_NUMERICSERV)\n#  define ASIO_OS_DEF_AI_NUMERICSERV AI_NUMERICSERV\n# else\n#  define 
ASIO_OS_DEF_AI_NUMERICSERV 0\n# endif\n# if defined(AI_V4MAPPED)\n#  define ASIO_OS_DEF_AI_V4MAPPED AI_V4MAPPED\n# else\n#  define ASIO_OS_DEF_AI_V4MAPPED 0\n# endif\n# if defined(AI_ALL)\n#  define ASIO_OS_DEF_AI_ALL AI_ALL\n# else\n#  define ASIO_OS_DEF_AI_ALL 0\n# endif\n# if defined(AI_ADDRCONFIG)\n#  define ASIO_OS_DEF_AI_ADDRCONFIG AI_ADDRCONFIG\n# else\n#  define ASIO_OS_DEF_AI_ADDRCONFIG 0\n# endif\n# if defined (_WIN32_WINNT)\nconst int max_iov_len = 64;\n# else\nconst int max_iov_len = 16;\n# endif\n#else\ntypedef int socket_type;\nconst int invalid_socket = -1;\nconst int socket_error_retval = -1;\nconst int max_addr_v4_str_len = INET_ADDRSTRLEN;\n#if defined(INET6_ADDRSTRLEN)\nconst int max_addr_v6_str_len = INET6_ADDRSTRLEN + 1 + IF_NAMESIZE;\n#else // defined(INET6_ADDRSTRLEN)\nconst int max_addr_v6_str_len = 256;\n#endif // defined(INET6_ADDRSTRLEN)\ntypedef sockaddr socket_addr_type;\ntypedef in_addr in4_addr_type;\n# if defined(__hpux)\n// HP-UX doesn't provide ip_mreq when _XOPEN_SOURCE_EXTENDED is defined.\nstruct in4_mreq_type\n{\n  struct in_addr imr_multiaddr;\n  struct in_addr imr_interface;\n};\n# else\ntypedef ip_mreq in4_mreq_type;\n# endif\ntypedef sockaddr_in sockaddr_in4_type;\ntypedef in6_addr in6_addr_type;\ntypedef ipv6_mreq in6_mreq_type;\ntypedef sockaddr_in6 sockaddr_in6_type;\ntypedef sockaddr_storage sockaddr_storage_type;\ntypedef sockaddr_un sockaddr_un_type;\ntypedef addrinfo addrinfo_type;\ntypedef ::linger linger_type;\ntypedef int ioctl_arg_type;\ntypedef uint32_t u_long_type;\ntypedef uint16_t u_short_type;\n#if defined(ASIO_HAS_SSIZE_T)\ntypedef ssize_t signed_size_type;\n#else // defined(ASIO_HAS_SSIZE_T)\ntypedef int signed_size_type;\n#endif // defined(ASIO_HAS_SSIZE_T)\n# define ASIO_OS_DEF(c) ASIO_OS_DEF_##c\n# define ASIO_OS_DEF_AF_UNSPEC AF_UNSPEC\n# define ASIO_OS_DEF_AF_INET AF_INET\n# define ASIO_OS_DEF_AF_INET6 AF_INET6\n# define ASIO_OS_DEF_SOCK_STREAM SOCK_STREAM\n# define ASIO_OS_DEF_SOCK_DGRAM 
SOCK_DGRAM\n# define ASIO_OS_DEF_SOCK_RAW SOCK_RAW\n# define ASIO_OS_DEF_SOCK_SEQPACKET SOCK_SEQPACKET\n# define ASIO_OS_DEF_IPPROTO_IP IPPROTO_IP\n# define ASIO_OS_DEF_IPPROTO_IPV6 IPPROTO_IPV6\n# define ASIO_OS_DEF_IPPROTO_TCP IPPROTO_TCP\n# define ASIO_OS_DEF_IPPROTO_UDP IPPROTO_UDP\n# define ASIO_OS_DEF_IPPROTO_ICMP IPPROTO_ICMP\n# define ASIO_OS_DEF_IPPROTO_ICMPV6 IPPROTO_ICMPV6\n# define ASIO_OS_DEF_FIONBIO FIONBIO\n# define ASIO_OS_DEF_FIONREAD FIONREAD\n# define ASIO_OS_DEF_INADDR_ANY INADDR_ANY\n# define ASIO_OS_DEF_MSG_OOB MSG_OOB\n# define ASIO_OS_DEF_MSG_PEEK MSG_PEEK\n# define ASIO_OS_DEF_MSG_DONTROUTE MSG_DONTROUTE\n# define ASIO_OS_DEF_MSG_EOR MSG_EOR\n# define ASIO_OS_DEF_SHUT_RD SHUT_RD\n# define ASIO_OS_DEF_SHUT_WR SHUT_WR\n# define ASIO_OS_DEF_SHUT_RDWR SHUT_RDWR\n# define ASIO_OS_DEF_SOMAXCONN SOMAXCONN\n# define ASIO_OS_DEF_SOL_SOCKET SOL_SOCKET\n# define ASIO_OS_DEF_SO_BROADCAST SO_BROADCAST\n# define ASIO_OS_DEF_SO_DEBUG SO_DEBUG\n# define ASIO_OS_DEF_SO_DONTROUTE SO_DONTROUTE\n# define ASIO_OS_DEF_SO_KEEPALIVE SO_KEEPALIVE\n# define ASIO_OS_DEF_SO_LINGER SO_LINGER\n# define ASIO_OS_DEF_SO_OOBINLINE SO_OOBINLINE\n# define ASIO_OS_DEF_SO_SNDBUF SO_SNDBUF\n# define ASIO_OS_DEF_SO_RCVBUF SO_RCVBUF\n# define ASIO_OS_DEF_SO_SNDLOWAT SO_SNDLOWAT\n# define ASIO_OS_DEF_SO_RCVLOWAT SO_RCVLOWAT\n# define ASIO_OS_DEF_SO_REUSEADDR SO_REUSEADDR\n# define ASIO_OS_DEF_TCP_NODELAY TCP_NODELAY\n# define ASIO_OS_DEF_IP_MULTICAST_IF IP_MULTICAST_IF\n# define ASIO_OS_DEF_IP_MULTICAST_TTL IP_MULTICAST_TTL\n# define ASIO_OS_DEF_IP_MULTICAST_LOOP IP_MULTICAST_LOOP\n# define ASIO_OS_DEF_IP_ADD_MEMBERSHIP IP_ADD_MEMBERSHIP\n# define ASIO_OS_DEF_IP_DROP_MEMBERSHIP IP_DROP_MEMBERSHIP\n# define ASIO_OS_DEF_IP_TTL IP_TTL\n# define ASIO_OS_DEF_IPV6_UNICAST_HOPS IPV6_UNICAST_HOPS\n# define ASIO_OS_DEF_IPV6_MULTICAST_IF IPV6_MULTICAST_IF\n# define ASIO_OS_DEF_IPV6_MULTICAST_HOPS IPV6_MULTICAST_HOPS\n# define ASIO_OS_DEF_IPV6_MULTICAST_LOOP IPV6_MULTICAST_LOOP\n# define 
ASIO_OS_DEF_IPV6_JOIN_GROUP IPV6_JOIN_GROUP\n# define ASIO_OS_DEF_IPV6_LEAVE_GROUP IPV6_LEAVE_GROUP\n# define ASIO_OS_DEF_AI_CANONNAME AI_CANONNAME\n# define ASIO_OS_DEF_AI_PASSIVE AI_PASSIVE\n# define ASIO_OS_DEF_AI_NUMERICHOST AI_NUMERICHOST\n# if defined(AI_NUMERICSERV)\n#  define ASIO_OS_DEF_AI_NUMERICSERV AI_NUMERICSERV\n# else\n#  define ASIO_OS_DEF_AI_NUMERICSERV 0\n# endif\n// Note: QNX Neutrino 6.3 defines AI_V4MAPPED, AI_ALL and AI_ADDRCONFIG but\n// does not implement them. Therefore they are specifically excluded here.\n# if defined(AI_V4MAPPED) && !defined(__QNXNTO__)\n#  define ASIO_OS_DEF_AI_V4MAPPED AI_V4MAPPED\n# else\n#  define ASIO_OS_DEF_AI_V4MAPPED 0\n# endif\n# if defined(AI_ALL) && !defined(__QNXNTO__)\n#  define ASIO_OS_DEF_AI_ALL AI_ALL\n# else\n#  define ASIO_OS_DEF_AI_ALL 0\n# endif\n# if defined(AI_ADDRCONFIG) && !defined(__QNXNTO__)\n#  define ASIO_OS_DEF_AI_ADDRCONFIG AI_ADDRCONFIG\n# else\n#  define ASIO_OS_DEF_AI_ADDRCONFIG 0\n# endif\n# if defined(IOV_MAX)\nconst int max_iov_len = IOV_MAX;\n# else\n// POSIX platforms are not required to define IOV_MAX.\nconst int max_iov_len = 16;\n# endif\n#endif\nconst int custom_socket_option_level = 0xA5100000;\nconst int enable_connection_aborted_option = 1;\nconst int always_fail_option = 2;\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_SOCKET_TYPES_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/solaris_fenced_block.hpp",
    "content": "//\n// detail/solaris_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(__sun)\n\n#include <atomic.h>\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass solaris_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_t { half };\n  enum full_t { full };\n\n  // Constructor for a half fenced block.\n  explicit solaris_fenced_block(half_t)\n  {\n  }\n\n  // Constructor for a full fenced block.\n  explicit solaris_fenced_block(full_t)\n  {\n    membar_consumer();\n  }\n\n  // Destructor.\n  ~solaris_fenced_block()\n  {\n    membar_producer();\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(__sun)\n\n#endif // ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/static_mutex.hpp",
    "content": "//\n// detail/static_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STATIC_MUTEX_HPP\n#define ASIO_DETAIL_STATIC_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n# include \"asio/detail/null_static_mutex.hpp\"\n#elif defined(ASIO_WINDOWS)\n# include \"asio/detail/win_static_mutex.hpp\"\n#elif defined(ASIO_HAS_PTHREADS)\n# include \"asio/detail/posix_static_mutex.hpp\"\n#elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n# include \"asio/detail/std_static_mutex.hpp\"\n#else\n# error Only Windows and POSIX are supported!\n#endif\n\nnamespace asio {\nnamespace detail {\n\n#if !defined(ASIO_HAS_THREADS)\ntypedef null_static_mutex static_mutex;\n# define ASIO_STATIC_MUTEX_INIT ASIO_NULL_STATIC_MUTEX_INIT\n#elif defined(ASIO_WINDOWS)\ntypedef win_static_mutex static_mutex;\n# define ASIO_STATIC_MUTEX_INIT ASIO_WIN_STATIC_MUTEX_INIT\n#elif defined(ASIO_HAS_PTHREADS)\ntypedef posix_static_mutex static_mutex;\n# define ASIO_STATIC_MUTEX_INIT ASIO_POSIX_STATIC_MUTEX_INIT\n#elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\ntypedef std_static_mutex static_mutex;\n# define ASIO_STATIC_MUTEX_INIT ASIO_STD_STATIC_MUTEX_INIT\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_STATIC_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/std_event.hpp",
    "content": "//\n// detail/std_event.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STD_EVENT_HPP\n#define ASIO_DETAIL_STD_EVENT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n\n#include <chrono>\n#include <condition_variable>\n#include \"asio/detail/assert.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass std_event\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  std_event()\n    : state_(0)\n  {\n  }\n\n  // Destructor.\n  ~std_event()\n  {\n  }\n\n  // Signal the event. (Retained for backward compatibility.)\n  template <typename Lock>\n  void signal(Lock& lock)\n  {\n    this->signal_all(lock);\n  }\n\n  // Signal all waiters.\n  template <typename Lock>\n  void signal_all(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    (void)lock;\n    state_ |= 1;\n    cond_.notify_all();\n  }\n\n  // Unlock the mutex and signal one waiter.\n  template <typename Lock>\n  void unlock_and_signal_one(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    state_ |= 1;\n    bool have_waiters = (state_ > 1);\n    lock.unlock();\n    if (have_waiters)\n      cond_.notify_one();\n  }\n\n  // If there's a waiter, unlock the mutex and signal it.\n  template <typename Lock>\n  bool maybe_unlock_and_signal_one(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    state_ |= 1;\n    if (state_ > 1)\n    {\n      lock.unlock();\n      cond_.notify_one();\n      return true;\n    }\n    return false;\n  }\n\n  // Reset the event.\n  template <typename Lock>\n  void clear(Lock& 
lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    (void)lock;\n    state_ &= ~std::size_t(1);\n  }\n\n  // Wait for the event to become signalled.\n  template <typename Lock>\n  void wait(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    unique_lock_adapter u_lock(lock);\n    while ((state_ & 1) == 0)\n    {\n      waiter w(state_);\n      cond_.wait(u_lock.unique_lock_);\n    }\n  }\n\n  // Timed wait for the event to become signalled.\n  template <typename Lock>\n  bool wait_for_usec(Lock& lock, long usec)\n  {\n    ASIO_ASSERT(lock.locked());\n    unique_lock_adapter u_lock(lock);\n    if ((state_ & 1) == 0)\n    {\n      waiter w(state_);\n      cond_.wait_for(u_lock.unique_lock_, std::chrono::microseconds(usec));\n    }\n    return (state_ & 1) != 0;\n  }\n\nprivate:\n  // Helper class to temporarily adapt a scoped_lock into a unique_lock so that\n  // it can be passed to std::condition_variable::wait().\n  struct unique_lock_adapter\n  {\n    template <typename Lock>\n    explicit unique_lock_adapter(Lock& lock)\n      : unique_lock_(lock.mutex().mutex_, std::adopt_lock)\n    {\n    }\n\n    ~unique_lock_adapter()\n    {\n      unique_lock_.release();\n    }\n\n    std::unique_lock<std::mutex> unique_lock_;\n  };\n\n  // Helper to increment and decrement the state to track outstanding waiters.\n  class waiter\n  {\n  public:\n    explicit waiter(std::size_t& state)\n      : state_(state)\n    {\n      state_ += 2;\n    }\n\n    ~waiter()\n    {\n      state_ -= 2;\n    }\n\n  private:\n    std::size_t& state_;\n  };\n\n  std::condition_variable cond_;\n  std::size_t state_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n\n#endif // ASIO_DETAIL_STD_EVENT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/std_fenced_block.hpp",
    "content": "//\n// detail/std_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STD_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_STD_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_ATOMIC)\n\n#include <atomic>\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass std_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_t { half };\n  enum full_t { full };\n\n  // Constructor for a half fenced block.\n  explicit std_fenced_block(half_t)\n  {\n  }\n\n  // Constructor for a full fenced block.\n  explicit std_fenced_block(full_t)\n  {\n    std::atomic_thread_fence(std::memory_order_acquire);\n  }\n\n  // Destructor.\n  ~std_fenced_block()\n  {\n    std::atomic_thread_fence(std::memory_order_release);\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_STD_ATOMIC)\n\n#endif // ASIO_DETAIL_STD_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/std_global.hpp",
    "content": "//\n// detail/std_global.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STD_GLOBAL_HPP\n#define ASIO_DETAIL_STD_GLOBAL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_CALL_ONCE)\n\n#include <exception>\n#include <mutex>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nstruct std_global_impl\n{\n  // Helper function to perform initialisation.\n  static void do_init()\n  {\n    instance_.ptr_ = new T;\n  }\n\n  // Destructor automatically cleans up the global.\n  ~std_global_impl()\n  {\n    delete ptr_;\n  }\n\n  static std::once_flag init_once_;\n  static std_global_impl instance_;\n  T* ptr_;\n};\n\ntemplate <typename T>\nstd::once_flag std_global_impl<T>::init_once_;\n\ntemplate <typename T>\nstd_global_impl<T> std_global_impl<T>::instance_;\n\ntemplate <typename T>\nT& std_global()\n{\n  std::call_once(std_global_impl<T>::init_once_, &std_global_impl<T>::do_init);\n  return *std_global_impl<T>::instance_.ptr_;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_STD_CALL_ONCE)\n\n#endif // ASIO_DETAIL_STD_GLOBAL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/std_mutex.hpp",
    "content": "//\n// detail/std_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STD_MUTEX_HPP\n#define ASIO_DETAIL_STD_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n\n#include <mutex>\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass std_event;\n\nclass std_mutex\n  : private noncopyable\n{\npublic:\n  typedef asio::detail::scoped_lock<std_mutex> scoped_lock;\n\n  // Constructor.\n  std_mutex()\n  {\n  }\n\n  // Destructor.\n  ~std_mutex()\n  {\n  }\n\n  // Lock the mutex.\n  void lock()\n  {\n    mutex_.lock();\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n    mutex_.unlock();\n  }\n\nprivate:\n  friend class std_event;\n  std::mutex mutex_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n\n#endif // ASIO_DETAIL_STD_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/std_static_mutex.hpp",
    "content": "//\n// detail/std_static_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STD_STATIC_MUTEX_HPP\n#define ASIO_DETAIL_STD_STATIC_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n\n#include <mutex>\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass std_event;\n\nclass std_static_mutex\n  : private noncopyable\n{\npublic:\n  typedef asio::detail::scoped_lock<std_static_mutex> scoped_lock;\n\n  // Constructor.\n  std_static_mutex(int)\n  {\n  }\n\n  // Destructor.\n  ~std_static_mutex()\n  {\n  }\n\n  // Initialise the mutex.\n  void init()\n  {\n    // Nothing to do.\n  }\n\n  // Lock the mutex.\n  void lock()\n  {\n    mutex_.lock();\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n    mutex_.unlock();\n  }\n\nprivate:\n  friend class std_event;\n  std::mutex mutex_;\n};\n\n#define ASIO_STD_STATIC_MUTEX_INIT 0\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR)\n\n#endif // ASIO_DETAIL_STD_STATIC_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/std_thread.hpp",
    "content": "//\n// detail/std_thread.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STD_THREAD_HPP\n#define ASIO_DETAIL_STD_THREAD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_THREAD)\n\n#include <thread>\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass std_thread\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  template <typename Function>\n  std_thread(Function f, unsigned int = 0)\n    : thread_(f)\n  {\n  }\n\n  // Destructor.\n  ~std_thread()\n  {\n    join();\n  }\n\n  // Wait for the thread to exit.\n  void join()\n  {\n    if (thread_.joinable())\n      thread_.join();\n  }\n\n  // Get number of CPUs.\n  static std::size_t hardware_concurrency()\n  {\n    return std::thread::hardware_concurrency();\n  }\n\nprivate:\n  std::thread thread_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_STD_THREAD)\n\n#endif // ASIO_DETAIL_STD_THREAD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/strand_executor_service.hpp",
    "content": "//\n// detail/strand_executor_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STRAND_EXECUTOR_SERVICE_HPP\n#define ASIO_DETAIL_STRAND_EXECUTOR_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/atomic_count.hpp\"\n#include \"asio/detail/executor_op.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/scheduler_operation.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Default service implementation for a strand.\nclass strand_executor_service\n  : public execution_context_service_base<strand_executor_service>\n{\npublic:\n  // The underlying implementation of a strand.\n  class strand_impl\n  {\n  public:\n    ASIO_DECL ~strand_impl();\n\n  private:\n    friend class strand_executor_service;\n\n    // Mutex to protect access to internal data.\n    mutex* mutex_;\n\n    // Indicates whether the strand is currently \"locked\" by a handler. This\n    // means that there is a handler upcall in progress, or that the strand\n    // itself has been scheduled in order to invoke some pending handlers.\n    bool locked_;\n\n    // Indicates that the strand has been shut down and will accept no further\n    // handlers.\n    bool shutdown_;\n\n    // The handlers that are waiting on the strand but should not be run until\n    // after the next time the strand is scheduled. 
This queue must only be\n    // modified while the mutex is locked.\n    op_queue<scheduler_operation> waiting_queue_;\n\n    // The handlers that are ready to be run. Logically speaking, these are the\n    // handlers that hold the strand's lock. The ready queue is only modified\n    // from within the strand and so may be accessed without locking the mutex.\n    op_queue<scheduler_operation> ready_queue_;\n\n    // Pointers to adjacent handle implementations in linked list.\n    strand_impl* next_;\n    strand_impl* prev_;\n\n    // The strand service in where the implementation is held.\n    strand_executor_service* service_;\n  };\n\n  typedef shared_ptr<strand_impl> implementation_type;\n\n  // Construct a new strand service for the specified context.\n  ASIO_DECL explicit strand_executor_service(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Create a new strand_executor implementation.\n  ASIO_DECL implementation_type create_implementation();\n\n  // Request invocation of the given function.\n  template <typename Executor, typename Function, typename Allocator>\n  static void dispatch(const implementation_type& impl, Executor& ex,\n      ASIO_MOVE_ARG(Function) function, const Allocator& a);\n\n  // Request invocation of the given function and return immediately.\n  template <typename Executor, typename Function, typename Allocator>\n  static void post(const implementation_type& impl, Executor& ex,\n      ASIO_MOVE_ARG(Function) function, const Allocator& a);\n\n  // Request invocation of the given function and return immediately.\n  template <typename Executor, typename Function, typename Allocator>\n  static void defer(const implementation_type& impl, Executor& ex,\n      ASIO_MOVE_ARG(Function) function, const Allocator& a);\n\n  // Determine whether the strand is running in the current thread.\n  ASIO_DECL static bool running_in_this_thread(\n      const 
implementation_type& impl);\n\nprivate:\n  friend class strand_impl;\n  template <typename Executor> class invoker;\n\n  // Adds a function to the strand. Returns true if it acquires the lock.\n  ASIO_DECL static bool enqueue(const implementation_type& impl,\n      scheduler_operation* op);\n\n  // Mutex to protect access to the service-wide state.\n  mutex mutex_;\n\n  // Number of mutexes shared between all strand objects.\n  enum { num_mutexes = 193 };\n\n  // Pool of mutexes.\n  scoped_ptr<mutex> mutexes_[num_mutexes];\n\n  // Extra value used when hashing to prevent recycled memory locations from\n  // getting the same mutex.\n  std::size_t salt_;\n\n  // The head of a linked list of all implementations.\n  strand_impl* impl_list_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/detail/impl/strand_executor_service.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/strand_executor_service.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_STRAND_EXECUTOR_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/strand_service.hpp",
    "content": "//\n// detail/strand_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STRAND_SERVICE_HPP\n#define ASIO_DETAIL_STRAND_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/io_context.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Default service implementation for a strand.\nclass strand_service\n  : public asio::detail::service_base<strand_service>\n{\nprivate:\n  // Helper class to re-post the strand on exit.\n  struct on_do_complete_exit;\n\n  // Helper class to re-post the strand on exit.\n  struct on_dispatch_exit;\n\npublic:\n\n  // The underlying implementation of a strand.\n  class strand_impl\n    : public operation\n  {\n  public:\n    strand_impl();\n\n  private:\n    // Only this service will have access to the internal values.\n    friend class strand_service;\n    friend struct on_do_complete_exit;\n    friend struct on_dispatch_exit;\n\n    // Mutex to protect access to internal data.\n    asio::detail::mutex mutex_;\n\n    // Indicates whether the strand is currently \"locked\" by a handler. This\n    // means that there is a handler upcall in progress, or that the strand\n    // itself has been scheduled in order to invoke some pending handlers.\n    bool locked_;\n\n    // The handlers that are waiting on the strand but should not be run until\n    // after the next time the strand is scheduled. 
This queue must only be\n    // modified while the mutex is locked.\n    op_queue<operation> waiting_queue_;\n\n    // The handlers that are ready to be run. Logically speaking, these are the\n    // handlers that hold the strand's lock. The ready queue is only modified\n    // from within the strand and so may be accessed without locking the mutex.\n    op_queue<operation> ready_queue_;\n  };\n\n  typedef strand_impl* implementation_type;\n\n  // Construct a new strand service for the specified io_context.\n  ASIO_DECL explicit strand_service(asio::io_context& io_context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Construct a new strand implementation.\n  ASIO_DECL void construct(implementation_type& impl);\n\n  // Request the io_context to invoke the given handler.\n  template <typename Handler>\n  void dispatch(implementation_type& impl, Handler& handler);\n\n  // Request the io_context to invoke the given handler and return immediately.\n  template <typename Handler>\n  void post(implementation_type& impl, Handler& handler);\n\n  // Determine whether the strand is running in the current thread.\n  ASIO_DECL bool running_in_this_thread(\n      const implementation_type& impl) const;\n\nprivate:\n  // Helper function to dispatch a handler. 
Returns true if the handler should\n  // be dispatched immediately.\n  ASIO_DECL bool do_dispatch(implementation_type& impl, operation* op);\n\n  // Helper fiunction to post a handler.\n  ASIO_DECL void do_post(implementation_type& impl,\n      operation* op, bool is_continuation);\n\n  ASIO_DECL static void do_complete(void* owner,\n      operation* base, const asio::error_code& ec,\n      std::size_t bytes_transferred);\n\n  // The io_context implementation used to post completions.\n  io_context_impl& io_context_;\n\n  // Mutex to protect access to the array of implementations.\n  asio::detail::mutex mutex_;\n\n  // Number of implementations shared between all strand objects.\n#if defined(ASIO_STRAND_IMPLEMENTATIONS)\n  enum { num_implementations = ASIO_STRAND_IMPLEMENTATIONS };\n#else // defined(ASIO_STRAND_IMPLEMENTATIONS)\n  enum { num_implementations = 193 };\n#endif // defined(ASIO_STRAND_IMPLEMENTATIONS)\n\n  // Pool of implementations.\n  scoped_ptr<strand_impl> implementations_[num_implementations];\n\n  // Extra value used when hashing to prevent recycled memory locations from\n  // getting the same strand implementation.\n  std::size_t salt_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/detail/impl/strand_service.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/strand_service.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_STRAND_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/string_view.hpp",
    "content": "//\n// detail/string_view.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_STRING_VIEW_HPP\n#define ASIO_DETAIL_STRING_VIEW_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STRING_VIEW)\n\n#if defined(ASIO_HAS_STD_STRING_VIEW)\n# include <string_view>\n#elif defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\n# include <experimental/string_view>\n#else // defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\n# error ASIO_HAS_STRING_VIEW is set but no string_view is available\n#endif // defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\n\nnamespace asio {\n\n#if defined(ASIO_HAS_STD_STRING_VIEW)\nusing std::basic_string_view;\nusing std::string_view;\n#elif defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\nusing std::experimental::basic_string_view;\nusing std::experimental::string_view;\n#endif // defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW)\n\n} // namespace asio\n\n# define ASIO_STRING_VIEW_PARAM asio::string_view\n#else // defined(ASIO_HAS_STRING_VIEW)\n# define ASIO_STRING_VIEW_PARAM const std::string&\n#endif // defined(ASIO_HAS_STRING_VIEW)\n\n#endif // ASIO_DETAIL_STRING_VIEW_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/thread.hpp",
    "content": "//\n// detail/thread.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_THREAD_HPP\n#define ASIO_DETAIL_THREAD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n# include \"asio/detail/null_thread.hpp\"\n#elif defined(ASIO_WINDOWS)\n# if defined(UNDER_CE)\n#  include \"asio/detail/wince_thread.hpp\"\n# elif defined(ASIO_WINDOWS_APP)\n#  include \"asio/detail/winapp_thread.hpp\"\n# else\n#  include \"asio/detail/win_thread.hpp\"\n# endif\n#elif defined(ASIO_HAS_PTHREADS)\n# include \"asio/detail/posix_thread.hpp\"\n#elif defined(ASIO_HAS_STD_THREAD)\n# include \"asio/detail/std_thread.hpp\"\n#else\n# error Only Windows, POSIX and std::thread are supported!\n#endif\n\nnamespace asio {\nnamespace detail {\n\n#if !defined(ASIO_HAS_THREADS)\ntypedef null_thread thread;\n#elif defined(ASIO_WINDOWS)\n# if defined(UNDER_CE)\ntypedef wince_thread thread;\n# elif defined(ASIO_WINDOWS_APP)\ntypedef winapp_thread thread;\n# else\ntypedef win_thread thread;\n# endif\n#elif defined(ASIO_HAS_PTHREADS)\ntypedef posix_thread thread;\n#elif defined(ASIO_HAS_STD_THREAD)\ntypedef std_thread thread;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_THREAD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/thread_context.hpp",
    "content": "//\n// detail/thread_context.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_THREAD_CONTEXT_HPP\n#define ASIO_DETAIL_THREAD_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include <climits>\n#include <cstddef>\n#include \"asio/detail/call_stack.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass thread_info_base;\n\n// Base class for things that manage threads (scheduler, win_iocp_io_context).\nclass thread_context\n{\npublic:\n  // Per-thread call stack to track the state of each thread in the context.\n  typedef call_stack<thread_context, thread_info_base> thread_call_stack;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_THREAD_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/thread_group.hpp",
    "content": "//\n// detail/thread_group.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_THREAD_GROUP_HPP\n#define ASIO_DETAIL_THREAD_GROUP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n#include \"asio/detail/thread.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass thread_group\n{\npublic:\n  // Constructor initialises an empty thread group.\n  thread_group()\n    : first_(0)\n  {\n  }\n\n  // Destructor joins any remaining threads in the group.\n  ~thread_group()\n  {\n    join();\n  }\n\n  // Create a new thread in the group.\n  template <typename Function>\n  void create_thread(Function f)\n  {\n    first_ = new item(f, first_);\n  }\n\n  // Create new threads in the group.\n  template <typename Function>\n  void create_threads(Function f, std::size_t num_threads)\n  {\n    for (std::size_t i = 0; i < num_threads; ++i)\n      create_thread(f);\n  }\n\n  // Wait for all threads in the group to exit.\n  void join()\n  {\n    while (first_)\n    {\n      first_->thread_.join();\n      item* tmp = first_;\n      first_ = first_->next_;\n      delete tmp;\n    }\n  }\n\n  // Test whether the group is empty.\n  bool empty() const\n  {\n    return first_ == 0;\n  }\n\nprivate:\n  // Structure used to track a single thread in the group.\n  struct item\n  {\n    template <typename Function>\n    explicit item(Function f, item* next)\n      : thread_(f),\n        next_(next)\n    {\n    }\n\n    asio::detail::thread thread_;\n    item* next_;\n  };\n\n  // The first thread in the group.\n  item* first_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#endif // 
ASIO_DETAIL_THREAD_GROUP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/thread_info_base.hpp",
    "content": "//\n// detail/thread_info_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_THREAD_INFO_BASE_HPP\n#define ASIO_DETAIL_THREAD_INFO_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include <climits>\n#include <cstddef>\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass thread_info_base\n  : private noncopyable\n{\npublic:\n  struct default_tag\n  {\n    enum { mem_index = 0 };\n  };\n\n  struct awaitable_frame_tag\n  {\n    enum { mem_index = 1 };\n  };\n\n  struct executor_function_tag\n  {\n    enum { mem_index = 2 };\n  };\n\n  thread_info_base()\n  {\n    for (int i = 0; i < max_mem_index; ++i)\n      reusable_memory_[i] = 0;\n  }\n\n  ~thread_info_base()\n  {\n    for (int i = 0; i < max_mem_index; ++i)\n      if (reusable_memory_[i])\n        ::operator delete(reusable_memory_[i]);\n  }\n\n  static void* allocate(thread_info_base* this_thread, std::size_t size)\n  {\n    return allocate(default_tag(), this_thread, size);\n  }\n\n  static void deallocate(thread_info_base* this_thread,\n      void* pointer, std::size_t size)\n  {\n    deallocate(default_tag(), this_thread, pointer, size);\n  }\n\n  template <typename Purpose>\n  static void* allocate(Purpose, thread_info_base* this_thread,\n      std::size_t size)\n  {\n    std::size_t chunks = (size + chunk_size - 1) / chunk_size;\n\n    if (this_thread && this_thread->reusable_memory_[Purpose::mem_index])\n    {\n      void* const pointer = this_thread->reusable_memory_[Purpose::mem_index];\n      this_thread->reusable_memory_[Purpose::mem_index] = 0;\n\n      unsigned char* const 
mem = static_cast<unsigned char*>(pointer);\n      if (static_cast<std::size_t>(mem[0]) >= chunks)\n      {\n        mem[size] = mem[0];\n        return pointer;\n      }\n\n      ::operator delete(pointer);\n    }\n\n    void* const pointer = ::operator new(chunks * chunk_size + 1);\n    unsigned char* const mem = static_cast<unsigned char*>(pointer);\n    mem[size] = (chunks <= UCHAR_MAX) ? static_cast<unsigned char>(chunks) : 0;\n    return pointer;\n  }\n\n  template <typename Purpose>\n  static void deallocate(Purpose, thread_info_base* this_thread,\n      void* pointer, std::size_t size)\n  {\n    if (size <= chunk_size * UCHAR_MAX)\n    {\n      if (this_thread && this_thread->reusable_memory_[Purpose::mem_index] == 0)\n      {\n        unsigned char* const mem = static_cast<unsigned char*>(pointer);\n        mem[0] = mem[size];\n        this_thread->reusable_memory_[Purpose::mem_index] = pointer;\n        return;\n      }\n    }\n\n    ::operator delete(pointer);\n  }\n\nprivate:\n  enum { chunk_size = 4 };\n  enum { max_mem_index = 3 };\n  void* reusable_memory_[max_mem_index];\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_THREAD_INFO_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/throw_error.hpp",
    "content": "//\n// detail/throw_error.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_THROW_ERROR_HPP\n#define ASIO_DETAIL_THROW_ERROR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/error_code.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nASIO_DECL void do_throw_error(const asio::error_code& err);\n\nASIO_DECL void do_throw_error(const asio::error_code& err,\n    const char* location);\n\ninline void throw_error(const asio::error_code& err)\n{\n  if (err)\n    do_throw_error(err);\n}\n\ninline void throw_error(const asio::error_code& err,\n    const char* location)\n{\n  if (err)\n    do_throw_error(err, location);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/throw_error.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_THROW_ERROR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/throw_exception.hpp",
    "content": "//\n// detail/throw_exception.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_THROW_EXCEPTION_HPP\n#define ASIO_DETAIL_THROW_EXCEPTION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_THROW_EXCEPTION)\n# include <boost/throw_exception.hpp>\n#endif // defined(ASIO_BOOST_THROW_EXCEPTION)\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_HAS_BOOST_THROW_EXCEPTION)\nusing boost::throw_exception;\n#else // defined(ASIO_HAS_BOOST_THROW_EXCEPTION)\n\n// Declare the throw_exception function for all targets.\ntemplate <typename Exception>\nvoid throw_exception(const Exception& e);\n\n// Only define the throw_exception function when exceptions are enabled.\n// Otherwise, it is up to the application to provide a definition of this\n// function.\n# if !defined(ASIO_NO_EXCEPTIONS)\ntemplate <typename Exception>\nvoid throw_exception(const Exception& e)\n{\n  throw e;\n}\n# endif // !defined(ASIO_NO_EXCEPTIONS)\n\n#endif // defined(ASIO_HAS_BOOST_THROW_EXCEPTION)\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_THROW_EXCEPTION_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/timer_queue.hpp",
    "content": "//\n// detail/timer_queue.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_TIMER_QUEUE_HPP\n#define ASIO_DETAIL_TIMER_QUEUE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <vector>\n#include \"asio/detail/cstdint.hpp\"\n#include \"asio/detail/date_time_fwd.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/timer_queue_base.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Time_Traits>\nclass timer_queue\n  : public timer_queue_base\n{\npublic:\n  // The time type.\n  typedef typename Time_Traits::time_type time_type;\n\n  // The duration type.\n  typedef typename Time_Traits::duration_type duration_type;\n\n  // Per-timer data.\n  class per_timer_data\n  {\n  public:\n    per_timer_data() :\n      heap_index_((std::numeric_limits<std::size_t>::max)()),\n      next_(0), prev_(0)\n    {\n    }\n\n  private:\n    friend class timer_queue;\n\n    // The operations waiting on the timer.\n    op_queue<wait_op> op_queue_;\n\n    // The index of the timer in the heap.\n    std::size_t heap_index_;\n\n    // Pointers to adjacent timers in a linked list.\n    per_timer_data* next_;\n    per_timer_data* prev_;\n  };\n\n  // Constructor.\n  timer_queue()\n    : timers_(),\n      heap_()\n  {\n  }\n\n  // Add a new timer to the queue. 
Returns true if this is the timer that is\n  // earliest in the queue, in which case the reactor's event demultiplexing\n  // function call may need to be interrupted and restarted.\n  bool enqueue_timer(const time_type& time, per_timer_data& timer, wait_op* op)\n  {\n    // Enqueue the timer object.\n    if (timer.prev_ == 0 && &timer != timers_)\n    {\n      if (this->is_positive_infinity(time))\n      {\n        // No heap entry is required for timers that never expire.\n        timer.heap_index_ = (std::numeric_limits<std::size_t>::max)();\n      }\n      else\n      {\n        // Put the new timer at the correct position in the heap. This is done\n        // first since push_back() can throw due to allocation failure.\n        timer.heap_index_ = heap_.size();\n        heap_entry entry = { time, &timer };\n        heap_.push_back(entry);\n        up_heap(heap_.size() - 1);\n      }\n\n      // Insert the new timer into the linked list of active timers.\n      timer.next_ = timers_;\n      timer.prev_ = 0;\n      if (timers_)\n        timers_->prev_ = &timer;\n      timers_ = &timer;\n    }\n\n    // Enqueue the individual timer operation.\n    timer.op_queue_.push(op);\n\n    // Interrupt reactor only if newly added timer is first to expire.\n    return timer.heap_index_ == 0 && timer.op_queue_.front() == op;\n  }\n\n  // Whether there are no timers in the queue.\n  virtual bool empty() const\n  {\n    return timers_ == 0;\n  }\n\n  // Get the time for the timer that is earliest in the queue.\n  virtual long wait_duration_msec(long max_duration) const\n  {\n    if (heap_.empty())\n      return max_duration;\n\n    return this->to_msec(\n        Time_Traits::to_posix_duration(\n          Time_Traits::subtract(heap_[0].time_, Time_Traits::now())),\n        max_duration);\n  }\n\n  // Get the time for the timer that is earliest in the queue.\n  virtual long wait_duration_usec(long max_duration) const\n  {\n    if (heap_.empty())\n      return max_duration;\n\n   
 return this->to_usec(\n        Time_Traits::to_posix_duration(\n          Time_Traits::subtract(heap_[0].time_, Time_Traits::now())),\n        max_duration);\n  }\n\n  // Dequeue all timers not later than the current time.\n  virtual void get_ready_timers(op_queue<operation>& ops)\n  {\n    if (!heap_.empty())\n    {\n      const time_type now = Time_Traits::now();\n      while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_))\n      {\n        per_timer_data* timer = heap_[0].timer_;\n        ops.push(timer->op_queue_);\n        remove_timer(*timer);\n      }\n    }\n  }\n\n  // Dequeue all timers.\n  virtual void get_all_timers(op_queue<operation>& ops)\n  {\n    while (timers_)\n    {\n      per_timer_data* timer = timers_;\n      timers_ = timers_->next_;\n      ops.push(timer->op_queue_);\n      timer->next_ = 0;\n      timer->prev_ = 0;\n    }\n\n    heap_.clear();\n  }\n\n  // Cancel and dequeue operations for the given timer.\n  std::size_t cancel_timer(per_timer_data& timer, op_queue<operation>& ops,\n      std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)())\n  {\n    std::size_t num_cancelled = 0;\n    if (timer.prev_ != 0 || &timer == timers_)\n    {\n      while (wait_op* op = (num_cancelled != max_cancelled)\n          ? 
timer.op_queue_.front() : 0)\n      {\n        op->ec_ = asio::error::operation_aborted;\n        timer.op_queue_.pop();\n        ops.push(op);\n        ++num_cancelled;\n      }\n      if (timer.op_queue_.empty())\n        remove_timer(timer);\n    }\n    return num_cancelled;\n  }\n\n  // Move operations from one timer to another, empty timer.\n  void move_timer(per_timer_data& target, per_timer_data& source)\n  {\n    target.op_queue_.push(source.op_queue_);\n\n    target.heap_index_ = source.heap_index_;\n    source.heap_index_ = (std::numeric_limits<std::size_t>::max)();\n\n    if (target.heap_index_ < heap_.size())\n      heap_[target.heap_index_].timer_ = &target;\n\n    if (timers_ == &source)\n      timers_ = &target;\n    if (source.prev_)\n      source.prev_->next_ = &target;\n    if (source.next_)\n      source.next_->prev_= &target;\n    target.next_ = source.next_;\n    target.prev_ = source.prev_;\n    source.next_ = 0;\n    source.prev_ = 0;\n  }\n\nprivate:\n  // Move the item at the given index up the heap to its correct position.\n  void up_heap(std::size_t index)\n  {\n    while (index > 0)\n    {\n      std::size_t parent = (index - 1) / 2;\n      if (!Time_Traits::less_than(heap_[index].time_, heap_[parent].time_))\n        break;\n      swap_heap(index, parent);\n      index = parent;\n    }\n  }\n\n  // Move the item at the given index down the heap to its correct position.\n  void down_heap(std::size_t index)\n  {\n    std::size_t child = index * 2 + 1;\n    while (child < heap_.size())\n    {\n      std::size_t min_child = (child + 1 == heap_.size()\n          || Time_Traits::less_than(\n            heap_[child].time_, heap_[child + 1].time_))\n        ? 
child : child + 1;\n      if (Time_Traits::less_than(heap_[index].time_, heap_[min_child].time_))\n        break;\n      swap_heap(index, min_child);\n      index = min_child;\n      child = index * 2 + 1;\n    }\n  }\n\n  // Swap two entries in the heap.\n  void swap_heap(std::size_t index1, std::size_t index2)\n  {\n    heap_entry tmp = heap_[index1];\n    heap_[index1] = heap_[index2];\n    heap_[index2] = tmp;\n    heap_[index1].timer_->heap_index_ = index1;\n    heap_[index2].timer_->heap_index_ = index2;\n  }\n\n  // Remove a timer from the heap and list of timers.\n  void remove_timer(per_timer_data& timer)\n  {\n    // Remove the timer from the heap.\n    std::size_t index = timer.heap_index_;\n    if (!heap_.empty() && index < heap_.size())\n    {\n      if (index == heap_.size() - 1)\n      {\n        timer.heap_index_ = (std::numeric_limits<std::size_t>::max)();\n        heap_.pop_back();\n      }\n      else\n      {\n        swap_heap(index, heap_.size() - 1);\n        timer.heap_index_ = (std::numeric_limits<std::size_t>::max)();\n        heap_.pop_back();\n        if (index > 0 && Time_Traits::less_than(\n              heap_[index].time_, heap_[(index - 1) / 2].time_))\n          up_heap(index);\n        else\n          down_heap(index);\n      }\n    }\n\n    // Remove the timer from the linked list of active timers.\n    if (timers_ == &timer)\n      timers_ = timer.next_;\n    if (timer.prev_)\n      timer.prev_->next_ = timer.next_;\n    if (timer.next_)\n      timer.next_->prev_= timer.prev_;\n    timer.next_ = 0;\n    timer.prev_ = 0;\n  }\n\n  // Determine if the specified absolute time is positive infinity.\n  template <typename Time_Type>\n  static bool is_positive_infinity(const Time_Type&)\n  {\n    return false;\n  }\n\n  // Determine if the specified absolute time is positive infinity.\n  template <typename T, typename TimeSystem>\n  static bool is_positive_infinity(\n      const boost::date_time::base_time<T, TimeSystem>& time)\n  {\n   
 return time.is_pos_infinity();\n  }\n\n  // Helper function to convert a duration into milliseconds.\n  template <typename Duration>\n  long to_msec(const Duration& d, long max_duration) const\n  {\n    if (d.ticks() <= 0)\n      return 0;\n    int64_t msec = d.total_milliseconds();\n    if (msec == 0)\n      return 1;\n    if (msec > max_duration)\n      return max_duration;\n    return static_cast<long>(msec);\n  }\n\n  // Helper function to convert a duration into microseconds.\n  template <typename Duration>\n  long to_usec(const Duration& d, long max_duration) const\n  {\n    if (d.ticks() <= 0)\n      return 0;\n    int64_t usec = d.total_microseconds();\n    if (usec == 0)\n      return 1;\n    if (usec > max_duration)\n      return max_duration;\n    return static_cast<long>(usec);\n  }\n\n  // The head of a linked list of all active timers.\n  per_timer_data* timers_;\n\n  struct heap_entry\n  {\n    // The time when the timer should fire.\n    time_type time_;\n\n    // The associated timer with enqueued operations.\n    per_timer_data* timer_;\n  };\n\n  // The heap of timers, with the earliest timer at the front.\n  std::vector<heap_entry> heap_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_TIMER_QUEUE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/timer_queue_base.hpp",
    "content": "//\n// detail/timer_queue_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_TIMER_QUEUE_BASE_HPP\n#define ASIO_DETAIL_TIMER_QUEUE_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass timer_queue_base\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  timer_queue_base() : next_(0) {}\n\n  // Destructor.\n  virtual ~timer_queue_base() {}\n\n  // Whether there are no timers in the queue.\n  virtual bool empty() const = 0;\n\n  // Get the time to wait until the next timer.\n  virtual long wait_duration_msec(long max_duration) const = 0;\n\n  // Get the time to wait until the next timer.\n  virtual long wait_duration_usec(long max_duration) const = 0;\n\n  // Dequeue all ready timers.\n  virtual void get_ready_timers(op_queue<operation>& ops) = 0;\n\n  // Dequeue all timers.\n  virtual void get_all_timers(op_queue<operation>& ops) = 0;\n\nprivate:\n  friend class timer_queue_set;\n\n  // Next timer queue in the set.\n  timer_queue_base* next_;\n};\n\ntemplate <typename Time_Traits>\nclass timer_queue;\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_TIMER_QUEUE_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/timer_queue_ptime.hpp",
    "content": "//\n// detail/timer_queue_ptime.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP\n#define ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME)\n\n#include \"asio/time_traits.hpp\"\n#include \"asio/detail/timer_queue.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct forwarding_posix_time_traits : time_traits<boost::posix_time::ptime> {};\n\n// Template specialisation for the commonly used instantation.\ntemplate <>\nclass timer_queue<time_traits<boost::posix_time::ptime> >\n  : public timer_queue_base\n{\npublic:\n  // The time type.\n  typedef boost::posix_time::ptime time_type;\n\n  // The duration type.\n  typedef boost::posix_time::time_duration duration_type;\n\n  // Per-timer data.\n  typedef timer_queue<forwarding_posix_time_traits>::per_timer_data\n    per_timer_data;\n\n  // Constructor.\n  ASIO_DECL timer_queue();\n\n  // Destructor.\n  ASIO_DECL virtual ~timer_queue();\n\n  // Add a new timer to the queue. 
Returns true if this is the timer that is\n  // earliest in the queue, in which case the reactor's event demultiplexing\n  // function call may need to be interrupted and restarted.\n  ASIO_DECL bool enqueue_timer(const time_type& time,\n      per_timer_data& timer, wait_op* op);\n\n  // Whether there are no timers in the queue.\n  ASIO_DECL virtual bool empty() const;\n\n  // Get the time for the timer that is earliest in the queue.\n  ASIO_DECL virtual long wait_duration_msec(long max_duration) const;\n\n  // Get the time for the timer that is earliest in the queue.\n  ASIO_DECL virtual long wait_duration_usec(long max_duration) const;\n\n  // Dequeue all timers not later than the current time.\n  ASIO_DECL virtual void get_ready_timers(op_queue<operation>& ops);\n\n  // Dequeue all timers.\n  ASIO_DECL virtual void get_all_timers(op_queue<operation>& ops);\n\n  // Cancel and dequeue operations for the given timer.\n  ASIO_DECL std::size_t cancel_timer(\n      per_timer_data& timer, op_queue<operation>& ops,\n      std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());\n\n  // Move operations from one timer to another, empty timer.\n  ASIO_DECL void move_timer(per_timer_data& target,\n      per_timer_data& source);\n\nprivate:\n  timer_queue<forwarding_posix_time_traits> impl_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/timer_queue_ptime.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n\n#endif // ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/timer_queue_set.hpp",
    "content": "//\n// detail/timer_queue_set.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_TIMER_QUEUE_SET_HPP\n#define ASIO_DETAIL_TIMER_QUEUE_SET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/timer_queue_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass timer_queue_set\n{\npublic:\n  // Constructor.\n  ASIO_DECL timer_queue_set();\n\n  // Add a timer queue to the set.\n  ASIO_DECL void insert(timer_queue_base* q);\n\n  // Remove a timer queue from the set.\n  ASIO_DECL void erase(timer_queue_base* q);\n\n  // Determine whether all queues are empty.\n  ASIO_DECL bool all_empty() const;\n\n  // Get the wait duration in milliseconds.\n  ASIO_DECL long wait_duration_msec(long max_duration) const;\n\n  // Get the wait duration in microseconds.\n  ASIO_DECL long wait_duration_usec(long max_duration) const;\n\n  // Dequeue all ready timers.\n  ASIO_DECL void get_ready_timers(op_queue<operation>& ops);\n\n  // Dequeue all timers.\n  ASIO_DECL void get_all_timers(op_queue<operation>& ops);\n\nprivate:\n  timer_queue_base* first_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/timer_queue_set.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_DETAIL_TIMER_QUEUE_SET_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/timer_scheduler.hpp",
    "content": "//\n// detail/timer_scheduler.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_TIMER_SCHEDULER_HPP\n#define ASIO_DETAIL_TIMER_SCHEDULER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/timer_scheduler_fwd.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include \"asio/detail/winrt_timer_scheduler.hpp\"\n#elif defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#elif defined(ASIO_HAS_EPOLL)\n# include \"asio/detail/epoll_reactor.hpp\"\n#elif defined(ASIO_HAS_KQUEUE)\n# include \"asio/detail/kqueue_reactor.hpp\"\n#elif defined(ASIO_HAS_DEV_POLL)\n# include \"asio/detail/dev_poll_reactor.hpp\"\n#else\n# include \"asio/detail/select_reactor.hpp\"\n#endif\n\n#endif // ASIO_DETAIL_TIMER_SCHEDULER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/timer_scheduler_fwd.hpp",
    "content": "//\n// detail/timer_scheduler_fwd.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP\n#define ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_WINDOWS_RUNTIME)\ntypedef class winrt_timer_scheduler timer_scheduler;\n#elif defined(ASIO_HAS_IOCP)\ntypedef class win_iocp_io_context timer_scheduler;\n#elif defined(ASIO_HAS_EPOLL)\ntypedef class epoll_reactor timer_scheduler;\n#elif defined(ASIO_HAS_KQUEUE)\ntypedef class kqueue_reactor timer_scheduler;\n#elif defined(ASIO_HAS_DEV_POLL)\ntypedef class dev_poll_reactor timer_scheduler;\n#else\ntypedef class select_reactor timer_scheduler;\n#endif\n\n} // namespace detail\n} // namespace asio\n\n#endif // ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/tss_ptr.hpp",
    "content": "//\n// detail/tss_ptr.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_TSS_PTR_HPP\n#define ASIO_DETAIL_TSS_PTR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_THREADS)\n# include \"asio/detail/null_tss_ptr.hpp\"\n#elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION)\n# include \"asio/detail/keyword_tss_ptr.hpp\"\n#elif defined(ASIO_WINDOWS)\n# include \"asio/detail/win_tss_ptr.hpp\"\n#elif defined(ASIO_HAS_PTHREADS)\n# include \"asio/detail/posix_tss_ptr.hpp\"\n#else\n# error Only Windows and POSIX are supported!\n#endif\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nclass tss_ptr\n#if !defined(ASIO_HAS_THREADS)\n  : public null_tss_ptr<T>\n#elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION)\n  : public keyword_tss_ptr<T>\n#elif defined(ASIO_WINDOWS)\n  : public win_tss_ptr<T>\n#elif defined(ASIO_HAS_PTHREADS)\n  : public posix_tss_ptr<T>\n#endif\n{\npublic:\n  void operator=(T* value)\n  {\n#if !defined(ASIO_HAS_THREADS)\n    null_tss_ptr<T>::operator=(value);\n#elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION)\n    keyword_tss_ptr<T>::operator=(value);\n#elif defined(ASIO_WINDOWS)\n    win_tss_ptr<T>::operator=(value);\n#elif defined(ASIO_HAS_PTHREADS)\n    posix_tss_ptr<T>::operator=(value);\n#endif\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_TSS_PTR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/type_traits.hpp",
    "content": "//\n// detail/type_traits.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_TYPE_TRAITS_HPP\n#define ASIO_DETAIL_TYPE_TRAITS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_TYPE_TRAITS)\n# include <type_traits>\n#else // defined(ASIO_HAS_TYPE_TRAITS)\n# include <boost/type_traits/add_const.hpp>\n# include <boost/type_traits/conditional.hpp>\n# include <boost/type_traits/decay.hpp>\n# include <boost/type_traits/integral_constant.hpp>\n# include <boost/type_traits/is_base_of.hpp>\n# include <boost/type_traits/is_class.hpp>\n# include <boost/type_traits/is_const.hpp>\n# include <boost/type_traits/is_convertible.hpp>\n# include <boost/type_traits/is_function.hpp>\n# include <boost/type_traits/is_same.hpp>\n# include <boost/type_traits/remove_pointer.hpp>\n# include <boost/type_traits/remove_reference.hpp>\n# include <boost/utility/declval.hpp>\n# include <boost/utility/enable_if.hpp>\n# include <boost/utility/result_of.hpp>\n#endif // defined(ASIO_HAS_TYPE_TRAITS)\n\nnamespace asio {\n\n#if defined(ASIO_HAS_STD_TYPE_TRAITS)\nusing std::add_const;\nusing std::conditional;\nusing std::decay;\nusing std::declval;\nusing std::enable_if;\nusing std::false_type;\nusing std::integral_constant;\nusing std::is_base_of;\nusing std::is_class;\nusing std::is_const;\nusing std::is_convertible;\nusing std::is_function;\nusing std::is_same;\nusing std::remove_pointer;\nusing std::remove_reference;\n#if defined(ASIO_HAS_STD_INVOKE_RESULT)\ntemplate <typename> struct result_of;\ntemplate <typename F, typename... 
Args>\nstruct result_of<F(Args...)> : std::invoke_result<F, Args...> {};\n#else // defined(ASIO_HAS_STD_INVOKE_RESULT)\nusing std::result_of;\n#endif // defined(ASIO_HAS_STD_INVOKE_RESULT)\nusing std::true_type;\n#else // defined(ASIO_HAS_STD_TYPE_TRAITS)\nusing boost::add_const;\ntemplate <bool Condition, typename Type = void>\nstruct enable_if : boost::enable_if_c<Condition, Type> {};\nusing boost::conditional;\nusing boost::decay;\nusing boost::declval;\nusing boost::false_type;\nusing boost::integral_constant;\nusing boost::is_base_of;\nusing boost::is_class;\nusing boost::is_const;\nusing boost::is_convertible;\nusing boost::is_function;\nusing boost::is_same;\nusing boost::remove_pointer;\nusing boost::remove_reference;\nusing boost::result_of;\nusing boost::true_type;\n#endif // defined(ASIO_HAS_STD_TYPE_TRAITS)\n\n} // namespace asio\n\n#endif // ASIO_DETAIL_TYPE_TRAITS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/variadic_templates.hpp",
    "content": "//\n// detail/variadic_templates.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_VARIADIC_TEMPLATES_HPP\n#define ASIO_DETAIL_VARIADIC_TEMPLATES_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n# define ASIO_VARIADIC_TPARAMS(n) ASIO_VARIADIC_TPARAMS_##n\n\n# define ASIO_VARIADIC_TPARAMS_1 \\\n  typename T1\n# define ASIO_VARIADIC_TPARAMS_2 \\\n  typename T1, typename T2\n# define ASIO_VARIADIC_TPARAMS_3 \\\n  typename T1, typename T2, typename T3\n# define ASIO_VARIADIC_TPARAMS_4 \\\n  typename T1, typename T2, typename T3, typename T4\n# define ASIO_VARIADIC_TPARAMS_5 \\\n  typename T1, typename T2, typename T3, typename T4, typename T5\n\n# define ASIO_VARIADIC_TARGS(n) ASIO_VARIADIC_TARGS_##n\n\n# define ASIO_VARIADIC_TARGS_1 T1\n# define ASIO_VARIADIC_TARGS_2 T1, T2\n# define ASIO_VARIADIC_TARGS_3 T1, T2, T3\n# define ASIO_VARIADIC_TARGS_4 T1, T2, T3, T4\n# define ASIO_VARIADIC_TARGS_5 T1, T2, T3, T4, T5\n\n# define ASIO_VARIADIC_BYVAL_PARAMS(n) \\\n  ASIO_VARIADIC_BYVAL_PARAMS_##n\n\n# define ASIO_VARIADIC_BYVAL_PARAMS_1 T1 x1\n# define ASIO_VARIADIC_BYVAL_PARAMS_2 T1 x1, T2 x2\n# define ASIO_VARIADIC_BYVAL_PARAMS_3 T1 x1, T2 x2, T3 x3\n# define ASIO_VARIADIC_BYVAL_PARAMS_4 T1 x1, T2 x2, T3 x3, T4 x4\n# define ASIO_VARIADIC_BYVAL_PARAMS_5 T1 x1, T2 x2, T3 x3, T4 x4, T5 x5\n\n# define ASIO_VARIADIC_BYVAL_ARGS(n) \\\n  ASIO_VARIADIC_BYVAL_ARGS_##n\n\n# define ASIO_VARIADIC_BYVAL_ARGS_1 x1\n# define ASIO_VARIADIC_BYVAL_ARGS_2 x1, x2\n# define ASIO_VARIADIC_BYVAL_ARGS_3 x1, x2, x3\n# define ASIO_VARIADIC_BYVAL_ARGS_4 x1, x2, x3, x4\n# define 
ASIO_VARIADIC_BYVAL_ARGS_5 x1, x2, x3, x4, x5\n\n# define ASIO_VARIADIC_CONSTREF_PARAMS(n) \\\n  ASIO_VARIADIC_CONSTREF_PARAMS_##n\n\n# define ASIO_VARIADIC_CONSTREF_PARAMS_1 \\\n  const T1& x1\n# define ASIO_VARIADIC_CONSTREF_PARAMS_2 \\\n  const T1& x1, const T2& x2\n# define ASIO_VARIADIC_CONSTREF_PARAMS_3 \\\n  const T1& x1, const T2& x2, const T3& x3\n# define ASIO_VARIADIC_CONSTREF_PARAMS_4 \\\n  const T1& x1, const T2& x2, const T3& x3, const T4& x4\n# define ASIO_VARIADIC_CONSTREF_PARAMS_5 \\\n  const T1& x1, const T2& x2, const T3& x3, const T4& x4, const T5& x5\n\n# define ASIO_VARIADIC_MOVE_PARAMS(n) \\\n  ASIO_VARIADIC_MOVE_PARAMS_##n\n\n# define ASIO_VARIADIC_MOVE_PARAMS_1 \\\n  ASIO_MOVE_ARG(T1) x1\n# define ASIO_VARIADIC_MOVE_PARAMS_2 \\\n  ASIO_MOVE_ARG(T1) x1, ASIO_MOVE_ARG(T2) x2\n# define ASIO_VARIADIC_MOVE_PARAMS_3 \\\n  ASIO_MOVE_ARG(T1) x1, ASIO_MOVE_ARG(T2) x2, \\\n  ASIO_MOVE_ARG(T3) x3\n# define ASIO_VARIADIC_MOVE_PARAMS_4 \\\n  ASIO_MOVE_ARG(T1) x1, ASIO_MOVE_ARG(T2) x2, \\\n  ASIO_MOVE_ARG(T3) x3, ASIO_MOVE_ARG(T4) x4\n# define ASIO_VARIADIC_MOVE_PARAMS_5 \\\n  ASIO_MOVE_ARG(T1) x1, ASIO_MOVE_ARG(T2) x2, \\\n  ASIO_MOVE_ARG(T3) x3, ASIO_MOVE_ARG(T4) x4, \\\n  ASIO_MOVE_ARG(T5) x5\n\n# define ASIO_VARIADIC_MOVE_ARGS(n) \\\n  ASIO_VARIADIC_MOVE_ARGS_##n\n\n# define ASIO_VARIADIC_MOVE_ARGS_1 \\\n  ASIO_MOVE_CAST(T1)(x1)\n# define ASIO_VARIADIC_MOVE_ARGS_2 \\\n  ASIO_MOVE_CAST(T1)(x1), ASIO_MOVE_CAST(T2)(x2)\n# define ASIO_VARIADIC_MOVE_ARGS_3 \\\n  ASIO_MOVE_CAST(T1)(x1), ASIO_MOVE_CAST(T2)(x2), \\\n  ASIO_MOVE_CAST(T3)(x3)\n# define ASIO_VARIADIC_MOVE_ARGS_4 \\\n  ASIO_MOVE_CAST(T1)(x1), ASIO_MOVE_CAST(T2)(x2), \\\n  ASIO_MOVE_CAST(T3)(x3), ASIO_MOVE_CAST(T4)(x4)\n# define ASIO_VARIADIC_MOVE_ARGS_5 \\\n  ASIO_MOVE_CAST(T1)(x1), ASIO_MOVE_CAST(T2)(x2), \\\n  ASIO_MOVE_CAST(T3)(x3), ASIO_MOVE_CAST(T4)(x4), \\\n  ASIO_MOVE_CAST(T5)(x5)\n\n# define ASIO_VARIADIC_MOVE_DECLVAL(n) \\\n  ASIO_VARIADIC_MOVE_DECLVAL_##n\n\n# define 
ASIO_VARIADIC_MOVE_DECLVAL_1 \\\n  declval<ASIO_MOVE_ARG(T1)>()\n# define ASIO_VARIADIC_MOVE_DECLVAL_2 \\\n  declval<ASIO_MOVE_ARG(T1)>(), declval<ASIO_MOVE_ARG(T2)>()\n# define ASIO_VARIADIC_MOVE_DECLVAL_3 \\\n  declval<ASIO_MOVE_ARG(T1)>(), declval<ASIO_MOVE_ARG(T2)>(), \\\n  declval<ASIO_MOVE_ARG(T3)>()\n# define ASIO_VARIADIC_MOVE_DECLVAL_4 \\\n  declval<ASIO_MOVE_ARG(T1)>(), declval<ASIO_MOVE_ARG(T2)>(), \\\n  declval<ASIO_MOVE_ARG(T3)>(), declval<ASIO_MOVE_ARG(T4)>()\n# define ASIO_VARIADIC_MOVE_DECLVAL_5 \\\n  declval<ASIO_MOVE_ARG(T1)>(), declval<ASIO_MOVE_ARG(T2)>(), \\\n  declval<ASIO_MOVE_ARG(T3)>(), declval<ASIO_MOVE_ARG(T4)>(), \\\n  declval<ASIO_MOVE_ARG(T5)>()\n\n# define ASIO_VARIADIC_DECAY(n) \\\n  ASIO_VARIADIC_DECAY_##n\n\n# define ASIO_VARIADIC_DECAY_1 \\\n  typename decay<T1>::type\n# define ASIO_VARIADIC_DECAY_2 \\\n  typename decay<T1>::type, typename decay<T2>::type\n# define ASIO_VARIADIC_DECAY_3 \\\n  typename decay<T1>::type, typename decay<T2>::type, \\\n  typename decay<T3>::type\n# define ASIO_VARIADIC_DECAY_4 \\\n  typename decay<T1>::type, typename decay<T2>::type, \\\n  typename decay<T3>::type, typename decay<T4>::type\n# define ASIO_VARIADIC_DECAY_5 \\\n  typename decay<T1>::type, typename decay<T2>::type, \\\n  typename decay<T3>::type, typename decay<T4>::type, \\\n  typename decay<T5>::type\n\n# define ASIO_VARIADIC_GENERATE(m) m(1) m(2) m(3) m(4) m(5)\n\n#endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#endif // ASIO_DETAIL_VARIADIC_TEMPLATES_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/wait_handler.hpp",
    "content": "//\n// detail/wait_handler.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WAIT_HANDLER_HPP\n#define ASIO_DETAIL_WAIT_HANDLER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_work.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename IoExecutor>\nclass wait_handler : public wait_op\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(wait_handler);\n\n  wait_handler(Handler& h, const IoExecutor& ex)\n    : wait_op(&wait_handler::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(h)),\n      io_executor_(ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& /*ec*/,\n      std::size_t /*bytes_transferred*/)\n  {\n    // Take ownership of the handler object.\n    wait_handler* h(static_cast<wait_handler*>(base));\n    ptr p = { asio::detail::addressof(h->handler_), h, h };\n    handler_work<Handler, IoExecutor> w(h->handler_, h->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*h));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder1<Handler, asio::error_code>\n      handler(h->handler_, h->ec_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_WAIT_HANDLER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/wait_op.hpp",
    "content": "//\n// detail/wait_op.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WAIT_OP_HPP\n#define ASIO_DETAIL_WAIT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass wait_op\n  : public operation\n{\npublic:\n  // The error code to be passed to the completion handler.\n  asio::error_code ec_;\n\nprotected:\n  wait_op(func_type func)\n    : operation(func)\n  {\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_WAIT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_event.hpp",
    "content": "//\n// detail/win_event.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_EVENT_HPP\n#define ASIO_DETAIL_WIN_EVENT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS)\n\n#include \"asio/detail/assert.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_event\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  ASIO_DECL win_event();\n\n  // Destructor.\n  ASIO_DECL ~win_event();\n\n  // Signal the event. (Retained for backward compatibility.)\n  template <typename Lock>\n  void signal(Lock& lock)\n  {\n    this->signal_all(lock);\n  }\n\n  // Signal all waiters.\n  template <typename Lock>\n  void signal_all(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    (void)lock;\n    state_ |= 1;\n    ::SetEvent(events_[0]);\n  }\n\n  // Unlock the mutex and signal one waiter.\n  template <typename Lock>\n  void unlock_and_signal_one(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    state_ |= 1;\n    bool have_waiters = (state_ > 1);\n    lock.unlock();\n    if (have_waiters)\n      ::SetEvent(events_[1]);\n  }\n\n  // If there's a waiter, unlock the mutex and signal it.\n  template <typename Lock>\n  bool maybe_unlock_and_signal_one(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    state_ |= 1;\n    if (state_ > 1)\n    {\n      lock.unlock();\n      ::SetEvent(events_[1]);\n      return true;\n    }\n    return false;\n  }\n\n  // Reset the event.\n  template <typename Lock>\n  void clear(Lock& lock)\n  {\n    
ASIO_ASSERT(lock.locked());\n    (void)lock;\n    ::ResetEvent(events_[0]);\n    state_ &= ~std::size_t(1);\n  }\n\n  // Wait for the event to become signalled.\n  template <typename Lock>\n  void wait(Lock& lock)\n  {\n    ASIO_ASSERT(lock.locked());\n    while ((state_ & 1) == 0)\n    {\n      state_ += 2;\n      lock.unlock();\n#if defined(ASIO_WINDOWS_APP)\n      ::WaitForMultipleObjectsEx(2, events_, false, INFINITE, false);\n#else // defined(ASIO_WINDOWS_APP)\n      ::WaitForMultipleObjects(2, events_, false, INFINITE);\n#endif // defined(ASIO_WINDOWS_APP)\n      lock.lock();\n      state_ -= 2;\n    }\n  }\n\n  // Timed wait for the event to become signalled.\n  template <typename Lock>\n  bool wait_for_usec(Lock& lock, long usec)\n  {\n    ASIO_ASSERT(lock.locked());\n    if ((state_ & 1) == 0)\n    {\n      state_ += 2;\n      lock.unlock();\n      DWORD msec = usec > 0 ? (usec < 1000 ? 1 : usec / 1000) : 0;\n#if defined(ASIO_WINDOWS_APP)\n      ::WaitForMultipleObjectsEx(2, events_, false, msec, false);\n#else // defined(ASIO_WINDOWS_APP)\n      ::WaitForMultipleObjects(2, events_, false, msec);\n#endif // defined(ASIO_WINDOWS_APP)\n      lock.lock();\n      state_ -= 2;\n    }\n    return (state_ & 1) != 0;\n  }\n\nprivate:\n  HANDLE events_[2];\n  std::size_t state_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_event.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_WIN_EVENT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_fd_set_adapter.hpp",
    "content": "//\n// detail/win_fd_set_adapter.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP\n#define ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/reactor_op_queue.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Adapts the FD_SET type to meet the Descriptor_Set concept's requirements.\nclass win_fd_set_adapter : noncopyable\n{\npublic:\n  enum { default_fd_set_size = 1024 };\n\n  win_fd_set_adapter()\n    : capacity_(default_fd_set_size),\n      max_descriptor_(invalid_socket)\n  {\n    fd_set_ = static_cast<win_fd_set*>(::operator new(\n          sizeof(win_fd_set) - sizeof(SOCKET)\n          + sizeof(SOCKET) * (capacity_)));\n    fd_set_->fd_count = 0;\n  }\n\n  ~win_fd_set_adapter()\n  {\n    ::operator delete(fd_set_);\n  }\n\n  void reset()\n  {\n    fd_set_->fd_count = 0;\n    max_descriptor_ = invalid_socket;\n  }\n\n  bool set(socket_type descriptor)\n  {\n    for (u_int i = 0; i < fd_set_->fd_count; ++i)\n      if (fd_set_->fd_array[i] == descriptor)\n        return true;\n\n    reserve(fd_set_->fd_count + 1);\n    fd_set_->fd_array[fd_set_->fd_count++] = descriptor;\n    return true;\n  }\n\n  void set(reactor_op_queue<socket_type>& operations, op_queue<operation>&)\n  {\n    reactor_op_queue<socket_type>::iterator i = operations.begin();\n    while (i != operations.end())\n    {\n      reactor_op_queue<socket_type>::iterator 
op_iter = i++;\n      reserve(fd_set_->fd_count + 1);\n      fd_set_->fd_array[fd_set_->fd_count++] = op_iter->first;\n    }\n  }\n\n  bool is_set(socket_type descriptor) const\n  {\n    return !!__WSAFDIsSet(descriptor,\n        const_cast<fd_set*>(reinterpret_cast<const fd_set*>(fd_set_)));\n  }\n\n  operator fd_set*()\n  {\n    return reinterpret_cast<fd_set*>(fd_set_);\n  }\n\n  socket_type max_descriptor() const\n  {\n    return max_descriptor_;\n  }\n\n  void perform(reactor_op_queue<socket_type>& operations,\n      op_queue<operation>& ops) const\n  {\n    for (u_int i = 0; i < fd_set_->fd_count; ++i)\n      operations.perform_operations(fd_set_->fd_array[i], ops);\n  }\n\nprivate:\n  // This structure is defined to be compatible with the Windows API fd_set\n  // structure, but without being dependent on the value of FD_SETSIZE. We use\n  // the \"struct hack\" to allow the number of descriptors to be varied at\n  // runtime.\n  struct win_fd_set\n  {\n    u_int fd_count;\n    SOCKET fd_array[1];\n  };\n\n  // Increase the fd_set_ capacity to at least the specified number of elements.\n  void reserve(u_int n)\n  {\n    if (n <= capacity_)\n      return;\n\n    u_int new_capacity = capacity_ + capacity_ / 2;\n    if (new_capacity < n)\n      new_capacity = n;\n\n    win_fd_set* new_fd_set = static_cast<win_fd_set*>(::operator new(\n          sizeof(win_fd_set) - sizeof(SOCKET)\n          + sizeof(SOCKET) * (new_capacity)));\n\n    new_fd_set->fd_count = fd_set_->fd_count;\n    for (u_int i = 0; i < fd_set_->fd_count; ++i)\n      new_fd_set->fd_array[i] = fd_set_->fd_array[i];\n\n    ::operator delete(fd_set_);\n    fd_set_ = new_fd_set;\n    capacity_ = new_capacity;\n  }\n\n  win_fd_set* fd_set_;\n  u_int capacity_;\n  socket_type max_descriptor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_fenced_block.hpp",
    "content": "//\n// detail/win_fenced_block.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_FENCED_BLOCK_HPP\n#define ASIO_DETAIL_WIN_FENCED_BLOCK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) && !defined(UNDER_CE)\n\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_fenced_block\n  : private noncopyable\n{\npublic:\n  enum half_t { half };\n  enum full_t { full };\n\n  // Constructor for a half fenced block.\n  explicit win_fenced_block(half_t)\n  {\n  }\n\n  // Constructor for a full fenced block.\n  explicit win_fenced_block(full_t)\n  {\n#if defined(__BORLANDC__)\n    LONG barrier = 0;\n    ::InterlockedExchange(&barrier, 1);\n#elif defined(ASIO_MSVC) \\\n  && ((ASIO_MSVC < 1400) || !defined(MemoryBarrier))\n# if defined(_M_IX86)\n#  pragma warning(push)\n#  pragma warning(disable:4793)\n    LONG barrier;\n    __asm { xchg barrier, eax }\n#  pragma warning(pop)\n# endif // defined(_M_IX86)\n#else\n    MemoryBarrier();\n#endif\n  }\n\n  // Destructor.\n  ~win_fenced_block()\n  {\n#if defined(__BORLANDC__)\n    LONG barrier = 0;\n    ::InterlockedExchange(&barrier, 1);\n#elif defined(ASIO_MSVC) \\\n  && ((ASIO_MSVC < 1400) || !defined(MemoryBarrier))\n# if defined(_M_IX86)\n#  pragma warning(push)\n#  pragma warning(disable:4793)\n    LONG barrier;\n    __asm { xchg barrier, eax }\n#  pragma warning(pop)\n# endif // defined(_M_IX86)\n#else\n    MemoryBarrier();\n#endif\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include 
\"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS) && !defined(UNDER_CE)\n\n#endif // ASIO_DETAIL_WIN_FENCED_BLOCK_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_global.hpp",
    "content": "//\n// detail/win_global.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_GLOBAL_HPP\n#define ASIO_DETAIL_WIN_GLOBAL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/static_mutex.hpp\"\n#include \"asio/detail/tss_ptr.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T>\nstruct win_global_impl\n{\n  // Destructor automatically cleans up the global.\n  ~win_global_impl()\n  {\n    delete ptr_;\n  }\n\n  static win_global_impl instance_;\n  static static_mutex mutex_;\n  T* ptr_;\n  static tss_ptr<T> tss_ptr_;\n};\n\ntemplate <typename T>\nwin_global_impl<T> win_global_impl<T>::instance_ = { 0 };\n\ntemplate <typename T>\nstatic_mutex win_global_impl<T>::mutex_ = ASIO_STATIC_MUTEX_INIT;\n\ntemplate <typename T>\ntss_ptr<T> win_global_impl<T>::tss_ptr_;\n\ntemplate <typename T>\nT& win_global()\n{\n  if (static_cast<T*>(win_global_impl<T>::tss_ptr_) == 0)\n  {\n    win_global_impl<T>::mutex_.init();\n    static_mutex::scoped_lock lock(win_global_impl<T>::mutex_);\n    if (win_global_impl<T>::instance_.ptr_ == 0)\n      win_global_impl<T>::instance_.ptr_ = new T;\n    win_global_impl<T>::tss_ptr_ = win_global_impl<T>::instance_.ptr_;\n  }\n\n  return *win_global_impl<T>::tss_ptr_;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_WIN_GLOBAL_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_handle_read_op.hpp",
    "content": "//\n// detail/win_iocp_handle_read_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/error.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence, typename Handler, typename IoExecutor>\nclass win_iocp_handle_read_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_handle_read_op);\n\n  win_iocp_handle_read_op(const MutableBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n    : operation(&win_iocp_handle_read_op::do_complete),\n      buffers_(buffers),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t bytes_transferred)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the operation object.\n    win_iocp_handle_read_op* 
o(static_cast<win_iocp_handle_read_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    if (owner)\n    {\n      // Check whether buffers are still valid.\n      buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::validate(o->buffers_);\n    }\n#endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n\n    // Map non-portable errors to their portable counterparts.\n    if (ec.value() == ERROR_HANDLE_EOF)\n      ec = asio::error::eof;\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, ec, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  MutableBufferSequence buffers_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_handle_service.hpp",
    "content": "//\n// detail/win_iocp_handle_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP\n#define ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/cstdint.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/win_iocp_handle_read_op.hpp\"\n#include \"asio/detail/win_iocp_handle_write_op.hpp\"\n#include \"asio/detail/win_iocp_io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_iocp_handle_service :\n  public execution_context_service_base<win_iocp_handle_service>\n{\npublic:\n  // The native type of a stream handle.\n  typedef HANDLE native_handle_type;\n\n  // The implementation type of the stream handle.\n  class implementation_type\n  {\n  public:\n    // Default constructor.\n    implementation_type()\n      : handle_(INVALID_HANDLE_VALUE),\n        safe_cancellation_thread_id_(0),\n        next_(0),\n        prev_(0)\n    {\n    }\n\n  private:\n    // Only this service will have access to the internal values.\n    friend class win_iocp_handle_service;\n\n    // The native stream handle representation.\n    native_handle_type handle_;\n\n    // The ID of the thread from 
which it is safe to cancel asynchronous\n    // operations. 0 means no asynchronous operations have been started yet.\n    // ~0 means asynchronous operations have been started from more than one\n    // thread, and cancellation is not supported for the handle.\n    DWORD safe_cancellation_thread_id_;\n\n    // Pointers to adjacent handle implementations in linked list.\n    implementation_type* next_;\n    implementation_type* prev_;\n  };\n\n  ASIO_DECL win_iocp_handle_service(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Construct a new handle implementation.\n  ASIO_DECL void construct(implementation_type& impl);\n\n  // Move-construct a new handle implementation.\n  ASIO_DECL void move_construct(implementation_type& impl,\n      implementation_type& other_impl);\n\n  // Move-assign from another handle implementation.\n  ASIO_DECL void move_assign(implementation_type& impl,\n      win_iocp_handle_service& other_service,\n      implementation_type& other_impl);\n\n  // Destroy a handle implementation.\n  ASIO_DECL void destroy(implementation_type& impl);\n\n  // Assign a native handle to a handle implementation.\n  ASIO_DECL asio::error_code assign(implementation_type& impl,\n      const native_handle_type& handle, asio::error_code& ec);\n\n  // Determine whether the handle is open.\n  bool is_open(const implementation_type& impl) const\n  {\n    return impl.handle_ != INVALID_HANDLE_VALUE;\n  }\n\n  // Destroy a handle implementation.\n  ASIO_DECL asio::error_code close(implementation_type& impl,\n      asio::error_code& ec);\n\n  // Get the native handle representation.\n  native_handle_type native_handle(const implementation_type& impl) const\n  {\n    return impl.handle_;\n  }\n\n  // Cancel all operations associated with the handle.\n  ASIO_DECL asio::error_code cancel(implementation_type& impl,\n      asio::error_code& ec);\n\n  // Write the given data. 
Returns the number of bytes written.\n  template <typename ConstBufferSequence>\n  size_t write_some(implementation_type& impl,\n      const ConstBufferSequence& buffers, asio::error_code& ec)\n  {\n    return write_some_at(impl, 0, buffers, ec);\n  }\n\n  // Write the given data at the specified offset. Returns the number of bytes\n  // written.\n  template <typename ConstBufferSequence>\n  size_t write_some_at(implementation_type& impl, uint64_t offset,\n      const ConstBufferSequence& buffers, asio::error_code& ec)\n  {\n    asio::const_buffer buffer =\n      buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence>::first(buffers);\n\n    return do_write(impl, offset, buffer, ec);\n  }\n\n  // Start an asynchronous write. The data being written must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_write_some(implementation_type& impl,\n      const ConstBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_handle_write_op<\n        ConstBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((iocp_service_.context(), *p.p, \"handle\", &impl,\n          reinterpret_cast<uintmax_t>(impl.handle_), \"async_write_some\"));\n\n    start_write_op(impl, 0,\n        buffer_sequence_adapter<asio::const_buffer,\n          ConstBufferSequence>::first(buffers), p.p);\n    p.v = p.p = 0;\n  }\n\n  // Start an asynchronous write at a specified offset. 
The data being written\n  // must be valid for the lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_write_some_at(implementation_type& impl,\n      uint64_t offset, const ConstBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_handle_write_op<\n        ConstBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((iocp_service_.context(), *p.p, \"handle\", &impl,\n          reinterpret_cast<uintmax_t>(impl.handle_), \"async_write_some_at\"));\n\n    start_write_op(impl, offset,\n        buffer_sequence_adapter<asio::const_buffer,\n          ConstBufferSequence>::first(buffers), p.p);\n    p.v = p.p = 0;\n  }\n\n  // Read some data. Returns the number of bytes received.\n  template <typename MutableBufferSequence>\n  size_t read_some(implementation_type& impl,\n      const MutableBufferSequence& buffers, asio::error_code& ec)\n  {\n    return read_some_at(impl, 0, buffers, ec);\n  }\n\n  // Read some data at a specified offset. Returns the number of bytes received.\n  template <typename MutableBufferSequence>\n  size_t read_some_at(implementation_type& impl, uint64_t offset,\n      const MutableBufferSequence& buffers, asio::error_code& ec)\n  {\n    asio::mutable_buffer buffer =\n      buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence>::first(buffers);\n\n    return do_read(impl, offset, buffer, ec);\n  }\n\n  // Start an asynchronous read. 
The buffer for the data being received must be\n  // valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_read_some(implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_handle_read_op<\n        MutableBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((iocp_service_.context(), *p.p, \"handle\", &impl,\n          reinterpret_cast<uintmax_t>(impl.handle_), \"async_read_some\"));\n\n    start_read_op(impl, 0,\n        buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::first(buffers), p.p);\n    p.v = p.p = 0;\n  }\n\n  // Start an asynchronous read at a specified offset. 
The buffer for the data\n  // being received must be valid for the lifetime of the asynchronous\n  // operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_read_some_at(implementation_type& impl,\n      uint64_t offset, const MutableBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_handle_read_op<\n        MutableBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((iocp_service_.context(), *p.p, \"handle\", &impl,\n          reinterpret_cast<uintmax_t>(impl.handle_), \"async_read_some_at\"));\n\n    start_read_op(impl, offset,\n        buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::first(buffers), p.p);\n    p.v = p.p = 0;\n  }\n\nprivate:\n  // Prevent the use of the null_buffers type with this service.\n  size_t write_some(implementation_type& impl,\n      const null_buffers& buffers, asio::error_code& ec);\n  size_t write_some_at(implementation_type& impl, uint64_t offset,\n      const null_buffers& buffers, asio::error_code& ec);\n  template <typename Handler, typename IoExecutor>\n  void async_write_some(implementation_type& impl,\n      const null_buffers& buffers, Handler& handler,\n      const IoExecutor& io_ex);\n  template <typename Handler, typename IoExecutor>\n  void async_write_some_at(implementation_type& impl, uint64_t offset,\n      const null_buffers& buffers, Handler& handler, const IoExecutor& io_ex);\n  size_t read_some(implementation_type& impl,\n      const null_buffers& buffers, asio::error_code& ec);\n  size_t read_some_at(implementation_type& impl, uint64_t offset,\n      const null_buffers& buffers, asio::error_code& ec);\n  template <typename Handler, typename 
 IoExecutor>\n  void async_read_some(implementation_type& impl,\n      const null_buffers& buffers, Handler& handler,\n      const IoExecutor& io_ex);\n  template <typename Handler, typename IoExecutor>\n  void async_read_some_at(implementation_type& impl, uint64_t offset,\n      const null_buffers& buffers, Handler& handler, const IoExecutor& io_ex);\n\n  // Helper class for waiting for synchronous operations to complete.\n  class overlapped_wrapper;\n\n  // Helper function to perform a synchronous write operation.\n  ASIO_DECL size_t do_write(implementation_type& impl,\n      uint64_t offset, const asio::const_buffer& buffer,\n      asio::error_code& ec);\n\n  // Helper function to start a write operation.\n  ASIO_DECL void start_write_op(implementation_type& impl,\n      uint64_t offset, const asio::const_buffer& buffer,\n      operation* op);\n\n  // Helper function to perform a synchronous read operation.\n  ASIO_DECL size_t do_read(implementation_type& impl,\n      uint64_t offset, const asio::mutable_buffer& buffer,\n      asio::error_code& ec);\n\n  // Helper function to start a read operation.\n  ASIO_DECL void start_read_op(implementation_type& impl,\n      uint64_t offset, const asio::mutable_buffer& buffer,\n      operation* op);\n\n  // Update the ID of the thread from which cancellation is safe.\n  ASIO_DECL void update_cancellation_thread_id(implementation_type& impl);\n\n  // Helper function to close a handle when the associated object is being\n  // destroyed.\n  ASIO_DECL void close_for_destruction(implementation_type& impl);\n\n  // The IOCP service used for running asynchronous operations and dispatching\n  // handlers.\n  win_iocp_io_context& iocp_service_;\n\n  // Mutex to protect access to the linked list of implementations.\n  mutex mutex_;\n\n  // The head of a linked list of all implementations.\n  implementation_type* impl_list_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if 
defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_iocp_handle_service.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_handle_write_op.hpp",
    "content": "//\n// detail/win_iocp_handle_write_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/error.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename ConstBufferSequence, typename Handler, typename IoExecutor>\nclass win_iocp_handle_write_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_handle_write_op);\n\n  win_iocp_handle_write_op(const ConstBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n    : operation(&win_iocp_handle_write_op::do_complete),\n      buffers_(buffers),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& ec, std::size_t bytes_transferred)\n  {\n    // Take ownership of the operation object.\n    win_iocp_handle_write_op* o(static_cast<win_iocp_handle_write_op*>(base));\n    ptr p = { 
asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    if (owner)\n    {\n      // Check whether buffers are still valid.\n      buffer_sequence_adapter<asio::const_buffer,\n          ConstBufferSequence>::validate(o->buffers_);\n    }\n#endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, ec, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  ConstBufferSequence buffers_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_io_context.hpp",
    "content": "//\n// detail/win_iocp_io_context.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_IO_CONTEXT_HPP\n#define ASIO_DETAIL_WIN_IOCP_IO_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/thread.hpp\"\n#include \"asio/detail/thread_context.hpp\"\n#include \"asio/detail/timer_queue_base.hpp\"\n#include \"asio/detail/timer_queue_set.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n#include \"asio/detail/win_iocp_operation.hpp\"\n#include \"asio/detail/win_iocp_thread_info.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass wait_op;\n\nclass win_iocp_io_context\n  : public execution_context_service_base<win_iocp_io_context>,\n    public thread_context\n{\npublic:\n  // Constructor. Specifies a concurrency hint that is passed through to the\n  // underlying I/O completion port.\n  ASIO_DECL win_iocp_io_context(asio::execution_context& ctx,\n      int concurrency_hint = -1, bool own_thread = true);\n\n  // Destructor.\n  ASIO_DECL ~win_iocp_io_context();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Initialise the task. 
Nothing to do here.\n  void init_task()\n  {\n  }\n\n  // Register a handle with the IO completion port.\n  ASIO_DECL asio::error_code register_handle(\n      HANDLE handle, asio::error_code& ec);\n\n  // Run the event loop until stopped or no more work.\n  ASIO_DECL size_t run(asio::error_code& ec);\n\n  // Run until stopped or one operation is performed.\n  ASIO_DECL size_t run_one(asio::error_code& ec);\n\n  // Run until timeout, interrupted, or one operation is performed.\n  ASIO_DECL size_t wait_one(long usec, asio::error_code& ec);\n\n  // Poll for operations without blocking.\n  ASIO_DECL size_t poll(asio::error_code& ec);\n\n  // Poll for one operation without blocking.\n  ASIO_DECL size_t poll_one(asio::error_code& ec);\n\n  // Stop the event processing loop.\n  ASIO_DECL void stop();\n\n  // Determine whether the io_context is stopped.\n  bool stopped() const\n  {\n    return ::InterlockedExchangeAdd(&stopped_, 0) != 0;\n  }\n\n  // Restart in preparation for a subsequent run invocation.\n  void restart()\n  {\n    ::InterlockedExchange(&stopped_, 0);\n  }\n\n  // Notify that some work has started.\n  void work_started()\n  {\n    ::InterlockedIncrement(&outstanding_work_);\n  }\n\n  // Notify that some work has finished.\n  void work_finished()\n  {\n    if (::InterlockedDecrement(&outstanding_work_) == 0)\n      stop();\n  }\n\n  // Return whether a handler can be dispatched immediately.\n  bool can_dispatch()\n  {\n    return thread_call_stack::contains(this) != 0;\n  }\n\n  // Request invocation of the given operation and return immediately. Assumes\n  // that work_started() has not yet been called for the operation.\n  void post_immediate_completion(win_iocp_operation* op, bool)\n  {\n    work_started();\n    post_deferred_completion(op);\n  }\n\n  // Request invocation of the given operation and return immediately. 
Assumes\n  // that work_started() was previously called for the operation.\n  ASIO_DECL void post_deferred_completion(win_iocp_operation* op);\n\n  // Request invocation of the given operation and return immediately. Assumes\n  // that work_started() was previously called for the operations.\n  ASIO_DECL void post_deferred_completions(\n      op_queue<win_iocp_operation>& ops);\n\n  // Request invocation of the given operation using the thread-private queue\n  // and return immediately. Assumes that work_started() has not yet been\n  // called for the operation.\n  void post_private_immediate_completion(win_iocp_operation* op)\n  {\n    post_immediate_completion(op, false);\n  }\n\n  // Request invocation of the given operation using the thread-private queue\n  // and return immediately. Assumes that work_started() was previously called\n  // for the operation.\n  void post_private_deferred_completion(win_iocp_operation* op)\n  {\n    post_deferred_completion(op);\n  }\n\n  // Enqueue the given operation following a failed attempt to dispatch the\n  // operation for immediate invocation.\n  void do_dispatch(operation* op)\n  {\n    post_immediate_completion(op, false);\n  }\n\n  // Process unfinished operations as part of a shutdown operation. Assumes\n  // that work_started() was previously called for the operations.\n  ASIO_DECL void abandon_operations(op_queue<operation>& ops);\n\n  // Called after starting an overlapped I/O operation that did not complete\n  // immediately. The caller must have already called work_started() prior to\n  // starting the operation.\n  ASIO_DECL void on_pending(win_iocp_operation* op);\n\n  // Called after starting an overlapped I/O operation that completed\n  // immediately. 
The caller must have already called work_started() prior to\n  // starting the operation.\n  ASIO_DECL void on_completion(win_iocp_operation* op,\n      DWORD last_error = 0, DWORD bytes_transferred = 0);\n\n  // Called after starting an overlapped I/O operation that completed\n  // immediately. The caller must have already called work_started() prior to\n  // starting the operation.\n  ASIO_DECL void on_completion(win_iocp_operation* op,\n      const asio::error_code& ec, DWORD bytes_transferred = 0);\n\n  // Add a new timer queue to the service.\n  template <typename Time_Traits>\n  void add_timer_queue(timer_queue<Time_Traits>& timer_queue);\n\n  // Remove a timer queue from the service.\n  template <typename Time_Traits>\n  void remove_timer_queue(timer_queue<Time_Traits>& timer_queue);\n\n  // Schedule a new operation in the given timer queue to expire at the\n  // specified absolute time.\n  template <typename Time_Traits>\n  void schedule_timer(timer_queue<Time_Traits>& queue,\n      const typename Time_Traits::time_type& time,\n      typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op);\n\n  // Cancel the timer associated with the given token. 
Returns the number of\n  // handlers that have been posted or dispatched.\n  template <typename Time_Traits>\n  std::size_t cancel_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& timer,\n      std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());\n\n  // Move the timer operations associated with the given timer.\n  template <typename Time_Traits>\n  void move_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& to,\n      typename timer_queue<Time_Traits>::per_timer_data& from);\n\n  // Get the concurrency hint that was used to initialise the io_context.\n  int concurrency_hint() const\n  {\n    return concurrency_hint_;\n  }\n\nprivate:\n#if defined(WINVER) && (WINVER < 0x0500)\n  typedef DWORD dword_ptr_t;\n  typedef ULONG ulong_ptr_t;\n#else // defined(WINVER) && (WINVER < 0x0500)\n  typedef DWORD_PTR dword_ptr_t;\n  typedef ULONG_PTR ulong_ptr_t;\n#endif // defined(WINVER) && (WINVER < 0x0500)\n\n  // Dequeues at most one operation from the I/O completion port, and then\n  // executes it. 
Returns the number of operations that were dequeued (i.e.\n  // either 0 or 1).\n  ASIO_DECL size_t do_one(DWORD msec, asio::error_code& ec);\n\n  // Helper to calculate the GetQueuedCompletionStatus timeout.\n  ASIO_DECL static DWORD get_gqcs_timeout();\n\n  // Helper function to add a new timer queue.\n  ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);\n\n  // Helper function to remove a timer queue.\n  ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);\n\n  // Called to recalculate and update the timeout.\n  ASIO_DECL void update_timeout();\n\n  // Helper class to call work_finished() on block exit.\n  struct work_finished_on_block_exit;\n\n  // Helper class for managing a HANDLE.\n  struct auto_handle\n  {\n    HANDLE handle;\n    auto_handle() : handle(0) {}\n    ~auto_handle() { if (handle) ::CloseHandle(handle); }\n  };\n\n  // The IO completion port used for queueing operations.\n  auto_handle iocp_;\n\n  // The count of unfinished work.\n  long outstanding_work_;\n\n  // Flag to indicate whether the event loop has been stopped.\n  mutable long stopped_;\n\n  // Flag to indicate whether there is an in-flight stop event. Every event\n  // posted using PostQueuedCompletionStatus consumes non-paged pool, so to\n  // avoid exhausting this resouce we limit the number of outstanding events.\n  long stop_event_posted_;\n\n  // Flag to indicate whether the service has been shut down.\n  long shutdown_;\n\n  enum\n  {\n    // Timeout to use with GetQueuedCompletionStatus on older versions of\n    // Windows. Some versions of windows have a \"bug\" where a call to\n    // GetQueuedCompletionStatus can appear stuck even though there are events\n    // waiting on the queue. 
Using a timeout helps to work around the issue.\n    default_gqcs_timeout = 500,\n\n    // Maximum waitable timer timeout, in milliseconds.\n    max_timeout_msec = 5 * 60 * 1000,\n\n    // Maximum waitable timer timeout, in microseconds.\n    max_timeout_usec = max_timeout_msec * 1000,\n\n    // Completion key value used to wake up a thread to dispatch timers or\n    // completed operations.\n    wake_for_dispatch = 1,\n\n    // Completion key value to indicate that an operation has posted with the\n    // original last_error and bytes_transferred values stored in the fields of\n    // the OVERLAPPED structure.\n    overlapped_contains_result = 2\n  };\n\n  // Timeout to use with GetQueuedCompletionStatus.\n  const DWORD gqcs_timeout_;\n\n  // Helper class to run the scheduler in its own thread.\n  struct thread_function;\n  friend struct thread_function;\n\n  // Function object for processing timeouts in a background thread.\n  struct timer_thread_function;\n  friend struct timer_thread_function;\n\n  // Background thread used for processing timeouts.\n  scoped_ptr<thread> timer_thread_;\n\n  // A waitable timer object used for waiting for timeouts.\n  auto_handle waitable_timer_;\n\n  // Non-zero if timers or completed operations need to be dispatched.\n  long dispatch_required_;\n\n  // Mutex for protecting access to the timer queues and completed operations.\n  mutex dispatch_mutex_;\n\n  // The timer queues.\n  timer_queue_set timer_queues_;\n\n  // The operations that are ready to dispatch.\n  op_queue<win_iocp_operation> completed_ops_;\n\n  // The concurrency hint used to initialise the io_context.\n  const int concurrency_hint_;\n\n  // The thread that is running the io_context.\n  scoped_ptr<thread> thread_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/detail/impl/win_iocp_io_context.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_iocp_io_context.ipp\"\n#endif // 
defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_IO_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_null_buffers_op.hpp",
    "content": "//\n// detail/win_iocp_null_buffers_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename IoExecutor>\nclass win_iocp_null_buffers_op : public reactor_op\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_null_buffers_op);\n\n  win_iocp_null_buffers_op(socket_ops::weak_cancel_token_type cancel_token,\n      Handler& handler, const IoExecutor& io_ex)\n    : reactor_op(&win_iocp_null_buffers_op::do_perform,\n        &win_iocp_null_buffers_op::do_complete),\n      cancel_token_(cancel_token),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static status do_perform(reactor_op*)\n  {\n    return done;\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t bytes_transferred)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the 
operation object.\n    win_iocp_null_buffers_op* o(static_cast<win_iocp_null_buffers_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // The reactor may have stored a result in the operation object.\n    if (o->ec_)\n      ec = o->ec_;\n\n    // Map non-portable errors to their portable counterparts.\n    if (ec.value() == ERROR_NETNAME_DELETED)\n    {\n      if (o->cancel_token_.expired())\n        ec = asio::error::operation_aborted;\n      else\n        ec = asio::error::connection_reset;\n    }\n    else if (ec.value() == ERROR_PORT_UNREACHABLE)\n    {\n      ec = asio::error::connection_refused;\n    }\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, ec, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  socket_ops::weak_cancel_token_type cancel_token_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_operation.hpp",
    "content": "//\n// detail/win_iocp_operation.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_OPERATION_HPP\n#define ASIO_DETAIL_WIN_IOCP_OPERATION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/handler_tracking.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/error_code.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_iocp_io_context;\n\n// Base class for all operations. A function pointer is used instead of virtual\n// functions to avoid the associated overhead.\nclass win_iocp_operation\n  : public OVERLAPPED\n    ASIO_ALSO_INHERIT_TRACKED_HANDLER\n{\npublic:\n  typedef win_iocp_operation operation_type;\n\n  void complete(void* owner, const asio::error_code& ec,\n      std::size_t bytes_transferred)\n  {\n    func_(owner, this, ec, bytes_transferred);\n  }\n\n  void destroy()\n  {\n    func_(0, this, asio::error_code(), 0);\n  }\n\nprotected:\n  typedef void (*func_type)(\n      void*, win_iocp_operation*,\n      const asio::error_code&, std::size_t);\n\n  win_iocp_operation(func_type func)\n    : next_(0),\n      func_(func)\n  {\n    reset();\n  }\n\n  // Prevents deletion through this type.\n  ~win_iocp_operation()\n  {\n  }\n\n  void reset()\n  {\n    Internal = 0;\n    InternalHigh = 0;\n    Offset = 0;\n    OffsetHigh = 0;\n    hEvent = 0;\n    ready_ = 0;\n  }\n\nprivate:\n  friend class op_queue_access;\n  friend class win_iocp_io_context;\n  win_iocp_operation* next_;\n  func_type func_;\n  long ready_;\n};\n\n} // 
namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_OPERATION_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_overlapped_op.hpp",
    "content": "//\n// detail/win_iocp_overlapped_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/error.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename IoExecutor>\nclass win_iocp_overlapped_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_overlapped_op);\n\n  win_iocp_overlapped_op(Handler& handler, const IoExecutor& io_ex)\n    : operation(&win_iocp_overlapped_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& ec, std::size_t bytes_transferred)\n  {\n    // Take ownership of the operation object.\n    win_iocp_overlapped_op* o(static_cast<win_iocp_overlapped_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. 
Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, ec, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_overlapped_ptr.hpp",
    "content": "//\n// detail/win_iocp_overlapped_ptr.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP\n#define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/io_context.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/io_object_executor.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/win_iocp_overlapped_op.hpp\"\n#include \"asio/detail/win_iocp_io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Wraps a handler to create an OVERLAPPED object for use with overlapped I/O.\nclass win_iocp_overlapped_ptr\n  : private noncopyable\n{\npublic:\n  // Construct an empty win_iocp_overlapped_ptr.\n  win_iocp_overlapped_ptr()\n    : ptr_(0),\n      iocp_service_(0)\n  {\n  }\n\n  // Construct an win_iocp_overlapped_ptr to contain the specified handler.\n  template <typename Executor, typename Handler>\n  explicit win_iocp_overlapped_ptr(const Executor& ex,\n      ASIO_MOVE_ARG(Handler) handler)\n    : ptr_(0),\n      iocp_service_(0)\n  {\n    this->reset(ex, ASIO_MOVE_CAST(Handler)(handler));\n  }\n\n  // Destructor automatically frees the OVERLAPPED object unless released.\n  ~win_iocp_overlapped_ptr()\n  {\n    reset();\n  }\n\n  // Reset to empty.\n  void reset()\n  {\n    if (ptr_)\n    {\n      ptr_->destroy();\n      ptr_ = 0;\n      iocp_service_->work_finished();\n      iocp_service_ = 0;\n    }\n  }\n\n  // Reset to contain the specified 
handler, freeing any current OVERLAPPED\n  // object.\n  template <typename Executor, typename Handler>\n  void reset(const Executor& ex, Handler handler)\n  {\n    const bool native = is_same<Executor, io_context::executor_type>::value;\n    win_iocp_io_context* iocp_service = this->get_iocp_service(ex);\n\n    typedef win_iocp_overlapped_op<Handler, io_object_executor<Executor> > op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_object_executor<Executor>(ex, native));\n\n    ASIO_HANDLER_CREATION((ex.context(), *p.p,\n          \"iocp_service\", iocp_service, 0, \"overlapped\"));\n\n    iocp_service->work_started();\n    reset();\n    ptr_ = p.p;\n    p.v = p.p = 0;\n    iocp_service_ = iocp_service;\n  }\n\n  // Get the contained OVERLAPPED object.\n  OVERLAPPED* get()\n  {\n    return ptr_;\n  }\n\n  // Get the contained OVERLAPPED object.\n  const OVERLAPPED* get() const\n  {\n    return ptr_;\n  }\n\n  // Release ownership of the OVERLAPPED object.\n  OVERLAPPED* release()\n  {\n    if (ptr_)\n      iocp_service_->on_pending(ptr_);\n\n    OVERLAPPED* tmp = ptr_;\n    ptr_ = 0;\n    iocp_service_ = 0;\n    return tmp;\n  }\n\n  // Post completion notification for overlapped operation. 
Releases ownership.\n  void complete(const asio::error_code& ec,\n      std::size_t bytes_transferred)\n  {\n    if (ptr_)\n    {\n      iocp_service_->on_completion(ptr_, ec,\n          static_cast<DWORD>(bytes_transferred));\n      ptr_ = 0;\n      iocp_service_ = 0;\n    }\n  }\n\nprivate:\n  template <typename Executor>\n  static win_iocp_io_context* get_iocp_service(const Executor& ex)\n  {\n    return &use_service<win_iocp_io_context>(ex.context());\n  }\n\n  static win_iocp_io_context* get_iocp_service(\n      const io_context::executor_type& ex)\n  {\n    return &ex.context().impl_;\n  }\n\n  win_iocp_operation* ptr_;\n  win_iocp_io_context* iocp_service_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_serial_port_service.hpp",
    "content": "//\n// detail/win_iocp_serial_port_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP\n#define ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)\n\n#include <string>\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/win_iocp_handle_service.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Extend win_iocp_handle_service to provide serial port support.\nclass win_iocp_serial_port_service :\n  public execution_context_service_base<win_iocp_serial_port_service>\n{\npublic:\n  // The native type of a serial port.\n  typedef win_iocp_handle_service::native_handle_type native_handle_type;\n\n  // The implementation type of the serial port.\n  typedef win_iocp_handle_service::implementation_type implementation_type;\n\n  // Constructor.\n  ASIO_DECL win_iocp_serial_port_service(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Construct a new serial port implementation.\n  void construct(implementation_type& impl)\n  {\n    handle_service_.construct(impl);\n  }\n\n  // Move-construct a new serial port implementation.\n  void move_construct(implementation_type& impl,\n      implementation_type& other_impl)\n  {\n    handle_service_.move_construct(impl, other_impl);\n  }\n\n  // Move-assign from 
another serial port implementation.\n  void move_assign(implementation_type& impl,\n      win_iocp_serial_port_service& other_service,\n      implementation_type& other_impl)\n  {\n    handle_service_.move_assign(impl,\n        other_service.handle_service_, other_impl);\n  }\n\n  // Destroy a serial port implementation.\n  void destroy(implementation_type& impl)\n  {\n    handle_service_.destroy(impl);\n  }\n\n  // Open the serial port using the specified device name.\n  ASIO_DECL asio::error_code open(implementation_type& impl,\n      const std::string& device, asio::error_code& ec);\n\n  // Assign a native handle to a serial port implementation.\n  asio::error_code assign(implementation_type& impl,\n      const native_handle_type& handle, asio::error_code& ec)\n  {\n    return handle_service_.assign(impl, handle, ec);\n  }\n\n  // Determine whether the serial port is open.\n  bool is_open(const implementation_type& impl) const\n  {\n    return handle_service_.is_open(impl);\n  }\n\n  // Destroy a serial port implementation.\n  asio::error_code close(implementation_type& impl,\n      asio::error_code& ec)\n  {\n    return handle_service_.close(impl, ec);\n  }\n\n  // Get the native serial port representation.\n  native_handle_type native_handle(implementation_type& impl)\n  {\n    return handle_service_.native_handle(impl);\n  }\n\n  // Cancel all operations associated with the handle.\n  asio::error_code cancel(implementation_type& impl,\n      asio::error_code& ec)\n  {\n    return handle_service_.cancel(impl, ec);\n  }\n\n  // Set an option on the serial port.\n  template <typename SettableSerialPortOption>\n  asio::error_code set_option(implementation_type& impl,\n      const SettableSerialPortOption& option, asio::error_code& ec)\n  {\n    return do_set_option(impl,\n        &win_iocp_serial_port_service::store_option<SettableSerialPortOption>,\n        &option, ec);\n  }\n\n  // Get an option from the serial port.\n  template <typename 
GettableSerialPortOption>\n  asio::error_code get_option(const implementation_type& impl,\n      GettableSerialPortOption& option, asio::error_code& ec) const\n  {\n    return do_get_option(impl,\n        &win_iocp_serial_port_service::load_option<GettableSerialPortOption>,\n        &option, ec);\n  }\n\n  // Send a break sequence to the serial port.\n  asio::error_code send_break(implementation_type&,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Write the given data. Returns the number of bytes sent.\n  template <typename ConstBufferSequence>\n  size_t write_some(implementation_type& impl,\n      const ConstBufferSequence& buffers, asio::error_code& ec)\n  {\n    return handle_service_.write_some(impl, buffers, ec);\n  }\n\n  // Start an asynchronous write. The data being written must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_write_some(implementation_type& impl,\n      const ConstBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    handle_service_.async_write_some(impl, buffers, handler, io_ex);\n  }\n\n  // Read some data. Returns the number of bytes received.\n  template <typename MutableBufferSequence>\n  size_t read_some(implementation_type& impl,\n      const MutableBufferSequence& buffers, asio::error_code& ec)\n  {\n    return handle_service_.read_some(impl, buffers, ec);\n  }\n\n  // Start an asynchronous read. 
The buffer for the data being received must be\n  // valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_read_some(implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    handle_service_.async_read_some(impl, buffers, handler, io_ex);\n  }\n\nprivate:\n  // Function pointer type for storing a serial port option.\n  typedef asio::error_code (*store_function_type)(\n      const void*, ::DCB&, asio::error_code&);\n\n  // Helper function template to store a serial port option.\n  template <typename SettableSerialPortOption>\n  static asio::error_code store_option(const void* option,\n      ::DCB& storage, asio::error_code& ec)\n  {\n    static_cast<const SettableSerialPortOption*>(option)->store(storage, ec);\n    return ec;\n  }\n\n  // Helper function to set a serial port option.\n  ASIO_DECL asio::error_code do_set_option(\n      implementation_type& impl, store_function_type store,\n      const void* option, asio::error_code& ec);\n\n  // Function pointer type for loading a serial port option.\n  typedef asio::error_code (*load_function_type)(\n      void*, const ::DCB&, asio::error_code&);\n\n  // Helper function template to load a serial port option.\n  template <typename GettableSerialPortOption>\n  static asio::error_code load_option(void* option,\n      const ::DCB& storage, asio::error_code& ec)\n  {\n    static_cast<GettableSerialPortOption*>(option)->load(storage, ec);\n    return ec;\n  }\n\n  // Helper function to get a serial port option.\n  ASIO_DECL asio::error_code do_get_option(\n      const implementation_type& impl, load_function_type load,\n      void* option, asio::error_code& ec) const;\n\n  // The implementation used for initiating asynchronous operations.\n  win_iocp_handle_service handle_service_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include 
\"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_iocp_serial_port_service.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_socket_accept_op.hpp",
    "content": "//\n// detail/win_iocp_socket_accept_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/win_iocp_socket_service_base.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Socket, typename Protocol,\n    typename Handler, typename IoExecutor>\nclass win_iocp_socket_accept_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_accept_op);\n\n  win_iocp_socket_accept_op(win_iocp_socket_service_base& socket_service,\n      socket_type socket, Socket& peer, const Protocol& protocol,\n      typename Protocol::endpoint* peer_endpoint,\n      bool enable_connection_aborted, Handler& handler, const IoExecutor& io_ex)\n    : operation(&win_iocp_socket_accept_op::do_complete),\n      socket_service_(socket_service),\n      socket_(socket),\n      peer_(peer),\n      protocol_(protocol),\n      peer_endpoint_(peer_endpoint),\n      enable_connection_aborted_(enable_connection_aborted),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      
io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  socket_holder& new_socket()\n  {\n    return new_socket_;\n  }\n\n  void* output_buffer()\n  {\n    return output_buffer_;\n  }\n\n  DWORD address_length()\n  {\n    return sizeof(sockaddr_storage_type) + 16;\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t /*bytes_transferred*/)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the operation object.\n    win_iocp_socket_accept_op* o(static_cast<win_iocp_socket_accept_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    if (owner)\n    {\n      typename Protocol::endpoint peer_endpoint;\n      std::size_t addr_len = peer_endpoint.capacity();\n      socket_ops::complete_iocp_accept(o->socket_,\n          o->output_buffer(), o->address_length(),\n          peer_endpoint.data(), &addr_len,\n          o->new_socket_.get(), ec);\n\n      // Restart the accept operation if we got the connection_aborted error\n      // and the enable_connection_aborted socket option is not set.\n      if (ec == asio::error::connection_aborted\n          && !o->enable_connection_aborted_)\n      {\n        o->reset();\n        o->socket_service_.restart_accept_op(o->socket_,\n            o->new_socket_, o->protocol_.family(),\n            o->protocol_.type(), o->protocol_.protocol(),\n            o->output_buffer(), o->address_length(), o);\n        p.v = p.p = 0;\n        return;\n      }\n\n      // If the socket was successfully accepted, transfer ownership of the\n      // socket to the peer object.\n      if (!ec)\n      {\n        o->peer_.assign(o->protocol_,\n            typename Socket::native_handle_type(\n              o->new_socket_.get(), peer_endpoint), ec);\n        if (!ec)\n          o->new_socket_.release();\n      }\n\n      
// Pass endpoint back to caller.\n      if (o->peer_endpoint_)\n        *o->peer_endpoint_ = peer_endpoint;\n    }\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder1<Handler, asio::error_code>\n      handler(o->handler_, ec);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  win_iocp_socket_service_base& socket_service_;\n  socket_type socket_;\n  socket_holder new_socket_;\n  Socket& peer_;\n  Protocol protocol_;\n  typename Protocol::endpoint* peer_endpoint_;\n  unsigned char output_buffer_[(sizeof(sockaddr_storage_type) + 16) * 2];\n  bool enable_connection_aborted_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n#if defined(ASIO_HAS_MOVE)\n\ntemplate <typename Protocol, typename PeerIoExecutor,\n    typename Handler, typename IoExecutor>\nclass win_iocp_socket_move_accept_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_move_accept_op);\n\n  win_iocp_socket_move_accept_op(\n      win_iocp_socket_service_base& socket_service, socket_type socket,\n      const Protocol& protocol, const PeerIoExecutor& peer_io_ex,\n      typename Protocol::endpoint* peer_endpoint,\n      bool enable_connection_aborted, Handler& handler, const IoExecutor& io_ex)\n    : operation(&win_iocp_socket_move_accept_op::do_complete),\n      
socket_service_(socket_service),\n      socket_(socket),\n      peer_(peer_io_ex),\n      protocol_(protocol),\n      peer_endpoint_(peer_endpoint),\n      enable_connection_aborted_(enable_connection_aborted),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  socket_holder& new_socket()\n  {\n    return new_socket_;\n  }\n\n  void* output_buffer()\n  {\n    return output_buffer_;\n  }\n\n  DWORD address_length()\n  {\n    return sizeof(sockaddr_storage_type) + 16;\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t /*bytes_transferred*/)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the operation object.\n    win_iocp_socket_move_accept_op* o(\n        static_cast<win_iocp_socket_move_accept_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    if (owner)\n    {\n      typename Protocol::endpoint peer_endpoint;\n      std::size_t addr_len = peer_endpoint.capacity();\n      socket_ops::complete_iocp_accept(o->socket_,\n          o->output_buffer(), o->address_length(),\n          peer_endpoint.data(), &addr_len,\n          o->new_socket_.get(), ec);\n\n      // Restart the accept operation if we got the connection_aborted error\n      // and the enable_connection_aborted socket option is not set.\n      if (ec == asio::error::connection_aborted\n          && !o->enable_connection_aborted_)\n      {\n        o->reset();\n        o->socket_service_.restart_accept_op(o->socket_,\n            o->new_socket_, o->protocol_.family(),\n            o->protocol_.type(), o->protocol_.protocol(),\n            o->output_buffer(), o->address_length(), o);\n        p.v = p.p = 0;\n        return;\n      }\n\n      // If the socket was successfully accepted, transfer ownership 
of the\n      // socket to the peer object.\n      if (!ec)\n      {\n        o->peer_.assign(o->protocol_,\n            typename Protocol::socket::native_handle_type(\n              o->new_socket_.get(), peer_endpoint), ec);\n        if (!ec)\n          o->new_socket_.release();\n      }\n\n      // Pass endpoint back to caller.\n      if (o->peer_endpoint_)\n        *o->peer_endpoint_ = peer_endpoint;\n    }\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::move_binder2<Handler,\n      asio::error_code, peer_socket_type>\n        handler(0, ASIO_MOVE_CAST(Handler)(o->handler_), ec,\n          ASIO_MOVE_CAST(peer_socket_type)(o->peer_));\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, \"...\"));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  typedef typename Protocol::socket::template\n    rebind_executor<PeerIoExecutor>::other peer_socket_type;\n\n  win_iocp_socket_service_base& socket_service_;\n  socket_type socket_;\n  socket_holder new_socket_;\n  peer_socket_type peer_;\n  Protocol protocol_;\n  typename Protocol::endpoint* peer_endpoint_;\n  unsigned char output_buffer_[(sizeof(sockaddr_storage_type) + 16) * 2];\n  bool enable_connection_aborted_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n#endif // defined(ASIO_HAS_MOVE)\n\n} // namespace detail\n} // namespace asio\n\n#include 
\"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_socket_connect_op.hpp",
    "content": "//\n// detail/win_iocp_socket_connect_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_iocp_socket_connect_op_base : public reactor_op\n{\npublic:\n  win_iocp_socket_connect_op_base(socket_type socket, func_type complete_func)\n    : reactor_op(&win_iocp_socket_connect_op_base::do_perform, complete_func),\n      socket_(socket),\n      connect_ex_(false)\n  {\n  }\n\n  static status do_perform(reactor_op* base)\n  {\n    win_iocp_socket_connect_op_base* o(\n        static_cast<win_iocp_socket_connect_op_base*>(base));\n\n    return socket_ops::non_blocking_connect(\n        o->socket_, o->ec_) ? 
done : not_done;\n  }\n\n  socket_type socket_;\n  bool connect_ex_;\n};\n\ntemplate <typename Handler, typename IoExecutor>\nclass win_iocp_socket_connect_op : public win_iocp_socket_connect_op_base\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_connect_op);\n\n  win_iocp_socket_connect_op(socket_type socket,\n      Handler& handler, const IoExecutor& io_ex)\n    : win_iocp_socket_connect_op_base(socket,\n        &win_iocp_socket_connect_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t /*bytes_transferred*/)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the operation object.\n    win_iocp_socket_connect_op* o(\n        static_cast<win_iocp_socket_connect_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    if (owner)\n    {\n      if (o->connect_ex_)\n        socket_ops::complete_iocp_connect(o->socket_, ec);\n      else\n        ec = o->ec_;\n    }\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder1<Handler, asio::error_code>\n      handler(o->handler_, ec);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_socket_recv_op.hpp",
    "content": "//\n// detail/win_iocp_socket_recv_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence, typename Handler, typename IoExecutor>\nclass win_iocp_socket_recv_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recv_op);\n\n  win_iocp_socket_recv_op(socket_ops::state_type state,\n      socket_ops::weak_cancel_token_type cancel_token,\n      const MutableBufferSequence& buffers, Handler& handler,\n      const IoExecutor& io_ex)\n    : operation(&win_iocp_socket_recv_op::do_complete),\n      state_(state),\n      cancel_token_(cancel_token),\n      buffers_(buffers),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t bytes_transferred)\n  {\n    asio::error_code ec(result_ec);\n\n  
  // Take ownership of the operation object.\n    win_iocp_socket_recv_op* o(static_cast<win_iocp_socket_recv_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    // Check whether buffers are still valid.\n    if (owner)\n    {\n      buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::validate(o->buffers_);\n    }\n#endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n\n    socket_ops::complete_iocp_recv(o->state_, o->cancel_token_,\n        buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::all_empty(o->buffers_),\n        ec, bytes_transferred);\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, ec, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  socket_ops::state_type state_;\n  socket_ops::weak_cancel_token_type cancel_token_;\n  MutableBufferSequence buffers_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_socket_recvfrom_op.hpp",
    "content": "//\n// detail/win_iocp_socket_recvfrom_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence, typename Endpoint,\n    typename Handler, typename IoExecutor>\nclass win_iocp_socket_recvfrom_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recvfrom_op);\n\n  win_iocp_socket_recvfrom_op(Endpoint& endpoint,\n      socket_ops::weak_cancel_token_type cancel_token,\n      const MutableBufferSequence& buffers, Handler& handler,\n      const IoExecutor& io_ex)\n    : operation(&win_iocp_socket_recvfrom_op::do_complete),\n      endpoint_(endpoint),\n      endpoint_size_(static_cast<int>(endpoint.capacity())),\n      cancel_token_(cancel_token),\n      buffers_(buffers),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  int& endpoint_size()\n  {\n    return endpoint_size_;\n  }\n\n  
static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t bytes_transferred)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the operation object.\n    win_iocp_socket_recvfrom_op* o(\n        static_cast<win_iocp_socket_recvfrom_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    // Check whether buffers are still valid.\n    if (owner)\n    {\n      buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::validate(o->buffers_);\n    }\n#endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n\n    socket_ops::complete_iocp_recvfrom(o->cancel_token_, ec);\n\n    // Record the size of the endpoint returned by the operation.\n    o->endpoint_.resize(o->endpoint_size_);\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, ec, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Endpoint& endpoint_;\n  int endpoint_size_;\n  socket_ops::weak_cancel_token_type cancel_token_;\n  MutableBufferSequence buffers_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_socket_recvmsg_op.hpp",
    "content": "//\n// detail/win_iocp_socket_recvmsg_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/socket_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence, typename Handler, typename IoExecutor>\nclass win_iocp_socket_recvmsg_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recvmsg_op);\n\n  win_iocp_socket_recvmsg_op(\n      socket_ops::weak_cancel_token_type cancel_token,\n      const MutableBufferSequence& buffers,\n      socket_base::message_flags& out_flags,\n      Handler& handler, const IoExecutor& io_ex)\n    : operation(&win_iocp_socket_recvmsg_op::do_complete),\n      cancel_token_(cancel_token),\n      buffers_(buffers),\n      out_flags_(out_flags),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n   
   std::size_t bytes_transferred)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the operation object.\n    win_iocp_socket_recvmsg_op* o(\n        static_cast<win_iocp_socket_recvmsg_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    // Check whether buffers are still valid.\n    if (owner)\n    {\n      buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::validate(o->buffers_);\n    }\n#endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n\n    socket_ops::complete_iocp_recvmsg(o->cancel_token_, ec);\n    o->out_flags_ = 0;\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. 
Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, ec, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  socket_ops::weak_cancel_token_type cancel_token_;\n  MutableBufferSequence buffers_;\n  socket_base::message_flags& out_flags_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_socket_send_op.hpp",
    "content": "//\n// detail/win_iocp_socket_send_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename ConstBufferSequence, typename Handler, typename IoExecutor>\nclass win_iocp_socket_send_op : public operation\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_send_op);\n\n  win_iocp_socket_send_op(socket_ops::weak_cancel_token_type cancel_token,\n      const ConstBufferSequence& buffers, Handler& handler,\n      const IoExecutor& io_ex)\n    : operation(&win_iocp_socket_send_op::do_complete),\n      cancel_token_(cancel_token),\n      buffers_(buffers),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t bytes_transferred)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the operation object.\n    
win_iocp_socket_send_op* o(static_cast<win_iocp_socket_send_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    // Check whether buffers are still valid.\n    if (owner)\n    {\n      buffer_sequence_adapter<asio::const_buffer,\n          ConstBufferSequence>::validate(o->buffers_);\n    }\n#endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n\n    socket_ops::complete_iocp_send(o->cancel_token_, ec);\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, ec, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  socket_ops::weak_cancel_token_type cancel_token_;\n  ConstBufferSequence buffers_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_socket_service.hpp",
    "content": "//\n// detail/win_iocp_socket_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP\n#define ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include <cstring>\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/socket_base.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/select_reactor.hpp\"\n#include \"asio/detail/socket_holder.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/win_iocp_io_context.hpp\"\n#include \"asio/detail/win_iocp_null_buffers_op.hpp\"\n#include \"asio/detail/win_iocp_socket_accept_op.hpp\"\n#include \"asio/detail/win_iocp_socket_connect_op.hpp\"\n#include \"asio/detail/win_iocp_socket_recvfrom_op.hpp\"\n#include \"asio/detail/win_iocp_socket_send_op.hpp\"\n#include \"asio/detail/win_iocp_socket_service_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol>\nclass win_iocp_socket_service :\n  public execution_context_service_base<win_iocp_socket_service<Protocol> >,\n  public win_iocp_socket_service_base\n{\npublic:\n  // 
The protocol type.\n  typedef Protocol protocol_type;\n\n  // The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  // The native type of a socket.\n  class native_handle_type\n  {\n  public:\n    native_handle_type(socket_type s)\n      : socket_(s),\n        have_remote_endpoint_(false)\n    {\n    }\n\n    native_handle_type(socket_type s, const endpoint_type& ep)\n      : socket_(s),\n        have_remote_endpoint_(true),\n        remote_endpoint_(ep)\n    {\n    }\n\n    void operator=(socket_type s)\n    {\n      socket_ = s;\n      have_remote_endpoint_ = false;\n      remote_endpoint_ = endpoint_type();\n    }\n\n    operator socket_type() const\n    {\n      return socket_;\n    }\n\n    bool have_remote_endpoint() const\n    {\n      return have_remote_endpoint_;\n    }\n\n    endpoint_type remote_endpoint() const\n    {\n      return remote_endpoint_;\n    }\n\n  private:\n    socket_type socket_;\n    bool have_remote_endpoint_;\n    endpoint_type remote_endpoint_;\n  };\n\n  // The implementation type of the socket.\n  struct implementation_type :\n    win_iocp_socket_service_base::base_implementation_type\n  {\n    // Default constructor.\n    implementation_type()\n      : protocol_(endpoint_type().protocol()),\n        have_remote_endpoint_(false),\n        remote_endpoint_()\n    {\n    }\n\n    // The protocol associated with the socket.\n    protocol_type protocol_;\n\n    // Whether we have a cached remote endpoint.\n    bool have_remote_endpoint_;\n\n    // A cached remote endpoint.\n    endpoint_type remote_endpoint_;\n  };\n\n  // Constructor.\n  win_iocp_socket_service(execution_context& context)\n    : execution_context_service_base<\n        win_iocp_socket_service<Protocol> >(context),\n      win_iocp_socket_service_base(context)\n  {\n  }\n\n  // Destroy all user-defined handler objects owned by the service.\n  void shutdown()\n  {\n    this->base_shutdown();\n  }\n\n  // Move-construct a new socket 
implementation.\n  void move_construct(implementation_type& impl,\n      implementation_type& other_impl) ASIO_NOEXCEPT\n  {\n    this->base_move_construct(impl, other_impl);\n\n    impl.protocol_ = other_impl.protocol_;\n    other_impl.protocol_ = endpoint_type().protocol();\n\n    impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_;\n    other_impl.have_remote_endpoint_ = false;\n\n    impl.remote_endpoint_ = other_impl.remote_endpoint_;\n    other_impl.remote_endpoint_ = endpoint_type();\n  }\n\n  // Move-assign from another socket implementation.\n  void move_assign(implementation_type& impl,\n      win_iocp_socket_service_base& other_service,\n      implementation_type& other_impl)\n  {\n    this->base_move_assign(impl, other_service, other_impl);\n\n    impl.protocol_ = other_impl.protocol_;\n    other_impl.protocol_ = endpoint_type().protocol();\n\n    impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_;\n    other_impl.have_remote_endpoint_ = false;\n\n    impl.remote_endpoint_ = other_impl.remote_endpoint_;\n    other_impl.remote_endpoint_ = endpoint_type();\n  }\n\n  // Move-construct a new socket implementation from another protocol type.\n  template <typename Protocol1>\n  void converting_move_construct(implementation_type& impl,\n      win_iocp_socket_service<Protocol1>&,\n      typename win_iocp_socket_service<\n        Protocol1>::implementation_type& other_impl)\n  {\n    this->base_move_construct(impl, other_impl);\n\n    impl.protocol_ = protocol_type(other_impl.protocol_);\n    other_impl.protocol_ = typename Protocol1::endpoint().protocol();\n\n    impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_;\n    other_impl.have_remote_endpoint_ = false;\n\n    impl.remote_endpoint_ = other_impl.remote_endpoint_;\n    other_impl.remote_endpoint_ = typename Protocol1::endpoint();\n  }\n\n  // Open a new socket implementation.\n  asio::error_code open(implementation_type& impl,\n      const protocol_type& protocol, 
asio::error_code& ec)\n  {\n    if (!do_open(impl, protocol.family(),\n          protocol.type(), protocol.protocol(), ec))\n    {\n      impl.protocol_ = protocol;\n      impl.have_remote_endpoint_ = false;\n      impl.remote_endpoint_ = endpoint_type();\n    }\n    return ec;\n  }\n\n  // Assign a native socket to a socket implementation.\n  asio::error_code assign(implementation_type& impl,\n      const protocol_type& protocol, const native_handle_type& native_socket,\n      asio::error_code& ec)\n  {\n    if (!do_assign(impl, protocol.type(), native_socket, ec))\n    {\n      impl.protocol_ = protocol;\n      impl.have_remote_endpoint_ = native_socket.have_remote_endpoint();\n      impl.remote_endpoint_ = native_socket.remote_endpoint();\n    }\n    return ec;\n  }\n\n  // Get the native socket representation.\n  native_handle_type native_handle(implementation_type& impl)\n  {\n    if (impl.have_remote_endpoint_)\n      return native_handle_type(impl.socket_, impl.remote_endpoint_);\n    return native_handle_type(impl.socket_);\n  }\n\n  // Bind the socket to the specified local endpoint.\n  asio::error_code bind(implementation_type& impl,\n      const endpoint_type& endpoint, asio::error_code& ec)\n  {\n    socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec);\n    return ec;\n  }\n\n  // Set a socket option.\n  template <typename Option>\n  asio::error_code set_option(implementation_type& impl,\n      const Option& option, asio::error_code& ec)\n  {\n    socket_ops::setsockopt(impl.socket_, impl.state_,\n        option.level(impl.protocol_), option.name(impl.protocol_),\n        option.data(impl.protocol_), option.size(impl.protocol_), ec);\n    return ec;\n  }\n\n  // Set a socket option.\n  template <typename Option>\n  asio::error_code get_option(const implementation_type& impl,\n      Option& option, asio::error_code& ec) const\n  {\n    std::size_t size = option.size(impl.protocol_);\n    socket_ops::getsockopt(impl.socket_, 
impl.state_,\n        option.level(impl.protocol_), option.name(impl.protocol_),\n        option.data(impl.protocol_), &size, ec);\n    if (!ec)\n      option.resize(impl.protocol_, size);\n    return ec;\n  }\n\n  // Get the local endpoint.\n  endpoint_type local_endpoint(const implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    endpoint_type endpoint;\n    std::size_t addr_len = endpoint.capacity();\n    if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec))\n      return endpoint_type();\n    endpoint.resize(addr_len);\n    return endpoint;\n  }\n\n  // Get the remote endpoint.\n  endpoint_type remote_endpoint(const implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    endpoint_type endpoint = impl.remote_endpoint_;\n    std::size_t addr_len = endpoint.capacity();\n    if (socket_ops::getpeername(impl.socket_, endpoint.data(),\n          &addr_len, impl.have_remote_endpoint_, ec))\n      return endpoint_type();\n    endpoint.resize(addr_len);\n    return endpoint;\n  }\n\n  // Disable sends or receives on the socket.\n  asio::error_code shutdown(base_implementation_type& impl,\n      socket_base::shutdown_type what, asio::error_code& ec)\n  {\n    socket_ops::shutdown(impl.socket_, what, ec);\n    return ec;\n  }\n\n  // Send a datagram to the specified endpoint. 
Returns the number of bytes\n  // sent.\n  template <typename ConstBufferSequence>\n  size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers,\n      const endpoint_type& destination, socket_base::message_flags flags,\n      asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(buffers);\n\n    return socket_ops::sync_sendto(impl.socket_, impl.state_,\n        bufs.buffers(), bufs.count(), flags,\n        destination.data(), destination.size(), ec);\n  }\n\n  // Wait until data can be sent without blocking.\n  size_t send_to(implementation_type& impl, const null_buffers&,\n      const endpoint_type&, socket_base::message_flags,\n      asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_write(impl.socket_, impl.state_, -1, ec);\n\n    return 0;\n  }\n\n  // Start an asynchronous send. The data being sent must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_send_to(implementation_type& impl,\n      const ConstBufferSequence& buffers, const endpoint_type& destination,\n      socket_base::message_flags flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_socket_send_op<\n        ConstBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_, buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_send_to\"));\n\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(buffers);\n\n    start_send_to_op(impl, bufs.buffers(), bufs.count(),\n        destination.data(), static_cast<int>(destination.size()),\n        flags, p.p);\n    p.v = 
p.p = 0;\n  }\n\n  // Start an asynchronous wait until data can be sent without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_send_to(implementation_type& impl, const null_buffers&,\n      const endpoint_type&, socket_base::message_flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_send_to(null_buffers)\"));\n\n    start_reactor_op(impl, select_reactor::write_op, p.p);\n    p.v = p.p = 0;\n  }\n\n  // Receive a datagram with the endpoint of the sender. Returns the number of\n  // bytes received.\n  template <typename MutableBufferSequence>\n  size_t receive_from(implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags,\n      asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    std::size_t addr_len = sender_endpoint.capacity();\n    std::size_t bytes_recvd = socket_ops::sync_recvfrom(\n        impl.socket_, impl.state_, bufs.buffers(), bufs.count(),\n        flags, sender_endpoint.data(), &addr_len, ec);\n\n    if (!ec)\n      sender_endpoint.resize(addr_len);\n\n    return bytes_recvd;\n  }\n\n  // Wait until data can be received without blocking.\n  size_t receive_from(implementation_type& impl,\n      const null_buffers&, endpoint_type& sender_endpoint,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);\n\n    // Reset endpoint since it can be given no sensible 
value at this time.\n    sender_endpoint = endpoint_type();\n\n    return 0;\n  }\n\n  // Start an asynchronous receive. The buffer for the data being received and\n  // the sender_endpoint object must both be valid for the lifetime of the\n  // asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive_from(implementation_type& impl,\n      const MutableBufferSequence& buffers, endpoint_type& sender_endp,\n      socket_base::message_flags flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_socket_recvfrom_op<MutableBufferSequence,\n        endpoint_type, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(sender_endp, impl.cancel_token_,\n        buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive_from\"));\n\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    start_receive_from_op(impl, bufs.buffers(), bufs.count(),\n        sender_endp.data(), flags, &p.p->endpoint_size(), p.p);\n    p.v = p.p = 0;\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive_from(implementation_type& impl, const null_buffers&,\n      endpoint_type& sender_endpoint, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          
&impl, impl.socket_, \"async_receive_from(null_buffers)\"));\n\n    // Reset endpoint since it can be given no sensible value at this time.\n    sender_endpoint = endpoint_type();\n\n    start_null_buffers_receive_op(impl, flags, p.p);\n    p.v = p.p = 0;\n  }\n\n  // Accept a new connection.\n  template <typename Socket>\n  asio::error_code accept(implementation_type& impl, Socket& peer,\n      endpoint_type* peer_endpoint, asio::error_code& ec)\n  {\n    // We cannot accept a socket that is already open.\n    if (peer.is_open())\n    {\n      ec = asio::error::already_open;\n      return ec;\n    }\n\n    std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0;\n    socket_holder new_socket(socket_ops::sync_accept(impl.socket_,\n          impl.state_, peer_endpoint ? peer_endpoint->data() : 0,\n          peer_endpoint ? &addr_len : 0, ec));\n\n    // On success, assign new connection to peer socket object.\n    if (new_socket.get() != invalid_socket)\n    {\n      if (peer_endpoint)\n        peer_endpoint->resize(addr_len);\n      peer.assign(impl.protocol_, new_socket.get(), ec);\n      if (!ec)\n        new_socket.release();\n    }\n\n    return ec;\n  }\n\n  // Start an asynchronous accept. 
The peer and peer_endpoint objects\n  // must be valid until the accept's handler is invoked.\n  template <typename Socket, typename Handler, typename IoExecutor>\n  void async_accept(implementation_type& impl, Socket& peer,\n      endpoint_type* peer_endpoint, Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_socket_accept_op<Socket,\n        protocol_type, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    bool enable_connection_aborted =\n      (impl.state_ & socket_ops::enable_connection_aborted) != 0;\n    p.p = new (p.v) op(*this, impl.socket_, peer, impl.protocol_,\n        peer_endpoint, enable_connection_aborted, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_accept\"));\n\n    start_accept_op(impl, peer.is_open(), p.p->new_socket(),\n        impl.protocol_.family(), impl.protocol_.type(),\n        impl.protocol_.protocol(), p.p->output_buffer(),\n        p.p->address_length(), p.p);\n    p.v = p.p = 0;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  // Start an asynchronous accept. 
The peer and peer_endpoint objects\n  // must be valid until the accept's handler is invoked.\n  template <typename PeerIoExecutor, typename Handler, typename IoExecutor>\n  void async_move_accept(implementation_type& impl,\n      const PeerIoExecutor& peer_io_ex, endpoint_type* peer_endpoint,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_socket_move_accept_op<\n        protocol_type, PeerIoExecutor, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    bool enable_connection_aborted =\n      (impl.state_ & socket_ops::enable_connection_aborted) != 0;\n    p.p = new (p.v) op(*this, impl.socket_, impl.protocol_,\n        peer_io_ex, peer_endpoint, enable_connection_aborted,\n        handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_accept\"));\n\n    start_accept_op(impl, false, p.p->new_socket(),\n        impl.protocol_.family(), impl.protocol_.type(),\n        impl.protocol_.protocol(), p.p->output_buffer(),\n        p.p->address_length(), p.p);\n    p.v = p.p = 0;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  // Connect the socket to the specified endpoint.\n  asio::error_code connect(implementation_type& impl,\n      const endpoint_type& peer_endpoint, asio::error_code& ec)\n  {\n    socket_ops::sync_connect(impl.socket_,\n        peer_endpoint.data(), peer_endpoint.size(), ec);\n    return ec;\n  }\n\n  // Start an asynchronous connect.\n  template <typename Handler, typename IoExecutor>\n  void async_connect(implementation_type& impl,\n      const endpoint_type& peer_endpoint, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_socket_connect_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      
op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.socket_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_connect\"));\n\n    start_connect_op(impl, impl.protocol_.family(), impl.protocol_.type(),\n        peer_endpoint.data(), static_cast<int>(peer_endpoint.size()), p.p);\n    p.v = p.p = 0;\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_socket_service_base.hpp",
    "content": "//\n// detail/win_iocp_socket_service_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP\n#define ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/socket_base.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/operation.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/select_reactor.hpp\"\n#include \"asio/detail/socket_holder.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/win_iocp_io_context.hpp\"\n#include \"asio/detail/win_iocp_null_buffers_op.hpp\"\n#include \"asio/detail/win_iocp_socket_connect_op.hpp\"\n#include \"asio/detail/win_iocp_socket_send_op.hpp\"\n#include \"asio/detail/win_iocp_socket_recv_op.hpp\"\n#include \"asio/detail/win_iocp_socket_recvmsg_op.hpp\"\n#include \"asio/detail/win_iocp_wait_op.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_iocp_socket_service_base\n{\npublic:\n  // The implementation type of the socket.\n  struct base_implementation_type\n  {\n    // The native socket representation.\n    socket_type socket_;\n\n    // 
The current state of the socket.\n    socket_ops::state_type state_;\n\n    // We use a shared pointer as a cancellation token here to work around the\n    // broken Windows support for cancellation. MSDN says that when you call\n    // closesocket any outstanding WSARecv or WSASend operations will complete\n    // with the error ERROR_OPERATION_ABORTED. In practice they complete with\n    // ERROR_NETNAME_DELETED, which means you can't tell the difference between\n    // a local cancellation and the socket being hard-closed by the peer.\n    socket_ops::shared_cancel_token_type cancel_token_;\n\n    // Per-descriptor data used by the reactor.\n    select_reactor::per_descriptor_data reactor_data_;\n\n#if defined(ASIO_ENABLE_CANCELIO)\n    // The ID of the thread from which it is safe to cancel asynchronous\n    // operations. 0 means no asynchronous operations have been started yet.\n    // ~0 means asynchronous operations have been started from more than one\n    // thread, and cancellation is not supported for the socket.\n    DWORD safe_cancellation_thread_id_;\n#endif // defined(ASIO_ENABLE_CANCELIO)\n\n    // Pointers to adjacent socket implementations in linked list.\n    base_implementation_type* next_;\n    base_implementation_type* prev_;\n  };\n\n  // Constructor.\n  ASIO_DECL win_iocp_socket_service_base(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void base_shutdown();\n\n  // Construct a new socket implementation.\n  ASIO_DECL void construct(base_implementation_type& impl);\n\n  // Move-construct a new socket implementation.\n  ASIO_DECL void base_move_construct(base_implementation_type& impl,\n      base_implementation_type& other_impl) ASIO_NOEXCEPT;\n\n  // Move-assign from another socket implementation.\n  ASIO_DECL void base_move_assign(base_implementation_type& impl,\n      win_iocp_socket_service_base& other_service,\n      base_implementation_type& other_impl);\n\n  // Destroy 
a socket implementation.\n  ASIO_DECL void destroy(base_implementation_type& impl);\n\n  // Determine whether the socket is open.\n  bool is_open(const base_implementation_type& impl) const\n  {\n    return impl.socket_ != invalid_socket;\n  }\n\n  // Destroy a socket implementation.\n  ASIO_DECL asio::error_code close(\n      base_implementation_type& impl, asio::error_code& ec);\n\n  // Release ownership of the socket.\n  ASIO_DECL socket_type release(\n      base_implementation_type& impl, asio::error_code& ec);\n\n  // Cancel all operations associated with the socket.\n  ASIO_DECL asio::error_code cancel(\n      base_implementation_type& impl, asio::error_code& ec);\n\n  // Determine whether the socket is at the out-of-band data mark.\n  bool at_mark(const base_implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    return socket_ops::sockatmark(impl.socket_, ec);\n  }\n\n  // Determine the number of bytes available for reading.\n  std::size_t available(const base_implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    return socket_ops::available(impl.socket_, ec);\n  }\n\n  // Place the socket into the state where it will listen for new connections.\n  asio::error_code listen(base_implementation_type& impl,\n      int backlog, asio::error_code& ec)\n  {\n    socket_ops::listen(impl.socket_, backlog, ec);\n    return ec;\n  }\n\n  // Perform an IO control command on the socket.\n  template <typename IO_Control_Command>\n  asio::error_code io_control(base_implementation_type& impl,\n      IO_Control_Command& command, asio::error_code& ec)\n  {\n    socket_ops::ioctl(impl.socket_, impl.state_, command.name(),\n        static_cast<ioctl_arg_type*>(command.data()), ec);\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the socket.\n  bool non_blocking(const base_implementation_type& impl) const\n  {\n    return (impl.state_ & socket_ops::user_set_non_blocking) != 0;\n  }\n\n  // Sets the non-blocking mode of the socket.\n  
asio::error_code non_blocking(base_implementation_type& impl,\n      bool mode, asio::error_code& ec)\n  {\n    socket_ops::set_user_non_blocking(impl.socket_, impl.state_, mode, ec);\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the native socket implementation.\n  bool native_non_blocking(const base_implementation_type& impl) const\n  {\n    return (impl.state_ & socket_ops::internal_non_blocking) != 0;\n  }\n\n  // Sets the non-blocking mode of the native socket implementation.\n  asio::error_code native_non_blocking(base_implementation_type& impl,\n      bool mode, asio::error_code& ec)\n  {\n    socket_ops::set_internal_non_blocking(impl.socket_, impl.state_, mode, ec);\n    return ec;\n  }\n\n  // Wait for the socket to become ready to read, ready to write, or to have\n  // pending error conditions.\n  asio::error_code wait(base_implementation_type& impl,\n      socket_base::wait_type w, asio::error_code& ec)\n  {\n    switch (w)\n    {\n    case socket_base::wait_read:\n      socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);\n      break;\n    case socket_base::wait_write:\n      socket_ops::poll_write(impl.socket_, impl.state_, -1, ec);\n      break;\n    case socket_base::wait_error:\n      socket_ops::poll_error(impl.socket_, impl.state_, -1, ec);\n      break;\n    default:\n      ec = asio::error::invalid_argument;\n      break;\n    }\n\n    return ec;\n  }\n\n  // Asynchronously wait for the socket to become ready to read, ready to\n  // write, or to have pending error conditions.\n  template <typename Handler, typename IoExecutor>\n  void async_wait(base_implementation_type& impl,\n      socket_base::wait_type w, Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_wait_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n  
    op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_wait\"));\n\n    switch (w)\n    {\n      case socket_base::wait_read:\n        start_null_buffers_receive_op(impl, 0, p.p);\n        break;\n      case socket_base::wait_write:\n        start_reactor_op(impl, select_reactor::write_op, p.p);\n        break;\n      case socket_base::wait_error:\n        start_reactor_op(impl, select_reactor::except_op, p.p);\n        break;\n      default:\n        p.p->ec_ = asio::error::invalid_argument;\n        iocp_service_.post_immediate_completion(p.p, is_continuation);\n        break;\n    }\n\n    p.v = p.p = 0;\n  }\n\n  // Send the given data to the peer. Returns the number of bytes sent.\n  template <typename ConstBufferSequence>\n  size_t send(base_implementation_type& impl,\n      const ConstBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(buffers);\n\n    return socket_ops::sync_send(impl.socket_, impl.state_,\n        bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec);\n  }\n\n  // Wait until data can be sent without blocking.\n  size_t send(base_implementation_type& impl, const null_buffers&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_write(impl.socket_, impl.state_, -1, ec);\n\n    return 0;\n  }\n\n  // Start an asynchronous send. 
The data being sent must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_send(base_implementation_type& impl,\n      const ConstBufferSequence& buffers, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_socket_send_op<\n        ConstBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_, buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_send\"));\n\n    buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence> bufs(buffers);\n\n    start_send_op(impl, bufs.buffers(), bufs.count(), flags,\n        (impl.state_ & socket_ops::stream_oriented) != 0 && bufs.all_empty(),\n        p.p);\n    p.v = p.p = 0;\n  }\n\n  // Start an asynchronous wait until data can be sent without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_send(base_implementation_type& impl, const null_buffers&,\n      socket_base::message_flags, Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_send(null_buffers)\"));\n\n    start_reactor_op(impl, select_reactor::write_op, p.p);\n    p.v = p.p = 0;\n  }\n\n  // Receive some data from the peer. 
Returns the number of bytes received.\n  template <typename MutableBufferSequence>\n  size_t receive(base_implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    return socket_ops::sync_recv(impl.socket_, impl.state_,\n        bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec);\n  }\n\n  // Wait until data can be received without blocking.\n  size_t receive(base_implementation_type& impl, const null_buffers&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);\n\n    return 0;\n  }\n\n  // Start an asynchronous receive. The buffer for the data being received\n  // must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive(base_implementation_type& impl,\n      const MutableBufferSequence& buffers, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_socket_recv_op<\n        MutableBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.state_, impl.cancel_token_,\n        buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive\"));\n\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    start_receive_op(impl, bufs.buffers(), bufs.count(), flags,\n        (impl.state_ & socket_ops::stream_oriented) != 0 && bufs.all_empty(),\n        p.p);\n    p.v = p.p = 0;\n  }\n\n  // Wait until data 
can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive(base_implementation_type& impl,\n      const null_buffers&, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive(null_buffers)\"));\n\n    start_null_buffers_receive_op(impl, flags, p.p);\n    p.v = p.p = 0;\n  }\n\n  // Receive some data with associated flags. Returns the number of bytes\n  // received.\n  template <typename MutableBufferSequence>\n  size_t receive_with_flags(base_implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, asio::error_code& ec)\n  {\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    return socket_ops::sync_recvmsg(impl.socket_, impl.state_,\n        bufs.buffers(), bufs.count(), in_flags, out_flags, ec);\n  }\n\n  // Wait until data can be received without blocking.\n  size_t receive_with_flags(base_implementation_type& impl,\n      const null_buffers&, socket_base::message_flags,\n      socket_base::message_flags& out_flags, asio::error_code& ec)\n  {\n    // Wait for socket to become ready.\n    socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);\n\n    // Clear out_flags, since we cannot give it any other sensible value when\n    // performing a null_buffers operation.\n    out_flags = 0;\n\n    return 0;\n  }\n\n  // Start an asynchronous receive. 
The buffer for the data being received\n  // must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive_with_flags(base_implementation_type& impl,\n      const MutableBufferSequence& buffers, socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_socket_recvmsg_op<\n        MutableBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_,\n        buffers, out_flags, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive_with_flags\"));\n\n    buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence> bufs(buffers);\n\n    start_receive_op(impl, bufs.buffers(), bufs.count(), in_flags, false, p.p);\n    p.v = p.p = 0;\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive_with_flags(base_implementation_type& impl,\n      const null_buffers&, socket_base::message_flags in_flags,\n      socket_base::message_flags& out_flags, Handler& handler,\n      const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef win_iocp_null_buffers_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(impl.cancel_token_, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((context_, *p.p, \"socket\",\n          &impl, impl.socket_, \"async_receive_with_flags(null_buffers)\"));\n\n    // Reset out_flags since it can be given no sensible value at this time.\n    out_flags = 
0;\n\n    start_null_buffers_receive_op(impl, in_flags, p.p);\n    p.v = p.p = 0;\n  }\n\n  // Helper function to restart an asynchronous accept operation.\n  ASIO_DECL void restart_accept_op(socket_type s,\n      socket_holder& new_socket, int family, int type, int protocol,\n      void* output_buffer, DWORD address_length, operation* op);\n\nprotected:\n  // Open a new socket implementation.\n  ASIO_DECL asio::error_code do_open(\n      base_implementation_type& impl, int family, int type,\n      int protocol, asio::error_code& ec);\n\n  // Assign a native socket to a socket implementation.\n  ASIO_DECL asio::error_code do_assign(\n      base_implementation_type& impl, int type,\n      socket_type native_socket, asio::error_code& ec);\n\n  // Helper function to start an asynchronous send operation.\n  ASIO_DECL void start_send_op(base_implementation_type& impl,\n      WSABUF* buffers, std::size_t buffer_count,\n      socket_base::message_flags flags, bool noop, operation* op);\n\n  // Helper function to start an asynchronous send_to operation.\n  ASIO_DECL void start_send_to_op(base_implementation_type& impl,\n      WSABUF* buffers, std::size_t buffer_count,\n      const socket_addr_type* addr, int addrlen,\n      socket_base::message_flags flags, operation* op);\n\n  // Helper function to start an asynchronous receive operation.\n  ASIO_DECL void start_receive_op(base_implementation_type& impl,\n      WSABUF* buffers, std::size_t buffer_count,\n      socket_base::message_flags flags, bool noop, operation* op);\n\n  // Helper function to start an asynchronous null_buffers receive operation.\n  ASIO_DECL void start_null_buffers_receive_op(\n      base_implementation_type& impl,\n      socket_base::message_flags flags, reactor_op* op);\n\n  // Helper function to start an asynchronous receive_from operation.\n  ASIO_DECL void start_receive_from_op(base_implementation_type& impl,\n      WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr,\n      
socket_base::message_flags flags, int* addrlen, operation* op);\n\n  // Helper function to start an asynchronous accept operation.\n  ASIO_DECL void start_accept_op(base_implementation_type& impl,\n      bool peer_is_open, socket_holder& new_socket, int family, int type,\n      int protocol, void* output_buffer, DWORD address_length, operation* op);\n\n  // Start an asynchronous read or write operation using the reactor.\n  ASIO_DECL void start_reactor_op(base_implementation_type& impl,\n      int op_type, reactor_op* op);\n\n  // Start the asynchronous connect operation using the reactor.\n  ASIO_DECL void start_connect_op(base_implementation_type& impl,\n      int family, int type, const socket_addr_type* remote_addr,\n      std::size_t remote_addrlen, win_iocp_socket_connect_op_base* op);\n\n  // Helper function to close a socket when the associated object is being\n  // destroyed.\n  ASIO_DECL void close_for_destruction(base_implementation_type& impl);\n\n  // Update the ID of the thread from which cancellation is safe.\n  ASIO_DECL void update_cancellation_thread_id(\n      base_implementation_type& impl);\n\n  // Helper function to get the reactor. If no reactor has been created yet, a\n  // new one is obtained from the execution context and a pointer to it is\n  // cached in this service.\n  ASIO_DECL select_reactor& get_reactor();\n\n  // The type of a ConnectEx function pointer, as old SDKs may not provide it.\n  typedef BOOL (PASCAL *connect_ex_fn)(SOCKET,\n      const socket_addr_type*, int, void*, DWORD, DWORD*, OVERLAPPED*);\n\n  // Helper function to get the ConnectEx pointer. If no ConnectEx pointer has\n  // been obtained yet, one is obtained using WSAIoctl and the pointer is\n  // cached. 
Returns a null pointer if ConnectEx is not available.\n  ASIO_DECL connect_ex_fn get_connect_ex(\n      base_implementation_type& impl, int type);\n\n  // The type of a NtSetInformationFile function pointer.\n  typedef LONG (NTAPI *nt_set_info_fn)(HANDLE, ULONG_PTR*, void*, ULONG, ULONG);\n\n  // Helper function to get the NtSetInformationFile function pointer. If no\n  // NtSetInformationFile pointer has been obtained yet, one is obtained using\n  // GetProcAddress and the pointer is cached. Returns a null pointer if\n  // NtSetInformationFile is not available.\n  ASIO_DECL nt_set_info_fn get_nt_set_info();\n\n  // Helper function to emulate InterlockedCompareExchangePointer functionality\n  // for:\n  // - very old Platform SDKs; and\n  // - platform SDKs where MSVC's /Wp64 option causes spurious warnings.\n  ASIO_DECL void* interlocked_compare_exchange_pointer(\n      void** dest, void* exch, void* cmp);\n\n  // Helper function to emulate InterlockedExchangePointer functionality for:\n  // - very old Platform SDKs; and\n  // - platform SDKs where MSVC's /Wp64 option causes spurious warnings.\n  ASIO_DECL void* interlocked_exchange_pointer(void** dest, void* val);\n\n  // The execution context used to obtain the reactor, if required.\n  execution_context& context_;\n\n  // The IOCP service used for running asynchronous operations and dispatching\n  // handlers.\n  win_iocp_io_context& iocp_service_;\n\n  // The reactor used for performing connect operations. This object is created\n  // only if needed.\n  select_reactor* reactor_;\n\n  // Pointer to ConnectEx implementation.\n  void* connect_ex_;\n\n  // Pointer to NtSetInformationFile implementation.\n  void* nt_set_info_;\n\n  // Mutex to protect access to the linked list of implementations. 
\n  asio::detail::mutex mutex_;\n\n  // The head of a linked list of all implementations.\n  base_implementation_type* impl_list_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_iocp_socket_service_base.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_thread_info.hpp",
    "content": "//\n// detail/win_iocp_thread_info.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP\n#define ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/thread_info_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct win_iocp_thread_info : public thread_info_base\n{\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_iocp_wait_op.hpp",
    "content": "//\n// detail/win_iocp_wait_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_IOCP_WAIT_OP_HPP\n#define ASIO_DETAIL_WIN_IOCP_WAIT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/reactor_op.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename IoExecutor>\nclass win_iocp_wait_op : public reactor_op\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(win_iocp_wait_op);\n\n  win_iocp_wait_op(socket_ops::weak_cancel_token_type cancel_token,\n      Handler& handler, const IoExecutor& io_ex)\n    : reactor_op(&win_iocp_wait_op::do_perform,\n        &win_iocp_wait_op::do_complete),\n      cancel_token_(cancel_token),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static status do_perform(reactor_op*)\n  {\n    return done;\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code& result_ec,\n      std::size_t /*bytes_transferred*/)\n  {\n    asio::error_code ec(result_ec);\n\n    // Take ownership of the operation object.\n    win_iocp_wait_op* 
o(static_cast<win_iocp_wait_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // The reactor may have stored a result in the operation object.\n    if (o->ec_)\n      ec = o->ec_;\n\n    // Map non-portable errors to their portable counterparts.\n    if (ec.value() == ERROR_NETNAME_DELETED)\n    {\n      if (o->cancel_token_.expired())\n        ec = asio::error::operation_aborted;\n      else\n        ec = asio::error::connection_reset;\n    }\n    else if (ec.value() == ERROR_PORT_UNREACHABLE)\n    {\n      ec = asio::error::connection_refused;\n    }\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder1<Handler, asio::error_code>\n      handler(o->handler_, ec);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  socket_ops::weak_cancel_token_type cancel_token_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_IOCP)\n\n#endif // ASIO_DETAIL_WIN_IOCP_WAIT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_mutex.hpp",
    "content": "//\n// detail/win_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_MUTEX_HPP\n#define ASIO_DETAIL_WIN_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scoped_lock.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_mutex\n  : private noncopyable\n{\npublic:\n  typedef asio::detail::scoped_lock<win_mutex> scoped_lock;\n\n  // Constructor.\n  ASIO_DECL win_mutex();\n\n  // Destructor.\n  ~win_mutex()\n  {\n    ::DeleteCriticalSection(&crit_section_);\n  }\n\n  // Lock the mutex.\n  void lock()\n  {\n    ::EnterCriticalSection(&crit_section_);\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n    ::LeaveCriticalSection(&crit_section_);\n  }\n\nprivate:\n  // Initialisation must be performed in a separate function to the constructor\n  // since the compiler does not support the use of structured exceptions and\n  // C++ exceptions in the same function.\n  ASIO_DECL int do_init();\n\n  ::CRITICAL_SECTION crit_section_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_mutex.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_WIN_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_object_handle_service.hpp",
    "content": "//\n// detail/win_object_handle_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2011 Boris Schaeling (boris@highscore.de)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP\n#define ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)\n\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/wait_handler.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass win_object_handle_service :\n  public execution_context_service_base<win_object_handle_service>\n{\npublic:\n  // The native type of an object handle.\n  typedef HANDLE native_handle_type;\n\n  // The implementation type of the object handle.\n  class implementation_type\n  {\n   public:\n    // Default constructor.\n    implementation_type()\n      : handle_(INVALID_HANDLE_VALUE),\n        wait_handle_(INVALID_HANDLE_VALUE),\n        owner_(0),\n        next_(0),\n        prev_(0)\n    {\n    }\n\n  private:\n    // Only this service will have access to the internal values.\n    friend class win_object_handle_service;\n\n    // The native object handle representation. 
May be accessed or modified\n    // without locking the mutex.\n    native_handle_type handle_;\n\n    // The handle used to unregister the wait operation. The mutex must be\n    // locked when accessing or modifying this member.\n    HANDLE wait_handle_;\n\n    // The operations waiting on the object handle. If there is a registered\n    // wait then the mutex must be locked when accessing or modifying this\n    // member\n    op_queue<wait_op> op_queue_;\n\n    // The service instance that owns the object handle implementation.\n    win_object_handle_service* owner_;\n\n    // Pointers to adjacent handle implementations in linked list. The mutex\n    // must be locked when accessing or modifying these members.\n    implementation_type* next_;\n    implementation_type* prev_;\n  };\n\n  // Constructor.\n  ASIO_DECL win_object_handle_service(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Construct a new handle implementation.\n  ASIO_DECL void construct(implementation_type& impl);\n\n  // Move-construct a new handle implementation.\n  ASIO_DECL void move_construct(implementation_type& impl,\n      implementation_type& other_impl);\n\n  // Move-assign from another handle implementation.\n  ASIO_DECL void move_assign(implementation_type& impl,\n      win_object_handle_service& other_service,\n      implementation_type& other_impl);\n\n  // Destroy a handle implementation.\n  ASIO_DECL void destroy(implementation_type& impl);\n\n  // Assign a native handle to a handle implementation.\n  ASIO_DECL asio::error_code assign(implementation_type& impl,\n      const native_handle_type& handle, asio::error_code& ec);\n\n  // Determine whether the handle is open.\n  bool is_open(const implementation_type& impl) const\n  {\n    return impl.handle_ != INVALID_HANDLE_VALUE && impl.handle_ != 0;\n  }\n\n  // Destroy a handle implementation.\n  ASIO_DECL asio::error_code 
close(implementation_type& impl,\n      asio::error_code& ec);\n\n  // Get the native handle representation.\n  native_handle_type native_handle(const implementation_type& impl) const\n  {\n    return impl.handle_;\n  }\n\n  // Cancel all operations associated with the handle.\n  ASIO_DECL asio::error_code cancel(implementation_type& impl,\n      asio::error_code& ec);\n\n  // Perform a synchronous wait for the object to enter a signalled state.\n  ASIO_DECL void wait(implementation_type& impl,\n      asio::error_code& ec);\n\n  /// Start an asynchronous wait.\n  template <typename Handler, typename IoExecutor>\n  void async_wait(implementation_type& impl,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    // Allocate and construct an operation to wrap the handler.\n    typedef wait_handler<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, \"object_handle\",\n          &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), \"async_wait\"));\n\n    start_wait_op(impl, p.p);\n    p.v = p.p = 0;\n  }\n\nprivate:\n  // Helper function to start an asynchronous wait operation.\n  ASIO_DECL void start_wait_op(implementation_type& impl, wait_op* op);\n\n  // Helper function to register a wait operation.\n  ASIO_DECL void register_wait_callback(\n      implementation_type& impl, mutex::scoped_lock& lock);\n\n  // Callback function invoked when the registered wait completes.\n  static ASIO_DECL VOID CALLBACK wait_callback(\n      PVOID param, BOOLEAN timeout);\n\n  // The scheduler used to post completions.\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n  scheduler_impl& scheduler_;\n\n  // Mutex to protect access to internal state.\n  mutex mutex_;\n\n  // The head of a linked list of all 
implementations.\n  implementation_type* impl_list_;\n\n  // Flag to indicate that the dispatcher has been shut down.\n  bool shutdown_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_object_handle_service.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)\n\n#endif // ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_static_mutex.hpp",
    "content": "//\n// detail/win_static_mutex.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_STATIC_MUTEX_HPP\n#define ASIO_DETAIL_WIN_STATIC_MUTEX_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS)\n\n#include \"asio/detail/scoped_lock.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct win_static_mutex\n{\n  typedef asio::detail::scoped_lock<win_static_mutex> scoped_lock;\n\n  // Initialise the mutex.\n  ASIO_DECL void init();\n\n  // Initialisation must be performed in a separate function to the \"public\"\n  // init() function since the compiler does not support the use of structured\n  // exceptions and C++ exceptions in the same function.\n  ASIO_DECL int do_init();\n\n  // Lock the mutex.\n  void lock()\n  {\n    ::EnterCriticalSection(&crit_section_);\n  }\n\n  // Unlock the mutex.\n  void unlock()\n  {\n    ::LeaveCriticalSection(&crit_section_);\n  }\n\n  bool initialised_;\n  ::CRITICAL_SECTION crit_section_;\n};\n\n#if defined(UNDER_CE)\n# define ASIO_WIN_STATIC_MUTEX_INIT { false, { 0, 0, 0, 0, 0 } }\n#else // defined(UNDER_CE)\n# define ASIO_WIN_STATIC_MUTEX_INIT { false, { 0, 0, 0, 0, 0, 0 } }\n#endif // defined(UNDER_CE)\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_static_mutex.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_WIN_STATIC_MUTEX_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_thread.hpp",
    "content": "//\n// detail/win_thread.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_THREAD_HPP\n#define ASIO_DETAIL_WIN_THREAD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) \\\n  && !defined(ASIO_WINDOWS_APP) \\\n  && !defined(UNDER_CE)\n\n#include <cstddef>\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nASIO_DECL unsigned int __stdcall win_thread_function(void* arg);\n\n#if defined(WINVER) && (WINVER < 0x0500)\nASIO_DECL void __stdcall apc_function(ULONG data);\n#else\nASIO_DECL void __stdcall apc_function(ULONG_PTR data);\n#endif\n\ntemplate <typename T>\nclass win_thread_base\n{\npublic:\n  static bool terminate_threads()\n  {\n    return ::InterlockedExchangeAdd(&terminate_threads_, 0) != 0;\n  }\n\n  static void set_terminate_threads(bool b)\n  {\n    ::InterlockedExchange(&terminate_threads_, b ? 
1 : 0);\n  }\n\nprivate:\n  static long terminate_threads_;\n};\n\ntemplate <typename T>\nlong win_thread_base<T>::terminate_threads_ = 0;\n\nclass win_thread\n  : private noncopyable,\n    public win_thread_base<win_thread>\n{\npublic:\n  // Constructor.\n  template <typename Function>\n  win_thread(Function f, unsigned int stack_size = 0)\n    : thread_(0),\n      exit_event_(0)\n  {\n    start_thread(new func<Function>(f), stack_size);\n  }\n\n  // Destructor.\n  ASIO_DECL ~win_thread();\n\n  // Wait for the thread to exit.\n  ASIO_DECL void join();\n\n  // Get number of CPUs.\n  ASIO_DECL static std::size_t hardware_concurrency();\n\nprivate:\n  friend ASIO_DECL unsigned int __stdcall win_thread_function(void* arg);\n\n#if defined(WINVER) && (WINVER < 0x0500)\n  friend ASIO_DECL void __stdcall apc_function(ULONG);\n#else\n  friend ASIO_DECL void __stdcall apc_function(ULONG_PTR);\n#endif\n\n  class func_base\n  {\n  public:\n    virtual ~func_base() {}\n    virtual void run() = 0;\n    ::HANDLE entry_event_;\n    ::HANDLE exit_event_;\n  };\n\n  struct auto_func_base_ptr\n  {\n    func_base* ptr;\n    ~auto_func_base_ptr() { delete ptr; }\n  };\n\n  template <typename Function>\n  class func\n    : public func_base\n  {\n  public:\n    func(Function f)\n      : f_(f)\n    {\n    }\n\n    virtual void run()\n    {\n      f_();\n    }\n\n  private:\n    Function f_;\n  };\n\n  ASIO_DECL void start_thread(func_base* arg, unsigned int stack_size);\n\n  ::HANDLE thread_;\n  ::HANDLE exit_event_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_thread.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS)\n       // && !defined(ASIO_WINDOWS_APP)\n       // && !defined(UNDER_CE)\n\n#endif // ASIO_DETAIL_WIN_THREAD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/win_tss_ptr.hpp",
    "content": "//\n// detail/win_tss_ptr.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WIN_TSS_PTR_HPP\n#define ASIO_DETAIL_WIN_TSS_PTR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Helper function to create thread-specific storage.\nASIO_DECL DWORD win_tss_ptr_create();\n\ntemplate <typename T>\nclass win_tss_ptr\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  win_tss_ptr()\n    : tss_key_(win_tss_ptr_create())\n  {\n  }\n\n  // Destructor.\n  ~win_tss_ptr()\n  {\n    ::TlsFree(tss_key_);\n  }\n\n  // Get the value.\n  operator T*() const\n  {\n    return static_cast<T*>(::TlsGetValue(tss_key_));\n  }\n\n  // Set the value.\n  void operator=(T* value)\n  {\n    ::TlsSetValue(tss_key_, value);\n  }\n\nprivate:\n  // Thread-specific storage to allow unlocked access to determine whether a\n  // thread is a member of the pool.\n  DWORD tss_key_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/win_tss_ptr.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS)\n\n#endif // ASIO_DETAIL_WIN_TSS_PTR_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winapp_thread.hpp",
    "content": "//\n// detail/winapp_thread.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINAPP_THREAD_HPP\n#define ASIO_DETAIL_WINAPP_THREAD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) && defined(ASIO_WINDOWS_APP)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nDWORD WINAPI winapp_thread_function(LPVOID arg);\n\nclass winapp_thread\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  template <typename Function>\n  winapp_thread(Function f, unsigned int = 0)\n  {\n    scoped_ptr<func_base> arg(new func<Function>(f));\n    DWORD thread_id = 0;\n    thread_ = ::CreateThread(0, 0, winapp_thread_function,\n        arg.get(), 0, &thread_id);\n    if (!thread_)\n    {\n      DWORD last_error = ::GetLastError();\n      asio::error_code ec(last_error,\n          asio::error::get_system_category());\n      asio::detail::throw_error(ec, \"thread\");\n    }\n    arg.release();\n  }\n\n  // Destructor.\n  ~winapp_thread()\n  {\n    ::CloseHandle(thread_);\n  }\n\n  // Wait for the thread to exit.\n  void join()\n  {\n    ::WaitForSingleObjectEx(thread_, INFINITE, false);\n  }\n\n  // Get number of CPUs.\n  static std::size_t hardware_concurrency()\n  {\n    SYSTEM_INFO system_info;\n    ::GetNativeSystemInfo(&system_info);\n    return system_info.dwNumberOfProcessors;\n  }\n\nprivate:\n  friend DWORD WINAPI winapp_thread_function(LPVOID 
arg);\n\n  class func_base\n  {\n  public:\n    virtual ~func_base() {}\n    virtual void run() = 0;\n  };\n\n  template <typename Function>\n  class func\n    : public func_base\n  {\n  public:\n    func(Function f)\n      : f_(f)\n    {\n    }\n\n    virtual void run()\n    {\n      f_();\n    }\n\n  private:\n    Function f_;\n  };\n\n  ::HANDLE thread_;\n};\n\ninline DWORD WINAPI winapp_thread_function(LPVOID arg)\n{\n  scoped_ptr<winapp_thread::func_base> func(\n      static_cast<winapp_thread::func_base*>(arg));\n  func->run();\n  return 0;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS) && defined(ASIO_WINDOWS_APP)\n\n#endif // ASIO_DETAIL_WINAPP_THREAD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/wince_thread.hpp",
    "content": "//\n// detail/wince_thread.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINCE_THREAD_HPP\n#define ASIO_DETAIL_WINCE_THREAD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) && defined(UNDER_CE)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nDWORD WINAPI wince_thread_function(LPVOID arg);\n\nclass wince_thread\n  : private noncopyable\n{\npublic:\n  // Constructor.\n  template <typename Function>\n  wince_thread(Function f, unsigned int = 0)\n  {\n    scoped_ptr<func_base> arg(new func<Function>(f));\n    DWORD thread_id = 0;\n    thread_ = ::CreateThread(0, 0, wince_thread_function,\n        arg.get(), 0, &thread_id);\n    if (!thread_)\n    {\n      DWORD last_error = ::GetLastError();\n      asio::error_code ec(last_error,\n          asio::error::get_system_category());\n      asio::detail::throw_error(ec, \"thread\");\n    }\n    arg.release();\n  }\n\n  // Destructor.\n  ~wince_thread()\n  {\n    ::CloseHandle(thread_);\n  }\n\n  // Wait for the thread to exit.\n  void join()\n  {\n    ::WaitForSingleObject(thread_, INFINITE);\n  }\n\n  // Get number of CPUs.\n  static std::size_t hardware_concurrency()\n  {\n    SYSTEM_INFO system_info;\n    ::GetSystemInfo(&system_info);\n    return system_info.dwNumberOfProcessors;\n  }\n\nprivate:\n  friend DWORD WINAPI wince_thread_function(LPVOID arg);\n\n  class func_base\n  {\n  
public:\n    virtual ~func_base() {}\n    virtual void run() = 0;\n  };\n\n  template <typename Function>\n  class func\n    : public func_base\n  {\n  public:\n    func(Function f)\n      : f_(f)\n    {\n    }\n\n    virtual void run()\n    {\n      f_();\n    }\n\n  private:\n    Function f_;\n  };\n\n  ::HANDLE thread_;\n};\n\ninline DWORD WINAPI wince_thread_function(LPVOID arg)\n{\n  scoped_ptr<wince_thread::func_base> func(\n      static_cast<wince_thread::func_base*>(arg));\n  func->run();\n  return 0;\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS) && defined(UNDER_CE)\n\n#endif // ASIO_DETAIL_WINCE_THREAD_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_async_manager.hpp",
    "content": "//\n// detail/winrt_async_manager.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP\n#define ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include <future>\n#include \"asio/detail/atomic_count.hpp\"\n#include \"asio/detail/winrt_async_op.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass winrt_async_manager\n  : public execution_context_service_base<winrt_async_manager>\n{\npublic:\n  // Constructor.\n  winrt_async_manager(execution_context& context)\n    : execution_context_service_base<winrt_async_manager>(context),\n      scheduler_(use_service<scheduler_impl>(context)),\n      outstanding_ops_(1)\n  {\n  }\n\n  // Destructor.\n  ~winrt_async_manager()\n  {\n  }\n\n  // Destroy all user-defined handler objects owned by the service.\n  void shutdown()\n  {\n    if (--outstanding_ops_ > 0)\n    {\n      // Block until last operation is complete.\n      std::future<void> f = promise_.get_future();\n      f.wait();\n    }\n  }\n\n  void sync(Windows::Foundation::IAsyncAction^ action,\n      asio::error_code& ec)\n  {\n    using namespace Windows::Foundation;\n    using Windows::Foundation::AsyncStatus;\n\n    auto promise = std::make_shared<std::promise<asio::error_code>>();\n    auto future 
= promise->get_future();\n\n    action->Completed = ref new AsyncActionCompletedHandler(\n      [promise](IAsyncAction^ action, AsyncStatus status)\n      {\n        switch (status)\n        {\n        case AsyncStatus::Canceled:\n          promise->set_value(asio::error::operation_aborted);\n          break;\n        case AsyncStatus::Error:\n        case AsyncStatus::Completed:\n        default:\n          asio::error_code ec(\n              action->ErrorCode.Value,\n              asio::system_category());\n          promise->set_value(ec);\n          break;\n        }\n      });\n\n    ec = future.get();\n  }\n\n  template <typename TResult>\n  TResult sync(Windows::Foundation::IAsyncOperation<TResult>^ operation,\n      asio::error_code& ec)\n  {\n    using namespace Windows::Foundation;\n    using Windows::Foundation::AsyncStatus;\n\n    auto promise = std::make_shared<std::promise<asio::error_code>>();\n    auto future = promise->get_future();\n\n    operation->Completed = ref new AsyncOperationCompletedHandler<TResult>(\n      [promise](IAsyncOperation<TResult>^ operation, AsyncStatus status)\n      {\n        switch (status)\n        {\n        case AsyncStatus::Canceled:\n          promise->set_value(asio::error::operation_aborted);\n          break;\n        case AsyncStatus::Error:\n        case AsyncStatus::Completed:\n        default:\n          asio::error_code ec(\n              operation->ErrorCode.Value,\n              asio::system_category());\n          promise->set_value(ec);\n          break;\n        }\n      });\n\n    ec = future.get();\n    return operation->GetResults();\n  }\n\n  template <typename TResult, typename TProgress>\n  TResult sync(\n      Windows::Foundation::IAsyncOperationWithProgress<\n        TResult, TProgress>^ operation,\n      asio::error_code& ec)\n  {\n    using namespace Windows::Foundation;\n    using Windows::Foundation::AsyncStatus;\n\n    auto promise = std::make_shared<std::promise<asio::error_code>>();\n    
auto future = promise->get_future();\n\n    operation->Completed\n      = ref new AsyncOperationWithProgressCompletedHandler<TResult, TProgress>(\n        [promise](IAsyncOperationWithProgress<TResult, TProgress>^ operation,\n          AsyncStatus status)\n        {\n          switch (status)\n          {\n          case AsyncStatus::Canceled:\n            promise->set_value(asio::error::operation_aborted);\n            break;\n          case AsyncStatus::Started:\n            break;\n          case AsyncStatus::Error:\n          case AsyncStatus::Completed:\n          default:\n            asio::error_code ec(\n                operation->ErrorCode.Value,\n                asio::system_category());\n            promise->set_value(ec);\n            break;\n          }\n        });\n\n    ec = future.get();\n    return operation->GetResults();\n  }\n\n  void async(Windows::Foundation::IAsyncAction^ action,\n      winrt_async_op<void>* handler)\n  {\n    using namespace Windows::Foundation;\n    using Windows::Foundation::AsyncStatus;\n\n    auto on_completed = ref new AsyncActionCompletedHandler(\n      [this, handler](IAsyncAction^ action, AsyncStatus status)\n      {\n        switch (status)\n        {\n        case AsyncStatus::Canceled:\n          handler->ec_ = asio::error::operation_aborted;\n          break;\n        case AsyncStatus::Started:\n          return;\n        case AsyncStatus::Completed:\n        case AsyncStatus::Error:\n        default:\n          handler->ec_ = asio::error_code(\n              action->ErrorCode.Value,\n              asio::system_category());\n          break;\n        }\n        scheduler_.post_deferred_completion(handler);\n        if (--outstanding_ops_ == 0)\n          promise_.set_value();\n      });\n\n    scheduler_.work_started();\n    ++outstanding_ops_;\n    action->Completed = on_completed;\n  }\n\n  template <typename TResult>\n  void async(Windows::Foundation::IAsyncOperation<TResult>^ operation,\n      
winrt_async_op<TResult>* handler)\n  {\n    using namespace Windows::Foundation;\n    using Windows::Foundation::AsyncStatus;\n\n    auto on_completed = ref new AsyncOperationCompletedHandler<TResult>(\n      [this, handler](IAsyncOperation<TResult>^ operation, AsyncStatus status)\n      {\n        switch (status)\n        {\n        case AsyncStatus::Canceled:\n          handler->ec_ = asio::error::operation_aborted;\n          break;\n        case AsyncStatus::Started:\n          return;\n        case AsyncStatus::Completed:\n          handler->result_ = operation->GetResults();\n          // Fall through.\n        case AsyncStatus::Error:\n        default:\n          handler->ec_ = asio::error_code(\n              operation->ErrorCode.Value,\n              asio::system_category());\n          break;\n        }\n        scheduler_.post_deferred_completion(handler);\n        if (--outstanding_ops_ == 0)\n          promise_.set_value();\n      });\n\n    scheduler_.work_started();\n    ++outstanding_ops_;\n    operation->Completed = on_completed;\n  }\n\n  template <typename TResult, typename TProgress>\n  void async(\n      Windows::Foundation::IAsyncOperationWithProgress<\n        TResult, TProgress>^ operation,\n      winrt_async_op<TResult>* handler)\n  {\n    using namespace Windows::Foundation;\n    using Windows::Foundation::AsyncStatus;\n\n    auto on_completed\n      = ref new AsyncOperationWithProgressCompletedHandler<TResult, TProgress>(\n        [this, handler](IAsyncOperationWithProgress<\n          TResult, TProgress>^ operation, AsyncStatus status)\n        {\n          switch (status)\n          {\n          case AsyncStatus::Canceled:\n            handler->ec_ = asio::error::operation_aborted;\n            break;\n          case AsyncStatus::Started:\n            return;\n          case AsyncStatus::Completed:\n            handler->result_ = operation->GetResults();\n            // Fall through.\n          case AsyncStatus::Error:\n          
default:\n            handler->ec_ = asio::error_code(\n                operation->ErrorCode.Value,\n                asio::system_category());\n            break;\n          }\n          scheduler_.post_deferred_completion(handler);\n          if (--outstanding_ops_ == 0)\n            promise_.set_value();\n        });\n\n    scheduler_.work_started();\n    ++outstanding_ops_;\n    operation->Completed = on_completed;\n  }\n\nprivate:\n  // The scheduler implementation used to post completed handlers.\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n  scheduler_impl& scheduler_;\n\n  // Count of outstanding operations.\n  atomic_count outstanding_ops_;\n\n  // Used to keep wait for outstanding operations to complete.\n  std::promise<void> promise_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_async_op.hpp",
    "content": "//\n// detail/winrt_async_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_ASYNC_OP_HPP\n#define ASIO_DETAIL_WINRT_ASYNC_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/operation.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename TResult>\nclass winrt_async_op\n  : public operation\n{\npublic:\n  // The error code to be passed to the completion handler.\n  asio::error_code ec_;\n\n  // The result of the operation, to be passed to the completion handler.\n  TResult result_;\n\nprotected:\n  winrt_async_op(func_type complete_func)\n    : operation(complete_func),\n      result_()\n  {\n  }\n};\n\ntemplate <>\nclass winrt_async_op<void>\n  : public operation\n{\npublic:\n  // The error code to be passed to the completion handler.\n  asio::error_code ec_;\n\nprotected:\n  winrt_async_op(func_type complete_func)\n    : operation(complete_func)\n  {\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_WINRT_ASYNC_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_resolve_op.hpp",
    "content": "//\n// detail/winrt_resolve_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_RESOLVE_OP_HPP\n#define ASIO_DETAIL_WINRT_RESOLVE_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/winrt_async_op.hpp\"\n#include \"asio/ip/basic_resolver_results.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol, typename Handler, typename IoExecutor>\nclass winrt_resolve_op :\n  public winrt_async_op<\n    Windows::Foundation::Collections::IVectorView<\n      Windows::Networking::EndpointPair^>^>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(winrt_resolve_op);\n\n  typedef typename Protocol::endpoint endpoint_type;\n  typedef asio::ip::basic_resolver_query<Protocol> query_type;\n  typedef asio::ip::basic_resolver_results<Protocol> results_type;\n\n  winrt_resolve_op(const query_type& query,\n      Handler& handler, const IoExecutor& io_ex)\n    : winrt_async_op<\n        Windows::Foundation::Collections::IVectorView<\n          Windows::Networking::EndpointPair^>^>(\n            &winrt_resolve_op::do_complete),\n      query_(query),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* 
owner, operation* base,\n      const asio::error_code&, std::size_t)\n  {\n    // Take ownership of the operation object.\n    winrt_resolve_op* o(static_cast<winrt_resolve_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    results_type results = results_type();\n    if (!o->ec_)\n    {\n      try\n      {\n        results = results_type::create(o->result_, o->query_.hints(),\n            o->query_.host_name(), o->query_.service_name());\n      }\n      catch (Platform::Exception^ e)\n      {\n        o->ec_ = asio::error_code(e->HResult,\n            asio::system_category());\n      }\n    }\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, results_type>\n      handler(o->handler_, o->ec_, results);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, \"...\"));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  query_type query_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_RESOLVE_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_resolver_service.hpp",
    "content": "//\n// detail/winrt_resolver_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP\n#define ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/ip/basic_resolver_query.hpp\"\n#include \"asio/ip/basic_resolver_results.hpp\"\n#include \"asio/post.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/winrt_async_manager.hpp\"\n#include \"asio/detail/winrt_resolve_op.hpp\"\n#include \"asio/detail/winrt_utils.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol>\nclass winrt_resolver_service :\n  public execution_context_service_base<winrt_resolver_service<Protocol> >\n{\npublic:\n  // The implementation type of the resolver. 
A cancellation token is used to\n  // indicate to the asynchronous operation that the operation has been\n  // cancelled.\n  typedef socket_ops::shared_cancel_token_type implementation_type;\n\n  // The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  // The query type.\n  typedef asio::ip::basic_resolver_query<Protocol> query_type;\n\n  // The results type.\n  typedef asio::ip::basic_resolver_results<Protocol> results_type;\n\n  // Constructor.\n  winrt_resolver_service(execution_context& context)\n    : execution_context_service_base<\n        winrt_resolver_service<Protocol> >(context),\n      scheduler_(use_service<scheduler_impl>(context)),\n      async_manager_(use_service<winrt_async_manager>(context))\n  {\n  }\n\n  // Destructor.\n  ~winrt_resolver_service()\n  {\n  }\n\n  // Destroy all user-defined handler objects owned by the service.\n  void shutdown()\n  {\n  }\n\n  // Perform any fork-related housekeeping.\n  void notify_fork(execution_context::fork_event)\n  {\n  }\n\n  // Construct a new resolver implementation.\n  void construct(implementation_type&)\n  {\n  }\n\n  // Move-construct a new resolver implementation.\n  void move_construct(implementation_type&,\n      implementation_type&)\n  {\n  }\n\n  // Move-assign from another resolver implementation.\n  void move_assign(implementation_type&,\n      winrt_resolver_service&, implementation_type&)\n  {\n  }\n\n  // Destroy a resolver implementation.\n  void destroy(implementation_type&)\n  {\n  }\n\n  // Cancel pending asynchronous operations.\n  void cancel(implementation_type&)\n  {\n  }\n\n  // Resolve a query to a list of entries.\n  results_type resolve(implementation_type&,\n      const query_type& query, asio::error_code& ec)\n  {\n    try\n    {\n      using namespace Windows::Networking::Sockets;\n      auto endpoint_pairs = async_manager_.sync(\n          DatagramSocket::GetEndpointPairsAsync(\n            winrt_utils::host_name(query.host_name()),\n            
winrt_utils::string(query.service_name())), ec);\n\n      if (ec)\n        return results_type();\n\n      return results_type::create(\n          endpoint_pairs, query.hints(),\n          query.host_name(), query.service_name());\n    }\n    catch (Platform::Exception^ e)\n    {\n      ec = asio::error_code(e->HResult,\n          asio::system_category());\n      return results_type();\n    }\n  }\n\n  // Asynchronously resolve a query to a list of entries.\n  template <typename Handler, typename IoExecutor>\n  void async_resolve(implementation_type& impl, const query_type& query,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef winrt_resolve_op<Protocol, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(query, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((scheduler_.context(),\n          *p.p, \"resolver\", &impl, 0, \"async_resolve\"));\n    (void)impl;\n\n    try\n    {\n      using namespace Windows::Networking::Sockets;\n      async_manager_.async(DatagramSocket::GetEndpointPairsAsync(\n            winrt_utils::host_name(query.host_name()),\n            winrt_utils::string(query.service_name())), p.p);\n      p.v = p.p = 0;\n    }\n    catch (Platform::Exception^ e)\n    {\n      p.p->ec_ = asio::error_code(\n          e->HResult, asio::system_category());\n      scheduler_.post_immediate_completion(p.p, is_continuation);\n      p.v = p.p = 0;\n    }\n  }\n\n  // Resolve an endpoint to a list of entries.\n  results_type resolve(implementation_type&,\n      const endpoint_type&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return results_type();\n  }\n\n  // Asynchronously resolve an endpoint to a list of entries.\n  template <typename Handler, typename 
IoExecutor>\n  void async_resolve(implementation_type&, const endpoint_type&,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const results_type results;\n    asio::post(io_ex, detail::bind_handler(handler, ec, results));\n  }\n\nprivate:\n  // The scheduler implementation used for delivering completions.\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n  scheduler_impl& scheduler_;\n\n  winrt_async_manager& async_manager_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_socket_connect_op.hpp",
    "content": "//\n// detail/winrt_socket_connect_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP\n#define ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/winrt_async_op.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler, typename IoExecutor>\nclass winrt_socket_connect_op :\n  public winrt_async_op<void>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(winrt_socket_connect_op);\n\n  winrt_socket_connect_op(Handler& handler, const IoExecutor& io_ex)\n    : winrt_async_op<void>(&winrt_socket_connect_op::do_complete),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code&, std::size_t)\n  {\n    // Take ownership of the operation object.\n    winrt_socket_connect_op* o(static_cast<winrt_socket_connect_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n    // Make a copy of the handler 
so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder1<Handler, asio::error_code>\n      handler(o->handler_, o->ec_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_socket_recv_op.hpp",
    "content": "//\n// detail/winrt_socket_recv_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP\n#define ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/winrt_async_op.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence, typename Handler, typename IoExecutor>\nclass winrt_socket_recv_op :\n  public winrt_async_op<Windows::Storage::Streams::IBuffer^>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(winrt_socket_recv_op);\n\n  winrt_socket_recv_op(const MutableBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n    : winrt_async_op<Windows::Storage::Streams::IBuffer^>(\n          &winrt_socket_recv_op::do_complete),\n      buffers_(buffers),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code&, std::size_t)\n  {\n    // Take ownership of the operation object.\n    winrt_socket_recv_op* o(static_cast<winrt_socket_recv_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o 
};\n    handler_work<Handler, IoExecutor> w(o->handler_, o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    // Check whether buffers are still valid.\n    if (owner)\n    {\n      buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::validate(o->buffers_);\n    }\n#endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n\n    std::size_t bytes_transferred = o->result_ ? o->result_->Length : 0;\n    if (bytes_transferred == 0 && !o->ec_ &&\n        !buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::all_empty(o->buffers_))\n    {\n      o->ec_ = asio::error::eof;\n    }\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, bytes_transferred);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  MutableBufferSequence buffers_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_socket_send_op.hpp",
    "content": "//\n// detail/winrt_socket_send_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP\n#define ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/winrt_async_op.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename ConstBufferSequence, typename Handler, typename IoExecutor>\nclass winrt_socket_send_op :\n  public winrt_async_op<unsigned int>\n{\npublic:\n  ASIO_DEFINE_HANDLER_PTR(winrt_socket_send_op);\n\n  winrt_socket_send_op(const ConstBufferSequence& buffers,\n      Handler& handler, const IoExecutor& io_ex)\n    : winrt_async_op<unsigned int>(&winrt_socket_send_op::do_complete),\n      buffers_(buffers),\n      handler_(ASIO_MOVE_CAST(Handler)(handler)),\n      io_executor_(io_ex)\n  {\n    handler_work<Handler, IoExecutor>::start(handler_, io_executor_);\n  }\n\n  static void do_complete(void* owner, operation* base,\n      const asio::error_code&, std::size_t)\n  {\n    // Take ownership of the operation object.\n    winrt_socket_send_op* o(static_cast<winrt_socket_send_op*>(base));\n    ptr p = { asio::detail::addressof(o->handler_), o, o };\n    handler_work<Handler, IoExecutor> w(o->handler_, 
o->io_executor_);\n\n    ASIO_HANDLER_COMPLETION((*o));\n\n#if defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n    // Check whether buffers are still valid.\n    if (owner)\n    {\n      buffer_sequence_adapter<asio::const_buffer,\n          ConstBufferSequence>::validate(o->buffers_);\n    }\n#endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING)\n\n    // Make a copy of the handler so that the memory can be deallocated before\n    // the upcall is made. Even if we're not about to make an upcall, a\n    // sub-object of the handler may be the true owner of the memory associated\n    // with the handler. Consequently, a local copy of the handler is required\n    // to ensure that any owning sub-object remains valid until after we have\n    // deallocated the memory here.\n    detail::binder2<Handler, asio::error_code, std::size_t>\n      handler(o->handler_, o->ec_, o->result_);\n    p.h = asio::detail::addressof(handler.handler_);\n    p.reset();\n\n    // Make the upcall if required.\n    if (owner)\n    {\n      fenced_block b(fenced_block::half);\n      ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));\n      w.complete(handler, handler.handler_);\n      ASIO_HANDLER_INVOCATION_END;\n    }\n  }\n\nprivate:\n  ConstBufferSequence buffers_;\n  Handler handler_;\n  IoExecutor io_executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_ssocket_service.hpp",
    "content": "//\n// detail/winrt_ssocket_service.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP\n#define ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/winrt_socket_connect_op.hpp\"\n#include \"asio/detail/winrt_ssocket_service_base.hpp\"\n#include \"asio/detail/winrt_utils.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Protocol>\nclass winrt_ssocket_service :\n  public execution_context_service_base<winrt_ssocket_service<Protocol> >,\n  public winrt_ssocket_service_base\n{\npublic:\n  // The protocol type.\n  typedef Protocol protocol_type;\n\n  // The endpoint type.\n  typedef typename Protocol::endpoint endpoint_type;\n\n  // The native type of a socket.\n  typedef Windows::Networking::Sockets::StreamSocket^ native_handle_type;\n\n  // The implementation type of the socket.\n  struct implementation_type : base_implementation_type\n  {\n    // Default constructor.\n    implementation_type()\n      : base_implementation_type(),\n        protocol_(endpoint_type().protocol())\n    {\n    }\n\n    // The protocol associated with the socket.\n    protocol_type protocol_;\n  };\n\n  // Constructor.\n  winrt_ssocket_service(execution_context& context)\n    : execution_context_service_base<winrt_ssocket_service<Protocol> >(context),\n      winrt_ssocket_service_base(context)\n  {\n  }\n\n  // Destroy all 
user-defined handler objects owned by the service.\n  void shutdown()\n  {\n    this->base_shutdown();\n  }\n\n  // Move-construct a new socket implementation.\n  void move_construct(implementation_type& impl,\n      implementation_type& other_impl) ASIO_NOEXCEPT\n  {\n    this->base_move_construct(impl, other_impl);\n\n    impl.protocol_ = other_impl.protocol_;\n    other_impl.protocol_ = endpoint_type().protocol();\n  }\n\n  // Move-assign from another socket implementation.\n  void move_assign(implementation_type& impl,\n      winrt_ssocket_service& other_service,\n      implementation_type& other_impl)\n  {\n    this->base_move_assign(impl, other_service, other_impl);\n\n    impl.protocol_ = other_impl.protocol_;\n    other_impl.protocol_ = endpoint_type().protocol();\n  }\n\n  // Move-construct a new socket implementation from another protocol type.\n  template <typename Protocol1>\n  void converting_move_construct(implementation_type& impl,\n      winrt_ssocket_service<Protocol1>&,\n      typename winrt_ssocket_service<\n        Protocol1>::implementation_type& other_impl)\n  {\n    this->base_move_construct(impl, other_impl);\n\n    impl.protocol_ = protocol_type(other_impl.protocol_);\n    other_impl.protocol_ = typename Protocol1::endpoint().protocol();\n  }\n\n  // Open a new socket implementation.\n  asio::error_code open(implementation_type& impl,\n      const protocol_type& protocol, asio::error_code& ec)\n  {\n    if (is_open(impl))\n    {\n      ec = asio::error::already_open;\n      return ec;\n    }\n\n    try\n    {\n      impl.socket_ = ref new Windows::Networking::Sockets::StreamSocket;\n      impl.protocol_ = protocol;\n      ec = asio::error_code();\n    }\n    catch (Platform::Exception^ e)\n    {\n      ec = asio::error_code(e->HResult,\n            asio::system_category());\n    }\n\n    return ec;\n  }\n\n  // Assign a native socket to a socket implementation.\n  asio::error_code assign(implementation_type& impl,\n      const 
protocol_type& protocol, const native_handle_type& native_socket,\n      asio::error_code& ec)\n  {\n    if (is_open(impl))\n    {\n      ec = asio::error::already_open;\n      return ec;\n    }\n\n    impl.socket_ = native_socket;\n    impl.protocol_ = protocol;\n    ec = asio::error_code();\n\n    return ec;\n  }\n\n  // Bind the socket to the specified local endpoint.\n  asio::error_code bind(implementation_type&,\n      const endpoint_type&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Get the local endpoint.\n  endpoint_type local_endpoint(const implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    endpoint_type endpoint;\n    endpoint.resize(do_get_endpoint(impl, true,\n          endpoint.data(), endpoint.size(), ec));\n    return endpoint;\n  }\n\n  // Get the remote endpoint.\n  endpoint_type remote_endpoint(const implementation_type& impl,\n      asio::error_code& ec) const\n  {\n    endpoint_type endpoint;\n    endpoint.resize(do_get_endpoint(impl, false,\n          endpoint.data(), endpoint.size(), ec));\n    return endpoint;\n  }\n\n  // Disable sends or receives on the socket.\n  asio::error_code shutdown(implementation_type&,\n      socket_base::shutdown_type, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Set a socket option.\n  template <typename Option>\n  asio::error_code set_option(implementation_type& impl,\n      const Option& option, asio::error_code& ec)\n  {\n    return do_set_option(impl, option.level(impl.protocol_),\n        option.name(impl.protocol_), option.data(impl.protocol_),\n        option.size(impl.protocol_), ec);\n  }\n\n  // Get a socket option.\n  template <typename Option>\n  asio::error_code get_option(const implementation_type& impl,\n      Option& option, asio::error_code& ec) const\n  {\n    std::size_t size = option.size(impl.protocol_);\n    do_get_option(impl, 
option.level(impl.protocol_),\n        option.name(impl.protocol_),\n        option.data(impl.protocol_), &size, ec);\n    if (!ec)\n      option.resize(impl.protocol_, size);\n    return ec;\n  }\n\n  // Connect the socket to the specified endpoint.\n  asio::error_code connect(implementation_type& impl,\n      const endpoint_type& peer_endpoint, asio::error_code& ec)\n  {\n    return do_connect(impl, peer_endpoint.data(), ec);\n  }\n\n  // Start an asynchronous connect.\n  template <typename Handler, typename IoExecutor>\n  void async_connect(implementation_type& impl,\n      const endpoint_type& peer_endpoint,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef winrt_socket_connect_op<Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(handler, io_ex);\n\n    ASIO_HANDLER_CREATION((scheduler_.context(),\n          *p.p, \"socket\", &impl, 0, \"async_connect\"));\n\n    start_connect_op(impl, peer_endpoint.data(), p.p, is_continuation);\n    p.v = p.p = 0;\n  }\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_ssocket_service_base.hpp",
    "content": "//\n// detail/winrt_ssocket_service_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP\n#define ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/buffer.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/socket_base.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/winrt_async_manager.hpp\"\n#include \"asio/detail/winrt_socket_recv_op.hpp\"\n#include \"asio/detail/winrt_socket_send_op.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass winrt_ssocket_service_base\n{\npublic:\n  // The native type of a socket.\n  typedef Windows::Networking::Sockets::StreamSocket^ native_handle_type;\n\n  // The implementation type of the socket.\n  struct base_implementation_type\n  {\n    // Default constructor.\n    base_implementation_type()\n      : socket_(nullptr),\n        next_(0),\n        prev_(0)\n    {\n    }\n\n    // The underlying native socket.\n    native_handle_type socket_;\n\n    // Pointers to adjacent socket implementations in linked list.\n    base_implementation_type* next_;\n    base_implementation_type* prev_;\n  };\n\n  // Constructor.\n  ASIO_DECL 
winrt_ssocket_service_base(execution_context& context);\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void base_shutdown();\n\n  // Construct a new socket implementation.\n  ASIO_DECL void construct(base_implementation_type&);\n\n  // Move-construct a new socket implementation.\n  ASIO_DECL void base_move_construct(base_implementation_type& impl,\n      base_implementation_type& other_impl) ASIO_NOEXCEPT;\n\n  // Move-assign from another socket implementation.\n  ASIO_DECL void base_move_assign(base_implementation_type& impl,\n      winrt_ssocket_service_base& other_service,\n      base_implementation_type& other_impl);\n\n  // Destroy a socket implementation.\n  ASIO_DECL void destroy(base_implementation_type& impl);\n\n  // Determine whether the socket is open.\n  bool is_open(const base_implementation_type& impl) const\n  {\n    return impl.socket_ != nullptr;\n  }\n\n  // Destroy a socket implementation.\n  ASIO_DECL asio::error_code close(\n      base_implementation_type& impl, asio::error_code& ec);\n\n  // Release ownership of the socket.\n  ASIO_DECL native_handle_type release(\n      base_implementation_type& impl, asio::error_code& ec);\n\n  // Get the native socket representation.\n  native_handle_type native_handle(base_implementation_type& impl)\n  {\n    return impl.socket_;\n  }\n\n  // Cancel all operations associated with the socket.\n  asio::error_code cancel(base_implementation_type&,\n      asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Determine whether the socket is at the out-of-band data mark.\n  bool at_mark(const base_implementation_type&,\n      asio::error_code& ec) const\n  {\n    ec = asio::error::operation_not_supported;\n    return false;\n  }\n\n  // Determine the number of bytes available for reading.\n  std::size_t available(const base_implementation_type&,\n      asio::error_code& ec) const\n  {\n    ec = 
asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Perform an IO control command on the socket.\n  template <typename IO_Control_Command>\n  asio::error_code io_control(base_implementation_type&,\n      IO_Control_Command&, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the socket.\n  bool non_blocking(const base_implementation_type&) const\n  {\n    return false;\n  }\n\n  // Sets the non-blocking mode of the socket.\n  asio::error_code non_blocking(base_implementation_type&,\n      bool, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Gets the non-blocking mode of the native socket implementation.\n  bool native_non_blocking(const base_implementation_type&) const\n  {\n    return false;\n  }\n\n  // Sets the non-blocking mode of the native socket implementation.\n  asio::error_code native_non_blocking(base_implementation_type&,\n      bool, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return ec;\n  }\n\n  // Send the given data to the peer.\n  template <typename ConstBufferSequence>\n  std::size_t send(base_implementation_type& impl,\n      const ConstBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return do_send(impl,\n        buffer_sequence_adapter<asio::const_buffer,\n          ConstBufferSequence>::first(buffers), flags, ec);\n  }\n\n  // Wait until data can be sent without blocking.\n  std::size_t send(base_implementation_type&, const null_buffers&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Start an asynchronous send. 
The data being sent must be valid for the\n  // lifetime of the asynchronous operation.\n  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>\n  void async_send(base_implementation_type& impl,\n      const ConstBufferSequence& buffers, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef winrt_socket_send_op<ConstBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((scheduler_.context(),\n          *p.p, \"socket\", &impl, 0, \"async_send\"));\n\n    start_send_op(impl,\n        buffer_sequence_adapter<asio::const_buffer,\n          ConstBufferSequence>::first(buffers),\n        flags, p.p, is_continuation);\n    p.v = p.p = 0;\n  }\n\n  // Start an asynchronous wait until data can be sent without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_send(base_implementation_type&, const null_buffers&,\n      socket_base::message_flags, Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex,\n        detail::bind_handler(handler, ec, bytes_transferred));\n  }\n\n  // Receive some data from the peer. 
Returns the number of bytes received.\n  template <typename MutableBufferSequence>\n  std::size_t receive(base_implementation_type& impl,\n      const MutableBufferSequence& buffers,\n      socket_base::message_flags flags, asio::error_code& ec)\n  {\n    return do_receive(impl,\n        buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::first(buffers), flags, ec);\n  }\n\n  // Wait until data can be received without blocking.\n  std::size_t receive(base_implementation_type&, const null_buffers&,\n      socket_base::message_flags, asio::error_code& ec)\n  {\n    ec = asio::error::operation_not_supported;\n    return 0;\n  }\n\n  // Start an asynchronous receive. The buffer for the data being received\n  // must be valid for the lifetime of the asynchronous operation.\n  template <typename MutableBufferSequence,\n      typename Handler, typename IoExecutor>\n  void async_receive(base_implementation_type& impl,\n      const MutableBufferSequence& buffers, socket_base::message_flags flags,\n      Handler& handler, const IoExecutor& io_ex)\n  {\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef winrt_socket_recv_op<MutableBufferSequence, Handler, IoExecutor> op;\n    typename op::ptr p = { asio::detail::addressof(handler),\n      op::ptr::allocate(handler), 0 };\n    p.p = new (p.v) op(buffers, handler, io_ex);\n\n    ASIO_HANDLER_CREATION((scheduler_.context(),\n          *p.p, \"socket\", &impl, 0, \"async_receive\"));\n\n    start_receive_op(impl,\n        buffer_sequence_adapter<asio::mutable_buffer,\n          MutableBufferSequence>::first(buffers),\n        flags, p.p, is_continuation);\n    p.v = p.p = 0;\n  }\n\n  // Wait until data can be received without blocking.\n  template <typename Handler, typename IoExecutor>\n  void async_receive(base_implementation_type&, const null_buffers&,\n      
socket_base::message_flags, Handler& handler, const IoExecutor& io_ex)\n  {\n    asio::error_code ec = asio::error::operation_not_supported;\n    const std::size_t bytes_transferred = 0;\n    asio::post(io_ex,\n        detail::bind_handler(handler, ec, bytes_transferred));\n  }\n\nprotected:\n  // Helper function to obtain endpoints associated with the connection.\n  ASIO_DECL std::size_t do_get_endpoint(\n      const base_implementation_type& impl, bool local,\n      void* addr, std::size_t addr_len, asio::error_code& ec) const;\n\n  // Helper function to set a socket option.\n  ASIO_DECL asio::error_code do_set_option(\n      base_implementation_type& impl,\n      int level, int optname, const void* optval,\n      std::size_t optlen, asio::error_code& ec);\n\n  // Helper function to get a socket option.\n  ASIO_DECL void do_get_option(\n      const base_implementation_type& impl,\n      int level, int optname, void* optval,\n      std::size_t* optlen, asio::error_code& ec) const;\n\n  // Helper function to perform a synchronous connect.\n  ASIO_DECL asio::error_code do_connect(\n      base_implementation_type& impl,\n      const void* addr, asio::error_code& ec);\n\n  // Helper function to start an asynchronous connect.\n  ASIO_DECL void start_connect_op(\n      base_implementation_type& impl, const void* addr,\n      winrt_async_op<void>* op, bool is_continuation);\n\n  // Helper function to perform a synchronous send.\n  ASIO_DECL std::size_t do_send(\n      base_implementation_type& impl, const asio::const_buffer& data,\n      socket_base::message_flags flags, asio::error_code& ec);\n\n  // Helper function to start an asynchronous send.\n  ASIO_DECL void start_send_op(base_implementation_type& impl,\n      const asio::const_buffer& data, socket_base::message_flags flags,\n      winrt_async_op<unsigned int>* op, bool is_continuation);\n\n  // Helper function to perform a synchronous receive.\n  ASIO_DECL std::size_t do_receive(\n      base_implementation_type& 
impl, const asio::mutable_buffer& data,\n      socket_base::message_flags flags, asio::error_code& ec);\n\n  // Helper function to start an asynchronous receive.\n  ASIO_DECL void start_receive_op(base_implementation_type& impl,\n      const asio::mutable_buffer& data, socket_base::message_flags flags,\n      winrt_async_op<Windows::Storage::Streams::IBuffer^>* op,\n      bool is_continuation);\n\n  // The scheduler implementation used for delivering completions.\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n  scheduler_impl& scheduler_;\n\n  // The manager that keeps track of outstanding operations.\n  winrt_async_manager& async_manager_;\n\n  // Mutex to protect access to the linked list of implementations. \n  asio::detail::mutex mutex_;\n\n  // The head of a linked list of all implementations.\n  base_implementation_type* impl_list_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/winrt_ssocket_service_base.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_timer_scheduler.hpp",
    "content": "//\n// detail/winrt_timer_scheduler.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP\n#define ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include <cstddef>\n#include \"asio/detail/event.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/op_queue.hpp\"\n#include \"asio/detail/thread.hpp\"\n#include \"asio/detail/timer_queue_base.hpp\"\n#include \"asio/detail/timer_queue_set.hpp\"\n#include \"asio/detail/wait_op.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else // defined(ASIO_HAS_IOCP)\n# include \"asio/detail/scheduler.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/thread.hpp\"\n#endif // defined(ASIO_HAS_IOCP)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass winrt_timer_scheduler\n  : public execution_context_service_base<winrt_timer_scheduler>\n{\npublic:\n  // Constructor.\n  ASIO_DECL winrt_timer_scheduler(execution_context& context);\n\n  // Destructor.\n  ASIO_DECL ~winrt_timer_scheduler();\n\n  // Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL void shutdown();\n\n  // Recreate internal descriptors following a fork.\n  ASIO_DECL void notify_fork(execution_context::fork_event fork_ev);\n\n  // Initialise the task. 
No effect as this class uses its own thread.\n  ASIO_DECL void init_task();\n\n  // Add a new timer queue to the reactor.\n  template <typename Time_Traits>\n  void add_timer_queue(timer_queue<Time_Traits>& queue);\n\n  // Remove a timer queue from the reactor.\n  template <typename Time_Traits>\n  void remove_timer_queue(timer_queue<Time_Traits>& queue);\n\n  // Schedule a new operation in the given timer queue to expire at the\n  // specified absolute time.\n  template <typename Time_Traits>\n  void schedule_timer(timer_queue<Time_Traits>& queue,\n      const typename Time_Traits::time_type& time,\n      typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op);\n\n  // Cancel the timer operations associated with the given token. Returns the\n  // number of operations that have been posted or dispatched.\n  template <typename Time_Traits>\n  std::size_t cancel_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& timer,\n      std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());\n\n  // Move the timer operations associated with the given timer.\n  template <typename Time_Traits>\n  void move_timer(timer_queue<Time_Traits>& queue,\n      typename timer_queue<Time_Traits>::per_timer_data& to,\n      typename timer_queue<Time_Traits>::per_timer_data& from);\n\nprivate:\n  // Run the select loop in the thread.\n  ASIO_DECL void run_thread();\n\n  // Entry point for the select loop thread.\n  ASIO_DECL static void call_run_thread(winrt_timer_scheduler* reactor);\n\n  // Helper function to add a new timer queue.\n  ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);\n\n  // Helper function to remove a timer queue.\n  ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);\n\n  // The scheduler implementation used to post completions.\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context scheduler_impl;\n#else\n  typedef class scheduler scheduler_impl;\n#endif\n  
scheduler_impl& scheduler_;\n\n  // Mutex used to protect internal variables.\n  asio::detail::mutex mutex_;\n\n  // Event used to wake up background thread.\n  asio::detail::event event_;\n\n  // The timer queues.\n  timer_queue_set timer_queues_;\n\n  // The background thread that is waiting for timers to expire.\n  asio::detail::thread* thread_;\n\n  // Does the background thread need to stop.\n  bool stop_thread_;\n\n  // Whether the service has been shut down.\n  bool shutdown_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/detail/impl/winrt_timer_scheduler.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/winrt_timer_scheduler.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winrt_utils.hpp",
    "content": "//\n// detail/winrt_utils.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINRT_UTILS_HPP\n#define ASIO_DETAIL_WINRT_UTILS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n\n#include <codecvt>\n#include <cstdlib>\n#include <future>\n#include <locale>\n#include <robuffer.h>\n#include <windows.storage.streams.h>\n#include <wrl/implements.h>\n#include \"asio/buffer.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\nnamespace winrt_utils {\n\ninline Platform::String^ string(const char* from)\n{\n  std::wstring tmp(from, from + std::strlen(from));\n  return ref new Platform::String(tmp.c_str());\n}\n\ninline Platform::String^ string(const std::string& from)\n{\n  std::wstring tmp(from.begin(), from.end());\n  return ref new Platform::String(tmp.c_str());\n}\n\ninline std::string string(Platform::String^ from)\n{\n  std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;\n  return converter.to_bytes(from->Data());\n}\n\ninline Platform::String^ string(unsigned short from)\n{\n  return string(std::to_string(from));\n}\n\ntemplate <typename T>\ninline Platform::String^ string(const T& from)\n{\n  return string(from.to_string());\n}\n\ninline int integer(Platform::String^ from)\n{\n  return _wtoi(from->Data());\n}\n\ntemplate <typename T>\ninline Windows::Networking::HostName^ host_name(const T& from)\n{\n  return ref new Windows::Networking::HostName((string)(from));\n}\n\ntemplate <typename 
ConstBufferSequence>\ninline Windows::Storage::Streams::IBuffer^ buffer_dup(\n    const ConstBufferSequence& buffers)\n{\n  using Microsoft::WRL::ComPtr;\n  using asio::buffer_size;\n  std::size_t size = buffer_size(buffers);\n  auto b = ref new Windows::Storage::Streams::Buffer(size);\n  ComPtr<IInspectable> insp = reinterpret_cast<IInspectable*>(b);\n  ComPtr<Windows::Storage::Streams::IBufferByteAccess> bacc;\n  insp.As(&bacc);\n  byte* bytes = nullptr;\n  bacc->Buffer(&bytes);\n  asio::buffer_copy(asio::buffer(bytes, size), buffers);\n  b->Length = size;\n  return b;\n}\n\n} // namespace winrt_utils\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#endif // ASIO_DETAIL_WINRT_UTILS_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/winsock_init.hpp",
    "content": "//\n// detail/winsock_init.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WINSOCK_INIT_HPP\n#define ASIO_DETAIL_WINSOCK_INIT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass winsock_init_base\n{\nprotected:\n  // Structure to track result of initialisation and number of uses. POD is used\n  // to ensure that the values are zero-initialised prior to any code being run.\n  struct data\n  {\n    long init_count_;\n    long result_;\n  };\n\n  ASIO_DECL static void startup(data& d,\n      unsigned char major, unsigned char minor);\n\n  ASIO_DECL static void manual_startup(data& d);\n\n  ASIO_DECL static void cleanup(data& d);\n\n  ASIO_DECL static void manual_cleanup(data& d);\n\n  ASIO_DECL static void throw_on_error(data& d);\n};\n\ntemplate <int Major = 2, int Minor = 0>\nclass winsock_init : private winsock_init_base\n{\npublic:\n  winsock_init(bool allow_throw = true)\n  {\n    startup(data_, Major, Minor);\n    if (allow_throw)\n      throw_on_error(data_);\n  }\n\n  winsock_init(const winsock_init&)\n  {\n    startup(data_, Major, Minor);\n    throw_on_error(data_);\n  }\n\n  ~winsock_init()\n  {\n    cleanup(data_);\n  }\n\n  // This class may be used to indicate that user code will manage Winsock\n  // initialisation and cleanup. 
This may be required in the case of a DLL, for\n  // example, where it is not safe to initialise Winsock from global object\n  // constructors.\n  //\n  // To prevent asio from initialising Winsock, the object must be constructed\n  // before any Asio's own global objects. With MSVC, this may be accomplished\n  // by adding the following code to the DLL:\n  //\n  //   #pragma warning(push)\n  //   #pragma warning(disable:4073)\n  //   #pragma init_seg(lib)\n  //   asio::detail::winsock_init<>::manual manual_winsock_init;\n  //   #pragma warning(pop)\n  class manual\n  {\n  public:\n    manual()\n    {\n      manual_startup(data_);\n    }\n\n    manual(const manual&)\n    {\n      manual_startup(data_);\n    }\n\n    ~manual()\n    {\n      manual_cleanup(data_);\n    }\n  };\n\nprivate:\n  friend class manual;\n  static data data_;\n};\n\ntemplate <int Major, int Minor>\nwinsock_init_base::data winsock_init<Major, Minor>::data_;\n\n// Static variable to ensure that winsock is initialised before main, and\n// therefore before any other threads can get started.\nstatic const winsock_init<>& winsock_init_instance = winsock_init<>(false);\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/detail/impl/winsock_init.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n\n#endif // ASIO_DETAIL_WINSOCK_INIT_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/work_dispatcher.hpp",
    "content": "//\n// detail/work_dispatcher.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WORK_DISPATCHER_HPP\n#define ASIO_DETAIL_WORK_DISPATCHER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/executor_work_guard.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Handler>\nclass work_dispatcher\n{\npublic:\n  template <typename CompletionHandler>\n  explicit work_dispatcher(ASIO_MOVE_ARG(CompletionHandler) handler)\n    : work_((get_associated_executor)(handler)),\n      handler_(ASIO_MOVE_CAST(CompletionHandler)(handler))\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  work_dispatcher(const work_dispatcher& other)\n    : work_(other.work_),\n      handler_(other.handler_)\n  {\n  }\n\n  work_dispatcher(work_dispatcher&& other)\n    : work_(ASIO_MOVE_CAST(executor_work_guard<\n        typename associated_executor<Handler>::type>)(other.work_)),\n      handler_(ASIO_MOVE_CAST(Handler)(other.handler_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()()\n  {\n    typename associated_allocator<Handler>::type alloc(\n        (get_associated_allocator)(handler_));\n    work_.get_executor().dispatch(\n        ASIO_MOVE_CAST(Handler)(handler_), alloc);\n    work_.reset();\n  }\n\nprivate:\n  executor_work_guard<typename associated_executor<Handler>::type> work_;\n  Handler handler_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_WORK_DISPATCHER_HPP\n"
  },
  {
    "path": "src/third_party/asio/detail/wrapped_handler.hpp",
    "content": "//\n// detail/wrapped_handler.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DETAIL_WRAPPED_HANDLER_HPP\n#define ASIO_DETAIL_WRAPPED_HANDLER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nstruct is_continuation_delegated\n{\n  template <typename Dispatcher, typename Handler>\n  bool operator()(Dispatcher&, Handler& handler) const\n  {\n    return asio_handler_cont_helpers::is_continuation(handler);\n  }\n};\n\nstruct is_continuation_if_running\n{\n  template <typename Dispatcher, typename Handler>\n  bool operator()(Dispatcher& dispatcher, Handler&) const\n  {\n    return dispatcher.running_in_this_thread();\n  }\n};\n\ntemplate <typename Dispatcher, typename Handler,\n    typename IsContinuation = is_continuation_delegated>\nclass wrapped_handler\n{\npublic:\n  typedef void result_type;\n\n  wrapped_handler(Dispatcher dispatcher, Handler& handler)\n    : dispatcher_(dispatcher),\n      handler_(ASIO_MOVE_CAST(Handler)(handler))\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  wrapped_handler(const wrapped_handler& other)\n    : dispatcher_(other.dispatcher_),\n      handler_(other.handler_)\n  {\n  }\n\n  wrapped_handler(wrapped_handler&& other)\n    : dispatcher_(other.dispatcher_),\n      handler_(ASIO_MOVE_CAST(Handler)(other.handler_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()()\n  {\n    
dispatcher_.dispatch(ASIO_MOVE_CAST(Handler)(handler_));\n  }\n\n  void operator()() const\n  {\n    dispatcher_.dispatch(handler_);\n  }\n\n  template <typename Arg1>\n  void operator()(const Arg1& arg1)\n  {\n    dispatcher_.dispatch(detail::bind_handler(handler_, arg1));\n  }\n\n  template <typename Arg1>\n  void operator()(const Arg1& arg1) const\n  {\n    dispatcher_.dispatch(detail::bind_handler(handler_, arg1));\n  }\n\n  template <typename Arg1, typename Arg2>\n  void operator()(const Arg1& arg1, const Arg2& arg2)\n  {\n    dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2));\n  }\n\n  template <typename Arg1, typename Arg2>\n  void operator()(const Arg1& arg1, const Arg2& arg2) const\n  {\n    dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2));\n  }\n\n  template <typename Arg1, typename Arg2, typename Arg3>\n  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3)\n  {\n    dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3));\n  }\n\n  template <typename Arg1, typename Arg2, typename Arg3>\n  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) const\n  {\n    dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3));\n  }\n\n  template <typename Arg1, typename Arg2, typename Arg3, typename Arg4>\n  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,\n      const Arg4& arg4)\n  {\n    dispatcher_.dispatch(\n        detail::bind_handler(handler_, arg1, arg2, arg3, arg4));\n  }\n\n  template <typename Arg1, typename Arg2, typename Arg3, typename Arg4>\n  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,\n      const Arg4& arg4) const\n  {\n    dispatcher_.dispatch(\n        detail::bind_handler(handler_, arg1, arg2, arg3, arg4));\n  }\n\n  template <typename Arg1, typename Arg2, typename Arg3, typename Arg4,\n      typename Arg5>\n  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,\n      const Arg4& arg4, 
const Arg5& arg5)\n  {\n    dispatcher_.dispatch(\n        detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5));\n  }\n\n  template <typename Arg1, typename Arg2, typename Arg3, typename Arg4,\n      typename Arg5>\n  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,\n      const Arg4& arg4, const Arg5& arg5) const\n  {\n    dispatcher_.dispatch(\n        detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5));\n  }\n\n//private:\n  Dispatcher dispatcher_;\n  Handler handler_;\n};\n\ntemplate <typename Handler, typename Context>\nclass rewrapped_handler\n{\npublic:\n  explicit rewrapped_handler(Handler& handler, const Context& context)\n    : context_(context),\n      handler_(ASIO_MOVE_CAST(Handler)(handler))\n  {\n  }\n\n  explicit rewrapped_handler(const Handler& handler, const Context& context)\n    : context_(context),\n      handler_(handler)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  rewrapped_handler(const rewrapped_handler& other)\n    : context_(other.context_),\n      handler_(other.handler_)\n  {\n  }\n\n  rewrapped_handler(rewrapped_handler&& other)\n    : context_(ASIO_MOVE_CAST(Context)(other.context_)),\n      handler_(ASIO_MOVE_CAST(Handler)(other.handler_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()()\n  {\n    handler_();\n  }\n\n  void operator()() const\n  {\n    handler_();\n  }\n\n//private:\n  Context context_;\n  Handler handler_;\n};\n\ntemplate <typename Dispatcher, typename Handler, typename IsContinuation>\ninline void* asio_handler_allocate(std::size_t size,\n    wrapped_handler<Dispatcher, Handler, IsContinuation>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Dispatcher, typename Handler, typename IsContinuation>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    wrapped_handler<Dispatcher, Handler, IsContinuation>* this_handler)\n{\n  
asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Dispatcher, typename Handler, typename IsContinuation>\ninline bool asio_handler_is_continuation(\n    wrapped_handler<Dispatcher, Handler, IsContinuation>* this_handler)\n{\n  return IsContinuation()(this_handler->dispatcher_, this_handler->handler_);\n}\n\ntemplate <typename Function, typename Dispatcher,\n    typename Handler, typename IsContinuation>\ninline void asio_handler_invoke(Function& function,\n    wrapped_handler<Dispatcher, Handler, IsContinuation>* this_handler)\n{\n  this_handler->dispatcher_.dispatch(\n      rewrapped_handler<Function, Handler>(\n        function, this_handler->handler_));\n}\n\ntemplate <typename Function, typename Dispatcher,\n    typename Handler, typename IsContinuation>\ninline void asio_handler_invoke(const Function& function,\n    wrapped_handler<Dispatcher, Handler, IsContinuation>* this_handler)\n{\n  this_handler->dispatcher_.dispatch(\n      rewrapped_handler<Function, Handler>(\n        function, this_handler->handler_));\n}\n\ntemplate <typename Handler, typename Context>\ninline void* asio_handler_allocate(std::size_t size,\n    rewrapped_handler<Handler, Context>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->context_);\n}\n\ntemplate <typename Handler, typename Context>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    rewrapped_handler<Handler, Context>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->context_);\n}\n\ntemplate <typename Dispatcher, typename Context>\ninline bool asio_handler_is_continuation(\n    rewrapped_handler<Dispatcher, Context>* this_handler)\n{\n  return asio_handler_cont_helpers::is_continuation(\n      this_handler->context_);\n}\n\ntemplate <typename Function, typename Handler, typename Context>\ninline void asio_handler_invoke(Function& function,\n    
rewrapped_handler<Handler, Context>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->context_);\n}\n\ntemplate <typename Function, typename Handler, typename Context>\ninline void asio_handler_invoke(const Function& function,\n    rewrapped_handler<Handler, Context>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->context_);\n}\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_DETAIL_WRAPPED_HANDLER_HPP\n"
  },
  {
    "path": "src/third_party/asio/dispatch.hpp",
    "content": "//\n// dispatch.hpp\n// ~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_DISPATCH_HPP\n#define ASIO_DISPATCH_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/is_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Submits a completion token or function object for execution.\n/**\n * This function submits an object for execution using the object's associated\n * executor. The function object may be called from the current thread prior to\n * returning from <tt>dispatch()</tt>. Otherwise, it is queued for execution.\n *\n * This function has the following effects:\n *\n * @li Constructs a function object handler of type @c Handler, initialized\n * with <tt>handler(forward<CompletionToken>(token))</tt>.\n *\n * @li Constructs an object @c result of type <tt>async_result<Handler></tt>,\n * initializing the object as <tt>result(handler)</tt>.\n *\n * @li Obtains the handler's associated executor object @c ex by performing\n * <tt>get_associated_executor(handler)</tt>.\n *\n * @li Obtains the handler's associated allocator object @c alloc by performing\n * <tt>get_associated_allocator(handler)</tt>.\n *\n * @li Performs <tt>ex.dispatch(std::move(handler), alloc)</tt>.\n *\n * @li Returns <tt>result.get()</tt>.\n */\ntemplate <ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) dispatch(\n    ASIO_MOVE_ARG(CompletionToken) token);\n\n/// Submits a completion token or function object for execution.\n/**\n * 
This function submits an object for execution using the specified executor.\n * The function object may be called from the current thread prior to returning\n * from <tt>dispatch()</tt>. Otherwise, it is queued for execution.\n *\n * This function has the following effects:\n *\n * @li Constructs a function object handler of type @c Handler, initialized\n * with <tt>handler(forward<CompletionToken>(token))</tt>.\n *\n * @li Constructs an object @c result of type <tt>async_result<Handler></tt>,\n * initializing the object as <tt>result(handler)</tt>.\n *\n * @li Obtains the handler's associated executor object @c ex1 by performing\n * <tt>get_associated_executor(handler)</tt>.\n *\n * @li Creates a work object @c w by performing <tt>make_work(ex1)</tt>.\n *\n * @li Obtains the handler's associated allocator object @c alloc by performing\n * <tt>get_associated_allocator(handler)</tt>.\n *\n * @li Constructs a function object @c f with a function call operator that\n * performs <tt>ex1.dispatch(std::move(handler), alloc)</tt> followed by\n * <tt>w.reset()</tt>.\n *\n * @li Performs <tt>Executor(ex).dispatch(std::move(f), alloc)</tt>.\n *\n * @li Returns <tt>result.get()</tt>.\n */\ntemplate <typename Executor,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken\n      ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) dispatch(\n    const Executor& ex,\n    ASIO_MOVE_ARG(CompletionToken) token\n      ASIO_DEFAULT_COMPLETION_TOKEN(Executor),\n    typename enable_if<is_executor<Executor>::value>::type* = 0);\n\n/// Submits a completion token or function object for execution.\n/**\n * @returns <tt>dispatch(ctx.get_executor(),\n * forward<CompletionToken>(token))</tt>.\n */\ntemplate <typename ExecutionContext,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken\n      ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n        typename ExecutionContext::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) 
dispatch(\n    ExecutionContext& ctx,\n    ASIO_MOVE_ARG(CompletionToken) token\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename ExecutionContext::executor_type),\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type* = 0);\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/dispatch.hpp\"\n\n#endif // ASIO_DISPATCH_HPP\n"
  },
  {
    "path": "src/third_party/asio/error.hpp",
    "content": "//\n// error.hpp\n// ~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_ERROR_HPP\n#define ASIO_ERROR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/system_error.hpp\"\n#if defined(ASIO_WINDOWS) \\\n  || defined(__CYGWIN__) \\\n  || defined(ASIO_WINDOWS_RUNTIME)\n# include <winerror.h>\n#else\n# include <cerrno>\n# include <netdb.h>\n#endif\n\n#if defined(GENERATING_DOCUMENTATION)\n/// INTERNAL ONLY.\n# define ASIO_NATIVE_ERROR(e) implementation_defined\n/// INTERNAL ONLY.\n# define ASIO_SOCKET_ERROR(e) implementation_defined\n/// INTERNAL ONLY.\n# define ASIO_NETDB_ERROR(e) implementation_defined\n/// INTERNAL ONLY.\n# define ASIO_GETADDRINFO_ERROR(e) implementation_defined\n/// INTERNAL ONLY.\n# define ASIO_WIN_OR_POSIX(e_win, e_posix) implementation_defined\n#elif defined(ASIO_WINDOWS_RUNTIME)\n# define ASIO_NATIVE_ERROR(e) __HRESULT_FROM_WIN32(e)\n# define ASIO_SOCKET_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e)\n# define ASIO_NETDB_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e)\n# define ASIO_GETADDRINFO_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e)\n# define ASIO_WIN_OR_POSIX(e_win, e_posix) e_win\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# define ASIO_NATIVE_ERROR(e) e\n# define ASIO_SOCKET_ERROR(e) WSA ## e\n# define ASIO_NETDB_ERROR(e) WSA ## e\n# define ASIO_GETADDRINFO_ERROR(e) WSA ## e\n# define ASIO_WIN_OR_POSIX(e_win, e_posix) e_win\n#else\n# define ASIO_NATIVE_ERROR(e) e\n# define ASIO_SOCKET_ERROR(e) e\n# define ASIO_NETDB_ERROR(e) e\n# define ASIO_GETADDRINFO_ERROR(e) e\n# define ASIO_WIN_OR_POSIX(e_win, e_posix) e_posix\n#endif\n\n#include 
\"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace error {\n\nenum basic_errors\n{\n  /// Permission denied.\n  access_denied = ASIO_SOCKET_ERROR(EACCES),\n\n  /// Address family not supported by protocol.\n  address_family_not_supported = ASIO_SOCKET_ERROR(EAFNOSUPPORT),\n\n  /// Address already in use.\n  address_in_use = ASIO_SOCKET_ERROR(EADDRINUSE),\n\n  /// Transport endpoint is already connected.\n  already_connected = ASIO_SOCKET_ERROR(EISCONN),\n\n  /// Operation already in progress.\n  already_started = ASIO_SOCKET_ERROR(EALREADY),\n\n  /// Broken pipe.\n  broken_pipe = ASIO_WIN_OR_POSIX(\n      ASIO_NATIVE_ERROR(ERROR_BROKEN_PIPE),\n      ASIO_NATIVE_ERROR(EPIPE)),\n\n  /// A connection has been aborted.\n  connection_aborted = ASIO_SOCKET_ERROR(ECONNABORTED),\n\n  /// Connection refused.\n  connection_refused = ASIO_SOCKET_ERROR(ECONNREFUSED),\n\n  /// Connection reset by peer.\n  connection_reset = ASIO_SOCKET_ERROR(ECONNRESET),\n\n  /// Bad file descriptor.\n  bad_descriptor = ASIO_SOCKET_ERROR(EBADF),\n\n  /// Bad address.\n  fault = ASIO_SOCKET_ERROR(EFAULT),\n\n  /// No route to host.\n  host_unreachable = ASIO_SOCKET_ERROR(EHOSTUNREACH),\n\n  /// Operation now in progress.\n  in_progress = ASIO_SOCKET_ERROR(EINPROGRESS),\n\n  /// Interrupted system call.\n  interrupted = ASIO_SOCKET_ERROR(EINTR),\n\n  /// Invalid argument.\n  invalid_argument = ASIO_SOCKET_ERROR(EINVAL),\n\n  /// Message too long.\n  message_size = ASIO_SOCKET_ERROR(EMSGSIZE),\n\n  /// The name was too long.\n  name_too_long = ASIO_SOCKET_ERROR(ENAMETOOLONG),\n\n  /// Network is down.\n  network_down = ASIO_SOCKET_ERROR(ENETDOWN),\n\n  /// Network dropped connection on reset.\n  network_reset = ASIO_SOCKET_ERROR(ENETRESET),\n\n  /// Network is unreachable.\n  network_unreachable = ASIO_SOCKET_ERROR(ENETUNREACH),\n\n  /// Too many open files.\n  no_descriptors = ASIO_SOCKET_ERROR(EMFILE),\n\n  /// No buffer space available.\n  no_buffer_space = 
ASIO_SOCKET_ERROR(ENOBUFS),\n\n  /// Cannot allocate memory.\n  no_memory = ASIO_WIN_OR_POSIX(\n      ASIO_NATIVE_ERROR(ERROR_OUTOFMEMORY),\n      ASIO_NATIVE_ERROR(ENOMEM)),\n\n  /// Operation not permitted.\n  no_permission = ASIO_WIN_OR_POSIX(\n      ASIO_NATIVE_ERROR(ERROR_ACCESS_DENIED),\n      ASIO_NATIVE_ERROR(EPERM)),\n\n  /// Protocol not available.\n  no_protocol_option = ASIO_SOCKET_ERROR(ENOPROTOOPT),\n\n  /// No such device.\n  no_such_device = ASIO_WIN_OR_POSIX(\n      ASIO_NATIVE_ERROR(ERROR_BAD_UNIT),\n      ASIO_NATIVE_ERROR(ENODEV)),\n\n  /// Transport endpoint is not connected.\n  not_connected = ASIO_SOCKET_ERROR(ENOTCONN),\n\n  /// Socket operation on non-socket.\n  not_socket = ASIO_SOCKET_ERROR(ENOTSOCK),\n\n  /// Operation cancelled.\n  operation_aborted = ASIO_WIN_OR_POSIX(\n      ASIO_NATIVE_ERROR(ERROR_OPERATION_ABORTED),\n      ASIO_NATIVE_ERROR(ECANCELED)),\n\n  /// Operation not supported.\n  operation_not_supported = ASIO_SOCKET_ERROR(EOPNOTSUPP),\n\n  /// Cannot send after transport endpoint shutdown.\n  shut_down = ASIO_SOCKET_ERROR(ESHUTDOWN),\n\n  /// Connection timed out.\n  timed_out = ASIO_SOCKET_ERROR(ETIMEDOUT),\n\n  /// Resource temporarily unavailable.\n  try_again = ASIO_WIN_OR_POSIX(\n      ASIO_NATIVE_ERROR(ERROR_RETRY),\n      ASIO_NATIVE_ERROR(EAGAIN)),\n\n  /// The socket is marked non-blocking and the requested operation would block.\n  would_block = ASIO_SOCKET_ERROR(EWOULDBLOCK)\n};\n\nenum netdb_errors\n{\n  /// Host not found (authoritative).\n  host_not_found = ASIO_NETDB_ERROR(HOST_NOT_FOUND),\n\n  /// Host not found (non-authoritative).\n  host_not_found_try_again = ASIO_NETDB_ERROR(TRY_AGAIN),\n\n  /// The query is valid but does not have associated address data.\n  no_data = ASIO_NETDB_ERROR(NO_DATA),\n\n  /// A non-recoverable error occurred.\n  no_recovery = ASIO_NETDB_ERROR(NO_RECOVERY)\n};\n\nenum addrinfo_errors\n{\n  /// The service is not supported for the given socket type.\n  service_not_found = 
ASIO_WIN_OR_POSIX(\n      ASIO_NATIVE_ERROR(WSATYPE_NOT_FOUND),\n      ASIO_GETADDRINFO_ERROR(EAI_SERVICE)),\n\n  /// The socket type is not supported.\n  socket_type_not_supported = ASIO_WIN_OR_POSIX(\n      ASIO_NATIVE_ERROR(WSAESOCKTNOSUPPORT),\n      ASIO_GETADDRINFO_ERROR(EAI_SOCKTYPE))\n};\n\nenum misc_errors\n{\n  /// Already open.\n  already_open = 1,\n\n  /// End of file or stream.\n  eof,\n\n  /// Element not found.\n  not_found,\n\n  /// The descriptor cannot fit into the select system call's fd_set.\n  fd_set_failure\n};\n\ninline const asio::error_category& get_system_category()\n{\n  return asio::system_category();\n}\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\nextern ASIO_DECL\nconst asio::error_category& get_netdb_category();\n\nextern ASIO_DECL\nconst asio::error_category& get_addrinfo_category();\n\n#else // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\ninline const asio::error_category& get_netdb_category()\n{\n  return get_system_category();\n}\n\ninline const asio::error_category& get_addrinfo_category()\n{\n  return get_system_category();\n}\n\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\nextern ASIO_DECL\nconst asio::error_category& get_misc_category();\n\nstatic const asio::error_category&\n  system_category ASIO_UNUSED_VARIABLE\n  = asio::error::get_system_category();\nstatic const asio::error_category&\n  netdb_category ASIO_UNUSED_VARIABLE\n  = asio::error::get_netdb_category();\nstatic const asio::error_category&\n  addrinfo_category ASIO_UNUSED_VARIABLE\n  = asio::error::get_addrinfo_category();\nstatic const asio::error_category&\n  misc_category ASIO_UNUSED_VARIABLE\n  = asio::error::get_misc_category();\n\n} // namespace error\n} // namespace asio\n\n#if defined(ASIO_HAS_STD_SYSTEM_ERROR)\nnamespace std {\n\ntemplate<> struct is_error_code_enum<asio::error::basic_errors>\n{\n  static const bool value = true;\n};\n\ntemplate<> struct is_error_code_enum<asio::error::netdb_errors>\n{\n  static const bool 
value = true;\n};\n\ntemplate<> struct is_error_code_enum<asio::error::addrinfo_errors>\n{\n  static const bool value = true;\n};\n\ntemplate<> struct is_error_code_enum<asio::error::misc_errors>\n{\n  static const bool value = true;\n};\n\n} // namespace std\n#endif // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\nnamespace asio {\nnamespace error {\n\ninline asio::error_code make_error_code(basic_errors e)\n{\n  return asio::error_code(\n      static_cast<int>(e), get_system_category());\n}\n\ninline asio::error_code make_error_code(netdb_errors e)\n{\n  return asio::error_code(\n      static_cast<int>(e), get_netdb_category());\n}\n\ninline asio::error_code make_error_code(addrinfo_errors e)\n{\n  return asio::error_code(\n      static_cast<int>(e), get_addrinfo_category());\n}\n\ninline asio::error_code make_error_code(misc_errors e)\n{\n  return asio::error_code(\n      static_cast<int>(e), get_misc_category());\n}\n\n} // namespace error\nnamespace stream_errc {\n  // Simulates the proposed stream_errc scoped enum.\n  using error::eof;\n  using error::not_found;\n} // namespace stream_errc\nnamespace socket_errc {\n  // Simulates the proposed socket_errc scoped enum.\n  using error::already_open;\n  using error::not_found;\n} // namespace socket_errc\nnamespace resolver_errc {\n  // Simulates the proposed resolver_errc scoped enum.\n  using error::host_not_found;\n  const error::netdb_errors try_again = error::host_not_found_try_again;\n  using error::service_not_found;\n} // namespace resolver_errc\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#undef ASIO_NATIVE_ERROR\n#undef ASIO_SOCKET_ERROR\n#undef ASIO_NETDB_ERROR\n#undef ASIO_GETADDRINFO_ERROR\n#undef ASIO_WIN_OR_POSIX\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/error.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_ERROR_HPP\n"
  },
  {
    "path": "src/third_party/asio/error_code.hpp",
    "content": "//\n// error_code.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_ERROR_CODE_HPP\n#define ASIO_ERROR_CODE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_SYSTEM_ERROR)\n# include <system_error>\n#else // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n# include <string>\n# include \"asio/detail/noncopyable.hpp\"\n# if !defined(ASIO_NO_IOSTREAM)\n#  include <iosfwd>\n# endif // !defined(ASIO_NO_IOSTREAM)\n#endif // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\ntypedef std::error_category error_category;\n\n#else // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n/// Base class for all error categories.\nclass error_category : private noncopyable\n{\npublic:\n  /// Destructor.\n  virtual ~error_category()\n  {\n  }\n\n  /// Returns a string naming the error gategory.\n  virtual const char* name() const = 0;\n\n  /// Returns a string describing the error denoted by @c value.\n  virtual std::string message(int value) const = 0;\n\n  /// Equality operator to compare two error categories.\n  bool operator==(const error_category& rhs) const\n  {\n    return this == &rhs;\n  }\n\n  /// Inequality operator to compare two error categories.\n  bool operator!=(const error_category& rhs) const\n  {\n    return !(*this == rhs);\n  }\n};\n\n#endif // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n/// Returns the error category used for the system errors produced by asio.\nextern ASIO_DECL const error_category& system_category();\n\n#if defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\ntypedef std::error_code error_code;\n\n#else // 
defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n/// Class to represent an error code value.\nclass error_code\n{\npublic:\n  /// Default constructor.\n  error_code()\n    : value_(0),\n      category_(&system_category())\n  {\n  }\n\n  /// Construct with specific error code and category.\n  error_code(int v, const error_category& c)\n    : value_(v),\n      category_(&c)\n  {\n  }\n\n  /// Construct from an error code enum.\n  template <typename ErrorEnum>\n  error_code(ErrorEnum e)\n  {\n    *this = make_error_code(e);\n  }\n\n  /// Clear the error value to the default.\n  void clear()\n  {\n    value_ = 0;\n    category_ = &system_category();\n  }\n\n  /// Assign a new error value.\n  void assign(int v, const error_category& c)\n  {\n    value_ = v;\n    category_ = &c;\n  }\n\n  /// Get the error value.\n  int value() const\n  {\n    return value_;\n  }\n\n  /// Get the error category.\n  const error_category& category() const\n  {\n    return *category_;\n  }\n\n  /// Get the message associated with the error.\n  std::string message() const\n  {\n    return category_->message(value_);\n  }\n\n  struct unspecified_bool_type_t\n  {\n  };\n\n  typedef void (*unspecified_bool_type)(unspecified_bool_type_t);\n\n  static void unspecified_bool_true(unspecified_bool_type_t) {}\n\n  /// Operator returns non-null if there is a non-success error code.\n  operator unspecified_bool_type() const\n  {\n    if (value_ == 0)\n      return 0;\n    else\n      return &error_code::unspecified_bool_true;\n  }\n\n  /// Operator to test if the error represents success.\n  bool operator!() const\n  {\n    return value_ == 0;\n  }\n\n  /// Equality operator to compare two error objects.\n  friend bool operator==(const error_code& e1, const error_code& e2)\n  {\n    return e1.value_ == e2.value_ && e1.category_ == e2.category_;\n  }\n\n  /// Inequality operator to compare two error objects.\n  friend bool operator!=(const error_code& e1, const error_code& e2)\n  {\n    return e1.value_ != 
e2.value_ || e1.category_ != e2.category_;\n  }\n\nprivate:\n  // The value associated with the error code.\n  int value_;\n\n  // The category associated with the error code.\n  const error_category* category_;\n};\n\n# if !defined(ASIO_NO_IOSTREAM)\n\n/// Output an error code.\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const error_code& ec)\n{\n  os << ec.category().name() << ':' << ec.value();\n  return os;\n}\n\n# endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/error_code.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_ERROR_CODE_HPP\n"
  },
  {
    "path": "src/third_party/asio/execution_context.hpp",
    "content": "//\n// execution_context.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_EXECUTION_CONTEXT_HPP\n#define ASIO_EXECUTION_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <stdexcept>\n#include <typeinfo>\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/variadic_templates.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nclass execution_context;\nclass io_context;\n\n#if !defined(GENERATING_DOCUMENTATION)\ntemplate <typename Service> Service& use_service(execution_context&);\ntemplate <typename Service> Service& use_service(io_context&);\ntemplate <typename Service> void add_service(execution_context&, Service*);\ntemplate <typename Service> bool has_service(execution_context&);\n#endif // !defined(GENERATING_DOCUMENTATION)\n\nnamespace detail { class service_registry; }\n\n/// A context for function object execution.\n/**\n * An execution context represents a place where function objects will be\n * executed. An @c io_context is an example of an execution context.\n *\n * @par The execution_context class and services\n *\n * Class execution_context implements an extensible, type-safe, polymorphic set\n * of services, indexed by service type.\n *\n * Services exist to manage the resources that are shared across an execution\n * context. 
For example, timers may be implemented in terms of a single timer\n * queue, and this queue would be stored in a service.\n *\n * Access to the services of an execution_context is via three function\n * templates, use_service(), add_service() and has_service().\n *\n * In a call to @c use_service<Service>(), the type argument chooses a service,\n * making available all members of the named type. If @c Service is not present\n * in an execution_context, an object of type @c Service is created and added\n * to the execution_context. A C++ program can check if an execution_context\n * implements a particular service with the function template @c\n * has_service<Service>().\n *\n * Service objects may be explicitly added to an execution_context using the\n * function template @c add_service<Service>(). If the @c Service is already\n * present, the service_already_exists exception is thrown. If the owner of the\n * service is not the same object as the execution_context parameter, the\n * invalid_service_owner exception is thrown.\n *\n * Once a service reference is obtained from an execution_context object by\n * calling use_service(), that reference remains usable as long as the owning\n * execution_context object exists.\n *\n * All service implementations have execution_context::service as a public base\n * class. Custom services may be implemented by deriving from this class and\n * then added to an execution_context using the facilities described above.\n *\n * @par The execution_context as a base class\n *\n * Class execution_context may be used only as a base class for concrete\n * execution context types. The @c io_context is an example of such a derived\n * type.\n *\n * On destruction, a class that is derived from execution_context must perform\n * <tt>execution_context::shutdown()</tt> followed by\n * <tt>execution_context::destroy()</tt>.\n *\n * This destruction sequence permits programs to simplify their resource\n * management by using @c shared_ptr<>. 
Where an object's lifetime is tied to\n * the lifetime of a connection (or some other sequence of asynchronous\n * operations), a @c shared_ptr to the object would be bound into the handlers\n * for all asynchronous operations associated with it. This works as follows:\n *\n * @li When a single connection ends, all associated asynchronous operations\n * complete. The corresponding handler objects are destroyed, and all @c\n * shared_ptr references to the objects are destroyed.\n *\n * @li To shut down the whole program, the io_context function stop() is called\n * to terminate any run() calls as soon as possible. The io_context destructor\n * calls @c shutdown() and @c destroy() to destroy all pending handlers,\n * causing all @c shared_ptr references to all connection objects to be\n * destroyed.\n */\nclass execution_context\n  : private noncopyable\n{\npublic:\n  class id;\n  class service;\n\npublic:\n  /// Constructor.\n  ASIO_DECL execution_context();\n\n  /// Destructor.\n  ASIO_DECL ~execution_context();\n\nprotected:\n  /// Shuts down all services in the context.\n  /**\n   * This function is implemented as follows:\n   *\n   * @li For each service object @c svc in the execution_context set, in\n   * reverse order of the beginning of service object lifetime, performs @c\n   * svc->shutdown().\n   */\n  ASIO_DECL void shutdown();\n\n  /// Destroys all services in the context.\n  /**\n   * This function is implemented as follows:\n   *\n   * @li For each service object @c svc in the execution_context set, in\n   * reverse order * of the beginning of service object lifetime, performs\n   * <tt>delete static_cast<execution_context::service*>(svc)</tt>.\n   */\n  ASIO_DECL void destroy();\n\npublic:\n  /// Fork-related event notifications.\n  enum fork_event\n  {\n    /// Notify the context that the process is about to fork.\n    fork_prepare,\n\n    /// Notify the context that the process has forked and is the parent.\n    fork_parent,\n\n    /// Notify the 
context that the process has forked and is the child.\n    fork_child\n  };\n\n  /// Notify the execution_context of a fork-related event.\n  /**\n   * This function is used to inform the execution_context that the process is\n   * about to fork, or has just forked. This allows the execution_context, and\n   * the services it contains, to perform any necessary housekeeping to ensure\n   * correct operation following a fork.\n   *\n   * This function must not be called while any other execution_context\n   * function, or any function associated with the execution_context's derived\n   * class, is being called in another thread. It is, however, safe to call\n   * this function from within a completion handler, provided no other thread\n   * is accessing the execution_context or its derived class.\n   *\n   * @param event A fork-related event.\n   *\n   * @throws asio::system_error Thrown on failure. If the notification\n   * fails the execution_context object should no longer be used and should be\n   * destroyed.\n   *\n   * @par Example\n   * The following code illustrates how to incorporate the notify_fork()\n   * function:\n   * @code my_execution_context.notify_fork(execution_context::fork_prepare);\n   * if (fork() == 0)\n   * {\n   *   // This is the child process.\n   *   my_execution_context.notify_fork(execution_context::fork_child);\n   * }\n   * else\n   * {\n   *   // This is the parent process.\n   *   my_execution_context.notify_fork(execution_context::fork_parent);\n   * } @endcode\n   *\n   * @note For each service object @c svc in the execution_context set,\n   * performs <tt>svc->notify_fork();</tt>. When processing the fork_prepare\n   * event, services are visited in reverse order of the beginning of service\n   * object lifetime. 
Otherwise, services are visited in order of the beginning\n   * of service object lifetime.\n   */\n  ASIO_DECL void notify_fork(fork_event event);\n\n  /// Obtain the service object corresponding to the given type.\n  /**\n   * This function is used to locate a service object that corresponds to the\n   * given service type. If there is no existing implementation of the service,\n   * then the execution_context will create a new instance of the service.\n   *\n   * @param e The execution_context object that owns the service.\n   *\n   * @return The service interface implementing the specified service type.\n   * Ownership of the service interface is not transferred to the caller.\n   */\n  template <typename Service>\n  friend Service& use_service(execution_context& e);\n\n  /// Obtain the service object corresponding to the given type.\n  /**\n   * This function is used to locate a service object that corresponds to the\n   * given service type. If there is no existing implementation of the service,\n   * then the io_context will create a new instance of the service.\n   *\n   * @param ioc The io_context object that owns the service.\n   *\n   * @return The service interface implementing the specified service type.\n   * Ownership of the service interface is not transferred to the caller.\n   *\n   * @note This overload is preserved for backwards compatibility with services\n   * that inherit from io_context::service.\n   */\n  template <typename Service>\n  friend Service& use_service(io_context& ioc);\n\n#if defined(GENERATING_DOCUMENTATION)\n\n  /// Creates a service object and adds it to the execution_context.\n  /**\n   * This function is used to add a service to the execution_context.\n   *\n   * @param e The execution_context object that owns the service.\n   *\n   * @param args Zero or more arguments to be passed to the service\n   * constructor.\n   *\n   * @throws asio::service_already_exists Thrown if a service of the\n   * given type is already 
present in the execution_context.\n   */\n  template <typename Service, typename... Args>\n  friend Service& make_service(execution_context& e, Args&&... args);\n\n#elif defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Service, typename... Args>\n  friend Service& make_service(execution_context& e,\n      ASIO_MOVE_ARG(Args)... args);\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Service>\n  friend Service& make_service(execution_context& e);\n\n#define ASIO_PRIVATE_MAKE_SERVICE_DEF(n) \\\n  template <typename Service, ASIO_VARIADIC_TPARAMS(n)> \\\n  friend Service& make_service(execution_context& e, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)); \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_MAKE_SERVICE_DEF)\n#undef ASIO_PRIVATE_MAKE_SERVICE_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  /// (Deprecated: Use make_service().) Add a service object to the\n  /// execution_context.\n  /**\n   * This function is used to add a service to the execution_context.\n   *\n   * @param e The execution_context object that owns the service.\n   *\n   * @param svc The service object. On success, ownership of the service object\n   * is transferred to the execution_context. 
When the execution_context object\n   * is destroyed, it will destroy the service object by performing: @code\n   * delete static_cast<execution_context::service*>(svc) @endcode\n   *\n   * @throws asio::service_already_exists Thrown if a service of the\n   * given type is already present in the execution_context.\n   *\n   * @throws asio::invalid_service_owner Thrown if the service's owning\n   * execution_context is not the execution_context object specified by the\n   * @c e parameter.\n   */\n  template <typename Service>\n  friend void add_service(execution_context& e, Service* svc);\n\n  /// Determine if an execution_context contains a specified service type.\n  /**\n   * This function is used to determine whether the execution_context contains a\n   * service object corresponding to the given service type.\n   *\n   * @param e The execution_context object that owns the service.\n   *\n   * @return A boolean indicating whether the execution_context contains the\n   * service.\n   */\n  template <typename Service>\n  friend bool has_service(execution_context& e);\n\nprivate:\n  // The service registry.\n  asio::detail::service_registry* service_registry_;\n};\n\n/// Class used to uniquely identify a service.\nclass execution_context::id\n  : private noncopyable\n{\npublic:\n  /// Constructor.\n  id() {}\n};\n\n/// Base class for all io_context services.\nclass execution_context::service\n  : private noncopyable\n{\npublic:\n  /// Get the context object that owns the service.\n  execution_context& context();\n\nprotected:\n  /// Constructor.\n  /**\n   * @param owner The execution_context object that owns the service.\n   */\n  ASIO_DECL service(execution_context& owner);\n\n  /// Destructor.\n  ASIO_DECL virtual ~service();\n\nprivate:\n  /// Destroy all user-defined handler objects owned by the service.\n  virtual void shutdown() = 0;\n\n  /// Handle notification of a fork-related event to perform any necessary\n  /// housekeeping.\n  /**\n   * This function 
is not a pure virtual so that services only have to\n   * implement it if necessary. The default implementation does nothing.\n   */\n  ASIO_DECL virtual void notify_fork(\n      execution_context::fork_event event);\n\n  friend class asio::detail::service_registry;\n  struct key\n  {\n    key() : type_info_(0), id_(0) {}\n    const std::type_info* type_info_;\n    const execution_context::id* id_;\n  } key_;\n\n  execution_context& owner_;\n  service* next_;\n};\n\n/// Exception thrown when trying to add a duplicate service to an\n/// execution_context.\nclass service_already_exists\n  : public std::logic_error\n{\npublic:\n  ASIO_DECL service_already_exists();\n};\n\n/// Exception thrown when trying to add a service object to an\n/// execution_context where the service has a different owner.\nclass invalid_service_owner\n  : public std::logic_error\n{\npublic:\n  ASIO_DECL invalid_service_owner();\n};\n\nnamespace detail {\n\n// Special derived service id type to keep classes header-file only.\ntemplate <typename Type>\nclass service_id\n  : public execution_context::id\n{\n};\n\n// Special service base class to keep classes header-file only.\ntemplate <typename Type>\nclass execution_context_service_base\n  : public execution_context::service\n{\npublic:\n  static service_id<Type> id;\n\n  // Constructor.\n  execution_context_service_base(execution_context& e)\n    : execution_context::service(e)\n  {\n  }\n};\n\ntemplate <typename Type>\nservice_id<Type> execution_context_service_base<Type>::id;\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/execution_context.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/execution_context.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_EXECUTION_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/executor.hpp",
    "content": "//\n// executor.hpp\n// ~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_EXECUTOR_HPP\n#define ASIO_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <typeinfo>\n#include \"asio/detail/cstddef.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Exception thrown when trying to access an empty polymorphic executor.\nclass bad_executor\n  : public std::exception\n{\npublic:\n  /// Constructor.\n  ASIO_DECL bad_executor() ASIO_NOEXCEPT;\n\n  /// Obtain message associated with exception.\n  ASIO_DECL virtual const char* what() const\n    ASIO_NOEXCEPT_OR_NOTHROW;\n};\n\n/// Polymorphic wrapper for executors.\nclass executor\n{\npublic:\n  /// Default constructor.\n  executor() ASIO_NOEXCEPT\n    : impl_(0)\n  {\n  }\n\n  /// Construct from nullptr.\n  executor(nullptr_t) ASIO_NOEXCEPT\n    : impl_(0)\n  {\n  }\n\n  /// Copy constructor.\n  executor(const executor& other) ASIO_NOEXCEPT\n    : impl_(other.clone())\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move constructor.\n  executor(executor&& other) ASIO_NOEXCEPT\n    : impl_(other.impl_)\n  {\n    other.impl_ = 0;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Construct a polymorphic wrapper for the specified executor.\n  template <typename Executor>\n  executor(Executor e);\n\n  /// Allocator-aware constructor to create a polymorphic wrapper for the\n  /// specified executor.\n  template <typename Executor, typename Allocator>\n  
executor(allocator_arg_t, const Allocator& a, Executor e);\n\n  /// Destructor.\n  ~executor()\n  {\n    destroy();\n  }\n\n  /// Assignment operator.\n  executor& operator=(const executor& other) ASIO_NOEXCEPT\n  {\n    destroy();\n    impl_ = other.clone();\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  // Move assignment operator.\n  executor& operator=(executor&& other) ASIO_NOEXCEPT\n  {\n    destroy();\n    impl_ = other.impl_;\n    other.impl_ = 0;\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Assignment operator for nullptr_t.\n  executor& operator=(nullptr_t) ASIO_NOEXCEPT\n  {\n    destroy();\n    impl_ = 0;\n    return *this;\n  }\n\n  /// Assignment operator to create a polymorphic wrapper for the specified\n  /// executor.\n  template <typename Executor>\n  executor& operator=(ASIO_MOVE_ARG(Executor) e) ASIO_NOEXCEPT\n  {\n    executor tmp(ASIO_MOVE_CAST(Executor)(e));\n    destroy();\n    impl_ = tmp.impl_;\n    tmp.impl_ = 0;\n    return *this;\n  }\n\n  /// Obtain the underlying execution context.\n  execution_context& context() const ASIO_NOEXCEPT\n  {\n    return get_impl()->context();\n  }\n\n  /// Inform the executor that it has some outstanding work to do.\n  void on_work_started() const ASIO_NOEXCEPT\n  {\n    get_impl()->on_work_started();\n  }\n\n  /// Inform the executor that some work is no longer outstanding.\n  void on_work_finished() const ASIO_NOEXCEPT\n  {\n    get_impl()->on_work_finished();\n  }\n\n  /// Request the executor to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object is executed according to the rules of the\n   * target executor object.\n   *\n   * @param f The function object to be called. The executor will make a copy\n   * of the handler object as required. 
The function signature of the function\n   * object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Request the executor to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object is executed according to the rules of the\n   * target executor object.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Request the executor to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object is executed according to the rules of the\n   * target executor object.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. 
The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  struct unspecified_bool_type_t {};\n  typedef void (*unspecified_bool_type)(unspecified_bool_type_t);\n  static void unspecified_bool_true(unspecified_bool_type_t) {}\n\n  /// Operator to test if the executor contains a valid target.\n  operator unspecified_bool_type() const ASIO_NOEXCEPT\n  {\n    return impl_ ? &executor::unspecified_bool_true : 0;\n  }\n\n  /// Obtain type information for the target executor object.\n  /**\n   * @returns If @c *this has a target type of type @c T, <tt>typeid(T)</tt>;\n   * otherwise, <tt>typeid(void)</tt>.\n   */\n#if !defined(ASIO_NO_TYPEID) || defined(GENERATING_DOCUMENTATION)\n  const std::type_info& target_type() const ASIO_NOEXCEPT\n  {\n    return impl_ ? impl_->target_type() : typeid(void);\n  }\n#else // !defined(ASIO_NO_TYPEID) || defined(GENERATING_DOCUMENTATION)\n  const void* target_type() const ASIO_NOEXCEPT\n  {\n    return impl_ ? 
impl_->target_type() : 0;\n  }\n#endif // !defined(ASIO_NO_TYPEID) || defined(GENERATING_DOCUMENTATION)\n\n  /// Obtain a pointer to the target executor object.\n  /**\n   * @returns If <tt>target_type() == typeid(T)</tt>, a pointer to the stored\n   * executor target; otherwise, a null pointer.\n   */\n  template <typename Executor>\n  Executor* target() ASIO_NOEXCEPT;\n\n  /// Obtain a pointer to the target executor object.\n  /**\n   * @returns If <tt>target_type() == typeid(T)</tt>, a pointer to the stored\n   * executor target; otherwise, a null pointer.\n   */\n  template <typename Executor>\n  const Executor* target() const ASIO_NOEXCEPT;\n\n  /// Compare two executors for equality.\n  friend bool operator==(const executor& a,\n      const executor& b) ASIO_NOEXCEPT\n  {\n    if (a.impl_ == b.impl_)\n      return true;\n    if (!a.impl_ || !b.impl_)\n      return false;\n    return a.impl_->equals(b.impl_);\n  }\n\n  /// Compare two executors for inequality.\n  friend bool operator!=(const executor& a,\n      const executor& b) ASIO_NOEXCEPT\n  {\n    return !(a == b);\n  }\n\nprivate:\n#if !defined(GENERATING_DOCUMENTATION)\n  class function;\n  template <typename, typename> class impl;\n\n#if !defined(ASIO_NO_TYPEID)\n  typedef const std::type_info& type_id_result_type;\n#else // !defined(ASIO_NO_TYPEID)\n  typedef const void* type_id_result_type;\n#endif // !defined(ASIO_NO_TYPEID)\n\n  template <typename T>\n  static type_id_result_type type_id()\n  {\n#if !defined(ASIO_NO_TYPEID)\n    return typeid(T);\n#else // !defined(ASIO_NO_TYPEID)\n    static int unique_id;\n    return &unique_id;\n#endif // !defined(ASIO_NO_TYPEID)\n  }\n\n  // Base class for all polymorphic executor implementations.\n  class impl_base\n  {\n  public:\n    virtual impl_base* clone() const ASIO_NOEXCEPT = 0;\n    virtual void destroy() ASIO_NOEXCEPT = 0;\n    virtual execution_context& context() ASIO_NOEXCEPT = 0;\n    virtual void on_work_started() ASIO_NOEXCEPT = 0;\n    virtual 
void on_work_finished() ASIO_NOEXCEPT = 0;\n    virtual void dispatch(ASIO_MOVE_ARG(function)) = 0;\n    virtual void post(ASIO_MOVE_ARG(function)) = 0;\n    virtual void defer(ASIO_MOVE_ARG(function)) = 0;\n    virtual type_id_result_type target_type() const ASIO_NOEXCEPT = 0;\n    virtual void* target() ASIO_NOEXCEPT = 0;\n    virtual const void* target() const ASIO_NOEXCEPT = 0;\n    virtual bool equals(const impl_base* e) const ASIO_NOEXCEPT = 0;\n\n  protected:\n    impl_base(bool fast_dispatch) : fast_dispatch_(fast_dispatch) {}\n    virtual ~impl_base() {}\n\n  private:\n    friend class executor;\n    const bool fast_dispatch_;\n  };\n\n  // Helper function to check and return the implementation pointer.\n  impl_base* get_impl() const\n  {\n    if (!impl_)\n    {\n      bad_executor ex;\n      asio::detail::throw_exception(ex);\n    }\n    return impl_;\n  }\n\n  // Helper function to clone another implementation.\n  impl_base* clone() const ASIO_NOEXCEPT\n  {\n    return impl_ ? impl_->clone() : 0;\n  }\n\n  // Helper function to destroy an implementation.\n  void destroy() ASIO_NOEXCEPT\n  {\n    if (impl_)\n      impl_->destroy();\n  }\n\n  impl_base* impl_;\n#endif // !defined(GENERATING_DOCUMENTATION)\n};\n\n} // namespace asio\n\nASIO_USES_ALLOCATOR(asio::executor)\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/executor.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/executor.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/executor_work_guard.hpp",
    "content": "//\n// executor_work_guard.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_EXECUTOR_WORK_GUARD_HPP\n#define ASIO_EXECUTOR_WORK_GUARD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/is_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// An object of type @c executor_work_guard controls ownership of executor work\n/// within a scope.\ntemplate <typename Executor>\nclass executor_work_guard\n{\npublic:\n  /// The underlying executor type.\n  typedef Executor executor_type;\n\n  /// Constructs a @c executor_work_guard object for the specified executor.\n  /**\n   * Stores a copy of @c e and calls <tt>on_work_started()</tt> on it.\n   */\n  explicit executor_work_guard(const executor_type& e) ASIO_NOEXCEPT\n    : executor_(e),\n      owns_(true)\n  {\n    executor_.on_work_started();\n  }\n\n  /// Copy constructor.\n  executor_work_guard(const executor_work_guard& other) ASIO_NOEXCEPT\n    : executor_(other.executor_),\n      owns_(other.owns_)\n  {\n    if (owns_)\n      executor_.on_work_started();\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move constructor.\n  executor_work_guard(executor_work_guard&& other) ASIO_NOEXCEPT\n    : executor_(ASIO_MOVE_CAST(Executor)(other.executor_)),\n      owns_(other.owns_)\n  {\n    other.owns_ = false;\n  }\n#endif //  defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destructor.\n  /**\n   * Unless the object has already been reset, or is in a moved-from state,\n   * calls 
<tt>on_work_finished()</tt> on the stored executor.\n   */\n  ~executor_work_guard()\n  {\n    if (owns_)\n      executor_.on_work_finished();\n  }\n\n  /// Obtain the associated executor.\n  executor_type get_executor() const ASIO_NOEXCEPT\n  {\n    return executor_;\n  }\n\n  /// Whether the executor_work_guard object owns some outstanding work.\n  bool owns_work() const ASIO_NOEXCEPT\n  {\n    return owns_;\n  }\n\n  /// Indicate that the work is no longer outstanding.\n  /*\n   * Unless the object has already been reset, or is in a moved-from state,\n   * calls <tt>on_work_finished()</tt> on the stored executor.\n   */\n  void reset() ASIO_NOEXCEPT\n  {\n    if (owns_)\n    {\n      executor_.on_work_finished();\n      owns_ = false;\n    }\n  }\n\nprivate:\n  // Disallow assignment.\n  executor_work_guard& operator=(const executor_work_guard&);\n\n  executor_type executor_;\n  bool owns_;\n};\n\n/// Create an @ref executor_work_guard object.\ntemplate <typename Executor>\ninline executor_work_guard<Executor> make_work_guard(const Executor& ex,\n    typename enable_if<is_executor<Executor>::value>::type* = 0)\n{\n  return executor_work_guard<Executor>(ex);\n}\n\n/// Create an @ref executor_work_guard object.\ntemplate <typename ExecutionContext>\ninline executor_work_guard<typename ExecutionContext::executor_type>\nmake_work_guard(ExecutionContext& ctx,\n    typename enable_if<\n      is_convertible<ExecutionContext&, execution_context&>::value>::type* = 0)\n{\n  return executor_work_guard<typename ExecutionContext::executor_type>(\n      ctx.get_executor());\n}\n\n/// Create an @ref executor_work_guard object.\ntemplate <typename T>\ninline executor_work_guard<typename associated_executor<T>::type>\nmake_work_guard(const T& t,\n    typename enable_if<!is_executor<T>::value &&\n      !is_convertible<T&, execution_context&>::value>::type* = 0)\n{\n  return executor_work_guard<typename associated_executor<T>::type>(\n      
associated_executor<T>::get(t));\n}\n\n/// Create an @ref executor_work_guard object.\ntemplate <typename T, typename Executor>\ninline executor_work_guard<typename associated_executor<T, Executor>::type>\nmake_work_guard(const T& t, const Executor& ex,\n    typename enable_if<is_executor<Executor>::value>::type* = 0)\n{\n  return executor_work_guard<typename associated_executor<T, Executor>::type>(\n      associated_executor<T, Executor>::get(t, ex));\n}\n\n/// Create an @ref executor_work_guard object.\ntemplate <typename T, typename ExecutionContext>\ninline executor_work_guard<typename associated_executor<T,\n  typename ExecutionContext::executor_type>::type>\nmake_work_guard(const T& t, ExecutionContext& ctx,\n    typename enable_if<!is_executor<T>::value &&\n      !is_convertible<T&, execution_context&>::value &&\n      is_convertible<ExecutionContext&, execution_context&>::value>::type* = 0)\n{\n  return executor_work_guard<typename associated_executor<T,\n    typename ExecutionContext::executor_type>::type>(\n      associated_executor<T, typename ExecutionContext::executor_type>::get(\n        t, ctx.get_executor()));\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_EXECUTOR_WORK_GUARD_HPP\n"
  },
  {
    "path": "src/third_party/asio/generic/basic_endpoint.hpp",
    "content": "//\n// generic/basic_endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_GENERIC_BASIC_ENDPOINT_HPP\n#define ASIO_GENERIC_BASIC_ENDPOINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/generic/detail/endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace generic {\n\n/// Describes an endpoint for any socket type.\n/**\n * The asio::generic::basic_endpoint class template describes an endpoint\n * that may be associated with any socket type.\n *\n * @note The socket types sockaddr type must be able to fit into a\n * @c sockaddr_storage structure.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * Endpoint.\n */\ntemplate <typename Protocol>\nclass basic_endpoint\n{\npublic:\n  /// The protocol type associated with the endpoint.\n  typedef Protocol protocol_type;\n\n  /// The type of the endpoint structure. 
This type is dependent on the\n  /// underlying implementation of the socket layer.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined data_type;\n#else\n  typedef asio::detail::socket_addr_type data_type;\n#endif\n\n  /// Default constructor.\n  basic_endpoint() ASIO_NOEXCEPT\n  {\n  }\n\n  /// Construct an endpoint from the specified socket address.\n  basic_endpoint(const void* socket_address,\n      std::size_t socket_address_size, int socket_protocol = 0)\n    : impl_(socket_address, socket_address_size, socket_protocol)\n  {\n  }\n\n  /// Construct an endpoint from the specific endpoint type.\n  template <typename Endpoint>\n  basic_endpoint(const Endpoint& endpoint)\n    : impl_(endpoint.data(), endpoint.size(), endpoint.protocol().protocol())\n  {\n  }\n\n  /// Copy constructor.\n  basic_endpoint(const basic_endpoint& other)\n    : impl_(other.impl_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  basic_endpoint(basic_endpoint&& other)\n    : impl_(other.impl_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assign from another endpoint.\n  basic_endpoint& operator=(const basic_endpoint& other)\n  {\n    impl_ = other.impl_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move-assign from another endpoint.\n  basic_endpoint& operator=(basic_endpoint&& other)\n  {\n    impl_ = other.impl_;\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// The protocol associated with the endpoint.\n  protocol_type protocol() const\n  {\n    return protocol_type(impl_.family(), impl_.protocol());\n  }\n\n  /// Get the underlying endpoint in the native type.\n  data_type* data()\n  {\n    return impl_.data();\n  }\n\n  /// Get the underlying endpoint in the native type.\n  const data_type* data() const\n  {\n    return impl_.data();\n  }\n\n  /// Get the underlying size of the endpoint in the native type.\n  std::size_t size() const\n  {\n    return impl_.size();\n  }\n\n  /// Set the underlying size 
of the endpoint in the native type.\n  void resize(std::size_t new_size)\n  {\n    impl_.resize(new_size);\n  }\n\n  /// Get the capacity of the endpoint in the native type.\n  std::size_t capacity() const\n  {\n    return impl_.capacity();\n  }\n\n  /// Compare two endpoints for equality.\n  friend bool operator==(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return e1.impl_ == e2.impl_;\n  }\n\n  /// Compare two endpoints for inequality.\n  friend bool operator!=(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return !(e1.impl_ == e2.impl_);\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator<(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return e1.impl_ < e2.impl_;\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator>(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return e2.impl_ < e1.impl_;\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator<=(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return !(e2 < e1);\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator>=(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return !(e1 < e2);\n  }\n\nprivate:\n  // The underlying generic endpoint.\n  asio::generic::detail::endpoint impl_;\n};\n\n} // namespace generic\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_GENERIC_BASIC_ENDPOINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/generic/datagram_protocol.hpp",
    "content": "//\n// generic/datagram_protocol.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP\n#define ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <typeinfo>\n#include \"asio/basic_datagram_socket.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/generic/basic_endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace generic {\n\n/// Encapsulates the flags needed for a generic datagram-oriented socket.\n/**\n * The asio::generic::datagram_protocol class contains flags necessary\n * for datagram-oriented sockets of any address family and protocol.\n *\n * @par Examples\n * Constructing using a native address family and socket protocol:\n * @code datagram_protocol p(AF_INET, IPPROTO_UDP); @endcode\n * Constructing from a specific protocol type:\n * @code datagram_protocol p(asio::ip::udp::v4()); @endcode\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol.\n */\nclass datagram_protocol\n{\npublic:\n  /// Construct a protocol object for a specific address family and protocol.\n  datagram_protocol(int address_family, int socket_protocol)\n    : family_(address_family),\n      protocol_(socket_protocol)\n  {\n  }\n\n  /// Construct a generic protocol object from a specific protocol.\n  /**\n   * @throws @c bad_cast Thrown if the source protocol is not datagram-oriented.\n   */\n  template <typename Protocol>\n  datagram_protocol(const Protocol& source_protocol)\n    
: family_(source_protocol.family()),\n      protocol_(source_protocol.protocol())\n  {\n    if (source_protocol.type() != type())\n    {\n      std::bad_cast ex;\n      asio::detail::throw_exception(ex);\n    }\n  }\n\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(SOCK_DGRAM);\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return protocol_;\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  {\n    return family_;\n  }\n\n  /// Compare two protocols for equality.\n  friend bool operator==(const datagram_protocol& p1,\n      const datagram_protocol& p2)\n  {\n    return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_;\n  }\n\n  /// Compare two protocols for inequality.\n  friend bool operator!=(const datagram_protocol& p1,\n      const datagram_protocol& p2)\n  {\n    return !(p1 == p2);\n  }\n\n  /// The type of an endpoint.\n  typedef basic_endpoint<datagram_protocol> endpoint;\n\n  /// The generic socket type.\n  typedef basic_datagram_socket<datagram_protocol> socket;\n\nprivate:\n  int family_;\n  int protocol_;\n};\n\n} // namespace generic\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/generic/detail/endpoint.hpp",
    "content": "//\n// generic/detail/endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_GENERIC_DETAIL_ENDPOINT_HPP\n#define ASIO_GENERIC_DETAIL_ENDPOINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <cstddef>\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace generic {\nnamespace detail {\n\n// Helper class for implementing a generic socket endpoint.\nclass endpoint\n{\npublic:\n  // Default constructor.\n  ASIO_DECL endpoint();\n\n  // Construct an endpoint from the specified raw bytes.\n  ASIO_DECL endpoint(const void* sock_addr,\n      std::size_t sock_addr_size, int sock_protocol);\n\n  // Copy constructor.\n  endpoint(const endpoint& other)\n    : data_(other.data_),\n      size_(other.size_),\n      protocol_(other.protocol_)\n  {\n  }\n\n  // Assign from another endpoint.\n  endpoint& operator=(const endpoint& other)\n  {\n    data_ = other.data_;\n    size_ = other.size_;\n    protocol_ = other.protocol_;\n    return *this;\n  }\n\n  // Get the address family associated with the endpoint.\n  int family() const\n  {\n    return data_.base.sa_family;\n  }\n\n  // Get the socket protocol associated with the endpoint.\n  int protocol() const\n  {\n    return protocol_;\n  }\n\n  // Get the underlying endpoint in the native type.\n  asio::detail::socket_addr_type* data()\n  {\n    return &data_.base;\n  }\n\n  // Get the underlying endpoint in the native type.\n  const asio::detail::socket_addr_type* data() const\n  {\n    return &data_.base;\n  }\n\n  // Get the underlying size of the endpoint in the native type.\n  
std::size_t size() const\n  {\n    return size_;\n  }\n\n  // Set the underlying size of the endpoint in the native type.\n  ASIO_DECL void resize(std::size_t size);\n\n  // Get the capacity of the endpoint in the native type.\n  std::size_t capacity() const\n  {\n    return sizeof(asio::detail::sockaddr_storage_type);\n  }\n\n  // Compare two endpoints for equality.\n  ASIO_DECL friend bool operator==(\n      const endpoint& e1, const endpoint& e2);\n\n  // Compare endpoints for ordering.\n  ASIO_DECL friend bool operator<(\n      const endpoint& e1, const endpoint& e2);\n\nprivate:\n  // The underlying socket address.\n  union data_union\n  {\n    asio::detail::socket_addr_type base;\n    asio::detail::sockaddr_storage_type generic;\n  } data_;\n\n  // The length of the socket address stored in the endpoint.\n  std::size_t size_;\n\n  // The socket protocol associated with the endpoint.\n  int protocol_;\n\n  // Initialise with a specified memory.\n  ASIO_DECL void init(const void* sock_addr,\n      std::size_t sock_addr_size, int sock_protocol);\n};\n\n} // namespace detail\n} // namespace generic\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/generic/detail/impl/endpoint.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_GENERIC_DETAIL_ENDPOINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/generic/detail/impl/endpoint.ipp",
    "content": "//\n// generic/detail/impl/endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP\n#define ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <cstring>\n#include <typeinfo>\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/generic/detail/endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace generic {\nnamespace detail {\n\nendpoint::endpoint()\n{\n  init(0, 0, 0);\n}\n\nendpoint::endpoint(const void* sock_addr,\n    std::size_t sock_addr_size, int sock_protocol)\n{\n  init(sock_addr, sock_addr_size, sock_protocol);\n}\n\nvoid endpoint::resize(std::size_t new_size)\n{\n  if (new_size > sizeof(asio::detail::sockaddr_storage_type))\n  {\n    asio::error_code ec(asio::error::invalid_argument);\n    asio::detail::throw_error(ec);\n  }\n  else\n  {\n    size_ = new_size;\n    protocol_ = 0;\n  }\n}\n\nbool operator==(const endpoint& e1, const endpoint& e2)\n{\n  using namespace std; // For memcmp.\n  return e1.size() == e2.size() && memcmp(e1.data(), e2.data(), e1.size()) == 0;\n}\n\nbool operator<(const endpoint& e1, const endpoint& e2)\n{\n  if (e1.protocol() < e2.protocol())\n    return true;\n\n  if (e1.protocol() > e2.protocol())\n    return false;\n\n  using namespace std; // For memcmp.\n  std::size_t compare_size = e1.size() < e2.size() ? 
e1.size() : e2.size();\n  int compare_result = memcmp(e1.data(), e2.data(), compare_size);\n\n  if (compare_result < 0)\n    return true;\n\n  if (compare_result > 0)\n    return false;\n\n  return e1.size() < e2.size();\n}\n\nvoid endpoint::init(const void* sock_addr,\n    std::size_t sock_addr_size, int sock_protocol)\n{\n  if (sock_addr_size > sizeof(asio::detail::sockaddr_storage_type))\n  {\n    asio::error_code ec(asio::error::invalid_argument);\n    asio::detail::throw_error(ec);\n  }\n\n  using namespace std; // For memset and memcpy.\n  memset(&data_.generic, 0, sizeof(asio::detail::sockaddr_storage_type));\n  if (sock_addr_size > 0)\n    memcpy(&data_.generic, sock_addr, sock_addr_size);\n\n  size_ = sock_addr_size;\n  protocol_ = sock_protocol;\n}\n\n} // namespace detail\n} // namespace generic\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP\n"
  },
  {
    "path": "src/third_party/asio/generic/raw_protocol.hpp",
    "content": "//\n// generic/raw_protocol.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_GENERIC_RAW_PROTOCOL_HPP\n#define ASIO_GENERIC_RAW_PROTOCOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <typeinfo>\n#include \"asio/basic_raw_socket.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/generic/basic_endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace generic {\n\n/// Encapsulates the flags needed for a generic raw socket.\n/**\n * The asio::generic::raw_protocol class contains flags necessary for\n * raw sockets of any address family and protocol.\n *\n * @par Examples\n * Constructing using a native address family and socket protocol:\n * @code raw_protocol p(AF_INET, IPPROTO_ICMP); @endcode\n * Constructing from a specific protocol type:\n * @code raw_protocol p(asio::ip::icmp::v4()); @endcode\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol.\n */\nclass raw_protocol\n{\npublic:\n  /// Construct a protocol object for a specific address family and protocol.\n  raw_protocol(int address_family, int socket_protocol)\n    : family_(address_family),\n      protocol_(socket_protocol)\n  {\n  }\n\n  /// Construct a generic protocol object from a specific protocol.\n  /**\n   * @throws @c bad_cast Thrown if the source protocol is not raw-oriented.\n   */\n  template <typename Protocol>\n  raw_protocol(const Protocol& source_protocol)\n    : family_(source_protocol.family()),\n      protocol_(source_protocol.protocol())\n  
{\n    if (source_protocol.type() != type())\n    {\n      std::bad_cast ex;\n      asio::detail::throw_exception(ex);\n    }\n  }\n\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(SOCK_RAW);\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return protocol_;\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  {\n    return family_;\n  }\n\n  /// Compare two protocols for equality.\n  friend bool operator==(const raw_protocol& p1, const raw_protocol& p2)\n  {\n    return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_;\n  }\n\n  /// Compare two protocols for inequality.\n  friend bool operator!=(const raw_protocol& p1, const raw_protocol& p2)\n  {\n    return !(p1 == p2);\n  }\n\n  /// The type of an endpoint.\n  typedef basic_endpoint<raw_protocol> endpoint;\n\n  /// The generic socket type.\n  typedef basic_raw_socket<raw_protocol> socket;\n\nprivate:\n  int family_;\n  int protocol_;\n};\n\n} // namespace generic\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_GENERIC_RAW_PROTOCOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/generic/seq_packet_protocol.hpp",
    "content": "//\n// generic/seq_packet_protocol.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP\n#define ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <typeinfo>\n#include \"asio/basic_seq_packet_socket.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/generic/basic_endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace generic {\n\n/// Encapsulates the flags needed for a generic sequenced packet socket.\n/**\n * The asio::generic::seq_packet_protocol class contains flags necessary\n * for seq_packet-oriented sockets of any address family and protocol.\n *\n * @par Examples\n * Constructing using a native address family and socket protocol:\n * @code seq_packet_protocol p(AF_INET, IPPROTO_SCTP); @endcode\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol.\n */\nclass seq_packet_protocol\n{\npublic:\n  /// Construct a protocol object for a specific address family and protocol.\n  seq_packet_protocol(int address_family, int socket_protocol)\n    : family_(address_family),\n      protocol_(socket_protocol)\n  {\n  }\n\n  /// Construct a generic protocol object from a specific protocol.\n  /**\n   * @throws @c bad_cast Thrown if the source protocol is not based around\n   * sequenced packets.\n   */\n  template <typename Protocol>\n  seq_packet_protocol(const Protocol& source_protocol)\n    : family_(source_protocol.family()),\n      
protocol_(source_protocol.protocol())\n  {\n    if (source_protocol.type() != type())\n    {\n      std::bad_cast ex;\n      asio::detail::throw_exception(ex);\n    }\n  }\n\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(SOCK_SEQPACKET);\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return protocol_;\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  {\n    return family_;\n  }\n\n  /// Compare two protocols for equality.\n  friend bool operator==(const seq_packet_protocol& p1,\n      const seq_packet_protocol& p2)\n  {\n    return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_;\n  }\n\n  /// Compare two protocols for inequality.\n  friend bool operator!=(const seq_packet_protocol& p1,\n      const seq_packet_protocol& p2)\n  {\n    return !(p1 == p2);\n  }\n\n  /// The type of an endpoint.\n  typedef basic_endpoint<seq_packet_protocol> endpoint;\n\n  /// The generic socket type.\n  typedef basic_seq_packet_socket<seq_packet_protocol> socket;\n\nprivate:\n  int family_;\n  int protocol_;\n};\n\n} // namespace generic\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/generic/stream_protocol.hpp",
    "content": "//\n// generic/stream_protocol.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_GENERIC_STREAM_PROTOCOL_HPP\n#define ASIO_GENERIC_STREAM_PROTOCOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <typeinfo>\n#include \"asio/basic_socket_iostream.hpp\"\n#include \"asio/basic_stream_socket.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/generic/basic_endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace generic {\n\n/// Encapsulates the flags needed for a generic stream-oriented socket.\n/**\n * The asio::generic::stream_protocol class contains flags necessary for\n * stream-oriented sockets of any address family and protocol.\n *\n * @par Examples\n * Constructing using a native address family and socket protocol:\n * @code stream_protocol p(AF_INET, IPPROTO_TCP); @endcode\n * Constructing from a specific protocol type:\n * @code stream_protocol p(asio::ip::tcp::v4()); @endcode\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol.\n */\nclass stream_protocol\n{\npublic:\n  /// Construct a protocol object for a specific address family and protocol.\n  stream_protocol(int address_family, int socket_protocol)\n    : family_(address_family),\n      protocol_(socket_protocol)\n  {\n  }\n\n  /// Construct a generic protocol object from a specific protocol.\n  /**\n   * @throws @c bad_cast Thrown if the source protocol is not stream-oriented.\n   */\n  template <typename Protocol>\n  stream_protocol(const Protocol& 
source_protocol)\n    : family_(source_protocol.family()),\n      protocol_(source_protocol.protocol())\n  {\n    if (source_protocol.type() != type())\n    {\n      std::bad_cast ex;\n      asio::detail::throw_exception(ex);\n    }\n  }\n\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(SOCK_STREAM);\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return protocol_;\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  {\n    return family_;\n  }\n\n  /// Compare two protocols for equality.\n  friend bool operator==(const stream_protocol& p1, const stream_protocol& p2)\n  {\n    return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_;\n  }\n\n  /// Compare two protocols for inequality.\n  friend bool operator!=(const stream_protocol& p1, const stream_protocol& p2)\n  {\n    return !(p1 == p2);\n  }\n\n  /// The type of an endpoint.\n  typedef basic_endpoint<stream_protocol> endpoint;\n\n  /// The generic socket type.\n  typedef basic_stream_socket<stream_protocol> socket;\n\n#if !defined(ASIO_NO_IOSTREAM)\n  /// The generic socket iostream type.\n  typedef basic_socket_iostream<stream_protocol> iostream;\n#endif // !defined(ASIO_NO_IOSTREAM)\n\nprivate:\n  int family_;\n  int protocol_;\n};\n\n} // namespace generic\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_GENERIC_STREAM_PROTOCOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/handler_alloc_hook.hpp",
    "content": "//\n// handler_alloc_hook.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_HANDLER_ALLOC_HOOK_HPP\n#define ASIO_HANDLER_ALLOC_HOOK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Default allocation function for handlers.\n/**\n * Asynchronous operations may need to allocate temporary objects. Since\n * asynchronous operations have a handler function object, these temporary\n * objects can be said to be associated with the handler.\n *\n * Implement asio_handler_allocate and asio_handler_deallocate for your own\n * handlers to provide custom allocation for these temporary objects.\n *\n * The default implementation of these allocation hooks uses <tt>::operator\n * new</tt> and <tt>::operator delete</tt>.\n *\n * @note All temporary objects associated with a handler will be deallocated\n * before the upcall to the handler is performed. 
This allows the same memory to\n * be reused for a subsequent asynchronous operation initiated by the handler.\n *\n * @par Example\n * @code\n * class my_handler;\n *\n * void* asio_handler_allocate(std::size_t size, my_handler* context)\n * {\n *   return ::operator new(size);\n * }\n *\n * void asio_handler_deallocate(void* pointer, std::size_t size,\n *     my_handler* context)\n * {\n *   ::operator delete(pointer);\n * }\n * @endcode\n */\nASIO_DECL void* asio_handler_allocate(\n    std::size_t size, ...);\n\n/// Default deallocation function for handlers.\n/**\n * Implement asio_handler_allocate and asio_handler_deallocate for your own\n * handlers to provide custom allocation for the associated temporary objects.\n *\n * The default implementation of these allocation hooks uses <tt>::operator\n * new</tt> and <tt>::operator delete</tt>.\n *\n * @sa asio_handler_allocate.\n */\nASIO_DECL void asio_handler_deallocate(\n    void* pointer, std::size_t size, ...);\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/handler_alloc_hook.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_HANDLER_ALLOC_HOOK_HPP\n"
  },
  {
    "path": "src/third_party/asio/handler_continuation_hook.hpp",
    "content": "//\n// handler_continuation_hook.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_HANDLER_CONTINUATION_HOOK_HPP\n#define ASIO_HANDLER_CONTINUATION_HOOK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Default continuation function for handlers.\n/**\n * Asynchronous operations may represent a continuation of the asynchronous\n * control flow associated with the current handler. The implementation can use\n * this knowledge to optimise scheduling of the handler.\n *\n * Implement asio_handler_is_continuation for your own handlers to indicate\n * when a handler represents a continuation.\n *\n * The default implementation of the continuation hook returns <tt>false</tt>.\n *\n * @par Example\n * @code\n * class my_handler;\n *\n * bool asio_handler_is_continuation(my_handler* context)\n * {\n *   return true;\n * }\n * @endcode\n */\ninline bool asio_handler_is_continuation(...)\n{\n  return false;\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_HANDLER_CONTINUATION_HOOK_HPP\n"
  },
  {
    "path": "src/third_party/asio/handler_invoke_hook.hpp",
    "content": "//\n// handler_invoke_hook.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_HANDLER_INVOKE_HOOK_HPP\n#define ASIO_HANDLER_INVOKE_HOOK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/** @defgroup asio_handler_invoke asio::asio_handler_invoke\n *\n * @brief Default invoke function for handlers.\n *\n * Completion handlers for asynchronous operations are invoked by the\n * io_context associated with the corresponding object (e.g. a socket or\n * deadline_timer). Certain guarantees are made on when the handler may be\n * invoked, in particular that a handler can only be invoked from a thread that\n * is currently calling @c run() on the corresponding io_context object.\n * Handlers may subsequently be invoked through other objects (such as\n * io_context::strand objects) that provide additional guarantees.\n *\n * When asynchronous operations are composed from other asynchronous\n * operations, all intermediate handlers should be invoked using the same\n * method as the final handler. This is required to ensure that user-defined\n * objects are not accessed in a way that may violate the guarantees. 
This\n * hooking function ensures that the invoked method used for the final handler\n * is accessible at each intermediate step.\n *\n * Implement asio_handler_invoke for your own handlers to specify a custom\n * invocation strategy.\n *\n * This default implementation invokes the function object like so:\n * @code function(); @endcode\n * If necessary, the default implementation makes a copy of the function object\n * so that the non-const operator() can be used.\n *\n * @par Example\n * @code\n * class my_handler;\n *\n * template <typename Function>\n * void asio_handler_invoke(Function function, my_handler* context)\n * {\n *   context->strand_.dispatch(function);\n * }\n * @endcode\n */\n/*@{*/\n\n/// Default handler invocation hook used for non-const function objects.\ntemplate <typename Function>\ninline void asio_handler_invoke(Function& function, ...)\n{\n  function();\n}\n\n/// Default handler invocation hook used for const function objects.\ntemplate <typename Function>\ninline void asio_handler_invoke(const Function& function, ...)\n{\n  Function tmp(function);\n  tmp();\n}\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_HANDLER_INVOKE_HOOK_HPP\n"
  },
  {
    "path": "src/third_party/asio/high_resolution_timer.hpp",
    "content": "//\n// high_resolution_timer.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_HIGH_RESOLUTION_TIMER_HPP\n#define ASIO_HIGH_RESOLUTION_TIMER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/basic_waitable_timer.hpp\"\n#include \"asio/detail/chrono.hpp\"\n\nnamespace asio {\n\n/// Typedef for a timer based on the high resolution clock.\n/**\n * This typedef uses the C++11 @c &lt;chrono&gt; standard library facility, if\n * available. Otherwise, it may use the Boost.Chrono library. To explicitly\n * utilise Boost.Chrono, use the basic_waitable_timer template directly:\n * @code\n * typedef basic_waitable_timer<boost::chrono::high_resolution_clock> timer;\n * @endcode\n */\ntypedef basic_waitable_timer<\n    chrono::high_resolution_clock>\n  high_resolution_timer;\n\n} // namespace asio\n\n#endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_HIGH_RESOLUTION_TIMER_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/awaitable.hpp",
    "content": "//\n// impl/awaitable.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_AWAITABLE_HPP\n#define ASIO_IMPL_AWAITABLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <exception>\n#include <new>\n#include <tuple>\n#include <utility>\n#include \"asio/detail/thread_context.hpp\"\n#include \"asio/detail/thread_info_base.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/post.hpp\"\n#include \"asio/system_error.hpp\"\n#include \"asio/this_coro.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// An awaitable_thread represents a thread-of-execution that is composed of one\n// or more \"stack frames\", with each frame represented by an awaitable_frame.\n// All execution occurs in the context of the awaitable_thread's executor. 
An\n// awaitable_thread continues to \"pump\" the stack frames by repeatedly resuming\n// the top stack frame until the stack is empty, or until ownership of the\n// stack is transferred to another awaitable_thread object.\n//\n//                +------------------------------------+\n//                | top_of_stack_                      |\n//                |                                    V\n// +--------------+---+                            +-----------------+\n// |                  |                            |                 |\n// | awaitable_thread |<---------------------------+ awaitable_frame |\n// |                  |           attached_thread_ |                 |\n// +--------------+---+           (Set only when   +---+-------------+\n//                |               frames are being     |\n//                |               actively pumped      | caller_\n//                |               by a thread, and     |\n//                |               then only for        V\n//                |               the top frame.)  
+-----------------+\n//                |                                |                 |\n//                |                                | awaitable_frame |\n//                |                                |                 |\n//                |                                +---+-------------+\n//                |                                    |\n//                |                                    | caller_\n//                |                                    :\n//                |                                    :\n//                |                                    |\n//                |                                    V\n//                |                                +-----------------+\n//                | bottom_of_stack_               |                 |\n//                +------------------------------->| awaitable_frame |\n//                                                 |                 |\n//                                                 +-----------------+\n\ntemplate <typename Executor>\nclass awaitable_frame_base\n{\npublic:\n#if !defined(ASIO_DISABLE_AWAITABLE_FRAME_RECYCLING)\n  void* operator new(std::size_t size)\n  {\n    return asio::detail::thread_info_base::allocate(\n        asio::detail::thread_info_base::awaitable_frame_tag(),\n        asio::detail::thread_context::thread_call_stack::top(),\n        size);\n  }\n\n  void operator delete(void* pointer, std::size_t size)\n  {\n    asio::detail::thread_info_base::deallocate(\n        asio::detail::thread_info_base::awaitable_frame_tag(),\n        asio::detail::thread_context::thread_call_stack::top(),\n        pointer, size);\n  }\n#endif // !defined(ASIO_DISABLE_AWAITABLE_FRAME_RECYCLING)\n\n  // The frame starts in a suspended state until the awaitable_thread object\n  // pumps the stack.\n  auto initial_suspend() noexcept\n  {\n    return suspend_always();\n  }\n\n  // On final suspension the frame is popped from the top of the stack.\n  auto 
final_suspend() noexcept\n  {\n    struct result\n    {\n      awaitable_frame_base* this_;\n\n      bool await_ready() const noexcept\n      {\n        return false;\n      }\n\n      void await_suspend(coroutine_handle<void>) noexcept\n      {\n        this_->pop_frame();\n      }\n\n      void await_resume() const noexcept\n      {\n      }\n    };\n\n    return result{this};\n  }\n\n  void set_except(std::exception_ptr e) noexcept\n  {\n    pending_exception_ = e;\n  }\n\n  void set_error(const asio::error_code& ec)\n  {\n    this->set_except(std::make_exception_ptr(asio::system_error(ec)));\n  }\n\n  void unhandled_exception()\n  {\n    set_except(std::current_exception());\n  }\n\n  void rethrow_exception()\n  {\n    if (pending_exception_)\n    {\n      std::exception_ptr ex = std::exchange(pending_exception_, nullptr);\n      std::rethrow_exception(ex);\n    }\n  }\n\n  template <typename T>\n  auto await_transform(awaitable<T, Executor> a) const\n  {\n    return a;\n  }\n\n  // This await transformation obtains the associated executor of the thread of\n  // execution.\n  auto await_transform(this_coro::executor_t) noexcept\n  {\n    struct result\n    {\n      awaitable_frame_base* this_;\n\n      bool await_ready() const noexcept\n      {\n        return true;\n      }\n\n      void await_suspend(coroutine_handle<void>) noexcept\n      {\n      }\n\n      auto await_resume() const noexcept\n      {\n        return this_->attached_thread_->get_executor();\n      }\n    };\n\n    return result{this};\n  }\n\n  // This await transformation is used to run an async operation's initiation\n  // function object after the coroutine has been suspended. 
This ensures that\n  // immediate resumption of the coroutine in another thread does not cause a\n  // race condition.\n  template <typename Function>\n  auto await_transform(Function f,\n      typename enable_if<\n        is_convertible<\n          typename result_of<Function(awaitable_frame_base*)>::type,\n          awaitable_thread<Executor>*\n        >::value\n      >::type* = 0)\n  {\n    struct result\n    {\n      Function function_;\n      awaitable_frame_base* this_;\n\n      bool await_ready() const noexcept\n      {\n        return false;\n      }\n\n      void await_suspend(coroutine_handle<void>) noexcept\n      {\n        function_(this_);\n      }\n\n      void await_resume() const noexcept\n      {\n      }\n    };\n\n    return result{std::move(f), this};\n  }\n\n  void attach_thread(awaitable_thread<Executor>* handler) noexcept\n  {\n    attached_thread_ = handler;\n  }\n\n  awaitable_thread<Executor>* detach_thread() noexcept\n  {\n    return std::exchange(attached_thread_, nullptr);\n  }\n\n  void push_frame(awaitable_frame_base<Executor>* caller) noexcept\n  {\n    caller_ = caller;\n    attached_thread_ = caller_->attached_thread_;\n    attached_thread_->top_of_stack_ = this;\n    caller_->attached_thread_ = nullptr;\n  }\n\n  void pop_frame() noexcept\n  {\n    if (caller_)\n      caller_->attached_thread_ = attached_thread_;\n    attached_thread_->top_of_stack_ = caller_;\n    attached_thread_ = nullptr;\n    caller_ = nullptr;\n  }\n\n  void resume()\n  {\n    coro_.resume();\n  }\n\n  void destroy()\n  {\n    coro_.destroy();\n  }\n\nprotected:\n  coroutine_handle<void> coro_ = nullptr;\n  awaitable_thread<Executor>* attached_thread_ = nullptr;\n  awaitable_frame_base<Executor>* caller_ = nullptr;\n  std::exception_ptr pending_exception_ = nullptr;\n};\n\ntemplate <typename T, typename Executor>\nclass awaitable_frame\n  : public awaitable_frame_base<Executor>\n{\npublic:\n  awaitable_frame() noexcept\n  {\n  }\n\n  
awaitable_frame(awaitable_frame&& other) noexcept\n    : awaitable_frame_base<Executor>(std::move(other))\n  {\n  }\n\n  ~awaitable_frame()\n  {\n    if (has_result_)\n      static_cast<T*>(static_cast<void*>(result_))->~T();\n  }\n\n  awaitable<T, Executor> get_return_object() noexcept\n  {\n    this->coro_ = coroutine_handle<awaitable_frame>::from_promise(*this);\n    return awaitable<T, Executor>(this);\n  };\n\n  template <typename U>\n  void return_value(U&& u)\n  {\n    new (&result_) T(std::forward<U>(u));\n    has_result_ = true;\n  }\n\n  template <typename... Us>\n  void return_values(Us&&... us)\n  {\n    this->return_value(std::forward_as_tuple(std::forward<Us>(us)...));\n  }\n\n  T get()\n  {\n    this->caller_ = nullptr;\n    this->rethrow_exception();\n    return std::move(*static_cast<T*>(static_cast<void*>(result_)));\n  }\n\nprivate:\n  alignas(T) unsigned char result_[sizeof(T)];\n  bool has_result_ = false;\n};\n\ntemplate <typename Executor>\nclass awaitable_frame<void, Executor>\n  : public awaitable_frame_base<Executor>\n{\npublic:\n  awaitable<void, Executor> get_return_object()\n  {\n    this->coro_ = coroutine_handle<awaitable_frame>::from_promise(*this);\n    return awaitable<void, Executor>(this);\n  };\n\n  void return_void()\n  {\n  }\n\n  void get()\n  {\n    this->caller_ = nullptr;\n    this->rethrow_exception();\n  }\n};\n\ntemplate <typename Executor>\nclass awaitable_thread\n{\npublic:\n  typedef Executor executor_type;\n\n  // Construct from the entry point of a new thread of execution.\n  awaitable_thread(awaitable<void, Executor> p, const Executor& ex)\n    : bottom_of_stack_(std::move(p)),\n      top_of_stack_(bottom_of_stack_.frame_),\n      executor_(ex)\n  {\n  }\n\n  // Transfer ownership from another awaitable_thread.\n  awaitable_thread(awaitable_thread&& other) noexcept\n    : bottom_of_stack_(std::move(other.bottom_of_stack_)),\n      top_of_stack_(std::exchange(other.top_of_stack_, nullptr)),\n      
executor_(std::move(other.executor_))\n  {\n  }\n\n  // Clean up with a last ditch effort to ensure the thread is unwound within\n  // the context of the executor.\n  ~awaitable_thread()\n  {\n    if (bottom_of_stack_.valid())\n    {\n      // Coroutine \"stack unwinding\" must be performed through the executor.\n      (post)(executor_,\n          [a = std::move(bottom_of_stack_)]() mutable\n          {\n            awaitable<void, Executor>(std::move(a));\n          });\n    }\n  }\n\n  executor_type get_executor() const noexcept\n  {\n    return executor_;\n  }\n\n  // Launch a new thread of execution.\n  void launch()\n  {\n    top_of_stack_->attach_thread(this);\n    pump();\n  }\n\nprotected:\n  template <typename> friend class awaitable_frame_base;\n\n  // Repeatedly resume the top stack frame until the stack is empty or until it\n  // has been transferred to another resumable_thread object.\n  void pump()\n  {\n    do top_of_stack_->resume(); while (top_of_stack_);\n    if (bottom_of_stack_.valid())\n    {\n      awaitable<void, Executor> a(std::move(bottom_of_stack_));\n      a.frame_->rethrow_exception();\n    }\n  }\n\n  awaitable<void, Executor> bottom_of_stack_;\n  awaitable_frame_base<Executor>* top_of_stack_;\n  executor_type executor_;\n};\n\n} // namespace detail\n} // namespace asio\n\n#if !defined(GENERATING_DOCUMENTATION)\n\nnamespace std { namespace experimental {\n\ntemplate <typename T, typename Executor, typename... Args>\nstruct coroutine_traits<asio::awaitable<T, Executor>, Args...>\n{\n  typedef asio::detail::awaitable_frame<T, Executor> promise_type;\n};\n\n}} // namespace std::experimental\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_AWAITABLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/buffered_read_stream.hpp",
    "content": "//\n// impl/buffered_read_stream.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_BUFFERED_READ_STREAM_HPP\n#define ASIO_IMPL_BUFFERED_READ_STREAM_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\ntemplate <typename Stream>\nstd::size_t buffered_read_stream<Stream>::fill()\n{\n  detail::buffer_resize_guard<detail::buffered_stream_storage>\n    resize_guard(storage_);\n  std::size_t previous_size = storage_.size();\n  storage_.resize(storage_.capacity());\n  storage_.resize(previous_size + next_layer_.read_some(buffer(\n          storage_.data() + previous_size,\n          storage_.size() - previous_size)));\n  resize_guard.commit();\n  return storage_.size() - previous_size;\n}\n\ntemplate <typename Stream>\nstd::size_t buffered_read_stream<Stream>::fill(asio::error_code& ec)\n{\n  detail::buffer_resize_guard<detail::buffered_stream_storage>\n    resize_guard(storage_);\n  std::size_t previous_size = storage_.size();\n  storage_.resize(storage_.capacity());\n  storage_.resize(previous_size + next_layer_.read_some(buffer(\n          storage_.data() + previous_size,\n          storage_.size() - previous_size),\n        ec));\n  resize_guard.commit();\n  return storage_.size() - 
previous_size;\n}\n\nnamespace detail\n{\n  template <typename ReadHandler>\n  class buffered_fill_handler\n  {\n  public:\n    buffered_fill_handler(detail::buffered_stream_storage& storage,\n        std::size_t previous_size, ReadHandler& handler)\n      : storage_(storage),\n        previous_size_(previous_size),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    buffered_fill_handler(const buffered_fill_handler& other)\n      : storage_(other.storage_),\n        previous_size_(other.previous_size_),\n        handler_(other.handler_)\n    {\n    }\n\n    buffered_fill_handler(buffered_fill_handler&& other)\n      : storage_(other.storage_),\n        previous_size_(other.previous_size_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        const std::size_t bytes_transferred)\n    {\n      storage_.resize(previous_size_ + bytes_transferred);\n      handler_(ec, bytes_transferred);\n    }\n\n  //private:\n    detail::buffered_stream_storage& storage_;\n    std::size_t previous_size_;\n    ReadHandler handler_;\n  };\n\n  template <typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      buffered_fill_handler<ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      buffered_fill_handler<ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      buffered_fill_handler<ReadHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, 
typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      buffered_fill_handler<ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      buffered_fill_handler<ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Stream>\n  class initiate_async_buffered_fill\n  {\n  public:\n    typedef typename remove_reference<\n      Stream>::type::lowest_layer_type::executor_type executor_type;\n\n    explicit initiate_async_buffered_fill(Stream& next_layer)\n      : next_layer_(next_layer)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return next_layer_.lowest_layer().get_executor();\n    }\n\n    template <typename ReadHandler>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        buffered_stream_storage* storage) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      std::size_t previous_size = storage->size();\n      storage->resize(storage->capacity());\n      next_layer_.async_read_some(\n          buffer(\n            storage->data() + previous_size,\n            storage->size() - previous_size),\n          buffered_fill_handler<typename decay<ReadHandler>::type>(\n            *storage, previous_size, handler2.value));\n    }\n\n  private:\n    Stream& next_layer_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::buffered_fill_handler<ReadHandler>, 
Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(const detail::buffered_fill_handler<ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::buffered_fill_handler<ReadHandler>, Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(const detail::buffered_fill_handler<ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Stream>\ntemplate <\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nbuffered_read_stream<Stream>::async_fill(\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_buffered_fill<Stream>(next_layer_),\n      handler, &storage_);\n}\n\ntemplate <typename Stream>\ntemplate <typename MutableBufferSequence>\nstd::size_t buffered_read_stream<Stream>::read_some(\n    const MutableBufferSequence& buffers)\n{\n  using asio::buffer_size;\n  if (buffer_size(buffers) == 0)\n    return 0;\n\n  if (storage_.empty())\n    this->fill();\n\n  return this->copy(buffers);\n}\n\ntemplate <typename Stream>\ntemplate <typename MutableBufferSequence>\nstd::size_t buffered_read_stream<Stream>::read_some(\n    const MutableBufferSequence& buffers, asio::error_code& ec)\n{\n  ec = asio::error_code();\n\n  using asio::buffer_size;\n  if (buffer_size(buffers) == 0)\n    return 0;\n\n  if (storage_.empty() && !this->fill(ec))\n    return 0;\n\n  return 
this->copy(buffers);\n}\n\nnamespace detail\n{\n  template <typename MutableBufferSequence, typename ReadHandler>\n  class buffered_read_some_handler\n  {\n  public:\n    buffered_read_some_handler(detail::buffered_stream_storage& storage,\n        const MutableBufferSequence& buffers, ReadHandler& handler)\n      : storage_(storage),\n        buffers_(buffers),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n      buffered_read_some_handler(const buffered_read_some_handler& other)\n        : storage_(other.storage_),\n          buffers_(other.buffers_),\n          handler_(other.handler_)\n      {\n      }\n\n      buffered_read_some_handler(buffered_read_some_handler&& other)\n        : storage_(other.storage_),\n          buffers_(other.buffers_),\n          handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n      {\n      }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec, std::size_t)\n    {\n      if (ec || storage_.empty())\n      {\n        const std::size_t length = 0;\n        handler_(ec, length);\n      }\n      else\n      {\n        const std::size_t bytes_copied = asio::buffer_copy(\n            buffers_, storage_.data(), storage_.size());\n        storage_.consume(bytes_copied);\n        handler_(ec, bytes_copied);\n      }\n    }\n\n  //private:\n    detail::buffered_stream_storage& storage_;\n    MutableBufferSequence buffers_;\n    ReadHandler handler_;\n  };\n\n  template <typename MutableBufferSequence, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      buffered_read_some_handler<\n        MutableBufferSequence, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename MutableBufferSequence, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      buffered_read_some_handler<\n        
MutableBufferSequence, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename MutableBufferSequence, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      buffered_read_some_handler<\n        MutableBufferSequence, ReadHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename MutableBufferSequence,\n      typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      buffered_read_some_handler<\n        MutableBufferSequence, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename MutableBufferSequence,\n      typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      buffered_read_some_handler<\n        MutableBufferSequence, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Stream>\n  class initiate_async_buffered_read_some\n  {\n  public:\n    typedef typename remove_reference<\n      Stream>::type::lowest_layer_type::executor_type executor_type;\n\n    explicit initiate_async_buffered_read_some(Stream& next_layer)\n      : next_layer_(next_layer)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return next_layer_.lowest_layer().get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        buffered_stream_storage* storage,\n        const MutableBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      using asio::buffer_size;\n      non_const_lvalue<ReadHandler> handler2(handler);\n      if (buffer_size(buffers) == 0 || !storage->empty())\n      {\n        next_layer_.async_read_some(ASIO_MUTABLE_BUFFER(0, 0),\n            buffered_read_some_handler<MutableBufferSequence,\n              typename decay<ReadHandler>::type>(\n                *storage, buffers, handler2.value));\n      }\n      else\n      {\n        initiate_async_buffered_fill<Stream>(this->next_layer_)(\n            buffered_read_some_handler<MutableBufferSequence,\n              typename decay<ReadHandler>::type>(\n                *storage, buffers, handler2.value),\n            storage);\n      }\n    }\n\n  private:\n    Stream& next_layer_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename MutableBufferSequence,\n    typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::buffered_read_some_handler<MutableBufferSequence, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::buffered_read_some_handler<\n        MutableBufferSequence, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename MutableBufferSequence,\n    typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::buffered_read_some_handler<MutableBufferSequence, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::buffered_read_some_handler<\n        MutableBufferSequence, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // 
!defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Stream>\ntemplate <typename MutableBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nbuffered_read_stream<Stream>::async_read_some(\n    const MutableBufferSequence& buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_buffered_read_some<Stream>(next_layer_),\n      handler, &storage_, buffers);\n}\n\ntemplate <typename Stream>\ntemplate <typename MutableBufferSequence>\nstd::size_t buffered_read_stream<Stream>::peek(\n    const MutableBufferSequence& buffers)\n{\n  if (storage_.empty())\n    this->fill();\n  return this->peek_copy(buffers);\n}\n\ntemplate <typename Stream>\ntemplate <typename MutableBufferSequence>\nstd::size_t buffered_read_stream<Stream>::peek(\n    const MutableBufferSequence& buffers, asio::error_code& ec)\n{\n  ec = asio::error_code();\n  if (storage_.empty() && !this->fill(ec))\n    return 0;\n  return this->peek_copy(buffers);\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_BUFFERED_READ_STREAM_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/buffered_write_stream.hpp",
    "content": "//\n// impl/buffered_write_stream.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_BUFFERED_WRITE_STREAM_HPP\n#define ASIO_IMPL_BUFFERED_WRITE_STREAM_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\ntemplate <typename Stream>\nstd::size_t buffered_write_stream<Stream>::flush()\n{\n  std::size_t bytes_written = write(next_layer_,\n      buffer(storage_.data(), storage_.size()));\n  storage_.consume(bytes_written);\n  return bytes_written;\n}\n\ntemplate <typename Stream>\nstd::size_t buffered_write_stream<Stream>::flush(asio::error_code& ec)\n{\n  std::size_t bytes_written = write(next_layer_,\n      buffer(storage_.data(), storage_.size()),\n      transfer_all(), ec);\n  storage_.consume(bytes_written);\n  return bytes_written;\n}\n\nnamespace detail\n{\n  template <typename WriteHandler>\n  class buffered_flush_handler\n  {\n  public:\n    buffered_flush_handler(detail::buffered_stream_storage& storage,\n        WriteHandler& handler)\n      : storage_(storage),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    buffered_flush_handler(const buffered_flush_handler& other)\n      : storage_(other.storage_),\n        handler_(other.handler_)\n    {\n    }\n\n    
buffered_flush_handler(buffered_flush_handler&& other)\n      : storage_(other.storage_),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        const std::size_t bytes_written)\n    {\n      storage_.consume(bytes_written);\n      handler_(ec, bytes_written);\n    }\n\n  //private:\n    detail::buffered_stream_storage& storage_;\n    WriteHandler handler_;\n  };\n\n  template <typename WriteHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      buffered_flush_handler<WriteHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename WriteHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      buffered_flush_handler<WriteHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename WriteHandler>\n  inline bool asio_handler_is_continuation(\n      buffered_flush_handler<WriteHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename WriteHandler>\n  inline void asio_handler_invoke(Function& function,\n      buffered_flush_handler<WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename WriteHandler>\n  inline void asio_handler_invoke(const Function& function,\n      buffered_flush_handler<WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Stream>\n  class initiate_async_buffered_flush\n  {\n  public:\n    typedef typename remove_reference<\n      Stream>::type::lowest_layer_type::executor_type executor_type;\n\n    
explicit initiate_async_buffered_flush(Stream& next_layer)\n      : next_layer_(next_layer)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return next_layer_.lowest_layer().get_executor();\n    }\n\n    template <typename WriteHandler>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        buffered_stream_storage* storage) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      non_const_lvalue<WriteHandler> handler2(handler);\n      async_write(next_layer_, buffer(storage->data(), storage->size()),\n          buffered_flush_handler<typename decay<WriteHandler>::type>(\n            *storage, handler2.value));\n    }\n\n  private:\n    Stream& next_layer_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename WriteHandler, typename Allocator>\nstruct associated_allocator<\n    detail::buffered_flush_handler<WriteHandler>, Allocator>\n{\n  typedef typename associated_allocator<WriteHandler, Allocator>::type type;\n\n  static type get(const detail::buffered_flush_handler<WriteHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<WriteHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename WriteHandler, typename Executor>\nstruct associated_executor<\n    detail::buffered_flush_handler<WriteHandler>, Executor>\n{\n  typedef typename associated_executor<WriteHandler, Executor>::type type;\n\n  static type get(const detail::buffered_flush_handler<WriteHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<WriteHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Stream>\ntemplate <\n    ASIO_COMPLETION_TOKEN_FOR(void 
(asio::error_code,\n      std::size_t)) WriteHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nbuffered_write_stream<Stream>::async_flush(\n    ASIO_MOVE_ARG(WriteHandler) handler)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_buffered_flush<Stream>(next_layer_),\n      handler, &storage_);\n}\n\ntemplate <typename Stream>\ntemplate <typename ConstBufferSequence>\nstd::size_t buffered_write_stream<Stream>::write_some(\n    const ConstBufferSequence& buffers)\n{\n  using asio::buffer_size;\n  if (buffer_size(buffers) == 0)\n    return 0;\n\n  if (storage_.size() == storage_.capacity())\n    this->flush();\n\n  return this->copy(buffers);\n}\n\ntemplate <typename Stream>\ntemplate <typename ConstBufferSequence>\nstd::size_t buffered_write_stream<Stream>::write_some(\n    const ConstBufferSequence& buffers, asio::error_code& ec)\n{\n  ec = asio::error_code();\n\n  using asio::buffer_size;\n  if (buffer_size(buffers) == 0)\n    return 0;\n\n  if (storage_.size() == storage_.capacity() && !flush(ec))\n    return 0;\n\n  return this->copy(buffers);\n}\n\nnamespace detail\n{\n  template <typename ConstBufferSequence, typename WriteHandler>\n  class buffered_write_some_handler\n  {\n  public:\n    buffered_write_some_handler(detail::buffered_stream_storage& storage,\n        const ConstBufferSequence& buffers, WriteHandler& handler)\n      : storage_(storage),\n        buffers_(buffers),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n      buffered_write_some_handler(const buffered_write_some_handler& other)\n        : storage_(other.storage_),\n          buffers_(other.buffers_),\n          handler_(other.handler_)\n      {\n      }\n\n      buffered_write_some_handler(buffered_write_some_handler&& other)\n        : storage_(other.storage_),\n          buffers_(other.buffers_),\n          
handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_))\n      {\n      }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec, std::size_t)\n    {\n      if (ec)\n      {\n        const std::size_t length = 0;\n        handler_(ec, length);\n      }\n      else\n      {\n        using asio::buffer_size;\n        std::size_t orig_size = storage_.size();\n        std::size_t space_avail = storage_.capacity() - orig_size;\n        std::size_t bytes_avail = buffer_size(buffers_);\n        std::size_t length = bytes_avail < space_avail\n          ? bytes_avail : space_avail;\n        storage_.resize(orig_size + length);\n        const std::size_t bytes_copied = asio::buffer_copy(\n            storage_.data() + orig_size, buffers_, length);\n        handler_(ec, bytes_copied);\n      }\n    }\n\n  //private:\n    detail::buffered_stream_storage& storage_;\n    ConstBufferSequence buffers_;\n    WriteHandler handler_;\n  };\n\n  template <typename ConstBufferSequence, typename WriteHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      buffered_write_some_handler<\n        ConstBufferSequence, WriteHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename ConstBufferSequence, typename WriteHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      buffered_write_some_handler<\n        ConstBufferSequence, WriteHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename ConstBufferSequence, typename WriteHandler>\n  inline bool asio_handler_is_continuation(\n      buffered_write_some_handler<\n        ConstBufferSequence, WriteHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename ConstBufferSequence,\n     
 typename WriteHandler>\n  inline void asio_handler_invoke(Function& function,\n      buffered_write_some_handler<\n        ConstBufferSequence, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename ConstBufferSequence,\n      typename WriteHandler>\n  inline void asio_handler_invoke(const Function& function,\n      buffered_write_some_handler<\n        ConstBufferSequence, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Stream>\n  class initiate_async_buffered_write_some\n  {\n  public:\n    typedef typename remove_reference<\n      Stream>::type::lowest_layer_type::executor_type executor_type;\n\n    explicit initiate_async_buffered_write_some(Stream& next_layer)\n      : next_layer_(next_layer)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return next_layer_.lowest_layer().get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        buffered_stream_storage* storage,\n        const ConstBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      using asio::buffer_size;\n      non_const_lvalue<WriteHandler> handler2(handler);\n      if (buffer_size(buffers) == 0 || storage->size() < storage->capacity())\n      {\n        next_layer_.async_write_some(ASIO_CONST_BUFFER(0, 0),\n            buffered_write_some_handler<ConstBufferSequence,\n              typename decay<WriteHandler>::type>(\n                *storage, buffers, handler2.value));\n      }\n      else\n      {\n        
initiate_async_buffered_flush<Stream>(this->next_layer_)(\n            buffered_write_some_handler<ConstBufferSequence,\n              typename decay<WriteHandler>::type>(\n                *storage, buffers, handler2.value),\n            storage);\n      }\n    }\n\n  private:\n    Stream& next_layer_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename ConstBufferSequence,\n    typename WriteHandler, typename Allocator>\nstruct associated_allocator<\n    detail::buffered_write_some_handler<ConstBufferSequence, WriteHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<WriteHandler, Allocator>::type type;\n\n  static type get(\n      const detail::buffered_write_some_handler<\n        ConstBufferSequence, WriteHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<WriteHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename ConstBufferSequence,\n    typename WriteHandler, typename Executor>\nstruct associated_executor<\n    detail::buffered_write_some_handler<ConstBufferSequence, WriteHandler>,\n    Executor>\n{\n  typedef typename associated_executor<WriteHandler, Executor>::type type;\n\n  static type get(\n      const detail::buffered_write_some_handler<\n        ConstBufferSequence, WriteHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<WriteHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Stream>\ntemplate <typename ConstBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nbuffered_write_stream<Stream>::async_write_some(\n    const ConstBufferSequence& buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, 
std::size_t)>(\n      detail::initiate_async_buffered_write_some<Stream>(next_layer_),\n      handler, &storage_, buffers);\n}\n\ntemplate <typename Stream>\ntemplate <typename ConstBufferSequence>\nstd::size_t buffered_write_stream<Stream>::copy(\n    const ConstBufferSequence& buffers)\n{\n  using asio::buffer_size;\n  std::size_t orig_size = storage_.size();\n  std::size_t space_avail = storage_.capacity() - orig_size;\n  std::size_t bytes_avail = buffer_size(buffers);\n  std::size_t length = bytes_avail < space_avail ? bytes_avail : space_avail;\n  storage_.resize(orig_size + length);\n  return asio::buffer_copy(\n      storage_.data() + orig_size, buffers, length);\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_BUFFERED_WRITE_STREAM_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/co_spawn.hpp",
    "content": "//\n// impl/co_spawn.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_CO_SPAWN_HPP\n#define ASIO_IMPL_CO_SPAWN_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/awaitable.hpp\"\n#include \"asio/dispatch.hpp\"\n#include \"asio/post.hpp\"\n#include \"asio/use_awaitable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename T, typename Executor, typename F, typename Handler>\nawaitable<void, Executor> co_spawn_entry_point(\n    awaitable<T, Executor>*, Executor ex, F f, Handler handler)\n{\n  auto spawn_work = make_work_guard(ex);\n  auto handler_work = make_work_guard(handler, ex);\n\n  (void) co_await (post)(spawn_work.get_executor(),\n      use_awaitable_t<Executor>{});\n\n  bool done = false;\n  try\n  {\n    T t = co_await f();\n\n    done = true;\n\n    (dispatch)(handler_work.get_executor(),\n        [handler = std::move(handler), t = std::move(t)]() mutable\n        {\n          handler(std::exception_ptr(), std::move(t));\n        });\n  }\n  catch (...)\n  {\n    if (done)\n      throw;\n\n    (dispatch)(handler_work.get_executor(),\n        [handler = std::move(handler), e = std::current_exception()]() mutable\n        {\n          handler(e, T());\n        });\n  }\n}\n\ntemplate <typename Executor, typename F, typename Handler>\nawaitable<void, Executor> co_spawn_entry_point(\n    awaitable<void, Executor>*, Executor ex, F f, Handler handler)\n{\n  auto spawn_work = make_work_guard(ex);\n  auto handler_work = make_work_guard(handler, ex);\n\n  (void) co_await (post)(spawn_work.get_executor(),\n      
use_awaitable_t<Executor>{});\n\n  std::exception_ptr e = nullptr;\n  try\n  {\n    co_await f();\n  }\n  catch (...)\n  {\n    e = std::current_exception();\n  }\n\n  (dispatch)(handler_work.get_executor(),\n      [handler = std::move(handler), e]() mutable\n      {\n        handler(e);\n      });\n}\n\ntemplate <typename Executor>\nclass initiate_co_spawn\n{\npublic:\n  typedef Executor executor_type;\n\n  template <typename OtherExecutor>\n  explicit initiate_co_spawn(const OtherExecutor& ex)\n    : ex_(ex)\n  {\n  }\n\n  executor_type get_executor() const ASIO_NOEXCEPT\n  {\n    return ex_;\n  }\n\n  template <typename Handler, typename F>\n  void operator()(Handler&& handler, F&& f) const\n  {\n    typedef typename result_of<F()>::type awaitable_type;\n\n    auto a = (co_spawn_entry_point)(static_cast<awaitable_type*>(nullptr),\n        ex_, std::forward<F>(f), std::forward<Handler>(handler));\n    awaitable_handler<executor_type, void>(std::move(a), ex_).launch();\n  }\n\nprivate:\n  Executor ex_;\n};\n\n} // namespace detail\n\ntemplate <typename Executor, typename F,\n    ASIO_COMPLETION_TOKEN_FOR(typename detail::awaitable_signature<\n      typename result_of<F()>::type>::type) CompletionToken>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken,\n    typename detail::awaitable_signature<typename result_of<F()>::type>::type)\nco_spawn(const Executor& ex, F&& f, CompletionToken&& token,\n    typename enable_if<\n      is_executor<Executor>::value\n    >::type*)\n{\n  return async_initiate<CompletionToken,\n    typename detail::awaitable_signature<typename result_of<F()>::type>>(\n      detail::initiate_co_spawn<\n        typename result_of<F()>::type::executor_type>(ex),\n      token, std::forward<F>(f));\n}\n\ntemplate <typename ExecutionContext, typename F,\n    ASIO_COMPLETION_TOKEN_FOR(typename detail::awaitable_signature<\n      typename result_of<F()>::type>::type) CompletionToken>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken,\n    typename 
detail::awaitable_signature<typename result_of<F()>::type>::type)\nco_spawn(ExecutionContext& ctx, F&& f, CompletionToken&& token,\n    typename enable_if<\n      is_convertible<ExecutionContext&, execution_context&>::value\n    >::type*)\n{\n  return (co_spawn)(ctx.get_executor(), std::forward<F>(f),\n      std::forward<CompletionToken>(token));\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_CO_SPAWN_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/compose.hpp",
    "content": "//\n// impl/compose.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_COMPOSE_HPP\n#define ASIO_IMPL_COMPOSE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/detail/variadic_templates.hpp\"\n#include \"asio/executor_work_guard.hpp\"\n#include \"asio/is_executor.hpp\"\n#include \"asio/system_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  template <typename>\n  struct composed_io_executors;\n\n  template <>\n  struct composed_io_executors<void()>\n  {\n    composed_io_executors() ASIO_NOEXCEPT\n      : head_(system_executor())\n    {\n    }\n\n    typedef system_executor head_type;\n    system_executor head_;\n  };\n\n  inline composed_io_executors<void()> make_composed_io_executors()\n  {\n    return composed_io_executors<void()>();\n  }\n\n  template <typename Head>\n  struct composed_io_executors<void(Head)>\n  {\n    explicit composed_io_executors(const Head& ex) ASIO_NOEXCEPT\n      : head_(ex)\n    {\n    }\n\n    typedef Head head_type;\n    Head head_;\n  };\n\n  template <typename Head>\n  inline composed_io_executors<void(Head)>\n  make_composed_io_executors(const Head& head)\n  {\n    return composed_io_executors<void(Head)>(head);\n  }\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Head, typename... 
Tail>\n  struct composed_io_executors<void(Head, Tail...)>\n  {\n    explicit composed_io_executors(const Head& head,\n        const Tail&... tail) ASIO_NOEXCEPT\n      : head_(head),\n        tail_(tail...)\n    {\n    }\n\n    void reset()\n    {\n      head_.reset();\n      tail_.reset();\n    }\n\n    typedef Head head_type;\n    Head head_;\n    composed_io_executors<void(Tail...)> tail_;\n  };\n\n  template <typename Head, typename... Tail>\n  inline composed_io_executors<void(Head, Tail...)>\n  make_composed_io_executors(const Head& head, const Tail&... tail)\n  {\n    return composed_io_executors<void(Head, Tail...)>(head, tail...);\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#define ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF(n) \\\n  template <typename Head, ASIO_VARIADIC_TPARAMS(n)> \\\n  struct composed_io_executors<void(Head, ASIO_VARIADIC_TARGS(n))> \\\n  { \\\n    explicit composed_io_executors(const Head& head, \\\n        ASIO_VARIADIC_CONSTREF_PARAMS(n)) ASIO_NOEXCEPT \\\n      : head_(head), \\\n        tail_(ASIO_VARIADIC_BYVAL_ARGS(n)) \\\n    { \\\n    } \\\n  \\\n    void reset() \\\n    { \\\n      head_.reset(); \\\n      tail_.reset(); \\\n    } \\\n  \\\n    typedef Head head_type; \\\n    Head head_; \\\n    composed_io_executors<void(ASIO_VARIADIC_TARGS(n))> tail_; \\\n  }; \\\n  \\\n  template <typename Head, ASIO_VARIADIC_TPARAMS(n)> \\\n  inline composed_io_executors<void(Head, ASIO_VARIADIC_TARGS(n))> \\\n  make_composed_io_executors(const Head& head, \\\n      ASIO_VARIADIC_CONSTREF_PARAMS(n)) \\\n  { \\\n    return composed_io_executors< \\\n      void(Head, ASIO_VARIADIC_TARGS(n))>( \\\n        head, ASIO_VARIADIC_BYVAL_ARGS(n)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF)\n#undef ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename>\n  struct composed_work;\n\n  template <>\n  struct composed_work<void()>\n  {\n    
typedef composed_io_executors<void()> executors_type;\n\n    composed_work(const executors_type&) ASIO_NOEXCEPT\n      : head_(system_executor())\n    {\n    }\n\n    void reset()\n    {\n      head_.reset();\n    }\n\n    typedef system_executor head_type;\n    executor_work_guard<system_executor> head_;\n  };\n\n  template <typename Head>\n  struct composed_work<void(Head)>\n  {\n    typedef composed_io_executors<void(Head)> executors_type;\n\n    explicit composed_work(const executors_type& ex) ASIO_NOEXCEPT\n      : head_(ex.head_)\n    {\n    }\n\n    void reset()\n    {\n      head_.reset();\n    }\n\n    typedef Head head_type;\n    executor_work_guard<Head> head_;\n  };\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Head, typename... Tail>\n  struct composed_work<void(Head, Tail...)>\n  {\n    typedef composed_io_executors<void(Head, Tail...)> executors_type;\n\n    explicit composed_work(const executors_type& ex) ASIO_NOEXCEPT\n      : head_(ex.head_),\n        tail_(ex.tail_)\n    {\n    }\n\n    void reset()\n    {\n      head_.reset();\n      tail_.reset();\n    }\n\n    typedef Head head_type;\n    executor_work_guard<Head> head_;\n    composed_work<void(Tail...)> tail_;\n  };\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#define ASIO_PRIVATE_COMPOSED_WORK_DEF(n) \\\n  template <typename Head, ASIO_VARIADIC_TPARAMS(n)> \\\n  struct composed_work<void(Head, ASIO_VARIADIC_TARGS(n))> \\\n  { \\\n    typedef composed_io_executors<void(Head, \\\n      ASIO_VARIADIC_TARGS(n))> executors_type; \\\n  \\\n    explicit composed_work(const executors_type& ex) ASIO_NOEXCEPT \\\n      : head_(ex.head_), \\\n        tail_(ex.tail_) \\\n    { \\\n    } \\\n  \\\n    void reset() \\\n    { \\\n      head_.reset(); \\\n      tail_.reset(); \\\n    } \\\n  \\\n    typedef Head head_type; \\\n    executor_work_guard<Head> head_; \\\n    composed_work<void(ASIO_VARIADIC_TARGS(n))> tail_; \\\n  }; \\\n  /**/\n  
ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_COMPOSED_WORK_DEF)\n#undef ASIO_PRIVATE_COMPOSED_WORK_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n  template <typename Impl, typename Work, typename Handler, typename Signature>\n  class composed_op;\n\n  template <typename Impl, typename Work, typename Handler,\n      typename R, typename... Args>\n  class composed_op<Impl, Work, Handler, R(Args...)>\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n  template <typename Impl, typename Work, typename Handler, typename Signature>\n  class composed_op\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n  {\n  public:\n    composed_op(ASIO_MOVE_ARG(Impl) impl,\n        ASIO_MOVE_ARG(Work) work,\n        ASIO_MOVE_ARG(Handler) handler)\n      : impl_(ASIO_MOVE_CAST(Impl)(impl)),\n        work_(ASIO_MOVE_CAST(Work)(work)),\n        handler_(ASIO_MOVE_CAST(Handler)(handler)),\n        invocations_(0)\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    composed_op(composed_op&& other)\n      : impl_(ASIO_MOVE_CAST(Impl)(other.impl_)),\n        work_(ASIO_MOVE_CAST(Work)(other.work_)),\n        handler_(ASIO_MOVE_CAST(Handler)(other.handler_)),\n        invocations_(other.invocations_)\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    typedef typename associated_executor<Handler,\n        typename Work::head_type>::type executor_type;\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return (get_associated_executor)(handler_, work_.head_.get_executor());\n    }\n\n    typedef typename associated_allocator<Handler,\n      std::allocator<void> >::type allocator_type;\n\n    allocator_type get_allocator() const ASIO_NOEXCEPT\n    {\n      return (get_associated_allocator)(handler_, std::allocator<void>());\n    }\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n    template<typename... T>\n    void operator()(ASIO_MOVE_ARG(T)... 
t)\n    {\n      if (invocations_ < ~unsigned(0))\n        ++invocations_;\n      impl_(*this, ASIO_MOVE_CAST(T)(t)...);\n    }\n\n    void complete(Args... args)\n    {\n      this->work_.reset();\n      this->handler_(ASIO_MOVE_CAST(Args)(args)...);\n    }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n    void operator()()\n    {\n      if (invocations_ < ~unsigned(0))\n        ++invocations_;\n      impl_(*this);\n    }\n\n    void complete()\n    {\n      this->work_.reset();\n      this->handler_();\n    }\n\n#define ASIO_PRIVATE_COMPOSED_OP_DEF(n) \\\n    template<ASIO_VARIADIC_TPARAMS(n)> \\\n    void operator()(ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n    { \\\n      if (invocations_ < ~unsigned(0)) \\\n        ++invocations_; \\\n      impl_(*this, ASIO_VARIADIC_MOVE_ARGS(n)); \\\n    } \\\n    \\\n    template<ASIO_VARIADIC_TPARAMS(n)> \\\n    void complete(ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n    { \\\n      this->work_.reset(); \\\n      this->handler_(ASIO_VARIADIC_MOVE_ARGS(n)); \\\n    } \\\n    /**/\n    ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_COMPOSED_OP_DEF)\n#undef ASIO_PRIVATE_COMPOSED_OP_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  //private:\n    Impl impl_;\n    Work work_;\n    Handler handler_;\n    unsigned invocations_;\n  };\n\n  template <typename Impl, typename Work, typename Handler, typename Signature>\n  inline void* asio_handler_allocate(std::size_t size,\n      composed_op<Impl, Work, Handler, Signature>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename Impl, typename Work, typename Handler, typename Signature>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      composed_op<Impl, Work, Handler, Signature>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename Impl, typename Work, typename Handler, typename Signature>\n  
inline bool asio_handler_is_continuation(\n      composed_op<Impl, Work, Handler, Signature>* this_handler)\n  {\n    return this_handler->invocations_ > 1 ? true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename Impl,\n      typename Work, typename Handler, typename Signature>\n  inline void asio_handler_invoke(Function& function,\n      composed_op<Impl, Work, Handler, Signature>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename Impl,\n      typename Work, typename Handler, typename Signature>\n  inline void asio_handler_invoke(const Function& function,\n      composed_op<Impl, Work, Handler, Signature>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Signature, typename Executors>\n  class initiate_composed_op\n  {\n  public:\n    typedef typename composed_io_executors<Executors>::head_type executor_type;\n\n    template <typename T>\n    explicit initiate_composed_op(ASIO_MOVE_ARG(T) executors)\n      : executors_(ASIO_MOVE_CAST(T)(executors))\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return executors_.head_;\n    }\n\n    template <typename Handler, typename Impl>\n    void operator()(ASIO_MOVE_ARG(Handler) handler,\n        ASIO_MOVE_ARG(Impl) impl) const\n    {\n      composed_op<typename decay<Impl>::type, composed_work<Executors>,\n        typename decay<Handler>::type, Signature>(\n          ASIO_MOVE_CAST(Impl)(impl),\n          composed_work<Executors>(executors_),\n          ASIO_MOVE_CAST(Handler)(handler))();\n    }\n\n  private:\n    composed_io_executors<Executors> executors_;\n  };\n\n  template <typename Signature, typename Executors>\n  inline initiate_composed_op<Signature, Executors> make_initiate_composed_op(\n      
ASIO_MOVE_ARG(composed_io_executors<Executors>) executors)\n  {\n    return initiate_composed_op<Signature, Executors>(\n        ASIO_MOVE_CAST(composed_io_executors<Executors>)(executors));\n  }\n\n  template <typename IoObject>\n  inline typename IoObject::executor_type\n  get_composed_io_executor(IoObject& io_object)\n  {\n    return io_object.get_executor();\n  }\n\n  template <typename Executor>\n  inline const Executor& get_composed_io_executor(const Executor& ex,\n      typename enable_if<is_executor<Executor>::value>::type* = 0)\n  {\n    return ex;\n  }\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename CompletionToken, typename Signature,\n    typename Implementation, typename... IoObjectsOrExecutors>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature)\nasync_compose(ASIO_MOVE_ARG(Implementation) implementation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token,\n    ASIO_MOVE_ARG(IoObjectsOrExecutors)... 
io_objects_or_executors)\n{\n  return async_initiate<CompletionToken, Signature>(\n      detail::make_initiate_composed_op<Signature>(\n        detail::make_composed_io_executors(\n          detail::get_composed_io_executor(\n            ASIO_MOVE_CAST(IoObjectsOrExecutors)(\n              io_objects_or_executors))...)),\n      token, ASIO_MOVE_CAST(Implementation)(implementation));\n}\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename CompletionToken, typename Signature, typename Implementation>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature)\nasync_compose(ASIO_MOVE_ARG(Implementation) implementation,\n    ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token)\n{\n  return async_initiate<CompletionToken, Signature>(\n      detail::make_initiate_composed_op<Signature>(\n        detail::make_composed_io_executors()),\n      token, ASIO_MOVE_CAST(Implementation)(implementation));\n}\n\n# define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR(n) \\\n  ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_##n\n\n# define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_1 \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1))\n# define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_2 \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T2)(x2))\n# define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_3 \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T2)(x2)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T3)(x3))\n# define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_4 \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T2)(x2)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T3)(x3)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T4)(x4))\n# define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_5 \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)), \\\n  
detail::get_composed_io_executor(ASIO_MOVE_CAST(T2)(x2)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T3)(x3)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T4)(x4)), \\\n  detail::get_composed_io_executor(ASIO_MOVE_CAST(T5)(x5))\n\n#define ASIO_PRIVATE_ASYNC_COMPOSE_DEF(n) \\\n  template <typename CompletionToken, typename Signature, \\\n      typename Implementation, ASIO_VARIADIC_TPARAMS(n)> \\\n  ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature) \\\n  async_compose(ASIO_MOVE_ARG(Implementation) implementation, \\\n      ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    return async_initiate<CompletionToken, Signature>( \\\n        detail::make_initiate_composed_op<Signature>( \\\n          detail::make_composed_io_executors( \\\n            ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR(n))), \\\n        token, ASIO_MOVE_CAST(Implementation)(implementation)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_ASYNC_COMPOSE_DEF)\n#undef ASIO_PRIVATE_ASYNC_COMPOSE_DEF\n\n#undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR\n#undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_1\n#undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_2\n#undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_3\n#undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_4\n#undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_5\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_COMPOSE_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/connect.hpp",
    "content": "//\n// impl/connect.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_CONNECT_HPP\n#define ASIO_IMPL_CONNECT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include <algorithm>\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/post.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  struct default_connect_condition\n  {\n    template <typename Endpoint>\n    bool operator()(const asio::error_code&, const Endpoint&)\n    {\n      return true;\n    }\n  };\n\n  template <typename Protocol, typename Iterator>\n  inline typename Protocol::endpoint deref_connect_result(\n      Iterator iter, asio::error_code& ec)\n  {\n    return ec ? typename Protocol::endpoint() : *iter;\n  }\n\n  template <typename T, typename Iterator>\n  struct legacy_connect_condition_helper : T\n  {\n    typedef char (*fallback_func_type)(...);\n    operator fallback_func_type() const;\n  };\n\n  template <typename R, typename Arg1, typename Arg2, typename Iterator>\n  struct legacy_connect_condition_helper<R (*)(Arg1, Arg2), Iterator>\n  {\n    R operator()(Arg1, Arg2) const;\n    char operator()(...) 
const;\n  };\n\n  template <typename T, typename Iterator>\n  struct is_legacy_connect_condition\n  {\n    static char asio_connect_condition_check(char);\n    static char (&asio_connect_condition_check(Iterator))[2];\n\n    static const bool value =\n      sizeof(asio_connect_condition_check(\n        (*static_cast<legacy_connect_condition_helper<T, Iterator>*>(0))(\n          *static_cast<const asio::error_code*>(0),\n          *static_cast<const Iterator*>(0)))) != 1;\n  };\n\n  template <typename ConnectCondition, typename Iterator>\n  inline Iterator call_connect_condition(ConnectCondition& connect_condition,\n      const asio::error_code& ec, Iterator next, Iterator end,\n      typename enable_if<is_legacy_connect_condition<\n        ConnectCondition, Iterator>::value>::type* = 0)\n  {\n    if (next != end)\n      return connect_condition(ec, next);\n    return end;\n  }\n\n  template <typename ConnectCondition, typename Iterator>\n  inline Iterator call_connect_condition(ConnectCondition& connect_condition,\n      const asio::error_code& ec, Iterator next, Iterator end,\n      typename enable_if<!is_legacy_connect_condition<\n        ConnectCondition, Iterator>::value>::type* = 0)\n  {\n    for (;next != end; ++next)\n      if (connect_condition(ec, *next))\n        return next;\n    return end;\n  }\n}\n\ntemplate <typename Protocol, typename Executor, typename EndpointSequence>\ntypename Protocol::endpoint connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type*)\n{\n  asio::error_code ec;\n  typename Protocol::endpoint result = connect(s, endpoints, ec);\n  asio::detail::throw_error(ec, \"connect\");\n  return result;\n}\n\ntemplate <typename Protocol, typename Executor, typename EndpointSequence>\ntypename Protocol::endpoint connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints, asio::error_code& ec,\n    
typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type*)\n{\n  return detail::deref_connect_result<Protocol>(\n      connect(s, endpoints.begin(), endpoints.end(),\n        detail::default_connect_condition(), ec), ec);\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\ntemplate <typename Protocol, typename Executor, typename Iterator>\nIterator connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type*)\n{\n  asio::error_code ec;\n  Iterator result = connect(s, begin, ec);\n  asio::detail::throw_error(ec, \"connect\");\n  return result;\n}\n\ntemplate <typename Protocol, typename Executor, typename Iterator>\ninline Iterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, asio::error_code& ec,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type*)\n{\n  return connect(s, begin, Iterator(), detail::default_connect_condition(), ec);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Protocol, typename Executor, typename Iterator>\nIterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, Iterator end)\n{\n  asio::error_code ec;\n  Iterator result = connect(s, begin, end, ec);\n  asio::detail::throw_error(ec, \"connect\");\n  return result;\n}\n\ntemplate <typename Protocol, typename Executor, typename Iterator>\ninline Iterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, Iterator end, asio::error_code& ec)\n{\n  return connect(s, begin, end, detail::default_connect_condition(), ec);\n}\n\ntemplate <typename Protocol, typename Executor,\n    typename EndpointSequence, typename ConnectCondition>\ntypename Protocol::endpoint connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints, ConnectCondition connect_condition,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type*)\n{\n  asio::error_code ec;\n  typename Protocol::endpoint result 
= connect(\n      s, endpoints, connect_condition, ec);\n  asio::detail::throw_error(ec, \"connect\");\n  return result;\n}\n\ntemplate <typename Protocol, typename Executor,\n    typename EndpointSequence, typename ConnectCondition>\ntypename Protocol::endpoint connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints, ConnectCondition connect_condition,\n    asio::error_code& ec,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type*)\n{\n  return detail::deref_connect_result<Protocol>(\n      connect(s, endpoints.begin(), endpoints.end(),\n        connect_condition, ec), ec);\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition>\nIterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, ConnectCondition connect_condition,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type*)\n{\n  asio::error_code ec;\n  Iterator result = connect(s, begin, connect_condition, ec);\n  asio::detail::throw_error(ec, \"connect\");\n  return result;\n}\n\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition>\ninline Iterator connect(basic_socket<Protocol, Executor>& s,\n    Iterator begin, ConnectCondition connect_condition,\n    asio::error_code& ec,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type*)\n{\n  return connect(s, begin, Iterator(), connect_condition, ec);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition>\nIterator connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    Iterator end, ConnectCondition connect_condition)\n{\n  asio::error_code ec;\n  Iterator result = connect(s, begin, end, connect_condition, ec);\n  asio::detail::throw_error(ec, \"connect\");\n  return result;\n}\n\ntemplate <typename Protocol, typename Executor,\n 
   typename Iterator, typename ConnectCondition>\nIterator connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    Iterator end, ConnectCondition connect_condition,\n    asio::error_code& ec)\n{\n  ec = asio::error_code();\n\n  for (Iterator iter = begin; iter != end; ++iter)\n  {\n    iter = (detail::call_connect_condition(connect_condition, ec, iter, end));\n    if (iter != end)\n    {\n      s.close(ec);\n      s.connect(*iter, ec);\n      if (!ec)\n        return iter;\n    }\n    else\n      break;\n  }\n\n  if (!ec)\n    ec = asio::error::not_found;\n\n  return end;\n}\n\nnamespace detail\n{\n  // Enable the empty base class optimisation for the connect condition.\n  template <typename ConnectCondition>\n  class base_from_connect_condition\n  {\n  protected:\n    explicit base_from_connect_condition(\n        const ConnectCondition& connect_condition)\n      : connect_condition_(connect_condition)\n    {\n    }\n\n    template <typename Iterator>\n    void check_condition(const asio::error_code& ec,\n        Iterator& iter, Iterator& end)\n    {\n      iter = detail::call_connect_condition(connect_condition_, ec, iter, end);\n    }\n\n  private:\n    ConnectCondition connect_condition_;\n  };\n\n  // The default_connect_condition implementation is essentially a no-op. 
This\n  // template specialisation lets us eliminate all costs associated with it.\n  template <>\n  class base_from_connect_condition<default_connect_condition>\n  {\n  protected:\n    explicit base_from_connect_condition(const default_connect_condition&)\n    {\n    }\n\n    template <typename Iterator>\n    void check_condition(const asio::error_code&, Iterator&, Iterator&)\n    {\n    }\n  };\n\n  template <typename Protocol, typename Executor, typename EndpointSequence,\n      typename ConnectCondition, typename RangeConnectHandler>\n  class range_connect_op : base_from_connect_condition<ConnectCondition>\n  {\n  public:\n    range_connect_op(basic_socket<Protocol, Executor>& sock,\n        const EndpointSequence& endpoints,\n        const ConnectCondition& connect_condition,\n        RangeConnectHandler& handler)\n      : base_from_connect_condition<ConnectCondition>(connect_condition),\n        socket_(sock),\n        endpoints_(endpoints),\n        index_(0),\n        start_(0),\n        handler_(ASIO_MOVE_CAST(RangeConnectHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    range_connect_op(const range_connect_op& other)\n      : base_from_connect_condition<ConnectCondition>(other),\n        socket_(other.socket_),\n        endpoints_(other.endpoints_),\n        index_(other.index_),\n        start_(other.start_),\n        handler_(other.handler_)\n    {\n    }\n\n    range_connect_op(range_connect_op&& other)\n      : base_from_connect_condition<ConnectCondition>(other),\n        socket_(other.socket_),\n        endpoints_(other.endpoints_),\n        index_(other.index_),\n        start_(other.start_),\n        handler_(ASIO_MOVE_CAST(RangeConnectHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(asio::error_code ec, int start = 0)\n    {\n      this->process(ec, start,\n          const_cast<const EndpointSequence&>(endpoints_).begin(),\n          const_cast<const 
EndpointSequence&>(endpoints_).end());\n    }\n\n  //private:\n    template <typename Iterator>\n    void process(asio::error_code ec,\n        int start, Iterator begin, Iterator end)\n    {\n      Iterator iter = begin;\n      std::advance(iter, index_);\n\n      switch (start_ = start)\n      {\n        case 1:\n        for (;;)\n        {\n          this->check_condition(ec, iter, end);\n          index_ = std::distance(begin, iter);\n\n          if (iter != end)\n          {\n            socket_.close(ec);\n            socket_.async_connect(*iter,\n                ASIO_MOVE_CAST(range_connect_op)(*this));\n            return;\n          }\n\n          if (start)\n          {\n            ec = asio::error::not_found;\n            asio::post(socket_.get_executor(),\n                detail::bind_handler(\n                  ASIO_MOVE_CAST(range_connect_op)(*this), ec));\n            return;\n          }\n\n          /* fall-through */ default:\n\n          if (iter == end)\n            break;\n\n          if (!socket_.is_open())\n          {\n            ec = asio::error::operation_aborted;\n            break;\n          }\n\n          if (!ec)\n            break;\n\n          ++iter;\n          ++index_;\n        }\n\n        handler_(static_cast<const asio::error_code&>(ec),\n            static_cast<const typename Protocol::endpoint&>(\n              ec || iter == end ? 
typename Protocol::endpoint() : *iter));\n      }\n    }\n\n    basic_socket<Protocol, Executor>& socket_;\n    EndpointSequence endpoints_;\n    std::size_t index_;\n    int start_;\n    RangeConnectHandler handler_;\n  };\n\n  template <typename Protocol, typename Executor, typename EndpointSequence,\n      typename ConnectCondition, typename RangeConnectHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      range_connect_op<Protocol, Executor, EndpointSequence,\n        ConnectCondition, RangeConnectHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename Protocol, typename Executor, typename EndpointSequence,\n      typename ConnectCondition, typename RangeConnectHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      range_connect_op<Protocol, Executor, EndpointSequence,\n        ConnectCondition, RangeConnectHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename Protocol, typename Executor, typename EndpointSequence,\n      typename ConnectCondition, typename RangeConnectHandler>\n  inline bool asio_handler_is_continuation(\n      range_connect_op<Protocol, Executor, EndpointSequence,\n        ConnectCondition, RangeConnectHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n        this_handler->handler_);\n  }\n\n  template <typename Function, typename Executor, typename Protocol,\n      typename EndpointSequence, typename ConnectCondition,\n      typename RangeConnectHandler>\n  inline void asio_handler_invoke(Function& function,\n      range_connect_op<Protocol, Executor, EndpointSequence,\n        ConnectCondition, RangeConnectHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename 
Executor, typename Protocol,\n      typename EndpointSequence, typename ConnectCondition,\n      typename RangeConnectHandler>\n  inline void asio_handler_invoke(const Function& function,\n      range_connect_op<Protocol, Executor, EndpointSequence,\n        ConnectCondition, RangeConnectHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Protocol, typename Executor>\n  class initiate_async_range_connect\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_range_connect(basic_socket<Protocol, Executor>& s)\n      : socket_(s)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return socket_.get_executor();\n    }\n\n    template <typename RangeConnectHandler,\n        typename EndpointSequence, typename ConnectCondition>\n    void operator()(ASIO_MOVE_ARG(RangeConnectHandler) handler,\n        const EndpointSequence& endpoints,\n        const ConnectCondition& connect_condition) const\n    {\n      // If you get an error on the following line it means that your\n      // handler does not meet the documented type requirements for an\n      // RangeConnectHandler.\n      ASIO_RANGE_CONNECT_HANDLER_CHECK(RangeConnectHandler,\n          handler, typename Protocol::endpoint) type_check;\n\n      non_const_lvalue<RangeConnectHandler> handler2(handler);\n      range_connect_op<Protocol, Executor, EndpointSequence, ConnectCondition,\n        typename decay<RangeConnectHandler>::type>(socket_, endpoints,\n          connect_condition, handler2.value)(asio::error_code(), 1);\n    }\n\n  private:\n    basic_socket<Protocol, Executor>& socket_;\n  };\n\n  template <typename Protocol, typename Executor, typename Iterator,\n      typename ConnectCondition, typename IteratorConnectHandler>\n  class iterator_connect_op : base_from_connect_condition<ConnectCondition>\n  {\n  public:\n    iterator_connect_op(basic_socket<Protocol, 
Executor>& sock,\n        const Iterator& begin, const Iterator& end,\n        const ConnectCondition& connect_condition,\n        IteratorConnectHandler& handler)\n      : base_from_connect_condition<ConnectCondition>(connect_condition),\n        socket_(sock),\n        iter_(begin),\n        end_(end),\n        start_(0),\n        handler_(ASIO_MOVE_CAST(IteratorConnectHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    iterator_connect_op(const iterator_connect_op& other)\n      : base_from_connect_condition<ConnectCondition>(other),\n        socket_(other.socket_),\n        iter_(other.iter_),\n        end_(other.end_),\n        start_(other.start_),\n        handler_(other.handler_)\n    {\n    }\n\n    iterator_connect_op(iterator_connect_op&& other)\n      : base_from_connect_condition<ConnectCondition>(other),\n        socket_(other.socket_),\n        iter_(other.iter_),\n        end_(other.end_),\n        start_(other.start_),\n        handler_(ASIO_MOVE_CAST(IteratorConnectHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(asio::error_code ec, int start = 0)\n    {\n      switch (start_ = start)\n      {\n        case 1:\n        for (;;)\n        {\n          this->check_condition(ec, iter_, end_);\n\n          if (iter_ != end_)\n          {\n            socket_.close(ec);\n            socket_.async_connect(*iter_,\n                ASIO_MOVE_CAST(iterator_connect_op)(*this));\n            return;\n          }\n\n          if (start)\n          {\n            ec = asio::error::not_found;\n            asio::post(socket_.get_executor(),\n                detail::bind_handler(\n                  ASIO_MOVE_CAST(iterator_connect_op)(*this), ec));\n            return;\n          }\n\n          /* fall-through */ default:\n\n          if (iter_ == end_)\n            break;\n\n          if (!socket_.is_open())\n          {\n            ec = asio::error::operation_aborted;\n            break;\n          
}\n\n          if (!ec)\n            break;\n\n          ++iter_;\n        }\n\n        handler_(static_cast<const asio::error_code&>(ec),\n            static_cast<const Iterator&>(iter_));\n      }\n    }\n\n  //private:\n    basic_socket<Protocol, Executor>& socket_;\n    Iterator iter_;\n    Iterator end_;\n    int start_;\n    IteratorConnectHandler handler_;\n  };\n\n  template <typename Protocol, typename Executor, typename Iterator,\n      typename ConnectCondition, typename IteratorConnectHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      iterator_connect_op<Protocol, Executor, Iterator,\n        ConnectCondition, IteratorConnectHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename Protocol, typename Executor, typename Iterator,\n      typename ConnectCondition, typename IteratorConnectHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      iterator_connect_op<Protocol, Executor, Iterator,\n        ConnectCondition, IteratorConnectHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename Protocol, typename Executor, typename Iterator,\n      typename ConnectCondition, typename IteratorConnectHandler>\n  inline bool asio_handler_is_continuation(\n      iterator_connect_op<Protocol, Executor, Iterator,\n        ConnectCondition, IteratorConnectHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n        this_handler->handler_);\n  }\n\n  template <typename Function, typename Executor, typename Protocol,\n      typename Iterator, typename ConnectCondition,\n      typename IteratorConnectHandler>\n  inline void asio_handler_invoke(Function& function,\n      iterator_connect_op<Protocol, Executor, Iterator,\n        ConnectCondition, IteratorConnectHandler>* this_handler)\n  {\n    
asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename Executor, typename Protocol,\n      typename Iterator, typename ConnectCondition,\n      typename IteratorConnectHandler>\n  inline void asio_handler_invoke(const Function& function,\n      iterator_connect_op<Protocol, Executor, Iterator,\n        ConnectCondition, IteratorConnectHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Protocol, typename Executor>\n  class initiate_async_iterator_connect\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_iterator_connect(\n        basic_socket<Protocol, Executor>& s)\n      : socket_(s)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return socket_.get_executor();\n    }\n\n    template <typename IteratorConnectHandler,\n        typename Iterator, typename ConnectCondition>\n    void operator()(ASIO_MOVE_ARG(IteratorConnectHandler) handler,\n        Iterator begin, Iterator end,\n        const ConnectCondition& connect_condition) const\n    {\n      // If you get an error on the following line it means that your\n      // handler does not meet the documented type requirements for an\n      // IteratorConnectHandler.\n      ASIO_ITERATOR_CONNECT_HANDLER_CHECK(\n          IteratorConnectHandler, handler, Iterator) type_check;\n\n      non_const_lvalue<IteratorConnectHandler> handler2(handler);\n      iterator_connect_op<Protocol, Executor, Iterator, ConnectCondition,\n        typename decay<IteratorConnectHandler>::type>(socket_, begin, end,\n          connect_condition, handler2.value)(asio::error_code(), 1);\n    }\n\n  private:\n    basic_socket<Protocol, Executor>& socket_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Protocol, typename Executor, typename EndpointSequence,\n    
typename ConnectCondition, typename RangeConnectHandler, typename Allocator>\nstruct associated_allocator<\n    detail::range_connect_op<Protocol, Executor, EndpointSequence,\n      ConnectCondition, RangeConnectHandler>, Allocator>\n{\n  typedef typename associated_allocator<\n      RangeConnectHandler, Allocator>::type type;\n\n  static type get(\n      const detail::range_connect_op<Protocol, Executor, EndpointSequence,\n        ConnectCondition, RangeConnectHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<RangeConnectHandler,\n        Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Protocol, typename Executor, typename EndpointSequence,\n    typename ConnectCondition, typename RangeConnectHandler, typename Executor1>\nstruct associated_executor<\n    detail::range_connect_op<Protocol, Executor, EndpointSequence,\n      ConnectCondition, RangeConnectHandler>, Executor1>\n{\n  typedef typename associated_executor<\n      RangeConnectHandler, Executor1>::type type;\n\n  static type get(\n      const detail::range_connect_op<Protocol, Executor, EndpointSequence,\n      ConnectCondition, RangeConnectHandler>& h,\n      const Executor1& ex = Executor1()) ASIO_NOEXCEPT\n  {\n    return associated_executor<RangeConnectHandler,\n        Executor1>::get(h.handler_, ex);\n  }\n};\n\ntemplate <typename Protocol, typename Executor, typename Iterator,\n    typename ConnectCondition, typename IteratorConnectHandler,\n    typename Allocator>\nstruct associated_allocator<\n    detail::iterator_connect_op<Protocol, Executor,\n      Iterator, ConnectCondition, IteratorConnectHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<\n      IteratorConnectHandler, Allocator>::type type;\n\n  static type get(\n      const detail::iterator_connect_op<Protocol, Executor,\n        Iterator, ConnectCondition, IteratorConnectHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    
return associated_allocator<IteratorConnectHandler,\n        Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Protocol, typename Executor, typename Iterator,\n    typename ConnectCondition, typename IteratorConnectHandler,\n    typename Executor1>\nstruct associated_executor<\n    detail::iterator_connect_op<Protocol, Executor,\n      Iterator, ConnectCondition, IteratorConnectHandler>,\n    Executor1>\n{\n  typedef typename associated_executor<\n      IteratorConnectHandler, Executor1>::type type;\n\n  static type get(\n      const detail::iterator_connect_op<Protocol, Executor,\n        Iterator, ConnectCondition, IteratorConnectHandler>& h,\n      const Executor1& ex = Executor1()) ASIO_NOEXCEPT\n  {\n    return associated_executor<IteratorConnectHandler,\n        Executor1>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Protocol, typename Executor, typename EndpointSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      typename Protocol::endpoint)) RangeConnectHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(RangeConnectHandler,\n    void (asio::error_code, typename Protocol::endpoint))\nasync_connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints,\n    ASIO_MOVE_ARG(RangeConnectHandler) handler,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type*)\n{\n  return async_initiate<RangeConnectHandler,\n    void (asio::error_code, typename Protocol::endpoint)>(\n      detail::initiate_async_range_connect<Protocol, Executor>(s),\n      handler, endpoints, detail::default_connect_condition());\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\ntemplate <typename Protocol, typename Executor, typename Iterator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      Iterator)) IteratorConnectHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(IteratorConnectHandler,\n    void (asio::error_code, 
Iterator))\nasync_connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    ASIO_MOVE_ARG(IteratorConnectHandler) handler,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type*)\n{\n  return async_initiate<IteratorConnectHandler,\n    void (asio::error_code, Iterator)>(\n      detail::initiate_async_iterator_connect<Protocol, Executor>(s),\n      handler, begin, Iterator(), detail::default_connect_condition());\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Protocol, typename Executor, typename Iterator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      Iterator)) IteratorConnectHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(IteratorConnectHandler,\n    void (asio::error_code, Iterator))\nasync_connect(basic_socket<Protocol, Executor>& s, Iterator begin, Iterator end,\n    ASIO_MOVE_ARG(IteratorConnectHandler) handler)\n{\n  return async_initiate<IteratorConnectHandler,\n    void (asio::error_code, Iterator)>(\n      detail::initiate_async_iterator_connect<Protocol, Executor>(s),\n      handler, begin, end, detail::default_connect_condition());\n}\n\ntemplate <typename Protocol, typename Executor,\n    typename EndpointSequence, typename ConnectCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      typename Protocol::endpoint)) RangeConnectHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(RangeConnectHandler,\n    void (asio::error_code, typename Protocol::endpoint))\nasync_connect(basic_socket<Protocol, Executor>& s,\n    const EndpointSequence& endpoints, ConnectCondition connect_condition,\n    ASIO_MOVE_ARG(RangeConnectHandler) handler,\n    typename enable_if<is_endpoint_sequence<\n        EndpointSequence>::value>::type*)\n{\n  return async_initiate<RangeConnectHandler,\n    void (asio::error_code, typename Protocol::endpoint)>(\n      detail::initiate_async_range_connect<Protocol, Executor>(s),\n      handler, endpoints, connect_condition);\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\ntemplate 
<typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      Iterator)) IteratorConnectHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(IteratorConnectHandler,\n    void (asio::error_code, Iterator))\nasync_connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    ConnectCondition connect_condition,\n    ASIO_MOVE_ARG(IteratorConnectHandler) handler,\n    typename enable_if<!is_endpoint_sequence<Iterator>::value>::type*)\n{\n  return async_initiate<IteratorConnectHandler,\n    void (asio::error_code, Iterator)>(\n      detail::initiate_async_iterator_connect<Protocol, Executor>(s),\n      handler, begin, Iterator(), connect_condition);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Protocol, typename Executor,\n    typename Iterator, typename ConnectCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      Iterator)) IteratorConnectHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(IteratorConnectHandler,\n    void (asio::error_code, Iterator))\nasync_connect(basic_socket<Protocol, Executor>& s, Iterator begin,\n    Iterator end, ConnectCondition connect_condition,\n    ASIO_MOVE_ARG(IteratorConnectHandler) handler)\n{\n  return async_initiate<IteratorConnectHandler,\n    void (asio::error_code, Iterator)>(\n      detail::initiate_async_iterator_connect<Protocol, Executor>(s),\n      handler, begin, end, connect_condition);\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_CONNECT_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/defer.hpp",
    "content": "//\n// impl/defer.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_DEFER_HPP\n#define ASIO_IMPL_DEFER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/work_dispatcher.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass initiate_defer\n{\npublic:\n  template <typename CompletionHandler>\n  void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const\n  {\n    typedef typename decay<CompletionHandler>::type DecayedHandler;\n\n    typename associated_executor<DecayedHandler>::type ex(\n        (get_associated_executor)(handler));\n\n    typename associated_allocator<DecayedHandler>::type alloc(\n        (get_associated_allocator)(handler));\n\n    ex.defer(ASIO_MOVE_CAST(CompletionHandler)(handler), alloc);\n  }\n};\n\ntemplate <typename Executor>\nclass initiate_defer_with_executor\n{\npublic:\n  typedef Executor executor_type;\n\n  explicit initiate_defer_with_executor(const Executor& ex)\n    : ex_(ex)\n  {\n  }\n\n  executor_type get_executor() const ASIO_NOEXCEPT\n  {\n    return ex_;\n  }\n\n  template <typename CompletionHandler>\n  void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const\n  {\n    typedef typename decay<CompletionHandler>::type DecayedHandler;\n\n    typename associated_allocator<DecayedHandler>::type alloc(\n        (get_associated_allocator)(handler));\n\n    ex_.defer(detail::work_dispatcher<DecayedHandler>(\n          ASIO_MOVE_CAST(CompletionHandler)(handler)), alloc);\n  }\n\nprivate:\n  Executor ex_;\n};\n\n} // 
namespace detail\n\ntemplate <ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) defer(\n    ASIO_MOVE_ARG(CompletionToken) token)\n{\n  return async_initiate<CompletionToken, void()>(\n      detail::initiate_defer(), token);\n}\n\ntemplate <typename Executor,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) defer(\n    const Executor& ex, ASIO_MOVE_ARG(CompletionToken) token,\n    typename enable_if<is_executor<Executor>::value>::type*)\n{\n  return async_initiate<CompletionToken, void()>(\n      detail::initiate_defer_with_executor<Executor>(ex), token);\n}\n\ntemplate <typename ExecutionContext,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) defer(\n    ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token,\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type*)\n{\n  return (defer)(ctx.get_executor(),\n      ASIO_MOVE_CAST(CompletionToken)(token));\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_DEFER_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/detached.hpp",
    "content": "//\n// impl/detached.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_DETACHED_HPP\n#define ASIO_IMPL_DETACHED_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/variadic_templates.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n  // Class to adapt a detached_t as a completion handler.\n  class detached_handler\n  {\n  public:\n    typedef void result_type;\n\n    detached_handler(detached_t)\n    {\n    }\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n    template <typename... Args>\n    void operator()(Args...)\n    {\n    }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n    void operator()()\n    {\n    }\n\n#define ASIO_PRIVATE_DETACHED_DEF(n) \\\n    template <ASIO_VARIADIC_TPARAMS(n)> \\\n    void operator()(ASIO_VARIADIC_TARGS(n)) \\\n    { \\\n    } \\\n    /**/\n    ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_DETACHED_DEF)\n#undef ASIO_PRIVATE_DETACHED_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n  };\n\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Signature>\nstruct async_result<detached_t, Signature>\n{\n  typedef asio::detail::detached_handler completion_handler_type;\n\n  typedef void return_type;\n\n  explicit async_result(completion_handler_type&)\n  {\n  }\n\n  void get()\n  {\n  }\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Initiation, typename RawCompletionToken, typename... 
Args>\n  static return_type initiate(\n      ASIO_MOVE_ARG(Initiation) initiation,\n      ASIO_MOVE_ARG(RawCompletionToken),\n      ASIO_MOVE_ARG(Args)... args)\n  {\n    ASIO_MOVE_CAST(Initiation)(initiation)(\n        detail::detached_handler(detached_t()),\n        ASIO_MOVE_CAST(Args)(args)...);\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Initiation, typename RawCompletionToken>\n  static return_type initiate(\n      ASIO_MOVE_ARG(Initiation) initiation,\n      ASIO_MOVE_ARG(RawCompletionToken))\n  {\n    ASIO_MOVE_CAST(Initiation)(initiation)(\n        detail::detached_handler(detached_t()));\n  }\n\n#define ASIO_PRIVATE_INITIATE_DEF(n) \\\n  template <typename Initiation, typename RawCompletionToken, \\\n      ASIO_VARIADIC_TPARAMS(n)> \\\n  static return_type initiate( \\\n      ASIO_MOVE_ARG(Initiation) initiation, \\\n      ASIO_MOVE_ARG(RawCompletionToken), \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    ASIO_MOVE_CAST(Initiation)(initiation)( \\\n        detail::detached_handler(detached_t()), \\\n        ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INITIATE_DEF)\n#undef ASIO_PRIVATE_INITIATE_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_DETACHED_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/dispatch.hpp",
    "content": "//\n// impl/dispatch.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_DISPATCH_HPP\n#define ASIO_IMPL_DISPATCH_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/work_dispatcher.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass initiate_dispatch\n{\npublic:\n  template <typename CompletionHandler>\n  void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const\n  {\n    typedef typename decay<CompletionHandler>::type DecayedHandler;\n\n    typename associated_executor<DecayedHandler>::type ex(\n        (get_associated_executor)(handler));\n\n    typename associated_allocator<DecayedHandler>::type alloc(\n        (get_associated_allocator)(handler));\n\n    ex.dispatch(ASIO_MOVE_CAST(CompletionHandler)(handler), alloc);\n  }\n};\n\ntemplate <typename Executor>\nclass initiate_dispatch_with_executor\n{\npublic:\n  typedef Executor executor_type;\n\n  explicit initiate_dispatch_with_executor(const Executor& ex)\n    : ex_(ex)\n  {\n  }\n\n  executor_type get_executor() const ASIO_NOEXCEPT\n  {\n    return ex_;\n  }\n\n  template <typename CompletionHandler>\n  void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const\n  {\n    typedef typename decay<CompletionHandler>::type DecayedHandler;\n\n    typename associated_allocator<DecayedHandler>::type alloc(\n        (get_associated_allocator)(handler));\n\n    ex_.dispatch(detail::work_dispatcher<DecayedHandler>(\n          ASIO_MOVE_CAST(CompletionHandler)(handler)), alloc);\n  }\n\nprivate:\n  
Executor ex_;\n};\n\n} // namespace detail\n\ntemplate <ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) dispatch(\n    ASIO_MOVE_ARG(CompletionToken) token)\n{\n  return async_initiate<CompletionToken, void()>(\n      detail::initiate_dispatch(), token);\n}\n\ntemplate <typename Executor,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) dispatch(\n    const Executor& ex, ASIO_MOVE_ARG(CompletionToken) token,\n    typename enable_if<is_executor<Executor>::value>::type*)\n{\n  return async_initiate<CompletionToken, void()>(\n      detail::initiate_dispatch_with_executor<Executor>(ex), token);\n}\n\ntemplate <typename ExecutionContext,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) dispatch(\n    ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token,\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type*)\n{\n  return (dispatch)(ctx.get_executor(),\n      ASIO_MOVE_CAST(CompletionToken)(token));\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_DISPATCH_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/error.ipp",
    "content": "//\n// impl/error.ipp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_ERROR_IPP\n#define ASIO_IMPL_ERROR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace error {\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\nnamespace detail {\n\nclass netdb_category : public asio::error_category\n{\npublic:\n  const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT\n  {\n    return \"asio.netdb\";\n  }\n\n  std::string message(int value) const\n  {\n    if (value == error::host_not_found)\n      return \"Host not found (authoritative)\";\n    if (value == error::host_not_found_try_again)\n      return \"Host not found (non-authoritative), try again later\";\n    if (value == error::no_data)\n      return \"The query is valid, but it does not have associated data\";\n    if (value == error::no_recovery)\n      return \"A non-recoverable error occurred during database lookup\";\n    return \"asio.netdb error\";\n  }\n};\n\n} // namespace detail\n\nconst asio::error_category& get_netdb_category()\n{\n  static detail::netdb_category instance;\n  return instance;\n}\n\nnamespace detail {\n\nclass addrinfo_category : public asio::error_category\n{\npublic:\n  const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT\n  {\n    return \"asio.addrinfo\";\n  }\n\n  std::string message(int value) const\n  {\n    if (value == error::service_not_found)\n      return \"Service not found\";\n    if (value == error::socket_type_not_supported)\n      return \"Socket type not supported\";\n    return \"asio.addrinfo 
error\";\n  }\n};\n\n} // namespace detail\n\nconst asio::error_category& get_addrinfo_category()\n{\n  static detail::addrinfo_category instance;\n  return instance;\n}\n\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\nnamespace detail {\n\nclass misc_category : public asio::error_category\n{\npublic:\n  const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT\n  {\n    return \"asio.misc\";\n  }\n\n  std::string message(int value) const\n  {\n    if (value == error::already_open)\n      return \"Already open\";\n    if (value == error::eof)\n      return \"End of file\";\n    if (value == error::not_found)\n      return \"Element not found\";\n    if (value == error::fd_set_failure)\n      return \"The descriptor does not fit into the select call's fd_set\";\n    return \"asio.misc error\";\n  }\n};\n\n} // namespace detail\n\nconst asio::error_category& get_misc_category()\n{\n  static detail::misc_category instance;\n  return instance;\n}\n\n} // namespace error\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_ERROR_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/error_code.ipp",
    "content": "//\n// impl/error_code.ipp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_ERROR_CODE_IPP\n#define ASIO_IMPL_ERROR_CODE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# include <winerror.h>\n#elif defined(ASIO_WINDOWS_RUNTIME)\n# include <windows.h>\n#else\n# include <cerrno>\n# include <cstring>\n# include <string>\n#endif\n#include \"asio/detail/local_free_on_block_exit.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/error_code.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass system_category : public error_category\n{\npublic:\n  const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT\n  {\n    return \"asio.system\";\n  }\n\n  std::string message(int value) const\n  {\n#if defined(ASIO_WINDOWS_RUNTIME) || defined(ASIO_WINDOWS_APP)\n    std::wstring wmsg(128, wchar_t());\n    for (;;)\n    {\n      DWORD wlength = ::FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM\n          | FORMAT_MESSAGE_IGNORE_INSERTS, 0, value,\n          MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),\n          &wmsg[0], static_cast<DWORD>(wmsg.size()), 0);\n      if (wlength == 0 && ::GetLastError() == ERROR_INSUFFICIENT_BUFFER)\n      {\n        wmsg.resize(wmsg.size() + wmsg.size() / 2);\n        continue;\n      }\n      if (wlength && wmsg[wlength - 1] == '\\n')\n        --wlength;\n      if (wlength && wmsg[wlength - 1] == '\\r')\n        --wlength;\n      if (wlength)\n      {\n        std::string msg(wlength * 2, char());\n        int length = ::WideCharToMultiByte(CP_ACP, 0,\n            wmsg.c_str(), 
static_cast<int>(wlength),\n            &msg[0], static_cast<int>(wlength * 2), 0, 0);\n        if (length <= 0)\n          return \"asio.system error\";\n        msg.resize(static_cast<std::size_t>(length));\n        return msg;\n      }\n      else\n        return \"asio.system error\";\n    }\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n    char* msg = 0;\n    DWORD length = ::FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER\n        | FORMAT_MESSAGE_FROM_SYSTEM\n        | FORMAT_MESSAGE_IGNORE_INSERTS, 0, value,\n        MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (char*)&msg, 0, 0);\n    detail::local_free_on_block_exit local_free_obj(msg);\n    if (length && msg[length - 1] == '\\n')\n      msg[--length] = '\\0';\n    if (length && msg[length - 1] == '\\r')\n      msg[--length] = '\\0';\n    if (length)\n      return msg;\n    else\n      return \"asio.system error\";\n#else // defined(ASIO_WINDOWS_DESKTOP) || defined(__CYGWIN__)\n#if !defined(__sun)\n    if (value == ECANCELED)\n      return \"Operation aborted.\";\n#endif // !defined(__sun)\n#if defined(__sun) || defined(__QNX__) || defined(__SYMBIAN32__)\n    using namespace std;\n    return strerror(value);\n#else\n    char buf[256] = \"\";\n    using namespace std;\n    return strerror_result(strerror_r(value, buf, sizeof(buf)), buf);\n#endif\n#endif // defined(ASIO_WINDOWS_DESKTOP) || defined(__CYGWIN__)\n  }\n\n#if defined(ASIO_HAS_STD_ERROR_CODE)\n  std::error_condition default_error_condition(\n      int ev) const ASIO_ERROR_CATEGORY_NOEXCEPT\n  {\n    switch (ev)\n    {\n    case access_denied:\n      return std::errc::permission_denied;\n    case address_family_not_supported:\n      return std::errc::address_family_not_supported;\n    case address_in_use:\n      return std::errc::address_in_use;\n    case already_connected:\n      return std::errc::already_connected;\n    case already_started:\n      return std::errc::connection_already_in_progress;\n    case broken_pipe:\n      return 
std::errc::broken_pipe;\n    case connection_aborted:\n      return std::errc::connection_aborted;\n    case connection_refused:\n      return std::errc::connection_refused;\n    case connection_reset:\n      return std::errc::connection_reset;\n    case bad_descriptor:\n      return std::errc::bad_file_descriptor;\n    case fault:\n      return std::errc::bad_address;\n    case host_unreachable:\n      return std::errc::host_unreachable;\n    case in_progress:\n      return std::errc::operation_in_progress;\n    case interrupted:\n      return std::errc::interrupted;\n    case invalid_argument:\n      return std::errc::invalid_argument;\n    case message_size:\n      return std::errc::message_size;\n    case name_too_long:\n      return std::errc::filename_too_long;\n    case network_down:\n      return std::errc::network_down;\n    case network_reset:\n      return std::errc::network_reset;\n    case network_unreachable:\n      return std::errc::network_unreachable;\n    case no_descriptors:\n      return std::errc::too_many_files_open;\n    case no_buffer_space:\n      return std::errc::no_buffer_space;\n    case no_memory:\n      return std::errc::not_enough_memory;\n    case no_permission:\n      return std::errc::operation_not_permitted;\n    case no_protocol_option:\n      return std::errc::no_protocol_option;\n    case no_such_device:\n      return std::errc::no_such_device;\n    case not_connected:\n      return std::errc::not_connected;\n    case not_socket:\n      return std::errc::not_a_socket;\n    case operation_aborted:\n      return std::errc::operation_canceled;\n    case operation_not_supported:\n      return std::errc::operation_not_supported;\n    case shut_down:\n      return std::error_condition(ev, *this);\n    case timed_out:\n      return std::errc::timed_out;\n    case try_again:\n      return std::errc::resource_unavailable_try_again;\n    case would_block:\n      return std::errc::operation_would_block;\n    default:\n      return 
std::error_condition(ev, *this);\n    }\n  }\n#endif // defined(ASIO_HAS_STD_ERROR_CODE)\n\nprivate:\n  // Helper function to adapt the result from glibc's variant of strerror_r.\n  static const char* strerror_result(int, const char* s) { return s; }\n  static const char* strerror_result(const char* s, const char*) { return s; }\n};\n\n} // namespace detail\n\nconst error_category& system_category()\n{\n  static detail::system_category instance;\n  return instance;\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_ERROR_CODE_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/execution_context.hpp",
    "content": "//\n// impl/execution_context.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_EXECUTION_CONTEXT_HPP\n#define ASIO_IMPL_EXECUTION_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n#include \"asio/detail/service_registry.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Service>\ninline Service& use_service(execution_context& e)\n{\n  // Check that Service meets the necessary type requirements.\n  (void)static_cast<execution_context::service*>(static_cast<Service*>(0));\n\n  return e.service_registry_->template use_service<Service>();\n}\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename Service, typename... Args>\nService& make_service(execution_context& e, ASIO_MOVE_ARG(Args)... 
args)\n{\n  detail::scoped_ptr<Service> svc(\n      new Service(e, ASIO_MOVE_CAST(Args)(args)...));\n  e.service_registry_->template add_service<Service>(svc.get());\n  Service& result = *svc;\n  svc.release();\n  return result;\n}\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename Service>\nService& make_service(execution_context& e)\n{\n  detail::scoped_ptr<Service> svc(new Service(e));\n  e.service_registry_->template add_service<Service>(svc.get());\n  Service& result = *svc;\n  svc.release();\n  return result;\n}\n\n#define ASIO_PRIVATE_MAKE_SERVICE_DEF(n) \\\n  template <typename Service, ASIO_VARIADIC_TPARAMS(n)> \\\n  Service& make_service(execution_context& e, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    detail::scoped_ptr<Service> svc( \\\n        new Service(e, ASIO_VARIADIC_MOVE_ARGS(n))); \\\n    e.service_registry_->template add_service<Service>(svc.get()); \\\n    Service& result = *svc; \\\n    svc.release(); \\\n    return result; \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_MAKE_SERVICE_DEF)\n#undef ASIO_PRIVATE_MAKE_SERVICE_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename Service>\ninline void add_service(execution_context& e, Service* svc)\n{\n  // Check that Service meets the necessary type requirements.\n  (void)static_cast<execution_context::service*>(static_cast<Service*>(0));\n\n  e.service_registry_->template add_service<Service>(svc);\n}\n\ntemplate <typename Service>\ninline bool has_service(execution_context& e)\n{\n  // Check that Service meets the necessary type requirements.\n  (void)static_cast<execution_context::service*>(static_cast<Service*>(0));\n\n  return e.service_registry_->template has_service<Service>();\n}\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ninline execution_context& execution_context::service::context()\n{\n  return owner_;\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // 
ASIO_IMPL_EXECUTION_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/execution_context.ipp",
    "content": "//\n// impl/execution_context.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_EXECUTION_CONTEXT_IPP\n#define ASIO_IMPL_EXECUTION_CONTEXT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/detail/service_registry.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nexecution_context::execution_context()\n  : service_registry_(new asio::detail::service_registry(*this))\n{\n}\n\nexecution_context::~execution_context()\n{\n  shutdown();\n  destroy();\n  delete service_registry_;\n}\n\nvoid execution_context::shutdown()\n{\n  service_registry_->shutdown_services();\n}\n\nvoid execution_context::destroy()\n{\n  service_registry_->destroy_services();\n}\n\nvoid execution_context::notify_fork(\n    asio::execution_context::fork_event event)\n{\n  service_registry_->notify_fork(event);\n}\n\nexecution_context::service::service(execution_context& owner)\n  : owner_(owner),\n    next_(0)\n{\n}\n\nexecution_context::service::~service()\n{\n}\n\nvoid execution_context::service::notify_fork(execution_context::fork_event)\n{\n}\n\nservice_already_exists::service_already_exists()\n  : std::logic_error(\"Service already exists.\")\n{\n}\n\ninvalid_service_owner::invalid_service_owner()\n  : std::logic_error(\"Invalid service owner.\")\n{\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_EXECUTION_CONTEXT_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/executor.hpp",
    "content": "//\n// impl/executor.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_EXECUTOR_HPP\n#define ASIO_IMPL_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/atomic_count.hpp\"\n#include \"asio/detail/executor_function.hpp\"\n#include \"asio/detail/global.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/recycling_allocator.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/system_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(GENERATING_DOCUMENTATION)\n\n#if defined(ASIO_HAS_MOVE)\n\n// Lightweight, move-only function object wrapper.\nclass executor::function\n{\npublic:\n  template <typename F, typename Alloc>\n  explicit function(F f, const Alloc& a)\n  {\n    // Allocate and construct an operation to wrap the function.\n    typedef detail::executor_function<F, Alloc> func_type;\n    typename func_type::ptr p = {\n      detail::addressof(a), func_type::ptr::allocate(a), 0 };\n    func_ = new (p.v) func_type(ASIO_MOVE_CAST(F)(f), a);\n    p.v = 0;\n  }\n\n  function(function&& other) ASIO_NOEXCEPT\n    : func_(other.func_)\n  {\n    other.func_ = 0;\n  }\n\n  ~function()\n  {\n    if (func_)\n      func_->destroy();\n  }\n\n  void operator()()\n  {\n    if (func_)\n    {\n      detail::executor_function_base* func = func_;\n      func_ = 0;\n      func->complete();\n    }\n  }\n\nprivate:\n  detail::executor_function_base* func_;\n};\n\n#else // defined(ASIO_HAS_MOVE)\n\n// Not so lightweight, copyable function object wrapper.\nclass executor::function\n{\npublic:\n  template <typename F, typename Alloc>\n  
explicit function(const F& f, const Alloc&)\n    : impl_(new impl<F>(f))\n  {\n  }\n\n  void operator()()\n  {\n    impl_->invoke_(impl_.get());\n  }\n\nprivate:\n  // Base class for polymorphic function implementations.\n  struct impl_base\n  {\n    void (*invoke_)(impl_base*);\n  };\n\n  // Polymorphic function implementation.\n  template <typename F>\n  struct impl : impl_base\n  {\n    impl(const F& f)\n      : function_(f)\n    {\n      invoke_ = &function::invoke<F>;\n    }\n\n    F function_;\n  };\n\n  // Helper to invoke a function.\n  template <typename F>\n  static void invoke(impl_base* i)\n  {\n    static_cast<impl<F>*>(i)->function_();\n  }\n\n  detail::shared_ptr<impl_base> impl_;\n};\n\n#endif // defined(ASIO_HAS_MOVE)\n\n// Default polymorphic allocator implementation.\ntemplate <typename Executor, typename Allocator>\nclass executor::impl\n  : public executor::impl_base\n{\npublic:\n  typedef ASIO_REBIND_ALLOC(Allocator, impl) allocator_type;\n\n  static impl_base* create(const Executor& e, Allocator a = Allocator())\n  {\n    raw_mem mem(a);\n    impl* p = new (mem.ptr_) impl(e, a);\n    mem.ptr_ = 0;\n    return p;\n  }\n\n  impl(const Executor& e, const Allocator& a) ASIO_NOEXCEPT\n    : impl_base(false),\n      ref_count_(1),\n      executor_(e),\n      allocator_(a)\n  {\n  }\n\n  impl_base* clone() const ASIO_NOEXCEPT\n  {\n    ++ref_count_;\n    return const_cast<impl_base*>(static_cast<const impl_base*>(this));\n  }\n\n  void destroy() ASIO_NOEXCEPT\n  {\n    if (--ref_count_ == 0)\n    {\n      allocator_type alloc(allocator_);\n      impl* p = this;\n      p->~impl();\n      alloc.deallocate(p, 1);\n    }\n  }\n\n  void on_work_started() ASIO_NOEXCEPT\n  {\n    executor_.on_work_started();\n  }\n\n  void on_work_finished() ASIO_NOEXCEPT\n  {\n    executor_.on_work_finished();\n  }\n\n  execution_context& context() ASIO_NOEXCEPT\n  {\n    return executor_.context();\n  }\n\n  void dispatch(ASIO_MOVE_ARG(function) f)\n  {\n    
executor_.dispatch(ASIO_MOVE_CAST(function)(f), allocator_);\n  }\n\n  void post(ASIO_MOVE_ARG(function) f)\n  {\n    executor_.post(ASIO_MOVE_CAST(function)(f), allocator_);\n  }\n\n  void defer(ASIO_MOVE_ARG(function) f)\n  {\n    executor_.defer(ASIO_MOVE_CAST(function)(f), allocator_);\n  }\n\n  type_id_result_type target_type() const ASIO_NOEXCEPT\n  {\n    return type_id<Executor>();\n  }\n\n  void* target() ASIO_NOEXCEPT\n  {\n    return &executor_;\n  }\n\n  const void* target() const ASIO_NOEXCEPT\n  {\n    return &executor_;\n  }\n\n  bool equals(const impl_base* e) const ASIO_NOEXCEPT\n  {\n    if (this == e)\n      return true;\n    if (target_type() != e->target_type())\n      return false;\n    return executor_ == *static_cast<const Executor*>(e->target());\n  }\n\nprivate:\n  mutable detail::atomic_count ref_count_;\n  Executor executor_;\n  Allocator allocator_;\n\n  struct raw_mem\n  {\n    allocator_type allocator_;\n    impl* ptr_;\n\n    explicit raw_mem(const Allocator& a)\n      : allocator_(a),\n        ptr_(allocator_.allocate(1))\n    {\n    }\n\n    ~raw_mem()\n    {\n      if (ptr_)\n        allocator_.deallocate(ptr_, 1);\n    }\n\n  private:\n    // Disallow copying and assignment.\n    raw_mem(const raw_mem&);\n    raw_mem operator=(const raw_mem&);\n  };\n};\n\n// Polymorphic allocator specialisation for system_executor.\ntemplate <typename Allocator>\nclass executor::impl<system_executor, Allocator>\n  : public executor::impl_base\n{\npublic:\n  static impl_base* create(const system_executor&,\n      const Allocator& = Allocator())\n  {\n    return &detail::global<impl<system_executor, std::allocator<void> > >();\n  }\n\n  impl()\n    : impl_base(true)\n  {\n  }\n\n  impl_base* clone() const ASIO_NOEXCEPT\n  {\n    return const_cast<impl_base*>(static_cast<const impl_base*>(this));\n  }\n\n  void destroy() ASIO_NOEXCEPT\n  {\n  }\n\n  void on_work_started() ASIO_NOEXCEPT\n  {\n    executor_.on_work_started();\n  }\n\n  void 
on_work_finished() ASIO_NOEXCEPT\n  {\n    executor_.on_work_finished();\n  }\n\n  execution_context& context() ASIO_NOEXCEPT\n  {\n    return executor_.context();\n  }\n\n  void dispatch(ASIO_MOVE_ARG(function) f)\n  {\n    executor_.dispatch(ASIO_MOVE_CAST(function)(f), allocator_);\n  }\n\n  void post(ASIO_MOVE_ARG(function) f)\n  {\n    executor_.post(ASIO_MOVE_CAST(function)(f), allocator_);\n  }\n\n  void defer(ASIO_MOVE_ARG(function) f)\n  {\n    executor_.defer(ASIO_MOVE_CAST(function)(f), allocator_);\n  }\n\n  type_id_result_type target_type() const ASIO_NOEXCEPT\n  {\n    return type_id<system_executor>();\n  }\n\n  void* target() ASIO_NOEXCEPT\n  {\n    return &executor_;\n  }\n\n  const void* target() const ASIO_NOEXCEPT\n  {\n    return &executor_;\n  }\n\n  bool equals(const impl_base* e) const ASIO_NOEXCEPT\n  {\n    return this == e;\n  }\n\nprivate:\n  system_executor executor_;\n  Allocator allocator_;\n};\n\ntemplate <typename Executor>\nexecutor::executor(Executor e)\n  : impl_(impl<Executor, std::allocator<void> >::create(e))\n{\n}\n\ntemplate <typename Executor, typename Allocator>\nexecutor::executor(allocator_arg_t, const Allocator& a, Executor e)\n  : impl_(impl<Executor, Allocator>::create(e, a))\n{\n}\n\ntemplate <typename Function, typename Allocator>\nvoid executor::dispatch(ASIO_MOVE_ARG(Function) f,\n    const Allocator& a) const\n{\n  impl_base* i = get_impl();\n  if (i->fast_dispatch_)\n    system_executor().dispatch(ASIO_MOVE_CAST(Function)(f), a);\n  else\n    i->dispatch(function(ASIO_MOVE_CAST(Function)(f), a));\n}\n\ntemplate <typename Function, typename Allocator>\nvoid executor::post(ASIO_MOVE_ARG(Function) f,\n    const Allocator& a) const\n{\n  get_impl()->post(function(ASIO_MOVE_CAST(Function)(f), a));\n}\n\ntemplate <typename Function, typename Allocator>\nvoid executor::defer(ASIO_MOVE_ARG(Function) f,\n    const Allocator& a) const\n{\n  get_impl()->defer(function(ASIO_MOVE_CAST(Function)(f), a));\n}\n\ntemplate 
<typename Executor>\nExecutor* executor::target() ASIO_NOEXCEPT\n{\n  return impl_ && impl_->target_type() == type_id<Executor>()\n    ? static_cast<Executor*>(impl_->target()) : 0;\n}\n\ntemplate <typename Executor>\nconst Executor* executor::target() const ASIO_NOEXCEPT\n{\n  return impl_ && impl_->target_type() == type_id<Executor>()\n    ? static_cast<Executor*>(impl_->target()) : 0;\n}\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/executor.ipp",
    "content": "//\n// impl/executor.ipp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_EXECUTOR_IPP\n#define ASIO_IMPL_EXECUTOR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nbad_executor::bad_executor() ASIO_NOEXCEPT\n{\n}\n\nconst char* bad_executor::what() const ASIO_NOEXCEPT_OR_NOTHROW\n{\n  return \"bad executor\";\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_EXECUTOR_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/handler_alloc_hook.ipp",
    "content": "//\n// impl/handler_alloc_hook.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_HANDLER_ALLOC_HOOK_IPP\n#define ASIO_IMPL_HANDLER_ALLOC_HOOK_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/thread_context.hpp\"\n#include \"asio/detail/thread_info_base.hpp\"\n#include \"asio/handler_alloc_hook.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nvoid* asio_handler_allocate(std::size_t size, ...)\n{\n#if !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING)\n  return detail::thread_info_base::allocate(\n      detail::thread_context::thread_call_stack::top(), size);\n#else // !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING)\n  return ::operator new(size);\n#endif // !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING)\n}\n\nvoid asio_handler_deallocate(void* pointer, std::size_t size, ...)\n{\n#if !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING)\n  detail::thread_info_base::deallocate(\n      detail::thread_context::thread_call_stack::top(), pointer, size);\n#else // !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING)\n  (void)size;\n  ::operator delete(pointer);\n#endif // !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING)\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_HANDLER_ALLOC_HOOK_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/io_context.hpp",
    "content": "//\n// impl/io_context.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_IO_CONTEXT_HPP\n#define ASIO_IMPL_IO_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/completion_handler.hpp\"\n#include \"asio/detail/executor_op.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/recycling_allocator.hpp\"\n#include \"asio/detail/service_registry.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\n#if !defined(GENERATING_DOCUMENTATION)\n\nnamespace asio {\n\ntemplate <typename Service>\ninline Service& use_service(io_context& ioc)\n{\n  // Check that Service meets the necessary type requirements.\n  (void)static_cast<execution_context::service*>(static_cast<Service*>(0));\n  (void)static_cast<const execution_context::id*>(&Service::id);\n\n  return ioc.service_registry_->template use_service<Service>(ioc);\n}\n\ntemplate <>\ninline detail::io_context_impl& use_service<detail::io_context_impl>(\n    io_context& ioc)\n{\n  return ioc.impl_;\n}\n\n} // namespace asio\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else\n# include \"asio/detail/scheduler.hpp\"\n#endif\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\ninline io_context::executor_type\nio_context::get_executor() ASIO_NOEXCEPT\n{\n  return executor_type(*this);\n}\n\n#if defined(ASIO_HAS_CHRONO)\n\ntemplate 
<typename Rep, typename Period>\nstd::size_t io_context::run_for(\n    const chrono::duration<Rep, Period>& rel_time)\n{\n  return this->run_until(chrono::steady_clock::now() + rel_time);\n}\n\ntemplate <typename Clock, typename Duration>\nstd::size_t io_context::run_until(\n    const chrono::time_point<Clock, Duration>& abs_time)\n{\n  std::size_t n = 0;\n  while (this->run_one_until(abs_time))\n    if (n != (std::numeric_limits<std::size_t>::max)())\n      ++n;\n  return n;\n}\n\ntemplate <typename Rep, typename Period>\nstd::size_t io_context::run_one_for(\n    const chrono::duration<Rep, Period>& rel_time)\n{\n  return this->run_one_until(chrono::steady_clock::now() + rel_time);\n}\n\ntemplate <typename Clock, typename Duration>\nstd::size_t io_context::run_one_until(\n    const chrono::time_point<Clock, Duration>& abs_time)\n{\n  typename Clock::time_point now = Clock::now();\n  while (now < abs_time)\n  {\n    typename Clock::duration rel_time = abs_time - now;\n    if (rel_time > chrono::seconds(1))\n      rel_time = chrono::seconds(1);\n\n    asio::error_code ec;\n    std::size_t s = impl_.wait_one(\n        static_cast<long>(chrono::duration_cast<\n          chrono::microseconds>(rel_time).count()), ec);\n    asio::detail::throw_error(ec);\n\n    if (s || impl_.stopped())\n      return s;\n\n    now = Clock::now();\n  }\n\n  return 0;\n}\n\n#endif // defined(ASIO_HAS_CHRONO)\n\n#if !defined(ASIO_NO_DEPRECATED)\n\ninline void io_context::reset()\n{\n  restart();\n}\n\nstruct io_context::initiate_dispatch\n{\n  template <typename LegacyCompletionHandler>\n  void operator()(ASIO_MOVE_ARG(LegacyCompletionHandler) handler,\n      io_context* self) const\n  {\n    // If you get an error on the following line it means that your handler does\n    // not meet the documented type requirements for a LegacyCompletionHandler.\n    ASIO_LEGACY_COMPLETION_HANDLER_CHECK(\n        LegacyCompletionHandler, handler) type_check;\n\n    
detail::non_const_lvalue<LegacyCompletionHandler> handler2(handler);\n    if (self->impl_.can_dispatch())\n    {\n      detail::fenced_block b(detail::fenced_block::full);\n      asio_handler_invoke_helpers::invoke(\n          handler2.value, handler2.value);\n    }\n    else\n    {\n      // Allocate and construct an operation to wrap the handler.\n      typedef detail::completion_handler<\n        typename decay<LegacyCompletionHandler>::type> op;\n      typename op::ptr p = { detail::addressof(handler2.value),\n        op::ptr::allocate(handler2.value), 0 };\n      p.p = new (p.v) op(handler2.value);\n\n      ASIO_HANDLER_CREATION((*self, *p.p,\n            \"io_context\", self, 0, \"dispatch\"));\n\n      self->impl_.do_dispatch(p.p);\n      p.v = p.p = 0;\n    }\n  }\n};\n\ntemplate <typename LegacyCompletionHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(LegacyCompletionHandler, void ())\nio_context::dispatch(ASIO_MOVE_ARG(LegacyCompletionHandler) handler)\n{\n  return async_initiate<LegacyCompletionHandler, void ()>(\n      initiate_dispatch(), handler, this);\n}\n\nstruct io_context::initiate_post\n{\n  template <typename LegacyCompletionHandler>\n  void operator()(ASIO_MOVE_ARG(LegacyCompletionHandler) handler,\n      io_context* self) const\n  {\n    // If you get an error on the following line it means that your handler does\n    // not meet the documented type requirements for a LegacyCompletionHandler.\n    ASIO_LEGACY_COMPLETION_HANDLER_CHECK(\n        LegacyCompletionHandler, handler) type_check;\n\n    detail::non_const_lvalue<LegacyCompletionHandler> handler2(handler);\n\n    bool is_continuation =\n      asio_handler_cont_helpers::is_continuation(handler2.value);\n\n    // Allocate and construct an operation to wrap the handler.\n    typedef detail::completion_handler<\n      typename decay<LegacyCompletionHandler>::type> op;\n    typename op::ptr p = { detail::addressof(handler2.value),\n        op::ptr::allocate(handler2.value), 0 };\n    p.p = new (p.v) 
op(handler2.value);\n\n    ASIO_HANDLER_CREATION((*self, *p.p,\n          \"io_context\", self, 0, \"post\"));\n\n    self->impl_.post_immediate_completion(p.p, is_continuation);\n    p.v = p.p = 0;\n  }\n};\n\ntemplate <typename LegacyCompletionHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(LegacyCompletionHandler, void ())\nio_context::post(ASIO_MOVE_ARG(LegacyCompletionHandler) handler)\n{\n  return async_initiate<LegacyCompletionHandler, void ()>(\n      initiate_post(), handler, this);\n}\n\ntemplate <typename Handler>\n#if defined(GENERATING_DOCUMENTATION)\nunspecified\n#else\ninline detail::wrapped_handler<io_context&, Handler>\n#endif\nio_context::wrap(Handler handler)\n{\n  return detail::wrapped_handler<io_context&, Handler>(*this, handler);\n}\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ninline io_context&\nio_context::executor_type::context() const ASIO_NOEXCEPT\n{\n  return io_context_;\n}\n\ninline void\nio_context::executor_type::on_work_started() const ASIO_NOEXCEPT\n{\n  io_context_.impl_.work_started();\n}\n\ninline void\nio_context::executor_type::on_work_finished() const ASIO_NOEXCEPT\n{\n  io_context_.impl_.work_finished();\n}\n\ntemplate <typename Function, typename Allocator>\nvoid io_context::executor_type::dispatch(\n    ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n{\n  typedef typename decay<Function>::type function_type;\n\n  // Invoke immediately if we are already inside the thread pool.\n  if (io_context_.impl_.can_dispatch())\n  {\n    // Make a local, non-const copy of the function.\n    function_type tmp(ASIO_MOVE_CAST(Function)(f));\n\n    detail::fenced_block b(detail::fenced_block::full);\n    asio_handler_invoke_helpers::invoke(tmp, tmp);\n    return;\n  }\n\n  // Allocate and construct an operation to wrap the function.\n  typedef detail::executor_op<function_type, Allocator, detail::operation> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), 
a);\n\n  ASIO_HANDLER_CREATION((this->context(), *p.p,\n        \"io_context\", &this->context(), 0, \"dispatch\"));\n\n  io_context_.impl_.post_immediate_completion(p.p, false);\n  p.v = p.p = 0;\n}\n\ntemplate <typename Function, typename Allocator>\nvoid io_context::executor_type::post(\n    ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n{\n  typedef typename decay<Function>::type function_type;\n\n  // Allocate and construct an operation to wrap the function.\n  typedef detail::executor_op<function_type, Allocator, detail::operation> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a);\n\n  ASIO_HANDLER_CREATION((this->context(), *p.p,\n        \"io_context\", &this->context(), 0, \"post\"));\n\n  io_context_.impl_.post_immediate_completion(p.p, false);\n  p.v = p.p = 0;\n}\n\ntemplate <typename Function, typename Allocator>\nvoid io_context::executor_type::defer(\n    ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n{\n  typedef typename decay<Function>::type function_type;\n\n  // Allocate and construct an operation to wrap the function.\n  typedef detail::executor_op<function_type, Allocator, detail::operation> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a);\n\n  ASIO_HANDLER_CREATION((this->context(), *p.p,\n        \"io_context\", &this->context(), 0, \"defer\"));\n\n  io_context_.impl_.post_immediate_completion(p.p, true);\n  p.v = p.p = 0;\n}\n\ninline bool\nio_context::executor_type::running_in_this_thread() const ASIO_NOEXCEPT\n{\n  return io_context_.impl_.can_dispatch();\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\ninline io_context::work::work(asio::io_context& io_context)\n  : io_context_impl_(io_context.impl_)\n{\n  io_context_impl_.work_started();\n}\n\ninline io_context::work::work(const work& other)\n  : io_context_impl_(other.io_context_impl_)\n{\n  
io_context_impl_.work_started();\n}\n\ninline io_context::work::~work()\n{\n  io_context_impl_.work_finished();\n}\n\ninline asio::io_context& io_context::work::get_io_context()\n{\n  return static_cast<asio::io_context&>(io_context_impl_.context());\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ninline asio::io_context& io_context::service::get_io_context()\n{\n  return static_cast<asio::io_context&>(context());\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_IO_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/io_context.ipp",
    "content": "//\n// impl/io_context.ipp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_IO_CONTEXT_IPP\n#define ASIO_IMPL_IO_CONTEXT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/io_context.hpp\"\n#include \"asio/detail/concurrency_hint.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/scoped_ptr.hpp\"\n#include \"asio/detail/service_registry.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n\n#if defined(ASIO_HAS_IOCP)\n# include \"asio/detail/win_iocp_io_context.hpp\"\n#else\n# include \"asio/detail/scheduler.hpp\"\n#endif\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nio_context::io_context()\n  : impl_(add_impl(new impl_type(*this,\n          ASIO_CONCURRENCY_HINT_DEFAULT, false)))\n{\n}\n\nio_context::io_context(int concurrency_hint)\n  : impl_(add_impl(new impl_type(*this, concurrency_hint == 1\n          ? 
ASIO_CONCURRENCY_HINT_1 : concurrency_hint, false)))\n{\n}\n\nio_context::impl_type& io_context::add_impl(io_context::impl_type* impl)\n{\n  asio::detail::scoped_ptr<impl_type> scoped_impl(impl);\n  asio::add_service<impl_type>(*this, scoped_impl.get());\n  return *scoped_impl.release();\n}\n\nio_context::~io_context()\n{\n}\n\nio_context::count_type io_context::run()\n{\n  asio::error_code ec;\n  count_type s = impl_.run(ec);\n  asio::detail::throw_error(ec);\n  return s;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nio_context::count_type io_context::run(asio::error_code& ec)\n{\n  return impl_.run(ec);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nio_context::count_type io_context::run_one()\n{\n  asio::error_code ec;\n  count_type s = impl_.run_one(ec);\n  asio::detail::throw_error(ec);\n  return s;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nio_context::count_type io_context::run_one(asio::error_code& ec)\n{\n  return impl_.run_one(ec);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nio_context::count_type io_context::poll()\n{\n  asio::error_code ec;\n  count_type s = impl_.poll(ec);\n  asio::detail::throw_error(ec);\n  return s;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nio_context::count_type io_context::poll(asio::error_code& ec)\n{\n  return impl_.poll(ec);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nio_context::count_type io_context::poll_one()\n{\n  asio::error_code ec;\n  count_type s = impl_.poll_one(ec);\n  asio::detail::throw_error(ec);\n  return s;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nio_context::count_type io_context::poll_one(asio::error_code& ec)\n{\n  return impl_.poll_one(ec);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nvoid io_context::stop()\n{\n  impl_.stop();\n}\n\nbool io_context::stopped() const\n{\n  return impl_.stopped();\n}\n\nvoid io_context::restart()\n{\n  impl_.restart();\n}\n\nio_context::service::service(asio::io_context& owner)\n  : execution_context::service(owner)\n{\n}\n\nio_context::service::~service()\n{\n}\n\nvoid 
io_context::service::shutdown()\n{\n#if !defined(ASIO_NO_DEPRECATED)\n  shutdown_service();\n#endif // !defined(ASIO_NO_DEPRECATED)\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nvoid io_context::service::shutdown_service()\n{\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nvoid io_context::service::notify_fork(io_context::fork_event ev)\n{\n#if !defined(ASIO_NO_DEPRECATED)\n  fork_service(ev);\n#else // !defined(ASIO_NO_DEPRECATED)\n  (void)ev;\n#endif // !defined(ASIO_NO_DEPRECATED)\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nvoid io_context::service::fork_service(io_context::fork_event)\n{\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_IO_CONTEXT_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/post.hpp",
    "content": "//\n// impl/post.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_POST_HPP\n#define ASIO_IMPL_POST_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/detail/work_dispatcher.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\nclass initiate_post\n{\npublic:\n  template <typename CompletionHandler>\n  void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const\n  {\n    typedef typename decay<CompletionHandler>::type DecayedHandler;\n\n    typename associated_executor<DecayedHandler>::type ex(\n        (get_associated_executor)(handler));\n\n    typename associated_allocator<DecayedHandler>::type alloc(\n        (get_associated_allocator)(handler));\n\n    ex.post(ASIO_MOVE_CAST(CompletionHandler)(handler), alloc);\n  }\n};\n\ntemplate <typename Executor>\nclass initiate_post_with_executor\n{\npublic:\n  typedef Executor executor_type;\n\n  explicit initiate_post_with_executor(const Executor& ex)\n    : ex_(ex)\n  {\n  }\n\n  executor_type get_executor() const ASIO_NOEXCEPT\n  {\n    return ex_;\n  }\n\n  template <typename CompletionHandler>\n  void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const\n  {\n    typedef typename decay<CompletionHandler>::type DecayedHandler;\n\n    typename associated_allocator<DecayedHandler>::type alloc(\n        (get_associated_allocator)(handler));\n\n    ex_.post(detail::work_dispatcher<DecayedHandler>(\n          ASIO_MOVE_CAST(CompletionHandler)(handler)), alloc);\n  }\n\nprivate:\n  Executor ex_;\n};\n\n} // namespace 
detail\n\ntemplate <ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) post(\n    ASIO_MOVE_ARG(CompletionToken) token)\n{\n  return async_initiate<CompletionToken, void()>(\n      detail::initiate_post(), token);\n}\n\ntemplate <typename Executor,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) post(\n    const Executor& ex, ASIO_MOVE_ARG(CompletionToken) token,\n    typename enable_if<is_executor<Executor>::value>::type*)\n{\n  return async_initiate<CompletionToken, void()>(\n      detail::initiate_post_with_executor<Executor>(ex), token);\n}\n\ntemplate <typename ExecutionContext,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) post(\n    ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token,\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type*)\n{\n  return (post)(ctx.get_executor(),\n      ASIO_MOVE_CAST(CompletionToken)(token));\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_POST_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/read.hpp",
    "content": "//\n// impl/read.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_READ_HPP\n#define ASIO_IMPL_READ_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include <algorithm>\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/completion_condition.hpp\"\n#include \"asio/detail/array_fwd.hpp\"\n#include \"asio/detail/base_from_completion_cond.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/consuming_buffers.hpp\"\n#include \"asio/detail/dependent_type.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  template <typename SyncReadStream, typename MutableBufferSequence,\n      typename MutableBufferIterator, typename CompletionCondition>\n  std::size_t read_buffer_sequence(SyncReadStream& s,\n      const MutableBufferSequence& buffers, const MutableBufferIterator&,\n      CompletionCondition completion_condition, asio::error_code& ec)\n  {\n    ec = asio::error_code();\n    asio::detail::consuming_buffers<mutable_buffer,\n        MutableBufferSequence, MutableBufferIterator> tmp(buffers);\n    while (!tmp.empty())\n    {\n      if (std::size_t max_size = detail::adapt_completion_condition_result(\n            completion_condition(ec, tmp.total_consumed())))\n        
tmp.consume(s.read_some(tmp.prepare(max_size), ec));\n      else\n        break;\n    }\n    return tmp.total_consumed();;\n  }\n} // namespace detail\n\ntemplate <typename SyncReadStream, typename MutableBufferSequence,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type*)\n{\n  return detail::read_buffer_sequence(s, buffers,\n      asio::buffer_sequence_begin(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n}\n\ntemplate <typename SyncReadStream, typename MutableBufferSequence>\ninline std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read(s, buffers, transfer_all(), ec);\n  asio::detail::throw_error(ec, \"read\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename MutableBufferSequence>\ninline std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type*)\n{\n  return read(s, buffers, transfer_all(), ec);\n}\n\ntemplate <typename SyncReadStream, typename MutableBufferSequence,\n    typename CompletionCondition>\ninline std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read(s, buffers,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"read\");\n  return 
bytes_transferred;\n}\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  typename decay<DynamicBuffer_v1>::type b(\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers));\n\n  ec = asio::error_code();\n  std::size_t total_transferred = 0;\n  std::size_t max_size = detail::adapt_completion_condition_result(\n        completion_condition(ec, total_transferred));\n  std::size_t bytes_available = std::min<std::size_t>(\n        std::max<std::size_t>(512, b.capacity() - b.size()),\n        std::min<std::size_t>(max_size, b.max_size() - b.size()));\n  while (bytes_available > 0)\n  {\n    std::size_t bytes_transferred = s.read_some(b.prepare(bytes_available), ec);\n    b.commit(bytes_transferred);\n    total_transferred += bytes_transferred;\n    max_size = detail::adapt_completion_condition_result(\n          completion_condition(ec, total_transferred));\n    bytes_available = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(max_size, b.max_size() - b.size()));\n  }\n  return total_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\ninline std::size_t read(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), 
transfer_all(), ec);\n  asio::detail::throw_error(ec, \"read\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\ninline std::size_t read(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return read(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      transfer_all(), ec);\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1,\n    typename CompletionCondition>\ninline std::size_t read(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"read\");\n  return bytes_transferred;\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\ntemplate <typename SyncReadStream, typename Allocator,\n    typename CompletionCondition>\ninline std::size_t read(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition, asio::error_code& ec)\n{\n  return read(s, basic_streambuf_ref<Allocator>(b),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n}\n\ntemplate <typename SyncReadStream, typename Allocator>\ninline std::size_t read(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b)\n{\n  return read(s, basic_streambuf_ref<Allocator>(b));\n}\n\ntemplate <typename SyncReadStream, typename Allocator>\ninline std::size_t 
read(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    asio::error_code& ec)\n{\n  return read(s, basic_streambuf_ref<Allocator>(b), ec);\n}\n\ntemplate <typename SyncReadStream, typename Allocator,\n    typename CompletionCondition>\ninline std::size_t read(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition)\n{\n  return read(s, basic_streambuf_ref<Allocator>(b),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  DynamicBuffer_v2& b = buffers;\n\n  ec = asio::error_code();\n  std::size_t total_transferred = 0;\n  std::size_t max_size = detail::adapt_completion_condition_result(\n        completion_condition(ec, total_transferred));\n  std::size_t bytes_available = std::min<std::size_t>(\n        std::max<std::size_t>(512, b.capacity() - b.size()),\n        std::min<std::size_t>(max_size, b.max_size() - b.size()));\n  while (bytes_available > 0)\n  {\n    std::size_t pos = b.size();\n    b.grow(bytes_available);\n    std::size_t bytes_transferred = s.read_some(\n        b.data(pos, bytes_available), ec);\n    b.shrink(bytes_available - bytes_transferred);\n    total_transferred += bytes_transferred;\n    max_size = detail::adapt_completion_condition_result(\n          completion_condition(ec, total_transferred));\n    bytes_available = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(max_size, b.max_size() - b.size()));\n  }\n  return 
total_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\ninline std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), transfer_all(), ec);\n  asio::detail::throw_error(ec, \"read\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\ninline std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  return read(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      transfer_all(), ec);\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2,\n    typename CompletionCondition>\ninline std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"read\");\n  return bytes_transferred;\n}\n\nnamespace detail\n{\n  template <typename AsyncReadStream, typename MutableBufferSequence,\n      typename MutableBufferIterator, typename CompletionCondition,\n      typename ReadHandler>\n  class read_op\n    : detail::base_from_completion_cond<CompletionCondition>\n  {\n  public:\n    read_op(AsyncReadStream& stream, const MutableBufferSequence& buffers,\n        CompletionCondition& completion_condition, ReadHandler& handler)\n      : detail::base_from_completion_cond<\n          CompletionCondition>(completion_condition),\n        stream_(stream),\n        buffers_(buffers),\n        
start_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_op(const read_op& other)\n      : detail::base_from_completion_cond<CompletionCondition>(other),\n        stream_(other.stream_),\n        buffers_(other.buffers_),\n        start_(other.start_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_op(read_op&& other)\n      : detail::base_from_completion_cond<CompletionCondition>(\n          ASIO_MOVE_CAST(detail::base_from_completion_cond<\n            CompletionCondition>)(other)),\n        stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(buffers_type)(other.buffers_)),\n        start_(other.start_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      std::size_t max_size;\n      switch (start_ = start)\n      {\n        case 1:\n        max_size = this->check_for_completion(ec, buffers_.total_consumed());\n        do\n        {\n          stream_.async_read_some(buffers_.prepare(max_size),\n              ASIO_MOVE_CAST(read_op)(*this));\n          return; default:\n          buffers_.consume(bytes_transferred);\n          if ((!ec && bytes_transferred == 0) || buffers_.empty())\n            break;\n          max_size = this->check_for_completion(ec, buffers_.total_consumed());\n        } while (max_size > 0);\n\n        handler_(ec, buffers_.total_consumed());\n      }\n    }\n\n  //private:\n    typedef asio::detail::consuming_buffers<mutable_buffer,\n        MutableBufferSequence, MutableBufferIterator> buffers_type;\n\n    AsyncReadStream& stream_;\n    buffers_type buffers_;\n    int start_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream, typename MutableBufferSequence,\n      typename MutableBufferIterator, typename CompletionCondition,\n      typename ReadHandler>\n  
inline void* asio_handler_allocate(std::size_t size,\n      read_op<AsyncReadStream, MutableBufferSequence, MutableBufferIterator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename MutableBufferSequence,\n      typename MutableBufferIterator, typename CompletionCondition,\n      typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_op<AsyncReadStream, MutableBufferSequence, MutableBufferIterator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename MutableBufferSequence,\n      typename MutableBufferIterator, typename CompletionCondition,\n      typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_op<AsyncReadStream, MutableBufferSequence, MutableBufferIterator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_op<AsyncReadStream, MutableBufferSequence, MutableBufferIterator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_op<AsyncReadStream, MutableBufferSequence, MutableBufferIterator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename MutableBufferSequence,\n      typename MutableBufferIterator, typename CompletionCondition,\n      typename ReadHandler>\n  inline void start_read_buffer_sequence_op(AsyncReadStream& stream,\n      const MutableBufferSequence& buffers, const MutableBufferIterator&,\n      CompletionCondition& completion_condition, ReadHandler& handler)\n  {\n    detail::read_op<AsyncReadStream, MutableBufferSequence,\n      MutableBufferIterator, CompletionCondition, ReadHandler>(\n        stream, buffers, completion_condition, handler)(\n          asio::error_code(), 0, 1);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_buffer_sequence\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_buffer_sequence(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type 
get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence,\n        typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers,\n        ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      start_read_buffer_sequence_op(stream_, buffers,\n          asio::buffer_sequence_begin(buffers),\n          completion_cond2.value, handler2.value);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename MutableBufferSequence,\n    typename MutableBufferIterator, typename CompletionCondition,\n    typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_op<AsyncReadStream, MutableBufferSequence,\n      MutableBufferIterator, CompletionCondition, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_op<AsyncReadStream, MutableBufferSequence,\n        MutableBufferIterator, CompletionCondition, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename MutableBufferSequence,\n    typename MutableBufferIterator, typename CompletionCondition,\n    typename ReadHandler, typename Executor>\nstruct associated_executor<\n    
detail::read_op<AsyncReadStream, MutableBufferSequence,\n      MutableBufferIterator, CompletionCondition, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_op<AsyncReadStream, MutableBufferSequence,\n        MutableBufferIterator, CompletionCondition, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream,\n    typename MutableBufferSequence, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_buffer_sequence<AsyncReadStream>(s), handler,\n      buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\ntemplate <typename AsyncReadStream, typename MutableBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, const MutableBufferSequence& buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_buffer_sequence<AsyncReadStream>(s),\n      
handler, buffers, transfer_all());\n}\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\nnamespace detail\n{\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename CompletionCondition, typename ReadHandler>\n  class read_dynbuf_v1_op\n    : detail::base_from_completion_cond<CompletionCondition>\n  {\n  public:\n    template <typename BufferSequence>\n    read_dynbuf_v1_op(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        CompletionCondition& completion_condition, ReadHandler& handler)\n      : detail::base_from_completion_cond<\n          CompletionCondition>(completion_condition),\n        stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        start_(0),\n        total_transferred_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_dynbuf_v1_op(const read_dynbuf_v1_op& other)\n      : detail::base_from_completion_cond<CompletionCondition>(other),\n        stream_(other.stream_),\n        buffers_(other.buffers_),\n        start_(other.start_),\n        total_transferred_(other.total_transferred_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_dynbuf_v1_op(read_dynbuf_v1_op&& other)\n      : detail::base_from_completion_cond<CompletionCondition>(\n          ASIO_MOVE_CAST(detail::base_from_completion_cond<\n            CompletionCondition>)(other)),\n        stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)),\n        start_(other.start_),\n        total_transferred_(other.total_transferred_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      std::size_t max_size, bytes_available;\n      switch (start_ = start)\n      {\n        case 1:\n        max_size = this->check_for_completion(ec, 
total_transferred_);\n        bytes_available = std::min<std::size_t>(\n              std::max<std::size_t>(512,\n                buffers_.capacity() - buffers_.size()),\n              std::min<std::size_t>(max_size,\n                buffers_.max_size() - buffers_.size()));\n        for (;;)\n        {\n          stream_.async_read_some(buffers_.prepare(bytes_available),\n              ASIO_MOVE_CAST(read_dynbuf_v1_op)(*this));\n          return; default:\n          total_transferred_ += bytes_transferred;\n          buffers_.commit(bytes_transferred);\n          max_size = this->check_for_completion(ec, total_transferred_);\n          bytes_available = std::min<std::size_t>(\n                std::max<std::size_t>(512,\n                  buffers_.capacity() - buffers_.size()),\n                std::min<std::size_t>(max_size,\n                  buffers_.max_size() - buffers_.size()));\n          if ((!ec && bytes_transferred == 0) || bytes_available == 0)\n            break;\n        }\n\n        handler_(ec, static_cast<const std::size_t&>(total_transferred_));\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v1 buffers_;\n    int start_;\n    std::size_t total_transferred_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename CompletionCondition, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_dynbuf_v1_op<AsyncReadStream, DynamicBuffer_v1,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_dynbuf_v1_op<AsyncReadStream, DynamicBuffer_v1,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    
asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename CompletionCondition, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_dynbuf_v1_op<AsyncReadStream, DynamicBuffer_v1,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename CompletionCondition,\n      typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_dynbuf_v1_op<AsyncReadStream, DynamicBuffer_v1,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename CompletionCondition,\n      typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_dynbuf_v1_op<AsyncReadStream, DynamicBuffer_v1,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_dynbuf_v1\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_dynbuf_v1(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename DynamicBuffer_v1,\n        typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n        
ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      read_dynbuf_v1_op<AsyncReadStream, typename decay<DynamicBuffer_v1>::type,\n        CompletionCondition, typename decay<ReadHandler>::type>(\n          stream_, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n            completion_cond2.value, handler2.value)(\n              asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename CompletionCondition, typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_dynbuf_v1_op<AsyncReadStream,\n      DynamicBuffer_v1, CompletionCondition, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_dynbuf_v1_op<AsyncReadStream,\n        DynamicBuffer_v1, CompletionCondition, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename CompletionCondition, typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_dynbuf_v1_op<AsyncReadStream,\n      DynamicBuffer_v1, CompletionCondition, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_dynbuf_v1_op<AsyncReadStream,\n        DynamicBuffer_v1, 
CompletionCondition, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return async_read(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      transfer_all(), ASIO_MOVE_CAST(ReadHandler)(handler));\n}\n\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v1, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  // If you get an error on the following line it means that your handler does\n  // not meet the documented type requirements for a ReadHandler.\n  ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_dynbuf_v1<AsyncReadStream>(s),\n      handler, 
ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\ntemplate <typename AsyncReadStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, basic_streambuf<Allocator>& b,\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_read(s, basic_streambuf_ref<Allocator>(b),\n      ASIO_MOVE_CAST(ReadHandler)(handler));\n}\n\ntemplate <typename AsyncReadStream,\n    typename Allocator, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_read(s, basic_streambuf_ref<Allocator>(b),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition),\n      ASIO_MOVE_CAST(ReadHandler)(handler));\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\nnamespace detail\n{\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename CompletionCondition, typename ReadHandler>\n  class read_dynbuf_v2_op\n    : detail::base_from_completion_cond<CompletionCondition>\n  {\n  public:\n    template <typename BufferSequence>\n    read_dynbuf_v2_op(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        CompletionCondition& completion_condition, ReadHandler& handler)\n      : detail::base_from_completion_cond<\n          CompletionCondition>(completion_condition),\n        stream_(stream),\n        
buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        start_(0),\n        total_transferred_(0),\n        bytes_available_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_dynbuf_v2_op(const read_dynbuf_v2_op& other)\n      : detail::base_from_completion_cond<CompletionCondition>(other),\n        stream_(other.stream_),\n        buffers_(other.buffers_),\n        start_(other.start_),\n        total_transferred_(other.total_transferred_),\n        bytes_available_(other.bytes_available_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_dynbuf_v2_op(read_dynbuf_v2_op&& other)\n      : detail::base_from_completion_cond<CompletionCondition>(\n          ASIO_MOVE_CAST(detail::base_from_completion_cond<\n            CompletionCondition>)(other)),\n        stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)),\n        start_(other.start_),\n        total_transferred_(other.total_transferred_),\n        bytes_available_(other.bytes_available_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      std::size_t max_size, pos;\n      switch (start_ = start)\n      {\n        case 1:\n        max_size = this->check_for_completion(ec, total_transferred_);\n        bytes_available_ = std::min<std::size_t>(\n              std::max<std::size_t>(512,\n                buffers_.capacity() - buffers_.size()),\n              std::min<std::size_t>(max_size,\n                buffers_.max_size() - buffers_.size()));\n        for (;;)\n        {\n          pos = buffers_.size();\n          buffers_.grow(bytes_available_);\n          stream_.async_read_some(buffers_.data(pos, bytes_available_),\n              ASIO_MOVE_CAST(read_dynbuf_v2_op)(*this));\n          return; default:\n          
total_transferred_ += bytes_transferred;\n          buffers_.shrink(bytes_available_ - bytes_transferred);\n          max_size = this->check_for_completion(ec, total_transferred_);\n          bytes_available_ = std::min<std::size_t>(\n                std::max<std::size_t>(512,\n                  buffers_.capacity() - buffers_.size()),\n                std::min<std::size_t>(max_size,\n                  buffers_.max_size() - buffers_.size()));\n          if ((!ec && bytes_transferred == 0) || bytes_available_ == 0)\n            break;\n        }\n\n        handler_(ec, static_cast<const std::size_t&>(total_transferred_));\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v2 buffers_;\n    int start_;\n    std::size_t total_transferred_;\n    std::size_t bytes_available_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename CompletionCondition, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_dynbuf_v2_op<AsyncReadStream, DynamicBuffer_v2,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_dynbuf_v2_op<AsyncReadStream, DynamicBuffer_v2,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename CompletionCondition, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_dynbuf_v2_op<AsyncReadStream, DynamicBuffer_v2,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 
? true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename CompletionCondition,\n      typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_dynbuf_v2_op<AsyncReadStream, DynamicBuffer_v2,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename CompletionCondition,\n      typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_dynbuf_v2_op<AsyncReadStream, DynamicBuffer_v2,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_dynbuf_v2\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_dynbuf_v2(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename DynamicBuffer_v2,\n        typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v2) buffers,\n        ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      
read_dynbuf_v2_op<AsyncReadStream, typename decay<DynamicBuffer_v2>::type,\n        CompletionCondition, typename decay<ReadHandler>::type>(\n          stream_, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n            completion_cond2.value, handler2.value)(\n              asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename CompletionCondition, typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_dynbuf_v2_op<AsyncReadStream,\n      DynamicBuffer_v2, CompletionCondition, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_dynbuf_v2_op<AsyncReadStream,\n        DynamicBuffer_v2, CompletionCondition, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename CompletionCondition, typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_dynbuf_v2_op<AsyncReadStream,\n      DynamicBuffer_v2, CompletionCondition, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_dynbuf_v2_op<AsyncReadStream,\n        DynamicBuffer_v2, CompletionCondition, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline 
ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  return async_read(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      transfer_all(), ASIO_MOVE_CAST(ReadHandler)(handler));\n}\n\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v2, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  // If you get an error on the following line it means that your handler does\n  // not meet the documented type requirements for a ReadHandler.\n  ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_dynbuf_v2<AsyncReadStream>(s),\n      handler, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_READ_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/read_at.hpp",
    "content": "//\n// impl/read_at.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_READ_AT_HPP\n#define ASIO_IMPL_READ_AT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include <algorithm>\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/completion_condition.hpp\"\n#include \"asio/detail/array_fwd.hpp\"\n#include \"asio/detail/base_from_completion_cond.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/consuming_buffers.hpp\"\n#include \"asio/detail/dependent_type.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  template <typename SyncRandomAccessReadDevice, typename MutableBufferSequence,\n      typename MutableBufferIterator, typename CompletionCondition>\n  std::size_t read_at_buffer_sequence(SyncRandomAccessReadDevice& d,\n      uint64_t offset, const MutableBufferSequence& buffers,\n      const MutableBufferIterator&, CompletionCondition completion_condition,\n      asio::error_code& ec)\n  {\n    ec = asio::error_code();\n    asio::detail::consuming_buffers<mutable_buffer,\n        MutableBufferSequence, MutableBufferIterator> tmp(buffers);\n    while (!tmp.empty())\n    {\n      if (std::size_t max_size = detail::adapt_completion_condition_result(\n            
completion_condition(ec, tmp.total_consumed())))\n      {\n        tmp.consume(d.read_some_at(offset + tmp.total_consumed(),\n              tmp.prepare(max_size), ec));\n      }\n      else\n        break;\n    }\n    return tmp.total_consumed();;\n  }\n} // namespace detail\n\ntemplate <typename SyncRandomAccessReadDevice, typename MutableBufferSequence,\n    typename CompletionCondition>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition, asio::error_code& ec)\n{\n  return detail::read_at_buffer_sequence(d, offset, buffers,\n      asio::buffer_sequence_begin(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n}\n\ntemplate <typename SyncRandomAccessReadDevice, typename MutableBufferSequence>\ninline std::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_at(\n      d, offset, buffers, transfer_all(), ec);\n  asio::detail::throw_error(ec, \"read_at\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncRandomAccessReadDevice, typename MutableBufferSequence>\ninline std::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    asio::error_code& ec)\n{\n  return read_at(d, offset, buffers, transfer_all(), ec);\n}\n\ntemplate <typename SyncRandomAccessReadDevice, typename MutableBufferSequence,\n    typename CompletionCondition>\ninline std::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_at(d, offset, buffers,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"read_at\");\n  return bytes_transferred;\n}\n\n#if 
!defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\ntemplate <typename SyncRandomAccessReadDevice, typename Allocator,\n    typename CompletionCondition>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition, asio::error_code& ec)\n{\n  ec = asio::error_code();\n  std::size_t total_transferred = 0;\n  std::size_t max_size = detail::adapt_completion_condition_result(\n        completion_condition(ec, total_transferred));\n  std::size_t bytes_available = read_size_helper(b, max_size);\n  while (bytes_available > 0)\n  {\n    std::size_t bytes_transferred = d.read_some_at(\n        offset + total_transferred, b.prepare(bytes_available), ec);\n    b.commit(bytes_transferred);\n    total_transferred += bytes_transferred;\n    max_size = detail::adapt_completion_condition_result(\n          completion_condition(ec, total_transferred));\n    bytes_available = read_size_helper(b, max_size);\n  }\n  return total_transferred;\n}\n\ntemplate <typename SyncRandomAccessReadDevice, typename Allocator>\ninline std::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_at(\n      d, offset, b, transfer_all(), ec);\n  asio::detail::throw_error(ec, \"read_at\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncRandomAccessReadDevice, typename Allocator>\ninline std::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    asio::error_code& ec)\n{\n  return read_at(d, offset, b, transfer_all(), ec);\n}\n\ntemplate <typename SyncRandomAccessReadDevice, typename Allocator,\n    typename CompletionCondition>\ninline std::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition)\n{\n  asio::error_code 
ec;\n  std::size_t bytes_transferred = read_at(d, offset, b,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"read_at\");\n  return bytes_transferred;\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\nnamespace detail\n{\n  template <typename AsyncRandomAccessReadDevice,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  class read_at_op\n    : detail::base_from_completion_cond<CompletionCondition>\n  {\n  public:\n    read_at_op(AsyncRandomAccessReadDevice& device,\n        uint64_t offset, const MutableBufferSequence& buffers,\n        CompletionCondition& completion_condition, ReadHandler& handler)\n      : detail::base_from_completion_cond<\n          CompletionCondition>(completion_condition),\n        device_(device),\n        offset_(offset),\n        buffers_(buffers),\n        start_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_at_op(const read_at_op& other)\n      : detail::base_from_completion_cond<CompletionCondition>(other),\n        device_(other.device_),\n        offset_(other.offset_),\n        buffers_(other.buffers_),\n        start_(other.start_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_at_op(read_at_op&& other)\n      : detail::base_from_completion_cond<CompletionCondition>(\n          ASIO_MOVE_CAST(detail::base_from_completion_cond<\n            CompletionCondition>)(other)),\n        device_(other.device_),\n        offset_(other.offset_),\n        buffers_(ASIO_MOVE_CAST(buffers_type)(other.buffers_)),\n        start_(other.start_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      std::size_t 
max_size;\n      switch (start_ = start)\n      {\n        case 1:\n        max_size = this->check_for_completion(ec, buffers_.total_consumed());\n        do\n        {\n          device_.async_read_some_at(\n              offset_ + buffers_.total_consumed(), buffers_.prepare(max_size),\n              ASIO_MOVE_CAST(read_at_op)(*this));\n          return; default:\n          buffers_.consume(bytes_transferred);\n          if ((!ec && bytes_transferred == 0) || buffers_.empty())\n            break;\n          max_size = this->check_for_completion(ec, buffers_.total_consumed());\n        } while (max_size > 0);\n\n        handler_(ec, buffers_.total_consumed());\n      }\n    }\n\n  //private:\n    typedef asio::detail::consuming_buffers<mutable_buffer,\n        MutableBufferSequence, MutableBufferIterator> buffers_type;\n\n    AsyncRandomAccessReadDevice& device_;\n    uint64_t offset_;\n    buffers_type buffers_;\n    int start_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncRandomAccessReadDevice,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_at_op<AsyncRandomAccessReadDevice, MutableBufferSequence,\n        MutableBufferIterator, CompletionCondition, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncRandomAccessReadDevice,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_at_op<AsyncRandomAccessReadDevice, MutableBufferSequence,\n        MutableBufferIterator, CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template 
<typename AsyncRandomAccessReadDevice,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_at_op<AsyncRandomAccessReadDevice, MutableBufferSequence,\n        MutableBufferIterator, CompletionCondition, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncRandomAccessReadDevice,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_at_op<AsyncRandomAccessReadDevice, MutableBufferSequence,\n        MutableBufferIterator, CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncRandomAccessReadDevice,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_at_op<AsyncRandomAccessReadDevice, MutableBufferSequence,\n        MutableBufferIterator, CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncRandomAccessReadDevice,\n      typename MutableBufferSequence, typename MutableBufferIterator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void start_read_at_buffer_sequence_op(AsyncRandomAccessReadDevice& d,\n      uint64_t offset, const MutableBufferSequence& buffers,\n      const MutableBufferIterator&, CompletionCondition& completion_condition,\n      ReadHandler& handler)\n  {\n    
detail::read_at_op<AsyncRandomAccessReadDevice, MutableBufferSequence,\n      MutableBufferIterator, CompletionCondition, ReadHandler>(\n        d, offset, buffers, completion_condition, handler)(\n          asio::error_code(), 0, 1);\n  }\n\n  template <typename AsyncRandomAccessReadDevice>\n  class initiate_async_read_at_buffer_sequence\n  {\n  public:\n    typedef typename AsyncRandomAccessReadDevice::executor_type executor_type;\n\n    explicit initiate_async_read_at_buffer_sequence(\n        AsyncRandomAccessReadDevice& device)\n      : device_(device)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return device_.get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence,\n        typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        uint64_t offset, const MutableBufferSequence& buffers,\n        ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      start_read_at_buffer_sequence_op(device_, offset, buffers,\n          asio::buffer_sequence_begin(buffers),\n          completion_cond2.value, handler2.value);\n    }\n\n  private:\n    AsyncRandomAccessReadDevice& device_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncRandomAccessReadDevice,\n    typename MutableBufferSequence, typename MutableBufferIterator,\n    typename CompletionCondition, typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_at_op<AsyncRandomAccessReadDevice, MutableBufferSequence,\n    MutableBufferIterator, 
CompletionCondition, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_at_op<AsyncRandomAccessReadDevice,\n      MutableBufferSequence, MutableBufferIterator,\n      CompletionCondition, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncRandomAccessReadDevice,\n    typename MutableBufferSequence, typename MutableBufferIterator,\n    typename CompletionCondition, typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_at_op<AsyncRandomAccessReadDevice, MutableBufferSequence,\n    MutableBufferIterator, CompletionCondition, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_at_op<AsyncRandomAccessReadDevice,\n      MutableBufferSequence, MutableBufferIterator,\n      CompletionCondition, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncRandomAccessReadDevice,\n    typename MutableBufferSequence, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_at(AsyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_at_buffer_sequence<\n        AsyncRandomAccessReadDevice>(d),\n      handler, offset, 
buffers,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\ntemplate <typename AsyncRandomAccessReadDevice, typename MutableBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_at(AsyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_at_buffer_sequence<\n        AsyncRandomAccessReadDevice>(d),\n      handler, offset, buffers, transfer_all());\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\nnamespace detail\n{\n  template <typename AsyncRandomAccessReadDevice, typename Allocator,\n      typename CompletionCondition, typename ReadHandler>\n  class read_at_streambuf_op\n    : detail::base_from_completion_cond<CompletionCondition>\n  {\n  public:\n    read_at_streambuf_op(AsyncRandomAccessReadDevice& device,\n        uint64_t offset, basic_streambuf<Allocator>& streambuf,\n        CompletionCondition& completion_condition, ReadHandler& handler)\n      : detail::base_from_completion_cond<\n          CompletionCondition>(completion_condition),\n        device_(device),\n        offset_(offset),\n        streambuf_(streambuf),\n        start_(0),\n        total_transferred_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_at_streambuf_op(const read_at_streambuf_op& other)\n      : detail::base_from_completion_cond<CompletionCondition>(other),\n        device_(other.device_),\n        offset_(other.offset_),\n        streambuf_(other.streambuf_),\n        start_(other.start_),\n        total_transferred_(other.total_transferred_),\n        handler_(other.handler_)\n    {\n    }\n\n    
read_at_streambuf_op(read_at_streambuf_op&& other)\n      : detail::base_from_completion_cond<CompletionCondition>(\n          ASIO_MOVE_CAST(detail::base_from_completion_cond<\n            CompletionCondition>)(other)),\n        device_(other.device_),\n        offset_(other.offset_),\n        streambuf_(other.streambuf_),\n        start_(other.start_),\n        total_transferred_(other.total_transferred_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      std::size_t max_size, bytes_available;\n      switch (start_ = start)\n      {\n        case 1:\n        max_size = this->check_for_completion(ec, total_transferred_);\n        bytes_available = read_size_helper(streambuf_, max_size);\n        for (;;)\n        {\n          device_.async_read_some_at(offset_ + total_transferred_,\n              streambuf_.prepare(bytes_available),\n              ASIO_MOVE_CAST(read_at_streambuf_op)(*this));\n          return; default:\n          total_transferred_ += bytes_transferred;\n          streambuf_.commit(bytes_transferred);\n          max_size = this->check_for_completion(ec, total_transferred_);\n          bytes_available = read_size_helper(streambuf_, max_size);\n          if ((!ec && bytes_transferred == 0) || bytes_available == 0)\n            break;\n        }\n\n        handler_(ec, static_cast<const std::size_t&>(total_transferred_));\n      }\n    }\n\n  //private:\n    AsyncRandomAccessReadDevice& device_;\n    uint64_t offset_;\n    asio::basic_streambuf<Allocator>& streambuf_;\n    int start_;\n    std::size_t total_transferred_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncRandomAccessReadDevice, typename Allocator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      
read_at_streambuf_op<AsyncRandomAccessReadDevice, Allocator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncRandomAccessReadDevice, typename Allocator,\n      typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_at_streambuf_op<AsyncRandomAccessReadDevice, Allocator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncRandomAccessReadDevice, typename Allocator,\n      typename CompletionCondition, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_at_streambuf_op<AsyncRandomAccessReadDevice, Allocator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncRandomAccessReadDevice,\n      typename Allocator, typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_at_streambuf_op<AsyncRandomAccessReadDevice, Allocator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncRandomAccessReadDevice,\n      typename Allocator, typename CompletionCondition, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_at_streambuf_op<AsyncRandomAccessReadDevice, Allocator,\n        CompletionCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename 
AsyncRandomAccessReadDevice>\n  class initiate_async_read_at_streambuf\n  {\n  public:\n    typedef typename AsyncRandomAccessReadDevice::executor_type executor_type;\n\n    explicit initiate_async_read_at_streambuf(\n        AsyncRandomAccessReadDevice& device)\n      : device_(device)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return device_.get_executor();\n    }\n\n    template <typename ReadHandler,\n        typename Allocator, typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        uint64_t offset, basic_streambuf<Allocator>* b,\n        ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      read_at_streambuf_op<AsyncRandomAccessReadDevice, Allocator,\n        CompletionCondition, typename decay<ReadHandler>::type>(\n          device_, offset, *b, completion_cond2.value, handler2.value)(\n            asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncRandomAccessReadDevice& device_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncRandomAccessReadDevice, typename Allocator,\n    typename CompletionCondition, typename ReadHandler, typename Allocator1>\nstruct associated_allocator<\n    detail::read_at_streambuf_op<AsyncRandomAccessReadDevice,\n      Allocator, CompletionCondition, ReadHandler>,\n    Allocator1>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator1>::type type;\n\n  static type get(\n      const detail::read_at_streambuf_op<AsyncRandomAccessReadDevice,\n        Allocator, CompletionCondition, ReadHandler>& h,\n      const 
Allocator1& a = Allocator1()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator1>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncRandomAccessReadDevice, typename Executor,\n    typename CompletionCondition, typename ReadHandler, typename Executor1>\nstruct associated_executor<\n    detail::read_at_streambuf_op<AsyncRandomAccessReadDevice,\n      Executor, CompletionCondition, ReadHandler>,\n    Executor1>\n{\n  typedef typename associated_executor<ReadHandler, Executor1>::type type;\n\n  static type get(\n      const detail::read_at_streambuf_op<AsyncRandomAccessReadDevice,\n        Executor, CompletionCondition, ReadHandler>& h,\n      const Executor1& ex = Executor1()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor1>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncRandomAccessReadDevice,\n    typename Allocator, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_at(AsyncRandomAccessReadDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_at_streambuf<AsyncRandomAccessReadDevice>(d),\n      handler, offset, &b,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\ntemplate <typename AsyncRandomAccessReadDevice, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_at(AsyncRandomAccessReadDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    
ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_at_streambuf<AsyncRandomAccessReadDevice>(d),\n      handler, offset, &b, transfer_all());\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_READ_AT_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/read_until.hpp",
    "content": "//\n// impl/read_until.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_READ_UNTIL_HPP\n#define ASIO_IMPL_READ_UNTIL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include <algorithm>\n#include <string>\n#include <vector>\n#include <utility>\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/buffers_iterator.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/limits.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  // Algorithm that finds a subsequence of equal values in a sequence. Returns\n  // (iterator,true) if a full match was found, in which case the iterator\n  // points to the beginning of the match. Returns (iterator,false) if a\n  // partial match was found at the end of the first sequence, in which case\n  // the iterator points to the beginning of the partial match. 
Returns\n  // (last1,false) if no full or partial match was found.\n  template <typename Iterator1, typename Iterator2>\n  std::pair<Iterator1, bool> partial_search(\n      Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2)\n  {\n    for (Iterator1 iter1 = first1; iter1 != last1; ++iter1)\n    {\n      Iterator1 test_iter1 = iter1;\n      Iterator2 test_iter2 = first2;\n      for (;; ++test_iter1, ++test_iter2)\n      {\n        if (test_iter2 == last2)\n          return std::make_pair(iter1, true);\n        if (test_iter1 == last1)\n        {\n          if (test_iter2 != first2)\n            return std::make_pair(iter1, false);\n          else\n            break;\n        }\n        if (*test_iter1 != *test_iter2)\n          break;\n      }\n    }\n    return std::make_pair(last1, false);\n  }\n} // namespace detail\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\ninline std::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_until(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), delim, ec);\n  asio::detail::throw_error(ec, \"read_until\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    char delim, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  typename decay<DynamicBuffer_v1>::type b(\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers));\n\n  std::size_t 
search_position = 0;\n  for (;;)\n  {\n    // Determine the range of the data to be searched.\n    typedef typename DynamicBuffer_v1::const_buffers_type buffers_type;\n    typedef buffers_iterator<buffers_type> iterator;\n    buffers_type data_buffers = b.data();\n    iterator begin = iterator::begin(data_buffers);\n    iterator start_pos = begin + search_position;\n    iterator end = iterator::end(data_buffers);\n\n    // Look for a match.\n    iterator iter = std::find(start_pos, end, delim);\n    if (iter != end)\n    {\n      // Found a match. We're done.\n      ec = asio::error_code();\n      return iter - begin + 1;\n    }\n    else\n    {\n      // No match. Next search can start with the new data.\n      search_position = end - begin;\n    }\n\n    // Check if buffer is full.\n    if (b.size() == b.max_size())\n    {\n      ec = error::not_found;\n      return 0;\n    }\n\n    // Need more data.\n    std::size_t bytes_to_read = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(65536, b.max_size() - b.size()));\n    b.commit(s.read_some(b.prepare(bytes_to_read), ec));\n    if (ec)\n      return 0;\n  }\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\ninline std::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_STRING_VIEW_PARAM delim,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_until(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), delim, ec);\n  asio::detail::throw_error(ec, \"read_until\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_STRING_VIEW_PARAM 
delim, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  typename decay<DynamicBuffer_v1>::type b(\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers));\n\n  std::size_t search_position = 0;\n  for (;;)\n  {\n    // Determine the range of the data to be searched.\n    typedef typename DynamicBuffer_v1::const_buffers_type buffers_type;\n    typedef buffers_iterator<buffers_type> iterator;\n    buffers_type data_buffers = b.data();\n    iterator begin = iterator::begin(data_buffers);\n    iterator start_pos = begin + search_position;\n    iterator end = iterator::end(data_buffers);\n\n    // Look for a match.\n    std::pair<iterator, bool> result = detail::partial_search(\n        start_pos, end, delim.begin(), delim.end());\n    if (result.first != end)\n    {\n      if (result.second)\n      {\n        // Full match. We're done.\n        ec = asio::error_code();\n        return result.first - begin + delim.length();\n      }\n      else\n      {\n        // Partial match. Next search needs to start from beginning of match.\n        search_position = result.first - begin;\n      }\n    }\n    else\n    {\n      // No match. 
Next search can start with the new data.\n      search_position = end - begin;\n    }\n\n    // Check if buffer is full.\n    if (b.size() == b.max_size())\n    {\n      ec = error::not_found;\n      return 0;\n    }\n\n    // Need more data.\n    std::size_t bytes_to_read = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(65536, b.max_size() - b.size()));\n    b.commit(s.read_some(b.prepare(bytes_to_read), ec));\n    if (ec)\n      return 0;\n  }\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if defined(ASIO_HAS_BOOST_REGEX)\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\ninline std::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    const boost::regex& expr,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_until(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), expr, ec);\n  asio::detail::throw_error(ec, \"read_until\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    const boost::regex& expr, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  typename decay<DynamicBuffer_v1>::type b(\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers));\n\n  std::size_t search_position = 0;\n  for (;;)\n  {\n    // Determine the range of the data to be searched.\n    typedef typename DynamicBuffer_v1::const_buffers_type buffers_type;\n    typedef buffers_iterator<buffers_type> iterator;\n    buffers_type data_buffers = b.data();\n    
iterator begin = iterator::begin(data_buffers);\n    iterator start_pos = begin + search_position;\n    iterator end = iterator::end(data_buffers);\n\n    // Look for a match.\n    boost::match_results<iterator,\n      typename std::vector<boost::sub_match<iterator> >::allocator_type>\n        match_results;\n    if (regex_search(start_pos, end, match_results, expr,\n          boost::match_default | boost::match_partial))\n    {\n      if (match_results[0].matched)\n      {\n        // Full match. We're done.\n        ec = asio::error_code();\n        return match_results[0].second - begin;\n      }\n      else\n      {\n        // Partial match. Next search needs to start from beginning of match.\n        search_position = match_results[0].first - begin;\n      }\n    }\n    else\n    {\n      // No match. Next search can start with the new data.\n      search_position = end - begin;\n    }\n\n    // Check if buffer is full.\n    if (b.size() == b.max_size())\n    {\n      ec = error::not_found;\n      return 0;\n    }\n\n    // Need more data.\n    std::size_t bytes_to_read = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(65536, b.max_size() - b.size()));\n    b.commit(s.read_some(b.prepare(bytes_to_read), ec));\n    if (ec)\n      return 0;\n  }\n}\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n\ntemplate <typename SyncReadStream,\n    typename DynamicBuffer_v1, typename MatchCondition>\ninline std::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    MatchCondition match_condition,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_until(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      
match_condition, ec);\n  asio::detail::throw_error(ec, \"read_until\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream,\n    typename DynamicBuffer_v1, typename MatchCondition>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    MatchCondition match_condition, asio::error_code& ec,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  typename decay<DynamicBuffer_v1>::type b(\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers));\n\n  std::size_t search_position = 0;\n  for (;;)\n  {\n    // Determine the range of the data to be searched.\n    typedef typename DynamicBuffer_v1::const_buffers_type buffers_type;\n    typedef buffers_iterator<buffers_type> iterator;\n    buffers_type data_buffers = b.data();\n    iterator begin = iterator::begin(data_buffers);\n    iterator start_pos = begin + search_position;\n    iterator end = iterator::end(data_buffers);\n\n    // Look for a match.\n    std::pair<iterator, bool> result = match_condition(start_pos, end);\n    if (result.second)\n    {\n      // Full match. We're done.\n      ec = asio::error_code();\n      return result.first - begin;\n    }\n    else if (result.first != end)\n    {\n      // Partial match. Next search needs to start from beginning of match.\n      search_position = result.first - begin;\n    }\n    else\n    {\n      // No match. 
Next search can start with the new data.\n      search_position = end - begin;\n    }\n\n    // Check if buffer is full.\n    if (b.size() == b.max_size())\n    {\n      ec = error::not_found;\n      return 0;\n    }\n\n    // Need more data.\n    std::size_t bytes_to_read = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(65536, b.max_size() - b.size()));\n    b.commit(s.read_some(b.prepare(bytes_to_read), ec));\n    if (ec)\n      return 0;\n  }\n}\n\n#if !defined(ASIO_NO_IOSTREAM)\n\ntemplate <typename SyncReadStream, typename Allocator>\ninline std::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, char delim)\n{\n  return read_until(s, basic_streambuf_ref<Allocator>(b), delim);\n}\n\ntemplate <typename SyncReadStream, typename Allocator>\ninline std::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, char delim,\n    asio::error_code& ec)\n{\n  return read_until(s, basic_streambuf_ref<Allocator>(b), delim, ec);\n}\n\ntemplate <typename SyncReadStream, typename Allocator>\ninline std::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    ASIO_STRING_VIEW_PARAM delim)\n{\n  return read_until(s, basic_streambuf_ref<Allocator>(b), delim);\n}\n\ntemplate <typename SyncReadStream, typename Allocator>\ninline std::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec)\n{\n  return read_until(s, basic_streambuf_ref<Allocator>(b), delim, ec);\n}\n\n#if defined(ASIO_HAS_BOOST_REGEX)\n\ntemplate <typename SyncReadStream, typename Allocator>\ninline std::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, const boost::regex& expr)\n{\n  return read_until(s, basic_streambuf_ref<Allocator>(b), expr);\n}\n\ntemplate <typename SyncReadStream, typename Allocator>\ninline std::size_t read_until(SyncReadStream& s,\n   
 asio::basic_streambuf<Allocator>& b, const boost::regex& expr,\n    asio::error_code& ec)\n{\n  return read_until(s, basic_streambuf_ref<Allocator>(b), expr, ec);\n}\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n\ntemplate <typename SyncReadStream, typename Allocator, typename MatchCondition>\ninline std::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, MatchCondition match_condition,\n    typename enable_if<is_match_condition<MatchCondition>::value>::type*)\n{\n  return read_until(s, basic_streambuf_ref<Allocator>(b), match_condition);\n}\n\ntemplate <typename SyncReadStream, typename Allocator, typename MatchCondition>\ninline std::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    MatchCondition match_condition, asio::error_code& ec,\n    typename enable_if<is_match_condition<MatchCondition>::value>::type*)\n{\n  return read_until(s, basic_streambuf_ref<Allocator>(b), match_condition, ec);\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\ninline std::size_t read_until(SyncReadStream& s,\n    DynamicBuffer_v2 buffers, char delim,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_until(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), delim, ec);\n  asio::detail::throw_error(ec, \"read_until\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    char delim, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  DynamicBuffer_v2& b = buffers;\n\n  std::size_t search_position = 0;\n  for (;;)\n  {\n    // Determine the range of the data to be searched.\n    typedef 
typename DynamicBuffer_v2::const_buffers_type buffers_type;\n    typedef buffers_iterator<buffers_type> iterator;\n    buffers_type data_buffers =\n      const_cast<const DynamicBuffer_v2&>(b).data(0, b.size());\n    iterator begin = iterator::begin(data_buffers);\n    iterator start_pos = begin + search_position;\n    iterator end = iterator::end(data_buffers);\n\n    // Look for a match.\n    iterator iter = std::find(start_pos, end, delim);\n    if (iter != end)\n    {\n      // Found a match. We're done.\n      ec = asio::error_code();\n      return iter - begin + 1;\n    }\n    else\n    {\n      // No match. Next search can start with the new data.\n      search_position = end - begin;\n    }\n\n    // Check if buffer is full.\n    if (b.size() == b.max_size())\n    {\n      ec = error::not_found;\n      return 0;\n    }\n\n    // Need more data.\n    std::size_t bytes_to_read = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(65536, b.max_size() - b.size()));\n    std::size_t pos = b.size();\n    b.grow(bytes_to_read);\n    std::size_t bytes_transferred = s.read_some(b.data(pos, bytes_to_read), ec);\n    b.shrink(bytes_to_read - bytes_transferred);\n    if (ec)\n      return 0;\n  }\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\ninline std::size_t read_until(SyncReadStream& s,\n    DynamicBuffer_v2 buffers, ASIO_STRING_VIEW_PARAM delim,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_until(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), delim, ec);\n  asio::detail::throw_error(ec, \"read_until\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec,\n    typename enable_if<\n      
is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  DynamicBuffer_v2& b = buffers;\n\n  std::size_t search_position = 0;\n  for (;;)\n  {\n    // Determine the range of the data to be searched.\n    typedef typename DynamicBuffer_v2::const_buffers_type buffers_type;\n    typedef buffers_iterator<buffers_type> iterator;\n    buffers_type data_buffers =\n      const_cast<const DynamicBuffer_v2&>(b).data(0, b.size());\n    iterator begin = iterator::begin(data_buffers);\n    iterator start_pos = begin + search_position;\n    iterator end = iterator::end(data_buffers);\n\n    // Look for a match.\n    std::pair<iterator, bool> result = detail::partial_search(\n        start_pos, end, delim.begin(), delim.end());\n    if (result.first != end)\n    {\n      if (result.second)\n      {\n        // Full match. We're done.\n        ec = asio::error_code();\n        return result.first - begin + delim.length();\n      }\n      else\n      {\n        // Partial match. Next search needs to start from beginning of match.\n        search_position = result.first - begin;\n      }\n    }\n    else\n    {\n      // No match. 
Next search can start with the new data.\n      search_position = end - begin;\n    }\n\n    // Check if buffer is full.\n    if (b.size() == b.max_size())\n    {\n      ec = error::not_found;\n      return 0;\n    }\n\n    // Need more data.\n    std::size_t bytes_to_read = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(65536, b.max_size() - b.size()));\n    std::size_t pos = b.size();\n    b.grow(bytes_to_read);\n    std::size_t bytes_transferred = s.read_some(b.data(pos, bytes_to_read), ec);\n    b.shrink(bytes_to_read - bytes_transferred);\n    if (ec)\n      return 0;\n  }\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if defined(ASIO_HAS_BOOST_REGEX)\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\ninline std::size_t read_until(SyncReadStream& s,\n    DynamicBuffer_v2 buffers, const boost::regex& expr,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_until(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), expr, ec);\n  asio::detail::throw_error(ec, \"read_until\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    const boost::regex& expr, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  DynamicBuffer_v2& b = buffers;\n\n  std::size_t search_position = 0;\n  for (;;)\n  {\n    // Determine the range of the data to be searched.\n    typedef typename DynamicBuffer_v2::const_buffers_type buffers_type;\n    typedef buffers_iterator<buffers_type> iterator;\n    buffers_type data_buffers =\n      const_cast<const DynamicBuffer_v2&>(b).data(0, b.size());\n    iterator begin = iterator::begin(data_buffers);\n    iterator start_pos = begin + search_position;\n    iterator end = 
iterator::end(data_buffers);\n\n    // Look for a match.\n    boost::match_results<iterator,\n      typename std::vector<boost::sub_match<iterator> >::allocator_type>\n        match_results;\n    if (regex_search(start_pos, end, match_results, expr,\n          boost::match_default | boost::match_partial))\n    {\n      if (match_results[0].matched)\n      {\n        // Full match. We're done.\n        ec = asio::error_code();\n        return match_results[0].second - begin;\n      }\n      else\n      {\n        // Partial match. Next search needs to start from beginning of match.\n        search_position = match_results[0].first - begin;\n      }\n    }\n    else\n    {\n      // No match. Next search can start with the new data.\n      search_position = end - begin;\n    }\n\n    // Check if buffer is full.\n    if (b.size() == b.max_size())\n    {\n      ec = error::not_found;\n      return 0;\n    }\n\n    // Need more data.\n    std::size_t bytes_to_read = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(65536, b.max_size() - b.size()));\n    std::size_t pos = b.size();\n    b.grow(bytes_to_read);\n    std::size_t bytes_transferred = s.read_some(b.data(pos, bytes_to_read), ec);\n    b.shrink(bytes_to_read - bytes_transferred);\n    if (ec)\n      return 0;\n  }\n}\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n\ntemplate <typename SyncReadStream,\n    typename DynamicBuffer_v2, typename MatchCondition>\ninline std::size_t read_until(SyncReadStream& s,\n    DynamicBuffer_v2 buffers, MatchCondition match_condition,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = read_until(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      match_condition, ec);\n  asio::detail::throw_error(ec, \"read_until\");\n  return 
bytes_transferred;\n}\n\ntemplate <typename SyncReadStream,\n    typename DynamicBuffer_v2, typename MatchCondition>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    MatchCondition match_condition, asio::error_code& ec,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  DynamicBuffer_v2& b = buffers;\n\n  std::size_t search_position = 0;\n  for (;;)\n  {\n    // Determine the range of the data to be searched.\n    typedef typename DynamicBuffer_v2::const_buffers_type buffers_type;\n    typedef buffers_iterator<buffers_type> iterator;\n    buffers_type data_buffers =\n      const_cast<const DynamicBuffer_v2&>(b).data(0, b.size());\n    iterator begin = iterator::begin(data_buffers);\n    iterator start_pos = begin + search_position;\n    iterator end = iterator::end(data_buffers);\n\n    // Look for a match.\n    std::pair<iterator, bool> result = match_condition(start_pos, end);\n    if (result.second)\n    {\n      // Full match. We're done.\n      ec = asio::error_code();\n      return result.first - begin;\n    }\n    else if (result.first != end)\n    {\n      // Partial match. Next search needs to start from beginning of match.\n      search_position = result.first - begin;\n    }\n    else\n    {\n      // No match. 
Next search can start with the new data.\n      search_position = end - begin;\n    }\n\n    // Check if buffer is full.\n    if (b.size() == b.max_size())\n    {\n      ec = error::not_found;\n      return 0;\n    }\n\n    // Need more data.\n    std::size_t bytes_to_read = std::min<std::size_t>(\n          std::max<std::size_t>(512, b.capacity() - b.size()),\n          std::min<std::size_t>(65536, b.max_size() - b.size()));\n    std::size_t pos = b.size();\n    b.grow(bytes_to_read);\n    std::size_t bytes_transferred = s.read_some(b.data(pos, bytes_to_read), ec);\n    b.shrink(bytes_to_read - bytes_transferred);\n    if (ec)\n      return 0;\n  }\n}\n\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\nnamespace detail\n{\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  class read_until_delim_op_v1\n  {\n  public:\n    template <typename BufferSequence>\n    read_until_delim_op_v1(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        char delim, ReadHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        delim_(delim),\n        start_(0),\n        search_position_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_until_delim_op_v1(const read_until_delim_op_v1& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        delim_(other.delim_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_until_delim_op_v1(read_until_delim_op_v1&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)),\n        delim_(other.delim_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n  
  }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      const std::size_t not_found = (std::numeric_limits<std::size_t>::max)();\n      std::size_t bytes_to_read;\n      switch (start_ = start)\n      {\n      case 1:\n        for (;;)\n        {\n          {\n            // Determine the range of the data to be searched.\n            typedef typename DynamicBuffer_v1::const_buffers_type\n              buffers_type;\n            typedef buffers_iterator<buffers_type> iterator;\n            buffers_type data_buffers = buffers_.data();\n            iterator begin = iterator::begin(data_buffers);\n            iterator start_pos = begin + search_position_;\n            iterator end = iterator::end(data_buffers);\n\n            // Look for a match.\n            iterator iter = std::find(start_pos, end, delim_);\n            if (iter != end)\n            {\n              // Found a match. We're done.\n              search_position_ = iter - begin + 1;\n              bytes_to_read = 0;\n            }\n\n            // No match yet. 
Check if buffer is full.\n            else if (buffers_.size() == buffers_.max_size())\n            {\n              search_position_ = not_found;\n              bytes_to_read = 0;\n            }\n\n            // Need to read some more data.\n            else\n            {\n              // Next search can start with the new data.\n              search_position_ = end - begin;\n              bytes_to_read = std::min<std::size_t>(\n                    std::max<std::size_t>(512,\n                      buffers_.capacity() - buffers_.size()),\n                    std::min<std::size_t>(65536,\n                      buffers_.max_size() - buffers_.size()));\n            }\n          }\n\n          // Check if we're done.\n          if (!start && bytes_to_read == 0)\n            break;\n\n          // Start a new asynchronous read op_v1eration to obtain more data.\n          stream_.async_read_some(buffers_.prepare(bytes_to_read),\n              ASIO_MOVE_CAST(read_until_delim_op_v1)(*this));\n          return; default:\n          buffers_.commit(bytes_transferred);\n          if (ec || bytes_transferred == 0)\n            break;\n        }\n\n        const asio::error_code result_ec =\n          (search_position_ == not_found)\n          ? error::not_found : ec;\n\n        const std::size_t result_n =\n          (ec || search_position_ == not_found)\n          ? 
0 : search_position_;\n\n        handler_(result_ec, result_n);\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v1 buffers_;\n    char delim_;\n    int start_;\n    std::size_t search_position_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_until_delim_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_until_delim_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_until_delim_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_until_delim_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_until_delim_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_until_delim_v1\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_until_delim_v1(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename DynamicBuffer_v1>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n        char delim) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      read_until_delim_op_v1<AsyncReadStream,\n        typename decay<DynamicBuffer_v1>::type,\n          typename decay<ReadHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n            delim, handler2.value)(asio::error_code(), 0, 1);\n    
}\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_until_delim_op_v1<AsyncReadStream,\n      DynamicBuffer_v1, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_until_delim_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_until_delim_op_v1<AsyncReadStream,\n      DynamicBuffer_v1, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_until_delim_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    char delim, ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void 
(asio::error_code, std::size_t)>(\n      detail::initiate_async_read_until_delim_v1<AsyncReadStream>(s),\n      handler, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), delim);\n}\n\nnamespace detail\n{\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  class read_until_delim_string_op_v1\n  {\n  public:\n    template <typename BufferSequence>\n    read_until_delim_string_op_v1(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        const std::string& delim, ReadHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        delim_(delim),\n        start_(0),\n        search_position_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_until_delim_string_op_v1(const read_until_delim_string_op_v1& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        delim_(other.delim_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_until_delim_string_op_v1(read_until_delim_string_op_v1&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)),\n        delim_(ASIO_MOVE_CAST(std::string)(other.delim_)),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      const std::size_t not_found = (std::numeric_limits<std::size_t>::max)();\n      std::size_t bytes_to_read;\n      switch (start_ = start)\n      {\n      case 1:\n        for (;;)\n        {\n          {\n            // Determine the range of the data to be searched.\n            typedef typename 
DynamicBuffer_v1::const_buffers_type\n              buffers_type;\n            typedef buffers_iterator<buffers_type> iterator;\n            buffers_type data_buffers = buffers_.data();\n            iterator begin = iterator::begin(data_buffers);\n            iterator start_pos = begin + search_position_;\n            iterator end = iterator::end(data_buffers);\n\n            // Look for a match.\n            std::pair<iterator, bool> result = detail::partial_search(\n                start_pos, end, delim_.begin(), delim_.end());\n            if (result.first != end && result.second)\n            {\n              // Full match. We're done.\n              search_position_ = result.first - begin + delim_.length();\n              bytes_to_read = 0;\n            }\n\n            // No match yet. Check if buffer is full.\n            else if (buffers_.size() == buffers_.max_size())\n            {\n              search_position_ = not_found;\n              bytes_to_read = 0;\n            }\n\n            // Need to read some more data.\n            else\n            {\n              if (result.first != end)\n              {\n                // Partial match. 
Next search needs to start from beginning of\n                // match.\n                search_position_ = result.first - begin;\n              }\n              else\n              {\n                // Next search can start with the new data.\n                search_position_ = end - begin;\n              }\n\n              bytes_to_read = std::min<std::size_t>(\n                    std::max<std::size_t>(512,\n                      buffers_.capacity() - buffers_.size()),\n                    std::min<std::size_t>(65536,\n                      buffers_.max_size() - buffers_.size()));\n            }\n          }\n\n          // Check if we're done.\n          if (!start && bytes_to_read == 0)\n            break;\n\n          // Start a new asynchronous read op_v1eration to obtain more data.\n          stream_.async_read_some(buffers_.prepare(bytes_to_read),\n              ASIO_MOVE_CAST(read_until_delim_string_op_v1)(*this));\n          return; default:\n          buffers_.commit(bytes_transferred);\n          if (ec || bytes_transferred == 0)\n            break;\n        }\n\n        const asio::error_code result_ec =\n          (search_position_ == not_found)\n          ? error::not_found : ec;\n\n        const std::size_t result_n =\n          (ec || search_position_ == not_found)\n          ? 
0 : search_position_;\n\n        handler_(result_ec, result_n);\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v1 buffers_;\n    std::string delim_;\n    int start_;\n    std::size_t search_position_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_until_delim_string_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_until_delim_string_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_until_delim_string_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_until_delim_string_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_until_delim_string_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_until_delim_string_v1\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_until_delim_string_v1(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename DynamicBuffer_v1>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n        const std::string& delim) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      read_until_delim_string_op_v1<AsyncReadStream,\n        typename decay<DynamicBuffer_v1>::type,\n          typename decay<ReadHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n            delim, 
handler2.value)(asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_until_delim_string_op_v1<AsyncReadStream,\n      DynamicBuffer_v1, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_until_delim_string_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_until_delim_string_op_v1<AsyncReadStream,\n      DynamicBuffer_v1, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_until_delim_string_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_STRING_VIEW_PARAM delim,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename 
decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_until_delim_string_v1<AsyncReadStream>(s),\n      handler, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      static_cast<std::string>(delim));\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if defined(ASIO_HAS_BOOST_REGEX)\n\nnamespace detail\n{\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename RegEx, typename ReadHandler>\n  class read_until_expr_op_v1\n  {\n  public:\n    template <typename BufferSequence>\n    read_until_expr_op_v1(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        const boost::regex& expr, ReadHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        expr_(expr),\n        start_(0),\n        search_position_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_until_expr_op_v1(const read_until_expr_op_v1& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        expr_(other.expr_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_until_expr_op_v1(read_until_expr_op_v1&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)),\n        expr_(other.expr_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      const std::size_t not_found = (std::numeric_limits<std::size_t>::max)();\n      std::size_t bytes_to_read;\n      switch (start_ = start)\n      {\n      case 1:\n        for 
(;;)\n        {\n          {\n            // Determine the range of the data to be searched.\n            typedef typename DynamicBuffer_v1::const_buffers_type\n              buffers_type;\n            typedef buffers_iterator<buffers_type> iterator;\n            buffers_type data_buffers = buffers_.data();\n            iterator begin = iterator::begin(data_buffers);\n            iterator start_pos = begin + search_position_;\n            iterator end = iterator::end(data_buffers);\n\n            // Look for a match.\n            boost::match_results<iterator,\n              typename std::vector<boost::sub_match<iterator> >::allocator_type>\n                match_results;\n            bool match = regex_search(start_pos, end, match_results, expr_,\n                boost::match_default | boost::match_partial);\n            if (match && match_results[0].matched)\n            {\n              // Full match. We're done.\n              search_position_ = match_results[0].second - begin;\n              bytes_to_read = 0;\n            }\n\n            // No match yet. Check if buffer is full.\n            else if (buffers_.size() == buffers_.max_size())\n            {\n              search_position_ = not_found;\n              bytes_to_read = 0;\n            }\n\n            // Need to read some more data.\n            else\n            {\n              if (match)\n              {\n                // Partial match. 
Next search needs to start from beginning of\n                // match.\n                search_position_ = match_results[0].first - begin;\n              }\n              else\n              {\n                // Next search can start with the new data.\n                search_position_ = end - begin;\n              }\n\n              bytes_to_read = std::min<std::size_t>(\n                    std::max<std::size_t>(512,\n                      buffers_.capacity() - buffers_.size()),\n                    std::min<std::size_t>(65536,\n                      buffers_.max_size() - buffers_.size()));\n            }\n          }\n\n          // Check if we're done.\n          if (!start && bytes_to_read == 0)\n            break;\n\n          // Start a new asynchronous read op_v1eration to obtain more data.\n          stream_.async_read_some(buffers_.prepare(bytes_to_read),\n              ASIO_MOVE_CAST(read_until_expr_op_v1)(*this));\n          return; default:\n          buffers_.commit(bytes_transferred);\n          if (ec || bytes_transferred == 0)\n            break;\n        }\n\n        const asio::error_code result_ec =\n          (search_position_ == not_found)\n          ? error::not_found : ec;\n\n        const std::size_t result_n =\n          (ec || search_position_ == not_found)\n          ? 
0 : search_position_;\n\n        handler_(result_ec, result_n);\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v1 buffers_;\n    RegEx expr_;\n    int start_;\n    std::size_t search_position_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename RegEx, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_until_expr_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, RegEx, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename RegEx, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_until_expr_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, RegEx, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename RegEx, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_until_expr_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, RegEx, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename RegEx, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_until_expr_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, RegEx, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename RegEx, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_until_expr_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, RegEx, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_until_expr_v1\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_until_expr_v1(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename DynamicBuffer_v1, typename RegEx>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const RegEx& expr) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      read_until_expr_op_v1<AsyncReadStream,\n        typename decay<DynamicBuffer_v1>::type,\n          RegEx, typename decay<ReadHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n         
   expr, handler2.value)(asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename RegEx, typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_until_expr_op_v1<AsyncReadStream,\n      DynamicBuffer_v1, RegEx, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_until_expr_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, RegEx, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename RegEx, typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_until_expr_op_v1<AsyncReadStream,\n      DynamicBuffer_v1, RegEx, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_until_expr_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, RegEx, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    const boost::regex& expr,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && 
!is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_until_expr_v1<AsyncReadStream>(s),\n      handler, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), expr);\n}\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n\nnamespace detail\n{\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename MatchCondition, typename ReadHandler>\n  class read_until_match_op_v1\n  {\n  public:\n    template <typename BufferSequence>\n    read_until_match_op_v1(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        MatchCondition match_condition, ReadHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        match_condition_(match_condition),\n        start_(0),\n        search_position_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_until_match_op_v1(const read_until_match_op_v1& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        match_condition_(other.match_condition_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_until_match_op_v1(read_until_match_op_v1&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)),\n        match_condition_(other.match_condition_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      const std::size_t not_found = (std::numeric_limits<std::size_t>::max)();\n      std::size_t bytes_to_read;\n      switch (start_ = 
start)\n      {\n      case 1:\n        for (;;)\n        {\n          {\n            // Determine the range of the data to be searched.\n            typedef typename DynamicBuffer_v1::const_buffers_type\n              buffers_type;\n            typedef buffers_iterator<buffers_type> iterator;\n            buffers_type data_buffers = buffers_.data();\n            iterator begin = iterator::begin(data_buffers);\n            iterator start_pos = begin + search_position_;\n            iterator end = iterator::end(data_buffers);\n\n            // Look for a match.\n            std::pair<iterator, bool> result = match_condition_(start_pos, end);\n            if (result.second)\n            {\n              // Full match. We're done.\n              search_position_ = result.first - begin;\n              bytes_to_read = 0;\n            }\n\n            // No match yet. Check if buffer is full.\n            else if (buffers_.size() == buffers_.max_size())\n            {\n              search_position_ = not_found;\n              bytes_to_read = 0;\n            }\n\n            // Need to read some more data.\n            else\n            {\n              if (result.first != end)\n              {\n                // Partial match. 
Next search needs to start from beginning of\n                // match.\n                search_position_ = result.first - begin;\n              }\n              else\n              {\n                // Next search can start with the new data.\n                search_position_ = end - begin;\n              }\n\n              bytes_to_read = std::min<std::size_t>(\n                    std::max<std::size_t>(512,\n                      buffers_.capacity() - buffers_.size()),\n                    std::min<std::size_t>(65536,\n                      buffers_.max_size() - buffers_.size()));\n            }\n          }\n\n          // Check if we're done.\n          if (!start && bytes_to_read == 0)\n            break;\n\n          // Start a new asynchronous read op_v1eration to obtain more data.\n          stream_.async_read_some(buffers_.prepare(bytes_to_read),\n              ASIO_MOVE_CAST(read_until_match_op_v1)(*this));\n          return; default:\n          buffers_.commit(bytes_transferred);\n          if (ec || bytes_transferred == 0)\n            break;\n        }\n\n        const asio::error_code result_ec =\n          (search_position_ == not_found)\n          ? error::not_found : ec;\n\n        const std::size_t result_n =\n          (ec || search_position_ == not_found)\n          ? 
0 : search_position_;\n\n        handler_(result_ec, result_n);\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v1 buffers_;\n    MatchCondition match_condition_;\n    int start_;\n    std::size_t search_position_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename MatchCondition, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_until_match_op_v1<AsyncReadStream, DynamicBuffer_v1,\n        MatchCondition, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename MatchCondition, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_until_match_op_v1<AsyncReadStream, DynamicBuffer_v1,\n        MatchCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v1,\n      typename MatchCondition, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_until_match_op_v1<AsyncReadStream, DynamicBuffer_v1,\n        MatchCondition, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename MatchCondition,\n      typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_until_match_op_v1<AsyncReadStream, DynamicBuffer_v1,\n        MatchCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v1, typename MatchCondition,\n      typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_until_match_op_v1<AsyncReadStream, DynamicBuffer_v1,\n      MatchCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_until_match_v1\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_until_match_v1(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler,\n        typename DynamicBuffer_v1, typename MatchCondition>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n        MatchCondition match_condition) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      read_until_match_op_v1<AsyncReadStream,\n        typename decay<DynamicBuffer_v1>::type,\n          MatchCondition, typename 
decay<ReadHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n            match_condition, handler2.value)(asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename MatchCondition, typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_until_match_op_v1<AsyncReadStream,\n      DynamicBuffer_v1, MatchCondition, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_until_match_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, MatchCondition, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    typename MatchCondition, typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_until_match_op_v1<AsyncReadStream,\n      DynamicBuffer_v1, MatchCondition, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_until_match_op_v1<AsyncReadStream,\n        DynamicBuffer_v1, MatchCondition, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v1, typename MatchCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) 
buffers,\n    MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_until_match_v1<AsyncReadStream>(s), handler,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), match_condition);\n}\n\n#if !defined(ASIO_NO_IOSTREAM)\n\ntemplate <typename AsyncReadStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    char delim, ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_read_until(s, basic_streambuf_ref<Allocator>(b),\n      delim, ASIO_MOVE_CAST(ReadHandler)(handler));\n}\n\ntemplate <typename AsyncReadStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    ASIO_STRING_VIEW_PARAM delim,\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_read_until(s, basic_streambuf_ref<Allocator>(b),\n      delim, ASIO_MOVE_CAST(ReadHandler)(handler));\n}\n\n#if defined(ASIO_HAS_BOOST_REGEX)\n\ntemplate <typename AsyncReadStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, const boost::regex& 
expr,\n    ASIO_MOVE_ARG(ReadHandler) handler)\n{\n  return async_read_until(s, basic_streambuf_ref<Allocator>(b),\n      expr, ASIO_MOVE_CAST(ReadHandler)(handler));\n}\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n\ntemplate <typename AsyncReadStream, typename Allocator, typename MatchCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<is_match_condition<MatchCondition>::value>::type*)\n{\n  return async_read_until(s, basic_streambuf_ref<Allocator>(b),\n      match_condition, ASIO_MOVE_CAST(ReadHandler)(handler));\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\nnamespace detail\n{\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  class read_until_delim_op_v2\n  {\n  public:\n    template <typename BufferSequence>\n    read_until_delim_op_v2(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        char delim, ReadHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        delim_(delim),\n        start_(0),\n        search_position_(0),\n        bytes_to_read_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_until_delim_op_v2(const read_until_delim_op_v2& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        delim_(other.delim_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        bytes_to_read_(other.bytes_to_read_),\n        handler_(other.handler_)\n    {\n    }\n\n    
read_until_delim_op_v2(read_until_delim_op_v2&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)),\n        delim_(other.delim_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        bytes_to_read_(other.bytes_to_read_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      const std::size_t not_found = (std::numeric_limits<std::size_t>::max)();\n      std::size_t pos;\n      switch (start_ = start)\n      {\n      case 1:\n        for (;;)\n        {\n          {\n            // Determine the range of the data to be searched.\n            typedef typename DynamicBuffer_v2::const_buffers_type\n              buffers_type;\n            typedef buffers_iterator<buffers_type> iterator;\n            buffers_type data_buffers =\n              const_cast<const DynamicBuffer_v2&>(buffers_).data(\n                  0, buffers_.size());\n            iterator begin = iterator::begin(data_buffers);\n            iterator start_pos = begin + search_position_;\n            iterator end = iterator::end(data_buffers);\n\n            // Look for a match.\n            iterator iter = std::find(start_pos, end, delim_);\n            if (iter != end)\n            {\n              // Found a match. We're done.\n              search_position_ = iter - begin + 1;\n              bytes_to_read_ = 0;\n            }\n\n            // No match yet. 
Check if buffer is full.\n            else if (buffers_.size() == buffers_.max_size())\n            {\n              search_position_ = not_found;\n              bytes_to_read_ = 0;\n            }\n\n            // Need to read some more data.\n            else\n            {\n              // Next search can start with the new data.\n              search_position_ = end - begin;\n              bytes_to_read_ = std::min<std::size_t>(\n                    std::max<std::size_t>(512,\n                      buffers_.capacity() - buffers_.size()),\n                    std::min<std::size_t>(65536,\n                      buffers_.max_size() - buffers_.size()));\n            }\n          }\n\n          // Check if we're done.\n          if (!start && bytes_to_read_ == 0)\n            break;\n\n          // Start a new asynchronous read op_v2eration to obtain more data.\n          pos = buffers_.size();\n          buffers_.grow(bytes_to_read_);\n          stream_.async_read_some(buffers_.data(pos, bytes_to_read_),\n              ASIO_MOVE_CAST(read_until_delim_op_v2)(*this));\n          return; default:\n          buffers_.shrink(bytes_to_read_ - bytes_transferred);\n          if (ec || bytes_transferred == 0)\n            break;\n        }\n\n        const asio::error_code result_ec =\n          (search_position_ == not_found)\n          ? error::not_found : ec;\n\n        const std::size_t result_n =\n          (ec || search_position_ == not_found)\n          ? 
0 : search_position_;\n\n        handler_(result_ec, result_n);\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v2 buffers_;\n    char delim_;\n    int start_;\n    std::size_t search_position_;\n    std::size_t bytes_to_read_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_until_delim_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_until_delim_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_until_delim_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_until_delim_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_until_delim_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_until_delim_v2\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_until_delim_v2(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename DynamicBuffer_v2>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v2) buffers, char delim) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      read_until_delim_op_v2<AsyncReadStream,\n        typename decay<DynamicBuffer_v2>::type,\n          typename decay<ReadHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n            delim, handler2.value)(asio::error_code(), 0, 1);\n    }\n\n  
private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_until_delim_op_v2<AsyncReadStream,\n      DynamicBuffer_v2, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_until_delim_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_until_delim_op_v2<AsyncReadStream,\n      DynamicBuffer_v2, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_until_delim_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    char delim, ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_until_delim_v2<AsyncReadStream>(s),\n      handler, 
ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), delim);\n}\n\nnamespace detail\n{\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  class read_until_delim_string_op_v2\n  {\n  public:\n    template <typename BufferSequence>\n    read_until_delim_string_op_v2(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        const std::string& delim, ReadHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        delim_(delim),\n        start_(0),\n        search_position_(0),\n        bytes_to_read_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_until_delim_string_op_v2(const read_until_delim_string_op_v2& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        delim_(other.delim_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        bytes_to_read_(other.bytes_to_read_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_until_delim_string_op_v2(read_until_delim_string_op_v2&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)),\n        delim_(ASIO_MOVE_CAST(std::string)(other.delim_)),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        bytes_to_read_(other.bytes_to_read_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      const std::size_t not_found = (std::numeric_limits<std::size_t>::max)();\n      std::size_t pos;\n      switch (start_ = start)\n      {\n      case 1:\n        for (;;)\n        {\n          {\n            // Determine the range of the data to be searched.\n            typedef typename DynamicBuffer_v2::const_buffers_type\n 
             buffers_type;\n            typedef buffers_iterator<buffers_type> iterator;\n            buffers_type data_buffers =\n              const_cast<const DynamicBuffer_v2&>(buffers_).data(\n                  0, buffers_.size());\n            iterator begin = iterator::begin(data_buffers);\n            iterator start_pos = begin + search_position_;\n            iterator end = iterator::end(data_buffers);\n\n            // Look for a match.\n            std::pair<iterator, bool> result = detail::partial_search(\n                start_pos, end, delim_.begin(), delim_.end());\n            if (result.first != end && result.second)\n            {\n              // Full match. We're done.\n              search_position_ = result.first - begin + delim_.length();\n              bytes_to_read_ = 0;\n            }\n\n            // No match yet. Check if buffer is full.\n            else if (buffers_.size() == buffers_.max_size())\n            {\n              search_position_ = not_found;\n              bytes_to_read_ = 0;\n            }\n\n            // Need to read some more data.\n            else\n            {\n              if (result.first != end)\n              {\n                // Partial match. 
Next search needs to start from beginning of\n                // match.\n                search_position_ = result.first - begin;\n              }\n              else\n              {\n                // Next search can start with the new data.\n                search_position_ = end - begin;\n              }\n\n              bytes_to_read_ = std::min<std::size_t>(\n                    std::max<std::size_t>(512,\n                      buffers_.capacity() - buffers_.size()),\n                    std::min<std::size_t>(65536,\n                      buffers_.max_size() - buffers_.size()));\n            }\n          }\n\n          // Check if we're done.\n          if (!start && bytes_to_read_ == 0)\n            break;\n\n          // Start a new asynchronous read op_v2eration to obtain more data.\n          pos = buffers_.size();\n          buffers_.grow(bytes_to_read_);\n          stream_.async_read_some(buffers_.data(pos, bytes_to_read_),\n              ASIO_MOVE_CAST(read_until_delim_string_op_v2)(*this));\n          return; default:\n          buffers_.shrink(bytes_to_read_ - bytes_transferred);\n          if (ec || bytes_transferred == 0)\n            break;\n        }\n\n        const asio::error_code result_ec =\n          (search_position_ == not_found)\n          ? error::not_found : ec;\n\n        const std::size_t result_n =\n          (ec || search_position_ == not_found)\n          ? 
0 : search_position_;\n\n        handler_(result_ec, result_n);\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v2 buffers_;\n    std::string delim_;\n    int start_;\n    std::size_t search_position_;\n    std::size_t bytes_to_read_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_until_delim_string_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_until_delim_string_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_until_delim_string_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_until_delim_string_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_until_delim_string_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_until_delim_string_v2\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_until_delim_string_v2(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename DynamicBuffer_v2>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v2) buffers,\n        const std::string& delim) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      read_until_delim_string_op_v2<AsyncReadStream,\n        typename decay<DynamicBuffer_v2>::type,\n          typename decay<ReadHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n            delim, 
handler2.value)(asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_until_delim_string_op_v2<AsyncReadStream,\n      DynamicBuffer_v2, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_until_delim_string_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_until_delim_string_op_v2<AsyncReadStream,\n      DynamicBuffer_v2, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_until_delim_string_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    DynamicBuffer_v2 buffers, ASIO_STRING_VIEW_PARAM delim,\n    ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, 
std::size_t)>(\n      detail::initiate_async_read_until_delim_string_v2<AsyncReadStream>(s),\n      handler, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      static_cast<std::string>(delim));\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if defined(ASIO_HAS_BOOST_REGEX)\n\nnamespace detail\n{\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename RegEx, typename ReadHandler>\n  class read_until_expr_op_v2\n  {\n  public:\n    template <typename BufferSequence>\n    read_until_expr_op_v2(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        const boost::regex& expr, ReadHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        expr_(expr),\n        start_(0),\n        search_position_(0),\n        bytes_to_read_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_until_expr_op_v2(const read_until_expr_op_v2& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        expr_(other.expr_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        bytes_to_read_(other.bytes_to_read_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_until_expr_op_v2(read_until_expr_op_v2&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)),\n        expr_(other.expr_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        bytes_to_read_(other.bytes_to_read_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      const std::size_t not_found = (std::numeric_limits<std::size_t>::max)();\n      std::size_t pos;\n      switch (start_ = start)\n      {\n      case 1:\n        for (;;)\n        
{\n          {\n            // Determine the range of the data to be searched.\n            typedef typename DynamicBuffer_v2::const_buffers_type\n              buffers_type;\n            typedef buffers_iterator<buffers_type> iterator;\n            buffers_type data_buffers =\n              const_cast<const DynamicBuffer_v2&>(buffers_).data(\n                  0, buffers_.size());\n            iterator begin = iterator::begin(data_buffers);\n            iterator start_pos = begin + search_position_;\n            iterator end = iterator::end(data_buffers);\n\n            // Look for a match.\n            boost::match_results<iterator,\n              typename std::vector<boost::sub_match<iterator> >::allocator_type>\n                match_results;\n            bool match = regex_search(start_pos, end, match_results, expr_,\n                boost::match_default | boost::match_partial);\n            if (match && match_results[0].matched)\n            {\n              // Full match. We're done.\n              search_position_ = match_results[0].second - begin;\n              bytes_to_read_ = 0;\n            }\n\n            // No match yet. Check if buffer is full.\n            else if (buffers_.size() == buffers_.max_size())\n            {\n              search_position_ = not_found;\n              bytes_to_read_ = 0;\n            }\n\n            // Need to read some more data.\n            else\n            {\n              if (match)\n              {\n                // Partial match. 
Next search needs to start from beginning of\n                // match.\n                search_position_ = match_results[0].first - begin;\n              }\n              else\n              {\n                // Next search can start with the new data.\n                search_position_ = end - begin;\n              }\n\n              bytes_to_read_ = std::min<std::size_t>(\n                    std::max<std::size_t>(512,\n                      buffers_.capacity() - buffers_.size()),\n                    std::min<std::size_t>(65536,\n                      buffers_.max_size() - buffers_.size()));\n            }\n          }\n\n          // Check if we're done.\n          if (!start && bytes_to_read_ == 0)\n            break;\n\n          // Start a new asynchronous read op_v2eration to obtain more data.\n          pos = buffers_.size();\n          buffers_.grow(bytes_to_read_);\n          stream_.async_read_some(buffers_.data(pos, bytes_to_read_),\n              ASIO_MOVE_CAST(read_until_expr_op_v2)(*this));\n          return; default:\n          buffers_.shrink(bytes_to_read_ - bytes_transferred);\n          if (ec || bytes_transferred == 0)\n            break;\n        }\n\n        const asio::error_code result_ec =\n          (search_position_ == not_found)\n          ? error::not_found : ec;\n\n        const std::size_t result_n =\n          (ec || search_position_ == not_found)\n          ? 
0 : search_position_;\n\n        handler_(result_ec, result_n);\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v2 buffers_;\n    RegEx expr_;\n    int start_;\n    std::size_t search_position_;\n    std::size_t bytes_to_read_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename RegEx, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_until_expr_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, RegEx, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename RegEx, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_until_expr_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, RegEx, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename RegEx, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_until_expr_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, RegEx, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename RegEx, typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_until_expr_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, RegEx, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename RegEx, typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_until_expr_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, RegEx, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_until_expr_v2\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_until_expr_v2(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler, typename DynamicBuffer_v2, typename RegEx>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v2) buffers,\n        const RegEx& expr) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      read_until_expr_op_v2<AsyncReadStream,\n        typename decay<DynamicBuffer_v2>::type,\n          RegEx, typename decay<ReadHandler>::type>(\n            stream_, 
ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n            expr, handler2.value)(asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename RegEx, typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_until_expr_op_v2<AsyncReadStream,\n      DynamicBuffer_v2, RegEx, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_until_expr_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, RegEx, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename RegEx, typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_until_expr_op_v2<AsyncReadStream,\n      DynamicBuffer_v2, RegEx, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_until_expr_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, RegEx, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    const boost::regex& expr, ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    
>::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_until_expr_v2<AsyncReadStream>(s),\n      handler, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), expr);\n}\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n\nnamespace detail\n{\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename MatchCondition, typename ReadHandler>\n  class read_until_match_op_v2\n  {\n  public:\n    template <typename BufferSequence>\n    read_until_match_op_v2(AsyncReadStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        MatchCondition match_condition, ReadHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        match_condition_(match_condition),\n        start_(0),\n        search_position_(0),\n        bytes_to_read_(0),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    read_until_match_op_v2(const read_until_match_op_v2& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        match_condition_(other.match_condition_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        bytes_to_read_(other.bytes_to_read_),\n        handler_(other.handler_)\n    {\n    }\n\n    read_until_match_op_v2(read_until_match_op_v2&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)),\n        match_condition_(other.match_condition_),\n        start_(other.start_),\n        search_position_(other.search_position_),\n        bytes_to_read_(other.bytes_to_read_),\n        handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      const std::size_t not_found = (std::numeric_limits<std::size_t>::max)();\n      
std::size_t pos;\n      switch (start_ = start)\n      {\n      case 1:\n        for (;;)\n        {\n          {\n            // Determine the range of the data to be searched.\n            typedef typename DynamicBuffer_v2::const_buffers_type\n              buffers_type;\n            typedef buffers_iterator<buffers_type> iterator;\n            buffers_type data_buffers =\n              const_cast<const DynamicBuffer_v2&>(buffers_).data(\n                  0, buffers_.size());\n            iterator begin = iterator::begin(data_buffers);\n            iterator start_pos = begin + search_position_;\n            iterator end = iterator::end(data_buffers);\n\n            // Look for a match.\n            std::pair<iterator, bool> result = match_condition_(start_pos, end);\n            if (result.second)\n            {\n              // Full match. We're done.\n              search_position_ = result.first - begin;\n              bytes_to_read_ = 0;\n            }\n\n            // No match yet. Check if buffer is full.\n            else if (buffers_.size() == buffers_.max_size())\n            {\n              search_position_ = not_found;\n              bytes_to_read_ = 0;\n            }\n\n            // Need to read some more data.\n            else\n            {\n              if (result.first != end)\n              {\n                // Partial match. 
Next search needs to start from beginning of\n                // match.\n                search_position_ = result.first - begin;\n              }\n              else\n              {\n                // Next search can start with the new data.\n                search_position_ = end - begin;\n              }\n\n              bytes_to_read_ = std::min<std::size_t>(\n                    std::max<std::size_t>(512,\n                      buffers_.capacity() - buffers_.size()),\n                    std::min<std::size_t>(65536,\n                      buffers_.max_size() - buffers_.size()));\n            }\n          }\n\n          // Check if we're done.\n          if (!start && bytes_to_read_ == 0)\n            break;\n\n          // Start a new asynchronous read op_v2eration to obtain more data.\n          pos = buffers_.size();\n          buffers_.grow(bytes_to_read_);\n          stream_.async_read_some(buffers_.data(pos, bytes_to_read_),\n              ASIO_MOVE_CAST(read_until_match_op_v2)(*this));\n          return; default:\n          buffers_.shrink(bytes_to_read_ - bytes_transferred);\n          if (ec || bytes_transferred == 0)\n            break;\n        }\n\n        const asio::error_code result_ec =\n          (search_position_ == not_found)\n          ? error::not_found : ec;\n\n        const std::size_t result_n =\n          (ec || search_position_ == not_found)\n          ? 
0 : search_position_;\n\n        handler_(result_ec, result_n);\n      }\n    }\n\n  //private:\n    AsyncReadStream& stream_;\n    DynamicBuffer_v2 buffers_;\n    MatchCondition match_condition_;\n    int start_;\n    std::size_t search_position_;\n    std::size_t bytes_to_read_;\n    ReadHandler handler_;\n  };\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename MatchCondition, typename ReadHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      read_until_match_op_v2<AsyncReadStream, DynamicBuffer_v2,\n        MatchCondition, ReadHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename MatchCondition, typename ReadHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      read_until_match_op_v2<AsyncReadStream, DynamicBuffer_v2,\n        MatchCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream, typename DynamicBuffer_v2,\n      typename MatchCondition, typename ReadHandler>\n  inline bool asio_handler_is_continuation(\n      read_until_match_op_v2<AsyncReadStream, DynamicBuffer_v2,\n        MatchCondition, ReadHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename MatchCondition,\n      typename ReadHandler>\n  inline void asio_handler_invoke(Function& function,\n      read_until_match_op_v2<AsyncReadStream, DynamicBuffer_v2,\n        MatchCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncReadStream,\n      typename DynamicBuffer_v2, typename MatchCondition,\n      typename ReadHandler>\n  inline void asio_handler_invoke(const Function& function,\n      read_until_match_op_v2<AsyncReadStream, DynamicBuffer_v2,\n      MatchCondition, ReadHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncReadStream>\n  class initiate_async_read_until_match_v2\n  {\n  public:\n    typedef typename AsyncReadStream::executor_type executor_type;\n\n    explicit initiate_async_read_until_match_v2(AsyncReadStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename ReadHandler,\n        typename DynamicBuffer_v2, typename MatchCondition>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v2) buffers,\n        MatchCondition match_condition) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      non_const_lvalue<ReadHandler> handler2(handler);\n      read_until_match_op_v2<AsyncReadStream,\n        typename decay<DynamicBuffer_v2>::type,\n          MatchCondition, typename 
decay<ReadHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n            match_condition, handler2.value)(asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncReadStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename MatchCondition, typename ReadHandler, typename Allocator>\nstruct associated_allocator<\n    detail::read_until_match_op_v2<AsyncReadStream,\n      DynamicBuffer_v2, MatchCondition, ReadHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<ReadHandler, Allocator>::type type;\n\n  static type get(\n      const detail::read_until_match_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, MatchCondition, ReadHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<ReadHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    typename MatchCondition, typename ReadHandler, typename Executor>\nstruct associated_executor<\n    detail::read_until_match_op_v2<AsyncReadStream,\n      DynamicBuffer_v2, MatchCondition, ReadHandler>,\n    Executor>\n{\n  typedef typename associated_executor<ReadHandler, Executor>::type type;\n\n  static type get(\n      const detail::read_until_match_op_v2<AsyncReadStream,\n        DynamicBuffer_v2, MatchCondition, ReadHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<ReadHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v2, typename MatchCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    
MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  return async_initiate<ReadHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_read_until_match_v2<AsyncReadStream>(s), handler,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), match_condition);\n}\n\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_READ_UNTIL_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/redirect_error.hpp",
    "content": "\n// impl/redirect_error.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_REDIRECT_ERROR_HPP\n#define ASIO_IMPL_REDIRECT_ERROR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/detail/variadic_templates.hpp\"\n#include \"asio/system_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n// Class to adapt a redirect_error_t as a completion handler.\ntemplate <typename Handler>\nclass redirect_error_handler\n{\npublic:\n  typedef void result_type;\n\n  template <typename CompletionToken>\n  redirect_error_handler(redirect_error_t<CompletionToken> e)\n    : ec_(e.ec_),\n      handler_(ASIO_MOVE_CAST(CompletionToken)(e.token_))\n  {\n  }\n\n  template <typename RedirectedHandler>\n  redirect_error_handler(asio::error_code& ec,\n      ASIO_MOVE_ARG(RedirectedHandler) h)\n    : ec_(ec),\n      handler_(ASIO_MOVE_CAST(RedirectedHandler)(h))\n  {\n  }\n\n  void operator()()\n  {\n    handler_();\n  }\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Arg, typename... Args>\n  typename enable_if<\n    !is_same<typename decay<Arg>::type, asio::error_code>::value\n  >::type\n  operator()(ASIO_MOVE_ARG(Arg) arg, ASIO_MOVE_ARG(Args)... 
args)\n  {\n    handler_(ASIO_MOVE_CAST(Arg)(arg),\n        ASIO_MOVE_CAST(Args)(args)...);\n  }\n\n  template <typename... Args>\n  void operator()(const asio::error_code& ec,\n      ASIO_MOVE_ARG(Args)... args)\n  {\n    ec_ = ec;\n    handler_(ASIO_MOVE_CAST(Args)(args)...);\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Arg>\n  typename enable_if<\n    !is_same<typename decay<Arg>::type, asio::error_code>::value\n  >::type\n  operator()(ASIO_MOVE_ARG(Arg) arg)\n  {\n    handler_(ASIO_MOVE_CAST(Arg)(arg));\n  }\n\n  void operator()(const asio::error_code& ec)\n  {\n    ec_ = ec;\n    handler_();\n  }\n\n#define ASIO_PRIVATE_REDIRECT_ERROR_DEF(n) \\\n  template <typename Arg, ASIO_VARIADIC_TPARAMS(n)> \\\n  typename enable_if< \\\n    !is_same<typename decay<Arg>::type, asio::error_code>::value \\\n  >::type \\\n  operator()(ASIO_MOVE_ARG(Arg) arg, ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    handler_(ASIO_MOVE_CAST(Arg)(arg), \\\n        ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  void operator()(const asio::error_code& ec, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    ec_ = ec; \\\n    handler_(ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_REDIRECT_ERROR_DEF)\n#undef ASIO_PRIVATE_REDIRECT_ERROR_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n//private:\n  asio::error_code& ec_;\n  Handler handler_;\n};\n\ntemplate <typename Handler>\ninline void* asio_handler_allocate(std::size_t size,\n    redirect_error_handler<Handler>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Handler>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    redirect_error_handler<Handler>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Handler>\ninline bool 
asio_handler_is_continuation(\n    redirect_error_handler<Handler>* this_handler)\n{\n  return asio_handler_cont_helpers::is_continuation(\n        this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler>\ninline void asio_handler_invoke(Function& function,\n    redirect_error_handler<Handler>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Function, typename Handler>\ninline void asio_handler_invoke(const Function& function,\n    redirect_error_handler<Handler>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Signature>\nstruct redirect_error_signature\n{\n  typedef Signature type;\n};\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename R, typename... Args>\nstruct redirect_error_signature<R(asio::error_code, Args...)>\n{\n  typedef R type(Args...);\n};\n\ntemplate <typename R, typename... Args>\nstruct redirect_error_signature<R(const asio::error_code&, Args...)>\n{\n  typedef R type(Args...);\n};\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename R>\nstruct redirect_error_signature<R(asio::error_code)>\n{\n  typedef R type();\n};\n\ntemplate <typename R>\nstruct redirect_error_signature<R(const asio::error_code&)>\n{\n  typedef R type();\n};\n\n#define ASIO_PRIVATE_REDIRECT_ERROR_DEF(n) \\\n  template <typename R, ASIO_VARIADIC_TPARAMS(n)> \\\n  struct redirect_error_signature< \\\n      R(asio::error_code, ASIO_VARIADIC_TARGS(n))> \\\n  { \\\n    typedef R type(ASIO_VARIADIC_TARGS(n)); \\\n  }; \\\n  \\\n  template <typename R, ASIO_VARIADIC_TPARAMS(n)> \\\n  struct redirect_error_signature< \\\n      R(const asio::error_code&, ASIO_VARIADIC_TARGS(n))> \\\n  { \\\n    typedef R type(ASIO_VARIADIC_TARGS(n)); \\\n  }; \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_REDIRECT_ERROR_DEF)\n#undef ASIO_PRIVATE_REDIRECT_ERROR_DEF\n\n#endif // 
defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename CompletionToken, typename Signature>\nstruct async_result<redirect_error_t<CompletionToken>, Signature>\n{\n  typedef typename async_result<CompletionToken,\n    typename detail::redirect_error_signature<Signature>::type>\n      ::return_type return_type;\n\n  template <typename Initiation>\n  struct init_wrapper\n  {\n    template <typename Init>\n    init_wrapper(asio::error_code& ec, ASIO_MOVE_ARG(Init) init)\n      : ec_(ec),\n        initiation_(ASIO_MOVE_CAST(Init)(init))\n    {\n    }\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n    template <typename Handler, typename... Args>\n    void operator()(\n        ASIO_MOVE_ARG(Handler) handler,\n        ASIO_MOVE_ARG(Args)... args)\n    {\n      ASIO_MOVE_CAST(Initiation)(initiation_)(\n          detail::redirect_error_handler<\n            typename decay<Handler>::type>(\n              ec_, ASIO_MOVE_CAST(Handler)(handler)),\n          ASIO_MOVE_CAST(Args)(args)...);\n    }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n    template <typename Handler>\n    void operator()(\n        ASIO_MOVE_ARG(Handler) handler)\n    {\n      ASIO_MOVE_CAST(Initiation)(initiation_)(\n          detail::redirect_error_handler<\n            typename decay<Handler>::type>(\n              ec_, ASIO_MOVE_CAST(Handler)(handler)));\n    }\n\n#define ASIO_PRIVATE_INIT_WRAPPER_DEF(n) \\\n    template <typename Handler, ASIO_VARIADIC_TPARAMS(n)> \\\n    void operator()( \\\n        ASIO_MOVE_ARG(Handler) handler, \\\n        ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n    { \\\n      ASIO_MOVE_CAST(Initiation)(initiation_)( \\\n          detail::redirect_error_handler< \\\n            typename decay<Handler>::type>( \\\n              ec_, ASIO_MOVE_CAST(Handler)(handler)), \\\n          ASIO_VARIADIC_MOVE_ARGS(n)); \\\n    } \\\n    /**/\n    ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INIT_WRAPPER_DEF)\n#undef 
ASIO_PRIVATE_INIT_WRAPPER_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n    asio::error_code& ec_;\n    Initiation initiation_;\n  };\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Initiation, typename RawCompletionToken, typename... Args>\n  static return_type initiate(\n      ASIO_MOVE_ARG(Initiation) initiation,\n      ASIO_MOVE_ARG(RawCompletionToken) token,\n      ASIO_MOVE_ARG(Args)... args)\n  {\n    return async_initiate<CompletionToken,\n      typename detail::redirect_error_signature<Signature>::type>(\n        init_wrapper<typename decay<Initiation>::type>(\n          token.ec_, ASIO_MOVE_CAST(Initiation)(initiation)),\n        token.token_, ASIO_MOVE_CAST(Args)(args)...);\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename Initiation, typename RawCompletionToken>\n  static return_type initiate(\n      ASIO_MOVE_ARG(Initiation) initiation,\n      ASIO_MOVE_ARG(RawCompletionToken) token)\n  {\n    return async_initiate<CompletionToken,\n      typename detail::redirect_error_signature<Signature>::type>(\n        init_wrapper<typename decay<Initiation>::type>(\n          token.ec_, ASIO_MOVE_CAST(Initiation)(initiation)),\n        token.token_);\n  }\n\n#define ASIO_PRIVATE_INITIATE_DEF(n) \\\n  template <typename Initiation, typename RawCompletionToken, \\\n      ASIO_VARIADIC_TPARAMS(n)> \\\n  static return_type initiate( \\\n      ASIO_MOVE_ARG(Initiation) initiation, \\\n      ASIO_MOVE_ARG(RawCompletionToken) token, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    return async_initiate<CompletionToken, \\\n      typename detail::redirect_error_signature<Signature>::type>( \\\n        init_wrapper<typename decay<Initiation>::type>( \\\n          token.ec_, ASIO_MOVE_CAST(Initiation)(initiation)), \\\n        token.token_, ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INITIATE_DEF)\n#undef ASIO_PRIVATE_INITIATE_DEF\n\n#endif // 
defined(ASIO_HAS_VARIADIC_TEMPLATES)\n};\n\ntemplate <typename Handler, typename Executor>\nstruct associated_executor<detail::redirect_error_handler<Handler>, Executor>\n{\n  typedef typename associated_executor<Handler, Executor>::type type;\n\n  static type get(\n      const detail::redirect_error_handler<Handler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<Handler, Executor>::get(h.handler_, ex);\n  }\n};\n\ntemplate <typename Handler, typename Allocator>\nstruct associated_allocator<detail::redirect_error_handler<Handler>, Allocator>\n{\n  typedef typename associated_allocator<Handler, Allocator>::type type;\n\n  static type get(\n      const detail::redirect_error_handler<Handler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<Handler, Allocator>::get(h.handler_, a);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_REDIRECT_ERROR_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/serial_port_base.hpp",
    "content": "//\n// impl/serial_port_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_SERIAL_PORT_BASE_HPP\n#define ASIO_IMPL_SERIAL_PORT_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\ninline serial_port_base::baud_rate::baud_rate(unsigned int rate)\n  : value_(rate)\n{\n}\n\ninline unsigned int serial_port_base::baud_rate::value() const\n{\n  return value_;\n}\n\ninline serial_port_base::flow_control::type\nserial_port_base::flow_control::value() const\n{\n  return value_;\n}\n\ninline serial_port_base::parity::type serial_port_base::parity::value() const\n{\n  return value_;\n}\n\ninline serial_port_base::stop_bits::type\nserial_port_base::stop_bits::value() const\n{\n  return value_;\n}\n\ninline unsigned int serial_port_base::character_size::value() const\n{\n  return value_;\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_SERIAL_PORT_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/serial_port_base.ipp",
    "content": "//\n// impl/serial_port_base.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_SERIAL_PORT_BASE_IPP\n#define ASIO_IMPL_SERIAL_PORT_BASE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_SERIAL_PORT)\n\n#include <stdexcept>\n#include \"asio/error.hpp\"\n#include \"asio/serial_port_base.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n\n#if defined(GENERATING_DOCUMENTATION)\n# define ASIO_OPTION_STORAGE implementation_defined\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# define ASIO_OPTION_STORAGE DCB\n#else\n# define ASIO_OPTION_STORAGE termios\n#endif\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nASIO_SYNC_OP_VOID serial_port_base::baud_rate::store(\n    ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  storage.BaudRate = value_;\n#else\n  speed_t baud;\n  switch (value_)\n  {\n  // Do POSIX-specified rates first.\n  case 0: baud = B0; break;\n  case 50: baud = B50; break;\n  case 75: baud = B75; break;\n  case 110: baud = B110; break;\n  case 134: baud = B134; break;\n  case 150: baud = B150; break;\n  case 200: baud = B200; break;\n  case 300: baud = B300; break;\n  case 600: baud = B600; break;\n  case 1200: baud = B1200; break;\n  case 1800: baud = B1800; break;\n  case 2400: baud = B2400; break;\n  case 4800: baud = B4800; break;\n  case 9600: baud = B9600; break;\n  case 19200: baud = B19200; break;\n  case 38400: baud = B38400; break;\n  // And now the extended ones conditionally.\n# ifdef B7200\n 
 case 7200: baud = B7200; break;\n# endif\n# ifdef B14400\n  case 14400: baud = B14400; break;\n# endif\n# ifdef B57600\n  case 57600: baud = B57600; break;\n# endif\n# ifdef B115200\n  case 115200: baud = B115200; break;\n# endif\n# ifdef B230400\n  case 230400: baud = B230400; break;\n# endif\n# ifdef B460800\n  case 460800: baud = B460800; break;\n# endif\n# ifdef B500000\n  case 500000: baud = B500000; break;\n# endif\n# ifdef B576000\n  case 576000: baud = B576000; break;\n# endif\n# ifdef B921600\n  case 921600: baud = B921600; break;\n# endif\n# ifdef B1000000\n  case 1000000: baud = B1000000; break;\n# endif\n# ifdef B1152000\n  case 1152000: baud = B1152000; break;\n# endif\n# ifdef B2000000\n  case 2000000: baud = B2000000; break;\n# endif\n# ifdef B3000000\n  case 3000000: baud = B3000000; break;\n# endif\n# ifdef B3500000\n  case 3500000: baud = B3500000; break;\n# endif\n# ifdef B4000000\n  case 4000000: baud = B4000000; break;\n# endif\n  default:\n    ec = asio::error::invalid_argument;\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n# if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)\n  ::cfsetspeed(&storage, baud);\n# else\n  ::cfsetispeed(&storage, baud);\n  ::cfsetospeed(&storage, baud);\n# endif\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nASIO_SYNC_OP_VOID serial_port_base::baud_rate::load(\n    const ASIO_OPTION_STORAGE& storage, asio::error_code& ec)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  value_ = storage.BaudRate;\n#else\n  speed_t baud = ::cfgetospeed(&storage);\n  switch (baud)\n  {\n  // First do those specified by POSIX.\n  case B0: value_ = 0; break;\n  case B50: value_ = 50; break;\n  case B75: value_ = 75; break;\n  case B110: value_ = 110; break;\n  case B134: value_ = 134; break;\n  case B150: value_ = 150; break;\n  case B200: value_ = 200; break;\n  case B300: value_ = 300; break;\n  case B600: value_ = 600; break;\n  case B1200: value_ = 1200; break;\n  case B1800: value_ = 1800; break;\n 
 case B2400: value_ = 2400; break;\n  case B4800: value_ = 4800; break;\n  case B9600: value_ = 9600; break;\n  case B19200: value_ = 19200; break;\n  case B38400: value_ = 38400; break;\n  // Now conditionally handle a bunch of extended rates.\n# ifdef B7200\n  case B7200: value_ = 7200; break;\n# endif\n# ifdef B14400\n  case B14400: value_ = 14400; break;\n# endif\n# ifdef B57600\n  case B57600: value_ = 57600; break;\n# endif\n# ifdef B115200\n  case B115200: value_ = 115200; break;\n# endif\n# ifdef B230400\n  case B230400: value_ = 230400; break;\n# endif\n# ifdef B460800\n  case B460800: value_ = 460800; break;\n# endif\n# ifdef B500000\n  case B500000: value_ = 500000; break;\n# endif\n# ifdef B576000\n  case B576000: value_ = 576000; break;\n# endif\n# ifdef B921600\n  case B921600: value_ = 921600; break;\n# endif\n# ifdef B1000000\n  case B1000000: value_ = 1000000; break;\n# endif\n# ifdef B1152000\n  case B1152000: value_ = 1152000; break;\n# endif\n# ifdef B2000000\n  case B2000000: value_ = 2000000; break;\n# endif\n# ifdef B3000000\n  case B3000000: value_ = 3000000; break;\n# endif\n# ifdef B3500000\n  case B3500000: value_ = 3500000; break;\n# endif\n# ifdef B4000000\n  case B4000000: value_ = 4000000; break;\n# endif\n  default:\n    value_ = 0;\n    ec = asio::error::invalid_argument;\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nserial_port_base::flow_control::flow_control(\n    serial_port_base::flow_control::type t)\n  : value_(t)\n{\n  if (t != none && t != software && t != hardware)\n  {\n    std::out_of_range ex(\"invalid flow_control value\");\n    asio::detail::throw_exception(ex);\n  }\n}\n\nASIO_SYNC_OP_VOID serial_port_base::flow_control::store(\n    ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  storage.fOutxCtsFlow = FALSE;\n  storage.fOutxDsrFlow = FALSE;\n  storage.fTXContinueOnXoff = TRUE;\n  
storage.fDtrControl = DTR_CONTROL_ENABLE;\n  storage.fDsrSensitivity = FALSE;\n  storage.fOutX = FALSE;\n  storage.fInX = FALSE;\n  storage.fRtsControl = RTS_CONTROL_ENABLE;\n  switch (value_)\n  {\n  case none:\n    break;\n  case software:\n    storage.fOutX = TRUE;\n    storage.fInX = TRUE;\n    break;\n  case hardware:\n    storage.fOutxCtsFlow = TRUE;\n    storage.fRtsControl = RTS_CONTROL_HANDSHAKE;\n    break;\n  default:\n    break;\n  }\n#else\n  switch (value_)\n  {\n  case none:\n    storage.c_iflag &= ~(IXOFF | IXON);\n# if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)\n    storage.c_cflag &= ~CRTSCTS;\n# elif defined(__QNXNTO__)\n    storage.c_cflag &= ~(IHFLOW | OHFLOW);\n# endif\n    break;\n  case software:\n    storage.c_iflag |= IXOFF | IXON;\n# if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)\n    storage.c_cflag &= ~CRTSCTS;\n# elif defined(__QNXNTO__)\n    storage.c_cflag &= ~(IHFLOW | OHFLOW);\n# endif\n    break;\n  case hardware:\n# if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)\n    storage.c_iflag &= ~(IXOFF | IXON);\n    storage.c_cflag |= CRTSCTS;\n    break;\n# elif defined(__QNXNTO__)\n    storage.c_iflag &= ~(IXOFF | IXON);\n    storage.c_cflag |= (IHFLOW | OHFLOW);\n    break;\n# else\n    ec = asio::error::operation_not_supported;\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n# endif\n  default:\n    break;\n  }\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nASIO_SYNC_OP_VOID serial_port_base::flow_control::load(\n    const ASIO_OPTION_STORAGE& storage, asio::error_code& ec)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  if (storage.fOutX && storage.fInX)\n  {\n    value_ = software;\n  }\n  else if (storage.fOutxCtsFlow && storage.fRtsControl == RTS_CONTROL_HANDSHAKE)\n  {\n    value_ = hardware;\n  }\n  else\n  {\n    value_ = none;\n  }\n#else\n  if (storage.c_iflag & (IXOFF | IXON))\n  {\n    value_ = software;\n  }\n# if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)\n  else if 
(storage.c_cflag & CRTSCTS)\n  {\n    value_ = hardware;\n  }\n# elif defined(__QNXNTO__)\n  else if (storage.c_cflag & IHFLOW && storage.c_cflag & OHFLOW)\n  {\n    value_ = hardware;\n  }\n# endif\n  else\n  {\n    value_ = none;\n  }\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nserial_port_base::parity::parity(serial_port_base::parity::type t)\n  : value_(t)\n{\n  if (t != none && t != odd && t != even)\n  {\n    std::out_of_range ex(\"invalid parity value\");\n    asio::detail::throw_exception(ex);\n  }\n}\n\nASIO_SYNC_OP_VOID serial_port_base::parity::store(\n    ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  switch (value_)\n  {\n  case none:\n    storage.fParity = FALSE;\n    storage.Parity = NOPARITY;\n    break;\n  case odd:\n    storage.fParity = TRUE;\n    storage.Parity = ODDPARITY;\n    break;\n  case even:\n    storage.fParity = TRUE;\n    storage.Parity = EVENPARITY;\n    break;\n  default:\n    break;\n  }\n#else\n  switch (value_)\n  {\n  case none:\n    storage.c_iflag |= IGNPAR;\n    storage.c_cflag &= ~(PARENB | PARODD);\n    break;\n  case even:\n    storage.c_iflag &= ~(IGNPAR | PARMRK);\n    storage.c_iflag |= INPCK;\n    storage.c_cflag |= PARENB;\n    storage.c_cflag &= ~PARODD;\n    break;\n  case odd:\n    storage.c_iflag &= ~(IGNPAR | PARMRK);\n    storage.c_iflag |= INPCK;\n    storage.c_cflag |= (PARENB | PARODD);\n    break;\n  default:\n    break;\n  }\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nASIO_SYNC_OP_VOID serial_port_base::parity::load(\n    const ASIO_OPTION_STORAGE& storage, asio::error_code& ec)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  if (storage.Parity == EVENPARITY)\n  {\n    value_ = even;\n  }\n  else if (storage.Parity == ODDPARITY)\n  {\n    value_ = odd;\n  }\n  else\n  {\n    value_ = none;\n  }\n#else\n  if (storage.c_cflag & PARENB)\n  {\n    if (storage.c_cflag & 
PARODD)\n    {\n      value_ = odd;\n    }\n    else\n    {\n      value_ = even;\n    }\n  }\n  else\n  {\n    value_ = none;\n  }\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nserial_port_base::stop_bits::stop_bits(\n    serial_port_base::stop_bits::type t)\n  : value_(t)\n{\n  if (t != one && t != onepointfive && t != two)\n  {\n    std::out_of_range ex(\"invalid stop_bits value\");\n    asio::detail::throw_exception(ex);\n  }\n}\n\nASIO_SYNC_OP_VOID serial_port_base::stop_bits::store(\n    ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  switch (value_)\n  {\n  case one:\n    storage.StopBits = ONESTOPBIT;\n    break;\n  case onepointfive:\n    storage.StopBits = ONE5STOPBITS;\n    break;\n  case two:\n    storage.StopBits = TWOSTOPBITS;\n    break;\n  default:\n    break;\n  }\n#else\n  switch (value_)\n  {\n  case one:\n    storage.c_cflag &= ~CSTOPB;\n    break;\n  case two:\n    storage.c_cflag |= CSTOPB;\n    break;\n  default:\n    ec = asio::error::operation_not_supported;\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nASIO_SYNC_OP_VOID serial_port_base::stop_bits::load(\n    const ASIO_OPTION_STORAGE& storage, asio::error_code& ec)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  if (storage.StopBits == ONESTOPBIT)\n  {\n    value_ = one;\n  }\n  else if (storage.StopBits == ONE5STOPBITS)\n  {\n    value_ = onepointfive;\n  }\n  else if (storage.StopBits == TWOSTOPBITS)\n  {\n    value_ = two;\n  }\n  else\n  {\n    value_ = one;\n  }\n#else\n  value_ = (storage.c_cflag & CSTOPB) ? 
two : one;\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nserial_port_base::character_size::character_size(unsigned int t)\n  : value_(t)\n{\n  if (t < 5 || t > 8)\n  {\n    std::out_of_range ex(\"invalid character_size value\");\n    asio::detail::throw_exception(ex);\n  }\n}\n\nASIO_SYNC_OP_VOID serial_port_base::character_size::store(\n    ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  storage.ByteSize = value_;\n#else\n  storage.c_cflag &= ~CSIZE;\n  switch (value_)\n  {\n  case 5: storage.c_cflag |= CS5; break;\n  case 6: storage.c_cflag |= CS6; break;\n  case 7: storage.c_cflag |= CS7; break;\n  case 8: storage.c_cflag |= CS8; break;\n  default: break;\n  }\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nASIO_SYNC_OP_VOID serial_port_base::character_size::load(\n    const ASIO_OPTION_STORAGE& storage, asio::error_code& ec)\n{\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  value_ = storage.ByteSize;\n#else\n  if ((storage.c_cflag & CSIZE) == CS5) { value_ = 5; }\n  else if ((storage.c_cflag & CSIZE) == CS6) { value_ = 6; }\n  else if ((storage.c_cflag & CSIZE) == CS7) { value_ = 7; }\n  else if ((storage.c_cflag & CSIZE) == CS8) { value_ = 8; }\n  else\n  {\n    // Hmmm, use 8 for now.\n    value_ = 8;\n  }\n#endif\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#undef ASIO_OPTION_STORAGE\n\n#endif // defined(ASIO_HAS_SERIAL_PORT)\n\n#endif // ASIO_IMPL_SERIAL_PORT_BASE_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/spawn.hpp",
    "content": "//\n// impl/spawn.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_SPAWN_HPP\n#define ASIO_IMPL_SPAWN_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/bind_executor.hpp\"\n#include \"asio/detail/atomic_count.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/system_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n  template <typename Handler, typename T>\n  class coro_handler\n  {\n  public:\n    coro_handler(basic_yield_context<Handler> ctx)\n      : coro_(ctx.coro_.lock()),\n        ca_(ctx.ca_),\n        handler_(ctx.handler_),\n        ready_(0),\n        ec_(ctx.ec_),\n        value_(0)\n    {\n    }\n\n    void operator()(T value)\n    {\n      *ec_ = asio::error_code();\n      *value_ = ASIO_MOVE_CAST(T)(value);\n      if (--*ready_ == 0)\n        (*coro_)();\n    }\n\n    void operator()(asio::error_code ec, T value)\n    {\n      *ec_ = ec;\n      *value_ = ASIO_MOVE_CAST(T)(value);\n      if (--*ready_ == 0)\n        (*coro_)();\n    }\n\n  //private:\n    shared_ptr<typename basic_yield_context<Handler>::callee_type> coro_;\n    typename basic_yield_context<Handler>::caller_type& ca_;\n    Handler handler_;\n    atomic_count* ready_;\n    asio::error_code* ec_;\n   
 T* value_;\n  };\n\n  template <typename Handler>\n  class coro_handler<Handler, void>\n  {\n  public:\n    coro_handler(basic_yield_context<Handler> ctx)\n      : coro_(ctx.coro_.lock()),\n        ca_(ctx.ca_),\n        handler_(ctx.handler_),\n        ready_(0),\n        ec_(ctx.ec_)\n    {\n    }\n\n    void operator()()\n    {\n      *ec_ = asio::error_code();\n      if (--*ready_ == 0)\n        (*coro_)();\n    }\n\n    void operator()(asio::error_code ec)\n    {\n      *ec_ = ec;\n      if (--*ready_ == 0)\n        (*coro_)();\n    }\n\n  //private:\n    shared_ptr<typename basic_yield_context<Handler>::callee_type> coro_;\n    typename basic_yield_context<Handler>::caller_type& ca_;\n    Handler handler_;\n    atomic_count* ready_;\n    asio::error_code* ec_;\n  };\n\n  template <typename Handler, typename T>\n  inline void* asio_handler_allocate(std::size_t size,\n      coro_handler<Handler, T>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename Handler, typename T>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      coro_handler<Handler, T>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename Handler, typename T>\n  inline bool asio_handler_is_continuation(coro_handler<Handler, T>*)\n  {\n    return true;\n  }\n\n  template <typename Function, typename Handler, typename T>\n  inline void asio_handler_invoke(Function& function,\n      coro_handler<Handler, T>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename Handler, typename T>\n  inline void asio_handler_invoke(const Function& function,\n      coro_handler<Handler, T>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename 
Handler, typename T>\n  class coro_async_result\n  {\n  public:\n    typedef coro_handler<Handler, T> completion_handler_type;\n    typedef T return_type;\n\n    explicit coro_async_result(completion_handler_type& h)\n      : handler_(h),\n        ca_(h.ca_),\n        ready_(2)\n    {\n      h.ready_ = &ready_;\n      out_ec_ = h.ec_;\n      if (!out_ec_) h.ec_ = &ec_;\n      h.value_ = &value_;\n    }\n\n    return_type get()\n    {\n      // Must not hold shared_ptr to coro while suspended.\n      handler_.coro_.reset();\n\n      if (--ready_ != 0)\n        ca_();\n      if (!out_ec_ && ec_) throw asio::system_error(ec_);\n      return ASIO_MOVE_CAST(return_type)(value_);\n    }\n\n  private:\n    completion_handler_type& handler_;\n    typename basic_yield_context<Handler>::caller_type& ca_;\n    atomic_count ready_;\n    asio::error_code* out_ec_;\n    asio::error_code ec_;\n    return_type value_;\n  };\n\n  template <typename Handler>\n  class coro_async_result<Handler, void>\n  {\n  public:\n    typedef coro_handler<Handler, void> completion_handler_type;\n    typedef void return_type;\n\n    explicit coro_async_result(completion_handler_type& h)\n      : handler_(h),\n        ca_(h.ca_),\n        ready_(2)\n    {\n      h.ready_ = &ready_;\n      out_ec_ = h.ec_;\n      if (!out_ec_) h.ec_ = &ec_;\n    }\n\n    void get()\n    {\n      // Must not hold shared_ptr to coro while suspended.\n      handler_.coro_.reset();\n\n      if (--ready_ != 0)\n        ca_();\n      if (!out_ec_ && ec_) throw asio::system_error(ec_);\n    }\n\n  private:\n    completion_handler_type& handler_;\n    typename basic_yield_context<Handler>::caller_type& ca_;\n    atomic_count ready_;\n    asio::error_code* out_ec_;\n    asio::error_code ec_;\n  };\n\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Handler, typename ReturnType>\nclass async_result<basic_yield_context<Handler>, ReturnType()>\n  : public detail::coro_async_result<Handler, 
void>\n{\npublic:\n  explicit async_result(\n    typename detail::coro_async_result<Handler,\n      void>::completion_handler_type& h)\n    : detail::coro_async_result<Handler, void>(h)\n  {\n  }\n};\n\ntemplate <typename Handler, typename ReturnType, typename Arg1>\nclass async_result<basic_yield_context<Handler>, ReturnType(Arg1)>\n  : public detail::coro_async_result<Handler, typename decay<Arg1>::type>\n{\npublic:\n  explicit async_result(\n    typename detail::coro_async_result<Handler,\n      typename decay<Arg1>::type>::completion_handler_type& h)\n    : detail::coro_async_result<Handler, typename decay<Arg1>::type>(h)\n  {\n  }\n};\n\ntemplate <typename Handler, typename ReturnType>\nclass async_result<basic_yield_context<Handler>,\n    ReturnType(asio::error_code)>\n  : public detail::coro_async_result<Handler, void>\n{\npublic:\n  explicit async_result(\n    typename detail::coro_async_result<Handler,\n      void>::completion_handler_type& h)\n    : detail::coro_async_result<Handler, void>(h)\n  {\n  }\n};\n\ntemplate <typename Handler, typename ReturnType, typename Arg2>\nclass async_result<basic_yield_context<Handler>,\n    ReturnType(asio::error_code, Arg2)>\n  : public detail::coro_async_result<Handler, typename decay<Arg2>::type>\n{\npublic:\n  explicit async_result(\n    typename detail::coro_async_result<Handler,\n      typename decay<Arg2>::type>::completion_handler_type& h)\n    : detail::coro_async_result<Handler, typename decay<Arg2>::type>(h)\n  {\n  }\n};\n\ntemplate <typename Handler, typename T, typename Allocator>\nstruct associated_allocator<detail::coro_handler<Handler, T>, Allocator>\n{\n  typedef typename associated_allocator<Handler, Allocator>::type type;\n\n  static type get(const detail::coro_handler<Handler, T>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<Handler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Handler, typename T, typename Executor>\nstruct 
associated_executor<detail::coro_handler<Handler, T>, Executor>\n{\n  typedef typename associated_executor<Handler, Executor>::type type;\n\n  static type get(const detail::coro_handler<Handler, T>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<Handler, Executor>::get(h.handler_, ex);\n  }\n};\n\nnamespace detail {\n\n  template <typename Handler, typename Function>\n  struct spawn_data : private noncopyable\n  {\n    template <typename Hand, typename Func>\n    spawn_data(ASIO_MOVE_ARG(Hand) handler,\n        bool call_handler, ASIO_MOVE_ARG(Func) function)\n      : handler_(ASIO_MOVE_CAST(Hand)(handler)),\n        call_handler_(call_handler),\n        function_(ASIO_MOVE_CAST(Func)(function))\n    {\n    }\n\n    weak_ptr<typename basic_yield_context<Handler>::callee_type> coro_;\n    Handler handler_;\n    bool call_handler_;\n    Function function_;\n  };\n\n  template <typename Handler, typename Function>\n  struct coro_entry_point\n  {\n    void operator()(typename basic_yield_context<Handler>::caller_type& ca)\n    {\n      shared_ptr<spawn_data<Handler, Function> > data(data_);\n#if !defined(BOOST_COROUTINES_UNIDIRECT) && !defined(BOOST_COROUTINES_V2)\n      ca(); // Yield until coroutine pointer has been initialised.\n#endif // !defined(BOOST_COROUTINES_UNIDIRECT) && !defined(BOOST_COROUTINES_V2)\n      const basic_yield_context<Handler> yield(\n          data->coro_, ca, data->handler_);\n\n      (data->function_)(yield);\n      if (data->call_handler_)\n        (data->handler_)();\n    }\n\n    shared_ptr<spawn_data<Handler, Function> > data_;\n  };\n\n  template <typename Handler, typename Function>\n  struct spawn_helper\n  {\n    void operator()()\n    {\n      typedef typename basic_yield_context<Handler>::callee_type callee_type;\n      coro_entry_point<Handler, Function> entry_point = { data_ };\n      shared_ptr<callee_type> coro(new callee_type(entry_point, attributes_));\n      data_->coro_ = 
coro;\n      (*coro)();\n    }\n\n    shared_ptr<spawn_data<Handler, Function> > data_;\n    boost::coroutines::attributes attributes_;\n  };\n\n  template <typename Function, typename Handler, typename Function1>\n  inline void asio_handler_invoke(Function& function,\n      spawn_helper<Handler, Function1>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->data_->handler_);\n  }\n\n  template <typename Function, typename Handler, typename Function1>\n  inline void asio_handler_invoke(const Function& function,\n      spawn_helper<Handler, Function1>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->data_->handler_);\n  }\n\n  inline void default_spawn_handler() {}\n\n} // namespace detail\n\ntemplate <typename Function>\ninline void spawn(ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes)\n{\n  typedef typename decay<Function>::type function_type;\n\n  typename associated_executor<function_type>::type ex(\n      (get_associated_executor)(function));\n\n  asio::spawn(ex, ASIO_MOVE_CAST(Function)(function), attributes);\n}\n\ntemplate <typename Handler, typename Function>\nvoid spawn(ASIO_MOVE_ARG(Handler) handler,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes,\n    typename enable_if<!is_executor<typename decay<Handler>::type>::value &&\n      !is_convertible<Handler&, execution_context&>::value>::type*)\n{\n  typedef typename decay<Handler>::type handler_type;\n  typedef typename decay<Function>::type function_type;\n\n  typename associated_executor<handler_type>::type ex(\n      (get_associated_executor)(handler));\n\n  typename associated_allocator<handler_type>::type a(\n      (get_associated_allocator)(handler));\n\n  detail::spawn_helper<handler_type, function_type> helper;\n  helper.data_.reset(\n      new detail::spawn_data<handler_type, function_type>(\n        
ASIO_MOVE_CAST(Handler)(handler), true,\n        ASIO_MOVE_CAST(Function)(function)));\n  helper.attributes_ = attributes;\n\n  ex.dispatch(helper, a);\n}\n\ntemplate <typename Handler, typename Function>\nvoid spawn(basic_yield_context<Handler> ctx,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes)\n{\n  typedef typename decay<Function>::type function_type;\n\n  Handler handler(ctx.handler_); // Explicit copy that might be moved from.\n\n  typename associated_executor<Handler>::type ex(\n      (get_associated_executor)(handler));\n\n  typename associated_allocator<Handler>::type a(\n      (get_associated_allocator)(handler));\n\n  detail::spawn_helper<Handler, function_type> helper;\n  helper.data_.reset(\n      new detail::spawn_data<Handler, function_type>(\n        ASIO_MOVE_CAST(Handler)(handler), false,\n        ASIO_MOVE_CAST(Function)(function)));\n  helper.attributes_ = attributes;\n\n  ex.dispatch(helper, a);\n}\n\ntemplate <typename Function, typename Executor>\ninline void spawn(const Executor& ex,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes,\n    typename enable_if<is_executor<Executor>::value>::type*)\n{\n  asio::spawn(asio::strand<Executor>(ex),\n      ASIO_MOVE_CAST(Function)(function), attributes);\n}\n\ntemplate <typename Function, typename Executor>\ninline void spawn(const strand<Executor>& ex,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes)\n{\n  asio::spawn(asio::bind_executor(\n        ex, &detail::default_spawn_handler),\n      ASIO_MOVE_CAST(Function)(function), attributes);\n}\n\ntemplate <typename Function>\ninline void spawn(const asio::io_context::strand& s,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes)\n{\n  asio::spawn(asio::bind_executor(\n        s, &detail::default_spawn_handler),\n      ASIO_MOVE_CAST(Function)(function), attributes);\n}\n\ntemplate 
<typename Function, typename ExecutionContext>\ninline void spawn(ExecutionContext& ctx,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes,\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type*)\n{\n  asio::spawn(ctx.get_executor(),\n      ASIO_MOVE_CAST(Function)(function), attributes);\n}\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_SPAWN_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/src.cpp",
    "content": "//\n// impl/src.cpp\n// ~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#if defined(_MSC_VER) \\\n  || defined(__BORLANDC__) \\\n  || defined(__DMC__)\n# pragma message ( \\\n    \"This file is deprecated. \" \\\n    \"Please #include <asio/impl/src.hpp> instead.\")\n#elif defined(__GNUC__) \\\n  || defined(__HP_aCC) \\\n  || defined(__SUNPRO_CC) \\\n  || defined(__IBMCPP__)\n# warning \"This file is deprecated.\"\n# warning \"Please #include <asio/impl/src.hpp> instead.\"\n#endif\n\n#include \"asio/impl/src.hpp\"\n"
  },
  {
    "path": "src/third_party/asio/impl/src.hpp",
    "content": "//\n// impl/src.hpp\n// ~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_SRC_HPP\n#define ASIO_IMPL_SRC_HPP\n\n#define ASIO_SOURCE\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# error Do not compile Asio library source with ASIO_HEADER_ONLY defined\n#endif\n\n#include \"asio/impl/error.ipp\"\n#include \"asio/impl/error_code.ipp\"\n#include \"asio/impl/execution_context.ipp\"\n#include \"asio/impl/executor.ipp\"\n#include \"asio/impl/handler_alloc_hook.ipp\"\n#include \"asio/impl/io_context.ipp\"\n#include \"asio/impl/serial_port_base.ipp\"\n#include \"asio/impl/system_context.ipp\"\n#include \"asio/impl/thread_pool.ipp\"\n#include \"asio/detail/impl/buffer_sequence_adapter.ipp\"\n#include \"asio/detail/impl/descriptor_ops.ipp\"\n#include \"asio/detail/impl/dev_poll_reactor.ipp\"\n#include \"asio/detail/impl/epoll_reactor.ipp\"\n#include \"asio/detail/impl/eventfd_select_interrupter.ipp\"\n#include \"asio/detail/impl/handler_tracking.ipp\"\n#include \"asio/detail/impl/kqueue_reactor.ipp\"\n#include \"asio/detail/impl/null_event.ipp\"\n#include \"asio/detail/impl/pipe_select_interrupter.ipp\"\n#include \"asio/detail/impl/posix_event.ipp\"\n#include \"asio/detail/impl/posix_mutex.ipp\"\n#include \"asio/detail/impl/posix_thread.ipp\"\n#include \"asio/detail/impl/posix_tss_ptr.ipp\"\n#include \"asio/detail/impl/reactive_descriptor_service.ipp\"\n#include \"asio/detail/impl/reactive_serial_port_service.ipp\"\n#include \"asio/detail/impl/reactive_socket_service_base.ipp\"\n#include \"asio/detail/impl/resolver_service_base.ipp\"\n#include \"asio/detail/impl/scheduler.ipp\"\n#include \"asio/detail/impl/select_reactor.ipp\"\n#include \"asio/detail/impl/service_registry.ipp\"\n#include 
\"asio/detail/impl/signal_set_service.ipp\"\n#include \"asio/detail/impl/socket_ops.ipp\"\n#include \"asio/detail/impl/socket_select_interrupter.ipp\"\n#include \"asio/detail/impl/strand_executor_service.ipp\"\n#include \"asio/detail/impl/strand_service.ipp\"\n#include \"asio/detail/impl/throw_error.ipp\"\n#include \"asio/detail/impl/timer_queue_ptime.ipp\"\n#include \"asio/detail/impl/timer_queue_set.ipp\"\n#include \"asio/detail/impl/win_iocp_handle_service.ipp\"\n#include \"asio/detail/impl/win_iocp_io_context.ipp\"\n#include \"asio/detail/impl/win_iocp_serial_port_service.ipp\"\n#include \"asio/detail/impl/win_iocp_socket_service_base.ipp\"\n#include \"asio/detail/impl/win_event.ipp\"\n#include \"asio/detail/impl/win_mutex.ipp\"\n#include \"asio/detail/impl/win_object_handle_service.ipp\"\n#include \"asio/detail/impl/win_static_mutex.ipp\"\n#include \"asio/detail/impl/win_thread.ipp\"\n#include \"asio/detail/impl/win_tss_ptr.ipp\"\n#include \"asio/detail/impl/winrt_ssocket_service_base.ipp\"\n#include \"asio/detail/impl/winrt_timer_scheduler.ipp\"\n#include \"asio/detail/impl/winsock_init.ipp\"\n#include \"asio/generic/detail/impl/endpoint.ipp\"\n#include \"asio/ip/impl/address.ipp\"\n#include \"asio/ip/impl/address_v4.ipp\"\n#include \"asio/ip/impl/address_v6.ipp\"\n#include \"asio/ip/impl/host_name.ipp\"\n#include \"asio/ip/impl/network_v4.ipp\"\n#include \"asio/ip/impl/network_v6.ipp\"\n#include \"asio/ip/detail/impl/endpoint.ipp\"\n#include \"asio/local/detail/impl/endpoint.ipp\"\n\n#endif // ASIO_IMPL_SRC_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/system_context.hpp",
    "content": "//\n// impl/system_context.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_SYSTEM_CONTEXT_HPP\n#define ASIO_IMPL_SYSTEM_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/system_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\ninline system_context::executor_type\nsystem_context::get_executor() ASIO_NOEXCEPT\n{\n  return system_executor();\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_SYSTEM_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/system_context.ipp",
    "content": "//\n// impl/system_context.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_SYSTEM_CONTEXT_IPP\n#define ASIO_IMPL_SYSTEM_CONTEXT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/system_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nstruct system_context::thread_function\n{\n  detail::scheduler* scheduler_;\n\n  void operator()()\n  {\n    asio::error_code ec;\n    scheduler_->run(ec);\n  }\n};\n\nsystem_context::system_context()\n  : scheduler_(add_scheduler(new detail::scheduler(*this, 0, false)))\n{\n  scheduler_.work_started();\n\n  thread_function f = { &scheduler_ };\n  std::size_t num_threads = detail::thread::hardware_concurrency() * 2;\n  threads_.create_threads(f, num_threads ? num_threads : 2);\n}\n\nsystem_context::~system_context()\n{\n  scheduler_.work_finished();\n  scheduler_.stop();\n  threads_.join();\n}\n\nvoid system_context::stop()\n{\n  scheduler_.stop();\n}\n\nbool system_context::stopped() const ASIO_NOEXCEPT\n{\n  return scheduler_.stopped();\n}\n\nvoid system_context::join()\n{\n  scheduler_.work_finished();\n  threads_.join();\n}\n\ndetail::scheduler& system_context::add_scheduler(detail::scheduler* s)\n{\n  detail::scoped_ptr<detail::scheduler> scoped_impl(s);\n  asio::add_service<detail::scheduler>(*this, scoped_impl.get());\n  return *scoped_impl.release();\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_SYSTEM_CONTEXT_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/system_executor.hpp",
    "content": "//\n// impl/system_executor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_SYSTEM_EXECUTOR_HPP\n#define ASIO_IMPL_SYSTEM_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/executor_op.hpp\"\n#include \"asio/detail/global.hpp\"\n#include \"asio/detail/recycling_allocator.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/system_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\ninline system_context& system_executor::context() const ASIO_NOEXCEPT\n{\n  return detail::global<system_context>();\n}\n\ntemplate <typename Function, typename Allocator>\nvoid system_executor::dispatch(\n    ASIO_MOVE_ARG(Function) f, const Allocator&) const\n{\n  typename decay<Function>::type tmp(ASIO_MOVE_CAST(Function)(f));\n  asio_handler_invoke_helpers::invoke(tmp, tmp);\n}\n\ntemplate <typename Function, typename Allocator>\nvoid system_executor::post(\n    ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n{\n  typedef typename decay<Function>::type function_type;\n\n  system_context& ctx = detail::global<system_context>();\n\n  // Allocate and construct an operation to wrap the function.\n  typedef detail::executor_op<function_type, Allocator> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a);\n\n  ASIO_HANDLER_CREATION((ctx, *p.p,\n        \"system_executor\", &this->context(), 0, \"post\"));\n\n  ctx.scheduler_.post_immediate_completion(p.p, false);\n  p.v = p.p = 0;\n}\n\ntemplate <typename Function, typename Allocator>\nvoid system_executor::defer(\n    ASIO_MOVE_ARG(Function) f, const 
Allocator& a) const\n{\n  typedef typename decay<Function>::type function_type;\n\n  system_context& ctx = detail::global<system_context>();\n\n  // Allocate and construct an operation to wrap the function.\n  typedef detail::executor_op<function_type, Allocator> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a);\n\n  ASIO_HANDLER_CREATION((ctx, *p.p,\n        \"system_executor\", &this->context(), 0, \"defer\"));\n\n  ctx.scheduler_.post_immediate_completion(p.p, true);\n  p.v = p.p = 0;\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_SYSTEM_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/thread_pool.hpp",
    "content": "//\n// impl/thread_pool.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_THREAD_POOL_HPP\n#define ASIO_IMPL_THREAD_POOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/executor_op.hpp\"\n#include \"asio/detail/fenced_block.hpp\"\n#include \"asio/detail/recycling_allocator.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\ninline thread_pool::executor_type\nthread_pool::get_executor() ASIO_NOEXCEPT\n{\n  return executor_type(*this);\n}\n\ninline thread_pool&\nthread_pool::executor_type::context() const ASIO_NOEXCEPT\n{\n  return pool_;\n}\n\ninline void\nthread_pool::executor_type::on_work_started() const ASIO_NOEXCEPT\n{\n  pool_.scheduler_.work_started();\n}\n\ninline void thread_pool::executor_type::on_work_finished()\nconst ASIO_NOEXCEPT\n{\n  pool_.scheduler_.work_finished();\n}\n\ntemplate <typename Function, typename Allocator>\nvoid thread_pool::executor_type::dispatch(\n    ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n{\n  typedef typename decay<Function>::type function_type;\n\n  // Invoke immediately if we are already inside the thread pool.\n  if (pool_.scheduler_.can_dispatch())\n  {\n    // Make a local, non-const copy of the function.\n    function_type tmp(ASIO_MOVE_CAST(Function)(f));\n\n    detail::fenced_block b(detail::fenced_block::full);\n    asio_handler_invoke_helpers::invoke(tmp, tmp);\n    return;\n  }\n\n  // Allocate and construct an operation to wrap the function.\n  typedef detail::executor_op<function_type, Allocator> op;\n  typename op::ptr p = { detail::addressof(a), 
op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a);\n\n  ASIO_HANDLER_CREATION((pool_, *p.p,\n        \"thread_pool\", &this->context(), 0, \"dispatch\"));\n\n  pool_.scheduler_.post_immediate_completion(p.p, false);\n  p.v = p.p = 0;\n}\n\ntemplate <typename Function, typename Allocator>\nvoid thread_pool::executor_type::post(\n    ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n{\n  typedef typename decay<Function>::type function_type;\n\n  // Allocate and construct an operation to wrap the function.\n  typedef detail::executor_op<function_type, Allocator> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a);\n\n  ASIO_HANDLER_CREATION((pool_, *p.p,\n        \"thread_pool\", &this->context(), 0, \"post\"));\n\n  pool_.scheduler_.post_immediate_completion(p.p, false);\n  p.v = p.p = 0;\n}\n\ntemplate <typename Function, typename Allocator>\nvoid thread_pool::executor_type::defer(\n    ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n{\n  typedef typename decay<Function>::type function_type;\n\n  // Allocate and construct an operation to wrap the function.\n  typedef detail::executor_op<function_type, Allocator> op;\n  typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };\n  p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a);\n\n  ASIO_HANDLER_CREATION((pool_, *p.p,\n        \"thread_pool\", &this->context(), 0, \"defer\"));\n\n  pool_.scheduler_.post_immediate_completion(p.p, true);\n  p.v = p.p = 0;\n}\n\ninline bool\nthread_pool::executor_type::running_in_this_thread() const ASIO_NOEXCEPT\n{\n  return pool_.scheduler_.can_dispatch();\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_THREAD_POOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/thread_pool.ipp",
    "content": "//\n// impl/thread_pool.ipp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_THREAD_POOL_IPP\n#define ASIO_IMPL_THREAD_POOL_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/thread_pool.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nstruct thread_pool::thread_function\n{\n  detail::scheduler* scheduler_;\n\n  void operator()()\n  {\n    asio::error_code ec;\n    scheduler_->run(ec);\n  }\n};\n\nthread_pool::thread_pool()\n  : scheduler_(add_scheduler(new detail::scheduler(*this, 0, false)))\n{\n  scheduler_.work_started();\n\n  thread_function f = { &scheduler_ };\n  std::size_t num_threads = detail::thread::hardware_concurrency() * 2;\n  threads_.create_threads(f, num_threads ? num_threads : 2);\n}\n\nthread_pool::thread_pool(std::size_t num_threads)\n  : scheduler_(add_scheduler(new detail::scheduler(\n          *this, num_threads == 1 ? 1 : 0, false)))\n{\n  scheduler_.work_started();\n\n  thread_function f = { &scheduler_ };\n  threads_.create_threads(f, num_threads);\n}\n\nthread_pool::~thread_pool()\n{\n  stop();\n  join();\n}\n\nvoid thread_pool::stop()\n{\n  scheduler_.stop();\n}\n\nvoid thread_pool::join()\n{\n  if (!threads_.empty())\n  {\n    scheduler_.work_finished();\n    threads_.join();\n  }\n}\n\ndetail::scheduler& thread_pool::add_scheduler(detail::scheduler* s)\n{\n  detail::scoped_ptr<detail::scheduler> scoped_impl(s);\n  asio::add_service<detail::scheduler>(*this, scoped_impl.get());\n  return *scoped_impl.release();\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_THREAD_POOL_IPP\n"
  },
  {
    "path": "src/third_party/asio/impl/use_awaitable.hpp",
    "content": "//\n// impl/use_awaitable.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_USE_AWAITABLE_HPP\n#define ASIO_IMPL_USE_AWAITABLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/async_result.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Executor, typename T>\nclass awaitable_handler_base\n  : public awaitable_thread<Executor>\n{\npublic:\n  typedef void result_type;\n  typedef awaitable<T, Executor> awaitable_type;\n\n  // Construct from the entry point of a new thread of execution.\n  awaitable_handler_base(awaitable<void, Executor> a, const Executor& ex)\n    : awaitable_thread<Executor>(std::move(a), ex)\n  {\n  }\n\n  // Transfer ownership from another awaitable_thread.\n  explicit awaitable_handler_base(awaitable_thread<Executor>* h)\n    : awaitable_thread<Executor>(std::move(*h))\n  {\n  }\n\nprotected:\n  awaitable_frame<T, Executor>* frame() noexcept\n  {\n    return static_cast<awaitable_frame<T, Executor>*>(this->top_of_stack_);\n  }\n};\n\ntemplate <typename, typename...>\nclass awaitable_handler;\n\ntemplate <typename Executor>\nclass awaitable_handler<Executor, void>\n  : public awaitable_handler_base<Executor, void>\n{\npublic:\n  using awaitable_handler_base<Executor, void>::awaitable_handler_base;\n\n  void operator()()\n  {\n    this->frame()->attach_thread(this);\n    this->frame()->return_void();\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\ntemplate <typename Executor>\nclass awaitable_handler<Executor, asio::error_code>\n  : public awaitable_handler_base<Executor, 
void>\n{\npublic:\n  using awaitable_handler_base<Executor, void>::awaitable_handler_base;\n\n  void operator()(const asio::error_code& ec)\n  {\n    this->frame()->attach_thread(this);\n    if (ec)\n      this->frame()->set_error(ec);\n    else\n      this->frame()->return_void();\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\ntemplate <typename Executor>\nclass awaitable_handler<Executor, std::exception_ptr>\n  : public awaitable_handler_base<Executor, void>\n{\npublic:\n  using awaitable_handler_base<Executor, void>::awaitable_handler_base;\n\n  void operator()(std::exception_ptr ex)\n  {\n    this->frame()->attach_thread(this);\n    if (ex)\n      this->frame()->set_except(ex);\n    else\n      this->frame()->return_void();\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\ntemplate <typename Executor, typename T>\nclass awaitable_handler<Executor, T>\n  : public awaitable_handler_base<Executor, T>\n{\npublic:\n  using awaitable_handler_base<Executor, T>::awaitable_handler_base;\n\n  template <typename Arg>\n  void operator()(Arg&& arg)\n  {\n    this->frame()->attach_thread(this);\n    this->frame()->return_value(std::forward<Arg>(arg));\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\ntemplate <typename Executor, typename T>\nclass awaitable_handler<Executor, asio::error_code, T>\n  : public awaitable_handler_base<Executor, T>\n{\npublic:\n  using awaitable_handler_base<Executor, T>::awaitable_handler_base;\n\n  template <typename Arg>\n  void operator()(const asio::error_code& ec, Arg&& arg)\n  {\n    this->frame()->attach_thread(this);\n    if (ec)\n      this->frame()->set_error(ec);\n    else\n      this->frame()->return_value(std::forward<Arg>(arg));\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\ntemplate <typename Executor, typename T>\nclass awaitable_handler<Executor, std::exception_ptr, T>\n  : public awaitable_handler_base<Executor, T>\n{\npublic:\n  using awaitable_handler_base<Executor, 
T>::awaitable_handler_base;\n\n  template <typename Arg>\n  void operator()(std::exception_ptr ex, Arg&& arg)\n  {\n    this->frame()->attach_thread(this);\n    if (ex)\n      this->frame()->set_except(ex);\n    else\n      this->frame()->return_value(std::forward<Arg>(arg));\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\ntemplate <typename Executor, typename... Ts>\nclass awaitable_handler\n  : public awaitable_handler_base<Executor, std::tuple<Ts...>>\n{\npublic:\n  using awaitable_handler_base<Executor,\n    std::tuple<Ts...>>::awaitable_handler_base;\n\n  template <typename... Args>\n  void operator()(Args&&... args)\n  {\n    this->frame()->attach_thread(this);\n    this->frame()->return_values(std::forward<Args>(args)...);\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\ntemplate <typename Executor, typename... Ts>\nclass awaitable_handler<Executor, asio::error_code, Ts...>\n  : public awaitable_handler_base<Executor, std::tuple<Ts...>>\n{\npublic:\n  using awaitable_handler_base<Executor,\n    std::tuple<Ts...>>::awaitable_handler_base;\n\n  template <typename... Args>\n  void operator()(const asio::error_code& ec, Args&&... args)\n  {\n    this->frame()->attach_thread(this);\n    if (ec)\n      this->frame()->set_error(ec);\n    else\n      this->frame()->return_values(std::forward<Args>(args)...);\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\ntemplate <typename Executor, typename... Ts>\nclass awaitable_handler<Executor, std::exception_ptr, Ts...>\n  : public awaitable_handler_base<Executor, std::tuple<Ts...>>\n{\npublic:\n  using awaitable_handler_base<Executor,\n    std::tuple<Ts...>>::awaitable_handler_base;\n\n  template <typename... Args>\n  void operator()(std::exception_ptr ex, Args&&... 
args)\n  {\n    this->frame()->attach_thread(this);\n    if (ex)\n      this->frame()->set_except(ex);\n    else\n      this->frame()->return_values(std::forward<Args>(args)...);\n    this->frame()->pop_frame();\n    this->pump();\n  }\n};\n\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Executor, typename R, typename... Args>\nclass async_result<use_awaitable_t<Executor>, R(Args...)>\n{\npublic:\n  typedef typename detail::awaitable_handler<\n      Executor, typename decay<Args>::type...> handler_type;\n  typedef typename handler_type::awaitable_type return_type;\n\n#if defined(_MSC_VER)\n  template <typename T>\n  static T dummy_return()\n  {\n    return std::move(*static_cast<T*>(nullptr));\n  }\n\n  template <>\n  static void dummy_return()\n  {\n  }\n#endif // defined(_MSC_VER)\n\n  template <typename Initiation, typename... InitArgs>\n  static return_type initiate(Initiation initiation,\n      use_awaitable_t<Executor>, InitArgs... args)\n  {\n    co_await [&](auto* frame)\n      {\n        handler_type handler(frame->detach_thread());\n        std::move(initiation)(std::move(handler), std::move(args)...);\n        return static_cast<handler_type*>(nullptr);\n      };\n\n    for (;;) {} // Never reached.\n#if defined(_MSC_VER)\n    co_return dummy_return<typename return_type::value_type>();\n#endif // defined(_MSC_VER)\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_USE_AWAITABLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/use_future.hpp",
    "content": "//\n// impl/use_future.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_USE_FUTURE_HPP\n#define ASIO_IMPL_USE_FUTURE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <tuple>\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/packaged_task.hpp\"\n#include \"asio/system_error.hpp\"\n#include \"asio/system_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename T, typename F, typename... Args>\ninline void promise_invoke_and_set(std::promise<T>& p,\n    F& f, ASIO_MOVE_ARG(Args)... args)\n{\n#if !defined(ASIO_NO_EXCEPTIONS)\n  try\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n  {\n    p.set_value(f(ASIO_MOVE_CAST(Args)(args)...));\n  }\n#if !defined(ASIO_NO_EXCEPTIONS)\n  catch (...)\n  {\n    p.set_exception(std::current_exception());\n  }\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n}\n\ntemplate <typename F, typename... Args>\ninline void promise_invoke_and_set(std::promise<void>& p,\n    F& f, ASIO_MOVE_ARG(Args)... 
args)\n{\n#if !defined(ASIO_NO_EXCEPTIONS)\n  try\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n  {\n    f(ASIO_MOVE_CAST(Args)(args)...);\n    p.set_value();\n  }\n#if !defined(ASIO_NO_EXCEPTIONS)\n  catch (...)\n  {\n    p.set_exception(std::current_exception());\n  }\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n}\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename T, typename F>\ninline void promise_invoke_and_set(std::promise<T>& p, F& f)\n{\n#if !defined(ASIO_NO_EXCEPTIONS)\n  try\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n  {\n    p.set_value(f());\n  }\n#if !defined(ASIO_NO_EXCEPTIONS)\n  catch (...)\n  {\n    p.set_exception(std::current_exception());\n  }\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n}\n\ntemplate <typename F, typename Args>\ninline void promise_invoke_and_set(std::promise<void>& p, F& f)\n{\n#if !defined(ASIO_NO_EXCEPTIONS)\n  try\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n  {\n    f();\n    p.set_value();\n#if !defined(ASIO_NO_EXCEPTIONS)\n  }\n  catch (...)\n  {\n    p.set_exception(std::current_exception());\n  }\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n}\n\n#if defined(ASIO_NO_EXCEPTIONS)\n\n#define ASIO_PRIVATE_PROMISE_INVOKE_DEF(n) \\\n  template <typename T, typename F, ASIO_VARIADIC_TPARAMS(n)> \\\n  inline void promise_invoke_and_set(std::promise<T>& p, \\\n      F& f, ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    p.set_value(f(ASIO_VARIADIC_MOVE_ARGS(n))); \\\n  } \\\n  \\\n  template <typename F, ASIO_VARIADIC_TPARAMS(n)> \\\n  inline void promise_invoke_and_set(std::promise<void>& p, \\\n      F& f, ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    f(ASIO_VARIADIC_MOVE_ARGS(n)); \\\n    p.set_value(); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_PROMISE_INVOKE_DEF)\n#undef ASIO_PRIVATE_PROMISE_INVOKE_DEF\n\n#else // defined(ASIO_NO_EXCEPTIONS)\n\n#define ASIO_PRIVATE_PROMISE_INVOKE_DEF(n) \\\n  template <typename T, typename F, ASIO_VARIADIC_TPARAMS(n)> \\\n  inline void promise_invoke_and_set(std::promise<T>& p, 
\\\n      F& f, ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    try \\\n    { \\\n      p.set_value(f(ASIO_VARIADIC_MOVE_ARGS(n))); \\\n    } \\\n    catch (...) \\\n    { \\\n      p.set_exception(std::current_exception()); \\\n    } \\\n  } \\\n  \\\n  template <typename F, ASIO_VARIADIC_TPARAMS(n)> \\\n  inline void promise_invoke_and_set(std::promise<void>& p, \\\n      F& f, ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  { \\\n    try \\\n    { \\\n      f(ASIO_VARIADIC_MOVE_ARGS(n)); \\\n      p.set_value(); \\\n    } \\\n    catch (...) \\\n    { \\\n      p.set_exception(std::current_exception()); \\\n    } \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_PROMISE_INVOKE_DEF)\n#undef ASIO_PRIVATE_PROMISE_INVOKE_DEF\n\n#endif // defined(ASIO_NO_EXCEPTIONS)\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n// A function object adapter to invoke a nullary function object and capture\n// any exception thrown into a promise.\ntemplate <typename T, typename F>\nclass promise_invoker\n{\npublic:\n  promise_invoker(const shared_ptr<std::promise<T> >& p,\n      ASIO_MOVE_ARG(F) f)\n    : p_(p), f_(ASIO_MOVE_CAST(F)(f))\n  {\n  }\n\n  void operator()()\n  {\n#if !defined(ASIO_NO_EXCEPTIONS)\n    try\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n    {\n      f_();\n    }\n#if !defined(ASIO_NO_EXCEPTIONS)\n    catch (...)\n    {\n      p_->set_exception(std::current_exception());\n    }\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n  }\n\nprivate:\n  shared_ptr<std::promise<T> > p_;\n  typename decay<F>::type f_;\n};\n\n// An executor that adapts the system_executor to capture any exeption thrown\n// by a submitted function object and save it into a promise.\ntemplate <typename T>\nclass promise_executor\n{\npublic:\n  explicit promise_executor(const shared_ptr<std::promise<T> >& p)\n    : p_(p)\n  {\n  }\n\n  execution_context& context() const ASIO_NOEXCEPT\n  {\n    return system_executor().context();\n  }\n\n  void on_work_started() const ASIO_NOEXCEPT {}\n  void 
on_work_finished() const ASIO_NOEXCEPT {}\n\n  template <typename F, typename A>\n  void dispatch(ASIO_MOVE_ARG(F) f, const A&) const\n  {\n    promise_invoker<T, F>(p_, ASIO_MOVE_CAST(F)(f))();\n  }\n\n  template <typename F, typename A>\n  void post(ASIO_MOVE_ARG(F) f, const A& a) const\n  {\n    system_executor().post(\n        promise_invoker<T, F>(p_, ASIO_MOVE_CAST(F)(f)), a);\n  }\n\n  template <typename F, typename A>\n  void defer(ASIO_MOVE_ARG(F) f, const A& a) const\n  {\n    system_executor().defer(\n        promise_invoker<T, F>(p_, ASIO_MOVE_CAST(F)(f)), a);\n  }\n\n  friend bool operator==(const promise_executor& a,\n      const promise_executor& b) ASIO_NOEXCEPT\n  {\n    return a.p_ == b.p_;\n  }\n\n  friend bool operator!=(const promise_executor& a,\n      const promise_executor& b) ASIO_NOEXCEPT\n  {\n    return a.p_ != b.p_;\n  }\n\nprivate:\n  shared_ptr<std::promise<T> > p_;\n};\n\n// The base class for all completion handlers that create promises.\ntemplate <typename T>\nclass promise_creator\n{\npublic:\n  typedef promise_executor<T> executor_type;\n\n  executor_type get_executor() const ASIO_NOEXCEPT\n  {\n    return executor_type(p_);\n  }\n\n  typedef std::future<T> future_type;\n\n  future_type get_future()\n  {\n    return p_->get_future();\n  }\n\nprotected:\n  template <typename Allocator>\n  void create_promise(const Allocator& a)\n  {\n    ASIO_REBIND_ALLOC(Allocator, char) b(a);\n    p_ = std::allocate_shared<std::promise<T>>(b, std::allocator_arg, b);\n  }\n\n  shared_ptr<std::promise<T> > p_;\n};\n\n// For completion signature void().\nclass promise_handler_0\n  : public promise_creator<void>\n{\npublic:\n  void operator()()\n  {\n    this->p_->set_value();\n  }\n};\n\n// For completion signature void(error_code).\nclass promise_handler_ec_0\n  : public promise_creator<void>\n{\npublic:\n  void operator()(const asio::error_code& ec)\n  {\n    if (ec)\n    {\n      this->p_->set_exception(\n          std::make_exception_ptr(\n     
       asio::system_error(ec)));\n    }\n    else\n    {\n      this->p_->set_value();\n    }\n  }\n};\n\n// For completion signature void(exception_ptr).\nclass promise_handler_ex_0\n  : public promise_creator<void>\n{\npublic:\n  void operator()(const std::exception_ptr& ex)\n  {\n    if (ex)\n    {\n      this->p_->set_exception(ex);\n    }\n    else\n    {\n      this->p_->set_value();\n    }\n  }\n};\n\n// For completion signature void(T).\ntemplate <typename T>\nclass promise_handler_1\n  : public promise_creator<T>\n{\npublic:\n  template <typename Arg>\n  void operator()(ASIO_MOVE_ARG(Arg) arg)\n  {\n    this->p_->set_value(ASIO_MOVE_CAST(Arg)(arg));\n  }\n};\n\n// For completion signature void(error_code, T).\ntemplate <typename T>\nclass promise_handler_ec_1\n  : public promise_creator<T>\n{\npublic:\n  template <typename Arg>\n  void operator()(const asio::error_code& ec,\n      ASIO_MOVE_ARG(Arg) arg)\n  {\n    if (ec)\n    {\n      this->p_->set_exception(\n          std::make_exception_ptr(\n            asio::system_error(ec)));\n    }\n    else\n      this->p_->set_value(ASIO_MOVE_CAST(Arg)(arg));\n  }\n};\n\n// For completion signature void(exception_ptr, T).\ntemplate <typename T>\nclass promise_handler_ex_1\n  : public promise_creator<T>\n{\npublic:\n  template <typename Arg>\n  void operator()(const std::exception_ptr& ex,\n      ASIO_MOVE_ARG(Arg) arg)\n  {\n    if (ex)\n      this->p_->set_exception(ex);\n    else\n      this->p_->set_value(ASIO_MOVE_CAST(Arg)(arg));\n  }\n};\n\n// For completion signature void(T1, ..., Tn);\ntemplate <typename T>\nclass promise_handler_n\n  : public promise_creator<T>\n{\npublic:\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename... Args>\n  void operator()(ASIO_MOVE_ARG(Args)... 
args)\n  {\n    this->p_->set_value(\n        std::forward_as_tuple(\n          ASIO_MOVE_CAST(Args)(args)...));\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#define ASIO_PRIVATE_CALL_OP_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  void operator()(ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  {\\\n    this->p_->set_value( \\\n        std::forward_as_tuple( \\\n          ASIO_VARIADIC_MOVE_ARGS(n))); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CALL_OP_DEF)\n#undef ASIO_PRIVATE_CALL_OP_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n};\n\n// For completion signature void(error_code, T1, ..., Tn);\ntemplate <typename T>\nclass promise_handler_ec_n\n  : public promise_creator<T>\n{\npublic:\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename... Args>\n  void operator()(const asio::error_code& ec,\n      ASIO_MOVE_ARG(Args)... args)\n  {\n    if (ec)\n    {\n      this->p_->set_exception(\n          std::make_exception_ptr(\n            asio::system_error(ec)));\n    }\n    else\n    {\n      this->p_->set_value(\n          std::forward_as_tuple(\n            ASIO_MOVE_CAST(Args)(args)...));\n    }\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#define ASIO_PRIVATE_CALL_OP_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  void operator()(const asio::error_code& ec, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  {\\\n    if (ec) \\\n    { \\\n      this->p_->set_exception( \\\n          std::make_exception_ptr( \\\n            asio::system_error(ec))); \\\n    } \\\n    else \\\n    { \\\n      this->p_->set_value( \\\n          std::forward_as_tuple( \\\n            ASIO_VARIADIC_MOVE_ARGS(n))); \\\n    } \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CALL_OP_DEF)\n#undef ASIO_PRIVATE_CALL_OP_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n};\n\n// For completion signature void(exception_ptr, T1, ..., Tn);\ntemplate <typename T>\nclass promise_handler_ex_n\n  : public 
promise_creator<T>\n{\npublic:\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename... Args>\n  void operator()(const std::exception_ptr& ex,\n      ASIO_MOVE_ARG(Args)... args)\n  {\n    if (ex)\n      this->p_->set_exception(ex);\n    else\n    {\n      this->p_->set_value(\n          std::forward_as_tuple(\n            ASIO_MOVE_CAST(Args)(args)...));\n    }\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#define ASIO_PRIVATE_CALL_OP_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  void operator()(const std::exception_ptr& ex, \\\n      ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  {\\\n    if (ex) \\\n      this->p_->set_exception(ex); \\\n    else \\\n    { \\\n      this->p_->set_value( \\\n          std::forward_as_tuple( \\\n            ASIO_VARIADIC_MOVE_ARGS(n))); \\\n    } \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CALL_OP_DEF)\n#undef ASIO_PRIVATE_CALL_OP_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n};\n\n// Helper template to choose the appropriate concrete promise handler\n// implementation based on the supplied completion signature.\ntemplate <typename> class promise_handler_selector;\n\ntemplate <>\nclass promise_handler_selector<void()>\n  : public promise_handler_0 {};\n\ntemplate <>\nclass promise_handler_selector<void(asio::error_code)>\n  : public promise_handler_ec_0 {};\n\ntemplate <>\nclass promise_handler_selector<void(std::exception_ptr)>\n  : public promise_handler_ex_0 {};\n\ntemplate <typename Arg>\nclass promise_handler_selector<void(Arg)>\n  : public promise_handler_1<Arg> {};\n\ntemplate <typename Arg>\nclass promise_handler_selector<void(asio::error_code, Arg)>\n  : public promise_handler_ec_1<Arg> {};\n\ntemplate <typename Arg>\nclass promise_handler_selector<void(std::exception_ptr, Arg)>\n  : public promise_handler_ex_1<Arg> {};\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename... 
Arg>\nclass promise_handler_selector<void(Arg...)>\n  : public promise_handler_n<std::tuple<Arg...> > {};\n\ntemplate <typename... Arg>\nclass promise_handler_selector<void(asio::error_code, Arg...)>\n  : public promise_handler_ec_n<std::tuple<Arg...> > {};\n\ntemplate <typename... Arg>\nclass promise_handler_selector<void(std::exception_ptr, Arg...)>\n  : public promise_handler_ex_n<std::tuple<Arg...> > {};\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#define ASIO_PRIVATE_PROMISE_SELECTOR_DEF(n) \\\n  template <typename Arg, ASIO_VARIADIC_TPARAMS(n)> \\\n  class promise_handler_selector< \\\n    void(Arg, ASIO_VARIADIC_TARGS(n))> \\\n      : public promise_handler_n< \\\n        std::tuple<Arg, ASIO_VARIADIC_TARGS(n)> > {}; \\\n  \\\n  template <typename Arg, ASIO_VARIADIC_TPARAMS(n)> \\\n  class promise_handler_selector< \\\n    void(asio::error_code, Arg, ASIO_VARIADIC_TARGS(n))> \\\n      : public promise_handler_ec_n< \\\n        std::tuple<Arg, ASIO_VARIADIC_TARGS(n)> > {}; \\\n  \\\n  template <typename Arg, ASIO_VARIADIC_TPARAMS(n)> \\\n  class promise_handler_selector< \\\n    void(std::exception_ptr, Arg, ASIO_VARIADIC_TARGS(n))> \\\n      : public promise_handler_ex_n< \\\n        std::tuple<Arg, ASIO_VARIADIC_TARGS(n)> > {}; \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_PROMISE_SELECTOR_DEF)\n#undef ASIO_PRIVATE_PROMISE_SELECTOR_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n// Completion handlers produced from the use_future completion token, when not\n// using use_future::operator().\ntemplate <typename Signature, typename Allocator>\nclass promise_handler\n  : public promise_handler_selector<Signature>\n{\npublic:\n  typedef Allocator allocator_type;\n  typedef void result_type;\n\n  promise_handler(use_future_t<Allocator> u)\n    : allocator_(u.get_allocator())\n  {\n    this->create_promise(allocator_);\n  }\n\n  allocator_type get_allocator() const ASIO_NOEXCEPT\n  {\n    return allocator_;\n  }\n\nprivate:\n  Allocator 
allocator_;\n};\n\ntemplate <typename Function, typename Signature, typename Allocator>\ninline void asio_handler_invoke(Function& f,\n    promise_handler<Signature, Allocator>* h)\n{\n  typename promise_handler<Signature, Allocator>::executor_type\n    ex(h->get_executor());\n  ex.dispatch(ASIO_MOVE_CAST(Function)(f), std::allocator<void>());\n}\n\ntemplate <typename Function, typename Signature, typename Allocator>\ninline void asio_handler_invoke(const Function& f,\n    promise_handler<Signature, Allocator>* h)\n{\n  typename promise_handler<Signature, Allocator>::executor_type\n    ex(h->get_executor());\n  ex.dispatch(f, std::allocator<void>());\n}\n\n// Helper base class for async_result specialisation.\ntemplate <typename Signature, typename Allocator>\nclass promise_async_result\n{\npublic:\n  typedef promise_handler<Signature, Allocator> completion_handler_type;\n  typedef typename completion_handler_type::future_type return_type;\n\n  explicit promise_async_result(completion_handler_type& h)\n    : future_(h.get_future())\n  {\n  }\n\n  return_type get()\n  {\n    return ASIO_MOVE_CAST(return_type)(future_);\n  }\n\nprivate:\n  return_type future_;\n};\n\n// Return value from use_future::operator().\ntemplate <typename Function, typename Allocator>\nclass packaged_token\n{\npublic:\n  packaged_token(Function f, const Allocator& a)\n    : function_(ASIO_MOVE_CAST(Function)(f)),\n      allocator_(a)\n  {\n  }\n\n//private:\n  Function function_;\n  Allocator allocator_;\n};\n\n// Completion handlers produced from the use_future completion token, when\n// using use_future::operator().\ntemplate <typename Function, typename Allocator, typename Result>\nclass packaged_handler\n  : public promise_creator<Result>\n{\npublic:\n  typedef Allocator allocator_type;\n  typedef void result_type;\n\n  packaged_handler(packaged_token<Function, Allocator> t)\n    : function_(ASIO_MOVE_CAST(Function)(t.function_)),\n      allocator_(t.allocator_)\n  {\n    
this->create_promise(allocator_);\n  }\n\n  allocator_type get_allocator() const ASIO_NOEXCEPT\n  {\n    return allocator_;\n  }\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  template <typename... Args>\n  void operator()(ASIO_MOVE_ARG(Args)... args)\n  {\n    (promise_invoke_and_set)(*this->p_,\n        function_, ASIO_MOVE_CAST(Args)(args)...);\n  }\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n  void operator()()\n  {\n    (promise_invoke_and_set)(*this->p_, function_);\n  }\n\n#define ASIO_PRIVATE_CALL_OP_DEF(n) \\\n  template <ASIO_VARIADIC_TPARAMS(n)> \\\n  void operator()(ASIO_VARIADIC_MOVE_PARAMS(n)) \\\n  {\\\n    (promise_invoke_and_set)(*this->p_, \\\n        function_, ASIO_VARIADIC_MOVE_ARGS(n)); \\\n  } \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CALL_OP_DEF)\n#undef ASIO_PRIVATE_CALL_OP_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\nprivate:\n  Function function_;\n  Allocator allocator_;\n};\n\ntemplate <typename Function,\n    typename Function1, typename Allocator, typename Result>\ninline void asio_handler_invoke(Function& f,\n    packaged_handler<Function1, Allocator, Result>* h)\n{\n  typename packaged_handler<Function1, Allocator, Result>::executor_type\n    ex(h->get_executor());\n  ex.dispatch(ASIO_MOVE_CAST(Function)(f), std::allocator<void>());\n}\n\ntemplate <typename Function,\n    typename Function1, typename Allocator, typename Result>\ninline void asio_handler_invoke(const Function& f,\n    packaged_handler<Function1, Allocator, Result>* h)\n{\n  typename packaged_handler<Function1, Allocator, Result>::executor_type\n    ex(h->get_executor());\n  ex.dispatch(f, std::allocator<void>());\n}\n\n// Helper base class for async_result specialisation.\ntemplate <typename Function, typename Allocator, typename Result>\nclass packaged_async_result\n{\npublic:\n  typedef packaged_handler<Function, Allocator, Result> completion_handler_type;\n  typedef typename completion_handler_type::future_type return_type;\n\n  
explicit packaged_async_result(completion_handler_type& h)\n    : future_(h.get_future())\n  {\n  }\n\n  return_type get()\n  {\n    return ASIO_MOVE_CAST(return_type)(future_);\n  }\n\nprivate:\n  return_type future_;\n};\n\n} // namespace detail\n\ntemplate <typename Allocator> template <typename Function>\ninline detail::packaged_token<typename decay<Function>::type, Allocator>\nuse_future_t<Allocator>::operator()(ASIO_MOVE_ARG(Function) f) const\n{\n  return detail::packaged_token<typename decay<Function>::type, Allocator>(\n      ASIO_MOVE_CAST(Function)(f), allocator_);\n}\n\n#if !defined(GENERATING_DOCUMENTATION)\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename Allocator, typename Result, typename... Args>\nclass async_result<use_future_t<Allocator>, Result(Args...)>\n  : public detail::promise_async_result<\n      void(typename decay<Args>::type...), Allocator>\n{\npublic:\n  explicit async_result(\n    typename detail::promise_async_result<void(typename decay<Args>::type...),\n      Allocator>::completion_handler_type& h)\n    : detail::promise_async_result<\n        void(typename decay<Args>::type...), Allocator>(h)\n  {\n  }\n};\n\ntemplate <typename Function, typename Allocator,\n    typename Result, typename... 
Args>\nclass async_result<detail::packaged_token<Function, Allocator>, Result(Args...)>\n  : public detail::packaged_async_result<Function, Allocator,\n      typename result_of<Function(Args...)>::type>\n{\npublic:\n  explicit async_result(\n    typename detail::packaged_async_result<Function, Allocator,\n      typename result_of<Function(Args...)>::type>::completion_handler_type& h)\n    : detail::packaged_async_result<Function, Allocator,\n        typename result_of<Function(Args...)>::type>(h)\n  {\n  }\n};\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\ntemplate <typename Allocator, typename Result>\nclass async_result<use_future_t<Allocator>, Result()>\n  : public detail::promise_async_result<void(), Allocator>\n{\npublic:\n  explicit async_result(\n    typename detail::promise_async_result<\n      void(), Allocator>::completion_handler_type& h)\n    : detail::promise_async_result<void(), Allocator>(h)\n  {\n  }\n};\n\ntemplate <typename Function, typename Allocator, typename Result>\nclass async_result<detail::packaged_token<Function, Allocator>, Result()>\n  : public detail::packaged_async_result<Function, Allocator,\n      typename result_of<Function()>::type>\n{\npublic:\n  explicit async_result(\n    typename detail::packaged_async_result<Function, Allocator,\n      typename result_of<Function()>::type>::completion_handler_type& h)\n    : detail::packaged_async_result<Function, Allocator,\n        typename result_of<Function()>::type>(h)\n  {\n  }\n};\n\n#define ASIO_PRIVATE_ASYNC_RESULT_DEF(n) \\\n  template <typename Allocator, \\\n      typename Result, ASIO_VARIADIC_TPARAMS(n)> \\\n  class async_result<use_future_t<Allocator>, \\\n      Result(ASIO_VARIADIC_TARGS(n))> \\\n    : public detail::promise_async_result< \\\n        void(ASIO_VARIADIC_DECAY(n)), Allocator> \\\n  { \\\n  public: \\\n    explicit async_result( \\\n      typename detail::promise_async_result< \\\n        void(ASIO_VARIADIC_DECAY(n)), \\\n        
Allocator>::completion_handler_type& h) \\\n      : detail::promise_async_result< \\\n          void(ASIO_VARIADIC_DECAY(n)), Allocator>(h) \\\n    { \\\n    } \\\n  }; \\\n  \\\n  template <typename Function, typename Allocator, \\\n      typename Result, ASIO_VARIADIC_TPARAMS(n)> \\\n  class async_result<detail::packaged_token<Function, Allocator>, \\\n      Result(ASIO_VARIADIC_TARGS(n))> \\\n    : public detail::packaged_async_result<Function, Allocator, \\\n        typename result_of<Function(ASIO_VARIADIC_TARGS(n))>::type> \\\n  { \\\n  public: \\\n    explicit async_result( \\\n      typename detail::packaged_async_result<Function, Allocator, \\\n        typename result_of<Function(ASIO_VARIADIC_TARGS(n))>::type \\\n        >::completion_handler_type& h) \\\n      : detail::packaged_async_result<Function, Allocator, \\\n          typename result_of<Function(ASIO_VARIADIC_TARGS(n))>::type>(h) \\\n    { \\\n    } \\\n  }; \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_ASYNC_RESULT_DEF)\n#undef ASIO_PRIVATE_ASYNC_RESULT_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_USE_FUTURE_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/write.hpp",
    "content": "//\n// impl/write.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_WRITE_HPP\n#define ASIO_IMPL_WRITE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/completion_condition.hpp\"\n#include \"asio/detail/array_fwd.hpp\"\n#include \"asio/detail/base_from_completion_cond.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/consuming_buffers.hpp\"\n#include \"asio/detail/dependent_type.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  template <typename SyncWriteStream, typename ConstBufferSequence,\n      typename ConstBufferIterator, typename CompletionCondition>\n  std::size_t write_buffer_sequence(SyncWriteStream& s,\n      const ConstBufferSequence& buffers, const ConstBufferIterator&,\n      CompletionCondition completion_condition, asio::error_code& ec)\n  {\n    ec = asio::error_code();\n    asio::detail::consuming_buffers<const_buffer,\n        ConstBufferSequence, ConstBufferIterator> tmp(buffers);\n    while (!tmp.empty())\n    {\n      if (std::size_t max_size = detail::adapt_completion_condition_result(\n            completion_condition(ec, tmp.total_consumed())))\n        tmp.consume(s.write_some(tmp.prepare(max_size), ec));\n      else\n     
   break;\n    }\n    return tmp.total_consumed();;\n  }\n} // namespace detail\n\ntemplate <typename SyncWriteStream, typename ConstBufferSequence,\n    typename CompletionCondition>\ninline std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type*)\n{\n  return detail::write_buffer_sequence(s, buffers,\n      asio::buffer_sequence_begin(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n}\n\ntemplate <typename SyncWriteStream, typename ConstBufferSequence>\ninline std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write(s, buffers, transfer_all(), ec);\n  asio::detail::throw_error(ec, \"write\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncWriteStream, typename ConstBufferSequence>\ninline std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type*)\n{\n  return write(s, buffers, transfer_all(), ec);\n}\n\ntemplate <typename SyncWriteStream, typename ConstBufferSequence,\n    typename CompletionCondition>\ninline std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write(s, buffers,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"write\");\n  return bytes_transferred;\n}\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\ntemplate <typename 
SyncWriteStream, typename DynamicBuffer_v1,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  typename decay<DynamicBuffer_v1>::type b(\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers));\n\n  std::size_t bytes_transferred = write(s, b.data(),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  b.consume(bytes_transferred);\n  return bytes_transferred;\n}\n\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v1>\ninline std::size_t write(SyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      transfer_all(), ec);\n  asio::detail::throw_error(ec, \"write\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v1>\ninline std::size_t write(SyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return write(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      transfer_all(), ec);\n}\n\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v1,\n    typename CompletionCondition>\ninline std::size_t write(SyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition 
completion_condition,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"write\");\n  return bytes_transferred;\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\ntemplate <typename SyncWriteStream, typename Allocator,\n    typename CompletionCondition>\ninline std::size_t write(SyncWriteStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition, asio::error_code& ec)\n{\n  return write(s, basic_streambuf_ref<Allocator>(b),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n}\n\ntemplate <typename SyncWriteStream, typename Allocator>\ninline std::size_t write(SyncWriteStream& s,\n    asio::basic_streambuf<Allocator>& b)\n{\n  return write(s, basic_streambuf_ref<Allocator>(b));\n}\n\ntemplate <typename SyncWriteStream, typename Allocator>\ninline std::size_t write(SyncWriteStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    asio::error_code& ec)\n{\n  return write(s, basic_streambuf_ref<Allocator>(b), ec);\n}\n\ntemplate <typename SyncWriteStream, typename Allocator,\n    typename CompletionCondition>\ninline std::size_t write(SyncWriteStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition)\n{\n  return write(s, basic_streambuf_ref<Allocator>(b),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v2,\n    typename CompletionCondition>\nstd::size_t 
write(SyncWriteStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  std::size_t bytes_transferred = write(s, buffers.data(0, buffers.size()),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  buffers.consume(bytes_transferred);\n  return bytes_transferred;\n}\n\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v2>\ninline std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      transfer_all(), ec);\n  asio::detail::throw_error(ec, \"write\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v2>\ninline std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  return write(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      transfer_all(), ec);\n}\n\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v2,\n    typename CompletionCondition>\ninline std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"write\");\n  return bytes_transferred;\n}\n\nnamespace detail\n{\n  template <typename AsyncWriteStream, typename ConstBufferSequence,\n      typename ConstBufferIterator, typename CompletionCondition,\n      
typename WriteHandler>\n  class write_op\n    : detail::base_from_completion_cond<CompletionCondition>\n  {\n  public:\n    write_op(AsyncWriteStream& stream, const ConstBufferSequence& buffers,\n        CompletionCondition& completion_condition, WriteHandler& handler)\n      : detail::base_from_completion_cond<\n          CompletionCondition>(completion_condition),\n        stream_(stream),\n        buffers_(buffers),\n        start_(0),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    write_op(const write_op& other)\n      : detail::base_from_completion_cond<CompletionCondition>(other),\n        stream_(other.stream_),\n        buffers_(other.buffers_),\n        start_(other.start_),\n        handler_(other.handler_)\n    {\n    }\n\n    write_op(write_op&& other)\n      : detail::base_from_completion_cond<CompletionCondition>(\n          ASIO_MOVE_CAST(detail::base_from_completion_cond<\n            CompletionCondition>)(other)),\n        stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(buffers_type)(other.buffers_)),\n        start_(other.start_),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      std::size_t max_size;\n      switch (start_ = start)\n      {\n        case 1:\n        max_size = this->check_for_completion(ec, buffers_.total_consumed());\n        do\n        {\n          stream_.async_write_some(buffers_.prepare(max_size),\n              ASIO_MOVE_CAST(write_op)(*this));\n          return; default:\n          buffers_.consume(bytes_transferred);\n          if ((!ec && bytes_transferred == 0) || buffers_.empty())\n            break;\n          max_size = this->check_for_completion(ec, buffers_.total_consumed());\n        } while (max_size > 0);\n\n        handler_(ec, buffers_.total_consumed());\n      }\n 
   }\n\n  //private:\n    typedef asio::detail::consuming_buffers<const_buffer,\n        ConstBufferSequence, ConstBufferIterator> buffers_type;\n\n    AsyncWriteStream& stream_;\n    buffers_type buffers_;\n    int start_;\n    WriteHandler handler_;\n  };\n\n  template <typename AsyncWriteStream, typename ConstBufferSequence,\n      typename ConstBufferIterator, typename CompletionCondition,\n      typename WriteHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      write_op<AsyncWriteStream, ConstBufferSequence, ConstBufferIterator,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream, typename ConstBufferSequence,\n      typename ConstBufferIterator, typename CompletionCondition,\n      typename WriteHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      write_op<AsyncWriteStream, ConstBufferSequence, ConstBufferIterator,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream, typename ConstBufferSequence,\n      typename ConstBufferIterator, typename CompletionCondition,\n      typename WriteHandler>\n  inline bool asio_handler_is_continuation(\n      write_op<AsyncWriteStream, ConstBufferSequence, ConstBufferIterator,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncWriteStream,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  inline void asio_handler_invoke(Function& function,\n      write_op<AsyncWriteStream, ConstBufferSequence, ConstBufferIterator,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncWriteStream,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  inline void asio_handler_invoke(const Function& function,\n      write_op<AsyncWriteStream, ConstBufferSequence, ConstBufferIterator,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream, typename ConstBufferSequence,\n      typename ConstBufferIterator, typename CompletionCondition,\n      typename WriteHandler>\n  inline void start_write_buffer_sequence_op(AsyncWriteStream& stream,\n      const ConstBufferSequence& buffers, const ConstBufferIterator&,\n      CompletionCondition& completion_condition, WriteHandler& handler)\n  {\n    detail::write_op<AsyncWriteStream, ConstBufferSequence,\n      ConstBufferIterator, CompletionCondition, WriteHandler>(\n        stream, buffers, completion_condition, handler)(\n          asio::error_code(), 0, 1);\n  }\n\n  template <typename AsyncWriteStream>\n  class initiate_async_write_buffer_sequence\n  {\n  public:\n    typedef typename AsyncWriteStream::executor_type executor_type;\n\n    explicit initiate_async_write_buffer_sequence(AsyncWriteStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() 
const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence,\n        typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers,\n        ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      non_const_lvalue<WriteHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      start_write_buffer_sequence_op(stream_, buffers,\n          asio::buffer_sequence_begin(buffers),\n          completion_cond2.value, handler2.value);\n    }\n\n  private:\n    AsyncWriteStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncWriteStream, typename ConstBufferSequence,\n    typename ConstBufferIterator, typename CompletionCondition,\n    typename WriteHandler, typename Allocator>\nstruct associated_allocator<\n    detail::write_op<AsyncWriteStream, ConstBufferSequence,\n      ConstBufferIterator, CompletionCondition, WriteHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<WriteHandler, Allocator>::type type;\n\n  static type get(\n      const detail::write_op<AsyncWriteStream, ConstBufferSequence,\n        ConstBufferIterator, CompletionCondition, WriteHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<WriteHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncWriteStream, typename ConstBufferSequence,\n    typename ConstBufferIterator, typename CompletionCondition,\n    typename WriteHandler, typename Executor>\nstruct associated_executor<\n    detail::write_op<AsyncWriteStream, 
ConstBufferSequence,\n      ConstBufferIterator, CompletionCondition, WriteHandler>,\n    Executor>\n{\n  typedef typename associated_executor<WriteHandler, Executor>::type type;\n\n  static type get(\n      const detail::write_op<AsyncWriteStream, ConstBufferSequence,\n        ConstBufferIterator, CompletionCondition, WriteHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<WriteHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncWriteStream,\n    typename ConstBufferSequence, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type*)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_write_buffer_sequence<AsyncWriteStream>(s),\n      handler, buffers,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\ntemplate <typename AsyncWriteStream, typename ConstBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, const ConstBufferSequence& buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type*)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_write_buffer_sequence<AsyncWriteStream>(s),\n      handler, buffers, 
transfer_all());\n}\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\nnamespace detail\n{\n  template <typename AsyncWriteStream, typename DynamicBuffer_v1,\n      typename CompletionCondition, typename WriteHandler>\n  class write_dynbuf_v1_op\n  {\n  public:\n    template <typename BufferSequence>\n    write_dynbuf_v1_op(AsyncWriteStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        CompletionCondition& completion_condition, WriteHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        completion_condition_(\n          ASIO_MOVE_CAST(CompletionCondition)(completion_condition)),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    write_dynbuf_v1_op(const write_dynbuf_v1_op& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        completion_condition_(other.completion_condition_),\n        handler_(other.handler_)\n    {\n    }\n\n    write_dynbuf_v1_op(write_dynbuf_v1_op&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)),\n        completion_condition_(\n          ASIO_MOVE_CAST(CompletionCondition)(\n            other.completion_condition_)),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      switch (start)\n      {\n        case 1:\n        async_write(stream_, buffers_.data(),\n            ASIO_MOVE_CAST(CompletionCondition)(completion_condition_),\n            ASIO_MOVE_CAST(write_dynbuf_v1_op)(*this));\n        return; default:\n        buffers_.consume(bytes_transferred);\n        handler_(ec, static_cast<const std::size_t&>(bytes_transferred));\n      }\n    }\n\n  //private:\n    AsyncWriteStream& stream_;\n    DynamicBuffer_v1 buffers_;\n    CompletionCondition 
completion_condition_;\n    WriteHandler handler_;\n  };\n\n  template <typename AsyncWriteStream, typename DynamicBuffer_v1,\n      typename CompletionCondition, typename WriteHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      write_dynbuf_v1_op<AsyncWriteStream, DynamicBuffer_v1,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream, typename DynamicBuffer_v1,\n      typename CompletionCondition, typename WriteHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      write_dynbuf_v1_op<AsyncWriteStream, DynamicBuffer_v1,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream, typename DynamicBuffer_v1,\n      typename CompletionCondition, typename WriteHandler>\n  inline bool asio_handler_is_continuation(\n      write_dynbuf_v1_op<AsyncWriteStream, DynamicBuffer_v1,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n        this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncWriteStream,\n      typename DynamicBuffer_v1, typename CompletionCondition,\n      typename WriteHandler>\n  inline void asio_handler_invoke(Function& function,\n      write_dynbuf_v1_op<AsyncWriteStream, DynamicBuffer_v1,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncWriteStream,\n      typename DynamicBuffer_v1, typename CompletionCondition,\n      typename WriteHandler>\n  inline void asio_handler_invoke(const Function& function,\n      write_dynbuf_v1_op<AsyncWriteStream, DynamicBuffer_v1,\n        
CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream>\n  class initiate_async_write_dynbuf_v1\n  {\n  public:\n    typedef typename AsyncWriteStream::executor_type executor_type;\n\n    explicit initiate_async_write_dynbuf_v1(AsyncWriteStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename WriteHandler, typename DynamicBuffer_v1,\n        typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n        ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      non_const_lvalue<WriteHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      write_dynbuf_v1_op<AsyncWriteStream,\n        typename decay<DynamicBuffer_v1>::type,\n          CompletionCondition, typename decay<WriteHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n              completion_cond2.value, handler2.value)(\n                asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncWriteStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncWriteStream, typename DynamicBuffer_v1,\n    typename CompletionCondition, typename WriteHandler, typename Allocator>\nstruct associated_allocator<\n    detail::write_dynbuf_v1_op<AsyncWriteStream,\n      DynamicBuffer_v1, CompletionCondition, WriteHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<WriteHandler, 
Allocator>::type type;\n\n  static type get(\n      const detail::write_dynbuf_v1_op<AsyncWriteStream,\n        DynamicBuffer_v1, CompletionCondition, WriteHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<WriteHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncWriteStream, typename DynamicBuffer_v1,\n    typename CompletionCondition, typename WriteHandler, typename Executor>\nstruct associated_executor<\n    detail::write_dynbuf_v1_op<AsyncWriteStream,\n      DynamicBuffer_v1, CompletionCondition, WriteHandler>,\n    Executor>\n{\n  typedef typename associated_executor<WriteHandler, Executor>::type type;\n\n  static type get(\n      const detail::write_dynbuf_v1_op<AsyncWriteStream,\n        DynamicBuffer_v1, CompletionCondition, WriteHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<WriteHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncWriteStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return async_write(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      transfer_all(), ASIO_MOVE_CAST(WriteHandler)(handler));\n}\n\ntemplate <typename AsyncWriteStream,\n    typename DynamicBuffer_v1, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    
void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type*)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_write_dynbuf_v1<AsyncWriteStream>(s),\n      handler, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\ntemplate <typename AsyncWriteStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    ASIO_MOVE_ARG(WriteHandler) handler)\n{\n  return async_write(s, basic_streambuf_ref<Allocator>(b),\n      ASIO_MOVE_CAST(WriteHandler)(handler));\n}\n\ntemplate <typename AsyncWriteStream,\n    typename Allocator, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler)\n{\n  return async_write(s, basic_streambuf_ref<Allocator>(b),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition),\n      ASIO_MOVE_CAST(WriteHandler)(handler));\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\nnamespace 
detail\n{\n  template <typename AsyncWriteStream, typename DynamicBuffer_v2,\n      typename CompletionCondition, typename WriteHandler>\n  class write_dynbuf_v2_op\n  {\n  public:\n    template <typename BufferSequence>\n    write_dynbuf_v2_op(AsyncWriteStream& stream,\n        ASIO_MOVE_ARG(BufferSequence) buffers,\n        CompletionCondition& completion_condition, WriteHandler& handler)\n      : stream_(stream),\n        buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)),\n        completion_condition_(\n          ASIO_MOVE_CAST(CompletionCondition)(completion_condition)),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    write_dynbuf_v2_op(const write_dynbuf_v2_op& other)\n      : stream_(other.stream_),\n        buffers_(other.buffers_),\n        completion_condition_(other.completion_condition_),\n        handler_(other.handler_)\n    {\n    }\n\n    write_dynbuf_v2_op(write_dynbuf_v2_op&& other)\n      : stream_(other.stream_),\n        buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)),\n        completion_condition_(\n          ASIO_MOVE_CAST(CompletionCondition)(\n            other.completion_condition_)),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      switch (start)\n      {\n        case 1:\n        async_write(stream_, buffers_.data(0, buffers_.size()),\n            ASIO_MOVE_CAST(CompletionCondition)(completion_condition_),\n            ASIO_MOVE_CAST(write_dynbuf_v2_op)(*this));\n        return; default:\n        buffers_.consume(bytes_transferred);\n        handler_(ec, static_cast<const std::size_t&>(bytes_transferred));\n      }\n    }\n\n  //private:\n    AsyncWriteStream& stream_;\n    DynamicBuffer_v2 buffers_;\n    CompletionCondition completion_condition_;\n    WriteHandler handler_;\n  };\n\n  
template <typename AsyncWriteStream, typename DynamicBuffer_v2,\n      typename CompletionCondition, typename WriteHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      write_dynbuf_v2_op<AsyncWriteStream, DynamicBuffer_v2,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream, typename DynamicBuffer_v2,\n      typename CompletionCondition, typename WriteHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      write_dynbuf_v2_op<AsyncWriteStream, DynamicBuffer_v2,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream, typename DynamicBuffer_v2,\n      typename CompletionCondition, typename WriteHandler>\n  inline bool asio_handler_is_continuation(\n      write_dynbuf_v2_op<AsyncWriteStream, DynamicBuffer_v2,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n        this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncWriteStream,\n      typename DynamicBuffer_v2, typename CompletionCondition,\n      typename WriteHandler>\n  inline void asio_handler_invoke(Function& function,\n      write_dynbuf_v2_op<AsyncWriteStream, DynamicBuffer_v2,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncWriteStream,\n      typename DynamicBuffer_v2, typename CompletionCondition,\n      typename WriteHandler>\n  inline void asio_handler_invoke(const Function& function,\n      write_dynbuf_v2_op<AsyncWriteStream, DynamicBuffer_v2,\n        CompletionCondition, WriteHandler>* this_handler)\n  {\n    
asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncWriteStream>\n  class initiate_async_write_dynbuf_v2\n  {\n  public:\n    typedef typename AsyncWriteStream::executor_type executor_type;\n\n    explicit initiate_async_write_dynbuf_v2(AsyncWriteStream& stream)\n      : stream_(stream)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return stream_.get_executor();\n    }\n\n    template <typename WriteHandler, typename DynamicBuffer_v2,\n        typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        ASIO_MOVE_ARG(DynamicBuffer_v2) buffers,\n        ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      non_const_lvalue<WriteHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      write_dynbuf_v2_op<AsyncWriteStream,\n        typename decay<DynamicBuffer_v2>::type,\n          CompletionCondition, typename decay<WriteHandler>::type>(\n            stream_, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n              completion_cond2.value, handler2.value)(\n                asio::error_code(), 0, 1);\n    }\n\n  private:\n    AsyncWriteStream& stream_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncWriteStream, typename DynamicBuffer_v2,\n    typename CompletionCondition, typename WriteHandler, typename Allocator>\nstruct associated_allocator<\n    detail::write_dynbuf_v2_op<AsyncWriteStream,\n      DynamicBuffer_v2, CompletionCondition, WriteHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<WriteHandler, Allocator>::type type;\n\n  static type get(\n      const 
detail::write_dynbuf_v2_op<AsyncWriteStream,\n        DynamicBuffer_v2, CompletionCondition, WriteHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<WriteHandler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename AsyncWriteStream, typename DynamicBuffer_v2,\n    typename CompletionCondition, typename WriteHandler, typename Executor>\nstruct associated_executor<\n    detail::write_dynbuf_v2_op<AsyncWriteStream,\n      DynamicBuffer_v2, CompletionCondition, WriteHandler>,\n    Executor>\n{\n  typedef typename associated_executor<WriteHandler, Executor>::type type;\n\n  static type get(\n      const detail::write_dynbuf_v2_op<AsyncWriteStream,\n        DynamicBuffer_v2, CompletionCondition, WriteHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<WriteHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncWriteStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, DynamicBuffer_v2 buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  return async_write(s,\n      ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      transfer_all(), ASIO_MOVE_CAST(WriteHandler)(handler));\n}\n\ntemplate <typename AsyncWriteStream,\n    typename DynamicBuffer_v2, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) 
handler,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type*)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_write_dynbuf_v2<AsyncWriteStream>(s),\n      handler, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_WRITE_HPP\n"
  },
  {
    "path": "src/third_party/asio/impl/write_at.hpp",
    "content": "//\n// impl/write_at.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IMPL_WRITE_AT_HPP\n#define ASIO_IMPL_WRITE_AT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/completion_condition.hpp\"\n#include \"asio/detail/array_fwd.hpp\"\n#include \"asio/detail/base_from_completion_cond.hpp\"\n#include \"asio/detail/bind_handler.hpp\"\n#include \"asio/detail/consuming_buffers.hpp\"\n#include \"asio/detail/dependent_type.hpp\"\n#include \"asio/detail/handler_alloc_helpers.hpp\"\n#include \"asio/detail/handler_cont_helpers.hpp\"\n#include \"asio/detail/handler_invoke_helpers.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  template <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence,\n      typename ConstBufferIterator, typename CompletionCondition>\n  std::size_t write_at_buffer_sequence(SyncRandomAccessWriteDevice& d,\n      uint64_t offset, const ConstBufferSequence& buffers,\n      const ConstBufferIterator&, CompletionCondition completion_condition,\n      asio::error_code& ec)\n  {\n    ec = asio::error_code();\n    asio::detail::consuming_buffers<const_buffer,\n        ConstBufferSequence, ConstBufferIterator> tmp(buffers);\n    while (!tmp.empty())\n    {\n      if (std::size_t max_size = detail::adapt_completion_condition_result(\n            completion_condition(ec, tmp.total_consumed())))\n      {\n        
tmp.consume(d.write_some_at(offset + tmp.total_consumed(),\n              tmp.prepare(max_size), ec));\n      }\n      else\n        break;\n    }\n    return tmp.total_consumed();;\n  }\n} // namespace detail\n\ntemplate <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence,\n    typename CompletionCondition>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition, asio::error_code& ec)\n{\n  return detail::write_at_buffer_sequence(d, offset, buffers,\n      asio::buffer_sequence_begin(buffers),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n}\n\ntemplate <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence>\ninline std::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write_at(\n      d, offset, buffers, transfer_all(), ec);\n  asio::detail::throw_error(ec, \"write_at\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence>\ninline std::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers,\n    asio::error_code& ec)\n{\n  return write_at(d, offset, buffers, transfer_all(), ec);\n}\n\ntemplate <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence,\n    typename CompletionCondition>\ninline std::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write_at(d, offset, buffers,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"write_at\");\n  return bytes_transferred;\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\ntemplate <typename 
SyncRandomAccessWriteDevice, typename Allocator,\n    typename CompletionCondition>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition, asio::error_code& ec)\n{\n  std::size_t bytes_transferred = write_at(d, offset, b.data(),\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  b.consume(bytes_transferred);\n  return bytes_transferred;\n}\n\ntemplate <typename SyncRandomAccessWriteDevice, typename Allocator>\ninline std::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write_at(d, offset, b, transfer_all(), ec);\n  asio::detail::throw_error(ec, \"write_at\");\n  return bytes_transferred;\n}\n\ntemplate <typename SyncRandomAccessWriteDevice, typename Allocator>\ninline std::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    asio::error_code& ec)\n{\n  return write_at(d, offset, b, transfer_all(), ec);\n}\n\ntemplate <typename SyncRandomAccessWriteDevice, typename Allocator,\n    typename CompletionCondition>\ninline std::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition)\n{\n  asio::error_code ec;\n  std::size_t bytes_transferred = write_at(d, offset, b,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec);\n  asio::detail::throw_error(ec, \"write_at\");\n  return bytes_transferred;\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\nnamespace detail\n{\n  template <typename AsyncRandomAccessWriteDevice,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  class write_at_op\n    : 
detail::base_from_completion_cond<CompletionCondition>\n  {\n  public:\n    write_at_op(AsyncRandomAccessWriteDevice& device,\n        uint64_t offset, const ConstBufferSequence& buffers,\n        CompletionCondition& completion_condition, WriteHandler& handler)\n      : detail::base_from_completion_cond<\n          CompletionCondition>(completion_condition),\n        device_(device),\n        offset_(offset),\n        buffers_(buffers),\n        start_(0),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    write_at_op(const write_at_op& other)\n      : detail::base_from_completion_cond<CompletionCondition>(other),\n        device_(other.device_),\n        offset_(other.offset_),\n        buffers_(other.buffers_),\n        start_(other.start_),\n        handler_(other.handler_)\n    {\n    }\n\n    write_at_op(write_at_op&& other)\n      : detail::base_from_completion_cond<CompletionCondition>(\n          ASIO_MOVE_CAST(detail::base_from_completion_cond<\n            CompletionCondition>)(other)),\n        device_(other.device_),\n        offset_(other.offset_),\n        buffers_(ASIO_MOVE_CAST(buffers_type)(other.buffers_)),\n        start_(other.start_),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        std::size_t bytes_transferred, int start = 0)\n    {\n      std::size_t max_size;\n      switch (start_ = start)\n      {\n        case 1:\n        max_size = this->check_for_completion(ec, buffers_.total_consumed());\n        do\n        {\n          device_.async_write_some_at(\n              offset_ + buffers_.total_consumed(), buffers_.prepare(max_size),\n              ASIO_MOVE_CAST(write_at_op)(*this));\n          return; default:\n          buffers_.consume(bytes_transferred);\n          if ((!ec && bytes_transferred == 0) || buffers_.empty())\n            break;\n          max_size 
= this->check_for_completion(ec, buffers_.total_consumed());\n        } while (max_size > 0);\n\n        handler_(ec, buffers_.total_consumed());\n      }\n    }\n\n  //private:\n    typedef asio::detail::consuming_buffers<const_buffer,\n        ConstBufferSequence, ConstBufferIterator> buffers_type;\n\n    AsyncRandomAccessWriteDevice& device_;\n    uint64_t offset_;\n    buffers_type buffers_;\n    int start_;\n    WriteHandler handler_;\n  };\n\n  template <typename AsyncRandomAccessWriteDevice,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      write_at_op<AsyncRandomAccessWriteDevice, ConstBufferSequence,\n        ConstBufferIterator, CompletionCondition, WriteHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename AsyncRandomAccessWriteDevice,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      write_at_op<AsyncRandomAccessWriteDevice, ConstBufferSequence,\n        ConstBufferIterator, CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename AsyncRandomAccessWriteDevice,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  inline bool asio_handler_is_continuation(\n      write_at_op<AsyncRandomAccessWriteDevice, ConstBufferSequence,\n        ConstBufferIterator, CompletionCondition, WriteHandler>* this_handler)\n  {\n    return this_handler->start_ == 0 ? 
true\n      : asio_handler_cont_helpers::is_continuation(\n          this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncRandomAccessWriteDevice,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  inline void asio_handler_invoke(Function& function,\n      write_at_op<AsyncRandomAccessWriteDevice, ConstBufferSequence,\n        ConstBufferIterator, CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename AsyncRandomAccessWriteDevice,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  inline void asio_handler_invoke(const Function& function,\n      write_at_op<AsyncRandomAccessWriteDevice, ConstBufferSequence,\n        ConstBufferIterator, CompletionCondition, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncRandomAccessWriteDevice,\n      typename ConstBufferSequence, typename ConstBufferIterator,\n      typename CompletionCondition, typename WriteHandler>\n  inline void start_write_at_buffer_sequence_op(AsyncRandomAccessWriteDevice& d,\n      uint64_t offset, const ConstBufferSequence& buffers,\n      const ConstBufferIterator&, CompletionCondition& completion_condition,\n      WriteHandler& handler)\n  {\n    detail::write_at_op<AsyncRandomAccessWriteDevice, ConstBufferSequence,\n      ConstBufferIterator, CompletionCondition, WriteHandler>(\n        d, offset, buffers, completion_condition, handler)(\n          asio::error_code(), 0, 1);\n  }\n\n  template <typename AsyncRandomAccessWriteDevice>\n  class initiate_async_write_at_buffer_sequence\n  {\n  public:\n    typedef typename AsyncRandomAccessWriteDevice::executor_type executor_type;\n\n    
explicit initiate_async_write_at_buffer_sequence(\n        AsyncRandomAccessWriteDevice& device)\n      : device_(device)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return device_.get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence,\n        typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        uint64_t offset, const ConstBufferSequence& buffers,\n        ASIO_MOVE_ARG(CompletionCondition) completion_cond) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      non_const_lvalue<WriteHandler> handler2(handler);\n      non_const_lvalue<CompletionCondition> completion_cond2(completion_cond);\n      start_write_at_buffer_sequence_op(device_, offset, buffers,\n          asio::buffer_sequence_begin(buffers),\n          completion_cond2.value, handler2.value);\n    }\n\n  private:\n    AsyncRandomAccessWriteDevice& device_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncRandomAccessWriteDevice,\n    typename ConstBufferSequence, typename ConstBufferIterator,\n    typename CompletionCondition, typename WriteHandler, typename Allocator>\nstruct associated_allocator<\n    detail::write_at_op<AsyncRandomAccessWriteDevice, ConstBufferSequence,\n      ConstBufferIterator, CompletionCondition, WriteHandler>,\n    Allocator>\n{\n  typedef typename associated_allocator<WriteHandler, Allocator>::type type;\n\n  static type get(\n      const detail::write_at_op<AsyncRandomAccessWriteDevice,\n        ConstBufferSequence, ConstBufferIterator,\n        CompletionCondition, WriteHandler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<WriteHandler, Allocator>::get(h.handler_, a);\n 
 }\n};\n\ntemplate <typename AsyncRandomAccessWriteDevice,\n    typename ConstBufferSequence, typename ConstBufferIterator,\n    typename CompletionCondition, typename WriteHandler, typename Executor>\nstruct associated_executor<\n    detail::write_at_op<AsyncRandomAccessWriteDevice, ConstBufferSequence,\n      ConstBufferIterator, CompletionCondition, WriteHandler>,\n    Executor>\n{\n  typedef typename associated_executor<WriteHandler, Executor>::type type;\n\n  static type get(\n      const detail::write_at_op<AsyncRandomAccessWriteDevice,\n        ConstBufferSequence, ConstBufferIterator,\n        CompletionCondition, WriteHandler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<WriteHandler, Executor>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncRandomAccessWriteDevice,\n    typename ConstBufferSequence, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write_at(AsyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_write_at_buffer_sequence<\n        AsyncRandomAccessWriteDevice>(d),\n      handler, offset, buffers,\n      ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\ntemplate <typename AsyncRandomAccessWriteDevice, typename ConstBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write_at(AsyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& 
buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_write_at_buffer_sequence<\n        AsyncRandomAccessWriteDevice>(d),\n      handler, offset, buffers, transfer_all());\n}\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\nnamespace detail\n{\n  template <typename Allocator, typename WriteHandler>\n  class write_at_streambuf_op\n  {\n  public:\n    write_at_streambuf_op(\n        asio::basic_streambuf<Allocator>& streambuf,\n        WriteHandler& handler)\n      : streambuf_(streambuf),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(handler))\n    {\n    }\n\n#if defined(ASIO_HAS_MOVE)\n    write_at_streambuf_op(const write_at_streambuf_op& other)\n      : streambuf_(other.streambuf_),\n        handler_(other.handler_)\n    {\n    }\n\n    write_at_streambuf_op(write_at_streambuf_op&& other)\n      : streambuf_(other.streambuf_),\n        handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_))\n    {\n    }\n#endif // defined(ASIO_HAS_MOVE)\n\n    void operator()(const asio::error_code& ec,\n        const std::size_t bytes_transferred)\n    {\n      streambuf_.consume(bytes_transferred);\n      handler_(ec, bytes_transferred);\n    }\n\n  //private:\n    asio::basic_streambuf<Allocator>& streambuf_;\n    WriteHandler handler_;\n  };\n\n  template <typename Allocator, typename WriteHandler>\n  inline void* asio_handler_allocate(std::size_t size,\n      write_at_streambuf_op<Allocator, WriteHandler>* this_handler)\n  {\n    return asio_handler_alloc_helpers::allocate(\n        size, this_handler->handler_);\n  }\n\n  template <typename Allocator, typename WriteHandler>\n  inline void asio_handler_deallocate(void* pointer, std::size_t size,\n      write_at_streambuf_op<Allocator, WriteHandler>* this_handler)\n  {\n    asio_handler_alloc_helpers::deallocate(\n        pointer, size, this_handler->handler_);\n  }\n\n  template <typename 
Allocator, typename WriteHandler>\n  inline bool asio_handler_is_continuation(\n      write_at_streambuf_op<Allocator, WriteHandler>* this_handler)\n  {\n    return asio_handler_cont_helpers::is_continuation(\n        this_handler->handler_);\n  }\n\n  template <typename Function, typename Allocator, typename WriteHandler>\n  inline void asio_handler_invoke(Function& function,\n      write_at_streambuf_op<Allocator, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename Function, typename Allocator, typename WriteHandler>\n  inline void asio_handler_invoke(const Function& function,\n      write_at_streambuf_op<Allocator, WriteHandler>* this_handler)\n  {\n    asio_handler_invoke_helpers::invoke(\n        function, this_handler->handler_);\n  }\n\n  template <typename AsyncRandomAccessWriteDevice>\n  class initiate_async_write_at_streambuf\n  {\n  public:\n    typedef typename AsyncRandomAccessWriteDevice::executor_type executor_type;\n\n    explicit initiate_async_write_at_streambuf(\n        AsyncRandomAccessWriteDevice& device)\n      : device_(device)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return device_.get_executor();\n    }\n\n    template <typename WriteHandler,\n        typename Allocator, typename CompletionCondition>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        uint64_t offset, basic_streambuf<Allocator>* b,\n        ASIO_MOVE_ARG(CompletionCondition) completion_condition) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      non_const_lvalue<WriteHandler> handler2(handler);\n      async_write_at(device_, offset, b->data(),\n          ASIO_MOVE_CAST(CompletionCondition)(completion_condition),\n          
write_at_streambuf_op<Allocator, typename decay<WriteHandler>::type>(\n            *b, handler2.value));\n    }\n\n  private:\n    AsyncRandomAccessWriteDevice& device_;\n  };\n} // namespace detail\n\n#if !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Allocator, typename WriteHandler, typename Allocator1>\nstruct associated_allocator<\n    detail::write_at_streambuf_op<Allocator, WriteHandler>,\n    Allocator1>\n{\n  typedef typename associated_allocator<WriteHandler, Allocator1>::type type;\n\n  static type get(\n      const detail::write_at_streambuf_op<Allocator, WriteHandler>& h,\n      const Allocator1& a = Allocator1()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<WriteHandler, Allocator1>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Executor, typename WriteHandler, typename Executor1>\nstruct associated_executor<\n    detail::write_at_streambuf_op<Executor, WriteHandler>,\n    Executor1>\n{\n  typedef typename associated_executor<WriteHandler, Executor1>::type type;\n\n  static type get(\n      const detail::write_at_streambuf_op<Executor, WriteHandler>& h,\n      const Executor1& ex = Executor1()) ASIO_NOEXCEPT\n  {\n    return associated_executor<WriteHandler, Executor1>::get(h.handler_, ex);\n  }\n};\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename AsyncRandomAccessWriteDevice,\n    typename Allocator, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write_at(AsyncRandomAccessWriteDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_write_at_streambuf<\n        AsyncRandomAccessWriteDevice>(d),\n      handler, offset, &b,\n 
     ASIO_MOVE_CAST(CompletionCondition)(completion_condition));\n}\n\ntemplate <typename AsyncRandomAccessWriteDevice, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler>\ninline ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write_at(AsyncRandomAccessWriteDevice& d,\n    uint64_t offset, asio::basic_streambuf<Allocator>& b,\n    ASIO_MOVE_ARG(WriteHandler) handler)\n{\n  return async_initiate<WriteHandler,\n    void (asio::error_code, std::size_t)>(\n      detail::initiate_async_write_at_streambuf<\n        AsyncRandomAccessWriteDevice>(d),\n      handler, offset, &b, transfer_all());\n}\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IMPL_WRITE_AT_HPP\n"
  },
  {
    "path": "src/third_party/asio/io_context.hpp",
    "content": "//\n// io_context.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IO_CONTEXT_HPP\n#define ASIO_IO_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <stdexcept>\n#include <typeinfo>\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/wrapped_handler.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#if defined(ASIO_HAS_CHRONO)\n# include \"asio/detail/chrono.hpp\"\n#endif // defined(ASIO_HAS_CHRONO)\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# include \"asio/detail/winsock_init.hpp\"\n#elif defined(__sun) || defined(__QNX__) || defined(__hpux) || defined(_AIX) \\\n  || defined(__osf__)\n# include \"asio/detail/signal_init.hpp\"\n#endif\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail {\n#if defined(ASIO_HAS_IOCP)\n  typedef class win_iocp_io_context io_context_impl;\n  class win_iocp_overlapped_ptr;\n#else\n  typedef class scheduler io_context_impl;\n#endif\n} // namespace detail\n\n/// Provides core I/O functionality.\n/**\n * The io_context class provides the core I/O functionality for users of the\n * asynchronous I/O objects, including:\n *\n * @li asio::ip::tcp::socket\n * @li asio::ip::tcp::acceptor\n * @li asio::ip::udp::socket\n * @li asio::deadline_timer.\n *\n * The io_context class also includes facilities intended for developers of\n * custom asynchronous services.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe, with the specific exceptions of the restart()\n * and notify_fork() functions. 
Calling restart() while there are unfinished\n * run(), run_one(), run_for(), run_until(), poll() or poll_one() calls results\n * in undefined behaviour. The notify_fork() function should not be called\n * while any io_context function, or any function on an I/O object that is\n * associated with the io_context, is being called in another thread.\n *\n * @par Concepts:\n * Dispatcher.\n *\n * @par Synchronous and asynchronous operations\n *\n * Synchronous operations on I/O objects implicitly run the io_context object\n * for an individual operation. The io_context functions run(), run_one(),\n * run_for(), run_until(), poll() or poll_one() must be called for the\n * io_context to perform asynchronous operations on behalf of a C++ program.\n * Notification that an asynchronous operation has completed is delivered by\n * invocation of the associated handler. Handlers are invoked only by a thread\n * that is currently calling any overload of run(), run_one(), run_for(),\n * run_until(), poll() or poll_one() for the io_context.\n *\n * @par Effect of exceptions thrown from handlers\n *\n * If an exception is thrown from a handler, the exception is allowed to\n * propagate through the throwing thread's invocation of run(), run_one(),\n * run_for(), run_until(), poll() or poll_one(). No other threads that are\n * calling any of these functions are affected. It is then the responsibility\n * of the application to catch the exception.\n *\n * After the exception has been caught, the run(), run_one(), run_for(),\n * run_until(), poll() or poll_one() call may be restarted @em without the need\n * for an intervening call to restart(). 
This allows the thread to rejoin the\n * io_context object's thread pool without impacting any other threads in the\n * pool.\n *\n * For example:\n *\n * @code\n * asio::io_context io_context;\n * ...\n * for (;;)\n * {\n *   try\n *   {\n *     io_context.run();\n *     break; // run() exited normally\n *   }\n *   catch (my_exception& e)\n *   {\n *     // Deal with exception as appropriate.\n *   }\n * }\n * @endcode\n *\n * @par Submitting arbitrary tasks to the io_context\n *\n * To submit functions to the io_context, use the @ref asio::dispatch,\n * @ref asio::post or @ref asio::defer free functions.\n *\n * For example:\n *\n * @code void my_task()\n * {\n *   ...\n * }\n *\n * ...\n *\n * asio::io_context io_context;\n *\n * // Submit a function to the io_context.\n * asio::post(io_context, my_task);\n *\n * // Submit a lambda object to the io_context.\n * asio::post(io_context,\n *     []()\n *     {\n *       ...\n *     });\n *\n * // Run the io_context until it runs out of work.\n * io_context.run(); @endcode\n *\n * @par Stopping the io_context from running out of work\n *\n * Some applications may need to prevent an io_context object's run() call from\n * returning when there is no more work to do. For example, the io_context may\n * be being run in a background thread that is launched prior to the\n * application's asynchronous operations. The run() call may be kept running by\n * creating an object of type\n * asio::executor_work_guard<io_context::executor_type>:\n *\n * @code asio::io_context io_context;\n * asio::executor_work_guard<asio::io_context::executor_type>\n *   = asio::make_work_guard(io_context);\n * ... @endcode\n *\n * To effect a shutdown, the application will then need to call the io_context\n * object's stop() member function. 
This will cause the io_context run() call\n * to return as soon as possible, abandoning unfinished operations and without\n * permitting ready handlers to be dispatched.\n *\n * Alternatively, if the application requires that all operations and handlers\n * be allowed to finish normally, the work object may be explicitly reset.\n *\n * @code asio::io_context io_context;\n * asio::executor_work_guard<asio::io_context::executor_type>\n *   = asio::make_work_guard(io_context);\n * ...\n * work.reset(); // Allow run() to exit. @endcode\n */\nclass io_context\n  : public execution_context\n{\nprivate:\n  typedef detail::io_context_impl impl_type;\n#if defined(ASIO_HAS_IOCP)\n  friend class detail::win_iocp_overlapped_ptr;\n#endif\n\npublic:\n  class executor_type;\n  friend class executor_type;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  class work;\n  friend class work;\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  class service;\n\n#if !defined(ASIO_NO_EXTENSIONS)\n  class strand;\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n  /// The type used to count the number of handlers executed by the context.\n  typedef std::size_t count_type;\n\n  /// Constructor.\n  ASIO_DECL io_context();\n\n  /// Constructor.\n  /**\n   * Construct with a hint about the required level of concurrency.\n   *\n   * @param concurrency_hint A suggestion to the implementation on how many\n   * threads it should allow to run simultaneously.\n   */\n  ASIO_DECL explicit io_context(int concurrency_hint);\n\n  /// Destructor.\n  /**\n   * On destruction, the io_context performs the following sequence of\n   * operations:\n   *\n   * @li For each service object @c svc in the io_context set, in reverse order\n   * of the beginning of service object lifetime, performs\n   * @c svc->shutdown().\n   *\n   * @li Uninvoked handler objects that were scheduled for deferred invocation\n   * on the io_context, or any associated strand, are destroyed.\n   *\n   * @li For each service object @c svc in the io_context 
set, in reverse order\n   * of the beginning of service object lifetime, performs\n   * <tt>delete static_cast<io_context::service*>(svc)</tt>.\n   *\n   * @note The destruction sequence described above permits programs to\n   * simplify their resource management by using @c shared_ptr<>. Where an\n   * object's lifetime is tied to the lifetime of a connection (or some other\n   * sequence of asynchronous operations), a @c shared_ptr to the object would\n   * be bound into the handlers for all asynchronous operations associated with\n   * it. This works as follows:\n   *\n   * @li When a single connection ends, all associated asynchronous operations\n   * complete. The corresponding handler objects are destroyed, and all\n   * @c shared_ptr references to the objects are destroyed.\n   *\n   * @li To shut down the whole program, the io_context function stop() is\n   * called to terminate any run() calls as soon as possible. The io_context\n   * destructor defined above destroys all handlers, causing all @c shared_ptr\n   * references to all connection objects to be destroyed.\n   */\n  ASIO_DECL ~io_context();\n\n  /// Obtains the executor associated with the io_context.\n  executor_type get_executor() ASIO_NOEXCEPT;\n\n  /// Run the io_context object's event processing loop.\n  /**\n   * The run() function blocks until all work has finished and there are no\n   * more handlers to be dispatched, or until the io_context has been stopped.\n   *\n   * Multiple threads may call the run() function to set up a pool of threads\n   * from which the io_context may execute handlers. All threads that are\n   * waiting in the pool are equivalent and the io_context may choose any one\n   * of them to invoke a handler.\n   *\n   * A normal exit from the run() function implies that the io_context object\n   * is stopped (the stopped() function returns @c true). 
Subsequent calls to\n   * run(), run_one(), poll() or poll_one() will return immediately unless there\n   * is a prior call to restart().\n   *\n   * @return The number of handlers that were executed.\n   *\n   * @note Calling the run() function from a thread that is currently calling\n   * one of run(), run_one(), run_for(), run_until(), poll() or poll_one() on\n   * the same io_context object may introduce the potential for deadlock. It is\n   * the caller's reponsibility to avoid this.\n   *\n   * The poll() function may also be used to dispatch ready handlers, but\n   * without blocking.\n   */\n  ASIO_DECL count_type run();\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use non-error_code overload.) Run the io_context object's\n  /// event processing loop.\n  /**\n   * The run() function blocks until all work has finished and there are no\n   * more handlers to be dispatched, or until the io_context has been stopped.\n   *\n   * Multiple threads may call the run() function to set up a pool of threads\n   * from which the io_context may execute handlers. All threads that are\n   * waiting in the pool are equivalent and the io_context may choose any one\n   * of them to invoke a handler.\n   *\n   * A normal exit from the run() function implies that the io_context object\n   * is stopped (the stopped() function returns @c true). Subsequent calls to\n   * run(), run_one(), poll() or poll_one() will return immediately unless there\n   * is a prior call to restart().\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of handlers that were executed.\n   *\n   * @note Calling the run() function from a thread that is currently calling\n   * one of run(), run_one(), run_for(), run_until(), poll() or poll_one() on\n   * the same io_context object may introduce the potential for deadlock. 
It is\n   * the caller's reponsibility to avoid this.\n   *\n   * The poll() function may also be used to dispatch ready handlers, but\n   * without blocking.\n   */\n  ASIO_DECL count_type run(asio::error_code& ec);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n#if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n  /// Run the io_context object's event processing loop for a specified\n  /// duration.\n  /**\n   * The run_for() function blocks until all work has finished and there are no\n   * more handlers to be dispatched, until the io_context has been stopped, or\n   * until the specified duration has elapsed.\n   *\n   * @param rel_time The duration for which the call may block.\n   *\n   * @return The number of handlers that were executed.\n   */\n  template <typename Rep, typename Period>\n  std::size_t run_for(const chrono::duration<Rep, Period>& rel_time);\n\n  /// Run the io_context object's event processing loop until a specified time.\n  /**\n   * The run_until() function blocks until all work has finished and there are\n   * no more handlers to be dispatched, until the io_context has been stopped,\n   * or until the specified time has been reached.\n   *\n   * @param abs_time The time point until which the call may block.\n   *\n   * @return The number of handlers that were executed.\n   */\n  template <typename Clock, typename Duration>\n  std::size_t run_until(const chrono::time_point<Clock, Duration>& abs_time);\n#endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n\n  /// Run the io_context object's event processing loop to execute at most one\n  /// handler.\n  /**\n   * The run_one() function blocks until one handler has been dispatched, or\n   * until the io_context has been stopped.\n   *\n   * @return The number of handlers that were executed. A zero return value\n   * implies that the io_context object is stopped (the stopped() function\n   * returns @c true). 
Subsequent calls to run(), run_one(), poll() or\n   * poll_one() will return immediately unless there is a prior call to\n   * restart().\n   *\n   * @note Calling the run_one() function from a thread that is currently\n   * calling one of run(), run_one(), run_for(), run_until(), poll() or\n   * poll_one() on the same io_context object may introduce the potential for\n   * deadlock. It is the caller's reponsibility to avoid this.\n   */\n  ASIO_DECL count_type run_one();\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use non-error_code overlaod.) Run the io_context object's\n  /// event processing loop to execute at most one handler.\n  /**\n   * The run_one() function blocks until one handler has been dispatched, or\n   * until the io_context has been stopped.\n   *\n   * @return The number of handlers that were executed. A zero return value\n   * implies that the io_context object is stopped (the stopped() function\n   * returns @c true). Subsequent calls to run(), run_one(), poll() or\n   * poll_one() will return immediately unless there is a prior call to\n   * restart().\n   *\n   * @return The number of handlers that were executed.\n   *\n   * @note Calling the run_one() function from a thread that is currently\n   * calling one of run(), run_one(), run_for(), run_until(), poll() or\n   * poll_one() on the same io_context object may introduce the potential for\n   * deadlock. 
It is the caller's reponsibility to avoid this.\n   */\n  ASIO_DECL count_type run_one(asio::error_code& ec);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n#if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n  /// Run the io_context object's event processing loop for a specified duration\n  /// to execute at most one handler.\n  /**\n   * The run_one_for() function blocks until one handler has been dispatched,\n   * until the io_context has been stopped, or until the specified duration has\n   * elapsed.\n   *\n   * @param rel_time The duration for which the call may block.\n   *\n   * @return The number of handlers that were executed.\n   */\n  template <typename Rep, typename Period>\n  std::size_t run_one_for(const chrono::duration<Rep, Period>& rel_time);\n\n  /// Run the io_context object's event processing loop until a specified time\n  /// to execute at most one handler.\n  /**\n   * The run_one_until() function blocks until one handler has been dispatched,\n   * until the io_context has been stopped, or until the specified time has\n   * been reached.\n   *\n   * @param abs_time The time point until which the call may block.\n   *\n   * @return The number of handlers that were executed.\n   */\n  template <typename Clock, typename Duration>\n  std::size_t run_one_until(\n      const chrono::time_point<Clock, Duration>& abs_time);\n#endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n\n  /// Run the io_context object's event processing loop to execute ready\n  /// handlers.\n  /**\n   * The poll() function runs handlers that are ready to run, without blocking,\n   * until the io_context has been stopped or there are no more ready handlers.\n   *\n   * @return The number of handlers that were executed.\n   */\n  ASIO_DECL count_type poll();\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use non-error_code overload.) 
Run the io_context object's\n  /// event processing loop to execute ready handlers.\n  /**\n   * The poll() function runs handlers that are ready to run, without blocking,\n   * until the io_context has been stopped or there are no more ready handlers.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of handlers that were executed.\n   */\n  ASIO_DECL count_type poll(asio::error_code& ec);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Run the io_context object's event processing loop to execute one ready\n  /// handler.\n  /**\n   * The poll_one() function runs at most one handler that is ready to run,\n   * without blocking.\n   *\n   * @return The number of handlers that were executed.\n   */\n  ASIO_DECL count_type poll_one();\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use non-error_code overload.) Run the io_context object's\n  /// event processing loop to execute one ready handler.\n  /**\n   * The poll_one() function runs at most one handler that is ready to run,\n   * without blocking.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @return The number of handlers that were executed.\n   */\n  ASIO_DECL count_type poll_one(asio::error_code& ec);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Stop the io_context object's event processing loop.\n  /**\n   * This function does not block, but instead simply signals the io_context to\n   * stop. All invocations of its run() or run_one() member functions should\n   * return as soon as possible. Subsequent calls to run(), run_one(), poll()\n   * or poll_one() will return immediately until restart() is called.\n   */\n  ASIO_DECL void stop();\n\n  /// Determine whether the io_context object has been stopped.\n  /**\n   * This function is used to determine whether an io_context object has been\n   * stopped, either through an explicit call to stop(), or due to running out\n   * of work. 
When an io_context object is stopped, calls to run(), run_one(),\n   * poll() or poll_one() will return immediately without invoking any\n   * handlers.\n   *\n   * @return @c true if the io_context object is stopped, otherwise @c false.\n   */\n  ASIO_DECL bool stopped() const;\n\n  /// Restart the io_context in preparation for a subsequent run() invocation.\n  /**\n   * This function must be called prior to any second or later set of\n   * invocations of the run(), run_one(), poll() or poll_one() functions when a\n   * previous invocation of these functions returned due to the io_context\n   * being stopped or running out of work. After a call to restart(), the\n   * io_context object's stopped() function will return @c false.\n   *\n   * This function must not be called while there are any unfinished calls to\n   * the run(), run_one(), poll() or poll_one() functions.\n   */\n  ASIO_DECL void restart();\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use restart().) Reset the io_context in preparation for a\n  /// subsequent run() invocation.\n  /**\n   * This function must be called prior to any second or later set of\n   * invocations of the run(), run_one(), poll() or poll_one() functions when a\n   * previous invocation of these functions returned due to the io_context\n   * being stopped or running out of work. After a call to restart(), the\n   * io_context object's stopped() function will return @c false.\n   *\n   * This function must not be called while there are any unfinished calls to\n   * the run(), run_one(), poll() or poll_one() functions.\n   */\n  void reset();\n\n  /// (Deprecated: Use asio::dispatch().) 
Request the io_context to\n  /// invoke the given handler.\n  /**\n   * This function is used to ask the io_context to execute the given handler.\n   *\n   * The io_context guarantees that the handler will only be called in a thread\n   * in which the run(), run_one(), poll() or poll_one() member functions is\n   * currently being invoked. The handler may be executed inside this function\n   * if the guarantee can be met.\n   *\n   * @param handler The handler to be called. The io_context will make\n   * a copy of the handler object as required. The function signature of the\n   * handler must be: @code void handler(); @endcode\n   *\n   * @note This function throws an exception only if:\n   *\n   * @li the handler's @c asio_handler_allocate function; or\n   *\n   * @li the handler's copy constructor\n   *\n   * throws an exception.\n   */\n  template <typename LegacyCompletionHandler>\n  ASIO_INITFN_AUTO_RESULT_TYPE(LegacyCompletionHandler, void ())\n  dispatch(ASIO_MOVE_ARG(LegacyCompletionHandler) handler);\n\n  /// (Deprecated: Use asio::post().) Request the io_context to invoke\n  /// the given handler and return immediately.\n  /**\n   * This function is used to ask the io_context to execute the given handler,\n   * but without allowing the io_context to call the handler from inside this\n   * function.\n   *\n   * The io_context guarantees that the handler will only be called in a thread\n   * in which the run(), run_one(), poll() or poll_one() member functions is\n   * currently being invoked.\n   *\n   * @param handler The handler to be called. The io_context will make\n   * a copy of the handler object as required. 
The function signature of the\n   * handler must be: @code void handler(); @endcode\n   *\n   * @note This function throws an exception only if:\n   *\n   * @li the handler's @c asio_handler_allocate function; or\n   *\n   * @li the handler's copy constructor\n   *\n   * throws an exception.\n   */\n  template <typename LegacyCompletionHandler>\n  ASIO_INITFN_AUTO_RESULT_TYPE(LegacyCompletionHandler, void ())\n  post(ASIO_MOVE_ARG(LegacyCompletionHandler) handler);\n\n  /// (Deprecated: Use asio::bind_executor().) Create a new handler that\n  /// automatically dispatches the wrapped handler on the io_context.\n  /**\n   * This function is used to create a new handler function object that, when\n   * invoked, will automatically pass the wrapped handler to the io_context\n   * object's dispatch function.\n   *\n   * @param handler The handler to be wrapped. The io_context will make a copy\n   * of the handler object as required. The function signature of the handler\n   * must be: @code void handler(A1 a1, ... An an); @endcode\n   *\n   * @return A function object that, when invoked, passes the wrapped handler to\n   * the io_context object's dispatch function. Given a function object with the\n   * signature:\n   * @code R f(A1 a1, ... An an); @endcode\n   * If this function object is passed to the wrap function like so:\n   * @code io_context.wrap(f); @endcode\n   * then the return value is a function object with the signature\n   * @code void g(A1 a1, ... An an); @endcode\n   * that, when invoked, executes code equivalent to:\n   * @code io_context.dispatch(boost::bind(f, a1, ... 
an)); @endcode\n   */\n  template <typename Handler>\n#if defined(GENERATING_DOCUMENTATION)\n  unspecified\n#else\n  detail::wrapped_handler<io_context&, Handler>\n#endif\n  wrap(Handler handler);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nprivate:\n#if !defined(ASIO_NO_DEPRECATED)\n  struct initiate_dispatch;\n  struct initiate_post;\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  // Helper function to add the implementation.\n  ASIO_DECL impl_type& add_impl(impl_type* impl);\n\n  // Backwards compatible overload for use with services derived from\n  // io_context::service.\n  template <typename Service>\n  friend Service& use_service(io_context& ioc);\n\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  detail::winsock_init<> init_;\n#elif defined(__sun) || defined(__QNX__) || defined(__hpux) || defined(_AIX) \\\n  || defined(__osf__)\n  detail::signal_init<> init_;\n#endif\n\n  // The implementation.\n  impl_type& impl_;\n};\n\n/// Executor used to submit functions to an io_context.\nclass io_context::executor_type\n{\npublic:\n  /// Obtain the underlying execution context.\n  io_context& context() const ASIO_NOEXCEPT;\n\n  /// Inform the io_context that it has some outstanding work to do.\n  /**\n   * This function is used to inform the io_context that some work has begun.\n   * This ensures that the io_context's run() and run_one() functions do not\n   * exit while the work is underway.\n   */\n  void on_work_started() const ASIO_NOEXCEPT;\n\n  /// Inform the io_context that some work is no longer outstanding.\n  /**\n   * This function is used to inform the io_context that some work has\n   * finished. Once the count of unfinished work reaches zero, the io_context\n   * is stopped and the run() and run_one() functions may exit.\n   */\n  void on_work_finished() const ASIO_NOEXCEPT;\n\n  /// Request the io_context to invoke the given function object.\n  /**\n   * This function is used to ask the io_context to execute the given function\n   * object. 
If the current thread is running the io_context, @c dispatch()\n   * executes the function before returning. Otherwise, the function will be\n   * scheduled to run on the io_context.\n   *\n   * @param f The function object to be called. The executor will make a copy\n   * of the handler object as required. The function signature of the function\n   * object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Request the io_context to invoke the given function object.\n  /**\n   * This function is used to ask the io_context to execute the given function\n   * object. The function object will never be executed inside @c post().\n   * Instead, it will be scheduled to run on the io_context.\n   *\n   * @param f The function object to be called. The executor will make a copy\n   * of the handler object as required. The function signature of the function\n   * object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Request the io_context to invoke the given function object.\n  /**\n   * This function is used to ask the io_context to execute the given function\n   * object. The function object will never be executed inside @c defer().\n   * Instead, it will be scheduled to run on the io_context.\n   *\n   * If the current thread belongs to the io_context, @c defer() will delay\n   * scheduling the function object until the current thread returns control to\n   * the pool.\n   *\n   * @param f The function object to be called. 
The executor will make a copy\n   * of the handler object as required. The function signature of the function\n   * object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Determine whether the io_context is running in the current thread.\n  /**\n   * @return @c true if the current thread is running the io_context. Otherwise\n   * returns @c false.\n   */\n  bool running_in_this_thread() const ASIO_NOEXCEPT;\n\n  /// Compare two executors for equality.\n  /**\n   * Two executors are equal if they refer to the same underlying io_context.\n   */\n  friend bool operator==(const executor_type& a,\n      const executor_type& b) ASIO_NOEXCEPT\n  {\n    return &a.io_context_ == &b.io_context_;\n  }\n\n  /// Compare two executors for inequality.\n  /**\n   * Two executors are equal if they refer to the same underlying io_context.\n   */\n  friend bool operator!=(const executor_type& a,\n      const executor_type& b) ASIO_NOEXCEPT\n  {\n    return &a.io_context_ != &b.io_context_;\n  }\n\nprivate:\n  friend class io_context;\n\n  // Constructor.\n  explicit executor_type(io_context& i) : io_context_(i) {}\n\n  // The underlying io_context.\n  io_context& io_context_;\n};\n\n#if !defined(ASIO_NO_DEPRECATED)\n/// (Deprecated: Use executor_work_guard.) Class to inform the io_context when\n/// it has work to do.\n/**\n * The work class is used to inform the io_context when work starts and\n * finishes. This ensures that the io_context object's run() function will not\n * exit while work is underway, and that it does exit when there is no\n * unfinished work remaining.\n *\n * The work class is copy-constructible so that it may be used as a data member\n * in a handler class. 
It is not assignable.\n */\nclass io_context::work\n{\npublic:\n  /// Constructor notifies the io_context that work is starting.\n  /**\n   * The constructor is used to inform the io_context that some work has begun.\n   * This ensures that the io_context object's run() function will not exit\n   * while the work is underway.\n   */\n  explicit work(asio::io_context& io_context);\n\n  /// Copy constructor notifies the io_context that work is starting.\n  /**\n   * The constructor is used to inform the io_context that some work has begun.\n   * This ensures that the io_context object's run() function will not exit\n   * while the work is underway.\n   */\n  work(const work& other);\n\n  /// Destructor notifies the io_context that the work is complete.\n  /**\n   * The destructor is used to inform the io_context that some work has\n   * finished. Once the count of unfinished work reaches zero, the io_context\n   * object's run() function is permitted to exit.\n   */\n  ~work();\n\n  /// Get the io_context associated with the work.\n  asio::io_context& get_io_context();\n\nprivate:\n  // Prevent assignment.\n  void operator=(const work& other);\n\n  // The io_context implementation.\n  detail::io_context_impl& io_context_impl_;\n};\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n/// Base class for all io_context services.\nclass io_context::service\n  : public execution_context::service\n{\npublic:\n  /// Get the io_context object that owns the service.\n  asio::io_context& get_io_context();\n\nprivate:\n  /// Destroy all user-defined handler objects owned by the service.\n  ASIO_DECL virtual void shutdown();\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use shutdown().) 
Destroy all user-defined handler objects\n  /// owned by the service.\n  ASIO_DECL virtual void shutdown_service();\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Handle notification of a fork-related event to perform any necessary\n  /// housekeeping.\n  /**\n   * This function is not a pure virtual so that services only have to\n   * implement it if necessary. The default implementation does nothing.\n   */\n  ASIO_DECL virtual void notify_fork(\n      execution_context::fork_event event);\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use notify_fork().) Handle notification of a fork-related\n  /// event to perform any necessary housekeeping.\n  /**\n   * This function is not a pure virtual so that services only have to\n   * implement it if necessary. The default implementation does nothing.\n   */\n  ASIO_DECL virtual void fork_service(\n      execution_context::fork_event event);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nprotected:\n  /// Constructor.\n  /**\n   * @param owner The io_context object that owns the service.\n   */\n  ASIO_DECL service(asio::io_context& owner);\n\n  /// Destructor.\n  ASIO_DECL virtual ~service();\n};\n\nnamespace detail {\n\n// Special service base class to keep classes header-file only.\ntemplate <typename Type>\nclass service_base\n  : public asio::io_context::service\n{\npublic:\n  static asio::detail::service_id<Type> id;\n\n  // Constructor.\n  service_base(asio::io_context& io_context)\n    : asio::io_context::service(io_context)\n  {\n  }\n};\n\ntemplate <typename Type>\nasio::detail::service_id<Type> service_base<Type>::id;\n\n} // namespace detail\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/io_context.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/io_context.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n// If both io_context.hpp and strand.hpp have been included, automatically\n// include the header file needed for the io_context::strand class.\n#if 
!defined(ASIO_NO_EXTENSIONS)\n# if defined(ASIO_STRAND_HPP)\n#  include \"asio/io_context_strand.hpp\"\n# endif // defined(ASIO_STRAND_HPP)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#endif // ASIO_IO_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/io_context_strand.hpp",
    "content": "//\n// io_context_strand.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IO_CONTEXT_STRAND_HPP\n#define ASIO_IO_CONTEXT_STRAND_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_NO_EXTENSIONS)\n\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/strand_service.hpp\"\n#include \"asio/detail/wrapped_handler.hpp\"\n#include \"asio/io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Provides serialised handler execution.\n/**\n * The io_context::strand class provides the ability to post and dispatch\n * handlers with the guarantee that none of those handlers will execute\n * concurrently.\n *\n * @par Order of handler invocation\n * Given:\n *\n * @li a strand object @c s\n *\n * @li an object @c a meeting completion handler requirements\n *\n * @li an object @c a1 which is an arbitrary copy of @c a made by the\n * implementation\n *\n * @li an object @c b meeting completion handler requirements\n *\n * @li an object @c b1 which is an arbitrary copy of @c b made by the\n * implementation\n *\n * if any of the following conditions are true:\n *\n * @li @c s.post(a) happens-before @c s.post(b)\n * \n * @li @c s.post(a) happens-before @c s.dispatch(b), where the latter is\n * performed outside the strand\n * \n * @li @c s.dispatch(a) happens-before @c s.post(b), where the former is\n * performed outside the strand\n * \n * @li @c s.dispatch(a) happens-before @c s.dispatch(b), where both are\n * performed outside the strand\n *   \n * then @c asio_handler_invoke(a1, &a1) happens-before\n * @c 
asio_handler_invoke(b1, &b1).\n * \n * Note that in the following case:\n * @code async_op_1(..., s.wrap(a));\n * async_op_2(..., s.wrap(b)); @endcode\n * the completion of the first async operation will perform @c s.dispatch(a),\n * and the second will perform @c s.dispatch(b), but the order in which those\n * are performed is unspecified. That is, you cannot state whether one\n * happens-before the other. Therefore none of the above conditions are met and\n * no ordering guarantee is made.\n *\n * @note The implementation makes no guarantee that handlers posted or\n * dispatched through different @c strand objects will be invoked concurrently.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Dispatcher.\n */\nclass io_context::strand\n{\npublic:\n  /// Constructor.\n  /**\n   * Constructs the strand.\n   *\n   * @param io_context The io_context object that the strand will use to\n   * dispatch handlers that are ready to be run.\n   */\n  explicit strand(asio::io_context& io_context)\n    : service_(asio::use_service<\n        asio::detail::strand_service>(io_context))\n  {\n    service_.construct(impl_);\n  }\n\n  /// Destructor.\n  /**\n   * Destroys a strand.\n   *\n   * Handlers posted through the strand that have not yet been invoked will\n   * still be dispatched in a way that meets the guarantee of non-concurrency.\n   */\n  ~strand()\n  {\n  }\n\n  /// Obtain the underlying execution context.\n  asio::io_context& context() const ASIO_NOEXCEPT\n  {\n    return service_.get_io_context();\n  }\n\n  /// Inform the strand that it has some outstanding work to do.\n  /**\n   * The strand delegates this call to its underlying io_context.\n   */\n  void on_work_started() const ASIO_NOEXCEPT\n  {\n    context().get_executor().on_work_started();\n  }\n\n  /// Inform the strand that some work is no longer outstanding.\n  /**\n   * The strand delegates this call to its underlying io_context.\n   
*/\n  void on_work_finished() const ASIO_NOEXCEPT\n  {\n    context().get_executor().on_work_finished();\n  }\n\n  /// Request the strand to invoke the given function object.\n  /**\n   * This function is used to ask the strand to execute the given function\n   * object on its underlying io_context. The function object will be executed\n   * inside this function if the strand is not otherwise busy and if the\n   * underlying io_context's executor's @c dispatch() function is also able to\n   * execute the function before returning.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n  {\n    typename decay<Function>::type tmp(ASIO_MOVE_CAST(Function)(f));\n    service_.dispatch(impl_, tmp);\n    (void)a;\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use asio::dispatch().) Request the strand to invoke\n  /// the given handler.\n  /**\n   * This function is used to ask the strand to execute the given handler.\n   *\n   * The strand object guarantees that handlers posted or dispatched through\n   * the strand will not be executed concurrently. The handler may be executed\n   * inside this function if the guarantee can be met. If this function is\n   * called from within a handler that was posted or dispatched through the same\n   * strand, then the new handler will be executed immediately.\n   *\n   * The strand's guarantee is in addition to the guarantee provided by the\n   * underlying io_context. 
The io_context guarantees that the handler will only\n   * be called in a thread in which the io_context's run member function is\n   * currently being invoked.\n   *\n   * @param handler The handler to be called. The strand will make a copy of the\n   * handler object as required. The function signature of the handler must be:\n   * @code void handler(); @endcode\n   */\n  template <typename LegacyCompletionHandler>\n  ASIO_INITFN_AUTO_RESULT_TYPE(LegacyCompletionHandler, void ())\n  dispatch(ASIO_MOVE_ARG(LegacyCompletionHandler) handler)\n  {\n    return async_initiate<LegacyCompletionHandler, void ()>(\n        initiate_dispatch(), handler, this);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Request the strand to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object will never be executed inside this function.\n   * Instead, it will be scheduled to run by the underlying io_context.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n  {\n    typename decay<Function>::type tmp(ASIO_MOVE_CAST(Function)(f));\n    service_.post(impl_, tmp);\n    (void)a;\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use asio::post().) 
Request the strand to invoke the\n  /// given handler and return immediately.\n  /**\n   * This function is used to ask the strand to execute the given handler, but\n   * without allowing the strand to call the handler from inside this function.\n   *\n   * The strand object guarantees that handlers posted or dispatched through\n   * the strand will not be executed concurrently. The strand's guarantee is in\n   * addition to the guarantee provided by the underlying io_context. The\n   * io_context guarantees that the handler will only be called in a thread in\n   * which the io_context's run member function is currently being invoked.\n   *\n   * @param handler The handler to be called. The strand will make a copy of the\n   * handler object as required. The function signature of the handler must be:\n   * @code void handler(); @endcode\n   */\n  template <typename LegacyCompletionHandler>\n  ASIO_INITFN_AUTO_RESULT_TYPE(LegacyCompletionHandler, void ())\n  post(ASIO_MOVE_ARG(LegacyCompletionHandler) handler)\n  {\n    return async_initiate<LegacyCompletionHandler, void ()>(\n        initiate_post(), handler, this);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Request the strand to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object will never be executed inside this function.\n   * Instead, it will be scheduled to run by the underlying io_context.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. 
The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n  {\n    typename decay<Function>::type tmp(ASIO_MOVE_CAST(Function)(f));\n    service_.post(impl_, tmp);\n    (void)a;\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use asio::bind_executor().) Create a new handler that\n  /// automatically dispatches the wrapped handler on the strand.\n  /**\n   * This function is used to create a new handler function object that, when\n   * invoked, will automatically pass the wrapped handler to the strand's\n   * dispatch function.\n   *\n   * @param handler The handler to be wrapped. The strand will make a copy of\n   * the handler object as required. The function signature of the handler must\n   * be: @code void handler(A1 a1, ... An an); @endcode\n   *\n   * @return A function object that, when invoked, passes the wrapped handler to\n   * the strand's dispatch function. Given a function object with the signature:\n   * @code R f(A1 a1, ... An an); @endcode\n   * If this function object is passed to the wrap function like so:\n   * @code strand.wrap(f); @endcode\n   * then the return value is a function object with the signature\n   * @code void g(A1 a1, ... An an); @endcode\n   * that, when invoked, executes code equivalent to:\n   * @code strand.dispatch(boost::bind(f, a1, ... 
an)); @endcode\n   */\n  template <typename Handler>\n#if defined(GENERATING_DOCUMENTATION)\n  unspecified\n#else\n  detail::wrapped_handler<strand, Handler, detail::is_continuation_if_running>\n#endif\n  wrap(Handler handler)\n  {\n    return detail::wrapped_handler<io_context::strand, Handler,\n        detail::is_continuation_if_running>(*this, handler);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Determine whether the strand is running in the current thread.\n  /**\n   * @return @c true if the current thread is executing a handler that was\n   * submitted to the strand using post(), dispatch() or wrap(). Otherwise\n   * returns @c false.\n   */\n  bool running_in_this_thread() const ASIO_NOEXCEPT\n  {\n    return service_.running_in_this_thread(impl_);\n  }\n\n  /// Compare two strands for equality.\n  /**\n   * Two strands are equal if they refer to the same ordered, non-concurrent\n   * state.\n   */\n  friend bool operator==(const strand& a, const strand& b) ASIO_NOEXCEPT\n  {\n    return a.impl_ == b.impl_;\n  }\n\n  /// Compare two strands for inequality.\n  /**\n   * Two strands are equal if they refer to the same ordered, non-concurrent\n   * state.\n   */\n  friend bool operator!=(const strand& a, const strand& b) ASIO_NOEXCEPT\n  {\n    return a.impl_ != b.impl_;\n  }\n\nprivate:\n#if !defined(ASIO_NO_DEPRECATED)\n  struct initiate_dispatch\n  {\n    template <typename LegacyCompletionHandler>\n    void operator()(ASIO_MOVE_ARG(LegacyCompletionHandler) handler,\n        strand* self) const\n    {\n      // If you get an error on the following line it means that your\n      // handler does not meet the documented type requirements for a\n      // LegacyCompletionHandler.\n      ASIO_LEGACY_COMPLETION_HANDLER_CHECK(\n          LegacyCompletionHandler, handler) type_check;\n\n      detail::non_const_lvalue<LegacyCompletionHandler> handler2(handler);\n      self->service_.dispatch(self->impl_, handler2.value);\n    }\n  };\n\n  struct 
initiate_post\n  {\n    template <typename LegacyCompletionHandler>\n    void operator()(ASIO_MOVE_ARG(LegacyCompletionHandler) handler,\n        strand* self) const\n    {\n      // If you get an error on the following line it means that your\n      // handler does not meet the documented type requirements for a\n      // LegacyCompletionHandler.\n      ASIO_LEGACY_COMPLETION_HANDLER_CHECK(\n          LegacyCompletionHandler, handler) type_check;\n\n      detail::non_const_lvalue<LegacyCompletionHandler> handler2(handler);\n      self->service_.post(self->impl_, handler2.value);\n    }\n  };\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  asio::detail::strand_service& service_;\n  mutable asio::detail::strand_service::implementation_type impl_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#endif // ASIO_IO_CONTEXT_STRAND_HPP\n"
  },
  {
    "path": "src/third_party/asio/io_service.hpp",
    "content": "//\n// io_service.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IO_SERVICE_HPP\n#define ASIO_IO_SERVICE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if !defined(ASIO_NO_DEPRECATED)\n/// Typedef for backwards compatibility.\ntypedef io_context io_service;\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IO_SERVICE_HPP\n"
  },
  {
    "path": "src/third_party/asio/io_service_strand.hpp",
    "content": "//\n// io_service_strand.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IO_SERVICE_STRAND_HPP\n#define ASIO_IO_SERVICE_STRAND_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/io_context_strand.hpp\"\n\n#endif // ASIO_IO_SERVICE_STRAND_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/address.hpp",
    "content": "//\n// ip/address.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_ADDRESS_HPP\n#define ASIO_IP_ADDRESS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/detail/string_view.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/ip/address_v4.hpp\"\n#include \"asio/ip/address_v6.hpp\"\n#include \"asio/ip/bad_address_cast.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n# include <iosfwd>\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Implements version-independent IP addresses.\n/**\n * The asio::ip::address class provides the ability to use either IP\n * version 4 or version 6 addresses.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\nclass address\n{\npublic:\n  /// Default constructor.\n  ASIO_DECL address() ASIO_NOEXCEPT;\n\n  /// Construct an address from an IPv4 address.\n  ASIO_DECL address(\n      const asio::ip::address_v4& ipv4_address) ASIO_NOEXCEPT;\n\n  /// Construct an address from an IPv6 address.\n  ASIO_DECL address(\n      const asio::ip::address_v6& ipv6_address) ASIO_NOEXCEPT;\n\n  /// Copy constructor.\n  ASIO_DECL address(const address& other) ASIO_NOEXCEPT;\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  ASIO_DECL address(address&& other) ASIO_NOEXCEPT;\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assign from another address.\n  ASIO_DECL address& operator=(const address& other) ASIO_NOEXCEPT;\n\n#if defined(ASIO_HAS_MOVE)\n  /// 
Move-assign from another address.\n  ASIO_DECL address& operator=(address&& other) ASIO_NOEXCEPT;\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assign from an IPv4 address.\n  ASIO_DECL address& operator=(\n      const asio::ip::address_v4& ipv4_address) ASIO_NOEXCEPT;\n\n  /// Assign from an IPv6 address.\n  ASIO_DECL address& operator=(\n      const asio::ip::address_v6& ipv6_address) ASIO_NOEXCEPT;\n\n  /// Get whether the address is an IP version 4 address.\n  bool is_v4() const ASIO_NOEXCEPT\n  {\n    return type_ == ipv4;\n  }\n\n  /// Get whether the address is an IP version 6 address.\n  bool is_v6() const ASIO_NOEXCEPT\n  {\n    return type_ == ipv6;\n  }\n\n  /// Get the address as an IP version 4 address.\n  ASIO_DECL asio::ip::address_v4 to_v4() const;\n\n  /// Get the address as an IP version 6 address.\n  ASIO_DECL asio::ip::address_v6 to_v6() const;\n\n  /// Get the address as a string.\n  ASIO_DECL std::string to_string() const;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use other overload.) Get the address as a string.\n  ASIO_DECL std::string to_string(asio::error_code& ec) const;\n\n  /// (Deprecated: Use make_address().) Create an address from an IPv4 address\n  /// string in dotted decimal form, or from an IPv6 address in hexadecimal\n  /// notation.\n  static address from_string(const char* str);\n\n  /// (Deprecated: Use make_address().) Create an address from an IPv4 address\n  /// string in dotted decimal form, or from an IPv6 address in hexadecimal\n  /// notation.\n  static address from_string(const char* str, asio::error_code& ec);\n\n  /// (Deprecated: Use make_address().) Create an address from an IPv4 address\n  /// string in dotted decimal form, or from an IPv6 address in hexadecimal\n  /// notation.\n  static address from_string(const std::string& str);\n\n  /// (Deprecated: Use make_address().) 
Create an address from an IPv4 address\n  /// string in dotted decimal form, or from an IPv6 address in hexadecimal\n  /// notation.\n  static address from_string(\n      const std::string& str, asio::error_code& ec);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Determine whether the address is a loopback address.\n  ASIO_DECL bool is_loopback() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is unspecified.\n  ASIO_DECL bool is_unspecified() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is a multicast address.\n  ASIO_DECL bool is_multicast() const ASIO_NOEXCEPT;\n\n  /// Compare two addresses for equality.\n  ASIO_DECL friend bool operator==(const address& a1,\n      const address& a2) ASIO_NOEXCEPT;\n\n  /// Compare two addresses for inequality.\n  friend bool operator!=(const address& a1,\n      const address& a2) ASIO_NOEXCEPT\n  {\n    return !(a1 == a2);\n  }\n\n  /// Compare addresses for ordering.\n  ASIO_DECL friend bool operator<(const address& a1,\n      const address& a2) ASIO_NOEXCEPT;\n\n  /// Compare addresses for ordering.\n  friend bool operator>(const address& a1,\n      const address& a2) ASIO_NOEXCEPT\n  {\n    return a2 < a1;\n  }\n\n  /// Compare addresses for ordering.\n  friend bool operator<=(const address& a1,\n      const address& a2) ASIO_NOEXCEPT\n  {\n    return !(a2 < a1);\n  }\n\n  /// Compare addresses for ordering.\n  friend bool operator>=(const address& a1,\n      const address& a2) ASIO_NOEXCEPT\n  {\n    return !(a1 < a2);\n  }\n\nprivate:\n  // The type of the address.\n  enum { ipv4, ipv6 } type_;\n\n  // The underlying IPv4 address.\n  asio::ip::address_v4 ipv4_address_;\n\n  // The underlying IPv6 address.\n  asio::ip::address_v6 ipv6_address_;\n};\n\n/// Create an address from an IPv4 address string in dotted decimal form,\n/// or from an IPv6 address in hexadecimal notation.\n/**\n * @relates address\n */\nASIO_DECL address make_address(const char* str);\n\n/// Create an address from an IPv4 
address string in dotted decimal form,\n/// or from an IPv6 address in hexadecimal notation.\n/**\n * @relates address\n */\nASIO_DECL address make_address(const char* str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n/// Create an address from an IPv4 address string in dotted decimal form,\n/// or from an IPv6 address in hexadecimal notation.\n/**\n * @relates address\n */\nASIO_DECL address make_address(const std::string& str);\n\n/// Create an address from an IPv4 address string in dotted decimal form,\n/// or from an IPv6 address in hexadecimal notation.\n/**\n * @relates address\n */\nASIO_DECL address make_address(const std::string& str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n#if defined(ASIO_HAS_STRING_VIEW) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Create an address from an IPv4 address string in dotted decimal form,\n/// or from an IPv6 address in hexadecimal notation.\n/**\n * @relates address\n */\nASIO_DECL address make_address(string_view str);\n\n/// Create an address from an IPv4 address string in dotted decimal form,\n/// or from an IPv6 address in hexadecimal notation.\n/**\n * @relates address\n */\nASIO_DECL address make_address(string_view str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n       //  || defined(GENERATING_DOCUMENTATION)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Output an address as a string.\n/**\n * Used to output a human-readable string for a specified address.\n *\n * @param os The output stream to which the string will be written.\n *\n * @param addr The address to be written.\n *\n * @return The output stream.\n *\n * @relates asio::ip::address\n */\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const address& addr);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/ip/impl/address.hpp\"\n#if 
defined(ASIO_HEADER_ONLY)\n# include \"asio/ip/impl/address.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_IP_ADDRESS_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/address_v4.hpp",
    "content": "//\n// ip/address_v4.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_ADDRESS_V4_HPP\n#define ASIO_IP_ADDRESS_V4_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/detail/array.hpp\"\n#include \"asio/detail/cstdint.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/string_view.hpp\"\n#include \"asio/detail/winsock_init.hpp\"\n#include \"asio/error_code.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n# include <iosfwd>\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Implements IP version 4 style addresses.\n/**\n * The asio::ip::address_v4 class provides the ability to use and\n * manipulate IP version 4 addresses.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\nclass address_v4\n{\npublic:\n  /// The type used to represent an address as an unsigned integer.\n  typedef uint_least32_t uint_type;\n\n  /// The type used to represent an address as an array of bytes.\n  /**\n   * @note This type is defined in terms of the C++0x template @c std::array\n   * when it is available. 
Otherwise, it uses @c boost:array.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef array<unsigned char, 4> bytes_type;\n#else\n  typedef asio::detail::array<unsigned char, 4> bytes_type;\n#endif\n\n  /// Default constructor.\n  address_v4() ASIO_NOEXCEPT\n  {\n    addr_.s_addr = 0;\n  }\n\n  /// Construct an address from raw bytes.\n  ASIO_DECL explicit address_v4(const bytes_type& bytes);\n\n  /// Construct an address from an unsigned integer in host byte order.\n  ASIO_DECL explicit address_v4(uint_type addr);\n\n  /// Copy constructor.\n  address_v4(const address_v4& other) ASIO_NOEXCEPT\n    : addr_(other.addr_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  address_v4(address_v4&& other) ASIO_NOEXCEPT\n    : addr_(other.addr_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assign from another address.\n  address_v4& operator=(const address_v4& other) ASIO_NOEXCEPT\n  {\n    addr_ = other.addr_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move-assign from another address.\n  address_v4& operator=(address_v4&& other) ASIO_NOEXCEPT\n  {\n    addr_ = other.addr_;\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Get the address in bytes, in network byte order.\n  ASIO_DECL bytes_type to_bytes() const ASIO_NOEXCEPT;\n\n  /// Get the address as an unsigned integer in host byte order\n  ASIO_DECL uint_type to_uint() const ASIO_NOEXCEPT;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// Get the address as an unsigned long in host byte order\n  ASIO_DECL unsigned long to_ulong() const;\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Get the address as a string in dotted decimal format.\n  ASIO_DECL std::string to_string() const;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use other overload.) Get the address as a string in dotted\n  /// decimal format.\n  ASIO_DECL std::string to_string(asio::error_code& ec) const;\n\n  /// (Deprecated: Use make_address_v4().) 
Create an address from an IP address\n  /// string in dotted decimal form.\n  static address_v4 from_string(const char* str);\n\n  /// (Deprecated: Use make_address_v4().) Create an address from an IP address\n  /// string in dotted decimal form.\n  static address_v4 from_string(\n      const char* str, asio::error_code& ec);\n\n  /// (Deprecated: Use make_address_v4().) Create an address from an IP address\n  /// string in dotted decimal form.\n  static address_v4 from_string(const std::string& str);\n\n  /// (Deprecated: Use make_address_v4().) Create an address from an IP address\n  /// string in dotted decimal form.\n  static address_v4 from_string(\n      const std::string& str, asio::error_code& ec);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Determine whether the address is a loopback address.\n  ASIO_DECL bool is_loopback() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is unspecified.\n  ASIO_DECL bool is_unspecified() const ASIO_NOEXCEPT;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use network_v4 class.) Determine whether the address is a\n  /// class A address.\n  ASIO_DECL bool is_class_a() const;\n\n  /// (Deprecated: Use network_v4 class.) Determine whether the address is a\n  /// class B address.\n  ASIO_DECL bool is_class_b() const;\n\n  /// (Deprecated: Use network_v4 class.) 
Determine whether the address is a\n  /// class C address.\n  ASIO_DECL bool is_class_c() const;\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Determine whether the address is a multicast address.\n  ASIO_DECL bool is_multicast() const ASIO_NOEXCEPT;\n\n  /// Compare two addresses for equality.\n  friend bool operator==(const address_v4& a1,\n      const address_v4& a2) ASIO_NOEXCEPT\n  {\n    return a1.addr_.s_addr == a2.addr_.s_addr;\n  }\n\n  /// Compare two addresses for inequality.\n  friend bool operator!=(const address_v4& a1,\n      const address_v4& a2) ASIO_NOEXCEPT\n  {\n    return a1.addr_.s_addr != a2.addr_.s_addr;\n  }\n\n  /// Compare addresses for ordering.\n  friend bool operator<(const address_v4& a1,\n      const address_v4& a2) ASIO_NOEXCEPT\n  {\n    return a1.to_uint() < a2.to_uint();\n  }\n\n  /// Compare addresses for ordering.\n  friend bool operator>(const address_v4& a1,\n      const address_v4& a2) ASIO_NOEXCEPT\n  {\n    return a1.to_uint() > a2.to_uint();\n  }\n\n  /// Compare addresses for ordering.\n  friend bool operator<=(const address_v4& a1,\n      const address_v4& a2) ASIO_NOEXCEPT\n  {\n    return a1.to_uint() <= a2.to_uint();\n  }\n\n  /// Compare addresses for ordering.\n  friend bool operator>=(const address_v4& a1,\n      const address_v4& a2) ASIO_NOEXCEPT\n  {\n    return a1.to_uint() >= a2.to_uint();\n  }\n\n  /// Obtain an address object that represents any address.\n  static address_v4 any() ASIO_NOEXCEPT\n  {\n    return address_v4();\n  }\n\n  /// Obtain an address object that represents the loopback address.\n  static address_v4 loopback() ASIO_NOEXCEPT\n  {\n    return address_v4(0x7F000001);\n  }\n\n  /// Obtain an address object that represents the broadcast address.\n  static address_v4 broadcast() ASIO_NOEXCEPT\n  {\n    return address_v4(0xFFFFFFFF);\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use network_v4 class.) 
Obtain an address object that\n  /// represents the broadcast address that corresponds to the specified\n  /// address and netmask.\n  ASIO_DECL static address_v4 broadcast(\n      const address_v4& addr, const address_v4& mask);\n\n  /// (Deprecated: Use network_v4 class.) Obtain the netmask that corresponds\n  /// to the address, based on its address class.\n  ASIO_DECL static address_v4 netmask(const address_v4& addr);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nprivate:\n  // The underlying IPv4 address.\n  asio::detail::in4_addr_type addr_;\n};\n\n/// Create an IPv4 address from raw bytes in network order.\n/**\n * @relates address_v4\n */\ninline address_v4 make_address_v4(const address_v4::bytes_type& bytes)\n{\n  return address_v4(bytes);\n}\n\n/// Create an IPv4 address from an unsigned integer in host byte order.\n/**\n * @relates address_v4\n */\ninline address_v4 make_address_v4(address_v4::uint_type addr)\n{\n  return address_v4(addr);\n}\n\n/// Create an IPv4 address from an IP address string in dotted decimal form.\n/**\n * @relates address_v4\n */\nASIO_DECL address_v4 make_address_v4(const char* str);\n\n/// Create an IPv4 address from an IP address string in dotted decimal form.\n/**\n * @relates address_v4\n */\nASIO_DECL address_v4 make_address_v4(const char* str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n/// Create an IPv4 address from an IP address string in dotted decimal form.\n/**\n * @relates address_v4\n */\nASIO_DECL address_v4 make_address_v4(const std::string& str);\n\n/// Create an IPv4 address from an IP address string in dotted decimal form.\n/**\n * @relates address_v4\n */\nASIO_DECL address_v4 make_address_v4(const std::string& str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n#if defined(ASIO_HAS_STRING_VIEW) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Create an IPv4 address from an IP address string in dotted decimal form.\n/**\n * @relates address_v4\n */\nASIO_DECL address_v4 make_address_v4(string_view str);\n\n/// 
Create an IPv4 address from an IP address string in dotted decimal form.\n/**\n * @relates address_v4\n */\nASIO_DECL address_v4 make_address_v4(string_view str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n       //  || defined(GENERATING_DOCUMENTATION)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Output an address as a string.\n/**\n * Used to output a human-readable string for a specified address.\n *\n * @param os The output stream to which the string will be written.\n *\n * @param addr The address to be written.\n *\n * @return The output stream.\n *\n * @relates asio::ip::address_v4\n */\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const address_v4& addr);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/ip/impl/address_v4.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ip/impl/address_v4.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_IP_ADDRESS_V4_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/address_v4_iterator.hpp",
    "content": "//\n// ip/address_v4_iterator.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_ADDRESS_V4_ITERATOR_HPP\n#define ASIO_IP_ADDRESS_V4_ITERATOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ip/address_v4.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\ntemplate <typename> class basic_address_iterator;\n\n/// An input iterator that can be used for traversing IPv4 addresses.\n/**\n * In addition to satisfying the input iterator requirements, this iterator\n * also supports decrement.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <> class basic_address_iterator<address_v4>\n{\npublic:\n  /// The type of the elements pointed to by the iterator.\n  typedef address_v4 value_type;\n\n  /// Distance between two iterators.\n  typedef std::ptrdiff_t difference_type;\n\n  /// The type of a pointer to an element pointed to by the iterator.\n  typedef const address_v4* pointer;\n\n  /// The type of a reference to an element pointed to by the iterator.\n  typedef const address_v4& reference;\n\n  /// Denotes that the iterator satisfies the input iterator requirements.\n  typedef std::input_iterator_tag iterator_category;\n\n  /// Construct an iterator that points to the specified address.\n  basic_address_iterator(const address_v4& addr) ASIO_NOEXCEPT\n    : address_(addr)\n  {\n  }\n\n  /// Copy constructor.\n  basic_address_iterator(\n      const basic_address_iterator& other) ASIO_NOEXCEPT\n    : address_(other.address_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  
basic_address_iterator(basic_address_iterator&& other) ASIO_NOEXCEPT\n    : address_(ASIO_MOVE_CAST(address_v4)(other.address_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assignment operator.\n  basic_address_iterator& operator=(\n      const basic_address_iterator& other) ASIO_NOEXCEPT\n  {\n    address_ = other.address_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move assignment operator.\n  basic_address_iterator& operator=(\n      basic_address_iterator&& other) ASIO_NOEXCEPT\n  {\n    address_ = ASIO_MOVE_CAST(address_v4)(other.address_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Dereference the iterator.\n  const address_v4& operator*() const ASIO_NOEXCEPT\n  {\n    return address_;\n  }\n\n  /// Dereference the iterator.\n  const address_v4* operator->() const ASIO_NOEXCEPT\n  {\n    return &address_;\n  }\n\n  /// Pre-increment operator.\n  basic_address_iterator& operator++() ASIO_NOEXCEPT\n  {\n    address_ = address_v4((address_.to_uint() + 1) & 0xFFFFFFFF);\n    return *this;\n  }\n\n  /// Post-increment operator.\n  basic_address_iterator operator++(int) ASIO_NOEXCEPT\n  {\n    basic_address_iterator tmp(*this);\n    ++*this;\n    return tmp;\n  }\n\n  /// Pre-decrement operator.\n  basic_address_iterator& operator--() ASIO_NOEXCEPT\n  {\n    address_ = address_v4((address_.to_uint() - 1) & 0xFFFFFFFF);\n    return *this;\n  }\n\n  /// Post-decrement operator.\n  basic_address_iterator operator--(int)\n  {\n    basic_address_iterator tmp(*this);\n    --*this;\n    return tmp;\n  }\n\n  /// Compare two addresses for equality.\n  friend bool operator==(const basic_address_iterator& a,\n      const basic_address_iterator& b)\n  {\n    return a.address_ == b.address_;\n  }\n\n  /// Compare two addresses for inequality.\n  friend bool operator!=(const basic_address_iterator& a,\n      const basic_address_iterator& b)\n  {\n    return a.address_ != b.address_;\n  }\n\nprivate:\n  address_v4 
address_;\n};\n\n/// An input iterator that can be used for traversing IPv4 addresses.\ntypedef basic_address_iterator<address_v4> address_v4_iterator;\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_ADDRESS_V4_ITERATOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/address_v4_range.hpp",
    "content": "//\n// ip/address_v4_range.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_ADDRESS_V4_RANGE_HPP\n#define ASIO_IP_ADDRESS_V4_RANGE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ip/address_v4_iterator.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\ntemplate <typename> class basic_address_range;\n\n/// Represents a range of IPv4 addresses.\n/**\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <> class basic_address_range<address_v4>\n{\npublic:\n  /// The type of an iterator that points into the range.\n  typedef basic_address_iterator<address_v4> iterator;\n\n  /// Construct an empty range.\n  basic_address_range() ASIO_NOEXCEPT\n    : begin_(address_v4()),\n      end_(address_v4())\n  {\n  }\n\n  /// Construct an range that represents the given range of addresses.\n  explicit basic_address_range(const iterator& first,\n      const iterator& last) ASIO_NOEXCEPT\n    : begin_(first),\n      end_(last)\n  {\n  }\n\n  /// Copy constructor.\n  basic_address_range(const basic_address_range& other) ASIO_NOEXCEPT\n    : begin_(other.begin_),\n      end_(other.end_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  basic_address_range(basic_address_range&& other) ASIO_NOEXCEPT\n    : begin_(ASIO_MOVE_CAST(iterator)(other.begin_)),\n      end_(ASIO_MOVE_CAST(iterator)(other.end_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assignment operator.\n  basic_address_range& operator=(\n      const basic_address_range& other) ASIO_NOEXCEPT\n  {\n    begin_ = 
other.begin_;\n    end_ = other.end_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move assignment operator.\n  basic_address_range& operator=(\n      basic_address_range&& other) ASIO_NOEXCEPT\n  {\n    begin_ = ASIO_MOVE_CAST(iterator)(other.begin_);\n    end_ = ASIO_MOVE_CAST(iterator)(other.end_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Obtain an iterator that points to the start of the range.\n  iterator begin() const ASIO_NOEXCEPT\n  {\n    return begin_;\n  }\n\n  /// Obtain an iterator that points to the end of the range.\n  iterator end() const ASIO_NOEXCEPT\n  {\n    return end_;\n  }\n\n  /// Determine whether the range is empty.\n  bool empty() const ASIO_NOEXCEPT\n  {\n    return size() == 0;\n  }\n\n  /// Return the size of the range.\n  std::size_t size() const ASIO_NOEXCEPT\n  {\n    return end_->to_uint() - begin_->to_uint();\n  }\n\n  /// Find an address in the range.\n  iterator find(const address_v4& addr) const ASIO_NOEXCEPT\n  {\n    return addr >= *begin_ && addr < *end_ ? iterator(addr) : end_;\n  }\n\nprivate:\n  iterator begin_;\n  iterator end_;\n};\n\n/// Represents a range of IPv4 addresses.\ntypedef basic_address_range<address_v4> address_v4_range;\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_ADDRESS_V4_RANGE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/address_v6.hpp",
    "content": "//\n// ip/address_v6.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_ADDRESS_V6_HPP\n#define ASIO_IP_ADDRESS_V6_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/detail/array.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/string_view.hpp\"\n#include \"asio/detail/winsock_init.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/ip/address_v4.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n# include <iosfwd>\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\ntemplate <typename> class basic_address_iterator;\n\n/// Implements IP version 6 style addresses.\n/**\n * The asio::ip::address_v6 class provides the ability to use and\n * manipulate IP version 6 addresses.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\nclass address_v6\n{\npublic:\n  /// The type used to represent an address as an array of bytes.\n  /**\n   * @note This type is defined in terms of the C++0x template @c std::array\n   * when it is available. 
Otherwise, it uses @c boost:array.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef array<unsigned char, 16> bytes_type;\n#else\n  typedef asio::detail::array<unsigned char, 16> bytes_type;\n#endif\n\n  /// Default constructor.\n  ASIO_DECL address_v6() ASIO_NOEXCEPT;\n\n  /// Construct an address from raw bytes and scope ID.\n  ASIO_DECL explicit address_v6(const bytes_type& bytes,\n      unsigned long scope_id = 0);\n\n  /// Copy constructor.\n  ASIO_DECL address_v6(const address_v6& other) ASIO_NOEXCEPT;\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  ASIO_DECL address_v6(address_v6&& other) ASIO_NOEXCEPT;\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assign from another address.\n  ASIO_DECL address_v6& operator=(\n      const address_v6& other) ASIO_NOEXCEPT;\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move-assign from another address.\n  ASIO_DECL address_v6& operator=(address_v6&& other) ASIO_NOEXCEPT;\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// The scope ID of the address.\n  /**\n   * Returns the scope ID associated with the IPv6 address.\n   */\n  unsigned long scope_id() const ASIO_NOEXCEPT\n  {\n    return scope_id_;\n  }\n\n  /// The scope ID of the address.\n  /**\n   * Modifies the scope ID associated with the IPv6 address.\n   */\n  void scope_id(unsigned long id) ASIO_NOEXCEPT\n  {\n    scope_id_ = id;\n  }\n\n  /// Get the address in bytes, in network byte order.\n  ASIO_DECL bytes_type to_bytes() const ASIO_NOEXCEPT;\n\n  /// Get the address as a string.\n  ASIO_DECL std::string to_string() const;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use other overload.) Get the address as a string.\n  ASIO_DECL std::string to_string(asio::error_code& ec) const;\n\n  /// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP\n  /// address string.\n  static address_v6 from_string(const char* str);\n\n  /// (Deprecated: Use make_address_v6().) 
Create an IPv6 address from an IP\n  /// address string.\n  static address_v6 from_string(\n      const char* str, asio::error_code& ec);\n\n  /// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP\n  /// address string.\n  static address_v6 from_string(const std::string& str);\n\n  /// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP\n  /// address string.\n  static address_v6 from_string(\n      const std::string& str, asio::error_code& ec);\n\n  /// (Deprecated: Use make_address_v4().) Converts an IPv4-mapped or\n  /// IPv4-compatible address to an IPv4 address.\n  ASIO_DECL address_v4 to_v4() const;\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Determine whether the address is a loopback address.\n  ASIO_DECL bool is_loopback() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is unspecified.\n  ASIO_DECL bool is_unspecified() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is link local.\n  ASIO_DECL bool is_link_local() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is site local.\n  ASIO_DECL bool is_site_local() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is a mapped IPv4 address.\n  ASIO_DECL bool is_v4_mapped() const ASIO_NOEXCEPT;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: No replacement.) 
Determine whether the address is an\n  /// IPv4-compatible address.\n  ASIO_DECL bool is_v4_compatible() const;\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Determine whether the address is a multicast address.\n  ASIO_DECL bool is_multicast() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is a global multicast address.\n  ASIO_DECL bool is_multicast_global() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is a link-local multicast address.\n  ASIO_DECL bool is_multicast_link_local() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is a node-local multicast address.\n  ASIO_DECL bool is_multicast_node_local() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is a org-local multicast address.\n  ASIO_DECL bool is_multicast_org_local() const ASIO_NOEXCEPT;\n\n  /// Determine whether the address is a site-local multicast address.\n  ASIO_DECL bool is_multicast_site_local() const ASIO_NOEXCEPT;\n\n  /// Compare two addresses for equality.\n  ASIO_DECL friend bool operator==(const address_v6& a1,\n      const address_v6& a2) ASIO_NOEXCEPT;\n\n  /// Compare two addresses for inequality.\n  friend bool operator!=(const address_v6& a1,\n      const address_v6& a2) ASIO_NOEXCEPT\n  {\n    return !(a1 == a2);\n  }\n\n  /// Compare addresses for ordering.\n  ASIO_DECL friend bool operator<(const address_v6& a1,\n      const address_v6& a2) ASIO_NOEXCEPT;\n\n  /// Compare addresses for ordering.\n  friend bool operator>(const address_v6& a1,\n      const address_v6& a2) ASIO_NOEXCEPT\n  {\n    return a2 < a1;\n  }\n\n  /// Compare addresses for ordering.\n  friend bool operator<=(const address_v6& a1,\n      const address_v6& a2) ASIO_NOEXCEPT\n  {\n    return !(a2 < a1);\n  }\n\n  /// Compare addresses for ordering.\n  friend bool operator>=(const address_v6& a1,\n      const address_v6& a2) ASIO_NOEXCEPT\n  {\n    return !(a1 < a2);\n  }\n\n  /// Obtain an address object that represents any address.\n  static address_v6 any() 
ASIO_NOEXCEPT\n  {\n    return address_v6();\n  }\n\n  /// Obtain an address object that represents the loopback address.\n  ASIO_DECL static address_v6 loopback() ASIO_NOEXCEPT;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use make_address_v6().) Create an IPv4-mapped IPv6 address.\n  ASIO_DECL static address_v6 v4_mapped(const address_v4& addr);\n\n  /// (Deprecated: No replacement.) Create an IPv4-compatible IPv6 address.\n  ASIO_DECL static address_v6 v4_compatible(const address_v4& addr);\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nprivate:\n  friend class basic_address_iterator<address_v6>;\n\n  // The underlying IPv6 address.\n  asio::detail::in6_addr_type addr_;\n\n  // The scope ID associated with the address.\n  unsigned long scope_id_;\n};\n\n/// Create an IPv6 address from raw bytes and scope ID.\n/**\n * @relates address_v6\n */\ninline address_v6 make_address_v6(const address_v6::bytes_type& bytes,\n    unsigned long scope_id = 0)\n{\n  return address_v6(bytes, scope_id);\n}\n\n/// Create an IPv6 address from an IP address string.\n/**\n * @relates address_v6\n */\nASIO_DECL address_v6 make_address_v6(const char* str);\n\n/// Create an IPv6 address from an IP address string.\n/**\n * @relates address_v6\n */\nASIO_DECL address_v6 make_address_v6(const char* str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n/// Createan IPv6 address from an IP address string.\n/**\n * @relates address_v6\n */\nASIO_DECL address_v6 make_address_v6(const std::string& str);\n\n/// Create an IPv6 address from an IP address string.\n/**\n * @relates address_v6\n */\nASIO_DECL address_v6 make_address_v6(const std::string& str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n#if defined(ASIO_HAS_STRING_VIEW) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Create an IPv6 address from an IP address string.\n/**\n * @relates address_v6\n */\nASIO_DECL address_v6 make_address_v6(string_view str);\n\n/// Create an IPv6 address from an IP address string.\n/**\n * @relates 
address_v6\n */\nASIO_DECL address_v6 make_address_v6(string_view str,\n    asio::error_code& ec) ASIO_NOEXCEPT;\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n       //  || defined(GENERATING_DOCUMENTATION)\n\n/// Tag type used for distinguishing overloads that deal in IPv4-mapped IPv6\n/// addresses.\nenum v4_mapped_t { v4_mapped };\n\n/// Create an IPv4 address from a IPv4-mapped IPv6 address.\n/**\n * @relates address_v4\n */\nASIO_DECL address_v4 make_address_v4(\n    v4_mapped_t, const address_v6& v6_addr);\n\n/// Create an IPv4-mapped IPv6 address from an IPv4 address.\n/**\n * @relates address_v6\n */\nASIO_DECL address_v6 make_address_v6(\n    v4_mapped_t, const address_v4& v4_addr);\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Output an address as a string.\n/**\n * Used to output a human-readable string for a specified address.\n *\n * @param os The output stream to which the string will be written.\n *\n * @param addr The address to be written.\n *\n * @return The output stream.\n *\n * @relates asio::ip::address_v6\n */\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const address_v6& addr);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/ip/impl/address_v6.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ip/impl/address_v6.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_IP_ADDRESS_V6_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/address_v6_iterator.hpp",
    "content": "//\n// ip/address_v6_iterator.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//                         Oliver Kowalke (oliver dot kowalke at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_ADDRESS_V6_ITERATOR_HPP\n#define ASIO_IP_ADDRESS_V6_ITERATOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ip/address_v6.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\ntemplate <typename> class basic_address_iterator;\n\n/// An input iterator that can be used for traversing IPv6 addresses.\n/**\n * In addition to satisfying the input iterator requirements, this iterator\n * also supports decrement.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <> class basic_address_iterator<address_v6>\n{\npublic:\n  /// The type of the elements pointed to by the iterator.\n  typedef address_v6 value_type;\n\n  /// Distance between two iterators.\n  typedef std::ptrdiff_t difference_type;\n\n  /// The type of a pointer to an element pointed to by the iterator.\n  typedef const address_v6* pointer;\n\n  /// The type of a reference to an element pointed to by the iterator.\n  typedef const address_v6& reference;\n\n  /// Denotes that the iterator satisfies the input iterator requirements.\n  typedef std::input_iterator_tag iterator_category;\n\n  /// Construct an iterator that points to the specified address.\n  basic_address_iterator(const address_v6& addr) ASIO_NOEXCEPT\n    : address_(addr)\n  {\n  }\n\n  /// Copy constructor.\n  basic_address_iterator(\n      const basic_address_iterator& other) ASIO_NOEXCEPT\n    : 
address_(other.address_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  basic_address_iterator(basic_address_iterator&& other) ASIO_NOEXCEPT\n    : address_(ASIO_MOVE_CAST(address_v6)(other.address_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assignment operator.\n  basic_address_iterator& operator=(\n      const basic_address_iterator& other) ASIO_NOEXCEPT\n  {\n    address_ = other.address_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move assignment operator.\n  basic_address_iterator& operator=(\n      basic_address_iterator&& other) ASIO_NOEXCEPT\n  {\n    address_ = ASIO_MOVE_CAST(address_v6)(other.address_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Dereference the iterator.\n  const address_v6& operator*() const ASIO_NOEXCEPT\n  {\n    return address_;\n  }\n\n  /// Dereference the iterator.\n  const address_v6* operator->() const ASIO_NOEXCEPT\n  {\n    return &address_;\n  }\n\n  /// Pre-increment operator.\n  basic_address_iterator& operator++() ASIO_NOEXCEPT\n  {\n    for (int i = 15; i >= 0; --i)\n    {\n      if (address_.addr_.s6_addr[i] < 0xFF)\n      {\n        ++address_.addr_.s6_addr[i];\n        break;\n      }\n\n      address_.addr_.s6_addr[i] = 0;\n    }\n\n    return *this;\n  }\n\n  /// Post-increment operator.\n  basic_address_iterator operator++(int) ASIO_NOEXCEPT\n  {\n    basic_address_iterator tmp(*this);\n    ++*this;\n    return tmp;\n  }\n\n  /// Pre-decrement operator.\n  basic_address_iterator& operator--() ASIO_NOEXCEPT\n  {\n    for (int i = 15; i >= 0; --i)\n    {\n      if (address_.addr_.s6_addr[i] > 0)\n      {\n        --address_.addr_.s6_addr[i];\n        break;\n      }\n\n      address_.addr_.s6_addr[i] = 0xFF;\n    }\n\n    return *this;\n  }\n\n  /// Post-decrement operator.\n  basic_address_iterator operator--(int)\n  {\n    basic_address_iterator tmp(*this);\n    --*this;\n    return tmp;\n  }\n\n  /// Compare two addresses for equality.\n  
friend bool operator==(const basic_address_iterator& a,\n      const basic_address_iterator& b)\n  {\n    return a.address_ == b.address_;\n  }\n\n  /// Compare two addresses for inequality.\n  friend bool operator!=(const basic_address_iterator& a,\n      const basic_address_iterator& b)\n  {\n    return a.address_ != b.address_;\n  }\n\nprivate:\n  address_v6 address_;\n};\n\n/// An input iterator that can be used for traversing IPv6 addresses.\ntypedef basic_address_iterator<address_v6> address_v6_iterator;\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_ADDRESS_V6_ITERATOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/address_v6_range.hpp",
    "content": "//\n// ip/address_v6_range.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//                         Oliver Kowalke (oliver dot kowalke at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_ADDRESS_V6_RANGE_HPP\n#define ASIO_IP_ADDRESS_V6_RANGE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ip/address_v6_iterator.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\ntemplate <typename> class basic_address_range;\n\n/// Represents a range of IPv6 addresses.\n/**\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <> class basic_address_range<address_v6>\n{\npublic:\n  /// The type of an iterator that points into the range.\n  typedef basic_address_iterator<address_v6> iterator;\n\n  /// Construct an empty range.\n  basic_address_range() ASIO_NOEXCEPT\n    : begin_(address_v6()),\n      end_(address_v6())\n  {\n  }\n\n  /// Construct an range that represents the given range of addresses.\n  explicit basic_address_range(const iterator& first,\n      const iterator& last) ASIO_NOEXCEPT\n    : begin_(first),\n      end_(last)\n  {\n  }\n\n  /// Copy constructor.\n  basic_address_range(const basic_address_range& other) ASIO_NOEXCEPT\n    : begin_(other.begin_),\n      end_(other.end_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  basic_address_range(basic_address_range&& other) ASIO_NOEXCEPT\n    : begin_(ASIO_MOVE_CAST(iterator)(other.begin_)),\n      end_(ASIO_MOVE_CAST(iterator)(other.end_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assignment operator.\n  basic_address_range& operator=(\n      
const basic_address_range& other) ASIO_NOEXCEPT\n  {\n    begin_ = other.begin_;\n    end_ = other.end_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move assignment operator.\n  basic_address_range& operator=(\n      basic_address_range&& other) ASIO_NOEXCEPT\n  {\n    begin_ = ASIO_MOVE_CAST(iterator)(other.begin_);\n    end_ = ASIO_MOVE_CAST(iterator)(other.end_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Obtain an iterator that points to the start of the range.\n  iterator begin() const ASIO_NOEXCEPT\n  {\n    return begin_;\n  }\n\n  /// Obtain an iterator that points to the end of the range.\n  iterator end() const ASIO_NOEXCEPT\n  {\n    return end_;\n  }\n\n  /// Determine whether the range is empty.\n  bool empty() const ASIO_NOEXCEPT\n  {\n    return begin_ == end_;\n  }\n\n  /// Find an address in the range.\n  iterator find(const address_v6& addr) const ASIO_NOEXCEPT\n  {\n    return addr >= *begin_ && addr < *end_ ? iterator(addr) : end_;\n  }\n\nprivate:\n  iterator begin_;\n  iterator end_;\n};\n\n/// Represents a range of IPv6 addresses.\ntypedef basic_address_range<address_v6> address_v6_range;\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_ADDRESS_V6_RANGE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/bad_address_cast.hpp",
    "content": "//\n// ip/bad_address_cast.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_BAD_ADDRESS_CAST_HPP\n#define ASIO_IP_BAD_ADDRESS_CAST_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <typeinfo>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Thrown to indicate a failed address conversion.\nclass bad_address_cast :\n#if defined(ASIO_MSVC) && defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS\n  public std::exception\n#else\n  public std::bad_cast\n#endif\n{\npublic:\n  /// Default constructor.\n  bad_address_cast() {}\n\n  /// Destructor.\n  virtual ~bad_address_cast() ASIO_NOEXCEPT_OR_NOTHROW {}\n\n  /// Get the message associated with the exception.\n  virtual const char* what() const ASIO_NOEXCEPT_OR_NOTHROW\n  {\n    return \"bad address cast\";\n  }\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_ADDRESS_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/basic_endpoint.hpp",
    "content": "//\n// ip/basic_endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_BASIC_ENDPOINT_HPP\n#define ASIO_IP_BASIC_ENDPOINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ip/address.hpp\"\n#include \"asio/ip/detail/endpoint.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n# include <iosfwd>\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Describes an endpoint for a version-independent IP socket.\n/**\n * The asio::ip::basic_endpoint class template describes an endpoint that\n * may be associated with a particular socket.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * Endpoint.\n */\ntemplate <typename InternetProtocol>\nclass basic_endpoint\n{\npublic:\n  /// The protocol type associated with the endpoint.\n  typedef InternetProtocol protocol_type;\n\n  /// The type of the endpoint structure. This type is dependent on the\n  /// underlying implementation of the socket layer.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined data_type;\n#else\n  typedef asio::detail::socket_addr_type data_type;\n#endif\n\n  /// Default constructor.\n  basic_endpoint() ASIO_NOEXCEPT\n    : impl_()\n  {\n  }\n\n  /// Construct an endpoint using a port number, specified in the host's byte\n  /// order. The IP address will be the any address (i.e. INADDR_ANY or\n  /// in6addr_any). 
This constructor would typically be used for accepting new\n  /// connections.\n  /**\n   * @par Examples\n   * To initialise an IPv4 TCP endpoint for port 1234, use:\n   * @code\n   * asio::ip::tcp::endpoint ep(asio::ip::tcp::v4(), 1234);\n   * @endcode\n   *\n   * To specify an IPv6 UDP endpoint for port 9876, use:\n   * @code\n   * asio::ip::udp::endpoint ep(asio::ip::udp::v6(), 9876);\n   * @endcode\n   */\n  basic_endpoint(const InternetProtocol& internet_protocol,\n      unsigned short port_num) ASIO_NOEXCEPT\n    : impl_(internet_protocol.family(), port_num)\n  {\n  }\n\n  /// Construct an endpoint using a port number and an IP address. This\n  /// constructor may be used for accepting connections on a specific interface\n  /// or for making a connection to a remote endpoint.\n  basic_endpoint(const asio::ip::address& addr,\n      unsigned short port_num) ASIO_NOEXCEPT\n    : impl_(addr, port_num)\n  {\n  }\n\n  /// Copy constructor.\n  basic_endpoint(const basic_endpoint& other) ASIO_NOEXCEPT\n    : impl_(other.impl_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move constructor.\n  basic_endpoint(basic_endpoint&& other) ASIO_NOEXCEPT\n    : impl_(other.impl_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Assign from another endpoint.\n  basic_endpoint& operator=(const basic_endpoint& other) ASIO_NOEXCEPT\n  {\n    impl_ = other.impl_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-assign from another endpoint.\n  basic_endpoint& operator=(basic_endpoint&& other) ASIO_NOEXCEPT\n  {\n    impl_ = other.impl_;\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// The protocol associated with the endpoint.\n  protocol_type protocol() const ASIO_NOEXCEPT\n  {\n    if (impl_.is_v4())\n      return InternetProtocol::v4();\n    return InternetProtocol::v6();\n  }\n\n  /// Get the 
underlying endpoint in the native type.\n  data_type* data() ASIO_NOEXCEPT\n  {\n    return impl_.data();\n  }\n\n  /// Get the underlying endpoint in the native type.\n  const data_type* data() const ASIO_NOEXCEPT\n  {\n    return impl_.data();\n  }\n\n  /// Get the underlying size of the endpoint in the native type.\n  std::size_t size() const ASIO_NOEXCEPT\n  {\n    return impl_.size();\n  }\n\n  /// Set the underlying size of the endpoint in the native type.\n  void resize(std::size_t new_size)\n  {\n    impl_.resize(new_size);\n  }\n\n  /// Get the capacity of the endpoint in the native type.\n  std::size_t capacity() const ASIO_NOEXCEPT\n  {\n    return impl_.capacity();\n  }\n\n  /// Get the port associated with the endpoint. The port number is always in\n  /// the host's byte order.\n  unsigned short port() const ASIO_NOEXCEPT\n  {\n    return impl_.port();\n  }\n\n  /// Set the port associated with the endpoint. The port number is always in\n  /// the host's byte order.\n  void port(unsigned short port_num) ASIO_NOEXCEPT\n  {\n    impl_.port(port_num);\n  }\n\n  /// Get the IP address associated with the endpoint.\n  asio::ip::address address() const ASIO_NOEXCEPT\n  {\n    return impl_.address();\n  }\n\n  /// Set the IP address associated with the endpoint.\n  void address(const asio::ip::address& addr) ASIO_NOEXCEPT\n  {\n    impl_.address(addr);\n  }\n\n  /// Compare two endpoints for equality.\n  friend bool operator==(const basic_endpoint<InternetProtocol>& e1,\n      const basic_endpoint<InternetProtocol>& e2) ASIO_NOEXCEPT\n  {\n    return e1.impl_ == e2.impl_;\n  }\n\n  /// Compare two endpoints for inequality.\n  friend bool operator!=(const basic_endpoint<InternetProtocol>& e1,\n      const basic_endpoint<InternetProtocol>& e2) ASIO_NOEXCEPT\n  {\n    return !(e1 == e2);\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator<(const basic_endpoint<InternetProtocol>& e1,\n      const basic_endpoint<InternetProtocol>& e2) 
ASIO_NOEXCEPT\n  {\n    return e1.impl_ < e2.impl_;\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator>(const basic_endpoint<InternetProtocol>& e1,\n      const basic_endpoint<InternetProtocol>& e2) ASIO_NOEXCEPT\n  {\n    return e2.impl_ < e1.impl_;\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator<=(const basic_endpoint<InternetProtocol>& e1,\n      const basic_endpoint<InternetProtocol>& e2) ASIO_NOEXCEPT\n  {\n    return !(e2 < e1);\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator>=(const basic_endpoint<InternetProtocol>& e1,\n      const basic_endpoint<InternetProtocol>& e2) ASIO_NOEXCEPT\n  {\n    return !(e1 < e2);\n  }\n\nprivate:\n  // The underlying IP endpoint.\n  asio::ip::detail::endpoint impl_;\n};\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Output an endpoint as a string.\n/**\n * Used to output a human-readable string for a specified endpoint.\n *\n * @param os The output stream to which the string will be written.\n *\n * @param endpoint The endpoint to be written.\n *\n * @return The output stream.\n *\n * @relates asio::ip::basic_endpoint\n */\ntemplate <typename Elem, typename Traits, typename InternetProtocol>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os,\n    const basic_endpoint<InternetProtocol>& endpoint);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/ip/impl/basic_endpoint.hpp\"\n\n#endif // ASIO_IP_BASIC_ENDPOINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/basic_resolver.hpp",
    "content": "//\n// ip/basic_resolver.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_BASIC_RESOLVER_HPP\n#define ASIO_IP_BASIC_RESOLVER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/string_view.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/ip/basic_resolver_iterator.hpp\"\n#include \"asio/ip/basic_resolver_query.hpp\"\n#include \"asio/ip/basic_resolver_results.hpp\"\n#include \"asio/ip/resolver_base.hpp\"\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include \"asio/detail/winrt_resolver_service.hpp\"\n#else\n# include \"asio/detail/resolver_service.hpp\"\n#endif\n\n#if defined(ASIO_HAS_MOVE)\n# include <utility>\n#endif // defined(ASIO_HAS_MOVE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n#if !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL)\n#define ASIO_IP_BASIC_RESOLVER_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename InternetProtocol, typename Executor = executor>\nclass basic_resolver;\n\n#endif // !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL)\n\n/// Provides endpoint resolution functionality.\n/**\n * The basic_resolver class template provides the ability to resolve a query\n * to a list of endpoints.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate 
<typename InternetProtocol, typename Executor>\nclass basic_resolver\n  : public resolver_base\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the resolver type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The resolver type when rebound to the specified executor.\n    typedef basic_resolver<InternetProtocol, Executor1> other;\n  };\n\n  /// The protocol type.\n  typedef InternetProtocol protocol_type;\n\n  /// The endpoint type.\n  typedef typename InternetProtocol::endpoint endpoint_type;\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated.) The query type.\n  typedef basic_resolver_query<InternetProtocol> query;\n\n  /// (Deprecated.) The iterator type.\n  typedef basic_resolver_iterator<InternetProtocol> iterator;\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// The results type.\n  typedef basic_resolver_results<InternetProtocol> results_type;\n\n  /// Construct with executor.\n  /**\n   * This constructor creates a basic_resolver.\n   *\n   * @param ex The I/O executor that the resolver will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * resolver.\n   */\n  explicit basic_resolver(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Construct with execution context.\n  /**\n   * This constructor creates a basic_resolver.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the resolver will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the resolver.\n   */\n  template <typename ExecutionContext>\n  explicit basic_resolver(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a 
basic_resolver from another.\n  /**\n   * This constructor moves a resolver from one object to another.\n   *\n   * @param other The other basic_resolver object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_resolver(const executor_type&) constructor.\n   */\n  basic_resolver(basic_resolver&& other)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a basic_resolver from another.\n  /**\n   * This assignment operator moves a resolver from one object to another.\n   * Cancels any outstanding asynchronous operations associated with the target\n   * object.\n   *\n   * @param other The other basic_resolver object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_resolver(const executor_type&) constructor.\n   */\n  basic_resolver& operator=(basic_resolver&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destroys the resolver.\n  /**\n   * This function destroys the resolver, cancelling any outstanding\n   * asynchronous wait operations associated with the resolver as if by calling\n   * @c cancel.\n   */\n  ~basic_resolver()\n  {\n  }\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n  /// Cancel any asynchronous operations that are waiting on the resolver.\n  /**\n   * This function forces the completion of any pending asynchronous\n   * operations on the host resolver. 
The handler for each cancelled operation\n   * will be invoked with the asio::error::operation_aborted error code.\n   */\n  void cancel()\n  {\n    return impl_.get_service().cancel(impl_.get_implementation());\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use overload with separate host and service parameters.)\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve a query into a list of endpoint entries.\n   *\n   * @param q A query object that determines what endpoints will be returned.\n   *\n   * @returns A range object representing the list of endpoint entries. A\n   * successful call to this function is guaranteed to return a non-empty\n   * range.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  results_type resolve(const query& q)\n  {\n    asio::error_code ec;\n    results_type r = impl_.get_service().resolve(\n        impl_.get_implementation(), q, ec);\n    asio::detail::throw_error(ec, \"resolve\");\n    return r;\n  }\n\n  /// (Deprecated: Use overload with separate host and service parameters.)\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve a query into a list of endpoint entries.\n   *\n   * @param q A query object that determines what endpoints will be returned.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns A range object representing the list of endpoint entries. An\n   * empty range is returned if an error occurs. 
A successful call to this\n   * function is guaranteed to return a non-empty range.\n   */\n  results_type resolve(const query& q, asio::error_code& ec)\n  {\n    return impl_.get_service().resolve(impl_.get_implementation(), q, ec);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @returns A range object representing the list of endpoint entries. A\n   * successful call to this function is guaranteed to return a non-empty\n   * range.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  results_type resolve(ASIO_STRING_VIEW_PARAM host,\n      ASIO_STRING_VIEW_PARAM service)\n  {\n    return resolve(host, service, resolver_base::flags());\n  }\n\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns A range object representing the list of endpoint entries. An\n   * empty range is returned if an error occurs. A successful call to this\n   * function is guaranteed to return a non-empty range.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  results_type resolve(ASIO_STRING_VIEW_PARAM host,\n      ASIO_STRING_VIEW_PARAM service, asio::error_code& ec)\n  {\n    return resolve(host, service, resolver_base::flags(), ec);\n  }\n\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for communication with\n   * remote hosts. See the @ref resolver_base documentation for the set of\n   * available flags.\n   *\n   * @returns A range object representing the list of endpoint entries. A\n   * successful call to this function is guaranteed to return a non-empty\n   * range.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. 
Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. Operating systems\n   * may use additional locations when resolving service names.\n   */\n  results_type resolve(ASIO_STRING_VIEW_PARAM host,\n      ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags)\n  {\n    asio::error_code ec;\n    basic_resolver_query<protocol_type> q(static_cast<std::string>(host),\n        static_cast<std::string>(service), resolve_flags);\n    results_type r = impl_.get_service().resolve(\n        impl_.get_implementation(), q, ec);\n    asio::detail::throw_error(ec, \"resolve\");\n    return r;\n  }\n\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for communication with\n   * remote hosts. 
See the @ref resolver_base documentation for the set of\n   * available flags.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns A range object representing the list of endpoint entries. An\n   * empty range is returned if an error occurs. A successful call to this\n   * function is guaranteed to return a non-empty range.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. Operating systems\n   * may use additional locations when resolving service names.\n   */\n  results_type resolve(ASIO_STRING_VIEW_PARAM host,\n      ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags,\n      asio::error_code& ec)\n  {\n    basic_resolver_query<protocol_type> q(static_cast<std::string>(host),\n        static_cast<std::string>(service), resolve_flags);\n    return impl_.get_service().resolve(impl_.get_implementation(), q, ec);\n  }\n\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param protocol A protocol object, normally representing either the IPv4 or\n   * IPv6 version of an internet protocol.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. 
If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @returns A range object representing the list of endpoint entries. A\n   * successful call to this function is guaranteed to return a non-empty\n   * range.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  results_type resolve(const protocol_type& protocol,\n      ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service)\n  {\n    return resolve(protocol, host, service, resolver_base::flags());\n  }\n\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param protocol A protocol object, normally representing either the IPv4 or\n   * IPv6 version of an internet protocol.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns A range object representing the list of endpoint entries. An\n   * empty range is returned if an error occurs. A successful call to this\n   * function is guaranteed to return a non-empty range.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. 
Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. Operating systems\n   * may use additional locations when resolving service names.\n   */\n  results_type resolve(const protocol_type& protocol,\n      ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,\n      asio::error_code& ec)\n  {\n    return resolve(protocol, host, service, resolver_base::flags(), ec);\n  }\n\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param protocol A protocol object, normally representing either the IPv4 or\n   * IPv6 version of an internet protocol.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for communication with\n   * remote hosts. See the @ref resolver_base documentation for the set of\n   * available flags.\n   *\n   * @returns A range object representing the list of endpoint entries. 
A\n   * successful call to this function is guaranteed to return a non-empty\n   * range.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. Operating systems\n   * may use additional locations when resolving service names.\n   */\n  results_type resolve(const protocol_type& protocol,\n      ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,\n      resolver_base::flags resolve_flags)\n  {\n    asio::error_code ec;\n    basic_resolver_query<protocol_type> q(\n        protocol, static_cast<std::string>(host),\n        static_cast<std::string>(service), resolve_flags);\n    results_type r = impl_.get_service().resolve(\n        impl_.get_implementation(), q, ec);\n    asio::detail::throw_error(ec, \"resolve\");\n    return r;\n  }\n\n  /// Perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param protocol A protocol object, normally representing either the IPv4 or\n   * IPv6 version of an internet protocol.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. 
If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for communication with\n   * remote hosts. See the @ref resolver_base documentation for the set of\n   * available flags.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns A range object representing the list of endpoint entries. An\n   * empty range is returned if an error occurs. A successful call to this\n   * function is guaranteed to return a non-empty range.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  results_type resolve(const protocol_type& protocol,\n      ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,\n      resolver_base::flags resolve_flags, asio::error_code& ec)\n  {\n    basic_resolver_query<protocol_type> q(\n        protocol, static_cast<std::string>(host),\n        static_cast<std::string>(service), resolve_flags);\n    return impl_.get_service().resolve(impl_.get_implementation(), q, ec);\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use overload with separate host and service parameters.)\n  /// Asynchronously perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to asynchronously resolve a query into a list of\n   * endpoint entries.\n   *\n   * @param q A query object that determines what endpoints will be returned.\n   *\n   * @param handler The handler to be called when the resolve operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   resolver::results_type results // Resolved endpoints as a range.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * A successful resolve operation is guaranteed to pass a non-empty range to\n   * the handler.\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        results_type)) ResolveHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ResolveHandler,\n      void (asio::error_code, results_type))\n  async_resolve(const query& q,\n      ASIO_MOVE_ARG(ResolveHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return asio::async_initiate<ResolveHandler,\n      void (asio::error_code, results_type)>(\n        initiate_async_resolve(this), handler, q);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Asynchronously perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param handler The handler to be called when the resolve operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   resolver::results_type results // Resolved endpoints as a range.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * A successful resolve operation is guaranteed to pass a non-empty range to\n   * the handler.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        results_type)) ResolveHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ResolveHandler,\n      void (asio::error_code, results_type))\n  async_resolve(ASIO_STRING_VIEW_PARAM host,\n      ASIO_STRING_VIEW_PARAM service,\n      ASIO_MOVE_ARG(ResolveHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_resolve(host, service, resolver_base::flags(),\n        ASIO_MOVE_CAST(ResolveHandler)(handler));\n  }\n\n  /// Asynchronously perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for communication with\n   * remote hosts. See the @ref resolver_base documentation for the set of\n   * available flags.\n   *\n   * @param handler The handler to be called when the resolve operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   resolver::results_type results // Resolved endpoints as a range.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * A successful resolve operation is guaranteed to pass a non-empty range to\n   * the handler.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        results_type)) ResolveHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ResolveHandler,\n      void (asio::error_code, results_type))\n  async_resolve(ASIO_STRING_VIEW_PARAM host,\n      ASIO_STRING_VIEW_PARAM service,\n      resolver_base::flags resolve_flags,\n      ASIO_MOVE_ARG(ResolveHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    basic_resolver_query<protocol_type> q(static_cast<std::string>(host),\n        static_cast<std::string>(service), resolve_flags);\n\n    return asio::async_initiate<ResolveHandler,\n      void (asio::error_code, results_type)>(\n        initiate_async_resolve(this), handler, q);\n  }\n\n  /// Asynchronously perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param protocol A protocol object, normally representing either the IPv4 or\n   * IPv6 version of an internet protocol.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param handler The handler to be called when the resolve operation\n   * completes. Copies will be made of the handler as required. 
The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   resolver::results_type results // Resolved endpoints as a range.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * A successful resolve operation is guaranteed to pass a non-empty range to\n   * the handler.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        results_type)) ResolveHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ResolveHandler,\n      void (asio::error_code, results_type))\n  async_resolve(const protocol_type& protocol,\n      ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,\n      ASIO_MOVE_ARG(ResolveHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_resolve(protocol, host, service, resolver_base::flags(),\n        ASIO_MOVE_CAST(ResolveHandler)(handler));\n  }\n\n  /// Asynchronously perform forward resolution of a query to a list of entries.\n  /**\n   * This function is used to resolve host and service names into a list of\n   * endpoint entries.\n   *\n   * @param protocol A protocol object, normally representing either the IPv4 or\n   * IPv6 version of an internet protocol.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for communication with\n   * remote hosts. 
See the @ref resolver_base documentation for the set of\n   * available flags.\n   *\n   * @param handler The handler to be called when the resolve operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   resolver::results_type results // Resolved endpoints as a range.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * A successful resolve operation is guaranteed to pass a non-empty range to\n   * the handler.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        results_type)) ResolveHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ResolveHandler,\n      void (asio::error_code, results_type))\n  async_resolve(const protocol_type& protocol,\n      ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,\n      resolver_base::flags resolve_flags,\n      ASIO_MOVE_ARG(ResolveHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    basic_resolver_query<protocol_type> q(\n        protocol, static_cast<std::string>(host),\n        static_cast<std::string>(service), resolve_flags);\n\n    return asio::async_initiate<ResolveHandler,\n      void (asio::error_code, results_type)>(\n        initiate_async_resolve(this), handler, q);\n  }\n\n  /// Perform reverse resolution of an endpoint to a list of entries.\n  /**\n   * This function is used to resolve an endpoint into a list of endpoint\n   * entries.\n   *\n   * @param e An endpoint object that determines what endpoints will be\n   * returned.\n   *\n   * @returns A range object representing the list of endpoint entries. 
A\n   * successful call to this function is guaranteed to return a non-empty\n   * range.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  results_type resolve(const endpoint_type& e)\n  {\n    asio::error_code ec;\n    results_type i = impl_.get_service().resolve(\n        impl_.get_implementation(), e, ec);\n    asio::detail::throw_error(ec, \"resolve\");\n    return i;\n  }\n\n  /// Perform reverse resolution of an endpoint to a list of entries.\n  /**\n   * This function is used to resolve an endpoint into a list of endpoint\n   * entries.\n   *\n   * @param e An endpoint object that determines what endpoints will be\n   * returned.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns A range object representing the list of endpoint entries. An\n   * empty range is returned if an error occurs. A successful call to this\n   * function is guaranteed to return a non-empty range.\n   */\n  results_type resolve(const endpoint_type& e, asio::error_code& ec)\n  {\n    return impl_.get_service().resolve(impl_.get_implementation(), e, ec);\n  }\n\n  /// Asynchronously perform reverse resolution of an endpoint to a list of\n  /// entries.\n  /**\n   * This function is used to asynchronously resolve an endpoint into a list of\n   * endpoint entries.\n   *\n   * @param e An endpoint object that determines what endpoints will be\n   * returned.\n   *\n   * @param handler The handler to be called when the resolve operation\n   * completes. Copies will be made of the handler as required. The function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   resolver::results_type results // Resolved endpoints as a range.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * A successful resolve operation is guaranteed to pass a non-empty range to\n   * the handler.\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        results_type)) ResolveHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ResolveHandler,\n      void (asio::error_code, results_type))\n  async_resolve(const endpoint_type& e,\n      ASIO_MOVE_ARG(ResolveHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return asio::async_initiate<ResolveHandler,\n      void (asio::error_code, results_type)>(\n        initiate_async_resolve(this), handler, e);\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  basic_resolver(const basic_resolver&) ASIO_DELETED;\n  basic_resolver& operator=(const basic_resolver&) ASIO_DELETED;\n\n  class initiate_async_resolve\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_resolve(basic_resolver* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ResolveHandler, typename Query>\n    void operator()(ASIO_MOVE_ARG(ResolveHandler) handler,\n        const Query& q) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ResolveHandler.\n      ASIO_RESOLVE_HANDLER_CHECK(\n          ResolveHandler, handler, results_type) type_check;\n\n      asio::detail::non_const_lvalue<ResolveHandler> handler2(handler);\n      self_->impl_.get_service().async_resolve(\n          self_->impl_.get_implementation(), q, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_resolver* self_;\n  };\n\n# if 
defined(ASIO_WINDOWS_RUNTIME)\n  asio::detail::io_object_impl<\n    asio::detail::winrt_resolver_service<InternetProtocol>,\n    Executor> impl_;\n# else\n  asio::detail::io_object_impl<\n    asio::detail::resolver_service<InternetProtocol>,\n    Executor> impl_;\n# endif\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_BASIC_RESOLVER_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/basic_resolver_entry.hpp",
    "content": "//\n// ip/basic_resolver_entry.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_BASIC_RESOLVER_ENTRY_HPP\n#define ASIO_IP_BASIC_RESOLVER_ENTRY_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/detail/string_view.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// An entry produced by a resolver.\n/**\n * The asio::ip::basic_resolver_entry class template describes an entry\n * as returned by a resolver.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename InternetProtocol>\nclass basic_resolver_entry\n{\npublic:\n  /// The protocol type associated with the endpoint entry.\n  typedef InternetProtocol protocol_type;\n\n  /// The endpoint type associated with the endpoint entry.\n  typedef typename InternetProtocol::endpoint endpoint_type;\n\n  /// Default constructor.\n  basic_resolver_entry()\n  {\n  }\n\n  /// Construct with specified endpoint, host name and service name.\n  basic_resolver_entry(const endpoint_type& ep,\n      ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service)\n    : endpoint_(ep),\n      host_name_(static_cast<std::string>(host)),\n      service_name_(static_cast<std::string>(service))\n  {\n  }\n\n  /// Get the endpoint associated with the entry.\n  endpoint_type endpoint() const\n  {\n    return endpoint_;\n  }\n\n  /// Convert to the endpoint associated with the entry.\n  operator endpoint_type() const\n  {\n    return endpoint_;\n  }\n\n  /// Get the host name associated with the entry.\n  std::string host_name() const\n  
{\n    return host_name_;\n  }\n\n  /// Get the host name associated with the entry.\n  template <class Allocator>\n  std::basic_string<char, std::char_traits<char>, Allocator> host_name(\n      const Allocator& alloc = Allocator()) const\n  {\n    return std::basic_string<char, std::char_traits<char>, Allocator>(\n        host_name_.c_str(), alloc);\n  }\n\n  /// Get the service name associated with the entry.\n  std::string service_name() const\n  {\n    return service_name_;\n  }\n\n  /// Get the service name associated with the entry.\n  template <class Allocator>\n  std::basic_string<char, std::char_traits<char>, Allocator> service_name(\n      const Allocator& alloc = Allocator()) const\n  {\n    return std::basic_string<char, std::char_traits<char>, Allocator>(\n        service_name_.c_str(), alloc);\n  }\n\nprivate:\n  endpoint_type endpoint_;\n  std::string host_name_;\n  std::string service_name_;\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_BASIC_RESOLVER_ENTRY_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/basic_resolver_iterator.hpp",
    "content": "//\n// ip/basic_resolver_iterator.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP\n#define ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <cstring>\n#include <iterator>\n#include <string>\n#include <vector>\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/ip/basic_resolver_entry.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include \"asio/detail/winrt_utils.hpp\"\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// An iterator over the entries produced by a resolver.\n/**\n * The asio::ip::basic_resolver_iterator class template is used to define\n * iterators over the results returned by a resolver.\n *\n * The iterator's value_type, obtained when the iterator is dereferenced, is:\n * @code const basic_resolver_entry<InternetProtocol> @endcode\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename InternetProtocol>\nclass basic_resolver_iterator\n{\npublic:\n  /// The type used for the distance between two iterators.\n  typedef std::ptrdiff_t difference_type;\n\n  /// The type of the value pointed to by the iterator.\n  typedef basic_resolver_entry<InternetProtocol> value_type;\n\n  /// The type of the result of applying operator->() to the iterator.\n  typedef const basic_resolver_entry<InternetProtocol>* pointer;\n\n  /// The type of the result of applying operator*() to 
the iterator.\n  typedef const basic_resolver_entry<InternetProtocol>& reference;\n\n  /// The iterator category.\n  typedef std::forward_iterator_tag iterator_category;\n\n  /// Default constructor creates an end iterator.\n  basic_resolver_iterator()\n    : index_(0)\n  {\n  }\n\n  /// Copy constructor.\n  basic_resolver_iterator(const basic_resolver_iterator& other)\n    : values_(other.values_),\n      index_(other.index_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move constructor.\n  basic_resolver_iterator(basic_resolver_iterator&& other)\n    : values_(ASIO_MOVE_CAST(values_ptr_type)(other.values_)),\n      index_(other.index_)\n  {\n    other.index_ = 0;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Assignment operator.\n  basic_resolver_iterator& operator=(const basic_resolver_iterator& other)\n  {\n    values_ = other.values_;\n    index_ = other.index_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-assignment operator.\n  basic_resolver_iterator& operator=(basic_resolver_iterator&& other)\n  {\n    if (this != &other)\n    {\n      values_ = ASIO_MOVE_CAST(values_ptr_type)(other.values_);\n      index_ = other.index_;\n      other.index_ = 0;\n    }\n\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Dereference an iterator.\n  const basic_resolver_entry<InternetProtocol>& operator*() const\n  {\n    return dereference();\n  }\n\n  /// Dereference an iterator.\n  const basic_resolver_entry<InternetProtocol>* operator->() const\n  {\n    return &dereference();\n  }\n\n  /// Increment operator (prefix).\n  basic_resolver_iterator& operator++()\n  {\n    increment();\n    return *this;\n  }\n\n  /// Increment operator (postfix).\n  basic_resolver_iterator operator++(int)\n  {\n    basic_resolver_iterator tmp(*this);\n    ++*this;\n    return tmp;\n  }\n\n  /// Test two 
iterators for equality.\n  friend bool operator==(const basic_resolver_iterator& a,\n      const basic_resolver_iterator& b)\n  {\n    return a.equal(b);\n  }\n\n  /// Test two iterators for inequality.\n  friend bool operator!=(const basic_resolver_iterator& a,\n      const basic_resolver_iterator& b)\n  {\n    return !a.equal(b);\n  }\n\nprotected:\n  void increment()\n  {\n    if (++index_ == values_->size())\n    {\n      // Reset state to match a default constructed end iterator.\n      values_.reset();\n      index_ = 0;\n    }\n  }\n\n  bool equal(const basic_resolver_iterator& other) const\n  {\n    if (!values_ && !other.values_)\n      return true;\n    if (values_ != other.values_)\n      return false;\n    return index_ == other.index_;\n  }\n\n  const basic_resolver_entry<InternetProtocol>& dereference() const\n  {\n    return (*values_)[index_];\n  }\n\n  typedef std::vector<basic_resolver_entry<InternetProtocol> > values_type;\n  typedef asio::detail::shared_ptr<values_type> values_ptr_type;\n  values_ptr_type values_;\n  std::size_t index_;\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/basic_resolver_query.hpp",
    "content": "//\n// ip/basic_resolver_query.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_BASIC_RESOLVER_QUERY_HPP\n#define ASIO_IP_BASIC_RESOLVER_QUERY_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/ip/resolver_query_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// An query to be passed to a resolver.\n/**\n * The asio::ip::basic_resolver_query class template describes a query\n * that can be passed to a resolver.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename InternetProtocol>\nclass basic_resolver_query\n  : public resolver_query_base\n{\npublic:\n  /// The protocol type associated with the endpoint query.\n  typedef InternetProtocol protocol_type;\n\n  /// Construct with specified service name for any protocol.\n  /**\n   * This constructor is typically used to perform name resolution for local\n   * service binding.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for local service\n   * binding.\n   *\n   * @note On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  basic_resolver_query(const std::string& service,\n      resolver_query_base::flags resolve_flags = passive | address_configured)\n    : hints_(),\n      host_name_(),\n      service_name_(service)\n  {\n    typename InternetProtocol::endpoint endpoint;\n    hints_.ai_flags = static_cast<int>(resolve_flags);\n    hints_.ai_family = PF_UNSPEC;\n    hints_.ai_socktype = endpoint.protocol().type();\n    hints_.ai_protocol = endpoint.protocol().protocol();\n    hints_.ai_addrlen = 0;\n    hints_.ai_canonname = 0;\n    hints_.ai_addr = 0;\n    hints_.ai_next = 0;\n  }\n\n  /// Construct with specified service name for a given protocol.\n  /**\n   * This constructor is typically used to perform name resolution for local\n   * service binding with a specific protocol version.\n   *\n   * @param protocol A protocol object, normally representing either the IPv4 or\n   * IPv6 version of an internet protocol.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for local service\n   * binding.\n   *\n   * @note On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. 
Operating systems\n   * may use additional locations when resolving service names.\n   */\n  basic_resolver_query(const protocol_type& protocol,\n      const std::string& service,\n      resolver_query_base::flags resolve_flags = passive | address_configured)\n    : hints_(),\n      host_name_(),\n      service_name_(service)\n  {\n    hints_.ai_flags = static_cast<int>(resolve_flags);\n    hints_.ai_family = protocol.family();\n    hints_.ai_socktype = protocol.type();\n    hints_.ai_protocol = protocol.protocol();\n    hints_.ai_addrlen = 0;\n    hints_.ai_canonname = 0;\n    hints_.ai_addr = 0;\n    hints_.ai_next = 0;\n  }\n\n  /// Construct with specified host name and service name for any protocol.\n  /**\n   * This constructor is typically used to perform name resolution for\n   * communication with remote hosts.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for communication with\n   * remote hosts.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. 
Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. Operating systems\n   * may use additional locations when resolving service names.\n   */\n  basic_resolver_query(const std::string& host, const std::string& service,\n      resolver_query_base::flags resolve_flags = address_configured)\n    : hints_(),\n      host_name_(host),\n      service_name_(service)\n  {\n    typename InternetProtocol::endpoint endpoint;\n    hints_.ai_flags = static_cast<int>(resolve_flags);\n    hints_.ai_family = ASIO_OS_DEF(AF_UNSPEC);\n    hints_.ai_socktype = endpoint.protocol().type();\n    hints_.ai_protocol = endpoint.protocol().protocol();\n    hints_.ai_addrlen = 0;\n    hints_.ai_canonname = 0;\n    hints_.ai_addr = 0;\n    hints_.ai_next = 0;\n  }\n\n  /// Construct with specified host name and service name for a given protocol.\n  /**\n   * This constructor is typically used to perform name resolution for\n   * communication with remote hosts.\n   *\n   * @param protocol A protocol object, normally representing either the IPv4 or\n   * IPv6 version of an internet protocol.\n   *\n   * @param host A string identifying a location. May be a descriptive name or\n   * a numeric address string. If an empty string and the passive flag has been\n   * specified, the resolved endpoints are suitable for local service binding.\n   * If an empty string and passive is not specified, the resolved endpoints\n   * will use the loopback address.\n   *\n   * @param service A string identifying the requested service. This may be a\n   * descriptive name or a numeric string corresponding to a port number. 
May\n   * be an empty string, in which case all resolved endpoints will have a port\n   * number of 0.\n   *\n   * @param resolve_flags A set of flags that determine how name resolution\n   * should be performed. The default flags are suitable for communication with\n   * remote hosts.\n   *\n   * @note On POSIX systems, host names may be locally defined in the file\n   * <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts</tt>. Remote host name\n   * resolution is performed using DNS. Operating systems may use additional\n   * locations when resolving host names (such as NETBIOS names on Windows).\n   *\n   * On POSIX systems, service names are typically defined in the file\n   * <tt>/etc/services</tt>. On Windows, service names may be found in the file\n   * <tt>c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\services</tt>. Operating systems\n   * may use additional locations when resolving service names.\n   */\n  basic_resolver_query(const protocol_type& protocol,\n      const std::string& host, const std::string& service,\n      resolver_query_base::flags resolve_flags = address_configured)\n    : hints_(),\n      host_name_(host),\n      service_name_(service)\n  {\n    hints_.ai_flags = static_cast<int>(resolve_flags);\n    hints_.ai_family = protocol.family();\n    hints_.ai_socktype = protocol.type();\n    hints_.ai_protocol = protocol.protocol();\n    hints_.ai_addrlen = 0;\n    hints_.ai_canonname = 0;\n    hints_.ai_addr = 0;\n    hints_.ai_next = 0;\n  }\n\n  /// Get the hints associated with the query.\n  const asio::detail::addrinfo_type& hints() const\n  {\n    return hints_;\n  }\n\n  /// Get the host name associated with the query.\n  std::string host_name() const\n  {\n    return host_name_;\n  }\n\n  /// Get the service name associated with the query.\n  std::string service_name() const\n  {\n    return service_name_;\n  }\n\nprivate:\n  asio::detail::addrinfo_type 
hints_;\n  std::string host_name_;\n  std::string service_name_;\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_BASIC_RESOLVER_QUERY_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/basic_resolver_results.hpp",
    "content": "//\n// ip/basic_resolver_results.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_BASIC_RESOLVER_RESULTS_HPP\n#define ASIO_IP_BASIC_RESOLVER_RESULTS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <cstring>\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/ip/basic_resolver_iterator.hpp\"\n\n#if defined(ASIO_WINDOWS_RUNTIME)\n# include \"asio/detail/winrt_utils.hpp\"\n#endif // defined(ASIO_WINDOWS_RUNTIME)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// A range of entries produced by a resolver.\n/**\n * The asio::ip::basic_resolver_results class template is used to define\n * a range over the results returned by a resolver.\n *\n * The iterator's value_type, obtained when a results iterator is dereferenced,\n * is: @code const basic_resolver_entry<InternetProtocol> @endcode\n *\n * @note For backward compatibility, basic_resolver_results is derived from\n * basic_resolver_iterator. 
This derivation is deprecated.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename InternetProtocol>\nclass basic_resolver_results\n#if !defined(ASIO_NO_DEPRECATED)\n  : public basic_resolver_iterator<InternetProtocol>\n#else // !defined(ASIO_NO_DEPRECATED)\n  : private basic_resolver_iterator<InternetProtocol>\n#endif // !defined(ASIO_NO_DEPRECATED)\n{\npublic:\n  /// The protocol type associated with the results.\n  typedef InternetProtocol protocol_type;\n\n  /// The endpoint type associated with the results.\n  typedef typename protocol_type::endpoint endpoint_type;\n\n  /// The type of a value in the results range.\n  typedef basic_resolver_entry<protocol_type> value_type;\n\n  /// The type of a const reference to a value in the range.\n  typedef const value_type& const_reference;\n\n  /// The type of a non-const reference to a value in the range.\n  typedef value_type& reference;\n\n  /// The type of an iterator into the range.\n  typedef basic_resolver_iterator<protocol_type> const_iterator;\n\n  /// The type of an iterator into the range.\n  typedef const_iterator iterator;\n\n  /// Type used to represent the distance between two iterators in the range.\n  typedef std::ptrdiff_t difference_type;\n\n  /// Type used to represent a count of the elements in the range.\n  typedef std::size_t size_type;\n\n  /// Default constructor creates an empty range.\n  basic_resolver_results()\n  {\n  }\n\n  /// Copy constructor.\n  basic_resolver_results(const basic_resolver_results& other)\n    : basic_resolver_iterator<InternetProtocol>(other)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move constructor.\n  basic_resolver_results(basic_resolver_results&& other)\n    : basic_resolver_iterator<InternetProtocol>(\n        ASIO_MOVE_CAST(basic_resolver_results)(other))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// 
Assignment operator.\n  basic_resolver_results& operator=(const basic_resolver_results& other)\n  {\n    basic_resolver_iterator<InternetProtocol>::operator=(other);\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-assignment operator.\n  basic_resolver_results& operator=(basic_resolver_results&& other)\n  {\n    basic_resolver_iterator<InternetProtocol>::operator=(\n        ASIO_MOVE_CAST(basic_resolver_results)(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n#if !defined(GENERATING_DOCUMENTATION)\n  // Create results from an addrinfo list returned by getaddrinfo.\n  static basic_resolver_results create(\n      asio::detail::addrinfo_type* address_info,\n      const std::string& host_name, const std::string& service_name)\n  {\n    basic_resolver_results results;\n    if (!address_info)\n      return results;\n\n    std::string actual_host_name = host_name;\n    if (address_info->ai_canonname)\n      actual_host_name = address_info->ai_canonname;\n\n    results.values_.reset(new values_type);\n\n    while (address_info)\n    {\n      if (address_info->ai_family == ASIO_OS_DEF(AF_INET)\n          || address_info->ai_family == ASIO_OS_DEF(AF_INET6))\n      {\n        using namespace std; // For memcpy.\n        typename InternetProtocol::endpoint endpoint;\n        endpoint.resize(static_cast<std::size_t>(address_info->ai_addrlen));\n        memcpy(endpoint.data(), address_info->ai_addr,\n            address_info->ai_addrlen);\n        results.values_->push_back(\n            basic_resolver_entry<InternetProtocol>(endpoint,\n              actual_host_name, service_name));\n      }\n      address_info = address_info->ai_next;\n    }\n\n    return results;\n  }\n\n  // Create results from an endpoint, host name and service name.\n  static basic_resolver_results create(const endpoint_type& endpoint,\n      const std::string& host_name, const std::string& 
service_name)\n  {\n    basic_resolver_results results;\n    results.values_.reset(new values_type);\n    results.values_->push_back(\n        basic_resolver_entry<InternetProtocol>(\n          endpoint, host_name, service_name));\n    return results;\n  }\n\n  // Create results from a sequence of endpoints, host and service name.\n  template <typename EndpointIterator>\n  static basic_resolver_results create(\n      EndpointIterator begin, EndpointIterator end,\n      const std::string& host_name, const std::string& service_name)\n  {\n    basic_resolver_results results;\n    if (begin != end)\n    {\n      results.values_.reset(new values_type);\n      for (EndpointIterator ep_iter = begin; ep_iter != end; ++ep_iter)\n      {\n        results.values_->push_back(\n            basic_resolver_entry<InternetProtocol>(\n              *ep_iter, host_name, service_name));\n      }\n    }\n    return results;\n  }\n\n# if defined(ASIO_WINDOWS_RUNTIME)\n  // Create results from a Windows Runtime list of EndpointPair objects.\n  static basic_resolver_results create(\n      Windows::Foundation::Collections::IVectorView<\n        Windows::Networking::EndpointPair^>^ endpoints,\n      const asio::detail::addrinfo_type& hints,\n      const std::string& host_name, const std::string& service_name)\n  {\n    basic_resolver_results results;\n    if (endpoints->Size)\n    {\n      results.values_.reset(new values_type);\n      for (unsigned int i = 0; i < endpoints->Size; ++i)\n      {\n        auto pair = endpoints->GetAt(i);\n\n        if (hints.ai_family == ASIO_OS_DEF(AF_INET)\n            && pair->RemoteHostName->Type\n              != Windows::Networking::HostNameType::Ipv4)\n          continue;\n\n        if (hints.ai_family == ASIO_OS_DEF(AF_INET6)\n            && pair->RemoteHostName->Type\n              != Windows::Networking::HostNameType::Ipv6)\n          continue;\n\n        results.values_->push_back(\n            basic_resolver_entry<InternetProtocol>(\n              
typename InternetProtocol::endpoint(\n                ip::make_address(\n                  asio::detail::winrt_utils::string(\n                    pair->RemoteHostName->CanonicalName)),\n                asio::detail::winrt_utils::integer(\n                  pair->RemoteServiceName)),\n              host_name, service_name));\n      }\n    }\n    return results;\n  }\n# endif // defined(ASIO_WINDOWS_RUNTIME)\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n  /// Get the number of entries in the results range.\n  size_type size() const ASIO_NOEXCEPT\n  {\n    return this->values_ ? this->values_->size() : 0;\n  }\n\n  /// Get the maximum number of entries permitted in a results range.\n  size_type max_size() const ASIO_NOEXCEPT\n  {\n    return this->values_ ? this->values_->max_size() : values_type().max_size();\n  }\n\n  /// Determine whether the results range is empty.\n  bool empty() const ASIO_NOEXCEPT\n  {\n    return this->values_ ? this->values_->empty() : true;\n  }\n\n  /// Obtain a begin iterator for the results range.\n  const_iterator begin() const\n  {\n    basic_resolver_results tmp(*this);\n    tmp.index_ = 0;\n    return ASIO_MOVE_CAST(basic_resolver_results)(tmp);\n  }\n\n  /// Obtain an end iterator for the results range.\n  const_iterator end() const\n  {\n    return const_iterator();\n  }\n\n  /// Obtain a begin iterator for the results range.\n  const_iterator cbegin() const\n  {\n    return begin();\n  }\n\n  /// Obtain an end iterator for the results range.\n  const_iterator cend() const\n  {\n    return end();\n  }\n\n  /// Swap the results range with another.\n  void swap(basic_resolver_results& that) ASIO_NOEXCEPT\n  {\n    if (this != &that)\n    {\n      this->values_.swap(that.values_);\n      std::size_t index = this->index_;\n      this->index_ = that.index_;\n      that.index_ = index;\n    }\n  }\n\n  /// Test two iterators for equality.\n  friend bool operator==(const basic_resolver_results& a,\n      const basic_resolver_results& 
b)\n  {\n    return a.equal(b);\n  }\n\n  /// Test two iterators for inequality.\n  friend bool operator!=(const basic_resolver_results& a,\n      const basic_resolver_results& b)\n  {\n    return !a.equal(b);\n  }\n\nprivate:\n  typedef std::vector<basic_resolver_entry<InternetProtocol> > values_type;\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_BASIC_RESOLVER_RESULTS_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/detail/endpoint.hpp",
    "content": "//\n// ip/detail/endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_DETAIL_ENDPOINT_HPP\n#define ASIO_IP_DETAIL_ENDPOINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/winsock_init.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/ip/address.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\nnamespace detail {\n\n// Helper class for implementating an IP endpoint.\nclass endpoint\n{\npublic:\n  // Default constructor.\n  ASIO_DECL endpoint() ASIO_NOEXCEPT;\n\n  // Construct an endpoint using a family and port number.\n  ASIO_DECL endpoint(int family,\n      unsigned short port_num) ASIO_NOEXCEPT;\n\n  // Construct an endpoint using an address and port number.\n  ASIO_DECL endpoint(const asio::ip::address& addr,\n      unsigned short port_num) ASIO_NOEXCEPT;\n\n  // Copy constructor.\n  endpoint(const endpoint& other) ASIO_NOEXCEPT\n    : data_(other.data_)\n  {\n  }\n\n  // Assign from another endpoint.\n  endpoint& operator=(const endpoint& other) ASIO_NOEXCEPT\n  {\n    data_ = other.data_;\n    return *this;\n  }\n\n  // Get the underlying endpoint in the native type.\n  asio::detail::socket_addr_type* data() ASIO_NOEXCEPT\n  {\n    return &data_.base;\n  }\n\n  // Get the underlying endpoint in the native type.\n  const asio::detail::socket_addr_type* data() const ASIO_NOEXCEPT\n  {\n    return &data_.base;\n  }\n\n  // Get the underlying size of the endpoint in the native type.\n  std::size_t size() const ASIO_NOEXCEPT\n  {\n    if (is_v4())\n      return 
sizeof(asio::detail::sockaddr_in4_type);\n    else\n      return sizeof(asio::detail::sockaddr_in6_type);\n  }\n\n  // Set the underlying size of the endpoint in the native type.\n  ASIO_DECL void resize(std::size_t new_size);\n\n  // Get the capacity of the endpoint in the native type.\n  std::size_t capacity() const ASIO_NOEXCEPT\n  {\n    return sizeof(data_);\n  }\n\n  // Get the port associated with the endpoint.\n  ASIO_DECL unsigned short port() const ASIO_NOEXCEPT;\n\n  // Set the port associated with the endpoint.\n  ASIO_DECL void port(unsigned short port_num) ASIO_NOEXCEPT;\n\n  // Get the IP address associated with the endpoint.\n  ASIO_DECL asio::ip::address address() const ASIO_NOEXCEPT;\n\n  // Set the IP address associated with the endpoint.\n  ASIO_DECL void address(\n      const asio::ip::address& addr) ASIO_NOEXCEPT;\n\n  // Compare two endpoints for equality.\n  ASIO_DECL friend bool operator==(const endpoint& e1,\n      const endpoint& e2) ASIO_NOEXCEPT;\n\n  // Compare endpoints for ordering.\n  ASIO_DECL friend bool operator<(const endpoint& e1,\n      const endpoint& e2) ASIO_NOEXCEPT;\n\n  // Determine whether the endpoint is IPv4.\n  bool is_v4() const ASIO_NOEXCEPT\n  {\n    return data_.base.sa_family == ASIO_OS_DEF(AF_INET);\n  }\n\n#if !defined(ASIO_NO_IOSTREAM)\n  // Convert to a string.\n  ASIO_DECL std::string to_string() const;\n#endif // !defined(ASIO_NO_IOSTREAM)\n\nprivate:\n  // The underlying IP socket address.\n  union data_union\n  {\n    asio::detail::socket_addr_type base;\n    asio::detail::sockaddr_in4_type v4;\n    asio::detail::sockaddr_in6_type v6;\n  } data_;\n};\n\n} // namespace detail\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ip/detail/impl/endpoint.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_IP_DETAIL_ENDPOINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/detail/impl/endpoint.ipp",
    "content": "//\n// ip/detail/impl/endpoint.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP\n#define ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstring>\n#if !defined(ASIO_NO_IOSTREAM)\n# include <sstream>\n#endif // !defined(ASIO_NO_IOSTREAM)\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/ip/detail/endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\nnamespace detail {\n\nendpoint::endpoint() ASIO_NOEXCEPT\n  : data_()\n{\n  data_.v4.sin_family = ASIO_OS_DEF(AF_INET);\n  data_.v4.sin_port = 0;\n  data_.v4.sin_addr.s_addr = ASIO_OS_DEF(INADDR_ANY);\n}\n\nendpoint::endpoint(int family, unsigned short port_num) ASIO_NOEXCEPT\n  : data_()\n{\n  using namespace std; // For memcpy.\n  if (family == ASIO_OS_DEF(AF_INET))\n  {\n    data_.v4.sin_family = ASIO_OS_DEF(AF_INET);\n    data_.v4.sin_port =\n      asio::detail::socket_ops::host_to_network_short(port_num);\n    data_.v4.sin_addr.s_addr = ASIO_OS_DEF(INADDR_ANY);\n  }\n  else\n  {\n    data_.v6.sin6_family = ASIO_OS_DEF(AF_INET6);\n    data_.v6.sin6_port =\n      asio::detail::socket_ops::host_to_network_short(port_num);\n    data_.v6.sin6_flowinfo = 0;\n    data_.v6.sin6_addr.s6_addr[0] = 0; data_.v6.sin6_addr.s6_addr[1] = 0;\n    data_.v6.sin6_addr.s6_addr[2] = 0; data_.v6.sin6_addr.s6_addr[3] = 0;\n    data_.v6.sin6_addr.s6_addr[4] = 0; data_.v6.sin6_addr.s6_addr[5] = 0;\n    data_.v6.sin6_addr.s6_addr[6] = 0; data_.v6.sin6_addr.s6_addr[7] = 0;\n    
data_.v6.sin6_addr.s6_addr[8] = 0; data_.v6.sin6_addr.s6_addr[9] = 0;\n    data_.v6.sin6_addr.s6_addr[10] = 0; data_.v6.sin6_addr.s6_addr[11] = 0;\n    data_.v6.sin6_addr.s6_addr[12] = 0; data_.v6.sin6_addr.s6_addr[13] = 0;\n    data_.v6.sin6_addr.s6_addr[14] = 0; data_.v6.sin6_addr.s6_addr[15] = 0;\n    data_.v6.sin6_scope_id = 0;\n  }\n}\n\nendpoint::endpoint(const asio::ip::address& addr,\n    unsigned short port_num) ASIO_NOEXCEPT\n  : data_()\n{\n  using namespace std; // For memcpy.\n  if (addr.is_v4())\n  {\n    data_.v4.sin_family = ASIO_OS_DEF(AF_INET);\n    data_.v4.sin_port =\n      asio::detail::socket_ops::host_to_network_short(port_num);\n    data_.v4.sin_addr.s_addr =\n      asio::detail::socket_ops::host_to_network_long(\n        addr.to_v4().to_uint());\n  }\n  else\n  {\n    data_.v6.sin6_family = ASIO_OS_DEF(AF_INET6);\n    data_.v6.sin6_port =\n      asio::detail::socket_ops::host_to_network_short(port_num);\n    data_.v6.sin6_flowinfo = 0;\n    asio::ip::address_v6 v6_addr = addr.to_v6();\n    asio::ip::address_v6::bytes_type bytes = v6_addr.to_bytes();\n    memcpy(data_.v6.sin6_addr.s6_addr, bytes.data(), 16);\n    data_.v6.sin6_scope_id =\n      static_cast<asio::detail::u_long_type>(\n        v6_addr.scope_id());\n  }\n}\n\nvoid endpoint::resize(std::size_t new_size)\n{\n  if (new_size > sizeof(asio::detail::sockaddr_storage_type))\n  {\n    asio::error_code ec(asio::error::invalid_argument);\n    asio::detail::throw_error(ec);\n  }\n}\n\nunsigned short endpoint::port() const ASIO_NOEXCEPT\n{\n  if (is_v4())\n  {\n    return asio::detail::socket_ops::network_to_host_short(\n        data_.v4.sin_port);\n  }\n  else\n  {\n    return asio::detail::socket_ops::network_to_host_short(\n        data_.v6.sin6_port);\n  }\n}\n\nvoid endpoint::port(unsigned short port_num) ASIO_NOEXCEPT\n{\n  if (is_v4())\n  {\n    data_.v4.sin_port\n      = asio::detail::socket_ops::host_to_network_short(port_num);\n  }\n  else\n  {\n    data_.v6.sin6_port\n      = 
asio::detail::socket_ops::host_to_network_short(port_num);\n  }\n}\n\nasio::ip::address endpoint::address() const ASIO_NOEXCEPT\n{\n  using namespace std; // For memcpy.\n  if (is_v4())\n  {\n    return asio::ip::address_v4(\n        asio::detail::socket_ops::network_to_host_long(\n          data_.v4.sin_addr.s_addr));\n  }\n  else\n  {\n    asio::ip::address_v6::bytes_type bytes;\n#if defined(ASIO_HAS_STD_ARRAY)\n    memcpy(bytes.data(), data_.v6.sin6_addr.s6_addr, 16);\n#else // defined(ASIO_HAS_STD_ARRAY)\n    memcpy(bytes.elems, data_.v6.sin6_addr.s6_addr, 16);\n#endif // defined(ASIO_HAS_STD_ARRAY)\n    return asio::ip::address_v6(bytes, data_.v6.sin6_scope_id);\n  }\n}\n\nvoid endpoint::address(const asio::ip::address& addr) ASIO_NOEXCEPT\n{\n  endpoint tmp_endpoint(addr, port());\n  data_ = tmp_endpoint.data_;\n}\n\nbool operator==(const endpoint& e1, const endpoint& e2) ASIO_NOEXCEPT\n{\n  return e1.address() == e2.address() && e1.port() == e2.port();\n}\n\nbool operator<(const endpoint& e1, const endpoint& e2) ASIO_NOEXCEPT\n{\n  if (e1.address() < e2.address())\n    return true;\n  if (e1.address() != e2.address())\n    return false;\n  return e1.port() < e2.port();\n}\n\n#if !defined(ASIO_NO_IOSTREAM)\nstd::string endpoint::to_string() const\n{\n  std::ostringstream tmp_os;\n  tmp_os.imbue(std::locale::classic());\n  if (is_v4())\n    tmp_os << address();\n  else\n    tmp_os << '[' << address() << ']';\n  tmp_os << ':' << port();\n\n  return tmp_os.str();\n}\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n} // namespace detail\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP\n"
  },
  {
    "path": "src/third_party/asio/ip/detail/socket_option.hpp",
    "content": "//\n// detail/socket_option.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_DETAIL_SOCKET_OPTION_HPP\n#define ASIO_IP_DETAIL_SOCKET_OPTION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <cstring>\n#include <stdexcept>\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/ip/address.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\nnamespace detail {\nnamespace socket_option {\n\n// Helper template for implementing multicast enable loopback options.\ntemplate <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>\nclass multicast_enable_loopback\n{\npublic:\n#if defined(__sun) || defined(__osf__)\n  typedef unsigned char ipv4_value_type;\n  typedef unsigned char ipv6_value_type;\n#elif defined(_AIX) || defined(__hpux) || defined(__QNXNTO__) \n  typedef unsigned char ipv4_value_type;\n  typedef unsigned int ipv6_value_type;\n#else\n  typedef int ipv4_value_type;\n  typedef int ipv6_value_type;\n#endif\n\n  // Default constructor.\n  multicast_enable_loopback()\n    : ipv4_value_(0),\n      ipv6_value_(0)\n  {\n  }\n\n  // Construct with a specific option value.\n  explicit multicast_enable_loopback(bool v)\n    : ipv4_value_(v ? 1 : 0),\n      ipv6_value_(v ? 1 : 0)\n  {\n  }\n\n  // Set the value of the boolean.\n  multicast_enable_loopback& operator=(bool v)\n  {\n    ipv4_value_ = v ? 1 : 0;\n    ipv6_value_ = v ? 
1 : 0;\n    return *this;\n  }\n\n  // Get the current value of the boolean.\n  bool value() const\n  {\n    return !!ipv4_value_;\n  }\n\n  // Convert to bool.\n  operator bool() const\n  {\n    return !!ipv4_value_;\n  }\n\n  // Test for false.\n  bool operator!() const\n  {\n    return !ipv4_value_;\n  }\n\n  // Get the level of the socket option.\n  template <typename Protocol>\n  int level(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Level;\n    return IPv4_Level;\n  }\n\n  // Get the name of the socket option.\n  template <typename Protocol>\n  int name(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Name;\n    return IPv4_Name;\n  }\n\n  // Get the address of the boolean data.\n  template <typename Protocol>\n  void* data(const Protocol& protocol)\n  {\n    if (protocol.family() == PF_INET6)\n      return &ipv6_value_;\n    return &ipv4_value_;\n  }\n\n  // Get the address of the boolean data.\n  template <typename Protocol>\n  const void* data(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return &ipv6_value_;\n    return &ipv4_value_;\n  }\n\n  // Get the size of the boolean data.\n  template <typename Protocol>\n  std::size_t size(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return sizeof(ipv6_value_);\n    return sizeof(ipv4_value_);\n  }\n\n  // Set the size of the boolean data.\n  template <typename Protocol>\n  void resize(const Protocol& protocol, std::size_t s)\n  {\n    if (protocol.family() == PF_INET6)\n    {\n      if (s != sizeof(ipv6_value_))\n      {\n        std::length_error ex(\"multicast_enable_loopback socket option resize\");\n        asio::detail::throw_exception(ex);\n      }\n      ipv4_value_ = ipv6_value_ ? 
1 : 0;\n    }\n    else\n    {\n      if (s != sizeof(ipv4_value_))\n      {\n        std::length_error ex(\"multicast_enable_loopback socket option resize\");\n        asio::detail::throw_exception(ex);\n      }\n      ipv6_value_ = ipv4_value_ ? 1 : 0;\n    }\n  }\n\nprivate:\n  ipv4_value_type ipv4_value_;\n  ipv6_value_type ipv6_value_;\n};\n\n// Helper template for implementing unicast hops options.\ntemplate <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>\nclass unicast_hops\n{\npublic:\n  // Default constructor.\n  unicast_hops()\n    : value_(0)\n  {\n  }\n\n  // Construct with a specific option value.\n  explicit unicast_hops(int v)\n    : value_(v)\n  {\n  }\n\n  // Set the value of the option.\n  unicast_hops& operator=(int v)\n  {\n    value_ = v;\n    return *this;\n  }\n\n  // Get the current value of the option.\n  int value() const\n  {\n    return value_;\n  }\n\n  // Get the level of the socket option.\n  template <typename Protocol>\n  int level(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Level;\n    return IPv4_Level;\n  }\n\n  // Get the name of the socket option.\n  template <typename Protocol>\n  int name(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Name;\n    return IPv4_Name;\n  }\n\n  // Get the address of the data.\n  template <typename Protocol>\n  int* data(const Protocol&)\n  {\n    return &value_;\n  }\n\n  // Get the address of the data.\n  template <typename Protocol>\n  const int* data(const Protocol&) const\n  {\n    return &value_;\n  }\n\n  // Get the size of the data.\n  template <typename Protocol>\n  std::size_t size(const Protocol&) const\n  {\n    return sizeof(value_);\n  }\n\n  // Set the size of the data.\n  template <typename Protocol>\n  void resize(const Protocol&, std::size_t s)\n  {\n    if (s != sizeof(value_))\n    {\n      std::length_error ex(\"unicast hops socket option resize\");\n      
asio::detail::throw_exception(ex);\n    }\n#if defined(__hpux)\n    if (value_ < 0)\n      value_ = value_ & 0xFF;\n#endif\n  }\n\nprivate:\n  int value_;\n};\n\n// Helper template for implementing multicast hops options.\ntemplate <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>\nclass multicast_hops\n{\npublic:\n#if defined(ASIO_WINDOWS) && defined(UNDER_CE)\n  typedef int ipv4_value_type;\n#else\n  typedef unsigned char ipv4_value_type;\n#endif\n  typedef int ipv6_value_type;\n\n  // Default constructor.\n  multicast_hops()\n    : ipv4_value_(0),\n      ipv6_value_(0)\n  {\n  }\n\n  // Construct with a specific option value.\n  explicit multicast_hops(int v)\n  {\n    if (v < 0 || v > 255)\n    {\n      std::out_of_range ex(\"multicast hops value out of range\");\n      asio::detail::throw_exception(ex);\n    }\n    ipv4_value_ = (ipv4_value_type)v;\n    ipv6_value_ = v;\n  }\n\n  // Set the value of the option.\n  multicast_hops& operator=(int v)\n  {\n    if (v < 0 || v > 255)\n    {\n      std::out_of_range ex(\"multicast hops value out of range\");\n      asio::detail::throw_exception(ex);\n    }\n    ipv4_value_ = (ipv4_value_type)v;\n    ipv6_value_ = v;\n    return *this;\n  }\n\n  // Get the current value of the option.\n  int value() const\n  {\n    return ipv6_value_;\n  }\n\n  // Get the level of the socket option.\n  template <typename Protocol>\n  int level(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Level;\n    return IPv4_Level;\n  }\n\n  // Get the name of the socket option.\n  template <typename Protocol>\n  int name(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Name;\n    return IPv4_Name;\n  }\n\n  // Get the address of the data.\n  template <typename Protocol>\n  void* data(const Protocol& protocol)\n  {\n    if (protocol.family() == PF_INET6)\n      return &ipv6_value_;\n    return &ipv4_value_;\n  }\n\n  // Get the address of 
the data.\n  template <typename Protocol>\n  const void* data(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return &ipv6_value_;\n    return &ipv4_value_;\n  }\n\n  // Get the size of the data.\n  template <typename Protocol>\n  std::size_t size(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return sizeof(ipv6_value_);\n    return sizeof(ipv4_value_);\n  }\n\n  // Set the size of the data.\n  template <typename Protocol>\n  void resize(const Protocol& protocol, std::size_t s)\n  {\n    if (protocol.family() == PF_INET6)\n    {\n      if (s != sizeof(ipv6_value_))\n      {\n        std::length_error ex(\"multicast hops socket option resize\");\n        asio::detail::throw_exception(ex);\n      }\n      if (ipv6_value_ < 0)\n        ipv4_value_ = 0;\n      else if (ipv6_value_ > 255)\n        ipv4_value_ = 255;\n      else\n        ipv4_value_ = (ipv4_value_type)ipv6_value_;\n    }\n    else\n    {\n      if (s != sizeof(ipv4_value_))\n      {\n        std::length_error ex(\"multicast hops socket option resize\");\n        asio::detail::throw_exception(ex);\n      }\n      ipv6_value_ = ipv4_value_;\n    }\n  }\n\nprivate:\n  ipv4_value_type ipv4_value_;\n  ipv6_value_type ipv6_value_;\n};\n\n// Helper template for implementing ip_mreq-based options.\ntemplate <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>\nclass multicast_request\n{\npublic:\n  // Default constructor.\n  multicast_request()\n    : ipv4_value_(), // Zero-initialisation gives the \"any\" address.\n      ipv6_value_() // Zero-initialisation gives the \"any\" address.\n  {\n  }\n\n  // Construct with multicast address only.\n  explicit multicast_request(const address& multicast_address)\n    : ipv4_value_(), // Zero-initialisation gives the \"any\" address.\n      ipv6_value_() // Zero-initialisation gives the \"any\" address.\n  {\n    if (multicast_address.is_v6())\n    {\n      using namespace std; // For 
memcpy.\n      address_v6 ipv6_address = multicast_address.to_v6();\n      address_v6::bytes_type bytes = ipv6_address.to_bytes();\n      memcpy(ipv6_value_.ipv6mr_multiaddr.s6_addr, bytes.data(), 16);\n      ipv6_value_.ipv6mr_interface = ipv6_address.scope_id();\n    }\n    else\n    {\n      ipv4_value_.imr_multiaddr.s_addr =\n        asio::detail::socket_ops::host_to_network_long(\n            multicast_address.to_v4().to_uint());\n      ipv4_value_.imr_interface.s_addr =\n        asio::detail::socket_ops::host_to_network_long(\n            address_v4::any().to_uint());\n    }\n  }\n\n  // Construct with multicast address and IPv4 address specifying an interface.\n  explicit multicast_request(const address_v4& multicast_address,\n      const address_v4& network_interface = address_v4::any())\n    : ipv6_value_() // Zero-initialisation gives the \"any\" address.\n  {\n    ipv4_value_.imr_multiaddr.s_addr =\n      asio::detail::socket_ops::host_to_network_long(\n          multicast_address.to_uint());\n    ipv4_value_.imr_interface.s_addr =\n      asio::detail::socket_ops::host_to_network_long(\n          network_interface.to_uint());\n  }\n\n  // Construct with multicast address and IPv6 network interface index.\n  explicit multicast_request(\n      const address_v6& multicast_address,\n      unsigned long network_interface = 0)\n    : ipv4_value_() // Zero-initialisation gives the \"any\" address.\n  {\n    using namespace std; // For memcpy.\n    address_v6::bytes_type bytes = multicast_address.to_bytes();\n    memcpy(ipv6_value_.ipv6mr_multiaddr.s6_addr, bytes.data(), 16);\n    if (network_interface)\n      ipv6_value_.ipv6mr_interface = network_interface;\n    else\n      ipv6_value_.ipv6mr_interface = multicast_address.scope_id();\n  }\n\n  // Get the level of the socket option.\n  template <typename Protocol>\n  int level(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Level;\n    return IPv4_Level;\n  }\n\n  
// Get the name of the socket option.\n  template <typename Protocol>\n  int name(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Name;\n    return IPv4_Name;\n  }\n\n  // Get the address of the option data.\n  template <typename Protocol>\n  const void* data(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return &ipv6_value_;\n    return &ipv4_value_;\n  }\n\n  // Get the size of the option data.\n  template <typename Protocol>\n  std::size_t size(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return sizeof(ipv6_value_);\n    return sizeof(ipv4_value_);\n  }\n\nprivate:\n  asio::detail::in4_mreq_type ipv4_value_;\n  asio::detail::in6_mreq_type ipv6_value_;\n};\n\n// Helper template for implementing options that specify a network interface.\ntemplate <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>\nclass network_interface\n{\npublic:\n  // Default constructor.\n  network_interface()\n  {\n    ipv4_value_.s_addr =\n      asio::detail::socket_ops::host_to_network_long(\n          address_v4::any().to_uint());\n    ipv6_value_ = 0;\n  }\n\n  // Construct with IPv4 interface.\n  explicit network_interface(const address_v4& ipv4_interface)\n  {\n    ipv4_value_.s_addr =\n      asio::detail::socket_ops::host_to_network_long(\n          ipv4_interface.to_uint());\n    ipv6_value_ = 0;\n  }\n\n  // Construct with IPv6 interface.\n  explicit network_interface(unsigned int ipv6_interface)\n  {\n    ipv4_value_.s_addr =\n      asio::detail::socket_ops::host_to_network_long(\n          address_v4::any().to_uint());\n    ipv6_value_ = ipv6_interface;\n  }\n\n  // Get the level of the socket option.\n  template <typename Protocol>\n  int level(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Level;\n    return IPv4_Level;\n  }\n\n  // Get the name of the socket option.\n  template <typename 
Protocol>\n  int name(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return IPv6_Name;\n    return IPv4_Name;\n  }\n\n  // Get the address of the option data.\n  template <typename Protocol>\n  const void* data(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return &ipv6_value_;\n    return &ipv4_value_;\n  }\n\n  // Get the size of the option data.\n  template <typename Protocol>\n  std::size_t size(const Protocol& protocol) const\n  {\n    if (protocol.family() == PF_INET6)\n      return sizeof(ipv6_value_);\n    return sizeof(ipv4_value_);\n  }\n\nprivate:\n  asio::detail::in4_addr_type ipv4_value_;\n  unsigned int ipv6_value_;\n};\n\n} // namespace socket_option\n} // namespace detail\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_DETAIL_SOCKET_OPTION_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/host_name.hpp",
    "content": "//\n// ip/host_name.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_HOST_NAME_HPP\n#define ASIO_IP_HOST_NAME_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/error_code.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Get the current host name.\nASIO_DECL std::string host_name();\n\n/// Get the current host name.\nASIO_DECL std::string host_name(asio::error_code& ec);\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ip/impl/host_name.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_IP_HOST_NAME_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/icmp.hpp",
    "content": "//\n// ip/icmp.hpp\n// ~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_ICMP_HPP\n#define ASIO_IP_ICMP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/basic_raw_socket.hpp\"\n#include \"asio/ip/basic_endpoint.hpp\"\n#include \"asio/ip/basic_resolver.hpp\"\n#include \"asio/ip/basic_resolver_iterator.hpp\"\n#include \"asio/ip/basic_resolver_query.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Encapsulates the flags needed for ICMP.\n/**\n * The asio::ip::icmp class contains flags necessary for ICMP sockets.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol, InternetProtocol.\n */\nclass icmp\n{\npublic:\n  /// The type of a ICMP endpoint.\n  typedef basic_endpoint<icmp> endpoint;\n\n  /// Construct to represent the IPv4 ICMP protocol.\n  static icmp v4() ASIO_NOEXCEPT\n  {\n    return icmp(ASIO_OS_DEF(IPPROTO_ICMP),\n        ASIO_OS_DEF(AF_INET));\n  }\n\n  /// Construct to represent the IPv6 ICMP protocol.\n  static icmp v6() ASIO_NOEXCEPT\n  {\n    return icmp(ASIO_OS_DEF(IPPROTO_ICMPV6),\n        ASIO_OS_DEF(AF_INET6));\n  }\n\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(SOCK_RAW);\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return protocol_;\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  {\n    return family_;\n  }\n\n  /// The ICMP socket type.\n  
typedef basic_raw_socket<icmp> socket;\n\n  /// The ICMP resolver type.\n  typedef basic_resolver<icmp> resolver;\n\n  /// Compare two protocols for equality.\n  friend bool operator==(const icmp& p1, const icmp& p2)\n  {\n    return p1.protocol_ == p2.protocol_ && p1.family_ == p2.family_;\n  }\n\n  /// Compare two protocols for inequality.\n  friend bool operator!=(const icmp& p1, const icmp& p2)\n  {\n    return p1.protocol_ != p2.protocol_ || p1.family_ != p2.family_;\n  }\n\nprivate:\n  // Construct with a specific family.\n  explicit icmp(int protocol_id, int protocol_family) ASIO_NOEXCEPT\n    : protocol_(protocol_id),\n      family_(protocol_family)\n  {\n  }\n\n  int protocol_;\n  int family_;\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_ICMP_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/address.hpp",
    "content": "//\n// ip/impl/address.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_ADDRESS_HPP\n#define ASIO_IP_IMPL_ADDRESS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n#if !defined(ASIO_NO_DEPRECATED)\n\ninline address address::from_string(const char* str)\n{\n  return asio::ip::make_address(str);\n}\n\ninline address address::from_string(\n    const char* str, asio::error_code& ec)\n{\n  return asio::ip::make_address(str, ec);\n}\n\ninline address address::from_string(const std::string& str)\n{\n  return asio::ip::make_address(str);\n}\n\ninline address address::from_string(\n    const std::string& str, asio::error_code& ec)\n{\n  return asio::ip::make_address(str, ec);\n}\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const address& addr)\n{\n  return os << addr.to_string().c_str();\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_IP_IMPL_ADDRESS_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/address.ipp",
    "content": "//\n// ip/impl/address.ipp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_ADDRESS_IPP\n#define ASIO_IP_IMPL_ADDRESS_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <typeinfo>\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/ip/address.hpp\"\n#include \"asio/ip/bad_address_cast.hpp\"\n#include \"asio/system_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\naddress::address() ASIO_NOEXCEPT\n  : type_(ipv4),\n    ipv4_address_(),\n    ipv6_address_()\n{\n}\n\naddress::address(\n    const asio::ip::address_v4& ipv4_address) ASIO_NOEXCEPT\n  : type_(ipv4),\n    ipv4_address_(ipv4_address),\n    ipv6_address_()\n{\n}\n\naddress::address(\n    const asio::ip::address_v6& ipv6_address) ASIO_NOEXCEPT\n  : type_(ipv6),\n    ipv4_address_(),\n    ipv6_address_(ipv6_address)\n{\n}\n\naddress::address(const address& other) ASIO_NOEXCEPT\n  : type_(other.type_),\n    ipv4_address_(other.ipv4_address_),\n    ipv6_address_(other.ipv6_address_)\n{\n}\n\n#if defined(ASIO_HAS_MOVE)\naddress::address(address&& other) ASIO_NOEXCEPT\n  : type_(other.type_),\n    ipv4_address_(other.ipv4_address_),\n    ipv6_address_(other.ipv6_address_)\n{\n}\n#endif // defined(ASIO_HAS_MOVE)\n\naddress& address::operator=(const address& other) ASIO_NOEXCEPT\n{\n  type_ = other.type_;\n  ipv4_address_ = other.ipv4_address_;\n  ipv6_address_ = other.ipv6_address_;\n  return *this;\n}\n\n#if defined(ASIO_HAS_MOVE)\naddress& address::operator=(address&& other) ASIO_NOEXCEPT\n{\n  type_ = other.type_;\n  
ipv4_address_ = other.ipv4_address_;\n  ipv6_address_ = other.ipv6_address_;\n  return *this;\n}\n#endif // defined(ASIO_HAS_MOVE)\n\naddress& address::operator=(\n    const asio::ip::address_v4& ipv4_address) ASIO_NOEXCEPT\n{\n  type_ = ipv4;\n  ipv4_address_ = ipv4_address;\n  ipv6_address_ = asio::ip::address_v6();\n  return *this;\n}\n\naddress& address::operator=(\n    const asio::ip::address_v6& ipv6_address) ASIO_NOEXCEPT\n{\n  type_ = ipv6;\n  ipv4_address_ = asio::ip::address_v4();\n  ipv6_address_ = ipv6_address;\n  return *this;\n}\n\naddress make_address(const char* str)\n{\n  asio::error_code ec;\n  address addr = make_address(str, ec);\n  asio::detail::throw_error(ec);\n  return addr;\n}\n\naddress make_address(const char* str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  asio::ip::address_v6 ipv6_address =\n    asio::ip::make_address_v6(str, ec);\n  if (!ec)\n    return address(ipv6_address);\n\n  asio::ip::address_v4 ipv4_address =\n    asio::ip::make_address_v4(str, ec);\n  if (!ec)\n    return address(ipv4_address);\n\n  return address();\n}\n\naddress make_address(const std::string& str)\n{\n  return make_address(str.c_str());\n}\n\naddress make_address(const std::string& str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  return make_address(str.c_str(), ec);\n}\n\n#if defined(ASIO_HAS_STRING_VIEW)\n\naddress make_address(string_view str)\n{\n  return make_address(static_cast<std::string>(str));\n}\n\naddress make_address(string_view str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  return make_address(static_cast<std::string>(str), ec);\n}\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n\nasio::ip::address_v4 address::to_v4() const\n{\n  if (type_ != ipv4)\n  {\n    bad_address_cast ex;\n    asio::detail::throw_exception(ex);\n  }\n  return ipv4_address_;\n}\n\nasio::ip::address_v6 address::to_v6() const\n{\n  if (type_ != ipv6)\n  {\n    bad_address_cast ex;\n    asio::detail::throw_exception(ex);\n  }\n  return 
ipv6_address_;\n}\n\nstd::string address::to_string() const\n{\n  if (type_ == ipv6)\n    return ipv6_address_.to_string();\n  return ipv4_address_.to_string();\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nstd::string address::to_string(asio::error_code& ec) const\n{\n  if (type_ == ipv6)\n    return ipv6_address_.to_string(ec);\n  return ipv4_address_.to_string(ec);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nbool address::is_loopback() const ASIO_NOEXCEPT\n{\n  return (type_ == ipv4)\n    ? ipv4_address_.is_loopback()\n    : ipv6_address_.is_loopback();\n}\n\nbool address::is_unspecified() const ASIO_NOEXCEPT\n{\n  return (type_ == ipv4)\n    ? ipv4_address_.is_unspecified()\n    : ipv6_address_.is_unspecified();\n}\n\nbool address::is_multicast() const ASIO_NOEXCEPT\n{\n  return (type_ == ipv4)\n    ? ipv4_address_.is_multicast()\n    : ipv6_address_.is_multicast();\n}\n\nbool operator==(const address& a1, const address& a2) ASIO_NOEXCEPT\n{\n  if (a1.type_ != a2.type_)\n    return false;\n  if (a1.type_ == address::ipv6)\n    return a1.ipv6_address_ == a2.ipv6_address_;\n  return a1.ipv4_address_ == a2.ipv4_address_;\n}\n\nbool operator<(const address& a1, const address& a2) ASIO_NOEXCEPT\n{\n  if (a1.type_ < a2.type_)\n    return true;\n  if (a1.type_ > a2.type_)\n    return false;\n  if (a1.type_ == address::ipv6)\n    return a1.ipv6_address_ < a2.ipv6_address_;\n  return a1.ipv4_address_ < a2.ipv4_address_;\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_IMPL_ADDRESS_IPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/address_v4.hpp",
    "content": "//\n// ip/impl/address_v4.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_ADDRESS_V4_HPP\n#define ASIO_IP_IMPL_ADDRESS_V4_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n#if !defined(ASIO_NO_DEPRECATED)\n\ninline address_v4 address_v4::from_string(const char* str)\n{\n  return asio::ip::make_address_v4(str);\n}\n\ninline address_v4 address_v4::from_string(\n    const char* str, asio::error_code& ec)\n{\n  return asio::ip::make_address_v4(str, ec);\n}\n\ninline address_v4 address_v4::from_string(const std::string& str)\n{\n  return asio::ip::make_address_v4(str);\n}\n\ninline address_v4 address_v4::from_string(\n    const std::string& str, asio::error_code& ec)\n{\n  return asio::ip::make_address_v4(str, ec);\n}\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const address_v4& addr)\n{\n  return os << addr.to_string().c_str();\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_IP_IMPL_ADDRESS_V4_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/address_v4.ipp",
    "content": "//\n// ip/impl/address_v4.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_ADDRESS_V4_IPP\n#define ASIO_IP_IMPL_ADDRESS_V4_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <climits>\n#include <limits>\n#include <stdexcept>\n#include \"asio/error.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/ip/address_v4.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\naddress_v4::address_v4(const address_v4::bytes_type& bytes)\n{\n#if UCHAR_MAX > 0xFF\n  if (bytes[0] > 0xFF || bytes[1] > 0xFF\n      || bytes[2] > 0xFF || bytes[3] > 0xFF)\n  {\n    std::out_of_range ex(\"address_v4 from bytes_type\");\n    asio::detail::throw_exception(ex);\n  }\n#endif // UCHAR_MAX > 0xFF\n\n  using namespace std; // For memcpy.\n  memcpy(&addr_.s_addr, bytes.data(), 4);\n}\n\naddress_v4::address_v4(address_v4::uint_type addr)\n{\n  if ((std::numeric_limits<uint_type>::max)() > 0xFFFFFFFF)\n  {\n    std::out_of_range ex(\"address_v4 from unsigned integer\");\n    asio::detail::throw_exception(ex);\n  }\n\n  addr_.s_addr = asio::detail::socket_ops::host_to_network_long(\n      static_cast<asio::detail::u_long_type>(addr));\n}\n\naddress_v4::bytes_type address_v4::to_bytes() const ASIO_NOEXCEPT\n{\n  using namespace std; // For memcpy.\n  bytes_type bytes;\n#if defined(ASIO_HAS_STD_ARRAY)\n  memcpy(bytes.data(), &addr_.s_addr, 4);\n#else // defined(ASIO_HAS_STD_ARRAY)\n  memcpy(bytes.elems, &addr_.s_addr, 4);\n#endif // defined(ASIO_HAS_STD_ARRAY)\n  return 
bytes;\n}\n\naddress_v4::uint_type address_v4::to_uint() const ASIO_NOEXCEPT\n{\n  return asio::detail::socket_ops::network_to_host_long(addr_.s_addr);\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nunsigned long address_v4::to_ulong() const\n{\n  return asio::detail::socket_ops::network_to_host_long(addr_.s_addr);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nstd::string address_v4::to_string() const\n{\n  asio::error_code ec;\n  char addr_str[asio::detail::max_addr_v4_str_len];\n  const char* addr =\n    asio::detail::socket_ops::inet_ntop(\n        ASIO_OS_DEF(AF_INET), &addr_, addr_str,\n        asio::detail::max_addr_v4_str_len, 0, ec);\n  if (addr == 0)\n    asio::detail::throw_error(ec);\n  return addr;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nstd::string address_v4::to_string(asio::error_code& ec) const\n{\n  char addr_str[asio::detail::max_addr_v4_str_len];\n  const char* addr =\n    asio::detail::socket_ops::inet_ntop(\n        ASIO_OS_DEF(AF_INET), &addr_, addr_str,\n        asio::detail::max_addr_v4_str_len, 0, ec);\n  if (addr == 0)\n    return std::string();\n  return addr;\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nbool address_v4::is_loopback() const ASIO_NOEXCEPT\n{\n  return (to_uint() & 0xFF000000) == 0x7F000000;\n}\n\nbool address_v4::is_unspecified() const ASIO_NOEXCEPT\n{\n  return to_uint() == 0;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nbool address_v4::is_class_a() const\n{\n  return (to_uint() & 0x80000000) == 0;\n}\n\nbool address_v4::is_class_b() const\n{\n  return (to_uint() & 0xC0000000) == 0x80000000;\n}\n\nbool address_v4::is_class_c() const\n{\n  return (to_uint() & 0xE0000000) == 0xC0000000;\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nbool address_v4::is_multicast() const ASIO_NOEXCEPT\n{\n  return (to_uint() & 0xF0000000) == 0xE0000000;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\naddress_v4 address_v4::broadcast(const address_v4& addr, const address_v4& mask)\n{\n  return address_v4(addr.to_uint() | (mask.to_uint() ^ 
0xFFFFFFFF));\n}\n\naddress_v4 address_v4::netmask(const address_v4& addr)\n{\n  if (addr.is_class_a())\n    return address_v4(0xFF000000);\n  if (addr.is_class_b())\n    return address_v4(0xFFFF0000);\n  if (addr.is_class_c())\n    return address_v4(0xFFFFFF00);\n  return address_v4(0xFFFFFFFF);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\naddress_v4 make_address_v4(const char* str)\n{\n  asio::error_code ec;\n  address_v4 addr = make_address_v4(str, ec);\n  asio::detail::throw_error(ec);\n  return addr;\n}\n\naddress_v4 make_address_v4(const char* str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  address_v4::bytes_type bytes;\n  if (asio::detail::socket_ops::inet_pton(\n        ASIO_OS_DEF(AF_INET), str, &bytes, 0, ec) <= 0)\n    return address_v4();\n  return address_v4(bytes);\n}\n\naddress_v4 make_address_v4(const std::string& str)\n{\n  return make_address_v4(str.c_str());\n}\n\naddress_v4 make_address_v4(const std::string& str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  return make_address_v4(str.c_str(), ec);\n}\n\n#if defined(ASIO_HAS_STRING_VIEW)\n\naddress_v4 make_address_v4(string_view str)\n{\n  return make_address_v4(static_cast<std::string>(str));\n}\n\naddress_v4 make_address_v4(string_view str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  return make_address_v4(static_cast<std::string>(str), ec);\n}\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_IMPL_ADDRESS_V4_IPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/address_v6.hpp",
    "content": "//\n// ip/impl/address_v6.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_ADDRESS_V6_HPP\n#define ASIO_IP_IMPL_ADDRESS_V6_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n#if !defined(ASIO_NO_DEPRECATED)\n\ninline address_v6 address_v6::from_string(const char* str)\n{\n  return asio::ip::make_address_v6(str);\n}\n\ninline address_v6 address_v6::from_string(\n    const char* str, asio::error_code& ec)\n{\n  return asio::ip::make_address_v6(str, ec);\n}\n\ninline address_v6 address_v6::from_string(const std::string& str)\n{\n  return asio::ip::make_address_v6(str);\n}\n\ninline address_v6 address_v6::from_string(\n    const std::string& str, asio::error_code& ec)\n{\n  return asio::ip::make_address_v6(str, ec);\n}\n\n#endif // !defined(ASIO_NO_DEPRECATED)\n\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const address_v6& addr)\n{\n  return os << addr.to_string().c_str();\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_IP_IMPL_ADDRESS_V6_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/address_v6.ipp",
    "content": "//\n// ip/impl/address_v6.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_ADDRESS_V6_IPP\n#define ASIO_IP_IMPL_ADDRESS_V6_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstring>\n#include <stdexcept>\n#include <typeinfo>\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/ip/address_v6.hpp\"\n#include \"asio/ip/bad_address_cast.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\naddress_v6::address_v6() ASIO_NOEXCEPT\n  : addr_(),\n    scope_id_(0)\n{\n}\n\naddress_v6::address_v6(const address_v6::bytes_type& bytes,\n    unsigned long scope)\n  : scope_id_(scope)\n{\n#if UCHAR_MAX > 0xFF\n  for (std::size_t i = 0; i < bytes.size(); ++i)\n  {\n    if (bytes[i] > 0xFF)\n    {\n      std::out_of_range ex(\"address_v6 from bytes_type\");\n      asio::detail::throw_exception(ex);\n    }\n  }\n#endif // UCHAR_MAX > 0xFF\n\n  using namespace std; // For memcpy.\n  memcpy(addr_.s6_addr, bytes.data(), 16);\n}\n\naddress_v6::address_v6(const address_v6& other) ASIO_NOEXCEPT\n  : addr_(other.addr_),\n    scope_id_(other.scope_id_)\n{\n}\n\n#if defined(ASIO_HAS_MOVE)\naddress_v6::address_v6(address_v6&& other) ASIO_NOEXCEPT\n  : addr_(other.addr_),\n    scope_id_(other.scope_id_)\n{\n}\n#endif // defined(ASIO_HAS_MOVE)\n\naddress_v6& address_v6::operator=(const address_v6& other) ASIO_NOEXCEPT\n{\n  addr_ = other.addr_;\n  scope_id_ = other.scope_id_;\n  return *this;\n}\n\n#if defined(ASIO_HAS_MOVE)\naddress_v6& 
address_v6::operator=(address_v6&& other) ASIO_NOEXCEPT\n{\n  addr_ = other.addr_;\n  scope_id_ = other.scope_id_;\n  return *this;\n}\n#endif // defined(ASIO_HAS_MOVE)\n\naddress_v6::bytes_type address_v6::to_bytes() const ASIO_NOEXCEPT\n{\n  using namespace std; // For memcpy.\n  bytes_type bytes;\n#if defined(ASIO_HAS_STD_ARRAY)\n  memcpy(bytes.data(), addr_.s6_addr, 16);\n#else // defined(ASIO_HAS_STD_ARRAY)\n  memcpy(bytes.elems, addr_.s6_addr, 16);\n#endif // defined(ASIO_HAS_STD_ARRAY)\n  return bytes;\n}\n\nstd::string address_v6::to_string() const\n{\n  asio::error_code ec;\n  char addr_str[asio::detail::max_addr_v6_str_len];\n  const char* addr =\n    asio::detail::socket_ops::inet_ntop(\n        ASIO_OS_DEF(AF_INET6), &addr_, addr_str,\n        asio::detail::max_addr_v6_str_len, scope_id_, ec);\n  if (addr == 0)\n    asio::detail::throw_error(ec);\n  return addr;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nstd::string address_v6::to_string(asio::error_code& ec) const\n{\n  char addr_str[asio::detail::max_addr_v6_str_len];\n  const char* addr =\n    asio::detail::socket_ops::inet_ntop(\n        ASIO_OS_DEF(AF_INET6), &addr_, addr_str,\n        asio::detail::max_addr_v6_str_len, scope_id_, ec);\n  if (addr == 0)\n    return std::string();\n  return addr;\n}\n\naddress_v4 address_v6::to_v4() const\n{\n  if (!is_v4_mapped() && !is_v4_compatible())\n  {\n    bad_address_cast ex;\n    asio::detail::throw_exception(ex);\n  }\n\n  address_v4::bytes_type v4_bytes = { { addr_.s6_addr[12],\n    addr_.s6_addr[13], addr_.s6_addr[14], addr_.s6_addr[15] } };\n  return address_v4(v4_bytes);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nbool address_v6::is_loopback() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0)\n      && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0)\n      && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0)\n      && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0)\n      && (addr_.s6_addr[8] == 0) && 
(addr_.s6_addr[9] == 0)\n      && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0)\n      && (addr_.s6_addr[12] == 0) && (addr_.s6_addr[13] == 0)\n      && (addr_.s6_addr[14] == 0) && (addr_.s6_addr[15] == 1));\n}\n\nbool address_v6::is_unspecified() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0)\n      && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0)\n      && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0)\n      && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0)\n      && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0)\n      && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0)\n      && (addr_.s6_addr[12] == 0) && (addr_.s6_addr[13] == 0)\n      && (addr_.s6_addr[14] == 0) && (addr_.s6_addr[15] == 0));\n}\n\nbool address_v6::is_link_local() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0xfe) && ((addr_.s6_addr[1] & 0xc0) == 0x80));\n}\n\nbool address_v6::is_site_local() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0xfe) && ((addr_.s6_addr[1] & 0xc0) == 0xc0));\n}\n\nbool address_v6::is_v4_mapped() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0)\n      && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0)\n      && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0)\n      && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0)\n      && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0)\n      && (addr_.s6_addr[10] == 0xff) && (addr_.s6_addr[11] == 0xff));\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\nbool address_v6::is_v4_compatible() const\n{\n  return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0)\n      && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0)\n      && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0)\n      && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0)\n      && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0)\n      && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0)\n      && !((addr_.s6_addr[12] == 0)\n        && 
(addr_.s6_addr[13] == 0)\n        && (addr_.s6_addr[14] == 0)\n        && ((addr_.s6_addr[15] == 0) || (addr_.s6_addr[15] == 1))));\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nbool address_v6::is_multicast() const ASIO_NOEXCEPT\n{\n  return (addr_.s6_addr[0] == 0xff);\n}\n\nbool address_v6::is_multicast_global() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x0e));\n}\n\nbool address_v6::is_multicast_link_local() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x02));\n}\n\nbool address_v6::is_multicast_node_local() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x01));\n}\n\nbool address_v6::is_multicast_org_local() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x08));\n}\n\nbool address_v6::is_multicast_site_local() const ASIO_NOEXCEPT\n{\n  return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x05));\n}\n\nbool operator==(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT\n{\n  using namespace std; // For memcmp.\n  return memcmp(&a1.addr_, &a2.addr_,\n      sizeof(asio::detail::in6_addr_type)) == 0\n    && a1.scope_id_ == a2.scope_id_;\n}\n\nbool operator<(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT\n{\n  using namespace std; // For memcmp.\n  int memcmp_result = memcmp(&a1.addr_, &a2.addr_,\n      sizeof(asio::detail::in6_addr_type));\n  if (memcmp_result < 0)\n    return true;\n  if (memcmp_result > 0)\n    return false;\n  return a1.scope_id_ < a2.scope_id_;\n}\n\naddress_v6 address_v6::loopback() ASIO_NOEXCEPT\n{\n  address_v6 tmp;\n  tmp.addr_.s6_addr[15] = 1;\n  return tmp;\n}\n\n#if !defined(ASIO_NO_DEPRECATED)\naddress_v6 address_v6::v4_mapped(const address_v4& addr)\n{\n  address_v4::bytes_type v4_bytes = addr.to_bytes();\n  bytes_type v6_bytes = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF,\n    v4_bytes[0], v4_bytes[1], 
v4_bytes[2], v4_bytes[3] } };\n  return address_v6(v6_bytes);\n}\n\naddress_v6 address_v6::v4_compatible(const address_v4& addr)\n{\n  address_v4::bytes_type v4_bytes = addr.to_bytes();\n  bytes_type v6_bytes = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n    v4_bytes[0], v4_bytes[1], v4_bytes[2], v4_bytes[3] } };\n  return address_v6(v6_bytes);\n}\n#endif // !defined(ASIO_NO_DEPRECATED)\n\naddress_v6 make_address_v6(const char* str)\n{\n  asio::error_code ec;\n  address_v6 addr = make_address_v6(str, ec);\n  asio::detail::throw_error(ec);\n  return addr;\n}\n\naddress_v6 make_address_v6(const char* str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  address_v6::bytes_type bytes;\n  unsigned long scope_id = 0;\n  if (asio::detail::socket_ops::inet_pton(\n        ASIO_OS_DEF(AF_INET6), str, &bytes[0], &scope_id, ec) <= 0)\n    return address_v6();\n  return address_v6(bytes, scope_id);\n}\n\naddress_v6 make_address_v6(const std::string& str)\n{\n  return make_address_v6(str.c_str());\n}\n\naddress_v6 make_address_v6(const std::string& str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  return make_address_v6(str.c_str(), ec);\n}\n\n#if defined(ASIO_HAS_STRING_VIEW)\n\naddress_v6 make_address_v6(string_view str)\n{\n  return make_address_v6(static_cast<std::string>(str));\n}\n\naddress_v6 make_address_v6(string_view str,\n    asio::error_code& ec) ASIO_NOEXCEPT\n{\n  return make_address_v6(static_cast<std::string>(str), ec);\n}\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n\naddress_v4 make_address_v4(\n    v4_mapped_t, const address_v6& v6_addr)\n{\n  if (!v6_addr.is_v4_mapped())\n  {\n    bad_address_cast ex;\n    asio::detail::throw_exception(ex);\n  }\n\n  address_v6::bytes_type v6_bytes = v6_addr.to_bytes();\n  address_v4::bytes_type v4_bytes = { { v6_bytes[12],\n    v6_bytes[13], v6_bytes[14], v6_bytes[15] } };\n  return address_v4(v4_bytes);\n}\n\naddress_v6 make_address_v6(\n    v4_mapped_t, const address_v4& v4_addr)\n{\n  address_v4::bytes_type v4_bytes = 
v4_addr.to_bytes();\n  address_v6::bytes_type v6_bytes = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n    0xFF, 0xFF, v4_bytes[0], v4_bytes[1], v4_bytes[2], v4_bytes[3] } };\n  return address_v6(v6_bytes);\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_IMPL_ADDRESS_V6_IPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/basic_endpoint.hpp",
    "content": "//\n// ip/impl/basic_endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_BASIC_ENDPOINT_HPP\n#define ASIO_IP_IMPL_BASIC_ENDPOINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\ntemplate <typename Elem, typename Traits, typename InternetProtocol>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os,\n    const basic_endpoint<InternetProtocol>& endpoint)\n{\n  asio::ip::detail::endpoint tmp_ep(endpoint.address(), endpoint.port());\n  return os << tmp_ep.to_string().c_str();\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_IP_IMPL_BASIC_ENDPOINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/host_name.ipp",
    "content": "//\n// ip/impl/host_name.ipp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_HOST_NAME_IPP\n#define ASIO_IP_IMPL_HOST_NAME_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/winsock_init.hpp\"\n#include \"asio/ip/host_name.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\nstd::string host_name()\n{\n  char name[1024];\n  asio::error_code ec;\n  if (asio::detail::socket_ops::gethostname(name, sizeof(name), ec) != 0)\n  {\n    asio::detail::throw_error(ec);\n    return std::string();\n  }\n  return std::string(name);\n}\n\nstd::string host_name(asio::error_code& ec)\n{\n  char name[1024];\n  if (asio::detail::socket_ops::gethostname(name, sizeof(name), ec) != 0)\n    return std::string();\n  return std::string(name);\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_IMPL_HOST_NAME_IPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/network_v4.hpp",
    "content": "//\n// ip/impl/network_v4.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_NETWORK_V4_HPP\n#define ASIO_IP_IMPL_NETWORK_V4_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const network_v4& addr)\n{\n  asio::error_code ec;\n  std::string s = addr.to_string(ec);\n  if (ec)\n  {\n    if (os.exceptions() & std::basic_ostream<Elem, Traits>::failbit)\n      asio::detail::throw_error(ec);\n    else\n      os.setstate(std::basic_ostream<Elem, Traits>::failbit);\n  }\n  else\n    for (std::string::iterator i = s.begin(); i != s.end(); ++i)\n      os << os.widen(*i);\n  return os;\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_IP_IMPL_NETWORK_V4_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/network_v4.ipp",
    "content": "//\n// ip/impl/network_v4.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_NETWORK_V4_IPP\n#define ASIO_IP_IMPL_NETWORK_V4_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <climits>\n#include <cstdio>\n#include <cstdlib>\n#include <stdexcept>\n#include \"asio/error.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/ip/network_v4.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\nnetwork_v4::network_v4(const address_v4& addr, unsigned short prefix_len)\n  : address_(addr),\n    prefix_length_(prefix_len)\n{\n  if (prefix_len > 32)\n  {\n    std::out_of_range ex(\"prefix length too large\");\n    asio::detail::throw_exception(ex);\n  }\n}\n\nnetwork_v4::network_v4(const address_v4& addr, const address_v4& mask)\n  : address_(addr),\n    prefix_length_(0)\n{\n  address_v4::bytes_type mask_bytes = mask.to_bytes();\n  bool finished = false;\n  for (std::size_t i = 0; i < mask_bytes.size(); ++i)\n  {\n    if (finished)\n    {\n      if (mask_bytes[i])\n      {\n        std::invalid_argument ex(\"non-contiguous netmask\");\n        asio::detail::throw_exception(ex);\n      }\n      continue;\n    }\n    else\n    {\n      switch (mask_bytes[i])\n      {\n      case 255:\n        prefix_length_ += 8;\n        break;\n      case 254: // prefix_length_ += 7\n        prefix_length_ += 1;\n      case 252: // prefix_length_ += 6\n        prefix_length_ += 1;\n      case 248: // prefix_length_ += 5\n        prefix_length_ += 
1;\n      case 240: // prefix_length_ += 4\n        prefix_length_ += 1;\n      case 224: // prefix_length_ += 3\n        prefix_length_ += 1;\n      case 192: // prefix_length_ += 2\n        prefix_length_ += 1;\n      case 128: // prefix_length_ += 1\n        prefix_length_ += 1;\n      case 0:   // nbits += 0\n        finished = true;\n        break;\n      default:\n        std::out_of_range ex(\"non-contiguous netmask\");\n        asio::detail::throw_exception(ex);\n      }\n    }\n  }\n}\n\naddress_v4 network_v4::netmask() const ASIO_NOEXCEPT\n{\n  uint32_t nmbits = 0xffffffff;\n  if (prefix_length_ == 0)\n    nmbits = 0;\n  else\n    nmbits = nmbits << (32 - prefix_length_);\n  return address_v4(nmbits);\n}\n\naddress_v4_range network_v4::hosts() const ASIO_NOEXCEPT\n{\n  return is_host()\n    ? address_v4_range(address_, address_v4(address_.to_uint() + 1))\n    : address_v4_range(address_v4(network().to_uint() + 1), broadcast());\n}\n\nbool network_v4::is_subnet_of(const network_v4& other) const\n{\n  if (other.prefix_length_ >= prefix_length_)\n    return false; // Only real subsets are allowed.\n  const network_v4 me(address_, other.prefix_length_);\n  return other.canonical() == me.canonical();\n}\n\nstd::string network_v4::to_string() const\n{\n  asio::error_code ec;\n  std::string addr = to_string(ec);\n  asio::detail::throw_error(ec);\n  return addr;\n}\n\nstd::string network_v4::to_string(asio::error_code& ec) const\n{\n  using namespace std; // For sprintf.\n  ec = asio::error_code();\n  char prefix_len[16];\n#if defined(ASIO_HAS_SECURE_RTL)\n  sprintf_s(prefix_len, sizeof(prefix_len), \"/%u\", prefix_length_);\n#else // defined(ASIO_HAS_SECURE_RTL)\n  sprintf(prefix_len, \"/%u\", prefix_length_);\n#endif // defined(ASIO_HAS_SECURE_RTL)\n  return address_.to_string() + prefix_len;\n}\n\nnetwork_v4 make_network_v4(const char* str)\n{\n  return make_network_v4(std::string(str));\n}\n\nnetwork_v4 make_network_v4(const char* str, asio::error_code& 
ec)\n{\n  return make_network_v4(std::string(str), ec);\n}\n\nnetwork_v4 make_network_v4(const std::string& str)\n{\n  asio::error_code ec;\n  network_v4 net = make_network_v4(str, ec);\n  asio::detail::throw_error(ec);\n  return net;\n}\n\nnetwork_v4 make_network_v4(const std::string& str,\n    asio::error_code& ec)\n{\n  std::string::size_type pos = str.find_first_of(\"/\");\n\n  if (pos == std::string::npos)\n  {\n    ec = asio::error::invalid_argument;\n    return network_v4();\n  }\n\n  if (pos == str.size() - 1)\n  {\n    ec = asio::error::invalid_argument;\n    return network_v4();\n  }\n\n  std::string::size_type end = str.find_first_not_of(\"0123456789\", pos + 1);\n  if (end != std::string::npos)\n  {\n    ec = asio::error::invalid_argument;\n    return network_v4();\n  }\n\n  const address_v4 addr = make_address_v4(str.substr(0, pos), ec);\n  if (ec)\n    return network_v4();\n\n  const int prefix_len = std::atoi(str.substr(pos + 1).c_str());\n  if (prefix_len < 0 || prefix_len > 32)\n  {\n    ec = asio::error::invalid_argument;\n    return network_v4();\n  }\n\n  return network_v4(addr, static_cast<unsigned short>(prefix_len));\n}\n\n#if defined(ASIO_HAS_STRING_VIEW)\n\nnetwork_v4 make_network_v4(string_view str)\n{\n  return make_network_v4(static_cast<std::string>(str));\n}\n\nnetwork_v4 make_network_v4(string_view str,\n    asio::error_code& ec)\n{\n  return make_network_v4(static_cast<std::string>(str), ec);\n}\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_IMPL_NETWORK_V4_IPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/network_v6.hpp",
    "content": "//\n// ip/impl/network_v6.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_NETWORK_V6_HPP\n#define ASIO_IP_IMPL_NETWORK_V6_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const network_v6& addr)\n{\n  asio::error_code ec;\n  std::string s = addr.to_string(ec);\n  if (ec)\n  {\n    if (os.exceptions() & std::basic_ostream<Elem, Traits>::failbit)\n      asio::detail::throw_error(ec);\n    else\n      os.setstate(std::basic_ostream<Elem, Traits>::failbit);\n  }\n  else\n    for (std::string::iterator i = s.begin(); i != s.end(); ++i)\n      os << os.widen(*i);\n  return os;\n}\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_IP_IMPL_NETWORK_V6_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/impl/network_v6.ipp",
    "content": "//\n// ip/impl/network_v6.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_IMPL_NETWORK_V6_IPP\n#define ASIO_IP_IMPL_NETWORK_V6_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <climits>\n#include <cstdio>\n#include <cstdlib>\n#include <stdexcept>\n#include \"asio/error.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/throw_exception.hpp\"\n#include \"asio/ip/network_v6.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\nnetwork_v6::network_v6(const address_v6& addr, unsigned short prefix_len)\n  : address_(addr),\n    prefix_length_(prefix_len)\n{\n  if (prefix_len > 128)\n  {\n    std::out_of_range ex(\"prefix length too large\");\n    asio::detail::throw_exception(ex);\n  }\n}\n\nASIO_DECL address_v6 network_v6::network() const ASIO_NOEXCEPT\n{\n  address_v6::bytes_type bytes(address_.to_bytes());\n  for (std::size_t i = 0; i < 16; ++i)\n  {\n    if (prefix_length_ <= i * 8)\n      bytes[i] = 0;\n    else if (prefix_length_ < (i + 1) * 8)\n      bytes[i] &= 0xFF00 >> (prefix_length_ % 8);\n  }\n  return address_v6(bytes, address_.scope_id());\n}\n\naddress_v6_range network_v6::hosts() const ASIO_NOEXCEPT\n{\n  address_v6::bytes_type begin_bytes(address_.to_bytes());\n  address_v6::bytes_type end_bytes(address_.to_bytes());\n  for (std::size_t i = 0; i < 16; ++i)\n  {\n    if (prefix_length_ <= i * 8)\n    {\n      begin_bytes[i] = 0;\n      end_bytes[i] = 0xFF;\n    }\n    else if (prefix_length_ < (i + 1) * 8)\n    {\n      begin_bytes[i] &= 0xFF00 >> 
(prefix_length_ % 8);\n      end_bytes[i] |= 0xFF >> (prefix_length_ % 8);\n    }\n  }\n  return address_v6_range(\n      address_v6_iterator(address_v6(begin_bytes, address_.scope_id())),\n      ++address_v6_iterator(address_v6(end_bytes, address_.scope_id())));\n}\n\nbool network_v6::is_subnet_of(const network_v6& other) const\n{\n  if (other.prefix_length_ >= prefix_length_)\n    return false; // Only real subsets are allowed.\n  const network_v6 me(address_, other.prefix_length_);\n  return other.canonical() == me.canonical();\n}\n\nstd::string network_v6::to_string() const\n{\n  asio::error_code ec;\n  std::string addr = to_string(ec);\n  asio::detail::throw_error(ec);\n  return addr;\n}\n\nstd::string network_v6::to_string(asio::error_code& ec) const\n{\n  using namespace std; // For sprintf.\n  ec = asio::error_code();\n  char prefix_len[16];\n#if defined(ASIO_HAS_SECURE_RTL)\n  sprintf_s(prefix_len, sizeof(prefix_len), \"/%u\", prefix_length_);\n#else // defined(ASIO_HAS_SECURE_RTL)\n  sprintf(prefix_len, \"/%u\", prefix_length_);\n#endif // defined(ASIO_HAS_SECURE_RTL)\n  return address_.to_string() + prefix_len;\n}\n\nnetwork_v6 make_network_v6(const char* str)\n{\n  return make_network_v6(std::string(str));\n}\n\nnetwork_v6 make_network_v6(const char* str, asio::error_code& ec)\n{\n  return make_network_v6(std::string(str), ec);\n}\n\nnetwork_v6 make_network_v6(const std::string& str)\n{\n  asio::error_code ec;\n  network_v6 net = make_network_v6(str, ec);\n  asio::detail::throw_error(ec);\n  return net;\n}\n\nnetwork_v6 make_network_v6(const std::string& str,\n    asio::error_code& ec)\n{\n  std::string::size_type pos = str.find_first_of(\"/\");\n\n  if (pos == std::string::npos)\n  {\n    ec = asio::error::invalid_argument;\n    return network_v6();\n  }\n\n  if (pos == str.size() - 1)\n  {\n    ec = asio::error::invalid_argument;\n    return network_v6();\n  }\n\n  std::string::size_type end = str.find_first_not_of(\"0123456789\", pos + 1);\n  if (end 
!= std::string::npos)\n  {\n    ec = asio::error::invalid_argument;\n    return network_v6();\n  }\n\n  const address_v6 addr = make_address_v6(str.substr(0, pos), ec);\n  if (ec)\n    return network_v6();\n\n  const int prefix_len = std::atoi(str.substr(pos + 1).c_str());\n  if (prefix_len < 0 || prefix_len > 128)\n  {\n    ec = asio::error::invalid_argument;\n    return network_v6();\n  }\n\n  return network_v6(addr, static_cast<unsigned short>(prefix_len));\n}\n\n#if defined(ASIO_HAS_STRING_VIEW)\n\nnetwork_v6 make_network_v6(string_view str)\n{\n  return make_network_v6(static_cast<std::string>(str));\n}\n\nnetwork_v6 make_network_v6(string_view str,\n    asio::error_code& ec)\n{\n  return make_network_v6(static_cast<std::string>(str), ec);\n}\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_IMPL_NETWORK_V6_IPP\n"
  },
  {
    "path": "src/third_party/asio/ip/multicast.hpp",
    "content": "//\n// ip/multicast.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_MULTICAST_HPP\n#define ASIO_IP_MULTICAST_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/ip/detail/socket_option.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\nnamespace multicast {\n\n/// Socket option to join a multicast group on a specified interface.\n/**\n * Implements the IPPROTO_IP/IP_ADD_MEMBERSHIP socket option.\n *\n * @par Examples\n * Setting the option to join a multicast group:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::address multicast_address =\n *   asio::ip::address::from_string(\"225.0.0.1\");\n * asio::ip::multicast::join_group option(multicast_address);\n * socket.set_option(option);\n * @endcode\n *\n * @par Concepts:\n * SettableSocketOption.\n */\n#if defined(GENERATING_DOCUMENTATION)\ntypedef implementation_defined join_group;\n#else\ntypedef asio::ip::detail::socket_option::multicast_request<\n  ASIO_OS_DEF(IPPROTO_IP),\n  ASIO_OS_DEF(IP_ADD_MEMBERSHIP),\n  ASIO_OS_DEF(IPPROTO_IPV6),\n  ASIO_OS_DEF(IPV6_JOIN_GROUP)> join_group;\n#endif\n\n/// Socket option to leave a multicast group on a specified interface.\n/**\n * Implements the IPPROTO_IP/IP_DROP_MEMBERSHIP socket option.\n *\n * @par Examples\n * Setting the option to leave a multicast group:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::address multicast_address =\n *   asio::ip::address::from_string(\"225.0.0.1\");\n * asio::ip::multicast::leave_group option(multicast_address);\n * socket.set_option(option);\n * 
@endcode\n *\n * @par Concepts:\n * SettableSocketOption.\n */\n#if defined(GENERATING_DOCUMENTATION)\ntypedef implementation_defined leave_group;\n#else\ntypedef asio::ip::detail::socket_option::multicast_request<\n  ASIO_OS_DEF(IPPROTO_IP),\n  ASIO_OS_DEF(IP_DROP_MEMBERSHIP),\n  ASIO_OS_DEF(IPPROTO_IPV6),\n  ASIO_OS_DEF(IPV6_LEAVE_GROUP)> leave_group;\n#endif\n\n/// Socket option for local interface to use for outgoing multicast packets.\n/**\n * Implements the IPPROTO_IP/IP_MULTICAST_IF socket option.\n *\n * @par Examples\n * Setting the option:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::address_v4 local_interface =\n *   asio::ip::address_v4::from_string(\"1.2.3.4\");\n * asio::ip::multicast::outbound_interface option(local_interface);\n * socket.set_option(option);\n * @endcode\n *\n * @par Concepts:\n * SettableSocketOption.\n */\n#if defined(GENERATING_DOCUMENTATION)\ntypedef implementation_defined outbound_interface;\n#else\ntypedef asio::ip::detail::socket_option::network_interface<\n  ASIO_OS_DEF(IPPROTO_IP),\n  ASIO_OS_DEF(IP_MULTICAST_IF),\n  ASIO_OS_DEF(IPPROTO_IPV6),\n  ASIO_OS_DEF(IPV6_MULTICAST_IF)> outbound_interface;\n#endif\n\n/// Socket option for time-to-live associated with outgoing multicast packets.\n/**\n * Implements the IPPROTO_IP/IP_MULTICAST_TTL socket option.\n *\n * @par Examples\n * Setting the option:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::multicast::hops option(4);\n * socket.set_option(option);\n * @endcode\n *\n * @par\n * Getting the current option value:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::multicast::hops option;\n * socket.get_option(option);\n * int ttl = option.value();\n * @endcode\n *\n * @par Concepts:\n * GettableSocketOption, SettableSocketOption.\n */\n#if defined(GENERATING_DOCUMENTATION)\ntypedef implementation_defined hops;\n#else\ntypedef asio::ip::detail::socket_option::multicast_hops<\n  
ASIO_OS_DEF(IPPROTO_IP),\n  ASIO_OS_DEF(IP_MULTICAST_TTL),\n  ASIO_OS_DEF(IPPROTO_IPV6),\n  ASIO_OS_DEF(IPV6_MULTICAST_HOPS)> hops;\n#endif\n\n/// Socket option determining whether outgoing multicast packets will be\n/// received on the same socket if it is a member of the multicast group.\n/**\n * Implements the IPPROTO_IP/IP_MULTICAST_LOOP socket option.\n *\n * @par Examples\n * Setting the option:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::multicast::enable_loopback option(true);\n * socket.set_option(option);\n * @endcode\n *\n * @par\n * Getting the current option value:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::multicast::enable_loopback option;\n * socket.get_option(option);\n * bool is_set = option.value();\n * @endcode\n *\n * @par Concepts:\n * GettableSocketOption, SettableSocketOption.\n */\n#if defined(GENERATING_DOCUMENTATION)\ntypedef implementation_defined enable_loopback;\n#else\ntypedef asio::ip::detail::socket_option::multicast_enable_loopback<\n  ASIO_OS_DEF(IPPROTO_IP),\n  ASIO_OS_DEF(IP_MULTICAST_LOOP),\n  ASIO_OS_DEF(IPPROTO_IPV6),\n  ASIO_OS_DEF(IPV6_MULTICAST_LOOP)> enable_loopback;\n#endif\n\n} // namespace multicast\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_MULTICAST_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/network_v4.hpp",
    "content": "//\n// ip/network_v4.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_NETWORK_V4_HPP\n#define ASIO_IP_NETWORK_V4_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/detail/string_view.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/ip/address_v4_range.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Represents an IPv4 network.\n/**\n * The asio::ip::network_v4 class provides the ability to use and\n * manipulate IP version 4 networks.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\nclass network_v4\n{\npublic:\n  /// Default constructor.\n  network_v4() ASIO_NOEXCEPT\n    : address_(),\n      prefix_length_(0)\n  {\n  }\n\n  /// Construct a network based on the specified address and prefix length.\n  ASIO_DECL network_v4(const address_v4& addr,\n      unsigned short prefix_len);\n\n  /// Construct network based on the specified address and netmask.\n  ASIO_DECL network_v4(const address_v4& addr,\n      const address_v4& mask);\n\n  /// Copy constructor.\n  network_v4(const network_v4& other) ASIO_NOEXCEPT\n    : address_(other.address_),\n      prefix_length_(other.prefix_length_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  network_v4(network_v4&& other) ASIO_NOEXCEPT\n    : address_(ASIO_MOVE_CAST(address_v4)(other.address_)),\n      prefix_length_(other.prefix_length_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assign from another network.\n  network_v4& 
operator=(const network_v4& other) ASIO_NOEXCEPT\n  {\n    address_ = other.address_;\n    prefix_length_ = other.prefix_length_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move-assign from another network.\n  network_v4& operator=(network_v4&& other) ASIO_NOEXCEPT\n  {\n    address_ = ASIO_MOVE_CAST(address_v4)(other.address_);\n    prefix_length_ = other.prefix_length_;\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Obtain the address object specified when the network object was created.\n  address_v4 address() const ASIO_NOEXCEPT\n  {\n    return address_;\n  }\n\n  /// Obtain the prefix length that was specified when the network object was\n  /// created.\n  unsigned short prefix_length() const ASIO_NOEXCEPT\n  {\n    return prefix_length_;\n  }\n\n  /// Obtain the netmask that was specified when the network object was created.\n  ASIO_DECL address_v4 netmask() const ASIO_NOEXCEPT;\n\n  /// Obtain an address object that represents the network address.\n  address_v4 network() const ASIO_NOEXCEPT\n  {\n    return address_v4(address_.to_uint() & netmask().to_uint());\n  }\n\n  /// Obtain an address object that represents the network's broadcast address.\n  address_v4 broadcast() const ASIO_NOEXCEPT\n  {\n    return address_v4(network().to_uint() | (netmask().to_uint() ^ 0xFFFFFFFF));\n  }\n\n  /// Obtain an address range corresponding to the hosts in the network.\n  ASIO_DECL address_v4_range hosts() const ASIO_NOEXCEPT;\n\n  /// Obtain the true network address, omitting any host bits.\n  network_v4 canonical() const ASIO_NOEXCEPT\n  {\n    return network_v4(network(), netmask());\n  }\n\n  /// Test if network is a valid host address.\n  bool is_host() const ASIO_NOEXCEPT\n  {\n    return prefix_length_ == 32;\n  }\n\n  /// Test if a network is a real subnet of another network.\n  ASIO_DECL bool is_subnet_of(const network_v4& other) const;\n\n  /// Get the network as an address in dotted decimal format.\n  ASIO_DECL 
std::string to_string() const;\n\n  /// Get the network as an address in dotted decimal format.\n  ASIO_DECL std::string to_string(asio::error_code& ec) const;\n\n  /// Compare two networks for equality.\n  friend bool operator==(const network_v4& a, const network_v4& b)\n  {\n    return a.address_ == b.address_ && a.prefix_length_ == b.prefix_length_;\n  }\n\n  /// Compare two networks for inequality.\n  friend bool operator!=(const network_v4& a, const network_v4& b)\n  {\n    return !(a == b);\n  }\n\nprivate:\n  address_v4 address_;\n  unsigned short prefix_length_;\n};\n\n/// Create an IPv4 network from an address and prefix length.\n/**\n * @relates address_v4\n */\ninline network_v4 make_network_v4(\n    const address_v4& addr, unsigned short prefix_len)\n{\n  return network_v4(addr, prefix_len);\n}\n\n/// Create an IPv4 network from an address and netmask.\n/**\n * @relates address_v4\n */\ninline network_v4 make_network_v4(\n    const address_v4& addr, const address_v4& mask)\n{\n  return network_v4(addr, mask);\n}\n\n/// Create an IPv4 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v4\n */\nASIO_DECL network_v4 make_network_v4(const char* str);\n\n/// Create an IPv4 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v4\n */\nASIO_DECL network_v4 make_network_v4(\n    const char* str, asio::error_code& ec);\n\n/// Create an IPv4 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v4\n */\nASIO_DECL network_v4 make_network_v4(const std::string& str);\n\n/// Create an IPv4 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v4\n */\nASIO_DECL network_v4 make_network_v4(\n    const std::string& str, asio::error_code& ec);\n\n#if defined(ASIO_HAS_STRING_VIEW) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Create an IPv4 network from a string containing IP address and prefix\n/// 
length.\n/**\n * @relates network_v4\n */\nASIO_DECL network_v4 make_network_v4(string_view str);\n\n/// Create an IPv4 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v4\n */\nASIO_DECL network_v4 make_network_v4(\n    string_view str, asio::error_code& ec);\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n       //  || defined(GENERATING_DOCUMENTATION)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Output a network as a string.\n/**\n * Used to output a human-readable string for a specified network.\n *\n * @param os The output stream to which the string will be written.\n *\n * @param net The network to be written.\n *\n * @return The output stream.\n *\n * @relates asio::ip::address_v4\n */\ntemplate <typename Elem, typename Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const network_v4& net);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/ip/impl/network_v4.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ip/impl/network_v4.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_IP_NETWORK_V4_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/network_v6.hpp",
    "content": "//\n// ip/network_v6.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_NETWORK_V6_HPP\n#define ASIO_IP_NETWORK_V6_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <string>\n#include \"asio/detail/string_view.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/ip/address_v6_range.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Represents an IPv6 network.\n/**\n * The asio::ip::network_v6 class provides the ability to use and\n * manipulate IP version 6 networks.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\nclass network_v6\n{\npublic:\n  /// Default constructor.\n  network_v6() ASIO_NOEXCEPT\n    : address_(),\n      prefix_length_(0)\n  {\n  }\n\n  /// Construct a network based on the specified address and prefix length.\n  ASIO_DECL network_v6(const address_v6& addr,\n      unsigned short prefix_len);\n\n  /// Copy constructor.\n  network_v6(const network_v6& other) ASIO_NOEXCEPT\n    : address_(other.address_),\n      prefix_length_(other.prefix_length_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  network_v6(network_v6&& other) ASIO_NOEXCEPT\n    : address_(ASIO_MOVE_CAST(address_v6)(other.address_)),\n      prefix_length_(other.prefix_length_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assign from another network.\n  network_v6& operator=(const network_v6& other) ASIO_NOEXCEPT\n  {\n    address_ = other.address_;\n    prefix_length_ = other.prefix_length_;\n    return *this;\n  
}\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move-assign from another network.\n  network_v6& operator=(network_v6&& other) ASIO_NOEXCEPT\n  {\n    address_ = ASIO_MOVE_CAST(address_v6)(other.address_);\n    prefix_length_ = other.prefix_length_;\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Obtain the address object specified when the network object was created.\n  address_v6 address() const ASIO_NOEXCEPT\n  {\n    return address_;\n  }\n\n  /// Obtain the prefix length that was specified when the network object was\n  /// created.\n  unsigned short prefix_length() const ASIO_NOEXCEPT\n  {\n    return prefix_length_;\n  }\n\n  /// Obtain an address object that represents the network address.\n  ASIO_DECL address_v6 network() const ASIO_NOEXCEPT;\n\n  /// Obtain an address range corresponding to the hosts in the network.\n  ASIO_DECL address_v6_range hosts() const ASIO_NOEXCEPT;\n\n  /// Obtain the true network address, omitting any host bits.\n  network_v6 canonical() const ASIO_NOEXCEPT\n  {\n    return network_v6(network(), prefix_length());\n  }\n\n  /// Test if network is a valid host address.\n  bool is_host() const ASIO_NOEXCEPT\n  {\n    return prefix_length_ == 128;\n  }\n\n  /// Test if a network is a real subnet of another network.\n  ASIO_DECL bool is_subnet_of(const network_v6& other) const;\n\n  /// Get the network as an address in dotted decimal format.\n  ASIO_DECL std::string to_string() const;\n\n  /// Get the network as an address in dotted decimal format.\n  ASIO_DECL std::string to_string(asio::error_code& ec) const;\n\n  /// Compare two networks for equality.\n  friend bool operator==(const network_v6& a, const network_v6& b)\n  {\n    return a.address_ == b.address_ && a.prefix_length_ == b.prefix_length_;\n  }\n\n  /// Compare two networks for inequality.\n  friend bool operator!=(const network_v6& a, const network_v6& b)\n  {\n    return !(a == b);\n  }\n\nprivate:\n  address_v6 address_;\n  unsigned short 
prefix_length_;\n};\n\n/// Create an IPv6 network from an address and prefix length.\n/**\n * @relates address_v6\n */\ninline network_v6 make_network_v6(\n    const address_v6& addr, unsigned short prefix_len)\n{\n  return network_v6(addr, prefix_len);\n}\n\n/// Create an IPv6 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v6\n */\nASIO_DECL network_v6 make_network_v6(const char* str);\n\n/// Create an IPv6 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v6\n */\nASIO_DECL network_v6 make_network_v6(\n    const char* str, asio::error_code& ec);\n\n/// Create an IPv6 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v6\n */\nASIO_DECL network_v6 make_network_v6(const std::string& str);\n\n/// Create an IPv6 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v6\n */\nASIO_DECL network_v6 make_network_v6(\n    const std::string& str, asio::error_code& ec);\n\n#if defined(ASIO_HAS_STRING_VIEW) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Create an IPv6 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v6\n */\nASIO_DECL network_v6 make_network_v6(string_view str);\n\n/// Create an IPv6 network from a string containing IP address and prefix\n/// length.\n/**\n * @relates network_v6\n */\nASIO_DECL network_v6 make_network_v6(\n    string_view str, asio::error_code& ec);\n\n#endif // defined(ASIO_HAS_STRING_VIEW)\n       //  || defined(GENERATING_DOCUMENTATION)\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Output a network as a string.\n/**\n * Used to output a human-readable string for a specified network.\n *\n * @param os The output stream to which the string will be written.\n *\n * @param net The network to be written.\n *\n * @return The output stream.\n *\n * @relates asio::ip::address_v6\n */\ntemplate <typename Elem, typename 
Traits>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os, const network_v6& net);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/ip/impl/network_v6.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ip/impl/network_v6.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_IP_NETWORK_V6_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/resolver_base.hpp",
    "content": "//\n// ip/resolver_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_RESOLVER_BASE_HPP\n#define ASIO_IP_RESOLVER_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// The resolver_base class is used as a base for the basic_resolver class\n/// templates to provide a common place to define the flag constants.\nclass resolver_base\n{\npublic:\n#if defined(GENERATING_DOCUMENTATION)\n  /// A bitmask type (C++ Std [lib.bitmask.types]).\n  typedef unspecified flags;\n\n  /// Determine the canonical name of the host specified in the query.\n  static const flags canonical_name = implementation_defined;\n\n  /// Indicate that returned endpoint is intended for use as a locally bound\n  /// socket endpoint.\n  static const flags passive = implementation_defined;\n\n  /// Host name should be treated as a numeric string defining an IPv4 or IPv6\n  /// address and no name resolution should be attempted.\n  static const flags numeric_host = implementation_defined;\n\n  /// Service name should be treated as a numeric string defining a port number\n  /// and no name resolution should be attempted.\n  static const flags numeric_service = implementation_defined;\n\n  /// If the query protocol family is specified as IPv6, return IPv4-mapped\n  /// IPv6 addresses on finding no IPv6 addresses.\n  static const flags v4_mapped = implementation_defined;\n\n  /// If used with v4_mapped, return all matching IPv6 and IPv4 addresses.\n  static const flags all_matching = implementation_defined;\n\n  
/// Only return IPv4 addresses if a non-loopback IPv4 address is configured\n  /// for the system. Only return IPv6 addresses if a non-loopback IPv6 address\n  /// is configured for the system.\n  static const flags address_configured = implementation_defined;\n#else\n  enum flags\n  {\n    canonical_name = ASIO_OS_DEF(AI_CANONNAME),\n    passive = ASIO_OS_DEF(AI_PASSIVE),\n    numeric_host = ASIO_OS_DEF(AI_NUMERICHOST),\n    numeric_service = ASIO_OS_DEF(AI_NUMERICSERV),\n    v4_mapped = ASIO_OS_DEF(AI_V4MAPPED),\n    all_matching = ASIO_OS_DEF(AI_ALL),\n    address_configured = ASIO_OS_DEF(AI_ADDRCONFIG)\n  };\n\n  // Implement bitmask operations as shown in C++ Std [lib.bitmask.types].\n\n  friend flags operator&(flags x, flags y)\n  {\n    return static_cast<flags>(\n        static_cast<unsigned int>(x) & static_cast<unsigned int>(y));\n  }\n\n  friend flags operator|(flags x, flags y)\n  {\n    return static_cast<flags>(\n        static_cast<unsigned int>(x) | static_cast<unsigned int>(y));\n  }\n\n  friend flags operator^(flags x, flags y)\n  {\n    return static_cast<flags>(\n        static_cast<unsigned int>(x) ^ static_cast<unsigned int>(y));\n  }\n\n  friend flags operator~(flags x)\n  {\n    return static_cast<flags>(~static_cast<unsigned int>(x));\n  }\n\n  friend flags& operator&=(flags& x, flags y)\n  {\n    x = x & y;\n    return x;\n  }\n\n  friend flags& operator|=(flags& x, flags y)\n  {\n    x = x | y;\n    return x;\n  }\n\n  friend flags& operator^=(flags& x, flags y)\n  {\n    x = x ^ y;\n    return x;\n  }\n#endif\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  ~resolver_base()\n  {\n  }\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_RESOLVER_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/resolver_query_base.hpp",
    "content": "//\n// ip/resolver_query_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_RESOLVER_QUERY_BASE_HPP\n#define ASIO_IP_RESOLVER_QUERY_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ip/resolver_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// The resolver_query_base class is used as a base for the\n/// basic_resolver_query class templates to provide a common place to define\n/// the flag constants.\nclass resolver_query_base : public resolver_base\n{\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  ~resolver_query_base()\n  {\n  }\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_RESOLVER_QUERY_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/tcp.hpp",
    "content": "//\n// ip/tcp.hpp\n// ~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_TCP_HPP\n#define ASIO_IP_TCP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/basic_socket_acceptor.hpp\"\n#include \"asio/basic_socket_iostream.hpp\"\n#include \"asio/basic_stream_socket.hpp\"\n#include \"asio/detail/socket_option.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/ip/basic_endpoint.hpp\"\n#include \"asio/ip/basic_resolver.hpp\"\n#include \"asio/ip/basic_resolver_iterator.hpp\"\n#include \"asio/ip/basic_resolver_query.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Encapsulates the flags needed for TCP.\n/**\n * The asio::ip::tcp class contains flags necessary for TCP sockets.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol, InternetProtocol.\n */\nclass tcp\n{\npublic:\n  /// The type of a TCP endpoint.\n  typedef basic_endpoint<tcp> endpoint;\n\n  /// Construct to represent the IPv4 TCP protocol.\n  static tcp v4() ASIO_NOEXCEPT\n  {\n    return tcp(ASIO_OS_DEF(AF_INET));\n  }\n\n  /// Construct to represent the IPv6 TCP protocol.\n  static tcp v6() ASIO_NOEXCEPT\n  {\n    return tcp(ASIO_OS_DEF(AF_INET6));\n  }\n\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(SOCK_STREAM);\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(IPPROTO_TCP);\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  
{\n    return family_;\n  }\n\n  /// The TCP socket type.\n  typedef basic_stream_socket<tcp> socket;\n\n  /// The TCP acceptor type.\n  typedef basic_socket_acceptor<tcp> acceptor;\n\n  /// The TCP resolver type.\n  typedef basic_resolver<tcp> resolver;\n\n#if !defined(ASIO_NO_IOSTREAM)\n  /// The TCP iostream type.\n  typedef basic_socket_iostream<tcp> iostream;\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n  /// Socket option for disabling the Nagle algorithm.\n  /**\n   * Implements the IPPROTO_TCP/TCP_NODELAY socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::no_delay option(true);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::ip::tcp::no_delay option;\n   * socket.get_option(option);\n   * bool is_set = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Boolean_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined no_delay;\n#else\n  typedef asio::detail::socket_option::boolean<\n    ASIO_OS_DEF(IPPROTO_TCP), ASIO_OS_DEF(TCP_NODELAY)> no_delay;\n#endif\n\n  /// Compare two protocols for equality.\n  friend bool operator==(const tcp& p1, const tcp& p2)\n  {\n    return p1.family_ == p2.family_;\n  }\n\n  /// Compare two protocols for inequality.\n  friend bool operator!=(const tcp& p1, const tcp& p2)\n  {\n    return p1.family_ != p2.family_;\n  }\n\nprivate:\n  // Construct with a specific family.\n  explicit tcp(int protocol_family) ASIO_NOEXCEPT\n    : family_(protocol_family)\n  {\n  }\n\n  int family_;\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_TCP_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/udp.hpp",
    "content": "//\n// ip/udp.hpp\n// ~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_UDP_HPP\n#define ASIO_IP_UDP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/basic_datagram_socket.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/ip/basic_endpoint.hpp\"\n#include \"asio/ip/basic_resolver.hpp\"\n#include \"asio/ip/basic_resolver_iterator.hpp\"\n#include \"asio/ip/basic_resolver_query.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Encapsulates the flags needed for UDP.\n/**\n * The asio::ip::udp class contains flags necessary for UDP sockets.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol, InternetProtocol.\n */\nclass udp\n{\npublic:\n  /// The type of a UDP endpoint.\n  typedef basic_endpoint<udp> endpoint;\n\n  /// Construct to represent the IPv4 UDP protocol.\n  static udp v4() ASIO_NOEXCEPT\n  {\n    return udp(ASIO_OS_DEF(AF_INET));\n  }\n\n  /// Construct to represent the IPv6 UDP protocol.\n  static udp v6() ASIO_NOEXCEPT\n  {\n    return udp(ASIO_OS_DEF(AF_INET6));\n  }\n\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(SOCK_DGRAM);\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return ASIO_OS_DEF(IPPROTO_UDP);\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  {\n    return family_;\n  }\n\n  /// The UDP socket type.\n  typedef basic_datagram_socket<udp> socket;\n\n  /// The UDP resolver 
type.\n  typedef basic_resolver<udp> resolver;\n\n  /// Compare two protocols for equality.\n  friend bool operator==(const udp& p1, const udp& p2)\n  {\n    return p1.family_ == p2.family_;\n  }\n\n  /// Compare two protocols for inequality.\n  friend bool operator!=(const udp& p1, const udp& p2)\n  {\n    return p1.family_ != p2.family_;\n  }\n\nprivate:\n  // Construct with a specific family.\n  explicit udp(int protocol_family) ASIO_NOEXCEPT\n    : family_(protocol_family)\n  {\n  }\n\n  int family_;\n};\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_UDP_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/unicast.hpp",
    "content": "//\n// ip/unicast.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_UNICAST_HPP\n#define ASIO_IP_UNICAST_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/ip/detail/socket_option.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\nnamespace unicast {\n\n/// Socket option for time-to-live associated with outgoing unicast packets.\n/**\n * Implements the IPPROTO_IP/IP_UNICAST_TTL socket option.\n *\n * @par Examples\n * Setting the option:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::unicast::hops option(4);\n * socket.set_option(option);\n * @endcode\n *\n * @par\n * Getting the current option value:\n * @code\n * asio::ip::udp::socket socket(my_context);\n * ...\n * asio::ip::unicast::hops option;\n * socket.get_option(option);\n * int ttl = option.value();\n * @endcode\n *\n * @par Concepts:\n * GettableSocketOption, SettableSocketOption.\n */\n#if defined(GENERATING_DOCUMENTATION)\ntypedef implementation_defined hops;\n#else\ntypedef asio::ip::detail::socket_option::unicast_hops<\n  ASIO_OS_DEF(IPPROTO_IP),\n  ASIO_OS_DEF(IP_TTL),\n  ASIO_OS_DEF(IPPROTO_IPV6),\n  ASIO_OS_DEF(IPV6_UNICAST_HOPS)> hops;\n#endif\n\n} // namespace unicast\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_UNICAST_HPP\n"
  },
  {
    "path": "src/third_party/asio/ip/v6_only.hpp",
    "content": "//\n// ip/v6_only.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IP_V6_ONLY_HPP\n#define ASIO_IP_V6_ONLY_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/socket_option.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ip {\n\n/// Socket option for determining whether an IPv6 socket supports IPv6\n/// communication only.\n/**\n * Implements the IPPROTO_IPV6/IP_V6ONLY socket option.\n *\n * @par Examples\n * Setting the option:\n * @code\n * asio::ip::tcp::socket socket(my_context);\n * ...\n * asio::ip::v6_only option(true);\n * socket.set_option(option);\n * @endcode\n *\n * @par\n * Getting the current option value:\n * @code\n * asio::ip::tcp::socket socket(my_context);\n * ...\n * asio::ip::v6_only option;\n * socket.get_option(option);\n * bool v6_only = option.value();\n * @endcode\n *\n * @par Concepts:\n * GettableSocketOption, SettableSocketOption.\n */\n#if defined(GENERATING_DOCUMENTATION)\ntypedef implementation_defined v6_only;\n#elif defined(IPV6_V6ONLY)\ntypedef asio::detail::socket_option::boolean<\n    IPPROTO_IPV6, IPV6_V6ONLY> v6_only;\n#else\ntypedef asio::detail::socket_option::boolean<\n    asio::detail::custom_socket_option_level,\n    asio::detail::always_fail_option> v6_only;\n#endif\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IP_V6_ONLY_HPP\n"
  },
  {
    "path": "src/third_party/asio/is_executor.hpp",
    "content": "//\n// is_executor.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IS_EXECUTOR_HPP\n#define ASIO_IS_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/is_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// The is_executor trait detects whether a type T meets the Executor type\n/// requirements.\n/**\n * Class template @c is_executor is a UnaryTypeTrait that is derived from @c\n * true_type if the type @c T meets the syntactic requirements for Executor,\n * otherwise @c false_type.\n */\ntemplate <typename T>\nstruct is_executor\n#if defined(GENERATING_DOCUMENTATION)\n  : integral_constant<bool, automatically_determined>\n#else // defined(GENERATING_DOCUMENTATION)\n  : asio::detail::is_executor<T>\n#endif // defined(GENERATING_DOCUMENTATION)\n{\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IS_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/is_read_buffered.hpp",
    "content": "//\n// is_read_buffered.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IS_READ_BUFFERED_HPP\n#define ASIO_IS_READ_BUFFERED_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/buffered_read_stream_fwd.hpp\"\n#include \"asio/buffered_stream_fwd.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail {\n\ntemplate <typename Stream>\nchar is_read_buffered_helper(buffered_stream<Stream>* s);\n\ntemplate <typename Stream>\nchar is_read_buffered_helper(buffered_read_stream<Stream>* s);\n\nstruct is_read_buffered_big_type { char data[10]; };\nis_read_buffered_big_type is_read_buffered_helper(...);\n\n} // namespace detail\n\n/// The is_read_buffered class is a traits class that may be used to determine\n/// whether a stream type supports buffering of read data.\ntemplate <typename Stream>\nclass is_read_buffered\n{\npublic:\n#if defined(GENERATING_DOCUMENTATION)\n  /// The value member is true only if the Stream type supports buffering of\n  /// read data.\n  static const bool value;\n#else\n  ASIO_STATIC_CONSTANT(bool,\n      value = sizeof(detail::is_read_buffered_helper((Stream*)0)) == 1);\n#endif\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IS_READ_BUFFERED_HPP\n"
  },
  {
    "path": "src/third_party/asio/is_write_buffered.hpp",
    "content": "//\n// is_write_buffered.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_IS_WRITE_BUFFERED_HPP\n#define ASIO_IS_WRITE_BUFFERED_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/buffered_stream_fwd.hpp\"\n#include \"asio/buffered_write_stream_fwd.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail {\n\ntemplate <typename Stream>\nchar is_write_buffered_helper(buffered_stream<Stream>* s);\n\ntemplate <typename Stream>\nchar is_write_buffered_helper(buffered_write_stream<Stream>* s);\n\nstruct is_write_buffered_big_type { char data[10]; };\nis_write_buffered_big_type is_write_buffered_helper(...);\n\n} // namespace detail\n\n/// The is_write_buffered class is a traits class that may be used to determine\n/// whether a stream type supports buffering of written data.\ntemplate <typename Stream>\nclass is_write_buffered\n{\npublic:\n#if defined(GENERATING_DOCUMENTATION)\n  /// The value member is true only if the Stream type supports buffering of\n  /// written data.\n  static const bool value;\n#else\n  ASIO_STATIC_CONSTANT(bool,\n      value = sizeof(detail::is_write_buffered_helper((Stream*)0)) == 1);\n#endif\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_IS_WRITE_BUFFERED_HPP\n"
  },
  {
    "path": "src/third_party/asio/local/basic_endpoint.hpp",
    "content": "//\n// local/basic_endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Derived from a public domain implementation written by Daniel Casimiro.\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_LOCAL_BASIC_ENDPOINT_HPP\n#define ASIO_LOCAL_BASIC_ENDPOINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_LOCAL_SOCKETS) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/local/detail/endpoint.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n# include <iosfwd>\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace local {\n\n/// Describes an endpoint for a UNIX socket.\n/**\n * The asio::local::basic_endpoint class template describes an endpoint\n * that may be associated with a particular UNIX socket.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * Endpoint.\n */\ntemplate <typename Protocol>\nclass basic_endpoint\n{\npublic:\n  /// The protocol type associated with the endpoint.\n  typedef Protocol protocol_type;\n\n  /// The type of the endpoint structure. 
This type is dependent on the\n  /// underlying implementation of the socket layer.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined data_type;\n#else\n  typedef asio::detail::socket_addr_type data_type;\n#endif\n\n  /// Default constructor.\n  basic_endpoint() ASIO_NOEXCEPT\n  {\n  }\n\n  /// Construct an endpoint using the specified path name.\n  basic_endpoint(const char* path_name)\n    : impl_(path_name)\n  {\n  }\n\n  /// Construct an endpoint using the specified path name.\n  basic_endpoint(const std::string& path_name)\n    : impl_(path_name)\n  {\n  }\n\n  #if defined(ASIO_HAS_STRING_VIEW)\n  /// Construct an endpoint using the specified path name.\n  basic_endpoint(string_view path_name)\n    : impl_(path_name)\n  {\n  }\n  #endif // defined(ASIO_HAS_STRING_VIEW)\n\n  /// Copy constructor.\n  basic_endpoint(const basic_endpoint& other)\n    : impl_(other.impl_)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move constructor.\n  basic_endpoint(basic_endpoint&& other)\n    : impl_(other.impl_)\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// Assign from another endpoint.\n  basic_endpoint& operator=(const basic_endpoint& other)\n  {\n    impl_ = other.impl_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  /// Move-assign from another endpoint.\n  basic_endpoint& operator=(basic_endpoint&& other)\n  {\n    impl_ = other.impl_;\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  /// The protocol associated with the endpoint.\n  protocol_type protocol() const\n  {\n    return protocol_type();\n  }\n\n  /// Get the underlying endpoint in the native type.\n  data_type* data()\n  {\n    return impl_.data();\n  }\n\n  /// Get the underlying endpoint in the native type.\n  const data_type* data() const\n  {\n    return impl_.data();\n  }\n\n  /// Get the underlying size of the endpoint in the native type.\n  std::size_t size() const\n  {\n    return impl_.size();\n  }\n\n  /// Set the underlying size of the endpoint 
in the native type.\n  void resize(std::size_t new_size)\n  {\n    impl_.resize(new_size);\n  }\n\n  /// Get the capacity of the endpoint in the native type.\n  std::size_t capacity() const\n  {\n    return impl_.capacity();\n  }\n\n  /// Get the path associated with the endpoint.\n  std::string path() const\n  {\n    return impl_.path();\n  }\n\n  /// Set the path associated with the endpoint.\n  void path(const char* p)\n  {\n    impl_.path(p);\n  }\n\n  /// Set the path associated with the endpoint.\n  void path(const std::string& p)\n  {\n    impl_.path(p);\n  }\n\n  /// Compare two endpoints for equality.\n  friend bool operator==(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return e1.impl_ == e2.impl_;\n  }\n\n  /// Compare two endpoints for inequality.\n  friend bool operator!=(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return !(e1.impl_ == e2.impl_);\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator<(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return e1.impl_ < e2.impl_;\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator>(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return e2.impl_ < e1.impl_;\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator<=(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return !(e2 < e1);\n  }\n\n  /// Compare endpoints for ordering.\n  friend bool operator>=(const basic_endpoint<Protocol>& e1,\n      const basic_endpoint<Protocol>& e2)\n  {\n    return !(e1 < e2);\n  }\n\nprivate:\n  // The underlying UNIX domain endpoint.\n  asio::local::detail::endpoint impl_;\n};\n\n/// Output an endpoint as a string.\n/**\n * Used to output a human-readable string for a specified endpoint.\n *\n * @param os The output stream to which the string will be written.\n *\n * 
@param endpoint The endpoint to be written.\n *\n * @return The output stream.\n *\n * @relates asio::local::basic_endpoint\n */\ntemplate <typename Elem, typename Traits, typename Protocol>\nstd::basic_ostream<Elem, Traits>& operator<<(\n    std::basic_ostream<Elem, Traits>& os,\n    const basic_endpoint<Protocol>& endpoint)\n{\n  os << endpoint.path();\n  return os;\n}\n\n} // namespace local\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_LOCAL_SOCKETS)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_LOCAL_BASIC_ENDPOINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/local/connect_pair.hpp",
    "content": "//\n// local/connect_pair.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_LOCAL_CONNECT_PAIR_HPP\n#define ASIO_LOCAL_CONNECT_PAIR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_LOCAL_SOCKETS) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/basic_socket.hpp\"\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/local/basic_endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace local {\n\n/// Create a pair of connected sockets.\ntemplate <typename Protocol, typename Executor1, typename Executor2>\nvoid connect_pair(basic_socket<Protocol, Executor1>& socket1,\n    basic_socket<Protocol, Executor2>& socket2);\n\n/// Create a pair of connected sockets.\ntemplate <typename Protocol, typename Executor1, typename Executor2>\nASIO_SYNC_OP_VOID connect_pair(basic_socket<Protocol, Executor1>& socket1,\n    basic_socket<Protocol, Executor2>& socket2, asio::error_code& ec);\n\ntemplate <typename Protocol, typename Executor1, typename Executor2>\ninline void connect_pair(basic_socket<Protocol, Executor1>& socket1,\n    basic_socket<Protocol, Executor2>& socket2)\n{\n  asio::error_code ec;\n  connect_pair(socket1, socket2, ec);\n  asio::detail::throw_error(ec, \"connect_pair\");\n}\n\ntemplate <typename Protocol, typename Executor1, typename Executor2>\ninline ASIO_SYNC_OP_VOID connect_pair(\n    basic_socket<Protocol, Executor1>& socket1,\n    basic_socket<Protocol, Executor2>& socket2, asio::error_code& ec)\n{\n  // Check that this function is only being used with a 
UNIX domain socket.\n  asio::local::basic_endpoint<Protocol>* tmp\n    = static_cast<typename Protocol::endpoint*>(0);\n  (void)tmp;\n\n  Protocol protocol;\n  asio::detail::socket_type sv[2];\n  if (asio::detail::socket_ops::socketpair(protocol.family(),\n        protocol.type(), protocol.protocol(), sv, ec)\n      == asio::detail::socket_error_retval)\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n\n  socket1.assign(protocol, sv[0], ec);\n  if (ec)\n  {\n    asio::error_code temp_ec;\n    asio::detail::socket_ops::state_type state[2] = { 0, 0 };\n    asio::detail::socket_ops::close(sv[0], state[0], true, temp_ec);\n    asio::detail::socket_ops::close(sv[1], state[1], true, temp_ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  socket2.assign(protocol, sv[1], ec);\n  if (ec)\n  {\n    asio::error_code temp_ec;\n    socket1.close(temp_ec);\n    asio::detail::socket_ops::state_type state = 0;\n    asio::detail::socket_ops::close(sv[1], state, true, temp_ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\n} // namespace local\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_LOCAL_SOCKETS)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_LOCAL_CONNECT_PAIR_HPP\n"
  },
  {
    "path": "src/third_party/asio/local/datagram_protocol.hpp",
    "content": "//\n// local/datagram_protocol.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP\n#define ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_LOCAL_SOCKETS) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/basic_datagram_socket.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/local/basic_endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace local {\n\n/// Encapsulates the flags needed for datagram-oriented UNIX sockets.\n/**\n * The asio::local::datagram_protocol class contains flags necessary for\n * datagram-oriented UNIX domain sockets.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol.\n */\nclass datagram_protocol\n{\npublic:\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return SOCK_DGRAM;\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return 0;\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  {\n    return AF_UNIX;\n  }\n\n  /// The type of a UNIX domain endpoint.\n  typedef basic_endpoint<datagram_protocol> endpoint;\n\n  /// The UNIX domain socket type.\n  typedef basic_datagram_socket<datagram_protocol> socket;\n};\n\n} // namespace local\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_LOCAL_SOCKETS)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // 
ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/local/detail/endpoint.hpp",
    "content": "//\n// local/detail/endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Derived from a public domain implementation written by Daniel Casimiro.\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_LOCAL_DETAIL_ENDPOINT_HPP\n#define ASIO_LOCAL_DETAIL_ENDPOINT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_LOCAL_SOCKETS)\n\n#include <cstddef>\n#include <string>\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/detail/string_view.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace local {\nnamespace detail {\n\n// Helper class for implementing a UNIX domain endpoint.\nclass endpoint\n{\npublic:\n  // Default constructor.\n  ASIO_DECL endpoint();\n\n  // Construct an endpoint using the specified path name.\n  ASIO_DECL endpoint(const char* path_name);\n\n  // Construct an endpoint using the specified path name.\n  ASIO_DECL endpoint(const std::string& path_name);\n\n  #if defined(ASIO_HAS_STRING_VIEW)\n  // Construct an endpoint using the specified path name.\n  ASIO_DECL endpoint(string_view path_name);\n  #endif // defined(ASIO_HAS_STRING_VIEW)\n\n  // Copy constructor.\n  endpoint(const endpoint& other)\n    : data_(other.data_),\n      path_length_(other.path_length_)\n  {\n  }\n\n  // Assign from another endpoint.\n  endpoint& operator=(const endpoint& other)\n  {\n    data_ = other.data_;\n    path_length_ = other.path_length_;\n    return *this;\n  }\n\n  // Get the underlying endpoint in the native type.\n  asio::detail::socket_addr_type* data()\n  {\n    return &data_.base;\n  }\n\n  // Get the underlying endpoint in the native type.\n  const asio::detail::socket_addr_type* 
data() const\n  {\n    return &data_.base;\n  }\n\n  // Get the underlying size of the endpoint in the native type.\n  std::size_t size() const\n  {\n    return path_length_\n      + offsetof(asio::detail::sockaddr_un_type, sun_path);\n  }\n\n  // Set the underlying size of the endpoint in the native type.\n  ASIO_DECL void resize(std::size_t size);\n\n  // Get the capacity of the endpoint in the native type.\n  std::size_t capacity() const\n  {\n    return sizeof(asio::detail::sockaddr_un_type);\n  }\n\n  // Get the path associated with the endpoint.\n  ASIO_DECL std::string path() const;\n\n  // Set the path associated with the endpoint.\n  ASIO_DECL void path(const char* p);\n\n  // Set the path associated with the endpoint.\n  ASIO_DECL void path(const std::string& p);\n\n  // Compare two endpoints for equality.\n  ASIO_DECL friend bool operator==(\n      const endpoint& e1, const endpoint& e2);\n\n  // Compare endpoints for ordering.\n  ASIO_DECL friend bool operator<(\n      const endpoint& e1, const endpoint& e2);\n\nprivate:\n  // The underlying UNIX socket address.\n  union data_union\n  {\n    asio::detail::socket_addr_type base;\n    asio::detail::sockaddr_un_type local;\n  } data_;\n\n  // The length of the path associated with the endpoint.\n  std::size_t path_length_;\n\n  // Initialise with a specified path.\n  ASIO_DECL void init(const char* path, std::size_t path_length);\n};\n\n} // namespace detail\n} // namespace local\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/local/detail/impl/endpoint.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_LOCAL_SOCKETS)\n\n#endif // ASIO_LOCAL_DETAIL_ENDPOINT_HPP\n"
  },
  {
    "path": "src/third_party/asio/local/detail/impl/endpoint.ipp",
    "content": "//\n// local/detail/impl/endpoint.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Derived from a public domain implementation written by Daniel Casimiro.\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP\n#define ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_LOCAL_SOCKETS)\n\n#include <cstring>\n#include \"asio/detail/socket_ops.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/local/detail/endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace local {\nnamespace detail {\n\nendpoint::endpoint()\n{\n  init(\"\", 0);\n}\n\nendpoint::endpoint(const char* path_name)\n{\n  using namespace std; // For strlen.\n  init(path_name, strlen(path_name));\n}\n\nendpoint::endpoint(const std::string& path_name)\n{\n  init(path_name.data(), path_name.length());\n}\n\n#if defined(ASIO_HAS_STRING_VIEW)\nendpoint::endpoint(string_view path_name)\n{\n  init(path_name.data(), path_name.length());\n}\n#endif // defined(ASIO_HAS_STRING_VIEW)\n\nvoid endpoint::resize(std::size_t new_size)\n{\n  if (new_size > sizeof(asio::detail::sockaddr_un_type))\n  {\n    asio::error_code ec(asio::error::invalid_argument);\n    asio::detail::throw_error(ec);\n  }\n  else if (new_size == 0)\n  {\n    path_length_ = 0;\n  }\n  else\n  {\n    path_length_ = new_size\n      - offsetof(asio::detail::sockaddr_un_type, sun_path);\n\n    // The path returned by the operating system may be NUL-terminated.\n    if (path_length_ > 0 && data_.local.sun_path[path_length_ - 1] == 0)\n      --path_length_;\n  
}\n}\n\nstd::string endpoint::path() const\n{\n  return std::string(data_.local.sun_path, path_length_);\n}\n\nvoid endpoint::path(const char* p)\n{\n  using namespace std; // For strlen.\n  init(p, strlen(p));\n}\n\nvoid endpoint::path(const std::string& p)\n{\n  init(p.data(), p.length());\n}\n\nbool operator==(const endpoint& e1, const endpoint& e2)\n{\n  return e1.path() == e2.path();\n}\n\nbool operator<(const endpoint& e1, const endpoint& e2)\n{\n  return e1.path() < e2.path();\n}\n\nvoid endpoint::init(const char* path_name, std::size_t path_length)\n{\n  if (path_length > sizeof(data_.local.sun_path) - 1)\n  {\n    // The buffer is not large enough to store this address.\n    asio::error_code ec(asio::error::name_too_long);\n    asio::detail::throw_error(ec);\n  }\n\n  using namespace std; // For memcpy.\n  data_.local = asio::detail::sockaddr_un_type();\n  data_.local.sun_family = AF_UNIX;\n  if (path_length > 0)\n    memcpy(data_.local.sun_path, path_name, path_length);\n  path_length_ = path_length;\n\n  // NUL-terminate normal path names. Names that start with a NUL are in the\n  // UNIX domain protocol's \"abstract namespace\" and are not NUL-terminated.\n  if (path_length > 0 && data_.local.sun_path[0] == 0)\n    data_.local.sun_path[path_length] = 0;\n}\n\n} // namespace detail\n} // namespace local\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_LOCAL_SOCKETS)\n\n#endif // ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP\n"
  },
  {
    "path": "src/third_party/asio/local/stream_protocol.hpp",
    "content": "//\n// local/stream_protocol.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_LOCAL_STREAM_PROTOCOL_HPP\n#define ASIO_LOCAL_STREAM_PROTOCOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_LOCAL_SOCKETS) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/basic_socket_acceptor.hpp\"\n#include \"asio/basic_socket_iostream.hpp\"\n#include \"asio/basic_stream_socket.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/local/basic_endpoint.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace local {\n\n/// Encapsulates the flags needed for stream-oriented UNIX sockets.\n/**\n * The asio::local::stream_protocol class contains flags necessary for\n * stream-oriented UNIX domain sockets.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Safe.\n *\n * @par Concepts:\n * Protocol.\n */\nclass stream_protocol\n{\npublic:\n  /// Obtain an identifier for the type of the protocol.\n  int type() const ASIO_NOEXCEPT\n  {\n    return SOCK_STREAM;\n  }\n\n  /// Obtain an identifier for the protocol.\n  int protocol() const ASIO_NOEXCEPT\n  {\n    return 0;\n  }\n\n  /// Obtain an identifier for the protocol family.\n  int family() const ASIO_NOEXCEPT\n  {\n    return AF_UNIX;\n  }\n\n  /// The type of a UNIX domain endpoint.\n  typedef basic_endpoint<stream_protocol> endpoint;\n\n  /// The UNIX domain socket type.\n  typedef basic_stream_socket<stream_protocol> socket;\n\n  /// The UNIX domain acceptor type.\n  typedef basic_socket_acceptor<stream_protocol> acceptor;\n\n#if !defined(ASIO_NO_IOSTREAM)\n  
/// The UNIX domain iostream type.\n  typedef basic_socket_iostream<stream_protocol> iostream;\n#endif // !defined(ASIO_NO_IOSTREAM)\n};\n\n} // namespace local\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_LOCAL_SOCKETS)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_LOCAL_STREAM_PROTOCOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/packaged_task.hpp",
    "content": "//\n// packaged_task.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_PACKAGED_TASK_HPP\n#define ASIO_PACKAGED_TASK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/future.hpp\"\n\n#if defined(ASIO_HAS_STD_FUTURE_CLASS) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/detail/variadic_templates.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if defined(ASIO_HAS_VARIADIC_TEMPLATES) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Partial specialisation of @c async_result for @c std::packaged_task.\ntemplate <typename Result, typename... 
Args, typename Signature>\nclass async_result<std::packaged_task<Result(Args...)>, Signature>\n{\npublic:\n  /// The packaged task is the concrete completion handler type.\n  typedef std::packaged_task<Result(Args...)> completion_handler_type;\n\n  /// The return type of the initiating function is the future obtained from\n  /// the packaged task.\n  typedef std::future<Result> return_type;\n\n  /// The constructor extracts the future from the packaged task.\n  explicit async_result(completion_handler_type& h)\n    : future_(h.get_future())\n  {\n  }\n\n  /// Returns the packaged task's future.\n  return_type get()\n  {\n    return std::move(future_);\n  }\n\nprivate:\n  return_type future_;\n};\n\n#else // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n      //   || defined(GENERATING_DOCUMENTATION)\n\ntemplate <typename Result, typename Signature>\nstruct async_result<std::packaged_task<Result()>, Signature>\n{\n  typedef std::packaged_task<Result()> completion_handler_type;\n  typedef std::future<Result> return_type;\n\n  explicit async_result(completion_handler_type& h)\n    : future_(h.get_future())\n  {\n  }\n\n  return_type get()\n  {\n    return std::move(future_);\n  }\n\nprivate:\n  return_type future_;\n};\n\n#define ASIO_PRIVATE_ASYNC_RESULT_DEF(n) \\\n  template <typename Result, \\\n    ASIO_VARIADIC_TPARAMS(n), typename Signature> \\\n  class async_result< \\\n    std::packaged_task<Result(ASIO_VARIADIC_TARGS(n))>, Signature> \\\n  { \\\n  public: \\\n    typedef std::packaged_task< \\\n      Result(ASIO_VARIADIC_TARGS(n))> \\\n        completion_handler_type; \\\n  \\\n    typedef std::future<Result> return_type; \\\n  \\\n    explicit async_result(completion_handler_type& h) \\\n      : future_(h.get_future()) \\\n    { \\\n    } \\\n  \\\n    return_type get() \\\n    { \\\n      return std::move(future_); \\\n    } \\\n  \\\n  private: \\\n    return_type future_; \\\n  }; \\\n  /**/\n  ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_ASYNC_RESULT_DEF)\n#undef 
ASIO_PRIVATE_ASYNC_RESULT_DEF\n\n#endif // defined(ASIO_HAS_VARIADIC_TEMPLATES)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_STD_FUTURE_CLASS)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_PACKAGED_TASK_HPP\n"
  },
  {
    "path": "src/third_party/asio/placeholders.hpp",
    "content": "//\n// placeholders.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_PLACEHOLDERS_HPP\n#define ASIO_PLACEHOLDERS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_BIND)\n# include <boost/bind/arg.hpp>\n#endif // defined(ASIO_HAS_BOOST_BIND)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace placeholders {\n\n#if defined(GENERATING_DOCUMENTATION)\n\n/// An argument placeholder, for use with boost::bind(), that corresponds to\n/// the error argument of a handler for any of the asynchronous functions.\nunspecified error;\n\n/// An argument placeholder, for use with boost::bind(), that corresponds to\n/// the bytes_transferred argument of a handler for asynchronous functions such\n/// as asio::basic_stream_socket::async_write_some or\n/// asio::async_write.\nunspecified bytes_transferred;\n\n/// An argument placeholder, for use with boost::bind(), that corresponds to\n/// the iterator argument of a handler for asynchronous functions such as\n/// asio::async_connect.\nunspecified iterator;\n\n/// An argument placeholder, for use with boost::bind(), that corresponds to\n/// the results argument of a handler for asynchronous functions such as\n/// asio::basic_resolver::async_resolve.\nunspecified results;\n\n/// An argument placeholder, for use with boost::bind(), that corresponds to\n/// the results argument of a handler for asynchronous functions such as\n/// asio::async_connect.\nunspecified endpoint;\n\n/// An argument placeholder, for use with boost::bind(), that corresponds to\n/// the signal_number argument of a handler for asynchronous functions such 
as\n/// asio::signal_set::async_wait.\nunspecified signal_number;\n\n#elif defined(ASIO_HAS_BOOST_BIND)\n# if defined(__BORLANDC__) || defined(__GNUC__)\n\ninline boost::arg<1> error()\n{\n  return boost::arg<1>();\n}\n\ninline boost::arg<2> bytes_transferred()\n{\n  return boost::arg<2>();\n}\n\ninline boost::arg<2> iterator()\n{\n  return boost::arg<2>();\n}\n\ninline boost::arg<2> results()\n{\n  return boost::arg<2>();\n}\n\ninline boost::arg<2> endpoint()\n{\n  return boost::arg<2>();\n}\n\ninline boost::arg<2> signal_number()\n{\n  return boost::arg<2>();\n}\n\n# else\n\nnamespace detail\n{\n  template <int Number>\n  struct placeholder\n  {\n    static boost::arg<Number>& get()\n    {\n      static boost::arg<Number> result;\n      return result;\n    }\n  };\n}\n\n#  if defined(ASIO_MSVC) && (ASIO_MSVC < 1400)\n\nstatic boost::arg<1>& error\n  = asio::placeholders::detail::placeholder<1>::get();\nstatic boost::arg<2>& bytes_transferred\n  = asio::placeholders::detail::placeholder<2>::get();\nstatic boost::arg<2>& iterator\n  = asio::placeholders::detail::placeholder<2>::get();\nstatic boost::arg<2>& results\n  = asio::placeholders::detail::placeholder<2>::get();\nstatic boost::arg<2>& endpoint\n  = asio::placeholders::detail::placeholder<2>::get();\nstatic boost::arg<2>& signal_number\n  = asio::placeholders::detail::placeholder<2>::get();\n\n#  else\n\nnamespace\n{\n  boost::arg<1>& error\n    = asio::placeholders::detail::placeholder<1>::get();\n  boost::arg<2>& bytes_transferred\n    = asio::placeholders::detail::placeholder<2>::get();\n  boost::arg<2>& iterator\n    = asio::placeholders::detail::placeholder<2>::get();\n  boost::arg<2>& results\n    = asio::placeholders::detail::placeholder<2>::get();\n  boost::arg<2>& endpoint\n    = asio::placeholders::detail::placeholder<2>::get();\n  boost::arg<2>& signal_number\n    = asio::placeholders::detail::placeholder<2>::get();\n} // namespace\n\n#  endif\n# endif\n#endif\n\n} // namespace placeholders\n} // 
namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_PLACEHOLDERS_HPP\n"
  },
  {
    "path": "src/third_party/asio/posix/basic_descriptor.hpp",
    "content": "//\n// posix/basic_descriptor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_POSIX_BASIC_DESCRIPTOR_HPP\n#define ASIO_POSIX_BASIC_DESCRIPTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/reactive_descriptor_service.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/posix/descriptor_base.hpp\"\n\n#if defined(ASIO_HAS_MOVE)\n# include <utility>\n#endif // defined(ASIO_HAS_MOVE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace posix {\n\n/// Provides POSIX descriptor functionality.\n/**\n * The posix::basic_descriptor class template provides the ability to wrap a\n * POSIX descriptor.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Executor = executor>\nclass basic_descriptor\n  : public descriptor_base\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the descriptor type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The descriptor type when rebound to the specified executor.\n    typedef basic_descriptor<Executor1> other;\n  };\n\n  /// The native representation of a 
descriptor.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#else\n  typedef detail::reactive_descriptor_service::native_handle_type\n    native_handle_type;\n#endif\n\n  /// A descriptor is always the lowest layer.\n  typedef basic_descriptor lowest_layer_type;\n\n  /// Construct a descriptor without opening it.\n  /**\n   * This constructor creates a descriptor without opening it.\n   *\n   * @param ex The I/O executor that the descriptor will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * descriptor.\n   */\n  explicit basic_descriptor(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Construct a descriptor without opening it.\n  /**\n   * This constructor creates a descriptor without opening it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the descriptor will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the descriptor.\n   */\n  template <typename ExecutionContext>\n  explicit basic_descriptor(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Construct a descriptor on an existing native descriptor.\n  /**\n   * This constructor creates a descriptor object to hold an existing native\n   * descriptor.\n   *\n   * @param ex The I/O executor that the descriptor will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * descriptor.\n   *\n   * @param native_descriptor A native descriptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_descriptor(const executor_type& ex,\n      const native_handle_type& native_descriptor)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        native_descriptor, ec);\n    
asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Construct a descriptor on an existing native descriptor.\n  /**\n   * This constructor creates a descriptor object to hold an existing native\n   * descriptor.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the descriptor will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the descriptor.\n   *\n   * @param native_descriptor A native descriptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_descriptor(ExecutionContext& context,\n      const native_handle_type& native_descriptor,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        native_descriptor, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a descriptor from another.\n  /**\n   * This constructor moves a descriptor from one object to another.\n   *\n   * @param other The other descriptor object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_descriptor(const executor_type&)\n   * constructor.\n   */\n  basic_descriptor(basic_descriptor&& other)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign a descriptor from another.\n  /**\n   * This assignment operator moves a descriptor from one object to another.\n   *\n   * @param other The other descriptor object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_descriptor(const executor_type&)\n   * constructor.\n   */\n  basic_descriptor& 
operator=(basic_descriptor&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n  /// Get a reference to the lowest layer.\n  /**\n   * This function returns a reference to the lowest layer in a stack of\n   * layers. Since a descriptor cannot contain any further layers, it\n   * simply returns a reference to itself.\n   *\n   * @return A reference to the lowest layer in the stack of layers. Ownership\n   * is not transferred to the caller.\n   */\n  lowest_layer_type& lowest_layer()\n  {\n    return *this;\n  }\n\n  /// Get a const reference to the lowest layer.\n  /**\n   * This function returns a const reference to the lowest layer in a stack of\n   * layers. Since a descriptor cannot contain any further layers, it\n   * simply returns a reference to itself.\n   *\n   * @return A const reference to the lowest layer in the stack of layers.\n   * Ownership is not transferred to the caller.\n   */\n  const lowest_layer_type& lowest_layer() const\n  {\n    return *this;\n  }\n\n  /// Assign an existing native descriptor to the descriptor.\n  /*\n   * This function opens the descriptor to hold an existing native descriptor.\n   *\n   * @param native_descriptor A native descriptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void assign(const native_handle_type& native_descriptor)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(),\n        native_descriptor, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Assign an existing native descriptor to the descriptor.\n  /*\n   * This function opens the descriptor to hold an existing native descriptor.\n   *\n   * @param native_descriptor A native descriptor.\n   *\n   * @param ec Set to indicate what error 
occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID assign(const native_handle_type& native_descriptor,\n      asio::error_code& ec)\n  {\n    impl_.get_service().assign(\n        impl_.get_implementation(), native_descriptor, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Determine whether the descriptor is open.\n  bool is_open() const\n  {\n    return impl_.get_service().is_open(impl_.get_implementation());\n  }\n\n  /// Close the descriptor.\n  /**\n   * This function is used to close the descriptor. Any asynchronous read or\n   * write operations will be cancelled immediately, and will complete with the\n   * asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure. Note that, even if\n   * the function indicates an error, the underlying descriptor is closed.\n   */\n  void close()\n  {\n    asio::error_code ec;\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"close\");\n  }\n\n  /// Close the descriptor.\n  /**\n   * This function is used to close the descriptor. Any asynchronous read or\n   * write operations will be cancelled immediately, and will complete with the\n   * asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any. Note that, even if\n   * the function indicates an error, the underlying descriptor is closed.\n   */\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get the native descriptor representation.\n  /**\n   * This function may be used to obtain the underlying representation of the\n   * descriptor. 
This is intended to allow access to native descriptor\n   * functionality that is not otherwise provided.\n   */\n  native_handle_type native_handle()\n  {\n    return impl_.get_service().native_handle(impl_.get_implementation());\n  }\n\n  /// Release ownership of the native descriptor implementation.\n  /**\n   * This function may be used to obtain the underlying representation of the\n   * descriptor. After calling this function, @c is_open() returns false. The\n   * caller is responsible for closing the descriptor.\n   *\n   * All outstanding asynchronous read or write operations will finish\n   * immediately, and the handlers for cancelled operations will be passed the\n   * asio::error::operation_aborted error.\n   */\n  native_handle_type release()\n  {\n    return impl_.get_service().release(impl_.get_implementation());\n  }\n\n  /// Cancel all asynchronous operations associated with the descriptor.\n  /**\n   * This function causes all outstanding asynchronous read or write operations\n   * to finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void cancel()\n  {\n    asio::error_code ec;\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n  }\n\n  /// Cancel all asynchronous operations associated with the descriptor.\n  /**\n   * This function causes all outstanding asynchronous read or write operations\n   * to finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID cancel(asio::error_code& ec)\n  {\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Perform an IO control command on the descriptor.\n  /**\n   * This function is used 
to execute an IO control command on the descriptor.\n   *\n   * @param command The IO control command to be performed on the descriptor.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @sa IoControlCommand @n\n   * asio::posix::descriptor_base::bytes_readable @n\n   * asio::posix::descriptor_base::non_blocking_io\n   *\n   * @par Example\n   * Getting the number of bytes ready to read:\n   * @code\n   * asio::posix::stream_descriptor descriptor(my_context);\n   * ...\n   * asio::posix::stream_descriptor::bytes_readable command;\n   * descriptor.io_control(command);\n   * std::size_t bytes_readable = command.get();\n   * @endcode\n   */\n  template <typename IoControlCommand>\n  void io_control(IoControlCommand& command)\n  {\n    asio::error_code ec;\n    impl_.get_service().io_control(impl_.get_implementation(), command, ec);\n    asio::detail::throw_error(ec, \"io_control\");\n  }\n\n  /// Perform an IO control command on the descriptor.\n  /**\n   * This function is used to execute an IO control command on the descriptor.\n   *\n   * @param command The IO control command to be performed on the descriptor.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @sa IoControlCommand @n\n   * asio::posix::descriptor_base::bytes_readable @n\n   * asio::posix::descriptor_base::non_blocking_io\n   *\n   * @par Example\n   * Getting the number of bytes ready to read:\n   * @code\n   * asio::posix::stream_descriptor descriptor(my_context);\n   * ...\n   * asio::posix::stream_descriptor::bytes_readable command;\n   * asio::error_code ec;\n   * descriptor.io_control(command, ec);\n   * if (ec)\n   * {\n   *   // An error occurred.\n   * }\n   * std::size_t bytes_readable = command.get();\n   * @endcode\n   */\n  template <typename IoControlCommand>\n  ASIO_SYNC_OP_VOID io_control(IoControlCommand& command,\n      asio::error_code& ec)\n  {\n    impl_.get_service().io_control(impl_.get_implementation(), command, ec);\n    
ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Gets the non-blocking mode of the descriptor.\n  /**\n   * @returns @c true if the descriptor's synchronous operations will fail with\n   * asio::error::would_block if they are unable to perform the requested\n   * operation immediately. If @c false, synchronous operations will block\n   * until complete.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  bool non_blocking() const\n  {\n    return impl_.get_service().non_blocking(impl_.get_implementation());\n  }\n\n  /// Sets the non-blocking mode of the descriptor.\n  /**\n   * @param mode If @c true, the descriptor's synchronous operations will fail\n   * with asio::error::would_block if they are unable to perform the\n   * requested operation immediately. If @c false, synchronous operations will\n   * block until complete.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  void non_blocking(bool mode)\n  {\n    asio::error_code ec;\n    impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec);\n    asio::detail::throw_error(ec, \"non_blocking\");\n  }\n\n  /// Sets the non-blocking mode of the descriptor.\n  /**\n   * @param mode If @c true, the descriptor's synchronous operations will fail\n   * with asio::error::would_block if they are unable to perform the\n   * requested operation immediately. If @c false, synchronous operations will\n   * block until complete.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note The non-blocking mode has no effect on the behaviour of asynchronous\n   * operations. 
Asynchronous operations will never fail with the error\n   * asio::error::would_block.\n   */\n  ASIO_SYNC_OP_VOID non_blocking(\n      bool mode, asio::error_code& ec)\n  {\n    impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Gets the non-blocking mode of the native descriptor implementation.\n  /**\n   * This function is used to retrieve the non-blocking mode of the underlying\n   * native descriptor. This mode has no effect on the behaviour of the\n   * descriptor object's synchronous operations.\n   *\n   * @returns @c true if the underlying descriptor is in non-blocking mode and\n   * direct system calls may fail with asio::error::would_block (or the\n   * equivalent system error).\n   *\n   * @note The current non-blocking mode is cached by the descriptor object.\n   * Consequently, the return value may be incorrect if the non-blocking mode\n   * was set directly on the native descriptor.\n   */\n  bool native_non_blocking() const\n  {\n    return impl_.get_service().native_non_blocking(\n        impl_.get_implementation());\n  }\n\n  /// Sets the non-blocking mode of the native descriptor implementation.\n  /**\n   * This function is used to modify the non-blocking mode of the underlying\n   * native descriptor. It has no effect on the behaviour of the descriptor\n   * object's synchronous operations.\n   *\n   * @param mode If @c true, the underlying descriptor is put into non-blocking\n   * mode and direct system calls may fail with asio::error::would_block\n   * (or the equivalent system error).\n   *\n   * @throws asio::system_error Thrown on failure. 
If the @c mode is\n   * @c false, but the current value of @c non_blocking() is @c true, this\n   * function fails with asio::error::invalid_argument, as the\n   * combination does not make sense.\n   */\n  void native_non_blocking(bool mode)\n  {\n    asio::error_code ec;\n    impl_.get_service().native_non_blocking(\n        impl_.get_implementation(), mode, ec);\n    asio::detail::throw_error(ec, \"native_non_blocking\");\n  }\n\n  /// Sets the non-blocking mode of the native descriptor implementation.\n  /**\n   * This function is used to modify the non-blocking mode of the underlying\n   * native descriptor. It has no effect on the behaviour of the descriptor\n   * object's synchronous operations.\n   *\n   * @param mode If @c true, the underlying descriptor is put into non-blocking\n   * mode and direct system calls may fail with asio::error::would_block\n   * (or the equivalent system error).\n   *\n   * @param ec Set to indicate what error occurred, if any. If the @c mode is\n   * @c false, but the current value of @c non_blocking() is @c true, this\n   * function fails with asio::error::invalid_argument, as the\n   * combination does not make sense.\n   */\n  ASIO_SYNC_OP_VOID native_non_blocking(\n      bool mode, asio::error_code& ec)\n  {\n    impl_.get_service().native_non_blocking(\n        impl_.get_implementation(), mode, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Wait for the descriptor to become ready to read, ready to write, or to\n  /// have pending error conditions.\n  /**\n   * This function is used to perform a blocking wait for a descriptor to enter\n   * a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired descriptor state.\n   *\n   * @par Example\n   * Waiting for a descriptor to become readable.\n   * @code\n   * asio::posix::stream_descriptor descriptor(my_context);\n   * ...\n   * descriptor.wait(asio::posix::stream_descriptor::wait_read);\n   * @endcode\n   */\n  void wait(wait_type 
w)\n  {\n    asio::error_code ec;\n    impl_.get_service().wait(impl_.get_implementation(), w, ec);\n    asio::detail::throw_error(ec, \"wait\");\n  }\n\n  /// Wait for the descriptor to become ready to read, ready to write, or to\n  /// have pending error conditions.\n  /**\n   * This function is used to perform a blocking wait for a descriptor to enter\n   * a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired descriptor state.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @par Example\n   * Waiting for a descriptor to become readable.\n   * @code\n   * asio::posix::stream_descriptor descriptor(my_context);\n   * ...\n   * asio::error_code ec;\n   * descriptor.wait(asio::posix::stream_descriptor::wait_read, ec);\n   * @endcode\n   */\n  ASIO_SYNC_OP_VOID wait(wait_type w, asio::error_code& ec)\n  {\n    impl_.get_service().wait(impl_.get_implementation(), w, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Asynchronously wait for the descriptor to become ready to read, ready to\n  /// write, or to have pending error conditions.\n  /**\n   * This function is used to perform an asynchronous wait for a descriptor to\n   * enter a ready to read, write or error condition state.\n   *\n   * @param w Specifies the desired descriptor state.\n   *\n   * @param handler The handler to be called when the wait operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @par Example\n   * @code\n   * void wait_handler(const asio::error_code& error)\n   * {\n   *   if (!error)\n   *   {\n   *     // Wait succeeded.\n   *   }\n   * }\n   *\n   * ...\n   *\n   * asio::posix::stream_descriptor descriptor(my_context);\n   * ...\n   * descriptor.async_wait(\n   *     asio::posix::stream_descriptor::wait_read,\n   *     wait_handler);\n   * @endcode\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        WaitHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WaitHandler,\n      void (asio::error_code))\n  async_wait(wait_type w,\n      ASIO_MOVE_ARG(WaitHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WaitHandler, void (asio::error_code)>(\n        initiate_async_wait(this), handler, w);\n  }\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  /**\n   * This function destroys the descriptor, cancelling any outstanding\n   * asynchronous wait operations associated with the descriptor as if by\n   * calling @c cancel.\n   */\n  ~basic_descriptor()\n  {\n  }\n\n  detail::io_object_impl<detail::reactive_descriptor_service, Executor> impl_;\n\nprivate:\n  // Disallow copying and assignment.\n  basic_descriptor(const basic_descriptor&) ASIO_DELETED;\n  basic_descriptor& operator=(const basic_descriptor&) ASIO_DELETED;\n\n  class initiate_async_wait\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_wait(basic_descriptor* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WaitHandler>\n    void operator()(ASIO_MOVE_ARG(WaitHandler) handler, wait_type w) const\n    {\n      // If you get an error on 
the following line it means that your handler\n      // does not meet the documented type requirements for a WaitHandler.\n      ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check;\n\n      detail::non_const_lvalue<WaitHandler> handler2(handler);\n      self_->impl_.get_service().async_wait(\n          self_->impl_.get_implementation(), w, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_descriptor* self_;\n  };\n};\n\n} // namespace posix\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_POSIX_BASIC_DESCRIPTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/posix/basic_stream_descriptor.hpp",
    "content": "//\n// posix/basic_stream_descriptor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_POSIX_BASIC_STREAM_DESCRIPTOR_HPP\n#define ASIO_POSIX_BASIC_STREAM_DESCRIPTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/posix/descriptor.hpp\"\n\n#if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \\\n  || defined(GENERATING_DOCUMENTATION)\n\nnamespace asio {\nnamespace posix {\n\n/// Provides stream-oriented descriptor functionality.\n/**\n * The posix::basic_stream_descriptor class template provides asynchronous and\n * blocking stream-oriented descriptor functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream.\n */\ntemplate <typename Executor = executor>\nclass basic_stream_descriptor\n  : public basic_descriptor<Executor>\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the descriptor type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The descriptor type when rebound to the specified executor.\n    typedef basic_stream_descriptor<Executor1> other;\n  };\n\n  /// The native representation of a descriptor.\n  typedef typename basic_descriptor<Executor>::native_handle_type\n    native_handle_type;\n\n  /// Construct a stream descriptor without opening it.\n  /**\n   * This constructor creates a stream descriptor without opening it. 
The\n   * descriptor needs to be opened and then connected or accepted before data\n   * can be sent or received on it.\n   *\n   * @param ex The I/O executor that the descriptor will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * descriptor.\n   */\n  explicit basic_stream_descriptor(const executor_type& ex)\n    : basic_descriptor<Executor>(ex)\n  {\n  }\n\n  /// Construct a stream descriptor without opening it.\n  /**\n   * This constructor creates a stream descriptor without opening it. The\n   * descriptor needs to be opened and then connected or accepted before data\n   * can be sent or received on it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the descriptor will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the descriptor.\n   */\n  template <typename ExecutionContext>\n  explicit basic_stream_descriptor(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_descriptor<Executor>(context)\n  {\n  }\n\n  /// Construct a stream descriptor on an existing native descriptor.\n  /**\n   * This constructor creates a stream descriptor object to hold an existing\n   * native descriptor.\n   *\n   * @param ex The I/O executor that the descriptor will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * descriptor.\n   *\n   * @param native_descriptor The new underlying descriptor implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_stream_descriptor(const executor_type& ex,\n      const native_handle_type& native_descriptor)\n    : basic_descriptor<Executor>(ex, native_descriptor)\n  {\n  }\n\n  /// Construct a stream descriptor on an existing native descriptor.\n  /**\n   * This constructor creates a stream descriptor object to hold an 
existing\n   * native descriptor.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the descriptor will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the descriptor.\n   *\n   * @param native_descriptor The new underlying descriptor implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_stream_descriptor(ExecutionContext& context,\n      const native_handle_type& native_descriptor,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_descriptor<Executor>(context, native_descriptor)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a stream descriptor from another.\n  /**\n   * This constructor moves a stream descriptor from one object to another.\n   *\n   * @param other The other stream descriptor object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_stream_descriptor(const executor_type&)\n   * constructor.\n   */\n  basic_stream_descriptor(basic_stream_descriptor&& other)\n    : basic_descriptor<Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a stream descriptor from another.\n  /**\n   * This assignment operator moves a stream descriptor from one object to\n   * another.\n   *\n   * @param other The other stream descriptor object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_stream_descriptor(const executor_type&)\n   * constructor.\n   */\n  basic_stream_descriptor& operator=(basic_stream_descriptor&& other)\n  {\n    basic_descriptor<Executor>::operator=(std::move(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  
/// Write some data to the descriptor.\n  /**\n   * This function is used to write data to the stream descriptor. The function\n   * call will block until one or more bytes of the data has been written\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more data buffers to be written to the descriptor.\n   *\n   * @returns The number of bytes written.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. Consider using the @ref write function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * descriptor.write_some(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().write_some(\n        this->impl_.get_implementation(), buffers, ec);\n    asio::detail::throw_error(ec, \"write_some\");\n    return s;\n  }\n\n  /// Write some data to the descriptor.\n  /**\n   * This function is used to write data to the stream descriptor. The function\n   * call will block until one or more bytes of the data has been written\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more data buffers to be written to the descriptor.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes written. 
Returns 0 if an error occurred.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. Consider using the @ref write function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().write_some(\n        this->impl_.get_implementation(), buffers, ec);\n  }\n\n  /// Start an asynchronous write.\n  /**\n   * This function is used to asynchronously write data to the stream\n   * descriptor. The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be written to the descriptor.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the write operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes written.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The write operation may not transmit all of the data to the peer.\n   * Consider using the @ref async_write function if you need to ensure that all\n   * data is written before the asynchronous operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * descriptor.async_write_some(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_write_some(this), handler, buffers);\n  }\n\n  /// Read some data from the descriptor.\n  /**\n   * This function is used to read data from the stream descriptor. The function\n   * call will block until one or more bytes of data has been read successfully,\n   * or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @returns The number of bytes read.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. 
Consider using the @ref read function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * descriptor.read_some(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().read_some(\n        this->impl_.get_implementation(), buffers, ec);\n    asio::detail::throw_error(ec, \"read_some\");\n    return s;\n  }\n\n  /// Read some data from the descriptor.\n  /**\n   * This function is used to read data from the stream descriptor. The function\n   * call will block until one or more bytes of data has been read successfully,\n   * or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes read. Returns 0 if an error occurred.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. Consider using the @ref read function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().read_some(\n        this->impl_.get_implementation(), buffers, ec);\n  }\n\n  /// Start an asynchronous read.\n  /**\n   * This function is used to asynchronously read data from the stream\n   * descriptor. 
The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the read operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes read.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The read operation may not read all of the requested number of bytes.\n   * Consider using the @ref async_read function if you need to ensure that the\n   * requested amount of data is read before the asynchronous operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * descriptor.async_read_some(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some(const MutableBufferSequence& buffers,\n      
ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_read_some(this), handler, buffers);\n  }\n\nprivate:\n  class initiate_async_write_some\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_write_some(basic_stream_descriptor* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_write_some(\n          self_->impl_.get_implementation(), buffers, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_stream_descriptor* self_;\n  };\n\n  class initiate_async_read_some\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_read_some(basic_stream_descriptor* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      
detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_read_some(\n          self_->impl_.get_implementation(), buffers, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_stream_descriptor* self_;\n  };\n};\n\n} // namespace posix\n} // namespace asio\n\n#endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_POSIX_BASIC_STREAM_DESCRIPTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/posix/descriptor.hpp",
    "content": "//\n// posix/descriptor.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_POSIX_DESCRIPTOR_HPP\n#define ASIO_POSIX_DESCRIPTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/posix/basic_descriptor.hpp\"\n\nnamespace asio {\nnamespace posix {\n\n/// Typedef for the typical usage of basic_descriptor.\ntypedef basic_descriptor<> descriptor;\n\n} // namespace posix\n} // namespace asio\n\n#endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR)\n       // || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_POSIX_DESCRIPTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/posix/descriptor_base.hpp",
    "content": "//\n// posix/descriptor_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_POSIX_DESCRIPTOR_BASE_HPP\n#define ASIO_POSIX_DESCRIPTOR_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/detail/io_control.hpp\"\n#include \"asio/detail/socket_option.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace posix {\n\n/// The descriptor_base class is used as a base for the descriptor class as a\n/// place to define the associated IO control commands.\nclass descriptor_base\n{\npublic:\n  /// Wait types.\n  /**\n   * For use with descriptor::wait() and descriptor::async_wait().\n   */\n  enum wait_type\n  {\n    /// Wait for a descriptor to become ready to read.\n    wait_read,\n\n    /// Wait for a descriptor to become ready to write.\n    wait_write,\n\n    /// Wait for a descriptor to have error conditions pending.\n    wait_error\n  };\n\n  /// IO control command to get the amount of data that can be read without\n  /// blocking.\n  /**\n   * Implements the FIONREAD IO control command.\n   *\n   * @par Example\n   * @code\n   * asio::posix::stream_descriptor descriptor(my_context);\n   * ...\n   * asio::descriptor_base::bytes_readable command(true);\n   * descriptor.io_control(command);\n   * std::size_t bytes_readable = command.get();\n   * @endcode\n   *\n   * @par Concepts:\n   * IoControlCommand.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined bytes_readable;\n#else\n  typedef asio::detail::io_control::bytes_readable 
bytes_readable;\n#endif\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  ~descriptor_base()\n  {\n  }\n};\n\n} // namespace posix\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_POSIX_DESCRIPTOR_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/posix/stream_descriptor.hpp",
    "content": "//\n// posix/stream_descriptor.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_POSIX_STREAM_DESCRIPTOR_HPP\n#define ASIO_POSIX_STREAM_DESCRIPTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/posix/basic_stream_descriptor.hpp\"\n\nnamespace asio {\nnamespace posix {\n\n/// Typedef for the typical usage of a stream-oriented descriptor.\ntypedef basic_stream_descriptor<> stream_descriptor;\n\n} // namespace posix\n} // namespace asio\n\n#endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_POSIX_STREAM_DESCRIPTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/post.hpp",
    "content": "//\n// post.hpp\n// ~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_POST_HPP\n#define ASIO_POST_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/is_executor.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Submits a completion token or function object for execution.\n/**\n * This function submits an object for execution using the object's associated\n * executor. The function object is queued for execution, and is never called\n * from the current thread prior to returning from <tt>post()</tt>.\n *\n * The use of @c post(), rather than @ref defer(), indicates the caller's\n * preference that the function object be eagerly queued for execution.\n *\n * This function has the following effects:\n *\n * @li Constructs a function object handler of type @c Handler, initialized\n * with <tt>handler(forward<CompletionToken>(token))</tt>.\n *\n * @li Constructs an object @c result of type <tt>async_result<Handler></tt>,\n * initializing the object as <tt>result(handler)</tt>.\n *\n * @li Obtains the handler's associated executor object @c ex by performing\n * <tt>get_associated_executor(handler)</tt>.\n *\n * @li Obtains the handler's associated allocator object @c alloc by performing\n * <tt>get_associated_allocator(handler)</tt>.\n *\n * @li Performs <tt>ex.post(std::move(handler), alloc)</tt>.\n *\n * @li Returns <tt>result.get()</tt>.\n */\ntemplate <ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) post(\n    
ASIO_MOVE_ARG(CompletionToken) token);\n\n/// Submits a completion token or function object for execution.\n/**\n * This function submits an object for execution using the specified executor.\n * The function object is queued for execution, and is never called from the\n * current thread prior to returning from <tt>post()</tt>.\n *\n * The use of @c post(), rather than @ref defer(), indicates the caller's\n * preference that the function object be eagerly queued for execution.\n *\n * This function has the following effects:\n *\n * @li Constructs a function object handler of type @c Handler, initialized\n * with <tt>handler(forward<CompletionToken>(token))</tt>.\n *\n * @li Constructs an object @c result of type <tt>async_result<Handler></tt>,\n * initializing the object as <tt>result(handler)</tt>.\n *\n * @li Obtains the handler's associated executor object @c ex1 by performing\n * <tt>get_associated_executor(handler)</tt>.\n *\n * @li Creates a work object @c w by performing <tt>make_work(ex1)</tt>.\n *\n * @li Obtains the handler's associated allocator object @c alloc by performing\n * <tt>get_associated_allocator(handler)</tt>.\n *\n * @li Constructs a function object @c f with a function call operator that\n * performs <tt>ex1.dispatch(std::move(handler), alloc)</tt> followed by\n * <tt>w.reset()</tt>.\n *\n * @li Performs <tt>Executor(ex).post(std::move(f), alloc)</tt>.\n *\n * @li Returns <tt>result.get()</tt>.\n */\ntemplate <typename Executor,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken\n      ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(Executor)>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) post(\n    const Executor& ex,\n    ASIO_MOVE_ARG(CompletionToken) token\n      ASIO_DEFAULT_COMPLETION_TOKEN(Executor),\n    typename enable_if<is_executor<Executor>::value>::type* = 0);\n\n/// Submits a completion token or function object for execution.\n/**\n * @returns <tt>post(ctx.get_executor(), forward<CompletionToken>(token))</tt>.\n */\ntemplate 
<typename ExecutionContext,\n    ASIO_COMPLETION_TOKEN_FOR(void()) CompletionToken\n      ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n        typename ExecutionContext::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, void()) post(\n    ExecutionContext& ctx,\n    ASIO_MOVE_ARG(CompletionToken) token\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename ExecutionContext::executor_type),\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type* = 0);\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/post.hpp\"\n\n#endif // ASIO_POST_HPP\n"
  },
  {
    "path": "src/third_party/asio/read.hpp",
    "content": "//\n// read.hpp\n// ~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_READ_HPP\n#define ASIO_READ_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/async_result.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/error.hpp\"\n\n#if !defined(ASIO_NO_EXTENSIONS)\n# include \"asio/basic_streambuf_fwd.hpp\"\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/**\n * @defgroup read asio::read\n *\n * @brief The @c read function is a composed operation that reads a certain\n * amount of data from a stream before returning.\n */\n/*@{*/\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers One or more buffers into which the data will be read. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * stream.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code asio::read(s, asio::buffer(data, size)); @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read(\n *     s, buffers,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncReadStream, typename MutableBufferSequence>\nstd::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers One or more buffers into which the data will be read. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * stream.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code asio::read(s, asio::buffer(data, size), ec); @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read(\n *     s, buffers,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncReadStream, typename MutableBufferSequence>\nstd::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers One or more buffers into which the data will be read. The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * stream.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's read_some function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code asio::read(s, asio::buffer(data, size),\n *     asio::transfer_at_least(32)); @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename SyncReadStream, typename MutableBufferSequence,\n  typename CompletionCondition>\nstd::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the SyncReadStream concept.\n *\n * @param buffers One or more buffers into which the data will be read. The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * stream.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's read_some function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes read. If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncReadStream, typename MutableBufferSequence,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read(\n *     s, buffers,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffer is full (that is, it has reached maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read(\n *     s, buffers,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. 
A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's read_some function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. 
A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's read_some function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes read. If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffer is full (that is, it has reached maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the SyncReadStream concept.\n *\n * @param b The basic_streambuf object into which the data will be read.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read(\n *     s, b,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncReadStream, typename Allocator>\nstd::size_t read(SyncReadStream& s, basic_streambuf<Allocator>& b);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffer is full (that is, it has reached maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param b The basic_streambuf object into which the data will be read.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read(\n *     s, b,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncReadStream, typename Allocator>\nstd::size_t read(SyncReadStream& s, basic_streambuf<Allocator>& b,\n    asio::error_code& ec);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. 
The call will block until one of the following conditions is true:\n *\n * @li The supplied buffer is full (that is, it has reached maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param b The basic_streambuf object into which the data will be read.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's read_some function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n */\ntemplate <typename SyncReadStream, typename Allocator,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffer is full (that is, it has reached maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the SyncReadStream concept.\n *\n * @param b The basic_streambuf object into which the data will be read.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's read_some function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes read. If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncReadStream, typename Allocator,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition, asio::error_code& ec);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read(\n *     s, buffers,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. The call will block until one of the following conditions is true:\n *\n * @li The supplied buffer is full (that is, it has reached maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read(\n *     s, buffers,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. 
The call will block until one of the following conditions is true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's read_some function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Attempt to read a certain amount of data from a stream before returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * stream. 
The call will block until one of the following conditions is true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's read_some function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes read. 
If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2,\n    typename CompletionCondition>\nstd::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/*@}*/\n/**\n * @defgroup async_read asio::async_read\n *\n * @brief The @c async_read function is a composed asynchronous operation that\n * reads a certain amount of data from a stream before completion.\n */\n/*@{*/\n\n/// Start an asynchronous operation to read a certain amount of data from a\n/// stream.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions is\n * true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other read operations (such\n * as async_read, the stream's async_read_some function, or any other composed\n * operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers One or more buffers into which the data will be read. The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * stream. 
Although the buffers object may be copied as necessary, ownership of\n * the underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes copied into the\n *                                           // buffers. If an error occurred,\n *                                           // this will be the  number of\n *                                           // bytes successfully transferred\n *                                           // prior to the error.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code\n * asio::async_read(s, asio::buffer(data, size), handler);\n * @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::async_read(\n *     s, buffers,\n *     asio::transfer_all(),\n *     handler); @endcode\n */\ntemplate <typename AsyncReadStream, typename MutableBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, const MutableBufferSequence& buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type* = 0);\n\n/// Start an asynchronous operation to read a certain amount of data from a\n/// stream.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions is\n * true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers One or more buffers into which the data will be read. The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * stream. Although the buffers object may be copied as necessary, ownership of\n * the underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's async_read_some function.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes copied into the\n *                                           // buffers. If an error occurred,\n *                                           // this will be the  number of\n *                                           // bytes successfully transferred\n *                                           // prior to the error.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code asio::async_read(s,\n *     asio::buffer(data, size),\n *     asio::transfer_at_least(32),\n *     handler); @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename AsyncReadStream,\n    typename MutableBufferSequence, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_mutable_buffer_sequence<MutableBufferSequence>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Start an asynchronous operation to read a certain amount of data from a\n/// stream.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions is\n * true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. 
The\n * program must ensure that the stream performs no other read operations (such\n * as async_read, the stream's async_read_some function, or any other composed\n * operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes copied into the\n *                                           // buffers. If an error occurred,\n *                                           // this will be the  number of\n *                                           // bytes successfully transferred\n *                                           // prior to the error.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note This overload is equivalent to calling:\n * @code asio::async_read(\n *     s, buffers,\n *     asio::transfer_all(),\n *     handler); @endcode\n */\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Start an asynchronous operation to read a certain amount of data from a\n/// stream.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions is\n * true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other read operations (such\n * as async_read, the stream's async_read_some function, or any other composed\n * operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's async_read_some function.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes copied into the\n *                                           // buffers. If an error occurred,\n *                                           // this will be the  number of\n *                                           // bytes successfully transferred\n *                                           // prior to the error.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v1, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Start an asynchronous operation to read a certain amount of data from a\n/// stream.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions is\n * true:\n *\n * @li The supplied buffer is full (that is, it has reached maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other read operations (such\n * as async_read, the stream's async_read_some function, or any other composed\n * operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the AsyncReadStream concept.\n *\n * @param b A basic_streambuf object into which the data will be read. Ownership\n * of the streambuf is retained by the caller, which must guarantee that it\n * remains valid until the handler is called.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes copied into the\n *                                           // buffers. If an error occurred,\n *                                           // this will be the  number of\n *                                           // bytes successfully transferred\n *                                           // prior to the error.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note This overload is equivalent to calling:\n * @code asio::async_read(\n *     s, b,\n *     asio::transfer_all(),\n *     handler); @endcode\n */\ntemplate <typename AsyncReadStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, basic_streambuf<Allocator>& b,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type));\n\n/// Start an asynchronous operation to read a certain amount of data from a\n/// stream.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions is\n * true:\n *\n * @li The supplied buffer is full (that is, it has reached maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other read operations (such\n * as async_read, the stream's async_read_some function, or any other composed\n * operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param b A basic_streambuf object into which the data will be read. 
Ownership\n * of the streambuf is retained by the caller, which must guarantee that it\n * remains valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's async_read_some function.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes copied into the\n *                                           // buffers. If an error occurred,\n *                                           // this will be the  number of\n *                                           // bytes successfully transferred\n *                                           // prior to the error.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncReadStream,\n    typename Allocator, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type));\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Start an asynchronous operation to read a certain amount of data from a\n/// stream.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions is\n * true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other read operations (such\n * as async_read, the stream's async_read_some function, or any other composed\n * operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes copied into the\n *                                           // buffers. If an error occurred,\n *                                           // this will be the  number of\n *                                           // bytes successfully transferred\n *                                           // prior to the error.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note This overload is equivalent to calling:\n * @code asio::async_read(\n *     s, buffers,\n *     asio::transfer_all(),\n *     handler); @endcode\n */\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Start an asynchronous operation to read a certain amount of data from a\n/// stream.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions is\n * true:\n *\n * @li The specified dynamic buffer sequence is full (that is, it has reached\n * maximum size).\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other read operations (such\n * as async_read, the stream's async_read_some function, or any other composed\n * operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_read_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the stream's async_read_some function.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes copied into the\n *                                           // buffers. If an error occurred,\n *                                           // this will be the  number of\n *                                           // bytes successfully transferred\n *                                           // prior to the error.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v2, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/read.hpp\"\n\n#endif // ASIO_READ_HPP\n"
  },
  {
    "path": "src/third_party/asio/read_at.hpp",
    "content": "//\n// read_at.hpp\n// ~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_READ_AT_HPP\n#define ASIO_READ_AT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/cstdint.hpp\"\n#include \"asio/error.hpp\"\n\n#if !defined(ASIO_NO_EXTENSIONS)\n# include \"asio/basic_streambuf_fwd.hpp\"\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/**\n * @defgroup read_at asio::read_at\n *\n * @brief The @c read_at function is a composed operation that reads a certain\n * amount of data at the specified offset before returning.\n */\n/*@{*/\n\n/// Attempt to read a certain amount of data at the specified offset before\n/// returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * random access device at the specified offset. The call will block until one\n * of the following conditions is true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the SyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param buffers One or more buffers into which the data will be read. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * device.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code asio::read_at(d, 42, asio::buffer(data, size)); @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read_at(\n *     d, 42, buffers,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncRandomAccessReadDevice, typename MutableBufferSequence>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers);\n\n/// Attempt to read a certain amount of data at the specified offset before\n/// returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * random access device at the specified offset. The call will block until one\n * of the following conditions is true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the SyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param buffers One or more buffers into which the data will be read. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * device.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code asio::read_at(d, 42,\n *     asio::buffer(data, size), ec); @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read_at(\n *     d, 42, buffers,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncRandomAccessReadDevice, typename MutableBufferSequence>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    asio::error_code& ec);\n\n/// Attempt to read a certain amount of data at the specified offset before\n/// returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * random access device at the specified offset. The call will block until one\n * of the following conditions is true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the SyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param buffers One or more buffers into which the data will be read. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * device.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the device's read_some_at function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code asio::read_at(d, 42, asio::buffer(data, size),\n *     asio::transfer_at_least(32)); @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename SyncRandomAccessReadDevice, typename MutableBufferSequence,\n    typename CompletionCondition>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition);\n\n/// Attempt to read a certain amount of data at the specified offset before\n/// returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * random access device at the specified offset. The call will block until one\n * of the following conditions is true:\n *\n * @li The supplied buffers are full. 
That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the SyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param buffers One or more buffers into which the data will be read. The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * device.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the device's read_some_at function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes read. 
If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncRandomAccessReadDevice, typename MutableBufferSequence,\n    typename CompletionCondition>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition, asio::error_code& ec);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Attempt to read a certain amount of data at the specified offset before\n/// returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * random access device at the specified offset. The call will block until one\n * of the following conditions is true:\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the SyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param b The basic_streambuf object into which the data will be read.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read_at(\n *     d, 42, b,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncRandomAccessReadDevice, typename Allocator>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b);\n\n/// Attempt to read a certain amount of data at the specified offset before\n/// returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * random access device at the specified offset. 
The call will block until one\n * of the following conditions is true:\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the SyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param b The basic_streambuf object into which the data will be read.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @note This overload is equivalent to calling:\n * @code asio::read_at(\n *     d, 42, b,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncRandomAccessReadDevice, typename Allocator>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b,\n    asio::error_code& ec);\n\n/// Attempt to read a certain amount of data at the specified offset before\n/// returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * random access device at the specified offset. The call will block until one\n * of the following conditions is true:\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the SyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param b The basic_streambuf object into which the data will be read.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the device's read_some_at function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n */\ntemplate <typename SyncRandomAccessReadDevice, typename Allocator,\n    typename CompletionCondition>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition);\n\n/// Attempt to read a certain amount of data at the specified offset before\n/// returning.\n/**\n * This function is used to read a certain number of bytes of data from a\n * random access device at the specified offset. The call will block until one\n * of the following conditions is true:\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the SyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param b The basic_streambuf object into which the data will be read.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest read_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the device's read_some_at function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes read. If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncRandomAccessReadDevice, typename Allocator,\n    typename CompletionCondition>\nstd::size_t read_at(SyncRandomAccessReadDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition, asio::error_code& ec);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n/*@}*/\n/**\n * @defgroup async_read_at asio::async_read_at\n *\n * @brief The @c async_read_at function is a composed asynchronous operation\n * that reads a certain amount of data at the specified offset.\n */\n/*@{*/\n\n/// Start an asynchronous operation to read a certain amount of data at the\n/// specified offset.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a random access device at the specified offset. The function call\n * always returns immediately. The asynchronous operation will continue until\n * one of the following conditions is true:\n *\n * @li The supplied buffers are full. 
That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * async_read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the AsyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param buffers One or more buffers into which the data will be read. The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * device. Although the buffers object may be copied as necessary, ownership of\n * the underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes copied into the buffers. If an error\n *   // occurred, this will be the number of bytes successfully\n *   // transferred prior to the error.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code\n * asio::async_read_at(d, 42, asio::buffer(data, size), handler);\n * @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::async_read_at(\n *     d, 42, buffers,\n *     asio::transfer_all(),\n *     handler); @endcode\n */\ntemplate <typename AsyncRandomAccessReadDevice, typename MutableBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncRandomAccessReadDevice::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset,\n    const MutableBufferSequence& buffers,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncRandomAccessReadDevice::executor_type));\n\n/// Start an asynchronous operation to read a certain amount of data at the\n/// specified offset.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a random access device at the specified offset. The function call\n * always returns immediately. The asynchronous operation will continue until\n * one of the following conditions is true:\n *\n * @li The supplied buffers are full. That is, the bytes transferred is equal to\n * the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * @param d The device from which the data is to be read. 
The type must support\n * the AsyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param buffers One or more buffers into which the data will be read. The sum\n * of the buffer sizes indicates the maximum number of bytes to read from the\n * device. Although the buffers object may be copied as necessary, ownership of\n * the underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_read_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the device's async_read_some_at function.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes copied into the buffers. If an error\n *   // occurred, this will be the number of bytes successfully\n *   // transferred prior to the error.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * To read into a single data buffer use the @ref buffer function as follows:\n * @code asio::async_read_at(d, 42,\n *     asio::buffer(data, size),\n *     asio::transfer_at_least(32),\n *     handler); @endcode\n * See the @ref buffer documentation for information on reading into multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename AsyncRandomAccessReadDevice,\n    typename MutableBufferSequence, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncRandomAccessReadDevice::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_at(AsyncRandomAccessReadDevice& d,\n    uint64_t offset, const MutableBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncRandomAccessReadDevice::executor_type));\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Start an asynchronous operation to read a certain amount of data at the\n/// specified offset.\n/**\n * This function is used to asynchronously read a certain number of bytes of\n * data from a random access device at the specified offset. The function call\n * always returns immediately. The asynchronous operation will continue until\n * one of the following conditions is true:\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * async_read_some_at function.\n *\n * @param d The device from which the data is to be read. 
The type must support\n * the AsyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param b A basic_streambuf object into which the data will be read. Ownership\n * of the streambuf is retained by the caller, which must guarantee that it\n * remains valid until the handler is called.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes copied into the buffers. If an error\n *   // occurred, this will be the number of bytes successfully\n *   // transferred prior to the error.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note This overload is equivalent to calling:\n * @code asio::async_read_at(\n *     d, 42, b,\n *     asio::transfer_all(),\n *     handler); @endcode\n */\ntemplate <typename AsyncRandomAccessReadDevice, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncRandomAccessReadDevice::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_at(AsyncRandomAccessReadDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncRandomAccessReadDevice::executor_type));\n\n/// Start an asynchronous operation to read a certain amount of data at the\n/// specified offset.\n/**\n * This function is 
used to asynchronously read a certain number of bytes of\n * data from a random access device at the specified offset. The function call\n * always returns immediately. The asynchronous operation will continue until\n * one of the following conditions is true:\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * async_read_some_at function.\n *\n * @param d The device from which the data is to be read. The type must support\n * the AsyncRandomAccessReadDevice concept.\n *\n * @param offset The offset at which the data will be read.\n *\n * @param b A basic_streambuf object into which the data will be read. Ownership\n * of the streambuf is retained by the caller, which must guarantee that it\n * remains valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the read operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_read_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the read operation is complete. A non-zero\n * return value indicates the maximum number of bytes to be read on the next\n * call to the device's async_read_some_at function.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes copied into the buffers. 
If an error\n *   // occurred, this will be the number of bytes successfully\n *   // transferred prior to the error.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncRandomAccessReadDevice,\n    typename Allocator, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncRandomAccessReadDevice::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_at(AsyncRandomAccessReadDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncRandomAccessReadDevice::executor_type));\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/read_at.hpp\"\n\n#endif // ASIO_READ_AT_HPP\n"
  },
  {
    "path": "src/third_party/asio/read_until.hpp",
    "content": "//\n// read_until.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_READ_UNTIL_HPP\n#define ASIO_READ_UNTIL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include <string>\n#include \"asio/async_result.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/detail/regex_fwd.hpp\"\n#include \"asio/detail/string_view.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error.hpp\"\n\n#if !defined(ASIO_NO_EXTENSIONS)\n# include \"asio/basic_streambuf_fwd.hpp\"\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nnamespace detail\n{\n  char (&has_result_type_helper(...))[2];\n\n  template <typename T>\n  char has_result_type_helper(T*, typename T::result_type* = 0);\n\n  template <typename T>\n  struct has_result_type\n  {\n    enum { value = (sizeof((has_result_type_helper)((T*)(0))) == 1) };\n  };\n} // namespace detail\n\n/// Type trait used to determine whether a type can be used as a match condition\n/// function with read_until and async_read_until.\ntemplate <typename T>\nstruct is_match_condition\n{\n#if defined(GENERATING_DOCUMENTATION)\n  /// The value member is true if the type may be used as a match condition.\n  static const bool value;\n#else\n  enum\n  {\n    value = asio::is_function<\n        typename asio::remove_pointer<T>::type>::value\n      || detail::has_result_type<T>::value\n  };\n#endif\n};\n\n/**\n * @defgroup read_until asio::read_until\n *\n * @brief The @c read_until function is a composed operation that reads data\n * into a dynamic buffer sequence, or into a streambuf, until it contains a\n * 
delimiter, matches a regular expression, or a function object indicates a\n * match.\n */\n/*@{*/\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Read data into a dynamic buffer sequence until it contains a specified\n/// delimiter.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains the specified\n * delimiter. The call will block until one of the following conditions is\n * true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains the delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param delim The delimiter character.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the delimiter.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond the delimiter. An application will\n * typically leave that data in the dynamic buffer sequence for a subsequent\n * read_until operation to examine.\n *\n * @par Example\n * To read data into a @c std::string until a newline is encountered:\n * @code std::string data;\n * std::string n = asio::read_until(s,\n *     asio::dynamic_buffer(data), '\\n');\n * std::string line = data.substr(0, n);\n * data.erase(0, n); @endcode\n * After the @c read_until operation completes successfully, the string @c data\n * contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\n', 'd', 'e', ... 
} @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the buffer @c b as\n * follows:\n * @code { 'd', 'e', ... } @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until it contains a specified\n/// delimiter.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains the specified\n * delimiter. The call will block until one of the following conditions is\n * true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains the delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param delim The delimiter character.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the delimiter. 
Returns 0 if an error occurred.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond the delimiter. An application will\n * typically leave that data in the dynamic buffer sequence for a subsequent\n * read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    char delim, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until it contains a specified\n/// delimiter.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains the specified\n * delimiter. The call will block until one of the following conditions is\n * true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains the delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param delim The delimiter string.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the delimiter.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond the delimiter. 
An application will\n * typically leave that data in the dynamic buffer sequence for a subsequent\n * read_until operation to examine.\n *\n * @par Example\n * To read data into a @c std::string until a CR-LF sequence is encountered:\n * @code std::string data;\n * std::string n = asio::read_until(s,\n *     asio::dynamic_buffer(data), \"\\r\\n\");\n * std::string line = data.substr(0, n);\n * data.erase(0, n); @endcode\n * After the @c read_until operation completes successfully, the string @c data\n * contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the buffer @c b as\n * follows:\n * @code { 'd', 'e', ... } @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_STRING_VIEW_PARAM delim,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until it contains a specified\n/// delimiter.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains the specified\n * delimiter. The call will block until one of the following conditions is\n * true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. 
If the dynamic buffer sequence's get area already\n * contains the delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param delim The delimiter string.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the delimiter. Returns 0 if an error occurred.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond the delimiter. An application will\n * typically leave that data in the dynamic buffer sequence for a subsequent\n * read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_STRING_VIEW_PARAM delim,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if defined(ASIO_HAS_BOOST_REGEX) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Read data into a dynamic buffer sequence until some part of the data it\n/// contains matches a regular expression.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains some data\n * that matches a regular expression. 
The call will block until one of the\n * following conditions is true:\n *\n * @li A substring of the dynamic buffer sequence's get area matches the\n * regular expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains data that matches the regular expression, the function returns\n * immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers A dynamic buffer sequence into which the data will be read.\n *\n * @param expr The regular expression.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the substring that matches the regular expression.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond that which matched the regular\n * expression. An application will typically leave that data in the dynamic\n * buffer sequence for a subsequent read_until operation to examine.\n *\n * @par Example\n * To read data into a @c std::string until a CR-LF sequence is encountered:\n * @code std::string data;\n * std::string n = asio::read_until(s,\n *     asio::dynamic_buffer(data), boost::regex(\"\\r\\n\"));\n * std::string line = data.substr(0, n);\n * data.erase(0, n); @endcode\n * After the @c read_until operation completes successfully, the string @c data\n * contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the buffer @c b as\n * follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    const boost::regex& expr,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until some part of the data it\n/// contains matches a regular expression.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains some data\n * that matches a regular expression. The call will block until one of the\n * following conditions is true:\n *\n * @li A substring of the dynamic buffer sequence's get area matches the\n * regular expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains data that matches the regular expression, the function returns\n * immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers A dynamic buffer sequence into which the data will be read.\n *\n * @param expr The regular expression.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the substring that matches the regular expression. Returns 0\n * if an error occurred.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond that which matched the regular\n * expression. 
An application will typically leave that data in the dynamic\n * buffer sequence for a subsequent read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v1>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    const boost::regex& expr, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n       // || defined(GENERATING_DOCUMENTATION)\n\n/// Read data into a dynamic buffer sequence until a function object indicates a\n/// match.\n\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until a user-defined match condition function object, when applied\n * to the data contained in the dynamic buffer sequence, indicates a successful\n * match. The call will block until one of the following conditions is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the match condition function object already indicates\n * a match, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers A dynamic buffer sequence into which the data will be read.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. 
The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<typename DynamicBuffer_v1::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @returns The number of bytes in the dynamic_buffer's get area that\n * have been fully consumed by the match function.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond that which matched the function object.\n * An application will typically leave that data in the dynamic buffer sequence\n * for a subsequent read_until operation to examine.\n\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. 
It must be specialised for other user-defined\n * function objects.\n *\n * @par Examples\n * To read data into a dynamic buffer sequence until whitespace is encountered:\n * @code typedef asio::buffers_iterator<\n *     asio::const_buffers_1> iterator;\n *\n * std::pair<iterator, bool>\n * match_whitespace(iterator begin, iterator end)\n * {\n *   iterator i = begin;\n *   while (i != end)\n *     if (std::isspace(*i++))\n *       return std::make_pair(i, true);\n *   return std::make_pair(i, false);\n * }\n * ...\n * std::string data;\n * asio::read_until(s, data, match_whitespace);\n * @endcode\n *\n * To read data into a @c std::string until a matching character is found:\n * @code class match_char\n * {\n * public:\n *   explicit match_char(char c) : c_(c) {}\n *\n *   template <typename Iterator>\n *   std::pair<Iterator, bool> operator()(\n *       Iterator begin, Iterator end) const\n *   {\n *     Iterator i = begin;\n *     while (i != end)\n *       if (c_ == *i++)\n *         return std::make_pair(i, true);\n *     return std::make_pair(i, false);\n *   }\n *\n * private:\n *   char c_;\n * };\n *\n * namespace asio {\n *   template <> struct is_match_condition<match_char>\n *     : public boost::true_type {};\n * } // namespace asio\n * ...\n * std::string data;\n * asio::read_until(s, data, match_char('a'));\n * @endcode\n */\ntemplate <typename SyncReadStream,\n    typename DynamicBuffer_v1, typename MatchCondition>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    MatchCondition match_condition,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until a function object indicates a\n/// match.\n/**\n * This function is used to read data into the specified 
dynamic buffer\n * sequence until a user-defined match condition function object, when applied\n * to the data contained in the dynamic buffer sequence, indicates a successful\n * match. The call will block until one of the following conditions is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the match condition function object already indicates\n * a match, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers A dynamic buffer sequence into which the data will be read.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<DynamicBuffer_v1::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area that\n * have been fully consumed by the match function. 
Returns 0 if an error\n * occurred.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond that which matched the function object.\n * An application will typically leave that data in the dynamic buffer sequence\n * for a subsequent read_until operation to examine.\n *\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. It must be specialised for other user-defined\n * function objects.\n */\ntemplate <typename SyncReadStream,\n    typename DynamicBuffer_v1, typename MatchCondition>\nstd::size_t read_until(SyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    MatchCondition match_condition, asio::error_code& ec,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Read data into a streambuf until it contains a specified delimiter.\n/**\n * This function is used to read data into the specified streambuf until the\n * streambuf's get area contains the specified delimiter. The call will block\n * until one of the following conditions is true:\n *\n * @li The get area of the streambuf contains the specified delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the streambuf's get area already contains the\n * delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the SyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param delim The delimiter character.\n *\n * @returns The number of bytes in the streambuf's get area up to and including\n * the delimiter.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the streambuf may contain\n * additional data beyond the delimiter. An application will typically leave\n * that data in the streambuf for a subsequent read_until operation to examine.\n *\n * @par Example\n * To read data into a streambuf until a newline is encountered:\n * @code asio::streambuf b;\n * asio::read_until(s, b, '\\n');\n * std::istream is(&b);\n * std::string line;\n * std::getline(is, line); @endcode\n * After the @c read_until operation completes successfully, the buffer @c b\n * contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\n', 'd', 'e', ... } @endcode\n * The call to @c std::getline then extracts the data up to and including the\n * newline (which is discarded), so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c' } @endcode\n * The remaining data is left in the buffer @c b as follows:\n * @code { 'd', 'e', ... } @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename Allocator>\nstd::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, char delim);\n\n/// Read data into a streambuf until it contains a specified delimiter.\n/**\n * This function is used to read data into the specified streambuf until the\n * streambuf's get area contains the specified delimiter. 
The call will block\n * until one of the following conditions is true:\n *\n * @li The get area of the streambuf contains the specified delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the streambuf's get area already contains the\n * delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param delim The delimiter character.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the streambuf's get area up to and including\n * the delimiter. Returns 0 if an error occurred.\n *\n * @note After a successful read_until operation, the streambuf may contain\n * additional data beyond the delimiter. An application will typically leave\n * that data in the streambuf for a subsequent read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename Allocator>\nstd::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, char delim,\n    asio::error_code& ec);\n\n/// Read data into a streambuf until it contains a specified delimiter.\n/**\n * This function is used to read data into the specified streambuf until the\n * streambuf's get area contains the specified delimiter. The call will block\n * until one of the following conditions is true:\n *\n * @li The get area of the streambuf contains the specified delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the streambuf's get area already contains the\n * delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the SyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param delim The delimiter string.\n *\n * @returns The number of bytes in the streambuf's get area up to and including\n * the delimiter.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the streambuf may contain\n * additional data beyond the delimiter. An application will typically leave\n * that data in the streambuf for a subsequent read_until operation to examine.\n *\n * @par Example\n * To read data into a streambuf until a newline is encountered:\n * @code asio::streambuf b;\n * asio::read_until(s, b, \"\\r\\n\");\n * std::istream is(&b);\n * std::string line;\n * std::getline(is, line); @endcode\n * After the @c read_until operation completes successfully, the buffer @c b\n * contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c std::getline then extracts the data up to and including the\n * newline (which is discarded), so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r' } @endcode\n * The remaining data is left in the buffer @c b as follows:\n * @code { 'd', 'e', ... } @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename Allocator>\nstd::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    ASIO_STRING_VIEW_PARAM delim);\n\n/// Read data into a streambuf until it contains a specified delimiter.\n/**\n * This function is used to read data into the specified streambuf until the\n * streambuf's get area contains the specified delimiter. 
The call will block\n * until one of the following conditions is true:\n *\n * @li The get area of the streambuf contains the specified delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the streambuf's get area already contains the\n * delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param delim The delimiter string.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the streambuf's get area up to and including\n * the delimiter. Returns 0 if an error occurred.\n *\n * @note After a successful read_until operation, the streambuf may contain\n * additional data beyond the delimiter. An application will typically leave\n * that data in the streambuf for a subsequent read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename Allocator>\nstd::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec);\n\n#if defined(ASIO_HAS_BOOST_REGEX) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Read data into a streambuf until some part of the data it contains matches\n/// a regular expression.\n/**\n * This function is used to read data into the specified streambuf until the\n * streambuf's get area contains some data that matches a regular expression.\n * The call will block until one of the following conditions is true:\n *\n * @li A substring of the streambuf's get area matches the regular expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. 
If the streambuf's get area already contains data that\n * matches the regular expression, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param expr The regular expression.\n *\n * @returns The number of bytes in the streambuf's get area up to and including\n * the substring that matches the regular expression.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the streambuf may contain\n * additional data beyond that which matched the regular expression. An\n * application will typically leave that data in the streambuf for a subsequent\n * read_until operation to examine.\n *\n * @par Example\n * To read data into a streambuf until a CR-LF sequence is encountered:\n * @code asio::streambuf b;\n * asio::read_until(s, b, boost::regex(\"\\r\\n\"));\n * std::istream is(&b);\n * std::string line;\n * std::getline(is, line); @endcode\n * After the @c read_until operation completes successfully, the buffer @c b\n * contains the data which matched the regular expression:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c std::getline then extracts the data up to and including the\n * newline (which is discarded), so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r' } @endcode\n * The remaining data is left in the buffer @c b as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename Allocator>\nstd::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, const boost::regex& expr);\n\n/// Read data into a streambuf until some part of the data it contains matches\n/// a regular expression.\n/**\n * This function is used to read data into the specified streambuf until the\n * streambuf's get area contains some data that matches a regular expression.\n * The call will block until one of the following conditions is true:\n *\n * @li A substring of the streambuf's get area matches the regular expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the streambuf's get area already contains data that\n * matches the regular expression, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param expr The regular expression.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the streambuf's get area up to and including\n * the substring that matches the regular expression. Returns 0 if an error\n * occurred.\n *\n * @note After a successful read_until operation, the streambuf may contain\n * additional data beyond that which matched the regular expression. 
An\n * application will typically leave that data in the streambuf for a subsequent\n * read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename Allocator>\nstd::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, const boost::regex& expr,\n    asio::error_code& ec);\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n       // || defined(GENERATING_DOCUMENTATION)\n\n/// Read data into a streambuf until a function object indicates a match.\n/**\n * This function is used to read data into the specified streambuf until a\n * user-defined match condition function object, when applied to the data\n * contained in the streambuf, indicates a successful match. The call will\n * block until one of the following conditions is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the match condition function object already indicates\n * a match, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<basic_streambuf<Allocator>::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. 
This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @returns The number of bytes in the streambuf's get area that have been fully\n * consumed by the match function.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the streambuf may contain\n * additional data beyond that which matched the function object. An application\n * will typically leave that data in the streambuf for a subsequent read_until\n * operation to examine.\n *\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. It must be specialised for other user-defined\n * function objects.\n *\n * @par Examples\n * To read data into a streambuf until whitespace is encountered:\n * @code typedef asio::buffers_iterator<\n *     asio::streambuf::const_buffers_type> iterator;\n *\n * std::pair<iterator, bool>\n * match_whitespace(iterator begin, iterator end)\n * {\n *   iterator i = begin;\n *   while (i != end)\n *     if (std::isspace(*i++))\n *       return std::make_pair(i, true);\n *   return std::make_pair(i, false);\n * }\n * ...\n * asio::streambuf b;\n * asio::read_until(s, b, match_whitespace);\n * @endcode\n *\n * To read data into a streambuf until a matching character is found:\n * @code class match_char\n * {\n * public:\n *   explicit match_char(char c) : c_(c) {}\n *\n *   template <typename Iterator>\n *   std::pair<Iterator, bool> operator()(\n *       Iterator begin, Iterator end) const\n *   {\n *     Iterator i = begin;\n *     while (i != end)\n *       if (c_ == *i++)\n *         return std::make_pair(i, true);\n *     return std::make_pair(i, false);\n *   }\n *\n * private:\n *   char c_;\n * };\n *\n * namespace asio {\n *   template 
<> struct is_match_condition<match_char>\n *     : public boost::true_type {};\n * } // namespace asio\n * ...\n * asio::streambuf b;\n * asio::read_until(s, b, match_char('a'));\n * @endcode\n */\ntemplate <typename SyncReadStream, typename Allocator, typename MatchCondition>\nstd::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, MatchCondition match_condition,\n    typename enable_if<is_match_condition<MatchCondition>::value>::type* = 0);\n\n/// Read data into a streambuf until a function object indicates a match.\n/**\n * This function is used to read data into the specified streambuf until a\n * user-defined match condition function object, when applied to the data\n * contained in the streambuf, indicates a successful match. The call will\n * block until one of the following conditions is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the match condition function object already indicates\n * a match, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<basic_streambuf<Allocator>::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. 
The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the streambuf's get area that have been fully\n * consumed by the match function. Returns 0 if an error occurred.\n *\n * @note After a successful read_until operation, the streambuf may contain\n * additional data beyond that which matched the function object. An application\n * will typically leave that data in the streambuf for a subsequent read_until\n * operation to examine.\n *\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. It must be specialised for other user-defined\n * function objects.\n */\ntemplate <typename SyncReadStream, typename Allocator, typename MatchCondition>\nstd::size_t read_until(SyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    MatchCondition match_condition, asio::error_code& ec,\n    typename enable_if<is_match_condition<MatchCondition>::value>::type* = 0);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Read data into a dynamic buffer sequence until it contains a specified\n/// delimiter.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains the specified\n * delimiter. 
The call will block until one of the following conditions is\n * true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains the delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param delim The delimiter character.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the delimiter.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond the delimiter. An application will\n * typically leave that data in the dynamic buffer sequence for a subsequent\n * read_until operation to examine.\n *\n * @par Example\n * To read data into a @c std::string until a newline is encountered:\n * @code std::string data;\n * std::size_t n = asio::read_until(s,\n *     asio::dynamic_buffer(data), '\\n');\n * std::string line = data.substr(0, n);\n * data.erase(0, n); @endcode\n * After the @c read_until operation completes successfully, the string @c data\n * contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the buffer @c data\n * as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, char delim,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until it contains a specified\n/// delimiter.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains the specified\n * delimiter. The call will block until one of the following conditions is\n * true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains the delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param delim The delimiter character.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the delimiter. Returns 0 if an error occurred.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond the delimiter. 
An application will\n * typically leave that data in the dynamic buffer sequence for a subsequent\n * read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    char delim, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until it contains a specified\n/// delimiter.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains the specified\n * delimiter. The call will block until one of the following conditions is\n * true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains the delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param delim The delimiter string.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the delimiter.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond the delimiter. 
An application will\n * typically leave that data in the dynamic buffer sequence for a subsequent\n * read_until operation to examine.\n *\n * @par Example\n * To read data into a @c std::string until a CR-LF sequence is encountered:\n * @code std::string data;\n * std::size_t n = asio::read_until(s,\n *     asio::dynamic_buffer(data), \"\\r\\n\");\n * std::string line = data.substr(0, n);\n * data.erase(0, n); @endcode\n * After the @c read_until operation completes successfully, the string @c data\n * contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the buffer @c data\n * as follows:\n * @code { 'd', 'e', ... } @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    ASIO_STRING_VIEW_PARAM delim,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until it contains a specified\n/// delimiter.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains the specified\n * delimiter. The call will block until one of the following conditions is\n * true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. 
If the dynamic buffer sequence's get area already\n * contains the delimiter, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n *\n * @param delim The delimiter string.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the delimiter. Returns 0 if an error occurred.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond the delimiter. An application will\n * typically leave that data in the dynamic buffer sequence for a subsequent\n * read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if defined(ASIO_HAS_BOOST_REGEX) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Read data into a dynamic buffer sequence until some part of the data it\n/// contains matches a regular expression.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains some data\n * that matches a regular expression. The call will block until one of the\n * following conditions is true:\n *\n * @li A substring of the dynamic buffer sequence's get area matches the\n * regular expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. 
If the dynamic buffer sequence's get area already\n * contains data that matches the regular expression, the function returns\n * immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers A dynamic buffer sequence into which the data will be read.\n *\n * @param expr The regular expression.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the substring that matches the regular expression.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond that which matched the regular\n * expression. An application will typically leave that data in the dynamic\n * buffer sequence for a subsequent read_until operation to examine.\n *\n * @par Example\n * To read data into a @c std::string until a CR-LF sequence is encountered:\n * @code std::string data;\n * std::size_t n = asio::read_until(s,\n *     asio::dynamic_buffer(data), boost::regex(\"\\r\\n\"));\n * std::string line = data.substr(0, n);\n * data.erase(0, n); @endcode\n * After the @c read_until operation completes successfully, the string @c data\n * contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the buffer @c data\n * as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c read_until operation.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    const boost::regex& expr,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until some part of the data it\n/// contains matches a regular expression.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until the dynamic buffer sequence's get area contains some data\n * that matches a regular expression. The call will block until one of the\n * following conditions is true:\n *\n * @li A substring of the dynamic buffer sequence's get area matches the\n * regular expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the dynamic buffer sequence's get area already\n * contains data that matches the regular expression, the function returns\n * immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers A dynamic buffer sequence into which the data will be read.\n *\n * @param expr The regular expression.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area up to\n * and including the substring that matches the regular expression. Returns 0\n * if an error occurred.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond that which matched the regular\n * expression. 
An application will typically leave that data in the dynamic\n * buffer sequence for a subsequent read_until operation to examine.\n */\ntemplate <typename SyncReadStream, typename DynamicBuffer_v2>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    const boost::regex& expr, asio::error_code& ec,\n    typename enable_if<\n        is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n       // || defined(GENERATING_DOCUMENTATION)\n\n/// Read data into a dynamic buffer sequence until a function object indicates a\n/// match.\n\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until a user-defined match condition function object, when applied\n * to the data contained in the dynamic buffer sequence, indicates a successful\n * match. The call will block until one of the following conditions is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the match condition function object already indicates\n * a match, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers A dynamic buffer sequence into which the data will be read.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<typename DynamicBuffer_v2::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. 
The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @returns The number of bytes in the dynamic_buffer's get area that\n * have been fully consumed by the match function.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond that which matched the function object.\n * An application will typically leave that data in the dynamic buffer sequence\n * for a subsequent read_until operation to examine.\n\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. 
It must be specialised for other user-defined\n * function objects.\n *\n * @par Examples\n * To read data into a dynamic buffer sequence until whitespace is encountered:\n * @code typedef asio::buffers_iterator<\n *     asio::const_buffers_1> iterator;\n *\n * std::pair<iterator, bool>\n * match_whitespace(iterator begin, iterator end)\n * {\n *   iterator i = begin;\n *   while (i != end)\n *     if (std::isspace(*i++))\n *       return std::make_pair(i, true);\n *   return std::make_pair(i, false);\n * }\n * ...\n * std::string data;\n * asio::read_until(s, data, match_whitespace);\n * @endcode\n *\n * To read data into a @c std::string until a matching character is found:\n * @code class match_char\n * {\n * public:\n *   explicit match_char(char c) : c_(c) {}\n *\n *   template <typename Iterator>\n *   std::pair<Iterator, bool> operator()(\n *       Iterator begin, Iterator end) const\n *   {\n *     Iterator i = begin;\n *     while (i != end)\n *       if (c_ == *i++)\n *         return std::make_pair(i, true);\n *     return std::make_pair(i, false);\n *   }\n *\n * private:\n *   char c_;\n * };\n *\n * namespace asio {\n *   template <> struct is_match_condition<match_char>\n *     : public boost::true_type {};\n * } // namespace asio\n * ...\n * std::string data;\n * asio::read_until(s, data, match_char('a'));\n * @endcode\n */\ntemplate <typename SyncReadStream,\n    typename DynamicBuffer_v2, typename MatchCondition>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    MatchCondition match_condition,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Read data into a dynamic buffer sequence until a function object indicates a\n/// match.\n/**\n * This function is used to read data into the specified dynamic buffer\n * sequence until a user-defined match condition function object, when applied\n * to the data contained in the 
dynamic buffer sequence, indicates a successful\n * match. The call will block until one of the following conditions is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * read_some function. If the match condition function object already indicates\n * a match, the function returns immediately.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the SyncReadStream concept.\n *\n * @param buffers A dynamic buffer sequence into which the data will be read.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<DynamicBuffer_v2::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes in the dynamic buffer sequence's get area that\n * have been fully consumed by the match function. 
Returns 0 if an error\n * occurred.\n *\n * @note After a successful read_until operation, the dynamic buffer sequence\n * may contain additional data beyond that which matched the function object.\n * An application will typically leave that data in the dynamic buffer sequence\n * for a subsequent read_until operation to examine.\n *\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. It must be specialised for other user-defined\n * function objects.\n */\ntemplate <typename SyncReadStream,\n    typename DynamicBuffer_v2, typename MatchCondition>\nstd::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers,\n    MatchCondition match_condition, asio::error_code& ec,\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n/*@}*/\n/**\n * @defgroup async_read_until asio::async_read_until\n *\n * @brief The @c async_read_until function is a composed asynchronous operation\n * that reads data into a dynamic buffer sequence, or into a streambuf, until\n * it contains a delimiter, matches a regular expression, or a function object\n * indicates a match.\n */\n/*@{*/\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Start an asynchronous operation to read data into a dynamic buffer sequence\n/// until it contains a specified delimiter.\n/**\n * This function is used to asynchronously read data into the specified dynamic\n * buffer sequence until the dynamic buffer sequence's get area contains the\n * specified delimiter. The function call always returns immediately. 
The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the dynamic buffer sequence's get area already contains the delimiter, this\n * asynchronous operation completes immediately. The program must ensure that\n * the stream performs no other read operations (such as async_read,\n * async_read_until, the stream's async_read_some function, or any other\n * composed operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param delim The delimiter character.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the dynamic buffer sequence's\n *   // get area up to and including the delimiter.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the dynamic buffer\n * sequence may contain additional data beyond the delimiter. An application\n * will typically leave that data in the dynamic buffer sequence for a\n * subsequent async_read_until operation to examine.\n *\n * @par Example\n * To asynchronously read data into a @c std::string until a newline is\n * encountered:\n * @code std::string data;\n * ...\n * void handler(const asio::error_code& e, std::size_t n)\n * {\n *   if (!e)\n *   {\n *     std::string line = data.substr(0, n);\n *     data.erase(0, n);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, data, '\\n', handler); @endcode\n * After the @c async_read_until operation completes successfully, the buffer\n * @c data contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the buffer @c data\n * as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Start an asynchronous operation to read data into a dynamic buffer sequence\n/// until it contains a specified delimiter.\n/**\n * This function is used to asynchronously read data into the specified dynamic\n * buffer sequence until the dynamic buffer sequence's get area contains the\n * specified delimiter. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the dynamic buffer sequence's get area already contains the delimiter, this\n * asynchronous operation completes immediately. 
The program must ensure that\n * the stream performs no other read operations (such as async_read,\n * async_read_until, the stream's async_read_some function, or any other\n * composed operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param delim The delimiter string.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the dynamic buffer sequence's\n *   // get area up to and including the delimiter.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the dynamic buffer\n * sequence may contain additional data beyond the delimiter. 
An application\n * will typically leave that data in the dynamic buffer sequence for a\n * subsequent async_read_until operation to examine.\n *\n * @par Example\n * To asynchronously read data into a @c std::string until a CR-LF sequence is\n * encountered:\n * @code std::string data;\n * ...\n * void handler(const asio::error_code& e, std::size_t n)\n * {\n *   if (!e)\n *   {\n *     std::string line = data.substr(0, n);\n *     data.erase(0, n);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, data, \"\\r\\n\", handler); @endcode\n * After the @c async_read_until operation completes successfully, the string\n * @c data contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the string @c data\n * as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_STRING_VIEW_PARAM delim,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if defined(ASIO_HAS_BOOST_REGEX) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Start an asynchronous operation to read data into a dynamic buffer sequence\n/// until some part of its data matches a regular expression.\n/**\n * This function is used to asynchronously read data into the specified dynamic\n * buffer sequence until the dynamic buffer sequence's get area contains some\n * data that matches a regular expression. The function call always returns\n * immediately. The asynchronous operation will continue until one of the\n * following conditions is true:\n *\n * @li A substring of the dynamic buffer sequence's get area matches the regular\n * expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. 
If\n * the dynamic buffer sequence's get area already contains data that matches\n * the regular expression, this asynchronous operation completes immediately.\n * The program must ensure that the stream performs no other read operations\n * (such as async_read, async_read_until, the stream's async_read_some\n * function, or any other composed operations that perform reads) until this\n * operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param expr The regular expression.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the dynamic buffer\n *   // sequence's get area up to and including the\n *   // substring that matches the regular expression.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the dynamic buffer\n * sequence may contain additional data beyond that which matched the regular\n * expression. 
An application will typically leave that data in the dynamic\n * buffer sequence for a subsequent async_read_until operation to examine.\n *\n * @par Example\n * To asynchronously read data into a @c std::string until a CR-LF sequence is\n * encountered:\n * @code std::string data;\n * ...\n * void handler(const asio::error_code& e, std::size_t size)\n * {\n *   if (!e)\n *   {\n *     std::string line = data.substr(0, n);\n *     data.erase(0, n);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, data,\n *     boost::regex(\"\\r\\n\"), handler); @endcode\n * After the @c async_read_until operation completes successfully, the string\n * @c data contains the data which matched the regular expression:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the match,\n * so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the string @c data\n * as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    const boost::regex& expr,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n       // || defined(GENERATING_DOCUMENTATION)\n\n/// Start an asynchronous operation to read data into a dynamic buffer sequence\n/// until a function object indicates a match.\n/**\n * This function is used to asynchronously read data into the specified dynamic\n * buffer sequence until a user-defined match condition function object, when\n * applied to the data contained in the dynamic buffer sequence, indicates a\n * successful match. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the match condition function object already indicates a match, this\n * asynchronous operation completes immediately. 
The program must ensure that\n * the stream performs no other read operations (such as async_read,\n * async_read_until, the stream's async_read_some function, or any other\n * composed operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<typename DynamicBuffer_v1::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the dynamic buffer sequence's\n *   // get area that have been fully consumed by the match\n *   // function. 
0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the dynamic buffer\n * sequence may contain additional data beyond that which matched the function\n * object. An application will typically leave that data in the dynamic buffer\n * sequence for a subsequent async_read_until operation to examine.\n *\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. It must be specialised for other user-defined\n * function objects.\n *\n * @par Examples\n * To asynchronously read data into a @c std::string until whitespace is\n * encountered:\n * @code typedef asio::buffers_iterator<\n *     asio::const_buffers_1> iterator;\n *\n * std::pair<iterator, bool>\n * match_whitespace(iterator begin, iterator end)\n * {\n *   iterator i = begin;\n *   while (i != end)\n *     if (std::isspace(*i++))\n *       return std::make_pair(i, true);\n *   return std::make_pair(i, false);\n * }\n * ...\n * void handler(const asio::error_code& e, std::size_t size);\n * ...\n * std::string data;\n * asio::async_read_until(s, data, match_whitespace, handler);\n * @endcode\n *\n * To asynchronously read data into a @c std::string until a matching character\n * is found:\n * @code class match_char\n * {\n * public:\n *   explicit match_char(char c) : c_(c) {}\n *\n *   template <typename Iterator>\n *   std::pair<Iterator, bool> operator()(\n *       Iterator begin, Iterator end) const\n *   {\n *     Iterator i = begin;\n *     while (i != end)\n *       if (c_ == *i++)\n *         return std::make_pair(i, true);\n *     return 
std::make_pair(i, false);\n *   }\n *\n * private:\n *   char c_;\n * };\n *\n * namespace asio {\n *   template <> struct is_match_condition<match_char>\n *     : public boost::true_type {};\n * } // namespace asio\n * ...\n * void handler(const asio::error_code& e, std::size_t size);\n * ...\n * std::string data;\n * asio::async_read_until(s, data, match_char('a'), handler);\n * @endcode\n */\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v1, typename MatchCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    MatchCondition match_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Start an asynchronous operation to read data into a streambuf until it\n/// contains a specified delimiter.\n/**\n * This function is used to asynchronously read data into the specified\n * streambuf until the streambuf's get area contains the specified delimiter.\n * The function call always returns immediately. The asynchronous operation\n * will continue until one of the following conditions is true:\n *\n * @li The get area of the streambuf contains the specified delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. 
If\n * the streambuf's get area already contains the delimiter, this asynchronous\n * operation completes immediately. The program must ensure that the stream\n * performs no other read operations (such as async_read, async_read_until, the\n * stream's async_read_some function, or any other composed operations that\n * perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read. Ownership of\n * the streambuf is retained by the caller, which must guarantee that it remains\n * valid until the handler is called.\n *\n * @param delim The delimiter character.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the streambuf's get\n *   // area up to and including the delimiter.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the streambuf may\n * contain additional data beyond the delimiter. 
An application will typically\n * leave that data in the streambuf for a subsequent async_read_until operation\n * to examine.\n *\n * @par Example\n * To asynchronously read data into a streambuf until a newline is encountered:\n * @code asio::streambuf b;\n * ...\n * void handler(const asio::error_code& e, std::size_t size)\n * {\n *   if (!e)\n *   {\n *     std::istream is(&b);\n *     std::string line;\n *     std::getline(is, line);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, b, '\\n', handler); @endcode\n * After the @c async_read_until operation completes successfully, the buffer\n * @c b contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\n', 'd', 'e', ... } @endcode\n * The call to @c std::getline then extracts the data up to and including the\n * newline (which is discarded), so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c' } @endcode\n * The remaining data is left in the buffer @c b as follows:\n * @code { 'd', 'e', ... } @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, char delim,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type));\n\n/// Start an asynchronous operation to read data into a streambuf until it\n/// contains a specified delimiter.\n/**\n * This function is used to asynchronously read data into the specified\n * streambuf until the streambuf's get area contains the specified delimiter.\n * The function call always returns immediately. 
The asynchronous operation\n * will continue until one of the following conditions is true:\n *\n * @li The get area of the streambuf contains the specified delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the streambuf's get area already contains the delimiter, this asynchronous\n * operation completes immediately. The program must ensure that the stream\n * performs no other read operations (such as async_read, async_read_until, the\n * stream's async_read_some function, or any other composed operations that\n * perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read. Ownership of\n * the streambuf is retained by the caller, which must guarantee that it remains\n * valid until the handler is called.\n *\n * @param delim The delimiter string.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the streambuf's get\n *   // area up to and including the delimiter.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the streambuf may\n * contain additional data beyond the delimiter. 
An application will typically\n * leave that data in the streambuf for a subsequent async_read_until operation\n * to examine.\n *\n * @par Example\n * To asynchronously read data into a streambuf until a newline is encountered:\n * @code asio::streambuf b;\n * ...\n * void handler(const asio::error_code& e, std::size_t size)\n * {\n *   if (!e)\n *   {\n *     std::istream is(&b);\n *     std::string line;\n *     std::getline(is, line);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, b, \"\\r\\n\", handler); @endcode\n * After the @c async_read_until operation completes successfully, the buffer\n * @c b contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c std::getline then extracts the data up to and including the\n * newline (which is discarded), so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r' } @endcode\n * The remaining data is left in the buffer @c b as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    ASIO_STRING_VIEW_PARAM delim,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type));\n\n#if defined(ASIO_HAS_BOOST_REGEX) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Start an asynchronous operation to read data into a streambuf until some\n/// part of its data matches a regular expression.\n/**\n * This function is used to asynchronously read data into the specified\n * streambuf until the streambuf's get area contains some data that matches a\n * regular expression. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li A substring of the streambuf's get area matches the regular expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the streambuf's get area already contains data that matches the regular\n * expression, this asynchronous operation completes immediately. The program\n * must ensure that the stream performs no other read operations (such as\n * async_read, async_read_until, the stream's async_read_some function, or any\n * other composed operations that perform reads) until this operation\n * completes.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the AsyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read. Ownership of\n * the streambuf is retained by the caller, which must guarantee that it remains\n * valid until the handler is called.\n *\n * @param expr The regular expression.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the streambuf's get\n *   // area up to and including the substring\n *   // that matches the regular expression.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the streambuf may\n * contain additional data beyond that which matched the regular expression. 
An\n * application will typically leave that data in the streambuf for a subsequent\n * async_read_until operation to examine.\n *\n * @par Example\n * To asynchronously read data into a streambuf until a CR-LF sequence is\n * encountered:\n * @code asio::streambuf b;\n * ...\n * void handler(const asio::error_code& e, std::size_t size)\n * {\n *   if (!e)\n *   {\n *     std::istream is(&b);\n *     std::string line;\n *     std::getline(is, line);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, b, boost::regex(\"\\r\\n\"), handler); @endcode\n * After the @c async_read_until operation completes successfully, the buffer\n * @c b contains the data which matched the regular expression:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c std::getline then extracts the data up to and including the\n * newline (which is discarded), so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r' } @endcode\n * The remaining data is left in the buffer @c b as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b, const boost::regex& expr,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type));\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n       // || defined(GENERATING_DOCUMENTATION)\n\n/// Start an asynchronous operation to read data into a streambuf until a\n/// function object indicates a match.\n/**\n * This function is used to asynchronously read data into the specified\n * streambuf until a user-defined match condition function object, when applied\n * to the data contained in the streambuf, indicates a successful match. The\n * function call always returns immediately. The asynchronous operation will\n * continue until one of the following conditions is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the match condition function object already indicates a match, this\n * asynchronous operation completes immediately. 
The program must ensure that\n * the stream performs no other read operations (such as async_read,\n * async_read_until, the stream's async_read_some function, or any other\n * composed operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param b A streambuf object into which the data will be read.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<basic_streambuf<Allocator>::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the streambuf's get\n *   // area that have been fully consumed by the\n *   // match function. 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the streambuf may\n * contain additional data beyond that which matched the function object. An\n * application will typically leave that data in the streambuf for a subsequent\n * async_read_until operation to examine.\n *\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. It must be specialised for other user-defined\n * function objects.\n *\n * @par Examples\n * To asynchronously read data into a streambuf until whitespace is encountered:\n * @code typedef asio::buffers_iterator<\n *     asio::streambuf::const_buffers_type> iterator;\n *\n * std::pair<iterator, bool>\n * match_whitespace(iterator begin, iterator end)\n * {\n *   iterator i = begin;\n *   while (i != end)\n *     if (std::isspace(*i++))\n *       return std::make_pair(i, true);\n *   return std::make_pair(i, false);\n * }\n * ...\n * void handler(const asio::error_code& e, std::size_t size);\n * ...\n * asio::streambuf b;\n * asio::async_read_until(s, b, match_whitespace, handler);\n * @endcode\n *\n * To asynchronously read data into a streambuf until a matching character is\n * found:\n * @code class match_char\n * {\n * public:\n *   explicit match_char(char c) : c_(c) {}\n *\n *   template <typename Iterator>\n *   std::pair<Iterator, bool> operator()(\n *       Iterator begin, Iterator end) const\n *   {\n *     Iterator i = begin;\n *     while (i != end)\n *       if (c_ == *i++)\n *         return std::make_pair(i, true);\n *     return std::make_pair(i, false);\n *   }\n *\n * private:\n *   char c_;\n * };\n *\n * namespace asio {\n *   template <> struct is_match_condition<match_char>\n *     : public boost::true_type {};\n * } // namespace asio\n * ...\n * void handler(const 
asio::error_code& e, std::size_t size);\n * ...\n * asio::streambuf b;\n * asio::async_read_until(s, b, match_char('a'), handler);\n * @endcode\n */\ntemplate <typename AsyncReadStream, typename Allocator, typename MatchCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s,\n    asio::basic_streambuf<Allocator>& b,\n    MatchCondition match_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<is_match_condition<MatchCondition>::value>::type* = 0);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Start an asynchronous operation to read data into a dynamic buffer sequence\n/// until it contains a specified delimiter.\n/**\n * This function is used to asynchronously read data into the specified dynamic\n * buffer sequence until the dynamic buffer sequence's get area contains the\n * specified delimiter. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the dynamic buffer sequence's get area already contains the delimiter, this\n * asynchronous operation completes immediately. 
The program must ensure that\n * the stream performs no other read operations (such as async_read,\n * async_read_until, the stream's async_read_some function, or any other\n * composed operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param delim The delimiter character.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the dynamic buffer sequence's\n *   // get area up to and including the delimiter.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the dynamic buffer\n * sequence may contain additional data beyond the delimiter. 
An application\n * will typically leave that data in the dynamic buffer sequence for a\n * subsequent async_read_until operation to examine.\n *\n * @par Example\n * To asynchronously read data into a @c std::string until a newline is\n * encountered:\n * @code std::string data;\n * ...\n * void handler(const asio::error_code& e, std::size_t size)\n * {\n *   if (!e)\n *   {\n *     std::string line = data.substr(0, n);\n *     data.erase(0, n);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, data, '\\n', handler); @endcode\n * After the @c async_read_until operation completes successfully, the buffer\n * @c data contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the buffer @c data\n * as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, char delim,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Start an asynchronous operation to read data into a dynamic buffer sequence\n/// until it contains a specified delimiter.\n/**\n * This function is used to asynchronously read data into the specified dynamic\n * buffer sequence until the dynamic buffer sequence's get area contains the\n * specified delimiter. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li The get area of the dynamic buffer sequence contains the specified\n * delimiter.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the dynamic buffer sequence's get area already contains the delimiter, this\n * asynchronous operation completes immediately. The program must ensure that\n * the stream performs no other read operations (such as async_read,\n * async_read_until, the stream's async_read_some function, or any other\n * composed operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. 
The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param delim The delimiter string.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the dynamic buffer sequence's\n *   // get area up to and including the delimiter.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the dynamic buffer\n * sequence may contain additional data beyond the delimiter. 
An application\n * will typically leave that data in the dynamic buffer sequence for a\n * subsequent async_read_until operation to examine.\n *\n * @par Example\n * To asynchronously read data into a @c std::string until a CR-LF sequence is\n * encountered:\n * @code std::string data;\n * ...\n * void handler(const asio::error_code& e, std::size_t size)\n * {\n *   if (!e)\n *   {\n *     std::string line = data.substr(0, n);\n *     data.erase(0, n);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, data, \"\\r\\n\", handler); @endcode\n * After the @c async_read_until operation completes successfully, the string\n * @c data contains the delimiter:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the\n * delimiter, so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the string @c data\n * as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    ASIO_STRING_VIEW_PARAM delim,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if defined(ASIO_HAS_BOOST_REGEX) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n/// Start an asynchronous operation to read data into a dynamic buffer sequence\n/// until some part of its data matches a regular expression.\n/**\n * This function is used to asynchronously read data into the specified dynamic\n * buffer sequence until the dynamic buffer sequence's get area contains some\n * data that matches a regular expression. The function call always returns\n * immediately. The asynchronous operation will continue until one of the\n * following conditions is true:\n *\n * @li A substring of the dynamic buffer sequence's get area matches the regular\n * expression.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. 
If\n * the dynamic buffer sequence's get area already contains data that matches\n * the regular expression, this asynchronous operation completes immediately.\n * The program must ensure that the stream performs no other read operations\n * (such as async_read, async_read_until, the stream's async_read_some\n * function, or any other composed operations that perform reads) until this\n * operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param expr The regular expression.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the dynamic buffer\n *   // sequence's get area up to and including the\n *   // substring that matches the regular expression.\n *   // 0 if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the dynamic buffer\n * sequence may contain additional data beyond that which matched the regular\n * expression. 
An application will typically leave that data in the dynamic\n * buffer sequence for a subsequent async_read_until operation to examine.\n *\n * @par Example\n * To asynchronously read data into a @c std::string until a CR-LF sequence is\n * encountered:\n * @code std::string data;\n * ...\n * void handler(const asio::error_code& e, std::size_t size)\n * {\n *   if (!e)\n *   {\n *     std::string line = data.substr(0, n);\n *     data.erase(0, n);\n *     ...\n *   }\n * }\n * ...\n * asio::async_read_until(s, data,\n *     boost::regex(\"\\r\\n\"), handler); @endcode\n * After the @c async_read_until operation completes successfully, the string\n * @c data contains the data which matched the regular expression:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n', 'd', 'e', ... } @endcode\n * The call to @c substr then extracts the data up to and including the match,\n * so that the string @c line contains:\n * @code { 'a', 'b', ..., 'c', '\\r', '\\n' } @endcode\n * After the call to @c erase, the remaining data is left in the string @c data\n * as follows:\n * @code { 'd', 'e', ... 
} @endcode\n * This data may be the start of a new line, to be extracted by a subsequent\n * @c async_read_until operation.\n */\ntemplate <typename AsyncReadStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    const boost::regex& expr,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n#endif // defined(ASIO_HAS_BOOST_REGEX)\n       // || defined(GENERATING_DOCUMENTATION)\n\n/// Start an asynchronous operation to read data into a dynamic buffer sequence\n/// until a function object indicates a match.\n/**\n * This function is used to asynchronously read data into the specified dynamic\n * buffer sequence until a user-defined match condition function object, when\n * applied to the data contained in the dynamic buffer sequence, indicates a\n * successful match. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li The match condition function object returns a std::pair where the second\n * element evaluates to true.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_read_some function, and is known as a <em>composed operation</em>. If\n * the match condition function object already indicates a match, this\n * asynchronous operation completes immediately. 
The program must ensure that\n * the stream performs no other read operations (such as async_read,\n * async_read_until, the stream's async_read_some function, or any other\n * composed operations that perform reads) until this operation completes.\n *\n * @param s The stream from which the data is to be read. The type must support\n * the AsyncReadStream concept.\n *\n * @param buffers The dynamic buffer sequence into which the data will be read.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param match_condition The function object to be called to determine whether\n * a match exists. The signature of the function object must be:\n * @code pair<iterator, bool> match_condition(iterator begin, iterator end);\n * @endcode\n * where @c iterator represents the type:\n * @code buffers_iterator<typename DynamicBuffer_v2::const_buffers_type>\n * @endcode\n * The iterator parameters @c begin and @c end define the range of bytes to be\n * scanned to determine whether there is a match. The @c first member of the\n * return value is an iterator marking one-past-the-end of the bytes that have\n * been consumed by the match function. This iterator is used to calculate the\n * @c begin parameter for any subsequent invocation of the match condition. The\n * @c second member of the return value is true if a match has been found, false\n * otherwise.\n *\n * @param handler The handler to be called when the read operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // The number of bytes in the dynamic buffer sequence's\n *   // get area that have been fully consumed by the match\n *   // function. 
O if an error occurred.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @note After a successful async_read_until operation, the dynamic buffer\n * sequence may contain additional data beyond that which matched the function\n * object. An application will typically leave that data in the dynamic buffer\n * sequence for a subsequent async_read_until operation to examine.\n *\n * @note The default implementation of the @c is_match_condition type trait\n * evaluates to true for function pointers and function objects with a\n * @c result_type typedef. It must be specialised for other user-defined\n * function objects.\n *\n * @par Examples\n * To asynchronously read data into a @c std::string until whitespace is\n * encountered:\n * @code typedef asio::buffers_iterator<\n *     asio::const_buffers_1> iterator;\n *\n * std::pair<iterator, bool>\n * match_whitespace(iterator begin, iterator end)\n * {\n *   iterator i = begin;\n *   while (i != end)\n *     if (std::isspace(*i++))\n *       return std::make_pair(i, true);\n *   return std::make_pair(i, false);\n * }\n * ...\n * void handler(const asio::error_code& e, std::size_t size);\n * ...\n * std::string data;\n * asio::async_read_until(s, data, match_whitespace, handler);\n * @endcode\n *\n * To asynchronously read data into a @c std::string until a matching character\n * is found:\n * @code class match_char\n * {\n * public:\n *   explicit match_char(char c) : c_(c) {}\n *\n *   template <typename Iterator>\n *   std::pair<Iterator, bool> operator()(\n *       Iterator begin, Iterator end) const\n *   {\n *     Iterator i = begin;\n *     while (i != end)\n *       if (c_ == *i++)\n *         return std::make_pair(i, true);\n *     return 
std::make_pair(i, false);\n *   }\n *\n * private:\n *   char c_;\n * };\n *\n * namespace asio {\n *   template <> struct is_match_condition<match_char>\n *     : public boost::true_type {};\n * } // namespace asio\n * ...\n * void handler(const asio::error_code& e, std::size_t size);\n * ...\n * std::string data;\n * asio::async_read_until(s, data, match_char('a'), handler);\n * @endcode\n */\ntemplate <typename AsyncReadStream,\n    typename DynamicBuffer_v2, typename MatchCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) ReadHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncReadStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n    void (asio::error_code, std::size_t))\nasync_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers,\n    MatchCondition match_condition,\n    ASIO_MOVE_ARG(ReadHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncReadStream::executor_type),\n    typename enable_if<\n      is_match_condition<MatchCondition>::value\n        && is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/read_until.hpp\"\n\n#endif // ASIO_READ_UNTIL_HPP\n"
  },
  {
    "path": "src/third_party/asio/redirect_error.hpp",
    "content": "//\n// redirect_error.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_REDIRECT_ERROR_HPP\n#define ASIO_REDIRECT_ERROR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/error_code.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Completion token type used to specify that an error produced by an\n/// asynchronous operation is captured to an error_code variable.\n/**\n * The redirect_error_t class is used to indicate that any error_code produced\n * by an asynchronous operation is captured to a specified variable.\n */\ntemplate <typename CompletionToken>\nclass redirect_error_t\n{\npublic:\n  /// Constructor. \n  template <typename T>\n  redirect_error_t(ASIO_MOVE_ARG(T) completion_token,\n      asio::error_code& ec)\n    : token_(ASIO_MOVE_CAST(T)(completion_token)),\n      ec_(ec)\n  {\n  }\n\n//private:\n  CompletionToken token_;\n  asio::error_code& ec_;\n};\n\n/// Create a completion token to capture error_code values to a variable.\ntemplate <typename CompletionToken>\ninline redirect_error_t<typename decay<CompletionToken>::type> redirect_error(\n    ASIO_MOVE_ARG(CompletionToken) completion_token,\n    asio::error_code& ec)\n{\n  return redirect_error_t<typename decay<CompletionToken>::type>(\n      ASIO_MOVE_CAST(CompletionToken)(completion_token), ec);\n}\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/redirect_error.hpp\"\n\n#endif // ASIO_REDIRECT_ERROR_HPP\n"
  },
  {
    "path": "src/third_party/asio/serial_port.hpp",
    "content": "//\n// serial_port.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SERIAL_PORT_HPP\n#define ASIO_SERIAL_PORT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_SERIAL_PORT) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/basic_serial_port.hpp\"\n\nnamespace asio {\n\n/// Typedef for the typical usage of a serial port.\ntypedef basic_serial_port<> serial_port;\n\n} // namespace asio\n\n#endif // defined(ASIO_HAS_SERIAL_PORT)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_SERIAL_PORT_HPP\n"
  },
  {
    "path": "src/third_party/asio/serial_port_base.hpp",
    "content": "//\n// serial_port_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SERIAL_PORT_BASE_HPP\n#define ASIO_SERIAL_PORT_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_SERIAL_PORT) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n# include <termios.h>\n#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)\n\n#include \"asio/detail/socket_types.hpp\"\n#include \"asio/error_code.hpp\"\n\n#if defined(GENERATING_DOCUMENTATION)\n# define ASIO_OPTION_STORAGE implementation_defined\n#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n# define ASIO_OPTION_STORAGE DCB\n#else\n# define ASIO_OPTION_STORAGE termios\n#endif\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// The serial_port_base class is used as a base for the basic_serial_port class\n/// template so that we have a common place to define the serial port options.\nclass serial_port_base\n{\npublic:\n  /// Serial port option to permit changing the baud rate.\n  /**\n   * Implements changing the baud rate for a given serial port.\n   */\n  class baud_rate\n  {\n  public:\n    explicit baud_rate(unsigned int rate = 0);\n    unsigned int value() const;\n    ASIO_DECL ASIO_SYNC_OP_VOID store(\n        ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec) const;\n    ASIO_DECL ASIO_SYNC_OP_VOID load(\n        const ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec);\n  private:\n    unsigned int value_;\n  };\n\n  /// Serial port option to permit changing the flow 
control.\n  /**\n   * Implements changing the flow control for a given serial port.\n   */\n  class flow_control\n  {\n  public:\n    enum type { none, software, hardware };\n    ASIO_DECL explicit flow_control(type t = none);\n    type value() const;\n    ASIO_DECL ASIO_SYNC_OP_VOID store(\n        ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec) const;\n    ASIO_DECL ASIO_SYNC_OP_VOID load(\n        const ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec);\n  private:\n    type value_;\n  };\n\n  /// Serial port option to permit changing the parity.\n  /**\n   * Implements changing the parity for a given serial port.\n   */\n  class parity\n  {\n  public:\n    enum type { none, odd, even };\n    ASIO_DECL explicit parity(type t = none);\n    type value() const;\n    ASIO_DECL ASIO_SYNC_OP_VOID store(\n        ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec) const;\n    ASIO_DECL ASIO_SYNC_OP_VOID load(\n        const ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec);\n  private:\n    type value_;\n  };\n\n  /// Serial port option to permit changing the number of stop bits.\n  /**\n   * Implements changing the number of stop bits for a given serial port.\n   */\n  class stop_bits\n  {\n  public:\n    enum type { one, onepointfive, two };\n    ASIO_DECL explicit stop_bits(type t = one);\n    type value() const;\n    ASIO_DECL ASIO_SYNC_OP_VOID store(\n        ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec) const;\n    ASIO_DECL ASIO_SYNC_OP_VOID load(\n        const ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec);\n  private:\n    type value_;\n  };\n\n  /// Serial port option to permit changing the character size.\n  /**\n   * Implements changing the character size for a given serial port.\n   */\n  class character_size\n  {\n  public:\n    ASIO_DECL explicit character_size(unsigned int t = 8);\n    unsigned int value() const;\n    ASIO_DECL ASIO_SYNC_OP_VOID store(\n        
ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec) const;\n    ASIO_DECL ASIO_SYNC_OP_VOID load(\n        const ASIO_OPTION_STORAGE& storage,\n        asio::error_code& ec);\n  private:\n    unsigned int value_;\n  };\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  ~serial_port_base()\n  {\n  }\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#undef ASIO_OPTION_STORAGE\n\n#include \"asio/impl/serial_port_base.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/serial_port_base.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // defined(ASIO_HAS_SERIAL_PORT)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_SERIAL_PORT_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/signal_set.hpp",
    "content": "//\n// signal_set.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SIGNAL_SET_HPP\n#define ASIO_SIGNAL_SET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/basic_signal_set.hpp\"\n\nnamespace asio {\n\n/// Typedef for the typical usage of a signal set.\ntypedef basic_signal_set<> signal_set;\n\n} // namespace asio\n\n#endif // ASIO_SIGNAL_SET_HPP\n"
  },
  {
    "path": "src/third_party/asio/socket_base.hpp",
    "content": "//\n// socket_base.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SOCKET_BASE_HPP\n#define ASIO_SOCKET_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/io_control.hpp\"\n#include \"asio/detail/socket_option.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// The socket_base class is used as a base for the basic_stream_socket and\n/// basic_datagram_socket class templates so that we have a common place to\n/// define the shutdown_type and enum.\nclass socket_base\n{\npublic:\n  /// Different ways a socket may be shutdown.\n  enum shutdown_type\n  {\n#if defined(GENERATING_DOCUMENTATION)\n    /// Shutdown the receive side of the socket.\n    shutdown_receive = implementation_defined,\n\n    /// Shutdown the send side of the socket.\n    shutdown_send = implementation_defined,\n\n    /// Shutdown both send and receive on the socket.\n    shutdown_both = implementation_defined\n#else\n    shutdown_receive = ASIO_OS_DEF(SHUT_RD),\n    shutdown_send = ASIO_OS_DEF(SHUT_WR),\n    shutdown_both = ASIO_OS_DEF(SHUT_RDWR)\n#endif\n  };\n\n  /// Bitmask type for flags that can be passed to send and receive operations.\n  typedef int message_flags;\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// Peek at incoming data without removing it from the input queue.\n  static const int message_peek = implementation_defined;\n\n  /// Process out-of-band data.\n  static const int message_out_of_band = implementation_defined;\n\n  /// Specify that the data should not be subject to routing.\n  static const int message_do_not_route = 
implementation_defined;\n\n  /// Specifies that the data marks the end of a record.\n  static const int message_end_of_record = implementation_defined;\n#else\n  ASIO_STATIC_CONSTANT(int,\n      message_peek = ASIO_OS_DEF(MSG_PEEK));\n  ASIO_STATIC_CONSTANT(int,\n      message_out_of_band = ASIO_OS_DEF(MSG_OOB));\n  ASIO_STATIC_CONSTANT(int,\n      message_do_not_route = ASIO_OS_DEF(MSG_DONTROUTE));\n  ASIO_STATIC_CONSTANT(int,\n      message_end_of_record = ASIO_OS_DEF(MSG_EOR));\n#endif\n\n  /// Wait types.\n  /**\n   * For use with basic_socket::wait() and basic_socket::async_wait().\n   */\n  enum wait_type\n  {\n    /// Wait for a socket to become ready to read.\n    wait_read,\n\n    /// Wait for a socket to become ready to write.\n    wait_write,\n\n    /// Wait for a socket to have error conditions pending.\n    wait_error\n  };\n\n  /// Socket option to permit sending of broadcast messages.\n  /**\n   * Implements the SOL_SOCKET/SO_BROADCAST socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::udp::socket socket(my_context);\n   * ...\n   * asio::socket_base::broadcast option(true);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::udp::socket socket(my_context);\n   * ...\n   * asio::socket_base::broadcast option;\n   * socket.get_option(option);\n   * bool is_set = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Boolean_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined broadcast;\n#else\n  typedef asio::detail::socket_option::boolean<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_BROADCAST)>\n      broadcast;\n#endif\n\n  /// Socket option to enable socket-level debugging.\n  /**\n   * Implements the SOL_SOCKET/SO_DEBUG socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   
* asio::socket_base::debug option(true);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::debug option;\n   * socket.get_option(option);\n   * bool is_set = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Boolean_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined debug;\n#else\n  typedef asio::detail::socket_option::boolean<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DEBUG)> debug;\n#endif\n\n  /// Socket option to prevent routing, use local interfaces only.\n  /**\n   * Implements the SOL_SOCKET/SO_DONTROUTE socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::udp::socket socket(my_context);\n   * ...\n   * asio::socket_base::do_not_route option(true);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::udp::socket socket(my_context);\n   * ...\n   * asio::socket_base::do_not_route option;\n   * socket.get_option(option);\n   * bool is_set = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Boolean_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined do_not_route;\n#else\n  typedef asio::detail::socket_option::boolean<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DONTROUTE)>\n      do_not_route;\n#endif\n\n  /// Socket option to send keep-alives.\n  /**\n   * Implements the SOL_SOCKET/SO_KEEPALIVE socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::keep_alive option(true);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * 
...\n   * asio::socket_base::keep_alive option;\n   * socket.get_option(option);\n   * bool is_set = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Boolean_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined keep_alive;\n#else\n  typedef asio::detail::socket_option::boolean<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_KEEPALIVE)> keep_alive;\n#endif\n\n  /// Socket option for the send buffer size of a socket.\n  /**\n   * Implements the SOL_SOCKET/SO_SNDBUF socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::send_buffer_size option(8192);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::send_buffer_size option;\n   * socket.get_option(option);\n   * int size = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Integer_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined send_buffer_size;\n#else\n  typedef asio::detail::socket_option::integer<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_SNDBUF)>\n      send_buffer_size;\n#endif\n\n  /// Socket option for the send low watermark.\n  /**\n   * Implements the SOL_SOCKET/SO_SNDLOWAT socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::send_low_watermark option(1024);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::send_low_watermark option;\n   * socket.get_option(option);\n   * int size = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * 
Socket_Option, Integer_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined send_low_watermark;\n#else\n  typedef asio::detail::socket_option::integer<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_SNDLOWAT)>\n      send_low_watermark;\n#endif\n\n  /// Socket option for the receive buffer size of a socket.\n  /**\n   * Implements the SOL_SOCKET/SO_RCVBUF socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::receive_buffer_size option(8192);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::receive_buffer_size option;\n   * socket.get_option(option);\n   * int size = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Integer_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined receive_buffer_size;\n#else\n  typedef asio::detail::socket_option::integer<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_RCVBUF)>\n      receive_buffer_size;\n#endif\n\n  /// Socket option for the receive low watermark.\n  /**\n   * Implements the SOL_SOCKET/SO_RCVLOWAT socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::receive_low_watermark option(1024);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::receive_low_watermark option;\n   * socket.get_option(option);\n   * int size = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Integer_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined 
receive_low_watermark;\n#else\n  typedef asio::detail::socket_option::integer<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_RCVLOWAT)>\n      receive_low_watermark;\n#endif\n\n  /// Socket option to allow the socket to be bound to an address that is\n  /// already in use.\n  /**\n   * Implements the SOL_SOCKET/SO_REUSEADDR socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::socket_base::reuse_address option(true);\n   * acceptor.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::socket_base::reuse_address option;\n   * acceptor.get_option(option);\n   * bool is_set = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Boolean_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined reuse_address;\n#else\n  typedef asio::detail::socket_option::boolean<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_REUSEADDR)>\n      reuse_address;\n#endif\n\n  /// Socket option to specify whether the socket lingers on close if unsent\n  /// data is present.\n  /**\n   * Implements the SOL_SOCKET/SO_LINGER socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::linger option(true, 30);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::linger option;\n   * socket.get_option(option);\n   * bool is_set = option.enabled();\n   * unsigned short timeout = option.timeout();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Linger_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined linger;\n#else\n  
typedef asio::detail::socket_option::linger<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_LINGER)>\n      linger;\n#endif\n\n  /// Socket option for putting received out-of-band data inline.\n  /**\n   * Implements the SOL_SOCKET/SO_OOBINLINE socket option.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::out_of_band_inline option(true);\n   * socket.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::out_of_band_inline option;\n   * socket.get_option(option);\n   * bool value = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Boolean_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined out_of_band_inline;\n#else\n  typedef asio::detail::socket_option::boolean<\n    ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_OOBINLINE)>\n      out_of_band_inline;\n#endif\n\n  /// Socket option to report aborted connections on accept.\n  /**\n   * Implements a custom socket option that determines whether or not an accept\n   * operation is permitted to fail with asio::error::connection_aborted.\n   * By default the option is false.\n   *\n   * @par Examples\n   * Setting the option:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::socket_base::enable_connection_aborted option(true);\n   * acceptor.set_option(option);\n   * @endcode\n   *\n   * @par\n   * Getting the current option value:\n   * @code\n   * asio::ip::tcp::acceptor acceptor(my_context);\n   * ...\n   * asio::socket_base::enable_connection_aborted option;\n   * acceptor.get_option(option);\n   * bool is_set = option.value();\n   * @endcode\n   *\n   * @par Concepts:\n   * Socket_Option, Boolean_Socket_Option.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined 
enable_connection_aborted;\n#else\n  typedef asio::detail::socket_option::boolean<\n    asio::detail::custom_socket_option_level,\n    asio::detail::enable_connection_aborted_option>\n    enable_connection_aborted;\n#endif\n\n  /// IO control command to get the amount of data that can be read without\n  /// blocking.\n  /**\n   * Implements the FIONREAD IO control command.\n   *\n   * @par Example\n   * @code\n   * asio::ip::tcp::socket socket(my_context);\n   * ...\n   * asio::socket_base::bytes_readable command(true);\n   * socket.io_control(command);\n   * std::size_t bytes_readable = command.get();\n   * @endcode\n   *\n   * @par Concepts:\n   * IO_Control_Command, Size_IO_Control_Command.\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined bytes_readable;\n#else\n  typedef asio::detail::io_control::bytes_readable bytes_readable;\n#endif\n\n  /// The maximum length of the queue of pending incoming connections.\n#if defined(GENERATING_DOCUMENTATION)\n  static const int max_listen_connections = implementation_defined;\n#else\n  ASIO_STATIC_CONSTANT(int, max_listen_connections\n      = ASIO_OS_DEF(SOMAXCONN));\n#endif\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use max_listen_connections.) The maximum length of the queue\n  /// of pending incoming connections.\n#if defined(GENERATING_DOCUMENTATION)\n  static const int max_connections = implementation_defined;\n#else\n  ASIO_STATIC_CONSTANT(int, max_connections\n      = ASIO_OS_DEF(SOMAXCONN));\n#endif\n#endif // !defined(ASIO_NO_DEPRECATED)\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  ~socket_base()\n  {\n  }\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SOCKET_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/spawn.hpp",
    "content": "//\n// spawn.hpp\n// ~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SPAWN_HPP\n#define ASIO_SPAWN_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <boost/coroutine/all.hpp>\n#include \"asio/bind_executor.hpp\"\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/detail/wrapped_handler.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/io_context.hpp\"\n#include \"asio/is_executor.hpp\"\n#include \"asio/strand.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Context object the represents the currently executing coroutine.\n/**\n * The basic_yield_context class is used to represent the currently executing\n * stackful coroutine. A basic_yield_context may be passed as a handler to an\n * asynchronous operation. For example:\n *\n * @code template <typename Handler>\n * void my_coroutine(basic_yield_context<Handler> yield)\n * {\n *   ...\n *   std::size_t n = my_socket.async_read_some(buffer, yield);\n *   ...\n * } @endcode\n *\n * The initiating function (async_read_some in the above example) suspends the\n * current coroutine. 
The coroutine is resumed when the asynchronous operation\n * completes, and the result of the operation is returned.\n */\ntemplate <typename Handler>\nclass basic_yield_context\n{\npublic:\n  /// The coroutine callee type, used by the implementation.\n  /**\n   * When using Boost.Coroutine v1, this type is:\n   * @code typename coroutine<void()> @endcode\n   * When using Boost.Coroutine v2 (unidirectional coroutines), this type is:\n   * @code push_coroutine<void> @endcode\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined callee_type;\n#elif defined(BOOST_COROUTINES_UNIDIRECT) || defined(BOOST_COROUTINES_V2)\n  typedef boost::coroutines::push_coroutine<void> callee_type;\n#else\n  typedef boost::coroutines::coroutine<void()> callee_type;\n#endif\n  \n  /// The coroutine caller type, used by the implementation.\n  /**\n   * When using Boost.Coroutine v1, this type is:\n   * @code typename coroutine<void()>::caller_type @endcode\n   * When using Boost.Coroutine v2 (unidirectional coroutines), this type is:\n   * @code pull_coroutine<void> @endcode\n   */\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined caller_type;\n#elif defined(BOOST_COROUTINES_UNIDIRECT) || defined(BOOST_COROUTINES_V2)\n  typedef boost::coroutines::pull_coroutine<void> caller_type;\n#else\n  typedef boost::coroutines::coroutine<void()>::caller_type caller_type;\n#endif\n\n  /// Construct a yield context to represent the specified coroutine.\n  /**\n   * Most applications do not need to use this constructor. 
Instead, the\n   * spawn() function passes a yield context as an argument to the coroutine\n   * function.\n   */\n  basic_yield_context(\n      const detail::weak_ptr<callee_type>& coro,\n      caller_type& ca, Handler& handler)\n    : coro_(coro),\n      ca_(ca),\n      handler_(handler),\n      ec_(0)\n  {\n  }\n\n  /// Construct a yield context from another yield context type.\n  /**\n   * Requires that OtherHandler be convertible to Handler.\n   */\n  template <typename OtherHandler>\n  basic_yield_context(const basic_yield_context<OtherHandler>& other)\n    : coro_(other.coro_),\n      ca_(other.ca_),\n      handler_(other.handler_),\n      ec_(other.ec_)\n  {\n  }\n\n  /// Return a yield context that sets the specified error_code.\n  /**\n   * By default, when a yield context is used with an asynchronous operation, a\n   * non-success error_code is converted to system_error and thrown. This\n   * operator may be used to specify an error_code object that should instead be\n   * set with the asynchronous operation's result. 
For example:\n   *\n   * @code template <typename Handler>\n   * void my_coroutine(basic_yield_context<Handler> yield)\n   * {\n   *   ...\n   *   std::size_t n = my_socket.async_read_some(buffer, yield[ec]);\n   *   if (ec)\n   *   {\n   *     // An error occurred.\n   *   }\n   *   ...\n   * } @endcode\n   */\n  basic_yield_context operator[](asio::error_code& ec) const\n  {\n    basic_yield_context tmp(*this);\n    tmp.ec_ = &ec;\n    return tmp;\n  }\n\n#if defined(GENERATING_DOCUMENTATION)\nprivate:\n#endif // defined(GENERATING_DOCUMENTATION)\n  detail::weak_ptr<callee_type> coro_;\n  caller_type& ca_;\n  Handler handler_;\n  asio::error_code* ec_;\n};\n\n#if defined(GENERATING_DOCUMENTATION)\n/// Context object that represents the currently executing coroutine.\ntypedef basic_yield_context<unspecified> yield_context;\n#else // defined(GENERATING_DOCUMENTATION)\ntypedef basic_yield_context<\n  executor_binder<void(*)(), executor> > yield_context;\n#endif // defined(GENERATING_DOCUMENTATION)\n\n/**\n * @defgroup spawn asio::spawn\n *\n * @brief Start a new stackful coroutine.\n *\n * The spawn() function is a high-level wrapper over the Boost.Coroutine\n * library. 
This function enables programs to implement asynchronous logic in a\n * synchronous manner, as illustrated by the following example:\n *\n * @code asio::spawn(my_strand, do_echo);\n *\n * // ...\n *\n * void do_echo(asio::yield_context yield)\n * {\n *   try\n *   {\n *     char data[128];\n *     for (;;)\n *     {\n *       std::size_t length =\n *         my_socket.async_read_some(\n *           asio::buffer(data), yield);\n *\n *       asio::async_write(my_socket,\n *           asio::buffer(data, length), yield);\n *     }\n *   }\n *   catch (std::exception& e)\n *   {\n *     // ...\n *   }\n * } @endcode\n */\n/*@{*/\n\n/// Start a new stackful coroutine, calling the specified handler when it\n/// completes.\n/**\n * This function is used to launch a new coroutine.\n *\n * @param function The coroutine function. The function must have the signature:\n * @code void function(basic_yield_context<Handler> yield); @endcode\n *\n * @param attributes Boost.Coroutine attributes used to customise the coroutine.\n */\ntemplate <typename Function>\nvoid spawn(ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes\n      = boost::coroutines::attributes());\n\n/// Start a new stackful coroutine, calling the specified handler when it\n/// completes.\n/**\n * This function is used to launch a new coroutine.\n *\n * @param handler A handler to be called when the coroutine exits. More\n * importantly, the handler provides an execution context (via the the handler\n * invocation hook) for the coroutine. The handler must have the signature:\n * @code void handler(); @endcode\n *\n * @param function The coroutine function. 
The function must have the signature:\n * @code void function(basic_yield_context<Handler> yield); @endcode\n *\n * @param attributes Boost.Coroutine attributes used to customise the coroutine.\n */\ntemplate <typename Handler, typename Function>\nvoid spawn(ASIO_MOVE_ARG(Handler) handler,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes\n      = boost::coroutines::attributes(),\n    typename enable_if<!is_executor<typename decay<Handler>::type>::value &&\n      !is_convertible<Handler&, execution_context&>::value>::type* = 0);\n\n/// Start a new stackful coroutine, inheriting the execution context of another.\n/**\n * This function is used to launch a new coroutine.\n *\n * @param ctx Identifies the current coroutine as a parent of the new\n * coroutine. This specifies that the new coroutine should inherit the\n * execution context of the parent. For example, if the parent coroutine is\n * executing in a particular strand, then the new coroutine will execute in the\n * same strand.\n *\n * @param function The coroutine function. The function must have the signature:\n * @code void function(basic_yield_context<Handler> yield); @endcode\n *\n * @param attributes Boost.Coroutine attributes used to customise the coroutine.\n */\ntemplate <typename Handler, typename Function>\nvoid spawn(basic_yield_context<Handler> ctx,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes\n      = boost::coroutines::attributes());\n\n/// Start a new stackful coroutine that executes on a given executor.\n/**\n * This function is used to launch a new coroutine.\n *\n * @param ex Identifies the executor that will run the coroutine. The new\n * coroutine is implicitly given its own strand within this executor.\n *\n * @param function The coroutine function. 
The function must have the signature:\n * @code void function(yield_context yield); @endcode\n *\n * @param attributes Boost.Coroutine attributes used to customise the coroutine.\n */\ntemplate <typename Function, typename Executor>\nvoid spawn(const Executor& ex,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes\n      = boost::coroutines::attributes(),\n    typename enable_if<is_executor<Executor>::value>::type* = 0);\n\n/// Start a new stackful coroutine that executes on a given strand.\n/**\n * This function is used to launch a new coroutine.\n *\n * @param ex Identifies the strand that will run the coroutine.\n *\n * @param function The coroutine function. The function must have the signature:\n * @code void function(yield_context yield); @endcode\n *\n * @param attributes Boost.Coroutine attributes used to customise the coroutine.\n */\ntemplate <typename Function, typename Executor>\nvoid spawn(const strand<Executor>& ex,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes\n      = boost::coroutines::attributes());\n\n/// Start a new stackful coroutine that executes in the context of a strand.\n/**\n * This function is used to launch a new coroutine.\n *\n * @param s Identifies a strand. By starting multiple coroutines on the same\n * strand, the implementation ensures that none of those coroutines can execute\n * simultaneously.\n *\n * @param function The coroutine function. 
The function must have the signature:\n * @code void function(yield_context yield); @endcode\n *\n * @param attributes Boost.Coroutine attributes used to customise the coroutine.\n */\ntemplate <typename Function>\nvoid spawn(const asio::io_context::strand& s,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes\n      = boost::coroutines::attributes());\n\n/// Start a new stackful coroutine that executes on a given execution context.\n/**\n * This function is used to launch a new coroutine.\n *\n * @param ctx Identifies the execution context that will run the coroutine. The\n * new coroutine is implicitly given its own strand within this execution\n * context.\n *\n * @param function The coroutine function. The function must have the signature:\n * @code void function(yield_context yield); @endcode\n *\n * @param attributes Boost.Coroutine attributes used to customise the coroutine.\n */\ntemplate <typename Function, typename ExecutionContext>\nvoid spawn(ExecutionContext& ctx,\n    ASIO_MOVE_ARG(Function) function,\n    const boost::coroutines::attributes& attributes\n      = boost::coroutines::attributes(),\n    typename enable_if<is_convertible<\n      ExecutionContext&, execution_context&>::value>::type* = 0);\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/spawn.hpp\"\n\n#endif // ASIO_SPAWN_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/context.hpp",
    "content": "//\n// ssl/context.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_CONTEXT_HPP\n#define ASIO_SSL_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <string>\n#include \"asio/buffer.hpp\"\n#include \"asio/io_context.hpp\"\n#include \"asio/ssl/context_base.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n#include \"asio/ssl/detail/openssl_init.hpp\"\n#include \"asio/ssl/detail/password_callback.hpp\"\n#include \"asio/ssl/detail/verify_callback.hpp\"\n#include \"asio/ssl/verify_mode.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\nclass context\n  : public context_base,\n    private noncopyable\n{\npublic:\n  /// The native handle type of the SSL context.\n  typedef SSL_CTX* native_handle_type;\n\n  /// Constructor.\n  ASIO_DECL explicit context(method m);\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a context from another.\n  /**\n   * This constructor moves an SSL context from one object to another.\n   *\n   * @param other The other context object from which the move will occur.\n   *\n   * @note Following the move, the following operations only are valid for the\n   * moved-from object:\n   * @li Destruction.\n   * @li As a target for move-assignment.\n   */\n  ASIO_DECL context(context&& other);\n\n  /// Move-assign a context from another.\n  /**\n   * This assignment operator moves an SSL context from one object to another.\n   *\n   * @param other The other context object from which the move will occur.\n   *\n   * @note Following the move, the following operations only are valid for the\n   * 
moved-from object:\n   * @li Destruction.\n   * @li As a target for move-assignment.\n   */\n  ASIO_DECL context& operator=(context&& other);\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destructor.\n  ASIO_DECL ~context();\n\n  /// Get the underlying implementation in the native type.\n  /**\n   * This function may be used to obtain the underlying implementation of the\n   * context. This is intended to allow access to context functionality that is\n   * not otherwise provided.\n   */\n  ASIO_DECL native_handle_type native_handle();\n\n  /// Clear options on the context.\n  /**\n   * This function may be used to configure the SSL options used by the context.\n   *\n   * @param o A bitmask of options. The available option values are defined in\n   * the context_base class. The specified options, if currently enabled on the\n   * context, are cleared.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_clear_options.\n   */\n  ASIO_DECL void clear_options(options o);\n\n  /// Clear options on the context.\n  /**\n   * This function may be used to configure the SSL options used by the context.\n   *\n   * @param o A bitmask of options. The available option values are defined in\n   * the context_base class. The specified options, if currently enabled on the\n   * context, are cleared.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_clear_options.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID clear_options(options o,\n      asio::error_code& ec);\n\n  /// Set options on the context.\n  /**\n   * This function may be used to configure the SSL options used by the context.\n   *\n   * @param o A bitmask of options. The available option values are defined in\n   * the context_base class. 
The options are bitwise-ored with any existing\n   * value for the options.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_set_options.\n   */\n  ASIO_DECL void set_options(options o);\n\n  /// Set options on the context.\n  /**\n   * This function may be used to configure the SSL options used by the context.\n   *\n   * @param o A bitmask of options. The available option values are defined in\n   * the context_base class. The options are bitwise-ored with any existing\n   * value for the options.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_set_options.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID set_options(options o,\n      asio::error_code& ec);\n\n  /// Set the peer verification mode.\n  /**\n   * This function may be used to configure the peer verification mode used by\n   * the context.\n   *\n   * @param v A bitmask of peer verification modes. See @ref verify_mode for\n   * available values.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_set_verify.\n   */\n  ASIO_DECL void set_verify_mode(verify_mode v);\n\n  /// Set the peer verification mode.\n  /**\n   * This function may be used to configure the peer verification mode used by\n   * the context.\n   *\n   * @param v A bitmask of peer verification modes. 
See @ref verify_mode for\n   * available values.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_set_verify.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID set_verify_mode(\n      verify_mode v, asio::error_code& ec);\n\n  /// Set the peer verification depth.\n  /**\n   * This function may be used to configure the maximum verification depth\n   * allowed by the context.\n   *\n   * @param depth Maximum depth for the certificate chain verification that\n   * shall be allowed.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_set_verify_depth.\n   */\n  ASIO_DECL void set_verify_depth(int depth);\n\n  /// Set the peer verification depth.\n  /**\n   * This function may be used to configure the maximum verification depth\n   * allowed by the context.\n   *\n   * @param depth Maximum depth for the certificate chain verification that\n   * shall be allowed.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_set_verify_depth.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID set_verify_depth(\n      int depth, asio::error_code& ec);\n\n  /// Set the callback used to verify peer certificates.\n  /**\n   * This function is used to specify a callback function that will be called\n   * by the implementation when it needs to verify a peer certificate.\n   *\n   * @param callback The function object to be used for verifying a certificate.\n   * The function signature of the handler must be:\n   * @code bool verify_callback(\n   *   bool preverified, // True if the certificate passed pre-verification.\n   *   verify_context& ctx // The peer certificate and other context.\n   * ); @endcode\n   * The return value of the callback is true if the certificate has passed\n   * verification, false otherwise.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_set_verify.\n   */\n  template <typename VerifyCallback>\n  
void set_verify_callback(VerifyCallback callback);\n\n  /// Set the callback used to verify peer certificates.\n  /**\n   * This function is used to specify a callback function that will be called\n   * by the implementation when it needs to verify a peer certificate.\n   *\n   * @param callback The function object to be used for verifying a certificate.\n   * The function signature of the handler must be:\n   * @code bool verify_callback(\n   *   bool preverified, // True if the certificate passed pre-verification.\n   *   verify_context& ctx // The peer certificate and other context.\n   * ); @endcode\n   * The return value of the callback is true if the certificate has passed\n   * verification, false otherwise.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_set_verify.\n   */\n  template <typename VerifyCallback>\n  ASIO_SYNC_OP_VOID set_verify_callback(VerifyCallback callback,\n      asio::error_code& ec);\n\n  /// Load a certification authority file for performing verification.\n  /**\n   * This function is used to load one or more trusted certification authorities\n   * from a file.\n   *\n   * @param filename The name of a file containing certification authority\n   * certificates in PEM format.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_load_verify_locations.\n   */\n  ASIO_DECL void load_verify_file(const std::string& filename);\n\n  /// Load a certification authority file for performing verification.\n  /**\n   * This function is used to load the certificates for one or more trusted\n   * certification authorities from a file.\n   *\n   * @param filename The name of a file containing certification authority\n   * certificates in PEM format.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_load_verify_locations.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID load_verify_file(\n      const std::string& 
filename, asio::error_code& ec);\n\n  /// Add certification authority for performing verification.\n  /**\n   * This function is used to add one trusted certification authority\n   * from a memory buffer.\n   *\n   * @param ca The buffer containing the certification authority certificate.\n   * The certificate must use the PEM format.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_get_cert_store and @c X509_STORE_add_cert.\n   */\n  ASIO_DECL void add_certificate_authority(const const_buffer& ca);\n\n  /// Add certification authority for performing verification.\n  /**\n   * This function is used to add one trusted certification authority\n   * from a memory buffer.\n   *\n   * @param ca The buffer containing the certification authority certificate.\n   * The certificate must use the PEM format.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_get_cert_store and @c X509_STORE_add_cert.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID add_certificate_authority(\n      const const_buffer& ca, asio::error_code& ec);\n\n  /// Configures the context to use the default directories for finding\n  /// certification authority certificates.\n  /**\n   * This function specifies that the context should use the default,\n   * system-dependent directories for locating certification authority\n   * certificates.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_set_default_verify_paths.\n   */\n  ASIO_DECL void set_default_verify_paths();\n\n  /// Configures the context to use the default directories for finding\n  /// certification authority certificates.\n  /**\n   * This function specifies that the context should use the default,\n   * system-dependent directories for locating certification authority\n   * certificates.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_set_default_verify_paths.\n  
 */\n  ASIO_DECL ASIO_SYNC_OP_VOID set_default_verify_paths(\n      asio::error_code& ec);\n\n  /// Add a directory containing certificate authority files to be used for\n  /// performing verification.\n  /**\n   * This function is used to specify the name of a directory containing\n   * certification authority certificates. Each file in the directory must\n   * contain a single certificate. The files must be named using the subject\n   * name's hash and an extension of \".0\".\n   *\n   * @param path The name of a directory containing the certificates.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_load_verify_locations.\n   */\n  ASIO_DECL void add_verify_path(const std::string& path);\n\n  /// Add a directory containing certificate authority files to be used for\n  /// performing verification.\n  /**\n   * This function is used to specify the name of a directory containing\n   * certification authority certificates. Each file in the directory must\n   * contain a single certificate. 
The files must be named using the subject\n   * name's hash and an extension of \".0\".\n   *\n   * @param path The name of a directory containing the certificates.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_load_verify_locations.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID add_verify_path(\n      const std::string& path, asio::error_code& ec);\n\n  /// Use a certificate from a memory buffer.\n  /**\n   * This function is used to load a certificate into the context from a buffer.\n   *\n   * @param certificate The buffer containing the certificate.\n   *\n   * @param format The certificate format (ASN.1 or PEM).\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_use_certificate or SSL_CTX_use_certificate_ASN1.\n   */\n  ASIO_DECL void use_certificate(\n      const const_buffer& certificate, file_format format);\n\n  /// Use a certificate from a memory buffer.\n  /**\n   * This function is used to load a certificate into the context from a buffer.\n   *\n   * @param certificate The buffer containing the certificate.\n   *\n   * @param format The certificate format (ASN.1 or PEM).\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_use_certificate or SSL_CTX_use_certificate_ASN1.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_certificate(\n      const const_buffer& certificate, file_format format,\n      asio::error_code& ec);\n\n  /// Use a certificate from a file.\n  /**\n   * This function is used to load a certificate into the context from a file.\n   *\n   * @param filename The name of the file containing the certificate.\n   *\n   * @param format The file format (ASN.1 or PEM).\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_use_certificate_file.\n   */\n  ASIO_DECL void use_certificate_file(\n      const std::string& filename, file_format format);\n\n  /// Use a certificate from 
a file.\n  /**\n   * This function is used to load a certificate into the context from a file.\n   *\n   * @param filename The name of the file containing the certificate.\n   *\n   * @param format The file format (ASN.1 or PEM).\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_use_certificate_file.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_certificate_file(\n      const std::string& filename, file_format format,\n      asio::error_code& ec);\n\n  /// Use a certificate chain from a memory buffer.\n  /**\n   * This function is used to load a certificate chain into the context from a\n   * buffer.\n   *\n   * @param chain The buffer containing the certificate chain. The certificate\n   * chain must use the PEM format.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_use_certificate and SSL_CTX_add_extra_chain_cert.\n   */\n  ASIO_DECL void use_certificate_chain(const const_buffer& chain);\n\n  /// Use a certificate chain from a memory buffer.\n  /**\n   * This function is used to load a certificate chain into the context from a\n   * buffer.\n   *\n   * @param chain The buffer containing the certificate chain. The certificate\n   * chain must use the PEM format.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_use_certificate and SSL_CTX_add_extra_chain_cert.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_certificate_chain(\n      const const_buffer& chain, asio::error_code& ec);\n\n  /// Use a certificate chain from a file.\n  /**\n   * This function is used to load a certificate chain into the context from a\n   * file.\n   *\n   * @param filename The name of the file containing the certificate. 
The file\n   * must use the PEM format.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_use_certificate_chain_file.\n   */\n  ASIO_DECL void use_certificate_chain_file(const std::string& filename);\n\n  /// Use a certificate chain from a file.\n  /**\n   * This function is used to load a certificate chain into the context from a\n   * file.\n   *\n   * @param filename The name of the file containing the certificate. The file\n   * must use the PEM format.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_use_certificate_chain_file.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_certificate_chain_file(\n      const std::string& filename, asio::error_code& ec);\n\n  /// Use a private key from a memory buffer.\n  /**\n   * This function is used to load a private key into the context from a buffer.\n   *\n   * @param private_key The buffer containing the private key.\n   *\n   * @param format The private key format (ASN.1 or PEM).\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_use_PrivateKey or SSL_CTX_use_PrivateKey_ASN1.\n   */\n  ASIO_DECL void use_private_key(\n      const const_buffer& private_key, file_format format);\n\n  /// Use a private key from a memory buffer.\n  /**\n   * This function is used to load a private key into the context from a buffer.\n   *\n   * @param private_key The buffer containing the private key.\n   *\n   * @param format The private key format (ASN.1 or PEM).\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_use_PrivateKey or SSL_CTX_use_PrivateKey_ASN1.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_private_key(\n      const const_buffer& private_key, file_format format,\n      asio::error_code& ec);\n\n  /// Use a private key from a file.\n  /**\n   * This function is used to load a private key into the context from a file.\n   *\n   * @param filename 
The name of the file containing the private key.\n   *\n   * @param format The file format (ASN.1 or PEM).\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_use_PrivateKey_file.\n   */\n  ASIO_DECL void use_private_key_file(\n      const std::string& filename, file_format format);\n\n  /// Use a private key from a file.\n  /**\n   * This function is used to load a private key into the context from a file.\n   *\n   * @param filename The name of the file containing the private key.\n   *\n   * @param format The file format (ASN.1 or PEM).\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_use_PrivateKey_file.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_private_key_file(\n      const std::string& filename, file_format format,\n      asio::error_code& ec);\n\n  /// Use an RSA private key from a memory buffer.\n  /**\n   * This function is used to load an RSA private key into the context from a\n   * buffer.\n   *\n   * @param private_key The buffer containing the RSA private key.\n   *\n   * @param format The private key format (ASN.1 or PEM).\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_use_RSAPrivateKey or SSL_CTX_use_RSAPrivateKey_ASN1.\n   */\n  ASIO_DECL void use_rsa_private_key(\n      const const_buffer& private_key, file_format format);\n\n  /// Use an RSA private key from a memory buffer.\n  /**\n   * This function is used to load an RSA private key into the context from a\n   * buffer.\n   *\n   * @param private_key The buffer containing the RSA private key.\n   *\n   * @param format The private key format (ASN.1 or PEM).\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_use_RSAPrivateKey or SSL_CTX_use_RSAPrivateKey_ASN1.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_rsa_private_key(\n      const const_buffer& private_key, file_format format,\n      asio::error_code& 
ec);\n\n  /// Use an RSA private key from a file.\n  /**\n   * This function is used to load an RSA private key into the context from a\n   * file.\n   *\n   * @param filename The name of the file containing the RSA private key.\n   *\n   * @param format The file format (ASN.1 or PEM).\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_use_RSAPrivateKey_file.\n   */\n  ASIO_DECL void use_rsa_private_key_file(\n      const std::string& filename, file_format format);\n\n  /// Use an RSA private key from a file.\n  /**\n   * This function is used to load an RSA private key into the context from a\n   * file.\n   *\n   * @param filename The name of the file containing the RSA private key.\n   *\n   * @param format The file format (ASN.1 or PEM).\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_use_RSAPrivateKey_file.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_rsa_private_key_file(\n      const std::string& filename, file_format format,\n      asio::error_code& ec);\n\n  /// Use the specified memory buffer to obtain the temporary Diffie-Hellman\n  /// parameters.\n  /**\n   * This function is used to load Diffie-Hellman parameters into the context\n   * from a buffer.\n   *\n   * @param dh The memory buffer containing the Diffie-Hellman parameters. The\n   * buffer must use the PEM format.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_set_tmp_dh.\n   */\n  ASIO_DECL void use_tmp_dh(const const_buffer& dh);\n\n  /// Use the specified memory buffer to obtain the temporary Diffie-Hellman\n  /// parameters.\n  /**\n   * This function is used to load Diffie-Hellman parameters into the context\n   * from a buffer.\n   *\n   * @param dh The memory buffer containing the Diffie-Hellman parameters. 
The\n   * buffer must use the PEM format.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_set_tmp_dh.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_tmp_dh(\n      const const_buffer& dh, asio::error_code& ec);\n\n  /// Use the specified file to obtain the temporary Diffie-Hellman parameters.\n  /**\n   * This function is used to load Diffie-Hellman parameters into the context\n   * from a file.\n   *\n   * @param filename The name of the file containing the Diffie-Hellman\n   * parameters. The file must use the PEM format.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_set_tmp_dh.\n   */\n  ASIO_DECL void use_tmp_dh_file(const std::string& filename);\n\n  /// Use the specified file to obtain the temporary Diffie-Hellman parameters.\n  /**\n   * This function is used to load Diffie-Hellman parameters into the context\n   * from a file.\n   *\n   * @param filename The name of the file containing the Diffie-Hellman\n   * parameters. 
The file must use the PEM format.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_set_tmp_dh.\n   */\n  ASIO_DECL ASIO_SYNC_OP_VOID use_tmp_dh_file(\n      const std::string& filename, asio::error_code& ec);\n\n  /// Set the password callback.\n  /**\n   * This function is used to specify a callback function to obtain password\n   * information about an encrypted key in PEM format.\n   *\n   * @param callback The function object to be used for obtaining the password.\n   * The function signature of the handler must be:\n   * @code std::string password_callback(\n   *   std::size_t max_length,  // The maximum size for a password.\n   *   password_purpose purpose // Whether password is for reading or writing.\n   * ); @endcode\n   * The return value of the callback is a string containing the password.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_CTX_set_default_passwd_cb.\n   */\n  template <typename PasswordCallback>\n  void set_password_callback(PasswordCallback callback);\n\n  /// Set the password callback.\n  /**\n   * This function is used to specify a callback function to obtain password\n   * information about an encrypted key in PEM format.\n   *\n   * @param callback The function object to be used for obtaining the password.\n   * The function signature of the handler must be:\n   * @code std::string password_callback(\n   *   std::size_t max_length,  // The maximum size for a password.\n   *   password_purpose purpose // Whether password is for reading or writing.\n   * ); @endcode\n   * The return value of the callback is a string containing the password.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_CTX_set_default_passwd_cb.\n   */\n  template <typename PasswordCallback>\n  ASIO_SYNC_OP_VOID set_password_callback(PasswordCallback callback,\n      asio::error_code& ec);\n\nprivate:\n  struct bio_cleanup;\n  
struct x509_cleanup;\n  struct evp_pkey_cleanup;\n  struct rsa_cleanup;\n  struct dh_cleanup;\n\n  // Helper function used to set a peer certificate verification callback.\n  ASIO_DECL ASIO_SYNC_OP_VOID do_set_verify_callback(\n      detail::verify_callback_base* callback, asio::error_code& ec);\n\n  // Callback used when the SSL implementation wants to verify a certificate.\n  ASIO_DECL static int verify_callback_function(\n      int preverified, X509_STORE_CTX* ctx);\n\n  // Helper function used to set a password callback.\n  ASIO_DECL ASIO_SYNC_OP_VOID do_set_password_callback(\n      detail::password_callback_base* callback, asio::error_code& ec);\n\n  // Callback used when the SSL implementation wants a password.\n  ASIO_DECL static int password_callback_function(\n      char* buf, int size, int purpose, void* data);\n\n  // Helper function to set the temporary Diffie-Hellman parameters from a BIO.\n  ASIO_DECL ASIO_SYNC_OP_VOID do_use_tmp_dh(\n      BIO* bio, asio::error_code& ec);\n\n  // Helper function to make a BIO from a memory buffer.\n  ASIO_DECL BIO* make_buffer_bio(const const_buffer& b);\n\n  // The underlying native implementation.\n  native_handle_type handle_;\n\n  // Ensure openssl is initialised.\n  asio::ssl::detail::openssl_init<> init_;\n};\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/ssl/impl/context.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ssl/impl/context.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_SSL_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/context_base.hpp",
    "content": "//\n// ssl/context_base.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_CONTEXT_BASE_HPP\n#define ASIO_SSL_CONTEXT_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\n/// The context_base class is used as a base for the basic_context class\n/// template so that we have a common place to define various enums.\nclass context_base\n{\npublic:\n  /// Different methods supported by a context.\n  enum method\n  {\n    /// Generic SSL version 2.\n    sslv2,\n\n    /// SSL version 2 client.\n    sslv2_client,\n\n    /// SSL version 2 server.\n    sslv2_server,\n\n    /// Generic SSL version 3.\n    sslv3,\n\n    /// SSL version 3 client.\n    sslv3_client,\n\n    /// SSL version 3 server.\n    sslv3_server,\n\n    /// Generic TLS version 1.\n    tlsv1,\n\n    /// TLS version 1 client.\n    tlsv1_client,\n\n    /// TLS version 1 server.\n    tlsv1_server,\n\n    /// Generic SSL/TLS.\n    sslv23,\n\n    /// SSL/TLS client.\n    sslv23_client,\n\n    /// SSL/TLS server.\n    sslv23_server,\n\n    /// Generic TLS version 1.1.\n    tlsv11,\n\n    /// TLS version 1.1 client.\n    tlsv11_client,\n\n    /// TLS version 1.1 server.\n    tlsv11_server,\n\n    /// Generic TLS version 1.2.\n    tlsv12,\n\n    /// TLS version 1.2 client.\n    tlsv12_client,\n\n    /// TLS version 1.2 server.\n    tlsv12_server,\n\n    /// Generic TLS version 1.3.\n    tlsv13,\n\n    /// TLS version 1.3 client.\n    tlsv13_client,\n\n    /// TLS version 1.3 server.\n    tlsv13_server,\n\n    /// Generic 
TLS.\n    tls,\n\n    /// TLS client.\n    tls_client,\n\n    /// TLS server.\n    tls_server\n  };\n\n  /// Bitmask type for SSL options.\n  typedef long options;\n\n#if defined(GENERATING_DOCUMENTATION)\n  /// Implement various bug workarounds.\n  static const long default_workarounds = implementation_defined;\n\n  /// Always create a new key when using tmp_dh parameters.\n  static const long single_dh_use = implementation_defined;\n\n  /// Disable SSL v2.\n  static const long no_sslv2 = implementation_defined;\n\n  /// Disable SSL v3.\n  static const long no_sslv3 = implementation_defined;\n\n  /// Disable TLS v1.\n  static const long no_tlsv1 = implementation_defined;\n\n  /// Disable TLS v1.1.\n  static const long no_tlsv1_1 = implementation_defined;\n\n  /// Disable TLS v1.2.\n  static const long no_tlsv1_2 = implementation_defined;\n\n  /// Disable TLS v1.3.\n  static const long no_tlsv1_3 = implementation_defined;\n\n  /// Disable compression. Compression is disabled by default.\n  static const long no_compression = implementation_defined;\n#else\n  ASIO_STATIC_CONSTANT(long, default_workarounds = SSL_OP_ALL);\n  ASIO_STATIC_CONSTANT(long, single_dh_use = SSL_OP_SINGLE_DH_USE);\n  ASIO_STATIC_CONSTANT(long, no_sslv2 = SSL_OP_NO_SSLv2);\n  ASIO_STATIC_CONSTANT(long, no_sslv3 = SSL_OP_NO_SSLv3);\n  ASIO_STATIC_CONSTANT(long, no_tlsv1 = SSL_OP_NO_TLSv1);\n# if defined(SSL_OP_NO_TLSv1_1)\n  ASIO_STATIC_CONSTANT(long, no_tlsv1_1 = SSL_OP_NO_TLSv1_1);\n# else // defined(SSL_OP_NO_TLSv1_1)\n  ASIO_STATIC_CONSTANT(long, no_tlsv1_1 = 0x10000000L);\n# endif // defined(SSL_OP_NO_TLSv1_1)\n# if defined(SSL_OP_NO_TLSv1_2)\n  ASIO_STATIC_CONSTANT(long, no_tlsv1_2 = SSL_OP_NO_TLSv1_2);\n# else // defined(SSL_OP_NO_TLSv1_2)\n  ASIO_STATIC_CONSTANT(long, no_tlsv1_2 = 0x08000000L);\n# endif // defined(SSL_OP_NO_TLSv1_2)\n# if defined(SSL_OP_NO_TLSv1_3)\n  ASIO_STATIC_CONSTANT(long, no_tlsv1_3 = SSL_OP_NO_TLSv1_3);\n# else // defined(SSL_OP_NO_TLSv1_3)\n  
ASIO_STATIC_CONSTANT(long, no_tlsv1_3 = 0x20000000L);\n# endif // defined(SSL_OP_NO_TLSv1_3)\n# if defined(SSL_OP_NO_COMPRESSION)\n  ASIO_STATIC_CONSTANT(long, no_compression = SSL_OP_NO_COMPRESSION);\n# else // defined(SSL_OP_NO_COMPRESSION)\n  ASIO_STATIC_CONSTANT(long, no_compression = 0x20000L);\n# endif // defined(SSL_OP_NO_COMPRESSION)\n#endif\n\n  /// File format types.\n  enum file_format\n  {\n    /// ASN.1 file.\n    asn1,\n\n    /// PEM file.\n    pem\n  };\n\n#if !defined(GENERATING_DOCUMENTATION)\n  // The following types and constants are preserved for backward compatibility.\n  // New programs should use the equivalents of the same names that are defined\n  // in the asio::ssl namespace.\n  typedef int verify_mode;\n  ASIO_STATIC_CONSTANT(int, verify_none = SSL_VERIFY_NONE);\n  ASIO_STATIC_CONSTANT(int, verify_peer = SSL_VERIFY_PEER);\n  ASIO_STATIC_CONSTANT(int,\n      verify_fail_if_no_peer_cert = SSL_VERIFY_FAIL_IF_NO_PEER_CERT);\n  ASIO_STATIC_CONSTANT(int, verify_client_once = SSL_VERIFY_CLIENT_ONCE);\n#endif\n\n  /// Purpose of PEM password.\n  enum password_purpose\n  {\n    /// The password is needed for reading/decryption.\n    for_reading,\n\n    /// The password is needed for writing/encryption.\n    for_writing\n  };\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  ~context_base()\n  {\n  }\n};\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_CONTEXT_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/buffered_handshake_op.hpp",
    "content": "//\n// ssl/detail/buffered_handshake_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP\n#define ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/ssl/detail/engine.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\ntemplate <typename ConstBufferSequence>\nclass buffered_handshake_op\n{\npublic:\n  buffered_handshake_op(stream_base::handshake_type type,\n      const ConstBufferSequence& buffers)\n    : type_(type),\n      buffers_(buffers),\n      total_buffer_size_(asio::buffer_size(buffers_))\n  {\n  }\n\n  engine::want operator()(engine& eng,\n      asio::error_code& ec,\n      std::size_t& bytes_transferred) const\n  {\n    return this->process(eng, ec, bytes_transferred,\n        asio::buffer_sequence_begin(buffers_),\n        asio::buffer_sequence_end(buffers_));\n  }\n\n  template <typename Handler>\n  void call_handler(Handler& handler,\n      const asio::error_code& ec,\n      const std::size_t& bytes_transferred) const\n  {\n    handler(ec, bytes_transferred);\n  }\n\nprivate:\n  template <typename Iterator>\n  engine::want process(engine& eng,\n      asio::error_code& ec,\n      std::size_t& bytes_transferred,\n      Iterator begin, Iterator end) const\n  {\n    Iterator iter = begin;\n    std::size_t accumulated_size = 0;\n\n    for (;;)\n    {\n      engine::want want = eng.handshake(type_, ec);\n      if (want != engine::want_input_and_retry\n          || bytes_transferred == total_buffer_size_)\n        return want;\n\n      
// Find the next buffer piece to be fed to the engine.\n      while (iter != end)\n      {\n        const_buffer buffer(*iter);\n\n        // Skip over any buffers which have already been consumed by the engine.\n        if (bytes_transferred >= accumulated_size + buffer.size())\n        {\n          accumulated_size += buffer.size();\n          ++iter;\n          continue;\n        }\n\n        // The current buffer may have been partially consumed by the engine on\n        // a previous iteration. If so, adjust the buffer to point to the\n        // unused portion.\n        if (bytes_transferred > accumulated_size)\n          buffer = buffer + (bytes_transferred - accumulated_size);\n\n        // Pass the buffer to the engine, and update the bytes transferred to\n        // reflect the total number of bytes consumed so far.\n        bytes_transferred += buffer.size();\n        buffer = eng.put_input(buffer);\n        bytes_transferred -= buffer.size();\n        break;\n      }\n    }\n  }\n\n  stream_base::handshake_type type_;\n  ConstBufferSequence buffers_;\n  std::size_t total_buffer_size_;\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/engine.hpp",
    "content": "//\n// ssl/detail/engine.hpp\n// ~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_ENGINE_HPP\n#define ASIO_SSL_DETAIL_ENGINE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/buffer.hpp\"\n#include \"asio/detail/static_mutex.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n#include \"asio/ssl/detail/verify_callback.hpp\"\n#include \"asio/ssl/stream_base.hpp\"\n#include \"asio/ssl/verify_mode.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nclass engine\n{\npublic:\n  enum want\n  {\n    // Returned by functions to indicate that the engine wants input. The input\n    // buffer should be updated to point to the data. The engine then needs to\n    // be called again to retry the operation.\n    want_input_and_retry = -2,\n\n    // Returned by functions to indicate that the engine wants to write output.\n    // The output buffer points to the data to be written. The engine then\n    // needs to be called again to retry the operation.\n    want_output_and_retry = -1,\n\n    // Returned by functions to indicate that the engine doesn't need input or\n    // output.\n    want_nothing = 0,\n\n    // Returned by functions to indicate that the engine wants to write output.\n    // The output buffer points to the data to be written. 
After that the\n    // operation is complete, and the engine does not need to be called again.\n    want_output = 1\n  };\n\n  // Construct a new engine for the specified context.\n  ASIO_DECL explicit engine(SSL_CTX* context);\n\n  // Destructor.\n  ASIO_DECL ~engine();\n\n  // Get the underlying implementation in the native type.\n  ASIO_DECL SSL* native_handle();\n\n  // Set the peer verification mode.\n  ASIO_DECL asio::error_code set_verify_mode(\n      verify_mode v, asio::error_code& ec);\n\n  // Set the peer verification depth.\n  ASIO_DECL asio::error_code set_verify_depth(\n      int depth, asio::error_code& ec);\n\n  // Set a peer certificate verification callback.\n  ASIO_DECL asio::error_code set_verify_callback(\n      verify_callback_base* callback, asio::error_code& ec);\n\n  // Perform an SSL handshake using either SSL_connect (client-side) or\n  // SSL_accept (server-side).\n  ASIO_DECL want handshake(\n      stream_base::handshake_type type, asio::error_code& ec);\n\n  // Perform a graceful shutdown of the SSL session.\n  ASIO_DECL want shutdown(asio::error_code& ec);\n\n  // Write bytes to the SSL session.\n  ASIO_DECL want write(const asio::const_buffer& data,\n      asio::error_code& ec, std::size_t& bytes_transferred);\n\n  // Read bytes from the SSL session.\n  ASIO_DECL want read(const asio::mutable_buffer& data,\n      asio::error_code& ec, std::size_t& bytes_transferred);\n\n  // Get output data to be written to the transport.\n  ASIO_DECL asio::mutable_buffer get_output(\n      const asio::mutable_buffer& data);\n\n  // Put input data that was read from the transport.\n  ASIO_DECL asio::const_buffer put_input(\n      const asio::const_buffer& data);\n\n  // Map an error::eof code returned by the underlying transport according to\n  // the type and state of the SSL session. 
Returns a const reference to the\n  // error code object, suitable for passing to a completion handler.\n  ASIO_DECL const asio::error_code& map_error_code(\n      asio::error_code& ec) const;\n\nprivate:\n  // Disallow copying and assignment.\n  engine(const engine&);\n  engine& operator=(const engine&);\n\n  // Callback used when the SSL implementation wants to verify a certificate.\n  ASIO_DECL static int verify_callback_function(\n      int preverified, X509_STORE_CTX* ctx);\n\n#if (OPENSSL_VERSION_NUMBER < 0x10000000L)\n  // The SSL_accept function may not be thread safe. This mutex is used to\n  // protect all calls to the SSL_accept function.\n  ASIO_DECL static asio::detail::static_mutex& accept_mutex();\n#endif // (OPENSSL_VERSION_NUMBER < 0x10000000L)\n\n  // Perform one operation. Returns >= 0 on success or error, want_read if the\n  // operation needs more input, or want_write if it needs to write some output\n  // before the operation can complete.\n  ASIO_DECL want perform(int (engine::* op)(void*, std::size_t),\n      void* data, std::size_t length, asio::error_code& ec,\n      std::size_t* bytes_transferred);\n\n  // Adapt the SSL_accept function to the signature needed for perform().\n  ASIO_DECL int do_accept(void*, std::size_t);\n\n  // Adapt the SSL_connect function to the signature needed for perform().\n  ASIO_DECL int do_connect(void*, std::size_t);\n\n  // Adapt the SSL_shutdown function to the signature needed for perform().\n  ASIO_DECL int do_shutdown(void*, std::size_t);\n\n  // Adapt the SSL_read function to the signature needed for perform().\n  ASIO_DECL int do_read(void* data, std::size_t length);\n\n  // Adapt the SSL_write function to the signature needed for perform().\n  ASIO_DECL int do_write(void* data, std::size_t length);\n\n  SSL* ssl_;\n  BIO* ext_bio_;\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include 
\"asio/ssl/detail/impl/engine.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_SSL_DETAIL_ENGINE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/handshake_op.hpp",
    "content": "//\n// ssl/detail/handshake_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP\n#define ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/ssl/detail/engine.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nclass handshake_op\n{\npublic:\n  handshake_op(stream_base::handshake_type type)\n    : type_(type)\n  {\n  }\n\n  engine::want operator()(engine& eng,\n      asio::error_code& ec,\n      std::size_t& bytes_transferred) const\n  {\n    bytes_transferred = 0;\n    return eng.handshake(type_, ec);\n  }\n\n  template <typename Handler>\n  void call_handler(Handler& handler,\n      const asio::error_code& ec,\n      const std::size_t&) const\n  {\n    handler(ec);\n  }\n\nprivate:\n  stream_base::handshake_type type_;\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/impl/engine.ipp",
    "content": "//\n// ssl/detail/impl/engine.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_IMPL_ENGINE_IPP\n#define ASIO_SSL_DETAIL_IMPL_ENGINE_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/ssl/detail/engine.hpp\"\n#include \"asio/ssl/error.hpp\"\n#include \"asio/ssl/verify_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nengine::engine(SSL_CTX* context)\n  : ssl_(::SSL_new(context))\n{\n  if (!ssl_)\n  {\n    asio::error_code ec(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    asio::detail::throw_error(ec, \"engine\");\n  }\n\n#if (OPENSSL_VERSION_NUMBER < 0x10000000L)\n  accept_mutex().init();\n#endif // (OPENSSL_VERSION_NUMBER < 0x10000000L)\n\n  ::SSL_set_mode(ssl_, SSL_MODE_ENABLE_PARTIAL_WRITE);\n  ::SSL_set_mode(ssl_, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);\n#if defined(SSL_MODE_RELEASE_BUFFERS)\n  ::SSL_set_mode(ssl_, SSL_MODE_RELEASE_BUFFERS);\n#endif // defined(SSL_MODE_RELEASE_BUFFERS)\n\n  ::BIO* int_bio = 0;\n  ::BIO_new_bio_pair(&int_bio, 0, &ext_bio_, 0);\n  ::SSL_set_bio(ssl_, int_bio, int_bio);\n}\n\nengine::~engine()\n{\n  if (SSL_get_app_data(ssl_))\n  {\n    delete static_cast<verify_callback_base*>(SSL_get_app_data(ssl_));\n    SSL_set_app_data(ssl_, 0);\n  }\n\n  ::BIO_free(ext_bio_);\n  ::SSL_free(ssl_);\n}\n\nSSL* engine::native_handle()\n{\n  return ssl_;\n}\n\nasio::error_code engine::set_verify_mode(\n    verify_mode v, asio::error_code& ec)\n{\n  ::SSL_set_verify(ssl_, 
v, ::SSL_get_verify_callback(ssl_));\n\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code engine::set_verify_depth(\n    int depth, asio::error_code& ec)\n{\n  ::SSL_set_verify_depth(ssl_, depth);\n\n  ec = asio::error_code();\n  return ec;\n}\n\nasio::error_code engine::set_verify_callback(\n    verify_callback_base* callback, asio::error_code& ec)\n{\n  if (SSL_get_app_data(ssl_))\n    delete static_cast<verify_callback_base*>(SSL_get_app_data(ssl_));\n\n  SSL_set_app_data(ssl_, callback);\n\n  ::SSL_set_verify(ssl_, ::SSL_get_verify_mode(ssl_),\n      &engine::verify_callback_function);\n\n  ec = asio::error_code();\n  return ec;\n}\n\nint engine::verify_callback_function(int preverified, X509_STORE_CTX* ctx)\n{\n  if (ctx)\n  {\n    if (SSL* ssl = static_cast<SSL*>(\n          ::X509_STORE_CTX_get_ex_data(\n            ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx())))\n    {\n      if (SSL_get_app_data(ssl))\n      {\n        verify_callback_base* callback =\n          static_cast<verify_callback_base*>(\n              SSL_get_app_data(ssl));\n\n        verify_context verify_ctx(ctx);\n        return callback->call(preverified != 0, verify_ctx) ? 1 : 0;\n      }\n    }\n  }\n\n  return 0;\n}\n\nengine::want engine::handshake(\n    stream_base::handshake_type type, asio::error_code& ec)\n{\n  return perform((type == asio::ssl::stream_base::client)\n      ? 
&engine::do_connect : &engine::do_accept, 0, 0, ec, 0);\n}\n\nengine::want engine::shutdown(asio::error_code& ec)\n{\n  return perform(&engine::do_shutdown, 0, 0, ec, 0);\n}\n\nengine::want engine::write(const asio::const_buffer& data,\n    asio::error_code& ec, std::size_t& bytes_transferred)\n{\n  if (data.size() == 0)\n  {\n    ec = asio::error_code();\n    return engine::want_nothing;\n  }\n\n  return perform(&engine::do_write,\n      const_cast<void*>(data.data()),\n      data.size(), ec, &bytes_transferred);\n}\n\nengine::want engine::read(const asio::mutable_buffer& data,\n    asio::error_code& ec, std::size_t& bytes_transferred)\n{\n  if (data.size() == 0)\n  {\n    ec = asio::error_code();\n    return engine::want_nothing;\n  }\n\n  return perform(&engine::do_read, data.data(),\n      data.size(), ec, &bytes_transferred);\n}\n\nasio::mutable_buffer engine::get_output(\n    const asio::mutable_buffer& data)\n{\n  int length = ::BIO_read(ext_bio_,\n      data.data(), static_cast<int>(data.size()));\n\n  return asio::buffer(data,\n      length > 0 ? static_cast<std::size_t>(length) : 0);\n}\n\nasio::const_buffer engine::put_input(\n    const asio::const_buffer& data)\n{\n  int length = ::BIO_write(ext_bio_,\n      data.data(), static_cast<int>(data.size()));\n\n  return asio::buffer(data +\n      (length > 0 ? 
static_cast<std::size_t>(length) : 0));\n}\n\nconst asio::error_code& engine::map_error_code(\n    asio::error_code& ec) const\n{\n  // We only want to map the error::eof code.\n  if (ec != asio::error::eof)\n    return ec;\n\n  // If there's data yet to be read, it's an error.\n  if (BIO_wpending(ext_bio_))\n  {\n    ec = asio::ssl::error::stream_truncated;\n    return ec;\n  }\n\n  // SSL v2 doesn't provide a protocol-level shutdown, so an eof on the\n  // underlying transport is passed through.\n#if (OPENSSL_VERSION_NUMBER < 0x10100000L)\n  if (SSL_version(ssl_) == SSL2_VERSION)\n    return ec;\n#endif // (OPENSSL_VERSION_NUMBER < 0x10100000L)\n\n  // Otherwise, the peer should have negotiated a proper shutdown.\n  if ((::SSL_get_shutdown(ssl_) & SSL_RECEIVED_SHUTDOWN) == 0)\n  {\n    ec = asio::ssl::error::stream_truncated;\n  }\n\n  return ec;\n}\n\n#if (OPENSSL_VERSION_NUMBER < 0x10000000L)\nasio::detail::static_mutex& engine::accept_mutex()\n{\n  static asio::detail::static_mutex mutex = ASIO_STATIC_MUTEX_INIT;\n  return mutex;\n}\n#endif // (OPENSSL_VERSION_NUMBER < 0x10000000L)\n\nengine::want engine::perform(int (engine::* op)(void*, std::size_t),\n    void* data, std::size_t length, asio::error_code& ec,\n    std::size_t* bytes_transferred)\n{\n  std::size_t pending_output_before = ::BIO_ctrl_pending(ext_bio_);\n  ::ERR_clear_error();\n  int result = (this->*op)(data, length);\n  int ssl_error = ::SSL_get_error(ssl_, result);\n  int sys_error = static_cast<int>(::ERR_get_error());\n  std::size_t pending_output_after = ::BIO_ctrl_pending(ext_bio_);\n\n  if (ssl_error == SSL_ERROR_SSL)\n  {\n    ec = asio::error_code(sys_error,\n        asio::error::get_ssl_category());\n    return pending_output_after > pending_output_before\n      ? 
want_output : want_nothing;\n  }\n\n  if (ssl_error == SSL_ERROR_SYSCALL)\n  {\n    if (sys_error == 0)\n    {\n      ec = asio::ssl::error::unspecified_system_error;\n    }\n    else\n    {\n      ec = asio::error_code(sys_error,\n          asio::error::get_ssl_category());\n    }\n    return pending_output_after > pending_output_before\n      ? want_output : want_nothing;\n  }\n\n  if (result > 0 && bytes_transferred)\n    *bytes_transferred = static_cast<std::size_t>(result);\n\n  if (ssl_error == SSL_ERROR_WANT_WRITE)\n  {\n    ec = asio::error_code();\n    return want_output_and_retry;\n  }\n  else if (pending_output_after > pending_output_before)\n  {\n    ec = asio::error_code();\n    return result > 0 ? want_output : want_output_and_retry;\n  }\n  else if (ssl_error == SSL_ERROR_WANT_READ)\n  {\n    ec = asio::error_code();\n    return want_input_and_retry;\n  }\n  else if (ssl_error == SSL_ERROR_ZERO_RETURN)\n  {\n    ec = asio::error::eof;\n    return want_nothing;\n  }\n  else if (ssl_error == SSL_ERROR_NONE)\n  {\n    ec = asio::error_code();\n    return want_nothing;\n  }\n  else\n  {\n    ec = asio::ssl::error::unexpected_result;\n    return want_nothing;\n  }\n}\n\nint engine::do_accept(void*, std::size_t)\n{\n#if (OPENSSL_VERSION_NUMBER < 0x10000000L)\n  asio::detail::static_mutex::scoped_lock lock(accept_mutex());\n#endif // (OPENSSL_VERSION_NUMBER < 0x10000000L)\n  return ::SSL_accept(ssl_);\n}\n\nint engine::do_connect(void*, std::size_t)\n{\n  return ::SSL_connect(ssl_);\n}\n\nint engine::do_shutdown(void*, std::size_t)\n{\n  int result = ::SSL_shutdown(ssl_);\n  if (result == 0)\n    result = ::SSL_shutdown(ssl_);\n  return result;\n}\n\nint engine::do_read(void* data, std::size_t length)\n{\n  return ::SSL_read(ssl_, data,\n      length < INT_MAX ? static_cast<int>(length) : INT_MAX);\n}\n\nint engine::do_write(void* data, std::size_t length)\n{\n  return ::SSL_write(ssl_, data,\n      length < INT_MAX ? 
static_cast<int>(length) : INT_MAX);\n}\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_IMPL_ENGINE_IPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/impl/openssl_init.ipp",
    "content": "//\n// ssl/detail/impl/openssl_init.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com\n// Copyright (c) 2005-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP\n#define ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <vector>\n#include \"asio/detail/assert.hpp\"\n#include \"asio/detail/mutex.hpp\"\n#include \"asio/detail/tss_ptr.hpp\"\n#include \"asio/ssl/detail/openssl_init.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nclass openssl_init_base::do_init\n{\npublic:\n  do_init()\n  {\n#if (OPENSSL_VERSION_NUMBER < 0x10100000L)\n    ::SSL_library_init();\n    ::SSL_load_error_strings();        \n    ::OpenSSL_add_all_algorithms();\n\n    mutexes_.resize(::CRYPTO_num_locks());\n    for (size_t i = 0; i < mutexes_.size(); ++i)\n      mutexes_[i].reset(new asio::detail::mutex);\n    ::CRYPTO_set_locking_callback(&do_init::openssl_locking_func);\n#endif // (OPENSSL_VERSION_NUMBER < 0x10100000L)\n#if (OPENSSL_VERSION_NUMBER < 0x10000000L)\n    ::CRYPTO_set_id_callback(&do_init::openssl_id_func);\n#endif // (OPENSSL_VERSION_NUMBER < 0x10000000L)\n\n#if !defined(SSL_OP_NO_COMPRESSION) \\\n  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n    null_compression_methods_ = sk_SSL_COMP_new_null();\n#endif // !defined(SSL_OP_NO_COMPRESSION)\n       // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n  }\n\n  ~do_init()\n  {\n#if !defined(SSL_OP_NO_COMPRESSION) \\\n  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n    
sk_SSL_COMP_free(null_compression_methods_);\n#endif // !defined(SSL_OP_NO_COMPRESSION)\n       // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n\n#if (OPENSSL_VERSION_NUMBER < 0x10000000L)\n    ::CRYPTO_set_id_callback(0);\n#endif // (OPENSSL_VERSION_NUMBER < 0x10000000L)\n#if (OPENSSL_VERSION_NUMBER < 0x10100000L)\n    ::CRYPTO_set_locking_callback(0);\n    ::ERR_free_strings();\n    ::EVP_cleanup();\n    ::CRYPTO_cleanup_all_ex_data();\n#endif // (OPENSSL_VERSION_NUMBER < 0x10100000L)\n#if (OPENSSL_VERSION_NUMBER < 0x10000000L)\n    ::ERR_remove_state(0);\n#elif (OPENSSL_VERSION_NUMBER < 0x10100000L)\n    ::ERR_remove_thread_state(NULL);\n#endif // (OPENSSL_VERSION_NUMBER < 0x10000000L)\n#if (OPENSSL_VERSION_NUMBER >= 0x10002000L) \\\n    && (OPENSSL_VERSION_NUMBER < 0x10100000L) \\\n    && !defined(SSL_OP_NO_COMPRESSION)\n    ::SSL_COMP_free_compression_methods();\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10002000L)\n       // && (OPENSSL_VERSION_NUMBER < 0x10100000L)\n       // && !defined(SSL_OP_NO_COMPRESSION)\n#if !defined(OPENSSL_IS_BORINGSSL) && !defined(ASIO_USE_WOLFSSL)\n    ::CONF_modules_unload(1);\n#endif // !defined(OPENSSL_IS_BORINGSSL) && !defined(ASIO_USE_WOLFSSL)\n#if !defined(OPENSSL_NO_ENGINE) \\\n  && (OPENSSL_VERSION_NUMBER < 0x10100000L)\n    ::ENGINE_cleanup();\n#endif // !defined(OPENSSL_NO_ENGINE)\n       // && (OPENSSL_VERSION_NUMBER < 0x10100000L)\n  }\n\n#if !defined(SSL_OP_NO_COMPRESSION) \\\n  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n  STACK_OF(SSL_COMP)* get_null_compression_methods() const\n  {\n    return null_compression_methods_;\n  }\n#endif // !defined(SSL_OP_NO_COMPRESSION)\n       // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n\nprivate:\n#if (OPENSSL_VERSION_NUMBER < 0x10000000L)\n  static unsigned long openssl_id_func()\n  {\n#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n    return ::GetCurrentThreadId();\n#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n    void* id = &errno;\n    ASIO_ASSERT(sizeof(unsigned 
long) >= sizeof(void*));\n    return reinterpret_cast<unsigned long>(id);\n#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)\n  }\n#endif // (OPENSSL_VERSION_NUMBER < 0x10000000L)\n\n#if (OPENSSL_VERSION_NUMBER < 0x10100000L)\n  static void openssl_locking_func(int mode, int n, \n    const char* /*file*/, int /*line*/)\n  {\n    if (mode & CRYPTO_LOCK)\n      instance()->mutexes_[n]->lock();\n    else\n      instance()->mutexes_[n]->unlock();\n  }\n\n  // Mutexes to be used in locking callbacks.\n  std::vector<asio::detail::shared_ptr<\n        asio::detail::mutex> > mutexes_;\n#endif // (OPENSSL_VERSION_NUMBER < 0x10100000L)\n\n#if !defined(SSL_OP_NO_COMPRESSION) \\\n  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n  STACK_OF(SSL_COMP)* null_compression_methods_;\n#endif // !defined(SSL_OP_NO_COMPRESSION)\n       // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n};\n\nasio::detail::shared_ptr<openssl_init_base::do_init>\nopenssl_init_base::instance()\n{\n  static asio::detail::shared_ptr<do_init> init(new do_init);\n  return init;\n}\n\n#if !defined(SSL_OP_NO_COMPRESSION) \\\n  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\nSTACK_OF(SSL_COMP)* openssl_init_base::get_null_compression_methods()\n{\n  return instance()->get_null_compression_methods();\n}\n#endif // !defined(SSL_OP_NO_COMPRESSION)\n       // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/io.hpp",
    "content": "//\n// ssl/detail/io.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_IO_HPP\n#define ASIO_SSL_DETAIL_IO_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/ssl/detail/engine.hpp\"\n#include \"asio/ssl/detail/stream_core.hpp\"\n#include \"asio/write.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\ntemplate <typename Stream, typename Operation>\nstd::size_t io(Stream& next_layer, stream_core& core,\n    const Operation& op, asio::error_code& ec)\n{\n  asio::error_code io_ec;\n  std::size_t bytes_transferred = 0;\n  do switch (op(core.engine_, ec, bytes_transferred))\n  {\n  case engine::want_input_and_retry:\n\n    // If the input buffer is empty then we need to read some more data from\n    // the underlying transport.\n    if (core.input_.size() == 0)\n    {\n      core.input_ = asio::buffer(core.input_buffer_,\n          next_layer.read_some(core.input_buffer_, io_ec));\n      if (!ec)\n        ec = io_ec;\n    }\n\n    // Pass the new input data to the engine.\n    core.input_ = core.engine_.put_input(core.input_);\n\n    // Try the operation again.\n    continue;\n\n  case engine::want_output_and_retry:\n\n    // Get output data from the engine and write it to the underlying\n    // transport.\n    asio::write(next_layer,\n        core.engine_.get_output(core.output_buffer_), io_ec);\n    if (!ec)\n      ec = io_ec;\n\n    // Try the operation again.\n    continue;\n\n  case engine::want_output:\n\n    // Get output data from the engine and write it to the underlying\n    // transport.\n    asio::write(next_layer,\n   
     core.engine_.get_output(core.output_buffer_), io_ec);\n    if (!ec)\n      ec = io_ec;\n\n    // Operation is complete. Return result to caller.\n    core.engine_.map_error_code(ec);\n    return bytes_transferred;\n\n  default:\n\n    // Operation is complete. Return result to caller.\n    core.engine_.map_error_code(ec);\n    return bytes_transferred;\n\n  } while (!ec);\n\n  // Operation failed. Return result to caller.\n  core.engine_.map_error_code(ec);\n  return 0;\n}\n\ntemplate <typename Stream, typename Operation, typename Handler>\nclass io_op\n{\npublic:\n  io_op(Stream& next_layer, stream_core& core,\n      const Operation& op, Handler& handler)\n    : next_layer_(next_layer),\n      core_(core),\n      op_(op),\n      start_(0),\n      want_(engine::want_nothing),\n      bytes_transferred_(0),\n      handler_(ASIO_MOVE_CAST(Handler)(handler))\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE)\n  io_op(const io_op& other)\n    : next_layer_(other.next_layer_),\n      core_(other.core_),\n      op_(other.op_),\n      start_(other.start_),\n      want_(other.want_),\n      ec_(other.ec_),\n      bytes_transferred_(other.bytes_transferred_),\n      handler_(other.handler_)\n  {\n  }\n\n  io_op(io_op&& other)\n    : next_layer_(other.next_layer_),\n      core_(other.core_),\n      op_(ASIO_MOVE_CAST(Operation)(other.op_)),\n      start_(other.start_),\n      want_(other.want_),\n      ec_(other.ec_),\n      bytes_transferred_(other.bytes_transferred_),\n      handler_(ASIO_MOVE_CAST(Handler)(other.handler_))\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE)\n\n  void operator()(asio::error_code ec,\n      std::size_t bytes_transferred = ~std::size_t(0), int start = 0)\n  {\n    switch (start_ = start)\n    {\n    case 1: // Called after at least one async operation.\n      do\n      {\n        switch (want_ = op_(core_.engine_, ec_, bytes_transferred_))\n        {\n        case engine::want_input_and_retry:\n\n          // If the input buffer already has data in it we 
can pass it to the\n          // engine and then retry the operation immediately.\n          if (core_.input_.size() != 0)\n          {\n            core_.input_ = core_.engine_.put_input(core_.input_);\n            continue;\n          }\n\n          // The engine wants more data to be read from input. However, we\n          // cannot allow more than one read operation at a time on the\n          // underlying transport. The pending_read_ timer's expiry is set to\n          // pos_infin if a read is in progress, and neg_infin otherwise.\n          if (core_.expiry(core_.pending_read_) == core_.neg_infin())\n          {\n            // Prevent other read operations from being started.\n            core_.pending_read_.expires_at(core_.pos_infin());\n\n            // Start reading some data from the underlying transport.\n            next_layer_.async_read_some(\n                asio::buffer(core_.input_buffer_),\n                ASIO_MOVE_CAST(io_op)(*this));\n          }\n          else\n          {\n            // Wait until the current read operation completes.\n            core_.pending_read_.async_wait(ASIO_MOVE_CAST(io_op)(*this));\n          }\n\n          // Yield control until asynchronous operation completes. Control\n          // resumes at the \"default:\" label below.\n          return;\n\n        case engine::want_output_and_retry:\n        case engine::want_output:\n\n          // The engine wants some data to be written to the output. However, we\n          // cannot allow more than one write operation at a time on the\n          // underlying transport. 
The pending_write_ timer's expiry is set to\n          // pos_infin if a write is in progress, and neg_infin otherwise.\n          if (core_.expiry(core_.pending_write_) == core_.neg_infin())\n          {\n            // Prevent other write operations from being started.\n            core_.pending_write_.expires_at(core_.pos_infin());\n\n            // Start writing all the data to the underlying transport.\n            asio::async_write(next_layer_,\n                core_.engine_.get_output(core_.output_buffer_),\n                ASIO_MOVE_CAST(io_op)(*this));\n          }\n          else\n          {\n            // Wait until the current write operation completes.\n            core_.pending_write_.async_wait(ASIO_MOVE_CAST(io_op)(*this));\n          }\n\n          // Yield control until asynchronous operation completes. Control\n          // resumes at the \"default:\" label below.\n          return;\n\n        default:\n\n          // The SSL operation is done and we can invoke the handler, but we\n          // have to keep in mind that this function might be being called from\n          // the async operation's initiating function. In this case we're not\n          // allowed to call the handler directly. Instead, issue a zero-sized\n          // read so the handler runs \"as-if\" posted using io_context::post().\n          if (start)\n          {\n            next_layer_.async_read_some(\n                asio::buffer(core_.input_buffer_, 0),\n                ASIO_MOVE_CAST(io_op)(*this));\n\n            // Yield control until asynchronous operation completes. 
Control\n            // resumes at the \"default:\" label below.\n            return;\n          }\n          else\n          {\n            // Continue on to run handler directly.\n            break;\n          }\n        }\n\n        default:\n        if (bytes_transferred == ~std::size_t(0))\n          bytes_transferred = 0; // Timer cancellation, no data transferred.\n        else if (!ec_)\n          ec_ = ec;\n\n        switch (want_)\n        {\n        case engine::want_input_and_retry:\n\n          // Add received data to the engine's input.\n          core_.input_ = asio::buffer(\n              core_.input_buffer_, bytes_transferred);\n          core_.input_ = core_.engine_.put_input(core_.input_);\n\n          // Release any waiting read operations.\n          core_.pending_read_.expires_at(core_.neg_infin());\n\n          // Try the operation again.\n          continue;\n\n        case engine::want_output_and_retry:\n\n          // Release any waiting write operations.\n          core_.pending_write_.expires_at(core_.neg_infin());\n\n          // Try the operation again.\n          continue;\n\n        case engine::want_output:\n\n          // Release any waiting write operations.\n          core_.pending_write_.expires_at(core_.neg_infin());\n\n          // Fall through to call handler.\n\n        default:\n\n          // Pass the result to the handler.\n          op_.call_handler(handler_,\n              core_.engine_.map_error_code(ec_),\n              ec_ ? 0 : bytes_transferred_);\n\n          // Our work here is done.\n          return;\n        }\n      } while (!ec_);\n\n      // Operation failed. 
Pass the result to the handler.\n      op_.call_handler(handler_, core_.engine_.map_error_code(ec_), 0);\n    }\n  }\n\n//private:\n  Stream& next_layer_;\n  stream_core& core_;\n  Operation op_;\n  int start_;\n  engine::want want_;\n  asio::error_code ec_;\n  std::size_t bytes_transferred_;\n  Handler handler_;\n};\n\ntemplate <typename Stream, typename Operation, typename Handler>\ninline void* asio_handler_allocate(std::size_t size,\n    io_op<Stream, Operation, Handler>* this_handler)\n{\n  return asio_handler_alloc_helpers::allocate(\n      size, this_handler->handler_);\n}\n\ntemplate <typename Stream, typename Operation, typename Handler>\ninline void asio_handler_deallocate(void* pointer, std::size_t size,\n    io_op<Stream, Operation, Handler>* this_handler)\n{\n  asio_handler_alloc_helpers::deallocate(\n      pointer, size, this_handler->handler_);\n}\n\ntemplate <typename Stream, typename Operation, typename Handler>\ninline bool asio_handler_is_continuation(\n    io_op<Stream, Operation, Handler>* this_handler)\n{\n  return this_handler->start_ == 0 ? 
true\n    : asio_handler_cont_helpers::is_continuation(this_handler->handler_);\n}\n\ntemplate <typename Function, typename Stream,\n    typename Operation, typename Handler>\ninline void asio_handler_invoke(Function& function,\n    io_op<Stream, Operation, Handler>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Function, typename Stream,\n    typename Operation, typename Handler>\ninline void asio_handler_invoke(const Function& function,\n    io_op<Stream, Operation, Handler>* this_handler)\n{\n  asio_handler_invoke_helpers::invoke(\n      function, this_handler->handler_);\n}\n\ntemplate <typename Stream, typename Operation, typename Handler>\ninline void async_io(Stream& next_layer, stream_core& core,\n    const Operation& op, Handler& handler)\n{\n  io_op<Stream, Operation, Handler>(\n    next_layer, core, op, handler)(\n      asio::error_code(), 0, 1);\n}\n\n} // namespace detail\n} // namespace ssl\n\ntemplate <typename Stream, typename Operation,\n    typename Handler, typename Allocator>\nstruct associated_allocator<\n    ssl::detail::io_op<Stream, Operation, Handler>, Allocator>\n{\n  typedef typename associated_allocator<Handler, Allocator>::type type;\n\n  static type get(const ssl::detail::io_op<Stream, Operation, Handler>& h,\n      const Allocator& a = Allocator()) ASIO_NOEXCEPT\n  {\n    return associated_allocator<Handler, Allocator>::get(h.handler_, a);\n  }\n};\n\ntemplate <typename Stream, typename Operation,\n    typename Handler, typename Executor>\nstruct associated_executor<\n    ssl::detail::io_op<Stream, Operation, Handler>, Executor>\n{\n  typedef typename associated_executor<Handler, Executor>::type type;\n\n  static type get(const ssl::detail::io_op<Stream, Operation, Handler>& h,\n      const Executor& ex = Executor()) ASIO_NOEXCEPT\n  {\n    return associated_executor<Handler, Executor>::get(h.handler_, ex);\n  }\n};\n\n} // namespace asio\n\n#include 
\"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_IO_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/openssl_init.hpp",
    "content": "//\n// ssl/detail/openssl_init.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_OPENSSL_INIT_HPP\n#define ASIO_SSL_DETAIL_OPENSSL_INIT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstring>\n#include \"asio/detail/memory.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nclass openssl_init_base\n  : private noncopyable\n{\nprotected:\n  // Class that performs the actual initialisation.\n  class do_init;\n\n  // Helper function to manage a do_init singleton. The static instance of the\n  // openssl_init object ensures that this function is always called before\n  // main, and therefore before any other threads can get started. 
The do_init\n  // instance must be static in this function to ensure that it gets\n  // initialised before any other global objects try to use it.\n  ASIO_DECL static asio::detail::shared_ptr<do_init> instance();\n\n#if !defined(SSL_OP_NO_COMPRESSION) \\\n  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n  // Get an empty stack of compression methods, to be used when disabling\n  // compression.\n  ASIO_DECL static STACK_OF(SSL_COMP)* get_null_compression_methods();\n#endif // !defined(SSL_OP_NO_COMPRESSION)\n       // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n};\n\ntemplate <bool Do_Init = true>\nclass openssl_init : private openssl_init_base\n{\npublic:\n  // Constructor.\n  openssl_init()\n    : ref_(instance())\n  {\n    using namespace std; // For memmove.\n\n    // Ensure openssl_init::instance_ is linked in.\n    openssl_init* tmp = &instance_;\n    memmove(&tmp, &tmp, sizeof(openssl_init*));\n  }\n\n  // Destructor.\n  ~openssl_init()\n  {\n  }\n\n#if !defined(SSL_OP_NO_COMPRESSION) \\\n  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n  using openssl_init_base::get_null_compression_methods;\n#endif // !defined(SSL_OP_NO_COMPRESSION)\n       // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n\nprivate:\n  // Instance to force initialisation of openssl at global scope.\n  static openssl_init instance_;\n\n  // Reference to singleton do_init object to ensure that openssl does not get\n  // cleaned up until the last user has finished with it.\n  asio::detail::shared_ptr<do_init> ref_;\n};\n\ntemplate <bool Do_Init>\nopenssl_init<Do_Init> openssl_init<Do_Init>::instance_;\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ssl/detail/impl/openssl_init.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_SSL_DETAIL_OPENSSL_INIT_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/openssl_types.hpp",
    "content": "//\n// ssl/detail/openssl_types.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP\n#define ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/socket_types.hpp\"\n#if defined(ASIO_USE_WOLFSSL)\n# include <wolfssl/options.h>\n#endif // defined(ASIO_USE_WOLFSSL)\n#include <openssl/conf.h>\n#include <openssl/ssl.h>\n#if !defined(OPENSSL_NO_ENGINE)\n# include <openssl/engine.h>\n#endif // !defined(OPENSSL_NO_ENGINE)\n#include <openssl/dh.h>\n#include <openssl/err.h>\n#include <openssl/rsa.h>\n#include <openssl/x509v3.h>\n\n#endif // ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/password_callback.hpp",
    "content": "//\n// ssl/detail/password_callback.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP\n#define ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <cstddef>\n#include <string>\n#include \"asio/ssl/context_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nclass password_callback_base\n{\npublic:\n  virtual ~password_callback_base()\n  {\n  }\n\n  virtual std::string call(std::size_t size,\n      context_base::password_purpose purpose) = 0;\n};\n\ntemplate <typename PasswordCallback>\nclass password_callback : public password_callback_base\n{\npublic:\n  explicit password_callback(PasswordCallback callback)\n    : callback_(callback)\n  {\n  }\n\n  virtual std::string call(std::size_t size,\n      context_base::password_purpose purpose)\n  {\n    return callback_(size, purpose);\n  }\n\nprivate:\n  PasswordCallback callback_;\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/read_op.hpp",
    "content": "//\n// ssl/detail/read_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_READ_OP_HPP\n#define ASIO_SSL_DETAIL_READ_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/ssl/detail/engine.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\ntemplate <typename MutableBufferSequence>\nclass read_op\n{\npublic:\n  read_op(const MutableBufferSequence& buffers)\n    : buffers_(buffers)\n  {\n  }\n\n  engine::want operator()(engine& eng,\n      asio::error_code& ec,\n      std::size_t& bytes_transferred) const\n  {\n    asio::mutable_buffer buffer =\n      asio::detail::buffer_sequence_adapter<asio::mutable_buffer,\n        MutableBufferSequence>::first(buffers_);\n\n    return eng.read(buffer, ec, bytes_transferred);\n  }\n\n  template <typename Handler>\n  void call_handler(Handler& handler,\n      const asio::error_code& ec,\n      const std::size_t& bytes_transferred) const\n  {\n    handler(ec, bytes_transferred);\n  }\n\nprivate:\n  MutableBufferSequence buffers_;\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_READ_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/shutdown_op.hpp",
    "content": "//\n// ssl/detail/shutdown_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP\n#define ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/ssl/detail/engine.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nclass shutdown_op\n{\npublic:\n  engine::want operator()(engine& eng,\n      asio::error_code& ec,\n      std::size_t& bytes_transferred) const\n  {\n    bytes_transferred = 0;\n    return eng.shutdown(ec);\n  }\n\n  template <typename Handler>\n  void call_handler(Handler& handler,\n      const asio::error_code& ec,\n      const std::size_t&) const\n  {\n    if (ec == asio::error::eof)\n    {\n      // The engine only generates an eof when the shutdown notification has\n      // been received from the peer. This indicates that the shutdown has\n      // completed successfully, and thus need not be passed on to the handler.\n      handler(asio::error_code());\n    }\n    else\n    {\n      handler(ec);\n    }\n  }\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/stream_core.hpp",
    "content": "//\n// ssl/detail/stream_core.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_STREAM_CORE_HPP\n#define ASIO_SSL_DETAIL_STREAM_CORE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME)\n# include \"asio/deadline_timer.hpp\"\n#else // defined(ASIO_HAS_BOOST_DATE_TIME)\n# include \"asio/steady_timer.hpp\"\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n#include \"asio/ssl/detail/engine.hpp\"\n#include \"asio/buffer.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nstruct stream_core\n{\n  // According to the OpenSSL documentation, this is the buffer size that is\n  // sufficient to hold the largest possible TLS record.\n  enum { max_tls_record_size = 17 * 1024 };\n\n  template <typename Executor>\n  stream_core(SSL_CTX* context, const Executor& ex)\n    : engine_(context),\n      pending_read_(ex),\n      pending_write_(ex),\n      output_buffer_space_(max_tls_record_size),\n      output_buffer_(asio::buffer(output_buffer_space_)),\n      input_buffer_space_(max_tls_record_size),\n      input_buffer_(asio::buffer(input_buffer_space_))\n  {\n    pending_read_.expires_at(neg_infin());\n    pending_write_.expires_at(neg_infin());\n  }\n\n  ~stream_core()\n  {\n  }\n\n  // The SSL engine.\n  engine engine_;\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME)\n  // Timer used for storing queued read operations.\n  asio::deadline_timer pending_read_;\n\n  // Timer used for storing queued write operations.\n  asio::deadline_timer pending_write_;\n\n  // Helper function for obtaining a time value that always 
fires.\n  static asio::deadline_timer::time_type neg_infin()\n  {\n    return boost::posix_time::neg_infin;\n  }\n\n  // Helper function for obtaining a time value that never fires.\n  static asio::deadline_timer::time_type pos_infin()\n  {\n    return boost::posix_time::pos_infin;\n  }\n\n  // Helper function to get a timer's expiry time.\n  static asio::deadline_timer::time_type expiry(\n      const asio::deadline_timer& timer)\n  {\n    return timer.expires_at();\n  }\n#else // defined(ASIO_HAS_BOOST_DATE_TIME)\n  // Timer used for storing queued read operations.\n  asio::steady_timer pending_read_;\n\n  // Timer used for storing queued write operations.\n  asio::steady_timer pending_write_;\n\n  // Helper function for obtaining a time value that always fires.\n  static asio::steady_timer::time_point neg_infin()\n  {\n    return (asio::steady_timer::time_point::min)();\n  }\n\n  // Helper function for obtaining a time value that never fires.\n  static asio::steady_timer::time_point pos_infin()\n  {\n    return (asio::steady_timer::time_point::max)();\n  }\n\n  // Helper function to get a timer's expiry time.\n  static asio::steady_timer::time_point expiry(\n      const asio::steady_timer& timer)\n  {\n    return timer.expiry();\n  }\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n\n  // Buffer space used to prepare output intended for the transport.\n  std::vector<unsigned char> output_buffer_space_;\n\n  // A buffer that may be used to prepare output intended for the transport.\n  const asio::mutable_buffer output_buffer_;\n\n  // Buffer space used to read input intended for the engine.\n  std::vector<unsigned char> input_buffer_space_;\n\n  // A buffer that may be used to read input intended for the engine.\n  const asio::mutable_buffer input_buffer_;\n\n  // The buffer pointing to the engine's unconsumed input.\n  asio::const_buffer input_;\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include 
\"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_STREAM_CORE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/verify_callback.hpp",
    "content": "//\n// ssl/detail/verify_callback.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP\n#define ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/ssl/verify_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\nclass verify_callback_base\n{\npublic:\n  virtual ~verify_callback_base()\n  {\n  }\n\n  virtual bool call(bool preverified, verify_context& ctx) = 0;\n};\n\ntemplate <typename VerifyCallback>\nclass verify_callback : public verify_callback_base\n{\npublic:\n  explicit verify_callback(VerifyCallback callback)\n    : callback_(callback)\n  {\n  }\n\n  virtual bool call(bool preverified, verify_context& ctx)\n  {\n    return callback_(preverified, ctx);\n  }\n\nprivate:\n  VerifyCallback callback_;\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/detail/write_op.hpp",
    "content": "//\n// ssl/detail/write_op.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_DETAIL_WRITE_OP_HPP\n#define ASIO_SSL_DETAIL_WRITE_OP_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/ssl/detail/engine.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\nnamespace detail {\n\ntemplate <typename ConstBufferSequence>\nclass write_op\n{\npublic:\n  write_op(const ConstBufferSequence& buffers)\n    : buffers_(buffers)\n  {\n  }\n\n  engine::want operator()(engine& eng,\n      asio::error_code& ec,\n      std::size_t& bytes_transferred) const\n  {\n    asio::const_buffer buffer =\n      asio::detail::buffer_sequence_adapter<asio::const_buffer,\n        ConstBufferSequence>::first(buffers_);\n\n    return eng.write(buffer, ec, bytes_transferred);\n  }\n\n  template <typename Handler>\n  void call_handler(Handler& handler,\n      const asio::error_code& ec,\n      const std::size_t& bytes_transferred) const\n  {\n    handler(ec, bytes_transferred);\n  }\n\nprivate:\n  ConstBufferSequence buffers_;\n};\n\n} // namespace detail\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_DETAIL_WRITE_OP_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/error.hpp",
    "content": "//\n// ssl/error.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_ERROR_HPP\n#define ASIO_SSL_ERROR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace error {\n\nenum ssl_errors\n{\n  // Error numbers are those produced by openssl.\n};\n\nextern ASIO_DECL\nconst asio::error_category& get_ssl_category();\n\nstatic const asio::error_category&\n  ssl_category ASIO_UNUSED_VARIABLE\n  = asio::error::get_ssl_category();\n\n} // namespace error\nnamespace ssl {\nnamespace error {\n\nenum stream_errors\n{\n#if defined(GENERATING_DOCUMENTATION)\n  /// The underlying stream closed before the ssl stream gracefully shut down.\n  stream_truncated,\n\n  /// The underlying SSL library returned a system error without providing\n  /// further information.\n  unspecified_system_error,\n\n  /// The underlying SSL library generated an unexpected result from a function\n  /// call.\n  unexpected_result\n#else // defined(GENERATING_DOCUMENTATION)\n# if (OPENSSL_VERSION_NUMBER < 0x10100000L) \\\n    && !defined(OPENSSL_IS_BORINGSSL) \\\n    && !defined(ASIO_USE_WOLFSSL)\n  stream_truncated = ERR_PACK(ERR_LIB_SSL, 0, SSL_R_SHORT_READ),\n# else\n  stream_truncated = 1,\n# endif\n  unspecified_system_error = 2,\n  unexpected_result = 3\n#endif // defined(GENERATING_DOCUMENTATION)\n};\n\nextern ASIO_DECL\nconst asio::error_category& get_stream_category();\n\nstatic const asio::error_category&\n  stream_category ASIO_UNUSED_VARIABLE\n  = asio::ssl::error::get_stream_category();\n\n} 
// namespace error\n} // namespace ssl\n} // namespace asio\n\n#if defined(ASIO_HAS_STD_SYSTEM_ERROR)\nnamespace std {\n\ntemplate<> struct is_error_code_enum<asio::error::ssl_errors>\n{\n  static const bool value = true;\n};\n\ntemplate<> struct is_error_code_enum<asio::ssl::error::stream_errors>\n{\n  static const bool value = true;\n};\n\n} // namespace std\n#endif // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\nnamespace asio {\nnamespace error {\n\ninline asio::error_code make_error_code(ssl_errors e)\n{\n  return asio::error_code(\n      static_cast<int>(e), get_ssl_category());\n}\n\n} // namespace error\nnamespace ssl {\nnamespace error {\n\ninline asio::error_code make_error_code(stream_errors e)\n{\n  return asio::error_code(\n      static_cast<int>(e), get_stream_category());\n}\n\n} // namespace error\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ssl/impl/error.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_SSL_ERROR_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/impl/context.hpp",
    "content": "//\n// ssl/impl/context.hpp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com\n// Copyright (c) 2005-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_IMPL_CONTEXT_HPP\n#define ASIO_SSL_IMPL_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/throw_error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\ntemplate <typename VerifyCallback>\nvoid context::set_verify_callback(VerifyCallback callback)\n{\n  asio::error_code ec;\n  this->set_verify_callback(callback, ec);\n  asio::detail::throw_error(ec, \"set_verify_callback\");\n}\n\ntemplate <typename VerifyCallback>\nASIO_SYNC_OP_VOID context::set_verify_callback(\n    VerifyCallback callback, asio::error_code& ec)\n{\n  do_set_verify_callback(\n      new detail::verify_callback<VerifyCallback>(callback), ec);\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\ntemplate <typename PasswordCallback>\nvoid context::set_password_callback(PasswordCallback callback)\n{\n  asio::error_code ec;\n  this->set_password_callback(callback, ec);\n  asio::detail::throw_error(ec, \"set_password_callback\");\n}\n\ntemplate <typename PasswordCallback>\nASIO_SYNC_OP_VOID context::set_password_callback(\n    PasswordCallback callback, asio::error_code& ec)\n{\n  do_set_password_callback(\n      new detail::password_callback<PasswordCallback>(callback), ec);\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_IMPL_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/impl/context.ipp",
    "content": "//\n// ssl/impl/context.ipp\n// ~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com\n// Copyright (c) 2005-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_IMPL_CONTEXT_IPP\n#define ASIO_SSL_IMPL_CONTEXT_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <cstring>\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/ssl/context.hpp\"\n#include \"asio/ssl/error.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\nstruct context::bio_cleanup\n{\n  BIO* p;\n  ~bio_cleanup() { if (p) ::BIO_free(p); }\n};\n\nstruct context::x509_cleanup\n{\n  X509* p;\n  ~x509_cleanup() { if (p) ::X509_free(p); }\n};\n\nstruct context::evp_pkey_cleanup\n{\n  EVP_PKEY* p;\n  ~evp_pkey_cleanup() { if (p) ::EVP_PKEY_free(p); }\n};\n\nstruct context::rsa_cleanup\n{\n  RSA* p;\n  ~rsa_cleanup() { if (p) ::RSA_free(p); }\n};\n\nstruct context::dh_cleanup\n{\n  DH* p;\n  ~dh_cleanup() { if (p) ::DH_free(p); }\n};\n\ncontext::context(context::method m)\n  : handle_(0)\n{\n  ::ERR_clear_error();\n\n  switch (m)\n  {\n    // SSL v2.\n#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2)\n  case context::sslv2:\n  case context::sslv2_client:\n  case context::sslv2_server:\n    asio::detail::throw_error(\n        asio::error::invalid_argument, \"context\");\n    break;\n#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2)\n  case context::sslv2:\n    handle_ = ::SSL_CTX_new(::SSLv2_method());\n    break;\n  case context::sslv2_client:\n    handle_ = ::SSL_CTX_new(::SSLv2_client_method());\n    break;\n  case 
context::sslv2_server:\n    handle_ = ::SSL_CTX_new(::SSLv2_server_method());\n    break;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2)\n\n    // SSL v3.\n#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER)\n  case context::sslv3:\n    handle_ = ::SSL_CTX_new(::TLS_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION);\n    }\n    break;\n  case context::sslv3_client:\n    handle_ = ::SSL_CTX_new(::TLS_client_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION);\n    }\n    break;\n  case context::sslv3_server:\n    handle_ = ::SSL_CTX_new(::TLS_server_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION);\n    }\n    break;\n#elif defined(OPENSSL_NO_SSL3)\n  case context::sslv3:\n  case context::sslv3_client:\n  case context::sslv3_server:\n    asio::detail::throw_error(\n        asio::error::invalid_argument, \"context\");\n    break;\n#else // defined(OPENSSL_NO_SSL3)\n  case context::sslv3:\n    handle_ = ::SSL_CTX_new(::SSLv3_method());\n    break;\n  case context::sslv3_client:\n    handle_ = ::SSL_CTX_new(::SSLv3_client_method());\n    break;\n  case context::sslv3_server:\n    handle_ = ::SSL_CTX_new(::SSLv3_server_method());\n    break;\n#endif // defined(OPENSSL_NO_SSL3)\n\n    // TLS v1.0.\n#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER)\n  case context::tlsv1:\n    handle_ = ::SSL_CTX_new(::TLS_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION);\n    }\n    break;\n  case context::tlsv1_client:\n    handle_ = ::SSL_CTX_new(::TLS_client_method());\n    
if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION);\n    }\n    break;\n  case context::tlsv1_server:\n    handle_ = ::SSL_CTX_new(::TLS_server_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION);\n    }\n    break;\n#elif defined(SSL_TXT_TLSV1)\n  case context::tlsv1:\n    handle_ = ::SSL_CTX_new(::TLSv1_method());\n    break;\n  case context::tlsv1_client:\n    handle_ = ::SSL_CTX_new(::TLSv1_client_method());\n    break;\n  case context::tlsv1_server:\n    handle_ = ::SSL_CTX_new(::TLSv1_server_method());\n    break;\n#else // defined(SSL_TXT_TLSV1)\n  case context::tlsv1:\n  case context::tlsv1_client:\n  case context::tlsv1_server:\n    asio::detail::throw_error(\n        asio::error::invalid_argument, \"context\");\n    break;\n#endif // defined(SSL_TXT_TLSV1)\n\n    // TLS v1.1.\n#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER)\n  case context::tlsv11:\n    handle_ = ::SSL_CTX_new(::TLS_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION);\n    }\n    break;\n  case context::tlsv11_client:\n    handle_ = ::SSL_CTX_new(::TLS_client_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION);\n    }\n    break;\n  case context::tlsv11_server:\n    handle_ = ::SSL_CTX_new(::TLS_server_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION);\n    }\n    break;\n#elif defined(SSL_TXT_TLSV1_1)\n  case context::tlsv11:\n    handle_ = ::SSL_CTX_new(::TLSv1_1_method());\n    break;\n  case context::tlsv11_client:\n    handle_ = 
::SSL_CTX_new(::TLSv1_1_client_method());\n    break;\n  case context::tlsv11_server:\n    handle_ = ::SSL_CTX_new(::TLSv1_1_server_method());\n    break;\n#else // defined(SSL_TXT_TLSV1_1)\n  case context::tlsv11:\n  case context::tlsv11_client:\n  case context::tlsv11_server:\n    asio::detail::throw_error(\n        asio::error::invalid_argument, \"context\");\n    break;\n#endif // defined(SSL_TXT_TLSV1_1)\n\n    // TLS v1.2.\n#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER)\n  case context::tlsv12:\n    handle_ = ::SSL_CTX_new(::TLS_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION);\n    }\n    break;\n  case context::tlsv12_client:\n    handle_ = ::SSL_CTX_new(::TLS_client_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION);\n    }\n    break;\n  case context::tlsv12_server:\n    handle_ = ::SSL_CTX_new(::TLS_server_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION);\n    }\n    break;\n#elif defined(SSL_TXT_TLSV1_2)\n  case context::tlsv12:\n    handle_ = ::SSL_CTX_new(::TLSv1_2_method());\n    break;\n  case context::tlsv12_client:\n    handle_ = ::SSL_CTX_new(::TLSv1_2_client_method());\n    break;\n  case context::tlsv12_server:\n    handle_ = ::SSL_CTX_new(::TLSv1_2_server_method());\n    break;\n#else // defined(SSL_TXT_TLSV1_2)\n  case context::tlsv12:\n  case context::tlsv12_client:\n  case context::tlsv12_server:\n    asio::detail::throw_error(\n        asio::error::invalid_argument, \"context\");\n    break;\n#endif // defined(SSL_TXT_TLSV1_2)\n\n    // TLS v1.3.\n#if (OPENSSL_VERSION_NUMBER >= 0x10101000L) \\\n    && !defined(LIBRESSL_VERSION_NUMBER)\n  case context::tlsv13:\n    handle_ = 
::SSL_CTX_new(::TLS_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION);\n    }\n    break;\n  case context::tlsv13_client:\n    handle_ = ::SSL_CTX_new(::TLS_client_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION);\n    }\n    break;\n  case context::tlsv13_server:\n    handle_ = ::SSL_CTX_new(::TLS_server_method());\n    if (handle_)\n    {\n      SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION);\n      SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION);\n    }\n    break;\n#else // (OPENSSL_VERSION_NUMBER >= 0x10101000L)\n      //   && !defined(LIBRESSL_VERSION_NUMBER)\n  case context::tlsv13:\n  case context::tlsv13_client:\n  case context::tlsv13_server:\n    asio::detail::throw_error(\n        asio::error::invalid_argument, \"context\");\n    break;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10101000L)\n       //   && !defined(LIBRESSL_VERSION_NUMBER)\n\n    // Any supported SSL/TLS version.\n  case context::sslv23:\n    handle_ = ::SSL_CTX_new(::SSLv23_method());\n    break;\n  case context::sslv23_client:\n    handle_ = ::SSL_CTX_new(::SSLv23_client_method());\n    break;\n  case context::sslv23_server:\n    handle_ = ::SSL_CTX_new(::SSLv23_server_method());\n    break;\n\n    // Any supported TLS version.\n#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER)\n  case context::tls:\n    handle_ = ::SSL_CTX_new(::TLS_method());\n    if (handle_)\n      SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION);\n    break;\n  case context::tls_client:\n    handle_ = ::SSL_CTX_new(::TLS_client_method());\n    if (handle_)\n      SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION);\n    break;\n  case context::tls_server:\n    handle_ = ::SSL_CTX_new(::TLS_server_method());\n    if (handle_)\n      
SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION);\n    break;\n#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n  case context::tls:\n    handle_ = ::SSL_CTX_new(::SSLv23_method());\n    if (handle_)\n      SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);\n    break;\n  case context::tls_client:\n    handle_ = ::SSL_CTX_new(::SSLv23_client_method());\n    if (handle_)\n      SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);\n    break;\n  case context::tls_server:\n    handle_ = ::SSL_CTX_new(::SSLv23_server_method());\n    if (handle_)\n      SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);\n    break;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n\n  default:\n    handle_ = ::SSL_CTX_new(0);\n    break;\n  }\n\n  if (handle_ == 0)\n  {\n    asio::error_code ec(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    asio::detail::throw_error(ec, \"context\");\n  }\n\n  set_options(no_compression);\n}\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\ncontext::context(context&& other)\n{\n  handle_ = other.handle_;\n  other.handle_ = 0;\n}\n\ncontext& context::operator=(context&& other)\n{\n  context tmp(ASIO_MOVE_CAST(context)(*this));\n  handle_ = other.handle_;\n  other.handle_ = 0;\n  return *this;\n}\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\ncontext::~context()\n{\n  if (handle_)\n  {\n#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \\\n      && !defined(LIBRESSL_VERSION_NUMBER)) \\\n    || defined(ASIO_USE_WOLFSSL)\n    void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);\n#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n    void* cb_userdata = handle_->default_passwd_callback_userdata;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n    if (cb_userdata)\n    {\n      detail::password_callback_base* callback =\n        static_cast<detail::password_callback_base*>(\n            cb_userdata);\n     
 delete callback;\n#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \\\n      && !defined(LIBRESSL_VERSION_NUMBER)) \\\n    || defined(ASIO_USE_WOLFSSL)\n      ::SSL_CTX_set_default_passwd_cb_userdata(handle_, 0);\n#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n      handle_->default_passwd_callback_userdata = 0;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n    }\n\n    if (SSL_CTX_get_app_data(handle_))\n    {\n      detail::verify_callback_base* callback =\n        static_cast<detail::verify_callback_base*>(\n            SSL_CTX_get_app_data(handle_));\n      delete callback;\n      SSL_CTX_set_app_data(handle_, 0);\n    }\n\n    ::SSL_CTX_free(handle_);\n  }\n}\n\ncontext::native_handle_type context::native_handle()\n{\n  return handle_;\n}\n\nvoid context::clear_options(context::options o)\n{\n  asio::error_code ec;\n  clear_options(o, ec);\n  asio::detail::throw_error(ec, \"clear_options\");\n}\n\nASIO_SYNC_OP_VOID context::clear_options(\n    context::options o, asio::error_code& ec)\n{\n#if (OPENSSL_VERSION_NUMBER >= 0x009080DFL) \\\n  && (OPENSSL_VERSION_NUMBER != 0x00909000L)\n# if !defined(SSL_OP_NO_COMPRESSION)\n  if ((o & context::no_compression) != 0)\n  {\n# if (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n    handle_->comp_methods = SSL_COMP_get_compression_methods();\n# endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n    o ^= context::no_compression;\n  }\n# endif // !defined(SSL_OP_NO_COMPRESSION)\n\n  ::SSL_CTX_clear_options(handle_, o);\n\n  ec = asio::error_code();\n#else // (OPENSSL_VERSION_NUMBER >= 0x009080DFL)\n      //   && (OPENSSL_VERSION_NUMBER != 0x00909000L)\n  (void)o;\n  ec = asio::error::operation_not_supported;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x009080DFL)\n       //   && (OPENSSL_VERSION_NUMBER != 0x00909000L)\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::set_options(context::options o)\n{\n  asio::error_code ec;\n  set_options(o, ec);\n  asio::detail::throw_error(ec, \"set_options\");\n}\n\nASIO_SYNC_OP_VOID 
context::set_options(\n    context::options o, asio::error_code& ec)\n{\n#if !defined(SSL_OP_NO_COMPRESSION)\n  if ((o & context::no_compression) != 0)\n  {\n#if (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n    handle_->comp_methods =\n      asio::ssl::detail::openssl_init<>::get_null_compression_methods();\n#endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L)\n    o ^= context::no_compression;\n  }\n#endif // !defined(SSL_OP_NO_COMPRESSION)\n\n  ::SSL_CTX_set_options(handle_, o);\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::set_verify_mode(verify_mode v)\n{\n  asio::error_code ec;\n  set_verify_mode(v, ec);\n  asio::detail::throw_error(ec, \"set_verify_mode\");\n}\n\nASIO_SYNC_OP_VOID context::set_verify_mode(\n    verify_mode v, asio::error_code& ec)\n{\n  ::SSL_CTX_set_verify(handle_, v, ::SSL_CTX_get_verify_callback(handle_));\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::set_verify_depth(int depth)\n{\n  asio::error_code ec;\n  set_verify_depth(depth, ec);\n  asio::detail::throw_error(ec, \"set_verify_depth\");\n}\n\nASIO_SYNC_OP_VOID context::set_verify_depth(\n    int depth, asio::error_code& ec)\n{\n  ::SSL_CTX_set_verify_depth(handle_, depth);\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::load_verify_file(const std::string& filename)\n{\n  asio::error_code ec;\n  load_verify_file(filename, ec);\n  asio::detail::throw_error(ec, \"load_verify_file\");\n}\n\nASIO_SYNC_OP_VOID context::load_verify_file(\n    const std::string& filename, asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  if (::SSL_CTX_load_verify_locations(handle_, filename.c_str(), 0) != 1)\n  {\n    ec = asio::error_code(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::add_certificate_authority(const const_buffer& ca)\n{\n  
asio::error_code ec;\n  add_certificate_authority(ca, ec);\n  asio::detail::throw_error(ec, \"add_certificate_authority\");\n}\n\nASIO_SYNC_OP_VOID context::add_certificate_authority(\n    const const_buffer& ca, asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  bio_cleanup bio = { make_buffer_bio(ca) };\n  if (bio.p)\n  {\n    if (X509_STORE* store = ::SSL_CTX_get_cert_store(handle_))\n    {\n      for (;;)\n      {\n        x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) };\n        if (!cert.p)\n          break;\n\n        if (::X509_STORE_add_cert(store, cert.p) != 1)\n        {\n          ec = asio::error_code(\n              static_cast<int>(::ERR_get_error()),\n              asio::error::get_ssl_category());\n          ASIO_SYNC_OP_VOID_RETURN(ec);\n        }\n      }\n    }\n  }\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::set_default_verify_paths()\n{\n  asio::error_code ec;\n  set_default_verify_paths(ec);\n  asio::detail::throw_error(ec, \"set_default_verify_paths\");\n}\n\nASIO_SYNC_OP_VOID context::set_default_verify_paths(\n    asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  if (::SSL_CTX_set_default_verify_paths(handle_) != 1)\n  {\n    ec = asio::error_code(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::add_verify_path(const std::string& path)\n{\n  asio::error_code ec;\n  add_verify_path(path, ec);\n  asio::detail::throw_error(ec, \"add_verify_path\");\n}\n\nASIO_SYNC_OP_VOID context::add_verify_path(\n    const std::string& path, asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  if (::SSL_CTX_load_verify_locations(handle_, 0, path.c_str()) != 1)\n  {\n    ec = asio::error_code(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ec = 
asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_certificate(\n    const const_buffer& certificate, file_format format)\n{\n  asio::error_code ec;\n  use_certificate(certificate, format, ec);\n  asio::detail::throw_error(ec, \"use_certificate\");\n}\n\nASIO_SYNC_OP_VOID context::use_certificate(\n    const const_buffer& certificate, file_format format,\n    asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  if (format == context_base::asn1)\n  {\n    if (::SSL_CTX_use_certificate_ASN1(handle_,\n          static_cast<int>(certificate.size()),\n          static_cast<const unsigned char*>(certificate.data())) == 1)\n    {\n      ec = asio::error_code();\n      ASIO_SYNC_OP_VOID_RETURN(ec);\n    }\n  }\n  else if (format == context_base::pem)\n  {\n    bio_cleanup bio = { make_buffer_bio(certificate) };\n    if (bio.p)\n    {\n      x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) };\n      if (cert.p)\n      {\n        if (::SSL_CTX_use_certificate(handle_, cert.p) == 1)\n        {\n          ec = asio::error_code();\n          ASIO_SYNC_OP_VOID_RETURN(ec);\n        }\n      }\n    }\n  }\n  else\n  {\n    ec = asio::error::invalid_argument;\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ec = asio::error_code(\n      static_cast<int>(::ERR_get_error()),\n      asio::error::get_ssl_category());\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_certificate_file(\n    const std::string& filename, file_format format)\n{\n  asio::error_code ec;\n  use_certificate_file(filename, format, ec);\n  asio::detail::throw_error(ec, \"use_certificate_file\");\n}\n\nASIO_SYNC_OP_VOID context::use_certificate_file(\n    const std::string& filename, file_format format,\n    asio::error_code& ec)\n{\n  int file_type;\n  switch (format)\n  {\n  case context_base::asn1:\n    file_type = SSL_FILETYPE_ASN1;\n    break;\n  case context_base::pem:\n    file_type = SSL_FILETYPE_PEM;\n    break;\n  default:\n    {\n      ec = 
asio::error::invalid_argument;\n      ASIO_SYNC_OP_VOID_RETURN(ec);\n    }\n  }\n\n  ::ERR_clear_error();\n\n  if (::SSL_CTX_use_certificate_file(handle_, filename.c_str(), file_type) != 1)\n  {\n    ec = asio::error_code(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_certificate_chain(const const_buffer& chain)\n{\n  asio::error_code ec;\n  use_certificate_chain(chain, ec);\n  asio::detail::throw_error(ec, \"use_certificate_chain\");\n}\n\nASIO_SYNC_OP_VOID context::use_certificate_chain(\n    const const_buffer& chain, asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  bio_cleanup bio = { make_buffer_bio(chain) };\n  if (bio.p)\n  {\n#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \\\n      && !defined(LIBRESSL_VERSION_NUMBER)) \\\n    || defined(ASIO_USE_WOLFSSL)\n    pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_);\n    void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);\n#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n    pem_password_cb* callback = handle_->default_passwd_callback;\n    void* cb_userdata = handle_->default_passwd_callback_userdata;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n    x509_cleanup cert = {\n      ::PEM_read_bio_X509_AUX(bio.p, 0,\n          callback,\n          cb_userdata) };\n    if (!cert.p)\n    {\n      ec = asio::error_code(ERR_R_PEM_LIB,\n          asio::error::get_ssl_category());\n      ASIO_SYNC_OP_VOID_RETURN(ec);\n    }\n\n    int result = ::SSL_CTX_use_certificate(handle_, cert.p);\n    if (result == 0 || ::ERR_peek_error() != 0)\n    {\n      ec = asio::error_code(\n          static_cast<int>(::ERR_get_error()),\n          asio::error::get_ssl_category());\n      ASIO_SYNC_OP_VOID_RETURN(ec);\n    }\n\n#if ((OPENSSL_VERSION_NUMBER >= 0x10002000L) \\\n      && !defined(LIBRESSL_VERSION_NUMBER)) \\\n    
|| defined(ASIO_USE_WOLFSSL)\n    ::SSL_CTX_clear_chain_certs(handle_);\n#else\n    if (handle_->extra_certs)\n    {\n      ::sk_X509_pop_free(handle_->extra_certs, X509_free);\n      handle_->extra_certs = 0;\n    }\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10002000L)\n\n    while (X509* cacert = ::PEM_read_bio_X509(bio.p, 0,\n          callback,\n          cb_userdata))\n    {\n      if (!::SSL_CTX_add_extra_chain_cert(handle_, cacert))\n      {\n        ec = asio::error_code(\n            static_cast<int>(::ERR_get_error()),\n            asio::error::get_ssl_category());\n        ASIO_SYNC_OP_VOID_RETURN(ec);\n      }\n    }\n  \n    result = ::ERR_peek_last_error();\n    if ((ERR_GET_LIB(result) == ERR_LIB_PEM)\n        && (ERR_GET_REASON(result) == PEM_R_NO_START_LINE))\n    {\n      ::ERR_clear_error();\n      ec = asio::error_code();\n      ASIO_SYNC_OP_VOID_RETURN(ec);\n    }\n  }\n\n  ec = asio::error_code(\n      static_cast<int>(::ERR_get_error()),\n      asio::error::get_ssl_category());\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_certificate_chain_file(const std::string& filename)\n{\n  asio::error_code ec;\n  use_certificate_chain_file(filename, ec);\n  asio::detail::throw_error(ec, \"use_certificate_chain_file\");\n}\n\nASIO_SYNC_OP_VOID context::use_certificate_chain_file(\n    const std::string& filename, asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  if (::SSL_CTX_use_certificate_chain_file(handle_, filename.c_str()) != 1)\n  {\n    ec = asio::error_code(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_private_key(\n    const const_buffer& private_key, context::file_format format)\n{\n  asio::error_code ec;\n  use_private_key(private_key, format, ec);\n  asio::detail::throw_error(ec, \"use_private_key\");\n}\n\nASIO_SYNC_OP_VOID context::use_private_key(\n    const 
const_buffer& private_key, context::file_format format,\n    asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \\\n      && !defined(LIBRESSL_VERSION_NUMBER)) \\\n    || defined(ASIO_USE_WOLFSSL)\n    pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_);\n    void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);\n#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n    pem_password_cb* callback = handle_->default_passwd_callback;\n    void* cb_userdata = handle_->default_passwd_callback_userdata;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n\n  bio_cleanup bio = { make_buffer_bio(private_key) };\n  if (bio.p)\n  {\n    evp_pkey_cleanup evp_private_key = { 0 };\n    switch (format)\n    {\n    case context_base::asn1:\n      evp_private_key.p = ::d2i_PrivateKey_bio(bio.p, 0);\n      break;\n    case context_base::pem:\n      evp_private_key.p = ::PEM_read_bio_PrivateKey(\n          bio.p, 0, callback,\n          cb_userdata);\n      break;\n    default:\n      {\n        ec = asio::error::invalid_argument;\n        ASIO_SYNC_OP_VOID_RETURN(ec);\n      }\n    }\n\n    if (evp_private_key.p)\n    {\n      if (::SSL_CTX_use_PrivateKey(handle_, evp_private_key.p) == 1)\n      {\n        ec = asio::error_code();\n        ASIO_SYNC_OP_VOID_RETURN(ec);\n      }\n    }\n  }\n\n  ec = asio::error_code(\n      static_cast<int>(::ERR_get_error()),\n      asio::error::get_ssl_category());\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_private_key_file(\n    const std::string& filename, context::file_format format)\n{\n  asio::error_code ec;\n  use_private_key_file(filename, format, ec);\n  asio::detail::throw_error(ec, \"use_private_key_file\");\n}\n\nvoid context::use_rsa_private_key(\n    const const_buffer& private_key, context::file_format format)\n{\n  asio::error_code ec;\n  use_rsa_private_key(private_key, format, ec);\n  asio::detail::throw_error(ec, 
\"use_rsa_private_key\");\n}\n\nASIO_SYNC_OP_VOID context::use_rsa_private_key(\n    const const_buffer& private_key, context::file_format format,\n    asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \\\n      && !defined(LIBRESSL_VERSION_NUMBER)) \\\n    || defined(ASIO_USE_WOLFSSL)\n    pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_);\n    void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);\n#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n    pem_password_cb* callback = handle_->default_passwd_callback;\n    void* cb_userdata = handle_->default_passwd_callback_userdata;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n\n  bio_cleanup bio = { make_buffer_bio(private_key) };\n  if (bio.p)\n  {\n    rsa_cleanup rsa_private_key = { 0 };\n    switch (format)\n    {\n    case context_base::asn1:\n      rsa_private_key.p = ::d2i_RSAPrivateKey_bio(bio.p, 0);\n      break;\n    case context_base::pem:\n      rsa_private_key.p = ::PEM_read_bio_RSAPrivateKey(\n          bio.p, 0, callback,\n          cb_userdata);\n      break;\n    default:\n      {\n        ec = asio::error::invalid_argument;\n        ASIO_SYNC_OP_VOID_RETURN(ec);\n      }\n    }\n\n    if (rsa_private_key.p)\n    {\n      if (::SSL_CTX_use_RSAPrivateKey(handle_, rsa_private_key.p) == 1)\n      {\n        ec = asio::error_code();\n        ASIO_SYNC_OP_VOID_RETURN(ec);\n      }\n    }\n  }\n\n  ec = asio::error_code(\n      static_cast<int>(::ERR_get_error()),\n      asio::error::get_ssl_category());\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nASIO_SYNC_OP_VOID context::use_private_key_file(\n    const std::string& filename, context::file_format format,\n    asio::error_code& ec)\n{\n  int file_type;\n  switch (format)\n  {\n  case context_base::asn1:\n    file_type = SSL_FILETYPE_ASN1;\n    break;\n  case context_base::pem:\n    file_type = SSL_FILETYPE_PEM;\n    break;\n  default:\n    {\n      ec = 
asio::error::invalid_argument;\n      ASIO_SYNC_OP_VOID_RETURN(ec);\n    }\n  }\n\n  ::ERR_clear_error();\n\n  if (::SSL_CTX_use_PrivateKey_file(handle_, filename.c_str(), file_type) != 1)\n  {\n    ec = asio::error_code(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_rsa_private_key_file(\n    const std::string& filename, context::file_format format)\n{\n  asio::error_code ec;\n  use_rsa_private_key_file(filename, format, ec);\n  asio::detail::throw_error(ec, \"use_rsa_private_key_file\");\n}\n\nASIO_SYNC_OP_VOID context::use_rsa_private_key_file(\n    const std::string& filename, context::file_format format,\n    asio::error_code& ec)\n{\n  int file_type;\n  switch (format)\n  {\n  case context_base::asn1:\n    file_type = SSL_FILETYPE_ASN1;\n    break;\n  case context_base::pem:\n    file_type = SSL_FILETYPE_PEM;\n    break;\n  default:\n    {\n      ec = asio::error::invalid_argument;\n      ASIO_SYNC_OP_VOID_RETURN(ec);\n    }\n  }\n\n  ::ERR_clear_error();\n\n  if (::SSL_CTX_use_RSAPrivateKey_file(\n        handle_, filename.c_str(), file_type) != 1)\n  {\n    ec = asio::error_code(\n        static_cast<int>(::ERR_get_error()),\n        asio::error::get_ssl_category());\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_tmp_dh(const const_buffer& dh)\n{\n  asio::error_code ec;\n  use_tmp_dh(dh, ec);\n  asio::detail::throw_error(ec, \"use_tmp_dh\");\n}\n\nASIO_SYNC_OP_VOID context::use_tmp_dh(\n    const const_buffer& dh, asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  bio_cleanup bio = { make_buffer_bio(dh) };\n  if (bio.p)\n  {\n    return do_use_tmp_dh(bio.p, ec);\n  }\n\n  ec = asio::error_code(\n      static_cast<int>(::ERR_get_error()),\n      asio::error::get_ssl_category());\n  
ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nvoid context::use_tmp_dh_file(const std::string& filename)\n{\n  asio::error_code ec;\n  use_tmp_dh_file(filename, ec);\n  asio::detail::throw_error(ec, \"use_tmp_dh_file\");\n}\n\nASIO_SYNC_OP_VOID context::use_tmp_dh_file(\n    const std::string& filename, asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  bio_cleanup bio = { ::BIO_new_file(filename.c_str(), \"r\") };\n  if (bio.p)\n  {\n    return do_use_tmp_dh(bio.p, ec);\n  }\n\n  ec = asio::error_code(\n      static_cast<int>(::ERR_get_error()),\n      asio::error::get_ssl_category());\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nASIO_SYNC_OP_VOID context::do_use_tmp_dh(\n    BIO* bio, asio::error_code& ec)\n{\n  ::ERR_clear_error();\n\n  dh_cleanup dh = { ::PEM_read_bio_DHparams(bio, 0, 0, 0) };\n  if (dh.p)\n  {\n    if (::SSL_CTX_set_tmp_dh(handle_, dh.p) == 1)\n    {\n      ec = asio::error_code();\n      ASIO_SYNC_OP_VOID_RETURN(ec);\n    }\n  }\n\n  ec = asio::error_code(\n      static_cast<int>(::ERR_get_error()),\n      asio::error::get_ssl_category());\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nASIO_SYNC_OP_VOID context::do_set_verify_callback(\n    detail::verify_callback_base* callback, asio::error_code& ec)\n{\n  if (SSL_CTX_get_app_data(handle_))\n  {\n    delete static_cast<detail::verify_callback_base*>(\n        SSL_CTX_get_app_data(handle_));\n  }\n\n  SSL_CTX_set_app_data(handle_, callback);\n\n  ::SSL_CTX_set_verify(handle_,\n      ::SSL_CTX_get_verify_mode(handle_),\n      &context::verify_callback_function);\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nint context::verify_callback_function(int preverified, X509_STORE_CTX* ctx)\n{\n  if (ctx)\n  {\n    if (SSL* ssl = static_cast<SSL*>(\n          ::X509_STORE_CTX_get_ex_data(\n            ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx())))\n    {\n      if (SSL_CTX* handle = ::SSL_get_SSL_CTX(ssl))\n      {\n        if (SSL_CTX_get_app_data(handle))\n        {\n          
detail::verify_callback_base* callback =\n            static_cast<detail::verify_callback_base*>(\n                SSL_CTX_get_app_data(handle));\n\n          verify_context verify_ctx(ctx);\n          return callback->call(preverified != 0, verify_ctx) ? 1 : 0;\n        }\n      }\n    }\n  }\n\n  return 0;\n}\n\nASIO_SYNC_OP_VOID context::do_set_password_callback(\n    detail::password_callback_base* callback, asio::error_code& ec)\n{\n#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \\\n      && !defined(LIBRESSL_VERSION_NUMBER)) \\\n    || defined(ASIO_USE_WOLFSSL)\n  void* old_callback = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);\n  ::SSL_CTX_set_default_passwd_cb_userdata(handle_, callback);\n#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n  void* old_callback = handle_->default_passwd_callback_userdata;\n  handle_->default_passwd_callback_userdata = callback;\n#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)\n\n  if (old_callback)\n    delete static_cast<detail::password_callback_base*>(\n        old_callback);\n\n  SSL_CTX_set_default_passwd_cb(handle_, &context::password_callback_function);\n\n  ec = asio::error_code();\n  ASIO_SYNC_OP_VOID_RETURN(ec);\n}\n\nint context::password_callback_function(\n    char* buf, int size, int purpose, void* data)\n{\n  using namespace std; // For strncat and strlen.\n\n  if (data)\n  {\n    detail::password_callback_base* callback =\n      static_cast<detail::password_callback_base*>(data);\n\n    std::string passwd = callback->call(static_cast<std::size_t>(size),\n        purpose ? 
context_base::for_writing : context_base::for_reading);\n\n#if defined(ASIO_HAS_SECURE_RTL)\n    strcpy_s(buf, size, passwd.c_str());\n#else // defined(ASIO_HAS_SECURE_RTL)\n    *buf = '\\0';\n    if (size > 0)\n      strncat(buf, passwd.c_str(), size - 1);\n#endif // defined(ASIO_HAS_SECURE_RTL)\n\n    return static_cast<int>(strlen(buf));\n  }\n\n  return 0;\n}\n\nBIO* context::make_buffer_bio(const const_buffer& b)\n{\n  return ::BIO_new_mem_buf(\n      const_cast<void*>(b.data()),\n      static_cast<int>(b.size()));\n}\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_IMPL_CONTEXT_IPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/impl/error.ipp",
    "content": "//\n// ssl/impl/error.ipp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_IMPL_ERROR_IPP\n#define ASIO_SSL_IMPL_ERROR_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ssl/error.hpp\"\n#include \"asio/ssl/detail/openssl_init.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace error {\nnamespace detail {\n\nclass ssl_category : public asio::error_category\n{\npublic:\n  const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT\n  {\n    return \"asio.ssl\";\n  }\n\n  std::string message(int value) const\n  {\n    const char* s = ::ERR_reason_error_string(value);\n    return s ? s : \"asio.ssl error\";\n  }\n};\n\n} // namespace detail\n\nconst asio::error_category& get_ssl_category()\n{\n  static detail::ssl_category instance;\n  return instance;\n}\n\n} // namespace error\nnamespace ssl {\nnamespace error {\n\n#if (OPENSSL_VERSION_NUMBER < 0x10100000L) && !defined(OPENSSL_IS_BORINGSSL)\n\nconst asio::error_category& get_stream_category()\n{\n  return asio::error::get_ssl_category();\n}\n\n#else\n\nnamespace detail {\n\nclass stream_category : public asio::error_category\n{\npublic:\n  const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT\n  {\n    return \"asio.ssl.stream\";\n  }\n\n  std::string message(int value) const\n  {\n    switch (value)\n    {\n    case stream_truncated: return \"stream truncated\";\n    case unspecified_system_error: return \"unspecified system error\";\n    case unexpected_result: return \"unexpected result\";\n    default: return \"asio.ssl.stream error\";\n    }\n  }\n};\n\n} // namespace detail\n\nconst asio::error_category& 
get_stream_category()\n{\n  static detail::stream_category instance;\n  return instance;\n}\n\n#endif\n\n} // namespace error\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_IMPL_ERROR_IPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/impl/rfc2818_verification.ipp",
    "content": "//\n// ssl/impl/rfc2818_verification.ipp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP\n#define ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <cctype>\n#include <cstring>\n#include \"asio/ip/address.hpp\"\n#include \"asio/ssl/rfc2818_verification.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\nbool rfc2818_verification::operator()(\n    bool preverified, verify_context& ctx) const\n{\n  using namespace std; // For memcmp.\n\n  // Don't bother looking at certificates that have failed pre-verification.\n  if (!preverified)\n    return false;\n\n  // We're only interested in checking the certificate at the end of the chain.\n  int depth = X509_STORE_CTX_get_error_depth(ctx.native_handle());\n  if (depth > 0)\n    return true;\n\n  // Try converting the host name to an address. 
If it is an address then we\n  // need to look for an IP address in the certificate rather than a host name.\n  asio::error_code ec;\n  ip::address address = ip::make_address(host_, ec);\n  bool is_address = !ec;\n\n  X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle());\n\n  // Go through the alternate names in the certificate looking for matching DNS\n  // or IP address entries.\n  GENERAL_NAMES* gens = static_cast<GENERAL_NAMES*>(\n      X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0));\n  for (int i = 0; i < sk_GENERAL_NAME_num(gens); ++i)\n  {\n    GENERAL_NAME* gen = sk_GENERAL_NAME_value(gens, i);\n    if (gen->type == GEN_DNS && !is_address)\n    {\n      ASN1_IA5STRING* domain = gen->d.dNSName;\n      if (domain->type == V_ASN1_IA5STRING && domain->data && domain->length)\n      {\n        const char* pattern = reinterpret_cast<const char*>(domain->data);\n        std::size_t pattern_length = domain->length;\n        if (match_pattern(pattern, pattern_length, host_.c_str()))\n        {\n          GENERAL_NAMES_free(gens);\n          return true;\n        }\n      }\n    }\n    else if (gen->type == GEN_IPADD && is_address)\n    {\n      ASN1_OCTET_STRING* ip_address = gen->d.iPAddress;\n      if (ip_address->type == V_ASN1_OCTET_STRING && ip_address->data)\n      {\n        if (address.is_v4() && ip_address->length == 4)\n        {\n          ip::address_v4::bytes_type bytes = address.to_v4().to_bytes();\n          if (memcmp(bytes.data(), ip_address->data, 4) == 0)\n          {\n            GENERAL_NAMES_free(gens);\n            return true;\n          }\n        }\n        else if (address.is_v6() && ip_address->length == 16)\n        {\n          ip::address_v6::bytes_type bytes = address.to_v6().to_bytes();\n          if (memcmp(bytes.data(), ip_address->data, 16) == 0)\n          {\n            GENERAL_NAMES_free(gens);\n            return true;\n          }\n        }\n      }\n    }\n  }\n  GENERAL_NAMES_free(gens);\n\n  // No match 
in the alternate names, so try the common names. We should only\n  // use the \"most specific\" common name, which is the last one in the list.\n  X509_NAME* name = X509_get_subject_name(cert);\n  int i = -1;\n  ASN1_STRING* common_name = 0;\n  while ((i = X509_NAME_get_index_by_NID(name, NID_commonName, i)) >= 0)\n  {\n    X509_NAME_ENTRY* name_entry = X509_NAME_get_entry(name, i);\n    common_name = X509_NAME_ENTRY_get_data(name_entry);\n  }\n  if (common_name && common_name->data && common_name->length)\n  {\n    const char* pattern = reinterpret_cast<const char*>(common_name->data);\n    std::size_t pattern_length = common_name->length;\n    if (match_pattern(pattern, pattern_length, host_.c_str()))\n      return true;\n  }\n\n  return false;\n}\n\nbool rfc2818_verification::match_pattern(const char* pattern,\n    std::size_t pattern_length, const char* host)\n{\n  using namespace std; // For tolower.\n\n  const char* p = pattern;\n  const char* p_end = p + pattern_length;\n  const char* h = host;\n\n  while (p != p_end && *h)\n  {\n    if (*p == '*')\n    {\n      ++p;\n      while (*h && *h != '.')\n        if (match_pattern(p, p_end - p, h++))\n          return true;\n    }\n    else if (tolower(*p) == tolower(*h))\n    {\n      ++p;\n      ++h;\n    }\n    else\n    {\n      return false;\n    }\n  }\n\n  return p == p_end && !*h;\n}\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/impl/src.hpp",
    "content": "//\n// impl/ssl/src.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_IMPL_SRC_HPP\n#define ASIO_SSL_IMPL_SRC_HPP\n\n#define ASIO_SOURCE\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# error Do not compile Asio library source with ASIO_HEADER_ONLY defined\n#endif\n\n#include \"asio/ssl/impl/context.ipp\"\n#include \"asio/ssl/impl/error.ipp\"\n#include \"asio/ssl/detail/impl/engine.ipp\"\n#include \"asio/ssl/detail/impl/openssl_init.ipp\"\n#include \"asio/ssl/impl/rfc2818_verification.ipp\"\n\n#endif // ASIO_SSL_IMPL_SRC_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/rfc2818_verification.hpp",
    "content": "//\n// ssl/rfc2818_verification.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_RFC2818_VERIFICATION_HPP\n#define ASIO_SSL_RFC2818_VERIFICATION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include <string>\n#include \"asio/ssl/detail/openssl_types.hpp\"\n#include \"asio/ssl/verify_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\n/// Verifies a certificate against a hostname according to the rules described\n/// in RFC 2818.\n/**\n * @par Example\n * The following example shows how to synchronously open a secure connection to\n * a given host name:\n * @code\n * using asio::ip::tcp;\n * namespace ssl = asio::ssl;\n * typedef ssl::stream<tcp::socket> ssl_socket;\n *\n * // Create a context that uses the default paths for finding CA certificates.\n * ssl::context ctx(ssl::context::sslv23);\n * ctx.set_default_verify_paths();\n *\n * // Open a socket and connect it to the remote host.\n * asio::io_context io_context;\n * ssl_socket sock(io_context, ctx);\n * tcp::resolver resolver(io_context);\n * tcp::resolver::query query(\"host.name\", \"https\");\n * asio::connect(sock.lowest_layer(), resolver.resolve(query));\n * sock.lowest_layer().set_option(tcp::no_delay(true));\n *\n * // Perform SSL handshake and verify the remote host's certificate.\n * sock.set_verify_mode(ssl::verify_peer);\n * sock.set_verify_callback(ssl::rfc2818_verification(\"host.name\"));\n * sock.handshake(ssl_socket::client);\n *\n * // ... 
read and write as normal ...\n * @endcode\n */\nclass rfc2818_verification\n{\npublic:\n  /// The type of the function object's result.\n  typedef bool result_type;\n\n  /// Constructor.\n  explicit rfc2818_verification(const std::string& host)\n    : host_(host)\n  {\n  }\n\n  /// Perform certificate verification.\n  ASIO_DECL bool operator()(bool preverified, verify_context& ctx) const;\n\nprivate:\n  // Helper function to check a host name against a pattern.\n  ASIO_DECL static bool match_pattern(const char* pattern,\n      std::size_t pattern_length, const char* host);\n\n  // Helper function to check a host name against an IPv4 address\n  // The host name to be checked.\n  std::string host_;\n};\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/ssl/impl/rfc2818_verification.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_SSL_RFC2818_VERIFICATION_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/stream.hpp",
    "content": "//\n// ssl/stream.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_STREAM_HPP\n#define ASIO_SSL_STREAM_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/buffer_sequence_adapter.hpp\"\n#include \"asio/detail/handler_type_requirements.hpp\"\n#include \"asio/detail/non_const_lvalue.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n#include \"asio/ssl/context.hpp\"\n#include \"asio/ssl/detail/buffered_handshake_op.hpp\"\n#include \"asio/ssl/detail/handshake_op.hpp\"\n#include \"asio/ssl/detail/io.hpp\"\n#include \"asio/ssl/detail/read_op.hpp\"\n#include \"asio/ssl/detail/shutdown_op.hpp\"\n#include \"asio/ssl/detail/stream_core.hpp\"\n#include \"asio/ssl/detail/write_op.hpp\"\n#include \"asio/ssl/stream_base.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\n/// Provides stream-oriented functionality using SSL.\n/**\n * The stream class template provides asynchronous and blocking stream-oriented\n * functionality using SSL.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe. 
The application must also ensure that all\n * asynchronous operations are performed within the same implicit or explicit\n * strand.\n *\n * @par Example\n * To use the SSL stream template with an ip::tcp::socket, you would write:\n * @code\n * asio::io_context my_context;\n * asio::ssl::context ctx(asio::ssl::context::sslv23);\n * asio::ssl::stream<asio:ip::tcp::socket> sock(my_context, ctx);\n * @endcode\n *\n * @par Concepts:\n * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream.\n */\ntemplate <typename Stream>\nclass stream :\n  public stream_base,\n  private noncopyable\n{\npublic:\n  /// The native handle type of the SSL stream.\n  typedef SSL* native_handle_type;\n\n  /// Structure for use with deprecated impl_type.\n  struct impl_struct\n  {\n    SSL* ssl;\n  };\n\n  /// The type of the next layer.\n  typedef typename remove_reference<Stream>::type next_layer_type;\n\n  /// The type of the lowest layer.\n  typedef typename next_layer_type::lowest_layer_type lowest_layer_type;\n\n  /// The type of the executor associated with the object.\n  typedef typename lowest_layer_type::executor_type executor_type;\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Construct a stream.\n  /**\n   * This constructor creates a stream and initialises the underlying stream\n   * object.\n   *\n   * @param arg The argument to be passed to initialise the underlying stream.\n   *\n   * @param ctx The SSL context to be used for the stream.\n   */\n  template <typename Arg>\n  stream(Arg&& arg, context& ctx)\n    : next_layer_(ASIO_MOVE_CAST(Arg)(arg)),\n      core_(ctx.native_handle(), next_layer_.lowest_layer().get_executor())\n  {\n  }\n#else // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  template <typename Arg>\n  stream(Arg& arg, context& ctx)\n    : next_layer_(arg),\n      core_(ctx.native_handle(), next_layer_.lowest_layer().get_executor())\n  {\n  }\n#endif // defined(ASIO_HAS_MOVE) || 
defined(GENERATING_DOCUMENTATION)\n\n  /// Destructor.\n  /**\n   * @note A @c stream object must not be destroyed while there are pending\n   * asynchronous operations associated with it.\n   */\n  ~stream()\n  {\n  }\n\n  /// Get the executor associated with the object.\n  /**\n   * This function may be used to obtain the executor object that the stream\n   * uses to dispatch handlers for asynchronous operations.\n   *\n   * @return A copy of the executor that stream will use to dispatch handlers.\n   */\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return next_layer_.lowest_layer().get_executor();\n  }\n\n  /// Get the underlying implementation in the native type.\n  /**\n   * This function may be used to obtain the underlying implementation of the\n   * context. This is intended to allow access to context functionality that is\n   * not otherwise provided.\n   *\n   * @par Example\n   * The native_handle() function returns a pointer of type @c SSL* that is\n   * suitable for passing to functions such as @c SSL_get_verify_result and\n   * @c SSL_get_peer_certificate:\n   * @code\n   * asio::ssl::stream<asio:ip::tcp::socket> sock(my_context, ctx);\n   *\n   * // ... 
establish connection and perform handshake ...\n   *\n   * if (X509* cert = SSL_get_peer_certificate(sock.native_handle()))\n   * {\n   *   if (SSL_get_verify_result(sock.native_handle()) == X509_V_OK)\n   *   {\n   *     // ...\n   *   }\n   * }\n   * @endcode\n   */\n  native_handle_type native_handle()\n  {\n    return core_.engine_.native_handle();\n  }\n\n  /// Get a reference to the next layer.\n  /**\n   * This function returns a reference to the next layer in a stack of stream\n   * layers.\n   *\n   * @return A reference to the next layer in the stack of stream layers.\n   * Ownership is not transferred to the caller.\n   */\n  const next_layer_type& next_layer() const\n  {\n    return next_layer_;\n  }\n\n  /// Get a reference to the next layer.\n  /**\n   * This function returns a reference to the next layer in a stack of stream\n   * layers.\n   *\n   * @return A reference to the next layer in the stack of stream layers.\n   * Ownership is not transferred to the caller.\n   */\n  next_layer_type& next_layer()\n  {\n    return next_layer_;\n  }\n\n  /// Get a reference to the lowest layer.\n  /**\n   * This function returns a reference to the lowest layer in a stack of\n   * stream layers.\n   *\n   * @return A reference to the lowest layer in the stack of stream layers.\n   * Ownership is not transferred to the caller.\n   */\n  lowest_layer_type& lowest_layer()\n  {\n    return next_layer_.lowest_layer();\n  }\n\n  /// Get a reference to the lowest layer.\n  /**\n   * This function returns a reference to the lowest layer in a stack of\n   * stream layers.\n   *\n   * @return A reference to the lowest layer in the stack of stream layers.\n   * Ownership is not transferred to the caller.\n   */\n  const lowest_layer_type& lowest_layer() const\n  {\n    return next_layer_.lowest_layer();\n  }\n\n  /// Set the peer verification mode.\n  /**\n   * This function may be used to configure the peer verification mode used by\n   * the stream. 
The new mode will override the mode inherited from the context.\n   *\n   * @param v A bitmask of peer verification modes. See @ref verify_mode for\n   * available values.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_set_verify.\n   */\n  void set_verify_mode(verify_mode v)\n  {\n    asio::error_code ec;\n    set_verify_mode(v, ec);\n    asio::detail::throw_error(ec, \"set_verify_mode\");\n  }\n\n  /// Set the peer verification mode.\n  /**\n   * This function may be used to configure the peer verification mode used by\n   * the stream. The new mode will override the mode inherited from the context.\n   *\n   * @param v A bitmask of peer verification modes. See @ref verify_mode for\n   * available values.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_set_verify.\n   */\n  ASIO_SYNC_OP_VOID set_verify_mode(\n      verify_mode v, asio::error_code& ec)\n  {\n    core_.engine_.set_verify_mode(v, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Set the peer verification depth.\n  /**\n   * This function may be used to configure the maximum verification depth\n   * allowed by the stream.\n   *\n   * @param depth Maximum depth for the certificate chain verification that\n   * shall be allowed.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_set_verify_depth.\n   */\n  void set_verify_depth(int depth)\n  {\n    asio::error_code ec;\n    set_verify_depth(depth, ec);\n    asio::detail::throw_error(ec, \"set_verify_depth\");\n  }\n\n  /// Set the peer verification depth.\n  /**\n   * This function may be used to configure the maximum verification depth\n   * allowed by the stream.\n   *\n   * @param depth Maximum depth for the certificate chain verification that\n   * shall be allowed.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_set_verify_depth.\n   */\n  ASIO_SYNC_OP_VOID 
set_verify_depth(\n      int depth, asio::error_code& ec)\n  {\n    core_.engine_.set_verify_depth(depth, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Set the callback used to verify peer certificates.\n  /**\n   * This function is used to specify a callback function that will be called\n   * by the implementation when it needs to verify a peer certificate.\n   *\n   * @param callback The function object to be used for verifying a certificate.\n   * The function signature of the handler must be:\n   * @code bool verify_callback(\n   *   bool preverified, // True if the certificate passed pre-verification.\n   *   verify_context& ctx // The peer certificate and other context.\n   * ); @endcode\n   * The return value of the callback is true if the certificate has passed\n   * verification, false otherwise.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note Calls @c SSL_set_verify.\n   */\n  template <typename VerifyCallback>\n  void set_verify_callback(VerifyCallback callback)\n  {\n    asio::error_code ec;\n    this->set_verify_callback(callback, ec);\n    asio::detail::throw_error(ec, \"set_verify_callback\");\n  }\n\n  /// Set the callback used to verify peer certificates.\n  /**\n   * This function is used to specify a callback function that will be called\n   * by the implementation when it needs to verify a peer certificate.\n   *\n   * @param callback The function object to be used for verifying a certificate.\n   * The function signature of the handler must be:\n   * @code bool verify_callback(\n   *   bool preverified, // True if the certificate passed pre-verification.\n   *   verify_context& ctx // The peer certificate and other context.\n   * ); @endcode\n   * The return value of the callback is true if the certificate has passed\n   * verification, false otherwise.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @note Calls @c SSL_set_verify.\n   */\n  template <typename VerifyCallback>\n  
ASIO_SYNC_OP_VOID set_verify_callback(VerifyCallback callback,\n      asio::error_code& ec)\n  {\n    core_.engine_.set_verify_callback(\n        new detail::verify_callback<VerifyCallback>(callback), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Perform SSL handshaking.\n  /**\n   * This function is used to perform SSL handshaking on the stream. The\n   * function call will block until handshaking is complete or an error occurs.\n   *\n   * @param type The type of handshaking to be performed, i.e. as a client or as\n   * a server.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void handshake(handshake_type type)\n  {\n    asio::error_code ec;\n    handshake(type, ec);\n    asio::detail::throw_error(ec, \"handshake\");\n  }\n\n  /// Perform SSL handshaking.\n  /**\n   * This function is used to perform SSL handshaking on the stream. The\n   * function call will block until handshaking is complete or an error occurs.\n   *\n   * @param type The type of handshaking to be performed, i.e. as a client or as\n   * a server.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID handshake(handshake_type type,\n      asio::error_code& ec)\n  {\n    detail::io(next_layer_, core_, detail::handshake_op(type), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Perform SSL handshaking.\n  /**\n   * This function is used to perform SSL handshaking on the stream. The\n   * function call will block until handshaking is complete or an error occurs.\n   *\n   * @param type The type of handshaking to be performed, i.e. 
as a client or as\n   * a server.\n   *\n   * @param buffers The buffered data to be reused for the handshake.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ConstBufferSequence>\n  void handshake(handshake_type type, const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    handshake(type, buffers, ec);\n    asio::detail::throw_error(ec, \"handshake\");\n  }\n\n  /// Perform SSL handshaking.\n  /**\n   * This function is used to perform SSL handshaking on the stream. The\n   * function call will block until handshaking is complete or an error occurs.\n   *\n   * @param type The type of handshaking to be performed, i.e. as a client or as\n   * a server.\n   *\n   * @param buffers The buffered data to be reused for the handshake.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  template <typename ConstBufferSequence>\n  ASIO_SYNC_OP_VOID handshake(handshake_type type,\n      const ConstBufferSequence& buffers, asio::error_code& ec)\n  {\n    detail::io(next_layer_, core_,\n        detail::buffered_handshake_op<ConstBufferSequence>(type, buffers), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Start an asynchronous SSL handshake.\n  /**\n   * This function is used to asynchronously perform an SSL handshake on the\n   * stream. This function call always returns immediately.\n   *\n   * @param type The type of handshaking to be performed, i.e. as a client or as\n   * a server.\n   *\n   * @param handler The handler to be called when the handshake operation\n   * completes. Copies will be made of the handler as required. 
The equivalent\n   * function signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation.\n   * ); @endcode\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        HandshakeHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(HandshakeHandler,\n      void (asio::error_code))\n  async_handshake(handshake_type type,\n      ASIO_MOVE_ARG(HandshakeHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<HandshakeHandler,\n      void (asio::error_code)>(\n        initiate_async_handshake(this), handler, type);\n  }\n\n  /// Start an asynchronous SSL handshake.\n  /**\n   * This function is used to asynchronously perform an SSL handshake on the\n   * stream. This function call always returns immediately.\n   *\n   * @param type The type of handshaking to be performed, i.e. as a client or as\n   * a server.\n   *\n   * @param buffers The buffered data to be reused for the handshake. Although\n   * the buffers object may be copied as necessary, ownership of the underlying\n   * buffers is retained by the caller, which must guarantee that they remain\n   * valid until the handler is called.\n   *\n   * @param handler The handler to be called when the handshake operation\n   * completes. Copies will be made of the handler as required. 
The equivalent\n   * function signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred // Amount of buffers used in handshake.\n   * ); @endcode\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) BufferedHandshakeHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(BufferedHandshakeHandler,\n      void (asio::error_code, std::size_t))\n  async_handshake(handshake_type type, const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(BufferedHandshakeHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<BufferedHandshakeHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_buffered_handshake(this), handler, type, buffers);\n  }\n\n  /// Shut down SSL on the stream.\n  /**\n   * This function is used to shut down SSL on the stream. The function call\n   * will block until SSL has been shut down or an error occurs.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void shutdown()\n  {\n    asio::error_code ec;\n    shutdown(ec);\n    asio::detail::throw_error(ec, \"shutdown\");\n  }\n\n  /// Shut down SSL on the stream.\n  /**\n   * This function is used to shut down SSL on the stream. The function call\n   * will block until SSL has been shut down or an error occurs.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID shutdown(asio::error_code& ec)\n  {\n    detail::io(next_layer_, core_, detail::shutdown_op(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Asynchronously shut down SSL on the stream.\n  /**\n   * This function is used to asynchronously shut down SSL on the stream. 
This\n   * function call always returns immediately.\n   *\n   * @param handler The handler to be called when the handshake operation\n   * completes. Copies will be made of the handler as required. The equivalent\n   * function signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation.\n   * ); @endcode\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        ShutdownHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ShutdownHandler,\n      void (asio::error_code))\n  async_shutdown(\n      ASIO_MOVE_ARG(ShutdownHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ShutdownHandler,\n      void (asio::error_code)>(\n        initiate_async_shutdown(this), handler);\n  }\n\n  /// Write some data to the stream.\n  /**\n   * This function is used to write data on the stream. The function call will\n   * block until one or more bytes of data has been written successfully, or\n   * until an error occurs.\n   *\n   * @param buffers The data to be written.\n   *\n   * @returns The number of bytes written.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. Consider using the @ref write function if you need to ensure that all\n   * data is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t n = write_some(buffers, ec);\n    asio::detail::throw_error(ec, \"write_some\");\n    return n;\n  }\n\n  /// Write some data to the stream.\n  /**\n   * This function is used to write data on the stream. 
The function call will\n   * block until one or more bytes of data has been written successfully, or\n   * until an error occurs.\n   *\n   * @param buffers The data to be written to the stream.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes written. Returns 0 if an error occurred.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. Consider using the @ref write function if you need to ensure that all\n   * data is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return detail::io(next_layer_, core_,\n        detail::write_op<ConstBufferSequence>(buffers), ec);\n  }\n\n  /// Start an asynchronous write.\n  /**\n   * This function is used to asynchronously write one or more bytes of data to\n   * the stream. The function call always returns immediately.\n   *\n   * @param buffers The data to be written to the stream. Although the buffers\n   * object may be copied as necessary, ownership of the underlying buffers is\n   * retained by the caller, which must guarantee that they remain valid until\n   * the handler is called.\n   *\n   * @param handler The handler to be called when the write operation completes.\n   * Copies will be made of the handler as required. The equivalent function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes written.\n   * ); @endcode\n   *\n   * @note The async_write_some operation may not transmit all of the data to\n   * the peer. 
Consider using the @ref async_write function if you need to\n   * ensure that all data is written before the asynchronous operation\n   * completes.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_write_some(this), handler, buffers);\n  }\n\n  /// Read some data from the stream.\n  /**\n   * This function is used to read data from the stream. The function call will\n   * block until one or more bytes of data has been read successfully, or until\n   * an error occurs.\n   *\n   * @param buffers The buffers into which the data will be read.\n   *\n   * @returns The number of bytes read.\n   *\n   * @throws asio::system_error Thrown on failure.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. Consider using the @ref read function if you need to ensure that the\n   * requested amount of data is read before the blocking operation completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t n = read_some(buffers, ec);\n    asio::detail::throw_error(ec, \"read_some\");\n    return n;\n  }\n\n  /// Read some data from the stream.\n  /**\n   * This function is used to read data from the stream. 
The function call will\n   * block until one or more bytes of data has been read successfully, or until\n   * an error occurs.\n   *\n   * @param buffers The buffers into which the data will be read.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes read. Returns 0 if an error occurred.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. Consider using the @ref read function if you need to ensure that the\n   * requested amount of data is read before the blocking operation completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return detail::io(next_layer_, core_,\n        detail::read_op<MutableBufferSequence>(buffers), ec);\n  }\n\n  /// Start an asynchronous read.\n  /**\n   * This function is used to asynchronously read one or more bytes of data from\n   * the stream. The function call always returns immediately.\n   *\n   * @param buffers The buffers into which the data will be read. Although the\n   * buffers object may be copied as necessary, ownership of the underlying\n   * buffers is retained by the caller, which must guarantee that they remain\n   * valid until the handler is called.\n   *\n   * @param handler The handler to be called when the read operation completes.\n   * Copies will be made of the handler as required. The equivalent function\n   * signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes read.\n   * ); @endcode\n   *\n   * @note The async_read_some operation may not read all of the requested\n   * number of bytes. 
Consider using the @ref async_read function if you need to\n   * ensure that the requested amount of data is read before the asynchronous\n   * operation completes.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_read_some(this), handler, buffers);\n  }\n\nprivate:\n  class initiate_async_handshake\n  {\n  public:\n    typedef stream::executor_type executor_type;\n\n    explicit initiate_async_handshake(stream* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename HandshakeHandler>\n    void operator()(ASIO_MOVE_ARG(HandshakeHandler) handler,\n        handshake_type type) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a HandshakeHandler.\n      ASIO_HANDSHAKE_HANDLER_CHECK(HandshakeHandler, handler) type_check;\n\n      asio::detail::non_const_lvalue<HandshakeHandler> handler2(handler);\n      detail::async_io(self_->next_layer_, self_->core_,\n          detail::handshake_op(type), handler2.value);\n    }\n\n  private:\n    stream* self_;\n  };\n\n  class initiate_async_buffered_handshake\n  {\n  public:\n    typedef stream::executor_type executor_type;\n\n    explicit initiate_async_buffered_handshake(stream* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return 
self_->get_executor();\n    }\n\n    template <typename BufferedHandshakeHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(BufferedHandshakeHandler) handler,\n        handshake_type type, const ConstBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your\n      // handler does not meet the documented type requirements for a\n      // BufferedHandshakeHandler.\n      ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK(\n          BufferedHandshakeHandler, handler) type_check;\n\n      asio::detail::non_const_lvalue<\n          BufferedHandshakeHandler> handler2(handler);\n      detail::async_io(self_->next_layer_, self_->core_,\n          detail::buffered_handshake_op<ConstBufferSequence>(type, buffers),\n          handler2.value);\n    }\n\n  private:\n    stream* self_;\n  };\n\n  class initiate_async_shutdown\n  {\n  public:\n    typedef typename stream::executor_type executor_type;\n\n    explicit initiate_async_shutdown(stream* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ShutdownHandler>\n    void operator()(ASIO_MOVE_ARG(ShutdownHandler) handler) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ShutdownHandler.\n      ASIO_HANDSHAKE_HANDLER_CHECK(ShutdownHandler, handler) type_check;\n\n      asio::detail::non_const_lvalue<ShutdownHandler> handler2(handler);\n      detail::async_io(self_->next_layer_, self_->core_,\n          detail::shutdown_op(), handler2.value);\n    }\n\n  private:\n    stream* self_;\n  };\n\n  class initiate_async_write_some\n  {\n  public:\n    typedef typename stream::executor_type executor_type;\n\n    explicit initiate_async_write_some(stream* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n 
   {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      asio::detail::non_const_lvalue<WriteHandler> handler2(handler);\n      detail::async_io(self_->next_layer_, self_->core_,\n          detail::write_op<ConstBufferSequence>(buffers), handler2.value);\n    }\n\n  private:\n    stream* self_;\n  };\n\n  class initiate_async_read_some\n  {\n  public:\n    typedef typename stream::executor_type executor_type;\n\n    explicit initiate_async_read_some(stream* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      asio::detail::non_const_lvalue<ReadHandler> handler2(handler);\n      detail::async_io(self_->next_layer_, self_->core_,\n          detail::read_op<MutableBufferSequence>(buffers), handler2.value);\n    }\n\n  private:\n    stream* self_;\n  };\n\n  Stream next_layer_;\n  detail::stream_core core_;\n};\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_STREAM_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/stream_base.hpp",
    "content": "//\n// ssl/stream_base.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_STREAM_BASE_HPP\n#define ASIO_SSL_STREAM_BASE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\n/// The stream_base class is used as a base for the asio::ssl::stream\n/// class template so that we have a common place to define various enums.\nclass stream_base\n{\npublic:\n  /// Different handshake types.\n  enum handshake_type\n  {\n    /// Perform handshaking as a client.\n    client,\n\n    /// Perform handshaking as a server.\n    server\n  };\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  ~stream_base()\n  {\n  }\n};\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_STREAM_BASE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/verify_context.hpp",
    "content": "//\n// ssl/verify_context.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_VERIFY_CONTEXT_HPP\n#define ASIO_SSL_VERIFY_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\n/// A simple wrapper around the X509_STORE_CTX type, used during verification of\n/// a peer certificate.\n/**\n * @note The verify_context does not own the underlying X509_STORE_CTX object.\n */\nclass verify_context\n  : private noncopyable\n{\npublic:\n  /// The native handle type of the verification context.\n  typedef X509_STORE_CTX* native_handle_type;\n\n  /// Constructor.\n  explicit verify_context(native_handle_type handle)\n    : handle_(handle)\n  {\n  }\n\n  /// Get the underlying implementation in the native type.\n  /**\n   * This function may be used to obtain the underlying implementation of the\n   * context. This is intended to allow access to context functionality that is\n   * not otherwise provided.\n   */\n  native_handle_type native_handle()\n  {\n    return handle_;\n  }\n\nprivate:\n  // The underlying native implementation.\n  native_handle_type handle_;\n};\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_VERIFY_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl/verify_mode.hpp",
    "content": "//\n// ssl/verify_mode.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_VERIFY_MODE_HPP\n#define ASIO_SSL_VERIFY_MODE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/ssl/detail/openssl_types.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace ssl {\n\n/// Bitmask type for peer verification.\n/**\n * Possible values are:\n *\n * @li @ref verify_none\n * @li @ref verify_peer\n * @li @ref verify_fail_if_no_peer_cert\n * @li @ref verify_client_once\n */\ntypedef int verify_mode;\n\n#if defined(GENERATING_DOCUMENTATION)\n/// No verification.\nconst int verify_none = implementation_defined;\n\n/// Verify the peer.\nconst int verify_peer = implementation_defined;\n\n/// Fail verification if the peer has no certificate. Ignored unless\n/// @ref verify_peer is set.\nconst int verify_fail_if_no_peer_cert = implementation_defined;\n\n/// Do not request client certificate on renegotiation. Ignored unless\n/// @ref verify_peer is set.\nconst int verify_client_once = implementation_defined;\n#else\nconst int verify_none = SSL_VERIFY_NONE;\nconst int verify_peer = SSL_VERIFY_PEER;\nconst int verify_fail_if_no_peer_cert = SSL_VERIFY_FAIL_IF_NO_PEER_CERT;\nconst int verify_client_once = SSL_VERIFY_CLIENT_ONCE;\n#endif\n\n} // namespace ssl\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SSL_VERIFY_MODE_HPP\n"
  },
  {
    "path": "src/third_party/asio/ssl.hpp",
    "content": "//\n// ssl.hpp\n// ~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SSL_HPP\n#define ASIO_SSL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/ssl/context.hpp\"\n#include \"asio/ssl/context_base.hpp\"\n#include \"asio/ssl/error.hpp\"\n#include \"asio/ssl/rfc2818_verification.hpp\"\n#include \"asio/ssl/stream.hpp\"\n#include \"asio/ssl/stream_base.hpp\"\n#include \"asio/ssl/verify_context.hpp\"\n#include \"asio/ssl/verify_mode.hpp\"\n\n#endif // ASIO_SSL_HPP\n"
  },
  {
    "path": "src/third_party/asio/steady_timer.hpp",
    "content": "//\n// steady_timer.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_STEADY_TIMER_HPP\n#define ASIO_STEADY_TIMER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/basic_waitable_timer.hpp\"\n#include \"asio/detail/chrono.hpp\"\n\nnamespace asio {\n\n/// Typedef for a timer based on the steady clock.\n/**\n * This typedef uses the C++11 @c &lt;chrono&gt; standard library facility, if\n * available. Otherwise, it may use the Boost.Chrono library. To explicitly\n * utilise Boost.Chrono, use the basic_waitable_timer template directly:\n * @code\n * typedef basic_waitable_timer<boost::chrono::steady_clock> timer;\n * @endcode\n */\ntypedef basic_waitable_timer<chrono::steady_clock> steady_timer;\n\n} // namespace asio\n\n#endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_STEADY_TIMER_HPP\n"
  },
  {
    "path": "src/third_party/asio/strand.hpp",
    "content": "//\n// strand.hpp\n// ~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_STRAND_HPP\n#define ASIO_STRAND_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/strand_executor_service.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Provides serialised function invocation for any executor type.\ntemplate <typename Executor>\nclass strand\n{\npublic:\n  /// The type of the underlying executor.\n  typedef Executor inner_executor_type;\n\n  /// Default constructor.\n  /**\n   * This constructor is only valid if the underlying executor type is default\n   * constructible.\n   */\n  strand()\n    : executor_(),\n      impl_(use_service<detail::strand_executor_service>(\n            executor_.context()).create_implementation())\n  {\n  }\n\n  /// Construct a strand for the specified executor.\n  explicit strand(const Executor& e)\n    : executor_(e),\n      impl_(use_service<detail::strand_executor_service>(\n            executor_.context()).create_implementation())\n  {\n  }\n\n  /// Copy constructor.\n  strand(const strand& other) ASIO_NOEXCEPT\n    : executor_(other.executor_),\n      impl_(other.impl_)\n  {\n  }\n\n  /// Converting constructor.\n  /**\n   * This constructor is only valid if the @c OtherExecutor type is convertible\n   * to @c Executor.\n   */\n  template <class OtherExecutor>\n  strand(\n      const strand<OtherExecutor>& other) ASIO_NOEXCEPT\n    : executor_(other.executor_),\n      impl_(other.impl_)\n  {\n  }\n\n  /// Assignment operator.\n  strand& operator=(const strand& other) ASIO_NOEXCEPT\n  {\n    executor_ = 
other.executor_;\n    impl_ = other.impl_;\n    return *this;\n  }\n\n  /// Converting assignment operator.\n  /**\n   * This assignment operator is only valid if the @c OtherExecutor type is\n   * convertible to @c Executor.\n   */\n  template <class OtherExecutor>\n  strand& operator=(\n      const strand<OtherExecutor>& other) ASIO_NOEXCEPT\n  {\n    executor_ = other.executor_;\n    impl_ = other.impl_;\n    return *this;\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move constructor.\n  strand(strand&& other) ASIO_NOEXCEPT\n    : executor_(ASIO_MOVE_CAST(Executor)(other.executor_)),\n      impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_))\n  {\n  }\n\n  /// Converting move constructor.\n  /**\n   * This constructor is only valid if the @c OtherExecutor type is convertible\n   * to @c Executor.\n   */\n  template <class OtherExecutor>\n  strand(strand<OtherExecutor>&& other) ASIO_NOEXCEPT\n    : executor_(ASIO_MOVE_CAST(OtherExecutor)(other)),\n      impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_))\n  {\n  }\n\n  /// Move assignment operator.\n  strand& operator=(strand&& other) ASIO_NOEXCEPT\n  {\n    executor_ = ASIO_MOVE_CAST(Executor)(other);\n    impl_ = ASIO_MOVE_CAST(implementation_type)(other.impl_);\n    return *this;\n  }\n\n  /// Converting move assignment operator.\n  /**\n   * This assignment operator is only valid if the @c OtherExecutor type is\n   * convertible to @c Executor.\n   */\n  template <class OtherExecutor>\n  strand& operator=(\n      const strand<OtherExecutor>&& other) ASIO_NOEXCEPT\n  {\n    executor_ = ASIO_MOVE_CAST(OtherExecutor)(other);\n    impl_ = ASIO_MOVE_CAST(implementation_type)(other.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Destructor.\n  ~strand()\n  {\n  }\n\n  /// Obtain the underlying executor.\n  inner_executor_type get_inner_executor() const ASIO_NOEXCEPT\n  {\n    return executor_;\n  }\n\n  /// 
Obtain the underlying execution context.\n  execution_context& context() const ASIO_NOEXCEPT\n  {\n    return executor_.context();\n  }\n\n  /// Inform the strand that it has some outstanding work to do.\n  /**\n   * The strand delegates this call to its underlying executor.\n   */\n  void on_work_started() const ASIO_NOEXCEPT\n  {\n    executor_.on_work_started();\n  }\n\n  /// Inform the strand that some work is no longer outstanding.\n  /**\n   * The strand delegates this call to its underlying executor.\n   */\n  void on_work_finished() const ASIO_NOEXCEPT\n  {\n    executor_.on_work_finished();\n  }\n\n  /// Request the strand to invoke the given function object.\n  /**\n   * This function is used to ask the strand to execute the given function\n   * object on its underlying executor. The function object will be executed\n   * inside this function if the strand is not otherwise busy and if the\n   * underlying executor's @c dispatch() function is also able to execute the\n   * function before returning.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n  {\n    detail::strand_executor_service::dispatch(impl_,\n        executor_, ASIO_MOVE_CAST(Function)(f), a);\n  }\n\n  /// Request the strand to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object will never be executed inside this function.\n   * Instead, it will be scheduled by the underlying executor's defer function.\n   *\n   * @param f The function object to be called. 
The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n  {\n    detail::strand_executor_service::post(impl_,\n        executor_, ASIO_MOVE_CAST(Function)(f), a);\n  }\n\n  /// Request the strand to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object will never be executed inside this function.\n   * Instead, it will be scheduled by the underlying executor's defer function.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const\n  {\n    detail::strand_executor_service::defer(impl_,\n        executor_, ASIO_MOVE_CAST(Function)(f), a);\n  }\n\n  /// Determine whether the strand is running in the current thread.\n  /**\n   * @return @c true if the current thread is executing a function that was\n   * submitted to the strand using post(), dispatch() or defer(). 
Otherwise\n   * returns @c false.\n   */\n  bool running_in_this_thread() const ASIO_NOEXCEPT\n  {\n    return detail::strand_executor_service::running_in_this_thread(impl_);\n  }\n\n  /// Compare two strands for equality.\n  /**\n   * Two strands are equal if they refer to the same ordered, non-concurrent\n   * state.\n   */\n  friend bool operator==(const strand& a, const strand& b) ASIO_NOEXCEPT\n  {\n    return a.impl_ == b.impl_;\n  }\n\n  /// Compare two strands for inequality.\n  /**\n   * Two strands are equal if they refer to the same ordered, non-concurrent\n   * state.\n   */\n  friend bool operator!=(const strand& a, const strand& b) ASIO_NOEXCEPT\n  {\n    return a.impl_ != b.impl_;\n  }\n\nprivate:\n  Executor executor_;\n  typedef detail::strand_executor_service::implementation_type\n    implementation_type;\n  implementation_type impl_;\n};\n\n/** @defgroup make_strand asio::make_strand\n *\n * @brief The asio::make_strand function creates a @ref strand object for\n * an executor or execution context.\n */\n/*@{*/\n\n/// Create a @ref strand object for an executor.\ntemplate <typename Executor>\ninline strand<Executor> make_strand(const Executor& ex,\n    typename enable_if<is_executor<Executor>::value>::type* = 0)\n{\n  return strand<Executor>(ex);\n}\n\n/// Create a @ref strand object for an execution context.\ntemplate <typename ExecutionContext>\ninline strand<typename ExecutionContext::executor_type>\nmake_strand(ExecutionContext& ctx,\n    typename enable_if<\n      is_convertible<ExecutionContext&, execution_context&>::value>::type* = 0)\n{\n  return strand<typename ExecutionContext::executor_type>(ctx.get_executor());\n}\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n// If both io_context.hpp and strand.hpp have been included, automatically\n// include the header file needed for the io_context::strand class.\n#if !defined(ASIO_NO_EXTENSIONS)\n# if defined(ASIO_IO_CONTEXT_HPP)\n#  include 
\"asio/io_context_strand.hpp\"\n# endif // defined(ASIO_IO_CONTEXT_HPP)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#endif // ASIO_STRAND_HPP\n"
  },
  {
    "path": "src/third_party/asio/streambuf.hpp",
    "content": "//\n// streambuf.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_STREAMBUF_HPP\n#define ASIO_STREAMBUF_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if !defined(ASIO_NO_IOSTREAM)\n\n#include \"asio/basic_streambuf.hpp\"\n\nnamespace asio {\n\n/// Typedef for the typical usage of basic_streambuf.\ntypedef basic_streambuf<> streambuf;\n\n} // namespace asio\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n\n#endif // ASIO_STREAMBUF_HPP\n"
  },
  {
    "path": "src/third_party/asio/system_context.hpp",
    "content": "//\n// system_context.hpp\n// ~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SYSTEM_CONTEXT_HPP\n#define ASIO_SYSTEM_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/scheduler.hpp\"\n#include \"asio/detail/thread_group.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nclass system_executor;\n\n/// The executor context for the system executor.\nclass system_context : public execution_context\n{\npublic:\n  /// The executor type associated with the context.\n  typedef system_executor executor_type;\n\n  /// Destructor shuts down all threads in the system thread pool.\n  ASIO_DECL ~system_context();\n\n  /// Obtain an executor for the context.\n  executor_type get_executor() ASIO_NOEXCEPT;\n\n  /// Signal all threads in the system thread pool to stop.\n  ASIO_DECL void stop();\n\n  /// Determine whether the system thread pool has been stopped.\n  ASIO_DECL bool stopped() const ASIO_NOEXCEPT;\n\n  /// Join all threads in the system thread pool.\n  ASIO_DECL void join();\n\n#if defined(GENERATING_DOCUMENTATION)\nprivate:\n#endif // defined(GENERATING_DOCUMENTATION)\n  // Constructor creates all threads in the system thread pool.\n  ASIO_DECL system_context();\n\nprivate:\n  friend class system_executor;\n\n  struct thread_function;\n\n  // Helper function to create the underlying scheduler.\n  ASIO_DECL detail::scheduler& add_scheduler(detail::scheduler* s);\n\n  // The underlying scheduler.\n  detail::scheduler& scheduler_;\n\n  // The threads in the system thread pool.\n  detail::thread_group threads_;\n};\n\n} // 
namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/system_context.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/system_context.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_SYSTEM_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/system_error.hpp",
    "content": "//\n// system_error.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SYSTEM_ERROR_HPP\n#define ASIO_SYSTEM_ERROR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_STD_SYSTEM_ERROR)\n# include <system_error>\n#else // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n# include <cerrno>\n# include <exception>\n# include <string>\n# include \"asio/error_code.hpp\"\n# include \"asio/detail/scoped_ptr.hpp\"\n#endif // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n#if defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\ntypedef std::system_error system_error;\n\n#else // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n/// The system_error class is used to represent system conditions that\n/// prevent the library from operating correctly.\nclass system_error\n  : public std::exception\n{\npublic:\n  /// Construct with an error code.\n  system_error(const error_code& ec)\n    : code_(ec),\n      context_()\n  {\n  }\n\n  /// Construct with an error code and context.\n  system_error(const error_code& ec, const std::string& context)\n    : code_(ec),\n      context_(context)\n  {\n  }\n\n  /// Copy constructor.\n  system_error(const system_error& other)\n    : std::exception(other),\n      code_(other.code_),\n      context_(other.context_),\n      what_()\n  {\n  }\n\n  /// Destructor.\n  virtual ~system_error() throw ()\n  {\n  }\n\n  /// Assignment operator.\n  system_error& operator=(const system_error& e)\n  {\n    context_ = e.context_;\n    code_ = e.code_;\n    what_.reset();\n    return *this;\n  }\n\n  /// Get a string representation of the exception.\n  virtual const 
char* what() const throw ()\n  {\n#if !defined(ASIO_NO_EXCEPTIONS)\n    try\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n    {\n      if (!what_.get())\n      {\n        std::string tmp(context_);\n        if (tmp.length())\n          tmp += \": \";\n        tmp += code_.message();\n        what_.reset(new std::string(tmp));\n      }\n      return what_->c_str();\n    }\n#if !defined(ASIO_NO_EXCEPTIONS)\n    catch (std::exception&)\n    {\n      return \"system_error\";\n    }\n#endif // !defined(ASIO_NO_EXCEPTIONS)\n  }\n\n  /// Get the error code associated with the exception.\n  error_code code() const\n  {\n    return code_;\n  }\n\nprivate:\n  // The code associated with the error.\n  error_code code_;\n\n  // The context associated with the error.\n  std::string context_;\n\n  // The string representation of the error.\n  mutable asio::detail::scoped_ptr<std::string> what_;\n};\n\n#endif // defined(ASIO_HAS_STD_SYSTEM_ERROR)\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_SYSTEM_ERROR_HPP\n"
  },
  {
    "path": "src/third_party/asio/system_executor.hpp",
    "content": "//\n// system_executor.hpp\n// ~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SYSTEM_EXECUTOR_HPP\n#define ASIO_SYSTEM_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nclass system_context;\n\n/// An executor that uses arbitrary threads.\n/**\n * The system executor represents an execution context where functions are\n * permitted to run on arbitrary threads. The post() and defer() functions\n * schedule the function to run on an unspecified system thread pool, and\n * dispatch() invokes the function immediately.\n */\nclass system_executor\n{\npublic:\n  /// Obtain the underlying execution context.\n  system_context& context() const ASIO_NOEXCEPT;\n\n  /// Inform the executor that it has some outstanding work to do.\n  /**\n   * For the system executor, this is a no-op.\n   */\n  void on_work_started() const ASIO_NOEXCEPT\n  {\n  }\n\n  /// Inform the executor that some work is no longer outstanding.\n  /**\n   * For the system executor, this is a no-op.\n   */\n  void on_work_finished() const ASIO_NOEXCEPT\n  {\n  }\n\n  /// Request the system executor to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object will always be executed inside this function.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. 
The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Request the system executor to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object will never be executed inside this function.\n   * Instead, it will be scheduled to run on an unspecified system thread pool.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Request the system executor to invoke the given function object.\n  /**\n   * This function is used to ask the executor to execute the given function\n   * object. The function object will never be executed inside this function.\n   * Instead, it will be scheduled to run on an unspecified system thread pool.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. 
The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Compare two executors for equality.\n  /**\n   * System executors always compare equal.\n   */\n  friend bool operator==(const system_executor&,\n      const system_executor&) ASIO_NOEXCEPT\n  {\n    return true;\n  }\n\n  /// Compare two executors for inequality.\n  /**\n   * System executors always compare equal.\n   */\n  friend bool operator!=(const system_executor&,\n      const system_executor&) ASIO_NOEXCEPT\n  {\n    return false;\n  }\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/system_executor.hpp\"\n\n#endif // ASIO_SYSTEM_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/system_timer.hpp",
    "content": "//\n// system_timer.hpp\n// ~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_SYSTEM_TIMER_HPP\n#define ASIO_SYSTEM_TIMER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/basic_waitable_timer.hpp\"\n#include \"asio/detail/chrono.hpp\"\n\nnamespace asio {\n\n/// Typedef for a timer based on the system clock.\n/**\n * This typedef uses the C++11 @c &lt;chrono&gt; standard library facility, if\n * available. Otherwise, it may use the Boost.Chrono library. To explicitly\n * utilise Boost.Chrono, use the basic_waitable_timer template directly:\n * @code\n * typedef basic_waitable_timer<boost::chrono::system_clock> timer;\n * @endcode\n */\ntypedef basic_waitable_timer<chrono::system_clock> system_timer;\n\n} // namespace asio\n\n#endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_SYSTEM_TIMER_HPP\n"
  },
  {
    "path": "src/third_party/asio/this_coro.hpp",
    "content": "//\n// this_coro.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_THIS_CORO_HPP\n#define ASIO_THIS_CORO_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace this_coro {\n\n/// Awaitable type that returns the executor of the current coroutine.\nstruct executor_t\n{\n  ASIO_CONSTEXPR executor_t()\n  {\n  }\n};\n\n/// Awaitable object that returns the executor of the current coroutine.\n#if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION)\nconstexpr executor_t executor;\n#elif defined(ASIO_MSVC)\n__declspec(selectany) executor_t executor;\n#endif\n\n} // namespace this_coro\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_THIS_CORO_HPP\n"
  },
  {
    "path": "src/third_party/asio/thread.hpp",
    "content": "//\n// thread.hpp\n// ~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_THREAD_HPP\n#define ASIO_THREAD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/thread.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// A simple abstraction for starting threads.\n/**\n * The asio::thread class implements the smallest possible subset of the\n * functionality of boost::thread. It is intended to be used only for starting\n * a thread and waiting for it to exit. If more extensive threading\n * capabilities are required, you are strongly advised to use something else.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Example\n * A typical use of asio::thread would be to launch a thread to run an\n * io_context's event processing loop:\n *\n * @par\n * @code asio::io_context io_context;\n * // ...\n * asio::thread t(boost::bind(&asio::io_context::run, &io_context));\n * // ...\n * t.join(); @endcode\n */\nclass thread\n  : private noncopyable\n{\npublic:\n  /// Start a new thread that executes the supplied function.\n  /**\n   * This constructor creates a new thread that will execute the given function\n   * or function object.\n   *\n   * @param f The function or function object to be run in the thread. 
The\n   * function signature must be: @code void f(); @endcode\n   */\n  template <typename Function>\n  explicit thread(Function f)\n    : impl_(f)\n  {\n  }\n\n  /// Destructor.\n  ~thread()\n  {\n  }\n\n  /// Wait for the thread to exit.\n  /**\n   * This function will block until the thread has exited.\n   *\n   * If this function is not called before the thread object is destroyed, the\n   * thread itself will continue to run until completion. You will, however,\n   * no longer have the ability to wait for it to exit.\n   */\n  void join()\n  {\n    impl_.join();\n  }\n\nprivate:\n  detail::thread impl_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_THREAD_HPP\n"
  },
  {
    "path": "src/third_party/asio/thread_pool.hpp",
    "content": "//\n// thread_pool.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_THREAD_POOL_HPP\n#define ASIO_THREAD_POOL_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/scheduler.hpp\"\n#include \"asio/detail/thread_group.hpp\"\n#include \"asio/execution_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// A simple fixed-size thread pool.\n/**\n * The thread pool class is an execution context where functions are permitted\n * to run on one of a fixed number of threads.\n *\n * @par Submitting tasks to the pool\n *\n * To submit functions to the thread_pool, use the @ref asio::dispatch,\n * @ref asio::post or @ref asio::defer free functions.\n *\n * For example:\n *\n * @code void my_task()\n * {\n *   ...\n * }\n *\n * ...\n *\n * // Launch the pool with four threads.\n * asio::thread_pool pool(4);\n *\n * // Submit a function to the pool.\n * asio::post(pool, my_task);\n *\n * // Submit a lambda object to the pool.\n * asio::post(pool,\n *     []()\n *     {\n *       ...\n *     });\n *\n * // Wait for all tasks in the pool to complete.\n * pool.join(); @endcode\n */\nclass thread_pool\n  : public execution_context\n{\npublic:\n  class executor_type;\n\n  /// Constructs a pool with an automatically determined number of threads.\n  ASIO_DECL thread_pool();\n\n  /// Constructs a pool with a specified number of threads.\n  ASIO_DECL thread_pool(std::size_t num_threads);\n\n  /// Destructor.\n  /**\n   * Automatically stops and joins the pool, if not explicitly done beforehand.\n   */\n  ASIO_DECL ~thread_pool();\n\n  /// 
Obtains the executor associated with the pool.\n  executor_type get_executor() ASIO_NOEXCEPT;\n\n  /// Stops the threads.\n  /**\n   * This function stops the threads as soon as possible. As a result of calling\n   * @c stop(), pending function objects may be never be invoked.\n   */\n  ASIO_DECL void stop();\n\n  /// Joins the threads.\n  /**\n   * This function blocks until the threads in the pool have completed. If @c\n   * stop() is not called prior to @c join(), the @c join() call will wait\n   * until the pool has no more outstanding work.\n   */\n  ASIO_DECL void join();\n\nprivate:\n  friend class executor_type;\n  struct thread_function;\n\n  // Helper function to create the underlying scheduler.\n  ASIO_DECL detail::scheduler& add_scheduler(detail::scheduler* s);\n\n  // The underlying scheduler.\n  detail::scheduler& scheduler_;\n\n  // The threads in the pool.\n  detail::thread_group threads_;\n};\n\n/// Executor used to submit functions to a thread pool.\nclass thread_pool::executor_type\n{\npublic:\n  /// Obtain the underlying execution context.\n  thread_pool& context() const ASIO_NOEXCEPT;\n\n  /// Inform the thread pool that it has some outstanding work to do.\n  /**\n   * This function is used to inform the thread pool that some work has begun.\n   * This ensures that the thread pool's join() function will not return while\n   * the work is underway.\n   */\n  void on_work_started() const ASIO_NOEXCEPT;\n\n  /// Inform the thread pool that some work is no longer outstanding.\n  /**\n   * This function is used to inform the thread pool that some work has\n   * finished. Once the count of unfinished work reaches zero, the thread\n   * pool's join() function is permitted to exit.\n   */\n  void on_work_finished() const ASIO_NOEXCEPT;\n\n  /// Request the thread pool to invoke the given function object.\n  /**\n   * This function is used to ask the thread pool to execute the given function\n   * object. 
If the current thread belongs to the pool, @c dispatch() executes\n   * the function before returning. Otherwise, the function will be scheduled\n   * to run on the thread pool.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Request the thread pool to invoke the given function object.\n  /**\n   * This function is used to ask the thread pool to execute the given function\n   * object. The function object will never be executed inside @c post().\n   * Instead, it will be scheduled to run on the thread pool.\n   *\n   * @param f The function object to be called. The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Request the thread pool to invoke the given function object.\n  /**\n   * This function is used to ask the thread pool to execute the given function\n   * object. The function object will never be executed inside @c defer().\n   * Instead, it will be scheduled to run on the thread pool.\n   *\n   * If the current thread belongs to the thread pool, @c defer() will delay\n   * scheduling the function object until the current thread returns control to\n   * the pool.\n   *\n   * @param f The function object to be called. 
The executor will make\n   * a copy of the handler object as required. The function signature of the\n   * function object must be: @code void function(); @endcode\n   *\n   * @param a An allocator that may be used by the executor to allocate the\n   * internal storage needed for function invocation.\n   */\n  template <typename Function, typename Allocator>\n  void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const;\n\n  /// Determine whether the thread pool is running in the current thread.\n  /**\n   * @return @c true if the current thread belongs to the pool. Otherwise\n   * returns @c false.\n   */\n  bool running_in_this_thread() const ASIO_NOEXCEPT;\n\n  /// Compare two executors for equality.\n  /**\n   * Two executors are equal if they refer to the same underlying thread pool.\n   */\n  friend bool operator==(const executor_type& a,\n      const executor_type& b) ASIO_NOEXCEPT\n  {\n    return &a.pool_ == &b.pool_;\n  }\n\n  /// Compare two executors for inequality.\n  /**\n   * Two executors are equal if they refer to the same underlying thread pool.\n   */\n  friend bool operator!=(const executor_type& a,\n      const executor_type& b) ASIO_NOEXCEPT\n  {\n    return &a.pool_ != &b.pool_;\n  }\n\nprivate:\n  friend class thread_pool;\n\n  // Constructor.\n  explicit executor_type(thread_pool& p) : pool_(p) {}\n\n  // The underlying thread pool.\n  thread_pool& pool_;\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/thread_pool.hpp\"\n#if defined(ASIO_HEADER_ONLY)\n# include \"asio/impl/thread_pool.ipp\"\n#endif // defined(ASIO_HEADER_ONLY)\n\n#endif // ASIO_THREAD_POOL_HPP\n"
  },
  {
    "path": "src/third_party/asio/time_traits.hpp",
    "content": "//\n// time_traits.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TIME_TRAITS_HPP\n#define ASIO_TIME_TRAITS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/socket_types.hpp\" // Must come before posix_time.\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include <boost/date_time/posix_time/posix_time_types.hpp>\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Time traits suitable for use with the deadline timer.\ntemplate <typename Time>\nstruct time_traits;\n\n/// Time traits specialised for posix_time.\ntemplate <>\nstruct time_traits<boost::posix_time::ptime>\n{\n  /// The time type.\n  typedef boost::posix_time::ptime time_type;\n\n  /// The duration type.\n  typedef boost::posix_time::time_duration duration_type;\n\n  /// Get the current time.\n  static time_type now()\n  {\n#if defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK)\n    return boost::posix_time::microsec_clock::universal_time();\n#else // defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK)\n    return boost::posix_time::second_clock::universal_time();\n#endif // defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK)\n  }\n\n  /// Add a duration to a time.\n  static time_type add(const time_type& t, const duration_type& d)\n  {\n    return t + d;\n  }\n\n  /// Subtract one time from another.\n  static duration_type subtract(const time_type& t1, const time_type& t2)\n  {\n    return t1 - t2;\n  }\n\n  /// Test whether one time is less than another.\n  static bool less_than(const time_type& t1, const time_type& t2)\n  {\n    return t1 < t2;\n  }\n\n  /// Convert to POSIX duration type.\n  static 
boost::posix_time::time_duration to_posix_duration(\n      const duration_type& d)\n  {\n    return d;\n  }\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n       // || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_TIME_TRAITS_HPP\n"
  },
  {
    "path": "src/third_party/asio/ts/buffer.hpp",
    "content": "//\n// ts/buffer.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TS_BUFFER_HPP\n#define ASIO_TS_BUFFER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/buffer.hpp\"\n#include \"asio/completion_condition.hpp\"\n#include \"asio/read.hpp\"\n#include \"asio/write.hpp\"\n#include \"asio/read_until.hpp\"\n\n#endif // ASIO_TS_BUFFER_HPP\n"
  },
  {
    "path": "src/third_party/asio/ts/executor.hpp",
    "content": "//\n// ts/executor.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TS_EXECUTOR_HPP\n#define ASIO_TS_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/async_result.hpp\"\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/is_executor.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/bind_executor.hpp\"\n#include \"asio/executor_work_guard.hpp\"\n#include \"asio/system_executor.hpp\"\n#include \"asio/executor.hpp\"\n#include \"asio/dispatch.hpp\"\n#include \"asio/post.hpp\"\n#include \"asio/defer.hpp\"\n#include \"asio/strand.hpp\"\n#include \"asio/packaged_task.hpp\"\n#include \"asio/use_future.hpp\"\n\n#endif // ASIO_TS_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/ts/internet.hpp",
    "content": "//\n// ts/internet.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TS_INTERNET_HPP\n#define ASIO_TS_INTERNET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/ip/address.hpp\"\n#include \"asio/ip/address_v4.hpp\"\n#include \"asio/ip/address_v4_iterator.hpp\"\n#include \"asio/ip/address_v4_range.hpp\"\n#include \"asio/ip/address_v6.hpp\"\n#include \"asio/ip/address_v6_iterator.hpp\"\n#include \"asio/ip/address_v6_range.hpp\"\n#include \"asio/ip/bad_address_cast.hpp\"\n#include \"asio/ip/basic_endpoint.hpp\"\n#include \"asio/ip/basic_resolver_query.hpp\"\n#include \"asio/ip/basic_resolver_entry.hpp\"\n#include \"asio/ip/basic_resolver_iterator.hpp\"\n#include \"asio/ip/basic_resolver.hpp\"\n#include \"asio/ip/host_name.hpp\"\n#include \"asio/ip/network_v4.hpp\"\n#include \"asio/ip/network_v6.hpp\"\n#include \"asio/ip/tcp.hpp\"\n#include \"asio/ip/udp.hpp\"\n#include \"asio/ip/v6_only.hpp\"\n#include \"asio/ip/unicast.hpp\"\n#include \"asio/ip/multicast.hpp\"\n\n#endif // ASIO_TS_INTERNET_HPP\n"
  },
  {
    "path": "src/third_party/asio/ts/io_context.hpp",
    "content": "//\n// ts/io_context.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TS_IO_CONTEXT_HPP\n#define ASIO_TS_IO_CONTEXT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/io_context.hpp\"\n\n#endif // ASIO_TS_IO_CONTEXT_HPP\n"
  },
  {
    "path": "src/third_party/asio/ts/net.hpp",
    "content": "//\n// ts/net.hpp\n// ~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TS_NET_HPP\n#define ASIO_TS_NET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/ts/netfwd.hpp\"\n#include \"asio/ts/executor.hpp\"\n#include \"asio/ts/io_context.hpp\"\n#include \"asio/ts/timer.hpp\"\n#include \"asio/ts/buffer.hpp\"\n#include \"asio/ts/socket.hpp\"\n#include \"asio/ts/internet.hpp\"\n\n#endif // ASIO_TS_NET_HPP\n"
  },
  {
    "path": "src/third_party/asio/ts/netfwd.hpp",
    "content": "//\n// ts/netfwd.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TS_NETFWD_HPP\n#define ASIO_TS_NETFWD_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_CHRONO)\n# include \"asio/detail/chrono.hpp\"\n#endif // defined(ASIO_HAS_CHRONO)\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME)\n# include \"asio/detail/date_time_fwd.hpp\"\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n\n#if !defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\nclass execution_context;\n\ntemplate <typename T, typename Executor>\nclass executor_binder;\n\ntemplate <typename Executor>\nclass executor_work_guard;\n\nclass system_executor;\n\nclass executor;\n\ntemplate <typename Executor>\nclass strand;\n\nclass io_context;\n\ntemplate <typename Clock>\nstruct wait_traits;\n\n#if defined(ASIO_HAS_BOOST_DATE_TIME)\n\ntemplate <typename Time>\nstruct time_traits;\n\n#endif // defined(ASIO_HAS_BOOST_DATE_TIME)\n\n#if !defined(ASIO_BASIC_WAITABLE_TIMER_FWD_DECL)\n#define ASIO_BASIC_WAITABLE_TIMER_FWD_DECL\n\ntemplate <typename Clock,\n    typename WaitTraits = wait_traits<Clock>,\n    typename Executor = executor>\nclass basic_waitable_timer;\n\n#endif // !defined(ASIO_BASIC_WAITABLE_TIMER_FWD_DECL)\n\n#if defined(ASIO_HAS_CHRONO)\n\ntypedef basic_waitable_timer<chrono::system_clock> system_timer;\n\ntypedef basic_waitable_timer<chrono::steady_clock> steady_timer;\n\ntypedef basic_waitable_timer<chrono::high_resolution_clock>\n  high_resolution_timer;\n\n#endif // defined(ASIO_HAS_CHRONO)\n\n#if !defined(ASIO_BASIC_SOCKET_FWD_DECL)\n#define ASIO_BASIC_SOCKET_FWD_DECL\n\ntemplate 
<typename Protocol, typename Executor = executor>\nclass basic_socket;\n\n#endif // !defined(ASIO_BASIC_SOCKET_FWD_DECL)\n\n#if !defined(ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL)\n#define ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL\n\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_datagram_socket;\n\n#endif // !defined(ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL)\n\n#if !defined(ASIO_BASIC_STREAM_SOCKET_FWD_DECL)\n#define ASIO_BASIC_STREAM_SOCKET_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_stream_socket;\n\n#endif // !defined(ASIO_BASIC_STREAM_SOCKET_FWD_DECL)\n\n#if !defined(ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL)\n#define ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL\n\ntemplate <typename Protocol, typename Executor = executor>\nclass basic_socket_acceptor;\n\n#endif // !defined(ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL)\n\n#if !defined(ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL)\n#define ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol,\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  || defined(GENERATING_DOCUMENTATION)\n    typename Clock = boost::posix_time::ptime,\n    typename WaitTraits = time_traits<Clock> >\n#else\n    typename Clock = chrono::steady_clock,\n    typename WaitTraits = wait_traits<Clock> >\n#endif\nclass basic_socket_streambuf;\n\n#endif // !defined(ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL)\n\n#if !defined(ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL)\n#define ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL\n\n// Forward declaration with defaulted arguments.\ntemplate <typename Protocol,\n#if defined(ASIO_HAS_BOOST_DATE_TIME) \\\n  || defined(GENERATING_DOCUMENTATION)\n    typename Clock = boost::posix_time::ptime,\n    typename WaitTraits = time_traits<Clock> >\n#else\n    typename Clock = chrono::steady_clock,\n    typename WaitTraits = wait_traits<Clock> >\n#endif\nclass basic_socket_iostream;\n\n#endif // 
!defined(ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL)\n\nnamespace ip {\n\nclass address;\n\nclass address_v4;\n\nclass address_v6;\n\ntemplate <typename Address>\nclass basic_address_iterator;\n\ntypedef basic_address_iterator<address_v4> address_v4_iterator;\n\ntypedef basic_address_iterator<address_v6> address_v6_iterator;\n\ntemplate <typename Address>\nclass basic_address_range;\n\ntypedef basic_address_range<address_v4> address_v4_range;\n\ntypedef basic_address_range<address_v6> address_v6_range;\n\nclass network_v4;\n\nclass network_v6;\n\ntemplate <typename InternetProtocol>\nclass basic_endpoint;\n\ntemplate <typename InternetProtocol>\nclass basic_resolver_entry;\n\ntemplate <typename InternetProtocol>\nclass basic_resolver_results;\n\n#if !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL)\n#define ASIO_IP_BASIC_RESOLVER_FWD_DECL\n\ntemplate <typename InternetProtocol, typename Executor = executor>\nclass basic_resolver;\n\n#endif // !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL)\n\nclass tcp;\n\nclass udp;\n\n} // namespace ip\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // !defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_TS_NETFWD_HPP\n"
  },
  {
    "path": "src/third_party/asio/ts/socket.hpp",
    "content": "//\n// ts/socket.hpp\n// ~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TS_SOCKET_HPP\n#define ASIO_TS_SOCKET_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/socket_base.hpp\"\n#include \"asio/basic_socket.hpp\"\n#include \"asio/basic_datagram_socket.hpp\"\n#include \"asio/basic_stream_socket.hpp\"\n#include \"asio/basic_socket_acceptor.hpp\"\n#include \"asio/basic_socket_streambuf.hpp\"\n#include \"asio/basic_socket_iostream.hpp\"\n#include \"asio/connect.hpp\"\n\n#endif // ASIO_TS_SOCKET_HPP\n"
  },
  {
    "path": "src/third_party/asio/ts/timer.hpp",
    "content": "//\n// ts/timer.hpp\n// ~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_TS_TIMER_HPP\n#define ASIO_TS_TIMER_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/chrono.hpp\"\n\n#include \"asio/wait_traits.hpp\"\n#include \"asio/basic_waitable_timer.hpp\"\n#include \"asio/system_timer.hpp\"\n#include \"asio/steady_timer.hpp\"\n#include \"asio/high_resolution_timer.hpp\"\n\n#endif // ASIO_TS_TIMER_HPP\n"
  },
  {
    "path": "src/third_party/asio/unyield.hpp",
    "content": "//\n// unyield.hpp\n// ~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifdef reenter\n# undef reenter\n#endif\n\n#ifdef yield\n# undef yield\n#endif\n\n#ifdef fork\n# undef fork\n#endif\n"
  },
  {
    "path": "src/third_party/asio/use_awaitable.hpp",
    "content": "//\n// use_awaitable.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_USE_AWAITABLE_HPP\n#define ASIO_USE_AWAITABLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/awaitable.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// A completion token that represents the currently executing coroutine.\n/**\n * The @c use_awaitable_t class, with its value @c use_awaitable, is used to\n * represent the currently executing coroutine. This completion token may be\n * passed as a handler to an asynchronous operation. For example:\n *\n * @code awaitable<void> my_coroutine()\n * {\n *   std::size_t n = co_await my_socket.async_read_some(buffer, use_awaitable);\n *   ...\n * } @endcode\n *\n * When used with co_await, the initiating function (@c async_read_some in the\n * above example) suspends the current coroutine. 
The coroutine is resumed when\n * the asynchronous operation completes, and the result of the operation is\n * returned.\n */\ntemplate <typename Executor = executor>\nstruct use_awaitable_t\n{\n  /// Default constructor.\n  ASIO_CONSTEXPR use_awaitable_t()\n  {\n  }\n\n  /// Adapts an executor to add the @c use_awaitable_t completion token as the\n  /// default.\n  template <typename InnerExecutor>\n  struct executor_with_default : InnerExecutor\n  {\n    /// Specify @c use_awaitable_t as the default completion token type.\n    typedef use_awaitable_t default_completion_token_type;\n\n    /// Construct the adapted executor from the inner executor type.\n    executor_with_default(const InnerExecutor& ex) ASIO_NOEXCEPT\n      : InnerExecutor(ex)\n    {\n    }\n  };\n\n  /// Type alias to adapt an I/O object to use @c use_awaitable_t as its\n  /// default completion token type.\n#if defined(ASIO_HAS_ALIAS_TEMPLATES) \\\n  || defined(GENERATING_DOCUMENTATION)\n  template <typename T>\n  using as_default_on_t = typename T::template rebind_executor<\n      executor_with_default<typename T::executor_type> >::other;\n#endif // defined(ASIO_HAS_ALIAS_TEMPLATES)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n  /// Function helper to adapt an I/O object to use @c use_awaitable_t as its\n  /// default completion token type.\n  template <typename T>\n  static typename T::template rebind_executor<\n      executor_with_default<typename T::executor_type>\n    >::other\n  as_default_on(ASIO_MOVE_ARG(T) object)\n  {\n    return typename as_default_on_t<typename decay<T>::type>::type(\n        ASIO_MOVE_CAST(T)(object));\n  }\n};\n\n/// A completion token object that represents the currently executing coroutine.\n/**\n * See the documentation for asio::use_awaitable_t for a usage example.\n */\n#if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION)\nconstexpr use_awaitable_t<> use_awaitable;\n#elif defined(ASIO_MSVC)\n__declspec(selectany) use_awaitable_t<> 
use_awaitable;\n#endif\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/use_awaitable.hpp\"\n\n#endif // defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_USE_AWAITABLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/use_future.hpp",
    "content": "//\n// use_future.hpp\n// ~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_USE_FUTURE_HPP\n#define ASIO_USE_FUTURE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/future.hpp\"\n\n#if defined(ASIO_HAS_STD_FUTURE_CLASS) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include <memory>\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace detail {\n\ntemplate <typename Function, typename Allocator>\nclass packaged_token;\n\ntemplate <typename Function, typename Allocator, typename Result>\nclass packaged_handler;\n\n} // namespace detail\n\n/// Class used to specify that an asynchronous operation should return a future.\n/**\n * The use_future_t class is used to indicate that an asynchronous operation\n * should return a std::future object. A use_future_t object may be passed as a\n * handler to an asynchronous operation, typically using the special value @c\n * asio::use_future. For example:\n *\n * @code std::future<std::size_t> my_future\n *   = my_socket.async_read_some(my_buffer, asio::use_future); @endcode\n *\n * The initiating function (async_read_some in the above example) returns a\n * future that will receive the result of the operation. If the operation\n * completes with an error_code indicating failure, it is converted into a\n * system_error and passed back to the caller via the future.\n */\ntemplate <typename Allocator = std::allocator<void> >\nclass use_future_t\n{\npublic:\n  /// The allocator type. 
The allocator is used when constructing the\n  /// @c std::promise object for a given asynchronous operation.\n  typedef Allocator allocator_type;\n\n  /// Construct using default-constructed allocator.\n  ASIO_CONSTEXPR use_future_t()\n  {\n  }\n\n  /// Construct using specified allocator.\n  explicit use_future_t(const Allocator& allocator)\n    : allocator_(allocator)\n  {\n  }\n\n#if !defined(ASIO_NO_DEPRECATED)\n  /// (Deprecated: Use rebind().) Specify an alternate allocator.\n  template <typename OtherAllocator>\n  use_future_t<OtherAllocator> operator[](const OtherAllocator& allocator) const\n  {\n    return use_future_t<OtherAllocator>(allocator);\n  }\n#endif // !defined(ASIO_NO_DEPRECATED)\n\n  /// Specify an alternate allocator.\n  template <typename OtherAllocator>\n  use_future_t<OtherAllocator> rebind(const OtherAllocator& allocator) const\n  {\n    return use_future_t<OtherAllocator>(allocator);\n  }\n\n  /// Obtain allocator.\n  allocator_type get_allocator() const\n  {\n    return allocator_;\n  }\n\n  /// Wrap a function object in a packaged task.\n  /**\n   * The @c package function is used to adapt a function object as a packaged\n   * task. When this adapter is passed as a completion token to an asynchronous\n   * operation, the result of the function object is retuned via a std::future.\n   *\n   * @par Example\n   *\n   * @code std::future<std::size_t> fut =\n   *   my_socket.async_read_some(buffer,\n   *     use_future([](asio::error_code ec, std::size_t n)\n   *       {\n   *         return ec ? 
0 : n;\n   *       }));\n   * ...\n   * std::size_t n = fut.get(); @endcode\n   */\n  template <typename Function>\n#if defined(GENERATING_DOCUMENTATION)\n  unspecified\n#else // defined(GENERATING_DOCUMENTATION)\n  detail::packaged_token<typename decay<Function>::type, Allocator>\n#endif // defined(GENERATING_DOCUMENTATION)\n  operator()(ASIO_MOVE_ARG(Function) f) const;\n\nprivate:\n  // Helper type to ensure that use_future can be constexpr default-constructed\n  // even when std::allocator<void> can't be.\n  struct std_allocator_void\n  {\n    ASIO_CONSTEXPR std_allocator_void()\n    {\n    }\n\n    operator std::allocator<void>() const\n    {\n      return std::allocator<void>();\n    }\n  };\n\n  typename conditional<\n    is_same<std::allocator<void>, Allocator>::value,\n    std_allocator_void, Allocator>::type allocator_;\n};\n\n/// A special value, similar to std::nothrow.\n/**\n * See the documentation for asio::use_future_t for a usage example.\n */\n#if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION)\nconstexpr use_future_t<> use_future;\n#elif defined(ASIO_MSVC)\n__declspec(selectany) use_future_t<> use_future;\n#endif\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/use_future.hpp\"\n\n#endif // defined(ASIO_HAS_STD_FUTURE_CLASS)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_USE_FUTURE_HPP\n"
  },
  {
    "path": "src/third_party/asio/uses_executor.hpp",
    "content": "//\n// uses_executor.hpp\n// ~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_USES_EXECUTOR_HPP\n#define ASIO_USES_EXECUTOR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/detail/type_traits.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// A special type, similar to std::nothrow_t, used to disambiguate\n/// constructors that accept executor arguments.\n/**\n * The executor_arg_t struct is an empty structure type used as a unique type\n * to disambiguate constructor and function overloading. Specifically, some\n * types have constructors with executor_arg_t as the first argument,\n * immediately followed by an argument of a type that satisfies the Executor\n * type requirements.\n */\nstruct executor_arg_t\n{\n  /// Constructor.\n  ASIO_CONSTEXPR executor_arg_t() ASIO_NOEXCEPT\n  {\n  }\n};\n\n/// A special value, similar to std::nothrow, used to disambiguate constructors\n/// that accept executor arguments.\n/**\n * See asio::executor_arg_t and asio::uses_executor\n * for more information.\n */\n#if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION)\nconstexpr executor_arg_t executor_arg;\n#elif defined(ASIO_MSVC)\n__declspec(selectany) executor_arg_t executor_arg;\n#endif\n\n/// The uses_executor trait detects whether a type T has an associated executor\n/// that is convertible from type Executor.\n/**\n * Meets the BinaryTypeTrait requirements. The Asio library provides a\n * definition that is derived from false_type. 
A program may specialize this\n * template to derive from true_type for a user-defined type T that can be\n * constructed with an executor, where the first argument of a constructor has\n * type executor_arg_t and the second argument is convertible from type\n * Executor.\n */\ntemplate <typename T, typename Executor>\nstruct uses_executor : false_type {};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_USES_EXECUTOR_HPP\n"
  },
  {
    "path": "src/third_party/asio/version.hpp",
    "content": "//\n// version.hpp\n// ~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_VERSION_HPP\n#define ASIO_VERSION_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n// ASIO_VERSION % 100 is the sub-minor version\n// ASIO_VERSION / 100 % 1000 is the minor version\n// ASIO_VERSION / 100000 is the major version\n#define ASIO_VERSION 101401 // 1.14.1\n\n#endif // ASIO_VERSION_HPP\n"
  },
  {
    "path": "src/third_party/asio/wait_traits.hpp",
    "content": "//\n// wait_traits.hpp\n// ~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WAIT_TRAITS_HPP\n#define ASIO_WAIT_TRAITS_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/// Wait traits suitable for use with the basic_waitable_timer class template.\ntemplate <typename Clock>\nstruct wait_traits\n{\n  /// Convert a clock duration into a duration used for waiting.\n  /**\n   * @returns @c d.\n   */\n  static typename Clock::duration to_wait_duration(\n      const typename Clock::duration& d)\n  {\n    return d;\n  }\n\n  /// Convert a clock duration into a duration used for waiting.\n  /**\n   * @returns @c d.\n   */\n  static typename Clock::duration to_wait_duration(\n      const typename Clock::time_point& t)\n  {\n    typename Clock::time_point now = Clock::now();\n    if (now + (Clock::duration::max)() < t)\n      return (Clock::duration::max)();\n    if (now + (Clock::duration::min)() > t)\n      return (Clock::duration::min)();\n    return t - now;\n  }\n};\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // ASIO_WAIT_TRAITS_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/basic_object_handle.hpp",
    "content": "//\n// windows/basic_object_handle.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2011 Boris Schaeling (boris@highscore.de)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP\n#define ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/win_object_handle_service.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n\n#if defined(ASIO_HAS_MOVE)\n# include <utility>\n#endif // defined(ASIO_HAS_MOVE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Provides object-oriented handle functionality.\n/**\n * The windows::basic_object_handle class provides asynchronous and blocking\n * object-oriented handle functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Executor = executor>\nclass basic_object_handle\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the handle type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The handle type when rebound to the specified executor.\n    typedef basic_object_handle<Executor1> other;\n  };\n\n  /// The native representation of a handle.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef 
implementation_defined native_handle_type;\n#else\n  typedef asio::detail::win_object_handle_service::native_handle_type\n    native_handle_type;\n#endif\n\n  /// An object handle is always the lowest layer.\n  typedef basic_object_handle lowest_layer_type;\n\n  /// Construct an object handle without opening it.\n  /**\n   * This constructor creates an object handle without opening it.\n   *\n   * @param ex The I/O executor that the object handle will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * object handle.\n   */\n  explicit basic_object_handle(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Construct an object handle without opening it.\n  /**\n   * This constructor creates an object handle without opening it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the object handle will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the object handle.\n   */\n  template <typename ExecutionContext>\n  explicit basic_object_handle(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value,\n        basic_object_handle\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Construct an object handle on an existing native handle.\n  /**\n   * This constructor creates an object handle object to hold an existing native\n   * handle.\n   *\n   * @param ex The I/O executor that the object handle will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the\n   * object handle.\n   *\n   * @param native_handle The new underlying handle implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_object_handle(const executor_type& ex,\n      const native_handle_type& native_handle)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(), 
native_handle, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Construct an object handle on an existing native handle.\n  /**\n   * This constructor creates an object handle object to hold an existing native\n   * handle.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the object handle will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the object handle.\n   *\n   * @param native_handle The new underlying handle implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_object_handle(ExecutionContext& context,\n      const native_handle_type& native_handle,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(), native_handle, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct an object handle from another.\n  /**\n   * This constructor moves an object handle from one object to another.\n   *\n   * @param other The other object handle object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_object_handle(const executor_type&)\n   * constructor.\n   */\n  basic_object_handle(basic_object_handle&& other)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign an object handle from another.\n  /**\n   * This assignment operator moves an object handle from one object to another.\n   *\n   * @param other The other object handle object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c 
basic_object_handle(const executor_type&)\n   * constructor.\n   */\n  basic_object_handle& operator=(basic_object_handle&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n  /// Get a reference to the lowest layer.\n  /**\n   * This function returns a reference to the lowest layer in a stack of\n   * layers. Since an object handle cannot contain any further layers, it simply\n   * returns a reference to itself.\n   *\n   * @return A reference to the lowest layer in the stack of layers. Ownership\n   * is not transferred to the caller.\n   */\n  lowest_layer_type& lowest_layer()\n  {\n    return *this;\n  }\n\n  /// Get a const reference to the lowest layer.\n  /**\n   * This function returns a const reference to the lowest layer in a stack of\n   * layers. 
Since an object handle cannot contain any further layers, it simply\n   * returns a reference to itself.\n   *\n   * @return A const reference to the lowest layer in the stack of layers.\n   * Ownership is not transferred to the caller.\n   */\n  const lowest_layer_type& lowest_layer() const\n  {\n    return *this;\n  }\n\n  /// Assign an existing native handle to the handle.\n  /*\n   * This function opens the handle to hold an existing native handle.\n   *\n   * @param handle A native handle.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void assign(const native_handle_type& handle)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(), handle, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Assign an existing native handle to the handle.\n  /*\n   * This function opens the handle to hold an existing native handle.\n   *\n   * @param handle A native handle.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID assign(const native_handle_type& handle,\n      asio::error_code& ec)\n  {\n    impl_.get_service().assign(impl_.get_implementation(), handle, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Determine whether the handle is open.\n  bool is_open() const\n  {\n    return impl_.get_service().is_open(impl_.get_implementation());\n  }\n\n  /// Close the handle.\n  /**\n   * This function is used to close the handle. Any asynchronous read or write\n   * operations will be cancelled immediately, and will complete with the\n   * asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void close()\n  {\n    asio::error_code ec;\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"close\");\n  }\n\n  /// Close the handle.\n  /**\n   * This function is used to close the handle. 
Any asynchronous read or write\n   * operations will be cancelled immediately, and will complete with the\n   * asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get the native handle representation.\n  /**\n   * This function may be used to obtain the underlying representation of the\n   * handle. This is intended to allow access to native handle functionality\n   * that is not otherwise provided.\n   */\n  native_handle_type native_handle()\n  {\n    return impl_.get_service().native_handle(impl_.get_implementation());\n  }\n\n  /// Cancel all asynchronous operations associated with the handle.\n  /**\n   * This function causes all outstanding asynchronous read or write operations\n   * to finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void cancel()\n  {\n    asio::error_code ec;\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n  }\n\n  /// Cancel all asynchronous operations associated with the handle.\n  /**\n   * This function causes all outstanding asynchronous read or write operations\n   * to finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID cancel(asio::error_code& ec)\n  {\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Perform a blocking wait on the object handle.\n  /**\n   * This function is used to wait for the object handle to be set to the\n   * signalled state. 
This function blocks and does not return until the object\n   * handle has been set to the signalled state.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void wait()\n  {\n    asio::error_code ec;\n    impl_.get_service().wait(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"wait\");\n  }\n\n  /// Perform a blocking wait on the object handle.\n  /**\n   * This function is used to wait for the object handle to be set to the\n   * signalled state. This function blocks and does not return until the object\n   * handle has been set to the signalled state.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  void wait(asio::error_code& ec)\n  {\n    impl_.get_service().wait(impl_.get_implementation(), ec);\n  }\n\n  /// Start an asynchronous wait on the object handle.\n  /**\n   * This function is be used to initiate an asynchronous wait against the\n   * object handle. It always returns immediately.\n   *\n   * @param handler The handler to be called when the object handle is set to\n   * the signalled state. Copies will be made of the handler as required. The\n   * function signature of the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error // Result of operation.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   */\n  template <\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code))\n        WaitHandler ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WaitHandler,\n      void (asio::error_code))\n  async_wait(\n      ASIO_MOVE_ARG(WaitHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WaitHandler, void (asio::error_code)>(\n        initiate_async_wait(this), handler);\n  }\n\nprivate:\n  // Disallow copying and assignment.\n  basic_object_handle(const basic_object_handle&) ASIO_DELETED;\n  basic_object_handle& operator=(const basic_object_handle&) ASIO_DELETED;\n\n  class initiate_async_wait\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_wait(basic_object_handle* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WaitHandler>\n    void operator()(ASIO_MOVE_ARG(WaitHandler) handler) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WaitHandler.\n      ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check;\n\n      detail::non_const_lvalue<WaitHandler> handler2(handler);\n      self_->impl_.get_service().async_wait(\n          self_->impl_.get_implementation(), handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_object_handle* self_;\n  };\n\n  asio::detail::io_object_impl<\n    asio::detail::win_object_handle_service, Executor> impl_;\n};\n\n} // namespace windows\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // 
ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/basic_overlapped_handle.hpp",
    "content": "//\n// windows/basic_overlapped_handle.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_BASIC_OVERLAPPED_HANDLE_HPP\n#define ASIO_WINDOWS_BASIC_OVERLAPPED_HANDLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \\\n  || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include <cstddef>\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/io_object_impl.hpp\"\n#include \"asio/detail/throw_error.hpp\"\n#include \"asio/detail/win_iocp_handle_service.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/execution_context.hpp\"\n#include \"asio/executor.hpp\"\n\n#if defined(ASIO_HAS_MOVE)\n# include <utility>\n#endif // defined(ASIO_HAS_MOVE)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Provides Windows handle functionality for objects that support\n/// overlapped I/O.\n/**\n * The windows::overlapped_handle class provides the ability to wrap a Windows\n * handle. 
The underlying object referred to by the handle must support\n * overlapped I/O.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Executor = executor>\nclass basic_overlapped_handle\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the handle type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The handle type when rebound to the specified executor.\n    typedef basic_overlapped_handle<Executor1> other;\n  };\n\n  /// The native representation of a handle.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#else\n  typedef asio::detail::win_iocp_handle_service::native_handle_type\n    native_handle_type;\n#endif\n\n  /// An overlapped_handle is always the lowest layer.\n  typedef basic_overlapped_handle lowest_layer_type;\n\n  /// Construct an overlapped handle without opening it.\n  /**\n   * This constructor creates an overlapped handle without opening it.\n   *\n   * @param ex The I/O executor that the overlapped handle will use, by default,\n   * to dispatch handlers for any asynchronous operations performed on the\n   * overlapped handle.\n   */\n  explicit basic_overlapped_handle(const executor_type& ex)\n    : impl_(ex)\n  {\n  }\n\n  /// Construct an overlapped handle without opening it.\n  /**\n   * This constructor creates an overlapped handle without opening it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the overlapped handle will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the overlapped handle.\n   */\n  template <typename ExecutionContext>\n  explicit basic_overlapped_handle(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value,\n        
basic_overlapped_handle\n      >::type* = 0)\n    : impl_(context)\n  {\n  }\n\n  /// Construct an overlapped handle on an existing native handle.\n  /**\n   * This constructor creates an overlapped handle object to hold an existing\n   * native handle.\n   *\n   * @param ex The I/O executor that the overlapped handle will use, by default,\n   * to dispatch handlers for any asynchronous operations performed on the\n   * overlapped handle.\n   *\n   * @param native_handle The new underlying handle implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_overlapped_handle(const executor_type& ex,\n      const native_handle_type& native_handle)\n    : impl_(ex)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(), native_handle, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Construct an overlapped handle on an existing native handle.\n  /**\n   * This constructor creates an overlapped handle object to hold an existing\n   * native handle.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the overlapped handle will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the overlapped handle.\n   *\n   * @param native_handle The new underlying handle implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_overlapped_handle(ExecutionContext& context,\n      const native_handle_type& native_handle,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(), native_handle, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct an overlapped handle from another.\n  /**\n   * This 
constructor moves a handle from one object to another.\n   *\n   * @param other The other overlapped handle object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c overlapped_handle(const executor_type&)\n   * constructor.\n   */\n  basic_overlapped_handle(basic_overlapped_handle&& other)\n    : impl_(std::move(other.impl_))\n  {\n  }\n\n  /// Move-assign an overlapped handle from another.\n  /**\n   * This assignment operator moves a handle from one object to another.\n   *\n   * @param other The other overlapped handle object from which the move will\n   * occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c overlapped_handle(const executor_type&)\n   * constructor.\n   */\n  basic_overlapped_handle& operator=(basic_overlapped_handle&& other)\n  {\n    impl_ = std::move(other.impl_);\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Get the executor associated with the object.\n  executor_type get_executor() ASIO_NOEXCEPT\n  {\n    return impl_.get_executor();\n  }\n\n  /// Get a reference to the lowest layer.\n  /**\n   * This function returns a reference to the lowest layer in a stack of\n   * layers. Since an overlapped_handle cannot contain any further layers, it\n   * simply returns a reference to itself.\n   *\n   * @return A reference to the lowest layer in the stack of layers. Ownership\n   * is not transferred to the caller.\n   */\n  lowest_layer_type& lowest_layer()\n  {\n    return *this;\n  }\n\n  /// Get a const reference to the lowest layer.\n  /**\n   * This function returns a const reference to the lowest layer in a stack of\n   * layers. 
Since an overlapped_handle cannot contain any further layers, it\n   * simply returns a reference to itself.\n   *\n   * @return A const reference to the lowest layer in the stack of layers.\n   * Ownership is not transferred to the caller.\n   */\n  const lowest_layer_type& lowest_layer() const\n  {\n    return *this;\n  }\n\n  /// Assign an existing native handle to the handle.\n  /*\n   * This function opens the handle to hold an existing native handle.\n   *\n   * @param handle A native handle.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void assign(const native_handle_type& handle)\n  {\n    asio::error_code ec;\n    impl_.get_service().assign(impl_.get_implementation(), handle, ec);\n    asio::detail::throw_error(ec, \"assign\");\n  }\n\n  /// Assign an existing native handle to the handle.\n  /*\n   * This function opens the handle to hold an existing native handle.\n   *\n   * @param handle A native handle.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID assign(const native_handle_type& handle,\n      asio::error_code& ec)\n  {\n    impl_.get_service().assign(impl_.get_implementation(), handle, ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Determine whether the handle is open.\n  bool is_open() const\n  {\n    return impl_.get_service().is_open(impl_.get_implementation());\n  }\n\n  /// Close the handle.\n  /**\n   * This function is used to close the handle. Any asynchronous read or write\n   * operations will be cancelled immediately, and will complete with the\n   * asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void close()\n  {\n    asio::error_code ec;\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"close\");\n  }\n\n  /// Close the handle.\n  /**\n   * This function is used to close the handle. 
Any asynchronous read or write\n   * operations will be cancelled immediately, and will complete with the\n   * asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID close(asio::error_code& ec)\n  {\n    impl_.get_service().close(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\n  /// Get the native handle representation.\n  /**\n   * This function may be used to obtain the underlying representation of the\n   * handle. This is intended to allow access to native handle functionality\n   * that is not otherwise provided.\n   */\n  native_handle_type native_handle()\n  {\n    return impl_.get_service().native_handle(impl_.get_implementation());\n  }\n\n  /// Cancel all asynchronous operations associated with the handle.\n  /**\n   * This function causes all outstanding asynchronous read or write operations\n   * to finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  void cancel()\n  {\n    asio::error_code ec;\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    asio::detail::throw_error(ec, \"cancel\");\n  }\n\n  /// Cancel all asynchronous operations associated with the handle.\n  /**\n   * This function causes all outstanding asynchronous read or write operations\n   * to finish immediately, and the handlers for cancelled operations will be\n   * passed the asio::error::operation_aborted error.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   */\n  ASIO_SYNC_OP_VOID cancel(asio::error_code& ec)\n  {\n    impl_.get_service().cancel(impl_.get_implementation(), ec);\n    ASIO_SYNC_OP_VOID_RETURN(ec);\n  }\n\nprotected:\n  /// Protected destructor to prevent deletion through this type.\n  /**\n   * This function destroys the handle, cancelling any outstanding asynchronous\n   * wait 
operations associated with the handle as if by calling @c cancel.\n   */\n  ~basic_overlapped_handle()\n  {\n  }\n\n  asio::detail::io_object_impl<\n    asio::detail::win_iocp_handle_service, Executor> impl_;\n\nprivate:\n  // Disallow copying and assignment.\n  basic_overlapped_handle(const basic_overlapped_handle&) ASIO_DELETED;\n  basic_overlapped_handle& operator=(\n      const basic_overlapped_handle&) ASIO_DELETED;\n};\n\n} // namespace windows\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)\n       //   || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_WINDOWS_BASIC_OVERLAPPED_HANDLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/basic_random_access_handle.hpp",
    "content": "//\n// windows/basic_random_access_handle.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP\n#define ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/windows/basic_overlapped_handle.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Provides random-access handle functionality.\n/**\n * The windows::basic_random_access_handle class provides asynchronous and\n * blocking random-access handle functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\ntemplate <typename Executor = executor>\nclass basic_random_access_handle\n  : public basic_overlapped_handle<Executor>\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the handle type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The handle type when rebound to the specified executor.\n    typedef basic_random_access_handle<Executor1> other;\n  };\n\n  /// The native representation of a handle.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#else\n  typedef asio::detail::win_iocp_handle_service::native_handle_type\n    native_handle_type;\n#endif\n\n  /// Construct a random-access handle without opening it.\n  /**\n   * This constructor creates a random-access handle without opening 
it.\n   *\n   * @param ex The I/O executor that the random-access handle will use, by\n   * default, to dispatch handlers for any asynchronous operations performed on\n   * the random-access handle.\n   */\n  explicit basic_random_access_handle(const executor_type& ex)\n    : basic_overlapped_handle<Executor>(ex)\n  {\n  }\n\n  /// Construct a random-access handle without opening it.\n  /**\n   * This constructor creates a random-access handle without opening it. The\n   * handle needs to be opened or assigned before data can be sent or received\n   * on it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the random-access handle will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the random-access handle.\n   */\n  template <typename ExecutionContext>\n  explicit basic_random_access_handle(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value,\n        basic_random_access_handle\n      >::type* = 0)\n    : basic_overlapped_handle<Executor>(context)\n  {\n  }\n\n  /// Construct a random-access handle on an existing native handle.\n  /**\n   * This constructor creates a random-access handle object to hold an existing\n   * native handle.\n   *\n   * @param ex The I/O executor that the random-access handle will use, by\n   * default, to dispatch handlers for any asynchronous operations performed on\n   * the random-access handle.\n   *\n   * @param handle The new underlying handle implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_random_access_handle(const executor_type& ex,\n      const native_handle_type& handle)\n    : basic_overlapped_handle<Executor>(ex, handle)\n  {\n  }\n\n  /// Construct a random-access handle on an existing native handle.\n  /**\n   * This constructor creates a random-access handle object to hold an existing\n   * native handle.\n   *\n   * @param 
context An execution context which provides the I/O executor that\n   * the random-access handle will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the random-access handle.\n   *\n   * @param handle The new underlying handle implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_random_access_handle(ExecutionContext& context,\n      const native_handle_type& handle,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_overlapped_handle<Executor>(context, handle)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a random-access handle from another.\n  /**\n   * This constructor moves a random-access handle from one object to another.\n   *\n   * @param other The other random-access handle object from which the\n   * move will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_random_access_handle(const executor_type&)\n   * constructor.\n   */\n  basic_random_access_handle(basic_random_access_handle&& other)\n    : basic_overlapped_handle<Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a random-access handle from another.\n  /**\n   * This assignment operator moves a random-access handle from one object to\n   * another.\n   *\n   * @param other The other random-access handle object from which the\n   * move will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_random_access_handle(const executor_type&)\n   * constructor.\n   */\n  basic_random_access_handle& operator=(basic_random_access_handle&& other)\n  {\n    basic_overlapped_handle<Executor>::operator=(std::move(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || 
defined(GENERATING_DOCUMENTATION)\n\n  /// Write some data to the handle at the specified offset.\n  /**\n   * This function is used to write data to the random-access handle. The\n   * function call will block until one or more bytes of the data has been\n   * written successfully, or until an error occurs.\n   *\n   * @param offset The offset at which the data will be written.\n   *\n   * @param buffers One or more data buffers to be written to the handle.\n   *\n   * @returns The number of bytes written.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The write_some_at operation may not write all of the data. Consider\n   * using the @ref write_at function if you need to ensure that all data is\n   * written before the blocking operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * handle.write_some_at(42, asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some_at(uint64_t offset,\n      const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().write_some_at(\n        this->impl_.get_implementation(), offset, buffers, ec);\n    asio::detail::throw_error(ec, \"write_some_at\");\n    return s;\n  }\n\n  /// Write some data to the handle at the specified offset.\n  /**\n   * This function is used to write data to the random-access handle. 
The\n   * function call will block until one or more bytes of the data has been\n   * written successfully, or until an error occurs.\n   *\n   * @param offset The offset at which the data will be written.\n   *\n   * @param buffers One or more data buffers to be written to the handle.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes written. Returns 0 if an error occurred.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. Consider using the @ref write_at function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some_at(uint64_t offset,\n      const ConstBufferSequence& buffers, asio::error_code& ec)\n  {\n    return this->impl_.get_service().write_some_at(\n        this->impl_.get_implementation(), offset, buffers, ec);\n  }\n\n  /// Start an asynchronous write at the specified offset.\n  /**\n   * This function is used to asynchronously write data to the random-access\n   * handle. The function call always returns immediately.\n   *\n   * @param offset The offset at which the data will be written.\n   *\n   * @param buffers One or more data buffers to be written to the handle.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the write operation completes.\n   * Copies will be made of the handler as required. 
The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes written.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The write operation may not transmit all of the data to the peer.\n   * Consider using the @ref async_write_at function if you need to ensure that\n   * all data is written before the asynchronous operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * handle.async_write_some_at(42, asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some_at(uint64_t offset,\n      const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_write_some_at(this), handler, offset, buffers);\n  }\n\n  /// Read some data from the handle at the specified offset.\n  /**\n   * This function is used to read data from the random-access handle. 
The\n   * function call will block until one or more bytes of data has been read\n   * successfully, or until an error occurs.\n   *\n   * @param offset The offset at which the data will be read.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @returns The number of bytes read.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. Consider using the @ref read_at function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * handle.read_some_at(42, asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some_at(uint64_t offset,\n      const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().read_some_at(\n        this->impl_.get_implementation(), offset, buffers, ec);\n    asio::detail::throw_error(ec, \"read_some_at\");\n    return s;\n  }\n\n  /// Read some data from the handle at the specified offset.\n  /**\n   * This function is used to read data from the random-access handle. 
The\n   * function call will block until one or more bytes of data has been read\n   * successfully, or until an error occurs.\n   *\n   * @param offset The offset at which the data will be read.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes read. Returns 0 if an error occurred.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. Consider using the @ref read_at function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some_at(uint64_t offset,\n      const MutableBufferSequence& buffers, asio::error_code& ec)\n  {\n    return this->impl_.get_service().read_some_at(\n        this->impl_.get_implementation(), offset, buffers, ec);\n  }\n\n  /// Start an asynchronous read at the specified offset.\n  /**\n   * This function is used to asynchronously read data from the random-access\n   * handle. The function call always returns immediately.\n   *\n   * @param offset The offset at which the data will be read.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the read operation completes.\n   * Copies will be made of the handler as required. 
The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes read.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The read operation may not read all of the requested number of bytes.\n   * Consider using the @ref async_read_at function if you need to ensure that\n   * the requested amount of data is read before the asynchronous operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * handle.async_read_some_at(42, asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some_at(uint64_t offset,\n      const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_read_some_at(this), handler, offset, buffers);\n  }\n\nprivate:\n  class initiate_async_write_some_at\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_write_some_at(basic_random_access_handle* self)\n      : self_(self)\n   
 {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        uint64_t offset, const ConstBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_write_some_at(\n          self_->impl_.get_implementation(), offset, buffers, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_random_access_handle* self_;\n  };\n\n  class initiate_async_read_some_at\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_read_some_at(basic_random_access_handle* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        uint64_t offset, const MutableBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_read_some_at(\n          self_->impl_.get_implementation(), offset, buffers, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_random_access_handle* self_;\n  };\n};\n\n} // namespace windows\n} // namespace asio\n\n#include 
\"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/basic_stream_handle.hpp",
    "content": "//\n// windows/basic_stream_handle.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP\n#define ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include \"asio/windows/basic_overlapped_handle.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Provides stream-oriented handle functionality.\n/**\n * The windows::basic_stream_handle class provides asynchronous and blocking\n * stream-oriented handle functionality.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n *\n * @par Concepts:\n * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream.\n */\ntemplate <typename Executor = executor>\nclass basic_stream_handle\n  : public basic_overlapped_handle<Executor>\n{\npublic:\n  /// The type of the executor associated with the object.\n  typedef Executor executor_type;\n\n  /// Rebinds the handle type to another executor.\n  template <typename Executor1>\n  struct rebind_executor\n  {\n    /// The handle type when rebound to the specified executor.\n    typedef basic_stream_handle<Executor1> other;\n  };\n\n  /// The native representation of a handle.\n#if defined(GENERATING_DOCUMENTATION)\n  typedef implementation_defined native_handle_type;\n#else\n  typedef asio::detail::win_iocp_handle_service::native_handle_type\n    native_handle_type;\n#endif\n\n  /// Construct a stream handle without opening it.\n  /**\n   * This constructor 
creates a stream handle without opening it.\n   *\n   * @param ex The I/O executor that the stream handle will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the stream\n   * handle.\n   */\n  explicit basic_stream_handle(const executor_type& ex)\n    : basic_overlapped_handle<Executor>(ex)\n  {\n  }\n\n  /// Construct a stream handle without opening it.\n  /**\n   * This constructor creates a stream handle without opening it. The handle\n   * needs to be opened or assigned before data can be sent or received on it.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the stream handle will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the stream handle.\n   */\n  template <typename ExecutionContext>\n  explicit basic_stream_handle(ExecutionContext& context,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value,\n        basic_stream_handle\n      >::type* = 0)\n    : basic_overlapped_handle<Executor>(context)\n  {\n  }\n\n  /// Construct a stream handle on an existing native handle.\n  /**\n   * This constructor creates a stream handle object to hold an existing native\n   * handle.\n   *\n   * @param ex The I/O executor that the stream handle will use, by default, to\n   * dispatch handlers for any asynchronous operations performed on the stream\n   * handle.\n   *\n   * @param handle The new underlying handle implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  basic_stream_handle(const executor_type& ex, const native_handle_type& handle)\n    : basic_overlapped_handle<Executor>(ex, handle)\n  {\n  }\n\n  /// Construct a stream handle on an existing native handle.\n  /**\n   * This constructor creates a stream handle object to hold an existing native\n   * handle.\n   *\n   * @param context An execution context which provides the I/O executor that\n   * the stream 
handle will use, by default, to dispatch handlers for any\n   * asynchronous operations performed on the stream handle.\n   *\n   * @param handle The new underlying handle implementation.\n   *\n   * @throws asio::system_error Thrown on failure.\n   */\n  template <typename ExecutionContext>\n  basic_stream_handle(ExecutionContext& context,\n      const native_handle_type& handle,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : basic_overlapped_handle<Executor>(context, handle)\n  {\n  }\n\n#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n  /// Move-construct a stream handle from another.\n  /**\n   * This constructor moves a stream handle from one object to another.\n   *\n   * @param other The other stream handle object from which the move\n   * will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_stream_handle(const executor_type&)\n   * constructor.\n   */\n  basic_stream_handle(basic_stream_handle&& other)\n    : basic_overlapped_handle<Executor>(std::move(other))\n  {\n  }\n\n  /// Move-assign a stream handle from another.\n  /**\n   * This assignment operator moves a stream handle from one object to\n   * another.\n   *\n   * @param other The other stream handle object from which the move will occur.\n   *\n   * @note Following the move, the moved-from object is in the same state as if\n   * constructed using the @c basic_stream_handle(const executor_type&)\n   * constructor.\n   */\n  basic_stream_handle& operator=(basic_stream_handle&& other)\n  {\n    basic_overlapped_handle<Executor>::operator=(std::move(other));\n    return *this;\n  }\n#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)\n\n  /// Write some data to the handle.\n  /**\n   * This function is used to write data to the stream handle. 
The function call\n   * will block until one or more bytes of the data has been written\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more data buffers to be written to the handle.\n   *\n   * @returns The number of bytes written.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. Consider using the @ref write function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * handle.write_some(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().write_some(\n        this->impl_.get_implementation(), buffers, ec);\n    asio::detail::throw_error(ec, \"write_some\");\n    return s;\n  }\n\n  /// Write some data to the handle.\n  /**\n   * This function is used to write data to the stream handle. The function call\n   * will block until one or more bytes of the data has been written\n   * successfully, or until an error occurs.\n   *\n   * @param buffers One or more data buffers to be written to the handle.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes written. Returns 0 if an error occurred.\n   *\n   * @note The write_some operation may not transmit all of the data to the\n   * peer. 
Consider using the @ref write function if you need to ensure that\n   * all data is written before the blocking operation completes.\n   */\n  template <typename ConstBufferSequence>\n  std::size_t write_some(const ConstBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().write_some(\n        this->impl_.get_implementation(), buffers, ec);\n  }\n\n  /// Start an asynchronous write.\n  /**\n   * This function is used to asynchronously write data to the stream handle.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more data buffers to be written to the handle.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the write operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes written.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The write operation may not transmit all of the data to the peer.\n   * Consider using the @ref async_write function if you need to ensure that all\n   * data is written before the asynchronous operation completes.\n   *\n   * @par Example\n   * To write a single data buffer use the @ref buffer function as follows:\n   * @code\n   * handle.async_write_some(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on writing multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename ConstBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) WriteHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n      void (asio::error_code, std::size_t))\n  async_write_some(const ConstBufferSequence& buffers,\n      ASIO_MOVE_ARG(WriteHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<WriteHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_write_some(this), handler, buffers);\n  }\n\n  /// Read some data from the handle.\n  /**\n   * This function is used to read data from the stream handle. The function\n   * call will block until one or more bytes of data has been read successfully,\n   * or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @returns The number of bytes read.\n   *\n   * @throws asio::system_error Thrown on failure. An error code of\n   * asio::error::eof indicates that the connection was closed by the\n   * peer.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. 
Consider using the @ref read function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * handle.read_some(asio::buffer(data, size));\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers)\n  {\n    asio::error_code ec;\n    std::size_t s = this->impl_.get_service().read_some(\n        this->impl_.get_implementation(), buffers, ec);\n    asio::detail::throw_error(ec, \"read_some\");\n    return s;\n  }\n\n  /// Read some data from the handle.\n  /**\n   * This function is used to read data from the stream handle. The function\n   * call will block until one or more bytes of data has been read successfully,\n   * or until an error occurs.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   *\n   * @param ec Set to indicate what error occurred, if any.\n   *\n   * @returns The number of bytes read. Returns 0 if an error occurred.\n   *\n   * @note The read_some operation may not read all of the requested number of\n   * bytes. 
Consider using the @ref read function if you need to ensure that\n   * the requested amount of data is read before the blocking operation\n   * completes.\n   */\n  template <typename MutableBufferSequence>\n  std::size_t read_some(const MutableBufferSequence& buffers,\n      asio::error_code& ec)\n  {\n    return this->impl_.get_service().read_some(\n        this->impl_.get_implementation(), buffers, ec);\n  }\n\n  /// Start an asynchronous read.\n  /**\n   * This function is used to asynchronously read data from the stream handle.\n   * The function call always returns immediately.\n   *\n   * @param buffers One or more buffers into which the data will be read.\n   * Although the buffers object may be copied as necessary, ownership of the\n   * underlying memory blocks is retained by the caller, which must guarantee\n   * that they remain valid until the handler is called.\n   *\n   * @param handler The handler to be called when the read operation completes.\n   * Copies will be made of the handler as required. The function signature of\n   * the handler must be:\n   * @code void handler(\n   *   const asio::error_code& error, // Result of operation.\n   *   std::size_t bytes_transferred           // Number of bytes read.\n   * ); @endcode\n   * Regardless of whether the asynchronous operation completes immediately or\n   * not, the handler will not be invoked from within this function. 
On\n   * immediate completion, invocation of the handler will be performed in a\n   * manner equivalent to using asio::post().\n   *\n   * @note The read operation may not read all of the requested number of bytes.\n   * Consider using the @ref async_read function if you need to ensure that the\n   * requested amount of data is read before the asynchronous operation\n   * completes.\n   *\n   * @par Example\n   * To read into a single data buffer use the @ref buffer function as follows:\n   * @code\n   * handle.async_read_some(asio::buffer(data, size), handler);\n   * @endcode\n   * See the @ref buffer documentation for information on reading into multiple\n   * buffers in one go, and how to use it with arrays, boost::array or\n   * std::vector.\n   */\n  template <typename MutableBufferSequence,\n      ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n        std::size_t)) ReadHandler\n          ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(executor_type)>\n  ASIO_INITFN_AUTO_RESULT_TYPE(ReadHandler,\n      void (asio::error_code, std::size_t))\n  async_read_some(const MutableBufferSequence& buffers,\n      ASIO_MOVE_ARG(ReadHandler) handler\n        ASIO_DEFAULT_COMPLETION_TOKEN(executor_type))\n  {\n    return async_initiate<ReadHandler,\n      void (asio::error_code, std::size_t)>(\n        initiate_async_read_some(this), handler, buffers);\n  }\n\nprivate:\n  class initiate_async_write_some\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_write_some(basic_stream_handle* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename WriteHandler, typename ConstBufferSequence>\n    void operator()(ASIO_MOVE_ARG(WriteHandler) handler,\n        const ConstBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for 
a WriteHandler.\n      ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;\n\n      detail::non_const_lvalue<WriteHandler> handler2(handler);\n      self_->impl_.get_service().async_write_some(\n          self_->impl_.get_implementation(), buffers, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_stream_handle* self_;\n  };\n\n  class initiate_async_read_some\n  {\n  public:\n    typedef Executor executor_type;\n\n    explicit initiate_async_read_some(basic_stream_handle* self)\n      : self_(self)\n    {\n    }\n\n    executor_type get_executor() const ASIO_NOEXCEPT\n    {\n      return self_->get_executor();\n    }\n\n    template <typename ReadHandler, typename MutableBufferSequence>\n    void operator()(ASIO_MOVE_ARG(ReadHandler) handler,\n        const MutableBufferSequence& buffers) const\n    {\n      // If you get an error on the following line it means that your handler\n      // does not meet the documented type requirements for a ReadHandler.\n      ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check;\n\n      detail::non_const_lvalue<ReadHandler> handler2(handler);\n      self_->impl_.get_service().async_read_some(\n          self_->impl_.get_implementation(), buffers, handler2.value,\n          self_->impl_.get_implementation_executor());\n    }\n\n  private:\n    basic_stream_handle* self_;\n  };\n};\n\n} // namespace windows\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_WINDOWS_STREAM_HANDLE)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/object_handle.hpp",
    "content": "//\n// windows/object_handle.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n// Copyright (c) 2011 Boris Schaeling (boris@highscore.de)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_OBJECT_HANDLE_HPP\n#define ASIO_WINDOWS_OBJECT_HANDLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/windows/basic_object_handle.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Typedef for the typical usage of an object handle.\ntypedef basic_object_handle<> object_handle;\n\n} // namespace windows\n} // namespace asio\n\n#endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_WINDOWS_OBJECT_HANDLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/overlapped_handle.hpp",
    "content": "//\n// windows/overlapped_handle.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_OVERLAPPED_HANDLE_HPP\n#define ASIO_WINDOWS_OVERLAPPED_HANDLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \\\n  || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/windows/basic_overlapped_handle.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Typedef for the typical usage of an overlapped handle.\ntypedef basic_overlapped_handle<> overlapped_handle;\n\n} // namespace windows\n} // namespace asio\n\n#endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)\n       //   || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_WINDOWS_OVERLAPPED_HANDLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/overlapped_ptr.hpp",
    "content": "//\n// windows/overlapped_ptr.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_OVERLAPPED_PTR_HPP\n#define ASIO_WINDOWS_OVERLAPPED_PTR_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/detail/noncopyable.hpp\"\n#include \"asio/detail/win_iocp_overlapped_ptr.hpp\"\n#include \"asio/io_context.hpp\"\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Wraps a handler to create an OVERLAPPED object for use with overlapped I/O.\n/**\n * A special-purpose smart pointer used to wrap an application handler so that\n * it can be passed as the LPOVERLAPPED argument to overlapped I/O functions.\n *\n * @par Thread Safety\n * @e Distinct @e objects: Safe.@n\n * @e Shared @e objects: Unsafe.\n */\nclass overlapped_ptr\n  : private noncopyable\n{\npublic:\n  /// Construct an empty overlapped_ptr.\n  overlapped_ptr()\n    : impl_()\n  {\n  }\n\n  /// Construct an overlapped_ptr to contain the specified handler.\n  template <typename ExecutionContext, typename Handler>\n  explicit overlapped_ptr(ExecutionContext& context,\n      ASIO_MOVE_ARG(Handler) handler,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n    : impl_(context.get_executor(), ASIO_MOVE_CAST(Handler)(handler))\n  {\n  }\n\n  /// Construct an overlapped_ptr to contain the specified handler.\n  template <typename Executor, typename Handler>\n  explicit overlapped_ptr(const Executor& ex,\n      ASIO_MOVE_ARG(Handler) handler,\n   
   typename enable_if<\n        is_executor<Executor>::value\n      >::type* = 0)\n    : impl_(ex, ASIO_MOVE_CAST(Handler)(handler))\n  {\n  }\n\n  /// Destructor automatically frees the OVERLAPPED object unless released.\n  ~overlapped_ptr()\n  {\n  }\n\n  /// Reset to empty.\n  void reset()\n  {\n    impl_.reset();\n  }\n\n  /// Reset to contain the specified handler, freeing any current OVERLAPPED\n  /// object.\n  template <typename ExecutionContext, typename Handler>\n  void reset(ExecutionContext& context, ASIO_MOVE_ARG(Handler) handler,\n      typename enable_if<\n        is_convertible<ExecutionContext&, execution_context&>::value\n      >::type* = 0)\n  {\n    impl_.reset(context.get_executor(), ASIO_MOVE_CAST(Handler)(handler));\n  }\n\n  /// Reset to contain the specified handler, freeing any current OVERLAPPED\n  /// object.\n  template <typename Executor, typename Handler>\n  void reset(const Executor& ex, ASIO_MOVE_ARG(Handler) handler,\n      typename enable_if<\n        is_executor<Executor>::value\n      >::type* = 0)\n  {\n    impl_.reset(ex, ASIO_MOVE_CAST(Handler)(handler));\n  }\n\n  /// Get the contained OVERLAPPED object.\n  OVERLAPPED* get()\n  {\n    return impl_.get();\n  }\n\n  /// Get the contained OVERLAPPED object.\n  const OVERLAPPED* get() const\n  {\n    return impl_.get();\n  }\n\n  /// Release ownership of the OVERLAPPED object.\n  OVERLAPPED* release()\n  {\n    return impl_.release();\n  }\n\n  /// Post completion notification for overlapped operation. Releases ownership.\n  void complete(const asio::error_code& ec,\n      std::size_t bytes_transferred)\n  {\n    impl_.complete(ec, bytes_transferred);\n  }\n\nprivate:\n  detail::win_iocp_overlapped_ptr impl_;\n};\n\n} // namespace windows\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#endif // defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_WINDOWS_OVERLAPPED_PTR_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/random_access_handle.hpp",
    "content": "//\n// windows/random_access_handle.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP\n#define ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/windows/basic_random_access_handle.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Typedef for the typical usage of a random-access handle.\ntypedef basic_random_access_handle<> random_access_handle;\n\n} // namespace windows\n} // namespace asio\n\n#endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/windows/stream_handle.hpp",
    "content": "//\n// windows/stream_handle.hpp\n// ~~~~~~~~~~~~~~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WINDOWS_STREAM_HANDLE_HPP\n#define ASIO_WINDOWS_STREAM_HANDLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n\n#if defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \\\n  || defined(GENERATING_DOCUMENTATION)\n\n#include \"asio/windows/basic_stream_handle.hpp\"\n\nnamespace asio {\nnamespace windows {\n\n/// Typedef for the typical usage of a stream-oriented handle.\ntypedef basic_stream_handle<> stream_handle;\n\n} // namespace windows\n} // namespace asio\n\n#endif // defined(ASIO_HAS_WINDOWS_STREAM_HANDLE)\n       //   || defined(GENERATING_DOCUMENTATION)\n\n#endif // ASIO_WINDOWS_STREAM_HANDLE_HPP\n"
  },
  {
    "path": "src/third_party/asio/write.hpp",
    "content": "//\n// write.hpp\n// ~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WRITE_HPP\n#define ASIO_WRITE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/async_result.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/error.hpp\"\n\n#if !defined(ASIO_NO_EXTENSIONS)\n# include \"asio/basic_streambuf_fwd.hpp\"\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/**\n * @defgroup write asio::write\n *\n * @brief The @c write function is a composed operation that writes a certain\n * amount of data to a stream before returning.\n */\n/*@{*/\n\n/// Write all of the supplied data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers One or more buffers containing the data to be written. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to write to the\n * stream.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code asio::write(s, asio::buffer(data, size)); @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write(\n *     s, buffers,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncWriteStream, typename ConstBufferSequence>\nstd::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type* = 0);\n\n/// Write all of the supplied data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers One or more buffers containing the data to be written. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to write to the\n * stream.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code asio::write(s, asio::buffer(data, size), ec); @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write(\n *     s, buffers,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncWriteStream, typename ConstBufferSequence>\nstd::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type* = 0);\n\n/// Write a certain amount of data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers One or more buffers containing the data to be written. The sum\n * of the buffer sizes indicates the maximum number of bytes to write to the\n * stream.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's write_some function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code asio::write(s, asio::buffer(data, size),\n *     asio::transfer_at_least(32)); @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename SyncWriteStream, typename ConstBufferSequence,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type* = 0);\n\n/// Write a certain amount of data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. 
The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers One or more buffers containing the data to be written. The sum\n * of the buffer sizes indicates the maximum number of bytes to write to the\n * stream.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's write_some function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes written. If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncWriteStream, typename ConstBufferSequence,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Write all of the supplied data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. 
The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Successfully written data is automatically consumed from the buffers.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write(\n *     s, buffers,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v1>\nstd::size_t write(SyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Write all of the supplied data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. 
The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Successfully written data is automatically consumed from the buffers.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write(\n *     s, buffers,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v1>\nstd::size_t write(SyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Write a certain amount of data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Successfully written data is automatically consumed from the buffers.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's write_some function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n */\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v1,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Write a certain amount of data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Successfully written data is automatically consumed from the buffers.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's write_some function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes written. If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v1,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Write all of the supplied data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. 
The type must support\n * the SyncWriteStream concept.\n *\n * @param b The basic_streambuf object from which data will be written.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write(\n *     s, b,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncWriteStream, typename Allocator>\nstd::size_t write(SyncWriteStream& s, basic_streambuf<Allocator>& b);\n\n/// Write all of the supplied data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. 
The type must support\n * the SyncWriteStream concept.\n *\n * @param b The basic_streambuf object from which data will be written.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write(\n *     s, b,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncWriteStream, typename Allocator>\nstd::size_t write(SyncWriteStream& s, basic_streambuf<Allocator>& b,\n    asio::error_code& ec);\n\n/// Write a certain amount of data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param b The basic_streambuf object from which data will be written.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. 
A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's write_some function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n */\ntemplate <typename SyncWriteStream, typename Allocator,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition);\n\n/// Write a certain amount of data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param b The basic_streambuf object from which data will be written.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's write_some function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes written. 
If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncWriteStream, typename Allocator,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition, asio::error_code& ec);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Write all of the supplied data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Successfully written data is automatically consumed from the buffers.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write(\n *     s, buffers,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v2>\nstd::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Write all of the supplied data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li An 
error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Successfully written data is automatically consumed from the buffers.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write(\n *     s, buffers,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v2>\nstd::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers,\n    asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Write a certain amount of data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Successfully written data is automatically consumed from the buffers.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's write_some function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n */\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v2,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Write a certain amount of data to a stream before returning.\n/**\n * This function is used to write a certain number of bytes of data to a stream.\n * The call will block until one of the following conditions is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * write_some function.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the SyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Successfully written data is automatically consumed from the buffers.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's write_some function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes written. If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncWriteStream, typename DynamicBuffer_v2,\n    typename CompletionCondition>\nstd::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition, asio::error_code& ec,\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/*@}*/\n/**\n * @defgroup async_write asio::async_write\n *\n * @brief The @c async_write function is a composed asynchronous operation that\n * writes a certain amount of data to a stream before completion.\n */\n/*@{*/\n\n/// Start an asynchronous operation to write all of the supplied data to a\n/// stream.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_write_some function, and is known as a <em>composed operation</em>. 
The\n * program must ensure that the stream performs no other write operations (such\n * as async_write, the stream's async_write_some function, or any other composed\n * operations that perform writes) until this operation completes.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the AsyncWriteStream concept.\n *\n * @param buffers One or more buffers containing the data to be written.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of\n * the handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes written from the\n *                                           // buffers. If an error occurred,\n *                                           // this will be less than the sum\n *                                           // of the buffer sizes.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code\n * asio::async_write(s, asio::buffer(data, size), handler);\n * @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename AsyncWriteStream, typename ConstBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncWriteStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, const ConstBufferSequence& buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncWriteStream::executor_type),\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type* = 0);\n\n/// Start an asynchronous operation to write a certain amount of data to a\n/// stream.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_write_some function, and is known as a <em>composed operation</em>. 
The\n * program must ensure that the stream performs no other write operations (such\n * as async_write, the stream's async_write_some function, or any other composed\n * operations that perform writes) until this operation completes.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the AsyncWriteStream concept.\n *\n * @param buffers One or more buffers containing the data to be written.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's async_write_some function.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes written from the\n *                                           // buffers. 
If an error occurred,\n *                                           // this will be less than the sum\n *                                           // of the buffer sizes.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code asio::async_write(s,\n *     asio::buffer(data, size),\n *     asio::transfer_at_least(32),\n *     handler); @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename AsyncWriteStream,\n    typename ConstBufferSequence, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncWriteStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncWriteStream::executor_type),\n    typename enable_if<\n      is_const_buffer_sequence<ConstBufferSequence>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Start an asynchronous operation to write all of the supplied data to a\n/// stream.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a stream. The function call always returns immediately. 
The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_write_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other write operations (such\n * as async_write, the stream's async_write_some function, or any other composed\n * operations that perform writes) until this operation completes.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the AsyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called. Successfully written\n * data is automatically consumed from the buffers.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes written from the\n *                                           // buffers. If an error occurred,\n *                                           // this will be less than the sum\n *                                           // of the buffer sizes.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncWriteStream, typename DynamicBuffer_v1,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncWriteStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncWriteStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n/// Start an asynchronous operation to write a certain amount of data to a\n/// stream.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_write_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other write operations (such\n * as async_write, the stream's async_write_some function, or any other composed\n * operations that perform writes) until this operation completes.\n *\n * @param s The stream to which the data is to be written. 
The type must support\n * the AsyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called. Successfully written\n * data is automatically consumed from the buffers.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's async_write_some function.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes written from the\n *                                           // buffers. If an error occurred,\n *                                           // this will be less than the sum\n *                                           // of the buffer sizes.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncWriteStream,\n    typename DynamicBuffer_v1, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncWriteStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s,\n    ASIO_MOVE_ARG(DynamicBuffer_v1) buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncWriteStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v1<typename decay<DynamicBuffer_v1>::type>::value\n        && !is_dynamic_buffer_v2<typename decay<DynamicBuffer_v1>::type>::value\n    >::type* = 0);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Start an asynchronous operation to write all of the supplied data to a\n/// stream.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_write_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other write operations (such\n * as async_write, the stream's async_write_some function, or any other composed\n * operations that perform writes) until this operation completes.\n *\n * @param s The stream to which the data is to be written. 
The type must support\n * the AsyncWriteStream concept.\n *\n * @param b A basic_streambuf object from which data will be written. Ownership\n * of the streambuf is retained by the caller, which must guarantee that it\n * remains valid until the handler is called.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes written from the\n *                                           // buffers. If an error occurred,\n *                                           // this will be less than the sum\n *                                           // of the buffer sizes.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncWriteStream, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncWriteStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, basic_streambuf<Allocator>& b,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncWriteStream::executor_type));\n\n/// Start an asynchronous operation to write a certain amount of data to a\n/// stream.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a stream. The function call always returns immediately. 
The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_write_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other write operations (such\n * as async_write, the stream's async_write_some function, or any other composed\n * operations that perform writes) until this operation completes.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the AsyncWriteStream concept.\n *\n * @param b A basic_streambuf object from which data will be written. Ownership\n * of the streambuf is retained by the caller, which must guarantee that it\n * remains valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's async_write_some function.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. 
The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes written from the\n *                                           // buffers. If an error occurred,\n *                                           // this will be less than the sum\n *                                           // of the buffer sizes.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncWriteStream,\n    typename Allocator, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncWriteStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, basic_streambuf<Allocator>& b,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncWriteStream::executor_type));\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n#endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1)\n\n/// Start an asynchronous operation to write all of the supplied data to a\n/// stream.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a stream. The function call always returns immediately. 
The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_write_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other write operations (such\n * as async_write, the stream's async_write_some function, or any other composed\n * operations that perform writes) until this operation completes.\n *\n * @param s The stream to which the data is to be written. The type must support\n * the AsyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called. Successfully written\n * data is automatically consumed from the buffers.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes written from the\n *                                           // buffers. If an error occurred,\n *                                           // this will be less than the sum\n *                                           // of the buffer sizes.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncWriteStream, typename DynamicBuffer_v2,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncWriteStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, DynamicBuffer_v2 buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncWriteStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/// Start an asynchronous operation to write a certain amount of data to a\n/// stream.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a stream. The function call always returns immediately. The\n * asynchronous operation will continue until one of the following conditions\n * is true:\n *\n * @li All of the data in the supplied dynamic buffer sequence has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the stream's\n * async_write_some function, and is known as a <em>composed operation</em>. The\n * program must ensure that the stream performs no other write operations (such\n * as async_write, the stream's async_write_some function, or any other composed\n * operations that perform writes) until this operation completes.\n *\n * @param s The stream to which the data is to be written. 
The type must support\n * the AsyncWriteStream concept.\n *\n * @param buffers The dynamic buffer sequence from which data will be written.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called. Successfully written\n * data is automatically consumed from the buffers.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_write_some operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the stream's async_write_some function.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   const asio::error_code& error, // Result of operation.\n *\n *   std::size_t bytes_transferred           // Number of bytes written from the\n *                                           // buffers. If an error occurred,\n *                                           // this will be less than the sum\n *                                           // of the buffer sizes.\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncWriteStream,\n    typename DynamicBuffer_v2, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncWriteStream::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write(AsyncWriteStream& s, DynamicBuffer_v2 buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncWriteStream::executor_type),\n    typename enable_if<\n      is_dynamic_buffer_v2<DynamicBuffer_v2>::value\n    >::type* = 0);\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/write.hpp\"\n\n#endif // ASIO_WRITE_HPP\n"
  },
  {
    "path": "src/third_party/asio/write_at.hpp",
    "content": "//\n// write_at.hpp\n// ~~~~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_WRITE_AT_HPP\n#define ASIO_WRITE_AT_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/detail/config.hpp\"\n#include <cstddef>\n#include \"asio/async_result.hpp\"\n#include \"asio/detail/cstdint.hpp\"\n#include \"asio/error.hpp\"\n\n#if !defined(ASIO_NO_EXTENSIONS)\n# include \"asio/basic_streambuf_fwd.hpp\"\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n#include \"asio/detail/push_options.hpp\"\n\nnamespace asio {\n\n/**\n * @defgroup write_at asio::write_at\n *\n * @brief The @c write_at function is a composed operation that writes a\n * certain amount of data at a specified offset before returning.\n */\n/*@{*/\n\n/// Write all of the supplied data at the specified offset before returning.\n/**\n * This function is used to write a certain number of bytes of data to a random\n * access device at a specified offset. The call will block until one of the\n * following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * write_some_at function.\n *\n * @param d The device to which the data is to be written. The type must support\n * the SyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param buffers One or more buffers containing the data to be written. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to write to the\n * device.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code asio::write_at(d, 42, asio::buffer(data, size)); @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write_at(\n *     d, offset, buffers,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers);\n\n/// Write all of the supplied data at the specified offset before returning.\n/**\n * This function is used to write a certain number of bytes of data to a random\n * access device at a specified offset. The call will block until one of the\n * following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * write_some_at function.\n *\n * @param d The device to which the data is to be written. The type must support\n * the SyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param buffers One or more buffers containing the data to be written. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to write to the\n * device.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code asio::write_at(d, 42,\n *     asio::buffer(data, size), ec); @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write_at(\n *     d, offset, buffers,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers,\n    asio::error_code& ec);\n\n/// Write a certain amount of data at a specified offset before returning.\n/**\n * This function is used to write a certain number of bytes of data to a random\n * access device at a specified offset. The call will block until one of the\n * following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * write_some_at function.\n *\n * @param d The device to which the data is to be written. The type must support\n * the SyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param buffers One or more buffers containing the data to be written. 
The sum\n * of the buffer sizes indicates the maximum number of bytes to write to the\n * device.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the device's write_some_at function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code asio::write_at(d, 42, asio::buffer(data, size),\n *     asio::transfer_at_least(32)); @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence,\n    typename CompletionCondition>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition);\n\n/// Write a certain amount of data at a specified offset before returning.\n/**\n * This function is used to write a certain number of bytes of data to a random\n * access device at a specified offset. The call will block until one of the\n * following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. 
That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * write_some_at function.\n *\n * @param d The device to which the data is to be written. The type must support\n * the SyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param buffers One or more buffers containing the data to be written. The sum\n * of the buffer sizes indicates the maximum number of bytes to write to the\n * device.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the device's write_some_at function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes written. 
If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncRandomAccessWriteDevice, typename ConstBufferSequence,\n    typename CompletionCondition>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition, asio::error_code& ec);\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Write all of the supplied data at the specified offset before returning.\n/**\n * This function is used to write a certain number of bytes of data to a random\n * access device at a specified offset. The call will block until one of the\n * following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * write_some_at function.\n *\n * @param d The device to which the data is to be written. The type must support\n * the SyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param b The basic_streambuf object from which data will be written.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write_at(\n *     d, 42, b,\n *     asio::transfer_all()); @endcode\n */\ntemplate <typename SyncRandomAccessWriteDevice, typename Allocator>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b);\n\n/// Write all of the supplied data at the specified offset before returning.\n/**\n * This function is used to write a certain number of bytes of data to a random\n * access device at a specified offset. 
The call will block until one of the\n * following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * write_some_at function.\n *\n * @param d The device to which the data is to be written. The type must support\n * the SyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param b The basic_streambuf object from which data will be written.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes transferred.\n *\n * @note This overload is equivalent to calling:\n * @code asio::write_at(\n *     d, 42, b,\n *     asio::transfer_all(), ec); @endcode\n */\ntemplate <typename SyncRandomAccessWriteDevice, typename Allocator>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b,\n    asio::error_code& ec);\n\n/// Write a certain amount of data at a specified offset before returning.\n/**\n * This function is used to write a certain number of bytes of data to a random\n * access device at a specified offset. The call will block until one of the\n * following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * write_some_at function.\n *\n * @param d The device to which the data is to be written. The type must support\n * the SyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param b The basic_streambuf object from which data will be written.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the device's write_some_at function.\n *\n * @returns The number of bytes transferred.\n *\n * @throws asio::system_error Thrown on failure.\n */\ntemplate <typename SyncRandomAccessWriteDevice, typename Allocator,\n    typename CompletionCondition>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset,\n    basic_streambuf<Allocator>& b, CompletionCondition completion_condition);\n\n/// Write a certain amount of data at a specified offset before returning.\n/**\n * This function is used to write a certain number of bytes of data to a random\n * access device at a specified offset. The call will block until one of the\n * following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * write_some_at function.\n *\n * @param d The device to which the data is to be written. The type must support\n * the SyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param b The basic_streambuf object from which data will be written.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. 
The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest write_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the device's write_some_at function.\n *\n * @param ec Set to indicate what error occurred, if any.\n *\n * @returns The number of bytes written. If an error occurs, returns the total\n * number of bytes successfully transferred prior to the error.\n */\ntemplate <typename SyncRandomAccessWriteDevice, typename Allocator,\n    typename CompletionCondition>\nstd::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset,\n    basic_streambuf<Allocator>& b, CompletionCondition completion_condition,\n    asio::error_code& ec);\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n/*@}*/\n/**\n * @defgroup async_write_at asio::async_write_at\n *\n * @brief The @c async_write_at function is a composed asynchronous operation\n * that writes a certain amount of data at the specified offset before\n * completion.\n */\n/*@{*/\n\n/// Start an asynchronous operation to write all of the supplied data at the\n/// specified offset.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a random access device at a specified offset. The function call\n * always returns immediately. The asynchronous operation will continue until\n * one of the following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. 
That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * async_write_some_at function, and is known as a <em>composed operation</em>.\n * The program must ensure that the device performs no <em>overlapping</em>\n * write operations (such as async_write_at, the device's async_write_some_at\n * function, or any other composed operations that perform writes) until this\n * operation completes. Operations are overlapping if the regions defined by\n * their offsets, and the numbers of bytes to write, intersect.\n *\n * @param d The device to which the data is to be written. The type must support\n * the AsyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param buffers One or more buffers containing the data to be written.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of\n * the handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes written from the buffers. If an error\n *   // occurred, this will be less than the sum of the buffer sizes.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code\n * asio::async_write_at(d, 42, asio::buffer(data, size), handler);\n * @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename AsyncRandomAccessWriteDevice, typename ConstBufferSequence,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncRandomAccessWriteDevice::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset,\n    const ConstBufferSequence& buffers,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncRandomAccessWriteDevice::executor_type));\n\n/// Start an asynchronous operation to write a certain amount of data at the\n/// specified offset.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a random access device at a specified offset. The function call\n * always returns immediately. The asynchronous operation will continue until\n * one of the following conditions is true:\n *\n * @li All of the data in the supplied buffers has been written. 
That is, the\n * bytes transferred is equal to the sum of the buffer sizes.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * async_write_some_at function, and is known as a <em>composed operation</em>.\n * The program must ensure that the device performs no <em>overlapping</em>\n * write operations (such as async_write_at, the device's async_write_some_at\n * function, or any other composed operations that perform writes) until this\n * operation completes. Operations are overlapping if the regions defined by\n * their offsets, and the numbers of bytes to write, intersect.\n *\n * @param d The device to which the data is to be written. The type must support\n * the AsyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param buffers One or more buffers containing the data to be written.\n * Although the buffers object may be copied as necessary, ownership of the\n * underlying memory blocks is retained by the caller, which must guarantee\n * that they remain valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_write_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the device's async_write_some_at function.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. 
The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes written from the buffers. If an error\n *   // occurred, this will be less than the sum of the buffer sizes.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n *\n * @par Example\n * To write a single data buffer use the @ref buffer function as follows:\n * @code asio::async_write_at(d, 42,\n *     asio::buffer(data, size),\n *     asio::transfer_at_least(32),\n *     handler); @endcode\n * See the @ref buffer documentation for information on writing multiple\n * buffers in one go, and how to use it with arrays, boost::array or\n * std::vector.\n */\ntemplate <typename AsyncRandomAccessWriteDevice,\n    typename ConstBufferSequence, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncRandomAccessWriteDevice::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write_at(AsyncRandomAccessWriteDevice& d,\n    uint64_t offset, const ConstBufferSequence& buffers,\n    CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncRandomAccessWriteDevice::executor_type));\n\n#if !defined(ASIO_NO_EXTENSIONS)\n#if !defined(ASIO_NO_IOSTREAM)\n\n/// Start an asynchronous operation to write all of the supplied data at the\n/// specified offset.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a random access device at a 
specified offset. The function call\n * always returns immediately. The asynchronous operation will continue until\n * one of the following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li An error occurred.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * async_write_some_at function, and is known as a <em>composed operation</em>.\n * The program must ensure that the device performs no <em>overlapping</em>\n * write operations (such as async_write_at, the device's async_write_some_at\n * function, or any other composed operations that perform writes) until this\n * operation completes. Operations are overlapping if the regions defined by\n * their offsets, and the numbers of bytes to write, intersect.\n *\n * @param d The device to which the data is to be written. The type must support\n * the AsyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param b A basic_streambuf object from which data will be written. Ownership\n * of the streambuf is retained by the caller, which must guarantee that it\n * remains valid until the handler is called.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes written from the buffers. If an error\n *   // occurred, this will be less than the sum of the buffer sizes.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncRandomAccessWriteDevice, typename Allocator,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncRandomAccessWriteDevice::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write_at(AsyncRandomAccessWriteDevice& d,\n    uint64_t offset, basic_streambuf<Allocator>& b,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncRandomAccessWriteDevice::executor_type));\n\n/// Start an asynchronous operation to write a certain amount of data at the\n/// specified offset.\n/**\n * This function is used to asynchronously write a certain number of bytes of\n * data to a random access device at a specified offset. The function call\n * always returns immediately. The asynchronous operation will continue until\n * one of the following conditions is true:\n *\n * @li All of the data in the supplied basic_streambuf has been written.\n *\n * @li The completion_condition function object returns 0.\n *\n * This operation is implemented in terms of zero or more calls to the device's\n * async_write_some_at function, and is known as a <em>composed operation</em>.\n * The program must ensure that the device performs no <em>overlapping</em>\n * write operations (such as async_write_at, the device's async_write_some_at\n * function, or any other composed operations that perform writes) until this\n * operation completes. Operations are overlapping if the regions defined by\n * their offsets, and the numbers of bytes to write, intersect.\n *\n * @param d The device to which the data is to be written. 
The type must support\n * the AsyncRandomAccessWriteDevice concept.\n *\n * @param offset The offset at which the data will be written.\n *\n * @param b A basic_streambuf object from which data will be written. Ownership\n * of the streambuf is retained by the caller, which must guarantee that it\n * remains valid until the handler is called.\n *\n * @param completion_condition The function object to be called to determine\n * whether the write operation is complete. The signature of the function object\n * must be:\n * @code std::size_t completion_condition(\n *   // Result of latest async_write_some_at operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes transferred so far.\n *   std::size_t bytes_transferred\n * ); @endcode\n * A return value of 0 indicates that the write operation is complete. A\n * non-zero return value indicates the maximum number of bytes to be written on\n * the next call to the device's async_write_some_at function.\n *\n * @param handler The handler to be called when the write operation completes.\n * Copies will be made of the handler as required. The function signature of the\n * handler must be:\n * @code void handler(\n *   // Result of operation.\n *   const asio::error_code& error,\n *\n *   // Number of bytes written from the buffers. If an error\n *   // occurred, this will be less than the sum of the buffer sizes.\n *   std::size_t bytes_transferred\n * ); @endcode\n * Regardless of whether the asynchronous operation completes immediately or\n * not, the handler will not be invoked from within this function. 
On\n * immediate completion, invocation of the handler will be performed in a\n * manner equivalent to using asio::post().\n */\ntemplate <typename AsyncRandomAccessWriteDevice,\n    typename Allocator, typename CompletionCondition,\n    ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,\n      std::size_t)) WriteHandler\n        ASIO_DEFAULT_COMPLETION_TOKEN_TYPE(\n          typename AsyncRandomAccessWriteDevice::executor_type)>\nASIO_INITFN_AUTO_RESULT_TYPE(WriteHandler,\n    void (asio::error_code, std::size_t))\nasync_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset,\n    basic_streambuf<Allocator>& b, CompletionCondition completion_condition,\n    ASIO_MOVE_ARG(WriteHandler) handler\n      ASIO_DEFAULT_COMPLETION_TOKEN(\n        typename AsyncRandomAccessWriteDevice::executor_type));\n\n#endif // !defined(ASIO_NO_IOSTREAM)\n#endif // !defined(ASIO_NO_EXTENSIONS)\n\n/*@}*/\n\n} // namespace asio\n\n#include \"asio/detail/pop_options.hpp\"\n\n#include \"asio/impl/write_at.hpp\"\n\n#endif // ASIO_WRITE_AT_HPP\n"
  },
  {
    "path": "src/third_party/asio/yield.hpp",
    "content": "//\n// yield.hpp\n// ~~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#include \"coroutine.hpp\"\n\n#ifndef reenter\n# define reenter(c) ASIO_CORO_REENTER(c)\n#endif\n\n#ifndef yield\n# define yield ASIO_CORO_YIELD\n#endif\n\n#ifndef fork\n# define fork ASIO_CORO_FORK\n#endif\n"
  },
  {
    "path": "src/third_party/asio.hpp",
    "content": "//\n// asio.hpp\n// ~~~~~~~~\n//\n// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#ifndef ASIO_HPP\n#define ASIO_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1200)\n# pragma once\n#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)\n\n#include \"asio/associated_allocator.hpp\"\n#include \"asio/associated_executor.hpp\"\n#include \"asio/async_result.hpp\"\n#include \"asio/awaitable.hpp\"\n#include \"asio/basic_datagram_socket.hpp\"\n#include \"asio/basic_deadline_timer.hpp\"\n#include \"asio/basic_io_object.hpp\"\n#include \"asio/basic_raw_socket.hpp\"\n#include \"asio/basic_seq_packet_socket.hpp\"\n#include \"asio/basic_serial_port.hpp\"\n#include \"asio/basic_signal_set.hpp\"\n#include \"asio/basic_socket.hpp\"\n#include \"asio/basic_socket_acceptor.hpp\"\n#include \"asio/basic_socket_iostream.hpp\"\n#include \"asio/basic_socket_streambuf.hpp\"\n#include \"asio/basic_stream_socket.hpp\"\n#include \"asio/basic_streambuf.hpp\"\n#include \"asio/basic_waitable_timer.hpp\"\n#include \"asio/bind_executor.hpp\"\n#include \"asio/buffer.hpp\"\n#include \"asio/buffered_read_stream_fwd.hpp\"\n#include \"asio/buffered_read_stream.hpp\"\n#include \"asio/buffered_stream_fwd.hpp\"\n#include \"asio/buffered_stream.hpp\"\n#include \"asio/buffered_write_stream_fwd.hpp\"\n#include \"asio/buffered_write_stream.hpp\"\n#include \"asio/buffers_iterator.hpp\"\n#include \"asio/co_spawn.hpp\"\n#include \"asio/completion_condition.hpp\"\n#include \"asio/compose.hpp\"\n#include \"asio/connect.hpp\"\n#include \"asio/coroutine.hpp\"\n#include \"asio/deadline_timer.hpp\"\n#include \"asio/defer.hpp\"\n#include \"asio/detached.hpp\"\n#include \"asio/dispatch.hpp\"\n#include \"asio/error.hpp\"\n#include \"asio/error_code.hpp\"\n#include \"asio/execution_context.hpp\"\n#include 
\"asio/executor.hpp\"\n#include \"asio/executor_work_guard.hpp\"\n#include \"asio/generic/basic_endpoint.hpp\"\n#include \"asio/generic/datagram_protocol.hpp\"\n#include \"asio/generic/raw_protocol.hpp\"\n#include \"asio/generic/seq_packet_protocol.hpp\"\n#include \"asio/generic/stream_protocol.hpp\"\n#include \"asio/handler_alloc_hook.hpp\"\n#include \"asio/handler_continuation_hook.hpp\"\n#include \"asio/handler_invoke_hook.hpp\"\n#include \"asio/high_resolution_timer.hpp\"\n#include \"asio/io_context.hpp\"\n#include \"asio/io_context_strand.hpp\"\n#include \"asio/io_service.hpp\"\n#include \"asio/io_service_strand.hpp\"\n#include \"asio/ip/address.hpp\"\n#include \"asio/ip/address_v4.hpp\"\n#include \"asio/ip/address_v4_iterator.hpp\"\n#include \"asio/ip/address_v4_range.hpp\"\n#include \"asio/ip/address_v6.hpp\"\n#include \"asio/ip/address_v6_iterator.hpp\"\n#include \"asio/ip/address_v6_range.hpp\"\n#include \"asio/ip/network_v4.hpp\"\n#include \"asio/ip/network_v6.hpp\"\n#include \"asio/ip/bad_address_cast.hpp\"\n#include \"asio/ip/basic_endpoint.hpp\"\n#include \"asio/ip/basic_resolver.hpp\"\n#include \"asio/ip/basic_resolver_entry.hpp\"\n#include \"asio/ip/basic_resolver_iterator.hpp\"\n#include \"asio/ip/basic_resolver_query.hpp\"\n#include \"asio/ip/host_name.hpp\"\n#include \"asio/ip/icmp.hpp\"\n#include \"asio/ip/multicast.hpp\"\n#include \"asio/ip/resolver_base.hpp\"\n#include \"asio/ip/resolver_query_base.hpp\"\n#include \"asio/ip/tcp.hpp\"\n#include \"asio/ip/udp.hpp\"\n#include \"asio/ip/unicast.hpp\"\n#include \"asio/ip/v6_only.hpp\"\n#include \"asio/is_executor.hpp\"\n#include \"asio/is_read_buffered.hpp\"\n#include \"asio/is_write_buffered.hpp\"\n#include \"asio/local/basic_endpoint.hpp\"\n#include \"asio/local/connect_pair.hpp\"\n#include \"asio/local/datagram_protocol.hpp\"\n#include \"asio/local/stream_protocol.hpp\"\n#include \"asio/packaged_task.hpp\"\n#include \"asio/placeholders.hpp\"\n#include \"asio/posix/basic_descriptor.hpp\"\n#include 
\"asio/posix/basic_stream_descriptor.hpp\"\n#include \"asio/posix/descriptor.hpp\"\n#include \"asio/posix/descriptor_base.hpp\"\n#include \"asio/posix/stream_descriptor.hpp\"\n#include \"asio/post.hpp\"\n#include \"asio/read.hpp\"\n#include \"asio/read_at.hpp\"\n#include \"asio/read_until.hpp\"\n#include \"asio/redirect_error.hpp\"\n#include \"asio/serial_port.hpp\"\n#include \"asio/serial_port_base.hpp\"\n#include \"asio/signal_set.hpp\"\n#include \"asio/socket_base.hpp\"\n#include \"asio/steady_timer.hpp\"\n#include \"asio/strand.hpp\"\n#include \"asio/streambuf.hpp\"\n#include \"asio/system_context.hpp\"\n#include \"asio/system_error.hpp\"\n#include \"asio/system_executor.hpp\"\n#include \"asio/system_timer.hpp\"\n#include \"asio/this_coro.hpp\"\n#include \"asio/thread.hpp\"\n#include \"asio/thread_pool.hpp\"\n#include \"asio/time_traits.hpp\"\n#include \"asio/use_awaitable.hpp\"\n#include \"asio/use_future.hpp\"\n#include \"asio/uses_executor.hpp\"\n#include \"asio/version.hpp\"\n#include \"asio/wait_traits.hpp\"\n#include \"asio/windows/basic_object_handle.hpp\"\n#include \"asio/windows/basic_overlapped_handle.hpp\"\n#include \"asio/windows/basic_random_access_handle.hpp\"\n#include \"asio/windows/basic_stream_handle.hpp\"\n#include \"asio/windows/object_handle.hpp\"\n#include \"asio/windows/overlapped_handle.hpp\"\n#include \"asio/windows/overlapped_ptr.hpp\"\n#include \"asio/windows/random_access_handle.hpp\"\n#include \"asio/windows/stream_handle.hpp\"\n#include \"asio/write.hpp\"\n#include \"asio/write_at.hpp\"\n\n#endif // ASIO_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/LICENSE.md",
    "content": "This license file applies to everything in this repository except that which\nis explicitly annotated as being written by other authors, i.e. the Boost\nqueue (included in the benchmarks for comparison), Intel's TBB library (ditto),\nthe CDSChecker tool (used for verification), the Relacy model checker (ditto),\nand Jeff Preshing's semaphore implementation (used in the blocking queue) which\nhas a zlib license (embedded in blockingconcurrentqueue.h).\n\n---\n\nSimplified BSD License:\n\nCopyright (c) 2013-2016, Cameron Desrochers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n- Redistributions of source code must retain the above copyright notice, this list of\nconditions and the following disclaimer.\n- Redistributions in binary form must reproduce the above copyright notice, this list of\nconditions and the following disclaimer in the documentation and/or other materials\nprovided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY\nEXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL\nTHE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\nOF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\nHOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\nTORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\nEVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n---\n\nI have also chosen to dual-license under the Boost Software License as an alternative to\nthe Simplified BSD license above:\n\nBoost Software License - Version 1.0 - August 17th, 2003\n\nPermission is hereby granted, free of charge, to any person or organization\nobtaining a copy of the software and accompanying documentation covered by\nthis license (the \"Software\") to use, reproduce, display, distribute,\nexecute, and transmit the Software, and to prepare derivative works of the\nSoftware, and to permit third-parties to whom the Software is furnished to\ndo so, all subject to the following:\n\nThe copyright notices in the Software and this entire statement, including\nthe above license grant, this restriction and the following disclaimer,\nmust be included in all copies of the Software, in whole or in part, and\nall derivative works of the Software, unless such copies or derivative\nworks are solely in the form of machine-executable object code generated by\na source language processor.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT\nSHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE\nFOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "src/third_party/concurrentqueue/README.md",
    "content": "# moodycamel::ConcurrentQueue<T>\r\n\r\nAn industrial-strength lock-free queue for C++.\r\n\r\nNote: If all you need is a single-producer, single-consumer queue, I have [one of those too][spsc].\r\n\r\n## Features\r\n\r\n- Knock-your-socks-off [blazing fast performance][benchmarks].\r\n- Single-header implementation. Just drop it in your project.\r\n- Fully thread-safe lock-free queue. Use concurrently from any number of threads.\r\n- C++11 implementation -- elements are moved (instead of copied) where possible.\r\n- Templated, obviating the need to deal exclusively with pointers -- memory is managed for you.\r\n- No artificial limitations on element types or maximum count.\r\n- Memory can be allocated once up-front, or dynamically as needed.\r\n- Fully portable (no assembly; all is done through standard C++11 primitives).\r\n- Supports super-fast bulk operations.\r\n- Includes a low-overhead blocking version (BlockingConcurrentQueue).\r\n- Exception safe.\r\n\r\n## Reasons to use\r\n\r\nThere are not that many full-fledged lock-free queues for C++. 
Boost has one, but it's limited to objects with trivial\r\nassignment operators and trivial destructors, for example.\r\nIntel's TBB queue isn't lock-free, and requires trivial constructors too.\r\nThere're many academic papers that implement lock-free queues in C++, but usable source code is\r\nhard to find, and tests even more so.\r\n\r\nThis queue not only has less limitations than others (for the most part), but [it's also faster][benchmarks].\r\nIt's been fairly well-tested, and offers advanced features like **bulk enqueueing/dequeueing**\r\n(which, with my new design, is much faster than one element at a time, approaching and even surpassing\r\nthe speed of a non-concurrent queue even under heavy contention).\r\n\r\nIn short, there was a lock-free queue shaped hole in the C++ open-source universe, and I set out\r\nto fill it with the fastest, most complete, and well-tested design and implementation I could.\r\nThe result is `moodycamel::ConcurrentQueue` :-)\r\n\r\n## Reasons *not* to use\r\n\r\nThe fastest synchronization of all is the kind that never takes place. Fundamentally,\r\nconcurrent data structures require some synchronization, and that takes time. Every effort\r\nwas made, of course, to minimize the overhead, but if you can avoid sharing data between\r\nthreads, do so!\r\n\r\nWhy use concurrent data structures at all, then? Because they're gosh darn convenient! (And, indeed,\r\nsometimes sharing data concurrently is unavoidable.)\r\n\r\nMy queue is **not linearizable** (see the next section on high-level design). The foundations of\r\nits design assume that producers are independent; if this is not the case, and your producers\r\nco-ordinate amongst themselves in some fashion, be aware that the elements won't necessarily\r\ncome out of the queue in the same order they were put in *relative to the ordering formed by that co-ordination*\r\n(but they will still come out in the order they were put in by any *individual* producer). 
If this affects\r\nyour use case, you may be better off with another implementation; either way, it's an important limitation\r\nto be aware of.\r\n\r\nMy queue is also **not NUMA aware**, and does a lot of memory re-use internally, meaning it probably doesn't\r\nscale particularly well on NUMA architectures; however, I don't know of any other lock-free queue that *is*\r\nNUMA aware (except for [SALSA][salsa], which is very cool, but has no publicly available implementation that I know of).\r\n\r\nFinally, the queue is **not sequentially consistent**; there *is* a happens-before relationship between when an element is put\r\nin the queue and when it comes out, but other things (such as pumping the queue until it's empty) require more thought\r\nto get right in all eventualities, because explicit memory ordering may have to be done to get the desired effect. In other words,\r\nit can sometimes be difficult to use the queue correctly. This is why it's a good idea to follow the [samples][samples.md] where possible.\r\nOn the other hand, the upside of this lack of sequential consistency is better performance.\r\n\r\n## High-level design\r\n\r\nElements are stored internally using contiguous blocks instead of linked lists for better performance.\r\nThe queue is made up of a collection of sub-queues, one for each producer. When a consumer\r\nwants to dequeue an element, it checks all the sub-queues until it finds one that's not empty.\r\nAll of this is largely transparent to the user of the queue, however -- it mostly just works<sup>TM</sup>.\r\n\r\nOne particular consequence of this design, however, (which seems to be non-intuitive) is that if two producers\r\nenqueue at the same time, there is no defined ordering between the elements when they're later dequeued.\r\nNormally this is fine, because even with a fully linearizable queue there'd be a race between the producer\r\nthreads and so you couldn't rely on the ordering anyway. 
However, if for some reason you do extra explicit synchronization\r\nbetween the two producer threads yourself, thus defining a total order between enqueue operations, you might expect\r\nthat the elements would come out in the same total order, which is a guarantee my queue does not offer. At that\r\npoint, though, there semantically aren't really two separate producers, but rather one that happens to be spread\r\nacross multiple threads. In this case, you can still establish a total ordering with my queue by creating\r\na single producer token, and using that from both threads to enqueue (taking care to synchronize access to the token,\r\nof course, but there was already extra synchronization involved anyway).\r\n\r\nI've written a more detailed [overview of the internal design][blog], as well as [the full\r\nnitty-gritty details of the design][design], on my blog. Finally, the\r\n[source][source] itself is available for perusal for those interested in its implementation.\r\n\r\n## Basic use\r\n\r\nThe entire queue's implementation is contained in **one header**, [`concurrentqueue.h`][concurrentqueue.h].\r\nSimply download and include that to use the queue. The blocking version is in a separate header,\r\n[`blockingconcurrentqueue.h`][blockingconcurrentqueue.h], that depends on the first.\r\nThe implementation makes use of certain key C++11 features, so it requires a fairly recent compiler\r\n(e.g. 
VS2012+ or g++ 4.8; note that g++ 4.6 has a known bug with `std::atomic` and is thus not supported).\r\nThe algorithm implementations themselves are platform independent.\r\n\r\nUse it like you would any other templated queue, with the exception that you can use\r\nit from many threads at once :-)\r\n\r\nSimple example:\r\n\r\n    #include \"concurrentqueue.h\"\r\n    \r\n    moodycamel::ConcurrentQueue<int> q;\r\n    q.enqueue(25);\r\n    \r\n    int item;\r\n    bool found = q.try_dequeue(item);\r\n    assert(found && item == 25);\r\n\r\nDescription of basic methods:\r\n- `ConcurrentQueue(size_t initialSizeEstimate)`\r\n      Constructor which optionally accepts an estimate of the number of elements the queue will hold\r\n- `enqueue(T&& item)`\r\n      Enqueues one item, allocating extra space if necessary\r\n- `try_enqueue(T&& item)`\r\n      Enqueues one item, but only if enough memory is already allocated\r\n- `try_dequeue(T& item)`\r\n      Dequeues one item, returning true if an item was found or false if the queue appeared empty\r\n\r\nNote that it is up to the user to ensure that the queue object is completely constructed before\r\nbeing used by any other threads (this includes making the memory effects of construction\r\nvisible, possibly via a memory barrier). Similarly, it's important that all threads have\r\nfinished using the queue (and the memory effects have fully propagated) before it is\r\ndestructed.\r\n\r\nThere's usually two versions of each method, one \"explicit\" version that takes a user-allocated per-producer or\r\nper-consumer token, and one \"implicit\" version that works without tokens. Using the explicit methods is almost\r\nalways faster (though not necessarily by a huge factor). 
Apart from performance, the primary distinction between them\r\nis their sub-queue allocation behaviour for enqueue operations: Using the implicit enqueue methods causes an\r\nautomatically-allocated thread-local producer sub-queue to be allocated (it is marked for reuse once the thread exits).\r\nExplicit producers, on the other hand, are tied directly to their tokens' lifetimes (and are also recycled as needed).\r\n\r\nFull API (pseudocode):\r\n\r\n\t# Allocates more memory if necessary\r\n\tenqueue(item) : bool\r\n\tenqueue(prod_token, item) : bool\r\n\tenqueue_bulk(item_first, count) : bool\r\n\tenqueue_bulk(prod_token, item_first, count) : bool\r\n\t\r\n\t# Fails if not enough memory to enqueue\r\n\ttry_enqueue(item) : bool\r\n\ttry_enqueue(prod_token, item) : bool\r\n\ttry_enqueue_bulk(item_first, count) : bool\r\n\ttry_enqueue_bulk(prod_token, item_first, count) : bool\r\n\t\r\n\t# Attempts to dequeue from the queue (never allocates)\r\n\ttry_dequeue(item&) : bool\r\n\ttry_dequeue(cons_token, item&) : bool\r\n\ttry_dequeue_bulk(item_first, max) : size_t\r\n\ttry_dequeue_bulk(cons_token, item_first, max) : size_t\r\n\t\r\n\t# If you happen to know which producer you want to dequeue from\r\n\ttry_dequeue_from_producer(prod_token, item&) : bool\r\n\ttry_dequeue_bulk_from_producer(prod_token, item_first, max) : size_t\r\n\t\r\n\t# A not-necessarily-accurate count of the total number of elements\r\n\tsize_approx() : size_t\r\n\r\n## Blocking version\r\n\r\nAs mentioned above, a full blocking wrapper of the queue is provided that adds\r\n`wait_dequeue` and `wait_dequeue_bulk` methods in addition to the regular interface.\r\nThis wrapper is extremely low-overhead, but slightly less fast than the non-blocking\r\nqueue (due to the necessary bookkeeping involving a lightweight semaphore).\r\n\r\nThere are also timed versions that allow a timeout to be specified (either in microseconds\r\nor with a `std::chrono` object).\r\n\r\nThe only major caveat with the blocking 
version is that you must be careful not to\r\ndestroy the queue while somebody is waiting on it. This generally means you need to\r\nknow for certain that another element is going to come along before you call one of\r\nthe blocking methods. (To be fair, the non-blocking version cannot be destroyed while\r\nin use either, but it can be easier to coordinate the cleanup.)\r\n\r\nBlocking example:\r\n\r\n    #include \"blockingconcurrentqueue.h\"\r\n    \r\n    moodycamel::BlockingConcurrentQueue<int> q;\r\n    std::thread producer([&]() {\r\n        for (int i = 0; i != 100; ++i) {\r\n            std::this_thread::sleep_for(std::chrono::milliseconds(i % 10));\r\n            q.enqueue(i);\r\n        }\r\n    });\r\n    std::thread consumer([&]() {\r\n        for (int i = 0; i != 100; ++i) {\r\n            int item;\r\n            q.wait_dequeue(item);\r\n            assert(item == i);\r\n            \r\n            if (q.wait_dequeue_timed(item, std::chrono::milliseconds(5))) {\r\n                ++i;\r\n                assert(item == i);\r\n            }\r\n        }\r\n    });\r\n    producer.join();\r\n    consumer.join();\r\n    \r\n    assert(q.size_approx() == 0);\r\n\r\n## Advanced features\r\n\r\n#### Tokens\r\n\r\nThe queue can take advantage of extra per-producer and per-consumer storage if\r\nit's available to speed up its operations. This takes the form of \"tokens\":\r\nYou can create a consumer token and/or a producer token for each thread or task\r\n(tokens themselves are not thread-safe), and use the methods that accept a token\r\nas their first parameter:\r\n\r\n    moodycamel::ConcurrentQueue<int> q;\r\n    \r\n    moodycamel::ProducerToken ptok(q);\r\n    q.enqueue(ptok, 17);\r\n    \r\n    moodycamel::ConsumerToken ctok(q);\r\n    int item;\r\n    q.try_dequeue(ctok, item);\r\n    assert(item == 17);\r\n\r\nIf you happen to know which producer you want to consume from (e.g. 
in\r\na single-producer, multi-consumer scenario), you can use the `try_dequeue_from_producer`\r\nmethods, which accept a producer token instead of a consumer token, and cut some overhead.\r\n\r\nNote that tokens work with the blocking version of the queue too.\r\n\r\nWhen producing or consuming many elements, the most efficient way is to:\r\n\r\n1. Use the bulk methods of the queue with tokens\r\n2. Failing that, use the bulk methods without tokens\r\n3. Failing that, use the single-item methods with tokens\r\n4. Failing that, use the single-item methods without tokens\r\n\r\nHaving said that, don't create tokens willy-nilly -- ideally there would be\r\none token (of each kind) per thread. The queue will work with what it is\r\ngiven, but it performs best when used with tokens.\r\n\r\nNote that tokens aren't actually tied to any given thread; it's not technically\r\nrequired that they be local to the thread, only that they be used by a single\r\nproducer/consumer at a time.\r\n\r\n#### Bulk operations\r\n\r\nThanks to the [novel design][blog] of the queue, it's just as easy to enqueue/dequeue multiple\r\nitems as it is to do one at a time. This means that overhead can be cut drastically for\r\nbulk operations. Example syntax:\r\n\r\n    moodycamel::ConcurrentQueue<int> q;\r\n\r\n    int items[] = { 1, 2, 3, 4, 5 };\r\n    q.enqueue_bulk(items, 5);\r\n    \r\n    int results[5];     // Could also be any iterator\r\n    size_t count = q.try_dequeue_bulk(results, 5);\r\n    for (size_t i = 0; i != count; ++i) {\r\n        assert(results[i] == items[i]);\r\n    }\r\n\r\n#### Preallocation (correctly using `try_enqueue`)\r\n\r\n`try_enqueue`, unlike just plain `enqueue`, will never allocate memory. If there's not enough room in the\r\nqueue, it simply returns false. 
The key to using this method properly, then, is to ensure enough space is\r\npre-allocated for your desired maximum element count.\r\n\r\nThe constructor accepts a count of the number of elements that it should reserve space for. Because the\r\nqueue works with blocks of elements, however, and not individual elements themselves, the value to pass\r\nin order to obtain an effective number of pre-allocated element slots is non-obvious.\r\n\r\nFirst, be aware that the count passed is rounded up to the next multiple of the block size. Note that the\r\ndefault block size is 32 (this can be changed via the traits). Second, once a slot in a block has been\r\nenqueued to, that slot cannot be re-used until the rest of the block has been completely filled\r\nup and then completely emptied. This affects the number of blocks you need in order to account for the\r\noverhead of partially-filled blocks. Third, each producer (whether implicit or explicit) claims and recycles\r\nblocks in a different manner, which again affects the number of blocks you need to account for a desired number of\r\nusable slots.\r\n\r\nSuppose you want the queue to be able to hold at least `N` elements at any given time. Without delving too\r\ndeep into the rather arcane implementation details, here are some simple formulas for the number of elements\r\nto request for pre-allocation in such a case. 
Note the division is intended to be arithmetic division and not\r\ninteger division (in order for `ceil()` to work).\r\n\r\nFor explicit producers (using tokens to enqueue):\r\n\r\n    (ceil(N / BLOCK_SIZE) + 1) * MAX_NUM_PRODUCERS * BLOCK_SIZE\r\n\r\nFor implicit producers (no tokens):\r\n\r\n    (ceil(N / BLOCK_SIZE) - 1 + 2 * MAX_NUM_PRODUCERS) * BLOCK_SIZE\r\n\r\nWhen using mixed producer types:\r\n\r\n    ((ceil(N / BLOCK_SIZE) - 1) * (MAX_EXPLICIT_PRODUCERS + 1) + 2 * (MAX_IMPLICIT_PRODUCERS + MAX_EXPLICIT_PRODUCERS)) * BLOCK_SIZE\r\n\r\nIf these formulas seem rather inconvenient, you can use the constructor overload that accepts the minimum\r\nnumber of elements (`N`) and the maximum number of explicit and implicit producers directly, and let it do the\r\ncomputation for you.\r\n\r\nFinally, it's important to note that because the queue is only eventually consistent and takes advantage of\r\nweak memory ordering for speed, there's always a possibility that under contention `try_enqueue` will fail\r\neven if the queue is correctly pre-sized for the desired number of elements. (e.g. A given thread may think that\r\nthe queue's full even when that's no longer the case.) 
So no matter what, you still need to handle the failure\r\ncase (perhaps looping until it succeeds), unless you don't mind dropping elements.\r\n\r\n#### Exception safety\r\n\r\nThe queue is exception safe, and will never become corrupted if used with a type that may throw exceptions.\r\nThe queue itself never throws any exceptions (operations fail gracefully (return false) if memory allocation\r\nfails instead of throwing `std::bad_alloc`).\r\n\r\nIt is important to note that the guarantees of exception safety only hold if the element type never throws\r\nfrom its destructor, and that any iterators passed into the queue (for bulk operations) never throw either.\r\nNote that in particular this means `std::back_inserter` iterators must be used with care, since the vector\r\nbeing inserted into may need to allocate and throw a `std::bad_alloc` exception from inside the iterator;\r\nso be sure to reserve enough capacity in the target container first if you do this.\r\n\r\nThe guarantees are presently as follows:\r\n- Enqueue operations are rolled back completely if an exception is thrown from an element's constructor.\r\n  For bulk enqueue operations, this means that elements are copied instead of moved (in order to avoid\r\n  having only some of the objects be moved in the event of an exception). Non-bulk enqueues always use\r\n  the move constructor if one is available.\r\n- If the assignment operator throws during a dequeue operation (both single and bulk), the element(s) are\r\n  considered dequeued regardless. 
In such a case, the dequeued elements are all properly destructed before\r\n  the exception is propagated, but there's no way to get the elements themselves back.\r\n- Any exception that is thrown is propagated up the call stack, at which point the queue is in a consistent\r\n  state.\r\n\r\nNote: If any of your type's copy constructors/move constructors/assignment operators don't throw, be sure\r\nto annotate them with `noexcept`; this will avoid the exception-checking overhead in the queue where possible\r\n(even with zero-cost exceptions, there's still a code size impact that has to be taken into account).\r\n\r\n#### Traits\r\n\r\nThe queue also supports a traits template argument which defines various types, constants,\r\nand the memory allocation and deallocation functions that are to be used by the queue. The typical pattern\r\nto providing your own traits is to create a class that inherits from the default traits\r\nand override only the values you wish to change. Example:\r\n\r\n    struct MyTraits : public moodycamel::ConcurrentQueueDefaultTraits\r\n    {\r\n    \tstatic const size_t BLOCK_SIZE = 256;\t\t// Use bigger blocks\r\n    };\r\n    \r\n    moodycamel::ConcurrentQueue<int, MyTraits> q;\r\n\r\n#### How to dequeue types without calling the constructor\r\n\r\nThe normal way to dequeue an item is to pass in an existing object by reference, which\r\nis then assigned to internally by the queue (using the move-assignment operator if possible).\r\nThis can pose a problem for types that are\r\nexpensive to construct or don't have a default constructor; fortunately, there is a simple\r\nworkaround: Create a wrapper class that copies the memory contents of the object when it\r\nis assigned by the queue (a poor man's move, essentially). Note that this only works if\r\nthe object contains no internal pointers. 
Example:\r\n\r\n    struct MyObjectMover {\r\n        inline void operator=(MyObject&& obj)\r\n        {\r\n            std::memcpy(data, &obj, sizeof(MyObject));\r\n            \r\n            // TODO: Cleanup obj so that when it's destructed by the queue\r\n            // it doesn't corrupt the data of the object we just moved it into\r\n        }\r\n        \r\n        inline MyObject& obj() { return *reinterpret_cast<MyObject*>(data); }\r\n    \r\n    private:\r\n        alignas(alignof(MyObject)) char data[sizeof(MyObject)];\r\n    };\r\n\r\nA less dodgy alternative, if moves are cheap but default construction is not, is to use a\r\nwrapper that defers construction until the object is assigned, enabling use of the move\r\nconstructor:\r\n\r\n    struct MyObjectMover {\r\n        inline void operator=(MyObject&& x) {\r\n            new (data) MyObject(std::move(x));\r\n            created = true;\r\n        }\r\n    \r\n        inline MyObject& obj() {\r\n            assert(created);\r\n            return *reinterpret_cast<MyObject*>(data);\r\n        }\r\n    \r\n        ~MyObjectMover() {\r\n            if (created)\r\n                obj().~MyObject();\r\n        }\r\n    \r\n    private:\r\n        alignas(alignof(MyObject)) char data[sizeof(MyObject)];\r\n        bool created = false;\r\n    };\r\n\r\n\r\n## Samples\r\n\r\nThere are some more detailed samples [here][samples.md]. 
The source of\r\nthe [unit tests][unittest-src] and [benchmarks][benchmark-src] are available for reference as well.\r\n\r\n## Benchmarks\r\n\r\nSee my blog post for some [benchmark results][benchmarks] (including versus `boost::lockfree::queue` and `tbb::concurrent_queue`),\r\nor run the benchmarks yourself (requires MinGW and certain GnuWin32 utilities to build on Windows, or a recent\r\ng++ on Linux):\r\n\r\n    cd build\r\n    make benchmarks\r\n    bin/benchmarks\r\n\r\nThe short version of the benchmarks is that it's so fast (especially the bulk methods), that if you're actually\r\nusing the queue to *do* anything, the queue won't be your bottleneck.\r\n\r\n## Tests (and bugs)\r\n\r\nI've written quite a few unit tests as well as a randomized long-running fuzz tester. I also ran the\r\ncore queue algorithm through the [CDSChecker][cdschecker] C++11 memory model model checker. Some of the\r\ninner algorithms were tested separately using the [Relacy][relacy] model checker, and full integration\r\ntests were also performed with Relacy.\r\nI've tested\r\non Linux (Fedora 19) and Windows (7), but only on x86 processors so far (Intel and AMD). The code was\r\nwritten to be platform-independent, however, and should work across all processors and OSes.\r\n\r\nDue to the complexity of the implementation and the difficult-to-test nature of lock-free code in general,\r\nthere may still be bugs. If anyone is seeing buggy behaviour, I'd like to hear about it! (Especially if\r\na unit test for it can be cooked up.) Just open an issue on GitHub.\r\n\r\n## License\r\n\r\nI'm releasing the source of this repository (with the exception of third-party code, i.e. the Boost queue\r\n(used in the benchmarks for comparison), Intel's TBB library (ditto), CDSChecker, Relacy, and Jeff Preshing's\r\ncross-platform semaphore, which all have their own licenses)\r\nunder a simplified BSD license. 
I'm also dual-licensing under the Boost Software License.\r\nSee the [LICENSE.md][license] file for more details.\r\n\r\nNote that lock-free programming is a patent minefield, and this code may very\r\nwell violate a pending patent (I haven't looked), though it does not to my present knowledge.\r\nI did design and implement this queue from scratch.\r\n\r\n## Diving into the code\r\n\r\nIf you're interested in the source code itself, it helps to have a rough idea of how it's laid out. This\r\nsection attempts to describe that.\r\n\r\nThe queue is formed of several basic parts (listed here in roughly the order they appear in the source). There's the\r\nhelper functions (e.g. for rounding to a power of 2). There's the default traits of the queue, which contain the\r\nconstants and malloc/free functions used by the queue. There's the producer and consumer tokens. Then there's the queue's\r\npublic API itself, starting with the constructor, destructor, and swap/assignment methods. There's the public enqueue methods,\r\nwhich are all wrappers around a small set of private enqueue methods found later on. There's the dequeue methods, which are\r\ndefined inline and are relatively straightforward.\r\n\r\nThen there's all the main internal data structures. First, there's a lock-free free list, used for recycling spent blocks (elements\r\nare enqueued to blocks internally). Then there's the block structure itself, which has two different ways of tracking whether\r\nit's fully emptied or not (remember, given two parallel consumers, there's no way to know which one will finish first) depending on where it's used.\r\nThen there's a small base class for the two types of internal SPMC producer queues (one for explicit producers that holds onto memory\r\nbut attempts to be faster, and one for implicit ones which attempt to recycle more memory back into the parent but is a little slower).\r\nThe explicit producer is defined first, then the implicit one. 
They both contain the same general four methods: One to enqueue, one to\r\ndequeue, one to enqueue in bulk, and one to dequeue in bulk. (Obviously they have constructors and destructors too, and helper methods.)\r\nThe main difference between them is how the block handling is done (they both use the same blocks, but in different ways, and map indices\r\nto them in different ways).\r\n\r\nFinally, there's the miscellaneous internal methods: There's the ones that handle the initial block pool (populated when the queue is constructed),\r\nand an abstract block pool that comprises the initial pool and any blocks on the free list. There's ones that handle the producer list\r\n(a lock-free add-only linked list of all the producers in the system). There's ones that handle the implicit producer lookup table (which\r\nis really a sort of specialized TLS lookup). And then there's some helper methods for allocating and freeing objects, and the data members\r\nof the queue itself, followed lastly by the free-standing swap functions.\r\n\r\n\r\n[blog]: http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++\r\n[design]: http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue\r\n[samples.md]: https://github.com/cameron314/concurrentqueue/blob/master/samples.md\r\n[source]: https://github.com/cameron314/concurrentqueue\r\n[concurrentqueue.h]: https://github.com/cameron314/concurrentqueue/blob/master/concurrentqueue.h\r\n[blockingconcurrentqueue.h]: https://github.com/cameron314/concurrentqueue/blob/master/blockingconcurrentqueue.h\r\n[unittest-src]: https://github.com/cameron314/concurrentqueue/tree/master/tests/unittests\r\n[benchmarks]: http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++#benchmarks\r\n[benchmark-src]: https://github.com/cameron314/concurrentqueue/tree/master/benchmarks\r\n[license]: https://github.com/cameron314/concurrentqueue/blob/master/LICENSE.md\r\n[cdschecker]: 
http://demsky.eecs.uci.edu/c11modelchecker.html\r\n[relacy]: http://www.1024cores.net/home/relacy-race-detector\r\n[spsc]: https://github.com/cameron314/readerwriterqueue\r\n[salsa]: http://webee.technion.ac.il/~idish/ftp/spaa049-gidron.pdf\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/benchmarks.cpp",
    "content": "// ©2013-2014 Cameron Desrochers.\n// Distributed under the simplified BSD license (see the LICENSE file that\n// should have come with this file).\n\n// Benchmarks for moodycamel::ConcurrentQueue.\n// Provides comparative timings of various operations under\n// highly artificial circumstances. You've been warned :-)\n\n#include <cstdio>\n#include <cstring>\n#include <string>\n#include <cstdint>\n#include <cmath>\n#include <cstdarg>\n#include <fstream>\n#include <ctime>\n#include <random>\n#include <vector>\n#include <map>\n#include <cassert>\n#include <thread>\n#include <algorithm>\n#include <cctype>\n\n#include \"../blockingconcurrentqueue.h\"\n#include \"lockbasedqueue.h\"\n#include \"simplelockfree.h\"\n#include \"boostqueue.h\"\n#include \"tbbqueue.h\"\n#include \"stdqueue.h\"\n#include \"../tests/common/simplethread.h\"\n#include \"../tests/common/systemtime.h\"\n#include \"cpuid.h\"\n\nusing namespace moodycamel;\n\n\ntypedef std::minstd_rand RNG_t;\n\nbool precise = false;\n\n\nenum benchmark_type_t\n{\n\tbench_balanced,\n\tbench_only_enqueue,\n\tbench_only_enqueue_prealloc,\n\tbench_only_enqueue_bulk,\n\tbench_only_enqueue_bulk_prealloc,\n\tbench_only_dequeue,\n\tbench_only_dequeue_bulk,\n\tbench_mostly_enqueue,\n\tbench_mostly_enqueue_bulk,\n\tbench_mostly_dequeue,\n\tbench_mostly_dequeue_bulk,\n\tbench_spmc,\n\tbench_spmc_preproduced,\n\tbench_mpsc,\n\tbench_empty_dequeue,\n\tbench_enqueue_dequeue_pairs,\n\tbench_heavy_concurrent,\n\t\n\tBENCHMARK_TYPE_COUNT\n};\n\nconst char BENCHMARK_SHORT_NAMES[BENCHMARK_TYPE_COUNT][32] = {\n\t\"balanced\",\n\t\"only_enqueue\",\n\t\"only_enqueue_prealloc\",\n\t\"only_enqueue_bulk\",\n\t\"only_enqueue_bulk_prealloc\",\n\t\"only_dequeue\",\n\t\"only_dequeue_bulk\",\n\t\"mostly_enqueue\",\n\t\"mostly_enqueue_bulk\",\n\t\"mostly_dequeue\",\n\t\"mostly_dequeue_bulk\",\n\t\"spmc\",\n\t\"spmc_preproduced\",\n\t\"mpsc\",\n\t\"empty_dequeue\",\n\t\"enqueue_dequeue_pairs\",\n\t\"heavy_concurrent\"\n};\n\nconst 
char BENCHMARK_NAMES[BENCHMARK_TYPE_COUNT][64] = {\n\t\"balanced\",\n\t\"only enqueue\",\n\t\"only enqueue (pre-allocated)\",\n\t\"only enqueue bulk\",\n\t\"only enqueue bulk (pre-allocated)\",\n\t\"only dequeue\",\n\t\"only dequeue bulk\",\n\t\"mostly enqueue\",\n\t\"mostly enqueue bulk\",\n\t\"mostly dequeue\",\n\t\"mostly dequeue bulk\",\n\t\"single-producer, multi-consumer\",\n\t\"single-producer, multi-consumer (pre-produced)\",\n\t\"multi-producer, single-consumer\",\n\t\"dequeue from empty\",\n\t\"enqueue-dequeue pairs\",\n\t\"heavy concurrent\"\n};\n\nconst char BENCHMARK_DESCS[BENCHMARK_TYPE_COUNT][256] = {\n\t\"Measures the average operation speed with multiple symmetrical threads\\n  under reasonable load -- small random intervals between accesses\",\n\t\"Measures the average operation speed when all threads are producers\",\n\t\"Measures the average operation speed when all threads are producers,\\n  and the queue has been stretched out first\",\n\t\"Measures the average speed of enqueueing an item in bulk when all threads are producers\",\n\t\"Measures the average speed of enqueueing an item in bulk when all threads are producers,\\n  and the queue has been stretched out first\",\n\t\"Measures the average operation speed when all threads are consumers\",\n\t\"Measures the average speed of dequeueing an item in bulk when all threads are consumers\",\n\t\"Measures the average operation speed when most threads are enqueueing\",\n\t\"Measures the average speed of enqueueing an item in bulk under light contention\",\n\t\"Measures the average operation speed when most threads are dequeueing\",\n\t\"Measures the average speed of dequeueing an item in bulk under light contention\",\n\t\"Measures the average speed of dequeueing with only one producer, but multiple consumers\",\n\t\"Measures the average speed of dequeueing from a queue pre-filled by one thread\",\n\t\"Measures the average speed of dequeueing with only one consumer, but multiple 
producers\",\n\t\"Measures the average speed of attempting to dequeue from an empty queue\\n  (that eight separate threads had at one point enqueued to)\",\n\t\"Measures the average operation speed with each thread doing an enqueue\\n  followed by a dequeue\",\n\t\"Measures the average operation speed with many threads under heavy load\"\n};\n\nconst char BENCHMARK_SINGLE_THREAD_NOTES[BENCHMARK_TYPE_COUNT][256] = {\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"No contention -- measures raw failed dequeue speed on empty queue\",\n\t\"No contention -- measures speed of immediately dequeueing the item that was just enqueued\",\n\t\"\"\n};\n\nint BENCHMARK_THREADS_MEASURED[BENCHMARK_TYPE_COUNT] = {\n\t0,\t// measures nthreads\n\t0,\n\t0,\n\t0,\n\t0,\n\t0,\n\t0,\n\t0,\n\t0,\n\t0,\n\t0,\n\t-1,\t// nthreads - 1\n\t0,\n\t1,\t// 1\n\t0,\n\t0,\n\t0,\n};\n\nint BENCHMARK_THREADS[BENCHMARK_TYPE_COUNT][9] = {\n\t{ 2, 3, 4,  8, 12, 16, 32,  0, 0 },\n\t{ 1, 2, 4,  8, 12, 16, 32, 48, 0 },\n\t{ 1, 2, 4,  8, 32,  0,  0,  0, 0 },\n\t{ 1, 2, 4,  8, 12, 16, 32, 48, 0 },\n\t{ 1, 2, 4,  8, 32,  0,  0,  0, 0 },\n\t{ 1, 2, 4,  8, 12, 16, 32, 48, 0 },\n\t{ 1, 2, 4,  8, 12, 16, 32, 48, 0 },\n\t{ 2, 4, 8, 32,  0,  0,  0,  0, 0 },\n\t{ 2, 4, 8, 32,  0,  0,  0,  0, 0 },\n\t{ 2, 4, 8,  0,  0,  0,  0,  0, 0 },\n\t{ 2, 4, 8,  0,  0,  0,  0,  0, 0 },\n\t{ 2, 4, 8, 16,  0,  0,  0,  0, 0 },\n\t{ 1, 3, 7, 15,  0,  0,  0,  0, 0 },\n\t{ 2, 4, 8, 16,  0,  0,  0,  0, 0 },\n\t{ 1, 2, 8, 32,  0,  0,  0,  0, 0 },\n\t{ 1, 2, 4,  8, 32,  0,  0,  0, 0 },\n\t{ 2, 3, 4,  8, 12, 16, 32, 48, 0 },\n};\n\nenum queue_id_t\n{\n\tqueue_moodycamel_ConcurrentQueue,\n\tqueue_moodycamel_BlockingConcurrentQueue,\n\tqueue_boost,\n\tqueue_tbb,\n\tqueue_simplelockfree,\n\tqueue_lockbased,\n\tqueue_std,\n\t\n\tQUEUE_COUNT\n};\n\nconst char QUEUE_NAMES[QUEUE_COUNT][64] = 
{\n\t\"moodycamel::ConcurrentQueue\",\n\t\"moodycamel::BlockingConcurrentQueue\",\n\t\"boost::lockfree::queue\",\n\t\"tbb::concurrent_queue\",\n\t\"SimpleLockFreeQueue\",\n\t\"LockBasedQueue\",\n\t\"std::queue\",\n};\n\nconst char QUEUE_SUMMARY_NOTES[QUEUE_COUNT][128] = {\n\t\"including bulk\",\n\t\"including bulk\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"\",\n\t\"single thread only\",\n};\n\nconst bool QUEUE_TOKEN_SUPPORT[QUEUE_COUNT] = {\n\ttrue,\n\ttrue,\n\tfalse,\n\tfalse,\n\tfalse,\n\tfalse,\n\tfalse,\n};\n\nconst int QUEUE_MAX_THREADS[QUEUE_COUNT] = {\n\t-1,\t\t// no limit\n\t-1,\n\t-1,\n\t-1,\n\t-1,\n\t-1,\n\t1,\n};\n\nconst bool QUEUE_BENCH_SUPPORT[QUEUE_COUNT][BENCHMARK_TYPE_COUNT] = {\n\t{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },\n\t{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },\n\t{ 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1 },\n\t{ 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1 },\n\t{ 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1 },\n\t{ 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1 },\n\t{ 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0 },\n};\n\n\nstruct Traits : public moodycamel::ConcurrentQueueDefaultTraits\n{\n\t// Use a slightly larger default block size; the default offers\n\t// a good trade off between speed and memory usage, but a bigger\n\t// block size will improve throughput (which is mostly what\n\t// we're after with these benchmarks).\n\tstatic const size_t BLOCK_SIZE = 64;\n};\n\n\ntypedef std::uint64_t counter_t;\n\nconst counter_t BULK_BATCH_SIZE = 2300;\n\nstruct BenchmarkResult\n{\n\tdouble elapsedTime;\n\tcounter_t operations;\n\t\n\tinline bool operator<(BenchmarkResult const& other) const\n\t{\n\t\treturn elapsedTime < other.elapsedTime;\n\t}\n};\n\n\ntemplate<typename TFunc>\ncounter_t rampUpToMeasurableNumberOfMaxOps(TFunc const& func, counter_t startOps = 256)\n{\n\tcounter_t ops = startOps;\n\tdouble time;\n\tdo {\n\t\ttime = func(ops);\n\t\tops *= 2;\n\t} while (time < (precise ? 
30 : 10));\n#ifdef NDEBUG\n\treturn ops / 2;\n#else\n\treturn ops / 4;\n#endif\n}\n\ncounter_t adjustForThreads(counter_t suggestedOps, int nthreads)\n{\n\treturn std::max((counter_t)(suggestedOps / std::pow(2, std::sqrt((nthreads - 1) * 3))), suggestedOps / 16);\n}\n\n\ntemplate<typename TQueue>\ncounter_t determineMaxOpsForBenchmark(benchmark_type_t benchmark, int nthreads, bool useTokens, unsigned int randSeed)\n{\n\tswitch (benchmark) {\n\tcase bench_balanced: {\n\t\treturn adjustForThreads(rampUpToMeasurableNumberOfMaxOps([&](counter_t ops) {\n\t\t\tTQueue q;\n\t\t\tRNG_t rng(randSeed * 1);\n\t\t\tstd::uniform_int_distribution<int> rand(0, 20);\n\t\t\tdouble total = 0;\n\t\t\tSystemTime start;\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tstart = getSystemTime();\n\t\t\t\tq.enqueue(i);\n\t\t\t\ttotal += getTimeDelta(start);\n\t\t\t}\n\t\t\treturn total;\n\t\t}), nthreads);\n\t}\n\tcase bench_only_enqueue:\n\tcase bench_only_enqueue_prealloc:\n\tcase bench_mostly_enqueue: {\n\t\treturn adjustForThreads(rampUpToMeasurableNumberOfMaxOps([](counter_t ops) {\n\t\t\tTQueue q;\n\t\t\tauto start = getSystemTime();\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.enqueue(i);\n\t\t\t}\n\t\t\treturn getTimeDelta(start);\n\t\t}), nthreads);\n\t}\n\tcase bench_only_dequeue:\n\tcase bench_mostly_dequeue:\n\tcase bench_spmc:\n\tcase bench_spmc_preproduced:\n\tcase bench_mpsc: {\n\t\treturn adjustForThreads(rampUpToMeasurableNumberOfMaxOps([](counter_t ops) {\n\t\t\tTQueue q;\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.enqueue(i);\n\t\t\t}\n\t\t\tint item;\n\t\t\tauto start = getSystemTime();\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.try_dequeue(item);\n\t\t\t}\n\t\t\treturn getTimeDelta(start);\n\t\t}), nthreads);\n\t}\n\tcase bench_only_enqueue_bulk:\n\tcase bench_only_enqueue_bulk_prealloc:\n\tcase bench_mostly_enqueue_bulk: {\n\t\tstd::vector<counter_t> data;\n\t\tfor (counter_t i = 0; i != BULK_BATCH_SIZE; ++i) 
{\n\t\t\tdata.push_back(i);\n\t\t}\n\t\treturn adjustForThreads(rampUpToMeasurableNumberOfMaxOps([&](counter_t ops) {\n\t\t\tTQueue q;\n\t\t\tauto start = getSystemTime();\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.enqueue_bulk(data.cbegin(), data.size());\n\t\t\t}\n\t\t\treturn getTimeDelta(start);\n\t\t}), nthreads);\n\t}\n\tcase bench_only_dequeue_bulk:\n\tcase bench_mostly_dequeue_bulk: {\n\t\treturn adjustForThreads(rampUpToMeasurableNumberOfMaxOps([](counter_t ops) {\n\t\t\tTQueue q;\n\t\t\tstd::vector<int> data(BULK_BATCH_SIZE);\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.enqueue_bulk(data.cbegin(), data.size());\n\t\t\t}\n\t\t\tauto start = getSystemTime();\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.try_dequeue_bulk(data.begin(), data.size());\n\t\t\t}\n\t\t\treturn getTimeDelta(start);\n\t\t}), nthreads);\n\t\treturn 0;\n\t}\n\tcase bench_empty_dequeue: {\n\t\treturn adjustForThreads(rampUpToMeasurableNumberOfMaxOps([](counter_t ops) {\n\t\t\tTQueue q;\n\t\t\tint item;\n\t\t\tauto start = getSystemTime();\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.try_dequeue(item);\n\t\t\t}\n\t\t\treturn getTimeDelta(start);\n\t\t}), nthreads);\n\t}\n\tcase bench_enqueue_dequeue_pairs: {\n\t\treturn adjustForThreads(rampUpToMeasurableNumberOfMaxOps([](counter_t ops) {\n\t\t\tTQueue q;\n\t\t\tint item;\n\t\t\tauto start = getSystemTime();\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.enqueue(i);\n\t\t\t\tq.try_dequeue(item);\n\t\t\t}\n\t\t\treturn getTimeDelta(start);\n\t\t}), nthreads);\n\t}\n\t\n\tcase bench_heavy_concurrent: {\n\t\treturn adjustForThreads(rampUpToMeasurableNumberOfMaxOps([](counter_t ops) {\n\t\t\tTQueue q;\n\t\t\tint item;\n\t\t\tauto start = getSystemTime();\n\t\t\tfor (counter_t i = 0; i != ops; ++i) {\n\t\t\t\tq.enqueue(i);\n\t\t\t\tq.try_dequeue(item);\n\t\t\t}\n\t\t\treturn getTimeDelta(start);\n\t\t}), nthreads);\n\t}\n\t\n\tdefault:\n\t\tassert(false && \"Every benchmark type must 
be handled here!\");\n\t\treturn 0;\n\t}\n}\n\n\n// Returns time elapsed, in (fractional) milliseconds\ntemplate<typename TQueue>\ndouble runBenchmark(benchmark_type_t benchmark, int nthreads, bool useTokens, unsigned int randSeed, counter_t maxOps, int maxThreads, counter_t& out_opCount)\n{\n\tdouble result = 0;\n\tvolatile int forceNoOptimizeDummy;\n\t\n\tswitch (benchmark) {\n\tcase bench_balanced: {\n\t\t// Measures the average operation speed with multiple symmetrical threads under reasonable load\n\t\tTQueue q;\n\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\tstd::vector<counter_t> ops(nthreads);\n\t\tstd::vector<double> times(nthreads);\n\t\tstd::atomic<int> ready(0);\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tint item;\n\t\t\t\tSystemTime start;\n\t\t\t\tRNG_t rng(randSeed * (id + 1));\n\t\t\t\tstd::uniform_int_distribution<int> rand(0, 20);\n\t\t\t\tops[id] = 0;\n\t\t\t\ttimes[id] = 0;\n\t\t\t\ttypename TQueue::consumer_token_t consTok(q);\n\t\t\t\ttypename TQueue::producer_token_t prodTok(q);\n\t\t\t\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tif (rand(rng) == 0) {\n\t\t\t\t\t\tstart = getSystemTime();\n\t\t\t\t\t\tif ((i & 1) == 0) {\n\t\t\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\t\t\tq.try_dequeue(consTok, item);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\t\t\tq.enqueue(prodTok, i);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttimes[id] += getTimeDelta(start);\n\t\t\t\t\t\t++ops[id];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, tid);\n\t\t}\n\t\tout_opCount = 0;\n\t\tresult = 0;\n\t\tfor (int tid = 0; tid != nthreads; ++tid) 
{\n\t\t\tthreads[tid].join();\n\t\t\tout_opCount += ops[tid];\n\t\t\tresult += times[tid];\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_only_enqueue_prealloc: {\n\t\tout_opCount = maxOps * nthreads;\n\t\t\n\t\tTQueue q;\n\t\t{\n\t\t\t// Enqueue opcount elements first, then dequeue them; this\n\t\t\t// will \"stretch out\" the queue, letting implementatations\n\t\t\t// that re-use memory internally avoid having to allocate\n\t\t\t// more later during the timed enqueue operations.\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t}\n\t\t\t\n\t\t\t// Now empty the queue\n\t\t\tint item;\n\t\t\twhile (q.try_dequeue(item))\n\t\t\t\tcontinue;\n\t\t}\n\t\t\n\t\tif (nthreads == 1) {\n\t\t\t// No contention -- measures raw single-item enqueue speed\n\t\t\tauto start = getSystemTime();\n\t\t\tif (useTokens) {\n\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue(i);\n\t\t\t\t}\t\n\t\t\t}\n\t\t\tresult = getTimeDelta(start);\n\t\t}\n\t\telse {\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tstd::vector<double> timings(nthreads);\n\t\t\tstd::atomic<int> ready(0);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) 
{\n\t\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tresult = 0;\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tresult += timings[tid];\n\t\t\t}\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_only_enqueue: {\n\t\tout_opCount = maxOps * nthreads;\n\t\t\n\t\tTQueue q;\n\t\tif (nthreads == 1) {\n\t\t\t// No contention -- measures raw single-item enqueue speed\n\t\t\tauto start = getSystemTime();\n\t\t\tif (useTokens) {\n\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue(i);\n\t\t\t\t}\t\n\t\t\t}\n\t\t\tresult = getTimeDelta(start);\n\t\t}\n\t\telse {\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tstd::vector<double> timings(nthreads);\n\t\t\tstd::atomic<int> ready(0);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) 
{\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tresult = 0;\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tresult += timings[tid];\n\t\t\t}\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_spmc_preproduced:\n\tcase bench_only_dequeue: {\n\t\tout_opCount = maxOps * nthreads;\n\t\t\n\t\tTQueue q;\n\t\t{\n\t\t\t// Fill up the queue first\n\t\t\tstd::vector<SimpleThread> threads(benchmark == bench_spmc_preproduced ? 1 : nthreads);\n\t\t\tcounter_t itemsPerThread = benchmark == bench_spmc_preproduced ? maxOps * nthreads : maxOps;\n\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](size_t id) {\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != itemsPerThread; ++i) {\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != itemsPerThread; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t}\n\t\t}\n\t\t\n\t\tif (nthreads == 1) {\n\t\t\t// No contention -- measures raw single-item dequeue speed\n\t\t\tint item;\n\t\t\tauto start = getSystemTime();\n\t\t\tif (useTokens) {\n\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.try_dequeue(tok, item);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t}\t\n\t\t\t}\n\t\t\tresult = getTimeDelta(start);\n\t\t}\n\t\telse 
{\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tstd::vector<double> timings(nthreads);\n\t\t\tstd::atomic<int> ready(0);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tint item;\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.try_dequeue(tok, item);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tresult = 0;\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tresult += timings[tid];\n\t\t\t}\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_mostly_enqueue: {\n\t\t// Measures the average operation speed when most threads are enqueueing\n\t\tTQueue q;\n\t\tout_opCount = maxOps * nthreads;\n\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\tstd::vector<double> timings(nthreads);\n\t\tauto dequeueThreads = std::max(1, nthreads / 4);\n\t\tstd::atomic<int> ready(0);\n\t\tfor (int tid = 0; tid != nthreads - dequeueThreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t}, tid);\n\t\t}\n\t\tfor (int tid = nthreads - dequeueThreads; tid != nthreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tint item;\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.try_dequeue(tok, item);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t}, tid);\n\t\t}\n\t\tresult = 0;\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tthreads[tid].join();\n\t\t\tresult += timings[tid];\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_mostly_dequeue: {\n\t\t// Measures the average operation speed when most threads are dequeueing\n\t\tTQueue q;\n\t\tout_opCount = maxOps * nthreads;\n\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\tstd::vector<double> timings(nthreads);\n\t\tauto enqueueThreads = std::max(1, nthreads / 4);\n\t\t{\n\t\t\t// Fill up the queue first\n\t\t\tstd::vector<SimpleThread> threads(enqueueThreads);\n\t\t\tfor (int tid = 0; tid != enqueueThreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (int tid = 0; tid != enqueueThreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t}\n\t\t}\n\t\tstd::atomic<int> ready(0);\n\t\tfor (int tid = 0; tid != nthreads - enqueueThreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tint item;\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.try_dequeue(tok, item);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t}, tid);\n\t\t}\n\t\tfor (int tid = nthreads - enqueueThreads; tid != nthreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != 
nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t}, tid);\n\t\t}\n\t\tresult = 0;\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tthreads[tid].join();\n\t\t\tresult += timings[tid];\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_only_enqueue_bulk_prealloc: {\n\t\tTQueue q;\n\t\t{\n\t\t\t// Enqueue opcount elements first, then dequeue them; this\n\t\t\t// will \"stretch out\" the queue, letting implementatations\n\t\t\t// that re-use memory internally avoid having to allocate\n\t\t\t// more later during the timed enqueue operations.\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t}\n\t\t\t\n\t\t\t// Now empty the queue\n\t\t\tint item;\n\t\t\twhile (q.try_dequeue(item))\n\t\t\t\tcontinue;\n\t\t}\n\t\t\n\t\tstd::vector<counter_t> data;\n\t\tfor (counter_t i = 0; i != BULK_BATCH_SIZE; ++i) {\n\t\t\tdata.push_back(i);\n\t\t}\n\t\t\n\t\tout_opCount = maxOps * BULK_BATCH_SIZE * nthreads;\n\t\tif (nthreads == 1) {\n\t\t\tauto start = getSystemTime();\n\t\t\tif (useTokens) 
{\n\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue_bulk(tok, data.cbegin(), data.size());\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue_bulk(data.cbegin(), data.size());\n\t\t\t\t}\t\n\t\t\t}\n\t\t\tresult = getTimeDelta(start);\n\t\t}\n\t\telse {\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tstd::vector<double> timings(nthreads);\n\t\t\tstd::atomic<int> ready(0);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue_bulk(tok, data.cbegin(), data.size());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue_bulk(data.cbegin(), data.size());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tresult = 0;\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tresult += timings[tid];\n\t\t\t}\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_only_enqueue_bulk: {\n\t\tTQueue q;\n\t\tstd::vector<counter_t> data;\n\t\tfor (counter_t i = 0; i != BULK_BATCH_SIZE; ++i) {\n\t\t\tdata.push_back(i);\n\t\t}\n\t\t\n\t\tout_opCount = maxOps * BULK_BATCH_SIZE * nthreads;\n\t\tif (nthreads == 1) {\n\t\t\tauto start = getSystemTime();\n\t\t\tif (useTokens) {\n\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue_bulk(tok, data.cbegin(), data.size());\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue_bulk(data.cbegin(), data.size());\n\t\t\t\t}\t\n\t\t\t}\n\t\t\tresult = getTimeDelta(start);\n\t\t}\n\t\telse {\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tstd::vector<double> timings(nthreads);\n\t\t\tstd::atomic<int> ready(0);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue_bulk(tok, data.cbegin(), data.size());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue_bulk(data.cbegin(), data.size());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tresult = 0;\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tresult += timings[tid];\n\t\t\t}\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_mostly_enqueue_bulk: {\n\t\t// Measures the average speed of enqueueing in bulk under light contention\n\t\tTQueue q;\n\t\tstd::vector<counter_t> data;\n\t\tfor (counter_t i = 0; i != BULK_BATCH_SIZE; ++i) {\n\t\t\tdata.push_back(i);\n\t\t}\n\t\t\n\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\tstd::vector<double> timings(nthreads);\n\t\tauto dequeueThreads = std::max(1, nthreads / 4);\n\t\tstd::vector<counter_t> ops(nthreads - dequeueThreads);\n\t\tout_opCount = maxOps * BULK_BATCH_SIZE * (nthreads - dequeueThreads);\t// dequeue ops added after\n\t\tstd::atomic<int> ready(0);\n\t\tfor (int tid = 0; tid != nthreads - dequeueThreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.enqueue_bulk(tok, data.cbegin(), data.size());\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.enqueue_bulk(data.cbegin(), data.size());\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t}, tid);\n\t\t}\n\t\tfor (int tid = nthreads - dequeueThreads; tid != nthreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id, int idBase0) {\n\t\t\t\tstd::vector<int> items(BULK_BATCH_SIZE);\n\t\t\t\t\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tcounter_t totalOps = 0;\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tauto actual = q.try_dequeue_bulk(tok, items.begin(), 
items.size());\n\t\t\t\t\t\ttotalOps += actual + (actual == items.size() ? 0 : 1);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tauto actual = q.try_dequeue_bulk(items.begin(), items.size());\n\t\t\t\t\t\ttotalOps += actual + (actual == items.size() ? 0 : 1);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\tops[idBase0] = totalOps;\n\t\t\t}, tid, tid - (nthreads - dequeueThreads));\n\t\t}\n\t\tresult = 0;\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tthreads[tid].join();\n\t\t\tresult += timings[tid];\n\t\t\tif (tid < dequeueThreads) {\n\t\t\t\tout_opCount += ops[tid];\n\t\t\t}\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_only_dequeue_bulk: {\n\t\t// Measures the average speed of dequeueing in bulk when all threads are consumers\n\t\tTQueue q;\n\t\t{\n\t\t\t// Fill up the queue first\n\t\t\tstd::vector<int> data(BULK_BATCH_SIZE);\n\t\t\tfor (int i = 0; i != BULK_BATCH_SIZE; ++i) {\n\t\t\t\tdata[i] = i;\n\t\t\t}\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue_bulk(tok, data.cbegin(), data.size());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue_bulk(data.cbegin(), data.size());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t}\n\t\t}\n\t\tif (nthreads == 1) {\n\t\t\tout_opCount = maxOps * BULK_BATCH_SIZE;\n\t\t\tauto start = getSystemTime();\n\t\t\tstd::vector<int> items(BULK_BATCH_SIZE);\n\t\t\tif (useTokens) {\n\t\t\t\ttypename TQueue::consumer_token_t 
tok(q);\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.try_dequeue_bulk(tok, items.begin(), items.size());\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.try_dequeue_bulk(items.begin(), items.size());\n\t\t\t\t}\n\t\t\t}\n\t\t\tresult = getTimeDelta(start);\n\t\t}\n\t\telse {\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tstd::vector<double> timings(nthreads);\n\t\t\tstd::vector<counter_t> ops(nthreads);\n\t\t\tstd::atomic<int> ready(0);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tstd::vector<int> items(BULK_BATCH_SIZE);\n\t\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tcounter_t totalOps = 0;\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tauto actual = q.try_dequeue_bulk(tok, items.begin(), items.size());\n\t\t\t\t\t\t\ttotalOps += actual + (actual == items.size() ? 0 : 1);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tauto actual = q.try_dequeue_bulk(items.begin(), items.size());\n\t\t\t\t\t\t\ttotalOps += actual + (actual == items.size() ? 0 : 1);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\t\tops[id] = totalOps;\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tresult = 0;\n\t\t\tout_opCount = 0;\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tresult += timings[tid];\n\t\t\t\tout_opCount += ops[tid];\n\t\t\t}\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_mostly_dequeue_bulk: {\n\t\t// Measures the average speed of dequeueing in bulk under light contention\n\t\tTQueue q;\n\t\tauto enqueueThreads = std::max(1, nthreads / 4);\n\t\tout_opCount = maxOps * BULK_BATCH_SIZE * enqueueThreads;\n\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\tstd::vector<double> timings(nthreads);\n\t\tstd::vector<counter_t> ops(nthreads - enqueueThreads);\n\t\tstd::vector<int> enqueueData(BULK_BATCH_SIZE);\n\t\tfor (int i = 0; i != BULK_BATCH_SIZE; ++i) {\n\t\t\tenqueueData[i] = i;\n\t\t}\n\t\t{\n\t\t\t// Fill up the queue first\n\t\t\tstd::vector<SimpleThread> threads(enqueueThreads);\n\t\t\tfor (int tid = 0; tid != enqueueThreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue_bulk(tok, enqueueData.cbegin(), enqueueData.size());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue_bulk(enqueueData.cbegin(), enqueueData.size());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (int tid = 0; tid != enqueueThreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t}\n\t\t}\n\t\tstd::atomic<int> ready(0);\n\t\tfor (int tid = 0; tid != nthreads - enqueueThreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tstd::vector<int> data(BULK_BATCH_SIZE);\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\tcounter_t totalOps = 0;\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tauto actual = q.try_dequeue_bulk(tok, data.begin(), data.size());\n\t\t\t\t\t\ttotalOps += actual + (actual == 
data.size() ? 0 : 1);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tauto actual = q.try_dequeue_bulk(data.begin(), data.size());\n\t\t\t\t\t\ttotalOps += actual + (actual == data.size() ? 0 : 1);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\tops[id] = totalOps;\n\t\t\t}, tid);\n\t\t}\n\t\tfor (int tid = nthreads - enqueueThreads; tid != nthreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.enqueue_bulk(tok, enqueueData.cbegin(), enqueueData.size());\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\tq.enqueue_bulk(enqueueData.cbegin(), enqueueData.size());\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t}, tid);\n\t\t}\n\t\tresult = 0;\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tthreads[tid].join();\n\t\t\tresult += timings[tid];\n\t\t\tif (tid < nthreads - enqueueThreads) {\n\t\t\t\tout_opCount += ops[tid];\n\t\t\t}\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_spmc: {\n\t\tcounter_t elementsToDequeue = maxOps * (nthreads - 1);\n\t\t\n\t\tTQueue q;\n\t\tstd::vector<SimpleThread> threads(nthreads - 1);\n\t\tstd::vector<double> timings(nthreads - 1);\n\t\tstd::vector<counter_t> ops(nthreads - 1);\n\t\tstd::atomic<bool> lynchpin(false);\n\t\tstd::atomic<counter_t> totalDequeued(0);\n\t\tfor (int tid = 0; tid != nthreads - 1; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\twhile (!lynchpin.load(std::memory_order_relaxed)) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tint item;\n\t\t\t\tcounter_t i = 0;\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (useTokens) {\n\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\twhile (true) {\n\t\t\t\t\t\tif (q.try_dequeue(tok, item)) {\n\t\t\t\t\t\t\ttotalDequeued.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse if (totalDequeued.load(std::memory_order_relaxed) == elementsToDequeue) {\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t++i;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\twhile (true) {\n\t\t\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\t\t\ttotalDequeued.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse if (totalDequeued.load(std::memory_order_relaxed) == elementsToDequeue) {\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t++i;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\tops[id] = i;\n\t\t\t}, tid);\n\t\t}\n\t\t\n\t\tlynchpin.store(true, std::memory_order_seq_cst);\n\t\tfor (counter_t i = 0; i != elementsToDequeue; ++i) {\n\t\t\tq.enqueue(i);\n\t\t}\n\t\t\n\t\tresult = 0;\n\t\tout_opCount = 0;\n\t\tfor (int tid = 0; tid != nthreads - 1; ++tid) {\n\t\t\tthreads[tid].join();\n\t\t\tresult += timings[tid];\n\t\t\tout_opCount += ops[tid];\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_mpsc: {\n\t\tTQueue q;\n\t\tcounter_t elementsToDequeue = maxOps * (nthreads - 1);\n\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\tstd::atomic<int> ready(0);\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tif (tid == 0) {\n\t\t\t\t// Consumer thread\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tready.fetch_add(1, std::memory_order_seq_cst);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tint item;\n\t\t\t\t\tout_opCount = 0;\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != elementsToDequeue;) {\n\t\t\t\t\t\t\ti += q.try_dequeue(tok, item) ? 1 : 0;\n\t\t\t\t\t\t\t++out_opCount;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != elementsToDequeue;) {\n\t\t\t\t\t\t\ti += q.try_dequeue(item) ? 1 : 0;\n\t\t\t\t\t\t\t++out_opCount;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tresult = getTimeDelta(start);\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tready.fetch_add(1, std::memory_order_seq_cst);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t}\n\t\t\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tthreads[tid].join();\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\tbreak;\n\t}\n\t\n\tcase bench_empty_dequeue: {\n\t\t// Measures the average speed of attempting to dequeue from an empty queue\n\t\tTQueue q;\n\t\t// Fill up then empty the queue first\n\t\t{\n\t\t\tstd::vector<SimpleThread> threads(maxThreads > 0 ? maxThreads : 8);\n\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](size_t id) {\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != 10000; ++i) {\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != 10000; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t}\n\t\t\t\n\t\t\t// Empty the queue\n\t\t\tint item;\n\t\t\twhile (q.try_dequeue(item))\n\t\t\t\tcontinue;\n\t\t}\n\t\t\n\t\tif (nthreads == 1) {\n\t\t\t// No contention -- measures raw failed dequeue speed on empty queue\n\t\t\tint item;\n\t\t\tout_opCount = maxOps;\n\t\t\tauto start = getSystemTime();\n\t\t\tif (useTokens) {\n\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.try_dequeue(tok, item);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t}\n\t\t\t}\n\t\t\tresult = getTimeDelta(start);\n\t\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\t}\n\t\telse {\n\t\t\tout_opCount = maxOps * nthreads;\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tstd::vector<double> timings(nthreads);\n\t\t\tstd::atomic<int> ready(0);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tint item;\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::consumer_token_t tok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.try_dequeue(tok, item);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tresult = 0;\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tresult += timings[tid];\n\t\t\t}\n\t\t\tint item;\n\t\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 
1 : 0;\n\t\t}\n\t\tbreak;\n\t}\n\t\n\tcase bench_enqueue_dequeue_pairs: {\n\t\t// Measures the average speed of attempting to dequeue from an empty queue\n\t\t// (that eight separate threads had at one point enqueued to)\n\t\tout_opCount = maxOps * 2 * nthreads;\n\t\tTQueue q;\n\t\tif (nthreads == 1) {\n\t\t\t// No contention -- measures speed of immediately dequeueing the item that was just enqueued\n\t\t\tint item;\n\t\t\tauto start = getSystemTime();\n\t\t\tif (useTokens) {\n\t\t\t\ttypename TQueue::producer_token_t ptok(q);\n\t\t\t\ttypename TQueue::consumer_token_t ctok(q);\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue(ptok, i);\n\t\t\t\t\tq.try_dequeue(ctok, item);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t}\n\t\t\t}\n\t\t\tresult = getTimeDelta(start);\n\t\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 1 : 0;\n\t\t}\n\t\telse {\n\t\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\t\tstd::vector<double> timings(nthreads);\n\t\t\tstd::atomic<int> ready(0);\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\n\t\t\t\t\tint item;\n\t\t\t\t\tauto start = getSystemTime();\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::producer_token_t ptok(q);\n\t\t\t\t\t\ttypename TQueue::consumer_token_t ctok(q);\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(ptok, i);\n\t\t\t\t\t\t\tq.try_dequeue(ctok, item);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tresult = 
0;\n\t\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tresult += timings[tid];\n\t\t\t}\n\t\t\tint item;\n\t\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 1 : 0;\n\t\t}\n\t\tbreak;\n\t}\n\t\n\tcase bench_heavy_concurrent: {\n\t\t// Measures the average operation speed with many threads under heavy load\n\t\tout_opCount = maxOps * nthreads;\n\t\tTQueue q;\n\t\tstd::vector<SimpleThread> threads(nthreads);\n\t\tstd::vector<double> timings(nthreads);\n\t\tstd::atomic<int> ready(0);\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](int id) {\n\t\t\t\tready.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\twhile (ready.load(std::memory_order_relaxed) != nthreads)\n\t\t\t\t\tcontinue;\n\t\t\t\t\n\t\t\t\tauto start = getSystemTime();\n\t\t\t\tif (id < 2) {\n\t\t\t\t\t// Alternate\n\t\t\t\t\tint item;\n\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\ttypename TQueue::consumer_token_t consTok(q);\n\t\t\t\t\t\ttypename TQueue::producer_token_t prodTok(q);\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps / 2; ++i) {\n\t\t\t\t\t\t\tq.try_dequeue(consTok, item);\n\t\t\t\t\t\t\tq.enqueue(prodTok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps / 2; ++i) {\n\t\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tif ((id & 1) == 0) {\n\t\t\t\t\t\t// Enqueue\n\t\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\t\ttypename TQueue::producer_token_t prodTok(q);\n\t\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\t\tq.enqueue(prodTok, i);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\t// Dequeue\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tif (useTokens) {\n\t\t\t\t\t\t\ttypename TQueue::consumer_token_t 
consTok(q);\n\t\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\t\tq.try_dequeue(consTok, item);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tfor (counter_t i = 0; i != maxOps; ++i) {\n\t\t\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttimings[id] = getTimeDelta(start);\n\t\t\t}, tid);\n\t\t}\n\t\tresult = 0;\n\t\tfor (int tid = 0; tid != nthreads; ++tid) {\n\t\t\tthreads[tid].join();\n\t\t\tresult += timings[tid];\n\t\t}\n\t\tint item;\n\t\tforceNoOptimizeDummy = q.try_dequeue(item) ? 1 : 0;\n\t\tbreak;\n\t}\n\t\n\tdefault:\n\t\tassert(false && \"Every benchmark type must be handled here!\");\n\t\tresult = 0;\n\t\tout_opCount = 0;\n\t}\n\t\n\t(void)forceNoOptimizeDummy;\n\t\n\treturn result;\n}\n\n\nconst char* LOG_FILE = \"benchmarks.log\";\nstd::ofstream* logOut;\nbool logErrorReported = false;\n\nvoid sayf(int indent, const char* fmt, ...)\n{\n\tstatic char indentBuffer[] = \"                        \";\n\tstatic char buf[2048];\n\t\n\tindentBuffer[indent] = '\\0';\n\t\n\tva_list arglist;\n\tva_start(arglist, fmt);\n\tvsprintf(buf, fmt, arglist);\n\tva_end(arglist);\n\t\n\tif (*logOut) {\n\t\t(*logOut) << indentBuffer << buf;\n\t}\n\telse if (!logErrorReported) {\n\t\tstd::printf(\"Note: Error writing to log file. 
Future output will appear only on stdout\\n\");\n\t\tlogErrorReported = true;\n\t}\n\tstd::printf(\"%s%s\", indentBuffer, buf);\n\t\n\tindentBuffer[indent] = ' ';\n}\n\n\n// Returns a formatted timestamp.\n// Returned buffer is only valid until the next call.\n// Not thread-safe.\nstatic const char* timestamp()\n{\n\tstatic char buf[32];\n\ttime_t time = std::time(NULL);\n\tstrcpy(buf, std::asctime(std::localtime(&time)));\n\tbuf[strlen(buf) - 1] = '\\0';\t// Remove trailing newline\n\treturn buf;\n}\n\nstatic inline bool isvowel(char ch)\n{\n\tch = std::tolower(ch);\n\tfor (const char* v = \"aeiou\"; *v != '\\0'; ++v) {\n\t\tif (*v == ch) {\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n}\n\nstatic inline double safe_divide(double a, double b)\n{\n\treturn b == 0 ? 0 : a / b;\n}\n\n// Returns a positive number formatted in a string in a human-readable way.\n// The string is always 7 characters or less (excluding null byte).\n// Returned buffer is only valid until the sixteenth next call.\n// Not thread safe.\nstatic const char* pretty(double num)\n{\n\tassert(num >= 0);\n\t\n#if defined(_MSC_VER) && _MSC_VER < 1800\n\tif (!_finite(num)) {\n\t\treturn \"inf\";\n\t}\n\tif (_isnan(num)) {\n\t\treturn \"nan\";\n\t}\n#else\n\tif (std::isinf(num)) {\n\t\treturn \"inf\";\n\t}\n\tif (std::isnan(num)) {\n\t\treturn \"nan\";\n\t}\n#endif\n\t\n\tstatic char bufs[16][8];\n\tstatic int nextBuf = 0;\n\tchar* buf = bufs[nextBuf++];\n\tnextBuf &= 15;\n\t\n\tint suffix = 0;\n\tif (num < 1) {\n\t\tstatic const char minisufs[] = \"\\0munpfazy\";\n\t\twhile (num < 0.01) {\n\t\t\t++suffix;\n\t\t\tnum *= 1000;\n\t\t}\n\t\tsprintf(buf, \"%1.4f%c\", num, minisufs[suffix]);\n\t}\n\telse {\n\t\tstatic const char megasufs[] = \"\\0kMGTPEZY\";\n\t\twhile (num >= 1000) {\n\t\t\t++suffix;\n\t\t\tnum /= 1000;\n\t\t}\n\t\tsprintf(buf, \"%.2f%c\", num, megasufs[suffix]);\n\t}\n\t\n\treturn buf;\n}\n\nvoid printBenchmarkNames()\n{\n\tstd::printf(\"   Supported benchmarks are:\\n\");\n\t\n\tfor 
(int i = 0; i != BENCHMARK_TYPE_COUNT; ++i) {\n\t\tstd::printf(\"      %s\\n\", BENCHMARK_SHORT_NAMES[i]);\n\t}\n}\n\n\nint main(int argc, char** argv)\n{\n\t// Disable buffering (so that when run in, e.g., Sublime Text, the output appears as it is written)\n\tstd::setvbuf(stdout, nullptr, _IONBF, 0);\n\t\n\t// Isolate the executable name\n\tstd::string progName = argv[0];\n\tauto slash = progName.find_last_of(\"/\\\\\");\n\tif (slash != std::string::npos) {\n\t\tprogName = progName.substr(slash + 1);\n\t}\n\t\n\tstd::map<std::string, benchmark_type_t> benchmarkMap;\n\tfor (int i = 0; i != BENCHMARK_TYPE_COUNT; ++i) {\n\t\tbenchmarkMap.insert(std::make_pair(std::string(BENCHMARK_SHORT_NAMES[i]), (benchmark_type_t)i));\n\t}\n\tstd::vector<benchmark_type_t> selectedBenchmarks;\n\t\n\tbool showHelp = false;\n\tbool error = false;\n\tbool printedBenchmarks = false;\n\tfor (int i = 1; i < argc; ++i) {\n\t\tif (std::strcmp(argv[i], \"-h\") == 0 || std::strcmp(argv[i], \"--help\") == 0) {\n\t\t\tshowHelp = true;\n\t\t}\n\t\telse if (std::strcmp(argv[i], \"-p\") == 0 || std::strcmp(argv[i], \"--precise\") == 0) {\n\t\t\tprecise = true;\n\t\t}\n\t\telse if (std::strcmp(argv[i], \"--run\") == 0) {\n\t\t\tif (i + 1 == argc || argv[i + 1][0] == '-') {\n\t\t\t\tstd::printf(\"Expected benchmark name argument for --run option.\\n\");\n\t\t\t\tif (!printedBenchmarks) {\n\t\t\t\t\tprintBenchmarkNames();\n\t\t\t\t\tprintedBenchmarks = true;\n\t\t\t\t}\n\t\t\t\terror = true;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\t\n\t\t\tauto it = benchmarkMap.find(argv[++i]);\n\t\t\tif (it == benchmarkMap.end()) {\n\t\t\t\tstd::printf(\"Unrecognized benchmark name '%s'.\\n\", argv[i]);\n\t\t\t\tif (!printedBenchmarks) {\n\t\t\t\t\tprintBenchmarkNames();\n\t\t\t\t\tprintedBenchmarks = true;\n\t\t\t\t}\n\t\t\t\terror = true;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\t\n\t\t\tselectedBenchmarks.push_back(it->second);\n\t\t}\n\t\telse {\n\t\t\tstd::printf(\"Unrecognized option '%s'\\n\", argv[i]);\n\t\t\terror = 
true;\n\t\t}\n\t}\n\tif (showHelp || error) {\n\t\tif (error) {\n\t\t\tstd::printf(\"\\n\");\n\t\t}\n\t\tstd::printf(\"%s\\n    Description: Runs benchmarks for moodycamel::ConcurrentQueue\\n\", progName.c_str());\n\t\tstd::printf(\"    --help            Prints this help blurb\\n\");\n\t\tstd::printf(\"    --precise         Generate more precise benchmark results (slower)\\n\");\n\t\tstd::printf(\"    --run benchmark   Runs only the selected benchmark (can be used multiple times)\\n\");\n\t\treturn error ? 1 : 0;\n\t}\n\t\n\tbool logExists = true;\n\t{\n\t\tstd::ifstream fin(LOG_FILE);\n\t\tif (!fin) {\n\t\t\tlogExists = false;\n\t\t}\n\t}\n\t\n\tstd::ofstream fout(LOG_FILE, std::ios::app);\n\tlogOut = &fout;\n\tif (fout) {\n\t\tif (logExists) {\n\t\t\tfout << \"\\n\\n\\n\";\n\t\t}\n\t\tfout << \"--- New run (\" << timestamp() << \") ---\\n\";\n\t}\n\telse {\n\t\tstd::printf(\"Note: Error opening log file '%s'. Output will appear only on stdout.\\n\\n\", LOG_FILE);\n\t\tlogErrorReported = true;\n\t}\n\t\n\tconst char* bitStr = \"\";\n\tif (sizeof(void*) == 4 || sizeof(void*) == 8) {\n\t\tbitStr = sizeof(void*) == 4 ? \" 32-bit\" : \" 64-bit\";\n\t}\n\t\n\tconst char* cpuStr = getCPUString();\n\tsayf(0, \"Running%s benchmarks on a%s %s\\n\", bitStr, isvowel(cpuStr[0]) ? \"n\" : \"\", cpuStr);\n\tif (precise) {\n\t\tsayf(4, \"(precise mode)\\n\");\n\t}\n\tif (selectedBenchmarks.size() > 0) {\n\t\tsayf(4, \"(selected benchmarks only)\\n\");\n\t}\n\tsayf(0, \"Note that these are synthetic benchmarks. Take them with a grain of salt.\\n\\n\");\n\t\n\tsayf(0, \"Legend:\\n\");\n\tsayf(4, \"'Avg':     Average time taken per operation, normalized to be per thread\\n\");\n\tsayf(4, \"'Range':   The minimum and maximum times taken per operation (per thread)\\n\");\n\tsayf(4, \"'Ops/s':   Overall operations per second\\n\");\n\tsayf(4, \"'Ops/s/t': Operations per second per thread (inverse of 'Avg')\\n\");\n\tsayf(4, \"Operations include those that fail (e.g. 
because the queue is empty).\\n\");\n\tsayf(4, \"Each logical enqueue/dequeue counts as an individual operation when in bulk.\\n\");\n\tsayf(0, \"\\n\");\n\t\n\t\n#ifdef NDEBUG\n\tconst int ITERATIONS = precise ? 100 : 10;\n#else\n\tconst int ITERATIONS = precise ? 20 : 2;\n#endif\n\t\n\t\n\tconst double FASTEST_PERCENT_CONSIDERED = precise ? 8 : 50;\t// Only consider the top % of runs\n\t\n\t// Make sure each run of a given benchmark has the same seed (otherwise different runs are not comparable)\n\tstd::srand(std::time(NULL));\n\tunsigned int randSeeds[BENCHMARK_TYPE_COUNT];\n\tfor (unsigned int i = 0; i != BENCHMARK_TYPE_COUNT; ++i) {\n\t\trandSeeds[i] = std::rand() * (i + 1) + 1;\n\t}\n\t\n\tdouble opsst = 0;\t\t// ops/s/thread\n\t\n\tdouble totalWeightedOpsst[QUEUE_COUNT];\n\tdouble totalWeight[QUEUE_COUNT];\n\tfor (int i = 0; i != QUEUE_COUNT; ++i) {\n\t\ttotalWeightedOpsst[i] = 0;\n\t\ttotalWeight[i] = 0;\n\t}\n\t\n\tauto logicalCores = std::thread::hardware_concurrency();\n\t\n\tif (selectedBenchmarks.size() == 0) {\n\t\tfor (int i = 0; i != BENCHMARK_TYPE_COUNT; ++i) {\n\t\t\tselectedBenchmarks.push_back((benchmark_type_t)i);\n\t\t}\n\t}\n\t\n\tint indent = 0;\n\tfor (auto selectedIt = selectedBenchmarks.cbegin(); selectedIt != selectedBenchmarks.cend(); ++selectedIt) {\n\t\tint benchmark = static_cast<int>(*selectedIt);\n\t\tauto seed = randSeeds[benchmark];\n\t\t\n\t\tbool anyQueueSupportsBenchmark = false;\n\t\tfor (int queue = 0; queue != QUEUE_COUNT; ++queue) {\n\t\t\tif (QUEUE_BENCH_SUPPORT[queue][benchmark]) {\n\t\t\t\tanyQueueSupportsBenchmark = true;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif (!anyQueueSupportsBenchmark) {\n\t\t\tcontinue;\n\t\t}\n\t\t\n\t\tsayf(0, \"%s\", BENCHMARK_NAMES[benchmark]);\n\t\tif (BENCHMARK_THREADS_MEASURED[benchmark] != 0) {\n\t\t\tif (BENCHMARK_THREADS_MEASURED[benchmark] < 0) {\n\t\t\t\tsayf(0, \" (measuring all but %d %s)\", -BENCHMARK_THREADS_MEASURED[benchmark], BENCHMARK_THREADS_MEASURED[benchmark] == -1 ? 
\"thread\" : \"threads\");\n\t\t\t}\n\t\t\telse {\n\t\t\t\tsayf(0, \" (measuring %d %s)\", BENCHMARK_THREADS_MEASURED[benchmark], BENCHMARK_THREADS_MEASURED[benchmark] == 1 ? \"thread\" : \"threads\");\n\t\t\t}\n\t\t}\n\t\tsayf(0, \":\\n\");\n\t\tindent += 2;\n\t\tsayf(indent, \"(%s)\\n\", BENCHMARK_DESCS[benchmark]);\n\t\t\n\t\tfor (int queue = 0; queue != QUEUE_COUNT; ++queue) {\n\t\t\tsayf(indent, \"> %s\\n\", QUEUE_NAMES[queue]);\n\t\t\t\n\t\t\tif (!QUEUE_BENCH_SUPPORT[queue][benchmark]) {\n\t\t\t\tsayf(indent + 3, \"(skipping, benchmark not supported...)\\n\\n\");\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\t\n\t\t\tif (QUEUE_TOKEN_SUPPORT[queue]) {\n\t\t\t\tindent += 4;\n\t\t\t}\n\t\t\tfor (int useTokens = 0; useTokens != 2; ++useTokens) {\n\t\t\t\tif (QUEUE_TOKEN_SUPPORT[queue]) {\n\t\t\t\t\tsayf(indent, \"%s tokens\\n\", useTokens == 0 ? \"Without\" : \"With\");\n\t\t\t\t}\n\t\t\t\tif (useTokens == 1 && !QUEUE_TOKEN_SUPPORT[queue]) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tindent += 3;\n\t\t\t\t\n\t\t\t\tstd::vector<double> opssts;\n\t\t\t\tstd::vector<int> threadCounts;\n\t\t\t\tfor (int nthreadIndex = 0; BENCHMARK_THREADS[benchmark][nthreadIndex] != 0; ++nthreadIndex) {\n\t\t\t\t\tint nthreads = BENCHMARK_THREADS[benchmark][nthreadIndex];\n\t\t\t\t\tint measuredThreads = nthreads;\n\t\t\t\t\tif (BENCHMARK_THREADS_MEASURED[benchmark] != 0) {\n\t\t\t\t\t\tmeasuredThreads = BENCHMARK_THREADS_MEASURED[benchmark] < 0 ? 
nthreads + BENCHMARK_THREADS_MEASURED[benchmark] : BENCHMARK_THREADS_MEASURED[benchmark];\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tif (logicalCores > 0 && (unsigned int)nthreads > 3 * logicalCores) {\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t\tif (QUEUE_MAX_THREADS[queue] >= 0 && QUEUE_MAX_THREADS[queue] < nthreads) {\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tcounter_t maxOps;\n\t\t\t\t\tswitch ((queue_id_t)queue) {\n\t\t\t\t\tcase queue_moodycamel_ConcurrentQueue:\n\t\t\t\t\t\tmaxOps = determineMaxOpsForBenchmark<moodycamel::ConcurrentQueue<int, Traits>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase queue_moodycamel_BlockingConcurrentQueue:\n\t\t\t\t\t\tmaxOps = determineMaxOpsForBenchmark<moodycamel::BlockingConcurrentQueue<int, Traits>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase queue_lockbased:\n\t\t\t\t\t\tmaxOps = determineMaxOpsForBenchmark<LockBasedQueue<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase queue_simplelockfree:\n\t\t\t\t\t\tmaxOps = determineMaxOpsForBenchmark<SimpleLockFreeQueue<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase queue_boost:\n\t\t\t\t\t\tmaxOps = determineMaxOpsForBenchmark<BoostQueueWrapper<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase queue_tbb:\n\t\t\t\t\t\tmaxOps = determineMaxOpsForBenchmark<TbbQueueWrapper<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase queue_std:\n\t\t\t\t\t\tmaxOps = determineMaxOpsForBenchmark<StdQueueWrapper<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tassert(false && \"There should be a case here for every queue in the 
benchmarks!\");\n\t\t\t\t\t}\n\t\t\t\t\t//std::printf(\"maxOps: %llu\\n\", maxOps);\n\t\t\t\t\t\n\t\t\t\t\tint maxThreads = QUEUE_MAX_THREADS[queue];\n\t\t\t\t\tstd::vector<BenchmarkResult> results(ITERATIONS);\n\t\t\t\t\tfor (int i = 0; i < ITERATIONS; ++i) {\n\t\t\t\t\t\tdouble elapsed;\n\t\t\t\t\t\tcounter_t ops = 0;\n\t\t\t\t\t\t\n\t\t\t\t\t\tswitch ((queue_id_t)queue) {\n\t\t\t\t\t\tcase queue_moodycamel_ConcurrentQueue:\n\t\t\t\t\t\t\telapsed = runBenchmark<moodycamel::ConcurrentQueue<int, Traits>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed, maxOps, maxThreads, ops);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase queue_moodycamel_BlockingConcurrentQueue:\n\t\t\t\t\t\t\telapsed = runBenchmark<moodycamel::BlockingConcurrentQueue<int, Traits>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed, maxOps, maxThreads, ops);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase queue_lockbased:\n\t\t\t\t\t\t\telapsed = runBenchmark<LockBasedQueue<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed, maxOps, maxThreads, ops);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase queue_simplelockfree:\n\t\t\t\t\t\t\telapsed = runBenchmark<SimpleLockFreeQueue<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed, maxOps, maxThreads, ops);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase queue_boost:\n\t\t\t\t\t\t\telapsed = runBenchmark<BoostQueueWrapper<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed, maxOps, maxThreads, ops);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase queue_tbb:\n\t\t\t\t\t\t\telapsed = runBenchmark<TbbQueueWrapper<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed, maxOps, maxThreads, ops);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase queue_std:\n\t\t\t\t\t\t\telapsed = runBenchmark<StdQueueWrapper<int>>((benchmark_type_t)benchmark, nthreads, (bool)useTokens, seed, maxOps, maxThreads, ops);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tassert(false && \"There should be a case here for every queue in 
the benchmarks!\");\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tresults[i].elapsedTime = elapsed;\n\t\t\t\t\t\tresults[i].operations = ops;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tstd::sort(&results[0], &results[0] + ITERATIONS);\n\t\t\t\t\tint consideredCount = std::max(2, (int)(ITERATIONS * FASTEST_PERCENT_CONSIDERED / 100));\n\t\t\t\t\t\n\t\t\t\t\tdouble min = safe_divide(results[0].elapsedTime / 1000.0, (double)results[0].operations / measuredThreads);\n\t\t\t\t\tdouble max = safe_divide(results[0].elapsedTime / 1000.0, (double)results[0].operations / measuredThreads);\n\t\t\t\t\tdouble ops = 0;\n\t\t\t\t\tdouble time = 0;\n\t\t\t\t\tfor (int i = 0; i != consideredCount; ++i) {\n\t\t\t\t\t\tdouble msPerOperation = safe_divide(results[i].elapsedTime / 1000.0, (double)results[i].operations / measuredThreads);\n\t\t\t\t\t\tif (msPerOperation < min) {\n\t\t\t\t\t\t\tmin = msPerOperation;\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse if (msPerOperation > max) {\n\t\t\t\t\t\t\tmax = msPerOperation;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\ttime += results[i].elapsedTime;\n\t\t\t\t\t\tops += results[i].operations;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tdouble avg = safe_divide(time / 1000.0, ops / measuredThreads);\n\t\t\t\t\tdouble opsPerSecond = safe_divide(ops, time / 1000.0);\n\t\t\t\t\topsst = opsPerSecond / (double)measuredThreads;\n\t\t\t\t\t\n\t\t\t\t\topssts.push_back(opsst);\n\t\t\t\t\tthreadCounts.push_back(measuredThreads);\n\t\t\t\t\t\n\t\t\t\t\tsayf(indent, \"%-3d %7s:  Avg: %7ss  Range: [%7ss, %7ss]  Ops/s: %7s  Ops/s/t: %7s\\n\", nthreads, nthreads != 1 ? 
\"threads\" : \"thread\", pretty(avg), pretty(min), pretty(max), pretty(opsPerSecond), pretty(opsst));\n\t\t\t\t\tif (nthreads == 1 && BENCHMARK_SINGLE_THREAD_NOTES[benchmark][0] != '\\0') {\n\t\t\t\t\t\tsayf(indent + 7, \"^ Note: %s\\n\", BENCHMARK_SINGLE_THREAD_NOTES[benchmark]);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\topsst = 0;\n\t\t\t\tdouble divisor = 0;\n\t\t\t\tfor (size_t i = 0; i != opssts.size(); ++i) {\n\t\t\t\t\topsst += opssts[i] * std::sqrt(threadCounts[i]);\n\t\t\t\t\ttotalWeightedOpsst[queue] += opssts[i] * std::sqrt(threadCounts[i]);\n\t\t\t\t\tdivisor += std::sqrt(threadCounts[i]);\n\t\t\t\t\ttotalWeight[queue] += std::sqrt(threadCounts[i]);\n\t\t\t\t}\n\t\t\t\topsst /= divisor;\n\t\t\t\tsayf(indent, \"Operations per second per thread (weighted average): %7s\\n\\n\", opsst == 0 ? \"(n/a)\" : pretty(opsst));\n\t\t\t\t\n\t\t\t\tindent -= 3;\n\t\t\t}\n\t\t\tif (QUEUE_TOKEN_SUPPORT[queue]) {\n\t\t\t\tindent -= 4;\n\t\t\t}\n\t\t}\n\t\tindent -= 2;\n\t}\n\t\n\tsayf(0, \"Overall average operations per second per thread (where higher-concurrency runs have more weight):\\n\");\n\tsayf(0, \"(Take this summary with a grain of salt -- look at the individual benchmark results for a much\\nbetter idea of how the queues measure up to each other):\\n\");\n\tfor (int queue = 0; queue != QUEUE_COUNT; ++queue) {\n\t\topsst = safe_divide(totalWeightedOpsst[queue], totalWeight[queue]);\n\t\tif (QUEUE_SUMMARY_NOTES[queue] != nullptr && QUEUE_SUMMARY_NOTES[queue][0] != '\\0') {\n\t\t\tsayf(4, \"%s (%s): %7s\\n\", QUEUE_NAMES[queue], QUEUE_SUMMARY_NOTES[queue], opsst == 0 ? \"(n/a)\" : pretty(opsst));\n\t\t}\n\t\telse {\n\t\t\tsayf(4, \"%s: %7s\\n\", QUEUE_NAMES[queue], opsst == 0 ? \"(n/a)\" : pretty(opsst));\n\t\t}\n\t}\n\t\n\treturn 0;\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/LICENSE_1_0.txt",
    "content": "Boost Software License - Version 1.0 - August 17th, 2003\r\n\r\nPermission is hereby granted, free of charge, to any person or organization\r\nobtaining a copy of the software and accompanying documentation covered by\r\nthis license (the \"Software\") to use, reproduce, display, distribute,\r\nexecute, and transmit the Software, and to prepare derivative works of the\r\nSoftware, and to permit third-parties to whom the Software is furnished to\r\ndo so, all subject to the following:\r\n\r\nThe copyright notices in the Software and this entire statement, including\r\nthe above license grant, this restriction and the following disclaimer,\r\nmust be included in all copies of the Software, in whole or in part, and\r\nall derivative works of the Software, unless such copies or derivative\r\nworks are solely in the form of machine-executable object code generated by\r\na source language processor.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT\r\nSHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE\r\nFOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,\r\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\r\nDEALINGS IN THE SOFTWARE.\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/README.txt",
    "content": "This is a partial copy of Boost 1.60, specifically only the parts that\r\nboost/lockfree/queue.hpp depends on (extracted using bcp)."
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/array.hpp",
    "content": "/* The following code declares class array,\n * an STL container (as wrapper) for arrays of constant size.\n *\n * See\n *      http://www.boost.org/libs/array/\n * for documentation.\n *\n * The original author site is at: http://www.josuttis.com/\n *\n * (C) Copyright Nicolai M. Josuttis 2001.\n *\n * Distributed under the Boost Software License, Version 1.0. (See\n * accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * 14 Apr 2012 - (mtc) Added support for boost::hash\n * 28 Dec 2010 - (mtc) Added cbegin and cend (and crbegin and crend) for C++Ox compatibility.\n * 10 Mar 2010 - (mtc) fill method added, matching resolution of the standard library working group.\n *      See <http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#776> or Trac issue #3168\n *      Eventually, we should remove \"assign\" which is now a synonym for \"fill\" (Marshall Clow)\n * 10 Mar 2010 - added workaround for SUNCC and !STLPort [trac #3893] (Marshall Clow)\n * 29 Jan 2004 - c_array() added, BOOST_NO_PRIVATE_IN_AGGREGATE removed (Nico Josuttis)\n * 23 Aug 2002 - fix for Non-MSVC compilers combined with MSVC libraries.\n * 05 Aug 2001 - minor update (Nico Josuttis)\n * 20 Jan 2001 - STLport fix (Beman Dawes)\n * 29 Sep 2000 - Initial Revision (Nico Josuttis)\n *\n * Jan 29, 2004\n */\n#ifndef BOOST_ARRAY_HPP\n#define BOOST_ARRAY_HPP\n\n#include <boost/detail/workaround.hpp>\n\n#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)  \n# pragma warning(push)  \n# pragma warning(disable:4996) // 'std::equal': Function call with parameters that may be unsafe\n# pragma warning(disable:4510) // boost::array<T,N>' : default constructor could not be generated \n# pragma warning(disable:4610) // warning C4610: class 'boost::array<T,N>' can never be instantiated - user defined constructor required \n#endif\n\n#include <cstddef>\n#include <stdexcept>\n#include <boost/assert.hpp>\n#include <boost/swap.hpp>\n\n// Handles broken standard 
libraries better than <iterator>\n#include <boost/detail/iterator.hpp>\n#include <boost/throw_exception.hpp>\n#include <boost/functional/hash_fwd.hpp>\n#include <algorithm>\n\n// FIXES for broken compilers\n#include <boost/config.hpp>\n\n\nnamespace boost {\n\n    template<class T, std::size_t N>\n    class array {\n      public:\n        T elems[N];    // fixed-size array of elements of type T\n\n      public:\n        // type definitions\n        typedef T              value_type;\n        typedef T*             iterator;\n        typedef const T*       const_iterator;\n        typedef T&             reference;\n        typedef const T&       const_reference;\n        typedef std::size_t    size_type;\n        typedef std::ptrdiff_t difference_type;\n\n        // iterator support\n        iterator        begin()       { return elems; }\n        const_iterator  begin() const { return elems; }\n        const_iterator cbegin() const { return elems; }\n        \n        iterator        end()       { return elems+N; }\n        const_iterator  end() const { return elems+N; }\n        const_iterator cend() const { return elems+N; }\n\n        // reverse iterator support\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) && !defined(BOOST_MSVC_STD_ITERATOR) && !defined(BOOST_NO_STD_ITERATOR_TRAITS)\n        typedef std::reverse_iterator<iterator> reverse_iterator;\n        typedef std::reverse_iterator<const_iterator> const_reverse_iterator;\n#elif defined(_MSC_VER) && (_MSC_VER == 1300) && defined(BOOST_DINKUMWARE_STDLIB) && (BOOST_DINKUMWARE_STDLIB == 310)\n        // workaround for broken reverse_iterator in VC7\n        typedef std::reverse_iterator<std::_Ptrit<value_type, difference_type, iterator,\n                                      reference, iterator, reference> > reverse_iterator;\n        typedef std::reverse_iterator<std::_Ptrit<value_type, difference_type, const_iterator,\n                                      const_reference, iterator, reference> > 
const_reverse_iterator;\n#elif defined(_RWSTD_NO_CLASS_PARTIAL_SPEC) \n        typedef std::reverse_iterator<iterator, std::random_access_iterator_tag, \n              value_type, reference, iterator, difference_type> reverse_iterator; \n        typedef std::reverse_iterator<const_iterator, std::random_access_iterator_tag,\n              value_type, const_reference, const_iterator, difference_type> const_reverse_iterator;\n#else\n        // workaround for broken reverse_iterator implementations\n        typedef std::reverse_iterator<iterator,T> reverse_iterator;\n        typedef std::reverse_iterator<const_iterator,T> const_reverse_iterator;\n#endif\n\n        reverse_iterator rbegin() { return reverse_iterator(end()); }\n        const_reverse_iterator rbegin() const {\n            return const_reverse_iterator(end());\n        }\n        const_reverse_iterator crbegin() const {\n            return const_reverse_iterator(end());\n        }\n\n        reverse_iterator rend() { return reverse_iterator(begin()); }\n        const_reverse_iterator rend() const {\n            return const_reverse_iterator(begin());\n        }\n        const_reverse_iterator crend() const {\n            return const_reverse_iterator(begin());\n        }\n\n        // operator[]\n        reference operator[](size_type i) \n        { \n            BOOST_ASSERT_MSG( i < N, \"out of range\" );\n            return elems[i];\n        }\n        \n        const_reference operator[](size_type i) const \n        {     \n            BOOST_ASSERT_MSG( i < N, \"out of range\" );\n            return elems[i]; \n        }\n\n        // at() with range check\n        reference at(size_type i) { rangecheck(i); return elems[i]; }\n        const_reference at(size_type i) const { rangecheck(i); return elems[i]; }\n    \n        // front() and back()\n        reference front() \n        { \n            return elems[0]; \n        }\n        \n        const_reference front() const \n        {\n            
return elems[0];\n        }\n        \n        reference back() \n        { \n            return elems[N-1]; \n        }\n        \n        const_reference back() const \n        { \n            return elems[N-1]; \n        }\n\n        // size is constant\n        static size_type size() { return N; }\n        static bool empty() { return false; }\n        static size_type max_size() { return N; }\n        enum { static_size = N };\n\n        // swap (note: linear complexity)\n        void swap (array<T,N>& y) {\n            for (size_type i = 0; i < N; ++i)\n                boost::swap(elems[i],y.elems[i]);\n        }\n\n        // direct access to data (read-only)\n        const T* data() const { return elems; }\n        T* data() { return elems; }\n\n        // use array as C array (direct read/write access to data)\n        T* c_array() { return elems; }\n\n        // assignment with type conversion\n        template <typename T2>\n        array<T,N>& operator= (const array<T2,N>& rhs) {\n            std::copy(rhs.begin(),rhs.end(), begin());\n            return *this;\n        }\n\n        // assign one value to all elements\n        void assign (const T& value) { fill ( value ); }    // A synonym for fill\n        void fill   (const T& value)\n        {\n            std::fill_n(begin(),size(),value);\n        }\n\n        // check range (may be private because it is static)\n        static void rangecheck (size_type i) {\n            if (i >= size()) {\n                std::out_of_range e(\"array<>: index out of range\");\n                boost::throw_exception(e);\n            }\n        }\n\n    };\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n    template< class T >\n    class array< T, 0 > {\n\n      public:\n        // type definitions\n        typedef T              value_type;\n        typedef T*             iterator;\n        typedef const T*       const_iterator;\n        typedef T&             reference;\n        typedef const T&       
const_reference;\n        typedef std::size_t    size_type;\n        typedef std::ptrdiff_t difference_type;\n\n        // iterator support\n        iterator        begin()       { return       iterator( reinterpret_cast<       T * >( this ) ); }\n        const_iterator  begin() const { return const_iterator( reinterpret_cast< const T * >( this ) ); }\n        const_iterator cbegin() const { return const_iterator( reinterpret_cast< const T * >( this ) ); }\n\n        iterator        end()       { return  begin(); }\n        const_iterator  end() const { return  begin(); }\n        const_iterator cend() const { return cbegin(); }\n\n        // reverse iterator support\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) && !defined(BOOST_MSVC_STD_ITERATOR) && !defined(BOOST_NO_STD_ITERATOR_TRAITS)\n        typedef std::reverse_iterator<iterator> reverse_iterator;\n        typedef std::reverse_iterator<const_iterator> const_reverse_iterator;\n#elif defined(_MSC_VER) && (_MSC_VER == 1300) && defined(BOOST_DINKUMWARE_STDLIB) && (BOOST_DINKUMWARE_STDLIB == 310)\n        // workaround for broken reverse_iterator in VC7\n        typedef std::reverse_iterator<std::_Ptrit<value_type, difference_type, iterator,\n                                      reference, iterator, reference> > reverse_iterator;\n        typedef std::reverse_iterator<std::_Ptrit<value_type, difference_type, const_iterator,\n                                      const_reference, iterator, reference> > const_reverse_iterator;\n#elif defined(_RWSTD_NO_CLASS_PARTIAL_SPEC) \n        typedef std::reverse_iterator<iterator, std::random_access_iterator_tag, \n              value_type, reference, iterator, difference_type> reverse_iterator; \n        typedef std::reverse_iterator<const_iterator, std::random_access_iterator_tag,\n              value_type, const_reference, const_iterator, difference_type> const_reverse_iterator;\n#else\n        // workaround for broken reverse_iterator implementations\n        
typedef std::reverse_iterator<iterator,T> reverse_iterator;\n        typedef std::reverse_iterator<const_iterator,T> const_reverse_iterator;\n#endif\n\n        reverse_iterator rbegin() { return reverse_iterator(end()); }\n        const_reverse_iterator rbegin() const {\n            return const_reverse_iterator(end());\n        }\n        const_reverse_iterator crbegin() const {\n            return const_reverse_iterator(end());\n        }\n\n        reverse_iterator rend() { return reverse_iterator(begin()); }\n        const_reverse_iterator rend() const {\n            return const_reverse_iterator(begin());\n        }\n        const_reverse_iterator crend() const {\n            return const_reverse_iterator(begin());\n        }\n\n        // operator[]\n        reference operator[](size_type /*i*/)\n        {\n            return failed_rangecheck();\n        }\n\n        const_reference operator[](size_type /*i*/) const\n        {\n            return failed_rangecheck();\n        }\n\n        // at() with range check\n        reference at(size_type /*i*/)               {   return failed_rangecheck(); }\n        const_reference at(size_type /*i*/) const   {   return failed_rangecheck(); }\n\n        // front() and back()\n        reference front()\n        {\n            return failed_rangecheck();\n        }\n\n        const_reference front() const\n        {\n            return failed_rangecheck();\n        }\n\n        reference back()\n        {\n            return failed_rangecheck();\n        }\n\n        const_reference back() const\n        {\n            return failed_rangecheck();\n        }\n\n        // size is constant\n        static size_type size() { return 0; }\n        static bool empty() { return true; }\n        static size_type max_size() { return 0; }\n        enum { static_size = 0 };\n\n        void swap (array<T,0>& /*y*/) {\n        }\n\n        // direct access to data (read-only)\n        const T* data() const { return 0; }\n        T* 
data() { return 0; }\n\n        // use array as C array (direct read/write access to data)\n        T* c_array() { return 0; }\n\n        // assignment with type conversion\n        template <typename T2>\n        array<T,0>& operator= (const array<T2,0>& ) {\n            return *this;\n        }\n\n        // assign one value to all elements\n        void assign (const T& value) { fill ( value ); }\n        void fill   (const T& ) {}\n        \n        // check range (may be private because it is static)\n        static reference failed_rangecheck () {\n                std::out_of_range e(\"attempt to access element of an empty array\");\n                boost::throw_exception(e);\n#if defined(BOOST_NO_EXCEPTIONS) || (!defined(BOOST_MSVC) && !defined(__PATHSCALE__))\n                //\n                // We need to return something here to keep\n                // some compilers happy: however we will never\n                // actually get here....\n                //\n                static T placeholder;\n                return placeholder;\n#endif\n            }\n    };\n#endif\n\n    // comparisons\n    template<class T, std::size_t N>\n    bool operator== (const array<T,N>& x, const array<T,N>& y) {\n        return std::equal(x.begin(), x.end(), y.begin());\n    }\n    template<class T, std::size_t N>\n    bool operator< (const array<T,N>& x, const array<T,N>& y) {\n        return std::lexicographical_compare(x.begin(),x.end(),y.begin(),y.end());\n    }\n    template<class T, std::size_t N>\n    bool operator!= (const array<T,N>& x, const array<T,N>& y) {\n        return !(x==y);\n    }\n    template<class T, std::size_t N>\n    bool operator> (const array<T,N>& x, const array<T,N>& y) {\n        return y<x;\n    }\n    template<class T, std::size_t N>\n    bool operator<= (const array<T,N>& x, const array<T,N>& y) {\n        return !(y<x);\n    }\n    template<class T, std::size_t N>\n    bool operator>= (const array<T,N>& x, const array<T,N>& y) {\n        
return !(x<y);\n    }\n\n    // global swap()\n    template<class T, std::size_t N>\n    inline void swap (array<T,N>& x, array<T,N>& y) {\n        x.swap(y);\n    }\n\n#if defined(__SUNPRO_CC)\n//  Trac ticket #4757; the Sun Solaris compiler can't handle\n//  syntax like 'T(&get_c_array(boost::array<T,N>& arg))[N]'\n//  \n//  We can't just use this for all compilers, because the \n//      borland compilers can't handle this form. \n    namespace detail {\n       template <typename T, std::size_t N> struct c_array\n       {\n           typedef T type[N];\n       };\n    }\n    \n   // Specific for boost::array: simply returns its elems data member.\n   template <typename T, std::size_t N>\n   typename detail::c_array<T,N>::type& get_c_array(boost::array<T,N>& arg)\n   {\n       return arg.elems;\n   }\n\n   // Specific for boost::array: simply returns its elems data member.\n   template <typename T, std::size_t N>\n   typename const detail::c_array<T,N>::type& get_c_array(const boost::array<T,N>& arg)\n   {\n       return arg.elems;\n   }\n#else\n// Specific for boost::array: simply returns its elems data member.\n    template <typename T, std::size_t N>\n    T(&get_c_array(boost::array<T,N>& arg))[N]\n    {\n        return arg.elems;\n    }\n    \n    // Const version.\n    template <typename T, std::size_t N>\n    const T(&get_c_array(const boost::array<T,N>& arg))[N]\n    {\n        return arg.elems;\n    }\n#endif\n    \n#if 0\n    // Overload for std::array, assuming that std::array will have\n    // explicit conversion functions as discussed at the WG21 meeting\n    // in Summit, March 2009.\n    template <typename T, std::size_t N>\n    T(&get_c_array(std::array<T,N>& arg))[N]\n    {\n        return static_cast<T(&)[N]>(arg);\n    }\n    \n    // Const version.\n    template <typename T, std::size_t N>\n    const T(&get_c_array(const std::array<T,N>& arg))[N]\n    {\n        return static_cast<T(&)[N]>(arg);\n    }\n#endif\n\n\n    template<class T, 
std::size_t N>\n    std::size_t hash_value(const array<T,N>& arr)\n    {\n        return boost::hash_range(arr.begin(), arr.end());\n    }\n\n} /* namespace boost */\n\n\n#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)  \n# pragma warning(pop)  \n#endif \n\n#endif /*BOOST_ARRAY_HPP*/\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/assert.hpp",
    "content": "//\n//  boost/assert.hpp - BOOST_ASSERT(expr)\n//                     BOOST_ASSERT_MSG(expr, msg)\n//                     BOOST_VERIFY(expr)\n//                     BOOST_VERIFY_MSG(expr, msg)\n//                     BOOST_ASSERT_IS_VOID\n//\n//  Copyright (c) 2001, 2002 Peter Dimov and Multi Media Ltd.\n//  Copyright (c) 2007, 2014 Peter Dimov\n//  Copyright (c) Beman Dawes 2011\n//  Copyright (c) 2015 Ion Gaztanaga\n//\n//  Distributed under the Boost Software License, Version 1.0.\n//  See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt\n//\n//  Note: There are no include guards. This is intentional.\n//\n//  See http://www.boost.org/libs/assert/assert.html for documentation.\n//\n\n//\n// Stop inspect complaining about use of 'assert':\n//\n// boostinspect:naassert_macro\n//\n\n//\n// BOOST_ASSERT, BOOST_ASSERT_MSG, BOOST_ASSERT_IS_VOID\n//\n\n#undef BOOST_ASSERT\n#undef BOOST_ASSERT_MSG\n#undef BOOST_ASSERT_IS_VOID\n\n#if defined(BOOST_DISABLE_ASSERTS) || ( defined(BOOST_ENABLE_ASSERT_DEBUG_HANDLER) && defined(NDEBUG) )\n\n# define BOOST_ASSERT(expr) ((void)0)\n# define BOOST_ASSERT_MSG(expr, msg) ((void)0)\n# define BOOST_ASSERT_IS_VOID\n\n#elif defined(BOOST_ENABLE_ASSERT_HANDLER) || ( defined(BOOST_ENABLE_ASSERT_DEBUG_HANDLER) && !defined(NDEBUG) )\n\n#include <boost/config.hpp> // for BOOST_LIKELY\n#include <boost/current_function.hpp>\n\nnamespace boost\n{\n    void assertion_failed(char const * expr, char const * function, char const * file, long line); // user defined\n    void assertion_failed_msg(char const * expr, char const * msg, char const * function, char const * file, long line); // user defined\n} // namespace boost\n\n#define BOOST_ASSERT(expr) (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))\n#define BOOST_ASSERT_MSG(expr, msg) (BOOST_LIKELY(!!(expr))? 
((void)0): ::boost::assertion_failed_msg(#expr, msg, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))\n\n#else\n\n# include <assert.h> // .h to support old libraries w/o <cassert> - effect is the same\n\n# define BOOST_ASSERT(expr) assert(expr)\n# define BOOST_ASSERT_MSG(expr, msg) assert((expr)&&(msg))\n#if defined(NDEBUG)\n# define BOOST_ASSERT_IS_VOID\n#endif\n\n#endif\n\n//\n// BOOST_VERIFY, BOOST_VERIFY_MSG\n//\n\n#undef BOOST_VERIFY\n#undef BOOST_VERIFY_MSG\n\n#if defined(BOOST_DISABLE_ASSERTS) || ( !defined(BOOST_ENABLE_ASSERT_HANDLER) && defined(NDEBUG) )\n\n# define BOOST_VERIFY(expr) ((void)(expr))\n# define BOOST_VERIFY_MSG(expr, msg) ((void)(expr))\n\n#else\n\n# define BOOST_VERIFY(expr) BOOST_ASSERT(expr)\n# define BOOST_VERIFY_MSG(expr, msg) BOOST_ASSERT_MSG(expr,msg)\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/atomic.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2011 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/atomic.hpp\n *\n * This header contains definition of \\c atomic template and \\c atomic_flag.\n */\n\n#ifndef BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_\n#define BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_\n\n#include <boost/atomic/capabilities.hpp>\n#include <boost/atomic/fences.hpp>\n#include <boost/atomic/atomic_flag.hpp>\n#include <boost/atomic/detail/atomic_template.hpp>\n#include <boost/atomic/detail/operations.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\n\nusing atomics::atomic;\n\nusing atomics::atomic_char;\nusing atomics::atomic_uchar;\nusing atomics::atomic_schar;\nusing atomics::atomic_uint8_t;\nusing atomics::atomic_int8_t;\nusing atomics::atomic_ushort;\nusing atomics::atomic_short;\nusing atomics::atomic_uint16_t;\nusing atomics::atomic_int16_t;\nusing atomics::atomic_uint;\nusing atomics::atomic_int;\nusing atomics::atomic_uint32_t;\nusing atomics::atomic_int32_t;\nusing atomics::atomic_ulong;\nusing atomics::atomic_long;\nusing atomics::atomic_uint64_t;\nusing atomics::atomic_int64_t;\n#ifdef BOOST_HAS_LONG_LONG\nusing atomics::atomic_ullong;\nusing atomics::atomic_llong;\n#endif\nusing atomics::atomic_address;\nusing atomics::atomic_bool;\nusing atomics::atomic_wchar_t;\n#if !defined(BOOST_NO_CXX11_CHAR16_T)\nusing atomics::atomic_char16_t;\n#endif\n#if !defined(BOOST_NO_CXX11_CHAR32_T)\nusing atomics::atomic_char32_t;\n#endif\n\nusing atomics::atomic_int_least8_t;\nusing atomics::atomic_uint_least8_t;\nusing atomics::atomic_int_least16_t;\nusing atomics::atomic_uint_least16_t;\nusing atomics::atomic_int_least32_t;\nusing atomics::atomic_uint_least32_t;\nusing atomics::atomic_int_least64_t;\nusing 
atomics::atomic_uint_least64_t;\nusing atomics::atomic_int_fast8_t;\nusing atomics::atomic_uint_fast8_t;\nusing atomics::atomic_int_fast16_t;\nusing atomics::atomic_uint_fast16_t;\nusing atomics::atomic_int_fast32_t;\nusing atomics::atomic_uint_fast32_t;\nusing atomics::atomic_int_fast64_t;\nusing atomics::atomic_uint_fast64_t;\nusing atomics::atomic_intmax_t;\nusing atomics::atomic_uintmax_t;\n\nusing atomics::atomic_size_t;\nusing atomics::atomic_ptrdiff_t;\n\n#if defined(BOOST_HAS_INTPTR_T)\nusing atomics::atomic_intptr_t;\nusing atomics::atomic_uintptr_t;\n#endif\n\n} // namespace boost\n\n#endif // BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/atomic_flag.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2011 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/atomic_flag.hpp\n *\n * This header contains definition of \\c atomic_flag.\n */\n\n#ifndef BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_\n#define BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_\n\n#include <boost/atomic/capabilities.hpp>\n#include <boost/atomic/detail/operations.hpp>\n#include <boost/atomic/detail/atomic_flag.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\n\nusing atomics::atomic_flag;\n\n} // namespace boost\n\n#endif // BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/capabilities.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/capabilities.hpp\n *\n * This header defines feature capabilities macros.\n */\n\n#ifndef BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_\n#define BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/platform.hpp>\n#include <boost/atomic/detail/int_sizes.hpp>\n\n#if !defined(BOOST_ATOMIC_EMULATED)\n#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/caps_)\n#endif\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#ifndef BOOST_ATOMIC_INT8_LOCK_FREE\n#define BOOST_ATOMIC_INT8_LOCK_FREE 0\n#endif\n\n#ifndef BOOST_ATOMIC_INT16_LOCK_FREE\n#define BOOST_ATOMIC_INT16_LOCK_FREE 0\n#endif\n\n#ifndef BOOST_ATOMIC_INT32_LOCK_FREE\n#define BOOST_ATOMIC_INT32_LOCK_FREE 0\n#endif\n\n#ifndef BOOST_ATOMIC_INT64_LOCK_FREE\n#define BOOST_ATOMIC_INT64_LOCK_FREE 0\n#endif\n\n#ifndef BOOST_ATOMIC_INT128_LOCK_FREE\n#define BOOST_ATOMIC_INT128_LOCK_FREE 0\n#endif\n\n\n#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE\n#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE\n#endif\n\n#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE\n#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE\n#endif\n\n#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE\n#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#endif\n\n#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE\n#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8\n#define 
BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE\n#else\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0\n#endif\n#endif\n\n#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE\n#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1\n#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2\n#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4\n#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8\n#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE\n#else\n#define BOOST_ATOMIC_SHORT_LOCK_FREE 0\n#endif\n#endif\n\n#ifndef BOOST_ATOMIC_INT_LOCK_FREE\n#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1\n#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2\n#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4\n#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8\n#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE\n#else\n#define BOOST_ATOMIC_INT_LOCK_FREE 0\n#endif\n#endif\n\n#ifndef BOOST_ATOMIC_LONG_LOCK_FREE\n#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1\n#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2\n#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4\n#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8\n#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE\n#else\n#define BOOST_ATOMIC_LONG_LOCK_FREE 0\n#endif\n#endif\n\n#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE\n#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1\n#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2\n#define BOOST_ATOMIC_LLONG_LOCK_FREE 
BOOST_ATOMIC_INT16_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4\n#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8\n#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE\n#else\n#define BOOST_ATOMIC_LLONG_LOCK_FREE 0\n#endif\n#endif\n\n#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE\n#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8\n#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE\n#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4\n#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#else\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 0\n#endif\n#endif\n\n#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE\n\n#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE\n// We store bools in 1-byte storage in all backends\n#define BOOST_ATOMIC_BOOL_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE\n#endif\n\n#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE\n#define BOOST_ATOMIC_FLAG_LOCK_FREE BOOST_ATOMIC_BOOL_LOCK_FREE\n#endif\n\n#ifndef BOOST_ATOMIC_THREAD_FENCE\n#define BOOST_ATOMIC_THREAD_FENCE 0\n#endif\n\n#ifndef BOOST_ATOMIC_SIGNAL_FENCE\n#define BOOST_ATOMIC_SIGNAL_FENCE 0\n#endif\n\n#endif // BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/atomic_flag.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/atomic_flag.hpp\n *\n * This header contains interface definition of \\c atomic_flag.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_\n\n#include <boost/assert.hpp>\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/operations_lockfree.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n/*\n * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,\n *                      see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.\n */\n\nnamespace boost {\nnamespace atomics {\n\n#if defined(BOOST_NO_CXX11_CONSTEXPR) || defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX)\n#define BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT\n#else\n#define BOOST_ATOMIC_FLAG_INIT {}\n#endif\n\nstruct atomic_flag\n{\n    typedef atomics::detail::operations< 1u, false > operations;\n    typedef operations::storage_type storage_type;\n\n    operations::aligned_storage_type m_storage;\n\n    BOOST_FORCEINLINE BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0)\n    {\n    }\n\n    BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return operations::test_and_set(m_storage.value, order);\n    }\n\n    BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_acquire);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n        operations::clear(m_storage.value, order);\n    }\n\n    BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&))\n    BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag 
const&))\n};\n\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/atomic_template.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2011 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/atomic_template.hpp\n *\n * This header contains interface definition of \\c atomic template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_\n\n#include <cstddef>\n#include <boost/cstdint.hpp>\n#include <boost/assert.hpp>\n#include <boost/type_traits/is_signed.hpp>\n#include <boost/type_traits/is_integral.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/bitwise_cast.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(BOOST_MSVC)\n#pragma warning(push)\n// 'boost::atomics::atomic<T>' : multiple assignment operators specified\n#pragma warning(disable: 4522)\n#endif\n\n/*\n * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,\n *                      see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.\n */\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\nBOOST_FORCEINLINE BOOST_CONSTEXPR memory_order deduce_failure_order(memory_order order) BOOST_NOEXCEPT\n{\n    return order == memory_order_acq_rel ? memory_order_acquire : (order == memory_order_release ? 
memory_order_relaxed : order);\n}\n\nBOOST_FORCEINLINE BOOST_CONSTEXPR bool cas_failure_order_must_not_be_stronger_than_success_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n{\n    // 15 == (memory_order_seq_cst | memory_order_consume), see memory_order.hpp\n    // Given the enum values we can test the strength of memory order requirements with this single condition.\n    return (failure_order & 15u) <= (success_order & 15u);\n}\n\ntemplate< typename T, bool IsInt = boost::is_integral< T >::value >\nstruct classify\n{\n    typedef void type;\n};\n\ntemplate< typename T >\nstruct classify< T, true > { typedef int type; };\n\ntemplate< typename T >\nstruct classify< T*, false > { typedef void* type; };\n\ntemplate< typename T, typename Kind >\nclass base_atomic;\n\n//! Implementation for integers\ntemplate< typename T >\nclass base_atomic< T, int >\n{\nprivate:\n    typedef T value_type;\n    typedef T difference_type;\n    typedef atomics::detail::operations< storage_size_of< value_type >::value, boost::is_signed< T >::value > operations;\n\nprotected:\n    typedef value_type value_arg_type;\n\npublic:\n    typedef typename operations::storage_type storage_type;\n\nprotected:\n    typename operations::aligned_storage_type m_storage;\n\npublic:\n    BOOST_DEFAULTED_FUNCTION(base_atomic(), {})\n    BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}\n\n    BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_consume);\n        BOOST_ASSERT(order != memory_order_acquire);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        operations::store(m_storage.value, static_cast< storage_type >(v), order);\n    }\n\n    BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_release);\n        
BOOST_ASSERT(order != memory_order_acq_rel);\n\n        return static_cast< value_type >(operations::load(m_storage.value, order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return static_cast< value_type >(operations::fetch_add(m_storage.value, static_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return static_cast< value_type >(operations::fetch_sub(m_storage.value, static_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return static_cast< value_type >(operations::exchange(m_storage.value, static_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = static_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_strong(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);\n        expected = static_cast< value_type >(old_value);\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool 
compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = static_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_weak(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);\n        expected = static_cast< value_type >(old_value);\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return static_cast< value_type >(operations::fetch_and(m_storage.value, static_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return static_cast< value_type >(operations::fetch_or(m_storage.value, static_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return static_cast< value_type >(operations::fetch_xor(m_storage.value, static_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT\n    {\n        return operations::is_lock_free(m_storage.value);\n    }\n\n    BOOST_FORCEINLINE value_type operator++(int) volatile 
BOOST_NOEXCEPT\n    {\n        return fetch_add(1);\n    }\n\n    BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT\n    {\n        return fetch_add(1) + 1;\n    }\n\n    BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT\n    {\n        return fetch_sub(1);\n    }\n\n    BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT\n    {\n        return fetch_sub(1) - 1;\n    }\n\n    BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT\n    {\n        return fetch_add(v) + v;\n    }\n\n    BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT\n    {\n        return fetch_sub(v) - v;\n    }\n\n    BOOST_FORCEINLINE value_type operator&=(value_type v) volatile BOOST_NOEXCEPT\n    {\n        return fetch_and(v) & v;\n    }\n\n    BOOST_FORCEINLINE value_type operator|=(value_type v) volatile BOOST_NOEXCEPT\n    {\n        return fetch_or(v) | v;\n    }\n\n    BOOST_FORCEINLINE value_type operator^=(value_type v) volatile BOOST_NOEXCEPT\n    {\n        return fetch_xor(v) ^ v;\n    }\n\n    BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))\n    BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))\n};\n\n//! 
Implementation for bool\ntemplate< >\nclass base_atomic< bool, int >\n{\nprivate:\n    typedef bool value_type;\n    typedef atomics::detail::operations< 1u, false > operations;\n\nprotected:\n    typedef value_type value_arg_type;\n\npublic:\n    typedef operations::storage_type storage_type;\n\nprotected:\n    operations::aligned_storage_type m_storage;\n\npublic:\n    BOOST_DEFAULTED_FUNCTION(base_atomic(), {})\n    BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}\n\n    BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_consume);\n        BOOST_ASSERT(order != memory_order_acquire);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        operations::store(m_storage.value, static_cast< storage_type >(v), order);\n    }\n\n    BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_release);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        return !!operations::load(m_storage.value, order);\n    }\n\n    BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return !!operations::exchange(m_storage.value, static_cast< storage_type >(v), order);\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = static_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_strong(m_storage.value, 
old_value, static_cast< storage_type >(desired), success_order, failure_order);\n        expected = !!old_value;\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = static_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_weak(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);\n        expected = !!old_value;\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT\n    {\n        return operations::is_lock_free(m_storage.value);\n    }\n\n    BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))\n    BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))\n};\n\n\n//! 
Implementation for user-defined types, such as structs and enums\ntemplate< typename T >\nclass base_atomic< T, void >\n{\nprivate:\n    typedef T value_type;\n    typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;\n\nprotected:\n    typedef value_type const& value_arg_type;\n\npublic:\n    typedef typename operations::storage_type storage_type;\n\nprotected:\n    typename operations::aligned_storage_type m_storage;\n\npublic:\n    BOOST_FORCEINLINE explicit base_atomic(value_type const& v = value_type()) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))\n    {\n    }\n\n    BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_consume);\n        BOOST_ASSERT(order != memory_order_acquire);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        operations::store(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);\n    }\n\n    BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_release);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        return atomics::detail::bitwise_cast< value_type >(operations::load(m_storage.value, order));\n    }\n\n    BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return atomics::detail::bitwise_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);\n        expected = atomics::detail::bitwise_cast< value_type >(old_value);\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);\n        expected = atomics::detail::bitwise_cast< value_type >(old_value);\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT\n    {\n        return operations::is_lock_free(m_storage.value);\n    }\n\n    
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))\n    BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))\n};\n\n\n//! Implementation for pointers\ntemplate< typename T >\nclass base_atomic< T*, void* >\n{\nprivate:\n    typedef T* value_type;\n    typedef std::ptrdiff_t difference_type;\n    typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;\n\nprotected:\n    typedef value_type value_arg_type;\n\npublic:\n    typedef typename operations::storage_type storage_type;\n\nprotected:\n    typename operations::aligned_storage_type m_storage;\n\npublic:\n    BOOST_DEFAULTED_FUNCTION(base_atomic(), {})\n    BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))\n    {\n    }\n\n    BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_consume);\n        BOOST_ASSERT(order != memory_order_acquire);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        operations::store(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);\n    }\n\n    BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_release);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        return atomics::detail::bitwise_cast< value_type >(operations::load(m_storage.value, order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return atomics::detail::bitwise_cast< value_type >(operations::fetch_add(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    
{\n        return atomics::detail::bitwise_cast< value_type >(operations::fetch_sub(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order));\n    }\n\n    BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return atomics::detail::bitwise_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);\n        expected = atomics::detail::bitwise_cast< value_type >(old_value);\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = 
atomics::detail::bitwise_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);\n        expected = atomics::detail::bitwise_cast< value_type >(old_value);\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT\n    {\n        return operations::is_lock_free(m_storage.value);\n    }\n\n    BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT\n    {\n        return fetch_add(1);\n    }\n\n    BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT\n    {\n        return fetch_add(1) + 1;\n    }\n\n    BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT\n    {\n        return fetch_sub(1);\n    }\n\n    BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT\n    {\n        return fetch_sub(1) - 1;\n    }\n\n    BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT\n    {\n        return fetch_add(v) + v;\n    }\n\n    BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT\n    {\n        return fetch_sub(v) - v;\n    }\n\n    BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))\n    BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))\n};\n\n\n//! 
Implementation for void pointers\ntemplate< >\nclass base_atomic< void*, void* >\n{\nprivate:\n    typedef void* value_type;\n    typedef std::ptrdiff_t difference_type;\n    typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;\n\nprotected:\n    typedef value_type value_arg_type;\n\npublic:\n    typedef operations::storage_type storage_type;\n\nprotected:\n    operations::aligned_storage_type m_storage;\n\npublic:\n    BOOST_DEFAULTED_FUNCTION(base_atomic(), {})\n    BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))\n    {\n    }\n\n    BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_consume);\n        BOOST_ASSERT(order != memory_order_acquire);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        operations::store(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);\n    }\n\n    BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(order != memory_order_release);\n        BOOST_ASSERT(order != memory_order_acq_rel);\n\n        return atomics::detail::bitwise_cast< value_type >(operations::load(m_storage.value, order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return atomics::detail::bitwise_cast< value_type >(operations::fetch_add(m_storage.value, static_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return atomics::detail::bitwise_cast< value_type >(operations::fetch_sub(m_storage.value, static_cast< storage_type >(v), order));\n    }\n\n    
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return atomics::detail::bitwise_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);\n        expected = atomics::detail::bitwise_cast< value_type >(old_value);\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT\n    {\n        BOOST_ASSERT(failure_order != memory_order_release);\n        BOOST_ASSERT(failure_order != memory_order_acq_rel);\n        BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));\n\n        storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);\n        const bool res = operations::compare_exchange_weak(m_storage.value, old_value, 
atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);\n        expected = atomics::detail::bitwise_cast< value_type >(old_value);\n        return res;\n    }\n\n    BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT\n    {\n        return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));\n    }\n\n    BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT\n    {\n        return operations::is_lock_free(m_storage.value);\n    }\n\n    BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT\n    {\n        return fetch_add(1);\n    }\n\n    BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT\n    {\n        return (char*)fetch_add(1) + 1;\n    }\n\n    BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT\n    {\n        return fetch_sub(1);\n    }\n\n    BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT\n    {\n        return (char*)fetch_sub(1) - 1;\n    }\n\n    BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT\n    {\n        return (char*)fetch_add(v) + v;\n    }\n\n    BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT\n    {\n        return (char*)fetch_sub(v) - v;\n    }\n\n    BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))\n    BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))\n};\n\n} // namespace detail\n\ntemplate< typename T >\nclass atomic :\n    public atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type >\n{\nprivate:\n    typedef T value_type;\n    typedef atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type > base_type;\n    typedef typename base_type::value_arg_type value_arg_type;\n\npublic:\n    typedef typename base_type::storage_type 
storage_type;\n\npublic:\n    BOOST_DEFAULTED_FUNCTION(atomic(), BOOST_NOEXCEPT {})\n\n    // NOTE: The constructor is made explicit because gcc 4.7 complains that\n    //       operator=(value_arg_type) is considered ambiguous with operator=(atomic const&)\n    //       in assignment expressions, even though conversion to atomic<> is less preferred\n    //       than conversion to value_arg_type.\n    BOOST_FORCEINLINE explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {}\n\n    BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT\n    {\n        this->store(v);\n        return v;\n    }\n\n    BOOST_FORCEINLINE operator value_type() volatile const BOOST_NOEXCEPT\n    {\n        return this->load();\n    }\n\n    BOOST_FORCEINLINE storage_type& storage() BOOST_NOEXCEPT { return this->m_storage.value; }\n    BOOST_FORCEINLINE storage_type volatile& storage() volatile BOOST_NOEXCEPT { return this->m_storage.value; }\n    BOOST_FORCEINLINE storage_type const& storage() const BOOST_NOEXCEPT { return this->m_storage.value; }\n    BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return this->m_storage.value; }\n\n    BOOST_DELETED_FUNCTION(atomic(atomic const&))\n    BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&))\n    BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&) volatile)\n};\n\ntypedef atomic< char > atomic_char;\ntypedef atomic< unsigned char > atomic_uchar;\ntypedef atomic< signed char > atomic_schar;\ntypedef atomic< uint8_t > atomic_uint8_t;\ntypedef atomic< int8_t > atomic_int8_t;\ntypedef atomic< unsigned short > atomic_ushort;\ntypedef atomic< short > atomic_short;\ntypedef atomic< uint16_t > atomic_uint16_t;\ntypedef atomic< int16_t > atomic_int16_t;\ntypedef atomic< unsigned int > atomic_uint;\ntypedef atomic< int > atomic_int;\ntypedef atomic< uint32_t > atomic_uint32_t;\ntypedef atomic< int32_t > atomic_int32_t;\ntypedef atomic< unsigned 
long > atomic_ulong;\ntypedef atomic< long > atomic_long;\ntypedef atomic< uint64_t > atomic_uint64_t;\ntypedef atomic< int64_t > atomic_int64_t;\n#ifdef BOOST_HAS_LONG_LONG\ntypedef atomic< boost::ulong_long_type > atomic_ullong;\ntypedef atomic< boost::long_long_type > atomic_llong;\n#endif\ntypedef atomic< void* > atomic_address;\ntypedef atomic< bool > atomic_bool;\ntypedef atomic< wchar_t > atomic_wchar_t;\n#if !defined(BOOST_NO_CXX11_CHAR16_T)\ntypedef atomic< char16_t > atomic_char16_t;\n#endif\n#if !defined(BOOST_NO_CXX11_CHAR32_T)\ntypedef atomic< char32_t > atomic_char32_t;\n#endif\n\ntypedef atomic< int_least8_t > atomic_int_least8_t;\ntypedef atomic< uint_least8_t > atomic_uint_least8_t;\ntypedef atomic< int_least16_t > atomic_int_least16_t;\ntypedef atomic< uint_least16_t > atomic_uint_least16_t;\ntypedef atomic< int_least32_t > atomic_int_least32_t;\ntypedef atomic< uint_least32_t > atomic_uint_least32_t;\ntypedef atomic< int_least64_t > atomic_int_least64_t;\ntypedef atomic< uint_least64_t > atomic_uint_least64_t;\ntypedef atomic< int_fast8_t > atomic_int_fast8_t;\ntypedef atomic< uint_fast8_t > atomic_uint_fast8_t;\ntypedef atomic< int_fast16_t > atomic_int_fast16_t;\ntypedef atomic< uint_fast16_t > atomic_uint_fast16_t;\ntypedef atomic< int_fast32_t > atomic_int_fast32_t;\ntypedef atomic< uint_fast32_t > atomic_uint_fast32_t;\ntypedef atomic< int_fast64_t > atomic_int_fast64_t;\ntypedef atomic< uint_fast64_t > atomic_uint_fast64_t;\ntypedef atomic< intmax_t > atomic_intmax_t;\ntypedef atomic< uintmax_t > atomic_uintmax_t;\n\ntypedef atomic< std::size_t > atomic_size_t;\ntypedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;\n\n#if defined(BOOST_HAS_INTPTR_T)\ntypedef atomic< intptr_t > atomic_intptr_t;\ntypedef atomic< uintptr_t > atomic_uintptr_t;\n#endif\n\n} // namespace atomics\n} // namespace boost\n\n#if defined(BOOST_MSVC)\n#pragma warning(pop)\n#endif\n\n#endif // BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/bitwise_cast.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2013 - 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/bitwise_cast.hpp\n *\n * This header defines \\c bitwise_cast used to convert between storage and value types\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n#if !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)\n#include <cstring>\n#endif\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\ntemplate< typename To, typename From >\nBOOST_FORCEINLINE To bitwise_cast(From const& from) BOOST_NOEXCEPT\n{\n    struct\n    {\n        To to;\n    }\n    value = {};\n    BOOST_ATOMIC_DETAIL_MEMCPY\n    (\n        &reinterpret_cast< char& >(value.to),\n        &reinterpret_cast< const char& >(from),\n        (sizeof(From) < sizeof(To) ? sizeof(From) : sizeof(To))\n    );\n    return value.to;\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_gcc_alpha.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_gcc_alpha.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#define BOOST_ATOMIC_INT64_LOCK_FREE 2\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_gcc_arm.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2009 Phil Endecott\n * Copyright (c) 2013 Tim Blechmann\n * ARM Code by Phil Endecott, based on other architectures.\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_gcc_arm.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__))\n// ARMv7 and later have dmb instruction\n#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1\n#endif\n\n#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))\n// ARMv6k and ARMv7 have 8 and 16 ldrex/strex variants\n#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1\n#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1\n#if !(((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7M__))\n// ARMv6k and ARMv7 except ARMv7-M have 64-bit ldrex/strex variants.\n// Unfortunately, GCC (at least 4.7.3 on Ubuntu) does not allocate register pairs properly when targeting ARMv6k Thumb,\n// which is required for ldrexd/strexd instructions, so we disable 64-bit support. 
When targeting ARMv6k ARM\n// or ARMv7 (both ARM and Thumb 2) it works as expected.\n#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1\n#endif\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)\n#define BOOST_ATOMIC_INT64_LOCK_FREE 2\n#endif\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_gcc_atomic.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_gcc_atomic.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/int_sizes.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1\n#endif\n\n#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\n#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1\n#endif\n\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))\n#define BOOST_ATOMIC_INT128_LOCK_FREE 2\n#else\n#define BOOST_ATOMIC_INT128_LOCK_FREE 0\n#endif\n\n#if __GCC_ATOMIC_LLONG_LOCK_FREE == 2\n#define BOOST_ATOMIC_LLONG_LOCK_FREE 2\n#else\n#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE\n#endif\n\n#if __GCC_ATOMIC_LONG_LOCK_FREE == 2\n#define BOOST_ATOMIC_LONG_LOCK_FREE 2\n#else\n#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE\n#endif\n\n#if __GCC_ATOMIC_INT_LOCK_FREE == 2\n#define BOOST_ATOMIC_INT_LOCK_FREE 2\n#else\n#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE\n#endif\n\n#if __GCC_ATOMIC_SHORT_LOCK_FREE == 2\n#define BOOST_ATOMIC_SHORT_LOCK_FREE 2\n#else\n#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE\n#endif\n\n#if __GCC_ATOMIC_CHAR_LOCK_FREE == 2\n#define BOOST_ATOMIC_CHAR_LOCK_FREE 2\n#else\n#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE\n#endif\n\n#if __GCC_ATOMIC_POINTER_LOCK_FREE == 2\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n#else\n#define 
BOOST_ATOMIC_POINTER_LOCK_FREE 0\n#endif\n\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_CHAR_LOCK_FREE\n\n#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE\n#else\n#define BOOST_ATOMIC_INT16_LOCK_FREE 0\n#endif\n\n#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4\n#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4\n#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4\n#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4\n#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE\n#else\n#define BOOST_ATOMIC_INT32_LOCK_FREE 0\n#endif\n\n#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8\n#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8\n#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8\n#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8\n#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE\n#else\n#define BOOST_ATOMIC_INT64_LOCK_FREE 0\n#endif\n\n\n#if __GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2\n#define 
BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE\n#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE\n#else\n#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0\n#endif\n\n#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE\n#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_gcc_ppc.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_gcc_ppc.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#if defined(__powerpc64__) || defined(__PPC64__)\n#define BOOST_ATOMIC_INT64_LOCK_FREE 2\n#endif\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_gcc_sparc.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2010 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_gcc_sparc.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#define BOOST_ATOMIC_INT64_LOCK_FREE 2\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_gcc_sync.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2011 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_gcc_sync.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1\n#endif\n\n#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\n#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1\n#endif\n\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#endif\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#endif\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#endif\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\\\n    || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\n#define BOOST_ATOMIC_INT64_LOCK_FREE 2\n#endif\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\n#define BOOST_ATOMIC_INT128_LOCK_FREE 2\n#endif\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 
2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_gcc_x86.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2013 - 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_gcc_x86.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(__i386__) &&\\\n    (\\\n        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\\\n        defined(__i586__) || defined(__i686__) || defined(__pentium4__) || defined(__nocona__) || defined(__core2__) || defined(__corei7__) ||\\\n        defined(__k6__) || defined(__athlon__) || defined(__k8__) || defined(__amdfam10__) || defined(__bdver1__) || defined(__bdver2__) || defined(__bdver3__) || defined(__btver1__) || defined(__btver2__)\\\n    )\n#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1\n#endif\n\n#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\n#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)\n#define BOOST_ATOMIC_INT64_LOCK_FREE 2\n#endif\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))\n#define BOOST_ATOMIC_INT128_LOCK_FREE 2\n#endif\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_linux_arm.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009, 2011 Helge Bahmann\n * Copyright (c) 2009 Phil Endecott\n * Copyright (c) 2013 Tim Blechmann\n * Linux-specific code by Phil Endecott\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_linux_arm.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_msvc_arm.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2012 - 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_msvc_arm.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#define BOOST_ATOMIC_INT64_LOCK_FREE 2\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_msvc_x86.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2012 - 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_msvc_x86.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(_M_IX86) && _M_IX86 >= 500\n#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1\n#endif\n\n#if _MSC_VER >= 1500 && defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)\n#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n\n#if defined(_M_AMD64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)\n#define BOOST_ATOMIC_INT64_LOCK_FREE 2\n#endif\n\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))\n#define BOOST_ATOMIC_INT128_LOCK_FREE 2\n#endif\n\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/caps_windows.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2012 - 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/caps_windows.hpp\n *\n * This header defines feature capabilities macros\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#define BOOST_ATOMIC_INT8_LOCK_FREE 2\n#define BOOST_ATOMIC_INT16_LOCK_FREE 2\n#define BOOST_ATOMIC_INT32_LOCK_FREE 2\n#define BOOST_ATOMIC_POINTER_LOCK_FREE 2\n\n#define BOOST_ATOMIC_THREAD_FENCE 2\n#define BOOST_ATOMIC_SIGNAL_FENCE 2\n\n#endif // BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/config.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2012 Hartmut Kaiser\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/config.hpp\n *\n * This header defines configuraion macros for Boost.Atomic\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_\n\n#include <boost/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(__has_builtin)\n#if __has_builtin(__builtin_memcpy)\n#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY\n#endif\n#if __has_builtin(__builtin_memcmp)\n#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP\n#endif\n#elif defined(BOOST_GCC)\n#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY\n#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP\n#endif\n\n#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)\n#define BOOST_ATOMIC_DETAIL_MEMCPY __builtin_memcpy\n#else\n#define BOOST_ATOMIC_DETAIL_MEMCPY std::memcpy\n#endif\n\n#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP)\n#define BOOST_ATOMIC_DETAIL_MEMCMP __builtin_memcmp\n#else\n#define BOOST_ATOMIC_DETAIL_MEMCMP std::memcmp\n#endif\n\n#if defined(__CUDACC__)\n// nvcc does not support alternatives in asm statement constraints\n#define BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES\n// nvcc does not support condition code register (\"cc\") clobber in asm statements\n#define BOOST_ATOMIC_DETAIL_NO_ASM_CLOBBER_CC\n#endif\n\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CLOBBER_CC)\n#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC \"cc\"\n#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"cc\",\n#else\n#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA\n#endif\n\n#if (defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)) && (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) < 403)\n// This macro indicates we're using 
older binutils that don't support implied zero displacements for memory opereands,\n// making code like this invalid:\n//   movl 4+(%%edx), %%eax\n#define BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS\n#endif\n\n#if defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) < 40500)\n// This macro indicates that the compiler does not support allocating rax:rdx register pairs (\"A\") in asm blocks\n#define BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS\n#endif\n\n#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/int_sizes.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/int_sizes.hpp\n *\n * This header defines macros for testing buitin integer type sizes\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n// GCC and compatible compilers define internal macros with builtin type traits\n#if defined(__SIZEOF_SHORT__)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT __SIZEOF_SHORT__\n#endif\n#if defined(__SIZEOF_INT__)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_INT __SIZEOF_INT__\n#endif\n#if defined(__SIZEOF_LONG__)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG __SIZEOF_LONG__\n#endif\n#if defined(__SIZEOF_LONG_LONG__)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG __SIZEOF_LONG_LONG__\n#endif\n#if defined(__SIZEOF_WCHAR_T__)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T __SIZEOF_WCHAR_T__\n#endif\n#if defined(__SIZEOF_POINTER__)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER __SIZEOF_POINTER__\n#elif defined(_MSC_VER)\n#if defined(_M_AMD64) || defined(_M_IA64)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 8\n#else\n#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 4\n#endif\n#endif\n\n#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\\\n    !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)\n\n// Try to deduce sizes from limits\n#include <limits.h>\n#include <boost/cstdint.hpp>\n\n#if (USHRT_MAX + 0) == 0xff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 1\n#elif (USHRT_MAX + 0) == 0xffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 2\n#elif (USHRT_MAX + 0) == 0xffffffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 4\n#elif (USHRT_MAX + 0) == UINT64_C(0xffffffffffffffff)\n#define 
BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 8\n#endif\n\n#if (UINT_MAX + 0) == 0xff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 1\n#elif (UINT_MAX + 0) == 0xffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 2\n#elif (UINT_MAX + 0) == 0xffffffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 4\n#elif (UINT_MAX + 0) == UINT64_C(0xffffffffffffffff)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 8\n#endif\n\n#if (ULONG_MAX + 0) == 0xff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 1\n#elif (ULONG_MAX + 0) == 0xffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 2\n#elif (ULONG_MAX + 0) == 0xffffffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 4\n#elif (ULONG_MAX + 0) == UINT64_C(0xffffffffffffffff)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 8\n#endif\n\n#if defined(__hpux) // HP-UX's value of ULONG_LONG_MAX is unusable in preprocessor expressions\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8\n#else\n\n// The list of the non-standard macros (the ones except ULLONG_MAX) is taken from cstdint.hpp\n#if defined(ULLONG_MAX)\n#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULLONG_MAX\n#elif defined(ULONG_LONG_MAX)\n#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONG_LONG_MAX\n#elif defined(ULONGLONG_MAX)\n#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONGLONG_MAX\n#elif defined(_LLONG_MAX) // strangely enough, this one seems to be holding the limit for the unsigned integer\n#define BOOST_ATOMIC_DETAIL_ULLONG_MAX _LLONG_MAX\n#endif\n\n#if (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 1\n#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 2\n#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xffffffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 4\n#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == UINT64_C(0xffffffffffffffff)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8\n#endif\n\n#endif // defined(__hpux)\n\n#endif\n\n#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)\n\n#include <wchar.h>\n#include <boost/cstdint.hpp>\n\n #if defined(_MSC_VER) && ( _MSC_VER <= 1310 || 
defined(UNDER_CE) && _MSC_VER <= 1500 )\n// MSVC 7.1 and MSVC 8 (arm) define WCHAR_MAX to a value not suitable for constant expressions\n#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2\n#elif (WCHAR_MAX + 0) == 0xff || (WCHAR_MAX + 0) == 0x7f\n#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 1\n#elif (WCHAR_MAX + 0) == 0xffff || (WCHAR_MAX + 0) == 0x7fff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2\n#elif (WCHAR_MAX + 0) == 0xffffffff || (WCHAR_MAX + 0) == 0x7fffffff\n#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 4\n#elif (WCHAR_MAX + 0) == UINT64_C(0xffffffffffffffff) || (WCHAR_MAX + 0) == INT64_C(0x7fffffffffffffff)\n#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 8\n#endif\n#endif\n\n#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\\\n    !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) ||\\\n    !defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)\n#error Boost.Atomic: Failed to determine builtin integer sizes, the target platform is not supported. Please, report to the developers.\n#endif\n\n#endif // BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/interlocked.hpp",
    "content": "#ifndef BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP\n#define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP\n\n//  Copyright (c) 2009 Helge Bahmann\n//  Copyright (c) 2012 - 2014 Andrey Semashev\n//\n//  Distributed under the Boost Software License, Version 1.0.\n//  See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(_WIN32_WCE)\n\n#if _WIN32_WCE >= 0x600\n\nextern \"C\" long __cdecl _InterlockedCompareExchange( long volatile *, long, long );\nextern \"C\" long __cdecl _InterlockedExchangeAdd( long volatile *, long );\nextern \"C\" long __cdecl _InterlockedExchange( long volatile *, long );\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))\n\n#else // _WIN32_WCE >= 0x600\n\nextern \"C\" long __cdecl InterlockedCompareExchange( long*, long, long );\nextern \"C\" long __cdecl InterlockedExchangeAdd( long*, long );\nextern \"C\" long __cdecl InterlockedExchange( long*, long );\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), exchange, compare)\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))\n\n#endif // _WIN32_WCE >= 0x600\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, 
exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))\n\n#elif defined(_MSC_VER) && _MSC_VER >= 1310\n\n#if _MSC_VER < 1400\n\nextern \"C\" long __cdecl _InterlockedCompareExchange( long volatile *, long, long );\nextern \"C\" long __cdecl _InterlockedExchangeAdd( long volatile *, long );\nextern \"C\" long __cdecl _InterlockedExchange( long volatile *, long );\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedCompareExchange)\n#pragma intrinsic(_InterlockedExchangeAdd)\n#pragma intrinsic(_InterlockedExchange)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))\n\n#else // _MSC_VER < 1400\n\n#include <intrin.h>\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedCompareExchange)\n#pragma intrinsic(_InterlockedExchangeAdd)\n#pragma intrinsic(_InterlockedExchange)\n#pragma intrinsic(_InterlockedAnd)\n#pragma intrinsic(_InterlockedOr)\n#pragma intrinsic(_InterlockedXor)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))\n#define 
BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))\n\n#if (defined(_M_IX86) && _M_IX86 >= 500) || defined(_M_AMD64) || defined(_M_IA64)\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedCompareExchange64)\n#endif\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))\n#endif\n\n#if _MSC_VER >= 1500 && defined(_M_AMD64)\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedCompareExchange128)\n#endif\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(dest, exchange, compare) _InterlockedCompareExchange128((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))\n#endif\n\n#if _MSC_VER >= 1600\n\n// MSVC 2010 and later provide intrinsics for 8 and 16 bit integers.\n// Note that for each bit count these macros must be either all defined or all not defined.\n// Otherwise atomic<> operations will be implemented inconsistently.\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedCompareExchange8)\n#pragma intrinsic(_InterlockedExchangeAdd8)\n#pragma intrinsic(_InterlockedExchange8)\n#pragma intrinsic(_InterlockedAnd8)\n#pragma intrinsic(_InterlockedOr8)\n#pragma intrinsic(_InterlockedXor8)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(dest, exchange, compare) _InterlockedCompareExchange8((char*)(dest), (char)(exchange), (char)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) _InterlockedExchangeAdd8((char*)(dest), (char)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) _InterlockedExchange8((char*)(dest), (char)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_AND8(dest, arg) _InterlockedAnd8((char*)(dest), (char)(arg))\n#define 
BOOST_ATOMIC_INTERLOCKED_OR8(dest, arg) _InterlockedOr8((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR8(dest, arg) _InterlockedXor8((char*)(dest), (char)(arg))\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedCompareExchange16)\n#pragma intrinsic(_InterlockedExchangeAdd16)\n#pragma intrinsic(_InterlockedExchange16)\n#pragma intrinsic(_InterlockedAnd16)\n#pragma intrinsic(_InterlockedOr16)\n#pragma intrinsic(_InterlockedXor16)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(dest, exchange, compare) _InterlockedCompareExchange16((short*)(dest), (short)(exchange), (short)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(dest, addend) _InterlockedExchangeAdd16((short*)(dest), (short)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) _InterlockedExchange16((short*)(dest), (short)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_AND16(dest, arg) _InterlockedAnd16((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR16(dest, arg) _InterlockedOr16((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR16(dest, arg) _InterlockedXor16((short*)(dest), (short)(arg))\n\n#endif // _MSC_VER >= 1600\n\n#if defined(_M_AMD64) || defined(_M_IA64)\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedExchangeAdd64)\n#pragma intrinsic(_InterlockedExchange64)\n#pragma intrinsic(_InterlockedAnd64)\n#pragma intrinsic(_InterlockedOr64)\n#pragma intrinsic(_InterlockedXor64)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) 
_InterlockedXor64((__int64*)(dest), (__int64)(arg))\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedCompareExchangePointer)\n#pragma intrinsic(_InterlockedExchangePointer)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((long*)(dest), byte_offset))\n\n#elif defined(_M_IX86)\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval)))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))\n\n#endif\n\n#if _MSC_VER >= 1700 && defined(_M_ARM)\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedExchangeAdd64)\n#pragma intrinsic(_InterlockedExchange64)\n#pragma intrinsic(_InterlockedAnd64)\n#pragma intrinsic(_InterlockedOr64)\n#pragma intrinsic(_InterlockedXor64)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))\n\n#if defined(BOOST_MSVC)\n#pragma 
intrinsic(_InterlockedCompareExchange8_nf)\n#pragma intrinsic(_InterlockedCompareExchange8_acq)\n#pragma intrinsic(_InterlockedCompareExchange8_rel)\n#pragma intrinsic(_InterlockedCompareExchange16_nf)\n#pragma intrinsic(_InterlockedCompareExchange16_acq)\n#pragma intrinsic(_InterlockedCompareExchange16_rel)\n#pragma intrinsic(_InterlockedCompareExchange_nf)\n#pragma intrinsic(_InterlockedCompareExchange_acq)\n#pragma intrinsic(_InterlockedCompareExchange_rel)\n#pragma intrinsic(_InterlockedCompareExchange64)\n#pragma intrinsic(_InterlockedCompareExchange64_nf)\n#pragma intrinsic(_InterlockedCompareExchange64_acq)\n#pragma intrinsic(_InterlockedCompareExchange64_rel)\n#pragma intrinsic(_InterlockedCompareExchangePointer)\n#pragma intrinsic(_InterlockedCompareExchangePointer_nf)\n#pragma intrinsic(_InterlockedCompareExchangePointer_acq)\n#pragma intrinsic(_InterlockedCompareExchangePointer_rel)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(dest, exchange, compare) _InterlockedCompareExchange8_nf((char*)(dest), (char)(exchange), (char)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange8_acq((char*)(dest), (char)(exchange), (char)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(dest, exchange, compare) _InterlockedCompareExchange8_rel((char*)(dest), (char)(exchange), (char)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(dest, exchange, compare) _InterlockedCompareExchange16_nf((short*)(dest), (short)(exchange), (short)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange16_acq((short*)(dest), (short)(exchange), (short)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(dest, exchange, compare) _InterlockedCompareExchange16_rel((short*)(dest), (short)(exchange), (short)(compare))\n#define 
BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(dest, exchange, compare) _InterlockedCompareExchange_nf((long*)(dest), (long)(exchange), (long)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange_acq((long*)(dest), (long)(exchange), (long)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(dest, exchange, compare) _InterlockedCompareExchange_rel((long*)(dest), (long)(exchange), (long)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(dest, exchange, compare) _InterlockedCompareExchange64_nf((__int64*)(dest), (__int64)(exchange), (__int64)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange64_acq((__int64*)(dest), (__int64)(exchange), (__int64)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(dest, exchange, compare) _InterlockedCompareExchange64_rel((__int64*)(dest), (__int64)(exchange), (__int64)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELAXED(dest, exchange, compare) _InterlockedCompareExchangePointer_nf((void**)(dest), (void*)(exchange), (void*)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchangePointer_acq((void**)(dest), (void*)(exchange), (void*)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELEASE(dest, exchange, compare) _InterlockedCompareExchangePointer_rel((void**)(dest), (void*)(exchange), (void*)(compare))\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedExchangeAdd8_nf)\n#pragma 
intrinsic(_InterlockedExchangeAdd8_acq)\n#pragma intrinsic(_InterlockedExchangeAdd8_rel)\n#pragma intrinsic(_InterlockedExchangeAdd16_nf)\n#pragma intrinsic(_InterlockedExchangeAdd16_acq)\n#pragma intrinsic(_InterlockedExchangeAdd16_rel)\n#pragma intrinsic(_InterlockedExchangeAdd_nf)\n#pragma intrinsic(_InterlockedExchangeAdd_acq)\n#pragma intrinsic(_InterlockedExchangeAdd_rel)\n#pragma intrinsic(_InterlockedExchangeAdd64_nf)\n#pragma intrinsic(_InterlockedExchangeAdd64_acq)\n#pragma intrinsic(_InterlockedExchangeAdd64_rel)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(dest, addend) _InterlockedExchangeAdd8_nf((char*)(dest), (char)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(dest, addend) _InterlockedExchangeAdd8_acq((char*)(dest), (char)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(dest, addend) _InterlockedExchangeAdd8_rel((char*)(dest), (char)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(dest, addend) _InterlockedExchangeAdd16_nf((short*)(dest), (short)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(dest, addend) _InterlockedExchangeAdd16_acq((short*)(dest), (short)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(dest, addend) _InterlockedExchangeAdd16_rel((short*)(dest), (short)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(dest, addend) _InterlockedExchangeAdd_nf((long*)(dest), (long)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(dest, addend) _InterlockedExchangeAdd_acq((long*)(dest), (long)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(dest, addend) _InterlockedExchangeAdd_rel((long*)(dest), (long)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(dest, addend) _InterlockedExchangeAdd64_nf((__int64*)(dest), (__int64)(addend))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(dest, addend) _InterlockedExchangeAdd64_acq((__int64*)(dest), (__int64)(addend))\n#define 
BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(dest, addend) _InterlockedExchangeAdd64_rel((__int64*)(dest), (__int64)(addend))\n\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELAXED(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED((long*)(dest), byte_offset))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_ACQUIRE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE((long*)(dest), byte_offset))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELEASE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE((long*)(dest), byte_offset))\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedExchange8_nf)\n#pragma intrinsic(_InterlockedExchange8_acq)\n#pragma intrinsic(_InterlockedExchange16_nf)\n#pragma intrinsic(_InterlockedExchange16_acq)\n#pragma intrinsic(_InterlockedExchange_nf)\n#pragma intrinsic(_InterlockedExchange_acq)\n#pragma intrinsic(_InterlockedExchange64_nf)\n#pragma intrinsic(_InterlockedExchange64_acq)\n#pragma intrinsic(_InterlockedExchangePointer)\n#pragma intrinsic(_InterlockedExchangePointer_nf)\n#pragma intrinsic(_InterlockedExchangePointer_acq)\n#if _MSC_VER >= 1800\n#pragma intrinsic(_InterlockedExchange8_rel)\n#pragma intrinsic(_InterlockedExchange16_rel)\n#pragma intrinsic(_InterlockedExchange_rel)\n#pragma intrinsic(_InterlockedExchange64_rel)\n#pragma intrinsic(_InterlockedExchangePointer_rel)\n#endif\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(dest, newval) _InterlockedExchange8_nf((char*)(dest), (char)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(dest, newval) _InterlockedExchange8_acq((char*)(dest), (char)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(dest, newval) _InterlockedExchange16_nf((short*)(dest), (short)(newval))\n#define 
BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(dest, newval) _InterlockedExchange16_acq((short*)(dest), (short)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(dest, newval) _InterlockedExchange_nf((long*)(dest), (long)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(dest, newval) _InterlockedExchange_acq((long*)(dest), (long)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(dest, newval) _InterlockedExchange64_nf((__int64*)(dest), (__int64)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(dest, newval) _InterlockedExchange64_acq((__int64*)(dest), (__int64)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELAXED(dest, newval) _InterlockedExchangePointer_nf((void**)(dest), (void*)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_ACQUIRE(dest, newval) _InterlockedExchangePointer_acq((void**)(dest), (void*)(newval))\n\n#if _MSC_VER >= 1800\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) _InterlockedExchange8_rel((char*)(dest), (char)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) _InterlockedExchange16_rel((short*)(dest), (short)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) _InterlockedExchange_rel((long*)(dest), (long)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) _InterlockedExchange64_rel((__int64*)(dest), (__int64)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) _InterlockedExchangePointer_rel((void**)(dest), (void*)(newval))\n#else\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval)\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval)\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) 
BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval)\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval)\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval)\n#endif\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedAnd8_nf)\n#pragma intrinsic(_InterlockedAnd8_acq)\n#pragma intrinsic(_InterlockedAnd8_rel)\n#pragma intrinsic(_InterlockedAnd16_nf)\n#pragma intrinsic(_InterlockedAnd16_acq)\n#pragma intrinsic(_InterlockedAnd16_rel)\n#pragma intrinsic(_InterlockedAnd_nf)\n#pragma intrinsic(_InterlockedAnd_acq)\n#pragma intrinsic(_InterlockedAnd_rel)\n#pragma intrinsic(_InterlockedAnd64_nf)\n#pragma intrinsic(_InterlockedAnd64_acq)\n#pragma intrinsic(_InterlockedAnd64_rel)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(dest, arg) _InterlockedAnd8_nf((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(dest, arg) _InterlockedAnd8_acq((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(dest, arg) _InterlockedAnd8_rel((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(dest, arg) _InterlockedAnd16_nf((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(dest, arg) _InterlockedAnd16_acq((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(dest, arg) _InterlockedAnd16_rel((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(dest, arg) _InterlockedAnd_nf((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(dest, arg) _InterlockedAnd_acq((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(dest, arg) _InterlockedAnd_rel((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(dest, arg) _InterlockedAnd64_nf((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(dest, arg) 
_InterlockedAnd64_acq((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(dest, arg) _InterlockedAnd64_rel((__int64*)(dest), (__int64)(arg))\n\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedOr8_nf)\n#pragma intrinsic(_InterlockedOr8_acq)\n#pragma intrinsic(_InterlockedOr8_rel)\n#pragma intrinsic(_InterlockedOr16_nf)\n#pragma intrinsic(_InterlockedOr16_acq)\n#pragma intrinsic(_InterlockedOr16_rel)\n#pragma intrinsic(_InterlockedOr_nf)\n#pragma intrinsic(_InterlockedOr_acq)\n#pragma intrinsic(_InterlockedOr_rel)\n#pragma intrinsic(_InterlockedOr64_nf)\n#pragma intrinsic(_InterlockedOr64_acq)\n#pragma intrinsic(_InterlockedOr64_rel)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(dest, arg) _InterlockedOr8_nf((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(dest, arg) _InterlockedOr8_acq((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(dest, arg) _InterlockedOr8_rel((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(dest, arg) _InterlockedOr16_nf((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(dest, arg) _InterlockedOr16_acq((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(dest, arg) _InterlockedOr16_rel((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(dest, arg) _InterlockedOr_nf((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(dest, arg) _InterlockedOr_acq((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(dest, arg) _InterlockedOr_rel((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(dest, arg) _InterlockedOr64_nf((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(dest, arg) _InterlockedOr64_acq((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(dest, arg) _InterlockedOr64_rel((__int64*)(dest), (__int64)(arg))\n\n#if 
defined(BOOST_MSVC)\n#pragma intrinsic(_InterlockedXor8_nf)\n#pragma intrinsic(_InterlockedXor8_acq)\n#pragma intrinsic(_InterlockedXor8_rel)\n#pragma intrinsic(_InterlockedXor16_nf)\n#pragma intrinsic(_InterlockedXor16_acq)\n#pragma intrinsic(_InterlockedXor16_rel)\n#pragma intrinsic(_InterlockedXor_nf)\n#pragma intrinsic(_InterlockedXor_acq)\n#pragma intrinsic(_InterlockedXor_rel)\n#pragma intrinsic(_InterlockedXor64_nf)\n#pragma intrinsic(_InterlockedXor64_acq)\n#pragma intrinsic(_InterlockedXor64_rel)\n#endif\n\n#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(dest, arg) _InterlockedXor8_nf((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(dest, arg) _InterlockedXor8_acq((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(dest, arg) _InterlockedXor8_rel((char*)(dest), (char)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(dest, arg) _InterlockedXor16_nf((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(dest, arg) _InterlockedXor16_acq((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(dest, arg) _InterlockedXor16_rel((short*)(dest), (short)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(dest, arg) _InterlockedXor_nf((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(dest, arg) _InterlockedXor_acq((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(dest, arg) _InterlockedXor_rel((long*)(dest), (long)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(dest, arg) _InterlockedXor64_nf((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(dest, arg) _InterlockedXor64_acq((__int64*)(dest), (__int64)(arg))\n#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(dest, arg) _InterlockedXor64_rel((__int64*)(dest), (__int64)(arg))\n\n#endif // _MSC_VER >= 1700 && defined(_M_ARM)\n\n#endif // _MSC_VER < 1400\n\n#else // defined(_MSC_VER) && _MSC_VER >= 1310\n\n#if 
defined(BOOST_USE_WINDOWS_H)\n\n#include <windows.h>\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))\n\n#if defined(_WIN64)\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) InterlockedExchange64((__int64*)(dest), (__int64)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) InterlockedExchangePointer((void**)(dest), (void*)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))\n\n#else // defined(_WIN64)\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))\n\n#endif // defined(_WIN64)\n\n#else // defined(BOOST_USE_WINDOWS_H)\n\n#if defined(__MINGW64__)\n#define BOOST_ATOMIC_INTERLOCKED_IMPORT\n#else\n#define BOOST_ATOMIC_INTERLOCKED_IMPORT __declspec(dllimport)\n#endif\n\nnamespace boost 
{\nnamespace atomics {\nnamespace detail {\n\nextern \"C\" {\n\nBOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long);\nBOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long);\nBOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long);\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend))\n\n#if defined(_WIN64)\n\nBOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);\nBOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64);\nBOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64);\n\nBOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile *, void*, void*);\nBOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile *, void*);\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) boost::atomics::detail::InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) 
boost::atomics::detail::InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) boost::atomics::detail::InterlockedExchangePointer((void**)(dest), (void*)(newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))\n\n#else // defined(_WIN64)\n\n#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))\n#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))\n\n#endif // defined(_WIN64)\n\n} // extern \"C\"\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#undef BOOST_ATOMIC_INTERLOCKED_IMPORT\n\n#endif // defined(BOOST_USE_WINDOWS_H)\n\n#endif // defined(_MSC_VER)\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/link.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2012 Hartmut Kaiser\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/config.hpp\n *\n * This header defines macros for linking with compiled library of Boost.Atomic\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n///////////////////////////////////////////////////////////////////////////////\n//  Set up dll import/export options\n#if (defined(BOOST_ATOMIC_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) && \\\n    !defined(BOOST_ATOMIC_STATIC_LINK)\n\n#if defined(BOOST_ATOMIC_SOURCE)\n#define BOOST_ATOMIC_DECL BOOST_SYMBOL_EXPORT\n#define BOOST_ATOMIC_BUILD_DLL\n#else\n#define BOOST_ATOMIC_DECL BOOST_SYMBOL_IMPORT\n#endif\n\n#endif // building a shared library\n\n#ifndef BOOST_ATOMIC_DECL\n#define BOOST_ATOMIC_DECL\n#endif\n\n///////////////////////////////////////////////////////////////////////////////\n//  Auto library naming\n#if !defined(BOOST_ATOMIC_SOURCE) && !defined(BOOST_ALL_NO_LIB) && \\\n    !defined(BOOST_ATOMIC_NO_LIB)\n\n#define BOOST_LIB_NAME boost_atomic\n\n// tell the auto-link code to select a dll when required:\n#if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_ATOMIC_DYN_LINK)\n#define BOOST_DYN_LINK\n#endif\n\n#include <boost/config/auto_link.hpp>\n\n#endif  // auto-linking disabled\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/lockpool.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2011 Helge Bahmann\n * Copyright (c) 2013-2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/lockpool.hpp\n *\n * This header contains declaration of the lockpool used to emulate atomic ops.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/link.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\nstruct lockpool\n{\n    class scoped_lock\n    {\n        void* m_lock;\n\n    public:\n        explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr) BOOST_NOEXCEPT;\n        BOOST_ATOMIC_DECL ~scoped_lock() BOOST_NOEXCEPT;\n\n        BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))\n        BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))\n    };\n\n    static BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;\n    static BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;\n};\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/operations.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/operations.hpp\n *\n * This header defines atomic operations, including the emulated version.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_\n\n#include <boost/atomic/detail/operations_lockfree.hpp>\n#include <boost/atomic/detail/ops_emulated.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/operations_fwd.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/operations_fwd.hpp\n *\n * This header contains forward declaration of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_\n\n#include <cstddef>\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\ntemplate< std::size_t Size, bool Signed >\nstruct operations;\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/operations_lockfree.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/operations_lockfree.hpp\n *\n * This header defines lockfree atomic operations.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/platform.hpp>\n\n#if !defined(BOOST_ATOMIC_EMULATED)\n#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_)\n#else\n#include <boost/atomic/detail/operations_fwd.hpp>\n#endif\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_cas_based.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_cas_based.hpp\n *\n * This header contains CAS-based implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\ntemplate< typename Base >\nstruct cas_based_exchange :\n    public Base\n{\n    typedef typename Base::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type old_val;\n        atomics::detail::non_atomic_load(storage, old_val);\n        while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}\n        return old_val;\n    }\n};\n\ntemplate< typename Base >\nstruct cas_based_operations :\n    public Base\n{\n    typedef typename Base::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type old_val;\n        atomics::detail::non_atomic_load(storage, old_val);\n        while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {}\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type old_val;\n        atomics::detail::non_atomic_load(storage, old_val);\n        while 
(!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {}\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type old_val;\n        atomics::detail::non_atomic_load(storage, old_val);\n        while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {}\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type old_val;\n        atomics::detail::non_atomic_load(storage, old_val);\n        while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {}\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type old_val;\n        atomics::detail::non_atomic_load(storage, old_val);\n        while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {}\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!Base::exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        Base::store(storage, (storage_type)0, order);\n    }\n};\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_emulated.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_emulated.hpp\n *\n * This header contains lockpool-based implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_\n\n#include <cstddef>\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/detail/lockpool.hpp>\n#include <boost/atomic/capabilities.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\ntemplate< typename T >\nstruct emulated_operations\n{\n    typedef T storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        lockpool::scoped_lock lock(&storage);\n        const_cast< storage_type& >(storage) = v;\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT\n    {\n        lockpool::scoped_lock lock(&storage);\n        return const_cast< storage_type const& >(storage);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type& s = const_cast< storage_type& >(storage);\n        lockpool::scoped_lock lock(&storage);\n        storage_type old_val = s;\n        s += v;\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type& s = const_cast< storage_type& >(storage);\n        
lockpool::scoped_lock lock(&storage);\n        storage_type old_val = s;\n        s -= v;\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type& s = const_cast< storage_type& >(storage);\n        lockpool::scoped_lock lock(&storage);\n        storage_type old_val = s;\n        s = v;\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type& s = const_cast< storage_type& >(storage);\n        lockpool::scoped_lock lock(&storage);\n        storage_type old_val = s;\n        const bool res = old_val == expected;\n        if (res)\n            s = desired;\n        expected = old_val;\n\n        return res;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        // Note: This function is the exact copy of compare_exchange_strong. 
The reason we're not just forwarding the call\n        // is that MSVC-12 ICEs in this case.\n        storage_type& s = const_cast< storage_type& >(storage);\n        lockpool::scoped_lock lock(&storage);\n        storage_type old_val = s;\n        const bool res = old_val == expected;\n        if (res)\n            s = desired;\n        expected = old_val;\n\n        return res;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type& s = const_cast< storage_type& >(storage);\n        lockpool::scoped_lock lock(&storage);\n        storage_type old_val = s;\n        s &= v;\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type& s = const_cast< storage_type& >(storage);\n        lockpool::scoped_lock lock(&storage);\n        storage_type old_val = s;\n        s |= v;\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type& s = const_cast< storage_type& >(storage);\n        lockpool::scoped_lock lock(&storage);\n        storage_type old_val = s;\n        s ^= v;\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, (storage_type)0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return false;\n    }\n};\n\ntemplate< std::size_t Size, bool Signed >\nstruct operations :\n    public emulated_operations< 
typename make_storage_type< Size, Signed >::type >\n{\n    typedef typename make_storage_type< Size, Signed >::aligned aligned_storage_type;\n};\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_extending_cas_based.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_extending_cas_based.hpp\n *\n * This header contains a boilerplate of the \\c operations template implementation that requires sign/zero extension in arithmetic operations.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_\n\n#include <cstddef>\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\ntemplate< typename Base, std::size_t Size, bool Signed >\nstruct extending_cas_based_operations :\n    public Base\n{\n    typedef typename Base::storage_type storage_type;\n    typedef typename make_storage_type< Size, Signed >::type emulated_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type old_val;\n        atomics::detail::non_atomic_load(storage, old_val);\n        emulated_storage_type new_val;\n        do\n        {\n            new_val = static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v);\n        }\n        while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed));\n        return old_val;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type old_val;\n        atomics::detail::non_atomic_load(storage, old_val);\n        emulated_storage_type new_val;\n        do\n        {\n          
  new_val = static_cast< emulated_storage_type >(old_val) - static_cast< emulated_storage_type >(v);\n        }\n        while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed));\n        return old_val;\n    }\n};\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_gcc_alpha.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_gcc_alpha.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\n/*\n  Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html\n  (HP OpenVMS systems documentation) and the Alpha Architecture Reference Manual.\n */\n\n/*\n    NB: The most natural thing would be to write the increment/decrement\n    operators along the following lines:\n\n    __asm__ __volatile__\n    (\n        \"1: ldl_l %0,%1 \\n\"\n        \"addl %0,1,%0 \\n\"\n        \"stl_c %0,%1 \\n\"\n        \"beq %0,1b\\n\"\n        : \"=&b\" (tmp)\n        : \"m\" (value)\n        : \"cc\"\n    );\n\n    However according to the comments on the HP website and matching\n    comments in the Linux kernel sources this defies branch prediction,\n    as the cpu assumes that backward branches are always taken; so\n    instead copy the trick from the Linux kernel, introduce a forward\n    branch and back again.\n\n    I have, however, had a hard time measuring the difference between\n    the two versions in microbenchmarks -- I am leaving it in nevertheless\n    as it apparently does not hurt either.\n*/\n\nstruct gcc_alpha_operations_base\n{\n    static BOOST_FORCEINLINE void 
fence_before(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & memory_order_release) != 0)\n            __asm__ __volatile__ (\"mb\" ::: \"memory\");\n    }\n\n    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & (memory_order_consume | memory_order_acquire)) != 0)\n            __asm__ __volatile__ (\"mb\" ::: \"memory\");\n    }\n\n    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT\n    {\n        if (order == memory_order_seq_cst)\n            __asm__ __volatile__ (\"mb\" ::: \"memory\");\n    }\n};\n\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public gcc_alpha_operations_base\n{\n    typedef typename make_storage_type< 4u, Signed >::type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        storage = v;\n        fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"mov %3, %1\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (tmp)        // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n      
      : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        fence_before(success_order);\n        int success;\n        storage_type current;\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %2, %4\\n\"                // current = *(&storage)\n            \"cmpeq %2, %0, %3\\n\"            // success = current == expected\n            \"mov %2, %0\\n\"                  // expected = current\n            \"beq %3, 2f\\n\"                  // if (success == 0) goto end\n            \"stl_c %1, %4\\n\"                // storage = desired; desired = store succeeded\n            \"mov %1, %3\\n\"                  // success = desired\n            \"2:\\n\"\n            : \"+&r\" (expected),  // %0\n              \"+&r\" (desired),   // %1\n              \"=&r\" (current),   // %2\n              \"=&r\" (success)    // %3\n            : \"m\" (storage)      // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        int success;\n        storage_type current, tmp;\n        fence_before(success_order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"mov %5, %1\\n\"                  // tmp = desired\n            \"ldl_l %2, %4\\n\"                // current = *(&storage)\n            \"cmpeq %2, %0, %3\\n\"            // success = 
current == expected\n            \"mov %2, %0\\n\"                  // expected = current\n            \"beq %3, 2f\\n\"                  // if (success == 0) goto end\n            \"stl_c %1, %4\\n\"                // storage = tmp; tmp = store succeeded\n            \"beq %1, 3f\\n\"                  // if (tmp == 0) goto retry\n            \"mov %1, %3\\n\"                  // success = tmp\n            \"2:\\n\"\n\n            \".subsection 2\\n\"\n            \"3: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"+&r\" (expected),  // %0\n              \"=&r\" (tmp),       // %1\n              \"=&r\" (current),   // %2\n              \"=&r\" (success)    // %3\n            : \"m\" (storage),     // %4\n              \"r\" (desired)      // %5\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"addl %0, %3, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        
fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"subl %0, %3, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"and %0, %3, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"bis %0, %3, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // 
%1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"xor %0, %3, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, 0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n\ntemplate< >\nstruct operations< 1u, false > :\n    public operations< 4u, false >\n{\n    typedef operations< 4u, false > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, 
%2\\n\"\n            \"addl %0, %3, %1\\n\"\n            \"zapnot %1, #1, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"subl %0, %3, %1\\n\"\n            \"zapnot %1, #1, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\ntemplate< >\nstruct operations< 1u, true > :\n    public operations< 4u, true >\n{\n    typedef operations< 4u, true > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"addl %0, %3, %1\\n\"\n            \"sextb %1, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq 
%1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"subl %0, %3, %1\\n\"\n            \"sextb %1, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\n\ntemplate< >\nstruct operations< 2u, false > :\n    public operations< 4u, false >\n{\n    typedef operations< 4u, false > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"addl %0, %3, %1\\n\"\n            \"zapnot %1, #3, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : 
\"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"subl %0, %3, %1\\n\"\n            \"zapnot %1, #3, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\ntemplate< >\nstruct operations< 2u, true > :\n    public operations< 4u, true >\n{\n    typedef operations< 4u, true > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"addl %0, %3, %1\\n\"\n            \"sextw %1, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" 
(v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldl_l %0, %2\\n\"\n            \"subl %0, %3, %1\\n\"\n            \"sextw %1, %1\\n\"\n            \"stl_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public gcc_alpha_operations_base\n{\n    typedef typename make_storage_type< 8u, Signed >::type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        storage = v;\n        fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"mov %3, 
%1\\n\"\n            \"ldq_l %0, %2\\n\"\n            \"stq_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (tmp)        // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        fence_before(success_order);\n        int success;\n        storage_type current;\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldq_l %2, %4\\n\"                // current = *(&storage)\n            \"cmpeq %2, %0, %3\\n\"            // success = current == expected\n            \"mov %2, %0\\n\"                  // expected = current\n            \"beq %3, 2f\\n\"                  // if (success == 0) goto end\n            \"stq_c %1, %4\\n\"                // storage = desired; desired = store succeeded\n            \"mov %1, %3\\n\"                  // success = desired\n            \"2:\\n\"\n            : \"+&r\" (expected),  // %0\n              \"+&r\" (desired),   // %1\n              \"=&r\" (current),   // %2\n              \"=&r\" (success)    // %3\n            : \"m\" (storage)      // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) 
BOOST_NOEXCEPT\n    {\n        int success;\n        storage_type current, tmp;\n        fence_before(success_order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"mov %5, %1\\n\"                  // tmp = desired\n            \"ldq_l %2, %4\\n\"                // current = *(&storage)\n            \"cmpeq %2, %0, %3\\n\"            // success = current == expected\n            \"mov %2, %0\\n\"                  // expected = current\n            \"beq %3, 2f\\n\"                  // if (success == 0) goto end\n            \"stq_c %1, %4\\n\"                // storage = tmp; tmp = store succeeded\n            \"beq %1, 3f\\n\"                  // if (tmp == 0) goto retry\n            \"mov %1, %3\\n\"                  // success = tmp\n            \"2:\\n\"\n\n            \".subsection 2\\n\"\n            \"3: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"+&r\" (expected),  // %0\n              \"=&r\" (tmp),       // %1\n              \"=&r\" (current),   // %2\n              \"=&r\" (success)    // %3\n            : \"m\" (storage),     // %4\n              \"r\" (desired)      // %5\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldq_l %0, %2\\n\"\n            \"addq %0, %3, %1\\n\"\n            \"stq_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // 
%2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldq_l %0, %2\\n\"\n            \"subq %0, %3, %1\\n\"\n            \"stq_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldq_l %0, %2\\n\"\n            \"and %0, %3, %1\\n\"\n            \"stq_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        
__asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldq_l %0, %2\\n\"\n            \"bis %0, %3, %1\\n\"\n            \"stq_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, modified;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\"\n            \"ldq_l %0, %2\\n\"\n            \"xor %0, %3, %1\\n\"\n            \"stq_c %1, %2\\n\"\n            \"beq %1, 2f\\n\"\n\n            \".subsection 2\\n\"\n            \"2: br 1b\\n\"\n            \".previous\\n\"\n\n            : \"=&r\" (original),  // %0\n              \"=&r\" (modified)   // %1\n            : \"m\" (storage),     // %2\n              \"r\" (v)            // %3\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, 0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        __asm__ 
__volatile__ (\"mb\" ::: \"memory\");\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        __asm__ __volatile__ (\"\" ::: \"memory\");\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_gcc_arm.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_gcc_arm.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_\n\n#include <boost/cstdint.hpp>\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/detail/ops_extending_cas_based.hpp>\n#include <boost/atomic/capabilities.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\n// From the ARM Architecture Reference Manual for architecture v6:\n//\n// LDREX{<cond>} <Rd>, [<Rn>]\n// <Rd> Specifies the destination register for the memory word addressed by <Rd>\n// <Rn> Specifies the register containing the address.\n//\n// STREX{<cond>} <Rd>, <Rm>, [<Rn>]\n// <Rd> Specifies the destination register for the returned status value.\n//      0  if the operation updates memory\n//      1  if the operation fails to update memory\n// <Rm> Specifies the register containing the word to be stored to memory.\n// <Rn> Specifies the register containing the address.\n// Rd must not be the same register as Rm or Rn.\n//\n// ARM v7 is like ARM v6 plus:\n// There are half-word and byte versions of the LDREX and STREX instructions,\n// LDREXH, LDREXB, STREXH and STREXB.\n// There are also double-word versions, LDREXD and STREXD.\n// (Actually it looks like these are available from version 6k onwards.)\n// FIXME these are not yet used; should be mostly a matter of copy-and-paste.\n// I think you can supply 
an immediate offset to the address.\n//\n// A memory barrier is effected using a \"co-processor 15\" instruction,\n// though a separate assembler mnemonic is available for it in v7.\n//\n// \"Thumb 1\" is a subset of the ARM instruction set that uses a 16-bit encoding.  It\n// doesn't include all instructions and in particular it doesn't include the co-processor\n// instruction used for the memory barrier or the load-locked/store-conditional\n// instructions.  So, if we're compiling in \"Thumb 1\" mode, we need to wrap all of our\n// asm blocks with code to temporarily change to ARM mode.\n//\n// You can only change between ARM and Thumb modes when branching using the bx instruction.\n// bx takes an address specified in a register.  The least significant bit of the address\n// indicates the mode, so 1 is added to indicate that the destination code is Thumb.\n// A temporary register is needed for the address and is passed as an argument to these\n// macros.  It must be one of the \"low\" registers accessible to Thumb code, specified\n// using the \"l\" attribute in the asm statement.\n//\n// Architecture v7 introduces \"Thumb 2\", which does include (almost?) all of the ARM\n// instruction set.  (Actually, there was an extension of v6 called v6T2 which supported\n// \"Thumb 2\" mode, but its architecture manual is no longer available, referring to v7.)\n// So in v7 we don't need to change to ARM mode; we can write \"universal\n// assembler\" which will assemble to Thumb 2 or ARM code as appropriate.  The only thing\n// we need to do to make this \"universal\" assembler mode work is to insert \"IT\" instructions\n// to annotate the conditional instructions.  These are ignored in other modes (e.g. v6),\n// so they can always be present.\n\n// A note about memory_order_consume. 
Technically, this architecture allows to avoid\n// unnecessary memory barrier after consume load since it supports data dependency ordering.\n// However, some compiler optimizations may break a seemingly valid code relying on data\n// dependency tracking by injecting bogus branches to aid out of order execution.\n// This may happen not only in Boost.Atomic code but also in user's code, which we have no\n// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.\n// For this reason we promote memory_order_consume to memory_order_acquire.\n\n#if defined(__thumb__) && !defined(__thumb2__)\n#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) \"adr \" #TMPREG \", 8f\\n\" \"bx \" #TMPREG \"\\n\" \".arm\\n\" \".align 4\\n\" \"8:\\n\"\n#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)   \"adr \" #TMPREG \", 9f + 1\\n\" \"bx \" #TMPREG \"\\n\" \".thumb\\n\" \".align 2\\n\" \"9:\\n\"\n#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) \"=&l\" (var)\n#else\n// The tmpreg may be wasted in this case, which is non-optimal.\n#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)\n#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)\n#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) \"=&r\" (var)\n#endif\n\nstruct gcc_arm_operations_base\n{\n    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & memory_order_release) != 0)\n            hardware_full_fence();\n    }\n\n    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & (memory_order_consume | memory_order_acquire)) != 0)\n            hardware_full_fence();\n    }\n\n    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT\n    {\n        if (order == memory_order_seq_cst)\n            hardware_full_fence();\n    }\n\n    static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT\n    {\n#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)\n        // Older 
binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the \"dmb\" instruction such as \"ish\" or \"#11\".\n        // As a workaround we have to inject encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.\n        // Since we cannot detect binutils version at compile time, we'll have to always use this hack.\n        __asm__ __volatile__\n        (\n#if defined(__thumb2__)\n            \".short 0xF3BF, 0x8F5B\\n\" // dmb ish\n#else\n            \".word 0xF57FF05B\\n\" // dmb ish\n#endif\n            :\n            :\n            : \"memory\"\n        );\n#else\n        int tmp;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"mcr\\tp15, 0, r0, c7, c10, 5\\n\"\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : \"=&l\" (tmp)\n            :\n            : \"memory\"\n        );\n#endif\n    }\n};\n\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public gcc_arm_operations_base\n{\n    typedef typename make_storage_type< 4u, Signed >::type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        storage = v;\n        fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original;\n        fence_before(order);\n        uint32_t tmp;\n        __asm__ __volatile__\n        (\n            
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex %[original], %[storage]\\n\"          // load the original value\n            \"strex %[tmp], %[value], %[storage]\\n\"     // store the replacement, tmp = store failed\n            \"teq   %[tmp], #0\\n\"                       // check if store succeeded\n            \"bne   1b\\n\"\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [tmp] \"=&l\" (tmp), [original] \"=&r\" (original), [storage] \"+Q\" (storage)\n            : [value] \"r\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        fence_before(success_order);\n        uint32_t success;\n        uint32_t tmp;\n        storage_type original;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"mov     %[success], #0\\n\"                      // success = 0\n            \"ldrex   %[original], %[storage]\\n\"             // original = *(&storage)\n            \"cmp     %[original], %[expected]\\n\"            // flags = original==expected\n            \"itt     eq\\n\"                                  // [hint that the following 2 instructions are conditional on flags.equal]\n            \"strexeq %[success], %[desired], %[storage]\\n\"  // if (flags.equal) *(&storage) = desired, success = store failed\n            \"eoreq   %[success], %[success], #1\\n\"          // if (flags.equal) success ^= 1 (i.e. 
make it 1 if store succeeded)\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [success] \"=&r\" (success),    // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [expected] \"r\" (expected),    // %4\n              [desired] \"r\" (desired)       // %5\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        expected = original;\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        fence_before(success_order);\n        uint32_t success;\n        uint32_t tmp;\n        storage_type original;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"mov     %[success], #0\\n\"                      // success = 0\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"             // original = *(&storage)\n            \"cmp     %[original], %[expected]\\n\"            // flags = original==expected\n            \"bne     2f\\n\"                                  // if (!flags.equal) goto end\n            \"strex   %[success], %[desired], %[storage]\\n\"  // *(&storage) = desired, success = store failed\n            \"eors    %[success], %[success], #1\\n\"          // success ^= 1 (i.e. 
make it 1 if store succeeded); flags.equal = success == 0\n            \"beq     1b\\n\"                                  // if (flags.equal) goto retry\n            \"2:\\n\"\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [success] \"=&r\" (success),    // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [expected] \"r\" (expected),    // %4\n              [desired] \"r\" (desired)       // %5\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        expected = original;\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"add     %[result], %[original], %[value]\\n\"  // result = original + value\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        
fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"sub     %[result], %[original], %[value]\\n\"  // result = original - value\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"and     %[result], %[original], %[value]\\n\"  // result = original & value\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"         
               // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"orr     %[result], %[original], %[value]\\n\"  // result = original | value\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        
fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"eor     %[result], %[original], %[value]\\n\"  // result = original ^ value\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, 0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n\ntemplate< >\nstruct operations< 1u, false > :\n    public operations< 4u, false >\n{\n    typedef operations< 4u, false > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type 
original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"add     %[result], %[original], %[value]\\n\"  // result = original + value\n            \"uxtb    %[result], %[result]\\n\"              // zero extend result from 8 to 32 bits\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"sub     %[result], %[original], %[value]\\n\"  // result = original - value\n            \"uxtb    %[result], %[result]\\n\"              // zero extend result from 8 to 32 bits\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"      
                          // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\ntemplate< >\nstruct operations< 1u, true > :\n    public operations< 4u, true >\n{\n    typedef operations< 4u, true > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"add     %[result], %[original], %[value]\\n\"  // result = original + value\n            \"sxtb    %[result], %[result]\\n\"              // sign extend result from 8 to 32 bits\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        
fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"sub     %[result], %[original], %[value]\\n\"  // result = original - value\n            \"sxtb    %[result], %[result]\\n\"              // sign extend result from 8 to 32 bits\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\n\ntemplate< >\nstruct operations< 2u, false > :\n    public operations< 4u, false >\n{\n    typedef operations< 4u, false > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], 
%[storage]\\n\"           // original = *(&storage)\n            \"add     %[result], %[original], %[value]\\n\"  // result = original + value\n            \"uxth    %[result], %[result]\\n\"              // zero extend result from 16 to 32 bits\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"sub     %[result], %[original], %[value]\\n\"  // result = original - value\n            \"uxth    %[result], %[result]\\n\"              // zero extend result from 16 to 32 bits\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n      
        [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\ntemplate< >\nstruct operations< 2u, true > :\n    public operations< 4u, true >\n{\n    typedef operations< 4u, true > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"add     %[result], %[original], %[value]\\n\"  // result = original + value\n            \"sxth    %[result], %[result]\\n\"              // sign extend result from 16 to 32 bits\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order 
order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        uint32_t tmp;\n        storage_type original, result;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])\n            \"1:\\n\"\n            \"ldrex   %[original], %[storage]\\n\"           // original = *(&storage)\n            \"sub     %[result], %[original], %[value]\\n\"  // result = original - value\n            \"sxth    %[result], %[result]\\n\"              // sign extend result from 16 to 32 bits\n            \"strex   %[tmp], %[result], %[storage]\\n\"     // *(&storage) = result, tmp = store failed\n            \"teq     %[tmp], #0\\n\"                        // flags = tmp==0\n            \"bne     1b\\n\"                                // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])\n            : [original] \"=&r\" (original),  // %0\n              [result] \"=&r\" (result),      // %1\n              [tmp] \"=&l\" (tmp),            // %2\n              [storage] \"+Q\" (storage)      // %3\n            : [value] \"r\" (v)               // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\n\n#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)\n\n// Unlike 32-bit operations, for 64-bit loads and stores we must use ldrexd/strexd.\n// Any other instructions result in a non-atomic sequence of 32-bit accesses.\n// See \"ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition\",\n// Section A3.5.3 \"Atomicity in the ARM architecture\".\n\n// In the asm blocks below we have to use 32-bit register pairs to compose 64-bit values.\n// In order to pass the 64-bit operands to/from asm blocks, we use undocumented gcc feature:\n// the lower half (Rt) of the operand is accessible normally, via the numbered placeholder (e.g. %0),\n// and the upper half (Rt2) - via the same placeholder with an 'H' after the '%' sign (e.g. 
%H0).\n// See: http://hardwarebug.org/2010/07/06/arm-inline-asm-secrets/\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public gcc_arm_operations_base\n{\n    typedef typename make_storage_type< 8u, Signed >::type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        exchange(storage, v, order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original;\n        uint32_t tmp;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"ldrexd %1, %H1, [%2]\\n\"\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original)   // %1\n            : \"r\" (&storage)     // %2\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original;\n        fence_before(order);\n        uint32_t tmp;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"1:\\n\"\n            \"ldrexd %1, %H1, [%3]\\n\"        // load the original value\n            \"strexd %0, %2, %H2, [%3]\\n\"    // store the replacement, tmp = store failed\n            \"teq    %0, #0\\n\"               // check if store succeeded\n            \"bne    1b\\n\"\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original)   // %1\n            : \"r\" (v),           // %2\n              \"r\" (&storage)     // %3\n            : 
BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        fence_before(success_order);\n        uint32_t tmp;\n        storage_type original, old_val = expected;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"ldrexd   %1, %H1, [%3]\\n\"               // original = *(&storage)\n            \"cmp      %1, %2\\n\"                      // flags = original.lo==old_val.lo\n            \"ittt     eq\\n\"                          // [hint that the following 3 instructions are conditional on flags.equal]\n            \"cmpeq    %H1, %H2\\n\"                    // if (flags.equal) flags = original.hi==old_val.hi\n            \"strexdeq %0, %4, %H4, [%3]\\n\"           // if (flags.equal) *(&storage) = desired, tmp = store failed\n            \"teqeq    %0, #0\\n\"                      // if (flags.equal) flags = tmp==0\n            \"ite      eq\\n\"                          // [hint that the following 2 instructions are conditional on flags.equal]\n            \"moveq    %2, #1\\n\"                      // if (flags.equal) old_val.lo = 1\n            \"movne    %2, #0\\n\"                      // if (!flags.equal) old_val.lo = 0\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original),  // %1\n              \"+r\" (old_val)     // %2\n            : \"r\" (&storage),    // %3\n              \"r\" (desired)      // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        const uint32_t success = (uint32_t)old_val;\n        if (success)\n            fence_after(success_order);\n        
else\n            fence_after(failure_order);\n        expected = original;\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        fence_before(success_order);\n        uint32_t tmp;\n        storage_type original, old_val = expected;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"1:\\n\"\n            \"ldrexd  %1, %H1, [%3]\\n\"               // original = *(&storage)\n            \"cmp     %1, %2\\n\"                      // flags = original.lo==old_val.lo\n            \"it      eq\\n\"                          // [hint that the following instruction is conditional on flags.equal]\n            \"cmpeq   %H1, %H2\\n\"                    // if (flags.equal) flags = original.hi==old_val.hi\n            \"bne     2f\\n\"                          // if (!flags.equal) goto end\n            \"strexd  %0, %4, %H4, [%3]\\n\"           // *(&storage) = desired, tmp = store failed\n            \"teq     %0, #0\\n\"                      // flags.equal = tmp == 0\n            \"bne     1b\\n\"                          // if (flags.equal) goto retry\n            \"2:\\n\"\n            \"ite      eq\\n\"                         // [hint that the following 2 instructions are conditional on flags.equal]\n            \"moveq    %2, #1\\n\"                     // if (flags.equal) old_val.lo = 1\n            \"movne    %2, #0\\n\"                     // if (!flags.equal) old_val.lo = 0\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original),  // %1\n              \"+r\" (old_val)     // %2\n            : \"r\" (&storage),    // %3\n              \"r\" (desired)      // %4\n            : 
BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        const uint32_t success = (uint32_t)old_val;\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        expected = original;\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        storage_type original, result;\n        uint32_t tmp;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"1:\\n\"\n            \"ldrexd  %1, %H1, [%3]\\n\"               // original = *(&storage)\n            \"adds    %2, %1, %4\\n\"                  // result = original + value\n            \"adc     %H2, %H1, %H4\\n\"\n            \"strexd  %0, %2, %H2, [%3]\\n\"           // *(&storage) = result, tmp = store failed\n            \"teq     %0, #0\\n\"                      // flags = tmp==0\n            \"bne     1b\\n\"                          // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original),  // %1\n              \"=&r\" (result)     // %2\n            : \"r\" (&storage),    // %3\n              \"r\" (v)            // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        storage_type original, result;\n        uint32_t tmp;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"1:\\n\"\n            \"ldrexd  %1, %H1, [%3]\\n\"               // original = *(&storage)\n          
  \"subs    %2, %1, %4\\n\"                  // result = original - value\n            \"sbc     %H2, %H1, %H4\\n\"\n            \"strexd  %0, %2, %H2, [%3]\\n\"           // *(&storage) = result, tmp = store failed\n            \"teq     %0, #0\\n\"                      // flags = tmp==0\n            \"bne     1b\\n\"                          // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original),  // %1\n              \"=&r\" (result)     // %2\n            : \"r\" (&storage),    // %3\n              \"r\" (v)            // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        storage_type original, result;\n        uint32_t tmp;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"1:\\n\"\n            \"ldrexd  %1, %H1, [%3]\\n\"               // original = *(&storage)\n            \"and     %2, %1, %4\\n\"                  // result = original & value\n            \"and     %H2, %H1, %H4\\n\"\n            \"strexd  %0, %2, %H2, [%3]\\n\"           // *(&storage) = result, tmp = store failed\n            \"teq     %0, #0\\n\"                      // flags = tmp==0\n            \"bne     1b\\n\"                          // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original),  // %1\n              \"=&r\" (result)     // %2\n            : \"r\" (&storage),    // %3\n              \"r\" (v)            // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        
);\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        storage_type original, result;\n        uint32_t tmp;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"1:\\n\"\n            \"ldrexd  %1, %H1, [%3]\\n\"               // original = *(&storage)\n            \"orr     %2, %1, %4\\n\"                  // result = original | value\n            \"orr     %H2, %H1, %H4\\n\"\n            \"strexd  %0, %2, %H2, [%3]\\n\"           // *(&storage) = result, tmp = store failed\n            \"teq     %0, #0\\n\"                      // flags = tmp==0\n            \"bne     1b\\n\"                          // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original),  // %1\n              \"=&r\" (result)     // %2\n            : \"r\" (&storage),    // %3\n              \"r\" (v)            // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        storage_type original, result;\n        uint32_t tmp;\n        __asm__ __volatile__\n        (\n            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)\n            \"1:\\n\"\n            \"ldrexd  %1, %H1, [%3]\\n\"               // original = *(&storage)\n            \"eor     %2, %1, %4\\n\"                  // result = original ^ value\n            \"eor     %H2, %H1, %H4\\n\"\n            \"strexd  %0, %2, %H2, [%3]\\n\"           // *(&storage) = result, tmp = store failed\n            \"teq 
    %0, #0\\n\"                      // flags = tmp==0\n            \"bne     1b\\n\"                          // if (!flags.equal) goto retry\n            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)\n            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0\n              \"=&r\" (original),  // %1\n              \"=&r\" (result)     // %2\n            : \"r\" (&storage),    // %3\n              \"r\" (v)            // %4\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, 0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)\n\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        gcc_arm_operations_base::hardware_full_fence();\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        __asm__ __volatile__ (\"\" ::: \"memory\");\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_gcc_atomic.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_gcc_atomic.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n#if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))\n#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>\n#include <boost/atomic/detail/ops_cas_based.hpp>\n#endif\n\n#if __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE ||\\\n    __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE || __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE ||\\\n    __GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE || __GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE ||\\\n    __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE\n// There are platforms where we need to use larger storage types\n#include <boost/atomic/detail/int_sizes.hpp>\n#include <boost/atomic/detail/ops_extending_cas_based.hpp>\n#endif\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(__INTEL_COMPILER)\n// This is used to suppress warning #32013 described below for Intel Compiler.\n// In debug builds the compiler does not inline any functions, so basically\n// every atomic function call results in this warning. 
I don't know any other\n// way to selectively disable just this one warning.\n#pragma system_header\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\n/*!\n * The function converts \\c boost::memory_order values to the compiler-specific constants.\n *\n * NOTE: The intention is that the function is optimized away by the compiler, and the\n *       compiler-specific constants are passed to the intrinsics. I know constexpr doesn't\n *       work in this case because the standard atomics interface require memory ordering\n *       constants to be passed as function arguments, at which point they stop being constexpr.\n *       However it is crucial that the compiler sees constants and not runtime values,\n *       because otherwise it just ignores the ordering value and always uses seq_cst.\n *       This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and\n *       gcc 4.8.2. Intel Compiler issues a warning in this case:\n *\n *       warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.\n *\n *       while gcc acts silently.\n *\n *       To mitigate the problem ALL functions, including the atomic<> members must be\n *       declared with BOOST_FORCEINLINE. In this case the compilers are able to see that\n *       all functions are called with constant orderings and call intrinstcts properly.\n *\n *       Unfortunately, this still doesn't work in debug mode as the compiler doesn't\n *       inline functions even when marked with BOOST_FORCEINLINE. In this case all atomic\n *       operaions will be executed with seq_cst semantics.\n */\nBOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT\n{\n    return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :\n        (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? 
__ATOMIC_RELEASE :\n        (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));\n}\n\ntemplate< typename T >\nstruct gcc_atomic_operations\n{\n    typedef T storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return __atomic_compare_exchange_n\n        (\n            &storage, &expected, desired, false,\n            atomics::detail::convert_memory_order_to_gcc(success_order),\n            atomics::detail::convert_memory_order_to_gcc(failure_order)\n        );\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return __atomic_compare_exchange_n\n        (\n            &storage, &expected, desired, true,\n            atomics::detail::convert_memory_order_to_gcc(success_order),\n            atomics::detail::convert_memory_order_to_gcc(failure_order)\n        );\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile& storage) BOOST_NOEXCEPT\n    {\n        return __atomic_is_lock_free(sizeof(storage_type), &storage);\n    }\n};\n\n#if BOOST_ATOMIC_INT128_LOCK_FREE > 0\n#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n\n// Workaround for clang bug: 
http://llvm.org/bugs/show_bug.cgi?id=19149\n// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16\ntemplate< bool Signed >\nstruct operations< 16u, Signed > :\n    public cas_based_operations< gcc_dcas_x86_64< Signed > >\n{\n};\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 16u, Signed > :\n    public gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >\n{\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n};\n\n#endif\n#endif\n\n\n#if BOOST_ATOMIC_INT64_LOCK_FREE > 0\n#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)\n\n// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public cas_based_operations< gcc_dcas_x86< Signed > >\n{\n};\n\n#elif (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)\n\n#define BOOST_ATOMIC_DETAIL_INT64_EXTENDED\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >\n{\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n};\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >\n{\n    typedef typename make_storage_type< 8u, Signed >::aligned 
aligned_storage_type;\n};\n\n#endif\n#endif\n\n#if BOOST_ATOMIC_INT32_LOCK_FREE > 0\n#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)\n\n#define BOOST_ATOMIC_DETAIL_INT32_EXTENDED\n\n#if !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >\n{\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n};\n\n#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >\n{\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n};\n\n#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >\n{\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n};\n\n#endif\n#endif\n\n#if BOOST_ATOMIC_INT16_LOCK_FREE > 0\n#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 
2 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)\n\n#define BOOST_ATOMIC_DETAIL_INT16_EXTENDED\n\n#if !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >\n{\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n};\n\n#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >\n{\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n};\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >\n{\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n};\n\n#endif\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >\n{\n    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;\n};\n\n#endif\n#endif\n\n#if BOOST_ATOMIC_INT8_LOCK_FREE > 0\n#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 
== 1 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\\\n    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE) ||\\\n    (__GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE) ||\\\n    (__GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE)\n\n#if !defined(BOOST_ATOMIC_DETAIL_INT16_EXTENDED)\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >\n{\n    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;\n};\n\n#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >\n{\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n};\n\n#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >\n{\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n};\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >\n{\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n};\n\n#endif\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public gcc_atomic_operations< typename make_storage_type< 1u, Signed >::type >\n{\n    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;\n};\n\n#endif\n#endif\n\n#undef BOOST_ATOMIC_DETAIL_INT16_EXTENDED\n#undef 
BOOST_ATOMIC_DETAIL_INT32_EXTENDED\n#undef BOOST_ATOMIC_DETAIL_INT64_EXTENDED\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_gcc_ppc.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_gcc_ppc.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\n// The implementation below uses information from this document:\n// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2010.02.19a.html\n\n/*\n    Refer to: Motorola: \"Programming Environments Manual for 32-Bit\n    Implementations of the PowerPC Architecture\", Appendix E:\n    \"Synchronization Programming Examples\" for an explanation of what is\n    going on here (can be found on the web at various places by the\n    name \"MPCFPE32B.pdf\", Google is your friend...)\n\n    Most of the atomic operations map to instructions in a relatively\n    straight-forward fashion, but \"load\"s may at first glance appear\n    a bit strange as they map to:\n\n            lwz %rX, addr\n            cmpw %rX, %rX\n            bne- 1f\n        1:\n\n    That is, the CPU is forced to perform a branch that \"formally\" depends\n    on the value retrieved from memory. This scheme has an overhead of\n    about 1-2 clock cycles per load, but it allows to map \"acquire\" to\n    the \"isync\" instruction instead of \"sync\" uniformly and for all type\n    of atomic operations. 
Since \"isync\" has a cost of about 15 clock\n    cycles, while \"sync\" hast a cost of about 50 clock cycles, the small\n    penalty to atomic loads more than compensates for this.\n\n    Byte- and halfword-sized atomic values are realized by encoding the\n    value to be represented into a word, performing sign/zero extension\n    as appropriate. This means that after add/sub operations the value\n    needs fixing up to accurately preserve the wrap-around semantic of\n    the smaller type. (Nothing special needs to be done for the bit-wise\n    and the \"exchange type\" operators as the compiler already sees to\n    it that values carried in registers are extended appropriately and\n    everything falls into place naturally).\n\n    The register constraint \"b\"  instructs gcc to use any register\n    except r0; this is sometimes required because the encoding for\n    r0 is used to signify \"constant zero\" in a number of instructions,\n    making r0 unusable in this place. For simplicity this constraint\n    is used everywhere since I am to lazy to look this up on a\n    per-instruction basis, and ppc has enough registers for this not\n    to pose a problem.\n*/\n\n// A note about memory_order_consume. Technically, this architecture allows to avoid\n// unnecessary memory barrier after consume load since it supports data dependency ordering.\n// However, some compiler optimizations may break a seemingly valid code relying on data\n// dependency tracking by injecting bogus branches to aid out of order execution.\n// This may happen not only in Boost.Atomic code but also in user's code, which we have no\n// control of. 
See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.\n// For this reason we promote memory_order_consume to memory_order_acquire.\n\nstruct gcc_ppc_operations_base\n{\n    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT\n    {\n#if defined(__powerpc64__) || defined(__PPC64__)\n        if (order == memory_order_seq_cst)\n            __asm__ __volatile__ (\"sync\" ::: \"memory\");\n        else if ((order & memory_order_release) != 0)\n            __asm__ __volatile__ (\"lwsync\" ::: \"memory\");\n#else\n        if ((order & memory_order_release) != 0)\n            __asm__ __volatile__ (\"sync\" ::: \"memory\");\n#endif\n    }\n\n    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & (memory_order_consume | memory_order_acquire)) != 0)\n            __asm__ __volatile__ (\"isync\" ::: \"memory\");\n    }\n};\n\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public gcc_ppc_operations_base\n{\n    typedef typename make_storage_type< 4u, Signed >::type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"stw %1, %0\\n\\t\"\n            : \"+m\" (storage)\n            : \"r\" (v)\n        );\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v;\n        if (order == memory_order_seq_cst)\n            __asm__ __volatile__ (\"sync\" ::: \"memory\");\n        if ((order & (memory_order_consume | memory_order_acquire)) != 0)\n        {\n            __asm__ __volatile__\n            (\n                \"lwz %0, %1\\n\\t\"\n                \"cmpw %0, %0\\n\\t\"\n                \"bne- 
1f\\n\\t\"\n                \"1:\\n\\t\"\n                \"isync\\n\\t\"\n                : \"=&r\" (v)\n                : \"m\" (storage)\n                : \"cr0\", \"memory\"\n            );\n        }\n        else\n        {\n            __asm__ __volatile__\n            (\n                \"lwz %0, %1\\n\\t\"\n                : \"=&r\" (v)\n                : \"m\" (storage)\n            );\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y1\\n\\t\"\n            \"stwcx. %2,%y1\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"+Z\" (storage)\n            : \"b\" (v)\n            : \"cr0\"\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        int success;\n        fence_before(success_order);\n        __asm__ __volatile__\n        (\n            \"li %1, 0\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"cmpw %0, %3\\n\\t\"\n            \"bne- 1f\\n\\t\"\n            \"stwcx. 
%4,%y2\\n\\t\"\n            \"bne- 1f\\n\\t\"\n            \"li %1, 1\\n\\t\"\n            \"1:\\n\\t\"\n            : \"=&b\" (expected), \"=&b\" (success), \"+Z\" (storage)\n            : \"b\" (expected), \"b\" (desired)\n            : \"cr0\"\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        int success;\n        fence_before(success_order);\n        __asm__ __volatile__\n        (\n            \"li %1, 0\\n\\t\"\n            \"0: lwarx %0,%y2\\n\\t\"\n            \"cmpw %0, %3\\n\\t\"\n            \"bne- 1f\\n\\t\"\n            \"stwcx. %4,%y2\\n\\t\"\n            \"bne- 0b\\n\\t\"\n            \"li %1, 1\\n\\t\"\n            \"1:\\n\\t\"\n            : \"=&b\" (expected), \"=&b\" (success), \"+Z\" (storage)\n            : \"b\" (expected), \"b\" (desired)\n            : \"cr0\"\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"add %1,%0,%3\\n\\t\"\n            \"stwcx. 
%1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"sub %1,%0,%3\\n\\t\"\n            \"stwcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"and %1,%0,%3\\n\\t\"\n            \"stwcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"or %1,%0,%3\\n\\t\"\n            \"stwcx. 
%1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"xor %1,%0,%3\\n\\t\"\n            \"stwcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, 0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n\ntemplate< >\nstruct operations< 1u, false > :\n    public operations< 4u, false >\n{\n    typedef operations< 4u, false > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"add %1,%0,%3\\n\\t\"\n            \"rlwinm %1, %1, 0, 0xff\\n\\t\"\n            \"stwcx. 
%1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"sub %1,%0,%3\\n\\t\"\n            \"rlwinm %1, %1, 0, 0xff\\n\\t\"\n            \"stwcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\ntemplate< >\nstruct operations< 1u, true > :\n    public operations< 4u, true >\n{\n    typedef operations< 4u, true > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"add %1,%0,%3\\n\\t\"\n            \"extsb %1, %1\\n\\t\"\n            \"stwcx. 
%1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"sub %1,%0,%3\\n\\t\"\n            \"extsb %1, %1\\n\\t\"\n            \"stwcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\n\ntemplate< >\nstruct operations< 2u, false > :\n    public operations< 4u, false >\n{\n    typedef operations< 4u, false > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"add %1,%0,%3\\n\\t\"\n            \"rlwinm %1, %1, 0, 0xffff\\n\\t\"\n            \"stwcx. 
%1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"sub %1,%0,%3\\n\\t\"\n            \"rlwinm %1, %1, 0, 0xffff\\n\\t\"\n            \"stwcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\ntemplate< >\nstruct operations< 2u, true > :\n    public operations< 4u, true >\n{\n    typedef operations< 4u, true > base_type;\n    typedef base_type::storage_type storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"add %1,%0,%3\\n\\t\"\n            \"extsh %1, %1\\n\\t\"\n            \"stwcx. 
%1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"lwarx %0,%y2\\n\\t\"\n            \"sub %1,%0,%3\\n\\t\"\n            \"extsh %1, %1\\n\\t\"\n            \"stwcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n};\n\n\n#if defined(__powerpc64__) || defined(__PPC64__)\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public gcc_ppc_operations_base\n{\n    typedef typename make_storage_type< 8u, Signed >::type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"std %1, %0\\n\\t\"\n            : \"+m\" (storage)\n            : \"r\" (v)\n        );\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v;\n        if (order == memory_order_seq_cst)\n            __asm__ __volatile__ (\"sync\" ::: \"memory\");\n        if ((order & (memory_order_consume | memory_order_acquire)) != 0)\n        {\n            __asm__ __volatile__\n            (\n                \"ld %0, %1\\n\\t\"\n  
              \"cmpd %0, %0\\n\\t\"\n                \"bne- 1f\\n\\t\"\n                \"1:\\n\\t\"\n                \"isync\\n\\t\"\n                : \"=&b\" (v)\n                : \"m\" (storage)\n                : \"cr0\", \"memory\"\n            );\n        }\n        else\n        {\n            __asm__ __volatile__\n            (\n                \"ld %0, %1\\n\\t\"\n                : \"=&b\" (v)\n                : \"m\" (storage)\n            );\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"ldarx %0,%y1\\n\\t\"\n            \"stdcx. %2,%y1\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"+Z\" (storage)\n            : \"b\" (v)\n            : \"cr0\"\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        int success;\n        fence_before(success_order);\n        __asm__ __volatile__\n        (\n            \"li %1, 0\\n\\t\"\n            \"ldarx %0,%y2\\n\\t\"\n            \"cmpd %0, %3\\n\\t\"\n            \"bne- 1f\\n\\t\"\n            \"stdcx. 
%4,%y2\\n\\t\"\n            \"bne- 1f\\n\\t\"\n            \"li %1, 1\\n\\t\"\n            \"1:\"\n            : \"=&b\" (expected), \"=&b\" (success), \"+Z\" (storage)\n            : \"b\" (expected), \"b\" (desired)\n            : \"cr0\"\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        int success;\n        fence_before(success_order);\n        __asm__ __volatile__\n        (\n            \"li %1, 0\\n\\t\"\n            \"0: ldarx %0,%y2\\n\\t\"\n            \"cmpd %0, %3\\n\\t\"\n            \"bne- 1f\\n\\t\"\n            \"stdcx. %4,%y2\\n\\t\"\n            \"bne- 0b\\n\\t\"\n            \"li %1, 1\\n\\t\"\n            \"1:\\n\\t\"\n            : \"=&b\" (expected), \"=&b\" (success), \"+Z\" (storage)\n            : \"b\" (expected), \"b\" (desired)\n            : \"cr0\"\n        );\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        return !!success;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"ldarx %0,%y2\\n\\t\"\n            \"add %1,%0,%3\\n\\t\"\n            \"stdcx. 
%1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"ldarx %0,%y2\\n\\t\"\n            \"sub %1,%0,%3\\n\\t\"\n            \"stdcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"ldarx %0,%y2\\n\\t\"\n            \"and %1,%0,%3\\n\\t\"\n            \"stdcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"ldarx %0,%y2\\n\\t\"\n            \"or %1,%0,%3\\n\\t\"\n            \"stdcx. 
%1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type original, tmp;\n        fence_before(order);\n        __asm__ __volatile__\n        (\n            \"1:\\n\\t\"\n            \"ldarx %0,%y2\\n\\t\"\n            \"xor %1,%0,%3\\n\\t\"\n            \"stdcx. %1,%y2\\n\\t\"\n            \"bne- 1b\\n\\t\"\n            : \"=&b\" (original), \"=&b\" (tmp), \"+Z\" (storage)\n            : \"b\" (v)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC\n        );\n        fence_after(order);\n        return original;\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, 0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n#endif // defined(__powerpc64__) || defined(__PPC64__)\n\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    switch (order)\n    {\n    case memory_order_consume:\n    case memory_order_acquire:\n    case memory_order_release:\n    case memory_order_acq_rel:\n#if defined(__powerpc64__) || defined(__PPC64__)\n        __asm__ __volatile__ (\"lwsync\" ::: \"memory\");\n        break;\n#endif\n    case memory_order_seq_cst:\n        __asm__ __volatile__ (\"sync\" ::: \"memory\");\n        break;\n    default:;\n    }\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) 
BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n#if defined(__ibmxl__) || defined(__IBMCPP__)\n        __fence();\n#else\n        __asm__ __volatile__ (\"\" ::: \"memory\");\n#endif\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_gcc_sparc.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2010 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_gcc_sparc.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n#include <boost/atomic/detail/ops_cas_based.hpp>\n#include <boost/atomic/detail/ops_extending_cas_based.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\nstruct gcc_sparc_cas_base\n{\n    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT\n    {\n        if (order == memory_order_seq_cst)\n            __asm__ __volatile__ (\"membar #Sync\" ::: \"memory\");\n        else if ((order & memory_order_release) != 0)\n            __asm__ __volatile__ (\"membar #StoreStore | #LoadStore\" ::: \"memory\");\n    }\n\n    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT\n    {\n        if (order == memory_order_seq_cst)\n            __asm__ __volatile__ (\"membar #Sync\" ::: \"memory\");\n        else if ((order & (memory_order_consume | memory_order_acquire)) != 0)\n            __asm__ __volatile__ (\"membar #StoreStore | #LoadStore\" ::: \"memory\");\n    }\n\n    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT\n    {\n        if (order == memory_order_seq_cst)\n            __asm__ __volatile__ (\"membar #Sync\" ::: \"memory\");\n    }\n};\n\ntemplate< bool 
Signed >\nstruct gcc_sparc_cas32 :\n    public gcc_sparc_cas_base\n{\n    typedef typename make_storage_type< 4u, Signed >::type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before_store(order);\n        storage = v;\n        fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        fence_before(success_order);\n        storage_type previous = expected;\n        __asm__ __volatile__\n        (\n            \"cas [%1], %2, %0\"\n            : \"+r\" (desired)\n            : \"r\" (&storage), \"r\" (previous)\n            : \"memory\"\n        );\n        const bool success = (desired == previous);\n        if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        expected = desired;\n        return success;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        __asm__ __volatile__\n        (\n            \"swap [%1], 
%0\"\n            : \"+r\" (v)\n            : \"r\" (&storage)\n            : \"memory\"\n        );\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public cas_based_operations< gcc_sparc_cas32< Signed > >\n{\n};\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >\n{\n};\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >\n{\n};\n\ntemplate< bool Signed >\nstruct gcc_sparc_cas64 :\n    public gcc_sparc_cas_base\n{\n    typedef typename make_storage_type< 8u, Signed >::type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before_store(order);\n        storage = v;\n        fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        fence_before(success_order);\n        storage_type previous = expected;\n        __asm__ __volatile__\n        (\n            \"casx [%1], %2, %0\"\n            : \"+r\" (desired)\n            : \"r\" (&storage), \"r\" (previous)\n            : \"memory\"\n        );\n        const bool success = (desired == previous);\n      
  if (success)\n            fence_after(success_order);\n        else\n            fence_after(failure_order);\n        expected = desired;\n        return success;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public cas_based_operations< cas_based_exchange< gcc_sparc_cas64< Signed > > >\n{\n};\n\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    switch (order)\n    {\n    case memory_order_release:\n        __asm__ __volatile__ (\"membar #StoreStore | #LoadStore\" ::: \"memory\");\n        break;\n    case memory_order_consume:\n    case memory_order_acquire:\n        __asm__ __volatile__ (\"membar #LoadLoad | #LoadStore\" ::: \"memory\");\n        break;\n    case memory_order_acq_rel:\n        __asm__ __volatile__ (\"membar #LoadLoad | #LoadStore | #StoreStore\" ::: \"memory\");\n        break;\n    case memory_order_seq_cst:\n        __asm__ __volatile__ (\"membar #Sync\" ::: \"memory\");\n        break;\n    case memory_order_relaxed:\n    default:\n        break;\n    }\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        __asm__ __volatile__ (\"\" ::: \"memory\");\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_gcc_sync.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2011 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_gcc_sync.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/detail/ops_extending_cas_based.hpp>\n#include <boost/atomic/capabilities.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\nstruct gcc_sync_operations_base\n{\n    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & memory_order_release) != 0)\n            __sync_synchronize();\n    }\n\n    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT\n    {\n        if (order == memory_order_seq_cst)\n            __sync_synchronize();\n    }\n\n    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & (memory_order_acquire | memory_order_consume)) != 0)\n            __sync_synchronize();\n    }\n};\n\ntemplate< typename T >\nstruct gcc_sync_operations :\n    public gcc_sync_operations_base\n{\n    typedef T storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before_store(order);\n        storage = v;\n        fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const 
volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        fence_after_load(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return __sync_fetch_and_add(&storage, v);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return __sync_fetch_and_sub(&storage, v);\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of\n        // std::atomic<> uses this intrinsic unconditionally. We do so as well. In case if some architectures actually don't support this, we can always\n        // add a check here and fall back to a CAS loop.\n        if ((order & memory_order_release) != 0)\n            __sync_synchronize();\n        return __sync_lock_test_and_set(&storage, v);\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type expected2 = expected;\n        storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);\n\n        if (old_val == expected2)\n        {\n            return true;\n        }\n        else\n        {\n            expected = old_val;\n            return false;\n        }\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(storage, 
expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return __sync_fetch_and_and(&storage, v);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return __sync_fetch_and_or(&storage, v);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return __sync_fetch_and_xor(&storage, v);\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & memory_order_release) != 0)\n            __sync_synchronize();\n        return !!__sync_lock_test_and_set(&storage, 1);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        __sync_lock_release(&storage);\n        if (order == memory_order_seq_cst)\n            __sync_synchronize();\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n#if BOOST_ATOMIC_INT8_LOCK_FREE > 0\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\n    public gcc_sync_operations< typename make_storage_type< 1u, Signed >::type >\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n    public extending_cas_based_operations< 
gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >\n#else\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >\n#endif\n{\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\n    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\n    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n#else\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n#endif\n};\n#endif\n\n#if BOOST_ATOMIC_INT16_LOCK_FREE > 0\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\n    public gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >\n#else\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >\n#endif\n{\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\n    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n    typedef typename make_storage_type< 8u, Signed 
>::aligned aligned_storage_type;\n#else\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n#endif\n};\n#endif\n\n#if BOOST_ATOMIC_INT32_LOCK_FREE > 0\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n    public gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >\n#else\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >\n#endif\n{\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n#else\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n#endif\n};\n#endif\n\n#if BOOST_ATOMIC_INT64_LOCK_FREE > 0\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n    public gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >\n#else\n    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >\n#endif\n{\n#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n#else\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n#endif\n};\n#endif\n\n#if BOOST_ATOMIC_INT128_LOCK_FREE > 0\ntemplate< bool Signed >\nstruct operations< 16u, Signed > :\n    public gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >\n{\n    typedef typename make_storage_type< 16u, Signed >::aligned 
aligned_storage_type;\n};\n#endif\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        __sync_synchronize();\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        __asm__ __volatile__ (\"\" ::: \"memory\");\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_gcc_x86.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_gcc_x86.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>\n#include <boost/atomic/detail/ops_cas_based.hpp>\n#endif\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(__x86_64__)\n#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \"rdx\"\n#else\n#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \"edx\"\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\nstruct gcc_x86_operations_base\n{\n    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & memory_order_release) != 0)\n            __asm__ __volatile__ (\"\" ::: \"memory\");\n    }\n\n    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & memory_order_acquire) != 0)\n            __asm__ __volatile__ (\"\" ::: \"memory\");\n    }\n};\n\ntemplate< typename T, typename Derived >\nstruct gcc_x86_operations :\n    public gcc_x86_operations_base\n{\n    typedef T storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        if (order != 
memory_order_seq_cst)\n        {\n            fence_before(order);\n            storage = v;\n            fence_after(order);\n        }\n        else\n        {\n            Derived::exchange(storage, v, order);\n        }\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        return Derived::fetch_add(storage, -v, order);\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!Derived::exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, (storage_type)0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >\n{\n    typedef gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type 
fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        __asm__ __volatile__\n        (\n            \"lock; xaddb %0, %1\"\n            : \"+q\" (v), \"+m\" (storage)\n            :\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        __asm__ __volatile__\n        (\n            \"xchgb %0, %1\"\n            : \"+q\" (v), \"+m\" (storage)\n            :\n            : \"memory\"\n        );\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        bool success;\n        __asm__ __volatile__\n        (\n            \"lock; cmpxchgb %3, %1\\n\\t\"\n            \"sete %2\"\n            : \"+a\" (previous), \"+m\" (storage), \"=q\" (success)\n            : \"q\" (desired)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        expected = previous;\n        return success;\n    }\n\n#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\\\n    __asm__ __volatile__\\\n    (\\\n        \"xor %%\" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \", %%\" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \"\\n\\t\"\\\n        \".align 16\\n\\t\"\\\n        \"1: movb %[arg], %%dl\\n\\t\"\\\n        op \" %%al, %%dl\\n\\t\"\\\n        \"lock; cmpxchgb %%dl, %[storage]\\n\\t\"\\\n        \"jne 1b\"\\\n        : [res] \"+a\" (result), [storage] \"+m\" (storage)\\\n        : [arg] \"q\" (argument)\\\n        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, \"memory\"\\\n    )\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, 
memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"andb\", v, res);\n        return res;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"orb\", v, res);\n        return res;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"xorb\", v, res);\n        return res;\n    }\n\n#undef BOOST_ATOMIC_DETAIL_CAS_LOOP\n};\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >\n{\n    typedef gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        __asm__ __volatile__\n        (\n            \"lock; xaddw %0, %1\"\n            : \"+q\" (v), \"+m\" (storage)\n            :\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        __asm__ __volatile__\n        (\n            \"xchgw %0, %1\"\n            : \"+q\" (v), \"+m\" (storage)\n            :\n            : \"memory\"\n        );\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, 
storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        bool success;\n        __asm__ __volatile__\n        (\n            \"lock; cmpxchgw %3, %1\\n\\t\"\n            \"sete %2\"\n            : \"+a\" (previous), \"+m\" (storage), \"=q\" (success)\n            : \"q\" (desired)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        expected = previous;\n        return success;\n    }\n\n#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\\\n    __asm__ __volatile__\\\n    (\\\n        \"xor %%\" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \", %%\" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \"\\n\\t\"\\\n        \".align 16\\n\\t\"\\\n        \"1: movw %[arg], %%dx\\n\\t\"\\\n        op \" %%ax, %%dx\\n\\t\"\\\n        \"lock; cmpxchgw %%dx, %[storage]\\n\\t\"\\\n        \"jne 1b\"\\\n        : [res] \"+a\" (result), [storage] \"+m\" (storage)\\\n        : [arg] \"q\" (argument)\\\n        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, \"memory\"\\\n    )\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"andw\", v, res);\n        return res;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"orw\", v, res);\n        return res;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"xorw\", v, res);\n        return res;\n    }\n\n#undef BOOST_ATOMIC_DETAIL_CAS_LOOP\n};\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    
public gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >\n{\n    typedef gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        __asm__ __volatile__\n        (\n            \"lock; xaddl %0, %1\"\n            : \"+r\" (v), \"+m\" (storage)\n            :\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        __asm__ __volatile__\n        (\n            \"xchgl %0, %1\"\n            : \"+r\" (v), \"+m\" (storage)\n            :\n            : \"memory\"\n        );\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        bool success;\n        __asm__ __volatile__\n        (\n            \"lock; cmpxchgl %3, %1\\n\\t\"\n            \"sete %2\"\n            : \"+a\" (previous), \"+m\" (storage), \"=q\" (success)\n            : \"r\" (desired)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        expected = previous;\n        return success;\n    }\n\n#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\\\n    __asm__ __volatile__\\\n    (\\\n        \"xor %%\" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \", %%\" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \"\\n\\t\"\\\n        \".align 16\\n\\t\"\\\n        \"1: movl %[arg], %%edx\\n\\t\"\\\n        
op \" %%eax, %%edx\\n\\t\"\\\n        \"lock; cmpxchgl %%edx, %[storage]\\n\\t\"\\\n        \"jne 1b\"\\\n        : [res] \"+a\" (result), [storage] \"+m\" (storage)\\\n        : [arg] \"r\" (argument)\\\n        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, \"memory\"\\\n    )\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"andl\", v, res);\n        return res;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"orl\", v, res);\n        return res;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"xorl\", v, res);\n        return res;\n    }\n\n#undef BOOST_ATOMIC_DETAIL_CAS_LOOP\n};\n\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public cas_based_operations< gcc_dcas_x86< Signed > >\n{\n};\n\n#elif defined(__x86_64__)\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >\n{\n    typedef gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        __asm__ __volatile__\n        (\n            \"lock; xaddq %0, 
%1\"\n            : \"+r\" (v), \"+m\" (storage)\n            :\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        __asm__ __volatile__\n        (\n            \"xchgq %0, %1\"\n            : \"+r\" (v), \"+m\" (storage)\n            :\n            : \"memory\"\n        );\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        bool success;\n        __asm__ __volatile__\n        (\n            \"lock; cmpxchgq %3, %1\\n\\t\"\n            \"sete %2\"\n            : \"+a\" (previous), \"+m\" (storage), \"=q\" (success)\n            : \"r\" (desired)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        expected = previous;\n        return success;\n    }\n\n#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\\\n    __asm__ __volatile__\\\n    (\\\n        \"xor %%\" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \", %%\" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER \"\\n\\t\"\\\n        \".align 16\\n\\t\"\\\n        \"1: movq %[arg], %%rdx\\n\\t\"\\\n        op \" %%rax, %%rdx\\n\\t\"\\\n        \"lock; cmpxchgq %%rdx, %[storage]\\n\\t\"\\\n        \"jne 1b\"\\\n        : [res] \"+a\" (result), [storage] \"+m\" (storage)\\\n        : [arg] \"r\" (argument)\\\n        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, \"memory\"\\\n    )\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"andq\", v, res);\n        return res;\n    }\n\n 
   static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"orq\", v, res);\n        return res;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        BOOST_ATOMIC_DETAIL_CAS_LOOP(\"xorq\", v, res);\n        return res;\n    }\n\n#undef BOOST_ATOMIC_DETAIL_CAS_LOOP\n};\n\n#endif\n\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n\ntemplate< bool Signed >\nstruct operations< 16u, Signed > :\n    public cas_based_operations< gcc_dcas_x86_64< Signed > >\n{\n};\n\n#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order == memory_order_seq_cst)\n    {\n        __asm__ __volatile__\n        (\n#if defined(__x86_64__) || defined(__SSE2__)\n            \"mfence\\n\"\n#else\n            \"lock; addl $0, (%%esp)\\n\"\n#endif\n            ::: \"memory\"\n        );\n    }\n    else if ((order & (memory_order_acquire | memory_order_release)) != 0)\n    {\n        __asm__ __volatile__ (\"\" ::: \"memory\");\n    }\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        __asm__ __volatile__ (\"\" ::: \"memory\");\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#undef BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_gcc_x86_dcas.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_gcc_x86_dcas.hpp\n *\n * This header contains implementation of the double-width CAS primitive for x86.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_\n\n#include <boost/cstdint.hpp>\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/capabilities.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)\n\ntemplate< bool Signed >\nstruct gcc_dcas_x86\n{\n    typedef typename make_storage_type< 8u, Signed >::type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        if ((((uint32_t)&storage) & 0x00000007) == 0)\n        {\n#if defined(__SSE2__)\n            __asm__ __volatile__\n            (\n#if defined(__AVX__)\n                \"vmovq %1, %%xmm4\\n\\t\"\n                \"vmovq %%xmm4, %0\\n\\t\"\n#else\n                \"movq %1, %%xmm4\\n\\t\"\n                \"movq %%xmm4, %0\\n\\t\"\n#endif\n                : \"=m\" (storage)\n                : \"m\" (v)\n                : \"memory\", \"xmm4\"\n            );\n#else\n            __asm__ __volatile__\n            (\n                \"fildll %1\\n\\t\"\n                \"fistpll %0\\n\\t\"\n                : \"=m\" (storage)\n                : \"m\" (v)\n                : \"memory\"\n            );\n#endif\n    
    }\n        else\n        {\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n#if defined(__PIC__)\n            uint32_t scratch;\n            __asm__ __volatile__\n            (\n                \"movl %%ebx, %[scratch]\\n\\t\"\n                \"movl %[value_lo], %%ebx\\n\\t\"\n                \"movl %[dest], %%eax\\n\\t\"\n                \"movl 4+%[dest], %%edx\\n\\t\"\n                \".align 16\\n\\t\"\n                \"1: lock; cmpxchg8b %[dest]\\n\\t\"\n                \"jne 1b\\n\\t\"\n                \"movl %[scratch], %%ebx\\n\\t\"\n                : [scratch] \"=m\" (scratch), [dest] \"=o\" (storage)\n                : [value_lo] \"a\" ((uint32_t)v), \"c\" ((uint32_t)(v >> 32))\n                : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"edx\", \"memory\"\n            );\n#else // defined(__PIC__)\n            __asm__ __volatile__\n            (\n                \"movl %[dest], %%eax\\n\\t\"\n                \"movl 4+%[dest], %%edx\\n\\t\"\n                \".align 16\\n\\t\"\n                \"1: lock; cmpxchg8b %[dest]\\n\\t\"\n                \"jne 1b\\n\\t\"\n                : [dest] \"=o\" (storage)\n                : [value_lo] \"b\" ((uint32_t)v), \"c\" ((uint32_t)(v >> 32))\n                : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"eax\", \"edx\", \"memory\"\n            );\n#endif // defined(__PIC__)\n#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n#if defined(__PIC__)\n            uint32_t scratch;\n            __asm__ __volatile__\n            (\n                \"movl %%ebx, %[scratch]\\n\\t\"\n                \"movl %[value_lo], %%ebx\\n\\t\"\n                \"movl 0(%[dest]), %%eax\\n\\t\"\n                \"movl 4(%[dest]), %%edx\\n\\t\"\n                \".align 16\\n\\t\"\n                \"1: lock; cmpxchg8b 0(%[dest])\\n\\t\"\n                \"jne 1b\\n\\t\"\n                \"movl %[scratch], %%ebx\\n\\t\"\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)\n      
          : [scratch] \"=m,m\" (scratch)\n                : [value_lo] \"a,a\" ((uint32_t)v), \"c,c\" ((uint32_t)(v >> 32)), [dest] \"D,S\" (&storage)\n#else\n                : [scratch] \"=m\" (scratch)\n                : [value_lo] \"a\" ((uint32_t)v), \"c\" ((uint32_t)(v >> 32)), [dest] \"D\" (&storage)\n#endif\n                : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"edx\", \"memory\"\n            );\n#else // defined(__PIC__)\n            __asm__ __volatile__\n            (\n                \"movl 0(%[dest]), %%eax\\n\\t\"\n                \"movl 4(%[dest]), %%edx\\n\\t\"\n                \".align 16\\n\\t\"\n                \"1: lock; cmpxchg8b 0(%[dest])\\n\\t\"\n                \"jne 1b\\n\\t\"\n                :\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)\n                : [value_lo] \"b,b\" ((uint32_t)v), \"c,c\" ((uint32_t)(v >> 32)), [dest] \"D,S\" (&storage)\n#else\n                : [value_lo] \"b\" ((uint32_t)v), \"c\" ((uint32_t)(v >> 32)), [dest] \"D\" (&storage)\n#endif\n                : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"eax\", \"edx\", \"memory\"\n            );\n#endif // defined(__PIC__)\n#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        }\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type value;\n\n        if ((((uint32_t)&storage) & 0x00000007) == 0)\n        {\n#if defined(__SSE2__)\n            __asm__ __volatile__\n            (\n#if defined(__AVX__)\n                \"vmovq %1, %%xmm4\\n\\t\"\n                \"vmovq %%xmm4, %0\\n\\t\"\n#else\n                \"movq %1, %%xmm4\\n\\t\"\n                \"movq %%xmm4, %0\\n\\t\"\n#endif\n                : \"=m\" (value)\n                : \"m\" (storage)\n                : \"memory\", \"xmm4\"\n            );\n#else\n            __asm__ __volatile__\n            (\n                \"fildll %1\\n\\t\"\n                
\"fistpll %0\\n\\t\"\n                : \"=m\" (value)\n                : \"m\" (storage)\n                : \"memory\"\n            );\n#endif\n        }\n        else\n        {\n#if defined(__clang__)\n            // Clang cannot allocate eax:edx register pairs but it has sync intrinsics\n            value = __sync_val_compare_and_swap(&storage, (storage_type)0, (storage_type)0);\n#else\n            // We don't care for comparison result here; the previous value will be stored into value anyway.\n            // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b.\n            __asm__ __volatile__\n            (\n                \"movl %%ebx, %%eax\\n\\t\"\n                \"movl %%ecx, %%edx\\n\\t\"\n                \"lock; cmpxchg8b %[storage]\\n\\t\"\n                : \"=&A\" (value)\n                : [storage] \"m\" (storage)\n                : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n            );\n#endif\n        }\n\n        return value;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n#if defined(__clang__)\n        // Clang cannot allocate eax:edx register pairs but it has sync intrinsics\n        storage_type old_expected = expected;\n        expected = __sync_val_compare_and_swap(&storage, old_expected, desired);\n        return expected == old_expected;\n#elif defined(__PIC__)\n        // Make sure ebx is saved and restored properly in case\n        // of position independent code. To make this work\n        // setup register constraints such that ebx can not be\n        // used by accident e.g. as base address for the variable\n        // to be modified. 
Accessing \"scratch\" should always be okay,\n        // as it can only be placed on the stack (and therefore\n        // accessed through ebp or esp only).\n        //\n        // In theory, could push/pop ebx onto/off the stack, but movs\n        // to a prepared stack slot turn out to be faster.\n\n        uint32_t scratch;\n        bool success;\n        __asm__ __volatile__\n        (\n            \"movl %%ebx, %[scratch]\\n\\t\"\n            \"movl %[desired_lo], %%ebx\\n\\t\"\n            \"lock; cmpxchg8b %[dest]\\n\\t\"\n            \"movl %[scratch], %%ebx\\n\\t\"\n            \"sete %[success]\\n\\t\"\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)\n            : \"+A,A,A,A,A,A\" (expected), [dest] \"+m,m,m,m,m,m\" (storage), [scratch] \"=m,m,m,m,m,m\" (scratch), [success] \"=q,m,q,m,q,m\" (success)\n            : [desired_lo] \"S,S,D,D,m,m\" ((uint32_t)desired), \"c,c,c,c,c,c\" ((uint32_t)(desired >> 32))\n#else\n            : \"+A\" (expected), [dest] \"+m\" (storage), [scratch] \"=m\" (scratch), [success] \"=q\" (success)\n            : [desired_lo] \"S\" ((uint32_t)desired), \"c\" ((uint32_t)(desired >> 32))\n#endif\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        return success;\n#else\n        bool success;\n        __asm__ __volatile__\n        (\n            \"lock; cmpxchg8b %[dest]\\n\\t\"\n            \"sete %[success]\\n\\t\"\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)\n            : \"+A,A\" (expected), [dest] \"+m,m\" (storage), [success] \"=q,m\" (success)\n            : \"b,b\" ((uint32_t)desired), \"c,c\" ((uint32_t)(desired >> 32))\n#else\n            : \"+A\" (expected), [dest] \"+m\" (storage), [success] \"=q\" (success)\n            : \"b\" ((uint32_t)desired), \"c\" ((uint32_t)(desired >> 32))\n#endif\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        return success;\n#endif\n    }\n\n    static BOOST_FORCEINLINE bool 
compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n#if defined(__clang__)\n        // Clang cannot allocate eax:edx register pairs but it has sync intrinsics\n        storage_type old_val = storage;\n        while (true)\n        {\n            storage_type val = __sync_val_compare_and_swap(&storage, old_val, v);\n            if (val == old_val)\n                return val;\n            old_val = val;\n        }\n#elif !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n#if defined(__PIC__)\n        uint32_t scratch;\n        __asm__ __volatile__\n        (\n            \"movl %%ebx, %[scratch]\\n\\t\"\n            \"movl %%eax, %%ebx\\n\\t\"\n            \"movl %%edx, %%ecx\\n\\t\"\n            \"movl %[dest], %%eax\\n\\t\"\n            \"movl 4+%[dest], %%edx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg8b %[dest]\\n\\t\"\n            \"jne 1b\\n\\t\"\n            \"movl %[scratch], %%ebx\\n\\t\"\n            : \"+A\" (v), [scratch] \"=m\" (scratch), [dest] \"+o\" (storage)\n            :\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"ecx\", \"memory\"\n        );\n        return v;\n#else // defined(__PIC__)\n        __asm__ __volatile__\n        (\n            \"movl %[dest], %%eax\\n\\t\"\n            \"movl 4+%[dest], %%edx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg8b %[dest]\\n\\t\"\n            \"jne 1b\\n\\t\"\n            : \"=A\" (v), [dest] \"+o\" (storage)\n            : \"b\" ((uint32_t)v), \"c\" ((uint32_t)(v >> 32))\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n       
 );\n        return v;\n#endif // defined(__PIC__)\n#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n#if defined(__PIC__)\n        uint32_t scratch;\n        __asm__ __volatile__\n        (\n            \"movl %%ebx, %[scratch]\\n\\t\"\n            \"movl %%eax, %%ebx\\n\\t\"\n            \"movl %%edx, %%ecx\\n\\t\"\n            \"movl 0(%[dest]), %%eax\\n\\t\"\n            \"movl 4(%[dest]), %%edx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg8b 0(%[dest])\\n\\t\"\n            \"jne 1b\\n\\t\"\n            \"movl %[scratch], %%ebx\\n\\t\"\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)\n            : \"+A,A\" (v), [scratch] \"=m,m\" (scratch)\n            : [dest] \"D,S\" (&storage)\n#else\n            : \"+A\" (v), [scratch] \"=m\" (scratch)\n            : [dest] \"D\" (&storage)\n#endif\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"ecx\", \"memory\"\n        );\n        return v;\n#else // defined(__PIC__)\n        __asm__ __volatile__\n        (\n            \"movl 0(%[dest]), %%eax\\n\\t\"\n            \"movl 4(%[dest]), %%edx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg8b 0(%[dest])\\n\\t\"\n            \"jne 1b\\n\\t\"\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)\n            : \"=A,A\" (v)\n            : \"b,b\" ((uint32_t)v), \"c,c\" ((uint32_t)(v >> 32)), [dest] \"D,S\" (&storage)\n#else\n            : \"=A\" (v)\n            : \"b\" ((uint32_t)v), \"c\" ((uint32_t)(v >> 32)), [dest] \"D\" (&storage)\n#endif\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        return v;\n#endif // defined(__PIC__)\n#endif\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)\n\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n\ntemplate< bool Signed >\nstruct 
gcc_dcas_x86_64\n{\n    typedef typename make_storage_type< 16u, Signed >::type storage_type;\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        uint64_t const* p_value = (uint64_t const*)&v;\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq %[dest], %%rax\\n\\t\"\n            \"movq 8+%[dest], %%rdx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg16b %[dest]\\n\\t\"\n            \"jne 1b\\n\\t\"\n            : [dest] \"=o\" (storage)\n            : \"b\" (p_value[0]), \"c\" (p_value[1])\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"rax\", \"rdx\", \"memory\"\n        );\n#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq 0(%[dest]), %%rax\\n\\t\"\n            \"movq 8(%[dest]), %%rdx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg16b 0(%[dest])\\n\\t\"\n            \"jne 1b\\n\\t\"\n            :\n            : \"b\" (p_value[0]), \"c\" (p_value[1]), [dest] \"r\" (&storage)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"rax\", \"rdx\", \"memory\"\n        );\n#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT\n    {\n#if defined(__clang__)\n        // Clang cannot allocate rax:rdx register pairs but it has sync intrinsics\n        storage_type value = storage_type();\n        return __sync_val_compare_and_swap(&storage, value, value);\n#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)\n        // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap\n     
   storage_type value;\n\n        // We don't care for comparison result here; the previous value will be stored into value anyway.\n        // Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b.\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq %%rbx, %%rax\\n\\t\"\n            \"movq %%rcx, %%rdx\\n\\t\"\n            \"lock; cmpxchg16b %[storage]\\n\\t\"\n            \"movq %%rax, %[value]\\n\\t\"\n            \"movq %%rdx, 8+%[value]\\n\\t\"\n            : [value] \"=o\" (value)\n            : [storage] \"m\" (storage)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\", \"rax\", \"rdx\"\n        );\n#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq %%rbx, %%rax\\n\\t\"\n            \"movq %%rcx, %%rdx\\n\\t\"\n            \"lock; cmpxchg16b %[storage]\\n\\t\"\n            \"movq %%rax, 0(%[value])\\n\\t\"\n            \"movq %%rdx, 8(%[value])\\n\\t\"\n            :\n            : [storage] \"m\" (storage), [value] \"r\" (&value)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\", \"rax\", \"rdx\"\n        );\n#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n\n        return value;\n#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)\n        storage_type value;\n\n        // We don't care for comparison result here; the previous value will be stored into value anyway.\n        // Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b.\n        __asm__ __volatile__\n        (\n            \"movq %%rbx, %%rax\\n\\t\"\n            \"movq %%rcx, %%rdx\\n\\t\"\n            \"lock; cmpxchg16b %[storage]\\n\\t\"\n            : \"=&A\" (value)\n            : [storage] \"m\" (storage)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA 
\"memory\"\n        );\n\n        return value;\n#endif\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n#if defined(__clang__)\n        // Clang cannot allocate rax:rdx register pairs but it has sync intrinsics\n        storage_type old_expected = expected;\n        expected = __sync_val_compare_and_swap(&storage, old_expected, desired);\n        return expected == old_expected;\n#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)\n        // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap\n        uint64_t const* p_desired = (uint64_t const*)&desired;\n        bool success;\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq %[expected], %%rax\\n\\t\"\n            \"movq 8+%[expected], %%rdx\\n\\t\"\n            \"lock; cmpxchg16b %[dest]\\n\\t\"\n            \"sete %[success]\\n\\t\"\n            \"movq %%rax, %[expected]\\n\\t\"\n            \"movq %%rdx, 8+%[expected]\\n\\t\"\n            : [dest] \"+m\" (storage), [expected] \"+o\" (expected), [success] \"=q\" (success)\n            : \"b\" (p_desired[0]), \"c\" (p_desired[1])\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\", \"rax\", \"rdx\"\n        );\n#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq 0(%[expected]), %%rax\\n\\t\"\n            \"movq 8(%[expected]), %%rdx\\n\\t\"\n            \"lock; cmpxchg16b %[dest]\\n\\t\"\n            \"sete %[success]\\n\\t\"\n            \"movq %%rax, 0(%[expected])\\n\\t\"\n            \"movq %%rdx, 8(%[expected])\\n\\t\"\n            : [dest] \"+m\" (storage), [success] \"=q\" (success)\n            : \"b\" (p_desired[0]), \"c\" (p_desired[1]), [expected] \"r\" 
(&expected)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\", \"rax\", \"rdx\"\n        );\n#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n\n        return success;\n#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)\n        uint64_t const* p_desired = (uint64_t const*)&desired;\n        bool success;\n        __asm__ __volatile__\n        (\n            \"lock; cmpxchg16b %[dest]\\n\\t\"\n            \"sete %[success]\\n\\t\"\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)\n            : \"+A,A\" (expected), [dest] \"+m,m\" (storage), [success] \"=q,m\" (success)\n            : \"b,b\" (p_desired[0]), \"c,c\" (p_desired[1])\n#else\n            : \"+A\" (expected), [dest] \"+m\" (storage), [success] \"=q\" (success)\n            : \"b\" (p_desired[0]), \"c\" (p_desired[1])\n#endif\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n        return success;\n#endif\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n#if defined(__clang__)\n        // Clang cannot allocate eax:edx register pairs but it has sync intrinsics\n        storage_type old_val = storage;\n        while (true)\n        {\n            storage_type val = __sync_val_compare_and_swap(&storage, old_val, v);\n            if (val == old_val)\n                return val;\n            old_val = val;\n        }\n#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)\n        // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit 
__sync_val_compare_and_swap\n        storage_type old_value;\n        uint64_t const* p_value = (uint64_t const*)&v;\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq %[dest], %%rax\\n\\t\"\n            \"movq 8+%[dest], %%rdx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg16b %[dest]\\n\\t\"\n            \"jne 1b\\n\\t\"\n            \"movq %%rax, %[old_value]\\n\\t\"\n            \"movq %%rdx, 8+%[old_value]\\n\\t\"\n            : [dest] \"+o\" (storage), [old_value] \"=o\" (old_value)\n            : \"b\" (p_value[0]), \"c\" (p_value[1])\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\", \"rax\", \"rdx\"\n        );\n#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq 0(%[dest]), %%rax\\n\\t\"\n            \"movq 8(%[dest]), %%rdx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg16b 0(%[dest])\\n\\t\"\n            \"jne 1b\\n\\t\"\n            \"movq %%rax, 0(%[old_value])\\n\\t\"\n            \"movq %%rdx, 8(%[old_value])\\n\\t\"\n            :\n            : \"b\" (p_value[0]), \"c\" (p_value[1]), [dest] \"r\" (&storage), [old_value] \"r\" (&old_value)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\", \"rax\", \"rdx\"\n        );\n#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n\n        return old_value;\n#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)\n        uint64_t const* p_value = (uint64_t const*)&v;\n#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq %[dest], %%rax\\n\\t\"\n            \"movq 8+%[dest], %%rdx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg16b %[dest]\\n\\t\"\n            \"jne 1b\\n\\t\"\n            : \"=&A\" (v), [dest] \"+o\" (storage)\n            : 
\"b\" (p_value[0]), \"c\" (p_value[1])\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n        __asm__ __volatile__\n        (\n            \"movq 0(%[dest]), %%rax\\n\\t\"\n            \"movq 8(%[dest]), %%rdx\\n\\t\"\n            \".align 16\\n\\t\"\n            \"1: lock; cmpxchg16b 0(%[dest])\\n\\t\"\n            \"jne 1b\\n\\t\"\n            : \"=&A\" (v)\n            : \"b\" (p_value[0]), \"c\" (p_value[1]), [dest] \"r\" (&storage)\n            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA \"memory\"\n        );\n#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)\n\n        return v;\n#endif\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\n#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_linux_arm.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009, 2011 Helge Bahmann\n * Copyright (c) 2009 Phil Endecott\n * Copyright (c) 2013 Tim Blechmann\n * Linux-specific code by Phil Endecott\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_linux_arm.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n#include <boost/atomic/detail/ops_cas_based.hpp>\n#include <boost/atomic/detail/ops_extending_cas_based.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\n// Different ARM processors have different atomic instructions.  In particular,\n// architecture versions before v6 (which are still in widespread use, e.g. the\n// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.\n// On Linux the kernel provides some support that lets us abstract away from\n// these differences: it provides emulated CAS and barrier functions at special\n// addresses that are guaranteed not to be interrupted by the kernel.  Using\n// this facility is slightly slower than inline assembler would be, but much\n// faster than a system call.\n//\n// While this emulated CAS is \"strong\" in the sense that it does not fail\n// \"spuriously\" (i.e.: it never fails to perform the exchange when the value\n// found equals the value expected), it does not return the found value on\n// failure. 
To satisfy the atomic API, compare_exchange_{weak|strong} must\n// return the found value on failure, and we have to manually load this value\n// after the emulated CAS reports failure. This in turn introduces a race\n// between the CAS failing (due to the \"wrong\" value being found) and subsequently\n// loading (which might turn up the \"right\" value). From an application's\n// point of view this looks like \"spurious failure\", and therefore the\n// emulated CAS is only good enough to provide compare_exchange_weak\n// semantics.\n\nstruct linux_arm_cas_base\n{\n    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & memory_order_release) != 0)\n            hardware_full_fence();\n    }\n\n    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT\n    {\n        if (order == memory_order_seq_cst)\n            hardware_full_fence();\n    }\n\n    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT\n    {\n        if ((order & (memory_order_consume | memory_order_acquire)) != 0)\n            hardware_full_fence();\n    }\n\n    static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT\n    {\n        typedef void (*kernel_dmb_t)(void);\n        ((kernel_dmb_t)0xffff0fa0)();\n    }\n};\n\ntemplate< bool Signed >\nstruct linux_arm_cas :\n    public linux_arm_cas_base\n{\n    typedef typename make_storage_type< 4u, Signed >::type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        fence_before_store(order);\n        storage = v;\n        fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        
fence_after_load(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        while (true)\n        {\n            storage_type tmp = expected;\n            if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order))\n                return true;\n            if (tmp != expected)\n            {\n                expected = tmp;\n                return false;\n            }\n        }\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        typedef storage_type (*kernel_cmpxchg32_t)(storage_type oldval, storage_type newval, volatile storage_type* ptr);\n\n        if (((kernel_cmpxchg32_t)0xffff0fc0)(expected, desired, &storage) == 0)\n        {\n            return true;\n        }\n        else\n        {\n            expected = storage;\n            return false;\n        }\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 1u, Signed >\n{\n};\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 2u, Signed >\n{\n};\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >\n{\n};\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        
linux_arm_cas_base::hardware_full_fence();\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        __asm__ __volatile__ (\"\" ::: \"memory\");\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_msvc_arm.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_msvc_arm.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_\n\n#include <intrin.h>\n#include <boost/memory_order.hpp>\n#include <boost/type_traits/make_signed.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/interlocked.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n#include <boost/atomic/detail/ops_msvc_common.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#define BOOST_ATOMIC_DETAIL_ARM_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p))\n#define BOOST_ATOMIC_DETAIL_ARM_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p))\n#define BOOST_ATOMIC_DETAIL_ARM_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p))\n#define BOOST_ATOMIC_DETAIL_ARM_LOAD64(p) __iso_volatile_load64((const volatile __int64*)(p))\n#define BOOST_ATOMIC_DETAIL_ARM_STORE8(p, v) __iso_volatile_store8((volatile __int8*)(p), (__int8)(v))\n#define BOOST_ATOMIC_DETAIL_ARM_STORE16(p, v) __iso_volatile_store16((volatile __int16*)(p), (__int16)(v))\n#define BOOST_ATOMIC_DETAIL_ARM_STORE32(p, v) __iso_volatile_store32((volatile __int32*)(p), (__int32)(v))\n#define BOOST_ATOMIC_DETAIL_ARM_STORE64(p, v) __iso_volatile_store64((volatile __int64*)(p), (__int64)(v))\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\n// A note about memory_order_consume. 
Technically, this architecture allows to avoid\n// unnecessary memory barrier after consume load since it supports data dependency ordering.\n// However, some compiler optimizations may break a seemingly valid code relying on data\n// dependency tracking by injecting bogus branches to aid out of order execution.\n// This may happen not only in Boost.Atomic code but also in user's code, which we have no\n// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.\n// For this reason we promote memory_order_consume to memory_order_acquire.\n\nstruct msvc_arm_operations_base\n{\n    static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT\n    {\n        __dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later\n    }\n\n    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        if ((order & memory_order_release) != 0)\n            hardware_full_fence();\n\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    }\n\n    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        if (order == memory_order_seq_cst)\n            hardware_full_fence();\n\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    }\n\n    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        if ((order & (memory_order_consume | memory_order_acquire)) != 0)\n            hardware_full_fence();\n\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    }\n\n    static BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order cas_common_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        // Combine order flags together and promote memory_order_consume to memory_order_acquire\n        return static_cast< memory_order >(((failure_order | 
success_order) & ~memory_order_consume) | (((failure_order | success_order) & memory_order_consume) << 1u));\n    }\n};\n\ntemplate< typename T, typename Derived >\nstruct msvc_arm_operations :\n    public msvc_arm_operations_base\n{\n    typedef T storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        typedef typename make_signed< storage_type >::type signed_storage_type;\n        return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!Derived::exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        Derived::store(storage, (storage_type)0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >\n{\n    typedef msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, 
storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before_store(order);\n        BOOST_ATOMIC_DETAIL_ARM_STORE8(&storage, v);\n        base_type::fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD8(&storage);\n        base_type::fence_after_load(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(&storage, v));\n            break;\n        case 
memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected, old_val;\n\n        switch (cas_common_order(success_order, failure_order))\n        {\n        case memory_order_relaxed:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(&storage, desired, previous));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(&storage, desired, previous));\n            break;\n        case memory_order_release:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(&storage, desired, previous));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));\n            break;\n        }\n        expected = old_val;\n\n        return (previous == old_val);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(&storage, v));\n            
break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(&storage, 
v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));\n            break;\n        }\n        return v;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >\n{\n    typedef msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before_store(order);\n        BOOST_ATOMIC_DETAIL_ARM_STORE16(&storage, v);\n        base_type::fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD16(&storage);\n        base_type::fence_after_load(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n    
        v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected, old_val;\n\n        switch (cas_common_order(success_order, failure_order))\n        {\n        case memory_order_relaxed:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(&storage, desired, previous));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            old_val = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(&storage, desired, previous));\n            break;\n        case memory_order_release:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(&storage, desired, previous));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));\n            break;\n        }\n        expected = old_val;\n\n        return (previous == old_val);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));\n            break;\n        }\n        return v;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >\n{\n    typedef msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        
base_type::fence_before_store(order);\n        BOOST_ATOMIC_DETAIL_ARM_STORE32(&storage, v);\n        base_type::fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD32(&storage);\n        base_type::fence_after_load(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected, old_val;\n\n        switch (cas_common_order(success_order, failure_order))\n        {\n        case memory_order_relaxed:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(&storage, desired, previous));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(&storage, desired, previous));\n            break;\n        case memory_order_release:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(&storage, desired, previous));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));\n            break;\n        }\n        expected = old_val;\n\n        return (previous == old_val);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case 
memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n       
     v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));\n            break;\n        }\n        return v;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >\n{\n    typedef msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before_store(order);\n        BOOST_ATOMIC_DETAIL_ARM_STORE64(&storage, v);\n        base_type::fence_after_store(order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD64(&storage);\n        base_type::fence_after_load(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected, old_val;\n\n        switch (cas_common_order(success_order, failure_order))\n        {\n        case memory_order_relaxed:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(&storage, desired, previous));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            old_val = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(&storage, desired, previous));\n            break;\n        case memory_order_release:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(&storage, desired, previous));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));\n            break;\n        }\n        expected = old_val;\n\n        return (previous == old_val);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));\n            break;\n        }\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        switch (order)\n        {\n        case memory_order_relaxed:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(&storage, v));\n            break;\n        case memory_order_consume:\n        case memory_order_acquire:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(&storage, v));\n            break;\n        case memory_order_release:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(&storage, v));\n            break;\n        case memory_order_acq_rel:\n        case memory_order_seq_cst:\n        default:\n            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));\n            break;\n        }\n        return v;\n    }\n};\n\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    if (order != memory_order_relaxed)\n        msvc_arm_operations_base::hardware_full_fence();\n    BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#undef BOOST_ATOMIC_DETAIL_ARM_LOAD8\n#undef BOOST_ATOMIC_DETAIL_ARM_LOAD16\n#undef 
BOOST_ATOMIC_DETAIL_ARM_LOAD32\n#undef BOOST_ATOMIC_DETAIL_ARM_LOAD64\n#undef BOOST_ATOMIC_DETAIL_ARM_STORE8\n#undef BOOST_ATOMIC_DETAIL_ARM_STORE16\n#undef BOOST_ATOMIC_DETAIL_ARM_STORE32\n#undef BOOST_ATOMIC_DETAIL_ARM_STORE64\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_msvc_common.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_msvc_common.hpp\n *\n * This header contains common tools for MSVC implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n// Define compiler barriers\n#if defined(__INTEL_COMPILER)\n#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() __memory_barrier()\n#elif defined(_MSC_VER) && !defined(_WIN32_WCE)\nextern \"C\" void _ReadWriteBarrier(void);\n#pragma intrinsic(_ReadWriteBarrier)\n#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() _ReadWriteBarrier()\n#endif\n\n#ifndef BOOST_ATOMIC_DETAIL_COMPILER_BARRIER\n#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER()\n#endif\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_msvc_x86.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_msvc_x86.hpp\n *\n * This header contains implementation of the \\c operations template.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/type_traits/make_signed.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/interlocked.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n#include <boost/cstdint.hpp>\n#include <boost/atomic/detail/ops_cas_based.hpp>\n#endif\n#include <boost/atomic/detail/ops_msvc_common.hpp>\n#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16))\n#include <boost/atomic/detail/ops_extending_cas_based.hpp>\n#endif\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(BOOST_MSVC)\n#pragma warning(push)\n// frame pointer register 'ebx' modified by inline assembly code. 
See the note below.\n#pragma warning(disable: 4731)\n#endif\n\n#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))\nextern \"C\" void _mm_mfence(void);\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_mm_mfence)\n#endif\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\n/*\n * Implementation note for asm blocks.\n *\n * http://msdn.microsoft.com/en-us/data/k1a8ss06%28v=vs.105%29\n *\n * Some SSE types require eight-byte stack alignment, forcing the compiler to emit dynamic stack-alignment code.\n * To be able to access both the local variables and the function parameters after the alignment, the compiler\n * maintains two frame pointers. If the compiler performs frame pointer omission (FPO), it will use EBP and ESP.\n * If the compiler does not perform FPO, it will use EBX and EBP. To ensure code runs correctly, do not modify EBX\n * in asm code if the function requires dynamic stack alignment as it could modify the frame pointer.\n * Either move the eight-byte aligned types out of the function, or avoid using EBX.\n *\n * Since we have no way of knowing that the compiler uses FPO, we have to always save and restore ebx\n * whenever we have to clobber it. 
Additionally, we disable warning C4731 above so that the compiler\n * doesn't spam about ebx use.\n */\n\nstruct msvc_x86_operations_base\n{\n    static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT\n    {\n#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))\n        // Use mfence only if SSE2 is available\n        _mm_mfence();\n#else\n        long tmp;\n        BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);\n#endif\n    }\n\n    static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    }\n\n    static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    }\n\n    static BOOST_FORCEINLINE void fence_after_load(memory_order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        // On x86 and x86_64 there is no need for a hardware barrier,\n        // even if seq_cst memory order is requested, because all\n        // seq_cst writes are implemented with lock-prefixed operations\n        // or xchg which has implied lock prefix. 
Therefore normal loads\n        // are already ordered with seq_cst stores on these architectures.\n    }\n};\n\ntemplate< typename T, typename Derived >\nstruct msvc_x86_operations :\n    public msvc_x86_operations_base\n{\n    typedef T storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        if (order != memory_order_seq_cst)\n        {\n            fence_before(order);\n            storage = v;\n            fence_after(order);\n        }\n        else\n        {\n            Derived::exchange(storage, v, order);\n        }\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type v = storage;\n        fence_after_load(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        typedef typename make_signed< storage_type >::type signed_storage_type;\n        return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!Derived::exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, (storage_type)0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type 
const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >\n{\n    typedef msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));\n        expected = old_val;\n        return (previous == old_val);\n    }\n\n#if defined(BOOST_ATOMIC_INTERLOCKED_AND)\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));\n    }\n#else\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) 
{}\n        return res;\n    }\n#endif\n\n#if defined(BOOST_ATOMIC_INTERLOCKED_OR)\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));\n    }\n#else\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}\n        return res;\n    }\n#endif\n\n#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));\n    }\n#else\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        storage_type res = storage;\n        while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}\n        return res;\n    }\n#endif\n};\n\n#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8)\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >\n{\n    typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));\n    }\n\n    static 
BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));\n        expected = old_val;\n        return (previous == old_val);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));\n    }\n};\n\n#elif defined(_M_IX86)\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >\n{\n    typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n       
 base_type::fence_before(order);\n        __asm\n        {\n            mov edx, storage\n            movzx eax, v\n            lock xadd byte ptr [edx], al\n            mov v, al\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        __asm\n        {\n            mov edx, storage\n            movzx eax, v\n            xchg byte ptr [edx], al\n            mov v, al\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(success_order);\n        bool success;\n        __asm\n        {\n            mov esi, expected\n            mov edi, storage\n            movzx eax, byte ptr [esi]\n            movzx edx, desired\n            lock cmpxchg byte ptr [edi], dl\n            mov byte ptr [esi], al\n            sete success\n        };\n        // The success and failure fences are equivalent anyway\n        base_type::fence_after(success_order);\n        return success;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        int backup;\n        __asm\n        {\n            mov backup, ebx\n            xor edx, edx\n            mov edi, storage\n            movzx ebx, v\n            movzx eax, byte ptr [edi]\n            align 16\n        again:\n            mov dl, al\n            and dl, bl\n            lock cmpxchg byte ptr [edi], dl\n            jne again\n            mov v, al\n            mov ebx, backup\n        };\n        
base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        int backup;\n        __asm\n        {\n            mov backup, ebx\n            xor edx, edx\n            mov edi, storage\n            movzx ebx, v\n            movzx eax, byte ptr [edi]\n            align 16\n        again:\n            mov dl, al\n            or dl, bl\n            lock cmpxchg byte ptr [edi], dl\n            jne again\n            mov v, al\n            mov ebx, backup\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        int backup;\n        __asm\n        {\n            mov backup, ebx\n            xor edx, edx\n            mov edi, storage\n            movzx ebx, v\n            movzx eax, byte ptr [edi]\n            align 16\n        again:\n            mov dl, al\n            xor dl, bl\n            lock cmpxchg byte ptr [edi], dl\n            jne again\n            mov v, al\n            mov ebx, backup\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n};\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >\n{\n};\n\n#endif\n\n#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >\n{\n    typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename 
make_storage_type< 2u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));\n        expected = old_val;\n        return (previous == old_val);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));\n    }\n};\n\n#elif defined(_M_IX86)\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >\n{\n    typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, 
operations< 2u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        __asm\n        {\n            mov edx, storage\n            movzx eax, v\n            lock xadd word ptr [edx], ax\n            mov v, ax\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        __asm\n        {\n            mov edx, storage\n            movzx eax, v\n            xchg word ptr [edx], ax\n            mov v, ax\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(success_order);\n        bool success;\n        __asm\n        {\n            mov esi, expected\n            mov edi, storage\n            movzx eax, word ptr [esi]\n            movzx edx, desired\n            lock cmpxchg word ptr [edi], dx\n            mov word ptr [esi], ax\n            sete success\n        };\n        // The success and failure fences are equivalent anyway\n        base_type::fence_after(success_order);\n        return success;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        int backup;\n        __asm\n        {\n            mov backup, ebx\n            xor edx, edx\n     
       mov edi, storage\n            movzx ebx, v\n            movzx eax, word ptr [edi]\n            align 16\n        again:\n            mov dx, ax\n            and dx, bx\n            lock cmpxchg word ptr [edi], dx\n            jne again\n            mov v, ax\n            mov ebx, backup\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        int backup;\n        __asm\n        {\n            mov backup, ebx\n            xor edx, edx\n            mov edi, storage\n            movzx ebx, v\n            movzx eax, word ptr [edi]\n            align 16\n        again:\n            mov dx, ax\n            or dx, bx\n            lock cmpxchg word ptr [edi], dx\n            jne again\n            mov v, ax\n            mov ebx, backup\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        int backup;\n        __asm\n        {\n            mov backup, ebx\n            xor edx, edx\n            mov edi, storage\n            movzx ebx, v\n            movzx eax, word ptr [edi]\n            align 16\n        again:\n            mov dx, ax\n            xor dx, bx\n            lock cmpxchg word ptr [edi], dx\n            jne again\n            mov v, ax\n            mov ebx, backup\n        };\n        base_type::fence_after(order);\n        return v;\n    }\n};\n\n#else\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >\n{\n};\n\n#endif\n\n\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)\n\ntemplate< bool Signed >\nstruct msvc_dcas_x86\n{\n    
typedef typename make_storage_type< 8u, Signed >::type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    // Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:\n    //\n    // The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically:\n    // * Reading or writing a quadword aligned on a 64-bit boundary\n    //\n    // Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64 bit native types for storage and dynamic memory allocations\n    // have at least 8 byte alignment. The only unfortunate case is when atomic is placed on the stack and it is not 8-byte aligned (like on 32 bit Windows).\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        storage_type volatile* p = &storage;\n        if (((uint32_t)p & 0x00000007) == 0)\n        {\n#if defined(_M_IX86_FP) && _M_IX86_FP >= 2\n#if defined(__AVX__)\n            __asm\n            {\n                mov edx, p\n                vmovq xmm4, v\n                vmovq qword ptr [edx], xmm4\n            };\n#else\n            __asm\n            {\n                mov edx, p\n                movq xmm4, v\n                movq qword ptr [edx], xmm4\n            };\n#endif\n#else\n            __asm\n            {\n                mov edx, p\n                fild v\n                fistp qword ptr [edx]\n            };\n#endif\n        }\n        else\n        {\n            int backup;\n            __asm\n            {\n                mov backup, ebx\n                mov edi, p\n                mov ebx, dword ptr [v]\n                mov ecx, dword ptr [v + 4]\n                mov eax, dword ptr [edi]\n                mov edx, dword ptr [edi + 4]\n               
 align 16\n            again:\n                lock cmpxchg8b qword ptr [edi]\n                jne again\n                mov ebx, backup\n            };\n        }\n\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        storage_type const volatile* p = &storage;\n        storage_type value;\n\n        if (((uint32_t)p & 0x00000007) == 0)\n        {\n#if defined(_M_IX86_FP) && _M_IX86_FP >= 2\n#if defined(__AVX__)\n            __asm\n            {\n                mov edx, p\n                vmovq xmm4, qword ptr [edx]\n                vmovq value, xmm4\n            };\n#else\n            __asm\n            {\n                mov edx, p\n                movq xmm4, qword ptr [edx]\n                movq value, xmm4\n            };\n#endif\n#else\n            __asm\n            {\n                mov edx, p\n                fild qword ptr [edx]\n                fistp value\n            };\n#endif\n        }\n        else\n        {\n            // We don't care for comparison result here; the previous value will be stored into value anyway.\n            // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b.\n            __asm\n            {\n                mov edi, p\n                mov eax, ebx\n                mov edx, ecx\n                lock cmpxchg8b qword ptr [edi]\n                mov dword ptr [value], eax\n                mov dword ptr [value + 4], edx\n            };\n        }\n\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        return value;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        // MSVC-11 in 32-bit mode sometimes generates messed up code 
without compiler barriers,\n        // even though the _InterlockedCompareExchange64 intrinsic already provides one.\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        storage_type volatile* p = &storage;\n#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)\n        const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected);\n        const bool result = (old_val == expected);\n        expected = old_val;\n#else\n        bool result;\n        int backup;\n        __asm\n        {\n            mov backup, ebx\n            mov edi, p\n            mov esi, expected\n            mov ebx, dword ptr [desired]\n            mov ecx, dword ptr [desired + 4]\n            mov eax, dword ptr [esi]\n            mov edx, dword ptr [esi + 4]\n            lock cmpxchg8b qword ptr [edi]\n            mov dword ptr [esi], eax\n            mov dword ptr [esi + 4], edx\n            mov ebx, backup\n            sete result\n        };\n#endif\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        return result;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        storage_type volatile* p = &storage;\n        int backup;\n        __asm\n        {\n            mov backup, ebx\n            mov edi, p\n            mov ebx, dword ptr [v]\n            mov ecx, dword ptr [v + 4]\n            mov eax, dword ptr [edi]\n            mov edx, dword ptr [edi + 4]\n            align 16\n        again:\n            lock cmpxchg8b qword ptr [edi]\n         
   jne again\n            mov ebx, backup\n            mov dword ptr [v], eax\n            mov dword ptr [v + 4], edx\n        };\n\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public cas_based_operations< msvc_dcas_x86< Signed > >\n{\n};\n\n#elif defined(_M_AMD64)\n\ntemplate< bool Signed >\nstruct operations< 8u, Signed > :\n    public msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >\n{\n    typedef msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;\n    typedef typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));\n        expected = old_val;\n        return (previous == old_val);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) 
BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));\n    }\n};\n\n#endif\n\n#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n\ntemplate< bool Signed >\nstruct msvc_dcas_x86_64\n{\n    typedef typename make_storage_type< 16u, Signed >::type storage_type;\n    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type value = const_cast< storage_type& >(storage);\n        while (!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, v, &value)) {}\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT\n    {\n        storage_type value = storage_type();\n        BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, value, &value);\n        return value;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT\n    {\n        return !!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, desired, &expected);\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return 
compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 16u, Signed > :\n    public cas_based_operations< cas_based_exchange< msvc_dcas_x86_64< Signed > > >\n{\n};\n\n#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    if (order == memory_order_seq_cst)\n        msvc_x86_operations_base::hardware_full_fence();\n    BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#if defined(BOOST_MSVC)\n#pragma warning(pop)\n#endif\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/ops_windows.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/ops_windows.hpp\n *\n * This header contains implementation of the \\c operations template.\n *\n * This implementation is the most basic version for Windows. It should\n * work for any non-MSVC-like compilers as long as there are Interlocked WinAPI\n * functions available. This version is also used for WinCE.\n *\n * Notably, this implementation is not as efficient as other\n * versions based on compiler intrinsics.\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/type_traits/make_signed.hpp>\n#include <boost/atomic/detail/config.hpp>\n#include <boost/atomic/detail/interlocked.hpp>\n#include <boost/atomic/detail/storage_type.hpp>\n#include <boost/atomic/detail/operations_fwd.hpp>\n#include <boost/atomic/capabilities.hpp>\n#include <boost/atomic/detail/ops_msvc_common.hpp>\n#include <boost/atomic/detail/ops_extending_cas_based.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\nstruct windows_operations_base\n{\n    static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT\n    {\n        long tmp;\n        BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);\n    }\n\n    static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    }\n\n    static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT\n    {\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    }\n};\n\ntemplate< typename T, typename Derived >\nstruct windows_operations :\n    public 
windows_operations_base\n{\n    typedef T storage_type;\n\n    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        Derived::exchange(storage, v, order);\n    }\n\n    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return Derived::fetch_add(const_cast< storage_type volatile& >(storage), (storage_type)0, order);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        typedef typename make_signed< storage_type >::type signed_storage_type;\n        return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_weak(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);\n    }\n\n    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        return !!Derived::exchange(storage, (storage_type)1, order);\n    }\n\n    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT\n    {\n        store(storage, (storage_type)0, order);\n    }\n\n    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT\n    {\n        return true;\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 4u, Signed > :\n    public windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >\n{\n    typedef windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;\n    typedef 
typename base_type::storage_type storage_type;\n    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;\n\n    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n        base_type::fence_before(order);\n        v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));\n        base_type::fence_after(order);\n        return v;\n    }\n\n    static BOOST_FORCEINLINE bool compare_exchange_strong(\n        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT\n    {\n        storage_type previous = expected;\n        base_type::fence_before(success_order);\n        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));\n        expected = old_val;\n        // The success and failure fences are the same anyway\n        base_type::fence_after(success_order);\n        return (previous == old_val);\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n#if defined(BOOST_ATOMIC_INTERLOCKED_AND)\n        base_type::fence_before(order);\n        v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));\n        base_type::fence_after(order);\n        return v;\n#else\n        storage_type res = storage;\n        while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}\n        return 
res;\n#endif\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n#if defined(BOOST_ATOMIC_INTERLOCKED_OR)\n        base_type::fence_before(order);\n        v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));\n        base_type::fence_after(order);\n        return v;\n#else\n        storage_type res = storage;\n        while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}\n        return res;\n#endif\n    }\n\n    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT\n    {\n#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)\n        base_type::fence_before(order);\n        v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));\n        base_type::fence_after(order);\n        return v;\n#else\n        storage_type res = storage;\n        while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}\n        return res;\n#endif\n    }\n};\n\ntemplate< bool Signed >\nstruct operations< 1u, Signed > :\n    public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >\n{\n};\n\ntemplate< bool Signed >\nstruct operations< 2u, Signed > :\n    public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >\n{\n};\n\nBOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n    if (order == memory_order_seq_cst)\n        windows_operations_base::hardware_full_fence();\n    BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n}\n\nBOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    if (order != memory_order_relaxed)\n        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/pause.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n *    (See accompanying file LICENSE_1_0.txt or copy at\n *          http://www.boost.org/LICENSE_1_0.txt)\n *\n * (C) Copyright 2013 Tim Blechmann\n * (C) Copyright 2013 Andrey Semashev\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))\nextern \"C\" void _mm_pause(void);\n#if defined(BOOST_MSVC)\n#pragma intrinsic(_mm_pause)\n#endif\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\nBOOST_FORCEINLINE void pause() BOOST_NOEXCEPT\n{\n#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))\n    _mm_pause();\n#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))\n    __asm__ __volatile__(\"pause;\");\n#endif\n}\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/platform.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/platform.hpp\n *\n * This header defines macros for the target platform detection\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_\n\n#include <boost/atomic/detail/config.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)\n\n// Compiler-based backends\n#if (defined(__ibmxl__) || defined(__IBMCPP__)) && defined(__PPC__)\n\n// IBM XL C++ Compiler has to be checked before GCC/Clang as it pretends to be one but does not support __atomic* intrinsics.\n// It does support GCC inline assembler though.\n#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc\n\n#elif ((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\\\n    (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))) &&\\\n    (\\\n        (__GCC_ATOMIC_BOOL_LOCK_FREE + 0) == 2 ||\\\n        (__GCC_ATOMIC_CHAR_LOCK_FREE + 0) == 2 ||\\\n        (__GCC_ATOMIC_SHORT_LOCK_FREE + 0) == 2 ||\\\n        (__GCC_ATOMIC_INT_LOCK_FREE + 0) == 2 ||\\\n        (__GCC_ATOMIC_LONG_LOCK_FREE + 0) == 2 ||\\\n        (__GCC_ATOMIC_LLONG_LOCK_FREE + 0) == 2\\\n    )\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic\n\n#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86\n\n#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc\n\n// This list of ARM architecture versions comes from Apple's arm/arch.h header.\n// I don't know how complete it is.\n#elif defined(__GNUC__) &&\\\n    (\\\n        defined(__ARM_ARCH_6__)  || defined(__ARM_ARCH_6J__) ||\\\n        
defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) ||\\\n        defined(__ARM_ARCH_6ZK__) ||\\\n        defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\\\n        defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\\\n        defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)\\\n    )\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_arm\n\n#elif defined(__GNUC__) && defined(__sparc_v9__)\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparc\n\n#elif defined(__GNUC__) && defined(__alpha__)\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_alpha\n\n#elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) &&\\\n    (\\\n        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) ||\\\n        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) ||\\\n        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) ||\\\n        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\\\n        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\\\n    )\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sync\n\n#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86\n\n#elif defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_M_ARM)\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_arm\n\n#endif\n\n// OS-based backends\n#if !defined(BOOST_ATOMIC_DETAIL_PLATFORM)\n\n#if defined(__linux__) && defined(__arm__)\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM linux_arm\n\n#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)\n\n#define BOOST_ATOMIC_DETAIL_PLATFORM windows\n\n#endif\n\n#endif // !defined(BOOST_ATOMIC_DETAIL_PLATFORM)\n\n#endif // !defined(BOOST_ATOMIC_FORCE_FALLBACK)\n\n#if !defined(BOOST_ATOMIC_DETAIL_PLATFORM)\n#define BOOST_ATOMIC_DETAIL_PLATFORM emulated\n#define BOOST_ATOMIC_EMULATED\n#endif\n\n#define BOOST_ATOMIC_DETAIL_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_PLATFORM).hpp>\n\n#endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/detail/storage_type.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2009 Helge Bahmann\n * Copyright (c) 2012 Tim Blechmann\n * Copyright (c) 2013 - 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/detail/storage_type.hpp\n *\n * This header defines underlying types used as storage\n */\n\n#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_\n#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_\n\n#include <cstddef>\n#include <boost/cstdint.hpp>\n#include <boost/atomic/detail/config.hpp>\n#if !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP) || !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)\n#include <cstring>\n#endif\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\nnamespace boost {\nnamespace atomics {\nnamespace detail {\n\ntemplate< typename T >\nBOOST_FORCEINLINE void non_atomic_load(T const volatile& from, T& to) BOOST_NOEXCEPT\n{\n    to = from;\n}\n\ntemplate< std::size_t Size >\nstruct buffer_storage\n{\n    BOOST_ALIGNMENT(16) unsigned char data[Size];\n\n    BOOST_FORCEINLINE bool operator! 
() const BOOST_NOEXCEPT\n    {\n        return (data[0] == 0u && BOOST_ATOMIC_DETAIL_MEMCMP(data, data + 1, Size - 1) == 0);\n    }\n\n    BOOST_FORCEINLINE bool operator== (buffer_storage const& that) const BOOST_NOEXCEPT\n    {\n        return BOOST_ATOMIC_DETAIL_MEMCMP(data, that.data, Size) == 0;\n    }\n\n    BOOST_FORCEINLINE bool operator!= (buffer_storage const& that) const BOOST_NOEXCEPT\n    {\n        return BOOST_ATOMIC_DETAIL_MEMCMP(data, that.data, Size) != 0;\n    }\n};\n\ntemplate< std::size_t Size >\nBOOST_FORCEINLINE void non_atomic_load(buffer_storage< Size > const volatile& from, buffer_storage< Size >& to) BOOST_NOEXCEPT\n{\n    BOOST_ATOMIC_DETAIL_MEMCPY(to.data, const_cast< unsigned char const* >(from.data), Size);\n}\n\ntemplate< std::size_t Size, bool Signed >\nstruct make_storage_type\n{\n    typedef buffer_storage< Size > type;\n\n    struct aligned\n    {\n        type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type const& v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\ntemplate< >\nstruct make_storage_type< 1u, false >\n{\n    typedef boost::uint8_t type;\n\n    struct aligned\n    {\n        type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\ntemplate< >\nstruct make_storage_type< 1u, true >\n{\n    typedef boost::int8_t type;\n\n    struct aligned\n    {\n        type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\ntemplate< >\nstruct make_storage_type< 2u, false >\n{\n    typedef boost::uint16_t type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(2) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    
};\n};\n\ntemplate< >\nstruct make_storage_type< 2u, true >\n{\n    typedef boost::int16_t type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(2) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\ntemplate< >\nstruct make_storage_type< 4u, false >\n{\n    typedef boost::uint32_t type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(4) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\ntemplate< >\nstruct make_storage_type< 4u, true >\n{\n    typedef boost::int32_t type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(4) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\ntemplate< >\nstruct make_storage_type< 8u, false >\n{\n    typedef boost::uint64_t type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(8) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\ntemplate< >\nstruct make_storage_type< 8u, true >\n{\n    typedef boost::int64_t type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(8) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\n#if defined(BOOST_HAS_INT128)\n\ntemplate< >\nstruct make_storage_type< 16u, false >\n{\n    typedef boost::uint128_type type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(16) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\ntemplate< >\nstruct 
make_storage_type< 16u, true >\n{\n    typedef boost::int128_type type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(16) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\n#elif !defined(BOOST_NO_ALIGNMENT)\n\nstruct storage128_t\n{\n    boost::uint64_t data[2];\n\n    BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT\n    {\n        return data[0] == 0 && data[1] == 0;\n    }\n};\n\nBOOST_FORCEINLINE bool operator== (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT\n{\n    return left.data[0] == right.data[0] && left.data[1] == right.data[1];\n}\nBOOST_FORCEINLINE bool operator!= (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT\n{\n    return !(left == right);\n}\n\nBOOST_FORCEINLINE void non_atomic_load(storage128_t const volatile& from, storage128_t& to) BOOST_NOEXCEPT\n{\n    to.data[0] = from.data[0];\n    to.data[1] = from.data[1];\n}\n\ntemplate< bool Signed >\nstruct make_storage_type< 16u, Signed >\n{\n    typedef storage128_t type;\n\n    struct aligned\n    {\n        BOOST_ALIGNMENT(16) type value;\n\n        BOOST_DEFAULTED_FUNCTION(aligned(), {})\n        BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type const& v) BOOST_NOEXCEPT : value(v) {}\n    };\n};\n\n#endif\n\ntemplate< typename T >\nstruct storage_size_of\n{\n    enum _\n    {\n        size = sizeof(T),\n        value = (size == 3 ? 4 : (size >= 5 && size <= 7 ? 8 : (size >= 9 && size <= 15 ? 16 : size)))\n    };\n};\n\n} // namespace detail\n} // namespace atomics\n} // namespace boost\n\n#endif // BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic/fences.hpp",
    "content": "/*\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n *\n * Copyright (c) 2011 Helge Bahmann\n * Copyright (c) 2013 Tim Blechmann\n * Copyright (c) 2014 Andrey Semashev\n */\n/*!\n * \\file   atomic/fences.hpp\n *\n * This header contains definition of \\c atomic_thread_fence and \\c atomic_signal_fence functions.\n */\n\n#ifndef BOOST_ATOMIC_FENCES_HPP_INCLUDED_\n#define BOOST_ATOMIC_FENCES_HPP_INCLUDED_\n\n#include <boost/memory_order.hpp>\n#include <boost/atomic/capabilities.hpp>\n#include <boost/atomic/detail/operations.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n/*\n * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,\n *                      see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.\n */\n\nnamespace boost {\n\nnamespace atomics {\n\n#if BOOST_ATOMIC_THREAD_FENCE > 0\nBOOST_FORCEINLINE void atomic_thread_fence(memory_order order) BOOST_NOEXCEPT\n{\n    detail::thread_fence(order);\n}\n#else\nBOOST_FORCEINLINE void atomic_thread_fence(memory_order) BOOST_NOEXCEPT\n{\n    detail::lockpool::thread_fence();\n}\n#endif\n\n#if BOOST_ATOMIC_SIGNAL_FENCE > 0\nBOOST_FORCEINLINE void atomic_signal_fence(memory_order order) BOOST_NOEXCEPT\n{\n    detail::signal_fence(order);\n}\n#else\nBOOST_FORCEINLINE void atomic_signal_fence(memory_order) BOOST_NOEXCEPT\n{\n    detail::lockpool::signal_fence();\n}\n#endif\n\n} // namespace atomics\n\nusing atomics::atomic_thread_fence;\nusing atomics::atomic_signal_fence;\n\n} // namespace boost\n\n#endif // BOOST_ATOMIC_FENCES_HPP_INCLUDED_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/atomic.hpp",
    "content": "#ifndef BOOST_ATOMIC_HPP\n#define BOOST_ATOMIC_HPP\n\n//  Copyright (c) 2011 Helge Bahmann\n//\n//  Distributed under the Boost Software License, Version 1.0.\n//  See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n// This header includes all Boost.Atomic public headers\n\n#include <boost/atomic/atomic.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/abi/borland_prefix.hpp",
    "content": "//  (C) Copyright John Maddock 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  for C++ Builder the following options effect the ABI:\n//\n//  -b (on or off - effect emum sizes)\n//  -Vx  (on or off - empty members)\n//  -Ve (on or off - empty base classes)\n//  -aX (alignment - 5 options).\n//  -pX (Calling convention - 4 options)\n//  -VmX (member pointer size and layout - 5 options)\n//  -VC (on or off, changes name mangling)\n//  -Vl (on or off, changes struct layout).\n\n//  In addition the following warnings are sufficiently annoying (and\n//  unfixable) to have them turned off by default:\n//\n//  8027 - functions containing [for|while] loops are not expanded inline\n//  8026 - functions taking class by value arguments are not expanded inline\n\n#pragma nopushoptwarn\n#  pragma option push -a8 -Vx- -Ve- -b- -pc -Vmv -VC- -Vl- -w-8027 -w-8026\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/abi/borland_suffix.hpp",
    "content": "//  (C) Copyright John Maddock 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#  pragma option pop\n#pragma nopushoptwarn\n\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/abi/msvc_prefix.hpp",
    "content": "//  (C) Copyright John Maddock 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//\n// Boost binaries are built with the compiler's default ABI settings,\n// if the user changes their default alignment in the VS IDE then their\n// code will no longer be binary compatible with the bjam built binaries\n// unless this header is included to force Boost code into a consistent ABI.\n//\n// Note that inclusion of this header is only necessary for libraries with \n// separate source, header only libraries DO NOT need this as long as all\n// translation units are built with the same options.\n//\n#if defined(_M_X64)\n#  pragma pack(push,16)\n#else\n#  pragma pack(push,8)\n#endif\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/abi/msvc_suffix.hpp",
    "content": "//  (C) Copyright John Maddock 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#pragma pack(pop)\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/abi_prefix.hpp",
    "content": "//  abi_prefix header  -------------------------------------------------------//\n\n// (c) Copyright John Maddock 2003\n   \n// Use, modification and distribution are subject to the Boost Software License,\n// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt).\n\n#ifndef BOOST_CONFIG_ABI_PREFIX_HPP\n# define BOOST_CONFIG_ABI_PREFIX_HPP\n#else\n# error double inclusion of header boost/config/abi_prefix.hpp is an error\n#endif\n\n#include <boost/config.hpp>\n\n// this must occur after all other includes and before any code appears:\n#ifdef BOOST_HAS_ABI_HEADERS\n#  include BOOST_ABI_PREFIX\n#endif\n\n#if defined( __BORLANDC__ )\n#pragma nopushoptwarn\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/abi_suffix.hpp",
    "content": "//  abi_sufffix header  -------------------------------------------------------//\n\n// (c) Copyright John Maddock 2003\n   \n// Use, modification and distribution are subject to the Boost Software License,\n// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt).\n\n// This header should be #included AFTER code that was preceded by a #include\n// <boost/config/abi_prefix.hpp>.\n\n#ifndef BOOST_CONFIG_ABI_PREFIX_HPP\n# error Header boost/config/abi_suffix.hpp must only be used after boost/config/abi_prefix.hpp\n#else\n# undef BOOST_CONFIG_ABI_PREFIX_HPP\n#endif\n\n// the suffix header occurs after all of our code:\n#ifdef BOOST_HAS_ABI_HEADERS\n#  include BOOST_ABI_SUFFIX\n#endif\n\n#if defined( __BORLANDC__ )\n#pragma nopushoptwarn\n#endif\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/auto_link.hpp",
    "content": "//  (C) Copyright John Maddock 2003.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n /*\n  *   LOCATION:    see http://www.boost.org for most recent version.\n  *   FILE         auto_link.hpp\n  *   VERSION      see <boost/version.hpp>\n  *   DESCRIPTION: Automatic library inclusion for Borland/Microsoft compilers.\n  */\n\n/*************************************************************************\n\nUSAGE:\n~~~~~~\n\nBefore including this header you must define one or more of define the following macros:\n\nBOOST_LIB_NAME:           Required: A string containing the basename of the library,\n                          for example boost_regex.\nBOOST_LIB_TOOLSET:        Optional: the base name of the toolset.\nBOOST_DYN_LINK:           Optional: when set link to dll rather than static library.\nBOOST_LIB_DIAGNOSTIC:     Optional: when set the header will print out the name\n                          of the library selected (useful for debugging).\nBOOST_AUTO_LINK_NOMANGLE: Specifies that we should link to BOOST_LIB_NAME.lib,\n                          rather than a mangled-name version.\nBOOST_AUTO_LINK_TAGGED:   Specifies that we link to libraries built with the --layout=tagged option.\n                          This is essentially the same as the default name-mangled version, but without\n                          the compiler name and version, or the Boost version.  
Just the build options.\n\nThese macros will be undef'ed at the end of the header, further this header\nhas no include guards - so be sure to include it only once from your library!\n\nAlgorithm:\n~~~~~~~~~~\n\nLibraries for Borland and Microsoft compilers are automatically\nselected here, the name of the lib is selected according to the following\nformula:\n\nBOOST_LIB_PREFIX\n   + BOOST_LIB_NAME\n   + \"_\"\n   + BOOST_LIB_TOOLSET\n   + BOOST_LIB_THREAD_OPT\n   + BOOST_LIB_RT_OPT\n   \"-\"\n   + BOOST_LIB_VERSION\n\nThese are defined as:\n\nBOOST_LIB_PREFIX:     \"lib\" for static libraries otherwise \"\".\n\nBOOST_LIB_NAME:       The base name of the lib ( for example boost_regex).\n\nBOOST_LIB_TOOLSET:    The compiler toolset name (vc6, vc7, bcb5 etc).\n\nBOOST_LIB_THREAD_OPT: \"-mt\" for multithread builds, otherwise nothing.\n\nBOOST_LIB_RT_OPT:     A suffix that indicates the runtime library used,\n                      contains one or more of the following letters after\n                      a hyphen:\n\n                      s      static runtime (dynamic if not present).\n                      g      debug/diagnostic runtime (release if not present).\n                      y      Python debug/diagnostic runtime (release if not present).\n                      d      debug build (release if not present).\n                      p      STLport build.\n                      n      STLport build without its IOStreams.\n\nBOOST_LIB_VERSION:    The Boost version, in the form x_y, for Boost version x.y.\n\n\n***************************************************************************/\n\n#ifdef __cplusplus\n#  ifndef BOOST_CONFIG_HPP\n#     include <boost/config.hpp>\n#  endif\n#elif defined(_MSC_VER) && !defined(__MWERKS__) && !defined(__EDG_VERSION__)\n//\n// C language compatability (no, honestly)\n//\n#  define BOOST_MSVC _MSC_VER\n#  define BOOST_STRINGIZE(X) BOOST_DO_STRINGIZE(X)\n#  define BOOST_DO_STRINGIZE(X) #X\n#endif\n//\n// Only include what follows 
for known and supported compilers:\n//\n#if defined(BOOST_MSVC) \\\n    || defined(__BORLANDC__) \\\n    || (defined(__MWERKS__) && defined(_WIN32) && (__MWERKS__ >= 0x3000)) \\\n    || (defined(__ICL) && defined(_MSC_EXTENSIONS) && (_MSC_VER >= 1200))\n\n#ifndef BOOST_VERSION_HPP\n#  include <boost/version.hpp>\n#endif\n\n#ifndef BOOST_LIB_NAME\n#  error \"Macro BOOST_LIB_NAME not set (internal error)\"\n#endif\n\n//\n// error check:\n//\n#if defined(__MSVC_RUNTIME_CHECKS) && !defined(_DEBUG)\n#  pragma message(\"Using the /RTC option without specifying a debug runtime will lead to linker errors\")\n#  pragma message(\"Hint: go to the code generation options and switch to one of the debugging runtimes\")\n#  error \"Incompatible build options\"\n#endif\n//\n// select toolset if not defined already:\n//\n#ifndef BOOST_LIB_TOOLSET\n#  if defined(BOOST_MSVC) && (BOOST_MSVC < 1200)\n    // Note: no compilers before 1200 are supported\n#  elif defined(BOOST_MSVC) && (BOOST_MSVC < 1300)\n\n#    ifdef UNDER_CE\n       // eVC4:\n#      define BOOST_LIB_TOOLSET \"evc4\"\n#    else\n       // vc6:\n#      define BOOST_LIB_TOOLSET \"vc6\"\n#    endif\n\n#  elif defined(BOOST_MSVC) && (BOOST_MSVC < 1310)\n\n     // vc7:\n#    define BOOST_LIB_TOOLSET \"vc7\"\n\n#  elif defined(BOOST_MSVC) && (BOOST_MSVC < 1400)\n\n     // vc71:\n#    define BOOST_LIB_TOOLSET \"vc71\"\n\n#  elif defined(BOOST_MSVC) && (BOOST_MSVC < 1500)\n\n     // vc80:\n#    define BOOST_LIB_TOOLSET \"vc80\"\n\n#  elif defined(BOOST_MSVC) && (BOOST_MSVC < 1600)\n\n     // vc90:\n#    define BOOST_LIB_TOOLSET \"vc90\"\n\n#  elif defined(BOOST_MSVC) && (BOOST_MSVC < 1700)\n\n     // vc10:\n#    define BOOST_LIB_TOOLSET \"vc100\"\n\n#  elif defined(BOOST_MSVC) && (BOOST_MSVC < 1800)\n\n     // vc11:\n#    define BOOST_LIB_TOOLSET \"vc110\"\n\n#  elif defined(BOOST_MSVC) && (BOOST_MSVC < 1900)\n\n     // vc12:\n#    define BOOST_LIB_TOOLSET \"vc120\"\n\n# elif defined(BOOST_MSVC)\n\n   // vc14:\n#  define 
BOOST_LIB_TOOLSET \"vc140\"\n\n#  elif defined(__BORLANDC__)\n\n     // CBuilder 6:\n#    define BOOST_LIB_TOOLSET \"bcb\"\n\n#  elif defined(__ICL)\n\n     // Intel C++, no version number:\n#    define BOOST_LIB_TOOLSET \"iw\"\n\n#  elif defined(__MWERKS__) && (__MWERKS__ <= 0x31FF )\n\n     // Metrowerks CodeWarrior 8.x\n#    define BOOST_LIB_TOOLSET \"cw8\"\n\n#  elif defined(__MWERKS__) && (__MWERKS__ <= 0x32FF )\n\n     // Metrowerks CodeWarrior 9.x\n#    define BOOST_LIB_TOOLSET \"cw9\"\n\n#  endif\n#endif // BOOST_LIB_TOOLSET\n\n//\n// select thread opt:\n//\n#if defined(_MT) || defined(__MT__)\n#  define BOOST_LIB_THREAD_OPT \"-mt\"\n#else\n#  define BOOST_LIB_THREAD_OPT\n#endif\n\n#if defined(_MSC_VER) || defined(__MWERKS__)\n\n#  ifdef _DLL\n\n#     if (defined(__SGI_STL_PORT) || defined(_STLPORT_VERSION)) && (defined(_STLP_OWN_IOSTREAMS) || defined(__STL_OWN_IOSTREAMS))\n\n#        if defined(_DEBUG) && (defined(__STL_DEBUG) || defined(_STLP_DEBUG))\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#            define BOOST_LIB_RT_OPT \"-gydp\"\n#        elif defined(_DEBUG) && (defined(__STL_DEBUG) || defined(_STLP_DEBUG))\n#            define BOOST_LIB_RT_OPT \"-gdp\"\n#        elif defined(_DEBUG)\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#            define BOOST_LIB_RT_OPT \"-gydp\"\n#            pragma message(\"warning: STLport debug versions are built with /D_STLP_DEBUG=1\")\n#            error \"Build options aren't compatible with pre-built libraries\"\n#        elif defined(_DEBUG)\n#            define BOOST_LIB_RT_OPT \"-gdp\"\n#            pragma message(\"warning: STLport debug versions are built with /D_STLP_DEBUG=1\")\n#            error \"Build options aren't compatible with pre-built libraries\"\n#        else\n#            define BOOST_LIB_RT_OPT \"-p\"\n#        endif\n\n#     elif defined(__SGI_STL_PORT) || defined(_STLPORT_VERSION)\n\n#        if 
defined(_DEBUG) && (defined(__STL_DEBUG) || defined(_STLP_DEBUG))\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#            define BOOST_LIB_RT_OPT \"-gydpn\"\n#        elif defined(_DEBUG) && (defined(__STL_DEBUG) || defined(_STLP_DEBUG))\n#            define BOOST_LIB_RT_OPT \"-gdpn\"\n#        elif defined(_DEBUG)\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#            define BOOST_LIB_RT_OPT \"-gydpn\"\n#            pragma message(\"warning: STLport debug versions are built with /D_STLP_DEBUG=1\")\n#            error \"Build options aren't compatible with pre-built libraries\"\n#        elif defined(_DEBUG)\n#            define BOOST_LIB_RT_OPT \"-gdpn\"\n#            pragma message(\"warning: STLport debug versions are built with /D_STLP_DEBUG=1\")\n#            error \"Build options aren't compatible with pre-built libraries\"\n#        else\n#            define BOOST_LIB_RT_OPT \"-pn\"\n#        endif\n\n#     else\n\n#        if defined(_DEBUG) && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#            define BOOST_LIB_RT_OPT \"-gyd\"\n#        elif defined(_DEBUG)\n#            define BOOST_LIB_RT_OPT \"-gd\"\n#        else\n#            define BOOST_LIB_RT_OPT\n#        endif\n\n#     endif\n\n#  else\n\n#     if (defined(__SGI_STL_PORT) || defined(_STLPORT_VERSION)) && (defined(_STLP_OWN_IOSTREAMS) || defined(__STL_OWN_IOSTREAMS))\n\n#        if defined(_DEBUG) && (defined(__STL_DEBUG) || defined(_STLP_DEBUG))\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#            define BOOST_LIB_RT_OPT \"-sgydp\"\n#        elif defined(_DEBUG) && (defined(__STL_DEBUG) || defined(_STLP_DEBUG))\n#            define BOOST_LIB_RT_OPT \"-sgdp\"\n#        elif defined(_DEBUG)\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#             define BOOST_LIB_RT_OPT \"-sgydp\"\n#            pragma 
message(\"warning: STLport debug versions are built with /D_STLP_DEBUG=1\")\n#            error \"Build options aren't compatible with pre-built libraries\"\n#        elif defined(_DEBUG)\n#             define BOOST_LIB_RT_OPT \"-sgdp\"\n#            pragma message(\"warning: STLport debug versions are built with /D_STLP_DEBUG=1\")\n#            error \"Build options aren't compatible with pre-built libraries\"\n#        else\n#            define BOOST_LIB_RT_OPT \"-sp\"\n#        endif\n\n#     elif defined(__SGI_STL_PORT) || defined(_STLPORT_VERSION)\n\n#        if defined(_DEBUG) && (defined(__STL_DEBUG) || defined(_STLP_DEBUG))\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#            define BOOST_LIB_RT_OPT \"-sgydpn\"\n#        elif defined(_DEBUG) && (defined(__STL_DEBUG) || defined(_STLP_DEBUG))\n#            define BOOST_LIB_RT_OPT \"-sgdpn\"\n#        elif defined(_DEBUG)\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#             define BOOST_LIB_RT_OPT \"-sgydpn\"\n#            pragma message(\"warning: STLport debug versions are built with /D_STLP_DEBUG=1\")\n#            error \"Build options aren't compatible with pre-built libraries\"\n#        elif defined(_DEBUG)\n#             define BOOST_LIB_RT_OPT \"-sgdpn\"\n#            pragma message(\"warning: STLport debug versions are built with /D_STLP_DEBUG=1\")\n#            error \"Build options aren't compatible with pre-built libraries\"\n#        else\n#            define BOOST_LIB_RT_OPT \"-spn\"\n#        endif\n\n#     else\n\n#        if defined(_DEBUG)\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#             define BOOST_LIB_RT_OPT \"-sgyd\"\n#        elif defined(_DEBUG)\n#             define BOOST_LIB_RT_OPT \"-sgd\"\n#        else\n#            define BOOST_LIB_RT_OPT \"-s\"\n#        endif\n\n#     endif\n\n#  endif\n\n#elif defined(__BORLANDC__)\n\n//\n// figure out whether 
we want the debug builds or not:\n//\n#if __BORLANDC__ > 0x561\n#pragma defineonoption BOOST_BORLAND_DEBUG -v\n#endif\n//\n// sanity check:\n//\n#if defined(__STL_DEBUG) || defined(_STLP_DEBUG)\n#error \"Pre-built versions of the Boost libraries are not provided in STLport-debug form\"\n#endif\n\n#  ifdef _RTLDLL\n\n#     if defined(BOOST_BORLAND_DEBUG)\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#         define BOOST_LIB_RT_OPT \"-yd\"\n#     elif defined(BOOST_BORLAND_DEBUG)\n#         define BOOST_LIB_RT_OPT \"-d\"\n#     elif defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#         define BOOST_LIB_RT_OPT -y\n#     else\n#         define BOOST_LIB_RT_OPT\n#     endif\n\n#  else\n\n#     if defined(BOOST_BORLAND_DEBUG)\\\n               && defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#         define BOOST_LIB_RT_OPT \"-syd\"\n#     elif defined(BOOST_BORLAND_DEBUG)\n#         define BOOST_LIB_RT_OPT \"-sd\"\n#     elif defined(BOOST_DEBUG_PYTHON) && defined(BOOST_LINKING_PYTHON)\n#         define BOOST_LIB_RT_OPT \"-sy\"\n#     else\n#         define BOOST_LIB_RT_OPT \"-s\"\n#     endif\n\n#  endif\n\n#endif\n\n//\n// select linkage opt:\n//\n#if (defined(_DLL) || defined(_RTLDLL)) && defined(BOOST_DYN_LINK)\n#  define BOOST_LIB_PREFIX\n#elif defined(BOOST_DYN_LINK)\n#  error \"Mixing a dll boost library with a static runtime is a really bad idea...\"\n#else\n#  define BOOST_LIB_PREFIX \"lib\"\n#endif\n\n//\n// now include the lib:\n//\n#if defined(BOOST_LIB_NAME) \\\n      && defined(BOOST_LIB_PREFIX) \\\n      && defined(BOOST_LIB_TOOLSET) \\\n      && defined(BOOST_LIB_THREAD_OPT) \\\n      && defined(BOOST_LIB_RT_OPT) \\\n      && defined(BOOST_LIB_VERSION)\n\n#ifdef BOOST_AUTO_LINK_TAGGED\n#  pragma comment(lib, BOOST_LIB_PREFIX BOOST_STRINGIZE(BOOST_LIB_NAME) BOOST_LIB_THREAD_OPT BOOST_LIB_RT_OPT \".lib\")\n#  ifdef BOOST_LIB_DIAGNOSTIC\n#     pragma message (\"Linking to lib file: \" 
BOOST_LIB_PREFIX BOOST_STRINGIZE(BOOST_LIB_NAME) BOOST_LIB_THREAD_OPT BOOST_LIB_RT_OPT \".lib\")\n#  endif\n#elif defined(BOOST_AUTO_LINK_NOMANGLE)\n#  pragma comment(lib, BOOST_STRINGIZE(BOOST_LIB_NAME) \".lib\")\n#  ifdef BOOST_LIB_DIAGNOSTIC\n#     pragma message (\"Linking to lib file: \" BOOST_STRINGIZE(BOOST_LIB_NAME) \".lib\")\n#  endif\n#elif defined(BOOST_LIB_BUILDID)\n#  pragma comment(lib, BOOST_LIB_PREFIX BOOST_STRINGIZE(BOOST_LIB_NAME) \"-\" BOOST_LIB_TOOLSET BOOST_LIB_THREAD_OPT BOOST_LIB_RT_OPT \"-\" BOOST_LIB_VERSION \"-\" BOOST_STRINGIZE(BOOST_LIB_BUILDID) \".lib\")\n#  ifdef BOOST_LIB_DIAGNOSTIC\n#     pragma message (\"Linking to lib file: \" BOOST_LIB_PREFIX BOOST_STRINGIZE(BOOST_LIB_NAME) \"-\" BOOST_LIB_TOOLSET BOOST_LIB_THREAD_OPT BOOST_LIB_RT_OPT \"-\" BOOST_LIB_VERSION \"-\" BOOST_STRINGIZE(BOOST_LIB_BUILDID) \".lib\")\n#  endif\n#else\n#  pragma comment(lib, BOOST_LIB_PREFIX BOOST_STRINGIZE(BOOST_LIB_NAME) \"-\" BOOST_LIB_TOOLSET BOOST_LIB_THREAD_OPT BOOST_LIB_RT_OPT \"-\" BOOST_LIB_VERSION \".lib\")\n#  ifdef BOOST_LIB_DIAGNOSTIC\n#     pragma message (\"Linking to lib file: \" BOOST_LIB_PREFIX BOOST_STRINGIZE(BOOST_LIB_NAME) \"-\" BOOST_LIB_TOOLSET BOOST_LIB_THREAD_OPT BOOST_LIB_RT_OPT \"-\" BOOST_LIB_VERSION \".lib\")\n#  endif\n#endif\n\n#else\n#  error \"some required macros where not defined (internal logic error).\"\n#endif\n\n\n#endif // _MSC_VER || __BORLANDC__\n\n//\n// finally undef any macros we may have set:\n//\n#ifdef BOOST_LIB_PREFIX\n#  undef BOOST_LIB_PREFIX\n#endif\n#if defined(BOOST_LIB_NAME)\n#  undef BOOST_LIB_NAME\n#endif\n// Don't undef this one: it can be set by the user and should be the \n// same for all libraries:\n//#if defined(BOOST_LIB_TOOLSET)\n//#  undef BOOST_LIB_TOOLSET\n//#endif\n#if defined(BOOST_LIB_THREAD_OPT)\n#  undef BOOST_LIB_THREAD_OPT\n#endif\n#if defined(BOOST_LIB_RT_OPT)\n#  undef BOOST_LIB_RT_OPT\n#endif\n#if defined(BOOST_LIB_LINK_OPT)\n#  undef BOOST_LIB_LINK_OPT\n#endif\n#if 
defined(BOOST_LIB_DEBUG_OPT)\n#  undef BOOST_LIB_DEBUG_OPT\n#endif\n#if defined(BOOST_DYN_LINK)\n#  undef BOOST_DYN_LINK\n#endif\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/borland.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003.\n//  (C) Copyright David Abrahams 2002 - 2003.\n//  (C) Copyright Aleksey Gurtovoy 2002.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Borland C++ compiler setup:\n\n//\n// versions check:\n// we don't support Borland prior to version 5.4:\n#if __BORLANDC__ < 0x540\n#  error \"Compiler not supported or configured - please reconfigure\"\n#endif\n\n// last known compiler version:\n#if (__BORLANDC__ > 0x613)\n//#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n//#  else\n//#     pragma message( \"Unknown compiler version - please run the configure tests and report the results\")\n//#  endif\n#elif (__BORLANDC__ == 0x600)\n#  error \"CBuilderX preview compiler is no longer supported\"\n#endif\n\n//\n// Support macros to help with standard library detection\n#if (__BORLANDC__ < 0x560) || defined(_USE_OLD_RW_STL)\n#  define BOOST_BCB_WITH_ROGUE_WAVE\n#elif __BORLANDC__ < 0x570\n#  define BOOST_BCB_WITH_STLPORT\n#else\n#  define BOOST_BCB_WITH_DINKUMWARE\n#endif\n\n//\n// Version 5.0 and below:\n#   if __BORLANDC__ <= 0x0550\n// Borland C++Builder 4 and 5:\n#     define BOOST_NO_MEMBER_TEMPLATE_FRIENDS\n#     if __BORLANDC__ == 0x0550\n// Borland C++Builder 5, command-line compiler 5.5:\n#       define BOOST_NO_OPERATORS_IN_NAMESPACE\n#     endif\n// Variadic macros do not exist for C++ Builder versions 5 and below\n#define BOOST_NO_CXX11_VARIADIC_MACROS\n#   endif\n\n// Version 5.51 and below:\n#if (__BORLANDC__ <= 0x551)\n#  define BOOST_NO_CV_SPECIALIZATIONS\n#  define BOOST_NO_CV_VOID_SPECIALIZATIONS\n#  define BOOST_NO_DEDUCED_TYPENAME\n// workaround for missing WCHAR_MAX/WCHAR_MIN:\n#ifdef __cplusplus\n#include 
<climits>\n#include <cwchar>\n#else\n#include <limits.h>\n#include <wchar.h>\n#endif // __cplusplus\n#ifndef WCHAR_MAX\n#  define WCHAR_MAX 0xffff\n#endif\n#ifndef WCHAR_MIN\n#  define WCHAR_MIN 0\n#endif\n#endif\n\n// Borland C++ Builder 6 and below:\n#if (__BORLANDC__ <= 0x564)\n\n#  if defined(NDEBUG) && defined(__cplusplus)\n      // fix broken <cstring> so that Boost.test works:\n#     include <cstring>\n#     undef strcmp\n#  endif\n   // fix broken errno declaration:\n#  include <errno.h>\n#  ifndef errno\n#     define errno errno\n#  endif\n\n#endif\n\n//\n// new bug in 5.61:\n#if (__BORLANDC__ >= 0x561) && (__BORLANDC__ <= 0x580)\n   // this seems to be needed by the command line compiler, but not the IDE:\n#  define BOOST_NO_MEMBER_FUNCTION_SPECIALIZATIONS\n#endif\n\n// Borland C++ Builder 2006 Update 2 and below:\n#if (__BORLANDC__ <= 0x582)\n#  define BOOST_NO_SFINAE\n#  define BOOST_BCB_PARTIAL_SPECIALIZATION_BUG\n#  define BOOST_NO_TEMPLATE_TEMPLATES\n\n#  define BOOST_NO_PRIVATE_IN_AGGREGATE\n\n#  ifdef _WIN32\n#     define BOOST_NO_SWPRINTF\n#  elif defined(linux) || defined(__linux__) || defined(__linux)\n      // we should really be able to do without this\n      // but the wcs* functions aren't imported into std::\n#     define BOOST_NO_STDC_NAMESPACE\n      // _CPPUNWIND doesn't get automatically set for some reason:\n#     pragma defineonoption BOOST_CPPUNWIND -x\n#  endif\n#endif\n\n#if (__BORLANDC__ <= 0x613)  // Beman has asked Alisdair for more info\n   // we shouldn't really need this - but too many things choke\n   // without it, this needs more investigation:\n#  define BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS\n#  define BOOST_NO_IS_ABSTRACT\n#  define BOOST_NO_FUNCTION_TYPE_SPECIALIZATIONS\n#  define BOOST_NO_USING_TEMPLATE\n#  define BOOST_SP_NO_SP_CONVERTIBLE\n\n// Temporary workaround\n#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif\n\n// Borland C++ Builder 2008 and below:\n#  define BOOST_NO_INTEGRAL_INT64_T\n#  define 
BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL\n#  define BOOST_NO_DEPENDENT_NESTED_DERIVATIONS\n#  define BOOST_NO_MEMBER_TEMPLATE_FRIENDS\n#  define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#  define BOOST_NO_USING_DECLARATION_OVERLOADS_FROM_TYPENAME_BASE\n#  define BOOST_NO_NESTED_FRIENDSHIP\n#  define BOOST_NO_TYPENAME_WITH_CTOR\n#if (__BORLANDC__ < 0x600)\n#  define BOOST_ILLEGAL_CV_REFERENCES\n#endif\n\n//\n//  Positive Feature detection\n//\n// Borland C++ Builder 2008 and below:\n#if (__BORLANDC__ >= 0x599)\n#  pragma defineonoption BOOST_CODEGEAR_0X_SUPPORT -Ax\n#endif\n//\n// C++0x Macros:\n//\n#if !defined( BOOST_CODEGEAR_0X_SUPPORT ) || (__BORLANDC__ < 0x610)\n#  define BOOST_NO_CXX11_CHAR16_T\n#  define BOOST_NO_CXX11_CHAR32_T\n#  define BOOST_NO_CXX11_DECLTYPE\n#  define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#  define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#  define BOOST_NO_CXX11_SCOPED_ENUMS\n#  define BOOST_NO_CXX11_STATIC_ASSERT\n#else\n#  define BOOST_HAS_ALIGNOF\n#  define BOOST_HAS_CHAR16_T\n#  define BOOST_HAS_CHAR32_T\n#  define BOOST_HAS_DECLTYPE\n#  define BOOST_HAS_EXPLICIT_CONVERSION_OPS\n#  define BOOST_HAS_REF_QUALIFIER\n#  define BOOST_HAS_RVALUE_REFS\n#  define BOOST_HAS_STATIC_ASSERT\n#endif\n\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define 
BOOST_NO_CXX11_UNICODE_LITERALS    // UTF-8 still not supported\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n#if __BORLANDC__ >= 0x590\n#  define BOOST_HAS_TR1_HASH\n\n#  define BOOST_HAS_MACRO_USE_FACET\n#endif\n\n//\n// Post 0x561 we have long long and stdint.h:\n#if __BORLANDC__ >= 0x561\n#  ifndef __NO_LONG_LONG\n#     define BOOST_HAS_LONG_LONG\n#  else\n#     define BOOST_NO_LONG_LONG\n#  endif\n   // On non-Win32 platforms let the platform config figure this out:\n#  ifdef _WIN32\n#      define BOOST_HAS_STDINT_H\n#  endif\n#endif\n\n// 
Borland C++Builder 6 defaults to using STLPort.  If _USE_OLD_RW_STL is\n// defined, then we have 0x560 or greater with the Rogue Wave implementation\n// which presumably has the std::DBL_MAX bug.\n#if defined( BOOST_BCB_WITH_ROGUE_WAVE )\n// <climits> is partly broken, some macros define symbols that are really in\n// namespace std, so you end up having to use illegal constructs like\n// std::DBL_MAX, as a fix we'll just include float.h and have done with:\n#include <float.h>\n#endif\n//\n// __int64:\n//\n#if (__BORLANDC__ >= 0x530) && !defined(__STRICT_ANSI__)\n#  define BOOST_HAS_MS_INT64\n#endif\n//\n// check for exception handling support:\n//\n#if !defined(_CPPUNWIND) && !defined(BOOST_CPPUNWIND) && !defined(__EXCEPTIONS) && !defined(BOOST_NO_EXCEPTIONS)\n#  define BOOST_NO_EXCEPTIONS\n#endif\n//\n// all versions have a <dirent.h>:\n//\n#ifndef __STRICT_ANSI__\n#  define BOOST_HAS_DIRENT_H\n#endif\n//\n// all versions support __declspec:\n//\n#if defined(__STRICT_ANSI__)\n// config/platform/win32.hpp will define BOOST_SYMBOL_EXPORT, etc., unless already defined\n#  define BOOST_SYMBOL_EXPORT\n#endif\n//\n// ABI fixing headers:\n//\n#if __BORLANDC__ != 0x600 // not implemented for version 6 compiler yet\n#ifndef BOOST_ABI_PREFIX\n#  define BOOST_ABI_PREFIX \"boost/config/abi/borland_prefix.hpp\"\n#endif\n#ifndef BOOST_ABI_SUFFIX\n#  define BOOST_ABI_SUFFIX \"boost/config/abi/borland_suffix.hpp\"\n#endif\n#endif\n//\n// Disable Win32 support in ANSI mode:\n//\n#if __BORLANDC__ < 0x600\n#  pragma defineonoption BOOST_DISABLE_WIN32 -A\n#elif defined(__STRICT_ANSI__)\n#  define BOOST_DISABLE_WIN32\n#endif\n//\n// MSVC compatibility mode does some nasty things:\n// TODO: look up if this doesn't apply to the whole 12xx range\n//\n#if defined(_MSC_VER) && (_MSC_VER <= 1200)\n#  define BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP\n#  define BOOST_NO_VOID_RETURNS\n#endif\n\n// Borland did not implement value-initialization completely, as I reported\n// in 2007, Borland Report 
51854, \"Value-initialization: POD struct should be\n// zero-initialized\", http://qc.embarcadero.com/wc/qcmain.aspx?d=51854\n// See also: http://www.boost.org/libs/utility/value_init.htm#compiler_issues\n// (Niels Dekker, LKEB, April 2010)\n#define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n\n#define BOOST_COMPILER \"Borland C++ version \" BOOST_STRINGIZE(__BORLANDC__)\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/clang.hpp",
    "content": "// (C) Copyright Douglas Gregor 2010\n//\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n// Clang compiler setup.\n\n#define BOOST_HAS_PRAGMA_ONCE\n\n// Detecting `-fms-extension` compiler flag assuming that _MSC_VER defined when that flag is used.\n#if defined (_MSC_VER) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 4))\n#   define BOOST_HAS_PRAGMA_DETECT_MISMATCH\n#endif\n\n// When compiling with clang before __has_extension was defined,\n// even if one writes 'defined(__has_extension) && __has_extension(xxx)',\n// clang reports a compiler error. So the only workaround found is:\n\n#ifndef __has_extension\n#define __has_extension __has_feature\n#endif\n\n#ifndef __has_attribute\n#define __has_attribute(x) 0\n#endif\n\n#if !__has_feature(cxx_exceptions) && !defined(BOOST_NO_EXCEPTIONS)\n#  define BOOST_NO_EXCEPTIONS\n#endif\n\n#if !__has_feature(cxx_rtti) && !defined(BOOST_NO_RTTI)\n#  define BOOST_NO_RTTI\n#endif\n\n#if !__has_feature(cxx_rtti) && !defined(BOOST_NO_TYPEID)\n#  define BOOST_NO_TYPEID\n#endif\n\n#if defined(__int64) && !defined(__GNUC__)\n#  define BOOST_HAS_MS_INT64\n#endif\n\n#define BOOST_HAS_NRVO\n\n// Branch prediction hints\n#if defined(__has_builtin)\n#if __has_builtin(__builtin_expect)\n#define BOOST_LIKELY(x) __builtin_expect(x, 1)\n#define BOOST_UNLIKELY(x) __builtin_expect(x, 0)\n#endif\n#endif\n\n// Clang supports \"long long\" in all compilation modes.\n#define BOOST_HAS_LONG_LONG\n\n//\n// We disable this if the compiler is really nvcc as it\n// doesn't actually support __int128 as of CUDA_VERSION=5000\n// even though it defines __SIZEOF_INT128__.\n// See https://svn.boost.org/trac/boost/ticket/10418\n// Only re-enable this for nvcc if you're absolutely sure\n// of the circumstances under 
which it's supported.\n// Similarly __SIZEOF_INT128__ is defined when targetting msvc\n// compatibility even though the required support functions are absent.\n//\n#if defined(__SIZEOF_INT128__) && !defined(__CUDACC__) && !defined(_MSC_VER)\n#  define BOOST_HAS_INT128\n#endif\n\n\n//\n// Dynamic shared object (DSO) and dynamic-link library (DLL) support\n//\n#if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32)\n#  define BOOST_SYMBOL_EXPORT __attribute__((__visibility__(\"default\")))\n#  define BOOST_SYMBOL_IMPORT\n#  define BOOST_SYMBOL_VISIBLE __attribute__((__visibility__(\"default\")))\n#endif\n\n//\n// The BOOST_FALLTHROUGH macro can be used to annotate implicit fall-through\n// between switch labels.\n//\n#if __cplusplus >= 201103L && defined(__has_warning)\n#  if __has_feature(cxx_attributes) && __has_warning(\"-Wimplicit-fallthrough\")\n#    define BOOST_FALLTHROUGH [[clang::fallthrough]]\n#  endif\n#endif\n\n#if !__has_feature(cxx_auto_type)\n#  define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#  define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#endif\n\n//\n// Currently clang on Windows using VC++ RTL does not support C++11's char16_t or char32_t\n//\n#if defined(_MSC_VER) || !(defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L)\n#  define BOOST_NO_CXX11_CHAR16_T\n#  define BOOST_NO_CXX11_CHAR32_T\n#endif\n\n#if !__has_feature(cxx_constexpr)\n#  define BOOST_NO_CXX11_CONSTEXPR\n#endif\n\n#if !__has_feature(cxx_decltype)\n#  define BOOST_NO_CXX11_DECLTYPE\n#endif\n\n#if !__has_feature(cxx_decltype_incomplete_return_types)\n#  define BOOST_NO_CXX11_DECLTYPE_N3276\n#endif\n\n#if !__has_feature(cxx_defaulted_functions)\n#  define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#endif\n\n#if !__has_feature(cxx_deleted_functions)\n#  define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#endif\n\n#if !__has_feature(cxx_explicit_conversions)\n#  define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#endif\n\n#if !__has_feature(cxx_default_function_template_args)\n#  define 
BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#endif\n\n#if !__has_feature(cxx_generalized_initializers)\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#endif\n\n#if !__has_feature(cxx_lambdas)\n#  define BOOST_NO_CXX11_LAMBDAS\n#endif\n\n#if !__has_feature(cxx_local_type_template_args)\n#  define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#endif\n\n#if !__has_feature(cxx_noexcept)\n#  define BOOST_NO_CXX11_NOEXCEPT\n#endif\n\n#if !__has_feature(cxx_nullptr)\n#  define BOOST_NO_CXX11_NULLPTR\n#endif\n\n#if !__has_feature(cxx_range_for)\n#  define BOOST_NO_CXX11_RANGE_BASED_FOR\n#endif\n\n#if !__has_feature(cxx_raw_string_literals)\n#  define BOOST_NO_CXX11_RAW_LITERALS\n#endif\n\n#if !__has_feature(cxx_reference_qualified_functions)\n#  define BOOST_NO_CXX11_REF_QUALIFIERS\n#endif\n\n#if !__has_feature(cxx_generalized_initializers)\n#  define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#endif\n\n#if !__has_feature(cxx_rvalue_references)\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#endif\n\n#if !__has_feature(cxx_strong_enums)\n#  define BOOST_NO_CXX11_SCOPED_ENUMS\n#endif\n\n#if !__has_feature(cxx_static_assert)\n#  define BOOST_NO_CXX11_STATIC_ASSERT\n#endif\n\n#if !__has_feature(cxx_alias_templates)\n#  define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#endif\n\n#if !__has_feature(cxx_unicode_literals)\n#  define BOOST_NO_CXX11_UNICODE_LITERALS\n#endif\n\n#if !__has_feature(cxx_variadic_templates)\n#  define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#endif\n\n#if !__has_feature(cxx_user_literals)\n#  define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#endif\n\n#if !__has_feature(cxx_alignas)\n#  define BOOST_NO_CXX11_ALIGNAS\n#endif\n\n#if !__has_feature(cxx_trailing_return)\n#  define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#endif\n\n#if !__has_feature(cxx_inline_namespaces)\n#  define BOOST_NO_CXX11_INLINE_NAMESPACES\n#endif\n\n#if !__has_feature(cxx_override_control)\n#  define BOOST_NO_CXX11_FINAL\n#endif\n\n#if !(__has_feature(__cxx_binary_literals__) || 
__has_extension(__cxx_binary_literals__))\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n\n#if !__has_feature(__cxx_decltype_auto__)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n\n#if !__has_feature(__cxx_aggregate_nsdmi__)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n\n#if !__has_feature(__cxx_init_captures__)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n\n#if !__has_feature(__cxx_generic_lambdas__)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n\n// clang < 3.5 has a defect with dependent type, like following.\n//\n//  template <class T>\n//  constexpr typename enable_if<pred<T> >::type foo(T &)\n//  { } // error: no return statement in constexpr function\n//\n// This issue also affects C++11 mode, but C++11 constexpr requires return stmt.\n// Therefore we don't care such case.\n//\n// Note that we can't check Clang version directly as the numbering system changes depending who's\n// creating the Clang release (see https://github.com/boostorg/config/pull/39#issuecomment-59927873)\n// so instead verify that we have a feature that was introduced at the same time as working C++14\n// constexpr (generic lambda's in this case):\n//\n#if !__has_feature(__cxx_generic_lambdas__) || !__has_feature(__cxx_relaxed_constexpr__)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n\n#if !__has_feature(__cxx_return_type_deduction__)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n\n#if !__has_feature(__cxx_variable_templates__)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n#if __cplusplus < 201400\n// All versions with __cplusplus above this value seem to support this:\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n//\n// __builtin_unreachable:\n#if defined(__has_builtin) && __has_builtin(__builtin_unreachable)\n#define BOOST_UNREACHABLE_RETURN(x) __builtin_unreachable();\n#endif\n\n// Clang has supported the 'unused' attribute since the first release.\n#define BOOST_ATTRIBUTE_UNUSED __attribute__((__unused__))\n\n#ifndef 
BOOST_COMPILER\n#  define BOOST_COMPILER \"Clang version \" __clang_version__\n#endif\n\n// Macro used to identify the Clang compiler.\n#define BOOST_CLANG 1\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/codegear.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003.\n//  (C) Copyright David Abrahams 2002 - 2003.\n//  (C) Copyright Aleksey Gurtovoy 2002.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  CodeGear C++ compiler setup:\n\n#if !defined( BOOST_WITH_CODEGEAR_WARNINGS )\n// these warnings occur frequently in optimized template code\n# pragma warn -8004 // var assigned value, but never used\n# pragma warn -8008 // condition always true/false\n# pragma warn -8066 // dead code can never execute\n# pragma warn -8104 // static members with ctors not threadsafe\n# pragma warn -8105 // reference member in class without ctors\n#endif\n//\n// versions check:\n// last known and checked version is 0x621\n#if (__CODEGEARC__ > 0x621)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  else\n#     pragma message( \"Unknown compiler version - please run the configure tests and report the results\")\n#  endif\n#endif\n\n// CodeGear C++ Builder 2009\n#if (__CODEGEARC__ <= 0x613)\n#  define BOOST_NO_INTEGRAL_INT64_T\n#  define BOOST_NO_DEPENDENT_NESTED_DERIVATIONS\n#  define BOOST_NO_PRIVATE_IN_AGGREGATE\n#  define BOOST_NO_USING_DECLARATION_OVERLOADS_FROM_TYPENAME_BASE\n   // we shouldn't really need this - but too many things choke\n   // without it, this needs more investigation:\n#  define BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS\n#  define BOOST_SP_NO_SP_CONVERTIBLE\n#endif\n\n// CodeGear C++ Builder 2010\n#if (__CODEGEARC__ <= 0x621)\n#  define BOOST_NO_TYPENAME_WITH_CTOR    // Cannot use typename keyword when making temporaries of a dependant type\n#  define BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL\n#  define BOOST_NO_MEMBER_TEMPLATE_FRIENDS\n#  define 
BOOST_NO_NESTED_FRIENDSHIP     // TC1 gives nested classes access rights as any other member\n#  define BOOST_NO_USING_TEMPLATE\n#  define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n// Temporary hack, until specific MPL preprocessed headers are generated\n#  define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n// CodeGear has not yet completely implemented value-initialization, for\n// example for array types, as I reported in 2010: Embarcadero Report 83751,\n// \"Value-initialization: arrays should have each element value-initialized\",\n// http://qc.embarcadero.com/wc/qcmain.aspx?d=83751\n// Last checked version: Embarcadero C++ 6.21\n// See also: http://www.boost.org/libs/utility/value_init.htm#compiler_issues\n// (Niels Dekker, LKEB, April 2010)\n#  define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n\n#  if defined(NDEBUG) && defined(__cplusplus)\n      // fix broken <cstring> so that Boost.test works:\n#     include <cstring>\n#     undef strcmp\n#  endif\n   // fix broken errno declaration:\n#  include <errno.h>\n#  ifndef errno\n#     define errno errno\n#  endif\n\n#endif\n\n// Reportedly, #pragma once is supported since C++ Builder 2010\n#if (__CODEGEARC__ >= 0x620)\n#  define BOOST_HAS_PRAGMA_ONCE\n#endif\n\n//\n// C++0x macros:\n//\n#if (__CODEGEARC__ <= 0x620)\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#else\n#define BOOST_HAS_STATIC_ASSERT\n#endif\n#define BOOST_HAS_CHAR16_T\n#define BOOST_HAS_CHAR32_T\n#define BOOST_HAS_LONG_LONG\n// #define BOOST_HAS_ALIGNOF\n#define BOOST_HAS_DECLTYPE\n#define BOOST_HAS_EXPLICIT_CONVERSION_OPS\n// #define BOOST_HAS_RVALUE_REFS\n#define BOOST_HAS_SCOPED_ENUM\n// #define BOOST_HAS_STATIC_ASSERT\n#define BOOST_HAS_STD_TYPE_TRAITS\n\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define 
BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n//\n// TR1 macros:\n//\n#define BOOST_HAS_TR1_HASH\n#define BOOST_HAS_TR1_TYPE_TRAITS\n#define BOOST_HAS_TR1_UNORDERED_MAP\n#define 
BOOST_HAS_TR1_UNORDERED_SET\n\n#define BOOST_HAS_MACRO_USE_FACET\n\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n\n// On non-Win32 platforms let the platform config figure this out:\n#ifdef _WIN32\n#  define BOOST_HAS_STDINT_H\n#endif\n\n//\n// __int64:\n//\n#if !defined(__STRICT_ANSI__)\n#  define BOOST_HAS_MS_INT64\n#endif\n//\n// check for exception handling support:\n//\n#if !defined(_CPPUNWIND) && !defined(BOOST_CPPUNWIND) && !defined(__EXCEPTIONS) && !defined(BOOST_NO_EXCEPTIONS)\n#  define BOOST_NO_EXCEPTIONS\n#endif\n//\n// all versions have a <dirent.h>:\n//\n#if !defined(__STRICT_ANSI__)\n#  define BOOST_HAS_DIRENT_H\n#endif\n//\n// all versions support __declspec:\n//\n#if defined(__STRICT_ANSI__)\n// config/platform/win32.hpp will define BOOST_SYMBOL_EXPORT, etc., unless already defined\n#  define BOOST_SYMBOL_EXPORT\n#endif\n//\n// ABI fixing headers:\n//\n#ifndef BOOST_ABI_PREFIX\n#  define BOOST_ABI_PREFIX \"boost/config/abi/borland_prefix.hpp\"\n#endif\n#ifndef BOOST_ABI_SUFFIX\n#  define BOOST_ABI_SUFFIX \"boost/config/abi/borland_suffix.hpp\"\n#endif\n//\n// Disable Win32 support in ANSI mode:\n//\n#  pragma defineonoption BOOST_DISABLE_WIN32 -A\n//\n// MSVC compatibility mode does some nasty things:\n// TODO: look up if this doesn't apply to the whole 12xx range\n//\n#if defined(_MSC_VER) && (_MSC_VER <= 1200)\n#  define BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP\n#  define BOOST_NO_VOID_RETURNS\n#endif\n\n#define BOOST_COMPILER \"CodeGear C++ version \" BOOST_STRINGIZE(__CODEGEARC__)\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/comeau.hpp",
    "content": "//  (C) Copyright John Maddock 2001. \n//  (C) Copyright Douglas Gregor 2001. \n//  (C) Copyright Peter Dimov 2001. \n//  (C) Copyright Aleksey Gurtovoy 2003. \n//  (C) Copyright Beman Dawes 2003. \n//  (C) Copyright Jens Maurer 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Comeau C++ compiler setup:\n\n#include \"boost/config/compiler/common_edg.hpp\"\n\n#if (__COMO_VERSION__ <= 4245)\n\n#  if defined(_MSC_VER) && _MSC_VER <= 1300\n#     if _MSC_VER > 100\n         // only set this in non-strict mode:\n#        define BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP\n#     endif\n#  endif\n\n// Void returns don't work when emulating VC 6 (Peter Dimov)\n// TODO: look up if this doesn't apply to the whole 12xx range\n#  if defined(_MSC_VER) && (_MSC_VER < 1300)\n#     define BOOST_NO_VOID_RETURNS\n#  endif\n\n#endif  // version 4245\n\n//\n// enable __int64 support in VC emulation mode\n//\n#  if defined(_MSC_VER) && (_MSC_VER >= 1200)\n#     define BOOST_HAS_MS_INT64\n#  endif\n\n#define BOOST_COMPILER \"Comeau compiler version \" BOOST_STRINGIZE(__COMO_VERSION__)\n\n//\n// versions check:\n// we don't know Comeau prior to version 4245:\n#if __COMO_VERSION__ < 4245\n#  error \"Compiler not configured - please reconfigure\"\n#endif\n//\n// last known and checked version is 4245:\n#if (__COMO_VERSION__ > 4245)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/common_edg.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2002.\n//  (C) Copyright Jens Maurer 2001.\n//  (C) Copyright David Abrahams 2002.\n//  (C) Copyright Aleksey Gurtovoy 2002.\n//  (C) Copyright Markus Schoepflin 2005.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//\n// Options common to all edg based compilers.\n//\n// This is included from within the individual compiler mini-configs.\n\n#ifndef  __EDG_VERSION__\n#  error This file requires that __EDG_VERSION__ be defined.\n#endif\n\n#if (__EDG_VERSION__ <= 238)\n#   define BOOST_NO_INTEGRAL_INT64_T\n#   define BOOST_NO_SFINAE\n#endif\n\n#if (__EDG_VERSION__ <= 240)\n#   define BOOST_NO_VOID_RETURNS\n#endif\n\n#if (__EDG_VERSION__ <= 241) && !defined(BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP)\n#   define BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP\n#endif\n\n#if (__EDG_VERSION__ <= 244) && !defined(BOOST_NO_TEMPLATE_TEMPLATES)\n#   define BOOST_NO_TEMPLATE_TEMPLATES\n#endif\n\n#if (__EDG_VERSION__ < 300) && !defined(BOOST_NO_IS_ABSTRACT)\n#   define BOOST_NO_IS_ABSTRACT\n#endif\n\n#if (__EDG_VERSION__ <= 303) && !defined(BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL)\n#   define BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL\n#endif\n\n// See also kai.hpp which checks a Kai-specific symbol for EH\n# if !defined(__KCC) && !defined(__EXCEPTIONS) && !defined(BOOST_NO_EXCEPTIONS)\n#     define BOOST_NO_EXCEPTIONS\n# endif\n\n# if !defined(__NO_LONG_LONG)\n#     define BOOST_HAS_LONG_LONG\n# else\n#     define BOOST_NO_LONG_LONG\n# endif\n\n// Not sure what version was the first to support #pragma once, but\n// different EDG-based compilers (e.g. 
Intel) supported it for ages.\n// Add a proper version check if it causes problems.\n#define BOOST_HAS_PRAGMA_ONCE\n\n//\n// C++0x features\n//\n//   See above for BOOST_NO_LONG_LONG\n//\n#if (__EDG_VERSION__ < 310)\n#  define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#endif\n#if (__EDG_VERSION__ <= 310)\n// No support for initializer lists\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#endif\n#if (__EDG_VERSION__ < 400)\n#  define BOOST_NO_CXX11_VARIADIC_MACROS\n#endif\n\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_CHAR16_T\n#define BOOST_NO_CXX11_CHAR32_T\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DECLTYPE\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if 
!defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n#ifdef c_plusplus\n// EDG has \"long long\" in non-strict mode\n// However, some libraries have insufficient \"long long\" support\n// #define BOOST_HAS_LONG_LONG\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/compaq_cxx.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Tru64 C++ compiler setup (now HP):\n\n#define BOOST_COMPILER \"HP Tru64 C++ \" BOOST_STRINGIZE(__DECCXX_VER)\n\n#include \"boost/config/compiler/common_edg.hpp\"\n\n//\n// versions check:\n// Nothing to do here?\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/cray.hpp",
    "content": "//  (C) Copyright John Maddock 2011.\n//  (C) Copyright Cray, Inc. 2013\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Greenhills C compiler setup:\n\n#define BOOST_COMPILER \"Cray C version \" BOOST_STRINGIZE(_RELEASE)\n\n#if _RELEASE < 8\n#  error \"Boost is not configured for Cray compilers prior to version 8, please try the configure script.\"\n#endif\n\n//\n// Check this is a recent EDG based compiler, otherwise we don't support it here:\n//\n#ifndef __EDG_VERSION__\n#  error \"Unsupported Cray compiler, please try running the configure script.\"\n#endif\n\n#include \"boost/config/compiler/common_edg.hpp\"\n\n\n//\n//\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_HAS_NRVO\n#define BOOST_NO_CXX11_VARIADIC_MACROS\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#define BOOST_HAS_NRVO\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DECLTYPE\n#define BOOST_NO_CXX11_CONSTEXPR\n#define 
BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n#define BOOST_NO_CXX11_CHAR32_T\n#define BOOST_NO_CXX11_CHAR16_T\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n\n//#define BOOST_BCB_PARTIAL_SPECIALIZATION_BUG\n#define BOOST_MATH_DISABLE_STD_FPCLASSIFY\n//#define BOOST_HAS_FPCLASSIFY\n\n#define BOOST_SP_USE_PTHREADS \n#define BOOST_AC_USE_PTHREADS \n\n/* everything that follows is working around what are thought to be\n * compiler shortcomings.  Revist all of these regularly.\n */\n\n//#define BOOST_USE_ENUM_STATIC_ASSERT\n//#define BOOST_BUGGY_INTEGRAL_CONSTANT_EXPRESSIONS //(this may be implied by the previous #define\n\n// These constants should be provided by the \n// compiler, at least when -hgnu is asserted on the command line.\n\n#ifndef __ATOMIC_RELAXED\n#define __ATOMIC_RELAXED 0\n#define __ATOMIC_CONSUME 1\n#define __ATOMIC_ACQUIRE 2\n#define __ATOMIC_RELEASE 3\n#define __ATOMIC_ACQ_REL 4\n#define __ATOMIC_SEQ_CST 5\n#endif\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/digitalmars.hpp",
    "content": "//  Copyright (C) Christof Meerwald 2003\n//  Copyright (C) Dan Watkins 2003\n//\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  Digital Mars C++ compiler setup:\n#define BOOST_COMPILER __DMC_VERSION_STRING__\n\n#define BOOST_HAS_LONG_LONG\n#define BOOST_HAS_PRAGMA_ONCE\n\n#if !defined(BOOST_STRICT_CONFIG)\n#define BOOST_NO_MEMBER_TEMPLATE_FRIENDS\n#define BOOST_NO_OPERATORS_IN_NAMESPACE\n#define BOOST_NO_UNREACHABLE_RETURN_DETECTION\n#define BOOST_NO_SFINAE\n#define BOOST_NO_USING_TEMPLATE\n#define BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL\n#endif\n\n//\n// has macros:\n#define BOOST_HAS_DIRENT_H\n#define BOOST_HAS_STDINT_H\n#define BOOST_HAS_WINTHREADS\n\n#if (__DMC__ >= 0x847)\n#define BOOST_HAS_EXPM1\n#define BOOST_HAS_LOG1P\n#endif\n\n//\n// Is this really the best way to detect whether the std lib is in namespace std?\n//\n#ifdef __cplusplus\n#include <cstddef>\n#endif\n#if !defined(__STL_IMPORT_VENDOR_CSTD) && !defined(_STLP_IMPORT_VENDOR_CSTD)\n#  define BOOST_NO_STDC_NAMESPACE\n#endif\n\n\n// check for exception handling support:\n#if !defined(_CPPUNWIND) && !defined(BOOST_NO_EXCEPTIONS)\n#  define BOOST_NO_EXCEPTIONS\n#endif\n\n//\n// C++0x features\n//\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_CHAR16_T\n#define BOOST_NO_CXX11_CHAR32_T\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DECLTYPE\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define 
BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n#if (__DMC__ <= 0x840)\n#error \"Compiler not supported or configured - please reconfigure\"\n#endif\n//\n// last known and checked version is ...:\n#if (__DMC__ > 0x848)\n#  if defined(BOOST_ASSERT_CONFIG)\n#  
   error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/gcc.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003.\n//  (C) Copyright Darin Adler 2001 - 2002.\n//  (C) Copyright Jens Maurer 2001 - 2002.\n//  (C) Copyright Beman Dawes 2001 - 2003.\n//  (C) Copyright Douglas Gregor 2002.\n//  (C) Copyright David Abrahams 2002 - 2003.\n//  (C) Copyright Synge Todo 2003.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  GNU C++ compiler setup.\n\n//\n// Define BOOST_GCC so we know this is \"real\" GCC and not some pretender:\n//\n#define BOOST_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)\n#if !defined(__CUDACC__)\n#define BOOST_GCC BOOST_GCC_VERSION\n#endif\n\n#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L)\n#  define BOOST_GCC_CXX11\n#endif\n\n#if __GNUC__ == 3\n#  if defined (__PATHSCALE__)\n#     define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#     define BOOST_NO_IS_ABSTRACT\n#  endif\n\n#  if __GNUC_MINOR__ < 4\n#     define BOOST_NO_IS_ABSTRACT\n#  endif\n#  define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#endif\n#if __GNUC__ < 4\n//\n// All problems to gcc-3.x and earlier here:\n//\n#define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#  ifdef __OPEN64__\n#     define BOOST_NO_IS_ABSTRACT\n#  endif\n#endif\n\n// GCC prior to 3.4 had #pragma once too but it didn't work well with filesystem links\n#if BOOST_GCC_VERSION >= 30400\n#define BOOST_HAS_PRAGMA_ONCE\n#endif\n\n#if BOOST_GCC_VERSION < 40400\n// Previous versions of GCC did not completely implement value-initialization:\n// GCC Bug 30111, \"Value-initialization of POD base class doesn't initialize\n// members\", reported by Jonathan Wakely in 2006,\n// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=30111 (fixed for GCC 4.4)\n// GCC Bug 33916, \"Default constructor fails to initialize array members\",\n// reported by Michael Elizabeth 
Chastain in 2007,\n// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33916 (fixed for GCC 4.2.4)\n// See also: http://www.boost.org/libs/utility/value_init.htm#compiler_issues\n#define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n#endif\n\n#if !defined(__EXCEPTIONS) && !defined(BOOST_NO_EXCEPTIONS)\n# define BOOST_NO_EXCEPTIONS\n#endif\n\n\n//\n// Threading support: Turn this on unconditionally here (except for\n// those platforms where we can know for sure). It will get turned off again\n// later if no threading API is detected.\n//\n#if !defined(__MINGW32__) && !defined(linux) && !defined(__linux) && !defined(__linux__)\n# define BOOST_HAS_THREADS\n#endif\n\n//\n// gcc has \"long long\"\n// Except on Darwin with standard compliance enabled (-pedantic)\n// Apple gcc helpfully defines this macro we can query\n//\n#if !defined(__DARWIN_NO_LONG_LONG)\n# define BOOST_HAS_LONG_LONG\n#endif\n\n//\n// gcc implements the named return value optimization since version 3.1\n//\n#define BOOST_HAS_NRVO\n\n// Branch prediction hints\n#define BOOST_LIKELY(x) __builtin_expect(x, 1)\n#define BOOST_UNLIKELY(x) __builtin_expect(x, 0)\n\n//\n// Dynamic shared object (DSO) and dynamic-link library (DLL) support\n//\n#if __GNUC__ >= 4\n#  if (defined(_WIN32) || defined(__WIN32__) || defined(WIN32)) && !defined(__CYGWIN__)\n     // All Win32 development environments, including 64-bit Windows and MinGW, define\n     // _WIN32 or one of its variant spellings. 
Note that Cygwin is a POSIX environment,\n     // so does not define _WIN32 or its variants.\n#    define BOOST_HAS_DECLSPEC\n#    define BOOST_SYMBOL_EXPORT __attribute__((__dllexport__))\n#    define BOOST_SYMBOL_IMPORT __attribute__((__dllimport__))\n#  else\n#    define BOOST_SYMBOL_EXPORT __attribute__((__visibility__(\"default\")))\n#    define BOOST_SYMBOL_IMPORT\n#  endif\n#  define BOOST_SYMBOL_VISIBLE __attribute__((__visibility__(\"default\")))\n#else\n// config/platform/win32.hpp will define BOOST_SYMBOL_EXPORT, etc., unless already defined\n#  define BOOST_SYMBOL_EXPORT\n#endif\n\n//\n// RTTI and typeinfo detection is possible post gcc-4.3:\n//\n#if BOOST_GCC_VERSION > 40300\n#  ifndef __GXX_RTTI\n#     ifndef BOOST_NO_TYPEID\n#        define BOOST_NO_TYPEID\n#     endif\n#     ifndef BOOST_NO_RTTI\n#        define BOOST_NO_RTTI\n#     endif\n#  endif\n#endif\n\n//\n// Recent GCC versions have __int128 when in 64-bit mode.\n//\n// We disable this if the compiler is really nvcc as it\n// doesn't actually support __int128 as of CUDA_VERSION=5000\n// even though it defines __SIZEOF_INT128__.\n// See https://svn.boost.org/trac/boost/ticket/8048\n// Only re-enable this for nvcc if you're absolutely sure\n// of the circumstances under which it's supported:\n//\n#if defined(__SIZEOF_INT128__) && !defined(__CUDACC__)\n#  define BOOST_HAS_INT128\n#endif\n//\n// Recent GCC versions have a __float128 native type, we need to\n// include a std lib header to detect this - not ideal, but we'll\n// be including <cstddef> later anyway when we select the std lib.\n//\n#ifdef __cplusplus\n#include <cstddef>\n#else\n#include <stddef.h>\n#endif\n#if defined(_GLIBCXX_USE_FLOAT128) && !defined(__STRICT_ANSI__)\n# define BOOST_HAS_FLOAT128\n#endif\n\n// C++0x features in 4.3.n and later\n//\n#if (BOOST_GCC_VERSION >= 40300) && defined(BOOST_GCC_CXX11)\n// C++0x features are only enabled when -std=c++0x or -std=gnu++0x are\n// passed on the command line, which in turn 
defines\n// __GXX_EXPERIMENTAL_CXX0X__.\n#  define BOOST_HAS_DECLTYPE\n#  define BOOST_HAS_RVALUE_REFS\n#  define BOOST_HAS_STATIC_ASSERT\n#  define BOOST_HAS_VARIADIC_TMPL\n#else\n#  define BOOST_NO_CXX11_DECLTYPE\n#  define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#  define BOOST_NO_CXX11_STATIC_ASSERT\n#endif\n\n// C++0x features in 4.4.n and later\n//\n#if (BOOST_GCC_VERSION < 40400) || !defined(BOOST_GCC_CXX11)\n#  define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#  define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#  define BOOST_NO_CXX11_CHAR16_T\n#  define BOOST_NO_CXX11_CHAR32_T\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#  define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#  define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#  define BOOST_NO_CXX11_INLINE_NAMESPACES\n#  define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#endif\n\n#if BOOST_GCC_VERSION < 40500\n#  define BOOST_NO_SFINAE_EXPR\n#endif\n\n// GCC 4.5 forbids declaration of defaulted functions in private or protected sections\n#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 5) || !defined(BOOST_GCC_CXX11)\n#  define BOOST_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS\n#endif\n\n// C++0x features in 4.5.0 and later\n//\n#if (BOOST_GCC_VERSION < 40500) || !defined(BOOST_GCC_CXX11)\n#  define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#  define BOOST_NO_CXX11_LAMBDAS\n#  define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#  define BOOST_NO_CXX11_RAW_LITERALS\n#  define BOOST_NO_CXX11_UNICODE_LITERALS\n#endif\n\n// C++0x features in 4.5.1 and later\n//\n#if (BOOST_GCC_VERSION < 40501) || !defined(BOOST_GCC_CXX11)\n// scoped enums have a serious bug in 4.4.0, so define BOOST_NO_CXX11_SCOPED_ENUMS before 4.5.1\n// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38064\n#  define BOOST_NO_CXX11_SCOPED_ENUMS\n#endif\n\n// C++0x features in 4.6.n and later\n//\n#if (BOOST_GCC_VERSION < 40600) || !defined(BOOST_GCC_CXX11)\n#define 
BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#endif\n\n// C++0x features in 4.7.n and later\n//\n#if (BOOST_GCC_VERSION < 40700) || !defined(BOOST_GCC_CXX11)\n#  define BOOST_NO_CXX11_FINAL\n#  define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#  define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#  define BOOST_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS\n#endif\n\n// C++0x features in 4.8.n and later\n//\n#if (BOOST_GCC_VERSION < 40800) || !defined(BOOST_GCC_CXX11)\n#  define BOOST_NO_CXX11_ALIGNAS\n#endif\n\n// C++0x features in 4.8.1 and later\n//\n#if (BOOST_GCC_VERSION < 40801) || !defined(BOOST_GCC_CXX11)\n#  define BOOST_NO_CXX11_DECLTYPE_N3276\n#  define BOOST_NO_CXX11_REF_QUALIFIERS\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n\n// C++14 features in 4.9.0 and later\n//\n#if (BOOST_GCC_VERSION < 40900) || (__cplusplus < 201300)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#  if !((BOOST_GCC_VERSION >= 40801) && (BOOST_GCC_VERSION < 40900) && defined(BOOST_GCC_CXX11))\n#     define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#  endif\n#endif\n\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n//\n// Unused attribute:\n#if __GNUC__ >= 4\n#  define BOOST_ATTRIBUTE_UNUSED __attribute__((__unused__))\n#endif\n//\n// __builtin_unreachable:\n#if BOOST_GCC_VERSION >= 40800\n#define BOOST_UNREACHABLE_RETURN(x) __builtin_unreachable();\n#endif\n\n#ifndef BOOST_COMPILER\n#  define 
BOOST_COMPILER \"GNU C++ version \" __VERSION__\n#endif\n\n// ConceptGCC compiler:\n//   http://www.generic-programming.org/software/ConceptGCC/\n#ifdef __GXX_CONCEPTS__\n#  define BOOST_HAS_CONCEPTS\n#  define BOOST_COMPILER \"ConceptGCC version \" __VERSION__\n#endif\n\n// versions check:\n// we don't know gcc prior to version 3.30:\n#if (BOOST_GCC_VERSION< 30300)\n#  error \"Compiler not configured - please reconfigure\"\n#endif\n//\n// last known and checked version is 4.9:\n#if (BOOST_GCC_VERSION > 40900)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  else\n// we don't emit warnings here anymore since there are no defect macros defined for\n// gcc post 3.4, so any failures are gcc regressions...\n//#     warning \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/gcc_xml.hpp",
    "content": "//  (C) Copyright John Maddock 2006.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  GCC-XML C++ compiler setup:\n\n#  if !defined(__GCCXML_GNUC__) || ((__GCCXML_GNUC__ <= 3) && (__GCCXML_GNUC_MINOR__ <= 3))\n#     define BOOST_NO_IS_ABSTRACT\n#  endif\n\n//\n// Threading support: Turn this on unconditionally here (except for\n// those platforms where we can know for sure). It will get turned off again\n// later if no threading API is detected.\n//\n#if !defined(__MINGW32__) && !defined(_MSC_VER) && !defined(linux) && !defined(__linux) && !defined(__linux__)\n# define BOOST_HAS_THREADS\n#endif\n\n//\n// gcc has \"long long\"\n//\n#define BOOST_HAS_LONG_LONG\n\n// C++0x features:\n//\n#  define BOOST_NO_CXX11_CONSTEXPR\n#  define BOOST_NO_CXX11_NULLPTR\n#  define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#  define BOOST_NO_CXX11_DECLTYPE\n#  define BOOST_NO_CXX11_DECLTYPE_N3276\n#  define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#  define BOOST_NO_CXX11_STATIC_ASSERT\n#  define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#  define BOOST_NO_CXX11_VARIADIC_MACROS\n#  define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#  define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#  define BOOST_NO_CXX11_CHAR16_T\n#  define BOOST_NO_CXX11_CHAR32_T\n#  define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#  define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_SCOPED_ENUMS\n#  define BOOST_NO_SFINAE_EXPR\n#  define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#  define BOOST_NO_CXX11_LAMBDAS\n#  define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#  define BOOST_NO_CXX11_RANGE_BASED_FOR\n#  define BOOST_NO_CXX11_RAW_LITERALS\n#  define BOOST_NO_CXX11_UNICODE_LITERALS\n#  define 
BOOST_NO_CXX11_NOEXCEPT\n#  define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#  define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#  define BOOST_NO_CXX11_ALIGNAS\n#  define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#  define BOOST_NO_CXX11_INLINE_NAMESPACES\n#  define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n#define BOOST_COMPILER \"GCC-XML C++ version \" __GCCXML__\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/greenhills.hpp",
    "content": "//  (C) Copyright John Maddock 2001. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Greenhills C++ compiler setup:\n\n#define BOOST_COMPILER \"Greenhills C++ version \" BOOST_STRINGIZE(__ghs)\n\n#include \"boost/config/compiler/common_edg.hpp\"\n\n//\n// versions check:\n// we don't support Greenhills prior to version 0:\n#if __ghs < 0\n#  error \"Compiler not supported or configured - please reconfigure\"\n#endif\n//\n// last known and checked version is 0:\n#if (__ghs > 0)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/hp_acc.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003.\n//  (C) Copyright Jens Maurer 2001 - 2003.\n//  (C) Copyright Aleksey Gurtovoy 2002.\n//  (C) Copyright David Abrahams 2002 - 2003.\n//  (C) Copyright Toon Knapen 2003.\n//  (C) Copyright Boris Gubenko 2006 - 2007.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  HP aCC C++ compiler setup:\n\n#if defined(__EDG__)\n#include \"boost/config/compiler/common_edg.hpp\"\n#endif\n\n#if (__HP_aCC <= 33100)\n#    define BOOST_NO_INTEGRAL_INT64_T\n#    define BOOST_NO_OPERATORS_IN_NAMESPACE\n#  if !defined(_NAMESPACE_STD)\n#     define BOOST_NO_STD_LOCALE\n#     define BOOST_NO_STRINGSTREAM\n#  endif\n#endif\n\n#if (__HP_aCC <= 33300)\n// member templates are sufficiently broken that we disable them for now\n#    define BOOST_NO_MEMBER_TEMPLATES\n#    define BOOST_NO_DEPENDENT_NESTED_DERIVATIONS\n#    define BOOST_NO_USING_DECLARATION_OVERLOADS_FROM_TYPENAME_BASE\n#endif\n\n#if (__HP_aCC <= 38000)\n#  define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#endif\n\n#if (__HP_aCC > 50000) && (__HP_aCC < 60000)\n#    define BOOST_NO_UNREACHABLE_RETURN_DETECTION\n#    define BOOST_NO_TEMPLATE_TEMPLATES\n#    define BOOST_NO_SWPRINTF\n#    define BOOST_NO_DEPENDENT_TYPES_IN_TEMPLATE_VALUE_PARAMETERS\n#    define BOOST_NO_IS_ABSTRACT\n#    define BOOST_NO_MEMBER_TEMPLATE_FRIENDS\n#endif\n\n// optional features rather than defects:\n#if (__HP_aCC >= 33900)\n#    define BOOST_HAS_LONG_LONG\n#    define BOOST_HAS_PARTIAL_STD_ALLOCATOR\n#endif\n\n#if (__HP_aCC >= 50000 ) && (__HP_aCC <= 53800 ) || (__HP_aCC < 31300 )\n#    define BOOST_NO_MEMBER_TEMPLATE_KEYWORD\n#endif\n\n// This macro should not be defined when compiling in strict ansi\n// mode, but, currently, we don't have the ability to determine\n// what standard mode we are 
compiling with. Some future version\n// of aCC6 compiler will provide predefined macros reflecting the\n// compilation options, including the standard mode.\n#if (__HP_aCC >= 60000) || ((__HP_aCC > 38000) && defined(__hpxstd98))\n#    define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#endif\n\n#define BOOST_COMPILER \"HP aCC version \" BOOST_STRINGIZE(__HP_aCC)\n\n//\n// versions check:\n// we don't support HP aCC prior to version 33000:\n#if __HP_aCC < 33000\n#  error \"Compiler not supported or configured - please reconfigure\"\n#endif\n\n//\n// Extended checks for supporting aCC on PA-RISC\n#if __HP_aCC > 30000 && __HP_aCC < 50000\n#  if __HP_aCC < 38000\n      // versions prior to version A.03.80 not supported\n#     error \"Compiler version not supported - version A.03.80 or higher is required\"\n#  elif !defined(__hpxstd98)\n      // must compile using the option +hpxstd98 with version A.03.80 and above\n#     error \"Compiler option '+hpxstd98' is required for proper support\"\n#  endif //PA-RISC\n#endif\n\n//\n// C++0x features\n//\n//   See boost\\config\\suffix.hpp for BOOST_NO_LONG_LONG\n//\n#if !defined(__EDG__)\n\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_CHAR16_T\n#define BOOST_NO_CXX11_CHAR32_T\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DECLTYPE\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define 
BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n\n/*\n  See https://forums13.itrc.hp.com/service/forums/questionanswer.do?threadId=1443331 and\n      https://forums13.itrc.hp.com/service/forums/questionanswer.do?threadId=1443436\n*/\n\n#if (__HP_aCC < 62500) || !defined(HP_CXX0x_SOURCE)\n  #define BOOST_NO_CXX11_VARIADIC_MACROS\n#endif\n\n#endif\n\n//\n// last known and checked version for HP-UX/ia64 is 61300\n// last known and checked version for PA-RISC is 38000\n#if ((__HP_aCC > 61300) || ((__HP_aCC > 38000) && defined(__hpxstd98)))\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/intel.hpp",
    "content": "//  (C) Copyright John Maddock 2001-8.\n//  (C) Copyright Peter Dimov 2001.\n//  (C) Copyright Jens Maurer 2001.\n//  (C) Copyright David Abrahams 2002 - 2003.\n//  (C) Copyright Aleksey Gurtovoy 2002 - 2003.\n//  (C) Copyright Guillaume Melquiond 2002 - 2003.\n//  (C) Copyright Beman Dawes 2003.\n//  (C) Copyright Martin Wille 2003.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Intel compiler setup:\n\n#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500) && (defined(_MSC_VER) || defined(__GNUC__))\n\n#ifdef _MSC_VER\n\n#include <boost/config/compiler/visualc.hpp>\n\n#undef BOOST_MSVC\n#undef BOOST_MSVC_FULL_VER\n\n#if (__INTEL_COMPILER >= 1500) && (_MSC_VER >= 1900)\n//\n// These appear to be supported, even though VC++ may not support them:\n//\n#define BOOST_HAS_EXPM1\n#define BOOST_HAS_LOG1P\n#undef BOOST_NO_CXX14_BINARY_LITERALS\n// This one may be a little risky to enable??\n#undef BOOST_NO_SFINAE_EXPR\n\n#endif\n\n#else\n\n#include <boost/config/compiler/gcc.hpp>\n\n#undef BOOST_GCC_VERSION\n#undef BOOST_GCC_CXX11\n\n#endif\n\n#undef BOOST_COMPILER\n\n#if defined(__INTEL_COMPILER)\n#if __INTEL_COMPILER == 9999\n#  define BOOST_INTEL_CXX_VERSION 1200 // Intel bug in 12.1.\n#else\n#  define BOOST_INTEL_CXX_VERSION __INTEL_COMPILER\n#endif\n#elif defined(__ICL)\n#  define BOOST_INTEL_CXX_VERSION __ICL\n#elif defined(__ICC)\n#  define BOOST_INTEL_CXX_VERSION __ICC\n#elif defined(__ECC)\n#  define BOOST_INTEL_CXX_VERSION __ECC\n#endif\n\n// Flags determined by comparing output of 'icpc -dM -E' with and without '-std=c++0x'\n#if (!(defined(_WIN32) || defined(_WIN64)) && defined(__STDC_HOSTED__) && (__STDC_HOSTED__ && (BOOST_INTEL_CXX_VERSION <= 1200))) || defined(__GXX_EXPERIMENTAL_CPP0X__) || defined(__GXX_EXPERIMENTAL_CXX0X__)\n#  
define BOOST_INTEL_STDCXX0X\n#endif\n#if defined(_MSC_VER) && (_MSC_VER >= 1600)\n#  define BOOST_INTEL_STDCXX0X\n#endif\n\n#ifdef __GNUC__\n#  define BOOST_INTEL_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)\n#endif\n\n#if !defined(BOOST_COMPILER)\n#  if defined(BOOST_INTEL_STDCXX0X)\n#    define BOOST_COMPILER \"Intel C++ C++0x mode version \" BOOST_STRINGIZE(BOOST_INTEL_CXX_VERSION)\n#  else\n#    define BOOST_COMPILER \"Intel C++ version \" BOOST_STRINGIZE(BOOST_INTEL_CXX_VERSION)\n#  endif\n#endif\n\n#define BOOST_INTEL BOOST_INTEL_CXX_VERSION\n\n#if defined(_WIN32) || defined(_WIN64)\n#  define BOOST_INTEL_WIN BOOST_INTEL\n#else\n#  define BOOST_INTEL_LINUX BOOST_INTEL\n#endif\n\n#else\n\n#include \"boost/config/compiler/common_edg.hpp\"\n\n#if defined(__INTEL_COMPILER)\n#if __INTEL_COMPILER == 9999\n#  define BOOST_INTEL_CXX_VERSION 1200 // Intel bug in 12.1.\n#else\n#  define BOOST_INTEL_CXX_VERSION __INTEL_COMPILER\n#endif\n#elif defined(__ICL)\n#  define BOOST_INTEL_CXX_VERSION __ICL\n#elif defined(__ICC)\n#  define BOOST_INTEL_CXX_VERSION __ICC\n#elif defined(__ECC)\n#  define BOOST_INTEL_CXX_VERSION __ECC\n#endif\n\n// Flags determined by comparing output of 'icpc -dM -E' with and without '-std=c++0x'\n#if (!(defined(_WIN32) || defined(_WIN64)) && defined(__STDC_HOSTED__) && (__STDC_HOSTED__ && (BOOST_INTEL_CXX_VERSION <= 1200))) || defined(__GXX_EXPERIMENTAL_CPP0X__) || defined(__GXX_EXPERIMENTAL_CXX0X__)\n#  define BOOST_INTEL_STDCXX0X\n#endif\n#if defined(_MSC_VER) && (_MSC_VER >= 1600)\n#  define BOOST_INTEL_STDCXX0X\n#endif\n\n#ifdef __GNUC__\n#  define BOOST_INTEL_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)\n#endif\n\n#if !defined(BOOST_COMPILER)\n#  if defined(BOOST_INTEL_STDCXX0X)\n#    define BOOST_COMPILER \"Intel C++ C++0x mode version \" BOOST_STRINGIZE(BOOST_INTEL_CXX_VERSION)\n#  else\n#    define BOOST_COMPILER \"Intel C++ version \" BOOST_STRINGIZE(BOOST_INTEL_CXX_VERSION)\n#  
endif\n#endif\n\n#define BOOST_INTEL BOOST_INTEL_CXX_VERSION\n\n#if defined(_WIN32) || defined(_WIN64)\n#  define BOOST_INTEL_WIN BOOST_INTEL\n#else\n#  define BOOST_INTEL_LINUX BOOST_INTEL\n#endif\n\n#if (BOOST_INTEL_CXX_VERSION <= 600)\n\n#  if defined(_MSC_VER) && (_MSC_VER <= 1300) // added check for <= VC 7 (Peter Dimov)\n\n// Boost libraries assume strong standard conformance unless otherwise\n// indicated by a config macro. As configured by Intel, the EDG front-end\n// requires certain compiler options be set to achieve that strong conformance.\n// Particularly /Qoption,c,--arg_dep_lookup (reported by Kirk Klobe & Thomas Witt)\n// and /Zc:wchar_t,forScope. See boost-root/tools/build/intel-win32-tools.jam for\n// details as they apply to particular versions of the compiler. When the\n// compiler does not predefine a macro indicating if an option has been set,\n// this config file simply assumes the option has been set.\n// Thus BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP will not be defined, even if\n// the compiler option is not enabled.\n\n#     define BOOST_NO_SWPRINTF\n#  endif\n\n// Void returns, 64 bit integrals don't work when emulating VC 6 (Peter Dimov)\n\n#  if defined(_MSC_VER) && (_MSC_VER <= 1200)\n#     define BOOST_NO_VOID_RETURNS\n#     define BOOST_NO_INTEGRAL_INT64_T\n#  endif\n\n#endif\n\n#if (BOOST_INTEL_CXX_VERSION <= 710) && defined(_WIN32)\n#  define BOOST_NO_POINTER_TO_MEMBER_TEMPLATE_PARAMETERS\n#endif\n\n// See http://aspn.activestate.com/ASPN/Mail/Message/boost/1614864\n#if BOOST_INTEL_CXX_VERSION < 600\n#  define BOOST_NO_INTRINSIC_WCHAR_T\n#else\n// We should test the macro _WCHAR_T_DEFINED to check if the compiler\n// supports wchar_t natively. *BUT* there is a problem here: the standard\n// headers define this macro if they typedef wchar_t. Anyway, we're lucky\n// because they define it without a value, while Intel C++ defines it\n// to 1. 
So we can check its value to see if the macro was defined natively\n// or not.\n// Under UNIX, the situation is exactly the same, but the macro _WCHAR_T\n// is used instead.\n#  if ((_WCHAR_T_DEFINED + 0) == 0) && ((_WCHAR_T + 0) == 0)\n#    define BOOST_NO_INTRINSIC_WCHAR_T\n#  endif\n#endif\n\n#if defined(__GNUC__) && !defined(BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL)\n//\n// Figure out when Intel is emulating this gcc bug\n// (All Intel versions prior to 9.0.26, and versions\n// later than that if they are set up to emulate gcc 3.2\n// or earlier):\n//\n#  if ((__GNUC__ == 3) && (__GNUC_MINOR__ <= 2)) || (BOOST_INTEL < 900) || (__INTEL_COMPILER_BUILD_DATE < 20050912)\n#     define BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL\n#  endif\n#endif\n#if (defined(__GNUC__) && (__GNUC__ < 4)) || (defined(_WIN32) && (BOOST_INTEL_CXX_VERSION <= 1200)) || (BOOST_INTEL_CXX_VERSION <= 1200)\n// GCC or VC emulation:\n#define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#endif\n//\n// Verify that we have actually got BOOST_NO_INTRINSIC_WCHAR_T\n// set correctly, if we don't do this now, we will get errors later\n// in type_traits code among other things, getting this correct\n// for the Intel compiler is actually remarkably fragile and tricky:\n//\n#ifdef __cplusplus\n#if defined(BOOST_NO_INTRINSIC_WCHAR_T)\n#include <cwchar>\ntemplate< typename T > struct assert_no_intrinsic_wchar_t;\ntemplate<> struct assert_no_intrinsic_wchar_t<wchar_t> { typedef void type; };\n// if you see an error here then you need to unset BOOST_NO_INTRINSIC_WCHAR_T\n// where it is defined above:\ntypedef assert_no_intrinsic_wchar_t<unsigned short>::type assert_no_intrinsic_wchar_t_;\n#else\ntemplate< typename T > struct assert_intrinsic_wchar_t;\ntemplate<> struct assert_intrinsic_wchar_t<wchar_t> {};\n// if you see an error here then define BOOST_NO_INTRINSIC_WCHAR_T on the command line:\ntemplate<> struct assert_intrinsic_wchar_t<unsigned short> {};\n#endif\n#endif\n\n#if defined(_MSC_VER) && 
(_MSC_VER+0 >= 1000)\n#  if _MSC_VER >= 1200\n#     define BOOST_HAS_MS_INT64\n#  endif\n#  define BOOST_NO_SWPRINTF\n#  define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#elif defined(_WIN32)\n#  define BOOST_DISABLE_WIN32\n#endif\n\n// I checked version 6.0 build 020312Z, it implements the NRVO.\n// Correct this as you find out which version of the compiler\n// implemented the NRVO first.  (Daniel Frey)\n#if (BOOST_INTEL_CXX_VERSION >= 600)\n#  define BOOST_HAS_NRVO\n#endif\n\n// Branch prediction hints\n// I'm not sure 8.0 was the first version to support these builtins,\n// update the condition if the version is not accurate. (Andrey Semashev)\n#if defined(__GNUC__) && BOOST_INTEL_CXX_VERSION >= 800\n#define BOOST_LIKELY(x) __builtin_expect(x, 1)\n#define BOOST_UNLIKELY(x) __builtin_expect(x, 0)\n#endif\n\n// RTTI\n// __RTTI is the EDG macro\n// __INTEL_RTTI__ is the Intel macro\n// __GXX_RTTI is the g++ macro\n// _CPPRTTI is the MSVC++ macro\n#if !defined(__RTTI) && !defined(__INTEL_RTTI__) && !defined(__GXX_RTTI) && !defined(_CPPRTTI)\n\n#if !defined(BOOST_NO_RTTI)\n# define BOOST_NO_RTTI\n#endif\n\n// in MS mode, static typeid works even when RTTI is off\n#if !defined(_MSC_VER) && !defined(BOOST_NO_TYPEID)\n# define BOOST_NO_TYPEID\n#endif\n\n#endif\n\n//\n// versions check:\n// we don't support Intel prior to version 6.0:\n#if BOOST_INTEL_CXX_VERSION < 600\n#  error \"Compiler not supported or configured - please reconfigure\"\n#endif\n\n// Intel on MacOS requires\n#if defined(__APPLE__) && defined(__INTEL_COMPILER)\n#  define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#endif\n\n// Intel on Altix Itanium\n#if defined(__itanium__) && defined(__INTEL_COMPILER)\n#  define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#endif\n\n//\n// An attempt to value-initialize a pointer-to-member may trigger an\n// internal error on Intel <= 11.1 (last checked version), as was\n// reported by John Maddock, Intel support issue 589832, May 2010.\n// Moreover, according to test results from 
Huang-Vista-x86_32_intel,\n// intel-vc9-win-11.1 may leave a non-POD array uninitialized, in some\n// cases when it should be value-initialized.\n// (Niels Dekker, LKEB, May 2010)\n// Apparently Intel 12.1 (compiler version number 9999 !!) has the same issue (compiler regression).\n#if defined(__INTEL_COMPILER)\n#  if (__INTEL_COMPILER <= 1110) || (__INTEL_COMPILER == 9999) || (defined(_WIN32) && (__INTEL_COMPILER < 1600))\n#    define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n#  endif\n#endif\n\n//\n// Dynamic shared object (DSO) and dynamic-link library (DLL) support\n//\n#if defined(__GNUC__) && (__GNUC__ >= 4)\n#  define BOOST_SYMBOL_EXPORT __attribute__((visibility(\"default\")))\n#  define BOOST_SYMBOL_IMPORT\n#  define BOOST_SYMBOL_VISIBLE __attribute__((visibility(\"default\")))\n#endif\n//\n// C++0x features\n// For each feature we need to check both the Intel compiler version, \n// and the version of MSVC or GCC that we are emulating.\n// See http://software.intel.com/en-us/articles/c0x-features-supported-by-intel-c-compiler/\n// for a list of which features were implemented in which Intel releases.\n//\n#if defined(BOOST_INTEL_STDCXX0X)\n// BOOST_NO_CXX11_CONSTEXPR:\n#if (BOOST_INTEL_CXX_VERSION >= 1500) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40600)) && !defined(_MSC_VER)\n// Available in earlier Intel versions, but fail our tests:\n#  undef BOOST_NO_CXX11_CONSTEXPR\n#endif\n// BOOST_NO_CXX11_NULLPTR:\n#if (BOOST_INTEL_CXX_VERSION >= 1210) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40600)) && (!defined(_MSC_VER) || (_MSC_VER >= 1600))\n#  undef BOOST_NO_CXX11_NULLPTR\n#endif\n// BOOST_NO_CXX11_TEMPLATE_ALIASES\n#if (BOOST_INTEL_CXX_VERSION >= 1210) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40700)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n#  undef BOOST_NO_CXX11_TEMPLATE_ALIASES\n#endif\n\n// BOOST_NO_CXX11_DECLTYPE\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && 
(!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40300)) && (!defined(_MSC_VER) || (_MSC_VER >= 1600))\n#  undef BOOST_NO_CXX11_DECLTYPE\n#endif\n\n// BOOST_NO_CXX11_DECLTYPE_N3276\n#if (BOOST_INTEL_CXX_VERSION >= 1500) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40800)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n#  undef BOOST_NO_CXX11_DECLTYPE_N3276\n#endif\n\n// BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40300)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n#  undef BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#endif\n\n// BOOST_NO_CXX11_RVALUE_REFERENCES\n#if (BOOST_INTEL_CXX_VERSION >= 1300) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40300)) && (!defined(_MSC_VER) || (_MSC_VER >= 1600))\n// This is available from earlier Intel versions, but breaks Filesystem and other libraries:\n#  undef BOOST_NO_CXX11_RVALUE_REFERENCES\n#endif\n\n// BOOST_NO_CXX11_STATIC_ASSERT\n#if (BOOST_INTEL_CXX_VERSION >= 1110) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40300)) && (!defined(_MSC_VER) || (_MSC_VER >= 1600))\n#  undef BOOST_NO_CXX11_STATIC_ASSERT\n#endif\n\n// BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n#  undef BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#endif\n\n// BOOST_NO_CXX11_VARIADIC_MACROS\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40200)) && (!defined(_MSC_VER) || (_MSC_VER >= 1400))\n#  undef BOOST_NO_CXX11_VARIADIC_MACROS\n#endif\n\n// BOOST_NO_CXX11_AUTO_DECLARATIONS\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_VER 
>= 1600))\n#  undef BOOST_NO_CXX11_AUTO_DECLARATIONS\n#endif\n\n// BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_VER >= 1600))\n#  undef BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#endif\n\n// BOOST_NO_CXX11_CHAR16_T\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_VER >= 9999))\n#  undef BOOST_NO_CXX11_CHAR16_T\n#endif\n\n// BOOST_NO_CXX11_CHAR32_T\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_VER >= 9999))\n#  undef BOOST_NO_CXX11_CHAR32_T\n#endif\n\n// BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n#  undef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#endif\n\n// BOOST_NO_CXX11_DELETED_FUNCTIONS\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n#  undef BOOST_NO_CXX11_DELETED_FUNCTIONS\n#endif\n\n// BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_VER >= 1700))\n#  undef BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#endif\n\n// BOOST_NO_CXX11_SCOPED_ENUMS\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40501)) && (!defined(_MSC_VER) || (_MSC_VER >= 1700))\n// This is available but broken in earlier Intel releases.\n#  undef BOOST_NO_CXX11_SCOPED_ENUMS\n#endif\n\n// BOOST_NO_SFINAE_EXPR\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || 
(BOOST_INTEL_GCC_VERSION >= 40500)) && (!defined(_MSC_VER) || (_MSC_VER >= 9999))\n#  undef BOOST_NO_SFINAE_EXPR\n#endif\n\n// BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#if (BOOST_INTEL_CXX_VERSION >= 1500) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40500)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n// This is available in earlier Intel releases, but breaks Multiprecision:\n#  undef BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#endif\n\n// BOOST_NO_CXX11_LAMBDAS\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40500)) && (!defined(_MSC_VER) || (_MSC_VER >= 1600))\n#  undef BOOST_NO_CXX11_LAMBDAS\n#endif\n\n// BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40500))\n#  undef BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#endif\n\n// BOOST_NO_CXX11_RANGE_BASED_FOR\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40600)) && (!defined(_MSC_VER) || (_MSC_VER >= 1700))\n#  undef BOOST_NO_CXX11_RANGE_BASED_FOR\n#endif\n\n// BOOST_NO_CXX11_RAW_LITERALS\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40500)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n#  undef BOOST_NO_CXX11_RAW_LITERALS\n#endif\n\n// BOOST_NO_CXX11_UNICODE_LITERALS\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40500)) && (!defined(_MSC_VER) || (_MSC_VER >= 9999))\n#  undef BOOST_NO_CXX11_UNICODE_LITERALS\n#endif\n\n// BOOST_NO_CXX11_NOEXCEPT\n#if (BOOST_INTEL_CXX_VERSION >= 1500) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40600)) && (!defined(_MSC_VER) || (_MSC_VER >= 9999))\n// Available in earlier Intel release, but generates errors when used with \n// conditional exception 
specifications, for example in multiprecision:\n#  undef BOOST_NO_CXX11_NOEXCEPT\n#endif\n\n// BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40600)) && (!defined(_MSC_VER) || (_MSC_VER >= 9999))\n#  undef BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#endif\n\n// BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#if (BOOST_INTEL_CXX_VERSION >= 1500) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40700)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 190021730))\n#  undef BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#endif\n\n// BOOST_NO_CXX11_ALIGNAS\n#if (BOOST_INTEL_CXX_VERSION >= 1500) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40800)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 190021730))\n#  undef BOOST_NO_CXX11_ALIGNAS\n#endif\n\n// BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#if (BOOST_INTEL_CXX_VERSION >= 1200) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 180020827))\n#  undef BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#endif\n\n// BOOST_NO_CXX11_INLINE_NAMESPACES\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40400)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 190021730))\n#  undef BOOST_NO_CXX11_INLINE_NAMESPACES\n#endif\n\n// BOOST_NO_CXX11_REF_QUALIFIERS\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40800)) && (!defined(_MSC_VER) || (_MSC_FULL_VER >= 190021730))\n#  undef BOOST_NO_CXX11_REF_QUALIFIERS\n#endif\n\n// BOOST_NO_CXX11_FINAL\n#if (BOOST_INTEL_CXX_VERSION >= 1400) && (!defined(BOOST_INTEL_GCC_VERSION) || (BOOST_INTEL_GCC_VERSION >= 40700)) && (!defined(_MSC_VER) || (_MSC_VER >= 1700))\n#  undef BOOST_NO_CXX11_FINAL\n#endif\n\n#endif\n\n//\n// Broken in all versions up to 15:\n#define 
BOOST_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS\n\n#if defined(BOOST_INTEL_STDCXX0X) && (BOOST_INTEL_CXX_VERSION <= 1310)\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#endif\n\n#if defined(BOOST_INTEL_STDCXX0X) && (BOOST_INTEL_CXX_VERSION == 1400)\n// A regression in Intel's compiler means that <tuple> seems to be broken in this release as well as <future> :\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#endif\n\n#if (BOOST_INTEL_CXX_VERSION < 1200)\n//\n// fenv.h appears not to work with Intel prior to 12.0:\n//\n#  define BOOST_NO_FENV_H\n#endif\n\n// Intel 13.10 fails to access defaulted functions of a base class declared in private or protected sections,\n// producing the following errors:\n// error #453: protected function \"...\" (declared at ...\") is not accessible through a \"...\" pointer or object\n#if (BOOST_INTEL_CXX_VERSION <= 1310)\n#  define BOOST_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS\n#endif\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1600)\n#  define BOOST_HAS_STDINT_H\n#endif\n\n#if defined(__LP64__) && defined(__GNUC__) && (BOOST_INTEL_CXX_VERSION >= 1310) && !defined(__CUDACC__)\n#  define BOOST_HAS_INT128\n#endif\n\n#endif\n//\n// last known and checked version:\n#if (BOOST_INTEL_CXX_VERSION > 1500)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  elif defined(_MSC_VER)\n//\n//      We don't emit this warning any more, since we have so few\n//      defect macros set anyway (just the one).\n//\n//#     pragma message(\"Unknown compiler version - please run the configure tests and report the results\")\n#  endif\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/kai.hpp",
    "content": "//  (C) Copyright John Maddock 2001. \n//  (C) Copyright David Abrahams 2002. \n//  (C) Copyright Aleksey Gurtovoy 2002. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Kai C++ compiler setup:\n\n#include \"boost/config/compiler/common_edg.hpp\"\n\n#   if (__KCC_VERSION <= 4001) || !defined(BOOST_STRICT_CONFIG)\n      // at least on Sun, the contents of <cwchar> is not in namespace std\n#     define BOOST_NO_STDC_NAMESPACE\n#   endif\n\n// see also common_edg.hpp which needs a special check for __KCC\n# if !defined(_EXCEPTIONS) && !defined(BOOST_NO_EXCEPTIONS)\n#     define BOOST_NO_EXCEPTIONS\n# endif\n\n//\n// last known and checked version is 4001:\n#if (__KCC_VERSION > 4001)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/metrowerks.hpp",
    "content": "//  (C) Copyright John Maddock 2001.\n//  (C) Copyright Darin Adler 2001.\n//  (C) Copyright Peter Dimov 2001.\n//  (C) Copyright David Abrahams 2001 - 2002.\n//  (C) Copyright Beman Dawes 2001 - 2003.\n//  (C) Copyright Stefan Slapeta 2004.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Metrowerks C++ compiler setup:\n\n// locale support is disabled when linking with the dynamic runtime\n#   ifdef _MSL_NO_LOCALE\n#     define BOOST_NO_STD_LOCALE\n#   endif\n\n#   if __MWERKS__ <= 0x2301  // 5.3\n#     define BOOST_NO_FUNCTION_TEMPLATE_ORDERING\n#     define BOOST_NO_POINTER_TO_MEMBER_CONST\n#     define BOOST_NO_DEPENDENT_TYPES_IN_TEMPLATE_VALUE_PARAMETERS\n#     define BOOST_NO_MEMBER_TEMPLATE_KEYWORD\n#   endif\n\n#   if __MWERKS__ <= 0x2401  // 6.2\n//#     define BOOST_NO_FUNCTION_TEMPLATE_ORDERING\n#   endif\n\n#   if(__MWERKS__ <= 0x2407)  // 7.x\n#     define BOOST_NO_MEMBER_FUNCTION_SPECIALIZATIONS\n#     define BOOST_NO_UNREACHABLE_RETURN_DETECTION\n#   endif\n\n#   if(__MWERKS__ <= 0x3003)  // 8.x\n#     define BOOST_NO_SFINAE\n#    endif\n\n// the \"|| !defined(BOOST_STRICT_CONFIG)\" part should apply to the last\n// tested version *only*:\n#   if(__MWERKS__ <= 0x3207) || !defined(BOOST_STRICT_CONFIG) // 9.6\n#     define BOOST_NO_MEMBER_TEMPLATE_FRIENDS\n#     define BOOST_NO_IS_ABSTRACT\n#    endif\n\n#if !__option(wchar_type)\n#   define BOOST_NO_INTRINSIC_WCHAR_T\n#endif\n\n#if !__option(exceptions) && !defined(BOOST_NO_EXCEPTIONS)\n#   define BOOST_NO_EXCEPTIONS\n#endif\n\n#if (__INTEL__ && _WIN32) || (__POWERPC__ && macintosh)\n#   if __MWERKS__ == 0x3000\n#     define BOOST_COMPILER_VERSION 8.0\n#   elif __MWERKS__ == 0x3001\n#     define BOOST_COMPILER_VERSION 8.1\n#   elif __MWERKS__ == 0x3002\n#     define 
BOOST_COMPILER_VERSION 8.2\n#   elif __MWERKS__ == 0x3003\n#     define BOOST_COMPILER_VERSION 8.3\n#   elif __MWERKS__ == 0x3200\n#     define BOOST_COMPILER_VERSION 9.0\n#   elif __MWERKS__ == 0x3201\n#     define BOOST_COMPILER_VERSION 9.1\n#   elif __MWERKS__ == 0x3202\n#     define BOOST_COMPILER_VERSION 9.2\n#   elif __MWERKS__ == 0x3204\n#     define BOOST_COMPILER_VERSION 9.3\n#   elif __MWERKS__ == 0x3205\n#     define BOOST_COMPILER_VERSION 9.4\n#   elif __MWERKS__ == 0x3206\n#     define BOOST_COMPILER_VERSION 9.5\n#   elif __MWERKS__ == 0x3207\n#     define BOOST_COMPILER_VERSION 9.6\n#   else\n#     define BOOST_COMPILER_VERSION __MWERKS__\n#   endif\n#else\n#  define BOOST_COMPILER_VERSION __MWERKS__\n#endif\n\n//\n// C++0x features\n//\n//   See boost\\config\\suffix.hpp for BOOST_NO_LONG_LONG\n//\n#if __MWERKS__ > 0x3206 && __option(rvalue_refs)\n#  define BOOST_HAS_RVALUE_REFS\n#else\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#endif\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_CHAR16_T\n#define BOOST_NO_CXX11_CHAR32_T\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DECLTYPE\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define 
BOOST_NO_CXX11_VARIADIC_MACROS\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n#define BOOST_COMPILER \"Metrowerks CodeWarrior C++ version \" BOOST_STRINGIZE(BOOST_COMPILER_VERSION)\n\n//\n// versions check:\n// we don't support Metrowerks prior to version 5.3:\n#if __MWERKS__ < 0x2301\n#  error \"Compiler not supported or configured - please reconfigure\"\n#endif\n//\n// last known and checked version:\n#if (__MWERKS__ > 0x3205)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/mpw.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2002.\n//  (C) Copyright Aleksey Gurtovoy 2002.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  MPW C++ compilers setup:\n\n#   if    defined(__SC__)\n#     define BOOST_COMPILER \"MPW SCpp version \" BOOST_STRINGIZE(__SC__)\n#   elif defined(__MRC__)\n#     define BOOST_COMPILER \"MPW MrCpp version \" BOOST_STRINGIZE(__MRC__)\n#   else\n#     error \"Using MPW compiler configuration by mistake.  Please update.\"\n#   endif\n\n//\n// MPW 8.90:\n//\n#if (MPW_CPLUS <= 0x890) || !defined(BOOST_STRICT_CONFIG)\n#  define BOOST_NO_CV_SPECIALIZATIONS\n#  define BOOST_NO_DEPENDENT_NESTED_DERIVATIONS\n#  define BOOST_NO_DEPENDENT_TYPES_IN_TEMPLATE_VALUE_PARAMETERS\n#  define BOOST_NO_INCLASS_MEMBER_INITIALIZATION\n#  define BOOST_NO_INTRINSIC_WCHAR_T\n#  define BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n#  define BOOST_NO_USING_TEMPLATE\n\n#  define BOOST_NO_CWCHAR\n#  define BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS\n\n#  define BOOST_NO_STD_ALLOCATOR /* actually a bug with const reference overloading */\n\n#endif\n\n//\n// C++0x features\n//\n//   See boost\\config\\suffix.hpp for BOOST_NO_LONG_LONG\n//\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_CHAR16_T\n#define BOOST_NO_CXX11_CHAR32_T\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DECLTYPE\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#define BOOST_NO_CXX11_LAMBDAS\n#define 
BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_VARIADIC_MACROS\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n//\n// versions check:\n// we don't support MPW prior to version 8.9:\n#if 
MPW_CPLUS < 0x890\n#  error \"Compiler not supported or configured - please reconfigure\"\n#endif\n//\n// last known and checked version is 0x890:\n#if (MPW_CPLUS > 0x890)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/nvcc.hpp",
    "content": "//  (C) Copyright Eric Jourdanneau, Joel Falcou 2010\n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  NVIDIA CUDA C++ compiler setup\n\n#ifndef BOOST_COMPILER\n#  define BOOST_COMPILER \"NVIDIA CUDA C++ Compiler\"\n#endif\n\n// NVIDIA Specific support\n// BOOST_GPU_ENABLED : Flag a function or a method as being enabled on the host and device\n#define BOOST_GPU_ENABLED __host__ __device__\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/pathscale.hpp",
    "content": "//  (C) Copyright Bryce Lelbach 2011\n\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n// PathScale EKOPath C++ Compiler\n\n#ifndef BOOST_COMPILER\n#  define BOOST_COMPILER \"PathScale EKOPath C++ Compiler version \" __PATHSCALE__\n#endif\n\n#if __PATHCC__ >= 4\n#  define BOOST_MSVC6_MEMBER_TEMPLATES\n#  define BOOST_HAS_UNISTD_H\n#  define BOOST_HAS_STDINT_H\n#  define BOOST_HAS_SIGACTION\n#  define BOOST_HAS_SCHED_YIELD\n#  define BOOST_HAS_THREADS\n#  define BOOST_HAS_PTHREADS\n#  define BOOST_HAS_PTHREAD_YIELD\n#  define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#  define BOOST_HAS_PARTIAL_STD_ALLOCATOR\n#  define BOOST_HAS_NRVO\n#  define BOOST_HAS_NL_TYPES_H\n#  define BOOST_HAS_NANOSLEEP\n#  define BOOST_HAS_LONG_LONG\n#  define BOOST_HAS_LOG1P\n#  define BOOST_HAS_GETTIMEOFDAY\n#  define BOOST_HAS_EXPM1\n#  define BOOST_HAS_DIRENT_H\n#  define BOOST_HAS_CLOCK_GETTIME\n#  define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#  define BOOST_NO_CXX11_UNICODE_LITERALS\n#  define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#  define BOOST_NO_CXX11_STATIC_ASSERT\n#  define BOOST_NO_SFINAE_EXPR\n#  define BOOST_NO_CXX11_SCOPED_ENUMS\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#  define BOOST_NO_CXX11_RANGE_BASED_FOR\n#  define BOOST_NO_CXX11_RAW_LITERALS\n#  define BOOST_NO_CXX11_NULLPTR\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_NOEXCEPT\n#  define BOOST_NO_CXX11_LAMBDAS\n#  define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#  define BOOST_NO_MS_INT64_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#  define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#  define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#  define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#  define BOOST_NO_CXX11_DECLTYPE\n#  define BOOST_NO_CXX11_DECLTYPE_N3276\n# 
 define BOOST_NO_CXX11_CONSTEXPR\n#  define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n#  define BOOST_NO_CXX11_CHAR32_T\n#  define BOOST_NO_CXX11_CHAR16_T\n#  define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#  define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#  define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#  define BOOST_NO_CXX11_ALIGNAS\n#  define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#  define BOOST_NO_CXX11_INLINE_NAMESPACES\n#  define BOOST_NO_CXX11_REF_QUALIFIERS\n#  define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define 
BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/pgi.hpp",
    "content": "//  (C) Copyright Noel Belcourt 2007.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  PGI C++ compiler setup:\n\n#define BOOST_COMPILER_VERSION __PGIC__##__PGIC_MINOR__\n#define BOOST_COMPILER \"PGI compiler version \" BOOST_STRINGIZE(BOOST_COMPILER_VERSION)\n\n//\n// Threading support:\n// Turn this on unconditionally here, it will get turned off again later\n// if no threading API is detected.\n//\n\n#if __PGIC__ >= 11\n\n// options requested by configure --enable-test\n#define BOOST_HAS_PTHREADS\n#define BOOST_HAS_THREADS\n#define BOOST_HAS_PTHREAD_YIELD\n#define BOOST_HAS_NRVO\n#define BOOST_HAS_LONG_LONG\n\n// options --enable-test wants undefined\n#undef BOOST_NO_STDC_NAMESPACE\n#undef BOOST_NO_EXCEPTION_STD_NAMESPACE\n#undef BOOST_DEDUCED_TYPENAME\n\n#define BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL\n#define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n\n#elif __PGIC__ >= 10\n\n// options requested by configure --enable-test\n#define BOOST_HAS_THREADS\n#define BOOST_HAS_NRVO\n#define BOOST_HAS_LONG_LONG\n#if defined(linux) || defined(__linux) || defined(__linux__)\n#  define BOOST_HAS_STDINT_H\n#endif\n\n// options --enable-test wants undefined\n#undef BOOST_NO_STDC_NAMESPACE\n#undef BOOST_NO_EXCEPTION_STD_NAMESPACE\n#undef BOOST_DEDUCED_TYPENAME\n\n#elif __PGIC__ >= 7\n\n#define BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL\n#define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#define BOOST_NO_SWPRINTF\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n\n#else\n\n#  error \"Pgi compiler not configured - please reconfigure\"\n\n#endif\n//\n// C++0x features\n//\n//   See boost\\config\\suffix.hpp for 
BOOST_NO_LONG_LONG\n//\n#define BOOST_NO_CXX11_CHAR16_T\n#define BOOST_NO_CXX11_CHAR32_T\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DECLTYPE\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_NUMERIC_LIMITS\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_SWPRINTF\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_VARIADIC_MACROS\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n\n#define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#define BOOST_NO_CXX11_HDR_TYPEINDEX\n#define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#define BOOST_NO_CXX11_HDR_TUPLE\n#define BOOST_NO_CXX11_HDR_THREAD\n#define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#define BOOST_NO_CXX11_HDR_REGEX\n#define BOOST_NO_CXX11_HDR_RATIO\n#define BOOST_NO_CXX11_HDR_RANDOM\n#define BOOST_NO_CXX11_HDR_MUTEX\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#define BOOST_NO_CXX11_HDR_FUTURE\n#define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#define BOOST_NO_CXX11_HDR_CODECVT\n#define BOOST_NO_CXX11_HDR_CHRONO\n#define BOOST_NO_CXX11_HDR_ARRAY\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define 
BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n//\n// version check:\n// probably nothing to do here?\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/sgi_mipspro.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2002. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  SGI C++ compiler setup:\n\n#define BOOST_COMPILER \"SGI Irix compiler version \" BOOST_STRINGIZE(_COMPILER_VERSION)\n\n#include \"boost/config/compiler/common_edg.hpp\"\n\n//\n// Threading support:\n// Turn this on unconditionally here, it will get turned off again later\n// if no threading API is detected.\n//\n#define BOOST_HAS_THREADS\n#define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n\n#undef BOOST_NO_SWPRINTF\n#undef BOOST_DEDUCED_TYPENAME\n\n//\n// version check:\n// probably nothing to do here?\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/sunpro_cc.hpp",
    "content": "//  (C) Copyright John Maddock 2001.\n//  (C) Copyright Jens Maurer 2001 - 2003.\n//  (C) Copyright Peter Dimov 2002.\n//  (C) Copyright Aleksey Gurtovoy 2002 - 2003.\n//  (C) Copyright David Abrahams 2002.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Sun C++ compiler setup:\n\n#    if __SUNPRO_CC <= 0x500\n#      define BOOST_NO_MEMBER_TEMPLATES\n#      define BOOST_NO_FUNCTION_TEMPLATE_ORDERING\n#    endif\n\n#    if (__SUNPRO_CC <= 0x520)\n       //\n       // Sunpro 5.2 and earler:\n       //\n       // although sunpro 5.2 supports the syntax for\n       // inline initialization it often gets the value\n       // wrong, especially where the value is computed\n       // from other constants (J Maddock 6th May 2001)\n#      define BOOST_NO_INCLASS_MEMBER_INITIALIZATION\n\n       // Although sunpro 5.2 supports the syntax for\n       // partial specialization, it often seems to\n       // bind to the wrong specialization.  
Better\n       // to disable it until suppport becomes more stable\n       // (J Maddock 6th May 2001).\n#      define BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n#    endif\n\n#    if (__SUNPRO_CC <= 0x530)\n       // Requesting debug info (-g) with Boost.Python results\n       // in an internal compiler error for \"static const\"\n       // initialized in-class.\n       //    >> Assertion:   (../links/dbg_cstabs.cc, line 611)\n       //         while processing ../test.cpp at line 0.\n       // (Jens Maurer according to Gottfried Ganssauge 04 Mar 2002)\n#      define BOOST_NO_INCLASS_MEMBER_INITIALIZATION\n\n       // SunPro 5.3 has better support for partial specialization,\n       // but breaks when compiling std::less<shared_ptr<T> >\n       // (Jens Maurer 4 Nov 2001).\n\n       // std::less specialization fixed as reported by George\n       // Heintzelman; partial specialization re-enabled\n       // (Peter Dimov 17 Jan 2002)\n\n//#      define BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n       // integral constant expressions with 64 bit numbers fail\n#      define BOOST_NO_INTEGRAL_INT64_T\n#    endif\n\n#    if (__SUNPRO_CC < 0x570)\n#      define BOOST_NO_TEMPLATE_TEMPLATES\n       // see http://lists.boost.org/MailArchives/boost/msg47184.php\n       // and http://lists.boost.org/MailArchives/boost/msg47220.php\n#      define BOOST_NO_INCLASS_MEMBER_INITIALIZATION\n#      define BOOST_NO_SFINAE\n#      define BOOST_NO_ARRAY_TYPE_SPECIALIZATIONS\n#    endif\n#    if (__SUNPRO_CC <= 0x580)\n#      define BOOST_NO_IS_ABSTRACT\n#    endif\n\n#    if (__SUNPRO_CC <= 0x5100)\n       // Sun 5.10 may not correctly value-initialize objects of\n       // some user defined types, as was reported in April 2010\n       // (CR 6947016), and confirmed by Steve Clamage.\n       // (Niels Dekker, LKEB, May 2010).\n#      define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n#    endif\n\n//\n// Dynamic shared object (DSO) and dynamic-link library (DLL) support\n//\n#if 
__SUNPRO_CC > 0x500\n#  define BOOST_SYMBOL_EXPORT __global\n#  define BOOST_SYMBOL_IMPORT __global\n#  define BOOST_SYMBOL_VISIBLE __global\n#endif\n\n#if (__SUNPRO_CC < 0x5130)\n// C++03 features in 12.4:\n#define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_ADL_BARRIER\n#define BOOST_NO_CXX11_VARIADIC_MACROS\n#endif\n\n#if (__SUNPRO_CC < 0x5130) || (__cplusplus < 201100)\n// C++11 only featuires in 12.4:\n#define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#define BOOST_NO_CXX11_CHAR16_T\n#define BOOST_NO_CXX11_CHAR32_T\n#define BOOST_NO_CXX11_CONSTEXPR\n#define BOOST_NO_CXX11_DECLTYPE\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_RVALUE_REFERENCES\n#define BOOST_NO_CXX11_SCOPED_ENUMS\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_FINAL\n#endif\n\n#if (__SUNPRO_CC < 0x5140) || (__cplusplus < 201103)\n#define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#define BOOST_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#endif\n\n#define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n//\n// C++0x features\n//\n#  define 
BOOST_HAS_LONG_LONG\n\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n//\n// Version\n//\n\n#define BOOST_COMPILER \"Sun compiler version \" BOOST_STRINGIZE(__SUNPRO_CC)\n\n//\n// versions check:\n// we don't support sunpro prior to version 4:\n#if __SUNPRO_CC < 0x400\n#error \"Compiler not supported or configured - please reconfigure\"\n#endif\n//\n// last known and checked version is 0x590:\n#if (__SUNPRO_CC > 0x590)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/vacpp.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003.\n//  (C) Copyright Toon Knapen 2001 - 2003.\n//  (C) Copyright Lie-Quan Lee 2001.\n//  (C) Copyright Markus Schoepflin 2002 - 2003.\n//  (C) Copyright Beman Dawes 2002 - 2003.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Visual Age (IBM) C++ compiler setup:\n\n#if __IBMCPP__ <= 501\n#  define BOOST_NO_MEMBER_TEMPLATE_FRIENDS\n#  define BOOST_NO_MEMBER_FUNCTION_SPECIALIZATIONS\n#endif\n\n#if (__IBMCPP__ <= 502)\n// Actually the compiler supports inclass member initialization but it\n// requires a definition for the class member and it doesn't recognize\n// it as an integral constant expression when used as a template argument.\n#  define BOOST_NO_INCLASS_MEMBER_INITIALIZATION\n#  define BOOST_NO_INTEGRAL_INT64_T\n#  define BOOST_NO_MEMBER_TEMPLATE_KEYWORD\n#endif\n\n#if (__IBMCPP__ <= 600) || !defined(BOOST_STRICT_CONFIG)\n#  define BOOST_NO_POINTER_TO_MEMBER_TEMPLATE_PARAMETERS\n#endif\n\n#if (__IBMCPP__ <= 1110)\n// XL C++ V11.1 and earlier versions may not always value-initialize\n// a temporary object T(), when T is a non-POD aggregate class type.\n// Michael Wong (IBM Canada Ltd) has confirmed this issue and gave it\n// high priority. 
-- Niels Dekker (LKEB), May 2010.\n#  define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n#endif\n\n//\n// On AIX thread support seems to be indicated by _THREAD_SAFE:\n//\n#ifdef _THREAD_SAFE\n#  define BOOST_HAS_THREADS\n#endif\n\n#define BOOST_COMPILER \"IBM Visual Age version \" BOOST_STRINGIZE(__IBMCPP__)\n\n//\n// versions check:\n// we don't support Visual age prior to version 5:\n#if __IBMCPP__ < 500\n#error \"Compiler not supported or configured - please reconfigure\"\n#endif\n//\n// last known and checked version is 1210:\n#if (__IBMCPP__ > 1210)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  endif\n#endif\n\n// Some versions of the compiler have issues with default arguments on partial specializations\n#if __IBMCPP__ <= 1010\n#define BOOST_NO_PARTIAL_SPECIALIZATION_IMPLICIT_DEFAULT_ARGS\n#endif\n\n//\n// C++0x features\n//\n//   See boost\\config\\suffix.hpp for BOOST_NO_LONG_LONG\n//\n#if ! __IBMCPP_AUTO_TYPEDEDUCTION\n#  define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#  define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#endif\n#if ! __IBMCPP_UTF_LITERAL__\n#  define BOOST_NO_CXX11_CHAR16_T\n#  define BOOST_NO_CXX11_CHAR32_T\n#endif\n#if ! __IBMCPP_CONSTEXPR\n#  define BOOST_NO_CXX11_CONSTEXPR\n#endif\n#if ! __IBMCPP_DECLTYPE\n#  define BOOST_NO_CXX11_DECLTYPE\n#else\n#  define BOOST_HAS_DECLTYPE\n#endif\n#define BOOST_NO_CXX11_DECLTYPE_N3276\n#define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#if ! __IBMCPP_EXPLICIT_CONVERSION_OPERATORS\n#  define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#endif\n#if ! __IBMCPP_EXTERN_TEMPLATE\n#  define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#endif\n#if ! 
__IBMCPP_VARIADIC_TEMPLATES\n// not enabled separately at this time\n#  define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#endif\n#define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#define BOOST_NO_CXX11_LAMBDAS\n#define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#define BOOST_NO_CXX11_NOEXCEPT\n#define BOOST_NO_CXX11_NULLPTR\n#define BOOST_NO_CXX11_RANGE_BASED_FOR\n#define BOOST_NO_CXX11_RAW_LITERALS\n#define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#if ! __IBMCPP_RVALUE_REFERENCES\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#endif\n#if ! __IBMCPP_SCOPED_ENUM\n#  define BOOST_NO_CXX11_SCOPED_ENUMS\n#endif\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#if ! __IBMCPP_STATIC_ASSERT\n#  define BOOST_NO_CXX11_STATIC_ASSERT\n#endif\n#define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#define BOOST_NO_CXX11_UNICODE_LITERALS\n#if ! __IBMCPP_VARIADIC_TEMPLATES\n#  define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#endif\n#if ! __C99_MACRO_WITH_VA_ARGS\n#  define BOOST_NO_CXX11_VARIADIC_MACROS\n#endif\n#define BOOST_NO_CXX11_ALIGNAS\n#define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#define BOOST_NO_CXX11_INLINE_NAMESPACES\n#define BOOST_NO_CXX11_REF_QUALIFIERS\n#define BOOST_NO_CXX11_FINAL\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n#if (__cplusplus < 201304) // There's no SD6 check for this....\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n#if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n#if !defined(__cpp_init_captures) || (__cpp_init_captures < 
201304)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n#if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/visualc.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003.\n//  (C) Copyright Darin Adler 2001 - 2002.\n//  (C) Copyright Peter Dimov 2001.\n//  (C) Copyright Aleksey Gurtovoy 2002.\n//  (C) Copyright David Abrahams 2002 - 2003.\n//  (C) Copyright Beman Dawes 2002 - 2003.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n//\n//  Microsoft Visual C++ compiler setup:\n//\n//  We need to be careful with the checks in this file, as contrary\n//  to popular belief there are versions with _MSC_VER with the final\n//  digit non-zero (mainly the MIPS cross compiler).\n//\n//  So we either test _MSC_VER >= XXXX or else _MSC_VER < XXXX.\n//  No other comparisons (==, >, or <=) are safe.\n//\n\n#define BOOST_MSVC _MSC_VER\n\n//\n// Helper macro BOOST_MSVC_FULL_VER for use in Boost code:\n//\n#if _MSC_FULL_VER > 100000000\n#  define BOOST_MSVC_FULL_VER _MSC_FULL_VER\n#else\n#  define BOOST_MSVC_FULL_VER (_MSC_FULL_VER * 10)\n#endif\n\n// Attempt to suppress VC6 warnings about the length of decorated names (obsolete):\n#pragma warning( disable : 4503 ) // warning: decorated name length exceeded\n\n#define BOOST_HAS_PRAGMA_ONCE\n\n//\n// versions check:\n// we don't support Visual C++ prior to version 7.1:\n#if _MSC_VER < 1310\n#  error \"Compiler not supported or configured - please reconfigure\"\n#endif\n\n#if _MSC_FULL_VER < 180020827\n#  define BOOST_NO_FENV_H\n#endif\n\n#if _MSC_VER < 1400\n// although a conforming signature for swprint exists in VC7.1\n// it appears not to actually work:\n#  define BOOST_NO_SWPRINTF\n// Our extern template tests also fail for this compiler:\n#  define BOOST_NO_CXX11_EXTERN_TEMPLATE\n// Variadic macros do not exist for VC7.1 and lower\n#  define BOOST_NO_CXX11_VARIADIC_MACROS\n#  define 
BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#endif\n\n#if _MSC_VER < 1500  // 140X == VC++ 8.0\n#  define BOOST_NO_MEMBER_TEMPLATE_FRIENDS\n#endif\n\n#if _MSC_VER < 1600  // 150X == VC++ 9.0\n   // A bug in VC9:\n#  define BOOST_NO_ADL_BARRIER\n#endif\n\n\n#ifndef _NATIVE_WCHAR_T_DEFINED\n#  define BOOST_NO_INTRINSIC_WCHAR_T\n#endif\n\n//\n// check for exception handling support:\n#if !defined(_CPPUNWIND) && !defined(BOOST_NO_EXCEPTIONS)\n#  define BOOST_NO_EXCEPTIONS\n#endif\n\n//\n// __int64 support:\n//\n#define BOOST_HAS_MS_INT64\n#if defined(_MSC_EXTENSIONS) || (_MSC_VER >= 1400)\n#   define BOOST_HAS_LONG_LONG\n#else\n#   define BOOST_NO_LONG_LONG\n#endif\n#if (_MSC_VER >= 1400) && !defined(_DEBUG)\n#   define BOOST_HAS_NRVO\n#endif\n#if _MSC_VER >= 1600  // 160X == VC++ 10.0\n#  define BOOST_HAS_PRAGMA_DETECT_MISMATCH\n#endif\n//\n// disable Win32 API's if compiler extensions are\n// turned off:\n//\n#if !defined(_MSC_EXTENSIONS) && !defined(BOOST_DISABLE_WIN32)\n#  define BOOST_DISABLE_WIN32\n#endif\n#if !defined(_CPPRTTI) && !defined(BOOST_NO_RTTI)\n#  define BOOST_NO_RTTI\n#endif\n\n//\n// TR1 features:\n//\n#if _MSC_VER >= 1700\n// # define BOOST_HAS_TR1_HASH\t\t\t// don't know if this is true yet.\n// # define BOOST_HAS_TR1_TYPE_TRAITS\t// don't know if this is true yet.\n# define BOOST_HAS_TR1_UNORDERED_MAP\n# define BOOST_HAS_TR1_UNORDERED_SET\n#endif\n\n//\n// C++0x features\n//\n//   See above for BOOST_NO_LONG_LONG\n\n// C++ features supported by VC++ 10 (aka 2010)\n//\n#if _MSC_VER < 1600\n#  define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#  define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#  define BOOST_NO_CXX11_LAMBDAS\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#  define BOOST_NO_CXX11_STATIC_ASSERT\n#  define BOOST_NO_CXX11_NULLPTR\n#  define BOOST_NO_CXX11_DECLTYPE\n#endif // _MSC_VER < 1600\n\n#if _MSC_VER >= 1600\n#  define BOOST_HAS_STDINT_H\n#endif\n\n// C++11 features supported by VC++ 11 (aka 2012)\n//\n#if _MSC_VER < 1700\n#  define 
BOOST_NO_CXX11_FINAL\n#  define BOOST_NO_CXX11_RANGE_BASED_FOR\n#  define BOOST_NO_CXX11_SCOPED_ENUMS\n#endif // _MSC_VER < 1700\n\n// C++11 features supported by VC++ 12 (aka 2013).\n//\n#if _MSC_FULL_VER < 180020827\n#  define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#  define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#  define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#  define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#  define BOOST_NO_CXX11_RAW_LITERALS\n#  define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#  define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#  define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#  define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#  define BOOST_NO_CXX11_DECLTYPE_N3276\n#endif\n\n// C++11 features supported by VC++ 14 (aka 2015)\n//\n#if (_MSC_FULL_VER < 190023026)\n#  define BOOST_NO_CXX11_NOEXCEPT\n#  define BOOST_NO_CXX11_REF_QUALIFIERS\n#  define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#  define BOOST_NO_CXX11_ALIGNAS\n#  define BOOST_NO_CXX11_INLINE_NAMESPACES\n#  define BOOST_NO_CXX11_CHAR16_T\n#  define BOOST_NO_CXX11_CHAR32_T\n#  define BOOST_NO_CXX11_UNICODE_LITERALS\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n\n// MSVC including version 14 has not yet completely\n// implemented value-initialization, as is reported:\n// \"VC++ does not value-initialize members of derived classes without\n// user-declared constructor\", reported in 2009 by Sylvester Hesp:\n// https://connect.microsoft.com/VisualStudio/feedback/details/484295\n// \"Presence of copy constructor breaks member class initialization\",\n// reported in 2009 by Alex Vakulenko:\n// https://connect.microsoft.com/VisualStudio/feedback/details/499606\n// \"Value-initialization in new-expression\", reported in 2005 by\n// Pavel Kuznetsov (MetaCommunications 
Engineering):\n// https://connect.microsoft.com/VisualStudio/feedback/details/100744\n// Reported again by John Maddock in 2015 for VC14:\n// https://connect.microsoft.com/VisualStudio/feedback/details/1582233/c-subobjects-still-not-value-initialized-correctly\n// See also: http://www.boost.org/libs/utility/value_init.htm#compiler_issues\n// (Niels Dekker, LKEB, May 2010)\n#define BOOST_NO_COMPLETE_VALUE_INITIALIZATION\n// C++11 features not supported by any versions\n#define BOOST_NO_SFINAE_EXPR\n#define BOOST_NO_TWO_PHASE_NAME_LOOKUP\n//\n// This is somewhat supported in VC14, but we may need to wait for\n// a service release before enabling:\n//\n#define BOOST_NO_CXX11_CONSTEXPR\n\n// C++ 14:\n#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n//\n// prefix and suffix headers:\n//\n#ifndef BOOST_ABI_PREFIX\n#  define BOOST_ABI_PREFIX \"boost/config/abi/msvc_prefix.hpp\"\n#endif\n#ifndef BOOST_ABI_SUFFIX\n#  define BOOST_ABI_SUFFIX \"boost/config/abi/msvc_suffix.hpp\"\n#endif\n\n#ifndef BOOST_COMPILER\n// TODO:\n// these things are mostly bogus. 1200 means version 12.0 of the compiler. The\n// artificial versions assigned to them only refer to the versions of some IDE\n// these compilers have been shipped with, and even that is not all of it. Some\n// were shipped with freely downloadable SDKs, others as crosscompilers in eVC.\n// IOW, you can't use these 'versions' in any sensible way. 
Sorry.\n# if defined(UNDER_CE)\n#   if _MSC_VER < 1400\n      // Note: I'm not aware of any CE compiler with version 13xx\n#      if defined(BOOST_ASSERT_CONFIG)\n#         error \"Unknown EVC++ compiler version - please run the configure tests and report the results\"\n#      else\n#         pragma message(\"Unknown EVC++ compiler version - please run the configure tests and report the results\")\n#      endif\n#   elif _MSC_VER < 1500\n#     define BOOST_COMPILER_VERSION evc8\n#   elif _MSC_VER < 1600\n#     define BOOST_COMPILER_VERSION evc9\n#   elif _MSC_VER < 1700\n#     define BOOST_COMPILER_VERSION evc10\n#   elif _MSC_VER < 1800 \n#     define BOOST_COMPILER_VERSION evc11 \n#   elif _MSC_VER < 1900 \n#     define BOOST_COMPILER_VERSION evc12\n#   elif _MSC_VER < 2000  \n#     define BOOST_COMPILER_VERSION evc14\n#   else\n#      if defined(BOOST_ASSERT_CONFIG)\n#         error \"Unknown EVC++ compiler version - please run the configure tests and report the results\"\n#      else\n#         pragma message(\"Unknown EVC++ compiler version - please run the configure tests and report the results\")\n#      endif\n#   endif\n# else\n#   if _MSC_VER < 1310\n      // Note: Versions up to 7.0 aren't supported.\n#     define BOOST_COMPILER_VERSION 5.0\n#   elif _MSC_VER < 1300\n#     define BOOST_COMPILER_VERSION 6.0\n#   elif _MSC_VER < 1310\n#     define BOOST_COMPILER_VERSION 7.0\n#   elif _MSC_VER < 1400\n#     define BOOST_COMPILER_VERSION 7.1\n#   elif _MSC_VER < 1500\n#     define BOOST_COMPILER_VERSION 8.0\n#   elif _MSC_VER < 1600\n#     define BOOST_COMPILER_VERSION 9.0\n#   elif _MSC_VER < 1700\n#     define BOOST_COMPILER_VERSION 10.0\n#   elif _MSC_VER < 1800 \n#     define BOOST_COMPILER_VERSION 11.0\n#   elif _MSC_VER < 1900\n#     define BOOST_COMPILER_VERSION 12.0\n#   elif _MSC_VER < 2000\n#     define BOOST_COMPILER_VERSION 14.0\n#   else\n#     define BOOST_COMPILER_VERSION _MSC_VER\n#   endif\n# endif\n\n#  define BOOST_COMPILER \"Microsoft 
Visual C++ version \" BOOST_STRINGIZE(BOOST_COMPILER_VERSION)\n#endif\n\n//\n// last known and checked version is 19.00.23026 (VC++ 2015 RTM):\n#if (_MSC_VER > 1900)\n#  if defined(BOOST_ASSERT_CONFIG)\n#     error \"Unknown compiler version - please run the configure tests and report the results\"\n#  else\n#     pragma message(\"Unknown compiler version - please run the configure tests and report the results\")\n#  endif\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/compiler/xlcpp.hpp",
    "content": "// (C) Copyright Douglas Gregor 2010\n//\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  compiler setup for IBM XL C/C++ for Linux (Little Endian) based on clang.\n\n#define BOOST_HAS_PRAGMA_ONCE\n\n// Detecting `-fms-extension` compiler flag assuming that _MSC_VER defined when that flag is used.\n#if defined (_MSC_VER) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 4))\n#   define BOOST_HAS_PRAGMA_DETECT_MISMATCH\n#endif\n\n// When compiling with clang before __has_extension was defined,\n// even if one writes 'defined(__has_extension) && __has_extension(xxx)',\n// clang reports a compiler error. So the only workaround found is:\n\n#ifndef __has_extension\n#define __has_extension __has_feature\n#endif\n\n#if !__has_feature(cxx_exceptions) && !defined(BOOST_NO_EXCEPTIONS)\n#  define BOOST_NO_EXCEPTIONS\n#endif\n\n#if !__has_feature(cxx_rtti) && !defined(BOOST_NO_RTTI)\n#  define BOOST_NO_RTTI\n#endif\n\n#if !__has_feature(cxx_rtti) && !defined(BOOST_NO_TYPEID)\n#  define BOOST_NO_TYPEID\n#endif\n\n#if defined(__int64) && !defined(__GNUC__)\n#  define BOOST_HAS_MS_INT64\n#endif\n\n#define BOOST_HAS_NRVO\n\n// Branch prediction hints\n#if defined(__has_builtin)\n#if __has_builtin(__builtin_expect)\n#define BOOST_LIKELY(x) __builtin_expect(x, 1)\n#define BOOST_UNLIKELY(x) __builtin_expect(x, 0)\n#endif\n#endif\n\n// Clang supports \"long long\" in all compilation modes.\n#define BOOST_HAS_LONG_LONG\n\n//\n// Dynamic shared object (DSO) and dynamic-link library (DLL) support\n//\n#if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32)\n#  define BOOST_SYMBOL_EXPORT __attribute__((__visibility__(\"default\")))\n#  define BOOST_SYMBOL_IMPORT\n#  define BOOST_SYMBOL_VISIBLE 
__attribute__((__visibility__(\"default\")))\n#endif\n\n//\n// The BOOST_FALLTHROUGH macro can be used to annotate implicit fall-through\n// between switch labels.\n//\n#if __cplusplus >= 201103L && defined(__has_warning)\n#  if __has_feature(cxx_attributes) && __has_warning(\"-Wimplicit-fallthrough\")\n#    define BOOST_FALLTHROUGH [[clang::fallthrough]]\n#  endif\n#endif\n\n#if !__has_feature(cxx_auto_type)\n#  define BOOST_NO_CXX11_AUTO_DECLARATIONS\n#  define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS\n#endif\n\n//\n// Currently clang on Windows using VC++ RTL does not support C++11's char16_t or char32_t\n//\n#if defined(_MSC_VER) || !(defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L)\n#  define BOOST_NO_CXX11_CHAR16_T\n#  define BOOST_NO_CXX11_CHAR32_T\n#endif\n\n#if !__has_feature(cxx_constexpr)\n#  define BOOST_NO_CXX11_CONSTEXPR\n#endif\n\n#if !__has_feature(cxx_decltype)\n#  define BOOST_NO_CXX11_DECLTYPE\n#endif\n\n#if !__has_feature(cxx_decltype_incomplete_return_types)\n#  define BOOST_NO_CXX11_DECLTYPE_N3276\n#endif\n\n#if !__has_feature(cxx_defaulted_functions)\n#  define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n#endif\n\n#if !__has_feature(cxx_deleted_functions)\n#  define BOOST_NO_CXX11_DELETED_FUNCTIONS\n#endif\n\n#if !__has_feature(cxx_explicit_conversions)\n#  define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS\n#endif\n\n#if !__has_feature(cxx_default_function_template_args)\n#  define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#endif\n\n#if !__has_feature(cxx_generalized_initializers)\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#endif\n\n#if !__has_feature(cxx_lambdas)\n#  define BOOST_NO_CXX11_LAMBDAS\n#endif\n\n#if !__has_feature(cxx_local_type_template_args)\n#  define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#endif\n\n#if !__has_feature(cxx_noexcept)\n#  define BOOST_NO_CXX11_NOEXCEPT\n#endif\n\n#if !__has_feature(cxx_nullptr)\n#  define BOOST_NO_CXX11_NULLPTR\n#endif\n\n#if !__has_feature(cxx_range_for)\n#  define 
BOOST_NO_CXX11_RANGE_BASED_FOR\n#endif\n\n#if !__has_feature(cxx_raw_string_literals)\n#  define BOOST_NO_CXX11_RAW_LITERALS\n#endif\n\n#if !__has_feature(cxx_reference_qualified_functions)\n#  define BOOST_NO_CXX11_REF_QUALIFIERS\n#endif\n\n#if !__has_feature(cxx_generalized_initializers)\n#  define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX\n#endif\n\n#if !__has_feature(cxx_rvalue_references)\n#  define BOOST_NO_CXX11_RVALUE_REFERENCES\n#endif\n\n#if !__has_feature(cxx_strong_enums)\n#  define BOOST_NO_CXX11_SCOPED_ENUMS\n#endif\n\n#if !__has_feature(cxx_static_assert)\n#  define BOOST_NO_CXX11_STATIC_ASSERT\n#endif\n\n#if !__has_feature(cxx_alias_templates)\n#  define BOOST_NO_CXX11_TEMPLATE_ALIASES\n#endif\n\n#if !__has_feature(cxx_unicode_literals)\n#  define BOOST_NO_CXX11_UNICODE_LITERALS\n#endif\n\n#if !__has_feature(cxx_variadic_templates)\n#  define BOOST_NO_CXX11_VARIADIC_TEMPLATES\n#endif\n\n#if !__has_feature(cxx_user_literals)\n#  define BOOST_NO_CXX11_USER_DEFINED_LITERALS\n#endif\n\n#if !__has_feature(cxx_alignas)\n#  define BOOST_NO_CXX11_ALIGNAS\n#endif\n\n#if !__has_feature(cxx_trailing_return)\n#  define BOOST_NO_CXX11_TRAILING_RESULT_TYPES\n#endif\n\n#if !__has_feature(cxx_inline_namespaces)\n#  define BOOST_NO_CXX11_INLINE_NAMESPACES\n#endif\n\n#if !__has_feature(cxx_override_control)\n#  define BOOST_NO_CXX11_FINAL\n#endif\n\n#if !(__has_feature(__cxx_binary_literals__) || __has_extension(__cxx_binary_literals__))\n#  define BOOST_NO_CXX14_BINARY_LITERALS\n#endif\n\n#if !__has_feature(__cxx_decltype_auto__)\n#  define BOOST_NO_CXX14_DECLTYPE_AUTO\n#endif\n\n#if !__has_feature(__cxx_aggregate_nsdmi__)\n#  define BOOST_NO_CXX14_AGGREGATE_NSDMI\n#endif\n\n#if !__has_feature(__cxx_init_captures__)\n#  define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES\n#endif\n\n#if !__has_feature(__cxx_generic_lambdas__)\n#  define BOOST_NO_CXX14_GENERIC_LAMBDAS\n#endif\n\n// clang < 3.5 has a defect with dependent type, like following.\n//\n//  template <class 
T>\n//  constexpr typename enable_if<pred<T> >::type foo(T &)\n//  { } // error: no return statement in constexpr function\n//\n// This issue also affects C++11 mode, but C++11 constexpr requires return stmt.\n// Therefore we don't care such case.\n//\n// Note that we can't check Clang version directly as the numbering system changes depending who's\n// creating the Clang release (see https://github.com/boostorg/config/pull/39#issuecomment-59927873)\n// so instead verify that we have a feature that was introduced at the same time as working C++14\n// constexpr (generic lambda's in this case):\n//\n#if !__has_feature(__cxx_generic_lambdas__) || !__has_feature(__cxx_relaxed_constexpr__)\n#  define BOOST_NO_CXX14_CONSTEXPR\n#endif\n\n#if !__has_feature(__cxx_return_type_deduction__)\n#  define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION\n#endif\n\n#if !__has_feature(__cxx_variable_templates__)\n#  define BOOST_NO_CXX14_VARIABLE_TEMPLATES\n#endif\n\n#if __cplusplus < 201400\n// All versions with __cplusplus above this value seem to support this:\n#  define BOOST_NO_CXX14_DIGIT_SEPARATORS\n#endif\n\n\n// Unused attribute:\n#if defined(__GNUC__) && (__GNUC__ >= 4)\n#  define BOOST_ATTRIBUTE_UNUSED __attribute__((unused))\n#endif\n\n#ifndef BOOST_COMPILER\n#  define BOOST_COMPILER \"Clang version \" __clang_version__\n#endif\n\n// Macro used to identify the Clang compiler.\n#define BOOST_CLANG 1\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/no_tr1/cmath.hpp",
    "content": "//  (C) Copyright John Maddock 2008.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n// The aim of this header is just to include <cmath> but to do\n// so in a way that does not result in recursive inclusion of\n// the Boost TR1 components if boost/tr1/tr1/cmath is in the\n// include search path.  We have to do this to avoid circular\n// dependencies:\n//\n\n#ifndef BOOST_CONFIG_CMATH\n#  define BOOST_CONFIG_CMATH\n\n#  ifndef BOOST_TR1_NO_RECURSION\n#     define BOOST_TR1_NO_RECURSION\n#     define BOOST_CONFIG_NO_CMATH_RECURSION\n#  endif\n\n#  include <cmath>\n\n#  ifdef BOOST_CONFIG_NO_CMATH_RECURSION\n#     undef BOOST_TR1_NO_RECURSION\n#     undef BOOST_CONFIG_NO_CMATH_RECURSION\n#  endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/no_tr1/complex.hpp",
    "content": "//  (C) Copyright John Maddock 2005.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n// The aim of this header is just to include <complex> but to do\n// so in a way that does not result in recursive inclusion of\n// the Boost TR1 components if boost/tr1/tr1/complex is in the\n// include search path.  We have to do this to avoid circular\n// dependencies:\n//\n\n#ifndef BOOST_CONFIG_COMPLEX\n#  define BOOST_CONFIG_COMPLEX\n\n#  ifndef BOOST_TR1_NO_RECURSION\n#     define BOOST_TR1_NO_RECURSION\n#     define BOOST_CONFIG_NO_COMPLEX_RECURSION\n#  endif\n\n#  include <complex>\n\n#  ifdef BOOST_CONFIG_NO_COMPLEX_RECURSION\n#     undef BOOST_TR1_NO_RECURSION\n#     undef BOOST_CONFIG_NO_COMPLEX_RECURSION\n#  endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/no_tr1/functional.hpp",
    "content": "//  (C) Copyright John Maddock 2005.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n// The aim of this header is just to include <functional> but to do\n// so in a way that does not result in recursive inclusion of\n// the Boost TR1 components if boost/tr1/tr1/functional is in the\n// include search path.  We have to do this to avoid circular\n// dependencies:\n//\n\n#ifndef BOOST_CONFIG_FUNCTIONAL\n#  define BOOST_CONFIG_FUNCTIONAL\n\n#  ifndef BOOST_TR1_NO_RECURSION\n#     define BOOST_TR1_NO_RECURSION\n#     define BOOST_CONFIG_NO_FUNCTIONAL_RECURSION\n#  endif\n\n#  include <functional>\n\n#  ifdef BOOST_CONFIG_NO_FUNCTIONAL_RECURSION\n#     undef BOOST_TR1_NO_RECURSION\n#     undef BOOST_CONFIG_NO_FUNCTIONAL_RECURSION\n#  endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/no_tr1/memory.hpp",
    "content": "//  (C) Copyright John Maddock 2005.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n// The aim of this header is just to include <memory> but to do\n// so in a way that does not result in recursive inclusion of\n// the Boost TR1 components if boost/tr1/tr1/memory is in the\n// include search path.  We have to do this to avoid circular\n// dependencies:\n//\n\n#ifndef BOOST_CONFIG_MEMORY\n#  define BOOST_CONFIG_MEMORY\n\n#  ifndef BOOST_TR1_NO_RECURSION\n#     define BOOST_TR1_NO_RECURSION\n#     define BOOST_CONFIG_NO_MEMORY_RECURSION\n#  endif\n\n#  include <memory>\n\n#  ifdef BOOST_CONFIG_NO_MEMORY_RECURSION\n#     undef BOOST_TR1_NO_RECURSION\n#     undef BOOST_CONFIG_NO_MEMORY_RECURSION\n#  endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/no_tr1/utility.hpp",
    "content": "//  (C) Copyright John Maddock 2005.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n// The aim of this header is just to include <utility> but to do\n// so in a way that does not result in recursive inclusion of\n// the Boost TR1 components if boost/tr1/tr1/utility is in the\n// include search path.  We have to do this to avoid circular\n// dependencies:\n//\n\n#ifndef BOOST_CONFIG_UTILITY\n#  define BOOST_CONFIG_UTILITY\n\n#  ifndef BOOST_TR1_NO_RECURSION\n#     define BOOST_TR1_NO_RECURSION\n#     define BOOST_CONFIG_NO_UTILITY_RECURSION\n#  endif\n\n#  include <utility>\n\n#  ifdef BOOST_CONFIG_NO_UTILITY_RECURSION\n#     undef BOOST_TR1_NO_RECURSION\n#     undef BOOST_CONFIG_NO_UTILITY_RECURSION\n#  endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/aix.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2002. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  IBM/Aix specific config options:\n\n#define BOOST_PLATFORM \"IBM Aix\"\n\n#define BOOST_HAS_UNISTD_H\n#define BOOST_HAS_NL_TYPES_H\n#define BOOST_HAS_NANOSLEEP\n#define BOOST_HAS_CLOCK_GETTIME\n\n// This needs support in \"boost/cstdint.hpp\" exactly like FreeBSD.\n// This platform has header named <inttypes.h> which includes all\n// the things needed.\n#define BOOST_HAS_STDINT_H\n\n// Threading API's:\n#define BOOST_HAS_PTHREADS\n#define BOOST_HAS_PTHREAD_DELAY_NP\n#define BOOST_HAS_SCHED_YIELD\n//#define BOOST_HAS_PTHREAD_YIELD\n\n// boilerplate code:\n#include <boost/config/posix_features.hpp>\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/amigaos.hpp",
    "content": "//  (C) Copyright John Maddock 2002. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n#define BOOST_PLATFORM \"AmigaOS\"\n\n#define BOOST_DISABLE_THREADS\n#define BOOST_NO_CWCHAR\n#define BOOST_NO_STD_WSTRING\n#define BOOST_NO_INTRINSIC_WCHAR_T\n \n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/beos.hpp",
    "content": "//  (C) Copyright John Maddock 2001. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  BeOS specific config options:\n\n#define BOOST_PLATFORM \"BeOS\"\n\n#define BOOST_NO_CWCHAR\n#define BOOST_NO_CWCTYPE\n#define BOOST_HAS_UNISTD_H\n\n#define BOOST_HAS_BETHREADS\n\n#ifndef BOOST_DISABLE_THREADS\n#  define BOOST_HAS_THREADS\n#endif\n\n// boilerplate code:\n#include <boost/config/posix_features.hpp>\n \n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/bsd.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Darin Adler 2001. \n//  (C) Copyright Douglas Gregor 2002. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  generic BSD config options:\n\n#if !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) && !defined(__DragonFly__)\n#error \"This platform is not BSD\"\n#endif\n\n#ifdef __FreeBSD__\n#define BOOST_PLATFORM \"FreeBSD \" BOOST_STRINGIZE(__FreeBSD__)\n#elif defined(__NetBSD__)\n#define BOOST_PLATFORM \"NetBSD \" BOOST_STRINGIZE(__NetBSD__)\n#elif defined(__OpenBSD__)\n#define BOOST_PLATFORM \"OpenBSD \" BOOST_STRINGIZE(__OpenBSD__)\n#elif defined(__DragonFly__)\n#define BOOST_PLATFORM \"DragonFly \" BOOST_STRINGIZE(__DragonFly__)\n#endif\n\n//\n// is this the correct version check?\n// FreeBSD has <nl_types.h> but does not\n// advertise the fact in <unistd.h>:\n//\n#if (defined(__FreeBSD__) && (__FreeBSD__ >= 3)) || defined(__DragonFly__)\n#  define BOOST_HAS_NL_TYPES_H\n#endif\n\n//\n// FreeBSD 3.x has pthreads support, but defines _POSIX_THREADS in <pthread.h>\n// and not in <unistd.h>\n//\n#if (defined(__FreeBSD__) && (__FreeBSD__ <= 3))\\\n   || defined(__OpenBSD__) || defined(__DragonFly__) \n#  define BOOST_HAS_PTHREADS\n#endif\n\n//\n// No wide character support in the BSD header files:\n//\n#if defined(__NetBSD__)\n#define __NetBSD_GCC__ (__GNUC__         * 1000000 \\\n                       + __GNUC_MINOR__ *    1000 \\\n                       + __GNUC_PATCHLEVEL__)\n// XXX - the following is required until c++config.h\n//       defines _GLIBCXX_HAVE_SWPRINTF and friends\n//       or the preprocessor conditionals are removed\n//       from the cwchar header.\n#define _GLIBCXX_HAVE_SWPRINTF 1\n#endif\n\n#if !((defined(__FreeBSD__) && (__FreeBSD__ >= 
5)) \\\n      || (defined(__NetBSD_GCC__) && (__NetBSD_GCC__ >= 2095003)) || defined(__DragonFly__))\n#  define BOOST_NO_CWCHAR\n#endif\n//\n// The BSD <ctype.h> has macros only, no functions:\n//\n#if !defined(__OpenBSD__) || defined(__DragonFly__)\n#  define BOOST_NO_CTYPE_FUNCTIONS\n#endif\n\n//\n// thread API's not auto detected:\n//\n#define BOOST_HAS_SCHED_YIELD\n#define BOOST_HAS_NANOSLEEP\n#define BOOST_HAS_GETTIMEOFDAY\n#define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#define BOOST_HAS_SIGACTION\n\n// boilerplate code:\n#define BOOST_HAS_UNISTD_H\n#include <boost/config/posix_features.hpp>\n\n\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/cloudabi.hpp",
    "content": "//       Copyright Nuxi, https://nuxi.nl/ 2015.\n// Distributed under the Boost Software License, Version 1.0.\n//    (See accompanying file LICENSE_1_0.txt or copy at\n//          http://www.boost.org/LICENSE_1_0.txt)\n\n#define BOOST_PLATFORM \"CloudABI\"\n\n#define BOOST_HAS_DIRENT_H\n#define BOOST_HAS_STDINT_H\n#define BOOST_HAS_UNISTD_H\n\n#define BOOST_HAS_CLOCK_GETTIME\n#define BOOST_HAS_EXPM1\n#define BOOST_HAS_GETTIMEOFDAY\n#define BOOST_HAS_LOG1P\n#define BOOST_HAS_NANOSLEEP\n#define BOOST_HAS_PTHREADS\n#define BOOST_HAS_SCHED_YIELD\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/cray.hpp",
    "content": "//  (C) Copyright John Maddock 2011.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n\n//  See http://www.boost.org for most recent version.\n\n//  SGI Irix specific config options:\n\n#define BOOST_PLATFORM \"Cray\"\n\n// boilerplate code:\n#define BOOST_HAS_UNISTD_H\n#include <boost/config/posix_features.hpp>\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/cygwin.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  cygwin specific config options:\n\n#define BOOST_PLATFORM \"Cygwin\"\n#define BOOST_HAS_DIRENT_H\n#define BOOST_HAS_LOG1P\n#define BOOST_HAS_EXPM1\n\n//\n// Threading API:\n// See if we have POSIX threads, if we do use them, otherwise\n// revert to native Win threads.\n#define BOOST_HAS_UNISTD_H\n#include <unistd.h>\n#if defined(_POSIX_THREADS) && (_POSIX_THREADS+0 >= 0) && !defined(BOOST_HAS_WINTHREADS)\n#  define BOOST_HAS_PTHREADS\n#  define BOOST_HAS_SCHED_YIELD\n#  define BOOST_HAS_GETTIMEOFDAY\n#  define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#  define BOOST_HAS_SIGACTION\n#else\n#  if !defined(BOOST_HAS_WINTHREADS)\n#     define BOOST_HAS_WINTHREADS\n#  endif\n#  define BOOST_HAS_FTIME\n#endif\n\n//\n// find out if we have a stdint.h, there should be a better way to do this:\n//\n#include <sys/types.h>\n#ifdef _STDINT_H\n#define BOOST_HAS_STDINT_H\n#endif\n\n/// Cygwin has no fenv.h\n#define BOOST_NO_FENV_H\n\n// boilerplate code:\n#include <boost/config/posix_features.hpp>\n\n//\n// Cygwin lies about XSI conformance, there is no nl_types.h:\n//\n#ifdef BOOST_HAS_NL_TYPES_H\n#  undef BOOST_HAS_NL_TYPES_H\n#endif\n \n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/haiku.hpp",
    "content": "//  (C) Copyright Jessica Hamilton 2014.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Haiku specific config options:\n\n#define BOOST_PLATFORM \"Haiku\"\n\n#define BOOST_HAS_UNISTD_H\n#define BOOST_HAS_STDINT_H\n\n#ifndef BOOST_DISABLE_THREADS\n#  define BOOST_HAS_THREADS\n#endif\n\n#define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#define BOOST_NO_CXX11_STATIC_ASSERT\n#define BOOST_NO_CXX11_VARIADIC_MACROS\n\n//\n// thread API's not auto detected:\n//\n#define BOOST_HAS_SCHED_YIELD\n#define BOOST_HAS_GETTIMEOFDAY\n\n// boilerplate code:\n#include <boost/config/posix_features.hpp>\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/hpux.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Jens Maurer 2001 - 2003. \n//  (C) Copyright David Abrahams 2002. \n//  (C) Copyright Toon Knapen 2003. \n//  (C) Copyright Boris Gubenko 2006 - 2007.\n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  hpux specific config options:\n\n#define BOOST_PLATFORM \"HP-UX\"\n\n// In principle, HP-UX has a nice <stdint.h> under the name <inttypes.h>\n// However, it has the following problem:\n// Use of UINT32_C(0) results in \"0u l\" for the preprocessed source\n// (verifyable with gcc 2.95.3)\n#if (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__HP_aCC)\n#  define BOOST_HAS_STDINT_H\n#endif\n\n#if !(defined(__HP_aCC) || !defined(_INCLUDE__STDC_A1_SOURCE))\n#  define BOOST_NO_SWPRINTF\n#endif\n#if defined(__HP_aCC) && !defined(_INCLUDE__STDC_A1_SOURCE)\n#  define BOOST_NO_CWCTYPE\n#endif\n\n#if defined(__GNUC__)\n#  if (__GNUC__ < 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ < 3))\n      // GNU C on HP-UX does not support threads (checked up to gcc 3.3)\n#     define BOOST_DISABLE_THREADS\n#  elif !defined(BOOST_DISABLE_THREADS)\n      // threads supported from gcc-3.3 onwards:\n#     define BOOST_HAS_THREADS\n#     define BOOST_HAS_PTHREADS\n#  endif\n#elif defined(__HP_aCC) && !defined(BOOST_DISABLE_THREADS)\n#  define BOOST_HAS_PTHREADS\n#endif\n\n// boilerplate code:\n#define BOOST_HAS_UNISTD_H\n#include <boost/config/posix_features.hpp>\n\n// the following are always available:\n#ifndef BOOST_HAS_GETTIMEOFDAY\n#  define BOOST_HAS_GETTIMEOFDAY\n#endif\n#ifndef BOOST_HAS_SCHED_YIELD\n#    define BOOST_HAS_SCHED_YIELD\n#endif\n#ifndef BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#    define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#endif\n#ifndef BOOST_HAS_NL_TYPES_H\n#    define 
BOOST_HAS_NL_TYPES_H\n#endif\n#ifndef BOOST_HAS_NANOSLEEP\n#    define BOOST_HAS_NANOSLEEP\n#endif\n#ifndef BOOST_HAS_GETTIMEOFDAY\n#    define BOOST_HAS_GETTIMEOFDAY\n#endif\n#ifndef BOOST_HAS_DIRENT_H\n#    define BOOST_HAS_DIRENT_H\n#endif\n#ifndef BOOST_HAS_CLOCK_GETTIME\n#    define BOOST_HAS_CLOCK_GETTIME\n#endif\n#ifndef BOOST_HAS_SIGACTION\n#  define BOOST_HAS_SIGACTION\n#endif\n#ifndef BOOST_HAS_NRVO \n#  ifndef __parisc\n#    define BOOST_HAS_NRVO\n#  endif\n#endif\n#ifndef BOOST_HAS_LOG1P \n#  define BOOST_HAS_LOG1P\n#endif\n#ifndef BOOST_HAS_EXPM1\n#  define BOOST_HAS_EXPM1\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/irix.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Jens Maurer 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n\n//  See http://www.boost.org for most recent version.\n\n//  SGI Irix specific config options:\n\n#define BOOST_PLATFORM \"SGI Irix\"\n\n#define BOOST_NO_SWPRINTF \n//\n// these are not auto detected by POSIX feature tests:\n//\n#define BOOST_HAS_GETTIMEOFDAY\n#define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n\n#ifdef __GNUC__\n   // GNU C on IRIX does not support threads (checked up to gcc 3.3)\n#  define BOOST_DISABLE_THREADS\n#endif\n\n// boilerplate code:\n#define BOOST_HAS_UNISTD_H\n#include <boost/config/posix_features.hpp>\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/linux.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Jens Maurer 2001 - 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  linux specific config options:\n\n#define BOOST_PLATFORM \"linux\"\n\n// make sure we have __GLIBC_PREREQ if available at all\n#ifdef __cplusplus\n#include <cstdlib>\n#else\n#include <stdlib.h>\n#endif\n\n//\n// <stdint.h> added to glibc 2.1.1\n// We can only test for 2.1 though:\n//\n#if defined(__GLIBC__) && ((__GLIBC__ > 2) || ((__GLIBC__ == 2) && (__GLIBC_MINOR__ >= 1)))\n   // <stdint.h> defines int64_t unconditionally, but <sys/types.h> defines\n   // int64_t only if __GNUC__.  Thus, assume a fully usable <stdint.h>\n   // only when using GCC.\n#  if defined __GNUC__\n#    define BOOST_HAS_STDINT_H\n#  endif\n#endif\n\n#if defined(__LIBCOMO__)\n   //\n   // como on linux doesn't have std:: c functions:\n   // NOTE: versions of libcomo prior to beta28 have octal version numbering,\n   // e.g. 
version 25 is 21 (dec)\n   //\n#  if __LIBCOMO_VERSION__ <= 20\n#    define BOOST_NO_STDC_NAMESPACE\n#  endif\n\n#  if __LIBCOMO_VERSION__ <= 21\n#    define BOOST_NO_SWPRINTF\n#  endif\n\n#endif\n\n//\n// If glibc is past version 2 then we definitely have\n// gettimeofday, earlier versions may or may not have it:\n//\n#if defined(__GLIBC__) && (__GLIBC__ >= 2)\n#  define BOOST_HAS_GETTIMEOFDAY\n#endif\n\n#ifdef __USE_POSIX199309\n#  define BOOST_HAS_NANOSLEEP\n#endif\n\n#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)\n// __GLIBC_PREREQ is available since 2.1.2\n\n   // swprintf is available since glibc 2.2.0\n#  if !__GLIBC_PREREQ(2,2) || (!defined(__USE_ISOC99) && !defined(__USE_UNIX98))\n#    define BOOST_NO_SWPRINTF\n#  endif\n#else\n#  define BOOST_NO_SWPRINTF\n#endif\n\n// boilerplate code:\n#define BOOST_HAS_UNISTD_H\n#include <boost/config/posix_features.hpp>\n#ifdef __USE_GNU\n#define BOOST_HAS_PTHREAD_YIELD\n#endif\n\n#ifndef __GNUC__\n//\n// if the compiler is not gcc we still need to be able to parse\n// the GNU system headers, some of which (mainly <stdint.h>)\n// use GNU specific extensions:\n//\n#  ifndef __extension__\n#     define __extension__\n#  endif\n#  ifndef __const__\n#     define __const__ const\n#  endif\n#  ifndef __volatile__\n#     define __volatile__ volatile\n#  endif\n#  ifndef __signed__\n#     define __signed__ signed\n#  endif\n#  ifndef __typeof__\n#     define __typeof__ typeof\n#  endif\n#  ifndef __inline__\n#     define __inline__ inline\n#  endif\n#endif\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/macos.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Darin Adler 2001 - 2002. \n//  (C) Copyright Bill Kempf 2002. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Mac OS specific config options:\n\n#define BOOST_PLATFORM \"Mac OS\"\n\n#if __MACH__ && !defined(_MSL_USING_MSL_C)\n\n// Using the Mac OS X system BSD-style C library.\n\n#  ifndef BOOST_HAS_UNISTD_H\n#    define BOOST_HAS_UNISTD_H\n#  endif\n//\n// Begin by including our boilerplate code for POSIX\n// feature detection, this is safe even when using\n// the MSL as Metrowerks supply their own <unistd.h>\n// to replace the platform-native BSD one. G++ users\n// should also always be able to do this on MaxOS X.\n//\n#  include <boost/config/posix_features.hpp>\n#  ifndef BOOST_HAS_STDINT_H\n#     define BOOST_HAS_STDINT_H\n#  endif\n\n//\n// BSD runtime has pthreads, sigaction, sched_yield and gettimeofday,\n// of these only pthreads are advertised in <unistd.h>, so set the \n// other options explicitly:\n//\n#  define BOOST_HAS_SCHED_YIELD\n#  define BOOST_HAS_GETTIMEOFDAY\n#  define BOOST_HAS_SIGACTION\n\n#  if (__GNUC__ < 3) && !defined( __APPLE_CC__)\n\n// GCC strange \"ignore std\" mode works better if you pretend everything\n// is in the std namespace, for the most part.\n\n#    define BOOST_NO_STDC_NAMESPACE\n#  endif\n\n#  if (__GNUC__ >= 4)\n\n// Both gcc and intel require these.  
\n#    define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#    define BOOST_HAS_NANOSLEEP\n\n#  endif\n\n#else\n\n// Using the MSL C library.\n\n// We will eventually support threads in non-Carbon builds, but we do\n// not support this yet.\n#  if ( defined(TARGET_API_MAC_CARBON) && TARGET_API_MAC_CARBON ) || ( defined(TARGET_CARBON) && TARGET_CARBON )\n\n#  if !defined(BOOST_HAS_PTHREADS)\n// MPTasks support is deprecated/removed from Boost:\n//#    define BOOST_HAS_MPTASKS\n#  elif ( __dest_os == __mac_os_x )\n// We are doing a Carbon/Mach-O/MSL build which has pthreads, but only the\n// gettimeofday and no posix.\n#  define BOOST_HAS_GETTIMEOFDAY\n#  endif\n\n#ifdef BOOST_HAS_PTHREADS\n#  define BOOST_HAS_THREADS\n#endif\n\n// The remote call manager depends on this.\n#    define BOOST_BIND_ENABLE_PASCAL\n\n#  endif\n\n#endif\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/qnxnto.hpp",
    "content": "//  (C) Copyright Jim Douglas 2005. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  QNX specific config options:\n\n#define BOOST_PLATFORM \"QNX\"\n\n#define BOOST_HAS_UNISTD_H\n#include <boost/config/posix_features.hpp>\n\n// QNX claims XOpen version 5 compatibility, but doesn't have an nl_types.h\n// or log1p and expm1:\n#undef  BOOST_HAS_NL_TYPES_H\n#undef  BOOST_HAS_LOG1P\n#undef  BOOST_HAS_EXPM1\n\n#define BOOST_HAS_PTHREADS\n#define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n\n#define BOOST_HAS_GETTIMEOFDAY\n#define BOOST_HAS_CLOCK_GETTIME\n#define BOOST_HAS_NANOSLEEP\n\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/solaris.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Jens Maurer 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  sun specific config options:\n\n#define BOOST_PLATFORM \"Sun Solaris\"\n\n#define BOOST_HAS_GETTIMEOFDAY\n\n// boilerplate code:\n#define BOOST_HAS_UNISTD_H\n#include <boost/config/posix_features.hpp>\n\n//\n// pthreads don't actually work with gcc unless _PTHREADS is defined:\n//\n#if defined(__GNUC__) && defined(_POSIX_THREADS) && !defined(_PTHREADS)\n# undef BOOST_HAS_PTHREADS\n#endif\n\n#define BOOST_HAS_STDINT_H \n#define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE \n#define BOOST_HAS_LOG1P \n#define BOOST_HAS_EXPM1\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/symbian.hpp",
    "content": "//  (C) Copyright Yuriy Krasnoschek 2009. \n//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Jens Maurer 2001 - 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  symbian specific config options:\n\n\n#define BOOST_PLATFORM \"Symbian\"\n#define BOOST_SYMBIAN 1\n\n\n#if defined(__S60_3X__)\n// Open C / C++ plugin was introdused in this SDK, earlier versions don't have CRT / STL\n#  define BOOST_S60_3rd_EDITION_FP2_OR_LATER_SDK\n// make sure we have __GLIBC_PREREQ if available at all\n#ifdef __cplusplus\n#include <cstdlib>\n#else\n#include <stdlib.h>\n#endif// boilerplate code:\n#  define BOOST_HAS_UNISTD_H\n#  include <boost/config/posix_features.hpp>\n// S60 SDK defines _POSIX_VERSION as POSIX.1\n#  ifndef BOOST_HAS_STDINT_H\n#    define BOOST_HAS_STDINT_H\n#  endif\n#  ifndef BOOST_HAS_GETTIMEOFDAY\n#    define BOOST_HAS_GETTIMEOFDAY\n#  endif\n#  ifndef BOOST_HAS_DIRENT_H\n#    define BOOST_HAS_DIRENT_H\n#  endif\n#  ifndef BOOST_HAS_SIGACTION\n#    define BOOST_HAS_SIGACTION\n#  endif\n#  ifndef BOOST_HAS_PTHREADS\n#    define BOOST_HAS_PTHREADS\n#  endif\n#  ifndef BOOST_HAS_NANOSLEEP\n#    define BOOST_HAS_NANOSLEEP\n#  endif\n#  ifndef BOOST_HAS_SCHED_YIELD\n#    define BOOST_HAS_SCHED_YIELD\n#  endif\n#  ifndef BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#    define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#  endif\n#  ifndef BOOST_HAS_LOG1P\n#    define BOOST_HAS_LOG1P\n#  endif\n#  ifndef BOOST_HAS_EXPM1\n#    define BOOST_HAS_EXPM1\n#  endif\n#  ifndef BOOST_POSIX_API\n#    define BOOST_POSIX_API\n#  endif\n// endianess support\n#  include <sys/endian.h>\n// Symbian SDK provides _BYTE_ORDER instead of __BYTE_ORDER\n#  ifndef __LITTLE_ENDIAN\n#    ifdef _LITTLE_ENDIAN\n#      define __LITTLE_ENDIAN _LITTLE_ENDIAN\n#    
else\n#      define __LITTLE_ENDIAN 1234\n#    endif\n#  endif\n#  ifndef __BIG_ENDIAN\n#    ifdef _BIG_ENDIAN\n#      define __BIG_ENDIAN _BIG_ENDIAN\n#    else\n#      define __BIG_ENDIAN 4321\n#    endif\n#  endif\n#  ifndef __BYTE_ORDER\n#    define __BYTE_ORDER __LITTLE_ENDIAN // Symbian is LE\n#  endif\n// Known limitations\n#  define BOOST_ASIO_DISABLE_SERIAL_PORT\n#  define BOOST_DATE_TIME_NO_LOCALE\n#  define BOOST_NO_STD_WSTRING\n#  define BOOST_EXCEPTION_DISABLE\n#  define BOOST_NO_EXCEPTIONS\n\n#else // TODO: More platform support e.g. UIQ\n#  error \"Unsuppoted Symbian SDK\"\n#endif\n\n#if defined(__WINSCW__) && !defined(BOOST_DISABLE_WIN32)\n#  define BOOST_DISABLE_WIN32 // winscw defines WIN32 macro\n#endif\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/vms.hpp",
    "content": "//  (C) Copyright Artyom Beilis 2010.  \n//  Use, modification and distribution are subject to the  \n//  Boost Software License, Version 1.0. (See accompanying file  \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) \n\n#ifndef BOOST_CONFIG_PLATFORM_VMS_HPP \n#define BOOST_CONFIG_PLATFORM_VMS_HPP \n\n#define BOOST_PLATFORM \"OpenVMS\" \n\n#undef  BOOST_HAS_STDINT_H \n#define BOOST_HAS_UNISTD_H \n#define BOOST_HAS_NL_TYPES_H \n#define BOOST_HAS_GETTIMEOFDAY \n#define BOOST_HAS_DIRENT_H \n#define BOOST_HAS_PTHREADS \n#define BOOST_HAS_NANOSLEEP \n#define BOOST_HAS_CLOCK_GETTIME \n#define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE \n#define BOOST_HAS_LOG1P \n#define BOOST_HAS_EXPM1 \n#define BOOST_HAS_THREADS \n#undef  BOOST_HAS_SCHED_YIELD \n\n#endif \n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/vxworks.hpp",
    "content": "//  (C) Copyright Dustin Spicuzza 2009.\n//      Adapted to vxWorks 6.9 by Peter Brockamp 2012.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Since WRS does not yet properly support boost under vxWorks\n//  and this file was badly outdated, but I was keen on using it,\n//  I patched boost myself to make things work. This has been tested\n//  and adapted by me for vxWorks 6.9 *only*, as I'm lacking access\n//  to earlier 6.X versions! The only thing I know for sure is that\n//  very old versions of vxWorks (namely everything below 6.x) are\n//  absolutely unable to use boost. This is mainly due to the completely\n//  outdated libraries and ancient compiler (GCC 2.96 or worse). Do\n//  not even think of getting this to work, a miserable failure will\n//  be guaranteed!\n//  Equally, this file has been tested for RTPs (Real Time Processes)\n//  only, not for DKMs (Downloadable Kernel Modules). These two types\n//  of executables differ largely in the available functionality of\n//  the C-library, STL, and so on. A DKM uses a library similar to those\n//  of vxWorks 5.X - with all its limitations and incompatibilities\n//  with respect to ANSI C++ and STL. So probably there might be problems\n//  with the usage of boost from DKMs. 
WRS or any voluteers are free to\n//  prove the opposite!\n\n// ====================================================================\n//\n// Some important information regarding the usage of POSIX semaphores:\n// -------------------------------------------------------------------\n//\n// VxWorks as a real time operating system handles threads somewhat\n// different from what \"normal\" OSes do, regarding their scheduling!\n// This could lead to a scenario called \"priority inversion\" when using\n// semaphores, see http://en.wikipedia.org/wiki/Priority_inversion.\n//\n// Now, VxWorks POSIX-semaphores for DKM's default to the usage of\n// priority inverting semaphores, which is fine. On the other hand,\n// for RTP's it defaults to using non priority inverting semaphores,\n// which could easily pose a serious problem for a real time process,\n// i.e. deadlocks! To overcome this two possibilities do exist:\n//\n// a) Patch every piece of boost that uses semaphores to instanciate\n//    the proper type of semaphores. This is non-intrusive with respect\n//    to the OS and could relatively easy been done by giving all\n//    semaphores attributes deviating from the default (for in-depth\n//    information see the POSIX functions pthread_mutexattr_init()\n//    and pthread_mutexattr_setprotocol()). However this breaks all\n//    too easily, as with every new version some boost library could\n//    all in a sudden start using semaphores, resurrecting the very\n//    same, hard to locate problem over and over again!\n//\n// b) We could change the default properties for POSIX-semaphores\n//    that VxWorks uses for RTP's and this is being suggested here,\n//    as it will more or less seamlessly integrate with boost. 
I got\n//    the following information from WRS how to do this, compare\n//    Wind River TSR# 1209768:\n//\n// Instructions for changing the default properties of POSIX-\n// semaphores for RTP's in VxWorks 6.9:\n// - Edit the file /vxworks-6.9/target/usr/src/posix/pthreadLib.c\n//   in the root of your Workbench-installation.\n// - Around line 917 there should be the definition of the default\n//   mutex attributes:\n//\n//   LOCAL pthread_mutexattr_t defaultMutexAttr =\n//       {\n//       PTHREAD_INITIALIZED_OBJ, PTHREAD_PRIO_NONE, 0,\n//       PTHREAD_MUTEX_DEFAULT\n//       };\n//\n//   Here, replace PTHREAD_PRIO_NONE by PTHREAD_PRIO_INHERIT.\n// - Around line 1236 there should be a definition for the function\n//   pthread_mutexattr_init(). A couple of lines below you should\n//   find a block of code like this:\n//\n//   pAttr->mutexAttrStatus      = PTHREAD_INITIALIZED_OBJ;\n//   pAttr->mutexAttrProtocol    = PTHREAD_PRIO_NONE;\n//   pAttr->mutexAttrPrioceiling = 0;\n//   pAttr->mutexAttrType        = PTHREAD_MUTEX_DEFAULT;\n//\n//   Here again, replace PTHREAD_PRIO_NONE by PTHREAD_PRIO_INHERIT.\n// - Finally, rebuild your VSB. This will create a new VxWorks kernel\n//   with the changed properties. That's it! Now, using boost should\n//   no longer cause any problems with task deadlocks!\n//\n// And here's another useful piece of information concerning VxWorks'\n// POSIX-functionality in general:\n// VxWorks is not a genuine POSIX-OS in itself, rather it is using a\n// kind of compatibility layer (sort of a wrapper) to emulate the\n// POSIX-functionality by using its own resources and functions.\n// At the time a task (thread) calls it's first POSIX-function during\n// runtime it is being transformed by the OS into a POSIX-thread.\n// This transformation does include a call to malloc() to allocate the\n// memory required for the housekeeping of POSIX-threads. 
In a high\n// priority RTP this malloc() call may be highly undesirable, as its\n// timing is more or less unpredictable (depending on what your actual\n// heap looks like). You can circumvent this problem by calling the\n// function thread_self() at a well defined point in the code of the\n// task, e.g. shortly after the task spawns up. Thereby you are able\n// to define the time when the task-transformation will take place and\n// you could shift it to an uncritical point where a malloc() call is\n// tolerable. So, if this could pose a problem for your code, remember\n// to call thread_self() from the affected task at an early stage.\n//\n// ====================================================================\n\n// Block out all versions before vxWorks 6.x, as these don't work:\n// Include header with the vxWorks version information and query them\n#include <version.h>\n#if !defined(_WRS_VXWORKS_MAJOR) || (_WRS_VXWORKS_MAJOR < 6)\n#  error \"The vxWorks version you're using is so badly outdated,\\\n          it doesn't work at all with boost, sorry, no chance!\"\n#endif\n\n// Handle versions above 5.X but below 6.9\n#if (_WRS_VXWORKS_MAJOR == 6) && (_WRS_VXWORKS_MINOR < 9)\n// TODO: Starting from what version does vxWorks work with boost?\n// We can't reasonably insert a #warning \"\" as a user hint here,\n// as this will show up with every file including some boost header,\n// badly bugging the user... 
So for the time being we just leave it.\n#endif\n\n// vxWorks specific config options:\n// --------------------------------\n#define BOOST_PLATFORM \"vxWorks\"\n\n// Special behaviour for DKMs:\n#ifdef _WRS_KERNEL\n  // DKMs do not have the <cwchar>-header,\n  // but apparently they do have an intrinsic wchar_t meanwhile!\n#  define BOOST_NO_CWCHAR\n\n  // Lots of wide-functions and -headers are unavailable for DKMs as well:\n#  define BOOST_NO_CWCTYPE\n#  define BOOST_NO_SWPRINTF\n#  define BOOST_NO_STD_WSTRING\n#  define BOOST_NO_STD_WSTREAMBUF\n#endif\n\n// Generally available headers:\n#define BOOST_HAS_UNISTD_H\n#define BOOST_HAS_STDINT_H\n#define BOOST_HAS_DIRENT_H\n#define BOOST_HAS_SLIST\n\n// vxWorks does not have installed an iconv-library by default,\n// so unfortunately no Unicode support from scratch is available!\n// Thus, instead it is suggested to switch to ICU, as this seems\n// to be the most complete and portable option...\n#define BOOST_LOCALE_WITH_ICU\n\n// Generally available functionality:\n#define BOOST_HAS_THREADS\n#define BOOST_HAS_NANOSLEEP\n#define BOOST_HAS_GETTIMEOFDAY\n#define BOOST_HAS_CLOCK_GETTIME\n#define BOOST_HAS_MACRO_USE_FACET\n\n// Generally unavailable functionality, delivered by boost's test function:\n//#define BOOST_NO_DEDUCED_TYPENAME // Commented this out, boost's test gives an errorneous result!\n#define BOOST_NO_CXX11_EXTERN_TEMPLATE\n#define BOOST_NO_CXX11_VARIADIC_MACROS\n\n// Generally available threading API's:\n#define BOOST_HAS_PTHREADS\n#define BOOST_HAS_SCHED_YIELD\n#define BOOST_HAS_SIGACTION\n\n// Functionality available for RTPs only:\n#ifdef __RTP__\n#  define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#  define BOOST_HAS_LOG1P\n#  define BOOST_HAS_EXPM1\n#endif\n\n// Functionality available for DKMs only:\n#ifdef _WRS_KERNEL\n  // Luckily, at the moment there seems to be none!\n#endif\n\n// These #defines allow posix_features to work, since vxWorks doesn't\n// #define them itself for DKMs (for RTPs on the 
contrary it does):\n#ifdef _WRS_KERNEL\n#  ifndef _POSIX_TIMERS\n#    define _POSIX_TIMERS  1\n#  endif\n#  ifndef _POSIX_THREADS\n#    define _POSIX_THREADS 1\n#  endif\n#endif\n\n// vxWorks doesn't work with asio serial ports:\n#define BOOST_ASIO_DISABLE_SERIAL_PORT\n// TODO: The problem here seems to bee that vxWorks uses its own, very specific\n//       ways to handle serial ports, incompatible with POSIX or anything...\n//       Maybe a specific implementation would be possible, but until the\n//       straight need arises... This implementation would presumably consist\n//       of some vxWorks specific ioctl-calls, etc. Any voluteers?\n\n// vxWorks-around: <time.h> #defines CLOCKS_PER_SEC as sysClkRateGet() but\n//                 miserably fails to #include the required <sysLib.h> to make\n//                 sysClkRateGet() available! So we manually include it here.\n#ifdef __RTP__\n#  include <time.h>\n#  include <sysLib.h>\n#endif\n\n// vxWorks-around: In <stdint.h> the macros INT32_C(), UINT32_C(), INT64_C() and\n//                 UINT64_C() are defined errorneously, yielding not a signed/\n//                 unsigned long/long long type, but a signed/unsigned int/long\n//                 type. Eventually this leads to compile errors in ratio_fwd.hpp,\n//                 when trying to define several constants which do not fit into a\n//                 long type! 
We correct them here by redefining.\n#include <cstdint>\n\n// Some macro-magic to do the job\n#define VX_JOIN(X, Y)     VX_DO_JOIN(X, Y)\n#define VX_DO_JOIN(X, Y)  VX_DO_JOIN2(X, Y)\n#define VX_DO_JOIN2(X, Y) X##Y\n\n// Correctly setup the macros\n#undef  INT32_C\n#undef  UINT32_C\n#undef  INT64_C\n#undef  UINT64_C\n#define INT32_C(x)  VX_JOIN(x, L)\n#define UINT32_C(x) VX_JOIN(x, UL)\n#define INT64_C(x)  VX_JOIN(x, LL)\n#define UINT64_C(x) VX_JOIN(x, ULL)\n\n// #include Libraries required for the following function adaption\n#include <ioLib.h>\n#include <tickLib.h>\n#include <sys/time.h>\n\n// Use C-linkage for the following helper functions\nextern \"C\" {\n\n// vxWorks-around: The required functions getrlimit() and getrlimit() are missing.\n//                 But we have the similar functions getprlimit() and setprlimit(),\n//                 which may serve the purpose.\n//                 Problem: The vxWorks-documentation regarding these functions\n//                 doesn't deserve its name! It isn't documented what the first two\n//                 parameters idtype and id mean, so we must fall back to an educated\n//                 guess - null, argh... 
:-/\n\n// TODO: getprlimit() and setprlimit() do exist for RTPs only, for whatever reason.\n//       Thus for DKMs there would have to be another implementation.\n#ifdef __RTP__\n  inline int getrlimit(int resource, struct rlimit *rlp){\n    return getprlimit(0, 0, resource, rlp);\n  }\n\n  inline int setrlimit(int resource, const struct rlimit *rlp){\n    return setprlimit(0, 0, resource, const_cast<struct rlimit*>(rlp));\n  }\n#endif\n\n// vxWorks has ftruncate() only, so we do simulate truncate():\ninline int truncate(const char *p, off_t l){\n  int fd = open(p, O_WRONLY);\n  if (fd == -1){\n    errno = EACCES;\n    return -1;\n  }\n  if (ftruncate(fd, l) == -1){\n    close(fd);\n    errno = EACCES;\n    return -1;\n  }\n  return close(fd);\n}\n\n// Fake symlink handling by dummy functions:\ninline int symlink(const char*, const char*){\n  // vxWorks has no symlinks -> always return an error!\n  errno = EACCES;\n  return -1;\n}\n\ninline ssize_t readlink(const char*, char*, size_t){\n  // vxWorks has no symlinks -> always return an error!\n  errno = EACCES;\n  return -1;\n}\n\n// vxWorks claims to implement gettimeofday in sys/time.h\n// but nevertheless does not provide it! 
See\n// https://support.windriver.com/olsPortal/faces/maintenance/techtipDetail_noHeader.jspx?docId=16442&contentId=WR_TECHTIP_006256\n// We implement a surrogate version here via clock_gettime:\ninline int gettimeofday(struct timeval *tv, void * /*tzv*/) {\n  struct timespec ts;\n  clock_gettime(CLOCK_MONOTONIC, &ts);\n  tv->tv_sec  = ts.tv_sec;\n  tv->tv_usec = ts.tv_nsec / 1000;\n  return 0;\n}\n\n// vxWorks does provide neither struct tms nor function times()!\n// We implement an empty dummy-function, simply setting the user\n// and system time to the half of thew actual system ticks-value\n// and the child user and system time to 0.\n// Rather ugly but at least it suppresses compiler errors...\n// Unfortunately, this of course *does* have an severe impact on\n// dependant libraries, actually this is chrono only! Here it will\n// not be possible to correctly use user and system times! But\n// as vxWorks is lacking the ability to calculate user and system\n// process times there seems to be no other possible solution.\nstruct tms{\n  clock_t tms_utime;  // User CPU time\n  clock_t tms_stime;  // System CPU time\n  clock_t tms_cutime; // User CPU time of terminated child processes\n  clock_t tms_cstime; // System CPU time of terminated child processes\n};\n\ninline clock_t times(struct tms *t){\n  struct timespec ts;\n  clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);\n  clock_t ticks(static_cast<clock_t>(static_cast<double>(ts.tv_sec)  * CLOCKS_PER_SEC +\n                                     static_cast<double>(ts.tv_nsec) * CLOCKS_PER_SEC / 1000000.0));\n  t->tms_utime  = ticks/2U;\n  t->tms_stime  = ticks/2U;\n  t->tms_cutime = 0; // vxWorks is lacking the concept of a child process!\n  t->tms_cstime = 0; // -> Set the wait times for childs to 0\n  return ticks;\n}\n\n} // extern \"C\"\n\n// Put the selfmade functions into the std-namespace, just in case\nnamespace std {\n# ifdef __RTP__\n    using ::getrlimit;\n    using ::setrlimit;\n# endif\n  using 
::truncate;\n  using ::symlink;\n  using ::readlink;\n  using ::times;\n  using ::gettimeofday;\n}\n\n// Some more macro-magic:\n// vxWorks-around: Some functions are not present or broken in vxWorks\n//                 but may be patched to life via helper macros...\n\n// Include signal.h which might contain a typo to be corrected here\n#include <signal.h>\n\n#define getpagesize()    sysconf(_SC_PAGESIZE)         // getpagesize is deprecated anyway!\n#ifndef S_ISSOCK\n#  define S_ISSOCK(mode) ((mode & S_IFMT) == S_IFSOCK) // Is file a socket?\n#endif\n#define lstat(p, b)      stat(p, b)                    // lstat() == stat(), as vxWorks has no symlinks!\n#ifndef FPE_FLTINV\n#  define FPE_FLTINV     (FPE_FLTSUB+1)                // vxWorks has no FPE_FLTINV, so define one as a dummy\n#endif\n#if !defined(BUS_ADRALN) && defined(BUS_ADRALNR)\n#  define BUS_ADRALN     BUS_ADRALNR                   // Correct a supposed typo in vxWorks' <signal.h>\n#endif\n//typedef int              locale_t;                     // locale_t is a POSIX-extension, currently unpresent in vxWorks!\n\n// #include boilerplate code:\n#include <boost/config/posix_features.hpp>\n\n// vxWorks lies about XSI conformance, there is no nl_types.h:\n#undef BOOST_HAS_NL_TYPES_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/platform/win32.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Bill Kempf 2001. \n//  (C) Copyright Aleksey Gurtovoy 2003. \n//  (C) Copyright Rene Rivera 2005.\n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Win32 specific config options:\n\n#define BOOST_PLATFORM \"Win32\"\n\n//  Get the information about the MinGW runtime, i.e. __MINGW32_*VERSION.\n#if defined(__MINGW32__)\n#  include <_mingw.h>\n#endif\n\n#if defined(__GNUC__) && !defined(BOOST_NO_SWPRINTF)\n#  define BOOST_NO_SWPRINTF\n#endif\n\n//  Default defines for BOOST_SYMBOL_EXPORT and BOOST_SYMBOL_IMPORT\n//  If a compiler doesn't support __declspec(dllexport)/__declspec(dllimport),\n//  its boost/config/compiler/ file must define BOOST_SYMBOL_EXPORT and\n//  BOOST_SYMBOL_IMPORT\n#ifndef BOOST_SYMBOL_EXPORT\n#  define BOOST_HAS_DECLSPEC\n#  define BOOST_SYMBOL_EXPORT __declspec(dllexport)\n#  define BOOST_SYMBOL_IMPORT __declspec(dllimport)\n#endif\n\n#if defined(__MINGW32__) && ((__MINGW32_MAJOR_VERSION > 2) || ((__MINGW32_MAJOR_VERSION == 2) && (__MINGW32_MINOR_VERSION >= 0)))\n#  define BOOST_HAS_STDINT_H\n#  ifndef __STDC_LIMIT_MACROS\n#     define __STDC_LIMIT_MACROS\n#  endif\n#  define BOOST_HAS_DIRENT_H\n#  define BOOST_HAS_UNISTD_H\n#endif\n\n#if defined(__MINGW32__) && (__GNUC__ >= 4)\n// Mingw has these functions but there are persistent problems\n// with calls to these crashing, so disable for now:\n//#  define BOOST_HAS_EXPM1\n//#  define BOOST_HAS_LOG1P\n#  define BOOST_HAS_GETTIMEOFDAY\n#endif\n//\n// Win32 will normally be using native Win32 threads,\n// but there is a pthread library avaliable as an option,\n// we used to disable this when BOOST_DISABLE_WIN32 was \n// defined but no longer - this should allow some\n// files to be compiled in strict mode - while 
maintaining\n// a consistent setting of BOOST_HAS_THREADS across\n// all translation units (needed for shared_ptr etc).\n//\n\n#ifndef BOOST_HAS_PTHREADS\n#  define BOOST_HAS_WINTHREADS\n#endif\n\n//\n// WinCE configuration:\n//\n#if defined(_WIN32_WCE) || defined(UNDER_CE)\n#  define BOOST_NO_ANSI_APIS\n// Windows CE does not have a conforming signature for swprintf\n#  define BOOST_NO_SWPRINTF\n#else\n#  define BOOST_HAS_GETSYSTEMTIMEASFILETIME\n#  define BOOST_HAS_THREADEX\n#  define BOOST_HAS_GETSYSTEMTIMEASFILETIME\n#endif\n\n//\n// Windows Runtime\n//\n#if defined(WINAPI_FAMILY) && \\\n  (WINAPI_FAMILY == WINAPI_FAMILY_APP || WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP)\n#  define BOOST_NO_ANSI_APIS\n#endif\n\n#ifndef BOOST_DISABLE_WIN32\n// WEK: Added\n#define BOOST_HAS_FTIME\n#define BOOST_WINDOWS 1\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/posix_features.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n\n//  See http://www.boost.org for most recent version.\n\n// All POSIX feature tests go in this file,\n// Note that we test _POSIX_C_SOURCE and _XOPEN_SOURCE as well\n// _POSIX_VERSION and _XOPEN_VERSION: on some systems POSIX API's\n// may be present but none-functional unless _POSIX_C_SOURCE and\n// _XOPEN_SOURCE have been defined to the right value (it's up\n// to the user to do this *before* including any header, although\n// in most cases the compiler will do this for you).\n\n#  if defined(BOOST_HAS_UNISTD_H)\n#     include <unistd.h>\n\n      // XOpen has <nl_types.h>, but is this the correct version check?\n#     if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 3)\n#        define BOOST_HAS_NL_TYPES_H\n#     endif\n\n      // POSIX version 6 requires <stdint.h>\n#     if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200100)\n#        define BOOST_HAS_STDINT_H\n#     endif\n\n      // POSIX version 2 requires <dirent.h>\n#     if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 199009L)\n#        define BOOST_HAS_DIRENT_H\n#     endif\n\n      // POSIX version 3 requires <signal.h> to have sigaction:\n#     if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 199506L)\n#        define BOOST_HAS_SIGACTION\n#     endif\n      // POSIX defines _POSIX_THREADS > 0 for pthread support,\n      // however some platforms define _POSIX_THREADS without\n      // a value, hence the (_POSIX_THREADS+0 >= 0) check.\n      // Strictly speaking this may catch platforms with a\n      // non-functioning stub <pthreads.h>, but such occurrences should\n      // occur very rarely if at all.\n#     if defined(_POSIX_THREADS) && (_POSIX_THREADS+0 >= 0) && !defined(BOOST_HAS_WINTHREADS) && !defined(BOOST_HAS_MPTASKS)\n#        define 
BOOST_HAS_PTHREADS\n#     endif\n\n      // BOOST_HAS_NANOSLEEP:\n      // This is predicated on _POSIX_TIMERS or _XOPEN_REALTIME:\n#     if (defined(_POSIX_TIMERS) && (_POSIX_TIMERS+0 >= 0)) \\\n             || (defined(_XOPEN_REALTIME) && (_XOPEN_REALTIME+0 >= 0))\n#        define BOOST_HAS_NANOSLEEP\n#     endif\n\n      // BOOST_HAS_CLOCK_GETTIME:\n      // This is predicated on _POSIX_TIMERS (also on _XOPEN_REALTIME\n      // but at least one platform - linux - defines that flag without\n      // defining clock_gettime):\n#     if (defined(_POSIX_TIMERS) && (_POSIX_TIMERS+0 >= 0))\n#        define BOOST_HAS_CLOCK_GETTIME\n#     endif\n\n      // BOOST_HAS_SCHED_YIELD:\n      // This is predicated on _POSIX_PRIORITY_SCHEDULING or\n      // on _POSIX_THREAD_PRIORITY_SCHEDULING or on _XOPEN_REALTIME.\n#     if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING+0 > 0)\\\n            || (defined(_POSIX_THREAD_PRIORITY_SCHEDULING) && (_POSIX_THREAD_PRIORITY_SCHEDULING+0 > 0))\\\n            || (defined(_XOPEN_REALTIME) && (_XOPEN_REALTIME+0 >= 0))\n#        define BOOST_HAS_SCHED_YIELD\n#     endif\n\n      // BOOST_HAS_GETTIMEOFDAY:\n      // BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE:\n      // These are predicated on _XOPEN_VERSION, and appears to be first released\n      // in issue 4, version 2 (_XOPEN_VERSION > 500).\n      // Likewise for the functions log1p and expm1.\n#     if defined(_XOPEN_VERSION) && (_XOPEN_VERSION+0 >= 500)\n#        define BOOST_HAS_GETTIMEOFDAY\n#        if defined(_XOPEN_SOURCE) && (_XOPEN_SOURCE+0 >= 500)\n#           define BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#        endif\n#        ifndef BOOST_HAS_LOG1P\n#           define BOOST_HAS_LOG1P\n#        endif\n#        ifndef BOOST_HAS_EXPM1\n#           define BOOST_HAS_EXPM1\n#        endif\n#     endif\n\n#  endif\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/requires_threads.hpp",
    "content": "//  (C) Copyright John Maddock 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n\n#ifndef BOOST_CONFIG_REQUIRES_THREADS_HPP\n#define BOOST_CONFIG_REQUIRES_THREADS_HPP\n\n#ifndef BOOST_CONFIG_HPP\n#  include <boost/config.hpp>\n#endif\n\n#if defined(BOOST_DISABLE_THREADS)\n\n//\n// special case to handle versions of gcc which don't currently support threads:\n//\n#if defined(__GNUC__) && ((__GNUC__ < 3) || (__GNUC_MINOR__ <= 3) || !defined(BOOST_STRICT_CONFIG))\n//\n// this is checked up to gcc 3.3:\n//\n#if defined(__sgi) || defined(__hpux)\n#  error \"Multi-threaded programs are not supported by gcc on HPUX or Irix (last checked with gcc 3.3)\"\n#endif\n\n#endif\n\n#  error \"Threading support unavaliable: it has been explicitly disabled with BOOST_DISABLE_THREADS\"\n\n#elif !defined(BOOST_HAS_THREADS)\n\n# if defined __COMO__\n//  Comeau C++\n#   error \"Compiler threading support is not turned on. Please set the correct command line options for threading: -D_MT (Windows) or -D_REENTRANT (Unix)\"\n\n#elif defined(__INTEL_COMPILER) || defined(__ICL) || defined(__ICC) || defined(__ECC)\n//  Intel\n#ifdef _WIN32\n#  error \"Compiler threading support is not turned on. Please set the correct command line options for threading: either /MT /MTd /MD or /MDd\"\n#else\n#   error \"Compiler threading support is not turned on. Please set the correct command line options for threading: -openmp\"\n#endif\n\n# elif defined __GNUC__\n//  GNU C++:\n#   error \"Compiler threading support is not turned on. Please set the correct command line options for threading: -pthread (Linux), -pthreads (Solaris) or -mthreads (Mingw32)\"\n\n#elif defined __sgi\n//  SGI MIPSpro C++\n#   error \"Compiler threading support is not turned on. 
Please set the correct command line options for threading: -D_SGI_MP_SOURCE\"\n\n#elif defined __DECCXX\n//  Compaq Tru64 Unix cxx\n#   error \"Compiler threading support is not turned on. Please set the correct command line options for threading: -pthread\"\n\n#elif defined __BORLANDC__\n//  Borland\n#   error \"Compiler threading support is not turned on. Please set the correct command line options for threading: -tWM\"\n\n#elif defined  __MWERKS__\n//  Metrowerks CodeWarrior\n#   error \"Compiler threading support is not turned on. Please set the correct command line options for threading: either -runtime sm, -runtime smd, -runtime dm, or -runtime dmd\"\n\n#elif defined  __SUNPRO_CC\n//  Sun Workshop Compiler C++\n#   error \"Compiler threading support is not turned on. Please set the correct command line options for threading: -mt\"\n\n#elif defined __HP_aCC\n//  HP aCC\n#   error \"Compiler threading support is not turned on. Please set the correct command line options for threading: -mt\"\n\n#elif defined(__IBMCPP__)\n//  IBM Visual Age\n#   error \"Compiler threading support is not turned on. Please compile the code with the xlC_r compiler\"\n\n#elif defined _MSC_VER\n//  Microsoft Visual C++\n//\n//  Must remain the last #elif since some other vendors (Metrowerks, for\n//  example) also #define _MSC_VER\n#  error \"Compiler threading support is not turned on. Please set the correct command line options for threading: either /MT /MTd /MD or /MDd\"\n\n#else\n\n#  error \"Compiler threading support is not turned on.  Please consult your compiler's documentation for the appropriate options to use\"\n\n#endif // compilers\n\n#endif // BOOST_HAS_THREADS\n\n#endif // BOOST_CONFIG_REQUIRES_THREADS_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/select_compiler_config.hpp",
    "content": "//  Boost compiler configuration selection header file\n\n//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Martin Wille 2003.\n//  (C) Copyright Guillaume Melquiond 2003.\n//\n//  Distributed under the Boost Software License, Version 1.0.\n//  (See accompanying file LICENSE_1_0.txt or copy at\n//   http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org/ for most recent version.\n\n// locate which compiler we are using and define\n// BOOST_COMPILER_CONFIG as needed: \n\n#if defined __CUDACC__\n//  NVIDIA CUDA C++ compiler for GPU\n#   include \"boost/config/compiler/nvcc.hpp\"\n\n#endif\n\n#if defined(__GCCXML__)\n// GCC-XML emulates other compilers, it has to appear first here!\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/gcc_xml.hpp\"\n\n#elif defined(_CRAYC)\n// EDG based Cray compiler:\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/cray.hpp\"\n\n#elif defined __COMO__\n//  Comeau C++\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/comeau.hpp\"\n\n#elif defined(__PATHSCALE__) && (__PATHCC__ >= 4)\n// PathScale EKOPath compiler (has to come before clang and gcc)\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/pathscale.hpp\"\n\n#elif defined(__INTEL_COMPILER) || defined(__ICL) || defined(__ICC) || defined(__ECC)\n//  Intel\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/intel.hpp\"\n\n#elif defined __clang__ && !defined(__CUDACC__) && !defined(__ibmxl__)\n// when using clang and cuda at same time, you want to appear as gcc\n//  Clang C++ emulates GCC, so it has to appear early.\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/clang.hpp\"\n\n#elif defined __DMC__\n//  Digital Mars C++\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/digitalmars.hpp\"\n\n# elif defined(__GNUC__) && !defined(__ibmxl__)\n//  GNU C++:\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/gcc.hpp\"\n\n#elif defined __KCC\n//  Kai C++\n#   define BOOST_COMPILER_CONFIG 
\"boost/config/compiler/kai.hpp\"\n\n#elif defined __sgi\n//  SGI MIPSpro C++\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/sgi_mipspro.hpp\"\n\n#elif defined __DECCXX\n//  Compaq Tru64 Unix cxx\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/compaq_cxx.hpp\"\n\n#elif defined __ghs\n//  Greenhills C++\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/greenhills.hpp\"\n\n#elif defined __CODEGEARC__\n//  CodeGear - must be checked for before Borland\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/codegear.hpp\"\n\n#elif defined __BORLANDC__\n//  Borland\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/borland.hpp\"\n\n#elif defined  __MWERKS__\n//  Metrowerks CodeWarrior\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/metrowerks.hpp\"\n\n#elif defined  __SUNPRO_CC\n//  Sun Workshop Compiler C++\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/sunpro_cc.hpp\"\n\n#elif defined __HP_aCC\n//  HP aCC\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/hp_acc.hpp\"\n\n#elif defined(__MRC__) || defined(__SC__)\n//  MPW MrCpp or SCpp\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/mpw.hpp\"\n\n#elif defined(__ibmxl__)\n// IBM XL C/C++ for Linux (Little Endian)\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/xlcpp.hpp\"\n\n#elif defined(__IBMCPP__)\n//  IBM Visual Age or IBM XL C/C++ for Linux (Big Endian)\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/vacpp.hpp\"\n\n#elif defined(__PGI)\n//  Portland Group Inc.\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/pgi.hpp\"\n\n#elif defined _MSC_VER\n//  Microsoft Visual C++\n//\n//  Must remain the last #elif since some other vendors (Metrowerks, for\n//  example) also #define _MSC_VER\n#   define BOOST_COMPILER_CONFIG \"boost/config/compiler/visualc.hpp\"\n\n#elif defined (BOOST_ASSERT_CONFIG)\n// this must come last - generate an error if we don't\n// recognise the compiler:\n#  error \"Unknown compiler - please 
configure (http://www.boost.org/libs/config/config.htm#configuring) and report the results to the main boost mailing list (http://www.boost.org/more/mailing_lists.htm#main)\"\n\n#endif\n\n#if 0\n//\n// This section allows dependency scanners to find all the headers we *might* include:\n//\n#include \"boost/config/compiler/gcc_xml.hpp\"\n#include \"boost/config/compiler/cray.hpp\"\n#include \"boost/config/compiler/comeau.hpp\"\n#include \"boost/config/compiler/pathscale.hpp\"\n#include \"boost/config/compiler/intel.hpp\"\n#include \"boost/config/compiler/clang.hpp\"\n#include \"boost/config/compiler/digitalmars.hpp\"\n#include \"boost/config/compiler/gcc.hpp\"\n#include \"boost/config/compiler/kai.hpp\"\n#include \"boost/config/compiler/sgi_mipspro.hpp\"\n#include \"boost/config/compiler/compaq_cxx.hpp\"\n#include \"boost/config/compiler/greenhills.hpp\"\n#include \"boost/config/compiler/codegear.hpp\"\n#include \"boost/config/compiler/borland.hpp\"\n#include \"boost/config/compiler/metrowerks.hpp\"\n#include \"boost/config/compiler/sunpro_cc.hpp\"\n#include \"boost/config/compiler/hp_acc.hpp\"\n#include \"boost/config/compiler/mpw.hpp\"\n#include \"boost/config/compiler/vacpp.hpp\"\n#include \"boost/config/compiler/pgi.hpp\"\n#include \"boost/config/compiler/visualc.hpp\"\n\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/select_platform_config.hpp",
    "content": "//  Boost compiler configuration selection header file\n\n//  (C) Copyright John Maddock 2001 - 2002. \n//  (C) Copyright Jens Maurer 2001. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n// locate which platform we are on and define BOOST_PLATFORM_CONFIG as needed.\n// Note that we define the headers to include using \"header_name\" not\n// <header_name> in order to prevent macro expansion within the header\n// name (for example \"linux\" is a macro on linux systems).\n\n#if (defined(linux) || defined(__linux) || defined(__linux__) || defined(__GNU__) || defined(__GLIBC__)) && !defined(_CRAYC)\n// linux, also other platforms (Hurd etc) that use GLIBC, should these really have their own config headers though?\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/linux.hpp\"\n\n#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)\n// BSD:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/bsd.hpp\"\n\n#elif defined(sun) || defined(__sun)\n// solaris:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/solaris.hpp\"\n\n#elif defined(__sgi)\n// SGI Irix:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/irix.hpp\"\n\n#elif defined(__hpux)\n// hp unix:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/hpux.hpp\"\n\n#elif defined(__CYGWIN__)\n// cygwin is not win32:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/cygwin.hpp\"\n\n#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)\n// win32:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/win32.hpp\"\n\n#elif defined(__HAIKU__)\n// Haiku\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/haiku.hpp\"\n\n#elif defined(__BEOS__)\n// BeOS\n#  define BOOST_PLATFORM_CONFIG 
\"boost/config/platform/beos.hpp\"\n\n#elif defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)\n// MacOS\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/macos.hpp\"\n\n#elif defined(__IBMCPP__) || defined(_AIX)\n// IBM\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/aix.hpp\"\n\n#elif defined(__amigaos__)\n// AmigaOS\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/amigaos.hpp\"\n\n#elif defined(__QNXNTO__)\n// QNX:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/qnxnto.hpp\"\n\n#elif defined(__VXWORKS__)\n// vxWorks:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/vxworks.hpp\"\n\n#elif defined(__SYMBIAN32__) \n// Symbian: \n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/symbian.hpp\" \n\n#elif defined(_CRAYC)\n// Cray:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/cray.hpp\" \n\n#elif defined(__VMS) \n// VMS:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/vms.hpp\" \n\n#elif defined(__CloudABI__)\n// Nuxi CloudABI:\n#  define BOOST_PLATFORM_CONFIG \"boost/config/platform/cloudabi.hpp\"\n#else\n\n#  if defined(unix) \\\n      || defined(__unix) \\\n      || defined(_XOPEN_SOURCE) \\\n      || defined(_POSIX_SOURCE)\n\n   // generic unix platform:\n\n#  ifndef BOOST_HAS_UNISTD_H\n#     define BOOST_HAS_UNISTD_H\n#  endif\n\n#  include <boost/config/posix_features.hpp>\n\n#  endif\n\n#  if defined (BOOST_ASSERT_CONFIG)\n      // this must come last - generate an error if we don't\n      // recognise the platform:\n#     error \"Unknown platform - please configure and report the results to boost.org\"\n#  endif\n\n#endif\n\n#if 0\n//\n// This section allows dependency scanners to find all the files we *might* include:\n//\n#  include \"boost/config/platform/linux.hpp\"\n#  include \"boost/config/platform/bsd.hpp\"\n#  include \"boost/config/platform/solaris.hpp\"\n#  include \"boost/config/platform/irix.hpp\"\n#  include \"boost/config/platform/hpux.hpp\"\n#  include 
\"boost/config/platform/cygwin.hpp\"\n#  include \"boost/config/platform/win32.hpp\"\n#  include \"boost/config/platform/beos.hpp\"\n#  include \"boost/config/platform/macos.hpp\"\n#  include \"boost/config/platform/aix.hpp\"\n#  include \"boost/config/platform/amigaos.hpp\"\n#  include \"boost/config/platform/qnxnto.hpp\"\n#  include \"boost/config/platform/vxworks.hpp\"\n#  include \"boost/config/platform/symbian.hpp\" \n#  include \"boost/config/platform/cray.hpp\" \n#  include \"boost/config/platform/vms.hpp\" \n#  include <boost/config/posix_features.hpp>\n\n\n\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/select_stdlib_config.hpp",
    "content": "//  Boost compiler configuration selection header file\n\n//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Jens Maurer 2001 - 2002. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n\n//  See http://www.boost.org for most recent version.\n\n// locate which std lib we are using and define BOOST_STDLIB_CONFIG as needed:\n\n// First include <cstddef> to determine if some version of STLport is in use as the std lib\n// (do not rely on this header being included since users can short-circuit this header \n//  if they know whose std lib they are using.)\n#ifdef __cplusplus\n#  include <cstddef>\n#else\n#  include <stddef.h>\n#endif\n\n#if defined(__SGI_STL_PORT) || defined(_STLPORT_VERSION)\n// STLPort library; this _must_ come first, otherwise since\n// STLport typically sits on top of some other library, we\n// can end up detecting that first rather than STLport:\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/stlport.hpp\"\n\n#else\n\n// If our std lib was not some version of STLport, and has not otherwise\n// been detected, then include <utility> as it is about \n// the smallest of the std lib headers that includes real C++ stuff.\n// Some std libs do not include their C++-related macros in <cstddef> \n// so this additional include makes sure we get those definitions.\n// Note: do not rely on this header being included since users can short-circuit this \n// #include if they know whose std lib they are using.\n#if !defined(__LIBCOMO__) && !defined(__STD_RWCOMPILER_H__) && !defined(_RWSTD_VER)\\\n   && !defined(_LIBCPP_VERSION) && !defined(__GLIBCPP__) && !defined(__GLIBCXX__)\\\n   && !defined(__STL_CONFIG_H) && !defined(__MSL_CPP__) && !defined(__IBMCPP__)\\\n   && !defined(MSIPL_COMPILE_H) && !defined(_YVALS) && !defined(_CPPLIB_VER)\n#include <utility>\n#endif\n\n#if 
defined(__LIBCOMO__)\n// Comeau STL:\n#define BOOST_STDLIB_CONFIG \"boost/config/stdlib/libcomo.hpp\"\n\n#elif defined(__STD_RWCOMPILER_H__) || defined(_RWSTD_VER)\n// Rogue Wave library:\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/roguewave.hpp\"\n\n#elif defined(_LIBCPP_VERSION)\n// libc++\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/libcpp.hpp\"\n\n#elif defined(__GLIBCPP__) || defined(__GLIBCXX__)\n// GNU libstdc++ 3\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/libstdcpp3.hpp\"\n\n#elif defined(__STL_CONFIG_H)\n// generic SGI STL\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/sgi.hpp\"\n\n#elif defined(__MSL_CPP__)\n// MSL standard lib:\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/msl.hpp\"\n\n#elif defined(__IBMCPP__)\n// take the default VACPP std lib\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/vacpp.hpp\"\n\n#elif defined(MSIPL_COMPILE_H)\n// Modena C++ standard library\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/modena.hpp\"\n\n#elif (defined(_YVALS) && !defined(__IBMCPP__)) || defined(_CPPLIB_VER)\n// Dinkumware Library (this has to appear after any possible replacement libraries):\n#  define BOOST_STDLIB_CONFIG \"boost/config/stdlib/dinkumware.hpp\"\n\n#elif defined (BOOST_ASSERT_CONFIG)\n// this must come last - generate an error if we don't\n// recognise the library:\n#  error \"Unknown standard library - please configure and report the results to boost.org\"\n\n#endif\n\n#endif\n\n#if 0\n//\n// This section allows dependency scanners to find all the files we *might* include:\n//\n#  include \"boost/config/stdlib/stlport.hpp\"\n#  include \"boost/config/stdlib/libcomo.hpp\"\n#  include \"boost/config/stdlib/roguewave.hpp\"\n#  include \"boost/config/stdlib/libcpp.hpp\"\n#  include \"boost/config/stdlib/libstdcpp3.hpp\"\n#  include \"boost/config/stdlib/sgi.hpp\"\n#  include \"boost/config/stdlib/msl.hpp\"\n#  include \"boost/config/stdlib/vacpp.hpp\"\n#  include 
\"boost/config/stdlib/modena.hpp\"\n#  include \"boost/config/stdlib/dinkumware.hpp\"\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/dinkumware.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003.\n//  (C) Copyright Jens Maurer 2001.\n//  (C) Copyright Peter Dimov 2001.\n//  (C) Copyright David Abrahams 2002.\n//  (C) Copyright Guillaume Melquiond 2003.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Dinkumware standard library config:\n\n#if !defined(_YVALS) && !defined(_CPPLIB_VER)\n#include <boost/config/no_tr1/utility.hpp>\n#if !defined(_YVALS) && !defined(_CPPLIB_VER)\n#error This is not the Dinkumware lib!\n#endif\n#endif\n\n\n#if defined(_CPPLIB_VER) && (_CPPLIB_VER >= 306)\n   // full dinkumware 3.06 and above\n   // fully conforming provided the compiler supports it:\n#  if !(defined(_GLOBAL_USING) && (_GLOBAL_USING+0 > 0)) && !defined(__BORLANDC__) && !defined(_STD) && !(defined(__ICC) && (__ICC >= 700))   // can be defined in yvals.h\n#     define BOOST_NO_STDC_NAMESPACE\n#  endif\n#  if !(defined(_HAS_MEMBER_TEMPLATES_REBIND) && (_HAS_MEMBER_TEMPLATES_REBIND+0 > 0)) && !(defined(_MSC_VER) && (_MSC_VER > 1300)) && defined(BOOST_MSVC)\n#     define BOOST_NO_STD_ALLOCATOR\n#  endif\n#  define BOOST_HAS_PARTIAL_STD_ALLOCATOR\n#  if defined(BOOST_MSVC) && (BOOST_MSVC < 1300)\n      // if this lib version is set up for vc6 then there is no std::use_facet:\n#     define BOOST_NO_STD_USE_FACET\n#     define BOOST_HAS_TWO_ARG_USE_FACET\n      // C lib functions aren't in namespace std either:\n#     define BOOST_NO_STDC_NAMESPACE\n      // and nor is <exception>\n#     define BOOST_NO_EXCEPTION_STD_NAMESPACE\n#  endif\n// There's no numeric_limits<long long> support unless _LONGLONG is defined:\n#  if !defined(_LONGLONG) && (_CPPLIB_VER <= 310)\n#     define BOOST_NO_MS_INT64_NUMERIC_LIMITS\n#  endif\n// 3.06 appears to have (non-sgi versions of) <hash_set> & <hash_map>,\n// and no <slist> 
at all\n#else\n#  define BOOST_MSVC_STD_ITERATOR 1\n#  define BOOST_NO_STD_ITERATOR\n#  define BOOST_NO_TEMPLATED_ITERATOR_CONSTRUCTORS\n#  define BOOST_NO_STD_ALLOCATOR\n#  define BOOST_NO_STDC_NAMESPACE\n#  define BOOST_NO_STD_USE_FACET\n#  define BOOST_NO_STD_OUTPUT_ITERATOR_ASSIGN\n#  define BOOST_HAS_MACRO_USE_FACET\n#  ifndef _CPPLIB_VER\n      // Updated Dinkum library defines this, and provides\n      // its own min and max definitions, as does MTA version.\n#     ifndef __MTA__ \n#        define BOOST_NO_STD_MIN_MAX\n#     endif\n#     define BOOST_NO_MS_INT64_NUMERIC_LIMITS\n#  endif\n#endif\n\n//\n// std extension namespace is stdext for vc7.1 and later, \n// the same applies to other compilers that sit on top\n// of vc7.1 (Intel and Comeau):\n//\n#if defined(_MSC_VER) && (_MSC_VER >= 1310) && !defined(__BORLANDC__)\n#  define BOOST_STD_EXTENSION_NAMESPACE stdext\n#endif\n\n\n#if (defined(_MSC_VER) && (_MSC_VER <= 1300) && !defined(__BORLANDC__)) || !defined(_CPPLIB_VER) || (_CPPLIB_VER < 306)\n   // if we're using a dinkum lib that's\n   // been configured for VC6/7 then there is\n   // no iterator traits (true even for icl)\n#  define BOOST_NO_STD_ITERATOR_TRAITS\n#endif\n\n#if defined(__ICL) && (__ICL < 800) && defined(_CPPLIB_VER) && (_CPPLIB_VER <= 310)\n// Intel C++ chokes over any non-trivial use of <locale>\n// this may be an overly restrictive define, but regex fails without it:\n#  define BOOST_NO_STD_LOCALE\n#endif\n\n// Fix for VC++ 8.0 on up ( I do not have a previous version to test )\n// or clang-cl. If exceptions are off you must manually include the \n// <exception> header before including the <typeinfo> header. 
Admittedly \n// trying to use Boost libraries or the standard C++ libraries without \n// exception support is not suggested but currently clang-cl ( v 3.4 ) \n// does not support exceptions and must be compiled with exceptions off.\n#if !_HAS_EXCEPTIONS && ((defined(BOOST_MSVC) && BOOST_MSVC >= 1400) || (defined(__clang__) && defined(_MSC_VER)))\n#include <exception>\n#endif\n#include <typeinfo>\n#if ( (!_HAS_EXCEPTIONS && !defined(__ghs__)) || (!_HAS_NAMESPACE && defined(__ghs__)) ) && !defined(__TI_COMPILER_VERSION__) && !defined(__VISUALDSPVERSION__)\n#  define BOOST_NO_STD_TYPEINFO\n#endif  \n\n//  C++0x headers implemented in 520 (as shipped by Microsoft)\n//\n#if !defined(_CPPLIB_VER) || _CPPLIB_VER < 520\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_SMART_PTR\n#endif\n\n#if ((!defined(_HAS_TR1_IMPORTS) || (_HAS_TR1_IMPORTS+0 == 0)) && !defined(BOOST_NO_CXX11_HDR_TUPLE)) \\\n  && (!defined(_CPPLIB_VER) || _CPPLIB_VER < 610)\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#endif\n\n//  C++0x headers implemented in 540 (as shipped by Microsoft)\n//\n#if !defined(_CPPLIB_VER) || _CPPLIB_VER < 540\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#endif\n\n//  C++0x headers implemented in 610 (as shipped by 
Microsoft)\n//\n#if !defined(_CPPLIB_VER) || _CPPLIB_VER < 610\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_ALLOCATOR\n// 540 has std::align but it is not a conforming implementation\n#  define BOOST_NO_CXX11_STD_ALIGN\n#endif\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#elif !defined(_CPPLIB_VER) || (_CPPLIB_VER < 650)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n#if defined(BOOST_INTEL) && (BOOST_INTEL <= 1400)\n// Intel's compiler can't handle this header yet:\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#endif\n\n\n//  520..610 have std::addressof, but it doesn't support functions\n//\n#if !defined(_CPPLIB_VER) || _CPPLIB_VER < 650\n#  define BOOST_NO_CXX11_ADDRESSOF\n#endif\n\n// Bug specific to VC14, \n// See https://connect.microsoft.com/VisualStudio/feedback/details/1348277/link-error-when-using-std-codecvt-utf8-utf16-char16-t\n// and discussion here: http://blogs.msdn.com/b/vcblog/archive/2014/11/12/visual-studio-2015-preview-now-available.aspx?PageIndex=2\n#if defined(_CPPLIB_VER) && (_CPPLIB_VER == 650)\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#endif\n\n#if defined(_CPPLIB_VER) && (_CPPLIB_VER >= 650)\n// If _HAS_AUTO_PTR_ETC is defined to 0, std::auto_ptr is not available.\n// See https://www.visualstudio.com/en-us/news/vs2015-vs.aspx#C++\n// and http://blogs.msdn.com/b/vcblog/archive/2015/06/19/c-11-14-17-features-in-vs-2015-rtm.aspx\n#  if defined(_HAS_AUTO_PTR_ETC) && (_HAS_AUTO_PTR_ETC == 0)\n#    define BOOST_NO_AUTO_PTR\n#  endif\n#endif\n\n#ifdef _CPPLIB_VER\n#  define BOOST_DINKUMWARE_STDLIB _CPPLIB_VER\n#else\n#  define BOOST_DINKUMWARE_STDLIB 1\n#endif\n\n#ifdef _CPPLIB_VER\n#  define BOOST_STDLIB \"Dinkumware standard library version \" BOOST_STRINGIZE(_CPPLIB_VER)\n#else\n#  define BOOST_STDLIB \"Dinkumware standard library version 
1.x\"\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/libcomo.hpp",
    "content": "//  (C) Copyright John Maddock 2002 - 2003. \n//  (C) Copyright Jens Maurer 2002 - 2003. \n//  (C) Copyright Beman Dawes 2002 - 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Comeau STL:\n\n#if !defined(__LIBCOMO__)\n#  include <boost/config/no_tr1/utility.hpp>\n#  if !defined(__LIBCOMO__)\n#      error \"This is not the Comeau STL!\"\n#  endif\n#endif\n\n//\n// std::streambuf<wchar_t> is non-standard\n// NOTE: versions of libcomo prior to beta28 have octal version numbering,\n// e.g. version 25 is 21 (dec)\n#if __LIBCOMO_VERSION__ <= 22\n#  define BOOST_NO_STD_WSTREAMBUF\n#endif\n\n#if (__LIBCOMO_VERSION__ <= 31) && defined(_WIN32)\n#define BOOST_NO_SWPRINTF\n#endif\n\n#if __LIBCOMO_VERSION__ >= 31\n#  define BOOST_HAS_HASH\n#  define BOOST_HAS_SLIST\n#endif\n\n//  C++0x headers not yet implemented\n//\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_ALLOCATOR\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define 
BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_STD_ALIGN\n#  define BOOST_NO_CXX11_ADDRESSOF\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#else\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n//\n// Intrinsic type_traits support.\n// The SGI STL has it's own __type_traits class, which\n// has intrinsic compiler support with SGI's compilers.\n// Whatever map SGI style type traits to boost equivalents:\n//\n#define BOOST_HAS_SGI_TYPE_TRAITS\n\n#define BOOST_STDLIB \"Comeau standard library \" BOOST_STRINGIZE(__LIBCOMO_VERSION__)\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/libcpp.hpp",
    "content": "//  (C) Copyright Christopher Jefferson 2011.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  config for libc++\n//  Might need more in here later.\n\n#if !defined(_LIBCPP_VERSION)\n#  include <ciso646>\n#  if !defined(_LIBCPP_VERSION)\n#      error \"This is not libc++!\"\n#  endif\n#endif\n\n#define BOOST_STDLIB \"libc++ version \" BOOST_STRINGIZE(_LIBCPP_VERSION)\n\n#define BOOST_HAS_THREADS\n\n#ifdef _LIBCPP_HAS_NO_VARIADICS\n#    define BOOST_NO_CXX11_HDR_TUPLE\n#endif\n\n// BOOST_NO_CXX11_ALLOCATOR should imply no support for the C++11\n// allocator model. The C++11 allocator model requires a conforming\n// std::allocator_traits which is only possible with C++11 template\n// aliases since members rebind_alloc and rebind_traits require it.\n#if defined(_LIBCPP_HAS_NO_TEMPLATE_ALIASES)\n#    define BOOST_NO_CXX11_ALLOCATOR\n#endif\n\n#if __cplusplus < 201103\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_ALLOCATOR\n#  define BOOST_NO_CXX11_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define BOOST_NO_CXX11_STD_ALIGN\n#  define BOOST_NO_CXX11_ADDRESSOF\n#endif\n\n//\n// These appear to be unusable/incomplete so 
far:\n//\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n\n// libc++ uses a non-standard messages_base\n#define BOOST_NO_STD_MESSAGES\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus <= 201103\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n//  --- end ---\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/libstdcpp3.hpp",
    "content": "//  (C) Copyright John Maddock 2001.\n//  (C) Copyright Jens Maurer 2001.\n//  Use, modification and distribution are subject to the\n//  Boost Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  config for libstdc++ v3\n//  not much to go in here:\n\n#define BOOST_GNU_STDLIB 1\n\n#ifdef __GLIBCXX__\n#define BOOST_STDLIB \"GNU libstdc++ version \" BOOST_STRINGIZE(__GLIBCXX__)\n#else\n#define BOOST_STDLIB \"GNU libstdc++ version \" BOOST_STRINGIZE(__GLIBCPP__)\n#endif\n\n#if !defined(_GLIBCPP_USE_WCHAR_T) && !defined(_GLIBCXX_USE_WCHAR_T)\n#  define BOOST_NO_CWCHAR\n#  define BOOST_NO_CWCTYPE\n#  define BOOST_NO_STD_WSTRING\n#  define BOOST_NO_STD_WSTREAMBUF\n#endif\n\n#if defined(__osf__) && !defined(_REENTRANT) \\\n  && ( defined(_GLIBCXX_HAVE_GTHR_DEFAULT) || defined(_GLIBCPP_HAVE_GTHR_DEFAULT) )\n// GCC 3 on Tru64 forces the definition of _REENTRANT when any std lib header\n// file is included, therefore for consistency we define it here as well.\n#  define _REENTRANT\n#endif\n\n#ifdef __GLIBCXX__ // gcc 3.4 and greater:\n#  if defined(_GLIBCXX_HAVE_GTHR_DEFAULT) \\\n        || defined(_GLIBCXX__PTHREADS) \\\n        || defined(_GLIBCXX_HAS_GTHREADS) \\\n        || defined(_WIN32) \\\n        || defined(_AIX) \\\n        || defined(__HAIKU__)\n      //\n      // If the std lib has thread support turned on, then turn it on in Boost\n      // as well.  
We do this because some gcc-3.4 std lib headers define _REENTANT\n      // while others do not...\n      //\n#     define BOOST_HAS_THREADS\n#  else\n#     define BOOST_DISABLE_THREADS\n#  endif\n#elif defined(__GLIBCPP__) \\\n        && !defined(_GLIBCPP_HAVE_GTHR_DEFAULT) \\\n        && !defined(_GLIBCPP__PTHREADS)\n   // disable thread support if the std lib was built single threaded:\n#  define BOOST_DISABLE_THREADS\n#endif\n\n#if (defined(linux) || defined(__linux) || defined(__linux__)) && defined(__arm__) && defined(_GLIBCPP_HAVE_GTHR_DEFAULT)\n// linux on arm apparently doesn't define _REENTRANT\n// so just turn on threading support whenever the std lib is thread safe:\n#  define BOOST_HAS_THREADS\n#endif\n\n#if !defined(_GLIBCPP_USE_LONG_LONG) \\\n    && !defined(_GLIBCXX_USE_LONG_LONG)\\\n    && defined(BOOST_HAS_LONG_LONG)\n// May have been set by compiler/*.hpp, but \"long long\" without library\n// support is useless.\n#  undef BOOST_HAS_LONG_LONG\n#endif\n\n// Apple doesn't seem to reliably defined a *unix* macro\n#if !defined(CYGWIN) && (  defined(__unix__)  \\\n                        || defined(__unix)    \\\n                        || defined(unix)      \\\n                        || defined(__APPLE__) \\\n                        || defined(__APPLE)   \\\n                        || defined(APPLE))\n#  include <unistd.h>\n#endif\n\n#if defined(__GLIBCXX__) || (defined(__GLIBCPP__) && __GLIBCPP__>=20020514) // GCC >= 3.1.0\n#  define BOOST_STD_EXTENSION_NAMESPACE __gnu_cxx\n#  define BOOST_HAS_SLIST\n#  define BOOST_HAS_HASH\n#  define BOOST_SLIST_HEADER <ext/slist>\n# if !defined(__GNUC__) || __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)\n#   define BOOST_HASH_SET_HEADER <ext/hash_set>\n#   define BOOST_HASH_MAP_HEADER <ext/hash_map>\n# else\n#   define BOOST_HASH_SET_HEADER <backward/hash_set>\n#   define BOOST_HASH_MAP_HEADER <backward/hash_map>\n# endif\n#endif\n\n//\n// Decide whether we have C++11 support turned on:\n//\n#if 
defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103)\n#  define BOOST_LIBSTDCXX11\n#endif\n//\n//  Decide which version of libstdc++ we have, normally\n//  stdlibc++ C++0x support is detected via __GNUC__, __GNUC_MINOR__, and possibly\n//  __GNUC_PATCHLEVEL__ at the suggestion of Jonathan Wakely, one of the stdlibc++\n//  developers. He also commented:\n//\n//       \"I'm not sure how useful __GLIBCXX__ is for your purposes, for instance in\n//       GCC 4.2.4 it is set to 20080519 but in GCC 4.3.0 it is set to 20080305.\n//       Although 4.3.0 was released earlier than 4.2.4, it has better C++0x support\n//       than any release in the 4.2 series.\"\n//\n//  Another resource for understanding stdlibc++ features is:\n//  http://gcc.gnu.org/onlinedocs/libstdc++/manual/status.html#manual.intro.status.standard.200x\n//\n//  However, using the GCC version number fails when the compiler is clang since this\n//  only ever claims to emulate GCC-4.2, see https://svn.boost.org/trac/boost/ticket/7473\n//  for a long discussion on this issue.  
What we can do though is use clang's __has_include\n//  to detect the presence of a C++11 header that was introduced with a specific GCC release.\n//  We still have to be careful though as many such headers were buggy and/or incomplete when\n//  first introduced, so we only check for headers that were fully featured from day 1, and then\n//  use that to infer the underlying GCC version:\n//\n#ifdef __clang__\n\n#if __has_include(<experimental/any>)\n#  define BOOST_LIBSTDCXX_VERSION 50100\n#elif __has_include(<shared_mutex>)\n#  define BOOST_LIBSTDCXX_VERSION 40900\n#elif __has_include(<ext/cmath>)\n#  define BOOST_LIBSTDCXX_VERSION 40800\n#elif __has_include(<scoped_allocator>)\n#  define BOOST_LIBSTDCXX_VERSION 40700\n#elif __has_include(<typeindex>)\n#  define BOOST_LIBSTDCXX_VERSION 40600\n#elif __has_include(<future>)\n#  define BOOST_LIBSTDCXX_VERSION 40500\n#elif  __has_include(<ratio>)\n#  define BOOST_LIBSTDCXX_VERSION 40400\n#elif __has_include(<array>)\n#  define BOOST_LIBSTDCXX_VERSION 40300\n#endif\n//\n//  GCC 4.8 and 9 add working versions of <atomic> and <regex> respectively.\n//  However, we have no test for these as the headers were present but broken\n//  in early GCC versions.\n//\n#endif\n\n#if defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x5130) && (__cplusplus >= 201103L)\n//\n// Oracle Solaris compiler uses it's own verison of libstdc++ but doesn't \n// set __GNUC__\n//\n#define BOOST_LIBSTDCXX_VERSION 40800\n#endif\n\n#if !defined(BOOST_LIBSTDCXX_VERSION)\n#  define BOOST_LIBSTDCXX_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)\n#endif\n\n//  C++0x headers in GCC 4.3.0 and later\n//\n#if (BOOST_LIBSTDCXX_VERSION < 40300) || !defined(BOOST_LIBSTDCXX11)\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#endif\n\n//  C++0x headers in GCC 4.4.0 and later\n//\n#if 
(BOOST_LIBSTDCXX_VERSION < 40400) || !defined(BOOST_LIBSTDCXX11)\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_SMART_PTR\n#else\n#  define BOOST_HAS_TR1_COMPLEX_INVERSE_TRIG \n#  define BOOST_HAS_TR1_COMPLEX_OVERLOADS \n#endif\n\n//  C++0x features in GCC 4.5.0 and later\n//\n#if (BOOST_LIBSTDCXX_VERSION < 40500) || !defined(BOOST_LIBSTDCXX11)\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#endif\n\n//  C++0x features in GCC 4.6.0 and later\n//\n#if (BOOST_LIBSTDCXX_VERSION < 40600) || !defined(BOOST_LIBSTDCXX11)\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_ADDRESSOF\n#endif\n\n//  C++0x features in GCC 4.7.0 and later\n//\n#if (BOOST_LIBSTDCXX_VERSION < 40700) || !defined(BOOST_LIBSTDCXX11)\n// Note that although <chrono> existed prior to 4.7, \"steady_clock\" is spelled \"monotonic_clock\"\n// so 4.7.0 is the first truely conforming one.\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_ALLOCATOR\n#endif\n//  C++0x features in GCC 4.8.0 and later\n//\n#if (BOOST_LIBSTDCXX_VERSION < 40800) || !defined(BOOST_LIBSTDCXX11)\n// Note that although <atomic> existed prior to gcc 4.8 it was largely unimplemented for many types:\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_HDR_THREAD\n#endif\n//  C++0x features in GCC 4.9.0 and later\n//\n#if (BOOST_LIBSTDCXX_VERSION < 40900) || !defined(BOOST_LIBSTDCXX11)\n// Although <regex> is present and compilable against, the actual implementation is not functional\n// even for the simplest patterns such as \"\\d\" or \"[0-9]\". 
This is the case at least in gcc up to 4.8, inclusively.\n#  define BOOST_NO_CXX11_HDR_REGEX\n#endif\n\n#if defined(__clang_major__) && ((__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 7)))\n// As of clang-3.6, libstdc++ header <atomic> throws up errors with clang:\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#endif\n//\n//  C++0x features in GCC 5.1 and later\n//\n#if (BOOST_LIBSTDCXX_VERSION < 50100) || !defined(BOOST_LIBSTDCXX11)\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_STD_ALIGN\n#endif\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus <= 201103\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#elif __cplusplus < 201402 || (BOOST_LIBSTDCXX_VERSION < 40900) || !defined(BOOST_LIBSTDCXX11)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n//\n// Headers not present on Solaris with the Oracle compiler:\n#if defined(__SUNPRO_CC)\n#define BOOST_NO_CXX11_HDR_FUTURE\n#define BOOST_NO_CXX11_HDR_FORWARD_LIST \n#define BOOST_NO_CXX11_HDR_ATOMIC\n// shared_ptr is present, but is not convertible to bool\n// which causes all kinds of problems especially in Boost.Thread\n// but probably elsewhere as well.\n#define BOOST_NO_CXX11_SMART_PTR\n#endif\n\n#if (!defined(_GLIBCXX_HAS_GTHREADS) || !defined(_GLIBCXX_USE_C99_STDINT_TR1))\n   // Headers not always available:\n#  ifndef BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#     define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  endif\n#  ifndef BOOST_NO_CXX11_HDR_MUTEX\n#     define BOOST_NO_CXX11_HDR_MUTEX\n#  endif\n#  ifndef BOOST_NO_CXX11_HDR_THREAD\n#     define BOOST_NO_CXX11_HDR_THREAD\n#  endif\n#  ifndef BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#     define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#  endif\n#endif\n\n#if (!defined(_GTHREAD_USE_MUTEX_TIMEDLOCK) || (_GTHREAD_USE_MUTEX_TIMEDLOCK == 0)) && 
!defined(BOOST_NO_CXX11_HDR_MUTEX)\n// Timed mutexes are not always available:\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#endif\n\n//  --- end ---\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/modena.hpp",
    "content": "//  (C) Copyright Jens Maurer 2001. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Modena C++ standard library (comes with KAI C++)\n\n#if !defined(MSIPL_COMPILE_H)\n#  include <boost/config/no_tr1/utility.hpp>\n#  if !defined(__MSIPL_COMPILE_H)\n#      error \"This is not the Modena C++ library!\"\n#  endif\n#endif\n\n#ifndef MSIPL_NL_TYPES\n#define BOOST_NO_STD_MESSAGES\n#endif\n\n#ifndef MSIPL_WCHART\n#define BOOST_NO_STD_WSTRING\n#endif\n\n//  C++0x headers not yet implemented\n//\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_ALLOCATOR\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_STD_ALIGN\n#  define BOOST_NO_CXX11_ADDRESSOF\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#else\n#  define 
BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n#define BOOST_STDLIB \"Modena C++ standard library\"\n\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/msl.hpp",
    "content": "//  (C) Copyright John Maddock 2001. \n//  (C) Copyright Darin Adler 2001. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Metrowerks standard library:\n\n#ifndef __MSL_CPP__\n#  include <boost/config/no_tr1/utility.hpp>\n#  ifndef __MSL_CPP__\n#     error This is not the MSL standard library!\n#  endif\n#endif\n\n#if __MSL_CPP__ >= 0x6000  // Pro 6\n#  define BOOST_HAS_HASH\n#  define BOOST_STD_EXTENSION_NAMESPACE Metrowerks\n#endif\n#define BOOST_HAS_SLIST\n\n#if __MSL_CPP__ < 0x6209\n#  define BOOST_NO_STD_MESSAGES\n#endif\n\n// check C lib version for <stdint.h>\n#include <cstddef>\n\n#if defined(__MSL__) && (__MSL__ >= 0x5000)\n#  define BOOST_HAS_STDINT_H\n#  if !defined(__PALMOS_TRAPS__)\n#    define BOOST_HAS_UNISTD_H\n#  endif\n   // boilerplate code:\n#  include <boost/config/posix_features.hpp>\n#endif\n\n#if defined(_MWMT) || _MSL_THREADSAFE\n#  define BOOST_HAS_THREADS\n#endif\n\n#ifdef _MSL_NO_EXPLICIT_FUNC_TEMPLATE_ARG\n#  define BOOST_NO_STD_USE_FACET\n#  define BOOST_HAS_TWO_ARG_USE_FACET\n#endif\n\n//  C++0x headers not yet implemented\n//\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define 
BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_ALLOCATOR\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_STD_ALIGN\n#  define BOOST_NO_CXX11_ADDRESSOF\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#else\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n#define BOOST_STDLIB \"Metrowerks Standard Library version \" BOOST_STRINGIZE(__MSL_CPP__)\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/roguewave.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Jens Maurer 2001. \n//  (C) Copyright David Abrahams 2003. \n//  (C) Copyright Boris Gubenko 2007. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  Rogue Wave std lib:\n\n#define BOOST_RW_STDLIB 1 \n\n#if !defined(__STD_RWCOMPILER_H__) && !defined(_RWSTD_VER)\n#  include <boost/config/no_tr1/utility.hpp>\n#  if !defined(__STD_RWCOMPILER_H__) && !defined(_RWSTD_VER)\n#     error This is not the Rogue Wave standard library\n#  endif\n#endif\n//\n// figure out a consistent version number:\n//\n#ifndef _RWSTD_VER\n#  define BOOST_RWSTD_VER 0x010000\n#elif _RWSTD_VER < 0x010000\n#  define BOOST_RWSTD_VER (_RWSTD_VER << 8)\n#else\n#  define BOOST_RWSTD_VER _RWSTD_VER\n#endif\n\n#ifndef _RWSTD_VER\n#  define BOOST_STDLIB \"Rogue Wave standard library version (Unknown version)\"\n#elif _RWSTD_VER < 0x04010200\n #  define BOOST_STDLIB \"Rogue Wave standard library version \" BOOST_STRINGIZE(_RWSTD_VER)\n#else\n#  ifdef _RWSTD_VER_STR\n#    define BOOST_STDLIB \"Apache STDCXX standard library version \" _RWSTD_VER_STR\n#  else\n#    define BOOST_STDLIB \"Apache STDCXX standard library version \" BOOST_STRINGIZE(_RWSTD_VER)\n#  endif\n#endif\n\n//\n// Prior to version 2.2.0 the primary template for std::numeric_limits\n// does not have compile time constants, even though specializations of that\n// template do:\n//\n#if BOOST_RWSTD_VER < 0x020200\n#  define BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS\n#endif\n\n// Sun CC 5.5 patch 113817-07 adds long long specialization, but does not change the\n// library version number (http://sunsolve6.sun.com/search/document.do?assetkey=1-21-113817):\n#if BOOST_RWSTD_VER <= 0x020101 && (!defined(__SUNPRO_CC) || (__SUNPRO_CC < 0x550))\n#  define 
BOOST_NO_LONG_LONG_NUMERIC_LIMITS\n# endif\n\n//\n// Borland version of numeric_limits lacks __int64 specialisation:\n//\n#ifdef __BORLANDC__\n#  define BOOST_NO_MS_INT64_NUMERIC_LIMITS\n#endif\n\n//\n// No std::iterator if it can't figure out default template args:\n//\n#if defined(_RWSTD_NO_SIMPLE_DEFAULT_TEMPLATES) || defined(RWSTD_NO_SIMPLE_DEFAULT_TEMPLATES) || (BOOST_RWSTD_VER < 0x020000)\n#  define BOOST_NO_STD_ITERATOR\n#endif\n\n//\n// No iterator traits without partial specialization:\n//\n#if defined(_RWSTD_NO_CLASS_PARTIAL_SPEC) || defined(RWSTD_NO_CLASS_PARTIAL_SPEC)\n#  define BOOST_NO_STD_ITERATOR_TRAITS\n#endif\n\n//\n// Prior to version 2.0, std::auto_ptr was buggy, and there were no\n// new-style iostreams, and no conformant std::allocator:\n//\n#if (BOOST_RWSTD_VER < 0x020000)\n#  define BOOST_NO_AUTO_PTR\n#  define BOOST_NO_STRINGSTREAM\n#  define BOOST_NO_STD_ALLOCATOR\n#  define BOOST_NO_STD_LOCALE\n#endif\n\n//\n// No template iterator constructors without member template support:\n//\n#if defined(RWSTD_NO_MEMBER_TEMPLATES) || defined(_RWSTD_NO_MEMBER_TEMPLATES)\n#  define BOOST_NO_TEMPLATED_ITERATOR_CONSTRUCTORS\n#endif\n\n//\n// RW defines _RWSTD_ALLOCATOR if the allocator is conformant and in use\n// (the or _HPACC_ part is a hack - the library seems to define _RWSTD_ALLOCATOR\n// on HP aCC systems even though the allocator is in fact broken):\n//\n#if !defined(_RWSTD_ALLOCATOR) || (defined(__HP_aCC) && __HP_aCC <= 33100)\n#  define BOOST_NO_STD_ALLOCATOR\n#endif\n\n//\n// If we have a std::locale, we still may not have std::use_facet:\n//\n#if defined(_RWSTD_NO_TEMPLATE_ON_RETURN_TYPE) && !defined(BOOST_NO_STD_LOCALE)\n#  define BOOST_NO_STD_USE_FACET\n#  define BOOST_HAS_TWO_ARG_USE_FACET\n#endif\n\n//\n// There's no std::distance prior to version 2, or without\n// partial specialization support:\n//\n#if (BOOST_RWSTD_VER < 0x020000) || defined(_RWSTD_NO_CLASS_PARTIAL_SPEC)\n    #define BOOST_NO_STD_DISTANCE\n#endif\n\n//\n// Some 
versions of the rogue wave library don't have assignable\n// OutputIterators:\n//\n#if BOOST_RWSTD_VER < 0x020100\n#  define BOOST_NO_STD_OUTPUT_ITERATOR_ASSIGN\n#endif\n\n//\n// Disable BOOST_HAS_LONG_LONG when the library has no support for it.\n//\n#if !defined(_RWSTD_LONG_LONG) && defined(BOOST_HAS_LONG_LONG)\n#  undef BOOST_HAS_LONG_LONG\n#endif\n\n//\n// check that on HP-UX, the proper RW library is used\n//\n#if defined(__HP_aCC) && !defined(_HP_NAMESPACE_STD)\n#  error \"Boost requires Standard RW library. Please compile and link with -AA\"\n#endif\n\n//\n// Define macros specific to RW V2.2 on HP-UX\n//\n#if defined(__HP_aCC) && (BOOST_RWSTD_VER == 0x02020100)\n#  ifndef __HP_TC1_MAKE_PAIR\n#    define __HP_TC1_MAKE_PAIR\n#  endif\n#  ifndef _HP_INSTANTIATE_STD2_VL\n#    define _HP_INSTANTIATE_STD2_VL\n#  endif\n#endif\n\n#if _RWSTD_VER < 0x05000000\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#endif\n// type_traits header is incomplete:\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n//\n//  C++0x headers not yet implemented\n//\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_ALLOCATOR\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_STD_ALIGN\n#  define BOOST_NO_CXX11_ADDRESSOF\n\n#if 
defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#else\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/sgi.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2003. \n//  (C) Copyright Darin Adler 2001. \n//  (C) Copyright Jens Maurer 2001 - 2003. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  generic SGI STL:\n\n#if !defined(__STL_CONFIG_H)\n#  include <boost/config/no_tr1/utility.hpp>\n#  if !defined(__STL_CONFIG_H)\n#      error \"This is not the SGI STL!\"\n#  endif\n#endif\n\n//\n// No std::iterator traits without partial specialisation:\n//\n#if !defined(__STL_CLASS_PARTIAL_SPECIALIZATION)\n#  define BOOST_NO_STD_ITERATOR_TRAITS\n#endif\n\n//\n// No std::stringstream with gcc < 3\n//\n#if defined(__GNUC__) && (__GNUC__ < 3) && \\\n     ((__GNUC_MINOR__ < 95) || (__GNUC_MINOR__ == 96)) && \\\n     !defined(__STL_USE_NEW_IOSTREAMS) || \\\n   defined(__APPLE_CC__)\n   // Note that we only set this for GNU C++ prior to 2.95 since the\n   // latest patches for that release do contain a minimal <sstream>\n   // If you are running a 2.95 release prior to 2.95.3 then this will need\n   // setting, but there is no way to detect that automatically (other\n   // than by running the configure script).\n   // Also, the unofficial GNU C++ 2.96 included in RedHat 7.1 doesn't\n   // have <sstream>.\n#  define BOOST_NO_STRINGSTREAM\n#endif\n\n// Apple doesn't seem to reliably defined a *unix* macro\n#if !defined(CYGWIN) && (  defined(__unix__)  \\\n                        || defined(__unix)    \\\n                        || defined(unix)      \\\n                        || defined(__APPLE__) \\\n                        || defined(__APPLE)   \\\n                        || defined(APPLE))\n#  include <unistd.h>\n#endif\n\n\n//\n// Assume no std::locale without own iostreams (this may be an\n// incorrect assumption in some cases):\n//\n#if 
!defined(__SGI_STL_OWN_IOSTREAMS) && !defined(__STL_USE_NEW_IOSTREAMS)\n#  define BOOST_NO_STD_LOCALE\n#endif\n\n//\n// Original native SGI streams have non-standard std::messages facet:\n//\n#if defined(__sgi) && (_COMPILER_VERSION <= 650) && !defined(__SGI_STL_OWN_IOSTREAMS)\n#  define BOOST_NO_STD_LOCALE\n#endif\n\n//\n// SGI's new iostreams have missing \"const\" in messages<>::open\n//\n#if defined(__sgi) && (_COMPILER_VERSION <= 740) && defined(__STL_USE_NEW_IOSTREAMS)\n#  define BOOST_NO_STD_MESSAGES\n#endif\n\n//\n// No template iterator constructors, or std::allocator\n// without member templates:\n//\n#if !defined(__STL_MEMBER_TEMPLATES)\n#  define BOOST_NO_TEMPLATED_ITERATOR_CONSTRUCTORS\n#  define BOOST_NO_STD_ALLOCATOR\n#endif\n\n//\n// We always have SGI style hash_set, hash_map, and slist:\n//\n#define BOOST_HAS_HASH\n#define BOOST_HAS_SLIST\n\n//\n// If this is GNU libstdc++2, then no <limits> and no std::wstring:\n//\n#if (defined(__GNUC__) && (__GNUC__ < 3))\n#  include <string>\n#  if defined(__BASTRING__)\n#     define BOOST_NO_LIMITS\n// Note: <boost/limits.hpp> will provide compile-time constants\n#     undef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS\n#     define BOOST_NO_STD_WSTRING\n#  endif\n#endif\n\n//\n// There is no standard iterator unless we have namespace support:\n//\n#if !defined(__STL_USE_NAMESPACES)\n#  define BOOST_NO_STD_ITERATOR\n#endif\n\n//\n// Intrinsic type_traits support.\n// The SGI STL has it's own __type_traits class, which\n// has intrinsic compiler support with SGI's compilers.\n// Whatever map SGI style type traits to boost equivalents:\n//\n#define BOOST_HAS_SGI_TYPE_TRAITS\n\n//  C++0x headers not yet implemented\n//\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define 
BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_ALLOCATOR\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_STD_ALIGN\n#  define BOOST_NO_CXX11_ADDRESSOF\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#else\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n#define BOOST_STDLIB \"SGI standard library\""
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/stlport.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2002. \n//  (C) Copyright Darin Adler 2001. \n//  (C) Copyright Jens Maurer 2001. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n//  STLPort standard library config:\n\n#if !defined(__SGI_STL_PORT) && !defined(_STLPORT_VERSION)\n#  include <cstddef>\n#  if !defined(__SGI_STL_PORT) && !defined(_STLPORT_VERSION)\n#      error \"This is not STLPort!\"\n#  endif\n#endif\n\n// Apple doesn't seem to reliably defined a *unix* macro\n#if !defined(CYGWIN) && (  defined(__unix__)  \\\n                        || defined(__unix)    \\\n                        || defined(unix)      \\\n                        || defined(__APPLE__) \\\n                        || defined(__APPLE)   \\\n                        || defined(APPLE))\n#  include <unistd.h>\n#endif\n\n//\n// __STL_STATIC_CONST_INIT_BUG implies BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS\n// for versions prior to 4.1(beta)\n//\n#if (defined(__STL_STATIC_CONST_INIT_BUG) || defined(_STLP_STATIC_CONST_INIT_BUG)) && (__SGI_STL_PORT <= 0x400)\n#  define BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS\n#endif\n\n//\n// If STLport thinks that there is no partial specialisation, then there is no\n// std::iterator traits:\n//\n#if !(defined(_STLP_CLASS_PARTIAL_SPECIALIZATION) || defined(__STL_CLASS_PARTIAL_SPECIALIZATION))\n#  define BOOST_NO_STD_ITERATOR_TRAITS\n#endif\n\n//\n// No new style iostreams on GCC without STLport's iostreams enabled:\n//\n#if (defined(__GNUC__) && (__GNUC__ < 3)) && !(defined(__SGI_STL_OWN_IOSTREAMS) || defined(_STLP_OWN_IOSTREAMS))\n#  define BOOST_NO_STRINGSTREAM\n#endif\n\n//\n// No new iostreams implies no std::locale, and no std::stringstream:\n//\n#if defined(__STL_NO_IOSTREAMS) || defined(__STL_NO_NEW_IOSTREAMS) || defined(_STLP_NO_IOSTREAMS) || 
defined(_STLP_NO_NEW_IOSTREAMS)\n#  define BOOST_NO_STD_LOCALE\n#  define BOOST_NO_STRINGSTREAM\n#endif\n\n//\n// If the streams are not native, and we have a \"using ::x\" compiler bug\n// then the io stream facets are not available in namespace std::\n//\n#ifdef _STLPORT_VERSION\n#  if !(_STLPORT_VERSION >= 0x500) && !defined(_STLP_OWN_IOSTREAMS) && defined(_STLP_USE_NAMESPACES) && defined(BOOST_NO_USING_TEMPLATE) && !defined(__BORLANDC__)\n#     define BOOST_NO_STD_LOCALE\n#  endif\n#else\n#  if !defined(__SGI_STL_OWN_IOSTREAMS) && defined(__STL_USE_NAMESPACES) && defined(BOOST_NO_USING_TEMPLATE) && !defined(__BORLANDC__)\n#     define BOOST_NO_STD_LOCALE\n#  endif\n#endif\n\n#if defined(_STLPORT_VERSION) && (_STLPORT_VERSION >= 0x520)\n#  define BOOST_HAS_TR1_UNORDERED_SET\n#  define BOOST_HAS_TR1_UNORDERED_MAP\n#endif\n//\n// Without member template support enabled, their are no template\n// iterate constructors, and no std::allocator:\n//\n#if !(defined(__STL_MEMBER_TEMPLATES) || defined(_STLP_MEMBER_TEMPLATES))\n#  define BOOST_NO_TEMPLATED_ITERATOR_CONSTRUCTORS\n#  define BOOST_NO_STD_ALLOCATOR\n#endif\n//\n// however we always have at least a partial allocator:\n//\n#define BOOST_HAS_PARTIAL_STD_ALLOCATOR\n\n#if !defined(_STLP_MEMBER_TEMPLATE_CLASSES) || defined(_STLP_DONT_SUPPORT_REBIND_MEMBER_TEMPLATE)\n#  define BOOST_NO_STD_ALLOCATOR\n#endif\n\n#if defined(_STLP_NO_MEMBER_TEMPLATE_KEYWORD) && defined(BOOST_MSVC) && (BOOST_MSVC <= 1300)\n#  define BOOST_NO_STD_ALLOCATOR\n#endif\n\n//\n// If STLport thinks there is no wchar_t at all, then we have to disable\n// the support for the relevant specilazations of std:: templates.\n//\n#if !defined(_STLP_HAS_WCHAR_T) && !defined(_STLP_WCHAR_T_IS_USHORT)\n#  ifndef  BOOST_NO_STD_WSTRING\n#     define BOOST_NO_STD_WSTRING\n#  endif\n#  ifndef  BOOST_NO_STD_WSTREAMBUF\n#     define BOOST_NO_STD_WSTREAMBUF\n#  endif\n#endif\n\n//\n// We always have SGI style hash_set, hash_map, and slist:\n//\n#ifndef 
_STLP_NO_EXTENSIONS\n#define BOOST_HAS_HASH\n#define BOOST_HAS_SLIST\n#endif\n\n//\n// STLport does a good job of importing names into namespace std::,\n// but doesn't always get them all, define BOOST_NO_STDC_NAMESPACE, since our\n// workaround does not conflict with STLports:\n//\n//\n// Harold Howe says:\n// Borland switched to STLport in BCB6. Defining BOOST_NO_STDC_NAMESPACE with\n// BCB6 does cause problems. If we detect C++ Builder, then don't define \n// BOOST_NO_STDC_NAMESPACE\n//\n#if !defined(__BORLANDC__) && !defined(__DMC__)\n//\n// If STLport is using it's own namespace, and the real names are in\n// the global namespace, then we duplicate STLport's using declarations\n// (by defining BOOST_NO_STDC_NAMESPACE), we do this because STLport doesn't\n// necessarily import all the names we need into namespace std::\n// \n#  if (defined(__STL_IMPORT_VENDOR_CSTD) \\\n         || defined(__STL_USE_OWN_NAMESPACE) \\\n         || defined(_STLP_IMPORT_VENDOR_CSTD) \\\n         || defined(_STLP_USE_OWN_NAMESPACE)) \\\n      && (defined(__STL_VENDOR_GLOBAL_CSTD) || defined (_STLP_VENDOR_GLOBAL_CSTD))\n#     define BOOST_NO_STDC_NAMESPACE\n#     define BOOST_NO_EXCEPTION_STD_NAMESPACE\n#  endif\n#elif defined(__BORLANDC__) && __BORLANDC__ < 0x560\n// STLport doesn't import std::abs correctly:\n#include <stdlib.h>\nnamespace std { using ::abs; }\n// and strcmp/strcpy don't get imported either ('cos they are macros)\n#include <string.h>\n#ifdef strcpy\n#  undef strcpy\n#endif\n#ifdef strcmp\n#  undef strcmp\n#endif\n#ifdef _STLP_VENDOR_CSTD\nnamespace std{ using _STLP_VENDOR_CSTD::strcmp; using _STLP_VENDOR_CSTD::strcpy; }\n#endif\n#endif\n\n//\n// std::use_facet may be non-standard, uses a class instead:\n//\n#if defined(__STL_NO_EXPLICIT_FUNCTION_TMPL_ARGS) || defined(_STLP_NO_EXPLICIT_FUNCTION_TMPL_ARGS)\n#  define BOOST_NO_STD_USE_FACET\n#  define BOOST_HAS_STLP_USE_FACET\n#endif\n\n//\n// If STLport thinks there are no wide functions, <cwchar> etc. 
is not working; but\n// only if BOOST_NO_STDC_NAMESPACE is not defined (if it is then we do the import \n// into std:: ourselves).\n//\n#if defined(_STLP_NO_NATIVE_WIDE_FUNCTIONS) && !defined(BOOST_NO_STDC_NAMESPACE)\n#  define BOOST_NO_CWCHAR\n#  define BOOST_NO_CWCTYPE\n#endif\n\n//\n// If STLport for some reason was configured so that it thinks that wchar_t\n// is not an intrinsic type, then we have to disable the support for it as\n// well (we would be missing required specializations otherwise).\n//\n#if !defined( _STLP_HAS_WCHAR_T) || defined(_STLP_WCHAR_T_IS_USHORT)\n#  undef  BOOST_NO_INTRINSIC_WCHAR_T\n#  define BOOST_NO_INTRINSIC_WCHAR_T\n#endif\n\n//\n// Borland ships a version of STLport with C++ Builder 6 that lacks\n// hashtables and the like:\n//\n#if defined(__BORLANDC__) && (__BORLANDC__ == 0x560)\n#  undef BOOST_HAS_HASH\n#endif\n\n//\n// gcc-2.95.3/STLPort does not like the using declarations we use to get ADL with std::min/max\n//\n#if defined(__GNUC__) && (__GNUC__ < 3)\n#  include <algorithm> // for std::min and std::max\n#  define BOOST_USING_STD_MIN() ((void)0)\n#  define BOOST_USING_STD_MAX() ((void)0)\nnamespace boost { using std::min; using std::max; }\n#endif\n\n//  C++0x headers not yet implemented\n//\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define 
BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_ALLOCATOR\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_STD_ALIGN\n#  define BOOST_NO_CXX11_ADDRESSOF\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#else\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n#define BOOST_STDLIB \"STLPort standard library version \" BOOST_STRINGIZE(__SGI_STL_PORT)\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/stdlib/vacpp.hpp",
    "content": "//  (C) Copyright John Maddock 2001 - 2002. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org for most recent version.\n\n#if __IBMCPP__ <= 501\n#  define BOOST_NO_STD_ALLOCATOR\n#endif\n\n#define BOOST_HAS_MACRO_USE_FACET\n#define BOOST_NO_STD_MESSAGES\n\n// Apple doesn't seem to reliably defined a *unix* macro\n#if !defined(CYGWIN) && (  defined(__unix__)  \\\n                        || defined(__unix)    \\\n                        || defined(unix)      \\\n                        || defined(__APPLE__) \\\n                        || defined(__APPLE)   \\\n                        || defined(APPLE))\n#  include <unistd.h>\n#endif\n\n//  C++0x headers not yet implemented\n//\n#  define BOOST_NO_CXX11_HDR_ARRAY\n#  define BOOST_NO_CXX11_HDR_CHRONO\n#  define BOOST_NO_CXX11_HDR_CODECVT\n#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE\n#  define BOOST_NO_CXX11_HDR_FORWARD_LIST\n#  define BOOST_NO_CXX11_HDR_FUTURE\n#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n#  define BOOST_NO_CXX11_HDR_MUTEX\n#  define BOOST_NO_CXX11_HDR_RANDOM\n#  define BOOST_NO_CXX11_HDR_RATIO\n#  define BOOST_NO_CXX11_HDR_REGEX\n#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR\n#  define BOOST_NO_CXX11_HDR_THREAD\n#  define BOOST_NO_CXX11_HDR_TUPLE\n#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS\n#  define BOOST_NO_CXX11_HDR_TYPEINDEX\n#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP\n#  define BOOST_NO_CXX11_HDR_UNORDERED_SET\n#  define BOOST_NO_CXX11_NUMERIC_LIMITS\n#  define BOOST_NO_CXX11_ALLOCATOR\n#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR\n#  define BOOST_NO_CXX11_SMART_PTR\n#  define BOOST_NO_CXX11_HDR_FUNCTIONAL\n#  define BOOST_NO_CXX11_HDR_ATOMIC\n#  define BOOST_NO_CXX11_STD_ALIGN\n#  define BOOST_NO_CXX11_ADDRESSOF\n\n#if defined(__has_include)\n#if !__has_include(<shared_mutex>)\n#  define 
BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#elif __cplusplus < 201402\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n#else\n#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX\n#endif\n\n#define BOOST_STDLIB \"Visual Age default standard library\"\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/suffix.hpp",
    "content": "//  Boost config.hpp configuration header file  ------------------------------//\n//  boostinspect:ndprecated_macros -- tell the inspect tool to ignore this file\n\n//  Copyright (c) 2001-2003 John Maddock\n//  Copyright (c) 2001 Darin Adler\n//  Copyright (c) 2001 Peter Dimov\n//  Copyright (c) 2002 Bill Kempf\n//  Copyright (c) 2002 Jens Maurer\n//  Copyright (c) 2002-2003 David Abrahams\n//  Copyright (c) 2003 Gennaro Prota\n//  Copyright (c) 2003 Eric Friedman\n//  Copyright (c) 2010 Eric Jourdanneau, Joel Falcou\n// Distributed under the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org/ for most recent version.\n\n//  Boost config.hpp policy and rationale documentation has been moved to\n//  http://www.boost.org/libs/config/\n//\n//  This file is intended to be stable, and relatively unchanging.\n//  It should contain boilerplate code only - no compiler specific\n//  code unless it is unavoidable - no changes unless unavoidable.\n\n#ifndef BOOST_CONFIG_SUFFIX_HPP\n#define BOOST_CONFIG_SUFFIX_HPP\n\n#if defined(__GNUC__) && (__GNUC__ >= 4)\n//\n// Some GCC-4.x versions issue warnings even when __extension__ is used,\n// so use this as a workaround:\n//\n#pragma GCC system_header\n#endif\n\n//\n// ensure that visibility macros are always defined, thus symplifying use\n//\n#ifndef BOOST_SYMBOL_EXPORT\n# define BOOST_SYMBOL_EXPORT\n#endif\n#ifndef BOOST_SYMBOL_IMPORT\n# define BOOST_SYMBOL_IMPORT\n#endif\n#ifndef BOOST_SYMBOL_VISIBLE\n# define BOOST_SYMBOL_VISIBLE\n#endif\n\n//\n// look for long long by looking for the appropriate macros in <limits.h>.\n// Note that we use limits.h rather than climits for maximal portability,\n// remember that since these just declare a bunch of macros, there should be\n// no namespace issues from this.\n//\n#if !defined(BOOST_HAS_LONG_LONG) && !defined(BOOST_NO_LONG_LONG)                              
                \\\n   && !defined(BOOST_MSVC) && !defined(__BORLANDC__)\n# include <limits.h>\n# if (defined(ULLONG_MAX) || defined(ULONG_LONG_MAX) || defined(ULONGLONG_MAX))\n#   define BOOST_HAS_LONG_LONG\n# else\n#   define BOOST_NO_LONG_LONG\n# endif\n#endif\n\n// GCC 3.x will clean up all of those nasty macro definitions that\n// BOOST_NO_CTYPE_FUNCTIONS is intended to help work around, so undefine\n// it under GCC 3.x.\n#if defined(__GNUC__) && (__GNUC__ >= 3) && defined(BOOST_NO_CTYPE_FUNCTIONS)\n#  undef BOOST_NO_CTYPE_FUNCTIONS\n#endif\n\n//\n// Assume any extensions are in namespace std:: unless stated otherwise:\n//\n#  ifndef BOOST_STD_EXTENSION_NAMESPACE\n#    define BOOST_STD_EXTENSION_NAMESPACE std\n#  endif\n\n//\n// If cv-qualified specializations are not allowed, then neither are cv-void ones:\n//\n#  if defined(BOOST_NO_CV_SPECIALIZATIONS) \\\n      && !defined(BOOST_NO_CV_VOID_SPECIALIZATIONS)\n#     define BOOST_NO_CV_VOID_SPECIALIZATIONS\n#  endif\n\n//\n// If there is no numeric_limits template, then it can't have any compile time\n// constants either!\n//\n#  if defined(BOOST_NO_LIMITS) \\\n      && !defined(BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS)\n#     define BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS\n#     define BOOST_NO_MS_INT64_NUMERIC_LIMITS\n#     define BOOST_NO_LONG_LONG_NUMERIC_LIMITS\n#  endif\n\n//\n// if there is no long long then there is no specialisation\n// for numeric_limits<long long> either:\n//\n#if !defined(BOOST_HAS_LONG_LONG) && !defined(BOOST_NO_LONG_LONG_NUMERIC_LIMITS)\n#  define BOOST_NO_LONG_LONG_NUMERIC_LIMITS\n#endif\n\n//\n// if there is no __int64 then there is no specialisation\n// for numeric_limits<__int64> either:\n//\n#if !defined(BOOST_HAS_MS_INT64) && !defined(BOOST_NO_MS_INT64_NUMERIC_LIMITS)\n#  define BOOST_NO_MS_INT64_NUMERIC_LIMITS\n#endif\n\n//\n// if member templates are supported then so is the\n// VC6 subset of member templates:\n//\n#  if !defined(BOOST_NO_MEMBER_TEMPLATES) \\\n       && 
!defined(BOOST_MSVC6_MEMBER_TEMPLATES)\n#     define BOOST_MSVC6_MEMBER_TEMPLATES\n#  endif\n\n//\n// Without partial specialization, can't test for partial specialisation bugs:\n//\n#  if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n      && !defined(BOOST_BCB_PARTIAL_SPECIALIZATION_BUG)\n#     define BOOST_BCB_PARTIAL_SPECIALIZATION_BUG\n#  endif\n\n//\n// Without partial specialization, we can't have array-type partial specialisations:\n//\n#  if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n      && !defined(BOOST_NO_ARRAY_TYPE_SPECIALIZATIONS)\n#     define BOOST_NO_ARRAY_TYPE_SPECIALIZATIONS\n#  endif\n\n//\n// Without partial specialization, std::iterator_traits can't work:\n//\n#  if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n      && !defined(BOOST_NO_STD_ITERATOR_TRAITS)\n#     define BOOST_NO_STD_ITERATOR_TRAITS\n#  endif\n\n//\n// Without partial specialization, partial\n// specialization with default args won't work either:\n//\n#  if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n      && !defined(BOOST_NO_PARTIAL_SPECIALIZATION_IMPLICIT_DEFAULT_ARGS)\n#     define BOOST_NO_PARTIAL_SPECIALIZATION_IMPLICIT_DEFAULT_ARGS\n#  endif\n\n//\n// Without member template support, we can't have template constructors\n// in the standard library either:\n//\n#  if defined(BOOST_NO_MEMBER_TEMPLATES) \\\n      && !defined(BOOST_MSVC6_MEMBER_TEMPLATES) \\\n      && !defined(BOOST_NO_TEMPLATED_ITERATOR_CONSTRUCTORS)\n#     define BOOST_NO_TEMPLATED_ITERATOR_CONSTRUCTORS\n#  endif\n\n//\n// Without member template support, we can't have a conforming\n// std::allocator template either:\n//\n#  if defined(BOOST_NO_MEMBER_TEMPLATES) \\\n      && !defined(BOOST_MSVC6_MEMBER_TEMPLATES) \\\n      && !defined(BOOST_NO_STD_ALLOCATOR)\n#     define BOOST_NO_STD_ALLOCATOR\n#  endif\n\n//\n// without ADL support then using declarations will break ADL as well:\n//\n#if defined(BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP) && 
!defined(BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL)\n#  define BOOST_FUNCTION_SCOPE_USING_DECLARATION_BREAKS_ADL\n#endif\n\n//\n// Without typeid support we have no dynamic RTTI either:\n//\n#if defined(BOOST_NO_TYPEID) && !defined(BOOST_NO_RTTI)\n#  define BOOST_NO_RTTI\n#endif\n\n//\n// If we have a standard allocator, then we have a partial one as well:\n//\n#if !defined(BOOST_NO_STD_ALLOCATOR)\n#  define BOOST_HAS_PARTIAL_STD_ALLOCATOR\n#endif\n\n//\n// We can't have a working std::use_facet if there is no std::locale:\n//\n#  if defined(BOOST_NO_STD_LOCALE) && !defined(BOOST_NO_STD_USE_FACET)\n#     define BOOST_NO_STD_USE_FACET\n#  endif\n\n//\n// We can't have a std::messages facet if there is no std::locale:\n//\n#  if defined(BOOST_NO_STD_LOCALE) && !defined(BOOST_NO_STD_MESSAGES)\n#     define BOOST_NO_STD_MESSAGES\n#  endif\n\n//\n// We can't have a working std::wstreambuf if there is no std::locale:\n//\n#  if defined(BOOST_NO_STD_LOCALE) && !defined(BOOST_NO_STD_WSTREAMBUF)\n#     define BOOST_NO_STD_WSTREAMBUF\n#  endif\n\n//\n// We can't have a <cwctype> if there is no <cwchar>:\n//\n#  if defined(BOOST_NO_CWCHAR) && !defined(BOOST_NO_CWCTYPE)\n#     define BOOST_NO_CWCTYPE\n#  endif\n\n//\n// We can't have a swprintf if there is no <cwchar>:\n//\n#  if defined(BOOST_NO_CWCHAR) && !defined(BOOST_NO_SWPRINTF)\n#     define BOOST_NO_SWPRINTF\n#  endif\n\n//\n// If Win32 support is turned off, then we must turn off\n// threading support also, unless there is some other\n// thread API enabled:\n//\n#if defined(BOOST_DISABLE_WIN32) && defined(_WIN32) \\\n   && !defined(BOOST_DISABLE_THREADS) && !defined(BOOST_HAS_PTHREADS)\n#  define BOOST_DISABLE_THREADS\n#endif\n\n//\n// Turn on threading support if the compiler thinks that it's in\n// multithreaded mode.  
We put this here because there are only a\n// limited number of macros that identify this (if there's any missing\n// from here then add to the appropriate compiler section):\n//\n#if (defined(__MT__) || defined(_MT) || defined(_REENTRANT) \\\n    || defined(_PTHREADS) || defined(__APPLE__) || defined(__DragonFly__)) \\\n    && !defined(BOOST_HAS_THREADS)\n#  define BOOST_HAS_THREADS\n#endif\n\n//\n// Turn threading support off if BOOST_DISABLE_THREADS is defined:\n//\n#if defined(BOOST_DISABLE_THREADS) && defined(BOOST_HAS_THREADS)\n#  undef BOOST_HAS_THREADS\n#endif\n\n//\n// Turn threading support off if we don't recognise the threading API:\n//\n#if defined(BOOST_HAS_THREADS) && !defined(BOOST_HAS_PTHREADS)\\\n      && !defined(BOOST_HAS_WINTHREADS) && !defined(BOOST_HAS_BETHREADS)\\\n      && !defined(BOOST_HAS_MPTASKS)\n#  undef BOOST_HAS_THREADS\n#endif\n\n//\n// Turn threading detail macros off if we don't (want to) use threading\n//\n#ifndef BOOST_HAS_THREADS\n#  undef BOOST_HAS_PTHREADS\n#  undef BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE\n#  undef BOOST_HAS_PTHREAD_YIELD\n#  undef BOOST_HAS_PTHREAD_DELAY_NP\n#  undef BOOST_HAS_WINTHREADS\n#  undef BOOST_HAS_BETHREADS\n#  undef BOOST_HAS_MPTASKS\n#endif\n\n//\n// If the compiler claims to be C99 conformant, then it had better\n// have a <stdint.h>:\n//\n#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)\n#     define BOOST_HAS_STDINT_H\n#     ifndef BOOST_HAS_LOG1P\n#        define BOOST_HAS_LOG1P\n#     endif\n#     ifndef BOOST_HAS_EXPM1\n#        define BOOST_HAS_EXPM1\n#     endif\n#  endif\n\n//\n// Define BOOST_NO_SLIST and BOOST_NO_HASH if required.\n// Note that this is for backwards compatibility only.\n//\n#  if !defined(BOOST_HAS_SLIST) && !defined(BOOST_NO_SLIST)\n#     define BOOST_NO_SLIST\n#  endif\n\n#  if !defined(BOOST_HAS_HASH) && !defined(BOOST_NO_HASH)\n#     define BOOST_NO_HASH\n#  endif\n\n//\n// Set BOOST_SLIST_HEADER if not set already:\n//\n#if defined(BOOST_HAS_SLIST) && 
!defined(BOOST_SLIST_HEADER)\n#  define BOOST_SLIST_HEADER <slist>\n#endif\n\n//\n// Set BOOST_HASH_SET_HEADER if not set already:\n//\n#if defined(BOOST_HAS_HASH) && !defined(BOOST_HASH_SET_HEADER)\n#  define BOOST_HASH_SET_HEADER <hash_set>\n#endif\n\n//\n// Set BOOST_HASH_MAP_HEADER if not set already:\n//\n#if defined(BOOST_HAS_HASH) && !defined(BOOST_HASH_MAP_HEADER)\n#  define BOOST_HASH_MAP_HEADER <hash_map>\n#endif\n\n//  BOOST_HAS_ABI_HEADERS\n//  This macro gets set if we have headers that fix the ABI,\n//  and prevent ODR violations when linking to external libraries:\n#if defined(BOOST_ABI_PREFIX) && defined(BOOST_ABI_SUFFIX) && !defined(BOOST_HAS_ABI_HEADERS)\n#  define BOOST_HAS_ABI_HEADERS\n#endif\n\n#if defined(BOOST_HAS_ABI_HEADERS) && defined(BOOST_DISABLE_ABI_HEADERS)\n#  undef BOOST_HAS_ABI_HEADERS\n#endif\n\n//  BOOST_NO_STDC_NAMESPACE workaround  --------------------------------------//\n//  Because std::size_t usage is so common, even in boost headers which do not\n//  otherwise use the C library, the <cstddef> workaround is included here so\n//  that ugly workaround code need not appear in many other boost headers.\n//  NOTE WELL: This is a workaround for non-conforming compilers; <cstddef>\n//  must still be #included in the usual places so that <cstddef> inclusion\n//  works as expected with standard conforming compilers.  
The resulting\n//  double inclusion of <cstddef> is harmless.\n\n# if defined(BOOST_NO_STDC_NAMESPACE) && defined(__cplusplus)\n#   include <cstddef>\n    namespace std { using ::ptrdiff_t; using ::size_t; }\n# endif\n\n//  Workaround for the unfortunate min/max macros defined by some platform headers\n\n#define BOOST_PREVENT_MACRO_SUBSTITUTION\n\n#ifndef BOOST_USING_STD_MIN\n#  define BOOST_USING_STD_MIN() using std::min\n#endif\n\n#ifndef BOOST_USING_STD_MAX\n#  define BOOST_USING_STD_MAX() using std::max\n#endif\n\n//  BOOST_NO_STD_MIN_MAX workaround  -----------------------------------------//\n\n#  if defined(BOOST_NO_STD_MIN_MAX) && defined(__cplusplus)\n\nnamespace std {\n  template <class _Tp>\n  inline const _Tp& min BOOST_PREVENT_MACRO_SUBSTITUTION (const _Tp& __a, const _Tp& __b) {\n    return __b < __a ? __b : __a;\n  }\n  template <class _Tp>\n  inline const _Tp& max BOOST_PREVENT_MACRO_SUBSTITUTION (const _Tp& __a, const _Tp& __b) {\n    return  __a < __b ? __b : __a;\n  }\n}\n\n#  endif\n\n// BOOST_STATIC_CONSTANT workaround --------------------------------------- //\n// On compilers which don't allow in-class initialization of static integral\n// constant members, we must use enums as a workaround if we want the constants\n// to be available at compile-time. 
This macro gives us a convenient way to\n// declare such constants.\n\n#  ifdef BOOST_NO_INCLASS_MEMBER_INITIALIZATION\n#       define BOOST_STATIC_CONSTANT(type, assignment) enum { assignment }\n#  else\n#     define BOOST_STATIC_CONSTANT(type, assignment) static const type assignment\n#  endif\n\n// BOOST_USE_FACET / HAS_FACET workaround ----------------------------------//\n// When the standard library does not have a conforming std::use_facet there\n// are various workarounds available, but they differ from library to library.\n// The same problem occurs with has_facet.\n// These macros provide a consistent way to access a locale's facets.\n// Usage:\n//    replace\n//       std::use_facet<Type>(loc);\n//    with\n//       BOOST_USE_FACET(Type, loc);\n//    Note do not add a std:: prefix to the front of BOOST_USE_FACET!\n//  Use for BOOST_HAS_FACET is analogous.\n\n#if defined(BOOST_NO_STD_USE_FACET)\n#  ifdef BOOST_HAS_TWO_ARG_USE_FACET\n#     define BOOST_USE_FACET(Type, loc) std::use_facet(loc, static_cast<Type*>(0))\n#     define BOOST_HAS_FACET(Type, loc) std::has_facet(loc, static_cast<Type*>(0))\n#  elif defined(BOOST_HAS_MACRO_USE_FACET)\n#     define BOOST_USE_FACET(Type, loc) std::_USE(loc, Type)\n#     define BOOST_HAS_FACET(Type, loc) std::_HAS(loc, Type)\n#  elif defined(BOOST_HAS_STLP_USE_FACET)\n#     define BOOST_USE_FACET(Type, loc) (*std::_Use_facet<Type >(loc))\n#     define BOOST_HAS_FACET(Type, loc) std::has_facet< Type >(loc)\n#  endif\n#else\n#  define BOOST_USE_FACET(Type, loc) std::use_facet< Type >(loc)\n#  define BOOST_HAS_FACET(Type, loc) std::has_facet< Type >(loc)\n#endif\n\n// BOOST_NESTED_TEMPLATE workaround ------------------------------------------//\n// Member templates are supported by some compilers even though they can't use\n// the A::template member<U> syntax, as a workaround replace:\n//\n// typedef typename A::template rebind<U> binder;\n//\n// with:\n//\n// typedef typename A::BOOST_NESTED_TEMPLATE rebind<U> 
binder;\n\n#ifndef BOOST_NO_MEMBER_TEMPLATE_KEYWORD\n#  define BOOST_NESTED_TEMPLATE template\n#else\n#  define BOOST_NESTED_TEMPLATE\n#endif\n\n// BOOST_UNREACHABLE_RETURN(x) workaround -------------------------------------//\n// Normally evaluates to nothing, unless BOOST_NO_UNREACHABLE_RETURN_DETECTION\n// is defined, in which case it evaluates to return x; Use when you have a return\n// statement that can never be reached.\n\n#ifndef BOOST_UNREACHABLE_RETURN\n#  ifdef BOOST_NO_UNREACHABLE_RETURN_DETECTION\n#     define BOOST_UNREACHABLE_RETURN(x) return x;\n#  else\n#     define BOOST_UNREACHABLE_RETURN(x)\n#  endif\n#endif\n\n// BOOST_DEDUCED_TYPENAME workaround ------------------------------------------//\n//\n// Some compilers don't support the use of `typename' for dependent\n// types in deduced contexts, e.g.\n//\n//     template <class T> void f(T, typename T::type);\n//                                  ^^^^^^^^\n// Replace these declarations with:\n//\n//     template <class T> void f(T, BOOST_DEDUCED_TYPENAME T::type);\n\n#ifndef BOOST_NO_DEDUCED_TYPENAME\n#  define BOOST_DEDUCED_TYPENAME typename\n#else\n#  define BOOST_DEDUCED_TYPENAME\n#endif\n\n#ifndef BOOST_NO_TYPENAME_WITH_CTOR\n#  define BOOST_CTOR_TYPENAME typename\n#else\n#  define BOOST_CTOR_TYPENAME\n#endif\n\n// long long workaround ------------------------------------------//\n// On gcc (and maybe other compilers?) 
long long is alway supported\n// but it's use may generate either warnings (with -ansi), or errors\n// (with -pedantic -ansi) unless it's use is prefixed by __extension__\n//\n#if defined(BOOST_HAS_LONG_LONG) && defined(__cplusplus)\nnamespace boost{\n#  ifdef __GNUC__\n   __extension__ typedef long long long_long_type;\n   __extension__ typedef unsigned long long ulong_long_type;\n#  else\n   typedef long long long_long_type;\n   typedef unsigned long long ulong_long_type;\n#  endif\n}\n#endif\n// same again for __int128:\n#if defined(BOOST_HAS_INT128) && defined(__cplusplus)\nnamespace boost{\n#  ifdef __GNUC__\n   __extension__ typedef __int128 int128_type;\n   __extension__ typedef unsigned __int128 uint128_type;\n#  else\n   typedef __int128 int128_type;\n   typedef unsigned __int128 uint128_type;\n#  endif\n}\n#endif\n// same again for __float128:\n#if defined(BOOST_HAS_FLOAT128) && defined(__cplusplus)\nnamespace boost {\n#  ifdef __GNUC__\n   __extension__ typedef __float128 float128_type;\n#  else\n   typedef __float128 float128_type;\n#  endif\n}\n#endif\n\n// BOOST_[APPEND_]EXPLICIT_TEMPLATE_[NON_]TYPE macros --------------------------//\n\n// These macros are obsolete. 
Port away and remove.\n\n#  define BOOST_EXPLICIT_TEMPLATE_TYPE(t)\n#  define BOOST_EXPLICIT_TEMPLATE_TYPE_SPEC(t)\n#  define BOOST_EXPLICIT_TEMPLATE_NON_TYPE(t, v)\n#  define BOOST_EXPLICIT_TEMPLATE_NON_TYPE_SPEC(t, v)\n\n#  define BOOST_APPEND_EXPLICIT_TEMPLATE_TYPE(t)\n#  define BOOST_APPEND_EXPLICIT_TEMPLATE_TYPE_SPEC(t)\n#  define BOOST_APPEND_EXPLICIT_TEMPLATE_NON_TYPE(t, v)\n#  define BOOST_APPEND_EXPLICIT_TEMPLATE_NON_TYPE_SPEC(t, v)\n\n// When BOOST_NO_STD_TYPEINFO is defined, we can just import\n// the global definition into std namespace:\n#if defined(BOOST_NO_STD_TYPEINFO) && defined(__cplusplus)\n#include <typeinfo>\nnamespace std{ using ::type_info; }\n#endif\n\n// ---------------------------------------------------------------------------//\n\n//\n// Helper macro BOOST_STRINGIZE:\n// Converts the parameter X to a string after macro replacement\n// on X has been performed.\n//\n#define BOOST_STRINGIZE(X) BOOST_DO_STRINGIZE(X)\n#define BOOST_DO_STRINGIZE(X) #X\n\n//\n// Helper macro BOOST_JOIN:\n// The following piece of macro magic joins the two\n// arguments together, even when one of the arguments is\n// itself a macro (see 16.3.1 in C++ standard).  
The key\n// is that macro expansion of macro arguments does not\n// occur in BOOST_DO_JOIN2 but does in BOOST_DO_JOIN.\n//\n#define BOOST_JOIN( X, Y ) BOOST_DO_JOIN( X, Y )\n#define BOOST_DO_JOIN( X, Y ) BOOST_DO_JOIN2(X,Y)\n#define BOOST_DO_JOIN2( X, Y ) X##Y\n\n//\n// Set some default values for compiler/library/platform names.\n// These are for debugging config setup only:\n//\n#  ifndef BOOST_COMPILER\n#     define BOOST_COMPILER \"Unknown ISO C++ Compiler\"\n#  endif\n#  ifndef BOOST_STDLIB\n#     define BOOST_STDLIB \"Unknown ISO standard library\"\n#  endif\n#  ifndef BOOST_PLATFORM\n#     if defined(unix) || defined(__unix) || defined(_XOPEN_SOURCE) \\\n         || defined(_POSIX_SOURCE)\n#        define BOOST_PLATFORM \"Generic Unix\"\n#     else\n#        define BOOST_PLATFORM \"Unknown\"\n#     endif\n#  endif\n\n//\n// Set some default values GPU support\n//\n#  ifndef BOOST_GPU_ENABLED\n#  define BOOST_GPU_ENABLED\n#  endif\n\n// BOOST_FORCEINLINE ---------------------------------------------//\n// Macro to use in place of 'inline' to force a function to be inline\n#if !defined(BOOST_FORCEINLINE)\n#  if defined(_MSC_VER)\n#    define BOOST_FORCEINLINE __forceinline\n#  elif defined(__GNUC__) && __GNUC__ > 3\n     // Clang also defines __GNUC__ (as 4)\n#    define BOOST_FORCEINLINE inline __attribute__ ((__always_inline__))\n#  else\n#    define BOOST_FORCEINLINE inline\n#  endif\n#endif\n\n// BOOST_NOINLINE ---------------------------------------------//\n// Macro to use in place of 'inline' to prevent a function to be inlined\n#if !defined(BOOST_NOINLINE)\n#  if defined(_MSC_VER)\n#    define BOOST_NOINLINE __declspec(noinline)\n#  elif defined(__GNUC__) && __GNUC__ > 3\n     // Clang also defines __GNUC__ (as 4)\n#    if defined(__CUDACC__)\n       // nvcc doesn't always parse __noinline__, \n       // see: https://svn.boost.org/trac/boost/ticket/9392\n#      define BOOST_NOINLINE __attribute__ ((noinline))\n#    else\n#      define BOOST_NOINLINE 
__attribute__ ((__noinline__))\n#    endif\n#  else\n#    define BOOST_NOINLINE\n#  endif\n#endif\n\n// BOOST_NORETURN ---------------------------------------------//\n// Macro to use before a function declaration/definition to designate\n// the function as not returning normally (i.e. with a return statement\n// or by leaving the function scope, if the function return type is void).\n#if !defined(BOOST_NORETURN)\n#  if defined(_MSC_VER)\n#    define BOOST_NORETURN __declspec(noreturn)\n#  elif defined(__GNUC__)\n#    define BOOST_NORETURN __attribute__ ((__noreturn__))\n#  else\n#    define BOOST_NO_NORETURN\n#    define BOOST_NORETURN\n#  endif\n#endif\n\n// Branch prediction hints\n// These macros are intended to wrap conditional expressions that yield true or false\n//\n//  if (BOOST_LIKELY(var == 10))\n//  {\n//     // the most probable code here\n//  }\n//\n#if !defined(BOOST_LIKELY)\n#  define BOOST_LIKELY(x) x\n#endif\n#if !defined(BOOST_UNLIKELY)\n#  define BOOST_UNLIKELY(x) x\n#endif\n\n// Type and data alignment specification\n//\n#if !defined(BOOST_NO_CXX11_ALIGNAS)\n#  define BOOST_ALIGNMENT(x) alignas(x)\n#elif defined(_MSC_VER)\n#  define BOOST_ALIGNMENT(x) __declspec(align(x))\n#elif defined(__GNUC__)\n#  define BOOST_ALIGNMENT(x) __attribute__ ((__aligned__(x)))\n#else\n#  define BOOST_NO_ALIGNMENT\n#  define BOOST_ALIGNMENT(x)\n#endif\n\n// Lack of non-public defaulted functions is implied by the lack of any defaulted functions\n#if !defined(BOOST_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS) && defined(BOOST_NO_CXX11_DEFAULTED_FUNCTIONS)\n#  define BOOST_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS\n#endif\n\n// Defaulted and deleted function declaration helpers\n// These macros are intended to be inside a class definition.\n// BOOST_DEFAULTED_FUNCTION accepts the function declaration and its\n// body, which will be used if the compiler doesn't support defaulted functions.\n// BOOST_DELETED_FUNCTION only accepts the function declaration. 
It\n// will expand to a private function declaration, if the compiler doesn't support\n// deleted functions. Because of this it is recommended to use BOOST_DELETED_FUNCTION\n// in the end of the class definition.\n//\n//  class my_class\n//  {\n//  public:\n//      // Default-constructible\n//      BOOST_DEFAULTED_FUNCTION(my_class(), {})\n//      // Copying prohibited\n//      BOOST_DELETED_FUNCTION(my_class(my_class const&))\n//      BOOST_DELETED_FUNCTION(my_class& operator= (my_class const&))\n//  };\n//\n#if !(defined(BOOST_NO_CXX11_DEFAULTED_FUNCTIONS) || defined(BOOST_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS))\n#   define BOOST_DEFAULTED_FUNCTION(fun, body) fun = default;\n#else\n#   define BOOST_DEFAULTED_FUNCTION(fun, body) fun body\n#endif\n\n#if !defined(BOOST_NO_CXX11_DELETED_FUNCTIONS)\n#   define BOOST_DELETED_FUNCTION(fun) fun = delete;\n#else\n#   define BOOST_DELETED_FUNCTION(fun) private: fun;\n#endif\n\n//\n// Set BOOST_NO_DECLTYPE_N3276 when BOOST_NO_DECLTYPE is defined\n//\n#if defined(BOOST_NO_CXX11_DECLTYPE) && !defined(BOOST_NO_CXX11_DECLTYPE_N3276)\n#define BOOST_NO_CXX11_DECLTYPE_N3276 BOOST_NO_CXX11_DECLTYPE\n#endif\n\n//  -------------------- Deprecated macros for 1.50 ---------------------------\n//  These will go away in a future release\n\n//  Use BOOST_NO_CXX11_HDR_UNORDERED_SET or BOOST_NO_CXX11_HDR_UNORDERED_MAP\n//           instead of BOOST_NO_STD_UNORDERED\n#if defined(BOOST_NO_CXX11_HDR_UNORDERED_MAP) || defined (BOOST_NO_CXX11_HDR_UNORDERED_SET)\n# ifndef BOOST_NO_CXX11_STD_UNORDERED\n#  define BOOST_NO_CXX11_STD_UNORDERED\n# endif\n#endif\n\n//  Use BOOST_NO_CXX11_HDR_INITIALIZER_LIST instead of BOOST_NO_INITIALIZER_LISTS\n#if defined(BOOST_NO_CXX11_HDR_INITIALIZER_LIST) && !defined(BOOST_NO_INITIALIZER_LISTS)\n#  define BOOST_NO_INITIALIZER_LISTS\n#endif\n\n//  Use BOOST_NO_CXX11_HDR_ARRAY instead of BOOST_NO_0X_HDR_ARRAY\n#if defined(BOOST_NO_CXX11_HDR_ARRAY) && !defined(BOOST_NO_0X_HDR_ARRAY)\n#  define 
BOOST_NO_0X_HDR_ARRAY\n#endif\n//  Use BOOST_NO_CXX11_HDR_CHRONO instead of BOOST_NO_0X_HDR_CHRONO\n#if defined(BOOST_NO_CXX11_HDR_CHRONO) && !defined(BOOST_NO_0X_HDR_CHRONO)\n#  define BOOST_NO_0X_HDR_CHRONO\n#endif\n//  Use BOOST_NO_CXX11_HDR_CODECVT instead of BOOST_NO_0X_HDR_CODECVT\n#if defined(BOOST_NO_CXX11_HDR_CODECVT) && !defined(BOOST_NO_0X_HDR_CODECVT)\n#  define BOOST_NO_0X_HDR_CODECVT\n#endif\n//  Use BOOST_NO_CXX11_HDR_CONDITION_VARIABLE instead of BOOST_NO_0X_HDR_CONDITION_VARIABLE\n#if defined(BOOST_NO_CXX11_HDR_CONDITION_VARIABLE) && !defined(BOOST_NO_0X_HDR_CONDITION_VARIABLE)\n#  define BOOST_NO_0X_HDR_CONDITION_VARIABLE\n#endif\n//  Use BOOST_NO_CXX11_HDR_FORWARD_LIST instead of BOOST_NO_0X_HDR_FORWARD_LIST\n#if defined(BOOST_NO_CXX11_HDR_FORWARD_LIST) && !defined(BOOST_NO_0X_HDR_FORWARD_LIST)\n#  define BOOST_NO_0X_HDR_FORWARD_LIST\n#endif\n//  Use BOOST_NO_CXX11_HDR_FUTURE instead of BOOST_NO_0X_HDR_FUTURE\n#if defined(BOOST_NO_CXX11_HDR_FUTURE) && !defined(BOOST_NO_0X_HDR_FUTURE)\n#  define BOOST_NO_0X_HDR_FUTURE\n#endif\n\n//  Use BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n//  instead of BOOST_NO_0X_HDR_INITIALIZER_LIST or BOOST_NO_INITIALIZER_LISTS\n#ifdef BOOST_NO_CXX11_HDR_INITIALIZER_LIST\n# ifndef BOOST_NO_0X_HDR_INITIALIZER_LIST\n#  define BOOST_NO_0X_HDR_INITIALIZER_LIST\n# endif\n# ifndef BOOST_NO_INITIALIZER_LISTS\n#  define BOOST_NO_INITIALIZER_LISTS\n# endif\n#endif\n\n//  Use BOOST_NO_CXX11_HDR_MUTEX instead of BOOST_NO_0X_HDR_MUTEX\n#if defined(BOOST_NO_CXX11_HDR_MUTEX) && !defined(BOOST_NO_0X_HDR_MUTEX)\n#  define BOOST_NO_0X_HDR_MUTEX\n#endif\n//  Use BOOST_NO_CXX11_HDR_RANDOM instead of BOOST_NO_0X_HDR_RANDOM\n#if defined(BOOST_NO_CXX11_HDR_RANDOM) && !defined(BOOST_NO_0X_HDR_RANDOM)\n#  define BOOST_NO_0X_HDR_RANDOM\n#endif\n//  Use BOOST_NO_CXX11_HDR_RATIO instead of BOOST_NO_0X_HDR_RATIO\n#if defined(BOOST_NO_CXX11_HDR_RATIO) && !defined(BOOST_NO_0X_HDR_RATIO)\n#  define BOOST_NO_0X_HDR_RATIO\n#endif\n//  Use 
BOOST_NO_CXX11_HDR_REGEX instead of BOOST_NO_0X_HDR_REGEX\n#if defined(BOOST_NO_CXX11_HDR_REGEX) && !defined(BOOST_NO_0X_HDR_REGEX)\n#  define BOOST_NO_0X_HDR_REGEX\n#endif\n//  Use BOOST_NO_CXX11_HDR_SYSTEM_ERROR instead of BOOST_NO_0X_HDR_SYSTEM_ERROR\n#if defined(BOOST_NO_CXX11_HDR_SYSTEM_ERROR) && !defined(BOOST_NO_0X_HDR_SYSTEM_ERROR)\n#  define BOOST_NO_0X_HDR_SYSTEM_ERROR\n#endif\n//  Use BOOST_NO_CXX11_HDR_THREAD instead of BOOST_NO_0X_HDR_THREAD\n#if defined(BOOST_NO_CXX11_HDR_THREAD) && !defined(BOOST_NO_0X_HDR_THREAD)\n#  define BOOST_NO_0X_HDR_THREAD\n#endif\n//  Use BOOST_NO_CXX11_HDR_TUPLE instead of BOOST_NO_0X_HDR_TUPLE\n#if defined(BOOST_NO_CXX11_HDR_TUPLE) && !defined(BOOST_NO_0X_HDR_TUPLE)\n#  define BOOST_NO_0X_HDR_TUPLE\n#endif\n//  Use BOOST_NO_CXX11_HDR_TYPE_TRAITS instead of BOOST_NO_0X_HDR_TYPE_TRAITS\n#if defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS) && !defined(BOOST_NO_0X_HDR_TYPE_TRAITS)\n#  define BOOST_NO_0X_HDR_TYPE_TRAITS\n#endif\n//  Use BOOST_NO_CXX11_HDR_TYPEINDEX instead of BOOST_NO_0X_HDR_TYPEINDEX\n#if defined(BOOST_NO_CXX11_HDR_TYPEINDEX) && !defined(BOOST_NO_0X_HDR_TYPEINDEX)\n#  define BOOST_NO_0X_HDR_TYPEINDEX\n#endif\n//  Use BOOST_NO_CXX11_HDR_UNORDERED_MAP instead of BOOST_NO_0X_HDR_UNORDERED_MAP\n#if defined(BOOST_NO_CXX11_HDR_UNORDERED_MAP) && !defined(BOOST_NO_0X_HDR_UNORDERED_MAP)\n#  define BOOST_NO_0X_HDR_UNORDERED_MAP\n#endif\n//  Use BOOST_NO_CXX11_HDR_UNORDERED_SET instead of BOOST_NO_0X_HDR_UNORDERED_SET\n#if defined(BOOST_NO_CXX11_HDR_UNORDERED_SET) && !defined(BOOST_NO_0X_HDR_UNORDERED_SET)\n#  define BOOST_NO_0X_HDR_UNORDERED_SET\n#endif\n\n//  ------------------ End of deprecated macros for 1.50 ---------------------------\n\n//  -------------------- Deprecated macros for 1.51 ---------------------------\n//  These will go away in a future release\n\n//  Use     BOOST_NO_CXX11_AUTO_DECLARATIONS instead of   BOOST_NO_AUTO_DECLARATIONS\n#if defined(BOOST_NO_CXX11_AUTO_DECLARATIONS) && 
!defined(BOOST_NO_AUTO_DECLARATIONS)\n#  define BOOST_NO_AUTO_DECLARATIONS\n#endif\n//  Use     BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS instead of   BOOST_NO_AUTO_MULTIDECLARATIONS\n#if defined(BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS) && !defined(BOOST_NO_AUTO_MULTIDECLARATIONS)\n#  define BOOST_NO_AUTO_MULTIDECLARATIONS\n#endif\n//  Use     BOOST_NO_CXX11_CHAR16_T instead of   BOOST_NO_CHAR16_T\n#if defined(BOOST_NO_CXX11_CHAR16_T) && !defined(BOOST_NO_CHAR16_T)\n#  define BOOST_NO_CHAR16_T\n#endif\n//  Use     BOOST_NO_CXX11_CHAR32_T instead of   BOOST_NO_CHAR32_T\n#if defined(BOOST_NO_CXX11_CHAR32_T) && !defined(BOOST_NO_CHAR32_T)\n#  define BOOST_NO_CHAR32_T\n#endif\n//  Use     BOOST_NO_CXX11_TEMPLATE_ALIASES instead of   BOOST_NO_TEMPLATE_ALIASES\n#if defined(BOOST_NO_CXX11_TEMPLATE_ALIASES) && !defined(BOOST_NO_TEMPLATE_ALIASES)\n#  define BOOST_NO_TEMPLATE_ALIASES\n#endif\n//  Use     BOOST_NO_CXX11_CONSTEXPR instead of   BOOST_NO_CONSTEXPR\n#if defined(BOOST_NO_CXX11_CONSTEXPR) && !defined(BOOST_NO_CONSTEXPR)\n#  define BOOST_NO_CONSTEXPR\n#endif\n//  Use     BOOST_NO_CXX11_DECLTYPE_N3276 instead of   BOOST_NO_DECLTYPE_N3276\n#if defined(BOOST_NO_CXX11_DECLTYPE_N3276) && !defined(BOOST_NO_DECLTYPE_N3276)\n#  define BOOST_NO_DECLTYPE_N3276\n#endif\n//  Use     BOOST_NO_CXX11_DECLTYPE instead of   BOOST_NO_DECLTYPE\n#if defined(BOOST_NO_CXX11_DECLTYPE) && !defined(BOOST_NO_DECLTYPE)\n#  define BOOST_NO_DECLTYPE\n#endif\n//  Use     BOOST_NO_CXX11_DEFAULTED_FUNCTIONS instead of   BOOST_NO_DEFAULTED_FUNCTIONS\n#if defined(BOOST_NO_CXX11_DEFAULTED_FUNCTIONS) && !defined(BOOST_NO_DEFAULTED_FUNCTIONS)\n#  define BOOST_NO_DEFAULTED_FUNCTIONS\n#endif\n//  Use     BOOST_NO_CXX11_DELETED_FUNCTIONS instead of   BOOST_NO_DELETED_FUNCTIONS\n#if defined(BOOST_NO_CXX11_DELETED_FUNCTIONS) && !defined(BOOST_NO_DELETED_FUNCTIONS)\n#  define BOOST_NO_DELETED_FUNCTIONS\n#endif\n//  Use     BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS instead of   
BOOST_NO_EXPLICIT_CONVERSION_OPERATORS\n#if defined(BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS) && !defined(BOOST_NO_EXPLICIT_CONVERSION_OPERATORS)\n#  define BOOST_NO_EXPLICIT_CONVERSION_OPERATORS\n#endif\n//  Use     BOOST_NO_CXX11_EXTERN_TEMPLATE instead of   BOOST_NO_EXTERN_TEMPLATE\n#if defined(BOOST_NO_CXX11_EXTERN_TEMPLATE) && !defined(BOOST_NO_EXTERN_TEMPLATE)\n#  define BOOST_NO_EXTERN_TEMPLATE\n#endif\n//  Use     BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS instead of   BOOST_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#if defined(BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS) && !defined(BOOST_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS)\n#  define BOOST_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS\n#endif\n//  Use     BOOST_NO_CXX11_LAMBDAS instead of   BOOST_NO_LAMBDAS\n#if defined(BOOST_NO_CXX11_LAMBDAS) && !defined(BOOST_NO_LAMBDAS)\n#  define BOOST_NO_LAMBDAS\n#endif\n//  Use     BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS instead of   BOOST_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#if defined(BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS) && !defined(BOOST_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS)\n#  define BOOST_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS\n#endif\n//  Use     BOOST_NO_CXX11_NOEXCEPT instead of   BOOST_NO_NOEXCEPT\n#if defined(BOOST_NO_CXX11_NOEXCEPT) && !defined(BOOST_NO_NOEXCEPT)\n#  define BOOST_NO_NOEXCEPT\n#endif\n//  Use     BOOST_NO_CXX11_NULLPTR instead of   BOOST_NO_NULLPTR\n#if defined(BOOST_NO_CXX11_NULLPTR) && !defined(BOOST_NO_NULLPTR)\n#  define BOOST_NO_NULLPTR\n#endif\n//  Use     BOOST_NO_CXX11_RAW_LITERALS instead of   BOOST_NO_RAW_LITERALS\n#if defined(BOOST_NO_CXX11_RAW_LITERALS) && !defined(BOOST_NO_RAW_LITERALS)\n#  define BOOST_NO_RAW_LITERALS\n#endif\n//  Use     BOOST_NO_CXX11_RVALUE_REFERENCES instead of   BOOST_NO_RVALUE_REFERENCES\n#if defined(BOOST_NO_CXX11_RVALUE_REFERENCES) && !defined(BOOST_NO_RVALUE_REFERENCES)\n#  define BOOST_NO_RVALUE_REFERENCES\n#endif\n//  Use     BOOST_NO_CXX11_SCOPED_ENUMS instead of   BOOST_NO_SCOPED_ENUMS\n#if 
defined(BOOST_NO_CXX11_SCOPED_ENUMS) && !defined(BOOST_NO_SCOPED_ENUMS)\n#  define BOOST_NO_SCOPED_ENUMS\n#endif\n//  Use     BOOST_NO_CXX11_STATIC_ASSERT instead of   BOOST_NO_STATIC_ASSERT\n#if defined(BOOST_NO_CXX11_STATIC_ASSERT) && !defined(BOOST_NO_STATIC_ASSERT)\n#  define BOOST_NO_STATIC_ASSERT\n#endif\n//  Use     BOOST_NO_CXX11_STD_UNORDERED instead of   BOOST_NO_STD_UNORDERED\n#if defined(BOOST_NO_CXX11_STD_UNORDERED) && !defined(BOOST_NO_STD_UNORDERED)\n#  define BOOST_NO_STD_UNORDERED\n#endif\n//  Use     BOOST_NO_CXX11_UNICODE_LITERALS instead of   BOOST_NO_UNICODE_LITERALS\n#if defined(BOOST_NO_CXX11_UNICODE_LITERALS) && !defined(BOOST_NO_UNICODE_LITERALS)\n#  define BOOST_NO_UNICODE_LITERALS\n#endif\n//  Use     BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX instead of   BOOST_NO_UNIFIED_INITIALIZATION_SYNTAX\n#if defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX) && !defined(BOOST_NO_UNIFIED_INITIALIZATION_SYNTAX)\n#  define BOOST_NO_UNIFIED_INITIALIZATION_SYNTAX\n#endif\n//  Use     BOOST_NO_CXX11_VARIADIC_TEMPLATES instead of   BOOST_NO_VARIADIC_TEMPLATES\n#if defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES) && !defined(BOOST_NO_VARIADIC_TEMPLATES)\n#  define BOOST_NO_VARIADIC_TEMPLATES\n#endif\n//  Use     BOOST_NO_CXX11_VARIADIC_MACROS instead of   BOOST_NO_VARIADIC_MACROS\n#if defined(BOOST_NO_CXX11_VARIADIC_MACROS) && !defined(BOOST_NO_VARIADIC_MACROS)\n#  define BOOST_NO_VARIADIC_MACROS\n#endif\n//  Use     BOOST_NO_CXX11_NUMERIC_LIMITS instead of   BOOST_NO_NUMERIC_LIMITS_LOWEST\n#if defined(BOOST_NO_CXX11_NUMERIC_LIMITS) && !defined(BOOST_NO_NUMERIC_LIMITS_LOWEST)\n#  define BOOST_NO_NUMERIC_LIMITS_LOWEST\n#endif\n//  ------------------ End of deprecated macros for 1.51 ---------------------------\n\n\n\n//\n// Helper macros BOOST_NOEXCEPT, BOOST_NOEXCEPT_IF, BOOST_NOEXCEPT_EXPR\n// These aid the transition to C++11 while still supporting C++03 compilers\n//\n#ifdef BOOST_NO_CXX11_NOEXCEPT\n#  define BOOST_NOEXCEPT\n#  define 
BOOST_NOEXCEPT_OR_NOTHROW throw()\n#  define BOOST_NOEXCEPT_IF(Predicate)\n#  define BOOST_NOEXCEPT_EXPR(Expression) false\n#else\n#  define BOOST_NOEXCEPT noexcept\n#  define BOOST_NOEXCEPT_OR_NOTHROW noexcept\n#  define BOOST_NOEXCEPT_IF(Predicate) noexcept((Predicate))\n#  define BOOST_NOEXCEPT_EXPR(Expression) noexcept((Expression))\n#endif\n//\n// Helper macro BOOST_FALLTHROUGH\n// Fallback definition of BOOST_FALLTHROUGH macro used to mark intended\n// fall-through between case labels in a switch statement. We use a definition\n// that requires a semicolon after it to avoid at least one type of misuse even\n// on unsupported compilers.\n//\n#ifndef BOOST_FALLTHROUGH\n#  define BOOST_FALLTHROUGH ((void)0)\n#endif\n\n//\n// constexpr workarounds\n//\n#if defined(BOOST_NO_CXX11_CONSTEXPR)\n#define BOOST_CONSTEXPR\n#define BOOST_CONSTEXPR_OR_CONST const\n#else\n#define BOOST_CONSTEXPR constexpr\n#define BOOST_CONSTEXPR_OR_CONST constexpr\n#endif\n#if defined(BOOST_NO_CXX14_CONSTEXPR)\n#define BOOST_CXX14_CONSTEXPR\n#else\n#define BOOST_CXX14_CONSTEXPR constexpr\n#endif\n\n//\n// Unused variable/typedef workarounds:\n//\n#ifndef BOOST_ATTRIBUTE_UNUSED\n#  define BOOST_ATTRIBUTE_UNUSED\n#endif\n\n#define BOOST_STATIC_CONSTEXPR  static BOOST_CONSTEXPR_OR_CONST\n\n//\n// Set BOOST_HAS_STATIC_ASSERT when BOOST_NO_CXX11_STATIC_ASSERT is not defined\n//\n#if !defined(BOOST_NO_CXX11_STATIC_ASSERT) && !defined(BOOST_HAS_STATIC_ASSERT)\n#  define BOOST_HAS_STATIC_ASSERT\n#endif\n\n//\n// Set BOOST_HAS_RVALUE_REFS when BOOST_NO_CXX11_RVALUE_REFERENCES is not defined\n//\n#if !defined(BOOST_NO_CXX11_RVALUE_REFERENCES) && !defined(BOOST_HAS_RVALUE_REFS)\n#define BOOST_HAS_RVALUE_REFS\n#endif\n\n//\n// Set BOOST_HAS_VARIADIC_TMPL when BOOST_NO_CXX11_VARIADIC_TEMPLATES is not defined\n//\n#if !defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES) && !defined(BOOST_HAS_VARIADIC_TMPL)\n#define BOOST_HAS_VARIADIC_TMPL\n#endif\n//\n// Set 
BOOST_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS when\n// BOOST_NO_CXX11_VARIADIC_TEMPLATES is set:\n//\n#if defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES) && !defined(BOOST_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS)\n#  define BOOST_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS\n#endif\n\n//\n// Finish off with checks for macros that are depricated / no longer supported,\n// if any of these are set then it's very likely that much of Boost will no\n// longer work.  So stop with a #error for now, but give the user a chance\n// to continue at their own risk if they really want to:\n//\n#if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) && !defined(BOOST_CONFIG_ALLOW_DEPRECATED)\n#  error \"You are using a compiler which lacks features which are now a minimum requirement in order to use Boost, define BOOST_CONFIG_ALLOW_DEPRECATED if you want to continue at your own risk!!!\"\n#endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/user.hpp",
    "content": "//  boost/config/user.hpp  ---------------------------------------------------//\n\n//  (C) Copyright John Maddock 2001. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  Do not check in modified versions of this file,\n//  This file may be customized by the end user, but not by boost.\n\n//\n//  Use this file to define a site and compiler specific\n//  configuration policy:\n//\n\n// define this to locate a compiler config file:\n// #define BOOST_COMPILER_CONFIG <myheader>\n\n// define this to locate a stdlib config file:\n// #define BOOST_STDLIB_CONFIG   <myheader>\n\n// define this to locate a platform config file:\n// #define BOOST_PLATFORM_CONFIG <myheader>\n\n// define this to disable compiler config,\n// use if your compiler config has nothing to set:\n// #define BOOST_NO_COMPILER_CONFIG\n\n// define this to disable stdlib config,\n// use if your stdlib config has nothing to set:\n// #define BOOST_NO_STDLIB_CONFIG\n\n// define this to disable platform config,\n// use if your platform config has nothing to set:\n// #define BOOST_NO_PLATFORM_CONFIG\n\n// define this to disable all config options,\n// excluding the user config.  Use if your\n// setup is fully ISO compliant, and has no\n// useful extensions, or for autoconf generated\n// setups:\n// #define BOOST_NO_CONFIG\n\n// define this to make the config \"optimistic\"\n// about unknown compiler versions.  
Normally\n// unknown compiler versions are assumed to have\n// all the defects of the last known version, however\n// setting this flag, causes the config to assume\n// that unknown compiler versions are fully conformant\n// with the standard:\n// #define BOOST_STRICT_CONFIG\n\n// define this to cause the config to halt compilation\n// with an #error if it encounters anything unknown --\n// either an unknown compiler version or an unknown\n// compiler/platform/library:\n// #define BOOST_ASSERT_CONFIG\n\n\n// define if you want to disable threading support, even\n// when available:\n// #define BOOST_DISABLE_THREADS\n\n// define when you want to disable Win32 specific features\n// even when available:\n// #define BOOST_DISABLE_WIN32\n\n// BOOST_DISABLE_ABI_HEADERS: Stops boost headers from including any \n// prefix/suffix headers that normally control things like struct \n// packing and alignment. \n// #define BOOST_DISABLE_ABI_HEADERS\n\n// BOOST_ABI_PREFIX: A prefix header to include in place of whatever\n// boost.config would normally select, any replacement should set up \n// struct packing and alignment options as required. \n// #define BOOST_ABI_PREFIX my-header-name\n\n// BOOST_ABI_SUFFIX: A suffix header to include in place of whatever \n// boost.config would normally select, any replacement should undo \n// the effects of the prefix header. \n// #define BOOST_ABI_SUFFIX my-header-name\n\n// BOOST_ALL_DYN_LINK: Forces all libraries that have separate source, \n// to be linked as dll's rather than static libraries on Microsoft Windows \n// (this macro is used to turn on __declspec(dllimport) modifiers, so that \n// the compiler knows which symbols to look for in a dll rather than in a \n// static library).  
Note that there may be some libraries that can only \n// be linked in one way (statically or dynamically), in these cases this \n// macro has no effect.\n// #define BOOST_ALL_DYN_LINK\n \n// BOOST_WHATEVER_DYN_LINK: Forces library \"whatever\" to be linked as a dll \n// rather than a static library on Microsoft Windows: replace the WHATEVER \n// part of the macro name with the name of the library that you want to \n// dynamically link to, for example use BOOST_DATE_TIME_DYN_LINK or \n// BOOST_REGEX_DYN_LINK etc (this macro is used to turn on __declspec(dllimport) \n// modifiers, so that the compiler knows which symbols to look for in a dll \n// rather than in a static library).  \n// Note that there may be some libraries that can only \n// be linked in one way (statically or dynamically), \n// in these cases this macro is unsupported.\n// #define BOOST_WHATEVER_DYN_LINK\n \n// BOOST_ALL_NO_LIB: Tells the config system not to automatically select \n// which libraries to link against.  \n// Normally if a compiler supports #pragma lib, then the correct library \n// build variant will be automatically selected and linked against, \n// simply by the act of including one of that library's headers.  \n// This macro turns that feature off.\n// #define BOOST_ALL_NO_LIB\n \n// BOOST_WHATEVER_NO_LIB: Tells the config system not to automatically \n// select which library to link against for library \"whatever\", \n// replace WHATEVER in the macro name with the name of the library; \n// for example BOOST_DATE_TIME_NO_LIB or BOOST_REGEX_NO_LIB.  \n// Normally if a compiler supports #pragma lib, then the correct library \n// build variant will be automatically selected and linked against, simply \n// by the act of including one of that library's headers.  This macro turns \n// that feature off.\n// #define BOOST_WHATEVER_NO_LIB\n \n// BOOST_LIB_BUILDID: Set to the same value as the value passed to Boost.Build's\n// --buildid command line option.  
For example if you built using:\n//\n// bjam address-model=64 --buildid=amd64\n//\n// then compile your code with:\n//\n// -DBOOST_LIB_BUILDID = amd64\n//\n// to ensure the correct libraries are selected at link time.\n// #define BOOST_LIB_BUILDID amd64\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config/warning_disable.hpp",
    "content": "//  Copyright John Maddock 2008\n//  Use, modification, and distribution is subject to the Boost Software\n//  License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n//\n//  This file exists to turn off some overly-pedantic warning emitted\n//  by certain compilers.  You should include this header only in:\n//\n//  * A test case, before any other headers, or,\n//  * A library source file before any other headers.\n//\n//  IT SHOULD NOT BE INCLUDED BY ANY BOOST HEADER.\n//\n//  YOU SHOULD NOT INCLUDE IT IF YOU CAN REASONABLY FIX THE WARNING.\n//\n//  The only warnings disabled here are those that are:\n//\n//  * Quite unreasonably pedantic.\n//  * Generally only emitted by a single compiler.\n//  * Can't easily be fixed: for example if the vendors own std lib \n//    code emits these warnings!\n//\n//  Note that THIS HEADER MUST NOT INCLUDE ANY OTHER HEADERS:\n//  not even std library ones!  Doing so may turn the warning\n//  off too late to be of any use.  For example the VC++ C4996\n//  warning can be emitted from <iosfwd> if that header is included\n//  before or by this one :-(\n//\n\n#ifndef BOOST_CONFIG_WARNING_DISABLE_HPP\n#define BOOST_CONFIG_WARNING_DISABLE_HPP\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1400) \n   // Error 'function': was declared deprecated\n   // http://msdn2.microsoft.com/en-us/library/ttcz0bys(VS.80).aspx\n   // This error is emitted when you use some perfectly conforming\n   // std lib functions in a perfectly correct way, and also by\n   // some of Microsoft's own std lib code !\n#  pragma warning(disable:4996)\n#endif\n#if defined(__INTEL_COMPILER) || defined(__ICL)\n   // As above: gives warning when a \"deprecated\"\n   // std library function is encountered.\n#  pragma warning(disable:1786)\n#endif\n\n#endif // BOOST_CONFIG_WARNING_DISABLE_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/config.hpp",
    "content": "//  Boost config.hpp configuration header file  ------------------------------//\n\n//  (C) Copyright John Maddock 2002.\n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org/libs/config for most recent version.\n\n//  Boost config.hpp policy and rationale documentation has been moved to\n//  http://www.boost.org/libs/config\n//\n//  CAUTION: This file is intended to be completely stable -\n//           DO NOT MODIFY THIS FILE!\n//\n\n#ifndef BOOST_CONFIG_HPP\n#define BOOST_CONFIG_HPP\n\n// if we don't have a user config, then use the default location:\n#if !defined(BOOST_USER_CONFIG) && !defined(BOOST_NO_USER_CONFIG)\n#  define BOOST_USER_CONFIG <boost/config/user.hpp>\n#if 0\n// For dependency trackers:\n#  include <boost/config/user.hpp>\n#endif\n#endif\n// include it first:\n#ifdef BOOST_USER_CONFIG\n#  include BOOST_USER_CONFIG\n#endif\n\n// if we don't have a compiler config set, try and find one:\n#if !defined(BOOST_COMPILER_CONFIG) && !defined(BOOST_NO_COMPILER_CONFIG) && !defined(BOOST_NO_CONFIG)\n#  include <boost/config/select_compiler_config.hpp>\n#endif\n// if we have a compiler config, include it now:\n#ifdef BOOST_COMPILER_CONFIG\n#  include BOOST_COMPILER_CONFIG\n#endif\n\n// if we don't have a std library config set, try and find one:\n#if !defined(BOOST_STDLIB_CONFIG) && !defined(BOOST_NO_STDLIB_CONFIG) && !defined(BOOST_NO_CONFIG) && defined(__cplusplus)\n#  include <boost/config/select_stdlib_config.hpp>\n#endif\n// if we have a std library config, include it now:\n#ifdef BOOST_STDLIB_CONFIG\n#  include BOOST_STDLIB_CONFIG\n#endif\n\n// if we don't have a platform config set, try and find one:\n#if !defined(BOOST_PLATFORM_CONFIG) && !defined(BOOST_NO_PLATFORM_CONFIG) && !defined(BOOST_NO_CONFIG)\n#  include 
<boost/config/select_platform_config.hpp>\n#endif\n// if we have a platform config, include it now:\n#ifdef BOOST_PLATFORM_CONFIG\n#  include BOOST_PLATFORM_CONFIG\n#endif\n\n// get config suffix code:\n#include <boost/config/suffix.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n#endif  // BOOST_CONFIG_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/core/enable_if.hpp",
    "content": "// Boost enable_if library\n\n// Copyright 2003 (c) The Trustees of Indiana University.\n\n// Use, modification, and distribution is subject to the Boost Software\n// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n//    Authors: Jaakko Jarvi (jajarvi at osl.iu.edu)\n//             Jeremiah Willcock (jewillco at osl.iu.edu)\n//             Andrew Lumsdaine (lums at osl.iu.edu)\n\n\n#ifndef BOOST_CORE_ENABLE_IF_HPP\n#define BOOST_CORE_ENABLE_IF_HPP\n\n#include \"boost/config.hpp\"\n\n// Even the definition of enable_if causes problems on some compilers,\n// so it's macroed out for all compilers that do not support SFINAE\n\n#ifndef BOOST_NO_SFINAE\n\nnamespace boost\n{\n  template<typename T, typename R=void>\n  struct enable_if_has_type\n  {\n    typedef R type;\n  };\n \n  template <bool B, class T = void>\n  struct enable_if_c {\n    typedef T type;\n  };\n\n  template <class T>\n  struct enable_if_c<false, T> {};\n\n  template <class Cond, class T = void> \n  struct enable_if : public enable_if_c<Cond::value, T> {};\n\n  template <bool B, class T>\n  struct lazy_enable_if_c {\n    typedef typename T::type type;\n  };\n\n  template <class T>\n  struct lazy_enable_if_c<false, T> {};\n\n  template <class Cond, class T> \n  struct lazy_enable_if : public lazy_enable_if_c<Cond::value, T> {};\n\n\n  template <bool B, class T = void>\n  struct disable_if_c {\n    typedef T type;\n  };\n\n  template <class T>\n  struct disable_if_c<true, T> {};\n\n  template <class Cond, class T = void> \n  struct disable_if : public disable_if_c<Cond::value, T> {};\n\n  template <bool B, class T>\n  struct lazy_disable_if_c {\n    typedef typename T::type type;\n  };\n\n  template <class T>\n  struct lazy_disable_if_c<true, T> {};\n\n  template <class Cond, class T> \n  struct lazy_disable_if : public lazy_disable_if_c<Cond::value, T> {};\n\n} // namespace boost\n\n#else\n\nnamespace boost {\n\n  
namespace detail { typedef void enable_if_default_T; }\n\n  template <typename T>\n  struct enable_if_does_not_work_on_this_compiler;\n\n  template<typename T, typename R=void>\n  struct enable_if_has_type : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n  template <bool B, class T = detail::enable_if_default_T>\n  struct enable_if_c : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n  template <bool B, class T = detail::enable_if_default_T> \n  struct disable_if_c : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n  template <bool B, class T = detail::enable_if_default_T> \n  struct lazy_enable_if_c : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n  template <bool B, class T = detail::enable_if_default_T> \n  struct lazy_disable_if_c : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n  template <class Cond, class T = detail::enable_if_default_T> \n  struct enable_if : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n  template <class Cond, class T = detail::enable_if_default_T> \n  struct disable_if : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n  template <class Cond, class T = detail::enable_if_default_T> \n  struct lazy_enable_if : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n  template <class Cond, class T = detail::enable_if_default_T> \n  struct lazy_disable_if : enable_if_does_not_work_on_this_compiler<T>\n  { };\n\n} // namespace boost\n\n#endif // BOOST_NO_SFINAE\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/core/noncopyable.hpp",
    "content": "//  Boost noncopyable.hpp header file  --------------------------------------//\n\n//  (C) Copyright Beman Dawes 1999-2003. Distributed under the Boost\n//  Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org/libs/utility for documentation.\n\n#ifndef BOOST_CORE_NONCOPYABLE_HPP\n#define BOOST_CORE_NONCOPYABLE_HPP\n\n#include <boost/config.hpp>\n\nnamespace boost {\n\n//  Private copy constructor and copy assignment ensure classes derived from\n//  class noncopyable cannot be copied.\n\n//  Contributed by Dave Abrahams\n\nnamespace noncopyable_  // protection from unintended ADL\n{\n  class noncopyable\n  {\n  protected:\n#if !defined(BOOST_NO_CXX11_DEFAULTED_FUNCTIONS) && !defined(BOOST_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS)\n      BOOST_CONSTEXPR noncopyable() = default;\n      ~noncopyable() = default;\n#else\n      noncopyable() {}\n      ~noncopyable() {}\n#endif\n#if !defined(BOOST_NO_CXX11_DELETED_FUNCTIONS)\n      noncopyable( const noncopyable& ) = delete;\n      noncopyable& operator=( const noncopyable& ) = delete;\n#else\n  private:  // emphasize the following members are private\n      noncopyable( const noncopyable& );\n      noncopyable& operator=( const noncopyable& );\n#endif\n  };\n}\n\ntypedef noncopyable_::noncopyable noncopyable;\n\n} // namespace boost\n\n#endif  // BOOST_CORE_NONCOPYABLE_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/core/swap.hpp",
    "content": "// Copyright (C) 2007, 2008 Steven Watanabe, Joseph Gauterin, Niels Dekker\n//\n// Distributed under the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n// For more information, see http://www.boost.org\n\n\n#ifndef BOOST_CORE_SWAP_HPP\n#define BOOST_CORE_SWAP_HPP\n\n// Note: the implementation of this utility contains various workarounds:\n// - swap_impl is put outside the boost namespace, to avoid infinite\n// recursion (causing stack overflow) when swapping objects of a primitive\n// type.\n// - swap_impl has a using-directive, rather than a using-declaration,\n// because some compilers (including MSVC 7.1, Borland 5.9.3, and\n// Intel 8.1) don't do argument-dependent lookup when it has a\n// using-declaration instead.\n// - boost::swap has two template arguments, instead of one, to\n// avoid ambiguity when swapping objects of a Boost type that does\n// not have its own boost::swap overload.\n\n#include <utility> //for std::swap (C++11)\n#include <algorithm> //for std::swap (C++98)\n#include <cstddef> //for std::size_t\n#include <boost/config.hpp>\n\nnamespace boost_swap_impl\n{\n  template<class T>\n  BOOST_GPU_ENABLED\n  void swap_impl(T& left, T& right)\n  {\n    using namespace std;//use std::swap if argument dependent lookup fails\n    swap(left,right);\n  }\n\n  template<class T, std::size_t N>\n  BOOST_GPU_ENABLED\n  void swap_impl(T (& left)[N], T (& right)[N])\n  {\n    for (std::size_t i = 0; i < N; ++i)\n    {\n      ::boost_swap_impl::swap_impl(left[i], right[i]);\n    }\n  }\n}\n\nnamespace boost\n{\n  template<class T1, class T2>\n  BOOST_GPU_ENABLED\n  void swap(T1& left, T2& right)\n  {\n    ::boost_swap_impl::swap_impl(left, right);\n  }\n}\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/cstdint.hpp",
    "content": "//  boost cstdint.hpp header file  ------------------------------------------//\n\n//  (C) Copyright Beman Dawes 1999.\n//  (C) Copyright Jens Mauer 2001\n//  (C) Copyright John Maddock 2001\n//  Distributed under the Boost\n//  Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org/libs/integer for documentation.\n\n//  Revision History\n//   31 Oct 01  use BOOST_HAS_LONG_LONG to check for \"long long\" (Jens M.)\n//   16 Apr 01  check LONGLONG_MAX when looking for \"long long\" (Jens Maurer)\n//   23 Jan 01  prefer \"long\" over \"int\" for int32_t and intmax_t (Jens Maurer)\n//   12 Nov 00  Merged <boost/stdint.h> (Jens Maurer)\n//   23 Sep 00  Added INTXX_C macro support (John Maddock).\n//   22 Sep 00  Better 64-bit support (John Maddock)\n//   29 Jun 00  Reimplement to avoid including stdint.h within namespace boost\n//    8 Aug 99  Initial version (Beman Dawes)\n\n\n#ifndef BOOST_CSTDINT_HPP\n#define BOOST_CSTDINT_HPP\n\n//\n// Since we always define the INT#_C macros as per C++0x,\n// define __STDC_CONSTANT_MACROS so that <stdint.h> does the right\n// thing if possible, and so that the user knows that the macros\n// are actually defined as per C99.\n//\n#ifndef __STDC_CONSTANT_MACROS\n#  define __STDC_CONSTANT_MACROS\n#endif\n\n#include <boost/config.hpp>\n\n//\n// Note that GLIBC is a bit inconsistent about whether int64_t is defined or not\n// depending upon what headers happen to have been included first...\n// so we disable use of stdint.h when GLIBC does not define __GLIBC_HAVE_LONG_LONG.\n// See https://svn.boost.org/trac/boost/ticket/3548 and http://sources.redhat.com/bugzilla/show_bug.cgi?id=10990\n//\n#if defined(BOOST_HAS_STDINT_H)\t\t\t\t\t\\\n  && (!defined(__GLIBC__)\t\t\t\t\t\\\n      || defined(__GLIBC_HAVE_LONG_LONG)\t\t\t\\\n      || (defined(__GLIBC__) && ((__GLIBC__ > 2) || ((__GLIBC__ == 2) && (__GLIBC_MINOR__ >= 
17)))))\n\n// The following #include is an implementation artifact; not part of interface.\n# ifdef __hpux\n// HP-UX has a vaguely nice <stdint.h> in a non-standard location\n#   include <inttypes.h>\n#   ifdef __STDC_32_MODE__\n      // this is triggered with GCC, because it defines __cplusplus < 199707L\n#     define BOOST_NO_INT64_T\n#   endif\n# elif defined(__FreeBSD__) || defined(__IBMCPP__) || defined(_AIX)\n#   include <inttypes.h>\n# else\n#   include <stdint.h>\n\n// There is a bug in Cygwin two _C macros\n#   if defined(__STDC_CONSTANT_MACROS) && defined(__CYGWIN__)\n#     undef INTMAX_C\n#     undef UINTMAX_C\n#     define INTMAX_C(c) c##LL\n#     define UINTMAX_C(c) c##ULL\n#   endif\n\n# endif\n\n#if defined(__QNX__) && defined(__EXT_QNX) \n\n// QNX (Dinkumware stdlib) defines these as non-standard names.\n// Reflect to the standard names.\n\ntypedef ::intleast8_t int_least8_t;\ntypedef ::intfast8_t int_fast8_t;\ntypedef ::uintleast8_t uint_least8_t;\ntypedef ::uintfast8_t uint_fast8_t;\n\ntypedef ::intleast16_t int_least16_t;\ntypedef ::intfast16_t int_fast16_t;\ntypedef ::uintleast16_t uint_least16_t;\ntypedef ::uintfast16_t uint_fast16_t;\n\ntypedef ::intleast32_t int_least32_t;\ntypedef ::intfast32_t int_fast32_t;\ntypedef ::uintleast32_t uint_least32_t;\ntypedef ::uintfast32_t uint_fast32_t;\n\n# ifndef BOOST_NO_INT64_T\n\ntypedef ::intleast64_t int_least64_t;\ntypedef ::intfast64_t int_fast64_t;\ntypedef ::uintleast64_t uint_least64_t;\ntypedef ::uintfast64_t uint_fast64_t;\n\n# endif\n\n#endif\n\nnamespace boost\n{\n\n  using ::int8_t;\n  using ::int_least8_t;\n  using ::int_fast8_t;\n  using ::uint8_t;\n  using ::uint_least8_t;\n  using ::uint_fast8_t;\n\n  using ::int16_t;\n  using ::int_least16_t;\n  using ::int_fast16_t;\n  using ::uint16_t;\n  using ::uint_least16_t;\n  using ::uint_fast16_t;\n\n  using ::int32_t;\n  using ::int_least32_t;\n  using ::int_fast32_t;\n  using ::uint32_t;\n  using ::uint_least32_t;\n  using 
::uint_fast32_t;\n\n# ifndef BOOST_NO_INT64_T\n\n  using ::int64_t;\n  using ::int_least64_t;\n  using ::int_fast64_t;\n  using ::uint64_t;\n  using ::uint_least64_t;\n  using ::uint_fast64_t;\n\n# endif\n\n  using ::intmax_t;\n  using ::uintmax_t;\n\n} // namespace boost\n\n#elif defined(__FreeBSD__) && (__FreeBSD__ <= 4) || defined(__osf__) || defined(__VMS) || defined(__SOLARIS9__) || defined(__NetBSD__)\n// FreeBSD and Tru64 have an <inttypes.h> that contains much of what we need.\n# include <inttypes.h>\n\nnamespace boost {\n\n  using ::int8_t;\n  typedef int8_t int_least8_t;\n  typedef int8_t int_fast8_t;\n  using ::uint8_t;\n  typedef uint8_t uint_least8_t;\n  typedef uint8_t uint_fast8_t;\n\n  using ::int16_t;\n  typedef int16_t int_least16_t;\n  typedef int16_t int_fast16_t;\n  using ::uint16_t;\n  typedef uint16_t uint_least16_t;\n  typedef uint16_t uint_fast16_t;\n\n  using ::int32_t;\n  typedef int32_t int_least32_t;\n  typedef int32_t int_fast32_t;\n  using ::uint32_t;\n  typedef uint32_t uint_least32_t;\n  typedef uint32_t uint_fast32_t;\n\n# ifndef BOOST_NO_INT64_T\n\n  using ::int64_t;\n  typedef int64_t int_least64_t;\n  typedef int64_t int_fast64_t;\n  using ::uint64_t;\n  typedef uint64_t uint_least64_t;\n  typedef uint64_t uint_fast64_t;\n\n  typedef int64_t intmax_t;\n  typedef uint64_t uintmax_t;\n\n# else\n\n  typedef int32_t intmax_t;\n  typedef uint32_t uintmax_t;\n\n# endif\n\n} // namespace boost\n\n#else  // BOOST_HAS_STDINT_H\n\n# include <boost/limits.hpp> // implementation artifact; not part of interface\n# include <limits.h>         // needed for limits macros\n\n\nnamespace boost\n{\n\n//  These are fairly safe guesses for some 16-bit, and most 32-bit and 64-bit\n//  platforms.  For other systems, they will have to be hand tailored.\n//\n//  Because the fast types are assumed to be the same as the undecorated types,\n//  it may be possible to hand tailor a more efficient implementation.  
Such\n//  an optimization may be illusionary; on the Intel x86-family 386 on, for\n//  example, byte arithmetic and load/stores are as fast as \"int\" sized ones.\n\n//  8-bit types  ------------------------------------------------------------//\n\n# if UCHAR_MAX == 0xff\n     typedef signed char     int8_t;\n     typedef signed char     int_least8_t;\n     typedef signed char     int_fast8_t;\n     typedef unsigned char   uint8_t;\n     typedef unsigned char   uint_least8_t;\n     typedef unsigned char   uint_fast8_t;\n# else\n#    error defaults not correct; you must hand modify boost/cstdint.hpp\n# endif\n\n//  16-bit types  -----------------------------------------------------------//\n\n# if USHRT_MAX == 0xffff\n#  if defined(__crayx1)\n     // The Cray X1 has a 16-bit short, however it is not recommend\n     // for use in performance critical code.\n     typedef short           int16_t;\n     typedef short           int_least16_t;\n     typedef int             int_fast16_t;\n     typedef unsigned short  uint16_t;\n     typedef unsigned short  uint_least16_t;\n     typedef unsigned int    uint_fast16_t;\n#  else\n     typedef short           int16_t;\n     typedef short           int_least16_t;\n     typedef short           int_fast16_t;\n     typedef unsigned short  uint16_t;\n     typedef unsigned short  uint_least16_t;\n     typedef unsigned short  uint_fast16_t;\n#  endif\n# elif (USHRT_MAX == 0xffffffff) && defined(__MTA__)\n      // On MTA / XMT short is 32 bits unless the -short16 compiler flag is specified\n      // MTA / XMT does support the following non-standard integer types\n      typedef __short16           int16_t;\n      typedef __short16           int_least16_t;\n      typedef __short16           int_fast16_t;\n      typedef unsigned __short16  uint16_t;\n      typedef unsigned __short16  uint_least16_t;\n      typedef unsigned __short16  uint_fast16_t;\n# elif (USHRT_MAX == 0xffffffff) && defined(CRAY)\n     // no 16-bit types on Cray:\n     
typedef short           int_least16_t;\n     typedef short           int_fast16_t;\n     typedef unsigned short  uint_least16_t;\n     typedef unsigned short  uint_fast16_t;\n# else\n#    error defaults not correct; you must hand modify boost/cstdint.hpp\n# endif\n\n//  32-bit types  -----------------------------------------------------------//\n\n# if UINT_MAX == 0xffffffff\n     typedef int             int32_t;\n     typedef int             int_least32_t;\n     typedef int             int_fast32_t;\n     typedef unsigned int    uint32_t;\n     typedef unsigned int    uint_least32_t;\n     typedef unsigned int    uint_fast32_t;\n# elif (USHRT_MAX == 0xffffffff)\n     typedef short             int32_t;\n     typedef short             int_least32_t;\n     typedef short             int_fast32_t;\n     typedef unsigned short    uint32_t;\n     typedef unsigned short    uint_least32_t;\n     typedef unsigned short    uint_fast32_t;\n# elif ULONG_MAX == 0xffffffff\n     typedef long            int32_t;\n     typedef long            int_least32_t;\n     typedef long            int_fast32_t;\n     typedef unsigned long   uint32_t;\n     typedef unsigned long   uint_least32_t;\n     typedef unsigned long   uint_fast32_t;\n# elif (UINT_MAX == 0xffffffffffffffff) && defined(__MTA__)\n      // Integers are 64 bits on the MTA / XMT\n      typedef __int32           int32_t;\n      typedef __int32           int_least32_t;\n      typedef __int32           int_fast32_t;\n      typedef unsigned __int32  uint32_t;\n      typedef unsigned __int32  uint_least32_t;\n      typedef unsigned __int32  uint_fast32_t;\n# else\n#    error defaults not correct; you must hand modify boost/cstdint.hpp\n# endif\n\n//  64-bit types + intmax_t and uintmax_t  ----------------------------------//\n\n# if defined(BOOST_HAS_LONG_LONG) && \\\n   !defined(BOOST_MSVC) && !defined(__BORLANDC__) && \\\n   (!defined(__GLIBCPP__) || defined(_GLIBCPP_USE_LONG_LONG)) && \\\n   (defined(ULLONG_MAX) || 
defined(ULONG_LONG_MAX) || defined(ULONGLONG_MAX))\n#    if defined(__hpux)\n     // HP-UX's value of ULONG_LONG_MAX is unusable in preprocessor expressions\n#    elif (defined(ULLONG_MAX) && ULLONG_MAX == 18446744073709551615ULL) || (defined(ULONG_LONG_MAX) && ULONG_LONG_MAX == 18446744073709551615ULL) || (defined(ULONGLONG_MAX) && ULONGLONG_MAX == 18446744073709551615ULL)\n                                                                 // 2**64 - 1\n#    else\n#       error defaults not correct; you must hand modify boost/cstdint.hpp\n#    endif\n\n     typedef  ::boost::long_long_type            intmax_t;\n     typedef  ::boost::ulong_long_type   uintmax_t;\n     typedef  ::boost::long_long_type            int64_t;\n     typedef  ::boost::long_long_type            int_least64_t;\n     typedef  ::boost::long_long_type            int_fast64_t;\n     typedef  ::boost::ulong_long_type   uint64_t;\n     typedef  ::boost::ulong_long_type   uint_least64_t;\n     typedef  ::boost::ulong_long_type   uint_fast64_t;\n\n# elif ULONG_MAX != 0xffffffff\n\n#    if ULONG_MAX == 18446744073709551615 // 2**64 - 1\n     typedef long                 intmax_t;\n     typedef unsigned long        uintmax_t;\n     typedef long                 int64_t;\n     typedef long                 int_least64_t;\n     typedef long                 int_fast64_t;\n     typedef unsigned long        uint64_t;\n     typedef unsigned long        uint_least64_t;\n     typedef unsigned long        uint_fast64_t;\n#    else\n#       error defaults not correct; you must hand modify boost/cstdint.hpp\n#    endif\n# elif defined(__GNUC__) && defined(BOOST_HAS_LONG_LONG)\n     __extension__ typedef long long            intmax_t;\n     __extension__ typedef unsigned long long   uintmax_t;\n     __extension__ typedef long long            int64_t;\n     __extension__ typedef long long            int_least64_t;\n     __extension__ typedef long long            int_fast64_t;\n     __extension__ typedef unsigned long 
long   uint64_t;\n     __extension__ typedef unsigned long long   uint_least64_t;\n     __extension__ typedef unsigned long long   uint_fast64_t;\n# elif defined(BOOST_HAS_MS_INT64)\n     //\n     // we have Borland/Intel/Microsoft __int64:\n     //\n     typedef __int64             intmax_t;\n     typedef unsigned __int64    uintmax_t;\n     typedef __int64             int64_t;\n     typedef __int64             int_least64_t;\n     typedef __int64             int_fast64_t;\n     typedef unsigned __int64    uint64_t;\n     typedef unsigned __int64    uint_least64_t;\n     typedef unsigned __int64    uint_fast64_t;\n# else // assume no 64-bit integers\n#  define BOOST_NO_INT64_T\n     typedef int32_t              intmax_t;\n     typedef uint32_t             uintmax_t;\n# endif\n\n} // namespace boost\n\n\n#endif // BOOST_HAS_STDINT_H\n\n// intptr_t/uintptr_t are defined separately because they are optional and not universally available\n#if defined(BOOST_WINDOWS) && !defined(_WIN32_WCE) && !defined(BOOST_HAS_STDINT_H)\n// Older MSVC don't have stdint.h and have intptr_t/uintptr_t defined in stddef.h\n#include <stddef.h>\n#endif\n\n// PGI seems to not support intptr_t/uintptr_t properly. 
BOOST_HAS_STDINT_H is not defined for this compiler by Boost.Config.\n#if !defined(__PGIC__)\n\n#if (defined(BOOST_WINDOWS) && !defined(_WIN32_WCE)) \\\n    || (defined(_XOPEN_UNIX) && (_XOPEN_UNIX+0 > 0) && !defined(__UCLIBC__)) \\\n    || defined(__CYGWIN__) \\\n    || defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__) \\\n    || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(sun)\n\nnamespace boost {\n    using ::intptr_t;\n    using ::uintptr_t;\n}\n#define BOOST_HAS_INTPTR_T\n\n// Clang pretends to be GCC, so it'll match this condition\n#elif defined(__GNUC__) && defined(__INTPTR_TYPE__) && defined(__UINTPTR_TYPE__)\n\nnamespace boost {\n    typedef __INTPTR_TYPE__ intptr_t;\n    typedef __UINTPTR_TYPE__ uintptr_t;\n}\n#define BOOST_HAS_INTPTR_T\n\n#endif\n\n#endif // !defined(__PGIC__)\n\n#endif // BOOST_CSTDINT_HPP\n\n\n/****************************************************\n\nMacro definition section:\n\nAdded 23rd September 2000 (John Maddock).\nModified 11th September 2001 to be excluded when\nBOOST_HAS_STDINT_H is defined (John Maddock).\nModified 11th Dec 2009 to always define the\nINT#_C macros if they're not already defined (John Maddock).\n\n******************************************************/\n\n#if !defined(BOOST__STDC_CONSTANT_MACROS_DEFINED) && \\\n   (!defined(INT8_C) || !defined(INT16_C) || !defined(INT32_C) || !defined(INT64_C))\n//\n// For the following code we get several warnings along the lines of:\n//\n// boost/cstdint.hpp:428:35: error: use of C99 long long integer constant\n//\n// So we declare this a system header to suppress these warnings.\n//\n#if defined(__GNUC__) && (__GNUC__ >= 4)\n#pragma GCC system_header\n#endif\n\n#include <limits.h>\n# define BOOST__STDC_CONSTANT_MACROS_DEFINED\n# if defined(BOOST_HAS_MS_INT64)\n//\n// Borland/Intel/Microsoft compilers have width specific suffixes:\n//\n#ifndef INT8_C\n#  define INT8_C(value)     
value##i8\n#endif\n#ifndef INT16_C\n#  define INT16_C(value)    value##i16\n#endif\n#ifndef INT32_C\n#  define INT32_C(value)    value##i32\n#endif\n#ifndef INT64_C\n#  define INT64_C(value)    value##i64\n#endif\n#  ifdef __BORLANDC__\n    // Borland bug: appending ui8 makes the type a signed char\n#   define UINT8_C(value)    static_cast<unsigned char>(value##u)\n#  else\n#   define UINT8_C(value)    value##ui8\n#  endif\n#ifndef UINT16_C\n#  define UINT16_C(value)   value##ui16\n#endif\n#ifndef UINT32_C\n#  define UINT32_C(value)   value##ui32\n#endif\n#ifndef UINT64_C\n#  define UINT64_C(value)   value##ui64\n#endif\n#ifndef INTMAX_C\n#  define INTMAX_C(value)   value##i64\n#  define UINTMAX_C(value)  value##ui64\n#endif\n\n# else\n//  do it the old fashioned way:\n\n//  8-bit types  ------------------------------------------------------------//\n\n#  if (UCHAR_MAX == 0xff) && !defined(INT8_C)\n#   define INT8_C(value) static_cast<boost::int8_t>(value)\n#   define UINT8_C(value) static_cast<boost::uint8_t>(value##u)\n#  endif\n\n//  16-bit types  -----------------------------------------------------------//\n\n#  if (USHRT_MAX == 0xffff) && !defined(INT16_C)\n#   define INT16_C(value) static_cast<boost::int16_t>(value)\n#   define UINT16_C(value) static_cast<boost::uint16_t>(value##u)\n#  endif\n\n//  32-bit types  -----------------------------------------------------------//\n#ifndef INT32_C\n#  if (UINT_MAX == 0xffffffff)\n#   define INT32_C(value) value\n#   define UINT32_C(value) value##u\n#  elif ULONG_MAX == 0xffffffff\n#   define INT32_C(value) value##L\n#   define UINT32_C(value) value##uL\n#  endif\n#endif\n\n//  64-bit types + intmax_t and uintmax_t  ----------------------------------//\n#ifndef INT64_C\n#  if defined(BOOST_HAS_LONG_LONG) && \\\n    (defined(ULLONG_MAX) || defined(ULONG_LONG_MAX) || defined(ULONGLONG_MAX) || defined(_ULLONG_MAX) || defined(_LLONG_MAX))\n\n#    if defined(__hpux)\n        // HP-UX's value of ULONG_LONG_MAX is unusable 
in preprocessor expressions\n#       define INT64_C(value) value##LL\n#       define UINT64_C(value) value##uLL\n#    elif (defined(ULLONG_MAX) && ULLONG_MAX == 18446744073709551615ULL) ||  \\\n        (defined(ULONG_LONG_MAX) && ULONG_LONG_MAX == 18446744073709551615ULL) ||  \\\n        (defined(ULONGLONG_MAX) && ULONGLONG_MAX == 18446744073709551615ULL) || \\\n        (defined(_ULLONG_MAX) && _ULLONG_MAX == 18446744073709551615ULL) || \\\n        (defined(_LLONG_MAX) && _LLONG_MAX == 9223372036854775807LL)\n\n#       define INT64_C(value) value##LL\n#       define UINT64_C(value) value##uLL\n#    else\n#       error defaults not correct; you must hand modify boost/cstdint.hpp\n#    endif\n#  elif ULONG_MAX != 0xffffffff\n\n#    if ULONG_MAX == 18446744073709551615U // 2**64 - 1\n#       define INT64_C(value) value##L\n#       define UINT64_C(value) value##uL\n#    else\n#       error defaults not correct; you must hand modify boost/cstdint.hpp\n#    endif\n#  elif defined(BOOST_HAS_LONG_LONG)\n     // Usual macros not defined, work things out for ourselves:\n#    if(~0uLL == 18446744073709551615ULL)\n#       define INT64_C(value) value##LL\n#       define UINT64_C(value) value##uLL\n#    else\n#       error defaults not correct; you must hand modify boost/cstdint.hpp\n#    endif\n#  else\n#    error defaults not correct; you must hand modify boost/cstdint.hpp\n#  endif\n\n#  ifdef BOOST_NO_INT64_T\n#   define INTMAX_C(value) INT32_C(value)\n#   define UINTMAX_C(value) UINT32_C(value)\n#  else\n#   define INTMAX_C(value) INT64_C(value)\n#   define UINTMAX_C(value) UINT64_C(value)\n#  endif\n#endif\n# endif // Borland/Microsoft specific width suffixes\n\n#endif // INT#_C macros.\n\n\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/current_function.hpp",
    "content": "#ifndef BOOST_CURRENT_FUNCTION_HPP_INCLUDED\n#define BOOST_CURRENT_FUNCTION_HPP_INCLUDED\n\n// MS compatible compilers support #pragma once\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1020)\n# pragma once\n#endif\n\n//\n//  boost/current_function.hpp - BOOST_CURRENT_FUNCTION\n//\n//  Copyright (c) 2002 Peter Dimov and Multi Media Ltd.\n//\n//  Distributed under the Boost Software License, Version 1.0.\n//  See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt\n//\n//  http://www.boost.org/libs/assert/current_function.html\n//\n\nnamespace boost\n{\n\nnamespace detail\n{\n\ninline void current_function_helper()\n{\n\n#if defined(__GNUC__) || (defined(__MWERKS__) && (__MWERKS__ >= 0x3000)) || (defined(__ICC) && (__ICC >= 600)) || defined(__ghs__)\n\n# define BOOST_CURRENT_FUNCTION __PRETTY_FUNCTION__\n\n#elif defined(__DMC__) && (__DMC__ >= 0x810)\n\n# define BOOST_CURRENT_FUNCTION __PRETTY_FUNCTION__\n\n#elif defined(__FUNCSIG__)\n\n# define BOOST_CURRENT_FUNCTION __FUNCSIG__\n\n#elif (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 600)) || (defined(__IBMCPP__) && (__IBMCPP__ >= 500))\n\n# define BOOST_CURRENT_FUNCTION __FUNCTION__\n\n#elif defined(__BORLANDC__) && (__BORLANDC__ >= 0x550)\n\n# define BOOST_CURRENT_FUNCTION __FUNC__\n\n#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)\n\n# define BOOST_CURRENT_FUNCTION __func__\n\n#elif defined(__cplusplus) && (__cplusplus >= 201103)\n\n# define BOOST_CURRENT_FUNCTION __func__\n\n#else\n\n# define BOOST_CURRENT_FUNCTION \"(unknown)\"\n\n#endif\n\n}\n\n} // namespace detail\n\n} // namespace boost\n\n#endif // #ifndef BOOST_CURRENT_FUNCTION_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/detail/is_xxx.hpp",
    "content": "// Copyright David Abrahams 2005. Distributed under the Boost\n// Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n#ifndef BOOST_DETAIL_IS_XXX_DWA20051011_HPP\n# define BOOST_DETAIL_IS_XXX_DWA20051011_HPP\n\n# include <boost/config.hpp>\n# include <boost/mpl/bool.hpp>\n# include <boost/preprocessor/enum_params.hpp>\n\n\n#  define BOOST_DETAIL_IS_XXX_DEF(name, qualified_name, nargs)  \\\ntemplate <class T>                                              \\\nstruct is_##name : mpl::false_                                  \\\n{                                                               \\\n};                                                              \\\n                                                                \\\ntemplate < BOOST_PP_ENUM_PARAMS_Z(1, nargs, class T) >          \\\nstruct is_##name<                                               \\\n   qualified_name< BOOST_PP_ENUM_PARAMS_Z(1, nargs, T) >        \\\n>                                                               \\\n   : mpl::true_                                                 \\\n{                                                               \\\n};\n\n\n#endif // BOOST_DETAIL_IS_XXX_DWA20051011_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/detail/iterator.hpp",
    "content": "// (C) Copyright David Abrahams 2002.\n// Distributed under the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef ITERATOR_DWA122600_HPP_\n#define ITERATOR_DWA122600_HPP_\n\n// This header is obsolete and will be deprecated.\n\n#include <iterator>\n\nnamespace boost\n{\n\nnamespace detail\n{\n\nusing std::iterator_traits;\nusing std::distance;\n\n} // namespace detail\n\n} // namespace boost\n\n#endif // ITERATOR_DWA122600_HPP_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/detail/workaround.hpp",
    "content": "// Copyright David Abrahams 2002.\n// Distributed under the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n#ifndef WORKAROUND_DWA2002126_HPP\n# define WORKAROUND_DWA2002126_HPP\n\n// Compiler/library version workaround macro\n//\n// Usage:\n//\n//     #if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n//        // workaround for eVC4 and VC6\n//        ... // workaround code here\n//     #endif\n//\n// When BOOST_STRICT_CONFIG is defined, expands to 0. Otherwise, the\n// first argument must be undefined or expand to a numeric\n// value. The above expands to:\n//\n//     (BOOST_MSVC) != 0 && (BOOST_MSVC) < 1300\n//\n// When used for workarounds that apply to the latest known version \n// and all earlier versions of a compiler, the following convention \n// should be observed:\n//\n//     #if BOOST_WORKAROUND(BOOST_MSVC, BOOST_TESTED_AT(1301))\n//\n// The version number in this case corresponds to the last version in\n// which the workaround was known to have been required. When\n// BOOST_DETECT_OUTDATED_WORKAROUNDS is not the defined, the macro\n// BOOST_TESTED_AT(x) expands to \"!= 0\", which effectively activates\n// the workaround for any version of the compiler. When\n// BOOST_DETECT_OUTDATED_WORKAROUNDS is defined, a compiler warning or\n// error will be issued if the compiler version exceeds the argument\n// to BOOST_TESTED_AT().  
This can be used to locate workarounds which\n// may be obsoleted by newer versions.\n\n# ifndef BOOST_STRICT_CONFIG\n\n#include <boost/config.hpp>\n\n#ifndef __BORLANDC__\n#define __BORLANDC___WORKAROUND_GUARD 1\n#else\n#define __BORLANDC___WORKAROUND_GUARD 0\n#endif\n#ifndef __CODEGEARC__\n#define __CODEGEARC___WORKAROUND_GUARD 1\n#else\n#define __CODEGEARC___WORKAROUND_GUARD 0\n#endif\n#ifndef _MSC_VER\n#define _MSC_VER_WORKAROUND_GUARD 1\n#else\n#define _MSC_VER_WORKAROUND_GUARD 0\n#endif\n#ifndef _MSC_FULL_VER\n#define _MSC_FULL_VER_WORKAROUND_GUARD 1\n#else\n#define _MSC_FULL_VER_WORKAROUND_GUARD 0\n#endif\n#ifndef BOOST_MSVC\n#define BOOST_MSVC_WORKAROUND_GUARD 1\n#else\n#define BOOST_MSVC_WORKAROUND_GUARD 0\n#endif\n#ifndef BOOST_MSVC_FULL_VER\n#define BOOST_MSVC_FULL_VER_WORKAROUND_GUARD 1\n#else\n#define BOOST_MSVC_FULL_VER_WORKAROUND_GUARD 0\n#endif\n#ifndef __GNUC__\n#define __GNUC___WORKAROUND_GUARD 1\n#else\n#define __GNUC___WORKAROUND_GUARD 0\n#endif\n#ifndef __GNUC_MINOR__\n#define __GNUC_MINOR___WORKAROUND_GUARD 1\n#else\n#define __GNUC_MINOR___WORKAROUND_GUARD 0\n#endif\n#ifndef __GNUC_PATCHLEVEL__\n#define __GNUC_PATCHLEVEL___WORKAROUND_GUARD 1\n#else\n#define __GNUC_PATCHLEVEL___WORKAROUND_GUARD 0\n#endif\n#ifndef __IBMCPP__\n#define __IBMCPP___WORKAROUND_GUARD 1\n#else\n#define __IBMCPP___WORKAROUND_GUARD 0\n#endif\n#ifndef __SUNPRO_CC\n#define __SUNPRO_CC_WORKAROUND_GUARD 1\n#else\n#define __SUNPRO_CC_WORKAROUND_GUARD 0\n#endif\n#ifndef __DECCXX_VER\n#define __DECCXX_VER_WORKAROUND_GUARD 1\n#else\n#define __DECCXX_VER_WORKAROUND_GUARD 0\n#endif\n#ifndef __MWERKS__\n#define __MWERKS___WORKAROUND_GUARD 1\n#else\n#define __MWERKS___WORKAROUND_GUARD 0\n#endif\n#ifndef __EDG__\n#define __EDG___WORKAROUND_GUARD 1\n#else\n#define __EDG___WORKAROUND_GUARD 0\n#endif\n#ifndef __EDG_VERSION__\n#define __EDG_VERSION___WORKAROUND_GUARD 1\n#else\n#define __EDG_VERSION___WORKAROUND_GUARD 0\n#endif\n#ifndef __HP_aCC\n#define __HP_aCC_WORKAROUND_GUARD 
1\n#else\n#define __HP_aCC_WORKAROUND_GUARD 0\n#endif\n#ifndef __hpxstd98\n#define __hpxstd98_WORKAROUND_GUARD 1\n#else\n#define __hpxstd98_WORKAROUND_GUARD 0\n#endif\n#ifndef _CRAYC\n#define _CRAYC_WORKAROUND_GUARD 1\n#else\n#define _CRAYC_WORKAROUND_GUARD 0\n#endif\n#ifndef __DMC__\n#define __DMC___WORKAROUND_GUARD 1\n#else\n#define __DMC___WORKAROUND_GUARD 0\n#endif\n#ifndef MPW_CPLUS\n#define MPW_CPLUS_WORKAROUND_GUARD 1\n#else\n#define MPW_CPLUS_WORKAROUND_GUARD 0\n#endif\n#ifndef __COMO__\n#define __COMO___WORKAROUND_GUARD 1\n#else\n#define __COMO___WORKAROUND_GUARD 0\n#endif\n#ifndef __COMO_VERSION__\n#define __COMO_VERSION___WORKAROUND_GUARD 1\n#else\n#define __COMO_VERSION___WORKAROUND_GUARD 0\n#endif\n#ifndef __INTEL_COMPILER\n#define __INTEL_COMPILER_WORKAROUND_GUARD 1\n#else\n#define __INTEL_COMPILER_WORKAROUND_GUARD 0\n#endif\n#ifndef __ICL\n#define __ICL_WORKAROUND_GUARD 1\n#else\n#define __ICL_WORKAROUND_GUARD 0\n#endif\n#ifndef _COMPILER_VERSION\n#define _COMPILER_VERSION_WORKAROUND_GUARD 1\n#else\n#define _COMPILER_VERSION_WORKAROUND_GUARD 0\n#endif\n\n#ifndef _RWSTD_VER\n#define _RWSTD_VER_WORKAROUND_GUARD 1\n#else\n#define _RWSTD_VER_WORKAROUND_GUARD 0\n#endif\n#ifndef BOOST_RWSTD_VER\n#define BOOST_RWSTD_VER_WORKAROUND_GUARD 1\n#else\n#define BOOST_RWSTD_VER_WORKAROUND_GUARD 0\n#endif\n#ifndef __GLIBCPP__\n#define __GLIBCPP___WORKAROUND_GUARD 1\n#else\n#define __GLIBCPP___WORKAROUND_GUARD 0\n#endif\n#ifndef _GLIBCXX_USE_C99_FP_MACROS_DYNAMIC\n#define _GLIBCXX_USE_C99_FP_MACROS_DYNAMIC_WORKAROUND_GUARD 1\n#else\n#define _GLIBCXX_USE_C99_FP_MACROS_DYNAMIC_WORKAROUND_GUARD 0\n#endif\n#ifndef __SGI_STL_PORT\n#define __SGI_STL_PORT_WORKAROUND_GUARD 1\n#else\n#define __SGI_STL_PORT_WORKAROUND_GUARD 0\n#endif\n#ifndef _STLPORT_VERSION\n#define _STLPORT_VERSION_WORKAROUND_GUARD 1\n#else\n#define _STLPORT_VERSION_WORKAROUND_GUARD 0\n#endif\n#ifndef __LIBCOMO_VERSION__\n#define __LIBCOMO_VERSION___WORKAROUND_GUARD 1\n#else\n#define 
__LIBCOMO_VERSION___WORKAROUND_GUARD 0\n#endif\n#ifndef _CPPLIB_VER\n#define _CPPLIB_VER_WORKAROUND_GUARD 1\n#else\n#define _CPPLIB_VER_WORKAROUND_GUARD 0\n#endif\n\n#ifndef BOOST_INTEL_CXX_VERSION\n#define BOOST_INTEL_CXX_VERSION_WORKAROUND_GUARD 1\n#else\n#define BOOST_INTEL_CXX_VERSION_WORKAROUND_GUARD 0\n#endif\n#ifndef BOOST_INTEL_WIN\n#define BOOST_INTEL_WIN_WORKAROUND_GUARD 1\n#else\n#define BOOST_INTEL_WIN_WORKAROUND_GUARD 0\n#endif\n#ifndef BOOST_DINKUMWARE_STDLIB\n#define BOOST_DINKUMWARE_STDLIB_WORKAROUND_GUARD 1\n#else\n#define BOOST_DINKUMWARE_STDLIB_WORKAROUND_GUARD 0\n#endif\n#ifndef BOOST_INTEL\n#define BOOST_INTEL_WORKAROUND_GUARD 1\n#else\n#define BOOST_INTEL_WORKAROUND_GUARD 0\n#endif\n// Always define to zero, if it's used it'll be defined my MPL:\n#define BOOST_MPL_CFG_GCC_WORKAROUND_GUARD 0\n\n#  define BOOST_WORKAROUND(symbol, test)                \\\n         ((symbol ## _WORKAROUND_GUARD + 0 == 0) &&     \\\n         (symbol != 0) && (1 % (( (symbol test) ) + 1)))\n//                              ^ ^           ^ ^\n// The extra level of parenthesis nesting above, along with the\n// BOOST_OPEN_PAREN indirection below, is required to satisfy the\n// broken preprocessor in MWCW 8.3 and earlier.\n//\n// The basic mechanism works as follows:\n//      (symbol test) + 1        =>   if (symbol test) then 2 else 1\n//      1 % ((symbol test) + 1)  =>   if (symbol test) then 1 else 0\n//\n// The complication with % is for cooperation with BOOST_TESTED_AT().\n// When \"test\" is BOOST_TESTED_AT(x) and\n// BOOST_DETECT_OUTDATED_WORKAROUNDS is #defined,\n//\n//      symbol test              =>   if (symbol <= x) then 1 else -1\n//      (symbol test) + 1        =>   if (symbol <= x) then 2 else 0\n//      1 % ((symbol test) + 1)  =>   if (symbol <= x) then 1 else divide-by-zero\n//\n\n#  ifdef BOOST_DETECT_OUTDATED_WORKAROUNDS\n#   define BOOST_OPEN_PAREN (\n#   define BOOST_TESTED_AT(value)  > value) ?(-1): BOOST_OPEN_PAREN 1\n#  else\n#   define 
BOOST_TESTED_AT(value) != ((value)-(value))\n#  endif\n\n# else\n\n#  define BOOST_WORKAROUND(symbol, test) 0\n\n# endif \n\n#endif // WORKAROUND_DWA2002126_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/exception/exception.hpp",
    "content": "//Copyright (c) 2006-2009 Emil Dotchevski and Reverge Studios, Inc.\n\n//Distributed under the Boost Software License, Version 1.0. (See accompanying\n//file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef UUID_274DA366004E11DCB1DDFE2E56D89593\n#define UUID_274DA366004E11DCB1DDFE2E56D89593\n#if (__GNUC__*100+__GNUC_MINOR__>301) && !defined(BOOST_EXCEPTION_ENABLE_WARNINGS)\n#pragma GCC system_header\n#endif\n#if defined(_MSC_VER) && !defined(BOOST_EXCEPTION_ENABLE_WARNINGS)\n#pragma warning(push,1)\n#endif\n\nnamespace\nboost\n    {\n    namespace\n    exception_detail\n        {\n        template <class T>\n        class\n        refcount_ptr\n            {\n            public:\n\n            refcount_ptr():\n                px_(0)\n                {\n                }\n\n            ~refcount_ptr()\n                {\n                release();\n                }\n\n            refcount_ptr( refcount_ptr const & x ):\n                px_(x.px_)\n                {\n                add_ref();\n                }\n\n            refcount_ptr &\n            operator=( refcount_ptr const & x )\n                {\n                adopt(x.px_);\n                return *this;\n                }\n\n            void\n            adopt( T * px )\n                {\n                release();\n                px_=px;\n                add_ref();\n                }\n\n            T *\n            get() const\n                {\n                return px_;\n                }\n\n            private:\n\n            T * px_;\n\n            void\n            add_ref()\n                {\n                if( px_ )\n                    px_->add_ref();\n                }\n\n            void\n            release()\n                {\n                if( px_ && px_->release() )\n                    px_=0;\n                }\n            };\n        }\n\n    ////////////////////////////////////////////////////////////////////////\n\n    template 
<class Tag,class T>\n    class error_info;\n\n    typedef error_info<struct throw_function_,char const *> throw_function;\n    typedef error_info<struct throw_file_,char const *> throw_file;\n    typedef error_info<struct throw_line_,int> throw_line;\n\n    template <>\n    class\n    error_info<throw_function_,char const *>\n        {\n        public:\n        typedef char const * value_type;\n        value_type v_;\n        explicit\n        error_info( value_type v ):\n            v_(v)\n            {\n            }\n        };\n\n    template <>\n    class\n    error_info<throw_file_,char const *>\n        {\n        public:\n        typedef char const * value_type;\n        value_type v_;\n        explicit\n        error_info( value_type v ):\n            v_(v)\n            {\n            }\n        };\n\n    template <>\n    class\n    error_info<throw_line_,int>\n        {\n        public:\n        typedef int value_type;\n        value_type v_;\n        explicit\n        error_info( value_type v ):\n            v_(v)\n            {\n            }\n        };\n\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility push (default)\n# endif\n#endif\n    class exception;\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility pop\n# endif\n#endif\n\n    template <class T>\n    class shared_ptr;\n\n    namespace\n    exception_detail\n        {\n        class error_info_base;\n        struct type_info_;\n\n        struct\n        error_info_container\n            {\n            virtual char const * diagnostic_information( char const * ) const = 0;\n            virtual shared_ptr<error_info_base> get( type_info_ const & ) const = 0;\n            virtual void set( shared_ptr<error_info_base> const &, type_info_ const & ) = 0;\n            virtual void add_ref() const = 0;\n            virtual bool release() const = 0;\n            virtual 
refcount_ptr<exception_detail::error_info_container> clone() const = 0;\n\n            protected:\n\n            ~error_info_container() throw()\n                {\n                }\n            };\n\n        template <class>\n        struct get_info;\n\n        template <>\n        struct get_info<throw_function>;\n\n        template <>\n        struct get_info<throw_file>;\n\n        template <>\n        struct get_info<throw_line>;\n\n        char const * get_diagnostic_information( exception const &, char const * );\n\n        void copy_boost_exception( exception *, exception const * );\n\n        template <class E,class Tag,class T>\n        E const & set_info( E const &, error_info<Tag,T> const & );\n\n        template <class E>\n        E const & set_info( E const &, throw_function const & );\n\n        template <class E>\n        E const & set_info( E const &, throw_file const & );\n\n        template <class E>\n        E const & set_info( E const &, throw_line const & );\n        }\n\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility push (default)\n# endif\n#endif\n    class\n    exception\n        {\n        //<N3757>\n        public:\n        template <class Tag> void set( typename Tag::type const & );\n        template <class Tag> typename Tag::type const * get() const;\n        //</N3757>\n\n        protected:\n\n        exception():\n            throw_function_(0),\n            throw_file_(0),\n            throw_line_(-1)\n            {\n            }\n\n#ifdef __HP_aCC\n        //On HP aCC, this protected copy constructor prevents throwing boost::exception.\n        //On all other platforms, the same effect is achieved by the pure virtual destructor.\n        exception( exception const & x ) throw():\n            data_(x.data_),\n            throw_function_(x.throw_function_),\n            throw_file_(x.throw_file_),\n            throw_line_(x.throw_line_)\n            {\n            
}\n#endif\n\n        virtual ~exception() throw()\n#ifndef __HP_aCC\n            = 0 //Workaround for HP aCC, =0 incorrectly leads to link errors.\n#endif\n            ;\n\n#if (defined(__MWERKS__) && __MWERKS__<=0x3207) || (defined(_MSC_VER) && _MSC_VER<=1310)\n        public:\n#else\n        private:\n\n        template <class E>\n        friend E const & exception_detail::set_info( E const &, throw_function const & );\n\n        template <class E>\n        friend E const & exception_detail::set_info( E const &, throw_file const & );\n\n        template <class E>\n        friend E const & exception_detail::set_info( E const &, throw_line const & );\n\n        template <class E,class Tag,class T>\n        friend E const & exception_detail::set_info( E const &, error_info<Tag,T> const & );\n\n        friend char const * exception_detail::get_diagnostic_information( exception const &, char const * );\n\n        template <class>\n        friend struct exception_detail::get_info;\n        friend struct exception_detail::get_info<throw_function>;\n        friend struct exception_detail::get_info<throw_file>;\n        friend struct exception_detail::get_info<throw_line>;\n        friend void exception_detail::copy_boost_exception( exception *, exception const * );\n#endif\n        mutable exception_detail::refcount_ptr<exception_detail::error_info_container> data_;\n        mutable char const * throw_function_;\n        mutable char const * throw_file_;\n        mutable int throw_line_;\n        };\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility pop\n# endif\n#endif\n\n    inline\n    exception::\n    ~exception() throw()\n        {\n        }\n\n    namespace\n    exception_detail\n        {\n        template <class E>\n        E const &\n        set_info( E const & x, throw_function const & y )\n            {\n            x.throw_function_=y.v_;\n            return x;\n            }\n\n        template 
<class E>\n        E const &\n        set_info( E const & x, throw_file const & y )\n            {\n            x.throw_file_=y.v_;\n            return x;\n            }\n\n        template <class E>\n        E const &\n        set_info( E const & x, throw_line const & y )\n            {\n            x.throw_line_=y.v_;\n            return x;\n            }\n        }\n\n    ////////////////////////////////////////////////////////////////////////\n\n    namespace\n    exception_detail\n        {\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility push (default)\n# endif\n#endif\n        template <class T>\n        struct\n        error_info_injector:\n            public T,\n            public exception\n            {\n            explicit\n            error_info_injector( T const & x ):\n                T(x)\n                {\n                }\n\n            ~error_info_injector() throw()\n                {\n                }\n            };\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility pop\n# endif\n#endif\n\n        struct large_size { char c[256]; };\n        large_size dispatch_boost_exception( exception const * );\n\n        struct small_size { };\n        small_size dispatch_boost_exception( void const * );\n\n        template <class,int>\n        struct enable_error_info_helper;\n\n        template <class T>\n        struct\n        enable_error_info_helper<T,sizeof(large_size)>\n            {\n            typedef T type;\n            };\n\n        template <class T>\n        struct\n        enable_error_info_helper<T,sizeof(small_size)>\n            {\n            typedef error_info_injector<T> type;\n            };\n\n        template <class T>\n        struct\n        enable_error_info_return_type\n            {\n            typedef typename 
enable_error_info_helper<T,sizeof(exception_detail::dispatch_boost_exception(static_cast<T *>(0)))>::type type;\n            };\n        }\n\n    template <class T>\n    inline\n    typename\n    exception_detail::enable_error_info_return_type<T>::type\n    enable_error_info( T const & x )\n        {\n        typedef typename exception_detail::enable_error_info_return_type<T>::type rt;\n        return rt(x);\n        }\n\n    ////////////////////////////////////////////////////////////////////////\n\n    namespace\n    exception_detail\n        {\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility push (default)\n# endif\n#endif\n        class\n        clone_base\n            {\n            public:\n\n            virtual clone_base const * clone() const = 0;\n            virtual void rethrow() const = 0;\n\n            virtual\n            ~clone_base() throw()\n                {\n                }\n            };\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility pop\n# endif\n#endif\n\n        inline\n        void\n        copy_boost_exception( exception * a, exception const * b )\n            {\n            refcount_ptr<error_info_container> data;\n            if( error_info_container * d=b->data_.get() )\n                data = d->clone();\n            a->throw_file_ = b->throw_file_;\n            a->throw_line_ = b->throw_line_;\n            a->throw_function_ = b->throw_function_;\n            a->data_ = data;\n            }\n\n        inline\n        void\n        copy_boost_exception( void *, void const * )\n            {\n            }\n\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility push (default)\n# endif\n#endif\n        template <class T>\n        class\n        clone_impl:\n            public T,\n            public virtual clone_base\n            {\n            struct 
clone_tag { };\n            clone_impl( clone_impl const & x, clone_tag ):\n                T(x)\n                {\n                copy_boost_exception(this,&x);\n                }\n\n            public:\n\n            explicit\n            clone_impl( T const & x ):\n                T(x)\n                {\n                copy_boost_exception(this,&x);\n                }\n\n            ~clone_impl() throw()\n                {\n                }\n\n            private:\n\n            clone_base const *\n            clone() const\n                {\n                return new clone_impl(*this,clone_tag());\n                }\n\n            void\n            rethrow() const\n                {\n                throw*this;\n                }\n            };\n        }\n#if defined(__GNUC__)\n# if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)\n#  pragma GCC visibility pop\n# endif\n#endif\n\n    template <class T>\n    inline\n    exception_detail::clone_impl<T>\n    enable_current_exception( T const & x )\n        {\n        return exception_detail::clone_impl<T>(x);\n        }\n    }\n\n#if defined(_MSC_VER) && !defined(BOOST_EXCEPTION_ENABLE_WARNINGS)\n#pragma warning(pop)\n#endif\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/functional/hash/hash_fwd.hpp",
    "content": "\n// Copyright 2005-2009 Daniel James.\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  Based on Peter Dimov's proposal\n//  http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2005/n1756.pdf\n//  issue 6.18. \n\n#if !defined(BOOST_FUNCTIONAL_HASH_FWD_HPP)\n#define BOOST_FUNCTIONAL_HASH_FWD_HPP\n\n#include <boost/config.hpp>\n#if defined(BOOST_HAS_PRAGMA_ONCE)\n#pragma once\n#endif\n\n#include <cstddef>\n#include <boost/detail/workaround.hpp>\n\nnamespace boost\n{\n    template <class T> struct hash;\n\n    template <class T> void hash_combine(std::size_t& seed, T const& v);\n\n    template <class It> std::size_t hash_range(It, It);\n    template <class It> void hash_range(std::size_t&, It, It);\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x551))\n    template <class T> inline std::size_t hash_range(T*, T*);\n    template <class T> inline void hash_range(std::size_t&, T*, T*);\n#endif\n}\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/functional/hash_fwd.hpp",
    "content": "\n// Copyright 2005-2009 Daniel James.\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#include <boost/config.hpp>\n#if defined(BOOST_HAS_PRAGMA_ONCE)\n#pragma once\n#endif\n\n#include <boost/functional/hash/hash_fwd.hpp>\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/limits.hpp",
    "content": "\n//  (C) Copyright John maddock 1999. \n//  (C) David Abrahams 2002.  Distributed under the Boost\n//  Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n// use this header as a workaround for missing <limits>\n\n//  See http://www.boost.org/libs/compatibility/index.html for documentation.\n\n#ifndef BOOST_LIMITS\n#define BOOST_LIMITS\n\n#include <boost/config.hpp>\n\n#ifdef BOOST_NO_LIMITS\n#  error \"There is no std::numeric_limits suppport available.\"\n#else\n# include <limits>\n#endif\n\n#if (defined(BOOST_HAS_LONG_LONG) && defined(BOOST_NO_LONG_LONG_NUMERIC_LIMITS)) \\\n      || (defined(BOOST_HAS_MS_INT64) && defined(BOOST_NO_MS_INT64_NUMERIC_LIMITS))\n// Add missing specializations for numeric_limits:\n#ifdef BOOST_HAS_MS_INT64\n#  define BOOST_LLT __int64\n#  define BOOST_ULLT unsigned __int64\n#else\n#  define BOOST_LLT  ::boost::long_long_type\n#  define BOOST_ULLT  ::boost::ulong_long_type\n#endif\n\n#include <climits>  // for CHAR_BIT\n\nnamespace std\n{\n  template<>\n  class numeric_limits<BOOST_LLT> \n  {\n   public:\n\n      BOOST_STATIC_CONSTANT(bool, is_specialized = true);\n#ifdef BOOST_HAS_MS_INT64\n      static BOOST_LLT min BOOST_PREVENT_MACRO_SUBSTITUTION (){ return 0x8000000000000000i64; }\n      static BOOST_LLT max BOOST_PREVENT_MACRO_SUBSTITUTION (){ return 0x7FFFFFFFFFFFFFFFi64; }\n#elif defined(LLONG_MAX)\n      static BOOST_LLT min BOOST_PREVENT_MACRO_SUBSTITUTION (){ return LLONG_MIN; }\n      static BOOST_LLT max BOOST_PREVENT_MACRO_SUBSTITUTION (){ return LLONG_MAX; }\n#elif defined(LONGLONG_MAX)\n      static BOOST_LLT min BOOST_PREVENT_MACRO_SUBSTITUTION (){ return LONGLONG_MIN; }\n      static BOOST_LLT max BOOST_PREVENT_MACRO_SUBSTITUTION (){ return LONGLONG_MAX; }\n#else\n      static BOOST_LLT min BOOST_PREVENT_MACRO_SUBSTITUTION (){ return 1LL << (sizeof(BOOST_LLT) * CHAR_BIT - 1); }\n      static BOOST_LLT max 
BOOST_PREVENT_MACRO_SUBSTITUTION (){ return ~(min)(); }\n#endif\n      BOOST_STATIC_CONSTANT(int, digits = sizeof(BOOST_LLT) * CHAR_BIT -1);\n      BOOST_STATIC_CONSTANT(int, digits10 = (CHAR_BIT * sizeof (BOOST_LLT) - 1) * 301L / 1000);\n      BOOST_STATIC_CONSTANT(bool, is_signed = true);\n      BOOST_STATIC_CONSTANT(bool, is_integer = true);\n      BOOST_STATIC_CONSTANT(bool, is_exact = true);\n      BOOST_STATIC_CONSTANT(int, radix = 2);\n      static BOOST_LLT epsilon() throw() { return 0; };\n      static BOOST_LLT round_error() throw() { return 0; };\n\n      BOOST_STATIC_CONSTANT(int, min_exponent = 0);\n      BOOST_STATIC_CONSTANT(int, min_exponent10 = 0);\n      BOOST_STATIC_CONSTANT(int, max_exponent = 0);\n      BOOST_STATIC_CONSTANT(int, max_exponent10 = 0);\n\n      BOOST_STATIC_CONSTANT(bool, has_infinity = false);\n      BOOST_STATIC_CONSTANT(bool, has_quiet_NaN = false);\n      BOOST_STATIC_CONSTANT(bool, has_signaling_NaN = false);\n      BOOST_STATIC_CONSTANT(bool, has_denorm = false);\n      BOOST_STATIC_CONSTANT(bool, has_denorm_loss = false);\n      static BOOST_LLT infinity() throw() { return 0; };\n      static BOOST_LLT quiet_NaN() throw() { return 0; };\n      static BOOST_LLT signaling_NaN() throw() { return 0; };\n      static BOOST_LLT denorm_min() throw() { return 0; };\n\n      BOOST_STATIC_CONSTANT(bool, is_iec559 = false);\n      BOOST_STATIC_CONSTANT(bool, is_bounded = true);\n      BOOST_STATIC_CONSTANT(bool, is_modulo = true);\n\n      BOOST_STATIC_CONSTANT(bool, traps = false);\n      BOOST_STATIC_CONSTANT(bool, tinyness_before = false);\n      BOOST_STATIC_CONSTANT(float_round_style, round_style = round_toward_zero);\n      \n  };\n\n  template<>\n  class numeric_limits<BOOST_ULLT> \n  {\n   public:\n\n      BOOST_STATIC_CONSTANT(bool, is_specialized = true);\n#ifdef BOOST_HAS_MS_INT64\n      static BOOST_ULLT min BOOST_PREVENT_MACRO_SUBSTITUTION (){ return 0ui64; }\n      static BOOST_ULLT max BOOST_PREVENT_MACRO_SUBSTITUTION 
(){ return 0xFFFFFFFFFFFFFFFFui64; }\n#elif defined(ULLONG_MAX) && defined(ULLONG_MIN)\n      static BOOST_ULLT min BOOST_PREVENT_MACRO_SUBSTITUTION (){ return ULLONG_MIN; }\n      static BOOST_ULLT max BOOST_PREVENT_MACRO_SUBSTITUTION (){ return ULLONG_MAX; }\n#elif defined(ULONGLONG_MAX) && defined(ULONGLONG_MIN)\n      static BOOST_ULLT min BOOST_PREVENT_MACRO_SUBSTITUTION (){ return ULONGLONG_MIN; }\n      static BOOST_ULLT max BOOST_PREVENT_MACRO_SUBSTITUTION (){ return ULONGLONG_MAX; }\n#else\n      static BOOST_ULLT min BOOST_PREVENT_MACRO_SUBSTITUTION (){ return 0uLL; }\n      static BOOST_ULLT max BOOST_PREVENT_MACRO_SUBSTITUTION (){ return ~0uLL; }\n#endif\n      BOOST_STATIC_CONSTANT(int, digits = sizeof(BOOST_LLT) * CHAR_BIT);\n      BOOST_STATIC_CONSTANT(int, digits10 = (CHAR_BIT * sizeof (BOOST_LLT)) * 301L / 1000);\n      BOOST_STATIC_CONSTANT(bool, is_signed = false);\n      BOOST_STATIC_CONSTANT(bool, is_integer = true);\n      BOOST_STATIC_CONSTANT(bool, is_exact = true);\n      BOOST_STATIC_CONSTANT(int, radix = 2);\n      static BOOST_ULLT epsilon() throw() { return 0; };\n      static BOOST_ULLT round_error() throw() { return 0; };\n\n      BOOST_STATIC_CONSTANT(int, min_exponent = 0);\n      BOOST_STATIC_CONSTANT(int, min_exponent10 = 0);\n      BOOST_STATIC_CONSTANT(int, max_exponent = 0);\n      BOOST_STATIC_CONSTANT(int, max_exponent10 = 0);\n\n      BOOST_STATIC_CONSTANT(bool, has_infinity = false);\n      BOOST_STATIC_CONSTANT(bool, has_quiet_NaN = false);\n      BOOST_STATIC_CONSTANT(bool, has_signaling_NaN = false);\n      BOOST_STATIC_CONSTANT(bool, has_denorm = false);\n      BOOST_STATIC_CONSTANT(bool, has_denorm_loss = false);\n      static BOOST_ULLT infinity() throw() { return 0; };\n      static BOOST_ULLT quiet_NaN() throw() { return 0; };\n      static BOOST_ULLT signaling_NaN() throw() { return 0; };\n      static BOOST_ULLT denorm_min() throw() { return 0; };\n\n      BOOST_STATIC_CONSTANT(bool, is_iec559 = false);\n      
BOOST_STATIC_CONSTANT(bool, is_bounded = true);\n      BOOST_STATIC_CONSTANT(bool, is_modulo = true);\n\n      BOOST_STATIC_CONSTANT(bool, traps = false);\n      BOOST_STATIC_CONSTANT(bool, tinyness_before = false);\n      BOOST_STATIC_CONSTANT(float_round_style, round_style = round_toward_zero);\n      \n  };\n}\n#endif \n\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/detail/atomic.hpp",
    "content": "//  Copyright (C) 2011-2013 Tim Blechmann\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_DETAIL_ATOMIC_HPP\n#define BOOST_LOCKFREE_DETAIL_ATOMIC_HPP\n\n#include <boost/config.hpp>\n\n#ifndef BOOST_LOCKFREE_FORCE_STD_ATOMIC\n\n#define BOOST_LOCKFREE_NO_HDR_ATOMIC\n\n// MSVC supports atomic<> from version 2012 onwards.\n#if defined(BOOST_MSVC) && (BOOST_MSVC >= 1700)\n#undef BOOST_LOCKFREE_NO_HDR_ATOMIC\n#endif\n\n\n// GCC supports atomic<> from version 4.8 onwards.\n#if (BOOST_GCC >= 40800) && (__cplusplus >= 201103L)\n#undef BOOST_LOCKFREE_NO_HDR_ATOMIC\n#endif\n\n\n// Apple clang is 2 mayor versions ahead, but in fact 1 minor version behind\n#ifdef BOOST_CLANG\n\n#define BOOST_ATOMIC_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)\n\n#if  defined(__apple_build_version__) && (BOOST_ATOMIC_CLANG_VERSION >= 60100) && (__cplusplus >= 201103L)\n#undef BOOST_LOCKFREE_NO_HDR_ATOMIC\n#endif\n\n#if !defined(__apple_build_version__) && (BOOST_ATOMIC_CLANG_VERSION >= 30600) && (__cplusplus >= 201103L)\n#undef BOOST_LOCKFREE_NO_HDR_ATOMIC\n#endif\n\n#undef BOOST_ATOMIC_CLANG_VERSION\n\n#endif // BOOST_CLANG\n\n#endif // BOOST_LOCKFREE_FORCE_STD_ATOMIC\n\n\n#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)\n#include <boost/atomic.hpp>\n#else\n#include <atomic>\n#endif\n\nnamespace boost {\nnamespace lockfree {\nnamespace detail {\n\n#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)\nusing boost::atomic;\nusing boost::memory_order_acquire;\nusing boost::memory_order_consume;\nusing boost::memory_order_relaxed;\nusing boost::memory_order_release;\n#else\nusing std::atomic;\nusing std::memory_order_acquire;\nusing std::memory_order_consume;\nusing std::memory_order_relaxed;\nusing std::memory_order_release;\n#endif\n\n}\nusing detail::atomic;\nusing detail::memory_order_acquire;\nusing 
detail::memory_order_consume;\nusing detail::memory_order_relaxed;\nusing detail::memory_order_release;\n\n}}\n\n#endif /* BOOST_LOCKFREE_DETAIL_ATOMIC_HPP */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/detail/copy_payload.hpp",
    "content": "//  boost lockfree: copy_payload helper\n//\n//  Copyright (C) 2011 Tim Blechmann\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED\n#define BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED\n\n#include <boost/mpl/if.hpp>\n#include <boost/type_traits/is_convertible.hpp>\n\n#if defined(_MSC_VER)\n#pragma warning(push)\n#pragma warning(disable: 4512) // assignment operator could not be generated\n#endif\n\nnamespace boost    {\nnamespace lockfree {\nnamespace detail   {\n\nstruct copy_convertible\n{\n    template <typename T, typename U>\n    static void copy(T & t, U & u)\n    {\n        u = t;\n    }\n};\n\nstruct copy_constructible_and_copyable\n{\n    template <typename T, typename U>\n    static void copy(T & t, U & u)\n    {\n        u = U(t);\n    }\n};\n\ntemplate <typename T, typename U>\nvoid copy_payload(T & t, U & u)\n{\n    typedef typename boost::mpl::if_<typename boost::is_convertible<T, U>::type,\n                                     copy_convertible,\n                                     copy_constructible_and_copyable\n                                    >::type copy_type;\n    copy_type::copy(t, u);\n}\n\ntemplate <typename T>\nstruct consume_via_copy\n{\n    consume_via_copy(T & out):\n        out_(out)\n    {}\n\n    template <typename U>\n    void operator()(U & element)\n    {\n        copy_payload(element, out_);\n    }\n\n    T & out_;\n};\n\nstruct consume_noop\n{\n    template <typename U>\n    void operator()(const U &)\n    {\n    }\n};\n\n\n}}}\n\n#if defined(_MSC_VER)\n#pragma warning(pop)\n#endif\n\n#endif  /* BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/detail/freelist.hpp",
    "content": "//  lock-free freelist\n//\n//  Copyright (C) 2008-2013 Tim Blechmann\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_FREELIST_HPP_INCLUDED\n#define BOOST_LOCKFREE_FREELIST_HPP_INCLUDED\n\n#include <limits>\n#include <memory>\n\n#include <boost/array.hpp>\n#include <boost/config.hpp>\n#include <boost/cstdint.hpp>\n#include <boost/noncopyable.hpp>\n#include <boost/static_assert.hpp>\n\n#include <boost/lockfree/detail/atomic.hpp>\n#include <boost/lockfree/detail/parameter.hpp>\n#include <boost/lockfree/detail/tagged_ptr.hpp>\n\n#if defined(_MSC_VER)\n#pragma warning(push)\n#pragma warning(disable: 4100) // unreferenced formal parameter\n#pragma warning(disable: 4127) // conditional expression is constant\n#endif\n\nnamespace boost    {\nnamespace lockfree {\nnamespace detail   {\n\ntemplate <typename T,\n          typename Alloc = std::allocator<T>\n         >\nclass freelist_stack:\n    Alloc\n{\n    struct freelist_node\n    {\n        tagged_ptr<freelist_node> next;\n    };\n\n    typedef tagged_ptr<freelist_node> tagged_node_ptr;\n\npublic:\n    typedef tagged_ptr<T> tagged_node_handle;\n\n    template <typename Allocator>\n    freelist_stack (Allocator const & alloc, std::size_t n = 0):\n        Alloc(alloc),\n        pool_(tagged_node_ptr(NULL))\n    {\n        for (std::size_t i = 0; i != n; ++i) {\n            T * node = Alloc::allocate(1);\n#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR\n            destruct<false>(node);\n#else\n            deallocate<false>(node);\n#endif\n        }\n    }\n\n    template <bool ThreadSafe>\n    void reserve (std::size_t count)\n    {\n        for (std::size_t i = 0; i != count; ++i) {\n            T * node = Alloc::allocate(1);\n            deallocate<ThreadSafe>(node);\n        }\n    }\n\n    template <bool ThreadSafe, bool Bounded>\n    T * construct (void)\n    
{\n        T * node = allocate<ThreadSafe, Bounded>();\n        if (node)\n            new(node) T();\n        return node;\n    }\n\n    template <bool ThreadSafe, bool Bounded, typename ArgumentType>\n    T * construct (ArgumentType const & arg)\n    {\n        T * node = allocate<ThreadSafe, Bounded>();\n        if (node)\n            new(node) T(arg);\n        return node;\n    }\n\n    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>\n    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)\n    {\n        T * node = allocate<ThreadSafe, Bounded>();\n        if (node)\n            new(node) T(arg1, arg2);\n        return node;\n    }\n\n    template <bool ThreadSafe>\n    void destruct (tagged_node_handle tagged_ptr)\n    {\n        T * n = tagged_ptr.get_ptr();\n        n->~T();\n        deallocate<ThreadSafe>(n);\n    }\n\n    template <bool ThreadSafe>\n    void destruct (T * n)\n    {\n        n->~T();\n        deallocate<ThreadSafe>(n);\n    }\n\n    ~freelist_stack(void)\n    {\n        tagged_node_ptr current = pool_.load();\n\n        while (current) {\n            freelist_node * current_ptr = current.get_ptr();\n            if (current_ptr)\n                current = current_ptr->next;\n            Alloc::deallocate((T*)current_ptr, 1);\n        }\n    }\n\n    bool is_lock_free(void) const\n    {\n        return pool_.is_lock_free();\n    }\n\n    T * get_handle(T * pointer) const\n    {\n        return pointer;\n    }\n\n    T * get_handle(tagged_node_handle const & handle) const\n    {\n        return get_pointer(handle);\n    }\n\n    T * get_pointer(tagged_node_handle const & tptr) const\n    {\n        return tptr.get_ptr();\n    }\n\n    T * get_pointer(T * pointer) const\n    {\n        return pointer;\n    }\n\n    T * null_handle(void) const\n    {\n        return NULL;\n    }\n\nprotected: // allow use from subclasses\n    template <bool ThreadSafe, bool Bounded>\n    T * allocate 
(void)\n    {\n        if (ThreadSafe)\n            return allocate_impl<Bounded>();\n        else\n            return allocate_impl_unsafe<Bounded>();\n    }\n\nprivate:\n    template <bool Bounded>\n    T * allocate_impl (void)\n    {\n        tagged_node_ptr old_pool = pool_.load(memory_order_consume);\n\n        for(;;) {\n            if (!old_pool.get_ptr()) {\n                if (!Bounded)\n                    return Alloc::allocate(1);\n                else\n                    return 0;\n            }\n\n            freelist_node * new_pool_ptr = old_pool->next.get_ptr();\n            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());\n\n            if (pool_.compare_exchange_weak(old_pool, new_pool)) {\n                void * ptr = old_pool.get_ptr();\n                return reinterpret_cast<T*>(ptr);\n            }\n        }\n    }\n\n    template <bool Bounded>\n    T * allocate_impl_unsafe (void)\n    {\n        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);\n\n        if (!old_pool.get_ptr()) {\n            if (!Bounded)\n                return Alloc::allocate(1);\n            else\n                return 0;\n        }\n\n        freelist_node * new_pool_ptr = old_pool->next.get_ptr();\n        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());\n\n        pool_.store(new_pool, memory_order_relaxed);\n        void * ptr = old_pool.get_ptr();\n        return reinterpret_cast<T*>(ptr);\n    }\n\nprotected:\n    template <bool ThreadSafe>\n    void deallocate (T * n)\n    {\n        if (ThreadSafe)\n            deallocate_impl(n);\n        else\n            deallocate_impl_unsafe(n);\n    }\n\nprivate:\n    void deallocate_impl (T * n)\n    {\n        void * node = n;\n        tagged_node_ptr old_pool = pool_.load(memory_order_consume);\n        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);\n\n        for(;;) {\n            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());\n   
         new_pool->next.set_ptr(old_pool.get_ptr());\n\n            if (pool_.compare_exchange_weak(old_pool, new_pool))\n                return;\n        }\n    }\n\n    void deallocate_impl_unsafe (T * n)\n    {\n        void * node = n;\n        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);\n        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);\n\n        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());\n        new_pool->next.set_ptr(old_pool.get_ptr());\n\n        pool_.store(new_pool, memory_order_relaxed);\n    }\n\n    atomic<tagged_node_ptr> pool_;\n};\n\nclass tagged_index\n{\npublic:\n    typedef boost::uint16_t tag_t;\n    typedef boost::uint16_t index_t;\n\n    /** uninitialized constructor */\n    tagged_index(void) BOOST_NOEXCEPT //: index(0), tag(0)\n    {}\n\n    /** copy constructor */\n#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n    tagged_index(tagged_index const & rhs):\n        index(rhs.index), tag(rhs.tag)\n    {}\n#else\n    tagged_index(tagged_index const & rhs) = default;\n#endif\n\n    explicit tagged_index(index_t i, tag_t t = 0):\n        index(i), tag(t)\n    {}\n\n    /** index access */\n    /* @{ */\n    index_t get_index() const\n    {\n        return index;\n    }\n\n    void set_index(index_t i)\n    {\n        index = i;\n    }\n    /* @} */\n\n    /** tag access */\n    /* @{ */\n    tag_t get_tag() const\n    {\n        return tag;\n    }\n\n    tag_t get_next_tag() const\n    {\n        tag_t next = (get_tag() + 1u) & (std::numeric_limits<tag_t>::max)();\n        return next;\n    }\n\n    void set_tag(tag_t t)\n    {\n        tag = t;\n    }\n    /* @} */\n\n    bool operator==(tagged_index const & rhs) const\n    {\n        return (index == rhs.index) && (tag == rhs.tag);\n    }\n\n    bool operator!=(tagged_index const & rhs) const\n    {\n        return !operator==(rhs);\n    }\n\nprotected:\n    index_t index;\n    tag_t tag;\n};\n\ntemplate <typename T,\n          
std::size_t size>\nstruct compiletime_sized_freelist_storage\n{\n    // array-based freelists only support a 16bit address space.\n    BOOST_STATIC_ASSERT(size < 65536);\n\n    boost::array<char, size * sizeof(T)> data;\n\n    // unused ... only for API purposes\n    template <typename Allocator>\n    compiletime_sized_freelist_storage(Allocator const & /* alloc */, std::size_t /* count */)\n    {}\n\n    T * nodes(void) const\n    {\n        return reinterpret_cast<T*>(const_cast<char*>(data.data()));\n    }\n\n    std::size_t node_count(void) const\n    {\n        return size;\n    }\n};\n\ntemplate <typename T,\n          typename Alloc = std::allocator<T> >\nstruct runtime_sized_freelist_storage:\n    Alloc\n{\n    T * nodes_;\n    std::size_t node_count_;\n\n    template <typename Allocator>\n    runtime_sized_freelist_storage(Allocator const & alloc, std::size_t count):\n        Alloc(alloc), node_count_(count)\n    {\n        if (count > 65535)\n            boost::throw_exception(std::runtime_error(\"boost.lockfree: freelist size is limited to a maximum of 65535 objects\"));\n        nodes_ = Alloc::allocate(count);\n    }\n\n    ~runtime_sized_freelist_storage(void)\n    {\n        Alloc::deallocate(nodes_, node_count_);\n    }\n\n    T * nodes(void) const\n    {\n        return nodes_;\n    }\n\n    std::size_t node_count(void) const\n    {\n        return node_count_;\n    }\n};\n\n\ntemplate <typename T,\n          typename NodeStorage = runtime_sized_freelist_storage<T>\n         >\nclass fixed_size_freelist:\n    NodeStorage\n{\n    struct freelist_node\n    {\n        tagged_index next;\n    };\n\n    typedef tagged_index::index_t index_t;\n\n    void initialize(void)\n    {\n        T * nodes = NodeStorage::nodes();\n        for (std::size_t i = 0; i != NodeStorage::node_count(); ++i) {\n            tagged_index * next_index = reinterpret_cast<tagged_index*>(nodes + i);\n            next_index->set_index(null_handle());\n\n#ifdef 
BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR\n            destruct<false>(nodes + i);\n#else\n            deallocate<false>(static_cast<index_t>(i));\n#endif\n        }\n    }\n\npublic:\n    typedef tagged_index tagged_node_handle;\n\n    template <typename Allocator>\n    fixed_size_freelist (Allocator const & alloc, std::size_t count):\n        NodeStorage(alloc, count),\n        pool_(tagged_index(static_cast<index_t>(count), 0))\n    {\n        initialize();\n    }\n\n    fixed_size_freelist (void):\n        pool_(tagged_index(NodeStorage::node_count(), 0))\n    {\n        initialize();\n    }\n\n    template <bool ThreadSafe, bool Bounded>\n    T * construct (void)\n    {\n        index_t node_index = allocate<ThreadSafe>();\n        if (node_index == null_handle())\n            return NULL;\n\n        T * node = NodeStorage::nodes() + node_index;\n        new(node) T();\n        return node;\n    }\n\n    template <bool ThreadSafe, bool Bounded, typename ArgumentType>\n    T * construct (ArgumentType const & arg)\n    {\n        index_t node_index = allocate<ThreadSafe>();\n        if (node_index == null_handle())\n            return NULL;\n\n        T * node = NodeStorage::nodes() + node_index;\n        new(node) T(arg);\n        return node;\n    }\n\n    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>\n    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)\n    {\n        index_t node_index = allocate<ThreadSafe>();\n        if (node_index == null_handle())\n            return NULL;\n\n        T * node = NodeStorage::nodes() + node_index;\n        new(node) T(arg1, arg2);\n        return node;\n    }\n\n    template <bool ThreadSafe>\n    void destruct (tagged_node_handle tagged_index)\n    {\n        index_t index = tagged_index.get_index();\n        T * n = NodeStorage::nodes() + index;\n        (void)n; // silence msvc warning\n        n->~T();\n        deallocate<ThreadSafe>(index);\n    }\n\n    
template <bool ThreadSafe>\n    void destruct (T * n)\n    {\n        n->~T();\n        deallocate<ThreadSafe>(n - NodeStorage::nodes());\n    }\n\n    bool is_lock_free(void) const\n    {\n        return pool_.is_lock_free();\n    }\n\n    index_t null_handle(void) const\n    {\n        return static_cast<index_t>(NodeStorage::node_count());\n    }\n\n    index_t get_handle(T * pointer) const\n    {\n        if (pointer == NULL)\n            return null_handle();\n        else\n            return static_cast<index_t>(pointer - NodeStorage::nodes());\n    }\n\n    index_t get_handle(tagged_node_handle const & handle) const\n    {\n        return handle.get_index();\n    }\n\n    T * get_pointer(tagged_node_handle const & tptr) const\n    {\n        return get_pointer(tptr.get_index());\n    }\n\n    T * get_pointer(index_t index) const\n    {\n        if (index == null_handle())\n            return 0;\n        else\n            return NodeStorage::nodes() + index;\n    }\n\n    T * get_pointer(T * ptr) const\n    {\n        return ptr;\n    }\n\nprotected: // allow use from subclasses\n    template <bool ThreadSafe>\n    index_t allocate (void)\n    {\n        if (ThreadSafe)\n            return allocate_impl();\n        else\n            return allocate_impl_unsafe();\n    }\n\nprivate:\n    index_t allocate_impl (void)\n    {\n        tagged_index old_pool = pool_.load(memory_order_consume);\n\n        for(;;) {\n            index_t index = old_pool.get_index();\n            if (index == null_handle())\n                return index;\n\n            T * old_node = NodeStorage::nodes() + index;\n            tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);\n\n            tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());\n\n            if (pool_.compare_exchange_weak(old_pool, new_pool))\n                return old_pool.get_index();\n        }\n    }\n\n    index_t allocate_impl_unsafe (void)\n    {\n        tagged_index 
old_pool = pool_.load(memory_order_consume);\n\n        index_t index = old_pool.get_index();\n        if (index == null_handle())\n            return index;\n\n        T * old_node = NodeStorage::nodes() + index;\n        tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);\n\n        tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());\n\n        pool_.store(new_pool, memory_order_relaxed);\n        return old_pool.get_index();\n    }\n\n    template <bool ThreadSafe>\n    void deallocate (index_t index)\n    {\n        if (ThreadSafe)\n            deallocate_impl(index);\n        else\n            deallocate_impl_unsafe(index);\n    }\n\n    void deallocate_impl (index_t index)\n    {\n        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);\n        tagged_index old_pool = pool_.load(memory_order_consume);\n\n        for(;;) {\n            tagged_index new_pool (index, old_pool.get_tag());\n            new_pool_node->next.set_index(old_pool.get_index());\n\n            if (pool_.compare_exchange_weak(old_pool, new_pool))\n                return;\n        }\n    }\n\n    void deallocate_impl_unsafe (index_t index)\n    {\n        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);\n        tagged_index old_pool = pool_.load(memory_order_consume);\n\n        tagged_index new_pool (index, old_pool.get_tag());\n        new_pool_node->next.set_index(old_pool.get_index());\n\n        pool_.store(new_pool);\n    }\n\n    atomic<tagged_index> pool_;\n};\n\ntemplate <typename T,\n          typename Alloc,\n          bool IsCompileTimeSized,\n          bool IsFixedSize,\n          std::size_t Capacity\n          >\nstruct select_freelist\n{\n    typedef typename mpl::if_c<IsCompileTimeSized,\n                               compiletime_sized_freelist_storage<T, Capacity>,\n                               runtime_sized_freelist_storage<T, Alloc>\n 
                             >::type fixed_sized_storage_type;\n\n    typedef typename mpl::if_c<IsCompileTimeSized || IsFixedSize,\n                               fixed_size_freelist<T, fixed_sized_storage_type>,\n                               freelist_stack<T, Alloc>\n                              >::type type;\n};\n\ntemplate <typename T, bool IsNodeBased>\nstruct select_tagged_handle\n{\n    typedef typename mpl::if_c<IsNodeBased,\n                               tagged_ptr<T>,\n                               tagged_index\n                              >::type tagged_handle_type;\n\n    typedef typename mpl::if_c<IsNodeBased,\n                               T*,\n                               typename tagged_index::index_t\n                              >::type handle_type;\n};\n\n\n} /* namespace detail */\n} /* namespace lockfree */\n} /* namespace boost */\n\n#if defined(_MSC_VER)\n#pragma warning(pop)\n#endif\n\n\n#endif /* BOOST_LOCKFREE_FREELIST_HPP_INCLUDED */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/detail/parameter.hpp",
    "content": "// boost lockfree\n//\n// Copyright (C) 2011 Tim Blechmann\n//\n// Distributed under the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_DETAIL_PARAMETER_HPP\n#define BOOST_LOCKFREE_DETAIL_PARAMETER_HPP\n\n#include <boost/lockfree/policies.hpp>\n\nnamespace boost {\nnamespace lockfree {\nnamespace detail {\n\nnamespace mpl = boost::mpl;\n\ntemplate <typename bound_args, typename tag_type>\nstruct has_arg\n{\n    typedef typename parameter::binding<bound_args, tag_type, mpl::void_>::type type;\n    static const bool value = mpl::is_not_void_<type>::type::value;\n};\n\n\ntemplate <typename bound_args>\nstruct extract_capacity\n{\n    static const bool has_capacity = has_arg<bound_args, tag::capacity>::value;\n\n    typedef typename mpl::if_c<has_capacity,\n                               typename has_arg<bound_args, tag::capacity>::type,\n                               mpl::size_t< 0 >\n                              >::type capacity_t;\n\n    static const std::size_t capacity = capacity_t::value;\n};\n\n\ntemplate <typename bound_args, typename T>\nstruct extract_allocator\n{\n    static const bool has_allocator = has_arg<bound_args, tag::allocator>::value;\n\n    typedef typename mpl::if_c<has_allocator,\n                               typename has_arg<bound_args, tag::allocator>::type,\n                               std::allocator<T>\n                              >::type allocator_arg;\n\n    typedef typename allocator_arg::template rebind<T>::other type;\n};\n\ntemplate <typename bound_args, bool default_ = false>\nstruct extract_fixed_sized\n{\n    static const bool has_fixed_sized = has_arg<bound_args, tag::fixed_sized>::value;\n\n    typedef typename mpl::if_c<has_fixed_sized,\n                               typename has_arg<bound_args, tag::fixed_sized>::type,\n                               mpl::bool_<default_>\n                      
        >::type type;\n\n    static const bool value = type::value;\n};\n\n\n} /* namespace detail */\n} /* namespace lockfree */\n} /* namespace boost */\n\n#endif /* BOOST_LOCKFREE_DETAIL_PARAMETER_HPP */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/detail/prefix.hpp",
    "content": "//  Copyright (C) 2009 Tim Blechmann\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_PREFIX_HPP_INCLUDED\n#define BOOST_LOCKFREE_PREFIX_HPP_INCLUDED\n\n/* this file defines the following macros:\n   BOOST_LOCKFREE_CACHELINE_BYTES: size of a cache line\n   BOOST_LOCKFREE_PTR_COMPRESSION: use tag/pointer compression to utilize parts\n                                   of the virtual address space as tag (at least 16bit)\n   BOOST_LOCKFREE_DCAS_ALIGNMENT:  symbol used for aligning structs at cache line\n                                   boundaries\n*/\n\n#define BOOST_LOCKFREE_CACHELINE_BYTES 64\n\n#ifdef _MSC_VER\n\n#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __declspec(align(BOOST_LOCKFREE_CACHELINE_BYTES))\n\n#if defined(_M_IX86)\n    #define BOOST_LOCKFREE_DCAS_ALIGNMENT\n#elif defined(_M_X64) || defined(_M_IA64)\n    #define BOOST_LOCKFREE_PTR_COMPRESSION 1\n    #define BOOST_LOCKFREE_DCAS_ALIGNMENT __declspec(align(16))\n#endif\n\n#endif /* _MSC_VER */\n\n#ifdef __GNUC__\n\n#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __attribute__((aligned(BOOST_LOCKFREE_CACHELINE_BYTES)))\n\n#if defined(__i386__) || defined(__ppc__)\n    #define BOOST_LOCKFREE_DCAS_ALIGNMENT\n#elif defined(__x86_64__)\n    #define BOOST_LOCKFREE_PTR_COMPRESSION 1\n    #define BOOST_LOCKFREE_DCAS_ALIGNMENT __attribute__((aligned(16)))\n#elif defined(__alpha__)\n    // LATER: alpha may benefit from pointer compression. 
but what is the maximum size of the address space?\n    #define BOOST_LOCKFREE_DCAS_ALIGNMENT\n#endif\n#endif /* __GNUC__ */\n\n#ifndef BOOST_LOCKFREE_DCAS_ALIGNMENT\n#define BOOST_LOCKFREE_DCAS_ALIGNMENT /*BOOST_LOCKFREE_DCAS_ALIGNMENT*/\n#endif\n\n#ifndef BOOST_LOCKFREE_CACHELINE_ALIGNMENT\n#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT /*BOOST_LOCKFREE_CACHELINE_ALIGNMENT*/\n#endif\n\n#endif /* BOOST_LOCKFREE_PREFIX_HPP_INCLUDED */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/detail/tagged_ptr.hpp",
    "content": "//  tagged pointer, for aba prevention\n//\n//  Copyright (C) 2008 Tim Blechmann\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED\n#define BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED\n\n#include <boost/config.hpp>\n#include <boost/lockfree/detail/prefix.hpp>\n\n#ifndef BOOST_LOCKFREE_PTR_COMPRESSION\n#include <boost/lockfree/detail/tagged_ptr_dcas.hpp>\n#else\n#include <boost/lockfree/detail/tagged_ptr_ptrcompression.hpp>\n#endif\n\n#endif /* BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/detail/tagged_ptr_dcas.hpp",
    "content": "//  tagged pointer, for aba prevention\n//\n//  Copyright (C) 2008 Tim Blechmann\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED\n#define BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED\n\n#include <cstddef>              /* for std::size_t */\n#include <limits>\n\n\nnamespace boost {\nnamespace lockfree {\nnamespace detail {\n\ntemplate <class T>\nclass BOOST_LOCKFREE_DCAS_ALIGNMENT tagged_ptr\n{\npublic:\n    typedef std::size_t tag_t;\n\n    /** uninitialized constructor */\n    tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)\n    {}\n\n#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n    tagged_ptr(tagged_ptr const & p):\n        ptr(p.ptr), tag(p.tag)\n    {}\n#else\n    tagged_ptr(tagged_ptr const & p) = default;\n#endif\n\n    explicit tagged_ptr(T * p, tag_t t = 0):\n        ptr(p), tag(t)\n    {}\n\n    /** unsafe set operation */\n    /* @{ */\n#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n    tagged_ptr & operator= (tagged_ptr const & p)\n    {\n        set(p.ptr, p.tag);\n        return *this;\n    }\n#else\n    tagged_ptr & operator= (tagged_ptr const & p) = default;\n#endif\n\n    void set(T * p, tag_t t)\n    {\n        ptr = p;\n        tag = t;\n    }\n    /* @} */\n\n    /** comparing semantics */\n    /* @{ */\n    bool operator== (volatile tagged_ptr const & p) const\n    {\n        return (ptr == p.ptr) && (tag == p.tag);\n    }\n\n    bool operator!= (volatile tagged_ptr const & p) const\n    {\n        return !operator==(p);\n    }\n    /* @} */\n\n    /** pointer access */\n    /* @{ */\n    T * get_ptr(void) const\n    {\n        return ptr;\n    }\n\n    void set_ptr(T * p)\n    {\n        ptr = p;\n    }\n    /* @} */\n\n    /** tag access */\n    /* @{ */\n    tag_t get_tag() const\n    {\n        return tag;\n    }\n\n    tag_t get_next_tag() const\n    {\n       
 tag_t next = (get_tag() + 1) & (std::numeric_limits<tag_t>::max)();\n        return next;\n    }\n\n    void set_tag(tag_t t)\n    {\n        tag = t;\n    }\n    /* @} */\n\n    /** smart pointer support  */\n    /* @{ */\n    T & operator*() const\n    {\n        return *ptr;\n    }\n\n    T * operator->() const\n    {\n        return ptr;\n    }\n\n    operator bool(void) const\n    {\n        return ptr != 0;\n    }\n    /* @} */\n\nprotected:\n    T * ptr;\n    tag_t tag;\n};\n\n} /* namespace detail */\n} /* namespace lockfree */\n} /* namespace boost */\n\n#endif /* BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp",
    "content": "//  tagged pointer, for aba prevention\n//\n//  Copyright (C) 2008, 2009 Tim Blechmann, based on code by Cory Nelson\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED\n#define BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED\n\n#include <cstddef>              /* for std::size_t */\n#include <limits>\n\n#include <boost/cstdint.hpp>\n\n\nnamespace boost {\nnamespace lockfree {\nnamespace detail {\n\n#if defined (__x86_64__) || defined (_M_X64)\n\ntemplate <class T>\nclass tagged_ptr\n{\n    typedef boost::uint64_t compressed_ptr_t;\n\npublic:\n    typedef boost::uint16_t tag_t;\n\nprivate:\n    union cast_unit\n    {\n        compressed_ptr_t value;\n        tag_t tag[4];\n    };\n\n    static const int tag_index = 3;\n    static const compressed_ptr_t ptr_mask = 0xffffffffffffUL; //(1L<<48L)-1;\n\n    static T* extract_ptr(volatile compressed_ptr_t const & i)\n    {\n        return (T*)(i & ptr_mask);\n    }\n\n    static tag_t extract_tag(volatile compressed_ptr_t const & i)\n    {\n        cast_unit cu;\n        cu.value = i;\n        return cu.tag[tag_index];\n    }\n\n    static compressed_ptr_t pack_ptr(T * ptr, tag_t tag)\n    {\n        cast_unit ret;\n        ret.value = compressed_ptr_t(ptr);\n        ret.tag[tag_index] = tag;\n        return ret.value;\n    }\n\npublic:\n    /** uninitialized constructor */\n    tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)\n    {}\n\n    /** copy constructor */\n#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n    tagged_ptr(tagged_ptr const & p):\n        ptr(p.ptr)\n    {}\n#else\n    tagged_ptr(tagged_ptr const & p) = default;\n#endif\n\n    explicit tagged_ptr(T * p, tag_t t = 0):\n        ptr(pack_ptr(p, t))\n    {}\n\n    /** unsafe set operation */\n    /* @{ */\n#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS\n    
tagged_ptr & operator= (tagged_ptr const & p)\n    {\n         ptr = p.ptr;\n         return *this;\n    }\n#else\n    tagged_ptr & operator= (tagged_ptr const & p) = default;\n#endif\n\n    void set(T * p, tag_t t)\n    {\n        ptr = pack_ptr(p, t);\n    }\n    /* @} */\n\n    /** comparing semantics */\n    /* @{ */\n    bool operator== (volatile tagged_ptr const & p) const\n    {\n        return (ptr == p.ptr);\n    }\n\n    bool operator!= (volatile tagged_ptr const & p) const\n    {\n        return !operator==(p);\n    }\n    /* @} */\n\n    /** pointer access */\n    /* @{ */\n    T * get_ptr() const\n    {\n        return extract_ptr(ptr);\n    }\n\n    void set_ptr(T * p)\n    {\n        tag_t tag = get_tag();\n        ptr = pack_ptr(p, tag);\n    }\n    /* @} */\n\n    /** tag access */\n    /* @{ */\n    tag_t get_tag() const\n    {\n        return extract_tag(ptr);\n    }\n\n    tag_t get_next_tag() const\n    {\n        tag_t next = (get_tag() + 1u) & (std::numeric_limits<tag_t>::max)();\n        return next;\n    }\n\n    void set_tag(tag_t t)\n    {\n        T * p = get_ptr();\n        ptr = pack_ptr(p, t);\n    }\n    /* @} */\n\n    /** smart pointer support  */\n    /* @{ */\n    T & operator*() const\n    {\n        return *get_ptr();\n    }\n\n    T * operator->() const\n    {\n        return get_ptr();\n    }\n\n    operator bool(void) const\n    {\n        return get_ptr() != 0;\n    }\n    /* @} */\n\nprotected:\n    compressed_ptr_t ptr;\n};\n#else\n#error unsupported platform\n#endif\n\n} /* namespace detail */\n} /* namespace lockfree */\n} /* namespace boost */\n\n#endif /* BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/policies.hpp",
    "content": "// boost lockfree\n//\n// Copyright (C) 2011 Tim Blechmann\n//\n// Distributed under the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_POLICIES_HPP_INCLUDED\n#define BOOST_LOCKFREE_POLICIES_HPP_INCLUDED\n\n#include <boost/parameter.hpp>\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/size_t.hpp>\n#include <boost/mpl/void.hpp>\n\nnamespace boost {\nnamespace lockfree {\n\n#ifndef BOOST_DOXYGEN_INVOKED\nnamespace tag { struct allocator ; }\nnamespace tag { struct fixed_sized; }\nnamespace tag { struct capacity; }\n\n#endif\n\n/** Configures a data structure as \\b fixed-sized.\n *\n *  The internal nodes are stored inside an array and they are addressed by array indexing. This limits the possible size of the\n *  queue to the number of elements that can be addressed by the index type (usually 2**16-2), but on platforms that lack\n *  double-width compare-and-exchange instructions, this is the best way to achieve lock-freedom.\n *  This implies that a data structure is bounded.\n * */\ntemplate <bool IsFixedSized>\nstruct fixed_sized:\n    boost::parameter::template_keyword<tag::fixed_sized, boost::mpl::bool_<IsFixedSized> >\n{};\n\n/** Sets the \\b capacity of a data structure at compile-time.\n *\n * This implies that a data structure is bounded and fixed-sized.\n * */\ntemplate <size_t Size>\nstruct capacity:\n    boost::parameter::template_keyword<tag::capacity, boost::mpl::size_t<Size> >\n{};\n\n/** Defines the \\b allocator type of a data structure.\n * */\ntemplate <class Alloc>\nstruct allocator:\n    boost::parameter::template_keyword<tag::allocator, Alloc>\n{};\n\n}\n}\n\n#endif /* BOOST_LOCKFREE_POLICIES_HPP_INCLUDED */\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/lockfree/queue.hpp",
    "content": "//  lock-free queue from\n//  Michael, M. M. and Scott, M. L.,\n//  \"simple, fast and practical non-blocking and blocking concurrent queue algorithms\"\n//\n//  Copyright (C) 2008-2013 Tim Blechmann\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_LOCKFREE_FIFO_HPP_INCLUDED\n#define BOOST_LOCKFREE_FIFO_HPP_INCLUDED\n\n#include <boost/assert.hpp>\n#include <boost/static_assert.hpp>\n#include <boost/type_traits/has_trivial_assign.hpp>\n#include <boost/type_traits/has_trivial_destructor.hpp>\n#include <boost/config.hpp> // for BOOST_LIKELY\n\n#include <boost/lockfree/detail/atomic.hpp>\n#include <boost/lockfree/detail/copy_payload.hpp>\n#include <boost/lockfree/detail/freelist.hpp>\n#include <boost/lockfree/detail/parameter.hpp>\n#include <boost/lockfree/detail/tagged_ptr.hpp>\n\n#ifdef BOOST_HAS_PRAGMA_ONCE\n#pragma once\n#endif\n\n\n#if defined(_MSC_VER)\n#pragma warning(push)\n#pragma warning(disable: 4324) // structure was padded due to __declspec(align())\n#endif\n\n\nnamespace boost    {\nnamespace lockfree {\nnamespace detail   {\n\ntypedef parameter::parameters<boost::parameter::optional<tag::allocator>,\n                              boost::parameter::optional<tag::capacity>\n                             > queue_signature;\n\n} /* namespace detail */\n\n\n/** The queue class provides a multi-writer/multi-reader queue, pushing and popping is lock-free,\n *  construction/destruction has to be synchronized. It uses a freelist for memory management,\n *  freed nodes are pushed to the freelist and not returned to the OS before the queue is destroyed.\n *\n *  \\b Policies:\n *  - \\ref boost::lockfree::fixed_sized, defaults to \\c boost::lockfree::fixed_sized<false> \\n\n *    Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior. 
\\n\n *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed\n *    by array indexing. This limits the possible size of the queue to the number of elements that can be addressed by the index\n *    type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way\n *    to achieve lock-freedom.\n *\n *  - \\ref boost::lockfree::capacity, optional \\n\n *    If this template argument is passed to the options, the size of the queue is set at compile-time.\\n\n *    It this option implies \\c fixed_sized<true>\n *\n *  - \\ref boost::lockfree::allocator, defaults to \\c boost::lockfree::allocator<std::allocator<void>> \\n\n *    Specifies the allocator that is used for the internal freelist\n *\n *  \\b Requirements:\n *   - T must have a copy constructor\n *   - T must have a trivial assignment operator\n *   - T must have a trivial destructor\n *\n * */\n#ifndef BOOST_DOXYGEN_INVOKED\ntemplate <typename T,\n          class A0 = boost::parameter::void_,\n          class A1 = boost::parameter::void_,\n          class A2 = boost::parameter::void_>\n#else\ntemplate <typename T, ...Options>\n#endif\nclass queue\n{\nprivate:\n#ifndef BOOST_DOXYGEN_INVOKED\n\n#ifdef BOOST_HAS_TRIVIAL_DESTRUCTOR\n    BOOST_STATIC_ASSERT((boost::has_trivial_destructor<T>::value));\n#endif\n\n#ifdef BOOST_HAS_TRIVIAL_ASSIGN\n    BOOST_STATIC_ASSERT((boost::has_trivial_assign<T>::value));\n#endif\n\n    typedef typename detail::queue_signature::bind<A0, A1, A2>::type bound_args;\n\n    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;\n    static const size_t capacity = detail::extract_capacity<bound_args>::capacity + 1; // the queue uses one dummy node\n    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;\n    static const bool node_based = !(has_capacity || fixed_sized);\n    static const bool 
compile_time_sized = has_capacity;\n\n    struct BOOST_LOCKFREE_CACHELINE_ALIGNMENT node\n    {\n        typedef typename detail::select_tagged_handle<node, node_based>::tagged_handle_type tagged_node_handle;\n        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;\n\n        node(T const & v, handle_type null_handle):\n            data(v)//, next(tagged_node_handle(0, 0))\n        {\n            /* increment tag to avoid ABA problem */\n            tagged_node_handle old_next = next.load(memory_order_relaxed);\n            tagged_node_handle new_next (null_handle, old_next.get_next_tag());\n            next.store(new_next, memory_order_release);\n        }\n\n        node (handle_type null_handle):\n            next(tagged_node_handle(null_handle, 0))\n        {}\n\n        node(void)\n        {}\n\n        atomic<tagged_node_handle> next;\n        T data;\n    };\n\n    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;\n    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;\n    typedef typename pool_t::tagged_node_handle tagged_node_handle;\n    typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;\n\n    void initialize(void)\n    {\n        node * n = pool.template construct<true, false>(pool.null_handle());\n        tagged_node_handle dummy_node(pool.get_handle(n), 0);\n        head_.store(dummy_node, memory_order_relaxed);\n        tail_.store(dummy_node, memory_order_release);\n    }\n\n    struct implementation_defined\n    {\n        typedef node_allocator allocator;\n        typedef std::size_t size_type;\n    };\n\n#endif\n\n    BOOST_DELETED_FUNCTION(queue(queue const&))\n    BOOST_DELETED_FUNCTION(queue& operator= (queue const&))\n\npublic:\n    typedef T value_type;\n    typedef typename implementation_defined::allocator allocator;\n    typedef typename 
implementation_defined::size_type size_type;\n\n    /**\n     * \\return true, if implementation is lock-free.\n     *\n     * \\warning It only checks, if the queue head and tail nodes and the freelist can be modified in a lock-free manner.\n     *       On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics, there is\n     *       no possibility to provide a completely accurate implementation, because one would need to test every internal\n     *       node, which is impossible if further nodes will be allocated from the operating system.\n     * */\n    bool is_lock_free (void) const\n    {\n        return head_.is_lock_free() && tail_.is_lock_free() && pool.is_lock_free();\n    }\n\n    //! Construct queue\n    // @{\n    queue(void):\n        head_(tagged_node_handle(0, 0)),\n        tail_(tagged_node_handle(0, 0)),\n        pool(node_allocator(), capacity)\n    {\n        BOOST_ASSERT(has_capacity);\n        initialize();\n    }\n\n    template <typename U>\n    explicit queue(typename node_allocator::template rebind<U>::other const & alloc):\n        head_(tagged_node_handle(0, 0)),\n        tail_(tagged_node_handle(0, 0)),\n        pool(alloc, capacity)\n    {\n        BOOST_STATIC_ASSERT(has_capacity);\n        initialize();\n    }\n\n    explicit queue(allocator const & alloc):\n        head_(tagged_node_handle(0, 0)),\n        tail_(tagged_node_handle(0, 0)),\n        pool(alloc, capacity)\n    {\n        BOOST_ASSERT(has_capacity);\n        initialize();\n    }\n    // @}\n\n    //! 
Construct queue, allocate n nodes for the freelist.\n    // @{\n    explicit queue(size_type n):\n        head_(tagged_node_handle(0, 0)),\n        tail_(tagged_node_handle(0, 0)),\n        pool(node_allocator(), n + 1)\n    {\n        BOOST_ASSERT(!has_capacity);\n        initialize();\n    }\n\n    template <typename U>\n    queue(size_type n, typename node_allocator::template rebind<U>::other const & alloc):\n        head_(tagged_node_handle(0, 0)),\n        tail_(tagged_node_handle(0, 0)),\n        pool(alloc, n + 1)\n    {\n        BOOST_STATIC_ASSERT(!has_capacity);\n        initialize();\n    }\n    // @}\n\n    /** \\copydoc boost::lockfree::stack::reserve\n     * */\n    void reserve(size_type n)\n    {\n        pool.template reserve<true>(n);\n    }\n\n    /** \\copydoc boost::lockfree::stack::reserve_unsafe\n     * */\n    void reserve_unsafe(size_type n)\n    {\n        pool.template reserve<false>(n);\n    }\n\n    /** Destroys queue, free all nodes from freelist.\n     * */\n    ~queue(void)\n    {\n        T dummy;\n        while(unsynchronized_pop(dummy))\n        {}\n\n        pool.template destruct<false>(head_.load(memory_order_relaxed));\n    }\n\n    /** Check if the queue is empty\n     *\n     * \\return true, if the queue is empty, false otherwise\n     * \\note The result is only accurate, if no other thread modifies the queue. Therefore it is rarely practical to use this\n     *       value in program logic.\n     * */\n    bool empty(void) const\n    {\n        return pool.get_handle(head_.load()) == pool.get_handle(tail_.load());\n    }\n\n    /** Pushes object t to the queue.\n     *\n     * \\post object will be pushed to the queue, if internal node can be allocated\n     * \\returns true, if the push operation is successful.\n     *\n     * \\note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated\n     *                    from the OS. 
This may not be lock-free.\n     * */\n    bool push(T const & t)\n    {\n        return do_push<false>(t);\n    }\n\n    /** Pushes object t to the queue.\n     *\n     * \\post object will be pushed to the queue, if internal node can be allocated\n     * \\returns true, if the push operation is successful.\n     *\n     * \\note Thread-safe and non-blocking. If internal memory pool is exhausted, operation will fail\n     * \\throws if memory allocator throws\n     * */\n    bool bounded_push(T const & t)\n    {\n        return do_push<true>(t);\n    }\n\n\nprivate:\n#ifndef BOOST_DOXYGEN_INVOKED\n    template <bool Bounded>\n    bool do_push(T const & t)\n    {\n        node * n = pool.template construct<true, Bounded>(t, pool.null_handle());\n        handle_type node_handle = pool.get_handle(n);\n\n        if (n == NULL)\n            return false;\n\n        for (;;) {\n            tagged_node_handle tail = tail_.load(memory_order_acquire);\n            node * tail_node = pool.get_pointer(tail);\n            tagged_node_handle next = tail_node->next.load(memory_order_acquire);\n            node * next_ptr = pool.get_pointer(next);\n\n            tagged_node_handle tail2 = tail_.load(memory_order_acquire);\n            if (BOOST_LIKELY(tail == tail2)) {\n                if (next_ptr == 0) {\n                    tagged_node_handle new_tail_next(node_handle, next.get_next_tag());\n                    if ( tail_node->next.compare_exchange_weak(next, new_tail_next) ) {\n                        tagged_node_handle new_tail(node_handle, tail.get_next_tag());\n                        tail_.compare_exchange_strong(tail, new_tail);\n                        return true;\n                    }\n                }\n                else {\n                    tagged_node_handle new_tail(pool.get_handle(next_ptr), tail.get_next_tag());\n                    tail_.compare_exchange_strong(tail, new_tail);\n                }\n            }\n        }\n    }\n#endif\n\npublic:\n\n    
/** Pushes object t to the queue.\n     *\n     * \\post object will be pushed to the queue, if internal node can be allocated\n     * \\returns true, if the push operation is successful.\n     *\n     * \\note Not Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated\n     *       from the OS. This may not be lock-free.\n     * \\throws if memory allocator throws\n     * */\n    bool unsynchronized_push(T const & t)\n    {\n        node * n = pool.template construct<false, false>(t, pool.null_handle());\n\n        if (n == NULL)\n            return false;\n\n        for (;;) {\n            tagged_node_handle tail = tail_.load(memory_order_relaxed);\n            tagged_node_handle next = tail->next.load(memory_order_relaxed);\n            node * next_ptr = next.get_ptr();\n\n            if (next_ptr == 0) {\n                tail->next.store(tagged_node_handle(n, next.get_next_tag()), memory_order_relaxed);\n                tail_.store(tagged_node_handle(n, tail.get_next_tag()), memory_order_relaxed);\n                return true;\n            }\n            else\n                tail_.store(tagged_node_handle(next_ptr, tail.get_next_tag()), memory_order_relaxed);\n        }\n    }\n\n    /** Pops object from queue.\n     *\n     * \\post if pop operation is successful, object will be copied to ret.\n     * \\returns true, if the pop operation is successful, false if queue was empty.\n     *\n     * \\note Thread-safe and non-blocking\n     * */\n    bool pop (T & ret)\n    {\n        return pop<T>(ret);\n    }\n\n    /** Pops object from queue.\n     *\n     * \\pre type U must be constructible by T and copyable, or T must be convertible to U\n     * \\post if pop operation is successful, object will be copied to ret.\n     * \\returns true, if the pop operation is successful, false if queue was empty.\n     *\n     * \\note Thread-safe and non-blocking\n     * */\n    template <typename U>\n    bool 
pop (U & ret)\n    {\n        for (;;) {\n            tagged_node_handle head = head_.load(memory_order_acquire);\n            node * head_ptr = pool.get_pointer(head);\n\n            tagged_node_handle tail = tail_.load(memory_order_acquire);\n            tagged_node_handle next = head_ptr->next.load(memory_order_acquire);\n            node * next_ptr = pool.get_pointer(next);\n\n            tagged_node_handle head2 = head_.load(memory_order_acquire);\n            if (BOOST_LIKELY(head == head2)) {\n                if (pool.get_handle(head) == pool.get_handle(tail)) {\n                    if (next_ptr == 0)\n                        return false;\n\n                    tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag());\n                    tail_.compare_exchange_strong(tail, new_tail);\n\n                } else {\n                    if (next_ptr == 0)\n                        /* this check is not part of the original algorithm as published by michael and scott\n                         *\n                         * however we reuse the tagged_ptr part for the freelist and clear the next part during node\n                         * allocation. 
we can observe a null-pointer here.\n                         * */\n                        continue;\n                    detail::copy_payload(next_ptr->data, ret);\n\n                    tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag());\n                    if (head_.compare_exchange_weak(head, new_head)) {\n                        pool.template destruct<true>(head);\n                        return true;\n                    }\n                }\n            }\n        }\n    }\n\n    /** Pops object from queue.\n     *\n     * \\post if pop operation is successful, object will be copied to ret.\n     * \\returns true, if the pop operation is successful, false if queue was empty.\n     *\n     * \\note Not thread-safe, but non-blocking\n     *\n     * */\n    bool unsynchronized_pop (T & ret)\n    {\n        return unsynchronized_pop<T>(ret);\n    }\n\n    /** Pops object from queue.\n     *\n     * \\pre type U must be constructible by T and copyable, or T must be convertible to U\n     * \\post if pop operation is successful, object will be copied to ret.\n     * \\returns true, if the pop operation is successful, false if queue was empty.\n     *\n     * \\note Not thread-safe, but non-blocking\n     *\n     * */\n    template <typename U>\n    bool unsynchronized_pop (U & ret)\n    {\n        for (;;) {\n            tagged_node_handle head = head_.load(memory_order_relaxed);\n            node * head_ptr = pool.get_pointer(head);\n            tagged_node_handle tail = tail_.load(memory_order_relaxed);\n            tagged_node_handle next = head_ptr->next.load(memory_order_relaxed);\n            node * next_ptr = pool.get_pointer(next);\n\n            if (pool.get_handle(head) == pool.get_handle(tail)) {\n                if (next_ptr == 0)\n                    return false;\n\n                tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag());\n                tail_.store(new_tail);\n            } else {\n             
   if (next_ptr == 0)\n                    /* this check is not part of the original algorithm as published by michael and scott\n                     *\n                     * however we reuse the tagged_ptr part for the freelist and clear the next part during node\n                     * allocation. we can observe a null-pointer here.\n                     * */\n                    continue;\n                detail::copy_payload(next_ptr->data, ret);\n                tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag());\n                head_.store(new_head);\n                pool.template destruct<false>(head);\n                return true;\n            }\n        }\n    }\n\n    /** consumes one element via a functor\n     *\n     *  pops one element from the queue and applies the functor on this object\n     *\n     * \\returns true, if one element was consumed\n     *\n     * \\note Thread-safe and non-blocking, if functor is thread-safe and non-blocking\n     * */\n    template <typename Functor>\n    bool consume_one(Functor & f)\n    {\n        T element;\n        bool success = pop(element);\n        if (success)\n            f(element);\n\n        return success;\n    }\n\n    /// \\copydoc boost::lockfree::queue::consume_one(Functor & rhs)\n    template <typename Functor>\n    bool consume_one(Functor const & f)\n    {\n        T element;\n        bool success = pop(element);\n        if (success)\n            f(element);\n\n        return success;\n    }\n\n    /** consumes all elements via a functor\n     *\n     * sequentially pops all elements from the queue and applies the functor on each object\n     *\n     * \\returns number of elements that are consumed\n     *\n     * \\note Thread-safe and non-blocking, if functor is thread-safe and non-blocking\n     * */\n    template <typename Functor>\n    size_t consume_all(Functor & f)\n    {\n        size_t element_count = 0;\n        while (consume_one(f))\n            element_count 
+= 1;\n\n        return element_count;\n    }\n\n    /// \\copydoc boost::lockfree::queue::consume_all(Functor & rhs)\n    template <typename Functor>\n    size_t consume_all(Functor const & f)\n    {\n        size_t element_count = 0;\n        while (consume_one(f))\n            element_count += 1;\n\n        return element_count;\n    }\n\nprivate:\n#ifndef BOOST_DOXYGEN_INVOKED\n    atomic<tagged_node_handle> head_;\n    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);\n    char padding1[padding_size];\n    atomic<tagged_node_handle> tail_;\n    char padding2[padding_size];\n\n    pool_t pool;\n#endif\n};\n\n} /* namespace lockfree */\n} /* namespace boost */\n\n#if defined(_MSC_VER)\n#pragma warning(pop)\n#endif\n\n#endif /* BOOST_LOCKFREE_FIFO_HPP_INCLUDED */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/memory_order.hpp",
    "content": "#ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED\n#define BOOST_MEMORY_ORDER_HPP_INCLUDED\n\n// MS compatible compilers support #pragma once\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1020)\n# pragma once\n#endif\n\n//  boost/memory_order.hpp\n//\n//  Defines enum boost::memory_order per the C++0x working draft\n//\n//  Copyright (c) 2008, 2009 Peter Dimov\n//\n//  Distributed under the Boost Software License, Version 1.0.\n//  See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n\nnamespace boost\n{\n\n//\n// Enum values are chosen so that code that needs to insert\n// a trailing fence for acquire semantics can use a single\n// test such as:\n//\n// if( mo & memory_order_acquire ) { ...fence... }\n//\n// For leading fences one can use:\n//\n// if( mo & memory_order_release ) { ...fence... }\n//\n// Architectures such as Alpha that need a fence on consume\n// can use:\n//\n// if( mo & ( memory_order_acquire | memory_order_consume ) ) { ...fence... }\n//\n// The values are also in the order of increasing \"strength\"\n// of the fences so that success/failure orders can be checked\n// efficiently in compare_exchange methods.\n//\n\nenum memory_order\n{\n    memory_order_relaxed = 0,\n    memory_order_consume = 1,\n    memory_order_acquire = 2,\n    memory_order_release = 4,\n    memory_order_acq_rel = 6, // acquire | release\n    memory_order_seq_cst = 14 // acq_rel | 8\n};\n\n} // namespace boost\n\n#endif // #ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/O1_size.hpp",
    "content": "\n#ifndef BOOST_MPL_O1_SIZE_HPP_INCLUDED\n#define BOOST_MPL_O1_SIZE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/O1_size_fwd.hpp>\n#include <boost/mpl/sequence_tag.hpp>\n#include <boost/mpl/aux_/O1_size_impl.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\n// returns sequence size if it's an O(1) operation; otherwise returns -1\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    >\nstruct O1_size\n    : O1_size_impl< typename sequence_tag<Sequence>::type >\n        ::template apply< Sequence >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1, O1_size, (Sequence))\n};\n\nBOOST_MPL_AUX_NA_SPEC(1, O1_size)\n\n}}\n\n#endif // BOOST_MPL_O1_SIZE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/O1_size_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_O1_SIZE_FWD_HPP_INCLUDED\n#define BOOST_MPL_O1_SIZE_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct O1_size_impl;\ntemplate< typename Sequence > struct O1_size;\n\n}}\n\n#endif // BOOST_MPL_O1_SIZE_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/always.hpp",
    "content": "\n#ifndef BOOST_MPL_ALWAYS_HPP_INCLUDED\n#define BOOST_MPL_ALWAYS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/preprocessor/default_params.hpp>\n#include <boost/mpl/aux_/na.hpp>\n#include <boost/mpl/aux_/arity_spec.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Value > struct always\n{\n    template<\n        BOOST_MPL_PP_DEFAULT_PARAMS(BOOST_MPL_LIMIT_METAFUNCTION_ARITY, typename T, na)\n        >\n    struct apply\n    {\n        typedef Value type;\n    };\n};\n\nBOOST_MPL_AUX_ARITY_SPEC(0, always)\n\n}}\n\n#endif // BOOST_MPL_ALWAYS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/and.hpp",
    "content": "\n#ifndef BOOST_MPL_AND_HPP_INCLUDED\n#define BOOST_MPL_AND_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   include <boost/mpl/bool.hpp>\n#   include <boost/mpl/aux_/nested_type_wknd.hpp>\n#   include <boost/mpl/aux_/na_spec.hpp>\n#   include <boost/mpl/aux_/lambda_support.hpp>\n\n// agurt, 19/may/04: workaround a conflict with <iso646.h> header's \n// 'or' and 'and' macros, see http://tinyurl.com/3et69; 'defined(and)'\n// has to be checked in a separate condition, otherwise GCC complains \n// about 'and' being an alternative token\n#if defined(_MSC_VER) && !defined(__clang__)\n#ifndef __GCCXML__\n#if defined(and) \n#   pragma push_macro(\"and\")\n#   undef and\n#   define and(x)\n#endif\n#endif\n#endif\n\n#   define BOOST_MPL_PREPROCESSED_HEADER and.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#if defined(_MSC_VER) && !defined(__clang__)\n#ifndef __GCCXML__\n#if defined(and) \n#   pragma pop_macro(\"and\")\n#endif\n#endif\n#endif\n\n#else\n\n#   define AUX778076_OP_NAME and_\n#   define AUX778076_OP_VALUE1 false\n#   define AUX778076_OP_VALUE2 true\n#   include <boost/mpl/aux_/logical_op.hpp>\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_AND_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/apply.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_APPLY_HPP_INCLUDED\n#define BOOST_MPL_APPLY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/apply_fwd.hpp>\n#   include <boost/mpl/apply_wrap.hpp>\n#   include <boost/mpl/placeholders.hpp>\n#   include <boost/mpl/lambda.hpp>\n#   include <boost/mpl/aux_/na.hpp>\n#   include <boost/mpl/aux_/lambda_support.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER apply.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/preprocessor/default_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/partial_spec_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/enum.hpp>\n#   include <boost/mpl/aux_/config/lambda.hpp>\n#   include <boost/mpl/aux_/config/dtp.hpp>\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n#   include <boost/mpl/aux_/config/eti.hpp>\n#   include <boost/mpl/aux_/config/msvc.hpp>\n#   include <boost/mpl/aux_/config/workaround.hpp>\n\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\nnamespace boost { namespace mpl {\n\n// local macros, #undef-ined at the end of the header\n#   define AUX778076_APPLY_PARAMS(param) \\\n    BOOST_MPL_PP_PARAMS( \\\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        
, param \\\n        ) \\\n    /**/\n\n#   define AUX778076_APPLY_DEF_PARAMS(param, value) \\\n    BOOST_MPL_PP_DEFAULT_PARAMS( \\\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        , param \\\n        , value \\\n        ) \\\n    /**/\n\n#   define AUX778076_APPLY_N_PARAMS(n, param) \\\n    BOOST_MPL_PP_PARAMS(n, param) \\\n    /**/\n\n#   define AUX778076_APPLY_N_COMMA_PARAMS(n, param) \\\n    BOOST_PP_COMMA_IF(n) \\\n    BOOST_MPL_PP_PARAMS(n, param) \\\n    /**/\n\n#   define AUX778076_APPLY_N_PARTIAL_SPEC_PARAMS(n, param, def) \\\n    BOOST_PP_COMMA_IF(n) \\\n    BOOST_MPL_PP_PARTIAL_SPEC_PARAMS(n, param, def) \\\n    /**/\n    \n#   define AUX778076_APPLY_N_SPEC_PARAMS(n, param) \\\n    BOOST_MPL_PP_ENUM(BOOST_PP_INC(n), param) \\\n    /**/\n\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/apply.hpp>))\n#include BOOST_PP_ITERATE()\n\n#   if !defined(BOOST_MPL_CFG_NO_APPLY_TEMPLATE)\n// real C++ version is already taken care of\n#   if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\nnamespace aux {\n// apply_count_args\n#define AUX778076_COUNT_ARGS_PREFIX apply\n#define AUX778076_COUNT_ARGS_DEFAULT na\n#define AUX778076_COUNT_ARGS_ARITY BOOST_MPL_LIMIT_METAFUNCTION_ARITY\n#include <boost/mpl/aux_/count_args.hpp>\n}\n\n\ntemplate<\n      typename F, AUX778076_APPLY_DEF_PARAMS(typename T, na)\n    >\nstruct apply\n    : aux::apply_chooser< \n          aux::apply_count_args< AUX778076_APPLY_PARAMS(T) >::value\n        >::template result_< F, AUX778076_APPLY_PARAMS(T) >::type\n{\n};\n\n#   endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n#   endif // BOOST_MPL_CFG_NO_APPLY_TEMPLATE\n\n#   undef AUX778076_APPLY_N_SPEC_PARAMS\n#   undef AUX778076_APPLY_N_PARTIAL_SPEC_PARAMS\n#   undef AUX778076_APPLY_N_COMMA_PARAMS\n#   undef AUX778076_APPLY_N_PARAMS\n#   undef AUX778076_APPLY_DEF_PARAMS\n#   undef AUX778076_APPLY_PARAMS\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // 
BOOST_MPL_APPLY_HPP_INCLUDED\n\n///// iteration, depth == 1\n\n// For gcc 4.4 compatability, we must include the\n// BOOST_PP_ITERATION_DEPTH test inside an #else clause.\n#else // BOOST_PP_IS_ITERATING\n#if BOOST_PP_ITERATION_DEPTH() == 1\n\n#   define i_ BOOST_PP_FRAME_ITERATION(1)\n\ntemplate<\n      typename F AUX778076_APPLY_N_COMMA_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(apply,i_)\n#if !BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    : BOOST_PP_CAT(apply_wrap,i_)< \n          typename lambda<F>::type\n        AUX778076_APPLY_N_COMMA_PARAMS(i_, T)\n        >\n{\n#else\n{\n    typedef typename BOOST_PP_CAT(apply_wrap,i_)< \n          typename lambda<F>::type\n        AUX778076_APPLY_N_COMMA_PARAMS(i_, T)\n        >::type type;\n#endif\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          BOOST_PP_INC(i_)\n        , BOOST_PP_CAT(apply,i_)\n        , (F AUX778076_APPLY_N_COMMA_PARAMS(i_,T))\n        )\n};\n\n\n#if defined(BOOST_MPL_CFG_MSVC_ETI_BUG)\n/// workaround for ETI bug\ntemplate<>\nstruct BOOST_PP_CAT(apply,i_)<AUX778076_APPLY_N_SPEC_PARAMS(i_, int)>\n{\n    typedef int type;\n};\n#endif\n\n#   if !defined(BOOST_MPL_CFG_NO_APPLY_TEMPLATE)\n#   if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\n#if i_ == BOOST_MPL_LIMIT_METAFUNCTION_ARITY\n/// primary template (not a specialization!)\ntemplate<\n      typename F AUX778076_APPLY_N_COMMA_PARAMS(i_, typename T)\n    >\nstruct apply\n    : BOOST_PP_CAT(apply,i_)< F AUX778076_APPLY_N_COMMA_PARAMS(i_, T) >\n{\n};\n#else\ntemplate<\n      typename F AUX778076_APPLY_N_COMMA_PARAMS(i_, typename T)\n    >\nstruct apply< F AUX778076_APPLY_N_PARTIAL_SPEC_PARAMS(i_, T, na) >\n    : BOOST_PP_CAT(apply,i_)< F AUX778076_APPLY_N_COMMA_PARAMS(i_, T) >\n{\n};\n#endif\n\n#   else // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n#if !defined(BOOST_MPL_CFG_NO_APPLY_TEMPLATE)\nnamespace aux {\n\ntemplate<>\nstruct apply_chooser<i_>\n{\n    template<\n          typename F, AUX778076_APPLY_PARAMS(typename T)\n        >\n    
struct result_\n    {\n        typedef BOOST_PP_CAT(apply,i_)<\n              F AUX778076_APPLY_N_COMMA_PARAMS(i_, T)\n            > type;\n    };\n};\n\n} // namespace aux\n#endif\n\n#   endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n#   endif // BOOST_MPL_CFG_NO_APPLY_TEMPLATE\n\n#   undef i_\n\n#endif // BOOST_PP_ITERATION_DEPTH()\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/apply_fwd.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_APPLY_FWD_HPP_INCLUDED\n#define BOOST_MPL_APPLY_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/aux_/na.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER apply_fwd.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/preprocessor/default_params.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n// agurt, 15/jan/02: top-level 'apply' template gives an ICE on MSVC\n// (for known reasons)\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n#   define BOOST_MPL_CFG_NO_APPLY_TEMPLATE\n#endif\n\nnamespace boost { namespace mpl {\n\n// local macro, #undef-ined at the end of the header\n#   define AUX778076_APPLY_DEF_PARAMS(param, value) \\\n    BOOST_MPL_PP_DEFAULT_PARAMS( \\\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        , param \\\n        , value \\\n        ) \\\n    /**/\n\n#   define AUX778076_APPLY_N_COMMA_PARAMS(n, param) \\\n    BOOST_PP_COMMA_IF(n) \\\n    BOOST_MPL_PP_PARAMS(n, param) \\\n    /**/\n\n#   if !defined(BOOST_MPL_CFG_NO_APPLY_TEMPLATE)\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n// forward declaration\ntemplate<\n 
     typename F, AUX778076_APPLY_DEF_PARAMS(typename T, na)\n    >\nstruct apply;\n#else\nnamespace aux {\ntemplate< BOOST_AUX_NTTP_DECL(int, arity_) > struct apply_chooser;\n}\n#endif\n\n#   endif // BOOST_MPL_CFG_NO_APPLY_TEMPLATE\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/apply_fwd.hpp>))\n#include BOOST_PP_ITERATE()\n\n\n#   undef AUX778076_APPLY_N_COMMA_PARAMS\n#   undef AUX778076_APPLY_DEF_PARAMS\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_APPLY_FWD_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\ntemplate<\n      typename F AUX778076_APPLY_N_COMMA_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(apply,i_);\n\n#undef i_\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/apply_wrap.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_APPLY_WRAP_HPP_INCLUDED\n#define BOOST_MPL_APPLY_WRAP_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/aux_/arity.hpp>\n#   include <boost/mpl/aux_/has_apply.hpp>\n#   include <boost/mpl/aux_/na.hpp>\n#   include <boost/mpl/aux_/msvc_never_true.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER apply_wrap.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/preprocessor/enum.hpp>\n#   include <boost/mpl/aux_/preprocessor/add.hpp>\n#   include <boost/mpl/aux_/config/bcc.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   include <boost/mpl/aux_/config/dtp.hpp>\n#   include <boost/mpl/aux_/config/eti.hpp>\n#   include <boost/mpl/aux_/config/msvc.hpp>\n#   include <boost/mpl/aux_/config/workaround.hpp>\n\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/logical/and.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n\n\nnamespace boost { namespace mpl {\n\n// local macros, #undef-ined at the end of the header\n#   define AUX778076_APPLY_WRAP_PARAMS(n, param) \\\n    BOOST_MPL_PP_PARAMS(n, param) \\\n    /**/\n\n#   define AUX778076_APPLY_WRAP_SPEC_PARAMS(n, param) \\\n    BOOST_MPL_PP_ENUM(BOOST_PP_INC(n), param) \\\n    /**/\n\n\n#define 
BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/apply_wrap.hpp>))\n#include BOOST_PP_ITERATE()\n\n\n#   undef AUX778076_APPLY_WRAP_SPEC_PARAMS\n#   undef AUX778076_APPLY_WRAP_PARAMS\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_APPLY_WRAP_HPP_INCLUDED\n\n///// iteration, depth == 1\n\n// For gcc 4.4 compatability, we must include the\n// BOOST_PP_ITERATION_DEPTH test inside an #else clause.\n#else // BOOST_PP_IS_ITERATING\n#if BOOST_PP_ITERATION_DEPTH() == 1\n\n#   define i_ BOOST_PP_FRAME_ITERATION(1)\n\n#   if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n// MSVC version\n\n#define AUX778076_MSVC_DTW_NAME BOOST_PP_CAT(msvc_apply,i_)\n#define AUX778076_MSVC_DTW_ORIGINAL_NAME apply\n#define AUX778076_MSVC_DTW_ARITY i_\n#include <boost/mpl/aux_/msvc_dtw.hpp>\n\ntemplate<\n      typename F BOOST_PP_COMMA_IF(i_) AUX778076_APPLY_WRAP_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(apply_wrap,i_)\n{\n    // Metafunction forwarding confuses vc6\n    typedef typename BOOST_PP_CAT(msvc_apply,i_)<F>::template result_<\n          AUX778076_APPLY_WRAP_PARAMS(i_, T)\n        >::type type;\n};\n\n#   elif defined(BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n// MWCW/Borland version\n\ntemplate<\n      int N, typename F BOOST_PP_COMMA_IF(i_) AUX778076_APPLY_WRAP_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(apply_wrap_impl,i_);\n\n#define BOOST_PP_ITERATION_PARAMS_2 \\\n    (3,(0, BOOST_MPL_LIMIT_METAFUNCTION_ARITY - i_, <boost/mpl/apply_wrap.hpp>))\n#include BOOST_PP_ITERATE()\n\ntemplate<\n      typename F BOOST_PP_COMMA_IF(i_) AUX778076_APPLY_WRAP_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(apply_wrap,i_)\n    : BOOST_PP_CAT(apply_wrap_impl,i_)<\n          ::boost::mpl::aux::arity<F,i_>::value\n        , F\n        BOOST_PP_COMMA_IF(i_) AUX778076_APPLY_WRAP_PARAMS(i_, T)\n        >::type\n{\n};\n\n#   else\n// ISO98 C++, with minor concession to vc7\n\ntemplate<\n      typename F 
BOOST_PP_COMMA_IF(i_) AUX778076_APPLY_WRAP_PARAMS(i_, typename T)\n#if i_ == 0\n    , typename has_apply_ = typename aux::has_apply<F>::type\n#endif\n    >\nstruct BOOST_PP_CAT(apply_wrap,i_)\n// metafunction forwarding confuses MSVC 7.0\n#if !BOOST_WORKAROUND(BOOST_MSVC, == 1300)\n    : F::template apply< AUX778076_APPLY_WRAP_PARAMS(i_, T) >\n{\n#else\n{    \n    typedef typename F::template apply<\n         AUX778076_APPLY_WRAP_PARAMS(i_, T)\n        >::type type;\n#endif\n};\n\n#if i_ == 0 && !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\ntemplate< typename F >\nstruct BOOST_PP_CAT(apply_wrap,i_)<F,true_>\n    : F::apply\n{\n};\n#endif\n\n#   endif // workarounds\n\n#if defined(BOOST_MPL_CFG_MSVC_ETI_BUG)\n/// workaround for ETI bug\ntemplate<>\nstruct BOOST_PP_CAT(apply_wrap,i_)<AUX778076_APPLY_WRAP_SPEC_PARAMS(i_, int)>\n{\n    typedef int type;\n};\n#endif\n\n#   undef i_\n\n///// iteration, depth == 2\n\n#elif BOOST_PP_ITERATION_DEPTH() == 2\n\n#   define j_ BOOST_PP_FRAME_ITERATION(2)\n\n#if i_ == 0 && j_ == 0 \\\n    && defined(BOOST_MPL_CFG_BCC590_WORKAROUNDS) \\\n    && !defined(BOOST_MPL_CFG_NO_HAS_APPLY)\n\ntemplate< typename F, bool F_has_apply >\nstruct apply_wrap_impl0_bcb {\n    typedef typename F::template apply< na > type;\n};\n\ntemplate< typename F >\nstruct apply_wrap_impl0_bcb< F, true > {\n    typedef typename F::apply type;\n};\n\ntemplate<\n      typename F BOOST_PP_COMMA_IF(i_) AUX778076_APPLY_WRAP_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(apply_wrap_impl,i_)<\n          BOOST_MPL_PP_ADD(i_, j_)\n        , F\n        BOOST_PP_COMMA_IF(i_) AUX778076_APPLY_WRAP_PARAMS(i_, T)\n        >\n{\n    typedef apply_wrap_impl0_bcb< F, aux::has_apply< F >::value >::type type;\n};\n#else\n\ntemplate<\n      typename F BOOST_PP_COMMA_IF(i_) AUX778076_APPLY_WRAP_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(apply_wrap_impl,i_)<\n          BOOST_MPL_PP_ADD(i_, j_)\n        , F\n        BOOST_PP_COMMA_IF(i_) 
AUX778076_APPLY_WRAP_PARAMS(i_, T)\n        >\n{\n    typedef typename F::template apply<\n          AUX778076_APPLY_WRAP_PARAMS(i_, T)\n#if i_ == 0 && j_ == 0\n/// since the defaults are \"lost\", we have to pass *something* even for nullary\n/// metafunction classes\n        na\n#else\n        BOOST_PP_COMMA_IF(BOOST_PP_AND(i_, j_)) BOOST_MPL_PP_ENUM(j_, na)\n#endif\n        > type;\n};\n\n#endif\n\n#   undef j_\n\n#endif // BOOST_PP_ITERATION_DEPTH()\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/arg.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_ARG_HPP_INCLUDED\n#define BOOST_MPL_ARG_HPP_INCLUDED\n\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/arg_fwd.hpp>\n#   include <boost/mpl/aux_/na.hpp>\n#   include <boost/mpl/aux_/na_assert.hpp>\n#   include <boost/mpl/aux_/arity_spec.hpp>\n#   include <boost/mpl/aux_/arg_typedef.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/static_constant.hpp>\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER arg.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/default_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/config/lambda.hpp>\n#   include <boost/mpl/aux_/config/dtp.hpp>\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\n// local macro, #undef-ined at the end of the header\n#if !defined(BOOST_MPL_CFG_NO_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n#   define AUX778076_ARG_N_DEFAULT_PARAMS(param,value) \\\n    BOOST_MPL_PP_DEFAULT_PARAMS( \\\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        , param \\\n        , value \\\n        ) \\\n    /**/\n#else\n#   define AUX778076_ARG_N_DEFAULT_PARAMS(param,value) \\\n    BOOST_MPL_PP_PARAMS( \\\n         
 BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        , param \\\n        ) \\\n    /**/\n#endif\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/arg.hpp>))\n#include BOOST_PP_ITERATE()\n\n\n#   undef AUX778076_ARG_N_DEFAULT_PARAMS\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int,arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_ARG_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\n#if i_ > 0\n\ntemplate<> struct arg<i_>\n{\n    BOOST_STATIC_CONSTANT(int, value = i_);\n    typedef arg<BOOST_PP_INC(i_)> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          AUX778076_ARG_N_DEFAULT_PARAMS(typename U, na)\n        >\n    struct apply\n    {\n        typedef BOOST_PP_CAT(U,i_) type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\n#else\n\ntemplate<> struct arg<-1>\n{\n    BOOST_STATIC_CONSTANT(int, value = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          AUX778076_ARG_N_DEFAULT_PARAMS(typename U, na)\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\n#endif // i_ > 0\n\n#undef i_\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/arg_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_ARG_FWD_HPP_INCLUDED\n#define BOOST_MPL_ARG_FWD_HPP_INCLUDED\n\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/adl_barrier.hpp>\n#include <boost/mpl/aux_/nttp_decl.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) > struct arg;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nBOOST_MPL_AUX_ADL_BARRIER_DECL(arg)\n\n#endif // BOOST_MPL_ARG_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/assert.hpp",
    "content": "\n#ifndef BOOST_MPL_ASSERT_HPP_INCLUDED\n#define BOOST_MPL_ASSERT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2006\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/not.hpp>\n#include <boost/mpl/aux_/value_wknd.hpp>\n#include <boost/mpl/aux_/nested_type_wknd.hpp>\n#include <boost/mpl/aux_/yes_no.hpp>\n#include <boost/mpl/aux_/na.hpp>\n#include <boost/mpl/aux_/adl_barrier.hpp>\n\n#include <boost/mpl/aux_/config/nttp.hpp>\n#include <boost/mpl/aux_/config/dtp.hpp>\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/gpu.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n#include <boost/mpl/aux_/config/pp_counter.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#include <boost/preprocessor/cat.hpp>\n\n#include <boost/config.hpp> // make sure 'size_t' is placed into 'std'\n#include <cstddef>\n\n#if BOOST_WORKAROUND(BOOST_MSVC, == 1700)\n#include <boost/mpl/if.hpp>\n#endif\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n    || (BOOST_MPL_CFG_GCC != 0) \\\n    || BOOST_WORKAROUND(__IBMCPP__, <= 600)\n#   define BOOST_MPL_CFG_ASSERT_USE_RELATION_NAMES\n#endif\n\n#if BOOST_WORKAROUND(__MWERKS__, < 0x3202) \\\n    || BOOST_WORKAROUND(__EDG_VERSION__, <= 238) \\\n    || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n    || BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n#   define BOOST_MPL_CFG_ASSERT_BROKEN_POINTER_TO_POINTER_TO_MEMBER\n#endif\n\n// agurt, 10/nov/06: use enums for Borland (which cannot cope with static constants) \n// and GCC (which issues \"unused variable\" warnings when static constants are used \n// at a function scope)\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n    
|| (BOOST_MPL_CFG_GCC != 0) || (BOOST_MPL_CFG_GPU != 0)\n#   define BOOST_MPL_AUX_ASSERT_CONSTANT(T, expr) enum { expr }\n#else\n#   define BOOST_MPL_AUX_ASSERT_CONSTANT(T, expr) BOOST_STATIC_CONSTANT(T, expr)\n#endif\n\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\nstruct failed {};\n\n// agurt, 24/aug/04: MSVC 7.1 workaround here and below: return/accept \n// 'assert<false>' by reference; can't apply it unconditionally -- apparently it\n// degrades the quality of GCC diagnostics\n#if BOOST_WORKAROUND(BOOST_MSVC, == 1310)\n#   define AUX778076_ASSERT_ARG(x) x&\n#else\n#   define AUX778076_ASSERT_ARG(x) x\n#endif\n\ntemplate< bool C >  struct assert        { typedef void* type; };\ntemplate<>          struct assert<false> { typedef AUX778076_ASSERT_ARG(assert) type; };\n\ntemplate< bool C >\nint assertion_failed( typename assert<C>::type );\n\ntemplate< bool C >\nstruct assertion\n{\n    static int failed( assert<false> );\n};\n\ntemplate<>\nstruct assertion<true>\n{\n    static int failed( void* );\n};\n\nstruct assert_\n{\n#if !defined(BOOST_MPL_CFG_NO_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n    template< typename T1, typename T2 = na, typename T3 = na, typename T4 = na > struct types {};\n#endif\n    static assert_ const arg;\n    enum relations { equal = 1, not_equal, greater, greater_equal, less, less_equal };\n};\n\n\n#if !defined(BOOST_MPL_CFG_ASSERT_USE_RELATION_NAMES)\n\nbool operator==( failed, failed );\nbool operator!=( failed, failed );\nbool operator>( failed, failed );\nbool operator>=( failed, failed );\nbool operator<( failed, failed );\nbool operator<=( failed, failed );\n\n#if defined(__EDG_VERSION__)\ntemplate< bool (*)(failed, failed), long x, long y > struct assert_relation {};\n#   define BOOST_MPL_AUX_ASSERT_RELATION(x, y, r) assert_relation<r,x,y>\n#else\ntemplate< BOOST_MPL_AUX_NTTP_DECL(long, x), BOOST_MPL_AUX_NTTP_DECL(long, y), bool (*)(failed, failed) > \nstruct assert_relation {};\n#   define BOOST_MPL_AUX_ASSERT_RELATION(x, y, r) 
assert_relation<x,y,r>\n#endif\n\n#else // BOOST_MPL_CFG_ASSERT_USE_RELATION_NAMES\n\nboost::mpl::aux::weighted_tag<1>::type operator==( assert_, assert_ );\nboost::mpl::aux::weighted_tag<2>::type operator!=( assert_, assert_ );\nboost::mpl::aux::weighted_tag<3>::type operator>(  assert_, assert_ );\nboost::mpl::aux::weighted_tag<4>::type operator>=( assert_, assert_ );\nboost::mpl::aux::weighted_tag<5>::type operator<( assert_, assert_ );\nboost::mpl::aux::weighted_tag<6>::type operator<=( assert_, assert_ );\n\ntemplate< assert_::relations r, long x, long y > struct assert_relation {};\n\n#endif \n\n#if BOOST_WORKAROUND(BOOST_MSVC, == 1700)\n\ntemplate<class Pred>\nstruct extract_assert_pred;\n\ntemplate<class Pred>\nstruct extract_assert_pred<void(Pred)> { typedef Pred type; };\n\ntemplate<class Pred>\nstruct eval_assert {\n    typedef typename extract_assert_pred<Pred>::type P;\n    typedef typename P::type p_type;\n    typedef typename ::boost::mpl::if_c<p_type::value,\n        AUX778076_ASSERT_ARG(assert<false>),\n        failed ************ P::************\n    >::type type;\n};\n\ntemplate<class Pred>\nstruct eval_assert_not {\n    typedef typename extract_assert_pred<Pred>::type P;\n    typedef typename P::type p_type;\n    typedef typename ::boost::mpl::if_c<!p_type::value,\n        AUX778076_ASSERT_ARG(assert<false>),\n        failed ************ ::boost::mpl::not_<P>::************\n    >::type type;\n};\n\ntemplate< typename T >\nT make_assert_arg();\n\n#elif !defined(BOOST_MPL_CFG_ASSERT_BROKEN_POINTER_TO_POINTER_TO_MEMBER)\n\ntemplate< bool > struct assert_arg_pred_impl { typedef int type; };\ntemplate<> struct assert_arg_pred_impl<true> { typedef void* type; };\n\ntemplate< typename P > struct assert_arg_pred\n{\n    typedef typename P::type p_type;\n    typedef typename assert_arg_pred_impl< p_type::value >::type type;\n};\n\ntemplate< typename P > struct assert_arg_pred_not\n{\n    typedef typename P::type p_type;\n    
BOOST_MPL_AUX_ASSERT_CONSTANT( bool, p = !p_type::value );\n    typedef typename assert_arg_pred_impl<p>::type type;\n};\n\ntemplate< typename Pred >\nfailed ************ (Pred::************ \n      assert_arg( void (*)(Pred), typename assert_arg_pred<Pred>::type )\n    );\n\ntemplate< typename Pred >\nfailed ************ (boost::mpl::not_<Pred>::************ \n      assert_not_arg( void (*)(Pred), typename assert_arg_pred_not<Pred>::type )\n    );\n\ntemplate< typename Pred >\nAUX778076_ASSERT_ARG(assert<false>)\nassert_arg( void (*)(Pred), typename assert_arg_pred_not<Pred>::type );\n\ntemplate< typename Pred >\nAUX778076_ASSERT_ARG(assert<false>)\nassert_not_arg( void (*)(Pred), typename assert_arg_pred<Pred>::type );\n\n\n#else // BOOST_MPL_CFG_ASSERT_BROKEN_POINTER_TO_POINTER_TO_MEMBER\n        \ntemplate< bool c, typename Pred > struct assert_arg_type_impl\n{\n    typedef failed      ************ Pred::* mwcw83_wknd;\n    typedef mwcw83_wknd ************* type;\n};\n\ntemplate< typename Pred > struct assert_arg_type_impl<true,Pred>\n{\n    typedef AUX778076_ASSERT_ARG(assert<false>) type;\n};\n\ntemplate< typename Pred > struct assert_arg_type\n    : assert_arg_type_impl< BOOST_MPL_AUX_VALUE_WKND(BOOST_MPL_AUX_NESTED_TYPE_WKND(Pred))::value, Pred >\n{\n};\n\ntemplate< typename Pred >\ntypename assert_arg_type<Pred>::type \nassert_arg(void (*)(Pred), int);\n\ntemplate< typename Pred >\ntypename assert_arg_type< boost::mpl::not_<Pred> >::type \nassert_not_arg(void (*)(Pred), int);\n\n#   if !defined(BOOST_MPL_CFG_ASSERT_USE_RELATION_NAMES)\ntemplate< long x, long y, bool (*r)(failed, failed) >\ntypename assert_arg_type_impl< false,BOOST_MPL_AUX_ASSERT_RELATION(x,y,r) >::type\nassert_rel_arg( BOOST_MPL_AUX_ASSERT_RELATION(x,y,r) );\n#   else\ntemplate< assert_::relations r, long x, long y >\ntypename assert_arg_type_impl< false,assert_relation<r,x,y> >::type\nassert_rel_arg( assert_relation<r,x,y> );\n#   endif\n\n#endif // 
BOOST_MPL_CFG_ASSERT_BROKEN_POINTER_TO_POINTER_TO_MEMBER\n\n#undef AUX778076_ASSERT_ARG\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n\n#if BOOST_WORKAROUND(BOOST_MSVC, == 1700)\n\n// BOOST_MPL_ASSERT((pred<x,...>))\n\n#define BOOST_MPL_ASSERT(pred) \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,BOOST_MPL_AUX_PP_COUNTER()) = sizeof( \\\n          boost::mpl::assertion_failed<false>( \\\n              boost::mpl::make_assert_arg< \\\n                  typename boost::mpl::eval_assert<void pred>::type \\\n                >() \\\n            ) \\\n        ) \\\n    ) \\\n/**/\n\n// BOOST_MPL_ASSERT_NOT((pred<x,...>))\n\n#define BOOST_MPL_ASSERT_NOT(pred) \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,BOOST_MPL_AUX_PP_COUNTER()) = sizeof( \\\n          boost::mpl::assertion_failed<false>( \\\n              boost::mpl::make_assert_arg< \\\n                  typename boost::mpl::eval_assert_not<void pred>::type \\\n                >() \\\n            ) \\\n        ) \\\n    ) \\\n/**/\n\n#else\n\n// BOOST_MPL_ASSERT((pred<x,...>))\n\n#define BOOST_MPL_ASSERT(pred) \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,BOOST_MPL_AUX_PP_COUNTER()) = sizeof( \\\n          boost::mpl::assertion_failed<false>( \\\n              boost::mpl::assert_arg( (void (*) pred)0, 1 ) \\\n            ) \\\n        ) \\\n    ) \\\n/**/\n\n// BOOST_MPL_ASSERT_NOT((pred<x,...>))\n\n#if BOOST_WORKAROUND(BOOST_MSVC, <= 1300)\n#   define BOOST_MPL_ASSERT_NOT(pred) \\\nenum { \\\n      BOOST_PP_CAT(mpl_assertion_in_line_,BOOST_MPL_AUX_PP_COUNTER()) = sizeof( \\\n          boost::mpl::assertion<false>::failed( \\\n              boost::mpl::assert_not_arg( (void (*) pred)0, 1 ) \\\n            ) \\\n        ) \\\n}\\\n/**/\n#else\n#   define BOOST_MPL_ASSERT_NOT(pred) \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , 
BOOST_PP_CAT(mpl_assertion_in_line_,BOOST_MPL_AUX_PP_COUNTER()) = sizeof( \\\n          boost::mpl::assertion_failed<false>( \\\n              boost::mpl::assert_not_arg( (void (*) pred)0, 1 ) \\\n            ) \\\n        ) \\\n   ) \\\n/**/\n#endif\n\n#endif\n\n// BOOST_MPL_ASSERT_RELATION(x, ==|!=|<=|<|>=|>, y)\n\n#if defined(BOOST_MPL_CFG_ASSERT_USE_RELATION_NAMES)\n\n#   if !defined(BOOST_MPL_CFG_ASSERT_BROKEN_POINTER_TO_POINTER_TO_MEMBER)\n// agurt, 9/nov/06: 'enum' below is a workaround for gcc 4.0.4/4.1.1 bugs #29522 and #29518\n#   define BOOST_MPL_ASSERT_RELATION_IMPL(counter, x, rel, y)      \\\nenum { BOOST_PP_CAT(mpl_assert_rel_value,counter) = (x rel y) }; \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,counter) = sizeof( \\\n        boost::mpl::assertion_failed<BOOST_PP_CAT(mpl_assert_rel_value,counter)>( \\\n            (boost::mpl::failed ************ ( boost::mpl::assert_relation< \\\n                  boost::mpl::assert_::relations( sizeof( \\\n                      boost::mpl::assert_::arg rel boost::mpl::assert_::arg \\\n                    ) ) \\\n                , x \\\n                , y \\\n                >::************)) 0 ) \\\n        ) \\\n    ) \\\n/**/\n#   else\n#   define BOOST_MPL_ASSERT_RELATION_IMPL(counter, x, rel, y)    \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assert_rel,counter) = sizeof( \\\n          boost::mpl::assert_::arg rel boost::mpl::assert_::arg \\\n        ) \\\n    ); \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( bool, BOOST_PP_CAT(mpl_assert_rel_value,counter) = (x rel y) ); \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,counter) = sizeof( \\\n        boost::mpl::assertion_failed<BOOST_PP_CAT(mpl_assert_rel_value,counter)>( \\\n              boost::mpl::assert_rel_arg( boost::mpl::assert_relation< \\\n                  
boost::mpl::assert_::relations(BOOST_PP_CAT(mpl_assert_rel,counter)) \\\n                , x \\\n                , y \\\n                >() ) \\\n            ) \\\n        ) \\\n    ) \\\n/**/\n#   endif\n\n#   define BOOST_MPL_ASSERT_RELATION(x, rel, y) \\\nBOOST_MPL_ASSERT_RELATION_IMPL(BOOST_MPL_AUX_PP_COUNTER(), x, rel, y) \\\n/**/\n\n#else // !BOOST_MPL_CFG_ASSERT_USE_RELATION_NAMES\n\n#   if defined(BOOST_MPL_CFG_ASSERT_BROKEN_POINTER_TO_POINTER_TO_MEMBER)\n#   define BOOST_MPL_ASSERT_RELATION(x, rel, y) \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,BOOST_MPL_AUX_PP_COUNTER()) = sizeof( \\\n        boost::mpl::assertion_failed<(x rel y)>( boost::mpl::assert_rel_arg( \\\n              boost::mpl::BOOST_MPL_AUX_ASSERT_RELATION(x,y,(&boost::mpl::operator rel))() \\\n            ) ) \\\n        ) \\\n    ) \\\n/**/\n#   else\n#   define BOOST_MPL_ASSERT_RELATION(x, rel, y) \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,BOOST_MPL_AUX_PP_COUNTER()) = sizeof( \\\n        boost::mpl::assertion_failed<(x rel y)>( (boost::mpl::failed ************ ( \\\n            boost::mpl::BOOST_MPL_AUX_ASSERT_RELATION(x,y,(&boost::mpl::operator rel))::************))0 ) \\\n        ) \\\n    ) \\\n/**/\n#   endif\n\n#endif\n\n\n// BOOST_MPL_ASSERT_MSG( (pred<x,...>::value), USER_PROVIDED_MESSAGE, (types<x,...>) ) \n\n#if BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3202))\n#   define BOOST_MPL_ASSERT_MSG_IMPL( counter, c, msg, types_ ) \\\nstruct msg; \\\ntypedef struct BOOST_PP_CAT(msg,counter) : boost::mpl::assert_ \\\n{ \\\n    using boost::mpl::assert_::types; \\\n    static boost::mpl::failed ************ (msg::************ assert_arg()) types_ \\\n    { return 0; } \\\n} BOOST_PP_CAT(mpl_assert_arg,counter); \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,counter) = sizeof( \\\n        
boost::mpl::assertion<(c)>::failed( BOOST_PP_CAT(mpl_assert_arg,counter)::assert_arg() ) \\\n        ) \\\n    ) \\\n/**/\n#else\n#   define BOOST_MPL_ASSERT_MSG_IMPL( counter, c, msg, types_ )  \\\nstruct msg; \\\ntypedef struct BOOST_PP_CAT(msg,counter) : boost::mpl::assert_ \\\n{ \\\n    static boost::mpl::failed ************ (msg::************ assert_arg()) types_ \\\n    { return 0; } \\\n} BOOST_PP_CAT(mpl_assert_arg,counter); \\\nBOOST_MPL_AUX_ASSERT_CONSTANT( \\\n      std::size_t \\\n    , BOOST_PP_CAT(mpl_assertion_in_line_,counter) = sizeof( \\\n        boost::mpl::assertion_failed<(c)>( BOOST_PP_CAT(mpl_assert_arg,counter)::assert_arg() ) \\\n        ) \\\n    ) \\\n/**/\n#endif\n\n#define BOOST_MPL_ASSERT_MSG( c, msg, types_ ) \\\nBOOST_MPL_ASSERT_MSG_IMPL( BOOST_MPL_AUX_PP_COUNTER(), c, msg, types_ ) \\\n/**/\n\n#endif // BOOST_MPL_ASSERT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/at_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_AT_FWD_HPP_INCLUDED\n#define BOOST_MPL_AT_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct at_impl;\ntemplate< typename Sequence, typename N > struct at;\n\n}}\n\n#endif // BOOST_MPL_AT_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/O1_size_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_O1_SIZE_IMPL_HPP_INCLUDED\n#define BOOST_MPL_O1_SIZE_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/O1_size_fwd.hpp>\n#include <boost/mpl/long.hpp>\n#include <boost/mpl/if.hpp>\n#include <boost/mpl/aux_/has_size.hpp>\n#include <boost/mpl/aux_/config/forwarding.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\nnamespace boost { namespace mpl {\n\n// default implementation - returns 'Sequence::size' if sequence has a 'size'\n// member, and -1 otherwise; conrete sequences might override it by \n// specializing either the 'O1_size_impl' or the primary 'O1_size' template\n\n#   if !BOOST_WORKAROUND(BOOST_MSVC, < 1300) \\\n    && !BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3003))\n\nnamespace aux {\ntemplate< typename Sequence > struct O1_size_impl\n    : Sequence::size\n{\n};\n}\n\ntemplate< typename Tag >\nstruct O1_size_impl\n{\n    template< typename Sequence > struct apply\n#if !defined(BOOST_MPL_CFG_NO_NESTED_FORWARDING)\n        : if_<\n              aux::has_size<Sequence>\n            , aux::O1_size_impl<Sequence>\n            , long_<-1>\n            >::type\n    {\n#else\n    {\n        typedef typename if_<\n              aux::has_size<Sequence>\n            , aux::O1_size_impl<Sequence>\n            , long_<-1>\n            >::type type;\n\n        BOOST_STATIC_CONSTANT(long, value =\n              (if_<\n                  aux::has_size<Sequence>\n                , aux::O1_size_impl<Sequence>\n                , long_<-1>\n                >::type::value)\n            );\n#endif\n    };\n};\n\n#   else // 
BOOST_MSVC\n\ntemplate< typename Tag >\nstruct O1_size_impl\n{\n    template< typename Sequence > struct apply\n        : long_<-1>\n        {\n        };\n};\n\n#   endif\n\n}}\n\n#endif // BOOST_MPL_O1_SIZE_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/adl_barrier.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_ADL_BARRIER_HPP_INCLUDED\n#define BOOST_MPL_AUX_ADL_BARRIER_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/adl.hpp>\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_ADL_BARRIER_NAMESPACE)\n\n#   define BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE mpl_\n#   define BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN namespace mpl_ {\n#   define BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE }\n#   define BOOST_MPL_AUX_ADL_BARRIER_DECL(type) \\\n    namespace boost { namespace mpl { \\\n    using ::BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::type; \\\n    } } \\\n/**/\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\nnamespace BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE { namespace aux {} }\nnamespace boost { namespace mpl { using namespace BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE; \nnamespace aux { using namespace BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::aux; }\n}}\n#endif\n\n#else // BOOST_MPL_CFG_NO_ADL_BARRIER_NAMESPACE\n\n#   define BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE boost::mpl\n#   define BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN namespace boost { namespace mpl {\n#   define BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE }}\n#   define BOOST_MPL_AUX_ADL_BARRIER_DECL(type) /**/\n\n#endif\n\n#endif // BOOST_MPL_AUX_ADL_BARRIER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/arg_typedef.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_ARG_TYPEDEF_HPP_INCLUDED\n#define BOOST_MPL_AUX_ARG_TYPEDEF_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/lambda.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT) \\\n    || BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n    \n#   define BOOST_MPL_AUX_ARG_TYPEDEF(T, name) typedef T name;\n\n#else\n\n#   define BOOST_MPL_AUX_ARG_TYPEDEF(T, name) /**/\n\n#endif\n\n#endif // BOOST_MPL_AUX_ARG_TYPEDEF_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/arity.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_ARITY_HPP_INCLUDED\n#define BOOST_MPL_AUX_ARITY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/dtp.hpp>\n\n#if defined(BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n#   include <boost/mpl/aux_/config/static_constant.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\n// agurt, 15/mar/02: it's possible to implement the template so that it will \n// \"just work\" and do not require any specialization, but not on the compilers\n// that require the arity workaround in the first place\ntemplate< typename F, BOOST_MPL_AUX_NTTP_DECL(int, N) >\nstruct arity\n{\n    BOOST_STATIC_CONSTANT(int, value = N);\n};\n\n}}}\n\n#endif // BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES\n\n#endif // BOOST_MPL_AUX_ARITY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/arity_spec.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_ARITY_SPEC_HPP_INCLUDED\n#define BOOST_MPL_AUX_ARITY_SPEC_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/int.hpp>\n#include <boost/mpl/limits/arity.hpp>\n#include <boost/mpl/aux_/config/dtp.hpp>\n#include <boost/mpl/aux_/preprocessor/params.hpp>\n#include <boost/mpl/aux_/arity.hpp>\n#include <boost/mpl/aux_/template_arity_fwd.hpp>\n#include <boost/mpl/aux_/config/ttp.hpp>\n#include <boost/mpl/aux_/config/lambda.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n\n#if defined(BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n#   define BOOST_MPL_AUX_NONTYPE_ARITY_SPEC(i,type,name) \\\nnamespace aux { \\\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N), BOOST_MPL_PP_PARAMS(i,type T) > \\\nstruct arity< \\\n      name< BOOST_MPL_PP_PARAMS(i,T) > \\\n    , N \\\n    > \\\n{ \\\n    BOOST_STATIC_CONSTANT(int \\\n        , value = BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        ); \\\n}; \\\n} \\\n/**/\n#else\n#   define BOOST_MPL_AUX_NONTYPE_ARITY_SPEC(i,type,name) /**/\n#endif\n\n#   define BOOST_MPL_AUX_ARITY_SPEC(i,name) \\\n    BOOST_MPL_AUX_NONTYPE_ARITY_SPEC(i,typename,name) \\\n/**/\n\n\n#if defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING) \\\n    && !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n#   define BOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(i, name) \\\nnamespace aux { \\\ntemplate< BOOST_MPL_PP_PARAMS(i,typename T) > \\\nstruct template_arity< name<BOOST_MPL_PP_PARAMS(i,T)> > \\\n    : int_<i> \\\n{ \\\n}; \\\n} \\\n/**/\n#else\n#   define BOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(i, name) /**/\n#endif\n\n\n#endif // BOOST_MPL_AUX_ARITY_SPEC_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/begin_end_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_BEGIN_END_IMPL_HPP_INCLUDED\n#define BOOST_MPL_AUX_BEGIN_END_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0.\n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end_fwd.hpp>\n#include <boost/mpl/sequence_tag_fwd.hpp>\n#include <boost/mpl/void.hpp>\n#include <boost/mpl/eval_if.hpp>\n#include <boost/mpl/aux_/has_begin.hpp>\n#include <boost/mpl/aux_/na.hpp>\n#include <boost/mpl/aux_/traits_lambda_spec.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n\nnamespace boost { namespace mpl {\n\n\nnamespace aux { \n\ntemplate< typename Sequence > \nstruct begin_type \n{ \n    typedef typename Sequence::begin type; \n};\ntemplate< typename Sequence > \nstruct end_type\n{ \n    typedef typename Sequence::end type; \n};\n\n}\n\n// default implementation; conrete sequences might override it by \n// specializing either the 'begin_impl/end_impl' or the primary \n// 'begin/end' templates\n\ntemplate< typename Tag >\nstruct begin_impl\n{\n    template< typename Sequence > struct apply\n    {\n        typedef typename eval_if<aux::has_begin<Sequence, true_>,\n                                 aux::begin_type<Sequence>, void_>::type type;\n    };\n};\n\ntemplate< typename Tag >\nstruct end_impl\n{\n    template< typename Sequence > struct apply\n    {\n        typedef typename eval_if<aux::has_begin<Sequence, true_>,\n                                 aux::end_type<Sequence>, void_>::type type;\n    };\n};\n\n// specialize 'begin_trait/end_trait' for two pre-defined tags\n\n#   define AUX778076_IMPL_SPEC(name, tag, result) \\\ntemplate<> \\\nstruct name##_impl<tag> \\\n{ \\\n    template< typename Sequence > struct apply \\\n    { \\\n        typedef result type; \\\n    }; \\\n}; \\\n/**/\n\n// a 
sequence with nested 'begin/end' typedefs; just query them\nAUX778076_IMPL_SPEC(begin, nested_begin_end_tag, typename Sequence::begin)\nAUX778076_IMPL_SPEC(end, nested_begin_end_tag, typename Sequence::end)\n\n// if a type 'T' does not contain 'begin/end' or 'tag' members \n// and doesn't specialize either 'begin/end' or 'begin_impl/end_impl' \n// templates, then we end up here\nAUX778076_IMPL_SPEC(begin, non_sequence_tag, void_)\nAUX778076_IMPL_SPEC(end, non_sequence_tag, void_)\nAUX778076_IMPL_SPEC(begin, na, void_)\nAUX778076_IMPL_SPEC(end, na, void_)\n\n#   undef AUX778076_IMPL_SPEC\n\n\nBOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC_IMPL(1,begin_impl)\nBOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC_IMPL(1,end_impl)\n\n}}\n\n#endif // BOOST_MPL_AUX_BEGIN_END_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/clear_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CLEAR_IMPL_HPP_INCLUDED\n#define BOOST_MPL_AUX_CLEAR_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/clear_fwd.hpp>\n#include <boost/mpl/aux_/traits_lambda_spec.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n\nnamespace boost { namespace mpl {\n\n// no default implementation; the definition is needed to make MSVC happy\n\ntemplate< typename Tag >\nstruct clear_impl\n{\n    template< typename Sequence > struct apply;\n};\n\nBOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC(1, clear_impl)\n\n}}\n\n#endif // BOOST_MPL_AUX_CLEAR_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/common_name_wknd.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_COMMON_NAME_WKND_HPP_INCLUDED\n#define BOOST_MPL_AUX_COMMON_NAME_WKND_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if BOOST_WORKAROUND(__BORLANDC__, < 0x561)\n// agurt, 12/nov/02: to suppress the bogus \"Cannot have both a template class \n// and function named 'xxx'\" diagnostic\n#   define BOOST_MPL_AUX_COMMON_NAME_WKND(name) \\\nnamespace name_##wknd { \\\ntemplate< typename > void name(); \\\n} \\\n/**/\n\n#else\n\n#   define BOOST_MPL_AUX_COMMON_NAME_WKND(name) /**/\n\n#endif // __BORLANDC__\n\n#endif // BOOST_MPL_AUX_COMMON_NAME_WKND_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/adl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_ADL_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_ADL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/intel.hpp>\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n// agurt, 25/apr/04: technically, the ADL workaround is only needed for GCC,\n// but putting everything expect public, user-specializable metafunctions into\n// a separate global namespace has a nice side effect of reducing the length \n// of template instantiation symbols, so we apply the workaround on all \n// platforms that can handle it\n\n#if !defined(BOOST_MPL_CFG_NO_ADL_BARRIER_NAMESPACE) \\\n    && (   BOOST_WORKAROUND(BOOST_MSVC, BOOST_TESTED_AT(1400)) \\\n        || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n        || BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840)) \\\n        || BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3202)) \\\n        || BOOST_WORKAROUND(BOOST_INTEL_CXX_VERSION, BOOST_TESTED_AT(810)) \\\n        )\n\n#   define BOOST_MPL_CFG_NO_ADL_BARRIER_NAMESPACE\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_ADL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/arrays.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_ARRAYS_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_ARRAYS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if    !defined(BOOST_MPL_CFG_NO_DEPENDENT_ARRAY_TYPES) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && ( BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n        || BOOST_WORKAROUND(BOOST_MSVC, <= 1300) \\\n        )\n\n#   define BOOST_MPL_CFG_NO_DEPENDENT_ARRAY_TYPES\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_ARRAYS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/bcc.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_BCC_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_BCC_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date: 2004-09-02 10:41:37 -0500 (Thu, 02 Sep 2004) $\n// $Revision: 24874 $\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if    !defined(BOOST_MPL_CFG_BCC590_WORKAROUNDS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(__BORLANDC__, >= 0x590) \\\n    && BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610))\n\n#   define BOOST_MPL_CFG_BCC590_WORKAROUNDS\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_BCC_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/bind.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_BIND_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_BIND_HPP_INCLUDED\n\n// Copyright David Abrahams 2002\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if    !defined(BOOST_MPL_CFG_NO_BIND_TEMPLATE) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && (   BOOST_WORKAROUND(BOOST_MSVC, <= 1300) \\\n        || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n        )\n\n#   define BOOST_MPL_CFG_NO_BIND_TEMPLATE\n\n#endif\n\n//#define BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT\n\n#endif // BOOST_MPL_AUX_CONFIG_BIND_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/compiler.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_COMPILER_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_COMPILER_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_CFG_COMPILER_DIR)\n\n#   include <boost/mpl/aux_/config/dtp.hpp>\n#   include <boost/mpl/aux_/config/ttp.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   include <boost/mpl/aux_/config/msvc.hpp>\n#   include <boost/mpl/aux_/config/gcc.hpp>\n#   include <boost/mpl/aux_/config/workaround.hpp>\n\n#   if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n#       define BOOST_MPL_CFG_COMPILER_DIR msvc60\n\n#   elif BOOST_WORKAROUND(BOOST_MSVC, == 1300)\n#       define BOOST_MPL_CFG_COMPILER_DIR msvc70\n\n#   elif BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, BOOST_TESTED_AT(0x0304))\n#       define BOOST_MPL_CFG_COMPILER_DIR gcc\n\n#   elif BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610))\n#       if !defined(BOOST_MPL_CFG_NO_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n#           define BOOST_MPL_CFG_COMPILER_DIR bcc551\n#       elif BOOST_WORKAROUND(__BORLANDC__, >= 0x590)\n#           define BOOST_MPL_CFG_COMPILER_DIR bcc\n#       else\n#           define BOOST_MPL_CFG_COMPILER_DIR bcc_pre590\n#       endif\n\n#   elif BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n#       define BOOST_MPL_CFG_COMPILER_DIR dmc\n\n#   elif defined(__MWERKS__)\n#       if defined(BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n#           define BOOST_MPL_CFG_COMPILER_DIR mwcw\n#       else\n#           define BOOST_MPL_CFG_COMPILER_DIR plain\n#       endif\n\n#   elif defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n#       define BOOST_MPL_CFG_COMPILER_DIR no_ctps\n\n#   elif defined(BOOST_MPL_CFG_NO_TEMPLATE_TEMPLATE_PARAMETERS)\n# 
      define BOOST_MPL_CFG_COMPILER_DIR no_ttp\n\n#   else\n#       define BOOST_MPL_CFG_COMPILER_DIR plain\n#   endif\n\n#endif // BOOST_MPL_CFG_COMPILER_DIR\n\n#endif // BOOST_MPL_AUX_CONFIG_COMPILER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/ctps.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_CTPS_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_CTPS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n#include <boost/config.hpp>\n\n#if    !defined(BOOST_MPL_CFG_NO_NONTYPE_TEMPLATE_PARTIAL_SPEC) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(__BORLANDC__, < 0x582)\n\n#   define BOOST_MPL_CFG_NO_NONTYPE_TEMPLATE_PARTIAL_SPEC\n\n#endif\n\n// BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION is defined in <boost/config.hpp>\n\n#endif // BOOST_MPL_AUX_CONFIG_CTPS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/dmc_ambiguous_ctps.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_DMC_AMBIGUOUS_CTPS_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_DMC_AMBIGUOUS_CTPS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if    !defined(BOOST_MPL_CFG_DMC_AMBIGUOUS_CTPS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n\n#   define BOOST_MPL_CFG_DMC_AMBIGUOUS_CTPS\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_DMC_AMBIGUOUS_CTPS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/dtp.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_DTP_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_DTP_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n// MWCW 7.x-8.0 \"losts\" default template parameters of nested class \n// templates when their owner classes are passed as arguments to other \n// templates; Borland 5.5.1 \"forgets\" them from the very beginning (if \n// the owner class is a class template), and Borland 5.6 isn't even\n// able to compile a definition of nested class template with DTP\n\n#if    !defined(BOOST_MPL_CFG_NO_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(__BORLANDC__, >= 0x560) \\\n    && BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610))\n\n#   define BOOST_MPL_CFG_NO_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES\n\n#endif\n\n\n#if    !defined(BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && (   BOOST_WORKAROUND(__MWERKS__, <= 0x3001) \\\n        || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n        || defined(BOOST_MPL_CFG_NO_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES) \\\n        )\n        \n#   define BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_DTP_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/eti.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_ETI_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_ETI_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n// flags for MSVC 6.5's so-called \"early template instantiation bug\"\n#if    !defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n\n#   define BOOST_MPL_CFG_MSVC_60_ETI_BUG\n\n#endif\n\n#if    !defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(BOOST_MSVC, == 1300)\n\n#   define BOOST_MPL_CFG_MSVC_70_ETI_BUG\n\n#endif\n\n#if    !defined(BOOST_MPL_CFG_MSVC_ETI_BUG) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && ( defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG) \\\n        || defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG) \\\n        )\n\n#   define BOOST_MPL_CFG_MSVC_ETI_BUG\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_ETI_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/forwarding.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_FORWARDING_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_FORWARDING_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if    !defined(BOOST_MPL_CFG_NO_NESTED_FORWARDING) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610))\n\n#   define BOOST_MPL_CFG_NO_NESTED_FORWARDING\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_FORWARDING_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/gcc.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_GCC_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_GCC_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if defined(__GNUC__) && !defined(__EDG_VERSION__)\n#   define BOOST_MPL_CFG_GCC ((__GNUC__ << 8) | __GNUC_MINOR__)\n#else\n#   define BOOST_MPL_CFG_GCC 0\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_GCC_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/gpu.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_GPU_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_GPU_HPP_INCLUDED\n\n// Copyright Eric Niebler 2014\n//\n// Distributed under the Boost Software License, Version 1.0.\n// (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/config.hpp>\n\n#if !defined(BOOST_MPL_CFG_GPU_ENABLED) \\\n\n#   define BOOST_MPL_CFG_GPU_ENABLED BOOST_GPU_ENABLED\n\n#endif\n\n#if defined __CUDACC__\n\n#    define BOOST_MPL_CFG_GPU 1\n\n#else\n\n#    define BOOST_MPL_CFG_GPU 0\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_GPU_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/has_apply.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_HAS_APPLY_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_HAS_APPLY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/has_xxx.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_HAS_APPLY) \\\n    && (   defined(BOOST_MPL_CFG_NO_HAS_XXX) \\\n        || BOOST_WORKAROUND(__EDG_VERSION__, < 300) \\\n        || BOOST_WORKAROUND(BOOST_MSVC, <= 1300) \\\n        || BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3202)) \\\n        )\n\n#   define BOOST_MPL_CFG_NO_HAS_APPLY\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_HAS_APPLY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/has_xxx.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_HAS_XXX_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_HAS_XXX_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n// Copyright David Abrahams 2002-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/overload_resolution.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n// agurt, 11/jan/03: signals a stub-only 'has_xxx' implementation\n\n#if !defined(BOOST_MPL_CFG_NO_HAS_XXX) \\\n    && (   defined(BOOST_MPL_CFG_BROKEN_OVERLOAD_RESOLUTION) \\\n        || BOOST_WORKAROUND(__GNUC__, <= 2) \\\n        || BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840)) \\\n        )\n\n#   define BOOST_MPL_CFG_NO_HAS_XXX\n#   define BOOST_MPL_CFG_NO_HAS_XXX_TEMPLATE\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_HAS_XXX_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/integral.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_INTEGRAL_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_INTEGRAL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if    !defined(BOOST_MPL_CFG_BCC_INTEGRAL_CONSTANTS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610))\n\n#   define BOOST_MPL_CFG_BCC_INTEGRAL_CONSTANTS\n\n#endif\n\n#if    !defined(BOOST_MPL_CFG_NO_NESTED_VALUE_ARITHMETIC) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && ( BOOST_WORKAROUND(BOOST_MSVC, <= 1300) \\\n        || BOOST_WORKAROUND(__EDG_VERSION__, <= 238) \\\n        )\n\n#   define BOOST_MPL_CFG_NO_NESTED_VALUE_ARITHMETIC\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_INTEGRAL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/intel.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_INTEL_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_INTEL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n\n// BOOST_INTEL_CXX_VERSION is defined here:\n#include <boost/config.hpp>\n\n#endif // BOOST_MPL_AUX_CONFIG_INTEL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/lambda.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_LAMBDA_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_LAMBDA_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/ttp.hpp>\n#include <boost/mpl/aux_/config/ctps.hpp>\n\n// agurt, 15/jan/02: full-fledged implementation requires both \n// template template parameters _and_ partial specialization\n\n#if    !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT) \\\n    && (   defined(BOOST_MPL_CFG_NO_TEMPLATE_TEMPLATE_PARAMETERS) \\\n        || defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n        )\n\n#   define BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_LAMBDA_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/msvc.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_MSVC_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_MSVC_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n\n// BOOST_MSVC is defined here:\n#include <boost/config.hpp>\n\n#endif // BOOST_MPL_AUX_CONFIG_MSVC_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/msvc_typename.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_MSVC_TYPENAME_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_MSVC_TYPENAME_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if BOOST_WORKAROUND(BOOST_MSVC, <= 1300)\n#   define BOOST_MSVC_TYPENAME\n#else\n#   define BOOST_MSVC_TYPENAME typename\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_MSVC_TYPENAME_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/nttp.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_NTTP_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_NTTP_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n// MSVC 6.5 ICE-s on the code as simple as this (see \"aux_/nttp_decl.hpp\"\n// for a workaround):\n//\n//    namespace std {\n//    template< typename Char > struct string;\n//    }\n//\n//    void foo(std::string<char>);\n//\n//    namespace boost { namespace mpl {\n//    template< int > struct arg;\n//    }}\n\n#if    !defined(BOOST_MPL_CFG_NTTP_BUG) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n\n#   define BOOST_MPL_CFG_NTTP_BUG\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_NTTP_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/operators.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_OPERATORS_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_OPERATORS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if !defined(BOOST_MPL_CFG_USE_OPERATORS_OVERLOADING) \\\n    && ( BOOST_WORKAROUND(BOOST_MSVC, <= 1300) \\\n        || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n        || BOOST_WORKAROUND(__EDG_VERSION__, <= 245) \\\n        || BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, <= 0x0295) \\\n        || BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(600)) \\\n        || BOOST_WORKAROUND(__NVCC__, BOOST_TESTED_AT(1)) \\\n        )\n\n#   define BOOST_MPL_CFG_USE_OPERATORS_OVERLOADING\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_OPERATORS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/overload_resolution.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_OVERLOAD_RESOLUTION_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_OVERLOAD_RESOLUTION_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if    !defined(BOOST_MPL_CFG_BROKEN_OVERLOAD_RESOLUTION) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && (   BOOST_WORKAROUND(__BORLANDC__, < 0x590) \\\n        || BOOST_WORKAROUND(__MWERKS__, < 0x3001) \\\n        )\n\n#   define BOOST_MPL_CFG_BROKEN_OVERLOAD_RESOLUTION\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_OVERLOAD_RESOLUTION_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/pp_counter.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_PP_COUNTER_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_PP_COUNTER_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2006\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_AUX_PP_COUNTER)\n#   include <boost/mpl/aux_/config/msvc.hpp>\n#   if BOOST_WORKAROUND(BOOST_MSVC, >= 1300)\n#       define BOOST_MPL_AUX_PP_COUNTER() __COUNTER__\n#   else\n#       define BOOST_MPL_AUX_PP_COUNTER() __LINE__\n#   endif\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_PP_COUNTER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/preprocessor.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_PREPROCESSOR_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_PREPROCESSOR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if !defined(BOOST_MPL_CFG_BROKEN_PP_MACRO_EXPANSION) \\\n    && (   BOOST_WORKAROUND(__MWERKS__, <= 0x3003) \\\n        || BOOST_WORKAROUND(__BORLANDC__, < 0x582) \\\n        || BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(502)) \\\n        )\n\n#   define BOOST_MPL_CFG_BROKEN_PP_MACRO_EXPANSION\n\n#endif\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n#   define BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES\n#endif\n\n#if !defined(BOOST_NEEDS_TOKEN_PASTING_OP_FOR_TOKENS_JUXTAPOSING) \\\n    && BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n#   define BOOST_NEEDS_TOKEN_PASTING_OP_FOR_TOKENS_JUXTAPOSING\n#endif\n\n\n#endif // BOOST_MPL_AUX_CONFIG_PREPROCESSOR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/static_constant.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_STATIC_CONSTANT_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_STATIC_CONSTANT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n// BOOST_STATIC_CONSTANT is defined here:\n#   include <boost/config.hpp>\n#else\n// undef the macro for the preprocessing mode\n#   undef BOOST_STATIC_CONSTANT\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_STATIC_CONSTANT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/ttp.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_TTP_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_TTP_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_TEMPLATE_TEMPLATE_PARAMETERS) \\\n    && ( defined(BOOST_NO_TEMPLATE_TEMPLATES) \\\n      || BOOST_WORKAROUND( __BORLANDC__, BOOST_TESTED_AT( 0x590) ) \\\n       )\n\n#   define BOOST_MPL_CFG_NO_TEMPLATE_TEMPLATE_PARAMETERS\n\n#endif\n\n\n#if    !defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE) \\\n    && (   BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, BOOST_TESTED_AT(0x0302)) \\\n        || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n        )\n\n#   define BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING\n\n#endif\n\n#endif // BOOST_MPL_AUX_CONFIG_TTP_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/use_preprocessed.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_USE_PREPROCESSED_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_USE_PREPROCESSED_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n// #define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_AUX_CONFIG_USE_PREPROCESSED_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/config/workaround.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_CONFIG_WORKAROUND_HPP_INCLUDED\n#define BOOST_MPL_AUX_CONFIG_WORKAROUND_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/detail/workaround.hpp>\n\n#endif // BOOST_MPL_AUX_CONFIG_WORKAROUND_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/count_args.hpp",
    "content": "\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/preprocessor/expr_if.hpp>\n#include <boost/preprocessor/inc.hpp>\n#include <boost/preprocessor/cat.hpp>\n\n#if !defined(AUX778076_COUNT_ARGS_PARAM_NAME)\n#   define AUX778076_COUNT_ARGS_PARAM_NAME T\n#endif\n\n#if !defined(AUX778076_COUNT_ARGS_TEMPLATE_PARAM)\n#   define AUX778076_COUNT_ARGS_TEMPLATE_PARAM typename AUX778076_COUNT_ARGS_PARAM_NAME\n#endif\n\n// local macros, #undef-ined at the end of the header\n\n#if !defined(AUX778076_COUNT_ARGS_USE_STANDARD_PP_PRIMITIVES)\n\n#   include <boost/mpl/aux_/preprocessor/repeat.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n\n#   define AUX778076_COUNT_ARGS_REPEAT BOOST_MPL_PP_REPEAT\n#   define AUX778076_COUNT_ARGS_PARAMS(param) \\\n    BOOST_MPL_PP_PARAMS( \\\n          AUX778076_COUNT_ARGS_ARITY \\\n        , param \\\n        ) \\\n    /**/\n\n#else\n\n#   include <boost/preprocessor/enum_shifted_params.hpp>\n#   include <boost/preprocessor/repeat.hpp>\n#   include <boost/preprocessor/inc.hpp>\n\n#   define AUX778076_COUNT_ARGS_REPEAT BOOST_PP_REPEAT\n#   define AUX778076_COUNT_ARGS_PARAMS(param) \\\n    BOOST_PP_ENUM_SHIFTED_PARAMS( \\\n          BOOST_PP_INC(AUX778076_COUNT_ARGS_ARITY) \\\n        , param \\\n        ) \\\n    /**/\n\n#endif // AUX778076_COUNT_ARGS_USE_STANDARD_PP_PRIMITIVES\n\n\n#define AUX778076_IS_ARG_TEMPLATE_NAME \\\n    BOOST_PP_CAT(is_,BOOST_PP_CAT(AUX778076_COUNT_ARGS_PREFIX,_arg)) \\\n/**/\n\n#define AUX778076_COUNT_ARGS_FUNC(unused, i, param) \\\n    BOOST_PP_EXPR_IF(i, +) \\\n    AUX778076_IS_ARG_TEMPLATE_NAME<BOOST_PP_CAT(param,BOOST_PP_INC(i))>::value \\\n/**/\n\n// 
is_<xxx>_arg\ntemplate< AUX778076_COUNT_ARGS_TEMPLATE_PARAM >\nstruct AUX778076_IS_ARG_TEMPLATE_NAME\n{\n    BOOST_STATIC_CONSTANT(bool, value = true);\n};\n\ntemplate<>\nstruct AUX778076_IS_ARG_TEMPLATE_NAME<AUX778076_COUNT_ARGS_DEFAULT>\n{\n    BOOST_STATIC_CONSTANT(bool, value = false);\n};\n\n// <xxx>_count_args\ntemplate<\n      AUX778076_COUNT_ARGS_PARAMS(AUX778076_COUNT_ARGS_TEMPLATE_PARAM)\n    >\nstruct BOOST_PP_CAT(AUX778076_COUNT_ARGS_PREFIX,_count_args)\n{\n    BOOST_STATIC_CONSTANT(int, value = AUX778076_COUNT_ARGS_REPEAT(\n          AUX778076_COUNT_ARGS_ARITY\n        , AUX778076_COUNT_ARGS_FUNC\n        , AUX778076_COUNT_ARGS_PARAM_NAME\n        ));\n};\n\n#undef AUX778076_COUNT_ARGS_FUNC\n#undef AUX778076_IS_ARG_TEMPLATE_NAME\n#undef AUX778076_COUNT_ARGS_PARAMS\n#undef AUX778076_COUNT_ARGS_REPEAT\n\n#undef AUX778076_COUNT_ARGS_ARITY\n#undef AUX778076_COUNT_ARGS_DEFAULT\n#undef AUX778076_COUNT_ARGS_PREFIX\n#undef AUX778076_COUNT_ARGS_USE_STANDARD_PP_PRIMITIVES\n#undef AUX778076_COUNT_ARGS_TEMPLATE_PARAM\n#undef AUX778076_COUNT_ARGS_PARAM_NAME\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/find_if_pred.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_FIND_IF_PRED_HPP_INCLUDED\n#define BOOST_MPL_AUX_FIND_IF_PRED_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Eric Friedman 2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n#include <boost/mpl/aux_/iter_apply.hpp>\n#include <boost/mpl/not.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Predicate >\nstruct find_if_pred\n{\n    template< typename Iterator >\n    struct apply\n    {\n        typedef not_< aux::iter_apply1<Predicate,Iterator> > type;\n    };\n};\n\n}}}\n\n#endif // BOOST_MPL_AUX_FIND_IF_PRED_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/fold_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_FOLD_IMPL_HPP_INCLUDED\n#define BOOST_MPL_AUX_FOLD_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/next_prior.hpp>\n#   include <boost/mpl/apply.hpp>\n#   include <boost/mpl/deref.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n#       include <boost/mpl/if.hpp>\n#       include <boost/type_traits/is_same.hpp>\n#   endif\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER fold_impl.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   define AUX778076_FOLD_IMPL_OP(iter) typename deref<iter>::type\n#   define AUX778076_FOLD_IMPL_NAME_PREFIX fold\n#   include <boost/mpl/aux_/fold_impl_body.hpp>\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_AUX_FOLD_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/fold_impl_body.hpp",
    "content": "\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION\n\n#if !defined(BOOST_PP_IS_ITERATING)\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#   include <boost/mpl/limits/unrolling.hpp>\n#   include <boost/mpl/aux_/preprocessor/repeat.hpp>\n#   include <boost/mpl/aux_/config/workaround.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n#   include <boost/mpl/aux_/config/eti.hpp>\n\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/dec.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n// local macros, #undef-ined at the end of the header\n\n#   define AUX778076_ITER_FOLD_STEP(unused, i, unused2) \\\n    typedef typename apply2< \\\n          ForwardOp \\\n        , BOOST_PP_CAT(state,i) \\\n        , AUX778076_FOLD_IMPL_OP(BOOST_PP_CAT(iter,i)) \\\n        >::type BOOST_PP_CAT(state,BOOST_PP_INC(i)); \\\n    typedef typename mpl::next<BOOST_PP_CAT(iter,i)>::type \\\n        BOOST_PP_CAT(iter,BOOST_PP_INC(i)); \\\n    /**/\n\n#   define AUX778076_FOLD_IMPL_NAME \\\n    BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_impl) \\\n    /**/\n\n#   define AUX778076_FOLD_CHUNK_NAME \\\n    BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_chunk) \\\n    /**/\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(int, N)\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME;\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\n#   if !BOOST_WORKAROUND(__BORLANDC__, < 0x600)\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_UNROLLING, 
<boost/mpl/aux_/fold_impl_body.hpp>))\n#   include BOOST_PP_ITERATE()\n\n// implementation for N that exceeds BOOST_MPL_LIMIT_UNROLLING\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(int, N)\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME\n{\n    typedef AUX778076_FOLD_IMPL_NAME<\n          BOOST_MPL_LIMIT_UNROLLING\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef AUX778076_FOLD_IMPL_NAME<\n          ( (N - BOOST_MPL_LIMIT_UNROLLING) < 0 ? 0 : N - BOOST_MPL_LIMIT_UNROLLING )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n        \n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\n// fallback implementation for sequences of unknown size\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME<-1,First,Last,State,ForwardOp>\n    : AUX778076_FOLD_IMPL_NAME<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State,AUX778076_FOLD_IMPL_OP(First)>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME<-1,Last,Last,State,ForwardOp>\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n#   else // BOOST_WORKAROUND(__BORLANDC__, < 0x600)\n\n// Borland have some serious problems with the unrolled version, so\n// we always use a basic implementation\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(int, N)\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME\n{\n    typedef AUX778076_FOLD_IMPL_NAME<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename 
apply2<ForwardOp,State,AUX778076_FOLD_IMPL_OP(First)>::type\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n    typedef state type;\n};\n\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(int, N)\n     , typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME<N,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n    typedef state type;\n};\n\n#   endif // BOOST_WORKAROUND(__BORLANDC__, < 0x600)\n \n#else // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) >\nstruct AUX778076_FOLD_CHUNK_NAME;\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_UNROLLING, <boost/mpl/aux_/fold_impl_body.hpp>))\n#   include BOOST_PP_ITERATE()\n\n// implementation for N that exceeds BOOST_MPL_LIMIT_UNROLLING\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) > \nstruct AUX778076_FOLD_CHUNK_NAME\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        > \n    struct result_\n    {\n        typedef AUX778076_FOLD_IMPL_NAME<\n              BOOST_MPL_LIMIT_UNROLLING\n            , First\n            , Last\n            , State\n            , ForwardOp\n            > chunk_;\n\n        typedef AUX778076_FOLD_IMPL_NAME<\n              ( (N - BOOST_MPL_LIMIT_UNROLLING) < 0 ? 
0 : N - BOOST_MPL_LIMIT_UNROLLING )\n            , typename chunk_::iterator\n            , Last\n            , typename chunk_::state\n            , ForwardOp\n            > res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\n// fallback implementation for sequences of unknown size\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_step);\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_null_step)\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<> \nstruct AUX778076_FOLD_CHUNK_NAME<-1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        > \n    struct result_\n    {\n        typedef typename if_<\n              typename is_same<First,Last>::type\n            , BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_null_step)<Last,State>\n            , BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_step)<First,Last,State,ForwardOp>\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n\n#if defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG)\n    /// ETI workaround\n    template<> struct result_<int,int,int,int>\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n#endif\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_step)\n{\n    // can't inherit here - it breaks MSVC 7.0\n    typedef AUX778076_FOLD_CHUNK_NAME<-1>::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State,AUX778076_FOLD_IMPL_OP(First)>::type\n        , ForwardOp\n        > chunk_;\n\n    
typedef typename chunk_::state state;\n    typedef typename chunk_::iterator iterator;\n};\n\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(int, N)\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME\n    : AUX778076_FOLD_CHUNK_NAME<N>\n        ::template result_<First,Last,State,ForwardOp>\n{\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n}}}\n\n#   undef AUX778076_FOLD_IMPL_NAME\n#   undef AUX778076_FOLD_CHUNK_NAME\n#   undef AUX778076_ITER_FOLD_STEP\n\n#undef AUX778076_FOLD_IMPL_OP\n#undef AUX778076_FOLD_IMPL_NAME_PREFIX\n\n///// iteration\n\n#else\n\n#   define n_ BOOST_PP_FRAME_ITERATION(1)\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct AUX778076_FOLD_IMPL_NAME<n_,First,Last,State,ForwardOp>\n{\n    typedef First iter0;\n    typedef State state0;\n\n    BOOST_MPL_PP_REPEAT(n_, AUX778076_ITER_FOLD_STEP, unused)\n\n    typedef BOOST_PP_CAT(state,n_) state;\n    typedef BOOST_PP_CAT(iter,n_) iterator;\n};\n\n#else\n\ntemplate<> struct AUX778076_FOLD_CHUNK_NAME<n_>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n\n        BOOST_MPL_PP_REPEAT(n_, AUX778076_ITER_FOLD_STEP, unused)\n\n        typedef BOOST_PP_CAT(state,n_) state;\n        typedef BOOST_PP_CAT(iter,n_) iterator;\n    };\n\n#if defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG)\n    /// ETI workaround\n    template<> struct result_<int,int,int,int>\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n#endif\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n#   undef n_\n\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/full_lambda.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_AUX_FULL_LAMBDA_HPP_INCLUDED\n#define BOOST_MPL_AUX_FULL_LAMBDA_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/lambda_fwd.hpp>\n#   include <boost/mpl/bind_fwd.hpp>\n#   include <boost/mpl/protect.hpp>\n#   include <boost/mpl/quote.hpp>\n#   include <boost/mpl/arg.hpp>\n#   include <boost/mpl/bool.hpp>\n#   include <boost/mpl/int_fwd.hpp>\n#   include <boost/mpl/aux_/template_arity.hpp>\n#   include <boost/mpl/aux_/na_spec.hpp>\n#   include <boost/mpl/aux_/config/ttp.hpp>\n#   if defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING)\n#       include <boost/mpl/if.hpp>\n#   endif\n#endif\n\n#include <boost/mpl/aux_/lambda_arity_param.hpp>\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER full_lambda.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/default_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/preprocessor/enum.hpp>\n#   include <boost/mpl/aux_/preprocessor/repeat.hpp>\n#   include <boost/mpl/aux_/config/dmc_ambiguous_ctps.hpp>\n\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\nnamespace boost { namespace mpl {\n\n// local macros, #undef-ined at the end of the header\n#   define 
AUX778076_LAMBDA_PARAMS(i_, param) \\\n    BOOST_MPL_PP_PARAMS(i_, param) \\\n    /**/\n\n#   define AUX778076_BIND_PARAMS(param) \\\n    BOOST_MPL_PP_PARAMS( \\\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        , param \\\n        ) \\\n    /**/\n\n#   define AUX778076_BIND_N_PARAMS(i_, param) \\\n    BOOST_PP_COMMA_IF(i_) \\\n    BOOST_MPL_PP_PARAMS(i_, param) \\\n    /**/\n\n#   define AUX778076_ARITY_PARAM(param) \\\n    BOOST_MPL_AUX_LAMBDA_ARITY_PARAM(param) \\\n    /**/\n\n\n#define n_ BOOST_MPL_LIMIT_METAFUNCTION_ARITY\nnamespace aux {\n\ntemplate<\n      BOOST_MPL_PP_DEFAULT_PARAMS(n_,bool C,false)\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< BOOST_MPL_PP_ENUM(n_,false) >\n    : false_\n{\n};\n\n} // namespace aux\n#undef n_\n\ntemplate<\n      typename T\n    , typename Tag\n    AUX778076_ARITY_PARAM(typename Arity)\n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>,Tag AUX778076_ARITY_PARAM(int_<-1>) >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type; \n};\n\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/aux_/full_lambda.hpp>))\n#include BOOST_PP_ITERATE()\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>,Tag AUX778076_ARITY_PARAM(int_<1>) >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\ntemplate<\n      typename F, AUX778076_BIND_PARAMS(typename T)\n    , typename Tag\n    >\nstruct lambda<\n          bind<F,AUX778076_BIND_PARAMS(T)>\n        , Tag\n        
AUX778076_ARITY_PARAM(int_<BOOST_PP_INC(BOOST_MPL_LIMIT_METAFUNCTION_ARITY)>)\n        >\n{\n    typedef false_ is_le;\n    typedef bind<F, AUX778076_BIND_PARAMS(T)> result_;\n    typedef result_ type;\n};\n\n\n#if defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING)\n\ntemplate<\n      typename F\n    , typename Tag1\n    , typename Tag2\n    , typename Arity\n    >\nstruct lambda<\n          lambda<F,Tag1,Arity>\n        , Tag2\n        , int_<3>\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n\n    typedef typename l1::is_le is_le;\n    typedef bind1< quote1<aux::template_arity>, typename l1::result_ > arity_;\n    typedef lambda< typename if_<is_le,arity_,Arity>::type,Tag2 > l3;\n    \n    typedef aux::le_result3<is_le, Tag2, mpl::lambda, l1, l2, l3> le_result_;\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\n#elif !defined(BOOST_MPL_CFG_DMC_AMBIGUOUS_CTPS)\n\n/// workaround for MWCW 8.3+/EDG < 303, leads to ambiguity on Digital Mars\ntemplate<\n      typename F, typename Tag1, typename Tag2\n    >\nstruct lambda<\n          lambda< F,Tag1 >\n        , Tag2\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    \n    typedef typename l1::is_le is_le;\n    typedef aux::le_result2<is_le, Tag2, mpl::lambda, l1, l2> le_result_;\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\n#endif\n\n#   undef AUX778076_ARITY_PARAM\n#   undef AUX778076_BIND_N_PARAMS\n#   undef AUX778076_BIND_PARAMS\n#   undef AUX778076_LAMBDA_PARAMS\n\n#if !defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING)\nBOOST_MPL_AUX_NA_SPEC(2, lambda)\n#else\nBOOST_MPL_AUX_NA_SPEC2(2, 3, lambda)\n#endif\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_AUX_FULL_LAMBDA_HPP_INCLUDED\n\n///// iteration, depth == 1\n\n// For gcc 4.4 compatability, we must include the\n// 
BOOST_PP_ITERATION_DEPTH test inside an #else clause.\n#else // BOOST_PP_IS_ITERATING\n#if BOOST_PP_ITERATION_DEPTH() == 1\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\n#if i_ > 0\n\nnamespace aux {\n\n#   define AUX778076_RESULT(unused, i_, T) \\\n    BOOST_PP_COMMA_IF(i_) \\\n    typename BOOST_PP_CAT(T, BOOST_PP_INC(i_))::result_ \\\n    /**/\n\n#   define AUX778076_TYPE(unused, i_, T) \\\n    BOOST_PP_COMMA_IF(i_) \\\n    typename BOOST_PP_CAT(T, BOOST_PP_INC(i_))::type \\\n    /**/\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< AUX778076_LAMBDA_PARAMS(i_, typename P) > class F\n    , AUX778076_LAMBDA_PARAMS(i_, typename L)\n    >\nstruct BOOST_PP_CAT(le_result,i_)\n{\n    typedef F<\n          BOOST_MPL_PP_REPEAT(i_, AUX778076_TYPE, L)\n        > result_;\n    \n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< AUX778076_LAMBDA_PARAMS(i_, typename P) > class F\n    , AUX778076_LAMBDA_PARAMS(i_, typename L)\n    >\nstruct BOOST_PP_CAT(le_result,i_)< true_,Tag,F,AUX778076_LAMBDA_PARAMS(i_, L) >\n{\n    typedef BOOST_PP_CAT(bind,i_)<\n          BOOST_PP_CAT(quote,i_)<F,Tag>\n        , BOOST_MPL_PP_REPEAT(i_, AUX778076_RESULT, L)\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n#   undef AUX778076_TYPE\n#   undef AUX778076_RESULT\n\n} // namespace aux\n\n\n#   define AUX778076_LAMBDA_TYPEDEF(unused, i_, T) \\\n    typedef lambda< BOOST_PP_CAT(T, BOOST_PP_INC(i_)), Tag > \\\n        BOOST_PP_CAT(l,BOOST_PP_INC(i_)); \\\n/**/\n\n#   define AUX778076_IS_LE_TYPEDEF(unused, i_, unused2) \\\n    typedef typename BOOST_PP_CAT(l,BOOST_PP_INC(i_))::is_le \\\n        BOOST_PP_CAT(is_le,BOOST_PP_INC(i_)); \\\n/**/\n\n#   define AUX778076_IS_LAMBDA_EXPR(unused, i_, unused2) \\\n    BOOST_PP_COMMA_IF(i_) \\\n    BOOST_PP_CAT(is_le,BOOST_PP_INC(i_))::value \\\n/**/\n\ntemplate<\n      template< AUX778076_LAMBDA_PARAMS(i_, typename P) > class F\n    , AUX778076_LAMBDA_PARAMS(i_, typename T)\n    , typename Tag\n   
 >\nstruct lambda< \n          F<AUX778076_LAMBDA_PARAMS(i_, T)>\n        , Tag\n        AUX778076_ARITY_PARAM(int_<i_>)\n        >\n{\n    BOOST_MPL_PP_REPEAT(i_, AUX778076_LAMBDA_TYPEDEF, T)\n    BOOST_MPL_PP_REPEAT(i_, AUX778076_IS_LE_TYPEDEF, unused)\n\n    typedef typename aux::lambda_or<\n          BOOST_MPL_PP_REPEAT(i_, AUX778076_IS_LAMBDA_EXPR, unused)\n        >::type is_le;\n\n    typedef aux::BOOST_PP_CAT(le_result,i_)<\n          is_le, Tag, F, AUX778076_LAMBDA_PARAMS(i_, l)\n        > le_result_;\n    \n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\n\n#   undef AUX778076_IS_LAMBDA_EXPR\n#   undef AUX778076_IS_LE_TYPEDEF\n#   undef AUX778076_LAMBDA_TYPEDEF\n\n#endif // i_ > 0\n\ntemplate<\n      typename F AUX778076_BIND_N_PARAMS(i_, typename T)\n    , typename Tag\n    >\nstruct lambda<\n          BOOST_PP_CAT(bind,i_)<F AUX778076_BIND_N_PARAMS(i_, T)>\n        , Tag\n        AUX778076_ARITY_PARAM(int_<BOOST_PP_INC(i_)>)\n        >\n{\n    typedef false_ is_le;\n    typedef BOOST_PP_CAT(bind,i_)<\n          F\n        AUX778076_BIND_N_PARAMS(i_, T)\n        > result_;\n        \n    typedef result_ type;\n};\n\n#undef i_\n#endif // BOOST_PP_ITERATION_DEPTH()\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/has_apply.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_HAS_APPLY_HPP_INCLUDED\n#define BOOST_MPL_AUX_HAS_APPLY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/has_xxx.hpp>\n#include <boost/mpl/aux_/config/has_apply.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n#if !defined(BOOST_MPL_CFG_NO_HAS_APPLY)\nBOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(has_apply, apply, false)\n#else\ntemplate< typename T, typename fallback_ = false_ >\nstruct has_apply\n    : fallback_\n{\n};\n#endif\n}}}\n\n#endif // BOOST_MPL_AUX_HAS_APPLY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/has_begin.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_HAS_BEGIN_HPP_INCLUDED\n#define BOOST_MPL_AUX_HAS_BEGIN_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/has_xxx.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\nBOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(has_begin, begin, true)\n}}}\n\n#endif // BOOST_MPL_AUX_HAS_BEGIN_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/has_key_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_HAS_KEY_IMPL_HPP_INCLUDED\n#define BOOST_MPL_AUX_HAS_KEY_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n// Copyright David Abrahams 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/has_key_fwd.hpp>\n#include <boost/mpl/aux_/traits_lambda_spec.hpp>\n\nnamespace boost { namespace mpl {\n\n// no default implementation; the definition is needed to make MSVC happy \n\ntemplate< typename Tag > struct has_key_impl\n{\n    template< typename AssociativeSequence, typename Key > struct apply;\n};\n\nBOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC(2,has_key_impl)\n\n}}\n\n#endif // BOOST_MPL_AUX_HAS_KEY_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/has_rebind.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_HAS_REBIND_HPP_INCLUDED\n#define BOOST_MPL_AUX_HAS_REBIND_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/intel.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if BOOST_WORKAROUND(__EDG_VERSION__, <= 244) && !defined(BOOST_INTEL_CXX_VERSION)\n#   include <boost/mpl/has_xxx.hpp>\n#elif BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n#   include <boost/mpl/has_xxx.hpp>\n#   include <boost/mpl/if.hpp>\n#   include <boost/mpl/bool.hpp>\n#   include <boost/mpl/aux_/msvc_is_class.hpp>\n#elif BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610))\n#   include <boost/mpl/if.hpp>\n#   include <boost/mpl/bool.hpp>\n#   include <boost/mpl/aux_/yes_no.hpp>\n#   include <boost/mpl/aux_/config/static_constant.hpp>\n#   include <boost/type_traits/is_class.hpp>\n#else\n#   include <boost/mpl/aux_/type_wrapper.hpp>\n#   include <boost/mpl/aux_/yes_no.hpp>\n#   include <boost/mpl/aux_/config/static_constant.hpp>\n#endif\n\nnamespace boost { namespace mpl { namespace aux {\n\n#if BOOST_WORKAROUND(__EDG_VERSION__, <= 244) && !defined(BOOST_INTEL_CXX_VERSION)\n\nBOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(has_rebind, rebind, false)\n\n#elif BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n\nBOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(has_rebind_impl, rebind, false)\n\ntemplate< typename T >\nstruct has_rebind\n    : if_< \n          msvc_is_class<T>\n        , has_rebind_impl<T>\n        , bool_<false>\n        >::type\n{\n};\n\n#else // the rest\n\ntemplate< typename T > struct has_rebind_tag {};\nno_tag operator|(has_rebind_tag<int>, void const volatile*);\n\n#   if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610))\ntemplate< 
typename T >\nstruct has_rebind\n{\n    static has_rebind_tag<T>* get();\n    BOOST_STATIC_CONSTANT(bool, value = \n          sizeof(has_rebind_tag<int>() | get()) == sizeof(yes_tag)\n        );\n};\n#   else // __BORLANDC__\ntemplate< typename T >\nstruct has_rebind_impl\n{\n    static T* get();\n    BOOST_STATIC_CONSTANT(bool, value = \n          sizeof(has_rebind_tag<int>() | get()) == sizeof(yes_tag)\n        );\n};\n\ntemplate< typename T >\nstruct has_rebind\n    : if_< \n          is_class<T>\n        , has_rebind_impl<T>\n        , bool_<false>\n        >::type\n{\n};\n#   endif // __BORLANDC__\n\n#endif\n\n}}}\n\n#endif // BOOST_MPL_AUX_HAS_REBIND_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/has_size.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_HAS_SIZE_HPP_INCLUDED\n#define BOOST_MPL_AUX_HAS_SIZE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/has_xxx.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\nBOOST_MPL_HAS_XXX_TRAIT_DEF(size)\n}}}\n\n#endif // BOOST_MPL_AUX_HAS_SIZE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/has_tag.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_HAS_TAG_HPP_INCLUDED\n#define BOOST_MPL_AUX_HAS_TAG_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/has_xxx.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\nBOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(has_tag, tag, false)\n}}}\n\n#endif // BOOST_MPL_AUX_HAS_TAG_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/has_type.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_HAS_TYPE_HPP_INCLUDED\n#define BOOST_MPL_AUX_HAS_TYPE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/has_xxx.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\nBOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(has_type, type, true)\n}}}\n\n#endif // BOOST_MPL_AUX_HAS_TYPE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/include_preprocessed.hpp",
    "content": "\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION\n\n// Copyright Aleksey Gurtovoy 2000-2006\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/compiler.hpp>\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n#include <boost/preprocessor/cat.hpp>\n#include <boost/preprocessor/stringize.hpp>\n\n#if !defined(BOOST_NEEDS_TOKEN_PASTING_OP_FOR_TOKENS_JUXTAPOSING)\n#   define AUX778076_PREPROCESSED_HEADER \\\n    BOOST_MPL_CFG_COMPILER_DIR/BOOST_MPL_PREPROCESSED_HEADER \\\n/**/\n#else\n#   define AUX778076_PREPROCESSED_HEADER \\\n    BOOST_PP_CAT(BOOST_MPL_CFG_COMPILER_DIR,/)##BOOST_MPL_PREPROCESSED_HEADER \\\n/**/\n#endif\n\n#if BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(700))\n#   define AUX778076_INCLUDE_STRING BOOST_PP_STRINGIZE(boost/mpl/aux_/preprocessed/AUX778076_PREPROCESSED_HEADER)\n#   include AUX778076_INCLUDE_STRING\n#   undef AUX778076_INCLUDE_STRING\n#else\n#   include BOOST_PP_STRINGIZE(boost/mpl/aux_/preprocessed/AUX778076_PREPROCESSED_HEADER)\n#endif\n\n#   undef AUX778076_PREPROCESSED_HEADER\n\n#undef BOOST_MPL_PREPROCESSED_HEADER\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/insert_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_INSERT_IMPL_HPP_INCLUDED\n#define BOOST_MPL_INSERT_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/reverse_fold.hpp>\n#include <boost/mpl/iterator_range.hpp>\n#include <boost/mpl/clear.hpp>\n#include <boost/mpl/push_front.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/traits_lambda_spec.hpp>\n#include <boost/type_traits/is_same.hpp>\n\nnamespace boost { namespace mpl {\n\n// default implementation; conrete sequences might override it by \n// specializing either the 'insert_impl' or the primary 'insert' template\n\ntemplate< typename Tag >\nstruct insert_impl\n{\n    template<\n          typename Sequence\n        , typename Pos\n        , typename T\n        >\n    struct apply\n    {\n        typedef iterator_range<\n              typename begin<Sequence>::type\n            , Pos\n            > first_half_;\n\n        typedef iterator_range<\n              Pos\n            , typename end<Sequence>::type\n            > second_half_;\n\n        typedef typename reverse_fold<\n              second_half_\n            , typename clear<Sequence>::type\n            , push_front<_,_>\n            >::type half_sequence_;\n\n        typedef typename reverse_fold<\n              first_half_\n            , typename push_front<half_sequence_,T>::type\n            , push_front<_,_>\n            >::type type;\n    };\n};\n\nBOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC(3,insert_impl)\n\n}}\n\n#endif // BOOST_MPL_INSERT_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/integral_wrapper.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2006\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION!\n\n#include <boost/mpl/integral_c_tag.hpp>\n#include <boost/mpl/aux_/static_cast.hpp>\n#include <boost/mpl/aux_/nttp_decl.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#include <boost/preprocessor/cat.hpp>\n\n#if !defined(AUX_WRAPPER_NAME)\n#   define AUX_WRAPPER_NAME BOOST_PP_CAT(AUX_WRAPPER_VALUE_TYPE,_)\n#endif\n\n#if !defined(AUX_WRAPPER_PARAMS)\n#   define AUX_WRAPPER_PARAMS(N) BOOST_MPL_AUX_NTTP_DECL(AUX_WRAPPER_VALUE_TYPE, N)\n#endif\n\n#if !defined(AUX_WRAPPER_INST)\n#   if BOOST_WORKAROUND(__MWERKS__, <= 0x2407)\n#       define AUX_WRAPPER_INST(value) AUX_WRAPPER_NAME< value >\n#   else \n#       define AUX_WRAPPER_INST(value) BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::AUX_WRAPPER_NAME< value >\n#   endif\n#endif\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\ntemplate< AUX_WRAPPER_PARAMS(N) >\nstruct AUX_WRAPPER_NAME\n{\n    BOOST_STATIC_CONSTANT(AUX_WRAPPER_VALUE_TYPE, value = N);\n// agurt, 08/mar/03: SGI MIPSpro C++ workaround, have to #ifdef because some \n// other compilers (e.g. 
MSVC) are not particulary happy about it\n#if BOOST_WORKAROUND(__EDG_VERSION__, <= 238)\n    typedef struct AUX_WRAPPER_NAME type;\n#else\n    typedef AUX_WRAPPER_NAME type;\n#endif\n    typedef AUX_WRAPPER_VALUE_TYPE value_type;\n    typedef integral_c_tag tag;\n\n// have to #ifdef here: some compilers don't like the 'N + 1' form (MSVC),\n// while some other don't like 'value + 1' (Borland), and some don't like\n// either\n#if BOOST_WORKAROUND(__EDG_VERSION__, <= 243)\n private:\n    BOOST_STATIC_CONSTANT(AUX_WRAPPER_VALUE_TYPE, next_value = BOOST_MPL_AUX_STATIC_CAST(AUX_WRAPPER_VALUE_TYPE, (N + 1)));\n    BOOST_STATIC_CONSTANT(AUX_WRAPPER_VALUE_TYPE, prior_value = BOOST_MPL_AUX_STATIC_CAST(AUX_WRAPPER_VALUE_TYPE, (N - 1)));\n public:\n    typedef AUX_WRAPPER_INST(next_value) next;\n    typedef AUX_WRAPPER_INST(prior_value) prior;\n#elif BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x561)) \\\n    || BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(502)) \\\n    || (BOOST_WORKAROUND(__HP_aCC, <= 53800) && (BOOST_WORKAROUND(__hpxstd98, != 1)))\n    typedef AUX_WRAPPER_INST( BOOST_MPL_AUX_STATIC_CAST(AUX_WRAPPER_VALUE_TYPE, (N + 1)) ) next;\n    typedef AUX_WRAPPER_INST( BOOST_MPL_AUX_STATIC_CAST(AUX_WRAPPER_VALUE_TYPE, (N - 1)) ) prior;\n#else\n    typedef AUX_WRAPPER_INST( BOOST_MPL_AUX_STATIC_CAST(AUX_WRAPPER_VALUE_TYPE, (value + 1)) ) next;\n    typedef AUX_WRAPPER_INST( BOOST_MPL_AUX_STATIC_CAST(AUX_WRAPPER_VALUE_TYPE, (value - 1)) ) prior;\n#endif\n\n    // enables uniform function call syntax for families of overloaded \n    // functions that return objects of both arithmetic ('int', 'long',\n    // 'double', etc.) 
and wrapped integral types (for an example, see \n    // \"mpl/example/power.cpp\")\n    BOOST_CONSTEXPR operator AUX_WRAPPER_VALUE_TYPE() const { return static_cast<AUX_WRAPPER_VALUE_TYPE>(this->value); } \n};\n\n#if !defined(BOOST_NO_INCLASS_MEMBER_INITIALIZATION)\ntemplate< AUX_WRAPPER_PARAMS(N) >\nAUX_WRAPPER_VALUE_TYPE const AUX_WRAPPER_INST(N)::value;\n#endif\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n\n#undef AUX_WRAPPER_NAME\n#undef AUX_WRAPPER_PARAMS\n#undef AUX_WRAPPER_INST\n#undef AUX_WRAPPER_VALUE_TYPE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/is_msvc_eti_arg.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_IS_MSVC_ETI_ARG_HPP_INCLUDED\n#define BOOST_MPL_AUX_IS_MSVC_ETI_ARG_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/yes_no.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\n#if defined(BOOST_MPL_CFG_MSVC_ETI_BUG)\n\n#if defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG)\n\ntemplate< typename T >\nstruct is_msvc_eti_arg\n{ \n    BOOST_STATIC_CONSTANT(bool, value = false);\n};\n\n#else // BOOST_MPL_CFG_MSVC_60_ETI_BUG\n\nstruct eti_int_convertible\n{\n    eti_int_convertible(int);\n};\n\ntemplate< typename T >\nstruct is_msvc_eti_arg\n{ \n    static no_tag test(...);\n    static yes_tag test(eti_int_convertible);\n    static T& get();\n\n    BOOST_STATIC_CONSTANT(bool, value = \n          sizeof(test(get())) == sizeof(yes_tag)\n        );\n};\n\n#endif\n\ntemplate<>\nstruct is_msvc_eti_arg<int>\n{ \n    BOOST_STATIC_CONSTANT(bool, value = true);\n};\n\n#endif // BOOST_MPL_CFG_MSVC_ETI_BUG\n\n}}}\n\n#endif // BOOST_MPL_AUX_IS_MSVC_ETI_ARG_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/iter_apply.hpp",
    "content": "\n#ifndef BOOST_MPL_ITER_APPLY_HPP_INCLUDED\n#define BOOST_MPL_ITER_APPLY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/apply.hpp>\n#include <boost/mpl/deref.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate<\n      typename F\n    , typename Iterator\n    >\nstruct iter_apply1\n    : apply1< F,typename deref<Iterator>::type >\n{\n};\n\ntemplate<\n      typename F\n    , typename Iterator1\n    , typename Iterator2\n    >\nstruct iter_apply2\n    : apply2<\n          F\n        , typename deref<Iterator1>::type\n        , typename deref<Iterator2>::type\n        >\n{\n};\n\n}}}\n\n#endif // BOOST_MPL_ITER_APPLY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/iter_fold_if_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_ITER_FOLD_IF_IMPL_HPP_INCLUDED\n#define BOOST_MPL_AUX_ITER_FOLD_IF_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/identity.hpp>\n#   include <boost/mpl/next.hpp>\n#   include <boost/mpl/if.hpp>\n#   include <boost/mpl/apply.hpp>\n#   include <boost/mpl/aux_/value_wknd.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER iter_fold_if_impl.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/unrolling.hpp>\n#   include <boost/preprocessor/arithmetic/sub.hpp>\n#   include <boost/preprocessor/repeat.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/dec.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2<StateOp,State,Iterator>::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename 
IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\n// agurt, 25/jun/02: MSVC 6.5 workaround, had to get rid of inheritance \n// here and in 'iter_fold_if_backward_step', because sometimes it interfered \n// with the \"early template instantiation bug\" in _really_ ugly ways\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2<Predicate,State,Iterator>::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp,mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2<Predicate,State,Iterator>::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp,identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\n\n// local macros, #undef-ined at the end of the header\n\n#   define AUX_ITER_FOLD_FORWARD_STEP(unused, i, unused2) \\\n    typedef iter_fold_if_forward_step< \\\n          typename BOOST_PP_CAT(forward_step,i)::iterator \\\n        , typename BOOST_PP_CAT(forward_step,i)::state \\\n        , ForwardOp \\\n        , ForwardPredicate \\\n        > BOOST_PP_CAT(forward_step, BOOST_PP_INC(i)); \\\n    /**/\n\n#   define AUX_ITER_FOLD_BACKWARD_STEP_FUNC(i) \\\n    typedef iter_fold_if_backward_step< \\\n          typename BOOST_PP_CAT(forward_step,BOOST_PP_DEC(i))::iterator \\\n        , typename 
BOOST_PP_CAT(backward_step,i)::state \\\n        , BackwardOp \\\n        , BackwardPredicate \\\n        > BOOST_PP_CAT(backward_step,BOOST_PP_DEC(i)); \\\n    /**/\n\n#   define AUX_ITER_FOLD_BACKWARD_STEP(unused, i, unused2) \\\n    AUX_ITER_FOLD_BACKWARD_STEP_FUNC( \\\n        BOOST_PP_SUB_D(1,BOOST_MPL_LIMIT_UNROLLING,i) \\\n        ) \\\n    /**/\n\n#   define AUX_LAST_FORWARD_STEP \\\n    BOOST_PP_CAT(forward_step, BOOST_MPL_LIMIT_UNROLLING) \\\n    /**/\n\n#   define AUX_LAST_BACKWARD_STEP \\\n    BOOST_PP_CAT(backward_step, BOOST_MPL_LIMIT_UNROLLING) \\\n    /**/\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step<Iterator,State> forward_step0;\n    BOOST_PP_REPEAT(\n          BOOST_MPL_LIMIT_UNROLLING\n        , AUX_ITER_FOLD_FORWARD_STEP\n        , unused\n        )\n    \n    typedef typename if_<\n          typename AUX_LAST_FORWARD_STEP::not_last\n        , iter_fold_if_impl<\n              typename AUX_LAST_FORWARD_STEP::iterator\n            , typename AUX_LAST_FORWARD_STEP::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename AUX_LAST_FORWARD_STEP::iterator\n            , typename AUX_LAST_FORWARD_STEP::state\n            >\n        >::type AUX_LAST_BACKWARD_STEP;\n\n    BOOST_PP_REPEAT(\n          BOOST_MPL_LIMIT_UNROLLING\n        , AUX_ITER_FOLD_BACKWARD_STEP\n        , unused\n        )\n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename AUX_LAST_BACKWARD_STEP::iterator iterator;\n};\n\n#   undef AUX_LAST_BACKWARD_STEP\n#   undef AUX_LAST_FORWARD_STEP\n#   undef AUX_ITER_FOLD_BACKWARD_STEP\n#   undef AUX_ITER_FOLD_BACKWARD_STEP_FUNC\n#   undef 
AUX_ITER_FOLD_FORWARD_STEP\n\n}}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_AUX_ITER_FOLD_IF_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/lambda_arity_param.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_LAMBDA_ARITY_PARAM_HPP_INCLUDED\n#define BOOST_MPL_AUX_LAMBDA_ARITY_PARAM_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/ttp.hpp>\n\n#if !defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING)\n#   define BOOST_MPL_AUX_LAMBDA_ARITY_PARAM(param)    \n#else\n#   define BOOST_MPL_AUX_LAMBDA_ARITY_PARAM(param) , param\n#endif\n\n#endif // BOOST_MPL_AUX_LAMBDA_ARITY_PARAM_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/lambda_no_ctps.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_AUX_LAMBDA_NO_CTPS_HPP_INCLUDED\n#define BOOST_MPL_AUX_LAMBDA_NO_CTPS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/lambda_fwd.hpp>\n#   include <boost/mpl/bind_fwd.hpp>\n#   include <boost/mpl/protect.hpp>\n#   include <boost/mpl/is_placeholder.hpp>\n#   include <boost/mpl/if.hpp>\n#   include <boost/mpl/identity.hpp>\n#   include <boost/mpl/bool.hpp>\n#   include <boost/mpl/aux_/na_spec.hpp>\n#   include <boost/mpl/aux_/lambda_support.hpp>\n#   include <boost/mpl/aux_/template_arity.hpp>\n#   include <boost/mpl/aux_/value_wknd.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if    !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER lambda_no_ctps.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/preprocessor/default_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/repeat.hpp>\n#   include <boost/mpl/aux_/preprocessor/enum.hpp>\n#   include <boost/mpl/aux_/config/msvc.hpp>\n#   include <boost/mpl/aux_/config/workaround.hpp>\n\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define AUX778076_LAMBDA_PARAMS(i_, param) \\\n    BOOST_MPL_PP_PARAMS(i_, param) \\\n    /**/\n\nnamespace aux {\n\n#define n_ 
BOOST_MPL_LIMIT_METAFUNCTION_ARITY\ntemplate<\n      BOOST_MPL_PP_DEFAULT_PARAMS(n_,bool C,false)\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< BOOST_MPL_PP_ENUM(n_,false) >\n    : false_\n{\n};\n#undef n_\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(1, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/aux_/lambda_no_ctps.hpp>))\n#include BOOST_PP_ITERATE()\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    \n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n#   undef AUX778076_LAMBDA_PARAMS\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_AUX_LAMBDA_NO_CTPS_HPP_INCLUDED\n\n///// iteration, depth == 1\n\n#else\n\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\n#   define AUX778076_LAMBDA_TYPEDEF(unused, i_, F) \\\n    typedef lambda< \\\n          typename F::BOOST_PP_CAT(arg,BOOST_PP_INC(i_)) \\\n        , Tag \\\n        , false_ \\\n        > BOOST_PP_CAT(l,BOOST_PP_INC(i_)); \\\n    /**/\n\n#   define AUX778076_IS_LE_TYPEDEF(unused, i_, unused2) \\\n    typedef typename BOOST_PP_CAT(l,BOOST_PP_INC(i_))::is_le \\\n        BOOST_PP_CAT(is_le,BOOST_PP_INC(i_)); \\\n    /**/\n\n#   define AUX778076_IS_LAMBDA_EXPR(unused, i_, unused2) \\\n    BOOST_PP_COMMA_IF(i_) \\\n    
BOOST_MPL_AUX_MSVC_VALUE_WKND(BOOST_PP_CAT(is_le,BOOST_PP_INC(i_)))::value \\\n    /**/\n\n#   define AUX778076_LAMBDA_RESULT(unused, i_, unused2) \\\n    , typename BOOST_PP_CAT(l,BOOST_PP_INC(i_))::type \\\n    /**/\n\ntemplate<> struct lambda_impl< int_<i_> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        BOOST_MPL_PP_REPEAT(i_, AUX778076_LAMBDA_TYPEDEF, F)\n        BOOST_MPL_PP_REPEAT(i_, AUX778076_IS_LE_TYPEDEF, unused)\n\n        typedef aux::lambda_or<\n              BOOST_MPL_PP_REPEAT(i_, AUX778076_IS_LAMBDA_EXPR, unused)\n            > is_le;\n\n        typedef BOOST_PP_CAT(bind,i_)<\n              typename F::rebind\n            BOOST_MPL_PP_REPEAT(i_, AUX778076_LAMBDA_RESULT, unused)\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n    \n        typedef typename type_::type type;\n    };\n};\n\n#   undef AUX778076_LAMBDA_RESULT\n#   undef AUX778076_IS_LAMBDA_EXPR\n#   undef AUX778076_IS_LE_TYPEDEF\n#   undef AUX778076_LAMBDA_TYPEDEF\n\n#undef i_\n\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/lambda_spec.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_LAMBDA_SPEC_HPP_INCLUDED\n#define BOOST_MPL_AUX_LAMBDA_SPEC_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2007\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/void.hpp>\n#include <boost/mpl/lambda_fwd.hpp>\n#include <boost/mpl/int_fwd.hpp>\n#include <boost/mpl/aux_/preprocessor/params.hpp>\n#include <boost/mpl/aux_/lambda_arity_param.hpp>\n#include <boost/mpl/aux_/config/lambda.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n\n#   define BOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(i, name) \\\ntemplate< \\\n      BOOST_MPL_PP_PARAMS(i, typename T) \\\n    , typename Tag \\\n    > \\\nstruct lambda< \\\n      name< BOOST_MPL_PP_PARAMS(i, T) > \\\n    , Tag \\\n    BOOST_MPL_AUX_LAMBDA_ARITY_PARAM(int_<i>) \\\n    > \\\n{ \\\n    typedef false_ is_le; \\\n    typedef name< BOOST_MPL_PP_PARAMS(i, T) > result_; \\\n    typedef result_ type; \\\n}; \\\n/**/\n\n#else\n\n#   define BOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(i, name) /**/\n\n#endif\n\n#endif // BOOST_MPL_AUX_LAMBDA_SPEC_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/lambda_support.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_LAMBDA_SUPPORT_HPP_INCLUDED\n#define BOOST_MPL_AUX_LAMBDA_SUPPORT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/lambda.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(i, name, params) /**/\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT(i,name,params) /**/\n\n#else\n\n#   include <boost/mpl/int_fwd.hpp>\n#   include <boost/mpl/aux_/yes_no.hpp>\n#   include <boost/mpl/aux_/na_fwd.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/preprocessor/enum.hpp>\n#   include <boost/mpl/aux_/config/msvc.hpp>\n#   include <boost/mpl/aux_/config/workaround.hpp>\n\n#   include <boost/preprocessor/tuple/to_list.hpp>\n#   include <boost/preprocessor/list/for_each_i.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT_ARG_TYPEDEF_FUNC(R,typedef_,i,param) \\\n    typedef_ param BOOST_PP_CAT(arg,BOOST_PP_INC(i)); \\\n    /**/\n\n// agurt, 07/mar/03: restore an old revision for the sake of SGI MIPSpro C++\n#if BOOST_WORKAROUND(__EDG_VERSION__, <= 238) \n\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT(i, name, params) \\\n    typedef BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::int_<i> arity; \\\n    BOOST_PP_LIST_FOR_EACH_I_R( \\\n          1 \\\n        , BOOST_MPL_AUX_LAMBDA_SUPPORT_ARG_TYPEDEF_FUNC \\\n        , typedef \\\n        , BOOST_PP_TUPLE_TO_LIST(i,params) \\\n        ) \\\n    struct rebind \\\n    { \\\n        template< BOOST_MPL_PP_PARAMS(i,typename U) > struct apply \\\n            : name< BOOST_MPL_PP_PARAMS(i,U) > \\\n        { \\\n        }; \\\n    }; \\\n    /**/\n\n#   
define BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(i, name, params) \\\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(i, name, params) \\\n    /**/\n\n#elif BOOST_WORKAROUND(__EDG_VERSION__, <= 244) && !defined(BOOST_INTEL_CXX_VERSION)\n// agurt, 18/jan/03: old EDG-based compilers actually enforce 11.4 para 9\n// (in strict mode), so we have to provide an alternative to the \n// MSVC-optimized implementation\n\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(i, name, params) \\\n    typedef BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::int_<i> arity; \\\n    BOOST_PP_LIST_FOR_EACH_I_R( \\\n          1 \\\n        , BOOST_MPL_AUX_LAMBDA_SUPPORT_ARG_TYPEDEF_FUNC \\\n        , typedef \\\n        , BOOST_PP_TUPLE_TO_LIST(i,params) \\\n        ) \\\n    struct rebind; \\\n/**/\n\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT(i, name, params) \\\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(i, name, params) \\\n}; \\\ntemplate< BOOST_MPL_PP_PARAMS(i,typename T) > \\\nstruct name<BOOST_MPL_PP_PARAMS(i,T)>::rebind \\\n{ \\\n    template< BOOST_MPL_PP_PARAMS(i,typename U) > struct apply \\\n        : name< BOOST_MPL_PP_PARAMS(i,U) > \\\n    { \\\n    }; \\\n/**/\n\n#else // __EDG_VERSION__\n\nnamespace boost { namespace mpl { namespace aux {\ntemplate< typename T > struct has_rebind_tag;\n}}}\n\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(i, name, params) \\\n    typedef BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::int_<i> arity; \\\n    BOOST_PP_LIST_FOR_EACH_I_R( \\\n          1 \\\n        , BOOST_MPL_AUX_LAMBDA_SUPPORT_ARG_TYPEDEF_FUNC \\\n        , typedef \\\n        , BOOST_PP_TUPLE_TO_LIST(i,params) \\\n        ) \\\n    friend class BOOST_PP_CAT(name,_rebind); \\\n    typedef BOOST_PP_CAT(name,_rebind) rebind; \\\n/**/\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610))\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT_HAS_REBIND(i, name, params) \\\ntemplate< BOOST_MPL_PP_PARAMS(i,typename T) > \\\n::boost::mpl::aux::yes_tag operator|( \\\n      ::boost::mpl::aux::has_rebind_tag<int> \\\n    , 
name<BOOST_MPL_PP_PARAMS(i,T)>* \\\n    ); \\\n::boost::mpl::aux::no_tag operator|( \\\n      ::boost::mpl::aux::has_rebind_tag<int> \\\n    , name< BOOST_MPL_PP_ENUM(i,::boost::mpl::na) >* \\\n    ); \\\n/**/\n#elif !BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT_HAS_REBIND(i, name, params) \\\ntemplate< BOOST_MPL_PP_PARAMS(i,typename T) > \\\n::boost::mpl::aux::yes_tag operator|( \\\n      ::boost::mpl::aux::has_rebind_tag<int> \\\n    , ::boost::mpl::aux::has_rebind_tag< name<BOOST_MPL_PP_PARAMS(i,T)> >* \\\n    ); \\\n/**/\n#else\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT_HAS_REBIND(i, name, params) /**/\n#endif\n\n#   if !defined(__BORLANDC__)\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT(i, name, params) \\\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(i, name, params) \\\n}; \\\nBOOST_MPL_AUX_LAMBDA_SUPPORT_HAS_REBIND(i, name, params) \\\nclass BOOST_PP_CAT(name,_rebind) \\\n{ \\\n public: \\\n    template< BOOST_MPL_PP_PARAMS(i,typename U) > struct apply \\\n        : name< BOOST_MPL_PP_PARAMS(i,U) > \\\n    { \\\n    }; \\\n/**/\n#   else\n#   define BOOST_MPL_AUX_LAMBDA_SUPPORT(i, name, params) \\\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(i, name, params) \\\n}; \\\nBOOST_MPL_AUX_LAMBDA_SUPPORT_HAS_REBIND(i, name, params) \\\nclass BOOST_PP_CAT(name,_rebind) \\\n{ \\\n public: \\\n    template< BOOST_MPL_PP_PARAMS(i,typename U) > struct apply \\\n    { \\\n        typedef typename name< BOOST_MPL_PP_PARAMS(i,U) >::type type; \\\n    }; \\\n/**/\n#   endif // __BORLANDC__\n\n#endif // __EDG_VERSION__\n\n#endif // BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT\n\n#endif // BOOST_MPL_AUX_LAMBDA_SUPPORT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/logical_op.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION!\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/bool.hpp>\n#   include <boost/mpl/aux_/nested_type_wknd.hpp>\n#   include <boost/mpl/aux_/na_spec.hpp>\n#   include <boost/mpl/aux_/lambda_support.hpp>\n#endif\n\n#include <boost/mpl/limits/arity.hpp>\n#include <boost/mpl/aux_/preprocessor/params.hpp>\n#include <boost/mpl/aux_/preprocessor/ext_params.hpp>\n#include <boost/mpl/aux_/preprocessor/def_params_tail.hpp>\n#include <boost/mpl/aux_/preprocessor/enum.hpp>\n#include <boost/mpl/aux_/preprocessor/sub.hpp>\n#include <boost/mpl/aux_/config/ctps.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#include <boost/preprocessor/dec.hpp>\n#include <boost/preprocessor/inc.hpp>\n#include <boost/preprocessor/cat.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define AUX778076_PARAMS(param, sub) \\\n    BOOST_MPL_PP_PARAMS( \\\n          BOOST_MPL_PP_SUB(BOOST_MPL_LIMIT_METAFUNCTION_ARITY, sub) \\\n        , param \\\n        ) \\\n    /**/\n\n#   define AUX778076_SHIFTED_PARAMS(param, sub) \\\n    BOOST_MPL_PP_EXT_PARAMS( \\\n          2, BOOST_MPL_PP_SUB(BOOST_PP_INC(BOOST_MPL_LIMIT_METAFUNCTION_ARITY), sub) \\\n        , param \\\n        ) \\\n    /**/\n\n#   define AUX778076_SPEC_PARAMS(param) \\\n    BOOST_MPL_PP_ENUM( \\\n          BOOST_PP_DEC(BOOST_MPL_LIMIT_METAFUNCTION_ARITY) \\\n        , param \\\n        ) \\\n    /**/\n\nnamespace aux {\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate< bool C_, AUX778076_PARAMS(typename T, 1) >\nstruct BOOST_PP_CAT(AUX778076_OP_NAME,impl)\n    : 
BOOST_PP_CAT(AUX778076_OP_VALUE1,_)\n{\n};\n\ntemplate< AUX778076_PARAMS(typename T, 1) >\nstruct BOOST_PP_CAT(AUX778076_OP_NAME,impl)< AUX778076_OP_VALUE2,AUX778076_PARAMS(T, 1) >\n    : BOOST_PP_CAT(AUX778076_OP_NAME,impl)<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , AUX778076_SHIFTED_PARAMS(T, 1)\n        , BOOST_PP_CAT(AUX778076_OP_VALUE2,_)\n        >\n{\n};\n\ntemplate<>\nstruct BOOST_PP_CAT(AUX778076_OP_NAME,impl)<\n          AUX778076_OP_VALUE2\n        , AUX778076_SPEC_PARAMS(BOOST_PP_CAT(AUX778076_OP_VALUE2,_))\n        >\n    : BOOST_PP_CAT(AUX778076_OP_VALUE2,_)\n{\n};\n\n#else\n\ntemplate< bool C_ > struct BOOST_PP_CAT(AUX778076_OP_NAME,impl)\n{\n    template< AUX778076_PARAMS(typename T, 1) > struct result_\n        : BOOST_PP_CAT(AUX778076_OP_VALUE1,_)\n    {\n    };\n};\n\ntemplate<> struct BOOST_PP_CAT(AUX778076_OP_NAME,impl)<AUX778076_OP_VALUE2>\n{\n    template< AUX778076_PARAMS(typename T, 1) > struct result_\n        : BOOST_PP_CAT(AUX778076_OP_NAME,impl)< \n              BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n            >::template result_< AUX778076_SHIFTED_PARAMS(T,1),BOOST_PP_CAT(AUX778076_OP_VALUE2,_) >\n    {\n    };\n\n#if BOOST_WORKAROUND(BOOST_MSVC, == 1300)\n    template<> struct result_<AUX778076_SPEC_PARAMS(BOOST_PP_CAT(AUX778076_OP_VALUE2,_))>\n        : BOOST_PP_CAT(AUX778076_OP_VALUE2,_)\n    {\n    };\n};\n#else\n};\n\ntemplate<>\nstruct BOOST_PP_CAT(AUX778076_OP_NAME,impl)<AUX778076_OP_VALUE2>\n    ::result_< AUX778076_SPEC_PARAMS(BOOST_PP_CAT(AUX778076_OP_VALUE2,_)) >\n        : BOOST_PP_CAT(AUX778076_OP_VALUE2,_)\n{\n};\n#endif // BOOST_MSVC == 1300\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    BOOST_MPL_PP_DEF_PARAMS_TAIL(2, typename T, BOOST_PP_CAT(AUX778076_OP_VALUE2,_))\n    >\nstruct AUX778076_OP_NAME\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n    : 
aux::BOOST_PP_CAT(AUX778076_OP_NAME,impl)<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , AUX778076_SHIFTED_PARAMS(T,0)\n        >\n#else\n    : aux::BOOST_PP_CAT(AUX778076_OP_NAME,impl)< \n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        >::template result_< AUX778076_SHIFTED_PARAMS(T,0) >\n#endif\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY\n        , AUX778076_OP_NAME\n        , (AUX778076_PARAMS(T, 0))\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , BOOST_MPL_LIMIT_METAFUNCTION_ARITY\n    , AUX778076_OP_NAME\n    )\n\n}}\n\n#undef AUX778076_SPEC_PARAMS\n#undef AUX778076_SHIFTED_PARAMS\n#undef AUX778076_PARAMS\n#undef AUX778076_OP_NAME\n#undef AUX778076_OP_VALUE1\n#undef AUX778076_OP_VALUE2\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/msvc_dtw.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION!\n\n#include <boost/mpl/aux_/preprocessor/params.hpp>\n\n// local macros, #undef-ined at the end of the header\n#define AUX778076_DTW_PARAMS(param) \\\n    BOOST_MPL_PP_PARAMS(AUX778076_MSVC_DTW_ARITY, param) \\\n/**/\n\n#define AUX778076_DTW_ORIGINAL_NAME \\\n    AUX778076_MSVC_DTW_ORIGINAL_NAME \\\n/**/\n\n// warning: not a well-formed C++\n// workaround for MSVC 6.5's \"dependent template typedef bug\"\n\ntemplate< typename F>\nstruct AUX778076_MSVC_DTW_NAME\n{\n    template< bool > struct f_ : F {};\n    template<> struct f_<true>\n    {\n#if AUX778076_MSVC_DTW_ARITY > 0\n        template< AUX778076_DTW_PARAMS(typename P) > struct AUX778076_DTW_ORIGINAL_NAME\n        {\n            typedef int type;\n        };\n    };\n\n    template< AUX778076_DTW_PARAMS(typename T) > struct result_\n        : f_< aux::msvc_never_true<F>::value >\n            ::template AUX778076_DTW_ORIGINAL_NAME< AUX778076_DTW_PARAMS(T) >\n    {\n    };\n#else\n        template< typename P = int > struct AUX778076_DTW_ORIGINAL_NAME\n        {\n            typedef int type;\n        };\n    };\n\n    template< typename T = int > struct result_\n        : f_< aux::msvc_never_true<F>::value >\n            ::template AUX778076_DTW_ORIGINAL_NAME<>\n    {\n    };\n#endif\n};\n\n#undef AUX778076_DTW_ORIGINAL_NAME\n#undef AUX778076_DTW_PARAMS\n\n#undef AUX778076_MSVC_DTW_NAME\n#undef AUX778076_MSVC_DTW_ORIGINAL_NAME\n#undef AUX778076_MSVC_DTW_ARITY\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/msvc_eti_base.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED\n#define BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/is_msvc_eti_arg.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\n#if defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG)\n\ntemplate< bool > struct msvc_eti_base_impl\n{\n    template< typename T > struct result_\n        : T\n    {\n        typedef T type;\n    };\n};\n\ntemplate<> struct msvc_eti_base_impl<true>\n{\n    template< typename T > struct result_\n    {\n        typedef result_ type;\n        typedef result_ first;\n        typedef result_ second;\n        typedef result_ tag;\n        enum { value = 0 };\n    };\n};\n\ntemplate< typename T > struct msvc_eti_base\n    : msvc_eti_base_impl< is_msvc_eti_arg<T>::value >\n        ::template result_<T>\n{\n};\n\n#else // !BOOST_MPL_CFG_MSVC_70_ETI_BUG\n\ntemplate< typename T > struct msvc_eti_base\n    : T\n{\n#if BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, BOOST_TESTED_AT(0x0304))\n    msvc_eti_base();\n#endif\n    typedef T type;\n};\n\n#endif \n\ntemplate<> struct msvc_eti_base<int>\n{\n    typedef msvc_eti_base type;\n    typedef msvc_eti_base first;\n    typedef msvc_eti_base second;\n    typedef msvc_eti_base tag;\n    enum { value = 0 };\n};\n\n}}}\n\n#endif // BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/msvc_is_class.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_MSVC_IS_CLASS_HPP_INCLUDED\n#define BOOST_MPL_AUX_MSVC_IS_CLASS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/if.hpp>\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/aux_/type_wrapper.hpp>\n#include <boost/mpl/aux_/yes_no.hpp>\n\n#include <boost/type_traits/is_reference.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename T > struct is_class_helper\n{\n    typedef int (T::* type)();\n};\n\n// MSVC 6.x-specific lightweight 'is_class' implementation; \n// Distinguishing feature: does not instantiate the type being tested.\ntemplate< typename T >\nstruct msvc_is_class_impl\n{\n    template< typename U>\n    static yes_tag  test(type_wrapper<U>*, /*typename*/ is_class_helper<U>::type = 0);\n    static no_tag   test(void const volatile*, ...);\n\n    enum { value = sizeof(test((type_wrapper<T>*)0)) == sizeof(yes_tag) };\n    typedef bool_<value> type;\n};\n\n// agurt, 17/sep/04: have to check for 'is_reference' upfront to avoid ICEs in\n// complex metaprograms\ntemplate< typename T >\nstruct msvc_is_class\n    : if_<\n          is_reference<T>\n        , false_\n        , msvc_is_class_impl<T>\n        >::type\n{\n};\n\n}}}\n\n#endif // BOOST_MPL_AUX_MSVC_IS_CLASS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/msvc_never_true.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_MSVC_NEVER_TRUE_HPP_INCLUDED\n#define BOOST_MPL_AUX_MSVC_NEVER_TRUE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if BOOST_WORKAROUND(BOOST_MSVC, <= 1300)\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename T >\nstruct msvc_never_true\n{\n    enum { value = false };\n};\n\n}}}\n\n#endif // BOOST_MSVC\n\n#endif // BOOST_MPL_AUX_MSVC_NEVER_TRUE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/msvc_type.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_MSVC_TYPE_HPP_INCLUDED\n#define BOOST_MPL_AUX_MSVC_TYPE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/eti.hpp>\n#include <boost/mpl/aux_/is_msvc_eti_arg.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\n#if defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG)\n\ntemplate< bool > struct msvc_type_impl\n{\n    template< typename T > struct result_\n    {\n        typedef typename T::type type;\n    };\n};\n\ntemplate<> struct msvc_type_impl<true>\n{\n    template< typename T > struct result_\n    {\n        typedef result_ type;\n    };\n};\n\ntemplate< typename T > struct msvc_type\n    : msvc_type_impl< is_msvc_eti_arg<T>::value >\n        ::template result_<T>\n{\n};\n\n#else // BOOST_MPL_CFG_MSVC_70_ETI_BUG\n\ntemplate< typename T > struct msvc_type \n{\n    typedef typename T::type type;\n};\n\ntemplate<> struct msvc_type<int>\n{\n    typedef int type;\n};\n\n#endif\n\n}}}\n\n#endif // BOOST_MPL_AUX_MSVC_TYPE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/na.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_NA_HPP_INCLUDED\n#define BOOST_MPL_AUX_NA_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/aux_/na_fwd.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/ctps.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T >\nstruct is_na\n    : false_\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    using false_::value;\n#endif\n};\n\ntemplate<>\nstruct is_na<na>\n    : true_\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    using true_::value;\n#endif\n};\n\ntemplate< typename T >\nstruct is_not_na\n    : true_\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    using true_::value;\n#endif\n};\n\ntemplate<>\nstruct is_not_na<na>\n    : false_\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    using false_::value;\n#endif\n};\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\ntemplate< typename T, typename U > struct if_na\n{\n    typedef T type;\n};\n\ntemplate< typename U > struct if_na<na,U>\n{\n    typedef U type;\n};\n#else\ntemplate< typename T > struct if_na_impl\n{\n    template< typename U > struct apply\n    {\n        typedef T type;\n    };\n};\n\ntemplate<> struct if_na_impl<na>\n{\n    template< typename U > struct apply\n    {\n        typedef U type;\n    };\n};\n\ntemplate< typename T, typename U > struct if_na\n    : if_na_impl<T>::template apply<U>\n{\n};\n#endif\n\n}}\n\n#endif // BOOST_MPL_AUX_NA_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/na_assert.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_NA_ASSERT_HPP_INCLUDED\n#define BOOST_MPL_AUX_NA_ASSERT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/na.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if !BOOST_WORKAROUND(_MSC_FULL_VER, <= 140050601)    \\\n    && !BOOST_WORKAROUND(__EDG_VERSION__, <= 243)\n#   include <boost/mpl/assert.hpp>\n#   define BOOST_MPL_AUX_ASSERT_NOT_NA(x) \\\n    BOOST_MPL_ASSERT_NOT((boost::mpl::is_na<type>)) \\\n/**/\n#else\n#   include <boost/static_assert.hpp>\n#   define BOOST_MPL_AUX_ASSERT_NOT_NA(x) \\\n    BOOST_STATIC_ASSERT(!boost::mpl::is_na<x>::value) \\\n/**/\n#endif\n\n#endif // BOOST_MPL_AUX_NA_ASSERT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/na_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_NA_FWD_HPP_INCLUDED\n#define BOOST_MPL_AUX_NA_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/adl_barrier.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\n// n.a. == not available\nstruct na\n{\n    typedef na type;\n    enum { value = 0 };\n};\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nBOOST_MPL_AUX_ADL_BARRIER_DECL(na)\n\n#endif // BOOST_MPL_AUX_NA_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/na_spec.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_NA_SPEC_HPP_INCLUDED\n#define BOOST_MPL_AUX_NA_SPEC_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/lambda_fwd.hpp>\n#   include <boost/mpl/int.hpp>\n#   include <boost/mpl/bool.hpp>\n#   include <boost/mpl/aux_/na.hpp>\n#   include <boost/mpl/aux_/arity.hpp>\n#   include <boost/mpl/aux_/template_arity_fwd.hpp>\n#endif\n\n#include <boost/mpl/aux_/preprocessor/params.hpp>\n#include <boost/mpl/aux_/preprocessor/enum.hpp>\n#include <boost/mpl/aux_/preprocessor/def_params_tail.hpp>\n#include <boost/mpl/aux_/lambda_arity_param.hpp>\n#include <boost/mpl/aux_/config/dtp.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n#include <boost/mpl/aux_/nttp_decl.hpp>\n#include <boost/mpl/aux_/config/ttp.hpp>\n#include <boost/mpl/aux_/config/lambda.hpp>\n#include <boost/mpl/aux_/config/overload_resolution.hpp>\n\n\n#define BOOST_MPL_AUX_NA_PARAMS(i) \\\n    BOOST_MPL_PP_ENUM(i, na) \\\n/**/\n\n#if defined(BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n#   define BOOST_MPL_AUX_NA_SPEC_ARITY(i, name) \\\nnamespace aux { \\\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) > \\\nstruct arity< \\\n          name< BOOST_MPL_AUX_NA_PARAMS(i) > \\\n        , N \\\n        > \\\n    : int_< BOOST_MPL_LIMIT_METAFUNCTION_ARITY > \\\n{ \\\n}; \\\n} \\\n/**/\n#else\n#   define BOOST_MPL_AUX_NA_SPEC_ARITY(i, name) /**/\n#endif\n\n#define BOOST_MPL_AUX_NA_SPEC_MAIN(i, name) \\\ntemplate<> \\\nstruct name< BOOST_MPL_AUX_NA_PARAMS(i) > \\\n{ \\\n    template< \\\n          BOOST_MPL_PP_PARAMS(i, typename T) \\\n        BOOST_MPL_PP_NESTED_DEF_PARAMS_TAIL(i, typename T, na) \\\n        > \\\n    struct apply \\\n       
 : name< BOOST_MPL_PP_PARAMS(i, T) > \\\n    { \\\n    }; \\\n}; \\\n/**/\n\n#if defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n#   define BOOST_MPL_AUX_NA_SPEC_LAMBDA(i, name) \\\ntemplate<> \\\nstruct lambda< \\\n      name< BOOST_MPL_AUX_NA_PARAMS(i) > \\\n    , void_ \\\n    , true_ \\\n    > \\\n{ \\\n    typedef false_ is_le; \\\n    typedef name< BOOST_MPL_AUX_NA_PARAMS(i) > type; \\\n}; \\\ntemplate<> \\\nstruct lambda< \\\n      name< BOOST_MPL_AUX_NA_PARAMS(i) > \\\n    , void_ \\\n    , false_ \\\n    > \\\n{ \\\n    typedef false_ is_le; \\\n    typedef name< BOOST_MPL_AUX_NA_PARAMS(i) > type; \\\n}; \\\n/**/\n#else\n#   define BOOST_MPL_AUX_NA_SPEC_LAMBDA(i, name) \\\ntemplate< typename Tag > \\\nstruct lambda< \\\n      name< BOOST_MPL_AUX_NA_PARAMS(i) > \\\n    , Tag \\\n    BOOST_MPL_AUX_LAMBDA_ARITY_PARAM(int_<-1>) \\\n    > \\\n{ \\\n    typedef false_ is_le; \\\n    typedef name< BOOST_MPL_AUX_NA_PARAMS(i) > result_; \\\n    typedef name< BOOST_MPL_AUX_NA_PARAMS(i) > type; \\\n}; \\\n/**/\n#endif\n\n#if defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING) \\\n    || defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT) \\\n        && defined(BOOST_MPL_CFG_BROKEN_OVERLOAD_RESOLUTION)\n#   define BOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(i, j, name) \\\nnamespace aux { \\\ntemplate< BOOST_MPL_PP_PARAMS(j, typename T) > \\\nstruct template_arity< \\\n          name< BOOST_MPL_PP_PARAMS(j, T) > \\\n        > \\\n    : int_<j> \\\n{ \\\n}; \\\n\\\ntemplate<> \\\nstruct template_arity< \\\n          name< BOOST_MPL_PP_ENUM(i, na) > \\\n        > \\\n    : int_<-1> \\\n{ \\\n}; \\\n} \\\n/**/\n#else\n#   define BOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(i, j, name) /**/\n#endif\n\n#if defined(BOOST_MPL_CFG_MSVC_ETI_BUG)\n#   define BOOST_MPL_AUX_NA_SPEC_ETI(i, name) \\\ntemplate<> \\\nstruct name< BOOST_MPL_PP_ENUM(i, int) > \\\n{ \\\n    typedef int type; \\\n    enum { value = 0 }; \\\n}; \\\n/**/\n#else\n#   define BOOST_MPL_AUX_NA_SPEC_ETI(i, name) 
/**/\n#endif\n\n#define BOOST_MPL_AUX_NA_PARAM(param) param = na\n\n#define BOOST_MPL_AUX_NA_SPEC_NO_ETI(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_MAIN(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_ARITY(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(i, i, name) \\\n/**/\n\n#define BOOST_MPL_AUX_NA_SPEC(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_NO_ETI(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_ETI(i, name) \\\n/**/\n\n#define BOOST_MPL_AUX_NA_SPEC2(i, j, name) \\\nBOOST_MPL_AUX_NA_SPEC_MAIN(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_ETI(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_ARITY(i, name) \\\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(i, j, name) \\\n/**/\n\n\n#endif // BOOST_MPL_AUX_NA_SPEC_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/nested_type_wknd.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_NESTED_TYPE_WKND_HPP_INCLUDED\n#define BOOST_MPL_AUX_NESTED_TYPE_WKND_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, BOOST_TESTED_AT(0x0302)) \\\n    || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x561)) \\\n    || BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x530)) \\\n    || BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n\nnamespace boost { namespace mpl { namespace aux {\ntemplate< typename T > struct nested_type_wknd\n    : T::type\n{\n};\n}}}\n\n#if BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n#   define BOOST_MPL_AUX_NESTED_TYPE_WKND(T) \\\n    aux::nested_type_wknd<T> \\\n/**/\n#else\n#   define BOOST_MPL_AUX_NESTED_TYPE_WKND(T) \\\n    ::boost::mpl::aux::nested_type_wknd<T> \\\n/**/\n#endif\n\n#else // !BOOST_MPL_CFG_GCC et al.\n\n#   define BOOST_MPL_AUX_NESTED_TYPE_WKND(T) T::type\n\n#endif \n\n#endif // BOOST_MPL_AUX_NESTED_TYPE_WKND_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/nttp_decl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_NTTP_DECL_HPP_INCLUDED\n#define BOOST_MPL_AUX_NTTP_DECL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/nttp.hpp>\n\n#if defined(BOOST_MPL_CFG_NTTP_BUG)\n\ntypedef bool        _mpl_nttp_bool;\ntypedef int         _mpl_nttp_int;\ntypedef unsigned    _mpl_nttp_unsigned;\ntypedef long        _mpl_nttp_long;\n\n#   include <boost/preprocessor/cat.hpp>\n#   define BOOST_MPL_AUX_NTTP_DECL(T, x) BOOST_PP_CAT(_mpl_nttp_,T) x /**/\n\n#else\n\n#   define BOOST_MPL_AUX_NTTP_DECL(T, x) T x /**/\n\n#endif\n\n#endif // BOOST_MPL_AUX_NTTP_DECL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/overload_names.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_OVERLOAD_NAMES_HPP_INCLUDED\n#define BOOST_MPL_AUX_OVERLOAD_NAMES_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/ptr_to_ref.hpp>\n#include <boost/mpl/aux_/config/operators.hpp>\n\n#if defined(BOOST_MPL_CFG_USE_OPERATORS_OVERLOADING)\n\n#   include <boost/mpl/aux_/static_cast.hpp>\n\n#   define BOOST_MPL_AUX_OVERLOAD_VALUE_BY_KEY  operator/\n#   define BOOST_MPL_AUX_OVERLOAD_ITEM_BY_ORDER operator|\n#   define BOOST_MPL_AUX_OVERLOAD_ORDER_BY_KEY  operator||\n#   define BOOST_MPL_AUX_OVERLOAD_IS_MASKED     operator%\n\n#   define BOOST_MPL_AUX_OVERLOAD_CALL_VALUE_BY_KEY(T, x)   BOOST_MPL_AUX_PTR_TO_REF(T) / x\n#   define BOOST_MPL_AUX_OVERLOAD_CALL_ITEM_BY_ORDER(T, x)  BOOST_MPL_AUX_PTR_TO_REF(T) | x\n#   define BOOST_MPL_AUX_OVERLOAD_CALL_ORDER_BY_KEY(T, x)   BOOST_MPL_AUX_PTR_TO_REF(T) || x\n#   define BOOST_MPL_AUX_OVERLOAD_CALL_IS_MASKED(T, x)      BOOST_MPL_AUX_PTR_TO_REF(T) % x\n\n#else\n\n#   define BOOST_MPL_AUX_OVERLOAD_VALUE_BY_KEY  value_by_key_\n#   define BOOST_MPL_AUX_OVERLOAD_ITEM_BY_ORDER item_by_order_\n#   define BOOST_MPL_AUX_OVERLOAD_ORDER_BY_KEY  order_by_key_\n#   define BOOST_MPL_AUX_OVERLOAD_IS_MASKED     is_masked_\n\n#   define BOOST_MPL_AUX_OVERLOAD_CALL_VALUE_BY_KEY(T, x)   T::BOOST_MPL_AUX_OVERLOAD_VALUE_BY_KEY( BOOST_MPL_AUX_PTR_TO_REF(T), x )\n#   define BOOST_MPL_AUX_OVERLOAD_CALL_ITEM_BY_ORDER(T, x)  T::BOOST_MPL_AUX_OVERLOAD_ITEM_BY_ORDER( BOOST_MPL_AUX_PTR_TO_REF(T), x )\n#   define BOOST_MPL_AUX_OVERLOAD_CALL_ORDER_BY_KEY(T, x)   T::BOOST_MPL_AUX_OVERLOAD_ORDER_BY_KEY( BOOST_MPL_AUX_PTR_TO_REF(T), x )\n#   define BOOST_MPL_AUX_OVERLOAD_CALL_IS_MASKED(T, x)      T::BOOST_MPL_AUX_OVERLOAD_IS_MASKED( 
BOOST_MPL_AUX_PTR_TO_REF(T), x )\n\n#endif\n\n#endif // BOOST_MPL_AUX_OVERLOAD_NAMES_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_backward<4>\n  
          , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl\n    : false_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl< true,T1,T2,T3,T4 >\n    : and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , true_\n        >\n{\n};\n\ntemplate<>\nstruct and_impl<\n          true\n        , true_, true_, true_, true_\n        >\n    : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\ntemplate<\n      typename F\n    >\nstruct apply< F,na,na,na,na,na >\n    : apply0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply< F,T1,na,na,na,na >\n    : apply1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply< F,T1,T2,na,na,na >\n    : apply2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply< F,T1,T2,T3,na,na >\n    : apply3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct 
apply4\n\n    : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply< F,T1,T2,T3,T4,na >\n    : apply4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply\n    : apply5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply;\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      int N, typename F\n    >\nstruct apply_wrap_impl0;\n\ntemplate< typename F, bool F_has_apply >\nstruct apply_wrap_impl0_bcb {\n    typedef typename F::template apply<na> type;\n};\n\ntemplate< typename F >\nstruct apply_wrap_impl0_bcb< F,true > {\n    typedef typename F::apply type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          0\n        , F\n       \n        >\n{\n    typedef apply_wrap_impl0_bcb< F, aux::has_apply<F>::value >::type type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          1\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          2\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          3\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          4\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          5\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap0\n    : apply_wrap_impl0<\n          
::boost::mpl::aux::arity< F,0 >::value\n        , F\n       \n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1\n    >\nstruct apply_wrap_impl1;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          1\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          2\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          3\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          4\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          5\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap1\n    : apply_wrap_impl1<\n          ::boost::mpl::aux::arity< F,1 >::value\n        , F\n        , T1\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          2\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          3\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template 
apply<\n          T1, T2\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          4\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          5\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap2\n    : apply_wrap_impl2<\n          ::boost::mpl::aux::arity< F,2 >::value\n        , F\n        , T1, T2\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          3\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          4\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          5\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap3\n    : apply_wrap_impl3<\n          ::boost::mpl::aux::arity< F,3 >::value\n        , F\n        , T1, T2, T3\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3, typename 
T4\n    >\nstruct apply_wrap_impl4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap_impl4<\n          4\n        , F\n        , T1, T2, T3, T4\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap_impl4<\n          5\n        , F\n        , T1, T2, T3, T4\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap4\n    : apply_wrap_impl4<\n          ::boost::mpl::aux::arity< F,4 >::value\n        , F\n        , T1, T2, T3, T4\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap_impl5;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap_impl5<\n          5\n        , F\n        , T1, T2, T3, T4, T5\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4, T5\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap5\n    : apply_wrap_impl5<\n          ::boost::mpl::aux::arity< F,5 >::value\n        , F\n        , T1, T2, T3, T4, T5\n        >::type\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    
BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        
typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef 
aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, 
bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg< -1 >, Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace 
aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef 
aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename 
t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, 
typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename 
r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n    : bitand_< bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitand_< N1,N2,N3,N4,na >\n\n    : bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitand_< N1,N2,N3,na,na >\n\n    : bitand_< bitand_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitand_< N1,N2,na,na,na >\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  & BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n    : bitor_< bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitor_< 
N1,N2,N3,N4,na >\n\n    : bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitor_< N1,N2,N3,na,na >\n\n    : bitor_< bitor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitor_< N1,N2,na,na,na >\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  | BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n    : bitxor_< bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitxor_< N1,N2,N3,N4,na >\n\n    : bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitxor_< N1,N2,N3,na,na >\n\n    : bitxor_< bitxor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitxor_< N1,N2,na,na,na >\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  ^ BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque;\n\ntemplate<\n     \n    >\nstruct deque<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct deque<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct deque<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct deque<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct deque<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : 
vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename 
T6, typename T7, typename T8, typename T9\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, 
T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename vector17< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n    : divides< divides< divides< divides< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct divides< N1,N2,N3,N4,na 
>\n\n    : divides< divides< divides< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct divides< N1,N2,N3,na,na >\n\n    : divides< divides< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct divides< N1,N2,na,na,na >\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  / BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value  == BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 3,First,Last,State,ForwardOp >\n{\n    
typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n{\n    typedef fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,First,Last,State,ForwardOp >\n    : fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Arity\n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>,Tag, int_< -1 > >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n        , int_<1>\n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > 
class F\n    , typename T1\n    , typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n        , int_<1>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n        , int_<2>\n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n        , int_<2>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef 
typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n        , int_<3>\n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n        , int_<3>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n        , Tag\n        , int_<4>\n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n        , int_<4>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef 
typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n        , int_<5>\n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<5>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename 
l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<6>\n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>,Tag, int_<1> >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<6>\n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\ntemplate<\n      typename F\n    , typename Tag1\n    , typename Tag2\n    , typename Arity\n    >\nstruct lambda<\n          lambda< F,Tag1,Arity >\n        , Tag2\n        , int_<3>\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef bind1< quote1<aux::template_arity>, typename l1::result_ > arity_;\n    typedef lambda< typename if_< is_le,arity_,Arity >::type, Tag2 > l3;\n    typedef aux::le_result3<is_le, Tag2, mpl::lambda, l1, l2, l3> le_result_;\n    typedef typename le_result_::result_ result_;\n    
typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 3, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value > BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
greater_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : T1, T2\n{\n    typedef inherit2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\ntemplate< typename T1 >\nstruct inherit2< T1,empty_base >\n{\n    typedef T1 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (T1, empty_base))\n};\n\ntemplate< typename T2 >\nstruct inherit2< empty_base,T2 >\n{\n    typedef T2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, T2))\n};\n\ntemplate<>\nstruct inherit2< empty_base,empty_base >\n{\n    typedef empty_base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, empty_base))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct 
inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1, typename T2, typename T3, typename T4, typename T5\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 3,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State 
state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n{\n    typedef iter_fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,First,Last,State,ForwardOp >\n    : iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, 
typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N2)::value > BOOST_MPL_AUX_VALUE_WKND(N1)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl 
{\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list;\n\ntemplate<\n     \n    >\nstruct list<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list0<  >\n{\n    typedef list0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct list<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list1<T0>\n{\n    typedef typename list1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list2< T0,T1 >\n{\n    typedef typename list2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list3< T0,T1,T2 >\n{\n    typedef typename list3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list4< T0,T1,T2,T3 
>\n{\n    typedef typename list4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list5< T0,T1,T2,T3,T4 >\n{\n    typedef typename list5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename list6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename list7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename list8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    
>\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n 
       >\n    : list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : list15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename list15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : list16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename list16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : list17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename list17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n   
   typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : list18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename list18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : list19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename list19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list\n    : list20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename list20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c;\n\ntemplate<\n      typename T\n    >\nstruct list_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list0_c<T>\n{\n    typedef typename list0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct list_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list1_c< T,C0 >\n{\n    typedef typename list1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct list_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list2_c< T,C0,C1 >\n{\n    typedef typename list2_c< T,C0,C1 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct list_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list3_c< T,C0,C1,C2 >\n{\n    typedef typename list3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename list4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename list5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename list6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : 
list7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    typedef typename list7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long 
C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename list14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename list15_c< 
T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename list16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename list17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : list18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename list18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : list19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename list19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c\n    : list20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename list20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map;\n\ntemplate<\n     \n    >\nstruct map<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map0<  >\n{\n    typedef map0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct map<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map1<T0>\n{\n    typedef typename map1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct map<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map2< T0,T1 >\n{\n    typedef typename map2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct map<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map3< T0,T1,T2 >\n{\n    typedef typename map3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct map<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map4< T0,T1,T2,T3 >\n{\n    typedef 
typename map4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct map<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map5< T0,T1,T2,T3,T4 >\n{\n    typedef typename map5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename map6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename map7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename map8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct map<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : map14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename map14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : map15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename map15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : map16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename map16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : map17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename map17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : map18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename map18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : map19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename map19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct map\n    : map20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename map20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n    : minus< minus< minus< minus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct minus< N1,N2,N3,N4,na >\n\n    : minus< minus< minus< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct minus< N1,N2,N3,na,na >\n\n    : minus< minus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct minus< N1,N2,na,na,na >\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  - BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct modulus_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  % BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { 
namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl\n    : true_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl< false,T1,T2,T3,T4 >\n    : or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , false_\n        >\n{\n};\n\ntemplate<>\nstruct or_impl<\n          false\n        , false_, false_, false_, false_\n        >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n    : plus< plus< plus< plus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct plus< N1,N2,N3,N4,na >\n\n    : plus< plus< plus< N1,N2 >, N3>, 
N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct plus< N1,N2,N3,na,na >\n\n    : plus< plus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct plus< N1,N2,na,na,na >\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  + BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"quote.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T, bool has_type_ >\nstruct quote_impl\n\n{\n    typedef typename T::type type;\n};\n\ntemplate< typename T >\nstruct quote_impl< T,false >\n{\n    typedef T type;\n};\n\ntemplate<\n      template< typename P1 > class F\n    , typename Tag = void_\n    >\nstruct quote1\n{\n    template< typename U1 > struct apply\n\n    {\n        typedef typename quote_impl<\n              F<U1>\n            , aux::has_type< F<U1> >::value\n            >::type type;\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename Tag = void_\n    >\nstruct quote2\n{\n    template< typename U1, typename U2 > struct apply\n\n    {\n        typedef typename quote_impl<\n              F< U1,U2 >\n            , aux::has_type< F< U1,U2 > >::value\n            >::type type;\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename Tag = void_\n    >\nstruct quote3\n{\n    template< typename U1, typename U2, typename U3 > struct apply\n\n    {\n        typedef typename quote_impl<\n              F< U1,U2,U3 >\n            , aux::has_type< F< U1,U2,U3 > >::value\n            >::type type;\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename Tag = void_\n    >\nstruct quote4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        >\n    struct apply\n\n    {\n        typedef typename quote_impl<\n              F< U1,U2,U3,U4 >\n            , aux::has_type< F< U1,U2,U3,U4 > >::value\n            >::type type;\n    };\n};\n\ntemplate<\n      
template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename Tag = void_\n    >\nstruct quote5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        , typename U5\n        >\n    struct apply\n\n    {\n        typedef typename quote_impl<\n              F< U1,U2,U3,U4,U5 >\n            , aux::has_type< F< U1,U2,U3,U4,U5 > >::value\n            >::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate< long N >\nstruct reverse_fold_chunk;\n\ntemplate<> struct reverse_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct 
result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 
iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename 
mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef reverse_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last 
>::type\n            , reverse_fold_null_step< Last,State >\n            , reverse_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step\n{\n    typedef reverse_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n    : reverse_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk;\n\ntemplate<> struct reverse_iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        
typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    
{\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef 
reverse_iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , reverse_iter_fold_null_step< Last,State >\n            , reverse_iter_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step\n{\n    typedef reverse_iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< 
ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n    : reverse_iter_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set;\n\ntemplate<\n     \n    >\nstruct set<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set0<  >\n{\n    typedef set0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct set<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set1<T0>\n{\n    typedef typename set1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct set<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set2< T0,T1 >\n{\n    typedef typename set2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct set<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set3< T0,T1,T2 >\n{\n    typedef typename set3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct set<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set4< T0,T1,T2,T3 >\n{\n    typedef 
typename set4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct set<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set5< T0,T1,T2,T3,T4 >\n{\n    typedef typename set5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename set6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename set7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename set8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct set<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : set14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename set14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : set15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename set15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : set16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename set16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : set17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename set17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : set18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename set18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : set19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename set19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct set\n    : set20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename set20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c;\n\ntemplate<\n      typename T\n    >\nstruct set_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set0_c<T>\n{\n    typedef typename set0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct set_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set1_c< T,C0 >\n{\n    typedef typename set1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct set_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set2_c< T,C0,C1 >\n{\n    typedef typename set2_c< T,C0,C1 >::type type;\n};\n\ntemplate<\n 
     typename T, long C0, long C1, long C2\n    >\nstruct set_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set3_c< T,C0,C1,C2 >\n{\n    typedef typename set3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename set4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename set5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename set6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    
typedef typename set7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , 
long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename set14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename set15_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename set16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename set17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : set18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename set18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, 
long C15, long C16, long C17, long C18\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : set19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename set19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c\n    : set20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename set20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { 
namespace mpl {\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  << BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  >> BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< bool >\nstruct template_arity_impl\n{\n    template< typename F > struct result_\n        : mpl::int_< -1 >\n    {\n    };\n};\n\ntemplate<>\nstruct template_arity_impl<true>\n{\n    template< typename F > struct result_\n        : F::arity\n    {\n    };\n};\n\ntemplate< typename F >\nstruct template_arity\n    : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value >\n        ::template result_<F>\n{\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n    : times< times< times< times< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct times< N1,N2,N3,N4,na >\n\n    : times< times< times< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct times< N1,N2,N3,na,na >\n\n    : times< times< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct times< N1,N2,na,na,na >\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  * BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< int size, typename F, typename Args >\nstruct unpack_args_impl;\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 0,F,Args >\n    : apply0<\n          F\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 1,F,Args >\n    : apply1<\n          F\n        , typename at_c< Args,0 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 2,F,Args >\n    : apply2<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 3,F,Args >\n    : apply3<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 4,F,Args >\n    : apply4<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 5,F,Args >\n    : apply5<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        , typename at_c< Args,4 >::type\n        >\n{\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n    {\n        typedef typename aux::unpack_args_impl<\n              size<Args>::value\n            , F\n            , Args\n      
      >::type type;\n\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector;\n\ntemplate<\n     \n    >\nstruct vector<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct vector<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct vector<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct vector<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct vector<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        
>\n    : vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename 
T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct vector<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef 
typename vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c;\n\ntemplate<\n      typename T\n    >\nstruct vector_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector0_c<T>\n{\n    typedef typename vector0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct vector_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector1_c< T, T(C0) >\n{\n    typedef typename vector1_c< T, T(C0) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct vector_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector2_c< T, T(C0), T(C1) >\n{\n    typedef typename 
vector2_c< T, T(C0), T(C1) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct vector_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector3_c< T, T(C0), T(C1), T(C2) >\n{\n    typedef typename vector3_c< T, T(C0), T(C1), T(C2) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector4_c< T, T(C0), T(C1), T(C2), T(C3) >\n{\n    typedef typename vector4_c< T, T(C0), T(C1), T(C2), T(C3) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >\n{\n    typedef typename vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >\n{\n    typedef typename vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct vector_c<\n          
T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >\n{\n    typedef typename vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >\n{\n    typedef typename vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >\n{\n    typedef typename vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >\n{\n    typedef typename vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >::type type;\n};\n\ntemplate<\n      
typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >\n{\n    typedef typename vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >\n{\n    typedef typename vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >\n{\n    typedef typename vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, 
C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >\n{\n    typedef typename vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >\n{\n    typedef typename vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >\n{\n    typedef typename vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, 
C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >\n{\n    typedef typename vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >\n{\n    typedef typename vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >\n{\n    typedef typename vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, 
long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c\n    : vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >\n{\n    typedef typename vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              
advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl\n    : false_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl< true,T1,T2,T3,T4 >\n    : and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , true_\n        >\n{\n};\n\ntemplate<>\nstruct and_impl<\n          true\n        , true_, true_, true_, true_\n        >\n    : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\ntemplate<\n      typename F\n    >\nstruct apply< F,na,na,na,na,na >\n    : apply0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply< F,T1,na,na,na,na >\n    : apply1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply< F,T1,T2,na,na,na >\n    : apply2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply< F,T1,T2,T3,na,na >\n    : apply3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4\n\n 
   : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply< F,T1,T2,T3,T4,na >\n    : apply4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply\n    : apply5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply;\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      int N, typename F\n    >\nstruct apply_wrap_impl0;\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          0\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n/// since the defaults are \"lost\", we have to pass *something* even for nullary\n/// metafunction classes\n        na\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          1\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          2\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          3\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          4\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          5\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap0\n    : apply_wrap_impl0<\n          ::boost::mpl::aux::arity< F,0 >::value\n        , F\n       \n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, 
typename T1\n    >\nstruct apply_wrap_impl1;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          1\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          2\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          3\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          4\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          5\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap1\n    : apply_wrap_impl1<\n          ::boost::mpl::aux::arity< F,1 >::value\n        , F\n        , T1\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          2\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          3\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    
>\nstruct apply_wrap_impl2<\n          4\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          5\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap2\n    : apply_wrap_impl2<\n          ::boost::mpl::aux::arity< F,2 >::value\n        , F\n        , T1, T2\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          3\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          4\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          5\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap3\n    : apply_wrap_impl3<\n          ::boost::mpl::aux::arity< F,3 >::value\n        , F\n        , T1, T2, T3\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap_impl4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    
>\nstruct apply_wrap_impl4<\n          4\n        , F\n        , T1, T2, T3, T4\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap_impl4<\n          5\n        , F\n        , T1, T2, T3, T4\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap4\n    : apply_wrap_impl4<\n          ::boost::mpl::aux::arity< F,4 >::value\n        , F\n        , T1, T2, T3, T4\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap_impl5;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap_impl5<\n          5\n        , F\n        , T1, T2, T3, T4, T5\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4, T5\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap5\n    : apply_wrap_impl5<\n          ::boost::mpl::aux::arity< F,5 >::value\n        , F\n        , T1, T2, T3, T4, T5\n        >::type\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        
typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename 
aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    
typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg< -1 >, Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // 
namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef 
aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > 
t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< 
a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n    
    typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n    : bitand_< bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitand_< N1,N2,N3,N4,na >\n\n    : bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitand_< N1,N2,N3,na,na >\n\n    : bitand_< bitand_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitand_< N1,N2,na,na,na >\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  & BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n    : bitor_< bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitor_< 
N1,N2,N3,N4,na >\n\n    : bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitor_< N1,N2,N3,na,na >\n\n    : bitor_< bitor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitor_< N1,N2,na,na,na >\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  | BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n    : bitxor_< bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitxor_< N1,N2,N3,N4,na >\n\n    : bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitxor_< N1,N2,N3,na,na >\n\n    : bitxor_< bitxor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitxor_< N1,N2,na,na,na >\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  ^ BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque;\n\ntemplate<\n     \n    >\nstruct deque<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct deque<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct deque<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct deque<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct deque<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : 
vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename 
T6, typename T7, typename T8, typename T9\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, 
T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename vector17< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n    : divides< divides< divides< divides< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct divides< N1,N2,N3,N4,na 
>\n\n    : divides< divides< divides< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct divides< N1,N2,N3,na,na >\n\n    : divides< divides< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct divides< N1,N2,na,na,na >\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  / BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value  == BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 3,First,Last,State,ForwardOp >\n{\n   
 typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n{\n    typedef fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,First,Last,State,ForwardOp >\n    : fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Arity\n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>,Tag, int_< -1 > >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n        , int_<1>\n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename 
P1 > class F\n    , typename T1\n    , typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n        , int_<1>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n        , int_<2>\n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n        , int_<2>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    
typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n        , int_<3>\n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n        , int_<3>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, 
typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n        , Tag\n        , int_<4>\n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n        , int_<4>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    
typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n        , int_<5>\n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<5>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef 
typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<6>\n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>,Tag, int_<1> >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<6>\n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\ntemplate<\n      typename F\n    , typename Tag1\n    , typename Tag2\n    , typename Arity\n    >\nstruct lambda<\n          lambda< F,Tag1,Arity >\n        , Tag2\n        , int_<3>\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef bind1< quote1<aux::template_arity>, typename l1::result_ > arity_;\n    typedef lambda< typename if_< is_le,arity_,Arity >::type, Tag2 > l3;\n    typedef aux::le_result3<is_le, Tag2, mpl::lambda, l1, l2, l3> le_result_;\n    typedef typename le_result_::result_ 
result_;\n    typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 3, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value > BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace 
boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : T1, T2\n{\n    typedef inherit2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\ntemplate< typename T1 >\nstruct inherit2< T1,empty_base >\n{\n    typedef T1 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (T1, empty_base))\n};\n\ntemplate< typename T2 >\nstruct inherit2< empty_base,T2 >\n{\n    typedef T2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, T2))\n};\n\ntemplate<>\nstruct inherit2< empty_base,empty_base >\n{\n    typedef empty_base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, empty_base))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct 
inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 3,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State 
state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n{\n    typedef iter_fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,First,Last,State,ForwardOp >\n    : iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, 
typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N2)::value > BOOST_MPL_AUX_VALUE_WKND(N1)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl 
{\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list;\n\ntemplate<\n     \n    >\nstruct list<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list0<  >\n{\n    typedef list0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct list<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list1<T0>\n{\n    typedef typename list1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list2< T0,T1 >\n{\n    typedef typename list2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list3< T0,T1,T2 >\n{\n    typedef typename list3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list4< T0,T1,T2,T3 
>\n{\n    typedef typename list4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list5< T0,T1,T2,T3,T4 >\n{\n    typedef typename list5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename list6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename list7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename list8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    
>\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n 
       >\n    : list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : list15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename list15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : list16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename list16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : list17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename list17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n   
   typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : list18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename list18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : list19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename list19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list\n    : list20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename list20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c;\n\ntemplate<\n      typename T\n    >\nstruct list_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list0_c<T>\n{\n    typedef typename list0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct list_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list1_c< T,C0 >\n{\n    typedef typename list1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct list_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list2_c< T,C0,C1 >\n{\n    typedef typename list2_c< T,C0,C1 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct list_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list3_c< T,C0,C1,C2 >\n{\n    typedef typename list3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename list4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename list5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename list6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : 
list7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    typedef typename list7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long 
C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename list14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename list15_c< 
T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename list16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename list17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : list18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename list18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : list19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename list19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c\n    : list20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename list20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map;\n\ntemplate<\n     \n    >\nstruct map<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map0<  >\n{\n    typedef map0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct map<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map1<T0>\n{\n    typedef typename map1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct map<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map2< T0,T1 >\n{\n    typedef typename map2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct map<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map3< T0,T1,T2 >\n{\n    typedef typename map3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct map<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map4< T0,T1,T2,T3 >\n{\n    typedef 
typename map4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct map<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map5< T0,T1,T2,T3,T4 >\n{\n    typedef typename map5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename map6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename map7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename map8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct map<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : map14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename map14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : map15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename map15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : map16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename map16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : map17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename map17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : map18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename map18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : map19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename map19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct map\n    : map20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename map20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n    : minus< minus< minus< minus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct minus< N1,N2,N3,N4,na >\n\n    : minus< minus< minus< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct minus< N1,N2,N3,na,na >\n\n    : minus< minus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct minus< N1,N2,na,na,na >\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  - BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct modulus_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  % BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { 
namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl\n    : true_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl< false,T1,T2,T3,T4 >\n    : or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , false_\n        >\n{\n};\n\ntemplate<>\nstruct or_impl<\n          false\n        , false_, false_, false_, false_\n        >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n    : plus< plus< plus< plus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct plus< N1,N2,N3,N4,na >\n\n    : plus< plus< plus< N1,N2 >, N3>, 
N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct plus< N1,N2,N3,na,na >\n\n    : plus< plus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct plus< N1,N2,na,na,na >\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  + BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate< long N >\nstruct reverse_fold_chunk;\n\ntemplate<> struct reverse_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct 
result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 
iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename 
mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef reverse_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last 
>::type\n            , reverse_fold_null_step< Last,State >\n            , reverse_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step\n{\n    typedef reverse_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n    : reverse_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk;\n\ntemplate<> struct reverse_iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n       
 typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n   
 {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef 
reverse_iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , reverse_iter_fold_null_step< Last,State >\n            , reverse_iter_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step\n{\n    typedef reverse_iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< 
ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n    : reverse_iter_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set;\n\ntemplate<\n     \n    >\nstruct set<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set0<  >\n{\n    typedef set0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct set<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set1<T0>\n{\n    typedef typename set1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct set<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set2< T0,T1 >\n{\n    typedef typename set2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct set<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set3< T0,T1,T2 >\n{\n    typedef typename set3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct set<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set4< T0,T1,T2,T3 >\n{\n    typedef 
typename set4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct set<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set5< T0,T1,T2,T3,T4 >\n{\n    typedef typename set5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename set6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename set7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename set8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct set<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : set14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename set14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : set15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename set15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : set16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename set16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : set17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename set17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : set18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename set18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : set19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename set19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct set\n    : set20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename set20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c;\n\ntemplate<\n      typename T\n    >\nstruct set_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set0_c<T>\n{\n    typedef typename set0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct set_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set1_c< T,C0 >\n{\n    typedef typename set1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct set_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set2_c< T,C0,C1 >\n{\n    typedef typename set2_c< T,C0,C1 >::type type;\n};\n\ntemplate<\n  
    typename T, long C0, long C1, long C2\n    >\nstruct set_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set3_c< T,C0,C1,C2 >\n{\n    typedef typename set3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename set4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename set5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename set6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    
typedef typename set7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , 
long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename set14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename set15_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename set16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename set17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : set18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename set18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, 
long C15, long C16, long C17, long C18\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : set19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename set19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c\n    : set20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename set20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { 
namespace mpl {\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  << BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  >> BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< bool >\nstruct template_arity_impl\n{\n    template< typename F > struct result_\n        : mpl::int_< -1 >\n    {\n    };\n};\n\ntemplate<>\nstruct template_arity_impl<true>\n{\n    template< typename F > struct result_\n        : F::arity\n    {\n    };\n};\n\ntemplate< typename F >\nstruct template_arity\n    : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value >\n        ::template result_<F>\n{\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n    : times< times< times< times< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct times< N1,N2,N3,N4,na >\n\n    : times< times< times< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct times< N1,N2,N3,na,na >\n\n    : times< times< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct times< N1,N2,na,na,na >\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  * BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< int size, typename F, typename Args >\nstruct unpack_args_impl;\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 0,F,Args >\n    : apply0<\n          F\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 1,F,Args >\n    : apply1<\n          F\n        , typename at_c< Args,0 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 2,F,Args >\n    : apply2<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 3,F,Args >\n    : apply3<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 4,F,Args >\n    : apply4<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 5,F,Args >\n    : apply5<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        , typename at_c< Args,4 >::type\n        >\n{\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n    {\n        typedef typename aux::unpack_args_impl<\n              size<Args>::value\n            , F\n            , Args\n       
     >::type type;\n\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector;\n\ntemplate<\n     \n    >\nstruct vector<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct vector<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct vector<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct vector<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct vector<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        
>\n    : vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename 
T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct vector<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef 
typename vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc551/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c;\n\ntemplate<\n      typename T\n    >\nstruct vector_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector0_c<T>\n{\n    typedef typename vector0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct vector_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector1_c< T, T(C0) >\n{\n    typedef typename vector1_c< T, T(C0) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct vector_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector2_c< T, T(C0), T(C1) >\n{\n    typedef typename 
vector2_c< T, T(C0), T(C1) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct vector_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector3_c< T, T(C0), T(C1), T(C2) >\n{\n    typedef typename vector3_c< T, T(C0), T(C1), T(C2) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector4_c< T, T(C0), T(C1), T(C2), T(C3) >\n{\n    typedef typename vector4_c< T, T(C0), T(C1), T(C2), T(C3) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >\n{\n    typedef typename vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >\n{\n    typedef typename vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct vector_c<\n          
T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >\n{\n    typedef typename vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >\n{\n    typedef typename vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >\n{\n    typedef typename vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >\n{\n    typedef typename vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >::type type;\n};\n\ntemplate<\n      
typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >\n{\n    typedef typename vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >\n{\n    typedef typename vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >\n{\n    typedef typename vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, 
C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >\n{\n    typedef typename vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >\n{\n    typedef typename vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >\n{\n    typedef typename vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, 
C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >\n{\n    typedef typename vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >\n{\n    typedef typename vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >\n{\n    typedef typename vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, 
long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c\n    : vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >\n{\n    typedef typename vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_backward<4>\n  
          , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl\n    : false_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl< true,T1,T2,T3,T4 >\n    : and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , true_\n        >\n{\n};\n\ntemplate<>\nstruct and_impl<\n          true\n        , true_, true_, true_, true_\n        >\n    : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\ntemplate<\n      typename F\n    >\nstruct apply< F,na,na,na,na,na >\n    : apply0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply< F,T1,na,na,na,na >\n    : apply1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply< F,T1,T2,na,na,na >\n    : apply2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply< F,T1,T2,T3,na,na >\n    : apply3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct 
apply4\n\n    : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply< F,T1,T2,T3,T4,na >\n    : apply4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply\n    : apply5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply;\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      int N, typename F\n    >\nstruct apply_wrap_impl0;\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          0\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n/// since the defaults are \"lost\", we have to pass *something* even for nullary\n/// metafunction classes\n        na\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          1\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          2\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          3\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          4\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          5\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap0\n    : apply_wrap_impl0<\n          ::boost::mpl::aux::arity< F,0 >::value\n        , F\n       \n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, 
typename T1\n    >\nstruct apply_wrap_impl1;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          1\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          2\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          3\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          4\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          5\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap1\n    : apply_wrap_impl1<\n          ::boost::mpl::aux::arity< F,1 >::value\n        , F\n        , T1\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          2\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          3\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    
>\nstruct apply_wrap_impl2<\n          4\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          5\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap2\n    : apply_wrap_impl2<\n          ::boost::mpl::aux::arity< F,2 >::value\n        , F\n        , T1, T2\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          3\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          4\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          5\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap3\n    : apply_wrap_impl3<\n          ::boost::mpl::aux::arity< F,3 >::value\n        , F\n        , T1, T2, T3\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap_impl4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    
>\nstruct apply_wrap_impl4<\n          4\n        , F\n        , T1, T2, T3, T4\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap_impl4<\n          5\n        , F\n        , T1, T2, T3, T4\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap4\n    : apply_wrap_impl4<\n          ::boost::mpl::aux::arity< F,4 >::value\n        , F\n        , T1, T2, T3, T4\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap_impl5;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap_impl5<\n          5\n        , F\n        , T1, T2, T3, T4, T5\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4, T5\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap5\n    : apply_wrap_impl5<\n          ::boost::mpl::aux::arity< F,5 >::value\n        , F\n        , T1, T2, T3, T4, T5\n        >::type\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    
BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        
typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef 
aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, 
bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg< -1 >, Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace 
aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef 
aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename 
t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, 
typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4, typename U5\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename 
r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n    : bitand_< bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitand_< N1,N2,N3,N4,na >\n\n    : bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitand_< N1,N2,N3,na,na >\n\n    : bitand_< bitand_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitand_< N1,N2,na,na,na >\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  & BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n    : bitor_< bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitor_< 
N1,N2,N3,N4,na >\n\n    : bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitor_< N1,N2,N3,na,na >\n\n    : bitor_< bitor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitor_< N1,N2,na,na,na >\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  | BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n    : bitxor_< bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitxor_< N1,N2,N3,N4,na >\n\n    : bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitxor_< N1,N2,N3,na,na >\n\n    : bitxor_< bitxor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitxor_< N1,N2,na,na,na >\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  ^ BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque;\n\ntemplate<\n     \n    >\nstruct deque<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct deque<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct deque<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct deque<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct deque<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : 
vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename 
T6, typename T7, typename T8, typename T9\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, 
T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename vector17< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n    : divides< divides< divides< divides< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct divides< N1,N2,N3,N4,na 
>\n\n    : divides< divides< divides< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct divides< N1,N2,N3,na,na >\n\n    : divides< divides< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct divides< N1,N2,na,na,na >\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  / BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value  == BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 3,First,Last,State,ForwardOp >\n{\n    
typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n{\n    typedef fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,First,Last,State,ForwardOp >\n    : fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Arity\n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>,Tag, int_< -1 > >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n        , int_<1>\n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > 
class F\n    , typename T1\n    , typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n        , int_<1>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n        , int_<2>\n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n        , int_<2>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef 
typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n        , int_<3>\n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n        , int_<3>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n        , Tag\n        , int_<4>\n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n        , int_<4>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef 
typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n        , int_<5>\n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<5>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename 
l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<6>\n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>,Tag, int_<1> >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<6>\n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\ntemplate<\n      typename F\n    , typename Tag1\n    , typename Tag2\n    , typename Arity\n    >\nstruct lambda<\n          lambda< F,Tag1,Arity >\n        , Tag2\n        , int_<3>\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef bind1< quote1<aux::template_arity>, typename l1::result_ > arity_;\n    typedef lambda< typename if_< is_le,arity_,Arity >::type, Tag2 > l3;\n    typedef aux::le_result3<is_le, Tag2, mpl::lambda, l1, l2, l3> le_result_;\n    typedef typename le_result_::result_ result_;\n    
typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 3, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value > BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
greater_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : T1, T2\n{\n    typedef inherit2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\ntemplate< typename T1 >\nstruct inherit2< T1,empty_base >\n{\n    typedef T1 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (T1, empty_base))\n};\n\ntemplate< typename T2 >\nstruct inherit2< empty_base,T2 >\n{\n    typedef T2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, T2))\n};\n\ntemplate<>\nstruct inherit2< empty_base,empty_base >\n{\n    typedef empty_base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, empty_base))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct 
inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1, typename T2, typename T3, typename T4, typename T5\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 3,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State 
state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n{\n    typedef iter_fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,First,Last,State,ForwardOp >\n    : iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, 
typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N2)::value > BOOST_MPL_AUX_VALUE_WKND(N1)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl 
{\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list;\n\ntemplate<\n     \n    >\nstruct list<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list0<  >\n{\n    typedef list0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct list<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list1<T0>\n{\n    typedef typename list1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list2< T0,T1 >\n{\n    typedef typename list2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list3< T0,T1,T2 >\n{\n    typedef typename list3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list4< T0,T1,T2,T3 
>\n{\n    typedef typename list4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list5< T0,T1,T2,T3,T4 >\n{\n    typedef typename list5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename list6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename list7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename list8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    
>\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n 
       >\n    : list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : list15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename list15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : list16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename list16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : list17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename list17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n   
   typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : list18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename list18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : list19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename list19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list\n    : list20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename list20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c;\n\ntemplate<\n      typename T\n    >\nstruct list_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list0_c<T>\n{\n    typedef typename list0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct list_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list1_c< T,C0 >\n{\n    typedef typename list1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct list_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list2_c< T,C0,C1 >\n{\n    typedef typename list2_c< T,C0,C1 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct list_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list3_c< T,C0,C1,C2 >\n{\n    typedef typename list3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename list4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename list5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename list6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : 
list7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    typedef typename list7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long 
C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename list14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename list15_c< 
T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename list16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename list17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : list18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename list18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : list19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename list19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c\n    : list20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename list20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map;\n\ntemplate<\n     \n    >\nstruct map<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map0<  >\n{\n    typedef map0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct map<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map1<T0>\n{\n    typedef typename map1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct map<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map2< T0,T1 >\n{\n    typedef typename map2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct map<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map3< T0,T1,T2 >\n{\n    typedef typename map3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct map<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map4< T0,T1,T2,T3 >\n{\n    typedef 
typename map4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct map<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map5< T0,T1,T2,T3,T4 >\n{\n    typedef typename map5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename map6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename map7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename map8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct map<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : map14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename map14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : map15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename map15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : map16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename map16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : map17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename map17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : map18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename map18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : map19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename map19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct map\n    : map20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename map20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n    : minus< minus< minus< minus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct minus< N1,N2,N3,N4,na >\n\n    : minus< minus< minus< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct minus< N1,N2,N3,na,na >\n\n    : minus< minus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct minus< N1,N2,na,na,na >\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  - BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct modulus_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  % BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { 
namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl\n    : true_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl< false,T1,T2,T3,T4 >\n    : or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , false_\n        >\n{\n};\n\ntemplate<>\nstruct or_impl<\n          false\n        , false_, false_, false_, false_\n        >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n    : plus< plus< plus< plus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct plus< N1,N2,N3,N4,na >\n\n    : plus< plus< plus< N1,N2 >, N3>, 
N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct plus< N1,N2,N3,na,na >\n\n    : plus< plus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct plus< N1,N2,na,na,na >\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  + BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"quote.hpp\" header\n// -- DO NOT modify by hand!\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate< long N >\nstruct reverse_fold_chunk;\n\ntemplate<> struct reverse_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct 
result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 
iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename 
mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef reverse_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last 
>::type\n            , reverse_fold_null_step< Last,State >\n            , reverse_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step\n{\n    typedef reverse_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n    : reverse_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk;\n\ntemplate<> struct reverse_iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        
typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    
{\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef 
reverse_iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , reverse_iter_fold_null_step< Last,State >\n            , reverse_iter_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step\n{\n    typedef reverse_iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< 
ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n    : reverse_iter_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set;\n\ntemplate<\n     \n    >\nstruct set<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set0<  >\n{\n    typedef set0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct set<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set1<T0>\n{\n    typedef typename set1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct set<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set2< T0,T1 >\n{\n    typedef typename set2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct set<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set3< T0,T1,T2 >\n{\n    typedef typename set3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct set<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set4< T0,T1,T2,T3 >\n{\n    typedef 
typename set4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct set<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set5< T0,T1,T2,T3,T4 >\n{\n    typedef typename set5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename set6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename set7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename set8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct set<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : set14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename set14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : set15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename set15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : set16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename set16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : set17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename set17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : set18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename set18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : set19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename set19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct set\n    : set20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename set20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c;\n\ntemplate<\n      typename T\n    >\nstruct set_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set0_c<T>\n{\n    typedef typename set0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct set_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set1_c< T,C0 >\n{\n    typedef typename set1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct set_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set2_c< T,C0,C1 >\n{\n    typedef typename set2_c< T,C0,C1 >::type type;\n};\n\ntemplate<\n 
     typename T, long C0, long C1, long C2\n    >\nstruct set_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set3_c< T,C0,C1,C2 >\n{\n    typedef typename set3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename set4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename set5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename set6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    
typedef typename set7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , 
long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename set14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename set15_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename set16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename set17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : set18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename set18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, 
long C15, long C16, long C17, long C18\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : set19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename set19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c\n    : set20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename set20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { 
namespace mpl {\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  << BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  >> BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< bool >\nstruct template_arity_impl\n{\n    template< typename F > struct result_\n        : mpl::int_< -1 >\n    {\n    };\n};\n\ntemplate<>\nstruct template_arity_impl<true>\n{\n    template< typename F > struct result_\n        : F::arity\n    {\n    };\n};\n\ntemplate< typename F >\nstruct template_arity\n    : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value >\n        ::template result_<F>\n{\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n    : times< times< times< times< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct times< N1,N2,N3,N4,na >\n\n    : times< times< times< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct times< N1,N2,N3,na,na >\n\n    : times< times< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct times< N1,N2,na,na,na >\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  * BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< int size, typename F, typename Args >\nstruct unpack_args_impl;\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 0,F,Args >\n    : apply0<\n          F\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 1,F,Args >\n    : apply1<\n          F\n        , typename at_c< Args,0 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 2,F,Args >\n    : apply2<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 3,F,Args >\n    : apply3<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 4,F,Args >\n    : apply4<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 5,F,Args >\n    : apply5<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        , typename at_c< Args,4 >::type\n        >\n{\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n    {\n        typedef typename aux::unpack_args_impl<\n              size<Args>::value\n            , F\n            , Args\n      
      >::type type;\n\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector;\n\ntemplate<\n     \n    >\nstruct vector<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct vector<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct vector<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct vector<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct vector<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        
>\n    : vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename 
T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct vector<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef 
typename vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/bcc_pre590/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c;\n\ntemplate<\n      typename T\n    >\nstruct vector_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector0_c<T>\n{\n    typedef typename vector0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct vector_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector1_c< T, T(C0) >\n{\n    typedef typename vector1_c< T, T(C0) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct vector_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector2_c< T, T(C0), T(C1) >\n{\n    typedef typename 
vector2_c< T, T(C0), T(C1) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct vector_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector3_c< T, T(C0), T(C1), T(C2) >\n{\n    typedef typename vector3_c< T, T(C0), T(C1), T(C2) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector4_c< T, T(C0), T(C1), T(C2), T(C3) >\n{\n    typedef typename vector4_c< T, T(C0), T(C1), T(C2), T(C3) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >\n{\n    typedef typename vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >\n{\n    typedef typename vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct vector_c<\n          
T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >\n{\n    typedef typename vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >\n{\n    typedef typename vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >\n{\n    typedef typename vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >\n{\n    typedef typename vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >::type type;\n};\n\ntemplate<\n      
typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >\n{\n    typedef typename vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >\n{\n    typedef typename vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >\n{\n    typedef typename vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, 
C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >\n{\n    typedef typename vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >\n{\n    typedef typename vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >\n{\n    typedef typename vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, 
C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >\n{\n    typedef typename vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >\n{\n    typedef typename vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >\n{\n    typedef typename vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, 
long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c\n    : vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >\n{\n    typedef typename vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              
advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl\n    : false_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl< true,T1,T2,T3,T4 >\n    : and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , true_\n        >\n{\n};\n\ntemplate<>\nstruct and_impl<\n          true\n        , true_, true_, true_, true_\n        >\n    : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\ntemplate<\n      typename F\n    >\nstruct apply< F,na,na,na,na,na >\n    : apply0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply< F,T1,na,na,na,na >\n    : apply1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply< F,T1,T2,na,na,na >\n    : apply2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply< F,T1,T2,T3,na,na >\n    : apply3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4\n\n 
   : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply< F,T1,T2,T3,T4,na >\n    : apply4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply\n    : apply5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply;\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n\n    , typename has_apply_ = typename aux::has_apply<F>::type\n\n    >\nstruct apply_wrap0\n\n    : F::template apply<  >\n{\n};\n\ntemplate< typename F >\nstruct apply_wrap0< F,true_ >\n    : F::apply\n{\n};\n\ntemplate<\n      typename F, typename T1\n\n    >\nstruct apply_wrap1\n\n    : F::template apply<T1>\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n\n    >\nstruct apply_wrap2\n\n    : F::template apply< T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n\n    >\nstruct apply_wrap3\n\n    : F::template apply< T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n\n    >\nstruct apply_wrap4\n\n    : F::template apply< T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n\n    >\nstruct apply_wrap5\n\n    : F::template apply< T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, int dummy_\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, 
bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, int dummy_\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1, int dummy_\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, int dummy_\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, int dummy_\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    
, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, int dummy_\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, int dummy_\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, int dummy_\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , int dummy_\n    >\nstruct bind4\n{\n    template<\n          typename U1 = 
na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , int dummy_\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, int dummy_\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n   
     typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, int dummy_\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n/// if_/eval_if specializations\ntemplate< template< typename T1, typename T2, typename T3 > class F, typename Tag >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct if_;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< if_,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef typename if_<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg< -1 >, Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, int dummy_\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef 
typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, int dummy_\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1, int dummy_\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, int dummy_\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n   
   typename F, typename T1, typename T2, int dummy_\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, int dummy_\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, int dummy_\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > 
r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, int dummy_\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , int dummy_\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename 
r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , int dummy_\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, int dummy_\n    >\nstruct bind5\n{\n 
   template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef 
bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, int dummy_\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n/// if_/eval_if specializations\ntemplate< template< typename T1, typename T2, typename T3 > class F, typename Tag >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct if_;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< if_,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef typename if_<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, int dummy_ = 0\n    >\nstruct bind;\n\ntemplate<\n      typename F, int dummy_ = 0\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1, int dummy_ = 0\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2, int dummy_ = 0\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, int dummy_ = 0\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , int dummy_ = 0\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, int dummy_ = 0\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n    : bitand_< bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitand_< N1,N2,N3,N4,na >\n\n    : bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitand_< N1,N2,N3,na,na >\n\n    : bitand_< bitand_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitand_< N1,N2,na,na,na >\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  & BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n    : bitor_< bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitor_< 
N1,N2,N3,N4,na >\n\n    : bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitor_< N1,N2,N3,na,na >\n\n    : bitor_< bitor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitor_< N1,N2,na,na,na >\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  | BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n    : bitxor_< bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitxor_< N1,N2,N3,N4,na >\n\n    : bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitxor_< N1,N2,N3,na,na >\n\n    : bitxor_< bitxor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitxor_< N1,N2,na,na,na >\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  ^ BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque;\n\ntemplate<\n     \n    >\nstruct deque<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct deque<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct deque<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct deque<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct deque<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : 
vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename 
T6, typename T7, typename T8, typename T9\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, 
T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename vector17< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n    : divides< divides< divides< divides< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct divides< N1,N2,N3,N4,na 
>\n\n    : divides< divides< divides< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct divides< N1,N2,N3,na,na >\n\n    : divides< divides< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct divides< N1,N2,na,na,na >\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  / BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value  == BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 3,First,Last,State,ForwardOp >\n{\n   
 typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n{\n    typedef fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,First,Last,State,ForwardOp >\n    : fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n   \n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>, Tag >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > class F\n    , typename T1\n    , 
typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename 
le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n     
   , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          
is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>, Tag >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value > BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace 
boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : T1, T2\n{\n    typedef inherit2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\ntemplate< typename T1 >\nstruct inherit2< T1,empty_base >\n{\n    typedef T1 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (T1, empty_base))\n};\n\ntemplate< typename T2 >\nstruct inherit2< empty_base,T2 >\n{\n    typedef T2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, T2))\n};\n\ntemplate<>\nstruct inherit2< empty_base,empty_base >\n{\n    typedef empty_base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, empty_base))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct 
inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 3,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State 
state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n{\n    typedef iter_fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,First,Last,State,ForwardOp >\n    : iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, 
typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N2)::value > BOOST_MPL_AUX_VALUE_WKND(N1)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl 
{\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list;\n\ntemplate<\n     \n    >\nstruct list<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list0<  >\n{\n    typedef list0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct list<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list1<T0>\n{\n    typedef typename list1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list2< T0,T1 >\n{\n    typedef typename list2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list3< T0,T1,T2 >\n{\n    typedef typename list3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list4< T0,T1,T2,T3 
>\n{\n    typedef typename list4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list5< T0,T1,T2,T3,T4 >\n{\n    typedef typename list5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename list6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename list7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename list8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    
>\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n 
       >\n    : list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : list15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename list15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : list16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename list16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : list17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename list17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n   
   typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : list18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename list18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : list19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename list19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list\n    : list20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename list20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c;\n\ntemplate<\n      typename T\n    >\nstruct list_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list0_c<T>\n{\n    typedef typename list0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct list_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list1_c< T,C0 >\n{\n    typedef typename list1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct list_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list2_c< T,C0,C1 >\n{\n    typedef typename list2_c< T,C0,C1 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct list_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list3_c< T,C0,C1,C2 >\n{\n    typedef typename list3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename list4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename list5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename list6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : 
list7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    typedef typename list7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long 
C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename list14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename list15_c< 
T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename list16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename list17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : list18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename list18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : list19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename list19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c\n    : list20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename list20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map;\n\ntemplate<\n     \n    >\nstruct map<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map0<  >\n{\n    typedef map0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct map<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map1<T0>\n{\n    typedef typename map1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct map<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map2< T0,T1 >\n{\n    typedef typename map2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct map<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map3< T0,T1,T2 >\n{\n    typedef typename map3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct map<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map4< T0,T1,T2,T3 >\n{\n    typedef 
typename map4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct map<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map5< T0,T1,T2,T3,T4 >\n{\n    typedef typename map5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename map6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename map7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename map8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct map<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : map14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename map14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : map15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename map15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : map16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename map16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : map17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename map17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : map18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename map18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : map19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename map19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct map\n    : map20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename map20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n    : minus< minus< minus< minus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct minus< N1,N2,N3,N4,na >\n\n    : minus< minus< minus< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct minus< N1,N2,N3,na,na >\n\n    : minus< minus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct minus< N1,N2,na,na,na >\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  - BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct modulus_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  % BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { 
namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl\n    : true_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl< false,T1,T2,T3,T4 >\n    : or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , false_\n        >\n{\n};\n\ntemplate<>\nstruct or_impl<\n          false\n        , false_, false_, false_, false_\n        >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n    : plus< plus< plus< plus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct plus< N1,N2,N3,N4,na >\n\n    : plus< plus< plus< N1,N2 >, N3>, 
N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct plus< N1,N2,N3,na,na >\n\n    : plus< plus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct plus< N1,N2,na,na,na >\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  + BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T, bool has_type_ >\nstruct quote_impl\n    : T\n{\n};\n\ntemplate< typename T >\nstruct quote_impl< T,false >\n{\n    typedef T type;\n};\n\ntemplate<\n      template< typename P1 > class F\n    , typename Tag = void_\n    >\nstruct quote1\n{\n    template< typename U1 > struct apply\n\n        : quote_impl<\n              F<U1>\n            , aux::has_type< F<U1> >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename Tag = void_\n    >\nstruct quote2\n{\n    template< typename U1, typename U2 > struct apply\n\n        : quote_impl<\n              F< U1,U2 >\n            , aux::has_type< F< U1,U2 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename Tag = void_\n    >\nstruct quote3\n{\n    template< typename U1, typename U2, typename U3 > struct apply\n\n        : quote_impl<\n              F< U1,U2,U3 >\n            , aux::has_type< F< U1,U2,U3 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename Tag = void_\n    >\nstruct quote4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        >\n    struct apply\n\n        : quote_impl<\n              F< U1,U2,U3,U4 >\n            , aux::has_type< F< U1,U2,U3,U4 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename 
Tag = void_\n    >\nstruct quote5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        , typename U5\n        >\n    struct apply\n\n        : quote_impl<\n              F< U1,U2,U3,U4,U5 >\n            , aux::has_type< F< U1,U2,U3,U4,U5 > >::value\n            >\n\n    {\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename 
mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename 
deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_fold_impl<\n          ( (N - 4) < 0 
? 0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< 
ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type 
fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set;\n\ntemplate<\n     \n    >\nstruct set<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set0<  >\n{\n    typedef set0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct set<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set1<T0>\n{\n    typedef typename set1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct set<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set2< T0,T1 >\n{\n    typedef typename set2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct set<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set3< T0,T1,T2 >\n{\n    typedef typename set3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct set<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set4< T0,T1,T2,T3 >\n{\n    typedef 
typename set4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct set<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set5< T0,T1,T2,T3,T4 >\n{\n    typedef typename set5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename set6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename set7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename set8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct set<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : set14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename set14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : set15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename set15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : set16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename set16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : set17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename set17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : set18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename set18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : set19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename set19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct set\n    : set20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename set20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c;\n\ntemplate<\n      typename T\n    >\nstruct set_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set0_c<T>\n{\n    typedef typename set0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct set_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set1_c< T,C0 >\n{\n    typedef typename set1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct set_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set2_c< T,C0,C1 >\n{\n    typedef typename set2_c< T,C0,C1 >::type type;\n};\n\ntemplate<\n  
    typename T, long C0, long C1, long C2\n    >\nstruct set_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set3_c< T,C0,C1,C2 >\n{\n    typedef typename set3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename set4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename set5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename set6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    
typedef typename set7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , 
long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename set14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename set15_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename set16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename set17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : set18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename set18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, 
long C15, long C16, long C17, long C18\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : set19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename set19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c\n    : set20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename set20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { 
namespace mpl {\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  << BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  >> BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n    : times< times< times< times< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct times< N1,N2,N3,N4,na >\n\n    : times< times< times< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct times< N1,N2,N3,na,na >\n\n    : times< times< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct times< N1,N2,na,na,na >\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  * BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< int size, typename F, typename Args >\nstruct unpack_args_impl;\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 0,F,Args >\n    : apply0<\n          F\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 1,F,Args >\n    : apply1<\n          F\n        , typename at_c< Args,0 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 2,F,Args >\n    : apply2<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 3,F,Args >\n    : apply3<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 4,F,Args >\n    : apply4<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 5,F,Args >\n    : apply5<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        , typename at_c< Args,4 >::type\n        >\n{\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n\n        : aux::unpack_args_impl< size<Args>::value,F, Args >\n\n    {\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, 
unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector;\n\ntemplate<\n     \n    >\nstruct vector<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct vector<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct vector<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct vector<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct vector<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        
>\n    : vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename 
T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct vector<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef 
typename vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/dmc/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c;\n\ntemplate<\n      typename T\n    >\nstruct vector_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector0_c<T>\n{\n    typedef typename vector0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct vector_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector1_c< T, T(C0) >\n{\n    typedef typename vector1_c< T, T(C0) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct vector_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector2_c< T, T(C0), T(C1) >\n{\n    typedef typename 
vector2_c< T, T(C0), T(C1) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct vector_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector3_c< T, T(C0), T(C1), T(C2) >\n{\n    typedef typename vector3_c< T, T(C0), T(C1), T(C2) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector4_c< T, T(C0), T(C1), T(C2), T(C3) >\n{\n    typedef typename vector4_c< T, T(C0), T(C1), T(C2), T(C3) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >\n{\n    typedef typename vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >\n{\n    typedef typename vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct vector_c<\n          
T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >\n{\n    typedef typename vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >\n{\n    typedef typename vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >\n{\n    typedef typename vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >\n{\n    typedef typename vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >::type type;\n};\n\ntemplate<\n      
typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >\n{\n    typedef typename vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >\n{\n    typedef typename vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >\n{\n    typedef typename vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, 
C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >\n{\n    typedef typename vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >\n{\n    typedef typename vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >\n{\n    typedef typename vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, 
C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >\n{\n    typedef typename vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >\n{\n    typedef typename vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >\n{\n    typedef typename vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, 
long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c\n    : vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >\n{\n    typedef typename vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              
advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl\n    : false_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl< true,T1,T2,T3,T4 >\n    : and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , true_\n        >\n{\n};\n\ntemplate<>\nstruct and_impl<\n          true\n        , true_, true_, true_, true_\n        >\n    : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\ntemplate<\n      typename F\n    >\nstruct apply< F,na,na,na,na,na >\n    : apply0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply< F,T1,na,na,na,na >\n    : apply1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply< F,T1,T2,na,na,na >\n    : apply2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply< F,T1,T2,T3,na,na >\n    : apply3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4\n\n 
   : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply< F,T1,T2,T3,T4,na >\n    : apply4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply\n    : apply5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply;\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n\n    , typename has_apply_ = typename aux::has_apply<F>::type\n\n    >\nstruct apply_wrap0\n\n    : F::template apply<  >\n{\n};\n\ntemplate< typename F >\nstruct apply_wrap0< F,true_ >\n    : F::apply\n{\n};\n\ntemplate<\n      typename F, typename T1\n\n    >\nstruct apply_wrap1\n\n    : F::template apply<T1>\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n\n    >\nstruct apply_wrap2\n\n    : F::template apply< T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n\n    >\nstruct apply_wrap3\n\n    : F::template apply< T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n\n    >\nstruct apply_wrap4\n\n    : F::template apply< T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n\n    >\nstruct apply_wrap5\n\n    : F::template apply< T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, 
bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    
>\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    
struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              
f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n/// if_/eval_if specializations\ntemplate< template< typename T1, typename T2, typename T3 > class F, typename Tag >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct if_;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< if_,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef typename if_<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\ntemplate<\n      template< typename T1, typename T2, typename T3 > class F, typename Tag\n    >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct 
eval_if;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< eval_if,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef typename eval_if<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg< -1 >, Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename 
apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    
>\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n  
      typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< 
a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n       
 >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace 
aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n/// if_/eval_if specializations\ntemplate< template< typename T1, typename T2, typename T3 > class F, typename Tag >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct if_;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< if_,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef typename if_<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\ntemplate<\n      template< typename T1, typename T2, typename T3 > class F, typename Tag\n    >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct eval_if;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< eval_if,Tag >\n    , T1, T2, T3\n    
>\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef typename eval_if<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct bind;\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n    : bitand_< bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitand_< N1,N2,N3,N4,na >\n\n    : bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitand_< N1,N2,N3,na,na >\n\n    : bitand_< bitand_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitand_< N1,N2,na,na,na >\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  & BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n    : bitor_< bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitor_< 
N1,N2,N3,N4,na >\n\n    : bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitor_< N1,N2,N3,na,na >\n\n    : bitor_< bitor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitor_< N1,N2,na,na,na >\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  | BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n    : bitxor_< bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitxor_< N1,N2,N3,N4,na >\n\n    : bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitxor_< N1,N2,N3,na,na >\n\n    : bitxor_< bitxor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitxor_< N1,N2,na,na,na >\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  ^ BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque;\n\ntemplate<\n     \n    >\nstruct deque<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct deque<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct deque<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct deque<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct deque<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : 
vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename 
T6, typename T7, typename T8, typename T9\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, 
T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename vector17< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n    : divides< divides< divides< divides< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct divides< N1,N2,N3,N4,na 
>\n\n    : divides< divides< divides< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct divides< N1,N2,N3,na,na >\n\n    : divides< divides< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct divides< N1,N2,na,na,na >\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  / BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value  == BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 3,First,Last,State,ForwardOp >\n{\n   
 typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n{\n    typedef fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,First,Last,State,ForwardOp >\n    : fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Arity\n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>,Tag, int_< -1 > >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n        , int_<1>\n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename 
P1 > class F\n    , typename T1\n    , typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n        , int_<1>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n        , int_<2>\n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n        , int_<2>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    
typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n        , int_<3>\n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n        , int_<3>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, 
typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n        , Tag\n        , int_<4>\n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n        , int_<4>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    
typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n        , int_<5>\n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<5>\n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef 
typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<6>\n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>,Tag, int_<1> >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n        , int_<6>\n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\ntemplate<\n      typename F\n    , typename Tag1\n    , typename Tag2\n    , typename Arity\n    >\nstruct lambda<\n          lambda< F,Tag1,Arity >\n        , Tag2\n        , int_<3>\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef bind1< quote1<aux::template_arity>, typename l1::result_ > arity_;\n    typedef lambda< typename if_< is_le,arity_,Arity >::type, Tag2 > l3;\n    typedef aux::le_result3<is_le, Tag2, mpl::lambda, l1, l2, l3> le_result_;\n    typedef typename le_result_::result_ 
result_;\n    typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 3, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value > BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace 
boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : T1, T2\n{\n    typedef inherit2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\ntemplate< typename T1 >\nstruct inherit2< T1,empty_base >\n{\n    typedef T1 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (T1, empty_base))\n};\n\ntemplate< typename T2 >\nstruct inherit2< empty_base,T2 >\n{\n    typedef T2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, T2))\n};\n\ntemplate<>\nstruct inherit2< empty_base,empty_base >\n{\n    typedef empty_base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, empty_base))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct 
inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 3,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State 
state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n{\n    typedef iter_fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,First,Last,State,ForwardOp >\n    : iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, 
typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N2)::value > BOOST_MPL_AUX_VALUE_WKND(N1)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl 
{\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list;\n\ntemplate<\n     \n    >\nstruct list<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list0<  >\n{\n    typedef list0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct list<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list1<T0>\n{\n    typedef typename list1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list2< T0,T1 >\n{\n    typedef typename list2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list3< T0,T1,T2 >\n{\n    typedef typename list3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list4< T0,T1,T2,T3 
>\n{\n    typedef typename list4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list5< T0,T1,T2,T3,T4 >\n{\n    typedef typename list5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename list6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename list7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename list8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    
>\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n 
       >\n    : list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : list15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename list15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : list16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename list16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : list17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename list17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n   
   typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : list18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename list18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : list19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename list19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list\n    : list20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename list20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c;\n\ntemplate<\n      typename T\n    >\nstruct list_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list0_c<T>\n{\n    typedef typename list0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct list_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list1_c< T,C0 >\n{\n    typedef typename list1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct list_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list2_c< T,C0,C1 >\n{\n    typedef typename list2_c< T,C0,C1 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct list_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list3_c< T,C0,C1,C2 >\n{\n    typedef typename list3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename list4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename list5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename list6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : 
list7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    typedef typename list7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long 
C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename list14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename list15_c< 
T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename list16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename list17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : list18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename list18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : list19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename list19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c\n    : list20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename list20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map;\n\ntemplate<\n     \n    >\nstruct map<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map0<  >\n{\n    typedef map0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct map<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map1<T0>\n{\n    typedef typename map1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct map<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map2< T0,T1 >\n{\n    typedef typename map2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct map<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map3< T0,T1,T2 >\n{\n    typedef typename map3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct map<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map4< T0,T1,T2,T3 >\n{\n    typedef 
typename map4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct map<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map5< T0,T1,T2,T3,T4 >\n{\n    typedef typename map5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename map6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename map7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename map8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct map<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : map14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename map14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : map15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename map15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : map16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename map16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : map17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename map17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : map18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename map18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : map19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename map19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct map\n    : map20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename map20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n    : minus< minus< minus< minus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct minus< N1,N2,N3,N4,na >\n\n    : minus< minus< minus< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct minus< N1,N2,N3,na,na >\n\n    : minus< minus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct minus< N1,N2,na,na,na >\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  - BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct modulus_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  % BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { 
namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl\n    : true_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl< false,T1,T2,T3,T4 >\n    : or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , false_\n        >\n{\n};\n\ntemplate<>\nstruct or_impl<\n          false\n        , false_, false_, false_, false_\n        >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n    : plus< plus< plus< plus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct plus< N1,N2,N3,N4,na >\n\n    : plus< plus< plus< N1,N2 >, N3>, 
N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct plus< N1,N2,N3,na,na >\n\n    : plus< plus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct plus< N1,N2,na,na,na >\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  + BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T, bool has_type_ >\nstruct quote_impl\n{\n    typedef typename T::type type;\n};\n\ntemplate< typename T >\nstruct quote_impl< T,false >\n{\n    typedef T type;\n};\n\ntemplate<\n      template< typename P1 > class F\n    , typename Tag = void_\n    >\nstruct quote1\n{\n    template< typename U1 > struct apply\n\n        : quote_impl<\n              F<U1>\n            , aux::has_type< F<U1> >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename Tag = void_\n    >\nstruct quote2\n{\n    template< typename U1, typename U2 > struct apply\n\n        : quote_impl<\n              F< U1,U2 >\n            , aux::has_type< F< U1,U2 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename Tag = void_\n    >\nstruct quote3\n{\n    template< typename U1, typename U2, typename U3 > struct apply\n\n        : quote_impl<\n              F< U1,U2,U3 >\n            , aux::has_type< F< U1,U2,U3 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename Tag = void_\n    >\nstruct quote4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        >\n    struct apply\n\n        : quote_impl<\n              F< U1,U2,U3,U4 >\n            , aux::has_type< F< U1,U2,U3,U4 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n     
 class F\n    , typename Tag = void_\n    >\nstruct quote5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        , typename U5\n        >\n    struct apply\n\n        : quote_impl<\n              F< U1,U2,U3,U4,U5 >\n            , aux::has_type< F< U1,U2,U3,U4,U5 > >::value\n            >\n\n    {\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename 
mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename 
deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_fold_impl<\n          ( (N - 4) < 0 
? 0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< 
ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type 
fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set;\n\ntemplate<\n     \n    >\nstruct set<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set0<  >\n{\n    typedef set0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct set<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set1<T0>\n{\n    typedef typename set1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct set<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set2< T0,T1 >\n{\n    typedef typename set2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct set<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set3< T0,T1,T2 >\n{\n    typedef typename set3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct set<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set4< T0,T1,T2,T3 >\n{\n    typedef 
typename set4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct set<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set5< T0,T1,T2,T3,T4 >\n{\n    typedef typename set5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename set6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename set7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename set8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct set<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : set14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename set14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : set15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename set15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : set16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename set16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : set17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename set17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : set18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename set18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : set19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename set19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct set\n    : set20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename set20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c;\n\ntemplate<\n      typename T\n    >\nstruct set_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set0_c<T>\n{\n    typedef typename set0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct set_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set1_c< T,C0 >\n{\n    typedef typename set1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct set_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set2_c< T,C0,C1 >\n{\n    typedef typename set2_c< T,C0,C1 >::type type;\n};\n\ntemplate<\n  
    typename T, long C0, long C1, long C2\n    >\nstruct set_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set3_c< T,C0,C1,C2 >\n{\n    typedef typename set3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename set4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename set5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename set6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    
typedef typename set7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , 
long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename set14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename set15_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename set16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename set17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : set18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename set18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, 
long C15, long C16, long C17, long C18\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : set19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename set19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c\n    : set20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename set20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { 
namespace mpl {\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  << BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  >> BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// *Preprocessed* version of the main \"template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\ntemplate< int N > struct arity_tag\n{\n    typedef char (&type)[N + 1];\n};\n\ntemplate<\n      int C1, int C2, int C3, int C4, int C5, int C6\n    >\nstruct max_arity\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          ( C6 > 0 ? C6 : ( C5 > 0 ? C5 : ( C4 > 0 ? C4 : ( C3 > 0 ? C3 : ( C2 > 0 ? C2 : ( C1 > 0 ? C1 : -1 ) ) ) ) ) )\n        );\n};\n\narity_tag<0>::type arity_helper(...);\n\ntemplate<\n      template< typename P1 > class F\n    , typename T1\n    >\ntypename arity_tag<1>::type\narity_helper(type_wrapper< F<T1> >, arity_tag<1>);\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    >\ntypename arity_tag<2>::type\narity_helper(type_wrapper< F< T1,T2 > >, arity_tag<2>);\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    >\ntypename arity_tag<3>::type\narity_helper(type_wrapper< F< T1,T2,T3 > >, arity_tag<3>);\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    >\ntypename arity_tag<4>::type\narity_helper(type_wrapper< F< T1,T2,T3,T4 > >, arity_tag<4>);\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    >\ntypename arity_tag<5>::type\narity_helper(type_wrapper< F< T1,T2,T3,T4,T5 > >, arity_tag<5>);\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n     
   , typename P5, typename P6\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6\n    >\ntypename arity_tag<6>::type\narity_helper(type_wrapper< F< T1,T2,T3,T4,T5,T6 > >, arity_tag<6>);\ntemplate< typename F, int N >\nstruct template_arity_impl\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          sizeof(::boost::mpl::aux::arity_helper(type_wrapper<F>(), arity_tag<N>())) - 1\n        );\n};\n\ntemplate< typename F >\nstruct template_arity\n{\n    BOOST_STATIC_CONSTANT(int, value  = (\n          max_arity< template_arity_impl< F,1 >::value, template_arity_impl< F,2 >::value, template_arity_impl< F,3 >::value, template_arity_impl< F,4 >::value, template_arity_impl< F,5 >::value, template_arity_impl< F,6 >::value >::value\n        ));\n    typedef mpl::int_<value> type;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n    : times< times< times< times< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct times< N1,N2,N3,N4,na >\n\n    : times< times< times< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct times< N1,N2,N3,na,na >\n\n    : times< times< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct times< N1,N2,na,na,na >\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  * BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< int size, typename F, typename Args >\nstruct unpack_args_impl;\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 0,F,Args >\n    : apply0<\n          F\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 1,F,Args >\n    : apply1<\n          F\n        , typename at_c< Args,0 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 2,F,Args >\n    : apply2<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 3,F,Args >\n    : apply3<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 4,F,Args >\n    : apply4<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 5,F,Args >\n    : apply5<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        , typename at_c< Args,4 >::type\n        >\n{\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n\n        : aux::unpack_args_impl< size<Args>::value,F, Args >\n\n    {\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, 
unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector;\n\ntemplate<\n     \n    >\nstruct vector<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct vector<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct vector<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct vector<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct vector<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        
>\n    : vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename 
T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct vector<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef 
typename vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/gcc/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c;\n\ntemplate<\n      typename T\n    >\nstruct vector_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector0_c<T>\n{\n    typedef typename vector0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct vector_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector1_c< T, T(C0) >\n{\n    typedef typename vector1_c< T, T(C0) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct vector_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector2_c< T, T(C0), T(C1) >\n{\n    typedef typename 
vector2_c< T, T(C0), T(C1) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct vector_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector3_c< T, T(C0), T(C1), T(C2) >\n{\n    typedef typename vector3_c< T, T(C0), T(C1), T(C2) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector4_c< T, T(C0), T(C1), T(C2), T(C3) >\n{\n    typedef typename vector4_c< T, T(C0), T(C1), T(C2), T(C3) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >\n{\n    typedef typename vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >\n{\n    typedef typename vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct vector_c<\n          
T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >\n{\n    typedef typename vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >\n{\n    typedef typename vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >\n{\n    typedef typename vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >\n{\n    typedef typename vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >::type type;\n};\n\ntemplate<\n      
typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >\n{\n    typedef typename vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >\n{\n    typedef typename vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >\n{\n    typedef typename vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, 
C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >\n{\n    typedef typename vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >\n{\n    typedef typename vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >\n{\n    typedef typename vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, 
C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >\n{\n    typedef typename vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >\n{\n    typedef typename vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >\n{\n    typedef typename vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, 
long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c\n    : vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >\n{\n    typedef typename vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        
typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef 
typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n\n    /// ETI workaround\n    template<> struct apply<int>\n    {\n        typedef int type;\n    };\n\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool C_ > struct and_impl\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : false_\n    {\n    };\n};\n\ntemplate<> struct and_impl<true>\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : and_impl<\n              BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n            >::template result_< T2,T3,T4,true_ >\n    {\n    };\n};\n\ntemplate<>\nstruct and_impl<true>\n    ::result_< true_,true_,true_,true_ >\n        : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        >::template result_< T2,T3,T4,T5 >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n{\n    typedef typename apply_wrap0<\n          typename lambda<F>::type\n       \n        >::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply0<int>\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n{\n    typedef typename apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply1< int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n{\n    typedef typename apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply2< int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n{\n    typedef typename apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply3< int,int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename 
T4\n    >\nstruct apply4\n\n{\n    typedef typename apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply4< int,int,int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n{\n    typedef typename apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply5< int,int,int,int,int,int >\n{\n    typedef int type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate< typename F>\nstruct msvc_apply0\n{\n    template< bool > struct f_ : F {};\n    template<> struct f_<true>\n    {\n        template< typename P  = int > struct apply\n        {\n            typedef int type;\n        };\n    };\n\n    template< typename T  = int > struct result_\n        : f_< aux::msvc_never_true<F>::value >\n            ::template apply<>\n    {\n    };\n\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap0\n{\n    typedef typename msvc_apply0<F>::template result_<\n         \n        >::type type;\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap0<int>\n{\n    typedef int type;\n};\n\ntemplate< typename F>\nstruct msvc_apply1\n{\n    template< bool > struct f_ : F {};\n    template<> struct f_<true>\n    {\n        template< typename P1 > struct apply\n        {\n            typedef int type;\n        };\n    };\n\n    template< typename T1 > struct result_\n        : f_< aux::msvc_never_true<F>::value >\n            ::template apply<T1>\n    {\n    };\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap1\n{\n    typedef typename msvc_apply1<F>::template result_<\n          T1\n        >::type type;\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap1< int,int >\n{\n    typedef int type;\n};\n\ntemplate< typename F>\nstruct msvc_apply2\n{\n    template< bool > struct f_ : F {};\n    template<> struct f_<true>\n    {\n        template< typename P1, typename P2 > struct apply\n        {\n            typedef int type;\n        };\n    };\n\n    template< typename T1, typename T2 > struct result_\n        : f_< 
aux::msvc_never_true<F>::value >\n            ::template apply< T1,T2 >\n    {\n    };\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap2\n{\n    typedef typename msvc_apply2<F>::template result_<\n          T1, T2\n        >::type type;\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap2< int,int,int >\n{\n    typedef int type;\n};\n\ntemplate< typename F>\nstruct msvc_apply3\n{\n    template< bool > struct f_ : F {};\n    template<> struct f_<true>\n    {\n        template< typename P1, typename P2, typename P3 > struct apply\n        {\n            typedef int type;\n        };\n    };\n\n    template< typename T1, typename T2, typename T3 > struct result_\n        : f_< aux::msvc_never_true<F>::value >\n            ::template apply< T1,T2,T3 >\n    {\n    };\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap3\n{\n    typedef typename msvc_apply3<F>::template result_<\n          T1, T2, T3\n        >::type type;\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap3< int,int,int,int >\n{\n    typedef int type;\n};\n\ntemplate< typename F>\nstruct msvc_apply4\n{\n    template< bool > struct f_ : F {};\n    template<> struct f_<true>\n    {\n        template<\n              typename P1, typename P2, typename P3, typename P4\n            >\n        struct apply\n        {\n            typedef int type;\n        };\n    };\n\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : f_< aux::msvc_never_true<F>::value >\n            ::template apply< T1,T2,T3,T4 >\n    {\n    };\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap4\n{\n    typedef typename msvc_apply4<F>::template result_<\n          T1, T2, T3, T4\n        >::type type;\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap4< int,int,int,int,int >\n{\n    typedef 
int type;\n};\n\ntemplate< typename F>\nstruct msvc_apply5\n{\n    template< bool > struct f_ : F {};\n    template<> struct f_<true>\n    {\n        template<\n              typename P1, typename P2, typename P3, typename P4\n            , typename P5\n            >\n        struct apply\n        {\n            typedef int type;\n        };\n    };\n\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n        : f_< aux::msvc_never_true<F>::value >\n            ::template apply< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap5\n{\n    typedef typename msvc_apply5<F>::template result_<\n          T1, T2, T3, T4, T5\n        >::type type;\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap5< int,int,int,int,int,int >\n{\n    typedef int type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool >\nstruct resolve_arg_impl\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct resolve_arg_impl<true>\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef typename apply_wrap5<\n              T\n            , U1, U2, U3, U4, U5\n            >::type type;\n    };\n};\n\ntemplate< typename T > struct is_bind_template;\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n    : resolve_arg_impl< is_bind_template<T>::value >\n            ::template result_< T,U1,U2,U3,U4,U5 >\n{\n};\n\ntemplate< int arity_ > struct bind_chooser;\n\naux::no_tag is_bind_helper(...);\ntemplate< typename T > aux::no_tag is_bind_helper(protect<T>*);\n\ntemplate< int N >\naux::yes_tag is_bind_helper(arg<N>*);\n\ntemplate< bool is_ref_  = true >\nstruct is_bind_template_impl\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value  = false);\n    };\n};\n\ntemplate<>\nstruct is_bind_template_impl<false>\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n              sizeof(aux::is_bind_helper(static_cast<T*>(0)))\n                == sizeof(aux::yes_tag)\n            );\n    };\n};\n\ntemplate< typename T > struct is_bind_template\n  
  : is_bind_template_impl< ::boost::detail::is_reference_impl<T>::value >\n        ::template result_<T>\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F\n    >\naux::yes_tag\nis_bind_helper(bind0<F>*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1\n    >\naux::yes_tag\nis_bind_helper(bind1< F,T1 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n    
    typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\naux::yes_tag\nis_bind_helper(bind2< F,T1,T2 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\naux::yes_tag\nis_bind_helper(bind3< F,T1,T2,T3 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n    
    typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\naux::yes_tag\nis_bind_helper(bind4< F,T1,T2,T3,T4 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\naux::yes_tag\nis_bind_helper(bind5< F,T1,T2,T3,T4,T5 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool >\nstruct resolve_arg_impl\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct resolve_arg_impl<true>\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef typename apply_wrap5<\n              T\n            , U1, U2, U3, U4, U5\n            >::type type;\n    };\n};\n\ntemplate< typename T > struct is_bind_template;\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n    : resolve_arg_impl< is_bind_template<T>::value >\n            ::template result_< T,U1,U2,U3,U4,U5 >\n{\n};\n\ntemplate< typename T >\nstruct replace_unnamed_arg_impl\n{\n    template< typename Arg > struct result_\n    {\n        typedef Arg next;\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct replace_unnamed_arg_impl< arg< -1 > >\n{\n    template< typename Arg > struct result_\n    {\n        typedef typename next<Arg>::type next;\n        typedef Arg type;\n    };\n};\n\ntemplate< typename T, typename Arg >\nstruct replace_unnamed_arg\n    : replace_unnamed_arg_impl<T>::template result_<Arg>\n{\n};\n\ntemplate< int arity_ > struct bind_chooser;\n\naux::no_tag is_bind_helper(...);\ntemplate< typename T > aux::no_tag is_bind_helper(protect<T>*);\n\ntemplate< int N >\naux::yes_tag is_bind_helper(arg<N>*);\n\ntemplate< 
bool is_ref_  = true >\nstruct is_bind_template_impl\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value  = false);\n    };\n};\n\ntemplate<>\nstruct is_bind_template_impl<false>\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n              sizeof(aux::is_bind_helper(static_cast<T*>(0)))\n                == sizeof(aux::yes_tag)\n            );\n    };\n};\n\ntemplate< typename T > struct is_bind_template\n    : is_bind_template_impl< ::boost::detail::is_reference_impl<T>::value >\n        ::template result_<T>\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F\n    >\naux::yes_tag\nis_bind_helper(bind0<F>*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n      
  typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1\n    >\naux::yes_tag\nis_bind_helper(bind1< F,T1 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\naux::yes_tag\nis_bind_helper(bind2< F,T1,T2 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename 
U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\naux::yes_tag\nis_bind_helper(bind3< F,T1,T2,T3 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename 
r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\naux::yes_tag\nis_bind_helper(bind4< F,T1,T2,T3,T4 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< 
T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\naux::yes_tag\nis_bind_helper(bind5< F,T1,T2,T3,T4,T5 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct bitand_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitand_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitand_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitand_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n\n    : if_<\n\n          is_na<N3>\n        , 
bitand_2< N1,N2 >\n        , bitand_<\n              bitand_2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitand_2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          bitand_impl<\n              typename bitand_tag<N1>::type\n            , typename bitand_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitand_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitand_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 & n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitand_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct bitor_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitor_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitor_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitor_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n\n    : if_<\n\n          is_na<N3>\n        , bitor_2< N1,N2 
>\n        , bitor_<\n              bitor_2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitor_2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          bitor_impl<\n              typename bitor_tag<N1>::type\n            , typename bitor_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitor_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitor_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 | n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitor_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct bitxor_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitxor_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitxor_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitxor_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n\n    : if_<\n\n          is_na<N3>\n        , 
bitxor_2< N1,N2 >\n        , bitxor_<\n              bitxor_2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitxor_2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          bitxor_impl<\n              typename bitxor_tag<N1>::type\n            , typename bitxor_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitxor_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitxor_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 ^ n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitxor_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct deque_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef vector0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector2<\n              T0, T1\n            >::type 
type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename 
T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux 
{\n\ntemplate<>\nstruct deque_chooser<10>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, 
typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector16<\n          
    T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace 
aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_deque_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_deque_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct deque_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_deque_arg<T1>::value + is_deque_arg<T2>::value \n        + is_deque_arg<T3>::value + is_deque_arg<T4>::value \n        + is_deque_arg<T5>::value + is_deque_arg<T6>::value \n        + is_deque_arg<T7>::value + is_deque_arg<T8>::value \n        + is_deque_arg<T9>::value + is_deque_arg<T10>::value \n        + is_deque_arg<T11>::value + is_deque_arg<T12>::value \n        + is_deque_arg<T13>::value + is_deque_arg<T14>::value \n        + is_deque_arg<T15>::value + is_deque_arg<T16>::value \n        + is_deque_arg<T17>::value + is_deque_arg<T18>::value \n        + is_deque_arg<T19>::value + is_deque_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, 
typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque_impl\n{\n    typedef aux::deque_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::deque_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque\n    : aux::deque_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::deque_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct divides_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct divides_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct divides_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct divides2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n\n    : if_<\n\n          is_na<N3>\n        , divides2< N1,N2 >\n        , 
divides<\n              divides2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct divides2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          divides_impl<\n              typename divides_tag<N1>::type\n            , typename divides_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, divides2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct divides_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 / n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::divides_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct equal_to_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct equal_to_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct equal_to_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n    : aux::msvc_eti_base< typename apply_wrap2<\n          equal_to_impl<\n              typename equal_to_tag<N1>::type\n            , typename equal_to_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, 
equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value ==\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate< int N >\nstruct fold_chunk;\n\ntemplate<> struct fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef state0 state;\n        typedef iter0 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef state1 state;\n        typedef iter1 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        
typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef state2 state;\n        typedef iter2 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef state3 state;\n        typedef iter3 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type 
state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef state4 state;\n        typedef iter4 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate< int N >\nstruct fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef fold_impl<\n              4\n            , First\n            , Last\n            , State\n            , ForwardOp\n            > chunk_;\n\n        typedef fold_impl<\n              ( (N - 4) < 0 ? 
0 : N - 4 )\n            , typename chunk_::iterator\n            , Last\n            , typename chunk_::state\n            , ForwardOp\n            > res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , fold_null_step< Last,State >\n            , fold_step< First,Last,State,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_step\n{\n    typedef fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        > chunk_;\n\n    typedef typename chunk_::state state;\n    typedef typename chunk_::iterator iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n    : fold_chunk<N>\n        ::template result_< First,Last,State,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n   \n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>, Tag >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > class F\n    , typename T1\n    , 
typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename 
le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n     
   , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          
is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>, Tag >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\n/// workaround for MWCW 8.3+/EDG < 303, leads to ambiguity on Digital Mars\n\ntemplate<\n      typename F, typename Tag1, typename Tag2\n    >\nstruct lambda<\n          lambda< F,Tag1 >\n        , Tag2\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef aux::le_result2<is_le, Tag2, mpl::lambda, l1, l2> le_result_;\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct greater_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n    : aux::msvc_eti_base< typename apply_wrap2<\n          greater_impl<\n              typename greater_tag<N1>::type\n            , typename greater_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, 
(N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_equal_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_equal_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n    : aux::msvc_eti_base< typename apply_wrap2<\n          greater_equal_impl<\n              typename greater_equal_tag<N1>::type\n            , typename greater_equal_tag<N2>::type\n            >\n        , N1\n        , N2\n      
  >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C1, bool C2 >\nstruct inherit2_impl\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T1, T2\n    {\n        typedef Derived type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< false,true >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T1\n    {\n        typedef T1 type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< true,false >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T2\n    {\n        typedef T2 type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< true,true >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n    {\n        typedef T1 type_;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : aux::inherit2_impl<\n          is_empty_base<T1>::value\n        , is_empty_base<T2>::value\n        >::template result_< inherit2< T1,T2 >,T1, T2 >\n{\n    typedef typename inherit2::type_ type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename 
T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate< int N >\nstruct iter_fold_chunk;\n\ntemplate<> struct iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef state0 state;\n        typedef iter0 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef state1 state;\n        typedef iter1 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n    
    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef state2 state;\n        typedef iter2 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef state3 state;\n        typedef iter3 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n        typedef 
typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef state4 state;\n        typedef iter4 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate< int N >\nstruct iter_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef iter_fold_impl<\n              4\n            , First\n            , Last\n            , State\n            , ForwardOp\n            > chunk_;\n\n        typedef iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , typename chunk_::iterator\n            , Last\n            , typename chunk_::state\n            , ForwardOp\n            > res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , iter_fold_null_step< Last,State >\n            , iter_fold_step< First,Last,State,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< 
int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_step\n{\n    typedef iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        > chunk_;\n\n    typedef typename chunk_::state state;\n    typedef typename chunk_::iterator iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n    : iter_fold_chunk<N>\n        ::template result_< First,Last,State,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct less_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n    : aux::msvc_eti_base< typename apply_wrap2<\n          less_impl<\n              typename less_tag<N1>::type\n            , typename less_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, 
N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N2)::value >\n             BOOST_MPL_AUX_VALUE_WKND(N1)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct less_equal_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_equal_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_equal_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n    : aux::msvc_eti_base< typename apply_wrap2<\n          less_equal_impl<\n              typename less_equal_tag<N1>::type\n            , typename less_equal_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    
BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct list_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef list0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list2<\n              T0, T1\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , 
typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<10>\n{\n  
  template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , 
typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, 
T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<20>\n{\n    template<\n 
         typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_list_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_list_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct list_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_list_arg<T1>::value + is_list_arg<T2>::value \n        + is_list_arg<T3>::value + is_list_arg<T4>::value \n        + is_list_arg<T5>::value + is_list_arg<T6>::value \n        + is_list_arg<T7>::value + is_list_arg<T8>::value \n        + is_list_arg<T9>::value + is_list_arg<T10>::value \n        + is_list_arg<T11>::value + is_list_arg<T12>::value \n        + is_list_arg<T13>::value + is_list_arg<T14>::value \n        + is_list_arg<T15>::value + is_list_arg<T16>::value \n        + is_list_arg<T17>::value + is_list_arg<T18>::value \n        + is_list_arg<T19>::value + is_list_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename 
T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list_impl\n{\n    typedef aux::list_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::list_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list\n    : aux::list_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::list_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct list_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list1_c<\n              T, C0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list2_c<\n              T, C0, C1\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long 
C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list3_c<\n              T, C0, C1, C2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list4_c<\n              T, C0, C1, C2, C3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list5_c<\n              T, C0, C1, C2, C3, C4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list6_c<\n              T, C0, C1, C2, C3, C4, C5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long 
C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list7_c<\n              T, C0, C1, C2, C3, C4, C5, C6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list8_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list9_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list10_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long 
C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list11_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list12_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list13_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list14_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long 
C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list15_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list16_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list17_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list18_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
list_c_chooser<19>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list19_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list20_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_list_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_list_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct list_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_list_c_arg<C1>::value + is_list_c_arg<C2>::value \n        + is_list_c_arg<C3>::value + is_list_c_arg<C4>::value \n        + is_list_c_arg<C5>::value + is_list_c_arg<C6>::value \n        + is_list_c_arg<C7>::value + is_list_c_arg<C8>::value \n        + is_list_c_arg<C9>::value + is_list_c_arg<C10>::value \n        + is_list_c_arg<C11>::value + is_list_c_arg<C12>::value \n        + 
is_list_c_arg<C13>::value + is_list_c_arg<C14>::value \n        + is_list_c_arg<C15>::value + is_list_c_arg<C16>::value \n        + is_list_c_arg<C17>::value + is_list_c_arg<C18>::value \n        + is_list_c_arg<C19>::value + is_list_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c_impl\n{\n    typedef aux::list_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::list_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c\n    : aux::list_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::list_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct map_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef map0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map2<\n              T0, T1\n            >::type type;\n\n    };\n};\n\n} // 
namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, 
typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<10>\n{\n    template<\n          typename T0, 
typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, 
typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, 
typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_map_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_map_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct map_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_map_arg<T1>::value + is_map_arg<T2>::value \n        + is_map_arg<T3>::value + is_map_arg<T4>::value \n        + is_map_arg<T5>::value + is_map_arg<T6>::value \n        + is_map_arg<T7>::value + is_map_arg<T8>::value \n        + is_map_arg<T9>::value + is_map_arg<T10>::value \n        + is_map_arg<T11>::value + is_map_arg<T12>::value \n        + is_map_arg<T13>::value + is_map_arg<T14>::value \n        + is_map_arg<T15>::value + is_map_arg<T16>::value \n        + is_map_arg<T17>::value + is_map_arg<T18>::value \n        + is_map_arg<T19>::value + is_map_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, 
typename T19\n    >\nstruct map_impl\n{\n    typedef aux::map_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::map_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map\n    : aux::map_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::map_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct minus_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct minus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct minus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct minus2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n\n    : if_<\n\n          is_na<N3>\n        , minus2< N1,N2 >\n        , minus<\n              
minus2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct minus2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          minus_impl<\n              typename minus_tag<N1>::type\n            , typename minus_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, minus2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct minus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 - n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::minus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct modulus_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct modulus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct modulus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n    : aux::msvc_eti_base< typename apply_wrap2<\n          modulus_impl<\n              typename modulus_tag<N1>::type\n            , typename modulus_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, 
(N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct modulus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 % n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct modulus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::modulus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct not_equal_to_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct not_equal_to_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n    : aux::msvc_eti_base< typename apply_wrap2<\n          not_equal_to_impl<\n              typename not_equal_to_tag<N1>::type\n            , typename not_equal_to_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type 
>::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value !=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool C_ > struct or_impl\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : true_\n    {\n    };\n};\n\ntemplate<> struct or_impl<false>\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : or_impl<\n              BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n            >::template result_< T2,T3,T4,false_ >\n    {\n    };\n};\n\ntemplate<>\nstruct or_impl<false>\n    ::result_< false_,false_,false_,false_ >\n        : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        >::template result_< T2,T3,T4,T5 >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct plus_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct plus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct plus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct plus2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n\n    : if_<\n\n          is_na<N3>\n        , plus2< N1,N2 >\n        , plus<\n              plus2< 
N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct plus2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          plus_impl<\n              typename plus_tag<N1>::type\n            , typename plus_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, plus2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct plus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 + n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::plus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate< long N >\nstruct reverse_fold_chunk;\n\ntemplate<> struct reverse_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct reverse_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< 
int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct reverse_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct reverse_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename 
mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct reverse_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef 
typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate< long N >\nstruct reverse_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef reverse_fold_impl<\n              ( (N - 4) < 0 ? 
0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , reverse_fold_null_step< Last,State >\n            , reverse_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct 
reverse_fold_step\n{\n    typedef reverse_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n    : reverse_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk;\n\ntemplate<> struct reverse_iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct reverse_iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        
typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct reverse_iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct reverse_iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename 
apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<> struct reverse_iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk\n{\n    template<\n          typename First\n        , 
typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef reverse_iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , 
typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , reverse_iter_fold_null_step< Last,State >\n            , reverse_iter_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n\n    /// ETI workaround\n    template<> struct result_< int,int,int,int,int >\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step\n{\n    typedef reverse_iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n    : reverse_iter_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct set_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef set0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set2<\n              T0, T1\n            >::type type;\n\n    };\n};\n\n} // 
namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, 
typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<10>\n{\n    template<\n          typename T0, 
typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, 
typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, 
typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_set_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_set_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct set_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_set_arg<T1>::value + is_set_arg<T2>::value \n        + is_set_arg<T3>::value + is_set_arg<T4>::value \n        + is_set_arg<T5>::value + is_set_arg<T6>::value \n        + is_set_arg<T7>::value + is_set_arg<T8>::value \n        + is_set_arg<T9>::value + is_set_arg<T10>::value \n        + is_set_arg<T11>::value + is_set_arg<T12>::value \n        + is_set_arg<T13>::value + is_set_arg<T14>::value \n        + is_set_arg<T15>::value + is_set_arg<T16>::value \n        + is_set_arg<T17>::value + is_set_arg<T18>::value \n        + is_set_arg<T19>::value + is_set_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, 
typename T19\n    >\nstruct set_impl\n{\n    typedef aux::set_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::set_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set\n    : aux::set_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::set_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct set_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set1_c<\n              T, C0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set2_c<\n              T, C0, C1\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set3_c<\n              T, C0, C1, C2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set4_c<\n              T, C0, C1, C2, C3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set5_c<\n              T, C0, C1, C2, C3, C4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set6_c<\n              T, C0, C1, C2, C3, C4, C5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long 
C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set7_c<\n              T, C0, C1, C2, C3, C4, C5, C6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set8_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set9_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set10_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long 
C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set11_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set12_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set13_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set14_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n       
 , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set15_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set16_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set17_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set18_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<19>\n{\n    template<\n          typename 
T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set19_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set20_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_set_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_set_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct set_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_set_c_arg<C1>::value + is_set_c_arg<C2>::value \n        + is_set_c_arg<C3>::value + is_set_c_arg<C4>::value \n        + is_set_c_arg<C5>::value + is_set_c_arg<C6>::value \n        + is_set_c_arg<C7>::value + is_set_c_arg<C8>::value \n        + is_set_c_arg<C9>::value + is_set_c_arg<C10>::value \n        + is_set_c_arg<C11>::value + is_set_c_arg<C12>::value \n        + is_set_c_arg<C13>::value + is_set_c_arg<C14>::value \n        + is_set_c_arg<C15>::value + 
is_set_c_arg<C16>::value \n        + is_set_c_arg<C17>::value + is_set_c_arg<C18>::value \n        + is_set_c_arg<C19>::value + is_set_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c_impl\n{\n    typedef aux::set_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::set_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c\n    : aux::set_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::set_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct shift_left_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_left_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_left_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n    : aux::msvc_eti_base< typename apply_wrap2<\n          shift_left_impl<\n              typename shift_left_tag<N1>::type\n            , typename shift_left_tag<N2>::type\n            >\n        , N1\n        , N2\n        
>::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, typename Shift, T n, Shift s >\nstruct shift_left_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n << s));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n        : aux::shift_left_wknd<\n              typename N::value_type\n            , typename S::value_type\n            , N::value\n            , S::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct shift_right_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_right_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_right_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n    : aux::msvc_eti_base< typename apply_wrap2<\n          shift_right_impl<\n              typename shift_right_tag<N1>::type\n            , typename shift_right_tag<N2>::type\n            >\n        , N1\n        , 
N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, typename Shift, T n, Shift s >\nstruct shift_right_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n >> s));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n        : aux::shift_right_wknd<\n              typename N::value_type\n            , typename S::value_type\n            , N::value\n            , S::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< bool >\nstruct template_arity_impl\n{\n    template< typename F > struct result_\n        : mpl::int_< -1 >\n    {\n    };\n};\n\ntemplate<>\nstruct template_arity_impl<true>\n{\n    template< typename F > struct result_\n        : F::arity\n    {\n    };\n};\n\ntemplate< typename F >\nstruct template_arity\n    : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value >\n        ::template result_<F>\n{\n};\n\ntemplate<>\nstruct template_arity<int>\n    : mpl::int_< -1 >\n{\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct times_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct times_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct times_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct times2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n\n    : if_<\n\n          is_na<N3>\n        , times2< N1,N2 >\n        , times<\n              
times2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct times2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          times_impl<\n              typename times_tag<N1>::type\n            , typename times_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, times2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct times_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 * n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::times_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, size) > struct unpack_args_impl\n{\n    template< typename F, typename Args > struct apply;\n};\n\ntemplate<> struct unpack_args_impl<0>\n{\n    template< typename F, typename Args > struct apply\n        : apply0<\n              F\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<1>\n{\n    template< typename F, typename Args > struct apply\n        : apply1<\n              F\n            , typename at_c< Args,0 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<2>\n{\n    template< typename F, typename Args > struct apply\n        : apply2<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<3>\n{\n    template< typename F, typename Args > struct apply\n        : apply3<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename at_c< Args,2 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<4>\n{\n    template< typename F, typename Args > struct apply\n        : apply4<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<5>\n{\n    template< typename F, typename Args > struct apply\n        : apply5<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename 
at_c< Args,2 >::type, typename at_c< Args,3 >::type\n            , typename at_c< Args,4 >::type\n            >\n    {\n    };\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n\n        : aux::unpack_args_impl< size<Args>::value >\n            ::template apply< F,Args >\n\n    {\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct vector_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef vector0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector2<\n              T0, T1\n            >::type 
type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, 
typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux 
{\n\ntemplate<>\nstruct vector_chooser<10>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename 
T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector16<\n  
            T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace 
aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_vector_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_vector_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct vector_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_vector_arg<T1>::value + is_vector_arg<T2>::value \n        + is_vector_arg<T3>::value + is_vector_arg<T4>::value \n        + is_vector_arg<T5>::value + is_vector_arg<T6>::value \n        + is_vector_arg<T7>::value + is_vector_arg<T8>::value \n        + is_vector_arg<T9>::value + is_vector_arg<T10>::value \n        + is_vector_arg<T11>::value + is_vector_arg<T12>::value \n        + is_vector_arg<T13>::value + is_vector_arg<T14>::value \n        + is_vector_arg<T15>::value + is_vector_arg<T16>::value \n        + is_vector_arg<T17>::value + is_vector_arg<T18>::value \n        + is_vector_arg<T19>::value + is_vector_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , 
typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector_impl\n{\n    typedef aux::vector_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::vector_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector\n    : aux::vector_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::vector_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc60/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct vector_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector1_c<\n              T, T(C0)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector2_c<\n              T, T(C0), T(C1)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long 
C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector3_c<\n              T, T(C0), T(C1), T(C2)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector4_c<\n              T, T(C0), T(C1), T(C2), T(C3)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector5_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector6_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n      
  , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector7_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector8_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector9_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector10_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
vector_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector11_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector12_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector13_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename 
vector14_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector15_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector16_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector17_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
vector_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector18_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<19>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector19_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector20_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_vector_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_vector_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value 
 = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct vector_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_vector_c_arg<C1>::value + is_vector_c_arg<C2>::value \n        + is_vector_c_arg<C3>::value + is_vector_c_arg<C4>::value \n        + is_vector_c_arg<C5>::value + is_vector_c_arg<C6>::value \n        + is_vector_c_arg<C7>::value + is_vector_c_arg<C8>::value \n        + is_vector_c_arg<C9>::value + is_vector_c_arg<C10>::value \n        + is_vector_c_arg<C11>::value + is_vector_c_arg<C12>::value \n        + is_vector_c_arg<C13>::value + is_vector_c_arg<C14>::value \n        + is_vector_c_arg<C15>::value + is_vector_c_arg<C16>::value \n        + is_vector_c_arg<C17>::value + is_vector_c_arg<C18>::value \n        + is_vector_c_arg<C19>::value + is_vector_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c_impl\n{\n    typedef aux::vector_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::vector_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long 
C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c\n    : aux::vector_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::vector_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              
advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool C_ > struct and_impl\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : false_\n    {\n    };\n};\n\ntemplate<> struct and_impl<true>\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : and_impl<\n              BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n            >::template result_< T2,T3,T4,true_ >\n    {\n    };\n\n    template<> struct result_< true_,true_,true_,true_ >\n        : true_\n    {\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        >::template result_< T2,T3,T4,T5 >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply0<int>\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply1< int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply2< int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply3< int,int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4\n\n    : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n       
 >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply4< int,int,int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply5< int,int,int,int,int,int >\n{\n    typedef int type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n\n    , typename has_apply_ = typename aux::has_apply<F>::type\n\n    >\nstruct apply_wrap0\n\n{\n    typedef typename F::template apply<\n        \n        >::type type;\n\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap0<int>\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1\n\n    >\nstruct apply_wrap1\n\n{\n    typedef typename F::template apply<\n         T1\n        >::type type;\n\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap1< int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n\n    >\nstruct apply_wrap2\n\n{\n    typedef typename F::template apply<\n         T1, T2\n        >::type type;\n\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap2< int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n\n    >\nstruct apply_wrap3\n\n{\n    typedef typename F::template apply<\n         T1, T2, T3\n        >::type type;\n\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap3< int,int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n\n    >\nstruct apply_wrap4\n\n{\n    typedef typename F::template apply<\n         T1, T2, T3, T4\n        >::type type;\n\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap4< int,int,int,int,int >\n{\n    typedef int type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n\n    >\nstruct apply_wrap5\n\n{\n    typedef 
typename F::template apply<\n         T1, T2, T3, T4, T5\n        >::type type;\n\n};\n\n/// workaround for ETI bug\ntemplate<>\nstruct apply_wrap5< int,int,int,int,int,int >\n{\n    typedef int type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool >\nstruct resolve_arg_impl\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct resolve_arg_impl<true>\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef typename apply_wrap5<\n              T\n            , U1, U2, U3, U4, U5\n            >::type type;\n    };\n};\n\ntemplate< typename T > struct is_bind_template;\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n    : resolve_arg_impl< is_bind_template<T>::value >\n            ::template result_< T,U1,U2,U3,U4,U5 >\n{\n};\n\ntemplate< int arity_ > struct bind_chooser;\n\naux::no_tag is_bind_helper(...);\ntemplate< typename T > aux::no_tag is_bind_helper(protect<T>*);\n\ntemplate< int N >\naux::yes_tag is_bind_helper(arg<N>*);\n\ntemplate< bool is_ref_  = true >\nstruct is_bind_template_impl\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value  = false);\n    };\n};\n\ntemplate<>\nstruct is_bind_template_impl<false>\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n              sizeof(aux::is_bind_helper(static_cast<T*>(0)))\n                == sizeof(aux::yes_tag)\n            );\n    };\n};\n\ntemplate< typename T > struct is_bind_template\n  
  : is_bind_template_impl< ::boost::detail::is_reference_impl<T>::value >\n        ::template result_<T>\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F\n    >\naux::yes_tag\nis_bind_helper(bind0<F>*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1\n    >\naux::yes_tag\nis_bind_helper(bind1< F,T1 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n    
    typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\naux::yes_tag\nis_bind_helper(bind2< F,T1,T2 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\naux::yes_tag\nis_bind_helper(bind3< F,T1,T2,T3 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n    
    typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\naux::yes_tag\nis_bind_helper(bind4< F,T1,T2,T3,T4 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\naux::yes_tag\nis_bind_helper(bind5< F,T1,T2,T3,T4,T5 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool >\nstruct resolve_arg_impl\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct resolve_arg_impl<true>\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef typename apply_wrap5<\n              T\n            , U1, U2, U3, U4, U5\n            >::type type;\n    };\n};\n\ntemplate< typename T > struct is_bind_template;\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n    : resolve_arg_impl< is_bind_template<T>::value >\n            ::template result_< T,U1,U2,U3,U4,U5 >\n{\n};\n\ntemplate< typename T >\nstruct replace_unnamed_arg_impl\n{\n    template< typename Arg > struct result_\n    {\n        typedef Arg next;\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct replace_unnamed_arg_impl< arg< -1 > >\n{\n    template< typename Arg > struct result_\n    {\n        typedef typename next<Arg>::type next;\n        typedef Arg type;\n    };\n};\n\ntemplate< typename T, typename Arg >\nstruct replace_unnamed_arg\n    : replace_unnamed_arg_impl<T>::template result_<Arg>\n{\n};\n\ntemplate< int arity_ > struct bind_chooser;\n\naux::no_tag is_bind_helper(...);\ntemplate< typename T > aux::no_tag is_bind_helper(protect<T>*);\n\ntemplate< int N >\naux::yes_tag is_bind_helper(arg<N>*);\n\ntemplate< 
bool is_ref_  = true >\nstruct is_bind_template_impl\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value  = false);\n    };\n};\n\ntemplate<>\nstruct is_bind_template_impl<false>\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n              sizeof(aux::is_bind_helper(static_cast<T*>(0)))\n                == sizeof(aux::yes_tag)\n            );\n    };\n};\n\ntemplate< typename T > struct is_bind_template\n    : is_bind_template_impl< ::boost::detail::is_reference_impl<T>::value >\n        ::template result_<T>\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F\n    >\naux::yes_tag\nis_bind_helper(bind0<F>*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n      
  typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1\n    >\naux::yes_tag\nis_bind_helper(bind1< F,T1 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\naux::yes_tag\nis_bind_helper(bind2< F,T1,T2 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename 
U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\naux::yes_tag\nis_bind_helper(bind3< F,T1,T2,T3 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename 
r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\naux::yes_tag\nis_bind_helper(bind4< F,T1,T2,T3,T4 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< 
T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\naux::yes_tag\nis_bind_helper(bind5< F,T1,T2,T3,T4,T5 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct bitand_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitand_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitand_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n    : tag< T,na >\n{\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitand_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n\n    : aux::msvc_eti_base< typename if_<\n\n          is_na<N3>\n    
    , bitand_2< N1,N2 >\n        , bitand_<\n              bitand_2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n    >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitand_2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          bitand_impl<\n              typename bitand_tag<N1>::type\n            , typename bitand_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitand_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitand_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 & n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitand_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct bitor_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitor_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitor_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n    : tag< T,na >\n{\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitor_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n\n    : aux::msvc_eti_base< typename if_<\n\n          is_na<N3>\n        , 
bitor_2< N1,N2 >\n        , bitor_<\n              bitor_2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n    >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitor_2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          bitor_impl<\n              typename bitor_tag<N1>::type\n            , typename bitor_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitor_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitor_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 | n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitor_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct bitxor_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitxor_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitxor_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n    : tag< T,na >\n{\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitxor_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n\n    : aux::msvc_eti_base< typename if_<\n\n          is_na<N3>\n    
    , bitxor_2< N1,N2 >\n        , bitxor_<\n              bitxor_2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n    >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitxor_2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          bitxor_impl<\n              typename bitxor_tag<N1>::type\n            , typename bitxor_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitxor_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitxor_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 ^ n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitxor_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct deque_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef vector0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector2<\n              T0, T1\n            >::type 
type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename 
T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux 
{\n\ntemplate<>\nstruct deque_chooser<10>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, 
typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector16<\n          
    T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace 
aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_deque_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_deque_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct deque_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_deque_arg<T1>::value + is_deque_arg<T2>::value \n        + is_deque_arg<T3>::value + is_deque_arg<T4>::value \n        + is_deque_arg<T5>::value + is_deque_arg<T6>::value \n        + is_deque_arg<T7>::value + is_deque_arg<T8>::value \n        + is_deque_arg<T9>::value + is_deque_arg<T10>::value \n        + is_deque_arg<T11>::value + is_deque_arg<T12>::value \n        + is_deque_arg<T13>::value + is_deque_arg<T14>::value \n        + is_deque_arg<T15>::value + is_deque_arg<T16>::value \n        + is_deque_arg<T17>::value + is_deque_arg<T18>::value \n        + is_deque_arg<T19>::value + is_deque_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, 
typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque_impl\n{\n    typedef aux::deque_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::deque_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque\n    : aux::deque_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::deque_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct divides_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct divides_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct divides_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n    : tag< T,na >\n{\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct divides2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n\n    : aux::msvc_eti_base< typename if_<\n\n          is_na<N3>\n        , divides2< N1,N2 
>\n        , divides<\n              divides2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n    >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct divides2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          divides_impl<\n              typename divides_tag<N1>::type\n            , typename divides_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, divides2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct divides_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 / n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::divides_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct equal_to_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct equal_to_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct equal_to_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n    : aux::msvc_eti_base< typename apply_wrap2<\n          equal_to_impl<\n              typename equal_to_tag<N1>::type\n            , typename equal_to_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, 
N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value ==\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate< int N >\nstruct fold_chunk;\n\ntemplate<> struct fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef state1 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        
\n\n        typedef state2 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef state3 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef state4 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< int N >\nstruct fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename 
ForwardOp\n        >\n    struct result_\n    {\n        typedef fold_impl<\n              4\n            , First\n            , Last\n            , State\n            , ForwardOp\n            > chunk_;\n\n        typedef fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , typename chunk_::iterator\n            , Last\n            , typename chunk_::state\n            , ForwardOp\n            > res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , fold_null_step< Last,State >\n            , fold_step< First,Last,State,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_step\n{\n    typedef fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        > chunk_;\n\n    typedef typename chunk_::state state;\n    typedef typename chunk_::iterator iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n    : fold_chunk<N>\n        ::template result_< 
First,Last,State,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n   \n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>, Tag >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > class F\n    , typename T1\n    , 
typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename 
le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n     
   , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          
is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>, Tag >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\n/// workaround for MWCW 8.3+/EDG < 303, leads to ambiguity on Digital Mars\n\ntemplate<\n      typename F, typename Tag1, typename Tag2\n    >\nstruct lambda<\n          lambda< F,Tag1 >\n        , Tag2\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef aux::le_result2<is_le, Tag2, mpl::lambda, l1, l2> le_result_;\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct greater_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n    : aux::msvc_eti_base< typename apply_wrap2<\n          greater_impl<\n              typename greater_tag<N1>::type\n            , typename greater_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, 
N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_equal_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_equal_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n    : aux::msvc_eti_base< typename apply_wrap2<\n          greater_equal_impl<\n              typename greater_equal_tag<N1>::type\n            , typename greater_equal_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type 
>::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C1, bool C2 >\nstruct inherit2_impl\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T1, T2\n    {\n        typedef Derived type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< false,true >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T1\n    {\n        typedef T1 type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< true,false >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T2\n    {\n        typedef T2 type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< true,true >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n    {\n        typedef T1 type_;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : aux::inherit2_impl<\n          is_empty_base<T1>::value\n        , is_empty_base<T2>::value\n        >::template result_< inherit2< T1,T2 >,T1, T2 >\n{\n    typedef typename inherit2::type_ type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename 
T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate< int N >\nstruct iter_fold_chunk;\n\ntemplate<> struct iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef state1 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef state2 state;\n       
 typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef state3 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef state4 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< int N >\nstruct iter_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef iter_fold_impl<\n              4\n            , First\n            , Last\n            , State\n            , ForwardOp\n          
  > chunk_;\n\n        typedef iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , typename chunk_::iterator\n            , Last\n            , typename chunk_::state\n            , ForwardOp\n            > res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , iter_fold_null_step< Last,State >\n            , iter_fold_step< First,Last,State,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_step\n{\n    typedef iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        > chunk_;\n\n    typedef typename chunk_::state state;\n    typedef typename chunk_::iterator iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n    : iter_fold_chunk<N>\n        ::template result_< First,Last,State,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct less_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n    : aux::msvc_eti_base< typename apply_wrap2<\n          less_impl<\n              typename less_tag<N1>::type\n            , typename less_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N2)::value >\n             BOOST_MPL_AUX_VALUE_WKND(N1)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct less_equal_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_equal_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_equal_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n    : aux::msvc_eti_base< typename apply_wrap2<\n          less_equal_impl<\n              typename less_equal_tag<N1>::type\n            , typename less_equal_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    
BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct list_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef list0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list2<\n              T0, T1\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , 
typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<10>\n{\n  
  template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , 
typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, 
T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<20>\n{\n    template<\n 
         typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_list_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_list_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct list_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_list_arg<T1>::value + is_list_arg<T2>::value \n        + is_list_arg<T3>::value + is_list_arg<T4>::value \n        + is_list_arg<T5>::value + is_list_arg<T6>::value \n        + is_list_arg<T7>::value + is_list_arg<T8>::value \n        + is_list_arg<T9>::value + is_list_arg<T10>::value \n        + is_list_arg<T11>::value + is_list_arg<T12>::value \n        + is_list_arg<T13>::value + is_list_arg<T14>::value \n        + is_list_arg<T15>::value + is_list_arg<T16>::value \n        + is_list_arg<T17>::value + is_list_arg<T18>::value \n        + is_list_arg<T19>::value + is_list_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename 
T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list_impl\n{\n    typedef aux::list_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::list_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list\n    : aux::list_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::list_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct list_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list1_c<\n              T, C0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list2_c<\n              T, C0, C1\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long 
C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list3_c<\n              T, C0, C1, C2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list4_c<\n              T, C0, C1, C2, C3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list5_c<\n              T, C0, C1, C2, C3, C4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list6_c<\n              T, C0, C1, C2, C3, C4, C5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long 
C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list7_c<\n              T, C0, C1, C2, C3, C4, C5, C6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list8_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list9_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list10_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long 
C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list11_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list12_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list13_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list14_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long 
C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list15_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list16_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list17_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list18_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
list_c_chooser<19>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list19_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list20_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_list_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_list_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct list_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_list_c_arg<C1>::value + is_list_c_arg<C2>::value \n        + is_list_c_arg<C3>::value + is_list_c_arg<C4>::value \n        + is_list_c_arg<C5>::value + is_list_c_arg<C6>::value \n        + is_list_c_arg<C7>::value + is_list_c_arg<C8>::value \n        + is_list_c_arg<C9>::value + is_list_c_arg<C10>::value \n        + is_list_c_arg<C11>::value + is_list_c_arg<C12>::value \n        + 
is_list_c_arg<C13>::value + is_list_c_arg<C14>::value \n        + is_list_c_arg<C15>::value + is_list_c_arg<C16>::value \n        + is_list_c_arg<C17>::value + is_list_c_arg<C18>::value \n        + is_list_c_arg<C19>::value + is_list_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c_impl\n{\n    typedef aux::list_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::list_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c\n    : aux::list_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::list_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct map_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef map0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map2<\n              T0, T1\n            >::type type;\n\n    };\n};\n\n} // 
namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, 
typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<10>\n{\n    template<\n          typename T0, 
typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, 
typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, 
typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_map_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_map_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct map_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_map_arg<T1>::value + is_map_arg<T2>::value \n        + is_map_arg<T3>::value + is_map_arg<T4>::value \n        + is_map_arg<T5>::value + is_map_arg<T6>::value \n        + is_map_arg<T7>::value + is_map_arg<T8>::value \n        + is_map_arg<T9>::value + is_map_arg<T10>::value \n        + is_map_arg<T11>::value + is_map_arg<T12>::value \n        + is_map_arg<T13>::value + is_map_arg<T14>::value \n        + is_map_arg<T15>::value + is_map_arg<T16>::value \n        + is_map_arg<T17>::value + is_map_arg<T18>::value \n        + is_map_arg<T19>::value + is_map_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, 
typename T19\n    >\nstruct map_impl\n{\n    typedef aux::map_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::map_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map\n    : aux::map_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::map_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct minus_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct minus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct minus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n    : tag< T,na >\n{\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct minus2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n\n    : aux::msvc_eti_base< typename if_<\n\n          is_na<N3>\n        , minus2< N1,N2 >\n        , minus<\n 
             minus2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n    >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct minus2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          minus_impl<\n              typename minus_tag<N1>::type\n            , typename minus_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, minus2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct minus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 - n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::minus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct modulus_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct modulus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct modulus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n    : aux::msvc_eti_base< typename apply_wrap2<\n          modulus_impl<\n              typename modulus_tag<N1>::type\n            , typename modulus_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, 
N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct modulus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 % n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct modulus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::modulus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct not_equal_to_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct not_equal_to_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n    : aux::msvc_eti_base< typename apply_wrap2<\n          not_equal_to_impl<\n              typename not_equal_to_tag<N1>::type\n            , typename not_equal_to_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    
BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value !=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool C_ > struct or_impl\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : true_\n    {\n    };\n};\n\ntemplate<> struct or_impl<false>\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : or_impl<\n              BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n            >::template result_< T2,T3,T4,false_ >\n    {\n    };\n\n    template<> struct result_< false_,false_,false_,false_ >\n        : false_\n    {\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        >::template result_< T2,T3,T4,T5 >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct plus_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct plus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct plus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n    : tag< T,na >\n{\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct plus2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n\n    : aux::msvc_eti_base< typename if_<\n\n          is_na<N3>\n        , plus2< N1,N2 >\n        , plus<\n             
 plus2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n    >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct plus2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          plus_impl<\n              typename plus_tag<N1>::type\n            , typename plus_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, plus2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct plus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 + n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::plus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\ntemplate< bool > struct quote_impl\n{\n    template< typename T > struct result_\n        : T\n    {\n    };\n};\n\ntemplate<> struct quote_impl<false>\n{\n    template< typename T > struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<\n      template< typename P1 > class F\n    , typename Tag = void_\n    >\nstruct quote1\n{\n    template< typename U1 > struct apply\n\n        : quote_impl< aux::has_type< F<U1> >::value >\n            ::template result_< F<U1> >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename Tag = void_\n    >\nstruct quote2\n{\n    template< typename U1, typename U2 > struct apply\n\n        : quote_impl< aux::has_type< F< U1,U2 > >::value >\n            ::template result_< F< U1,U2 > >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename Tag = void_\n    >\nstruct quote3\n{\n    template< typename U1, typename U2, typename U3 > struct apply\n\n        : quote_impl< aux::has_type< F< U1,U2,U3 > >::value >\n            ::template result_< F< U1,U2,U3 > >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename Tag = void_\n    >\nstruct quote4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        >\n    struct apply\n\n        : quote_impl< aux::has_type< F< U1,U2,U3,U4 > >::value >\n            ::template result_< F< U1,U2,U3,U4 > >\n\n    {\n    };\n};\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , 
typename P5\n        >\n      class F\n    , typename Tag = void_\n    >\nstruct quote5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        , typename U5\n        >\n    struct apply\n\n        : quote_impl< aux::has_type< F< U1,U2,U3,U4,U5 > >::value >\n            ::template result_< F< U1,U2,U3,U4,U5 > >\n\n    {\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate< long N >\nstruct reverse_fold_chunk;\n\ntemplate<> struct reverse_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct 
result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 
iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename 
mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef reverse_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last 
>::type\n            , reverse_fold_null_step< Last,State >\n            , reverse_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step\n{\n    typedef reverse_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n    : reverse_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk;\n\ntemplate<> struct reverse_iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n       
 typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n   
 {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef 
reverse_iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , reverse_iter_fold_null_step< Last,State >\n            , reverse_iter_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step\n{\n    typedef reverse_iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< 
ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n    : reverse_iter_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct set_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef set0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set2<\n              T0, T1\n            >::type type;\n\n    };\n};\n\n} // 
namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, 
typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<10>\n{\n    template<\n          typename T0, 
typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, 
typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, 
typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_set_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_set_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct set_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_set_arg<T1>::value + is_set_arg<T2>::value \n        + is_set_arg<T3>::value + is_set_arg<T4>::value \n        + is_set_arg<T5>::value + is_set_arg<T6>::value \n        + is_set_arg<T7>::value + is_set_arg<T8>::value \n        + is_set_arg<T9>::value + is_set_arg<T10>::value \n        + is_set_arg<T11>::value + is_set_arg<T12>::value \n        + is_set_arg<T13>::value + is_set_arg<T14>::value \n        + is_set_arg<T15>::value + is_set_arg<T16>::value \n        + is_set_arg<T17>::value + is_set_arg<T18>::value \n        + is_set_arg<T19>::value + is_set_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, 
typename T19\n    >\nstruct set_impl\n{\n    typedef aux::set_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::set_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set\n    : aux::set_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::set_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct set_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set1_c<\n              T, C0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set2_c<\n              T, C0, C1\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set3_c<\n              T, C0, C1, C2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set4_c<\n              T, C0, C1, C2, C3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set5_c<\n              T, C0, C1, C2, C3, C4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set6_c<\n              T, C0, C1, C2, C3, C4, C5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long 
C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set7_c<\n              T, C0, C1, C2, C3, C4, C5, C6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set8_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set9_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set10_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long 
C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set11_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set12_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set13_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set14_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n       
 , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set15_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set16_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set17_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set18_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<19>\n{\n    template<\n          typename 
T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set19_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set20_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_set_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_set_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct set_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_set_c_arg<C1>::value + is_set_c_arg<C2>::value \n        + is_set_c_arg<C3>::value + is_set_c_arg<C4>::value \n        + is_set_c_arg<C5>::value + is_set_c_arg<C6>::value \n        + is_set_c_arg<C7>::value + is_set_c_arg<C8>::value \n        + is_set_c_arg<C9>::value + is_set_c_arg<C10>::value \n        + is_set_c_arg<C11>::value + is_set_c_arg<C12>::value \n        + is_set_c_arg<C13>::value + is_set_c_arg<C14>::value \n        + is_set_c_arg<C15>::value + 
is_set_c_arg<C16>::value \n        + is_set_c_arg<C17>::value + is_set_c_arg<C18>::value \n        + is_set_c_arg<C19>::value + is_set_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c_impl\n{\n    typedef aux::set_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::set_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c\n    : aux::set_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::set_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct shift_left_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_left_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_left_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n    : aux::msvc_eti_base< typename apply_wrap2<\n          shift_left_impl<\n              typename shift_left_tag<N1>::type\n            , typename shift_left_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type 
>::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, typename Shift, T n, Shift s >\nstruct shift_left_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n << s));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n        : aux::shift_left_wknd<\n              typename N::value_type\n            , typename S::value_type\n            , N::value\n            , S::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct shift_right_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_right_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_right_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n    : tag< T,na >\n{\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n    : aux::msvc_eti_base< typename apply_wrap2<\n          shift_right_impl<\n              typename shift_right_tag<N1>::type\n            , typename shift_right_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type 
>::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, typename Shift, T n, Shift s >\nstruct shift_right_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n >> s));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n        : aux::shift_right_wknd<\n              typename N::value_type\n            , typename S::value_type\n            , N::value\n            , S::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< bool >\nstruct template_arity_impl\n{\n    template< typename F > struct result_\n        : mpl::int_< -1 >\n    {\n    };\n};\n\ntemplate<>\nstruct template_arity_impl<true>\n{\n    template< typename F > struct result_\n        : F::arity\n    {\n    };\n};\n\ntemplate< typename F >\nstruct template_arity\n    : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value >\n        ::template result_<F>\n{\n};\n\ntemplate<>\nstruct template_arity<int>\n    : mpl::int_< -1 >\n{\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag1_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value\n    , BOOST_MPL_AUX_NTTP_DECL(int, tag2_)  = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value\n    >\nstruct times_impl\n    : if_c<\n          ( tag1_ > tag2_ )\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct times_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct times_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n    : tag< T,na >\n{\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct times2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n\n    : aux::msvc_eti_base< typename if_<\n\n          is_na<N3>\n        , times2< N1,N2 >\n        , times<\n 
             times2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n    >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct times2\n    : aux::msvc_eti_base< typename apply_wrap2<\n          times_impl<\n              typename times_tag<N1>::type\n            , typename times_tag<N2>::type\n            >\n        , N1\n        , N2\n        >::type >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, times2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct times_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 * n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::times_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, size) > struct unpack_args_impl\n{\n    template< typename F, typename Args > struct apply;\n};\n\ntemplate<> struct unpack_args_impl<0>\n{\n    template< typename F, typename Args > struct apply\n        : apply0<\n              F\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<1>\n{\n    template< typename F, typename Args > struct apply\n        : apply1<\n              F\n            , typename at_c< Args,0 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<2>\n{\n    template< typename F, typename Args > struct apply\n        : apply2<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<3>\n{\n    template< typename F, typename Args > struct apply\n        : apply3<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename at_c< Args,2 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<4>\n{\n    template< typename F, typename Args > struct apply\n        : apply4<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<5>\n{\n    template< typename F, typename Args > struct apply\n        : apply5<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename 
at_c< Args,2 >::type, typename at_c< Args,3 >::type\n            , typename at_c< Args,4 >::type\n            >\n    {\n    };\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n\n        : aux::unpack_args_impl< size<Args>::value >\n            ::template apply< F,Args >\n\n    {\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct vector_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef vector0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector2<\n              T0, T1\n            >::type 
type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, 
typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux 
{\n\ntemplate<>\nstruct vector_chooser<10>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename 
T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector16<\n  
            T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace 
aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_vector_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_vector_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct vector_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_vector_arg<T1>::value + is_vector_arg<T2>::value \n        + is_vector_arg<T3>::value + is_vector_arg<T4>::value \n        + is_vector_arg<T5>::value + is_vector_arg<T6>::value \n        + is_vector_arg<T7>::value + is_vector_arg<T8>::value \n        + is_vector_arg<T9>::value + is_vector_arg<T10>::value \n        + is_vector_arg<T11>::value + is_vector_arg<T12>::value \n        + is_vector_arg<T13>::value + is_vector_arg<T14>::value \n        + is_vector_arg<T15>::value + is_vector_arg<T16>::value \n        + is_vector_arg<T17>::value + is_vector_arg<T18>::value \n        + is_vector_arg<T19>::value + is_vector_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , 
typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector_impl\n{\n    typedef aux::vector_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::vector_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector\n    : aux::vector_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::vector_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/msvc70/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct vector_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector1_c<\n              T, T(C0)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector2_c<\n              T, T(C0), T(C1)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long 
C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector3_c<\n              T, T(C0), T(C1), T(C2)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector4_c<\n              T, T(C0), T(C1), T(C2), T(C3)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector5_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector6_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n      
  , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector7_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector8_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector9_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector10_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
vector_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector11_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector12_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector13_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename 
vector14_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector15_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector16_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector17_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
vector_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector18_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<19>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector19_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector20_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_vector_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_vector_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value 
 = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct vector_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_vector_c_arg<C1>::value + is_vector_c_arg<C2>::value \n        + is_vector_c_arg<C3>::value + is_vector_c_arg<C4>::value \n        + is_vector_c_arg<C5>::value + is_vector_c_arg<C6>::value \n        + is_vector_c_arg<C7>::value + is_vector_c_arg<C8>::value \n        + is_vector_c_arg<C9>::value + is_vector_c_arg<C10>::value \n        + is_vector_c_arg<C11>::value + is_vector_c_arg<C12>::value \n        + is_vector_c_arg<C13>::value + is_vector_c_arg<C14>::value \n        + is_vector_c_arg<C15>::value + is_vector_c_arg<C16>::value \n        + is_vector_c_arg<C17>::value + is_vector_c_arg<C18>::value \n        + is_vector_c_arg<C19>::value + is_vector_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c_impl\n{\n    typedef aux::vector_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::vector_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long 
C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c\n    : aux::vector_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::vector_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              
advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl\n    : false_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl< true,T1,T2,T3,T4 >\n    : and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , true_\n        >\n{\n};\n\ntemplate<>\nstruct and_impl<\n          true\n        , true_, true_, true_, true_\n        >\n    : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\ntemplate<\n      typename F\n    >\nstruct apply< F,na,na,na,na,na >\n    : apply0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply< F,T1,na,na,na,na >\n    : apply1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply< F,T1,T2,na,na,na >\n    : apply2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply< F,T1,T2,T3,na,na >\n    : apply3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4\n\n 
   : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply< F,T1,T2,T3,T4,na >\n    : apply4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply\n    : apply5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply;\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      int N, typename F\n    >\nstruct apply_wrap_impl0;\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          0\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n/// since the defaults are \"lost\", we have to pass *something* even for nullary\n/// metafunction classes\n        na\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          1\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          2\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          3\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          4\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap_impl0<\n          5\n        , F\n       \n        >\n{\n    typedef typename F::template apply<\n         \n        na, na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F\n    >\nstruct apply_wrap0\n    : apply_wrap_impl0<\n          ::boost::mpl::aux::arity< F,0 >::value\n        , F\n       \n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, 
typename T1\n    >\nstruct apply_wrap_impl1;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          1\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          2\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          3\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          4\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap_impl1<\n          5\n        , F\n        , T1\n        >\n{\n    typedef typename F::template apply<\n          T1\n        , na, na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply_wrap1\n    : apply_wrap_impl1<\n          ::boost::mpl::aux::arity< F,1 >::value\n        , F\n        , T1\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          2\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          3\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    
>\nstruct apply_wrap_impl2<\n          4\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap_impl2<\n          5\n        , F\n        , T1, T2\n        >\n{\n    typedef typename F::template apply<\n          T1, T2\n\n        , na, na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply_wrap2\n    : apply_wrap_impl2<\n          ::boost::mpl::aux::arity< F,2 >::value\n        , F\n        , T1, T2\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          3\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          4\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap_impl3<\n          5\n        , F\n        , T1, T2, T3\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3\n\n        , na, na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply_wrap3\n    : apply_wrap_impl3<\n          ::boost::mpl::aux::arity< F,3 >::value\n        , F\n        , T1, T2, T3\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap_impl4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    
>\nstruct apply_wrap_impl4<\n          4\n        , F\n        , T1, T2, T3, T4\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap_impl4<\n          5\n        , F\n        , T1, T2, T3, T4\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4\n\n        , na\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply_wrap4\n    : apply_wrap_impl4<\n          ::boost::mpl::aux::arity< F,4 >::value\n        , F\n        , T1, T2, T3, T4\n        >::type\n{\n};\n\ntemplate<\n      int N, typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap_impl5;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap_impl5<\n          5\n        , F\n        , T1, T2, T3, T4, T5\n        >\n{\n    typedef typename F::template apply<\n          T1, T2, T3, T4, T5\n\n        > type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply_wrap5\n    : apply_wrap_impl5<\n          ::boost::mpl::aux::arity< F,5 >::value\n        , F\n        , T1, T2, T3, T4, T5\n        >::type\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, 
bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    
>\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    
struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              
f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n/// if_/eval_if specializations\ntemplate< template< typename T1, typename T2, typename T3 > class F, typename Tag >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct if_;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< if_,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef typename if_<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\ntemplate<\n      template< typename T1, typename T2, typename T3 > class F, typename Tag\n    >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct 
eval_if;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< eval_if,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef typename eval_if<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg< -1 >, Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename 
apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    
>\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n  
      typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< 
a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n       
 >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace 
aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n/// if_/eval_if specializations\ntemplate< template< typename T1, typename T2, typename T3 > class F, typename Tag >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct if_;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< if_,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef typename if_<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\ntemplate<\n      template< typename T1, typename T2, typename T3 > class F, typename Tag\n    >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct eval_if;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< eval_if,Tag >\n    , T1, T2, T3\n    
>\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef typename eval_if<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct bind;\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n    : bitand_< bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitand_< N1,N2,N3,N4,na >\n\n    : bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitand_< N1,N2,N3,na,na >\n\n    : bitand_< bitand_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitand_< N1,N2,na,na,na >\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  & BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n    : bitor_< bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitor_< 
N1,N2,N3,N4,na >\n\n    : bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitor_< N1,N2,N3,na,na >\n\n    : bitor_< bitor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitor_< N1,N2,na,na,na >\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  | BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n    : bitxor_< bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitxor_< N1,N2,N3,N4,na >\n\n    : bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitxor_< N1,N2,N3,na,na >\n\n    : bitxor_< bitxor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitxor_< N1,N2,na,na,na >\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  ^ BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque;\n\ntemplate<\n     \n    >\nstruct deque<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct deque<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct deque<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct deque<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct deque<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : 
vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename 
T6, typename T7, typename T8, typename T9\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, 
T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename vector17< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n    : divides< divides< divides< divides< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct divides< N1,N2,N3,N4,na 
>\n\n    : divides< divides< divides< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct divides< N1,N2,N3,na,na >\n\n    : divides< divides< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct divides< N1,N2,na,na,na >\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  / BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value  == BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 3,First,Last,State,ForwardOp >\n{\n   
 typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n{\n    typedef fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,First,Last,State,ForwardOp >\n    : fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n   \n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>, Tag >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > class F\n    , typename T1\n    , 
typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename 
le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n     
   , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          
is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>, Tag >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\n/// workaround for MWCW 8.3+/EDG < 303, leads to ambiguity on Digital Mars\n\ntemplate<\n      typename F, typename Tag1, typename Tag2\n    >\nstruct lambda<\n          lambda< F,Tag1 >\n        , Tag2\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef aux::le_result2<is_le, Tag2, mpl::lambda, l1, l2> le_result_;\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value > BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace 
boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : T1, T2\n{\n    typedef inherit2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\ntemplate< typename T1 >\nstruct inherit2< T1,empty_base >\n{\n    typedef T1 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (T1, empty_base))\n};\n\ntemplate< typename T2 >\nstruct inherit2< empty_base,T2 >\n{\n    typedef T2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, T2))\n};\n\ntemplate<>\nstruct inherit2< empty_base,empty_base >\n{\n    typedef empty_base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, empty_base))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct 
inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 3,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State 
state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n{\n    typedef iter_fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,First,Last,State,ForwardOp >\n    : iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, 
typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N2)::value > BOOST_MPL_AUX_VALUE_WKND(N1)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl 
{\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list;\n\ntemplate<\n     \n    >\nstruct list<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list0<  >\n{\n    typedef list0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct list<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list1<T0>\n{\n    typedef typename list1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list2< T0,T1 >\n{\n    typedef typename list2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list3< T0,T1,T2 >\n{\n    typedef typename list3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list4< T0,T1,T2,T3 
>\n{\n    typedef typename list4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list5< T0,T1,T2,T3,T4 >\n{\n    typedef typename list5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename list6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename list7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename list8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    
>\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n 
       >\n    : list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : list15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename list15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : list16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename list16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : list17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename list17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n   
   typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : list18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename list18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : list19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename list19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list\n    : list20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename list20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c;\n\ntemplate<\n      typename T\n    >\nstruct list_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list0_c<T>\n{\n    typedef typename list0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct list_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list1_c< T,C0 >\n{\n    typedef typename list1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct list_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list2_c< T,C0,C1 >\n{\n    typedef typename list2_c< T,C0,C1 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct list_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list3_c< T,C0,C1,C2 >\n{\n    typedef typename list3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename list4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename list5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename list6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : 
list7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    typedef typename list7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long 
C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename list14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename list15_c< 
T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename list16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename list17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : list18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename list18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : list19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename list19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c\n    : list20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename list20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map;\n\ntemplate<\n     \n    >\nstruct map<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map0<  >\n{\n    typedef map0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct map<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map1<T0>\n{\n    typedef typename map1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct map<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map2< T0,T1 >\n{\n    typedef typename map2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct map<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map3< T0,T1,T2 >\n{\n    typedef typename map3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct map<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map4< T0,T1,T2,T3 >\n{\n    typedef 
typename map4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct map<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map5< T0,T1,T2,T3,T4 >\n{\n    typedef typename map5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename map6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename map7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename map8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct map<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : map14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename map14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : map15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename map15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : map16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename map16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : map17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename map17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : map18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename map18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : map19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename map19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct map\n    : map20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename map20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n    : minus< minus< minus< minus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct minus< N1,N2,N3,N4,na >\n\n    : minus< minus< minus< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct minus< N1,N2,N3,na,na >\n\n    : minus< minus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct minus< N1,N2,na,na,na >\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  - BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct modulus_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  % BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { 
namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl\n    : true_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl< false,T1,T2,T3,T4 >\n    : or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , false_\n        >\n{\n};\n\ntemplate<>\nstruct or_impl<\n          false\n        , false_, false_, false_, false_\n        >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n    : plus< plus< plus< plus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct plus< N1,N2,N3,N4,na >\n\n    : plus< plus< plus< N1,N2 >, N3>, 
N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct plus< N1,N2,N3,na,na >\n\n    : plus< plus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct plus< N1,N2,na,na,na >\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  + BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T, bool has_type_ >\nstruct quote_impl\n    : T\n{\n};\n\ntemplate< typename T >\nstruct quote_impl< T,false >\n{\n    typedef T type;\n};\n\ntemplate<\n      template< typename P1 > class F\n    , typename Tag = void_\n    >\nstruct quote1\n{\n    template< typename U1 > struct apply\n\n        : quote_impl<\n              F<U1>\n            , aux::has_type< F<U1> >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename Tag = void_\n    >\nstruct quote2\n{\n    template< typename U1, typename U2 > struct apply\n\n        : quote_impl<\n              F< U1,U2 >\n            , aux::has_type< F< U1,U2 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename Tag = void_\n    >\nstruct quote3\n{\n    template< typename U1, typename U2, typename U3 > struct apply\n\n        : quote_impl<\n              F< U1,U2,U3 >\n            , aux::has_type< F< U1,U2,U3 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename Tag = void_\n    >\nstruct quote4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        >\n    struct apply\n\n        : quote_impl<\n              F< U1,U2,U3,U4 >\n            , aux::has_type< F< U1,U2,U3,U4 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename 
Tag = void_\n    >\nstruct quote5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        , typename U5\n        >\n    struct apply\n\n        : quote_impl<\n              F< U1,U2,U3,U4,U5 >\n            , aux::has_type< F< U1,U2,U3,U4,U5 > >::value\n            >\n\n    {\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename 
mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename 
deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_fold_impl<\n          ( (N - 4) < 0 
? 0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< 
ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type 
fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set;\n\ntemplate<\n     \n    >\nstruct set<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set0<  >\n{\n    typedef set0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct set<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set1<T0>\n{\n    typedef typename set1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct set<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set2< T0,T1 >\n{\n    typedef typename set2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct set<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set3< T0,T1,T2 >\n{\n    typedef typename set3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct set<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set4< T0,T1,T2,T3 >\n{\n    typedef 
typename set4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct set<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set5< T0,T1,T2,T3,T4 >\n{\n    typedef typename set5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename set6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename set7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename set8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct set<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : set14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename set14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : set15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename set15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : set16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename set16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : set17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename set17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : set18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename set18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : set19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename set19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct set\n    : set20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename set20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c;\n\ntemplate<\n      typename T\n    >\nstruct set_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set0_c<T>\n{\n    typedef typename set0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct set_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set1_c< T,C0 >\n{\n    typedef typename set1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct set_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set2_c< T,C0,C1 >\n{\n    typedef typename set2_c< T,C0,C1 >::type type;\n};\n\ntemplate<\n  
    typename T, long C0, long C1, long C2\n    >\nstruct set_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set3_c< T,C0,C1,C2 >\n{\n    typedef typename set3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename set4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename set5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename set6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    
typedef typename set7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , 
long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename set14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename set15_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename set16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename set17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : set18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename set18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, 
long C15, long C16, long C17, long C18\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : set19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename set19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c\n    : set20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename set20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { 
namespace mpl {\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  << BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  >> BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n    : times< times< times< times< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct times< N1,N2,N3,N4,na >\n\n    : times< times< times< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct times< N1,N2,N3,na,na >\n\n    : times< times< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct times< N1,N2,na,na,na >\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  * BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< int size, typename F, typename Args >\nstruct unpack_args_impl;\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 0,F,Args >\n    : apply0<\n          F\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 1,F,Args >\n    : apply1<\n          F\n        , typename at_c< Args,0 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 2,F,Args >\n    : apply2<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 3,F,Args >\n    : apply3<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 4,F,Args >\n    : apply4<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 5,F,Args >\n    : apply5<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        , typename at_c< Args,4 >::type\n        >\n{\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n\n        : aux::unpack_args_impl< size<Args>::value,F, Args >\n\n    {\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, 
unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector;\n\ntemplate<\n     \n    >\nstruct vector<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct vector<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct vector<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct vector<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct vector<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        
>\n    : vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename 
T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct vector<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef 
typename vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/mwcw/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c;\n\ntemplate<\n      typename T\n    >\nstruct vector_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector0_c<T>\n{\n    typedef typename vector0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct vector_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector1_c< T, T(C0) >\n{\n    typedef typename vector1_c< T, T(C0) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct vector_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector2_c< T, T(C0), T(C1) >\n{\n    typedef typename 
vector2_c< T, T(C0), T(C1) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct vector_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector3_c< T, T(C0), T(C1), T(C2) >\n{\n    typedef typename vector3_c< T, T(C0), T(C1), T(C2) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector4_c< T, T(C0), T(C1), T(C2), T(C3) >\n{\n    typedef typename vector4_c< T, T(C0), T(C1), T(C2), T(C3) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >\n{\n    typedef typename vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >\n{\n    typedef typename vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct vector_c<\n          
T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >\n{\n    typedef typename vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >\n{\n    typedef typename vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >\n{\n    typedef typename vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >\n{\n    typedef typename vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >::type type;\n};\n\ntemplate<\n      
typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >\n{\n    typedef typename vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >\n{\n    typedef typename vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >\n{\n    typedef typename vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, 
C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >\n{\n    typedef typename vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >\n{\n    typedef typename vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >\n{\n    typedef typename vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, 
C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >\n{\n    typedef typename vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >\n{\n    typedef typename vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >\n{\n    typedef typename vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, 
long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c\n    : vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >\n{\n    typedef typename vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              
advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool C_ > struct and_impl\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : false_\n    {\n    };\n};\n\ntemplate<> struct and_impl<true>\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : and_impl<\n              BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n            >::template result_< T2,T3,T4,true_ >\n    {\n    };\n};\n\ntemplate<>\nstruct and_impl<true>\n    ::result_< true_,true_,true_,true_ >\n        : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        >::template result_< T2,T3,T4,T5 >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\nnamespace aux {\n\ntemplate<>\nstruct apply_chooser<0>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef apply0<\n              F\n            > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\nnamespace aux {\n\ntemplate<>\nstruct apply_chooser<1>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef apply1<\n              F, T1\n            > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\nnamespace aux {\n\ntemplate<>\nstruct apply_chooser<2>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef apply2<\n              F, T1, T2\n       
     > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\nnamespace aux {\n\ntemplate<>\nstruct apply_chooser<3>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef apply3<\n              F, T1, T2, T3\n            > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4\n\n    : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\nnamespace aux {\n\ntemplate<>\nstruct apply_chooser<4>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef apply4<\n              F, T1, T2, T3, T4\n            > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\nnamespace aux {\n\ntemplate<>\nstruct apply_chooser<5>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef apply5<\n              F, T1, T2, T3, T4, T5\n            > type;\n    };\n};\n\n} // namespace aux\n\nnamespace aux 
{\n\ntemplate< typename T >\nstruct is_apply_arg\n{\n    static bool const value  = true;\n};\n\ntemplate<>\nstruct is_apply_arg<na>\n{\n    static bool const value  = false;\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    >\nstruct apply_count_args\n{\n    static int const value  = is_apply_arg<T1>::value + is_apply_arg<T2>::value + is_apply_arg<T3>::value + is_apply_arg<T4>::value + is_apply_arg<T5>::value;\n\n};\n\n}\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply\n    : aux::apply_chooser<\n          aux::apply_count_args< T1,T2,T3,T4,T5 >::value\n        >::template result_< F,T1,T2,T3,T4,T5 >::type\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< BOOST_AUX_NTTP_DECL(int, arity_) > struct apply_chooser;\n}\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n\n    , typename has_apply_ = typename aux::has_apply<F>::type\n\n    >\nstruct apply_wrap0\n\n    : F::template apply<  >\n{\n};\n\ntemplate<\n      typename F, typename T1\n\n    >\nstruct apply_wrap1\n\n    : F::template apply<T1>\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n\n    >\nstruct apply_wrap2\n\n    : F::template apply< T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n\n    >\nstruct apply_wrap3\n\n    : F::template apply< T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n\n    >\nstruct apply_wrap4\n\n    : F::template apply< T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n\n    >\nstruct apply_wrap5\n\n    : F::template apply< T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool >\nstruct resolve_arg_impl\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct resolve_arg_impl<true>\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef typename apply_wrap5<\n              T\n            , U1, U2, U3, U4, U5\n            >::type type;\n    };\n};\n\ntemplate< typename T > struct is_bind_template;\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n    : resolve_arg_impl< is_bind_template<T>::value >\n            ::template result_< T,U1,U2,U3,U4,U5 >\n{\n};\n\ntemplate< int arity_ > struct bind_chooser;\n\naux::no_tag is_bind_helper(...);\ntemplate< typename T > aux::no_tag is_bind_helper(protect<T>*);\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\naux::yes_tag is_bind_helper(bind< F,T1,T2,T3,T4,T5 >*);\n\ntemplate< int N >\naux::yes_tag is_bind_helper(arg<N>*);\n\ntemplate< bool is_ref_  = true >\nstruct is_bind_template_impl\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value  = false);\n    };\n};\n\ntemplate<>\nstruct is_bind_template_impl<false>\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n              
sizeof(aux::is_bind_helper(static_cast<T*>(0)))\n                == sizeof(aux::yes_tag)\n            );\n    };\n};\n\ntemplate< typename T > struct is_bind_template\n    : is_bind_template_impl< ::boost::detail::is_reference_impl<T>::value >\n        ::template result_<T>\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F\n    >\naux::yes_tag\nis_bind_helper(bind0<F>*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<0>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind0<F> type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1\n    >\naux::yes_tag\nis_bind_helper(bind1< F,T1 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\nnamespace aux 
{\n\ntemplate<>\nstruct bind_chooser<1>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind1< F,T1 > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\naux::yes_tag\nis_bind_helper(bind2< F,T1,T2 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<2>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind2< F,T1,T2 > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename 
apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\naux::yes_tag\nis_bind_helper(bind3< F,T1,T2,T3 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<3>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind3< F,T1,T2,T3 > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\naux::yes_tag\nis_bind_helper(bind4< F,T1,T2,T3,T4 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<4>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    
{\n        typedef bind4< F,T1,T2,T3,T4 > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\naux::yes_tag\nis_bind_helper(bind5< F,T1,T2,T3,T4,T5 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<5>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind5< F,T1,T2,T3,T4,T5 > type;\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_bind_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_bind_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    >\nstruct bind_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          
is_bind_arg<T1>::value + is_bind_arg<T2>::value \n        + is_bind_arg<T3>::value + is_bind_arg<T4>::value \n        + is_bind_arg<T5>::value\n        );\n\n};\n\n}\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : aux::bind_chooser<\n          aux::bind_count_args< T1,T2,T3,T4,T5 >::value\n        >::template result_< F,T1,T2,T3,T4,T5 >::type\n{\n};\n\nBOOST_MPL_AUX_ARITY_SPEC(\n      6\n    , bind\n    )\n\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(\n      6\n    , bind\n    )\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool >\nstruct resolve_arg_impl\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct resolve_arg_impl<true>\n{\n    template<\n          typename T, typename U1, typename U2, typename U3\n        , typename U4, typename U5\n        >\n    struct result_\n    {\n        typedef typename apply_wrap5<\n              T\n            , U1, U2, U3, U4, U5\n            >::type type;\n    };\n};\n\ntemplate< typename T > struct is_bind_template;\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n    : resolve_arg_impl< is_bind_template<T>::value >\n            ::template result_< T,U1,U2,U3,U4,U5 >\n{\n};\n\ntemplate< typename T >\nstruct replace_unnamed_arg_impl\n{\n    template< typename Arg > struct result_\n    {\n        typedef Arg next;\n        typedef T type;\n    };\n};\n\ntemplate<>\nstruct replace_unnamed_arg_impl< arg< -1 > >\n{\n    template< typename Arg > struct result_\n    {\n        typedef typename next<Arg>::type next;\n        typedef Arg type;\n    };\n};\n\ntemplate< typename T, typename Arg >\nstruct replace_unnamed_arg\n    : replace_unnamed_arg_impl<T>::template result_<Arg>\n{\n};\n\ntemplate< int arity_ > struct bind_chooser;\n\naux::no_tag is_bind_helper(...);\ntemplate< typename T > aux::no_tag is_bind_helper(protect<T>*);\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, 
typename T4\n    , typename T5\n    >\naux::yes_tag is_bind_helper(bind< F,T1,T2,T3,T4,T5 >*);\n\ntemplate< int N >\naux::yes_tag is_bind_helper(arg<N>*);\n\ntemplate< bool is_ref_  = true >\nstruct is_bind_template_impl\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value  = false);\n    };\n};\n\ntemplate<>\nstruct is_bind_template_impl<false>\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n              sizeof(aux::is_bind_helper(static_cast<T*>(0)))\n                == sizeof(aux::yes_tag)\n            );\n    };\n};\n\ntemplate< typename T > struct is_bind_template\n    : is_bind_template_impl< ::boost::detail::is_reference_impl<T>::value >\n        ::template result_<T>\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F\n    >\naux::yes_tag\nis_bind_helper(bind0<F>*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<0>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind0<F> type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, 
typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1\n    >\naux::yes_tag\nis_bind_helper(bind1< F,T1 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<1>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind1< F,T1 > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type 
a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\naux::yes_tag\nis_bind_helper(bind2< F,T1,T2 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<2>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind2< F,T1,T2 > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n      
        f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\naux::yes_tag\nis_bind_helper(bind3< F,T1,T2,T3 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<3>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind3< F,T1,T2,T3 > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< 
a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\naux::yes_tag\nis_bind_helper(bind4< F,T1,T2,T3,T4 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<4>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind4< F,T1,T2,T3,T4 > type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef 
aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\naux::yes_tag\nis_bind_helper(bind5< F,T1,T2,T3,T4,T5 >*);\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<5>\n{\n    template<\n          typename F, typename T1, typename T2, typename T3, typename T4\n        , typename T5\n        >\n    struct result_\n    {\n        typedef bind5< F,T1,T2,T3,T4,T5 > type;\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_bind_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_bind_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    >\nstruct bind_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_bind_arg<T1>::value + is_bind_arg<T2>::value \n        + is_bind_arg<T3>::value + is_bind_arg<T4>::value \n        + is_bind_arg<T5>::value\n        );\n\n};\n\n}\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : aux::bind_chooser<\n          aux::bind_count_args< T1,T2,T3,T4,T5 >::value\n        >::template 
result_< F,T1,T2,T3,T4,T5 >::type\n{\n};\n\nBOOST_MPL_AUX_ARITY_SPEC(\n      6\n    , bind\n    )\n\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(\n      6\n    , bind\n    )\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct bind;\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitand_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitand_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitand_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n\n    : if_<\n\n          is_na<N3>\n        , bitand_2< N1,N2 >\n        , bitand_<\n              bitand_2< N1,N2 >\n           
 , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitand_2\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitand_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  & BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitor_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitor_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitor_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n\n    : if_<\n\n          is_na<N3>\n        , bitor_2< N1,N2 >\n        , bitor_<\n              bitor_2< N1,N2 >\n            , N3, N4, 
N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitor_2\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitor_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  | BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitxor_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct bitxor_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct bitxor_2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n\n    : if_<\n\n          is_na<N3>\n        , bitxor_2< N1,N2 >\n        , bitxor_<\n              bitxor_2< N1,N2 >\n           
 , N3, N4, N5\n            >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct bitxor_2\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, bitxor_2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  ^ BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct deque_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef vector0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector2<\n              T0, T1\n            >::type 
type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename 
T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux 
{\n\ntemplate<>\nstruct deque_chooser<10>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, 
typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector16<\n          
    T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace 
aux\n\nnamespace aux {\n\ntemplate<>\nstruct deque_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_deque_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_deque_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct deque_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_deque_arg<T1>::value + is_deque_arg<T2>::value \n        + is_deque_arg<T3>::value + is_deque_arg<T4>::value \n        + is_deque_arg<T5>::value + is_deque_arg<T6>::value \n        + is_deque_arg<T7>::value + is_deque_arg<T8>::value \n        + is_deque_arg<T9>::value + is_deque_arg<T10>::value \n        + is_deque_arg<T11>::value + is_deque_arg<T12>::value \n        + is_deque_arg<T13>::value + is_deque_arg<T14>::value \n        + is_deque_arg<T15>::value + is_deque_arg<T16>::value \n        + is_deque_arg<T17>::value + is_deque_arg<T18>::value \n        + is_deque_arg<T19>::value + is_deque_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, 
typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque_impl\n{\n    typedef aux::deque_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::deque_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque\n    : aux::deque_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::deque_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct divides_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct divides_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct divides2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n\n    : if_<\n\n          is_na<N3>\n        , divides2< N1,N2 >\n        , divides<\n              divides2< N1,N2 >\n            , N3, N4, N5\n       
     >\n        >::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct divides2\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, divides2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  / BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct equal_to_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct equal_to_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value  == BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate< int N >\nstruct fold_chunk;\n\ntemplate<> struct fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef state1 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        
\n\n        typedef state2 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef state3 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef state4 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< int N >\nstruct fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename 
ForwardOp\n        >\n    struct result_\n    {\n        typedef fold_impl<\n              4\n            , First\n            , Last\n            , State\n            , ForwardOp\n            > chunk_;\n\n        typedef fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , typename chunk_::iterator\n            , Last\n            , typename chunk_::state\n            , ForwardOp\n            > res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , fold_null_step< Last,State >\n            , fold_step< First,Last,State,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_step\n{\n    typedef fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        > chunk_;\n\n    typedef typename chunk_::state state;\n    typedef typename chunk_::iterator iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n    : fold_chunk<N>\n        ::template result_< 
First,Last,State,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n   \n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>, Tag >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > class F\n    , typename T1\n    , 
typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename 
le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n     
   , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          
is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>, Tag >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\n/// workaround for MWCW 8.3+/EDG < 303, leads to ambiguity on Digital Mars\n\ntemplate<\n      typename F, typename Tag1, typename Tag2\n    >\nstruct lambda<\n          lambda< F,Tag1 >\n        , Tag2\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef aux::le_result2<is_le, Tag2, mpl::lambda, l1, l2> le_result_;\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< integral_c_tag,integral_c_tag 
>\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value > BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_equal_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct greater_equal_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace boost 
{ namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C1, bool C2 >\nstruct inherit2_impl\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T1, T2\n    {\n        typedef Derived type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< false,true >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T1\n    {\n        typedef T1 type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< true,false >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n        : T2\n    {\n        typedef T2 type_;\n    };\n};\n\ntemplate<>\nstruct inherit2_impl< true,true >\n{\n    template< typename Derived, typename T1, typename T2 > struct result_\n    {\n        typedef T1 type_;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : aux::inherit2_impl<\n          is_empty_base<T1>::value\n        , is_empty_base<T2>::value\n        >::template result_< inherit2< T1,T2 >,T1, T2 >\n{\n    typedef typename inherit2::type_ type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename 
T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate< int N >\nstruct iter_fold_chunk;\n\ntemplate<> struct iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef state1 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef state2 state;\n       
 typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef state3 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State state0;\n        typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef state4 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< int N >\nstruct iter_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef iter_fold_impl<\n              4\n            , First\n            , Last\n            , State\n            , ForwardOp\n          
  > chunk_;\n\n        typedef iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , typename chunk_::iterator\n            , Last\n            , typename chunk_::state\n            , ForwardOp\n            > res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , iter_fold_null_step< Last,State >\n            , iter_fold_step< First,Last,State,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_step\n{\n    typedef iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        > chunk_;\n\n    typedef typename chunk_::state state;\n    typedef typename chunk_::iterator iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n    : iter_fold_chunk<N>\n        ::template result_< First,Last,State,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 
> struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N2)::value > BOOST_MPL_AUX_VALUE_WKND(N1)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_equal_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct less_equal_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct 
less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct list_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef list0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list2<\n              T0, T1\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , 
typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<10>\n{\n  
  template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , 
typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, 
T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_chooser<20>\n{\n    template<\n 
         typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename list20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_list_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_list_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct list_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_list_arg<T1>::value + is_list_arg<T2>::value \n        + is_list_arg<T3>::value + is_list_arg<T4>::value \n        + is_list_arg<T5>::value + is_list_arg<T6>::value \n        + is_list_arg<T7>::value + is_list_arg<T8>::value \n        + is_list_arg<T9>::value + is_list_arg<T10>::value \n        + is_list_arg<T11>::value + is_list_arg<T12>::value \n        + is_list_arg<T13>::value + is_list_arg<T14>::value \n        + is_list_arg<T15>::value + is_list_arg<T16>::value \n        + is_list_arg<T17>::value + is_list_arg<T18>::value \n        + is_list_arg<T19>::value + is_list_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename 
T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list_impl\n{\n    typedef aux::list_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::list_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list\n    : aux::list_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::list_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct list_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list1_c<\n              T, C0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list2_c<\n              T, C0, C1\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long 
C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list3_c<\n              T, C0, C1, C2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list4_c<\n              T, C0, C1, C2, C3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list5_c<\n              T, C0, C1, C2, C3, C4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list6_c<\n              T, C0, C1, C2, C3, C4, C5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long 
C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list7_c<\n              T, C0, C1, C2, C3, C4, C5, C6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list8_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list9_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list10_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long 
C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list11_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list12_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list13_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list14_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long 
C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list15_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list16_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list17_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list18_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
list_c_chooser<19>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list19_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct list_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename list20_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_list_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_list_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct list_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_list_c_arg<C1>::value + is_list_c_arg<C2>::value \n        + is_list_c_arg<C3>::value + is_list_c_arg<C4>::value \n        + is_list_c_arg<C5>::value + is_list_c_arg<C6>::value \n        + is_list_c_arg<C7>::value + is_list_c_arg<C8>::value \n        + is_list_c_arg<C9>::value + is_list_c_arg<C10>::value \n        + is_list_c_arg<C11>::value + is_list_c_arg<C12>::value \n        + 
is_list_c_arg<C13>::value + is_list_c_arg<C14>::value \n        + is_list_c_arg<C15>::value + is_list_c_arg<C16>::value \n        + is_list_c_arg<C17>::value + is_list_c_arg<C18>::value \n        + is_list_c_arg<C19>::value + is_list_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c_impl\n{\n    typedef aux::list_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::list_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c\n    : aux::list_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::list_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct map_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef map0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map2<\n              T0, T1\n            >::type type;\n\n    };\n};\n\n} // 
namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, 
typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<10>\n{\n    template<\n          typename T0, 
typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, 
typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct map_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, 
typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename map20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_map_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_map_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct map_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_map_arg<T1>::value + is_map_arg<T2>::value \n        + is_map_arg<T3>::value + is_map_arg<T4>::value \n        + is_map_arg<T5>::value + is_map_arg<T6>::value \n        + is_map_arg<T7>::value + is_map_arg<T8>::value \n        + is_map_arg<T9>::value + is_map_arg<T10>::value \n        + is_map_arg<T11>::value + is_map_arg<T12>::value \n        + is_map_arg<T13>::value + is_map_arg<T14>::value \n        + is_map_arg<T15>::value + is_map_arg<T16>::value \n        + is_map_arg<T17>::value + is_map_arg<T18>::value \n        + is_map_arg<T19>::value + is_map_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, 
typename T19\n    >\nstruct map_impl\n{\n    typedef aux::map_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::map_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map\n    : aux::map_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::map_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct minus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct minus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct minus2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n\n    : if_<\n\n          is_na<N3>\n        , minus2< N1,N2 >\n        , minus<\n              minus2< N1,N2 >\n            , N3, N4, N5\n            >\n        
>::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct minus2\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, minus2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  - BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct modulus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct modulus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct modulus_impl< integral_c_tag,integral_c_tag 
>\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  % BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct not_equal_to_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct not_equal_to_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { namespace 
mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< bool C_ > struct or_impl\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : true_\n    {\n    };\n};\n\ntemplate<> struct or_impl<false>\n{\n    template<\n          typename T1, typename T2, typename T3, typename T4\n        >\n    struct result_\n        : or_impl<\n              BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n            >::template result_< T2,T3,T4,false_ >\n    {\n    };\n};\n\ntemplate<>\nstruct or_impl<false>\n    ::result_< false_,false_,false_,false_ >\n        : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        >::template result_< T2,T3,T4,T5 >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct plus_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct plus_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct plus2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n\n    : if_<\n\n          is_na<N3>\n        , plus2< N1,N2 >\n        , plus<\n              plus2< N1,N2 >\n            , N3, N4, N5\n            >\n        >::type\n\n{\n    
BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct plus2\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, plus2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  + BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\ntemplate< bool > struct quote_impl\n{\n    template< typename T > struct result_\n        : T\n    {\n    };\n};\n\ntemplate<> struct quote_impl<false>\n{\n    template< typename T > struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<\n      template< typename P1 > class F\n    , typename Tag = void_\n    >\nstruct quote1\n{\n    template< typename U1 > struct apply\n\n        : quote_impl< aux::has_type< F<U1> >::value >\n            ::template result_< F<U1> >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename Tag = void_\n    >\nstruct quote2\n{\n    template< typename U1, typename U2 > struct apply\n\n        : quote_impl< aux::has_type< F< U1,U2 > >::value >\n            ::template result_< F< U1,U2 > >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename Tag = void_\n    >\nstruct quote3\n{\n    template< typename U1, typename U2, typename U3 > struct apply\n\n        : quote_impl< aux::has_type< F< U1,U2,U3 > >::value >\n            ::template result_< F< U1,U2,U3 > >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename Tag = void_\n    >\nstruct quote4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        >\n    struct apply\n\n        : quote_impl< aux::has_type< F< U1,U2,U3,U4 > >::value >\n            ::template result_< F< U1,U2,U3,U4 > >\n\n    {\n    };\n};\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , 
typename P5\n        >\n      class F\n    , typename Tag = void_\n    >\nstruct quote5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        , typename U5\n        >\n    struct apply\n\n        : quote_impl< aux::has_type< F< U1,U2,U3,U4,U5 > >::value >\n            ::template result_< F< U1,U2,U3,U4,U5 > >\n\n    {\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate< long N >\nstruct reverse_fold_chunk;\n\ntemplate<> struct reverse_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct 
result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 
iterator;\n    };\n};\n\ntemplate<> struct reverse_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n        typedef typename 
mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef reverse_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n        typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n        typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n        typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last 
>::type\n            , reverse_fold_null_step< Last,State >\n            , reverse_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_step\n{\n    typedef reverse_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n    : reverse_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk;\n\ntemplate<> struct reverse_iter_fold_chunk<0>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef fwd_state0 bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter0 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        \n\n        typedef fwd_state1 bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        typedef bkwd_state0 state;\n        typedef iter1 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<2>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n       
 typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        \n\n        typedef fwd_state2 bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter2 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<3>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        \n\n        typedef fwd_state3 bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter3 iterator;\n    };\n};\n\ntemplate<> struct reverse_iter_fold_chunk<4>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n   
 {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef fwd_state4 bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef iter4 iterator;\n    };\n};\n\ntemplate< long N >\nstruct reverse_iter_fold_chunk\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n        typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n        typedef typename mpl::next<iter0>::type iter1;\n        typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n        typedef typename mpl::next<iter1>::type iter2;\n        typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n        typedef typename mpl::next<iter2>::type iter3;\n        typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n        typedef typename mpl::next<iter3>::type iter4;\n        \n\n        typedef 
reverse_iter_fold_impl<\n              ( (N - 4) < 0 ? 0 : N - 4 )\n            , iter4\n            , Last\n            , fwd_state4\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n\n        typedef typename nested_chunk::state bkwd_state4;\n        typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n        typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n        typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n        typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n        \n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step;\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct reverse_iter_fold_null_step\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<>\nstruct reverse_iter_fold_chunk< -1 >\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef typename if_<\n              typename is_same< First,Last >::type\n            , reverse_iter_fold_null_step< Last,State >\n            , reverse_iter_fold_step< First,Last,State,BackwardOp,ForwardOp >\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_step\n{\n    typedef reverse_iter_fold_chunk< -1 >::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2< 
ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n    : reverse_iter_fold_chunk<N>\n        ::template result_< First,Last,State,BackwardOp,ForwardOp >\n{\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct set_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef set0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set2<\n              T0, T1\n            >::type type;\n\n    };\n};\n\n} // 
namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, 
typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<10>\n{\n    template<\n          typename T0, 
typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, 
typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set16<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    
};\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, 
typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename set20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_set_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_set_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct set_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_set_arg<T1>::value + is_set_arg<T2>::value \n        + is_set_arg<T3>::value + is_set_arg<T4>::value \n        + is_set_arg<T5>::value + is_set_arg<T6>::value \n        + is_set_arg<T7>::value + is_set_arg<T8>::value \n        + is_set_arg<T9>::value + is_set_arg<T10>::value \n        + is_set_arg<T11>::value + is_set_arg<T12>::value \n        + is_set_arg<T13>::value + is_set_arg<T14>::value \n        + is_set_arg<T15>::value + is_set_arg<T16>::value \n        + is_set_arg<T17>::value + is_set_arg<T18>::value \n        + is_set_arg<T19>::value + is_set_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, 
typename T19\n    >\nstruct set_impl\n{\n    typedef aux::set_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::set_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set\n    : aux::set_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::set_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct set_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set1_c<\n              T, C0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set2_c<\n              T, C0, C1\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set3_c<\n              T, C0, C1, C2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set4_c<\n              T, C0, C1, C2, C3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set5_c<\n              T, C0, C1, C2, C3, C4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set6_c<\n              T, C0, C1, C2, C3, C4, C5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long 
C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set7_c<\n              T, C0, C1, C2, C3, C4, C5, C6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set8_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set9_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set10_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long 
C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set11_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set12_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set13_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set14_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n       
 , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set15_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set16_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set17_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set18_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<19>\n{\n    template<\n          typename 
T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set19_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct set_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename set20_c<\n              T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_set_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_set_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct set_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_set_c_arg<C1>::value + is_set_c_arg<C2>::value \n        + is_set_c_arg<C3>::value + is_set_c_arg<C4>::value \n        + is_set_c_arg<C5>::value + is_set_c_arg<C6>::value \n        + is_set_c_arg<C7>::value + is_set_c_arg<C8>::value \n        + is_set_c_arg<C9>::value + is_set_c_arg<C10>::value \n        + is_set_c_arg<C11>::value + is_set_c_arg<C12>::value \n        + is_set_c_arg<C13>::value + is_set_c_arg<C14>::value \n        + is_set_c_arg<C15>::value + 
is_set_c_arg<C16>::value \n        + is_set_c_arg<C17>::value + is_set_c_arg<C18>::value \n        + is_set_c_arg<C19>::value + is_set_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c_impl\n{\n    typedef aux::set_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::set_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c\n    : aux::set_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::set_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_left_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_left_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { namespace 
mpl {\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  << BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_right_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct shift_right_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_right)\n\n}}\n\nnamespace 
boost { namespace mpl {\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  >> BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< bool >\nstruct template_arity_impl\n{\n    template< typename F > struct result_\n        : mpl::int_< -1 >\n    {\n    };\n};\n\ntemplate<>\nstruct template_arity_impl<true>\n{\n    template< typename F > struct result_\n        : F::arity\n    {\n    };\n};\n\ntemplate< typename F >\nstruct template_arity\n    : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value >\n        ::template result_<F>\n{\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct times_impl< na,integral_c_tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate<> struct times_impl< integral_c_tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\n/// forward declaration\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct times2;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n\n    : if_<\n\n          is_na<N3>\n        , times2< N1,N2 >\n        , times<\n              times2< N1,N2 >\n            , N3, N4, N5\n            >\n        
>::type\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1\n    , typename N2\n    >\nstruct times2\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, times2, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  * BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, size) > struct unpack_args_impl\n{\n    template< typename F, typename Args > struct apply;\n};\n\ntemplate<> struct unpack_args_impl<0>\n{\n    template< typename F, typename Args > struct apply\n        : apply0<\n              F\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<1>\n{\n    template< typename F, typename Args > struct apply\n        : apply1<\n              F\n            , typename at_c< Args,0 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<2>\n{\n    template< typename F, typename Args > struct apply\n        : apply2<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<3>\n{\n    template< typename F, typename Args > struct apply\n        : apply3<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename at_c< Args,2 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<4>\n{\n    template< typename F, typename Args > struct apply\n        : apply4<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n            >\n    {\n    };\n};\n\ntemplate<> struct unpack_args_impl<5>\n{\n    template< typename F, typename Args > struct apply\n        : apply5<\n              F\n            , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n            , typename 
at_c< Args,2 >::type, typename at_c< Args,3 >::type\n            , typename at_c< Args,4 >::type\n            >\n    {\n    };\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n\n        : aux::unpack_args_impl< size<Args>::value >\n            ::template apply< F,Args >\n\n    {\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct vector_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<0>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef vector0<\n             \n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<1>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector1<\n              T0\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<2>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector2<\n              T0, T1\n            >::type 
type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<3>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector3<\n              T0, T1, T2\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<4>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector4<\n              T0, T1, T2, T3\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<5>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector5<\n              T0, T1, T2, T3, T4\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<6>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, 
typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector6<\n              T0, T1, T2, T3, T4, T5\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<7>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector7<\n              T0, T1, T2, T3, T4, T5, T6\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<8>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector8<\n              T0, T1, T2, T3, T4, T5, T6, T7\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<9>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector9<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux 
{\n\ntemplate<>\nstruct vector_chooser<10>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector10<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<11>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector11<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<12>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector12<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<13>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename 
T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector13<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<14>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector14<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<15>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector15<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<16>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector16<\n  
            T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<17>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector17<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<18>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector18<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<19>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector19<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18\n            >::type type;\n\n    };\n};\n\n} // namespace 
aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_chooser<20>\n{\n    template<\n          typename T0, typename T1, typename T2, typename T3, typename T4\n        , typename T5, typename T6, typename T7, typename T8, typename T9\n        , typename T10, typename T11, typename T12, typename T13, typename T14\n        , typename T15, typename T16, typename T17, typename T18, typename T19\n        >\n    struct result_\n    {\n        typedef typename vector20<\n              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< typename T >\nstruct is_vector_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_vector_arg<na>\n{\n    BOOST_STATIC_CONSTANT(bool, value  = false);\n};\n\ntemplate<\n      typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename T6, typename T7, typename T8, typename T9, typename T10\n    , typename T11, typename T12, typename T13, typename T14, typename T15\n    , typename T16, typename T17, typename T18, typename T19, typename T20\n    >\nstruct vector_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_vector_arg<T1>::value + is_vector_arg<T2>::value \n        + is_vector_arg<T3>::value + is_vector_arg<T4>::value \n        + is_vector_arg<T5>::value + is_vector_arg<T6>::value \n        + is_vector_arg<T7>::value + is_vector_arg<T8>::value \n        + is_vector_arg<T9>::value + is_vector_arg<T10>::value \n        + is_vector_arg<T11>::value + is_vector_arg<T12>::value \n        + is_vector_arg<T13>::value + is_vector_arg<T14>::value \n        + is_vector_arg<T15>::value + is_vector_arg<T16>::value \n        + is_vector_arg<T17>::value + is_vector_arg<T18>::value \n        + is_vector_arg<T19>::value + is_vector_arg<T20>::value\n        );\n\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , 
typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector_impl\n{\n    typedef aux::vector_count_args<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        > arg_num_;\n\n    typedef typename aux::vector_chooser< arg_num_::value >\n        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector\n    : aux::vector_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type\n{\n    typedef typename aux::vector_impl<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ctps/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< int N >\nstruct vector_c_chooser;\n\n}\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<0>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector0_c<\n              T\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<1>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector1_c<\n              T, T(C0)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<2>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector2_c<\n              T, T(C0), T(C1)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<3>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long 
C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector3_c<\n              T, T(C0), T(C1), T(C2)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<4>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector4_c<\n              T, T(C0), T(C1), T(C2), T(C3)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<5>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector5_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<6>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector6_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<7>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n      
  , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector7_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<8>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector8_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<9>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector9_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<10>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector10_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
vector_c_chooser<11>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector11_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<12>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector12_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<13>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector13_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<14>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename 
vector14_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<15>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector15_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<16>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector16_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<17>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector17_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct 
vector_c_chooser<18>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector18_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<19>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector19_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate<>\nstruct vector_c_chooser<20>\n{\n    template<\n          typename T, long C0, long C1, long C2, long C3, long C4, long C5\n        , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n        , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n        >\n    struct result_\n    {\n        typedef typename vector20_c<\n              T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19)\n            >::type type;\n\n    };\n};\n\n} // namespace aux\n\nnamespace aux {\n\ntemplate< long C >\nstruct is_vector_c_arg\n{\n    BOOST_STATIC_CONSTANT(bool, value  = true);\n};\n\ntemplate<>\nstruct is_vector_c_arg<LONG_MAX>\n{\n    BOOST_STATIC_CONSTANT(bool, value 
 = false);\n};\n\ntemplate<\n      long C1, long C2, long C3, long C4, long C5, long C6, long C7, long C8\n    , long C9, long C10, long C11, long C12, long C13, long C14, long C15\n    , long C16, long C17, long C18, long C19, long C20\n    >\nstruct vector_c_count_args\n{\n    BOOST_STATIC_CONSTANT(int, value =\n          is_vector_c_arg<C1>::value + is_vector_c_arg<C2>::value \n        + is_vector_c_arg<C3>::value + is_vector_c_arg<C4>::value \n        + is_vector_c_arg<C5>::value + is_vector_c_arg<C6>::value \n        + is_vector_c_arg<C7>::value + is_vector_c_arg<C8>::value \n        + is_vector_c_arg<C9>::value + is_vector_c_arg<C10>::value \n        + is_vector_c_arg<C11>::value + is_vector_c_arg<C12>::value \n        + is_vector_c_arg<C13>::value + is_vector_c_arg<C14>::value \n        + is_vector_c_arg<C15>::value + is_vector_c_arg<C16>::value \n        + is_vector_c_arg<C17>::value + is_vector_c_arg<C18>::value \n        + is_vector_c_arg<C19>::value + is_vector_c_arg<C20>::value\n        );\n\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c_impl\n{\n    typedef aux::vector_c_count_args<\n          C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        > arg_num_;\n\n    typedef typename aux::vector_c_chooser< arg_num_::value >\n        ::template result_< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long 
C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c\n    : aux::vector_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type\n{\n    typedef typename aux::vector_c_impl<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19\n        >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              
advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl\n    : false_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl< true,T1,T2,T3,T4 >\n    : and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , true_\n        >\n{\n};\n\ntemplate<>\nstruct and_impl<\n          true\n        , true_, true_, true_, true_\n        >\n    : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , and_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          1\n        , apply0\n        , (F )\n        )\n};\n\ntemplate<\n      typename F\n    >\nstruct apply< F,na,na,na,na,na >\n    : apply0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          2\n        , apply1\n        , (F, T1)\n        )\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply< F,T1,na,na,na,na >\n    : apply1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , apply2\n        , (F, T1, T2)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply< F,T1,T2,na,na,na >\n    : apply2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , apply3\n        , (F, T1, T2, T3)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply< F,T1,T2,T3,na,na >\n    : apply3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4\n\n 
   : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , apply4\n        , (F, T1, T2, T3, T4)\n        )\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply< F,T1,T2,T3,T4,na >\n    : apply4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , apply5\n        , (F, T1, T2, T3, T4, T5)\n        )\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply\n    : apply5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply;\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n\n    , typename has_apply_ = typename aux::has_apply<F>::type\n\n    >\nstruct apply_wrap0\n\n    : F::template apply<  >\n{\n};\n\ntemplate< typename F >\nstruct apply_wrap0< F,true_ >\n    : F::apply\n{\n};\n\ntemplate<\n      typename F, typename T1\n\n    >\nstruct apply_wrap1\n\n    : F::template apply<T1>\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n\n    >\nstruct apply_wrap2\n\n    : F::template apply< T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n\n    >\nstruct apply_wrap3\n\n    : F::template apply< T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n\n    >\nstruct apply_wrap4\n\n    : F::template apply< T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n\n    >\nstruct apply_wrap5\n\n    : F::template apply< T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, 
bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    
>\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    
struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              
f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg< -1 >, Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename 
apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    
>\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n  
      typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< 
a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n       
 >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace 
aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct bind;\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n    : bitand_< bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitand_< N1,N2,N3,N4,na >\n\n    : bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitand_< N1,N2,N3,na,na >\n\n    : bitand_< bitand_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitand_< N1,N2,na,na,na >\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitand_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 & n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitand_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n    : bitor_< bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitor_< 
N1,N2,N3,N4,na >\n\n    : bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitor_< N1,N2,N3,na,na >\n\n    : bitor_< bitor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitor_< N1,N2,na,na,na >\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitor_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 | n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitor_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n    : bitxor_< bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct 
bitxor_< N1,N2,N3,N4,na >\n\n    : bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitxor_< N1,N2,N3,na,na >\n\n    : bitxor_< bitxor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitxor_< N1,N2,na,na,na >\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct bitxor_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 ^ n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::bitxor_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque;\n\ntemplate<\n     \n    >\nstruct deque<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct deque<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct deque<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct deque<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct deque<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : 
vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename 
T6, typename T7, typename T8, typename T9\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, 
T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename vector17< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n    : divides< divides< divides< divides< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , divides\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct divides< N1,N2,N3,N4,na 
>\n\n    : divides< divides< divides< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct divides< N1,N2,N3,na,na >\n\n    : divides< divides< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct divides< N1,N2,na,na,na >\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct divides_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 / n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::divides_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value ==\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 3,First,Last,State,ForwardOp >\n{\n   
 typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n{\n    typedef fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,First,Last,State,ForwardOp >\n    : fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n   \n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>, Tag >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > class F\n    , typename T1\n    , 
typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename 
le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n     
   , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          
is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>, Tag >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\n/// workaround for MWCW 8.3+/EDG < 303, leads to ambiguity on Digital Mars\n\ntemplate<\n      typename F, typename Tag1, typename Tag2\n    >\nstruct lambda<\n          lambda< F,Tag1 >\n        , Tag2\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef aux::le_result2<is_le, Tag2, mpl::lambda, l1, l2> le_result_;\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace 
boost { namespace mpl {\n\ntemplate<>\nstruct greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : T1, T2\n{\n    typedef inherit2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, inherit2, (T1, T2))\n};\n\ntemplate< typename T1 >\nstruct inherit2< T1,empty_base >\n{\n    typedef T1 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (T1, empty_base))\n};\n\ntemplate< typename T2 >\nstruct inherit2< empty_base,T2 >\n{\n    typedef T2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, T2))\n};\n\ntemplate<>\nstruct inherit2< empty_base,empty_base >\n{\n    typedef empty_base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, empty_base))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          3\n        , inherit3\n        , ( T1, T2, T3)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          4\n        , inherit4\n        , ( T1, T2, T3, T4)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct 
inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , inherit5\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n    , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 3,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State 
state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n{\n    typedef iter_fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,First,Last,State,ForwardOp >\n    : iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3, lambda, (T, Tag, Protect))\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, 
typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N2)::value >\n             BOOST_MPL_AUX_VALUE_WKND(N1)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, less_equal, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl 
{\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list;\n\ntemplate<\n     \n    >\nstruct list<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list0<  >\n{\n    typedef list0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct list<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list1<T0>\n{\n    typedef typename list1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list2< T0,T1 >\n{\n    typedef typename list2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list3< T0,T1,T2 >\n{\n    typedef typename list3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list4< T0,T1,T2,T3 
>\n{\n    typedef typename list4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list5< T0,T1,T2,T3,T4 >\n{\n    typedef typename list5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename list6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename list7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename list8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    
>\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n 
       >\n    : list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : list15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename list15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : list16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename list16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : list17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename list17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n   
   typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : list18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename list18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : list19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename list19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list\n    : list20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename list20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c;\n\ntemplate<\n      typename T\n    >\nstruct list_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list0_c<T>\n{\n    typedef typename list0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct list_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list1_c< T,C0 >\n{\n    typedef typename list1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct list_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list2_c< T,C0,C1 >\n{\n    typedef typename list2_c< T,C0,C1 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct list_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list3_c< T,C0,C1,C2 >\n{\n    typedef typename list3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename list4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename list5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename list6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : 
list7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    typedef typename list7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long 
C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename list14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename list15_c< 
T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename list16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename list17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : list18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename list18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : list19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename list19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c\n    : list20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename list20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map;\n\ntemplate<\n     \n    >\nstruct map<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map0<  >\n{\n    typedef map0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct map<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map1<T0>\n{\n    typedef typename map1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct map<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map2< T0,T1 >\n{\n    typedef typename map2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct map<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map3< T0,T1,T2 >\n{\n    typedef typename map3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct map<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map4< T0,T1,T2,T3 >\n{\n    typedef 
typename map4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct map<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map5< T0,T1,T2,T3,T4 >\n{\n    typedef typename map5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename map6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename map7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename map8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct map<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : map14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename map14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : map15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename map15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : map16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename map16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : map17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename map17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : map18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename map18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : map19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename map19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct map\n    : map20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename map20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n    : minus< minus< minus< minus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , minus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct minus< N1,N2,N3,N4,na >\n\n    : minus< minus< minus< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct minus< N1,N2,N3,na,na >\n\n    : minus< minus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct minus< N1,N2,na,na,na >\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct minus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 - n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::minus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, modulus, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 
>\nstruct modulus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 % n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct modulus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::modulus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { 
namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n    {\n        BOOST_STATIC_CONSTANT(bool, value =\n             ( BOOST_MPL_AUX_VALUE_WKND(N1)::value !=\n             BOOST_MPL_AUX_VALUE_WKND(N2)::value )\n            );\n        typedef bool_<value> type;\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl\n    : true_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl< false,T1,T2,T3,T4 >\n    : or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , false_\n        >\n{\n};\n\ntemplate<>\nstruct or_impl<\n          false\n        , false_, false_, false_, false_\n        >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , or_\n        , ( T1, T2, T3, T4, T5)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n    : plus< plus< plus< plus< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , plus\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct plus< N1,N2,N3,N4,na >\n\n    : plus< plus< plus< N1,N2 >, N3>, 
N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct plus< N1,N2,N3,na,na >\n\n    : plus< plus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct plus< N1,N2,na,na,na >\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct plus_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 + n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::plus_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename 
mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename 
deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_fold_impl<\n          ( (N - 4) < 0 
? 0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< 
ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type 
fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set;\n\ntemplate<\n     \n    >\nstruct set<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set0<  >\n{\n    typedef set0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct set<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set1<T0>\n{\n    typedef typename set1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct set<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set2< T0,T1 >\n{\n    typedef typename set2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct set<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set3< T0,T1,T2 >\n{\n    typedef typename set3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct set<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set4< T0,T1,T2,T3 >\n{\n    typedef 
typename set4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct set<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set5< T0,T1,T2,T3,T4 >\n{\n    typedef typename set5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename set6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename set7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename set8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct set<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : set14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename set14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : set15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename set15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : set16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename set16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : set17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename set17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : set18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename set18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : set19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename set19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct set\n    : set20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename set20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c;\n\ntemplate<\n      typename T\n    >\nstruct set_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set0_c<T>\n{\n    typedef typename set0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct set_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set1_c< T,C0 >\n{\n    typedef typename set1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct set_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set2_c< T,C0,C1 >\n{\n    typedef typename set2_c< T,C0,C1 >::type type;\n};\n\ntemplate<\n  
    typename T, long C0, long C1, long C2\n    >\nstruct set_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set3_c< T,C0,C1,C2 >\n{\n    typedef typename set3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename set4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename set5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename set6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    
typedef typename set7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , 
long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename set14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename set15_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename set16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename set17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : set18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename set18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, 
long C15, long C16, long C17, long C18\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : set19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename set19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c\n    : set20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename set20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_left, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { 
namespace mpl {\n\nnamespace aux {\ntemplate< typename T, typename Shift, T n, Shift s >\nstruct shift_left_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n << s));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct shift_left_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n        : aux::shift_left_wknd<\n              typename N::value_type\n            , typename S::value_type\n            , N::value\n            , S::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2, shift_right, (N1, N2))\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, 
shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, typename Shift, T n, Shift s >\nstruct shift_right_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n >> s));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n        : aux::shift_right_wknd<\n              typename N::value_type\n            , typename S::value_type\n            , N::value\n            , S::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< bool >\nstruct template_arity_impl\n{\n    template< typename F > struct result_\n        : mpl::int_< -1 >\n    {\n    };\n};\n\ntemplate<>\nstruct template_arity_impl<true>\n{\n    template< typename F > struct result_\n        : F::arity\n    {\n    };\n};\n\ntemplate< typename F >\nstruct template_arity\n    : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value >\n        ::template result_<F>\n{\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n    : times< times< times< times< N1,N2 >, N3>, N4>, N5>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          5\n        , times\n        , ( N1, N2, N3, N4, N5 )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct times< N1,N2,N3,N4,na >\n\n    : times< times< times< 
N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct times< N1,N2,N3,na,na >\n\n    : times< times< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct times< N1,N2,na,na,na >\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate< typename T, T n1, T n2 >\nstruct times_wknd\n{\n    BOOST_STATIC_CONSTANT(T, value  = (n1 * n2));\n    typedef integral_c< T,value > type;\n};\n\n}\n\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n        : aux::times_wknd<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , N1::value\n            , N2::value\n            >::type\n\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< int size, typename F, typename Args >\nstruct unpack_args_impl;\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 0,F,Args >\n    : apply0<\n          F\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 1,F,Args >\n    : apply1<\n          F\n        , typename at_c< Args,0 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 2,F,Args >\n    : apply2<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 3,F,Args >\n    : apply3<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 4,F,Args >\n    : apply4<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 5,F,Args >\n    : apply5<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        , typename at_c< Args,4 >::type\n        >\n{\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n\n        : aux::unpack_args_impl< size<Args>::value,F, Args >\n\n    {\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, 
unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector;\n\ntemplate<\n     \n    >\nstruct vector<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct vector<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct vector<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct vector<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct vector<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        
>\n    : vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename 
T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct vector<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef 
typename vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/no_ttp/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c;\n\ntemplate<\n      typename T\n    >\nstruct vector_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector0_c<T>\n{\n    typedef typename vector0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct vector_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector1_c< T, T(C0) >\n{\n    typedef typename vector1_c< T, T(C0) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct vector_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector2_c< T, T(C0), T(C1) >\n{\n    typedef typename 
vector2_c< T, T(C0), T(C1) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct vector_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector3_c< T, T(C0), T(C1), T(C2) >\n{\n    typedef typename vector3_c< T, T(C0), T(C1), T(C2) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector4_c< T, T(C0), T(C1), T(C2), T(C3) >\n{\n    typedef typename vector4_c< T, T(C0), T(C1), T(C2), T(C3) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >\n{\n    typedef typename vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >\n{\n    typedef typename vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct vector_c<\n          
T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >\n{\n    typedef typename vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >\n{\n    typedef typename vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >\n{\n    typedef typename vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >\n{\n    typedef typename vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >::type type;\n};\n\ntemplate<\n      
typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >\n{\n    typedef typename vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >\n{\n    typedef typename vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >\n{\n    typedef typename vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, 
C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >\n{\n    typedef typename vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >\n{\n    typedef typename vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >\n{\n    typedef typename vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, 
C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >\n{\n    typedef typename vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >\n{\n    typedef typename vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >\n{\n    typedef typename vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, 
long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c\n    : vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >\n{\n    typedef typename vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/advance_backward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_backward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_backward;\ntemplate<>\nstruct advance_backward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_backward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename prior<iter0>::type iter1;\n        typedef typename prior<iter1>::type iter2;\n        typedef typename prior<iter2>::type iter3;\n        typedef typename prior<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_backward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              
advance_backward<4>\n            , Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_backward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/advance_forward.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/advance_forward.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< long N > struct advance_forward;\ntemplate<>\nstruct advance_forward<0>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef iter0 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<1>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef iter1 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<2>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef iter2 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<3>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef iter3 type;\n    };\n};\n\ntemplate<>\nstruct advance_forward<4>\n{\n    template< typename Iterator > struct apply\n    {\n        typedef Iterator iter0;\n        typedef typename next<iter0>::type iter1;\n        typedef typename next<iter1>::type iter2;\n        typedef typename next<iter2>::type iter3;\n        typedef typename next<iter3>::type iter4;\n        typedef iter4 type;\n    };\n};\n\ntemplate< long N >\nstruct advance_forward\n{\n    template< typename Iterator > struct apply\n    {\n        typedef typename apply_wrap1<\n              advance_forward<4>\n            , 
Iterator\n            >::type chunk_result_;\n\n        typedef typename apply_wrap1<\n              advance_forward<(\n                (N - 4) < 0\n                    ? 0\n                    : N - 4\n                    )>\n            , chunk_result_\n            >::type type;\n    };\n};\n\n}}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/and.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/and.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl\n    : false_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct and_impl< true,T1,T2,T3,T4 >\n    : and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , true_\n        >\n{\n};\n\ntemplate<>\nstruct and_impl<\n          true\n        , true_, true_, true_, true_\n        >\n    : true_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = true_, typename T4 = true_, typename T5 = true_\n    >\nstruct and_\n\n    : aux::and_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , and_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/apply.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n    >\nstruct apply0\n\n    : apply_wrap0<\n          typename lambda<F>::type\n       \n        >\n{\n};\n\ntemplate<\n      typename F\n    >\nstruct apply< F,na,na,na,na,na >\n    : apply0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1\n\n    : apply_wrap1<\n          typename lambda<F>::type\n        , T1\n        >\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply< F,T1,na,na,na,na >\n    : apply1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2\n\n    : apply_wrap2<\n          typename lambda<F>::type\n        , T1, T2\n        >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply< F,T1,T2,na,na,na >\n    : apply2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3\n\n    : apply_wrap3<\n          typename lambda<F>::type\n        , T1, T2, T3\n        >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply< F,T1,T2,T3,na,na >\n    : apply3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4\n\n    : apply_wrap4<\n          typename lambda<F>::type\n        , T1, T2, T3, T4\n        >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply< F,T1,T2,T3,T4,na >\n    : apply4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5\n\n    : 
apply_wrap5<\n          typename lambda<F>::type\n        , T1, T2, T3, T4, T5\n        >\n{\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply\n    : apply5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/apply_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct apply;\n\ntemplate<\n      typename F\n    >\nstruct apply0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct apply1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct apply2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct apply3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct apply4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct apply5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/apply_wrap.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/apply_wrap.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F\n\n    , typename has_apply_ = typename aux::has_apply<F>::type\n\n    >\nstruct apply_wrap0\n\n    : F::template apply<  >\n{\n};\n\ntemplate< typename F >\nstruct apply_wrap0< F,true_ >\n    : F::apply\n{\n};\n\ntemplate<\n      typename F, typename T1\n\n    >\nstruct apply_wrap1\n\n    : F::template apply<T1>\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n\n    >\nstruct apply_wrap2\n\n    : F::template apply< T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n\n    >\nstruct apply_wrap3\n\n    : F::template apply< T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n\n    >\nstruct apply_wrap4\n\n    : F::template apply< T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n\n    >\nstruct apply_wrap5\n\n    : F::template apply< T1,T2,T3,T4,T5 >\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/arg.hpp",
    "content": "\n// Copyright Peter Dimov 2001-2002\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/arg.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntemplate<> struct arg< -1 >\n{\n    BOOST_STATIC_CONSTANT(int, value  = -1);\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<1>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 1);\n    typedef arg<2> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U1 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<2>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 2);\n    typedef arg<3> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U2 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<3>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 3);\n    typedef arg<4> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    
{\n        typedef U3 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<4>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 4);\n    typedef arg<5> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U4 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\ntemplate<> struct arg<5>\n{\n    BOOST_STATIC_CONSTANT(int, value  = 5);\n    typedef arg<6> next;\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, tag)\n    BOOST_MPL_AUX_ARG_TYPEDEF(na, type)\n\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n        typedef U5 type;\n        BOOST_MPL_AUX_ASSERT_NOT_NA(type);\n    };\n};\n\nBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/basic_bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/basic_bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n\n     public:\n        typedef typename apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, 
bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    
>\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    
struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef typename aux::resolve_bind_arg< F,U1,U2,U3,U4,U5 >::type f_;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef aux::resolve_bind_arg< T4,U1,U2,U3,U4,U5 > t4;\n        typedef aux::resolve_bind_arg< T5,U1,U2,U3,U4,U5 > t5;\n\n     public:\n        typedef typename apply_wrap5<\n              
f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n/// if_/eval_if specializations\ntemplate< template< typename T1, typename T2, typename T3 > class F, typename Tag >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct if_;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< if_,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef typename if_<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\ntemplate<\n      template< typename T1, typename T2, typename T3 > class F, typename Tag\n    >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct 
eval_if;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< eval_if,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::resolve_bind_arg< T1,U1,U2,U3,U4,U5 > t1;\n        typedef aux::resolve_bind_arg< T2,U1,U2,U3,U4,U5 > t2;\n        typedef aux::resolve_bind_arg< T3,U1,U2,U3,U4,U5 > t3;\n        typedef typename eval_if<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/bind.hpp",
    "content": "\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      typename T, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg< -1 >, Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\ntemplate<\n      int N, typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg< arg<N>, U1, U2, U3, U4, U5 >\n{\n    typedef typename apply_wrap5<mpl::arg<N>, U1, U2, U3, U4, U5>::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg< bind< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5 >\n{\n    typedef bind< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      typename F\n    >\nstruct bind0\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n     public:\n        typedef typename 
apply_wrap0<\n              f_\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind0<F>, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind0<F> f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(1, bind0)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(1, bind0)\n\ntemplate<\n      typename F\n    >\nstruct bind< F,na,na,na,na,na >\n    : bind0<F>\n{\n};\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n     public:\n        typedef typename apply_wrap1<\n              f_\n            , typename t1::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename U1, typename U2, typename U3\n    , typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind1< F,T1 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind1< F,T1 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(2, bind1)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(2, bind1)\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind< F,T1,na,na,na,na >\n    : bind1< F,T1 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    
>\nstruct bind2\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n     public:\n        typedef typename apply_wrap2<\n              f_\n            , typename t1::type, typename t2::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename U1, typename U2\n    , typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind2< F,T1,T2 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind2< F,T1,T2 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(3, bind2)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(3, bind2)\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind< F,T1,T2,na,na,na >\n    : bind2< F,T1,T2 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n  
      typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n     public:\n        typedef typename apply_wrap3<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename U1\n    , typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind3< F,T1,T2,T3 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind3< F,T1,T2,T3 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(4, bind3)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(4, bind3)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind< F,T1,T2,T3,na,na >\n    : bind3< F,T1,T2,T3 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< 
a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n     public:\n        typedef typename apply_wrap4<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename U1, typename U2, typename U3, typename U4, typename U5\n    >\nstruct resolve_bind_arg<\n      bind4< F,T1,T2,T3,T4 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind4< F,T1,T2,T3,T4 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(5, bind4)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(5, bind4)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind< F,T1,T2,T3,T4,na >\n    : bind4< F,T1,T2,T3,T4 >\n{\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n       
 >\n    struct apply\n    {\n     private:\n        typedef aux::replace_unnamed_arg< F, mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg< a0,U1,U2,U3,U4,U5 >::type f_;\n        ///\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef aux::replace_unnamed_arg< T4,n4 > r4;\n        typedef typename r4::type a4;\n        typedef typename r4::next n5;\n        typedef aux::resolve_bind_arg< a4,U1,U2,U3,U4,U5 > t4;\n        ///\n        typedef aux::replace_unnamed_arg< T5,n5 > r5;\n        typedef typename r5::type a5;\n        typedef typename r5::next n6;\n        typedef aux::resolve_bind_arg< a5,U1,U2,U3,U4,U5 > t5;\n        ///\n     public:\n        typedef typename apply_wrap5<\n              f_\n            , typename t1::type, typename t2::type, typename t3::type\n            , typename t4::type, typename t5::type\n            >::type type;\n\n    };\n};\n\nnamespace aux {\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename U1, typename U2, typename U3, typename U4\n    , typename U5\n    >\nstruct resolve_bind_arg<\n      bind5< F,T1,T2,T3,T4,T5 >, U1, U2, U3, U4, U5\n    >\n{\n    typedef bind5< F,T1,T2,T3,T4,T5 > f_;\n    typedef typename apply_wrap5< f_,U1,U2,U3,U4,U5 >::type type;\n};\n\n} // namespace 
aux\n\nBOOST_MPL_AUX_ARITY_SPEC(6, bind5)\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(6, bind5)\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind\n    : bind5< F,T1,T2,T3,T4,T5 >\n{\n};\n\n/// if_/eval_if specializations\ntemplate< template< typename T1, typename T2, typename T3 > class F, typename Tag >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct if_;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< if_,Tag >\n    , T1, T2, T3\n    >\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef typename if_<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\ntemplate<\n      template< typename T1, typename T2, typename T3 > class F, typename Tag\n    >\nstruct quote3;\n\ntemplate< typename T1, typename T2, typename T3 > struct eval_if;\n\ntemplate<\n      typename Tag, typename T1, typename T2, typename T3\n    >\nstruct bind3<\n      quote3< eval_if,Tag >\n    , T1, T2, T3\n    
>\n{\n    template<\n          typename U1 = na, typename U2 = na, typename U3 = na\n        , typename U4 = na, typename U5 = na\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n        typedef aux::replace_unnamed_arg< T1,n1 > r1;\n        typedef typename r1::type a1;\n        typedef typename r1::next n2;\n        typedef aux::resolve_bind_arg< a1,U1,U2,U3,U4,U5 > t1;\n        ///\n        typedef aux::replace_unnamed_arg< T2,n2 > r2;\n        typedef typename r2::type a2;\n        typedef typename r2::next n3;\n        typedef aux::resolve_bind_arg< a2,U1,U2,U3,U4,U5 > t2;\n        ///\n        typedef aux::replace_unnamed_arg< T3,n3 > r3;\n        typedef typename r3::type a3;\n        typedef typename r3::next n4;\n        typedef aux::resolve_bind_arg< a3,U1,U2,U3,U4,U5 > t3;\n        ///\n        typedef typename eval_if<\n              typename t1::type\n            , t2, t3\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/bind_fwd.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bind_fwd.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename F, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na\n    >\nstruct bind;\n\ntemplate<\n      typename F\n    >\nstruct bind0;\n\ntemplate<\n      typename F, typename T1\n    >\nstruct bind1;\n\ntemplate<\n      typename F, typename T1, typename T2\n    >\nstruct bind2;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    >\nstruct bind3;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    >\nstruct bind4;\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct bind5;\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/bitand.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitand.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitand_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitand_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitand_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitand_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitand_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitand_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitand_\n    : bitand_< bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>, N5>\n{\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitand_< N1,N2,N3,N4,na >\n\n    : bitand_< bitand_< bitand_< N1,N2 >, N3>, N4>\n{\n    
BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitand_< N1,N2,N3,na,na >\n\n    : bitand_< bitand_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitand_< N1,N2,na,na,na >\n    : bitand_impl<\n          typename bitand_tag<N1>::type\n        , typename bitand_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitand_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitand_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitand_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  & BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/bitor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitor_\n    : bitor_< bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>, N5>\n{\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitor_< N1,N2,N3,N4,na >\n\n    : bitor_< bitor_< bitor_< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          
5\n        , bitor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitor_< N1,N2,N3,na,na >\n\n    : bitor_< bitor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitor_< N1,N2,na,na,na >\n    : bitor_impl<\n          typename bitor_tag<N1>::type\n        , typename bitor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  | BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/bitxor.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/bitxor.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct bitxor_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< bitxor_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< bitxor_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct bitxor_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct bitxor_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct bitxor_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct bitxor_\n    : bitxor_< bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>, N5>\n{\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct bitxor_< N1,N2,N3,N4,na >\n\n    : bitxor_< bitxor_< bitxor_< N1,N2 >, N3>, N4>\n{\n    
BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct bitxor_< N1,N2,N3,na,na >\n\n    : bitxor_< bitxor_< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct bitxor_< N1,N2,na,na,na >\n    : bitxor_impl<\n          typename bitxor_tag<N1>::type\n        , typename bitxor_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , bitxor_\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, bitxor_)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct bitxor_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  ^ BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/deque.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/deque.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct deque;\n\ntemplate<\n     \n    >\nstruct deque<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct deque<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct deque<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct deque<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct deque<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : 
vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename 
T6, typename T7, typename T8, typename T9\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, 
T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename vector17< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct deque<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct deque\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/divides.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/divides.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct divides_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< divides_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< divides_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct divides_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct divides_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct divides_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct divides\n    : divides< divides< divides< divides< N1,N2 >, N3>, N4>, N5>\n{\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct divides< N1,N2,N3,N4,na >\n\n    : divides< divides< divides< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n  
      , divides\n        , ( N1, N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct divides< N1,N2,N3,na,na >\n\n    : divides< divides< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct divides< N1,N2,na,na,na >\n    : divides_impl<\n          typename divides_tag<N1>::type\n        , typename divides_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , divides\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, divides)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct divides_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  / BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct equal_to\n\n    : equal_to_impl<\n          typename equal_to_tag<N1>::type\n        , typename equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct equal_to_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, 
typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value  == BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 3,First,Last,State,ForwardOp >\n{\n   
 typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp, state0, typename deref<iter0>::type >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, state1, typename deref<iter1>::type >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, state2, typename deref<iter2>::type >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, state3, typename deref<iter3>::type >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl\n{\n    typedef fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,First,Last,State,ForwardOp >\n    : fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/full_lambda.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/full_lambda.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n   \n    >\nstruct lambda\n{\n    typedef false_ is_le;\n    typedef T result_;\n    typedef T type;\n};\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\ntemplate< int N, typename Tag >\nstruct lambda< arg<N>, Tag >\n{\n    typedef true_ is_le;\n    typedef mpl::arg<N> result_; // qualified for the sake of MIPSpro 7.41\n    typedef mpl::protect<result_> type;\n};\n\ntemplate<\n      typename F\n    , typename Tag\n    >\nstruct lambda<\n          bind0<F>\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind0<\n          F\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1\n{\n    typedef F<\n          typename L1::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1 > class F\n    , typename L1\n    >\nstruct le_result1< true_,Tag,F,L1 >\n{\n    typedef bind1<\n          quote1< F,Tag >\n        , typename L1::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1 > class F\n    , typename T1\n    , 
typename Tag\n    >\nstruct lambda<\n          F<T1>\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef typename l1::is_le is_le1;\n    typedef typename aux::lambda_or<\n          is_le1::value\n        >::type is_le;\n\n    typedef aux::le_result1<\n          is_le, Tag, F, l1\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1\n    , typename Tag\n    >\nstruct lambda<\n          bind1< F,T1 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind1<\n          F\n        , T1\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2\n{\n    typedef F<\n          typename L1::type, typename L2::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2 > class F\n    , typename L1, typename L2\n    >\nstruct le_result2< true_,Tag,F,L1,L2 >\n{\n    typedef bind2<\n          quote2< F,Tag >\n        , typename L1::result_, typename L2::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value\n        >::type is_le;\n\n    typedef aux::le_result2<\n          is_le, Tag, F, l1, l2\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename 
le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2\n    , typename Tag\n    >\nstruct lambda<\n          bind2< F,T1,T2 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind2<\n          F\n        , T1, T2\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3 > class F\n    , typename L1, typename L2, typename L3\n    >\nstruct le_result3< true_,Tag,F,L1,L2,L3 >\n{\n    typedef bind3<\n          quote3< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value\n        >::type is_le;\n\n    typedef aux::le_result3<\n          is_le, Tag, F, l1, l2, l3\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3\n    , typename Tag\n    >\nstruct lambda<\n          bind3< F,T1,T2,T3 >\n     
   , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind3<\n          F\n        , T1, T2, T3\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename L1, typename L2, typename L3, typename L4\n    >\nstruct le_result4< true_,Tag,F,L1,L2,L3,L4 >\n{\n    typedef bind4<\n          quote4< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename T1, typename T2, typename T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    \n\n    typedef typename aux::lambda_or<\n          is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        >::type is_le;\n\n    typedef aux::le_result4<\n          is_le, Tag, F, l1, l2, l3, l4\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename 
T3, typename T4\n    , typename Tag\n    >\nstruct lambda<\n          bind4< F,T1,T2,T3,T4 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind4<\n          F\n        , T1, T2, T3, T4\n        > result_;\n\n    typedef result_ type;\n};\n\nnamespace aux {\n\ntemplate<\n      typename IsLE, typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5\n{\n    typedef F<\n          typename L1::type, typename L2::type, typename L3::type\n        , typename L4::type, typename L5::type\n        > result_;\n\n    typedef result_ type;\n};\n\ntemplate<\n      typename Tag\n    , template< typename P1, typename P2, typename P3, typename P4, typename P5 > class F\n    , typename L1, typename L2, typename L3, typename L4, typename L5\n    >\nstruct le_result5< true_,Tag,F,L1,L2,L3,L4,L5 >\n{\n    typedef bind5<\n          quote5< F,Tag >\n        , typename L1::result_, typename L2::result_, typename L3::result_\n        , typename L4::result_, typename L5::result_\n        > result_;\n\n    typedef mpl::protect<result_> type;\n};\n\n} // namespace aux\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename T1, typename T2, typename T3, typename T4, typename T5\n    , typename Tag\n    >\nstruct lambda<\n          F< T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef lambda< T1,Tag > l1;\n    typedef lambda< T2,Tag > l2;\n    typedef lambda< T3,Tag > l3;\n    typedef lambda< T4,Tag > l4;\n    typedef lambda< T5,Tag > l5;\n    \n    typedef typename l1::is_le is_le1;\n    typedef typename l2::is_le is_le2;\n    typedef typename l3::is_le is_le3;\n    typedef typename l4::is_le is_le4;\n    typedef typename l5::is_le is_le5;\n    \n\n    typedef typename aux::lambda_or<\n          
is_le1::value, is_le2::value, is_le3::value, is_le4::value\n        , is_le5::value\n        >::type is_le;\n\n    typedef aux::le_result5<\n          is_le, Tag, F, l1, l2, l3, l4, l5\n        > le_result_;\n\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind5< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind5<\n          F\n        , T1, T2, T3, T4, T5\n        > result_;\n\n    typedef result_ type;\n};\n\n/// special case for 'protect'\ntemplate< typename T, typename Tag >\nstruct lambda< mpl::protect<T>, Tag >\n{\n    typedef false_ is_le;\n    typedef mpl::protect<T> result_;\n    typedef result_ type;\n};\n\n/// specializations for the main 'bind' form\n\ntemplate<\n      typename F, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    , typename Tag\n    >\nstruct lambda<\n          bind< F,T1,T2,T3,T4,T5 >\n        , Tag\n       \n        >\n{\n    typedef false_ is_le;\n    typedef bind< F,T1,T2,T3,T4,T5 > result_;\n    typedef result_ type;\n};\n\n/// workaround for MWCW 8.3+/EDG < 303, leads to ambiguity on Digital Mars\n\ntemplate<\n      typename F, typename Tag1, typename Tag2\n    >\nstruct lambda<\n          lambda< F,Tag1 >\n        , Tag2\n        >\n{\n    typedef lambda< F,Tag2 > l1;\n    typedef lambda< Tag1,Tag2 > l2;\n    typedef typename l1::is_le is_le;\n    typedef aux::le_result2<is_le, Tag2, mpl::lambda, l1, l2> le_result_;\n    typedef typename le_result_::result_ result_;\n    typedef typename le_result_::type type;\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, lambda)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/greater.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater\n\n    : greater_impl<\n          typename greater_tag<N1>::type\n        , typename greater_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct greater_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > 
struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value > BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/greater_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/greater_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct greater_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct greater_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct greater_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct greater_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct greater_equal\n\n    : greater_equal_impl<\n          typename greater_equal_tag<N1>::type\n        , typename greater_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct 
greater_equal_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value >= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/inherit.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/inherit.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct inherit2\n    : T1, T2\n{\n    typedef inherit2 type;\n};\n\ntemplate< typename T1 >\nstruct inherit2< T1,empty_base >\n{\n    typedef T1 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (T1, empty_base))\n};\n\ntemplate< typename T2 >\nstruct inherit2< empty_base,T2 >\n{\n    typedef T2 type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, T2))\n};\n\ntemplate<>\nstruct inherit2< empty_base,empty_base >\n{\n    typedef empty_base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(2, inherit2, (empty_base, empty_base))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, inherit2)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na\n    >\nstruct inherit3\n    : inherit2<\n          typename inherit2<\n              T1, T2\n            >::type\n        , T3\n        >\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, inherit3)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    >\nstruct inherit4\n    : inherit2<\n          typename inherit3<\n              T1, T2, T3\n            >::type\n        , T4\n        >\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC(4, inherit4)\n\ntemplate<\n      typename T1 = na, typename T2 = na, typename T3 = na, typename T4 = na\n    , typename T5 = na\n    >\nstruct inherit5\n    : inherit2<\n          typename inherit4<\n              T1, T2, T3, T4\n            >::type\n        , T5\n        >\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC(5, inherit5)\n\n/// primary template\n\ntemplate<\n      typename T1 = empty_base, typename T2 = empty_base\n   
 , typename T3 = empty_base, typename T4 = empty_base\n    , typename T5 = empty_base\n    >\nstruct inherit\n    : inherit5< T1,T2,T3,T4,T5 >\n{\n};\n\ntemplate<>\nstruct inherit< na,na,na,na,na >\n{\n    template<\n\n          typename T1 = empty_base, typename T2 = empty_base\n        , typename T3 = empty_base, typename T4 = empty_base\n        , typename T5 = empty_base\n\n        >\n    struct apply\n        : inherit< T1,T2,T3,T4,T5 >\n    {\n    };\n};\n\nBOOST_MPL_AUX_NA_SPEC_LAMBDA(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_ARITY(5, inherit)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(5, 5, inherit)\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/iter_fold_if_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_if_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename Iterator, typename State >\nstruct iter_fold_if_null_step\n{\n    typedef State state;\n    typedef Iterator iterator;\n};\n\ntemplate< bool >\nstruct iter_fold_if_step_impl\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef typename apply2< StateOp,State,Iterator >::type state;\n        typedef typename IteratorOp::type iterator;\n    };\n};\n\ntemplate<>\nstruct iter_fold_if_step_impl<false>\n{\n    template<\n          typename Iterator\n        , typename State\n        , typename StateOp\n        , typename IteratorOp\n        >\n    struct result_\n    {\n        typedef State state;\n        typedef Iterator iterator;\n    };\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_forward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,ForwardOp, mpl::next<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename BackwardOp\n    , typename Predicate\n    >\nstruct iter_fold_if_backward_step\n{\n    typedef typename apply2< Predicate,State,Iterator >::type not_last;\n    typedef typename 
iter_fold_if_step_impl<\n          BOOST_MPL_AUX_MSVC_VALUE_WKND(not_last)::value\n        >::template result_< Iterator,State,BackwardOp, identity<Iterator> > impl_;\n\n    typedef typename impl_::state state;\n    typedef typename impl_::iterator iterator;\n};\n\ntemplate<\n      typename Iterator\n    , typename State\n    , typename ForwardOp\n    , typename ForwardPredicate\n    , typename BackwardOp\n    , typename BackwardPredicate\n    >\nstruct iter_fold_if_impl\n{\n private:\n    typedef iter_fold_if_null_step< Iterator,State > forward_step0;\n    typedef iter_fold_if_forward_step< typename forward_step0::iterator, typename forward_step0::state, ForwardOp, ForwardPredicate > forward_step1;\n    typedef iter_fold_if_forward_step< typename forward_step1::iterator, typename forward_step1::state, ForwardOp, ForwardPredicate > forward_step2;\n    typedef iter_fold_if_forward_step< typename forward_step2::iterator, typename forward_step2::state, ForwardOp, ForwardPredicate > forward_step3;\n    typedef iter_fold_if_forward_step< typename forward_step3::iterator, typename forward_step3::state, ForwardOp, ForwardPredicate > forward_step4;\n    \n\n    typedef typename if_<\n          typename forward_step4::not_last\n        , iter_fold_if_impl<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            , ForwardOp\n            , ForwardPredicate\n            , BackwardOp\n            , BackwardPredicate\n            >\n        , iter_fold_if_null_step<\n              typename forward_step4::iterator\n            , typename forward_step4::state\n            >\n        >::type backward_step4;\n\n    typedef iter_fold_if_backward_step< typename forward_step3::iterator, typename backward_step4::state, BackwardOp, BackwardPredicate > backward_step3;\n    typedef iter_fold_if_backward_step< typename forward_step2::iterator, typename backward_step3::state, BackwardOp, BackwardPredicate > backward_step2;\n    typedef 
iter_fold_if_backward_step< typename forward_step1::iterator, typename backward_step2::state, BackwardOp, BackwardPredicate > backward_step1;\n    typedef iter_fold_if_backward_step< typename forward_step0::iterator, typename backward_step1::state, BackwardOp, BackwardPredicate > backward_step0;\n    \n\n public:\n    typedef typename backward_step0::state state;\n    typedef typename backward_step4::iterator iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 0,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 1,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef state1 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 2,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef state2 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 3,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State 
state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef state3 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< 4,First,Last,State,ForwardOp >\n{\n    typedef First iter0;\n    typedef State state0;\n    typedef typename apply2< ForwardOp,state0,iter0 >::type state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,state1,iter1 >::type state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,state2,iter2 >::type state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,state3,iter3 >::type state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef state4 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      int N\n    , typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl\n{\n    typedef iter_fold_impl<\n          4\n        , First\n        , Last\n        , State\n        , ForwardOp\n        > chunk_;\n\n    typedef iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , typename chunk_::iterator\n        , Last\n        , typename chunk_::state\n        , ForwardOp\n        > res_;\n\n    typedef typename res_::state state;\n    typedef typename res_::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,First,Last,State,ForwardOp >\n    : iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , ForwardOp\n        >\n{\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename ForwardOp\n    >\nstruct iter_fold_impl< -1,Last,Last,State,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/lambda_no_ctps.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/lambda_no_ctps.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate<\n      bool C1 = false, bool C2 = false, bool C3 = false, bool C4 = false\n    , bool C5 = false\n    >\nstruct lambda_or\n    : true_\n{\n};\n\ntemplate<>\nstruct lambda_or< false,false,false,false,false >\n    : false_\n{\n};\n\ntemplate< typename Arity > struct lambda_impl\n{\n    template< typename T, typename Tag, typename Protect > struct result_\n    {\n        typedef T type;\n        typedef is_placeholder<T> is_le;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<1> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef typename l1::is_le is_le1;\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value\n            > is_le;\n\n        typedef bind1<\n              typename F::rebind\n            , typename l1::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<2> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, 
BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value\n            > is_le;\n\n        typedef bind2<\n              typename F::rebind\n            , typename l1::type, typename l2::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<3> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value\n            > is_le;\n\n        typedef bind3<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<4> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n       
 typedef typename l4::is_le is_le4;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value\n            > is_le;\n\n        typedef bind4<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\ntemplate<> struct lambda_impl< int_<5> >\n{\n    template< typename F, typename Tag, typename Protect > struct result_\n    {\n        typedef lambda< typename F::arg1, Tag, false_ > l1;\n        typedef lambda< typename F::arg2, Tag, false_ > l2;\n        typedef lambda< typename F::arg3, Tag, false_ > l3;\n        typedef lambda< typename F::arg4, Tag, false_ > l4;\n        typedef lambda< typename F::arg5, Tag, false_ > l5;\n        \n        typedef typename l1::is_le is_le1;\n        typedef typename l2::is_le is_le2;\n        typedef typename l3::is_le is_le3;\n        typedef typename l4::is_le is_le4;\n        typedef typename l5::is_le is_le5;\n        \n\n        typedef aux::lambda_or<\n              BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le1)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le2)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le3)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le4)::value, BOOST_MPL_AUX_MSVC_VALUE_WKND(is_le5)::value\n            > is_le;\n\n        typedef bind5<\n              typename F::rebind\n            , typename l1::type, typename l2::type, typename l3::type\n            , typename l4::type, typename l5::type\n            > bind_;\n\n        typedef typename if_<\n              is_le\n            , if_< Protect, mpl::protect<bind_>, 
bind_ >\n            , identity<F>\n            >::type type_;\n\n        typedef typename type_::type type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename T\n    , typename Tag\n    , typename Protect\n    >\nstruct lambda\n{\n    /// Metafunction forwarding confuses MSVC 6.x\n    typedef typename aux::template_arity<T>::type arity_;\n    typedef typename aux::lambda_impl<arity_>\n        ::template result_< T,Tag,Protect > l_;\n\n    typedef typename l_::type type;\n    typedef typename l_::is_le is_le;\n};\n\nBOOST_MPL_AUX_NA_SPEC2(1, 3, lambda)\n\ntemplate<\n      typename T\n    >\nstruct is_lambda_expression\n    : lambda<T>::is_le\n{\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/less.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less\n\n    : less_impl<\n          typename less_tag<N1>::type\n        , typename less_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( 
BOOST_MPL_AUX_VALUE_WKND(N2)::value > BOOST_MPL_AUX_VALUE_WKND(N1)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/less_equal.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/less_equal.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct less_equal_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< less_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< less_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct less_equal_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct less_equal_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct less_equal_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct less_equal\n\n    : less_equal_impl<\n          typename less_equal_tag<N1>::type\n        , typename less_equal_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, less_equal)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct less_equal_impl< integral_c_tag,integral_c_tag >\n{\n    
template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value <= BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/list.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct list;\n\ntemplate<\n     \n    >\nstruct list<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list0<  >\n{\n    typedef list0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct list<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list1<T0>\n{\n    typedef typename list1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list2< T0,T1 >\n{\n    typedef typename list2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list3< T0,T1,T2 >\n{\n    typedef typename list3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list4< T0,T1,T2,T3 
>\n{\n    typedef typename list4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list5< T0,T1,T2,T3,T4 >\n{\n    typedef typename list5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename list6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename list7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename list8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename list9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    
>\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename list10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename list11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename list12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename list13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n 
       >\n    : list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename list14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : list15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename list15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : list16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename list16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : list17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename list17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n   
   typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : list18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename list18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : list19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename list19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list\n    : list20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename list20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/list_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct list_c;\n\ntemplate<\n      typename T\n    >\nstruct list_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list0_c<T>\n{\n    typedef typename list0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct list_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list1_c< T,C0 >\n{\n    typedef typename list1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct list_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list2_c< T,C0,C1 >\n{\n    typedef typename list2_c< T,C0,C1 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct list_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list3_c< T,C0,C1,C2 >\n{\n    typedef typename list3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename list4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename list5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename list6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : 
list7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    typedef typename list7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename list8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename list9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename list10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename list11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long 
C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename list12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename list13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename list14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename list15_c< 
T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename list16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : list17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename list17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : list18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename list18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long 
C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct list_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : list19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename list19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct list_c\n    : list20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename list20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/map.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/map.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct map;\n\ntemplate<\n     \n    >\nstruct map<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map0<  >\n{\n    typedef map0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct map<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map1<T0>\n{\n    typedef typename map1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct map<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map2< T0,T1 >\n{\n    typedef typename map2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct map<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map3< T0,T1,T2 >\n{\n    typedef typename map3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct map<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map4< T0,T1,T2,T3 >\n{\n    typedef 
typename map4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct map<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map5< T0,T1,T2,T3,T4 >\n{\n    typedef typename map5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename map6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename map7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename map8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename map9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct map<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename map10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename map11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename map12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename map13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : map14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename map14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : map15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename map15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : map16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename map16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : map17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename map17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : map18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename map18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct map<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : map19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename map19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct map\n    : map20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename map20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/minus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/minus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct minus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< minus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< minus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct minus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct minus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct minus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct minus\n    : minus< minus< minus< minus< N1,N2 >, N3>, N4>, N5>\n{\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct minus< N1,N2,N3,N4,na >\n\n    : minus< minus< minus< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, 
N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct minus< N1,N2,N3,na,na >\n\n    : minus< minus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct minus< N1,N2,na,na,na >\n    : minus_impl<\n          typename minus_tag<N1>::type\n        , typename minus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , minus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, minus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct minus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  - BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/modulus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/modulus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct modulus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< modulus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< modulus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct modulus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct modulus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct modulus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct modulus\n\n    : modulus_impl<\n          typename modulus_tag<N1>::type\n        , typename modulus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, modulus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct modulus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct 
apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  % BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/not_equal_to.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/not_equal_to.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct not_equal_to_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct not_equal_to_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct not_equal_to_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct not_equal_to_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct not_equal_to\n\n    : not_equal_to_impl<\n          typename not_equal_to_tag<N1>::type\n        , typename not_equal_to_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)\n\n}}\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct not_equal_to_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/or.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/or.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< bool C_, typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl\n    : true_\n{\n};\n\ntemplate< typename T1, typename T2, typename T3, typename T4 >\nstruct or_impl< false,T1,T2,T3,T4 >\n    : or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4\n        , false_\n        >\n{\n};\n\ntemplate<>\nstruct or_impl<\n          false\n        , false_, false_, false_, false_\n        >\n    : false_\n{\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename T3 = false_, typename T4 = false_, typename T5 = false_\n    >\nstruct or_\n\n    : aux::or_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T1)::value\n        , T2, T3, T4, T5\n        >\n\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(\n      2\n    , 5\n    , or_\n    )\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/placeholders.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/placeholders.hpp\" header\n// -- DO NOT modify by hand!\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg< -1 > _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<1> _1;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_1)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_1;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<2> _2;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_2)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_2;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<3> _3;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_3)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_3;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<4> _4;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_4)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_4;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<5> _5;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_5)\n\nnamespace 
placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_5;\n}\n\n}}\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<6> _6;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_6)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_6;\n}\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/plus.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/plus.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct plus_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< plus_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< plus_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct plus_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct plus_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct plus_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct plus\n    : plus< plus< plus< plus< N1,N2 >, N3>, N4>, N5>\n{\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct plus< N1,N2,N3,N4,na >\n\n    : plus< plus< plus< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, N4, na )\n   
     )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct plus< N1,N2,N3,na,na >\n\n    : plus< plus< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct plus< N1,N2,na,na,na >\n    : plus_impl<\n          typename plus_tag<N1>::type\n        , typename plus_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , plus\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, plus)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct plus_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  + BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/quote.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/quote.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T, bool has_type_ >\nstruct quote_impl\n    : T\n{\n};\n\ntemplate< typename T >\nstruct quote_impl< T,false >\n{\n    typedef T type;\n};\n\ntemplate<\n      template< typename P1 > class F\n    , typename Tag = void_\n    >\nstruct quote1\n{\n    template< typename U1 > struct apply\n\n        : quote_impl<\n              F<U1>\n            , aux::has_type< F<U1> >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2 > class F\n    , typename Tag = void_\n    >\nstruct quote2\n{\n    template< typename U1, typename U2 > struct apply\n\n        : quote_impl<\n              F< U1,U2 >\n            , aux::has_type< F< U1,U2 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3 > class F\n    , typename Tag = void_\n    >\nstruct quote3\n{\n    template< typename U1, typename U2, typename U3 > struct apply\n\n        : quote_impl<\n              F< U1,U2,U3 >\n            , aux::has_type< F< U1,U2,U3 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template< typename P1, typename P2, typename P3, typename P4 > class F\n    , typename Tag = void_\n    >\nstruct quote4\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        >\n    struct apply\n\n        : quote_impl<\n              F< U1,U2,U3,U4 >\n            , aux::has_type< F< U1,U2,U3,U4 > >::value\n            >\n\n    {\n    };\n};\n\ntemplate<\n      template<\n          typename P1, typename P2, typename P3, typename P4\n        , typename P5\n        >\n      class F\n    , typename 
Tag = void_\n    >\nstruct quote5\n{\n    template<\n          typename U1, typename U2, typename U3, typename U4\n        , typename U5\n        >\n    struct apply\n\n        : quote_impl<\n              F< U1,U2,U3,U4,U5 >\n            , aux::has_type< F< U1,U2,U3,U4,U5 > >::value\n            >\n\n    {\n    };\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/reverse_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename 
mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename 
deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp, fwd_state0, typename deref<iter0>::type >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp, fwd_state1, typename deref<iter1>::type >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp, fwd_state2, typename deref<iter2>::type >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp, fwd_state3, typename deref<iter3>::type >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_fold_impl<\n          ( (N - 4) < 0 
? 0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp, bkwd_state4, typename deref<iter3>::type >::type bkwd_state3;\n    typedef typename apply2< BackwardOp, bkwd_state3, typename deref<iter2>::type >::type bkwd_state2;\n    typedef typename apply2< BackwardOp, bkwd_state2, typename deref<iter1>::type >::type bkwd_state1;\n    typedef typename apply2< BackwardOp, bkwd_state1, typename deref<iter0>::type >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State, typename deref<First>::type>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , typename deref<First>::type\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/reverse_iter_fold_impl.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/reverse_iter_fold_impl.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl;\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 0,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef fwd_state0 bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter0 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    \n\n    typedef fwd_state1 bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    typedef bkwd_state0 state;\n    typedef iter1 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 2,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< 
ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    \n\n    typedef fwd_state2 bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter2 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 3,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    \n\n    typedef fwd_state3 bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter3 iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< 4,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type 
fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef fwd_state4 bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef iter4 iterator;\n};\n\ntemplate<\n      long N\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n    typedef typename apply2< ForwardOp,fwd_state0,iter0 >::type fwd_state1;\n    typedef typename mpl::next<iter0>::type iter1;\n    typedef typename apply2< ForwardOp,fwd_state1,iter1 >::type fwd_state2;\n    typedef typename mpl::next<iter1>::type iter2;\n    typedef typename apply2< ForwardOp,fwd_state2,iter2 >::type fwd_state3;\n    typedef typename mpl::next<iter2>::type iter3;\n    typedef typename apply2< ForwardOp,fwd_state3,iter3 >::type fwd_state4;\n    typedef typename mpl::next<iter3>::type iter4;\n    \n\n    typedef reverse_iter_fold_impl<\n          ( (N - 4) < 0 ? 
0 : N - 4 )\n        , iter4\n        , Last\n        , fwd_state4\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n\n    typedef typename nested_chunk::state bkwd_state4;\n    typedef typename apply2< BackwardOp,bkwd_state4,iter3 >::type bkwd_state3;\n    typedef typename apply2< BackwardOp,bkwd_state3,iter2 >::type bkwd_state2;\n    typedef typename apply2< BackwardOp,bkwd_state2,iter1 >::type bkwd_state1;\n    typedef typename apply2< BackwardOp,bkwd_state1,iter0 >::type bkwd_state0;\n    \n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,First,Last,State,BackwardOp,ForwardOp >\n{\n    typedef reverse_iter_fold_impl<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2< ForwardOp,State,First >::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , First\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct reverse_iter_fold_impl< -1,Last,Last,State,BackwardOp,ForwardOp >\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n}}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/set.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct set;\n\ntemplate<\n     \n    >\nstruct set<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set0<  >\n{\n    typedef set0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct set<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set1<T0>\n{\n    typedef typename set1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct set<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set2< T0,T1 >\n{\n    typedef typename set2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct set<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set3< T0,T1,T2 >\n{\n    typedef typename set3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct set<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set4< T0,T1,T2,T3 >\n{\n    typedef 
typename set4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct set<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set5< T0,T1,T2,T3,T4 >\n{\n    typedef typename set5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename set6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename set7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename set8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename set9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct set<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename set10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename set11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename set12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename set13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : set14< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename set14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : set15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename set15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : set16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename set16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : set17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef typename set17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, 
typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : set18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename set18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct set<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : set19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename set19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct set\n    : set20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename set20< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/set_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/set_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct set_c;\n\ntemplate<\n      typename T\n    >\nstruct set_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set0_c<T>\n{\n    typedef typename set0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct set_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set1_c< T,C0 >\n{\n    typedef typename set1_c< T,C0 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct set_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set2_c< T,C0,C1 >\n{\n    typedef typename set2_c< T,C0,C1 >::type type;\n};\n\ntemplate<\n  
    typename T, long C0, long C1, long C2\n    >\nstruct set_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set3_c< T,C0,C1,C2 >\n{\n    typedef typename set3_c< T,C0,C1,C2 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set4_c< T,C0,C1,C2,C3 >\n{\n    typedef typename set4_c< T,C0,C1,C2,C3 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set5_c< T,C0,C1,C2,C3,C4 >\n{\n    typedef typename set5_c< T,C0,C1,C2,C3,C4 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set6_c< T,C0,C1,C2,C3,C4,C5 >\n{\n    typedef typename set6_c< T,C0,C1,C2,C3,C4,C5 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set7_c< T,C0,C1,C2,C3,C4,C5,C6 >\n{\n    
typedef typename set7_c< T,C0,C1,C2,C3,C4,C5,C6 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >\n{\n    typedef typename set8_c< T,C0,C1,C2,C3,C4,C5,C6,C7 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >\n{\n    typedef typename set9_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n{\n    typedef typename set10_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n{\n    typedef typename set11_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , 
long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n{\n    typedef typename set12_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n{\n    typedef typename set13_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set14_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13\n        >\n{\n    typedef typename set14_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set15_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        >\n{\n    typedef typename set15_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >::type 
type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set16_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15\n        >\n{\n    typedef typename set16_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : set17_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16\n        >\n{\n    typedef typename set17_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : set18_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17\n        >\n{\n    typedef typename set18_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, 
long C15, long C16, long C17, long C18\n    >\nstruct set_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : set19_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18\n        >\n{\n    typedef typename set19_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct set_c\n    : set20_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, C19\n        >\n{\n    typedef typename set20_c< T,C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/shift_left.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_left.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_left_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_left_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_left_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_left_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_left_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_left_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_left\n\n    : shift_left_impl<\n          typename shift_left_tag<N1>::type\n        , typename shift_left_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_left)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct shift_left_impl< 
integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  << BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/shift_right.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Jaap Suter 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/shift_right.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct shift_right_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< shift_right_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< shift_right_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct shift_right_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct shift_right_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct shift_right_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    >\nstruct shift_right\n\n    : shift_right_impl<\n          typename shift_right_tag<N1>::type\n        , typename shift_right_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 2, shift_right)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct 
shift_right_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N, typename S > struct apply\n\n        : integral_c<\n              typename N::value_type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N)::value\n                  >> BOOST_MPL_AUX_VALUE_WKND(S)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/template_arity.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/aux_/template_arity.hpp\" header\n// -- DO NOT modify by hand!\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/times.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/times.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Tag1\n    , typename Tag2\n    >\nstruct times_impl\n    : if_c<\n          ( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)\n              > BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)\n            )\n\n        , aux::cast2nd_impl< times_impl< Tag1,Tag1 >,Tag1, Tag2 >\n        , aux::cast1st_impl< times_impl< Tag2,Tag2 >,Tag1, Tag2 >\n        >::type\n{\n};\n\n/// for Digital Mars C++/compilers with no CTPS/TTP support\ntemplate<> struct times_impl< na,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< na,Tag >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename Tag > struct times_impl< Tag,na >\n{\n    template< typename U1, typename U2 > struct apply\n    {\n        typedef apply type;\n        BOOST_STATIC_CONSTANT(int, value  = 0);\n    };\n};\n\ntemplate< typename T > struct times_tag\n{\n    typedef typename T::tag type;\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(N1)\n    , typename BOOST_MPL_AUX_NA_PARAM(N2)\n    , typename N3 = na, typename N4 = na, typename N5 = na\n    >\nstruct times\n    : times< times< times< times< N1,N2 >, N3>, N4>, N5>\n{\n};\n\ntemplate<\n      typename N1, typename N2, typename N3, typename N4\n    >\nstruct times< N1,N2,N3,N4,na >\n\n    : times< times< times< N1,N2 >, N3>, N4>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, 
N2, N3, N4, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2, typename N3\n    >\nstruct times< N1,N2,N3,na,na >\n\n    : times< times< N1,N2 >, N3>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, N3, na, na )\n        )\n};\n\ntemplate<\n      typename N1, typename N2\n    >\nstruct times< N1,N2,na,na,na >\n    : times_impl<\n          typename times_tag<N1>::type\n        , typename times_tag<N2>::type\n        >::template apply< N1,N2 >::type\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT_SPEC(\n          5\n        , times\n        , ( N1, N2, na, na, na )\n        )\n\n};\n\nBOOST_MPL_AUX_NA_SPEC2(2, 5, times)\n\n}}\n\nnamespace boost { namespace mpl {\ntemplate<>\nstruct times_impl< integral_c_tag,integral_c_tag >\n{\n    template< typename N1, typename N2 > struct apply\n\n        : integral_c<\n              typename aux::largest_int<\n                  typename N1::value_type\n                , typename N2::value_type\n                >::type\n            , ( BOOST_MPL_AUX_VALUE_WKND(N1)::value\n                  * BOOST_MPL_AUX_VALUE_WKND(N2)::value\n                )\n            >\n    {\n    };\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/unpack_args.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/unpack_args.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< int size, typename F, typename Args >\nstruct unpack_args_impl;\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 0,F,Args >\n    : apply0<\n          F\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 1,F,Args >\n    : apply1<\n          F\n        , typename at_c< Args,0 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 2,F,Args >\n    : apply2<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 3,F,Args >\n    : apply3<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 4,F,Args >\n    : apply4<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        >\n{\n};\n\ntemplate< typename F, typename Args >\nstruct unpack_args_impl< 5,F,Args >\n    : apply5<\n          F\n        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type\n        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type\n        , typename at_c< Args,4 >::type\n        >\n{\n};\n\n}\n\ntemplate<\n      typename F\n    >\nstruct unpack_args\n{\n    template< typename Args > struct apply\n\n        : aux::unpack_args_impl< size<Args>::value,F, Args >\n\n    {\n    };\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, 
unpack_args)\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/vector.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na\n    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na\n    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na\n    , typename T12 = na, typename T13 = na, typename T14 = na\n    , typename T15 = na, typename T16 = na, typename T17 = na\n    , typename T18 = na, typename T19 = na\n    >\nstruct vector;\n\ntemplate<\n     \n    >\nstruct vector<\n          na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector0<  >\n{\n    typedef vector0<  >::type type;\n};\n\ntemplate<\n      typename T0\n    >\nstruct vector<\n          T0, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector1<T0>\n{\n    typedef typename vector1<T0>::type type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct vector<\n          T0, T1, na, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector2< T0,T1 >\n{\n    typedef typename vector2< T0,T1 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct vector<\n          T0, T1, T2, na, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector3< T0,T1,T2 >\n{\n    typedef typename vector3< T0,T1,T2 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct vector<\n          T0, T1, T2, T3, na, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        
>\n    : vector4< T0,T1,T2,T3 >\n{\n    typedef typename vector4< T0,T1,T2,T3 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, na, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector5< T0,T1,T2,T3,T4 >\n{\n    typedef typename vector5< T0,T1,T2,T3,T4 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, na, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector6< T0,T1,T2,T3,T4,T5 >\n{\n    typedef typename vector6< T0,T1,T2,T3,T4,T5 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, na, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector7< T0,T1,T2,T3,T4,T5,T6 >\n{\n    typedef typename vector7< T0,T1,T2,T3,T4,T5,T6 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, na, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector8< T0,T1,T2,T3,T4,T5,T6,T7 >\n{\n    typedef typename vector8< T0,T1,T2,T3,T4,T5,T6,T7 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, na, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >\n{\n    typedef typename vector9< T0,T1,T2,T3,T4,T5,T6,T7,T8 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename 
T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, na, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n{\n    typedef typename vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, na, na, na, na, na, na\n        , na, na, na\n        >\n    : vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n{\n    typedef typename vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, na, na, na, na\n        , na, na, na, na\n        >\n    : vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n{\n    typedef typename vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, na, na, na\n        , na, na, na, na\n        >\n    : vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n{\n    typedef typename vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct vector<\n          T0, T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, na, na\n        , na, na, na, na\n        >\n    : vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n{\n    typedef typename vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, na\n        , na, na, na, na\n        >\n    : vector15<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        >\n{\n    typedef typename vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, na, na, na, na\n        >\n    : vector16<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15\n        >\n{\n    typedef typename vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, na, na, na\n        >\n    : vector17<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16\n        >\n{\n    typedef 
typename vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, na, na\n        >\n    : vector18<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17\n        >\n{\n    typedef typename vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >::type type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct vector<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, na\n        >\n    : vector19<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18\n        >\n{\n    typedef typename vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct vector\n    : vector20<\n          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14\n        , T15, T16, T17, T18, T19\n        >\n{\n    typedef typename vector20< 
T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessed/plain/vector_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/vector_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T, long C0 = LONG_MAX, long C1 = LONG_MAX, long C2 = LONG_MAX\n    , long C3 = LONG_MAX, long C4 = LONG_MAX, long C5 = LONG_MAX\n    , long C6 = LONG_MAX, long C7 = LONG_MAX, long C8 = LONG_MAX\n    , long C9 = LONG_MAX, long C10 = LONG_MAX, long C11 = LONG_MAX\n    , long C12 = LONG_MAX, long C13 = LONG_MAX, long C14 = LONG_MAX\n    , long C15 = LONG_MAX, long C16 = LONG_MAX, long C17 = LONG_MAX\n    , long C18 = LONG_MAX, long C19 = LONG_MAX\n    >\nstruct vector_c;\n\ntemplate<\n      typename T\n    >\nstruct vector_c<\n          T, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector0_c<T>\n{\n    typedef typename vector0_c<T>::type type;\n};\n\ntemplate<\n      typename T, long C0\n    >\nstruct vector_c<\n          T, C0, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector1_c< T, T(C0) >\n{\n    typedef typename vector1_c< T, T(C0) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1\n    >\nstruct vector_c<\n          T, C0, C1, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector2_c< T, T(C0), T(C1) >\n{\n    typedef typename 
vector2_c< T, T(C0), T(C1) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2\n    >\nstruct vector_c<\n          T, C0, C1, C2, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector3_c< T, T(C0), T(C1), T(C2) >\n{\n    typedef typename vector3_c< T, T(C0), T(C1), T(C2) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector4_c< T, T(C0), T(C1), T(C2), T(C3) >\n{\n    typedef typename vector4_c< T, T(C0), T(C1), T(C2), T(C3) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >\n{\n    typedef typename vector5_c< T, T(C0), T(C1), T(C2), T(C3), T(C4) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >\n{\n    typedef typename vector6_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6\n    >\nstruct vector_c<\n          
T, C0, C1, C2, C3, C4, C5, C6, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >\n{\n    typedef typename vector7_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX\n        >\n    : vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >\n{\n    typedef typename vector8_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >\n{\n    typedef typename vector9_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        , LONG_MAX\n        >\n    : vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >\n{\n    typedef typename vector10_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9) >::type type;\n};\n\ntemplate<\n      
typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, LONG_MAX, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >\n{\n    typedef typename vector11_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >\n{\n    typedef typename vector12_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, LONG_MAX\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >\n{\n    typedef typename vector13_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, 
C10, C11, C12, C13\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >\n{\n    typedef typename vector14_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >\n{\n    typedef typename vector15_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >\n{\n    typedef typename vector16_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, 
C9, C10, C11, C12, C13, C14\n        , C15, C16, LONG_MAX, LONG_MAX, LONG_MAX\n        >\n    : vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >\n{\n    typedef typename vector17_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, LONG_MAX, LONG_MAX\n        >\n    : vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >\n{\n    typedef typename vector18_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17) >::type type;\n};\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18\n    >\nstruct vector_c<\n          T, C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14\n        , C15, C16, C17, C18, LONG_MAX\n        >\n    : vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >\n{\n    typedef typename vector19_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18) >::type type;\n};\n\n/// primary template (not a specialization!)\n\ntemplate<\n      typename T, long C0, long C1, long C2, long C3, 
long C4, long C5\n    , long C6, long C7, long C8, long C9, long C10, long C11, long C12\n    , long C13, long C14, long C15, long C16, long C17, long C18, long C19\n    >\nstruct vector_c\n    : vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >\n{\n    typedef typename vector20_c< T, T(C0), T(C1), T(C2), T(C3), T(C4), T(C5), T(C6), T(C7), T(C8), T(C9), T(C10), T(C11), T(C12), T(C13), T(C14), T(C15), T(C16), T(C17), T(C18), T(C19) >::type type;\n};\n\n}}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/add.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_ADD_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_ADD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n\n#   include <boost/mpl/aux_/preprocessor/tuple.hpp>\n\n#if defined(BOOST_MPL_CFG_BROKEN_PP_MACRO_EXPANSION)\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_ADD(i,j) \\\n    BOOST_MPL_PP_ADD_DELAY(i,j) \\\n    /**/\n\n#   define BOOST_MPL_PP_ADD_DELAY(i,j) \\\n    BOOST_PP_CAT(BOOST_MPL_PP_TUPLE_11_ELEM_##i,BOOST_MPL_PP_ADD_##j) \\\n    /**/\n#else\n#   define BOOST_MPL_PP_ADD(i,j) \\\n    BOOST_MPL_PP_ADD_DELAY(i,j) \\\n    /**/\n\n#   define BOOST_MPL_PP_ADD_DELAY(i,j) \\\n    BOOST_MPL_PP_TUPLE_11_ELEM_##i BOOST_MPL_PP_ADD_##j \\\n    /**/\n#endif\n\n#   define BOOST_MPL_PP_ADD_0 (0,1,2,3,4,5,6,7,8,9,10)\n#   define BOOST_MPL_PP_ADD_1 (1,2,3,4,5,6,7,8,9,10,0)\n#   define BOOST_MPL_PP_ADD_2 (2,3,4,5,6,7,8,9,10,0,0)\n#   define BOOST_MPL_PP_ADD_3 (3,4,5,6,7,8,9,10,0,0,0)\n#   define BOOST_MPL_PP_ADD_4 (4,5,6,7,8,9,10,0,0,0,0)\n#   define BOOST_MPL_PP_ADD_5 (5,6,7,8,9,10,0,0,0,0,0)\n#   define BOOST_MPL_PP_ADD_6 (6,7,8,9,10,0,0,0,0,0,0)\n#   define BOOST_MPL_PP_ADD_7 (7,8,9,10,0,0,0,0,0,0,0)\n#   define BOOST_MPL_PP_ADD_8 (8,9,10,0,0,0,0,0,0,0,0)\n#   define BOOST_MPL_PP_ADD_9 (9,10,0,0,0,0,0,0,0,0,0)\n#   define BOOST_MPL_PP_ADD_10 (10,0,0,0,0,0,0,0,0,0,0)\n\n#else\n\n#   include <boost/preprocessor/arithmetic/add.hpp>\n\n#   define BOOST_MPL_PP_ADD(i,j) \\\n    BOOST_PP_ADD(i,j) \\\n    /**/\n    \n#endif \n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_ADD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/def_params_tail.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_DEF_PARAMS_TAIL_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_DEF_PARAMS_TAIL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/limits/arity.hpp>\n#include <boost/mpl/aux_/config/dtp.hpp>\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n\n#include <boost/preprocessor/comma_if.hpp>\n#include <boost/preprocessor/logical/and.hpp>\n#include <boost/preprocessor/identity.hpp>\n#include <boost/preprocessor/empty.hpp>\n\n// BOOST_MPL_PP_DEF_PARAMS_TAIL(1,T,value): , T1 = value, .., Tn = value\n// BOOST_MPL_PP_DEF_PARAMS_TAIL(2,T,value): , T2 = value, .., Tn = value\n// BOOST_MPL_PP_DEF_PARAMS_TAIL(n,T,value): <nothing>\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n\n#   include <boost/mpl/aux_/preprocessor/filter_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/sub.hpp>\n\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_IMPL(i, param, value_func) \\\n    BOOST_MPL_PP_DEF_PARAMS_TAIL_DELAY_1( \\\n          i \\\n        , BOOST_MPL_PP_SUB(BOOST_MPL_LIMIT_METAFUNCTION_ARITY,i) \\\n        , param \\\n        , value_func \\\n        ) \\\n    /**/\n\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_DELAY_1(i, n, param, value_func) \\\n    BOOST_MPL_PP_DEF_PARAMS_TAIL_DELAY_2(i,n,param,value_func) \\\n    /**/\n\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_DELAY_2(i, n, param, value_func) \\\n    BOOST_PP_COMMA_IF(BOOST_PP_AND(i,n)) \\\n    BOOST_MPL_PP_DEF_PARAMS_TAIL_##i(n,param,value_func) \\\n    /**/\n\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_0(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p##1 v(),p##2 v(),p##3 v(),p##4 v(),p##5 v(),p##6 v(),p##7 v(),p##8 v(),p##9 v())\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_1(i,p,v) 
BOOST_MPL_PP_FILTER_PARAMS_##i(p##2 v(),p##3 v(),p##4 v(),p##5 v(),p##6 v(),p##7 v(),p##8 v(),p##9 v(),p1)\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_2(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p##3 v(),p##4 v(),p##5 v(),p##6 v(),p##7 v(),p##8 v(),p##9 v(),p1,p2)\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_3(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p##4 v(),p##5 v(),p##6 v(),p##7 v(),p##8 v(),p##9 v(),p1,p2,p3)\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_4(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p##5 v(),p##6 v(),p##7 v(),p##8 v(),p##9 v(),p1,p2,p3,p4)\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_5(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p##6 v(),p##7 v(),p##8 v(),p##9 v(),p1,p2,p3,p4,p5)\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_6(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p##7 v(),p##8 v(),p##9 v(),p1,p2,p3,p4,p5,p6)\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_7(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p##8 v(),p##9 v(),p1,p2,p3,p4,p5,p6,p7)\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_8(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p##9 v(),p1,p2,p3,p4,p5,p6,p7,p8)\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_9(i,p,v) BOOST_MPL_PP_FILTER_PARAMS_##i(p1,p2,p3,p4,p5,p6,p7,p8,p9)\n\n#else\n\n#   include <boost/preprocessor/arithmetic/add.hpp>\n#   include <boost/preprocessor/arithmetic/sub.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/tuple/elem.hpp>\n#   include <boost/preprocessor/repeat.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_AUX_TAIL_PARAM_FUNC(unused, i, op) \\\n    , BOOST_PP_CAT( \\\n          BOOST_PP_TUPLE_ELEM(3, 1, op) \\\n        , BOOST_PP_ADD_D(1, i, BOOST_PP_INC(BOOST_PP_TUPLE_ELEM(3, 0, op))) \\\n        ) BOOST_PP_TUPLE_ELEM(3, 2, op)() \\\n    /**/\n\n#   define BOOST_MPL_PP_DEF_PARAMS_TAIL_IMPL(i, param, value_func) \\\n    BOOST_PP_REPEAT( \\\n          BOOST_PP_SUB_D(1, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, i) \\\n        , BOOST_MPL_PP_AUX_TAIL_PARAM_FUNC \\\n        , (i, param, value_func) \\\n        ) \\\n    /**/\n\n\n#endif 
// BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES\n\n#define BOOST_MPL_PP_DEF_PARAMS_TAIL(i, param, value) \\\n    BOOST_MPL_PP_DEF_PARAMS_TAIL_IMPL(i, param, BOOST_PP_IDENTITY(=value)) \\\n    /**/\n\n#if !defined(BOOST_MPL_CFG_NO_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n#   define BOOST_MPL_PP_NESTED_DEF_PARAMS_TAIL(i, param, value) \\\n    BOOST_MPL_PP_DEF_PARAMS_TAIL_IMPL(i, param, BOOST_PP_IDENTITY(=value)) \\\n    /**/\n#else\n#   define BOOST_MPL_PP_NESTED_DEF_PARAMS_TAIL(i, param, value) \\\n    BOOST_MPL_PP_DEF_PARAMS_TAIL_IMPL(i, param, BOOST_PP_EMPTY) \\\n    /**/\n#endif\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_DEF_PARAMS_TAIL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/default_params.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_DEFAULT_PARAMS_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_DEFAULT_PARAMS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n\n// BOOST_MPL_PP_DEFAULT_PARAMS(0,T,int): <nothing>\n// BOOST_MPL_PP_DEFAULT_PARAMS(1,T,int): T1 = int\n// BOOST_MPL_PP_DEFAULT_PARAMS(2,T,int): T1 = int, T2 = int\n// BOOST_MPL_PP_DEFAULT_PARAMS(n,T,int): T1 = int, T2 = int, .., Tn = int\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_DEFAULT_PARAMS(n,p,v) \\\n    BOOST_PP_CAT(BOOST_MPL_PP_DEFAULT_PARAMS_,n)(p,v) \\\n    /**/\n    \n#   define BOOST_MPL_PP_DEFAULT_PARAMS_0(p,v)\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_1(p,v) p##1=v\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_2(p,v) p##1=v,p##2=v\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_3(p,v) p##1=v,p##2=v,p##3=v\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_4(p,v) p##1=v,p##2=v,p##3=v,p##4=v\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_5(p,v) p##1=v,p##2=v,p##3=v,p##4=v,p##5=v\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_6(p,v) p##1=v,p##2=v,p##3=v,p##4=v,p##5=v,p##6=v\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_7(p,v) p##1=v,p##2=v,p##3=v,p##4=v,p##5=v,p##6=v,p##7=v\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_8(p,v) p##1=v,p##2=v,p##3=v,p##4=v,p##5=v,p##6=v,p##7=v,p##8=v\n#   define BOOST_MPL_PP_DEFAULT_PARAMS_9(p,v) p##1=v,p##2=v,p##3=v,p##4=v,p##5=v,p##6=v,p##7=v,p##8=v,p##9=v\n\n#else\n\n#   include <boost/preprocessor/tuple/elem.hpp>\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/repeat.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n#   
define BOOST_MPL_PP_AUX_DEFAULT_PARAM_FUNC(unused, i, pv) \\\n    BOOST_PP_COMMA_IF(i) \\\n    BOOST_PP_CAT( BOOST_PP_TUPLE_ELEM(2,0,pv), BOOST_PP_INC(i) ) \\\n        = BOOST_PP_TUPLE_ELEM(2,1,pv) \\\n    /**/\n\n#   define BOOST_MPL_PP_DEFAULT_PARAMS(n, param, value) \\\n    BOOST_PP_REPEAT( \\\n          n \\\n        , BOOST_MPL_PP_AUX_DEFAULT_PARAM_FUNC \\\n        , (param,value) \\\n        ) \\\n    /**/\n\n#endif\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_DEFAULT_PARAMS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/enum.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_ENUM_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_ENUM_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n\n// BOOST_MPL_PP_ENUM(0,int): <nothing>\n// BOOST_MPL_PP_ENUM(1,int): int\n// BOOST_MPL_PP_ENUM(2,int): int, int\n// BOOST_MPL_PP_ENUM(n,int): int, int, .., int\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_ENUM(n, param) \\\n    BOOST_PP_CAT(BOOST_MPL_PP_ENUM_,n)(param) \\\n    /**/\n    \n#   define BOOST_MPL_PP_ENUM_0(p)\n#   define BOOST_MPL_PP_ENUM_1(p) p\n#   define BOOST_MPL_PP_ENUM_2(p) p,p\n#   define BOOST_MPL_PP_ENUM_3(p) p,p,p\n#   define BOOST_MPL_PP_ENUM_4(p) p,p,p,p\n#   define BOOST_MPL_PP_ENUM_5(p) p,p,p,p,p\n#   define BOOST_MPL_PP_ENUM_6(p) p,p,p,p,p,p\n#   define BOOST_MPL_PP_ENUM_7(p) p,p,p,p,p,p,p\n#   define BOOST_MPL_PP_ENUM_8(p) p,p,p,p,p,p,p,p\n#   define BOOST_MPL_PP_ENUM_9(p) p,p,p,p,p,p,p,p,p\n\n#else\n\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/repeat.hpp>\n\n#   define BOOST_MPL_PP_AUX_ENUM_FUNC(unused, i, param) \\\n    BOOST_PP_COMMA_IF(i) param \\\n    /**/\n\n#   define BOOST_MPL_PP_ENUM(n, param) \\\n    BOOST_PP_REPEAT( \\\n          n \\\n        , BOOST_MPL_PP_AUX_ENUM_FUNC \\\n        , param \\\n        ) \\\n    /**/\n\n#endif\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_ENUM_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/ext_params.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_EXT_PARAMS_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_EXT_PARAMS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n\n// BOOST_MPL_PP_EXT_PARAMS(2,2,T): <nothing>\n// BOOST_MPL_PP_EXT_PARAMS(2,3,T): T2\n// BOOST_MPL_PP_EXT_PARAMS(2,4,T): T2, T3\n// BOOST_MPL_PP_EXT_PARAMS(2,n,T): T2, T3, .., Tn-1\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n\n#   include <boost/mpl/aux_/preprocessor/filter_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/sub.hpp>\n\n#   define BOOST_MPL_PP_EXT_PARAMS(i,j,p) \\\n    BOOST_MPL_PP_EXT_PARAMS_DELAY_1(i,BOOST_MPL_PP_SUB(j,i),p) \\\n    /**/\n\n#   define BOOST_MPL_PP_EXT_PARAMS_DELAY_1(i,n,p) \\\n    BOOST_MPL_PP_EXT_PARAMS_DELAY_2(i,n,p) \\\n    /**/\n\n#   define BOOST_MPL_PP_EXT_PARAMS_DELAY_2(i,n,p) \\\n    BOOST_MPL_PP_EXT_PARAMS_##i(n,p) \\\n    /**/\n\n#   define BOOST_MPL_PP_EXT_PARAMS_1(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##1,p##2,p##3,p##4,p##5,p##6,p##7,p##8,p##9)\n#   define BOOST_MPL_PP_EXT_PARAMS_2(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##2,p##3,p##4,p##5,p##6,p##7,p##8,p##9,p1)\n#   define BOOST_MPL_PP_EXT_PARAMS_3(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##3,p##4,p##5,p##6,p##7,p##8,p##9,p1,p2)\n#   define BOOST_MPL_PP_EXT_PARAMS_4(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##4,p##5,p##6,p##7,p##8,p##9,p1,p2,p3)\n#   define BOOST_MPL_PP_EXT_PARAMS_5(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##5,p##6,p##7,p##8,p##9,p1,p2,p3,p4)\n#   define BOOST_MPL_PP_EXT_PARAMS_6(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##6,p##7,p##8,p##9,p1,p2,p3,p4,p5)\n#   define BOOST_MPL_PP_EXT_PARAMS_7(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##7,p##8,p##9,p1,p2,p3,p4,p5,p6)\n#   
define BOOST_MPL_PP_EXT_PARAMS_8(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##8,p##9,p1,p2,p3,p4,p5,p6,p7)\n#   define BOOST_MPL_PP_EXT_PARAMS_9(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##9,p1,p2,p3,p4,p5,p6,p7,p8)\n\n#else\n\n#   include <boost/preprocessor/arithmetic/add.hpp>\n#   include <boost/preprocessor/arithmetic/sub.hpp>\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/repeat.hpp>\n#   include <boost/preprocessor/tuple/elem.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_AUX_EXT_PARAM_FUNC(unused, i, op) \\\n    BOOST_PP_COMMA_IF(i) \\\n    BOOST_PP_CAT( \\\n          BOOST_PP_TUPLE_ELEM(2,1,op) \\\n        , BOOST_PP_ADD_D(1, i, BOOST_PP_TUPLE_ELEM(2,0,op)) \\\n        ) \\\n    /**/\n\n#   define BOOST_MPL_PP_EXT_PARAMS(i, j, param) \\\n    BOOST_PP_REPEAT( \\\n          BOOST_PP_SUB_D(1,j,i) \\\n        , BOOST_MPL_PP_AUX_EXT_PARAM_FUNC \\\n        , (i,param) \\\n        ) \\\n    /**/\n\n#endif\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_EXT_PARAMS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/filter_params.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_FILTER_PARAMS_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_FILTER_PARAMS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#define BOOST_MPL_PP_FILTER_PARAMS_0(p1,p2,p3,p4,p5,p6,p7,p8,p9) \n#define BOOST_MPL_PP_FILTER_PARAMS_1(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1\n#define BOOST_MPL_PP_FILTER_PARAMS_2(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1,p2\n#define BOOST_MPL_PP_FILTER_PARAMS_3(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1,p2,p3\n#define BOOST_MPL_PP_FILTER_PARAMS_4(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1,p2,p3,p4\n#define BOOST_MPL_PP_FILTER_PARAMS_5(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1,p2,p3,p4,p5\n#define BOOST_MPL_PP_FILTER_PARAMS_6(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1,p2,p3,p4,p5,p6\n#define BOOST_MPL_PP_FILTER_PARAMS_7(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1,p2,p3,p4,p5,p6,p7\n#define BOOST_MPL_PP_FILTER_PARAMS_8(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1,p2,p3,p4,p5,p6,p7,p8\n#define BOOST_MPL_PP_FILTER_PARAMS_9(p1,p2,p3,p4,p5,p6,p7,p8,p9) p1,p2,p3,p4,p5,p6,p7,p8,p9\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_FILTER_PARAMS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/params.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_PARAMS_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_PARAMS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n\n// BOOST_MPL_PP_PARAMS(0,T): <nothing>\n// BOOST_MPL_PP_PARAMS(1,T): T1\n// BOOST_MPL_PP_PARAMS(2,T): T1, T2\n// BOOST_MPL_PP_PARAMS(n,T): T1, T2, .., Tn\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_PARAMS(n,p) \\\n    BOOST_PP_CAT(BOOST_MPL_PP_PARAMS_,n)(p) \\\n    /**/\n\n#   define BOOST_MPL_PP_PARAMS_0(p)\n#   define BOOST_MPL_PP_PARAMS_1(p) p##1\n#   define BOOST_MPL_PP_PARAMS_2(p) p##1,p##2\n#   define BOOST_MPL_PP_PARAMS_3(p) p##1,p##2,p##3\n#   define BOOST_MPL_PP_PARAMS_4(p) p##1,p##2,p##3,p##4\n#   define BOOST_MPL_PP_PARAMS_5(p) p##1,p##2,p##3,p##4,p##5\n#   define BOOST_MPL_PP_PARAMS_6(p) p##1,p##2,p##3,p##4,p##5,p##6\n#   define BOOST_MPL_PP_PARAMS_7(p) p##1,p##2,p##3,p##4,p##5,p##6,p##7\n#   define BOOST_MPL_PP_PARAMS_8(p) p##1,p##2,p##3,p##4,p##5,p##6,p##7,p##8\n#   define BOOST_MPL_PP_PARAMS_9(p) p##1,p##2,p##3,p##4,p##5,p##6,p##7,p##8,p##9\n\n#else\n\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/repeat.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_AUX_PARAM_FUNC(unused, i, param) \\\n    BOOST_PP_COMMA_IF(i) \\\n    BOOST_PP_CAT(param, BOOST_PP_INC(i)) \\\n    /**/\n\n#   define BOOST_MPL_PP_PARAMS(n, param) \\\n    BOOST_PP_REPEAT( \\\n          n \\\n        , BOOST_MPL_PP_AUX_PARAM_FUNC \\\n        , param \\\n        ) \\\n    /**/\n\n#endif \n\n#endif // 
BOOST_MPL_AUX_PREPROCESSOR_PARAMS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/partial_spec_params.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_PARTIAL_SPEC_PARAMS_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_PARTIAL_SPEC_PARAMS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/limits/arity.hpp>\n#include <boost/mpl/aux_/preprocessor/params.hpp>\n#include <boost/mpl/aux_/preprocessor/enum.hpp>\n#include <boost/mpl/aux_/preprocessor/sub.hpp>\n#include <boost/preprocessor/comma_if.hpp>\n\n#define BOOST_MPL_PP_PARTIAL_SPEC_PARAMS(n, param, def) \\\nBOOST_MPL_PP_PARAMS(n, param) \\\nBOOST_PP_COMMA_IF(BOOST_MPL_PP_SUB(BOOST_MPL_LIMIT_METAFUNCTION_ARITY,n)) \\\nBOOST_MPL_PP_ENUM( \\\n      BOOST_MPL_PP_SUB(BOOST_MPL_LIMIT_METAFUNCTION_ARITY,n) \\\n    , def \\\n    ) \\\n/**/\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_PARTIAL_SPEC_PARAMS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/range.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_RANGE_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_RANGE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/preprocessor/seq/subseq.hpp>\n#include <boost/preprocessor/repetition/repeat.hpp>\n#include <boost/preprocessor/arithmetic/add.hpp>\n\n#define BOOST_MPL_PP_RANGE_ITEM(z,n,_) (n)\n\n#define BOOST_MPL_PP_RANGE(first, length) \\\n    BOOST_PP_SEQ_SUBSEQ( \\\n        BOOST_PP_REPEAT(BOOST_PP_ADD(first,length), BOOST_MPL_PP_RANGE_ITEM, _), \\\n        first, length \\\n    ) \\\n/**/\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_RANGE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/repeat.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_REPEAT_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_REPEAT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_REPEAT(n,f,param) \\\n    BOOST_PP_CAT(BOOST_MPL_PP_REPEAT_,n)(f,param) \\\n    /**/\n    \n#   define BOOST_MPL_PP_REPEAT_0(f,p)\n#   define BOOST_MPL_PP_REPEAT_1(f,p) f(0,0,p)\n#   define BOOST_MPL_PP_REPEAT_2(f,p) f(0,0,p) f(0,1,p)\n#   define BOOST_MPL_PP_REPEAT_3(f,p) f(0,0,p) f(0,1,p) f(0,2,p)\n#   define BOOST_MPL_PP_REPEAT_4(f,p) f(0,0,p) f(0,1,p) f(0,2,p) f(0,3,p)\n#   define BOOST_MPL_PP_REPEAT_5(f,p) f(0,0,p) f(0,1,p) f(0,2,p) f(0,3,p) f(0,4,p)\n#   define BOOST_MPL_PP_REPEAT_6(f,p) f(0,0,p) f(0,1,p) f(0,2,p) f(0,3,p) f(0,4,p) f(0,5,p)\n#   define BOOST_MPL_PP_REPEAT_7(f,p) f(0,0,p) f(0,1,p) f(0,2,p) f(0,3,p) f(0,4,p) f(0,5,p) f(0,6,p)\n#   define BOOST_MPL_PP_REPEAT_8(f,p) f(0,0,p) f(0,1,p) f(0,2,p) f(0,3,p) f(0,4,p) f(0,5,p) f(0,6,p) f(0,7,p)\n#   define BOOST_MPL_PP_REPEAT_9(f,p) f(0,0,p) f(0,1,p) f(0,2,p) f(0,3,p) f(0,4,p) f(0,5,p) f(0,6,p) f(0,7,p) f(0,8,p)\n#   define BOOST_MPL_PP_REPEAT_10(f,p) f(0,0,p) f(0,1,p) f(0,2,p) f(0,3,p) f(0,4,p) f(0,5,p) f(0,6,p) f(0,7,p) f(0,8,p) f(0,9,p)\n\n#else \n\n#   include <boost/preprocessor/repeat.hpp>\n\n#   define BOOST_MPL_PP_REPEAT(n,f,param) \\\n    BOOST_PP_REPEAT(n,f,param) \\\n    /**/\n\n#endif \n\n#define BOOST_MPL_PP_REPEAT_IDENTITY_FUNC(unused1, unused2, x) x\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_REPEAT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/sub.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_SUB_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_SUB_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/preprocessor.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)\n\n#   include <boost/mpl/aux_/preprocessor/tuple.hpp>\n\n#if defined(BOOST_MPL_CFG_BROKEN_PP_MACRO_EXPANSION)\n#   include <boost/preprocessor/cat.hpp>\n\n#   define BOOST_MPL_PP_SUB(i,j) \\\n    BOOST_MPL_PP_SUB_DELAY(i,j) \\\n    /**/\n\n#   define BOOST_MPL_PP_SUB_DELAY(i,j) \\\n    BOOST_PP_CAT(BOOST_MPL_PP_TUPLE_11_ELEM_##i,BOOST_MPL_PP_SUB_##j) \\\n    /**/\n#else\n#   define BOOST_MPL_PP_SUB(i,j) \\\n    BOOST_MPL_PP_SUB_DELAY(i,j) \\\n    /**/\n\n#   define BOOST_MPL_PP_SUB_DELAY(i,j) \\\n    BOOST_MPL_PP_TUPLE_11_ELEM_##i BOOST_MPL_PP_SUB_##j \\\n    /**/\n#endif\n\n#   define BOOST_MPL_PP_SUB_0 (0,1,2,3,4,5,6,7,8,9,10)\n#   define BOOST_MPL_PP_SUB_1 (0,0,1,2,3,4,5,6,7,8,9)\n#   define BOOST_MPL_PP_SUB_2 (0,0,0,1,2,3,4,5,6,7,8)\n#   define BOOST_MPL_PP_SUB_3 (0,0,0,0,1,2,3,4,5,6,7)\n#   define BOOST_MPL_PP_SUB_4 (0,0,0,0,0,1,2,3,4,5,6)\n#   define BOOST_MPL_PP_SUB_5 (0,0,0,0,0,0,1,2,3,4,5)\n#   define BOOST_MPL_PP_SUB_6 (0,0,0,0,0,0,0,1,2,3,4)\n#   define BOOST_MPL_PP_SUB_7 (0,0,0,0,0,0,0,0,1,2,3)\n#   define BOOST_MPL_PP_SUB_8 (0,0,0,0,0,0,0,0,0,1,2)\n#   define BOOST_MPL_PP_SUB_9 (0,0,0,0,0,0,0,0,0,0,1)\n#   define BOOST_MPL_PP_SUB_10 (0,0,0,0,0,0,0,0,0,0,0)\n\n#else\n\n#   include <boost/preprocessor/arithmetic/sub.hpp>\n\n#   define BOOST_MPL_PP_SUB(i,j) \\\n    BOOST_PP_SUB(i,j) \\\n    /**/\n    \n#endif\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_SUB_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/preprocessor/tuple.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PREPROCESSOR_TUPLE_HPP_INCLUDED\n#define BOOST_MPL_AUX_PREPROCESSOR_TUPLE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#define BOOST_MPL_PP_TUPLE_11_ELEM_0(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e0\n#define BOOST_MPL_PP_TUPLE_11_ELEM_1(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e1\n#define BOOST_MPL_PP_TUPLE_11_ELEM_2(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e2\n#define BOOST_MPL_PP_TUPLE_11_ELEM_3(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e3\n#define BOOST_MPL_PP_TUPLE_11_ELEM_4(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e4\n#define BOOST_MPL_PP_TUPLE_11_ELEM_5(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e5\n#define BOOST_MPL_PP_TUPLE_11_ELEM_6(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e6\n#define BOOST_MPL_PP_TUPLE_11_ELEM_7(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e7\n#define BOOST_MPL_PP_TUPLE_11_ELEM_8(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e8\n#define BOOST_MPL_PP_TUPLE_11_ELEM_9(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e9\n#define BOOST_MPL_PP_TUPLE_11_ELEM_10(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10) e10\n\n#endif // BOOST_MPL_AUX_PREPROCESSOR_TUPLE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/ptr_to_ref.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PTR_TO_REF_HPP_INCLUDED\n#define BOOST_MPL_AUX_PTR_TO_REF_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/static_cast.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n\n#if BOOST_WORKAROUND(BOOST_MSVC, BOOST_TESTED_AT(1400)) \\\n    ||  ( BOOST_WORKAROUND(__EDG_VERSION__, <= 245) \\\n        && !(defined(__STD_STRICT_ANSI) \\\n            || defined(__STD_STRICT_ANSI_ERRORS)) )\n\n#   define BOOST_MPL_AUX_PTR_TO_REF(X) \\\n    *BOOST_MPL_AUX_STATIC_CAST(X*, 0) \\\n/**/\n\n#else\n\n#   define BOOST_MPL_AUX_PTR_TO_REF(X) \\\n    aux::ptr_to_ref(BOOST_MPL_AUX_STATIC_CAST(X*, 0)) \\\n/**/\n\n#endif\n\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename T > static T const& ptr_to_ref(T*);\n\n}}}\n\n#endif // BOOST_MPL_AUX_PTR_TO_REF_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/push_front_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_PUSH_FRONT_IMPL_HPP_INCLUDED\n#define BOOST_MPL_AUX_PUSH_FRONT_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/push_front_fwd.hpp>\n#include <boost/mpl/assert.hpp>\n#include <boost/mpl/aux_/has_type.hpp>\n#include <boost/mpl/aux_/traits_lambda_spec.hpp>\n#include <boost/mpl/aux_/config/forwarding.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n\n#include <boost/type_traits/is_same.hpp>\n\nnamespace boost { namespace mpl {\n\nstruct has_push_front_arg {};\n\n// agurt 05/feb/04: no default implementation; the stub definition is needed \n// to enable the default 'has_push_front' implementation below\n\ntemplate< typename Tag >\nstruct push_front_impl\n{\n    template< typename Sequence, typename T > struct apply\n    {\n        // should be instantiated only in the context of 'has_push_front_impl';\n        // if you've got an assert here, you are requesting a 'push_front' \n        // specialization that doesn't exist.\n        BOOST_MPL_ASSERT_MSG(\n              ( boost::is_same< T, has_push_front_arg >::value )\n            , REQUESTED_PUSH_FRONT_SPECIALIZATION_FOR_SEQUENCE_DOES_NOT_EXIST\n            , ( Sequence )\n            );\n    };\n};\n\ntemplate< typename Tag >\nstruct has_push_front_impl\n{\n    template< typename Seq > struct apply\n#if !defined(BOOST_MPL_CFG_NO_NESTED_FORWARDING)\n        : aux::has_type< push_front< Seq, has_push_front_arg > >\n    {\n#else\n    {\n        typedef aux::has_type< push_front< Seq, has_push_front_arg > > type;\n        BOOST_STATIC_CONSTANT(bool, value = \n              (aux::has_type< push_front< Seq, has_push_front_arg > >::value)\n            );\n#endif\n    
};\n};\n\nBOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC(2, push_front_impl)\nBOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC(1, has_push_front_impl)\n\n}}\n\n#endif // BOOST_MPL_AUX_PUSH_FRONT_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/reverse_fold_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_REVERSE_FOLD_IMPL_HPP_INCLUDED\n#define BOOST_MPL_AUX_REVERSE_FOLD_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/next_prior.hpp>\n#   include <boost/mpl/deref.hpp>\n#   include <boost/mpl/apply.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n    || defined(BOOST_MPL_CFG_NO_NONTYPE_TEMPLATE_PARTIAL_SPEC)\n#       include <boost/mpl/if.hpp>\n#       include <boost/type_traits/is_same.hpp>\n#   endif\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER reverse_fold_impl.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   define AUX778076_FOLD_IMPL_OP(iter) typename deref<iter>::type\n#   define AUX778076_FOLD_IMPL_NAME_PREFIX reverse_fold\n#   include <boost/mpl/aux_/reverse_fold_impl_body.hpp>\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_AUX_REVERSE_FOLD_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/reverse_fold_impl_body.hpp",
    "content": "\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION!\n\n#if !defined(BOOST_PP_IS_ITERATING)\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#   include <boost/mpl/limits/unrolling.hpp>\n#   include <boost/mpl/aux_/preprocessor/repeat.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n\n#   include <boost/preprocessor/arithmetic/sub.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/dec.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n// local macros, #undef-ined at the end of the header\n\n#   define AUX778076_ITER_FOLD_FORWARD_STEP(unused, n_, unused2) \\\n    typedef typename apply2< \\\n          ForwardOp \\\n        , BOOST_PP_CAT(fwd_state,n_) \\\n        , AUX778076_FOLD_IMPL_OP(BOOST_PP_CAT(iter,n_)) \\\n        >::type BOOST_PP_CAT(fwd_state,BOOST_PP_INC(n_)); \\\n    typedef typename mpl::next<BOOST_PP_CAT(iter,n_)>::type \\\n        BOOST_PP_CAT(iter,BOOST_PP_INC(n_)); \\\n    /**/\n\n#   define AUX778076_ITER_FOLD_BACKWARD_STEP_FUNC(n_) \\\n    typedef typename apply2< \\\n          BackwardOp \\\n        , BOOST_PP_CAT(bkwd_state,n_) \\\n        , AUX778076_FOLD_IMPL_OP(BOOST_PP_CAT(iter,BOOST_PP_DEC(n_))) \\\n        >::type BOOST_PP_CAT(bkwd_state,BOOST_PP_DEC(n_)); \\\n    /**/\n\n#   define AUX778076_ITER_FOLD_BACKWARD_STEP(unused, n_, j) \\\n    AUX778076_ITER_FOLD_BACKWARD_STEP_FUNC( \\\n          BOOST_PP_SUB_D(1,j,n_) \\\n        ) \\\n    /**/\n\n#   define AUX778076_FIRST_BACKWARD_STATE_TYPEDEF(n_) \\\n    typedef typename nested_chunk::state BOOST_PP_CAT(bkwd_state,n_);\n    /**/\n\n#   define AUX778076_FOLD_IMPL_NAME \\\n    
BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_impl) \\\n    /**/\n\n#   define AUX778076_FOLD_CHUNK_NAME \\\n    BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_chunk) \\\n    /**/\n\nnamespace boost { namespace mpl { namespace aux {\n\n/// forward declaration\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(long, N)\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME;\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n    && !defined(BOOST_MPL_CFG_NO_NONTYPE_TEMPLATE_PARTIAL_SPEC)\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_UNROLLING, <boost/mpl/aux_/reverse_fold_impl_body.hpp>))\n#   include BOOST_PP_ITERATE()\n\n// implementation for N that exceeds BOOST_MPL_LIMIT_UNROLLING\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(long, N)\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n\n    BOOST_MPL_PP_REPEAT(\n          BOOST_MPL_LIMIT_UNROLLING\n        , AUX778076_ITER_FOLD_FORWARD_STEP\n        , unused\n        )\n\n    typedef AUX778076_FOLD_IMPL_NAME<\n          ( (N - BOOST_MPL_LIMIT_UNROLLING) < 0 ? 
0 : N - BOOST_MPL_LIMIT_UNROLLING )\n        , BOOST_PP_CAT(iter,BOOST_MPL_LIMIT_UNROLLING)\n        , Last\n        , BOOST_PP_CAT(fwd_state,BOOST_MPL_LIMIT_UNROLLING)\n        , BackwardOp\n        , ForwardOp\n        > nested_chunk;\n        \n    AUX778076_FIRST_BACKWARD_STATE_TYPEDEF(BOOST_MPL_LIMIT_UNROLLING)\n\n    BOOST_MPL_PP_REPEAT(\n          BOOST_MPL_LIMIT_UNROLLING\n        , AUX778076_ITER_FOLD_BACKWARD_STEP\n        , BOOST_MPL_LIMIT_UNROLLING\n        )\n\n    typedef bkwd_state0 state;\n    typedef typename nested_chunk::iterator iterator;\n};\n\n// fallback implementation for sequences of unknown size\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME<-1,First,Last,State,BackwardOp,ForwardOp>\n{\n    typedef AUX778076_FOLD_IMPL_NAME<\n          -1\n        , typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State,AUX778076_FOLD_IMPL_OP(First)>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , AUX778076_FOLD_IMPL_OP(First)\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME<-1,Last,Last,State,BackwardOp,ForwardOp>\n{\n    typedef State state;\n    typedef Last iterator;\n};\n\n#else // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(long, N) >\nstruct AUX778076_FOLD_CHUNK_NAME;\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_UNROLLING, <boost/mpl/aux_/reverse_fold_impl_body.hpp>))\n#   include BOOST_PP_ITERATE()\n\n// implementation for N that exceeds BOOST_MPL_LIMIT_UNROLLING\ntemplate< BOOST_MPL_AUX_NTTP_DECL(long, N) > \nstruct 
AUX778076_FOLD_CHUNK_NAME\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        > \n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n\n        BOOST_MPL_PP_REPEAT(\n              BOOST_MPL_LIMIT_UNROLLING\n            , AUX778076_ITER_FOLD_FORWARD_STEP\n            , unused\n            )\n\n        typedef AUX778076_FOLD_IMPL_NAME<\n              ( (N - BOOST_MPL_LIMIT_UNROLLING) < 0 ? 0 : N - BOOST_MPL_LIMIT_UNROLLING )\n            , BOOST_PP_CAT(iter,BOOST_MPL_LIMIT_UNROLLING)\n            , Last\n            , BOOST_PP_CAT(fwd_state,BOOST_MPL_LIMIT_UNROLLING)\n            , BackwardOp\n            , ForwardOp\n            > nested_chunk;\n            \n        AUX778076_FIRST_BACKWARD_STATE_TYPEDEF(BOOST_MPL_LIMIT_UNROLLING)\n\n        BOOST_MPL_PP_REPEAT(\n              BOOST_MPL_LIMIT_UNROLLING\n            , AUX778076_ITER_FOLD_BACKWARD_STEP\n            , BOOST_MPL_LIMIT_UNROLLING\n            )\n\n        typedef bkwd_state0 state;\n        typedef typename nested_chunk::iterator iterator;\n    };\n};\n\n// fallback implementation for sequences of unknown size\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    > \nstruct BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_step);\n\ntemplate<\n      typename Last\n    , typename State\n    >\nstruct BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_null_step)\n{\n    typedef Last iterator;\n    typedef State state;\n};\n\ntemplate<> \nstruct AUX778076_FOLD_CHUNK_NAME<-1>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        > \n    struct result_\n    {\n        typedef typename if_<\n              typename is_same<First,Last>::type\n            , 
BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_null_step)<Last,State>\n            , BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_step)<First,Last,State,BackwardOp,ForwardOp>\n            >::type res_;\n\n        typedef typename res_::state state;\n        typedef typename res_::iterator iterator;\n    };\n\n#if defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG)\n    /// ETI workaround\n    template<> struct result_<int,int,int,int,int>\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n#endif\n};\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    > \nstruct BOOST_PP_CAT(AUX778076_FOLD_IMPL_NAME_PREFIX,_step)\n{\n    typedef AUX778076_FOLD_CHUNK_NAME<-1>::template result_<\n          typename mpl::next<First>::type\n        , Last\n        , typename apply2<ForwardOp,State,AUX778076_FOLD_IMPL_OP(First)>::type\n        , BackwardOp\n        , ForwardOp\n        > nested_step;\n\n    typedef typename apply2<\n          BackwardOp\n        , typename nested_step::state\n        , AUX778076_FOLD_IMPL_OP(First)\n        >::type state;\n\n    typedef typename nested_step::iterator iterator;\n};\n\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(long, N)\n    , typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    > \nstruct AUX778076_FOLD_IMPL_NAME\n    : AUX778076_FOLD_CHUNK_NAME<N>\n        ::template result_<First,Last,State,BackwardOp,ForwardOp>\n{\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n}}}\n\n#   undef AUX778076_FIRST_BACKWARD_STATE_TYPEDEF\n#   undef AUX778076_ITER_FOLD_BACKWARD_STEP\n#   undef AUX778076_ITER_FOLD_BACKWARD_STEP_FUNC\n#   undef AUX778076_ITER_FOLD_FORWARD_STEP\n\n#undef AUX778076_FOLD_IMPL_OP\n#undef AUX778076_FOLD_IMPL_NAME_PREFIX\n\n///// iteration\n\n#else\n\n#   define n_ BOOST_PP_FRAME_ITERATION(1)\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n    && 
!defined(BOOST_MPL_CFG_NO_NONTYPE_TEMPLATE_PARTIAL_SPEC)\n\ntemplate<\n      typename First\n    , typename Last\n    , typename State\n    , typename BackwardOp\n    , typename ForwardOp\n    >\nstruct AUX778076_FOLD_IMPL_NAME<n_,First,Last,State,BackwardOp,ForwardOp>\n{\n    typedef First iter0;\n    typedef State fwd_state0;\n\n    BOOST_MPL_PP_REPEAT(\n          n_\n        , AUX778076_ITER_FOLD_FORWARD_STEP\n        , unused\n        )\n\n    typedef BOOST_PP_CAT(fwd_state,n_) BOOST_PP_CAT(bkwd_state,n_);\n\n    BOOST_MPL_PP_REPEAT(\n          n_\n        , AUX778076_ITER_FOLD_BACKWARD_STEP\n        , n_\n        )\n\n    typedef bkwd_state0 state;\n    typedef BOOST_PP_CAT(iter,n_) iterator;\n};\n\n#else\n\ntemplate<> struct AUX778076_FOLD_CHUNK_NAME<n_>\n{\n    template<\n          typename First\n        , typename Last\n        , typename State\n        , typename BackwardOp\n        , typename ForwardOp\n        >\n    struct result_\n    {\n        typedef First iter0;\n        typedef State fwd_state0;\n\n        BOOST_MPL_PP_REPEAT(\n              n_\n            , AUX778076_ITER_FOLD_FORWARD_STEP\n            , unused\n            )\n\n        typedef BOOST_PP_CAT(fwd_state,n_) BOOST_PP_CAT(bkwd_state,n_);\n\n        BOOST_MPL_PP_REPEAT(\n              n_\n            , AUX778076_ITER_FOLD_BACKWARD_STEP\n            , n_\n            )\n\n        typedef bkwd_state0 state;\n        typedef BOOST_PP_CAT(iter,n_) iterator;\n    };\n\n#if defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG)\n    /// ETI workaround\n    template<> struct result_<int,int,int,int,int>\n    {\n        typedef int state;\n        typedef int iterator;\n    };\n#endif\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n#   undef n_\n\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/sequence_wrapper.hpp",
    "content": "\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION\n\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   include <boost/mpl/aux_/config/static_constant.hpp>\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n\n#   include <boost/preprocessor/arithmetic/sub.hpp>\n#   include <boost/preprocessor/tuple/elem.hpp>\n#   include <boost/preprocessor/enum_params_with_a_default.hpp>\n#   include <boost/preprocessor/enum_params.hpp>\n#   include <boost/preprocessor/enum.hpp>\n#   include <boost/preprocessor/repeat.hpp>\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n\n#if defined(BOOST_MPL_PREPROCESSING_MODE)\n#   undef LONG_MAX\n#endif\n\nnamespace boost { namespace mpl {\n\n#if !defined(AUX778076_SEQUENCE_BASE_NAME)\n#   define AUX778076_SEQUENCE_BASE_NAME AUX778076_SEQUENCE_NAME\n#endif\n\n#if !defined(AUX778076_SEQUENCE_INTEGRAL_WRAPPER)\n\n#   define AUX778076_SEQUENCE_PARAM_NAME T\n#   define AUX778076_SEQUENCE_TEMPLATE_PARAM typename T\n#   define AUX778076_SEQUENCE_DEFAULT na\n\n#   define AUX778076_SEQUENCE_NAME_N(n) \\\n    BOOST_PP_CAT(AUX778076_SEQUENCE_BASE_NAME,n) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_PARAMS() \\\n    BOOST_PP_ENUM_PARAMS( \\\n          AUX778076_SEQUENCE_LIMIT \\\n        , AUX778076_SEQUENCE_TEMPLATE_PARAM \\\n        ) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_ARGS() \\\n    BOOST_PP_ENUM_PARAMS( \\\n          AUX778076_SEQUENCE_LIMIT \\\n        , T \\\n        ) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_DEFAULT_PARAMS() \\\n     BOOST_PP_ENUM_PARAMS_WITH_A_DEFAULT( \\\n          
AUX778076_SEQUENCE_LIMIT \\\n        , AUX778076_SEQUENCE_TEMPLATE_PARAM \\\n        , AUX778076_SEQUENCE_DEFAULT \\\n        ) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_N_PARAMS(n) \\\n    BOOST_PP_ENUM_PARAMS(n, AUX778076_SEQUENCE_TEMPLATE_PARAM) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_N_ARGS(n) \\\n    BOOST_PP_ENUM_PARAMS(n, T) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_N_PARTIAL_SPEC_ARGS(n) \\\n    BOOST_PP_ENUM_PARAMS(n, T) \\\n    BOOST_PP_COMMA_IF(n) \\\n    BOOST_PP_ENUM( \\\n          BOOST_PP_SUB_D(1,AUX778076_SEQUENCE_LIMIT,n) \\\n        , BOOST_PP_TUPLE_ELEM_3_2 \\\n        , AUX778076_SEQUENCE_DEFAULT \\\n        ) \\\n    /**/\n\n#else // AUX778076_SEQUENCE_INTEGRAL_WRAPPER\n\n#   define AUX778076_SEQUENCE_PARAM_NAME C\n#   define AUX778076_SEQUENCE_TEMPLATE_PARAM BOOST_MPL_AUX_NTTP_DECL(long, C)\n#   define AUX778076_SEQUENCE_DEFAULT LONG_MAX\n\n#   define AUX778076_SEQUENCE_PARAMS() \\\n    typename T, BOOST_PP_ENUM_PARAMS( \\\n          AUX778076_SEQUENCE_LIMIT \\\n        , AUX778076_SEQUENCE_TEMPLATE_PARAM \\\n        ) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_ARGS() \\\n    T, BOOST_PP_ENUM_PARAMS( \\\n          AUX778076_SEQUENCE_LIMIT \\\n        , C \\\n        ) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_DEFAULT_PARAMS() \\\n    typename T, \\\n    BOOST_PP_ENUM_PARAMS_WITH_A_DEFAULT( \\\n          AUX778076_SEQUENCE_LIMIT \\\n        , AUX778076_SEQUENCE_TEMPLATE_PARAM \\\n        , AUX778076_SEQUENCE_DEFAULT \\\n        ) \\\n    /**/\n\n#   define AUX778076_SEQUENCE_N_PARAMS(n) \\\n    typename T BOOST_PP_COMMA_IF(n) \\\n    BOOST_PP_ENUM_PARAMS(n, AUX778076_SEQUENCE_TEMPLATE_PARAM) \\\n    /**/\n\n#   if !defined(AUX778076_SEQUENCE_CONVERT_CN_TO)\n#       define AUX778076_SEQUENCE_CONVERT_CN_TO(z,n,TARGET) BOOST_PP_CAT(C,n)\n#   endif\n\n#   define AUX778076_SEQUENCE_N_ARGS(n) \\\n    T BOOST_PP_COMMA_IF(n) \\\n    BOOST_PP_ENUM(n,AUX778076_SEQUENCE_CONVERT_CN_TO,T) \\\n    /**/\n\n#   define 
AUX778076_SEQUENCE_N_PARTIAL_SPEC_ARGS(n) \\\n    T, BOOST_PP_ENUM_PARAMS(n, C) \\\n    BOOST_PP_COMMA_IF(n) \\\n    BOOST_PP_ENUM( \\\n          BOOST_PP_SUB_D(1,AUX778076_SEQUENCE_LIMIT,n) \\\n        , BOOST_PP_TUPLE_ELEM_3_2 \\\n        , AUX778076_SEQUENCE_DEFAULT \\\n        ) \\\n    /**/\n\n#endif // AUX778076_SEQUENCE_INTEGRAL_WRAPPER\n\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n// forward declaration\ntemplate<\n      AUX778076_SEQUENCE_DEFAULT_PARAMS()\n    >\nstruct AUX778076_SEQUENCE_NAME;\n#else\nnamespace aux {\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) > \nstruct BOOST_PP_CAT(AUX778076_SEQUENCE_NAME,_chooser);\n}\n#endif\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, AUX778076_SEQUENCE_LIMIT, <boost/mpl/aux_/sequence_wrapper.hpp>))\n#include BOOST_PP_ITERATE()\n\n// real C++ version is already taken care of\n#if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\nnamespace aux {\n// ???_count_args\n#define AUX778076_COUNT_ARGS_PREFIX         AUX778076_SEQUENCE_NAME\n#define AUX778076_COUNT_ARGS_DEFAULT        AUX778076_SEQUENCE_DEFAULT\n#define AUX778076_COUNT_ARGS_PARAM_NAME     AUX778076_SEQUENCE_PARAM_NAME\n#define AUX778076_COUNT_ARGS_TEMPLATE_PARAM AUX778076_SEQUENCE_TEMPLATE_PARAM\n#define AUX778076_COUNT_ARGS_ARITY          AUX778076_SEQUENCE_LIMIT\n#define AUX778076_COUNT_ARGS_USE_STANDARD_PP_PRIMITIVES\n#include <boost/mpl/aux_/count_args.hpp>\n\ntemplate<\n      AUX778076_SEQUENCE_PARAMS()\n    >\nstruct BOOST_PP_CAT(AUX778076_SEQUENCE_NAME,_impl)\n{\n    typedef aux::BOOST_PP_CAT(AUX778076_SEQUENCE_NAME,_count_args)<\n          BOOST_PP_ENUM_PARAMS(AUX778076_SEQUENCE_LIMIT, AUX778076_SEQUENCE_PARAM_NAME)\n        > arg_num_;\n    \n    typedef typename aux::BOOST_PP_CAT(AUX778076_SEQUENCE_NAME,_chooser)< arg_num_::value >\n        ::template result_< AUX778076_SEQUENCE_ARGS() >::type type;\n};\n\n} // namespace aux\n\ntemplate<\n      AUX778076_SEQUENCE_DEFAULT_PARAMS()\n    >\nstruct AUX778076_SEQUENCE_NAME\n    : 
aux::BOOST_PP_CAT(AUX778076_SEQUENCE_NAME,_impl)<\n          AUX778076_SEQUENCE_ARGS()\n        >::type\n{\n    typedef typename aux::BOOST_PP_CAT(AUX778076_SEQUENCE_NAME,_impl)<\n          AUX778076_SEQUENCE_ARGS()\n        >::type type;\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n#   undef AUX778076_SEQUENCE_N_PARTIAL_SPEC_ARGS\n#   undef AUX778076_SEQUENCE_N_ARGS\n#   undef AUX778076_SEQUENCE_CONVERT_CN_TO\n#   undef AUX778076_SEQUENCE_N_PARAMS\n#   undef AUX778076_SEQUENCE_DEFAULT_PARAMS\n#   undef AUX778076_SEQUENCE_ARGS\n#   undef AUX778076_SEQUENCE_PARAMS\n#   undef AUX778076_SEQUENCE_NAME_N\n#   undef AUX778076_SEQUENCE_DEFAULT\n#   undef AUX778076_SEQUENCE_TEMPLATE_PARAM\n#   undef AUX778076_SEQUENCE_PARAM_NAME\n#   undef AUX778076_SEQUENCE_LIMIT\n#   undef AUX778076_SEQUENCE_BASE_NAME\n#   undef AUX778076_SEQUENCE_NAME\n#   undef AUX778076_SEQUENCE_INTEGRAL_WRAPPER\n\n}}\n\n///// iteration\n\n#else\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\n#   if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\n#if i_ == AUX778076_SEQUENCE_LIMIT\n\n/// primary template (not a specialization!)\ntemplate<\n      AUX778076_SEQUENCE_N_PARAMS(i_)\n    >\nstruct AUX778076_SEQUENCE_NAME\n    : AUX778076_SEQUENCE_NAME_N(i_)< AUX778076_SEQUENCE_N_ARGS(i_) >\n{\n    typedef typename AUX778076_SEQUENCE_NAME_N(i_)< AUX778076_SEQUENCE_N_ARGS(i_) >::type type;\n};\n\n#else\n\ntemplate<\n      AUX778076_SEQUENCE_N_PARAMS(i_)\n    >\nstruct AUX778076_SEQUENCE_NAME< AUX778076_SEQUENCE_N_PARTIAL_SPEC_ARGS(i_) >\n    : AUX778076_SEQUENCE_NAME_N(i_)< AUX778076_SEQUENCE_N_ARGS(i_) >\n{\n#if i_ > 0 || defined(AUX778076_SEQUENCE_INTEGRAL_WRAPPER)\n    typedef typename AUX778076_SEQUENCE_NAME_N(i_)< AUX778076_SEQUENCE_N_ARGS(i_) >::type type;\n#else\n    typedef AUX778076_SEQUENCE_NAME_N(i_)< AUX778076_SEQUENCE_N_ARGS(i_) >::type type;\n#endif\n};\n\n#endif // i_ == AUX778076_SEQUENCE_LIMIT\n\n#   else\n\nnamespace aux {\n\ntemplate<>\nstruct 
BOOST_PP_CAT(AUX778076_SEQUENCE_NAME,_chooser)<i_>\n{\n    template<\n          AUX778076_SEQUENCE_PARAMS()\n        >\n    struct result_\n    {\n#if i_ > 0 || defined(AUX778076_SEQUENCE_INTEGRAL_WRAPPER)\n        typedef typename AUX778076_SEQUENCE_NAME_N(i_)<\n              AUX778076_SEQUENCE_N_ARGS(i_)\n            >::type type;\n#else\n        typedef AUX778076_SEQUENCE_NAME_N(i_)<\n              AUX778076_SEQUENCE_N_ARGS(i_)\n            >::type type;\n#endif\n    };\n};\n\n} // namespace aux\n\n#   endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n#undef i_\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/static_cast.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_STATIC_CAST_HPP_INCLUDED\n#define BOOST_MPL_AUX_STATIC_CAST_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x561)) \\\n || BOOST_WORKAROUND(__GNUC__, < 3) \\\n || BOOST_WORKAROUND(__MWERKS__, <= 0x3001)\n#   define BOOST_MPL_AUX_STATIC_CAST(T, expr) (T)(expr)\n#else\n#   define BOOST_MPL_AUX_STATIC_CAST(T, expr) static_cast<T>(expr)\n#endif\n\n#endif // BOOST_MPL_AUX_STATIC_CAST_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/template_arity.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_AUX_TEMPLATE_ARITY_HPP_INCLUDED\n#define BOOST_MPL_AUX_TEMPLATE_ARITY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/ttp.hpp>\n#include <boost/mpl/aux_/config/lambda.hpp>\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/aux_/template_arity_fwd.hpp>\n#   include <boost/mpl/int.hpp>\n#   if !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n#   if defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING)\n#       include <boost/mpl/aux_/type_wrapper.hpp>\n#   endif\n#   else\n#       include <boost/mpl/aux_/has_rebind.hpp>\n#   endif\n#endif\n\n#include <boost/mpl/aux_/config/static_constant.hpp>\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER template_arity.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   if !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n#   if defined(BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING)\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/range.hpp>\n#   include <boost/mpl/aux_/preprocessor/repeat.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n\n#   include <boost/preprocessor/seq/fold_left.hpp>\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n#   define AUX778076_ARITY 
BOOST_PP_INC(BOOST_MPL_LIMIT_METAFUNCTION_ARITY)\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) > struct arity_tag\n{\n    typedef char (&type)[N + 1];\n};\n\n#   define AUX778076_MAX_ARITY_OP(unused, state, i_) \\\n    ( BOOST_PP_CAT(C,i_) > 0 ? BOOST_PP_CAT(C,i_) : state ) \\\n/**/\n\ntemplate<\n      BOOST_MPL_PP_PARAMS(AUX778076_ARITY, BOOST_MPL_AUX_NTTP_DECL(int, C))\n    >\nstruct max_arity\n{\n    BOOST_STATIC_CONSTANT(int, value = \n          BOOST_PP_SEQ_FOLD_LEFT(\n              AUX778076_MAX_ARITY_OP\n            , -1\n            , BOOST_MPL_PP_RANGE(1, AUX778076_ARITY)\n            )\n        );\n};\n\n#   undef AUX778076_MAX_ARITY_OP\n\narity_tag<0>::type arity_helper(...);\n\n#   define BOOST_PP_ITERATION_LIMITS (1, AUX778076_ARITY)\n#   define BOOST_PP_FILENAME_1 <boost/mpl/aux_/template_arity.hpp>\n#   include BOOST_PP_ITERATE()\n\ntemplate< typename F, BOOST_MPL_AUX_NTTP_DECL(int, N) >\nstruct template_arity_impl\n{\n    BOOST_STATIC_CONSTANT(int, value = \n          sizeof(::boost::mpl::aux::arity_helper(type_wrapper<F>(),arity_tag<N>())) - 1\n        );\n};\n\n#   define AUX778076_TEMPLATE_ARITY_IMPL_INVOCATION(unused, i_, F) \\\n    BOOST_PP_COMMA_IF(i_) template_arity_impl<F,BOOST_PP_INC(i_)>::value \\\n/**/\n\ntemplate< typename F >\nstruct template_arity\n{\n    BOOST_STATIC_CONSTANT(int, value = (\n          max_arity< BOOST_MPL_PP_REPEAT(\n              AUX778076_ARITY\n            , AUX778076_TEMPLATE_ARITY_IMPL_INVOCATION\n            , F\n            ) >::value\n        ));\n        \n    typedef mpl::int_<value> type;\n};\n\n#   undef AUX778076_TEMPLATE_ARITY_IMPL_INVOCATION\n\n#   undef AUX778076_ARITY\n\n}}}\n\n#   endif // BOOST_MPL_CFG_EXTENDED_TEMPLATE_PARAMETERS_MATCHING\n#   else // BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT\n\n#   include <boost/mpl/aux_/config/eti.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< bool >\nstruct template_arity_impl\n{\n    
template< typename F > struct result_\n        : mpl::int_<-1>\n    {\n    };\n};\n\ntemplate<>\nstruct template_arity_impl<true>\n{\n    template< typename F > struct result_\n        : F::arity\n    {\n    };\n};\n\ntemplate< typename F >\nstruct template_arity\n    : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value >\n        ::template result_<F>\n{\n};\n\n#if defined(BOOST_MPL_CFG_MSVC_ETI_BUG)\ntemplate<>\nstruct template_arity<int>\n    : mpl::int_<-1>\n{\n};\n#endif\n\n}}}\n\n#   endif // BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_AUX_TEMPLATE_ARITY_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\ntemplate<\n      template< BOOST_MPL_PP_PARAMS(i_, typename P) > class F\n    , BOOST_MPL_PP_PARAMS(i_, typename T)\n    >\ntypename arity_tag<i_>::type\narity_helper(type_wrapper< F<BOOST_MPL_PP_PARAMS(i_, T)> >, arity_tag<i_>);\n\n#undef i_\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/template_arity_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_TEMPLATE_ARITY_FWD_HPP_INCLUDED\n#define BOOST_MPL_AUX_TEMPLATE_ARITY_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename F > struct template_arity;\n\n}}}\n\n#endif // BOOST_MPL_AUX_TEMPLATE_ARITY_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/traits_lambda_spec.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_TRAITS_LAMBDA_SPEC_HPP_INCLUDED\n#define BOOST_MPL_AUX_TRAITS_LAMBDA_SPEC_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/sequence_tag_fwd.hpp>\n#include <boost/mpl/void.hpp>\n#include <boost/mpl/aux_/preprocessor/params.hpp>\n#include <boost/mpl/aux_/config/lambda.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n\n#   define BOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC_IMPL(i, trait) /**/\n\n#elif !defined(BOOST_MPL_CFG_MSVC_ETI_BUG)\n\n#   define BOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC_IMPL(i, trait) \\\ntemplate<> struct trait<void_> \\\n{ \\\n    template< BOOST_MPL_PP_PARAMS(i, typename T) > struct apply \\\n    { \\\n    }; \\\n}; \\\n/**/\n\n#else\n\n#   define BOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC_IMPL(i, trait) \\\ntemplate<> struct trait<void_> \\\n{ \\\n    template< BOOST_MPL_PP_PARAMS(i, typename T) > struct apply \\\n    { \\\n    }; \\\n}; \\\ntemplate<> struct trait<int> \\\n{ \\\n    template< BOOST_MPL_PP_PARAMS(i, typename T) > struct apply \\\n    { \\\n        typedef int type; \\\n    }; \\\n}; \\\n/**/\n\n#endif // BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT\n\n\n#define BOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC(i, trait) \\\n    BOOST_MPL_ALGORITM_TRAITS_LAMBDA_SPEC_IMPL(i, trait) \\\n    template<> struct trait<non_sequence_tag> {}; \\\n/**/\n\n#endif // BOOST_MPL_AUX_TRAITS_LAMBDA_SPEC_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/type_wrapper.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_TYPE_WRAPPER_HPP_INCLUDED\n#define BOOST_MPL_AUX_TYPE_WRAPPER_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n// Copyright Peter Dimov 2000-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/ctps.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename T > struct type_wrapper\n{\n    typedef T type;\n};\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n// agurt 08/may/03: a complicated way to extract the wrapped type; need it \n// mostly for the sake of GCC (3.2.x), which ICEs if you try to extract the \n// nested 'type' from 'type_wrapper<T>' when the latter was the result of a\n// 'typeof' expression\ntemplate< typename T > struct wrapped_type;\n\ntemplate< typename T > struct wrapped_type< type_wrapper<T> >\n{\n    typedef T type;\n};\n#else\ntemplate< typename W > struct wrapped_type\n{\n    typedef typename W::type type;\n};\n#endif\n\n}}}\n\n#endif // BOOST_MPL_AUX_TYPE_WRAPPER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/value_wknd.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_VALUE_WKND_HPP_INCLUDED\n#define BOOST_MPL_AUX_VALUE_WKND_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/static_cast.hpp>\n#include <boost/mpl/aux_/config/integral.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if defined(BOOST_MPL_CFG_BCC_INTEGRAL_CONSTANTS) \\\n    || defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG)\n\n#   include <boost/mpl/int.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\ntemplate< typename C_ > struct value_wknd\n    : C_\n{\n};\n\n#if defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG)\ntemplate<> struct value_wknd<int>\n    : int_<1>\n{\n    using int_<1>::value;\n};\n#endif\n}}}\n\n\n#if !defined(BOOST_MPL_CFG_MSVC_60_ETI_BUG)\n#   define BOOST_MPL_AUX_VALUE_WKND(C) \\\n    ::BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::aux::value_wknd< C > \\\n/**/\n#    define BOOST_MPL_AUX_MSVC_VALUE_WKND(C) BOOST_MPL_AUX_VALUE_WKND(C)\n#else\n#   define BOOST_MPL_AUX_VALUE_WKND(C) C\n#   define BOOST_MPL_AUX_MSVC_VALUE_WKND(C) \\\n    ::boost::mpl::aux::value_wknd< C > \\\n/**/\n#endif\n\n#else // BOOST_MPL_CFG_BCC_INTEGRAL_CONSTANTS\n\n#   define BOOST_MPL_AUX_VALUE_WKND(C) C\n#   define BOOST_MPL_AUX_MSVC_VALUE_WKND(C) C\n\n#endif\n\n#if BOOST_WORKAROUND(__EDG_VERSION__, <= 238)\n#   define BOOST_MPL_AUX_NESTED_VALUE_WKND(T, C) \\\n    BOOST_MPL_AUX_STATIC_CAST(T, C::value) \\\n/**/\n#else\n#   define BOOST_MPL_AUX_NESTED_VALUE_WKND(T, C) \\\n    BOOST_MPL_AUX_VALUE_WKND(C)::value \\\n/**/\n#endif\n\n\nnamespace boost { namespace mpl { namespace aux {\n\ntemplate< typename T > struct value_type_wknd\n{\n    typedef typename T::value_type type;\n};\n\n#if 
defined(BOOST_MPL_CFG_MSVC_ETI_BUG)\ntemplate<> struct value_type_wknd<int>\n{\n    typedef int type;\n};\n#endif\n\n}}}\n\n#endif // BOOST_MPL_AUX_VALUE_WKND_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/aux_/yes_no.hpp",
    "content": "\n#ifndef BOOST_MPL_AUX_YES_NO_HPP_INCLUDED\n#define BOOST_MPL_AUX_YES_NO_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/nttp_decl.hpp>\n#include <boost/mpl/aux_/config/arrays.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n\nnamespace boost { namespace mpl { namespace aux {\n\ntypedef char (&no_tag)[1];\ntypedef char (&yes_tag)[2];\n\ntemplate< bool C_ > struct yes_no_tag\n{\n    typedef no_tag type;\n};\n\ntemplate<> struct yes_no_tag<true>\n{\n    typedef yes_tag type;\n};\n\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(long, n) > struct weighted_tag\n{\n#if !BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    typedef char (&type)[n];\n#else\n    char buf[n];\n    typedef weighted_tag type;\n#endif\n};\n\n#if defined(BOOST_MPL_CFG_NO_DEPENDENT_ARRAY_TYPES)\ntemplate<> struct weighted_tag<0>\n{\n    typedef char (&type)[1];\n};\n#endif\n\n}}}\n\n#endif // BOOST_MPL_AUX_YES_NO_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/base.hpp",
    "content": "\n#ifndef BOOST_MPL_BASE_HPP_INCLUDED\n#define BOOST_MPL_BASE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct base\n{\n    typedef typename T::base type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,base,(T))\n};\n\nBOOST_MPL_AUX_NA_SPEC(1, base)\n\n}}\n\n#endif // BOOST_MPL_BASE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/begin.hpp",
    "content": "\n#ifndef BOOST_MPL_BEGIN_HPP_INCLUDED\n#define BOOST_MPL_BEGIN_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end.hpp>\n\n#endif // BOOST_MPL_BEGIN_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/begin_end.hpp",
    "content": "\n#ifndef BOOST_MPL_BEGIN_END_HPP_INCLUDED\n#define BOOST_MPL_BEGIN_END_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end_fwd.hpp>\n#include <boost/mpl/aux_/begin_end_impl.hpp>\n#include <boost/mpl/sequence_tag.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\n// agurt, 13/sep/02: switched from inheritance to typedef; MSVC is more\n// happy this way (less ETI-related errors), and it doesn't affect \n// anything else\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    >\nstruct begin\n{\n    typedef typename sequence_tag<Sequence>::type tag_;\n    typedef typename begin_impl< tag_ >\n        ::template apply< Sequence >::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,begin,(Sequence))\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    >\nstruct end\n{\n    typedef typename sequence_tag<Sequence>::type tag_;\n    typedef typename end_impl< tag_ >\n        ::template apply< Sequence >::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,end,(Sequence))\n};\n\nBOOST_MPL_AUX_NA_SPEC(1, begin)\nBOOST_MPL_AUX_NA_SPEC(1, end)\n\n}}\n\n#endif // BOOST_MPL_BEGIN_END_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/begin_end_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_BEGIN_END_FWD_HPP_INCLUDED\n#define BOOST_MPL_BEGIN_END_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct begin_impl;\ntemplate< typename Tag > struct end_impl;\n\ntemplate< typename Sequence > struct begin;\ntemplate< typename Sequence > struct end;\n\n}}\n\n#endif // BOOST_MPL_BEGIN_END_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/bind.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_BIND_HPP_INCLUDED\n#define BOOST_MPL_BIND_HPP_INCLUDED\n\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/bind_fwd.hpp>\n#   include <boost/mpl/placeholders.hpp>\n#   include <boost/mpl/next.hpp>\n#   include <boost/mpl/protect.hpp>\n#   include <boost/mpl/apply_wrap.hpp>\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/na.hpp>\n#   include <boost/mpl/aux_/arity_spec.hpp>\n#   include <boost/mpl/aux_/type_wrapper.hpp>\n#   include <boost/mpl/aux_/yes_no.hpp>\n#   if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n#       include <boost/type_traits/is_reference.hpp>\n#   endif \n#endif\n\n#include <boost/mpl/aux_/config/bind.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   if defined(BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT)\n#       define BOOST_MPL_PREPROCESSED_HEADER basic_bind.hpp\n#   else\n#       define BOOST_MPL_PREPROCESSED_HEADER bind.hpp\n#   endif\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/preprocessor/default_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/def_params_tail.hpp>\n#   include <boost/mpl/aux_/preprocessor/partial_spec_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/ext_params.hpp>\n#   include <boost/mpl/aux_/preprocessor/repeat.hpp>\n#   include 
<boost/mpl/aux_/preprocessor/enum.hpp>\n#   include <boost/mpl/aux_/preprocessor/add.hpp>\n#   include <boost/mpl/aux_/config/dmc_ambiguous_ctps.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   include <boost/mpl/aux_/config/ttp.hpp>\n#   include <boost/mpl/aux_/config/dtp.hpp>\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/cat.hpp>\n#   include <boost/preprocessor/inc.hpp>\n\nnamespace boost { namespace mpl {\n\n// local macros, #undef-ined at the end of the header\n#   define AUX778076_APPLY \\\n    BOOST_PP_CAT(apply_wrap,BOOST_MPL_LIMIT_METAFUNCTION_ARITY) \\\n    /**/\n\n#   if defined(BOOST_MPL_CFG_DMC_AMBIGUOUS_CTPS)\n#       define AUX778076_DMC_PARAM() , int dummy_\n#   else\n#       define AUX778076_DMC_PARAM()\n#   endif\n\n#   define AUX778076_BIND_PARAMS(param) \\\n    BOOST_MPL_PP_PARAMS( \\\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        , param \\\n        ) \\\n    /**/\n\n#   define AUX778076_BIND_DEFAULT_PARAMS(param, value) \\\n    BOOST_MPL_PP_DEFAULT_PARAMS( \\\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        , param \\\n        , value \\\n        ) \\\n    /**/\n\n#   define AUX778076_BIND_N_PARAMS(n, param) \\\n    BOOST_PP_COMMA_IF(n) BOOST_MPL_PP_PARAMS(n, param) \\\n    /**/\n\n#   define AUX778076_BIND_N_SPEC_PARAMS(n, param, def) \\\n    BOOST_PP_COMMA_IF(n) \\\n    BOOST_MPL_PP_PARTIAL_SPEC_PARAMS(n, param, def) \\\n    /**/\n\n#if !defined(BOOST_MPL_CFG_NO_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\n#   define AUX778076_BIND_NESTED_DEFAULT_PARAMS(param, value) \\\n    AUX778076_BIND_DEFAULT_PARAMS(param, value) \\\n    /**/\n#else\n#   define AUX778076_BIND_NESTED_DEFAULT_PARAMS(param, value) \\\n    AUX778076_BIND_PARAMS(param) \\\n    /**/\n#endif\n\nnamespace aux {\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate<\n      typename T, AUX778076_BIND_PARAMS(typename 
U)\n    >\nstruct resolve_bind_arg\n{\n    typedef T type;\n};\n\n#   if !defined(BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT)\n\ntemplate<\n      typename T\n    , typename Arg\n    >\nstruct replace_unnamed_arg\n{\n    typedef Arg next;\n    typedef T type;\n};\n\ntemplate<\n      typename Arg\n    >\nstruct replace_unnamed_arg< arg<-1>,Arg >\n{\n    typedef typename Arg::next next;\n    typedef Arg type;\n};\n\n#   endif // BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT\n\ntemplate<\n      BOOST_MPL_AUX_NTTP_DECL(int, N), AUX778076_BIND_PARAMS(typename U)\n    >\nstruct resolve_bind_arg< arg<N>,AUX778076_BIND_PARAMS(U) >\n{\n    typedef typename AUX778076_APPLY<mpl::arg<N>, AUX778076_BIND_PARAMS(U)>::type type;\n};\n\n#if !defined(BOOST_MPL_CFG_NO_BIND_TEMPLATE)\ntemplate<\n      typename F, AUX778076_BIND_PARAMS(typename T), AUX778076_BIND_PARAMS(typename U)\n    >\nstruct resolve_bind_arg< bind<F,AUX778076_BIND_PARAMS(T)>,AUX778076_BIND_PARAMS(U) >\n{\n    typedef bind<F,AUX778076_BIND_PARAMS(T)> f_;\n    typedef typename AUX778076_APPLY<f_, AUX778076_BIND_PARAMS(U)>::type type;\n};\n#endif\n\n#else // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n// agurt, 15/jan/02: it's not a intended to be used as a function class, and \n// MSVC6.5 has problems with 'apply' name here (the code compiles, but doesn't\n// work), so I went with the 'result_' here, and in all other similar cases\ntemplate< bool >\nstruct resolve_arg_impl\n{\n    template< typename T, AUX778076_BIND_PARAMS(typename U) > struct result_\n    {\n        typedef T type;\n    };\n};\n\ntemplate<> \nstruct resolve_arg_impl<true>\n{\n    template< typename T, AUX778076_BIND_PARAMS(typename U) > struct result_\n    {\n        typedef typename AUX778076_APPLY<\n              T\n            , AUX778076_BIND_PARAMS(U)\n            >::type type;\n    };\n};\n\n// for 'resolve_bind_arg'\ntemplate< typename T > struct is_bind_template;\n\ntemplate< \n      typename T, AUX778076_BIND_PARAMS(typename U)\n    
>\nstruct resolve_bind_arg\n    : resolve_arg_impl< is_bind_template<T>::value >\n            ::template result_< T,AUX778076_BIND_PARAMS(U) >\n{\n};\n\n#   if !defined(BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT)\n\ntemplate< typename T > \nstruct replace_unnamed_arg_impl\n{\n    template< typename Arg > struct result_\n    {\n        typedef Arg next;\n        typedef T type;\n    };\n};\n\ntemplate<> \nstruct replace_unnamed_arg_impl< arg<-1> >\n{\n    template< typename Arg > struct result_\n    {\n        typedef typename next<Arg>::type next;\n        typedef Arg type;\n    };\n};\n\ntemplate< typename T, typename Arg > \nstruct replace_unnamed_arg\n    : replace_unnamed_arg_impl<T>::template result_<Arg>\n{\n};\n\n#   endif // BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT\n\n// agurt, 10/mar/02: the forward declaration has to appear before any of\n// 'is_bind_helper' overloads, otherwise MSVC6.5 issues an ICE on it\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, arity_) > struct bind_chooser;\n\naux::no_tag is_bind_helper(...);\ntemplate< typename T > aux::no_tag is_bind_helper(protect<T>*);\n\n// overload for \"main\" form\n// agurt, 15/mar/02: MSVC 6.5 fails to properly resolve the overload \n// in case if we use 'aux::type_wrapper< bind<...> >' here, and all \n// 'bind' instantiations form a complete type anyway\n#if !defined(BOOST_MPL_CFG_NO_BIND_TEMPLATE)\ntemplate<\n      typename F, AUX778076_BIND_PARAMS(typename T)\n    >\naux::yes_tag is_bind_helper(bind<F,AUX778076_BIND_PARAMS(T)>*);\n#endif\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) >\naux::yes_tag is_bind_helper(arg<N>*);\n\ntemplate< bool is_ref_ = true >\nstruct is_bind_template_impl\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value = false);\n    };\n};\n\ntemplate<>\nstruct is_bind_template_impl<false>\n{\n    template< typename T > struct result_\n    {\n        BOOST_STATIC_CONSTANT(bool, value = \n              
sizeof(aux::is_bind_helper(static_cast<T*>(0))) \n                == sizeof(aux::yes_tag)\n            );\n    };\n};\n\ntemplate< typename T > struct is_bind_template\n    : is_bind_template_impl< ::boost::detail::is_reference_impl<T>::value >\n        ::template result_<T>\n{\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n} // namespace aux\n\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/bind.hpp>))\n#include BOOST_PP_ITERATE()\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n    && !defined(BOOST_MPL_CFG_NO_TEMPLATE_TEMPLATE_PARAMETERS)\n/// if_/eval_if specializations\n#   define AUX778076_SPEC_NAME if_\n#   define BOOST_PP_ITERATION_PARAMS_1 (3,(3, 3, <boost/mpl/bind.hpp>))\n#   include BOOST_PP_ITERATE()\n\n#if !defined(BOOST_MPL_CFG_DMC_AMBIGUOUS_CTPS)\n#   define AUX778076_SPEC_NAME eval_if\n#   define BOOST_PP_ITERATION_PARAMS_1 (3,(3, 3, <boost/mpl/bind.hpp>))\n#   include BOOST_PP_ITERATE()\n#endif\n#endif\n\n// real C++ version is already taken care of\n#if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n    && !defined(BOOST_MPL_CFG_NO_BIND_TEMPLATE)\n\nnamespace aux {\n// apply_count_args\n#define AUX778076_COUNT_ARGS_PREFIX bind\n#define AUX778076_COUNT_ARGS_DEFAULT na\n#define AUX778076_COUNT_ARGS_ARITY BOOST_MPL_LIMIT_METAFUNCTION_ARITY\n#include <boost/mpl/aux_/count_args.hpp>\n}\n\n// bind\ntemplate<\n      typename F, AUX778076_BIND_PARAMS(typename T) AUX778076_DMC_PARAM()\n    >\nstruct bind\n    : aux::bind_chooser<\n          aux::bind_count_args<AUX778076_BIND_PARAMS(T)>::value\n        >::template result_< F,AUX778076_BIND_PARAMS(T) >::type\n{\n};\n\nBOOST_MPL_AUX_ARITY_SPEC(\n      BOOST_PP_INC(BOOST_MPL_LIMIT_METAFUNCTION_ARITY)\n    , bind\n    )\n\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(\n      BOOST_PP_INC(BOOST_MPL_LIMIT_METAFUNCTION_ARITY)\n    , bind\n    )\n\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n#   undef 
AUX778076_BIND_NESTED_DEFAULT_PARAMS\n#   undef AUX778076_BIND_N_SPEC_PARAMS\n#   undef AUX778076_BIND_N_PARAMS\n#   undef AUX778076_BIND_DEFAULT_PARAMS\n#   undef AUX778076_BIND_PARAMS\n#   undef AUX778076_DMC_PARAM\n#   undef AUX778076_APPLY\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_BIND_HPP_INCLUDED\n\n///// iteration, depth == 1\n\n// For gcc 4.4 compatability, we must include the\n// BOOST_PP_ITERATION_DEPTH test inside an #else clause.\n#else // BOOST_PP_IS_ITERATING\n#if BOOST_PP_ITERATION_DEPTH() == 1\n\n#   define i_ BOOST_PP_FRAME_ITERATION(1)\n\n#if defined(AUX778076_SPEC_NAME)\n\n// lazy metafunction specialization\ntemplate< template< BOOST_MPL_PP_PARAMS(i_, typename T) > class F, typename Tag >\nstruct BOOST_PP_CAT(quote,i_);\n\ntemplate< BOOST_MPL_PP_PARAMS(i_, typename T) > struct AUX778076_SPEC_NAME;\n\ntemplate<\n      typename Tag AUX778076_BIND_N_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(bind,i_)< \n      BOOST_PP_CAT(quote,i_)<AUX778076_SPEC_NAME,Tag>\n    AUX778076_BIND_N_PARAMS(i_,T)\n    >\n{\n    template<\n          AUX778076_BIND_NESTED_DEFAULT_PARAMS(typename U, na)\n        >\n    struct apply\n    {\n     private:\n        typedef mpl::arg<1> n1;\n#       define BOOST_PP_ITERATION_PARAMS_2 (3,(1, i_, <boost/mpl/bind.hpp>))\n#       include BOOST_PP_ITERATE()\n\n        typedef typename AUX778076_SPEC_NAME<\n              typename t1::type\n            , BOOST_MPL_PP_EXT_PARAMS(2, BOOST_PP_INC(i_), t)\n            >::type f_;\n\n     public:\n        typedef typename f_::type type;\n    };\n};\n\n#undef AUX778076_SPEC_NAME\n\n#else // AUX778076_SPEC_NAME\n\ntemplate<\n      typename F AUX778076_BIND_N_PARAMS(i_, typename T) AUX778076_DMC_PARAM()\n    >\nstruct BOOST_PP_CAT(bind,i_)\n{\n    template<\n          AUX778076_BIND_NESTED_DEFAULT_PARAMS(typename U, na)\n        >\n    struct apply\n    {\n     private:\n#   if !defined(BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT)\n\n        typedef 
aux::replace_unnamed_arg< F,mpl::arg<1> > r0;\n        typedef typename r0::type a0;\n        typedef typename r0::next n1;\n        typedef typename aux::resolve_bind_arg<a0,AUX778076_BIND_PARAMS(U)>::type f_;\n        ///\n#   else\n        typedef typename aux::resolve_bind_arg<F,AUX778076_BIND_PARAMS(U)>::type f_;\n\n#   endif // BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT\n\n#   if i_ > 0\n#       define BOOST_PP_ITERATION_PARAMS_2 (3,(1, i_, <boost/mpl/bind.hpp>))\n#       include BOOST_PP_ITERATE()\n#   endif\n\n     public:\n\n#   define AUX778076_ARG(unused, i_, t) \\\n    BOOST_PP_COMMA_IF(i_) \\\n    typename BOOST_PP_CAT(t,BOOST_PP_INC(i_))::type \\\n/**/\n\n        typedef typename BOOST_PP_CAT(apply_wrap,i_)<\n              f_ \n            BOOST_PP_COMMA_IF(i_) BOOST_MPL_PP_REPEAT(i_, AUX778076_ARG, t)\n            >::type type;\n\n#   undef AUX778076_ARG\n    };\n};\n\nnamespace aux {\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate<\n      typename F AUX778076_BIND_N_PARAMS(i_, typename T), AUX778076_BIND_PARAMS(typename U)\n    >\nstruct resolve_bind_arg<\n      BOOST_PP_CAT(bind,i_)<F AUX778076_BIND_N_PARAMS(i_,T)>,AUX778076_BIND_PARAMS(U)\n    >\n{\n    typedef BOOST_PP_CAT(bind,i_)<F AUX778076_BIND_N_PARAMS(i_,T)> f_;\n    typedef typename AUX778076_APPLY<f_, AUX778076_BIND_PARAMS(U)>::type type;\n};\n\n#else\n\ntemplate<\n      typename F AUX778076_BIND_N_PARAMS(i_, typename T)\n    >\naux::yes_tag\nis_bind_helper(BOOST_PP_CAT(bind,i_)<F AUX778076_BIND_N_PARAMS(i_,T)>*);\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n} // namespace aux\n\nBOOST_MPL_AUX_ARITY_SPEC(BOOST_PP_INC(i_), BOOST_PP_CAT(bind,i_))\nBOOST_MPL_AUX_TEMPLATE_ARITY_SPEC(BOOST_PP_INC(i_), BOOST_PP_CAT(bind,i_))\n\n#   if !defined(BOOST_MPL_CFG_NO_BIND_TEMPLATE)\n#   if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n    \n#if i_ == BOOST_MPL_LIMIT_METAFUNCTION_ARITY\n/// primary template (not a specialization!)\ntemplate<\n      typename F 
AUX778076_BIND_N_PARAMS(i_, typename T) AUX778076_DMC_PARAM()\n    >\nstruct bind\n    : BOOST_PP_CAT(bind,i_)<F AUX778076_BIND_N_PARAMS(i_,T) >\n{\n};\n#else\ntemplate<\n      typename F AUX778076_BIND_N_PARAMS(i_, typename T) AUX778076_DMC_PARAM()\n    >\nstruct bind< F AUX778076_BIND_N_SPEC_PARAMS(i_, T, na) >\n    : BOOST_PP_CAT(bind,i_)<F AUX778076_BIND_N_PARAMS(i_,T) >\n{\n};\n#endif\n\n#   else // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\nnamespace aux {\n\ntemplate<>\nstruct bind_chooser<i_>\n{\n    template<\n          typename F, AUX778076_BIND_PARAMS(typename T)\n        >\n    struct result_\n    {\n        typedef BOOST_PP_CAT(bind,i_)< F AUX778076_BIND_N_PARAMS(i_,T) > type;\n    };\n};\n\n} // namespace aux\n\n#   endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n#   endif // BOOST_MPL_CFG_NO_BIND_TEMPLATE\n\n#endif // AUX778076_SPEC_NAME\n\n#   undef i_\n\n///// iteration, depth == 2\n\n#elif BOOST_PP_ITERATION_DEPTH() == 2\n\n#   define j_ BOOST_PP_FRAME_ITERATION(2)\n#   if !defined(BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT)\n\n        typedef aux::replace_unnamed_arg< BOOST_PP_CAT(T,j_),BOOST_PP_CAT(n,j_) > BOOST_PP_CAT(r,j_);\n        typedef typename BOOST_PP_CAT(r,j_)::type BOOST_PP_CAT(a,j_);\n        typedef typename BOOST_PP_CAT(r,j_)::next BOOST_PP_CAT(n,BOOST_PP_INC(j_));\n        typedef aux::resolve_bind_arg<BOOST_PP_CAT(a,j_), AUX778076_BIND_PARAMS(U)> BOOST_PP_CAT(t,j_);\n        ///\n#   else\n        typedef aux::resolve_bind_arg< BOOST_PP_CAT(T,j_),AUX778076_BIND_PARAMS(U)> BOOST_PP_CAT(t,j_);\n\n#   endif\n#   undef j_\n\n#endif // BOOST_PP_ITERATION_DEPTH()\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/bind_fwd.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_BIND_FWD_HPP_INCLUDED\n#define BOOST_MPL_BIND_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/aux_/na.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/bind.hpp>\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER bind_fwd.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/preprocessor/default_params.hpp>\n#   include <boost/mpl/aux_/config/dmc_ambiguous_ctps.hpp>\n\n#   include <boost/preprocessor/comma_if.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\nnamespace boost { namespace mpl {\n\n// local macros, #undef-ined at the end of the header\n\n#   if defined(BOOST_MPL_CFG_DMC_AMBIGUOUS_CTPS)\n#       define AUX778076_DMC_PARAM() , int dummy_ = 0\n#   else\n#       define AUX778076_DMC_PARAM()\n#   endif\n\n#   define AUX778076_BIND_DEFAULT_PARAMS(param, value) \\\n    BOOST_MPL_PP_DEFAULT_PARAMS( \\\n          BOOST_MPL_LIMIT_METAFUNCTION_ARITY \\\n        , param \\\n        , value \\\n        ) \\\n    AUX778076_DMC_PARAM() \\\n    /**/\n\n#   define AUX778076_BIND_N_PARAMS(n, param) \\\n    BOOST_PP_COMMA_IF(n) BOOST_MPL_PP_PARAMS(n, param) \\\n    AUX778076_DMC_PARAM() \\\n    /**/\n\n#if !defined(BOOST_MPL_CFG_NO_BIND_TEMPLATE)\ntemplate<\n      typename F, 
AUX778076_BIND_DEFAULT_PARAMS(typename T, na)\n    >\nstruct bind;\n#endif\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(0, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/bind_fwd.hpp>))\n#include BOOST_PP_ITERATE()\n\n#   undef AUX778076_BIND_N_PARAMS\n#   undef AUX778076_BIND_DEFAULT_PARAMS\n#   undef AUX778076_DMC_PARAM\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_BIND_FWD_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\ntemplate<\n      typename F AUX778076_BIND_N_PARAMS(i_, typename T)\n    >\nstruct BOOST_PP_CAT(bind,i_);\n\n#undef i_\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/bool.hpp",
    "content": "\n#ifndef BOOST_MPL_BOOL_HPP_INCLUDED\n#define BOOST_MPL_BOOL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/bool_fwd.hpp>\n#include <boost/mpl/integral_c_tag.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\ntemplate< bool C_ > struct bool_\n{\n    BOOST_STATIC_CONSTANT(bool, value = C_);\n    typedef integral_c_tag tag;\n    typedef bool_ type;\n    typedef bool value_type;\n    BOOST_CONSTEXPR operator bool() const { return this->value; }\n};\n\n#if !defined(BOOST_NO_INCLASS_MEMBER_INITIALIZATION)\ntemplate< bool C_ >\nbool const bool_<C_>::value;\n#endif\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n\n#endif // BOOST_MPL_BOOL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/bool_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_BOOL_FWD_HPP_INCLUDED\n#define BOOST_MPL_BOOL_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/adl_barrier.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\ntemplate< bool C_ > struct bool_;\n\n// shorcuts\ntypedef bool_<true> true_;\ntypedef bool_<false> false_;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n\nBOOST_MPL_AUX_ADL_BARRIER_DECL(bool_)\nBOOST_MPL_AUX_ADL_BARRIER_DECL(true_)\nBOOST_MPL_AUX_ADL_BARRIER_DECL(false_)\n\n#endif // BOOST_MPL_BOOL_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/clear.hpp",
    "content": "\n#ifndef BOOST_MPL_CLEAR_HPP_INCLUDED\n#define BOOST_MPL_CLEAR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/clear_fwd.hpp>\n#include <boost/mpl/aux_/clear_impl.hpp>\n#include <boost/mpl/sequence_tag.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    >\nstruct clear\n    : clear_impl< typename sequence_tag<Sequence>::type >\n        ::template apply< Sequence >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,clear,(Sequence))\n};\n\nBOOST_MPL_AUX_NA_SPEC(1, clear)\n\n}}\n\n#endif // BOOST_MPL_CLEAR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/clear_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_CLEAR_FWD_HPP_INCLUDED\n#define BOOST_MPL_CLEAR_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct clear_impl;\ntemplate< typename Sequence > struct clear;\n\n}}\n\n#endif // BOOST_MPL_CLEAR_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/deref.hpp",
    "content": "\n#ifndef BOOST_MPL_DEREF_HPP_INCLUDED\n#define BOOST_MPL_DEREF_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/msvc_type.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Iterator)\n    >\nstruct deref\n{\n#if !defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG)\n    typedef typename Iterator::type type;\n#else\n    typedef typename aux::msvc_type<Iterator>::type type;\n#endif\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,deref,(Iterator))\n};\n\nBOOST_MPL_AUX_NA_SPEC(1, deref)\n\n}}\n\n#endif // BOOST_MPL_DEREF_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/empty_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_EMPTY_FWD_HPP_INCLUDED\n#define BOOST_MPL_EMPTY_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct empty_impl;\ntemplate< typename Sequence > struct empty;\n\n}}\n\n#endif // BOOST_MPL_EMPTY_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/end.hpp",
    "content": "\n#ifndef BOOST_MPL_END_HPP_INCLUDED\n#define BOOST_MPL_END_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end.hpp>\n\n#endif // BOOST_MPL_END_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/erase_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_ERASE_FWD_HPP_INCLUDED\n#define BOOST_MPL_ERASE_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct erase_impl;\ntemplate< typename Sequence, typename First, typename Last > struct erase;\n\n}}\n\n#endif // BOOST_MPL_ERASE_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/erase_key_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_ERASE_KEY_FWD_HPP_INCLUDED\n#define BOOST_MPL_ERASE_KEY_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct erase_key_impl;\ntemplate< typename Sequence, typename Key > struct erase_key;\n\n}}\n\n#endif // BOOST_MPL_ERASE_KEY_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/eval_if.hpp",
    "content": "\n#ifndef BOOST_MPL_EVAL_IF_HPP_INCLUDED\n#define BOOST_MPL_EVAL_IF_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0.\n// (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/if.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(C)\n    , typename BOOST_MPL_AUX_NA_PARAM(F1)\n    , typename BOOST_MPL_AUX_NA_PARAM(F2)\n    >\nstruct eval_if\n#if BOOST_WORKAROUND(BOOST_MSVC, <= 1300) \\\n     || ( BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, >= 0x0300) \\\n        && BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, BOOST_TESTED_AT(0x0304)) \\\n        )\n{\n    typedef typename if_<C,F1,F2>::type f_;\n    typedef typename f_::type type;\n#else\n    : if_<C,F1,F2>::type\n{\n#endif\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3,eval_if,(C,F1,F2))\n};\n\n// (almost) copy & paste in order to save one more\n// recursively nested template instantiation to user\ntemplate<\n      bool C\n    , typename F1\n    , typename F2\n    >\nstruct eval_if_c\n#if BOOST_WORKAROUND(BOOST_MSVC, <= 1300) \\\n     || ( BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, >= 0x0300) \\\n        && BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, BOOST_TESTED_AT(0x0304)) \\\n        )\n{\n    typedef typename if_c<C,F1,F2>::type f_;\n    typedef typename f_::type type;\n#else\n    : if_c<C,F1,F2>::type\n{\n#endif\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, eval_if)\n\n}}\n\n#endif // BOOST_MPL_EVAL_IF_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/find.hpp",
    "content": "\n#ifndef BOOST_MPL_FIND_HPP_INCLUDED\n#define BOOST_MPL_FIND_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/find_if.hpp>\n#include <boost/mpl/same_as.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    , typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct find\n    : find_if< Sequence,same_as<T> >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2,find,(Sequence,T))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, find)\n\n}}\n\n#endif // BOOST_MPL_FIND_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/find_if.hpp",
    "content": "\n#ifndef BOOST_MPL_FIND_IF_HPP_INCLUDED\n#define BOOST_MPL_FIND_IF_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/find_if_pred.hpp>\n#include <boost/mpl/arg.hpp>\n#include <boost/mpl/iter_fold_if.hpp>\n#include <boost/mpl/aux_/common_name_wknd.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_COMMON_NAME_WKND(find_if)\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    , typename BOOST_MPL_AUX_NA_PARAM(Predicate)\n    >\nstruct find_if\n{\n    typedef typename iter_fold_if<\n          Sequence\n        , void\n        , mpl::arg<1> // ignore\n        , protect< aux::find_if_pred<Predicate> >\n        >::type result_;\n\n    typedef typename second<result_>::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2,find_if,(Sequence,Predicate))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2,find_if)\n\n}}\n\n#endif // BOOST_MPL_FIND_IF_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/fold.hpp",
    "content": "\n#ifndef BOOST_MPL_FOLD_HPP_INCLUDED\n#define BOOST_MPL_FOLD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end.hpp>\n#include <boost/mpl/O1_size.hpp>\n#include <boost/mpl/aux_/fold_impl.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    , typename BOOST_MPL_AUX_NA_PARAM(State)\n    , typename BOOST_MPL_AUX_NA_PARAM(ForwardOp)\n    >\nstruct fold\n{\n    typedef typename aux::fold_impl<\n          ::boost::mpl::O1_size<Sequence>::value\n        , typename begin<Sequence>::type\n        , typename end<Sequence>::type\n        , State\n        , ForwardOp\n        >::state type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3,fold,(Sequence,State,ForwardOp))\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, fold)\n\n}}\n\n#endif // BOOST_MPL_FOLD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/front_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_FRONT_FWD_HPP_INCLUDED\n#define BOOST_MPL_FRONT_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct front_impl;\ntemplate< typename Sequence > struct front;\n\n}}\n\n#endif // BOOST_MPL_FRONT_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/has_key.hpp",
    "content": "\n#ifndef BOOST_MPL_HAS_KEY_HPP_INCLUDED\n#define BOOST_MPL_HAS_KEY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/has_key_fwd.hpp>\n#include <boost/mpl/sequence_tag.hpp>\n#include <boost/mpl/aux_/has_key_impl.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(AssociativeSequence)\n    , typename BOOST_MPL_AUX_NA_PARAM(Key)\n    >\nstruct has_key\n    : has_key_impl< typename sequence_tag<AssociativeSequence>::type >\n        ::template apply<AssociativeSequence,Key>\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2,has_key,(AssociativeSequence,Key))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, has_key)\n\n}}\n\n#endif // BOOST_MPL_HAS_KEY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/has_key_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_HAS_KEY_FWD_HPP_INCLUDED\n#define BOOST_MPL_HAS_KEY_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct has_key_impl;\ntemplate< typename AssociativeSequence, typename Key > struct has_key;\n\n}}\n\n#endif // BOOST_MPL_HAS_KEY_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/has_xxx.hpp",
    "content": "\n#ifndef BOOST_MPL_HAS_XXX_HPP_INCLUDED\n#define BOOST_MPL_HAS_XXX_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2002-2006\n// Copyright David Abrahams 2002-2003\n// Copyright Daniel Walker 2007\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/type_wrapper.hpp>\n#include <boost/mpl/aux_/yes_no.hpp>\n#include <boost/mpl/aux_/config/gcc.hpp>\n#include <boost/mpl/aux_/config/has_xxx.hpp>\n#include <boost/mpl/aux_/config/msvc_typename.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#include <boost/preprocessor/array/elem.hpp>\n#include <boost/preprocessor/cat.hpp>\n#include <boost/preprocessor/control/if.hpp>\n#include <boost/preprocessor/repetition/enum_params.hpp>\n#include <boost/preprocessor/repetition/enum_trailing_params.hpp>\n\n#if BOOST_WORKAROUND( __BORLANDC__, BOOST_TESTED_AT(0x590) )\n# include <boost/type_traits/is_class.hpp>\n#endif\n\n#if !defined(BOOST_MPL_CFG_NO_HAS_XXX)\n\n#   if BOOST_WORKAROUND(BOOST_MSVC, <= 1300)\n\n// agurt, 11/sep/02: MSVC-specific version (< 7.1), based on a USENET \n// newsgroup's posting by John Madsen (comp.lang.c++.moderated, \n// 1999-11-12 19:17:06 GMT); the code is _not_ standard-conforming, but \n// it works way more reliably than the SFINAE-based implementation\n\n// Modified dwa 8/Oct/02 to handle reference types.\n\n#   include <boost/mpl/if.hpp>\n#   include <boost/mpl/bool.hpp>\n\nnamespace boost { namespace mpl { namespace aux {\n\nstruct has_xxx_tag;\n\n#if BOOST_WORKAROUND(BOOST_MSVC, == 1300)\ntemplate< typename U > struct msvc_incomplete_array\n{\n    typedef char (&type)[sizeof(U) + 
1];\n};\n#endif\n\ntemplate< typename T >\nstruct msvc_is_incomplete\n{\n    // MSVC is capable of some kinds of SFINAE.  If U is an incomplete\n    // type, it won't pick the second overload\n    static char tester(...);\n\n#if BOOST_WORKAROUND(BOOST_MSVC, == 1300)\n    template< typename U >\n    static typename msvc_incomplete_array<U>::type tester(type_wrapper<U>);\n#else\n    template< typename U >\n    static char (& tester(type_wrapper<U>) )[sizeof(U)+1];\n#endif \n    \n    BOOST_STATIC_CONSTANT(bool, value = \n          sizeof(tester(type_wrapper<T>())) == 1\n        );\n};\n\ntemplate<>\nstruct msvc_is_incomplete<int>\n{\n    BOOST_STATIC_CONSTANT(bool, value = false);\n};\n\n}}}\n\n#   define BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF_(trait, name, default_) \\\ntemplate< typename T, typename name = ::boost::mpl::aux::has_xxx_tag > \\\nstruct BOOST_PP_CAT(trait,_impl) : T \\\n{ \\\n    static boost::mpl::aux::no_tag \\\n    test(void(*)(::boost::mpl::aux::has_xxx_tag)); \\\n    \\\n    static boost::mpl::aux::yes_tag test(...); \\\n    \\\n    BOOST_STATIC_CONSTANT(bool, value = \\\n          sizeof(test(static_cast<void(*)(name)>(0))) \\\n            != sizeof(boost::mpl::aux::no_tag) \\\n        ); \\\n    typedef boost::mpl::bool_<value> type; \\\n}; \\\n\\\ntemplate< typename T, typename fallback_ = boost::mpl::bool_<default_> > \\\nstruct trait \\\n    : boost::mpl::if_c< \\\n          boost::mpl::aux::msvc_is_incomplete<T>::value \\\n        , boost::mpl::bool_<false> \\\n        , BOOST_PP_CAT(trait,_impl)<T> \\\n        >::type \\\n{ \\\n}; \\\n\\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, void) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, bool) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, char) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, signed char) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, unsigned char) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, signed short) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, unsigned short) 
\\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, signed int) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, unsigned int) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, signed long) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, unsigned long) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, float) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, double) \\\nBOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, long double) \\\n/**/\n\n#   define BOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, T) \\\ntemplate<> struct trait<T> \\\n{ \\\n    BOOST_STATIC_CONSTANT(bool, value = false); \\\n    typedef boost::mpl::bool_<false> type; \\\n}; \\\n/**/\n\n#if !defined(BOOST_NO_INTRINSIC_WCHAR_T)\n#   define BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(trait, name, unused) \\\n    BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF_(trait, name, unused) \\\n    BOOST_MPL_AUX_HAS_XXX_TRAIT_SPEC(trait, wchar_t) \\\n/**/\n#else\n#   define BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(trait, name, unused) \\\n    BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF_(trait, name, unused) \\\n/**/\n#endif\n\n\n// SFINAE-based implementations below are derived from a USENET newsgroup's \n// posting by Rani Sharoni (comp.lang.c++.moderated, 2002-03-17 07:45:09 PST)\n\n#   elif BOOST_WORKAROUND(BOOST_MSVC, <= 1400) \\\n      || (BOOST_WORKAROUND(BOOST_MSVC, BOOST_TESTED_AT(1800)) && defined(__CUDACC__)) \\\n      || BOOST_WORKAROUND(__IBMCPP__, <= 700)\n\n// MSVC 7.1 & MSVC 8.0 & VACPP\n\n// agurt, 15/jun/05: replace overload-based SFINAE implementation with SFINAE\n// applied to partial specialization to fix some apparently random failures \n// (thanks to Daniel Wallin for researching this!)\n\n#   define BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(trait, name, default_) \\\ntemplate< typename T > \\\nstruct BOOST_PP_CAT(trait, _msvc_sfinae_helper) \\\n{ \\\n    typedef void type; \\\n};\\\n\\\ntemplate< typename T, typename U = void > \\\nstruct BOOST_PP_CAT(trait,_impl_) \\\n{ \\\n    BOOST_STATIC_CONSTANT(bool, value = false); \\\n    typedef boost::mpl::bool_<value> type; \\\n}; 
\\\n\\\ntemplate< typename T > \\\nstruct BOOST_PP_CAT(trait,_impl_)< \\\n      T \\\n    , typename BOOST_PP_CAT(trait, _msvc_sfinae_helper)< typename T::name >::type \\\n    > \\\n{ \\\n    BOOST_STATIC_CONSTANT(bool, value = true); \\\n    typedef boost::mpl::bool_<value> type; \\\n}; \\\n\\\ntemplate< typename T, typename fallback_ = boost::mpl::bool_<default_> > \\\nstruct trait \\\n    : BOOST_PP_CAT(trait,_impl_)<T> \\\n{ \\\n}; \\\n/**/\n\n#   elif BOOST_WORKAROUND( __BORLANDC__, BOOST_TESTED_AT(0x590) )\n\n#   define BOOST_MPL_HAS_XXX_TRAIT_NAMED_BCB_DEF(trait, trait_tester, name, default_) \\\ntemplate< typename T, bool IS_CLASS > \\\nstruct trait_tester \\\n{ \\\n    BOOST_STATIC_CONSTANT( bool,  value = false ); \\\n}; \\\ntemplate< typename T > \\\nstruct trait_tester< T, true > \\\n{ \\\n    struct trait_tester_impl \\\n    { \\\n        template < class U > \\\n        static int  resolve( boost::mpl::aux::type_wrapper<U> const volatile * \\\n                           , boost::mpl::aux::type_wrapper<typename U::name >* = 0 ); \\\n        static char resolve( ... 
); \\\n    }; \\\n    typedef boost::mpl::aux::type_wrapper<T> t_; \\\n    BOOST_STATIC_CONSTANT( bool, value = ( sizeof( trait_tester_impl::resolve( static_cast< t_ * >(0) ) ) == sizeof(int) ) ); \\\n}; \\\ntemplate< typename T, typename fallback_ = boost::mpl::bool_<default_> > \\\nstruct trait           \\\n{                      \\\n    BOOST_STATIC_CONSTANT( bool, value = (trait_tester< T, boost::is_class< T >::value >::value) );     \\\n    typedef boost::mpl::bool_< trait< T, fallback_ >::value > type; \\\n};\n\n#   define BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(trait, name, default_) \\\n    BOOST_MPL_HAS_XXX_TRAIT_NAMED_BCB_DEF( trait \\\n                                         , BOOST_PP_CAT(trait,_tester)      \\\n                                         , name       \\\n                                         , default_ ) \\\n/**/\n\n#   else // other SFINAE-capable compilers\n\n#   define BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(trait, name, default_) \\\ntemplate< typename T, typename fallback_ = boost::mpl::bool_<default_> > \\\nstruct trait \\\n{ \\\n    struct gcc_3_2_wknd \\\n    { \\\n        template< typename U > \\\n        static boost::mpl::aux::yes_tag test( \\\n              boost::mpl::aux::type_wrapper<U> const volatile* \\\n            , boost::mpl::aux::type_wrapper<BOOST_MSVC_TYPENAME U::name>* = 0 \\\n            ); \\\n    \\\n        static boost::mpl::aux::no_tag test(...); \\\n    }; \\\n    \\\n    typedef boost::mpl::aux::type_wrapper<T> t_; \\\n    BOOST_STATIC_CONSTANT(bool, value = \\\n          sizeof(gcc_3_2_wknd::test(static_cast<t_*>(0))) \\\n            == sizeof(boost::mpl::aux::yes_tag) \\\n        ); \\\n    typedef boost::mpl::bool_<value> type; \\\n}; \\\n/**/\n\n#   endif // BOOST_WORKAROUND(BOOST_MSVC, <= 1300)\n\n\n#else // BOOST_MPL_CFG_NO_HAS_XXX\n\n// placeholder implementation\n\n#   define BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(trait, name, default_) \\\ntemplate< typename T, typename fallback_ = 
boost::mpl::bool_<default_> > \\\nstruct trait \\\n{ \\\n    BOOST_STATIC_CONSTANT(bool, value = fallback_::value); \\\n    typedef fallback_ type; \\\n}; \\\n/**/\n\n#endif\n\n#define BOOST_MPL_HAS_XXX_TRAIT_DEF(name) \\\n    BOOST_MPL_HAS_XXX_TRAIT_NAMED_DEF(BOOST_PP_CAT(has_,name), name, false) \\\n/**/\n\n\n#if !defined(BOOST_MPL_CFG_NO_HAS_XXX_TEMPLATE)\n\n// Create a boolean Metafunction to detect a nested template\n// member. This implementation is based on a USENET newsgroup's\n// posting by Aleksey Gurtovoy (comp.lang.c++.moderated, 2002-03-19),\n// Rani Sharoni's USENET posting cited above, the non-template has_xxx\n// implementations above, and discussion on the Boost mailing list.\n\n#   if !defined(BOOST_MPL_HAS_XXX_NO_WRAPPED_TYPES)\n#     if BOOST_WORKAROUND(BOOST_MSVC, <= 1400)\n#       define BOOST_MPL_HAS_XXX_NO_WRAPPED_TYPES 1\n#     else\n#       define BOOST_MPL_HAS_XXX_NO_WRAPPED_TYPES 0\n#     endif\n#   endif\n\n#   if !defined(BOOST_MPL_HAS_XXX_NO_EXPLICIT_TEST_FUNCTION)\n#     if (defined(BOOST_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS))\n#       define BOOST_MPL_HAS_XXX_NO_EXPLICIT_TEST_FUNCTION 1\n#     else\n#       define BOOST_MPL_HAS_XXX_NO_EXPLICIT_TEST_FUNCTION 0\n#     endif\n#   endif\n\n#   if !defined(BOOST_MPL_HAS_XXX_NEEDS_TEMPLATE_SFINAE)\n#     if BOOST_WORKAROUND(BOOST_MSVC, <= 1400)\n#       define BOOST_MPL_HAS_XXX_NEEDS_TEMPLATE_SFINAE 1\n#     else\n#       define BOOST_MPL_HAS_XXX_NEEDS_TEMPLATE_SFINAE 0\n#     endif\n#   endif\n\n// NOTE: Many internal implementation macros take a Boost.Preprocessor\n// array argument called args which is of the following form.\n//           ( 4, ( trait, name, max_arity, default_ ) )\n\n#   define BOOST_MPL_HAS_MEMBER_INTROSPECTION_NAME(args) \\\n      BOOST_PP_CAT(BOOST_PP_ARRAY_ELEM(0, args) , _introspect) \\\n    /**/\n\n#   define BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME(args, n) \\\n      BOOST_PP_CAT(BOOST_PP_CAT(BOOST_PP_ARRAY_ELEM(0, args) , _substitute), n) \\\n    
/**/\n\n#   define BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args) \\\n      BOOST_PP_CAT(BOOST_PP_ARRAY_ELEM(0, args) , _test) \\\n    /**/\n\n// Thanks to Guillaume Melquiond for pointing out the need for the\n// \"substitute\" template as an argument to the overloaded test\n// functions to get SFINAE to work for member templates with the\n// correct name but different number of arguments.\n#   define BOOST_MPL_HAS_MEMBER_MULTI_SUBSTITUTE(z, n, args) \\\n      template< \\\n          template< BOOST_PP_ENUM_PARAMS(BOOST_PP_INC(n), typename V) > class V \\\n       > \\\n      struct BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME(args, n) { \\\n      }; \\\n    /**/\n\n#   define BOOST_MPL_HAS_MEMBER_SUBSTITUTE(args, substitute_macro) \\\n      BOOST_PP_REPEAT( \\\n          BOOST_PP_ARRAY_ELEM(2, args) \\\n        , BOOST_MPL_HAS_MEMBER_MULTI_SUBSTITUTE \\\n        , args \\\n      ) \\\n    /**/\n\n#   if !BOOST_MPL_HAS_XXX_NO_EXPLICIT_TEST_FUNCTION\n#     define BOOST_MPL_HAS_MEMBER_REJECT(args, member_macro) \\\n        template< typename V > \\\n        static boost::mpl::aux::no_tag \\\n        BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)(...); \\\n      /**/\n#   else\n#     define BOOST_MPL_HAS_MEMBER_REJECT(args, member_macro) \\\n        static boost::mpl::aux::no_tag \\\n        BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)(...); \\\n      /**/\n#   endif\n\n#   if !BOOST_MPL_HAS_XXX_NO_WRAPPED_TYPES\n#     define BOOST_MPL_HAS_MEMBER_MULTI_ACCEPT(z, n, args) \\\n        template< typename V > \\\n        static boost::mpl::aux::yes_tag \\\n        BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)( \\\n            boost::mpl::aux::type_wrapper< V > const volatile* \\\n          , BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME(args, n) < \\\n                V::template BOOST_PP_ARRAY_ELEM(1, args) \\\n            >* = 0 \\\n        ); \\\n      /**/\n#     define BOOST_MPL_HAS_MEMBER_ACCEPT(args, member_macro) \\\n        
BOOST_PP_REPEAT( \\\n            BOOST_PP_ARRAY_ELEM(2, args) \\\n          , BOOST_MPL_HAS_MEMBER_MULTI_ACCEPT \\\n          , args \\\n        ) \\\n      /**/\n#   else\n#     define BOOST_MPL_HAS_MEMBER_ACCEPT(args, member_macro) \\\n        template< typename V > \\\n        static boost::mpl::aux::yes_tag \\\n        BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)( \\\n            V const volatile* \\\n          , member_macro(args, V, T)* = 0 \\\n        ); \\\n      /**/\n#   endif\n\n#   if !BOOST_MPL_HAS_XXX_NO_EXPLICIT_TEST_FUNCTION\n#     define BOOST_MPL_HAS_MEMBER_TEST(args) \\\n          sizeof(BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)< U >(0)) \\\n              == sizeof(boost::mpl::aux::yes_tag) \\\n      /**/\n#   else\n#     if !BOOST_MPL_HAS_XXX_NO_WRAPPED_TYPES\n#       define BOOST_MPL_HAS_MEMBER_TEST(args) \\\n          sizeof( \\\n              BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)( \\\n                  static_cast< boost::mpl::aux::type_wrapper< U >* >(0) \\\n              ) \\\n          ) == sizeof(boost::mpl::aux::yes_tag) \\\n        /**/\n#     else\n#       define BOOST_MPL_HAS_MEMBER_TEST(args) \\\n          sizeof( \\\n              BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)( \\\n                  static_cast< U* >(0) \\\n              ) \\\n          ) == sizeof(boost::mpl::aux::yes_tag) \\\n        /**/\n#     endif\n#   endif\n\n#   define BOOST_MPL_HAS_MEMBER_INTROSPECT( \\\n               args, substitute_macro, member_macro \\\n           ) \\\n      template< typename U > \\\n      struct BOOST_MPL_HAS_MEMBER_INTROSPECTION_NAME(args) { \\\n          BOOST_MPL_HAS_MEMBER_SUBSTITUTE(args, substitute_macro) \\\n          BOOST_MPL_HAS_MEMBER_REJECT(args, member_macro) \\\n          BOOST_MPL_HAS_MEMBER_ACCEPT(args, member_macro) \\\n          BOOST_STATIC_CONSTANT( \\\n              bool, value = BOOST_MPL_HAS_MEMBER_TEST(args) \\\n          ); \\\n          typedef boost::mpl::bool_< value > 
type; \\\n      }; \\\n    /**/\n\n#   define BOOST_MPL_HAS_MEMBER_IMPLEMENTATION( \\\n               args, introspect_macro, substitute_macro, member_macro \\\n           ) \\\n      template< \\\n          typename T \\\n        , typename fallback_ \\\n              = boost::mpl::bool_< BOOST_PP_ARRAY_ELEM(3, args) > \\\n      > \\\n      class BOOST_PP_ARRAY_ELEM(0, args) { \\\n          introspect_macro(args, substitute_macro, member_macro) \\\n      public: \\\n          static const bool value \\\n              = BOOST_MPL_HAS_MEMBER_INTROSPECTION_NAME(args)< T >::value; \\\n          typedef typename BOOST_MPL_HAS_MEMBER_INTROSPECTION_NAME(args)< \\\n              T \\\n          >::type type; \\\n      }; \\\n    /**/\n\n// BOOST_MPL_HAS_MEMBER_WITH_FUNCTION_SFINAE expands to the full\n// implementation of the function-based metafunction. Compile with -E\n// to see the preprocessor output for this macro.\n#   define BOOST_MPL_HAS_MEMBER_WITH_FUNCTION_SFINAE( \\\n               args, substitute_macro, member_macro \\\n           ) \\\n      BOOST_MPL_HAS_MEMBER_IMPLEMENTATION( \\\n          args \\\n        , BOOST_MPL_HAS_MEMBER_INTROSPECT \\\n        , substitute_macro \\\n        , member_macro \\\n      ) \\\n    /**/\n\n#   if BOOST_MPL_HAS_XXX_NEEDS_TEMPLATE_SFINAE\n\n#     if !defined(BOOST_MPL_HAS_XXX_NEEDS_NAMESPACE_LEVEL_SUBSTITUTE)\n#       if BOOST_WORKAROUND(BOOST_MSVC, <= 1400)\n#         define BOOST_MPL_HAS_XXX_NEEDS_NAMESPACE_LEVEL_SUBSTITUTE 1\n#       endif\n#     endif\n\n#     if !BOOST_MPL_HAS_XXX_NEEDS_NAMESPACE_LEVEL_SUBSTITUTE\n#       define BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME_WITH_TEMPLATE_SFINAE( \\\n                   args, n \\\n               ) \\\n          BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME(args, n) \\\n        /**/\n#     else\n#       define BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME_WITH_TEMPLATE_SFINAE( \\\n                   args, n \\\n               ) \\\n          BOOST_PP_CAT( 
\\\n              boost_mpl_has_xxx_ \\\n            , BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME(args, n) \\\n          ) \\\n        /**/\n#     endif\n\n#     define BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_TAG_NAME( \\\n                 args \\\n             ) \\\n        BOOST_PP_CAT( \\\n            BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME_WITH_TEMPLATE_SFINAE( \\\n                args, 0 \\\n            ) \\\n          , _tag \\\n        ) \\\n      /**/\n\n#     define BOOST_MPL_HAS_MEMBER_MULTI_SUBSTITUTE_WITH_TEMPLATE_SFINAE( \\\n                 z, n, args \\\n             ) \\\n        template< \\\n             template< BOOST_PP_ENUM_PARAMS(BOOST_PP_INC(n), typename U) > class U \\\n        > \\\n        struct BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME_WITH_TEMPLATE_SFINAE( \\\n                args, n \\\n               ) { \\\n            typedef \\\n                BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_TAG_NAME(args) \\\n                type; \\\n        }; \\\n      /**/\n\n#     define BOOST_MPL_HAS_MEMBER_SUBSTITUTE_WITH_TEMPLATE_SFINAE( \\\n                 args, substitute_macro \\\n             ) \\\n        typedef void \\\n            BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_TAG_NAME(args); \\\n        BOOST_PP_REPEAT( \\\n            BOOST_PP_ARRAY_ELEM(2, args) \\\n          , BOOST_MPL_HAS_MEMBER_MULTI_SUBSTITUTE_WITH_TEMPLATE_SFINAE \\\n          , args \\\n        ) \\\n      /**/\n\n#     define BOOST_MPL_HAS_MEMBER_REJECT_WITH_TEMPLATE_SFINAE( \\\n                 args, member_macro \\\n             ) \\\n        template< \\\n            typename U \\\n          , typename V \\\n                = BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_TAG_NAME(args) \\\n        > \\\n        struct BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args) { \\\n            BOOST_STATIC_CONSTANT(bool, value = false); \\\n            typedef boost::mpl::bool_< value > type; \\\n        }; \\\n      /**/\n\n# 
    define BOOST_MPL_HAS_MEMBER_MULTI_ACCEPT_WITH_TEMPLATE_SFINAE( \\\n                 z, n, args \\\n             ) \\\n        template< typename U > \\\n        struct BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)< \\\n            U \\\n          , typename \\\n                BOOST_MPL_HAS_MEMBER_INTROSPECTION_SUBSTITUTE_NAME_WITH_TEMPLATE_SFINAE( \\\n                    args, n \\\n                )< \\\n                    BOOST_MSVC_TYPENAME U::BOOST_PP_ARRAY_ELEM(1, args)< > \\\n                >::type \\\n        > { \\\n            BOOST_STATIC_CONSTANT(bool, value = true); \\\n            typedef boost::mpl::bool_< value > type; \\\n        }; \\\n      /**/\n\n#     define BOOST_MPL_HAS_MEMBER_ACCEPT_WITH_TEMPLATE_SFINAE( \\\n                 args, member_macro \\\n             ) \\\n        BOOST_PP_REPEAT( \\\n            BOOST_PP_ARRAY_ELEM(2, args) \\\n          , BOOST_MPL_HAS_MEMBER_MULTI_ACCEPT_WITH_TEMPLATE_SFINAE \\\n          , args \\\n        ) \\\n      /**/\n\n#     define BOOST_MPL_HAS_MEMBER_INTROSPECT_WITH_TEMPLATE_SFINAE( \\\n                 args, substitute_macro, member_macro \\\n             ) \\\n        BOOST_MPL_HAS_MEMBER_REJECT_WITH_TEMPLATE_SFINAE(args, member_macro) \\\n        BOOST_MPL_HAS_MEMBER_ACCEPT_WITH_TEMPLATE_SFINAE(args, member_macro) \\\n        template< typename U > \\\n        struct BOOST_MPL_HAS_MEMBER_INTROSPECTION_NAME(args) \\\n            : BOOST_MPL_HAS_MEMBER_INTROSPECTION_TEST_NAME(args)< U > { \\\n        }; \\\n      /**/\n \n// BOOST_MPL_HAS_MEMBER_WITH_TEMPLATE_SFINAE expands to the full\n// implementation of the template-based metafunction. 
Compile with -E\n// to see the preprocessor output for this macro.\n//\n// Note that if BOOST_MPL_HAS_XXX_NEEDS_NAMESPACE_LEVEL_SUBSTITUTE is\n// defined BOOST_MPL_HAS_MEMBER_SUBSTITUTE_WITH_TEMPLATE_SFINAE needs\n// to be expanded at namespace level before\n// BOOST_MPL_HAS_MEMBER_WITH_TEMPLATE_SFINAE can be used.\n#     define BOOST_MPL_HAS_MEMBER_WITH_TEMPLATE_SFINAE( \\\n                 args, substitute_macro, member_macro \\\n             ) \\\n        BOOST_MPL_HAS_MEMBER_SUBSTITUTE_WITH_TEMPLATE_SFINAE( \\\n            args, substitute_macro \\\n        ) \\\n        BOOST_MPL_HAS_MEMBER_IMPLEMENTATION( \\\n            args \\\n          , BOOST_MPL_HAS_MEMBER_INTROSPECT_WITH_TEMPLATE_SFINAE \\\n          , substitute_macro \\\n          , member_macro \\\n        ) \\\n      /**/\n\n#   endif // BOOST_MPL_HAS_XXX_NEEDS_TEMPLATE_SFINAE\n\n// Note: In the current implementation the parameter and access macros\n// are no longer expanded.\n#   if !BOOST_WORKAROUND(BOOST_MSVC, <= 1400)\n#     define BOOST_MPL_HAS_XXX_TEMPLATE_NAMED_DEF(trait, name, default_) \\\n        BOOST_MPL_HAS_MEMBER_WITH_FUNCTION_SFINAE( \\\n            ( 4, ( trait, name, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, default_ ) ) \\\n          , BOOST_MPL_HAS_MEMBER_TEMPLATE_SUBSTITUTE_PARAMETER \\\n          , BOOST_MPL_HAS_MEMBER_TEMPLATE_ACCESS \\\n        ) \\\n      /**/\n#   else\n#     define BOOST_MPL_HAS_XXX_TEMPLATE_NAMED_DEF(trait, name, default_) \\\n        BOOST_MPL_HAS_MEMBER_WITH_TEMPLATE_SFINAE( \\\n            ( 4, ( trait, name, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, default_ ) ) \\\n          , BOOST_MPL_HAS_MEMBER_TEMPLATE_SUBSTITUTE_PARAMETER \\\n          , BOOST_MPL_HAS_MEMBER_TEMPLATE_ACCESS \\\n        ) \\\n      /**/\n#   endif\n\n#else // BOOST_MPL_CFG_NO_HAS_XXX_TEMPLATE\n\n// placeholder implementation\n\n#   define BOOST_MPL_HAS_XXX_TEMPLATE_NAMED_DEF(trait, name, default_) \\\n      template< typename T \\\n              , typename fallback_ = boost::mpl::bool_< 
default_ > > \\\n      struct trait { \\\n          BOOST_STATIC_CONSTANT(bool, value = fallback_::value); \\\n          typedef fallback_ type; \\\n      }; \\\n    /**/\n\n#endif // BOOST_MPL_CFG_NO_HAS_XXX_TEMPLATE\n\n#   define BOOST_MPL_HAS_XXX_TEMPLATE_DEF(name) \\\n      BOOST_MPL_HAS_XXX_TEMPLATE_NAMED_DEF( \\\n          BOOST_PP_CAT(has_, name), name, false \\\n      ) \\\n    /**/\n\n#endif // BOOST_MPL_HAS_XXX_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/identity.hpp",
    "content": "\n#ifndef BOOST_MPL_IDENTITY_HPP_INCLUDED\n#define BOOST_MPL_IDENTITY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct identity\n{\n    typedef T type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1, identity, (T))\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct make_identity\n{\n    typedef identity<T> type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1, make_identity, (T))\n};\n\nBOOST_MPL_AUX_NA_SPEC_NO_ETI(1, identity)\nBOOST_MPL_AUX_NA_SPEC_NO_ETI(1, make_identity)\n\n}}\n\n#endif // BOOST_MPL_IDENTITY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/if.hpp",
    "content": "\n#ifndef BOOST_MPL_IF_HPP_INCLUDED\n#define BOOST_MPL_IF_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/value_wknd.hpp>\n#include <boost/mpl/aux_/static_cast.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n#include <boost/mpl/aux_/config/integral.hpp>\n#include <boost/mpl/aux_/config/ctps.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\nnamespace boost { namespace mpl {\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate<\n      bool C\n    , typename T1\n    , typename T2\n    >\nstruct if_c\n{\n    typedef T1 type;\n};\n\ntemplate<\n      typename T1\n    , typename T2\n    >\nstruct if_c<false,T1,T2>\n{\n    typedef T2 type;\n};\n\n// agurt, 05/sep/04: nondescriptive parameter names for the sake of DigitalMars\n// (and possibly MWCW < 8.0); see http://article.gmane.org/gmane.comp.lib.boost.devel/108959\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    , typename BOOST_MPL_AUX_NA_PARAM(T3)\n    >\nstruct if_\n{\n private:\n    // agurt, 02/jan/03: two-step 'type' definition for the sake of aCC \n    typedef if_c<\n#if defined(BOOST_MPL_CFG_BCC_INTEGRAL_CONSTANTS)\n          BOOST_MPL_AUX_VALUE_WKND(T1)::value\n#else\n          BOOST_MPL_AUX_STATIC_CAST(bool, BOOST_MPL_AUX_VALUE_WKND(T1)::value)\n#endif\n        , T2\n        , T3\n        > almost_type_;\n \n public:\n    typedef typename almost_type_::type type;\n    \n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3,if_,(T1,T2,T3))\n};\n\n#else\n\n// no partial class template specialization\n\nnamespace aux {\n\ntemplate< bool C >\nstruct if_impl\n{\n    template< typename T1, typename T2 > 
struct result_\n    {\n        typedef T1 type;\n    };\n};\n\ntemplate<>\nstruct if_impl<false>\n{\n    template< typename T1, typename T2 > struct result_\n    { \n        typedef T2 type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      bool C_\n    , typename T1\n    , typename T2\n    >\nstruct if_c\n{\n    typedef typename aux::if_impl< C_ >\n        ::template result_<T1,T2>::type type;\n};\n\n// (almost) copy & paste in order to save one more \n// recursively nested template instantiation to user\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(C_)\n    , typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct if_\n{\n    enum { msvc_wknd_ = BOOST_MPL_AUX_MSVC_VALUE_WKND(C_)::value };\n\n    typedef typename aux::if_impl< BOOST_MPL_AUX_STATIC_CAST(bool, msvc_wknd_) >\n        ::template result_<T1,T2>::type type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3,if_,(C_,T1,T2))\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\nBOOST_MPL_AUX_NA_SPEC(3, if_)\n\n}}\n\n#endif // BOOST_MPL_IF_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/insert.hpp",
    "content": "\n#ifndef BOOST_MPL_INSERT_HPP_INCLUDED\n#define BOOST_MPL_INSERT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/insert_fwd.hpp>\n#include <boost/mpl/sequence_tag.hpp>\n#include <boost/mpl/aux_/insert_impl.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    , typename BOOST_MPL_AUX_NA_PARAM(Pos_or_T)\n    , typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct insert\n    : insert_impl< typename sequence_tag<Sequence>::type >\n        ::template apply< Sequence,Pos_or_T,T >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3,insert,(Sequence,Pos_or_T,T))\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, insert)\n\n}}\n\n#endif // BOOST_MPL_INSERT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/insert_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_INSERT_FWD_HPP_INCLUDED\n#define BOOST_MPL_INSERT_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct insert_impl;\ntemplate< typename Sequence, typename Pos_or_T, typename T > struct insert;\n\n}}\n\n#endif // BOOST_MPL_INSERT_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/insert_range_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_INSERT_RANGE_FWD_HPP_INCLUDED\n#define BOOST_MPL_INSERT_RANGE_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct insert_range_impl;\ntemplate< typename Sequence, typename Pos, typename Range > struct insert_range;\n\n}}\n\n#endif // BOOST_MPL_INSERT_RANGE_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/int.hpp",
    "content": "\n#ifndef BOOST_MPL_INT_HPP_INCLUDED\n#define BOOST_MPL_INT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/int_fwd.hpp>\n\n#define AUX_WRAPPER_VALUE_TYPE int\n#include <boost/mpl/aux_/integral_wrapper.hpp>\n\n#endif // BOOST_MPL_INT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/int_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_INT_FWD_HPP_INCLUDED\n#define BOOST_MPL_INT_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/adl_barrier.hpp>\n#include <boost/mpl/aux_/nttp_decl.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) > struct int_;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nBOOST_MPL_AUX_ADL_BARRIER_DECL(int_)\n\n#endif // BOOST_MPL_INT_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/integral_c.hpp",
    "content": "\n#ifndef BOOST_MPL_INTEGRAL_C_HPP_INCLUDED\n#define BOOST_MPL_INTEGRAL_C_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2006\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/integral_c_fwd.hpp>\n#include <boost/mpl/aux_/config/ctps.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#if BOOST_WORKAROUND(__HP_aCC, <= 53800)\n// the type of non-type template arguments may not depend on template arguments\n#   define AUX_WRAPPER_PARAMS(N) typename T, long N\n#else\n#   define AUX_WRAPPER_PARAMS(N) typename T, T N\n#endif\n\n#define AUX_WRAPPER_NAME integral_c\n#define AUX_WRAPPER_VALUE_TYPE T\n#define AUX_WRAPPER_INST(value) AUX_WRAPPER_NAME< T, value >\n#include <boost/mpl/aux_/integral_wrapper.hpp>\n\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) \\\n && !BOOST_WORKAROUND(__BORLANDC__, <= 0x551)\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n// 'bool' constant doesn't have 'next'/'prior' members\ntemplate< bool C >\nstruct integral_c<bool, C>\n{\n    BOOST_STATIC_CONSTANT(bool, value = C);\n    typedef integral_c_tag tag;\n    typedef integral_c type;\n    typedef bool value_type;\n    operator bool() const { return this->value; }\n};\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n#endif\n\n#endif // BOOST_MPL_INTEGRAL_C_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/integral_c_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_INTEGRAL_C_FWD_HPP_INCLUDED\n#define BOOST_MPL_INTEGRAL_C_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2006\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n#include <boost/mpl/aux_/adl_barrier.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\n#if BOOST_WORKAROUND(__HP_aCC, <= 53800)\n// the type of non-type template arguments may not depend on template arguments\ntemplate< typename T, long N > struct integral_c;\n#else\ntemplate< typename T, T N > struct integral_c;\n#endif\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nBOOST_MPL_AUX_ADL_BARRIER_DECL(integral_c)\n\n#endif // BOOST_MPL_INTEGRAL_C_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/integral_c_tag.hpp",
    "content": "\n#ifndef BOOST_MPL_INTEGRAL_C_TAG_HPP_INCLUDED\n#define BOOST_MPL_INTEGRAL_C_TAG_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n\n#include <boost/mpl/aux_/adl_barrier.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\nstruct integral_c_tag { BOOST_STATIC_CONSTANT(int, value = 0); };\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nBOOST_MPL_AUX_ADL_BARRIER_DECL(integral_c_tag)\n\n#endif // BOOST_MPL_INTEGRAL_C_TAG_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/is_placeholder.hpp",
    "content": "\n#ifndef BOOST_MPL_IS_PLACEHOLDER_HPP_INCLUDED\n#define BOOST_MPL_IS_PLACEHOLDER_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/arg_fwd.hpp>\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/aux_/yes_no.hpp>\n#include <boost/mpl/aux_/type_wrapper.hpp>\n#include <boost/mpl/aux_/nttp_decl.hpp>\n#include <boost/mpl/aux_/config/ctps.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n\nnamespace boost { namespace mpl {\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate< typename T >\nstruct is_placeholder\n    : bool_<false>\n{\n};\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) >\nstruct is_placeholder< arg<N> >\n    : bool_<true>\n{\n};\n\n#else\n\nnamespace aux {\n\naux::no_tag is_placeholder_helper(...);\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N) >\naux::yes_tag is_placeholder_helper(aux::type_wrapper< arg<N> >*);\n\n} // namespace aux\n\ntemplate< typename T >\nstruct is_placeholder\n{\n    static aux::type_wrapper<T>* get();\n    BOOST_STATIC_CONSTANT(bool, value = \n          sizeof(aux::is_placeholder_helper(get())) == sizeof(aux::yes_tag)\n        );\n    \n    typedef bool_<value> type;\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n}}\n\n#endif // BOOST_MPL_IS_PLACEHOLDER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/iter_fold_if.hpp",
    "content": "\n#ifndef BOOST_MPL_ITER_FOLD_IF_HPP_INCLUDED\n#define BOOST_MPL_ITER_FOLD_IF_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright Eric Friedman 2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end.hpp>\n#include <boost/mpl/logical.hpp>\n#include <boost/mpl/always.hpp>\n#include <boost/mpl/eval_if.hpp>\n#include <boost/mpl/if.hpp>\n#include <boost/mpl/pair.hpp>\n#include <boost/mpl/apply.hpp>\n#include <boost/mpl/aux_/iter_fold_if_impl.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n#include <boost/mpl/aux_/config/forwarding.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#include <boost/type_traits/is_same.hpp>\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< typename Predicate, typename LastIterator >\nstruct iter_fold_if_pred\n{\n    template< typename State, typename Iterator > struct apply\n#if !defined(BOOST_MPL_CFG_NO_NESTED_FORWARDING)\n        : and_<\n              not_< is_same<Iterator,LastIterator> >\n            , apply1<Predicate,Iterator>\n            >\n    {\n#else\n    {\n        typedef and_<\n              not_< is_same<Iterator,LastIterator> >\n            , apply1<Predicate,Iterator>\n            > type;\n#endif\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    , typename BOOST_MPL_AUX_NA_PARAM(State)\n    , typename BOOST_MPL_AUX_NA_PARAM(ForwardOp)\n    , typename BOOST_MPL_AUX_NA_PARAM(ForwardPredicate)\n    , typename BOOST_MPL_AUX_NA_PARAM(BackwardOp)\n    , typename BOOST_MPL_AUX_NA_PARAM(BackwardPredicate)\n    >\nstruct iter_fold_if\n{\n\n    typedef typename begin<Sequence>::type first_;\n    typedef typename end<Sequence>::type 
last_;\n\n    typedef typename eval_if<\n          is_na<BackwardPredicate>\n        , if_< is_na<BackwardOp>, always<false_>, always<true_> >\n        , identity<BackwardPredicate>\n        >::type backward_pred_;\n\n// cwpro8 doesn't like 'cut-off' type here (use typedef instead)\n#if !BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3003)) && !BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(600))\n    struct result_ :\n#else\n    typedef\n#endif\n        aux::iter_fold_if_impl<\n          first_\n        , State\n        , ForwardOp\n        , protect< aux::iter_fold_if_pred< ForwardPredicate,last_ > >\n        , BackwardOp\n        , backward_pred_\n        >\n#if !BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3003)) && !BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(600))\n    { };\n#else\n    result_;\n#endif\n\npublic:\n\n    typedef pair<\n          typename result_::state\n        , typename result_::iterator\n        > type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(\n          6\n        , iter_fold_if\n        , (Sequence,State,ForwardOp,ForwardPredicate,BackwardOp,BackwardPredicate)\n        )\n};\n\nBOOST_MPL_AUX_NA_SPEC(6, iter_fold_if)\n\n}}\n\n#endif // BOOST_MPL_ITER_FOLD_IF_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/iterator_range.hpp",
    "content": "\n#ifndef BOOST_MPL_ITERATOR_RANGE_HPP_INCLUDED\n#define BOOST_MPL_ITERATOR_RANGE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\nstruct iterator_range_tag;\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(First)\n    , typename BOOST_MPL_AUX_NA_PARAM(Last)\n    >\nstruct iterator_range\n{\n    typedef iterator_range_tag tag;\n    typedef iterator_range type;\n    typedef First begin;\n    typedef Last end;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2,iterator_range,(First,Last))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, iterator_range)\n\n}}\n\n#endif // BOOST_MPL_ITERATOR_RANGE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/iterator_tags.hpp",
    "content": "\n#ifndef BOOST_MPL_ITERATOR_TAG_HPP_INCLUDED\n#define BOOST_MPL_ITERATOR_TAG_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/int.hpp>\n\nnamespace boost { namespace mpl {\n\nstruct forward_iterator_tag       : int_<0> { typedef forward_iterator_tag type; };\nstruct bidirectional_iterator_tag : int_<1> { typedef bidirectional_iterator_tag type; };\nstruct random_access_iterator_tag : int_<2> { typedef random_access_iterator_tag type; };\n\n}}\n\n#endif // BOOST_MPL_ITERATOR_TAG_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/key_type_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_KEY_TYPE_FWD_HPP_INCLUDED\n#define BOOST_MPL_KEY_TYPE_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct key_type_impl;\ntemplate< typename AssociativeSequence, typename T > struct key_type;\n\n}}\n\n#endif // BOOST_MPL_KEY_TYPE_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/lambda.hpp",
    "content": "\n#ifndef BOOST_MPL_LAMBDA_HPP_INCLUDED\n#define BOOST_MPL_LAMBDA_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/lambda_fwd.hpp>\n#include <boost/mpl/bind.hpp>\n#include <boost/mpl/aux_/config/lambda.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n#   include <boost/mpl/aux_/full_lambda.hpp>\n#else\n#   include <boost/mpl/aux_/lambda_no_ctps.hpp>\n#   include <boost/mpl/aux_/lambda_support.hpp>\n#   define BOOST_MPL_CFG_NO_IMPLICIT_METAFUNCTIONS\n#endif\n\n#endif // BOOST_MPL_LAMBDA_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/lambda_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_LAMBDA_FWD_HPP_INCLUDED\n#define BOOST_MPL_LAMBDA_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/void_fwd.hpp>\n#include <boost/mpl/aux_/na.hpp>\n#include <boost/mpl/aux_/config/lambda.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\n\n#   include <boost/mpl/int.hpp>\n#   include <boost/mpl/aux_/lambda_arity_param.hpp>\n#   include <boost/mpl/aux_/template_arity_fwd.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< \n      typename T = na\n    , typename Tag = void_\n    BOOST_MPL_AUX_LAMBDA_ARITY_PARAM(\n          typename Arity = int_< aux::template_arity<T>::value >\n        )\n    >\nstruct lambda;\n\n}}\n\n#else // BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT\n\n#   include <boost/mpl/bool.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< \n      typename T = na\n    , typename Tag = void_\n    , typename Protect = true_\n    > \nstruct lambda;\n\n}}\n\n#endif\n\n#endif // BOOST_MPL_LAMBDA_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/limits/arity.hpp",
    "content": "\n#ifndef BOOST_MPL_LIMITS_ARITY_HPP_INCLUDED\n#define BOOST_MPL_LIMITS_ARITY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_LIMIT_METAFUNCTION_ARITY)\n#   define BOOST_MPL_LIMIT_METAFUNCTION_ARITY 5\n#endif\n\n#endif // BOOST_MPL_LIMITS_ARITY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/limits/list.hpp",
    "content": "\n#ifndef BOOST_MPL_LIMITS_LIST_HPP_INCLUDED\n#define BOOST_MPL_LIMITS_LIST_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_LIMIT_LIST_SIZE)\n#   define BOOST_MPL_LIMIT_LIST_SIZE 20\n#endif\n\n#endif // BOOST_MPL_LIMITS_LIST_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/limits/unrolling.hpp",
    "content": "\n#ifndef BOOST_MPL_LIMITS_UNROLLING_HPP_INCLUDED\n#define BOOST_MPL_LIMITS_UNROLLING_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_LIMIT_UNROLLING)\n#   define BOOST_MPL_LIMIT_UNROLLING 4\n#endif\n\n#endif // BOOST_MPL_LIMITS_UNROLLING_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/O1_size.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_O1_SIZE_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_O1_SIZE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/O1_size_fwd.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct O1_size_impl< aux::list_tag >\n{\n    template< typename List > struct apply\n        : List::size\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_O1_SIZE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/begin_end.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_BEGIN_END_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_BEGIN_END_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end_fwd.hpp>\n#include <boost/mpl/list/aux_/iterator.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n#include <boost/mpl/list/aux_/item.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct begin_impl< aux::list_tag >\n{\n    template< typename List > struct apply\n    {\n        typedef l_iter<typename List::type> type;\n    };\n};\n\ntemplate<>\nstruct end_impl< aux::list_tag >\n{\n    template< typename > struct apply\n    {\n        typedef l_iter<l_end> type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_BEGIN_END_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/clear.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_CLEAR_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_CLEAR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/clear_fwd.hpp>\n#include <boost/mpl/list/aux_/item.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct clear_impl< aux::list_tag >\n{\n    template< typename List > struct apply\n    {\n        typedef l_end type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_CLEAR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/empty.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_EMPTY_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_EMPTY_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/empty_fwd.hpp>\n#include <boost/mpl/not.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct empty_impl< aux::list_tag >\n{\n    template< typename List > struct apply\n        : not_<typename List::size>\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_EMPTY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/front.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_FRONT_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_FRONT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/front_fwd.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct front_impl< aux::list_tag >\n{\n    template< typename List > struct apply\n    {\n        typedef typename List::item type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_FRONT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/include_preprocessed.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2001-2006\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION!\n\n#include <boost/mpl/aux_/config/workaround.hpp>\n\n#include <boost/preprocessor/cat.hpp>\n#include <boost/preprocessor/stringize.hpp>\n\n#   define AUX778076_HEADER \\\n    aux_/preprocessed/plain/BOOST_MPL_PREPROCESSED_HEADER \\\n/**/\n\n#if BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(700))\n#   define AUX778076_INCLUDE_STRING BOOST_PP_STRINGIZE(boost/mpl/list/AUX778076_HEADER)\n#   include AUX778076_INCLUDE_STRING\n#   undef AUX778076_INCLUDE_STRING\n#else\n#   include BOOST_PP_STRINGIZE(boost/mpl/list/AUX778076_HEADER)\n#endif\n\n#   undef AUX778076_HEADER\n\n#undef BOOST_MPL_PREPROCESSED_HEADER\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/item.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_NODE_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_NODE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/long.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename Size\n    , typename T\n    , typename Next\n    >\nstruct l_item\n{\n// agurt, 17/jul/03: to facilitate the deficient 'is_sequence' implementation \n#if BOOST_WORKAROUND(BOOST_MSVC, <= 1300)\n    typedef int begin;\n#endif\n    typedef aux::list_tag tag;\n    typedef l_item type;\n\n    typedef Size size;\n    typedef T item;\n    typedef Next next;\n};\n\nstruct l_end\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, <= 1300)\n    typedef int begin;\n#endif\n    typedef aux::list_tag tag;\n    typedef l_end type;\n    typedef long_<0> size;\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_NODE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/iterator.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_ITERATOR_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_ITERATOR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/iterator_tags.hpp>\n#include <boost/mpl/next_prior.hpp>\n#include <boost/mpl/deref.hpp>\n#include <boost/mpl/list/aux_/item.hpp>\n#include <boost/mpl/aux_/na.hpp>\n#include <boost/mpl/aux_/lambda_spec.hpp>\n#include <boost/mpl/aux_/config/ctps.hpp>\n\nnamespace boost { namespace mpl {\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate< typename Node >\nstruct l_iter\n{\n    typedef aux::l_iter_tag tag;\n    typedef forward_iterator_tag category;\n};\n\ntemplate< typename Node >\nstruct deref< l_iter<Node> >\n{\n    typedef typename Node::item type;\n};\n\ntemplate< typename Node >\nstruct next< l_iter<Node> >\n{\n    typedef l_iter< typename Node::next > type;\n};\n\n#else // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\ntemplate< typename Node >\nstruct l_iter\n{\n    typedef aux::l_iter_tag tag;\n    typedef forward_iterator_tag category;\n    typedef typename Node::item type;\n    typedef l_iter< typename mpl::next<Node>::type > next;\n};\n\n#endif\n\n\ntemplate<> struct l_iter<l_end>\n{\n    typedef aux::l_iter_tag tag;\n    typedef forward_iterator_tag category;\n#if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n    typedef na type;\n    typedef l_iter next;\n#endif\n};\n\nBOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, l_iter)\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_ITERATOR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/numbered.hpp",
    "content": "\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION\n\n// Copyright Peter Dimov 2000-2002\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if defined(BOOST_PP_IS_ITERATING)\n\n#include <boost/preprocessor/enum_params.hpp>\n#include <boost/preprocessor/enum_shifted_params.hpp>\n#include <boost/preprocessor/dec.hpp>\n#include <boost/preprocessor/cat.hpp>\n\n#define i BOOST_PP_FRAME_ITERATION(1)\n\n#if i == 1\n\ntemplate<\n      BOOST_PP_ENUM_PARAMS(i, typename T)\n    >\nstruct list1\n    : l_item<\n          long_<1>\n        , T0\n        , l_end\n        >\n{\n    typedef list1 type;\n};\n\n#else\n\n#   define MPL_AUX_LIST_TAIL(list, i, T) \\\n    BOOST_PP_CAT(list,BOOST_PP_DEC(i))< \\\n      BOOST_PP_ENUM_SHIFTED_PARAMS(i, T) \\\n    > \\\n    /**/\n    \ntemplate<\n      BOOST_PP_ENUM_PARAMS(i, typename T)\n    >\nstruct BOOST_PP_CAT(list,i)\n    : l_item<\n          long_<i>\n        , T0\n        , MPL_AUX_LIST_TAIL(list,i,T)\n        >\n{\n    typedef BOOST_PP_CAT(list,i) type;\n};\n\n#   undef MPL_AUX_LIST_TAIL\n\n#endif // i == 1\n\n#undef i\n\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/numbered_c.hpp",
    "content": "\n// NO INCLUDE GUARDS, THE HEADER IS INTENDED FOR MULTIPLE INCLUSION\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if defined(BOOST_PP_IS_ITERATING)\n\n#include <boost/preprocessor/enum_params.hpp>\n#include <boost/preprocessor/enum_shifted_params.hpp>\n#include <boost/preprocessor/dec.hpp>\n#include <boost/preprocessor/cat.hpp>\n\n#define i BOOST_PP_FRAME_ITERATION(1)\n\n#if i == 1\n\ntemplate<\n      typename T\n    , BOOST_PP_ENUM_PARAMS(i, T C)\n    >\nstruct list1_c\n    : l_item<\n          long_<1>\n        , integral_c<T,C0>\n        , l_end\n        >\n{\n    typedef list1_c type;\n    typedef T value_type;\n};\n\n#else\n\n#   define MPL_AUX_LIST_C_TAIL(list, i, C) \\\n    BOOST_PP_CAT(BOOST_PP_CAT(list,BOOST_PP_DEC(i)),_c)<T, \\\n      BOOST_PP_ENUM_SHIFTED_PARAMS(i, C) \\\n    > \\\n    /**/\n    \ntemplate<\n      typename T\n    , BOOST_PP_ENUM_PARAMS(i, T C)\n    >\nstruct BOOST_PP_CAT(BOOST_PP_CAT(list,i),_c)\n    : l_item<\n          long_<i>\n        , integral_c<T,C0>\n        , MPL_AUX_LIST_C_TAIL(list,i,C)\n        >\n{\n    typedef BOOST_PP_CAT(BOOST_PP_CAT(list,i),_c) type;\n    typedef T value_type;\n};\n\n#   undef MPL_AUX_LIST_C_TAIL\n\n#endif // i == 1\n\n#undef i\n\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/pop_front.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_POP_FRONT_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_POP_FRONT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/pop_front_fwd.hpp>\n#include <boost/mpl/next_prior.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct pop_front_impl< aux::list_tag >\n{\n    template< typename List > struct apply\n    {\n        typedef typename mpl::next<List>::type type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_POP_FRONT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list10.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list10.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0\n    >\nstruct list1\n    : l_item<\n          long_<1>\n        , T0\n        , l_end\n        >\n{\n    typedef list1 type;\n};\n\ntemplate<\n      typename T0, typename T1\n    >\nstruct list2\n    : l_item<\n          long_<2>\n        , T0\n        , list1<T1>\n        >\n{\n    typedef list2 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2\n    >\nstruct list3\n    : l_item<\n          long_<3>\n        , T0\n        , list2< T1,T2 >\n        >\n{\n    typedef list3 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3\n    >\nstruct list4\n    : l_item<\n          long_<4>\n        , T0\n        , list3< T1,T2,T3 >\n        >\n{\n    typedef list4 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    >\nstruct list5\n    : l_item<\n          long_<5>\n        , T0\n        , list4< T1,T2,T3,T4 >\n        >\n{\n    typedef list5 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5\n    >\nstruct list6\n    : l_item<\n          long_<6>\n        , T0\n        , list5< T1,T2,T3,T4,T5 >\n        >\n{\n    typedef list6 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6\n    >\nstruct list7\n    : l_item<\n          long_<7>\n        , T0\n        , list6< T1,T2,T3,T4,T5,T6 >\n        >\n{\n    typedef list7 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7\n    
>\nstruct list8\n    : l_item<\n          long_<8>\n        , T0\n        , list7< T1,T2,T3,T4,T5,T6,T7 >\n        >\n{\n    typedef list8 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8\n    >\nstruct list9\n    : l_item<\n          long_<9>\n        , T0\n        , list8< T1,T2,T3,T4,T5,T6,T7,T8 >\n        >\n{\n    typedef list9 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    >\nstruct list10\n    : l_item<\n          long_<10>\n        , T0\n        , list9< T1,T2,T3,T4,T5,T6,T7,T8,T9 >\n        >\n{\n    typedef list10 type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list10_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list10_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T\n    , T C0\n    >\nstruct list1_c\n    : l_item<\n          long_<1>\n        , integral_c< T,C0 >\n        , l_end\n        >\n{\n    typedef list1_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1\n    >\nstruct list2_c\n    : l_item<\n          long_<2>\n        , integral_c< T,C0 >\n        , list1_c< T,C1 >\n        >\n{\n    typedef list2_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2\n    >\nstruct list3_c\n    : l_item<\n          long_<3>\n        , integral_c< T,C0 >\n        , list2_c< T,C1,C2 >\n        >\n{\n    typedef list3_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3\n    >\nstruct list4_c\n    : l_item<\n          long_<4>\n        , integral_c< T,C0 >\n        , list3_c< T,C1,C2,C3 >\n        >\n{\n    typedef list4_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4\n    >\nstruct list5_c\n    : l_item<\n          long_<5>\n        , integral_c< T,C0 >\n        , list4_c< T,C1,C2,C3,C4 >\n        >\n{\n    typedef list5_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5\n    >\nstruct list6_c\n    : l_item<\n          long_<6>\n        , integral_c< T,C0 >\n        , list5_c< T,C1,C2,C3,C4,C5 >\n        >\n{\n    typedef list6_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6\n    >\nstruct list7_c\n    : l_item<\n          long_<7>\n        
, integral_c< T,C0 >\n        , list6_c< T,C1,C2,C3,C4,C5,C6 >\n        >\n{\n    typedef list7_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7\n    >\nstruct list8_c\n    : l_item<\n          long_<8>\n        , integral_c< T,C0 >\n        , list7_c< T,C1,C2,C3,C4,C5,C6,C7 >\n        >\n{\n    typedef list8_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8\n    >\nstruct list9_c\n    : l_item<\n          long_<9>\n        , integral_c< T,C0 >\n        , list8_c< T,C1,C2,C3,C4,C5,C6,C7,C8 >\n        >\n{\n    typedef list9_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9\n    >\nstruct list10_c\n    : l_item<\n          long_<10>\n        , integral_c< T,C0 >\n        , list9_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9 >\n        >\n{\n    typedef list10_c type;\n    typedef T value_type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list20.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list20.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10\n    >\nstruct list11\n    : l_item<\n          long_<11>\n        , T0\n        , list10< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 >\n        >\n{\n    typedef list11 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11\n    >\nstruct list12\n    : l_item<\n          long_<12>\n        , T0\n        , list11< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 >\n        >\n{\n    typedef list12 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12\n    >\nstruct list13\n    : l_item<\n          long_<13>\n        , T0\n        , list12< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 >\n        >\n{\n    typedef list13 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13\n    >\nstruct list14\n    : l_item<\n          long_<14>\n        , T0\n        , list13< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 >\n        >\n{\n    typedef list14 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n   
 , typename T10, typename T11, typename T12, typename T13, typename T14\n    >\nstruct list15\n    : l_item<\n          long_<15>\n        , T0\n        , list14< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 >\n        >\n{\n    typedef list15 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15\n    >\nstruct list16\n    : l_item<\n          long_<16>\n        , T0\n        , list15< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 >\n        >\n{\n    typedef list16 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16\n    >\nstruct list17\n    : l_item<\n          long_<17>\n        , T0\n        , list16< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 >\n        >\n{\n    typedef list17 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17\n    >\nstruct list18\n    : l_item<\n          long_<18>\n        , T0\n        , list17< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 >\n        >\n{\n    typedef list18 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18\n    >\nstruct list19\n    : l_item<\n          long_<19>\n        , T0\n       
 , list18< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 >\n        >\n{\n    typedef list19 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    >\nstruct list20\n    : l_item<\n          long_<20>\n        , T0\n        , list19< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >\n        >\n{\n    typedef list20 type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list20_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list20_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    >\nstruct list11_c\n    : l_item<\n          long_<11>\n        , integral_c< T,C0 >\n        , list10_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 >\n        >\n{\n    typedef list11_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11\n    >\nstruct list12_c\n    : l_item<\n          long_<12>\n        , integral_c< T,C0 >\n        , list11_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 >\n        >\n{\n    typedef list12_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12\n    >\nstruct list13_c\n    : l_item<\n          long_<13>\n        , integral_c< T,C0 >\n        , list12_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12 >\n        >\n{\n    typedef list13_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13\n    >\nstruct list14_c\n    : l_item<\n          long_<14>\n        , integral_c< T,C0 >\n        , list13_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13 >\n        >\n{\n    typedef list14_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14\n    >\nstruct list15_c\n    : l_item<\n          long_<15>\n        , integral_c< T,C0 >\n        , list14_c< 
T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14 >\n        >\n{\n    typedef list15_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15\n    >\nstruct list16_c\n    : l_item<\n          long_<16>\n        , integral_c< T,C0 >\n        , list15_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15 >\n        >\n{\n    typedef list16_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16\n    >\nstruct list17_c\n    : l_item<\n          long_<17>\n        , integral_c< T,C0 >\n        , list16_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16 >\n        >\n{\n    typedef list17_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17\n    >\nstruct list18_c\n    : l_item<\n          long_<18>\n        , integral_c< T,C0 >\n        , list17_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17 >\n        >\n{\n    typedef list18_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18\n    >\nstruct list19_c\n    : l_item<\n          long_<19>\n        , integral_c< T,C0 >\n        , list18_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18 >\n        >\n{\n    typedef list19_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19\n    >\nstruct list20_c\n    : l_item<\n          long_<20>\n        , integral_c< T,C0 >\n        , 
list19_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19 >\n        >\n{\n    typedef list20_c type;\n    typedef T value_type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list30.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list30.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20\n    >\nstruct list21\n    : l_item<\n          long_<21>\n        , T0\n        , list20< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20 >\n        >\n{\n    typedef list21 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21\n    >\nstruct list22\n    : l_item<\n          long_<22>\n        , T0\n        , list21< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21 >\n        >\n{\n    typedef list22 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22\n    >\nstruct list23\n    : l_item<\n          long_<23>\n        , T0\n        , list22< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22 >\n        >\n{\n    
typedef list23 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23\n    >\nstruct list24\n    : l_item<\n          long_<24>\n        , T0\n        , list23< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23 >\n        >\n{\n    typedef list24 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    >\nstruct list25\n    : l_item<\n          long_<25>\n        , T0\n        , list24< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24 >\n        >\n{\n    typedef list25 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25\n    >\nstruct list26\n    : l_item<\n          long_<26>\n        , T0\n        , list25< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25 >\n        >\n{\n    typedef list26 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, 
typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26\n    >\nstruct list27\n    : l_item<\n          long_<27>\n        , T0\n        , list26< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26 >\n        >\n{\n    typedef list27 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27\n    >\nstruct list28\n    : l_item<\n          long_<28>\n        , T0\n        , list27< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27 >\n        >\n{\n    typedef list28 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28\n    >\nstruct list29\n    : l_item<\n          long_<29>\n        , T0\n        , list28< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28 >\n        >\n{\n    typedef list29 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename 
T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    >\nstruct list30\n    : l_item<\n          long_<30>\n        , T0\n        , list29< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29 >\n        >\n{\n    typedef list30 type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list30_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list30_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    >\nstruct list21_c\n    : l_item<\n          long_<21>\n        , integral_c< T,C0 >\n        , list20_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20 >\n        >\n{\n    typedef list21_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21\n    >\nstruct list22_c\n    : l_item<\n          long_<22>\n        , integral_c< T,C0 >\n        , list21_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21 >\n        >\n{\n    typedef list22_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22\n    >\nstruct list23_c\n    : l_item<\n          long_<23>\n        , integral_c< T,C0 >\n        , list22_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22 >\n        >\n{\n    typedef list23_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23\n    >\nstruct list24_c\n    : l_item<\n          long_<24>\n       
 , integral_c< T,C0 >\n        , list23_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23 >\n        >\n{\n    typedef list24_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24\n    >\nstruct list25_c\n    : l_item<\n          long_<25>\n        , integral_c< T,C0 >\n        , list24_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24 >\n        >\n{\n    typedef list25_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25\n    >\nstruct list26_c\n    : l_item<\n          long_<26>\n        , integral_c< T,C0 >\n        , list25_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25 >\n        >\n{\n    typedef list26_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26\n    >\nstruct list27_c\n    : l_item<\n          long_<27>\n        , integral_c< T,C0 >\n        , list26_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26 >\n        >\n{\n    typedef list27_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27\n    >\nstruct list28_c\n    : l_item<\n          long_<28>\n 
       , integral_c< T,C0 >\n        , list27_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27 >\n        >\n{\n    typedef list28_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28\n    >\nstruct list29_c\n    : l_item<\n          long_<29>\n        , integral_c< T,C0 >\n        , list28_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28 >\n        >\n{\n    typedef list29_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29\n    >\nstruct list30_c\n    : l_item<\n          long_<30>\n        , integral_c< T,C0 >\n        , list29_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29 >\n        >\n{\n    typedef list30_c type;\n    typedef T value_type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list40.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list40.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30\n    >\nstruct list31\n    : l_item<\n          long_<31>\n        , T0\n        , list30< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30 >\n        >\n{\n    typedef list31 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31\n    >\nstruct list32\n    : l_item<\n          long_<32>\n        , T0\n        , list31< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31 >\n        >\n{\n    typedef list32 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , 
typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32\n    >\nstruct list33\n    : l_item<\n          long_<33>\n        , T0\n        , list32< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32 >\n        >\n{\n    typedef list33 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33\n    >\nstruct list34\n    : l_item<\n          long_<34>\n        , T0\n        , list33< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33 >\n        >\n{\n    typedef list34 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    >\nstruct list35\n    : l_item<\n          long_<35>\n        , T0\n        , 
list34< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34 >\n        >\n{\n    typedef list35 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35\n    >\nstruct list36\n    : l_item<\n          long_<36>\n        , T0\n        , list35< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35 >\n        >\n{\n    typedef list36 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36\n    >\nstruct list37\n    : l_item<\n          long_<37>\n        , T0\n        , list36< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36 >\n        >\n{\n    typedef list37 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename 
T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37\n    >\nstruct list38\n    : l_item<\n          long_<38>\n        , T0\n        , list37< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37 >\n        >\n{\n    typedef list38 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38\n    >\nstruct list39\n    : l_item<\n          long_<39>\n        , T0\n        , list38< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38 >\n        >\n{\n    typedef list39 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename 
T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    >\nstruct list40\n    : l_item<\n          long_<40>\n        , T0\n        , list39< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39 >\n        >\n{\n    typedef list40 type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list40_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list40_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    >\nstruct list31_c\n    : l_item<\n          long_<31>\n        , integral_c< T,C0 >\n        , list30_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30 >\n        >\n{\n    typedef list31_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31\n    >\nstruct list32_c\n    : l_item<\n          long_<32>\n        , integral_c< T,C0 >\n        , list31_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31 >\n        >\n{\n    typedef list32_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32\n    >\nstruct list33_c\n    : l_item<\n          long_<33>\n        , integral_c< T,C0 >\n        , list32_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32 
>\n        >\n{\n    typedef list33_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33\n    >\nstruct list34_c\n    : l_item<\n          long_<34>\n        , integral_c< T,C0 >\n        , list33_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33 >\n        >\n{\n    typedef list34_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34\n    >\nstruct list35_c\n    : l_item<\n          long_<35>\n        , integral_c< T,C0 >\n        , list34_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34 >\n        >\n{\n    typedef list35_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35\n    >\nstruct list36_c\n    : l_item<\n          long_<36>\n        , integral_c< T,C0 >\n        , list35_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35 >\n        >\n{\n    typedef list36_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    
, T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36\n    >\nstruct list37_c\n    : l_item<\n          long_<37>\n        , integral_c< T,C0 >\n        , list36_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36 >\n        >\n{\n    typedef list37_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37\n    >\nstruct list38_c\n    : l_item<\n          long_<38>\n        , integral_c< T,C0 >\n        , list37_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37 >\n        >\n{\n    typedef list38_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38\n    >\nstruct list39_c\n    : l_item<\n          long_<39>\n        , integral_c< T,C0 >\n        , list38_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38 >\n        >\n{\n    typedef list39_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, 
T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39\n    >\nstruct list40_c\n    : l_item<\n          long_<40>\n        , integral_c< T,C0 >\n        , list39_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39 >\n        >\n{\n    typedef list40_c type;\n    typedef T value_type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list50.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list50.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40\n    >\nstruct list41\n    : l_item<\n          long_<41>\n        , T0\n        , list40< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40 >\n        >\n{\n    typedef list41 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40, typename T41\n    >\nstruct list42\n    : l_item<\n          long_<42>\n        , T0\n  
      , list41< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41 >\n        >\n{\n    typedef list42 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40, typename T41, typename T42\n    >\nstruct list43\n    : l_item<\n          long_<43>\n        , T0\n        , list42< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41,T42 >\n        >\n{\n    typedef list43 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40, typename T41, typename T42, typename T43\n    >\nstruct list44\n    : l_item<\n          long_<44>\n        , T0\n        , list43< 
T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41,T42,T43 >\n        >\n{\n    typedef list44 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40, typename T41, typename T42, typename T43, typename T44\n    >\nstruct list45\n    : l_item<\n          long_<45>\n        , T0\n        , list44< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41,T42,T43,T44 >\n        >\n{\n    typedef list45 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40, typename T41, typename T42, typename T43, typename T44\n    , typename T45\n    >\nstruct list46\n    : l_item<\n          long_<46>\n        , T0\n        , list45< 
T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41,T42,T43,T44,T45 >\n        >\n{\n    typedef list46 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40, typename T41, typename T42, typename T43, typename T44\n    , typename T45, typename T46\n    >\nstruct list47\n    : l_item<\n          long_<47>\n        , T0\n        , list46< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41,T42,T43,T44,T45,T46 >\n        >\n{\n    typedef list47 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40, typename T41, typename T42, typename T43, typename T44\n    , typename T45, typename T46, typename T47\n    >\nstruct 
list48\n    : l_item<\n          long_<48>\n        , T0\n        , list47< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41,T42,T43,T44,T45,T46,T47 >\n        >\n{\n    typedef list48 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename T40, typename T41, typename T42, typename T43, typename T44\n    , typename T45, typename T46, typename T47, typename T48\n    >\nstruct list49\n    : l_item<\n          long_<49>\n        , T0\n        , list48< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41,T42,T43,T44,T45,T46,T47,T48 >\n        >\n{\n    typedef list49 type;\n};\n\ntemplate<\n      typename T0, typename T1, typename T2, typename T3, typename T4\n    , typename T5, typename T6, typename T7, typename T8, typename T9\n    , typename T10, typename T11, typename T12, typename T13, typename T14\n    , typename T15, typename T16, typename T17, typename T18, typename T19\n    , typename T20, typename T21, typename T22, typename T23, typename T24\n    , typename T25, typename T26, typename T27, typename T28, typename T29\n    , typename T30, typename T31, typename T32, typename T33, typename T34\n    , typename T35, typename T36, typename T37, typename T38, typename T39\n    , typename 
T40, typename T41, typename T42, typename T43, typename T44\n    , typename T45, typename T46, typename T47, typename T48, typename T49\n    >\nstruct list50\n    : l_item<\n          long_<50>\n        , T0\n        , list49< T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19,T20,T21,T22,T23,T24,T25,T26,T27,T28,T29,T30,T31,T32,T33,T34,T35,T36,T37,T38,T39,T40,T41,T42,T43,T44,T45,T46,T47,T48,T49 >\n        >\n{\n    typedef list50 type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/preprocessed/plain/list50_c.hpp",
    "content": "\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n\n// Preprocessed version of \"boost/mpl/list/list50_c.hpp\" header\n// -- DO NOT modify by hand!\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    >\nstruct list41_c\n    : l_item<\n          long_<41>\n        , integral_c< T,C0 >\n        , list40_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40 >\n        >\n{\n    typedef list41_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41\n    >\nstruct list42_c\n    : l_item<\n          long_<42>\n        , integral_c< T,C0 >\n        , list41_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41 >\n        >\n{\n    typedef list42_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, 
T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41, T C42\n    >\nstruct list43_c\n    : l_item<\n          long_<43>\n        , integral_c< T,C0 >\n        , list42_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41,C42 >\n        >\n{\n    typedef list43_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41, T C42, T C43\n    >\nstruct list44_c\n    : l_item<\n          long_<44>\n        , integral_c< T,C0 >\n        , list43_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41,C42,C43 >\n        >\n{\n    typedef list44_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41, T C42, T C43, T C44\n    >\nstruct list45_c\n    : l_item<\n          long_<45>\n        , integral_c< T,C0 >\n        , list44_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41,C42,C43,C44 >\n        >\n{\n    typedef list45_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, 
T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41, T C42, T C43, T C44, T C45\n    >\nstruct list46_c\n    : l_item<\n          long_<46>\n        , integral_c< T,C0 >\n        , list45_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41,C42,C43,C44,C45 >\n        >\n{\n    typedef list46_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41, T C42, T C43, T C44, T C45, T C46\n    >\nstruct list47_c\n    : l_item<\n          long_<47>\n        , integral_c< T,C0 >\n        , list46_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41,C42,C43,C44,C45,C46 >\n        >\n{\n    typedef list47_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41, T C42, T C43, T C44, T C45, T C46, T C47\n    >\nstruct list48_c\n    : l_item<\n          long_<48>\n        , integral_c< T,C0 >\n        , list47_c< 
T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41,C42,C43,C44,C45,C46,C47 >\n        >\n{\n    typedef list48_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41, T C42, T C43, T C44, T C45, T C46, T C47, T C48\n    >\nstruct list49_c\n    : l_item<\n          long_<49>\n        , integral_c< T,C0 >\n        , list48_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41,C42,C43,C44,C45,C46,C47,C48 >\n        >\n{\n    typedef list49_c type;\n    typedef T value_type;\n};\n\ntemplate<\n      typename T\n    , T C0, T C1, T C2, T C3, T C4, T C5, T C6, T C7, T C8, T C9, T C10\n    , T C11, T C12, T C13, T C14, T C15, T C16, T C17, T C18, T C19, T C20\n    , T C21, T C22, T C23, T C24, T C25, T C26, T C27, T C28, T C29, T C30\n    , T C31, T C32, T C33, T C34, T C35, T C36, T C37, T C38, T C39, T C40\n    , T C41, T C42, T C43, T C44, T C45, T C46, T C47, T C48, T C49\n    >\nstruct list50_c\n    : l_item<\n          long_<50>\n        , integral_c< T,C0 >\n        , list49_c< T,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,C23,C24,C25,C26,C27,C28,C29,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C40,C41,C42,C43,C44,C45,C46,C47,C48,C49 >\n        >\n{\n    typedef list50_c type;\n    typedef T value_type;\n};\n\n}}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/push_back.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_PUSH_BACK_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_PUSH_BACK_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/push_back_fwd.hpp>\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct has_push_back_impl;\n\ntemplate<>\nstruct has_push_back_impl< aux::list_tag >\n{\n    template< typename Seq > struct apply\n        : false_\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_PUSH_BACK_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/push_front.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_PUSH_FRONT_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_PUSH_FRONT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/push_front_fwd.hpp>\n#include <boost/mpl/next.hpp>\n#include <boost/mpl/list/aux_/item.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct push_front_impl< aux::list_tag >\n{\n    template< typename List, typename T > struct apply\n    {\n        typedef l_item<\n              typename next<typename List::size>::type\n            , T\n            , typename List::type\n            > type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_PUSH_FRONT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/size.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_SIZE_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_SIZE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/size_fwd.hpp>\n#include <boost/mpl/list/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct size_impl< aux::list_tag >\n{\n    template< typename List > struct apply\n        : List::size\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_AUX_SIZE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/aux_/tag.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_AUX_TAG_HPP_INCLUDED\n#define BOOST_MPL_LIST_AUX_TAG_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl { namespace aux {\n\nstruct list_tag;\nstruct l_iter_tag;\n\n}}}\n\n#endif // BOOST_MPL_LIST_AUX_TAG_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list0.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST0_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST0_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/long.hpp>\n#include <boost/mpl/aux_/na.hpp>\n#include <boost/mpl/list/aux_/push_front.hpp>\n#include <boost/mpl/list/aux_/pop_front.hpp>\n#include <boost/mpl/list/aux_/push_back.hpp>\n#include <boost/mpl/list/aux_/front.hpp>\n#include <boost/mpl/list/aux_/clear.hpp>\n#include <boost/mpl/list/aux_/O1_size.hpp>\n#include <boost/mpl/list/aux_/size.hpp>\n#include <boost/mpl/list/aux_/empty.hpp>\n#include <boost/mpl/list/aux_/begin_end.hpp>\n#include <boost/mpl/list/aux_/item.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Dummy = na > struct list0;\n\ntemplate<> struct list0<na>\n    : l_end\n{\n    typedef l_end type;\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_LIST0_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list0_c.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST0_C_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST0_C_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/list/list0.hpp>\n#include <boost/mpl/integral_c.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T > struct list0_c\n    : l_end\n{\n    typedef l_end type;\n    typedef T value_type;\n};\n\n}}\n\n#endif // BOOST_MPL_LIST_LIST0_C_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list10.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST10_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST10_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list0.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list10.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(1, 10, <boost/mpl/list/aux_/numbered.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST10_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list10_c.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST10_C_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST10_C_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list0_c.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list10_c.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(1, 10, <boost/mpl/list/aux_/numbered_c.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST10_C_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list20.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST20_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST20_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list10.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list20.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(11, 20, <boost/mpl/list/aux_/numbered.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST20_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list20_c.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST20_C_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST20_C_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list10_c.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list20_c.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(11, 20, <boost/mpl/list/aux_/numbered_c.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST20_C_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list30.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST30_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST30_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list20.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list30.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(21, 30, <boost/mpl/list/aux_/numbered.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST30_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list30_c.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST30_C_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST30_C_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list20_c.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list30_c.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(21, 30, <boost/mpl/list/aux_/numbered_c.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST30_C_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list40.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST40_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST40_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list30.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list40.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(31, 40, <boost/mpl/list/aux_/numbered.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST40_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list40_c.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST40_C_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST40_C_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list30_c.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list40_c.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(31, 40, <boost/mpl/list/aux_/numbered_c.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST40_C_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list50.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST50_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST50_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list40.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list50.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(41, 50, <boost/mpl/list/aux_/numbered.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST50_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list/list50_c.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_LIST50_C_HPP_INCLUDED\n#define BOOST_MPL_LIST_LIST50_C_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/list/list40_c.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list50_c.hpp\n#   include <boost/mpl/list/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/preprocessor/iterate.hpp>\n\nnamespace boost { namespace mpl {\n\n#   define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(41, 50, <boost/mpl/list/aux_/numbered_c.hpp>))\n#   include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n\n#endif // BOOST_MPL_LIST_LIST50_C_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/list.hpp",
    "content": "\n#ifndef BOOST_MPL_LIST_HPP_INCLUDED\n#define BOOST_MPL_LIST_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/limits/list.hpp>\n#   include <boost/mpl/aux_/na.hpp>\n#   include <boost/mpl/aux_/config/preprocessor.hpp>\n\n#   include <boost/preprocessor/inc.hpp>\n#   include <boost/preprocessor/cat.hpp>\n#   include <boost/preprocessor/stringize.hpp>\n\n#if !defined(BOOST_NEEDS_TOKEN_PASTING_OP_FOR_TOKENS_JUXTAPOSING)\n#   define AUX778076_LIST_HEADER \\\n    BOOST_PP_CAT(list,BOOST_MPL_LIMIT_LIST_SIZE).hpp \\\n    /**/\n#else\n#   define AUX778076_LIST_HEADER \\\n    BOOST_PP_CAT(list,BOOST_MPL_LIMIT_LIST_SIZE)##.hpp \\\n    /**/\n#endif\n\n#   include BOOST_PP_STRINGIZE(boost/mpl/list/AUX778076_LIST_HEADER)\n#   undef AUX778076_LIST_HEADER\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER list.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/list.hpp>\n\n#   define AUX778076_SEQUENCE_NAME list\n#   define AUX778076_SEQUENCE_LIMIT BOOST_MPL_LIMIT_LIST_SIZE\n#   include <boost/mpl/aux_/sequence_wrapper.hpp>\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_LIST_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/logical.hpp",
    "content": "\n#ifndef BOOST_MPL_LOGICAL_HPP_INCLUDED\n#define BOOST_MPL_LOGICAL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/or.hpp>\n#include <boost/mpl/and.hpp>\n#include <boost/mpl/not.hpp>\n\n#endif // BOOST_MPL_LOGICAL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/long.hpp",
    "content": "\n#ifndef BOOST_MPL_LONG_HPP_INCLUDED\n#define BOOST_MPL_LONG_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/long_fwd.hpp>\n\n#define AUX_WRAPPER_VALUE_TYPE long\n#include <boost/mpl/aux_/integral_wrapper.hpp>\n\n#endif // BOOST_MPL_LONG_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/long_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_LONG_FWD_HPP_INCLUDED\n#define BOOST_MPL_LONG_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/adl_barrier.hpp>\n#include <boost/mpl/aux_/nttp_decl.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(long, N) > struct long_;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nBOOST_MPL_AUX_ADL_BARRIER_DECL(long_)\n\n#endif // BOOST_MPL_LONG_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/next.hpp",
    "content": "\n#ifndef BOOST_MPL_NEXT_HPP_INCLUDED\n#define BOOST_MPL_NEXT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/next_prior.hpp>\n\n#endif // BOOST_MPL_NEXT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/next_prior.hpp",
    "content": "\n#ifndef BOOST_MPL_NEXT_PRIOR_HPP_INCLUDED\n#define BOOST_MPL_NEXT_PRIOR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/common_name_wknd.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\nBOOST_MPL_AUX_COMMON_NAME_WKND(next)\nBOOST_MPL_AUX_COMMON_NAME_WKND(prior)\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct next\n{\n    typedef typename T::next type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,next,(T))\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct prior\n{\n    typedef typename T::prior type;\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,prior,(T))\n};\n\nBOOST_MPL_AUX_NA_SPEC(1, next)\nBOOST_MPL_AUX_NA_SPEC(1, prior)\n\n}}\n\n#endif // BOOST_MPL_NEXT_PRIOR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/not.hpp",
    "content": "\n#ifndef BOOST_MPL_NOT_HPP_INCLUDED\n#define BOOST_MPL_NOT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/aux_/nttp_decl.hpp>\n#include <boost/mpl/aux_/nested_type_wknd.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\n\ntemplate< BOOST_MPL_AUX_NTTP_DECL(long, C_) > // 'long' is intentional here\nstruct not_impl\n    : bool_<!C_>\n{\n};\n\n} // namespace aux\n\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct not_\n    : aux::not_impl<\n          BOOST_MPL_AUX_NESTED_TYPE_WKND(T)::value\n        >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,not_,(T))\n};\n\nBOOST_MPL_AUX_NA_SPEC(1,not_)\n\n}}\n\n#endif // BOOST_MPL_NOT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/or.hpp",
    "content": "\n#ifndef BOOST_MPL_OR_HPP_INCLUDED\n#define BOOST_MPL_OR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n    && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   include <boost/mpl/bool.hpp>\n#   include <boost/mpl/aux_/nested_type_wknd.hpp>\n#   include <boost/mpl/aux_/na_spec.hpp>\n#   include <boost/mpl/aux_/lambda_support.hpp>\n#   include <boost/mpl/aux_/config/msvc.hpp>\n\n// agurt, 19/may/04: workaround a conflict with <iso646.h> header's \n// 'or' and 'and' macros, see http://tinyurl.com/3et69; 'defined(or)'\n// has to be checked in a separate condition, otherwise GCC complains \n// about 'or' being an alternative token\n#if defined(_MSC_VER) && !defined(__clang__)\n#ifndef __GCCXML__\n#if defined(or)\n#   pragma push_macro(\"or\")\n#   undef or\n#   define or(x)\n#endif\n#endif\n#endif\n\n#   define BOOST_MPL_PREPROCESSED_HEADER or.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#if defined(_MSC_VER) && !defined(__clang__)\n#ifndef __GCCXML__\n#if defined(or)\n#   pragma pop_macro(\"or\")\n#endif\n#endif\n#endif\n\n#else\n\n#   define AUX778076_OP_NAME or_\n#   define AUX778076_OP_VALUE1 true\n#   define AUX778076_OP_VALUE2 false\n#   include <boost/mpl/aux_/logical_op.hpp>\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_OR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/pair.hpp",
    "content": "\n#ifndef BOOST_MPL_PAIR_HPP_INCLUDED\n#define BOOST_MPL_PAIR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/msvc_eti_base.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T1)\n    , typename BOOST_MPL_AUX_NA_PARAM(T2)\n    >\nstruct pair\n{\n    typedef pair type;\n    typedef T1 first;\n    typedef T2 second;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2,pair,(T1,T2))\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(P)\n    >\nstruct first\n{\n#if !defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG)\n    typedef typename P::first type;\n#else\n    typedef typename aux::msvc_eti_base<P>::first type;\n#endif\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,first,(P))\n};\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(P)\n    >\nstruct second\n{\n#if !defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG)\n    typedef typename P::second type;\n#else\n    typedef typename aux::msvc_eti_base<P>::second type;\n#endif\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,second,(P))\n};\n\n\nBOOST_MPL_AUX_NA_SPEC_NO_ETI(2, pair)\nBOOST_MPL_AUX_NA_SPEC(1, first)\nBOOST_MPL_AUX_NA_SPEC(1, second)\n\n}}\n\n#endif // BOOST_MPL_PAIR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/placeholders.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_PLACEHOLDERS_HPP_INCLUDED\n#define BOOST_MPL_PLACEHOLDERS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright Peter Dimov 2001-2003\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/arg.hpp>\n#   include <boost/mpl/aux_/adl_barrier.hpp>\n\n#   if !defined(BOOST_MPL_CFG_NO_ADL_BARRIER_NAMESPACE)\n#       define BOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(type) \\\n        using ::BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::type; \\\n        /**/\n#   else\n#       define BOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(type) /**/\n#   endif\n\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER placeholders.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/aux_/nttp_decl.hpp>\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n// watch out for GNU gettext users, who #define _(x)\n#if !defined(_) || defined(BOOST_MPL_CFG_NO_UNNAMED_PLACEHOLDER_SUPPORT)\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\ntypedef arg<-1> _;\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n\nnamespace boost { namespace mpl { \n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(_)\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::_;\n}\n\n}}\n#endif\n\n/// agurt, 17/mar/02: one more placeholder for the last 'apply#' \n/// specialization\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(1, BOOST_MPL_LIMIT_METAFUNCTION_ARITY + 1, 
<boost/mpl/placeholders.hpp>))\n#include BOOST_PP_ITERATE()\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_PLACEHOLDERS_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\ntypedef arg<i_> BOOST_PP_CAT(_,i_);\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n\nnamespace boost { namespace mpl { \n\nBOOST_MPL_AUX_ARG_ADL_BARRIER_DECL(BOOST_PP_CAT(_,i_))\n\nnamespace placeholders {\nusing BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE::BOOST_PP_CAT(_,i_);\n}\n\n}}\n\n#undef i_\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/pop_front_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_POP_FRONT_FWD_HPP_INCLUDED\n#define BOOST_MPL_POP_FRONT_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct pop_front_impl;\ntemplate< typename Sequence > struct pop_front;\n\n}}\n\n#endif // BOOST_MPL_POP_FRONT_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/prior.hpp",
    "content": "\n#ifndef BOOST_MPL_PRIOR_HPP_INCLUDED\n#define BOOST_MPL_PRIOR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/next_prior.hpp>\n\n#endif // BOOST_MPL_PRIOR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/protect.hpp",
    "content": "\n#ifndef BOOST_MPL_PROTECT_HPP_INCLUDED\n#define BOOST_MPL_PROTECT_HPP_INCLUDED\n\n// Copyright Peter Dimov 2001\n// Copyright Aleksey Gurtovoy 2002-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/arity.hpp>\n#include <boost/mpl/aux_/config/dtp.hpp>\n#include <boost/mpl/aux_/nttp_decl.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(T)\n    , int not_le_ = 0\n    >\nstruct protect : T\n{\n#if BOOST_WORKAROUND(__EDG_VERSION__, == 238)\n    typedef mpl::protect type;\n#else\n    typedef protect type;\n#endif\n};\n\n#if defined(BOOST_MPL_CFG_BROKEN_DEFAULT_PARAMETERS_IN_NESTED_TEMPLATES)\nnamespace aux { \ntemplate< BOOST_MPL_AUX_NTTP_DECL(int, N), typename T >\nstruct arity< protect<T>, N > \n    : arity<T,N>\n{ \n};\n} // namespace aux\n#endif\n\nBOOST_MPL_AUX_NA_SPEC_MAIN(1, protect)\n#if !defined(BOOST_MPL_CFG_NO_FULL_LAMBDA_SUPPORT)\nBOOST_MPL_AUX_NA_SPEC_TEMPLATE_ARITY(1, 1, protect)\n#endif\n\n}}\n\n#endif // BOOST_MPL_PROTECT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/push_back_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_PUSH_BACK_FWD_HPP_INCLUDED\n#define BOOST_MPL_PUSH_BACK_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct push_back_impl;\ntemplate< typename Sequence, typename T > struct push_back;\n\n}}\n\n#endif // BOOST_MPL_PUSH_BACK_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/push_front.hpp",
    "content": "\n#ifndef BOOST_MPL_PUSH_FRONT_HPP_INCLUDED\n#define BOOST_MPL_PUSH_FRONT_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/push_front_fwd.hpp>\n#include <boost/mpl/aux_/push_front_impl.hpp>\n#include <boost/mpl/sequence_tag.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/lambda_support.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    , typename BOOST_MPL_AUX_NA_PARAM(T)\n    >\nstruct push_front\n    : push_front_impl< typename sequence_tag<Sequence>::type >\n        ::template apply< Sequence,T >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(2,push_front,(Sequence,T))\n};\n\n\ntemplate< \n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    >\nstruct has_push_front\n    : has_push_front_impl< typename sequence_tag<Sequence>::type >\n        ::template apply< Sequence >\n{\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(1,has_push_front,(Sequence))\n};\n\nBOOST_MPL_AUX_NA_SPEC(2, push_front)\nBOOST_MPL_AUX_NA_SPEC(1, has_push_front)\n\n}}\n\n#endif // BOOST_MPL_PUSH_FRONT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/push_front_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_PUSH_FRONT_FWD_HPP_INCLUDED\n#define BOOST_MPL_PUSH_FRONT_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct push_front_impl;\ntemplate< typename Sequence, typename T > struct push_front;\n\n}}\n\n#endif // BOOST_MPL_PUSH_FRONT_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/quote.hpp",
    "content": "\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_MPL_QUOTE_HPP_INCLUDED\n#define BOOST_MPL_QUOTE_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2008\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#if !defined(BOOST_MPL_PREPROCESSING_MODE)\n#   include <boost/mpl/void.hpp>\n#   include <boost/mpl/aux_/has_type.hpp>\n#endif\n\n#include <boost/mpl/aux_/config/bcc.hpp>\n#include <boost/mpl/aux_/config/ttp.hpp>\n\n#if defined(BOOST_MPL_CFG_NO_TEMPLATE_TEMPLATE_PARAMETERS) \\\n    && !defined(BOOST_MPL_CFG_BCC590_WORKAROUNDS)\n#   define BOOST_MPL_CFG_NO_QUOTE_TEMPLATE\n#endif\n\n#if !defined(BOOST_MPL_CFG_NO_IMPLICIT_METAFUNCTIONS) \\\n    && defined(BOOST_MPL_CFG_NO_HAS_XXX)\n#   define BOOST_MPL_CFG_NO_IMPLICIT_METAFUNCTIONS\n#endif\n\n#include <boost/mpl/aux_/config/use_preprocessed.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \\\n && !defined(BOOST_MPL_PREPROCESSING_MODE)\n\n#   define BOOST_MPL_PREPROCESSED_HEADER quote.hpp\n#   include <boost/mpl/aux_/include_preprocessed.hpp>\n\n#else\n\n#   include <boost/mpl/limits/arity.hpp>\n#   include <boost/mpl/aux_/preprocessor/params.hpp>\n#   include <boost/mpl/aux_/config/ctps.hpp>\n#   include <boost/mpl/aux_/config/workaround.hpp>\n\n#   include <boost/preprocessor/iterate.hpp>\n#   include <boost/preprocessor/cat.hpp>\n\n#if !defined(BOOST_MPL_CFG_NO_QUOTE_TEMPLATE)\n\nnamespace boost { namespace mpl {\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate< typename T, bool has_type_ >\nstruct quote_impl\n// GCC has a problem with metafunction forwarding when T is a\n// specialization of a template called 'type'.\n# if BOOST_WORKAROUND(__GNUC__, BOOST_TESTED_AT(4)) \\\n    && BOOST_WORKAROUND(__GNUC_MINOR__, 
BOOST_TESTED_AT(0)) \\\n    && BOOST_WORKAROUND(__GNUC_PATCHLEVEL__, BOOST_TESTED_AT(2))\n{\n    typedef typename T::type type;\n};\n# else \n    : T\n{\n};\n# endif \n\ntemplate< typename T >\nstruct quote_impl<T,false>\n{\n    typedef T type;\n};\n\n#else // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\ntemplate< bool > struct quote_impl\n{\n    template< typename T > struct result_\n        : T\n    {\n    };\n};\n\ntemplate<> struct quote_impl<false>\n{\n    template< typename T > struct result_\n    {\n        typedef T type;\n    };\n};\n\n#endif \n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3,(1, BOOST_MPL_LIMIT_METAFUNCTION_ARITY, <boost/mpl/quote.hpp>))\n#include BOOST_PP_ITERATE()\n\n}}\n\n#endif // BOOST_MPL_CFG_NO_QUOTE_TEMPLATE\n\n#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS\n#endif // BOOST_MPL_QUOTE_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define i_ BOOST_PP_FRAME_ITERATION(1)\n\ntemplate<\n      template< BOOST_MPL_PP_PARAMS(i_, typename P) > class F\n    , typename Tag = void_\n    >\nstruct BOOST_PP_CAT(quote,i_)\n{\n    template< BOOST_MPL_PP_PARAMS(i_, typename U) > struct apply\n#if defined(BOOST_MPL_CFG_BCC590_WORKAROUNDS)\n    {\n        typedef typename quote_impl<\n              F< BOOST_MPL_PP_PARAMS(i_, U) >\n            , aux::has_type< F< BOOST_MPL_PP_PARAMS(i_, U) > >::value\n            >::type type;\n    };\n#elif !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n        : quote_impl<\n              F< BOOST_MPL_PP_PARAMS(i_, U) >\n            , aux::has_type< F< BOOST_MPL_PP_PARAMS(i_, U) > >::value\n            >\n    {\n    };\n#else\n        : quote_impl< aux::has_type< F< BOOST_MPL_PP_PARAMS(i_, U) > >::value >\n            ::template result_< F< BOOST_MPL_PP_PARAMS(i_, U) > >\n    {\n    };\n#endif\n};\n\n#undef i_\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/reverse_fold.hpp",
    "content": "\n#ifndef BOOST_MPL_REVERSE_FOLD_HPP_INCLUDED\n#define BOOST_MPL_REVERSE_FOLD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n// Copyright David Abrahams 2001-2002\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end.hpp>\n#include <boost/mpl/O1_size.hpp>\n#include <boost/mpl/arg.hpp>\n#include <boost/mpl/aux_/reverse_fold_impl.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    , typename BOOST_MPL_AUX_NA_PARAM(State)\n    , typename BOOST_MPL_AUX_NA_PARAM(BackwardOp)\n    , typename ForwardOp = arg<1>\n    >\nstruct reverse_fold\n{\n    typedef typename aux::reverse_fold_impl<\n          ::boost::mpl::O1_size<Sequence>::value\n        , typename begin<Sequence>::type\n        , typename end<Sequence>::type\n        , State\n        , BackwardOp\n        , ForwardOp\n        >::state type;\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3,reverse_fold,(Sequence,State,BackwardOp))\n};\n\nBOOST_MPL_AUX_NA_SPEC(3, reverse_fold)\n\n}}\n\n#endif // BOOST_MPL_REVERSE_FOLD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/same_as.hpp",
    "content": "\n#ifndef BOOST_MPL_SAME_AS_HPP_INCLUDED\n#define BOOST_MPL_SAME_AS_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/not.hpp>\n#include <boost/mpl/aux_/lambda_spec.hpp>\n#include <boost/mpl/aux_/config/forwarding.hpp>\n\n#include <boost/type_traits/is_same.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T1 >\nstruct same_as\n{\n    template< typename T2 > struct apply\n#if !defined(BOOST_MPL_CFG_NO_NESTED_FORWARDING)\n        : is_same<T1,T2>\n    {\n#else\n    {\n        typedef typename is_same<T1,T2>::type type;\n#endif\n    };\n};\n\ntemplate< typename T1 >\nstruct not_same_as\n{\n    template< typename T2 > struct apply\n#if !defined(BOOST_MPL_CFG_NO_NESTED_FORWARDING)\n        : not_< is_same<T1,T2> >\n    {\n#else\n    {\n        typedef typename not_< is_same<T1,T2> >::type type;\n#endif\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SAME_AS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/sequence_tag.hpp",
    "content": "\n#ifndef BOOST_MPL_SEQUENCE_TAG_HPP_INCLUDED\n#define BOOST_MPL_SEQUENCE_TAG_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/sequence_tag_fwd.hpp>\n#include <boost/mpl/aux_/has_tag.hpp>\n#include <boost/mpl/aux_/has_begin.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/is_msvc_eti_arg.hpp>\n#include <boost/mpl/aux_/config/eti.hpp>\n#include <boost/mpl/aux_/yes_no.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\nnamespace boost { namespace mpl {\n\n// agurt, 27/nov/02: have to use a simplistic 'sequence_tag' implementation\n// on MSVC to avoid dreadful \"internal structure overflow\" error\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300) \\\n    || defined(BOOST_MPL_CFG_NO_HAS_XXX)\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    >\nstruct sequence_tag\n{\n    typedef typename Sequence::tag type;\n};\n\n#elif BOOST_WORKAROUND(BOOST_MSVC, == 1300)\n\n// agurt, 07/feb/03: workaround for what seems to be MSVC 7.0-specific ETI issue\n\nnamespace aux {\n\ntemplate< bool >\nstruct sequence_tag_impl\n{\n    template< typename Sequence > struct result_\n    {\n        typedef typename Sequence::tag type;\n    };\n};\n\ntemplate<>\nstruct sequence_tag_impl<false>\n{\n    template< typename Sequence > struct result_\n    {\n        typedef int type;\n    };\n};\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    >\nstruct sequence_tag\n    : aux::sequence_tag_impl< !aux::is_msvc_eti_arg<Sequence>::value >\n        ::template result_<Sequence>\n{\n};\n\n#else\n\nnamespace aux {\n\ntemplate< bool has_tag_, bool has_begin_ >\nstruct sequence_tag_impl\n{\n    // agurt 24/nov/02: MSVC 6.5 gets 
confused in 'sequence_tag_impl<true>' \n    // specialization below, if we name it 'result_' here\n    template< typename Sequence > struct result2_;\n};\n\n#   define AUX_CLASS_SEQUENCE_TAG_SPEC(has_tag, has_begin, result_type) \\\ntemplate<> struct sequence_tag_impl<has_tag,has_begin> \\\n{ \\\n    template< typename Sequence > struct result2_ \\\n    { \\\n        typedef result_type type; \\\n    }; \\\n}; \\\n/**/\n\nAUX_CLASS_SEQUENCE_TAG_SPEC(true, true, typename Sequence::tag)\nAUX_CLASS_SEQUENCE_TAG_SPEC(true, false, typename Sequence::tag)\nAUX_CLASS_SEQUENCE_TAG_SPEC(false, true, nested_begin_end_tag)\nAUX_CLASS_SEQUENCE_TAG_SPEC(false, false, non_sequence_tag)\n\n#   undef AUX_CLASS_SEQUENCE_TAG_SPEC\n\n} // namespace aux\n\ntemplate<\n      typename BOOST_MPL_AUX_NA_PARAM(Sequence)\n    >\nstruct sequence_tag\n    : aux::sequence_tag_impl<\n          ::boost::mpl::aux::has_tag<Sequence>::value\n        , ::boost::mpl::aux::has_begin<Sequence>::value\n        >::template result2_<Sequence>\n{\n};\n\n#endif // BOOST_MSVC\n\nBOOST_MPL_AUX_NA_SPEC(1, sequence_tag)\n\n}}\n\n#endif // BOOST_MPL_SEQUENCE_TAG_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/sequence_tag_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_SEQUENCE_TAG_FWD_HPP_INCLUDED\n#define BOOST_MPL_SEQUENCE_TAG_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\nstruct nested_begin_end_tag;\nstruct non_sequence_tag;\n\ntemplate< typename Sequence > struct sequence_tag;\n\n}}\n\n#endif // BOOST_MPL_SEQUENCE_TAG_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/at_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_AT_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_AT_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/at_fwd.hpp>\n#include <boost/mpl/set/aux_/has_key_impl.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n#include <boost/mpl/if.hpp>\n#include <boost/mpl/void.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct at_impl< aux::set_tag >\n{\n    template< typename Set, typename T > struct apply\n    {\n        typedef typename if_< \n              has_key_impl<aux::set_tag>::apply<Set,T>\n            , T\n            , void_\n            >::type type;            \n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_AT_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/begin_end_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_BEGIN_END_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_BEGIN_END_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2007\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/begin_end_fwd.hpp>\n#include <boost/mpl/set/aux_/iterator.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct begin_impl< aux::set_tag >\n{\n    template< typename Set > struct apply\n        : s_iter_get<Set,typename Set::item_>\n    {\n    };\n};\n\ntemplate<>\nstruct end_impl< aux::set_tag >\n{\n    template< typename Set > struct apply\n    {\n        typedef s_iter< Set,set0<> > type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_BEGIN_END_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/clear_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_CLEAR_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_CLEAR_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/clear_fwd.hpp>\n#include <boost/mpl/set/aux_/set0.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct clear_impl< aux::set_tag >\n{\n    template< typename Set > struct apply\n    {\n        typedef set0<> type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_CLEAR_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/empty_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_EMPTY_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_EMPTY_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/empty_fwd.hpp>\n#include <boost/mpl/not.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct empty_impl< aux::set_tag >\n{\n    template< typename Set > struct apply\n        : not_< typename Set::size >\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_EMPTY_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/erase_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_ERASE_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_ERASE_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/erase_fwd.hpp>\n#include <boost/mpl/set/aux_/erase_key_impl.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct erase_impl< aux::set_tag >\n{\n    template< \n          typename Set\n        , typename Pos\n        , typename unused_\n        > \n    struct apply\n        : erase_key_impl<aux::set_tag>\n            ::apply<Set,typename Pos::type>\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_ERASE_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/erase_key_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_ERASE_KEY_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_ERASE_KEY_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2007\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/erase_key_fwd.hpp>\n#include <boost/mpl/set/aux_/has_key_impl.hpp>\n#include <boost/mpl/set/aux_/item.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n#include <boost/mpl/identity.hpp>\n#include <boost/mpl/base.hpp>\n#include <boost/mpl/eval_if.hpp>\n\n#include <boost/type_traits/is_same.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct erase_key_impl< aux::set_tag >\n{\n    template< \n          typename Set\n        , typename T\n        > \n    struct apply\n        : eval_if< \n              has_key_impl<aux::set_tag>::apply<Set,T>\n            , eval_if< \n                  is_same< T,typename Set::item_type_ > \n                , base<Set>\n                , identity< s_mask<T,typename Set::item_> >\n                >\n            , identity<Set>\n            >\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_ERASE_KEY_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/has_key_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_HAS_KEY_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_HAS_KEY_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/set/aux_/tag.hpp>\n#include <boost/mpl/has_key_fwd.hpp>\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/aux_/overload_names.hpp>\n#include <boost/mpl/aux_/static_cast.hpp>\n#include <boost/mpl/aux_/yes_no.hpp>\n#include <boost/mpl/aux_/type_wrapper.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n#include <boost/mpl/aux_/config/static_constant.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct has_key_impl< aux::set_tag >\n{\n    template< typename Set, typename T > struct apply\n#if BOOST_WORKAROUND(BOOST_MSVC, BOOST_TESTED_AT(1400)) \\\n    || BOOST_WORKAROUND(__EDG_VERSION__, <= 245)\n    {\n        BOOST_STATIC_CONSTANT(bool, value = \n              ( sizeof( BOOST_MPL_AUX_OVERLOAD_CALL_IS_MASKED(\n                    Set\n                  , BOOST_MPL_AUX_STATIC_CAST(aux::type_wrapper<T>*, 0)\n                  ) ) == sizeof(aux::no_tag) )\n            );\n\n        typedef bool_<value> type;\n\n#else // ISO98 C++\n        : bool_< \n              ( sizeof( BOOST_MPL_AUX_OVERLOAD_CALL_IS_MASKED(\n                    Set\n                  , BOOST_MPL_AUX_STATIC_CAST(aux::type_wrapper<T>*, 0)\n                  ) ) == sizeof(aux::no_tag) )\n            >\n    {\n#endif\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_HAS_KEY_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/insert_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_INSERT_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_INSERT_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2007\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/insert_fwd.hpp>\n#include <boost/mpl/set/aux_/has_key_impl.hpp>\n#include <boost/mpl/set/aux_/item.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n#include <boost/mpl/identity.hpp>\n#include <boost/mpl/base.hpp>\n#include <boost/mpl/eval_if.hpp>\n#include <boost/mpl/aux_/na.hpp>\n\n#include <boost/type_traits/is_same.hpp>\n\nnamespace boost { namespace mpl {\n\nnamespace aux {\ntemplate<  typename Set, typename T > struct set_insert_impl\n    : eval_if< \n          has_key_impl<aux::set_tag>::apply<Set,T>\n        , identity<Set>\n        , eval_if< \n              is_same< T,typename Set::last_masked_ > \n            , base<Set>\n            , identity< s_item<T,typename Set::item_> >\n            >\n        >\n{\n};\n}\n\ntemplate<>\nstruct insert_impl< aux::set_tag >\n{\n    template< \n          typename Set\n        , typename PosOrKey\n        , typename KeyOrNA\n        > \n    struct apply\n        : aux::set_insert_impl<\n              Set\n            , typename if_na<KeyOrNA,PosOrKey>::type\n            >\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_INSERT_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/insert_range_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_INSERT_RANGE_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_INSERT_RANGE_IMPL_HPP_INCLUDED\n\n// Copyright Bruno Dutra 2015\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/insert_range_fwd.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n#include <boost/mpl/placeholders.hpp>\n#include <boost/mpl/fold.hpp>\n#include <boost/mpl/insert.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct insert_range_impl< aux::set_tag >\n{\n    template<\n          typename Sequence\n        , typename /*Pos*/\n        , typename Range\n        >\n    struct apply\n        : fold<Range, Sequence, insert<_1, _2> >\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_INSERT_RANGE_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/item.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_ITEM_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_ITEM_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2007\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/long.hpp>\n#include <boost/mpl/void.hpp>\n#include <boost/mpl/next.hpp>\n#include <boost/mpl/prior.hpp>\n#include <boost/mpl/set/aux_/set0.hpp>\n#include <boost/mpl/aux_/type_wrapper.hpp>\n#include <boost/mpl/aux_/config/arrays.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T, typename Base >\nstruct s_item\n    : Base\n{\n    typedef s_item<T,Base> item_;\n    typedef void_       last_masked_;\n    typedef T           item_type_;\n    typedef typename Base::item_ base;\n    typedef s_item type;\n    \n    typedef typename next< typename Base::size >::type  size;\n    typedef typename next< typename Base::order >::type order;\n\n#if defined(BOOST_MPL_CFG_NO_DEPENDENT_ARRAY_TYPES)\n    typedef typename aux::weighted_tag<BOOST_MPL_AUX_MSVC_VALUE_WKND(order)::value>::type order_tag_;\n#else\n    typedef char (&order_tag_)[BOOST_MPL_AUX_MSVC_VALUE_WKND(order)::value];\n#endif\n\n    BOOST_MPL_AUX_SET_OVERLOAD( order_tag_, ORDER_BY_KEY, s_item, aux::type_wrapper<T>* );\n    BOOST_MPL_AUX_SET_OVERLOAD( aux::no_tag, IS_MASKED, s_item, aux::type_wrapper<T>* );\n};\n\n\ntemplate< typename T, typename Base >\nstruct s_mask\n    : Base\n{\n    typedef s_mask<T,Base> item_;\n    typedef T       last_masked_;\n    typedef void_   item_type_;\n    typedef typename Base::item_ base;\n    typedef typename prior< typename Base::size >::type  size;\n    typedef s_mask type;\n\n    BOOST_MPL_AUX_SET_OVERLOAD( aux::yes_tag, IS_MASKED, s_mask, aux::type_wrapper<T>* );\n};\n\n\ntemplate< typename T, 
typename Base >\nstruct s_unmask\n    : Base\n{\n    typedef s_unmask<T,Base> item_;\n    typedef void_   last_masked_;\n    typedef T       item_type_;\n    typedef typename Base::item_ base;\n    typedef typename next< typename Base::size >::type  size;\n\n    BOOST_MPL_AUX_SET_OVERLOAD( aux::no_tag, IS_MASKED, s_unmask, aux::type_wrapper<T>* );\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_ITEM_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/iterator.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_ITERATOR_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_ITERATOR_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2007\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/set/aux_/set0.hpp>\n#include <boost/mpl/has_key.hpp>\n#include <boost/mpl/iterator_tags.hpp>\n#include <boost/mpl/next.hpp>\n#include <boost/mpl/eval_if.hpp>\n#include <boost/mpl/if.hpp>\n#include <boost/mpl/identity.hpp>\n#include <boost/mpl/aux_/config/ctps.hpp>\n\nnamespace boost { namespace mpl {\n\n// used by 's_iter_get'\ntemplate< typename Set, typename Tail > struct s_iter;\n\ntemplate< typename Set, typename Tail > struct s_iter_get\n    : eval_if< \n          has_key< Set,typename Tail::item_type_ >\n        , identity< s_iter<Set,Tail> >\n        , next< s_iter<Set,Tail> >\n        >\n{\n};\n\ntemplate< typename Set, typename Tail > struct s_iter_impl\n{\n    typedef Tail                        tail_;\n    typedef forward_iterator_tag        category;\n    typedef typename Tail::item_type_   type;\n\n#if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n    typedef typename s_iter_get< Set,typename Tail::base >::type next;\n#endif\n};\n\n#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)\n\ntemplate< typename Set, typename Tail > \nstruct next< s_iter<Set,Tail> >\n    : s_iter_get< Set,typename Tail::base >\n{\n};\n\ntemplate< typename Set > \nstruct next< s_iter<Set,set0<> > >\n{\n    typedef s_iter<Set,set0<> > type;\n};\n\ntemplate< typename Set, typename Tail > struct s_iter\n    : s_iter_impl<Set,Tail>\n{\n};\n\ntemplate< typename Set > struct s_iter<Set, set0<> >\n{\n    typedef forward_iterator_tag category;\n};\n\n#else\n\ntemplate< typename Set >\nstruct 
s_end_iter\n{\n    typedef forward_iterator_tag    category;\n    typedef s_iter<Set,set0<> >     next;\n};\n\ntemplate< typename Set, typename Tail > struct s_iter\n    : if_< \n          is_same< Tail,set0<> >\n        , s_end_iter<Set>\n        , s_iter_impl<Set,Tail>\n        >::type\n{\n};\n\n#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_ITERATOR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/key_type_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_KEY_TYPE_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_KEY_TYPE_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/key_type_fwd.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct key_type_impl< aux::set_tag >\n{\n    template< typename Set, typename T > struct apply\n    {\n        typedef T type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_KEY_TYPE_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/set0.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_SET0_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_SET0_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/long.hpp>\n#include <boost/mpl/void.hpp>\n#include <boost/mpl/aux_/na.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n#include <boost/mpl/aux_/yes_no.hpp>\n#include <boost/mpl/aux_/overload_names.hpp>\n#include <boost/mpl/aux_/config/operators.hpp>\n\n#include <boost/preprocessor/cat.hpp>\n\nnamespace boost { namespace mpl {\n\n#if defined(BOOST_MPL_CFG_USE_OPERATORS_OVERLOADING)\n\n#   define BOOST_MPL_AUX_SET0_OVERLOAD(R, f, X, T) \\\n    friend R BOOST_PP_CAT(BOOST_MPL_AUX_OVERLOAD_,f)(X const&, T) \\\n/**/\n\n#   define BOOST_MPL_AUX_SET_OVERLOAD(R, f, X, T) \\\n    BOOST_MPL_AUX_SET0_OVERLOAD(R, f, X, T) \\\n/**/\n\n#else\n\n#   define BOOST_MPL_AUX_SET0_OVERLOAD(R, f, X, T) \\\n    static R BOOST_PP_CAT(BOOST_MPL_AUX_OVERLOAD_,f)(X const&, T) \\\n/**/\n\n#   define BOOST_MPL_AUX_SET_OVERLOAD(R, f, X, T) \\\n    BOOST_MPL_AUX_SET0_OVERLOAD(R, f, X, T); \\\n    using Base::BOOST_PP_CAT(BOOST_MPL_AUX_OVERLOAD_,f) \\\n/**/\n\n#endif\n\ntemplate< typename Dummy = na > struct set0\n{\n    typedef set0<>          item_;\n    typedef item_           type;\n    typedef aux::set_tag    tag;\n    typedef void_           last_masked_;\n    typedef void_           item_type_;\n    typedef long_<0>        size;\n    typedef long_<1>        order;\n\n    BOOST_MPL_AUX_SET0_OVERLOAD( aux::no_tag, ORDER_BY_KEY, set0<>, void const volatile* );\n    BOOST_MPL_AUX_SET0_OVERLOAD( aux::yes_tag, IS_MASKED, set0<>, void const volatile* );\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_SET0_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/size_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_SIZE_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_SIZE_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/size_fwd.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct size_impl< aux::set_tag >\n{\n    template< typename Set > struct apply\n        : Set::size\n    {\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_SIZE_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/tag.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_TAG_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_TAG_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl { namespace aux {\n\nstruct set_tag;\n\n}}}\n\n#endif // BOOST_MPL_SET_AUX_TAG_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/aux_/value_type_impl.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_AUX_VALUE_TYPE_IMPL_HPP_INCLUDED\n#define BOOST_MPL_SET_AUX_VALUE_TYPE_IMPL_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/value_type_fwd.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n\nnamespace boost { namespace mpl {\n\ntemplate<>\nstruct value_type_impl< aux::set_tag >\n{\n    template< typename Set, typename T > struct apply\n    {\n        typedef T type;\n    };\n};\n\n}}\n\n#endif // BOOST_MPL_SET_AUX_VALUE_TYPE_IMPL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/set/set0.hpp",
    "content": "\n#ifndef BOOST_MPL_SET_SET0_HPP_INCLUDED\n#define BOOST_MPL_SET_SET0_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/set/aux_/at_impl.hpp>\n#include <boost/mpl/set/aux_/clear_impl.hpp>\n//#include <boost/mpl/set/aux_/O1_size.hpp>\n#include <boost/mpl/set/aux_/size_impl.hpp>\n#include <boost/mpl/set/aux_/empty_impl.hpp>\n#include <boost/mpl/set/aux_/insert_impl.hpp>\n#include <boost/mpl/set/aux_/insert_range_impl.hpp>\n#include <boost/mpl/set/aux_/erase_impl.hpp>\n#include <boost/mpl/set/aux_/erase_key_impl.hpp>\n#include <boost/mpl/set/aux_/has_key_impl.hpp>\n#include <boost/mpl/set/aux_/key_type_impl.hpp>\n#include <boost/mpl/set/aux_/value_type_impl.hpp>\n#include <boost/mpl/set/aux_/begin_end_impl.hpp>\n#include <boost/mpl/set/aux_/iterator.hpp>\n#include <boost/mpl/set/aux_/item.hpp>\n#include <boost/mpl/set/aux_/set0.hpp>\n#include <boost/mpl/set/aux_/tag.hpp>\n\n#endif // BOOST_MPL_SET_SET0_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/size_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_SIZE_FWD_HPP_INCLUDED\n#define BOOST_MPL_SIZE_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct size_impl;\ntemplate< typename Sequence > struct size;\n\n}}\n\n#endif // BOOST_MPL_SIZE_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/size_t.hpp",
    "content": "\n#ifndef BOOST_MPL_SIZE_T_HPP_INCLUDED\n#define BOOST_MPL_SIZE_T_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/size_t_fwd.hpp>\n\n#define AUX_WRAPPER_VALUE_TYPE std::size_t\n#define AUX_WRAPPER_NAME size_t\n#define AUX_WRAPPER_PARAMS(N) std::size_t N\n\n#include <boost/mpl/aux_/integral_wrapper.hpp>\n\n#endif // BOOST_MPL_SIZE_T_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/size_t_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_SIZE_T_FWD_HPP_INCLUDED\n#define BOOST_MPL_SIZE_T_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2000-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/adl_barrier.hpp>\n#include <boost/config.hpp> // make sure 'size_t' is placed into 'std'\n#include <cstddef>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\ntemplate< std::size_t N > struct size_t;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nBOOST_MPL_AUX_ADL_BARRIER_DECL(size_t)\n\n#endif // BOOST_MPL_SIZE_T_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/value_type_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_VALUE_TYPE_FWD_HPP_INCLUDED\n#define BOOST_MPL_VALUE_TYPE_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2003-2004\n// Copyright David Abrahams 2003-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\nnamespace boost { namespace mpl {\n\ntemplate< typename Tag > struct value_type_impl;\ntemplate< typename AssociativeSequence, typename T > struct value_type;\n\n}}\n\n#endif // BOOST_MPL_VALUE_TYPE_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/void.hpp",
    "content": "\n#ifndef BOOST_MPL_VOID_HPP_INCLUDED\n#define BOOST_MPL_VOID_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/void_fwd.hpp>\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/aux_/na_spec.hpp>\n#include <boost/mpl/aux_/config/msvc.hpp>\n#include <boost/mpl/aux_/config/workaround.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\n//  [JDG Feb-4-2003] made void_ a complete type to allow it to be\n//  instantiated so that it can be passed in as an object that can be\n//  used to select an overloaded function. Possible use includes signaling\n//  a zero arity functor evaluation call.\nstruct void_ { typedef void_ type; };\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\n\nnamespace boost { namespace mpl {\n\ntemplate< typename T >\nstruct is_void_\n    : false_\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    using false_::value;\n#endif\n};\n\ntemplate<>\nstruct is_void_<void_>\n    : true_\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    using true_::value;\n#endif\n};\n\ntemplate< typename T >\nstruct is_not_void_\n    : true_\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    using true_::value;\n#endif\n};\n\ntemplate<>\nstruct is_not_void_<void_>\n    : false_\n{\n#if BOOST_WORKAROUND(BOOST_MSVC, < 1300)\n    using false_::value;\n#endif\n};\n\nBOOST_MPL_AUX_NA_SPEC(1, is_void_)\nBOOST_MPL_AUX_NA_SPEC(1, is_not_void_)\n\n}}\n\n#endif // BOOST_MPL_VOID_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/mpl/void_fwd.hpp",
    "content": "\n#ifndef BOOST_MPL_VOID_FWD_HPP_INCLUDED\n#define BOOST_MPL_VOID_FWD_HPP_INCLUDED\n\n// Copyright Aleksey Gurtovoy 2001-2004\n//\n// Distributed under the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/mpl for documentation.\n\n// $Id$\n// $Date$\n// $Revision$\n\n#include <boost/mpl/aux_/adl_barrier.hpp>\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN\n\nstruct void_;\n\nBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE\nBOOST_MPL_AUX_ADL_BARRIER_DECL(void_)\n\n#endif // BOOST_MPL_VOID_FWD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/noncopyable.hpp",
    "content": "/*\n * Copyright (c) 2014 Glen Fernandes\n *\n * Distributed under the Boost Software License, Version 1.0. (See\n * accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n */\n\n#ifndef BOOST_NONCOPYABLE_HPP\n#define BOOST_NONCOPYABLE_HPP\n\n// The header file at this path is deprecated;\n// use boost/core/noncopyable.hpp instead.\n\n#include <boost/core/noncopyable.hpp>\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/arg_list.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef ARG_LIST_050329_HPP\n#define ARG_LIST_050329_HPP\n\n#include <boost/parameter/aux_/void.hpp>\n#include <boost/parameter/aux_/result_of0.hpp>\n#include <boost/parameter/aux_/default.hpp>\n#include <boost/parameter/aux_/parameter_requirements.hpp>\n#include <boost/parameter/aux_/yesno.hpp>\n#include <boost/parameter/aux_/is_maybe.hpp>\n#include <boost/parameter/config.hpp>\n\n#include <boost/mpl/apply.hpp>\n#include <boost/mpl/assert.hpp>\n#include <boost/mpl/begin.hpp>\n#include <boost/mpl/end.hpp>\n#include <boost/mpl/iterator_tags.hpp>\n\n#include <boost/type_traits/add_reference.hpp>\n#include <boost/type_traits/is_same.hpp>\n#include <boost/preprocessor/repetition/enum_params.hpp>\n#include <boost/preprocessor/repetition/enum_binary_params.hpp>\n#include <boost/preprocessor/facilities/intercept.hpp>\n\nnamespace boost { namespace parameter {\n\n// Forward declaration for aux::arg_list, below.\ntemplate<class T> struct keyword;\n\nnamespace aux {\n\n// Tag type passed to MPL lambda.\nstruct lambda_tag;\n\n//\n// Structures used to build the tuple of actual arguments.  The\n// tuple is a nested cons-style list of arg_list specializations\n// terminated by an empty_arg_list.\n//\n// Each specialization of arg_list is derived from its successor in\n// the list type.  This feature is used along with using\n// declarations to build member function overload sets that can\n// match against keywords.\n//\n\n// MPL sequence support\nstruct arg_list_tag;\n\n// Terminates arg_list<> and represents an empty list.  
Since this\n// is just the terminating case you might want to look at arg_list\n// first, to get a feel for what's really happening here.\n\nstruct empty_arg_list\n{\n    empty_arg_list() {}\n\n    // Constructor taking BOOST_PARAMETER_MAX_ARITY empty_arg_list\n    // arguments; this makes initialization\n    empty_arg_list(\n        BOOST_PP_ENUM_PARAMS(\n            BOOST_PARAMETER_MAX_ARITY, void_ BOOST_PP_INTERCEPT\n        ))\n    {}\n\n    // A metafunction class that, given a keyword and a default\n    // type, returns the appropriate result type for a keyword\n    // lookup given that default\n    struct binding\n    {\n        template<class KW, class Default, class Reference>\n        struct apply\n        {\n            typedef Default type;\n        };\n    };\n\n    // Terminator for has_key, indicating that the keyword is unique\n    template <class KW>\n    static no_tag has_key(KW*);\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n\n    // The overload set technique doesn't work with these older\n    // compilers, so they need some explicit handholding.\n\n    // A metafunction class that, given a keyword, returns the type\n    // of the base sublist whose get() function can produce the\n    // value for that key\n    struct key_owner\n    {\n        template<class KW>\n        struct apply\n        {\n            typedef empty_arg_list type;\n        };\n    };\n\n    template <class K, class T>\n    T& get(default_<K,T> x) const\n    {\n        return x.value;\n    }\n\n    template <class K, class F>\n    typename result_of0<F>::type\n    get(lazy_default<K,F> x) const\n    {\n        return x.compute_default();\n    }\n#endif\n\n    // If this function is called, it means there is no argument\n    // in the list that matches the supplied keyword. 
Just return\n    // the default value.\n    template <class K, class Default>\n    Default& operator[](default_<K, Default> x) const\n    {\n        return x.value;\n    }\n\n    // If this function is called, it means there is no argument\n    // in the list that matches the supplied keyword. Just evaluate\n    // and return the default value.\n    template <class K, class F>\n    typename result_of0<F>::type\n    operator[](\n        BOOST_PARAMETER_lazy_default_fallback<K,F> x) const\n    {\n        return x.compute_default();\n    }\n\n    // No argument corresponding to ParameterRequirements::key_type\n    // was found if we match this overload, so unless that parameter\n    // has a default, we indicate that the actual arguments don't\n    // match the function's requirements.\n    template <class ParameterRequirements, class ArgPack>\n    static typename ParameterRequirements::has_default\n    satisfies(ParameterRequirements*, ArgPack*);\n\n    // MPL sequence support\n    typedef empty_arg_list type;   // convenience\n    typedef arg_list_tag tag; // For dispatching to sequence intrinsics\n};\n\n// Forward declaration for arg_list::operator,\ntemplate <class KW, class T>\nstruct tagged_argument;\n\ntemplate <class T>\nstruct get_reference\n{\n    typedef typename T::reference type;\n};\n\n// A tuple of tagged arguments, terminated with empty_arg_list.\n// Every TaggedArg is an instance of tagged_argument<>.\ntemplate <class TaggedArg, class Next = empty_arg_list>\nstruct arg_list : Next\n{\n    typedef arg_list<TaggedArg,Next> self;\n    typedef typename TaggedArg::key_type key_type;\n\n    typedef typename is_maybe<typename TaggedArg::value_type>::type holds_maybe;\n\n    typedef typename mpl::eval_if<\n        holds_maybe\n      , get_reference<typename TaggedArg::value_type>\n      , get_reference<TaggedArg>\n    >::type reference;\n\n    typedef typename mpl::if_<\n        holds_maybe\n      , reference\n      , typename TaggedArg::value_type\n    
>::type value_type;\n\n    TaggedArg arg;      // Stores the argument\n\n    // Store the arguments in successive nodes of this list\n    template< // class A0, class A1, ...\n        BOOST_PP_ENUM_PARAMS(BOOST_PARAMETER_MAX_ARITY, class A)\n    >\n    arg_list( // A0& a0, A1& a1, ...\n        BOOST_PP_ENUM_BINARY_PARAMS(BOOST_PARAMETER_MAX_ARITY, A, & a)\n    )\n      : Next( // a1, a2, ...\n            BOOST_PP_ENUM_SHIFTED_PARAMS(BOOST_PARAMETER_MAX_ARITY, a)\n          , void_reference()\n        )\n      , arg(a0)\n    {}\n\n    // Create a new list by prepending arg to a copy of tail.  Used\n    // when incrementally building this structure with the comma\n    // operator.\n    arg_list(TaggedArg head, Next const& tail)\n      : Next(tail)\n      , arg(head)\n    {}\n\n    // A metafunction class that, given a keyword and a default\n    // type, returns the appropriate result type for a keyword\n    // lookup given that default\n    struct binding\n    {\n        template <class KW, class Default, class Reference>\n        struct apply\n        {\n          typedef typename mpl::eval_if<\n                boost::is_same<KW, key_type>\n              , mpl::if_<Reference, reference, value_type>\n              , mpl::apply_wrap3<typename Next::binding, KW, Default, Reference>\n          >::type type;\n        };\n    };\n\n#if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n    // Overload for key_type, so the assert below will fire if the\n    // same keyword is used again\n    static yes_tag has_key(key_type*);\n    using Next::has_key;\n\n    BOOST_MPL_ASSERT_MSG(\n        sizeof(Next::has_key((key_type*)0)) == sizeof(no_tag)\n      , duplicate_keyword, (key_type)\n    );\n\n#endif\n    //\n    // Begin implementation of indexing operators for looking up\n    // specific arguments by name\n    //\n\n    // Helpers that handle the case when TaggedArg is\n    // empty<T>.\n    template <class D>\n    reference get_default(D const&, mpl::false_) const\n  
  {\n        return arg.value;\n    }\n\n    template <class D>\n    reference get_default(D const& d, mpl::true_) const\n    {\n        return arg.value ? arg.value.get() : arg.value.construct(d.value);\n    }\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n    // These older compilers don't support the overload set creation\n    // idiom well, so we need to do all the return type calculation\n    // for the compiler and dispatch through an outer function template\n\n    // A metafunction class that, given a keyword, returns the base\n    // sublist whose get() function can produce the value for that\n    // key.\n    struct key_owner\n    {\n        template<class KW>\n        struct apply\n        {\n          typedef typename mpl::eval_if<\n                boost::is_same<KW, key_type>\n              , mpl::identity<arg_list<TaggedArg,Next> >\n              , mpl::apply_wrap1<typename Next::key_owner,KW>\n          >::type type;\n        };\n    };\n\n    // Outer indexing operators that dispatch to the right node's\n    // get() function.\n    template <class KW>\n    typename mpl::apply_wrap3<binding, KW, void_, mpl::true_>::type\n    operator[](keyword<KW> const& x) const\n    {\n        typename mpl::apply_wrap1<key_owner, KW>::type const& sublist = *this;\n        return sublist.get(x);\n    }\n\n    template <class KW, class Default>\n    typename mpl::apply_wrap3<binding, KW, Default&, mpl::true_>::type\n    operator[](default_<KW, Default> x) const\n    {\n        typename mpl::apply_wrap1<key_owner, KW>::type const& sublist = *this;\n        return sublist.get(x);\n    }\n\n    template <class KW, class F>\n    typename mpl::apply_wrap3<\n        binding,KW\n      , typename result_of0<F>::type\n      , mpl::true_\n    >::type\n    operator[](lazy_default<KW,F> x) const\n    {\n        typename mpl::apply_wrap1<key_owner, KW>::type const& sublist = *this;\n        return sublist.get(x);\n    }\n\n    // These just return the stored value; 
when empty_arg_list is\n    // reached, indicating no matching argument was passed, the\n    // default is returned, or if no default_ or lazy_default was\n    // passed, compilation fails.\n    reference get(keyword<key_type> const&) const\n    {\n        BOOST_MPL_ASSERT_NOT((holds_maybe));\n        return arg.value;\n    }\n\n    template <class Default>\n    reference get(default_<key_type,Default> const& d) const\n    {\n        return get_default(d, holds_maybe());\n    }\n\n    template <class Default>\n    reference get(lazy_default<key_type, Default>) const\n    {\n        return arg.value;\n    }\n\n#else\n\n    reference operator[](keyword<key_type> const&) const\n    {\n        BOOST_MPL_ASSERT_NOT((holds_maybe));\n        return arg.value;\n    }\n\n    template <class Default>\n    reference operator[](default_<key_type, Default> const& d) const\n    {\n        return get_default(d, holds_maybe());\n    }\n\n    template <class Default>\n    reference operator[](lazy_default<key_type, Default>) const\n    {\n        return arg.value;\n    }\n\n    // Builds an overload set including operator[]s defined in base\n    // classes.\n    using Next::operator[];\n\n    //\n    // End of indexing support\n    //\n\n\n    //\n    // For parameter_requirements matching this node's key_type,\n    // return a bool constant wrapper indicating whether the\n    // requirements are satisfied by TaggedArg.  
Used only for\n    // compile-time computation and never really called, so a\n    // declaration is enough.\n    //\n    template <class HasDefault, class Predicate, class ArgPack>\n    static typename mpl::apply_wrap2<\n        typename mpl::lambda<Predicate, lambda_tag>::type\n      , value_type, ArgPack\n    >::type\n    satisfies(\n        parameter_requirements<key_type,Predicate,HasDefault>*\n      , ArgPack*\n    );\n\n    // Builds an overload set including satisfies functions defined\n    // in base classes.\n    using Next::satisfies;\n#endif\n\n    // Comma operator to compose argument list without using parameters<>.\n    // Useful for argument lists with undetermined length.\n    template <class KW, class T2>\n    arg_list<tagged_argument<KW, T2>, self>\n    operator,(tagged_argument<KW,T2> x) const\n    {\n        return arg_list<tagged_argument<KW,T2>, self>(x, *this);\n    }\n\n    // MPL sequence support\n    typedef self type;             // Convenience for users\n    typedef Next tail_type;        // For the benefit of iterators\n    typedef arg_list_tag tag; // For dispatching to sequence intrinsics\n};\n\n// MPL sequence support\ntemplate <class ArgumentPack>\nstruct arg_list_iterator\n{\n    typedef mpl::forward_iterator_tag category;\n\n    // The incremented iterator\n    typedef arg_list_iterator<typename ArgumentPack::tail_type> next;\n\n    // dereferencing yields the key type\n    typedef typename ArgumentPack::key_type type;\n};\n\ntemplate <>\nstruct arg_list_iterator<empty_arg_list> {};\n\n}} // namespace parameter::aux\n\n// MPL sequence support\nnamespace mpl\n{\n  template <>\n  struct begin_impl<parameter::aux::arg_list_tag>\n  {\n      template <class S>\n      struct apply\n      {\n          typedef parameter::aux::arg_list_iterator<S> type;\n      };\n  };\n\n  template <>\n  struct end_impl<parameter::aux::arg_list_tag>\n  {\n      template <class>\n      struct apply\n      {\n          typedef 
parameter::aux::arg_list_iterator<parameter::aux::empty_arg_list> type;\n      };\n  };\n}\n\n} // namespace boost\n\n#endif // ARG_LIST_050329_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/cast.hpp",
    "content": "// Copyright Daniel Wallin 2006. Use, modification and distribution is\n// subject to the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_CAST_060902_HPP\n# define BOOST_PARAMETER_CAST_060902_HPP\n\n# include <boost/detail/workaround.hpp>\n\n# if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n#  include <boost/type_traits/add_reference.hpp>\n#  include <boost/type_traits/remove_const.hpp>\n# endif\n\nnamespace boost { namespace parameter { namespace aux {\n\nstruct use_default_tag {};\n\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n\n#  define BOOST_PARAMETER_FUNCTION_CAST(value, predicate) value\n\n# else\n\n// Handles possible implicit casts. Used by preprocessor.hpp to\n// normalize user input.\n//\n// cast<void*>::execute() is identity\n// cast<void*(X)>::execute() is identity\n// cast<void(X)>::execute() casts to X\n//\n// preprocessor.hpp uses this like this:\n//\n//   #define X(value, predicate)\n//      cast<void predicate>::execute(value)\n//\n//   X(something, *)\n//   X(something, *(predicate))\n//   X(something, (int))\n\ntemplate <class T, class Args>\nstruct cast;\n\ntemplate <class Args>\nstruct cast<void*, Args>\n{\n    static use_default_tag execute(use_default_tag)\n    {\n        return use_default_tag();\n    }\n\n    static use_default_tag remove_const(use_default_tag)\n    {\n        return use_default_tag();\n    }\n\n    template <class U>\n    static U& execute(U& value)\n    {\n        return value;\n    }\n\n    template <class U>\n    static U& remove_const(U& x)\n    {\n        return x;\n    }\n};\n\n#if BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x580))\n\ntypedef void* voidstar;\n\ntemplate <class T, class Args>\nstruct cast<voidstar(T), Args>\n  : cast<void*, Args>\n{\n};\n\n#else\n\ntemplate <class T, class Args>\nstruct cast<void*(T), Args>\n  : cast<void*, 
Args>\n{\n};\n\n#endif\n\n// This is a hack used in cast<> to turn the user supplied type,\n// which may or may not be a placeholder expression into one, so\n// that it will be properly evaluated by mpl::apply.\ntemplate <class T, class Dummy = mpl::_1>\nstruct as_placeholder_expr\n{\n    typedef T type;\n};\n\ntemplate <class T, class Args>\nstruct cast<void(T), Args>\n{\n    typedef typename mpl::apply2<\n        as_placeholder_expr<T>, Args, Args>::type type0;\n\n    typedef typename boost::add_reference<\n        typename boost::remove_const<type0>::type \n    >::type reference;\n\n    static use_default_tag execute(use_default_tag)\n    {\n        return use_default_tag();\n    }\n\n    static use_default_tag remove_const(use_default_tag)\n    {\n        return use_default_tag();\n    }\n\n    static type0 execute(type0 value)\n    {\n        return value;\n    }\n\n    template <class U>\n    static reference remove_const(U const& x)\n    {\n        return const_cast<reference>(x);\n    }\n};\n\n#  define BOOST_PARAMETER_FUNCTION_CAST(value, predicate, args) \\\n    boost::parameter::aux::cast<void predicate, args>::remove_const( \\\n        boost::parameter::aux::cast<void predicate, args>::execute(value) \\\n    )\n\n# endif\n\n}}} // namespace boost::parameter::aux\n\n#endif // BOOST_PARAMETER_CAST_060902_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/default.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef DEFAULT_050329_HPP\n# define DEFAULT_050329_HPP\n\n# include <boost/detail/workaround.hpp>\n\nnamespace boost { namespace parameter { namespace aux {\n\n// A wrapper for the default value passed by the user when resolving\n// the value of the parameter with the given Keyword\ntemplate <class Keyword, class Value>\nstruct default_\n{\n    default_(Value& x)\n      : value(x)\n    {}\n\n    Value& value;\n};\n\n//\n// lazy_default -- \n//\n//    A wrapper for the default value computation function passed by\n//    the user when resolving the value of the parameter with the\n//    given keyword\n//\n# if BOOST_WORKAROUND(__EDG_VERSION__, <= 300)\n// These compilers need a little extra help with overload\n// resolution; we have empty_arg_list's operator[] accept a base\n// class to make that overload less preferable.\ntemplate <class KW, class DefaultComputer>\nstruct lazy_default_base\n{\n    lazy_default_base(DefaultComputer const& x)\n      : compute_default(x)\n    {}\n    DefaultComputer const& compute_default;\n};\n\ntemplate <class KW, class DefaultComputer>\nstruct lazy_default\n  : lazy_default_base<KW,DefaultComputer>\n  {\n      lazy_default(DefaultComputer const & x)\n        : lazy_default_base<KW,DefaultComputer>(x)\n      {}\n  };\n#  define BOOST_PARAMETER_lazy_default_fallback lazy_default_base\n# else \ntemplate <class KW, class DefaultComputer>\nstruct lazy_default\n{\n    lazy_default(const DefaultComputer& x)\n      : compute_default(x)\n    {}\n    DefaultComputer const& compute_default;\n};\n#  define BOOST_PARAMETER_lazy_default_fallback lazy_default\n# endif \n\n}}} // namespace boost::parameter::aux\n\n#endif // DEFAULT_050329_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/is_maybe.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2010. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_IS_MAYBE_050329_HPP\n#define BOOST_PARAMETER_IS_MAYBE_050329_HPP\n\n#include <boost/type_traits/is_base_and_derived.hpp>\n\nnamespace boost {\nnamespace parameter {\nnamespace aux {\n\nstruct maybe_base {};\n\ntemplate <class T>\nstruct is_maybe\n  : is_base_and_derived<maybe_base, T>\n{};\n\n} // namespace aux\n} // namespace parameter\n} // namespace boost\n\n#endif // BOOST_PARAMETER_IS_MAYBE_050329_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/overloads.hpp",
    "content": "// Copyright David Abrahams, Daniel Wallin 2003. Use, modification and \n// distribution is subject to the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n\n// This file generates overloads in this format:\n//\n//     template<class A0, class A1>\n//     typename mpl::apply_wrap1<\n//         aux::make_arg_list<\n//             PS0,A0\n//           , aux::make_arg_list<\n//                 PS1,A1\n//               , mpl::identity<aux::empty_arg_list>\n//             >\n//         >\n//      , unnamed_list\n//     >::type\n//     operator()(A0 const& a0, A1 const& a1) const\n//     {\n//         typedef typename mpl::apply_wrap1<\n//             aux::make_arg_list<\n//                 PS0,A0\n//               , aux::make_arg_list<\n//                     PS1,A1\n//                   , mpl::identity<aux::empty_arg_list>\n//                 >\n//             >\n//         >::type arg_tuple;\n//\n//         return arg_tuple(\n//             a0\n//           , a1\n//           , aux::void_()\n//             ...\n//         );\n//     }\n//\n\n#if !defined(BOOST_PP_IS_ITERATING)\n# error Boost.Parameters - do not include this file!\n#endif\n\n#define N BOOST_PP_ITERATION()\n\n#define BOOST_PARAMETER_open_list(z, n, text) \\\n    aux::item< \\\n        BOOST_PP_CAT(PS, n), BOOST_PP_CAT(A, n)\n\n#define BOOST_PARAMETER_close_list(z, n, text) > \n\n#define BOOST_PARAMETER_arg_list(n) \\\n    aux::make_arg_list< \\\n        BOOST_PP_ENUM(N, BOOST_PARAMETER_open_list, _) \\\n      , void_ \\\n        BOOST_PP_REPEAT(N, BOOST_PARAMETER_close_list, _) \\\n      , deduced_list \\\n      , aux::tag_keyword_arg \\\n    >\n\n#define BOOST_PARAMETER_arg_pack_init(z, n, limit) \\\n    BOOST_PP_CAT(a, BOOST_PP_SUB(limit,n))\n\ntemplate<BOOST_PP_ENUM_PARAMS(N, class A)>\ntypename mpl::first<\n    typename 
BOOST_PARAMETER_arg_list(N)::type\n>::type\noperator()(BOOST_PP_ENUM_BINARY_PARAMS(N, A, & a)) const\n{\n    typedef typename BOOST_PARAMETER_arg_list(N)::type result;\n\n    typedef typename mpl::first<result>::type result_type;\n    typedef typename mpl::second<result>::type error;\n    error();\n\n    return result_type(\n        BOOST_PP_ENUM(N, BOOST_PARAMETER_arg_pack_init, BOOST_PP_DEC(N))\n        BOOST_PP_ENUM_TRAILING_PARAMS(\n            BOOST_PP_SUB(BOOST_PARAMETER_MAX_ARITY, N)\n          , aux::void_reference() BOOST_PP_INTERCEPT\n        ));\n}\n\n#undef BOOST_PARAMETER_arg_list\n#undef BOOST_PARAMETER_open_list\n#undef BOOST_PARAMETER_close_list\n#undef N\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/parameter_requirements.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef PARAMETER_REQUIREMENTS_050331_HPP\n#define PARAMETER_REQUIREMENTS_050331_HPP\n\nnamespace boost { namespace parameter { namespace aux {\n\n// Used to pass static information about parameter requirements\n// through the satisfies() overload set (below).  The\n// matched function is never invoked, but its type indicates whether\n// a parameter matches at compile-time\ntemplate <class Keyword, class Predicate, class HasDefault>\nstruct parameter_requirements\n{\n    typedef Keyword keyword;\n    typedef Predicate predicate;\n    typedef HasDefault has_default;\n};\n\n}}} // namespace boost::parameter::aux\n\n#endif // PARAMETER_REQUIREMENTS_050331_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/parenthesized_type.hpp",
    "content": "// Copyright David Abrahams 2006. Distributed under the Boost\n// Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n#ifndef BOOST_PARAMETER_AUX_PARENTHESIZED_TYPE_DWA2006414_HPP\n# define BOOST_PARAMETER_AUX_PARENTHESIZED_TYPE_DWA2006414_HPP\n\n# include <boost/config.hpp>\n# include <boost/detail/workaround.hpp>\n\nnamespace boost { namespace parameter { namespace aux { \n\n// A macro that takes a parenthesized C++ type name (T) and transforms\n// it into an un-parenthesized type expression equivalent to T.\n#  define BOOST_PARAMETER_PARENTHESIZED_TYPE(x)                    \\\n    boost::parameter::aux::unaryfunptr_arg_type< void(*)x >::type\n\n// A metafunction that transforms void(*)(T) -> T\ntemplate <class UnaryFunctionPointer>\nstruct unaryfunptr_arg_type;\n\ntemplate <class Arg>\nstruct unaryfunptr_arg_type<void(*)(Arg)>\n{\n    typedef Arg type;\n};\n\ntemplate <>\nstruct unaryfunptr_arg_type<void(*)(void)>\n{\n    typedef void type;\n};\n    \n}}} // namespace boost::parameter::aux\n\n#endif // BOOST_PARAMETER_AUX_PARENTHESIZED_TYPE_DWA2006414_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/preprocessor/flatten.hpp",
    "content": "// Copyright Daniel Wallin 2005. Use, modification and distribution is\n// subject to the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_FLATTEN_051217_HPP\n# define BOOST_PARAMETER_FLATTEN_051217_HPP\n\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/seq/for_each.hpp>\n# include <boost/preprocessor/seq/for_each_i.hpp>\n# include <boost/preprocessor/identity.hpp>\n# include <boost/preprocessor/selection/max.hpp>\n# include <boost/preprocessor/arithmetic/sub.hpp>\n# include <boost/preprocessor/repetition/enum_trailing.hpp>\n# include <boost/parameter/aux_/preprocessor/for_each.hpp>\n\n# define BOOST_PARAMETER_FLATTEN_SPLIT_required required,\n# define BOOST_PARAMETER_FLATTEN_SPLIT_optional optional,\n# define BOOST_PARAMETER_FLATTEN_SPLIT_deduced deduced,\n\n# define BOOST_PARAMETER_FLATTEN_SPLIT(sub) \\\n    BOOST_PP_CAT(BOOST_PARAMETER_FLATTEN_SPLIT_, sub)\n\n# define BOOST_PARAMETER_FLATTEN_QUALIFIER(sub) \\\n    BOOST_PP_SPLIT(0, BOOST_PARAMETER_FLATTEN_SPLIT(sub))\n\n# define BOOST_PARAMETER_FLATTEN_ARGS(sub) \\\n    BOOST_PP_SPLIT(1, BOOST_PARAMETER_FLATTEN_SPLIT(sub))\n\n# define BOOST_PARAMETER_FLATTEN_ARITY_optional(arities) \\\n    BOOST_PP_TUPLE_ELEM(3,0,arities)\n\n# define BOOST_PARAMETER_FLATTEN_ARITY_required(arities) \\\n    BOOST_PP_TUPLE_ELEM(3,1,arities)\n\n# define BOOST_PARAMETER_FLATTEN_SPEC0_DUMMY_ELEM(z, n, data) ~\n# define BOOST_PARAMETER_FLATTEN_SPEC0(r, n, elem, data) \\\n    (( \\\n        BOOST_PP_TUPLE_ELEM(3,2,data) \\\n      , BOOST_PP_TUPLE_REM(BOOST_PP_TUPLE_ELEM(3,0,data)) elem \\\n        BOOST_PP_ENUM_TRAILING( \\\n            BOOST_PP_SUB( \\\n                BOOST_PP_TUPLE_ELEM(3,1,data) \\\n              , BOOST_PP_TUPLE_ELEM(3,0,data) \\\n            ) \\\n          , 
BOOST_PARAMETER_FLATTEN_SPEC0_DUMMY_ELEM \\\n          , ~ \\\n        ) \\\n    ))\n\n# define BOOST_PARAMETER_FLATTEN_SPEC_AUX(r, arity, max_arity, spec, transform) \\\n    BOOST_PARAMETER_FOR_EACH_R( \\\n        r \\\n      , arity \\\n      , BOOST_PARAMETER_FLATTEN_ARGS(spec) \\\n      , (arity, max_arity, transform(BOOST_PARAMETER_FLATTEN_QUALIFIER(spec))) \\\n      , BOOST_PARAMETER_FLATTEN_SPEC0 \\\n    )\n\n# define BOOST_PARAMETER_FLATTEN_IDENTITY(x) x\n\n# define BOOST_PARAMETER_FLATTEN_SPEC_optional(r, arities, spec) \\\n    BOOST_PARAMETER_FLATTEN_SPEC_AUX( \\\n        r \\\n      , BOOST_PP_CAT( \\\n            BOOST_PARAMETER_FLATTEN_ARITY_, BOOST_PARAMETER_FLATTEN_QUALIFIER(spec) \\\n        )(arities) \\\n      , BOOST_PP_TUPLE_ELEM(3,2,arities) \\\n      , spec \\\n      , BOOST_PARAMETER_FLATTEN_IDENTITY \\\n    )\n\n# define BOOST_PARAMETER_FLATTEN_SPEC_required(r, arities, spec) \\\n    BOOST_PARAMETER_FLATTEN_SPEC_optional(r, arities, spec)\n\n# define BOOST_PARAMETER_FLATTEN_SPEC_AS_DEDUCED(x) BOOST_PP_CAT(deduced_,x)\n\n# define BOOST_PARAMETER_FLATTEN_SPEC_deduced_M(r, arities, n, spec) \\\n    BOOST_PARAMETER_FLATTEN_SPEC_AUX( \\\n        r \\\n      , BOOST_PP_CAT( \\\n            BOOST_PARAMETER_FLATTEN_ARITY_, BOOST_PARAMETER_FLATTEN_QUALIFIER(spec) \\\n        )(arities) \\\n      , BOOST_PP_TUPLE_ELEM(3,2,arities) \\\n      , spec \\\n      , BOOST_PARAMETER_FLATTEN_SPEC_AS_DEDUCED \\\n    )\n\n# define BOOST_PARAMETER_FLATTEN_SPEC_deduced(r, arities, spec) \\\n    BOOST_PP_SEQ_FOR_EACH_I_R( \\\n        r \\\n      , BOOST_PARAMETER_FLATTEN_SPEC_deduced_M \\\n      , arities \\\n      , BOOST_PARAMETER_FLATTEN_ARGS(spec) \\\n    )\n\n# define BOOST_PARAMETER_FLATTEN_SPEC(r, arities, spec) \\\n    BOOST_PP_CAT( \\\n        BOOST_PARAMETER_FLATTEN_SPEC_, BOOST_PARAMETER_FLATTEN_QUALIFIER(spec) \\\n    )(r, arities, spec)\n\n# define BOOST_PARAMETER_FLATTEN(optional_arity, required_arity, wanted_arity, specs) \\\n    
BOOST_PP_SEQ_FOR_EACH( \\\n        BOOST_PARAMETER_FLATTEN_SPEC \\\n      , ( \\\n            optional_arity, required_arity \\\n          , wanted_arity \\\n        ) \\\n      , specs \\\n    )\n\n#endif // BOOST_PARAMETER_FLATTEN_051217_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/preprocessor/for_each.hpp",
    "content": "// Copyright Daniel Wallin 2005. Use, modification and distribution is\n// subject to the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_FOR_EACH_051217_HPP\n# define BOOST_PARAMETER_FOR_EACH_051217_HPP\n\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/detail/split.hpp>\n# include <boost/preprocessor/logical/not.hpp>\n# include <boost/preprocessor/facilities/is_empty.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n# include <boost/preprocessor/arithmetic/inc.hpp>\n# include <boost/preprocessor/repeat.hpp>\n# include <boost/preprocessor/punctuation/comma_if.hpp>\n# include <boost/preprocessor/for.hpp>\n# include <boost/preprocessor/repetition/deduce_r.hpp>\n\n# define BOOST_PARAMETER_FOR_EACH_head_aux2(x,y) (x,y), ~\n# define BOOST_PARAMETER_FOR_EACH_head_aux3(x,y,z) (x,y,z), ~\n# define BOOST_PARAMETER_FOR_EACH_head_aux4(x,y,z,u) (x,y,z,u), ~\n# define BOOST_PARAMETER_FOR_EACH_head(n,x) \\\n    BOOST_PP_SPLIT(0, BOOST_PP_CAT(BOOST_PARAMETER_FOR_EACH_head_aux,n) x)\n\n# define BOOST_PARAMETER_FOR_EACH_pred_aux_BOOST_PARAMETER_FOR_EACH_END_SENTINEL\n# define BOOST_PARAMETER_FOR_EACH_pred_aux_check(x) \\\n    BOOST_PP_NOT(BOOST_PP_IS_EMPTY( \\\n        BOOST_PP_CAT(BOOST_PARAMETER_FOR_EACH_pred_aux_, x) \\\n    )), ~\n\n# define BOOST_PARAMETER_FOR_EACH_pred_aux2(x,y) \\\n    BOOST_PARAMETER_FOR_EACH_pred_aux_check(x)\n# define BOOST_PARAMETER_FOR_EACH_pred_aux3(x,y,z) \\\n    BOOST_PARAMETER_FOR_EACH_pred_aux_check(x)\n# define BOOST_PARAMETER_FOR_EACH_pred_aux4(x,y,z,u) \\\n    BOOST_PARAMETER_FOR_EACH_pred_aux_check(x)\n\n# define BOOST_PARAMETER_FOR_EACH_pred_aux0(n,x) \\\n    BOOST_PP_CAT(BOOST_PARAMETER_FOR_EACH_pred_aux,n) x\n\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#  define BOOST_PARAMETER_FOR_EACH_pred_SPLIT_FIRST(x) \\\n    BOOST_PP_SPLIT(0, x)\n\n#  define BOOST_PARAMETER_FOR_EACH_pred(r, 
state) \\\n    BOOST_PARAMETER_FOR_EACH_pred_SPLIT_FIRST( \\\n        BOOST_PARAMETER_FOR_EACH_pred_aux0( \\\n            BOOST_PP_TUPLE_ELEM(5,3,state) \\\n          , BOOST_PP_TUPLE_ELEM(5,0,state) \\\n        ) \\\n    )\n# else\n#  define BOOST_PARAMETER_FOR_EACH_pred(r, state) \\\n    BOOST_PP_SPLIT( \\\n        0 \\\n      , BOOST_PARAMETER_FOR_EACH_pred_aux0( \\\n            BOOST_PP_TUPLE_ELEM(5,3,state) \\\n          , BOOST_PP_TUPLE_ELEM(5,0,state) \\\n        ) \\\n    )\n# endif\n\n# define BOOST_PARAMETER_FOR_EACH_op(r, state) \\\n    ( \\\n        BOOST_PP_TUPLE_EAT(BOOST_PP_TUPLE_ELEM(5,3,state)) \\\n          BOOST_PP_TUPLE_ELEM(5,0,state) \\\n      , BOOST_PP_TUPLE_ELEM(5,1,state) \\\n      , BOOST_PP_TUPLE_ELEM(5,2,state) \\\n      , BOOST_PP_TUPLE_ELEM(5,3,state) \\\n      , BOOST_PP_INC(BOOST_PP_TUPLE_ELEM(5,4,state)) \\\n    )\n\n# define BOOST_PARAMETER_FOR_EACH_macro(r, state) \\\n    BOOST_PP_TUPLE_ELEM(5,2,state)( \\\n        r \\\n      , BOOST_PP_TUPLE_ELEM(5,4,state) \\\n      , BOOST_PARAMETER_FOR_EACH_head( \\\n            BOOST_PP_TUPLE_ELEM(5,3,state) \\\n          , BOOST_PP_TUPLE_ELEM(5,0,state) \\\n        ) \\\n      , BOOST_PP_TUPLE_ELEM(5,1,state) \\\n    )\n\n# define BOOST_PARAMETER_FOR_EACH_build_end_sentinel(z,n,text) \\\n    BOOST_PP_COMMA_IF(n) BOOST_PARAMETER_FOR_EACH_END_SENTINEL\n# define BOOST_PARAMETER_FOR_EACH_build_end_sentinel_tuple(arity) \\\n    ( \\\n        BOOST_PP_REPEAT(arity, BOOST_PARAMETER_FOR_EACH_build_end_sentinel, _) \\\n    )\n\n# define BOOST_PARAMETER_FOR_EACH_R(r, arity, list, data, macro) \\\n    BOOST_PP_CAT(BOOST_PP_FOR_, r)( \\\n        (list BOOST_PARAMETER_FOR_EACH_build_end_sentinel_tuple(arity), data, macro, arity, 0) \\\n      , BOOST_PARAMETER_FOR_EACH_pred \\\n      , BOOST_PARAMETER_FOR_EACH_op \\\n      , BOOST_PARAMETER_FOR_EACH_macro \\\n    )\n\n# define BOOST_PARAMETER_FOR_EACH(arity, list, data, macro) \\\n    BOOST_PARAMETER_FOR_EACH_R(BOOST_PP_DEDUCE_R(), arity, list, data, 
macro)\n\n#endif // BOOST_PARAMETER_FOR_EACH_051217_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/result_of0.hpp",
    "content": "// Copyright David Abrahams 2005. Distributed under the Boost\n// Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n#ifndef BOOST_PARAMETER_AUX_RESULT_OF0_DWA2005511_HPP\n# define BOOST_PARAMETER_AUX_RESULT_OF0_DWA2005511_HPP\n\n# include <boost/utility/result_of.hpp>\n\n// A metafunction returning the result of invoking a nullary function\n// object of the given type.\n\n#ifndef BOOST_NO_RESULT_OF\n\n# include <boost/utility/result_of.hpp>\nnamespace boost { namespace parameter { namespace aux { \ntemplate <class F>\nstruct result_of0 : result_of<F()>\n{};\n\n}}} // namespace boost::parameter::aux_\n\n#else\n\nnamespace boost { namespace parameter { namespace aux { \ntemplate <class F>\nstruct result_of0\n{\n    typedef typename F::result_type type;\n};\n\n}}} // namespace boost::parameter::aux_\n\n#endif \n\n\n#endif // BOOST_PARAMETER_AUX_RESULT_OF0_DWA2005511_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/set.hpp",
    "content": "// Copyright Daniel Wallin 2006. Use, modification and distribution is\n// subject to the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_SET_060912_HPP\n# define BOOST_PARAMETER_SET_060912_HPP\n\n# include <boost/detail/workaround.hpp>\n\n# if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n#  include <boost/mpl/insert.hpp>\n#  include <boost/mpl/set/set0.hpp>\n#  include <boost/mpl/has_key.hpp>\n\nnamespace boost { namespace parameter { namespace aux {\n\ntypedef mpl::set0<> set0;\n\ntemplate <class Set, class K>\nstruct insert_\n{\n    typedef typename mpl::insert<Set, K>::type type;\n};\n\ntemplate <class Set, class K>\nstruct has_key_\n{\n    typedef typename mpl::has_key<Set, K>::type type;\n};\n\n}}} // namespace boost::parameter::aux\n\n# else\n\n#  include <boost/mpl/list.hpp>\n#  include <boost/mpl/end.hpp>\n#  include <boost/mpl/find.hpp>\n#  include <boost/mpl/not.hpp>\n#  include <boost/mpl/push_front.hpp>\n\nnamespace boost { namespace parameter { namespace aux {\n\ntypedef mpl::list0<> set0;\n\ntemplate <class Set, class K>\nstruct insert_\n{\n    typedef typename mpl::push_front<Set, K>::type type;\n};\n\ntemplate <class Set, class K>\nstruct has_key_\n{\n    typedef typename mpl::find<Set, K>::type iter;\n    typedef mpl::not_<\n        is_same<iter, typename mpl::end<Set>::type> \n    > type;\n};\n\n}}} // namespace boost::parameter::aux\n\n# endif\n\n\n#endif // BOOST_PARAMETER_SET_060912_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/tag.hpp",
    "content": "// Copyright David Abrahams 2005. Distributed under the Boost\n// Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n#ifndef BOOST_PARAMETER_AUX_TAG_DWA2005610_HPP\n# define BOOST_PARAMETER_AUX_TAG_DWA2005610_HPP\n\n# include <boost/parameter/aux_/unwrap_cv_reference.hpp>\n# include <boost/parameter/aux_/tagged_argument.hpp>\n\nnamespace boost { namespace parameter { namespace aux { \n\ntemplate <class Keyword, class ActualArg\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n        , class = typename is_cv_reference_wrapper<ActualArg>::type\n#endif \n          >\nstruct tag\n{\n    typedef tagged_argument<\n        Keyword\n      , typename unwrap_cv_reference<ActualArg>::type\n    > type;\n};\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\ntemplate <class Keyword, class ActualArg>\nstruct tag<Keyword,ActualArg,mpl::false_>\n{\n    typedef tagged_argument<\n        Keyword\n      , ActualArg\n    > type;\n};\n#endif \n\n}}} // namespace boost::parameter::aux_\n\n#endif // BOOST_PARAMETER_AUX_TAG_DWA2005610_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/tagged_argument.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_TAGGED_ARGUMENT_050328_HPP\n# define BOOST_PARAMETER_TAGGED_ARGUMENT_050328_HPP\n\n# include <boost/parameter/aux_/void.hpp>\n# include <boost/parameter/aux_/arg_list.hpp>\n# include <boost/parameter/aux_/result_of0.hpp>\n# include <boost/mpl/if.hpp>\n# include <boost/mpl/apply_wrap.hpp>\n# include <boost/mpl/and.hpp>\n# include <boost/mpl/not.hpp>\n# include <boost/type_traits/is_same.hpp>\n# include <boost/type_traits/is_convertible.hpp>\n# include <boost/type_traits/is_reference.hpp>\n\nnamespace boost { namespace parameter { namespace aux {\n\nstruct empty_arg_list;\nstruct arg_list_tag;\n\nstruct tagged_argument_base {};\n\n// Holds a reference to an argument of type Arg associated with\n// keyword Keyword\n    \ntemplate <class Keyword, class Arg>\nstruct tagged_argument : tagged_argument_base\n{\n    typedef Keyword key_type;\n    typedef Arg value_type;\n    typedef Arg& reference;\n\n    tagged_argument(reference x) : value(x) {}\n\n    // A metafunction class that, given a keyword and a default\n    // type, returns the appropriate result type for a keyword\n    // lookup given that default\n    struct binding\n    {\n        template <class KW, class Default, class Reference>\n        struct apply\n        {\n          typedef typename mpl::eval_if<\n                boost::is_same<KW, key_type>\n              , mpl::if_<Reference, reference, value_type>\n              , mpl::identity<Default>\n          >::type type;\n        };\n    };\n\n    // Comma operator to compose argument list without using parameters<>.\n    // Useful for argument lists with undetermined length.\n    template <class Keyword2, class Arg2>\n    arg_list<\n        tagged_argument<Keyword, Arg>\n      , 
arg_list<tagged_argument<Keyword2, Arg2> > \n    >\n    operator,(tagged_argument<Keyword2, Arg2> x) const\n    {\n        return arg_list<\n            tagged_argument<Keyword, Arg>\n          , arg_list<tagged_argument<Keyword2, Arg2> > \n        >(\n            *this\n          , arg_list<tagged_argument<Keyword2, Arg2> >(x, empty_arg_list())\n        );\n    }\n\n    reference operator[](keyword<Keyword> const&) const\n    {\n        return value;\n    }\n\n# if defined(BOOST_NO_FUNCTION_TEMPLATE_ORDERING) || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n    template <class KW, class Default>\n    Default& get_with_default(default_<KW,Default> const& x, int) const\n    {\n        return x.value;\n    }\n\n    template <class Default>\n    reference get_with_default(default_<key_type,Default> const&, long) const\n    {\n        return value;\n    }\n\n    template <class KW, class Default>\n    typename mpl::apply_wrap3<binding, KW, Default&, mpl::true_>::type\n    operator[](default_<KW,Default> const& x) const\n    {\n        return get_with_default(x, 0L);\n    }\n\n    template <class KW, class F>\n    typename result_of0<F>::type \n    get_with_lazy_default(lazy_default<KW,F> const& x, int) const\n    {\n        return x.compute_default();\n    }\n\n    template <class F>\n    reference get_with_lazy_default(lazy_default<key_type,F> const&, long) const\n    {\n        return value;\n    }\n\n    template <class KW, class F>\n    typename mpl::apply_wrap3<\n        binding,KW\n      , typename result_of0<F>::type\n      , mpl::true_\n    >::type\n    operator[](lazy_default<KW,F> const& x) const\n    {\n        return get_with_lazy_default(x, 0L);\n    }\n# else\n    template <class Default>\n    reference operator[](default_<key_type,Default> const& ) const\n    {\n        return value;\n    }\n\n    template <class F>\n    reference operator[](lazy_default<key_type,F> const& ) const\n    {\n        return value;\n    }\n\n    template <class KW, 
class Default>\n    Default& operator[](default_<KW,Default> const& x) const\n    {\n        return x.value;\n    }\n\n    template <class KW, class F>\n    typename result_of0<F>::type operator[](lazy_default<KW,F> const& x) const\n    {\n        return x.compute_default();\n    }\n\n    template <class ParameterRequirements>\n    static typename ParameterRequirements::has_default\n    satisfies(ParameterRequirements*);\n\n    template <class HasDefault, class Predicate>\n    static typename mpl::apply1<Predicate, value_type>::type\n    satisfies(\n        parameter_requirements<key_type,Predicate,HasDefault>*\n    );\n# endif\n\n    reference value;\n# if BOOST_WORKAROUND(BOOST_MSVC, BOOST_TESTED_AT(1310))\n    // warning suppression\n private:\n    void operator=(tagged_argument const&);\n public:    \n# endif\n    // MPL sequence support\n    typedef tagged_argument type;            // Convenience for users\n    typedef empty_arg_list tail_type;        // For the benefit of iterators\n    typedef arg_list_tag tag; // For dispatching to sequence intrinsics\n};\n\n// Defines a metafunction, is_tagged_argument, that identifies\n// tagged_argument specializations and their derived classes.\ntemplate <class T>\nstruct is_tagged_argument_aux\n  : is_convertible<T*,tagged_argument_base const*>\n{};\n\ntemplate <class T>\nstruct is_tagged_argument\n  : mpl::and_<\n        mpl::not_<is_reference<T> >\n      , is_tagged_argument_aux<T>\n    >\n{};\n\n}}} // namespace boost::parameter::aux\n\n#endif // BOOST_PARAMETER_TAGGED_ARGUMENT_050328_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/template_keyword.hpp",
    "content": "// Copyright Daniel Wallin 2006. Use, modification and distribution is\n// subject to the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_TEMPLATE_KEYWORD_060203_HPP\n# define BOOST_PARAMETER_TEMPLATE_KEYWORD_060203_HPP\n\n# include <boost/mpl/and.hpp>\n# include <boost/mpl/not.hpp>\n# include <boost/type_traits/is_convertible.hpp>\n# include <boost/type_traits/is_reference.hpp>\n\nnamespace boost { namespace parameter { \n\nnamespace aux \n{\n\n  struct template_keyword_tag {}; \n\n  template <class T, class U>\n  struct is_pointer_convertible\n    : is_convertible<T*, U*>\n  {};\n\n  template <class T>\n  struct is_template_keyword\n    : mpl::and_<\n          mpl::not_<is_reference<T> >\n        , is_pointer_convertible<T, template_keyword_tag>\n      >\n  {};\n\n} // namespace aux\n\ntemplate <class Tag, class T>\nstruct template_keyword\n  : aux::template_keyword_tag\n{\n    typedef Tag key_type;\n    typedef T value_type;\n    typedef value_type reference;\n};\n\n}} // namespace boost::parameter\n\n#endif // BOOST_PARAMETER_TEMPLATE_KEYWORD_060203_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/unwrap_cv_reference.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef UNWRAP_CV_REFERENCE_050328_HPP\n#define UNWRAP_CV_REFERENCE_050328_HPP\n\n#include <boost/parameter/aux_/yesno.hpp>\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/identity.hpp>\n#include <boost/mpl/eval_if.hpp>\n\nnamespace boost { template<class T> class reference_wrapper; }\n\nnamespace boost { namespace parameter { namespace aux {\n\n//\n// reference_wrapper support -- because of the forwarding problem,\n// when passing arguments positionally by non-const reference, we\n// ask users of named parameter interfaces to use ref(x) to wrap\n// them.\n//\n\n// is_cv_reference_wrapper returns mpl::true_ if T is of type\n// reference_wrapper<U> cv\ntemplate <class U>\nyes_tag is_cv_reference_wrapper_check(reference_wrapper<U> const volatile*);\nno_tag is_cv_reference_wrapper_check(...);\n\ntemplate <class T>\nstruct is_cv_reference_wrapper\n{\n    BOOST_STATIC_CONSTANT(\n        bool, value = (\n            sizeof(is_cv_reference_wrapper_check((T*)0)) == sizeof(yes_tag)\n        )\n    );\n\n    typedef mpl::bool_<\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n        is_cv_reference_wrapper::\n#endif \n    value> type;\n};\n\n// Needed for unwrap_cv_reference below. 
T might be const, so\n// eval_if might fail because of deriving from T const on EDG.\ntemplate <class T>\nstruct get_type\n{\n    typedef typename T::type type;\n};\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\ntemplate <class T, class is_reference_wrapper = typename is_cv_reference_wrapper<T>::type>\nstruct unwrap_cv_reference\n{\n    typedef T type;\n};\n\ntemplate <class T>\nstruct unwrap_cv_reference<T const, mpl::false_>\n{\n    typedef T const type;\n};\n\ntemplate <class T>\nstruct unwrap_cv_reference<T, mpl::true_>\n  : T\n{};\n\n#else \n// Produces the unwrapped type to hold a reference to in named<>\n// Can't use boost::unwrap_reference<> here because it\n// doesn't handle the case where T = reference_wrapper<U> cv\ntemplate <class T>\nstruct unwrap_cv_reference\n{\n    typedef typename mpl::eval_if<\n        is_cv_reference_wrapper<T>\n      , get_type<T>\n      , mpl::identity<T>\n    >::type type;\n};\n#endif\n\n}}} // namespace boost::parameter::aux\n\n#endif // UNWRAP_CV_REFERENCE_050328_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/void.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_VOID_050329_HPP\n#define BOOST_PARAMETER_VOID_050329_HPP\n\nnamespace boost { namespace parameter { \n\n// A placemarker for \"no argument passed.\"\n// MAINTAINER NOTE: Do not make this into a metafunction\nstruct void_ {}; \n\nnamespace aux \n{\n\n  inline void_& void_reference()\n  {\n      static void_ instance;\n      return instance;\n  }\n\n} // namespace aux\n\n}} // namespace boost::parameter\n\n#endif // BOOST_PARAMETER_VOID_050329_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/aux_/yesno.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef YESNO_050328_HPP\n#define YESNO_050328_HPP\n\n#include <boost/mpl/bool.hpp>\n\nnamespace boost { namespace parameter { namespace aux {\n\n// types used with the \"sizeof trick\" to capture the results of\n// overload resolution at compile-time.\ntypedef char yes_tag;\ntypedef char (&no_tag)[2];\n\n// mpl::true_ and mpl::false_ are not distinguishable by sizeof(),\n// so we pass them through these functions to get a type that is.\nyes_tag to_yesno(mpl::true_);\nno_tag to_yesno(mpl::false_);\n\n}}} // namespace boost::parameter::aux\n\n#endif // YESNO_050328_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/binding.hpp",
    "content": "// Copyright David Abrahams 2005. Distributed under the Boost\n// Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n#ifndef BOOST_PARAMETER_BINDING_DWA200558_HPP\n# define BOOST_PARAMETER_BINDING_DWA200558_HPP\n\n# include <boost/mpl/apply.hpp>\n# include <boost/mpl/assert.hpp>\n# include <boost/mpl/and.hpp>\n# include <boost/parameter/aux_/result_of0.hpp>\n# include <boost/parameter/aux_/void.hpp>\n# include <boost/type_traits/is_same.hpp>\n\nnamespace boost { namespace parameter { \n\n// A metafunction that, given an argument pack, returns the type of\n// the parameter identified by the given keyword.  If no such\n// parameter has been specified, returns Default\n\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\ntemplate <class Parameters, class Keyword, class Default>\nstruct binding0\n{\n    typedef typename mpl::apply_wrap3<\n        typename Parameters::binding,Keyword,Default,mpl::true_\n    >::type type;\n\n    BOOST_MPL_ASSERT_NOT((\n        mpl::and_<\n            is_same<Default, void_>\n          , is_same<type, void_>\n        >\n    ));\n};\n# endif\n\ntemplate <class Parameters, class Keyword, class Default = void_>\nstruct binding\n{\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n    typedef typename mpl::eval_if<\n        mpl::is_placeholder<Parameters>\n      , mpl::identity<int>\n      , binding0<Parameters,Keyword,Default>\n    >::type type;\n# else\n    typedef typename mpl::apply_wrap3<\n        typename Parameters::binding,Keyword,Default,mpl::true_\n    >::type type;\n\n    BOOST_MPL_ASSERT_NOT((\n        mpl::and_<\n            is_same<Default, void_>\n          , is_same<type, void_>\n        >\n    ));\n# endif\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3,binding,(Parameters,Keyword,Default))\n};\n\n// A metafunction that, given an argument pack, returns the type of\n// the parameter identified by the given keyword.  
If no such\n// parameter has been specified, returns the type returned by invoking\n// DefaultFn\ntemplate <class Parameters, class Keyword, class DefaultFn>\nstruct lazy_binding\n{\n  typedef typename mpl::apply_wrap3<\n      typename Parameters::binding\n    , Keyword\n    , typename aux::result_of0<DefaultFn>::type\n    , mpl::true_\n  >::type type;\n};\n\n\n}} // namespace boost::parameter\n\n#endif // BOOST_PARAMETER_BINDING_DWA200558_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/config.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_CONFIG_050403_HPP\n#define BOOST_PARAMETER_CONFIG_050403_HPP\n\n#ifndef BOOST_PARAMETER_MAX_ARITY\n# define BOOST_PARAMETER_MAX_ARITY 8\n#endif\n\n#endif // BOOST_PARAMETER_CONFIG_050403_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/keyword.hpp",
    "content": "// Copyright Daniel Wallin, David Abrahams 2005. Use, modification and\n// distribution is subject to the Boost Software License, Version 1.0. (See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef KEYWORD_050328_HPP\n#define KEYWORD_050328_HPP\n\n#include <boost/parameter/aux_/unwrap_cv_reference.hpp>\n#include <boost/parameter/aux_/tag.hpp>\n#include <boost/parameter/aux_/default.hpp>\n\nnamespace boost { namespace parameter {\n\n// Instances of unique specializations of keyword<...> serve to\n// associate arguments with parameter names.  For example:\n//\n//    struct rate_;           // parameter names\n//    struct skew_;\n//    namespace\n//    {\n//      keyword<rate_> rate;  // keywords\n//      keyword<skew_> skew;\n//    }\n//\n//    ...\n//\n//    f(rate = 1, skew = 2.4);\n//\ntemplate <class Tag>\nstruct keyword\n{\n    template <class T>\n    typename aux::tag<Tag, T>::type const\n    operator=(T& x) const\n    {\n        typedef typename aux::tag<Tag, T>::type result;\n        return result(x);\n    }\n\n    template <class Default>\n    aux::default_<Tag, Default>\n    operator|(Default& default_) const\n    {\n        return aux::default_<Tag, Default>(default_);\n    }\n\n    template <class Default>\n    aux::lazy_default<Tag, Default>\n    operator||(Default& default_) const\n    {\n        return aux::lazy_default<Tag, Default>(default_);\n    }\n\n    template <class T>\n    typename aux::tag<Tag, T const>::type const\n    operator=(T const& x) const\n    {\n        typedef typename aux::tag<Tag, T const>::type result;\n        return result(x);\n    }\n\n    template <class Default>\n    aux::default_<Tag, const Default>\n    operator|(const Default& default_) const\n    {\n        return aux::default_<Tag, const Default>(default_);\n    }\n\n    template <class Default>\n    aux::lazy_default<Tag, Default>\n    operator||(Default const& default_) const\n    {\n        return 
aux::lazy_default<Tag, Default>(default_);\n    }\n\n public: // Insurance against ODR violations\n    \n    // People will need to define these keywords in header files.  To\n    // prevent ODR violations, it's important that the keyword used in\n    // every instantiation of a function template is the same object.\n    // We provide a reference to a common instance of each keyword\n    // object and prevent construction by users.\n    static keyword<Tag> const instance;\n\n    // This interface is deprecated\n    static keyword<Tag>& get()\n    {\n        return const_cast<keyword<Tag>&>(instance);\n    }\n};\n\ntemplate <class Tag>\nkeyword<Tag> const keyword<Tag>::instance = {};\n\n// Reduces boilerplate required to declare and initialize keywords\n// without violating ODR.  Declares a keyword tag type with the given\n// name in namespace tag_namespace, and declares and initializes a\n// reference in an anonymous namespace to a singleton instance of that\n// type.\n\n#define BOOST_PARAMETER_KEYWORD(tag_namespace,name)                 \\\n    namespace tag_namespace                                         \\\n    {                                                               \\\n      struct name                                                   \\\n      {                                                             \\\n          static char const* keyword_name()                         \\\n          {                                                         \\\n              return #name;                                         \\\n          }                                                         \\\n      };                                                            \\\n    }                                                               \\\n    namespace                                                       \\\n    {                                                               \\\n       ::boost::parameter::keyword<tag_namespace::name> const& name \\\n      
 = ::boost::parameter::keyword<tag_namespace::name>::instance;\\\n    }\n\n}} // namespace boost::parameter\n\n#endif // KEYWORD_050328_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/macros.hpp",
    "content": "// Copyright David Abrahams, Daniel Wallin 2003. Use, modification and \n// distribution is subject to the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_MACROS_050412_HPP\n#define BOOST_PARAMETER_MACROS_050412_HPP\n\n#include <boost/preprocessor/tuple/elem.hpp>\n#include <boost/preprocessor/repetition/repeat_from_to.hpp>\n#include <boost/preprocessor/arithmetic/inc.hpp>\n#include <boost/preprocessor/logical/bool.hpp>\n#include <boost/preprocessor/punctuation/comma_if.hpp>\n#include <boost/preprocessor/control/expr_if.hpp>\n#include <boost/preprocessor/repetition/enum_params.hpp>\n#include <boost/preprocessor/repetition/enum_binary_params.hpp>\n#include <boost/preprocessor/cat.hpp>\n#include <boost/detail/workaround.hpp>\n\n#define BOOST_PARAMETER_FUN_TEMPLATE_HEAD1(n) \\\n    template<BOOST_PP_ENUM_PARAMS(n, class T)>\n\n#define BOOST_PARAMETER_FUN_TEMPLATE_HEAD0(n)\n\n#if ! defined(BOOST_NO_SFINAE) && ! 
BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x592)) \n\n# define BOOST_PARAMETER_MATCH_TYPE(n, param)           \\\n            BOOST_PP_EXPR_IF(n, typename) param::match  \\\n            <                                           \\\n                BOOST_PP_ENUM_PARAMS(n, T)              \\\n            >::type \n\n#else\n\n# define BOOST_PARAMETER_MATCH_TYPE(n, param) param\n\n#endif\n\n#define BOOST_PARAMETER_FUN_DECL(z, n, params)                                      \\\n                                                                                    \\\n    BOOST_PP_CAT(BOOST_PARAMETER_FUN_TEMPLATE_HEAD, BOOST_PP_BOOL(n))(n)            \\\n                                                                                    \\\n    BOOST_PP_TUPLE_ELEM(3, 0, params)                                               \\\n        BOOST_PP_TUPLE_ELEM(3, 1, params)(                                          \\\n            BOOST_PP_ENUM_BINARY_PARAMS(n, T, const& p)                             \\\n            BOOST_PP_COMMA_IF(n)                                                    \\\n            BOOST_PARAMETER_MATCH_TYPE(n,BOOST_PP_TUPLE_ELEM(3, 2, params))         \\\n            kw = BOOST_PP_TUPLE_ELEM(3, 2, params)()                                \\\n        )                                                                           \\\n    {                                                                               \\\n        return BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM(3, 1, params), _with_named_params)( \\\n            kw(BOOST_PP_ENUM_PARAMS(n, p))                                          \\\n        );                                                                          \\\n    }\n\n// Generates:\n//\n// template<class Params>\n// ret name ## _with_named_params(Params const&);\n//\n// template<class T0>\n// ret name(T0 const& p0, typename parameters::match<T0>::type kw = parameters())\n// {\n//     return name ## _with_named_params(kw(p0));\n// 
}\n//\n// template<class T0, ..., class TN>\n// ret name(T0 const& p0, ..., TN const& PN\n//    , typename parameters::match<T0, ..., TN>::type kw = parameters())\n// {\n//     return name ## _with_named_params(kw(p0, ..., pN));\n// }\n//\n// template<class Params>\n// ret name ## _with_named_params(Params const&)\n//\n// lo and hi determines the min and max arity of the generated functions.\n\n#define BOOST_PARAMETER_FUN(ret, name, lo, hi, parameters)                          \\\n                                                                                    \\\n    template<class Params>                                                          \\\n    ret BOOST_PP_CAT(name, _with_named_params)(Params const& p);                    \\\n                                                                                    \\\n    BOOST_PP_REPEAT_FROM_TO(                                                        \\\n        lo, BOOST_PP_INC(hi), BOOST_PARAMETER_FUN_DECL, (ret, name, parameters))    \\\n                                                                                    \\\n    template<class Params>                                                          \\\n    ret BOOST_PP_CAT(name, _with_named_params)(Params const& p)\n\n#define BOOST_PARAMETER_MEMFUN(ret, name, lo, hi, parameters)                       \\\n                                                                                    \\\n    BOOST_PP_REPEAT_FROM_TO(                                                        \\\n        lo, BOOST_PP_INC(hi), BOOST_PARAMETER_FUN_DECL, (ret, name, parameters))    \\\n                                                                                    \\\n    template<class Params>                                                          \\\n    ret BOOST_PP_CAT(name, _with_named_params)(Params const& p)\n\n#endif // BOOST_PARAMETER_MACROS_050412_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/match.hpp",
    "content": "// Copyright David Abrahams 2005. Distributed under the Boost\n// Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n#ifndef BOOST_PARAMETER_MATCH_DWA2005714_HPP\n# define BOOST_PARAMETER_MATCH_DWA2005714_HPP\n\n# include <boost/detail/workaround.hpp>\n# include <boost/preprocessor/seq/enum.hpp>\n\n# if BOOST_WORKAROUND(__MWERKS__, <= 0x3003)\n// Temporary version of BOOST_PP_SEQ_ENUM until Paul M. integrates the workaround.\n#  define BOOST_PARAMETER_SEQ_ENUM_I(size,seq) BOOST_PP_CAT(BOOST_PP_SEQ_ENUM_, size) seq\n#  define BOOST_PARAMETER_SEQ_ENUM(seq) BOOST_PARAMETER_SEQ_ENUM_I(BOOST_PP_SEQ_SIZE(seq), seq)\n# else\n#  define BOOST_PARAMETER_SEQ_ENUM(seq) BOOST_PP_SEQ_ENUM(seq)\n# endif \n\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n\n#  include <boost/parameter/config.hpp>\n#  include <boost/parameter/aux_/void.hpp>\n#  include <boost/preprocessor/arithmetic/sub.hpp>\n#  include <boost/preprocessor/facilities/intercept.hpp>\n#  include <boost/preprocessor/repetition/enum_trailing_params.hpp>\n\n#  define BOOST_PARAMETER_MATCH_DEFAULTS(ArgTypes)              \\\n        BOOST_PP_ENUM_TRAILING_PARAMS(                          \\\n            BOOST_PP_SUB(                                       \\\n                BOOST_PARAMETER_MAX_ARITY                       \\\n              , BOOST_PP_SEQ_SIZE(ArgTypes)                     \\\n            )                                                   \\\n          , ::boost::parameter::void_ BOOST_PP_INTERCEPT   \\\n        )\n\n# else\n\n#  define BOOST_PARAMETER_MATCH_DEFAULTS(ArgTypes)\n\n# endif \n\n//\n// Generates, e.g.\n//\n//    typename dfs_params::match<A1,A2>::type name = dfs_params()\n//\n// with workarounds for Borland compatibility.\n//\n\n# define BOOST_PARAMETER_MATCH(ParameterSpec, ArgTypes, name)   \\\n    typename ParameterSpec ::match<                             \\\n        
BOOST_PARAMETER_SEQ_ENUM(ArgTypes)                      \\\n        BOOST_PARAMETER_MATCH_DEFAULTS(ArgTypes)                \\\n    >::type name = ParameterSpec ()\n\n#endif // BOOST_PARAMETER_MATCH_DWA2005714_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/name.hpp",
    "content": "// Copyright Daniel Wallin 2006. Use, modification and distribution is\n// subject to the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_NAME_060806_HPP\n# define BOOST_PARAMETER_NAME_060806_HPP\n\n# include <boost/parameter/keyword.hpp>\n# include <boost/parameter/value_type.hpp>\n# include <boost/detail/workaround.hpp>\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/stringize.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/mpl/placeholders.hpp>\n\n# if !defined(BOOST_NO_SFINAE) \\\n  && !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x592))\n\n#  include <boost/utility/enable_if.hpp>\n#  include <boost/mpl/lambda.hpp>\n\nnamespace boost { namespace parameter { namespace aux {\n\n// Tag type passed to MPL lambda.\nstruct lambda_tag;\n\nstruct name_tag_base\n{};\n\ntemplate <class Tag>\nstruct name_tag\n{};\n\ntemplate <class T>\nstruct is_name_tag\n  : mpl::false_\n{};\n\n}}} // namespace boost::parameter::aux\n\nnamespace boost { namespace mpl {\n\ntemplate <class T>\nstruct lambda<\n    T\n  , typename boost::enable_if<\n        parameter::aux::is_name_tag<T>, parameter::aux::lambda_tag\n    >::type\n>\n{\n    typedef true_ is_le;\n    typedef bind3< quote3<parameter::value_type>, arg<2>, T, void> result_;\n    typedef result_ type;\n};\n\n}} // namespace boost::mpl\n\n# endif\n\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n# include <boost/preprocessor/detail/split.hpp>\n// From Paul Mensonides\n#  define BOOST_PARAMETER_IS_BINARY(x) \\\n    BOOST_PP_SPLIT(1, BOOST_PARAMETER_IS_BINARY_C x BOOST_PP_COMMA() 0) \\\n    /**/\n#  define BOOST_PARAMETER_IS_BINARY_C(x,y) \\\n    ~, 1 BOOST_PP_RPAREN() \\\n    BOOST_PP_TUPLE_EAT(2) BOOST_PP_LPAREN() ~ \\\n    /**/\n# else\n#  include 
<boost/preprocessor/detail/is_binary.hpp>\n#  define BOOST_PARAMETER_IS_BINARY(x) BOOST_PP_IS_BINARY(x)\n# endif\n\n# define BOOST_PARAMETER_BASIC_NAME(tag_namespace, tag, name)       \\\n    namespace tag_namespace                                         \\\n    {                                                               \\\n      struct tag                                                    \\\n      {                                                             \\\n          static char const* keyword_name()                         \\\n          {                                                         \\\n              return BOOST_PP_STRINGIZE(tag);                       \\\n          }                                                         \\\n                                                                    \\\n          typedef boost::parameter::value_type<                     \\\n              boost::mpl::_2, tag, boost::parameter::void_          \\\n          > _;                                                      \\\n                                                                    \\\n          typedef boost::parameter::value_type<                     \\\n              boost::mpl::_2, tag, boost::parameter::void_          \\\n          > _1;                                                     \\\n      };                                                            \\\n    }                                                               \\\n    namespace                                                       \\\n    {                                                               \\\n       ::boost::parameter::keyword<tag_namespace::tag> const& name  \\\n       = ::boost::parameter::keyword<tag_namespace::tag>::instance; \\\n    }\n\n# define BOOST_PARAMETER_COMPLEX_NAME_TUPLE1(tag,namespace)         \\\n    (tag, namespace), ~\n\n# define BOOST_PARAMETER_COMPLEX_NAME_TUPLE(name)                   \\\n    BOOST_PP_TUPLE_ELEM(2, 0, 
(BOOST_PARAMETER_COMPLEX_NAME_TUPLE1 name))\n\n# define BOOST_PARAMETER_COMPLEX_NAME_TAG(name)                     \\\n    BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PARAMETER_COMPLEX_NAME_TUPLE(name))\n\n# define BOOST_PARAMETER_COMPLEX_NAME_NAMESPACE(name)               \\\n    BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PARAMETER_COMPLEX_NAME_TUPLE(name))\n\n# define BOOST_PARAMETER_COMPLEX_NAME(name)                         \\\n    BOOST_PARAMETER_BASIC_NAME(                                     \\\n        BOOST_PARAMETER_COMPLEX_NAME_NAMESPACE(name)                \\\n      , BOOST_PP_TUPLE_EAT(2) name                                  \\\n      , BOOST_PARAMETER_COMPLEX_NAME_TAG(name)                      \\\n    )                                                               \\\n/**/\n\n# define BOOST_PARAMETER_SIMPLE_NAME(name)                          \\\n    BOOST_PARAMETER_BASIC_NAME(tag, name, BOOST_PP_CAT(_, name))\n\n# define BOOST_PARAMETER_NAME(name)                                 \\\n    BOOST_PP_IIF(                                                   \\\n        BOOST_PARAMETER_IS_BINARY(name)                             \\\n      , BOOST_PARAMETER_COMPLEX_NAME                                \\\n      , BOOST_PARAMETER_SIMPLE_NAME                                 \\\n    )(name)                                                         \\\n/**/\n\n\n# define BOOST_PARAMETER_TEMPLATE_KEYWORD(name)                     \\\n    namespace tag                                                   \\\n    {                                                               \\\n      struct name;                                                  \\\n    }                                                               \\\n    template <class T>                                              \\\n    struct name                                                     \\\n      : boost::parameter::template_keyword<tag::name, T>            \\\n    {};                                                      
       \\\n/**/\n\n#endif // BOOST_PARAMETER_NAME_060806_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/parameters.hpp",
    "content": "// Copyright David Abrahams, Daniel Wallin 2003. Use, modification and \n// distribution is subject to the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETERS_031014_HPP\n#define BOOST_PARAMETERS_031014_HPP\n\n#include <boost/detail/is_xxx.hpp>\n\n#include <boost/type_traits/is_const.hpp>\n\n#include <boost/mpl/lambda.hpp>\n#include <boost/mpl/apply.hpp>\n#include <boost/mpl/always.hpp>\n#include <boost/mpl/and.hpp>\n#include <boost/mpl/or.hpp>\n#include <boost/mpl/if.hpp>\n#include <boost/mpl/identity.hpp>\n#include <boost/mpl/not.hpp>\n#include <boost/mpl/eval_if.hpp>\n#include <boost/mpl/pair.hpp>\n\n#include <boost/type_traits/is_same.hpp>\n#include <boost/type_traits/remove_reference.hpp>\n\n#include <boost/preprocessor/repetition/enum.hpp>\n#include <boost/preprocessor/repetition/enum_params.hpp>\n#include <boost/preprocessor/repetition/enum_trailing_params.hpp>\n#include <boost/preprocessor/arithmetic/sub.hpp>\n#include <boost/preprocessor/repetition/repeat.hpp>\n#include <boost/preprocessor/repetition/enum_shifted.hpp>\n#include <boost/preprocessor/repetition/enum_binary_params.hpp>\n#include <boost/preprocessor/repetition/enum_shifted_params.hpp>\n#include <boost/preprocessor/seq/elem.hpp>\n#include <boost/preprocessor/iteration/iterate.hpp>\n#include <boost/preprocessor/facilities/intercept.hpp>\n#include <boost/preprocessor/cat.hpp>\n\n#include <boost/parameter/aux_/arg_list.hpp>\n#include <boost/parameter/aux_/yesno.hpp>\n#include <boost/parameter/aux_/void.hpp>\n#include <boost/parameter/aux_/default.hpp>\n#include <boost/parameter/aux_/unwrap_cv_reference.hpp>\n#include <boost/parameter/aux_/tagged_argument.hpp>\n#include <boost/parameter/aux_/tag.hpp>\n#include <boost/parameter/aux_/template_keyword.hpp>\n#include <boost/parameter/aux_/set.hpp>\n#include <boost/parameter/config.hpp>\n\nnamespace parameter_\n{\n  template 
<class T>\n  struct unmatched_argument\n  {\n      BOOST_MPL_ASSERT((boost::is_same<T,void>));\n      typedef int type;\n  }; \n} // namespace parameter_\n\nnamespace boost {\n\ntemplate<class T> class reference_wrapper;\n\nnamespace parameter {\n\nnamespace aux { struct use_default {}; }\n\n// These templates can be used to describe the treatment of particular\n// named parameters for the purposes of overload elimination with\n// SFINAE, by placing specializations in the parameters<...> list.  In\n// order for a treated function to participate in overload resolution:\n//\n//   - all keyword tags wrapped in required<...> must have a matching\n//     actual argument\n//\n//   - The actual argument type matched by every keyword tag\n//     associated with a predicate must satisfy that predicate\n//\n// If a keyword k is specified without an optional<...> or\n// required<...>, wrapper, it is treated as though optional<k> were\n// specified.\n//\n// If a keyword k is specified with deduced<...>, that keyword\n// will be automatically deduced from the argument list.\n//\ntemplate <class Tag, class Predicate = aux::use_default>\nstruct required\n{\n    typedef Tag key_type;\n    typedef Predicate predicate;\n};\n\ntemplate <class Tag, class Predicate = aux::use_default>\nstruct optional\n{\n    typedef Tag key_type;\n    typedef Predicate predicate;\n};\n\ntemplate <class Tag>\nstruct deduced\n{\n    typedef Tag key_type;\n};\n\nnamespace aux\n{\n  // Defines metafunctions, is_required and is_optional, that\n  // identify required<...>, optional<...> and deduced<...> specializations.\n  BOOST_DETAIL_IS_XXX_DEF(required, required, 2)\n  BOOST_DETAIL_IS_XXX_DEF(optional, optional, 2)\n  BOOST_DETAIL_IS_XXX_DEF(deduced_aux, deduced, 1)\n\n  template <class S>\n  struct is_deduced0\n    : is_deduced_aux<\n          typename S::key_type\n      >::type\n  {};\n\n  template <class S>\n  struct is_deduced\n    : mpl::eval_if<\n          mpl::or_<\n              is_optional<S>, 
is_required<S>\n          >\n        , is_deduced0<S>\n        , mpl::false_\n      >::type\n  {};\n\n  //\n  // key_type, has_default, and predicate --\n  //\n  // These metafunctions accept a ParameterSpec and extract the\n  // keyword tag, whether or not a default is supplied for the\n  // parameter, and the predicate that the corresponding actual\n  // argument type is required match.\n  //\n  // a ParameterSpec is a specialization of either keyword<...>,\n  // required<...>, optional<...>\n  //\n\n  // helper for key_type<...>, below.\n  template <class T>\n  struct get_tag_type0\n  {\n      typedef typename T::key_type type;\n  };\n\n  template <class T>\n  struct get_tag_type\n    : mpl::eval_if<\n          is_deduced_aux<typename T::key_type>\n        , get_tag_type0<typename T::key_type>\n        , mpl::identity<typename T::key_type>\n      >\n  {};\n\n  template <class T>\n  struct tag_type\n    : mpl::eval_if<\n          mpl::or_<\n              is_optional<T>\n            , is_required<T>\n          >\n        , get_tag_type<T>\n        , mpl::identity<T>\n      >\n  {};\n\n  template <class T>\n  struct has_default\n    : mpl::not_<is_required<T> >\n  {};\n\n  // helper for get_predicate<...>, below\n  template <class T>\n  struct get_predicate_or_default\n  {\n      typedef T type;\n  };\n\n  template <>\n  struct get_predicate_or_default<use_default>\n  {\n      typedef mpl::always<mpl::true_> type;\n  };\n\n  // helper for predicate<...>, below\n  template <class T>\n  struct get_predicate\n  {\n      typedef typename\n          get_predicate_or_default<typename T::predicate>::type\n      type;\n  };\n\n  template <class T>\n  struct predicate\n    : mpl::eval_if<\n         mpl::or_<\n              is_optional<T>\n            , is_required<T>\n          >\n        , get_predicate<T>\n        , mpl::identity<mpl::always<mpl::true_> >\n      >\n  {\n  };\n\n\n  // Converts a ParameterSpec into a specialization of\n  // parameter_requirements.  
We need to do this in order to get the\n  // tag_type into the type in a way that can be conveniently matched\n  // by a satisfies(...) member function in arg_list.\n  template <class ParameterSpec>\n  struct as_parameter_requirements\n  {\n      typedef parameter_requirements<\n          typename tag_type<ParameterSpec>::type\n        , typename predicate<ParameterSpec>::type\n        , typename has_default<ParameterSpec>::type\n      > type;\n  };\n\n  template <class T>\n  struct is_named_argument\n    : mpl::or_<\n          is_template_keyword<T>\n        , is_tagged_argument<T>\n      >\n  {};\n  \n  // Returns mpl::true_ iff the given ParameterRequirements are\n  // satisfied by ArgList.\n  template <class ArgList, class ParameterRequirements>\n  struct satisfies\n  {\n#if BOOST_WORKAROUND(BOOST_MSVC, == 1310)\n      // VC7.1 can't handle the sizeof() implementation below,\n      // so we use this instead.\n      typedef typename mpl::apply_wrap3<\n          typename ArgList::binding\n        , typename ParameterRequirements::keyword\n        , void_\n        , mpl::false_\n      >::type bound;\n\n      typedef typename mpl::eval_if<\n          is_same<bound, void_>\n        , typename ParameterRequirements::has_default\n        , mpl::apply_wrap2<\n              typename mpl::lambda<\n                  typename ParameterRequirements::predicate, lambda_tag\n              >::type\n            , bound\n            , ArgList\n          >\n      >::type type;\n#else\n      BOOST_STATIC_CONSTANT(\n          bool, value = (\n              sizeof(\n                  aux::to_yesno(\n                      ArgList::satisfies((ParameterRequirements*)0, (ArgList*)0)\n                  )\n              ) == sizeof(yes_tag)\n          )\n      );\n\n      typedef mpl::bool_<satisfies::value> type;\n#endif\n  };\n\n  // Returns mpl::true_ if the requirements of the given ParameterSpec\n  // are satisfied by ArgList.\n  template <class ArgList, class ParameterSpec>\n  struct 
satisfies_requirements_of\n    : satisfies<\n          ArgList\n        , typename as_parameter_requirements<ParameterSpec>::type\n      >\n  {};\n\n  // Tags a deduced argument Arg with the keyword tag of Spec using TagFn.\n  // Returns the tagged argument and the mpl::set<> UsedArgs with the\n  // tag of Spec inserted.\n  template <class UsedArgs, class Spec, class Arg, class TagFn>\n  struct tag_deduced\n  {\n      typedef mpl::pair<\n          typename mpl::apply_wrap2<TagFn, typename tag_type<Spec>::type, Arg>::type\n        , typename aux::insert_<UsedArgs, typename tag_type<Spec>::type>::type\n      > type;\n  };\n\n  template <\n      class Argument\n    , class ArgumentPack\n    , class DeducedArgs\n    , class UsedArgs\n    , class TagFn\n  >\n  struct deduce_tag;\n\n  // Tag type passed to MPL lambda.\n  struct lambda_tag;\n\n  // Helper for deduce_tag<> below.\n  template <\n      class Argument\n    , class ArgumentPack\n    , class DeducedArgs\n    , class UsedArgs\n    , class TagFn\n  >\n  struct deduce_tag0\n  {\n      typedef typename DeducedArgs::spec spec;\n\n      typedef typename mpl::apply_wrap2<\n          typename mpl::lambda<\n              typename spec::predicate, lambda_tag\n          >::type\n        , Argument\n        , ArgumentPack\n      >::type condition;\n\n      // Deduced parameter matches several arguments.\n\n      BOOST_MPL_ASSERT((\n          mpl::not_<mpl::and_<\n              condition\n            , aux::has_key_<UsedArgs, typename tag_type<spec>::type>\n          > >\n      ));\n\n      typedef typename mpl::eval_if<\n          condition\n        , tag_deduced<UsedArgs, spec, Argument, TagFn>\n        , deduce_tag<Argument, ArgumentPack, typename DeducedArgs::tail, UsedArgs, TagFn>\n      >::type type;\n  };\n\n  // Tries to deduced a keyword tag for a given Argument.\n  // Returns an mpl::pair<> consisting of the tagged_argument<>, \n  // and an mpl::set<> where the new tag has been inserted.\n  //\n  //  Argument: The 
argument type to be tagged.\n  //\n  //  ArgumentPack: The ArgumentPack built so far.\n  //\n  //  DeducedArgs: A specialization of deduced_item<> (see below).\n  //               A list containing only the deduced ParameterSpecs.\n  //\n  //  UsedArgs: An mpl::set<> containing the keyword tags used so far.\n  //\n  //  TagFn: A metafunction class used to tag positional or deduced\n  //         arguments with a keyword tag.\n\n  template <\n      class Argument\n    , class ArgumentPack\n    , class DeducedArgs\n    , class UsedArgs\n    , class TagFn\n  >\n  struct deduce_tag\n  {\n      typedef typename mpl::eval_if<\n          is_same<DeducedArgs, void_>\n        , mpl::pair<void_, UsedArgs>\n        , deduce_tag0<Argument, ArgumentPack, DeducedArgs, UsedArgs, TagFn>\n      >::type type;\n  };\n\n  template <\n      class List\n    , class DeducedArgs\n    , class TagFn\n    , class Positional\n    , class UsedArgs\n    , class ArgumentPack\n    , class Error\n  >\n  struct make_arg_list_aux;\n\n  // Inserts Tagged::key_type into the UserArgs set.\n  // Extra indirection to lazily evaluate Tagged::key_type.\n  template <class UsedArgs, class Tagged>\n  struct insert_tagged\n  {\n      typedef typename aux::insert_<\n          UsedArgs, typename Tagged::key_type\n      >::type type;\n  };\n\n  // Borland needs the insane extra-indirection workaround below\n  // so that it doesn't magically drop the const qualifier from\n  // the argument type.\n\n  template <\n      class List\n    , class DeducedArgs\n    , class TagFn\n    , class Positional\n    , class UsedArgs\n    , class ArgumentPack\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n    , class argument\n#endif\n    , class Error\n  >\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n  struct make_arg_list00\n#else\n  struct make_arg_list0\n#endif\n  {\n#if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n      typedef typename List::arg argument;\n#endif\n      typedef 
typename List::spec parameter_spec;\n      typedef typename tag_type<parameter_spec>::type tag_;\n\n      typedef is_named_argument<argument> is_tagged;\n\n      // If this argument is either explicitly tagged or a deduced\n      // parameter, we turn off positional matching.\n      typedef mpl::and_<\n          mpl::not_<\n              mpl::or_<is_deduced<parameter_spec>, is_tagged> \n          > \n        , Positional\n      > positional;\n\n      // If this parameter is explicitly tagged we add it to the\n      // used-parmeters set. We only really need to add parameters\n      // that are deduced, but we would need a way to check if\n      // a given tag corresponds to a deduced parameter spec.\n      typedef typename mpl::eval_if<\n          is_tagged\n        , insert_tagged<UsedArgs, argument>\n        , mpl::identity<UsedArgs>\n      >::type used_args;\n\n      // If this parameter is neither explicitly tagged, nor\n      // positionally matched; deduce the tag from the deduced\n      // parameter specs.\n      typedef typename mpl::eval_if<\n          mpl::or_<is_tagged, positional>\n        , mpl::pair<void_, used_args>\n        , deduce_tag<argument, ArgumentPack, DeducedArgs, used_args, TagFn>\n      >::type deduced_data;\n\n      // If this parameter is explicitly tagged..\n      typedef typename mpl::eval_if<\n          is_tagged\n        , mpl::identity<argument>                        // .. just use it\n        , mpl::eval_if<                                  // .. else, if positional matching is turned on..\n                positional\n              , mpl::apply_wrap2<TagFn, tag_, argument>  // .. tag it positionally\n              , mpl::first<deduced_data>                 // .. 
else, use the deduced tag\n          >\n      >::type tagged;\n\n      // We build the arg_list incrementally as we go, prepending new\n      // nodes.\n\n      typedef typename mpl::if_<\n          mpl::and_<\n              is_same<Error, void_>\n            , is_same<tagged, void_>\n          >\n        , parameter_::unmatched_argument<argument>\n        , void_\n      >::type error;\n\n      typedef typename mpl::if_<\n          is_same<tagged, void_>\n        , ArgumentPack\n        , arg_list<tagged, ArgumentPack>\n      >::type argument_pack;\n\n      typedef typename make_arg_list_aux<\n          typename List::tail\n        , DeducedArgs\n        , TagFn\n        , positional\n        , typename deduced_data::second\n        , argument_pack\n        , error\n      >::type type;\n  };\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n  template <\n      class List\n    , class DeducedArgs\n    , class TagFn\n    , class Positional\n    , class UsedArgs\n    , class ArgumentPack\n    , class Error\n  >\n  struct make_arg_list0\n  {\n      typedef typename mpl::eval_if<\n          typename List::is_arg_const\n        , make_arg_list00<\n              List\n            , DeducedArgs\n            , TagFn\n            , Positional\n            , UsedArgs\n            , ArgumentPack\n            , typename List::arg const\n            , Error\n          >\n        , make_arg_list00<\n              List\n            , DeducedArgs\n            , TagFn\n            , Positional\n            , UsedArgs\n            , ArgumentPack\n            , typename List::arg\n            , Error\n          >\n      >::type type;\n  };\n#endif\n\n  // Returns an ArgumentPack where the list of arguments has\n  // been tagged with keyword tags.\n  //\n  //   List: A specialization of item<> (see below). 
Contains\n  //         both the ordered ParameterSpecs, and the given arguments.\n  //\n  //   DeducedArgs: A specialization of deduced_item<> (see below).\n  //                A list containing only the deduced ParameterSpecs.\n  //\n  //   TagFn: A metafunction class used to tag positional or deduced\n  //          arguments with a keyword tag.\n  //\n  //   Position: An mpl::bool_<> specialization indicating if positional\n  //             matching is to be performed.\n  //\n  //   DeducedSet: An mpl::set<> containing the keyword tags used so far.\n  //\n  //   ArgumentPack: The ArgumentPack built so far. This is initially an\n  //                 empty_arg_list and is built incrementally.\n  //\n\n  template <\n      class List\n    , class DeducedArgs\n    , class TagFn\n    , class Positional\n    , class DeducedSet\n    , class ArgumentPack\n    , class Error\n  >\n  struct make_arg_list_aux\n  {\n      typedef typename mpl::eval_if<\n          is_same<List, void_>\n        , mpl::identity<mpl::pair<ArgumentPack, Error> >\n        , make_arg_list0<List, DeducedArgs, TagFn, Positional, DeducedSet, ArgumentPack, Error>\n      >::type type;\n  };\n\n  // VC6.5 was choking on the default parameters for make_arg_list_aux, so\n  // this just forwards to that adding in the defaults.\n  template <\n      class List\n    , class DeducedArgs\n    , class TagFn\n    , class EmitErrors = mpl::true_\n  >\n  struct make_arg_list\n  {\n      typedef typename make_arg_list_aux<\n          List, DeducedArgs, TagFn, mpl::true_, aux::set0, empty_arg_list, void_\n      >::type type;\n  };\n\n  // A parameter spec item typelist.\n  template <class Spec, class Arg, class Tail = void_>\n  struct item\n  {\n      typedef Spec spec;\n\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n      typedef is_const<Arg> is_arg_const;\n#endif\n\n      typedef Arg arg;\n      typedef Tail tail;\n  };\n\n  template <class Spec, class Arg, class Tail>\n  struct make_item\n  {\n      
typedef item<Spec, Arg, typename Tail::type> type;\n  };\n\n  // Creates a item typelist.\n  template <class Spec, class Arg, class Tail>\n  struct make_items\n  {\n      typedef typename mpl::eval_if<\n          is_same<Arg, void_>\n        , mpl::identity<void_>\n        , make_item<Spec, Arg, Tail>\n      >::type type;\n  };\n\n  // A typelist that stored deduced parameter specs.\n  template <class ParameterSpec, class Tail = void_>\n  struct deduced_item\n  {\n      typedef ParameterSpec spec;\n      typedef Tail tail;\n  };\n\n  // Evaluate Tail and construct deduced_item list.\n  template <class Spec, class Tail>\n  struct make_deduced_item\n  {\n      typedef deduced_item<Spec, typename Tail::type> type;\n  };\n\n  template <class Spec, class Tail>\n  struct make_deduced_items\n  {\n      typedef typename mpl::eval_if<\n          is_same<Spec, void_>\n        , mpl::identity<void_>\n        , mpl::eval_if<\n              is_deduced<Spec>\n            , make_deduced_item<Spec, Tail>\n            , Tail\n          >\n      >::type type;\n  };\n\n  // Generates:\n  //\n  //   make<\n  //       parameter_spec#0, argument_type#0\n  //     , make<\n  //           parameter_spec#1, argument_type#1\n  //         , ... 
mpl::identity<aux::empty_arg_list>\n  //    ...>\n  //   >\n#define BOOST_PARAMETER_make_arg_list(z, n, names)      \\\n      BOOST_PP_SEQ_ELEM(0,names)<                       \\\n          BOOST_PP_CAT(BOOST_PP_SEQ_ELEM(1,names), n),  \\\n          BOOST_PP_CAT(BOOST_PP_SEQ_ELEM(2,names), n), \n\n#define BOOST_PARAMETER_right_angle(z, n, text) >\n\n#define BOOST_PARAMETER_build_arg_list(n, make, parameter_spec, argument_type)      \\\n  BOOST_PP_REPEAT(                                                                  \\\n      n, BOOST_PARAMETER_make_arg_list, (make)(parameter_spec)(argument_type))      \\\n      mpl::identity<void_>                                                          \\\n  BOOST_PP_REPEAT(n, BOOST_PARAMETER_right_angle, _)\n\n#define BOOST_PARAMETER_make_deduced_list(z, n, names)  \\\n      BOOST_PP_SEQ_ELEM(0,names)<                       \\\n          BOOST_PP_CAT(BOOST_PP_SEQ_ELEM(1,names), n),\n\n#define BOOST_PARAMETER_build_deduced_list(n, make, parameter_spec)                 \\\n  BOOST_PP_REPEAT(                                                                  \\\n      n, BOOST_PARAMETER_make_deduced_list, (make)(parameter_spec))                 \\\n  mpl::identity<void_>                                                              \\\n  BOOST_PP_REPEAT(n, BOOST_PARAMETER_right_angle, _)\n\n  struct tag_keyword_arg\n  {\n      template <class K, class T>\n      struct apply\n        : tag<K,T>\n      {};\n  };\n\n  struct tag_template_keyword_arg\n  {\n      template <class K, class T>\n      struct apply\n      {\n          typedef template_keyword<K,T> type;\n      };\n  };\n\n} // namespace aux\n\n#define BOOST_PARAMETER_FORWARD_TYPEDEF(z, i, names) \\\n    typedef BOOST_PP_CAT(BOOST_PP_SEQ_ELEM(0,names),i) BOOST_PP_CAT(BOOST_PP_SEQ_ELEM(1,names),i);\n\n#define BOOST_PARAMETER_FORWARD_TYPEDEFS(n, src, dest) \\\n    BOOST_PP_REPEAT(n, BOOST_PARAMETER_FORWARD_TYPEDEF, (src)(dest))\n\n\n#define BOOST_PARAMETER_TEMPLATE_ARGS(z, n, 
text) class BOOST_PP_CAT(PS, n) = void_\n\ntemplate<\n     class PS0\n   , BOOST_PP_ENUM_SHIFTED(BOOST_PARAMETER_MAX_ARITY, BOOST_PARAMETER_TEMPLATE_ARGS, _)\n>\nstruct parameters\n{\n#undef BOOST_PARAMETER_TEMPLATE_ARGS\n\n    typedef typename BOOST_PARAMETER_build_deduced_list(\n        BOOST_PARAMETER_MAX_ARITY, aux::make_deduced_items, PS\n    )::type deduced_list;\n\n    // if the elements of NamedList match the criteria of overload\n    // resolution, returns a type which can be constructed from\n    // parameters.  Otherwise, this is not a valid metafunction (no nested\n    // ::type).\n\n\n#if ! defined(BOOST_NO_SFINAE) && ! BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x592))\n    // If NamedList satisfies the PS0, PS1, ..., this is a\n    // metafunction returning parameters.  Otherwise it \n    // has no nested ::type.\n    template <class ArgumentPackAndError>\n    struct match_base\n      : mpl::if_<\n            // mpl::and_<\n            //    aux::satisfies_requirements_of<NamedList,PS0>\n            //  , mpl::and_<\n            //       aux::satisfies_requirements_of<NamedList,PS1>...\n            //           ..., mpl::true_\n            // ...> >\n            \n# define BOOST_PARAMETER_satisfies(z, n, text)                                      \\\n            mpl::and_<                                                              \\\n                aux::satisfies_requirements_of<                                     \\\n                    typename mpl::first<ArgumentPackAndError>::type                 \\\n                  , BOOST_PP_CAT(PS, n)>                                            \\\n                  ,\n            mpl::and_<\n                is_same<typename mpl::second<ArgumentPackAndError>::type, void_>\n              , BOOST_PP_REPEAT(BOOST_PARAMETER_MAX_ARITY, BOOST_PARAMETER_satisfies, _)\n                mpl::true_\n                BOOST_PP_REPEAT(BOOST_PARAMETER_MAX_ARITY, BOOST_PARAMETER_right_angle, _)\n            >\n\n# 
undef BOOST_PARAMETER_satisfies\n\n          , mpl::identity<parameters>\n          , void_\n        >\n    {};\n#endif\n    \n    // Specializations are to be used as an optional argument to\n    // eliminate overloads via SFINAE\n    template<\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n        // Borland simply can't handle default arguments in member\n        // class templates.  People wishing to write portable code can\n        // explicitly specify BOOST_PARAMETER_MAX_ARITY arguments\n        BOOST_PP_ENUM_PARAMS(BOOST_PARAMETER_MAX_ARITY, class A)\n#else \n        BOOST_PP_ENUM_BINARY_PARAMS(\n            BOOST_PARAMETER_MAX_ARITY, class A, = void_ BOOST_PP_INTERCEPT\n        )\n#endif\n    >\n    struct match\n# if ! defined(BOOST_NO_SFINAE) && ! BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x592))\n      : match_base<\n            typename aux::make_arg_list<\n                typename BOOST_PARAMETER_build_arg_list(\n                    BOOST_PARAMETER_MAX_ARITY, aux::make_items, PS, A\n                )::type\n              , deduced_list\n              , aux::tag_keyword_arg\n              , mpl::false_ // Don't emit errors when doing SFINAE\n            >::type\n        >::type\n    {};\n# else\n    { \n        typedef parameters<\n            BOOST_PP_ENUM_PARAMS(BOOST_PARAMETER_MAX_ARITY, PS)\n        > type; \n    };\n# endif\n\n    // Metafunction that returns an ArgumentPack.\n\n    // TODO, bind has to instantiate the error type in the result\n    // of make_arg_list.\n\n    template <\n#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n        // Borland simply can't handle default arguments in member\n        // class templates.  
People wishing to write portable code can\n        // explicitly specify BOOST_PARAMETER_MAX_ARITY arguments\n        BOOST_PP_ENUM_PARAMS(BOOST_PARAMETER_MAX_ARITY, class A)\n#else \n        BOOST_PP_ENUM_BINARY_PARAMS(\n            BOOST_PARAMETER_MAX_ARITY, class A, = void_ BOOST_PP_INTERCEPT\n        )\n#endif            \n    >\n    struct bind\n    {\n        typedef typename aux::make_arg_list<\n            typename BOOST_PARAMETER_build_arg_list(\n                BOOST_PARAMETER_MAX_ARITY, aux::make_items, PS, A\n            )::type\n          , deduced_list\n          , aux::tag_template_keyword_arg\n        >::type result;\n\n        typedef typename mpl::first<result>::type type;\n    };\n\n    BOOST_PARAMETER_FORWARD_TYPEDEFS(BOOST_PARAMETER_MAX_ARITY, PS, parameter_spec)\n\n    //\n    // The function call operator is used to build an arg_list that\n    // labels the positional parameters and maintains whatever other\n    // tags may have been specified by the caller.\n    //\n    // !!!NOTE!!!\n    //\n    // The make_arg_list<> produces a reversed arg_list, so\n    // we need to pass the arguments to its constructor\n    // reversed.\n    //\n    aux::empty_arg_list operator()() const\n    {\n       return aux::empty_arg_list();\n    }\n\n    template<class A0>\n    typename mpl::first<\n        typename aux::make_arg_list<\n            aux::item<\n                PS0,A0\n            >\n          , deduced_list\n          , aux::tag_keyword_arg\n        >::type\n    >::type\n    operator()(A0& a0) const\n    {\n        typedef typename aux::make_arg_list<\n            aux::item<\n                PS0,A0\n            >\n          , deduced_list\n          , aux::tag_keyword_arg\n        >::type result;\n\n        typedef typename mpl::first<result>::type result_type;\n        typedef typename mpl::second<result>::type error;\n        error();\n\n        return result_type(\n            a0\n            // , void_(), void_(), void_() ...\n            
BOOST_PP_ENUM_TRAILING_PARAMS(\n                BOOST_PP_SUB(BOOST_PARAMETER_MAX_ARITY, 1)\n              , aux::void_reference() BOOST_PP_INTERCEPT)\n        );\n    }\n\n    template<class A0, class A1>\n    typename mpl::first<\n        typename aux::make_arg_list<\n            aux::item<\n                PS0,A0\n              , aux::item<\n                    PS1,A1\n                >\n            >\n          , deduced_list\n          , aux::tag_keyword_arg\n        >::type\n    >::type\n    operator()(A0& a0, A1& a1) const\n    {\n        typedef typename aux::make_arg_list<\n            aux::item<\n                PS0,A0\n              , aux::item<\n                    PS1,A1\n                >\n            >\n          , deduced_list\n          , aux::tag_keyword_arg\n        >::type result;\n\n        typedef typename mpl::first<result>::type result_type;\n        typedef typename mpl::second<result>::type error;\n        error();\n\n        return result_type(\n            a1,a0\n            // , void_(), void_() ...\n            BOOST_PP_ENUM_TRAILING_PARAMS(\n                BOOST_PP_SUB(BOOST_PARAMETER_MAX_ARITY, 2)\n              , aux::void_reference() BOOST_PP_INTERCEPT)\n        );\n    }\n\n    // Higher arities are handled by the preprocessor\n#define BOOST_PP_ITERATION_PARAMS_1 (3,( \\\n        3,BOOST_PARAMETER_MAX_ARITY,<boost/parameter/aux_/overloads.hpp> \\\n    ))\n#include BOOST_PP_ITERATE()\n\n};\n\n} // namespace parameter\n\n} // namespace boost\n\n#endif // BOOST_PARAMETERS_031014_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/preprocessor.hpp",
    "content": "// Copyright Daniel Wallin 2006. Use, modification and distribution is\n// subject to the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_PREPROCESSOR_060206_HPP\n# define BOOST_PARAMETER_PREPROCESSOR_060206_HPP\n\n# include <boost/parameter/parameters.hpp>\n# include <boost/parameter/binding.hpp>\n# include <boost/parameter/match.hpp>\n\n# include <boost/parameter/aux_/parenthesized_type.hpp>\n# include <boost/parameter/aux_/cast.hpp>\n# include <boost/parameter/aux_/preprocessor/flatten.hpp>\n\n# include <boost/preprocessor/repetition/repeat_from_to.hpp>\n# include <boost/preprocessor/comparison/equal.hpp>\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/control/expr_if.hpp>\n# include <boost/preprocessor/repetition/enum_params.hpp>\n# include <boost/preprocessor/repetition/enum_binary_params.hpp>\n# include <boost/preprocessor/repetition/enum_trailing.hpp>\n# include <boost/preprocessor/seq/first_n.hpp>\n# include <boost/preprocessor/seq/for_each_product.hpp>\n# include <boost/preprocessor/seq/for_each_i.hpp> \n# include <boost/preprocessor/tuple/elem.hpp> \n# include <boost/preprocessor/tuple/eat.hpp>\n# include <boost/preprocessor/seq/fold_left.hpp>\n# include <boost/preprocessor/seq/push_back.hpp>\n# include <boost/preprocessor/seq/size.hpp>\n# include <boost/preprocessor/seq/enum.hpp>\n# include <boost/preprocessor/seq/push_back.hpp>\n\n# include <boost/preprocessor/detail/is_nullary.hpp>\n\n# include <boost/mpl/always.hpp>\n# include <boost/mpl/apply_wrap.hpp>\n\nnamespace boost { namespace parameter { namespace aux {\n\n#  if ! defined(BOOST_NO_SFINAE) && ! 
BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x592))\n\n// Given Match, which is \"void x\" where x is an argument matching\n// criterion, extract a corresponding MPL predicate.\ntemplate <class Match>\nstruct unwrap_predicate;\n\n// Match anything\ntemplate <>\nstruct unwrap_predicate<void*>\n{\n    typedef mpl::always<mpl::true_> type;\n};\n\n#if BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x580))\n\ntypedef void* voidstar;\n\n// A matching predicate is explicitly specified\ntemplate <class Predicate>\nstruct unwrap_predicate<voidstar (Predicate)>\n{\n    typedef Predicate type;\n};\n\n#else\n\n// A matching predicate is explicitly specified\ntemplate <class Predicate>\nstruct unwrap_predicate<void *(Predicate)>\n{\n    typedef Predicate type;\n};\n\n#endif \n\n\n// A type to which the argument is supposed to be convertible is\n// specified\ntemplate <class Target>\nstruct unwrap_predicate<void (Target)>\n{\n    typedef is_convertible<mpl::_, Target> type;\n};\n\n// Recast the ParameterSpec's nested match metafunction as a free metafunction\ntemplate <\n    class Parameters\n  , BOOST_PP_ENUM_BINARY_PARAMS(\n        BOOST_PARAMETER_MAX_ARITY, class A, = boost::parameter::void_ BOOST_PP_INTERCEPT\n    )\n>\nstruct match\n  : Parameters::template match<\n        BOOST_PP_ENUM_PARAMS(BOOST_PARAMETER_MAX_ARITY, A)\n    >\n{};\n# endif \n\n# undef false_\n\ntemplate <\n    class Parameters\n  , BOOST_PP_ENUM_BINARY_PARAMS(\n        BOOST_PARAMETER_MAX_ARITY, class A, = boost::parameter::void_ BOOST_PP_INTERCEPT\n    )\n>\nstruct argument_pack\n{\n    typedef typename make_arg_list<\n        typename BOOST_PARAMETER_build_arg_list(\n            BOOST_PARAMETER_MAX_ARITY, make_items, typename Parameters::parameter_spec, A\n        )::type\n      , typename Parameters::deduced_list\n      , tag_keyword_arg\n      , mpl::false_\n    >::type result;\n    typedef typename mpl::first<result>::type type;\n};\n\n// Works around VC6 problem where it won't accept 
rvalues.\ntemplate <class T>\nT& as_lvalue(T& value, long)\n{\n    return value;\n}\n\ntemplate <class T>\nT const& as_lvalue(T const& value, int)\n{\n    return value;\n}\n\n\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n\ntemplate <class Predicate, class T, class Args>\nstruct apply_predicate\n{\n    BOOST_MPL_ASSERT((\n        mpl::and_<mpl::false_,T>\n    ));\n\n    typedef typename mpl::if_<\n        typename mpl::apply2<Predicate,T,Args>::type\n      , char\n      , int\n    >::type type;\n};\n\ntemplate <class P>\nstruct funptr_predicate\n{\n    static P p;\n\n    template <class T, class Args, class P0>\n    static typename apply_predicate<P0,T,Args>::type\n    check_predicate(type<T>, Args*, void**(*)(P0));\n\n    template <class T, class Args, class P0>\n    static typename mpl::if_<\n        is_convertible<T,P0>\n      , char\n      , int\n     >::type check_predicate(type<T>, Args*, void*(*)(P0));\n\n    template <class T, class Args>\n    struct apply\n    {\n        BOOST_STATIC_CONSTANT(bool, result = \n            sizeof(check_predicate(boost::type<T>(), (Args*)0, &p)) == 1\n        );\n\n        typedef mpl::bool_<apply<T,Args>::result> type;\n    };\n};\n\ntemplate <>\nstruct funptr_predicate<void**>\n  : mpl::always<mpl::true_>\n{};\n\n# endif\n\n}}} // namespace boost::parameter::aux\n\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n// From Paul Mensonides\n#  define BOOST_PARAMETER_IS_NULLARY(x) \\\n    BOOST_PP_SPLIT(1, BOOST_PARAMETER_IS_NULLARY_C x BOOST_PP_COMMA() 0) \\\n    /**/\n#  define BOOST_PARAMETER_IS_NULLARY_C() \\\n    ~, 1 BOOST_PP_RPAREN() \\\n    BOOST_PP_TUPLE_EAT(2) BOOST_PP_LPAREN() ~ \\\n    /**/\n# else\n#  define BOOST_PARAMETER_IS_NULLARY(x) BOOST_PP_IS_NULLARY(x)\n# endif\n\n# define BOOST_PARAMETER_MEMBER_FUNCTION_CHECK_STATIC_static ()\n# define BOOST_PARAMETER_MEMBER_FUNCTION_IS_STATIC(name) \\\n    BOOST_PARAMETER_IS_NULLARY( \\\n        
BOOST_PP_CAT(BOOST_PARAMETER_MEMBER_FUNCTION_CHECK_STATIC_,name) \\\n    )\n\n# if !defined(BOOST_MSVC)\n#  define BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC_static\n#  define BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC(name) \\\n    BOOST_PP_CAT(BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC_, name)\n# else\n// Workaround for MSVC preprocessor.\n//\n// When stripping static from \"static f\", msvc will produce\n// \" f\". The leading whitespace doesn't go away when pasting\n// the token with something else, so this thing is a hack to\n// strip the whitespace.\n#  define BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC_static (\n#  define BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC_AUX(name) \\\n    BOOST_PP_CAT(BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC_, name))\n#  define BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC(name) \\\n    BOOST_PP_SEQ_HEAD( \\\n        BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC_AUX(name) \\\n    )\n# endif\n\n# define BOOST_PARAMETER_MEMBER_FUNCTION_STATIC(name) \\\n    BOOST_PP_EXPR_IF( \\\n        BOOST_PARAMETER_MEMBER_FUNCTION_IS_STATIC(name) \\\n      , static \\\n    )\n\n# define BOOST_PARAMETER_MEMBER_FUNCTION_NAME(name) \\\n    BOOST_PP_IF( \\\n        BOOST_PARAMETER_MEMBER_FUNCTION_IS_STATIC(name) \\\n      , BOOST_PARAMETER_MEMBER_FUNCTION_STRIP_STATIC \\\n      , name BOOST_PP_TUPLE_EAT(1) \\\n    )(name)\n\n// Calculates [begin, end) arity range.\n\n# define BOOST_PARAMETER_ARITY_RANGE_M_optional(state) state\n# define BOOST_PARAMETER_ARITY_RANGE_M_deduced_optional(state) state\n# define BOOST_PARAMETER_ARITY_RANGE_M_required(state) BOOST_PP_INC(state)\n# define BOOST_PARAMETER_ARITY_RANGE_M_deduced_required(state) BOOST_PP_INC(state)\n\n# define BOOST_PARAMETER_ARITY_RANGE_M(s, state, x) \\\n    BOOST_PP_CAT( \\\n        BOOST_PARAMETER_ARITY_RANGE_M_ \\\n      , BOOST_PARAMETER_FN_ARG_QUALIFIER(x) \\\n    )(state)\n/**/\n\n# define BOOST_PARAMETER_ARITY_RANGE(args) \\\n    ( \\\n        
BOOST_PP_SEQ_FOLD_LEFT(BOOST_PARAMETER_ARITY_RANGE_M, 0, args) \\\n      , BOOST_PP_INC(BOOST_PP_SEQ_SIZE(args)) \\\n    )\n/**/\n\n// Accessor macros for the argument specs tuple.\n# define BOOST_PARAMETER_FN_ARG_QUALIFIER(x) \\\n    BOOST_PP_TUPLE_ELEM(4,0,x)\n/**/\n\n# define BOOST_PARAMETER_FN_ARG_NAME(x) \\\n    BOOST_PP_TUPLE_ELEM(4,1,x)\n/**/\n\n# define BOOST_PARAMETER_FN_ARG_PRED(x) \\\n    BOOST_PP_TUPLE_ELEM(4,2,x)\n/**/\n\n# define BOOST_PARAMETER_FN_ARG_DEFAULT(x) \\\n    BOOST_PP_TUPLE_ELEM(4,3,x)\n/**/\n\n# define BOOST_PARAMETETER_FUNCTION_EAT_KEYWORD_QUALIFIER_out(x)\n# define BOOST_PARAMETETER_FUNCTION_EAT_KEYWORD_QUALIFIER_in_out(x)\n\n// Returns 1 if x is either \"out(k)\" or \"in_out(k)\".\n# define BOOST_PARAMETER_FUNCTION_IS_KEYWORD_QUALIFIER(x) \\\n    BOOST_PP_IS_EMPTY( \\\n        BOOST_PP_CAT(BOOST_PARAMETETER_FUNCTION_EAT_KEYWORD_QUALIFIER_, x) \\\n    ) \\\n/**/\n\n# define BOOST_PARAMETETER_FUNCTION_GET_KEYWORD_QUALIFIER_out(x) x\n# define BOOST_PARAMETETER_FUNCTION_GET_KEYWORD_QUALIFIER_in_out(x) x\n# define BOOST_PARAMETER_FUNCTION_KEYWORD_GET(x) \\\n    BOOST_PP_CAT(BOOST_PARAMETETER_FUNCTION_GET_KEYWORD_QUALIFIER_, x)\n/**/\n\n// Returns the keyword of x, where x is either a keyword qualifier\n// or a keyword.\n//\n//   k => k\n//   out(k) => k\n//   in_out(k) => k\n//\n# define BOOST_PARAMETER_FUNCTION_KEYWORD(x) \\\n    BOOST_PP_IF( \\\n        BOOST_PARAMETER_FUNCTION_IS_KEYWORD_QUALIFIER(x) \\\n      , BOOST_PARAMETER_FUNCTION_KEYWORD_GET \\\n      , x BOOST_PP_TUPLE_EAT(1) \\\n    )(x)\n/**/\n\n# define BOOST_PARAMETER_FN_ARG_KEYWORD(x) \\\n    BOOST_PARAMETER_FUNCTION_KEYWORD( \\\n        BOOST_PARAMETER_FN_ARG_NAME(x) \\\n    )\n\n// Builds forwarding functions.\n\n# define BOOST_PARAMETER_FUNCTION_FWD_FUNCTION_TEMPLATE_Z(z, n) \\\n    template<BOOST_PP_ENUM_PARAMS_Z(z, n, class ParameterArgumentType)>\n/**/\n\n# if ! defined(BOOST_NO_SFINAE) && ! 
BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x592))\n#  define BOOST_PARAMETER_FUNCTION_FWD_MATCH_Z(z, name, parameters, n) \\\n    , typename boost::parameter::aux::match< \\\n          parameters, BOOST_PP_ENUM_PARAMS(n, ParameterArgumentType) \\\n      >::type = parameters()\n# else\n#  define BOOST_PARAMETER_FUNCTION_FWD_MATCH_Z(z, name, parameters, n)\n# endif\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_PARAMETERS_NAME(base) \\\n    BOOST_PP_CAT( \\\n        boost_param_parameters_ \\\n      , BOOST_PP_CAT(__LINE__, BOOST_PARAMETER_MEMBER_FUNCTION_NAME(base)) \\\n    )\n\n// Produce a name for a result type metafunction for the function\n// named base\n# define BOOST_PARAMETER_FUNCTION_RESULT_NAME(base) \\\n    BOOST_PP_CAT( \\\n        boost_param_result_ \\\n      , BOOST_PP_CAT(__LINE__,BOOST_PARAMETER_MEMBER_FUNCTION_NAME(base)) \\\n    )\n\n// Can't do boost_param_impl_ ## basee because base might start with an underscore\n// daniel: what? how is that relevant? the reason for using CAT() is to make sure\n// base is expanded. 
i'm not sure we need to here, but it's more stable to do it.\n# define BOOST_PARAMETER_IMPL(base) \\\n    BOOST_PP_CAT(boost_param_impl,BOOST_PARAMETER_MEMBER_FUNCTION_NAME(base))\n\n# define BOOST_PARAMETER_FUNCTION_FWD_FUNCTION00(z, n, r, data, elem) \\\n    BOOST_PP_IF( \\\n        n \\\n      , BOOST_PARAMETER_FUNCTION_FWD_FUNCTION_TEMPLATE_Z, BOOST_PP_TUPLE_EAT(2) \\\n    )(z,n) \\\n    BOOST_PARAMETER_MEMBER_FUNCTION_STATIC(BOOST_PP_TUPLE_ELEM(7,3,data)) \\\n    inline \\\n    BOOST_PP_EXPR_IF(n, typename) \\\n        BOOST_PARAMETER_FUNCTION_RESULT_NAME(BOOST_PP_TUPLE_ELEM(7,3,data))<   \\\n        BOOST_PP_EXPR_IF(n, typename) \\\n        boost::parameter::aux::argument_pack< \\\n            BOOST_PARAMETER_FUNCTION_PARAMETERS_NAME(BOOST_PP_TUPLE_ELEM(7,3,data)) \\\n            BOOST_PP_COMMA_IF(n) \\\n            BOOST_PP_IF( \\\n                n, BOOST_PP_SEQ_ENUM, BOOST_PP_TUPLE_EAT(1) \\\n            )(elem) \\\n        >::type \\\n    >::type \\\n    BOOST_PARAMETER_MEMBER_FUNCTION_NAME(BOOST_PP_TUPLE_ELEM(7,3,data))( \\\n        BOOST_PP_IF( \\\n            n \\\n          , BOOST_PP_SEQ_FOR_EACH_I_R \\\n          , BOOST_PP_TUPLE_EAT(4) \\\n        )( \\\n            r \\\n          , BOOST_PARAMETER_FUNCTION_ARGUMENT \\\n          , ~ \\\n          , elem \\\n        ) \\\n        BOOST_PP_IF(n, BOOST_PARAMETER_FUNCTION_FWD_MATCH_Z, BOOST_PP_TUPLE_EAT(4))( \\\n            z \\\n          , BOOST_PP_TUPLE_ELEM(7,3,data) \\\n          , BOOST_PARAMETER_FUNCTION_PARAMETERS_NAME(BOOST_PP_TUPLE_ELEM(7,3,data)) \\\n          , n \\\n        ) \\\n    ) BOOST_PP_EXPR_IF(BOOST_PP_TUPLE_ELEM(7,4,data), const) \\\n    { \\\n        return BOOST_PARAMETER_IMPL(BOOST_PP_TUPLE_ELEM(7,3,data))( \\\n            BOOST_PARAMETER_FUNCTION_PARAMETERS_NAME(BOOST_PP_TUPLE_ELEM(7,3,data))()( \\\n                BOOST_PP_ENUM_PARAMS_Z(z, n, a) \\\n            ) \\\n        ); \\\n    }\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_FUNCTION0(r, data, elem) \\\n    
BOOST_PARAMETER_FUNCTION_FWD_FUNCTION00( \\\n        BOOST_PP_TUPLE_ELEM(7,0,data) \\\n      , BOOST_PP_TUPLE_ELEM(7,1,data) \\\n      , r \\\n      , data \\\n      , elem \\\n    )\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_FUNCTION_ARITY_0(z, n, data) \\\n    BOOST_PARAMETER_FUNCTION_FWD_FUNCTION00( \\\n        z, n, BOOST_PP_DEDUCE_R() \\\n      , (z, n, BOOST_PP_TUPLE_REM(5) data) \\\n      , ~ \\\n    )\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_FUNCTION_ARITY_N(z, n, data) \\\n    BOOST_PP_SEQ_FOR_EACH( \\\n        BOOST_PARAMETER_FUNCTION_FWD_FUNCTION0 \\\n      , (z, n, BOOST_PP_TUPLE_REM(5) data) \\\n      , BOOST_PP_SEQ_FOR_EACH_PRODUCT( \\\n            BOOST_PARAMETER_FUNCTION_FWD_PRODUCT \\\n          , BOOST_PP_SEQ_FIRST_N( \\\n                n, BOOST_PP_TUPLE_ELEM(5,3,data) \\\n            ) \\\n        ) \\\n    )\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_FUNCTION(z, n, data) \\\n    BOOST_PP_IF( \\\n        n \\\n      , BOOST_PARAMETER_FUNCTION_FWD_FUNCTION_ARITY_N \\\n      , BOOST_PARAMETER_FUNCTION_FWD_FUNCTION_ARITY_0 \\\n    )(z,n,data) \\\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_FUNCTIONS0( \\\n    result,name,args,const_,combinations,range \\\n) \\\n    BOOST_PP_REPEAT_FROM_TO( \\\n        BOOST_PP_TUPLE_ELEM(2,0,range), BOOST_PP_TUPLE_ELEM(2,1,range) \\\n      , BOOST_PARAMETER_FUNCTION_FWD_FUNCTION \\\n      , (result,name,const_,combinations,BOOST_PP_TUPLE_ELEM(2,1,range)) \\\n    )\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_FUNCTIONS(result,name,args, const_, combinations) \\\n    BOOST_PARAMETER_FUNCTION_FWD_FUNCTIONS0( \\\n        result, name, args, const_, combinations, BOOST_PARAMETER_ARITY_RANGE(args) \\\n    )\n/**/\n\n// Builds boost::parameter::parameters<> specialization\n#  define BOOST_PARAMETER_FUNCTION_PARAMETERS_QUALIFIER_optional(tag) \\\n    optional<tag\n\n#  define BOOST_PARAMETER_FUNCTION_PARAMETERS_QUALIFIER_required(tag) \\\n    required<tag\n\n#  define 
BOOST_PARAMETER_FUNCTION_PARAMETERS_QUALIFIER_deduced_optional(tag) \\\n    optional<boost::parameter::deduced<tag>\n\n#  define BOOST_PARAMETER_FUNCTION_PARAMETERS_QUALIFIER_deduced_required(tag) \\\n    required<boost::parameter::deduced<tag>\n\n# if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n\n#  define BOOST_PARAMETER_FUNCTION_PARAMETERS_M(r,tag_namespace,i,elem) \\\n    BOOST_PP_COMMA_IF(i) \\\n    boost::parameter::BOOST_PP_CAT( \\\n        BOOST_PARAMETER_FUNCTION_PARAMETERS_QUALIFIER_ \\\n      , BOOST_PARAMETER_FN_ARG_QUALIFIER(elem) \\\n    )( \\\n        tag_namespace::BOOST_PARAMETER_FUNCTION_KEYWORD( \\\n            BOOST_PARAMETER_FN_ARG_KEYWORD(elem) \\\n        ) \\\n    ) \\\n      , typename boost::parameter::aux::unwrap_predicate< \\\n            void BOOST_PARAMETER_FN_ARG_PRED(elem) \\\n        >::type \\\n    >\n# elif BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n#  define BOOST_PARAMETER_FUNCTION_PARAMETERS_M(r,tag_namespace,i,elem) \\\n    BOOST_PP_COMMA_IF(i) \\\n    boost::parameter::BOOST_PP_CAT( \\\n        BOOST_PARAMETER_FUNCTION_PARAMETERS_QUALIFIER_ \\\n      , BOOST_PARAMETER_FN_ARG_QUALIFIER(elem) \\\n    )( \\\n        tag_namespace::BOOST_PARAMETER_FUNCTION_KEYWORD( \\\n            BOOST_PARAMETER_FN_ARG_KEYWORD(elem) \\\n        ) \\\n    ) \\\n      , boost::mpl::always<boost::mpl::true_> \\\n    >\n# endif\n\n# define BOOST_PARAMETER_FUNCTION_PARAMETERS(tag_namespace, base, args)             \\\n    template <class BoostParameterDummy>                                            \\\n    struct BOOST_PP_CAT(                                                            \\\n            BOOST_PP_CAT(boost_param_params_, __LINE__)                             \\\n          , BOOST_PARAMETER_MEMBER_FUNCTION_NAME(base)                              \\\n    ) : boost::parameter::parameters<                                               \\\n            BOOST_PP_SEQ_FOR_EACH_I(                                         
       \\\n                BOOST_PARAMETER_FUNCTION_PARAMETERS_M, tag_namespace, args          \\\n            )                                                                       \\\n        >                                                                           \\\n    {};                                                                             \\\n                                                                                    \\\n    typedef BOOST_PP_CAT( \\\n            BOOST_PP_CAT(boost_param_params_, __LINE__) \\\n          , BOOST_PARAMETER_MEMBER_FUNCTION_NAME(base) \\\n    )<int>\n\n// Defines result type metafunction\n# define BOOST_PARAMETER_FUNCTION_RESULT_ARG(z, _, i, x) \\\n    BOOST_PP_COMMA_IF(i) class BOOST_PP_TUPLE_ELEM(3,1,x)\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_RESULT_(result, name, args)                                   \\\n    template <class Args>                                                                       \\\n    struct BOOST_PARAMETER_FUNCTION_RESULT_NAME(name)                                           \\\n    {                                                                                           \\\n        typedef typename BOOST_PARAMETER_PARENTHESIZED_TYPE(result) type;                       \\\n    };\n\n// Defines implementation function\n# define BOOST_PARAMETER_FUNCTION_IMPL_HEAD(name)           \\\n    template <class Args>                                   \\\n    typename BOOST_PARAMETER_FUNCTION_RESULT_NAME(name)<    \\\n       Args                                                 \\\n    >::type BOOST_PARAMETER_IMPL(name)(Args const& args)\n\n# define BOOST_PARAMETER_FUNCTION_IMPL_FWD(name) \\\n    BOOST_PARAMETER_FUNCTION_IMPL_HEAD(name);\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_SPLIT_ARG_required(state, arg) \\\n    ( \\\n        BOOST_PP_INC(BOOST_PP_TUPLE_ELEM(4, 0, state)) \\\n      , BOOST_PP_SEQ_PUSH_BACK(BOOST_PP_TUPLE_ELEM(4, 1, state), arg) \\\n      , BOOST_PP_TUPLE_ELEM(4, 2, 
state) \\\n      , BOOST_PP_TUPLE_ELEM(4, 3, state) \\\n    )\n\n# define BOOST_PARAMETER_FUNCTION_SPLIT_ARG_deduced_required(state, arg) \\\n    BOOST_PARAMETER_FUNCTION_SPLIT_ARG_required(state, arg)\n\n# define BOOST_PARAMETER_FUNCTION_SPLIT_ARG_optional(state, arg) \\\n    ( \\\n        BOOST_PP_TUPLE_ELEM(4, 0, state) \\\n      , BOOST_PP_TUPLE_ELEM(4, 1, state) \\\n      , BOOST_PP_INC(BOOST_PP_TUPLE_ELEM(4, 2, state)) \\\n      , BOOST_PP_SEQ_PUSH_BACK(BOOST_PP_TUPLE_ELEM(4, 3, state), arg) \\\n    )\n\n# define BOOST_PARAMETER_FUNCTION_SPLIT_ARG_deduced_optional(state, arg) \\\n    BOOST_PARAMETER_FUNCTION_SPLIT_ARG_optional(state, arg)\n\n# define BOOST_PARAMETER_FUNCTION_SPLIT_ARG(s, state, arg) \\\n    BOOST_PP_CAT( \\\n        BOOST_PARAMETER_FUNCTION_SPLIT_ARG_ \\\n      , BOOST_PARAMETER_FN_ARG_QUALIFIER(arg) \\\n    )(state, arg)\n\n// Returns (required_count, required, optional_count, optionals) tuple\n# define BOOST_PARAMETER_FUNCTION_SPLIT_ARGS(args) \\\n    BOOST_PP_SEQ_FOLD_LEFT( \\\n        BOOST_PARAMETER_FUNCTION_SPLIT_ARG \\\n      , (0,BOOST_PP_SEQ_NIL, 0,BOOST_PP_SEQ_NIL) \\\n      , args \\\n    )\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_ARG_NAME(keyword) \\\n    BOOST_PP_CAT(BOOST_PP_CAT(keyword,_),type)\n\n// Helpers used as parameters to BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS.\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_TEMPLATE_ARG(r, _, arg) \\\n    , class BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_ARG_NAME( \\\n              BOOST_PARAMETER_FN_ARG_KEYWORD(arg) \\\n      )\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_ARG(r, _, arg) \\\n    , BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_ARG_NAME( \\\n              BOOST_PARAMETER_FN_ARG_KEYWORD(arg) \\\n      )& BOOST_PARAMETER_FN_ARG_KEYWORD(arg)\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_PARAMETER(r, _, arg) \\\n    , BOOST_PARAMETER_FN_ARG_KEYWORD(arg)\n\n// Produces a name for the dispatch functions.\n# define 
BOOST_PARAMETER_FUNCTION_DEFAULT_NAME(name) \\\n    BOOST_PP_CAT( \\\n        boost_param_default_ \\\n      , BOOST_PP_CAT(__LINE__, BOOST_PARAMETER_MEMBER_FUNCTION_NAME(name)) \\\n    )\n\n// Helper macro used below to produce lists based on the keyword argument\n// names. macro is applied to every element. n is the number of\n// optional arguments that should be included.\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS(macro, n, split_args) \\\n    BOOST_PP_SEQ_FOR_EACH( \\\n        macro \\\n      , ~ \\\n      , BOOST_PP_TUPLE_ELEM(4,1,split_args) \\\n    ) \\\n    BOOST_PP_SEQ_FOR_EACH( \\\n        macro \\\n      , ~ \\\n      , BOOST_PP_SEQ_FIRST_N( \\\n          BOOST_PP_SUB(BOOST_PP_TUPLE_ELEM(4,2,split_args), n) \\\n        , BOOST_PP_TUPLE_ELEM(4,3,split_args) \\\n        ) \\\n    )\n\n// Generates a keyword | default expression.\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_EVAL_DEFAULT(arg, tag_namespace) \\\n    boost::parameter::keyword< \\\n        tag_namespace::BOOST_PARAMETER_FN_ARG_KEYWORD(arg) \\\n    >::instance | boost::parameter::aux::use_default_tag()\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_GET_ARG(arg, tag_ns) \\\n    BOOST_PARAMETER_FUNCTION_CAST( \\\n        args[ \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_EVAL_DEFAULT( \\\n                arg, tag_ns \\\n            ) \\\n        ] \\\n      , BOOST_PARAMETER_FN_ARG_PRED(arg) \\\n      , Args \\\n    )\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_BODY(name, n, split_args, tag_namespace) \\\n    { \\\n        return BOOST_PARAMETER_FUNCTION_DEFAULT_NAME(name)( \\\n            (ResultType(*)())0 \\\n          , args \\\n          , 0L \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS( \\\n                BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_PARAMETER \\\n              , n \\\n              , split_args \\\n            ) \\\n          , BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_GET_ARG( \\\n                BOOST_PP_SEQ_ELEM( \\\n               
     BOOST_PP_SUB(BOOST_PP_TUPLE_ELEM(4,2,split_args), n) \\\n                  , BOOST_PP_TUPLE_ELEM(4,3,split_args) \\\n                ) \\\n              , tag_namespace \\\n            ) \\\n        ); \\\n    }\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_EVAL_ACTUAL_DEFAULT(arg) \\\n    BOOST_PARAMETER_FUNCTION_CAST( \\\n        boost::parameter::aux::as_lvalue(BOOST_PARAMETER_FN_ARG_DEFAULT(arg), 0L) \\\n      , BOOST_PARAMETER_FN_ARG_PRED(arg) \\\n      , Args \\\n    )\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_EVAL_DEFAULT_BODY(name, n, split_args, tag_ns, const_) \\\n    template < \\\n        class ResultType \\\n      , class Args \\\n        BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS( \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_TEMPLATE_ARG \\\n          , BOOST_PP_INC(n) \\\n          , split_args \\\n        ) \\\n    > \\\n    BOOST_PARAMETER_MEMBER_FUNCTION_STATIC(name) \\\n    ResultType BOOST_PARAMETER_FUNCTION_DEFAULT_NAME(name)( \\\n        ResultType(*)() \\\n      , Args const& args \\\n      , long \\\n        BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS( \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_ARG \\\n          , BOOST_PP_INC(n) \\\n          , split_args \\\n        ) \\\n      , boost::parameter::aux::use_default_tag \\\n    ) BOOST_PP_EXPR_IF(const_, const) \\\n    { \\\n        return BOOST_PARAMETER_FUNCTION_DEFAULT_NAME(name)( \\\n            (ResultType(*)())0 \\\n          , args \\\n          , 0L \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS( \\\n                BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_PARAMETER \\\n              , BOOST_PP_INC(n) \\\n              , split_args \\\n            ) \\\n          , BOOST_PARAMETER_FUNCTION_DEFAULT_EVAL_ACTUAL_DEFAULT( \\\n                BOOST_PP_SEQ_ELEM( \\\n                    BOOST_PP_SUB(BOOST_PP_TUPLE_ELEM(4,2,split_args), BOOST_PP_INC(n)) \\\n                  , BOOST_PP_TUPLE_ELEM(4,3,split_args) \\\n                ) \\\n        
    ) \\\n        ); \\\n    }\n\n// Produces a forwarding layer in the default evaluation machine.\n//\n// data is a tuple:\n//\n//   (name, split_args)\n//\n// Where name is the base name of the function, and split_args is a tuple:\n//\n//   (required_count, required_args, optional_count, required_args)\n//\n\n\n// defines the actual function body for BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION below.\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION0(z, n, data) \\\n    template < \\\n        class ResultType \\\n      , class Args \\\n        BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS( \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_TEMPLATE_ARG \\\n          , n \\\n          , BOOST_PP_TUPLE_ELEM(5,1,data) \\\n        ) \\\n    > \\\n    BOOST_PARAMETER_MEMBER_FUNCTION_STATIC(BOOST_PP_TUPLE_ELEM(5,0,data)) \\\n    ResultType BOOST_PARAMETER_FUNCTION_DEFAULT_NAME(BOOST_PP_TUPLE_ELEM(5,0,data))( \\\n        ResultType(*)() \\\n      , Args const& args \\\n      , int \\\n        BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS( \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_ARG \\\n          , n \\\n          , BOOST_PP_TUPLE_ELEM(5,1,data) \\\n        ) \\\n    ) BOOST_PP_EXPR_IF(BOOST_PP_TUPLE_ELEM(5,2,data), const) \\\n    BOOST_PP_IF( \\\n        n \\\n      , BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_BODY \\\n      , ; BOOST_PP_TUPLE_EAT(4) \\\n    )( \\\n        BOOST_PP_TUPLE_ELEM(5,0,data) \\\n      , n \\\n      , BOOST_PP_TUPLE_ELEM(5,1,data) \\\n      , BOOST_PP_TUPLE_ELEM(5,3,data) \\\n    )\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION(z, n, data) \\\n    BOOST_PP_IF( \\\n        BOOST_PP_AND( \\\n            BOOST_PP_NOT(n) \\\n          , BOOST_PP_TUPLE_ELEM(5,4,data) \\\n        ) \\\n      , BOOST_PP_TUPLE_EAT(3) \\\n      , BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION0 \\\n    )(z, n, data) \\\n    BOOST_PP_IF( \\\n        BOOST_PP_EQUAL(n, BOOST_PP_TUPLE_ELEM(4,2,BOOST_PP_TUPLE_ELEM(5,1,data))) \\\n      , 
BOOST_PP_TUPLE_EAT(5) \\\n      , BOOST_PARAMETER_FUNCTION_DEFAULT_EVAL_DEFAULT_BODY \\\n    )( \\\n        BOOST_PP_TUPLE_ELEM(5,0,data) \\\n      , n \\\n      , BOOST_PP_TUPLE_ELEM(5,1,data) \\\n      , BOOST_PP_TUPLE_ELEM(5,3,data) \\\n      , BOOST_PP_TUPLE_ELEM(5,2,data) \\\n    )\n\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_GET_ARG(r, tag_ns, arg) \\\n    , BOOST_PARAMETER_FUNCTION_CAST( \\\n          args[ \\\n              boost::parameter::keyword<tag_ns::BOOST_PARAMETER_FN_ARG_KEYWORD(arg)>::instance \\\n          ] \\\n        , BOOST_PARAMETER_FN_ARG_PRED(arg) \\\n        , Args \\\n      )\n\n// Generates the function template that recives a ArgumentPack, and then\n// goes on to call the layers of overloads generated by \n// BOOST_PARAMETER_FUNCTION_DEFAULT_LAYER.\n# define BOOST_PARAMETER_FUNCTION_INITIAL_DISPATCH_FUNCTION(name, split_args, const_, tag_ns) \\\n    template <class Args> \\\n    typename BOOST_PARAMETER_FUNCTION_RESULT_NAME(name)<Args>::type \\\n    BOOST_PARAMETER_MEMBER_FUNCTION_STATIC(name) \\\n    BOOST_PARAMETER_IMPL(name)(Args const& args) BOOST_PP_EXPR_IF(const_, const) \\\n    { \\\n        return BOOST_PARAMETER_FUNCTION_DEFAULT_NAME(name)( \\\n            (typename BOOST_PARAMETER_FUNCTION_RESULT_NAME(name)<Args>::type(*)())0 \\\n          , args \\\n          , 0L \\\n \\\n            BOOST_PP_SEQ_FOR_EACH( \\\n                BOOST_PARAMETER_FUNCTION_DEFAULT_GET_ARG \\\n              , tag_ns \\\n              , BOOST_PP_TUPLE_ELEM(4,1,split_args) \\\n            ) \\\n \\\n        ); \\\n    }\n\n// Helper for BOOST_PARAMETER_FUNCTION_DEFAULT_LAYER below.\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_LAYER_AUX( \\\n    name, split_args, skip_fwd_decl, const_, tag_namespace \\\n  ) \\\n    BOOST_PP_REPEAT_FROM_TO( \\\n        0 \\\n      , BOOST_PP_INC(BOOST_PP_TUPLE_ELEM(4, 2, split_args)) \\\n      , BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION \\\n      , (name, split_args, const_, tag_namespace, skip_fwd_decl) \\\n    ) \\\n 
\\\n    BOOST_PARAMETER_FUNCTION_INITIAL_DISPATCH_FUNCTION(name, split_args, const_, tag_namespace) \\\n\\\n    template < \\\n        class ResultType \\\n      , class Args \\\n        BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS( \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_TEMPLATE_ARG \\\n          , 0 \\\n          , split_args \\\n        ) \\\n    > \\\n    BOOST_PARAMETER_MEMBER_FUNCTION_STATIC(name) \\\n    ResultType BOOST_PARAMETER_FUNCTION_DEFAULT_NAME(name)( \\\n        ResultType(*)() \\\n      , Args const& \\\n      , int \\\n        BOOST_PARAMETER_FUNCTION_DEFAULT_ARGUMENTS( \\\n            BOOST_PARAMETER_FUNCTION_DEFAULT_FUNCTION_ARG \\\n          , 0 \\\n          , split_args \\\n        ) \\\n    ) BOOST_PP_EXPR_IF(const_, const)\n\n// Generates a bunch of forwarding functions that each extract\n// one more argument.\n# define BOOST_PARAMETER_FUNCTION_DEFAULT_LAYER(name, args, skip_fwd_decl, const_, tag_ns) \\\n    BOOST_PARAMETER_FUNCTION_DEFAULT_LAYER_AUX( \\\n        name, BOOST_PARAMETER_FUNCTION_SPLIT_ARGS(args), skip_fwd_decl, const_, tag_ns \\\n    )\n/**/\n\n// Defines the result metafunction and the parameters specialization.\n# define BOOST_PARAMETER_FUNCTION_HEAD(result, name, tag_namespace, args)   \\\n      BOOST_PARAMETER_FUNCTION_RESULT_(result, name, args)                   \\\n                                                                            \\\n          BOOST_PARAMETER_FUNCTION_PARAMETERS(tag_namespace, name, args)    \\\n          BOOST_PARAMETER_FUNCTION_PARAMETERS_NAME(name);                   \\\n\n// Helper for BOOST_PARAMETER_FUNCTION below.\n# define BOOST_PARAMETER_FUNCTION_AUX(result, name, tag_namespace, args)    \\\n    BOOST_PARAMETER_FUNCTION_HEAD(result, name, tag_namespace, args)         \\\n    BOOST_PARAMETER_FUNCTION_IMPL_HEAD(name); \\\n\\\n    BOOST_PARAMETER_FUNCTION_FWD_FUNCTIONS(                                  \\\n        result, name, args, 0                                    
            \\\n      , BOOST_PARAMETER_FUNCTION_FWD_COMBINATIONS(args)                      \\\n    )                                                                        \\\n                                                                             \\\n    BOOST_PARAMETER_FUNCTION_DEFAULT_LAYER(name, args, 0, 0, tag_namespace)\n\n// Defines a Boost.Parameter enabled function with the new syntax.\n# define BOOST_PARAMETER_FUNCTION(result, name, tag_namespace, args)    \\\n    BOOST_PARAMETER_FUNCTION_AUX(                                       \\\n        result, name, tag_namespace                                      \\\n      , BOOST_PARAMETER_FLATTEN(3, 2, 3, args)                           \\\n    )                                                                    \\\n/**/\n\n// Defines a Boost.Parameter enabled function.\n# define BOOST_PARAMETER_BASIC_FUNCTION_AUX(result, name, tag_namespace, args)    \\\n    BOOST_PARAMETER_FUNCTION_HEAD(result, name, tag_namespace, args)        \\\n                                                                            \\\n    BOOST_PARAMETER_FUNCTION_IMPL_FWD(name)                                 \\\n                                                                            \\\n    BOOST_PARAMETER_FUNCTION_FWD_FUNCTIONS(                                 \\\n        result, name, args, 0                                               \\\n      , BOOST_PARAMETER_FUNCTION_FWD_COMBINATIONS(args)                     \\\n    )                                                                       \\\n                                                                            \\\n    BOOST_PARAMETER_FUNCTION_IMPL_HEAD(name)\n\n# define BOOST_PARAMETER_BASIC_FUNCTION(result, name, tag_namespace, args)  \\\n    BOOST_PARAMETER_BASIC_FUNCTION_AUX(                                     \\\n        result, name, tag_namespace                                     \\\n      , BOOST_PARAMETER_FLATTEN(2, 2, 3, args)                   
       \\\n    )                                                                   \\\n/**/\n\n// Defines a Boost.Parameter enabled member function.\n# define BOOST_PARAMETER_BASIC_MEMBER_FUNCTION_AUX(result, name, tag_namespace, args, const_) \\\n    BOOST_PARAMETER_FUNCTION_HEAD(result, name, tag_namespace, args)                    \\\n                                                                                        \\\n    BOOST_PARAMETER_FUNCTION_FWD_FUNCTIONS(                                             \\\n        result, name, args, const_                                                      \\\n      , BOOST_PARAMETER_FUNCTION_FWD_COMBINATIONS(args)                                 \\\n    )                                                                                   \\\n                                                                                        \\\n    BOOST_PARAMETER_FUNCTION_IMPL_HEAD(name) BOOST_PP_EXPR_IF(const_, const)            \\\n/**/\n\n# define BOOST_PARAMETER_BASIC_MEMBER_FUNCTION(result, name, tag_namespace, args) \\\n    BOOST_PARAMETER_BASIC_MEMBER_FUNCTION_AUX( \\\n        result, name, tag_namespace \\\n      , BOOST_PARAMETER_FLATTEN(2, 2, 3, args) \\\n      , 0 \\\n    )\n/**/\n\n# define BOOST_PARAMETER_BASIC_CONST_MEMBER_FUNCTION(result, name, tag_namespace, args) \\\n    BOOST_PARAMETER_BASIC_MEMBER_FUNCTION_AUX( \\\n        result, name, tag_namespace \\\n      , BOOST_PARAMETER_FLATTEN(2, 2, 3, args) \\\n      , 1 \\\n    )\n/**/\n\n\n\n# define BOOST_PARAMETER_MEMBER_FUNCTION_AUX(result, name, tag_namespace, const_, args)    \\\n    BOOST_PARAMETER_FUNCTION_HEAD(result, name, tag_namespace, args)         \\\n\\\n    BOOST_PARAMETER_FUNCTION_FWD_FUNCTIONS(                                  \\\n        result, name, args, const_                                           \\\n      , BOOST_PARAMETER_FUNCTION_FWD_COMBINATIONS(args)                      \\\n    )                                                        
                \\\n                                                                             \\\n    BOOST_PARAMETER_FUNCTION_DEFAULT_LAYER(name, args, 1, const_, tag_namespace)\n\n// Defines a Boost.Parameter enabled function with the new syntax.\n# define BOOST_PARAMETER_MEMBER_FUNCTION(result, name, tag_namespace, args)    \\\n    BOOST_PARAMETER_MEMBER_FUNCTION_AUX(                                       \\\n        result, name, tag_namespace, 0                                     \\\n      , BOOST_PARAMETER_FLATTEN(3, 2, 3, args)                           \\\n    )                                                                    \\\n/**/\n\n# define BOOST_PARAMETER_CONST_MEMBER_FUNCTION(result, name, tag_namespace, args)    \\\n    BOOST_PARAMETER_MEMBER_FUNCTION_AUX(                                       \\\n        result, name, tag_namespace, 1                                     \\\n      , BOOST_PARAMETER_FLATTEN(3, 2, 3, args)                           \\\n    )                                                                    \\\n/**/\n\n// Defines a Boost.Parameter enabled constructor.\n\n# define BOOST_PARAMETER_FUNCTION_ARGUMENT(r, _, i, elem) \\\n    BOOST_PP_COMMA_IF(i) elem& BOOST_PP_CAT(a, i)\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR00(z, n, r, data, elem) \\\n    BOOST_PP_IF( \\\n        n \\\n      , BOOST_PARAMETER_FUNCTION_FWD_FUNCTION_TEMPLATE_Z, BOOST_PP_TUPLE_EAT(2) \\\n    )(z, n) \\\n    BOOST_PP_EXPR_IF(BOOST_PP_EQUAL(n,1), explicit) \\\n    BOOST_PP_TUPLE_ELEM(6,2,data)( \\\n        BOOST_PP_IF( \\\n            n \\\n          , BOOST_PP_SEQ_FOR_EACH_I_R \\\n          , BOOST_PP_TUPLE_EAT(4) \\\n        )( \\\n            r \\\n          , BOOST_PARAMETER_FUNCTION_ARGUMENT \\\n          , ~ \\\n          , elem \\\n        ) \\\n        BOOST_PP_IF(n, BOOST_PARAMETER_FUNCTION_FWD_MATCH_Z, BOOST_PP_TUPLE_EAT(4))( \\\n            z \\\n          , BOOST_PP_TUPLE_ELEM(6,3,data) \\\n          , 
BOOST_PP_CAT(constructor_parameters, __LINE__) \\\n          , n \\\n        ) \\\n    ) \\\n      : BOOST_PARAMETER_PARENTHESIZED_TYPE(BOOST_PP_TUPLE_ELEM(6,3,data)) ( \\\n            BOOST_PP_CAT(constructor_parameters, __LINE__)()( \\\n                BOOST_PP_ENUM_PARAMS_Z(z, n, a) \\\n            ) \\\n        ) \\\n    {}\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR0(r, data, elem) \\\n    BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR00( \\\n        BOOST_PP_TUPLE_ELEM(6,0,data) \\\n      , BOOST_PP_TUPLE_ELEM(6,1,data) \\\n      , r \\\n      , data \\\n      , elem \\\n    )\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_PRODUCT(r, product) \\\n    (product)\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR_ARITY_0(z, n, data) \\\n    BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR00( \\\n        z, n, BOOST_PP_DEDUCE_R() \\\n      , (z, n, BOOST_PP_TUPLE_REM(4) data) \\\n      , ~ \\\n    )\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR_ARITY_N(z, n, data) \\\n    BOOST_PP_SEQ_FOR_EACH( \\\n        BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR0 \\\n      , (z, n, BOOST_PP_TUPLE_REM(4) data) \\\n      , BOOST_PP_SEQ_FOR_EACH_PRODUCT( \\\n            BOOST_PARAMETER_FUNCTION_FWD_PRODUCT \\\n          , BOOST_PP_SEQ_FIRST_N( \\\n                n, BOOST_PP_TUPLE_ELEM(4,2,data) \\\n            ) \\\n        ) \\\n    )\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR(z, n, data) \\\n    BOOST_PP_IF( \\\n        n \\\n      , BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR_ARITY_N \\\n      , BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR_ARITY_0 \\\n    )(z,n,data) \\\n/**/\n\n# define BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTORS0(class_,base,args,combinations,range) \\\n    BOOST_PP_REPEAT_FROM_TO( \\\n        BOOST_PP_TUPLE_ELEM(2,0,range), BOOST_PP_TUPLE_ELEM(2,1,range) \\\n      , BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTOR \\\n      , (class_,base,combinations,BOOST_PP_TUPLE_ELEM(2,1,range)) \\\n    )\n/**/\n\n# define 
BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTORS(class_,base,args,combinations) \\\n    BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTORS0( \\\n        class_, base, args, combinations, BOOST_PARAMETER_ARITY_RANGE(args) \\\n    )\n/**/\n\n# define BOOST_PARAMETER_CONSTRUCTOR_AUX(class_, base, tag_namespace, args) \\\n    BOOST_PARAMETER_FUNCTION_PARAMETERS(tag_namespace, ctor, args)          \\\n        BOOST_PP_CAT(constructor_parameters, __LINE__); \\\n\\\n    BOOST_PARAMETER_FUNCTION_FWD_CONSTRUCTORS( \\\n        class_, base, args \\\n      , BOOST_PARAMETER_FUNCTION_FWD_COMBINATIONS(args) \\\n    ) \\\n/**/\n\n# define BOOST_PARAMETER_CONSTRUCTOR(class_, base, tag_namespace, args) \\\n    BOOST_PARAMETER_CONSTRUCTOR_AUX( \\\n        class_, base, tag_namespace \\\n      , BOOST_PARAMETER_FLATTEN(2, 2, 3, args) \\\n    )\n/**/\n\n# ifndef BOOST_NO_FUNCTION_TEMPLATE_ORDERING\n#  define BOOST_PARAMETER_FUNCTION_FWD_COMBINATION(r, _, i, elem) \\\n    (BOOST_PP_IF( \\\n        BOOST_PARAMETER_FUNCTION_IS_KEYWORD_QUALIFIER( \\\n            BOOST_PARAMETER_FN_ARG_NAME(elem) \\\n        ) \\\n      , (const ParameterArgumentType ## i)(ParameterArgumentType ## i) \\\n      , (const ParameterArgumentType ## i) \\\n    ))\n// No partial ordering. This feature doesn't work.\n# else\n#  define BOOST_PARAMETER_FUNCTION_FWD_COMBINATION(r, _, i, elem) \\\n    (BOOST_PP_IF( \\\n        BOOST_PARAMETER_FUNCTION_IS_KEYWORD_QUALIFIER( \\\n            BOOST_PARAMETER_FN_ARG_NAME(elem) \\\n        ) \\\n      , (ParameterArgumentType ## i) \\\n      , (const ParameterArgumentType ## i) \\\n    ))\n# endif\n\n# define BOOST_PARAMETER_FUNCTION_FWD_COMBINATIONS(args) \\\n    BOOST_PP_SEQ_FOR_EACH_I(BOOST_PARAMETER_FUNCTION_FWD_COMBINATION, ~, args)\n\n#endif // BOOST_PARAMETER_PREPROCESSOR_060206_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter/value_type.hpp",
    "content": "// Copyright Daniel Wallin 2006. Use, modification and distribution is\n// subject to the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_PARAMETER_VALUE_TYPE_060921_HPP\n# define BOOST_PARAMETER_VALUE_TYPE_060921_HPP\n\n# include <boost/mpl/apply.hpp>\n# include <boost/mpl/assert.hpp>\n# include <boost/mpl/and.hpp>\n# include <boost/parameter/aux_/result_of0.hpp>\n# include <boost/parameter/aux_/void.hpp>\n# include <boost/type_traits/is_same.hpp>\n\nnamespace boost { namespace parameter { \n\n// A metafunction that, given an argument pack, returns the type of\n// the parameter identified by the given keyword.  If no such\n// parameter has been specified, returns Default\n\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\ntemplate <class Parameters, class Keyword, class Default>\nstruct value_type0\n{\n    typedef typename mpl::apply_wrap3<\n        typename Parameters::binding,Keyword,Default,mpl::false_\n    >::type type;\n\n    BOOST_MPL_ASSERT_NOT((\n        mpl::and_<\n            is_same<Default, void_>\n          , is_same<type, void_>\n        >\n    ));\n};\n# endif\n\ntemplate <class Parameters, class Keyword, class Default = void_>\nstruct value_type\n{\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))\n    typedef typename mpl::eval_if<\n        mpl::is_placeholder<Parameters>\n      , mpl::identity<int>\n      , value_type0<Parameters,Keyword,Default>\n    >::type type;\n# else\n    typedef typename mpl::apply_wrap3<\n        typename Parameters::binding,Keyword,Default,mpl::false_\n    >::type type;\n\n    BOOST_MPL_ASSERT_NOT((\n        mpl::and_<\n            is_same<Default, void_>\n          , is_same<type, void_>\n        >\n    ));\n# endif\n\n    BOOST_MPL_AUX_LAMBDA_SUPPORT(3,value_type,(Parameters,Keyword,Default))\n};\n\n// A metafunction that, given an argument pack, returns the type of\n// the parameter 
identified by the given keyword.  If no such\n// parameter has been specified, returns the type returned by invoking\n// DefaultFn\ntemplate <class Parameters, class Keyword, class DefaultFn>\nstruct lazy_value_type\n{\n  typedef typename mpl::apply_wrap3<\n      typename Parameters::binding\n    , Keyword\n    , typename aux::result_of0<DefaultFn>::type\n    , mpl::false_\n  >::type type;\n};\n\n\n}} // namespace boost::parameter\n\n#endif // BOOST_PARAMETER_VALUE_TYPE_060921_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/parameter.hpp",
    "content": "// Copyright David Abrahams, Daniel Wallin 2005. Use, modification and \n// distribution is subject to the Boost Software License, Version 1.0. \n// (See accompanying file LICENSE_1_0.txt or copy at \n// http://www.boost.org/LICENSE_1_0.txt)\n\n//  See www.boost.org/libs/parameter for documentation.\n\n#ifndef BOOST_PARAMETER_050401_HPP\n#define BOOST_PARAMETER_050401_HPP\n\n#include <boost/parameter/parameters.hpp>\n#include <boost/parameter/keyword.hpp>\n#include <boost/parameter/binding.hpp>\n#include <boost/parameter/value_type.hpp>\n#include <boost/parameter/macros.hpp>\n#include <boost/parameter/match.hpp>\n#include <boost/parameter/name.hpp>\n#include <boost/parameter/preprocessor.hpp>\n\n#endif // BOOST_PARAMETER_050401_HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/arithmetic/add.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ARITHMETIC_ADD_HPP\n# define BOOST_PREPROCESSOR_ARITHMETIC_ADD_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/arithmetic/inc.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/while.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_ADD */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ADD(x, y) BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_WHILE(BOOST_PP_ADD_P, BOOST_PP_ADD_O, (x, y)))\n# else\n#    define BOOST_PP_ADD(x, y) BOOST_PP_ADD_I(x, y)\n#    define BOOST_PP_ADD_I(x, y) BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_WHILE(BOOST_PP_ADD_P, BOOST_PP_ADD_O, (x, y)))\n# endif\n#\n# define BOOST_PP_ADD_P(d, xy) BOOST_PP_TUPLE_ELEM(2, 1, xy)\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_ADD_O(d, xy) BOOST_PP_ADD_O_I xy\n# else\n#    define BOOST_PP_ADD_O(d, xy) BOOST_PP_ADD_O_I(BOOST_PP_TUPLE_ELEM(2, 0, xy), BOOST_PP_TUPLE_ELEM(2, 1, xy))\n# endif\n#\n# define BOOST_PP_ADD_O_I(x, y) (BOOST_PP_INC(x), BOOST_PP_DEC(y))\n#\n# /* BOOST_PP_ADD_D */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ADD_D(d, x, y) BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_WHILE_ ## d(BOOST_PP_ADD_P, BOOST_PP_ADD_O, (x, y)))\n# else\n#    define BOOST_PP_ADD_D(d, x, y) BOOST_PP_ADD_D_I(d, x, y)\n#    define BOOST_PP_ADD_D_I(d, x, y) BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_WHILE_ ## d(BOOST_PP_ADD_P, BOOST_PP_ADD_O, (x, y)))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/arithmetic/dec.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ARITHMETIC_DEC_HPP\n# define BOOST_PREPROCESSOR_ARITHMETIC_DEC_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_DEC */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_DEC(x) BOOST_PP_DEC_I(x)\n# else\n#    define BOOST_PP_DEC(x) BOOST_PP_DEC_OO((x))\n#    define BOOST_PP_DEC_OO(par) BOOST_PP_DEC_I ## par\n# endif\n#\n# define BOOST_PP_DEC_I(x) BOOST_PP_DEC_ ## x\n#\n# define BOOST_PP_DEC_0 0\n# define BOOST_PP_DEC_1 0\n# define BOOST_PP_DEC_2 1\n# define BOOST_PP_DEC_3 2\n# define BOOST_PP_DEC_4 3\n# define BOOST_PP_DEC_5 4\n# define BOOST_PP_DEC_6 5\n# define BOOST_PP_DEC_7 6\n# define BOOST_PP_DEC_8 7\n# define BOOST_PP_DEC_9 8\n# define BOOST_PP_DEC_10 9\n# define BOOST_PP_DEC_11 10\n# define BOOST_PP_DEC_12 11\n# define BOOST_PP_DEC_13 12\n# define BOOST_PP_DEC_14 13\n# define BOOST_PP_DEC_15 14\n# define BOOST_PP_DEC_16 15\n# define BOOST_PP_DEC_17 16\n# define BOOST_PP_DEC_18 17\n# define BOOST_PP_DEC_19 18\n# define BOOST_PP_DEC_20 19\n# define BOOST_PP_DEC_21 20\n# define BOOST_PP_DEC_22 21\n# define BOOST_PP_DEC_23 22\n# define BOOST_PP_DEC_24 23\n# define BOOST_PP_DEC_25 24\n# define BOOST_PP_DEC_26 25\n# define BOOST_PP_DEC_27 26\n# define BOOST_PP_DEC_28 27\n# define BOOST_PP_DEC_29 28\n# define BOOST_PP_DEC_30 29\n# define BOOST_PP_DEC_31 30\n# define BOOST_PP_DEC_32 31\n# define BOOST_PP_DEC_33 32\n# define BOOST_PP_DEC_34 33\n# define BOOST_PP_DEC_35 34\n# define BOOST_PP_DEC_36 35\n# define BOOST_PP_DEC_37 36\n# define BOOST_PP_DEC_38 37\n# define BOOST_PP_DEC_39 38\n# define 
BOOST_PP_DEC_40 39\n# define BOOST_PP_DEC_41 40\n# define BOOST_PP_DEC_42 41\n# define BOOST_PP_DEC_43 42\n# define BOOST_PP_DEC_44 43\n# define BOOST_PP_DEC_45 44\n# define BOOST_PP_DEC_46 45\n# define BOOST_PP_DEC_47 46\n# define BOOST_PP_DEC_48 47\n# define BOOST_PP_DEC_49 48\n# define BOOST_PP_DEC_50 49\n# define BOOST_PP_DEC_51 50\n# define BOOST_PP_DEC_52 51\n# define BOOST_PP_DEC_53 52\n# define BOOST_PP_DEC_54 53\n# define BOOST_PP_DEC_55 54\n# define BOOST_PP_DEC_56 55\n# define BOOST_PP_DEC_57 56\n# define BOOST_PP_DEC_58 57\n# define BOOST_PP_DEC_59 58\n# define BOOST_PP_DEC_60 59\n# define BOOST_PP_DEC_61 60\n# define BOOST_PP_DEC_62 61\n# define BOOST_PP_DEC_63 62\n# define BOOST_PP_DEC_64 63\n# define BOOST_PP_DEC_65 64\n# define BOOST_PP_DEC_66 65\n# define BOOST_PP_DEC_67 66\n# define BOOST_PP_DEC_68 67\n# define BOOST_PP_DEC_69 68\n# define BOOST_PP_DEC_70 69\n# define BOOST_PP_DEC_71 70\n# define BOOST_PP_DEC_72 71\n# define BOOST_PP_DEC_73 72\n# define BOOST_PP_DEC_74 73\n# define BOOST_PP_DEC_75 74\n# define BOOST_PP_DEC_76 75\n# define BOOST_PP_DEC_77 76\n# define BOOST_PP_DEC_78 77\n# define BOOST_PP_DEC_79 78\n# define BOOST_PP_DEC_80 79\n# define BOOST_PP_DEC_81 80\n# define BOOST_PP_DEC_82 81\n# define BOOST_PP_DEC_83 82\n# define BOOST_PP_DEC_84 83\n# define BOOST_PP_DEC_85 84\n# define BOOST_PP_DEC_86 85\n# define BOOST_PP_DEC_87 86\n# define BOOST_PP_DEC_88 87\n# define BOOST_PP_DEC_89 88\n# define BOOST_PP_DEC_90 89\n# define BOOST_PP_DEC_91 90\n# define BOOST_PP_DEC_92 91\n# define BOOST_PP_DEC_93 92\n# define BOOST_PP_DEC_94 93\n# define BOOST_PP_DEC_95 94\n# define BOOST_PP_DEC_96 95\n# define BOOST_PP_DEC_97 96\n# define BOOST_PP_DEC_98 97\n# define BOOST_PP_DEC_99 98\n# define BOOST_PP_DEC_100 99\n# define BOOST_PP_DEC_101 100\n# define BOOST_PP_DEC_102 101\n# define BOOST_PP_DEC_103 102\n# define BOOST_PP_DEC_104 103\n# define BOOST_PP_DEC_105 104\n# define BOOST_PP_DEC_106 105\n# define BOOST_PP_DEC_107 106\n# define 
BOOST_PP_DEC_108 107\n# define BOOST_PP_DEC_109 108\n# define BOOST_PP_DEC_110 109\n# define BOOST_PP_DEC_111 110\n# define BOOST_PP_DEC_112 111\n# define BOOST_PP_DEC_113 112\n# define BOOST_PP_DEC_114 113\n# define BOOST_PP_DEC_115 114\n# define BOOST_PP_DEC_116 115\n# define BOOST_PP_DEC_117 116\n# define BOOST_PP_DEC_118 117\n# define BOOST_PP_DEC_119 118\n# define BOOST_PP_DEC_120 119\n# define BOOST_PP_DEC_121 120\n# define BOOST_PP_DEC_122 121\n# define BOOST_PP_DEC_123 122\n# define BOOST_PP_DEC_124 123\n# define BOOST_PP_DEC_125 124\n# define BOOST_PP_DEC_126 125\n# define BOOST_PP_DEC_127 126\n# define BOOST_PP_DEC_128 127\n# define BOOST_PP_DEC_129 128\n# define BOOST_PP_DEC_130 129\n# define BOOST_PP_DEC_131 130\n# define BOOST_PP_DEC_132 131\n# define BOOST_PP_DEC_133 132\n# define BOOST_PP_DEC_134 133\n# define BOOST_PP_DEC_135 134\n# define BOOST_PP_DEC_136 135\n# define BOOST_PP_DEC_137 136\n# define BOOST_PP_DEC_138 137\n# define BOOST_PP_DEC_139 138\n# define BOOST_PP_DEC_140 139\n# define BOOST_PP_DEC_141 140\n# define BOOST_PP_DEC_142 141\n# define BOOST_PP_DEC_143 142\n# define BOOST_PP_DEC_144 143\n# define BOOST_PP_DEC_145 144\n# define BOOST_PP_DEC_146 145\n# define BOOST_PP_DEC_147 146\n# define BOOST_PP_DEC_148 147\n# define BOOST_PP_DEC_149 148\n# define BOOST_PP_DEC_150 149\n# define BOOST_PP_DEC_151 150\n# define BOOST_PP_DEC_152 151\n# define BOOST_PP_DEC_153 152\n# define BOOST_PP_DEC_154 153\n# define BOOST_PP_DEC_155 154\n# define BOOST_PP_DEC_156 155\n# define BOOST_PP_DEC_157 156\n# define BOOST_PP_DEC_158 157\n# define BOOST_PP_DEC_159 158\n# define BOOST_PP_DEC_160 159\n# define BOOST_PP_DEC_161 160\n# define BOOST_PP_DEC_162 161\n# define BOOST_PP_DEC_163 162\n# define BOOST_PP_DEC_164 163\n# define BOOST_PP_DEC_165 164\n# define BOOST_PP_DEC_166 165\n# define BOOST_PP_DEC_167 166\n# define BOOST_PP_DEC_168 167\n# define BOOST_PP_DEC_169 168\n# define BOOST_PP_DEC_170 169\n# define BOOST_PP_DEC_171 170\n# define 
BOOST_PP_DEC_172 171\n# define BOOST_PP_DEC_173 172\n# define BOOST_PP_DEC_174 173\n# define BOOST_PP_DEC_175 174\n# define BOOST_PP_DEC_176 175\n# define BOOST_PP_DEC_177 176\n# define BOOST_PP_DEC_178 177\n# define BOOST_PP_DEC_179 178\n# define BOOST_PP_DEC_180 179\n# define BOOST_PP_DEC_181 180\n# define BOOST_PP_DEC_182 181\n# define BOOST_PP_DEC_183 182\n# define BOOST_PP_DEC_184 183\n# define BOOST_PP_DEC_185 184\n# define BOOST_PP_DEC_186 185\n# define BOOST_PP_DEC_187 186\n# define BOOST_PP_DEC_188 187\n# define BOOST_PP_DEC_189 188\n# define BOOST_PP_DEC_190 189\n# define BOOST_PP_DEC_191 190\n# define BOOST_PP_DEC_192 191\n# define BOOST_PP_DEC_193 192\n# define BOOST_PP_DEC_194 193\n# define BOOST_PP_DEC_195 194\n# define BOOST_PP_DEC_196 195\n# define BOOST_PP_DEC_197 196\n# define BOOST_PP_DEC_198 197\n# define BOOST_PP_DEC_199 198\n# define BOOST_PP_DEC_200 199\n# define BOOST_PP_DEC_201 200\n# define BOOST_PP_DEC_202 201\n# define BOOST_PP_DEC_203 202\n# define BOOST_PP_DEC_204 203\n# define BOOST_PP_DEC_205 204\n# define BOOST_PP_DEC_206 205\n# define BOOST_PP_DEC_207 206\n# define BOOST_PP_DEC_208 207\n# define BOOST_PP_DEC_209 208\n# define BOOST_PP_DEC_210 209\n# define BOOST_PP_DEC_211 210\n# define BOOST_PP_DEC_212 211\n# define BOOST_PP_DEC_213 212\n# define BOOST_PP_DEC_214 213\n# define BOOST_PP_DEC_215 214\n# define BOOST_PP_DEC_216 215\n# define BOOST_PP_DEC_217 216\n# define BOOST_PP_DEC_218 217\n# define BOOST_PP_DEC_219 218\n# define BOOST_PP_DEC_220 219\n# define BOOST_PP_DEC_221 220\n# define BOOST_PP_DEC_222 221\n# define BOOST_PP_DEC_223 222\n# define BOOST_PP_DEC_224 223\n# define BOOST_PP_DEC_225 224\n# define BOOST_PP_DEC_226 225\n# define BOOST_PP_DEC_227 226\n# define BOOST_PP_DEC_228 227\n# define BOOST_PP_DEC_229 228\n# define BOOST_PP_DEC_230 229\n# define BOOST_PP_DEC_231 230\n# define BOOST_PP_DEC_232 231\n# define BOOST_PP_DEC_233 232\n# define BOOST_PP_DEC_234 233\n# define BOOST_PP_DEC_235 234\n# define 
BOOST_PP_DEC_236 235\n# define BOOST_PP_DEC_237 236\n# define BOOST_PP_DEC_238 237\n# define BOOST_PP_DEC_239 238\n# define BOOST_PP_DEC_240 239\n# define BOOST_PP_DEC_241 240\n# define BOOST_PP_DEC_242 241\n# define BOOST_PP_DEC_243 242\n# define BOOST_PP_DEC_244 243\n# define BOOST_PP_DEC_245 244\n# define BOOST_PP_DEC_246 245\n# define BOOST_PP_DEC_247 246\n# define BOOST_PP_DEC_248 247\n# define BOOST_PP_DEC_249 248\n# define BOOST_PP_DEC_250 249\n# define BOOST_PP_DEC_251 250\n# define BOOST_PP_DEC_252 251\n# define BOOST_PP_DEC_253 252\n# define BOOST_PP_DEC_254 253\n# define BOOST_PP_DEC_255 254\n# define BOOST_PP_DEC_256 255\n# define BOOST_PP_DEC_257 256\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/arithmetic/inc.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ARITHMETIC_INC_HPP\n# define BOOST_PREPROCESSOR_ARITHMETIC_INC_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_INC */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_INC(x) BOOST_PP_INC_I(x)\n# else\n#    define BOOST_PP_INC(x) BOOST_PP_INC_OO((x))\n#    define BOOST_PP_INC_OO(par) BOOST_PP_INC_I ## par\n# endif\n#\n# define BOOST_PP_INC_I(x) BOOST_PP_INC_ ## x\n#\n# define BOOST_PP_INC_0 1\n# define BOOST_PP_INC_1 2\n# define BOOST_PP_INC_2 3\n# define BOOST_PP_INC_3 4\n# define BOOST_PP_INC_4 5\n# define BOOST_PP_INC_5 6\n# define BOOST_PP_INC_6 7\n# define BOOST_PP_INC_7 8\n# define BOOST_PP_INC_8 9\n# define BOOST_PP_INC_9 10\n# define BOOST_PP_INC_10 11\n# define BOOST_PP_INC_11 12\n# define BOOST_PP_INC_12 13\n# define BOOST_PP_INC_13 14\n# define BOOST_PP_INC_14 15\n# define BOOST_PP_INC_15 16\n# define BOOST_PP_INC_16 17\n# define BOOST_PP_INC_17 18\n# define BOOST_PP_INC_18 19\n# define BOOST_PP_INC_19 20\n# define BOOST_PP_INC_20 21\n# define BOOST_PP_INC_21 22\n# define BOOST_PP_INC_22 23\n# define BOOST_PP_INC_23 24\n# define BOOST_PP_INC_24 25\n# define BOOST_PP_INC_25 26\n# define BOOST_PP_INC_26 27\n# define BOOST_PP_INC_27 28\n# define BOOST_PP_INC_28 29\n# define BOOST_PP_INC_29 30\n# define BOOST_PP_INC_30 31\n# define BOOST_PP_INC_31 32\n# define BOOST_PP_INC_32 33\n# define BOOST_PP_INC_33 34\n# define BOOST_PP_INC_34 35\n# define BOOST_PP_INC_35 36\n# define BOOST_PP_INC_36 37\n# define BOOST_PP_INC_37 38\n# define BOOST_PP_INC_38 39\n# define BOOST_PP_INC_39 40\n# define 
BOOST_PP_INC_40 41\n# define BOOST_PP_INC_41 42\n# define BOOST_PP_INC_42 43\n# define BOOST_PP_INC_43 44\n# define BOOST_PP_INC_44 45\n# define BOOST_PP_INC_45 46\n# define BOOST_PP_INC_46 47\n# define BOOST_PP_INC_47 48\n# define BOOST_PP_INC_48 49\n# define BOOST_PP_INC_49 50\n# define BOOST_PP_INC_50 51\n# define BOOST_PP_INC_51 52\n# define BOOST_PP_INC_52 53\n# define BOOST_PP_INC_53 54\n# define BOOST_PP_INC_54 55\n# define BOOST_PP_INC_55 56\n# define BOOST_PP_INC_56 57\n# define BOOST_PP_INC_57 58\n# define BOOST_PP_INC_58 59\n# define BOOST_PP_INC_59 60\n# define BOOST_PP_INC_60 61\n# define BOOST_PP_INC_61 62\n# define BOOST_PP_INC_62 63\n# define BOOST_PP_INC_63 64\n# define BOOST_PP_INC_64 65\n# define BOOST_PP_INC_65 66\n# define BOOST_PP_INC_66 67\n# define BOOST_PP_INC_67 68\n# define BOOST_PP_INC_68 69\n# define BOOST_PP_INC_69 70\n# define BOOST_PP_INC_70 71\n# define BOOST_PP_INC_71 72\n# define BOOST_PP_INC_72 73\n# define BOOST_PP_INC_73 74\n# define BOOST_PP_INC_74 75\n# define BOOST_PP_INC_75 76\n# define BOOST_PP_INC_76 77\n# define BOOST_PP_INC_77 78\n# define BOOST_PP_INC_78 79\n# define BOOST_PP_INC_79 80\n# define BOOST_PP_INC_80 81\n# define BOOST_PP_INC_81 82\n# define BOOST_PP_INC_82 83\n# define BOOST_PP_INC_83 84\n# define BOOST_PP_INC_84 85\n# define BOOST_PP_INC_85 86\n# define BOOST_PP_INC_86 87\n# define BOOST_PP_INC_87 88\n# define BOOST_PP_INC_88 89\n# define BOOST_PP_INC_89 90\n# define BOOST_PP_INC_90 91\n# define BOOST_PP_INC_91 92\n# define BOOST_PP_INC_92 93\n# define BOOST_PP_INC_93 94\n# define BOOST_PP_INC_94 95\n# define BOOST_PP_INC_95 96\n# define BOOST_PP_INC_96 97\n# define BOOST_PP_INC_97 98\n# define BOOST_PP_INC_98 99\n# define BOOST_PP_INC_99 100\n# define BOOST_PP_INC_100 101\n# define BOOST_PP_INC_101 102\n# define BOOST_PP_INC_102 103\n# define BOOST_PP_INC_103 104\n# define BOOST_PP_INC_104 105\n# define BOOST_PP_INC_105 106\n# define BOOST_PP_INC_106 107\n# define BOOST_PP_INC_107 108\n# define 
BOOST_PP_INC_108 109\n# define BOOST_PP_INC_109 110\n# define BOOST_PP_INC_110 111\n# define BOOST_PP_INC_111 112\n# define BOOST_PP_INC_112 113\n# define BOOST_PP_INC_113 114\n# define BOOST_PP_INC_114 115\n# define BOOST_PP_INC_115 116\n# define BOOST_PP_INC_116 117\n# define BOOST_PP_INC_117 118\n# define BOOST_PP_INC_118 119\n# define BOOST_PP_INC_119 120\n# define BOOST_PP_INC_120 121\n# define BOOST_PP_INC_121 122\n# define BOOST_PP_INC_122 123\n# define BOOST_PP_INC_123 124\n# define BOOST_PP_INC_124 125\n# define BOOST_PP_INC_125 126\n# define BOOST_PP_INC_126 127\n# define BOOST_PP_INC_127 128\n# define BOOST_PP_INC_128 129\n# define BOOST_PP_INC_129 130\n# define BOOST_PP_INC_130 131\n# define BOOST_PP_INC_131 132\n# define BOOST_PP_INC_132 133\n# define BOOST_PP_INC_133 134\n# define BOOST_PP_INC_134 135\n# define BOOST_PP_INC_135 136\n# define BOOST_PP_INC_136 137\n# define BOOST_PP_INC_137 138\n# define BOOST_PP_INC_138 139\n# define BOOST_PP_INC_139 140\n# define BOOST_PP_INC_140 141\n# define BOOST_PP_INC_141 142\n# define BOOST_PP_INC_142 143\n# define BOOST_PP_INC_143 144\n# define BOOST_PP_INC_144 145\n# define BOOST_PP_INC_145 146\n# define BOOST_PP_INC_146 147\n# define BOOST_PP_INC_147 148\n# define BOOST_PP_INC_148 149\n# define BOOST_PP_INC_149 150\n# define BOOST_PP_INC_150 151\n# define BOOST_PP_INC_151 152\n# define BOOST_PP_INC_152 153\n# define BOOST_PP_INC_153 154\n# define BOOST_PP_INC_154 155\n# define BOOST_PP_INC_155 156\n# define BOOST_PP_INC_156 157\n# define BOOST_PP_INC_157 158\n# define BOOST_PP_INC_158 159\n# define BOOST_PP_INC_159 160\n# define BOOST_PP_INC_160 161\n# define BOOST_PP_INC_161 162\n# define BOOST_PP_INC_162 163\n# define BOOST_PP_INC_163 164\n# define BOOST_PP_INC_164 165\n# define BOOST_PP_INC_165 166\n# define BOOST_PP_INC_166 167\n# define BOOST_PP_INC_167 168\n# define BOOST_PP_INC_168 169\n# define BOOST_PP_INC_169 170\n# define BOOST_PP_INC_170 171\n# define BOOST_PP_INC_171 172\n# define 
BOOST_PP_INC_172 173\n# define BOOST_PP_INC_173 174\n# define BOOST_PP_INC_174 175\n# define BOOST_PP_INC_175 176\n# define BOOST_PP_INC_176 177\n# define BOOST_PP_INC_177 178\n# define BOOST_PP_INC_178 179\n# define BOOST_PP_INC_179 180\n# define BOOST_PP_INC_180 181\n# define BOOST_PP_INC_181 182\n# define BOOST_PP_INC_182 183\n# define BOOST_PP_INC_183 184\n# define BOOST_PP_INC_184 185\n# define BOOST_PP_INC_185 186\n# define BOOST_PP_INC_186 187\n# define BOOST_PP_INC_187 188\n# define BOOST_PP_INC_188 189\n# define BOOST_PP_INC_189 190\n# define BOOST_PP_INC_190 191\n# define BOOST_PP_INC_191 192\n# define BOOST_PP_INC_192 193\n# define BOOST_PP_INC_193 194\n# define BOOST_PP_INC_194 195\n# define BOOST_PP_INC_195 196\n# define BOOST_PP_INC_196 197\n# define BOOST_PP_INC_197 198\n# define BOOST_PP_INC_198 199\n# define BOOST_PP_INC_199 200\n# define BOOST_PP_INC_200 201\n# define BOOST_PP_INC_201 202\n# define BOOST_PP_INC_202 203\n# define BOOST_PP_INC_203 204\n# define BOOST_PP_INC_204 205\n# define BOOST_PP_INC_205 206\n# define BOOST_PP_INC_206 207\n# define BOOST_PP_INC_207 208\n# define BOOST_PP_INC_208 209\n# define BOOST_PP_INC_209 210\n# define BOOST_PP_INC_210 211\n# define BOOST_PP_INC_211 212\n# define BOOST_PP_INC_212 213\n# define BOOST_PP_INC_213 214\n# define BOOST_PP_INC_214 215\n# define BOOST_PP_INC_215 216\n# define BOOST_PP_INC_216 217\n# define BOOST_PP_INC_217 218\n# define BOOST_PP_INC_218 219\n# define BOOST_PP_INC_219 220\n# define BOOST_PP_INC_220 221\n# define BOOST_PP_INC_221 222\n# define BOOST_PP_INC_222 223\n# define BOOST_PP_INC_223 224\n# define BOOST_PP_INC_224 225\n# define BOOST_PP_INC_225 226\n# define BOOST_PP_INC_226 227\n# define BOOST_PP_INC_227 228\n# define BOOST_PP_INC_228 229\n# define BOOST_PP_INC_229 230\n# define BOOST_PP_INC_230 231\n# define BOOST_PP_INC_231 232\n# define BOOST_PP_INC_232 233\n# define BOOST_PP_INC_233 234\n# define BOOST_PP_INC_234 235\n# define BOOST_PP_INC_235 236\n# define 
BOOST_PP_INC_236 237\n# define BOOST_PP_INC_237 238\n# define BOOST_PP_INC_238 239\n# define BOOST_PP_INC_239 240\n# define BOOST_PP_INC_240 241\n# define BOOST_PP_INC_241 242\n# define BOOST_PP_INC_242 243\n# define BOOST_PP_INC_243 244\n# define BOOST_PP_INC_244 245\n# define BOOST_PP_INC_245 246\n# define BOOST_PP_INC_246 247\n# define BOOST_PP_INC_247 248\n# define BOOST_PP_INC_248 249\n# define BOOST_PP_INC_249 250\n# define BOOST_PP_INC_250 251\n# define BOOST_PP_INC_251 252\n# define BOOST_PP_INC_252 253\n# define BOOST_PP_INC_253 254\n# define BOOST_PP_INC_254 255\n# define BOOST_PP_INC_255 256\n# define BOOST_PP_INC_256 256\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/arithmetic/sub.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ARITHMETIC_SUB_HPP\n# define BOOST_PREPROCESSOR_ARITHMETIC_SUB_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/while.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_SUB */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SUB(x, y) BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_WHILE(BOOST_PP_SUB_P, BOOST_PP_SUB_O, (x, y)))\n# else\n#    define BOOST_PP_SUB(x, y) BOOST_PP_SUB_I(x, y)\n#    define BOOST_PP_SUB_I(x, y) BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_WHILE(BOOST_PP_SUB_P, BOOST_PP_SUB_O, (x, y)))\n# endif\n#\n# define BOOST_PP_SUB_P(d, xy) BOOST_PP_TUPLE_ELEM(2, 1, xy)\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SUB_O(d, xy) BOOST_PP_SUB_O_I xy\n# else\n#    define BOOST_PP_SUB_O(d, xy) BOOST_PP_SUB_O_I(BOOST_PP_TUPLE_ELEM(2, 0, xy), BOOST_PP_TUPLE_ELEM(2, 1, xy))\n# endif\n#\n# define BOOST_PP_SUB_O_I(x, y) (BOOST_PP_DEC(x), BOOST_PP_DEC(y))\n#\n# /* BOOST_PP_SUB_D */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SUB_D(d, x, y) BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_WHILE_ ## d(BOOST_PP_SUB_P, BOOST_PP_SUB_O, (x, y)))\n# else\n#    define BOOST_PP_SUB_D(d, x, y) BOOST_PP_SUB_D_I(d, x, y)\n#    define BOOST_PP_SUB_D_I(d, x, y) BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_WHILE_ ## d(BOOST_PP_SUB_P, BOOST_PP_SUB_O, (x, y)))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/array/data.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ARRAY_DATA_HPP\n# define BOOST_PREPROCESSOR_ARRAY_DATA_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_ARRAY_DATA */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ARRAY_DATA(array) BOOST_PP_TUPLE_ELEM(2, 1, array)\n# else\n#    define BOOST_PP_ARRAY_DATA(array) BOOST_PP_ARRAY_DATA_I(array)\n#    define BOOST_PP_ARRAY_DATA_I(array) BOOST_PP_ARRAY_DATA_II array\n#    define BOOST_PP_ARRAY_DATA_II(size, data) data\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/array/elem.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ARRAY_ELEM_HPP\n# define BOOST_PREPROCESSOR_ARRAY_ELEM_HPP\n#\n# include <boost/preprocessor/array/data.hpp>\n# include <boost/preprocessor/array/size.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_ARRAY_ELEM */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ARRAY_ELEM(i, array) BOOST_PP_TUPLE_ELEM(BOOST_PP_ARRAY_SIZE(array), i, BOOST_PP_ARRAY_DATA(array))\n# else\n#    define BOOST_PP_ARRAY_ELEM(i, array) BOOST_PP_ARRAY_ELEM_I(i, array)\n#    define BOOST_PP_ARRAY_ELEM_I(i, array) BOOST_PP_TUPLE_ELEM(BOOST_PP_ARRAY_SIZE(array), i, BOOST_PP_ARRAY_DATA(array))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/array/size.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ARRAY_SIZE_HPP\n# define BOOST_PREPROCESSOR_ARRAY_SIZE_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_ARRAY_SIZE */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ARRAY_SIZE(array) BOOST_PP_TUPLE_ELEM(2, 0, array)\n# else\n#    define BOOST_PP_ARRAY_SIZE(array) BOOST_PP_ARRAY_SIZE_I(array)\n#    define BOOST_PP_ARRAY_SIZE_I(array) BOOST_PP_ARRAY_SIZE_II array\n#    define BOOST_PP_ARRAY_SIZE_II(size, data) size\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/cat.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CAT_HPP\n# define BOOST_PREPROCESSOR_CAT_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_CAT */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_CAT(a, b) BOOST_PP_CAT_I(a, b)\n# else\n#    define BOOST_PP_CAT(a, b) BOOST_PP_CAT_OO((a, b))\n#    define BOOST_PP_CAT_OO(par) BOOST_PP_CAT_I ## par\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_CAT_I(a, b) a ## b\n# else\n#    define BOOST_PP_CAT_I(a, b) BOOST_PP_CAT_II(~, a ## b)\n#    define BOOST_PP_CAT_II(p, res) res\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/comma_if.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_COMMA_IF_HPP\n# define BOOST_PREPROCESSOR_COMMA_IF_HPP\n#\n# include <boost/preprocessor/punctuation/comma_if.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/comparison/equal.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_COMPARISON_EQUAL_HPP\n# define BOOST_PREPROCESSOR_COMPARISON_EQUAL_HPP\n#\n# include <boost/preprocessor/comparison/not_equal.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/logical/compl.hpp>\n#\n# /* BOOST_PP_EQUAL */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_EQUAL(x, y) BOOST_PP_COMPL(BOOST_PP_NOT_EQUAL(x, y))\n# else\n#    define BOOST_PP_EQUAL(x, y) BOOST_PP_EQUAL_I(x, y)\n#    define BOOST_PP_EQUAL_I(x, y) BOOST_PP_COMPL(BOOST_PP_NOT_EQUAL(x, y))\n# endif\n#\n# /* BOOST_PP_EQUAL_D */\n#\n# define BOOST_PP_EQUAL_D(d, x, y) BOOST_PP_EQUAL(x, y)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/comparison/less_equal.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_COMPARISON_LESS_EQUAL_HPP\n# define BOOST_PREPROCESSOR_COMPARISON_LESS_EQUAL_HPP\n#\n# include <boost/preprocessor/arithmetic/sub.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/logical/not.hpp>\n#\n# /* BOOST_PP_LESS_EQUAL */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_LESS_EQUAL(x, y) BOOST_PP_NOT(BOOST_PP_SUB(x, y))\n# else\n#    define BOOST_PP_LESS_EQUAL(x, y) BOOST_PP_LESS_EQUAL_I(x, y)\n#    define BOOST_PP_LESS_EQUAL_I(x, y) BOOST_PP_NOT(BOOST_PP_SUB(x, y))\n# endif\n#\n# /* BOOST_PP_LESS_EQUAL_D */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_LESS_EQUAL_D(d, x, y) BOOST_PP_NOT(BOOST_PP_SUB_D(d, x, y))\n# else\n#    define BOOST_PP_LESS_EQUAL_D(d, x, y) BOOST_PP_LESS_EQUAL_D_I(d, x, y)\n#    define BOOST_PP_LESS_EQUAL_D_I(d, x, y) BOOST_PP_NOT(BOOST_PP_SUB_D(d, x, y))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/comparison/not_equal.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_COMPARISON_NOT_EQUAL_HPP\n# define BOOST_PREPROCESSOR_COMPARISON_NOT_EQUAL_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n#\n# /* BOOST_PP_NOT_EQUAL */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_NOT_EQUAL(x, y) BOOST_PP_NOT_EQUAL_I(x, y)\n# else\n#    define BOOST_PP_NOT_EQUAL(x, y) BOOST_PP_NOT_EQUAL_OO((x, y))\n#    define BOOST_PP_NOT_EQUAL_OO(par) BOOST_PP_NOT_EQUAL_I ## par\n# endif\n#\n# define BOOST_PP_NOT_EQUAL_I(x, y) BOOST_PP_CAT(BOOST_PP_NOT_EQUAL_CHECK_, BOOST_PP_NOT_EQUAL_ ## x(0, BOOST_PP_NOT_EQUAL_ ## y))\n#\n# /* BOOST_PP_NOT_EQUAL_D */\n#\n# define BOOST_PP_NOT_EQUAL_D(d, x, y) BOOST_PP_NOT_EQUAL(x, y)\n#\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NIL 1\n#\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_0(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_1(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_2(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_3(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_4(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_5(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_6(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_7(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_8(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_9(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_10(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_11(c, y) 
0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_12(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_13(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_14(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_15(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_16(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_17(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_18(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_19(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_20(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_21(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_22(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_23(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_24(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_25(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_26(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_27(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_28(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_29(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_30(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_31(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_32(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_33(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_34(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_35(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_36(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_37(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_38(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_39(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_40(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_41(c, y) 0\n# define 
BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_42(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_43(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_44(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_45(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_46(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_47(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_48(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_49(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_50(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_51(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_52(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_53(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_54(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_55(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_56(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_57(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_58(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_59(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_60(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_61(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_62(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_63(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_64(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_65(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_66(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_67(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_68(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_69(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_70(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_71(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_72(c, 
y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_73(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_74(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_75(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_76(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_77(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_78(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_79(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_80(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_81(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_82(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_83(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_84(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_85(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_86(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_87(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_88(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_89(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_90(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_91(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_92(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_93(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_94(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_95(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_96(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_97(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_98(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_99(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_100(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_101(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_102(c, y) 0\n# define 
BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_103(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_104(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_105(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_106(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_107(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_108(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_109(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_110(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_111(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_112(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_113(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_114(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_115(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_116(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_117(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_118(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_119(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_120(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_121(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_122(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_123(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_124(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_125(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_126(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_127(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_128(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_129(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_130(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_131(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_132(c, y) 0\n# define 
BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_133(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_134(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_135(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_136(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_137(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_138(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_139(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_140(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_141(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_142(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_143(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_144(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_145(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_146(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_147(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_148(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_149(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_150(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_151(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_152(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_153(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_154(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_155(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_156(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_157(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_158(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_159(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_160(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_161(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_162(c, y) 0\n# define 
BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_163(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_164(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_165(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_166(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_167(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_168(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_169(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_170(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_171(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_172(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_173(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_174(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_175(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_176(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_177(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_178(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_179(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_180(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_181(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_182(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_183(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_184(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_185(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_186(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_187(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_188(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_189(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_190(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_191(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_192(c, y) 0\n# define 
BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_193(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_194(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_195(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_196(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_197(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_198(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_199(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_200(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_201(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_202(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_203(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_204(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_205(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_206(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_207(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_208(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_209(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_210(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_211(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_212(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_213(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_214(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_215(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_216(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_217(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_218(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_219(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_220(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_221(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_222(c, y) 0\n# define 
BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_223(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_224(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_225(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_226(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_227(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_228(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_229(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_230(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_231(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_232(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_233(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_234(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_235(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_236(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_237(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_238(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_239(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_240(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_241(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_242(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_243(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_244(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_245(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_246(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_247(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_248(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_249(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_250(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_251(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_252(c, y) 0\n# define 
BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_253(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_254(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_255(c, y) 0\n# define BOOST_PP_NOT_EQUAL_CHECK_BOOST_PP_NOT_EQUAL_256(c, y) 0\n#\n#if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n#    define BOOST_PP_NOT_EQUAL_0(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_1(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_2(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_3(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_4(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_5(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_6(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_7(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_8(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_9(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_10(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_11(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_12(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_13(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_14(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_15(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_16(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_17(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_18(c, y) BOOST_PP_IIF(c, 
BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_19(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_20(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_21(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_22(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_23(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_24(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_25(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_26(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_27(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_28(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_29(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_30(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_31(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_32(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_33(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_34(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_35(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_36(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_37(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_38(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_39(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_40(c, y) 
BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_41(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_42(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_43(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_44(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_45(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_46(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_47(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_48(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_49(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_50(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_51(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_52(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_53(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_54(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_55(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_56(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_57(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_58(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_59(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_60(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_61(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_62(c, 
y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_63(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_64(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_65(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_66(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_67(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_68(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_69(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_70(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_71(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_72(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_73(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_74(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_75(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_76(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_77(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_78(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_79(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_80(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_81(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_82(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_83(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define 
BOOST_PP_NOT_EQUAL_84(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_85(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_86(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_87(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_88(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_89(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_90(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_91(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_92(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_93(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_94(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_95(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_96(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_97(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_98(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_99(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_100(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_101(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_102(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_103(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_104(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_105(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#   
 define BOOST_PP_NOT_EQUAL_106(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_107(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_108(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_109(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_110(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_111(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_112(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_113(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_114(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_115(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_116(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_117(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_118(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_119(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_120(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_121(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_122(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_123(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_124(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_125(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_126(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_127(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, 
y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_128(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_129(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_130(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_131(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_132(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_133(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_134(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_135(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_136(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_137(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_138(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_139(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_140(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_141(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_142(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_143(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_144(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_145(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_146(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_147(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_148(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_149(c, y) 
BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_150(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_151(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_152(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_153(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_154(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_155(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_156(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_157(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_158(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_159(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_160(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_161(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_162(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_163(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_164(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_165(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_166(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_167(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_168(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_169(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_170(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define 
BOOST_PP_NOT_EQUAL_171(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_172(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_173(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_174(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_175(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_176(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_177(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_178(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_179(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_180(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_181(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_182(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_183(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_184(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_185(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_186(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_187(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_188(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_189(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_190(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_191(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_192(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, 
BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_193(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_194(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_195(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_196(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_197(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_198(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_199(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_200(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_201(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_202(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_203(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_204(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_205(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_206(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_207(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_208(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_209(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_210(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_211(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_212(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_213(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_214(c, y) 
BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_215(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_216(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_217(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_218(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_219(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_220(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_221(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_222(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_223(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_224(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_225(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_226(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_227(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_228(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_229(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_230(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_231(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_232(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_233(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_234(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_235(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define 
BOOST_PP_NOT_EQUAL_236(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_237(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_238(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_239(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_240(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_241(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_242(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_243(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_244(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_245(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_246(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_247(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_248(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_249(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_250(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_251(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_252(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_253(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_254(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_255(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_256(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y(1, BOOST_PP_NIL))\n# else\n#    define BOOST_PP_NOT_EQUAL_0(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, 
y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_1(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_2(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_3(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_4(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_5(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_6(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_7(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_8(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_9(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_10(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_11(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_12(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_13(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_14(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_15(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_16(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_17(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_18(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_19(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_20(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_21(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define 
BOOST_PP_NOT_EQUAL_22(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_23(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_24(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_25(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_26(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_27(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_28(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_29(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_30(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_31(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_32(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_33(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_34(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_35(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_36(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_37(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_38(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_39(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_40(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_41(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_42(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_43(c, y) BOOST_PP_IIF(c, 
BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_44(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_45(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_46(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_47(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_48(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_49(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_50(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_51(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_52(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_53(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_54(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_55(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_56(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_57(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_58(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_59(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_60(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_61(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_62(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_63(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_64(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    
define BOOST_PP_NOT_EQUAL_65(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_66(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_67(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_68(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_69(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_70(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_71(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_72(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_73(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_74(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_75(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_76(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_77(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_78(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_79(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_80(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_81(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_82(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_83(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_84(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_85(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_86(c, y) 
BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_87(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_88(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_89(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_90(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_91(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_92(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_93(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_94(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_95(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_96(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_97(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_98(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_99(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_100(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_101(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_102(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_103(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_104(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_105(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_106(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_107(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, 
BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_108(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_109(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_110(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_111(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_112(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_113(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_114(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_115(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_116(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_117(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_118(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_119(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_120(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_121(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_122(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_123(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_124(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_125(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_126(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_127(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_128(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    
define BOOST_PP_NOT_EQUAL_129(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_130(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_131(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_132(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_133(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_134(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_135(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_136(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_137(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_138(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_139(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_140(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_141(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_142(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_143(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_144(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_145(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_146(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_147(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_148(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_149(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define 
BOOST_PP_NOT_EQUAL_150(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_151(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_152(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_153(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_154(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_155(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_156(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_157(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_158(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_159(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_160(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_161(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_162(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_163(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_164(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_165(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_166(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_167(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_168(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_169(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_170(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_171(c, 
y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_172(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_173(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_174(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_175(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_176(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_177(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_178(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_179(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_180(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_181(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_182(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_183(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_184(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_185(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_186(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_187(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_188(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_189(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_190(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_191(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_192(c, y) BOOST_PP_IIF(c, 
BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_193(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_194(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_195(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_196(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_197(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_198(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_199(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_200(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_201(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_202(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_203(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_204(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_205(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_206(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_207(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_208(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_209(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_210(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_211(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_212(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_213(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, 
BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_214(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_215(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_216(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_217(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_218(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_219(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_220(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_221(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_222(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_223(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_224(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_225(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_226(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_227(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_228(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_229(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_230(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_231(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_232(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_233(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_234(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    
define BOOST_PP_NOT_EQUAL_235(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_236(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_237(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_238(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_239(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_240(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_241(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_242(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_243(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_244(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_245(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_246(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_247(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_248(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_249(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_250(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_251(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_252(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_253(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_254(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define BOOST_PP_NOT_EQUAL_255(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n#    define 
BOOST_PP_NOT_EQUAL_256(c, y) BOOST_PP_IIF(c, BOOST_PP_NIL, y##(1, BOOST_PP_NIL))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/config/config.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002-2011.                             *\n#  *     (C) Copyright Edward Diener 2011.                                    *\n#  *     Distributed under the Boost Software License, Version 1.0. (See      *\n#  *     accompanying file LICENSE_1_0.txt or copy at                         *\n#  *     http://www.boost.org/LICENSE_1_0.txt)                                *\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONFIG_CONFIG_HPP\n# define BOOST_PREPROCESSOR_CONFIG_CONFIG_HPP\n#\n# /* BOOST_PP_CONFIG_FLAGS */\n#\n# define BOOST_PP_CONFIG_STRICT() 0x0001\n# define BOOST_PP_CONFIG_IDEAL() 0x0002\n#\n# define BOOST_PP_CONFIG_MSVC() 0x0004\n# define BOOST_PP_CONFIG_MWCC() 0x0008\n# define BOOST_PP_CONFIG_BCC() 0x0010\n# define BOOST_PP_CONFIG_EDG() 0x0020\n# define BOOST_PP_CONFIG_DMC() 0x0040\n#\n# ifndef BOOST_PP_CONFIG_FLAGS\n#    if defined(__GCCXML__)\n#        define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_STRICT())\n#    elif defined(__WAVE__)\n#        define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_STRICT())\n#    elif defined(__MWERKS__) && __MWERKS__ >= 0x3200\n#        define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_STRICT())\n#    elif defined(__EDG__) || defined(__EDG_VERSION__)\n#        if defined(_MSC_VER) && (defined(__INTELLISENSE__) || __EDG_VERSION__ >= 308)\n#            define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_MSVC())\n#        else\n#            define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_EDG() | BOOST_PP_CONFIG_STRICT())\n#        endif\n#    elif defined(__MWERKS__)\n#        define BOOST_PP_CONFIG_FLAGS() 
(BOOST_PP_CONFIG_MWCC())\n#    elif defined(__DMC__)\n#        define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_DMC())\n#    elif defined(__BORLANDC__) && __BORLANDC__ >= 0x581\n#        define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_STRICT())\n#    elif defined(__BORLANDC__) || defined(__IBMC__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)\n#        define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_BCC())\n#    elif defined(_MSC_VER) && !defined(__clang__)\n#        define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_MSVC())\n#    else\n#        define BOOST_PP_CONFIG_FLAGS() (BOOST_PP_CONFIG_STRICT())\n#    endif\n# endif\n#\n# /* BOOST_PP_CONFIG_EXTENDED_LINE_INFO */\n#\n# ifndef BOOST_PP_CONFIG_EXTENDED_LINE_INFO\n#    define BOOST_PP_CONFIG_EXTENDED_LINE_INFO 0\n# endif\n#\n# /* BOOST_PP_CONFIG_ERRORS */\n#\n# ifndef BOOST_PP_CONFIG_ERRORS\n#    ifdef NDEBUG\n#        define BOOST_PP_CONFIG_ERRORS 0\n#    else\n#        define BOOST_PP_CONFIG_ERRORS 1\n#    endif\n# endif\n#\n# /* BOOST_PP_VARIADICS */\n#\n# define BOOST_PP_VARIADICS_MSVC 0\n# if !defined BOOST_PP_VARIADICS\n#    /* variadic support explicitly disabled for all untested compilers */\n#    if defined __GCCXML__ || defined __CUDACC__ || defined __PATHSCALE__ || defined __DMC__ || defined __CODEGEARC__ || defined __BORLANDC__ || defined __MWERKS__ || ( defined __SUNPRO_CC && __SUNPRO_CC < 0x5130 ) || defined __HP_aCC && !defined __EDG__ || defined __MRC__ || defined __SC__ || defined __IBMCPP__ || defined __PGI\n#        define BOOST_PP_VARIADICS 0\n#    /* VC++ (C/C++) */\n#    elif defined _MSC_VER && _MSC_VER >= 1400 && (!defined __EDG__ || defined(__INTELLISENSE__)) && !defined __clang__\n#        define BOOST_PP_VARIADICS 1\n#        undef BOOST_PP_VARIADICS_MSVC\n#        define BOOST_PP_VARIADICS_MSVC 1\n#    /* Wave (C/C++), GCC (C++) */\n#    elif defined __WAVE__ && __WAVE_HAS_VARIADICS__ || defined __GNUC__ && defined __GXX_EXPERIMENTAL_CXX0X__ && __GXX_EXPERIMENTAL_CXX0X__\n#        define 
BOOST_PP_VARIADICS 1\n#    /* EDG-based (C/C++), GCC (C), and unknown (C/C++) */\n#    elif !defined __cplusplus && __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L\n#        define BOOST_PP_VARIADICS 1\n#    else\n#        define BOOST_PP_VARIADICS 0\n#    endif\n# elif !BOOST_PP_VARIADICS + 1 < 2\n#    undef BOOST_PP_VARIADICS\n#    define BOOST_PP_VARIADICS 1\n#    if defined _MSC_VER && _MSC_VER >= 1400 && (defined(__INTELLISENSE__) || !(defined __EDG__ || defined __GCCXML__ || defined __CUDACC__ || defined __PATHSCALE__ || defined __clang__ || defined __DMC__ || defined __CODEGEARC__ || defined __BORLANDC__ || defined __MWERKS__ || defined __SUNPRO_CC || defined __HP_aCC || defined __MRC__ || defined __SC__ || defined __IBMCPP__ || defined __PGI))\n#        undef BOOST_PP_VARIADICS_MSVC\n#        define BOOST_PP_VARIADICS_MSVC 1\n#    endif\n# else\n#    undef BOOST_PP_VARIADICS\n#    define BOOST_PP_VARIADICS 0\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/detail/dmc/while.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_DETAIL_WHILE_HPP\n# define BOOST_PREPROCESSOR_CONTROL_DETAIL_WHILE_HPP\n#\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_WHILE_1(p, o, s) BOOST_PP_WHILE_1_C(BOOST_PP_BOOL(p##(2, s)), p, o, s)\n# define BOOST_PP_WHILE_2(p, o, s) BOOST_PP_WHILE_2_C(BOOST_PP_BOOL(p##(3, s)), p, o, s)\n# define BOOST_PP_WHILE_3(p, o, s) BOOST_PP_WHILE_3_C(BOOST_PP_BOOL(p##(4, s)), p, o, s)\n# define BOOST_PP_WHILE_4(p, o, s) BOOST_PP_WHILE_4_C(BOOST_PP_BOOL(p##(5, s)), p, o, s)\n# define BOOST_PP_WHILE_5(p, o, s) BOOST_PP_WHILE_5_C(BOOST_PP_BOOL(p##(6, s)), p, o, s)\n# define BOOST_PP_WHILE_6(p, o, s) BOOST_PP_WHILE_6_C(BOOST_PP_BOOL(p##(7, s)), p, o, s)\n# define BOOST_PP_WHILE_7(p, o, s) BOOST_PP_WHILE_7_C(BOOST_PP_BOOL(p##(8, s)), p, o, s)\n# define BOOST_PP_WHILE_8(p, o, s) BOOST_PP_WHILE_8_C(BOOST_PP_BOOL(p##(9, s)), p, o, s)\n# define BOOST_PP_WHILE_9(p, o, s) BOOST_PP_WHILE_9_C(BOOST_PP_BOOL(p##(10, s)), p, o, s)\n# define BOOST_PP_WHILE_10(p, o, s) BOOST_PP_WHILE_10_C(BOOST_PP_BOOL(p##(11, s)), p, o, s)\n# define BOOST_PP_WHILE_11(p, o, s) BOOST_PP_WHILE_11_C(BOOST_PP_BOOL(p##(12, s)), p, o, s)\n# define BOOST_PP_WHILE_12(p, o, s) BOOST_PP_WHILE_12_C(BOOST_PP_BOOL(p##(13, s)), p, o, s)\n# define BOOST_PP_WHILE_13(p, o, s) BOOST_PP_WHILE_13_C(BOOST_PP_BOOL(p##(14, s)), p, o, s)\n# define BOOST_PP_WHILE_14(p, o, s) BOOST_PP_WHILE_14_C(BOOST_PP_BOOL(p##(15, s)), p, o, s)\n# define BOOST_PP_WHILE_15(p, o, s) BOOST_PP_WHILE_15_C(BOOST_PP_BOOL(p##(16, s)), 
p, o, s)\n# define BOOST_PP_WHILE_16(p, o, s) BOOST_PP_WHILE_16_C(BOOST_PP_BOOL(p##(17, s)), p, o, s)\n# define BOOST_PP_WHILE_17(p, o, s) BOOST_PP_WHILE_17_C(BOOST_PP_BOOL(p##(18, s)), p, o, s)\n# define BOOST_PP_WHILE_18(p, o, s) BOOST_PP_WHILE_18_C(BOOST_PP_BOOL(p##(19, s)), p, o, s)\n# define BOOST_PP_WHILE_19(p, o, s) BOOST_PP_WHILE_19_C(BOOST_PP_BOOL(p##(20, s)), p, o, s)\n# define BOOST_PP_WHILE_20(p, o, s) BOOST_PP_WHILE_20_C(BOOST_PP_BOOL(p##(21, s)), p, o, s)\n# define BOOST_PP_WHILE_21(p, o, s) BOOST_PP_WHILE_21_C(BOOST_PP_BOOL(p##(22, s)), p, o, s)\n# define BOOST_PP_WHILE_22(p, o, s) BOOST_PP_WHILE_22_C(BOOST_PP_BOOL(p##(23, s)), p, o, s)\n# define BOOST_PP_WHILE_23(p, o, s) BOOST_PP_WHILE_23_C(BOOST_PP_BOOL(p##(24, s)), p, o, s)\n# define BOOST_PP_WHILE_24(p, o, s) BOOST_PP_WHILE_24_C(BOOST_PP_BOOL(p##(25, s)), p, o, s)\n# define BOOST_PP_WHILE_25(p, o, s) BOOST_PP_WHILE_25_C(BOOST_PP_BOOL(p##(26, s)), p, o, s)\n# define BOOST_PP_WHILE_26(p, o, s) BOOST_PP_WHILE_26_C(BOOST_PP_BOOL(p##(27, s)), p, o, s)\n# define BOOST_PP_WHILE_27(p, o, s) BOOST_PP_WHILE_27_C(BOOST_PP_BOOL(p##(28, s)), p, o, s)\n# define BOOST_PP_WHILE_28(p, o, s) BOOST_PP_WHILE_28_C(BOOST_PP_BOOL(p##(29, s)), p, o, s)\n# define BOOST_PP_WHILE_29(p, o, s) BOOST_PP_WHILE_29_C(BOOST_PP_BOOL(p##(30, s)), p, o, s)\n# define BOOST_PP_WHILE_30(p, o, s) BOOST_PP_WHILE_30_C(BOOST_PP_BOOL(p##(31, s)), p, o, s)\n# define BOOST_PP_WHILE_31(p, o, s) BOOST_PP_WHILE_31_C(BOOST_PP_BOOL(p##(32, s)), p, o, s)\n# define BOOST_PP_WHILE_32(p, o, s) BOOST_PP_WHILE_32_C(BOOST_PP_BOOL(p##(33, s)), p, o, s)\n# define BOOST_PP_WHILE_33(p, o, s) BOOST_PP_WHILE_33_C(BOOST_PP_BOOL(p##(34, s)), p, o, s)\n# define BOOST_PP_WHILE_34(p, o, s) BOOST_PP_WHILE_34_C(BOOST_PP_BOOL(p##(35, s)), p, o, s)\n# define BOOST_PP_WHILE_35(p, o, s) BOOST_PP_WHILE_35_C(BOOST_PP_BOOL(p##(36, s)), p, o, s)\n# define BOOST_PP_WHILE_36(p, o, s) BOOST_PP_WHILE_36_C(BOOST_PP_BOOL(p##(37, s)), p, o, s)\n# define BOOST_PP_WHILE_37(p, o, s) 
BOOST_PP_WHILE_37_C(BOOST_PP_BOOL(p##(38, s)), p, o, s)\n# define BOOST_PP_WHILE_38(p, o, s) BOOST_PP_WHILE_38_C(BOOST_PP_BOOL(p##(39, s)), p, o, s)\n# define BOOST_PP_WHILE_39(p, o, s) BOOST_PP_WHILE_39_C(BOOST_PP_BOOL(p##(40, s)), p, o, s)\n# define BOOST_PP_WHILE_40(p, o, s) BOOST_PP_WHILE_40_C(BOOST_PP_BOOL(p##(41, s)), p, o, s)\n# define BOOST_PP_WHILE_41(p, o, s) BOOST_PP_WHILE_41_C(BOOST_PP_BOOL(p##(42, s)), p, o, s)\n# define BOOST_PP_WHILE_42(p, o, s) BOOST_PP_WHILE_42_C(BOOST_PP_BOOL(p##(43, s)), p, o, s)\n# define BOOST_PP_WHILE_43(p, o, s) BOOST_PP_WHILE_43_C(BOOST_PP_BOOL(p##(44, s)), p, o, s)\n# define BOOST_PP_WHILE_44(p, o, s) BOOST_PP_WHILE_44_C(BOOST_PP_BOOL(p##(45, s)), p, o, s)\n# define BOOST_PP_WHILE_45(p, o, s) BOOST_PP_WHILE_45_C(BOOST_PP_BOOL(p##(46, s)), p, o, s)\n# define BOOST_PP_WHILE_46(p, o, s) BOOST_PP_WHILE_46_C(BOOST_PP_BOOL(p##(47, s)), p, o, s)\n# define BOOST_PP_WHILE_47(p, o, s) BOOST_PP_WHILE_47_C(BOOST_PP_BOOL(p##(48, s)), p, o, s)\n# define BOOST_PP_WHILE_48(p, o, s) BOOST_PP_WHILE_48_C(BOOST_PP_BOOL(p##(49, s)), p, o, s)\n# define BOOST_PP_WHILE_49(p, o, s) BOOST_PP_WHILE_49_C(BOOST_PP_BOOL(p##(50, s)), p, o, s)\n# define BOOST_PP_WHILE_50(p, o, s) BOOST_PP_WHILE_50_C(BOOST_PP_BOOL(p##(51, s)), p, o, s)\n# define BOOST_PP_WHILE_51(p, o, s) BOOST_PP_WHILE_51_C(BOOST_PP_BOOL(p##(52, s)), p, o, s)\n# define BOOST_PP_WHILE_52(p, o, s) BOOST_PP_WHILE_52_C(BOOST_PP_BOOL(p##(53, s)), p, o, s)\n# define BOOST_PP_WHILE_53(p, o, s) BOOST_PP_WHILE_53_C(BOOST_PP_BOOL(p##(54, s)), p, o, s)\n# define BOOST_PP_WHILE_54(p, o, s) BOOST_PP_WHILE_54_C(BOOST_PP_BOOL(p##(55, s)), p, o, s)\n# define BOOST_PP_WHILE_55(p, o, s) BOOST_PP_WHILE_55_C(BOOST_PP_BOOL(p##(56, s)), p, o, s)\n# define BOOST_PP_WHILE_56(p, o, s) BOOST_PP_WHILE_56_C(BOOST_PP_BOOL(p##(57, s)), p, o, s)\n# define BOOST_PP_WHILE_57(p, o, s) BOOST_PP_WHILE_57_C(BOOST_PP_BOOL(p##(58, s)), p, o, s)\n# define BOOST_PP_WHILE_58(p, o, s) BOOST_PP_WHILE_58_C(BOOST_PP_BOOL(p##(59, s)), 
p, o, s)\n# define BOOST_PP_WHILE_59(p, o, s) BOOST_PP_WHILE_59_C(BOOST_PP_BOOL(p##(60, s)), p, o, s)\n# define BOOST_PP_WHILE_60(p, o, s) BOOST_PP_WHILE_60_C(BOOST_PP_BOOL(p##(61, s)), p, o, s)\n# define BOOST_PP_WHILE_61(p, o, s) BOOST_PP_WHILE_61_C(BOOST_PP_BOOL(p##(62, s)), p, o, s)\n# define BOOST_PP_WHILE_62(p, o, s) BOOST_PP_WHILE_62_C(BOOST_PP_BOOL(p##(63, s)), p, o, s)\n# define BOOST_PP_WHILE_63(p, o, s) BOOST_PP_WHILE_63_C(BOOST_PP_BOOL(p##(64, s)), p, o, s)\n# define BOOST_PP_WHILE_64(p, o, s) BOOST_PP_WHILE_64_C(BOOST_PP_BOOL(p##(65, s)), p, o, s)\n# define BOOST_PP_WHILE_65(p, o, s) BOOST_PP_WHILE_65_C(BOOST_PP_BOOL(p##(66, s)), p, o, s)\n# define BOOST_PP_WHILE_66(p, o, s) BOOST_PP_WHILE_66_C(BOOST_PP_BOOL(p##(67, s)), p, o, s)\n# define BOOST_PP_WHILE_67(p, o, s) BOOST_PP_WHILE_67_C(BOOST_PP_BOOL(p##(68, s)), p, o, s)\n# define BOOST_PP_WHILE_68(p, o, s) BOOST_PP_WHILE_68_C(BOOST_PP_BOOL(p##(69, s)), p, o, s)\n# define BOOST_PP_WHILE_69(p, o, s) BOOST_PP_WHILE_69_C(BOOST_PP_BOOL(p##(70, s)), p, o, s)\n# define BOOST_PP_WHILE_70(p, o, s) BOOST_PP_WHILE_70_C(BOOST_PP_BOOL(p##(71, s)), p, o, s)\n# define BOOST_PP_WHILE_71(p, o, s) BOOST_PP_WHILE_71_C(BOOST_PP_BOOL(p##(72, s)), p, o, s)\n# define BOOST_PP_WHILE_72(p, o, s) BOOST_PP_WHILE_72_C(BOOST_PP_BOOL(p##(73, s)), p, o, s)\n# define BOOST_PP_WHILE_73(p, o, s) BOOST_PP_WHILE_73_C(BOOST_PP_BOOL(p##(74, s)), p, o, s)\n# define BOOST_PP_WHILE_74(p, o, s) BOOST_PP_WHILE_74_C(BOOST_PP_BOOL(p##(75, s)), p, o, s)\n# define BOOST_PP_WHILE_75(p, o, s) BOOST_PP_WHILE_75_C(BOOST_PP_BOOL(p##(76, s)), p, o, s)\n# define BOOST_PP_WHILE_76(p, o, s) BOOST_PP_WHILE_76_C(BOOST_PP_BOOL(p##(77, s)), p, o, s)\n# define BOOST_PP_WHILE_77(p, o, s) BOOST_PP_WHILE_77_C(BOOST_PP_BOOL(p##(78, s)), p, o, s)\n# define BOOST_PP_WHILE_78(p, o, s) BOOST_PP_WHILE_78_C(BOOST_PP_BOOL(p##(79, s)), p, o, s)\n# define BOOST_PP_WHILE_79(p, o, s) BOOST_PP_WHILE_79_C(BOOST_PP_BOOL(p##(80, s)), p, o, s)\n# define BOOST_PP_WHILE_80(p, o, s) 
BOOST_PP_WHILE_80_C(BOOST_PP_BOOL(p##(81, s)), p, o, s)\n# define BOOST_PP_WHILE_81(p, o, s) BOOST_PP_WHILE_81_C(BOOST_PP_BOOL(p##(82, s)), p, o, s)\n# define BOOST_PP_WHILE_82(p, o, s) BOOST_PP_WHILE_82_C(BOOST_PP_BOOL(p##(83, s)), p, o, s)\n# define BOOST_PP_WHILE_83(p, o, s) BOOST_PP_WHILE_83_C(BOOST_PP_BOOL(p##(84, s)), p, o, s)\n# define BOOST_PP_WHILE_84(p, o, s) BOOST_PP_WHILE_84_C(BOOST_PP_BOOL(p##(85, s)), p, o, s)\n# define BOOST_PP_WHILE_85(p, o, s) BOOST_PP_WHILE_85_C(BOOST_PP_BOOL(p##(86, s)), p, o, s)\n# define BOOST_PP_WHILE_86(p, o, s) BOOST_PP_WHILE_86_C(BOOST_PP_BOOL(p##(87, s)), p, o, s)\n# define BOOST_PP_WHILE_87(p, o, s) BOOST_PP_WHILE_87_C(BOOST_PP_BOOL(p##(88, s)), p, o, s)\n# define BOOST_PP_WHILE_88(p, o, s) BOOST_PP_WHILE_88_C(BOOST_PP_BOOL(p##(89, s)), p, o, s)\n# define BOOST_PP_WHILE_89(p, o, s) BOOST_PP_WHILE_89_C(BOOST_PP_BOOL(p##(90, s)), p, o, s)\n# define BOOST_PP_WHILE_90(p, o, s) BOOST_PP_WHILE_90_C(BOOST_PP_BOOL(p##(91, s)), p, o, s)\n# define BOOST_PP_WHILE_91(p, o, s) BOOST_PP_WHILE_91_C(BOOST_PP_BOOL(p##(92, s)), p, o, s)\n# define BOOST_PP_WHILE_92(p, o, s) BOOST_PP_WHILE_92_C(BOOST_PP_BOOL(p##(93, s)), p, o, s)\n# define BOOST_PP_WHILE_93(p, o, s) BOOST_PP_WHILE_93_C(BOOST_PP_BOOL(p##(94, s)), p, o, s)\n# define BOOST_PP_WHILE_94(p, o, s) BOOST_PP_WHILE_94_C(BOOST_PP_BOOL(p##(95, s)), p, o, s)\n# define BOOST_PP_WHILE_95(p, o, s) BOOST_PP_WHILE_95_C(BOOST_PP_BOOL(p##(96, s)), p, o, s)\n# define BOOST_PP_WHILE_96(p, o, s) BOOST_PP_WHILE_96_C(BOOST_PP_BOOL(p##(97, s)), p, o, s)\n# define BOOST_PP_WHILE_97(p, o, s) BOOST_PP_WHILE_97_C(BOOST_PP_BOOL(p##(98, s)), p, o, s)\n# define BOOST_PP_WHILE_98(p, o, s) BOOST_PP_WHILE_98_C(BOOST_PP_BOOL(p##(99, s)), p, o, s)\n# define BOOST_PP_WHILE_99(p, o, s) BOOST_PP_WHILE_99_C(BOOST_PP_BOOL(p##(100, s)), p, o, s)\n# define BOOST_PP_WHILE_100(p, o, s) BOOST_PP_WHILE_100_C(BOOST_PP_BOOL(p##(101, s)), p, o, s)\n# define BOOST_PP_WHILE_101(p, o, s) 
BOOST_PP_WHILE_101_C(BOOST_PP_BOOL(p##(102, s)), p, o, s)\n# define BOOST_PP_WHILE_102(p, o, s) BOOST_PP_WHILE_102_C(BOOST_PP_BOOL(p##(103, s)), p, o, s)\n# define BOOST_PP_WHILE_103(p, o, s) BOOST_PP_WHILE_103_C(BOOST_PP_BOOL(p##(104, s)), p, o, s)\n# define BOOST_PP_WHILE_104(p, o, s) BOOST_PP_WHILE_104_C(BOOST_PP_BOOL(p##(105, s)), p, o, s)\n# define BOOST_PP_WHILE_105(p, o, s) BOOST_PP_WHILE_105_C(BOOST_PP_BOOL(p##(106, s)), p, o, s)\n# define BOOST_PP_WHILE_106(p, o, s) BOOST_PP_WHILE_106_C(BOOST_PP_BOOL(p##(107, s)), p, o, s)\n# define BOOST_PP_WHILE_107(p, o, s) BOOST_PP_WHILE_107_C(BOOST_PP_BOOL(p##(108, s)), p, o, s)\n# define BOOST_PP_WHILE_108(p, o, s) BOOST_PP_WHILE_108_C(BOOST_PP_BOOL(p##(109, s)), p, o, s)\n# define BOOST_PP_WHILE_109(p, o, s) BOOST_PP_WHILE_109_C(BOOST_PP_BOOL(p##(110, s)), p, o, s)\n# define BOOST_PP_WHILE_110(p, o, s) BOOST_PP_WHILE_110_C(BOOST_PP_BOOL(p##(111, s)), p, o, s)\n# define BOOST_PP_WHILE_111(p, o, s) BOOST_PP_WHILE_111_C(BOOST_PP_BOOL(p##(112, s)), p, o, s)\n# define BOOST_PP_WHILE_112(p, o, s) BOOST_PP_WHILE_112_C(BOOST_PP_BOOL(p##(113, s)), p, o, s)\n# define BOOST_PP_WHILE_113(p, o, s) BOOST_PP_WHILE_113_C(BOOST_PP_BOOL(p##(114, s)), p, o, s)\n# define BOOST_PP_WHILE_114(p, o, s) BOOST_PP_WHILE_114_C(BOOST_PP_BOOL(p##(115, s)), p, o, s)\n# define BOOST_PP_WHILE_115(p, o, s) BOOST_PP_WHILE_115_C(BOOST_PP_BOOL(p##(116, s)), p, o, s)\n# define BOOST_PP_WHILE_116(p, o, s) BOOST_PP_WHILE_116_C(BOOST_PP_BOOL(p##(117, s)), p, o, s)\n# define BOOST_PP_WHILE_117(p, o, s) BOOST_PP_WHILE_117_C(BOOST_PP_BOOL(p##(118, s)), p, o, s)\n# define BOOST_PP_WHILE_118(p, o, s) BOOST_PP_WHILE_118_C(BOOST_PP_BOOL(p##(119, s)), p, o, s)\n# define BOOST_PP_WHILE_119(p, o, s) BOOST_PP_WHILE_119_C(BOOST_PP_BOOL(p##(120, s)), p, o, s)\n# define BOOST_PP_WHILE_120(p, o, s) BOOST_PP_WHILE_120_C(BOOST_PP_BOOL(p##(121, s)), p, o, s)\n# define BOOST_PP_WHILE_121(p, o, s) BOOST_PP_WHILE_121_C(BOOST_PP_BOOL(p##(122, s)), p, o, s)\n# define 
BOOST_PP_WHILE_122(p, o, s) BOOST_PP_WHILE_122_C(BOOST_PP_BOOL(p##(123, s)), p, o, s)\n# define BOOST_PP_WHILE_123(p, o, s) BOOST_PP_WHILE_123_C(BOOST_PP_BOOL(p##(124, s)), p, o, s)\n# define BOOST_PP_WHILE_124(p, o, s) BOOST_PP_WHILE_124_C(BOOST_PP_BOOL(p##(125, s)), p, o, s)\n# define BOOST_PP_WHILE_125(p, o, s) BOOST_PP_WHILE_125_C(BOOST_PP_BOOL(p##(126, s)), p, o, s)\n# define BOOST_PP_WHILE_126(p, o, s) BOOST_PP_WHILE_126_C(BOOST_PP_BOOL(p##(127, s)), p, o, s)\n# define BOOST_PP_WHILE_127(p, o, s) BOOST_PP_WHILE_127_C(BOOST_PP_BOOL(p##(128, s)), p, o, s)\n# define BOOST_PP_WHILE_128(p, o, s) BOOST_PP_WHILE_128_C(BOOST_PP_BOOL(p##(129, s)), p, o, s)\n# define BOOST_PP_WHILE_129(p, o, s) BOOST_PP_WHILE_129_C(BOOST_PP_BOOL(p##(130, s)), p, o, s)\n# define BOOST_PP_WHILE_130(p, o, s) BOOST_PP_WHILE_130_C(BOOST_PP_BOOL(p##(131, s)), p, o, s)\n# define BOOST_PP_WHILE_131(p, o, s) BOOST_PP_WHILE_131_C(BOOST_PP_BOOL(p##(132, s)), p, o, s)\n# define BOOST_PP_WHILE_132(p, o, s) BOOST_PP_WHILE_132_C(BOOST_PP_BOOL(p##(133, s)), p, o, s)\n# define BOOST_PP_WHILE_133(p, o, s) BOOST_PP_WHILE_133_C(BOOST_PP_BOOL(p##(134, s)), p, o, s)\n# define BOOST_PP_WHILE_134(p, o, s) BOOST_PP_WHILE_134_C(BOOST_PP_BOOL(p##(135, s)), p, o, s)\n# define BOOST_PP_WHILE_135(p, o, s) BOOST_PP_WHILE_135_C(BOOST_PP_BOOL(p##(136, s)), p, o, s)\n# define BOOST_PP_WHILE_136(p, o, s) BOOST_PP_WHILE_136_C(BOOST_PP_BOOL(p##(137, s)), p, o, s)\n# define BOOST_PP_WHILE_137(p, o, s) BOOST_PP_WHILE_137_C(BOOST_PP_BOOL(p##(138, s)), p, o, s)\n# define BOOST_PP_WHILE_138(p, o, s) BOOST_PP_WHILE_138_C(BOOST_PP_BOOL(p##(139, s)), p, o, s)\n# define BOOST_PP_WHILE_139(p, o, s) BOOST_PP_WHILE_139_C(BOOST_PP_BOOL(p##(140, s)), p, o, s)\n# define BOOST_PP_WHILE_140(p, o, s) BOOST_PP_WHILE_140_C(BOOST_PP_BOOL(p##(141, s)), p, o, s)\n# define BOOST_PP_WHILE_141(p, o, s) BOOST_PP_WHILE_141_C(BOOST_PP_BOOL(p##(142, s)), p, o, s)\n# define BOOST_PP_WHILE_142(p, o, s) BOOST_PP_WHILE_142_C(BOOST_PP_BOOL(p##(143, s)), p, 
o, s)\n# define BOOST_PP_WHILE_143(p, o, s) BOOST_PP_WHILE_143_C(BOOST_PP_BOOL(p##(144, s)), p, o, s)\n# define BOOST_PP_WHILE_144(p, o, s) BOOST_PP_WHILE_144_C(BOOST_PP_BOOL(p##(145, s)), p, o, s)\n# define BOOST_PP_WHILE_145(p, o, s) BOOST_PP_WHILE_145_C(BOOST_PP_BOOL(p##(146, s)), p, o, s)\n# define BOOST_PP_WHILE_146(p, o, s) BOOST_PP_WHILE_146_C(BOOST_PP_BOOL(p##(147, s)), p, o, s)\n# define BOOST_PP_WHILE_147(p, o, s) BOOST_PP_WHILE_147_C(BOOST_PP_BOOL(p##(148, s)), p, o, s)\n# define BOOST_PP_WHILE_148(p, o, s) BOOST_PP_WHILE_148_C(BOOST_PP_BOOL(p##(149, s)), p, o, s)\n# define BOOST_PP_WHILE_149(p, o, s) BOOST_PP_WHILE_149_C(BOOST_PP_BOOL(p##(150, s)), p, o, s)\n# define BOOST_PP_WHILE_150(p, o, s) BOOST_PP_WHILE_150_C(BOOST_PP_BOOL(p##(151, s)), p, o, s)\n# define BOOST_PP_WHILE_151(p, o, s) BOOST_PP_WHILE_151_C(BOOST_PP_BOOL(p##(152, s)), p, o, s)\n# define BOOST_PP_WHILE_152(p, o, s) BOOST_PP_WHILE_152_C(BOOST_PP_BOOL(p##(153, s)), p, o, s)\n# define BOOST_PP_WHILE_153(p, o, s) BOOST_PP_WHILE_153_C(BOOST_PP_BOOL(p##(154, s)), p, o, s)\n# define BOOST_PP_WHILE_154(p, o, s) BOOST_PP_WHILE_154_C(BOOST_PP_BOOL(p##(155, s)), p, o, s)\n# define BOOST_PP_WHILE_155(p, o, s) BOOST_PP_WHILE_155_C(BOOST_PP_BOOL(p##(156, s)), p, o, s)\n# define BOOST_PP_WHILE_156(p, o, s) BOOST_PP_WHILE_156_C(BOOST_PP_BOOL(p##(157, s)), p, o, s)\n# define BOOST_PP_WHILE_157(p, o, s) BOOST_PP_WHILE_157_C(BOOST_PP_BOOL(p##(158, s)), p, o, s)\n# define BOOST_PP_WHILE_158(p, o, s) BOOST_PP_WHILE_158_C(BOOST_PP_BOOL(p##(159, s)), p, o, s)\n# define BOOST_PP_WHILE_159(p, o, s) BOOST_PP_WHILE_159_C(BOOST_PP_BOOL(p##(160, s)), p, o, s)\n# define BOOST_PP_WHILE_160(p, o, s) BOOST_PP_WHILE_160_C(BOOST_PP_BOOL(p##(161, s)), p, o, s)\n# define BOOST_PP_WHILE_161(p, o, s) BOOST_PP_WHILE_161_C(BOOST_PP_BOOL(p##(162, s)), p, o, s)\n# define BOOST_PP_WHILE_162(p, o, s) BOOST_PP_WHILE_162_C(BOOST_PP_BOOL(p##(163, s)), p, o, s)\n# define BOOST_PP_WHILE_163(p, o, s) 
BOOST_PP_WHILE_163_C(BOOST_PP_BOOL(p##(164, s)), p, o, s)\n# define BOOST_PP_WHILE_164(p, o, s) BOOST_PP_WHILE_164_C(BOOST_PP_BOOL(p##(165, s)), p, o, s)\n# define BOOST_PP_WHILE_165(p, o, s) BOOST_PP_WHILE_165_C(BOOST_PP_BOOL(p##(166, s)), p, o, s)\n# define BOOST_PP_WHILE_166(p, o, s) BOOST_PP_WHILE_166_C(BOOST_PP_BOOL(p##(167, s)), p, o, s)\n# define BOOST_PP_WHILE_167(p, o, s) BOOST_PP_WHILE_167_C(BOOST_PP_BOOL(p##(168, s)), p, o, s)\n# define BOOST_PP_WHILE_168(p, o, s) BOOST_PP_WHILE_168_C(BOOST_PP_BOOL(p##(169, s)), p, o, s)\n# define BOOST_PP_WHILE_169(p, o, s) BOOST_PP_WHILE_169_C(BOOST_PP_BOOL(p##(170, s)), p, o, s)\n# define BOOST_PP_WHILE_170(p, o, s) BOOST_PP_WHILE_170_C(BOOST_PP_BOOL(p##(171, s)), p, o, s)\n# define BOOST_PP_WHILE_171(p, o, s) BOOST_PP_WHILE_171_C(BOOST_PP_BOOL(p##(172, s)), p, o, s)\n# define BOOST_PP_WHILE_172(p, o, s) BOOST_PP_WHILE_172_C(BOOST_PP_BOOL(p##(173, s)), p, o, s)\n# define BOOST_PP_WHILE_173(p, o, s) BOOST_PP_WHILE_173_C(BOOST_PP_BOOL(p##(174, s)), p, o, s)\n# define BOOST_PP_WHILE_174(p, o, s) BOOST_PP_WHILE_174_C(BOOST_PP_BOOL(p##(175, s)), p, o, s)\n# define BOOST_PP_WHILE_175(p, o, s) BOOST_PP_WHILE_175_C(BOOST_PP_BOOL(p##(176, s)), p, o, s)\n# define BOOST_PP_WHILE_176(p, o, s) BOOST_PP_WHILE_176_C(BOOST_PP_BOOL(p##(177, s)), p, o, s)\n# define BOOST_PP_WHILE_177(p, o, s) BOOST_PP_WHILE_177_C(BOOST_PP_BOOL(p##(178, s)), p, o, s)\n# define BOOST_PP_WHILE_178(p, o, s) BOOST_PP_WHILE_178_C(BOOST_PP_BOOL(p##(179, s)), p, o, s)\n# define BOOST_PP_WHILE_179(p, o, s) BOOST_PP_WHILE_179_C(BOOST_PP_BOOL(p##(180, s)), p, o, s)\n# define BOOST_PP_WHILE_180(p, o, s) BOOST_PP_WHILE_180_C(BOOST_PP_BOOL(p##(181, s)), p, o, s)\n# define BOOST_PP_WHILE_181(p, o, s) BOOST_PP_WHILE_181_C(BOOST_PP_BOOL(p##(182, s)), p, o, s)\n# define BOOST_PP_WHILE_182(p, o, s) BOOST_PP_WHILE_182_C(BOOST_PP_BOOL(p##(183, s)), p, o, s)\n# define BOOST_PP_WHILE_183(p, o, s) BOOST_PP_WHILE_183_C(BOOST_PP_BOOL(p##(184, s)), p, o, s)\n# define 
BOOST_PP_WHILE_184(p, o, s) BOOST_PP_WHILE_184_C(BOOST_PP_BOOL(p##(185, s)), p, o, s)\n# define BOOST_PP_WHILE_185(p, o, s) BOOST_PP_WHILE_185_C(BOOST_PP_BOOL(p##(186, s)), p, o, s)\n# define BOOST_PP_WHILE_186(p, o, s) BOOST_PP_WHILE_186_C(BOOST_PP_BOOL(p##(187, s)), p, o, s)\n# define BOOST_PP_WHILE_187(p, o, s) BOOST_PP_WHILE_187_C(BOOST_PP_BOOL(p##(188, s)), p, o, s)\n# define BOOST_PP_WHILE_188(p, o, s) BOOST_PP_WHILE_188_C(BOOST_PP_BOOL(p##(189, s)), p, o, s)\n# define BOOST_PP_WHILE_189(p, o, s) BOOST_PP_WHILE_189_C(BOOST_PP_BOOL(p##(190, s)), p, o, s)\n# define BOOST_PP_WHILE_190(p, o, s) BOOST_PP_WHILE_190_C(BOOST_PP_BOOL(p##(191, s)), p, o, s)\n# define BOOST_PP_WHILE_191(p, o, s) BOOST_PP_WHILE_191_C(BOOST_PP_BOOL(p##(192, s)), p, o, s)\n# define BOOST_PP_WHILE_192(p, o, s) BOOST_PP_WHILE_192_C(BOOST_PP_BOOL(p##(193, s)), p, o, s)\n# define BOOST_PP_WHILE_193(p, o, s) BOOST_PP_WHILE_193_C(BOOST_PP_BOOL(p##(194, s)), p, o, s)\n# define BOOST_PP_WHILE_194(p, o, s) BOOST_PP_WHILE_194_C(BOOST_PP_BOOL(p##(195, s)), p, o, s)\n# define BOOST_PP_WHILE_195(p, o, s) BOOST_PP_WHILE_195_C(BOOST_PP_BOOL(p##(196, s)), p, o, s)\n# define BOOST_PP_WHILE_196(p, o, s) BOOST_PP_WHILE_196_C(BOOST_PP_BOOL(p##(197, s)), p, o, s)\n# define BOOST_PP_WHILE_197(p, o, s) BOOST_PP_WHILE_197_C(BOOST_PP_BOOL(p##(198, s)), p, o, s)\n# define BOOST_PP_WHILE_198(p, o, s) BOOST_PP_WHILE_198_C(BOOST_PP_BOOL(p##(199, s)), p, o, s)\n# define BOOST_PP_WHILE_199(p, o, s) BOOST_PP_WHILE_199_C(BOOST_PP_BOOL(p##(200, s)), p, o, s)\n# define BOOST_PP_WHILE_200(p, o, s) BOOST_PP_WHILE_200_C(BOOST_PP_BOOL(p##(201, s)), p, o, s)\n# define BOOST_PP_WHILE_201(p, o, s) BOOST_PP_WHILE_201_C(BOOST_PP_BOOL(p##(202, s)), p, o, s)\n# define BOOST_PP_WHILE_202(p, o, s) BOOST_PP_WHILE_202_C(BOOST_PP_BOOL(p##(203, s)), p, o, s)\n# define BOOST_PP_WHILE_203(p, o, s) BOOST_PP_WHILE_203_C(BOOST_PP_BOOL(p##(204, s)), p, o, s)\n# define BOOST_PP_WHILE_204(p, o, s) BOOST_PP_WHILE_204_C(BOOST_PP_BOOL(p##(205, s)), p, 
o, s)\n# define BOOST_PP_WHILE_205(p, o, s) BOOST_PP_WHILE_205_C(BOOST_PP_BOOL(p##(206, s)), p, o, s)\n# define BOOST_PP_WHILE_206(p, o, s) BOOST_PP_WHILE_206_C(BOOST_PP_BOOL(p##(207, s)), p, o, s)\n# define BOOST_PP_WHILE_207(p, o, s) BOOST_PP_WHILE_207_C(BOOST_PP_BOOL(p##(208, s)), p, o, s)\n# define BOOST_PP_WHILE_208(p, o, s) BOOST_PP_WHILE_208_C(BOOST_PP_BOOL(p##(209, s)), p, o, s)\n# define BOOST_PP_WHILE_209(p, o, s) BOOST_PP_WHILE_209_C(BOOST_PP_BOOL(p##(210, s)), p, o, s)\n# define BOOST_PP_WHILE_210(p, o, s) BOOST_PP_WHILE_210_C(BOOST_PP_BOOL(p##(211, s)), p, o, s)\n# define BOOST_PP_WHILE_211(p, o, s) BOOST_PP_WHILE_211_C(BOOST_PP_BOOL(p##(212, s)), p, o, s)\n# define BOOST_PP_WHILE_212(p, o, s) BOOST_PP_WHILE_212_C(BOOST_PP_BOOL(p##(213, s)), p, o, s)\n# define BOOST_PP_WHILE_213(p, o, s) BOOST_PP_WHILE_213_C(BOOST_PP_BOOL(p##(214, s)), p, o, s)\n# define BOOST_PP_WHILE_214(p, o, s) BOOST_PP_WHILE_214_C(BOOST_PP_BOOL(p##(215, s)), p, o, s)\n# define BOOST_PP_WHILE_215(p, o, s) BOOST_PP_WHILE_215_C(BOOST_PP_BOOL(p##(216, s)), p, o, s)\n# define BOOST_PP_WHILE_216(p, o, s) BOOST_PP_WHILE_216_C(BOOST_PP_BOOL(p##(217, s)), p, o, s)\n# define BOOST_PP_WHILE_217(p, o, s) BOOST_PP_WHILE_217_C(BOOST_PP_BOOL(p##(218, s)), p, o, s)\n# define BOOST_PP_WHILE_218(p, o, s) BOOST_PP_WHILE_218_C(BOOST_PP_BOOL(p##(219, s)), p, o, s)\n# define BOOST_PP_WHILE_219(p, o, s) BOOST_PP_WHILE_219_C(BOOST_PP_BOOL(p##(220, s)), p, o, s)\n# define BOOST_PP_WHILE_220(p, o, s) BOOST_PP_WHILE_220_C(BOOST_PP_BOOL(p##(221, s)), p, o, s)\n# define BOOST_PP_WHILE_221(p, o, s) BOOST_PP_WHILE_221_C(BOOST_PP_BOOL(p##(222, s)), p, o, s)\n# define BOOST_PP_WHILE_222(p, o, s) BOOST_PP_WHILE_222_C(BOOST_PP_BOOL(p##(223, s)), p, o, s)\n# define BOOST_PP_WHILE_223(p, o, s) BOOST_PP_WHILE_223_C(BOOST_PP_BOOL(p##(224, s)), p, o, s)\n# define BOOST_PP_WHILE_224(p, o, s) BOOST_PP_WHILE_224_C(BOOST_PP_BOOL(p##(225, s)), p, o, s)\n# define BOOST_PP_WHILE_225(p, o, s) 
BOOST_PP_WHILE_225_C(BOOST_PP_BOOL(p##(226, s)), p, o, s)\n# define BOOST_PP_WHILE_226(p, o, s) BOOST_PP_WHILE_226_C(BOOST_PP_BOOL(p##(227, s)), p, o, s)\n# define BOOST_PP_WHILE_227(p, o, s) BOOST_PP_WHILE_227_C(BOOST_PP_BOOL(p##(228, s)), p, o, s)\n# define BOOST_PP_WHILE_228(p, o, s) BOOST_PP_WHILE_228_C(BOOST_PP_BOOL(p##(229, s)), p, o, s)\n# define BOOST_PP_WHILE_229(p, o, s) BOOST_PP_WHILE_229_C(BOOST_PP_BOOL(p##(230, s)), p, o, s)\n# define BOOST_PP_WHILE_230(p, o, s) BOOST_PP_WHILE_230_C(BOOST_PP_BOOL(p##(231, s)), p, o, s)\n# define BOOST_PP_WHILE_231(p, o, s) BOOST_PP_WHILE_231_C(BOOST_PP_BOOL(p##(232, s)), p, o, s)\n# define BOOST_PP_WHILE_232(p, o, s) BOOST_PP_WHILE_232_C(BOOST_PP_BOOL(p##(233, s)), p, o, s)\n# define BOOST_PP_WHILE_233(p, o, s) BOOST_PP_WHILE_233_C(BOOST_PP_BOOL(p##(234, s)), p, o, s)\n# define BOOST_PP_WHILE_234(p, o, s) BOOST_PP_WHILE_234_C(BOOST_PP_BOOL(p##(235, s)), p, o, s)\n# define BOOST_PP_WHILE_235(p, o, s) BOOST_PP_WHILE_235_C(BOOST_PP_BOOL(p##(236, s)), p, o, s)\n# define BOOST_PP_WHILE_236(p, o, s) BOOST_PP_WHILE_236_C(BOOST_PP_BOOL(p##(237, s)), p, o, s)\n# define BOOST_PP_WHILE_237(p, o, s) BOOST_PP_WHILE_237_C(BOOST_PP_BOOL(p##(238, s)), p, o, s)\n# define BOOST_PP_WHILE_238(p, o, s) BOOST_PP_WHILE_238_C(BOOST_PP_BOOL(p##(239, s)), p, o, s)\n# define BOOST_PP_WHILE_239(p, o, s) BOOST_PP_WHILE_239_C(BOOST_PP_BOOL(p##(240, s)), p, o, s)\n# define BOOST_PP_WHILE_240(p, o, s) BOOST_PP_WHILE_240_C(BOOST_PP_BOOL(p##(241, s)), p, o, s)\n# define BOOST_PP_WHILE_241(p, o, s) BOOST_PP_WHILE_241_C(BOOST_PP_BOOL(p##(242, s)), p, o, s)\n# define BOOST_PP_WHILE_242(p, o, s) BOOST_PP_WHILE_242_C(BOOST_PP_BOOL(p##(243, s)), p, o, s)\n# define BOOST_PP_WHILE_243(p, o, s) BOOST_PP_WHILE_243_C(BOOST_PP_BOOL(p##(244, s)), p, o, s)\n# define BOOST_PP_WHILE_244(p, o, s) BOOST_PP_WHILE_244_C(BOOST_PP_BOOL(p##(245, s)), p, o, s)\n# define BOOST_PP_WHILE_245(p, o, s) BOOST_PP_WHILE_245_C(BOOST_PP_BOOL(p##(246, s)), p, o, s)\n# define 
BOOST_PP_WHILE_246(p, o, s) BOOST_PP_WHILE_246_C(BOOST_PP_BOOL(p##(247, s)), p, o, s)\n# define BOOST_PP_WHILE_247(p, o, s) BOOST_PP_WHILE_247_C(BOOST_PP_BOOL(p##(248, s)), p, o, s)\n# define BOOST_PP_WHILE_248(p, o, s) BOOST_PP_WHILE_248_C(BOOST_PP_BOOL(p##(249, s)), p, o, s)\n# define BOOST_PP_WHILE_249(p, o, s) BOOST_PP_WHILE_249_C(BOOST_PP_BOOL(p##(250, s)), p, o, s)\n# define BOOST_PP_WHILE_250(p, o, s) BOOST_PP_WHILE_250_C(BOOST_PP_BOOL(p##(251, s)), p, o, s)\n# define BOOST_PP_WHILE_251(p, o, s) BOOST_PP_WHILE_251_C(BOOST_PP_BOOL(p##(252, s)), p, o, s)\n# define BOOST_PP_WHILE_252(p, o, s) BOOST_PP_WHILE_252_C(BOOST_PP_BOOL(p##(253, s)), p, o, s)\n# define BOOST_PP_WHILE_253(p, o, s) BOOST_PP_WHILE_253_C(BOOST_PP_BOOL(p##(254, s)), p, o, s)\n# define BOOST_PP_WHILE_254(p, o, s) BOOST_PP_WHILE_254_C(BOOST_PP_BOOL(p##(255, s)), p, o, s)\n# define BOOST_PP_WHILE_255(p, o, s) BOOST_PP_WHILE_255_C(BOOST_PP_BOOL(p##(256, s)), p, o, s)\n# define BOOST_PP_WHILE_256(p, o, s) BOOST_PP_WHILE_256_C(BOOST_PP_BOOL(p##(257, s)), p, o, s)\n#\n# define BOOST_PP_WHILE_1_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_2, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(2, s))\n# define BOOST_PP_WHILE_2_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_3, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(3, s))\n# define BOOST_PP_WHILE_3_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_4, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(4, s))\n# define BOOST_PP_WHILE_4_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_5, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(5, s))\n# define BOOST_PP_WHILE_5_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_6, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(6, s))\n# define BOOST_PP_WHILE_6_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_7, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(7, s))\n# 
define BOOST_PP_WHILE_7_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_8, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(8, s))\n# define BOOST_PP_WHILE_8_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_9, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(9, s))\n# define BOOST_PP_WHILE_9_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_10, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(10, s))\n# define BOOST_PP_WHILE_10_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_11, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(11, s))\n# define BOOST_PP_WHILE_11_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_12, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(12, s))\n# define BOOST_PP_WHILE_12_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_13, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(13, s))\n# define BOOST_PP_WHILE_13_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_14, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(14, s))\n# define BOOST_PP_WHILE_14_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_15, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(15, s))\n# define BOOST_PP_WHILE_15_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_16, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(16, s))\n# define BOOST_PP_WHILE_16_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_17, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(17, s))\n# define BOOST_PP_WHILE_17_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_18, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(18, s))\n# define BOOST_PP_WHILE_18_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_19, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(19, s))\n# define BOOST_PP_WHILE_19_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_20, 
BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(20, s))\n# define BOOST_PP_WHILE_20_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_21, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(21, s))\n# define BOOST_PP_WHILE_21_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_22, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(22, s))\n# define BOOST_PP_WHILE_22_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_23, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(23, s))\n# define BOOST_PP_WHILE_23_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_24, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(24, s))\n# define BOOST_PP_WHILE_24_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_25, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(25, s))\n# define BOOST_PP_WHILE_25_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_26, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(26, s))\n# define BOOST_PP_WHILE_26_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_27, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(27, s))\n# define BOOST_PP_WHILE_27_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_28, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(28, s))\n# define BOOST_PP_WHILE_28_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_29, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(29, s))\n# define BOOST_PP_WHILE_29_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_30, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(30, s))\n# define BOOST_PP_WHILE_30_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_31, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(31, s))\n# define BOOST_PP_WHILE_31_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_32, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(32, 
s))\n# define BOOST_PP_WHILE_32_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_33, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(33, s))\n# define BOOST_PP_WHILE_33_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_34, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(34, s))\n# define BOOST_PP_WHILE_34_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_35, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(35, s))\n# define BOOST_PP_WHILE_35_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_36, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(36, s))\n# define BOOST_PP_WHILE_36_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_37, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(37, s))\n# define BOOST_PP_WHILE_37_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_38, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(38, s))\n# define BOOST_PP_WHILE_38_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_39, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(39, s))\n# define BOOST_PP_WHILE_39_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_40, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(40, s))\n# define BOOST_PP_WHILE_40_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_41, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(41, s))\n# define BOOST_PP_WHILE_41_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_42, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(42, s))\n# define BOOST_PP_WHILE_42_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_43, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(43, s))\n# define BOOST_PP_WHILE_43_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_44, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(44, s))\n# define BOOST_PP_WHILE_44_C(c, p, o, s) BOOST_PP_IIF(c, 
BOOST_PP_WHILE_45, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(45, s))\n# define BOOST_PP_WHILE_45_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_46, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(46, s))\n# define BOOST_PP_WHILE_46_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_47, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(47, s))\n# define BOOST_PP_WHILE_47_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_48, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(48, s))\n# define BOOST_PP_WHILE_48_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_49, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(49, s))\n# define BOOST_PP_WHILE_49_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_50, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(50, s))\n# define BOOST_PP_WHILE_50_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_51, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(51, s))\n# define BOOST_PP_WHILE_51_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_52, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(52, s))\n# define BOOST_PP_WHILE_52_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_53, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(53, s))\n# define BOOST_PP_WHILE_53_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_54, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(54, s))\n# define BOOST_PP_WHILE_54_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_55, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(55, s))\n# define BOOST_PP_WHILE_55_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_56, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(56, s))\n# define BOOST_PP_WHILE_56_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_57, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, 
BOOST_PP_TUPLE_ELEM_2_1)(57, s))\n# define BOOST_PP_WHILE_57_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_58, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(58, s))\n# define BOOST_PP_WHILE_58_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_59, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(59, s))\n# define BOOST_PP_WHILE_59_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_60, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(60, s))\n# define BOOST_PP_WHILE_60_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_61, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(61, s))\n# define BOOST_PP_WHILE_61_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_62, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(62, s))\n# define BOOST_PP_WHILE_62_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_63, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(63, s))\n# define BOOST_PP_WHILE_63_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_64, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(64, s))\n# define BOOST_PP_WHILE_64_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_65, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(65, s))\n# define BOOST_PP_WHILE_65_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_66, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(66, s))\n# define BOOST_PP_WHILE_66_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_67, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(67, s))\n# define BOOST_PP_WHILE_67_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_68, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(68, s))\n# define BOOST_PP_WHILE_68_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_69, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(69, s))\n# define BOOST_PP_WHILE_69_C(c, p, o, s) 
BOOST_PP_IIF(c, BOOST_PP_WHILE_70, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(70, s))\n# define BOOST_PP_WHILE_70_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_71, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(71, s))\n# define BOOST_PP_WHILE_71_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_72, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(72, s))\n# define BOOST_PP_WHILE_72_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_73, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(73, s))\n# define BOOST_PP_WHILE_73_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_74, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(74, s))\n# define BOOST_PP_WHILE_74_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_75, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(75, s))\n# define BOOST_PP_WHILE_75_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_76, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(76, s))\n# define BOOST_PP_WHILE_76_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_77, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(77, s))\n# define BOOST_PP_WHILE_77_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_78, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(78, s))\n# define BOOST_PP_WHILE_78_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_79, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(79, s))\n# define BOOST_PP_WHILE_79_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_80, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(80, s))\n# define BOOST_PP_WHILE_80_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_81, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(81, s))\n# define BOOST_PP_WHILE_81_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_82, BOOST_PP_TUPLE_ELEM_3_2)(p, o, 
BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(82, s))\n# define BOOST_PP_WHILE_82_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_83, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(83, s))\n# define BOOST_PP_WHILE_83_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_84, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(84, s))\n# define BOOST_PP_WHILE_84_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_85, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(85, s))\n# define BOOST_PP_WHILE_85_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_86, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(86, s))\n# define BOOST_PP_WHILE_86_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_87, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(87, s))\n# define BOOST_PP_WHILE_87_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_88, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(88, s))\n# define BOOST_PP_WHILE_88_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_89, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(89, s))\n# define BOOST_PP_WHILE_89_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_90, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(90, s))\n# define BOOST_PP_WHILE_90_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_91, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(91, s))\n# define BOOST_PP_WHILE_91_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_92, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(92, s))\n# define BOOST_PP_WHILE_92_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_93, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(93, s))\n# define BOOST_PP_WHILE_93_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_94, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(94, s))\n# define 
BOOST_PP_WHILE_94_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_95, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(95, s))\n# define BOOST_PP_WHILE_95_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_96, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(96, s))\n# define BOOST_PP_WHILE_96_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_97, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(97, s))\n# define BOOST_PP_WHILE_97_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_98, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(98, s))\n# define BOOST_PP_WHILE_98_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_99, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(99, s))\n# define BOOST_PP_WHILE_99_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_100, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(100, s))\n# define BOOST_PP_WHILE_100_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_101, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(101, s))\n# define BOOST_PP_WHILE_101_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_102, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(102, s))\n# define BOOST_PP_WHILE_102_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_103, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(103, s))\n# define BOOST_PP_WHILE_103_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_104, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(104, s))\n# define BOOST_PP_WHILE_104_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_105, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(105, s))\n# define BOOST_PP_WHILE_105_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_106, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(106, s))\n# define BOOST_PP_WHILE_106_C(c, p, o, s) BOOST_PP_IIF(c, 
BOOST_PP_WHILE_107, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(107, s))\n# define BOOST_PP_WHILE_107_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_108, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(108, s))\n# define BOOST_PP_WHILE_108_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_109, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(109, s))\n# define BOOST_PP_WHILE_109_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_110, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(110, s))\n# define BOOST_PP_WHILE_110_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_111, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(111, s))\n# define BOOST_PP_WHILE_111_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_112, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(112, s))\n# define BOOST_PP_WHILE_112_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_113, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(113, s))\n# define BOOST_PP_WHILE_113_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_114, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(114, s))\n# define BOOST_PP_WHILE_114_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_115, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(115, s))\n# define BOOST_PP_WHILE_115_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_116, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(116, s))\n# define BOOST_PP_WHILE_116_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_117, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(117, s))\n# define BOOST_PP_WHILE_117_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_118, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(118, s))\n# define BOOST_PP_WHILE_118_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_119, 
BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(119, s))\n# define BOOST_PP_WHILE_119_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_120, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(120, s))\n# define BOOST_PP_WHILE_120_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_121, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(121, s))\n# define BOOST_PP_WHILE_121_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_122, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(122, s))\n# define BOOST_PP_WHILE_122_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_123, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(123, s))\n# define BOOST_PP_WHILE_123_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_124, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(124, s))\n# define BOOST_PP_WHILE_124_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_125, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(125, s))\n# define BOOST_PP_WHILE_125_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_126, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(126, s))\n# define BOOST_PP_WHILE_126_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_127, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(127, s))\n# define BOOST_PP_WHILE_127_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_128, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(128, s))\n# define BOOST_PP_WHILE_128_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_129, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(129, s))\n# define BOOST_PP_WHILE_129_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_130, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(130, s))\n# define BOOST_PP_WHILE_130_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_131, BOOST_PP_TUPLE_ELEM_3_2)(p, o, 
BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(131, s))\n# define BOOST_PP_WHILE_131_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_132, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(132, s))\n# define BOOST_PP_WHILE_132_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_133, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(133, s))\n# define BOOST_PP_WHILE_133_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_134, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(134, s))\n# define BOOST_PP_WHILE_134_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_135, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(135, s))\n# define BOOST_PP_WHILE_135_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_136, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(136, s))\n# define BOOST_PP_WHILE_136_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_137, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(137, s))\n# define BOOST_PP_WHILE_137_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_138, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(138, s))\n# define BOOST_PP_WHILE_138_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_139, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(139, s))\n# define BOOST_PP_WHILE_139_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_140, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(140, s))\n# define BOOST_PP_WHILE_140_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_141, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(141, s))\n# define BOOST_PP_WHILE_141_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_142, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(142, s))\n# define BOOST_PP_WHILE_142_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_143, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, 
BOOST_PP_TUPLE_ELEM_2_1)(143, s))\n# define BOOST_PP_WHILE_143_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_144, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(144, s))\n# define BOOST_PP_WHILE_144_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_145, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(145, s))\n# define BOOST_PP_WHILE_145_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_146, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(146, s))\n# define BOOST_PP_WHILE_146_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_147, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(147, s))\n# define BOOST_PP_WHILE_147_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_148, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(148, s))\n# define BOOST_PP_WHILE_148_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_149, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(149, s))\n# define BOOST_PP_WHILE_149_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_150, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(150, s))\n# define BOOST_PP_WHILE_150_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_151, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(151, s))\n# define BOOST_PP_WHILE_151_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_152, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(152, s))\n# define BOOST_PP_WHILE_152_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_153, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(153, s))\n# define BOOST_PP_WHILE_153_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_154, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(154, s))\n# define BOOST_PP_WHILE_154_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_155, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(155, s))\n# define 
BOOST_PP_WHILE_155_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_156, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(156, s))\n# define BOOST_PP_WHILE_156_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_157, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(157, s))\n# define BOOST_PP_WHILE_157_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_158, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(158, s))\n# define BOOST_PP_WHILE_158_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_159, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(159, s))\n# define BOOST_PP_WHILE_159_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_160, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(160, s))\n# define BOOST_PP_WHILE_160_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_161, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(161, s))\n# define BOOST_PP_WHILE_161_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_162, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(162, s))\n# define BOOST_PP_WHILE_162_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_163, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(163, s))\n# define BOOST_PP_WHILE_163_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_164, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(164, s))\n# define BOOST_PP_WHILE_164_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_165, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(165, s))\n# define BOOST_PP_WHILE_165_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_166, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(166, s))\n# define BOOST_PP_WHILE_166_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_167, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(167, s))\n# define BOOST_PP_WHILE_167_C(c, p, o, s) 
BOOST_PP_IIF(c, BOOST_PP_WHILE_168, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(168, s))\n# define BOOST_PP_WHILE_168_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_169, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(169, s))\n# define BOOST_PP_WHILE_169_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_170, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(170, s))\n# define BOOST_PP_WHILE_170_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_171, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(171, s))\n# define BOOST_PP_WHILE_171_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_172, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(172, s))\n# define BOOST_PP_WHILE_172_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_173, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(173, s))\n# define BOOST_PP_WHILE_173_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_174, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(174, s))\n# define BOOST_PP_WHILE_174_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_175, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(175, s))\n# define BOOST_PP_WHILE_175_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_176, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(176, s))\n# define BOOST_PP_WHILE_176_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_177, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(177, s))\n# define BOOST_PP_WHILE_177_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_178, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(178, s))\n# define BOOST_PP_WHILE_178_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_179, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(179, s))\n# define BOOST_PP_WHILE_179_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_180, 
BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(180, s))\n# define BOOST_PP_WHILE_180_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_181, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(181, s))\n# define BOOST_PP_WHILE_181_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_182, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(182, s))\n# define BOOST_PP_WHILE_182_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_183, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(183, s))\n# define BOOST_PP_WHILE_183_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_184, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(184, s))\n# define BOOST_PP_WHILE_184_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_185, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(185, s))\n# define BOOST_PP_WHILE_185_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_186, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(186, s))\n# define BOOST_PP_WHILE_186_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_187, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(187, s))\n# define BOOST_PP_WHILE_187_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_188, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(188, s))\n# define BOOST_PP_WHILE_188_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_189, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(189, s))\n# define BOOST_PP_WHILE_189_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_190, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(190, s))\n# define BOOST_PP_WHILE_190_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_191, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(191, s))\n# define BOOST_PP_WHILE_191_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_192, BOOST_PP_TUPLE_ELEM_3_2)(p, o, 
BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(192, s))\n# define BOOST_PP_WHILE_192_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_193, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(193, s))\n# define BOOST_PP_WHILE_193_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_194, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(194, s))\n# define BOOST_PP_WHILE_194_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_195, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(195, s))\n# define BOOST_PP_WHILE_195_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_196, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(196, s))\n# define BOOST_PP_WHILE_196_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_197, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(197, s))\n# define BOOST_PP_WHILE_197_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_198, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(198, s))\n# define BOOST_PP_WHILE_198_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_199, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(199, s))\n# define BOOST_PP_WHILE_199_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_200, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(200, s))\n# define BOOST_PP_WHILE_200_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_201, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(201, s))\n# define BOOST_PP_WHILE_201_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_202, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(202, s))\n# define BOOST_PP_WHILE_202_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_203, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(203, s))\n# define BOOST_PP_WHILE_203_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_204, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, 
BOOST_PP_TUPLE_ELEM_2_1)(204, s))\n# define BOOST_PP_WHILE_204_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_205, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(205, s))\n# define BOOST_PP_WHILE_205_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_206, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(206, s))\n# define BOOST_PP_WHILE_206_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_207, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(207, s))\n# define BOOST_PP_WHILE_207_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_208, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(208, s))\n# define BOOST_PP_WHILE_208_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_209, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(209, s))\n# define BOOST_PP_WHILE_209_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_210, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(210, s))\n# define BOOST_PP_WHILE_210_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_211, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(211, s))\n# define BOOST_PP_WHILE_211_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_212, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(212, s))\n# define BOOST_PP_WHILE_212_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_213, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(213, s))\n# define BOOST_PP_WHILE_213_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_214, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(214, s))\n# define BOOST_PP_WHILE_214_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_215, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(215, s))\n# define BOOST_PP_WHILE_215_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_216, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(216, s))\n# define 
BOOST_PP_WHILE_216_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_217, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(217, s))\n# define BOOST_PP_WHILE_217_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_218, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(218, s))\n# define BOOST_PP_WHILE_218_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_219, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(219, s))\n# define BOOST_PP_WHILE_219_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_220, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(220, s))\n# define BOOST_PP_WHILE_220_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_221, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(221, s))\n# define BOOST_PP_WHILE_221_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_222, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(222, s))\n# define BOOST_PP_WHILE_222_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_223, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(223, s))\n# define BOOST_PP_WHILE_223_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_224, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(224, s))\n# define BOOST_PP_WHILE_224_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_225, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(225, s))\n# define BOOST_PP_WHILE_225_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_226, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(226, s))\n# define BOOST_PP_WHILE_226_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_227, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(227, s))\n# define BOOST_PP_WHILE_227_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_228, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(228, s))\n# define BOOST_PP_WHILE_228_C(c, p, o, s) 
BOOST_PP_IIF(c, BOOST_PP_WHILE_229, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(229, s))\n# define BOOST_PP_WHILE_229_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_230, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(230, s))\n# define BOOST_PP_WHILE_230_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_231, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(231, s))\n# define BOOST_PP_WHILE_231_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_232, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(232, s))\n# define BOOST_PP_WHILE_232_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_233, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(233, s))\n# define BOOST_PP_WHILE_233_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_234, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(234, s))\n# define BOOST_PP_WHILE_234_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_235, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(235, s))\n# define BOOST_PP_WHILE_235_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_236, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(236, s))\n# define BOOST_PP_WHILE_236_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_237, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(237, s))\n# define BOOST_PP_WHILE_237_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_238, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(238, s))\n# define BOOST_PP_WHILE_238_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_239, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(239, s))\n# define BOOST_PP_WHILE_239_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_240, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(240, s))\n# define BOOST_PP_WHILE_240_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_241, 
BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(241, s))\n# define BOOST_PP_WHILE_241_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_242, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(242, s))\n# define BOOST_PP_WHILE_242_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_243, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(243, s))\n# define BOOST_PP_WHILE_243_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_244, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(244, s))\n# define BOOST_PP_WHILE_244_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_245, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(245, s))\n# define BOOST_PP_WHILE_245_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_246, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(246, s))\n# define BOOST_PP_WHILE_246_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_247, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(247, s))\n# define BOOST_PP_WHILE_247_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_248, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(248, s))\n# define BOOST_PP_WHILE_248_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_249, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(249, s))\n# define BOOST_PP_WHILE_249_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_250, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(250, s))\n# define BOOST_PP_WHILE_250_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_251, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(251, s))\n# define BOOST_PP_WHILE_251_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_252, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(252, s))\n# define BOOST_PP_WHILE_252_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_253, BOOST_PP_TUPLE_ELEM_3_2)(p, o, 
BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(253, s))\n# define BOOST_PP_WHILE_253_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_254, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(254, s))\n# define BOOST_PP_WHILE_254_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_255, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(255, s))\n# define BOOST_PP_WHILE_255_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_256, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(256, s))\n# define BOOST_PP_WHILE_256_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_257, BOOST_PP_TUPLE_ELEM_3_2)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_TUPLE_ELEM_2_1)(257, s))\n#\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/detail/edg/while.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_DETAIL_EDG_WHILE_HPP\n# define BOOST_PREPROCESSOR_CONTROL_DETAIL_EDG_WHILE_HPP\n#\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_WHILE_1(p, o, s) BOOST_PP_WHILE_1_I(p, o, s)\n# define BOOST_PP_WHILE_2(p, o, s) BOOST_PP_WHILE_2_I(p, o, s)\n# define BOOST_PP_WHILE_3(p, o, s) BOOST_PP_WHILE_3_I(p, o, s)\n# define BOOST_PP_WHILE_4(p, o, s) BOOST_PP_WHILE_4_I(p, o, s)\n# define BOOST_PP_WHILE_5(p, o, s) BOOST_PP_WHILE_5_I(p, o, s)\n# define BOOST_PP_WHILE_6(p, o, s) BOOST_PP_WHILE_6_I(p, o, s)\n# define BOOST_PP_WHILE_7(p, o, s) BOOST_PP_WHILE_7_I(p, o, s)\n# define BOOST_PP_WHILE_8(p, o, s) BOOST_PP_WHILE_8_I(p, o, s)\n# define BOOST_PP_WHILE_9(p, o, s) BOOST_PP_WHILE_9_I(p, o, s)\n# define BOOST_PP_WHILE_10(p, o, s) BOOST_PP_WHILE_10_I(p, o, s)\n# define BOOST_PP_WHILE_11(p, o, s) BOOST_PP_WHILE_11_I(p, o, s)\n# define BOOST_PP_WHILE_12(p, o, s) BOOST_PP_WHILE_12_I(p, o, s)\n# define BOOST_PP_WHILE_13(p, o, s) BOOST_PP_WHILE_13_I(p, o, s)\n# define BOOST_PP_WHILE_14(p, o, s) BOOST_PP_WHILE_14_I(p, o, s)\n# define BOOST_PP_WHILE_15(p, o, s) BOOST_PP_WHILE_15_I(p, o, s)\n# define BOOST_PP_WHILE_16(p, o, s) BOOST_PP_WHILE_16_I(p, o, s)\n# define BOOST_PP_WHILE_17(p, o, s) BOOST_PP_WHILE_17_I(p, o, s)\n# define BOOST_PP_WHILE_18(p, o, s) BOOST_PP_WHILE_18_I(p, o, s)\n# define BOOST_PP_WHILE_19(p, o, s) BOOST_PP_WHILE_19_I(p, o, s)\n# define BOOST_PP_WHILE_20(p, o, s) BOOST_PP_WHILE_20_I(p, o, s)\n# define BOOST_PP_WHILE_21(p, o, s) BOOST_PP_WHILE_21_I(p, o, s)\n# define BOOST_PP_WHILE_22(p, o, 
s) BOOST_PP_WHILE_22_I(p, o, s)\n# define BOOST_PP_WHILE_23(p, o, s) BOOST_PP_WHILE_23_I(p, o, s)\n# define BOOST_PP_WHILE_24(p, o, s) BOOST_PP_WHILE_24_I(p, o, s)\n# define BOOST_PP_WHILE_25(p, o, s) BOOST_PP_WHILE_25_I(p, o, s)\n# define BOOST_PP_WHILE_26(p, o, s) BOOST_PP_WHILE_26_I(p, o, s)\n# define BOOST_PP_WHILE_27(p, o, s) BOOST_PP_WHILE_27_I(p, o, s)\n# define BOOST_PP_WHILE_28(p, o, s) BOOST_PP_WHILE_28_I(p, o, s)\n# define BOOST_PP_WHILE_29(p, o, s) BOOST_PP_WHILE_29_I(p, o, s)\n# define BOOST_PP_WHILE_30(p, o, s) BOOST_PP_WHILE_30_I(p, o, s)\n# define BOOST_PP_WHILE_31(p, o, s) BOOST_PP_WHILE_31_I(p, o, s)\n# define BOOST_PP_WHILE_32(p, o, s) BOOST_PP_WHILE_32_I(p, o, s)\n# define BOOST_PP_WHILE_33(p, o, s) BOOST_PP_WHILE_33_I(p, o, s)\n# define BOOST_PP_WHILE_34(p, o, s) BOOST_PP_WHILE_34_I(p, o, s)\n# define BOOST_PP_WHILE_35(p, o, s) BOOST_PP_WHILE_35_I(p, o, s)\n# define BOOST_PP_WHILE_36(p, o, s) BOOST_PP_WHILE_36_I(p, o, s)\n# define BOOST_PP_WHILE_37(p, o, s) BOOST_PP_WHILE_37_I(p, o, s)\n# define BOOST_PP_WHILE_38(p, o, s) BOOST_PP_WHILE_38_I(p, o, s)\n# define BOOST_PP_WHILE_39(p, o, s) BOOST_PP_WHILE_39_I(p, o, s)\n# define BOOST_PP_WHILE_40(p, o, s) BOOST_PP_WHILE_40_I(p, o, s)\n# define BOOST_PP_WHILE_41(p, o, s) BOOST_PP_WHILE_41_I(p, o, s)\n# define BOOST_PP_WHILE_42(p, o, s) BOOST_PP_WHILE_42_I(p, o, s)\n# define BOOST_PP_WHILE_43(p, o, s) BOOST_PP_WHILE_43_I(p, o, s)\n# define BOOST_PP_WHILE_44(p, o, s) BOOST_PP_WHILE_44_I(p, o, s)\n# define BOOST_PP_WHILE_45(p, o, s) BOOST_PP_WHILE_45_I(p, o, s)\n# define BOOST_PP_WHILE_46(p, o, s) BOOST_PP_WHILE_46_I(p, o, s)\n# define BOOST_PP_WHILE_47(p, o, s) BOOST_PP_WHILE_47_I(p, o, s)\n# define BOOST_PP_WHILE_48(p, o, s) BOOST_PP_WHILE_48_I(p, o, s)\n# define BOOST_PP_WHILE_49(p, o, s) BOOST_PP_WHILE_49_I(p, o, s)\n# define BOOST_PP_WHILE_50(p, o, s) BOOST_PP_WHILE_50_I(p, o, s)\n# define BOOST_PP_WHILE_51(p, o, s) BOOST_PP_WHILE_51_I(p, o, s)\n# define BOOST_PP_WHILE_52(p, o, s) 
BOOST_PP_WHILE_52_I(p, o, s)\n# define BOOST_PP_WHILE_53(p, o, s) BOOST_PP_WHILE_53_I(p, o, s)\n# define BOOST_PP_WHILE_54(p, o, s) BOOST_PP_WHILE_54_I(p, o, s)\n# define BOOST_PP_WHILE_55(p, o, s) BOOST_PP_WHILE_55_I(p, o, s)\n# define BOOST_PP_WHILE_56(p, o, s) BOOST_PP_WHILE_56_I(p, o, s)\n# define BOOST_PP_WHILE_57(p, o, s) BOOST_PP_WHILE_57_I(p, o, s)\n# define BOOST_PP_WHILE_58(p, o, s) BOOST_PP_WHILE_58_I(p, o, s)\n# define BOOST_PP_WHILE_59(p, o, s) BOOST_PP_WHILE_59_I(p, o, s)\n# define BOOST_PP_WHILE_60(p, o, s) BOOST_PP_WHILE_60_I(p, o, s)\n# define BOOST_PP_WHILE_61(p, o, s) BOOST_PP_WHILE_61_I(p, o, s)\n# define BOOST_PP_WHILE_62(p, o, s) BOOST_PP_WHILE_62_I(p, o, s)\n# define BOOST_PP_WHILE_63(p, o, s) BOOST_PP_WHILE_63_I(p, o, s)\n# define BOOST_PP_WHILE_64(p, o, s) BOOST_PP_WHILE_64_I(p, o, s)\n# define BOOST_PP_WHILE_65(p, o, s) BOOST_PP_WHILE_65_I(p, o, s)\n# define BOOST_PP_WHILE_66(p, o, s) BOOST_PP_WHILE_66_I(p, o, s)\n# define BOOST_PP_WHILE_67(p, o, s) BOOST_PP_WHILE_67_I(p, o, s)\n# define BOOST_PP_WHILE_68(p, o, s) BOOST_PP_WHILE_68_I(p, o, s)\n# define BOOST_PP_WHILE_69(p, o, s) BOOST_PP_WHILE_69_I(p, o, s)\n# define BOOST_PP_WHILE_70(p, o, s) BOOST_PP_WHILE_70_I(p, o, s)\n# define BOOST_PP_WHILE_71(p, o, s) BOOST_PP_WHILE_71_I(p, o, s)\n# define BOOST_PP_WHILE_72(p, o, s) BOOST_PP_WHILE_72_I(p, o, s)\n# define BOOST_PP_WHILE_73(p, o, s) BOOST_PP_WHILE_73_I(p, o, s)\n# define BOOST_PP_WHILE_74(p, o, s) BOOST_PP_WHILE_74_I(p, o, s)\n# define BOOST_PP_WHILE_75(p, o, s) BOOST_PP_WHILE_75_I(p, o, s)\n# define BOOST_PP_WHILE_76(p, o, s) BOOST_PP_WHILE_76_I(p, o, s)\n# define BOOST_PP_WHILE_77(p, o, s) BOOST_PP_WHILE_77_I(p, o, s)\n# define BOOST_PP_WHILE_78(p, o, s) BOOST_PP_WHILE_78_I(p, o, s)\n# define BOOST_PP_WHILE_79(p, o, s) BOOST_PP_WHILE_79_I(p, o, s)\n# define BOOST_PP_WHILE_80(p, o, s) BOOST_PP_WHILE_80_I(p, o, s)\n# define BOOST_PP_WHILE_81(p, o, s) BOOST_PP_WHILE_81_I(p, o, s)\n# define BOOST_PP_WHILE_82(p, o, s) 
BOOST_PP_WHILE_82_I(p, o, s)\n# define BOOST_PP_WHILE_83(p, o, s) BOOST_PP_WHILE_83_I(p, o, s)\n# define BOOST_PP_WHILE_84(p, o, s) BOOST_PP_WHILE_84_I(p, o, s)\n# define BOOST_PP_WHILE_85(p, o, s) BOOST_PP_WHILE_85_I(p, o, s)\n# define BOOST_PP_WHILE_86(p, o, s) BOOST_PP_WHILE_86_I(p, o, s)\n# define BOOST_PP_WHILE_87(p, o, s) BOOST_PP_WHILE_87_I(p, o, s)\n# define BOOST_PP_WHILE_88(p, o, s) BOOST_PP_WHILE_88_I(p, o, s)\n# define BOOST_PP_WHILE_89(p, o, s) BOOST_PP_WHILE_89_I(p, o, s)\n# define BOOST_PP_WHILE_90(p, o, s) BOOST_PP_WHILE_90_I(p, o, s)\n# define BOOST_PP_WHILE_91(p, o, s) BOOST_PP_WHILE_91_I(p, o, s)\n# define BOOST_PP_WHILE_92(p, o, s) BOOST_PP_WHILE_92_I(p, o, s)\n# define BOOST_PP_WHILE_93(p, o, s) BOOST_PP_WHILE_93_I(p, o, s)\n# define BOOST_PP_WHILE_94(p, o, s) BOOST_PP_WHILE_94_I(p, o, s)\n# define BOOST_PP_WHILE_95(p, o, s) BOOST_PP_WHILE_95_I(p, o, s)\n# define BOOST_PP_WHILE_96(p, o, s) BOOST_PP_WHILE_96_I(p, o, s)\n# define BOOST_PP_WHILE_97(p, o, s) BOOST_PP_WHILE_97_I(p, o, s)\n# define BOOST_PP_WHILE_98(p, o, s) BOOST_PP_WHILE_98_I(p, o, s)\n# define BOOST_PP_WHILE_99(p, o, s) BOOST_PP_WHILE_99_I(p, o, s)\n# define BOOST_PP_WHILE_100(p, o, s) BOOST_PP_WHILE_100_I(p, o, s)\n# define BOOST_PP_WHILE_101(p, o, s) BOOST_PP_WHILE_101_I(p, o, s)\n# define BOOST_PP_WHILE_102(p, o, s) BOOST_PP_WHILE_102_I(p, o, s)\n# define BOOST_PP_WHILE_103(p, o, s) BOOST_PP_WHILE_103_I(p, o, s)\n# define BOOST_PP_WHILE_104(p, o, s) BOOST_PP_WHILE_104_I(p, o, s)\n# define BOOST_PP_WHILE_105(p, o, s) BOOST_PP_WHILE_105_I(p, o, s)\n# define BOOST_PP_WHILE_106(p, o, s) BOOST_PP_WHILE_106_I(p, o, s)\n# define BOOST_PP_WHILE_107(p, o, s) BOOST_PP_WHILE_107_I(p, o, s)\n# define BOOST_PP_WHILE_108(p, o, s) BOOST_PP_WHILE_108_I(p, o, s)\n# define BOOST_PP_WHILE_109(p, o, s) BOOST_PP_WHILE_109_I(p, o, s)\n# define BOOST_PP_WHILE_110(p, o, s) BOOST_PP_WHILE_110_I(p, o, s)\n# define BOOST_PP_WHILE_111(p, o, s) BOOST_PP_WHILE_111_I(p, o, s)\n# define BOOST_PP_WHILE_112(p, 
o, s) BOOST_PP_WHILE_112_I(p, o, s)\n# define BOOST_PP_WHILE_113(p, o, s) BOOST_PP_WHILE_113_I(p, o, s)\n# define BOOST_PP_WHILE_114(p, o, s) BOOST_PP_WHILE_114_I(p, o, s)\n# define BOOST_PP_WHILE_115(p, o, s) BOOST_PP_WHILE_115_I(p, o, s)\n# define BOOST_PP_WHILE_116(p, o, s) BOOST_PP_WHILE_116_I(p, o, s)\n# define BOOST_PP_WHILE_117(p, o, s) BOOST_PP_WHILE_117_I(p, o, s)\n# define BOOST_PP_WHILE_118(p, o, s) BOOST_PP_WHILE_118_I(p, o, s)\n# define BOOST_PP_WHILE_119(p, o, s) BOOST_PP_WHILE_119_I(p, o, s)\n# define BOOST_PP_WHILE_120(p, o, s) BOOST_PP_WHILE_120_I(p, o, s)\n# define BOOST_PP_WHILE_121(p, o, s) BOOST_PP_WHILE_121_I(p, o, s)\n# define BOOST_PP_WHILE_122(p, o, s) BOOST_PP_WHILE_122_I(p, o, s)\n# define BOOST_PP_WHILE_123(p, o, s) BOOST_PP_WHILE_123_I(p, o, s)\n# define BOOST_PP_WHILE_124(p, o, s) BOOST_PP_WHILE_124_I(p, o, s)\n# define BOOST_PP_WHILE_125(p, o, s) BOOST_PP_WHILE_125_I(p, o, s)\n# define BOOST_PP_WHILE_126(p, o, s) BOOST_PP_WHILE_126_I(p, o, s)\n# define BOOST_PP_WHILE_127(p, o, s) BOOST_PP_WHILE_127_I(p, o, s)\n# define BOOST_PP_WHILE_128(p, o, s) BOOST_PP_WHILE_128_I(p, o, s)\n# define BOOST_PP_WHILE_129(p, o, s) BOOST_PP_WHILE_129_I(p, o, s)\n# define BOOST_PP_WHILE_130(p, o, s) BOOST_PP_WHILE_130_I(p, o, s)\n# define BOOST_PP_WHILE_131(p, o, s) BOOST_PP_WHILE_131_I(p, o, s)\n# define BOOST_PP_WHILE_132(p, o, s) BOOST_PP_WHILE_132_I(p, o, s)\n# define BOOST_PP_WHILE_133(p, o, s) BOOST_PP_WHILE_133_I(p, o, s)\n# define BOOST_PP_WHILE_134(p, o, s) BOOST_PP_WHILE_134_I(p, o, s)\n# define BOOST_PP_WHILE_135(p, o, s) BOOST_PP_WHILE_135_I(p, o, s)\n# define BOOST_PP_WHILE_136(p, o, s) BOOST_PP_WHILE_136_I(p, o, s)\n# define BOOST_PP_WHILE_137(p, o, s) BOOST_PP_WHILE_137_I(p, o, s)\n# define BOOST_PP_WHILE_138(p, o, s) BOOST_PP_WHILE_138_I(p, o, s)\n# define BOOST_PP_WHILE_139(p, o, s) BOOST_PP_WHILE_139_I(p, o, s)\n# define BOOST_PP_WHILE_140(p, o, s) BOOST_PP_WHILE_140_I(p, o, s)\n# define BOOST_PP_WHILE_141(p, o, s) 
BOOST_PP_WHILE_141_I(p, o, s)\n# define BOOST_PP_WHILE_142(p, o, s) BOOST_PP_WHILE_142_I(p, o, s)\n# define BOOST_PP_WHILE_143(p, o, s) BOOST_PP_WHILE_143_I(p, o, s)\n# define BOOST_PP_WHILE_144(p, o, s) BOOST_PP_WHILE_144_I(p, o, s)\n# define BOOST_PP_WHILE_145(p, o, s) BOOST_PP_WHILE_145_I(p, o, s)\n# define BOOST_PP_WHILE_146(p, o, s) BOOST_PP_WHILE_146_I(p, o, s)\n# define BOOST_PP_WHILE_147(p, o, s) BOOST_PP_WHILE_147_I(p, o, s)\n# define BOOST_PP_WHILE_148(p, o, s) BOOST_PP_WHILE_148_I(p, o, s)\n# define BOOST_PP_WHILE_149(p, o, s) BOOST_PP_WHILE_149_I(p, o, s)\n# define BOOST_PP_WHILE_150(p, o, s) BOOST_PP_WHILE_150_I(p, o, s)\n# define BOOST_PP_WHILE_151(p, o, s) BOOST_PP_WHILE_151_I(p, o, s)\n# define BOOST_PP_WHILE_152(p, o, s) BOOST_PP_WHILE_152_I(p, o, s)\n# define BOOST_PP_WHILE_153(p, o, s) BOOST_PP_WHILE_153_I(p, o, s)\n# define BOOST_PP_WHILE_154(p, o, s) BOOST_PP_WHILE_154_I(p, o, s)\n# define BOOST_PP_WHILE_155(p, o, s) BOOST_PP_WHILE_155_I(p, o, s)\n# define BOOST_PP_WHILE_156(p, o, s) BOOST_PP_WHILE_156_I(p, o, s)\n# define BOOST_PP_WHILE_157(p, o, s) BOOST_PP_WHILE_157_I(p, o, s)\n# define BOOST_PP_WHILE_158(p, o, s) BOOST_PP_WHILE_158_I(p, o, s)\n# define BOOST_PP_WHILE_159(p, o, s) BOOST_PP_WHILE_159_I(p, o, s)\n# define BOOST_PP_WHILE_160(p, o, s) BOOST_PP_WHILE_160_I(p, o, s)\n# define BOOST_PP_WHILE_161(p, o, s) BOOST_PP_WHILE_161_I(p, o, s)\n# define BOOST_PP_WHILE_162(p, o, s) BOOST_PP_WHILE_162_I(p, o, s)\n# define BOOST_PP_WHILE_163(p, o, s) BOOST_PP_WHILE_163_I(p, o, s)\n# define BOOST_PP_WHILE_164(p, o, s) BOOST_PP_WHILE_164_I(p, o, s)\n# define BOOST_PP_WHILE_165(p, o, s) BOOST_PP_WHILE_165_I(p, o, s)\n# define BOOST_PP_WHILE_166(p, o, s) BOOST_PP_WHILE_166_I(p, o, s)\n# define BOOST_PP_WHILE_167(p, o, s) BOOST_PP_WHILE_167_I(p, o, s)\n# define BOOST_PP_WHILE_168(p, o, s) BOOST_PP_WHILE_168_I(p, o, s)\n# define BOOST_PP_WHILE_169(p, o, s) BOOST_PP_WHILE_169_I(p, o, s)\n# define BOOST_PP_WHILE_170(p, o, s) BOOST_PP_WHILE_170_I(p, o, 
s)\n# define BOOST_PP_WHILE_171(p, o, s) BOOST_PP_WHILE_171_I(p, o, s)\n# define BOOST_PP_WHILE_172(p, o, s) BOOST_PP_WHILE_172_I(p, o, s)\n# define BOOST_PP_WHILE_173(p, o, s) BOOST_PP_WHILE_173_I(p, o, s)\n# define BOOST_PP_WHILE_174(p, o, s) BOOST_PP_WHILE_174_I(p, o, s)\n# define BOOST_PP_WHILE_175(p, o, s) BOOST_PP_WHILE_175_I(p, o, s)\n# define BOOST_PP_WHILE_176(p, o, s) BOOST_PP_WHILE_176_I(p, o, s)\n# define BOOST_PP_WHILE_177(p, o, s) BOOST_PP_WHILE_177_I(p, o, s)\n# define BOOST_PP_WHILE_178(p, o, s) BOOST_PP_WHILE_178_I(p, o, s)\n# define BOOST_PP_WHILE_179(p, o, s) BOOST_PP_WHILE_179_I(p, o, s)\n# define BOOST_PP_WHILE_180(p, o, s) BOOST_PP_WHILE_180_I(p, o, s)\n# define BOOST_PP_WHILE_181(p, o, s) BOOST_PP_WHILE_181_I(p, o, s)\n# define BOOST_PP_WHILE_182(p, o, s) BOOST_PP_WHILE_182_I(p, o, s)\n# define BOOST_PP_WHILE_183(p, o, s) BOOST_PP_WHILE_183_I(p, o, s)\n# define BOOST_PP_WHILE_184(p, o, s) BOOST_PP_WHILE_184_I(p, o, s)\n# define BOOST_PP_WHILE_185(p, o, s) BOOST_PP_WHILE_185_I(p, o, s)\n# define BOOST_PP_WHILE_186(p, o, s) BOOST_PP_WHILE_186_I(p, o, s)\n# define BOOST_PP_WHILE_187(p, o, s) BOOST_PP_WHILE_187_I(p, o, s)\n# define BOOST_PP_WHILE_188(p, o, s) BOOST_PP_WHILE_188_I(p, o, s)\n# define BOOST_PP_WHILE_189(p, o, s) BOOST_PP_WHILE_189_I(p, o, s)\n# define BOOST_PP_WHILE_190(p, o, s) BOOST_PP_WHILE_190_I(p, o, s)\n# define BOOST_PP_WHILE_191(p, o, s) BOOST_PP_WHILE_191_I(p, o, s)\n# define BOOST_PP_WHILE_192(p, o, s) BOOST_PP_WHILE_192_I(p, o, s)\n# define BOOST_PP_WHILE_193(p, o, s) BOOST_PP_WHILE_193_I(p, o, s)\n# define BOOST_PP_WHILE_194(p, o, s) BOOST_PP_WHILE_194_I(p, o, s)\n# define BOOST_PP_WHILE_195(p, o, s) BOOST_PP_WHILE_195_I(p, o, s)\n# define BOOST_PP_WHILE_196(p, o, s) BOOST_PP_WHILE_196_I(p, o, s)\n# define BOOST_PP_WHILE_197(p, o, s) BOOST_PP_WHILE_197_I(p, o, s)\n# define BOOST_PP_WHILE_198(p, o, s) BOOST_PP_WHILE_198_I(p, o, s)\n# define BOOST_PP_WHILE_199(p, o, s) BOOST_PP_WHILE_199_I(p, o, s)\n# define 
BOOST_PP_WHILE_200(p, o, s) BOOST_PP_WHILE_200_I(p, o, s)\n# define BOOST_PP_WHILE_201(p, o, s) BOOST_PP_WHILE_201_I(p, o, s)\n# define BOOST_PP_WHILE_202(p, o, s) BOOST_PP_WHILE_202_I(p, o, s)\n# define BOOST_PP_WHILE_203(p, o, s) BOOST_PP_WHILE_203_I(p, o, s)\n# define BOOST_PP_WHILE_204(p, o, s) BOOST_PP_WHILE_204_I(p, o, s)\n# define BOOST_PP_WHILE_205(p, o, s) BOOST_PP_WHILE_205_I(p, o, s)\n# define BOOST_PP_WHILE_206(p, o, s) BOOST_PP_WHILE_206_I(p, o, s)\n# define BOOST_PP_WHILE_207(p, o, s) BOOST_PP_WHILE_207_I(p, o, s)\n# define BOOST_PP_WHILE_208(p, o, s) BOOST_PP_WHILE_208_I(p, o, s)\n# define BOOST_PP_WHILE_209(p, o, s) BOOST_PP_WHILE_209_I(p, o, s)\n# define BOOST_PP_WHILE_210(p, o, s) BOOST_PP_WHILE_210_I(p, o, s)\n# define BOOST_PP_WHILE_211(p, o, s) BOOST_PP_WHILE_211_I(p, o, s)\n# define BOOST_PP_WHILE_212(p, o, s) BOOST_PP_WHILE_212_I(p, o, s)\n# define BOOST_PP_WHILE_213(p, o, s) BOOST_PP_WHILE_213_I(p, o, s)\n# define BOOST_PP_WHILE_214(p, o, s) BOOST_PP_WHILE_214_I(p, o, s)\n# define BOOST_PP_WHILE_215(p, o, s) BOOST_PP_WHILE_215_I(p, o, s)\n# define BOOST_PP_WHILE_216(p, o, s) BOOST_PP_WHILE_216_I(p, o, s)\n# define BOOST_PP_WHILE_217(p, o, s) BOOST_PP_WHILE_217_I(p, o, s)\n# define BOOST_PP_WHILE_218(p, o, s) BOOST_PP_WHILE_218_I(p, o, s)\n# define BOOST_PP_WHILE_219(p, o, s) BOOST_PP_WHILE_219_I(p, o, s)\n# define BOOST_PP_WHILE_220(p, o, s) BOOST_PP_WHILE_220_I(p, o, s)\n# define BOOST_PP_WHILE_221(p, o, s) BOOST_PP_WHILE_221_I(p, o, s)\n# define BOOST_PP_WHILE_222(p, o, s) BOOST_PP_WHILE_222_I(p, o, s)\n# define BOOST_PP_WHILE_223(p, o, s) BOOST_PP_WHILE_223_I(p, o, s)\n# define BOOST_PP_WHILE_224(p, o, s) BOOST_PP_WHILE_224_I(p, o, s)\n# define BOOST_PP_WHILE_225(p, o, s) BOOST_PP_WHILE_225_I(p, o, s)\n# define BOOST_PP_WHILE_226(p, o, s) BOOST_PP_WHILE_226_I(p, o, s)\n# define BOOST_PP_WHILE_227(p, o, s) BOOST_PP_WHILE_227_I(p, o, s)\n# define BOOST_PP_WHILE_228(p, o, s) BOOST_PP_WHILE_228_I(p, o, s)\n# define BOOST_PP_WHILE_229(p, o, s) 
BOOST_PP_WHILE_229_I(p, o, s)\n# define BOOST_PP_WHILE_230(p, o, s) BOOST_PP_WHILE_230_I(p, o, s)\n# define BOOST_PP_WHILE_231(p, o, s) BOOST_PP_WHILE_231_I(p, o, s)\n# define BOOST_PP_WHILE_232(p, o, s) BOOST_PP_WHILE_232_I(p, o, s)\n# define BOOST_PP_WHILE_233(p, o, s) BOOST_PP_WHILE_233_I(p, o, s)\n# define BOOST_PP_WHILE_234(p, o, s) BOOST_PP_WHILE_234_I(p, o, s)\n# define BOOST_PP_WHILE_235(p, o, s) BOOST_PP_WHILE_235_I(p, o, s)\n# define BOOST_PP_WHILE_236(p, o, s) BOOST_PP_WHILE_236_I(p, o, s)\n# define BOOST_PP_WHILE_237(p, o, s) BOOST_PP_WHILE_237_I(p, o, s)\n# define BOOST_PP_WHILE_238(p, o, s) BOOST_PP_WHILE_238_I(p, o, s)\n# define BOOST_PP_WHILE_239(p, o, s) BOOST_PP_WHILE_239_I(p, o, s)\n# define BOOST_PP_WHILE_240(p, o, s) BOOST_PP_WHILE_240_I(p, o, s)\n# define BOOST_PP_WHILE_241(p, o, s) BOOST_PP_WHILE_241_I(p, o, s)\n# define BOOST_PP_WHILE_242(p, o, s) BOOST_PP_WHILE_242_I(p, o, s)\n# define BOOST_PP_WHILE_243(p, o, s) BOOST_PP_WHILE_243_I(p, o, s)\n# define BOOST_PP_WHILE_244(p, o, s) BOOST_PP_WHILE_244_I(p, o, s)\n# define BOOST_PP_WHILE_245(p, o, s) BOOST_PP_WHILE_245_I(p, o, s)\n# define BOOST_PP_WHILE_246(p, o, s) BOOST_PP_WHILE_246_I(p, o, s)\n# define BOOST_PP_WHILE_247(p, o, s) BOOST_PP_WHILE_247_I(p, o, s)\n# define BOOST_PP_WHILE_248(p, o, s) BOOST_PP_WHILE_248_I(p, o, s)\n# define BOOST_PP_WHILE_249(p, o, s) BOOST_PP_WHILE_249_I(p, o, s)\n# define BOOST_PP_WHILE_250(p, o, s) BOOST_PP_WHILE_250_I(p, o, s)\n# define BOOST_PP_WHILE_251(p, o, s) BOOST_PP_WHILE_251_I(p, o, s)\n# define BOOST_PP_WHILE_252(p, o, s) BOOST_PP_WHILE_252_I(p, o, s)\n# define BOOST_PP_WHILE_253(p, o, s) BOOST_PP_WHILE_253_I(p, o, s)\n# define BOOST_PP_WHILE_254(p, o, s) BOOST_PP_WHILE_254_I(p, o, s)\n# define BOOST_PP_WHILE_255(p, o, s) BOOST_PP_WHILE_255_I(p, o, s)\n# define BOOST_PP_WHILE_256(p, o, s) BOOST_PP_WHILE_256_I(p, o, s)\n#\n# define BOOST_PP_WHILE_1_I(p, o, s) BOOST_PP_IF(p(2, s), BOOST_PP_WHILE_2, s BOOST_PP_TUPLE_EAT_3)(p, o, o(2, s))\n# define 
BOOST_PP_WHILE_2_I(p, o, s) BOOST_PP_IF(p(3, s), BOOST_PP_WHILE_3, s BOOST_PP_TUPLE_EAT_3)(p, o, o(3, s))\n# define BOOST_PP_WHILE_3_I(p, o, s) BOOST_PP_IF(p(4, s), BOOST_PP_WHILE_4, s BOOST_PP_TUPLE_EAT_3)(p, o, o(4, s))\n# define BOOST_PP_WHILE_4_I(p, o, s) BOOST_PP_IF(p(5, s), BOOST_PP_WHILE_5, s BOOST_PP_TUPLE_EAT_3)(p, o, o(5, s))\n# define BOOST_PP_WHILE_5_I(p, o, s) BOOST_PP_IF(p(6, s), BOOST_PP_WHILE_6, s BOOST_PP_TUPLE_EAT_3)(p, o, o(6, s))\n# define BOOST_PP_WHILE_6_I(p, o, s) BOOST_PP_IF(p(7, s), BOOST_PP_WHILE_7, s BOOST_PP_TUPLE_EAT_3)(p, o, o(7, s))\n# define BOOST_PP_WHILE_7_I(p, o, s) BOOST_PP_IF(p(8, s), BOOST_PP_WHILE_8, s BOOST_PP_TUPLE_EAT_3)(p, o, o(8, s))\n# define BOOST_PP_WHILE_8_I(p, o, s) BOOST_PP_IF(p(9, s), BOOST_PP_WHILE_9, s BOOST_PP_TUPLE_EAT_3)(p, o, o(9, s))\n# define BOOST_PP_WHILE_9_I(p, o, s) BOOST_PP_IF(p(10, s), BOOST_PP_WHILE_10, s BOOST_PP_TUPLE_EAT_3)(p, o, o(10, s))\n# define BOOST_PP_WHILE_10_I(p, o, s) BOOST_PP_IF(p(11, s), BOOST_PP_WHILE_11, s BOOST_PP_TUPLE_EAT_3)(p, o, o(11, s))\n# define BOOST_PP_WHILE_11_I(p, o, s) BOOST_PP_IF(p(12, s), BOOST_PP_WHILE_12, s BOOST_PP_TUPLE_EAT_3)(p, o, o(12, s))\n# define BOOST_PP_WHILE_12_I(p, o, s) BOOST_PP_IF(p(13, s), BOOST_PP_WHILE_13, s BOOST_PP_TUPLE_EAT_3)(p, o, o(13, s))\n# define BOOST_PP_WHILE_13_I(p, o, s) BOOST_PP_IF(p(14, s), BOOST_PP_WHILE_14, s BOOST_PP_TUPLE_EAT_3)(p, o, o(14, s))\n# define BOOST_PP_WHILE_14_I(p, o, s) BOOST_PP_IF(p(15, s), BOOST_PP_WHILE_15, s BOOST_PP_TUPLE_EAT_3)(p, o, o(15, s))\n# define BOOST_PP_WHILE_15_I(p, o, s) BOOST_PP_IF(p(16, s), BOOST_PP_WHILE_16, s BOOST_PP_TUPLE_EAT_3)(p, o, o(16, s))\n# define BOOST_PP_WHILE_16_I(p, o, s) BOOST_PP_IF(p(17, s), BOOST_PP_WHILE_17, s BOOST_PP_TUPLE_EAT_3)(p, o, o(17, s))\n# define BOOST_PP_WHILE_17_I(p, o, s) BOOST_PP_IF(p(18, s), BOOST_PP_WHILE_18, s BOOST_PP_TUPLE_EAT_3)(p, o, o(18, s))\n# define BOOST_PP_WHILE_18_I(p, o, s) BOOST_PP_IF(p(19, s), BOOST_PP_WHILE_19, s BOOST_PP_TUPLE_EAT_3)(p, o, o(19, 
s))\n# define BOOST_PP_WHILE_19_I(p, o, s) BOOST_PP_IF(p(20, s), BOOST_PP_WHILE_20, s BOOST_PP_TUPLE_EAT_3)(p, o, o(20, s))\n# define BOOST_PP_WHILE_20_I(p, o, s) BOOST_PP_IF(p(21, s), BOOST_PP_WHILE_21, s BOOST_PP_TUPLE_EAT_3)(p, o, o(21, s))\n# define BOOST_PP_WHILE_21_I(p, o, s) BOOST_PP_IF(p(22, s), BOOST_PP_WHILE_22, s BOOST_PP_TUPLE_EAT_3)(p, o, o(22, s))\n# define BOOST_PP_WHILE_22_I(p, o, s) BOOST_PP_IF(p(23, s), BOOST_PP_WHILE_23, s BOOST_PP_TUPLE_EAT_3)(p, o, o(23, s))\n# define BOOST_PP_WHILE_23_I(p, o, s) BOOST_PP_IF(p(24, s), BOOST_PP_WHILE_24, s BOOST_PP_TUPLE_EAT_3)(p, o, o(24, s))\n# define BOOST_PP_WHILE_24_I(p, o, s) BOOST_PP_IF(p(25, s), BOOST_PP_WHILE_25, s BOOST_PP_TUPLE_EAT_3)(p, o, o(25, s))\n# define BOOST_PP_WHILE_25_I(p, o, s) BOOST_PP_IF(p(26, s), BOOST_PP_WHILE_26, s BOOST_PP_TUPLE_EAT_3)(p, o, o(26, s))\n# define BOOST_PP_WHILE_26_I(p, o, s) BOOST_PP_IF(p(27, s), BOOST_PP_WHILE_27, s BOOST_PP_TUPLE_EAT_3)(p, o, o(27, s))\n# define BOOST_PP_WHILE_27_I(p, o, s) BOOST_PP_IF(p(28, s), BOOST_PP_WHILE_28, s BOOST_PP_TUPLE_EAT_3)(p, o, o(28, s))\n# define BOOST_PP_WHILE_28_I(p, o, s) BOOST_PP_IF(p(29, s), BOOST_PP_WHILE_29, s BOOST_PP_TUPLE_EAT_3)(p, o, o(29, s))\n# define BOOST_PP_WHILE_29_I(p, o, s) BOOST_PP_IF(p(30, s), BOOST_PP_WHILE_30, s BOOST_PP_TUPLE_EAT_3)(p, o, o(30, s))\n# define BOOST_PP_WHILE_30_I(p, o, s) BOOST_PP_IF(p(31, s), BOOST_PP_WHILE_31, s BOOST_PP_TUPLE_EAT_3)(p, o, o(31, s))\n# define BOOST_PP_WHILE_31_I(p, o, s) BOOST_PP_IF(p(32, s), BOOST_PP_WHILE_32, s BOOST_PP_TUPLE_EAT_3)(p, o, o(32, s))\n# define BOOST_PP_WHILE_32_I(p, o, s) BOOST_PP_IF(p(33, s), BOOST_PP_WHILE_33, s BOOST_PP_TUPLE_EAT_3)(p, o, o(33, s))\n# define BOOST_PP_WHILE_33_I(p, o, s) BOOST_PP_IF(p(34, s), BOOST_PP_WHILE_34, s BOOST_PP_TUPLE_EAT_3)(p, o, o(34, s))\n# define BOOST_PP_WHILE_34_I(p, o, s) BOOST_PP_IF(p(35, s), BOOST_PP_WHILE_35, s BOOST_PP_TUPLE_EAT_3)(p, o, o(35, s))\n# define BOOST_PP_WHILE_35_I(p, o, s) BOOST_PP_IF(p(36, s), 
BOOST_PP_WHILE_36, s BOOST_PP_TUPLE_EAT_3)(p, o, o(36, s))\n# define BOOST_PP_WHILE_36_I(p, o, s) BOOST_PP_IF(p(37, s), BOOST_PP_WHILE_37, s BOOST_PP_TUPLE_EAT_3)(p, o, o(37, s))\n# define BOOST_PP_WHILE_37_I(p, o, s) BOOST_PP_IF(p(38, s), BOOST_PP_WHILE_38, s BOOST_PP_TUPLE_EAT_3)(p, o, o(38, s))\n# define BOOST_PP_WHILE_38_I(p, o, s) BOOST_PP_IF(p(39, s), BOOST_PP_WHILE_39, s BOOST_PP_TUPLE_EAT_3)(p, o, o(39, s))\n# define BOOST_PP_WHILE_39_I(p, o, s) BOOST_PP_IF(p(40, s), BOOST_PP_WHILE_40, s BOOST_PP_TUPLE_EAT_3)(p, o, o(40, s))\n# define BOOST_PP_WHILE_40_I(p, o, s) BOOST_PP_IF(p(41, s), BOOST_PP_WHILE_41, s BOOST_PP_TUPLE_EAT_3)(p, o, o(41, s))\n# define BOOST_PP_WHILE_41_I(p, o, s) BOOST_PP_IF(p(42, s), BOOST_PP_WHILE_42, s BOOST_PP_TUPLE_EAT_3)(p, o, o(42, s))\n# define BOOST_PP_WHILE_42_I(p, o, s) BOOST_PP_IF(p(43, s), BOOST_PP_WHILE_43, s BOOST_PP_TUPLE_EAT_3)(p, o, o(43, s))\n# define BOOST_PP_WHILE_43_I(p, o, s) BOOST_PP_IF(p(44, s), BOOST_PP_WHILE_44, s BOOST_PP_TUPLE_EAT_3)(p, o, o(44, s))\n# define BOOST_PP_WHILE_44_I(p, o, s) BOOST_PP_IF(p(45, s), BOOST_PP_WHILE_45, s BOOST_PP_TUPLE_EAT_3)(p, o, o(45, s))\n# define BOOST_PP_WHILE_45_I(p, o, s) BOOST_PP_IF(p(46, s), BOOST_PP_WHILE_46, s BOOST_PP_TUPLE_EAT_3)(p, o, o(46, s))\n# define BOOST_PP_WHILE_46_I(p, o, s) BOOST_PP_IF(p(47, s), BOOST_PP_WHILE_47, s BOOST_PP_TUPLE_EAT_3)(p, o, o(47, s))\n# define BOOST_PP_WHILE_47_I(p, o, s) BOOST_PP_IF(p(48, s), BOOST_PP_WHILE_48, s BOOST_PP_TUPLE_EAT_3)(p, o, o(48, s))\n# define BOOST_PP_WHILE_48_I(p, o, s) BOOST_PP_IF(p(49, s), BOOST_PP_WHILE_49, s BOOST_PP_TUPLE_EAT_3)(p, o, o(49, s))\n# define BOOST_PP_WHILE_49_I(p, o, s) BOOST_PP_IF(p(50, s), BOOST_PP_WHILE_50, s BOOST_PP_TUPLE_EAT_3)(p, o, o(50, s))\n# define BOOST_PP_WHILE_50_I(p, o, s) BOOST_PP_IF(p(51, s), BOOST_PP_WHILE_51, s BOOST_PP_TUPLE_EAT_3)(p, o, o(51, s))\n# define BOOST_PP_WHILE_51_I(p, o, s) BOOST_PP_IF(p(52, s), BOOST_PP_WHILE_52, s BOOST_PP_TUPLE_EAT_3)(p, o, o(52, s))\n# define 
BOOST_PP_WHILE_52_I(p, o, s) BOOST_PP_IF(p(53, s), BOOST_PP_WHILE_53, s BOOST_PP_TUPLE_EAT_3)(p, o, o(53, s))\n# define BOOST_PP_WHILE_53_I(p, o, s) BOOST_PP_IF(p(54, s), BOOST_PP_WHILE_54, s BOOST_PP_TUPLE_EAT_3)(p, o, o(54, s))\n# define BOOST_PP_WHILE_54_I(p, o, s) BOOST_PP_IF(p(55, s), BOOST_PP_WHILE_55, s BOOST_PP_TUPLE_EAT_3)(p, o, o(55, s))\n# define BOOST_PP_WHILE_55_I(p, o, s) BOOST_PP_IF(p(56, s), BOOST_PP_WHILE_56, s BOOST_PP_TUPLE_EAT_3)(p, o, o(56, s))\n# define BOOST_PP_WHILE_56_I(p, o, s) BOOST_PP_IF(p(57, s), BOOST_PP_WHILE_57, s BOOST_PP_TUPLE_EAT_3)(p, o, o(57, s))\n# define BOOST_PP_WHILE_57_I(p, o, s) BOOST_PP_IF(p(58, s), BOOST_PP_WHILE_58, s BOOST_PP_TUPLE_EAT_3)(p, o, o(58, s))\n# define BOOST_PP_WHILE_58_I(p, o, s) BOOST_PP_IF(p(59, s), BOOST_PP_WHILE_59, s BOOST_PP_TUPLE_EAT_3)(p, o, o(59, s))\n# define BOOST_PP_WHILE_59_I(p, o, s) BOOST_PP_IF(p(60, s), BOOST_PP_WHILE_60, s BOOST_PP_TUPLE_EAT_3)(p, o, o(60, s))\n# define BOOST_PP_WHILE_60_I(p, o, s) BOOST_PP_IF(p(61, s), BOOST_PP_WHILE_61, s BOOST_PP_TUPLE_EAT_3)(p, o, o(61, s))\n# define BOOST_PP_WHILE_61_I(p, o, s) BOOST_PP_IF(p(62, s), BOOST_PP_WHILE_62, s BOOST_PP_TUPLE_EAT_3)(p, o, o(62, s))\n# define BOOST_PP_WHILE_62_I(p, o, s) BOOST_PP_IF(p(63, s), BOOST_PP_WHILE_63, s BOOST_PP_TUPLE_EAT_3)(p, o, o(63, s))\n# define BOOST_PP_WHILE_63_I(p, o, s) BOOST_PP_IF(p(64, s), BOOST_PP_WHILE_64, s BOOST_PP_TUPLE_EAT_3)(p, o, o(64, s))\n# define BOOST_PP_WHILE_64_I(p, o, s) BOOST_PP_IF(p(65, s), BOOST_PP_WHILE_65, s BOOST_PP_TUPLE_EAT_3)(p, o, o(65, s))\n# define BOOST_PP_WHILE_65_I(p, o, s) BOOST_PP_IF(p(66, s), BOOST_PP_WHILE_66, s BOOST_PP_TUPLE_EAT_3)(p, o, o(66, s))\n# define BOOST_PP_WHILE_66_I(p, o, s) BOOST_PP_IF(p(67, s), BOOST_PP_WHILE_67, s BOOST_PP_TUPLE_EAT_3)(p, o, o(67, s))\n# define BOOST_PP_WHILE_67_I(p, o, s) BOOST_PP_IF(p(68, s), BOOST_PP_WHILE_68, s BOOST_PP_TUPLE_EAT_3)(p, o, o(68, s))\n# define BOOST_PP_WHILE_68_I(p, o, s) BOOST_PP_IF(p(69, s), BOOST_PP_WHILE_69, s 
BOOST_PP_TUPLE_EAT_3)(p, o, o(69, s))\n# define BOOST_PP_WHILE_69_I(p, o, s) BOOST_PP_IF(p(70, s), BOOST_PP_WHILE_70, s BOOST_PP_TUPLE_EAT_3)(p, o, o(70, s))\n# define BOOST_PP_WHILE_70_I(p, o, s) BOOST_PP_IF(p(71, s), BOOST_PP_WHILE_71, s BOOST_PP_TUPLE_EAT_3)(p, o, o(71, s))\n# define BOOST_PP_WHILE_71_I(p, o, s) BOOST_PP_IF(p(72, s), BOOST_PP_WHILE_72, s BOOST_PP_TUPLE_EAT_3)(p, o, o(72, s))\n# define BOOST_PP_WHILE_72_I(p, o, s) BOOST_PP_IF(p(73, s), BOOST_PP_WHILE_73, s BOOST_PP_TUPLE_EAT_3)(p, o, o(73, s))\n# define BOOST_PP_WHILE_73_I(p, o, s) BOOST_PP_IF(p(74, s), BOOST_PP_WHILE_74, s BOOST_PP_TUPLE_EAT_3)(p, o, o(74, s))\n# define BOOST_PP_WHILE_74_I(p, o, s) BOOST_PP_IF(p(75, s), BOOST_PP_WHILE_75, s BOOST_PP_TUPLE_EAT_3)(p, o, o(75, s))\n# define BOOST_PP_WHILE_75_I(p, o, s) BOOST_PP_IF(p(76, s), BOOST_PP_WHILE_76, s BOOST_PP_TUPLE_EAT_3)(p, o, o(76, s))\n# define BOOST_PP_WHILE_76_I(p, o, s) BOOST_PP_IF(p(77, s), BOOST_PP_WHILE_77, s BOOST_PP_TUPLE_EAT_3)(p, o, o(77, s))\n# define BOOST_PP_WHILE_77_I(p, o, s) BOOST_PP_IF(p(78, s), BOOST_PP_WHILE_78, s BOOST_PP_TUPLE_EAT_3)(p, o, o(78, s))\n# define BOOST_PP_WHILE_78_I(p, o, s) BOOST_PP_IF(p(79, s), BOOST_PP_WHILE_79, s BOOST_PP_TUPLE_EAT_3)(p, o, o(79, s))\n# define BOOST_PP_WHILE_79_I(p, o, s) BOOST_PP_IF(p(80, s), BOOST_PP_WHILE_80, s BOOST_PP_TUPLE_EAT_3)(p, o, o(80, s))\n# define BOOST_PP_WHILE_80_I(p, o, s) BOOST_PP_IF(p(81, s), BOOST_PP_WHILE_81, s BOOST_PP_TUPLE_EAT_3)(p, o, o(81, s))\n# define BOOST_PP_WHILE_81_I(p, o, s) BOOST_PP_IF(p(82, s), BOOST_PP_WHILE_82, s BOOST_PP_TUPLE_EAT_3)(p, o, o(82, s))\n# define BOOST_PP_WHILE_82_I(p, o, s) BOOST_PP_IF(p(83, s), BOOST_PP_WHILE_83, s BOOST_PP_TUPLE_EAT_3)(p, o, o(83, s))\n# define BOOST_PP_WHILE_83_I(p, o, s) BOOST_PP_IF(p(84, s), BOOST_PP_WHILE_84, s BOOST_PP_TUPLE_EAT_3)(p, o, o(84, s))\n# define BOOST_PP_WHILE_84_I(p, o, s) BOOST_PP_IF(p(85, s), BOOST_PP_WHILE_85, s BOOST_PP_TUPLE_EAT_3)(p, o, o(85, s))\n# define BOOST_PP_WHILE_85_I(p, o, s) 
BOOST_PP_IF(p(86, s), BOOST_PP_WHILE_86, s BOOST_PP_TUPLE_EAT_3)(p, o, o(86, s))\n# define BOOST_PP_WHILE_86_I(p, o, s) BOOST_PP_IF(p(87, s), BOOST_PP_WHILE_87, s BOOST_PP_TUPLE_EAT_3)(p, o, o(87, s))\n# define BOOST_PP_WHILE_87_I(p, o, s) BOOST_PP_IF(p(88, s), BOOST_PP_WHILE_88, s BOOST_PP_TUPLE_EAT_3)(p, o, o(88, s))\n# define BOOST_PP_WHILE_88_I(p, o, s) BOOST_PP_IF(p(89, s), BOOST_PP_WHILE_89, s BOOST_PP_TUPLE_EAT_3)(p, o, o(89, s))\n# define BOOST_PP_WHILE_89_I(p, o, s) BOOST_PP_IF(p(90, s), BOOST_PP_WHILE_90, s BOOST_PP_TUPLE_EAT_3)(p, o, o(90, s))\n# define BOOST_PP_WHILE_90_I(p, o, s) BOOST_PP_IF(p(91, s), BOOST_PP_WHILE_91, s BOOST_PP_TUPLE_EAT_3)(p, o, o(91, s))\n# define BOOST_PP_WHILE_91_I(p, o, s) BOOST_PP_IF(p(92, s), BOOST_PP_WHILE_92, s BOOST_PP_TUPLE_EAT_3)(p, o, o(92, s))\n# define BOOST_PP_WHILE_92_I(p, o, s) BOOST_PP_IF(p(93, s), BOOST_PP_WHILE_93, s BOOST_PP_TUPLE_EAT_3)(p, o, o(93, s))\n# define BOOST_PP_WHILE_93_I(p, o, s) BOOST_PP_IF(p(94, s), BOOST_PP_WHILE_94, s BOOST_PP_TUPLE_EAT_3)(p, o, o(94, s))\n# define BOOST_PP_WHILE_94_I(p, o, s) BOOST_PP_IF(p(95, s), BOOST_PP_WHILE_95, s BOOST_PP_TUPLE_EAT_3)(p, o, o(95, s))\n# define BOOST_PP_WHILE_95_I(p, o, s) BOOST_PP_IF(p(96, s), BOOST_PP_WHILE_96, s BOOST_PP_TUPLE_EAT_3)(p, o, o(96, s))\n# define BOOST_PP_WHILE_96_I(p, o, s) BOOST_PP_IF(p(97, s), BOOST_PP_WHILE_97, s BOOST_PP_TUPLE_EAT_3)(p, o, o(97, s))\n# define BOOST_PP_WHILE_97_I(p, o, s) BOOST_PP_IF(p(98, s), BOOST_PP_WHILE_98, s BOOST_PP_TUPLE_EAT_3)(p, o, o(98, s))\n# define BOOST_PP_WHILE_98_I(p, o, s) BOOST_PP_IF(p(99, s), BOOST_PP_WHILE_99, s BOOST_PP_TUPLE_EAT_3)(p, o, o(99, s))\n# define BOOST_PP_WHILE_99_I(p, o, s) BOOST_PP_IF(p(100, s), BOOST_PP_WHILE_100, s BOOST_PP_TUPLE_EAT_3)(p, o, o(100, s))\n# define BOOST_PP_WHILE_100_I(p, o, s) BOOST_PP_IF(p(101, s), BOOST_PP_WHILE_101, s BOOST_PP_TUPLE_EAT_3)(p, o, o(101, s))\n# define BOOST_PP_WHILE_101_I(p, o, s) BOOST_PP_IF(p(102, s), BOOST_PP_WHILE_102, s BOOST_PP_TUPLE_EAT_3)(p, 
o, o(102, s))\n# define BOOST_PP_WHILE_102_I(p, o, s) BOOST_PP_IF(p(103, s), BOOST_PP_WHILE_103, s BOOST_PP_TUPLE_EAT_3)(p, o, o(103, s))\n# define BOOST_PP_WHILE_103_I(p, o, s) BOOST_PP_IF(p(104, s), BOOST_PP_WHILE_104, s BOOST_PP_TUPLE_EAT_3)(p, o, o(104, s))\n# define BOOST_PP_WHILE_104_I(p, o, s) BOOST_PP_IF(p(105, s), BOOST_PP_WHILE_105, s BOOST_PP_TUPLE_EAT_3)(p, o, o(105, s))\n# define BOOST_PP_WHILE_105_I(p, o, s) BOOST_PP_IF(p(106, s), BOOST_PP_WHILE_106, s BOOST_PP_TUPLE_EAT_3)(p, o, o(106, s))\n# define BOOST_PP_WHILE_106_I(p, o, s) BOOST_PP_IF(p(107, s), BOOST_PP_WHILE_107, s BOOST_PP_TUPLE_EAT_3)(p, o, o(107, s))\n# define BOOST_PP_WHILE_107_I(p, o, s) BOOST_PP_IF(p(108, s), BOOST_PP_WHILE_108, s BOOST_PP_TUPLE_EAT_3)(p, o, o(108, s))\n# define BOOST_PP_WHILE_108_I(p, o, s) BOOST_PP_IF(p(109, s), BOOST_PP_WHILE_109, s BOOST_PP_TUPLE_EAT_3)(p, o, o(109, s))\n# define BOOST_PP_WHILE_109_I(p, o, s) BOOST_PP_IF(p(110, s), BOOST_PP_WHILE_110, s BOOST_PP_TUPLE_EAT_3)(p, o, o(110, s))\n# define BOOST_PP_WHILE_110_I(p, o, s) BOOST_PP_IF(p(111, s), BOOST_PP_WHILE_111, s BOOST_PP_TUPLE_EAT_3)(p, o, o(111, s))\n# define BOOST_PP_WHILE_111_I(p, o, s) BOOST_PP_IF(p(112, s), BOOST_PP_WHILE_112, s BOOST_PP_TUPLE_EAT_3)(p, o, o(112, s))\n# define BOOST_PP_WHILE_112_I(p, o, s) BOOST_PP_IF(p(113, s), BOOST_PP_WHILE_113, s BOOST_PP_TUPLE_EAT_3)(p, o, o(113, s))\n# define BOOST_PP_WHILE_113_I(p, o, s) BOOST_PP_IF(p(114, s), BOOST_PP_WHILE_114, s BOOST_PP_TUPLE_EAT_3)(p, o, o(114, s))\n# define BOOST_PP_WHILE_114_I(p, o, s) BOOST_PP_IF(p(115, s), BOOST_PP_WHILE_115, s BOOST_PP_TUPLE_EAT_3)(p, o, o(115, s))\n# define BOOST_PP_WHILE_115_I(p, o, s) BOOST_PP_IF(p(116, s), BOOST_PP_WHILE_116, s BOOST_PP_TUPLE_EAT_3)(p, o, o(116, s))\n# define BOOST_PP_WHILE_116_I(p, o, s) BOOST_PP_IF(p(117, s), BOOST_PP_WHILE_117, s BOOST_PP_TUPLE_EAT_3)(p, o, o(117, s))\n# define BOOST_PP_WHILE_117_I(p, o, s) BOOST_PP_IF(p(118, s), BOOST_PP_WHILE_118, s BOOST_PP_TUPLE_EAT_3)(p, o, o(118, 
s))\n# define BOOST_PP_WHILE_118_I(p, o, s) BOOST_PP_IF(p(119, s), BOOST_PP_WHILE_119, s BOOST_PP_TUPLE_EAT_3)(p, o, o(119, s))\n# define BOOST_PP_WHILE_119_I(p, o, s) BOOST_PP_IF(p(120, s), BOOST_PP_WHILE_120, s BOOST_PP_TUPLE_EAT_3)(p, o, o(120, s))\n# define BOOST_PP_WHILE_120_I(p, o, s) BOOST_PP_IF(p(121, s), BOOST_PP_WHILE_121, s BOOST_PP_TUPLE_EAT_3)(p, o, o(121, s))\n# define BOOST_PP_WHILE_121_I(p, o, s) BOOST_PP_IF(p(122, s), BOOST_PP_WHILE_122, s BOOST_PP_TUPLE_EAT_3)(p, o, o(122, s))\n# define BOOST_PP_WHILE_122_I(p, o, s) BOOST_PP_IF(p(123, s), BOOST_PP_WHILE_123, s BOOST_PP_TUPLE_EAT_3)(p, o, o(123, s))\n# define BOOST_PP_WHILE_123_I(p, o, s) BOOST_PP_IF(p(124, s), BOOST_PP_WHILE_124, s BOOST_PP_TUPLE_EAT_3)(p, o, o(124, s))\n# define BOOST_PP_WHILE_124_I(p, o, s) BOOST_PP_IF(p(125, s), BOOST_PP_WHILE_125, s BOOST_PP_TUPLE_EAT_3)(p, o, o(125, s))\n# define BOOST_PP_WHILE_125_I(p, o, s) BOOST_PP_IF(p(126, s), BOOST_PP_WHILE_126, s BOOST_PP_TUPLE_EAT_3)(p, o, o(126, s))\n# define BOOST_PP_WHILE_126_I(p, o, s) BOOST_PP_IF(p(127, s), BOOST_PP_WHILE_127, s BOOST_PP_TUPLE_EAT_3)(p, o, o(127, s))\n# define BOOST_PP_WHILE_127_I(p, o, s) BOOST_PP_IF(p(128, s), BOOST_PP_WHILE_128, s BOOST_PP_TUPLE_EAT_3)(p, o, o(128, s))\n# define BOOST_PP_WHILE_128_I(p, o, s) BOOST_PP_IF(p(129, s), BOOST_PP_WHILE_129, s BOOST_PP_TUPLE_EAT_3)(p, o, o(129, s))\n# define BOOST_PP_WHILE_129_I(p, o, s) BOOST_PP_IF(p(130, s), BOOST_PP_WHILE_130, s BOOST_PP_TUPLE_EAT_3)(p, o, o(130, s))\n# define BOOST_PP_WHILE_130_I(p, o, s) BOOST_PP_IF(p(131, s), BOOST_PP_WHILE_131, s BOOST_PP_TUPLE_EAT_3)(p, o, o(131, s))\n# define BOOST_PP_WHILE_131_I(p, o, s) BOOST_PP_IF(p(132, s), BOOST_PP_WHILE_132, s BOOST_PP_TUPLE_EAT_3)(p, o, o(132, s))\n# define BOOST_PP_WHILE_132_I(p, o, s) BOOST_PP_IF(p(133, s), BOOST_PP_WHILE_133, s BOOST_PP_TUPLE_EAT_3)(p, o, o(133, s))\n# define BOOST_PP_WHILE_133_I(p, o, s) BOOST_PP_IF(p(134, s), BOOST_PP_WHILE_134, s BOOST_PP_TUPLE_EAT_3)(p, o, o(134, s))\n# define 
BOOST_PP_WHILE_134_I(p, o, s) BOOST_PP_IF(p(135, s), BOOST_PP_WHILE_135, s BOOST_PP_TUPLE_EAT_3)(p, o, o(135, s))\n# define BOOST_PP_WHILE_135_I(p, o, s) BOOST_PP_IF(p(136, s), BOOST_PP_WHILE_136, s BOOST_PP_TUPLE_EAT_3)(p, o, o(136, s))\n# define BOOST_PP_WHILE_136_I(p, o, s) BOOST_PP_IF(p(137, s), BOOST_PP_WHILE_137, s BOOST_PP_TUPLE_EAT_3)(p, o, o(137, s))\n# define BOOST_PP_WHILE_137_I(p, o, s) BOOST_PP_IF(p(138, s), BOOST_PP_WHILE_138, s BOOST_PP_TUPLE_EAT_3)(p, o, o(138, s))\n# define BOOST_PP_WHILE_138_I(p, o, s) BOOST_PP_IF(p(139, s), BOOST_PP_WHILE_139, s BOOST_PP_TUPLE_EAT_3)(p, o, o(139, s))\n# define BOOST_PP_WHILE_139_I(p, o, s) BOOST_PP_IF(p(140, s), BOOST_PP_WHILE_140, s BOOST_PP_TUPLE_EAT_3)(p, o, o(140, s))\n# define BOOST_PP_WHILE_140_I(p, o, s) BOOST_PP_IF(p(141, s), BOOST_PP_WHILE_141, s BOOST_PP_TUPLE_EAT_3)(p, o, o(141, s))\n# define BOOST_PP_WHILE_141_I(p, o, s) BOOST_PP_IF(p(142, s), BOOST_PP_WHILE_142, s BOOST_PP_TUPLE_EAT_3)(p, o, o(142, s))\n# define BOOST_PP_WHILE_142_I(p, o, s) BOOST_PP_IF(p(143, s), BOOST_PP_WHILE_143, s BOOST_PP_TUPLE_EAT_3)(p, o, o(143, s))\n# define BOOST_PP_WHILE_143_I(p, o, s) BOOST_PP_IF(p(144, s), BOOST_PP_WHILE_144, s BOOST_PP_TUPLE_EAT_3)(p, o, o(144, s))\n# define BOOST_PP_WHILE_144_I(p, o, s) BOOST_PP_IF(p(145, s), BOOST_PP_WHILE_145, s BOOST_PP_TUPLE_EAT_3)(p, o, o(145, s))\n# define BOOST_PP_WHILE_145_I(p, o, s) BOOST_PP_IF(p(146, s), BOOST_PP_WHILE_146, s BOOST_PP_TUPLE_EAT_3)(p, o, o(146, s))\n# define BOOST_PP_WHILE_146_I(p, o, s) BOOST_PP_IF(p(147, s), BOOST_PP_WHILE_147, s BOOST_PP_TUPLE_EAT_3)(p, o, o(147, s))\n# define BOOST_PP_WHILE_147_I(p, o, s) BOOST_PP_IF(p(148, s), BOOST_PP_WHILE_148, s BOOST_PP_TUPLE_EAT_3)(p, o, o(148, s))\n# define BOOST_PP_WHILE_148_I(p, o, s) BOOST_PP_IF(p(149, s), BOOST_PP_WHILE_149, s BOOST_PP_TUPLE_EAT_3)(p, o, o(149, s))\n# define BOOST_PP_WHILE_149_I(p, o, s) BOOST_PP_IF(p(150, s), BOOST_PP_WHILE_150, s BOOST_PP_TUPLE_EAT_3)(p, o, o(150, s))\n# define 
BOOST_PP_WHILE_150_I(p, o, s) BOOST_PP_IF(p(151, s), BOOST_PP_WHILE_151, s BOOST_PP_TUPLE_EAT_3)(p, o, o(151, s))\n# define BOOST_PP_WHILE_151_I(p, o, s) BOOST_PP_IF(p(152, s), BOOST_PP_WHILE_152, s BOOST_PP_TUPLE_EAT_3)(p, o, o(152, s))\n# define BOOST_PP_WHILE_152_I(p, o, s) BOOST_PP_IF(p(153, s), BOOST_PP_WHILE_153, s BOOST_PP_TUPLE_EAT_3)(p, o, o(153, s))\n# define BOOST_PP_WHILE_153_I(p, o, s) BOOST_PP_IF(p(154, s), BOOST_PP_WHILE_154, s BOOST_PP_TUPLE_EAT_3)(p, o, o(154, s))\n# define BOOST_PP_WHILE_154_I(p, o, s) BOOST_PP_IF(p(155, s), BOOST_PP_WHILE_155, s BOOST_PP_TUPLE_EAT_3)(p, o, o(155, s))\n# define BOOST_PP_WHILE_155_I(p, o, s) BOOST_PP_IF(p(156, s), BOOST_PP_WHILE_156, s BOOST_PP_TUPLE_EAT_3)(p, o, o(156, s))\n# define BOOST_PP_WHILE_156_I(p, o, s) BOOST_PP_IF(p(157, s), BOOST_PP_WHILE_157, s BOOST_PP_TUPLE_EAT_3)(p, o, o(157, s))\n# define BOOST_PP_WHILE_157_I(p, o, s) BOOST_PP_IF(p(158, s), BOOST_PP_WHILE_158, s BOOST_PP_TUPLE_EAT_3)(p, o, o(158, s))\n# define BOOST_PP_WHILE_158_I(p, o, s) BOOST_PP_IF(p(159, s), BOOST_PP_WHILE_159, s BOOST_PP_TUPLE_EAT_3)(p, o, o(159, s))\n# define BOOST_PP_WHILE_159_I(p, o, s) BOOST_PP_IF(p(160, s), BOOST_PP_WHILE_160, s BOOST_PP_TUPLE_EAT_3)(p, o, o(160, s))\n# define BOOST_PP_WHILE_160_I(p, o, s) BOOST_PP_IF(p(161, s), BOOST_PP_WHILE_161, s BOOST_PP_TUPLE_EAT_3)(p, o, o(161, s))\n# define BOOST_PP_WHILE_161_I(p, o, s) BOOST_PP_IF(p(162, s), BOOST_PP_WHILE_162, s BOOST_PP_TUPLE_EAT_3)(p, o, o(162, s))\n# define BOOST_PP_WHILE_162_I(p, o, s) BOOST_PP_IF(p(163, s), BOOST_PP_WHILE_163, s BOOST_PP_TUPLE_EAT_3)(p, o, o(163, s))\n# define BOOST_PP_WHILE_163_I(p, o, s) BOOST_PP_IF(p(164, s), BOOST_PP_WHILE_164, s BOOST_PP_TUPLE_EAT_3)(p, o, o(164, s))\n# define BOOST_PP_WHILE_164_I(p, o, s) BOOST_PP_IF(p(165, s), BOOST_PP_WHILE_165, s BOOST_PP_TUPLE_EAT_3)(p, o, o(165, s))\n# define BOOST_PP_WHILE_165_I(p, o, s) BOOST_PP_IF(p(166, s), BOOST_PP_WHILE_166, s BOOST_PP_TUPLE_EAT_3)(p, o, o(166, s))\n# define 
BOOST_PP_WHILE_166_I(p, o, s) BOOST_PP_IF(p(167, s), BOOST_PP_WHILE_167, s BOOST_PP_TUPLE_EAT_3)(p, o, o(167, s))\n# define BOOST_PP_WHILE_167_I(p, o, s) BOOST_PP_IF(p(168, s), BOOST_PP_WHILE_168, s BOOST_PP_TUPLE_EAT_3)(p, o, o(168, s))\n# define BOOST_PP_WHILE_168_I(p, o, s) BOOST_PP_IF(p(169, s), BOOST_PP_WHILE_169, s BOOST_PP_TUPLE_EAT_3)(p, o, o(169, s))\n# define BOOST_PP_WHILE_169_I(p, o, s) BOOST_PP_IF(p(170, s), BOOST_PP_WHILE_170, s BOOST_PP_TUPLE_EAT_3)(p, o, o(170, s))\n# define BOOST_PP_WHILE_170_I(p, o, s) BOOST_PP_IF(p(171, s), BOOST_PP_WHILE_171, s BOOST_PP_TUPLE_EAT_3)(p, o, o(171, s))\n# define BOOST_PP_WHILE_171_I(p, o, s) BOOST_PP_IF(p(172, s), BOOST_PP_WHILE_172, s BOOST_PP_TUPLE_EAT_3)(p, o, o(172, s))\n# define BOOST_PP_WHILE_172_I(p, o, s) BOOST_PP_IF(p(173, s), BOOST_PP_WHILE_173, s BOOST_PP_TUPLE_EAT_3)(p, o, o(173, s))\n# define BOOST_PP_WHILE_173_I(p, o, s) BOOST_PP_IF(p(174, s), BOOST_PP_WHILE_174, s BOOST_PP_TUPLE_EAT_3)(p, o, o(174, s))\n# define BOOST_PP_WHILE_174_I(p, o, s) BOOST_PP_IF(p(175, s), BOOST_PP_WHILE_175, s BOOST_PP_TUPLE_EAT_3)(p, o, o(175, s))\n# define BOOST_PP_WHILE_175_I(p, o, s) BOOST_PP_IF(p(176, s), BOOST_PP_WHILE_176, s BOOST_PP_TUPLE_EAT_3)(p, o, o(176, s))\n# define BOOST_PP_WHILE_176_I(p, o, s) BOOST_PP_IF(p(177, s), BOOST_PP_WHILE_177, s BOOST_PP_TUPLE_EAT_3)(p, o, o(177, s))\n# define BOOST_PP_WHILE_177_I(p, o, s) BOOST_PP_IF(p(178, s), BOOST_PP_WHILE_178, s BOOST_PP_TUPLE_EAT_3)(p, o, o(178, s))\n# define BOOST_PP_WHILE_178_I(p, o, s) BOOST_PP_IF(p(179, s), BOOST_PP_WHILE_179, s BOOST_PP_TUPLE_EAT_3)(p, o, o(179, s))\n# define BOOST_PP_WHILE_179_I(p, o, s) BOOST_PP_IF(p(180, s), BOOST_PP_WHILE_180, s BOOST_PP_TUPLE_EAT_3)(p, o, o(180, s))\n# define BOOST_PP_WHILE_180_I(p, o, s) BOOST_PP_IF(p(181, s), BOOST_PP_WHILE_181, s BOOST_PP_TUPLE_EAT_3)(p, o, o(181, s))\n# define BOOST_PP_WHILE_181_I(p, o, s) BOOST_PP_IF(p(182, s), BOOST_PP_WHILE_182, s BOOST_PP_TUPLE_EAT_3)(p, o, o(182, s))\n# define 
BOOST_PP_WHILE_182_I(p, o, s) BOOST_PP_IF(p(183, s), BOOST_PP_WHILE_183, s BOOST_PP_TUPLE_EAT_3)(p, o, o(183, s))\n# define BOOST_PP_WHILE_183_I(p, o, s) BOOST_PP_IF(p(184, s), BOOST_PP_WHILE_184, s BOOST_PP_TUPLE_EAT_3)(p, o, o(184, s))\n# define BOOST_PP_WHILE_184_I(p, o, s) BOOST_PP_IF(p(185, s), BOOST_PP_WHILE_185, s BOOST_PP_TUPLE_EAT_3)(p, o, o(185, s))\n# define BOOST_PP_WHILE_185_I(p, o, s) BOOST_PP_IF(p(186, s), BOOST_PP_WHILE_186, s BOOST_PP_TUPLE_EAT_3)(p, o, o(186, s))\n# define BOOST_PP_WHILE_186_I(p, o, s) BOOST_PP_IF(p(187, s), BOOST_PP_WHILE_187, s BOOST_PP_TUPLE_EAT_3)(p, o, o(187, s))\n# define BOOST_PP_WHILE_187_I(p, o, s) BOOST_PP_IF(p(188, s), BOOST_PP_WHILE_188, s BOOST_PP_TUPLE_EAT_3)(p, o, o(188, s))\n# define BOOST_PP_WHILE_188_I(p, o, s) BOOST_PP_IF(p(189, s), BOOST_PP_WHILE_189, s BOOST_PP_TUPLE_EAT_3)(p, o, o(189, s))\n# define BOOST_PP_WHILE_189_I(p, o, s) BOOST_PP_IF(p(190, s), BOOST_PP_WHILE_190, s BOOST_PP_TUPLE_EAT_3)(p, o, o(190, s))\n# define BOOST_PP_WHILE_190_I(p, o, s) BOOST_PP_IF(p(191, s), BOOST_PP_WHILE_191, s BOOST_PP_TUPLE_EAT_3)(p, o, o(191, s))\n# define BOOST_PP_WHILE_191_I(p, o, s) BOOST_PP_IF(p(192, s), BOOST_PP_WHILE_192, s BOOST_PP_TUPLE_EAT_3)(p, o, o(192, s))\n# define BOOST_PP_WHILE_192_I(p, o, s) BOOST_PP_IF(p(193, s), BOOST_PP_WHILE_193, s BOOST_PP_TUPLE_EAT_3)(p, o, o(193, s))\n# define BOOST_PP_WHILE_193_I(p, o, s) BOOST_PP_IF(p(194, s), BOOST_PP_WHILE_194, s BOOST_PP_TUPLE_EAT_3)(p, o, o(194, s))\n# define BOOST_PP_WHILE_194_I(p, o, s) BOOST_PP_IF(p(195, s), BOOST_PP_WHILE_195, s BOOST_PP_TUPLE_EAT_3)(p, o, o(195, s))\n# define BOOST_PP_WHILE_195_I(p, o, s) BOOST_PP_IF(p(196, s), BOOST_PP_WHILE_196, s BOOST_PP_TUPLE_EAT_3)(p, o, o(196, s))\n# define BOOST_PP_WHILE_196_I(p, o, s) BOOST_PP_IF(p(197, s), BOOST_PP_WHILE_197, s BOOST_PP_TUPLE_EAT_3)(p, o, o(197, s))\n# define BOOST_PP_WHILE_197_I(p, o, s) BOOST_PP_IF(p(198, s), BOOST_PP_WHILE_198, s BOOST_PP_TUPLE_EAT_3)(p, o, o(198, s))\n# define 
BOOST_PP_WHILE_198_I(p, o, s) BOOST_PP_IF(p(199, s), BOOST_PP_WHILE_199, s BOOST_PP_TUPLE_EAT_3)(p, o, o(199, s))\n# define BOOST_PP_WHILE_199_I(p, o, s) BOOST_PP_IF(p(200, s), BOOST_PP_WHILE_200, s BOOST_PP_TUPLE_EAT_3)(p, o, o(200, s))\n# define BOOST_PP_WHILE_200_I(p, o, s) BOOST_PP_IF(p(201, s), BOOST_PP_WHILE_201, s BOOST_PP_TUPLE_EAT_3)(p, o, o(201, s))\n# define BOOST_PP_WHILE_201_I(p, o, s) BOOST_PP_IF(p(202, s), BOOST_PP_WHILE_202, s BOOST_PP_TUPLE_EAT_3)(p, o, o(202, s))\n# define BOOST_PP_WHILE_202_I(p, o, s) BOOST_PP_IF(p(203, s), BOOST_PP_WHILE_203, s BOOST_PP_TUPLE_EAT_3)(p, o, o(203, s))\n# define BOOST_PP_WHILE_203_I(p, o, s) BOOST_PP_IF(p(204, s), BOOST_PP_WHILE_204, s BOOST_PP_TUPLE_EAT_3)(p, o, o(204, s))\n# define BOOST_PP_WHILE_204_I(p, o, s) BOOST_PP_IF(p(205, s), BOOST_PP_WHILE_205, s BOOST_PP_TUPLE_EAT_3)(p, o, o(205, s))\n# define BOOST_PP_WHILE_205_I(p, o, s) BOOST_PP_IF(p(206, s), BOOST_PP_WHILE_206, s BOOST_PP_TUPLE_EAT_3)(p, o, o(206, s))\n# define BOOST_PP_WHILE_206_I(p, o, s) BOOST_PP_IF(p(207, s), BOOST_PP_WHILE_207, s BOOST_PP_TUPLE_EAT_3)(p, o, o(207, s))\n# define BOOST_PP_WHILE_207_I(p, o, s) BOOST_PP_IF(p(208, s), BOOST_PP_WHILE_208, s BOOST_PP_TUPLE_EAT_3)(p, o, o(208, s))\n# define BOOST_PP_WHILE_208_I(p, o, s) BOOST_PP_IF(p(209, s), BOOST_PP_WHILE_209, s BOOST_PP_TUPLE_EAT_3)(p, o, o(209, s))\n# define BOOST_PP_WHILE_209_I(p, o, s) BOOST_PP_IF(p(210, s), BOOST_PP_WHILE_210, s BOOST_PP_TUPLE_EAT_3)(p, o, o(210, s))\n# define BOOST_PP_WHILE_210_I(p, o, s) BOOST_PP_IF(p(211, s), BOOST_PP_WHILE_211, s BOOST_PP_TUPLE_EAT_3)(p, o, o(211, s))\n# define BOOST_PP_WHILE_211_I(p, o, s) BOOST_PP_IF(p(212, s), BOOST_PP_WHILE_212, s BOOST_PP_TUPLE_EAT_3)(p, o, o(212, s))\n# define BOOST_PP_WHILE_212_I(p, o, s) BOOST_PP_IF(p(213, s), BOOST_PP_WHILE_213, s BOOST_PP_TUPLE_EAT_3)(p, o, o(213, s))\n# define BOOST_PP_WHILE_213_I(p, o, s) BOOST_PP_IF(p(214, s), BOOST_PP_WHILE_214, s BOOST_PP_TUPLE_EAT_3)(p, o, o(214, s))\n# define 
BOOST_PP_WHILE_214_I(p, o, s) BOOST_PP_IF(p(215, s), BOOST_PP_WHILE_215, s BOOST_PP_TUPLE_EAT_3)(p, o, o(215, s))\n# define BOOST_PP_WHILE_215_I(p, o, s) BOOST_PP_IF(p(216, s), BOOST_PP_WHILE_216, s BOOST_PP_TUPLE_EAT_3)(p, o, o(216, s))\n# define BOOST_PP_WHILE_216_I(p, o, s) BOOST_PP_IF(p(217, s), BOOST_PP_WHILE_217, s BOOST_PP_TUPLE_EAT_3)(p, o, o(217, s))\n# define BOOST_PP_WHILE_217_I(p, o, s) BOOST_PP_IF(p(218, s), BOOST_PP_WHILE_218, s BOOST_PP_TUPLE_EAT_3)(p, o, o(218, s))\n# define BOOST_PP_WHILE_218_I(p, o, s) BOOST_PP_IF(p(219, s), BOOST_PP_WHILE_219, s BOOST_PP_TUPLE_EAT_3)(p, o, o(219, s))\n# define BOOST_PP_WHILE_219_I(p, o, s) BOOST_PP_IF(p(220, s), BOOST_PP_WHILE_220, s BOOST_PP_TUPLE_EAT_3)(p, o, o(220, s))\n# define BOOST_PP_WHILE_220_I(p, o, s) BOOST_PP_IF(p(221, s), BOOST_PP_WHILE_221, s BOOST_PP_TUPLE_EAT_3)(p, o, o(221, s))\n# define BOOST_PP_WHILE_221_I(p, o, s) BOOST_PP_IF(p(222, s), BOOST_PP_WHILE_222, s BOOST_PP_TUPLE_EAT_3)(p, o, o(222, s))\n# define BOOST_PP_WHILE_222_I(p, o, s) BOOST_PP_IF(p(223, s), BOOST_PP_WHILE_223, s BOOST_PP_TUPLE_EAT_3)(p, o, o(223, s))\n# define BOOST_PP_WHILE_223_I(p, o, s) BOOST_PP_IF(p(224, s), BOOST_PP_WHILE_224, s BOOST_PP_TUPLE_EAT_3)(p, o, o(224, s))\n# define BOOST_PP_WHILE_224_I(p, o, s) BOOST_PP_IF(p(225, s), BOOST_PP_WHILE_225, s BOOST_PP_TUPLE_EAT_3)(p, o, o(225, s))\n# define BOOST_PP_WHILE_225_I(p, o, s) BOOST_PP_IF(p(226, s), BOOST_PP_WHILE_226, s BOOST_PP_TUPLE_EAT_3)(p, o, o(226, s))\n# define BOOST_PP_WHILE_226_I(p, o, s) BOOST_PP_IF(p(227, s), BOOST_PP_WHILE_227, s BOOST_PP_TUPLE_EAT_3)(p, o, o(227, s))\n# define BOOST_PP_WHILE_227_I(p, o, s) BOOST_PP_IF(p(228, s), BOOST_PP_WHILE_228, s BOOST_PP_TUPLE_EAT_3)(p, o, o(228, s))\n# define BOOST_PP_WHILE_228_I(p, o, s) BOOST_PP_IF(p(229, s), BOOST_PP_WHILE_229, s BOOST_PP_TUPLE_EAT_3)(p, o, o(229, s))\n# define BOOST_PP_WHILE_229_I(p, o, s) BOOST_PP_IF(p(230, s), BOOST_PP_WHILE_230, s BOOST_PP_TUPLE_EAT_3)(p, o, o(230, s))\n# define 
BOOST_PP_WHILE_230_I(p, o, s) BOOST_PP_IF(p(231, s), BOOST_PP_WHILE_231, s BOOST_PP_TUPLE_EAT_3)(p, o, o(231, s))\n# define BOOST_PP_WHILE_231_I(p, o, s) BOOST_PP_IF(p(232, s), BOOST_PP_WHILE_232, s BOOST_PP_TUPLE_EAT_3)(p, o, o(232, s))\n# define BOOST_PP_WHILE_232_I(p, o, s) BOOST_PP_IF(p(233, s), BOOST_PP_WHILE_233, s BOOST_PP_TUPLE_EAT_3)(p, o, o(233, s))\n# define BOOST_PP_WHILE_233_I(p, o, s) BOOST_PP_IF(p(234, s), BOOST_PP_WHILE_234, s BOOST_PP_TUPLE_EAT_3)(p, o, o(234, s))\n# define BOOST_PP_WHILE_234_I(p, o, s) BOOST_PP_IF(p(235, s), BOOST_PP_WHILE_235, s BOOST_PP_TUPLE_EAT_3)(p, o, o(235, s))\n# define BOOST_PP_WHILE_235_I(p, o, s) BOOST_PP_IF(p(236, s), BOOST_PP_WHILE_236, s BOOST_PP_TUPLE_EAT_3)(p, o, o(236, s))\n# define BOOST_PP_WHILE_236_I(p, o, s) BOOST_PP_IF(p(237, s), BOOST_PP_WHILE_237, s BOOST_PP_TUPLE_EAT_3)(p, o, o(237, s))\n# define BOOST_PP_WHILE_237_I(p, o, s) BOOST_PP_IF(p(238, s), BOOST_PP_WHILE_238, s BOOST_PP_TUPLE_EAT_3)(p, o, o(238, s))\n# define BOOST_PP_WHILE_238_I(p, o, s) BOOST_PP_IF(p(239, s), BOOST_PP_WHILE_239, s BOOST_PP_TUPLE_EAT_3)(p, o, o(239, s))\n# define BOOST_PP_WHILE_239_I(p, o, s) BOOST_PP_IF(p(240, s), BOOST_PP_WHILE_240, s BOOST_PP_TUPLE_EAT_3)(p, o, o(240, s))\n# define BOOST_PP_WHILE_240_I(p, o, s) BOOST_PP_IF(p(241, s), BOOST_PP_WHILE_241, s BOOST_PP_TUPLE_EAT_3)(p, o, o(241, s))\n# define BOOST_PP_WHILE_241_I(p, o, s) BOOST_PP_IF(p(242, s), BOOST_PP_WHILE_242, s BOOST_PP_TUPLE_EAT_3)(p, o, o(242, s))\n# define BOOST_PP_WHILE_242_I(p, o, s) BOOST_PP_IF(p(243, s), BOOST_PP_WHILE_243, s BOOST_PP_TUPLE_EAT_3)(p, o, o(243, s))\n# define BOOST_PP_WHILE_243_I(p, o, s) BOOST_PP_IF(p(244, s), BOOST_PP_WHILE_244, s BOOST_PP_TUPLE_EAT_3)(p, o, o(244, s))\n# define BOOST_PP_WHILE_244_I(p, o, s) BOOST_PP_IF(p(245, s), BOOST_PP_WHILE_245, s BOOST_PP_TUPLE_EAT_3)(p, o, o(245, s))\n# define BOOST_PP_WHILE_245_I(p, o, s) BOOST_PP_IF(p(246, s), BOOST_PP_WHILE_246, s BOOST_PP_TUPLE_EAT_3)(p, o, o(246, s))\n# define 
BOOST_PP_WHILE_246_I(p, o, s) BOOST_PP_IF(p(247, s), BOOST_PP_WHILE_247, s BOOST_PP_TUPLE_EAT_3)(p, o, o(247, s))\n# define BOOST_PP_WHILE_247_I(p, o, s) BOOST_PP_IF(p(248, s), BOOST_PP_WHILE_248, s BOOST_PP_TUPLE_EAT_3)(p, o, o(248, s))\n# define BOOST_PP_WHILE_248_I(p, o, s) BOOST_PP_IF(p(249, s), BOOST_PP_WHILE_249, s BOOST_PP_TUPLE_EAT_3)(p, o, o(249, s))\n# define BOOST_PP_WHILE_249_I(p, o, s) BOOST_PP_IF(p(250, s), BOOST_PP_WHILE_250, s BOOST_PP_TUPLE_EAT_3)(p, o, o(250, s))\n# define BOOST_PP_WHILE_250_I(p, o, s) BOOST_PP_IF(p(251, s), BOOST_PP_WHILE_251, s BOOST_PP_TUPLE_EAT_3)(p, o, o(251, s))\n# define BOOST_PP_WHILE_251_I(p, o, s) BOOST_PP_IF(p(252, s), BOOST_PP_WHILE_252, s BOOST_PP_TUPLE_EAT_3)(p, o, o(252, s))\n# define BOOST_PP_WHILE_252_I(p, o, s) BOOST_PP_IF(p(253, s), BOOST_PP_WHILE_253, s BOOST_PP_TUPLE_EAT_3)(p, o, o(253, s))\n# define BOOST_PP_WHILE_253_I(p, o, s) BOOST_PP_IF(p(254, s), BOOST_PP_WHILE_254, s BOOST_PP_TUPLE_EAT_3)(p, o, o(254, s))\n# define BOOST_PP_WHILE_254_I(p, o, s) BOOST_PP_IF(p(255, s), BOOST_PP_WHILE_255, s BOOST_PP_TUPLE_EAT_3)(p, o, o(255, s))\n# define BOOST_PP_WHILE_255_I(p, o, s) BOOST_PP_IF(p(256, s), BOOST_PP_WHILE_256, s BOOST_PP_TUPLE_EAT_3)(p, o, o(256, s))\n# define BOOST_PP_WHILE_256_I(p, o, s) BOOST_PP_IF(p(257, s), BOOST_PP_WHILE_257, s BOOST_PP_TUPLE_EAT_3)(p, o, o(257, s))\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/detail/msvc/while.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_DETAIL_MSVC_WHILE_HPP\n# define BOOST_PREPROCESSOR_CONTROL_DETAIL_MSVC_WHILE_HPP\n#\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_WHILE_1(p, o, s) BOOST_PP_IF(p(2, s), BOOST_PP_WHILE_2, s BOOST_PP_TUPLE_EAT_3)(p, o, o(2, s))\n# define BOOST_PP_WHILE_2(p, o, s) BOOST_PP_IF(p(3, s), BOOST_PP_WHILE_3, s BOOST_PP_TUPLE_EAT_3)(p, o, o(3, s))\n# define BOOST_PP_WHILE_3(p, o, s) BOOST_PP_IF(p(4, s), BOOST_PP_WHILE_4, s BOOST_PP_TUPLE_EAT_3)(p, o, o(4, s))\n# define BOOST_PP_WHILE_4(p, o, s) BOOST_PP_IF(p(5, s), BOOST_PP_WHILE_5, s BOOST_PP_TUPLE_EAT_3)(p, o, o(5, s))\n# define BOOST_PP_WHILE_5(p, o, s) BOOST_PP_IF(p(6, s), BOOST_PP_WHILE_6, s BOOST_PP_TUPLE_EAT_3)(p, o, o(6, s))\n# define BOOST_PP_WHILE_6(p, o, s) BOOST_PP_IF(p(7, s), BOOST_PP_WHILE_7, s BOOST_PP_TUPLE_EAT_3)(p, o, o(7, s))\n# define BOOST_PP_WHILE_7(p, o, s) BOOST_PP_IF(p(8, s), BOOST_PP_WHILE_8, s BOOST_PP_TUPLE_EAT_3)(p, o, o(8, s))\n# define BOOST_PP_WHILE_8(p, o, s) BOOST_PP_IF(p(9, s), BOOST_PP_WHILE_9, s BOOST_PP_TUPLE_EAT_3)(p, o, o(9, s))\n# define BOOST_PP_WHILE_9(p, o, s) BOOST_PP_IF(p(10, s), BOOST_PP_WHILE_10, s BOOST_PP_TUPLE_EAT_3)(p, o, o(10, s))\n# define BOOST_PP_WHILE_10(p, o, s) BOOST_PP_IF(p(11, s), BOOST_PP_WHILE_11, s BOOST_PP_TUPLE_EAT_3)(p, o, o(11, s))\n# define BOOST_PP_WHILE_11(p, o, s) BOOST_PP_IF(p(12, s), BOOST_PP_WHILE_12, s BOOST_PP_TUPLE_EAT_3)(p, o, o(12, s))\n# define BOOST_PP_WHILE_12(p, o, s) BOOST_PP_IF(p(13, s), BOOST_PP_WHILE_13, s BOOST_PP_TUPLE_EAT_3)(p, o, o(13, s))\n# define 
BOOST_PP_WHILE_13(p, o, s) BOOST_PP_IF(p(14, s), BOOST_PP_WHILE_14, s BOOST_PP_TUPLE_EAT_3)(p, o, o(14, s))\n# define BOOST_PP_WHILE_14(p, o, s) BOOST_PP_IF(p(15, s), BOOST_PP_WHILE_15, s BOOST_PP_TUPLE_EAT_3)(p, o, o(15, s))\n# define BOOST_PP_WHILE_15(p, o, s) BOOST_PP_IF(p(16, s), BOOST_PP_WHILE_16, s BOOST_PP_TUPLE_EAT_3)(p, o, o(16, s))\n# define BOOST_PP_WHILE_16(p, o, s) BOOST_PP_IF(p(17, s), BOOST_PP_WHILE_17, s BOOST_PP_TUPLE_EAT_3)(p, o, o(17, s))\n# define BOOST_PP_WHILE_17(p, o, s) BOOST_PP_IF(p(18, s), BOOST_PP_WHILE_18, s BOOST_PP_TUPLE_EAT_3)(p, o, o(18, s))\n# define BOOST_PP_WHILE_18(p, o, s) BOOST_PP_IF(p(19, s), BOOST_PP_WHILE_19, s BOOST_PP_TUPLE_EAT_3)(p, o, o(19, s))\n# define BOOST_PP_WHILE_19(p, o, s) BOOST_PP_IF(p(20, s), BOOST_PP_WHILE_20, s BOOST_PP_TUPLE_EAT_3)(p, o, o(20, s))\n# define BOOST_PP_WHILE_20(p, o, s) BOOST_PP_IF(p(21, s), BOOST_PP_WHILE_21, s BOOST_PP_TUPLE_EAT_3)(p, o, o(21, s))\n# define BOOST_PP_WHILE_21(p, o, s) BOOST_PP_IF(p(22, s), BOOST_PP_WHILE_22, s BOOST_PP_TUPLE_EAT_3)(p, o, o(22, s))\n# define BOOST_PP_WHILE_22(p, o, s) BOOST_PP_IF(p(23, s), BOOST_PP_WHILE_23, s BOOST_PP_TUPLE_EAT_3)(p, o, o(23, s))\n# define BOOST_PP_WHILE_23(p, o, s) BOOST_PP_IF(p(24, s), BOOST_PP_WHILE_24, s BOOST_PP_TUPLE_EAT_3)(p, o, o(24, s))\n# define BOOST_PP_WHILE_24(p, o, s) BOOST_PP_IF(p(25, s), BOOST_PP_WHILE_25, s BOOST_PP_TUPLE_EAT_3)(p, o, o(25, s))\n# define BOOST_PP_WHILE_25(p, o, s) BOOST_PP_IF(p(26, s), BOOST_PP_WHILE_26, s BOOST_PP_TUPLE_EAT_3)(p, o, o(26, s))\n# define BOOST_PP_WHILE_26(p, o, s) BOOST_PP_IF(p(27, s), BOOST_PP_WHILE_27, s BOOST_PP_TUPLE_EAT_3)(p, o, o(27, s))\n# define BOOST_PP_WHILE_27(p, o, s) BOOST_PP_IF(p(28, s), BOOST_PP_WHILE_28, s BOOST_PP_TUPLE_EAT_3)(p, o, o(28, s))\n# define BOOST_PP_WHILE_28(p, o, s) BOOST_PP_IF(p(29, s), BOOST_PP_WHILE_29, s BOOST_PP_TUPLE_EAT_3)(p, o, o(29, s))\n# define BOOST_PP_WHILE_29(p, o, s) BOOST_PP_IF(p(30, s), BOOST_PP_WHILE_30, s BOOST_PP_TUPLE_EAT_3)(p, o, o(30, s))\n# 
define BOOST_PP_WHILE_30(p, o, s) BOOST_PP_IF(p(31, s), BOOST_PP_WHILE_31, s BOOST_PP_TUPLE_EAT_3)(p, o, o(31, s))\n# define BOOST_PP_WHILE_31(p, o, s) BOOST_PP_IF(p(32, s), BOOST_PP_WHILE_32, s BOOST_PP_TUPLE_EAT_3)(p, o, o(32, s))\n# define BOOST_PP_WHILE_32(p, o, s) BOOST_PP_IF(p(33, s), BOOST_PP_WHILE_33, s BOOST_PP_TUPLE_EAT_3)(p, o, o(33, s))\n# define BOOST_PP_WHILE_33(p, o, s) BOOST_PP_IF(p(34, s), BOOST_PP_WHILE_34, s BOOST_PP_TUPLE_EAT_3)(p, o, o(34, s))\n# define BOOST_PP_WHILE_34(p, o, s) BOOST_PP_IF(p(35, s), BOOST_PP_WHILE_35, s BOOST_PP_TUPLE_EAT_3)(p, o, o(35, s))\n# define BOOST_PP_WHILE_35(p, o, s) BOOST_PP_IF(p(36, s), BOOST_PP_WHILE_36, s BOOST_PP_TUPLE_EAT_3)(p, o, o(36, s))\n# define BOOST_PP_WHILE_36(p, o, s) BOOST_PP_IF(p(37, s), BOOST_PP_WHILE_37, s BOOST_PP_TUPLE_EAT_3)(p, o, o(37, s))\n# define BOOST_PP_WHILE_37(p, o, s) BOOST_PP_IF(p(38, s), BOOST_PP_WHILE_38, s BOOST_PP_TUPLE_EAT_3)(p, o, o(38, s))\n# define BOOST_PP_WHILE_38(p, o, s) BOOST_PP_IF(p(39, s), BOOST_PP_WHILE_39, s BOOST_PP_TUPLE_EAT_3)(p, o, o(39, s))\n# define BOOST_PP_WHILE_39(p, o, s) BOOST_PP_IF(p(40, s), BOOST_PP_WHILE_40, s BOOST_PP_TUPLE_EAT_3)(p, o, o(40, s))\n# define BOOST_PP_WHILE_40(p, o, s) BOOST_PP_IF(p(41, s), BOOST_PP_WHILE_41, s BOOST_PP_TUPLE_EAT_3)(p, o, o(41, s))\n# define BOOST_PP_WHILE_41(p, o, s) BOOST_PP_IF(p(42, s), BOOST_PP_WHILE_42, s BOOST_PP_TUPLE_EAT_3)(p, o, o(42, s))\n# define BOOST_PP_WHILE_42(p, o, s) BOOST_PP_IF(p(43, s), BOOST_PP_WHILE_43, s BOOST_PP_TUPLE_EAT_3)(p, o, o(43, s))\n# define BOOST_PP_WHILE_43(p, o, s) BOOST_PP_IF(p(44, s), BOOST_PP_WHILE_44, s BOOST_PP_TUPLE_EAT_3)(p, o, o(44, s))\n# define BOOST_PP_WHILE_44(p, o, s) BOOST_PP_IF(p(45, s), BOOST_PP_WHILE_45, s BOOST_PP_TUPLE_EAT_3)(p, o, o(45, s))\n# define BOOST_PP_WHILE_45(p, o, s) BOOST_PP_IF(p(46, s), BOOST_PP_WHILE_46, s BOOST_PP_TUPLE_EAT_3)(p, o, o(46, s))\n# define BOOST_PP_WHILE_46(p, o, s) BOOST_PP_IF(p(47, s), BOOST_PP_WHILE_47, s BOOST_PP_TUPLE_EAT_3)(p, o, o(47, 
s))\n# define BOOST_PP_WHILE_47(p, o, s) BOOST_PP_IF(p(48, s), BOOST_PP_WHILE_48, s BOOST_PP_TUPLE_EAT_3)(p, o, o(48, s))\n# define BOOST_PP_WHILE_48(p, o, s) BOOST_PP_IF(p(49, s), BOOST_PP_WHILE_49, s BOOST_PP_TUPLE_EAT_3)(p, o, o(49, s))\n# define BOOST_PP_WHILE_49(p, o, s) BOOST_PP_IF(p(50, s), BOOST_PP_WHILE_50, s BOOST_PP_TUPLE_EAT_3)(p, o, o(50, s))\n# define BOOST_PP_WHILE_50(p, o, s) BOOST_PP_IF(p(51, s), BOOST_PP_WHILE_51, s BOOST_PP_TUPLE_EAT_3)(p, o, o(51, s))\n# define BOOST_PP_WHILE_51(p, o, s) BOOST_PP_IF(p(52, s), BOOST_PP_WHILE_52, s BOOST_PP_TUPLE_EAT_3)(p, o, o(52, s))\n# define BOOST_PP_WHILE_52(p, o, s) BOOST_PP_IF(p(53, s), BOOST_PP_WHILE_53, s BOOST_PP_TUPLE_EAT_3)(p, o, o(53, s))\n# define BOOST_PP_WHILE_53(p, o, s) BOOST_PP_IF(p(54, s), BOOST_PP_WHILE_54, s BOOST_PP_TUPLE_EAT_3)(p, o, o(54, s))\n# define BOOST_PP_WHILE_54(p, o, s) BOOST_PP_IF(p(55, s), BOOST_PP_WHILE_55, s BOOST_PP_TUPLE_EAT_3)(p, o, o(55, s))\n# define BOOST_PP_WHILE_55(p, o, s) BOOST_PP_IF(p(56, s), BOOST_PP_WHILE_56, s BOOST_PP_TUPLE_EAT_3)(p, o, o(56, s))\n# define BOOST_PP_WHILE_56(p, o, s) BOOST_PP_IF(p(57, s), BOOST_PP_WHILE_57, s BOOST_PP_TUPLE_EAT_3)(p, o, o(57, s))\n# define BOOST_PP_WHILE_57(p, o, s) BOOST_PP_IF(p(58, s), BOOST_PP_WHILE_58, s BOOST_PP_TUPLE_EAT_3)(p, o, o(58, s))\n# define BOOST_PP_WHILE_58(p, o, s) BOOST_PP_IF(p(59, s), BOOST_PP_WHILE_59, s BOOST_PP_TUPLE_EAT_3)(p, o, o(59, s))\n# define BOOST_PP_WHILE_59(p, o, s) BOOST_PP_IF(p(60, s), BOOST_PP_WHILE_60, s BOOST_PP_TUPLE_EAT_3)(p, o, o(60, s))\n# define BOOST_PP_WHILE_60(p, o, s) BOOST_PP_IF(p(61, s), BOOST_PP_WHILE_61, s BOOST_PP_TUPLE_EAT_3)(p, o, o(61, s))\n# define BOOST_PP_WHILE_61(p, o, s) BOOST_PP_IF(p(62, s), BOOST_PP_WHILE_62, s BOOST_PP_TUPLE_EAT_3)(p, o, o(62, s))\n# define BOOST_PP_WHILE_62(p, o, s) BOOST_PP_IF(p(63, s), BOOST_PP_WHILE_63, s BOOST_PP_TUPLE_EAT_3)(p, o, o(63, s))\n# define BOOST_PP_WHILE_63(p, o, s) BOOST_PP_IF(p(64, s), BOOST_PP_WHILE_64, s BOOST_PP_TUPLE_EAT_3)(p, o, 
o(64, s))\n# define BOOST_PP_WHILE_64(p, o, s) BOOST_PP_IF(p(65, s), BOOST_PP_WHILE_65, s BOOST_PP_TUPLE_EAT_3)(p, o, o(65, s))\n# define BOOST_PP_WHILE_65(p, o, s) BOOST_PP_IF(p(66, s), BOOST_PP_WHILE_66, s BOOST_PP_TUPLE_EAT_3)(p, o, o(66, s))\n# define BOOST_PP_WHILE_66(p, o, s) BOOST_PP_IF(p(67, s), BOOST_PP_WHILE_67, s BOOST_PP_TUPLE_EAT_3)(p, o, o(67, s))\n# define BOOST_PP_WHILE_67(p, o, s) BOOST_PP_IF(p(68, s), BOOST_PP_WHILE_68, s BOOST_PP_TUPLE_EAT_3)(p, o, o(68, s))\n# define BOOST_PP_WHILE_68(p, o, s) BOOST_PP_IF(p(69, s), BOOST_PP_WHILE_69, s BOOST_PP_TUPLE_EAT_3)(p, o, o(69, s))\n# define BOOST_PP_WHILE_69(p, o, s) BOOST_PP_IF(p(70, s), BOOST_PP_WHILE_70, s BOOST_PP_TUPLE_EAT_3)(p, o, o(70, s))\n# define BOOST_PP_WHILE_70(p, o, s) BOOST_PP_IF(p(71, s), BOOST_PP_WHILE_71, s BOOST_PP_TUPLE_EAT_3)(p, o, o(71, s))\n# define BOOST_PP_WHILE_71(p, o, s) BOOST_PP_IF(p(72, s), BOOST_PP_WHILE_72, s BOOST_PP_TUPLE_EAT_3)(p, o, o(72, s))\n# define BOOST_PP_WHILE_72(p, o, s) BOOST_PP_IF(p(73, s), BOOST_PP_WHILE_73, s BOOST_PP_TUPLE_EAT_3)(p, o, o(73, s))\n# define BOOST_PP_WHILE_73(p, o, s) BOOST_PP_IF(p(74, s), BOOST_PP_WHILE_74, s BOOST_PP_TUPLE_EAT_3)(p, o, o(74, s))\n# define BOOST_PP_WHILE_74(p, o, s) BOOST_PP_IF(p(75, s), BOOST_PP_WHILE_75, s BOOST_PP_TUPLE_EAT_3)(p, o, o(75, s))\n# define BOOST_PP_WHILE_75(p, o, s) BOOST_PP_IF(p(76, s), BOOST_PP_WHILE_76, s BOOST_PP_TUPLE_EAT_3)(p, o, o(76, s))\n# define BOOST_PP_WHILE_76(p, o, s) BOOST_PP_IF(p(77, s), BOOST_PP_WHILE_77, s BOOST_PP_TUPLE_EAT_3)(p, o, o(77, s))\n# define BOOST_PP_WHILE_77(p, o, s) BOOST_PP_IF(p(78, s), BOOST_PP_WHILE_78, s BOOST_PP_TUPLE_EAT_3)(p, o, o(78, s))\n# define BOOST_PP_WHILE_78(p, o, s) BOOST_PP_IF(p(79, s), BOOST_PP_WHILE_79, s BOOST_PP_TUPLE_EAT_3)(p, o, o(79, s))\n# define BOOST_PP_WHILE_79(p, o, s) BOOST_PP_IF(p(80, s), BOOST_PP_WHILE_80, s BOOST_PP_TUPLE_EAT_3)(p, o, o(80, s))\n# define BOOST_PP_WHILE_80(p, o, s) BOOST_PP_IF(p(81, s), BOOST_PP_WHILE_81, s 
BOOST_PP_TUPLE_EAT_3)(p, o, o(81, s))\n# define BOOST_PP_WHILE_81(p, o, s) BOOST_PP_IF(p(82, s), BOOST_PP_WHILE_82, s BOOST_PP_TUPLE_EAT_3)(p, o, o(82, s))\n# define BOOST_PP_WHILE_82(p, o, s) BOOST_PP_IF(p(83, s), BOOST_PP_WHILE_83, s BOOST_PP_TUPLE_EAT_3)(p, o, o(83, s))\n# define BOOST_PP_WHILE_83(p, o, s) BOOST_PP_IF(p(84, s), BOOST_PP_WHILE_84, s BOOST_PP_TUPLE_EAT_3)(p, o, o(84, s))\n# define BOOST_PP_WHILE_84(p, o, s) BOOST_PP_IF(p(85, s), BOOST_PP_WHILE_85, s BOOST_PP_TUPLE_EAT_3)(p, o, o(85, s))\n# define BOOST_PP_WHILE_85(p, o, s) BOOST_PP_IF(p(86, s), BOOST_PP_WHILE_86, s BOOST_PP_TUPLE_EAT_3)(p, o, o(86, s))\n# define BOOST_PP_WHILE_86(p, o, s) BOOST_PP_IF(p(87, s), BOOST_PP_WHILE_87, s BOOST_PP_TUPLE_EAT_3)(p, o, o(87, s))\n# define BOOST_PP_WHILE_87(p, o, s) BOOST_PP_IF(p(88, s), BOOST_PP_WHILE_88, s BOOST_PP_TUPLE_EAT_3)(p, o, o(88, s))\n# define BOOST_PP_WHILE_88(p, o, s) BOOST_PP_IF(p(89, s), BOOST_PP_WHILE_89, s BOOST_PP_TUPLE_EAT_3)(p, o, o(89, s))\n# define BOOST_PP_WHILE_89(p, o, s) BOOST_PP_IF(p(90, s), BOOST_PP_WHILE_90, s BOOST_PP_TUPLE_EAT_3)(p, o, o(90, s))\n# define BOOST_PP_WHILE_90(p, o, s) BOOST_PP_IF(p(91, s), BOOST_PP_WHILE_91, s BOOST_PP_TUPLE_EAT_3)(p, o, o(91, s))\n# define BOOST_PP_WHILE_91(p, o, s) BOOST_PP_IF(p(92, s), BOOST_PP_WHILE_92, s BOOST_PP_TUPLE_EAT_3)(p, o, o(92, s))\n# define BOOST_PP_WHILE_92(p, o, s) BOOST_PP_IF(p(93, s), BOOST_PP_WHILE_93, s BOOST_PP_TUPLE_EAT_3)(p, o, o(93, s))\n# define BOOST_PP_WHILE_93(p, o, s) BOOST_PP_IF(p(94, s), BOOST_PP_WHILE_94, s BOOST_PP_TUPLE_EAT_3)(p, o, o(94, s))\n# define BOOST_PP_WHILE_94(p, o, s) BOOST_PP_IF(p(95, s), BOOST_PP_WHILE_95, s BOOST_PP_TUPLE_EAT_3)(p, o, o(95, s))\n# define BOOST_PP_WHILE_95(p, o, s) BOOST_PP_IF(p(96, s), BOOST_PP_WHILE_96, s BOOST_PP_TUPLE_EAT_3)(p, o, o(96, s))\n# define BOOST_PP_WHILE_96(p, o, s) BOOST_PP_IF(p(97, s), BOOST_PP_WHILE_97, s BOOST_PP_TUPLE_EAT_3)(p, o, o(97, s))\n# define BOOST_PP_WHILE_97(p, o, s) BOOST_PP_IF(p(98, s), 
BOOST_PP_WHILE_98, s BOOST_PP_TUPLE_EAT_3)(p, o, o(98, s))\n# define BOOST_PP_WHILE_98(p, o, s) BOOST_PP_IF(p(99, s), BOOST_PP_WHILE_99, s BOOST_PP_TUPLE_EAT_3)(p, o, o(99, s))\n# define BOOST_PP_WHILE_99(p, o, s) BOOST_PP_IF(p(100, s), BOOST_PP_WHILE_100, s BOOST_PP_TUPLE_EAT_3)(p, o, o(100, s))\n# define BOOST_PP_WHILE_100(p, o, s) BOOST_PP_IF(p(101, s), BOOST_PP_WHILE_101, s BOOST_PP_TUPLE_EAT_3)(p, o, o(101, s))\n# define BOOST_PP_WHILE_101(p, o, s) BOOST_PP_IF(p(102, s), BOOST_PP_WHILE_102, s BOOST_PP_TUPLE_EAT_3)(p, o, o(102, s))\n# define BOOST_PP_WHILE_102(p, o, s) BOOST_PP_IF(p(103, s), BOOST_PP_WHILE_103, s BOOST_PP_TUPLE_EAT_3)(p, o, o(103, s))\n# define BOOST_PP_WHILE_103(p, o, s) BOOST_PP_IF(p(104, s), BOOST_PP_WHILE_104, s BOOST_PP_TUPLE_EAT_3)(p, o, o(104, s))\n# define BOOST_PP_WHILE_104(p, o, s) BOOST_PP_IF(p(105, s), BOOST_PP_WHILE_105, s BOOST_PP_TUPLE_EAT_3)(p, o, o(105, s))\n# define BOOST_PP_WHILE_105(p, o, s) BOOST_PP_IF(p(106, s), BOOST_PP_WHILE_106, s BOOST_PP_TUPLE_EAT_3)(p, o, o(106, s))\n# define BOOST_PP_WHILE_106(p, o, s) BOOST_PP_IF(p(107, s), BOOST_PP_WHILE_107, s BOOST_PP_TUPLE_EAT_3)(p, o, o(107, s))\n# define BOOST_PP_WHILE_107(p, o, s) BOOST_PP_IF(p(108, s), BOOST_PP_WHILE_108, s BOOST_PP_TUPLE_EAT_3)(p, o, o(108, s))\n# define BOOST_PP_WHILE_108(p, o, s) BOOST_PP_IF(p(109, s), BOOST_PP_WHILE_109, s BOOST_PP_TUPLE_EAT_3)(p, o, o(109, s))\n# define BOOST_PP_WHILE_109(p, o, s) BOOST_PP_IF(p(110, s), BOOST_PP_WHILE_110, s BOOST_PP_TUPLE_EAT_3)(p, o, o(110, s))\n# define BOOST_PP_WHILE_110(p, o, s) BOOST_PP_IF(p(111, s), BOOST_PP_WHILE_111, s BOOST_PP_TUPLE_EAT_3)(p, o, o(111, s))\n# define BOOST_PP_WHILE_111(p, o, s) BOOST_PP_IF(p(112, s), BOOST_PP_WHILE_112, s BOOST_PP_TUPLE_EAT_3)(p, o, o(112, s))\n# define BOOST_PP_WHILE_112(p, o, s) BOOST_PP_IF(p(113, s), BOOST_PP_WHILE_113, s BOOST_PP_TUPLE_EAT_3)(p, o, o(113, s))\n# define BOOST_PP_WHILE_113(p, o, s) BOOST_PP_IF(p(114, s), BOOST_PP_WHILE_114, s BOOST_PP_TUPLE_EAT_3)(p, o, 
o(114, s))\n# define BOOST_PP_WHILE_114(p, o, s) BOOST_PP_IF(p(115, s), BOOST_PP_WHILE_115, s BOOST_PP_TUPLE_EAT_3)(p, o, o(115, s))\n# define BOOST_PP_WHILE_115(p, o, s) BOOST_PP_IF(p(116, s), BOOST_PP_WHILE_116, s BOOST_PP_TUPLE_EAT_3)(p, o, o(116, s))\n# define BOOST_PP_WHILE_116(p, o, s) BOOST_PP_IF(p(117, s), BOOST_PP_WHILE_117, s BOOST_PP_TUPLE_EAT_3)(p, o, o(117, s))\n# define BOOST_PP_WHILE_117(p, o, s) BOOST_PP_IF(p(118, s), BOOST_PP_WHILE_118, s BOOST_PP_TUPLE_EAT_3)(p, o, o(118, s))\n# define BOOST_PP_WHILE_118(p, o, s) BOOST_PP_IF(p(119, s), BOOST_PP_WHILE_119, s BOOST_PP_TUPLE_EAT_3)(p, o, o(119, s))\n# define BOOST_PP_WHILE_119(p, o, s) BOOST_PP_IF(p(120, s), BOOST_PP_WHILE_120, s BOOST_PP_TUPLE_EAT_3)(p, o, o(120, s))\n# define BOOST_PP_WHILE_120(p, o, s) BOOST_PP_IF(p(121, s), BOOST_PP_WHILE_121, s BOOST_PP_TUPLE_EAT_3)(p, o, o(121, s))\n# define BOOST_PP_WHILE_121(p, o, s) BOOST_PP_IF(p(122, s), BOOST_PP_WHILE_122, s BOOST_PP_TUPLE_EAT_3)(p, o, o(122, s))\n# define BOOST_PP_WHILE_122(p, o, s) BOOST_PP_IF(p(123, s), BOOST_PP_WHILE_123, s BOOST_PP_TUPLE_EAT_3)(p, o, o(123, s))\n# define BOOST_PP_WHILE_123(p, o, s) BOOST_PP_IF(p(124, s), BOOST_PP_WHILE_124, s BOOST_PP_TUPLE_EAT_3)(p, o, o(124, s))\n# define BOOST_PP_WHILE_124(p, o, s) BOOST_PP_IF(p(125, s), BOOST_PP_WHILE_125, s BOOST_PP_TUPLE_EAT_3)(p, o, o(125, s))\n# define BOOST_PP_WHILE_125(p, o, s) BOOST_PP_IF(p(126, s), BOOST_PP_WHILE_126, s BOOST_PP_TUPLE_EAT_3)(p, o, o(126, s))\n# define BOOST_PP_WHILE_126(p, o, s) BOOST_PP_IF(p(127, s), BOOST_PP_WHILE_127, s BOOST_PP_TUPLE_EAT_3)(p, o, o(127, s))\n# define BOOST_PP_WHILE_127(p, o, s) BOOST_PP_IF(p(128, s), BOOST_PP_WHILE_128, s BOOST_PP_TUPLE_EAT_3)(p, o, o(128, s))\n# define BOOST_PP_WHILE_128(p, o, s) BOOST_PP_IF(p(129, s), BOOST_PP_WHILE_129, s BOOST_PP_TUPLE_EAT_3)(p, o, o(129, s))\n# define BOOST_PP_WHILE_129(p, o, s) BOOST_PP_IF(p(130, s), BOOST_PP_WHILE_130, s BOOST_PP_TUPLE_EAT_3)(p, o, o(130, s))\n# define BOOST_PP_WHILE_130(p, o, 
s) BOOST_PP_IF(p(131, s), BOOST_PP_WHILE_131, s BOOST_PP_TUPLE_EAT_3)(p, o, o(131, s))\n# define BOOST_PP_WHILE_131(p, o, s) BOOST_PP_IF(p(132, s), BOOST_PP_WHILE_132, s BOOST_PP_TUPLE_EAT_3)(p, o, o(132, s))\n# define BOOST_PP_WHILE_132(p, o, s) BOOST_PP_IF(p(133, s), BOOST_PP_WHILE_133, s BOOST_PP_TUPLE_EAT_3)(p, o, o(133, s))\n# define BOOST_PP_WHILE_133(p, o, s) BOOST_PP_IF(p(134, s), BOOST_PP_WHILE_134, s BOOST_PP_TUPLE_EAT_3)(p, o, o(134, s))\n# define BOOST_PP_WHILE_134(p, o, s) BOOST_PP_IF(p(135, s), BOOST_PP_WHILE_135, s BOOST_PP_TUPLE_EAT_3)(p, o, o(135, s))\n# define BOOST_PP_WHILE_135(p, o, s) BOOST_PP_IF(p(136, s), BOOST_PP_WHILE_136, s BOOST_PP_TUPLE_EAT_3)(p, o, o(136, s))\n# define BOOST_PP_WHILE_136(p, o, s) BOOST_PP_IF(p(137, s), BOOST_PP_WHILE_137, s BOOST_PP_TUPLE_EAT_3)(p, o, o(137, s))\n# define BOOST_PP_WHILE_137(p, o, s) BOOST_PP_IF(p(138, s), BOOST_PP_WHILE_138, s BOOST_PP_TUPLE_EAT_3)(p, o, o(138, s))\n# define BOOST_PP_WHILE_138(p, o, s) BOOST_PP_IF(p(139, s), BOOST_PP_WHILE_139, s BOOST_PP_TUPLE_EAT_3)(p, o, o(139, s))\n# define BOOST_PP_WHILE_139(p, o, s) BOOST_PP_IF(p(140, s), BOOST_PP_WHILE_140, s BOOST_PP_TUPLE_EAT_3)(p, o, o(140, s))\n# define BOOST_PP_WHILE_140(p, o, s) BOOST_PP_IF(p(141, s), BOOST_PP_WHILE_141, s BOOST_PP_TUPLE_EAT_3)(p, o, o(141, s))\n# define BOOST_PP_WHILE_141(p, o, s) BOOST_PP_IF(p(142, s), BOOST_PP_WHILE_142, s BOOST_PP_TUPLE_EAT_3)(p, o, o(142, s))\n# define BOOST_PP_WHILE_142(p, o, s) BOOST_PP_IF(p(143, s), BOOST_PP_WHILE_143, s BOOST_PP_TUPLE_EAT_3)(p, o, o(143, s))\n# define BOOST_PP_WHILE_143(p, o, s) BOOST_PP_IF(p(144, s), BOOST_PP_WHILE_144, s BOOST_PP_TUPLE_EAT_3)(p, o, o(144, s))\n# define BOOST_PP_WHILE_144(p, o, s) BOOST_PP_IF(p(145, s), BOOST_PP_WHILE_145, s BOOST_PP_TUPLE_EAT_3)(p, o, o(145, s))\n# define BOOST_PP_WHILE_145(p, o, s) BOOST_PP_IF(p(146, s), BOOST_PP_WHILE_146, s BOOST_PP_TUPLE_EAT_3)(p, o, o(146, s))\n# define BOOST_PP_WHILE_146(p, o, s) BOOST_PP_IF(p(147, s), BOOST_PP_WHILE_147, s 
BOOST_PP_TUPLE_EAT_3)(p, o, o(147, s))\n# define BOOST_PP_WHILE_147(p, o, s) BOOST_PP_IF(p(148, s), BOOST_PP_WHILE_148, s BOOST_PP_TUPLE_EAT_3)(p, o, o(148, s))\n# define BOOST_PP_WHILE_148(p, o, s) BOOST_PP_IF(p(149, s), BOOST_PP_WHILE_149, s BOOST_PP_TUPLE_EAT_3)(p, o, o(149, s))\n# define BOOST_PP_WHILE_149(p, o, s) BOOST_PP_IF(p(150, s), BOOST_PP_WHILE_150, s BOOST_PP_TUPLE_EAT_3)(p, o, o(150, s))\n# define BOOST_PP_WHILE_150(p, o, s) BOOST_PP_IF(p(151, s), BOOST_PP_WHILE_151, s BOOST_PP_TUPLE_EAT_3)(p, o, o(151, s))\n# define BOOST_PP_WHILE_151(p, o, s) BOOST_PP_IF(p(152, s), BOOST_PP_WHILE_152, s BOOST_PP_TUPLE_EAT_3)(p, o, o(152, s))\n# define BOOST_PP_WHILE_152(p, o, s) BOOST_PP_IF(p(153, s), BOOST_PP_WHILE_153, s BOOST_PP_TUPLE_EAT_3)(p, o, o(153, s))\n# define BOOST_PP_WHILE_153(p, o, s) BOOST_PP_IF(p(154, s), BOOST_PP_WHILE_154, s BOOST_PP_TUPLE_EAT_3)(p, o, o(154, s))\n# define BOOST_PP_WHILE_154(p, o, s) BOOST_PP_IF(p(155, s), BOOST_PP_WHILE_155, s BOOST_PP_TUPLE_EAT_3)(p, o, o(155, s))\n# define BOOST_PP_WHILE_155(p, o, s) BOOST_PP_IF(p(156, s), BOOST_PP_WHILE_156, s BOOST_PP_TUPLE_EAT_3)(p, o, o(156, s))\n# define BOOST_PP_WHILE_156(p, o, s) BOOST_PP_IF(p(157, s), BOOST_PP_WHILE_157, s BOOST_PP_TUPLE_EAT_3)(p, o, o(157, s))\n# define BOOST_PP_WHILE_157(p, o, s) BOOST_PP_IF(p(158, s), BOOST_PP_WHILE_158, s BOOST_PP_TUPLE_EAT_3)(p, o, o(158, s))\n# define BOOST_PP_WHILE_158(p, o, s) BOOST_PP_IF(p(159, s), BOOST_PP_WHILE_159, s BOOST_PP_TUPLE_EAT_3)(p, o, o(159, s))\n# define BOOST_PP_WHILE_159(p, o, s) BOOST_PP_IF(p(160, s), BOOST_PP_WHILE_160, s BOOST_PP_TUPLE_EAT_3)(p, o, o(160, s))\n# define BOOST_PP_WHILE_160(p, o, s) BOOST_PP_IF(p(161, s), BOOST_PP_WHILE_161, s BOOST_PP_TUPLE_EAT_3)(p, o, o(161, s))\n# define BOOST_PP_WHILE_161(p, o, s) BOOST_PP_IF(p(162, s), BOOST_PP_WHILE_162, s BOOST_PP_TUPLE_EAT_3)(p, o, o(162, s))\n# define BOOST_PP_WHILE_162(p, o, s) BOOST_PP_IF(p(163, s), BOOST_PP_WHILE_163, s BOOST_PP_TUPLE_EAT_3)(p, o, o(163, s))\n# 
define BOOST_PP_WHILE_163(p, o, s) BOOST_PP_IF(p(164, s), BOOST_PP_WHILE_164, s BOOST_PP_TUPLE_EAT_3)(p, o, o(164, s))\n# define BOOST_PP_WHILE_164(p, o, s) BOOST_PP_IF(p(165, s), BOOST_PP_WHILE_165, s BOOST_PP_TUPLE_EAT_3)(p, o, o(165, s))\n# define BOOST_PP_WHILE_165(p, o, s) BOOST_PP_IF(p(166, s), BOOST_PP_WHILE_166, s BOOST_PP_TUPLE_EAT_3)(p, o, o(166, s))\n# define BOOST_PP_WHILE_166(p, o, s) BOOST_PP_IF(p(167, s), BOOST_PP_WHILE_167, s BOOST_PP_TUPLE_EAT_3)(p, o, o(167, s))\n# define BOOST_PP_WHILE_167(p, o, s) BOOST_PP_IF(p(168, s), BOOST_PP_WHILE_168, s BOOST_PP_TUPLE_EAT_3)(p, o, o(168, s))\n# define BOOST_PP_WHILE_168(p, o, s) BOOST_PP_IF(p(169, s), BOOST_PP_WHILE_169, s BOOST_PP_TUPLE_EAT_3)(p, o, o(169, s))\n# define BOOST_PP_WHILE_169(p, o, s) BOOST_PP_IF(p(170, s), BOOST_PP_WHILE_170, s BOOST_PP_TUPLE_EAT_3)(p, o, o(170, s))\n# define BOOST_PP_WHILE_170(p, o, s) BOOST_PP_IF(p(171, s), BOOST_PP_WHILE_171, s BOOST_PP_TUPLE_EAT_3)(p, o, o(171, s))\n# define BOOST_PP_WHILE_171(p, o, s) BOOST_PP_IF(p(172, s), BOOST_PP_WHILE_172, s BOOST_PP_TUPLE_EAT_3)(p, o, o(172, s))\n# define BOOST_PP_WHILE_172(p, o, s) BOOST_PP_IF(p(173, s), BOOST_PP_WHILE_173, s BOOST_PP_TUPLE_EAT_3)(p, o, o(173, s))\n# define BOOST_PP_WHILE_173(p, o, s) BOOST_PP_IF(p(174, s), BOOST_PP_WHILE_174, s BOOST_PP_TUPLE_EAT_3)(p, o, o(174, s))\n# define BOOST_PP_WHILE_174(p, o, s) BOOST_PP_IF(p(175, s), BOOST_PP_WHILE_175, s BOOST_PP_TUPLE_EAT_3)(p, o, o(175, s))\n# define BOOST_PP_WHILE_175(p, o, s) BOOST_PP_IF(p(176, s), BOOST_PP_WHILE_176, s BOOST_PP_TUPLE_EAT_3)(p, o, o(176, s))\n# define BOOST_PP_WHILE_176(p, o, s) BOOST_PP_IF(p(177, s), BOOST_PP_WHILE_177, s BOOST_PP_TUPLE_EAT_3)(p, o, o(177, s))\n# define BOOST_PP_WHILE_177(p, o, s) BOOST_PP_IF(p(178, s), BOOST_PP_WHILE_178, s BOOST_PP_TUPLE_EAT_3)(p, o, o(178, s))\n# define BOOST_PP_WHILE_178(p, o, s) BOOST_PP_IF(p(179, s), BOOST_PP_WHILE_179, s BOOST_PP_TUPLE_EAT_3)(p, o, o(179, s))\n# define BOOST_PP_WHILE_179(p, o, s) 
BOOST_PP_IF(p(180, s), BOOST_PP_WHILE_180, s BOOST_PP_TUPLE_EAT_3)(p, o, o(180, s))\n# define BOOST_PP_WHILE_180(p, o, s) BOOST_PP_IF(p(181, s), BOOST_PP_WHILE_181, s BOOST_PP_TUPLE_EAT_3)(p, o, o(181, s))\n# define BOOST_PP_WHILE_181(p, o, s) BOOST_PP_IF(p(182, s), BOOST_PP_WHILE_182, s BOOST_PP_TUPLE_EAT_3)(p, o, o(182, s))\n# define BOOST_PP_WHILE_182(p, o, s) BOOST_PP_IF(p(183, s), BOOST_PP_WHILE_183, s BOOST_PP_TUPLE_EAT_3)(p, o, o(183, s))\n# define BOOST_PP_WHILE_183(p, o, s) BOOST_PP_IF(p(184, s), BOOST_PP_WHILE_184, s BOOST_PP_TUPLE_EAT_3)(p, o, o(184, s))\n# define BOOST_PP_WHILE_184(p, o, s) BOOST_PP_IF(p(185, s), BOOST_PP_WHILE_185, s BOOST_PP_TUPLE_EAT_3)(p, o, o(185, s))\n# define BOOST_PP_WHILE_185(p, o, s) BOOST_PP_IF(p(186, s), BOOST_PP_WHILE_186, s BOOST_PP_TUPLE_EAT_3)(p, o, o(186, s))\n# define BOOST_PP_WHILE_186(p, o, s) BOOST_PP_IF(p(187, s), BOOST_PP_WHILE_187, s BOOST_PP_TUPLE_EAT_3)(p, o, o(187, s))\n# define BOOST_PP_WHILE_187(p, o, s) BOOST_PP_IF(p(188, s), BOOST_PP_WHILE_188, s BOOST_PP_TUPLE_EAT_3)(p, o, o(188, s))\n# define BOOST_PP_WHILE_188(p, o, s) BOOST_PP_IF(p(189, s), BOOST_PP_WHILE_189, s BOOST_PP_TUPLE_EAT_3)(p, o, o(189, s))\n# define BOOST_PP_WHILE_189(p, o, s) BOOST_PP_IF(p(190, s), BOOST_PP_WHILE_190, s BOOST_PP_TUPLE_EAT_3)(p, o, o(190, s))\n# define BOOST_PP_WHILE_190(p, o, s) BOOST_PP_IF(p(191, s), BOOST_PP_WHILE_191, s BOOST_PP_TUPLE_EAT_3)(p, o, o(191, s))\n# define BOOST_PP_WHILE_191(p, o, s) BOOST_PP_IF(p(192, s), BOOST_PP_WHILE_192, s BOOST_PP_TUPLE_EAT_3)(p, o, o(192, s))\n# define BOOST_PP_WHILE_192(p, o, s) BOOST_PP_IF(p(193, s), BOOST_PP_WHILE_193, s BOOST_PP_TUPLE_EAT_3)(p, o, o(193, s))\n# define BOOST_PP_WHILE_193(p, o, s) BOOST_PP_IF(p(194, s), BOOST_PP_WHILE_194, s BOOST_PP_TUPLE_EAT_3)(p, o, o(194, s))\n# define BOOST_PP_WHILE_194(p, o, s) BOOST_PP_IF(p(195, s), BOOST_PP_WHILE_195, s BOOST_PP_TUPLE_EAT_3)(p, o, o(195, s))\n# define BOOST_PP_WHILE_195(p, o, s) BOOST_PP_IF(p(196, s), BOOST_PP_WHILE_196, s 
BOOST_PP_TUPLE_EAT_3)(p, o, o(196, s))\n# define BOOST_PP_WHILE_196(p, o, s) BOOST_PP_IF(p(197, s), BOOST_PP_WHILE_197, s BOOST_PP_TUPLE_EAT_3)(p, o, o(197, s))\n# define BOOST_PP_WHILE_197(p, o, s) BOOST_PP_IF(p(198, s), BOOST_PP_WHILE_198, s BOOST_PP_TUPLE_EAT_3)(p, o, o(198, s))\n# define BOOST_PP_WHILE_198(p, o, s) BOOST_PP_IF(p(199, s), BOOST_PP_WHILE_199, s BOOST_PP_TUPLE_EAT_3)(p, o, o(199, s))\n# define BOOST_PP_WHILE_199(p, o, s) BOOST_PP_IF(p(200, s), BOOST_PP_WHILE_200, s BOOST_PP_TUPLE_EAT_3)(p, o, o(200, s))\n# define BOOST_PP_WHILE_200(p, o, s) BOOST_PP_IF(p(201, s), BOOST_PP_WHILE_201, s BOOST_PP_TUPLE_EAT_3)(p, o, o(201, s))\n# define BOOST_PP_WHILE_201(p, o, s) BOOST_PP_IF(p(202, s), BOOST_PP_WHILE_202, s BOOST_PP_TUPLE_EAT_3)(p, o, o(202, s))\n# define BOOST_PP_WHILE_202(p, o, s) BOOST_PP_IF(p(203, s), BOOST_PP_WHILE_203, s BOOST_PP_TUPLE_EAT_3)(p, o, o(203, s))\n# define BOOST_PP_WHILE_203(p, o, s) BOOST_PP_IF(p(204, s), BOOST_PP_WHILE_204, s BOOST_PP_TUPLE_EAT_3)(p, o, o(204, s))\n# define BOOST_PP_WHILE_204(p, o, s) BOOST_PP_IF(p(205, s), BOOST_PP_WHILE_205, s BOOST_PP_TUPLE_EAT_3)(p, o, o(205, s))\n# define BOOST_PP_WHILE_205(p, o, s) BOOST_PP_IF(p(206, s), BOOST_PP_WHILE_206, s BOOST_PP_TUPLE_EAT_3)(p, o, o(206, s))\n# define BOOST_PP_WHILE_206(p, o, s) BOOST_PP_IF(p(207, s), BOOST_PP_WHILE_207, s BOOST_PP_TUPLE_EAT_3)(p, o, o(207, s))\n# define BOOST_PP_WHILE_207(p, o, s) BOOST_PP_IF(p(208, s), BOOST_PP_WHILE_208, s BOOST_PP_TUPLE_EAT_3)(p, o, o(208, s))\n# define BOOST_PP_WHILE_208(p, o, s) BOOST_PP_IF(p(209, s), BOOST_PP_WHILE_209, s BOOST_PP_TUPLE_EAT_3)(p, o, o(209, s))\n# define BOOST_PP_WHILE_209(p, o, s) BOOST_PP_IF(p(210, s), BOOST_PP_WHILE_210, s BOOST_PP_TUPLE_EAT_3)(p, o, o(210, s))\n# define BOOST_PP_WHILE_210(p, o, s) BOOST_PP_IF(p(211, s), BOOST_PP_WHILE_211, s BOOST_PP_TUPLE_EAT_3)(p, o, o(211, s))\n# define BOOST_PP_WHILE_211(p, o, s) BOOST_PP_IF(p(212, s), BOOST_PP_WHILE_212, s BOOST_PP_TUPLE_EAT_3)(p, o, o(212, s))\n# 
define BOOST_PP_WHILE_212(p, o, s) BOOST_PP_IF(p(213, s), BOOST_PP_WHILE_213, s BOOST_PP_TUPLE_EAT_3)(p, o, o(213, s))\n# define BOOST_PP_WHILE_213(p, o, s) BOOST_PP_IF(p(214, s), BOOST_PP_WHILE_214, s BOOST_PP_TUPLE_EAT_3)(p, o, o(214, s))\n# define BOOST_PP_WHILE_214(p, o, s) BOOST_PP_IF(p(215, s), BOOST_PP_WHILE_215, s BOOST_PP_TUPLE_EAT_3)(p, o, o(215, s))\n# define BOOST_PP_WHILE_215(p, o, s) BOOST_PP_IF(p(216, s), BOOST_PP_WHILE_216, s BOOST_PP_TUPLE_EAT_3)(p, o, o(216, s))\n# define BOOST_PP_WHILE_216(p, o, s) BOOST_PP_IF(p(217, s), BOOST_PP_WHILE_217, s BOOST_PP_TUPLE_EAT_3)(p, o, o(217, s))\n# define BOOST_PP_WHILE_217(p, o, s) BOOST_PP_IF(p(218, s), BOOST_PP_WHILE_218, s BOOST_PP_TUPLE_EAT_3)(p, o, o(218, s))\n# define BOOST_PP_WHILE_218(p, o, s) BOOST_PP_IF(p(219, s), BOOST_PP_WHILE_219, s BOOST_PP_TUPLE_EAT_3)(p, o, o(219, s))\n# define BOOST_PP_WHILE_219(p, o, s) BOOST_PP_IF(p(220, s), BOOST_PP_WHILE_220, s BOOST_PP_TUPLE_EAT_3)(p, o, o(220, s))\n# define BOOST_PP_WHILE_220(p, o, s) BOOST_PP_IF(p(221, s), BOOST_PP_WHILE_221, s BOOST_PP_TUPLE_EAT_3)(p, o, o(221, s))\n# define BOOST_PP_WHILE_221(p, o, s) BOOST_PP_IF(p(222, s), BOOST_PP_WHILE_222, s BOOST_PP_TUPLE_EAT_3)(p, o, o(222, s))\n# define BOOST_PP_WHILE_222(p, o, s) BOOST_PP_IF(p(223, s), BOOST_PP_WHILE_223, s BOOST_PP_TUPLE_EAT_3)(p, o, o(223, s))\n# define BOOST_PP_WHILE_223(p, o, s) BOOST_PP_IF(p(224, s), BOOST_PP_WHILE_224, s BOOST_PP_TUPLE_EAT_3)(p, o, o(224, s))\n# define BOOST_PP_WHILE_224(p, o, s) BOOST_PP_IF(p(225, s), BOOST_PP_WHILE_225, s BOOST_PP_TUPLE_EAT_3)(p, o, o(225, s))\n# define BOOST_PP_WHILE_225(p, o, s) BOOST_PP_IF(p(226, s), BOOST_PP_WHILE_226, s BOOST_PP_TUPLE_EAT_3)(p, o, o(226, s))\n# define BOOST_PP_WHILE_226(p, o, s) BOOST_PP_IF(p(227, s), BOOST_PP_WHILE_227, s BOOST_PP_TUPLE_EAT_3)(p, o, o(227, s))\n# define BOOST_PP_WHILE_227(p, o, s) BOOST_PP_IF(p(228, s), BOOST_PP_WHILE_228, s BOOST_PP_TUPLE_EAT_3)(p, o, o(228, s))\n# define BOOST_PP_WHILE_228(p, o, s) 
BOOST_PP_IF(p(229, s), BOOST_PP_WHILE_229, s BOOST_PP_TUPLE_EAT_3)(p, o, o(229, s))\n# define BOOST_PP_WHILE_229(p, o, s) BOOST_PP_IF(p(230, s), BOOST_PP_WHILE_230, s BOOST_PP_TUPLE_EAT_3)(p, o, o(230, s))\n# define BOOST_PP_WHILE_230(p, o, s) BOOST_PP_IF(p(231, s), BOOST_PP_WHILE_231, s BOOST_PP_TUPLE_EAT_3)(p, o, o(231, s))\n# define BOOST_PP_WHILE_231(p, o, s) BOOST_PP_IF(p(232, s), BOOST_PP_WHILE_232, s BOOST_PP_TUPLE_EAT_3)(p, o, o(232, s))\n# define BOOST_PP_WHILE_232(p, o, s) BOOST_PP_IF(p(233, s), BOOST_PP_WHILE_233, s BOOST_PP_TUPLE_EAT_3)(p, o, o(233, s))\n# define BOOST_PP_WHILE_233(p, o, s) BOOST_PP_IF(p(234, s), BOOST_PP_WHILE_234, s BOOST_PP_TUPLE_EAT_3)(p, o, o(234, s))\n# define BOOST_PP_WHILE_234(p, o, s) BOOST_PP_IF(p(235, s), BOOST_PP_WHILE_235, s BOOST_PP_TUPLE_EAT_3)(p, o, o(235, s))\n# define BOOST_PP_WHILE_235(p, o, s) BOOST_PP_IF(p(236, s), BOOST_PP_WHILE_236, s BOOST_PP_TUPLE_EAT_3)(p, o, o(236, s))\n# define BOOST_PP_WHILE_236(p, o, s) BOOST_PP_IF(p(237, s), BOOST_PP_WHILE_237, s BOOST_PP_TUPLE_EAT_3)(p, o, o(237, s))\n# define BOOST_PP_WHILE_237(p, o, s) BOOST_PP_IF(p(238, s), BOOST_PP_WHILE_238, s BOOST_PP_TUPLE_EAT_3)(p, o, o(238, s))\n# define BOOST_PP_WHILE_238(p, o, s) BOOST_PP_IF(p(239, s), BOOST_PP_WHILE_239, s BOOST_PP_TUPLE_EAT_3)(p, o, o(239, s))\n# define BOOST_PP_WHILE_239(p, o, s) BOOST_PP_IF(p(240, s), BOOST_PP_WHILE_240, s BOOST_PP_TUPLE_EAT_3)(p, o, o(240, s))\n# define BOOST_PP_WHILE_240(p, o, s) BOOST_PP_IF(p(241, s), BOOST_PP_WHILE_241, s BOOST_PP_TUPLE_EAT_3)(p, o, o(241, s))\n# define BOOST_PP_WHILE_241(p, o, s) BOOST_PP_IF(p(242, s), BOOST_PP_WHILE_242, s BOOST_PP_TUPLE_EAT_3)(p, o, o(242, s))\n# define BOOST_PP_WHILE_242(p, o, s) BOOST_PP_IF(p(243, s), BOOST_PP_WHILE_243, s BOOST_PP_TUPLE_EAT_3)(p, o, o(243, s))\n# define BOOST_PP_WHILE_243(p, o, s) BOOST_PP_IF(p(244, s), BOOST_PP_WHILE_244, s BOOST_PP_TUPLE_EAT_3)(p, o, o(244, s))\n# define BOOST_PP_WHILE_244(p, o, s) BOOST_PP_IF(p(245, s), BOOST_PP_WHILE_245, s 
BOOST_PP_TUPLE_EAT_3)(p, o, o(245, s))\n# define BOOST_PP_WHILE_245(p, o, s) BOOST_PP_IF(p(246, s), BOOST_PP_WHILE_246, s BOOST_PP_TUPLE_EAT_3)(p, o, o(246, s))\n# define BOOST_PP_WHILE_246(p, o, s) BOOST_PP_IF(p(247, s), BOOST_PP_WHILE_247, s BOOST_PP_TUPLE_EAT_3)(p, o, o(247, s))\n# define BOOST_PP_WHILE_247(p, o, s) BOOST_PP_IF(p(248, s), BOOST_PP_WHILE_248, s BOOST_PP_TUPLE_EAT_3)(p, o, o(248, s))\n# define BOOST_PP_WHILE_248(p, o, s) BOOST_PP_IF(p(249, s), BOOST_PP_WHILE_249, s BOOST_PP_TUPLE_EAT_3)(p, o, o(249, s))\n# define BOOST_PP_WHILE_249(p, o, s) BOOST_PP_IF(p(250, s), BOOST_PP_WHILE_250, s BOOST_PP_TUPLE_EAT_3)(p, o, o(250, s))\n# define BOOST_PP_WHILE_250(p, o, s) BOOST_PP_IF(p(251, s), BOOST_PP_WHILE_251, s BOOST_PP_TUPLE_EAT_3)(p, o, o(251, s))\n# define BOOST_PP_WHILE_251(p, o, s) BOOST_PP_IF(p(252, s), BOOST_PP_WHILE_252, s BOOST_PP_TUPLE_EAT_3)(p, o, o(252, s))\n# define BOOST_PP_WHILE_252(p, o, s) BOOST_PP_IF(p(253, s), BOOST_PP_WHILE_253, s BOOST_PP_TUPLE_EAT_3)(p, o, o(253, s))\n# define BOOST_PP_WHILE_253(p, o, s) BOOST_PP_IF(p(254, s), BOOST_PP_WHILE_254, s BOOST_PP_TUPLE_EAT_3)(p, o, o(254, s))\n# define BOOST_PP_WHILE_254(p, o, s) BOOST_PP_IF(p(255, s), BOOST_PP_WHILE_255, s BOOST_PP_TUPLE_EAT_3)(p, o, o(255, s))\n# define BOOST_PP_WHILE_255(p, o, s) BOOST_PP_IF(p(256, s), BOOST_PP_WHILE_256, s BOOST_PP_TUPLE_EAT_3)(p, o, o(256, s))\n# define BOOST_PP_WHILE_256(p, o, s) BOOST_PP_IF(p(257, s), BOOST_PP_WHILE_257, s BOOST_PP_TUPLE_EAT_3)(p, o, o(257, s))\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/detail/while.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_DETAIL_WHILE_HPP\n# define BOOST_PREPROCESSOR_CONTROL_DETAIL_WHILE_HPP\n#\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_WHILE_1(p, o, s) BOOST_PP_WHILE_1_C(BOOST_PP_BOOL(p(2, s)), p, o, s)\n# define BOOST_PP_WHILE_2(p, o, s) BOOST_PP_WHILE_2_C(BOOST_PP_BOOL(p(3, s)), p, o, s)\n# define BOOST_PP_WHILE_3(p, o, s) BOOST_PP_WHILE_3_C(BOOST_PP_BOOL(p(4, s)), p, o, s)\n# define BOOST_PP_WHILE_4(p, o, s) BOOST_PP_WHILE_4_C(BOOST_PP_BOOL(p(5, s)), p, o, s)\n# define BOOST_PP_WHILE_5(p, o, s) BOOST_PP_WHILE_5_C(BOOST_PP_BOOL(p(6, s)), p, o, s)\n# define BOOST_PP_WHILE_6(p, o, s) BOOST_PP_WHILE_6_C(BOOST_PP_BOOL(p(7, s)), p, o, s)\n# define BOOST_PP_WHILE_7(p, o, s) BOOST_PP_WHILE_7_C(BOOST_PP_BOOL(p(8, s)), p, o, s)\n# define BOOST_PP_WHILE_8(p, o, s) BOOST_PP_WHILE_8_C(BOOST_PP_BOOL(p(9, s)), p, o, s)\n# define BOOST_PP_WHILE_9(p, o, s) BOOST_PP_WHILE_9_C(BOOST_PP_BOOL(p(10, s)), p, o, s)\n# define BOOST_PP_WHILE_10(p, o, s) BOOST_PP_WHILE_10_C(BOOST_PP_BOOL(p(11, s)), p, o, s)\n# define BOOST_PP_WHILE_11(p, o, s) BOOST_PP_WHILE_11_C(BOOST_PP_BOOL(p(12, s)), p, o, s)\n# define BOOST_PP_WHILE_12(p, o, s) BOOST_PP_WHILE_12_C(BOOST_PP_BOOL(p(13, s)), p, o, s)\n# define BOOST_PP_WHILE_13(p, o, s) BOOST_PP_WHILE_13_C(BOOST_PP_BOOL(p(14, s)), p, o, s)\n# define BOOST_PP_WHILE_14(p, o, s) BOOST_PP_WHILE_14_C(BOOST_PP_BOOL(p(15, s)), p, o, s)\n# define BOOST_PP_WHILE_15(p, o, s) BOOST_PP_WHILE_15_C(BOOST_PP_BOOL(p(16, s)), p, o, s)\n# define 
BOOST_PP_WHILE_16(p, o, s) BOOST_PP_WHILE_16_C(BOOST_PP_BOOL(p(17, s)), p, o, s)\n# define BOOST_PP_WHILE_17(p, o, s) BOOST_PP_WHILE_17_C(BOOST_PP_BOOL(p(18, s)), p, o, s)\n# define BOOST_PP_WHILE_18(p, o, s) BOOST_PP_WHILE_18_C(BOOST_PP_BOOL(p(19, s)), p, o, s)\n# define BOOST_PP_WHILE_19(p, o, s) BOOST_PP_WHILE_19_C(BOOST_PP_BOOL(p(20, s)), p, o, s)\n# define BOOST_PP_WHILE_20(p, o, s) BOOST_PP_WHILE_20_C(BOOST_PP_BOOL(p(21, s)), p, o, s)\n# define BOOST_PP_WHILE_21(p, o, s) BOOST_PP_WHILE_21_C(BOOST_PP_BOOL(p(22, s)), p, o, s)\n# define BOOST_PP_WHILE_22(p, o, s) BOOST_PP_WHILE_22_C(BOOST_PP_BOOL(p(23, s)), p, o, s)\n# define BOOST_PP_WHILE_23(p, o, s) BOOST_PP_WHILE_23_C(BOOST_PP_BOOL(p(24, s)), p, o, s)\n# define BOOST_PP_WHILE_24(p, o, s) BOOST_PP_WHILE_24_C(BOOST_PP_BOOL(p(25, s)), p, o, s)\n# define BOOST_PP_WHILE_25(p, o, s) BOOST_PP_WHILE_25_C(BOOST_PP_BOOL(p(26, s)), p, o, s)\n# define BOOST_PP_WHILE_26(p, o, s) BOOST_PP_WHILE_26_C(BOOST_PP_BOOL(p(27, s)), p, o, s)\n# define BOOST_PP_WHILE_27(p, o, s) BOOST_PP_WHILE_27_C(BOOST_PP_BOOL(p(28, s)), p, o, s)\n# define BOOST_PP_WHILE_28(p, o, s) BOOST_PP_WHILE_28_C(BOOST_PP_BOOL(p(29, s)), p, o, s)\n# define BOOST_PP_WHILE_29(p, o, s) BOOST_PP_WHILE_29_C(BOOST_PP_BOOL(p(30, s)), p, o, s)\n# define BOOST_PP_WHILE_30(p, o, s) BOOST_PP_WHILE_30_C(BOOST_PP_BOOL(p(31, s)), p, o, s)\n# define BOOST_PP_WHILE_31(p, o, s) BOOST_PP_WHILE_31_C(BOOST_PP_BOOL(p(32, s)), p, o, s)\n# define BOOST_PP_WHILE_32(p, o, s) BOOST_PP_WHILE_32_C(BOOST_PP_BOOL(p(33, s)), p, o, s)\n# define BOOST_PP_WHILE_33(p, o, s) BOOST_PP_WHILE_33_C(BOOST_PP_BOOL(p(34, s)), p, o, s)\n# define BOOST_PP_WHILE_34(p, o, s) BOOST_PP_WHILE_34_C(BOOST_PP_BOOL(p(35, s)), p, o, s)\n# define BOOST_PP_WHILE_35(p, o, s) BOOST_PP_WHILE_35_C(BOOST_PP_BOOL(p(36, s)), p, o, s)\n# define BOOST_PP_WHILE_36(p, o, s) BOOST_PP_WHILE_36_C(BOOST_PP_BOOL(p(37, s)), p, o, s)\n# define BOOST_PP_WHILE_37(p, o, s) BOOST_PP_WHILE_37_C(BOOST_PP_BOOL(p(38, s)), p, o, s)\n# 
define BOOST_PP_WHILE_38(p, o, s) BOOST_PP_WHILE_38_C(BOOST_PP_BOOL(p(39, s)), p, o, s)\n# define BOOST_PP_WHILE_39(p, o, s) BOOST_PP_WHILE_39_C(BOOST_PP_BOOL(p(40, s)), p, o, s)\n# define BOOST_PP_WHILE_40(p, o, s) BOOST_PP_WHILE_40_C(BOOST_PP_BOOL(p(41, s)), p, o, s)\n# define BOOST_PP_WHILE_41(p, o, s) BOOST_PP_WHILE_41_C(BOOST_PP_BOOL(p(42, s)), p, o, s)\n# define BOOST_PP_WHILE_42(p, o, s) BOOST_PP_WHILE_42_C(BOOST_PP_BOOL(p(43, s)), p, o, s)\n# define BOOST_PP_WHILE_43(p, o, s) BOOST_PP_WHILE_43_C(BOOST_PP_BOOL(p(44, s)), p, o, s)\n# define BOOST_PP_WHILE_44(p, o, s) BOOST_PP_WHILE_44_C(BOOST_PP_BOOL(p(45, s)), p, o, s)\n# define BOOST_PP_WHILE_45(p, o, s) BOOST_PP_WHILE_45_C(BOOST_PP_BOOL(p(46, s)), p, o, s)\n# define BOOST_PP_WHILE_46(p, o, s) BOOST_PP_WHILE_46_C(BOOST_PP_BOOL(p(47, s)), p, o, s)\n# define BOOST_PP_WHILE_47(p, o, s) BOOST_PP_WHILE_47_C(BOOST_PP_BOOL(p(48, s)), p, o, s)\n# define BOOST_PP_WHILE_48(p, o, s) BOOST_PP_WHILE_48_C(BOOST_PP_BOOL(p(49, s)), p, o, s)\n# define BOOST_PP_WHILE_49(p, o, s) BOOST_PP_WHILE_49_C(BOOST_PP_BOOL(p(50, s)), p, o, s)\n# define BOOST_PP_WHILE_50(p, o, s) BOOST_PP_WHILE_50_C(BOOST_PP_BOOL(p(51, s)), p, o, s)\n# define BOOST_PP_WHILE_51(p, o, s) BOOST_PP_WHILE_51_C(BOOST_PP_BOOL(p(52, s)), p, o, s)\n# define BOOST_PP_WHILE_52(p, o, s) BOOST_PP_WHILE_52_C(BOOST_PP_BOOL(p(53, s)), p, o, s)\n# define BOOST_PP_WHILE_53(p, o, s) BOOST_PP_WHILE_53_C(BOOST_PP_BOOL(p(54, s)), p, o, s)\n# define BOOST_PP_WHILE_54(p, o, s) BOOST_PP_WHILE_54_C(BOOST_PP_BOOL(p(55, s)), p, o, s)\n# define BOOST_PP_WHILE_55(p, o, s) BOOST_PP_WHILE_55_C(BOOST_PP_BOOL(p(56, s)), p, o, s)\n# define BOOST_PP_WHILE_56(p, o, s) BOOST_PP_WHILE_56_C(BOOST_PP_BOOL(p(57, s)), p, o, s)\n# define BOOST_PP_WHILE_57(p, o, s) BOOST_PP_WHILE_57_C(BOOST_PP_BOOL(p(58, s)), p, o, s)\n# define BOOST_PP_WHILE_58(p, o, s) BOOST_PP_WHILE_58_C(BOOST_PP_BOOL(p(59, s)), p, o, s)\n# define BOOST_PP_WHILE_59(p, o, s) BOOST_PP_WHILE_59_C(BOOST_PP_BOOL(p(60, s)), p, o, 
s)\n# define BOOST_PP_WHILE_60(p, o, s) BOOST_PP_WHILE_60_C(BOOST_PP_BOOL(p(61, s)), p, o, s)\n# define BOOST_PP_WHILE_61(p, o, s) BOOST_PP_WHILE_61_C(BOOST_PP_BOOL(p(62, s)), p, o, s)\n# define BOOST_PP_WHILE_62(p, o, s) BOOST_PP_WHILE_62_C(BOOST_PP_BOOL(p(63, s)), p, o, s)\n# define BOOST_PP_WHILE_63(p, o, s) BOOST_PP_WHILE_63_C(BOOST_PP_BOOL(p(64, s)), p, o, s)\n# define BOOST_PP_WHILE_64(p, o, s) BOOST_PP_WHILE_64_C(BOOST_PP_BOOL(p(65, s)), p, o, s)\n# define BOOST_PP_WHILE_65(p, o, s) BOOST_PP_WHILE_65_C(BOOST_PP_BOOL(p(66, s)), p, o, s)\n# define BOOST_PP_WHILE_66(p, o, s) BOOST_PP_WHILE_66_C(BOOST_PP_BOOL(p(67, s)), p, o, s)\n# define BOOST_PP_WHILE_67(p, o, s) BOOST_PP_WHILE_67_C(BOOST_PP_BOOL(p(68, s)), p, o, s)\n# define BOOST_PP_WHILE_68(p, o, s) BOOST_PP_WHILE_68_C(BOOST_PP_BOOL(p(69, s)), p, o, s)\n# define BOOST_PP_WHILE_69(p, o, s) BOOST_PP_WHILE_69_C(BOOST_PP_BOOL(p(70, s)), p, o, s)\n# define BOOST_PP_WHILE_70(p, o, s) BOOST_PP_WHILE_70_C(BOOST_PP_BOOL(p(71, s)), p, o, s)\n# define BOOST_PP_WHILE_71(p, o, s) BOOST_PP_WHILE_71_C(BOOST_PP_BOOL(p(72, s)), p, o, s)\n# define BOOST_PP_WHILE_72(p, o, s) BOOST_PP_WHILE_72_C(BOOST_PP_BOOL(p(73, s)), p, o, s)\n# define BOOST_PP_WHILE_73(p, o, s) BOOST_PP_WHILE_73_C(BOOST_PP_BOOL(p(74, s)), p, o, s)\n# define BOOST_PP_WHILE_74(p, o, s) BOOST_PP_WHILE_74_C(BOOST_PP_BOOL(p(75, s)), p, o, s)\n# define BOOST_PP_WHILE_75(p, o, s) BOOST_PP_WHILE_75_C(BOOST_PP_BOOL(p(76, s)), p, o, s)\n# define BOOST_PP_WHILE_76(p, o, s) BOOST_PP_WHILE_76_C(BOOST_PP_BOOL(p(77, s)), p, o, s)\n# define BOOST_PP_WHILE_77(p, o, s) BOOST_PP_WHILE_77_C(BOOST_PP_BOOL(p(78, s)), p, o, s)\n# define BOOST_PP_WHILE_78(p, o, s) BOOST_PP_WHILE_78_C(BOOST_PP_BOOL(p(79, s)), p, o, s)\n# define BOOST_PP_WHILE_79(p, o, s) BOOST_PP_WHILE_79_C(BOOST_PP_BOOL(p(80, s)), p, o, s)\n# define BOOST_PP_WHILE_80(p, o, s) BOOST_PP_WHILE_80_C(BOOST_PP_BOOL(p(81, s)), p, o, s)\n# define BOOST_PP_WHILE_81(p, o, s) BOOST_PP_WHILE_81_C(BOOST_PP_BOOL(p(82, s)), p, 
o, s)\n# define BOOST_PP_WHILE_82(p, o, s) BOOST_PP_WHILE_82_C(BOOST_PP_BOOL(p(83, s)), p, o, s)\n# define BOOST_PP_WHILE_83(p, o, s) BOOST_PP_WHILE_83_C(BOOST_PP_BOOL(p(84, s)), p, o, s)\n# define BOOST_PP_WHILE_84(p, o, s) BOOST_PP_WHILE_84_C(BOOST_PP_BOOL(p(85, s)), p, o, s)\n# define BOOST_PP_WHILE_85(p, o, s) BOOST_PP_WHILE_85_C(BOOST_PP_BOOL(p(86, s)), p, o, s)\n# define BOOST_PP_WHILE_86(p, o, s) BOOST_PP_WHILE_86_C(BOOST_PP_BOOL(p(87, s)), p, o, s)\n# define BOOST_PP_WHILE_87(p, o, s) BOOST_PP_WHILE_87_C(BOOST_PP_BOOL(p(88, s)), p, o, s)\n# define BOOST_PP_WHILE_88(p, o, s) BOOST_PP_WHILE_88_C(BOOST_PP_BOOL(p(89, s)), p, o, s)\n# define BOOST_PP_WHILE_89(p, o, s) BOOST_PP_WHILE_89_C(BOOST_PP_BOOL(p(90, s)), p, o, s)\n# define BOOST_PP_WHILE_90(p, o, s) BOOST_PP_WHILE_90_C(BOOST_PP_BOOL(p(91, s)), p, o, s)\n# define BOOST_PP_WHILE_91(p, o, s) BOOST_PP_WHILE_91_C(BOOST_PP_BOOL(p(92, s)), p, o, s)\n# define BOOST_PP_WHILE_92(p, o, s) BOOST_PP_WHILE_92_C(BOOST_PP_BOOL(p(93, s)), p, o, s)\n# define BOOST_PP_WHILE_93(p, o, s) BOOST_PP_WHILE_93_C(BOOST_PP_BOOL(p(94, s)), p, o, s)\n# define BOOST_PP_WHILE_94(p, o, s) BOOST_PP_WHILE_94_C(BOOST_PP_BOOL(p(95, s)), p, o, s)\n# define BOOST_PP_WHILE_95(p, o, s) BOOST_PP_WHILE_95_C(BOOST_PP_BOOL(p(96, s)), p, o, s)\n# define BOOST_PP_WHILE_96(p, o, s) BOOST_PP_WHILE_96_C(BOOST_PP_BOOL(p(97, s)), p, o, s)\n# define BOOST_PP_WHILE_97(p, o, s) BOOST_PP_WHILE_97_C(BOOST_PP_BOOL(p(98, s)), p, o, s)\n# define BOOST_PP_WHILE_98(p, o, s) BOOST_PP_WHILE_98_C(BOOST_PP_BOOL(p(99, s)), p, o, s)\n# define BOOST_PP_WHILE_99(p, o, s) BOOST_PP_WHILE_99_C(BOOST_PP_BOOL(p(100, s)), p, o, s)\n# define BOOST_PP_WHILE_100(p, o, s) BOOST_PP_WHILE_100_C(BOOST_PP_BOOL(p(101, s)), p, o, s)\n# define BOOST_PP_WHILE_101(p, o, s) BOOST_PP_WHILE_101_C(BOOST_PP_BOOL(p(102, s)), p, o, s)\n# define BOOST_PP_WHILE_102(p, o, s) BOOST_PP_WHILE_102_C(BOOST_PP_BOOL(p(103, s)), p, o, s)\n# define BOOST_PP_WHILE_103(p, o, s) 
BOOST_PP_WHILE_103_C(BOOST_PP_BOOL(p(104, s)), p, o, s)\n# define BOOST_PP_WHILE_104(p, o, s) BOOST_PP_WHILE_104_C(BOOST_PP_BOOL(p(105, s)), p, o, s)\n# define BOOST_PP_WHILE_105(p, o, s) BOOST_PP_WHILE_105_C(BOOST_PP_BOOL(p(106, s)), p, o, s)\n# define BOOST_PP_WHILE_106(p, o, s) BOOST_PP_WHILE_106_C(BOOST_PP_BOOL(p(107, s)), p, o, s)\n# define BOOST_PP_WHILE_107(p, o, s) BOOST_PP_WHILE_107_C(BOOST_PP_BOOL(p(108, s)), p, o, s)\n# define BOOST_PP_WHILE_108(p, o, s) BOOST_PP_WHILE_108_C(BOOST_PP_BOOL(p(109, s)), p, o, s)\n# define BOOST_PP_WHILE_109(p, o, s) BOOST_PP_WHILE_109_C(BOOST_PP_BOOL(p(110, s)), p, o, s)\n# define BOOST_PP_WHILE_110(p, o, s) BOOST_PP_WHILE_110_C(BOOST_PP_BOOL(p(111, s)), p, o, s)\n# define BOOST_PP_WHILE_111(p, o, s) BOOST_PP_WHILE_111_C(BOOST_PP_BOOL(p(112, s)), p, o, s)\n# define BOOST_PP_WHILE_112(p, o, s) BOOST_PP_WHILE_112_C(BOOST_PP_BOOL(p(113, s)), p, o, s)\n# define BOOST_PP_WHILE_113(p, o, s) BOOST_PP_WHILE_113_C(BOOST_PP_BOOL(p(114, s)), p, o, s)\n# define BOOST_PP_WHILE_114(p, o, s) BOOST_PP_WHILE_114_C(BOOST_PP_BOOL(p(115, s)), p, o, s)\n# define BOOST_PP_WHILE_115(p, o, s) BOOST_PP_WHILE_115_C(BOOST_PP_BOOL(p(116, s)), p, o, s)\n# define BOOST_PP_WHILE_116(p, o, s) BOOST_PP_WHILE_116_C(BOOST_PP_BOOL(p(117, s)), p, o, s)\n# define BOOST_PP_WHILE_117(p, o, s) BOOST_PP_WHILE_117_C(BOOST_PP_BOOL(p(118, s)), p, o, s)\n# define BOOST_PP_WHILE_118(p, o, s) BOOST_PP_WHILE_118_C(BOOST_PP_BOOL(p(119, s)), p, o, s)\n# define BOOST_PP_WHILE_119(p, o, s) BOOST_PP_WHILE_119_C(BOOST_PP_BOOL(p(120, s)), p, o, s)\n# define BOOST_PP_WHILE_120(p, o, s) BOOST_PP_WHILE_120_C(BOOST_PP_BOOL(p(121, s)), p, o, s)\n# define BOOST_PP_WHILE_121(p, o, s) BOOST_PP_WHILE_121_C(BOOST_PP_BOOL(p(122, s)), p, o, s)\n# define BOOST_PP_WHILE_122(p, o, s) BOOST_PP_WHILE_122_C(BOOST_PP_BOOL(p(123, s)), p, o, s)\n# define BOOST_PP_WHILE_123(p, o, s) BOOST_PP_WHILE_123_C(BOOST_PP_BOOL(p(124, s)), p, o, s)\n# define BOOST_PP_WHILE_124(p, o, s) 
BOOST_PP_WHILE_124_C(BOOST_PP_BOOL(p(125, s)), p, o, s)\n# define BOOST_PP_WHILE_125(p, o, s) BOOST_PP_WHILE_125_C(BOOST_PP_BOOL(p(126, s)), p, o, s)\n# define BOOST_PP_WHILE_126(p, o, s) BOOST_PP_WHILE_126_C(BOOST_PP_BOOL(p(127, s)), p, o, s)\n# define BOOST_PP_WHILE_127(p, o, s) BOOST_PP_WHILE_127_C(BOOST_PP_BOOL(p(128, s)), p, o, s)\n# define BOOST_PP_WHILE_128(p, o, s) BOOST_PP_WHILE_128_C(BOOST_PP_BOOL(p(129, s)), p, o, s)\n# define BOOST_PP_WHILE_129(p, o, s) BOOST_PP_WHILE_129_C(BOOST_PP_BOOL(p(130, s)), p, o, s)\n# define BOOST_PP_WHILE_130(p, o, s) BOOST_PP_WHILE_130_C(BOOST_PP_BOOL(p(131, s)), p, o, s)\n# define BOOST_PP_WHILE_131(p, o, s) BOOST_PP_WHILE_131_C(BOOST_PP_BOOL(p(132, s)), p, o, s)\n# define BOOST_PP_WHILE_132(p, o, s) BOOST_PP_WHILE_132_C(BOOST_PP_BOOL(p(133, s)), p, o, s)\n# define BOOST_PP_WHILE_133(p, o, s) BOOST_PP_WHILE_133_C(BOOST_PP_BOOL(p(134, s)), p, o, s)\n# define BOOST_PP_WHILE_134(p, o, s) BOOST_PP_WHILE_134_C(BOOST_PP_BOOL(p(135, s)), p, o, s)\n# define BOOST_PP_WHILE_135(p, o, s) BOOST_PP_WHILE_135_C(BOOST_PP_BOOL(p(136, s)), p, o, s)\n# define BOOST_PP_WHILE_136(p, o, s) BOOST_PP_WHILE_136_C(BOOST_PP_BOOL(p(137, s)), p, o, s)\n# define BOOST_PP_WHILE_137(p, o, s) BOOST_PP_WHILE_137_C(BOOST_PP_BOOL(p(138, s)), p, o, s)\n# define BOOST_PP_WHILE_138(p, o, s) BOOST_PP_WHILE_138_C(BOOST_PP_BOOL(p(139, s)), p, o, s)\n# define BOOST_PP_WHILE_139(p, o, s) BOOST_PP_WHILE_139_C(BOOST_PP_BOOL(p(140, s)), p, o, s)\n# define BOOST_PP_WHILE_140(p, o, s) BOOST_PP_WHILE_140_C(BOOST_PP_BOOL(p(141, s)), p, o, s)\n# define BOOST_PP_WHILE_141(p, o, s) BOOST_PP_WHILE_141_C(BOOST_PP_BOOL(p(142, s)), p, o, s)\n# define BOOST_PP_WHILE_142(p, o, s) BOOST_PP_WHILE_142_C(BOOST_PP_BOOL(p(143, s)), p, o, s)\n# define BOOST_PP_WHILE_143(p, o, s) BOOST_PP_WHILE_143_C(BOOST_PP_BOOL(p(144, s)), p, o, s)\n# define BOOST_PP_WHILE_144(p, o, s) BOOST_PP_WHILE_144_C(BOOST_PP_BOOL(p(145, s)), p, o, s)\n# define BOOST_PP_WHILE_145(p, o, s) 
BOOST_PP_WHILE_145_C(BOOST_PP_BOOL(p(146, s)), p, o, s)\n# define BOOST_PP_WHILE_146(p, o, s) BOOST_PP_WHILE_146_C(BOOST_PP_BOOL(p(147, s)), p, o, s)\n# define BOOST_PP_WHILE_147(p, o, s) BOOST_PP_WHILE_147_C(BOOST_PP_BOOL(p(148, s)), p, o, s)\n# define BOOST_PP_WHILE_148(p, o, s) BOOST_PP_WHILE_148_C(BOOST_PP_BOOL(p(149, s)), p, o, s)\n# define BOOST_PP_WHILE_149(p, o, s) BOOST_PP_WHILE_149_C(BOOST_PP_BOOL(p(150, s)), p, o, s)\n# define BOOST_PP_WHILE_150(p, o, s) BOOST_PP_WHILE_150_C(BOOST_PP_BOOL(p(151, s)), p, o, s)\n# define BOOST_PP_WHILE_151(p, o, s) BOOST_PP_WHILE_151_C(BOOST_PP_BOOL(p(152, s)), p, o, s)\n# define BOOST_PP_WHILE_152(p, o, s) BOOST_PP_WHILE_152_C(BOOST_PP_BOOL(p(153, s)), p, o, s)\n# define BOOST_PP_WHILE_153(p, o, s) BOOST_PP_WHILE_153_C(BOOST_PP_BOOL(p(154, s)), p, o, s)\n# define BOOST_PP_WHILE_154(p, o, s) BOOST_PP_WHILE_154_C(BOOST_PP_BOOL(p(155, s)), p, o, s)\n# define BOOST_PP_WHILE_155(p, o, s) BOOST_PP_WHILE_155_C(BOOST_PP_BOOL(p(156, s)), p, o, s)\n# define BOOST_PP_WHILE_156(p, o, s) BOOST_PP_WHILE_156_C(BOOST_PP_BOOL(p(157, s)), p, o, s)\n# define BOOST_PP_WHILE_157(p, o, s) BOOST_PP_WHILE_157_C(BOOST_PP_BOOL(p(158, s)), p, o, s)\n# define BOOST_PP_WHILE_158(p, o, s) BOOST_PP_WHILE_158_C(BOOST_PP_BOOL(p(159, s)), p, o, s)\n# define BOOST_PP_WHILE_159(p, o, s) BOOST_PP_WHILE_159_C(BOOST_PP_BOOL(p(160, s)), p, o, s)\n# define BOOST_PP_WHILE_160(p, o, s) BOOST_PP_WHILE_160_C(BOOST_PP_BOOL(p(161, s)), p, o, s)\n# define BOOST_PP_WHILE_161(p, o, s) BOOST_PP_WHILE_161_C(BOOST_PP_BOOL(p(162, s)), p, o, s)\n# define BOOST_PP_WHILE_162(p, o, s) BOOST_PP_WHILE_162_C(BOOST_PP_BOOL(p(163, s)), p, o, s)\n# define BOOST_PP_WHILE_163(p, o, s) BOOST_PP_WHILE_163_C(BOOST_PP_BOOL(p(164, s)), p, o, s)\n# define BOOST_PP_WHILE_164(p, o, s) BOOST_PP_WHILE_164_C(BOOST_PP_BOOL(p(165, s)), p, o, s)\n# define BOOST_PP_WHILE_165(p, o, s) BOOST_PP_WHILE_165_C(BOOST_PP_BOOL(p(166, s)), p, o, s)\n# define BOOST_PP_WHILE_166(p, o, s) 
BOOST_PP_WHILE_166_C(BOOST_PP_BOOL(p(167, s)), p, o, s)\n# define BOOST_PP_WHILE_167(p, o, s) BOOST_PP_WHILE_167_C(BOOST_PP_BOOL(p(168, s)), p, o, s)\n# define BOOST_PP_WHILE_168(p, o, s) BOOST_PP_WHILE_168_C(BOOST_PP_BOOL(p(169, s)), p, o, s)\n# define BOOST_PP_WHILE_169(p, o, s) BOOST_PP_WHILE_169_C(BOOST_PP_BOOL(p(170, s)), p, o, s)\n# define BOOST_PP_WHILE_170(p, o, s) BOOST_PP_WHILE_170_C(BOOST_PP_BOOL(p(171, s)), p, o, s)\n# define BOOST_PP_WHILE_171(p, o, s) BOOST_PP_WHILE_171_C(BOOST_PP_BOOL(p(172, s)), p, o, s)\n# define BOOST_PP_WHILE_172(p, o, s) BOOST_PP_WHILE_172_C(BOOST_PP_BOOL(p(173, s)), p, o, s)\n# define BOOST_PP_WHILE_173(p, o, s) BOOST_PP_WHILE_173_C(BOOST_PP_BOOL(p(174, s)), p, o, s)\n# define BOOST_PP_WHILE_174(p, o, s) BOOST_PP_WHILE_174_C(BOOST_PP_BOOL(p(175, s)), p, o, s)\n# define BOOST_PP_WHILE_175(p, o, s) BOOST_PP_WHILE_175_C(BOOST_PP_BOOL(p(176, s)), p, o, s)\n# define BOOST_PP_WHILE_176(p, o, s) BOOST_PP_WHILE_176_C(BOOST_PP_BOOL(p(177, s)), p, o, s)\n# define BOOST_PP_WHILE_177(p, o, s) BOOST_PP_WHILE_177_C(BOOST_PP_BOOL(p(178, s)), p, o, s)\n# define BOOST_PP_WHILE_178(p, o, s) BOOST_PP_WHILE_178_C(BOOST_PP_BOOL(p(179, s)), p, o, s)\n# define BOOST_PP_WHILE_179(p, o, s) BOOST_PP_WHILE_179_C(BOOST_PP_BOOL(p(180, s)), p, o, s)\n# define BOOST_PP_WHILE_180(p, o, s) BOOST_PP_WHILE_180_C(BOOST_PP_BOOL(p(181, s)), p, o, s)\n# define BOOST_PP_WHILE_181(p, o, s) BOOST_PP_WHILE_181_C(BOOST_PP_BOOL(p(182, s)), p, o, s)\n# define BOOST_PP_WHILE_182(p, o, s) BOOST_PP_WHILE_182_C(BOOST_PP_BOOL(p(183, s)), p, o, s)\n# define BOOST_PP_WHILE_183(p, o, s) BOOST_PP_WHILE_183_C(BOOST_PP_BOOL(p(184, s)), p, o, s)\n# define BOOST_PP_WHILE_184(p, o, s) BOOST_PP_WHILE_184_C(BOOST_PP_BOOL(p(185, s)), p, o, s)\n# define BOOST_PP_WHILE_185(p, o, s) BOOST_PP_WHILE_185_C(BOOST_PP_BOOL(p(186, s)), p, o, s)\n# define BOOST_PP_WHILE_186(p, o, s) BOOST_PP_WHILE_186_C(BOOST_PP_BOOL(p(187, s)), p, o, s)\n# define BOOST_PP_WHILE_187(p, o, s) 
BOOST_PP_WHILE_187_C(BOOST_PP_BOOL(p(188, s)), p, o, s)\n# define BOOST_PP_WHILE_188(p, o, s) BOOST_PP_WHILE_188_C(BOOST_PP_BOOL(p(189, s)), p, o, s)\n# define BOOST_PP_WHILE_189(p, o, s) BOOST_PP_WHILE_189_C(BOOST_PP_BOOL(p(190, s)), p, o, s)\n# define BOOST_PP_WHILE_190(p, o, s) BOOST_PP_WHILE_190_C(BOOST_PP_BOOL(p(191, s)), p, o, s)\n# define BOOST_PP_WHILE_191(p, o, s) BOOST_PP_WHILE_191_C(BOOST_PP_BOOL(p(192, s)), p, o, s)\n# define BOOST_PP_WHILE_192(p, o, s) BOOST_PP_WHILE_192_C(BOOST_PP_BOOL(p(193, s)), p, o, s)\n# define BOOST_PP_WHILE_193(p, o, s) BOOST_PP_WHILE_193_C(BOOST_PP_BOOL(p(194, s)), p, o, s)\n# define BOOST_PP_WHILE_194(p, o, s) BOOST_PP_WHILE_194_C(BOOST_PP_BOOL(p(195, s)), p, o, s)\n# define BOOST_PP_WHILE_195(p, o, s) BOOST_PP_WHILE_195_C(BOOST_PP_BOOL(p(196, s)), p, o, s)\n# define BOOST_PP_WHILE_196(p, o, s) BOOST_PP_WHILE_196_C(BOOST_PP_BOOL(p(197, s)), p, o, s)\n# define BOOST_PP_WHILE_197(p, o, s) BOOST_PP_WHILE_197_C(BOOST_PP_BOOL(p(198, s)), p, o, s)\n# define BOOST_PP_WHILE_198(p, o, s) BOOST_PP_WHILE_198_C(BOOST_PP_BOOL(p(199, s)), p, o, s)\n# define BOOST_PP_WHILE_199(p, o, s) BOOST_PP_WHILE_199_C(BOOST_PP_BOOL(p(200, s)), p, o, s)\n# define BOOST_PP_WHILE_200(p, o, s) BOOST_PP_WHILE_200_C(BOOST_PP_BOOL(p(201, s)), p, o, s)\n# define BOOST_PP_WHILE_201(p, o, s) BOOST_PP_WHILE_201_C(BOOST_PP_BOOL(p(202, s)), p, o, s)\n# define BOOST_PP_WHILE_202(p, o, s) BOOST_PP_WHILE_202_C(BOOST_PP_BOOL(p(203, s)), p, o, s)\n# define BOOST_PP_WHILE_203(p, o, s) BOOST_PP_WHILE_203_C(BOOST_PP_BOOL(p(204, s)), p, o, s)\n# define BOOST_PP_WHILE_204(p, o, s) BOOST_PP_WHILE_204_C(BOOST_PP_BOOL(p(205, s)), p, o, s)\n# define BOOST_PP_WHILE_205(p, o, s) BOOST_PP_WHILE_205_C(BOOST_PP_BOOL(p(206, s)), p, o, s)\n# define BOOST_PP_WHILE_206(p, o, s) BOOST_PP_WHILE_206_C(BOOST_PP_BOOL(p(207, s)), p, o, s)\n# define BOOST_PP_WHILE_207(p, o, s) BOOST_PP_WHILE_207_C(BOOST_PP_BOOL(p(208, s)), p, o, s)\n# define BOOST_PP_WHILE_208(p, o, s) 
BOOST_PP_WHILE_208_C(BOOST_PP_BOOL(p(209, s)), p, o, s)\n# define BOOST_PP_WHILE_209(p, o, s) BOOST_PP_WHILE_209_C(BOOST_PP_BOOL(p(210, s)), p, o, s)\n# define BOOST_PP_WHILE_210(p, o, s) BOOST_PP_WHILE_210_C(BOOST_PP_BOOL(p(211, s)), p, o, s)\n# define BOOST_PP_WHILE_211(p, o, s) BOOST_PP_WHILE_211_C(BOOST_PP_BOOL(p(212, s)), p, o, s)\n# define BOOST_PP_WHILE_212(p, o, s) BOOST_PP_WHILE_212_C(BOOST_PP_BOOL(p(213, s)), p, o, s)\n# define BOOST_PP_WHILE_213(p, o, s) BOOST_PP_WHILE_213_C(BOOST_PP_BOOL(p(214, s)), p, o, s)\n# define BOOST_PP_WHILE_214(p, o, s) BOOST_PP_WHILE_214_C(BOOST_PP_BOOL(p(215, s)), p, o, s)\n# define BOOST_PP_WHILE_215(p, o, s) BOOST_PP_WHILE_215_C(BOOST_PP_BOOL(p(216, s)), p, o, s)\n# define BOOST_PP_WHILE_216(p, o, s) BOOST_PP_WHILE_216_C(BOOST_PP_BOOL(p(217, s)), p, o, s)\n# define BOOST_PP_WHILE_217(p, o, s) BOOST_PP_WHILE_217_C(BOOST_PP_BOOL(p(218, s)), p, o, s)\n# define BOOST_PP_WHILE_218(p, o, s) BOOST_PP_WHILE_218_C(BOOST_PP_BOOL(p(219, s)), p, o, s)\n# define BOOST_PP_WHILE_219(p, o, s) BOOST_PP_WHILE_219_C(BOOST_PP_BOOL(p(220, s)), p, o, s)\n# define BOOST_PP_WHILE_220(p, o, s) BOOST_PP_WHILE_220_C(BOOST_PP_BOOL(p(221, s)), p, o, s)\n# define BOOST_PP_WHILE_221(p, o, s) BOOST_PP_WHILE_221_C(BOOST_PP_BOOL(p(222, s)), p, o, s)\n# define BOOST_PP_WHILE_222(p, o, s) BOOST_PP_WHILE_222_C(BOOST_PP_BOOL(p(223, s)), p, o, s)\n# define BOOST_PP_WHILE_223(p, o, s) BOOST_PP_WHILE_223_C(BOOST_PP_BOOL(p(224, s)), p, o, s)\n# define BOOST_PP_WHILE_224(p, o, s) BOOST_PP_WHILE_224_C(BOOST_PP_BOOL(p(225, s)), p, o, s)\n# define BOOST_PP_WHILE_225(p, o, s) BOOST_PP_WHILE_225_C(BOOST_PP_BOOL(p(226, s)), p, o, s)\n# define BOOST_PP_WHILE_226(p, o, s) BOOST_PP_WHILE_226_C(BOOST_PP_BOOL(p(227, s)), p, o, s)\n# define BOOST_PP_WHILE_227(p, o, s) BOOST_PP_WHILE_227_C(BOOST_PP_BOOL(p(228, s)), p, o, s)\n# define BOOST_PP_WHILE_228(p, o, s) BOOST_PP_WHILE_228_C(BOOST_PP_BOOL(p(229, s)), p, o, s)\n# define BOOST_PP_WHILE_229(p, o, s) 
BOOST_PP_WHILE_229_C(BOOST_PP_BOOL(p(230, s)), p, o, s)\n# define BOOST_PP_WHILE_230(p, o, s) BOOST_PP_WHILE_230_C(BOOST_PP_BOOL(p(231, s)), p, o, s)\n# define BOOST_PP_WHILE_231(p, o, s) BOOST_PP_WHILE_231_C(BOOST_PP_BOOL(p(232, s)), p, o, s)\n# define BOOST_PP_WHILE_232(p, o, s) BOOST_PP_WHILE_232_C(BOOST_PP_BOOL(p(233, s)), p, o, s)\n# define BOOST_PP_WHILE_233(p, o, s) BOOST_PP_WHILE_233_C(BOOST_PP_BOOL(p(234, s)), p, o, s)\n# define BOOST_PP_WHILE_234(p, o, s) BOOST_PP_WHILE_234_C(BOOST_PP_BOOL(p(235, s)), p, o, s)\n# define BOOST_PP_WHILE_235(p, o, s) BOOST_PP_WHILE_235_C(BOOST_PP_BOOL(p(236, s)), p, o, s)\n# define BOOST_PP_WHILE_236(p, o, s) BOOST_PP_WHILE_236_C(BOOST_PP_BOOL(p(237, s)), p, o, s)\n# define BOOST_PP_WHILE_237(p, o, s) BOOST_PP_WHILE_237_C(BOOST_PP_BOOL(p(238, s)), p, o, s)\n# define BOOST_PP_WHILE_238(p, o, s) BOOST_PP_WHILE_238_C(BOOST_PP_BOOL(p(239, s)), p, o, s)\n# define BOOST_PP_WHILE_239(p, o, s) BOOST_PP_WHILE_239_C(BOOST_PP_BOOL(p(240, s)), p, o, s)\n# define BOOST_PP_WHILE_240(p, o, s) BOOST_PP_WHILE_240_C(BOOST_PP_BOOL(p(241, s)), p, o, s)\n# define BOOST_PP_WHILE_241(p, o, s) BOOST_PP_WHILE_241_C(BOOST_PP_BOOL(p(242, s)), p, o, s)\n# define BOOST_PP_WHILE_242(p, o, s) BOOST_PP_WHILE_242_C(BOOST_PP_BOOL(p(243, s)), p, o, s)\n# define BOOST_PP_WHILE_243(p, o, s) BOOST_PP_WHILE_243_C(BOOST_PP_BOOL(p(244, s)), p, o, s)\n# define BOOST_PP_WHILE_244(p, o, s) BOOST_PP_WHILE_244_C(BOOST_PP_BOOL(p(245, s)), p, o, s)\n# define BOOST_PP_WHILE_245(p, o, s) BOOST_PP_WHILE_245_C(BOOST_PP_BOOL(p(246, s)), p, o, s)\n# define BOOST_PP_WHILE_246(p, o, s) BOOST_PP_WHILE_246_C(BOOST_PP_BOOL(p(247, s)), p, o, s)\n# define BOOST_PP_WHILE_247(p, o, s) BOOST_PP_WHILE_247_C(BOOST_PP_BOOL(p(248, s)), p, o, s)\n# define BOOST_PP_WHILE_248(p, o, s) BOOST_PP_WHILE_248_C(BOOST_PP_BOOL(p(249, s)), p, o, s)\n# define BOOST_PP_WHILE_249(p, o, s) BOOST_PP_WHILE_249_C(BOOST_PP_BOOL(p(250, s)), p, o, s)\n# define BOOST_PP_WHILE_250(p, o, s) 
BOOST_PP_WHILE_250_C(BOOST_PP_BOOL(p(251, s)), p, o, s)\n# define BOOST_PP_WHILE_251(p, o, s) BOOST_PP_WHILE_251_C(BOOST_PP_BOOL(p(252, s)), p, o, s)\n# define BOOST_PP_WHILE_252(p, o, s) BOOST_PP_WHILE_252_C(BOOST_PP_BOOL(p(253, s)), p, o, s)\n# define BOOST_PP_WHILE_253(p, o, s) BOOST_PP_WHILE_253_C(BOOST_PP_BOOL(p(254, s)), p, o, s)\n# define BOOST_PP_WHILE_254(p, o, s) BOOST_PP_WHILE_254_C(BOOST_PP_BOOL(p(255, s)), p, o, s)\n# define BOOST_PP_WHILE_255(p, o, s) BOOST_PP_WHILE_255_C(BOOST_PP_BOOL(p(256, s)), p, o, s)\n# define BOOST_PP_WHILE_256(p, o, s) BOOST_PP_WHILE_256_C(BOOST_PP_BOOL(p(257, s)), p, o, s)\n#\n# define BOOST_PP_WHILE_1_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_2, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(2, s))\n# define BOOST_PP_WHILE_2_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_3, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(3, s))\n# define BOOST_PP_WHILE_3_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_4, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(4, s))\n# define BOOST_PP_WHILE_4_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_5, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(5, s))\n# define BOOST_PP_WHILE_5_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_6, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(6, s))\n# define BOOST_PP_WHILE_6_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_7, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(7, s))\n# define BOOST_PP_WHILE_7_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_8, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(8, s))\n# define BOOST_PP_WHILE_8_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_9, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(9, s))\n# define BOOST_PP_WHILE_9_C(c, p, o, s) 
BOOST_PP_IIF(c, BOOST_PP_WHILE_10, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(10, s))\n# define BOOST_PP_WHILE_10_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_11, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(11, s))\n# define BOOST_PP_WHILE_11_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_12, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(12, s))\n# define BOOST_PP_WHILE_12_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_13, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(13, s))\n# define BOOST_PP_WHILE_13_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_14, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(14, s))\n# define BOOST_PP_WHILE_14_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_15, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(15, s))\n# define BOOST_PP_WHILE_15_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_16, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(16, s))\n# define BOOST_PP_WHILE_16_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_17, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(17, s))\n# define BOOST_PP_WHILE_17_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_18, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(18, s))\n# define BOOST_PP_WHILE_18_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_19, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(19, s))\n# define BOOST_PP_WHILE_19_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_20, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(20, s))\n# define BOOST_PP_WHILE_20_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_21, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(21, s))\n# define 
BOOST_PP_WHILE_21_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_22, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(22, s))\n# define BOOST_PP_WHILE_22_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_23, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(23, s))\n# define BOOST_PP_WHILE_23_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_24, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(24, s))\n# define BOOST_PP_WHILE_24_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_25, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(25, s))\n# define BOOST_PP_WHILE_25_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_26, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(26, s))\n# define BOOST_PP_WHILE_26_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_27, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(27, s))\n# define BOOST_PP_WHILE_27_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_28, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(28, s))\n# define BOOST_PP_WHILE_28_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_29, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(29, s))\n# define BOOST_PP_WHILE_29_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_30, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(30, s))\n# define BOOST_PP_WHILE_30_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_31, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(31, s))\n# define BOOST_PP_WHILE_31_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_32, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(32, s))\n# define BOOST_PP_WHILE_32_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_33, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_2)(33, s))\n# define BOOST_PP_WHILE_33_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_34, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(34, s))\n# define BOOST_PP_WHILE_34_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_35, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(35, s))\n# define BOOST_PP_WHILE_35_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_36, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(36, s))\n# define BOOST_PP_WHILE_36_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_37, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(37, s))\n# define BOOST_PP_WHILE_37_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_38, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(38, s))\n# define BOOST_PP_WHILE_38_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_39, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(39, s))\n# define BOOST_PP_WHILE_39_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_40, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(40, s))\n# define BOOST_PP_WHILE_40_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_41, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(41, s))\n# define BOOST_PP_WHILE_41_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_42, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(42, s))\n# define BOOST_PP_WHILE_42_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_43, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(43, s))\n# define BOOST_PP_WHILE_43_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_44, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(44, s))\n# define BOOST_PP_WHILE_44_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_45, s BOOST_PP_TUPLE_EAT_3)(p, o, 
BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(45, s))\n# define BOOST_PP_WHILE_45_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_46, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(46, s))\n# define BOOST_PP_WHILE_46_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_47, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(47, s))\n# define BOOST_PP_WHILE_47_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_48, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(48, s))\n# define BOOST_PP_WHILE_48_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_49, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(49, s))\n# define BOOST_PP_WHILE_49_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_50, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(50, s))\n# define BOOST_PP_WHILE_50_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_51, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(51, s))\n# define BOOST_PP_WHILE_51_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_52, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(52, s))\n# define BOOST_PP_WHILE_52_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_53, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(53, s))\n# define BOOST_PP_WHILE_53_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_54, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(54, s))\n# define BOOST_PP_WHILE_54_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_55, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(55, s))\n# define BOOST_PP_WHILE_55_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_56, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(56, s))\n# define BOOST_PP_WHILE_56_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_57, s 
BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(57, s))\n# define BOOST_PP_WHILE_57_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_58, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(58, s))\n# define BOOST_PP_WHILE_58_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_59, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(59, s))\n# define BOOST_PP_WHILE_59_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_60, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(60, s))\n# define BOOST_PP_WHILE_60_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_61, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(61, s))\n# define BOOST_PP_WHILE_61_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_62, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(62, s))\n# define BOOST_PP_WHILE_62_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_63, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(63, s))\n# define BOOST_PP_WHILE_63_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_64, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(64, s))\n# define BOOST_PP_WHILE_64_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_65, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(65, s))\n# define BOOST_PP_WHILE_65_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_66, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(66, s))\n# define BOOST_PP_WHILE_66_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_67, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(67, s))\n# define BOOST_PP_WHILE_67_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_68, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(68, s))\n# define BOOST_PP_WHILE_68_C(c, p, o, s) 
BOOST_PP_IIF(c, BOOST_PP_WHILE_69, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(69, s))\n# define BOOST_PP_WHILE_69_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_70, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(70, s))\n# define BOOST_PP_WHILE_70_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_71, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(71, s))\n# define BOOST_PP_WHILE_71_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_72, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(72, s))\n# define BOOST_PP_WHILE_72_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_73, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(73, s))\n# define BOOST_PP_WHILE_73_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_74, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(74, s))\n# define BOOST_PP_WHILE_74_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_75, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(75, s))\n# define BOOST_PP_WHILE_75_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_76, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(76, s))\n# define BOOST_PP_WHILE_76_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_77, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(77, s))\n# define BOOST_PP_WHILE_77_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_78, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(78, s))\n# define BOOST_PP_WHILE_78_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_79, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(79, s))\n# define BOOST_PP_WHILE_79_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_80, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(80, s))\n# define 
BOOST_PP_WHILE_80_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_81, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(81, s))\n# define BOOST_PP_WHILE_81_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_82, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(82, s))\n# define BOOST_PP_WHILE_82_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_83, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(83, s))\n# define BOOST_PP_WHILE_83_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_84, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(84, s))\n# define BOOST_PP_WHILE_84_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_85, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(85, s))\n# define BOOST_PP_WHILE_85_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_86, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(86, s))\n# define BOOST_PP_WHILE_86_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_87, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(87, s))\n# define BOOST_PP_WHILE_87_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_88, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(88, s))\n# define BOOST_PP_WHILE_88_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_89, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(89, s))\n# define BOOST_PP_WHILE_89_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_90, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(90, s))\n# define BOOST_PP_WHILE_90_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_91, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(91, s))\n# define BOOST_PP_WHILE_91_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_92, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_2)(92, s))\n# define BOOST_PP_WHILE_92_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_93, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(93, s))\n# define BOOST_PP_WHILE_93_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_94, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(94, s))\n# define BOOST_PP_WHILE_94_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_95, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(95, s))\n# define BOOST_PP_WHILE_95_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_96, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(96, s))\n# define BOOST_PP_WHILE_96_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_97, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(97, s))\n# define BOOST_PP_WHILE_97_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_98, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(98, s))\n# define BOOST_PP_WHILE_98_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_99, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(99, s))\n# define BOOST_PP_WHILE_99_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_100, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(100, s))\n# define BOOST_PP_WHILE_100_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_101, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(101, s))\n# define BOOST_PP_WHILE_101_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_102, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(102, s))\n# define BOOST_PP_WHILE_102_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_103, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(103, s))\n# define BOOST_PP_WHILE_103_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_104, s 
BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(104, s))\n# define BOOST_PP_WHILE_104_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_105, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(105, s))\n# define BOOST_PP_WHILE_105_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_106, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(106, s))\n# define BOOST_PP_WHILE_106_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_107, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(107, s))\n# define BOOST_PP_WHILE_107_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_108, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(108, s))\n# define BOOST_PP_WHILE_108_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_109, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(109, s))\n# define BOOST_PP_WHILE_109_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_110, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(110, s))\n# define BOOST_PP_WHILE_110_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_111, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(111, s))\n# define BOOST_PP_WHILE_111_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_112, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(112, s))\n# define BOOST_PP_WHILE_112_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_113, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(113, s))\n# define BOOST_PP_WHILE_113_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_114, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(114, s))\n# define BOOST_PP_WHILE_114_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_115, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(115, s))\n# define 
BOOST_PP_WHILE_115_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_116, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(116, s))\n# define BOOST_PP_WHILE_116_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_117, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(117, s))\n# define BOOST_PP_WHILE_117_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_118, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(118, s))\n# define BOOST_PP_WHILE_118_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_119, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(119, s))\n# define BOOST_PP_WHILE_119_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_120, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(120, s))\n# define BOOST_PP_WHILE_120_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_121, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(121, s))\n# define BOOST_PP_WHILE_121_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_122, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(122, s))\n# define BOOST_PP_WHILE_122_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_123, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(123, s))\n# define BOOST_PP_WHILE_123_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_124, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(124, s))\n# define BOOST_PP_WHILE_124_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_125, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(125, s))\n# define BOOST_PP_WHILE_125_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_126, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(126, s))\n# define BOOST_PP_WHILE_126_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_127, s BOOST_PP_TUPLE_EAT_3)(p, o, 
BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(127, s))\n# define BOOST_PP_WHILE_127_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_128, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(128, s))\n# define BOOST_PP_WHILE_128_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_129, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(129, s))\n# define BOOST_PP_WHILE_129_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_130, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(130, s))\n# define BOOST_PP_WHILE_130_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_131, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(131, s))\n# define BOOST_PP_WHILE_131_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_132, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(132, s))\n# define BOOST_PP_WHILE_132_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_133, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(133, s))\n# define BOOST_PP_WHILE_133_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_134, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(134, s))\n# define BOOST_PP_WHILE_134_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_135, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(135, s))\n# define BOOST_PP_WHILE_135_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_136, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(136, s))\n# define BOOST_PP_WHILE_136_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_137, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(137, s))\n# define BOOST_PP_WHILE_137_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_138, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(138, s))\n# define BOOST_PP_WHILE_138_C(c, p, o, s) 
BOOST_PP_IIF(c, BOOST_PP_WHILE_139, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(139, s))\n# define BOOST_PP_WHILE_139_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_140, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(140, s))\n# define BOOST_PP_WHILE_140_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_141, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(141, s))\n# define BOOST_PP_WHILE_141_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_142, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(142, s))\n# define BOOST_PP_WHILE_142_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_143, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(143, s))\n# define BOOST_PP_WHILE_143_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_144, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(144, s))\n# define BOOST_PP_WHILE_144_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_145, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(145, s))\n# define BOOST_PP_WHILE_145_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_146, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(146, s))\n# define BOOST_PP_WHILE_146_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_147, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(147, s))\n# define BOOST_PP_WHILE_147_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_148, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(148, s))\n# define BOOST_PP_WHILE_148_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_149, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(149, s))\n# define BOOST_PP_WHILE_149_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_150, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_2)(150, s))\n# define BOOST_PP_WHILE_150_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_151, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(151, s))\n# define BOOST_PP_WHILE_151_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_152, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(152, s))\n# define BOOST_PP_WHILE_152_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_153, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(153, s))\n# define BOOST_PP_WHILE_153_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_154, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(154, s))\n# define BOOST_PP_WHILE_154_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_155, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(155, s))\n# define BOOST_PP_WHILE_155_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_156, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(156, s))\n# define BOOST_PP_WHILE_156_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_157, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(157, s))\n# define BOOST_PP_WHILE_157_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_158, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(158, s))\n# define BOOST_PP_WHILE_158_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_159, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(159, s))\n# define BOOST_PP_WHILE_159_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_160, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(160, s))\n# define BOOST_PP_WHILE_160_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_161, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(161, s))\n# define BOOST_PP_WHILE_161_C(c, p, o, s) BOOST_PP_IIF(c, 
BOOST_PP_WHILE_162, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(162, s))\n# define BOOST_PP_WHILE_162_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_163, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(163, s))\n# define BOOST_PP_WHILE_163_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_164, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(164, s))\n# define BOOST_PP_WHILE_164_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_165, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(165, s))\n# define BOOST_PP_WHILE_165_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_166, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(166, s))\n# define BOOST_PP_WHILE_166_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_167, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(167, s))\n# define BOOST_PP_WHILE_167_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_168, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(168, s))\n# define BOOST_PP_WHILE_168_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_169, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(169, s))\n# define BOOST_PP_WHILE_169_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_170, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(170, s))\n# define BOOST_PP_WHILE_170_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_171, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(171, s))\n# define BOOST_PP_WHILE_171_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_172, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(172, s))\n# define BOOST_PP_WHILE_172_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_173, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_2)(173, s))\n# define BOOST_PP_WHILE_173_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_174, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(174, s))\n# define BOOST_PP_WHILE_174_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_175, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(175, s))\n# define BOOST_PP_WHILE_175_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_176, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(176, s))\n# define BOOST_PP_WHILE_176_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_177, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(177, s))\n# define BOOST_PP_WHILE_177_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_178, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(178, s))\n# define BOOST_PP_WHILE_178_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_179, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(179, s))\n# define BOOST_PP_WHILE_179_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_180, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(180, s))\n# define BOOST_PP_WHILE_180_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_181, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(181, s))\n# define BOOST_PP_WHILE_181_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_182, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(182, s))\n# define BOOST_PP_WHILE_182_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_183, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(183, s))\n# define BOOST_PP_WHILE_183_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_184, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(184, s))\n# define BOOST_PP_WHILE_184_C(c, p, o, s) BOOST_PP_IIF(c, 
BOOST_PP_WHILE_185, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(185, s))\n# define BOOST_PP_WHILE_185_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_186, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(186, s))\n# define BOOST_PP_WHILE_186_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_187, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(187, s))\n# define BOOST_PP_WHILE_187_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_188, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(188, s))\n# define BOOST_PP_WHILE_188_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_189, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(189, s))\n# define BOOST_PP_WHILE_189_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_190, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(190, s))\n# define BOOST_PP_WHILE_190_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_191, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(191, s))\n# define BOOST_PP_WHILE_191_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_192, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(192, s))\n# define BOOST_PP_WHILE_192_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_193, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(193, s))\n# define BOOST_PP_WHILE_193_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_194, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(194, s))\n# define BOOST_PP_WHILE_194_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_195, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(195, s))\n# define BOOST_PP_WHILE_195_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_196, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_2)(196, s))\n# define BOOST_PP_WHILE_196_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_197, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(197, s))\n# define BOOST_PP_WHILE_197_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_198, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(198, s))\n# define BOOST_PP_WHILE_198_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_199, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(199, s))\n# define BOOST_PP_WHILE_199_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_200, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(200, s))\n# define BOOST_PP_WHILE_200_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_201, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(201, s))\n# define BOOST_PP_WHILE_201_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_202, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(202, s))\n# define BOOST_PP_WHILE_202_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_203, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(203, s))\n# define BOOST_PP_WHILE_203_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_204, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(204, s))\n# define BOOST_PP_WHILE_204_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_205, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(205, s))\n# define BOOST_PP_WHILE_205_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_206, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(206, s))\n# define BOOST_PP_WHILE_206_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_207, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(207, s))\n# define BOOST_PP_WHILE_207_C(c, p, o, s) BOOST_PP_IIF(c, 
BOOST_PP_WHILE_208, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(208, s))\n# define BOOST_PP_WHILE_208_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_209, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(209, s))\n# define BOOST_PP_WHILE_209_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_210, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(210, s))\n# define BOOST_PP_WHILE_210_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_211, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(211, s))\n# define BOOST_PP_WHILE_211_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_212, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(212, s))\n# define BOOST_PP_WHILE_212_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_213, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(213, s))\n# define BOOST_PP_WHILE_213_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_214, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(214, s))\n# define BOOST_PP_WHILE_214_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_215, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(215, s))\n# define BOOST_PP_WHILE_215_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_216, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(216, s))\n# define BOOST_PP_WHILE_216_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_217, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(217, s))\n# define BOOST_PP_WHILE_217_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_218, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(218, s))\n# define BOOST_PP_WHILE_218_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_219, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_2)(219, s))\n# define BOOST_PP_WHILE_219_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_220, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(220, s))\n# define BOOST_PP_WHILE_220_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_221, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(221, s))\n# define BOOST_PP_WHILE_221_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_222, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(222, s))\n# define BOOST_PP_WHILE_222_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_223, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(223, s))\n# define BOOST_PP_WHILE_223_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_224, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(224, s))\n# define BOOST_PP_WHILE_224_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_225, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(225, s))\n# define BOOST_PP_WHILE_225_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_226, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(226, s))\n# define BOOST_PP_WHILE_226_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_227, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(227, s))\n# define BOOST_PP_WHILE_227_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_228, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(228, s))\n# define BOOST_PP_WHILE_228_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_229, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(229, s))\n# define BOOST_PP_WHILE_229_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_230, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(230, s))\n# define BOOST_PP_WHILE_230_C(c, p, o, s) BOOST_PP_IIF(c, 
BOOST_PP_WHILE_231, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(231, s))\n# define BOOST_PP_WHILE_231_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_232, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(232, s))\n# define BOOST_PP_WHILE_232_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_233, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(233, s))\n# define BOOST_PP_WHILE_233_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_234, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(234, s))\n# define BOOST_PP_WHILE_234_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_235, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(235, s))\n# define BOOST_PP_WHILE_235_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_236, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(236, s))\n# define BOOST_PP_WHILE_236_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_237, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(237, s))\n# define BOOST_PP_WHILE_237_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_238, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(238, s))\n# define BOOST_PP_WHILE_238_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_239, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(239, s))\n# define BOOST_PP_WHILE_239_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_240, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(240, s))\n# define BOOST_PP_WHILE_240_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_241, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(241, s))\n# define BOOST_PP_WHILE_241_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_242, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_2)(242, s))\n# define BOOST_PP_WHILE_242_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_243, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(243, s))\n# define BOOST_PP_WHILE_243_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_244, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(244, s))\n# define BOOST_PP_WHILE_244_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_245, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(245, s))\n# define BOOST_PP_WHILE_245_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_246, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(246, s))\n# define BOOST_PP_WHILE_246_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_247, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(247, s))\n# define BOOST_PP_WHILE_247_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_248, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(248, s))\n# define BOOST_PP_WHILE_248_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_249, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(249, s))\n# define BOOST_PP_WHILE_249_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_250, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(250, s))\n# define BOOST_PP_WHILE_250_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_251, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(251, s))\n# define BOOST_PP_WHILE_251_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_252, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(252, s))\n# define BOOST_PP_WHILE_252_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_253, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(253, s))\n# define BOOST_PP_WHILE_253_C(c, p, o, s) BOOST_PP_IIF(c, 
BOOST_PP_WHILE_254, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(254, s))\n# define BOOST_PP_WHILE_254_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_255, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(255, s))\n# define BOOST_PP_WHILE_255_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_256, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(256, s))\n# define BOOST_PP_WHILE_256_C(c, p, o, s) BOOST_PP_IIF(c, BOOST_PP_WHILE_257, s BOOST_PP_TUPLE_EAT_3)(p, o, BOOST_PP_IIF(c, o, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_2)(257, s))\n#\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/expr_if.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_EXPR_IF_HPP\n# define BOOST_PREPROCESSOR_CONTROL_EXPR_IF_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/expr_iif.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n#\n# /* BOOST_PP_EXPR_IF */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_EXPR_IF(cond, expr) BOOST_PP_EXPR_IIF(BOOST_PP_BOOL(cond), expr)\n# else\n#    define BOOST_PP_EXPR_IF(cond, expr) BOOST_PP_EXPR_IF_I(cond, expr)\n#    define BOOST_PP_EXPR_IF_I(cond, expr) BOOST_PP_EXPR_IIF(BOOST_PP_BOOL(cond), expr)\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/expr_iif.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_EXPR_IIF_HPP\n# define BOOST_PREPROCESSOR_CONTROL_EXPR_IIF_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_EXPR_IIF */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_EXPR_IIF(bit, expr) BOOST_PP_EXPR_IIF_I(bit, expr)\n# else\n#    define BOOST_PP_EXPR_IIF(bit, expr) BOOST_PP_EXPR_IIF_OO((bit, expr))\n#    define BOOST_PP_EXPR_IIF_OO(par) BOOST_PP_EXPR_IIF_I ## par\n# endif\n#\n# define BOOST_PP_EXPR_IIF_I(bit, expr) BOOST_PP_EXPR_IIF_ ## bit(expr)\n#\n# define BOOST_PP_EXPR_IIF_0(expr)\n# define BOOST_PP_EXPR_IIF_1(expr) expr\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/if.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_IF_HPP\n# define BOOST_PREPROCESSOR_CONTROL_IF_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n#\n# /* BOOST_PP_IF */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_IF(cond, t, f) BOOST_PP_IIF(BOOST_PP_BOOL(cond), t, f)\n# else\n#    define BOOST_PP_IF(cond, t, f) BOOST_PP_IF_I(cond, t, f)\n#    define BOOST_PP_IF_I(cond, t, f) BOOST_PP_IIF(BOOST_PP_BOOL(cond), t, f)\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/iif.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_IIF_HPP\n# define BOOST_PREPROCESSOR_CONTROL_IIF_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_IIF(bit, t, f) BOOST_PP_IIF_I(bit, t, f)\n# else\n#    define BOOST_PP_IIF(bit, t, f) BOOST_PP_IIF_OO((bit, t, f))\n#    define BOOST_PP_IIF_OO(par) BOOST_PP_IIF_I ## par\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_IIF_I(bit, t, f) BOOST_PP_IIF_ ## bit(t, f)\n# else\n#    define BOOST_PP_IIF_I(bit, t, f) BOOST_PP_IIF_II(BOOST_PP_IIF_ ## bit(t, f))\n#    define BOOST_PP_IIF_II(id) id\n# endif\n#\n# define BOOST_PP_IIF_0(t, f) f\n# define BOOST_PP_IIF_1(t, f) t\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/control/while.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_CONTROL_WHILE_HPP\n# define BOOST_PREPROCESSOR_CONTROL_WHILE_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n# include <boost/preprocessor/list/fold_left.hpp>\n# include <boost/preprocessor/list/fold_right.hpp>\n# include <boost/preprocessor/logical/bitand.hpp>\n#\n# /* BOOST_PP_WHILE */\n#\n# if 0\n#    define BOOST_PP_WHILE(pred, op, state)\n# endif\n#\n# define BOOST_PP_WHILE BOOST_PP_CAT(BOOST_PP_WHILE_, BOOST_PP_AUTO_REC(BOOST_PP_WHILE_P, 256))\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_WHILE_P(n) BOOST_PP_BITAND(BOOST_PP_CAT(BOOST_PP_WHILE_CHECK_, BOOST_PP_WHILE_ ## n(BOOST_PP_WHILE_F, BOOST_PP_NIL, BOOST_PP_NIL)), BOOST_PP_BITAND(BOOST_PP_CAT(BOOST_PP_LIST_FOLD_LEFT_CHECK_, BOOST_PP_LIST_FOLD_LEFT_ ## n(BOOST_PP_NIL, BOOST_PP_NIL, BOOST_PP_NIL)), BOOST_PP_CAT(BOOST_PP_LIST_FOLD_RIGHT_CHECK_, BOOST_PP_LIST_FOLD_RIGHT_ ## n(BOOST_PP_NIL, BOOST_PP_NIL, BOOST_PP_NIL))))\n# else\n#    define BOOST_PP_WHILE_P(n) BOOST_PP_BITAND(BOOST_PP_CAT(BOOST_PP_WHILE_CHECK_, BOOST_PP_WHILE_ ## n(BOOST_PP_WHILE_F, BOOST_PP_NIL, BOOST_PP_NIL)), BOOST_PP_CAT(BOOST_PP_LIST_FOLD_LEFT_CHECK_, BOOST_PP_LIST_FOLD_LEFT_ ## n(BOOST_PP_NIL, BOOST_PP_NIL, BOOST_PP_NIL)))\n# endif\n#\n# define BOOST_PP_WHILE_F(d, _) 0\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    include <boost/preprocessor/control/detail/edg/while.hpp>\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    
include <boost/preprocessor/control/detail/msvc/while.hpp>\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n#    include <boost/preprocessor/control/detail/dmc/while.hpp>\n# else\n#    include <boost/preprocessor/control/detail/while.hpp>\n# endif\n#\n# define BOOST_PP_WHILE_257(p, o, s) BOOST_PP_ERROR(0x0001)\n#\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_NIL 1\n#\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_1(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_2(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_3(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_4(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_5(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_6(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_7(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_8(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_9(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_10(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_11(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_12(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_13(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_14(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_15(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_16(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_17(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_18(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_19(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_20(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_21(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_22(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_23(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_24(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_25(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_26(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_27(p, o, s) 0\n# define 
BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_28(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_29(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_30(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_31(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_32(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_33(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_34(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_35(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_36(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_37(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_38(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_39(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_40(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_41(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_42(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_43(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_44(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_45(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_46(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_47(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_48(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_49(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_50(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_51(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_52(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_53(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_54(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_55(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_56(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_57(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_58(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_59(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_60(p, o, s) 0\n# define 
BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_61(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_62(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_63(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_64(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_65(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_66(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_67(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_68(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_69(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_70(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_71(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_72(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_73(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_74(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_75(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_76(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_77(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_78(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_79(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_80(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_81(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_82(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_83(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_84(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_85(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_86(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_87(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_88(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_89(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_90(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_91(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_92(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_93(p, o, s) 0\n# define 
BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_94(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_95(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_96(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_97(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_98(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_99(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_100(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_101(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_102(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_103(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_104(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_105(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_106(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_107(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_108(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_109(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_110(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_111(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_112(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_113(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_114(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_115(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_116(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_117(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_118(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_119(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_120(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_121(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_122(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_123(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_124(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_125(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_126(p, o, s) 0\n# 
define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_127(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_128(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_129(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_130(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_131(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_132(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_133(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_134(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_135(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_136(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_137(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_138(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_139(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_140(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_141(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_142(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_143(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_144(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_145(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_146(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_147(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_148(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_149(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_150(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_151(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_152(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_153(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_154(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_155(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_156(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_157(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_158(p, o, s) 0\n# define 
BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_159(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_160(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_161(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_162(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_163(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_164(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_165(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_166(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_167(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_168(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_169(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_170(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_171(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_172(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_173(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_174(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_175(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_176(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_177(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_178(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_179(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_180(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_181(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_182(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_183(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_184(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_185(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_186(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_187(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_188(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_189(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_190(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_191(p, o, 
s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_192(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_193(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_194(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_195(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_196(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_197(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_198(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_199(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_200(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_201(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_202(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_203(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_204(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_205(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_206(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_207(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_208(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_209(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_210(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_211(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_212(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_213(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_214(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_215(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_216(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_217(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_218(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_219(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_220(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_221(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_222(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_223(p, o, s) 0\n# define 
BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_224(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_225(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_226(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_227(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_228(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_229(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_230(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_231(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_232(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_233(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_234(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_235(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_236(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_237(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_238(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_239(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_240(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_241(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_242(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_243(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_244(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_245(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_246(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_247(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_248(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_249(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_250(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_251(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_252(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_253(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_254(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_255(p, o, s) 0\n# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_256(p, o, 
s) 0\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/debug/error.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_DEBUG_ERROR_HPP\n# define BOOST_PREPROCESSOR_DEBUG_ERROR_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_ERROR */\n#\n# if BOOST_PP_CONFIG_ERRORS\n#    define BOOST_PP_ERROR(code) BOOST_PP_CAT(BOOST_PP_ERROR_, code)\n# endif\n#\n# define BOOST_PP_ERROR_0x0000 BOOST_PP_ERROR(0x0000, BOOST_PP_INDEX_OUT_OF_BOUNDS)\n# define BOOST_PP_ERROR_0x0001 BOOST_PP_ERROR(0x0001, BOOST_PP_WHILE_OVERFLOW)\n# define BOOST_PP_ERROR_0x0002 BOOST_PP_ERROR(0x0002, BOOST_PP_FOR_OVERFLOW)\n# define BOOST_PP_ERROR_0x0003 BOOST_PP_ERROR(0x0003, BOOST_PP_REPEAT_OVERFLOW)\n# define BOOST_PP_ERROR_0x0004 BOOST_PP_ERROR(0x0004, BOOST_PP_LIST_FOLD_OVERFLOW)\n# define BOOST_PP_ERROR_0x0005 BOOST_PP_ERROR(0x0005, BOOST_PP_SEQ_FOLD_OVERFLOW)\n# define BOOST_PP_ERROR_0x0006 BOOST_PP_ERROR(0x0006, BOOST_PP_ARITHMETIC_OVERFLOW)\n# define BOOST_PP_ERROR_0x0007 BOOST_PP_ERROR(0x0007, BOOST_PP_DIVISION_BY_ZERO)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/dec.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_DEC_HPP\n# define BOOST_PREPROCESSOR_DEC_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/detail/auto_rec.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n#     include <boost/preprocessor/detail/dmc/auto_rec.hpp>\n# else\n#\n# ifndef BOOST_PREPROCESSOR_DETAIL_AUTO_REC_HPP\n# define BOOST_PREPROCESSOR_DETAIL_AUTO_REC_HPP\n#\n# include <boost/preprocessor/control/iif.hpp>\n#\n# /* BOOST_PP_AUTO_REC */\n#\n# define BOOST_PP_AUTO_REC(pred, n) BOOST_PP_NODE_ENTRY_ ## n(pred)\n#\n# define BOOST_PP_NODE_ENTRY_256(p) BOOST_PP_NODE_128(p)(p)(p)(p)(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_128(p) BOOST_PP_NODE_64(p)(p)(p)(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_64(p) BOOST_PP_NODE_32(p)(p)(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_32(p) BOOST_PP_NODE_16(p)(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_16(p) BOOST_PP_NODE_8(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_8(p) BOOST_PP_NODE_4(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_4(p) BOOST_PP_NODE_2(p)(p)\n# define BOOST_PP_NODE_ENTRY_2(p) BOOST_PP_NODE_1(p)\n#\n# define BOOST_PP_NODE_128(p) BOOST_PP_IIF(p(128), BOOST_PP_NODE_64, BOOST_PP_NODE_192)\n#    define BOOST_PP_NODE_64(p) BOOST_PP_IIF(p(64), BOOST_PP_NODE_32, BOOST_PP_NODE_96)\n#        define BOOST_PP_NODE_32(p) BOOST_PP_IIF(p(32), BOOST_PP_NODE_16, BOOST_PP_NODE_48)\n#            define BOOST_PP_NODE_16(p) BOOST_PP_IIF(p(16), BOOST_PP_NODE_8, BOOST_PP_NODE_24)\n#                define 
BOOST_PP_NODE_8(p) BOOST_PP_IIF(p(8), BOOST_PP_NODE_4, BOOST_PP_NODE_12)\n#                    define BOOST_PP_NODE_4(p) BOOST_PP_IIF(p(4), BOOST_PP_NODE_2, BOOST_PP_NODE_6)\n#                        define BOOST_PP_NODE_2(p) BOOST_PP_IIF(p(2), BOOST_PP_NODE_1, BOOST_PP_NODE_3)\n#                            define BOOST_PP_NODE_1(p) BOOST_PP_IIF(p(1), 1, 2)\n#                            define BOOST_PP_NODE_3(p) BOOST_PP_IIF(p(3), 3, 4)\n#                        define BOOST_PP_NODE_6(p) BOOST_PP_IIF(p(6), BOOST_PP_NODE_5, BOOST_PP_NODE_7)\n#                            define BOOST_PP_NODE_5(p) BOOST_PP_IIF(p(5), 5, 6)\n#                            define BOOST_PP_NODE_7(p) BOOST_PP_IIF(p(7), 7, 8)\n#                    define BOOST_PP_NODE_12(p) BOOST_PP_IIF(p(12), BOOST_PP_NODE_10, BOOST_PP_NODE_14)\n#                        define BOOST_PP_NODE_10(p) BOOST_PP_IIF(p(10), BOOST_PP_NODE_9, BOOST_PP_NODE_11)\n#                            define BOOST_PP_NODE_9(p) BOOST_PP_IIF(p(9), 9, 10)\n#                            define BOOST_PP_NODE_11(p) BOOST_PP_IIF(p(11), 11, 12)\n#                        define BOOST_PP_NODE_14(p) BOOST_PP_IIF(p(14), BOOST_PP_NODE_13, BOOST_PP_NODE_15)\n#                            define BOOST_PP_NODE_13(p) BOOST_PP_IIF(p(13), 13, 14)\n#                            define BOOST_PP_NODE_15(p) BOOST_PP_IIF(p(15), 15, 16)\n#                define BOOST_PP_NODE_24(p) BOOST_PP_IIF(p(24), BOOST_PP_NODE_20, BOOST_PP_NODE_28)\n#                    define BOOST_PP_NODE_20(p) BOOST_PP_IIF(p(20), BOOST_PP_NODE_18, BOOST_PP_NODE_22)\n#                        define BOOST_PP_NODE_18(p) BOOST_PP_IIF(p(18), BOOST_PP_NODE_17, BOOST_PP_NODE_19)\n#                            define BOOST_PP_NODE_17(p) BOOST_PP_IIF(p(17), 17, 18)\n#                            define BOOST_PP_NODE_19(p) BOOST_PP_IIF(p(19), 19, 20)\n#                        define BOOST_PP_NODE_22(p) BOOST_PP_IIF(p(22), BOOST_PP_NODE_21, BOOST_PP_NODE_23)\n#                            define 
BOOST_PP_NODE_21(p) BOOST_PP_IIF(p(21), 21, 22)\n#                            define BOOST_PP_NODE_23(p) BOOST_PP_IIF(p(23), 23, 24)\n#                    define BOOST_PP_NODE_28(p) BOOST_PP_IIF(p(28), BOOST_PP_NODE_26, BOOST_PP_NODE_30)\n#                        define BOOST_PP_NODE_26(p) BOOST_PP_IIF(p(26), BOOST_PP_NODE_25, BOOST_PP_NODE_27)\n#                            define BOOST_PP_NODE_25(p) BOOST_PP_IIF(p(25), 25, 26)\n#                            define BOOST_PP_NODE_27(p) BOOST_PP_IIF(p(27), 27, 28)\n#                        define BOOST_PP_NODE_30(p) BOOST_PP_IIF(p(30), BOOST_PP_NODE_29, BOOST_PP_NODE_31)\n#                            define BOOST_PP_NODE_29(p) BOOST_PP_IIF(p(29), 29, 30)\n#                            define BOOST_PP_NODE_31(p) BOOST_PP_IIF(p(31), 31, 32)\n#            define BOOST_PP_NODE_48(p) BOOST_PP_IIF(p(48), BOOST_PP_NODE_40, BOOST_PP_NODE_56)\n#                define BOOST_PP_NODE_40(p) BOOST_PP_IIF(p(40), BOOST_PP_NODE_36, BOOST_PP_NODE_44)\n#                    define BOOST_PP_NODE_36(p) BOOST_PP_IIF(p(36), BOOST_PP_NODE_34, BOOST_PP_NODE_38)\n#                        define BOOST_PP_NODE_34(p) BOOST_PP_IIF(p(34), BOOST_PP_NODE_33, BOOST_PP_NODE_35)\n#                            define BOOST_PP_NODE_33(p) BOOST_PP_IIF(p(33), 33, 34)\n#                            define BOOST_PP_NODE_35(p) BOOST_PP_IIF(p(35), 35, 36)\n#                        define BOOST_PP_NODE_38(p) BOOST_PP_IIF(p(38), BOOST_PP_NODE_37, BOOST_PP_NODE_39)\n#                            define BOOST_PP_NODE_37(p) BOOST_PP_IIF(p(37), 37, 38)\n#                            define BOOST_PP_NODE_39(p) BOOST_PP_IIF(p(39), 39, 40)\n#                    define BOOST_PP_NODE_44(p) BOOST_PP_IIF(p(44), BOOST_PP_NODE_42, BOOST_PP_NODE_46)\n#                        define BOOST_PP_NODE_42(p) BOOST_PP_IIF(p(42), BOOST_PP_NODE_41, BOOST_PP_NODE_43)\n#                            define BOOST_PP_NODE_41(p) BOOST_PP_IIF(p(41), 41, 42)\n#                            define 
BOOST_PP_NODE_43(p) BOOST_PP_IIF(p(43), 43, 44)\n#                        define BOOST_PP_NODE_46(p) BOOST_PP_IIF(p(46), BOOST_PP_NODE_45, BOOST_PP_NODE_47)\n#                            define BOOST_PP_NODE_45(p) BOOST_PP_IIF(p(45), 45, 46)\n#                            define BOOST_PP_NODE_47(p) BOOST_PP_IIF(p(47), 47, 48)\n#                define BOOST_PP_NODE_56(p) BOOST_PP_IIF(p(56), BOOST_PP_NODE_52, BOOST_PP_NODE_60)\n#                    define BOOST_PP_NODE_52(p) BOOST_PP_IIF(p(52), BOOST_PP_NODE_50, BOOST_PP_NODE_54)\n#                        define BOOST_PP_NODE_50(p) BOOST_PP_IIF(p(50), BOOST_PP_NODE_49, BOOST_PP_NODE_51)\n#                            define BOOST_PP_NODE_49(p) BOOST_PP_IIF(p(49), 49, 50)\n#                            define BOOST_PP_NODE_51(p) BOOST_PP_IIF(p(51), 51, 52)\n#                        define BOOST_PP_NODE_54(p) BOOST_PP_IIF(p(54), BOOST_PP_NODE_53, BOOST_PP_NODE_55)\n#                            define BOOST_PP_NODE_53(p) BOOST_PP_IIF(p(53), 53, 54)\n#                            define BOOST_PP_NODE_55(p) BOOST_PP_IIF(p(55), 55, 56)\n#                    define BOOST_PP_NODE_60(p) BOOST_PP_IIF(p(60), BOOST_PP_NODE_58, BOOST_PP_NODE_62)\n#                        define BOOST_PP_NODE_58(p) BOOST_PP_IIF(p(58), BOOST_PP_NODE_57, BOOST_PP_NODE_59)\n#                            define BOOST_PP_NODE_57(p) BOOST_PP_IIF(p(57), 57, 58)\n#                            define BOOST_PP_NODE_59(p) BOOST_PP_IIF(p(59), 59, 60)\n#                        define BOOST_PP_NODE_62(p) BOOST_PP_IIF(p(62), BOOST_PP_NODE_61, BOOST_PP_NODE_63)\n#                            define BOOST_PP_NODE_61(p) BOOST_PP_IIF(p(61), 61, 62)\n#                            define BOOST_PP_NODE_63(p) BOOST_PP_IIF(p(63), 63, 64)\n#        define BOOST_PP_NODE_96(p) BOOST_PP_IIF(p(96), BOOST_PP_NODE_80, BOOST_PP_NODE_112)\n#            define BOOST_PP_NODE_80(p) BOOST_PP_IIF(p(80), BOOST_PP_NODE_72, BOOST_PP_NODE_88)\n#                define BOOST_PP_NODE_72(p) 
BOOST_PP_IIF(p(72), BOOST_PP_NODE_68, BOOST_PP_NODE_76)\n#                    define BOOST_PP_NODE_68(p) BOOST_PP_IIF(p(68), BOOST_PP_NODE_66, BOOST_PP_NODE_70)\n#                        define BOOST_PP_NODE_66(p) BOOST_PP_IIF(p(66), BOOST_PP_NODE_65, BOOST_PP_NODE_67)\n#                            define BOOST_PP_NODE_65(p) BOOST_PP_IIF(p(65), 65, 66)\n#                            define BOOST_PP_NODE_67(p) BOOST_PP_IIF(p(67), 67, 68)\n#                        define BOOST_PP_NODE_70(p) BOOST_PP_IIF(p(70), BOOST_PP_NODE_69, BOOST_PP_NODE_71)\n#                            define BOOST_PP_NODE_69(p) BOOST_PP_IIF(p(69), 69, 70)\n#                            define BOOST_PP_NODE_71(p) BOOST_PP_IIF(p(71), 71, 72)\n#                    define BOOST_PP_NODE_76(p) BOOST_PP_IIF(p(76), BOOST_PP_NODE_74, BOOST_PP_NODE_78)\n#                        define BOOST_PP_NODE_74(p) BOOST_PP_IIF(p(74), BOOST_PP_NODE_73, BOOST_PP_NODE_75)\n#                            define BOOST_PP_NODE_73(p) BOOST_PP_IIF(p(73), 73, 74)\n#                            define BOOST_PP_NODE_75(p) BOOST_PP_IIF(p(75), 75, 76)\n#                        define BOOST_PP_NODE_78(p) BOOST_PP_IIF(p(78), BOOST_PP_NODE_77, BOOST_PP_NODE_79)\n#                            define BOOST_PP_NODE_77(p) BOOST_PP_IIF(p(77), 77, 78)\n#                            define BOOST_PP_NODE_79(p) BOOST_PP_IIF(p(79), 79, 80)\n#                define BOOST_PP_NODE_88(p) BOOST_PP_IIF(p(88), BOOST_PP_NODE_84, BOOST_PP_NODE_92)\n#                    define BOOST_PP_NODE_84(p) BOOST_PP_IIF(p(84), BOOST_PP_NODE_82, BOOST_PP_NODE_86)\n#                        define BOOST_PP_NODE_82(p) BOOST_PP_IIF(p(82), BOOST_PP_NODE_81, BOOST_PP_NODE_83)\n#                            define BOOST_PP_NODE_81(p) BOOST_PP_IIF(p(81), 81, 82)\n#                            define BOOST_PP_NODE_83(p) BOOST_PP_IIF(p(83), 83, 84)\n#                        define BOOST_PP_NODE_86(p) BOOST_PP_IIF(p(86), BOOST_PP_NODE_85, BOOST_PP_NODE_87)\n#                      
      define BOOST_PP_NODE_85(p) BOOST_PP_IIF(p(85), 85, 86)\n#                            define BOOST_PP_NODE_87(p) BOOST_PP_IIF(p(87), 87, 88)\n#                    define BOOST_PP_NODE_92(p) BOOST_PP_IIF(p(92), BOOST_PP_NODE_90, BOOST_PP_NODE_94)\n#                        define BOOST_PP_NODE_90(p) BOOST_PP_IIF(p(90), BOOST_PP_NODE_89, BOOST_PP_NODE_91)\n#                            define BOOST_PP_NODE_89(p) BOOST_PP_IIF(p(89), 89, 90)\n#                            define BOOST_PP_NODE_91(p) BOOST_PP_IIF(p(91), 91, 92)\n#                        define BOOST_PP_NODE_94(p) BOOST_PP_IIF(p(94), BOOST_PP_NODE_93, BOOST_PP_NODE_95)\n#                            define BOOST_PP_NODE_93(p) BOOST_PP_IIF(p(93), 93, 94)\n#                            define BOOST_PP_NODE_95(p) BOOST_PP_IIF(p(95), 95, 96)\n#            define BOOST_PP_NODE_112(p) BOOST_PP_IIF(p(112), BOOST_PP_NODE_104, BOOST_PP_NODE_120)\n#                define BOOST_PP_NODE_104(p) BOOST_PP_IIF(p(104), BOOST_PP_NODE_100, BOOST_PP_NODE_108)\n#                    define BOOST_PP_NODE_100(p) BOOST_PP_IIF(p(100), BOOST_PP_NODE_98, BOOST_PP_NODE_102)\n#                        define BOOST_PP_NODE_98(p) BOOST_PP_IIF(p(98), BOOST_PP_NODE_97, BOOST_PP_NODE_99)\n#                            define BOOST_PP_NODE_97(p) BOOST_PP_IIF(p(97), 97, 98)\n#                            define BOOST_PP_NODE_99(p) BOOST_PP_IIF(p(99), 99, 100)\n#                        define BOOST_PP_NODE_102(p) BOOST_PP_IIF(p(102), BOOST_PP_NODE_101, BOOST_PP_NODE_103)\n#                            define BOOST_PP_NODE_101(p) BOOST_PP_IIF(p(101), 101, 102)\n#                            define BOOST_PP_NODE_103(p) BOOST_PP_IIF(p(103), 103, 104)\n#                    define BOOST_PP_NODE_108(p) BOOST_PP_IIF(p(108), BOOST_PP_NODE_106, BOOST_PP_NODE_110)\n#                        define BOOST_PP_NODE_106(p) BOOST_PP_IIF(p(106), BOOST_PP_NODE_105, BOOST_PP_NODE_107)\n#                            define BOOST_PP_NODE_105(p) BOOST_PP_IIF(p(105), 105, 
106)\n#                            define BOOST_PP_NODE_107(p) BOOST_PP_IIF(p(107), 107, 108)\n#                        define BOOST_PP_NODE_110(p) BOOST_PP_IIF(p(110), BOOST_PP_NODE_109, BOOST_PP_NODE_111)\n#                            define BOOST_PP_NODE_109(p) BOOST_PP_IIF(p(109), 109, 110)\n#                            define BOOST_PP_NODE_111(p) BOOST_PP_IIF(p(111), 111, 112)\n#                define BOOST_PP_NODE_120(p) BOOST_PP_IIF(p(120), BOOST_PP_NODE_116, BOOST_PP_NODE_124)\n#                    define BOOST_PP_NODE_116(p) BOOST_PP_IIF(p(116), BOOST_PP_NODE_114, BOOST_PP_NODE_118)\n#                        define BOOST_PP_NODE_114(p) BOOST_PP_IIF(p(114), BOOST_PP_NODE_113, BOOST_PP_NODE_115)\n#                            define BOOST_PP_NODE_113(p) BOOST_PP_IIF(p(113), 113, 114)\n#                            define BOOST_PP_NODE_115(p) BOOST_PP_IIF(p(115), 115, 116)\n#                        define BOOST_PP_NODE_118(p) BOOST_PP_IIF(p(118), BOOST_PP_NODE_117, BOOST_PP_NODE_119)\n#                            define BOOST_PP_NODE_117(p) BOOST_PP_IIF(p(117), 117, 118)\n#                            define BOOST_PP_NODE_119(p) BOOST_PP_IIF(p(119), 119, 120)\n#                    define BOOST_PP_NODE_124(p) BOOST_PP_IIF(p(124), BOOST_PP_NODE_122, BOOST_PP_NODE_126)\n#                        define BOOST_PP_NODE_122(p) BOOST_PP_IIF(p(122), BOOST_PP_NODE_121, BOOST_PP_NODE_123)\n#                            define BOOST_PP_NODE_121(p) BOOST_PP_IIF(p(121), 121, 122)\n#                            define BOOST_PP_NODE_123(p) BOOST_PP_IIF(p(123), 123, 124)\n#                        define BOOST_PP_NODE_126(p) BOOST_PP_IIF(p(126), BOOST_PP_NODE_125, BOOST_PP_NODE_127)\n#                            define BOOST_PP_NODE_125(p) BOOST_PP_IIF(p(125), 125, 126)\n#                            define BOOST_PP_NODE_127(p) BOOST_PP_IIF(p(127), 127, 128)\n#    define BOOST_PP_NODE_192(p) BOOST_PP_IIF(p(192), BOOST_PP_NODE_160, BOOST_PP_NODE_224)\n#        define 
BOOST_PP_NODE_160(p) BOOST_PP_IIF(p(160), BOOST_PP_NODE_144, BOOST_PP_NODE_176)\n#            define BOOST_PP_NODE_144(p) BOOST_PP_IIF(p(144), BOOST_PP_NODE_136, BOOST_PP_NODE_152)\n#                define BOOST_PP_NODE_136(p) BOOST_PP_IIF(p(136), BOOST_PP_NODE_132, BOOST_PP_NODE_140)\n#                    define BOOST_PP_NODE_132(p) BOOST_PP_IIF(p(132), BOOST_PP_NODE_130, BOOST_PP_NODE_134)\n#                        define BOOST_PP_NODE_130(p) BOOST_PP_IIF(p(130), BOOST_PP_NODE_129, BOOST_PP_NODE_131)\n#                            define BOOST_PP_NODE_129(p) BOOST_PP_IIF(p(129), 129, 130)\n#                            define BOOST_PP_NODE_131(p) BOOST_PP_IIF(p(131), 131, 132)\n#                        define BOOST_PP_NODE_134(p) BOOST_PP_IIF(p(134), BOOST_PP_NODE_133, BOOST_PP_NODE_135)\n#                            define BOOST_PP_NODE_133(p) BOOST_PP_IIF(p(133), 133, 134)\n#                            define BOOST_PP_NODE_135(p) BOOST_PP_IIF(p(135), 135, 136)\n#                    define BOOST_PP_NODE_140(p) BOOST_PP_IIF(p(140), BOOST_PP_NODE_138, BOOST_PP_NODE_142)\n#                        define BOOST_PP_NODE_138(p) BOOST_PP_IIF(p(138), BOOST_PP_NODE_137, BOOST_PP_NODE_139)\n#                            define BOOST_PP_NODE_137(p) BOOST_PP_IIF(p(137), 137, 138)\n#                            define BOOST_PP_NODE_139(p) BOOST_PP_IIF(p(139), 139, 140)\n#                        define BOOST_PP_NODE_142(p) BOOST_PP_IIF(p(142), BOOST_PP_NODE_141, BOOST_PP_NODE_143)\n#                            define BOOST_PP_NODE_141(p) BOOST_PP_IIF(p(141), 141, 142)\n#                            define BOOST_PP_NODE_143(p) BOOST_PP_IIF(p(143), 143, 144)\n#                define BOOST_PP_NODE_152(p) BOOST_PP_IIF(p(152), BOOST_PP_NODE_148, BOOST_PP_NODE_156)\n#                    define BOOST_PP_NODE_148(p) BOOST_PP_IIF(p(148), BOOST_PP_NODE_146, BOOST_PP_NODE_150)\n#                        define BOOST_PP_NODE_146(p) BOOST_PP_IIF(p(146), BOOST_PP_NODE_145, BOOST_PP_NODE_147)\n#   
                         define BOOST_PP_NODE_145(p) BOOST_PP_IIF(p(145), 145, 146)\n#                            define BOOST_PP_NODE_147(p) BOOST_PP_IIF(p(147), 147, 148)\n#                        define BOOST_PP_NODE_150(p) BOOST_PP_IIF(p(150), BOOST_PP_NODE_149, BOOST_PP_NODE_151)\n#                            define BOOST_PP_NODE_149(p) BOOST_PP_IIF(p(149), 149, 150)\n#                            define BOOST_PP_NODE_151(p) BOOST_PP_IIF(p(151), 151, 152)\n#                    define BOOST_PP_NODE_156(p) BOOST_PP_IIF(p(156), BOOST_PP_NODE_154, BOOST_PP_NODE_158)\n#                        define BOOST_PP_NODE_154(p) BOOST_PP_IIF(p(154), BOOST_PP_NODE_153, BOOST_PP_NODE_155)\n#                            define BOOST_PP_NODE_153(p) BOOST_PP_IIF(p(153), 153, 154)\n#                            define BOOST_PP_NODE_155(p) BOOST_PP_IIF(p(155), 155, 156)\n#                        define BOOST_PP_NODE_158(p) BOOST_PP_IIF(p(158), BOOST_PP_NODE_157, BOOST_PP_NODE_159)\n#                            define BOOST_PP_NODE_157(p) BOOST_PP_IIF(p(157), 157, 158)\n#                            define BOOST_PP_NODE_159(p) BOOST_PP_IIF(p(159), 159, 160)\n#            define BOOST_PP_NODE_176(p) BOOST_PP_IIF(p(176), BOOST_PP_NODE_168, BOOST_PP_NODE_184)\n#                define BOOST_PP_NODE_168(p) BOOST_PP_IIF(p(168), BOOST_PP_NODE_164, BOOST_PP_NODE_172)\n#                    define BOOST_PP_NODE_164(p) BOOST_PP_IIF(p(164), BOOST_PP_NODE_162, BOOST_PP_NODE_166)\n#                        define BOOST_PP_NODE_162(p) BOOST_PP_IIF(p(162), BOOST_PP_NODE_161, BOOST_PP_NODE_163)\n#                            define BOOST_PP_NODE_161(p) BOOST_PP_IIF(p(161), 161, 162)\n#                            define BOOST_PP_NODE_163(p) BOOST_PP_IIF(p(163), 163, 164)\n#                        define BOOST_PP_NODE_166(p) BOOST_PP_IIF(p(166), BOOST_PP_NODE_165, BOOST_PP_NODE_167)\n#                            define BOOST_PP_NODE_165(p) BOOST_PP_IIF(p(165), 165, 166)\n#                            define 
BOOST_PP_NODE_167(p) BOOST_PP_IIF(p(167), 167, 168)\n#                    define BOOST_PP_NODE_172(p) BOOST_PP_IIF(p(172), BOOST_PP_NODE_170, BOOST_PP_NODE_174)\n#                        define BOOST_PP_NODE_170(p) BOOST_PP_IIF(p(170), BOOST_PP_NODE_169, BOOST_PP_NODE_171)\n#                            define BOOST_PP_NODE_169(p) BOOST_PP_IIF(p(169), 169, 170)\n#                            define BOOST_PP_NODE_171(p) BOOST_PP_IIF(p(171), 171, 172)\n#                        define BOOST_PP_NODE_174(p) BOOST_PP_IIF(p(174), BOOST_PP_NODE_173, BOOST_PP_NODE_175)\n#                            define BOOST_PP_NODE_173(p) BOOST_PP_IIF(p(173), 173, 174)\n#                            define BOOST_PP_NODE_175(p) BOOST_PP_IIF(p(175), 175, 176)\n#                define BOOST_PP_NODE_184(p) BOOST_PP_IIF(p(184), BOOST_PP_NODE_180, BOOST_PP_NODE_188)\n#                    define BOOST_PP_NODE_180(p) BOOST_PP_IIF(p(180), BOOST_PP_NODE_178, BOOST_PP_NODE_182)\n#                        define BOOST_PP_NODE_178(p) BOOST_PP_IIF(p(178), BOOST_PP_NODE_177, BOOST_PP_NODE_179)\n#                            define BOOST_PP_NODE_177(p) BOOST_PP_IIF(p(177), 177, 178)\n#                            define BOOST_PP_NODE_179(p) BOOST_PP_IIF(p(179), 179, 180)\n#                        define BOOST_PP_NODE_182(p) BOOST_PP_IIF(p(182), BOOST_PP_NODE_181, BOOST_PP_NODE_183)\n#                            define BOOST_PP_NODE_181(p) BOOST_PP_IIF(p(181), 181, 182)\n#                            define BOOST_PP_NODE_183(p) BOOST_PP_IIF(p(183), 183, 184)\n#                    define BOOST_PP_NODE_188(p) BOOST_PP_IIF(p(188), BOOST_PP_NODE_186, BOOST_PP_NODE_190)\n#                        define BOOST_PP_NODE_186(p) BOOST_PP_IIF(p(186), BOOST_PP_NODE_185, BOOST_PP_NODE_187)\n#                            define BOOST_PP_NODE_185(p) BOOST_PP_IIF(p(185), 185, 186)\n#                            define BOOST_PP_NODE_187(p) BOOST_PP_IIF(p(187), 187, 188)\n#                        define BOOST_PP_NODE_190(p) 
BOOST_PP_IIF(p(190), BOOST_PP_NODE_189, BOOST_PP_NODE_191)\n#                            define BOOST_PP_NODE_189(p) BOOST_PP_IIF(p(189), 189, 190)\n#                            define BOOST_PP_NODE_191(p) BOOST_PP_IIF(p(191), 191, 192)\n#        define BOOST_PP_NODE_224(p) BOOST_PP_IIF(p(224), BOOST_PP_NODE_208, BOOST_PP_NODE_240)\n#            define BOOST_PP_NODE_208(p) BOOST_PP_IIF(p(208), BOOST_PP_NODE_200, BOOST_PP_NODE_216)\n#                define BOOST_PP_NODE_200(p) BOOST_PP_IIF(p(200), BOOST_PP_NODE_196, BOOST_PP_NODE_204)\n#                    define BOOST_PP_NODE_196(p) BOOST_PP_IIF(p(196), BOOST_PP_NODE_194, BOOST_PP_NODE_198)\n#                        define BOOST_PP_NODE_194(p) BOOST_PP_IIF(p(194), BOOST_PP_NODE_193, BOOST_PP_NODE_195)\n#                            define BOOST_PP_NODE_193(p) BOOST_PP_IIF(p(193), 193, 194)\n#                            define BOOST_PP_NODE_195(p) BOOST_PP_IIF(p(195), 195, 196)\n#                        define BOOST_PP_NODE_198(p) BOOST_PP_IIF(p(198), BOOST_PP_NODE_197, BOOST_PP_NODE_199)\n#                            define BOOST_PP_NODE_197(p) BOOST_PP_IIF(p(197), 197, 198)\n#                            define BOOST_PP_NODE_199(p) BOOST_PP_IIF(p(199), 199, 200)\n#                    define BOOST_PP_NODE_204(p) BOOST_PP_IIF(p(204), BOOST_PP_NODE_202, BOOST_PP_NODE_206)\n#                        define BOOST_PP_NODE_202(p) BOOST_PP_IIF(p(202), BOOST_PP_NODE_201, BOOST_PP_NODE_203)\n#                            define BOOST_PP_NODE_201(p) BOOST_PP_IIF(p(201), 201, 202)\n#                            define BOOST_PP_NODE_203(p) BOOST_PP_IIF(p(203), 203, 204)\n#                        define BOOST_PP_NODE_206(p) BOOST_PP_IIF(p(206), BOOST_PP_NODE_205, BOOST_PP_NODE_207)\n#                            define BOOST_PP_NODE_205(p) BOOST_PP_IIF(p(205), 205, 206)\n#                            define BOOST_PP_NODE_207(p) BOOST_PP_IIF(p(207), 207, 208)\n#                define BOOST_PP_NODE_216(p) BOOST_PP_IIF(p(216), 
BOOST_PP_NODE_212, BOOST_PP_NODE_220)\n#                    define BOOST_PP_NODE_212(p) BOOST_PP_IIF(p(212), BOOST_PP_NODE_210, BOOST_PP_NODE_214)\n#                        define BOOST_PP_NODE_210(p) BOOST_PP_IIF(p(210), BOOST_PP_NODE_209, BOOST_PP_NODE_211)\n#                            define BOOST_PP_NODE_209(p) BOOST_PP_IIF(p(209), 209, 210)\n#                            define BOOST_PP_NODE_211(p) BOOST_PP_IIF(p(211), 211, 212)\n#                        define BOOST_PP_NODE_214(p) BOOST_PP_IIF(p(214), BOOST_PP_NODE_213, BOOST_PP_NODE_215)\n#                            define BOOST_PP_NODE_213(p) BOOST_PP_IIF(p(213), 213, 214)\n#                            define BOOST_PP_NODE_215(p) BOOST_PP_IIF(p(215), 215, 216)\n#                    define BOOST_PP_NODE_220(p) BOOST_PP_IIF(p(220), BOOST_PP_NODE_218, BOOST_PP_NODE_222)\n#                        define BOOST_PP_NODE_218(p) BOOST_PP_IIF(p(218), BOOST_PP_NODE_217, BOOST_PP_NODE_219)\n#                            define BOOST_PP_NODE_217(p) BOOST_PP_IIF(p(217), 217, 218)\n#                            define BOOST_PP_NODE_219(p) BOOST_PP_IIF(p(219), 219, 220)\n#                        define BOOST_PP_NODE_222(p) BOOST_PP_IIF(p(222), BOOST_PP_NODE_221, BOOST_PP_NODE_223)\n#                            define BOOST_PP_NODE_221(p) BOOST_PP_IIF(p(221), 221, 222)\n#                            define BOOST_PP_NODE_223(p) BOOST_PP_IIF(p(223), 223, 224)\n#            define BOOST_PP_NODE_240(p) BOOST_PP_IIF(p(240), BOOST_PP_NODE_232, BOOST_PP_NODE_248)\n#                define BOOST_PP_NODE_232(p) BOOST_PP_IIF(p(232), BOOST_PP_NODE_228, BOOST_PP_NODE_236)\n#                    define BOOST_PP_NODE_228(p) BOOST_PP_IIF(p(228), BOOST_PP_NODE_226, BOOST_PP_NODE_230)\n#                        define BOOST_PP_NODE_226(p) BOOST_PP_IIF(p(226), BOOST_PP_NODE_225, BOOST_PP_NODE_227)\n#                            define BOOST_PP_NODE_225(p) BOOST_PP_IIF(p(225), 225, 226)\n#                            define BOOST_PP_NODE_227(p) 
BOOST_PP_IIF(p(227), 227, 228)\n#                        define BOOST_PP_NODE_230(p) BOOST_PP_IIF(p(230), BOOST_PP_NODE_229, BOOST_PP_NODE_231)\n#                            define BOOST_PP_NODE_229(p) BOOST_PP_IIF(p(229), 229, 230)\n#                            define BOOST_PP_NODE_231(p) BOOST_PP_IIF(p(231), 231, 232)\n#                    define BOOST_PP_NODE_236(p) BOOST_PP_IIF(p(236), BOOST_PP_NODE_234, BOOST_PP_NODE_238)\n#                        define BOOST_PP_NODE_234(p) BOOST_PP_IIF(p(234), BOOST_PP_NODE_233, BOOST_PP_NODE_235)\n#                            define BOOST_PP_NODE_233(p) BOOST_PP_IIF(p(233), 233, 234)\n#                            define BOOST_PP_NODE_235(p) BOOST_PP_IIF(p(235), 235, 236)\n#                        define BOOST_PP_NODE_238(p) BOOST_PP_IIF(p(238), BOOST_PP_NODE_237, BOOST_PP_NODE_239)\n#                            define BOOST_PP_NODE_237(p) BOOST_PP_IIF(p(237), 237, 238)\n#                            define BOOST_PP_NODE_239(p) BOOST_PP_IIF(p(239), 239, 240)\n#                define BOOST_PP_NODE_248(p) BOOST_PP_IIF(p(248), BOOST_PP_NODE_244, BOOST_PP_NODE_252)\n#                    define BOOST_PP_NODE_244(p) BOOST_PP_IIF(p(244), BOOST_PP_NODE_242, BOOST_PP_NODE_246)\n#                        define BOOST_PP_NODE_242(p) BOOST_PP_IIF(p(242), BOOST_PP_NODE_241, BOOST_PP_NODE_243)\n#                            define BOOST_PP_NODE_241(p) BOOST_PP_IIF(p(241), 241, 242)\n#                            define BOOST_PP_NODE_243(p) BOOST_PP_IIF(p(243), 243, 244)\n#                        define BOOST_PP_NODE_246(p) BOOST_PP_IIF(p(246), BOOST_PP_NODE_245, BOOST_PP_NODE_247)\n#                            define BOOST_PP_NODE_245(p) BOOST_PP_IIF(p(245), 245, 246)\n#                            define BOOST_PP_NODE_247(p) BOOST_PP_IIF(p(247), 247, 248)\n#                    define BOOST_PP_NODE_252(p) BOOST_PP_IIF(p(252), BOOST_PP_NODE_250, BOOST_PP_NODE_254)\n#                        define BOOST_PP_NODE_250(p) BOOST_PP_IIF(p(250), 
BOOST_PP_NODE_249, BOOST_PP_NODE_251)\n#                            define BOOST_PP_NODE_249(p) BOOST_PP_IIF(p(249), 249, 250)\n#                            define BOOST_PP_NODE_251(p) BOOST_PP_IIF(p(251), 251, 252)\n#                        define BOOST_PP_NODE_254(p) BOOST_PP_IIF(p(254), BOOST_PP_NODE_253, BOOST_PP_NODE_255)\n#                            define BOOST_PP_NODE_253(p) BOOST_PP_IIF(p(253), 253, 254)\n#                            define BOOST_PP_NODE_255(p) BOOST_PP_IIF(p(255), 255, 256)\n#\n# endif\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/detail/check.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_DETAIL_CHECK_HPP\n# define BOOST_PREPROCESSOR_DETAIL_CHECK_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_CHECK */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_CHECK(x, type) BOOST_PP_CHECK_D(x, type)\n# else\n#    define BOOST_PP_CHECK(x, type) BOOST_PP_CHECK_OO((x, type))\n#    define BOOST_PP_CHECK_OO(par) BOOST_PP_CHECK_D ## par\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC() && ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n#    define BOOST_PP_CHECK_D(x, type) BOOST_PP_CHECK_1(BOOST_PP_CAT(BOOST_PP_CHECK_RESULT_, type x))\n#    define BOOST_PP_CHECK_1(chk) BOOST_PP_CHECK_2(chk)\n#    define BOOST_PP_CHECK_2(res, _) res\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_CHECK_D(x, type) BOOST_PP_CHECK_1(type x)\n#    define BOOST_PP_CHECK_1(chk) BOOST_PP_CHECK_2(chk)\n#    define BOOST_PP_CHECK_2(chk) BOOST_PP_CHECK_3((BOOST_PP_CHECK_RESULT_ ## chk))\n#    define BOOST_PP_CHECK_3(im) BOOST_PP_CHECK_5(BOOST_PP_CHECK_4 im)\n#    define BOOST_PP_CHECK_4(res, _) res\n#    define BOOST_PP_CHECK_5(res) res\n# else /* DMC */\n#    define BOOST_PP_CHECK_D(x, type) BOOST_PP_CHECK_OO((type x))\n#    define BOOST_PP_CHECK_OO(par) BOOST_PP_CHECK_0 ## par\n#    define 
BOOST_PP_CHECK_0(chk) BOOST_PP_CHECK_1(BOOST_PP_CAT(BOOST_PP_CHECK_RESULT_, chk))\n#    define BOOST_PP_CHECK_1(chk) BOOST_PP_CHECK_2(chk)\n#    define BOOST_PP_CHECK_2(res, _) res\n# endif\n#\n# define BOOST_PP_CHECK_RESULT_1 1, BOOST_PP_NIL\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/detail/dmc/auto_rec.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_DETAIL_AUTO_REC_HPP\n# define BOOST_PREPROCESSOR_DETAIL_AUTO_REC_HPP\n#\n# include <boost/preprocessor/control/iif.hpp>\n#\n# /* BOOST_PP_AUTO_REC */\n#\n# define BOOST_PP_AUTO_REC(pred, n) BOOST_PP_NODE_ENTRY_ ## n(pred)\n#\n# define BOOST_PP_NODE_ENTRY_256(p) BOOST_PP_NODE_128(p)(p)(p)(p)(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_128(p) BOOST_PP_NODE_64(p)(p)(p)(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_64(p) BOOST_PP_NODE_32(p)(p)(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_32(p) BOOST_PP_NODE_16(p)(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_16(p) BOOST_PP_NODE_8(p)(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_8(p) BOOST_PP_NODE_4(p)(p)(p)\n# define BOOST_PP_NODE_ENTRY_4(p) BOOST_PP_NODE_2(p)(p)\n# define BOOST_PP_NODE_ENTRY_2(p) BOOST_PP_NODE_1(p)\n#\n# define BOOST_PP_NODE_128(p) BOOST_PP_IIF(p##(128), BOOST_PP_NODE_64, BOOST_PP_NODE_192)\n#    define BOOST_PP_NODE_64(p) BOOST_PP_IIF(p##(64), BOOST_PP_NODE_32, BOOST_PP_NODE_96)\n#        define BOOST_PP_NODE_32(p) BOOST_PP_IIF(p##(32), BOOST_PP_NODE_16, BOOST_PP_NODE_48)\n#            define BOOST_PP_NODE_16(p) BOOST_PP_IIF(p##(16), BOOST_PP_NODE_8, BOOST_PP_NODE_24)\n#                define BOOST_PP_NODE_8(p) BOOST_PP_IIF(p##(8), BOOST_PP_NODE_4, BOOST_PP_NODE_12)\n#                    define BOOST_PP_NODE_4(p) BOOST_PP_IIF(p##(4), BOOST_PP_NODE_2, BOOST_PP_NODE_6)\n#       
                 define BOOST_PP_NODE_2(p) BOOST_PP_IIF(p##(2), BOOST_PP_NODE_1, BOOST_PP_NODE_3)\n#                            define BOOST_PP_NODE_1(p) BOOST_PP_IIF(p##(1), 1, 2)\n#                            define BOOST_PP_NODE_3(p) BOOST_PP_IIF(p##(3), 3, 4)\n#                        define BOOST_PP_NODE_6(p) BOOST_PP_IIF(p##(6), BOOST_PP_NODE_5, BOOST_PP_NODE_7)\n#                            define BOOST_PP_NODE_5(p) BOOST_PP_IIF(p##(5), 5, 6)\n#                            define BOOST_PP_NODE_7(p) BOOST_PP_IIF(p##(7), 7, 8)\n#                    define BOOST_PP_NODE_12(p) BOOST_PP_IIF(p##(12), BOOST_PP_NODE_10, BOOST_PP_NODE_14)\n#                        define BOOST_PP_NODE_10(p) BOOST_PP_IIF(p##(10), BOOST_PP_NODE_9, BOOST_PP_NODE_11)\n#                            define BOOST_PP_NODE_9(p) BOOST_PP_IIF(p##(9), 9, 10)\n#                            define BOOST_PP_NODE_11(p) BOOST_PP_IIF(p##(11), 11, 12)\n#                        define BOOST_PP_NODE_14(p) BOOST_PP_IIF(p##(14), BOOST_PP_NODE_13, BOOST_PP_NODE_15)\n#                            define BOOST_PP_NODE_13(p) BOOST_PP_IIF(p##(13), 13, 14)\n#                            define BOOST_PP_NODE_15(p) BOOST_PP_IIF(p##(15), 15, 16)\n#                define BOOST_PP_NODE_24(p) BOOST_PP_IIF(p##(24), BOOST_PP_NODE_20, BOOST_PP_NODE_28)\n#                    define BOOST_PP_NODE_20(p) BOOST_PP_IIF(p##(20), BOOST_PP_NODE_18, BOOST_PP_NODE_22)\n#                        define BOOST_PP_NODE_18(p) BOOST_PP_IIF(p##(18), BOOST_PP_NODE_17, BOOST_PP_NODE_19)\n#                            define BOOST_PP_NODE_17(p) BOOST_PP_IIF(p##(17), 17, 18)\n#                            define BOOST_PP_NODE_19(p) BOOST_PP_IIF(p##(19), 19, 20)\n#                        define BOOST_PP_NODE_22(p) BOOST_PP_IIF(p##(22), BOOST_PP_NODE_21, BOOST_PP_NODE_23)\n#                            define BOOST_PP_NODE_21(p) BOOST_PP_IIF(p##(21), 21, 22)\n#                            define BOOST_PP_NODE_23(p) BOOST_PP_IIF(p##(23), 23, 24)\n#        
            define BOOST_PP_NODE_28(p) BOOST_PP_IIF(p##(28), BOOST_PP_NODE_26, BOOST_PP_NODE_30)\n#                        define BOOST_PP_NODE_26(p) BOOST_PP_IIF(p##(26), BOOST_PP_NODE_25, BOOST_PP_NODE_27)\n#                            define BOOST_PP_NODE_25(p) BOOST_PP_IIF(p##(25), 25, 26)\n#                            define BOOST_PP_NODE_27(p) BOOST_PP_IIF(p##(27), 27, 28)\n#                        define BOOST_PP_NODE_30(p) BOOST_PP_IIF(p##(30), BOOST_PP_NODE_29, BOOST_PP_NODE_31)\n#                            define BOOST_PP_NODE_29(p) BOOST_PP_IIF(p##(29), 29, 30)\n#                            define BOOST_PP_NODE_31(p) BOOST_PP_IIF(p##(31), 31, 32)\n#            define BOOST_PP_NODE_48(p) BOOST_PP_IIF(p##(48), BOOST_PP_NODE_40, BOOST_PP_NODE_56)\n#                define BOOST_PP_NODE_40(p) BOOST_PP_IIF(p##(40), BOOST_PP_NODE_36, BOOST_PP_NODE_44)\n#                    define BOOST_PP_NODE_36(p) BOOST_PP_IIF(p##(36), BOOST_PP_NODE_34, BOOST_PP_NODE_38)\n#                        define BOOST_PP_NODE_34(p) BOOST_PP_IIF(p##(34), BOOST_PP_NODE_33, BOOST_PP_NODE_35)\n#                            define BOOST_PP_NODE_33(p) BOOST_PP_IIF(p##(33), 33, 34)\n#                            define BOOST_PP_NODE_35(p) BOOST_PP_IIF(p##(35), 35, 36)\n#                        define BOOST_PP_NODE_38(p) BOOST_PP_IIF(p##(38), BOOST_PP_NODE_37, BOOST_PP_NODE_39)\n#                            define BOOST_PP_NODE_37(p) BOOST_PP_IIF(p##(37), 37, 38)\n#                            define BOOST_PP_NODE_39(p) BOOST_PP_IIF(p##(39), 39, 40)\n#                    define BOOST_PP_NODE_44(p) BOOST_PP_IIF(p##(44), BOOST_PP_NODE_42, BOOST_PP_NODE_46)\n#                        define BOOST_PP_NODE_42(p) BOOST_PP_IIF(p##(42), BOOST_PP_NODE_41, BOOST_PP_NODE_43)\n#                            define BOOST_PP_NODE_41(p) BOOST_PP_IIF(p##(41), 41, 42)\n#                            define BOOST_PP_NODE_43(p) BOOST_PP_IIF(p##(43), 43, 44)\n#                        define BOOST_PP_NODE_46(p) 
BOOST_PP_IIF(p##(46), BOOST_PP_NODE_45, BOOST_PP_NODE_47)\n#                            define BOOST_PP_NODE_45(p) BOOST_PP_IIF(p##(45), 45, 46)\n#                            define BOOST_PP_NODE_47(p) BOOST_PP_IIF(p##(47), 47, 48)\n#                define BOOST_PP_NODE_56(p) BOOST_PP_IIF(p##(56), BOOST_PP_NODE_52, BOOST_PP_NODE_60)\n#                    define BOOST_PP_NODE_52(p) BOOST_PP_IIF(p##(52), BOOST_PP_NODE_50, BOOST_PP_NODE_54)\n#                        define BOOST_PP_NODE_50(p) BOOST_PP_IIF(p##(50), BOOST_PP_NODE_49, BOOST_PP_NODE_51)\n#                            define BOOST_PP_NODE_49(p) BOOST_PP_IIF(p##(49), 49, 50)\n#                            define BOOST_PP_NODE_51(p) BOOST_PP_IIF(p##(51), 51, 52)\n#                        define BOOST_PP_NODE_54(p) BOOST_PP_IIF(p##(54), BOOST_PP_NODE_53, BOOST_PP_NODE_55)\n#                            define BOOST_PP_NODE_53(p) BOOST_PP_IIF(p##(53), 53, 54)\n#                            define BOOST_PP_NODE_55(p) BOOST_PP_IIF(p##(55), 55, 56)\n#                    define BOOST_PP_NODE_60(p) BOOST_PP_IIF(p##(60), BOOST_PP_NODE_58, BOOST_PP_NODE_62)\n#                        define BOOST_PP_NODE_58(p) BOOST_PP_IIF(p##(58), BOOST_PP_NODE_57, BOOST_PP_NODE_59)\n#                            define BOOST_PP_NODE_57(p) BOOST_PP_IIF(p##(57), 57, 58)\n#                            define BOOST_PP_NODE_59(p) BOOST_PP_IIF(p##(59), 59, 60)\n#                        define BOOST_PP_NODE_62(p) BOOST_PP_IIF(p##(62), BOOST_PP_NODE_61, BOOST_PP_NODE_63)\n#                            define BOOST_PP_NODE_61(p) BOOST_PP_IIF(p##(61), 61, 62)\n#                            define BOOST_PP_NODE_63(p) BOOST_PP_IIF(p##(63), 63, 64)\n#        define BOOST_PP_NODE_96(p) BOOST_PP_IIF(p##(96), BOOST_PP_NODE_80, BOOST_PP_NODE_112)\n#            define BOOST_PP_NODE_80(p) BOOST_PP_IIF(p##(80), BOOST_PP_NODE_72, BOOST_PP_NODE_88)\n#                define BOOST_PP_NODE_72(p) BOOST_PP_IIF(p##(72), BOOST_PP_NODE_68, BOOST_PP_NODE_76)\n#           
         define BOOST_PP_NODE_68(p) BOOST_PP_IIF(p##(68), BOOST_PP_NODE_66, BOOST_PP_NODE_70)\n#                        define BOOST_PP_NODE_66(p) BOOST_PP_IIF(p##(66), BOOST_PP_NODE_65, BOOST_PP_NODE_67)\n#                            define BOOST_PP_NODE_65(p) BOOST_PP_IIF(p##(65), 65, 66)\n#                            define BOOST_PP_NODE_67(p) BOOST_PP_IIF(p##(67), 67, 68)\n#                        define BOOST_PP_NODE_70(p) BOOST_PP_IIF(p##(70), BOOST_PP_NODE_69, BOOST_PP_NODE_71)\n#                            define BOOST_PP_NODE_69(p) BOOST_PP_IIF(p##(69), 69, 70)\n#                            define BOOST_PP_NODE_71(p) BOOST_PP_IIF(p##(71), 71, 72)\n#                    define BOOST_PP_NODE_76(p) BOOST_PP_IIF(p##(76), BOOST_PP_NODE_74, BOOST_PP_NODE_78)\n#                        define BOOST_PP_NODE_74(p) BOOST_PP_IIF(p##(74), BOOST_PP_NODE_73, BOOST_PP_NODE_75)\n#                            define BOOST_PP_NODE_73(p) BOOST_PP_IIF(p##(73), 73, 74)\n#                            define BOOST_PP_NODE_75(p) BOOST_PP_IIF(p##(75), 75, 76)\n#                        define BOOST_PP_NODE_78(p) BOOST_PP_IIF(p##(78), BOOST_PP_NODE_77, BOOST_PP_NODE_79)\n#                            define BOOST_PP_NODE_77(p) BOOST_PP_IIF(p##(77), 77, 78)\n#                            define BOOST_PP_NODE_79(p) BOOST_PP_IIF(p##(79), 79, 80)\n#                define BOOST_PP_NODE_88(p) BOOST_PP_IIF(p##(88), BOOST_PP_NODE_84, BOOST_PP_NODE_92)\n#                    define BOOST_PP_NODE_84(p) BOOST_PP_IIF(p##(84), BOOST_PP_NODE_82, BOOST_PP_NODE_86)\n#                        define BOOST_PP_NODE_82(p) BOOST_PP_IIF(p##(82), BOOST_PP_NODE_81, BOOST_PP_NODE_83)\n#                            define BOOST_PP_NODE_81(p) BOOST_PP_IIF(p##(81), 81, 82)\n#                            define BOOST_PP_NODE_83(p) BOOST_PP_IIF(p##(83), 83, 84)\n#                        define BOOST_PP_NODE_86(p) BOOST_PP_IIF(p##(86), BOOST_PP_NODE_85, BOOST_PP_NODE_87)\n#                            define 
BOOST_PP_NODE_85(p) BOOST_PP_IIF(p##(85), 85, 86)\n#                            define BOOST_PP_NODE_87(p) BOOST_PP_IIF(p##(87), 87, 88)\n#                    define BOOST_PP_NODE_92(p) BOOST_PP_IIF(p##(92), BOOST_PP_NODE_90, BOOST_PP_NODE_94)\n#                        define BOOST_PP_NODE_90(p) BOOST_PP_IIF(p##(90), BOOST_PP_NODE_89, BOOST_PP_NODE_91)\n#                            define BOOST_PP_NODE_89(p) BOOST_PP_IIF(p##(89), 89, 90)\n#                            define BOOST_PP_NODE_91(p) BOOST_PP_IIF(p##(91), 91, 92)\n#                        define BOOST_PP_NODE_94(p) BOOST_PP_IIF(p##(94), BOOST_PP_NODE_93, BOOST_PP_NODE_95)\n#                            define BOOST_PP_NODE_93(p) BOOST_PP_IIF(p##(93), 93, 94)\n#                            define BOOST_PP_NODE_95(p) BOOST_PP_IIF(p##(95), 95, 96)\n#            define BOOST_PP_NODE_112(p) BOOST_PP_IIF(p##(112), BOOST_PP_NODE_104, BOOST_PP_NODE_120)\n#                define BOOST_PP_NODE_104(p) BOOST_PP_IIF(p##(104), BOOST_PP_NODE_100, BOOST_PP_NODE_108)\n#                    define BOOST_PP_NODE_100(p) BOOST_PP_IIF(p##(100), BOOST_PP_NODE_98, BOOST_PP_NODE_102)\n#                        define BOOST_PP_NODE_98(p) BOOST_PP_IIF(p##(98), BOOST_PP_NODE_97, BOOST_PP_NODE_99)\n#                            define BOOST_PP_NODE_97(p) BOOST_PP_IIF(p##(97), 97, 98)\n#                            define BOOST_PP_NODE_99(p) BOOST_PP_IIF(p##(99), 99, 100)\n#                        define BOOST_PP_NODE_102(p) BOOST_PP_IIF(p##(102), BOOST_PP_NODE_101, BOOST_PP_NODE_103)\n#                            define BOOST_PP_NODE_101(p) BOOST_PP_IIF(p##(101), 101, 102)\n#                            define BOOST_PP_NODE_103(p) BOOST_PP_IIF(p##(103), 103, 104)\n#                    define BOOST_PP_NODE_108(p) BOOST_PP_IIF(p##(108), BOOST_PP_NODE_106, BOOST_PP_NODE_110)\n#                        define BOOST_PP_NODE_106(p) BOOST_PP_IIF(p##(106), BOOST_PP_NODE_105, BOOST_PP_NODE_107)\n#                            define 
BOOST_PP_NODE_105(p) BOOST_PP_IIF(p##(105), 105, 106)\n#                            define BOOST_PP_NODE_107(p) BOOST_PP_IIF(p##(107), 107, 108)\n#                        define BOOST_PP_NODE_110(p) BOOST_PP_IIF(p##(110), BOOST_PP_NODE_109, BOOST_PP_NODE_111)\n#                            define BOOST_PP_NODE_109(p) BOOST_PP_IIF(p##(109), 109, 110)\n#                            define BOOST_PP_NODE_111(p) BOOST_PP_IIF(p##(111), 111, 112)\n#                define BOOST_PP_NODE_120(p) BOOST_PP_IIF(p##(120), BOOST_PP_NODE_116, BOOST_PP_NODE_124)\n#                    define BOOST_PP_NODE_116(p) BOOST_PP_IIF(p##(116), BOOST_PP_NODE_114, BOOST_PP_NODE_118)\n#                        define BOOST_PP_NODE_114(p) BOOST_PP_IIF(p##(114), BOOST_PP_NODE_113, BOOST_PP_NODE_115)\n#                            define BOOST_PP_NODE_113(p) BOOST_PP_IIF(p##(113), 113, 114)\n#                            define BOOST_PP_NODE_115(p) BOOST_PP_IIF(p##(115), 115, 116)\n#                        define BOOST_PP_NODE_118(p) BOOST_PP_IIF(p##(118), BOOST_PP_NODE_117, BOOST_PP_NODE_119)\n#                            define BOOST_PP_NODE_117(p) BOOST_PP_IIF(p##(117), 117, 118)\n#                            define BOOST_PP_NODE_119(p) BOOST_PP_IIF(p##(119), 119, 120)\n#                    define BOOST_PP_NODE_124(p) BOOST_PP_IIF(p##(124), BOOST_PP_NODE_122, BOOST_PP_NODE_126)\n#                        define BOOST_PP_NODE_122(p) BOOST_PP_IIF(p##(122), BOOST_PP_NODE_121, BOOST_PP_NODE_123)\n#                            define BOOST_PP_NODE_121(p) BOOST_PP_IIF(p##(121), 121, 122)\n#                            define BOOST_PP_NODE_123(p) BOOST_PP_IIF(p##(123), 123, 124)\n#                        define BOOST_PP_NODE_126(p) BOOST_PP_IIF(p##(126), BOOST_PP_NODE_125, BOOST_PP_NODE_127)\n#                            define BOOST_PP_NODE_125(p) BOOST_PP_IIF(p##(125), 125, 126)\n#                            define BOOST_PP_NODE_127(p) BOOST_PP_IIF(p##(127), 127, 128)\n#    define BOOST_PP_NODE_192(p) 
BOOST_PP_IIF(p##(192), BOOST_PP_NODE_160, BOOST_PP_NODE_224)\n#        define BOOST_PP_NODE_160(p) BOOST_PP_IIF(p##(160), BOOST_PP_NODE_144, BOOST_PP_NODE_176)\n#            define BOOST_PP_NODE_144(p) BOOST_PP_IIF(p##(144), BOOST_PP_NODE_136, BOOST_PP_NODE_152)\n#                define BOOST_PP_NODE_136(p) BOOST_PP_IIF(p##(136), BOOST_PP_NODE_132, BOOST_PP_NODE_140)\n#                    define BOOST_PP_NODE_132(p) BOOST_PP_IIF(p##(132), BOOST_PP_NODE_130, BOOST_PP_NODE_134)\n#                        define BOOST_PP_NODE_130(p) BOOST_PP_IIF(p##(130), BOOST_PP_NODE_129, BOOST_PP_NODE_131)\n#                            define BOOST_PP_NODE_129(p) BOOST_PP_IIF(p##(129), 129, 130)\n#                            define BOOST_PP_NODE_131(p) BOOST_PP_IIF(p##(131), 131, 132)\n#                        define BOOST_PP_NODE_134(p) BOOST_PP_IIF(p##(134), BOOST_PP_NODE_133, BOOST_PP_NODE_135)\n#                            define BOOST_PP_NODE_133(p) BOOST_PP_IIF(p##(133), 133, 134)\n#                            define BOOST_PP_NODE_135(p) BOOST_PP_IIF(p##(135), 135, 136)\n#                    define BOOST_PP_NODE_140(p) BOOST_PP_IIF(p##(140), BOOST_PP_NODE_138, BOOST_PP_NODE_142)\n#                        define BOOST_PP_NODE_138(p) BOOST_PP_IIF(p##(138), BOOST_PP_NODE_137, BOOST_PP_NODE_139)\n#                            define BOOST_PP_NODE_137(p) BOOST_PP_IIF(p##(137), 137, 138)\n#                            define BOOST_PP_NODE_139(p) BOOST_PP_IIF(p##(139), 139, 140)\n#                        define BOOST_PP_NODE_142(p) BOOST_PP_IIF(p##(142), BOOST_PP_NODE_141, BOOST_PP_NODE_143)\n#                            define BOOST_PP_NODE_141(p) BOOST_PP_IIF(p##(141), 141, 142)\n#                            define BOOST_PP_NODE_143(p) BOOST_PP_IIF(p##(143), 143, 144)\n#                define BOOST_PP_NODE_152(p) BOOST_PP_IIF(p##(152), BOOST_PP_NODE_148, BOOST_PP_NODE_156)\n#                    define BOOST_PP_NODE_148(p) BOOST_PP_IIF(p##(148), BOOST_PP_NODE_146, 
BOOST_PP_NODE_150)\n#                        define BOOST_PP_NODE_146(p) BOOST_PP_IIF(p##(146), BOOST_PP_NODE_145, BOOST_PP_NODE_147)\n#                            define BOOST_PP_NODE_145(p) BOOST_PP_IIF(p##(145), 145, 146)\n#                            define BOOST_PP_NODE_147(p) BOOST_PP_IIF(p##(147), 147, 148)\n#                        define BOOST_PP_NODE_150(p) BOOST_PP_IIF(p##(150), BOOST_PP_NODE_149, BOOST_PP_NODE_151)\n#                            define BOOST_PP_NODE_149(p) BOOST_PP_IIF(p##(149), 149, 150)\n#                            define BOOST_PP_NODE_151(p) BOOST_PP_IIF(p##(151), 151, 152)\n#                    define BOOST_PP_NODE_156(p) BOOST_PP_IIF(p##(156), BOOST_PP_NODE_154, BOOST_PP_NODE_158)\n#                        define BOOST_PP_NODE_154(p) BOOST_PP_IIF(p##(154), BOOST_PP_NODE_153, BOOST_PP_NODE_155)\n#                            define BOOST_PP_NODE_153(p) BOOST_PP_IIF(p##(153), 153, 154)\n#                            define BOOST_PP_NODE_155(p) BOOST_PP_IIF(p##(155), 155, 156)\n#                        define BOOST_PP_NODE_158(p) BOOST_PP_IIF(p##(158), BOOST_PP_NODE_157, BOOST_PP_NODE_159)\n#                            define BOOST_PP_NODE_157(p) BOOST_PP_IIF(p##(157), 157, 158)\n#                            define BOOST_PP_NODE_159(p) BOOST_PP_IIF(p##(159), 159, 160)\n#            define BOOST_PP_NODE_176(p) BOOST_PP_IIF(p##(176), BOOST_PP_NODE_168, BOOST_PP_NODE_184)\n#                define BOOST_PP_NODE_168(p) BOOST_PP_IIF(p##(168), BOOST_PP_NODE_164, BOOST_PP_NODE_172)\n#                    define BOOST_PP_NODE_164(p) BOOST_PP_IIF(p##(164), BOOST_PP_NODE_162, BOOST_PP_NODE_166)\n#                        define BOOST_PP_NODE_162(p) BOOST_PP_IIF(p##(162), BOOST_PP_NODE_161, BOOST_PP_NODE_163)\n#                            define BOOST_PP_NODE_161(p) BOOST_PP_IIF(p##(161), 161, 162)\n#                            define BOOST_PP_NODE_163(p) BOOST_PP_IIF(p##(163), 163, 164)\n#                        define BOOST_PP_NODE_166(p) 
BOOST_PP_IIF(p##(166), BOOST_PP_NODE_165, BOOST_PP_NODE_167)\n#                            define BOOST_PP_NODE_165(p) BOOST_PP_IIF(p##(165), 165, 166)\n#                            define BOOST_PP_NODE_167(p) BOOST_PP_IIF(p##(167), 167, 168)\n#                    define BOOST_PP_NODE_172(p) BOOST_PP_IIF(p##(172), BOOST_PP_NODE_170, BOOST_PP_NODE_174)\n#                        define BOOST_PP_NODE_170(p) BOOST_PP_IIF(p##(170), BOOST_PP_NODE_169, BOOST_PP_NODE_171)\n#                            define BOOST_PP_NODE_169(p) BOOST_PP_IIF(p##(169), 169, 170)\n#                            define BOOST_PP_NODE_171(p) BOOST_PP_IIF(p##(171), 171, 172)\n#                        define BOOST_PP_NODE_174(p) BOOST_PP_IIF(p##(174), BOOST_PP_NODE_173, BOOST_PP_NODE_175)\n#                            define BOOST_PP_NODE_173(p) BOOST_PP_IIF(p##(173), 173, 174)\n#                            define BOOST_PP_NODE_175(p) BOOST_PP_IIF(p##(175), 175, 176)\n#                define BOOST_PP_NODE_184(p) BOOST_PP_IIF(p##(184), BOOST_PP_NODE_180, BOOST_PP_NODE_188)\n#                    define BOOST_PP_NODE_180(p) BOOST_PP_IIF(p##(180), BOOST_PP_NODE_178, BOOST_PP_NODE_182)\n#                        define BOOST_PP_NODE_178(p) BOOST_PP_IIF(p##(178), BOOST_PP_NODE_177, BOOST_PP_NODE_179)\n#                            define BOOST_PP_NODE_177(p) BOOST_PP_IIF(p##(177), 177, 178)\n#                            define BOOST_PP_NODE_179(p) BOOST_PP_IIF(p##(179), 179, 180)\n#                        define BOOST_PP_NODE_182(p) BOOST_PP_IIF(p##(182), BOOST_PP_NODE_181, BOOST_PP_NODE_183)\n#                            define BOOST_PP_NODE_181(p) BOOST_PP_IIF(p##(181), 181, 182)\n#                            define BOOST_PP_NODE_183(p) BOOST_PP_IIF(p##(183), 183, 184)\n#                    define BOOST_PP_NODE_188(p) BOOST_PP_IIF(p##(188), BOOST_PP_NODE_186, BOOST_PP_NODE_190)\n#                        define BOOST_PP_NODE_186(p) BOOST_PP_IIF(p##(186), BOOST_PP_NODE_185, BOOST_PP_NODE_187)\n#            
                define BOOST_PP_NODE_185(p) BOOST_PP_IIF(p##(185), 185, 186)\n#                            define BOOST_PP_NODE_187(p) BOOST_PP_IIF(p##(187), 187, 188)\n#                        define BOOST_PP_NODE_190(p) BOOST_PP_IIF(p##(190), BOOST_PP_NODE_189, BOOST_PP_NODE_191)\n#                            define BOOST_PP_NODE_189(p) BOOST_PP_IIF(p##(189), 189, 190)\n#                            define BOOST_PP_NODE_191(p) BOOST_PP_IIF(p##(191), 191, 192)\n#        define BOOST_PP_NODE_224(p) BOOST_PP_IIF(p##(224), BOOST_PP_NODE_208, BOOST_PP_NODE_240)\n#            define BOOST_PP_NODE_208(p) BOOST_PP_IIF(p##(208), BOOST_PP_NODE_200, BOOST_PP_NODE_216)\n#                define BOOST_PP_NODE_200(p) BOOST_PP_IIF(p##(200), BOOST_PP_NODE_196, BOOST_PP_NODE_204)\n#                    define BOOST_PP_NODE_196(p) BOOST_PP_IIF(p##(196), BOOST_PP_NODE_194, BOOST_PP_NODE_198)\n#                        define BOOST_PP_NODE_194(p) BOOST_PP_IIF(p##(194), BOOST_PP_NODE_193, BOOST_PP_NODE_195)\n#                            define BOOST_PP_NODE_193(p) BOOST_PP_IIF(p##(193), 193, 194)\n#                            define BOOST_PP_NODE_195(p) BOOST_PP_IIF(p##(195), 195, 196)\n#                        define BOOST_PP_NODE_198(p) BOOST_PP_IIF(p##(198), BOOST_PP_NODE_197, BOOST_PP_NODE_199)\n#                            define BOOST_PP_NODE_197(p) BOOST_PP_IIF(p##(197), 197, 198)\n#                            define BOOST_PP_NODE_199(p) BOOST_PP_IIF(p##(199), 199, 200)\n#                    define BOOST_PP_NODE_204(p) BOOST_PP_IIF(p##(204), BOOST_PP_NODE_202, BOOST_PP_NODE_206)\n#                        define BOOST_PP_NODE_202(p) BOOST_PP_IIF(p##(202), BOOST_PP_NODE_201, BOOST_PP_NODE_203)\n#                            define BOOST_PP_NODE_201(p) BOOST_PP_IIF(p##(201), 201, 202)\n#                            define BOOST_PP_NODE_203(p) BOOST_PP_IIF(p##(203), 203, 204)\n#                        define BOOST_PP_NODE_206(p) BOOST_PP_IIF(p##(206), BOOST_PP_NODE_205, 
BOOST_PP_NODE_207)\n#                            define BOOST_PP_NODE_205(p) BOOST_PP_IIF(p##(205), 205, 206)\n#                            define BOOST_PP_NODE_207(p) BOOST_PP_IIF(p##(207), 207, 208)\n#                define BOOST_PP_NODE_216(p) BOOST_PP_IIF(p##(216), BOOST_PP_NODE_212, BOOST_PP_NODE_220)\n#                    define BOOST_PP_NODE_212(p) BOOST_PP_IIF(p##(212), BOOST_PP_NODE_210, BOOST_PP_NODE_214)\n#                        define BOOST_PP_NODE_210(p) BOOST_PP_IIF(p##(210), BOOST_PP_NODE_209, BOOST_PP_NODE_211)\n#                            define BOOST_PP_NODE_209(p) BOOST_PP_IIF(p##(209), 209, 210)\n#                            define BOOST_PP_NODE_211(p) BOOST_PP_IIF(p##(211), 211, 212)\n#                        define BOOST_PP_NODE_214(p) BOOST_PP_IIF(p##(214), BOOST_PP_NODE_213, BOOST_PP_NODE_215)\n#                            define BOOST_PP_NODE_213(p) BOOST_PP_IIF(p##(213), 213, 214)\n#                            define BOOST_PP_NODE_215(p) BOOST_PP_IIF(p##(215), 215, 216)\n#                    define BOOST_PP_NODE_220(p) BOOST_PP_IIF(p##(220), BOOST_PP_NODE_218, BOOST_PP_NODE_222)\n#                        define BOOST_PP_NODE_218(p) BOOST_PP_IIF(p##(218), BOOST_PP_NODE_217, BOOST_PP_NODE_219)\n#                            define BOOST_PP_NODE_217(p) BOOST_PP_IIF(p##(217), 217, 218)\n#                            define BOOST_PP_NODE_219(p) BOOST_PP_IIF(p##(219), 219, 220)\n#                        define BOOST_PP_NODE_222(p) BOOST_PP_IIF(p##(222), BOOST_PP_NODE_221, BOOST_PP_NODE_223)\n#                            define BOOST_PP_NODE_221(p) BOOST_PP_IIF(p##(221), 221, 222)\n#                            define BOOST_PP_NODE_223(p) BOOST_PP_IIF(p##(223), 223, 224)\n#            define BOOST_PP_NODE_240(p) BOOST_PP_IIF(p##(240), BOOST_PP_NODE_232, BOOST_PP_NODE_248)\n#                define BOOST_PP_NODE_232(p) BOOST_PP_IIF(p##(232), BOOST_PP_NODE_228, BOOST_PP_NODE_236)\n#                    define BOOST_PP_NODE_228(p) 
BOOST_PP_IIF(p##(228), BOOST_PP_NODE_226, BOOST_PP_NODE_230)\n#                        define BOOST_PP_NODE_226(p) BOOST_PP_IIF(p##(226), BOOST_PP_NODE_225, BOOST_PP_NODE_227)\n#                            define BOOST_PP_NODE_225(p) BOOST_PP_IIF(p##(225), 225, 226)\n#                            define BOOST_PP_NODE_227(p) BOOST_PP_IIF(p##(227), 227, 228)\n#                        define BOOST_PP_NODE_230(p) BOOST_PP_IIF(p##(230), BOOST_PP_NODE_229, BOOST_PP_NODE_231)\n#                            define BOOST_PP_NODE_229(p) BOOST_PP_IIF(p##(229), 229, 230)\n#                            define BOOST_PP_NODE_231(p) BOOST_PP_IIF(p##(231), 231, 232)\n#                    define BOOST_PP_NODE_236(p) BOOST_PP_IIF(p##(236), BOOST_PP_NODE_234, BOOST_PP_NODE_238)\n#                        define BOOST_PP_NODE_234(p) BOOST_PP_IIF(p##(234), BOOST_PP_NODE_233, BOOST_PP_NODE_235)\n#                            define BOOST_PP_NODE_233(p) BOOST_PP_IIF(p##(233), 233, 234)\n#                            define BOOST_PP_NODE_235(p) BOOST_PP_IIF(p##(235), 235, 236)\n#                        define BOOST_PP_NODE_238(p) BOOST_PP_IIF(p##(238), BOOST_PP_NODE_237, BOOST_PP_NODE_239)\n#                            define BOOST_PP_NODE_237(p) BOOST_PP_IIF(p##(237), 237, 238)\n#                            define BOOST_PP_NODE_239(p) BOOST_PP_IIF(p##(239), 239, 240)\n#                define BOOST_PP_NODE_248(p) BOOST_PP_IIF(p##(248), BOOST_PP_NODE_244, BOOST_PP_NODE_252)\n#                    define BOOST_PP_NODE_244(p) BOOST_PP_IIF(p##(244), BOOST_PP_NODE_242, BOOST_PP_NODE_246)\n#                        define BOOST_PP_NODE_242(p) BOOST_PP_IIF(p##(242), BOOST_PP_NODE_241, BOOST_PP_NODE_243)\n#                            define BOOST_PP_NODE_241(p) BOOST_PP_IIF(p##(241), 241, 242)\n#                            define BOOST_PP_NODE_243(p) BOOST_PP_IIF(p##(243), 243, 244)\n#                        define BOOST_PP_NODE_246(p) BOOST_PP_IIF(p##(246), BOOST_PP_NODE_245, BOOST_PP_NODE_247)\n#        
                    define BOOST_PP_NODE_245(p) BOOST_PP_IIF(p##(245), 245, 246)\n#                            define BOOST_PP_NODE_247(p) BOOST_PP_IIF(p##(247), 247, 248)\n#                    define BOOST_PP_NODE_252(p) BOOST_PP_IIF(p##(252), BOOST_PP_NODE_250, BOOST_PP_NODE_254)\n#                        define BOOST_PP_NODE_250(p) BOOST_PP_IIF(p##(250), BOOST_PP_NODE_249, BOOST_PP_NODE_251)\n#                            define BOOST_PP_NODE_249(p) BOOST_PP_IIF(p##(249), 249, 250)\n#                            define BOOST_PP_NODE_251(p) BOOST_PP_IIF(p##(251), 251, 252)\n#                        define BOOST_PP_NODE_254(p) BOOST_PP_IIF(p##(254), BOOST_PP_NODE_253, BOOST_PP_NODE_255)\n#                            define BOOST_PP_NODE_253(p) BOOST_PP_IIF(p##(253), 253, 254)\n#                            define BOOST_PP_NODE_255(p) BOOST_PP_IIF(p##(255), 255, 256)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/detail/is_binary.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_DETAIL_IS_BINARY_HPP\n# define BOOST_PREPROCESSOR_DETAIL_IS_BINARY_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/detail/check.hpp>\n#\n# /* BOOST_PP_IS_BINARY */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_IS_BINARY(x) BOOST_PP_CHECK(x, BOOST_PP_IS_BINARY_CHECK)\n# else\n#    define BOOST_PP_IS_BINARY(x) BOOST_PP_IS_BINARY_I(x)\n#    define BOOST_PP_IS_BINARY_I(x) BOOST_PP_CHECK(x, BOOST_PP_IS_BINARY_CHECK)\n# endif\n#\n# define BOOST_PP_IS_BINARY_CHECK(a, b) 1\n# define BOOST_PP_CHECK_RESULT_BOOST_PP_IS_BINARY_CHECK 0, BOOST_PP_NIL\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/detail/is_nullary.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_DETAIL_IS_NULLARY_HPP\n# define BOOST_PREPROCESSOR_DETAIL_IS_NULLARY_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/detail/check.hpp>\n#\n# /* BOOST_PP_IS_NULLARY */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_IS_NULLARY(x) BOOST_PP_CHECK(x, BOOST_PP_IS_NULLARY_CHECK)\n# else\n#    define BOOST_PP_IS_NULLARY(x) BOOST_PP_IS_NULLARY_I(x)\n#    define BOOST_PP_IS_NULLARY_I(x) BOOST_PP_CHECK(x, BOOST_PP_IS_NULLARY_CHECK)\n# endif\n#\n# define BOOST_PP_IS_NULLARY_CHECK() 1\n# define BOOST_PP_CHECK_RESULT_BOOST_PP_IS_NULLARY_CHECK 0, BOOST_PP_NIL\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/detail/split.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# ifndef BOOST_PREPROCESSOR_DETAIL_SPLIT_HPP\n# define BOOST_PREPROCESSOR_DETAIL_SPLIT_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_SPLIT */\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SPLIT(n, im) BOOST_PP_SPLIT_I((n, im))\n#    define BOOST_PP_SPLIT_I(par) BOOST_PP_SPLIT_II ## par\n#    define BOOST_PP_SPLIT_II(n, a, b) BOOST_PP_SPLIT_ ## n(a, b)\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_SPLIT(n, im) BOOST_PP_SPLIT_I(n((im)))\n#    define BOOST_PP_SPLIT_I(n) BOOST_PP_SPLIT_ID(BOOST_PP_SPLIT_II_ ## n)\n#    define BOOST_PP_SPLIT_II_0(s) BOOST_PP_SPLIT_ID(BOOST_PP_SPLIT_0 s)\n#    define BOOST_PP_SPLIT_II_1(s) BOOST_PP_SPLIT_ID(BOOST_PP_SPLIT_1 s)\n#    define BOOST_PP_SPLIT_ID(id) id\n# else\n#    define BOOST_PP_SPLIT(n, im) BOOST_PP_SPLIT_I(n)(im)\n#    define BOOST_PP_SPLIT_I(n) BOOST_PP_SPLIT_ ## n\n# endif\n#\n# define BOOST_PP_SPLIT_0(a, b) a\n# define BOOST_PP_SPLIT_1(a, b) b\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/empty.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_EMPTY_HPP\n# define BOOST_PREPROCESSOR_EMPTY_HPP\n#\n# include <boost/preprocessor/facilities/empty.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/enum.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ENUM_HPP\n# define BOOST_PREPROCESSOR_ENUM_HPP\n#\n# include <boost/preprocessor/repetition/enum.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/enum_params.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ENUM_PARAMS_HPP\n# define BOOST_PREPROCESSOR_ENUM_PARAMS_HPP\n#\n# include <boost/preprocessor/repetition/enum_params.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/enum_params_with_a_default.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ENUM_PARAMS_WITH_A_DEFAULT_HPP\n# define BOOST_PREPROCESSOR_ENUM_PARAMS_WITH_A_DEFAULT_HPP\n#\n# include <boost/preprocessor/repetition/enum_params_with_a_default.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/enum_shifted_params.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ENUM_SHIFTED_PARAMS_HPP\n# define BOOST_PREPROCESSOR_ENUM_SHIFTED_PARAMS_HPP\n#\n# include <boost/preprocessor/repetition/enum_shifted_params.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/expr_if.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_EXPR_IF_HPP\n# define BOOST_PREPROCESSOR_EXPR_IF_HPP\n#\n# include <boost/preprocessor/control/expr_if.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/detail/is_empty.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2014.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n#ifndef BOOST_PREPROCESSOR_DETAIL_IS_EMPTY_HPP\n#define BOOST_PREPROCESSOR_DETAIL_IS_EMPTY_HPP\n\n#include <boost/preprocessor/punctuation/is_begin_parens.hpp>\n\n#if BOOST_PP_VARIADICS_MSVC\n\n# pragma warning(once:4002)\n\n#define BOOST_PP_DETAIL_IS_EMPTY_IIF_0(t, b) b\n#define BOOST_PP_DETAIL_IS_EMPTY_IIF_1(t, b) t\n\n#else\n\n#define BOOST_PP_DETAIL_IS_EMPTY_IIF_0(t, ...) __VA_ARGS__\n#define BOOST_PP_DETAIL_IS_EMPTY_IIF_1(t, ...) t\n\n#endif\n\n#if BOOST_PP_VARIADICS_MSVC && _MSC_VER <= 1400\n\n#define BOOST_PP_DETAIL_IS_EMPTY_PROCESS(param) \\\n\tBOOST_PP_IS_BEGIN_PARENS \\\n    \t( \\\n        BOOST_PP_DETAIL_IS_EMPTY_NON_FUNCTION_C param () \\\n        ) \\\n/**/\n\n#else\n\n#define BOOST_PP_DETAIL_IS_EMPTY_PROCESS(...) \\\n\tBOOST_PP_IS_BEGIN_PARENS \\\n        ( \\\n        BOOST_PP_DETAIL_IS_EMPTY_NON_FUNCTION_C __VA_ARGS__ () \\\n        ) \\\n/**/\n\n#endif\n\n#define BOOST_PP_DETAIL_IS_EMPTY_PRIMITIVE_CAT(a, b) a ## b\n#define BOOST_PP_DETAIL_IS_EMPTY_IIF(bit) BOOST_PP_DETAIL_IS_EMPTY_PRIMITIVE_CAT(BOOST_PP_DETAIL_IS_EMPTY_IIF_,bit)\n#define BOOST_PP_DETAIL_IS_EMPTY_NON_FUNCTION_C(...) ()\n\n#endif /* BOOST_PREPROCESSOR_DETAIL_IS_EMPTY_HPP */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/empty.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FACILITIES_EMPTY_HPP\n# define BOOST_PREPROCESSOR_FACILITIES_EMPTY_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_EMPTY */\n#\n# define BOOST_PP_EMPTY()\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/expand.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FACILITIES_EXPAND_HPP\n# define BOOST_PREPROCESSOR_FACILITIES_EXPAND_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC() && ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n#    define BOOST_PP_EXPAND(x) BOOST_PP_EXPAND_I(x)\n# else\n#    define BOOST_PP_EXPAND(x) BOOST_PP_EXPAND_OO((x))\n#    define BOOST_PP_EXPAND_OO(par) BOOST_PP_EXPAND_I ## par\n# endif\n#\n# define BOOST_PP_EXPAND_I(x) x\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/identity.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n# /* Revised by Edward Diener (2015) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FACILITIES_IDENTITY_HPP\n# define BOOST_PREPROCESSOR_FACILITIES_IDENTITY_HPP\n#\n# include <boost/preprocessor/facilities/empty.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# /* BOOST_PP_IDENTITY */\n#\n# define BOOST_PP_IDENTITY(item) item BOOST_PP_EMPTY\n#\n# define BOOST_PP_IDENTITY_N(item,n) item BOOST_PP_TUPLE_EAT_N(n)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/intercept.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FACILITIES_INTERCEPT_HPP\n# define BOOST_PREPROCESSOR_FACILITIES_INTERCEPT_HPP\n#\n# /* BOOST_PP_INTERCEPT */\n#\n# define BOOST_PP_INTERCEPT BOOST_PP_INTERCEPT_\n#\n# define BOOST_PP_INTERCEPT_0\n# define BOOST_PP_INTERCEPT_1\n# define BOOST_PP_INTERCEPT_2\n# define BOOST_PP_INTERCEPT_3\n# define BOOST_PP_INTERCEPT_4\n# define BOOST_PP_INTERCEPT_5\n# define BOOST_PP_INTERCEPT_6\n# define BOOST_PP_INTERCEPT_7\n# define BOOST_PP_INTERCEPT_8\n# define BOOST_PP_INTERCEPT_9\n# define BOOST_PP_INTERCEPT_10\n# define BOOST_PP_INTERCEPT_11\n# define BOOST_PP_INTERCEPT_12\n# define BOOST_PP_INTERCEPT_13\n# define BOOST_PP_INTERCEPT_14\n# define BOOST_PP_INTERCEPT_15\n# define BOOST_PP_INTERCEPT_16\n# define BOOST_PP_INTERCEPT_17\n# define BOOST_PP_INTERCEPT_18\n# define BOOST_PP_INTERCEPT_19\n# define BOOST_PP_INTERCEPT_20\n# define BOOST_PP_INTERCEPT_21\n# define BOOST_PP_INTERCEPT_22\n# define BOOST_PP_INTERCEPT_23\n# define BOOST_PP_INTERCEPT_24\n# define BOOST_PP_INTERCEPT_25\n# define BOOST_PP_INTERCEPT_26\n# define BOOST_PP_INTERCEPT_27\n# define BOOST_PP_INTERCEPT_28\n# define BOOST_PP_INTERCEPT_29\n# define BOOST_PP_INTERCEPT_30\n# define BOOST_PP_INTERCEPT_31\n# define BOOST_PP_INTERCEPT_32\n# define BOOST_PP_INTERCEPT_33\n# define BOOST_PP_INTERCEPT_34\n# define BOOST_PP_INTERCEPT_35\n# define BOOST_PP_INTERCEPT_36\n# 
define BOOST_PP_INTERCEPT_37\n# define BOOST_PP_INTERCEPT_38\n# define BOOST_PP_INTERCEPT_39\n# define BOOST_PP_INTERCEPT_40\n# define BOOST_PP_INTERCEPT_41\n# define BOOST_PP_INTERCEPT_42\n# define BOOST_PP_INTERCEPT_43\n# define BOOST_PP_INTERCEPT_44\n# define BOOST_PP_INTERCEPT_45\n# define BOOST_PP_INTERCEPT_46\n# define BOOST_PP_INTERCEPT_47\n# define BOOST_PP_INTERCEPT_48\n# define BOOST_PP_INTERCEPT_49\n# define BOOST_PP_INTERCEPT_50\n# define BOOST_PP_INTERCEPT_51\n# define BOOST_PP_INTERCEPT_52\n# define BOOST_PP_INTERCEPT_53\n# define BOOST_PP_INTERCEPT_54\n# define BOOST_PP_INTERCEPT_55\n# define BOOST_PP_INTERCEPT_56\n# define BOOST_PP_INTERCEPT_57\n# define BOOST_PP_INTERCEPT_58\n# define BOOST_PP_INTERCEPT_59\n# define BOOST_PP_INTERCEPT_60\n# define BOOST_PP_INTERCEPT_61\n# define BOOST_PP_INTERCEPT_62\n# define BOOST_PP_INTERCEPT_63\n# define BOOST_PP_INTERCEPT_64\n# define BOOST_PP_INTERCEPT_65\n# define BOOST_PP_INTERCEPT_66\n# define BOOST_PP_INTERCEPT_67\n# define BOOST_PP_INTERCEPT_68\n# define BOOST_PP_INTERCEPT_69\n# define BOOST_PP_INTERCEPT_70\n# define BOOST_PP_INTERCEPT_71\n# define BOOST_PP_INTERCEPT_72\n# define BOOST_PP_INTERCEPT_73\n# define BOOST_PP_INTERCEPT_74\n# define BOOST_PP_INTERCEPT_75\n# define BOOST_PP_INTERCEPT_76\n# define BOOST_PP_INTERCEPT_77\n# define BOOST_PP_INTERCEPT_78\n# define BOOST_PP_INTERCEPT_79\n# define BOOST_PP_INTERCEPT_80\n# define BOOST_PP_INTERCEPT_81\n# define BOOST_PP_INTERCEPT_82\n# define BOOST_PP_INTERCEPT_83\n# define BOOST_PP_INTERCEPT_84\n# define BOOST_PP_INTERCEPT_85\n# define BOOST_PP_INTERCEPT_86\n# define BOOST_PP_INTERCEPT_87\n# define BOOST_PP_INTERCEPT_88\n# define BOOST_PP_INTERCEPT_89\n# define BOOST_PP_INTERCEPT_90\n# define BOOST_PP_INTERCEPT_91\n# define BOOST_PP_INTERCEPT_92\n# define BOOST_PP_INTERCEPT_93\n# define BOOST_PP_INTERCEPT_94\n# define BOOST_PP_INTERCEPT_95\n# define BOOST_PP_INTERCEPT_96\n# define BOOST_PP_INTERCEPT_97\n# define BOOST_PP_INTERCEPT_98\n# define 
BOOST_PP_INTERCEPT_99\n# define BOOST_PP_INTERCEPT_100\n# define BOOST_PP_INTERCEPT_101\n# define BOOST_PP_INTERCEPT_102\n# define BOOST_PP_INTERCEPT_103\n# define BOOST_PP_INTERCEPT_104\n# define BOOST_PP_INTERCEPT_105\n# define BOOST_PP_INTERCEPT_106\n# define BOOST_PP_INTERCEPT_107\n# define BOOST_PP_INTERCEPT_108\n# define BOOST_PP_INTERCEPT_109\n# define BOOST_PP_INTERCEPT_110\n# define BOOST_PP_INTERCEPT_111\n# define BOOST_PP_INTERCEPT_112\n# define BOOST_PP_INTERCEPT_113\n# define BOOST_PP_INTERCEPT_114\n# define BOOST_PP_INTERCEPT_115\n# define BOOST_PP_INTERCEPT_116\n# define BOOST_PP_INTERCEPT_117\n# define BOOST_PP_INTERCEPT_118\n# define BOOST_PP_INTERCEPT_119\n# define BOOST_PP_INTERCEPT_120\n# define BOOST_PP_INTERCEPT_121\n# define BOOST_PP_INTERCEPT_122\n# define BOOST_PP_INTERCEPT_123\n# define BOOST_PP_INTERCEPT_124\n# define BOOST_PP_INTERCEPT_125\n# define BOOST_PP_INTERCEPT_126\n# define BOOST_PP_INTERCEPT_127\n# define BOOST_PP_INTERCEPT_128\n# define BOOST_PP_INTERCEPT_129\n# define BOOST_PP_INTERCEPT_130\n# define BOOST_PP_INTERCEPT_131\n# define BOOST_PP_INTERCEPT_132\n# define BOOST_PP_INTERCEPT_133\n# define BOOST_PP_INTERCEPT_134\n# define BOOST_PP_INTERCEPT_135\n# define BOOST_PP_INTERCEPT_136\n# define BOOST_PP_INTERCEPT_137\n# define BOOST_PP_INTERCEPT_138\n# define BOOST_PP_INTERCEPT_139\n# define BOOST_PP_INTERCEPT_140\n# define BOOST_PP_INTERCEPT_141\n# define BOOST_PP_INTERCEPT_142\n# define BOOST_PP_INTERCEPT_143\n# define BOOST_PP_INTERCEPT_144\n# define BOOST_PP_INTERCEPT_145\n# define BOOST_PP_INTERCEPT_146\n# define BOOST_PP_INTERCEPT_147\n# define BOOST_PP_INTERCEPT_148\n# define BOOST_PP_INTERCEPT_149\n# define BOOST_PP_INTERCEPT_150\n# define BOOST_PP_INTERCEPT_151\n# define BOOST_PP_INTERCEPT_152\n# define BOOST_PP_INTERCEPT_153\n# define BOOST_PP_INTERCEPT_154\n# define BOOST_PP_INTERCEPT_155\n# define BOOST_PP_INTERCEPT_156\n# define BOOST_PP_INTERCEPT_157\n# define BOOST_PP_INTERCEPT_158\n# define 
BOOST_PP_INTERCEPT_159\n# define BOOST_PP_INTERCEPT_160\n# define BOOST_PP_INTERCEPT_161\n# define BOOST_PP_INTERCEPT_162\n# define BOOST_PP_INTERCEPT_163\n# define BOOST_PP_INTERCEPT_164\n# define BOOST_PP_INTERCEPT_165\n# define BOOST_PP_INTERCEPT_166\n# define BOOST_PP_INTERCEPT_167\n# define BOOST_PP_INTERCEPT_168\n# define BOOST_PP_INTERCEPT_169\n# define BOOST_PP_INTERCEPT_170\n# define BOOST_PP_INTERCEPT_171\n# define BOOST_PP_INTERCEPT_172\n# define BOOST_PP_INTERCEPT_173\n# define BOOST_PP_INTERCEPT_174\n# define BOOST_PP_INTERCEPT_175\n# define BOOST_PP_INTERCEPT_176\n# define BOOST_PP_INTERCEPT_177\n# define BOOST_PP_INTERCEPT_178\n# define BOOST_PP_INTERCEPT_179\n# define BOOST_PP_INTERCEPT_180\n# define BOOST_PP_INTERCEPT_181\n# define BOOST_PP_INTERCEPT_182\n# define BOOST_PP_INTERCEPT_183\n# define BOOST_PP_INTERCEPT_184\n# define BOOST_PP_INTERCEPT_185\n# define BOOST_PP_INTERCEPT_186\n# define BOOST_PP_INTERCEPT_187\n# define BOOST_PP_INTERCEPT_188\n# define BOOST_PP_INTERCEPT_189\n# define BOOST_PP_INTERCEPT_190\n# define BOOST_PP_INTERCEPT_191\n# define BOOST_PP_INTERCEPT_192\n# define BOOST_PP_INTERCEPT_193\n# define BOOST_PP_INTERCEPT_194\n# define BOOST_PP_INTERCEPT_195\n# define BOOST_PP_INTERCEPT_196\n# define BOOST_PP_INTERCEPT_197\n# define BOOST_PP_INTERCEPT_198\n# define BOOST_PP_INTERCEPT_199\n# define BOOST_PP_INTERCEPT_200\n# define BOOST_PP_INTERCEPT_201\n# define BOOST_PP_INTERCEPT_202\n# define BOOST_PP_INTERCEPT_203\n# define BOOST_PP_INTERCEPT_204\n# define BOOST_PP_INTERCEPT_205\n# define BOOST_PP_INTERCEPT_206\n# define BOOST_PP_INTERCEPT_207\n# define BOOST_PP_INTERCEPT_208\n# define BOOST_PP_INTERCEPT_209\n# define BOOST_PP_INTERCEPT_210\n# define BOOST_PP_INTERCEPT_211\n# define BOOST_PP_INTERCEPT_212\n# define BOOST_PP_INTERCEPT_213\n# define BOOST_PP_INTERCEPT_214\n# define BOOST_PP_INTERCEPT_215\n# define BOOST_PP_INTERCEPT_216\n# define BOOST_PP_INTERCEPT_217\n# define BOOST_PP_INTERCEPT_218\n# define 
BOOST_PP_INTERCEPT_219\n# define BOOST_PP_INTERCEPT_220\n# define BOOST_PP_INTERCEPT_221\n# define BOOST_PP_INTERCEPT_222\n# define BOOST_PP_INTERCEPT_223\n# define BOOST_PP_INTERCEPT_224\n# define BOOST_PP_INTERCEPT_225\n# define BOOST_PP_INTERCEPT_226\n# define BOOST_PP_INTERCEPT_227\n# define BOOST_PP_INTERCEPT_228\n# define BOOST_PP_INTERCEPT_229\n# define BOOST_PP_INTERCEPT_230\n# define BOOST_PP_INTERCEPT_231\n# define BOOST_PP_INTERCEPT_232\n# define BOOST_PP_INTERCEPT_233\n# define BOOST_PP_INTERCEPT_234\n# define BOOST_PP_INTERCEPT_235\n# define BOOST_PP_INTERCEPT_236\n# define BOOST_PP_INTERCEPT_237\n# define BOOST_PP_INTERCEPT_238\n# define BOOST_PP_INTERCEPT_239\n# define BOOST_PP_INTERCEPT_240\n# define BOOST_PP_INTERCEPT_241\n# define BOOST_PP_INTERCEPT_242\n# define BOOST_PP_INTERCEPT_243\n# define BOOST_PP_INTERCEPT_244\n# define BOOST_PP_INTERCEPT_245\n# define BOOST_PP_INTERCEPT_246\n# define BOOST_PP_INTERCEPT_247\n# define BOOST_PP_INTERCEPT_248\n# define BOOST_PP_INTERCEPT_249\n# define BOOST_PP_INTERCEPT_250\n# define BOOST_PP_INTERCEPT_251\n# define BOOST_PP_INTERCEPT_252\n# define BOOST_PP_INTERCEPT_253\n# define BOOST_PP_INTERCEPT_254\n# define BOOST_PP_INTERCEPT_255\n# define BOOST_PP_INTERCEPT_256\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/is_1.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2003.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FACILITIES_IS_1_HPP\n# define BOOST_PREPROCESSOR_FACILITIES_IS_1_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/facilities/is_empty.hpp>\n#\n# /* BOOST_PP_IS_1 */\n#\n# define BOOST_PP_IS_1(x) BOOST_PP_IS_EMPTY(BOOST_PP_CAT(BOOST_PP_IS_1_HELPER_, x))\n# define BOOST_PP_IS_1_HELPER_1\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/is_empty.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2003.\n#  *     (C) Copyright Edward Diener 2014.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FACILITIES_IS_EMPTY_HPP\n# define BOOST_PREPROCESSOR_FACILITIES_IS_EMPTY_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# if BOOST_PP_VARIADICS\n#\n# include <boost/preprocessor/facilities/is_empty_variadic.hpp>\n#\n# else\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC() && ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/facilities/identity.hpp>\n# else\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/detail/split.hpp>\n# endif\n#\n# /* BOOST_PP_IS_EMPTY */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC() && ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_IS_EMPTY(x) BOOST_PP_IS_EMPTY_I(x BOOST_PP_IS_EMPTY_HELPER)\n#    define BOOST_PP_IS_EMPTY_I(contents) BOOST_PP_TUPLE_ELEM(2, 1, (BOOST_PP_IS_EMPTY_DEF_ ## contents()))\n#    define BOOST_PP_IS_EMPTY_DEF_BOOST_PP_IS_EMPTY_HELPER 1, BOOST_PP_IDENTITY(1)\n#    define BOOST_PP_IS_EMPTY_HELPER() , 0\n# else\n#    if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#        define BOOST_PP_IS_EMPTY(x) BOOST_PP_IS_EMPTY_I(BOOST_PP_IS_EMPTY_HELPER x ())\n#        define BOOST_PP_IS_EMPTY_I(test) BOOST_PP_IS_EMPTY_II(BOOST_PP_SPLIT(0, BOOST_PP_CAT(BOOST_PP_IS_EMPTY_DEF_, 
test)))\n#        define BOOST_PP_IS_EMPTY_II(id) id\n#    else\n#        define BOOST_PP_IS_EMPTY(x) BOOST_PP_IS_EMPTY_I((BOOST_PP_IS_EMPTY_HELPER x ()))\n#        define BOOST_PP_IS_EMPTY_I(par) BOOST_PP_IS_EMPTY_II ## par\n#        define BOOST_PP_IS_EMPTY_II(test) BOOST_PP_SPLIT(0, BOOST_PP_CAT(BOOST_PP_IS_EMPTY_DEF_, test))\n#    endif\n#    define BOOST_PP_IS_EMPTY_HELPER() 1\n#    define BOOST_PP_IS_EMPTY_DEF_1 1, BOOST_PP_NIL\n#    define BOOST_PP_IS_EMPTY_DEF_BOOST_PP_IS_EMPTY_HELPER 0, BOOST_PP_NIL\n# endif\n#\n# endif /* BOOST_PP_VARIADICS */\n#\n# endif /* BOOST_PREPROCESSOR_FACILITIES_IS_EMPTY_HPP */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/is_empty_variadic.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2014.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FACILITIES_IS_EMPTY_VARIADIC_HPP\n# define BOOST_PREPROCESSOR_FACILITIES_IS_EMPTY_VARIADIC_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# if BOOST_PP_VARIADICS\n#\n# include <boost/preprocessor/punctuation/is_begin_parens.hpp>\n# include <boost/preprocessor/facilities/detail/is_empty.hpp>\n#\n#if BOOST_PP_VARIADICS_MSVC && _MSC_VER <= 1400\n#\n#define BOOST_PP_IS_EMPTY(param) \\\n    BOOST_PP_DETAIL_IS_EMPTY_IIF \\\n      ( \\\n      BOOST_PP_IS_BEGIN_PARENS \\\n        ( \\\n        param \\\n        ) \\\n      ) \\\n      ( \\\n      BOOST_PP_IS_EMPTY_ZERO, \\\n      BOOST_PP_DETAIL_IS_EMPTY_PROCESS \\\n      ) \\\n    (param) \\\n/**/\n#define BOOST_PP_IS_EMPTY_ZERO(param) 0\n# else\n#define BOOST_PP_IS_EMPTY(...) \\\n    BOOST_PP_DETAIL_IS_EMPTY_IIF \\\n      ( \\\n      BOOST_PP_IS_BEGIN_PARENS \\\n        ( \\\n        __VA_ARGS__ \\\n        ) \\\n      ) \\\n      ( \\\n      BOOST_PP_IS_EMPTY_ZERO, \\\n      BOOST_PP_DETAIL_IS_EMPTY_PROCESS \\\n      ) \\\n    (__VA_ARGS__) \\\n/**/\n#define BOOST_PP_IS_EMPTY_ZERO(...) 0\n# endif /* BOOST_PP_VARIADICS_MSVC && _MSC_VER <= 1400 */\n# endif /* BOOST_PP_VARIADICS */\n# endif /* BOOST_PREPROCESSOR_FACILITIES_IS_EMPTY_VARIADIC_HPP */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/facilities/overload.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2011.                                  *\n#  *     (C) Copyright Edward Diener 2011.                                    *\n#  *     Distributed under the Boost Software License, Version 1.0. (See      *\n#  *     accompanying file LICENSE_1_0.txt or copy at                         *\n#  *     http://www.boost.org/LICENSE_1_0.txt)                                *\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FACILITIES_OVERLOAD_HPP\n# define BOOST_PREPROCESSOR_FACILITIES_OVERLOAD_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/variadic/size.hpp>\n#\n# /* BOOST_PP_OVERLOAD */\n#\n# if BOOST_PP_VARIADICS\n#    define BOOST_PP_OVERLOAD(prefix, ...) BOOST_PP_CAT(prefix, BOOST_PP_VARIADIC_SIZE(__VA_ARGS__))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/for.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_FOR_HPP\n# define BOOST_PREPROCESSOR_FOR_HPP\n#\n# include <boost/preprocessor/repetition/for.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/identity.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_IDENTITY_HPP\n# define BOOST_PREPROCESSOR_IDENTITY_HPP\n#\n# include <boost/preprocessor/facilities/identity.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/inc.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_INC_HPP\n# define BOOST_PREPROCESSOR_INC_HPP\n#\n# include <boost/preprocessor/arithmetic/inc.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iterate.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ITERATE_HPP\n# define BOOST_PREPROCESSOR_ITERATE_HPP\n#\n# include <boost/preprocessor/iteration/iterate.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/lower1.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_START_1\n#\n# undef BOOST_PP_ITERATION_START_1_DIGIT_1\n# undef BOOST_PP_ITERATION_START_1_DIGIT_2\n# undef BOOST_PP_ITERATION_START_1_DIGIT_3\n# undef BOOST_PP_ITERATION_START_1_DIGIT_4\n# undef BOOST_PP_ITERATION_START_1_DIGIT_5\n# undef BOOST_PP_ITERATION_START_1_DIGIT_6\n# undef BOOST_PP_ITERATION_START_1_DIGIT_7\n# undef BOOST_PP_ITERATION_START_1_DIGIT_8\n# undef BOOST_PP_ITERATION_START_1_DIGIT_9\n# undef BOOST_PP_ITERATION_START_1_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_START_1_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_START_1_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_START_1_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_START_1_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_START_1_DIGIT_3\n#    define BOOST_PP_ITERATION_START_1 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_START_1_DIGIT_3, BOOST_PP_ITERATION_START_1_DIGIT_2, BOOST_PP_ITERATION_START_1_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_START_1_DIGIT_2\n#    define BOOST_PP_ITERATION_START_1 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_START_1_DIGIT_2, BOOST_PP_ITERATION_START_1_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_START_1 BOOST_PP_ITERATION_START_1_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/lower2.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_START_2\n#\n# undef BOOST_PP_ITERATION_START_2_DIGIT_1\n# undef BOOST_PP_ITERATION_START_2_DIGIT_2\n# undef BOOST_PP_ITERATION_START_2_DIGIT_3\n# undef BOOST_PP_ITERATION_START_2_DIGIT_4\n# undef BOOST_PP_ITERATION_START_2_DIGIT_5\n# undef BOOST_PP_ITERATION_START_2_DIGIT_6\n# undef BOOST_PP_ITERATION_START_2_DIGIT_7\n# undef BOOST_PP_ITERATION_START_2_DIGIT_8\n# undef BOOST_PP_ITERATION_START_2_DIGIT_9\n# undef BOOST_PP_ITERATION_START_2_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_START_2_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_START_2_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_START_2_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_START_2_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_START_2_DIGIT_3\n#    define BOOST_PP_ITERATION_START_2 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_START_2_DIGIT_3, BOOST_PP_ITERATION_START_2_DIGIT_2, BOOST_PP_ITERATION_START_2_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_START_2_DIGIT_2\n#    define BOOST_PP_ITERATION_START_2 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_START_2_DIGIT_2, BOOST_PP_ITERATION_START_2_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_START_2 BOOST_PP_ITERATION_START_2_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/lower3.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_START_3\n#\n# undef BOOST_PP_ITERATION_START_3_DIGIT_1\n# undef BOOST_PP_ITERATION_START_3_DIGIT_2\n# undef BOOST_PP_ITERATION_START_3_DIGIT_3\n# undef BOOST_PP_ITERATION_START_3_DIGIT_4\n# undef BOOST_PP_ITERATION_START_3_DIGIT_5\n# undef BOOST_PP_ITERATION_START_3_DIGIT_6\n# undef BOOST_PP_ITERATION_START_3_DIGIT_7\n# undef BOOST_PP_ITERATION_START_3_DIGIT_8\n# undef BOOST_PP_ITERATION_START_3_DIGIT_9\n# undef BOOST_PP_ITERATION_START_3_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_START_3_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_START_3_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_START_3_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_START_3_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_START_3_DIGIT_3\n#    define BOOST_PP_ITERATION_START_3 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_START_3_DIGIT_3, BOOST_PP_ITERATION_START_3_DIGIT_2, BOOST_PP_ITERATION_START_3_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_START_3_DIGIT_2\n#    define BOOST_PP_ITERATION_START_3 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_START_3_DIGIT_2, BOOST_PP_ITERATION_START_3_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_START_3 BOOST_PP_ITERATION_START_3_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/lower4.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_START_4\n#\n# undef BOOST_PP_ITERATION_START_4_DIGIT_1\n# undef BOOST_PP_ITERATION_START_4_DIGIT_2\n# undef BOOST_PP_ITERATION_START_4_DIGIT_3\n# undef BOOST_PP_ITERATION_START_4_DIGIT_4\n# undef BOOST_PP_ITERATION_START_4_DIGIT_5\n# undef BOOST_PP_ITERATION_START_4_DIGIT_6\n# undef BOOST_PP_ITERATION_START_4_DIGIT_7\n# undef BOOST_PP_ITERATION_START_4_DIGIT_8\n# undef BOOST_PP_ITERATION_START_4_DIGIT_9\n# undef BOOST_PP_ITERATION_START_4_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_START_4_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_START_4_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_START_4_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_START_4_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_START_4_DIGIT_3\n#    define BOOST_PP_ITERATION_START_4 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_START_4_DIGIT_3, BOOST_PP_ITERATION_START_4_DIGIT_2, BOOST_PP_ITERATION_START_4_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_START_4_DIGIT_2\n#    define BOOST_PP_ITERATION_START_4 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_START_4_DIGIT_2, BOOST_PP_ITERATION_START_4_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_START_4 BOOST_PP_ITERATION_START_4_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/lower5.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_START_5\n#\n# undef BOOST_PP_ITERATION_START_5_DIGIT_1\n# undef BOOST_PP_ITERATION_START_5_DIGIT_2\n# undef BOOST_PP_ITERATION_START_5_DIGIT_3\n# undef BOOST_PP_ITERATION_START_5_DIGIT_4\n# undef BOOST_PP_ITERATION_START_5_DIGIT_5\n# undef BOOST_PP_ITERATION_START_5_DIGIT_6\n# undef BOOST_PP_ITERATION_START_5_DIGIT_7\n# undef BOOST_PP_ITERATION_START_5_DIGIT_8\n# undef BOOST_PP_ITERATION_START_5_DIGIT_9\n# undef BOOST_PP_ITERATION_START_5_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_START_5_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_START_5_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_START_5_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_START_5_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_START_5_DIGIT_3\n#    define BOOST_PP_ITERATION_START_5 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_START_5_DIGIT_3, BOOST_PP_ITERATION_START_5_DIGIT_2, BOOST_PP_ITERATION_START_5_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_START_5_DIGIT_2\n#    define BOOST_PP_ITERATION_START_5 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_START_5_DIGIT_2, BOOST_PP_ITERATION_START_5_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_START_5 BOOST_PP_ITERATION_START_5_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/upper1.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_FINISH_1\n#\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_1\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_2\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_3\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_4\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_5\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_6\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_7\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_8\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_9\n# undef BOOST_PP_ITERATION_FINISH_1_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_FINISH_1_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_FINISH_1_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_FINISH_1_DIGIT_3\n#    define BOOST_PP_ITERATION_FINISH_1 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_FINISH_1_DIGIT_3, BOOST_PP_ITERATION_FINISH_1_DIGIT_2, BOOST_PP_ITERATION_FINISH_1_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_FINISH_1_DIGIT_2\n#    define BOOST_PP_ITERATION_FINISH_1 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_FINISH_1_DIGIT_2, BOOST_PP_ITERATION_FINISH_1_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_FINISH_1 BOOST_PP_ITERATION_FINISH_1_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/upper2.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_FINISH_2\n#\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_1\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_2\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_3\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_4\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_5\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_6\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_7\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_8\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_9\n# undef BOOST_PP_ITERATION_FINISH_2_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_FINISH_2_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_FINISH_2_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_FINISH_2_DIGIT_3\n#    define BOOST_PP_ITERATION_FINISH_2 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_FINISH_2_DIGIT_3, BOOST_PP_ITERATION_FINISH_2_DIGIT_2, BOOST_PP_ITERATION_FINISH_2_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_FINISH_2_DIGIT_2\n#    define BOOST_PP_ITERATION_FINISH_2 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_FINISH_2_DIGIT_2, BOOST_PP_ITERATION_FINISH_2_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_FINISH_2 BOOST_PP_ITERATION_FINISH_2_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/upper3.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_FINISH_3\n#\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_1\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_2\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_3\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_4\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_5\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_6\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_7\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_8\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_9\n# undef BOOST_PP_ITERATION_FINISH_3_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_FINISH_3_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_FINISH_3_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_FINISH_3_DIGIT_3\n#    define BOOST_PP_ITERATION_FINISH_3 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_FINISH_3_DIGIT_3, BOOST_PP_ITERATION_FINISH_3_DIGIT_2, BOOST_PP_ITERATION_FINISH_3_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_FINISH_3_DIGIT_2\n#    define BOOST_PP_ITERATION_FINISH_3 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_FINISH_3_DIGIT_2, BOOST_PP_ITERATION_FINISH_3_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_FINISH_3 BOOST_PP_ITERATION_FINISH_3_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/upper4.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_FINISH_4\n#\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_1\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_2\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_3\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_4\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_5\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_6\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_7\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_8\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_9\n# undef BOOST_PP_ITERATION_FINISH_4_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_FINISH_4_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_FINISH_4_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_FINISH_4_DIGIT_3\n#    define BOOST_PP_ITERATION_FINISH_4 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_FINISH_4_DIGIT_3, BOOST_PP_ITERATION_FINISH_4_DIGIT_2, BOOST_PP_ITERATION_FINISH_4_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_FINISH_4_DIGIT_2\n#    define BOOST_PP_ITERATION_FINISH_4 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_FINISH_4_DIGIT_2, BOOST_PP_ITERATION_FINISH_4_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_FINISH_4 BOOST_PP_ITERATION_FINISH_4_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/bounds/upper5.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_ITERATION_FINISH_5\n#\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_1\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_2\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_3\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_4\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_5\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_6\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_7\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_8\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_9\n# undef BOOST_PP_ITERATION_FINISH_5_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define 
BOOST_PP_ITERATION_FINISH_5_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_ITERATION_FINISH_5_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_ITERATION_FINISH_5_DIGIT_3\n#    define BOOST_PP_ITERATION_FINISH_5 BOOST_PP_SLOT_CC_3(BOOST_PP_ITERATION_FINISH_5_DIGIT_3, BOOST_PP_ITERATION_FINISH_5_DIGIT_2, BOOST_PP_ITERATION_FINISH_5_DIGIT_1)\n# elif 
BOOST_PP_ITERATION_FINISH_5_DIGIT_2\n#    define BOOST_PP_ITERATION_FINISH_5 BOOST_PP_SLOT_CC_2(BOOST_PP_ITERATION_FINISH_5_DIGIT_2, BOOST_PP_ITERATION_FINISH_5_DIGIT_1)\n# else\n#    define BOOST_PP_ITERATION_FINISH_5 BOOST_PP_ITERATION_FINISH_5_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/finish.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_LOCAL_FE\n#\n# undef BOOST_PP_LOCAL_FE_DIGIT_1\n# undef BOOST_PP_LOCAL_FE_DIGIT_2\n# undef BOOST_PP_LOCAL_FE_DIGIT_3\n# undef BOOST_PP_LOCAL_FE_DIGIT_4\n# undef BOOST_PP_LOCAL_FE_DIGIT_5\n# undef BOOST_PP_LOCAL_FE_DIGIT_6\n# undef BOOST_PP_LOCAL_FE_DIGIT_7\n# undef BOOST_PP_LOCAL_FE_DIGIT_8\n# undef BOOST_PP_LOCAL_FE_DIGIT_9\n# undef BOOST_PP_LOCAL_FE_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define BOOST_PP_LOCAL_FE_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 1\n# elif 
BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_LOCAL_FE_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_LOCAL_FE_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_LOCAL_FE_DIGIT_3\n#    define BOOST_PP_LOCAL_FE() BOOST_PP_SLOT_CC_3(BOOST_PP_LOCAL_FE_DIGIT_3, BOOST_PP_LOCAL_FE_DIGIT_2, BOOST_PP_LOCAL_FE_DIGIT_1)\n# elif BOOST_PP_LOCAL_FE_DIGIT_2\n#    define BOOST_PP_LOCAL_FE() BOOST_PP_SLOT_CC_2(BOOST_PP_LOCAL_FE_DIGIT_2, BOOST_PP_LOCAL_FE_DIGIT_1)\n# else\n#    define BOOST_PP_LOCAL_FE() BOOST_PP_LOCAL_FE_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/forward1.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if defined(BOOST_PP_ITERATION_LIMITS)\n#    if !defined(BOOST_PP_FILENAME_1)\n#        error BOOST_PP_ERROR:  depth #1 filename is not defined\n#    endif\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/lower1.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/upper1.hpp>\n#    define BOOST_PP_ITERATION_FLAGS_1() 0\n#    undef BOOST_PP_ITERATION_LIMITS\n# elif defined(BOOST_PP_ITERATION_PARAMS_1)\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(0, BOOST_PP_ITERATION_PARAMS_1)\n#    include <boost/preprocessor/iteration/detail/bounds/lower1.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(1, BOOST_PP_ITERATION_PARAMS_1)\n#    include <boost/preprocessor/iteration/detail/bounds/upper1.hpp>\n#    define BOOST_PP_FILENAME_1 BOOST_PP_ARRAY_ELEM(2, BOOST_PP_ITERATION_PARAMS_1)\n#    if BOOST_PP_ARRAY_SIZE(BOOST_PP_ITERATION_PARAMS_1) >= 4\n#        define BOOST_PP_ITERATION_FLAGS_1() BOOST_PP_ARRAY_ELEM(3, BOOST_PP_ITERATION_PARAMS_1)\n#    else\n#        define BOOST_PP_ITERATION_FLAGS_1() 0\n#    endif\n# else\n#    error BOOST_PP_ERROR:  depth #1 iteration boundaries or filename not defined\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define 
BOOST_PP_ITERATION_DEPTH() 1\n#\n# define BOOST_PP_IS_ITERATING 1\n#\n# if (BOOST_PP_ITERATION_START_1) > (BOOST_PP_ITERATION_FINISH_1)\n#    include <boost/preprocessor/iteration/detail/iter/reverse1.hpp>\n# else\n#    if BOOST_PP_ITERATION_START_1 <= 0 && BOOST_PP_ITERATION_FINISH_1 >= 0\n#        define BOOST_PP_ITERATION_1 0\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 1 && BOOST_PP_ITERATION_FINISH_1 >= 1\n#        define BOOST_PP_ITERATION_1 1\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 2 && BOOST_PP_ITERATION_FINISH_1 >= 2\n#        define BOOST_PP_ITERATION_1 2\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 3 && BOOST_PP_ITERATION_FINISH_1 >= 3\n#        define BOOST_PP_ITERATION_1 3\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 4 && BOOST_PP_ITERATION_FINISH_1 >= 4\n#        define BOOST_PP_ITERATION_1 4\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 5 && BOOST_PP_ITERATION_FINISH_1 >= 5\n#        define BOOST_PP_ITERATION_1 5\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 6 && BOOST_PP_ITERATION_FINISH_1 >= 6\n#        define BOOST_PP_ITERATION_1 6\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 7 && BOOST_PP_ITERATION_FINISH_1 >= 7\n#        define BOOST_PP_ITERATION_1 7\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 8 && BOOST_PP_ITERATION_FINISH_1 >= 8\n#        define BOOST_PP_ITERATION_1 8\n#        include 
BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 9 && BOOST_PP_ITERATION_FINISH_1 >= 9\n#        define BOOST_PP_ITERATION_1 9\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 10 && BOOST_PP_ITERATION_FINISH_1 >= 10\n#        define BOOST_PP_ITERATION_1 10\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 11 && BOOST_PP_ITERATION_FINISH_1 >= 11\n#        define BOOST_PP_ITERATION_1 11\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 12 && BOOST_PP_ITERATION_FINISH_1 >= 12\n#        define BOOST_PP_ITERATION_1 12\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 13 && BOOST_PP_ITERATION_FINISH_1 >= 13\n#        define BOOST_PP_ITERATION_1 13\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 14 && BOOST_PP_ITERATION_FINISH_1 >= 14\n#        define BOOST_PP_ITERATION_1 14\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 15 && BOOST_PP_ITERATION_FINISH_1 >= 15\n#        define BOOST_PP_ITERATION_1 15\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 16 && BOOST_PP_ITERATION_FINISH_1 >= 16\n#        define BOOST_PP_ITERATION_1 16\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 17 && BOOST_PP_ITERATION_FINISH_1 >= 17\n#        define BOOST_PP_ITERATION_1 17\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 18 && 
BOOST_PP_ITERATION_FINISH_1 >= 18\n#        define BOOST_PP_ITERATION_1 18\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 19 && BOOST_PP_ITERATION_FINISH_1 >= 19\n#        define BOOST_PP_ITERATION_1 19\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 20 && BOOST_PP_ITERATION_FINISH_1 >= 20\n#        define BOOST_PP_ITERATION_1 20\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 21 && BOOST_PP_ITERATION_FINISH_1 >= 21\n#        define BOOST_PP_ITERATION_1 21\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 22 && BOOST_PP_ITERATION_FINISH_1 >= 22\n#        define BOOST_PP_ITERATION_1 22\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 23 && BOOST_PP_ITERATION_FINISH_1 >= 23\n#        define BOOST_PP_ITERATION_1 23\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 24 && BOOST_PP_ITERATION_FINISH_1 >= 24\n#        define BOOST_PP_ITERATION_1 24\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 25 && BOOST_PP_ITERATION_FINISH_1 >= 25\n#        define BOOST_PP_ITERATION_1 25\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 26 && BOOST_PP_ITERATION_FINISH_1 >= 26\n#        define BOOST_PP_ITERATION_1 26\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 27 && BOOST_PP_ITERATION_FINISH_1 >= 27\n#        define BOOST_PP_ITERATION_1 27\n#        include BOOST_PP_FILENAME_1\n#        undef 
BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 28 && BOOST_PP_ITERATION_FINISH_1 >= 28\n#        define BOOST_PP_ITERATION_1 28\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 29 && BOOST_PP_ITERATION_FINISH_1 >= 29\n#        define BOOST_PP_ITERATION_1 29\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 30 && BOOST_PP_ITERATION_FINISH_1 >= 30\n#        define BOOST_PP_ITERATION_1 30\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 31 && BOOST_PP_ITERATION_FINISH_1 >= 31\n#        define BOOST_PP_ITERATION_1 31\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 32 && BOOST_PP_ITERATION_FINISH_1 >= 32\n#        define BOOST_PP_ITERATION_1 32\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 33 && BOOST_PP_ITERATION_FINISH_1 >= 33\n#        define BOOST_PP_ITERATION_1 33\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 34 && BOOST_PP_ITERATION_FINISH_1 >= 34\n#        define BOOST_PP_ITERATION_1 34\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 35 && BOOST_PP_ITERATION_FINISH_1 >= 35\n#        define BOOST_PP_ITERATION_1 35\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 36 && BOOST_PP_ITERATION_FINISH_1 >= 36\n#        define BOOST_PP_ITERATION_1 36\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 37 && BOOST_PP_ITERATION_FINISH_1 >= 37\n#        define 
BOOST_PP_ITERATION_1 37\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 38 && BOOST_PP_ITERATION_FINISH_1 >= 38\n#        define BOOST_PP_ITERATION_1 38\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 39 && BOOST_PP_ITERATION_FINISH_1 >= 39\n#        define BOOST_PP_ITERATION_1 39\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 40 && BOOST_PP_ITERATION_FINISH_1 >= 40\n#        define BOOST_PP_ITERATION_1 40\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 41 && BOOST_PP_ITERATION_FINISH_1 >= 41\n#        define BOOST_PP_ITERATION_1 41\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 42 && BOOST_PP_ITERATION_FINISH_1 >= 42\n#        define BOOST_PP_ITERATION_1 42\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 43 && BOOST_PP_ITERATION_FINISH_1 >= 43\n#        define BOOST_PP_ITERATION_1 43\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 44 && BOOST_PP_ITERATION_FINISH_1 >= 44\n#        define BOOST_PP_ITERATION_1 44\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 45 && BOOST_PP_ITERATION_FINISH_1 >= 45\n#        define BOOST_PP_ITERATION_1 45\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 46 && BOOST_PP_ITERATION_FINISH_1 >= 46\n#        define BOOST_PP_ITERATION_1 46\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 47 && BOOST_PP_ITERATION_FINISH_1 >= 47\n#        define BOOST_PP_ITERATION_1 47\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 48 && BOOST_PP_ITERATION_FINISH_1 >= 48\n#        define BOOST_PP_ITERATION_1 48\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 49 && BOOST_PP_ITERATION_FINISH_1 >= 49\n#        define BOOST_PP_ITERATION_1 49\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 50 && BOOST_PP_ITERATION_FINISH_1 >= 50\n#        define BOOST_PP_ITERATION_1 50\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 51 && BOOST_PP_ITERATION_FINISH_1 >= 51\n#        define BOOST_PP_ITERATION_1 51\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 52 && BOOST_PP_ITERATION_FINISH_1 >= 52\n#        define BOOST_PP_ITERATION_1 52\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 53 && BOOST_PP_ITERATION_FINISH_1 >= 53\n#        define BOOST_PP_ITERATION_1 53\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 54 && BOOST_PP_ITERATION_FINISH_1 >= 54\n#        define BOOST_PP_ITERATION_1 54\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 55 && BOOST_PP_ITERATION_FINISH_1 >= 55\n#        define BOOST_PP_ITERATION_1 55\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 56 && BOOST_PP_ITERATION_FINISH_1 >= 56\n#        define BOOST_PP_ITERATION_1 56\n#        include 
BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 57 && BOOST_PP_ITERATION_FINISH_1 >= 57\n#        define BOOST_PP_ITERATION_1 57\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 58 && BOOST_PP_ITERATION_FINISH_1 >= 58\n#        define BOOST_PP_ITERATION_1 58\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 59 && BOOST_PP_ITERATION_FINISH_1 >= 59\n#        define BOOST_PP_ITERATION_1 59\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 60 && BOOST_PP_ITERATION_FINISH_1 >= 60\n#        define BOOST_PP_ITERATION_1 60\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 61 && BOOST_PP_ITERATION_FINISH_1 >= 61\n#        define BOOST_PP_ITERATION_1 61\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 62 && BOOST_PP_ITERATION_FINISH_1 >= 62\n#        define BOOST_PP_ITERATION_1 62\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 63 && BOOST_PP_ITERATION_FINISH_1 >= 63\n#        define BOOST_PP_ITERATION_1 63\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 64 && BOOST_PP_ITERATION_FINISH_1 >= 64\n#        define BOOST_PP_ITERATION_1 64\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 65 && BOOST_PP_ITERATION_FINISH_1 >= 65\n#        define BOOST_PP_ITERATION_1 65\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 66 && 
BOOST_PP_ITERATION_FINISH_1 >= 66\n#        define BOOST_PP_ITERATION_1 66\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 67 && BOOST_PP_ITERATION_FINISH_1 >= 67\n#        define BOOST_PP_ITERATION_1 67\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 68 && BOOST_PP_ITERATION_FINISH_1 >= 68\n#        define BOOST_PP_ITERATION_1 68\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 69 && BOOST_PP_ITERATION_FINISH_1 >= 69\n#        define BOOST_PP_ITERATION_1 69\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 70 && BOOST_PP_ITERATION_FINISH_1 >= 70\n#        define BOOST_PP_ITERATION_1 70\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 71 && BOOST_PP_ITERATION_FINISH_1 >= 71\n#        define BOOST_PP_ITERATION_1 71\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 72 && BOOST_PP_ITERATION_FINISH_1 >= 72\n#        define BOOST_PP_ITERATION_1 72\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 73 && BOOST_PP_ITERATION_FINISH_1 >= 73\n#        define BOOST_PP_ITERATION_1 73\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 74 && BOOST_PP_ITERATION_FINISH_1 >= 74\n#        define BOOST_PP_ITERATION_1 74\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 75 && BOOST_PP_ITERATION_FINISH_1 >= 75\n#        define BOOST_PP_ITERATION_1 75\n#        include BOOST_PP_FILENAME_1\n#        undef 
BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 76 && BOOST_PP_ITERATION_FINISH_1 >= 76\n#        define BOOST_PP_ITERATION_1 76\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 77 && BOOST_PP_ITERATION_FINISH_1 >= 77\n#        define BOOST_PP_ITERATION_1 77\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 78 && BOOST_PP_ITERATION_FINISH_1 >= 78\n#        define BOOST_PP_ITERATION_1 78\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 79 && BOOST_PP_ITERATION_FINISH_1 >= 79\n#        define BOOST_PP_ITERATION_1 79\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 80 && BOOST_PP_ITERATION_FINISH_1 >= 80\n#        define BOOST_PP_ITERATION_1 80\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 81 && BOOST_PP_ITERATION_FINISH_1 >= 81\n#        define BOOST_PP_ITERATION_1 81\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 82 && BOOST_PP_ITERATION_FINISH_1 >= 82\n#        define BOOST_PP_ITERATION_1 82\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 83 && BOOST_PP_ITERATION_FINISH_1 >= 83\n#        define BOOST_PP_ITERATION_1 83\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 84 && BOOST_PP_ITERATION_FINISH_1 >= 84\n#        define BOOST_PP_ITERATION_1 84\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 85 && BOOST_PP_ITERATION_FINISH_1 >= 85\n#        define 
BOOST_PP_ITERATION_1 85\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 86 && BOOST_PP_ITERATION_FINISH_1 >= 86\n#        define BOOST_PP_ITERATION_1 86\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 87 && BOOST_PP_ITERATION_FINISH_1 >= 87\n#        define BOOST_PP_ITERATION_1 87\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 88 && BOOST_PP_ITERATION_FINISH_1 >= 88\n#        define BOOST_PP_ITERATION_1 88\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 89 && BOOST_PP_ITERATION_FINISH_1 >= 89\n#        define BOOST_PP_ITERATION_1 89\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 90 && BOOST_PP_ITERATION_FINISH_1 >= 90\n#        define BOOST_PP_ITERATION_1 90\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 91 && BOOST_PP_ITERATION_FINISH_1 >= 91\n#        define BOOST_PP_ITERATION_1 91\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 92 && BOOST_PP_ITERATION_FINISH_1 >= 92\n#        define BOOST_PP_ITERATION_1 92\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 93 && BOOST_PP_ITERATION_FINISH_1 >= 93\n#        define BOOST_PP_ITERATION_1 93\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 94 && BOOST_PP_ITERATION_FINISH_1 >= 94\n#        define BOOST_PP_ITERATION_1 94\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 95 && BOOST_PP_ITERATION_FINISH_1 >= 95\n#        define BOOST_PP_ITERATION_1 95\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 96 && BOOST_PP_ITERATION_FINISH_1 >= 96\n#        define BOOST_PP_ITERATION_1 96\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 97 && BOOST_PP_ITERATION_FINISH_1 >= 97\n#        define BOOST_PP_ITERATION_1 97\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 98 && BOOST_PP_ITERATION_FINISH_1 >= 98\n#        define BOOST_PP_ITERATION_1 98\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 99 && BOOST_PP_ITERATION_FINISH_1 >= 99\n#        define BOOST_PP_ITERATION_1 99\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 100 && BOOST_PP_ITERATION_FINISH_1 >= 100\n#        define BOOST_PP_ITERATION_1 100\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 101 && BOOST_PP_ITERATION_FINISH_1 >= 101\n#        define BOOST_PP_ITERATION_1 101\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 102 && BOOST_PP_ITERATION_FINISH_1 >= 102\n#        define BOOST_PP_ITERATION_1 102\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 103 && BOOST_PP_ITERATION_FINISH_1 >= 103\n#        define BOOST_PP_ITERATION_1 103\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 104 && BOOST_PP_ITERATION_FINISH_1 >= 104\n#        define BOOST_PP_ITERATION_1 104\n#        
include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 105 && BOOST_PP_ITERATION_FINISH_1 >= 105\n#        define BOOST_PP_ITERATION_1 105\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 106 && BOOST_PP_ITERATION_FINISH_1 >= 106\n#        define BOOST_PP_ITERATION_1 106\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 107 && BOOST_PP_ITERATION_FINISH_1 >= 107\n#        define BOOST_PP_ITERATION_1 107\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 108 && BOOST_PP_ITERATION_FINISH_1 >= 108\n#        define BOOST_PP_ITERATION_1 108\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 109 && BOOST_PP_ITERATION_FINISH_1 >= 109\n#        define BOOST_PP_ITERATION_1 109\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 110 && BOOST_PP_ITERATION_FINISH_1 >= 110\n#        define BOOST_PP_ITERATION_1 110\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 111 && BOOST_PP_ITERATION_FINISH_1 >= 111\n#        define BOOST_PP_ITERATION_1 111\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 112 && BOOST_PP_ITERATION_FINISH_1 >= 112\n#        define BOOST_PP_ITERATION_1 112\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 113 && BOOST_PP_ITERATION_FINISH_1 >= 113\n#        define BOOST_PP_ITERATION_1 113\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 114 && BOOST_PP_ITERATION_FINISH_1 >= 114\n#        define BOOST_PP_ITERATION_1 114\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 115 && BOOST_PP_ITERATION_FINISH_1 >= 115\n#        define BOOST_PP_ITERATION_1 115\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 116 && BOOST_PP_ITERATION_FINISH_1 >= 116\n#        define BOOST_PP_ITERATION_1 116\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 117 && BOOST_PP_ITERATION_FINISH_1 >= 117\n#        define BOOST_PP_ITERATION_1 117\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 118 && BOOST_PP_ITERATION_FINISH_1 >= 118\n#        define BOOST_PP_ITERATION_1 118\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 119 && BOOST_PP_ITERATION_FINISH_1 >= 119\n#        define BOOST_PP_ITERATION_1 119\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 120 && BOOST_PP_ITERATION_FINISH_1 >= 120\n#        define BOOST_PP_ITERATION_1 120\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 121 && BOOST_PP_ITERATION_FINISH_1 >= 121\n#        define BOOST_PP_ITERATION_1 121\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 122 && BOOST_PP_ITERATION_FINISH_1 >= 122\n#        define BOOST_PP_ITERATION_1 122\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 123 && BOOST_PP_ITERATION_FINISH_1 >= 123\n#        define BOOST_PP_ITERATION_1 
123\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 124 && BOOST_PP_ITERATION_FINISH_1 >= 124\n#        define BOOST_PP_ITERATION_1 124\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 125 && BOOST_PP_ITERATION_FINISH_1 >= 125\n#        define BOOST_PP_ITERATION_1 125\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 126 && BOOST_PP_ITERATION_FINISH_1 >= 126\n#        define BOOST_PP_ITERATION_1 126\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 127 && BOOST_PP_ITERATION_FINISH_1 >= 127\n#        define BOOST_PP_ITERATION_1 127\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 128 && BOOST_PP_ITERATION_FINISH_1 >= 128\n#        define BOOST_PP_ITERATION_1 128\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 129 && BOOST_PP_ITERATION_FINISH_1 >= 129\n#        define BOOST_PP_ITERATION_1 129\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 130 && BOOST_PP_ITERATION_FINISH_1 >= 130\n#        define BOOST_PP_ITERATION_1 130\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 131 && BOOST_PP_ITERATION_FINISH_1 >= 131\n#        define BOOST_PP_ITERATION_1 131\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 132 && BOOST_PP_ITERATION_FINISH_1 >= 132\n#        define BOOST_PP_ITERATION_1 132\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 133 && BOOST_PP_ITERATION_FINISH_1 >= 133\n#        define BOOST_PP_ITERATION_1 133\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 134 && BOOST_PP_ITERATION_FINISH_1 >= 134\n#        define BOOST_PP_ITERATION_1 134\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 135 && BOOST_PP_ITERATION_FINISH_1 >= 135\n#        define BOOST_PP_ITERATION_1 135\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 136 && BOOST_PP_ITERATION_FINISH_1 >= 136\n#        define BOOST_PP_ITERATION_1 136\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 137 && BOOST_PP_ITERATION_FINISH_1 >= 137\n#        define BOOST_PP_ITERATION_1 137\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 138 && BOOST_PP_ITERATION_FINISH_1 >= 138\n#        define BOOST_PP_ITERATION_1 138\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 139 && BOOST_PP_ITERATION_FINISH_1 >= 139\n#        define BOOST_PP_ITERATION_1 139\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 140 && BOOST_PP_ITERATION_FINISH_1 >= 140\n#        define BOOST_PP_ITERATION_1 140\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 141 && BOOST_PP_ITERATION_FINISH_1 >= 141\n#        define BOOST_PP_ITERATION_1 141\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 142 && BOOST_PP_ITERATION_FINISH_1 >= 142\n#        define BOOST_PP_ITERATION_1 
142\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 143 && BOOST_PP_ITERATION_FINISH_1 >= 143\n#        define BOOST_PP_ITERATION_1 143\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 144 && BOOST_PP_ITERATION_FINISH_1 >= 144\n#        define BOOST_PP_ITERATION_1 144\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 145 && BOOST_PP_ITERATION_FINISH_1 >= 145\n#        define BOOST_PP_ITERATION_1 145\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 146 && BOOST_PP_ITERATION_FINISH_1 >= 146\n#        define BOOST_PP_ITERATION_1 146\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 147 && BOOST_PP_ITERATION_FINISH_1 >= 147\n#        define BOOST_PP_ITERATION_1 147\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 148 && BOOST_PP_ITERATION_FINISH_1 >= 148\n#        define BOOST_PP_ITERATION_1 148\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 149 && BOOST_PP_ITERATION_FINISH_1 >= 149\n#        define BOOST_PP_ITERATION_1 149\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 150 && BOOST_PP_ITERATION_FINISH_1 >= 150\n#        define BOOST_PP_ITERATION_1 150\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 151 && BOOST_PP_ITERATION_FINISH_1 >= 151\n#        define BOOST_PP_ITERATION_1 151\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 152 && BOOST_PP_ITERATION_FINISH_1 >= 152\n#        define BOOST_PP_ITERATION_1 152\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 153 && BOOST_PP_ITERATION_FINISH_1 >= 153\n#        define BOOST_PP_ITERATION_1 153\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 154 && BOOST_PP_ITERATION_FINISH_1 >= 154\n#        define BOOST_PP_ITERATION_1 154\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 155 && BOOST_PP_ITERATION_FINISH_1 >= 155\n#        define BOOST_PP_ITERATION_1 155\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 156 && BOOST_PP_ITERATION_FINISH_1 >= 156\n#        define BOOST_PP_ITERATION_1 156\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 157 && BOOST_PP_ITERATION_FINISH_1 >= 157\n#        define BOOST_PP_ITERATION_1 157\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 158 && BOOST_PP_ITERATION_FINISH_1 >= 158\n#        define BOOST_PP_ITERATION_1 158\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 159 && BOOST_PP_ITERATION_FINISH_1 >= 159\n#        define BOOST_PP_ITERATION_1 159\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 160 && BOOST_PP_ITERATION_FINISH_1 >= 160\n#        define BOOST_PP_ITERATION_1 160\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 161 && BOOST_PP_ITERATION_FINISH_1 >= 161\n#        define BOOST_PP_ITERATION_1 
161\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 162 && BOOST_PP_ITERATION_FINISH_1 >= 162\n#        define BOOST_PP_ITERATION_1 162\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 163 && BOOST_PP_ITERATION_FINISH_1 >= 163\n#        define BOOST_PP_ITERATION_1 163\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 164 && BOOST_PP_ITERATION_FINISH_1 >= 164\n#        define BOOST_PP_ITERATION_1 164\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 165 && BOOST_PP_ITERATION_FINISH_1 >= 165\n#        define BOOST_PP_ITERATION_1 165\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 166 && BOOST_PP_ITERATION_FINISH_1 >= 166\n#        define BOOST_PP_ITERATION_1 166\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 167 && BOOST_PP_ITERATION_FINISH_1 >= 167\n#        define BOOST_PP_ITERATION_1 167\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 168 && BOOST_PP_ITERATION_FINISH_1 >= 168\n#        define BOOST_PP_ITERATION_1 168\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 169 && BOOST_PP_ITERATION_FINISH_1 >= 169\n#        define BOOST_PP_ITERATION_1 169\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 170 && BOOST_PP_ITERATION_FINISH_1 >= 170\n#        define BOOST_PP_ITERATION_1 170\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 171 && BOOST_PP_ITERATION_FINISH_1 >= 171\n#        define BOOST_PP_ITERATION_1 171\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 172 && BOOST_PP_ITERATION_FINISH_1 >= 172\n#        define BOOST_PP_ITERATION_1 172\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 173 && BOOST_PP_ITERATION_FINISH_1 >= 173\n#        define BOOST_PP_ITERATION_1 173\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 174 && BOOST_PP_ITERATION_FINISH_1 >= 174\n#        define BOOST_PP_ITERATION_1 174\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 175 && BOOST_PP_ITERATION_FINISH_1 >= 175\n#        define BOOST_PP_ITERATION_1 175\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 176 && BOOST_PP_ITERATION_FINISH_1 >= 176\n#        define BOOST_PP_ITERATION_1 176\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 177 && BOOST_PP_ITERATION_FINISH_1 >= 177\n#        define BOOST_PP_ITERATION_1 177\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 178 && BOOST_PP_ITERATION_FINISH_1 >= 178\n#        define BOOST_PP_ITERATION_1 178\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 179 && BOOST_PP_ITERATION_FINISH_1 >= 179\n#        define BOOST_PP_ITERATION_1 179\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 180 && BOOST_PP_ITERATION_FINISH_1 >= 180\n#        define BOOST_PP_ITERATION_1 
180\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 181 && BOOST_PP_ITERATION_FINISH_1 >= 181\n#        define BOOST_PP_ITERATION_1 181\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 182 && BOOST_PP_ITERATION_FINISH_1 >= 182\n#        define BOOST_PP_ITERATION_1 182\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 183 && BOOST_PP_ITERATION_FINISH_1 >= 183\n#        define BOOST_PP_ITERATION_1 183\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 184 && BOOST_PP_ITERATION_FINISH_1 >= 184\n#        define BOOST_PP_ITERATION_1 184\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 185 && BOOST_PP_ITERATION_FINISH_1 >= 185\n#        define BOOST_PP_ITERATION_1 185\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 186 && BOOST_PP_ITERATION_FINISH_1 >= 186\n#        define BOOST_PP_ITERATION_1 186\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 187 && BOOST_PP_ITERATION_FINISH_1 >= 187\n#        define BOOST_PP_ITERATION_1 187\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 188 && BOOST_PP_ITERATION_FINISH_1 >= 188\n#        define BOOST_PP_ITERATION_1 188\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 189 && BOOST_PP_ITERATION_FINISH_1 >= 189\n#        define BOOST_PP_ITERATION_1 189\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 190 && BOOST_PP_ITERATION_FINISH_1 >= 190\n#        define BOOST_PP_ITERATION_1 190\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 191 && BOOST_PP_ITERATION_FINISH_1 >= 191\n#        define BOOST_PP_ITERATION_1 191\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 192 && BOOST_PP_ITERATION_FINISH_1 >= 192\n#        define BOOST_PP_ITERATION_1 192\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 193 && BOOST_PP_ITERATION_FINISH_1 >= 193\n#        define BOOST_PP_ITERATION_1 193\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 194 && BOOST_PP_ITERATION_FINISH_1 >= 194\n#        define BOOST_PP_ITERATION_1 194\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 195 && BOOST_PP_ITERATION_FINISH_1 >= 195\n#        define BOOST_PP_ITERATION_1 195\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 196 && BOOST_PP_ITERATION_FINISH_1 >= 196\n#        define BOOST_PP_ITERATION_1 196\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 197 && BOOST_PP_ITERATION_FINISH_1 >= 197\n#        define BOOST_PP_ITERATION_1 197\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 198 && BOOST_PP_ITERATION_FINISH_1 >= 198\n#        define BOOST_PP_ITERATION_1 198\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 199 && BOOST_PP_ITERATION_FINISH_1 >= 199\n#        define BOOST_PP_ITERATION_1 
199\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 200 && BOOST_PP_ITERATION_FINISH_1 >= 200\n#        define BOOST_PP_ITERATION_1 200\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 201 && BOOST_PP_ITERATION_FINISH_1 >= 201\n#        define BOOST_PP_ITERATION_1 201\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 202 && BOOST_PP_ITERATION_FINISH_1 >= 202\n#        define BOOST_PP_ITERATION_1 202\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 203 && BOOST_PP_ITERATION_FINISH_1 >= 203\n#        define BOOST_PP_ITERATION_1 203\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 204 && BOOST_PP_ITERATION_FINISH_1 >= 204\n#        define BOOST_PP_ITERATION_1 204\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 205 && BOOST_PP_ITERATION_FINISH_1 >= 205\n#        define BOOST_PP_ITERATION_1 205\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 206 && BOOST_PP_ITERATION_FINISH_1 >= 206\n#        define BOOST_PP_ITERATION_1 206\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 207 && BOOST_PP_ITERATION_FINISH_1 >= 207\n#        define BOOST_PP_ITERATION_1 207\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 208 && BOOST_PP_ITERATION_FINISH_1 >= 208\n#        define BOOST_PP_ITERATION_1 208\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 209 && BOOST_PP_ITERATION_FINISH_1 >= 209\n#        define BOOST_PP_ITERATION_1 209\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 210 && BOOST_PP_ITERATION_FINISH_1 >= 210\n#        define BOOST_PP_ITERATION_1 210\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 211 && BOOST_PP_ITERATION_FINISH_1 >= 211\n#        define BOOST_PP_ITERATION_1 211\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 212 && BOOST_PP_ITERATION_FINISH_1 >= 212\n#        define BOOST_PP_ITERATION_1 212\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 213 && BOOST_PP_ITERATION_FINISH_1 >= 213\n#        define BOOST_PP_ITERATION_1 213\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 214 && BOOST_PP_ITERATION_FINISH_1 >= 214\n#        define BOOST_PP_ITERATION_1 214\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 215 && BOOST_PP_ITERATION_FINISH_1 >= 215\n#        define BOOST_PP_ITERATION_1 215\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 216 && BOOST_PP_ITERATION_FINISH_1 >= 216\n#        define BOOST_PP_ITERATION_1 216\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 217 && BOOST_PP_ITERATION_FINISH_1 >= 217\n#        define BOOST_PP_ITERATION_1 217\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 218 && BOOST_PP_ITERATION_FINISH_1 >= 218\n#        define BOOST_PP_ITERATION_1 
218\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 219 && BOOST_PP_ITERATION_FINISH_1 >= 219\n#        define BOOST_PP_ITERATION_1 219\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 220 && BOOST_PP_ITERATION_FINISH_1 >= 220\n#        define BOOST_PP_ITERATION_1 220\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 221 && BOOST_PP_ITERATION_FINISH_1 >= 221\n#        define BOOST_PP_ITERATION_1 221\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 222 && BOOST_PP_ITERATION_FINISH_1 >= 222\n#        define BOOST_PP_ITERATION_1 222\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 223 && BOOST_PP_ITERATION_FINISH_1 >= 223\n#        define BOOST_PP_ITERATION_1 223\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 224 && BOOST_PP_ITERATION_FINISH_1 >= 224\n#        define BOOST_PP_ITERATION_1 224\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 225 && BOOST_PP_ITERATION_FINISH_1 >= 225\n#        define BOOST_PP_ITERATION_1 225\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 226 && BOOST_PP_ITERATION_FINISH_1 >= 226\n#        define BOOST_PP_ITERATION_1 226\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 227 && BOOST_PP_ITERATION_FINISH_1 >= 227\n#        define BOOST_PP_ITERATION_1 227\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 228 && BOOST_PP_ITERATION_FINISH_1 >= 228\n#        define BOOST_PP_ITERATION_1 228\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 229 && BOOST_PP_ITERATION_FINISH_1 >= 229\n#        define BOOST_PP_ITERATION_1 229\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 230 && BOOST_PP_ITERATION_FINISH_1 >= 230\n#        define BOOST_PP_ITERATION_1 230\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 231 && BOOST_PP_ITERATION_FINISH_1 >= 231\n#        define BOOST_PP_ITERATION_1 231\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 232 && BOOST_PP_ITERATION_FINISH_1 >= 232\n#        define BOOST_PP_ITERATION_1 232\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 233 && BOOST_PP_ITERATION_FINISH_1 >= 233\n#        define BOOST_PP_ITERATION_1 233\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 234 && BOOST_PP_ITERATION_FINISH_1 >= 234\n#        define BOOST_PP_ITERATION_1 234\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 235 && BOOST_PP_ITERATION_FINISH_1 >= 235\n#        define BOOST_PP_ITERATION_1 235\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 236 && BOOST_PP_ITERATION_FINISH_1 >= 236\n#        define BOOST_PP_ITERATION_1 236\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 237 && BOOST_PP_ITERATION_FINISH_1 >= 237\n#        define BOOST_PP_ITERATION_1 
237\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 238 && BOOST_PP_ITERATION_FINISH_1 >= 238\n#        define BOOST_PP_ITERATION_1 238\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 239 && BOOST_PP_ITERATION_FINISH_1 >= 239\n#        define BOOST_PP_ITERATION_1 239\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 240 && BOOST_PP_ITERATION_FINISH_1 >= 240\n#        define BOOST_PP_ITERATION_1 240\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 241 && BOOST_PP_ITERATION_FINISH_1 >= 241\n#        define BOOST_PP_ITERATION_1 241\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 242 && BOOST_PP_ITERATION_FINISH_1 >= 242\n#        define BOOST_PP_ITERATION_1 242\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 243 && BOOST_PP_ITERATION_FINISH_1 >= 243\n#        define BOOST_PP_ITERATION_1 243\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 244 && BOOST_PP_ITERATION_FINISH_1 >= 244\n#        define BOOST_PP_ITERATION_1 244\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 245 && BOOST_PP_ITERATION_FINISH_1 >= 245\n#        define BOOST_PP_ITERATION_1 245\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 246 && BOOST_PP_ITERATION_FINISH_1 >= 246\n#        define BOOST_PP_ITERATION_1 246\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if 
BOOST_PP_ITERATION_START_1 <= 247 && BOOST_PP_ITERATION_FINISH_1 >= 247\n#        define BOOST_PP_ITERATION_1 247\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 248 && BOOST_PP_ITERATION_FINISH_1 >= 248\n#        define BOOST_PP_ITERATION_1 248\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 249 && BOOST_PP_ITERATION_FINISH_1 >= 249\n#        define BOOST_PP_ITERATION_1 249\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 250 && BOOST_PP_ITERATION_FINISH_1 >= 250\n#        define BOOST_PP_ITERATION_1 250\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 251 && BOOST_PP_ITERATION_FINISH_1 >= 251\n#        define BOOST_PP_ITERATION_1 251\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 252 && BOOST_PP_ITERATION_FINISH_1 >= 252\n#        define BOOST_PP_ITERATION_1 252\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 253 && BOOST_PP_ITERATION_FINISH_1 >= 253\n#        define BOOST_PP_ITERATION_1 253\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 254 && BOOST_PP_ITERATION_FINISH_1 >= 254\n#        define BOOST_PP_ITERATION_1 254\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 255 && BOOST_PP_ITERATION_FINISH_1 >= 255\n#        define BOOST_PP_ITERATION_1 255\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n#    if BOOST_PP_ITERATION_START_1 <= 256 && BOOST_PP_ITERATION_FINISH_1 >= 256\n#        define BOOST_PP_ITERATION_1 
256\n#        include BOOST_PP_FILENAME_1\n#        undef BOOST_PP_ITERATION_1\n#    endif\n# endif\n#\n# undef BOOST_PP_IS_ITERATING\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define BOOST_PP_ITERATION_DEPTH() 0\n#\n# undef BOOST_PP_ITERATION_START_1\n# undef BOOST_PP_ITERATION_FINISH_1\n# undef BOOST_PP_FILENAME_1\n#\n# undef BOOST_PP_ITERATION_FLAGS_1\n# undef BOOST_PP_ITERATION_PARAMS_1\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/forward2.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if defined(BOOST_PP_ITERATION_LIMITS)\n#    if !defined(BOOST_PP_FILENAME_2)\n#        error BOOST_PP_ERROR:  depth #2 filename is not defined\n#    endif\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/lower2.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/upper2.hpp>\n#    define BOOST_PP_ITERATION_FLAGS_2() 0\n#    undef BOOST_PP_ITERATION_LIMITS\n# elif defined(BOOST_PP_ITERATION_PARAMS_2)\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(0, BOOST_PP_ITERATION_PARAMS_2)\n#    include <boost/preprocessor/iteration/detail/bounds/lower2.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(1, BOOST_PP_ITERATION_PARAMS_2)\n#    include <boost/preprocessor/iteration/detail/bounds/upper2.hpp>\n#    define BOOST_PP_FILENAME_2 BOOST_PP_ARRAY_ELEM(2, BOOST_PP_ITERATION_PARAMS_2)\n#    if BOOST_PP_ARRAY_SIZE(BOOST_PP_ITERATION_PARAMS_2) >= 4\n#        define BOOST_PP_ITERATION_FLAGS_2() BOOST_PP_ARRAY_ELEM(3, BOOST_PP_ITERATION_PARAMS_2)\n#    else\n#        define BOOST_PP_ITERATION_FLAGS_2() 0\n#    endif\n# else\n#    error BOOST_PP_ERROR:  depth #2 iteration boundaries or filename not defined\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define 
BOOST_PP_ITERATION_DEPTH() 2\n#\n# if (BOOST_PP_ITERATION_START_2) > (BOOST_PP_ITERATION_FINISH_2)\n#    include <boost/preprocessor/iteration/detail/iter/reverse2.hpp>\n# else\n#    if BOOST_PP_ITERATION_START_2 <= 0 && BOOST_PP_ITERATION_FINISH_2 >= 0\n#        define BOOST_PP_ITERATION_2 0\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 1 && BOOST_PP_ITERATION_FINISH_2 >= 1\n#        define BOOST_PP_ITERATION_2 1\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 2 && BOOST_PP_ITERATION_FINISH_2 >= 2\n#        define BOOST_PP_ITERATION_2 2\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 3 && BOOST_PP_ITERATION_FINISH_2 >= 3\n#        define BOOST_PP_ITERATION_2 3\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 4 && BOOST_PP_ITERATION_FINISH_2 >= 4\n#        define BOOST_PP_ITERATION_2 4\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 5 && BOOST_PP_ITERATION_FINISH_2 >= 5\n#        define BOOST_PP_ITERATION_2 5\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 6 && BOOST_PP_ITERATION_FINISH_2 >= 6\n#        define BOOST_PP_ITERATION_2 6\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 7 && BOOST_PP_ITERATION_FINISH_2 >= 7\n#        define BOOST_PP_ITERATION_2 7\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 8 && BOOST_PP_ITERATION_FINISH_2 >= 8\n#        define BOOST_PP_ITERATION_2 8\n#        include BOOST_PP_FILENAME_2\n#        undef 
BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 9 && BOOST_PP_ITERATION_FINISH_2 >= 9\n#        define BOOST_PP_ITERATION_2 9\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 10 && BOOST_PP_ITERATION_FINISH_2 >= 10\n#        define BOOST_PP_ITERATION_2 10\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 11 && BOOST_PP_ITERATION_FINISH_2 >= 11\n#        define BOOST_PP_ITERATION_2 11\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 12 && BOOST_PP_ITERATION_FINISH_2 >= 12\n#        define BOOST_PP_ITERATION_2 12\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 13 && BOOST_PP_ITERATION_FINISH_2 >= 13\n#        define BOOST_PP_ITERATION_2 13\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 14 && BOOST_PP_ITERATION_FINISH_2 >= 14\n#        define BOOST_PP_ITERATION_2 14\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 15 && BOOST_PP_ITERATION_FINISH_2 >= 15\n#        define BOOST_PP_ITERATION_2 15\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 16 && BOOST_PP_ITERATION_FINISH_2 >= 16\n#        define BOOST_PP_ITERATION_2 16\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 17 && BOOST_PP_ITERATION_FINISH_2 >= 17\n#        define BOOST_PP_ITERATION_2 17\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 18 && BOOST_PP_ITERATION_FINISH_2 >= 18\n#        define 
BOOST_PP_ITERATION_2 18\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 19 && BOOST_PP_ITERATION_FINISH_2 >= 19\n#        define BOOST_PP_ITERATION_2 19\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 20 && BOOST_PP_ITERATION_FINISH_2 >= 20\n#        define BOOST_PP_ITERATION_2 20\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 21 && BOOST_PP_ITERATION_FINISH_2 >= 21\n#        define BOOST_PP_ITERATION_2 21\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 22 && BOOST_PP_ITERATION_FINISH_2 >= 22\n#        define BOOST_PP_ITERATION_2 22\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 23 && BOOST_PP_ITERATION_FINISH_2 >= 23\n#        define BOOST_PP_ITERATION_2 23\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 24 && BOOST_PP_ITERATION_FINISH_2 >= 24\n#        define BOOST_PP_ITERATION_2 24\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 25 && BOOST_PP_ITERATION_FINISH_2 >= 25\n#        define BOOST_PP_ITERATION_2 25\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 26 && BOOST_PP_ITERATION_FINISH_2 >= 26\n#        define BOOST_PP_ITERATION_2 26\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 27 && BOOST_PP_ITERATION_FINISH_2 >= 27\n#        define BOOST_PP_ITERATION_2 27\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if 
BOOST_PP_ITERATION_START_2 <= 28 && BOOST_PP_ITERATION_FINISH_2 >= 28\n#        define BOOST_PP_ITERATION_2 28\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 29 && BOOST_PP_ITERATION_FINISH_2 >= 29\n#        define BOOST_PP_ITERATION_2 29\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 30 && BOOST_PP_ITERATION_FINISH_2 >= 30\n#        define BOOST_PP_ITERATION_2 30\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 31 && BOOST_PP_ITERATION_FINISH_2 >= 31\n#        define BOOST_PP_ITERATION_2 31\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 32 && BOOST_PP_ITERATION_FINISH_2 >= 32\n#        define BOOST_PP_ITERATION_2 32\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 33 && BOOST_PP_ITERATION_FINISH_2 >= 33\n#        define BOOST_PP_ITERATION_2 33\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 34 && BOOST_PP_ITERATION_FINISH_2 >= 34\n#        define BOOST_PP_ITERATION_2 34\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 35 && BOOST_PP_ITERATION_FINISH_2 >= 35\n#        define BOOST_PP_ITERATION_2 35\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 36 && BOOST_PP_ITERATION_FINISH_2 >= 36\n#        define BOOST_PP_ITERATION_2 36\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 37 && BOOST_PP_ITERATION_FINISH_2 >= 37\n#        define BOOST_PP_ITERATION_2 37\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 38 && BOOST_PP_ITERATION_FINISH_2 >= 38\n#        define BOOST_PP_ITERATION_2 38\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 39 && BOOST_PP_ITERATION_FINISH_2 >= 39\n#        define BOOST_PP_ITERATION_2 39\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 40 && BOOST_PP_ITERATION_FINISH_2 >= 40\n#        define BOOST_PP_ITERATION_2 40\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 41 && BOOST_PP_ITERATION_FINISH_2 >= 41\n#        define BOOST_PP_ITERATION_2 41\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 42 && BOOST_PP_ITERATION_FINISH_2 >= 42\n#        define BOOST_PP_ITERATION_2 42\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 43 && BOOST_PP_ITERATION_FINISH_2 >= 43\n#        define BOOST_PP_ITERATION_2 43\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 44 && BOOST_PP_ITERATION_FINISH_2 >= 44\n#        define BOOST_PP_ITERATION_2 44\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 45 && BOOST_PP_ITERATION_FINISH_2 >= 45\n#        define BOOST_PP_ITERATION_2 45\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 46 && BOOST_PP_ITERATION_FINISH_2 >= 46\n#        define BOOST_PP_ITERATION_2 46\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 47 && 
BOOST_PP_ITERATION_FINISH_2 >= 47\n#        define BOOST_PP_ITERATION_2 47\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 48 && BOOST_PP_ITERATION_FINISH_2 >= 48\n#        define BOOST_PP_ITERATION_2 48\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 49 && BOOST_PP_ITERATION_FINISH_2 >= 49\n#        define BOOST_PP_ITERATION_2 49\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 50 && BOOST_PP_ITERATION_FINISH_2 >= 50\n#        define BOOST_PP_ITERATION_2 50\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 51 && BOOST_PP_ITERATION_FINISH_2 >= 51\n#        define BOOST_PP_ITERATION_2 51\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 52 && BOOST_PP_ITERATION_FINISH_2 >= 52\n#        define BOOST_PP_ITERATION_2 52\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 53 && BOOST_PP_ITERATION_FINISH_2 >= 53\n#        define BOOST_PP_ITERATION_2 53\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 54 && BOOST_PP_ITERATION_FINISH_2 >= 54\n#        define BOOST_PP_ITERATION_2 54\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 55 && BOOST_PP_ITERATION_FINISH_2 >= 55\n#        define BOOST_PP_ITERATION_2 55\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 56 && BOOST_PP_ITERATION_FINISH_2 >= 56\n#        define BOOST_PP_ITERATION_2 56\n#        include BOOST_PP_FILENAME_2\n#        undef 
BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 57 && BOOST_PP_ITERATION_FINISH_2 >= 57\n#        define BOOST_PP_ITERATION_2 57\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 58 && BOOST_PP_ITERATION_FINISH_2 >= 58\n#        define BOOST_PP_ITERATION_2 58\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 59 && BOOST_PP_ITERATION_FINISH_2 >= 59\n#        define BOOST_PP_ITERATION_2 59\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 60 && BOOST_PP_ITERATION_FINISH_2 >= 60\n#        define BOOST_PP_ITERATION_2 60\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 61 && BOOST_PP_ITERATION_FINISH_2 >= 61\n#        define BOOST_PP_ITERATION_2 61\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 62 && BOOST_PP_ITERATION_FINISH_2 >= 62\n#        define BOOST_PP_ITERATION_2 62\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 63 && BOOST_PP_ITERATION_FINISH_2 >= 63\n#        define BOOST_PP_ITERATION_2 63\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 64 && BOOST_PP_ITERATION_FINISH_2 >= 64\n#        define BOOST_PP_ITERATION_2 64\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 65 && BOOST_PP_ITERATION_FINISH_2 >= 65\n#        define BOOST_PP_ITERATION_2 65\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 66 && BOOST_PP_ITERATION_FINISH_2 >= 66\n#        define 
BOOST_PP_ITERATION_2 66\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 67 && BOOST_PP_ITERATION_FINISH_2 >= 67\n#        define BOOST_PP_ITERATION_2 67\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 68 && BOOST_PP_ITERATION_FINISH_2 >= 68\n#        define BOOST_PP_ITERATION_2 68\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 69 && BOOST_PP_ITERATION_FINISH_2 >= 69\n#        define BOOST_PP_ITERATION_2 69\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 70 && BOOST_PP_ITERATION_FINISH_2 >= 70\n#        define BOOST_PP_ITERATION_2 70\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 71 && BOOST_PP_ITERATION_FINISH_2 >= 71\n#        define BOOST_PP_ITERATION_2 71\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 72 && BOOST_PP_ITERATION_FINISH_2 >= 72\n#        define BOOST_PP_ITERATION_2 72\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 73 && BOOST_PP_ITERATION_FINISH_2 >= 73\n#        define BOOST_PP_ITERATION_2 73\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 74 && BOOST_PP_ITERATION_FINISH_2 >= 74\n#        define BOOST_PP_ITERATION_2 74\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 75 && BOOST_PP_ITERATION_FINISH_2 >= 75\n#        define BOOST_PP_ITERATION_2 75\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if 
BOOST_PP_ITERATION_START_2 <= 76 && BOOST_PP_ITERATION_FINISH_2 >= 76\n#        define BOOST_PP_ITERATION_2 76\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 77 && BOOST_PP_ITERATION_FINISH_2 >= 77\n#        define BOOST_PP_ITERATION_2 77\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 78 && BOOST_PP_ITERATION_FINISH_2 >= 78\n#        define BOOST_PP_ITERATION_2 78\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 79 && BOOST_PP_ITERATION_FINISH_2 >= 79\n#        define BOOST_PP_ITERATION_2 79\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 80 && BOOST_PP_ITERATION_FINISH_2 >= 80\n#        define BOOST_PP_ITERATION_2 80\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 81 && BOOST_PP_ITERATION_FINISH_2 >= 81\n#        define BOOST_PP_ITERATION_2 81\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 82 && BOOST_PP_ITERATION_FINISH_2 >= 82\n#        define BOOST_PP_ITERATION_2 82\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 83 && BOOST_PP_ITERATION_FINISH_2 >= 83\n#        define BOOST_PP_ITERATION_2 83\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 84 && BOOST_PP_ITERATION_FINISH_2 >= 84\n#        define BOOST_PP_ITERATION_2 84\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 85 && BOOST_PP_ITERATION_FINISH_2 >= 85\n#        define BOOST_PP_ITERATION_2 85\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 86 && BOOST_PP_ITERATION_FINISH_2 >= 86\n#        define BOOST_PP_ITERATION_2 86\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 87 && BOOST_PP_ITERATION_FINISH_2 >= 87\n#        define BOOST_PP_ITERATION_2 87\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 88 && BOOST_PP_ITERATION_FINISH_2 >= 88\n#        define BOOST_PP_ITERATION_2 88\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 89 && BOOST_PP_ITERATION_FINISH_2 >= 89\n#        define BOOST_PP_ITERATION_2 89\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 90 && BOOST_PP_ITERATION_FINISH_2 >= 90\n#        define BOOST_PP_ITERATION_2 90\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 91 && BOOST_PP_ITERATION_FINISH_2 >= 91\n#        define BOOST_PP_ITERATION_2 91\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 92 && BOOST_PP_ITERATION_FINISH_2 >= 92\n#        define BOOST_PP_ITERATION_2 92\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 93 && BOOST_PP_ITERATION_FINISH_2 >= 93\n#        define BOOST_PP_ITERATION_2 93\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 94 && BOOST_PP_ITERATION_FINISH_2 >= 94\n#        define BOOST_PP_ITERATION_2 94\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 95 && 
BOOST_PP_ITERATION_FINISH_2 >= 95\n#        define BOOST_PP_ITERATION_2 95\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 96 && BOOST_PP_ITERATION_FINISH_2 >= 96\n#        define BOOST_PP_ITERATION_2 96\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 97 && BOOST_PP_ITERATION_FINISH_2 >= 97\n#        define BOOST_PP_ITERATION_2 97\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 98 && BOOST_PP_ITERATION_FINISH_2 >= 98\n#        define BOOST_PP_ITERATION_2 98\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 99 && BOOST_PP_ITERATION_FINISH_2 >= 99\n#        define BOOST_PP_ITERATION_2 99\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 100 && BOOST_PP_ITERATION_FINISH_2 >= 100\n#        define BOOST_PP_ITERATION_2 100\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 101 && BOOST_PP_ITERATION_FINISH_2 >= 101\n#        define BOOST_PP_ITERATION_2 101\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 102 && BOOST_PP_ITERATION_FINISH_2 >= 102\n#        define BOOST_PP_ITERATION_2 102\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 103 && BOOST_PP_ITERATION_FINISH_2 >= 103\n#        define BOOST_PP_ITERATION_2 103\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 104 && BOOST_PP_ITERATION_FINISH_2 >= 104\n#        define BOOST_PP_ITERATION_2 104\n#        include BOOST_PP_FILENAME_2\n#       
 undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 105 && BOOST_PP_ITERATION_FINISH_2 >= 105\n#        define BOOST_PP_ITERATION_2 105\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 106 && BOOST_PP_ITERATION_FINISH_2 >= 106\n#        define BOOST_PP_ITERATION_2 106\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 107 && BOOST_PP_ITERATION_FINISH_2 >= 107\n#        define BOOST_PP_ITERATION_2 107\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 108 && BOOST_PP_ITERATION_FINISH_2 >= 108\n#        define BOOST_PP_ITERATION_2 108\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 109 && BOOST_PP_ITERATION_FINISH_2 >= 109\n#        define BOOST_PP_ITERATION_2 109\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 110 && BOOST_PP_ITERATION_FINISH_2 >= 110\n#        define BOOST_PP_ITERATION_2 110\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 111 && BOOST_PP_ITERATION_FINISH_2 >= 111\n#        define BOOST_PP_ITERATION_2 111\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 112 && BOOST_PP_ITERATION_FINISH_2 >= 112\n#        define BOOST_PP_ITERATION_2 112\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 113 && BOOST_PP_ITERATION_FINISH_2 >= 113\n#        define BOOST_PP_ITERATION_2 113\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 114 && 
BOOST_PP_ITERATION_FINISH_2 >= 114\n#        define BOOST_PP_ITERATION_2 114\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 115 && BOOST_PP_ITERATION_FINISH_2 >= 115\n#        define BOOST_PP_ITERATION_2 115\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 116 && BOOST_PP_ITERATION_FINISH_2 >= 116\n#        define BOOST_PP_ITERATION_2 116\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 117 && BOOST_PP_ITERATION_FINISH_2 >= 117\n#        define BOOST_PP_ITERATION_2 117\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 118 && BOOST_PP_ITERATION_FINISH_2 >= 118\n#        define BOOST_PP_ITERATION_2 118\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 119 && BOOST_PP_ITERATION_FINISH_2 >= 119\n#        define BOOST_PP_ITERATION_2 119\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 120 && BOOST_PP_ITERATION_FINISH_2 >= 120\n#        define BOOST_PP_ITERATION_2 120\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 121 && BOOST_PP_ITERATION_FINISH_2 >= 121\n#        define BOOST_PP_ITERATION_2 121\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 122 && BOOST_PP_ITERATION_FINISH_2 >= 122\n#        define BOOST_PP_ITERATION_2 122\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 123 && BOOST_PP_ITERATION_FINISH_2 >= 123\n#        define BOOST_PP_ITERATION_2 123\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 124 && BOOST_PP_ITERATION_FINISH_2 >= 124\n#        define BOOST_PP_ITERATION_2 124\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 125 && BOOST_PP_ITERATION_FINISH_2 >= 125\n#        define BOOST_PP_ITERATION_2 125\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 126 && BOOST_PP_ITERATION_FINISH_2 >= 126\n#        define BOOST_PP_ITERATION_2 126\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 127 && BOOST_PP_ITERATION_FINISH_2 >= 127\n#        define BOOST_PP_ITERATION_2 127\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 128 && BOOST_PP_ITERATION_FINISH_2 >= 128\n#        define BOOST_PP_ITERATION_2 128\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 129 && BOOST_PP_ITERATION_FINISH_2 >= 129\n#        define BOOST_PP_ITERATION_2 129\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 130 && BOOST_PP_ITERATION_FINISH_2 >= 130\n#        define BOOST_PP_ITERATION_2 130\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 131 && BOOST_PP_ITERATION_FINISH_2 >= 131\n#        define BOOST_PP_ITERATION_2 131\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 132 && BOOST_PP_ITERATION_FINISH_2 >= 132\n#        define BOOST_PP_ITERATION_2 132\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 
133 && BOOST_PP_ITERATION_FINISH_2 >= 133\n#        define BOOST_PP_ITERATION_2 133\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 134 && BOOST_PP_ITERATION_FINISH_2 >= 134\n#        define BOOST_PP_ITERATION_2 134\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 135 && BOOST_PP_ITERATION_FINISH_2 >= 135\n#        define BOOST_PP_ITERATION_2 135\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 136 && BOOST_PP_ITERATION_FINISH_2 >= 136\n#        define BOOST_PP_ITERATION_2 136\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 137 && BOOST_PP_ITERATION_FINISH_2 >= 137\n#        define BOOST_PP_ITERATION_2 137\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 138 && BOOST_PP_ITERATION_FINISH_2 >= 138\n#        define BOOST_PP_ITERATION_2 138\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 139 && BOOST_PP_ITERATION_FINISH_2 >= 139\n#        define BOOST_PP_ITERATION_2 139\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 140 && BOOST_PP_ITERATION_FINISH_2 >= 140\n#        define BOOST_PP_ITERATION_2 140\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 141 && BOOST_PP_ITERATION_FINISH_2 >= 141\n#        define BOOST_PP_ITERATION_2 141\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 142 && BOOST_PP_ITERATION_FINISH_2 >= 142\n#        define BOOST_PP_ITERATION_2 142\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 143 && BOOST_PP_ITERATION_FINISH_2 >= 143\n#        define BOOST_PP_ITERATION_2 143\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 144 && BOOST_PP_ITERATION_FINISH_2 >= 144\n#        define BOOST_PP_ITERATION_2 144\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 145 && BOOST_PP_ITERATION_FINISH_2 >= 145\n#        define BOOST_PP_ITERATION_2 145\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 146 && BOOST_PP_ITERATION_FINISH_2 >= 146\n#        define BOOST_PP_ITERATION_2 146\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 147 && BOOST_PP_ITERATION_FINISH_2 >= 147\n#        define BOOST_PP_ITERATION_2 147\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 148 && BOOST_PP_ITERATION_FINISH_2 >= 148\n#        define BOOST_PP_ITERATION_2 148\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 149 && BOOST_PP_ITERATION_FINISH_2 >= 149\n#        define BOOST_PP_ITERATION_2 149\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 150 && BOOST_PP_ITERATION_FINISH_2 >= 150\n#        define BOOST_PP_ITERATION_2 150\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 151 && BOOST_PP_ITERATION_FINISH_2 >= 151\n#        define BOOST_PP_ITERATION_2 151\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 
152 && BOOST_PP_ITERATION_FINISH_2 >= 152\n#        define BOOST_PP_ITERATION_2 152\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 153 && BOOST_PP_ITERATION_FINISH_2 >= 153\n#        define BOOST_PP_ITERATION_2 153\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 154 && BOOST_PP_ITERATION_FINISH_2 >= 154\n#        define BOOST_PP_ITERATION_2 154\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 155 && BOOST_PP_ITERATION_FINISH_2 >= 155\n#        define BOOST_PP_ITERATION_2 155\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 156 && BOOST_PP_ITERATION_FINISH_2 >= 156\n#        define BOOST_PP_ITERATION_2 156\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 157 && BOOST_PP_ITERATION_FINISH_2 >= 157\n#        define BOOST_PP_ITERATION_2 157\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 158 && BOOST_PP_ITERATION_FINISH_2 >= 158\n#        define BOOST_PP_ITERATION_2 158\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 159 && BOOST_PP_ITERATION_FINISH_2 >= 159\n#        define BOOST_PP_ITERATION_2 159\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 160 && BOOST_PP_ITERATION_FINISH_2 >= 160\n#        define BOOST_PP_ITERATION_2 160\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 161 && BOOST_PP_ITERATION_FINISH_2 >= 161\n#        define BOOST_PP_ITERATION_2 161\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 162 && BOOST_PP_ITERATION_FINISH_2 >= 162\n#        define BOOST_PP_ITERATION_2 162\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 163 && BOOST_PP_ITERATION_FINISH_2 >= 163\n#        define BOOST_PP_ITERATION_2 163\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 164 && BOOST_PP_ITERATION_FINISH_2 >= 164\n#        define BOOST_PP_ITERATION_2 164\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 165 && BOOST_PP_ITERATION_FINISH_2 >= 165\n#        define BOOST_PP_ITERATION_2 165\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 166 && BOOST_PP_ITERATION_FINISH_2 >= 166\n#        define BOOST_PP_ITERATION_2 166\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 167 && BOOST_PP_ITERATION_FINISH_2 >= 167\n#        define BOOST_PP_ITERATION_2 167\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 168 && BOOST_PP_ITERATION_FINISH_2 >= 168\n#        define BOOST_PP_ITERATION_2 168\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 169 && BOOST_PP_ITERATION_FINISH_2 >= 169\n#        define BOOST_PP_ITERATION_2 169\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 170 && BOOST_PP_ITERATION_FINISH_2 >= 170\n#        define BOOST_PP_ITERATION_2 170\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 
171 && BOOST_PP_ITERATION_FINISH_2 >= 171\n#        define BOOST_PP_ITERATION_2 171\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 172 && BOOST_PP_ITERATION_FINISH_2 >= 172\n#        define BOOST_PP_ITERATION_2 172\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 173 && BOOST_PP_ITERATION_FINISH_2 >= 173\n#        define BOOST_PP_ITERATION_2 173\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 174 && BOOST_PP_ITERATION_FINISH_2 >= 174\n#        define BOOST_PP_ITERATION_2 174\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 175 && BOOST_PP_ITERATION_FINISH_2 >= 175\n#        define BOOST_PP_ITERATION_2 175\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 176 && BOOST_PP_ITERATION_FINISH_2 >= 176\n#        define BOOST_PP_ITERATION_2 176\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 177 && BOOST_PP_ITERATION_FINISH_2 >= 177\n#        define BOOST_PP_ITERATION_2 177\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 178 && BOOST_PP_ITERATION_FINISH_2 >= 178\n#        define BOOST_PP_ITERATION_2 178\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 179 && BOOST_PP_ITERATION_FINISH_2 >= 179\n#        define BOOST_PP_ITERATION_2 179\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 180 && BOOST_PP_ITERATION_FINISH_2 >= 180\n#        define BOOST_PP_ITERATION_2 180\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 181 && BOOST_PP_ITERATION_FINISH_2 >= 181\n#        define BOOST_PP_ITERATION_2 181\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 182 && BOOST_PP_ITERATION_FINISH_2 >= 182\n#        define BOOST_PP_ITERATION_2 182\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 183 && BOOST_PP_ITERATION_FINISH_2 >= 183\n#        define BOOST_PP_ITERATION_2 183\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 184 && BOOST_PP_ITERATION_FINISH_2 >= 184\n#        define BOOST_PP_ITERATION_2 184\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 185 && BOOST_PP_ITERATION_FINISH_2 >= 185\n#        define BOOST_PP_ITERATION_2 185\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 186 && BOOST_PP_ITERATION_FINISH_2 >= 186\n#        define BOOST_PP_ITERATION_2 186\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 187 && BOOST_PP_ITERATION_FINISH_2 >= 187\n#        define BOOST_PP_ITERATION_2 187\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 188 && BOOST_PP_ITERATION_FINISH_2 >= 188\n#        define BOOST_PP_ITERATION_2 188\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 189 && BOOST_PP_ITERATION_FINISH_2 >= 189\n#        define BOOST_PP_ITERATION_2 189\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 
190 && BOOST_PP_ITERATION_FINISH_2 >= 190\n#        define BOOST_PP_ITERATION_2 190\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 191 && BOOST_PP_ITERATION_FINISH_2 >= 191\n#        define BOOST_PP_ITERATION_2 191\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 192 && BOOST_PP_ITERATION_FINISH_2 >= 192\n#        define BOOST_PP_ITERATION_2 192\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 193 && BOOST_PP_ITERATION_FINISH_2 >= 193\n#        define BOOST_PP_ITERATION_2 193\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 194 && BOOST_PP_ITERATION_FINISH_2 >= 194\n#        define BOOST_PP_ITERATION_2 194\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 195 && BOOST_PP_ITERATION_FINISH_2 >= 195\n#        define BOOST_PP_ITERATION_2 195\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 196 && BOOST_PP_ITERATION_FINISH_2 >= 196\n#        define BOOST_PP_ITERATION_2 196\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 197 && BOOST_PP_ITERATION_FINISH_2 >= 197\n#        define BOOST_PP_ITERATION_2 197\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 198 && BOOST_PP_ITERATION_FINISH_2 >= 198\n#        define BOOST_PP_ITERATION_2 198\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 199 && BOOST_PP_ITERATION_FINISH_2 >= 199\n#        define BOOST_PP_ITERATION_2 199\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 200 && BOOST_PP_ITERATION_FINISH_2 >= 200\n#        define BOOST_PP_ITERATION_2 200\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 201 && BOOST_PP_ITERATION_FINISH_2 >= 201\n#        define BOOST_PP_ITERATION_2 201\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 202 && BOOST_PP_ITERATION_FINISH_2 >= 202\n#        define BOOST_PP_ITERATION_2 202\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 203 && BOOST_PP_ITERATION_FINISH_2 >= 203\n#        define BOOST_PP_ITERATION_2 203\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 204 && BOOST_PP_ITERATION_FINISH_2 >= 204\n#        define BOOST_PP_ITERATION_2 204\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 205 && BOOST_PP_ITERATION_FINISH_2 >= 205\n#        define BOOST_PP_ITERATION_2 205\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 206 && BOOST_PP_ITERATION_FINISH_2 >= 206\n#        define BOOST_PP_ITERATION_2 206\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 207 && BOOST_PP_ITERATION_FINISH_2 >= 207\n#        define BOOST_PP_ITERATION_2 207\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 208 && BOOST_PP_ITERATION_FINISH_2 >= 208\n#        define BOOST_PP_ITERATION_2 208\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 
209 && BOOST_PP_ITERATION_FINISH_2 >= 209\n#        define BOOST_PP_ITERATION_2 209\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 210 && BOOST_PP_ITERATION_FINISH_2 >= 210\n#        define BOOST_PP_ITERATION_2 210\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 211 && BOOST_PP_ITERATION_FINISH_2 >= 211\n#        define BOOST_PP_ITERATION_2 211\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 212 && BOOST_PP_ITERATION_FINISH_2 >= 212\n#        define BOOST_PP_ITERATION_2 212\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 213 && BOOST_PP_ITERATION_FINISH_2 >= 213\n#        define BOOST_PP_ITERATION_2 213\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 214 && BOOST_PP_ITERATION_FINISH_2 >= 214\n#        define BOOST_PP_ITERATION_2 214\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 215 && BOOST_PP_ITERATION_FINISH_2 >= 215\n#        define BOOST_PP_ITERATION_2 215\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 216 && BOOST_PP_ITERATION_FINISH_2 >= 216\n#        define BOOST_PP_ITERATION_2 216\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 217 && BOOST_PP_ITERATION_FINISH_2 >= 217\n#        define BOOST_PP_ITERATION_2 217\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 218 && BOOST_PP_ITERATION_FINISH_2 >= 218\n#        define BOOST_PP_ITERATION_2 218\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 219 && BOOST_PP_ITERATION_FINISH_2 >= 219\n#        define BOOST_PP_ITERATION_2 219\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 220 && BOOST_PP_ITERATION_FINISH_2 >= 220\n#        define BOOST_PP_ITERATION_2 220\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 221 && BOOST_PP_ITERATION_FINISH_2 >= 221\n#        define BOOST_PP_ITERATION_2 221\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 222 && BOOST_PP_ITERATION_FINISH_2 >= 222\n#        define BOOST_PP_ITERATION_2 222\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 223 && BOOST_PP_ITERATION_FINISH_2 >= 223\n#        define BOOST_PP_ITERATION_2 223\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 224 && BOOST_PP_ITERATION_FINISH_2 >= 224\n#        define BOOST_PP_ITERATION_2 224\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 225 && BOOST_PP_ITERATION_FINISH_2 >= 225\n#        define BOOST_PP_ITERATION_2 225\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 226 && BOOST_PP_ITERATION_FINISH_2 >= 226\n#        define BOOST_PP_ITERATION_2 226\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 227 && BOOST_PP_ITERATION_FINISH_2 >= 227\n#        define BOOST_PP_ITERATION_2 227\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 
228 && BOOST_PP_ITERATION_FINISH_2 >= 228\n#        define BOOST_PP_ITERATION_2 228\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 229 && BOOST_PP_ITERATION_FINISH_2 >= 229\n#        define BOOST_PP_ITERATION_2 229\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 230 && BOOST_PP_ITERATION_FINISH_2 >= 230\n#        define BOOST_PP_ITERATION_2 230\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 231 && BOOST_PP_ITERATION_FINISH_2 >= 231\n#        define BOOST_PP_ITERATION_2 231\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 232 && BOOST_PP_ITERATION_FINISH_2 >= 232\n#        define BOOST_PP_ITERATION_2 232\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 233 && BOOST_PP_ITERATION_FINISH_2 >= 233\n#        define BOOST_PP_ITERATION_2 233\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 234 && BOOST_PP_ITERATION_FINISH_2 >= 234\n#        define BOOST_PP_ITERATION_2 234\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 235 && BOOST_PP_ITERATION_FINISH_2 >= 235\n#        define BOOST_PP_ITERATION_2 235\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 236 && BOOST_PP_ITERATION_FINISH_2 >= 236\n#        define BOOST_PP_ITERATION_2 236\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 237 && BOOST_PP_ITERATION_FINISH_2 >= 237\n#        define BOOST_PP_ITERATION_2 237\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 238 && BOOST_PP_ITERATION_FINISH_2 >= 238\n#        define BOOST_PP_ITERATION_2 238\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 239 && BOOST_PP_ITERATION_FINISH_2 >= 239\n#        define BOOST_PP_ITERATION_2 239\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 240 && BOOST_PP_ITERATION_FINISH_2 >= 240\n#        define BOOST_PP_ITERATION_2 240\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 241 && BOOST_PP_ITERATION_FINISH_2 >= 241\n#        define BOOST_PP_ITERATION_2 241\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 242 && BOOST_PP_ITERATION_FINISH_2 >= 242\n#        define BOOST_PP_ITERATION_2 242\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 243 && BOOST_PP_ITERATION_FINISH_2 >= 243\n#        define BOOST_PP_ITERATION_2 243\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 244 && BOOST_PP_ITERATION_FINISH_2 >= 244\n#        define BOOST_PP_ITERATION_2 244\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 245 && BOOST_PP_ITERATION_FINISH_2 >= 245\n#        define BOOST_PP_ITERATION_2 245\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 246 && BOOST_PP_ITERATION_FINISH_2 >= 246\n#        define BOOST_PP_ITERATION_2 246\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 
247 && BOOST_PP_ITERATION_FINISH_2 >= 247\n#        define BOOST_PP_ITERATION_2 247\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 248 && BOOST_PP_ITERATION_FINISH_2 >= 248\n#        define BOOST_PP_ITERATION_2 248\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 249 && BOOST_PP_ITERATION_FINISH_2 >= 249\n#        define BOOST_PP_ITERATION_2 249\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 250 && BOOST_PP_ITERATION_FINISH_2 >= 250\n#        define BOOST_PP_ITERATION_2 250\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 251 && BOOST_PP_ITERATION_FINISH_2 >= 251\n#        define BOOST_PP_ITERATION_2 251\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 252 && BOOST_PP_ITERATION_FINISH_2 >= 252\n#        define BOOST_PP_ITERATION_2 252\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 253 && BOOST_PP_ITERATION_FINISH_2 >= 253\n#        define BOOST_PP_ITERATION_2 253\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 254 && BOOST_PP_ITERATION_FINISH_2 >= 254\n#        define BOOST_PP_ITERATION_2 254\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 255 && BOOST_PP_ITERATION_FINISH_2 >= 255\n#        define BOOST_PP_ITERATION_2 255\n#        include BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n#    if BOOST_PP_ITERATION_START_2 <= 256 && BOOST_PP_ITERATION_FINISH_2 >= 256\n#        define BOOST_PP_ITERATION_2 256\n#        include 
BOOST_PP_FILENAME_2\n#        undef BOOST_PP_ITERATION_2\n#    endif\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define BOOST_PP_ITERATION_DEPTH() 1\n#\n# undef BOOST_PP_ITERATION_START_2\n# undef BOOST_PP_ITERATION_FINISH_2\n# undef BOOST_PP_FILENAME_2\n#\n# undef BOOST_PP_ITERATION_FLAGS_2\n# undef BOOST_PP_ITERATION_PARAMS_2\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/forward3.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if defined(BOOST_PP_ITERATION_LIMITS)\n#    if !defined(BOOST_PP_FILENAME_3)\n#        error BOOST_PP_ERROR:  depth #3 filename is not defined\n#    endif\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/lower3.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/upper3.hpp>\n#    define BOOST_PP_ITERATION_FLAGS_3() 0\n#    undef BOOST_PP_ITERATION_LIMITS\n# elif defined(BOOST_PP_ITERATION_PARAMS_3)\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(0, BOOST_PP_ITERATION_PARAMS_3)\n#    include <boost/preprocessor/iteration/detail/bounds/lower3.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(1, BOOST_PP_ITERATION_PARAMS_3)\n#    include <boost/preprocessor/iteration/detail/bounds/upper3.hpp>\n#    define BOOST_PP_FILENAME_3 BOOST_PP_ARRAY_ELEM(2, BOOST_PP_ITERATION_PARAMS_3)\n#    if BOOST_PP_ARRAY_SIZE(BOOST_PP_ITERATION_PARAMS_3) >= 4\n#        define BOOST_PP_ITERATION_FLAGS_3() BOOST_PP_ARRAY_ELEM(3, BOOST_PP_ITERATION_PARAMS_3)\n#    else\n#        define BOOST_PP_ITERATION_FLAGS_3() 0\n#    endif\n# else\n#    error BOOST_PP_ERROR:  depth #3 iteration boundaries or filename not defined\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define 
BOOST_PP_ITERATION_DEPTH() 3\n#\n# if (BOOST_PP_ITERATION_START_3) > (BOOST_PP_ITERATION_FINISH_3)\n#    include <boost/preprocessor/iteration/detail/iter/reverse3.hpp>\n# else\n#    if BOOST_PP_ITERATION_START_3 <= 0 && BOOST_PP_ITERATION_FINISH_3 >= 0\n#        define BOOST_PP_ITERATION_3 0\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 1 && BOOST_PP_ITERATION_FINISH_3 >= 1\n#        define BOOST_PP_ITERATION_3 1\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 2 && BOOST_PP_ITERATION_FINISH_3 >= 2\n#        define BOOST_PP_ITERATION_3 2\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 3 && BOOST_PP_ITERATION_FINISH_3 >= 3\n#        define BOOST_PP_ITERATION_3 3\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 4 && BOOST_PP_ITERATION_FINISH_3 >= 4\n#        define BOOST_PP_ITERATION_3 4\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 5 && BOOST_PP_ITERATION_FINISH_3 >= 5\n#        define BOOST_PP_ITERATION_3 5\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 6 && BOOST_PP_ITERATION_FINISH_3 >= 6\n#        define BOOST_PP_ITERATION_3 6\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 7 && BOOST_PP_ITERATION_FINISH_3 >= 7\n#        define BOOST_PP_ITERATION_3 7\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 8 && BOOST_PP_ITERATION_FINISH_3 >= 8\n#        define BOOST_PP_ITERATION_3 8\n#        include BOOST_PP_FILENAME_3\n#        undef 
BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 9 && BOOST_PP_ITERATION_FINISH_3 >= 9\n#        define BOOST_PP_ITERATION_3 9\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 10 && BOOST_PP_ITERATION_FINISH_3 >= 10\n#        define BOOST_PP_ITERATION_3 10\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 11 && BOOST_PP_ITERATION_FINISH_3 >= 11\n#        define BOOST_PP_ITERATION_3 11\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 12 && BOOST_PP_ITERATION_FINISH_3 >= 12\n#        define BOOST_PP_ITERATION_3 12\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 13 && BOOST_PP_ITERATION_FINISH_3 >= 13\n#        define BOOST_PP_ITERATION_3 13\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 14 && BOOST_PP_ITERATION_FINISH_3 >= 14\n#        define BOOST_PP_ITERATION_3 14\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 15 && BOOST_PP_ITERATION_FINISH_3 >= 15\n#        define BOOST_PP_ITERATION_3 15\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 16 && BOOST_PP_ITERATION_FINISH_3 >= 16\n#        define BOOST_PP_ITERATION_3 16\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 17 && BOOST_PP_ITERATION_FINISH_3 >= 17\n#        define BOOST_PP_ITERATION_3 17\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 18 && BOOST_PP_ITERATION_FINISH_3 >= 18\n#        define 
BOOST_PP_ITERATION_3 18\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 19 && BOOST_PP_ITERATION_FINISH_3 >= 19\n#        define BOOST_PP_ITERATION_3 19\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 20 && BOOST_PP_ITERATION_FINISH_3 >= 20\n#        define BOOST_PP_ITERATION_3 20\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 21 && BOOST_PP_ITERATION_FINISH_3 >= 21\n#        define BOOST_PP_ITERATION_3 21\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 22 && BOOST_PP_ITERATION_FINISH_3 >= 22\n#        define BOOST_PP_ITERATION_3 22\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 23 && BOOST_PP_ITERATION_FINISH_3 >= 23\n#        define BOOST_PP_ITERATION_3 23\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 24 && BOOST_PP_ITERATION_FINISH_3 >= 24\n#        define BOOST_PP_ITERATION_3 24\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 25 && BOOST_PP_ITERATION_FINISH_3 >= 25\n#        define BOOST_PP_ITERATION_3 25\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 26 && BOOST_PP_ITERATION_FINISH_3 >= 26\n#        define BOOST_PP_ITERATION_3 26\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 27 && BOOST_PP_ITERATION_FINISH_3 >= 27\n#        define BOOST_PP_ITERATION_3 27\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if 
BOOST_PP_ITERATION_START_3 <= 28 && BOOST_PP_ITERATION_FINISH_3 >= 28\n#        define BOOST_PP_ITERATION_3 28\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 29 && BOOST_PP_ITERATION_FINISH_3 >= 29\n#        define BOOST_PP_ITERATION_3 29\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 30 && BOOST_PP_ITERATION_FINISH_3 >= 30\n#        define BOOST_PP_ITERATION_3 30\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 31 && BOOST_PP_ITERATION_FINISH_3 >= 31\n#        define BOOST_PP_ITERATION_3 31\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 32 && BOOST_PP_ITERATION_FINISH_3 >= 32\n#        define BOOST_PP_ITERATION_3 32\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 33 && BOOST_PP_ITERATION_FINISH_3 >= 33\n#        define BOOST_PP_ITERATION_3 33\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 34 && BOOST_PP_ITERATION_FINISH_3 >= 34\n#        define BOOST_PP_ITERATION_3 34\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 35 && BOOST_PP_ITERATION_FINISH_3 >= 35\n#        define BOOST_PP_ITERATION_3 35\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 36 && BOOST_PP_ITERATION_FINISH_3 >= 36\n#        define BOOST_PP_ITERATION_3 36\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 37 && BOOST_PP_ITERATION_FINISH_3 >= 37\n#        define BOOST_PP_ITERATION_3 37\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 38 && BOOST_PP_ITERATION_FINISH_3 >= 38\n#        define BOOST_PP_ITERATION_3 38\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 39 && BOOST_PP_ITERATION_FINISH_3 >= 39\n#        define BOOST_PP_ITERATION_3 39\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 40 && BOOST_PP_ITERATION_FINISH_3 >= 40\n#        define BOOST_PP_ITERATION_3 40\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 41 && BOOST_PP_ITERATION_FINISH_3 >= 41\n#        define BOOST_PP_ITERATION_3 41\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 42 && BOOST_PP_ITERATION_FINISH_3 >= 42\n#        define BOOST_PP_ITERATION_3 42\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 43 && BOOST_PP_ITERATION_FINISH_3 >= 43\n#        define BOOST_PP_ITERATION_3 43\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 44 && BOOST_PP_ITERATION_FINISH_3 >= 44\n#        define BOOST_PP_ITERATION_3 44\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 45 && BOOST_PP_ITERATION_FINISH_3 >= 45\n#        define BOOST_PP_ITERATION_3 45\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 46 && BOOST_PP_ITERATION_FINISH_3 >= 46\n#        define BOOST_PP_ITERATION_3 46\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 47 && 
BOOST_PP_ITERATION_FINISH_3 >= 47\n#        define BOOST_PP_ITERATION_3 47\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 48 && BOOST_PP_ITERATION_FINISH_3 >= 48\n#        define BOOST_PP_ITERATION_3 48\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 49 && BOOST_PP_ITERATION_FINISH_3 >= 49\n#        define BOOST_PP_ITERATION_3 49\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 50 && BOOST_PP_ITERATION_FINISH_3 >= 50\n#        define BOOST_PP_ITERATION_3 50\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 51 && BOOST_PP_ITERATION_FINISH_3 >= 51\n#        define BOOST_PP_ITERATION_3 51\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 52 && BOOST_PP_ITERATION_FINISH_3 >= 52\n#        define BOOST_PP_ITERATION_3 52\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 53 && BOOST_PP_ITERATION_FINISH_3 >= 53\n#        define BOOST_PP_ITERATION_3 53\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 54 && BOOST_PP_ITERATION_FINISH_3 >= 54\n#        define BOOST_PP_ITERATION_3 54\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 55 && BOOST_PP_ITERATION_FINISH_3 >= 55\n#        define BOOST_PP_ITERATION_3 55\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 56 && BOOST_PP_ITERATION_FINISH_3 >= 56\n#        define BOOST_PP_ITERATION_3 56\n#        include BOOST_PP_FILENAME_3\n#        undef 
BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 57 && BOOST_PP_ITERATION_FINISH_3 >= 57\n#        define BOOST_PP_ITERATION_3 57\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 58 && BOOST_PP_ITERATION_FINISH_3 >= 58\n#        define BOOST_PP_ITERATION_3 58\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 59 && BOOST_PP_ITERATION_FINISH_3 >= 59\n#        define BOOST_PP_ITERATION_3 59\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 60 && BOOST_PP_ITERATION_FINISH_3 >= 60\n#        define BOOST_PP_ITERATION_3 60\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 61 && BOOST_PP_ITERATION_FINISH_3 >= 61\n#        define BOOST_PP_ITERATION_3 61\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 62 && BOOST_PP_ITERATION_FINISH_3 >= 62\n#        define BOOST_PP_ITERATION_3 62\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 63 && BOOST_PP_ITERATION_FINISH_3 >= 63\n#        define BOOST_PP_ITERATION_3 63\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 64 && BOOST_PP_ITERATION_FINISH_3 >= 64\n#        define BOOST_PP_ITERATION_3 64\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 65 && BOOST_PP_ITERATION_FINISH_3 >= 65\n#        define BOOST_PP_ITERATION_3 65\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 66 && BOOST_PP_ITERATION_FINISH_3 >= 66\n#        define 
BOOST_PP_ITERATION_3 66\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 67 && BOOST_PP_ITERATION_FINISH_3 >= 67\n#        define BOOST_PP_ITERATION_3 67\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 68 && BOOST_PP_ITERATION_FINISH_3 >= 68\n#        define BOOST_PP_ITERATION_3 68\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 69 && BOOST_PP_ITERATION_FINISH_3 >= 69\n#        define BOOST_PP_ITERATION_3 69\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 70 && BOOST_PP_ITERATION_FINISH_3 >= 70\n#        define BOOST_PP_ITERATION_3 70\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 71 && BOOST_PP_ITERATION_FINISH_3 >= 71\n#        define BOOST_PP_ITERATION_3 71\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 72 && BOOST_PP_ITERATION_FINISH_3 >= 72\n#        define BOOST_PP_ITERATION_3 72\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 73 && BOOST_PP_ITERATION_FINISH_3 >= 73\n#        define BOOST_PP_ITERATION_3 73\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 74 && BOOST_PP_ITERATION_FINISH_3 >= 74\n#        define BOOST_PP_ITERATION_3 74\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 75 && BOOST_PP_ITERATION_FINISH_3 >= 75\n#        define BOOST_PP_ITERATION_3 75\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if 
BOOST_PP_ITERATION_START_3 <= 76 && BOOST_PP_ITERATION_FINISH_3 >= 76\n#        define BOOST_PP_ITERATION_3 76\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 77 && BOOST_PP_ITERATION_FINISH_3 >= 77\n#        define BOOST_PP_ITERATION_3 77\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 78 && BOOST_PP_ITERATION_FINISH_3 >= 78\n#        define BOOST_PP_ITERATION_3 78\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 79 && BOOST_PP_ITERATION_FINISH_3 >= 79\n#        define BOOST_PP_ITERATION_3 79\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 80 && BOOST_PP_ITERATION_FINISH_3 >= 80\n#        define BOOST_PP_ITERATION_3 80\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 81 && BOOST_PP_ITERATION_FINISH_3 >= 81\n#        define BOOST_PP_ITERATION_3 81\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 82 && BOOST_PP_ITERATION_FINISH_3 >= 82\n#        define BOOST_PP_ITERATION_3 82\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 83 && BOOST_PP_ITERATION_FINISH_3 >= 83\n#        define BOOST_PP_ITERATION_3 83\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 84 && BOOST_PP_ITERATION_FINISH_3 >= 84\n#        define BOOST_PP_ITERATION_3 84\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 85 && BOOST_PP_ITERATION_FINISH_3 >= 85\n#        define BOOST_PP_ITERATION_3 85\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 86 && BOOST_PP_ITERATION_FINISH_3 >= 86\n#        define BOOST_PP_ITERATION_3 86\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 87 && BOOST_PP_ITERATION_FINISH_3 >= 87\n#        define BOOST_PP_ITERATION_3 87\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 88 && BOOST_PP_ITERATION_FINISH_3 >= 88\n#        define BOOST_PP_ITERATION_3 88\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 89 && BOOST_PP_ITERATION_FINISH_3 >= 89\n#        define BOOST_PP_ITERATION_3 89\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 90 && BOOST_PP_ITERATION_FINISH_3 >= 90\n#        define BOOST_PP_ITERATION_3 90\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 91 && BOOST_PP_ITERATION_FINISH_3 >= 91\n#        define BOOST_PP_ITERATION_3 91\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 92 && BOOST_PP_ITERATION_FINISH_3 >= 92\n#        define BOOST_PP_ITERATION_3 92\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 93 && BOOST_PP_ITERATION_FINISH_3 >= 93\n#        define BOOST_PP_ITERATION_3 93\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 94 && BOOST_PP_ITERATION_FINISH_3 >= 94\n#        define BOOST_PP_ITERATION_3 94\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 95 && 
BOOST_PP_ITERATION_FINISH_3 >= 95\n#        define BOOST_PP_ITERATION_3 95\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 96 && BOOST_PP_ITERATION_FINISH_3 >= 96\n#        define BOOST_PP_ITERATION_3 96\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 97 && BOOST_PP_ITERATION_FINISH_3 >= 97\n#        define BOOST_PP_ITERATION_3 97\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 98 && BOOST_PP_ITERATION_FINISH_3 >= 98\n#        define BOOST_PP_ITERATION_3 98\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 99 && BOOST_PP_ITERATION_FINISH_3 >= 99\n#        define BOOST_PP_ITERATION_3 99\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 100 && BOOST_PP_ITERATION_FINISH_3 >= 100\n#        define BOOST_PP_ITERATION_3 100\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 101 && BOOST_PP_ITERATION_FINISH_3 >= 101\n#        define BOOST_PP_ITERATION_3 101\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 102 && BOOST_PP_ITERATION_FINISH_3 >= 102\n#        define BOOST_PP_ITERATION_3 102\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 103 && BOOST_PP_ITERATION_FINISH_3 >= 103\n#        define BOOST_PP_ITERATION_3 103\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 104 && BOOST_PP_ITERATION_FINISH_3 >= 104\n#        define BOOST_PP_ITERATION_3 104\n#        include BOOST_PP_FILENAME_3\n#       
 undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 105 && BOOST_PP_ITERATION_FINISH_3 >= 105\n#        define BOOST_PP_ITERATION_3 105\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 106 && BOOST_PP_ITERATION_FINISH_3 >= 106\n#        define BOOST_PP_ITERATION_3 106\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 107 && BOOST_PP_ITERATION_FINISH_3 >= 107\n#        define BOOST_PP_ITERATION_3 107\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 108 && BOOST_PP_ITERATION_FINISH_3 >= 108\n#        define BOOST_PP_ITERATION_3 108\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 109 && BOOST_PP_ITERATION_FINISH_3 >= 109\n#        define BOOST_PP_ITERATION_3 109\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 110 && BOOST_PP_ITERATION_FINISH_3 >= 110\n#        define BOOST_PP_ITERATION_3 110\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 111 && BOOST_PP_ITERATION_FINISH_3 >= 111\n#        define BOOST_PP_ITERATION_3 111\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 112 && BOOST_PP_ITERATION_FINISH_3 >= 112\n#        define BOOST_PP_ITERATION_3 112\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 113 && BOOST_PP_ITERATION_FINISH_3 >= 113\n#        define BOOST_PP_ITERATION_3 113\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 114 && 
BOOST_PP_ITERATION_FINISH_3 >= 114\n#        define BOOST_PP_ITERATION_3 114\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 115 && BOOST_PP_ITERATION_FINISH_3 >= 115\n#        define BOOST_PP_ITERATION_3 115\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 116 && BOOST_PP_ITERATION_FINISH_3 >= 116\n#        define BOOST_PP_ITERATION_3 116\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 117 && BOOST_PP_ITERATION_FINISH_3 >= 117\n#        define BOOST_PP_ITERATION_3 117\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 118 && BOOST_PP_ITERATION_FINISH_3 >= 118\n#        define BOOST_PP_ITERATION_3 118\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 119 && BOOST_PP_ITERATION_FINISH_3 >= 119\n#        define BOOST_PP_ITERATION_3 119\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 120 && BOOST_PP_ITERATION_FINISH_3 >= 120\n#        define BOOST_PP_ITERATION_3 120\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 121 && BOOST_PP_ITERATION_FINISH_3 >= 121\n#        define BOOST_PP_ITERATION_3 121\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 122 && BOOST_PP_ITERATION_FINISH_3 >= 122\n#        define BOOST_PP_ITERATION_3 122\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 123 && BOOST_PP_ITERATION_FINISH_3 >= 123\n#        define BOOST_PP_ITERATION_3 123\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 124 && BOOST_PP_ITERATION_FINISH_3 >= 124\n#        define BOOST_PP_ITERATION_3 124\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 125 && BOOST_PP_ITERATION_FINISH_3 >= 125\n#        define BOOST_PP_ITERATION_3 125\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 126 && BOOST_PP_ITERATION_FINISH_3 >= 126\n#        define BOOST_PP_ITERATION_3 126\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 127 && BOOST_PP_ITERATION_FINISH_3 >= 127\n#        define BOOST_PP_ITERATION_3 127\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 128 && BOOST_PP_ITERATION_FINISH_3 >= 128\n#        define BOOST_PP_ITERATION_3 128\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 129 && BOOST_PP_ITERATION_FINISH_3 >= 129\n#        define BOOST_PP_ITERATION_3 129\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 130 && BOOST_PP_ITERATION_FINISH_3 >= 130\n#        define BOOST_PP_ITERATION_3 130\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 131 && BOOST_PP_ITERATION_FINISH_3 >= 131\n#        define BOOST_PP_ITERATION_3 131\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 132 && BOOST_PP_ITERATION_FINISH_3 >= 132\n#        define BOOST_PP_ITERATION_3 132\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 
133 && BOOST_PP_ITERATION_FINISH_3 >= 133\n#        define BOOST_PP_ITERATION_3 133\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 134 && BOOST_PP_ITERATION_FINISH_3 >= 134\n#        define BOOST_PP_ITERATION_3 134\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 135 && BOOST_PP_ITERATION_FINISH_3 >= 135\n#        define BOOST_PP_ITERATION_3 135\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 136 && BOOST_PP_ITERATION_FINISH_3 >= 136\n#        define BOOST_PP_ITERATION_3 136\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 137 && BOOST_PP_ITERATION_FINISH_3 >= 137\n#        define BOOST_PP_ITERATION_3 137\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 138 && BOOST_PP_ITERATION_FINISH_3 >= 138\n#        define BOOST_PP_ITERATION_3 138\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 139 && BOOST_PP_ITERATION_FINISH_3 >= 139\n#        define BOOST_PP_ITERATION_3 139\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 140 && BOOST_PP_ITERATION_FINISH_3 >= 140\n#        define BOOST_PP_ITERATION_3 140\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 141 && BOOST_PP_ITERATION_FINISH_3 >= 141\n#        define BOOST_PP_ITERATION_3 141\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 142 && BOOST_PP_ITERATION_FINISH_3 >= 142\n#        define BOOST_PP_ITERATION_3 142\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 143 && BOOST_PP_ITERATION_FINISH_3 >= 143\n#        define BOOST_PP_ITERATION_3 143\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 144 && BOOST_PP_ITERATION_FINISH_3 >= 144\n#        define BOOST_PP_ITERATION_3 144\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 145 && BOOST_PP_ITERATION_FINISH_3 >= 145\n#        define BOOST_PP_ITERATION_3 145\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 146 && BOOST_PP_ITERATION_FINISH_3 >= 146\n#        define BOOST_PP_ITERATION_3 146\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 147 && BOOST_PP_ITERATION_FINISH_3 >= 147\n#        define BOOST_PP_ITERATION_3 147\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 148 && BOOST_PP_ITERATION_FINISH_3 >= 148\n#        define BOOST_PP_ITERATION_3 148\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 149 && BOOST_PP_ITERATION_FINISH_3 >= 149\n#        define BOOST_PP_ITERATION_3 149\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 150 && BOOST_PP_ITERATION_FINISH_3 >= 150\n#        define BOOST_PP_ITERATION_3 150\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 151 && BOOST_PP_ITERATION_FINISH_3 >= 151\n#        define BOOST_PP_ITERATION_3 151\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 
152 && BOOST_PP_ITERATION_FINISH_3 >= 152\n#        define BOOST_PP_ITERATION_3 152\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 153 && BOOST_PP_ITERATION_FINISH_3 >= 153\n#        define BOOST_PP_ITERATION_3 153\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 154 && BOOST_PP_ITERATION_FINISH_3 >= 154\n#        define BOOST_PP_ITERATION_3 154\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 155 && BOOST_PP_ITERATION_FINISH_3 >= 155\n#        define BOOST_PP_ITERATION_3 155\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 156 && BOOST_PP_ITERATION_FINISH_3 >= 156\n#        define BOOST_PP_ITERATION_3 156\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 157 && BOOST_PP_ITERATION_FINISH_3 >= 157\n#        define BOOST_PP_ITERATION_3 157\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 158 && BOOST_PP_ITERATION_FINISH_3 >= 158\n#        define BOOST_PP_ITERATION_3 158\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 159 && BOOST_PP_ITERATION_FINISH_3 >= 159\n#        define BOOST_PP_ITERATION_3 159\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 160 && BOOST_PP_ITERATION_FINISH_3 >= 160\n#        define BOOST_PP_ITERATION_3 160\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 161 && BOOST_PP_ITERATION_FINISH_3 >= 161\n#        define BOOST_PP_ITERATION_3 161\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 162 && BOOST_PP_ITERATION_FINISH_3 >= 162\n#        define BOOST_PP_ITERATION_3 162\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 163 && BOOST_PP_ITERATION_FINISH_3 >= 163\n#        define BOOST_PP_ITERATION_3 163\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 164 && BOOST_PP_ITERATION_FINISH_3 >= 164\n#        define BOOST_PP_ITERATION_3 164\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 165 && BOOST_PP_ITERATION_FINISH_3 >= 165\n#        define BOOST_PP_ITERATION_3 165\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 166 && BOOST_PP_ITERATION_FINISH_3 >= 166\n#        define BOOST_PP_ITERATION_3 166\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 167 && BOOST_PP_ITERATION_FINISH_3 >= 167\n#        define BOOST_PP_ITERATION_3 167\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 168 && BOOST_PP_ITERATION_FINISH_3 >= 168\n#        define BOOST_PP_ITERATION_3 168\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 169 && BOOST_PP_ITERATION_FINISH_3 >= 169\n#        define BOOST_PP_ITERATION_3 169\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 170 && BOOST_PP_ITERATION_FINISH_3 >= 170\n#        define BOOST_PP_ITERATION_3 170\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 
171 && BOOST_PP_ITERATION_FINISH_3 >= 171\n#        define BOOST_PP_ITERATION_3 171\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 172 && BOOST_PP_ITERATION_FINISH_3 >= 172\n#        define BOOST_PP_ITERATION_3 172\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 173 && BOOST_PP_ITERATION_FINISH_3 >= 173\n#        define BOOST_PP_ITERATION_3 173\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 174 && BOOST_PP_ITERATION_FINISH_3 >= 174\n#        define BOOST_PP_ITERATION_3 174\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 175 && BOOST_PP_ITERATION_FINISH_3 >= 175\n#        define BOOST_PP_ITERATION_3 175\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 176 && BOOST_PP_ITERATION_FINISH_3 >= 176\n#        define BOOST_PP_ITERATION_3 176\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 177 && BOOST_PP_ITERATION_FINISH_3 >= 177\n#        define BOOST_PP_ITERATION_3 177\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 178 && BOOST_PP_ITERATION_FINISH_3 >= 178\n#        define BOOST_PP_ITERATION_3 178\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 179 && BOOST_PP_ITERATION_FINISH_3 >= 179\n#        define BOOST_PP_ITERATION_3 179\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 180 && BOOST_PP_ITERATION_FINISH_3 >= 180\n#        define BOOST_PP_ITERATION_3 180\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 181 && BOOST_PP_ITERATION_FINISH_3 >= 181\n#        define BOOST_PP_ITERATION_3 181\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 182 && BOOST_PP_ITERATION_FINISH_3 >= 182\n#        define BOOST_PP_ITERATION_3 182\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 183 && BOOST_PP_ITERATION_FINISH_3 >= 183\n#        define BOOST_PP_ITERATION_3 183\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 184 && BOOST_PP_ITERATION_FINISH_3 >= 184\n#        define BOOST_PP_ITERATION_3 184\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 185 && BOOST_PP_ITERATION_FINISH_3 >= 185\n#        define BOOST_PP_ITERATION_3 185\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 186 && BOOST_PP_ITERATION_FINISH_3 >= 186\n#        define BOOST_PP_ITERATION_3 186\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 187 && BOOST_PP_ITERATION_FINISH_3 >= 187\n#        define BOOST_PP_ITERATION_3 187\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 188 && BOOST_PP_ITERATION_FINISH_3 >= 188\n#        define BOOST_PP_ITERATION_3 188\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 189 && BOOST_PP_ITERATION_FINISH_3 >= 189\n#        define BOOST_PP_ITERATION_3 189\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 
190 && BOOST_PP_ITERATION_FINISH_3 >= 190\n#        define BOOST_PP_ITERATION_3 190\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 191 && BOOST_PP_ITERATION_FINISH_3 >= 191\n#        define BOOST_PP_ITERATION_3 191\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 192 && BOOST_PP_ITERATION_FINISH_3 >= 192\n#        define BOOST_PP_ITERATION_3 192\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 193 && BOOST_PP_ITERATION_FINISH_3 >= 193\n#        define BOOST_PP_ITERATION_3 193\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 194 && BOOST_PP_ITERATION_FINISH_3 >= 194\n#        define BOOST_PP_ITERATION_3 194\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 195 && BOOST_PP_ITERATION_FINISH_3 >= 195\n#        define BOOST_PP_ITERATION_3 195\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 196 && BOOST_PP_ITERATION_FINISH_3 >= 196\n#        define BOOST_PP_ITERATION_3 196\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 197 && BOOST_PP_ITERATION_FINISH_3 >= 197\n#        define BOOST_PP_ITERATION_3 197\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 198 && BOOST_PP_ITERATION_FINISH_3 >= 198\n#        define BOOST_PP_ITERATION_3 198\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 199 && BOOST_PP_ITERATION_FINISH_3 >= 199\n#        define BOOST_PP_ITERATION_3 199\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 200 && BOOST_PP_ITERATION_FINISH_3 >= 200\n#        define BOOST_PP_ITERATION_3 200\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 201 && BOOST_PP_ITERATION_FINISH_3 >= 201\n#        define BOOST_PP_ITERATION_3 201\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 202 && BOOST_PP_ITERATION_FINISH_3 >= 202\n#        define BOOST_PP_ITERATION_3 202\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 203 && BOOST_PP_ITERATION_FINISH_3 >= 203\n#        define BOOST_PP_ITERATION_3 203\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 204 && BOOST_PP_ITERATION_FINISH_3 >= 204\n#        define BOOST_PP_ITERATION_3 204\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 205 && BOOST_PP_ITERATION_FINISH_3 >= 205\n#        define BOOST_PP_ITERATION_3 205\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 206 && BOOST_PP_ITERATION_FINISH_3 >= 206\n#        define BOOST_PP_ITERATION_3 206\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 207 && BOOST_PP_ITERATION_FINISH_3 >= 207\n#        define BOOST_PP_ITERATION_3 207\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 208 && BOOST_PP_ITERATION_FINISH_3 >= 208\n#        define BOOST_PP_ITERATION_3 208\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 
209 && BOOST_PP_ITERATION_FINISH_3 >= 209\n#        define BOOST_PP_ITERATION_3 209\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 210 && BOOST_PP_ITERATION_FINISH_3 >= 210\n#        define BOOST_PP_ITERATION_3 210\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 211 && BOOST_PP_ITERATION_FINISH_3 >= 211\n#        define BOOST_PP_ITERATION_3 211\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 212 && BOOST_PP_ITERATION_FINISH_3 >= 212\n#        define BOOST_PP_ITERATION_3 212\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 213 && BOOST_PP_ITERATION_FINISH_3 >= 213\n#        define BOOST_PP_ITERATION_3 213\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 214 && BOOST_PP_ITERATION_FINISH_3 >= 214\n#        define BOOST_PP_ITERATION_3 214\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 215 && BOOST_PP_ITERATION_FINISH_3 >= 215\n#        define BOOST_PP_ITERATION_3 215\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 216 && BOOST_PP_ITERATION_FINISH_3 >= 216\n#        define BOOST_PP_ITERATION_3 216\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 217 && BOOST_PP_ITERATION_FINISH_3 >= 217\n#        define BOOST_PP_ITERATION_3 217\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 218 && BOOST_PP_ITERATION_FINISH_3 >= 218\n#        define BOOST_PP_ITERATION_3 218\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 219 && BOOST_PP_ITERATION_FINISH_3 >= 219\n#        define BOOST_PP_ITERATION_3 219\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 220 && BOOST_PP_ITERATION_FINISH_3 >= 220\n#        define BOOST_PP_ITERATION_3 220\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 221 && BOOST_PP_ITERATION_FINISH_3 >= 221\n#        define BOOST_PP_ITERATION_3 221\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 222 && BOOST_PP_ITERATION_FINISH_3 >= 222\n#        define BOOST_PP_ITERATION_3 222\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 223 && BOOST_PP_ITERATION_FINISH_3 >= 223\n#        define BOOST_PP_ITERATION_3 223\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 224 && BOOST_PP_ITERATION_FINISH_3 >= 224\n#        define BOOST_PP_ITERATION_3 224\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 225 && BOOST_PP_ITERATION_FINISH_3 >= 225\n#        define BOOST_PP_ITERATION_3 225\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 226 && BOOST_PP_ITERATION_FINISH_3 >= 226\n#        define BOOST_PP_ITERATION_3 226\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 227 && BOOST_PP_ITERATION_FINISH_3 >= 227\n#        define BOOST_PP_ITERATION_3 227\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 
228 && BOOST_PP_ITERATION_FINISH_3 >= 228\n#        define BOOST_PP_ITERATION_3 228\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 229 && BOOST_PP_ITERATION_FINISH_3 >= 229\n#        define BOOST_PP_ITERATION_3 229\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 230 && BOOST_PP_ITERATION_FINISH_3 >= 230\n#        define BOOST_PP_ITERATION_3 230\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 231 && BOOST_PP_ITERATION_FINISH_3 >= 231\n#        define BOOST_PP_ITERATION_3 231\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 232 && BOOST_PP_ITERATION_FINISH_3 >= 232\n#        define BOOST_PP_ITERATION_3 232\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 233 && BOOST_PP_ITERATION_FINISH_3 >= 233\n#        define BOOST_PP_ITERATION_3 233\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 234 && BOOST_PP_ITERATION_FINISH_3 >= 234\n#        define BOOST_PP_ITERATION_3 234\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 235 && BOOST_PP_ITERATION_FINISH_3 >= 235\n#        define BOOST_PP_ITERATION_3 235\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 236 && BOOST_PP_ITERATION_FINISH_3 >= 236\n#        define BOOST_PP_ITERATION_3 236\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 237 && BOOST_PP_ITERATION_FINISH_3 >= 237\n#        define BOOST_PP_ITERATION_3 237\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 238 && BOOST_PP_ITERATION_FINISH_3 >= 238\n#        define BOOST_PP_ITERATION_3 238\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 239 && BOOST_PP_ITERATION_FINISH_3 >= 239\n#        define BOOST_PP_ITERATION_3 239\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 240 && BOOST_PP_ITERATION_FINISH_3 >= 240\n#        define BOOST_PP_ITERATION_3 240\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 241 && BOOST_PP_ITERATION_FINISH_3 >= 241\n#        define BOOST_PP_ITERATION_3 241\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 242 && BOOST_PP_ITERATION_FINISH_3 >= 242\n#        define BOOST_PP_ITERATION_3 242\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 243 && BOOST_PP_ITERATION_FINISH_3 >= 243\n#        define BOOST_PP_ITERATION_3 243\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 244 && BOOST_PP_ITERATION_FINISH_3 >= 244\n#        define BOOST_PP_ITERATION_3 244\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 245 && BOOST_PP_ITERATION_FINISH_3 >= 245\n#        define BOOST_PP_ITERATION_3 245\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 246 && BOOST_PP_ITERATION_FINISH_3 >= 246\n#        define BOOST_PP_ITERATION_3 246\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 
247 && BOOST_PP_ITERATION_FINISH_3 >= 247\n#        define BOOST_PP_ITERATION_3 247\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 248 && BOOST_PP_ITERATION_FINISH_3 >= 248\n#        define BOOST_PP_ITERATION_3 248\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 249 && BOOST_PP_ITERATION_FINISH_3 >= 249\n#        define BOOST_PP_ITERATION_3 249\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 250 && BOOST_PP_ITERATION_FINISH_3 >= 250\n#        define BOOST_PP_ITERATION_3 250\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 251 && BOOST_PP_ITERATION_FINISH_3 >= 251\n#        define BOOST_PP_ITERATION_3 251\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 252 && BOOST_PP_ITERATION_FINISH_3 >= 252\n#        define BOOST_PP_ITERATION_3 252\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 253 && BOOST_PP_ITERATION_FINISH_3 >= 253\n#        define BOOST_PP_ITERATION_3 253\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 254 && BOOST_PP_ITERATION_FINISH_3 >= 254\n#        define BOOST_PP_ITERATION_3 254\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 255 && BOOST_PP_ITERATION_FINISH_3 >= 255\n#        define BOOST_PP_ITERATION_3 255\n#        include BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n#    if BOOST_PP_ITERATION_START_3 <= 256 && BOOST_PP_ITERATION_FINISH_3 >= 256\n#        define BOOST_PP_ITERATION_3 256\n#        include 
BOOST_PP_FILENAME_3\n#        undef BOOST_PP_ITERATION_3\n#    endif\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define BOOST_PP_ITERATION_DEPTH() 2\n#\n# undef BOOST_PP_ITERATION_START_3\n# undef BOOST_PP_ITERATION_FINISH_3\n# undef BOOST_PP_FILENAME_3\n#\n# undef BOOST_PP_ITERATION_FLAGS_3\n# undef BOOST_PP_ITERATION_PARAMS_3\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/forward4.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if defined(BOOST_PP_ITERATION_LIMITS)\n#    if !defined(BOOST_PP_FILENAME_4)\n#        error BOOST_PP_ERROR:  depth #4 filename is not defined\n#    endif\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/lower4.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/upper4.hpp>\n#    define BOOST_PP_ITERATION_FLAGS_4() 0\n#    undef BOOST_PP_ITERATION_LIMITS\n# elif defined(BOOST_PP_ITERATION_PARAMS_4)\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(0, BOOST_PP_ITERATION_PARAMS_4)\n#    include <boost/preprocessor/iteration/detail/bounds/lower4.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(1, BOOST_PP_ITERATION_PARAMS_4)\n#    include <boost/preprocessor/iteration/detail/bounds/upper4.hpp>\n#    define BOOST_PP_FILENAME_4 BOOST_PP_ARRAY_ELEM(2, BOOST_PP_ITERATION_PARAMS_4)\n#    if BOOST_PP_ARRAY_SIZE(BOOST_PP_ITERATION_PARAMS_4) >= 4\n#        define BOOST_PP_ITERATION_FLAGS_4() BOOST_PP_ARRAY_ELEM(3, BOOST_PP_ITERATION_PARAMS_4)\n#    else\n#        define BOOST_PP_ITERATION_FLAGS_4() 0\n#    endif\n# else\n#    error BOOST_PP_ERROR:  depth #4 iteration boundaries or filename not defined\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define 
BOOST_PP_ITERATION_DEPTH() 4\n#\n# if (BOOST_PP_ITERATION_START_4) > (BOOST_PP_ITERATION_FINISH_4)\n#    include <boost/preprocessor/iteration/detail/iter/reverse4.hpp>\n# else\n#    if BOOST_PP_ITERATION_START_4 <= 0 && BOOST_PP_ITERATION_FINISH_4 >= 0\n#        define BOOST_PP_ITERATION_4 0\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 1 && BOOST_PP_ITERATION_FINISH_4 >= 1\n#        define BOOST_PP_ITERATION_4 1\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 2 && BOOST_PP_ITERATION_FINISH_4 >= 2\n#        define BOOST_PP_ITERATION_4 2\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 3 && BOOST_PP_ITERATION_FINISH_4 >= 3\n#        define BOOST_PP_ITERATION_4 3\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 4 && BOOST_PP_ITERATION_FINISH_4 >= 4\n#        define BOOST_PP_ITERATION_4 4\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 5 && BOOST_PP_ITERATION_FINISH_4 >= 5\n#        define BOOST_PP_ITERATION_4 5\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 6 && BOOST_PP_ITERATION_FINISH_4 >= 6\n#        define BOOST_PP_ITERATION_4 6\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 7 && BOOST_PP_ITERATION_FINISH_4 >= 7\n#        define BOOST_PP_ITERATION_4 7\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 8 && BOOST_PP_ITERATION_FINISH_4 >= 8\n#        define BOOST_PP_ITERATION_4 8\n#        include BOOST_PP_FILENAME_4\n#        undef 
BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 9 && BOOST_PP_ITERATION_FINISH_4 >= 9\n#        define BOOST_PP_ITERATION_4 9\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 10 && BOOST_PP_ITERATION_FINISH_4 >= 10\n#        define BOOST_PP_ITERATION_4 10\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 11 && BOOST_PP_ITERATION_FINISH_4 >= 11\n#        define BOOST_PP_ITERATION_4 11\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 12 && BOOST_PP_ITERATION_FINISH_4 >= 12\n#        define BOOST_PP_ITERATION_4 12\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 13 && BOOST_PP_ITERATION_FINISH_4 >= 13\n#        define BOOST_PP_ITERATION_4 13\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 14 && BOOST_PP_ITERATION_FINISH_4 >= 14\n#        define BOOST_PP_ITERATION_4 14\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 15 && BOOST_PP_ITERATION_FINISH_4 >= 15\n#        define BOOST_PP_ITERATION_4 15\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 16 && BOOST_PP_ITERATION_FINISH_4 >= 16\n#        define BOOST_PP_ITERATION_4 16\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 17 && BOOST_PP_ITERATION_FINISH_4 >= 17\n#        define BOOST_PP_ITERATION_4 17\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 18 && BOOST_PP_ITERATION_FINISH_4 >= 18\n#        define 
BOOST_PP_ITERATION_4 18\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 19 && BOOST_PP_ITERATION_FINISH_4 >= 19\n#        define BOOST_PP_ITERATION_4 19\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 20 && BOOST_PP_ITERATION_FINISH_4 >= 20\n#        define BOOST_PP_ITERATION_4 20\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 21 && BOOST_PP_ITERATION_FINISH_4 >= 21\n#        define BOOST_PP_ITERATION_4 21\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 22 && BOOST_PP_ITERATION_FINISH_4 >= 22\n#        define BOOST_PP_ITERATION_4 22\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 23 && BOOST_PP_ITERATION_FINISH_4 >= 23\n#        define BOOST_PP_ITERATION_4 23\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 24 && BOOST_PP_ITERATION_FINISH_4 >= 24\n#        define BOOST_PP_ITERATION_4 24\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 25 && BOOST_PP_ITERATION_FINISH_4 >= 25\n#        define BOOST_PP_ITERATION_4 25\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 26 && BOOST_PP_ITERATION_FINISH_4 >= 26\n#        define BOOST_PP_ITERATION_4 26\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 27 && BOOST_PP_ITERATION_FINISH_4 >= 27\n#        define BOOST_PP_ITERATION_4 27\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if 
BOOST_PP_ITERATION_START_4 <= 28 && BOOST_PP_ITERATION_FINISH_4 >= 28\n#        define BOOST_PP_ITERATION_4 28\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 29 && BOOST_PP_ITERATION_FINISH_4 >= 29\n#        define BOOST_PP_ITERATION_4 29\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 30 && BOOST_PP_ITERATION_FINISH_4 >= 30\n#        define BOOST_PP_ITERATION_4 30\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 31 && BOOST_PP_ITERATION_FINISH_4 >= 31\n#        define BOOST_PP_ITERATION_4 31\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 32 && BOOST_PP_ITERATION_FINISH_4 >= 32\n#        define BOOST_PP_ITERATION_4 32\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 33 && BOOST_PP_ITERATION_FINISH_4 >= 33\n#        define BOOST_PP_ITERATION_4 33\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 34 && BOOST_PP_ITERATION_FINISH_4 >= 34\n#        define BOOST_PP_ITERATION_4 34\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 35 && BOOST_PP_ITERATION_FINISH_4 >= 35\n#        define BOOST_PP_ITERATION_4 35\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 36 && BOOST_PP_ITERATION_FINISH_4 >= 36\n#        define BOOST_PP_ITERATION_4 36\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 37 && BOOST_PP_ITERATION_FINISH_4 >= 37\n#        define BOOST_PP_ITERATION_4 37\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 38 && BOOST_PP_ITERATION_FINISH_4 >= 38\n#        define BOOST_PP_ITERATION_4 38\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 39 && BOOST_PP_ITERATION_FINISH_4 >= 39\n#        define BOOST_PP_ITERATION_4 39\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 40 && BOOST_PP_ITERATION_FINISH_4 >= 40\n#        define BOOST_PP_ITERATION_4 40\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 41 && BOOST_PP_ITERATION_FINISH_4 >= 41\n#        define BOOST_PP_ITERATION_4 41\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 42 && BOOST_PP_ITERATION_FINISH_4 >= 42\n#        define BOOST_PP_ITERATION_4 42\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 43 && BOOST_PP_ITERATION_FINISH_4 >= 43\n#        define BOOST_PP_ITERATION_4 43\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 44 && BOOST_PP_ITERATION_FINISH_4 >= 44\n#        define BOOST_PP_ITERATION_4 44\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 45 && BOOST_PP_ITERATION_FINISH_4 >= 45\n#        define BOOST_PP_ITERATION_4 45\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 46 && BOOST_PP_ITERATION_FINISH_4 >= 46\n#        define BOOST_PP_ITERATION_4 46\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 47 && 
BOOST_PP_ITERATION_FINISH_4 >= 47\n#        define BOOST_PP_ITERATION_4 47\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 48 && BOOST_PP_ITERATION_FINISH_4 >= 48\n#        define BOOST_PP_ITERATION_4 48\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 49 && BOOST_PP_ITERATION_FINISH_4 >= 49\n#        define BOOST_PP_ITERATION_4 49\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 50 && BOOST_PP_ITERATION_FINISH_4 >= 50\n#        define BOOST_PP_ITERATION_4 50\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 51 && BOOST_PP_ITERATION_FINISH_4 >= 51\n#        define BOOST_PP_ITERATION_4 51\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 52 && BOOST_PP_ITERATION_FINISH_4 >= 52\n#        define BOOST_PP_ITERATION_4 52\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 53 && BOOST_PP_ITERATION_FINISH_4 >= 53\n#        define BOOST_PP_ITERATION_4 53\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 54 && BOOST_PP_ITERATION_FINISH_4 >= 54\n#        define BOOST_PP_ITERATION_4 54\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 55 && BOOST_PP_ITERATION_FINISH_4 >= 55\n#        define BOOST_PP_ITERATION_4 55\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 56 && BOOST_PP_ITERATION_FINISH_4 >= 56\n#        define BOOST_PP_ITERATION_4 56\n#        include BOOST_PP_FILENAME_4\n#        undef 
BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 57 && BOOST_PP_ITERATION_FINISH_4 >= 57\n#        define BOOST_PP_ITERATION_4 57\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 58 && BOOST_PP_ITERATION_FINISH_4 >= 58\n#        define BOOST_PP_ITERATION_4 58\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 59 && BOOST_PP_ITERATION_FINISH_4 >= 59\n#        define BOOST_PP_ITERATION_4 59\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 60 && BOOST_PP_ITERATION_FINISH_4 >= 60\n#        define BOOST_PP_ITERATION_4 60\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 61 && BOOST_PP_ITERATION_FINISH_4 >= 61\n#        define BOOST_PP_ITERATION_4 61\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 62 && BOOST_PP_ITERATION_FINISH_4 >= 62\n#        define BOOST_PP_ITERATION_4 62\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 63 && BOOST_PP_ITERATION_FINISH_4 >= 63\n#        define BOOST_PP_ITERATION_4 63\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 64 && BOOST_PP_ITERATION_FINISH_4 >= 64\n#        define BOOST_PP_ITERATION_4 64\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 65 && BOOST_PP_ITERATION_FINISH_4 >= 65\n#        define BOOST_PP_ITERATION_4 65\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 66 && BOOST_PP_ITERATION_FINISH_4 >= 66\n#        define 
BOOST_PP_ITERATION_4 66\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 67 && BOOST_PP_ITERATION_FINISH_4 >= 67\n#        define BOOST_PP_ITERATION_4 67\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 68 && BOOST_PP_ITERATION_FINISH_4 >= 68\n#        define BOOST_PP_ITERATION_4 68\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 69 && BOOST_PP_ITERATION_FINISH_4 >= 69\n#        define BOOST_PP_ITERATION_4 69\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 70 && BOOST_PP_ITERATION_FINISH_4 >= 70\n#        define BOOST_PP_ITERATION_4 70\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 71 && BOOST_PP_ITERATION_FINISH_4 >= 71\n#        define BOOST_PP_ITERATION_4 71\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 72 && BOOST_PP_ITERATION_FINISH_4 >= 72\n#        define BOOST_PP_ITERATION_4 72\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 73 && BOOST_PP_ITERATION_FINISH_4 >= 73\n#        define BOOST_PP_ITERATION_4 73\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 74 && BOOST_PP_ITERATION_FINISH_4 >= 74\n#        define BOOST_PP_ITERATION_4 74\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 75 && BOOST_PP_ITERATION_FINISH_4 >= 75\n#        define BOOST_PP_ITERATION_4 75\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if 
BOOST_PP_ITERATION_START_4 <= 76 && BOOST_PP_ITERATION_FINISH_4 >= 76\n#        define BOOST_PP_ITERATION_4 76\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 77 && BOOST_PP_ITERATION_FINISH_4 >= 77\n#        define BOOST_PP_ITERATION_4 77\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 78 && BOOST_PP_ITERATION_FINISH_4 >= 78\n#        define BOOST_PP_ITERATION_4 78\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 79 && BOOST_PP_ITERATION_FINISH_4 >= 79\n#        define BOOST_PP_ITERATION_4 79\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 80 && BOOST_PP_ITERATION_FINISH_4 >= 80\n#        define BOOST_PP_ITERATION_4 80\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 81 && BOOST_PP_ITERATION_FINISH_4 >= 81\n#        define BOOST_PP_ITERATION_4 81\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 82 && BOOST_PP_ITERATION_FINISH_4 >= 82\n#        define BOOST_PP_ITERATION_4 82\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 83 && BOOST_PP_ITERATION_FINISH_4 >= 83\n#        define BOOST_PP_ITERATION_4 83\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 84 && BOOST_PP_ITERATION_FINISH_4 >= 84\n#        define BOOST_PP_ITERATION_4 84\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 85 && BOOST_PP_ITERATION_FINISH_4 >= 85\n#        define BOOST_PP_ITERATION_4 85\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 86 && BOOST_PP_ITERATION_FINISH_4 >= 86\n#        define BOOST_PP_ITERATION_4 86\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 87 && BOOST_PP_ITERATION_FINISH_4 >= 87\n#        define BOOST_PP_ITERATION_4 87\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 88 && BOOST_PP_ITERATION_FINISH_4 >= 88\n#        define BOOST_PP_ITERATION_4 88\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 89 && BOOST_PP_ITERATION_FINISH_4 >= 89\n#        define BOOST_PP_ITERATION_4 89\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 90 && BOOST_PP_ITERATION_FINISH_4 >= 90\n#        define BOOST_PP_ITERATION_4 90\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 91 && BOOST_PP_ITERATION_FINISH_4 >= 91\n#        define BOOST_PP_ITERATION_4 91\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 92 && BOOST_PP_ITERATION_FINISH_4 >= 92\n#        define BOOST_PP_ITERATION_4 92\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 93 && BOOST_PP_ITERATION_FINISH_4 >= 93\n#        define BOOST_PP_ITERATION_4 93\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 94 && BOOST_PP_ITERATION_FINISH_4 >= 94\n#        define BOOST_PP_ITERATION_4 94\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 95 && 
BOOST_PP_ITERATION_FINISH_4 >= 95\n#        define BOOST_PP_ITERATION_4 95\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 96 && BOOST_PP_ITERATION_FINISH_4 >= 96\n#        define BOOST_PP_ITERATION_4 96\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 97 && BOOST_PP_ITERATION_FINISH_4 >= 97\n#        define BOOST_PP_ITERATION_4 97\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 98 && BOOST_PP_ITERATION_FINISH_4 >= 98\n#        define BOOST_PP_ITERATION_4 98\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 99 && BOOST_PP_ITERATION_FINISH_4 >= 99\n#        define BOOST_PP_ITERATION_4 99\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 100 && BOOST_PP_ITERATION_FINISH_4 >= 100\n#        define BOOST_PP_ITERATION_4 100\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 101 && BOOST_PP_ITERATION_FINISH_4 >= 101\n#        define BOOST_PP_ITERATION_4 101\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 102 && BOOST_PP_ITERATION_FINISH_4 >= 102\n#        define BOOST_PP_ITERATION_4 102\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 103 && BOOST_PP_ITERATION_FINISH_4 >= 103\n#        define BOOST_PP_ITERATION_4 103\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 104 && BOOST_PP_ITERATION_FINISH_4 >= 104\n#        define BOOST_PP_ITERATION_4 104\n#        include BOOST_PP_FILENAME_4\n#       
 undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 105 && BOOST_PP_ITERATION_FINISH_4 >= 105\n#        define BOOST_PP_ITERATION_4 105\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 106 && BOOST_PP_ITERATION_FINISH_4 >= 106\n#        define BOOST_PP_ITERATION_4 106\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 107 && BOOST_PP_ITERATION_FINISH_4 >= 107\n#        define BOOST_PP_ITERATION_4 107\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 108 && BOOST_PP_ITERATION_FINISH_4 >= 108\n#        define BOOST_PP_ITERATION_4 108\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 109 && BOOST_PP_ITERATION_FINISH_4 >= 109\n#        define BOOST_PP_ITERATION_4 109\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 110 && BOOST_PP_ITERATION_FINISH_4 >= 110\n#        define BOOST_PP_ITERATION_4 110\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 111 && BOOST_PP_ITERATION_FINISH_4 >= 111\n#        define BOOST_PP_ITERATION_4 111\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 112 && BOOST_PP_ITERATION_FINISH_4 >= 112\n#        define BOOST_PP_ITERATION_4 112\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 113 && BOOST_PP_ITERATION_FINISH_4 >= 113\n#        define BOOST_PP_ITERATION_4 113\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 114 && 
BOOST_PP_ITERATION_FINISH_4 >= 114\n#        define BOOST_PP_ITERATION_4 114\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 115 && BOOST_PP_ITERATION_FINISH_4 >= 115\n#        define BOOST_PP_ITERATION_4 115\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 116 && BOOST_PP_ITERATION_FINISH_4 >= 116\n#        define BOOST_PP_ITERATION_4 116\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 117 && BOOST_PP_ITERATION_FINISH_4 >= 117\n#        define BOOST_PP_ITERATION_4 117\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 118 && BOOST_PP_ITERATION_FINISH_4 >= 118\n#        define BOOST_PP_ITERATION_4 118\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 119 && BOOST_PP_ITERATION_FINISH_4 >= 119\n#        define BOOST_PP_ITERATION_4 119\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 120 && BOOST_PP_ITERATION_FINISH_4 >= 120\n#        define BOOST_PP_ITERATION_4 120\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 121 && BOOST_PP_ITERATION_FINISH_4 >= 121\n#        define BOOST_PP_ITERATION_4 121\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 122 && BOOST_PP_ITERATION_FINISH_4 >= 122\n#        define BOOST_PP_ITERATION_4 122\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 123 && BOOST_PP_ITERATION_FINISH_4 >= 123\n#        define BOOST_PP_ITERATION_4 123\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 124 && BOOST_PP_ITERATION_FINISH_4 >= 124\n#        define BOOST_PP_ITERATION_4 124\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 125 && BOOST_PP_ITERATION_FINISH_4 >= 125\n#        define BOOST_PP_ITERATION_4 125\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 126 && BOOST_PP_ITERATION_FINISH_4 >= 126\n#        define BOOST_PP_ITERATION_4 126\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 127 && BOOST_PP_ITERATION_FINISH_4 >= 127\n#        define BOOST_PP_ITERATION_4 127\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 128 && BOOST_PP_ITERATION_FINISH_4 >= 128\n#        define BOOST_PP_ITERATION_4 128\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 129 && BOOST_PP_ITERATION_FINISH_4 >= 129\n#        define BOOST_PP_ITERATION_4 129\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 130 && BOOST_PP_ITERATION_FINISH_4 >= 130\n#        define BOOST_PP_ITERATION_4 130\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 131 && BOOST_PP_ITERATION_FINISH_4 >= 131\n#        define BOOST_PP_ITERATION_4 131\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 132 && BOOST_PP_ITERATION_FINISH_4 >= 132\n#        define BOOST_PP_ITERATION_4 132\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 
133 && BOOST_PP_ITERATION_FINISH_4 >= 133\n#        define BOOST_PP_ITERATION_4 133\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 134 && BOOST_PP_ITERATION_FINISH_4 >= 134\n#        define BOOST_PP_ITERATION_4 134\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 135 && BOOST_PP_ITERATION_FINISH_4 >= 135\n#        define BOOST_PP_ITERATION_4 135\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 136 && BOOST_PP_ITERATION_FINISH_4 >= 136\n#        define BOOST_PP_ITERATION_4 136\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 137 && BOOST_PP_ITERATION_FINISH_4 >= 137\n#        define BOOST_PP_ITERATION_4 137\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 138 && BOOST_PP_ITERATION_FINISH_4 >= 138\n#        define BOOST_PP_ITERATION_4 138\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 139 && BOOST_PP_ITERATION_FINISH_4 >= 139\n#        define BOOST_PP_ITERATION_4 139\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 140 && BOOST_PP_ITERATION_FINISH_4 >= 140\n#        define BOOST_PP_ITERATION_4 140\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 141 && BOOST_PP_ITERATION_FINISH_4 >= 141\n#        define BOOST_PP_ITERATION_4 141\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 142 && BOOST_PP_ITERATION_FINISH_4 >= 142\n#        define BOOST_PP_ITERATION_4 142\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 143 && BOOST_PP_ITERATION_FINISH_4 >= 143\n#        define BOOST_PP_ITERATION_4 143\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 144 && BOOST_PP_ITERATION_FINISH_4 >= 144\n#        define BOOST_PP_ITERATION_4 144\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 145 && BOOST_PP_ITERATION_FINISH_4 >= 145\n#        define BOOST_PP_ITERATION_4 145\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 146 && BOOST_PP_ITERATION_FINISH_4 >= 146\n#        define BOOST_PP_ITERATION_4 146\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 147 && BOOST_PP_ITERATION_FINISH_4 >= 147\n#        define BOOST_PP_ITERATION_4 147\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 148 && BOOST_PP_ITERATION_FINISH_4 >= 148\n#        define BOOST_PP_ITERATION_4 148\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 149 && BOOST_PP_ITERATION_FINISH_4 >= 149\n#        define BOOST_PP_ITERATION_4 149\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 150 && BOOST_PP_ITERATION_FINISH_4 >= 150\n#        define BOOST_PP_ITERATION_4 150\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 151 && BOOST_PP_ITERATION_FINISH_4 >= 151\n#        define BOOST_PP_ITERATION_4 151\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 
152 && BOOST_PP_ITERATION_FINISH_4 >= 152\n#        define BOOST_PP_ITERATION_4 152\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 153 && BOOST_PP_ITERATION_FINISH_4 >= 153\n#        define BOOST_PP_ITERATION_4 153\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 154 && BOOST_PP_ITERATION_FINISH_4 >= 154\n#        define BOOST_PP_ITERATION_4 154\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 155 && BOOST_PP_ITERATION_FINISH_4 >= 155\n#        define BOOST_PP_ITERATION_4 155\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 156 && BOOST_PP_ITERATION_FINISH_4 >= 156\n#        define BOOST_PP_ITERATION_4 156\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 157 && BOOST_PP_ITERATION_FINISH_4 >= 157\n#        define BOOST_PP_ITERATION_4 157\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 158 && BOOST_PP_ITERATION_FINISH_4 >= 158\n#        define BOOST_PP_ITERATION_4 158\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 159 && BOOST_PP_ITERATION_FINISH_4 >= 159\n#        define BOOST_PP_ITERATION_4 159\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 160 && BOOST_PP_ITERATION_FINISH_4 >= 160\n#        define BOOST_PP_ITERATION_4 160\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 161 && BOOST_PP_ITERATION_FINISH_4 >= 161\n#        define BOOST_PP_ITERATION_4 161\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 162 && BOOST_PP_ITERATION_FINISH_4 >= 162\n#        define BOOST_PP_ITERATION_4 162\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 163 && BOOST_PP_ITERATION_FINISH_4 >= 163\n#        define BOOST_PP_ITERATION_4 163\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 164 && BOOST_PP_ITERATION_FINISH_4 >= 164\n#        define BOOST_PP_ITERATION_4 164\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 165 && BOOST_PP_ITERATION_FINISH_4 >= 165\n#        define BOOST_PP_ITERATION_4 165\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 166 && BOOST_PP_ITERATION_FINISH_4 >= 166\n#        define BOOST_PP_ITERATION_4 166\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 167 && BOOST_PP_ITERATION_FINISH_4 >= 167\n#        define BOOST_PP_ITERATION_4 167\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 168 && BOOST_PP_ITERATION_FINISH_4 >= 168\n#        define BOOST_PP_ITERATION_4 168\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 169 && BOOST_PP_ITERATION_FINISH_4 >= 169\n#        define BOOST_PP_ITERATION_4 169\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 170 && BOOST_PP_ITERATION_FINISH_4 >= 170\n#        define BOOST_PP_ITERATION_4 170\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 
171 && BOOST_PP_ITERATION_FINISH_4 >= 171\n#        define BOOST_PP_ITERATION_4 171\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 172 && BOOST_PP_ITERATION_FINISH_4 >= 172\n#        define BOOST_PP_ITERATION_4 172\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 173 && BOOST_PP_ITERATION_FINISH_4 >= 173\n#        define BOOST_PP_ITERATION_4 173\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 174 && BOOST_PP_ITERATION_FINISH_4 >= 174\n#        define BOOST_PP_ITERATION_4 174\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 175 && BOOST_PP_ITERATION_FINISH_4 >= 175\n#        define BOOST_PP_ITERATION_4 175\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 176 && BOOST_PP_ITERATION_FINISH_4 >= 176\n#        define BOOST_PP_ITERATION_4 176\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 177 && BOOST_PP_ITERATION_FINISH_4 >= 177\n#        define BOOST_PP_ITERATION_4 177\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 178 && BOOST_PP_ITERATION_FINISH_4 >= 178\n#        define BOOST_PP_ITERATION_4 178\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 179 && BOOST_PP_ITERATION_FINISH_4 >= 179\n#        define BOOST_PP_ITERATION_4 179\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 180 && BOOST_PP_ITERATION_FINISH_4 >= 180\n#        define BOOST_PP_ITERATION_4 180\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 181 && BOOST_PP_ITERATION_FINISH_4 >= 181\n#        define BOOST_PP_ITERATION_4 181\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 182 && BOOST_PP_ITERATION_FINISH_4 >= 182\n#        define BOOST_PP_ITERATION_4 182\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 183 && BOOST_PP_ITERATION_FINISH_4 >= 183\n#        define BOOST_PP_ITERATION_4 183\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 184 && BOOST_PP_ITERATION_FINISH_4 >= 184\n#        define BOOST_PP_ITERATION_4 184\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 185 && BOOST_PP_ITERATION_FINISH_4 >= 185\n#        define BOOST_PP_ITERATION_4 185\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 186 && BOOST_PP_ITERATION_FINISH_4 >= 186\n#        define BOOST_PP_ITERATION_4 186\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 187 && BOOST_PP_ITERATION_FINISH_4 >= 187\n#        define BOOST_PP_ITERATION_4 187\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 188 && BOOST_PP_ITERATION_FINISH_4 >= 188\n#        define BOOST_PP_ITERATION_4 188\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 189 && BOOST_PP_ITERATION_FINISH_4 >= 189\n#        define BOOST_PP_ITERATION_4 189\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 
190 && BOOST_PP_ITERATION_FINISH_4 >= 190\n#        define BOOST_PP_ITERATION_4 190\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 191 && BOOST_PP_ITERATION_FINISH_4 >= 191\n#        define BOOST_PP_ITERATION_4 191\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 192 && BOOST_PP_ITERATION_FINISH_4 >= 192\n#        define BOOST_PP_ITERATION_4 192\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 193 && BOOST_PP_ITERATION_FINISH_4 >= 193\n#        define BOOST_PP_ITERATION_4 193\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 194 && BOOST_PP_ITERATION_FINISH_4 >= 194\n#        define BOOST_PP_ITERATION_4 194\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 195 && BOOST_PP_ITERATION_FINISH_4 >= 195\n#        define BOOST_PP_ITERATION_4 195\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 196 && BOOST_PP_ITERATION_FINISH_4 >= 196\n#        define BOOST_PP_ITERATION_4 196\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 197 && BOOST_PP_ITERATION_FINISH_4 >= 197\n#        define BOOST_PP_ITERATION_4 197\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 198 && BOOST_PP_ITERATION_FINISH_4 >= 198\n#        define BOOST_PP_ITERATION_4 198\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 199 && BOOST_PP_ITERATION_FINISH_4 >= 199\n#        define BOOST_PP_ITERATION_4 199\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 200 && BOOST_PP_ITERATION_FINISH_4 >= 200\n#        define BOOST_PP_ITERATION_4 200\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 201 && BOOST_PP_ITERATION_FINISH_4 >= 201\n#        define BOOST_PP_ITERATION_4 201\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 202 && BOOST_PP_ITERATION_FINISH_4 >= 202\n#        define BOOST_PP_ITERATION_4 202\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 203 && BOOST_PP_ITERATION_FINISH_4 >= 203\n#        define BOOST_PP_ITERATION_4 203\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 204 && BOOST_PP_ITERATION_FINISH_4 >= 204\n#        define BOOST_PP_ITERATION_4 204\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 205 && BOOST_PP_ITERATION_FINISH_4 >= 205\n#        define BOOST_PP_ITERATION_4 205\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 206 && BOOST_PP_ITERATION_FINISH_4 >= 206\n#        define BOOST_PP_ITERATION_4 206\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 207 && BOOST_PP_ITERATION_FINISH_4 >= 207\n#        define BOOST_PP_ITERATION_4 207\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 208 && BOOST_PP_ITERATION_FINISH_4 >= 208\n#        define BOOST_PP_ITERATION_4 208\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 
209 && BOOST_PP_ITERATION_FINISH_4 >= 209\n#        define BOOST_PP_ITERATION_4 209\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 210 && BOOST_PP_ITERATION_FINISH_4 >= 210\n#        define BOOST_PP_ITERATION_4 210\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 211 && BOOST_PP_ITERATION_FINISH_4 >= 211\n#        define BOOST_PP_ITERATION_4 211\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 212 && BOOST_PP_ITERATION_FINISH_4 >= 212\n#        define BOOST_PP_ITERATION_4 212\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 213 && BOOST_PP_ITERATION_FINISH_4 >= 213\n#        define BOOST_PP_ITERATION_4 213\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 214 && BOOST_PP_ITERATION_FINISH_4 >= 214\n#        define BOOST_PP_ITERATION_4 214\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 215 && BOOST_PP_ITERATION_FINISH_4 >= 215\n#        define BOOST_PP_ITERATION_4 215\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 216 && BOOST_PP_ITERATION_FINISH_4 >= 216\n#        define BOOST_PP_ITERATION_4 216\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 217 && BOOST_PP_ITERATION_FINISH_4 >= 217\n#        define BOOST_PP_ITERATION_4 217\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 218 && BOOST_PP_ITERATION_FINISH_4 >= 218\n#        define BOOST_PP_ITERATION_4 218\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 219 && BOOST_PP_ITERATION_FINISH_4 >= 219\n#        define BOOST_PP_ITERATION_4 219\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 220 && BOOST_PP_ITERATION_FINISH_4 >= 220\n#        define BOOST_PP_ITERATION_4 220\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 221 && BOOST_PP_ITERATION_FINISH_4 >= 221\n#        define BOOST_PP_ITERATION_4 221\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 222 && BOOST_PP_ITERATION_FINISH_4 >= 222\n#        define BOOST_PP_ITERATION_4 222\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 223 && BOOST_PP_ITERATION_FINISH_4 >= 223\n#        define BOOST_PP_ITERATION_4 223\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 224 && BOOST_PP_ITERATION_FINISH_4 >= 224\n#        define BOOST_PP_ITERATION_4 224\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 225 && BOOST_PP_ITERATION_FINISH_4 >= 225\n#        define BOOST_PP_ITERATION_4 225\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 226 && BOOST_PP_ITERATION_FINISH_4 >= 226\n#        define BOOST_PP_ITERATION_4 226\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 227 && BOOST_PP_ITERATION_FINISH_4 >= 227\n#        define BOOST_PP_ITERATION_4 227\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 
228 && BOOST_PP_ITERATION_FINISH_4 >= 228\n#        define BOOST_PP_ITERATION_4 228\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 229 && BOOST_PP_ITERATION_FINISH_4 >= 229\n#        define BOOST_PP_ITERATION_4 229\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 230 && BOOST_PP_ITERATION_FINISH_4 >= 230\n#        define BOOST_PP_ITERATION_4 230\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 231 && BOOST_PP_ITERATION_FINISH_4 >= 231\n#        define BOOST_PP_ITERATION_4 231\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 232 && BOOST_PP_ITERATION_FINISH_4 >= 232\n#        define BOOST_PP_ITERATION_4 232\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 233 && BOOST_PP_ITERATION_FINISH_4 >= 233\n#        define BOOST_PP_ITERATION_4 233\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 234 && BOOST_PP_ITERATION_FINISH_4 >= 234\n#        define BOOST_PP_ITERATION_4 234\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 235 && BOOST_PP_ITERATION_FINISH_4 >= 235\n#        define BOOST_PP_ITERATION_4 235\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 236 && BOOST_PP_ITERATION_FINISH_4 >= 236\n#        define BOOST_PP_ITERATION_4 236\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 237 && BOOST_PP_ITERATION_FINISH_4 >= 237\n#        define BOOST_PP_ITERATION_4 237\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 238 && BOOST_PP_ITERATION_FINISH_4 >= 238\n#        define BOOST_PP_ITERATION_4 238\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 239 && BOOST_PP_ITERATION_FINISH_4 >= 239\n#        define BOOST_PP_ITERATION_4 239\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 240 && BOOST_PP_ITERATION_FINISH_4 >= 240\n#        define BOOST_PP_ITERATION_4 240\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 241 && BOOST_PP_ITERATION_FINISH_4 >= 241\n#        define BOOST_PP_ITERATION_4 241\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 242 && BOOST_PP_ITERATION_FINISH_4 >= 242\n#        define BOOST_PP_ITERATION_4 242\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 243 && BOOST_PP_ITERATION_FINISH_4 >= 243\n#        define BOOST_PP_ITERATION_4 243\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 244 && BOOST_PP_ITERATION_FINISH_4 >= 244\n#        define BOOST_PP_ITERATION_4 244\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 245 && BOOST_PP_ITERATION_FINISH_4 >= 245\n#        define BOOST_PP_ITERATION_4 245\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 246 && BOOST_PP_ITERATION_FINISH_4 >= 246\n#        define BOOST_PP_ITERATION_4 246\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 
247 && BOOST_PP_ITERATION_FINISH_4 >= 247\n#        define BOOST_PP_ITERATION_4 247\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 248 && BOOST_PP_ITERATION_FINISH_4 >= 248\n#        define BOOST_PP_ITERATION_4 248\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 249 && BOOST_PP_ITERATION_FINISH_4 >= 249\n#        define BOOST_PP_ITERATION_4 249\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 250 && BOOST_PP_ITERATION_FINISH_4 >= 250\n#        define BOOST_PP_ITERATION_4 250\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 251 && BOOST_PP_ITERATION_FINISH_4 >= 251\n#        define BOOST_PP_ITERATION_4 251\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 252 && BOOST_PP_ITERATION_FINISH_4 >= 252\n#        define BOOST_PP_ITERATION_4 252\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 253 && BOOST_PP_ITERATION_FINISH_4 >= 253\n#        define BOOST_PP_ITERATION_4 253\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 254 && BOOST_PP_ITERATION_FINISH_4 >= 254\n#        define BOOST_PP_ITERATION_4 254\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 255 && BOOST_PP_ITERATION_FINISH_4 >= 255\n#        define BOOST_PP_ITERATION_4 255\n#        include BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n#    if BOOST_PP_ITERATION_START_4 <= 256 && BOOST_PP_ITERATION_FINISH_4 >= 256\n#        define BOOST_PP_ITERATION_4 256\n#        include 
BOOST_PP_FILENAME_4\n#        undef BOOST_PP_ITERATION_4\n#    endif\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define BOOST_PP_ITERATION_DEPTH() 3\n#\n# undef BOOST_PP_ITERATION_START_4\n# undef BOOST_PP_ITERATION_FINISH_4\n# undef BOOST_PP_FILENAME_4\n#\n# undef BOOST_PP_ITERATION_FLAGS_4\n# undef BOOST_PP_ITERATION_PARAMS_4\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/forward5.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if defined(BOOST_PP_ITERATION_LIMITS)\n#    if !defined(BOOST_PP_FILENAME_5)\n#        error BOOST_PP_ERROR:  depth #5 filename is not defined\n#    endif\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/lower5.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_ITERATION_LIMITS)\n#    include <boost/preprocessor/iteration/detail/bounds/upper5.hpp>\n#    define BOOST_PP_ITERATION_FLAGS_5() 0\n#    undef BOOST_PP_ITERATION_LIMITS\n# elif defined(BOOST_PP_ITERATION_PARAMS_5)\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(0, BOOST_PP_ITERATION_PARAMS_5)\n#    include <boost/preprocessor/iteration/detail/bounds/lower5.hpp>\n#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(1, BOOST_PP_ITERATION_PARAMS_5)\n#    include <boost/preprocessor/iteration/detail/bounds/upper5.hpp>\n#    define BOOST_PP_FILENAME_5 BOOST_PP_ARRAY_ELEM(2, BOOST_PP_ITERATION_PARAMS_5)\n#    if BOOST_PP_ARRAY_SIZE(BOOST_PP_ITERATION_PARAMS_5) >= 4\n#        define BOOST_PP_ITERATION_FLAGS_5() BOOST_PP_ARRAY_ELEM(3, BOOST_PP_ITERATION_PARAMS_5)\n#    else\n#        define BOOST_PP_ITERATION_FLAGS_5() 0\n#    endif\n# else\n#    error BOOST_PP_ERROR:  depth #5 iteration boundaries or filename not defined\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define 
BOOST_PP_ITERATION_DEPTH() 5\n#\n# if (BOOST_PP_ITERATION_START_5) > (BOOST_PP_ITERATION_FINISH_5)\n#    include <boost/preprocessor/iteration/detail/iter/reverse5.hpp>\n# else\n#    if BOOST_PP_ITERATION_START_5 <= 0 && BOOST_PP_ITERATION_FINISH_5 >= 0\n#        define BOOST_PP_ITERATION_5 0\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 1 && BOOST_PP_ITERATION_FINISH_5 >= 1\n#        define BOOST_PP_ITERATION_5 1\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 2 && BOOST_PP_ITERATION_FINISH_5 >= 2\n#        define BOOST_PP_ITERATION_5 2\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 3 && BOOST_PP_ITERATION_FINISH_5 >= 3\n#        define BOOST_PP_ITERATION_5 3\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 4 && BOOST_PP_ITERATION_FINISH_5 >= 4\n#        define BOOST_PP_ITERATION_5 4\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 5 && BOOST_PP_ITERATION_FINISH_5 >= 5\n#        define BOOST_PP_ITERATION_5 5\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 6 && BOOST_PP_ITERATION_FINISH_5 >= 6\n#        define BOOST_PP_ITERATION_5 6\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 7 && BOOST_PP_ITERATION_FINISH_5 >= 7\n#        define BOOST_PP_ITERATION_5 7\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 8 && BOOST_PP_ITERATION_FINISH_5 >= 8\n#        define BOOST_PP_ITERATION_5 8\n#        include BOOST_PP_FILENAME_5\n#        undef 
BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 9 && BOOST_PP_ITERATION_FINISH_5 >= 9\n#        define BOOST_PP_ITERATION_5 9\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 10 && BOOST_PP_ITERATION_FINISH_5 >= 10\n#        define BOOST_PP_ITERATION_5 10\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 11 && BOOST_PP_ITERATION_FINISH_5 >= 11\n#        define BOOST_PP_ITERATION_5 11\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 12 && BOOST_PP_ITERATION_FINISH_5 >= 12\n#        define BOOST_PP_ITERATION_5 12\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 13 && BOOST_PP_ITERATION_FINISH_5 >= 13\n#        define BOOST_PP_ITERATION_5 13\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 14 && BOOST_PP_ITERATION_FINISH_5 >= 14\n#        define BOOST_PP_ITERATION_5 14\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 15 && BOOST_PP_ITERATION_FINISH_5 >= 15\n#        define BOOST_PP_ITERATION_5 15\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 16 && BOOST_PP_ITERATION_FINISH_5 >= 16\n#        define BOOST_PP_ITERATION_5 16\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 17 && BOOST_PP_ITERATION_FINISH_5 >= 17\n#        define BOOST_PP_ITERATION_5 17\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 18 && BOOST_PP_ITERATION_FINISH_5 >= 18\n#        define 
BOOST_PP_ITERATION_5 18\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 19 && BOOST_PP_ITERATION_FINISH_5 >= 19\n#        define BOOST_PP_ITERATION_5 19\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 20 && BOOST_PP_ITERATION_FINISH_5 >= 20\n#        define BOOST_PP_ITERATION_5 20\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 21 && BOOST_PP_ITERATION_FINISH_5 >= 21\n#        define BOOST_PP_ITERATION_5 21\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 22 && BOOST_PP_ITERATION_FINISH_5 >= 22\n#        define BOOST_PP_ITERATION_5 22\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 23 && BOOST_PP_ITERATION_FINISH_5 >= 23\n#        define BOOST_PP_ITERATION_5 23\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 24 && BOOST_PP_ITERATION_FINISH_5 >= 24\n#        define BOOST_PP_ITERATION_5 24\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 25 && BOOST_PP_ITERATION_FINISH_5 >= 25\n#        define BOOST_PP_ITERATION_5 25\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 26 && BOOST_PP_ITERATION_FINISH_5 >= 26\n#        define BOOST_PP_ITERATION_5 26\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 27 && BOOST_PP_ITERATION_FINISH_5 >= 27\n#        define BOOST_PP_ITERATION_5 27\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if 
BOOST_PP_ITERATION_START_5 <= 28 && BOOST_PP_ITERATION_FINISH_5 >= 28\n#        define BOOST_PP_ITERATION_5 28\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 29 && BOOST_PP_ITERATION_FINISH_5 >= 29\n#        define BOOST_PP_ITERATION_5 29\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 30 && BOOST_PP_ITERATION_FINISH_5 >= 30\n#        define BOOST_PP_ITERATION_5 30\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 31 && BOOST_PP_ITERATION_FINISH_5 >= 31\n#        define BOOST_PP_ITERATION_5 31\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 32 && BOOST_PP_ITERATION_FINISH_5 >= 32\n#        define BOOST_PP_ITERATION_5 32\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 33 && BOOST_PP_ITERATION_FINISH_5 >= 33\n#        define BOOST_PP_ITERATION_5 33\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 34 && BOOST_PP_ITERATION_FINISH_5 >= 34\n#        define BOOST_PP_ITERATION_5 34\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 35 && BOOST_PP_ITERATION_FINISH_5 >= 35\n#        define BOOST_PP_ITERATION_5 35\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 36 && BOOST_PP_ITERATION_FINISH_5 >= 36\n#        define BOOST_PP_ITERATION_5 36\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 37 && BOOST_PP_ITERATION_FINISH_5 >= 37\n#        define BOOST_PP_ITERATION_5 37\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 38 && BOOST_PP_ITERATION_FINISH_5 >= 38\n#        define BOOST_PP_ITERATION_5 38\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 39 && BOOST_PP_ITERATION_FINISH_5 >= 39\n#        define BOOST_PP_ITERATION_5 39\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 40 && BOOST_PP_ITERATION_FINISH_5 >= 40\n#        define BOOST_PP_ITERATION_5 40\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 41 && BOOST_PP_ITERATION_FINISH_5 >= 41\n#        define BOOST_PP_ITERATION_5 41\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 42 && BOOST_PP_ITERATION_FINISH_5 >= 42\n#        define BOOST_PP_ITERATION_5 42\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 43 && BOOST_PP_ITERATION_FINISH_5 >= 43\n#        define BOOST_PP_ITERATION_5 43\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 44 && BOOST_PP_ITERATION_FINISH_5 >= 44\n#        define BOOST_PP_ITERATION_5 44\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 45 && BOOST_PP_ITERATION_FINISH_5 >= 45\n#        define BOOST_PP_ITERATION_5 45\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 46 && BOOST_PP_ITERATION_FINISH_5 >= 46\n#        define BOOST_PP_ITERATION_5 46\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 47 && 
BOOST_PP_ITERATION_FINISH_5 >= 47\n#        define BOOST_PP_ITERATION_5 47\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 48 && BOOST_PP_ITERATION_FINISH_5 >= 48\n#        define BOOST_PP_ITERATION_5 48\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 49 && BOOST_PP_ITERATION_FINISH_5 >= 49\n#        define BOOST_PP_ITERATION_5 49\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 50 && BOOST_PP_ITERATION_FINISH_5 >= 50\n#        define BOOST_PP_ITERATION_5 50\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 51 && BOOST_PP_ITERATION_FINISH_5 >= 51\n#        define BOOST_PP_ITERATION_5 51\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 52 && BOOST_PP_ITERATION_FINISH_5 >= 52\n#        define BOOST_PP_ITERATION_5 52\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 53 && BOOST_PP_ITERATION_FINISH_5 >= 53\n#        define BOOST_PP_ITERATION_5 53\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 54 && BOOST_PP_ITERATION_FINISH_5 >= 54\n#        define BOOST_PP_ITERATION_5 54\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 55 && BOOST_PP_ITERATION_FINISH_5 >= 55\n#        define BOOST_PP_ITERATION_5 55\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 56 && BOOST_PP_ITERATION_FINISH_5 >= 56\n#        define BOOST_PP_ITERATION_5 56\n#        include BOOST_PP_FILENAME_5\n#        undef 
BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 57 && BOOST_PP_ITERATION_FINISH_5 >= 57\n#        define BOOST_PP_ITERATION_5 57\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 58 && BOOST_PP_ITERATION_FINISH_5 >= 58\n#        define BOOST_PP_ITERATION_5 58\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 59 && BOOST_PP_ITERATION_FINISH_5 >= 59\n#        define BOOST_PP_ITERATION_5 59\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 60 && BOOST_PP_ITERATION_FINISH_5 >= 60\n#        define BOOST_PP_ITERATION_5 60\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 61 && BOOST_PP_ITERATION_FINISH_5 >= 61\n#        define BOOST_PP_ITERATION_5 61\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 62 && BOOST_PP_ITERATION_FINISH_5 >= 62\n#        define BOOST_PP_ITERATION_5 62\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 63 && BOOST_PP_ITERATION_FINISH_5 >= 63\n#        define BOOST_PP_ITERATION_5 63\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 64 && BOOST_PP_ITERATION_FINISH_5 >= 64\n#        define BOOST_PP_ITERATION_5 64\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 65 && BOOST_PP_ITERATION_FINISH_5 >= 65\n#        define BOOST_PP_ITERATION_5 65\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 66 && BOOST_PP_ITERATION_FINISH_5 >= 66\n#        define 
BOOST_PP_ITERATION_5 66\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 67 && BOOST_PP_ITERATION_FINISH_5 >= 67\n#        define BOOST_PP_ITERATION_5 67\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 68 && BOOST_PP_ITERATION_FINISH_5 >= 68\n#        define BOOST_PP_ITERATION_5 68\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 69 && BOOST_PP_ITERATION_FINISH_5 >= 69\n#        define BOOST_PP_ITERATION_5 69\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 70 && BOOST_PP_ITERATION_FINISH_5 >= 70\n#        define BOOST_PP_ITERATION_5 70\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 71 && BOOST_PP_ITERATION_FINISH_5 >= 71\n#        define BOOST_PP_ITERATION_5 71\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 72 && BOOST_PP_ITERATION_FINISH_5 >= 72\n#        define BOOST_PP_ITERATION_5 72\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 73 && BOOST_PP_ITERATION_FINISH_5 >= 73\n#        define BOOST_PP_ITERATION_5 73\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 74 && BOOST_PP_ITERATION_FINISH_5 >= 74\n#        define BOOST_PP_ITERATION_5 74\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 75 && BOOST_PP_ITERATION_FINISH_5 >= 75\n#        define BOOST_PP_ITERATION_5 75\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if 
BOOST_PP_ITERATION_START_5 <= 76 && BOOST_PP_ITERATION_FINISH_5 >= 76\n#        define BOOST_PP_ITERATION_5 76\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 77 && BOOST_PP_ITERATION_FINISH_5 >= 77\n#        define BOOST_PP_ITERATION_5 77\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 78 && BOOST_PP_ITERATION_FINISH_5 >= 78\n#        define BOOST_PP_ITERATION_5 78\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 79 && BOOST_PP_ITERATION_FINISH_5 >= 79\n#        define BOOST_PP_ITERATION_5 79\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 80 && BOOST_PP_ITERATION_FINISH_5 >= 80\n#        define BOOST_PP_ITERATION_5 80\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 81 && BOOST_PP_ITERATION_FINISH_5 >= 81\n#        define BOOST_PP_ITERATION_5 81\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 82 && BOOST_PP_ITERATION_FINISH_5 >= 82\n#        define BOOST_PP_ITERATION_5 82\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 83 && BOOST_PP_ITERATION_FINISH_5 >= 83\n#        define BOOST_PP_ITERATION_5 83\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 84 && BOOST_PP_ITERATION_FINISH_5 >= 84\n#        define BOOST_PP_ITERATION_5 84\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 85 && BOOST_PP_ITERATION_FINISH_5 >= 85\n#        define BOOST_PP_ITERATION_5 85\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 86 && BOOST_PP_ITERATION_FINISH_5 >= 86\n#        define BOOST_PP_ITERATION_5 86\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 87 && BOOST_PP_ITERATION_FINISH_5 >= 87\n#        define BOOST_PP_ITERATION_5 87\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 88 && BOOST_PP_ITERATION_FINISH_5 >= 88\n#        define BOOST_PP_ITERATION_5 88\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 89 && BOOST_PP_ITERATION_FINISH_5 >= 89\n#        define BOOST_PP_ITERATION_5 89\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 90 && BOOST_PP_ITERATION_FINISH_5 >= 90\n#        define BOOST_PP_ITERATION_5 90\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 91 && BOOST_PP_ITERATION_FINISH_5 >= 91\n#        define BOOST_PP_ITERATION_5 91\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 92 && BOOST_PP_ITERATION_FINISH_5 >= 92\n#        define BOOST_PP_ITERATION_5 92\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 93 && BOOST_PP_ITERATION_FINISH_5 >= 93\n#        define BOOST_PP_ITERATION_5 93\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 94 && BOOST_PP_ITERATION_FINISH_5 >= 94\n#        define BOOST_PP_ITERATION_5 94\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 95 && 
BOOST_PP_ITERATION_FINISH_5 >= 95\n#        define BOOST_PP_ITERATION_5 95\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 96 && BOOST_PP_ITERATION_FINISH_5 >= 96\n#        define BOOST_PP_ITERATION_5 96\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 97 && BOOST_PP_ITERATION_FINISH_5 >= 97\n#        define BOOST_PP_ITERATION_5 97\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 98 && BOOST_PP_ITERATION_FINISH_5 >= 98\n#        define BOOST_PP_ITERATION_5 98\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 99 && BOOST_PP_ITERATION_FINISH_5 >= 99\n#        define BOOST_PP_ITERATION_5 99\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 100 && BOOST_PP_ITERATION_FINISH_5 >= 100\n#        define BOOST_PP_ITERATION_5 100\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 101 && BOOST_PP_ITERATION_FINISH_5 >= 101\n#        define BOOST_PP_ITERATION_5 101\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 102 && BOOST_PP_ITERATION_FINISH_5 >= 102\n#        define BOOST_PP_ITERATION_5 102\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 103 && BOOST_PP_ITERATION_FINISH_5 >= 103\n#        define BOOST_PP_ITERATION_5 103\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 104 && BOOST_PP_ITERATION_FINISH_5 >= 104\n#        define BOOST_PP_ITERATION_5 104\n#        include BOOST_PP_FILENAME_5\n#       
 undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 105 && BOOST_PP_ITERATION_FINISH_5 >= 105\n#        define BOOST_PP_ITERATION_5 105\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 106 && BOOST_PP_ITERATION_FINISH_5 >= 106\n#        define BOOST_PP_ITERATION_5 106\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 107 && BOOST_PP_ITERATION_FINISH_5 >= 107\n#        define BOOST_PP_ITERATION_5 107\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 108 && BOOST_PP_ITERATION_FINISH_5 >= 108\n#        define BOOST_PP_ITERATION_5 108\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 109 && BOOST_PP_ITERATION_FINISH_5 >= 109\n#        define BOOST_PP_ITERATION_5 109\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 110 && BOOST_PP_ITERATION_FINISH_5 >= 110\n#        define BOOST_PP_ITERATION_5 110\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 111 && BOOST_PP_ITERATION_FINISH_5 >= 111\n#        define BOOST_PP_ITERATION_5 111\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 112 && BOOST_PP_ITERATION_FINISH_5 >= 112\n#        define BOOST_PP_ITERATION_5 112\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 113 && BOOST_PP_ITERATION_FINISH_5 >= 113\n#        define BOOST_PP_ITERATION_5 113\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 114 && 
BOOST_PP_ITERATION_FINISH_5 >= 114\n#        define BOOST_PP_ITERATION_5 114\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 115 && BOOST_PP_ITERATION_FINISH_5 >= 115\n#        define BOOST_PP_ITERATION_5 115\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 116 && BOOST_PP_ITERATION_FINISH_5 >= 116\n#        define BOOST_PP_ITERATION_5 116\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 117 && BOOST_PP_ITERATION_FINISH_5 >= 117\n#        define BOOST_PP_ITERATION_5 117\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 118 && BOOST_PP_ITERATION_FINISH_5 >= 118\n#        define BOOST_PP_ITERATION_5 118\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 119 && BOOST_PP_ITERATION_FINISH_5 >= 119\n#        define BOOST_PP_ITERATION_5 119\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 120 && BOOST_PP_ITERATION_FINISH_5 >= 120\n#        define BOOST_PP_ITERATION_5 120\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 121 && BOOST_PP_ITERATION_FINISH_5 >= 121\n#        define BOOST_PP_ITERATION_5 121\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 122 && BOOST_PP_ITERATION_FINISH_5 >= 122\n#        define BOOST_PP_ITERATION_5 122\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 123 && BOOST_PP_ITERATION_FINISH_5 >= 123\n#        define BOOST_PP_ITERATION_5 123\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 124 && BOOST_PP_ITERATION_FINISH_5 >= 124\n#        define BOOST_PP_ITERATION_5 124\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 125 && BOOST_PP_ITERATION_FINISH_5 >= 125\n#        define BOOST_PP_ITERATION_5 125\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 126 && BOOST_PP_ITERATION_FINISH_5 >= 126\n#        define BOOST_PP_ITERATION_5 126\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 127 && BOOST_PP_ITERATION_FINISH_5 >= 127\n#        define BOOST_PP_ITERATION_5 127\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 128 && BOOST_PP_ITERATION_FINISH_5 >= 128\n#        define BOOST_PP_ITERATION_5 128\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 129 && BOOST_PP_ITERATION_FINISH_5 >= 129\n#        define BOOST_PP_ITERATION_5 129\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 130 && BOOST_PP_ITERATION_FINISH_5 >= 130\n#        define BOOST_PP_ITERATION_5 130\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 131 && BOOST_PP_ITERATION_FINISH_5 >= 131\n#        define BOOST_PP_ITERATION_5 131\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 132 && BOOST_PP_ITERATION_FINISH_5 >= 132\n#        define BOOST_PP_ITERATION_5 132\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 
133 && BOOST_PP_ITERATION_FINISH_5 >= 133\n#        define BOOST_PP_ITERATION_5 133\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 134 && BOOST_PP_ITERATION_FINISH_5 >= 134\n#        define BOOST_PP_ITERATION_5 134\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 135 && BOOST_PP_ITERATION_FINISH_5 >= 135\n#        define BOOST_PP_ITERATION_5 135\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 136 && BOOST_PP_ITERATION_FINISH_5 >= 136\n#        define BOOST_PP_ITERATION_5 136\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 137 && BOOST_PP_ITERATION_FINISH_5 >= 137\n#        define BOOST_PP_ITERATION_5 137\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 138 && BOOST_PP_ITERATION_FINISH_5 >= 138\n#        define BOOST_PP_ITERATION_5 138\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 139 && BOOST_PP_ITERATION_FINISH_5 >= 139\n#        define BOOST_PP_ITERATION_5 139\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 140 && BOOST_PP_ITERATION_FINISH_5 >= 140\n#        define BOOST_PP_ITERATION_5 140\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 141 && BOOST_PP_ITERATION_FINISH_5 >= 141\n#        define BOOST_PP_ITERATION_5 141\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 142 && BOOST_PP_ITERATION_FINISH_5 >= 142\n#        define BOOST_PP_ITERATION_5 142\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 143 && BOOST_PP_ITERATION_FINISH_5 >= 143\n#        define BOOST_PP_ITERATION_5 143\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 144 && BOOST_PP_ITERATION_FINISH_5 >= 144\n#        define BOOST_PP_ITERATION_5 144\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 145 && BOOST_PP_ITERATION_FINISH_5 >= 145\n#        define BOOST_PP_ITERATION_5 145\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 146 && BOOST_PP_ITERATION_FINISH_5 >= 146\n#        define BOOST_PP_ITERATION_5 146\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 147 && BOOST_PP_ITERATION_FINISH_5 >= 147\n#        define BOOST_PP_ITERATION_5 147\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 148 && BOOST_PP_ITERATION_FINISH_5 >= 148\n#        define BOOST_PP_ITERATION_5 148\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 149 && BOOST_PP_ITERATION_FINISH_5 >= 149\n#        define BOOST_PP_ITERATION_5 149\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 150 && BOOST_PP_ITERATION_FINISH_5 >= 150\n#        define BOOST_PP_ITERATION_5 150\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 151 && BOOST_PP_ITERATION_FINISH_5 >= 151\n#        define BOOST_PP_ITERATION_5 151\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 
152 && BOOST_PP_ITERATION_FINISH_5 >= 152\n#        define BOOST_PP_ITERATION_5 152\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 153 && BOOST_PP_ITERATION_FINISH_5 >= 153\n#        define BOOST_PP_ITERATION_5 153\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 154 && BOOST_PP_ITERATION_FINISH_5 >= 154\n#        define BOOST_PP_ITERATION_5 154\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 155 && BOOST_PP_ITERATION_FINISH_5 >= 155\n#        define BOOST_PP_ITERATION_5 155\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 156 && BOOST_PP_ITERATION_FINISH_5 >= 156\n#        define BOOST_PP_ITERATION_5 156\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 157 && BOOST_PP_ITERATION_FINISH_5 >= 157\n#        define BOOST_PP_ITERATION_5 157\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 158 && BOOST_PP_ITERATION_FINISH_5 >= 158\n#        define BOOST_PP_ITERATION_5 158\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 159 && BOOST_PP_ITERATION_FINISH_5 >= 159\n#        define BOOST_PP_ITERATION_5 159\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 160 && BOOST_PP_ITERATION_FINISH_5 >= 160\n#        define BOOST_PP_ITERATION_5 160\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 161 && BOOST_PP_ITERATION_FINISH_5 >= 161\n#        define BOOST_PP_ITERATION_5 161\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 162 && BOOST_PP_ITERATION_FINISH_5 >= 162\n#        define BOOST_PP_ITERATION_5 162\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 163 && BOOST_PP_ITERATION_FINISH_5 >= 163\n#        define BOOST_PP_ITERATION_5 163\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 164 && BOOST_PP_ITERATION_FINISH_5 >= 164\n#        define BOOST_PP_ITERATION_5 164\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 165 && BOOST_PP_ITERATION_FINISH_5 >= 165\n#        define BOOST_PP_ITERATION_5 165\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 166 && BOOST_PP_ITERATION_FINISH_5 >= 166\n#        define BOOST_PP_ITERATION_5 166\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 167 && BOOST_PP_ITERATION_FINISH_5 >= 167\n#        define BOOST_PP_ITERATION_5 167\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 168 && BOOST_PP_ITERATION_FINISH_5 >= 168\n#        define BOOST_PP_ITERATION_5 168\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 169 && BOOST_PP_ITERATION_FINISH_5 >= 169\n#        define BOOST_PP_ITERATION_5 169\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 170 && BOOST_PP_ITERATION_FINISH_5 >= 170\n#        define BOOST_PP_ITERATION_5 170\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 
171 && BOOST_PP_ITERATION_FINISH_5 >= 171\n#        define BOOST_PP_ITERATION_5 171\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 172 && BOOST_PP_ITERATION_FINISH_5 >= 172\n#        define BOOST_PP_ITERATION_5 172\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 173 && BOOST_PP_ITERATION_FINISH_5 >= 173\n#        define BOOST_PP_ITERATION_5 173\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 174 && BOOST_PP_ITERATION_FINISH_5 >= 174\n#        define BOOST_PP_ITERATION_5 174\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 175 && BOOST_PP_ITERATION_FINISH_5 >= 175\n#        define BOOST_PP_ITERATION_5 175\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 176 && BOOST_PP_ITERATION_FINISH_5 >= 176\n#        define BOOST_PP_ITERATION_5 176\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 177 && BOOST_PP_ITERATION_FINISH_5 >= 177\n#        define BOOST_PP_ITERATION_5 177\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 178 && BOOST_PP_ITERATION_FINISH_5 >= 178\n#        define BOOST_PP_ITERATION_5 178\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 179 && BOOST_PP_ITERATION_FINISH_5 >= 179\n#        define BOOST_PP_ITERATION_5 179\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 180 && BOOST_PP_ITERATION_FINISH_5 >= 180\n#        define BOOST_PP_ITERATION_5 180\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 181 && BOOST_PP_ITERATION_FINISH_5 >= 181\n#        define BOOST_PP_ITERATION_5 181\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 182 && BOOST_PP_ITERATION_FINISH_5 >= 182\n#        define BOOST_PP_ITERATION_5 182\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 183 && BOOST_PP_ITERATION_FINISH_5 >= 183\n#        define BOOST_PP_ITERATION_5 183\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 184 && BOOST_PP_ITERATION_FINISH_5 >= 184\n#        define BOOST_PP_ITERATION_5 184\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 185 && BOOST_PP_ITERATION_FINISH_5 >= 185\n#        define BOOST_PP_ITERATION_5 185\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 186 && BOOST_PP_ITERATION_FINISH_5 >= 186\n#        define BOOST_PP_ITERATION_5 186\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 187 && BOOST_PP_ITERATION_FINISH_5 >= 187\n#        define BOOST_PP_ITERATION_5 187\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 188 && BOOST_PP_ITERATION_FINISH_5 >= 188\n#        define BOOST_PP_ITERATION_5 188\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 189 && BOOST_PP_ITERATION_FINISH_5 >= 189\n#        define BOOST_PP_ITERATION_5 189\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 
190 && BOOST_PP_ITERATION_FINISH_5 >= 190\n#        define BOOST_PP_ITERATION_5 190\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 191 && BOOST_PP_ITERATION_FINISH_5 >= 191\n#        define BOOST_PP_ITERATION_5 191\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 192 && BOOST_PP_ITERATION_FINISH_5 >= 192\n#        define BOOST_PP_ITERATION_5 192\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 193 && BOOST_PP_ITERATION_FINISH_5 >= 193\n#        define BOOST_PP_ITERATION_5 193\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 194 && BOOST_PP_ITERATION_FINISH_5 >= 194\n#        define BOOST_PP_ITERATION_5 194\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 195 && BOOST_PP_ITERATION_FINISH_5 >= 195\n#        define BOOST_PP_ITERATION_5 195\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 196 && BOOST_PP_ITERATION_FINISH_5 >= 196\n#        define BOOST_PP_ITERATION_5 196\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 197 && BOOST_PP_ITERATION_FINISH_5 >= 197\n#        define BOOST_PP_ITERATION_5 197\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 198 && BOOST_PP_ITERATION_FINISH_5 >= 198\n#        define BOOST_PP_ITERATION_5 198\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 199 && BOOST_PP_ITERATION_FINISH_5 >= 199\n#        define BOOST_PP_ITERATION_5 199\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 200 && BOOST_PP_ITERATION_FINISH_5 >= 200\n#        define BOOST_PP_ITERATION_5 200\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 201 && BOOST_PP_ITERATION_FINISH_5 >= 201\n#        define BOOST_PP_ITERATION_5 201\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 202 && BOOST_PP_ITERATION_FINISH_5 >= 202\n#        define BOOST_PP_ITERATION_5 202\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 203 && BOOST_PP_ITERATION_FINISH_5 >= 203\n#        define BOOST_PP_ITERATION_5 203\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 204 && BOOST_PP_ITERATION_FINISH_5 >= 204\n#        define BOOST_PP_ITERATION_5 204\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 205 && BOOST_PP_ITERATION_FINISH_5 >= 205\n#        define BOOST_PP_ITERATION_5 205\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 206 && BOOST_PP_ITERATION_FINISH_5 >= 206\n#        define BOOST_PP_ITERATION_5 206\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 207 && BOOST_PP_ITERATION_FINISH_5 >= 207\n#        define BOOST_PP_ITERATION_5 207\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 208 && BOOST_PP_ITERATION_FINISH_5 >= 208\n#        define BOOST_PP_ITERATION_5 208\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 
209 && BOOST_PP_ITERATION_FINISH_5 >= 209\n#        define BOOST_PP_ITERATION_5 209\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 210 && BOOST_PP_ITERATION_FINISH_5 >= 210\n#        define BOOST_PP_ITERATION_5 210\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 211 && BOOST_PP_ITERATION_FINISH_5 >= 211\n#        define BOOST_PP_ITERATION_5 211\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 212 && BOOST_PP_ITERATION_FINISH_5 >= 212\n#        define BOOST_PP_ITERATION_5 212\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 213 && BOOST_PP_ITERATION_FINISH_5 >= 213\n#        define BOOST_PP_ITERATION_5 213\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 214 && BOOST_PP_ITERATION_FINISH_5 >= 214\n#        define BOOST_PP_ITERATION_5 214\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 215 && BOOST_PP_ITERATION_FINISH_5 >= 215\n#        define BOOST_PP_ITERATION_5 215\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 216 && BOOST_PP_ITERATION_FINISH_5 >= 216\n#        define BOOST_PP_ITERATION_5 216\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 217 && BOOST_PP_ITERATION_FINISH_5 >= 217\n#        define BOOST_PP_ITERATION_5 217\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 218 && BOOST_PP_ITERATION_FINISH_5 >= 218\n#        define BOOST_PP_ITERATION_5 218\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 219 && BOOST_PP_ITERATION_FINISH_5 >= 219\n#        define BOOST_PP_ITERATION_5 219\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 220 && BOOST_PP_ITERATION_FINISH_5 >= 220\n#        define BOOST_PP_ITERATION_5 220\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 221 && BOOST_PP_ITERATION_FINISH_5 >= 221\n#        define BOOST_PP_ITERATION_5 221\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 222 && BOOST_PP_ITERATION_FINISH_5 >= 222\n#        define BOOST_PP_ITERATION_5 222\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 223 && BOOST_PP_ITERATION_FINISH_5 >= 223\n#        define BOOST_PP_ITERATION_5 223\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 224 && BOOST_PP_ITERATION_FINISH_5 >= 224\n#        define BOOST_PP_ITERATION_5 224\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 225 && BOOST_PP_ITERATION_FINISH_5 >= 225\n#        define BOOST_PP_ITERATION_5 225\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 226 && BOOST_PP_ITERATION_FINISH_5 >= 226\n#        define BOOST_PP_ITERATION_5 226\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 227 && BOOST_PP_ITERATION_FINISH_5 >= 227\n#        define BOOST_PP_ITERATION_5 227\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 
228 && BOOST_PP_ITERATION_FINISH_5 >= 228\n#        define BOOST_PP_ITERATION_5 228\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 229 && BOOST_PP_ITERATION_FINISH_5 >= 229\n#        define BOOST_PP_ITERATION_5 229\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 230 && BOOST_PP_ITERATION_FINISH_5 >= 230\n#        define BOOST_PP_ITERATION_5 230\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 231 && BOOST_PP_ITERATION_FINISH_5 >= 231\n#        define BOOST_PP_ITERATION_5 231\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 232 && BOOST_PP_ITERATION_FINISH_5 >= 232\n#        define BOOST_PP_ITERATION_5 232\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 233 && BOOST_PP_ITERATION_FINISH_5 >= 233\n#        define BOOST_PP_ITERATION_5 233\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 234 && BOOST_PP_ITERATION_FINISH_5 >= 234\n#        define BOOST_PP_ITERATION_5 234\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 235 && BOOST_PP_ITERATION_FINISH_5 >= 235\n#        define BOOST_PP_ITERATION_5 235\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 236 && BOOST_PP_ITERATION_FINISH_5 >= 236\n#        define BOOST_PP_ITERATION_5 236\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 237 && BOOST_PP_ITERATION_FINISH_5 >= 237\n#        define BOOST_PP_ITERATION_5 237\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 238 && BOOST_PP_ITERATION_FINISH_5 >= 238\n#        define BOOST_PP_ITERATION_5 238\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 239 && BOOST_PP_ITERATION_FINISH_5 >= 239\n#        define BOOST_PP_ITERATION_5 239\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 240 && BOOST_PP_ITERATION_FINISH_5 >= 240\n#        define BOOST_PP_ITERATION_5 240\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 241 && BOOST_PP_ITERATION_FINISH_5 >= 241\n#        define BOOST_PP_ITERATION_5 241\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 242 && BOOST_PP_ITERATION_FINISH_5 >= 242\n#        define BOOST_PP_ITERATION_5 242\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 243 && BOOST_PP_ITERATION_FINISH_5 >= 243\n#        define BOOST_PP_ITERATION_5 243\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 244 && BOOST_PP_ITERATION_FINISH_5 >= 244\n#        define BOOST_PP_ITERATION_5 244\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 245 && BOOST_PP_ITERATION_FINISH_5 >= 245\n#        define BOOST_PP_ITERATION_5 245\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 246 && BOOST_PP_ITERATION_FINISH_5 >= 246\n#        define BOOST_PP_ITERATION_5 246\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 
247 && BOOST_PP_ITERATION_FINISH_5 >= 247\n#        define BOOST_PP_ITERATION_5 247\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 248 && BOOST_PP_ITERATION_FINISH_5 >= 248\n#        define BOOST_PP_ITERATION_5 248\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 249 && BOOST_PP_ITERATION_FINISH_5 >= 249\n#        define BOOST_PP_ITERATION_5 249\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 250 && BOOST_PP_ITERATION_FINISH_5 >= 250\n#        define BOOST_PP_ITERATION_5 250\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 251 && BOOST_PP_ITERATION_FINISH_5 >= 251\n#        define BOOST_PP_ITERATION_5 251\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 252 && BOOST_PP_ITERATION_FINISH_5 >= 252\n#        define BOOST_PP_ITERATION_5 252\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 253 && BOOST_PP_ITERATION_FINISH_5 >= 253\n#        define BOOST_PP_ITERATION_5 253\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 254 && BOOST_PP_ITERATION_FINISH_5 >= 254\n#        define BOOST_PP_ITERATION_5 254\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 255 && BOOST_PP_ITERATION_FINISH_5 >= 255\n#        define BOOST_PP_ITERATION_5 255\n#        include BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n#    if BOOST_PP_ITERATION_START_5 <= 256 && BOOST_PP_ITERATION_FINISH_5 >= 256\n#        define BOOST_PP_ITERATION_5 256\n#        include 
BOOST_PP_FILENAME_5\n#        undef BOOST_PP_ITERATION_5\n#    endif\n# endif\n#\n# undef BOOST_PP_ITERATION_DEPTH\n# define BOOST_PP_ITERATION_DEPTH() 4\n#\n# undef BOOST_PP_ITERATION_START_5\n# undef BOOST_PP_ITERATION_FINISH_5\n# undef BOOST_PP_FILENAME_5\n#\n# undef BOOST_PP_ITERATION_FLAGS_5\n# undef BOOST_PP_ITERATION_PARAMS_5\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/reverse1.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if BOOST_PP_ITERATION_FINISH_1 <= 256 && BOOST_PP_ITERATION_START_1 >= 256\n#    define BOOST_PP_ITERATION_1 256\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 255 && BOOST_PP_ITERATION_START_1 >= 255\n#    define BOOST_PP_ITERATION_1 255\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 254 && BOOST_PP_ITERATION_START_1 >= 254\n#    define BOOST_PP_ITERATION_1 254\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 253 && BOOST_PP_ITERATION_START_1 >= 253\n#    define BOOST_PP_ITERATION_1 253\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 252 && BOOST_PP_ITERATION_START_1 >= 252\n#    define BOOST_PP_ITERATION_1 252\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 251 && BOOST_PP_ITERATION_START_1 >= 251\n#    define BOOST_PP_ITERATION_1 251\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 250 && BOOST_PP_ITERATION_START_1 >= 250\n#    define BOOST_PP_ITERATION_1 250\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_1 <= 249 && BOOST_PP_ITERATION_START_1 >= 249\n#    define BOOST_PP_ITERATION_1 249\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 248 && BOOST_PP_ITERATION_START_1 >= 248\n#    define BOOST_PP_ITERATION_1 248\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 247 && BOOST_PP_ITERATION_START_1 >= 247\n#    define BOOST_PP_ITERATION_1 247\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 246 && BOOST_PP_ITERATION_START_1 >= 246\n#    define BOOST_PP_ITERATION_1 246\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 245 && BOOST_PP_ITERATION_START_1 >= 245\n#    define BOOST_PP_ITERATION_1 245\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 244 && BOOST_PP_ITERATION_START_1 >= 244\n#    define BOOST_PP_ITERATION_1 244\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 243 && BOOST_PP_ITERATION_START_1 >= 243\n#    define BOOST_PP_ITERATION_1 243\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 242 && BOOST_PP_ITERATION_START_1 >= 242\n#    define BOOST_PP_ITERATION_1 242\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 241 && BOOST_PP_ITERATION_START_1 >= 241\n#    define BOOST_PP_ITERATION_1 241\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 240 && BOOST_PP_ITERATION_START_1 >= 240\n#    define BOOST_PP_ITERATION_1 240\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 239 && BOOST_PP_ITERATION_START_1 >= 239\n#    
define BOOST_PP_ITERATION_1 239\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 238 && BOOST_PP_ITERATION_START_1 >= 238\n#    define BOOST_PP_ITERATION_1 238\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 237 && BOOST_PP_ITERATION_START_1 >= 237\n#    define BOOST_PP_ITERATION_1 237\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 236 && BOOST_PP_ITERATION_START_1 >= 236\n#    define BOOST_PP_ITERATION_1 236\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 235 && BOOST_PP_ITERATION_START_1 >= 235\n#    define BOOST_PP_ITERATION_1 235\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 234 && BOOST_PP_ITERATION_START_1 >= 234\n#    define BOOST_PP_ITERATION_1 234\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 233 && BOOST_PP_ITERATION_START_1 >= 233\n#    define BOOST_PP_ITERATION_1 233\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 232 && BOOST_PP_ITERATION_START_1 >= 232\n#    define BOOST_PP_ITERATION_1 232\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 231 && BOOST_PP_ITERATION_START_1 >= 231\n#    define BOOST_PP_ITERATION_1 231\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 230 && BOOST_PP_ITERATION_START_1 >= 230\n#    define BOOST_PP_ITERATION_1 230\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 229 && BOOST_PP_ITERATION_START_1 >= 229\n#    define BOOST_PP_ITERATION_1 229\n#    include BOOST_PP_FILENAME_1\n#    undef 
BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 228 && BOOST_PP_ITERATION_START_1 >= 228\n#    define BOOST_PP_ITERATION_1 228\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 227 && BOOST_PP_ITERATION_START_1 >= 227\n#    define BOOST_PP_ITERATION_1 227\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 226 && BOOST_PP_ITERATION_START_1 >= 226\n#    define BOOST_PP_ITERATION_1 226\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 225 && BOOST_PP_ITERATION_START_1 >= 225\n#    define BOOST_PP_ITERATION_1 225\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 224 && BOOST_PP_ITERATION_START_1 >= 224\n#    define BOOST_PP_ITERATION_1 224\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 223 && BOOST_PP_ITERATION_START_1 >= 223\n#    define BOOST_PP_ITERATION_1 223\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 222 && BOOST_PP_ITERATION_START_1 >= 222\n#    define BOOST_PP_ITERATION_1 222\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 221 && BOOST_PP_ITERATION_START_1 >= 221\n#    define BOOST_PP_ITERATION_1 221\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 220 && BOOST_PP_ITERATION_START_1 >= 220\n#    define BOOST_PP_ITERATION_1 220\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 219 && BOOST_PP_ITERATION_START_1 >= 219\n#    define BOOST_PP_ITERATION_1 219\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 218 && 
BOOST_PP_ITERATION_START_1 >= 218\n#    define BOOST_PP_ITERATION_1 218\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 217 && BOOST_PP_ITERATION_START_1 >= 217\n#    define BOOST_PP_ITERATION_1 217\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 216 && BOOST_PP_ITERATION_START_1 >= 216\n#    define BOOST_PP_ITERATION_1 216\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 215 && BOOST_PP_ITERATION_START_1 >= 215\n#    define BOOST_PP_ITERATION_1 215\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 214 && BOOST_PP_ITERATION_START_1 >= 214\n#    define BOOST_PP_ITERATION_1 214\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 213 && BOOST_PP_ITERATION_START_1 >= 213\n#    define BOOST_PP_ITERATION_1 213\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 212 && BOOST_PP_ITERATION_START_1 >= 212\n#    define BOOST_PP_ITERATION_1 212\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 211 && BOOST_PP_ITERATION_START_1 >= 211\n#    define BOOST_PP_ITERATION_1 211\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 210 && BOOST_PP_ITERATION_START_1 >= 210\n#    define BOOST_PP_ITERATION_1 210\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 209 && BOOST_PP_ITERATION_START_1 >= 209\n#    define BOOST_PP_ITERATION_1 209\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 208 && BOOST_PP_ITERATION_START_1 >= 208\n#    define BOOST_PP_ITERATION_1 208\n#    
include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 207 && BOOST_PP_ITERATION_START_1 >= 207\n#    define BOOST_PP_ITERATION_1 207\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 206 && BOOST_PP_ITERATION_START_1 >= 206\n#    define BOOST_PP_ITERATION_1 206\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 205 && BOOST_PP_ITERATION_START_1 >= 205\n#    define BOOST_PP_ITERATION_1 205\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 204 && BOOST_PP_ITERATION_START_1 >= 204\n#    define BOOST_PP_ITERATION_1 204\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 203 && BOOST_PP_ITERATION_START_1 >= 203\n#    define BOOST_PP_ITERATION_1 203\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 202 && BOOST_PP_ITERATION_START_1 >= 202\n#    define BOOST_PP_ITERATION_1 202\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 201 && BOOST_PP_ITERATION_START_1 >= 201\n#    define BOOST_PP_ITERATION_1 201\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 200 && BOOST_PP_ITERATION_START_1 >= 200\n#    define BOOST_PP_ITERATION_1 200\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 199 && BOOST_PP_ITERATION_START_1 >= 199\n#    define BOOST_PP_ITERATION_1 199\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 198 && BOOST_PP_ITERATION_START_1 >= 198\n#    define BOOST_PP_ITERATION_1 198\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_1 <= 197 && BOOST_PP_ITERATION_START_1 >= 197\n#    define BOOST_PP_ITERATION_1 197\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 196 && BOOST_PP_ITERATION_START_1 >= 196\n#    define BOOST_PP_ITERATION_1 196\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 195 && BOOST_PP_ITERATION_START_1 >= 195\n#    define BOOST_PP_ITERATION_1 195\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 194 && BOOST_PP_ITERATION_START_1 >= 194\n#    define BOOST_PP_ITERATION_1 194\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 193 && BOOST_PP_ITERATION_START_1 >= 193\n#    define BOOST_PP_ITERATION_1 193\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 192 && BOOST_PP_ITERATION_START_1 >= 192\n#    define BOOST_PP_ITERATION_1 192\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 191 && BOOST_PP_ITERATION_START_1 >= 191\n#    define BOOST_PP_ITERATION_1 191\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 190 && BOOST_PP_ITERATION_START_1 >= 190\n#    define BOOST_PP_ITERATION_1 190\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 189 && BOOST_PP_ITERATION_START_1 >= 189\n#    define BOOST_PP_ITERATION_1 189\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 188 && BOOST_PP_ITERATION_START_1 >= 188\n#    define BOOST_PP_ITERATION_1 188\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 187 && BOOST_PP_ITERATION_START_1 >= 187\n#    
define BOOST_PP_ITERATION_1 187\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 186 && BOOST_PP_ITERATION_START_1 >= 186\n#    define BOOST_PP_ITERATION_1 186\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 185 && BOOST_PP_ITERATION_START_1 >= 185\n#    define BOOST_PP_ITERATION_1 185\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 184 && BOOST_PP_ITERATION_START_1 >= 184\n#    define BOOST_PP_ITERATION_1 184\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 183 && BOOST_PP_ITERATION_START_1 >= 183\n#    define BOOST_PP_ITERATION_1 183\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 182 && BOOST_PP_ITERATION_START_1 >= 182\n#    define BOOST_PP_ITERATION_1 182\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 181 && BOOST_PP_ITERATION_START_1 >= 181\n#    define BOOST_PP_ITERATION_1 181\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 180 && BOOST_PP_ITERATION_START_1 >= 180\n#    define BOOST_PP_ITERATION_1 180\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 179 && BOOST_PP_ITERATION_START_1 >= 179\n#    define BOOST_PP_ITERATION_1 179\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 178 && BOOST_PP_ITERATION_START_1 >= 178\n#    define BOOST_PP_ITERATION_1 178\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 177 && BOOST_PP_ITERATION_START_1 >= 177\n#    define BOOST_PP_ITERATION_1 177\n#    include BOOST_PP_FILENAME_1\n#    undef 
BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 176 && BOOST_PP_ITERATION_START_1 >= 176\n#    define BOOST_PP_ITERATION_1 176\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 175 && BOOST_PP_ITERATION_START_1 >= 175\n#    define BOOST_PP_ITERATION_1 175\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 174 && BOOST_PP_ITERATION_START_1 >= 174\n#    define BOOST_PP_ITERATION_1 174\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 173 && BOOST_PP_ITERATION_START_1 >= 173\n#    define BOOST_PP_ITERATION_1 173\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 172 && BOOST_PP_ITERATION_START_1 >= 172\n#    define BOOST_PP_ITERATION_1 172\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 171 && BOOST_PP_ITERATION_START_1 >= 171\n#    define BOOST_PP_ITERATION_1 171\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 170 && BOOST_PP_ITERATION_START_1 >= 170\n#    define BOOST_PP_ITERATION_1 170\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 169 && BOOST_PP_ITERATION_START_1 >= 169\n#    define BOOST_PP_ITERATION_1 169\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 168 && BOOST_PP_ITERATION_START_1 >= 168\n#    define BOOST_PP_ITERATION_1 168\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 167 && BOOST_PP_ITERATION_START_1 >= 167\n#    define BOOST_PP_ITERATION_1 167\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 166 && 
BOOST_PP_ITERATION_START_1 >= 166\n#    define BOOST_PP_ITERATION_1 166\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 165 && BOOST_PP_ITERATION_START_1 >= 165\n#    define BOOST_PP_ITERATION_1 165\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 164 && BOOST_PP_ITERATION_START_1 >= 164\n#    define BOOST_PP_ITERATION_1 164\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 163 && BOOST_PP_ITERATION_START_1 >= 163\n#    define BOOST_PP_ITERATION_1 163\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 162 && BOOST_PP_ITERATION_START_1 >= 162\n#    define BOOST_PP_ITERATION_1 162\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 161 && BOOST_PP_ITERATION_START_1 >= 161\n#    define BOOST_PP_ITERATION_1 161\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 160 && BOOST_PP_ITERATION_START_1 >= 160\n#    define BOOST_PP_ITERATION_1 160\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 159 && BOOST_PP_ITERATION_START_1 >= 159\n#    define BOOST_PP_ITERATION_1 159\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 158 && BOOST_PP_ITERATION_START_1 >= 158\n#    define BOOST_PP_ITERATION_1 158\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 157 && BOOST_PP_ITERATION_START_1 >= 157\n#    define BOOST_PP_ITERATION_1 157\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 156 && BOOST_PP_ITERATION_START_1 >= 156\n#    define BOOST_PP_ITERATION_1 156\n#    
include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 155 && BOOST_PP_ITERATION_START_1 >= 155\n#    define BOOST_PP_ITERATION_1 155\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 154 && BOOST_PP_ITERATION_START_1 >= 154\n#    define BOOST_PP_ITERATION_1 154\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 153 && BOOST_PP_ITERATION_START_1 >= 153\n#    define BOOST_PP_ITERATION_1 153\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 152 && BOOST_PP_ITERATION_START_1 >= 152\n#    define BOOST_PP_ITERATION_1 152\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 151 && BOOST_PP_ITERATION_START_1 >= 151\n#    define BOOST_PP_ITERATION_1 151\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 150 && BOOST_PP_ITERATION_START_1 >= 150\n#    define BOOST_PP_ITERATION_1 150\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 149 && BOOST_PP_ITERATION_START_1 >= 149\n#    define BOOST_PP_ITERATION_1 149\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 148 && BOOST_PP_ITERATION_START_1 >= 148\n#    define BOOST_PP_ITERATION_1 148\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 147 && BOOST_PP_ITERATION_START_1 >= 147\n#    define BOOST_PP_ITERATION_1 147\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 146 && BOOST_PP_ITERATION_START_1 >= 146\n#    define BOOST_PP_ITERATION_1 146\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_1 <= 145 && BOOST_PP_ITERATION_START_1 >= 145\n#    define BOOST_PP_ITERATION_1 145\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 144 && BOOST_PP_ITERATION_START_1 >= 144\n#    define BOOST_PP_ITERATION_1 144\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 143 && BOOST_PP_ITERATION_START_1 >= 143\n#    define BOOST_PP_ITERATION_1 143\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 142 && BOOST_PP_ITERATION_START_1 >= 142\n#    define BOOST_PP_ITERATION_1 142\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 141 && BOOST_PP_ITERATION_START_1 >= 141\n#    define BOOST_PP_ITERATION_1 141\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 140 && BOOST_PP_ITERATION_START_1 >= 140\n#    define BOOST_PP_ITERATION_1 140\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 139 && BOOST_PP_ITERATION_START_1 >= 139\n#    define BOOST_PP_ITERATION_1 139\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 138 && BOOST_PP_ITERATION_START_1 >= 138\n#    define BOOST_PP_ITERATION_1 138\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 137 && BOOST_PP_ITERATION_START_1 >= 137\n#    define BOOST_PP_ITERATION_1 137\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 136 && BOOST_PP_ITERATION_START_1 >= 136\n#    define BOOST_PP_ITERATION_1 136\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 135 && BOOST_PP_ITERATION_START_1 >= 135\n#    
define BOOST_PP_ITERATION_1 135\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 134 && BOOST_PP_ITERATION_START_1 >= 134\n#    define BOOST_PP_ITERATION_1 134\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 133 && BOOST_PP_ITERATION_START_1 >= 133\n#    define BOOST_PP_ITERATION_1 133\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 132 && BOOST_PP_ITERATION_START_1 >= 132\n#    define BOOST_PP_ITERATION_1 132\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 131 && BOOST_PP_ITERATION_START_1 >= 131\n#    define BOOST_PP_ITERATION_1 131\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 130 && BOOST_PP_ITERATION_START_1 >= 130\n#    define BOOST_PP_ITERATION_1 130\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 129 && BOOST_PP_ITERATION_START_1 >= 129\n#    define BOOST_PP_ITERATION_1 129\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 128 && BOOST_PP_ITERATION_START_1 >= 128\n#    define BOOST_PP_ITERATION_1 128\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 127 && BOOST_PP_ITERATION_START_1 >= 127\n#    define BOOST_PP_ITERATION_1 127\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 126 && BOOST_PP_ITERATION_START_1 >= 126\n#    define BOOST_PP_ITERATION_1 126\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 125 && BOOST_PP_ITERATION_START_1 >= 125\n#    define BOOST_PP_ITERATION_1 125\n#    include BOOST_PP_FILENAME_1\n#    undef 
BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 124 && BOOST_PP_ITERATION_START_1 >= 124\n#    define BOOST_PP_ITERATION_1 124\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 123 && BOOST_PP_ITERATION_START_1 >= 123\n#    define BOOST_PP_ITERATION_1 123\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 122 && BOOST_PP_ITERATION_START_1 >= 122\n#    define BOOST_PP_ITERATION_1 122\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 121 && BOOST_PP_ITERATION_START_1 >= 121\n#    define BOOST_PP_ITERATION_1 121\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 120 && BOOST_PP_ITERATION_START_1 >= 120\n#    define BOOST_PP_ITERATION_1 120\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 119 && BOOST_PP_ITERATION_START_1 >= 119\n#    define BOOST_PP_ITERATION_1 119\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 118 && BOOST_PP_ITERATION_START_1 >= 118\n#    define BOOST_PP_ITERATION_1 118\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 117 && BOOST_PP_ITERATION_START_1 >= 117\n#    define BOOST_PP_ITERATION_1 117\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 116 && BOOST_PP_ITERATION_START_1 >= 116\n#    define BOOST_PP_ITERATION_1 116\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 115 && BOOST_PP_ITERATION_START_1 >= 115\n#    define BOOST_PP_ITERATION_1 115\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 114 && 
BOOST_PP_ITERATION_START_1 >= 114\n#    define BOOST_PP_ITERATION_1 114\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 113 && BOOST_PP_ITERATION_START_1 >= 113\n#    define BOOST_PP_ITERATION_1 113\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 112 && BOOST_PP_ITERATION_START_1 >= 112\n#    define BOOST_PP_ITERATION_1 112\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 111 && BOOST_PP_ITERATION_START_1 >= 111\n#    define BOOST_PP_ITERATION_1 111\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 110 && BOOST_PP_ITERATION_START_1 >= 110\n#    define BOOST_PP_ITERATION_1 110\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 109 && BOOST_PP_ITERATION_START_1 >= 109\n#    define BOOST_PP_ITERATION_1 109\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 108 && BOOST_PP_ITERATION_START_1 >= 108\n#    define BOOST_PP_ITERATION_1 108\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 107 && BOOST_PP_ITERATION_START_1 >= 107\n#    define BOOST_PP_ITERATION_1 107\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 106 && BOOST_PP_ITERATION_START_1 >= 106\n#    define BOOST_PP_ITERATION_1 106\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 105 && BOOST_PP_ITERATION_START_1 >= 105\n#    define BOOST_PP_ITERATION_1 105\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 104 && BOOST_PP_ITERATION_START_1 >= 104\n#    define BOOST_PP_ITERATION_1 104\n#    
include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 103 && BOOST_PP_ITERATION_START_1 >= 103\n#    define BOOST_PP_ITERATION_1 103\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 102 && BOOST_PP_ITERATION_START_1 >= 102\n#    define BOOST_PP_ITERATION_1 102\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 101 && BOOST_PP_ITERATION_START_1 >= 101\n#    define BOOST_PP_ITERATION_1 101\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 100 && BOOST_PP_ITERATION_START_1 >= 100\n#    define BOOST_PP_ITERATION_1 100\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 99 && BOOST_PP_ITERATION_START_1 >= 99\n#    define BOOST_PP_ITERATION_1 99\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 98 && BOOST_PP_ITERATION_START_1 >= 98\n#    define BOOST_PP_ITERATION_1 98\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 97 && BOOST_PP_ITERATION_START_1 >= 97\n#    define BOOST_PP_ITERATION_1 97\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 96 && BOOST_PP_ITERATION_START_1 >= 96\n#    define BOOST_PP_ITERATION_1 96\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 95 && BOOST_PP_ITERATION_START_1 >= 95\n#    define BOOST_PP_ITERATION_1 95\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 94 && BOOST_PP_ITERATION_START_1 >= 94\n#    define BOOST_PP_ITERATION_1 94\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_1 <= 93 && BOOST_PP_ITERATION_START_1 >= 93\n#    define BOOST_PP_ITERATION_1 93\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 92 && BOOST_PP_ITERATION_START_1 >= 92\n#    define BOOST_PP_ITERATION_1 92\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 91 && BOOST_PP_ITERATION_START_1 >= 91\n#    define BOOST_PP_ITERATION_1 91\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 90 && BOOST_PP_ITERATION_START_1 >= 90\n#    define BOOST_PP_ITERATION_1 90\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 89 && BOOST_PP_ITERATION_START_1 >= 89\n#    define BOOST_PP_ITERATION_1 89\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 88 && BOOST_PP_ITERATION_START_1 >= 88\n#    define BOOST_PP_ITERATION_1 88\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 87 && BOOST_PP_ITERATION_START_1 >= 87\n#    define BOOST_PP_ITERATION_1 87\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 86 && BOOST_PP_ITERATION_START_1 >= 86\n#    define BOOST_PP_ITERATION_1 86\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 85 && BOOST_PP_ITERATION_START_1 >= 85\n#    define BOOST_PP_ITERATION_1 85\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 84 && BOOST_PP_ITERATION_START_1 >= 84\n#    define BOOST_PP_ITERATION_1 84\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 83 && BOOST_PP_ITERATION_START_1 >= 83\n#    define BOOST_PP_ITERATION_1 83\n# 
   include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 82 && BOOST_PP_ITERATION_START_1 >= 82\n#    define BOOST_PP_ITERATION_1 82\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 81 && BOOST_PP_ITERATION_START_1 >= 81\n#    define BOOST_PP_ITERATION_1 81\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 80 && BOOST_PP_ITERATION_START_1 >= 80\n#    define BOOST_PP_ITERATION_1 80\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 79 && BOOST_PP_ITERATION_START_1 >= 79\n#    define BOOST_PP_ITERATION_1 79\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 78 && BOOST_PP_ITERATION_START_1 >= 78\n#    define BOOST_PP_ITERATION_1 78\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 77 && BOOST_PP_ITERATION_START_1 >= 77\n#    define BOOST_PP_ITERATION_1 77\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 76 && BOOST_PP_ITERATION_START_1 >= 76\n#    define BOOST_PP_ITERATION_1 76\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 75 && BOOST_PP_ITERATION_START_1 >= 75\n#    define BOOST_PP_ITERATION_1 75\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 74 && BOOST_PP_ITERATION_START_1 >= 74\n#    define BOOST_PP_ITERATION_1 74\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 73 && BOOST_PP_ITERATION_START_1 >= 73\n#    define BOOST_PP_ITERATION_1 73\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 
72 && BOOST_PP_ITERATION_START_1 >= 72\n#    define BOOST_PP_ITERATION_1 72\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 71 && BOOST_PP_ITERATION_START_1 >= 71\n#    define BOOST_PP_ITERATION_1 71\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 70 && BOOST_PP_ITERATION_START_1 >= 70\n#    define BOOST_PP_ITERATION_1 70\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 69 && BOOST_PP_ITERATION_START_1 >= 69\n#    define BOOST_PP_ITERATION_1 69\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 68 && BOOST_PP_ITERATION_START_1 >= 68\n#    define BOOST_PP_ITERATION_1 68\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 67 && BOOST_PP_ITERATION_START_1 >= 67\n#    define BOOST_PP_ITERATION_1 67\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 66 && BOOST_PP_ITERATION_START_1 >= 66\n#    define BOOST_PP_ITERATION_1 66\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 65 && BOOST_PP_ITERATION_START_1 >= 65\n#    define BOOST_PP_ITERATION_1 65\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 64 && BOOST_PP_ITERATION_START_1 >= 64\n#    define BOOST_PP_ITERATION_1 64\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 63 && BOOST_PP_ITERATION_START_1 >= 63\n#    define BOOST_PP_ITERATION_1 63\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 62 && BOOST_PP_ITERATION_START_1 >= 62\n#    define BOOST_PP_ITERATION_1 62\n#    include 
BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 61 && BOOST_PP_ITERATION_START_1 >= 61\n#    define BOOST_PP_ITERATION_1 61\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 60 && BOOST_PP_ITERATION_START_1 >= 60\n#    define BOOST_PP_ITERATION_1 60\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 59 && BOOST_PP_ITERATION_START_1 >= 59\n#    define BOOST_PP_ITERATION_1 59\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 58 && BOOST_PP_ITERATION_START_1 >= 58\n#    define BOOST_PP_ITERATION_1 58\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 57 && BOOST_PP_ITERATION_START_1 >= 57\n#    define BOOST_PP_ITERATION_1 57\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 56 && BOOST_PP_ITERATION_START_1 >= 56\n#    define BOOST_PP_ITERATION_1 56\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 55 && BOOST_PP_ITERATION_START_1 >= 55\n#    define BOOST_PP_ITERATION_1 55\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 54 && BOOST_PP_ITERATION_START_1 >= 54\n#    define BOOST_PP_ITERATION_1 54\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 53 && BOOST_PP_ITERATION_START_1 >= 53\n#    define BOOST_PP_ITERATION_1 53\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 52 && BOOST_PP_ITERATION_START_1 >= 52\n#    define BOOST_PP_ITERATION_1 52\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 51 && 
BOOST_PP_ITERATION_START_1 >= 51\n#    define BOOST_PP_ITERATION_1 51\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 50 && BOOST_PP_ITERATION_START_1 >= 50\n#    define BOOST_PP_ITERATION_1 50\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 49 && BOOST_PP_ITERATION_START_1 >= 49\n#    define BOOST_PP_ITERATION_1 49\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 48 && BOOST_PP_ITERATION_START_1 >= 48\n#    define BOOST_PP_ITERATION_1 48\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 47 && BOOST_PP_ITERATION_START_1 >= 47\n#    define BOOST_PP_ITERATION_1 47\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 46 && BOOST_PP_ITERATION_START_1 >= 46\n#    define BOOST_PP_ITERATION_1 46\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 45 && BOOST_PP_ITERATION_START_1 >= 45\n#    define BOOST_PP_ITERATION_1 45\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 44 && BOOST_PP_ITERATION_START_1 >= 44\n#    define BOOST_PP_ITERATION_1 44\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 43 && BOOST_PP_ITERATION_START_1 >= 43\n#    define BOOST_PP_ITERATION_1 43\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 42 && BOOST_PP_ITERATION_START_1 >= 42\n#    define BOOST_PP_ITERATION_1 42\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 41 && BOOST_PP_ITERATION_START_1 >= 41\n#    define BOOST_PP_ITERATION_1 41\n#    include BOOST_PP_FILENAME_1\n#    
undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 40 && BOOST_PP_ITERATION_START_1 >= 40\n#    define BOOST_PP_ITERATION_1 40\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 39 && BOOST_PP_ITERATION_START_1 >= 39\n#    define BOOST_PP_ITERATION_1 39\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 38 && BOOST_PP_ITERATION_START_1 >= 38\n#    define BOOST_PP_ITERATION_1 38\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 37 && BOOST_PP_ITERATION_START_1 >= 37\n#    define BOOST_PP_ITERATION_1 37\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 36 && BOOST_PP_ITERATION_START_1 >= 36\n#    define BOOST_PP_ITERATION_1 36\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 35 && BOOST_PP_ITERATION_START_1 >= 35\n#    define BOOST_PP_ITERATION_1 35\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 34 && BOOST_PP_ITERATION_START_1 >= 34\n#    define BOOST_PP_ITERATION_1 34\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 33 && BOOST_PP_ITERATION_START_1 >= 33\n#    define BOOST_PP_ITERATION_1 33\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 32 && BOOST_PP_ITERATION_START_1 >= 32\n#    define BOOST_PP_ITERATION_1 32\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 31 && BOOST_PP_ITERATION_START_1 >= 31\n#    define BOOST_PP_ITERATION_1 31\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 30 && BOOST_PP_ITERATION_START_1 >= 
30\n#    define BOOST_PP_ITERATION_1 30\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 29 && BOOST_PP_ITERATION_START_1 >= 29\n#    define BOOST_PP_ITERATION_1 29\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 28 && BOOST_PP_ITERATION_START_1 >= 28\n#    define BOOST_PP_ITERATION_1 28\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 27 && BOOST_PP_ITERATION_START_1 >= 27\n#    define BOOST_PP_ITERATION_1 27\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 26 && BOOST_PP_ITERATION_START_1 >= 26\n#    define BOOST_PP_ITERATION_1 26\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 25 && BOOST_PP_ITERATION_START_1 >= 25\n#    define BOOST_PP_ITERATION_1 25\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 24 && BOOST_PP_ITERATION_START_1 >= 24\n#    define BOOST_PP_ITERATION_1 24\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 23 && BOOST_PP_ITERATION_START_1 >= 23\n#    define BOOST_PP_ITERATION_1 23\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 22 && BOOST_PP_ITERATION_START_1 >= 22\n#    define BOOST_PP_ITERATION_1 22\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 21 && BOOST_PP_ITERATION_START_1 >= 21\n#    define BOOST_PP_ITERATION_1 21\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 20 && BOOST_PP_ITERATION_START_1 >= 20\n#    define BOOST_PP_ITERATION_1 20\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# 
endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 19 && BOOST_PP_ITERATION_START_1 >= 19\n#    define BOOST_PP_ITERATION_1 19\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 18 && BOOST_PP_ITERATION_START_1 >= 18\n#    define BOOST_PP_ITERATION_1 18\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 17 && BOOST_PP_ITERATION_START_1 >= 17\n#    define BOOST_PP_ITERATION_1 17\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 16 && BOOST_PP_ITERATION_START_1 >= 16\n#    define BOOST_PP_ITERATION_1 16\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 15 && BOOST_PP_ITERATION_START_1 >= 15\n#    define BOOST_PP_ITERATION_1 15\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 14 && BOOST_PP_ITERATION_START_1 >= 14\n#    define BOOST_PP_ITERATION_1 14\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 13 && BOOST_PP_ITERATION_START_1 >= 13\n#    define BOOST_PP_ITERATION_1 13\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 12 && BOOST_PP_ITERATION_START_1 >= 12\n#    define BOOST_PP_ITERATION_1 12\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 11 && BOOST_PP_ITERATION_START_1 >= 11\n#    define BOOST_PP_ITERATION_1 11\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 10 && BOOST_PP_ITERATION_START_1 >= 10\n#    define BOOST_PP_ITERATION_1 10\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 9 && BOOST_PP_ITERATION_START_1 >= 9\n#    define 
BOOST_PP_ITERATION_1 9\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 8 && BOOST_PP_ITERATION_START_1 >= 8\n#    define BOOST_PP_ITERATION_1 8\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 7 && BOOST_PP_ITERATION_START_1 >= 7\n#    define BOOST_PP_ITERATION_1 7\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 6 && BOOST_PP_ITERATION_START_1 >= 6\n#    define BOOST_PP_ITERATION_1 6\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 5 && BOOST_PP_ITERATION_START_1 >= 5\n#    define BOOST_PP_ITERATION_1 5\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 4 && BOOST_PP_ITERATION_START_1 >= 4\n#    define BOOST_PP_ITERATION_1 4\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 3 && BOOST_PP_ITERATION_START_1 >= 3\n#    define BOOST_PP_ITERATION_1 3\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 2 && BOOST_PP_ITERATION_START_1 >= 2\n#    define BOOST_PP_ITERATION_1 2\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 1 && BOOST_PP_ITERATION_START_1 >= 1\n#    define BOOST_PP_ITERATION_1 1\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n# if BOOST_PP_ITERATION_FINISH_1 <= 0 && BOOST_PP_ITERATION_START_1 >= 0\n#    define BOOST_PP_ITERATION_1 0\n#    include BOOST_PP_FILENAME_1\n#    undef BOOST_PP_ITERATION_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/reverse2.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if BOOST_PP_ITERATION_FINISH_2 <= 256 && BOOST_PP_ITERATION_START_2 >= 256\n#    define BOOST_PP_ITERATION_2 256\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 255 && BOOST_PP_ITERATION_START_2 >= 255\n#    define BOOST_PP_ITERATION_2 255\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 254 && BOOST_PP_ITERATION_START_2 >= 254\n#    define BOOST_PP_ITERATION_2 254\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 253 && BOOST_PP_ITERATION_START_2 >= 253\n#    define BOOST_PP_ITERATION_2 253\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 252 && BOOST_PP_ITERATION_START_2 >= 252\n#    define BOOST_PP_ITERATION_2 252\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 251 && BOOST_PP_ITERATION_START_2 >= 251\n#    define BOOST_PP_ITERATION_2 251\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 250 && BOOST_PP_ITERATION_START_2 >= 250\n#    define BOOST_PP_ITERATION_2 250\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_2 <= 249 && BOOST_PP_ITERATION_START_2 >= 249\n#    define BOOST_PP_ITERATION_2 249\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 248 && BOOST_PP_ITERATION_START_2 >= 248\n#    define BOOST_PP_ITERATION_2 248\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 247 && BOOST_PP_ITERATION_START_2 >= 247\n#    define BOOST_PP_ITERATION_2 247\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 246 && BOOST_PP_ITERATION_START_2 >= 246\n#    define BOOST_PP_ITERATION_2 246\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 245 && BOOST_PP_ITERATION_START_2 >= 245\n#    define BOOST_PP_ITERATION_2 245\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 244 && BOOST_PP_ITERATION_START_2 >= 244\n#    define BOOST_PP_ITERATION_2 244\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 243 && BOOST_PP_ITERATION_START_2 >= 243\n#    define BOOST_PP_ITERATION_2 243\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 242 && BOOST_PP_ITERATION_START_2 >= 242\n#    define BOOST_PP_ITERATION_2 242\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 241 && BOOST_PP_ITERATION_START_2 >= 241\n#    define BOOST_PP_ITERATION_2 241\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 240 && BOOST_PP_ITERATION_START_2 >= 240\n#    define BOOST_PP_ITERATION_2 240\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 239 && BOOST_PP_ITERATION_START_2 >= 239\n#    
define BOOST_PP_ITERATION_2 239\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 238 && BOOST_PP_ITERATION_START_2 >= 238\n#    define BOOST_PP_ITERATION_2 238\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 237 && BOOST_PP_ITERATION_START_2 >= 237\n#    define BOOST_PP_ITERATION_2 237\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 236 && BOOST_PP_ITERATION_START_2 >= 236\n#    define BOOST_PP_ITERATION_2 236\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 235 && BOOST_PP_ITERATION_START_2 >= 235\n#    define BOOST_PP_ITERATION_2 235\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 234 && BOOST_PP_ITERATION_START_2 >= 234\n#    define BOOST_PP_ITERATION_2 234\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 233 && BOOST_PP_ITERATION_START_2 >= 233\n#    define BOOST_PP_ITERATION_2 233\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 232 && BOOST_PP_ITERATION_START_2 >= 232\n#    define BOOST_PP_ITERATION_2 232\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 231 && BOOST_PP_ITERATION_START_2 >= 231\n#    define BOOST_PP_ITERATION_2 231\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 230 && BOOST_PP_ITERATION_START_2 >= 230\n#    define BOOST_PP_ITERATION_2 230\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 229 && BOOST_PP_ITERATION_START_2 >= 229\n#    define BOOST_PP_ITERATION_2 229\n#    include BOOST_PP_FILENAME_2\n#    undef 
BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 228 && BOOST_PP_ITERATION_START_2 >= 228\n#    define BOOST_PP_ITERATION_2 228\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 227 && BOOST_PP_ITERATION_START_2 >= 227\n#    define BOOST_PP_ITERATION_2 227\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 226 && BOOST_PP_ITERATION_START_2 >= 226\n#    define BOOST_PP_ITERATION_2 226\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 225 && BOOST_PP_ITERATION_START_2 >= 225\n#    define BOOST_PP_ITERATION_2 225\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 224 && BOOST_PP_ITERATION_START_2 >= 224\n#    define BOOST_PP_ITERATION_2 224\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 223 && BOOST_PP_ITERATION_START_2 >= 223\n#    define BOOST_PP_ITERATION_2 223\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 222 && BOOST_PP_ITERATION_START_2 >= 222\n#    define BOOST_PP_ITERATION_2 222\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 221 && BOOST_PP_ITERATION_START_2 >= 221\n#    define BOOST_PP_ITERATION_2 221\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 220 && BOOST_PP_ITERATION_START_2 >= 220\n#    define BOOST_PP_ITERATION_2 220\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 219 && BOOST_PP_ITERATION_START_2 >= 219\n#    define BOOST_PP_ITERATION_2 219\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 218 && 
BOOST_PP_ITERATION_START_2 >= 218\n#    define BOOST_PP_ITERATION_2 218\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 217 && BOOST_PP_ITERATION_START_2 >= 217\n#    define BOOST_PP_ITERATION_2 217\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 216 && BOOST_PP_ITERATION_START_2 >= 216\n#    define BOOST_PP_ITERATION_2 216\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 215 && BOOST_PP_ITERATION_START_2 >= 215\n#    define BOOST_PP_ITERATION_2 215\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 214 && BOOST_PP_ITERATION_START_2 >= 214\n#    define BOOST_PP_ITERATION_2 214\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 213 && BOOST_PP_ITERATION_START_2 >= 213\n#    define BOOST_PP_ITERATION_2 213\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 212 && BOOST_PP_ITERATION_START_2 >= 212\n#    define BOOST_PP_ITERATION_2 212\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 211 && BOOST_PP_ITERATION_START_2 >= 211\n#    define BOOST_PP_ITERATION_2 211\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 210 && BOOST_PP_ITERATION_START_2 >= 210\n#    define BOOST_PP_ITERATION_2 210\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 209 && BOOST_PP_ITERATION_START_2 >= 209\n#    define BOOST_PP_ITERATION_2 209\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 208 && BOOST_PP_ITERATION_START_2 >= 208\n#    define BOOST_PP_ITERATION_2 208\n#    
include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 207 && BOOST_PP_ITERATION_START_2 >= 207\n#    define BOOST_PP_ITERATION_2 207\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 206 && BOOST_PP_ITERATION_START_2 >= 206\n#    define BOOST_PP_ITERATION_2 206\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 205 && BOOST_PP_ITERATION_START_2 >= 205\n#    define BOOST_PP_ITERATION_2 205\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 204 && BOOST_PP_ITERATION_START_2 >= 204\n#    define BOOST_PP_ITERATION_2 204\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 203 && BOOST_PP_ITERATION_START_2 >= 203\n#    define BOOST_PP_ITERATION_2 203\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 202 && BOOST_PP_ITERATION_START_2 >= 202\n#    define BOOST_PP_ITERATION_2 202\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 201 && BOOST_PP_ITERATION_START_2 >= 201\n#    define BOOST_PP_ITERATION_2 201\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 200 && BOOST_PP_ITERATION_START_2 >= 200\n#    define BOOST_PP_ITERATION_2 200\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 199 && BOOST_PP_ITERATION_START_2 >= 199\n#    define BOOST_PP_ITERATION_2 199\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 198 && BOOST_PP_ITERATION_START_2 >= 198\n#    define BOOST_PP_ITERATION_2 198\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_2 <= 197 && BOOST_PP_ITERATION_START_2 >= 197\n#    define BOOST_PP_ITERATION_2 197\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 196 && BOOST_PP_ITERATION_START_2 >= 196\n#    define BOOST_PP_ITERATION_2 196\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 195 && BOOST_PP_ITERATION_START_2 >= 195\n#    define BOOST_PP_ITERATION_2 195\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 194 && BOOST_PP_ITERATION_START_2 >= 194\n#    define BOOST_PP_ITERATION_2 194\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 193 && BOOST_PP_ITERATION_START_2 >= 193\n#    define BOOST_PP_ITERATION_2 193\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 192 && BOOST_PP_ITERATION_START_2 >= 192\n#    define BOOST_PP_ITERATION_2 192\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 191 && BOOST_PP_ITERATION_START_2 >= 191\n#    define BOOST_PP_ITERATION_2 191\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 190 && BOOST_PP_ITERATION_START_2 >= 190\n#    define BOOST_PP_ITERATION_2 190\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 189 && BOOST_PP_ITERATION_START_2 >= 189\n#    define BOOST_PP_ITERATION_2 189\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 188 && BOOST_PP_ITERATION_START_2 >= 188\n#    define BOOST_PP_ITERATION_2 188\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 187 && BOOST_PP_ITERATION_START_2 >= 187\n#    
define BOOST_PP_ITERATION_2 187\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 186 && BOOST_PP_ITERATION_START_2 >= 186\n#    define BOOST_PP_ITERATION_2 186\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 185 && BOOST_PP_ITERATION_START_2 >= 185\n#    define BOOST_PP_ITERATION_2 185\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 184 && BOOST_PP_ITERATION_START_2 >= 184\n#    define BOOST_PP_ITERATION_2 184\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 183 && BOOST_PP_ITERATION_START_2 >= 183\n#    define BOOST_PP_ITERATION_2 183\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 182 && BOOST_PP_ITERATION_START_2 >= 182\n#    define BOOST_PP_ITERATION_2 182\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 181 && BOOST_PP_ITERATION_START_2 >= 181\n#    define BOOST_PP_ITERATION_2 181\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 180 && BOOST_PP_ITERATION_START_2 >= 180\n#    define BOOST_PP_ITERATION_2 180\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 179 && BOOST_PP_ITERATION_START_2 >= 179\n#    define BOOST_PP_ITERATION_2 179\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 178 && BOOST_PP_ITERATION_START_2 >= 178\n#    define BOOST_PP_ITERATION_2 178\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 177 && BOOST_PP_ITERATION_START_2 >= 177\n#    define BOOST_PP_ITERATION_2 177\n#    include BOOST_PP_FILENAME_2\n#    undef 
BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 176 && BOOST_PP_ITERATION_START_2 >= 176\n#    define BOOST_PP_ITERATION_2 176\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 175 && BOOST_PP_ITERATION_START_2 >= 175\n#    define BOOST_PP_ITERATION_2 175\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 174 && BOOST_PP_ITERATION_START_2 >= 174\n#    define BOOST_PP_ITERATION_2 174\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 173 && BOOST_PP_ITERATION_START_2 >= 173\n#    define BOOST_PP_ITERATION_2 173\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 172 && BOOST_PP_ITERATION_START_2 >= 172\n#    define BOOST_PP_ITERATION_2 172\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 171 && BOOST_PP_ITERATION_START_2 >= 171\n#    define BOOST_PP_ITERATION_2 171\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 170 && BOOST_PP_ITERATION_START_2 >= 170\n#    define BOOST_PP_ITERATION_2 170\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 169 && BOOST_PP_ITERATION_START_2 >= 169\n#    define BOOST_PP_ITERATION_2 169\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 168 && BOOST_PP_ITERATION_START_2 >= 168\n#    define BOOST_PP_ITERATION_2 168\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 167 && BOOST_PP_ITERATION_START_2 >= 167\n#    define BOOST_PP_ITERATION_2 167\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 166 && 
BOOST_PP_ITERATION_START_2 >= 166\n#    define BOOST_PP_ITERATION_2 166\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 165 && BOOST_PP_ITERATION_START_2 >= 165\n#    define BOOST_PP_ITERATION_2 165\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 164 && BOOST_PP_ITERATION_START_2 >= 164\n#    define BOOST_PP_ITERATION_2 164\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 163 && BOOST_PP_ITERATION_START_2 >= 163\n#    define BOOST_PP_ITERATION_2 163\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 162 && BOOST_PP_ITERATION_START_2 >= 162\n#    define BOOST_PP_ITERATION_2 162\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 161 && BOOST_PP_ITERATION_START_2 >= 161\n#    define BOOST_PP_ITERATION_2 161\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 160 && BOOST_PP_ITERATION_START_2 >= 160\n#    define BOOST_PP_ITERATION_2 160\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 159 && BOOST_PP_ITERATION_START_2 >= 159\n#    define BOOST_PP_ITERATION_2 159\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 158 && BOOST_PP_ITERATION_START_2 >= 158\n#    define BOOST_PP_ITERATION_2 158\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 157 && BOOST_PP_ITERATION_START_2 >= 157\n#    define BOOST_PP_ITERATION_2 157\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 156 && BOOST_PP_ITERATION_START_2 >= 156\n#    define BOOST_PP_ITERATION_2 156\n#    
include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 155 && BOOST_PP_ITERATION_START_2 >= 155\n#    define BOOST_PP_ITERATION_2 155\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 154 && BOOST_PP_ITERATION_START_2 >= 154\n#    define BOOST_PP_ITERATION_2 154\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 153 && BOOST_PP_ITERATION_START_2 >= 153\n#    define BOOST_PP_ITERATION_2 153\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 152 && BOOST_PP_ITERATION_START_2 >= 152\n#    define BOOST_PP_ITERATION_2 152\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 151 && BOOST_PP_ITERATION_START_2 >= 151\n#    define BOOST_PP_ITERATION_2 151\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 150 && BOOST_PP_ITERATION_START_2 >= 150\n#    define BOOST_PP_ITERATION_2 150\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 149 && BOOST_PP_ITERATION_START_2 >= 149\n#    define BOOST_PP_ITERATION_2 149\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 148 && BOOST_PP_ITERATION_START_2 >= 148\n#    define BOOST_PP_ITERATION_2 148\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 147 && BOOST_PP_ITERATION_START_2 >= 147\n#    define BOOST_PP_ITERATION_2 147\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 146 && BOOST_PP_ITERATION_START_2 >= 146\n#    define BOOST_PP_ITERATION_2 146\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_2 <= 145 && BOOST_PP_ITERATION_START_2 >= 145\n#    define BOOST_PP_ITERATION_2 145\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 144 && BOOST_PP_ITERATION_START_2 >= 144\n#    define BOOST_PP_ITERATION_2 144\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 143 && BOOST_PP_ITERATION_START_2 >= 143\n#    define BOOST_PP_ITERATION_2 143\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 142 && BOOST_PP_ITERATION_START_2 >= 142\n#    define BOOST_PP_ITERATION_2 142\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 141 && BOOST_PP_ITERATION_START_2 >= 141\n#    define BOOST_PP_ITERATION_2 141\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 140 && BOOST_PP_ITERATION_START_2 >= 140\n#    define BOOST_PP_ITERATION_2 140\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 139 && BOOST_PP_ITERATION_START_2 >= 139\n#    define BOOST_PP_ITERATION_2 139\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 138 && BOOST_PP_ITERATION_START_2 >= 138\n#    define BOOST_PP_ITERATION_2 138\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 137 && BOOST_PP_ITERATION_START_2 >= 137\n#    define BOOST_PP_ITERATION_2 137\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 136 && BOOST_PP_ITERATION_START_2 >= 136\n#    define BOOST_PP_ITERATION_2 136\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 135 && BOOST_PP_ITERATION_START_2 >= 135\n#    
define BOOST_PP_ITERATION_2 135\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 134 && BOOST_PP_ITERATION_START_2 >= 134\n#    define BOOST_PP_ITERATION_2 134\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 133 && BOOST_PP_ITERATION_START_2 >= 133\n#    define BOOST_PP_ITERATION_2 133\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 132 && BOOST_PP_ITERATION_START_2 >= 132\n#    define BOOST_PP_ITERATION_2 132\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 131 && BOOST_PP_ITERATION_START_2 >= 131\n#    define BOOST_PP_ITERATION_2 131\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 130 && BOOST_PP_ITERATION_START_2 >= 130\n#    define BOOST_PP_ITERATION_2 130\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 129 && BOOST_PP_ITERATION_START_2 >= 129\n#    define BOOST_PP_ITERATION_2 129\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 128 && BOOST_PP_ITERATION_START_2 >= 128\n#    define BOOST_PP_ITERATION_2 128\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 127 && BOOST_PP_ITERATION_START_2 >= 127\n#    define BOOST_PP_ITERATION_2 127\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 126 && BOOST_PP_ITERATION_START_2 >= 126\n#    define BOOST_PP_ITERATION_2 126\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 125 && BOOST_PP_ITERATION_START_2 >= 125\n#    define BOOST_PP_ITERATION_2 125\n#    include BOOST_PP_FILENAME_2\n#    undef 
BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 124 && BOOST_PP_ITERATION_START_2 >= 124\n#    define BOOST_PP_ITERATION_2 124\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 123 && BOOST_PP_ITERATION_START_2 >= 123\n#    define BOOST_PP_ITERATION_2 123\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 122 && BOOST_PP_ITERATION_START_2 >= 122\n#    define BOOST_PP_ITERATION_2 122\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 121 && BOOST_PP_ITERATION_START_2 >= 121\n#    define BOOST_PP_ITERATION_2 121\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 120 && BOOST_PP_ITERATION_START_2 >= 120\n#    define BOOST_PP_ITERATION_2 120\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 119 && BOOST_PP_ITERATION_START_2 >= 119\n#    define BOOST_PP_ITERATION_2 119\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 118 && BOOST_PP_ITERATION_START_2 >= 118\n#    define BOOST_PP_ITERATION_2 118\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 117 && BOOST_PP_ITERATION_START_2 >= 117\n#    define BOOST_PP_ITERATION_2 117\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 116 && BOOST_PP_ITERATION_START_2 >= 116\n#    define BOOST_PP_ITERATION_2 116\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 115 && BOOST_PP_ITERATION_START_2 >= 115\n#    define BOOST_PP_ITERATION_2 115\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 114 && 
BOOST_PP_ITERATION_START_2 >= 114\n#    define BOOST_PP_ITERATION_2 114\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 113 && BOOST_PP_ITERATION_START_2 >= 113\n#    define BOOST_PP_ITERATION_2 113\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 112 && BOOST_PP_ITERATION_START_2 >= 112\n#    define BOOST_PP_ITERATION_2 112\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 111 && BOOST_PP_ITERATION_START_2 >= 111\n#    define BOOST_PP_ITERATION_2 111\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 110 && BOOST_PP_ITERATION_START_2 >= 110\n#    define BOOST_PP_ITERATION_2 110\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 109 && BOOST_PP_ITERATION_START_2 >= 109\n#    define BOOST_PP_ITERATION_2 109\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 108 && BOOST_PP_ITERATION_START_2 >= 108\n#    define BOOST_PP_ITERATION_2 108\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 107 && BOOST_PP_ITERATION_START_2 >= 107\n#    define BOOST_PP_ITERATION_2 107\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 106 && BOOST_PP_ITERATION_START_2 >= 106\n#    define BOOST_PP_ITERATION_2 106\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 105 && BOOST_PP_ITERATION_START_2 >= 105\n#    define BOOST_PP_ITERATION_2 105\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 104 && BOOST_PP_ITERATION_START_2 >= 104\n#    define BOOST_PP_ITERATION_2 104\n#    
include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 103 && BOOST_PP_ITERATION_START_2 >= 103\n#    define BOOST_PP_ITERATION_2 103\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 102 && BOOST_PP_ITERATION_START_2 >= 102\n#    define BOOST_PP_ITERATION_2 102\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 101 && BOOST_PP_ITERATION_START_2 >= 101\n#    define BOOST_PP_ITERATION_2 101\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 100 && BOOST_PP_ITERATION_START_2 >= 100\n#    define BOOST_PP_ITERATION_2 100\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 99 && BOOST_PP_ITERATION_START_2 >= 99\n#    define BOOST_PP_ITERATION_2 99\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 98 && BOOST_PP_ITERATION_START_2 >= 98\n#    define BOOST_PP_ITERATION_2 98\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 97 && BOOST_PP_ITERATION_START_2 >= 97\n#    define BOOST_PP_ITERATION_2 97\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 96 && BOOST_PP_ITERATION_START_2 >= 96\n#    define BOOST_PP_ITERATION_2 96\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 95 && BOOST_PP_ITERATION_START_2 >= 95\n#    define BOOST_PP_ITERATION_2 95\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 94 && BOOST_PP_ITERATION_START_2 >= 94\n#    define BOOST_PP_ITERATION_2 94\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_2 <= 93 && BOOST_PP_ITERATION_START_2 >= 93\n#    define BOOST_PP_ITERATION_2 93\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 92 && BOOST_PP_ITERATION_START_2 >= 92\n#    define BOOST_PP_ITERATION_2 92\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 91 && BOOST_PP_ITERATION_START_2 >= 91\n#    define BOOST_PP_ITERATION_2 91\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 90 && BOOST_PP_ITERATION_START_2 >= 90\n#    define BOOST_PP_ITERATION_2 90\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 89 && BOOST_PP_ITERATION_START_2 >= 89\n#    define BOOST_PP_ITERATION_2 89\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 88 && BOOST_PP_ITERATION_START_2 >= 88\n#    define BOOST_PP_ITERATION_2 88\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 87 && BOOST_PP_ITERATION_START_2 >= 87\n#    define BOOST_PP_ITERATION_2 87\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 86 && BOOST_PP_ITERATION_START_2 >= 86\n#    define BOOST_PP_ITERATION_2 86\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 85 && BOOST_PP_ITERATION_START_2 >= 85\n#    define BOOST_PP_ITERATION_2 85\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 84 && BOOST_PP_ITERATION_START_2 >= 84\n#    define BOOST_PP_ITERATION_2 84\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 83 && BOOST_PP_ITERATION_START_2 >= 83\n#    define BOOST_PP_ITERATION_2 83\n# 
   include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 82 && BOOST_PP_ITERATION_START_2 >= 82\n#    define BOOST_PP_ITERATION_2 82\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 81 && BOOST_PP_ITERATION_START_2 >= 81\n#    define BOOST_PP_ITERATION_2 81\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 80 && BOOST_PP_ITERATION_START_2 >= 80\n#    define BOOST_PP_ITERATION_2 80\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 79 && BOOST_PP_ITERATION_START_2 >= 79\n#    define BOOST_PP_ITERATION_2 79\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 78 && BOOST_PP_ITERATION_START_2 >= 78\n#    define BOOST_PP_ITERATION_2 78\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 77 && BOOST_PP_ITERATION_START_2 >= 77\n#    define BOOST_PP_ITERATION_2 77\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 76 && BOOST_PP_ITERATION_START_2 >= 76\n#    define BOOST_PP_ITERATION_2 76\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 75 && BOOST_PP_ITERATION_START_2 >= 75\n#    define BOOST_PP_ITERATION_2 75\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 74 && BOOST_PP_ITERATION_START_2 >= 74\n#    define BOOST_PP_ITERATION_2 74\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 73 && BOOST_PP_ITERATION_START_2 >= 73\n#    define BOOST_PP_ITERATION_2 73\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 
72 && BOOST_PP_ITERATION_START_2 >= 72\n#    define BOOST_PP_ITERATION_2 72\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 71 && BOOST_PP_ITERATION_START_2 >= 71\n#    define BOOST_PP_ITERATION_2 71\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 70 && BOOST_PP_ITERATION_START_2 >= 70\n#    define BOOST_PP_ITERATION_2 70\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 69 && BOOST_PP_ITERATION_START_2 >= 69\n#    define BOOST_PP_ITERATION_2 69\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 68 && BOOST_PP_ITERATION_START_2 >= 68\n#    define BOOST_PP_ITERATION_2 68\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 67 && BOOST_PP_ITERATION_START_2 >= 67\n#    define BOOST_PP_ITERATION_2 67\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 66 && BOOST_PP_ITERATION_START_2 >= 66\n#    define BOOST_PP_ITERATION_2 66\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 65 && BOOST_PP_ITERATION_START_2 >= 65\n#    define BOOST_PP_ITERATION_2 65\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 64 && BOOST_PP_ITERATION_START_2 >= 64\n#    define BOOST_PP_ITERATION_2 64\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 63 && BOOST_PP_ITERATION_START_2 >= 63\n#    define BOOST_PP_ITERATION_2 63\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 62 && BOOST_PP_ITERATION_START_2 >= 62\n#    define BOOST_PP_ITERATION_2 62\n#    include 
BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 61 && BOOST_PP_ITERATION_START_2 >= 61\n#    define BOOST_PP_ITERATION_2 61\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 60 && BOOST_PP_ITERATION_START_2 >= 60\n#    define BOOST_PP_ITERATION_2 60\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 59 && BOOST_PP_ITERATION_START_2 >= 59\n#    define BOOST_PP_ITERATION_2 59\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 58 && BOOST_PP_ITERATION_START_2 >= 58\n#    define BOOST_PP_ITERATION_2 58\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 57 && BOOST_PP_ITERATION_START_2 >= 57\n#    define BOOST_PP_ITERATION_2 57\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 56 && BOOST_PP_ITERATION_START_2 >= 56\n#    define BOOST_PP_ITERATION_2 56\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 55 && BOOST_PP_ITERATION_START_2 >= 55\n#    define BOOST_PP_ITERATION_2 55\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 54 && BOOST_PP_ITERATION_START_2 >= 54\n#    define BOOST_PP_ITERATION_2 54\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 53 && BOOST_PP_ITERATION_START_2 >= 53\n#    define BOOST_PP_ITERATION_2 53\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 52 && BOOST_PP_ITERATION_START_2 >= 52\n#    define BOOST_PP_ITERATION_2 52\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 51 && 
BOOST_PP_ITERATION_START_2 >= 51\n#    define BOOST_PP_ITERATION_2 51\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 50 && BOOST_PP_ITERATION_START_2 >= 50\n#    define BOOST_PP_ITERATION_2 50\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 49 && BOOST_PP_ITERATION_START_2 >= 49\n#    define BOOST_PP_ITERATION_2 49\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 48 && BOOST_PP_ITERATION_START_2 >= 48\n#    define BOOST_PP_ITERATION_2 48\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 47 && BOOST_PP_ITERATION_START_2 >= 47\n#    define BOOST_PP_ITERATION_2 47\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 46 && BOOST_PP_ITERATION_START_2 >= 46\n#    define BOOST_PP_ITERATION_2 46\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 45 && BOOST_PP_ITERATION_START_2 >= 45\n#    define BOOST_PP_ITERATION_2 45\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 44 && BOOST_PP_ITERATION_START_2 >= 44\n#    define BOOST_PP_ITERATION_2 44\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 43 && BOOST_PP_ITERATION_START_2 >= 43\n#    define BOOST_PP_ITERATION_2 43\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 42 && BOOST_PP_ITERATION_START_2 >= 42\n#    define BOOST_PP_ITERATION_2 42\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 41 && BOOST_PP_ITERATION_START_2 >= 41\n#    define BOOST_PP_ITERATION_2 41\n#    include BOOST_PP_FILENAME_2\n#    
undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 40 && BOOST_PP_ITERATION_START_2 >= 40\n#    define BOOST_PP_ITERATION_2 40\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 39 && BOOST_PP_ITERATION_START_2 >= 39\n#    define BOOST_PP_ITERATION_2 39\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 38 && BOOST_PP_ITERATION_START_2 >= 38\n#    define BOOST_PP_ITERATION_2 38\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 37 && BOOST_PP_ITERATION_START_2 >= 37\n#    define BOOST_PP_ITERATION_2 37\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 36 && BOOST_PP_ITERATION_START_2 >= 36\n#    define BOOST_PP_ITERATION_2 36\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 35 && BOOST_PP_ITERATION_START_2 >= 35\n#    define BOOST_PP_ITERATION_2 35\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 34 && BOOST_PP_ITERATION_START_2 >= 34\n#    define BOOST_PP_ITERATION_2 34\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 33 && BOOST_PP_ITERATION_START_2 >= 33\n#    define BOOST_PP_ITERATION_2 33\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 32 && BOOST_PP_ITERATION_START_2 >= 32\n#    define BOOST_PP_ITERATION_2 32\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 31 && BOOST_PP_ITERATION_START_2 >= 31\n#    define BOOST_PP_ITERATION_2 31\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 30 && BOOST_PP_ITERATION_START_2 >= 
30\n#    define BOOST_PP_ITERATION_2 30\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 29 && BOOST_PP_ITERATION_START_2 >= 29\n#    define BOOST_PP_ITERATION_2 29\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 28 && BOOST_PP_ITERATION_START_2 >= 28\n#    define BOOST_PP_ITERATION_2 28\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 27 && BOOST_PP_ITERATION_START_2 >= 27\n#    define BOOST_PP_ITERATION_2 27\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 26 && BOOST_PP_ITERATION_START_2 >= 26\n#    define BOOST_PP_ITERATION_2 26\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 25 && BOOST_PP_ITERATION_START_2 >= 25\n#    define BOOST_PP_ITERATION_2 25\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 24 && BOOST_PP_ITERATION_START_2 >= 24\n#    define BOOST_PP_ITERATION_2 24\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 23 && BOOST_PP_ITERATION_START_2 >= 23\n#    define BOOST_PP_ITERATION_2 23\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 22 && BOOST_PP_ITERATION_START_2 >= 22\n#    define BOOST_PP_ITERATION_2 22\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 21 && BOOST_PP_ITERATION_START_2 >= 21\n#    define BOOST_PP_ITERATION_2 21\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 20 && BOOST_PP_ITERATION_START_2 >= 20\n#    define BOOST_PP_ITERATION_2 20\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# 
endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 19 && BOOST_PP_ITERATION_START_2 >= 19\n#    define BOOST_PP_ITERATION_2 19\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 18 && BOOST_PP_ITERATION_START_2 >= 18\n#    define BOOST_PP_ITERATION_2 18\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 17 && BOOST_PP_ITERATION_START_2 >= 17\n#    define BOOST_PP_ITERATION_2 17\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 16 && BOOST_PP_ITERATION_START_2 >= 16\n#    define BOOST_PP_ITERATION_2 16\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 15 && BOOST_PP_ITERATION_START_2 >= 15\n#    define BOOST_PP_ITERATION_2 15\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 14 && BOOST_PP_ITERATION_START_2 >= 14\n#    define BOOST_PP_ITERATION_2 14\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 13 && BOOST_PP_ITERATION_START_2 >= 13\n#    define BOOST_PP_ITERATION_2 13\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 12 && BOOST_PP_ITERATION_START_2 >= 12\n#    define BOOST_PP_ITERATION_2 12\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 11 && BOOST_PP_ITERATION_START_2 >= 11\n#    define BOOST_PP_ITERATION_2 11\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 10 && BOOST_PP_ITERATION_START_2 >= 10\n#    define BOOST_PP_ITERATION_2 10\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 9 && BOOST_PP_ITERATION_START_2 >= 9\n#    define 
BOOST_PP_ITERATION_2 9\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 8 && BOOST_PP_ITERATION_START_2 >= 8\n#    define BOOST_PP_ITERATION_2 8\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 7 && BOOST_PP_ITERATION_START_2 >= 7\n#    define BOOST_PP_ITERATION_2 7\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 6 && BOOST_PP_ITERATION_START_2 >= 6\n#    define BOOST_PP_ITERATION_2 6\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 5 && BOOST_PP_ITERATION_START_2 >= 5\n#    define BOOST_PP_ITERATION_2 5\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 4 && BOOST_PP_ITERATION_START_2 >= 4\n#    define BOOST_PP_ITERATION_2 4\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 3 && BOOST_PP_ITERATION_START_2 >= 3\n#    define BOOST_PP_ITERATION_2 3\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 2 && BOOST_PP_ITERATION_START_2 >= 2\n#    define BOOST_PP_ITERATION_2 2\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 1 && BOOST_PP_ITERATION_START_2 >= 1\n#    define BOOST_PP_ITERATION_2 1\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n# if BOOST_PP_ITERATION_FINISH_2 <= 0 && BOOST_PP_ITERATION_START_2 >= 0\n#    define BOOST_PP_ITERATION_2 0\n#    include BOOST_PP_FILENAME_2\n#    undef BOOST_PP_ITERATION_2\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/reverse3.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if BOOST_PP_ITERATION_FINISH_3 <= 256 && BOOST_PP_ITERATION_START_3 >= 256\n#    define BOOST_PP_ITERATION_3 256\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 255 && BOOST_PP_ITERATION_START_3 >= 255\n#    define BOOST_PP_ITERATION_3 255\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 254 && BOOST_PP_ITERATION_START_3 >= 254\n#    define BOOST_PP_ITERATION_3 254\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 253 && BOOST_PP_ITERATION_START_3 >= 253\n#    define BOOST_PP_ITERATION_3 253\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 252 && BOOST_PP_ITERATION_START_3 >= 252\n#    define BOOST_PP_ITERATION_3 252\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 251 && BOOST_PP_ITERATION_START_3 >= 251\n#    define BOOST_PP_ITERATION_3 251\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 250 && BOOST_PP_ITERATION_START_3 >= 250\n#    define BOOST_PP_ITERATION_3 250\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_3 <= 249 && BOOST_PP_ITERATION_START_3 >= 249\n#    define BOOST_PP_ITERATION_3 249\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 248 && BOOST_PP_ITERATION_START_3 >= 248\n#    define BOOST_PP_ITERATION_3 248\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 247 && BOOST_PP_ITERATION_START_3 >= 247\n#    define BOOST_PP_ITERATION_3 247\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 246 && BOOST_PP_ITERATION_START_3 >= 246\n#    define BOOST_PP_ITERATION_3 246\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 245 && BOOST_PP_ITERATION_START_3 >= 245\n#    define BOOST_PP_ITERATION_3 245\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 244 && BOOST_PP_ITERATION_START_3 >= 244\n#    define BOOST_PP_ITERATION_3 244\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 243 && BOOST_PP_ITERATION_START_3 >= 243\n#    define BOOST_PP_ITERATION_3 243\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 242 && BOOST_PP_ITERATION_START_3 >= 242\n#    define BOOST_PP_ITERATION_3 242\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 241 && BOOST_PP_ITERATION_START_3 >= 241\n#    define BOOST_PP_ITERATION_3 241\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 240 && BOOST_PP_ITERATION_START_3 >= 240\n#    define BOOST_PP_ITERATION_3 240\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 239 && BOOST_PP_ITERATION_START_3 >= 239\n#    
define BOOST_PP_ITERATION_3 239\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 238 && BOOST_PP_ITERATION_START_3 >= 238\n#    define BOOST_PP_ITERATION_3 238\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 237 && BOOST_PP_ITERATION_START_3 >= 237\n#    define BOOST_PP_ITERATION_3 237\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 236 && BOOST_PP_ITERATION_START_3 >= 236\n#    define BOOST_PP_ITERATION_3 236\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 235 && BOOST_PP_ITERATION_START_3 >= 235\n#    define BOOST_PP_ITERATION_3 235\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 234 && BOOST_PP_ITERATION_START_3 >= 234\n#    define BOOST_PP_ITERATION_3 234\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 233 && BOOST_PP_ITERATION_START_3 >= 233\n#    define BOOST_PP_ITERATION_3 233\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 232 && BOOST_PP_ITERATION_START_3 >= 232\n#    define BOOST_PP_ITERATION_3 232\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 231 && BOOST_PP_ITERATION_START_3 >= 231\n#    define BOOST_PP_ITERATION_3 231\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 230 && BOOST_PP_ITERATION_START_3 >= 230\n#    define BOOST_PP_ITERATION_3 230\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 229 && BOOST_PP_ITERATION_START_3 >= 229\n#    define BOOST_PP_ITERATION_3 229\n#    include BOOST_PP_FILENAME_3\n#    undef 
BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 228 && BOOST_PP_ITERATION_START_3 >= 228\n#    define BOOST_PP_ITERATION_3 228\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 227 && BOOST_PP_ITERATION_START_3 >= 227\n#    define BOOST_PP_ITERATION_3 227\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 226 && BOOST_PP_ITERATION_START_3 >= 226\n#    define BOOST_PP_ITERATION_3 226\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 225 && BOOST_PP_ITERATION_START_3 >= 225\n#    define BOOST_PP_ITERATION_3 225\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 224 && BOOST_PP_ITERATION_START_3 >= 224\n#    define BOOST_PP_ITERATION_3 224\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 223 && BOOST_PP_ITERATION_START_3 >= 223\n#    define BOOST_PP_ITERATION_3 223\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 222 && BOOST_PP_ITERATION_START_3 >= 222\n#    define BOOST_PP_ITERATION_3 222\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 221 && BOOST_PP_ITERATION_START_3 >= 221\n#    define BOOST_PP_ITERATION_3 221\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 220 && BOOST_PP_ITERATION_START_3 >= 220\n#    define BOOST_PP_ITERATION_3 220\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 219 && BOOST_PP_ITERATION_START_3 >= 219\n#    define BOOST_PP_ITERATION_3 219\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 218 && 
BOOST_PP_ITERATION_START_3 >= 218\n#    define BOOST_PP_ITERATION_3 218\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 217 && BOOST_PP_ITERATION_START_3 >= 217\n#    define BOOST_PP_ITERATION_3 217\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 216 && BOOST_PP_ITERATION_START_3 >= 216\n#    define BOOST_PP_ITERATION_3 216\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 215 && BOOST_PP_ITERATION_START_3 >= 215\n#    define BOOST_PP_ITERATION_3 215\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 214 && BOOST_PP_ITERATION_START_3 >= 214\n#    define BOOST_PP_ITERATION_3 214\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 213 && BOOST_PP_ITERATION_START_3 >= 213\n#    define BOOST_PP_ITERATION_3 213\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 212 && BOOST_PP_ITERATION_START_3 >= 212\n#    define BOOST_PP_ITERATION_3 212\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 211 && BOOST_PP_ITERATION_START_3 >= 211\n#    define BOOST_PP_ITERATION_3 211\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 210 && BOOST_PP_ITERATION_START_3 >= 210\n#    define BOOST_PP_ITERATION_3 210\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 209 && BOOST_PP_ITERATION_START_3 >= 209\n#    define BOOST_PP_ITERATION_3 209\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 208 && BOOST_PP_ITERATION_START_3 >= 208\n#    define BOOST_PP_ITERATION_3 208\n#    
include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 207 && BOOST_PP_ITERATION_START_3 >= 207\n#    define BOOST_PP_ITERATION_3 207\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 206 && BOOST_PP_ITERATION_START_3 >= 206\n#    define BOOST_PP_ITERATION_3 206\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 205 && BOOST_PP_ITERATION_START_3 >= 205\n#    define BOOST_PP_ITERATION_3 205\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 204 && BOOST_PP_ITERATION_START_3 >= 204\n#    define BOOST_PP_ITERATION_3 204\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 203 && BOOST_PP_ITERATION_START_3 >= 203\n#    define BOOST_PP_ITERATION_3 203\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 202 && BOOST_PP_ITERATION_START_3 >= 202\n#    define BOOST_PP_ITERATION_3 202\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 201 && BOOST_PP_ITERATION_START_3 >= 201\n#    define BOOST_PP_ITERATION_3 201\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 200 && BOOST_PP_ITERATION_START_3 >= 200\n#    define BOOST_PP_ITERATION_3 200\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 199 && BOOST_PP_ITERATION_START_3 >= 199\n#    define BOOST_PP_ITERATION_3 199\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 198 && BOOST_PP_ITERATION_START_3 >= 198\n#    define BOOST_PP_ITERATION_3 198\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_3 <= 197 && BOOST_PP_ITERATION_START_3 >= 197\n#    define BOOST_PP_ITERATION_3 197\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 196 && BOOST_PP_ITERATION_START_3 >= 196\n#    define BOOST_PP_ITERATION_3 196\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 195 && BOOST_PP_ITERATION_START_3 >= 195\n#    define BOOST_PP_ITERATION_3 195\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 194 && BOOST_PP_ITERATION_START_3 >= 194\n#    define BOOST_PP_ITERATION_3 194\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 193 && BOOST_PP_ITERATION_START_3 >= 193\n#    define BOOST_PP_ITERATION_3 193\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 192 && BOOST_PP_ITERATION_START_3 >= 192\n#    define BOOST_PP_ITERATION_3 192\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 191 && BOOST_PP_ITERATION_START_3 >= 191\n#    define BOOST_PP_ITERATION_3 191\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 190 && BOOST_PP_ITERATION_START_3 >= 190\n#    define BOOST_PP_ITERATION_3 190\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 189 && BOOST_PP_ITERATION_START_3 >= 189\n#    define BOOST_PP_ITERATION_3 189\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 188 && BOOST_PP_ITERATION_START_3 >= 188\n#    define BOOST_PP_ITERATION_3 188\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 187 && BOOST_PP_ITERATION_START_3 >= 187\n#    
define BOOST_PP_ITERATION_3 187\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 186 && BOOST_PP_ITERATION_START_3 >= 186\n#    define BOOST_PP_ITERATION_3 186\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 185 && BOOST_PP_ITERATION_START_3 >= 185\n#    define BOOST_PP_ITERATION_3 185\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 184 && BOOST_PP_ITERATION_START_3 >= 184\n#    define BOOST_PP_ITERATION_3 184\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 183 && BOOST_PP_ITERATION_START_3 >= 183\n#    define BOOST_PP_ITERATION_3 183\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 182 && BOOST_PP_ITERATION_START_3 >= 182\n#    define BOOST_PP_ITERATION_3 182\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 181 && BOOST_PP_ITERATION_START_3 >= 181\n#    define BOOST_PP_ITERATION_3 181\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 180 && BOOST_PP_ITERATION_START_3 >= 180\n#    define BOOST_PP_ITERATION_3 180\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 179 && BOOST_PP_ITERATION_START_3 >= 179\n#    define BOOST_PP_ITERATION_3 179\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 178 && BOOST_PP_ITERATION_START_3 >= 178\n#    define BOOST_PP_ITERATION_3 178\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 177 && BOOST_PP_ITERATION_START_3 >= 177\n#    define BOOST_PP_ITERATION_3 177\n#    include BOOST_PP_FILENAME_3\n#    undef 
BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 176 && BOOST_PP_ITERATION_START_3 >= 176\n#    define BOOST_PP_ITERATION_3 176\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 175 && BOOST_PP_ITERATION_START_3 >= 175\n#    define BOOST_PP_ITERATION_3 175\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 174 && BOOST_PP_ITERATION_START_3 >= 174\n#    define BOOST_PP_ITERATION_3 174\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 173 && BOOST_PP_ITERATION_START_3 >= 173\n#    define BOOST_PP_ITERATION_3 173\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 172 && BOOST_PP_ITERATION_START_3 >= 172\n#    define BOOST_PP_ITERATION_3 172\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 171 && BOOST_PP_ITERATION_START_3 >= 171\n#    define BOOST_PP_ITERATION_3 171\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 170 && BOOST_PP_ITERATION_START_3 >= 170\n#    define BOOST_PP_ITERATION_3 170\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 169 && BOOST_PP_ITERATION_START_3 >= 169\n#    define BOOST_PP_ITERATION_3 169\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 168 && BOOST_PP_ITERATION_START_3 >= 168\n#    define BOOST_PP_ITERATION_3 168\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 167 && BOOST_PP_ITERATION_START_3 >= 167\n#    define BOOST_PP_ITERATION_3 167\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 166 && 
BOOST_PP_ITERATION_START_3 >= 166\n#    define BOOST_PP_ITERATION_3 166\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 165 && BOOST_PP_ITERATION_START_3 >= 165\n#    define BOOST_PP_ITERATION_3 165\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 164 && BOOST_PP_ITERATION_START_3 >= 164\n#    define BOOST_PP_ITERATION_3 164\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 163 && BOOST_PP_ITERATION_START_3 >= 163\n#    define BOOST_PP_ITERATION_3 163\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 162 && BOOST_PP_ITERATION_START_3 >= 162\n#    define BOOST_PP_ITERATION_3 162\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 161 && BOOST_PP_ITERATION_START_3 >= 161\n#    define BOOST_PP_ITERATION_3 161\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 160 && BOOST_PP_ITERATION_START_3 >= 160\n#    define BOOST_PP_ITERATION_3 160\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 159 && BOOST_PP_ITERATION_START_3 >= 159\n#    define BOOST_PP_ITERATION_3 159\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 158 && BOOST_PP_ITERATION_START_3 >= 158\n#    define BOOST_PP_ITERATION_3 158\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 157 && BOOST_PP_ITERATION_START_3 >= 157\n#    define BOOST_PP_ITERATION_3 157\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 156 && BOOST_PP_ITERATION_START_3 >= 156\n#    define BOOST_PP_ITERATION_3 156\n#    
include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 155 && BOOST_PP_ITERATION_START_3 >= 155\n#    define BOOST_PP_ITERATION_3 155\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 154 && BOOST_PP_ITERATION_START_3 >= 154\n#    define BOOST_PP_ITERATION_3 154\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 153 && BOOST_PP_ITERATION_START_3 >= 153\n#    define BOOST_PP_ITERATION_3 153\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 152 && BOOST_PP_ITERATION_START_3 >= 152\n#    define BOOST_PP_ITERATION_3 152\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 151 && BOOST_PP_ITERATION_START_3 >= 151\n#    define BOOST_PP_ITERATION_3 151\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 150 && BOOST_PP_ITERATION_START_3 >= 150\n#    define BOOST_PP_ITERATION_3 150\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 149 && BOOST_PP_ITERATION_START_3 >= 149\n#    define BOOST_PP_ITERATION_3 149\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 148 && BOOST_PP_ITERATION_START_3 >= 148\n#    define BOOST_PP_ITERATION_3 148\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 147 && BOOST_PP_ITERATION_START_3 >= 147\n#    define BOOST_PP_ITERATION_3 147\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 146 && BOOST_PP_ITERATION_START_3 >= 146\n#    define BOOST_PP_ITERATION_3 146\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_3 <= 145 && BOOST_PP_ITERATION_START_3 >= 145\n#    define BOOST_PP_ITERATION_3 145\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 144 && BOOST_PP_ITERATION_START_3 >= 144\n#    define BOOST_PP_ITERATION_3 144\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 143 && BOOST_PP_ITERATION_START_3 >= 143\n#    define BOOST_PP_ITERATION_3 143\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 142 && BOOST_PP_ITERATION_START_3 >= 142\n#    define BOOST_PP_ITERATION_3 142\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 141 && BOOST_PP_ITERATION_START_3 >= 141\n#    define BOOST_PP_ITERATION_3 141\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 140 && BOOST_PP_ITERATION_START_3 >= 140\n#    define BOOST_PP_ITERATION_3 140\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 139 && BOOST_PP_ITERATION_START_3 >= 139\n#    define BOOST_PP_ITERATION_3 139\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 138 && BOOST_PP_ITERATION_START_3 >= 138\n#    define BOOST_PP_ITERATION_3 138\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 137 && BOOST_PP_ITERATION_START_3 >= 137\n#    define BOOST_PP_ITERATION_3 137\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 136 && BOOST_PP_ITERATION_START_3 >= 136\n#    define BOOST_PP_ITERATION_3 136\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 135 && BOOST_PP_ITERATION_START_3 >= 135\n#    
define BOOST_PP_ITERATION_3 135\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 134 && BOOST_PP_ITERATION_START_3 >= 134\n#    define BOOST_PP_ITERATION_3 134\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 133 && BOOST_PP_ITERATION_START_3 >= 133\n#    define BOOST_PP_ITERATION_3 133\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 132 && BOOST_PP_ITERATION_START_3 >= 132\n#    define BOOST_PP_ITERATION_3 132\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 131 && BOOST_PP_ITERATION_START_3 >= 131\n#    define BOOST_PP_ITERATION_3 131\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 130 && BOOST_PP_ITERATION_START_3 >= 130\n#    define BOOST_PP_ITERATION_3 130\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 129 && BOOST_PP_ITERATION_START_3 >= 129\n#    define BOOST_PP_ITERATION_3 129\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 128 && BOOST_PP_ITERATION_START_3 >= 128\n#    define BOOST_PP_ITERATION_3 128\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 127 && BOOST_PP_ITERATION_START_3 >= 127\n#    define BOOST_PP_ITERATION_3 127\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 126 && BOOST_PP_ITERATION_START_3 >= 126\n#    define BOOST_PP_ITERATION_3 126\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 125 && BOOST_PP_ITERATION_START_3 >= 125\n#    define BOOST_PP_ITERATION_3 125\n#    include BOOST_PP_FILENAME_3\n#    undef 
BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 124 && BOOST_PP_ITERATION_START_3 >= 124\n#    define BOOST_PP_ITERATION_3 124\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 123 && BOOST_PP_ITERATION_START_3 >= 123\n#    define BOOST_PP_ITERATION_3 123\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 122 && BOOST_PP_ITERATION_START_3 >= 122\n#    define BOOST_PP_ITERATION_3 122\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 121 && BOOST_PP_ITERATION_START_3 >= 121\n#    define BOOST_PP_ITERATION_3 121\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 120 && BOOST_PP_ITERATION_START_3 >= 120\n#    define BOOST_PP_ITERATION_3 120\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 119 && BOOST_PP_ITERATION_START_3 >= 119\n#    define BOOST_PP_ITERATION_3 119\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 118 && BOOST_PP_ITERATION_START_3 >= 118\n#    define BOOST_PP_ITERATION_3 118\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 117 && BOOST_PP_ITERATION_START_3 >= 117\n#    define BOOST_PP_ITERATION_3 117\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 116 && BOOST_PP_ITERATION_START_3 >= 116\n#    define BOOST_PP_ITERATION_3 116\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 115 && BOOST_PP_ITERATION_START_3 >= 115\n#    define BOOST_PP_ITERATION_3 115\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 114 && 
BOOST_PP_ITERATION_START_3 >= 114\n#    define BOOST_PP_ITERATION_3 114\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 113 && BOOST_PP_ITERATION_START_3 >= 113\n#    define BOOST_PP_ITERATION_3 113\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 112 && BOOST_PP_ITERATION_START_3 >= 112\n#    define BOOST_PP_ITERATION_3 112\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 111 && BOOST_PP_ITERATION_START_3 >= 111\n#    define BOOST_PP_ITERATION_3 111\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 110 && BOOST_PP_ITERATION_START_3 >= 110\n#    define BOOST_PP_ITERATION_3 110\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 109 && BOOST_PP_ITERATION_START_3 >= 109\n#    define BOOST_PP_ITERATION_3 109\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 108 && BOOST_PP_ITERATION_START_3 >= 108\n#    define BOOST_PP_ITERATION_3 108\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 107 && BOOST_PP_ITERATION_START_3 >= 107\n#    define BOOST_PP_ITERATION_3 107\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 106 && BOOST_PP_ITERATION_START_3 >= 106\n#    define BOOST_PP_ITERATION_3 106\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 105 && BOOST_PP_ITERATION_START_3 >= 105\n#    define BOOST_PP_ITERATION_3 105\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 104 && BOOST_PP_ITERATION_START_3 >= 104\n#    define BOOST_PP_ITERATION_3 104\n#    
include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 103 && BOOST_PP_ITERATION_START_3 >= 103\n#    define BOOST_PP_ITERATION_3 103\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 102 && BOOST_PP_ITERATION_START_3 >= 102\n#    define BOOST_PP_ITERATION_3 102\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 101 && BOOST_PP_ITERATION_START_3 >= 101\n#    define BOOST_PP_ITERATION_3 101\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 100 && BOOST_PP_ITERATION_START_3 >= 100\n#    define BOOST_PP_ITERATION_3 100\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 99 && BOOST_PP_ITERATION_START_3 >= 99\n#    define BOOST_PP_ITERATION_3 99\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 98 && BOOST_PP_ITERATION_START_3 >= 98\n#    define BOOST_PP_ITERATION_3 98\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 97 && BOOST_PP_ITERATION_START_3 >= 97\n#    define BOOST_PP_ITERATION_3 97\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 96 && BOOST_PP_ITERATION_START_3 >= 96\n#    define BOOST_PP_ITERATION_3 96\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 95 && BOOST_PP_ITERATION_START_3 >= 95\n#    define BOOST_PP_ITERATION_3 95\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 94 && BOOST_PP_ITERATION_START_3 >= 94\n#    define BOOST_PP_ITERATION_3 94\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_3 <= 93 && BOOST_PP_ITERATION_START_3 >= 93\n#    define BOOST_PP_ITERATION_3 93\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 92 && BOOST_PP_ITERATION_START_3 >= 92\n#    define BOOST_PP_ITERATION_3 92\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 91 && BOOST_PP_ITERATION_START_3 >= 91\n#    define BOOST_PP_ITERATION_3 91\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 90 && BOOST_PP_ITERATION_START_3 >= 90\n#    define BOOST_PP_ITERATION_3 90\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 89 && BOOST_PP_ITERATION_START_3 >= 89\n#    define BOOST_PP_ITERATION_3 89\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 88 && BOOST_PP_ITERATION_START_3 >= 88\n#    define BOOST_PP_ITERATION_3 88\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 87 && BOOST_PP_ITERATION_START_3 >= 87\n#    define BOOST_PP_ITERATION_3 87\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 86 && BOOST_PP_ITERATION_START_3 >= 86\n#    define BOOST_PP_ITERATION_3 86\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 85 && BOOST_PP_ITERATION_START_3 >= 85\n#    define BOOST_PP_ITERATION_3 85\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 84 && BOOST_PP_ITERATION_START_3 >= 84\n#    define BOOST_PP_ITERATION_3 84\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 83 && BOOST_PP_ITERATION_START_3 >= 83\n#    define BOOST_PP_ITERATION_3 83\n# 
   include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 82 && BOOST_PP_ITERATION_START_3 >= 82\n#    define BOOST_PP_ITERATION_3 82\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 81 && BOOST_PP_ITERATION_START_3 >= 81\n#    define BOOST_PP_ITERATION_3 81\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 80 && BOOST_PP_ITERATION_START_3 >= 80\n#    define BOOST_PP_ITERATION_3 80\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 79 && BOOST_PP_ITERATION_START_3 >= 79\n#    define BOOST_PP_ITERATION_3 79\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 78 && BOOST_PP_ITERATION_START_3 >= 78\n#    define BOOST_PP_ITERATION_3 78\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 77 && BOOST_PP_ITERATION_START_3 >= 77\n#    define BOOST_PP_ITERATION_3 77\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 76 && BOOST_PP_ITERATION_START_3 >= 76\n#    define BOOST_PP_ITERATION_3 76\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 75 && BOOST_PP_ITERATION_START_3 >= 75\n#    define BOOST_PP_ITERATION_3 75\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 74 && BOOST_PP_ITERATION_START_3 >= 74\n#    define BOOST_PP_ITERATION_3 74\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 73 && BOOST_PP_ITERATION_START_3 >= 73\n#    define BOOST_PP_ITERATION_3 73\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 
72 && BOOST_PP_ITERATION_START_3 >= 72\n#    define BOOST_PP_ITERATION_3 72\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 71 && BOOST_PP_ITERATION_START_3 >= 71\n#    define BOOST_PP_ITERATION_3 71\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 70 && BOOST_PP_ITERATION_START_3 >= 70\n#    define BOOST_PP_ITERATION_3 70\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 69 && BOOST_PP_ITERATION_START_3 >= 69\n#    define BOOST_PP_ITERATION_3 69\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 68 && BOOST_PP_ITERATION_START_3 >= 68\n#    define BOOST_PP_ITERATION_3 68\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 67 && BOOST_PP_ITERATION_START_3 >= 67\n#    define BOOST_PP_ITERATION_3 67\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 66 && BOOST_PP_ITERATION_START_3 >= 66\n#    define BOOST_PP_ITERATION_3 66\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 65 && BOOST_PP_ITERATION_START_3 >= 65\n#    define BOOST_PP_ITERATION_3 65\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 64 && BOOST_PP_ITERATION_START_3 >= 64\n#    define BOOST_PP_ITERATION_3 64\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 63 && BOOST_PP_ITERATION_START_3 >= 63\n#    define BOOST_PP_ITERATION_3 63\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 62 && BOOST_PP_ITERATION_START_3 >= 62\n#    define BOOST_PP_ITERATION_3 62\n#    include 
BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 61 && BOOST_PP_ITERATION_START_3 >= 61\n#    define BOOST_PP_ITERATION_3 61\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 60 && BOOST_PP_ITERATION_START_3 >= 60\n#    define BOOST_PP_ITERATION_3 60\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 59 && BOOST_PP_ITERATION_START_3 >= 59\n#    define BOOST_PP_ITERATION_3 59\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 58 && BOOST_PP_ITERATION_START_3 >= 58\n#    define BOOST_PP_ITERATION_3 58\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 57 && BOOST_PP_ITERATION_START_3 >= 57\n#    define BOOST_PP_ITERATION_3 57\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 56 && BOOST_PP_ITERATION_START_3 >= 56\n#    define BOOST_PP_ITERATION_3 56\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 55 && BOOST_PP_ITERATION_START_3 >= 55\n#    define BOOST_PP_ITERATION_3 55\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 54 && BOOST_PP_ITERATION_START_3 >= 54\n#    define BOOST_PP_ITERATION_3 54\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 53 && BOOST_PP_ITERATION_START_3 >= 53\n#    define BOOST_PP_ITERATION_3 53\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 52 && BOOST_PP_ITERATION_START_3 >= 52\n#    define BOOST_PP_ITERATION_3 52\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 51 && 
BOOST_PP_ITERATION_START_3 >= 51\n#    define BOOST_PP_ITERATION_3 51\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 50 && BOOST_PP_ITERATION_START_3 >= 50\n#    define BOOST_PP_ITERATION_3 50\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 49 && BOOST_PP_ITERATION_START_3 >= 49\n#    define BOOST_PP_ITERATION_3 49\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 48 && BOOST_PP_ITERATION_START_3 >= 48\n#    define BOOST_PP_ITERATION_3 48\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 47 && BOOST_PP_ITERATION_START_3 >= 47\n#    define BOOST_PP_ITERATION_3 47\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 46 && BOOST_PP_ITERATION_START_3 >= 46\n#    define BOOST_PP_ITERATION_3 46\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 45 && BOOST_PP_ITERATION_START_3 >= 45\n#    define BOOST_PP_ITERATION_3 45\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 44 && BOOST_PP_ITERATION_START_3 >= 44\n#    define BOOST_PP_ITERATION_3 44\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 43 && BOOST_PP_ITERATION_START_3 >= 43\n#    define BOOST_PP_ITERATION_3 43\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 42 && BOOST_PP_ITERATION_START_3 >= 42\n#    define BOOST_PP_ITERATION_3 42\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 41 && BOOST_PP_ITERATION_START_3 >= 41\n#    define BOOST_PP_ITERATION_3 41\n#    include BOOST_PP_FILENAME_3\n#    
undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 40 && BOOST_PP_ITERATION_START_3 >= 40\n#    define BOOST_PP_ITERATION_3 40\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 39 && BOOST_PP_ITERATION_START_3 >= 39\n#    define BOOST_PP_ITERATION_3 39\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 38 && BOOST_PP_ITERATION_START_3 >= 38\n#    define BOOST_PP_ITERATION_3 38\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 37 && BOOST_PP_ITERATION_START_3 >= 37\n#    define BOOST_PP_ITERATION_3 37\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 36 && BOOST_PP_ITERATION_START_3 >= 36\n#    define BOOST_PP_ITERATION_3 36\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 35 && BOOST_PP_ITERATION_START_3 >= 35\n#    define BOOST_PP_ITERATION_3 35\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 34 && BOOST_PP_ITERATION_START_3 >= 34\n#    define BOOST_PP_ITERATION_3 34\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 33 && BOOST_PP_ITERATION_START_3 >= 33\n#    define BOOST_PP_ITERATION_3 33\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 32 && BOOST_PP_ITERATION_START_3 >= 32\n#    define BOOST_PP_ITERATION_3 32\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 31 && BOOST_PP_ITERATION_START_3 >= 31\n#    define BOOST_PP_ITERATION_3 31\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 30 && BOOST_PP_ITERATION_START_3 >= 
30\n#    define BOOST_PP_ITERATION_3 30\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 29 && BOOST_PP_ITERATION_START_3 >= 29\n#    define BOOST_PP_ITERATION_3 29\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 28 && BOOST_PP_ITERATION_START_3 >= 28\n#    define BOOST_PP_ITERATION_3 28\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 27 && BOOST_PP_ITERATION_START_3 >= 27\n#    define BOOST_PP_ITERATION_3 27\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 26 && BOOST_PP_ITERATION_START_3 >= 26\n#    define BOOST_PP_ITERATION_3 26\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 25 && BOOST_PP_ITERATION_START_3 >= 25\n#    define BOOST_PP_ITERATION_3 25\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 24 && BOOST_PP_ITERATION_START_3 >= 24\n#    define BOOST_PP_ITERATION_3 24\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 23 && BOOST_PP_ITERATION_START_3 >= 23\n#    define BOOST_PP_ITERATION_3 23\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 22 && BOOST_PP_ITERATION_START_3 >= 22\n#    define BOOST_PP_ITERATION_3 22\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 21 && BOOST_PP_ITERATION_START_3 >= 21\n#    define BOOST_PP_ITERATION_3 21\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 20 && BOOST_PP_ITERATION_START_3 >= 20\n#    define BOOST_PP_ITERATION_3 20\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# 
endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 19 && BOOST_PP_ITERATION_START_3 >= 19\n#    define BOOST_PP_ITERATION_3 19\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 18 && BOOST_PP_ITERATION_START_3 >= 18\n#    define BOOST_PP_ITERATION_3 18\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 17 && BOOST_PP_ITERATION_START_3 >= 17\n#    define BOOST_PP_ITERATION_3 17\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 16 && BOOST_PP_ITERATION_START_3 >= 16\n#    define BOOST_PP_ITERATION_3 16\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 15 && BOOST_PP_ITERATION_START_3 >= 15\n#    define BOOST_PP_ITERATION_3 15\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 14 && BOOST_PP_ITERATION_START_3 >= 14\n#    define BOOST_PP_ITERATION_3 14\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 13 && BOOST_PP_ITERATION_START_3 >= 13\n#    define BOOST_PP_ITERATION_3 13\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 12 && BOOST_PP_ITERATION_START_3 >= 12\n#    define BOOST_PP_ITERATION_3 12\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 11 && BOOST_PP_ITERATION_START_3 >= 11\n#    define BOOST_PP_ITERATION_3 11\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 10 && BOOST_PP_ITERATION_START_3 >= 10\n#    define BOOST_PP_ITERATION_3 10\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 9 && BOOST_PP_ITERATION_START_3 >= 9\n#    define 
BOOST_PP_ITERATION_3 9\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 8 && BOOST_PP_ITERATION_START_3 >= 8\n#    define BOOST_PP_ITERATION_3 8\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 7 && BOOST_PP_ITERATION_START_3 >= 7\n#    define BOOST_PP_ITERATION_3 7\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 6 && BOOST_PP_ITERATION_START_3 >= 6\n#    define BOOST_PP_ITERATION_3 6\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 5 && BOOST_PP_ITERATION_START_3 >= 5\n#    define BOOST_PP_ITERATION_3 5\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 4 && BOOST_PP_ITERATION_START_3 >= 4\n#    define BOOST_PP_ITERATION_3 4\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 3 && BOOST_PP_ITERATION_START_3 >= 3\n#    define BOOST_PP_ITERATION_3 3\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 2 && BOOST_PP_ITERATION_START_3 >= 2\n#    define BOOST_PP_ITERATION_3 2\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 1 && BOOST_PP_ITERATION_START_3 >= 1\n#    define BOOST_PP_ITERATION_3 1\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n# if BOOST_PP_ITERATION_FINISH_3 <= 0 && BOOST_PP_ITERATION_START_3 >= 0\n#    define BOOST_PP_ITERATION_3 0\n#    include BOOST_PP_FILENAME_3\n#    undef BOOST_PP_ITERATION_3\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/reverse4.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if BOOST_PP_ITERATION_FINISH_4 <= 256 && BOOST_PP_ITERATION_START_4 >= 256\n#    define BOOST_PP_ITERATION_4 256\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 255 && BOOST_PP_ITERATION_START_4 >= 255\n#    define BOOST_PP_ITERATION_4 255\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 254 && BOOST_PP_ITERATION_START_4 >= 254\n#    define BOOST_PP_ITERATION_4 254\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 253 && BOOST_PP_ITERATION_START_4 >= 253\n#    define BOOST_PP_ITERATION_4 253\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 252 && BOOST_PP_ITERATION_START_4 >= 252\n#    define BOOST_PP_ITERATION_4 252\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 251 && BOOST_PP_ITERATION_START_4 >= 251\n#    define BOOST_PP_ITERATION_4 251\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 250 && BOOST_PP_ITERATION_START_4 >= 250\n#    define BOOST_PP_ITERATION_4 250\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_4 <= 249 && BOOST_PP_ITERATION_START_4 >= 249\n#    define BOOST_PP_ITERATION_4 249\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 248 && BOOST_PP_ITERATION_START_4 >= 248\n#    define BOOST_PP_ITERATION_4 248\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 247 && BOOST_PP_ITERATION_START_4 >= 247\n#    define BOOST_PP_ITERATION_4 247\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 246 && BOOST_PP_ITERATION_START_4 >= 246\n#    define BOOST_PP_ITERATION_4 246\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 245 && BOOST_PP_ITERATION_START_4 >= 245\n#    define BOOST_PP_ITERATION_4 245\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 244 && BOOST_PP_ITERATION_START_4 >= 244\n#    define BOOST_PP_ITERATION_4 244\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 243 && BOOST_PP_ITERATION_START_4 >= 243\n#    define BOOST_PP_ITERATION_4 243\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 242 && BOOST_PP_ITERATION_START_4 >= 242\n#    define BOOST_PP_ITERATION_4 242\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 241 && BOOST_PP_ITERATION_START_4 >= 241\n#    define BOOST_PP_ITERATION_4 241\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 240 && BOOST_PP_ITERATION_START_4 >= 240\n#    define BOOST_PP_ITERATION_4 240\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 239 && BOOST_PP_ITERATION_START_4 >= 239\n#    
define BOOST_PP_ITERATION_4 239\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 238 && BOOST_PP_ITERATION_START_4 >= 238\n#    define BOOST_PP_ITERATION_4 238\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 237 && BOOST_PP_ITERATION_START_4 >= 237\n#    define BOOST_PP_ITERATION_4 237\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 236 && BOOST_PP_ITERATION_START_4 >= 236\n#    define BOOST_PP_ITERATION_4 236\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 235 && BOOST_PP_ITERATION_START_4 >= 235\n#    define BOOST_PP_ITERATION_4 235\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 234 && BOOST_PP_ITERATION_START_4 >= 234\n#    define BOOST_PP_ITERATION_4 234\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 233 && BOOST_PP_ITERATION_START_4 >= 233\n#    define BOOST_PP_ITERATION_4 233\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 232 && BOOST_PP_ITERATION_START_4 >= 232\n#    define BOOST_PP_ITERATION_4 232\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 231 && BOOST_PP_ITERATION_START_4 >= 231\n#    define BOOST_PP_ITERATION_4 231\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 230 && BOOST_PP_ITERATION_START_4 >= 230\n#    define BOOST_PP_ITERATION_4 230\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 229 && BOOST_PP_ITERATION_START_4 >= 229\n#    define BOOST_PP_ITERATION_4 229\n#    include BOOST_PP_FILENAME_4\n#    undef 
BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 228 && BOOST_PP_ITERATION_START_4 >= 228\n#    define BOOST_PP_ITERATION_4 228\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 227 && BOOST_PP_ITERATION_START_4 >= 227\n#    define BOOST_PP_ITERATION_4 227\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 226 && BOOST_PP_ITERATION_START_4 >= 226\n#    define BOOST_PP_ITERATION_4 226\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 225 && BOOST_PP_ITERATION_START_4 >= 225\n#    define BOOST_PP_ITERATION_4 225\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 224 && BOOST_PP_ITERATION_START_4 >= 224\n#    define BOOST_PP_ITERATION_4 224\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 223 && BOOST_PP_ITERATION_START_4 >= 223\n#    define BOOST_PP_ITERATION_4 223\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 222 && BOOST_PP_ITERATION_START_4 >= 222\n#    define BOOST_PP_ITERATION_4 222\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 221 && BOOST_PP_ITERATION_START_4 >= 221\n#    define BOOST_PP_ITERATION_4 221\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 220 && BOOST_PP_ITERATION_START_4 >= 220\n#    define BOOST_PP_ITERATION_4 220\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 219 && BOOST_PP_ITERATION_START_4 >= 219\n#    define BOOST_PP_ITERATION_4 219\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 218 && 
BOOST_PP_ITERATION_START_4 >= 218\n#    define BOOST_PP_ITERATION_4 218\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 217 && BOOST_PP_ITERATION_START_4 >= 217\n#    define BOOST_PP_ITERATION_4 217\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 216 && BOOST_PP_ITERATION_START_4 >= 216\n#    define BOOST_PP_ITERATION_4 216\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 215 && BOOST_PP_ITERATION_START_4 >= 215\n#    define BOOST_PP_ITERATION_4 215\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 214 && BOOST_PP_ITERATION_START_4 >= 214\n#    define BOOST_PP_ITERATION_4 214\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 213 && BOOST_PP_ITERATION_START_4 >= 213\n#    define BOOST_PP_ITERATION_4 213\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 212 && BOOST_PP_ITERATION_START_4 >= 212\n#    define BOOST_PP_ITERATION_4 212\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 211 && BOOST_PP_ITERATION_START_4 >= 211\n#    define BOOST_PP_ITERATION_4 211\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 210 && BOOST_PP_ITERATION_START_4 >= 210\n#    define BOOST_PP_ITERATION_4 210\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 209 && BOOST_PP_ITERATION_START_4 >= 209\n#    define BOOST_PP_ITERATION_4 209\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 208 && BOOST_PP_ITERATION_START_4 >= 208\n#    define BOOST_PP_ITERATION_4 208\n#    
include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 207 && BOOST_PP_ITERATION_START_4 >= 207\n#    define BOOST_PP_ITERATION_4 207\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 206 && BOOST_PP_ITERATION_START_4 >= 206\n#    define BOOST_PP_ITERATION_4 206\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 205 && BOOST_PP_ITERATION_START_4 >= 205\n#    define BOOST_PP_ITERATION_4 205\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 204 && BOOST_PP_ITERATION_START_4 >= 204\n#    define BOOST_PP_ITERATION_4 204\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 203 && BOOST_PP_ITERATION_START_4 >= 203\n#    define BOOST_PP_ITERATION_4 203\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 202 && BOOST_PP_ITERATION_START_4 >= 202\n#    define BOOST_PP_ITERATION_4 202\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 201 && BOOST_PP_ITERATION_START_4 >= 201\n#    define BOOST_PP_ITERATION_4 201\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 200 && BOOST_PP_ITERATION_START_4 >= 200\n#    define BOOST_PP_ITERATION_4 200\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 199 && BOOST_PP_ITERATION_START_4 >= 199\n#    define BOOST_PP_ITERATION_4 199\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 198 && BOOST_PP_ITERATION_START_4 >= 198\n#    define BOOST_PP_ITERATION_4 198\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_4 <= 197 && BOOST_PP_ITERATION_START_4 >= 197\n#    define BOOST_PP_ITERATION_4 197\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 196 && BOOST_PP_ITERATION_START_4 >= 196\n#    define BOOST_PP_ITERATION_4 196\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 195 && BOOST_PP_ITERATION_START_4 >= 195\n#    define BOOST_PP_ITERATION_4 195\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 194 && BOOST_PP_ITERATION_START_4 >= 194\n#    define BOOST_PP_ITERATION_4 194\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 193 && BOOST_PP_ITERATION_START_4 >= 193\n#    define BOOST_PP_ITERATION_4 193\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 192 && BOOST_PP_ITERATION_START_4 >= 192\n#    define BOOST_PP_ITERATION_4 192\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 191 && BOOST_PP_ITERATION_START_4 >= 191\n#    define BOOST_PP_ITERATION_4 191\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 190 && BOOST_PP_ITERATION_START_4 >= 190\n#    define BOOST_PP_ITERATION_4 190\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 189 && BOOST_PP_ITERATION_START_4 >= 189\n#    define BOOST_PP_ITERATION_4 189\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 188 && BOOST_PP_ITERATION_START_4 >= 188\n#    define BOOST_PP_ITERATION_4 188\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 187 && BOOST_PP_ITERATION_START_4 >= 187\n#    
define BOOST_PP_ITERATION_4 187\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 186 && BOOST_PP_ITERATION_START_4 >= 186\n#    define BOOST_PP_ITERATION_4 186\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 185 && BOOST_PP_ITERATION_START_4 >= 185\n#    define BOOST_PP_ITERATION_4 185\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 184 && BOOST_PP_ITERATION_START_4 >= 184\n#    define BOOST_PP_ITERATION_4 184\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 183 && BOOST_PP_ITERATION_START_4 >= 183\n#    define BOOST_PP_ITERATION_4 183\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 182 && BOOST_PP_ITERATION_START_4 >= 182\n#    define BOOST_PP_ITERATION_4 182\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 181 && BOOST_PP_ITERATION_START_4 >= 181\n#    define BOOST_PP_ITERATION_4 181\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 180 && BOOST_PP_ITERATION_START_4 >= 180\n#    define BOOST_PP_ITERATION_4 180\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 179 && BOOST_PP_ITERATION_START_4 >= 179\n#    define BOOST_PP_ITERATION_4 179\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 178 && BOOST_PP_ITERATION_START_4 >= 178\n#    define BOOST_PP_ITERATION_4 178\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 177 && BOOST_PP_ITERATION_START_4 >= 177\n#    define BOOST_PP_ITERATION_4 177\n#    include BOOST_PP_FILENAME_4\n#    undef 
BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 176 && BOOST_PP_ITERATION_START_4 >= 176\n#    define BOOST_PP_ITERATION_4 176\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 175 && BOOST_PP_ITERATION_START_4 >= 175\n#    define BOOST_PP_ITERATION_4 175\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 174 && BOOST_PP_ITERATION_START_4 >= 174\n#    define BOOST_PP_ITERATION_4 174\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 173 && BOOST_PP_ITERATION_START_4 >= 173\n#    define BOOST_PP_ITERATION_4 173\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 172 && BOOST_PP_ITERATION_START_4 >= 172\n#    define BOOST_PP_ITERATION_4 172\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 171 && BOOST_PP_ITERATION_START_4 >= 171\n#    define BOOST_PP_ITERATION_4 171\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 170 && BOOST_PP_ITERATION_START_4 >= 170\n#    define BOOST_PP_ITERATION_4 170\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 169 && BOOST_PP_ITERATION_START_4 >= 169\n#    define BOOST_PP_ITERATION_4 169\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 168 && BOOST_PP_ITERATION_START_4 >= 168\n#    define BOOST_PP_ITERATION_4 168\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 167 && BOOST_PP_ITERATION_START_4 >= 167\n#    define BOOST_PP_ITERATION_4 167\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 166 && 
BOOST_PP_ITERATION_START_4 >= 166\n#    define BOOST_PP_ITERATION_4 166\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 165 && BOOST_PP_ITERATION_START_4 >= 165\n#    define BOOST_PP_ITERATION_4 165\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 164 && BOOST_PP_ITERATION_START_4 >= 164\n#    define BOOST_PP_ITERATION_4 164\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 163 && BOOST_PP_ITERATION_START_4 >= 163\n#    define BOOST_PP_ITERATION_4 163\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 162 && BOOST_PP_ITERATION_START_4 >= 162\n#    define BOOST_PP_ITERATION_4 162\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 161 && BOOST_PP_ITERATION_START_4 >= 161\n#    define BOOST_PP_ITERATION_4 161\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 160 && BOOST_PP_ITERATION_START_4 >= 160\n#    define BOOST_PP_ITERATION_4 160\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 159 && BOOST_PP_ITERATION_START_4 >= 159\n#    define BOOST_PP_ITERATION_4 159\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 158 && BOOST_PP_ITERATION_START_4 >= 158\n#    define BOOST_PP_ITERATION_4 158\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 157 && BOOST_PP_ITERATION_START_4 >= 157\n#    define BOOST_PP_ITERATION_4 157\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 156 && BOOST_PP_ITERATION_START_4 >= 156\n#    define BOOST_PP_ITERATION_4 156\n#    
include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 155 && BOOST_PP_ITERATION_START_4 >= 155\n#    define BOOST_PP_ITERATION_4 155\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 154 && BOOST_PP_ITERATION_START_4 >= 154\n#    define BOOST_PP_ITERATION_4 154\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 153 && BOOST_PP_ITERATION_START_4 >= 153\n#    define BOOST_PP_ITERATION_4 153\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 152 && BOOST_PP_ITERATION_START_4 >= 152\n#    define BOOST_PP_ITERATION_4 152\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 151 && BOOST_PP_ITERATION_START_4 >= 151\n#    define BOOST_PP_ITERATION_4 151\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 150 && BOOST_PP_ITERATION_START_4 >= 150\n#    define BOOST_PP_ITERATION_4 150\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 149 && BOOST_PP_ITERATION_START_4 >= 149\n#    define BOOST_PP_ITERATION_4 149\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 148 && BOOST_PP_ITERATION_START_4 >= 148\n#    define BOOST_PP_ITERATION_4 148\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 147 && BOOST_PP_ITERATION_START_4 >= 147\n#    define BOOST_PP_ITERATION_4 147\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 146 && BOOST_PP_ITERATION_START_4 >= 146\n#    define BOOST_PP_ITERATION_4 146\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_4 <= 145 && BOOST_PP_ITERATION_START_4 >= 145\n#    define BOOST_PP_ITERATION_4 145\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 144 && BOOST_PP_ITERATION_START_4 >= 144\n#    define BOOST_PP_ITERATION_4 144\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 143 && BOOST_PP_ITERATION_START_4 >= 143\n#    define BOOST_PP_ITERATION_4 143\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 142 && BOOST_PP_ITERATION_START_4 >= 142\n#    define BOOST_PP_ITERATION_4 142\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 141 && BOOST_PP_ITERATION_START_4 >= 141\n#    define BOOST_PP_ITERATION_4 141\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 140 && BOOST_PP_ITERATION_START_4 >= 140\n#    define BOOST_PP_ITERATION_4 140\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 139 && BOOST_PP_ITERATION_START_4 >= 139\n#    define BOOST_PP_ITERATION_4 139\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 138 && BOOST_PP_ITERATION_START_4 >= 138\n#    define BOOST_PP_ITERATION_4 138\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 137 && BOOST_PP_ITERATION_START_4 >= 137\n#    define BOOST_PP_ITERATION_4 137\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 136 && BOOST_PP_ITERATION_START_4 >= 136\n#    define BOOST_PP_ITERATION_4 136\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 135 && BOOST_PP_ITERATION_START_4 >= 135\n#    
define BOOST_PP_ITERATION_4 135\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 134 && BOOST_PP_ITERATION_START_4 >= 134\n#    define BOOST_PP_ITERATION_4 134\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 133 && BOOST_PP_ITERATION_START_4 >= 133\n#    define BOOST_PP_ITERATION_4 133\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 132 && BOOST_PP_ITERATION_START_4 >= 132\n#    define BOOST_PP_ITERATION_4 132\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 131 && BOOST_PP_ITERATION_START_4 >= 131\n#    define BOOST_PP_ITERATION_4 131\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 130 && BOOST_PP_ITERATION_START_4 >= 130\n#    define BOOST_PP_ITERATION_4 130\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 129 && BOOST_PP_ITERATION_START_4 >= 129\n#    define BOOST_PP_ITERATION_4 129\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 128 && BOOST_PP_ITERATION_START_4 >= 128\n#    define BOOST_PP_ITERATION_4 128\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 127 && BOOST_PP_ITERATION_START_4 >= 127\n#    define BOOST_PP_ITERATION_4 127\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 126 && BOOST_PP_ITERATION_START_4 >= 126\n#    define BOOST_PP_ITERATION_4 126\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 125 && BOOST_PP_ITERATION_START_4 >= 125\n#    define BOOST_PP_ITERATION_4 125\n#    include BOOST_PP_FILENAME_4\n#    undef 
BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 124 && BOOST_PP_ITERATION_START_4 >= 124\n#    define BOOST_PP_ITERATION_4 124\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 123 && BOOST_PP_ITERATION_START_4 >= 123\n#    define BOOST_PP_ITERATION_4 123\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 122 && BOOST_PP_ITERATION_START_4 >= 122\n#    define BOOST_PP_ITERATION_4 122\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 121 && BOOST_PP_ITERATION_START_4 >= 121\n#    define BOOST_PP_ITERATION_4 121\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 120 && BOOST_PP_ITERATION_START_4 >= 120\n#    define BOOST_PP_ITERATION_4 120\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 119 && BOOST_PP_ITERATION_START_4 >= 119\n#    define BOOST_PP_ITERATION_4 119\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 118 && BOOST_PP_ITERATION_START_4 >= 118\n#    define BOOST_PP_ITERATION_4 118\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 117 && BOOST_PP_ITERATION_START_4 >= 117\n#    define BOOST_PP_ITERATION_4 117\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 116 && BOOST_PP_ITERATION_START_4 >= 116\n#    define BOOST_PP_ITERATION_4 116\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 115 && BOOST_PP_ITERATION_START_4 >= 115\n#    define BOOST_PP_ITERATION_4 115\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 114 && 
BOOST_PP_ITERATION_START_4 >= 114\n#    define BOOST_PP_ITERATION_4 114\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 113 && BOOST_PP_ITERATION_START_4 >= 113\n#    define BOOST_PP_ITERATION_4 113\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 112 && BOOST_PP_ITERATION_START_4 >= 112\n#    define BOOST_PP_ITERATION_4 112\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 111 && BOOST_PP_ITERATION_START_4 >= 111\n#    define BOOST_PP_ITERATION_4 111\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 110 && BOOST_PP_ITERATION_START_4 >= 110\n#    define BOOST_PP_ITERATION_4 110\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 109 && BOOST_PP_ITERATION_START_4 >= 109\n#    define BOOST_PP_ITERATION_4 109\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 108 && BOOST_PP_ITERATION_START_4 >= 108\n#    define BOOST_PP_ITERATION_4 108\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 107 && BOOST_PP_ITERATION_START_4 >= 107\n#    define BOOST_PP_ITERATION_4 107\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 106 && BOOST_PP_ITERATION_START_4 >= 106\n#    define BOOST_PP_ITERATION_4 106\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 105 && BOOST_PP_ITERATION_START_4 >= 105\n#    define BOOST_PP_ITERATION_4 105\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 104 && BOOST_PP_ITERATION_START_4 >= 104\n#    define BOOST_PP_ITERATION_4 104\n#    
include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 103 && BOOST_PP_ITERATION_START_4 >= 103\n#    define BOOST_PP_ITERATION_4 103\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 102 && BOOST_PP_ITERATION_START_4 >= 102\n#    define BOOST_PP_ITERATION_4 102\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 101 && BOOST_PP_ITERATION_START_4 >= 101\n#    define BOOST_PP_ITERATION_4 101\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 100 && BOOST_PP_ITERATION_START_4 >= 100\n#    define BOOST_PP_ITERATION_4 100\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 99 && BOOST_PP_ITERATION_START_4 >= 99\n#    define BOOST_PP_ITERATION_4 99\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 98 && BOOST_PP_ITERATION_START_4 >= 98\n#    define BOOST_PP_ITERATION_4 98\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 97 && BOOST_PP_ITERATION_START_4 >= 97\n#    define BOOST_PP_ITERATION_4 97\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 96 && BOOST_PP_ITERATION_START_4 >= 96\n#    define BOOST_PP_ITERATION_4 96\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 95 && BOOST_PP_ITERATION_START_4 >= 95\n#    define BOOST_PP_ITERATION_4 95\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 94 && BOOST_PP_ITERATION_START_4 >= 94\n#    define BOOST_PP_ITERATION_4 94\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_4 <= 93 && BOOST_PP_ITERATION_START_4 >= 93\n#    define BOOST_PP_ITERATION_4 93\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 92 && BOOST_PP_ITERATION_START_4 >= 92\n#    define BOOST_PP_ITERATION_4 92\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 91 && BOOST_PP_ITERATION_START_4 >= 91\n#    define BOOST_PP_ITERATION_4 91\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 90 && BOOST_PP_ITERATION_START_4 >= 90\n#    define BOOST_PP_ITERATION_4 90\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 89 && BOOST_PP_ITERATION_START_4 >= 89\n#    define BOOST_PP_ITERATION_4 89\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 88 && BOOST_PP_ITERATION_START_4 >= 88\n#    define BOOST_PP_ITERATION_4 88\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 87 && BOOST_PP_ITERATION_START_4 >= 87\n#    define BOOST_PP_ITERATION_4 87\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 86 && BOOST_PP_ITERATION_START_4 >= 86\n#    define BOOST_PP_ITERATION_4 86\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 85 && BOOST_PP_ITERATION_START_4 >= 85\n#    define BOOST_PP_ITERATION_4 85\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 84 && BOOST_PP_ITERATION_START_4 >= 84\n#    define BOOST_PP_ITERATION_4 84\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 83 && BOOST_PP_ITERATION_START_4 >= 83\n#    define BOOST_PP_ITERATION_4 83\n# 
   include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 82 && BOOST_PP_ITERATION_START_4 >= 82\n#    define BOOST_PP_ITERATION_4 82\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 81 && BOOST_PP_ITERATION_START_4 >= 81\n#    define BOOST_PP_ITERATION_4 81\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 80 && BOOST_PP_ITERATION_START_4 >= 80\n#    define BOOST_PP_ITERATION_4 80\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 79 && BOOST_PP_ITERATION_START_4 >= 79\n#    define BOOST_PP_ITERATION_4 79\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 78 && BOOST_PP_ITERATION_START_4 >= 78\n#    define BOOST_PP_ITERATION_4 78\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 77 && BOOST_PP_ITERATION_START_4 >= 77\n#    define BOOST_PP_ITERATION_4 77\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 76 && BOOST_PP_ITERATION_START_4 >= 76\n#    define BOOST_PP_ITERATION_4 76\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 75 && BOOST_PP_ITERATION_START_4 >= 75\n#    define BOOST_PP_ITERATION_4 75\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 74 && BOOST_PP_ITERATION_START_4 >= 74\n#    define BOOST_PP_ITERATION_4 74\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 73 && BOOST_PP_ITERATION_START_4 >= 73\n#    define BOOST_PP_ITERATION_4 73\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 
72 && BOOST_PP_ITERATION_START_4 >= 72\n#    define BOOST_PP_ITERATION_4 72\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 71 && BOOST_PP_ITERATION_START_4 >= 71\n#    define BOOST_PP_ITERATION_4 71\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 70 && BOOST_PP_ITERATION_START_4 >= 70\n#    define BOOST_PP_ITERATION_4 70\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 69 && BOOST_PP_ITERATION_START_4 >= 69\n#    define BOOST_PP_ITERATION_4 69\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 68 && BOOST_PP_ITERATION_START_4 >= 68\n#    define BOOST_PP_ITERATION_4 68\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 67 && BOOST_PP_ITERATION_START_4 >= 67\n#    define BOOST_PP_ITERATION_4 67\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 66 && BOOST_PP_ITERATION_START_4 >= 66\n#    define BOOST_PP_ITERATION_4 66\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 65 && BOOST_PP_ITERATION_START_4 >= 65\n#    define BOOST_PP_ITERATION_4 65\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 64 && BOOST_PP_ITERATION_START_4 >= 64\n#    define BOOST_PP_ITERATION_4 64\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 63 && BOOST_PP_ITERATION_START_4 >= 63\n#    define BOOST_PP_ITERATION_4 63\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 62 && BOOST_PP_ITERATION_START_4 >= 62\n#    define BOOST_PP_ITERATION_4 62\n#    include 
BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 61 && BOOST_PP_ITERATION_START_4 >= 61\n#    define BOOST_PP_ITERATION_4 61\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 60 && BOOST_PP_ITERATION_START_4 >= 60\n#    define BOOST_PP_ITERATION_4 60\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 59 && BOOST_PP_ITERATION_START_4 >= 59\n#    define BOOST_PP_ITERATION_4 59\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 58 && BOOST_PP_ITERATION_START_4 >= 58\n#    define BOOST_PP_ITERATION_4 58\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 57 && BOOST_PP_ITERATION_START_4 >= 57\n#    define BOOST_PP_ITERATION_4 57\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 56 && BOOST_PP_ITERATION_START_4 >= 56\n#    define BOOST_PP_ITERATION_4 56\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 55 && BOOST_PP_ITERATION_START_4 >= 55\n#    define BOOST_PP_ITERATION_4 55\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 54 && BOOST_PP_ITERATION_START_4 >= 54\n#    define BOOST_PP_ITERATION_4 54\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 53 && BOOST_PP_ITERATION_START_4 >= 53\n#    define BOOST_PP_ITERATION_4 53\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 52 && BOOST_PP_ITERATION_START_4 >= 52\n#    define BOOST_PP_ITERATION_4 52\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 51 && 
BOOST_PP_ITERATION_START_4 >= 51\n#    define BOOST_PP_ITERATION_4 51\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 50 && BOOST_PP_ITERATION_START_4 >= 50\n#    define BOOST_PP_ITERATION_4 50\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 49 && BOOST_PP_ITERATION_START_4 >= 49\n#    define BOOST_PP_ITERATION_4 49\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 48 && BOOST_PP_ITERATION_START_4 >= 48\n#    define BOOST_PP_ITERATION_4 48\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 47 && BOOST_PP_ITERATION_START_4 >= 47\n#    define BOOST_PP_ITERATION_4 47\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 46 && BOOST_PP_ITERATION_START_4 >= 46\n#    define BOOST_PP_ITERATION_4 46\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 45 && BOOST_PP_ITERATION_START_4 >= 45\n#    define BOOST_PP_ITERATION_4 45\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 44 && BOOST_PP_ITERATION_START_4 >= 44\n#    define BOOST_PP_ITERATION_4 44\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 43 && BOOST_PP_ITERATION_START_4 >= 43\n#    define BOOST_PP_ITERATION_4 43\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 42 && BOOST_PP_ITERATION_START_4 >= 42\n#    define BOOST_PP_ITERATION_4 42\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 41 && BOOST_PP_ITERATION_START_4 >= 41\n#    define BOOST_PP_ITERATION_4 41\n#    include BOOST_PP_FILENAME_4\n#    
undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 40 && BOOST_PP_ITERATION_START_4 >= 40\n#    define BOOST_PP_ITERATION_4 40\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 39 && BOOST_PP_ITERATION_START_4 >= 39\n#    define BOOST_PP_ITERATION_4 39\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 38 && BOOST_PP_ITERATION_START_4 >= 38\n#    define BOOST_PP_ITERATION_4 38\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 37 && BOOST_PP_ITERATION_START_4 >= 37\n#    define BOOST_PP_ITERATION_4 37\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 36 && BOOST_PP_ITERATION_START_4 >= 36\n#    define BOOST_PP_ITERATION_4 36\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 35 && BOOST_PP_ITERATION_START_4 >= 35\n#    define BOOST_PP_ITERATION_4 35\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 34 && BOOST_PP_ITERATION_START_4 >= 34\n#    define BOOST_PP_ITERATION_4 34\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 33 && BOOST_PP_ITERATION_START_4 >= 33\n#    define BOOST_PP_ITERATION_4 33\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 32 && BOOST_PP_ITERATION_START_4 >= 32\n#    define BOOST_PP_ITERATION_4 32\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 31 && BOOST_PP_ITERATION_START_4 >= 31\n#    define BOOST_PP_ITERATION_4 31\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 30 && BOOST_PP_ITERATION_START_4 >= 
30\n#    define BOOST_PP_ITERATION_4 30\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 29 && BOOST_PP_ITERATION_START_4 >= 29\n#    define BOOST_PP_ITERATION_4 29\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 28 && BOOST_PP_ITERATION_START_4 >= 28\n#    define BOOST_PP_ITERATION_4 28\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 27 && BOOST_PP_ITERATION_START_4 >= 27\n#    define BOOST_PP_ITERATION_4 27\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 26 && BOOST_PP_ITERATION_START_4 >= 26\n#    define BOOST_PP_ITERATION_4 26\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 25 && BOOST_PP_ITERATION_START_4 >= 25\n#    define BOOST_PP_ITERATION_4 25\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 24 && BOOST_PP_ITERATION_START_4 >= 24\n#    define BOOST_PP_ITERATION_4 24\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 23 && BOOST_PP_ITERATION_START_4 >= 23\n#    define BOOST_PP_ITERATION_4 23\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 22 && BOOST_PP_ITERATION_START_4 >= 22\n#    define BOOST_PP_ITERATION_4 22\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 21 && BOOST_PP_ITERATION_START_4 >= 21\n#    define BOOST_PP_ITERATION_4 21\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 20 && BOOST_PP_ITERATION_START_4 >= 20\n#    define BOOST_PP_ITERATION_4 20\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# 
endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 19 && BOOST_PP_ITERATION_START_4 >= 19\n#    define BOOST_PP_ITERATION_4 19\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 18 && BOOST_PP_ITERATION_START_4 >= 18\n#    define BOOST_PP_ITERATION_4 18\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 17 && BOOST_PP_ITERATION_START_4 >= 17\n#    define BOOST_PP_ITERATION_4 17\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 16 && BOOST_PP_ITERATION_START_4 >= 16\n#    define BOOST_PP_ITERATION_4 16\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 15 && BOOST_PP_ITERATION_START_4 >= 15\n#    define BOOST_PP_ITERATION_4 15\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 14 && BOOST_PP_ITERATION_START_4 >= 14\n#    define BOOST_PP_ITERATION_4 14\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 13 && BOOST_PP_ITERATION_START_4 >= 13\n#    define BOOST_PP_ITERATION_4 13\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 12 && BOOST_PP_ITERATION_START_4 >= 12\n#    define BOOST_PP_ITERATION_4 12\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 11 && BOOST_PP_ITERATION_START_4 >= 11\n#    define BOOST_PP_ITERATION_4 11\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 10 && BOOST_PP_ITERATION_START_4 >= 10\n#    define BOOST_PP_ITERATION_4 10\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 9 && BOOST_PP_ITERATION_START_4 >= 9\n#    define 
BOOST_PP_ITERATION_4 9\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 8 && BOOST_PP_ITERATION_START_4 >= 8\n#    define BOOST_PP_ITERATION_4 8\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 7 && BOOST_PP_ITERATION_START_4 >= 7\n#    define BOOST_PP_ITERATION_4 7\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 6 && BOOST_PP_ITERATION_START_4 >= 6\n#    define BOOST_PP_ITERATION_4 6\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 5 && BOOST_PP_ITERATION_START_4 >= 5\n#    define BOOST_PP_ITERATION_4 5\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 4 && BOOST_PP_ITERATION_START_4 >= 4\n#    define BOOST_PP_ITERATION_4 4\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 3 && BOOST_PP_ITERATION_START_4 >= 3\n#    define BOOST_PP_ITERATION_4 3\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 2 && BOOST_PP_ITERATION_START_4 >= 2\n#    define BOOST_PP_ITERATION_4 2\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 1 && BOOST_PP_ITERATION_START_4 >= 1\n#    define BOOST_PP_ITERATION_4 1\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n# if BOOST_PP_ITERATION_FINISH_4 <= 0 && BOOST_PP_ITERATION_START_4 >= 0\n#    define BOOST_PP_ITERATION_4 0\n#    include BOOST_PP_FILENAME_4\n#    undef BOOST_PP_ITERATION_4\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/iter/reverse5.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if BOOST_PP_ITERATION_FINISH_5 <= 256 && BOOST_PP_ITERATION_START_5 >= 256\n#    define BOOST_PP_ITERATION_5 256\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 255 && BOOST_PP_ITERATION_START_5 >= 255\n#    define BOOST_PP_ITERATION_5 255\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 254 && BOOST_PP_ITERATION_START_5 >= 254\n#    define BOOST_PP_ITERATION_5 254\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 253 && BOOST_PP_ITERATION_START_5 >= 253\n#    define BOOST_PP_ITERATION_5 253\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 252 && BOOST_PP_ITERATION_START_5 >= 252\n#    define BOOST_PP_ITERATION_5 252\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 251 && BOOST_PP_ITERATION_START_5 >= 251\n#    define BOOST_PP_ITERATION_5 251\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 250 && BOOST_PP_ITERATION_START_5 >= 250\n#    define BOOST_PP_ITERATION_5 250\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_5 <= 249 && BOOST_PP_ITERATION_START_5 >= 249\n#    define BOOST_PP_ITERATION_5 249\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 248 && BOOST_PP_ITERATION_START_5 >= 248\n#    define BOOST_PP_ITERATION_5 248\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 247 && BOOST_PP_ITERATION_START_5 >= 247\n#    define BOOST_PP_ITERATION_5 247\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 246 && BOOST_PP_ITERATION_START_5 >= 246\n#    define BOOST_PP_ITERATION_5 246\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 245 && BOOST_PP_ITERATION_START_5 >= 245\n#    define BOOST_PP_ITERATION_5 245\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 244 && BOOST_PP_ITERATION_START_5 >= 244\n#    define BOOST_PP_ITERATION_5 244\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 243 && BOOST_PP_ITERATION_START_5 >= 243\n#    define BOOST_PP_ITERATION_5 243\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 242 && BOOST_PP_ITERATION_START_5 >= 242\n#    define BOOST_PP_ITERATION_5 242\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 241 && BOOST_PP_ITERATION_START_5 >= 241\n#    define BOOST_PP_ITERATION_5 241\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 240 && BOOST_PP_ITERATION_START_5 >= 240\n#    define BOOST_PP_ITERATION_5 240\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 239 && BOOST_PP_ITERATION_START_5 >= 239\n#    
define BOOST_PP_ITERATION_5 239\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 238 && BOOST_PP_ITERATION_START_5 >= 238\n#    define BOOST_PP_ITERATION_5 238\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 237 && BOOST_PP_ITERATION_START_5 >= 237\n#    define BOOST_PP_ITERATION_5 237\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 236 && BOOST_PP_ITERATION_START_5 >= 236\n#    define BOOST_PP_ITERATION_5 236\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 235 && BOOST_PP_ITERATION_START_5 >= 235\n#    define BOOST_PP_ITERATION_5 235\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 234 && BOOST_PP_ITERATION_START_5 >= 234\n#    define BOOST_PP_ITERATION_5 234\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 233 && BOOST_PP_ITERATION_START_5 >= 233\n#    define BOOST_PP_ITERATION_5 233\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 232 && BOOST_PP_ITERATION_START_5 >= 232\n#    define BOOST_PP_ITERATION_5 232\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 231 && BOOST_PP_ITERATION_START_5 >= 231\n#    define BOOST_PP_ITERATION_5 231\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 230 && BOOST_PP_ITERATION_START_5 >= 230\n#    define BOOST_PP_ITERATION_5 230\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 229 && BOOST_PP_ITERATION_START_5 >= 229\n#    define BOOST_PP_ITERATION_5 229\n#    include BOOST_PP_FILENAME_5\n#    undef 
BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 228 && BOOST_PP_ITERATION_START_5 >= 228\n#    define BOOST_PP_ITERATION_5 228\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 227 && BOOST_PP_ITERATION_START_5 >= 227\n#    define BOOST_PP_ITERATION_5 227\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 226 && BOOST_PP_ITERATION_START_5 >= 226\n#    define BOOST_PP_ITERATION_5 226\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 225 && BOOST_PP_ITERATION_START_5 >= 225\n#    define BOOST_PP_ITERATION_5 225\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 224 && BOOST_PP_ITERATION_START_5 >= 224\n#    define BOOST_PP_ITERATION_5 224\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 223 && BOOST_PP_ITERATION_START_5 >= 223\n#    define BOOST_PP_ITERATION_5 223\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 222 && BOOST_PP_ITERATION_START_5 >= 222\n#    define BOOST_PP_ITERATION_5 222\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 221 && BOOST_PP_ITERATION_START_5 >= 221\n#    define BOOST_PP_ITERATION_5 221\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 220 && BOOST_PP_ITERATION_START_5 >= 220\n#    define BOOST_PP_ITERATION_5 220\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 219 && BOOST_PP_ITERATION_START_5 >= 219\n#    define BOOST_PP_ITERATION_5 219\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 218 && 
BOOST_PP_ITERATION_START_5 >= 218\n#    define BOOST_PP_ITERATION_5 218\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 217 && BOOST_PP_ITERATION_START_5 >= 217\n#    define BOOST_PP_ITERATION_5 217\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 216 && BOOST_PP_ITERATION_START_5 >= 216\n#    define BOOST_PP_ITERATION_5 216\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 215 && BOOST_PP_ITERATION_START_5 >= 215\n#    define BOOST_PP_ITERATION_5 215\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 214 && BOOST_PP_ITERATION_START_5 >= 214\n#    define BOOST_PP_ITERATION_5 214\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 213 && BOOST_PP_ITERATION_START_5 >= 213\n#    define BOOST_PP_ITERATION_5 213\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 212 && BOOST_PP_ITERATION_START_5 >= 212\n#    define BOOST_PP_ITERATION_5 212\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 211 && BOOST_PP_ITERATION_START_5 >= 211\n#    define BOOST_PP_ITERATION_5 211\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 210 && BOOST_PP_ITERATION_START_5 >= 210\n#    define BOOST_PP_ITERATION_5 210\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 209 && BOOST_PP_ITERATION_START_5 >= 209\n#    define BOOST_PP_ITERATION_5 209\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 208 && BOOST_PP_ITERATION_START_5 >= 208\n#    define BOOST_PP_ITERATION_5 208\n#    
include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 207 && BOOST_PP_ITERATION_START_5 >= 207\n#    define BOOST_PP_ITERATION_5 207\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 206 && BOOST_PP_ITERATION_START_5 >= 206\n#    define BOOST_PP_ITERATION_5 206\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 205 && BOOST_PP_ITERATION_START_5 >= 205\n#    define BOOST_PP_ITERATION_5 205\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 204 && BOOST_PP_ITERATION_START_5 >= 204\n#    define BOOST_PP_ITERATION_5 204\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 203 && BOOST_PP_ITERATION_START_5 >= 203\n#    define BOOST_PP_ITERATION_5 203\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 202 && BOOST_PP_ITERATION_START_5 >= 202\n#    define BOOST_PP_ITERATION_5 202\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 201 && BOOST_PP_ITERATION_START_5 >= 201\n#    define BOOST_PP_ITERATION_5 201\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 200 && BOOST_PP_ITERATION_START_5 >= 200\n#    define BOOST_PP_ITERATION_5 200\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 199 && BOOST_PP_ITERATION_START_5 >= 199\n#    define BOOST_PP_ITERATION_5 199\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 198 && BOOST_PP_ITERATION_START_5 >= 198\n#    define BOOST_PP_ITERATION_5 198\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_5 <= 197 && BOOST_PP_ITERATION_START_5 >= 197\n#    define BOOST_PP_ITERATION_5 197\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 196 && BOOST_PP_ITERATION_START_5 >= 196\n#    define BOOST_PP_ITERATION_5 196\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 195 && BOOST_PP_ITERATION_START_5 >= 195\n#    define BOOST_PP_ITERATION_5 195\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 194 && BOOST_PP_ITERATION_START_5 >= 194\n#    define BOOST_PP_ITERATION_5 194\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 193 && BOOST_PP_ITERATION_START_5 >= 193\n#    define BOOST_PP_ITERATION_5 193\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 192 && BOOST_PP_ITERATION_START_5 >= 192\n#    define BOOST_PP_ITERATION_5 192\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 191 && BOOST_PP_ITERATION_START_5 >= 191\n#    define BOOST_PP_ITERATION_5 191\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 190 && BOOST_PP_ITERATION_START_5 >= 190\n#    define BOOST_PP_ITERATION_5 190\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 189 && BOOST_PP_ITERATION_START_5 >= 189\n#    define BOOST_PP_ITERATION_5 189\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 188 && BOOST_PP_ITERATION_START_5 >= 188\n#    define BOOST_PP_ITERATION_5 188\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 187 && BOOST_PP_ITERATION_START_5 >= 187\n#    
define BOOST_PP_ITERATION_5 187\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 186 && BOOST_PP_ITERATION_START_5 >= 186\n#    define BOOST_PP_ITERATION_5 186\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 185 && BOOST_PP_ITERATION_START_5 >= 185\n#    define BOOST_PP_ITERATION_5 185\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 184 && BOOST_PP_ITERATION_START_5 >= 184\n#    define BOOST_PP_ITERATION_5 184\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 183 && BOOST_PP_ITERATION_START_5 >= 183\n#    define BOOST_PP_ITERATION_5 183\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 182 && BOOST_PP_ITERATION_START_5 >= 182\n#    define BOOST_PP_ITERATION_5 182\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 181 && BOOST_PP_ITERATION_START_5 >= 181\n#    define BOOST_PP_ITERATION_5 181\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 180 && BOOST_PP_ITERATION_START_5 >= 180\n#    define BOOST_PP_ITERATION_5 180\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 179 && BOOST_PP_ITERATION_START_5 >= 179\n#    define BOOST_PP_ITERATION_5 179\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 178 && BOOST_PP_ITERATION_START_5 >= 178\n#    define BOOST_PP_ITERATION_5 178\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 177 && BOOST_PP_ITERATION_START_5 >= 177\n#    define BOOST_PP_ITERATION_5 177\n#    include BOOST_PP_FILENAME_5\n#    undef 
BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 176 && BOOST_PP_ITERATION_START_5 >= 176\n#    define BOOST_PP_ITERATION_5 176\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 175 && BOOST_PP_ITERATION_START_5 >= 175\n#    define BOOST_PP_ITERATION_5 175\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 174 && BOOST_PP_ITERATION_START_5 >= 174\n#    define BOOST_PP_ITERATION_5 174\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 173 && BOOST_PP_ITERATION_START_5 >= 173\n#    define BOOST_PP_ITERATION_5 173\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 172 && BOOST_PP_ITERATION_START_5 >= 172\n#    define BOOST_PP_ITERATION_5 172\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 171 && BOOST_PP_ITERATION_START_5 >= 171\n#    define BOOST_PP_ITERATION_5 171\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 170 && BOOST_PP_ITERATION_START_5 >= 170\n#    define BOOST_PP_ITERATION_5 170\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 169 && BOOST_PP_ITERATION_START_5 >= 169\n#    define BOOST_PP_ITERATION_5 169\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 168 && BOOST_PP_ITERATION_START_5 >= 168\n#    define BOOST_PP_ITERATION_5 168\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 167 && BOOST_PP_ITERATION_START_5 >= 167\n#    define BOOST_PP_ITERATION_5 167\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 166 && 
BOOST_PP_ITERATION_START_5 >= 166\n#    define BOOST_PP_ITERATION_5 166\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 165 && BOOST_PP_ITERATION_START_5 >= 165\n#    define BOOST_PP_ITERATION_5 165\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 164 && BOOST_PP_ITERATION_START_5 >= 164\n#    define BOOST_PP_ITERATION_5 164\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 163 && BOOST_PP_ITERATION_START_5 >= 163\n#    define BOOST_PP_ITERATION_5 163\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 162 && BOOST_PP_ITERATION_START_5 >= 162\n#    define BOOST_PP_ITERATION_5 162\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 161 && BOOST_PP_ITERATION_START_5 >= 161\n#    define BOOST_PP_ITERATION_5 161\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 160 && BOOST_PP_ITERATION_START_5 >= 160\n#    define BOOST_PP_ITERATION_5 160\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 159 && BOOST_PP_ITERATION_START_5 >= 159\n#    define BOOST_PP_ITERATION_5 159\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 158 && BOOST_PP_ITERATION_START_5 >= 158\n#    define BOOST_PP_ITERATION_5 158\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 157 && BOOST_PP_ITERATION_START_5 >= 157\n#    define BOOST_PP_ITERATION_5 157\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 156 && BOOST_PP_ITERATION_START_5 >= 156\n#    define BOOST_PP_ITERATION_5 156\n#    
include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 155 && BOOST_PP_ITERATION_START_5 >= 155\n#    define BOOST_PP_ITERATION_5 155\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 154 && BOOST_PP_ITERATION_START_5 >= 154\n#    define BOOST_PP_ITERATION_5 154\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 153 && BOOST_PP_ITERATION_START_5 >= 153\n#    define BOOST_PP_ITERATION_5 153\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 152 && BOOST_PP_ITERATION_START_5 >= 152\n#    define BOOST_PP_ITERATION_5 152\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 151 && BOOST_PP_ITERATION_START_5 >= 151\n#    define BOOST_PP_ITERATION_5 151\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 150 && BOOST_PP_ITERATION_START_5 >= 150\n#    define BOOST_PP_ITERATION_5 150\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 149 && BOOST_PP_ITERATION_START_5 >= 149\n#    define BOOST_PP_ITERATION_5 149\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 148 && BOOST_PP_ITERATION_START_5 >= 148\n#    define BOOST_PP_ITERATION_5 148\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 147 && BOOST_PP_ITERATION_START_5 >= 147\n#    define BOOST_PP_ITERATION_5 147\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 146 && BOOST_PP_ITERATION_START_5 >= 146\n#    define BOOST_PP_ITERATION_5 146\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_5 <= 145 && BOOST_PP_ITERATION_START_5 >= 145\n#    define BOOST_PP_ITERATION_5 145\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 144 && BOOST_PP_ITERATION_START_5 >= 144\n#    define BOOST_PP_ITERATION_5 144\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 143 && BOOST_PP_ITERATION_START_5 >= 143\n#    define BOOST_PP_ITERATION_5 143\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 142 && BOOST_PP_ITERATION_START_5 >= 142\n#    define BOOST_PP_ITERATION_5 142\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 141 && BOOST_PP_ITERATION_START_5 >= 141\n#    define BOOST_PP_ITERATION_5 141\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 140 && BOOST_PP_ITERATION_START_5 >= 140\n#    define BOOST_PP_ITERATION_5 140\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 139 && BOOST_PP_ITERATION_START_5 >= 139\n#    define BOOST_PP_ITERATION_5 139\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 138 && BOOST_PP_ITERATION_START_5 >= 138\n#    define BOOST_PP_ITERATION_5 138\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 137 && BOOST_PP_ITERATION_START_5 >= 137\n#    define BOOST_PP_ITERATION_5 137\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 136 && BOOST_PP_ITERATION_START_5 >= 136\n#    define BOOST_PP_ITERATION_5 136\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 135 && BOOST_PP_ITERATION_START_5 >= 135\n#    
define BOOST_PP_ITERATION_5 135\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 134 && BOOST_PP_ITERATION_START_5 >= 134\n#    define BOOST_PP_ITERATION_5 134\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 133 && BOOST_PP_ITERATION_START_5 >= 133\n#    define BOOST_PP_ITERATION_5 133\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 132 && BOOST_PP_ITERATION_START_5 >= 132\n#    define BOOST_PP_ITERATION_5 132\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 131 && BOOST_PP_ITERATION_START_5 >= 131\n#    define BOOST_PP_ITERATION_5 131\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 130 && BOOST_PP_ITERATION_START_5 >= 130\n#    define BOOST_PP_ITERATION_5 130\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 129 && BOOST_PP_ITERATION_START_5 >= 129\n#    define BOOST_PP_ITERATION_5 129\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 128 && BOOST_PP_ITERATION_START_5 >= 128\n#    define BOOST_PP_ITERATION_5 128\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 127 && BOOST_PP_ITERATION_START_5 >= 127\n#    define BOOST_PP_ITERATION_5 127\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 126 && BOOST_PP_ITERATION_START_5 >= 126\n#    define BOOST_PP_ITERATION_5 126\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 125 && BOOST_PP_ITERATION_START_5 >= 125\n#    define BOOST_PP_ITERATION_5 125\n#    include BOOST_PP_FILENAME_5\n#    undef 
BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 124 && BOOST_PP_ITERATION_START_5 >= 124\n#    define BOOST_PP_ITERATION_5 124\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 123 && BOOST_PP_ITERATION_START_5 >= 123\n#    define BOOST_PP_ITERATION_5 123\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 122 && BOOST_PP_ITERATION_START_5 >= 122\n#    define BOOST_PP_ITERATION_5 122\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 121 && BOOST_PP_ITERATION_START_5 >= 121\n#    define BOOST_PP_ITERATION_5 121\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 120 && BOOST_PP_ITERATION_START_5 >= 120\n#    define BOOST_PP_ITERATION_5 120\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 119 && BOOST_PP_ITERATION_START_5 >= 119\n#    define BOOST_PP_ITERATION_5 119\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 118 && BOOST_PP_ITERATION_START_5 >= 118\n#    define BOOST_PP_ITERATION_5 118\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 117 && BOOST_PP_ITERATION_START_5 >= 117\n#    define BOOST_PP_ITERATION_5 117\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 116 && BOOST_PP_ITERATION_START_5 >= 116\n#    define BOOST_PP_ITERATION_5 116\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 115 && BOOST_PP_ITERATION_START_5 >= 115\n#    define BOOST_PP_ITERATION_5 115\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 114 && 
BOOST_PP_ITERATION_START_5 >= 114\n#    define BOOST_PP_ITERATION_5 114\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 113 && BOOST_PP_ITERATION_START_5 >= 113\n#    define BOOST_PP_ITERATION_5 113\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 112 && BOOST_PP_ITERATION_START_5 >= 112\n#    define BOOST_PP_ITERATION_5 112\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 111 && BOOST_PP_ITERATION_START_5 >= 111\n#    define BOOST_PP_ITERATION_5 111\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 110 && BOOST_PP_ITERATION_START_5 >= 110\n#    define BOOST_PP_ITERATION_5 110\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 109 && BOOST_PP_ITERATION_START_5 >= 109\n#    define BOOST_PP_ITERATION_5 109\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 108 && BOOST_PP_ITERATION_START_5 >= 108\n#    define BOOST_PP_ITERATION_5 108\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 107 && BOOST_PP_ITERATION_START_5 >= 107\n#    define BOOST_PP_ITERATION_5 107\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 106 && BOOST_PP_ITERATION_START_5 >= 106\n#    define BOOST_PP_ITERATION_5 106\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 105 && BOOST_PP_ITERATION_START_5 >= 105\n#    define BOOST_PP_ITERATION_5 105\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 104 && BOOST_PP_ITERATION_START_5 >= 104\n#    define BOOST_PP_ITERATION_5 104\n#    
include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 103 && BOOST_PP_ITERATION_START_5 >= 103\n#    define BOOST_PP_ITERATION_5 103\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 102 && BOOST_PP_ITERATION_START_5 >= 102\n#    define BOOST_PP_ITERATION_5 102\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 101 && BOOST_PP_ITERATION_START_5 >= 101\n#    define BOOST_PP_ITERATION_5 101\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 100 && BOOST_PP_ITERATION_START_5 >= 100\n#    define BOOST_PP_ITERATION_5 100\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 99 && BOOST_PP_ITERATION_START_5 >= 99\n#    define BOOST_PP_ITERATION_5 99\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 98 && BOOST_PP_ITERATION_START_5 >= 98\n#    define BOOST_PP_ITERATION_5 98\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 97 && BOOST_PP_ITERATION_START_5 >= 97\n#    define BOOST_PP_ITERATION_5 97\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 96 && BOOST_PP_ITERATION_START_5 >= 96\n#    define BOOST_PP_ITERATION_5 96\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 95 && BOOST_PP_ITERATION_START_5 >= 95\n#    define BOOST_PP_ITERATION_5 95\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 94 && BOOST_PP_ITERATION_START_5 >= 94\n#    define BOOST_PP_ITERATION_5 94\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if 
BOOST_PP_ITERATION_FINISH_5 <= 93 && BOOST_PP_ITERATION_START_5 >= 93\n#    define BOOST_PP_ITERATION_5 93\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 92 && BOOST_PP_ITERATION_START_5 >= 92\n#    define BOOST_PP_ITERATION_5 92\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 91 && BOOST_PP_ITERATION_START_5 >= 91\n#    define BOOST_PP_ITERATION_5 91\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 90 && BOOST_PP_ITERATION_START_5 >= 90\n#    define BOOST_PP_ITERATION_5 90\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 89 && BOOST_PP_ITERATION_START_5 >= 89\n#    define BOOST_PP_ITERATION_5 89\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 88 && BOOST_PP_ITERATION_START_5 >= 88\n#    define BOOST_PP_ITERATION_5 88\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 87 && BOOST_PP_ITERATION_START_5 >= 87\n#    define BOOST_PP_ITERATION_5 87\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 86 && BOOST_PP_ITERATION_START_5 >= 86\n#    define BOOST_PP_ITERATION_5 86\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 85 && BOOST_PP_ITERATION_START_5 >= 85\n#    define BOOST_PP_ITERATION_5 85\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 84 && BOOST_PP_ITERATION_START_5 >= 84\n#    define BOOST_PP_ITERATION_5 84\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 83 && BOOST_PP_ITERATION_START_5 >= 83\n#    define BOOST_PP_ITERATION_5 83\n# 
   include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 82 && BOOST_PP_ITERATION_START_5 >= 82\n#    define BOOST_PP_ITERATION_5 82\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 81 && BOOST_PP_ITERATION_START_5 >= 81\n#    define BOOST_PP_ITERATION_5 81\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 80 && BOOST_PP_ITERATION_START_5 >= 80\n#    define BOOST_PP_ITERATION_5 80\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 79 && BOOST_PP_ITERATION_START_5 >= 79\n#    define BOOST_PP_ITERATION_5 79\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 78 && BOOST_PP_ITERATION_START_5 >= 78\n#    define BOOST_PP_ITERATION_5 78\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 77 && BOOST_PP_ITERATION_START_5 >= 77\n#    define BOOST_PP_ITERATION_5 77\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 76 && BOOST_PP_ITERATION_START_5 >= 76\n#    define BOOST_PP_ITERATION_5 76\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 75 && BOOST_PP_ITERATION_START_5 >= 75\n#    define BOOST_PP_ITERATION_5 75\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 74 && BOOST_PP_ITERATION_START_5 >= 74\n#    define BOOST_PP_ITERATION_5 74\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 73 && BOOST_PP_ITERATION_START_5 >= 73\n#    define BOOST_PP_ITERATION_5 73\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 
72 && BOOST_PP_ITERATION_START_5 >= 72\n#    define BOOST_PP_ITERATION_5 72\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 71 && BOOST_PP_ITERATION_START_5 >= 71\n#    define BOOST_PP_ITERATION_5 71\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 70 && BOOST_PP_ITERATION_START_5 >= 70\n#    define BOOST_PP_ITERATION_5 70\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 69 && BOOST_PP_ITERATION_START_5 >= 69\n#    define BOOST_PP_ITERATION_5 69\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 68 && BOOST_PP_ITERATION_START_5 >= 68\n#    define BOOST_PP_ITERATION_5 68\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 67 && BOOST_PP_ITERATION_START_5 >= 67\n#    define BOOST_PP_ITERATION_5 67\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 66 && BOOST_PP_ITERATION_START_5 >= 66\n#    define BOOST_PP_ITERATION_5 66\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 65 && BOOST_PP_ITERATION_START_5 >= 65\n#    define BOOST_PP_ITERATION_5 65\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 64 && BOOST_PP_ITERATION_START_5 >= 64\n#    define BOOST_PP_ITERATION_5 64\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 63 && BOOST_PP_ITERATION_START_5 >= 63\n#    define BOOST_PP_ITERATION_5 63\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 62 && BOOST_PP_ITERATION_START_5 >= 62\n#    define BOOST_PP_ITERATION_5 62\n#    include 
BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 61 && BOOST_PP_ITERATION_START_5 >= 61\n#    define BOOST_PP_ITERATION_5 61\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 60 && BOOST_PP_ITERATION_START_5 >= 60\n#    define BOOST_PP_ITERATION_5 60\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 59 && BOOST_PP_ITERATION_START_5 >= 59\n#    define BOOST_PP_ITERATION_5 59\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 58 && BOOST_PP_ITERATION_START_5 >= 58\n#    define BOOST_PP_ITERATION_5 58\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 57 && BOOST_PP_ITERATION_START_5 >= 57\n#    define BOOST_PP_ITERATION_5 57\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 56 && BOOST_PP_ITERATION_START_5 >= 56\n#    define BOOST_PP_ITERATION_5 56\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 55 && BOOST_PP_ITERATION_START_5 >= 55\n#    define BOOST_PP_ITERATION_5 55\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 54 && BOOST_PP_ITERATION_START_5 >= 54\n#    define BOOST_PP_ITERATION_5 54\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 53 && BOOST_PP_ITERATION_START_5 >= 53\n#    define BOOST_PP_ITERATION_5 53\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 52 && BOOST_PP_ITERATION_START_5 >= 52\n#    define BOOST_PP_ITERATION_5 52\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 51 && 
BOOST_PP_ITERATION_START_5 >= 51\n#    define BOOST_PP_ITERATION_5 51\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 50 && BOOST_PP_ITERATION_START_5 >= 50\n#    define BOOST_PP_ITERATION_5 50\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 49 && BOOST_PP_ITERATION_START_5 >= 49\n#    define BOOST_PP_ITERATION_5 49\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 48 && BOOST_PP_ITERATION_START_5 >= 48\n#    define BOOST_PP_ITERATION_5 48\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 47 && BOOST_PP_ITERATION_START_5 >= 47\n#    define BOOST_PP_ITERATION_5 47\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 46 && BOOST_PP_ITERATION_START_5 >= 46\n#    define BOOST_PP_ITERATION_5 46\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 45 && BOOST_PP_ITERATION_START_5 >= 45\n#    define BOOST_PP_ITERATION_5 45\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 44 && BOOST_PP_ITERATION_START_5 >= 44\n#    define BOOST_PP_ITERATION_5 44\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 43 && BOOST_PP_ITERATION_START_5 >= 43\n#    define BOOST_PP_ITERATION_5 43\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 42 && BOOST_PP_ITERATION_START_5 >= 42\n#    define BOOST_PP_ITERATION_5 42\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 41 && BOOST_PP_ITERATION_START_5 >= 41\n#    define BOOST_PP_ITERATION_5 41\n#    include BOOST_PP_FILENAME_5\n#    
undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 40 && BOOST_PP_ITERATION_START_5 >= 40\n#    define BOOST_PP_ITERATION_5 40\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 39 && BOOST_PP_ITERATION_START_5 >= 39\n#    define BOOST_PP_ITERATION_5 39\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 38 && BOOST_PP_ITERATION_START_5 >= 38\n#    define BOOST_PP_ITERATION_5 38\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 37 && BOOST_PP_ITERATION_START_5 >= 37\n#    define BOOST_PP_ITERATION_5 37\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 36 && BOOST_PP_ITERATION_START_5 >= 36\n#    define BOOST_PP_ITERATION_5 36\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 35 && BOOST_PP_ITERATION_START_5 >= 35\n#    define BOOST_PP_ITERATION_5 35\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 34 && BOOST_PP_ITERATION_START_5 >= 34\n#    define BOOST_PP_ITERATION_5 34\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 33 && BOOST_PP_ITERATION_START_5 >= 33\n#    define BOOST_PP_ITERATION_5 33\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 32 && BOOST_PP_ITERATION_START_5 >= 32\n#    define BOOST_PP_ITERATION_5 32\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 31 && BOOST_PP_ITERATION_START_5 >= 31\n#    define BOOST_PP_ITERATION_5 31\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 30 && BOOST_PP_ITERATION_START_5 >= 
30\n#    define BOOST_PP_ITERATION_5 30\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 29 && BOOST_PP_ITERATION_START_5 >= 29\n#    define BOOST_PP_ITERATION_5 29\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 28 && BOOST_PP_ITERATION_START_5 >= 28\n#    define BOOST_PP_ITERATION_5 28\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 27 && BOOST_PP_ITERATION_START_5 >= 27\n#    define BOOST_PP_ITERATION_5 27\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 26 && BOOST_PP_ITERATION_START_5 >= 26\n#    define BOOST_PP_ITERATION_5 26\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 25 && BOOST_PP_ITERATION_START_5 >= 25\n#    define BOOST_PP_ITERATION_5 25\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 24 && BOOST_PP_ITERATION_START_5 >= 24\n#    define BOOST_PP_ITERATION_5 24\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 23 && BOOST_PP_ITERATION_START_5 >= 23\n#    define BOOST_PP_ITERATION_5 23\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 22 && BOOST_PP_ITERATION_START_5 >= 22\n#    define BOOST_PP_ITERATION_5 22\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 21 && BOOST_PP_ITERATION_START_5 >= 21\n#    define BOOST_PP_ITERATION_5 21\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 20 && BOOST_PP_ITERATION_START_5 >= 20\n#    define BOOST_PP_ITERATION_5 20\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# 
endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 19 && BOOST_PP_ITERATION_START_5 >= 19\n#    define BOOST_PP_ITERATION_5 19\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 18 && BOOST_PP_ITERATION_START_5 >= 18\n#    define BOOST_PP_ITERATION_5 18\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 17 && BOOST_PP_ITERATION_START_5 >= 17\n#    define BOOST_PP_ITERATION_5 17\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 16 && BOOST_PP_ITERATION_START_5 >= 16\n#    define BOOST_PP_ITERATION_5 16\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 15 && BOOST_PP_ITERATION_START_5 >= 15\n#    define BOOST_PP_ITERATION_5 15\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 14 && BOOST_PP_ITERATION_START_5 >= 14\n#    define BOOST_PP_ITERATION_5 14\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 13 && BOOST_PP_ITERATION_START_5 >= 13\n#    define BOOST_PP_ITERATION_5 13\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 12 && BOOST_PP_ITERATION_START_5 >= 12\n#    define BOOST_PP_ITERATION_5 12\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 11 && BOOST_PP_ITERATION_START_5 >= 11\n#    define BOOST_PP_ITERATION_5 11\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 10 && BOOST_PP_ITERATION_START_5 >= 10\n#    define BOOST_PP_ITERATION_5 10\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 9 && BOOST_PP_ITERATION_START_5 >= 9\n#    define 
BOOST_PP_ITERATION_5 9\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 8 && BOOST_PP_ITERATION_START_5 >= 8\n#    define BOOST_PP_ITERATION_5 8\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 7 && BOOST_PP_ITERATION_START_5 >= 7\n#    define BOOST_PP_ITERATION_5 7\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 6 && BOOST_PP_ITERATION_START_5 >= 6\n#    define BOOST_PP_ITERATION_5 6\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 5 && BOOST_PP_ITERATION_START_5 >= 5\n#    define BOOST_PP_ITERATION_5 5\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 4 && BOOST_PP_ITERATION_START_5 >= 4\n#    define BOOST_PP_ITERATION_5 4\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 3 && BOOST_PP_ITERATION_START_5 >= 3\n#    define BOOST_PP_ITERATION_5 3\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 2 && BOOST_PP_ITERATION_START_5 >= 2\n#    define BOOST_PP_ITERATION_5 2\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 1 && BOOST_PP_ITERATION_START_5 >= 1\n#    define BOOST_PP_ITERATION_5 1\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n# if BOOST_PP_ITERATION_FINISH_5 <= 0 && BOOST_PP_ITERATION_START_5 >= 0\n#    define BOOST_PP_ITERATION_5 0\n#    include BOOST_PP_FILENAME_5\n#    undef BOOST_PP_ITERATION_5\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/local.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if !defined(BOOST_PP_LOCAL_LIMITS)\n#    error BOOST_PP_ERROR:  local iteration boundaries are not defined\n# elif !defined(BOOST_PP_LOCAL_MACRO)\n#    error BOOST_PP_ERROR:  local iteration target macro is not defined\n# else\n#    if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#        define BOOST_PP_LOCAL_S BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_LOCAL_LIMITS)\n#        define BOOST_PP_LOCAL_F BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_LOCAL_LIMITS)\n#    else\n#        define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_LOCAL_LIMITS)\n#        include <boost/preprocessor/iteration/detail/start.hpp>\n#        define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_LOCAL_LIMITS)\n#        include <boost/preprocessor/iteration/detail/finish.hpp>\n#        define BOOST_PP_LOCAL_S BOOST_PP_LOCAL_SE()\n#        define BOOST_PP_LOCAL_F BOOST_PP_LOCAL_FE()\n#    endif\n# endif\n#\n# if (BOOST_PP_LOCAL_S) > (BOOST_PP_LOCAL_F)\n#    include <boost/preprocessor/iteration/detail/rlocal.hpp>\n# else\n#    if BOOST_PP_LOCAL_C(0)\n        BOOST_PP_LOCAL_MACRO(0)\n#    endif\n#    if BOOST_PP_LOCAL_C(1)\n        BOOST_PP_LOCAL_MACRO(1)\n#    endif\n#    if BOOST_PP_LOCAL_C(2)\n        BOOST_PP_LOCAL_MACRO(2)\n#    endif\n#    if BOOST_PP_LOCAL_C(3)\n        BOOST_PP_LOCAL_MACRO(3)\n#    endif\n#    if BOOST_PP_LOCAL_C(4)\n        BOOST_PP_LOCAL_MACRO(4)\n#   
 endif\n#    if BOOST_PP_LOCAL_C(5)\n        BOOST_PP_LOCAL_MACRO(5)\n#    endif\n#    if BOOST_PP_LOCAL_C(6)\n        BOOST_PP_LOCAL_MACRO(6)\n#    endif\n#    if BOOST_PP_LOCAL_C(7)\n        BOOST_PP_LOCAL_MACRO(7)\n#    endif\n#    if BOOST_PP_LOCAL_C(8)\n        BOOST_PP_LOCAL_MACRO(8)\n#    endif\n#    if BOOST_PP_LOCAL_C(9)\n        BOOST_PP_LOCAL_MACRO(9)\n#    endif\n#    if BOOST_PP_LOCAL_C(10)\n        BOOST_PP_LOCAL_MACRO(10)\n#    endif\n#    if BOOST_PP_LOCAL_C(11)\n        BOOST_PP_LOCAL_MACRO(11)\n#    endif\n#    if BOOST_PP_LOCAL_C(12)\n        BOOST_PP_LOCAL_MACRO(12)\n#    endif\n#    if BOOST_PP_LOCAL_C(13)\n        BOOST_PP_LOCAL_MACRO(13)\n#    endif\n#    if BOOST_PP_LOCAL_C(14)\n        BOOST_PP_LOCAL_MACRO(14)\n#    endif\n#    if BOOST_PP_LOCAL_C(15)\n        BOOST_PP_LOCAL_MACRO(15)\n#    endif\n#    if BOOST_PP_LOCAL_C(16)\n        BOOST_PP_LOCAL_MACRO(16)\n#    endif\n#    if BOOST_PP_LOCAL_C(17)\n        BOOST_PP_LOCAL_MACRO(17)\n#    endif\n#    if BOOST_PP_LOCAL_C(18)\n        BOOST_PP_LOCAL_MACRO(18)\n#    endif\n#    if BOOST_PP_LOCAL_C(19)\n        BOOST_PP_LOCAL_MACRO(19)\n#    endif\n#    if BOOST_PP_LOCAL_C(20)\n        BOOST_PP_LOCAL_MACRO(20)\n#    endif\n#    if BOOST_PP_LOCAL_C(21)\n        BOOST_PP_LOCAL_MACRO(21)\n#    endif\n#    if BOOST_PP_LOCAL_C(22)\n        BOOST_PP_LOCAL_MACRO(22)\n#    endif\n#    if BOOST_PP_LOCAL_C(23)\n        BOOST_PP_LOCAL_MACRO(23)\n#    endif\n#    if BOOST_PP_LOCAL_C(24)\n        BOOST_PP_LOCAL_MACRO(24)\n#    endif\n#    if BOOST_PP_LOCAL_C(25)\n        BOOST_PP_LOCAL_MACRO(25)\n#    endif\n#    if BOOST_PP_LOCAL_C(26)\n        BOOST_PP_LOCAL_MACRO(26)\n#    endif\n#    if BOOST_PP_LOCAL_C(27)\n        BOOST_PP_LOCAL_MACRO(27)\n#    endif\n#    if BOOST_PP_LOCAL_C(28)\n        BOOST_PP_LOCAL_MACRO(28)\n#    endif\n#    if BOOST_PP_LOCAL_C(29)\n        BOOST_PP_LOCAL_MACRO(29)\n#    endif\n#    if BOOST_PP_LOCAL_C(30)\n        BOOST_PP_LOCAL_MACRO(30)\n#    endif\n#    if 
BOOST_PP_LOCAL_C(31)\n        BOOST_PP_LOCAL_MACRO(31)\n#    endif\n#    if BOOST_PP_LOCAL_C(32)\n        BOOST_PP_LOCAL_MACRO(32)\n#    endif\n#    if BOOST_PP_LOCAL_C(33)\n        BOOST_PP_LOCAL_MACRO(33)\n#    endif\n#    if BOOST_PP_LOCAL_C(34)\n        BOOST_PP_LOCAL_MACRO(34)\n#    endif\n#    if BOOST_PP_LOCAL_C(35)\n        BOOST_PP_LOCAL_MACRO(35)\n#    endif\n#    if BOOST_PP_LOCAL_C(36)\n        BOOST_PP_LOCAL_MACRO(36)\n#    endif\n#    if BOOST_PP_LOCAL_C(37)\n        BOOST_PP_LOCAL_MACRO(37)\n#    endif\n#    if BOOST_PP_LOCAL_C(38)\n        BOOST_PP_LOCAL_MACRO(38)\n#    endif\n#    if BOOST_PP_LOCAL_C(39)\n        BOOST_PP_LOCAL_MACRO(39)\n#    endif\n#    if BOOST_PP_LOCAL_C(40)\n        BOOST_PP_LOCAL_MACRO(40)\n#    endif\n#    if BOOST_PP_LOCAL_C(41)\n        BOOST_PP_LOCAL_MACRO(41)\n#    endif\n#    if BOOST_PP_LOCAL_C(42)\n        BOOST_PP_LOCAL_MACRO(42)\n#    endif\n#    if BOOST_PP_LOCAL_C(43)\n        BOOST_PP_LOCAL_MACRO(43)\n#    endif\n#    if BOOST_PP_LOCAL_C(44)\n        BOOST_PP_LOCAL_MACRO(44)\n#    endif\n#    if BOOST_PP_LOCAL_C(45)\n        BOOST_PP_LOCAL_MACRO(45)\n#    endif\n#    if BOOST_PP_LOCAL_C(46)\n        BOOST_PP_LOCAL_MACRO(46)\n#    endif\n#    if BOOST_PP_LOCAL_C(47)\n        BOOST_PP_LOCAL_MACRO(47)\n#    endif\n#    if BOOST_PP_LOCAL_C(48)\n        BOOST_PP_LOCAL_MACRO(48)\n#    endif\n#    if BOOST_PP_LOCAL_C(49)\n        BOOST_PP_LOCAL_MACRO(49)\n#    endif\n#    if BOOST_PP_LOCAL_C(50)\n        BOOST_PP_LOCAL_MACRO(50)\n#    endif\n#    if BOOST_PP_LOCAL_C(51)\n        BOOST_PP_LOCAL_MACRO(51)\n#    endif\n#    if BOOST_PP_LOCAL_C(52)\n        BOOST_PP_LOCAL_MACRO(52)\n#    endif\n#    if BOOST_PP_LOCAL_C(53)\n        BOOST_PP_LOCAL_MACRO(53)\n#    endif\n#    if BOOST_PP_LOCAL_C(54)\n        BOOST_PP_LOCAL_MACRO(54)\n#    endif\n#    if BOOST_PP_LOCAL_C(55)\n        BOOST_PP_LOCAL_MACRO(55)\n#    endif\n#    if BOOST_PP_LOCAL_C(56)\n        BOOST_PP_LOCAL_MACRO(56)\n#    endif\n#    if BOOST_PP_LOCAL_C(57)\n  
      BOOST_PP_LOCAL_MACRO(57)\n#    endif\n#    if BOOST_PP_LOCAL_C(58)\n        BOOST_PP_LOCAL_MACRO(58)\n#    endif\n#    if BOOST_PP_LOCAL_C(59)\n        BOOST_PP_LOCAL_MACRO(59)\n#    endif\n#    if BOOST_PP_LOCAL_C(60)\n        BOOST_PP_LOCAL_MACRO(60)\n#    endif\n#    if BOOST_PP_LOCAL_C(61)\n        BOOST_PP_LOCAL_MACRO(61)\n#    endif\n#    if BOOST_PP_LOCAL_C(62)\n        BOOST_PP_LOCAL_MACRO(62)\n#    endif\n#    if BOOST_PP_LOCAL_C(63)\n        BOOST_PP_LOCAL_MACRO(63)\n#    endif\n#    if BOOST_PP_LOCAL_C(64)\n        BOOST_PP_LOCAL_MACRO(64)\n#    endif\n#    if BOOST_PP_LOCAL_C(65)\n        BOOST_PP_LOCAL_MACRO(65)\n#    endif\n#    if BOOST_PP_LOCAL_C(66)\n        BOOST_PP_LOCAL_MACRO(66)\n#    endif\n#    if BOOST_PP_LOCAL_C(67)\n        BOOST_PP_LOCAL_MACRO(67)\n#    endif\n#    if BOOST_PP_LOCAL_C(68)\n        BOOST_PP_LOCAL_MACRO(68)\n#    endif\n#    if BOOST_PP_LOCAL_C(69)\n        BOOST_PP_LOCAL_MACRO(69)\n#    endif\n#    if BOOST_PP_LOCAL_C(70)\n        BOOST_PP_LOCAL_MACRO(70)\n#    endif\n#    if BOOST_PP_LOCAL_C(71)\n        BOOST_PP_LOCAL_MACRO(71)\n#    endif\n#    if BOOST_PP_LOCAL_C(72)\n        BOOST_PP_LOCAL_MACRO(72)\n#    endif\n#    if BOOST_PP_LOCAL_C(73)\n        BOOST_PP_LOCAL_MACRO(73)\n#    endif\n#    if BOOST_PP_LOCAL_C(74)\n        BOOST_PP_LOCAL_MACRO(74)\n#    endif\n#    if BOOST_PP_LOCAL_C(75)\n        BOOST_PP_LOCAL_MACRO(75)\n#    endif\n#    if BOOST_PP_LOCAL_C(76)\n        BOOST_PP_LOCAL_MACRO(76)\n#    endif\n#    if BOOST_PP_LOCAL_C(77)\n        BOOST_PP_LOCAL_MACRO(77)\n#    endif\n#    if BOOST_PP_LOCAL_C(78)\n        BOOST_PP_LOCAL_MACRO(78)\n#    endif\n#    if BOOST_PP_LOCAL_C(79)\n        BOOST_PP_LOCAL_MACRO(79)\n#    endif\n#    if BOOST_PP_LOCAL_C(80)\n        BOOST_PP_LOCAL_MACRO(80)\n#    endif\n#    if BOOST_PP_LOCAL_C(81)\n        BOOST_PP_LOCAL_MACRO(81)\n#    endif\n#    if BOOST_PP_LOCAL_C(82)\n        BOOST_PP_LOCAL_MACRO(82)\n#    endif\n#    if BOOST_PP_LOCAL_C(83)\n        
BOOST_PP_LOCAL_MACRO(83)\n#    endif\n#    if BOOST_PP_LOCAL_C(84)\n        BOOST_PP_LOCAL_MACRO(84)\n#    endif\n#    if BOOST_PP_LOCAL_C(85)\n        BOOST_PP_LOCAL_MACRO(85)\n#    endif\n#    if BOOST_PP_LOCAL_C(86)\n        BOOST_PP_LOCAL_MACRO(86)\n#    endif\n#    if BOOST_PP_LOCAL_C(87)\n        BOOST_PP_LOCAL_MACRO(87)\n#    endif\n#    if BOOST_PP_LOCAL_C(88)\n        BOOST_PP_LOCAL_MACRO(88)\n#    endif\n#    if BOOST_PP_LOCAL_C(89)\n        BOOST_PP_LOCAL_MACRO(89)\n#    endif\n#    if BOOST_PP_LOCAL_C(90)\n        BOOST_PP_LOCAL_MACRO(90)\n#    endif\n#    if BOOST_PP_LOCAL_C(91)\n        BOOST_PP_LOCAL_MACRO(91)\n#    endif\n#    if BOOST_PP_LOCAL_C(92)\n        BOOST_PP_LOCAL_MACRO(92)\n#    endif\n#    if BOOST_PP_LOCAL_C(93)\n        BOOST_PP_LOCAL_MACRO(93)\n#    endif\n#    if BOOST_PP_LOCAL_C(94)\n        BOOST_PP_LOCAL_MACRO(94)\n#    endif\n#    if BOOST_PP_LOCAL_C(95)\n        BOOST_PP_LOCAL_MACRO(95)\n#    endif\n#    if BOOST_PP_LOCAL_C(96)\n        BOOST_PP_LOCAL_MACRO(96)\n#    endif\n#    if BOOST_PP_LOCAL_C(97)\n        BOOST_PP_LOCAL_MACRO(97)\n#    endif\n#    if BOOST_PP_LOCAL_C(98)\n        BOOST_PP_LOCAL_MACRO(98)\n#    endif\n#    if BOOST_PP_LOCAL_C(99)\n        BOOST_PP_LOCAL_MACRO(99)\n#    endif\n#    if BOOST_PP_LOCAL_C(100)\n        BOOST_PP_LOCAL_MACRO(100)\n#    endif\n#    if BOOST_PP_LOCAL_C(101)\n        BOOST_PP_LOCAL_MACRO(101)\n#    endif\n#    if BOOST_PP_LOCAL_C(102)\n        BOOST_PP_LOCAL_MACRO(102)\n#    endif\n#    if BOOST_PP_LOCAL_C(103)\n        BOOST_PP_LOCAL_MACRO(103)\n#    endif\n#    if BOOST_PP_LOCAL_C(104)\n        BOOST_PP_LOCAL_MACRO(104)\n#    endif\n#    if BOOST_PP_LOCAL_C(105)\n        BOOST_PP_LOCAL_MACRO(105)\n#    endif\n#    if BOOST_PP_LOCAL_C(106)\n        BOOST_PP_LOCAL_MACRO(106)\n#    endif\n#    if BOOST_PP_LOCAL_C(107)\n        BOOST_PP_LOCAL_MACRO(107)\n#    endif\n#    if BOOST_PP_LOCAL_C(108)\n        BOOST_PP_LOCAL_MACRO(108)\n#    endif\n#    if BOOST_PP_LOCAL_C(109)\n        
BOOST_PP_LOCAL_MACRO(109)\n#    endif\n#    if BOOST_PP_LOCAL_C(110)\n        BOOST_PP_LOCAL_MACRO(110)\n#    endif\n#    if BOOST_PP_LOCAL_C(111)\n        BOOST_PP_LOCAL_MACRO(111)\n#    endif\n#    if BOOST_PP_LOCAL_C(112)\n        BOOST_PP_LOCAL_MACRO(112)\n#    endif\n#    if BOOST_PP_LOCAL_C(113)\n        BOOST_PP_LOCAL_MACRO(113)\n#    endif\n#    if BOOST_PP_LOCAL_C(114)\n        BOOST_PP_LOCAL_MACRO(114)\n#    endif\n#    if BOOST_PP_LOCAL_C(115)\n        BOOST_PP_LOCAL_MACRO(115)\n#    endif\n#    if BOOST_PP_LOCAL_C(116)\n        BOOST_PP_LOCAL_MACRO(116)\n#    endif\n#    if BOOST_PP_LOCAL_C(117)\n        BOOST_PP_LOCAL_MACRO(117)\n#    endif\n#    if BOOST_PP_LOCAL_C(118)\n        BOOST_PP_LOCAL_MACRO(118)\n#    endif\n#    if BOOST_PP_LOCAL_C(119)\n        BOOST_PP_LOCAL_MACRO(119)\n#    endif\n#    if BOOST_PP_LOCAL_C(120)\n        BOOST_PP_LOCAL_MACRO(120)\n#    endif\n#    if BOOST_PP_LOCAL_C(121)\n        BOOST_PP_LOCAL_MACRO(121)\n#    endif\n#    if BOOST_PP_LOCAL_C(122)\n        BOOST_PP_LOCAL_MACRO(122)\n#    endif\n#    if BOOST_PP_LOCAL_C(123)\n        BOOST_PP_LOCAL_MACRO(123)\n#    endif\n#    if BOOST_PP_LOCAL_C(124)\n        BOOST_PP_LOCAL_MACRO(124)\n#    endif\n#    if BOOST_PP_LOCAL_C(125)\n        BOOST_PP_LOCAL_MACRO(125)\n#    endif\n#    if BOOST_PP_LOCAL_C(126)\n        BOOST_PP_LOCAL_MACRO(126)\n#    endif\n#    if BOOST_PP_LOCAL_C(127)\n        BOOST_PP_LOCAL_MACRO(127)\n#    endif\n#    if BOOST_PP_LOCAL_C(128)\n        BOOST_PP_LOCAL_MACRO(128)\n#    endif\n#    if BOOST_PP_LOCAL_C(129)\n        BOOST_PP_LOCAL_MACRO(129)\n#    endif\n#    if BOOST_PP_LOCAL_C(130)\n        BOOST_PP_LOCAL_MACRO(130)\n#    endif\n#    if BOOST_PP_LOCAL_C(131)\n        BOOST_PP_LOCAL_MACRO(131)\n#    endif\n#    if BOOST_PP_LOCAL_C(132)\n        BOOST_PP_LOCAL_MACRO(132)\n#    endif\n#    if BOOST_PP_LOCAL_C(133)\n        BOOST_PP_LOCAL_MACRO(133)\n#    endif\n#    if BOOST_PP_LOCAL_C(134)\n        BOOST_PP_LOCAL_MACRO(134)\n#    endif\n#    if 
BOOST_PP_LOCAL_C(135)\n        BOOST_PP_LOCAL_MACRO(135)\n#    endif\n#    if BOOST_PP_LOCAL_C(136)\n        BOOST_PP_LOCAL_MACRO(136)\n#    endif\n#    if BOOST_PP_LOCAL_C(137)\n        BOOST_PP_LOCAL_MACRO(137)\n#    endif\n#    if BOOST_PP_LOCAL_C(138)\n        BOOST_PP_LOCAL_MACRO(138)\n#    endif\n#    if BOOST_PP_LOCAL_C(139)\n        BOOST_PP_LOCAL_MACRO(139)\n#    endif\n#    if BOOST_PP_LOCAL_C(140)\n        BOOST_PP_LOCAL_MACRO(140)\n#    endif\n#    if BOOST_PP_LOCAL_C(141)\n        BOOST_PP_LOCAL_MACRO(141)\n#    endif\n#    if BOOST_PP_LOCAL_C(142)\n        BOOST_PP_LOCAL_MACRO(142)\n#    endif\n#    if BOOST_PP_LOCAL_C(143)\n        BOOST_PP_LOCAL_MACRO(143)\n#    endif\n#    if BOOST_PP_LOCAL_C(144)\n        BOOST_PP_LOCAL_MACRO(144)\n#    endif\n#    if BOOST_PP_LOCAL_C(145)\n        BOOST_PP_LOCAL_MACRO(145)\n#    endif\n#    if BOOST_PP_LOCAL_C(146)\n        BOOST_PP_LOCAL_MACRO(146)\n#    endif\n#    if BOOST_PP_LOCAL_C(147)\n        BOOST_PP_LOCAL_MACRO(147)\n#    endif\n#    if BOOST_PP_LOCAL_C(148)\n        BOOST_PP_LOCAL_MACRO(148)\n#    endif\n#    if BOOST_PP_LOCAL_C(149)\n        BOOST_PP_LOCAL_MACRO(149)\n#    endif\n#    if BOOST_PP_LOCAL_C(150)\n        BOOST_PP_LOCAL_MACRO(150)\n#    endif\n#    if BOOST_PP_LOCAL_C(151)\n        BOOST_PP_LOCAL_MACRO(151)\n#    endif\n#    if BOOST_PP_LOCAL_C(152)\n        BOOST_PP_LOCAL_MACRO(152)\n#    endif\n#    if BOOST_PP_LOCAL_C(153)\n        BOOST_PP_LOCAL_MACRO(153)\n#    endif\n#    if BOOST_PP_LOCAL_C(154)\n        BOOST_PP_LOCAL_MACRO(154)\n#    endif\n#    if BOOST_PP_LOCAL_C(155)\n        BOOST_PP_LOCAL_MACRO(155)\n#    endif\n#    if BOOST_PP_LOCAL_C(156)\n        BOOST_PP_LOCAL_MACRO(156)\n#    endif\n#    if BOOST_PP_LOCAL_C(157)\n        BOOST_PP_LOCAL_MACRO(157)\n#    endif\n#    if BOOST_PP_LOCAL_C(158)\n        BOOST_PP_LOCAL_MACRO(158)\n#    endif\n#    if BOOST_PP_LOCAL_C(159)\n        BOOST_PP_LOCAL_MACRO(159)\n#    endif\n#    if BOOST_PP_LOCAL_C(160)\n        
BOOST_PP_LOCAL_MACRO(160)\n#    endif\n#    if BOOST_PP_LOCAL_C(161)\n        BOOST_PP_LOCAL_MACRO(161)\n#    endif\n#    if BOOST_PP_LOCAL_C(162)\n        BOOST_PP_LOCAL_MACRO(162)\n#    endif\n#    if BOOST_PP_LOCAL_C(163)\n        BOOST_PP_LOCAL_MACRO(163)\n#    endif\n#    if BOOST_PP_LOCAL_C(164)\n        BOOST_PP_LOCAL_MACRO(164)\n#    endif\n#    if BOOST_PP_LOCAL_C(165)\n        BOOST_PP_LOCAL_MACRO(165)\n#    endif\n#    if BOOST_PP_LOCAL_C(166)\n        BOOST_PP_LOCAL_MACRO(166)\n#    endif\n#    if BOOST_PP_LOCAL_C(167)\n        BOOST_PP_LOCAL_MACRO(167)\n#    endif\n#    if BOOST_PP_LOCAL_C(168)\n        BOOST_PP_LOCAL_MACRO(168)\n#    endif\n#    if BOOST_PP_LOCAL_C(169)\n        BOOST_PP_LOCAL_MACRO(169)\n#    endif\n#    if BOOST_PP_LOCAL_C(170)\n        BOOST_PP_LOCAL_MACRO(170)\n#    endif\n#    if BOOST_PP_LOCAL_C(171)\n        BOOST_PP_LOCAL_MACRO(171)\n#    endif\n#    if BOOST_PP_LOCAL_C(172)\n        BOOST_PP_LOCAL_MACRO(172)\n#    endif\n#    if BOOST_PP_LOCAL_C(173)\n        BOOST_PP_LOCAL_MACRO(173)\n#    endif\n#    if BOOST_PP_LOCAL_C(174)\n        BOOST_PP_LOCAL_MACRO(174)\n#    endif\n#    if BOOST_PP_LOCAL_C(175)\n        BOOST_PP_LOCAL_MACRO(175)\n#    endif\n#    if BOOST_PP_LOCAL_C(176)\n        BOOST_PP_LOCAL_MACRO(176)\n#    endif\n#    if BOOST_PP_LOCAL_C(177)\n        BOOST_PP_LOCAL_MACRO(177)\n#    endif\n#    if BOOST_PP_LOCAL_C(178)\n        BOOST_PP_LOCAL_MACRO(178)\n#    endif\n#    if BOOST_PP_LOCAL_C(179)\n        BOOST_PP_LOCAL_MACRO(179)\n#    endif\n#    if BOOST_PP_LOCAL_C(180)\n        BOOST_PP_LOCAL_MACRO(180)\n#    endif\n#    if BOOST_PP_LOCAL_C(181)\n        BOOST_PP_LOCAL_MACRO(181)\n#    endif\n#    if BOOST_PP_LOCAL_C(182)\n        BOOST_PP_LOCAL_MACRO(182)\n#    endif\n#    if BOOST_PP_LOCAL_C(183)\n        BOOST_PP_LOCAL_MACRO(183)\n#    endif\n#    if BOOST_PP_LOCAL_C(184)\n        BOOST_PP_LOCAL_MACRO(184)\n#    endif\n#    if BOOST_PP_LOCAL_C(185)\n        BOOST_PP_LOCAL_MACRO(185)\n#    endif\n#    if 
BOOST_PP_LOCAL_C(186)\n        BOOST_PP_LOCAL_MACRO(186)\n#    endif\n#    if BOOST_PP_LOCAL_C(187)\n        BOOST_PP_LOCAL_MACRO(187)\n#    endif\n#    if BOOST_PP_LOCAL_C(188)\n        BOOST_PP_LOCAL_MACRO(188)\n#    endif\n#    if BOOST_PP_LOCAL_C(189)\n        BOOST_PP_LOCAL_MACRO(189)\n#    endif\n#    if BOOST_PP_LOCAL_C(190)\n        BOOST_PP_LOCAL_MACRO(190)\n#    endif\n#    if BOOST_PP_LOCAL_C(191)\n        BOOST_PP_LOCAL_MACRO(191)\n#    endif\n#    if BOOST_PP_LOCAL_C(192)\n        BOOST_PP_LOCAL_MACRO(192)\n#    endif\n#    if BOOST_PP_LOCAL_C(193)\n        BOOST_PP_LOCAL_MACRO(193)\n#    endif\n#    if BOOST_PP_LOCAL_C(194)\n        BOOST_PP_LOCAL_MACRO(194)\n#    endif\n#    if BOOST_PP_LOCAL_C(195)\n        BOOST_PP_LOCAL_MACRO(195)\n#    endif\n#    if BOOST_PP_LOCAL_C(196)\n        BOOST_PP_LOCAL_MACRO(196)\n#    endif\n#    if BOOST_PP_LOCAL_C(197)\n        BOOST_PP_LOCAL_MACRO(197)\n#    endif\n#    if BOOST_PP_LOCAL_C(198)\n        BOOST_PP_LOCAL_MACRO(198)\n#    endif\n#    if BOOST_PP_LOCAL_C(199)\n        BOOST_PP_LOCAL_MACRO(199)\n#    endif\n#    if BOOST_PP_LOCAL_C(200)\n        BOOST_PP_LOCAL_MACRO(200)\n#    endif\n#    if BOOST_PP_LOCAL_C(201)\n        BOOST_PP_LOCAL_MACRO(201)\n#    endif\n#    if BOOST_PP_LOCAL_C(202)\n        BOOST_PP_LOCAL_MACRO(202)\n#    endif\n#    if BOOST_PP_LOCAL_C(203)\n        BOOST_PP_LOCAL_MACRO(203)\n#    endif\n#    if BOOST_PP_LOCAL_C(204)\n        BOOST_PP_LOCAL_MACRO(204)\n#    endif\n#    if BOOST_PP_LOCAL_C(205)\n        BOOST_PP_LOCAL_MACRO(205)\n#    endif\n#    if BOOST_PP_LOCAL_C(206)\n        BOOST_PP_LOCAL_MACRO(206)\n#    endif\n#    if BOOST_PP_LOCAL_C(207)\n        BOOST_PP_LOCAL_MACRO(207)\n#    endif\n#    if BOOST_PP_LOCAL_C(208)\n        BOOST_PP_LOCAL_MACRO(208)\n#    endif\n#    if BOOST_PP_LOCAL_C(209)\n        BOOST_PP_LOCAL_MACRO(209)\n#    endif\n#    if BOOST_PP_LOCAL_C(210)\n        BOOST_PP_LOCAL_MACRO(210)\n#    endif\n#    if BOOST_PP_LOCAL_C(211)\n        
BOOST_PP_LOCAL_MACRO(211)\n#    endif\n#    if BOOST_PP_LOCAL_C(212)\n        BOOST_PP_LOCAL_MACRO(212)\n#    endif\n#    if BOOST_PP_LOCAL_C(213)\n        BOOST_PP_LOCAL_MACRO(213)\n#    endif\n#    if BOOST_PP_LOCAL_C(214)\n        BOOST_PP_LOCAL_MACRO(214)\n#    endif\n#    if BOOST_PP_LOCAL_C(215)\n        BOOST_PP_LOCAL_MACRO(215)\n#    endif\n#    if BOOST_PP_LOCAL_C(216)\n        BOOST_PP_LOCAL_MACRO(216)\n#    endif\n#    if BOOST_PP_LOCAL_C(217)\n        BOOST_PP_LOCAL_MACRO(217)\n#    endif\n#    if BOOST_PP_LOCAL_C(218)\n        BOOST_PP_LOCAL_MACRO(218)\n#    endif\n#    if BOOST_PP_LOCAL_C(219)\n        BOOST_PP_LOCAL_MACRO(219)\n#    endif\n#    if BOOST_PP_LOCAL_C(220)\n        BOOST_PP_LOCAL_MACRO(220)\n#    endif\n#    if BOOST_PP_LOCAL_C(221)\n        BOOST_PP_LOCAL_MACRO(221)\n#    endif\n#    if BOOST_PP_LOCAL_C(222)\n        BOOST_PP_LOCAL_MACRO(222)\n#    endif\n#    if BOOST_PP_LOCAL_C(223)\n        BOOST_PP_LOCAL_MACRO(223)\n#    endif\n#    if BOOST_PP_LOCAL_C(224)\n        BOOST_PP_LOCAL_MACRO(224)\n#    endif\n#    if BOOST_PP_LOCAL_C(225)\n        BOOST_PP_LOCAL_MACRO(225)\n#    endif\n#    if BOOST_PP_LOCAL_C(226)\n        BOOST_PP_LOCAL_MACRO(226)\n#    endif\n#    if BOOST_PP_LOCAL_C(227)\n        BOOST_PP_LOCAL_MACRO(227)\n#    endif\n#    if BOOST_PP_LOCAL_C(228)\n        BOOST_PP_LOCAL_MACRO(228)\n#    endif\n#    if BOOST_PP_LOCAL_C(229)\n        BOOST_PP_LOCAL_MACRO(229)\n#    endif\n#    if BOOST_PP_LOCAL_C(230)\n        BOOST_PP_LOCAL_MACRO(230)\n#    endif\n#    if BOOST_PP_LOCAL_C(231)\n        BOOST_PP_LOCAL_MACRO(231)\n#    endif\n#    if BOOST_PP_LOCAL_C(232)\n        BOOST_PP_LOCAL_MACRO(232)\n#    endif\n#    if BOOST_PP_LOCAL_C(233)\n        BOOST_PP_LOCAL_MACRO(233)\n#    endif\n#    if BOOST_PP_LOCAL_C(234)\n        BOOST_PP_LOCAL_MACRO(234)\n#    endif\n#    if BOOST_PP_LOCAL_C(235)\n        BOOST_PP_LOCAL_MACRO(235)\n#    endif\n#    if BOOST_PP_LOCAL_C(236)\n        BOOST_PP_LOCAL_MACRO(236)\n#    endif\n\n#    if 
BOOST_PP_LOCAL_C(237)\n        BOOST_PP_LOCAL_MACRO(237)\n#    endif\n#    if BOOST_PP_LOCAL_C(238)\n        BOOST_PP_LOCAL_MACRO(238)\n#    endif\n#    if BOOST_PP_LOCAL_C(239)\n        BOOST_PP_LOCAL_MACRO(239)\n#    endif\n#    if BOOST_PP_LOCAL_C(240)\n        BOOST_PP_LOCAL_MACRO(240)\n#    endif\n#    if BOOST_PP_LOCAL_C(241)\n        BOOST_PP_LOCAL_MACRO(241)\n#    endif\n#    if BOOST_PP_LOCAL_C(242)\n        BOOST_PP_LOCAL_MACRO(242)\n#    endif\n#    if BOOST_PP_LOCAL_C(243)\n        BOOST_PP_LOCAL_MACRO(243)\n#    endif\n#    if BOOST_PP_LOCAL_C(244)\n        BOOST_PP_LOCAL_MACRO(244)\n#    endif\n#    if BOOST_PP_LOCAL_C(245)\n        BOOST_PP_LOCAL_MACRO(245)\n#    endif\n#    if BOOST_PP_LOCAL_C(246)\n        BOOST_PP_LOCAL_MACRO(246)\n#    endif\n#    if BOOST_PP_LOCAL_C(247)\n        BOOST_PP_LOCAL_MACRO(247)\n#    endif\n#    if BOOST_PP_LOCAL_C(248)\n        BOOST_PP_LOCAL_MACRO(248)\n#    endif\n#    if BOOST_PP_LOCAL_C(249)\n        BOOST_PP_LOCAL_MACRO(249)\n#    endif\n#    if BOOST_PP_LOCAL_C(250)\n        BOOST_PP_LOCAL_MACRO(250)\n#    endif\n#    if BOOST_PP_LOCAL_C(251)\n        BOOST_PP_LOCAL_MACRO(251)\n#    endif\n#    if BOOST_PP_LOCAL_C(252)\n        BOOST_PP_LOCAL_MACRO(252)\n#    endif\n#    if BOOST_PP_LOCAL_C(253)\n        BOOST_PP_LOCAL_MACRO(253)\n#    endif\n#    if BOOST_PP_LOCAL_C(254)\n        BOOST_PP_LOCAL_MACRO(254)\n#    endif\n#    if BOOST_PP_LOCAL_C(255)\n        BOOST_PP_LOCAL_MACRO(255)\n#    endif\n#    if BOOST_PP_LOCAL_C(256)\n        BOOST_PP_LOCAL_MACRO(256)\n#    endif\n# endif\n#\n# undef BOOST_PP_LOCAL_LIMITS\n#\n# undef BOOST_PP_LOCAL_S\n# undef BOOST_PP_LOCAL_F\n#\n# undef BOOST_PP_LOCAL_MACRO\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/rlocal.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if BOOST_PP_LOCAL_R(256)\n    BOOST_PP_LOCAL_MACRO(256)\n# endif\n# if BOOST_PP_LOCAL_R(255)\n    BOOST_PP_LOCAL_MACRO(255)\n# endif\n# if BOOST_PP_LOCAL_R(254)\n    BOOST_PP_LOCAL_MACRO(254)\n# endif\n# if BOOST_PP_LOCAL_R(253)\n    BOOST_PP_LOCAL_MACRO(253)\n# endif\n# if BOOST_PP_LOCAL_R(252)\n    BOOST_PP_LOCAL_MACRO(252)\n# endif\n# if BOOST_PP_LOCAL_R(251)\n    BOOST_PP_LOCAL_MACRO(251)\n# endif\n# if BOOST_PP_LOCAL_R(250)\n    BOOST_PP_LOCAL_MACRO(250)\n# endif\n# if BOOST_PP_LOCAL_R(249)\n    BOOST_PP_LOCAL_MACRO(249)\n# endif\n# if BOOST_PP_LOCAL_R(248)\n    BOOST_PP_LOCAL_MACRO(248)\n# endif\n# if BOOST_PP_LOCAL_R(247)\n    BOOST_PP_LOCAL_MACRO(247)\n# endif\n# if BOOST_PP_LOCAL_R(246)\n    BOOST_PP_LOCAL_MACRO(246)\n# endif\n# if BOOST_PP_LOCAL_R(245)\n    BOOST_PP_LOCAL_MACRO(245)\n# endif\n# if BOOST_PP_LOCAL_R(244)\n    BOOST_PP_LOCAL_MACRO(244)\n# endif\n# if BOOST_PP_LOCAL_R(243)\n    BOOST_PP_LOCAL_MACRO(243)\n# endif\n# if BOOST_PP_LOCAL_R(242)\n    BOOST_PP_LOCAL_MACRO(242)\n# endif\n# if BOOST_PP_LOCAL_R(241)\n    BOOST_PP_LOCAL_MACRO(241)\n# endif\n# if BOOST_PP_LOCAL_R(240)\n    BOOST_PP_LOCAL_MACRO(240)\n# endif\n# if BOOST_PP_LOCAL_R(239)\n    BOOST_PP_LOCAL_MACRO(239)\n# endif\n# if BOOST_PP_LOCAL_R(238)\n    BOOST_PP_LOCAL_MACRO(238)\n# endif\n# if BOOST_PP_LOCAL_R(237)\n    BOOST_PP_LOCAL_MACRO(237)\n# endif\n# if 
BOOST_PP_LOCAL_R(236)\n    BOOST_PP_LOCAL_MACRO(236)\n# endif\n# if BOOST_PP_LOCAL_R(235)\n    BOOST_PP_LOCAL_MACRO(235)\n# endif\n# if BOOST_PP_LOCAL_R(234)\n    BOOST_PP_LOCAL_MACRO(234)\n# endif\n# if BOOST_PP_LOCAL_R(233)\n    BOOST_PP_LOCAL_MACRO(233)\n# endif\n# if BOOST_PP_LOCAL_R(232)\n    BOOST_PP_LOCAL_MACRO(232)\n# endif\n# if BOOST_PP_LOCAL_R(231)\n    BOOST_PP_LOCAL_MACRO(231)\n# endif\n# if BOOST_PP_LOCAL_R(230)\n    BOOST_PP_LOCAL_MACRO(230)\n# endif\n# if BOOST_PP_LOCAL_R(229)\n    BOOST_PP_LOCAL_MACRO(229)\n# endif\n# if BOOST_PP_LOCAL_R(228)\n    BOOST_PP_LOCAL_MACRO(228)\n# endif\n# if BOOST_PP_LOCAL_R(227)\n    BOOST_PP_LOCAL_MACRO(227)\n# endif\n# if BOOST_PP_LOCAL_R(226)\n    BOOST_PP_LOCAL_MACRO(226)\n# endif\n# if BOOST_PP_LOCAL_R(225)\n    BOOST_PP_LOCAL_MACRO(225)\n# endif\n# if BOOST_PP_LOCAL_R(224)\n    BOOST_PP_LOCAL_MACRO(224)\n# endif\n# if BOOST_PP_LOCAL_R(223)\n    BOOST_PP_LOCAL_MACRO(223)\n# endif\n# if BOOST_PP_LOCAL_R(222)\n    BOOST_PP_LOCAL_MACRO(222)\n# endif\n# if BOOST_PP_LOCAL_R(221)\n    BOOST_PP_LOCAL_MACRO(221)\n# endif\n# if BOOST_PP_LOCAL_R(220)\n    BOOST_PP_LOCAL_MACRO(220)\n# endif\n# if BOOST_PP_LOCAL_R(219)\n    BOOST_PP_LOCAL_MACRO(219)\n# endif\n# if BOOST_PP_LOCAL_R(218)\n    BOOST_PP_LOCAL_MACRO(218)\n# endif\n# if BOOST_PP_LOCAL_R(217)\n    BOOST_PP_LOCAL_MACRO(217)\n# endif\n# if BOOST_PP_LOCAL_R(216)\n    BOOST_PP_LOCAL_MACRO(216)\n# endif\n# if BOOST_PP_LOCAL_R(215)\n    BOOST_PP_LOCAL_MACRO(215)\n# endif\n# if BOOST_PP_LOCAL_R(214)\n    BOOST_PP_LOCAL_MACRO(214)\n# endif\n# if BOOST_PP_LOCAL_R(213)\n    BOOST_PP_LOCAL_MACRO(213)\n# endif\n# if BOOST_PP_LOCAL_R(212)\n    BOOST_PP_LOCAL_MACRO(212)\n# endif\n# if BOOST_PP_LOCAL_R(211)\n    BOOST_PP_LOCAL_MACRO(211)\n# endif\n# if BOOST_PP_LOCAL_R(210)\n    BOOST_PP_LOCAL_MACRO(210)\n# endif\n# if BOOST_PP_LOCAL_R(209)\n    BOOST_PP_LOCAL_MACRO(209)\n# endif\n# if BOOST_PP_LOCAL_R(208)\n    BOOST_PP_LOCAL_MACRO(208)\n# endif\n# if BOOST_PP_LOCAL_R(207)\n    
BOOST_PP_LOCAL_MACRO(207)\n# endif\n# if BOOST_PP_LOCAL_R(206)\n    BOOST_PP_LOCAL_MACRO(206)\n# endif\n# if BOOST_PP_LOCAL_R(205)\n    BOOST_PP_LOCAL_MACRO(205)\n# endif\n# if BOOST_PP_LOCAL_R(204)\n    BOOST_PP_LOCAL_MACRO(204)\n# endif\n# if BOOST_PP_LOCAL_R(203)\n    BOOST_PP_LOCAL_MACRO(203)\n# endif\n# if BOOST_PP_LOCAL_R(202)\n    BOOST_PP_LOCAL_MACRO(202)\n# endif\n# if BOOST_PP_LOCAL_R(201)\n    BOOST_PP_LOCAL_MACRO(201)\n# endif\n# if BOOST_PP_LOCAL_R(200)\n    BOOST_PP_LOCAL_MACRO(200)\n# endif\n# if BOOST_PP_LOCAL_R(199)\n    BOOST_PP_LOCAL_MACRO(199)\n# endif\n# if BOOST_PP_LOCAL_R(198)\n    BOOST_PP_LOCAL_MACRO(198)\n# endif\n# if BOOST_PP_LOCAL_R(197)\n    BOOST_PP_LOCAL_MACRO(197)\n# endif\n# if BOOST_PP_LOCAL_R(196)\n    BOOST_PP_LOCAL_MACRO(196)\n# endif\n# if BOOST_PP_LOCAL_R(195)\n    BOOST_PP_LOCAL_MACRO(195)\n# endif\n# if BOOST_PP_LOCAL_R(194)\n    BOOST_PP_LOCAL_MACRO(194)\n# endif\n# if BOOST_PP_LOCAL_R(193)\n    BOOST_PP_LOCAL_MACRO(193)\n# endif\n# if BOOST_PP_LOCAL_R(192)\n    BOOST_PP_LOCAL_MACRO(192)\n# endif\n# if BOOST_PP_LOCAL_R(191)\n    BOOST_PP_LOCAL_MACRO(191)\n# endif\n# if BOOST_PP_LOCAL_R(190)\n    BOOST_PP_LOCAL_MACRO(190)\n# endif\n# if BOOST_PP_LOCAL_R(189)\n    BOOST_PP_LOCAL_MACRO(189)\n# endif\n# if BOOST_PP_LOCAL_R(188)\n    BOOST_PP_LOCAL_MACRO(188)\n# endif\n# if BOOST_PP_LOCAL_R(187)\n    BOOST_PP_LOCAL_MACRO(187)\n# endif\n# if BOOST_PP_LOCAL_R(186)\n    BOOST_PP_LOCAL_MACRO(186)\n# endif\n# if BOOST_PP_LOCAL_R(185)\n    BOOST_PP_LOCAL_MACRO(185)\n# endif\n# if BOOST_PP_LOCAL_R(184)\n    BOOST_PP_LOCAL_MACRO(184)\n# endif\n# if BOOST_PP_LOCAL_R(183)\n    BOOST_PP_LOCAL_MACRO(183)\n# endif\n# if BOOST_PP_LOCAL_R(182)\n    BOOST_PP_LOCAL_MACRO(182)\n# endif\n# if BOOST_PP_LOCAL_R(181)\n    BOOST_PP_LOCAL_MACRO(181)\n# endif\n# if BOOST_PP_LOCAL_R(180)\n    BOOST_PP_LOCAL_MACRO(180)\n# endif\n# if BOOST_PP_LOCAL_R(179)\n    BOOST_PP_LOCAL_MACRO(179)\n# endif\n# if BOOST_PP_LOCAL_R(178)\n    
BOOST_PP_LOCAL_MACRO(178)\n# endif\n# if BOOST_PP_LOCAL_R(177)\n    BOOST_PP_LOCAL_MACRO(177)\n# endif\n# if BOOST_PP_LOCAL_R(176)\n    BOOST_PP_LOCAL_MACRO(176)\n# endif\n# if BOOST_PP_LOCAL_R(175)\n    BOOST_PP_LOCAL_MACRO(175)\n# endif\n# if BOOST_PP_LOCAL_R(174)\n    BOOST_PP_LOCAL_MACRO(174)\n# endif\n# if BOOST_PP_LOCAL_R(173)\n    BOOST_PP_LOCAL_MACRO(173)\n# endif\n# if BOOST_PP_LOCAL_R(172)\n    BOOST_PP_LOCAL_MACRO(172)\n# endif\n# if BOOST_PP_LOCAL_R(171)\n    BOOST_PP_LOCAL_MACRO(171)\n# endif\n# if BOOST_PP_LOCAL_R(170)\n    BOOST_PP_LOCAL_MACRO(170)\n# endif\n# if BOOST_PP_LOCAL_R(169)\n    BOOST_PP_LOCAL_MACRO(169)\n# endif\n# if BOOST_PP_LOCAL_R(168)\n    BOOST_PP_LOCAL_MACRO(168)\n# endif\n# if BOOST_PP_LOCAL_R(167)\n    BOOST_PP_LOCAL_MACRO(167)\n# endif\n# if BOOST_PP_LOCAL_R(166)\n    BOOST_PP_LOCAL_MACRO(166)\n# endif\n# if BOOST_PP_LOCAL_R(165)\n    BOOST_PP_LOCAL_MACRO(165)\n# endif\n# if BOOST_PP_LOCAL_R(164)\n    BOOST_PP_LOCAL_MACRO(164)\n# endif\n# if BOOST_PP_LOCAL_R(163)\n    BOOST_PP_LOCAL_MACRO(163)\n# endif\n# if BOOST_PP_LOCAL_R(162)\n    BOOST_PP_LOCAL_MACRO(162)\n# endif\n# if BOOST_PP_LOCAL_R(161)\n    BOOST_PP_LOCAL_MACRO(161)\n# endif\n# if BOOST_PP_LOCAL_R(160)\n    BOOST_PP_LOCAL_MACRO(160)\n# endif\n# if BOOST_PP_LOCAL_R(159)\n    BOOST_PP_LOCAL_MACRO(159)\n# endif\n# if BOOST_PP_LOCAL_R(158)\n    BOOST_PP_LOCAL_MACRO(158)\n# endif\n# if BOOST_PP_LOCAL_R(157)\n    BOOST_PP_LOCAL_MACRO(157)\n# endif\n# if BOOST_PP_LOCAL_R(156)\n    BOOST_PP_LOCAL_MACRO(156)\n# endif\n# if BOOST_PP_LOCAL_R(155)\n    BOOST_PP_LOCAL_MACRO(155)\n# endif\n# if BOOST_PP_LOCAL_R(154)\n    BOOST_PP_LOCAL_MACRO(154)\n# endif\n# if BOOST_PP_LOCAL_R(153)\n    BOOST_PP_LOCAL_MACRO(153)\n# endif\n# if BOOST_PP_LOCAL_R(152)\n    BOOST_PP_LOCAL_MACRO(152)\n# endif\n# if BOOST_PP_LOCAL_R(151)\n    BOOST_PP_LOCAL_MACRO(151)\n# endif\n# if BOOST_PP_LOCAL_R(150)\n    BOOST_PP_LOCAL_MACRO(150)\n# endif\n# if BOOST_PP_LOCAL_R(149)\n    
BOOST_PP_LOCAL_MACRO(149)\n# endif\n# if BOOST_PP_LOCAL_R(148)\n    BOOST_PP_LOCAL_MACRO(148)\n# endif\n# if BOOST_PP_LOCAL_R(147)\n    BOOST_PP_LOCAL_MACRO(147)\n# endif\n# if BOOST_PP_LOCAL_R(146)\n    BOOST_PP_LOCAL_MACRO(146)\n# endif\n# if BOOST_PP_LOCAL_R(145)\n    BOOST_PP_LOCAL_MACRO(145)\n# endif\n# if BOOST_PP_LOCAL_R(144)\n    BOOST_PP_LOCAL_MACRO(144)\n# endif\n# if BOOST_PP_LOCAL_R(143)\n    BOOST_PP_LOCAL_MACRO(143)\n# endif\n# if BOOST_PP_LOCAL_R(142)\n    BOOST_PP_LOCAL_MACRO(142)\n# endif\n# if BOOST_PP_LOCAL_R(141)\n    BOOST_PP_LOCAL_MACRO(141)\n# endif\n# if BOOST_PP_LOCAL_R(140)\n    BOOST_PP_LOCAL_MACRO(140)\n# endif\n# if BOOST_PP_LOCAL_R(139)\n    BOOST_PP_LOCAL_MACRO(139)\n# endif\n# if BOOST_PP_LOCAL_R(138)\n    BOOST_PP_LOCAL_MACRO(138)\n# endif\n# if BOOST_PP_LOCAL_R(137)\n    BOOST_PP_LOCAL_MACRO(137)\n# endif\n# if BOOST_PP_LOCAL_R(136)\n    BOOST_PP_LOCAL_MACRO(136)\n# endif\n# if BOOST_PP_LOCAL_R(135)\n    BOOST_PP_LOCAL_MACRO(135)\n# endif\n# if BOOST_PP_LOCAL_R(134)\n    BOOST_PP_LOCAL_MACRO(134)\n# endif\n# if BOOST_PP_LOCAL_R(133)\n    BOOST_PP_LOCAL_MACRO(133)\n# endif\n# if BOOST_PP_LOCAL_R(132)\n    BOOST_PP_LOCAL_MACRO(132)\n# endif\n# if BOOST_PP_LOCAL_R(131)\n    BOOST_PP_LOCAL_MACRO(131)\n# endif\n# if BOOST_PP_LOCAL_R(130)\n    BOOST_PP_LOCAL_MACRO(130)\n# endif\n# if BOOST_PP_LOCAL_R(129)\n    BOOST_PP_LOCAL_MACRO(129)\n# endif\n# if BOOST_PP_LOCAL_R(128)\n    BOOST_PP_LOCAL_MACRO(128)\n# endif\n# if BOOST_PP_LOCAL_R(127)\n    BOOST_PP_LOCAL_MACRO(127)\n# endif\n# if BOOST_PP_LOCAL_R(126)\n    BOOST_PP_LOCAL_MACRO(126)\n# endif\n# if BOOST_PP_LOCAL_R(125)\n    BOOST_PP_LOCAL_MACRO(125)\n# endif\n# if BOOST_PP_LOCAL_R(124)\n    BOOST_PP_LOCAL_MACRO(124)\n# endif\n# if BOOST_PP_LOCAL_R(123)\n    BOOST_PP_LOCAL_MACRO(123)\n# endif\n# if BOOST_PP_LOCAL_R(122)\n    BOOST_PP_LOCAL_MACRO(122)\n# endif\n# if BOOST_PP_LOCAL_R(121)\n    BOOST_PP_LOCAL_MACRO(121)\n# endif\n# if BOOST_PP_LOCAL_R(120)\n    
BOOST_PP_LOCAL_MACRO(120)\n# endif\n# if BOOST_PP_LOCAL_R(119)\n    BOOST_PP_LOCAL_MACRO(119)\n# endif\n# if BOOST_PP_LOCAL_R(118)\n    BOOST_PP_LOCAL_MACRO(118)\n# endif\n# if BOOST_PP_LOCAL_R(117)\n    BOOST_PP_LOCAL_MACRO(117)\n# endif\n# if BOOST_PP_LOCAL_R(116)\n    BOOST_PP_LOCAL_MACRO(116)\n# endif\n# if BOOST_PP_LOCAL_R(115)\n    BOOST_PP_LOCAL_MACRO(115)\n# endif\n# if BOOST_PP_LOCAL_R(114)\n    BOOST_PP_LOCAL_MACRO(114)\n# endif\n# if BOOST_PP_LOCAL_R(113)\n    BOOST_PP_LOCAL_MACRO(113)\n# endif\n# if BOOST_PP_LOCAL_R(112)\n    BOOST_PP_LOCAL_MACRO(112)\n# endif\n# if BOOST_PP_LOCAL_R(111)\n    BOOST_PP_LOCAL_MACRO(111)\n# endif\n# if BOOST_PP_LOCAL_R(110)\n    BOOST_PP_LOCAL_MACRO(110)\n# endif\n# if BOOST_PP_LOCAL_R(109)\n    BOOST_PP_LOCAL_MACRO(109)\n# endif\n# if BOOST_PP_LOCAL_R(108)\n    BOOST_PP_LOCAL_MACRO(108)\n# endif\n# if BOOST_PP_LOCAL_R(107)\n    BOOST_PP_LOCAL_MACRO(107)\n# endif\n# if BOOST_PP_LOCAL_R(106)\n    BOOST_PP_LOCAL_MACRO(106)\n# endif\n# if BOOST_PP_LOCAL_R(105)\n    BOOST_PP_LOCAL_MACRO(105)\n# endif\n# if BOOST_PP_LOCAL_R(104)\n    BOOST_PP_LOCAL_MACRO(104)\n# endif\n# if BOOST_PP_LOCAL_R(103)\n    BOOST_PP_LOCAL_MACRO(103)\n# endif\n# if BOOST_PP_LOCAL_R(102)\n    BOOST_PP_LOCAL_MACRO(102)\n# endif\n# if BOOST_PP_LOCAL_R(101)\n    BOOST_PP_LOCAL_MACRO(101)\n# endif\n# if BOOST_PP_LOCAL_R(100)\n    BOOST_PP_LOCAL_MACRO(100)\n# endif\n# if BOOST_PP_LOCAL_R(99)\n    BOOST_PP_LOCAL_MACRO(99)\n# endif\n# if BOOST_PP_LOCAL_R(98)\n    BOOST_PP_LOCAL_MACRO(98)\n# endif\n# if BOOST_PP_LOCAL_R(97)\n    BOOST_PP_LOCAL_MACRO(97)\n# endif\n# if BOOST_PP_LOCAL_R(96)\n    BOOST_PP_LOCAL_MACRO(96)\n# endif\n# if BOOST_PP_LOCAL_R(95)\n    BOOST_PP_LOCAL_MACRO(95)\n# endif\n# if BOOST_PP_LOCAL_R(94)\n    BOOST_PP_LOCAL_MACRO(94)\n# endif\n# if BOOST_PP_LOCAL_R(93)\n    BOOST_PP_LOCAL_MACRO(93)\n# endif\n# if BOOST_PP_LOCAL_R(92)\n    BOOST_PP_LOCAL_MACRO(92)\n# endif\n# if BOOST_PP_LOCAL_R(91)\n    BOOST_PP_LOCAL_MACRO(91)\n# endif\n# if 
BOOST_PP_LOCAL_R(90)\n    BOOST_PP_LOCAL_MACRO(90)\n# endif\n# if BOOST_PP_LOCAL_R(89)\n    BOOST_PP_LOCAL_MACRO(89)\n# endif\n# if BOOST_PP_LOCAL_R(88)\n    BOOST_PP_LOCAL_MACRO(88)\n# endif\n# if BOOST_PP_LOCAL_R(87)\n    BOOST_PP_LOCAL_MACRO(87)\n# endif\n# if BOOST_PP_LOCAL_R(86)\n    BOOST_PP_LOCAL_MACRO(86)\n# endif\n# if BOOST_PP_LOCAL_R(85)\n    BOOST_PP_LOCAL_MACRO(85)\n# endif\n# if BOOST_PP_LOCAL_R(84)\n    BOOST_PP_LOCAL_MACRO(84)\n# endif\n# if BOOST_PP_LOCAL_R(83)\n    BOOST_PP_LOCAL_MACRO(83)\n# endif\n# if BOOST_PP_LOCAL_R(82)\n    BOOST_PP_LOCAL_MACRO(82)\n# endif\n# if BOOST_PP_LOCAL_R(81)\n    BOOST_PP_LOCAL_MACRO(81)\n# endif\n# if BOOST_PP_LOCAL_R(80)\n    BOOST_PP_LOCAL_MACRO(80)\n# endif\n# if BOOST_PP_LOCAL_R(79)\n    BOOST_PP_LOCAL_MACRO(79)\n# endif\n# if BOOST_PP_LOCAL_R(78)\n    BOOST_PP_LOCAL_MACRO(78)\n# endif\n# if BOOST_PP_LOCAL_R(77)\n    BOOST_PP_LOCAL_MACRO(77)\n# endif\n# if BOOST_PP_LOCAL_R(76)\n    BOOST_PP_LOCAL_MACRO(76)\n# endif\n# if BOOST_PP_LOCAL_R(75)\n    BOOST_PP_LOCAL_MACRO(75)\n# endif\n# if BOOST_PP_LOCAL_R(74)\n    BOOST_PP_LOCAL_MACRO(74)\n# endif\n# if BOOST_PP_LOCAL_R(73)\n    BOOST_PP_LOCAL_MACRO(73)\n# endif\n# if BOOST_PP_LOCAL_R(72)\n    BOOST_PP_LOCAL_MACRO(72)\n# endif\n# if BOOST_PP_LOCAL_R(71)\n    BOOST_PP_LOCAL_MACRO(71)\n# endif\n# if BOOST_PP_LOCAL_R(70)\n    BOOST_PP_LOCAL_MACRO(70)\n# endif\n# if BOOST_PP_LOCAL_R(69)\n    BOOST_PP_LOCAL_MACRO(69)\n# endif\n# if BOOST_PP_LOCAL_R(68)\n    BOOST_PP_LOCAL_MACRO(68)\n# endif\n# if BOOST_PP_LOCAL_R(67)\n    BOOST_PP_LOCAL_MACRO(67)\n# endif\n# if BOOST_PP_LOCAL_R(66)\n    BOOST_PP_LOCAL_MACRO(66)\n# endif\n# if BOOST_PP_LOCAL_R(65)\n    BOOST_PP_LOCAL_MACRO(65)\n# endif\n# if BOOST_PP_LOCAL_R(64)\n    BOOST_PP_LOCAL_MACRO(64)\n# endif\n# if BOOST_PP_LOCAL_R(63)\n    BOOST_PP_LOCAL_MACRO(63)\n# endif\n# if BOOST_PP_LOCAL_R(62)\n    BOOST_PP_LOCAL_MACRO(62)\n# endif\n# if BOOST_PP_LOCAL_R(61)\n    BOOST_PP_LOCAL_MACRO(61)\n# endif\n# if 
BOOST_PP_LOCAL_R(60)\n    BOOST_PP_LOCAL_MACRO(60)\n# endif\n# if BOOST_PP_LOCAL_R(59)\n    BOOST_PP_LOCAL_MACRO(59)\n# endif\n# if BOOST_PP_LOCAL_R(58)\n    BOOST_PP_LOCAL_MACRO(58)\n# endif\n# if BOOST_PP_LOCAL_R(57)\n    BOOST_PP_LOCAL_MACRO(57)\n# endif\n# if BOOST_PP_LOCAL_R(56)\n    BOOST_PP_LOCAL_MACRO(56)\n# endif\n# if BOOST_PP_LOCAL_R(55)\n    BOOST_PP_LOCAL_MACRO(55)\n# endif\n# if BOOST_PP_LOCAL_R(54)\n    BOOST_PP_LOCAL_MACRO(54)\n# endif\n# if BOOST_PP_LOCAL_R(53)\n    BOOST_PP_LOCAL_MACRO(53)\n# endif\n# if BOOST_PP_LOCAL_R(52)\n    BOOST_PP_LOCAL_MACRO(52)\n# endif\n# if BOOST_PP_LOCAL_R(51)\n    BOOST_PP_LOCAL_MACRO(51)\n# endif\n# if BOOST_PP_LOCAL_R(50)\n    BOOST_PP_LOCAL_MACRO(50)\n# endif\n# if BOOST_PP_LOCAL_R(49)\n    BOOST_PP_LOCAL_MACRO(49)\n# endif\n# if BOOST_PP_LOCAL_R(48)\n    BOOST_PP_LOCAL_MACRO(48)\n# endif\n# if BOOST_PP_LOCAL_R(47)\n    BOOST_PP_LOCAL_MACRO(47)\n# endif\n# if BOOST_PP_LOCAL_R(46)\n    BOOST_PP_LOCAL_MACRO(46)\n# endif\n# if BOOST_PP_LOCAL_R(45)\n    BOOST_PP_LOCAL_MACRO(45)\n# endif\n# if BOOST_PP_LOCAL_R(44)\n    BOOST_PP_LOCAL_MACRO(44)\n# endif\n# if BOOST_PP_LOCAL_R(43)\n    BOOST_PP_LOCAL_MACRO(43)\n# endif\n# if BOOST_PP_LOCAL_R(42)\n    BOOST_PP_LOCAL_MACRO(42)\n# endif\n# if BOOST_PP_LOCAL_R(41)\n    BOOST_PP_LOCAL_MACRO(41)\n# endif\n# if BOOST_PP_LOCAL_R(40)\n    BOOST_PP_LOCAL_MACRO(40)\n# endif\n# if BOOST_PP_LOCAL_R(39)\n    BOOST_PP_LOCAL_MACRO(39)\n# endif\n# if BOOST_PP_LOCAL_R(38)\n    BOOST_PP_LOCAL_MACRO(38)\n# endif\n# if BOOST_PP_LOCAL_R(37)\n    BOOST_PP_LOCAL_MACRO(37)\n# endif\n# if BOOST_PP_LOCAL_R(36)\n    BOOST_PP_LOCAL_MACRO(36)\n# endif\n# if BOOST_PP_LOCAL_R(35)\n    BOOST_PP_LOCAL_MACRO(35)\n# endif\n# if BOOST_PP_LOCAL_R(34)\n    BOOST_PP_LOCAL_MACRO(34)\n# endif\n# if BOOST_PP_LOCAL_R(33)\n    BOOST_PP_LOCAL_MACRO(33)\n# endif\n# if BOOST_PP_LOCAL_R(32)\n    BOOST_PP_LOCAL_MACRO(32)\n# endif\n# if BOOST_PP_LOCAL_R(31)\n    BOOST_PP_LOCAL_MACRO(31)\n# endif\n# if 
BOOST_PP_LOCAL_R(30)\n    BOOST_PP_LOCAL_MACRO(30)\n# endif\n# if BOOST_PP_LOCAL_R(29)\n    BOOST_PP_LOCAL_MACRO(29)\n# endif\n# if BOOST_PP_LOCAL_R(28)\n    BOOST_PP_LOCAL_MACRO(28)\n# endif\n# if BOOST_PP_LOCAL_R(27)\n    BOOST_PP_LOCAL_MACRO(27)\n# endif\n# if BOOST_PP_LOCAL_R(26)\n    BOOST_PP_LOCAL_MACRO(26)\n# endif\n# if BOOST_PP_LOCAL_R(25)\n    BOOST_PP_LOCAL_MACRO(25)\n# endif\n# if BOOST_PP_LOCAL_R(24)\n    BOOST_PP_LOCAL_MACRO(24)\n# endif\n# if BOOST_PP_LOCAL_R(23)\n    BOOST_PP_LOCAL_MACRO(23)\n# endif\n# if BOOST_PP_LOCAL_R(22)\n    BOOST_PP_LOCAL_MACRO(22)\n# endif\n# if BOOST_PP_LOCAL_R(21)\n    BOOST_PP_LOCAL_MACRO(21)\n# endif\n# if BOOST_PP_LOCAL_R(20)\n    BOOST_PP_LOCAL_MACRO(20)\n# endif\n# if BOOST_PP_LOCAL_R(19)\n    BOOST_PP_LOCAL_MACRO(19)\n# endif\n# if BOOST_PP_LOCAL_R(18)\n    BOOST_PP_LOCAL_MACRO(18)\n# endif\n# if BOOST_PP_LOCAL_R(17)\n    BOOST_PP_LOCAL_MACRO(17)\n# endif\n# if BOOST_PP_LOCAL_R(16)\n    BOOST_PP_LOCAL_MACRO(16)\n# endif\n# if BOOST_PP_LOCAL_R(15)\n    BOOST_PP_LOCAL_MACRO(15)\n# endif\n# if BOOST_PP_LOCAL_R(14)\n    BOOST_PP_LOCAL_MACRO(14)\n# endif\n# if BOOST_PP_LOCAL_R(13)\n    BOOST_PP_LOCAL_MACRO(13)\n# endif\n# if BOOST_PP_LOCAL_R(12)\n    BOOST_PP_LOCAL_MACRO(12)\n# endif\n# if BOOST_PP_LOCAL_R(11)\n    BOOST_PP_LOCAL_MACRO(11)\n# endif\n# if BOOST_PP_LOCAL_R(10)\n    BOOST_PP_LOCAL_MACRO(10)\n# endif\n# if BOOST_PP_LOCAL_R(9)\n    BOOST_PP_LOCAL_MACRO(9)\n# endif\n# if BOOST_PP_LOCAL_R(8)\n    BOOST_PP_LOCAL_MACRO(8)\n# endif\n# if BOOST_PP_LOCAL_R(7)\n    BOOST_PP_LOCAL_MACRO(7)\n# endif\n# if BOOST_PP_LOCAL_R(6)\n    BOOST_PP_LOCAL_MACRO(6)\n# endif\n# if BOOST_PP_LOCAL_R(5)\n    BOOST_PP_LOCAL_MACRO(5)\n# endif\n# if BOOST_PP_LOCAL_R(4)\n    BOOST_PP_LOCAL_MACRO(4)\n# endif\n# if BOOST_PP_LOCAL_R(3)\n    BOOST_PP_LOCAL_MACRO(3)\n# endif\n# if BOOST_PP_LOCAL_R(2)\n    BOOST_PP_LOCAL_MACRO(2)\n# endif\n# if BOOST_PP_LOCAL_R(1)\n    BOOST_PP_LOCAL_MACRO(1)\n# endif\n# if BOOST_PP_LOCAL_R(0)\n    
BOOST_PP_LOCAL_MACRO(0)\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/self.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# if !defined(BOOST_PP_INDIRECT_SELF)\n#    error BOOST_PP_ERROR:  no indirect file to include\n# endif\n#\n# define BOOST_PP_IS_SELFISH 1\n#\n# include BOOST_PP_INDIRECT_SELF\n#\n# undef BOOST_PP_IS_SELFISH\n# undef BOOST_PP_INDIRECT_SELF\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/detail/start.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_LOCAL_SE\n#\n# undef BOOST_PP_LOCAL_SE_DIGIT_1\n# undef BOOST_PP_LOCAL_SE_DIGIT_2\n# undef BOOST_PP_LOCAL_SE_DIGIT_3\n# undef BOOST_PP_LOCAL_SE_DIGIT_4\n# undef BOOST_PP_LOCAL_SE_DIGIT_5\n# undef BOOST_PP_LOCAL_SE_DIGIT_6\n# undef BOOST_PP_LOCAL_SE_DIGIT_7\n# undef BOOST_PP_LOCAL_SE_DIGIT_8\n# undef BOOST_PP_LOCAL_SE_DIGIT_9\n# undef BOOST_PP_LOCAL_SE_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define BOOST_PP_LOCAL_SE_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 1\n# elif 
BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_LOCAL_SE_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_LOCAL_SE_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_LOCAL_SE_DIGIT_3\n#    define BOOST_PP_LOCAL_SE() BOOST_PP_SLOT_CC_3(BOOST_PP_LOCAL_SE_DIGIT_3, BOOST_PP_LOCAL_SE_DIGIT_2, BOOST_PP_LOCAL_SE_DIGIT_1)\n# elif BOOST_PP_LOCAL_SE_DIGIT_2\n#    define BOOST_PP_LOCAL_SE() BOOST_PP_SLOT_CC_2(BOOST_PP_LOCAL_SE_DIGIT_2, BOOST_PP_LOCAL_SE_DIGIT_1)\n# else\n#    define BOOST_PP_LOCAL_SE() BOOST_PP_LOCAL_SE_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/iterate.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ITERATION_ITERATE_HPP\n# define BOOST_PREPROCESSOR_ITERATION_ITERATE_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/arithmetic/inc.hpp>\n# include <boost/preprocessor/array/elem.hpp>\n# include <boost/preprocessor/array/size.hpp>\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/slot/slot.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_ITERATION_DEPTH */\n#\n# define BOOST_PP_ITERATION_DEPTH() 0\n#\n# /* BOOST_PP_ITERATION */\n#\n# define BOOST_PP_ITERATION() BOOST_PP_CAT(BOOST_PP_ITERATION_, BOOST_PP_ITERATION_DEPTH())\n#\n# /* BOOST_PP_ITERATION_START && BOOST_PP_ITERATION_FINISH */\n#\n# define BOOST_PP_ITERATION_START() BOOST_PP_CAT(BOOST_PP_ITERATION_START_, BOOST_PP_ITERATION_DEPTH())\n# define BOOST_PP_ITERATION_FINISH() BOOST_PP_CAT(BOOST_PP_ITERATION_FINISH_, BOOST_PP_ITERATION_DEPTH())\n#\n# /* BOOST_PP_ITERATION_FLAGS */\n#\n# define BOOST_PP_ITERATION_FLAGS() (BOOST_PP_CAT(BOOST_PP_ITERATION_FLAGS_, BOOST_PP_ITERATION_DEPTH())())\n#\n# /* BOOST_PP_FRAME_ITERATION */\n#\n# define BOOST_PP_FRAME_ITERATION(i) BOOST_PP_CAT(BOOST_PP_ITERATION_, i)\n#\n# /* BOOST_PP_FRAME_START && BOOST_PP_FRAME_FINISH */\n#\n# define BOOST_PP_FRAME_START(i) BOOST_PP_CAT(BOOST_PP_ITERATION_START_, i)\n# define BOOST_PP_FRAME_FINISH(i) 
BOOST_PP_CAT(BOOST_PP_ITERATION_FINISH_, i)\n#\n# /* BOOST_PP_FRAME_FLAGS */\n#\n# define BOOST_PP_FRAME_FLAGS(i) (BOOST_PP_CAT(BOOST_PP_ITERATION_FLAGS_, i)())\n#\n# /* BOOST_PP_RELATIVE_ITERATION */\n#\n# define BOOST_PP_RELATIVE_ITERATION(i) BOOST_PP_CAT(BOOST_PP_RELATIVE_, i)(BOOST_PP_ITERATION_)\n#\n# define BOOST_PP_RELATIVE_0(m) BOOST_PP_CAT(m, BOOST_PP_ITERATION_DEPTH())\n# define BOOST_PP_RELATIVE_1(m) BOOST_PP_CAT(m, BOOST_PP_DEC(BOOST_PP_ITERATION_DEPTH()))\n# define BOOST_PP_RELATIVE_2(m) BOOST_PP_CAT(m, BOOST_PP_DEC(BOOST_PP_DEC(BOOST_PP_ITERATION_DEPTH())))\n# define BOOST_PP_RELATIVE_3(m) BOOST_PP_CAT(m, BOOST_PP_DEC(BOOST_PP_DEC(BOOST_PP_DEC(BOOST_PP_ITERATION_DEPTH()))))\n# define BOOST_PP_RELATIVE_4(m) BOOST_PP_CAT(m, BOOST_PP_DEC(BOOST_PP_DEC(BOOST_PP_DEC(BOOST_PP_DEC(BOOST_PP_ITERATION_DEPTH())))))\n#\n# /* BOOST_PP_RELATIVE_START && BOOST_PP_RELATIVE_FINISH */\n#\n# define BOOST_PP_RELATIVE_START(i) BOOST_PP_CAT(BOOST_PP_RELATIVE_, i)(BOOST_PP_ITERATION_START_)\n# define BOOST_PP_RELATIVE_FINISH(i) BOOST_PP_CAT(BOOST_PP_RELATIVE_, i)(BOOST_PP_ITERATION_FINISH_)\n#\n# /* BOOST_PP_RELATIVE_FLAGS */\n#\n# define BOOST_PP_RELATIVE_FLAGS(i) (BOOST_PP_CAT(BOOST_PP_RELATIVE_, i)(BOOST_PP_ITERATION_FLAGS_)())\n#\n# /* BOOST_PP_ITERATE */\n#\n# define BOOST_PP_ITERATE() BOOST_PP_CAT(BOOST_PP_ITERATE_, BOOST_PP_INC(BOOST_PP_ITERATION_DEPTH()))\n#\n# define BOOST_PP_ITERATE_1 <boost/preprocessor/iteration/detail/iter/forward1.hpp>\n# define BOOST_PP_ITERATE_2 <boost/preprocessor/iteration/detail/iter/forward2.hpp>\n# define BOOST_PP_ITERATE_3 <boost/preprocessor/iteration/detail/iter/forward3.hpp>\n# define BOOST_PP_ITERATE_4 <boost/preprocessor/iteration/detail/iter/forward4.hpp>\n# define BOOST_PP_ITERATE_5 <boost/preprocessor/iteration/detail/iter/forward5.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/local.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ITERATION_LOCAL_HPP\n# define BOOST_PREPROCESSOR_ITERATION_LOCAL_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/slot/slot.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_LOCAL_ITERATE */\n#\n# define BOOST_PP_LOCAL_ITERATE() <boost/preprocessor/iteration/detail/local.hpp>\n#\n# define BOOST_PP_LOCAL_C(n) (BOOST_PP_LOCAL_S) <= n && (BOOST_PP_LOCAL_F) >= n\n# define BOOST_PP_LOCAL_R(n) (BOOST_PP_LOCAL_F) <= n && (BOOST_PP_LOCAL_S) >= n\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/iteration/self.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_ITERATION_SELF_HPP\n# define BOOST_PREPROCESSOR_ITERATION_SELF_HPP\n#\n# /* BOOST_PP_INCLUDE_SELF */\n#\n# define BOOST_PP_INCLUDE_SELF() <boost/preprocessor/iteration/detail/self.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/adt.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  *\n#  * See http://www.boost.org for most recent version.\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_ADT_HPP\n# define BOOST_PREPROCESSOR_LIST_ADT_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/detail/is_binary.hpp>\n# include <boost/preprocessor/logical/compl.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# /* BOOST_PP_LIST_CONS */\n#\n# define BOOST_PP_LIST_CONS(head, tail) (head, tail)\n#\n# /* BOOST_PP_LIST_NIL */\n#\n# define BOOST_PP_LIST_NIL BOOST_PP_NIL\n#\n# /* BOOST_PP_LIST_FIRST */\n#\n# define BOOST_PP_LIST_FIRST(list) BOOST_PP_LIST_FIRST_D(list)\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_LIST_FIRST_D(list) BOOST_PP_LIST_FIRST_I list\n# else\n#    define BOOST_PP_LIST_FIRST_D(list) BOOST_PP_LIST_FIRST_I ## list\n# endif\n#\n# define BOOST_PP_LIST_FIRST_I(head, tail) head\n#\n# /* BOOST_PP_LIST_REST */\n#\n# define BOOST_PP_LIST_REST(list) BOOST_PP_LIST_REST_D(list)\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_LIST_REST_D(list) BOOST_PP_LIST_REST_I list\n# else\n#    define BOOST_PP_LIST_REST_D(list) BOOST_PP_LIST_REST_I ## list\n# endif\n#\n# define BOOST_PP_LIST_REST_I(head, tail) tail\n#\n# /* BOOST_PP_LIST_IS_CONS */\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_BCC()\n#    define BOOST_PP_LIST_IS_CONS(list) BOOST_PP_LIST_IS_CONS_D(list)\n#    define BOOST_PP_LIST_IS_CONS_D(list) BOOST_PP_LIST_IS_CONS_ ## list\n#    define BOOST_PP_LIST_IS_CONS_(head, tail) 1\n#    define BOOST_PP_LIST_IS_CONS_BOOST_PP_NIL 0\n# else\n#    define BOOST_PP_LIST_IS_CONS(list) BOOST_PP_IS_BINARY(list)\n# endif\n#\n# /* 
BOOST_PP_LIST_IS_NIL */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_BCC()\n#    define BOOST_PP_LIST_IS_NIL(list) BOOST_PP_COMPL(BOOST_PP_IS_BINARY(list))\n# else\n#    define BOOST_PP_LIST_IS_NIL(list) BOOST_PP_COMPL(BOOST_PP_LIST_IS_CONS(list))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/detail/dmc/fold_left.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_DETAIL_FOLD_LEFT_HPP\n# define BOOST_PREPROCESSOR_LIST_DETAIL_FOLD_LEFT_HPP\n#\n# include <boost/preprocessor/control/expr_iif.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/list/adt.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# define BOOST_PP_LIST_FOLD_LEFT_1(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_2, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(2, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_2(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_3, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(3, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_3(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_4, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(4, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_4(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_5, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(5, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_5(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_6, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(6, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# 
define BOOST_PP_LIST_FOLD_LEFT_6(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_7, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(7, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_7(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_8, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(8, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_8(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_9, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(9, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_9(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_10, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(10, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_10(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_11, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(11, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_11(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_12, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(12, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_12(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_13, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(13, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_13(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_14, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(14, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_14(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_15, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(15, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_15(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_16, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(16, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_16(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_17, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(17, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_17(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_18, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(18, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_18(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_19, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(19, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_19(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_20, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(20, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_20(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_21, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(21, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_21(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_22, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(22, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_22(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_23, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(23, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_23(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_24, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(24, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_24(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_25, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(25, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_25(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_26, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(26, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_26(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_27, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(27, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_27(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_28, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(28, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_28(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_29, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(29, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_29(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_30, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(30, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_30(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_31, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(31, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_31(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_32, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(32, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_32(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_33, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(33, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_33(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_34, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(34, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_34(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_35, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(35, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_35(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_36, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(36, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_36(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_37, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(37, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_37(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_38, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(38, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_38(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_39, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(39, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_39(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_40, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(40, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_40(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_41, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(41, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_41(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_42, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(42, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_42(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_43, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(43, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_43(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_44, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(44, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_44(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_45, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(45, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_45(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_46, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(46, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_46(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_47, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(47, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_47(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_48, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(48, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_48(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_49, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(49, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_49(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_50, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(50, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_50(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_51, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(51, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_51(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_52, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(52, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_52(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_53, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(53, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_53(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_54, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(54, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_54(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_55, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(55, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_55(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_56, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(56, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_56(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_57, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(57, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_57(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_58, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(58, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_58(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_59, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(59, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_59(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_60, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(60, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_60(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_61, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(61, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_61(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_62, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(62, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_62(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_63, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(63, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_63(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_64, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(64, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_64(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_65, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(65, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_65(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_66, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(66, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_66(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_67, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(67, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_67(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_68, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(68, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_68(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_69, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(69, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_69(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_70, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(70, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_70(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_71, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(71, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_71(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_72, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(72, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_72(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_73, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(73, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_73(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_74, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(74, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_74(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_75, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(75, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_75(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_76, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(76, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_76(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_77, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(77, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_77(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_78, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(78, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_78(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_79, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(79, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_79(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_80, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(80, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_80(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_81, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(81, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_81(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_82, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(82, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_82(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_83, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(83, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_83(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_84, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(84, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_84(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_85, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(85, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_85(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_86, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(86, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_86(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_87, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(87, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_87(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_88, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(88, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_88(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_89, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(89, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_89(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_90, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(90, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_90(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_91, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(91, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_91(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_92, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(92, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_92(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_93, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(93, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_93(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_94, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(94, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_94(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_95, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(95, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_95(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_96, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(96, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_96(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_97, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(97, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_97(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_98, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(98, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_98(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_99, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(99, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_99(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_100, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(100, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_100(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_101, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(101, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_101(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_102, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(102, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_102(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_103, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(103, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_103(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_104, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(104, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_104(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_105, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(105, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_105(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_106, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(106, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_106(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_107, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(107, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_107(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_108, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(108, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_108(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_109, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(109, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_109(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_110, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(110, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_110(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_111, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(111, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_111(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_112, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(112, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_112(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_113, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(113, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_113(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_114, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(114, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_114(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_115, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(115, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_115(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_116, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(116, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_116(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_117, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(117, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_117(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_118, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(118, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_118(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_119, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(119, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_119(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_120, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(120, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_120(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_121, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(121, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_121(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_122, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(122, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_122(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_123, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(123, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_123(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_124, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(124, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_124(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_125, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(125, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_125(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_126, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(126, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_126(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_127, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(127, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_127(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_128, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(128, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_128(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_129, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(129, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_129(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_130, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(130, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_130(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_131, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(131, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_131(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_132, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(132, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_132(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_133, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(133, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_133(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_134, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(134, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_134(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_135, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(135, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_135(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_136, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(136, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_136(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_137, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(137, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_137(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_138, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(138, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_138(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_139, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(139, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_139(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_140, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(140, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_140(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_141, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(141, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_141(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_142, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(142, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_142(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_143, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(143, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_143(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_144, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(144, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_144(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_145, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(145, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_145(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_146, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(146, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_146(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_147, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(147, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_147(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_148, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(148, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_148(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_149, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(149, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_149(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_150, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(150, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_150(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_151, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(151, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_151(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_152, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(152, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_152(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_153, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(153, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_153(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_154, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(154, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_154(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_155, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(155, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_155(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_156, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(156, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_156(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_157, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(157, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_157(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_158, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(158, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_158(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_159, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(159, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_159(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_160, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(160, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_160(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_161, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(161, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_161(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_162, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(162, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_162(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_163, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(163, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_163(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_164, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(164, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_164(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_165, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(165, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_165(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_166, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(166, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_166(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_167, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(167, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_167(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_168, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(168, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_168(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_169, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(169, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_169(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_170, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(170, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_170(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_171, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(171, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_171(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_172, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(172, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_172(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_173, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(173, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_173(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_174, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(174, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_174(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_175, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(175, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_175(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_176, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(176, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_176(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_177, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(177, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_177(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_178, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(178, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_178(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_179, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(179, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_179(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_180, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(180, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_180(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_181, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(181, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_181(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_182, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(182, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_182(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_183, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(183, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_183(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_184, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(184, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_184(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_185, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(185, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_185(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_186, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(186, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_186(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_187, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(187, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_187(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_188, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(188, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_188(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_189, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(189, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_189(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_190, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(190, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_190(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_191, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(191, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_191(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_192, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(192, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_192(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_193, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(193, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_193(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_194, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(194, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_194(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_195, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(195, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_195(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_196, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(196, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_196(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_197, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(197, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_197(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_198, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(198, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_198(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_199, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(199, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_199(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_200, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(200, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_200(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_201, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(201, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_201(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_202, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(202, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_202(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_203, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(203, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_203(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_204, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(204, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_204(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_205, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(205, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_205(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_206, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(206, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_206(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_207, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(207, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_207(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_208, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(208, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_208(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_209, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(209, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_209(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_210, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(210, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_210(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_211, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(211, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_211(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_212, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(212, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_212(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_213, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(213, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_213(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_214, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(214, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_214(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_215, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(215, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_215(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_216, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(216, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_216(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_217, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(217, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_217(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_218, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(218, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_218(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_219, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(219, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_219(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_220, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(220, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_220(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_221, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(221, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_221(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_222, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(222, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_222(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_223, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(223, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_223(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_224, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(224, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_224(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_225, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(225, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_225(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_226, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(226, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_226(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_227, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(227, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_227(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_228, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(228, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_228(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_229, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(229, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_229(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_230, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(230, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_230(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_231, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(231, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_231(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_232, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(232, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_232(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_233, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(233, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_233(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_234, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(234, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_234(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_235, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(235, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_235(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_236, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(236, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_236(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_237, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(237, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_237(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_238, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(238, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_238(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_239, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(239, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_239(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_240, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(240, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_240(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_241, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(241, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_241(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_242, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(242, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_242(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_243, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(243, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_243(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_244, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(244, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_244(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_245, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(245, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_245(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_246, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(246, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_246(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_247, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(247, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_247(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_248, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(248, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_248(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_249, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(249, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_249(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_250, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(250, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_250(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_251, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(251, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_251(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_252, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(252, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_252(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_253, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(253, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_253(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_254, BOOST_PP_TUPLE_ELEM_3_1)(o, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(254, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_254(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_255, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(255, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_255(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_256, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(256, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_256(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_257, BOOST_PP_TUPLE_ELEM_3_1)(o, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, BOOST_PP_TUPLE_ELEM_3_1)(257, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/detail/edg/fold_left.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_DETAIL_EDG_FOLD_LEFT_HPP\n# define BOOST_PREPROCESSOR_LIST_DETAIL_EDG_FOLD_LEFT_HPP\n#\n# include <boost/preprocessor/control/expr_iif.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/list/adt.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_LIST_FOLD_LEFT_1(o, s, l) BOOST_PP_LIST_FOLD_LEFT_1_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_2(o, s, l) BOOST_PP_LIST_FOLD_LEFT_2_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_3(o, s, l) BOOST_PP_LIST_FOLD_LEFT_3_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_4(o, s, l) BOOST_PP_LIST_FOLD_LEFT_4_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_5(o, s, l) BOOST_PP_LIST_FOLD_LEFT_5_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_6(o, s, l) BOOST_PP_LIST_FOLD_LEFT_6_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_7(o, s, l) BOOST_PP_LIST_FOLD_LEFT_7_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_8(o, s, l) BOOST_PP_LIST_FOLD_LEFT_8_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_9(o, s, l) BOOST_PP_LIST_FOLD_LEFT_9_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_10(o, s, l) BOOST_PP_LIST_FOLD_LEFT_10_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_11(o, s, l) BOOST_PP_LIST_FOLD_LEFT_11_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_12(o, s, l) BOOST_PP_LIST_FOLD_LEFT_12_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_13(o, s, l) BOOST_PP_LIST_FOLD_LEFT_13_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_14(o, s, l) BOOST_PP_LIST_FOLD_LEFT_14_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_15(o, s, l) BOOST_PP_LIST_FOLD_LEFT_15_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_16(o, s, l) 
BOOST_PP_LIST_FOLD_LEFT_16_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_17(o, s, l) BOOST_PP_LIST_FOLD_LEFT_17_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_18(o, s, l) BOOST_PP_LIST_FOLD_LEFT_18_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_19(o, s, l) BOOST_PP_LIST_FOLD_LEFT_19_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_20(o, s, l) BOOST_PP_LIST_FOLD_LEFT_20_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_21(o, s, l) BOOST_PP_LIST_FOLD_LEFT_21_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_22(o, s, l) BOOST_PP_LIST_FOLD_LEFT_22_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_23(o, s, l) BOOST_PP_LIST_FOLD_LEFT_23_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_24(o, s, l) BOOST_PP_LIST_FOLD_LEFT_24_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_25(o, s, l) BOOST_PP_LIST_FOLD_LEFT_25_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_26(o, s, l) BOOST_PP_LIST_FOLD_LEFT_26_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_27(o, s, l) BOOST_PP_LIST_FOLD_LEFT_27_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_28(o, s, l) BOOST_PP_LIST_FOLD_LEFT_28_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_29(o, s, l) BOOST_PP_LIST_FOLD_LEFT_29_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_30(o, s, l) BOOST_PP_LIST_FOLD_LEFT_30_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_31(o, s, l) BOOST_PP_LIST_FOLD_LEFT_31_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_32(o, s, l) BOOST_PP_LIST_FOLD_LEFT_32_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_33(o, s, l) BOOST_PP_LIST_FOLD_LEFT_33_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_34(o, s, l) BOOST_PP_LIST_FOLD_LEFT_34_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_35(o, s, l) BOOST_PP_LIST_FOLD_LEFT_35_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_36(o, s, l) BOOST_PP_LIST_FOLD_LEFT_36_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_37(o, s, l) BOOST_PP_LIST_FOLD_LEFT_37_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_38(o, s, l) BOOST_PP_LIST_FOLD_LEFT_38_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_39(o, s, l) BOOST_PP_LIST_FOLD_LEFT_39_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_LEFT_40(o, s, l) BOOST_PP_LIST_FOLD_LEFT_40_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_41(o, s, l) BOOST_PP_LIST_FOLD_LEFT_41_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_42(o, s, l) BOOST_PP_LIST_FOLD_LEFT_42_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_43(o, s, l) BOOST_PP_LIST_FOLD_LEFT_43_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_44(o, s, l) BOOST_PP_LIST_FOLD_LEFT_44_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_45(o, s, l) BOOST_PP_LIST_FOLD_LEFT_45_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_46(o, s, l) BOOST_PP_LIST_FOLD_LEFT_46_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_47(o, s, l) BOOST_PP_LIST_FOLD_LEFT_47_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_48(o, s, l) BOOST_PP_LIST_FOLD_LEFT_48_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_49(o, s, l) BOOST_PP_LIST_FOLD_LEFT_49_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_50(o, s, l) BOOST_PP_LIST_FOLD_LEFT_50_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_51(o, s, l) BOOST_PP_LIST_FOLD_LEFT_51_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_52(o, s, l) BOOST_PP_LIST_FOLD_LEFT_52_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_53(o, s, l) BOOST_PP_LIST_FOLD_LEFT_53_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_54(o, s, l) BOOST_PP_LIST_FOLD_LEFT_54_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_55(o, s, l) BOOST_PP_LIST_FOLD_LEFT_55_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_56(o, s, l) BOOST_PP_LIST_FOLD_LEFT_56_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_57(o, s, l) BOOST_PP_LIST_FOLD_LEFT_57_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_58(o, s, l) BOOST_PP_LIST_FOLD_LEFT_58_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_59(o, s, l) BOOST_PP_LIST_FOLD_LEFT_59_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_60(o, s, l) BOOST_PP_LIST_FOLD_LEFT_60_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_61(o, s, l) BOOST_PP_LIST_FOLD_LEFT_61_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_62(o, s, l) BOOST_PP_LIST_FOLD_LEFT_62_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_63(o, s, l) BOOST_PP_LIST_FOLD_LEFT_63_D(o, 
s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_64(o, s, l) BOOST_PP_LIST_FOLD_LEFT_64_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_65(o, s, l) BOOST_PP_LIST_FOLD_LEFT_65_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_66(o, s, l) BOOST_PP_LIST_FOLD_LEFT_66_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_67(o, s, l) BOOST_PP_LIST_FOLD_LEFT_67_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_68(o, s, l) BOOST_PP_LIST_FOLD_LEFT_68_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_69(o, s, l) BOOST_PP_LIST_FOLD_LEFT_69_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_70(o, s, l) BOOST_PP_LIST_FOLD_LEFT_70_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_71(o, s, l) BOOST_PP_LIST_FOLD_LEFT_71_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_72(o, s, l) BOOST_PP_LIST_FOLD_LEFT_72_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_73(o, s, l) BOOST_PP_LIST_FOLD_LEFT_73_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_74(o, s, l) BOOST_PP_LIST_FOLD_LEFT_74_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_75(o, s, l) BOOST_PP_LIST_FOLD_LEFT_75_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_76(o, s, l) BOOST_PP_LIST_FOLD_LEFT_76_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_77(o, s, l) BOOST_PP_LIST_FOLD_LEFT_77_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_78(o, s, l) BOOST_PP_LIST_FOLD_LEFT_78_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_79(o, s, l) BOOST_PP_LIST_FOLD_LEFT_79_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_80(o, s, l) BOOST_PP_LIST_FOLD_LEFT_80_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_81(o, s, l) BOOST_PP_LIST_FOLD_LEFT_81_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_82(o, s, l) BOOST_PP_LIST_FOLD_LEFT_82_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_83(o, s, l) BOOST_PP_LIST_FOLD_LEFT_83_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_84(o, s, l) BOOST_PP_LIST_FOLD_LEFT_84_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_85(o, s, l) BOOST_PP_LIST_FOLD_LEFT_85_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_86(o, s, l) BOOST_PP_LIST_FOLD_LEFT_86_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_87(o, s, l) 
BOOST_PP_LIST_FOLD_LEFT_87_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_88(o, s, l) BOOST_PP_LIST_FOLD_LEFT_88_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_89(o, s, l) BOOST_PP_LIST_FOLD_LEFT_89_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_90(o, s, l) BOOST_PP_LIST_FOLD_LEFT_90_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_91(o, s, l) BOOST_PP_LIST_FOLD_LEFT_91_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_92(o, s, l) BOOST_PP_LIST_FOLD_LEFT_92_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_93(o, s, l) BOOST_PP_LIST_FOLD_LEFT_93_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_94(o, s, l) BOOST_PP_LIST_FOLD_LEFT_94_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_95(o, s, l) BOOST_PP_LIST_FOLD_LEFT_95_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_96(o, s, l) BOOST_PP_LIST_FOLD_LEFT_96_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_97(o, s, l) BOOST_PP_LIST_FOLD_LEFT_97_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_98(o, s, l) BOOST_PP_LIST_FOLD_LEFT_98_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_99(o, s, l) BOOST_PP_LIST_FOLD_LEFT_99_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_100(o, s, l) BOOST_PP_LIST_FOLD_LEFT_100_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_101(o, s, l) BOOST_PP_LIST_FOLD_LEFT_101_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_102(o, s, l) BOOST_PP_LIST_FOLD_LEFT_102_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_103(o, s, l) BOOST_PP_LIST_FOLD_LEFT_103_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_104(o, s, l) BOOST_PP_LIST_FOLD_LEFT_104_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_105(o, s, l) BOOST_PP_LIST_FOLD_LEFT_105_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_106(o, s, l) BOOST_PP_LIST_FOLD_LEFT_106_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_107(o, s, l) BOOST_PP_LIST_FOLD_LEFT_107_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_108(o, s, l) BOOST_PP_LIST_FOLD_LEFT_108_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_109(o, s, l) BOOST_PP_LIST_FOLD_LEFT_109_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_110(o, s, l) BOOST_PP_LIST_FOLD_LEFT_110_D(o, s, l)\n# 
define BOOST_PP_LIST_FOLD_LEFT_111(o, s, l) BOOST_PP_LIST_FOLD_LEFT_111_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_112(o, s, l) BOOST_PP_LIST_FOLD_LEFT_112_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_113(o, s, l) BOOST_PP_LIST_FOLD_LEFT_113_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_114(o, s, l) BOOST_PP_LIST_FOLD_LEFT_114_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_115(o, s, l) BOOST_PP_LIST_FOLD_LEFT_115_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_116(o, s, l) BOOST_PP_LIST_FOLD_LEFT_116_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_117(o, s, l) BOOST_PP_LIST_FOLD_LEFT_117_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_118(o, s, l) BOOST_PP_LIST_FOLD_LEFT_118_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_119(o, s, l) BOOST_PP_LIST_FOLD_LEFT_119_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_120(o, s, l) BOOST_PP_LIST_FOLD_LEFT_120_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_121(o, s, l) BOOST_PP_LIST_FOLD_LEFT_121_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_122(o, s, l) BOOST_PP_LIST_FOLD_LEFT_122_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_123(o, s, l) BOOST_PP_LIST_FOLD_LEFT_123_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_124(o, s, l) BOOST_PP_LIST_FOLD_LEFT_124_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_125(o, s, l) BOOST_PP_LIST_FOLD_LEFT_125_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_126(o, s, l) BOOST_PP_LIST_FOLD_LEFT_126_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_127(o, s, l) BOOST_PP_LIST_FOLD_LEFT_127_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_128(o, s, l) BOOST_PP_LIST_FOLD_LEFT_128_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_129(o, s, l) BOOST_PP_LIST_FOLD_LEFT_129_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_130(o, s, l) BOOST_PP_LIST_FOLD_LEFT_130_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_131(o, s, l) BOOST_PP_LIST_FOLD_LEFT_131_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_132(o, s, l) BOOST_PP_LIST_FOLD_LEFT_132_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_133(o, s, l) BOOST_PP_LIST_FOLD_LEFT_133_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_LEFT_134(o, s, l) BOOST_PP_LIST_FOLD_LEFT_134_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_135(o, s, l) BOOST_PP_LIST_FOLD_LEFT_135_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_136(o, s, l) BOOST_PP_LIST_FOLD_LEFT_136_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_137(o, s, l) BOOST_PP_LIST_FOLD_LEFT_137_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_138(o, s, l) BOOST_PP_LIST_FOLD_LEFT_138_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_139(o, s, l) BOOST_PP_LIST_FOLD_LEFT_139_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_140(o, s, l) BOOST_PP_LIST_FOLD_LEFT_140_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_141(o, s, l) BOOST_PP_LIST_FOLD_LEFT_141_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_142(o, s, l) BOOST_PP_LIST_FOLD_LEFT_142_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_143(o, s, l) BOOST_PP_LIST_FOLD_LEFT_143_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_144(o, s, l) BOOST_PP_LIST_FOLD_LEFT_144_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_145(o, s, l) BOOST_PP_LIST_FOLD_LEFT_145_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_146(o, s, l) BOOST_PP_LIST_FOLD_LEFT_146_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_147(o, s, l) BOOST_PP_LIST_FOLD_LEFT_147_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_148(o, s, l) BOOST_PP_LIST_FOLD_LEFT_148_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_149(o, s, l) BOOST_PP_LIST_FOLD_LEFT_149_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_150(o, s, l) BOOST_PP_LIST_FOLD_LEFT_150_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_151(o, s, l) BOOST_PP_LIST_FOLD_LEFT_151_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_152(o, s, l) BOOST_PP_LIST_FOLD_LEFT_152_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_153(o, s, l) BOOST_PP_LIST_FOLD_LEFT_153_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_154(o, s, l) BOOST_PP_LIST_FOLD_LEFT_154_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_155(o, s, l) BOOST_PP_LIST_FOLD_LEFT_155_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_156(o, s, l) BOOST_PP_LIST_FOLD_LEFT_156_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_LEFT_157(o, s, l) BOOST_PP_LIST_FOLD_LEFT_157_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_158(o, s, l) BOOST_PP_LIST_FOLD_LEFT_158_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_159(o, s, l) BOOST_PP_LIST_FOLD_LEFT_159_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_160(o, s, l) BOOST_PP_LIST_FOLD_LEFT_160_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_161(o, s, l) BOOST_PP_LIST_FOLD_LEFT_161_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_162(o, s, l) BOOST_PP_LIST_FOLD_LEFT_162_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_163(o, s, l) BOOST_PP_LIST_FOLD_LEFT_163_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_164(o, s, l) BOOST_PP_LIST_FOLD_LEFT_164_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_165(o, s, l) BOOST_PP_LIST_FOLD_LEFT_165_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_166(o, s, l) BOOST_PP_LIST_FOLD_LEFT_166_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_167(o, s, l) BOOST_PP_LIST_FOLD_LEFT_167_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_168(o, s, l) BOOST_PP_LIST_FOLD_LEFT_168_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_169(o, s, l) BOOST_PP_LIST_FOLD_LEFT_169_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_170(o, s, l) BOOST_PP_LIST_FOLD_LEFT_170_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_171(o, s, l) BOOST_PP_LIST_FOLD_LEFT_171_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_172(o, s, l) BOOST_PP_LIST_FOLD_LEFT_172_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_173(o, s, l) BOOST_PP_LIST_FOLD_LEFT_173_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_174(o, s, l) BOOST_PP_LIST_FOLD_LEFT_174_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_175(o, s, l) BOOST_PP_LIST_FOLD_LEFT_175_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_176(o, s, l) BOOST_PP_LIST_FOLD_LEFT_176_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_177(o, s, l) BOOST_PP_LIST_FOLD_LEFT_177_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_178(o, s, l) BOOST_PP_LIST_FOLD_LEFT_178_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_179(o, s, l) BOOST_PP_LIST_FOLD_LEFT_179_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_LEFT_180(o, s, l) BOOST_PP_LIST_FOLD_LEFT_180_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_181(o, s, l) BOOST_PP_LIST_FOLD_LEFT_181_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_182(o, s, l) BOOST_PP_LIST_FOLD_LEFT_182_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_183(o, s, l) BOOST_PP_LIST_FOLD_LEFT_183_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_184(o, s, l) BOOST_PP_LIST_FOLD_LEFT_184_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_185(o, s, l) BOOST_PP_LIST_FOLD_LEFT_185_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_186(o, s, l) BOOST_PP_LIST_FOLD_LEFT_186_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_187(o, s, l) BOOST_PP_LIST_FOLD_LEFT_187_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_188(o, s, l) BOOST_PP_LIST_FOLD_LEFT_188_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_189(o, s, l) BOOST_PP_LIST_FOLD_LEFT_189_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_190(o, s, l) BOOST_PP_LIST_FOLD_LEFT_190_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_191(o, s, l) BOOST_PP_LIST_FOLD_LEFT_191_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_192(o, s, l) BOOST_PP_LIST_FOLD_LEFT_192_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_193(o, s, l) BOOST_PP_LIST_FOLD_LEFT_193_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_194(o, s, l) BOOST_PP_LIST_FOLD_LEFT_194_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_195(o, s, l) BOOST_PP_LIST_FOLD_LEFT_195_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_196(o, s, l) BOOST_PP_LIST_FOLD_LEFT_196_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_197(o, s, l) BOOST_PP_LIST_FOLD_LEFT_197_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_198(o, s, l) BOOST_PP_LIST_FOLD_LEFT_198_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_199(o, s, l) BOOST_PP_LIST_FOLD_LEFT_199_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_200(o, s, l) BOOST_PP_LIST_FOLD_LEFT_200_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_201(o, s, l) BOOST_PP_LIST_FOLD_LEFT_201_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_202(o, s, l) BOOST_PP_LIST_FOLD_LEFT_202_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_LEFT_203(o, s, l) BOOST_PP_LIST_FOLD_LEFT_203_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_204(o, s, l) BOOST_PP_LIST_FOLD_LEFT_204_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_205(o, s, l) BOOST_PP_LIST_FOLD_LEFT_205_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_206(o, s, l) BOOST_PP_LIST_FOLD_LEFT_206_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_207(o, s, l) BOOST_PP_LIST_FOLD_LEFT_207_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_208(o, s, l) BOOST_PP_LIST_FOLD_LEFT_208_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_209(o, s, l) BOOST_PP_LIST_FOLD_LEFT_209_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_210(o, s, l) BOOST_PP_LIST_FOLD_LEFT_210_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_211(o, s, l) BOOST_PP_LIST_FOLD_LEFT_211_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_212(o, s, l) BOOST_PP_LIST_FOLD_LEFT_212_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_213(o, s, l) BOOST_PP_LIST_FOLD_LEFT_213_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_214(o, s, l) BOOST_PP_LIST_FOLD_LEFT_214_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_215(o, s, l) BOOST_PP_LIST_FOLD_LEFT_215_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_216(o, s, l) BOOST_PP_LIST_FOLD_LEFT_216_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_217(o, s, l) BOOST_PP_LIST_FOLD_LEFT_217_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_218(o, s, l) BOOST_PP_LIST_FOLD_LEFT_218_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_219(o, s, l) BOOST_PP_LIST_FOLD_LEFT_219_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_220(o, s, l) BOOST_PP_LIST_FOLD_LEFT_220_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_221(o, s, l) BOOST_PP_LIST_FOLD_LEFT_221_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_222(o, s, l) BOOST_PP_LIST_FOLD_LEFT_222_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_223(o, s, l) BOOST_PP_LIST_FOLD_LEFT_223_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_224(o, s, l) BOOST_PP_LIST_FOLD_LEFT_224_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_225(o, s, l) BOOST_PP_LIST_FOLD_LEFT_225_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_LEFT_226(o, s, l) BOOST_PP_LIST_FOLD_LEFT_226_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_227(o, s, l) BOOST_PP_LIST_FOLD_LEFT_227_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_228(o, s, l) BOOST_PP_LIST_FOLD_LEFT_228_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_229(o, s, l) BOOST_PP_LIST_FOLD_LEFT_229_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_230(o, s, l) BOOST_PP_LIST_FOLD_LEFT_230_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_231(o, s, l) BOOST_PP_LIST_FOLD_LEFT_231_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_232(o, s, l) BOOST_PP_LIST_FOLD_LEFT_232_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_233(o, s, l) BOOST_PP_LIST_FOLD_LEFT_233_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_234(o, s, l) BOOST_PP_LIST_FOLD_LEFT_234_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_235(o, s, l) BOOST_PP_LIST_FOLD_LEFT_235_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_236(o, s, l) BOOST_PP_LIST_FOLD_LEFT_236_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_237(o, s, l) BOOST_PP_LIST_FOLD_LEFT_237_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_238(o, s, l) BOOST_PP_LIST_FOLD_LEFT_238_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_239(o, s, l) BOOST_PP_LIST_FOLD_LEFT_239_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_240(o, s, l) BOOST_PP_LIST_FOLD_LEFT_240_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_241(o, s, l) BOOST_PP_LIST_FOLD_LEFT_241_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_242(o, s, l) BOOST_PP_LIST_FOLD_LEFT_242_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_243(o, s, l) BOOST_PP_LIST_FOLD_LEFT_243_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_244(o, s, l) BOOST_PP_LIST_FOLD_LEFT_244_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_245(o, s, l) BOOST_PP_LIST_FOLD_LEFT_245_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_246(o, s, l) BOOST_PP_LIST_FOLD_LEFT_246_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_247(o, s, l) BOOST_PP_LIST_FOLD_LEFT_247_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_248(o, s, l) BOOST_PP_LIST_FOLD_LEFT_248_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_LEFT_249(o, s, l) BOOST_PP_LIST_FOLD_LEFT_249_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_250(o, s, l) BOOST_PP_LIST_FOLD_LEFT_250_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_251(o, s, l) BOOST_PP_LIST_FOLD_LEFT_251_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_252(o, s, l) BOOST_PP_LIST_FOLD_LEFT_252_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_253(o, s, l) BOOST_PP_LIST_FOLD_LEFT_253_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_254(o, s, l) BOOST_PP_LIST_FOLD_LEFT_254_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_255(o, s, l) BOOST_PP_LIST_FOLD_LEFT_255_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_256(o, s, l) BOOST_PP_LIST_FOLD_LEFT_256_D(o, s, l)\n#\n# define BOOST_PP_LIST_FOLD_LEFT_1_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_2, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(2, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_2_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_3, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(3, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_3_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_4, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(4, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_4_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_5, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(5, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_5_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_6, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(6, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_6_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_7, 
s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(7, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_7_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_8, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(8, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_8_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_9, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(9, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_9_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_10, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(10, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_10_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_11, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(11, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_11_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_12, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(12, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_12_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_13, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(13, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_13_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_14, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(14, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_14_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_15, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(15, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_15_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_16, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(16, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_16_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_17, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(17, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_17_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_18, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(18, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_18_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_19, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(19, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_19_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_20, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(20, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_20_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_21, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(21, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_21_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_22, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(22, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_22_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_23, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), 
o)(23, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_23_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_24, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(24, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_24_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_25, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(25, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_25_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_26, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(26, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_26_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_27, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(27, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_27_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_28, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(28, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_28_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_29, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(29, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_29_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_30, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(30, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_30_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_31, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(31, s, BOOST_PP_LIST_FIRST(l)), 
BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_31_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_32, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(32, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_32_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_33, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(33, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_33_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_34, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(34, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_34_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_35, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(35, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_35_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_36, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(36, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_36_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_37, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(37, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_37_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_38, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(38, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_38_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_39, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(39, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_39_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_40, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(40, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_40_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_41, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(41, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_41_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_42, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(42, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_42_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_43, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(43, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_43_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_44, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(44, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_44_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_45, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(45, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_45_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_46, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(46, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_46_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_47, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(47, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_47_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_48, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(48, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_48_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_49, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(49, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_49_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_50, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(50, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_50_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_51, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(51, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_51_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_52, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(52, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_52_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_53, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(53, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_53_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_54, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(54, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_54_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_55, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(55, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_55_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_LEFT_56, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(56, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_56_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_57, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(57, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_57_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_58, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(58, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_58_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_59, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(59, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_59_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_60, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(60, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_60_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_61, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(61, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_61_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_62, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(62, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_62_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_63, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(63, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_63_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_64, s 
BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(64, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_64_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_65, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(65, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_65_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_66, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(66, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_66_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_67, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(67, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_67_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_68, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(68, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_68_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_69, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(69, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_69_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_70, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(70, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_70_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_71, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(71, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_71_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_72, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(72, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_72_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_73, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(73, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_73_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_74, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(74, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_74_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_75, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(75, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_75_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_76, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(76, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_76_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_77, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(77, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_77_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_78, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(78, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_78_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_79, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(79, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_79_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_80, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), 
o)(80, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_80_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_81, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(81, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_81_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_82, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(82, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_82_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_83, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(83, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_83_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_84, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(84, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_84_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_85, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(85, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_85_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_86, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(86, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_86_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_87, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(87, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_87_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_88, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(88, s, BOOST_PP_LIST_FIRST(l)), 
BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_88_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_89, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(89, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_89_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_90, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(90, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_90_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_91, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(91, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_91_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_92, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(92, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_92_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_93, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(93, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_93_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_94, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(94, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_94_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_95, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(95, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_95_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_96, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(96, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_96_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_97, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(97, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_97_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_98, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(98, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_98_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_99, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(99, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_99_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_100, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(100, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_100_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_101, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(101, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_101_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_102, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(102, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_102_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_103, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(103, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_103_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_104, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(104, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_104_D(o, 
s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_105, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(105, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_105_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_106, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(106, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_106_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_107, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(107, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_107_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_108, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(108, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_108_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_109, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(109, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_109_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_110, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(110, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_110_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_111, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(111, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_111_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_112, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(112, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_112_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_113, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(113, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_113_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_114, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(114, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_114_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_115, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(115, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_115_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_116, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(116, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_116_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_117, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(117, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_117_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_118, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(118, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_118_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_119, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(119, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_119_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_120, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(120, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_120_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_121, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(121, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_121_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_122, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(122, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_122_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_123, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(123, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_123_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_124, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(124, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_124_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_125, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(125, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_125_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_126, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(126, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_126_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_127, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(127, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_127_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_128, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(128, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_128_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_129, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(129, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_129_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_130, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(130, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_130_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_131, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(131, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_131_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_132, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(132, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_132_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_133, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(133, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_133_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_134, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(134, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_134_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_135, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(135, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_135_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_136, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(136, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_136_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_137, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(137, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_137_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_138, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(138, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_138_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_139, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(139, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_139_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_140, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(140, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_140_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_141, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(141, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_141_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_142, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(142, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_142_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_143, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(143, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_143_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_144, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(144, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_144_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_145, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(145, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_145_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_146, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(146, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_146_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_147, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(147, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_147_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_148, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(148, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_148_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_149, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(149, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_149_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_150, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(150, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_150_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_151, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(151, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_151_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_152, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(152, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_152_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_153, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(153, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_153_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_154, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(154, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_154_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_155, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(155, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_155_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_156, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(156, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_156_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_157, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(157, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_157_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_158, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(158, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_158_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_159, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(159, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_159_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_160, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(160, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_160_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_161, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(161, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_161_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_162, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(162, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_162_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_163, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(163, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_163_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_164, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(164, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_164_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_165, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(165, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_165_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_166, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(166, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_166_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_167, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(167, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_167_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_168, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(168, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_168_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_169, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(169, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_169_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_170, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(170, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_170_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_171, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(171, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_171_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_172, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(172, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_172_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_173, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(173, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_173_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_174, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(174, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_174_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_175, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(175, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_175_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_176, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(176, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_176_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_177, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(177, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_177_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_178, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(178, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_178_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_179, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(179, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_179_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_180, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(180, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_180_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_181, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(181, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_181_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_182, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(182, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_182_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_183, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(183, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_183_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_184, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(184, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_184_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_185, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(185, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_185_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_186, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(186, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_186_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_187, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(187, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_187_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_188, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(188, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_188_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_189, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(189, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_189_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_190, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(190, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_190_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_191, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(191, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_191_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_192, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(192, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_192_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_193, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(193, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_193_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_194, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(194, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_194_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_195, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(195, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_195_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_196, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(196, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_196_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_197, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(197, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_197_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_198, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(198, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_198_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_199, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(199, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_199_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_200, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(200, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_200_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_201, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(201, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_201_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_202, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(202, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_202_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_203, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(203, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_203_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_204, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(204, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_204_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_205, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(205, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_205_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_206, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(206, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_206_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_207, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(207, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_207_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_208, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(208, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_208_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_209, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(209, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_209_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_210, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(210, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_210_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_211, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(211, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_211_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_212, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(212, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_212_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_213, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(213, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_213_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_214, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(214, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_214_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_215, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(215, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_215_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_216, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(216, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_216_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_217, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(217, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_217_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_218, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(218, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_218_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_219, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(219, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_219_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_220, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(220, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_220_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_221, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(221, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_221_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_222, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(222, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_222_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_223, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(223, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_223_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_224, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(224, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_224_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_225, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(225, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_225_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_226, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(226, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_226_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_227, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(227, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_227_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_228, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(228, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_228_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_229, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(229, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_229_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_230, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(230, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_230_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_231, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(231, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_231_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_232, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(232, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_232_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_233, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(233, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_233_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_234, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(234, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_234_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_235, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(235, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_235_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_236, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(236, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_236_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_237, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(237, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_237_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_238, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(238, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_238_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_239, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(239, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_239_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_240, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(240, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_240_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_241, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(241, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_241_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_242, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(242, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_242_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_243, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(243, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_243_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_244, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(244, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_244_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_245, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(245, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_245_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_246, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(246, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_246_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_247, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(247, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_247_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_248, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(248, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_248_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_249, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(249, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_249_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_250, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(250, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_250_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_251, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(251, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_251_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_252, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(252, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_252_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_253, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(253, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_253_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_254, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(254, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_254_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_255, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(255, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_255_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_256, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(256, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_256_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_257, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(257, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/detail/edg/fold_right.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_DETAIL_EDG_FOLD_RIGHT_HPP\n# define BOOST_PREPROCESSOR_LIST_DETAIL_EDG_FOLD_RIGHT_HPP\n#\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/list/adt.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_LIST_FOLD_RIGHT_1(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_1_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_2(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_2_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_3(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_3_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_4(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_4_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_5(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_5_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_6(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_6_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_7(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_7_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_8(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_8_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_9(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_9_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_10(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_10_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_11(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_11_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_12(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_12_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_13(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_13_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_14(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_14_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_15(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_15_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_16(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_16_D(o, 
s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_17(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_17_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_18(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_18_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_19(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_19_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_20(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_20_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_21(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_21_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_22(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_22_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_23(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_23_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_24(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_24_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_25(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_25_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_26(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_26_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_27(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_27_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_28(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_28_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_29(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_29_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_30(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_30_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_31(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_31_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_32(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_32_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_33(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_33_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_34(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_34_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_35(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_35_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_36(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_36_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_37(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_37_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_38(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_38_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_39(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_39_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_RIGHT_40(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_40_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_41(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_41_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_42(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_42_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_43(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_43_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_44(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_44_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_45(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_45_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_46(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_46_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_47(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_47_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_48(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_48_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_49(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_49_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_50(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_50_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_51(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_51_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_52(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_52_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_53(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_53_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_54(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_54_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_55(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_55_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_56(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_56_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_57(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_57_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_58(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_58_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_59(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_59_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_60(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_60_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_61(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_61_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_62(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_62_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_RIGHT_63(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_63_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_64(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_64_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_65(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_65_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_66(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_66_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_67(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_67_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_68(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_68_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_69(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_69_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_70(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_70_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_71(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_71_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_72(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_72_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_73(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_73_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_74(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_74_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_75(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_75_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_76(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_76_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_77(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_77_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_78(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_78_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_79(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_79_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_80(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_80_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_81(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_81_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_82(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_82_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_83(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_83_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_84(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_84_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_85(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_85_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_RIGHT_86(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_86_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_87(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_87_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_88(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_88_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_89(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_89_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_90(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_90_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_91(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_91_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_92(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_92_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_93(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_93_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_94(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_94_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_95(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_95_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_96(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_96_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_97(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_97_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_98(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_98_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_99(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_99_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_100(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_100_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_101(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_101_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_102(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_102_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_103(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_103_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_104(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_104_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_105(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_105_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_106(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_106_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_107(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_107_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_108(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_108_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_RIGHT_109(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_109_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_110(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_110_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_111(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_111_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_112(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_112_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_113(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_113_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_114(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_114_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_115(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_115_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_116(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_116_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_117(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_117_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_118(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_118_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_119(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_119_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_120(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_120_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_121(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_121_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_122(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_122_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_123(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_123_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_124(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_124_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_125(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_125_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_126(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_126_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_127(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_127_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_128(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_128_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_129(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_129_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_130(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_130_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_131(o, s, l) 
BOOST_PP_LIST_FOLD_RIGHT_131_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_132(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_132_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_133(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_133_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_134(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_134_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_135(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_135_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_136(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_136_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_137(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_137_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_138(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_138_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_139(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_139_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_140(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_140_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_141(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_141_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_142(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_142_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_143(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_143_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_144(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_144_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_145(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_145_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_146(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_146_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_147(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_147_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_148(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_148_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_149(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_149_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_150(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_150_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_151(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_151_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_152(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_152_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_153(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_153_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_RIGHT_154(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_154_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_155(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_155_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_156(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_156_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_157(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_157_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_158(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_158_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_159(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_159_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_160(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_160_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_161(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_161_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_162(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_162_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_163(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_163_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_164(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_164_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_165(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_165_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_166(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_166_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_167(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_167_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_168(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_168_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_169(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_169_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_170(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_170_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_171(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_171_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_172(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_172_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_173(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_173_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_174(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_174_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_175(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_175_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_176(o, s, l) 
BOOST_PP_LIST_FOLD_RIGHT_176_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_177(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_177_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_178(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_178_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_179(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_179_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_180(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_180_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_181(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_181_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_182(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_182_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_183(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_183_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_184(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_184_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_185(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_185_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_186(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_186_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_187(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_187_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_188(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_188_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_189(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_189_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_190(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_190_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_191(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_191_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_192(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_192_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_193(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_193_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_194(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_194_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_195(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_195_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_196(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_196_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_197(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_197_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_198(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_198_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_RIGHT_199(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_199_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_200(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_200_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_201(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_201_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_202(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_202_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_203(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_203_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_204(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_204_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_205(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_205_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_206(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_206_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_207(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_207_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_208(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_208_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_209(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_209_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_210(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_210_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_211(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_211_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_212(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_212_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_213(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_213_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_214(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_214_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_215(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_215_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_216(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_216_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_217(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_217_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_218(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_218_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_219(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_219_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_220(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_220_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_221(o, s, l) 
BOOST_PP_LIST_FOLD_RIGHT_221_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_222(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_222_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_223(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_223_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_224(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_224_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_225(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_225_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_226(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_226_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_227(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_227_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_228(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_228_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_229(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_229_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_230(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_230_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_231(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_231_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_232(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_232_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_233(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_233_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_234(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_234_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_235(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_235_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_236(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_236_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_237(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_237_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_238(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_238_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_239(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_239_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_240(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_240_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_241(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_241_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_242(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_242_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_243(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_243_D(o, s, l)\n# define 
BOOST_PP_LIST_FOLD_RIGHT_244(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_244_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_245(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_245_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_246(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_246_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_247(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_247_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_248(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_248_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_249(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_249_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_250(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_250_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_251(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_251_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_252(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_252_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_253(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_253_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_254(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_254_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_255(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_255_D(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_256(o, s, l) BOOST_PP_LIST_FOLD_RIGHT_256_D(o, s, l)\n#\n# define BOOST_PP_LIST_FOLD_RIGHT_1_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(2, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_2, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_2_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(3, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_3, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_3_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(4, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_4, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_4_D(o, 
s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(5, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_5, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_5_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(6, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_6, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_6_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(7, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_7, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_7_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(8, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_8, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_8_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(9, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_9, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_9_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(10, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_10, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_10_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(11, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_11, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_11_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(12, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_12, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_12_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(13, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_13, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_13_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(14, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_14, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_14_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(15, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_15, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_15_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(16, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_16, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_16_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(17, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_17, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_17_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(18, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_18, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_18_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(19, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_19, 
BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_19_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(20, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_20, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_20_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(21, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_21, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_21_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(22, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_22, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_22_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(23, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_23, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_23_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(24, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_24, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_24_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(25, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_25, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_25_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(26, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_26, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), 
BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_26_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(27, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_27, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_27_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(28, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_28, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_28_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(29, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_29, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_29_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(30, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_30, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_30_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(31, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_31, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_31_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(32, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_32, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_32_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(33, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_33, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_33_D(o, 
s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(34, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_34, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_34_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(35, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_35, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_35_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(36, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_36, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_36_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(37, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_37, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_37_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(38, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_38, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_38_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(39, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_39, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_39_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(40, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_40, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_40_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s 
BOOST_PP_TUPLE_EAT_3)(41, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_41, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_41_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(42, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_42, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_42_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(43, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_43, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_43_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(44, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_44, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_44_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(45, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_45, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_45_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(46, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_46, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_46_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(47, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_47, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_47_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(48, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_RIGHT_48, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_48_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(49, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_49, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_49_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(50, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_50, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_50_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(51, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_51, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_51_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(52, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_52, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_52_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(53, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_53, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_53_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(54, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_54, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_54_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(55, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_55, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, 
s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_55_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(56, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_56, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_56_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(57, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_57, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_57_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(58, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_58, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_58_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(59, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_59, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_59_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(60, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_60, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_60_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(61, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_61, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_61_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(62, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_62, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define 
BOOST_PP_LIST_FOLD_RIGHT_62_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(63, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_63, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_63_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(64, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_64, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_64_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(65, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_65, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_65_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(66, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_66, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_66_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(67, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_67, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_67_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(68, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_68, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_68_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(69, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_69, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_69_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(70, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_70, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_70_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(71, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_71, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_71_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(72, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_72, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_72_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(73, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_73, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_73_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(74, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_74, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_74_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(75, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_75, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_75_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(76, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_76, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_76_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s 
BOOST_PP_TUPLE_EAT_3)(77, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_77, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_77_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(78, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_78, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_78_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(79, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_79, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_79_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(80, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_80, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_80_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(81, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_81, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_81_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(82, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_82, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_82_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(83, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_83, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_83_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(84, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_RIGHT_84, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_84_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(85, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_85, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_85_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(86, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_86, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_86_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(87, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_87, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_87_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(88, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_88, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_88_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(89, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_89, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_89_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(90, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_90, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_90_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(91, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_91, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, 
s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_91_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(92, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_92, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_92_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(93, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_93, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_93_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(94, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_94, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_94_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(95, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_95, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_95_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(96, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_96, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_96_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(97, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_97, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_97_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(98, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_98, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define 
BOOST_PP_LIST_FOLD_RIGHT_98_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(99, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_99, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_99_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(100, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_100, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_100_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(101, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_101, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_101_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(102, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_102, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_102_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(103, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_103, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_103_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(104, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_104, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_104_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(105, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_105, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_105_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(106, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_106, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_106_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(107, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_107, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_107_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(108, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_108, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_108_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(109, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_109, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_109_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(110, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_110, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_110_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(111, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_111, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_111_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(112, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_112, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_112_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s 
BOOST_PP_TUPLE_EAT_3)(113, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_113, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_113_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(114, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_114, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_114_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(115, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_115, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_115_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(116, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_116, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_116_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(117, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_117, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_117_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(118, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_118, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_118_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(119, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_119, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_119_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(120, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_120, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_120_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(121, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_121, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_121_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(122, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_122, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_122_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(123, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_123, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_123_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(124, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_124, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_124_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(125, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_125, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_125_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(126, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_126, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_126_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(127, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_RIGHT_127, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_127_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(128, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_128, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_128_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(129, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_129, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_129_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(130, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_130, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_130_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(131, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_131, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_131_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(132, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_132, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_132_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(133, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_133, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_133_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(134, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_134, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_134_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(135, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_135, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_135_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(136, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_136, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_136_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(137, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_137, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_137_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(138, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_138, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_138_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(139, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_139, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_139_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(140, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_140, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_140_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(141, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_141, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, 
BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_141_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(142, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_142, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_142_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(143, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_143, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_143_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(144, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_144, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_144_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(145, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_145, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_145_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(146, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_146, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_146_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(147, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_147, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_147_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(148, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_148, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), 
BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_148_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(149, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_149, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_149_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(150, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_150, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_150_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(151, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_151, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_151_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(152, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_152, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_152_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(153, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_153, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_153_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(154, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_154, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_154_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(155, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_155, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define 
BOOST_PP_LIST_FOLD_RIGHT_155_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(156, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_156, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_156_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(157, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_157, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_157_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(158, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_158, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_158_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(159, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_159, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_159_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(160, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_160, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_160_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(161, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_161, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_161_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(162, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_162, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_162_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(163, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_163, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_163_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(164, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_164, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_164_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(165, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_165, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_165_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(166, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_166, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_166_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(167, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_167, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_167_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(168, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_168, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_168_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(169, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_169, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_169_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s 
BOOST_PP_TUPLE_EAT_3)(170, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_170, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_170_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(171, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_171, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_171_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(172, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_172, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_172_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(173, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_173, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_173_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(174, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_174, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_174_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(175, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_175, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_175_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(176, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_176, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_176_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(177, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_177, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_177_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(178, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_178, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_178_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(179, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_179, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_179_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(180, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_180, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_180_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(181, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_181, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_181_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(182, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_182, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_182_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(183, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_183, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_183_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(184, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_RIGHT_184, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_184_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(185, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_185, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_185_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(186, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_186, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_186_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(187, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_187, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_187_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(188, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_188, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_188_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(189, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_189, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_189_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(190, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_190, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_190_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(191, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_191, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_191_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(192, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_192, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_192_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(193, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_193, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_193_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(194, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_194, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_194_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(195, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_195, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_195_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(196, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_196, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_196_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(197, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_197, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_197_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(198, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_198, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, 
BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_198_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(199, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_199, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_199_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(200, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_200, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_200_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(201, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_201, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_201_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(202, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_202, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_202_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(203, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_203, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_203_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(204, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_204, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_204_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(205, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_205, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), 
BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_205_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(206, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_206, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_206_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(207, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_207, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_207_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(208, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_208, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_208_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(209, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_209, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_209_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(210, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_210, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_210_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(211, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_211, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_211_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(212, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_212, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define 
BOOST_PP_LIST_FOLD_RIGHT_212_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(213, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_213, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_213_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(214, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_214, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_214_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(215, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_215, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_215_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(216, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_216, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_216_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(217, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_217, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_217_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(218, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_218, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_218_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(219, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_219, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_219_D(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(220, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_220, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_220_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(221, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_221, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_221_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(222, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_222, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_222_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(223, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_223, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_223_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(224, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_224, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_224_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(225, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_225, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_225_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(226, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_226, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_226_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s 
BOOST_PP_TUPLE_EAT_3)(227, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_227, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_227_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(228, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_228, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_228_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(229, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_229, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_229_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(230, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_230, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_230_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(231, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_231, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_231_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(232, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_232, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_232_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(233, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_233, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_233_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(234, 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_234, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_234_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(235, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_235, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_235_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(236, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_236, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_236_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(237, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_237, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_237_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(238, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_238, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_238_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(239, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_239, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_239_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(240, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_240, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_240_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(241, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_RIGHT_241, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_241_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(242, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_242, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_242_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(243, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_243, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_243_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(244, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_244, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_244_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(245, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_245, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_245_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(246, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_246, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_246_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(247, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_247, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_247_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(248, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_248, BOOST_PP_NIL 
BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_248_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(249, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_249, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_249_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(250, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_250, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_250_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(251, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_251, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_251_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(252, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_252, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_252_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(253, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_253, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_253_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(254, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_254, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_254_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(255, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_255, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, 
BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_255_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(256, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_256, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n# define BOOST_PP_LIST_FOLD_RIGHT_256_D(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), o, s BOOST_PP_TUPLE_EAT_3)(257, BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_RIGHT_257, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3)(o, s, BOOST_PP_LIST_REST(l)), BOOST_PP_LIST_FIRST(l))\n#\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_NIL 1\n#\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_1(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_2(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_3(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_4(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_5(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_6(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_7(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_8(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_9(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_10(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_11(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_12(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_13(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_14(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_15(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_16(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_17(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_18(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_19(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_20(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_21(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_22(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_23(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_24(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_25(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_26(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_27(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_28(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_29(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_30(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_31(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_32(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_33(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_34(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_35(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_36(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_37(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_38(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_39(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_40(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_41(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_42(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_43(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_44(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_45(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_46(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_47(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_48(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_49(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_50(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_51(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_52(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_53(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_54(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_55(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_56(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_57(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_58(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_59(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_60(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_61(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_62(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_63(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_64(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_65(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_66(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_67(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_68(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_69(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_70(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_71(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_72(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_73(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_74(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_75(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_76(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_77(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_78(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_79(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_80(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_81(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_82(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_83(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_84(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_85(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_86(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_87(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_88(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_89(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_90(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_91(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_92(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_93(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_94(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_95(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_96(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_97(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_98(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_99(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_100(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_101(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_102(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_103(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_104(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_105(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_106(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_107(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_108(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_109(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_110(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_111(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_112(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_113(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_114(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_115(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_116(o, 
s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_117(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_118(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_119(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_120(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_121(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_122(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_123(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_124(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_125(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_126(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_127(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_128(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_129(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_130(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_131(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_132(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_133(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_134(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_135(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_136(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_137(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_138(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_139(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_140(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_141(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_142(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_143(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_144(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_145(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_146(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_147(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_148(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_149(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_150(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_151(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_152(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_153(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_154(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_155(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_156(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_157(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_158(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_159(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_160(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_161(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_162(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_163(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_164(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_165(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_166(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_167(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_168(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_169(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_170(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_171(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_172(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_173(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_174(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_175(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_176(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_177(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_178(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_179(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_180(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_181(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_182(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_183(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_184(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_185(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_186(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_187(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_188(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_189(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_190(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_191(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_192(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_193(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_194(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_195(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_196(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_197(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_198(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_199(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_200(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_201(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_202(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_203(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_204(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_205(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_206(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_207(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_208(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_209(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_210(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_211(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_212(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_213(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_214(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_215(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_216(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_217(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_218(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_219(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_220(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_221(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_222(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_223(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_224(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_225(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_226(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_227(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_228(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_229(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_230(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_231(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_232(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_233(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_234(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_235(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_236(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_237(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_238(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_239(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_240(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_241(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_242(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_243(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_244(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_245(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_246(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_247(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_248(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_249(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_250(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_251(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_252(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_253(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_254(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_255(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_RIGHT_CHECK_BOOST_PP_LIST_FOLD_RIGHT_256(o, s, l) 0\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/detail/fold_left.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_DETAIL_FOLD_LEFT_HPP\n# define BOOST_PREPROCESSOR_LIST_DETAIL_FOLD_LEFT_HPP\n#\n# include <boost/preprocessor/control/expr_iif.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/list/adt.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_LIST_FOLD_LEFT_1(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_2, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(2, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_2(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_3, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(3, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_3(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_4, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(4, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_4(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_5, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(5, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_5(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_6, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(6, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_6(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_7, s 
BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(7, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_7(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_8, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(8, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_8(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_9, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(9, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_9(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_10, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(10, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_10(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_11, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(11, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_11(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_12, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(12, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_12(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_13, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(13, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_13(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_14, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(14, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_14(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_15, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), 
o)(15, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_15(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_16, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(16, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_16(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_17, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(17, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_17(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_18, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(18, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_18(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_19, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(19, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_19(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_20, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(20, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_20(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_21, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(21, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_21(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_22, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(22, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_22(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_23, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(23, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# 
define BOOST_PP_LIST_FOLD_LEFT_23(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_24, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(24, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_24(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_25, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(25, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_25(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_26, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(26, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_26(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_27, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(27, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_27(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_28, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(28, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_28(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_29, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(29, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_29(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_30, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(30, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_30(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_31, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(31, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_31(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_32, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(32, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_32(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_33, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(33, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_33(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_34, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(34, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_34(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_35, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(35, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_35(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_36, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(36, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_36(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_37, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(37, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_37(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_38, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(38, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_38(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_39, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(39, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_39(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_LEFT_40, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(40, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_40(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_41, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(41, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_41(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_42, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(42, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_42(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_43, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(43, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_43(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_44, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(44, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_44(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_45, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(45, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_45(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_46, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(46, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_46(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_47, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(47, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_47(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_48, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(48, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_48(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_49, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(49, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_49(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_50, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(50, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_50(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_51, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(51, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_51(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_52, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(52, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_52(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_53, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(53, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_53(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_54, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(54, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_54(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_55, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(55, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_55(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_56, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(56, s, 
BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_56(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_57, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(57, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_57(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_58, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(58, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_58(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_59, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(59, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_59(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_60, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(60, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_60(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_61, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(61, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_61(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_62, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(62, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_62(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_63, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(63, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_63(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_64, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(64, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_64(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_65, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(65, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_65(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_66, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(66, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_66(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_67, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(67, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_67(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_68, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(68, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_68(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_69, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(69, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_69(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_70, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(70, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_70(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_71, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(71, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_71(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_72, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(72, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_72(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_73, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(73, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_73(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_74, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(74, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_74(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_75, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(75, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_75(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_76, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(76, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_76(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_77, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(77, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_77(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_78, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(78, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_78(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_79, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(79, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_79(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_80, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(80, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_80(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_LEFT_81, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(81, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_81(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_82, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(82, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_82(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_83, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(83, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_83(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_84, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(84, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_84(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_85, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(85, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_85(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_86, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(86, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_86(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_87, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(87, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_87(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_88, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(88, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_88(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_89, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(89, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_89(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_90, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(90, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_90(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_91, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(91, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_91(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_92, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(92, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_92(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_93, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(93, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_93(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_94, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(94, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_94(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_95, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(95, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_95(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_96, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(96, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_96(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_97, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(97, s, 
BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_97(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_98, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(98, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_98(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_99, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(99, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_99(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_100, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(100, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_100(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_101, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(101, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_101(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_102, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(102, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_102(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_103, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(103, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_103(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_104, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(104, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_104(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_105, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(105, s, BOOST_PP_LIST_FIRST(l)), 
BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_105(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_106, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(106, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_106(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_107, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(107, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_107(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_108, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(108, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_108(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_109, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(109, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_109(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_110, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(110, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_110(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_111, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(111, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_111(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_112, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(112, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_112(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_113, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(113, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define 
BOOST_PP_LIST_FOLD_LEFT_113(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_114, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(114, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_114(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_115, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(115, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_115(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_116, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(116, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_116(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_117, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(117, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_117(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_118, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(118, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_118(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_119, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(119, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_119(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_120, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(120, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_120(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_121, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(121, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_121(o, s, l) 
BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_122, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(122, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_122(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_123, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(123, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_123(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_124, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(124, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_124(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_125, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(125, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_125(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_126, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(126, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_126(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_127, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(127, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_127(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_128, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(128, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_128(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_129, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(129, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_129(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), 
BOOST_PP_LIST_FOLD_LEFT_130, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(130, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_130(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_131, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(131, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_131(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_132, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(132, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_132(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_133, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(133, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_133(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_134, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(134, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_134(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_135, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(135, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_135(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_136, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(136, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_136(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_137, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(137, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_137(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_138, s 
BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(138, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_138(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_139, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(139, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_139(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_140, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(140, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_140(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_141, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(141, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_141(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_142, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(142, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_142(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_143, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(143, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_143(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_144, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(144, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_144(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_145, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(145, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_145(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_146, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(146, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_146(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_147, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(147, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_147(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_148, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(148, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_148(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_149, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(149, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_149(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_150, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(150, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_150(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_151, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(151, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_151(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_152, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(152, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_152(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_153, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(153, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_153(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_154, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(154, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_154(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_155, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(155, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_155(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_156, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(156, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_156(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_157, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(157, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_157(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_158, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(158, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_158(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_159, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(159, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_159(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_160, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(160, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_160(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_161, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(161, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_161(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_162, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(162, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_162(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_163, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(163, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_163(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_164, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(164, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_164(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_165, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(165, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_165(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_166, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(166, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_166(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_167, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(167, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_167(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_168, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(168, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_168(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_169, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(169, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_169(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_170, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(170, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_170(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_171, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(171, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_171(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_172, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(172, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_172(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_173, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(173, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_173(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_174, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(174, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_174(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_175, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(175, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_175(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_176, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(176, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_176(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_177, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(177, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_177(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_178, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(178, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_178(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_179, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(179, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_179(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_180, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(180, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_180(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_181, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(181, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_181(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_182, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(182, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_182(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_183, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(183, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_183(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_184, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(184, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_184(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_185, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(185, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_185(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_186, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(186, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_186(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_187, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(187, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_187(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_188, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(188, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_188(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_189, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(189, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_189(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_190, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(190, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_190(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_191, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(191, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_191(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_192, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(192, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_192(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_193, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(193, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_193(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_194, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(194, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_194(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_195, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(195, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_195(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_196, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(196, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_196(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_197, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(197, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_197(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_198, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(198, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_198(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_199, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(199, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_199(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_200, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(200, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_200(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_201, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(201, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_201(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_202, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(202, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_202(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_203, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(203, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_203(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_204, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(204, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_204(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_205, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(205, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_205(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_206, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(206, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_206(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_207, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(207, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_207(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_208, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(208, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_208(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_209, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(209, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_209(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_210, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(210, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_210(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_211, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(211, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_211(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_212, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(212, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_212(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_213, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(213, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_213(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_214, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(214, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_214(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_215, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(215, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_215(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_216, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(216, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_216(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_217, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(217, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_217(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_218, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(218, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_218(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_219, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(219, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_219(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_220, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(220, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_220(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_221, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(221, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_221(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_222, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(222, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_222(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_223, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(223, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_223(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_224, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(224, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_224(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_225, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(225, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_225(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_226, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(226, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_226(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_227, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(227, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_227(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_228, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(228, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_228(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_229, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(229, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_229(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_230, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(230, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_230(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_231, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(231, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_231(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_232, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(232, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_232(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_233, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(233, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_233(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_234, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(234, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_234(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_235, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(235, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_235(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_236, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(236, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_236(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_237, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(237, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_237(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_238, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(238, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_238(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_239, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(239, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_239(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_240, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(240, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_240(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_241, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(241, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_241(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_242, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(242, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_242(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_243, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(243, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_243(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_244, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(244, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_244(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_245, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(245, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_245(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_246, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(246, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_246(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_247, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(247, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_247(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_248, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(248, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_248(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_249, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(249, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_249(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_250, s BOOST_PP_TUPLE_EAT_3)(o, 
BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(250, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_250(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_251, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(251, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_251(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_252, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(252, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_252(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_253, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(253, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_253(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_254, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(254, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_254(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_255, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(255, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_255(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_256, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(256, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n# define BOOST_PP_LIST_FOLD_LEFT_256(o, s, l) BOOST_PP_IIF(BOOST_PP_LIST_IS_CONS(l), BOOST_PP_LIST_FOLD_LEFT_257, s BOOST_PP_TUPLE_EAT_3)(o, BOOST_PP_EXPR_IIF(BOOST_PP_LIST_IS_CONS(l), o)(257, s, BOOST_PP_LIST_FIRST(l)), BOOST_PP_LIST_REST(l))\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/detail/fold_right.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_DETAIL_FOLD_RIGHT_HPP\n# define BOOST_PREPROCESSOR_LIST_DETAIL_FOLD_RIGHT_HPP\n#\n# include <boost/preprocessor/list/fold_left.hpp>\n# include <boost/preprocessor/list/reverse.hpp>\n#\n# define BOOST_PP_LIST_FOLD_RIGHT_1(o, s, l) BOOST_PP_LIST_FOLD_LEFT_1(o, s, BOOST_PP_LIST_REVERSE_D(1, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_2(o, s, l) BOOST_PP_LIST_FOLD_LEFT_2(o, s, BOOST_PP_LIST_REVERSE_D(2, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_3(o, s, l) BOOST_PP_LIST_FOLD_LEFT_3(o, s, BOOST_PP_LIST_REVERSE_D(3, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_4(o, s, l) BOOST_PP_LIST_FOLD_LEFT_4(o, s, BOOST_PP_LIST_REVERSE_D(4, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_5(o, s, l) BOOST_PP_LIST_FOLD_LEFT_5(o, s, BOOST_PP_LIST_REVERSE_D(5, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_6(o, s, l) BOOST_PP_LIST_FOLD_LEFT_6(o, s, BOOST_PP_LIST_REVERSE_D(6, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_7(o, s, l) BOOST_PP_LIST_FOLD_LEFT_7(o, s, BOOST_PP_LIST_REVERSE_D(7, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_8(o, s, l) BOOST_PP_LIST_FOLD_LEFT_8(o, s, BOOST_PP_LIST_REVERSE_D(8, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_9(o, s, l) BOOST_PP_LIST_FOLD_LEFT_9(o, s, BOOST_PP_LIST_REVERSE_D(9, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_10(o, s, l) BOOST_PP_LIST_FOLD_LEFT_10(o, s, BOOST_PP_LIST_REVERSE_D(10, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_11(o, s, l) BOOST_PP_LIST_FOLD_LEFT_11(o, s, BOOST_PP_LIST_REVERSE_D(11, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_12(o, s, l) BOOST_PP_LIST_FOLD_LEFT_12(o, s, BOOST_PP_LIST_REVERSE_D(12, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_13(o, s, l) BOOST_PP_LIST_FOLD_LEFT_13(o, s, 
BOOST_PP_LIST_REVERSE_D(13, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_14(o, s, l) BOOST_PP_LIST_FOLD_LEFT_14(o, s, BOOST_PP_LIST_REVERSE_D(14, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_15(o, s, l) BOOST_PP_LIST_FOLD_LEFT_15(o, s, BOOST_PP_LIST_REVERSE_D(15, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_16(o, s, l) BOOST_PP_LIST_FOLD_LEFT_16(o, s, BOOST_PP_LIST_REVERSE_D(16, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_17(o, s, l) BOOST_PP_LIST_FOLD_LEFT_17(o, s, BOOST_PP_LIST_REVERSE_D(17, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_18(o, s, l) BOOST_PP_LIST_FOLD_LEFT_18(o, s, BOOST_PP_LIST_REVERSE_D(18, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_19(o, s, l) BOOST_PP_LIST_FOLD_LEFT_19(o, s, BOOST_PP_LIST_REVERSE_D(19, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_20(o, s, l) BOOST_PP_LIST_FOLD_LEFT_20(o, s, BOOST_PP_LIST_REVERSE_D(20, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_21(o, s, l) BOOST_PP_LIST_FOLD_LEFT_21(o, s, BOOST_PP_LIST_REVERSE_D(21, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_22(o, s, l) BOOST_PP_LIST_FOLD_LEFT_22(o, s, BOOST_PP_LIST_REVERSE_D(22, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_23(o, s, l) BOOST_PP_LIST_FOLD_LEFT_23(o, s, BOOST_PP_LIST_REVERSE_D(23, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_24(o, s, l) BOOST_PP_LIST_FOLD_LEFT_24(o, s, BOOST_PP_LIST_REVERSE_D(24, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_25(o, s, l) BOOST_PP_LIST_FOLD_LEFT_25(o, s, BOOST_PP_LIST_REVERSE_D(25, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_26(o, s, l) BOOST_PP_LIST_FOLD_LEFT_26(o, s, BOOST_PP_LIST_REVERSE_D(26, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_27(o, s, l) BOOST_PP_LIST_FOLD_LEFT_27(o, s, BOOST_PP_LIST_REVERSE_D(27, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_28(o, s, l) BOOST_PP_LIST_FOLD_LEFT_28(o, s, BOOST_PP_LIST_REVERSE_D(28, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_29(o, s, l) BOOST_PP_LIST_FOLD_LEFT_29(o, s, BOOST_PP_LIST_REVERSE_D(29, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_30(o, s, l) BOOST_PP_LIST_FOLD_LEFT_30(o, s, BOOST_PP_LIST_REVERSE_D(30, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_31(o, s, l) 
BOOST_PP_LIST_FOLD_LEFT_31(o, s, BOOST_PP_LIST_REVERSE_D(31, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_32(o, s, l) BOOST_PP_LIST_FOLD_LEFT_32(o, s, BOOST_PP_LIST_REVERSE_D(32, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_33(o, s, l) BOOST_PP_LIST_FOLD_LEFT_33(o, s, BOOST_PP_LIST_REVERSE_D(33, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_34(o, s, l) BOOST_PP_LIST_FOLD_LEFT_34(o, s, BOOST_PP_LIST_REVERSE_D(34, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_35(o, s, l) BOOST_PP_LIST_FOLD_LEFT_35(o, s, BOOST_PP_LIST_REVERSE_D(35, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_36(o, s, l) BOOST_PP_LIST_FOLD_LEFT_36(o, s, BOOST_PP_LIST_REVERSE_D(36, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_37(o, s, l) BOOST_PP_LIST_FOLD_LEFT_37(o, s, BOOST_PP_LIST_REVERSE_D(37, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_38(o, s, l) BOOST_PP_LIST_FOLD_LEFT_38(o, s, BOOST_PP_LIST_REVERSE_D(38, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_39(o, s, l) BOOST_PP_LIST_FOLD_LEFT_39(o, s, BOOST_PP_LIST_REVERSE_D(39, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_40(o, s, l) BOOST_PP_LIST_FOLD_LEFT_40(o, s, BOOST_PP_LIST_REVERSE_D(40, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_41(o, s, l) BOOST_PP_LIST_FOLD_LEFT_41(o, s, BOOST_PP_LIST_REVERSE_D(41, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_42(o, s, l) BOOST_PP_LIST_FOLD_LEFT_42(o, s, BOOST_PP_LIST_REVERSE_D(42, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_43(o, s, l) BOOST_PP_LIST_FOLD_LEFT_43(o, s, BOOST_PP_LIST_REVERSE_D(43, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_44(o, s, l) BOOST_PP_LIST_FOLD_LEFT_44(o, s, BOOST_PP_LIST_REVERSE_D(44, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_45(o, s, l) BOOST_PP_LIST_FOLD_LEFT_45(o, s, BOOST_PP_LIST_REVERSE_D(45, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_46(o, s, l) BOOST_PP_LIST_FOLD_LEFT_46(o, s, BOOST_PP_LIST_REVERSE_D(46, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_47(o, s, l) BOOST_PP_LIST_FOLD_LEFT_47(o, s, BOOST_PP_LIST_REVERSE_D(47, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_48(o, s, l) BOOST_PP_LIST_FOLD_LEFT_48(o, s, BOOST_PP_LIST_REVERSE_D(48, l))\n# define 
BOOST_PP_LIST_FOLD_RIGHT_49(o, s, l) BOOST_PP_LIST_FOLD_LEFT_49(o, s, BOOST_PP_LIST_REVERSE_D(49, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_50(o, s, l) BOOST_PP_LIST_FOLD_LEFT_50(o, s, BOOST_PP_LIST_REVERSE_D(50, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_51(o, s, l) BOOST_PP_LIST_FOLD_LEFT_51(o, s, BOOST_PP_LIST_REVERSE_D(51, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_52(o, s, l) BOOST_PP_LIST_FOLD_LEFT_52(o, s, BOOST_PP_LIST_REVERSE_D(52, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_53(o, s, l) BOOST_PP_LIST_FOLD_LEFT_53(o, s, BOOST_PP_LIST_REVERSE_D(53, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_54(o, s, l) BOOST_PP_LIST_FOLD_LEFT_54(o, s, BOOST_PP_LIST_REVERSE_D(54, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_55(o, s, l) BOOST_PP_LIST_FOLD_LEFT_55(o, s, BOOST_PP_LIST_REVERSE_D(55, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_56(o, s, l) BOOST_PP_LIST_FOLD_LEFT_56(o, s, BOOST_PP_LIST_REVERSE_D(56, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_57(o, s, l) BOOST_PP_LIST_FOLD_LEFT_57(o, s, BOOST_PP_LIST_REVERSE_D(57, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_58(o, s, l) BOOST_PP_LIST_FOLD_LEFT_58(o, s, BOOST_PP_LIST_REVERSE_D(58, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_59(o, s, l) BOOST_PP_LIST_FOLD_LEFT_59(o, s, BOOST_PP_LIST_REVERSE_D(59, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_60(o, s, l) BOOST_PP_LIST_FOLD_LEFT_60(o, s, BOOST_PP_LIST_REVERSE_D(60, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_61(o, s, l) BOOST_PP_LIST_FOLD_LEFT_61(o, s, BOOST_PP_LIST_REVERSE_D(61, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_62(o, s, l) BOOST_PP_LIST_FOLD_LEFT_62(o, s, BOOST_PP_LIST_REVERSE_D(62, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_63(o, s, l) BOOST_PP_LIST_FOLD_LEFT_63(o, s, BOOST_PP_LIST_REVERSE_D(63, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_64(o, s, l) BOOST_PP_LIST_FOLD_LEFT_64(o, s, BOOST_PP_LIST_REVERSE_D(64, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_65(o, s, l) BOOST_PP_LIST_FOLD_LEFT_65(o, s, BOOST_PP_LIST_REVERSE_D(65, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_66(o, s, l) BOOST_PP_LIST_FOLD_LEFT_66(o, s, 
BOOST_PP_LIST_REVERSE_D(66, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_67(o, s, l) BOOST_PP_LIST_FOLD_LEFT_67(o, s, BOOST_PP_LIST_REVERSE_D(67, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_68(o, s, l) BOOST_PP_LIST_FOLD_LEFT_68(o, s, BOOST_PP_LIST_REVERSE_D(68, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_69(o, s, l) BOOST_PP_LIST_FOLD_LEFT_69(o, s, BOOST_PP_LIST_REVERSE_D(69, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_70(o, s, l) BOOST_PP_LIST_FOLD_LEFT_70(o, s, BOOST_PP_LIST_REVERSE_D(70, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_71(o, s, l) BOOST_PP_LIST_FOLD_LEFT_71(o, s, BOOST_PP_LIST_REVERSE_D(71, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_72(o, s, l) BOOST_PP_LIST_FOLD_LEFT_72(o, s, BOOST_PP_LIST_REVERSE_D(72, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_73(o, s, l) BOOST_PP_LIST_FOLD_LEFT_73(o, s, BOOST_PP_LIST_REVERSE_D(73, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_74(o, s, l) BOOST_PP_LIST_FOLD_LEFT_74(o, s, BOOST_PP_LIST_REVERSE_D(74, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_75(o, s, l) BOOST_PP_LIST_FOLD_LEFT_75(o, s, BOOST_PP_LIST_REVERSE_D(75, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_76(o, s, l) BOOST_PP_LIST_FOLD_LEFT_76(o, s, BOOST_PP_LIST_REVERSE_D(76, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_77(o, s, l) BOOST_PP_LIST_FOLD_LEFT_77(o, s, BOOST_PP_LIST_REVERSE_D(77, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_78(o, s, l) BOOST_PP_LIST_FOLD_LEFT_78(o, s, BOOST_PP_LIST_REVERSE_D(78, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_79(o, s, l) BOOST_PP_LIST_FOLD_LEFT_79(o, s, BOOST_PP_LIST_REVERSE_D(79, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_80(o, s, l) BOOST_PP_LIST_FOLD_LEFT_80(o, s, BOOST_PP_LIST_REVERSE_D(80, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_81(o, s, l) BOOST_PP_LIST_FOLD_LEFT_81(o, s, BOOST_PP_LIST_REVERSE_D(81, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_82(o, s, l) BOOST_PP_LIST_FOLD_LEFT_82(o, s, BOOST_PP_LIST_REVERSE_D(82, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_83(o, s, l) BOOST_PP_LIST_FOLD_LEFT_83(o, s, BOOST_PP_LIST_REVERSE_D(83, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_84(o, s, l) 
BOOST_PP_LIST_FOLD_LEFT_84(o, s, BOOST_PP_LIST_REVERSE_D(84, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_85(o, s, l) BOOST_PP_LIST_FOLD_LEFT_85(o, s, BOOST_PP_LIST_REVERSE_D(85, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_86(o, s, l) BOOST_PP_LIST_FOLD_LEFT_86(o, s, BOOST_PP_LIST_REVERSE_D(86, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_87(o, s, l) BOOST_PP_LIST_FOLD_LEFT_87(o, s, BOOST_PP_LIST_REVERSE_D(87, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_88(o, s, l) BOOST_PP_LIST_FOLD_LEFT_88(o, s, BOOST_PP_LIST_REVERSE_D(88, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_89(o, s, l) BOOST_PP_LIST_FOLD_LEFT_89(o, s, BOOST_PP_LIST_REVERSE_D(89, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_90(o, s, l) BOOST_PP_LIST_FOLD_LEFT_90(o, s, BOOST_PP_LIST_REVERSE_D(90, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_91(o, s, l) BOOST_PP_LIST_FOLD_LEFT_91(o, s, BOOST_PP_LIST_REVERSE_D(91, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_92(o, s, l) BOOST_PP_LIST_FOLD_LEFT_92(o, s, BOOST_PP_LIST_REVERSE_D(92, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_93(o, s, l) BOOST_PP_LIST_FOLD_LEFT_93(o, s, BOOST_PP_LIST_REVERSE_D(93, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_94(o, s, l) BOOST_PP_LIST_FOLD_LEFT_94(o, s, BOOST_PP_LIST_REVERSE_D(94, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_95(o, s, l) BOOST_PP_LIST_FOLD_LEFT_95(o, s, BOOST_PP_LIST_REVERSE_D(95, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_96(o, s, l) BOOST_PP_LIST_FOLD_LEFT_96(o, s, BOOST_PP_LIST_REVERSE_D(96, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_97(o, s, l) BOOST_PP_LIST_FOLD_LEFT_97(o, s, BOOST_PP_LIST_REVERSE_D(97, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_98(o, s, l) BOOST_PP_LIST_FOLD_LEFT_98(o, s, BOOST_PP_LIST_REVERSE_D(98, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_99(o, s, l) BOOST_PP_LIST_FOLD_LEFT_99(o, s, BOOST_PP_LIST_REVERSE_D(99, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_100(o, s, l) BOOST_PP_LIST_FOLD_LEFT_100(o, s, BOOST_PP_LIST_REVERSE_D(100, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_101(o, s, l) BOOST_PP_LIST_FOLD_LEFT_101(o, s, BOOST_PP_LIST_REVERSE_D(101, l))\n# define 
BOOST_PP_LIST_FOLD_RIGHT_102(o, s, l) BOOST_PP_LIST_FOLD_LEFT_102(o, s, BOOST_PP_LIST_REVERSE_D(102, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_103(o, s, l) BOOST_PP_LIST_FOLD_LEFT_103(o, s, BOOST_PP_LIST_REVERSE_D(103, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_104(o, s, l) BOOST_PP_LIST_FOLD_LEFT_104(o, s, BOOST_PP_LIST_REVERSE_D(104, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_105(o, s, l) BOOST_PP_LIST_FOLD_LEFT_105(o, s, BOOST_PP_LIST_REVERSE_D(105, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_106(o, s, l) BOOST_PP_LIST_FOLD_LEFT_106(o, s, BOOST_PP_LIST_REVERSE_D(106, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_107(o, s, l) BOOST_PP_LIST_FOLD_LEFT_107(o, s, BOOST_PP_LIST_REVERSE_D(107, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_108(o, s, l) BOOST_PP_LIST_FOLD_LEFT_108(o, s, BOOST_PP_LIST_REVERSE_D(108, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_109(o, s, l) BOOST_PP_LIST_FOLD_LEFT_109(o, s, BOOST_PP_LIST_REVERSE_D(109, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_110(o, s, l) BOOST_PP_LIST_FOLD_LEFT_110(o, s, BOOST_PP_LIST_REVERSE_D(110, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_111(o, s, l) BOOST_PP_LIST_FOLD_LEFT_111(o, s, BOOST_PP_LIST_REVERSE_D(111, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_112(o, s, l) BOOST_PP_LIST_FOLD_LEFT_112(o, s, BOOST_PP_LIST_REVERSE_D(112, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_113(o, s, l) BOOST_PP_LIST_FOLD_LEFT_113(o, s, BOOST_PP_LIST_REVERSE_D(113, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_114(o, s, l) BOOST_PP_LIST_FOLD_LEFT_114(o, s, BOOST_PP_LIST_REVERSE_D(114, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_115(o, s, l) BOOST_PP_LIST_FOLD_LEFT_115(o, s, BOOST_PP_LIST_REVERSE_D(115, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_116(o, s, l) BOOST_PP_LIST_FOLD_LEFT_116(o, s, BOOST_PP_LIST_REVERSE_D(116, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_117(o, s, l) BOOST_PP_LIST_FOLD_LEFT_117(o, s, BOOST_PP_LIST_REVERSE_D(117, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_118(o, s, l) BOOST_PP_LIST_FOLD_LEFT_118(o, s, BOOST_PP_LIST_REVERSE_D(118, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_119(o, s, l) 
BOOST_PP_LIST_FOLD_LEFT_119(o, s, BOOST_PP_LIST_REVERSE_D(119, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_120(o, s, l) BOOST_PP_LIST_FOLD_LEFT_120(o, s, BOOST_PP_LIST_REVERSE_D(120, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_121(o, s, l) BOOST_PP_LIST_FOLD_LEFT_121(o, s, BOOST_PP_LIST_REVERSE_D(121, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_122(o, s, l) BOOST_PP_LIST_FOLD_LEFT_122(o, s, BOOST_PP_LIST_REVERSE_D(122, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_123(o, s, l) BOOST_PP_LIST_FOLD_LEFT_123(o, s, BOOST_PP_LIST_REVERSE_D(123, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_124(o, s, l) BOOST_PP_LIST_FOLD_LEFT_124(o, s, BOOST_PP_LIST_REVERSE_D(124, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_125(o, s, l) BOOST_PP_LIST_FOLD_LEFT_125(o, s, BOOST_PP_LIST_REVERSE_D(125, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_126(o, s, l) BOOST_PP_LIST_FOLD_LEFT_126(o, s, BOOST_PP_LIST_REVERSE_D(126, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_127(o, s, l) BOOST_PP_LIST_FOLD_LEFT_127(o, s, BOOST_PP_LIST_REVERSE_D(127, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_128(o, s, l) BOOST_PP_LIST_FOLD_LEFT_128(o, s, BOOST_PP_LIST_REVERSE_D(128, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_129(o, s, l) BOOST_PP_LIST_FOLD_LEFT_129(o, s, BOOST_PP_LIST_REVERSE_D(129, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_130(o, s, l) BOOST_PP_LIST_FOLD_LEFT_130(o, s, BOOST_PP_LIST_REVERSE_D(130, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_131(o, s, l) BOOST_PP_LIST_FOLD_LEFT_131(o, s, BOOST_PP_LIST_REVERSE_D(131, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_132(o, s, l) BOOST_PP_LIST_FOLD_LEFT_132(o, s, BOOST_PP_LIST_REVERSE_D(132, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_133(o, s, l) BOOST_PP_LIST_FOLD_LEFT_133(o, s, BOOST_PP_LIST_REVERSE_D(133, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_134(o, s, l) BOOST_PP_LIST_FOLD_LEFT_134(o, s, BOOST_PP_LIST_REVERSE_D(134, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_135(o, s, l) BOOST_PP_LIST_FOLD_LEFT_135(o, s, BOOST_PP_LIST_REVERSE_D(135, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_136(o, s, l) BOOST_PP_LIST_FOLD_LEFT_136(o, s, 
BOOST_PP_LIST_REVERSE_D(136, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_137(o, s, l) BOOST_PP_LIST_FOLD_LEFT_137(o, s, BOOST_PP_LIST_REVERSE_D(137, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_138(o, s, l) BOOST_PP_LIST_FOLD_LEFT_138(o, s, BOOST_PP_LIST_REVERSE_D(138, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_139(o, s, l) BOOST_PP_LIST_FOLD_LEFT_139(o, s, BOOST_PP_LIST_REVERSE_D(139, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_140(o, s, l) BOOST_PP_LIST_FOLD_LEFT_140(o, s, BOOST_PP_LIST_REVERSE_D(140, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_141(o, s, l) BOOST_PP_LIST_FOLD_LEFT_141(o, s, BOOST_PP_LIST_REVERSE_D(141, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_142(o, s, l) BOOST_PP_LIST_FOLD_LEFT_142(o, s, BOOST_PP_LIST_REVERSE_D(142, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_143(o, s, l) BOOST_PP_LIST_FOLD_LEFT_143(o, s, BOOST_PP_LIST_REVERSE_D(143, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_144(o, s, l) BOOST_PP_LIST_FOLD_LEFT_144(o, s, BOOST_PP_LIST_REVERSE_D(144, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_145(o, s, l) BOOST_PP_LIST_FOLD_LEFT_145(o, s, BOOST_PP_LIST_REVERSE_D(145, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_146(o, s, l) BOOST_PP_LIST_FOLD_LEFT_146(o, s, BOOST_PP_LIST_REVERSE_D(146, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_147(o, s, l) BOOST_PP_LIST_FOLD_LEFT_147(o, s, BOOST_PP_LIST_REVERSE_D(147, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_148(o, s, l) BOOST_PP_LIST_FOLD_LEFT_148(o, s, BOOST_PP_LIST_REVERSE_D(148, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_149(o, s, l) BOOST_PP_LIST_FOLD_LEFT_149(o, s, BOOST_PP_LIST_REVERSE_D(149, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_150(o, s, l) BOOST_PP_LIST_FOLD_LEFT_150(o, s, BOOST_PP_LIST_REVERSE_D(150, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_151(o, s, l) BOOST_PP_LIST_FOLD_LEFT_151(o, s, BOOST_PP_LIST_REVERSE_D(151, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_152(o, s, l) BOOST_PP_LIST_FOLD_LEFT_152(o, s, BOOST_PP_LIST_REVERSE_D(152, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_153(o, s, l) BOOST_PP_LIST_FOLD_LEFT_153(o, s, BOOST_PP_LIST_REVERSE_D(153, l))\n# define 
BOOST_PP_LIST_FOLD_RIGHT_154(o, s, l) BOOST_PP_LIST_FOLD_LEFT_154(o, s, BOOST_PP_LIST_REVERSE_D(154, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_155(o, s, l) BOOST_PP_LIST_FOLD_LEFT_155(o, s, BOOST_PP_LIST_REVERSE_D(155, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_156(o, s, l) BOOST_PP_LIST_FOLD_LEFT_156(o, s, BOOST_PP_LIST_REVERSE_D(156, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_157(o, s, l) BOOST_PP_LIST_FOLD_LEFT_157(o, s, BOOST_PP_LIST_REVERSE_D(157, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_158(o, s, l) BOOST_PP_LIST_FOLD_LEFT_158(o, s, BOOST_PP_LIST_REVERSE_D(158, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_159(o, s, l) BOOST_PP_LIST_FOLD_LEFT_159(o, s, BOOST_PP_LIST_REVERSE_D(159, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_160(o, s, l) BOOST_PP_LIST_FOLD_LEFT_160(o, s, BOOST_PP_LIST_REVERSE_D(160, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_161(o, s, l) BOOST_PP_LIST_FOLD_LEFT_161(o, s, BOOST_PP_LIST_REVERSE_D(161, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_162(o, s, l) BOOST_PP_LIST_FOLD_LEFT_162(o, s, BOOST_PP_LIST_REVERSE_D(162, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_163(o, s, l) BOOST_PP_LIST_FOLD_LEFT_163(o, s, BOOST_PP_LIST_REVERSE_D(163, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_164(o, s, l) BOOST_PP_LIST_FOLD_LEFT_164(o, s, BOOST_PP_LIST_REVERSE_D(164, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_165(o, s, l) BOOST_PP_LIST_FOLD_LEFT_165(o, s, BOOST_PP_LIST_REVERSE_D(165, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_166(o, s, l) BOOST_PP_LIST_FOLD_LEFT_166(o, s, BOOST_PP_LIST_REVERSE_D(166, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_167(o, s, l) BOOST_PP_LIST_FOLD_LEFT_167(o, s, BOOST_PP_LIST_REVERSE_D(167, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_168(o, s, l) BOOST_PP_LIST_FOLD_LEFT_168(o, s, BOOST_PP_LIST_REVERSE_D(168, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_169(o, s, l) BOOST_PP_LIST_FOLD_LEFT_169(o, s, BOOST_PP_LIST_REVERSE_D(169, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_170(o, s, l) BOOST_PP_LIST_FOLD_LEFT_170(o, s, BOOST_PP_LIST_REVERSE_D(170, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_171(o, s, l) 
BOOST_PP_LIST_FOLD_LEFT_171(o, s, BOOST_PP_LIST_REVERSE_D(171, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_172(o, s, l) BOOST_PP_LIST_FOLD_LEFT_172(o, s, BOOST_PP_LIST_REVERSE_D(172, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_173(o, s, l) BOOST_PP_LIST_FOLD_LEFT_173(o, s, BOOST_PP_LIST_REVERSE_D(173, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_174(o, s, l) BOOST_PP_LIST_FOLD_LEFT_174(o, s, BOOST_PP_LIST_REVERSE_D(174, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_175(o, s, l) BOOST_PP_LIST_FOLD_LEFT_175(o, s, BOOST_PP_LIST_REVERSE_D(175, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_176(o, s, l) BOOST_PP_LIST_FOLD_LEFT_176(o, s, BOOST_PP_LIST_REVERSE_D(176, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_177(o, s, l) BOOST_PP_LIST_FOLD_LEFT_177(o, s, BOOST_PP_LIST_REVERSE_D(177, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_178(o, s, l) BOOST_PP_LIST_FOLD_LEFT_178(o, s, BOOST_PP_LIST_REVERSE_D(178, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_179(o, s, l) BOOST_PP_LIST_FOLD_LEFT_179(o, s, BOOST_PP_LIST_REVERSE_D(179, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_180(o, s, l) BOOST_PP_LIST_FOLD_LEFT_180(o, s, BOOST_PP_LIST_REVERSE_D(180, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_181(o, s, l) BOOST_PP_LIST_FOLD_LEFT_181(o, s, BOOST_PP_LIST_REVERSE_D(181, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_182(o, s, l) BOOST_PP_LIST_FOLD_LEFT_182(o, s, BOOST_PP_LIST_REVERSE_D(182, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_183(o, s, l) BOOST_PP_LIST_FOLD_LEFT_183(o, s, BOOST_PP_LIST_REVERSE_D(183, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_184(o, s, l) BOOST_PP_LIST_FOLD_LEFT_184(o, s, BOOST_PP_LIST_REVERSE_D(184, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_185(o, s, l) BOOST_PP_LIST_FOLD_LEFT_185(o, s, BOOST_PP_LIST_REVERSE_D(185, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_186(o, s, l) BOOST_PP_LIST_FOLD_LEFT_186(o, s, BOOST_PP_LIST_REVERSE_D(186, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_187(o, s, l) BOOST_PP_LIST_FOLD_LEFT_187(o, s, BOOST_PP_LIST_REVERSE_D(187, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_188(o, s, l) BOOST_PP_LIST_FOLD_LEFT_188(o, s, 
BOOST_PP_LIST_REVERSE_D(188, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_189(o, s, l) BOOST_PP_LIST_FOLD_LEFT_189(o, s, BOOST_PP_LIST_REVERSE_D(189, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_190(o, s, l) BOOST_PP_LIST_FOLD_LEFT_190(o, s, BOOST_PP_LIST_REVERSE_D(190, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_191(o, s, l) BOOST_PP_LIST_FOLD_LEFT_191(o, s, BOOST_PP_LIST_REVERSE_D(191, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_192(o, s, l) BOOST_PP_LIST_FOLD_LEFT_192(o, s, BOOST_PP_LIST_REVERSE_D(192, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_193(o, s, l) BOOST_PP_LIST_FOLD_LEFT_193(o, s, BOOST_PP_LIST_REVERSE_D(193, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_194(o, s, l) BOOST_PP_LIST_FOLD_LEFT_194(o, s, BOOST_PP_LIST_REVERSE_D(194, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_195(o, s, l) BOOST_PP_LIST_FOLD_LEFT_195(o, s, BOOST_PP_LIST_REVERSE_D(195, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_196(o, s, l) BOOST_PP_LIST_FOLD_LEFT_196(o, s, BOOST_PP_LIST_REVERSE_D(196, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_197(o, s, l) BOOST_PP_LIST_FOLD_LEFT_197(o, s, BOOST_PP_LIST_REVERSE_D(197, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_198(o, s, l) BOOST_PP_LIST_FOLD_LEFT_198(o, s, BOOST_PP_LIST_REVERSE_D(198, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_199(o, s, l) BOOST_PP_LIST_FOLD_LEFT_199(o, s, BOOST_PP_LIST_REVERSE_D(199, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_200(o, s, l) BOOST_PP_LIST_FOLD_LEFT_200(o, s, BOOST_PP_LIST_REVERSE_D(200, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_201(o, s, l) BOOST_PP_LIST_FOLD_LEFT_201(o, s, BOOST_PP_LIST_REVERSE_D(201, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_202(o, s, l) BOOST_PP_LIST_FOLD_LEFT_202(o, s, BOOST_PP_LIST_REVERSE_D(202, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_203(o, s, l) BOOST_PP_LIST_FOLD_LEFT_203(o, s, BOOST_PP_LIST_REVERSE_D(203, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_204(o, s, l) BOOST_PP_LIST_FOLD_LEFT_204(o, s, BOOST_PP_LIST_REVERSE_D(204, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_205(o, s, l) BOOST_PP_LIST_FOLD_LEFT_205(o, s, BOOST_PP_LIST_REVERSE_D(205, l))\n# define 
BOOST_PP_LIST_FOLD_RIGHT_206(o, s, l) BOOST_PP_LIST_FOLD_LEFT_206(o, s, BOOST_PP_LIST_REVERSE_D(206, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_207(o, s, l) BOOST_PP_LIST_FOLD_LEFT_207(o, s, BOOST_PP_LIST_REVERSE_D(207, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_208(o, s, l) BOOST_PP_LIST_FOLD_LEFT_208(o, s, BOOST_PP_LIST_REVERSE_D(208, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_209(o, s, l) BOOST_PP_LIST_FOLD_LEFT_209(o, s, BOOST_PP_LIST_REVERSE_D(209, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_210(o, s, l) BOOST_PP_LIST_FOLD_LEFT_210(o, s, BOOST_PP_LIST_REVERSE_D(210, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_211(o, s, l) BOOST_PP_LIST_FOLD_LEFT_211(o, s, BOOST_PP_LIST_REVERSE_D(211, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_212(o, s, l) BOOST_PP_LIST_FOLD_LEFT_212(o, s, BOOST_PP_LIST_REVERSE_D(212, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_213(o, s, l) BOOST_PP_LIST_FOLD_LEFT_213(o, s, BOOST_PP_LIST_REVERSE_D(213, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_214(o, s, l) BOOST_PP_LIST_FOLD_LEFT_214(o, s, BOOST_PP_LIST_REVERSE_D(214, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_215(o, s, l) BOOST_PP_LIST_FOLD_LEFT_215(o, s, BOOST_PP_LIST_REVERSE_D(215, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_216(o, s, l) BOOST_PP_LIST_FOLD_LEFT_216(o, s, BOOST_PP_LIST_REVERSE_D(216, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_217(o, s, l) BOOST_PP_LIST_FOLD_LEFT_217(o, s, BOOST_PP_LIST_REVERSE_D(217, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_218(o, s, l) BOOST_PP_LIST_FOLD_LEFT_218(o, s, BOOST_PP_LIST_REVERSE_D(218, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_219(o, s, l) BOOST_PP_LIST_FOLD_LEFT_219(o, s, BOOST_PP_LIST_REVERSE_D(219, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_220(o, s, l) BOOST_PP_LIST_FOLD_LEFT_220(o, s, BOOST_PP_LIST_REVERSE_D(220, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_221(o, s, l) BOOST_PP_LIST_FOLD_LEFT_221(o, s, BOOST_PP_LIST_REVERSE_D(221, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_222(o, s, l) BOOST_PP_LIST_FOLD_LEFT_222(o, s, BOOST_PP_LIST_REVERSE_D(222, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_223(o, s, l) 
BOOST_PP_LIST_FOLD_LEFT_223(o, s, BOOST_PP_LIST_REVERSE_D(223, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_224(o, s, l) BOOST_PP_LIST_FOLD_LEFT_224(o, s, BOOST_PP_LIST_REVERSE_D(224, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_225(o, s, l) BOOST_PP_LIST_FOLD_LEFT_225(o, s, BOOST_PP_LIST_REVERSE_D(225, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_226(o, s, l) BOOST_PP_LIST_FOLD_LEFT_226(o, s, BOOST_PP_LIST_REVERSE_D(226, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_227(o, s, l) BOOST_PP_LIST_FOLD_LEFT_227(o, s, BOOST_PP_LIST_REVERSE_D(227, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_228(o, s, l) BOOST_PP_LIST_FOLD_LEFT_228(o, s, BOOST_PP_LIST_REVERSE_D(228, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_229(o, s, l) BOOST_PP_LIST_FOLD_LEFT_229(o, s, BOOST_PP_LIST_REVERSE_D(229, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_230(o, s, l) BOOST_PP_LIST_FOLD_LEFT_230(o, s, BOOST_PP_LIST_REVERSE_D(230, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_231(o, s, l) BOOST_PP_LIST_FOLD_LEFT_231(o, s, BOOST_PP_LIST_REVERSE_D(231, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_232(o, s, l) BOOST_PP_LIST_FOLD_LEFT_232(o, s, BOOST_PP_LIST_REVERSE_D(232, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_233(o, s, l) BOOST_PP_LIST_FOLD_LEFT_233(o, s, BOOST_PP_LIST_REVERSE_D(233, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_234(o, s, l) BOOST_PP_LIST_FOLD_LEFT_234(o, s, BOOST_PP_LIST_REVERSE_D(234, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_235(o, s, l) BOOST_PP_LIST_FOLD_LEFT_235(o, s, BOOST_PP_LIST_REVERSE_D(235, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_236(o, s, l) BOOST_PP_LIST_FOLD_LEFT_236(o, s, BOOST_PP_LIST_REVERSE_D(236, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_237(o, s, l) BOOST_PP_LIST_FOLD_LEFT_237(o, s, BOOST_PP_LIST_REVERSE_D(237, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_238(o, s, l) BOOST_PP_LIST_FOLD_LEFT_238(o, s, BOOST_PP_LIST_REVERSE_D(238, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_239(o, s, l) BOOST_PP_LIST_FOLD_LEFT_239(o, s, BOOST_PP_LIST_REVERSE_D(239, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_240(o, s, l) BOOST_PP_LIST_FOLD_LEFT_240(o, s, 
BOOST_PP_LIST_REVERSE_D(240, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_241(o, s, l) BOOST_PP_LIST_FOLD_LEFT_241(o, s, BOOST_PP_LIST_REVERSE_D(241, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_242(o, s, l) BOOST_PP_LIST_FOLD_LEFT_242(o, s, BOOST_PP_LIST_REVERSE_D(242, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_243(o, s, l) BOOST_PP_LIST_FOLD_LEFT_243(o, s, BOOST_PP_LIST_REVERSE_D(243, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_244(o, s, l) BOOST_PP_LIST_FOLD_LEFT_244(o, s, BOOST_PP_LIST_REVERSE_D(244, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_245(o, s, l) BOOST_PP_LIST_FOLD_LEFT_245(o, s, BOOST_PP_LIST_REVERSE_D(245, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_246(o, s, l) BOOST_PP_LIST_FOLD_LEFT_246(o, s, BOOST_PP_LIST_REVERSE_D(246, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_247(o, s, l) BOOST_PP_LIST_FOLD_LEFT_247(o, s, BOOST_PP_LIST_REVERSE_D(247, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_248(o, s, l) BOOST_PP_LIST_FOLD_LEFT_248(o, s, BOOST_PP_LIST_REVERSE_D(248, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_249(o, s, l) BOOST_PP_LIST_FOLD_LEFT_249(o, s, BOOST_PP_LIST_REVERSE_D(249, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_250(o, s, l) BOOST_PP_LIST_FOLD_LEFT_250(o, s, BOOST_PP_LIST_REVERSE_D(250, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_251(o, s, l) BOOST_PP_LIST_FOLD_LEFT_251(o, s, BOOST_PP_LIST_REVERSE_D(251, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_252(o, s, l) BOOST_PP_LIST_FOLD_LEFT_252(o, s, BOOST_PP_LIST_REVERSE_D(252, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_253(o, s, l) BOOST_PP_LIST_FOLD_LEFT_253(o, s, BOOST_PP_LIST_REVERSE_D(253, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_254(o, s, l) BOOST_PP_LIST_FOLD_LEFT_254(o, s, BOOST_PP_LIST_REVERSE_D(254, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_255(o, s, l) BOOST_PP_LIST_FOLD_LEFT_255(o, s, BOOST_PP_LIST_REVERSE_D(255, l))\n# define BOOST_PP_LIST_FOLD_RIGHT_256(o, s, l) BOOST_PP_LIST_FOLD_LEFT_256(o, s, BOOST_PP_LIST_REVERSE_D(256, l))\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/fold_left.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_FOLD_LEFT_HPP\n# define BOOST_PREPROCESSOR_LIST_FOLD_LEFT_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/control/while.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n#\n# /* BOOST_PP_LIST_FOLD_LEFT */\n#\n# if 0\n#    define BOOST_PP_LIST_FOLD_LEFT(op, state, list)\n# endif\n#\n# define BOOST_PP_LIST_FOLD_LEFT BOOST_PP_CAT(BOOST_PP_LIST_FOLD_LEFT_, BOOST_PP_AUTO_REC(BOOST_PP_WHILE_P, 256))\n#\n# define BOOST_PP_LIST_FOLD_LEFT_257(o, s, l) BOOST_PP_ERROR(0x0004)\n#\n# define BOOST_PP_LIST_FOLD_LEFT_D(d, o, s, l) BOOST_PP_LIST_FOLD_LEFT_ ## d(o, s, l)\n# define BOOST_PP_LIST_FOLD_LEFT_2ND BOOST_PP_LIST_FOLD_LEFT\n# define BOOST_PP_LIST_FOLD_LEFT_2ND_D BOOST_PP_LIST_FOLD_LEFT_D\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    include <boost/preprocessor/list/detail/edg/fold_left.hpp>\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n#    include <boost/preprocessor/list/detail/dmc/fold_left.hpp>\n# else\n#    include <boost/preprocessor/list/detail/fold_left.hpp>\n# endif\n#\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_NIL 1\n#\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_1(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_2(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_3(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_4(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_5(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_6(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_7(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_8(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_9(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_10(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_11(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_12(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_13(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_14(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_15(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_16(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_17(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_18(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_19(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_20(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_21(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_22(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_23(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_24(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_25(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_26(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_27(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_28(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_29(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_30(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_31(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_32(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_33(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_34(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_35(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_36(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_37(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_38(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_39(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_40(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_41(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_42(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_43(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_44(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_45(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_46(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_47(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_48(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_49(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_50(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_51(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_52(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_53(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_54(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_55(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_56(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_57(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_58(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_59(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_60(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_61(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_62(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_63(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_64(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_65(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_66(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_67(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_68(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_69(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_70(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_71(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_72(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_73(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_74(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_75(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_76(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_77(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_78(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_79(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_80(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_81(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_82(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_83(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_84(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_85(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_86(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_87(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_88(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_89(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_90(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_91(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_92(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_93(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_94(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_95(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_96(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_97(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_98(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_99(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_100(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_101(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_102(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_103(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_104(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_105(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_106(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_107(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_108(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_109(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_110(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_111(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_112(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_113(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_114(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_115(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_116(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_117(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_118(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_119(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_120(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_121(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_122(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_123(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_124(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_125(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_126(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_127(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_128(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_129(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_130(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_131(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_132(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_133(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_134(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_135(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_136(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_137(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_138(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_139(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_140(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_141(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_142(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_143(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_144(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_145(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_146(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_147(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_148(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_149(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_150(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_151(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_152(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_153(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_154(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_155(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_156(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_157(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_158(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_159(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_160(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_161(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_162(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_163(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_164(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_165(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_166(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_167(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_168(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_169(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_170(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_171(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_172(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_173(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_174(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_175(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_176(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_177(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_178(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_179(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_180(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_181(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_182(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_183(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_184(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_185(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_186(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_187(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_188(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_189(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_190(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_191(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_192(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_193(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_194(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_195(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_196(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_197(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_198(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_199(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_200(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_201(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_202(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_203(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_204(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_205(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_206(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_207(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_208(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_209(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_210(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_211(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_212(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_213(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_214(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_215(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_216(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_217(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_218(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_219(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_220(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_221(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_222(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_223(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_224(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_225(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_226(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_227(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_228(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_229(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_230(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_231(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_232(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_233(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_234(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_235(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_236(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_237(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_238(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_239(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_240(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_241(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_242(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_243(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_244(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_245(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_246(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_247(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_248(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_249(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_250(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_251(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_252(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_253(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_254(o, s, l) 0\n# define BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_255(o, s, l) 0\n# define 
BOOST_PP_LIST_FOLD_LEFT_CHECK_BOOST_PP_LIST_FOLD_LEFT_256(o, s, l) 0\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/fold_right.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_FOLD_RIGHT_HPP\n# define BOOST_PREPROCESSOR_LIST_FOLD_RIGHT_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/control/while.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n#\n# if 0\n#    define BOOST_PP_LIST_FOLD_RIGHT(op, state, list)\n# endif\n#\n# define BOOST_PP_LIST_FOLD_RIGHT BOOST_PP_CAT(BOOST_PP_LIST_FOLD_RIGHT_, BOOST_PP_AUTO_REC(BOOST_PP_WHILE_P, 256))\n#\n# define BOOST_PP_LIST_FOLD_RIGHT_257(o, s, l) BOOST_PP_ERROR(0x0004)\n#\n# define BOOST_PP_LIST_FOLD_RIGHT_D(d, o, s, l) BOOST_PP_LIST_FOLD_RIGHT_ ## d(o, s, l)\n# define BOOST_PP_LIST_FOLD_RIGHT_2ND BOOST_PP_LIST_FOLD_RIGHT\n# define BOOST_PP_LIST_FOLD_RIGHT_2ND_D BOOST_PP_LIST_FOLD_RIGHT_D\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    include <boost/preprocessor/list/detail/edg/fold_right.hpp>\n# else\n#    include <boost/preprocessor/list/detail/fold_right.hpp>\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/for_each_i.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_LIST_FOR_EACH_I_HPP\n# define BOOST_PREPROCESSOR_LIST_LIST_FOR_EACH_I_HPP\n#\n# include <boost/preprocessor/arithmetic/inc.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/list/adt.hpp>\n# include <boost/preprocessor/repetition/for.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_LIST_FOR_EACH_I */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG() && ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_LIST_FOR_EACH_I(macro, data, list) BOOST_PP_FOR((macro, data, list, 0), BOOST_PP_LIST_FOR_EACH_I_P, BOOST_PP_LIST_FOR_EACH_I_O, BOOST_PP_LIST_FOR_EACH_I_M)\n# else\n#    define BOOST_PP_LIST_FOR_EACH_I(macro, data, list) BOOST_PP_LIST_FOR_EACH_I_I(macro, data, list)\n#    define BOOST_PP_LIST_FOR_EACH_I_I(macro, data, list) BOOST_PP_FOR((macro, data, list, 0), BOOST_PP_LIST_FOR_EACH_I_P, BOOST_PP_LIST_FOR_EACH_I_O, BOOST_PP_LIST_FOR_EACH_I_M)\n# endif\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_LIST_FOR_EACH_I_P(r, x) BOOST_PP_LIST_FOR_EACH_I_P_D x\n#    define BOOST_PP_LIST_FOR_EACH_I_P_D(m, d, l, i) BOOST_PP_LIST_IS_CONS(l)\n# else\n#    define BOOST_PP_LIST_FOR_EACH_I_P(r, x) BOOST_PP_LIST_IS_CONS(BOOST_PP_TUPLE_ELEM(4, 2, x))\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_LIST_FOR_EACH_I_O(r, x) BOOST_PP_LIST_FOR_EACH_I_O_D x\n#    define BOOST_PP_LIST_FOR_EACH_I_O_D(m, d, l, i) (m, d, BOOST_PP_LIST_REST(l), BOOST_PP_INC(i))\n# else\n#    define 
BOOST_PP_LIST_FOR_EACH_I_O(r, x) (BOOST_PP_TUPLE_ELEM(4, 0, x), BOOST_PP_TUPLE_ELEM(4, 1, x), BOOST_PP_LIST_REST(BOOST_PP_TUPLE_ELEM(4, 2, x)), BOOST_PP_INC(BOOST_PP_TUPLE_ELEM(4, 3, x)))\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_LIST_FOR_EACH_I_M(r, x) BOOST_PP_LIST_FOR_EACH_I_M_D(r, BOOST_PP_TUPLE_ELEM(4, 0, x), BOOST_PP_TUPLE_ELEM(4, 1, x), BOOST_PP_TUPLE_ELEM(4, 2, x), BOOST_PP_TUPLE_ELEM(4, 3, x))\n# else\n#    define BOOST_PP_LIST_FOR_EACH_I_M(r, x) BOOST_PP_LIST_FOR_EACH_I_M_I(r, BOOST_PP_TUPLE_REM_4 x)\n#    define BOOST_PP_LIST_FOR_EACH_I_M_I(r, x_e) BOOST_PP_LIST_FOR_EACH_I_M_D(r, x_e)\n# endif\n#\n# define BOOST_PP_LIST_FOR_EACH_I_M_D(r, m, d, l, i) m(r, d, i, BOOST_PP_LIST_FIRST(l))\n#\n# /* BOOST_PP_LIST_FOR_EACH_I_R */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_LIST_FOR_EACH_I_R(r, macro, data, list) BOOST_PP_FOR_ ## r((macro, data, list, 0), BOOST_PP_LIST_FOR_EACH_I_P, BOOST_PP_LIST_FOR_EACH_I_O, BOOST_PP_LIST_FOR_EACH_I_M)\n# else\n#    define BOOST_PP_LIST_FOR_EACH_I_R(r, macro, data, list) BOOST_PP_LIST_FOR_EACH_I_R_I(r, macro, data, list)\n#    define BOOST_PP_LIST_FOR_EACH_I_R_I(r, macro, data, list) BOOST_PP_FOR_ ## r((macro, data, list, 0), BOOST_PP_LIST_FOR_EACH_I_P, BOOST_PP_LIST_FOR_EACH_I_O, BOOST_PP_LIST_FOR_EACH_I_M)\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/list/reverse.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LIST_REVERSE_HPP\n# define BOOST_PREPROCESSOR_LIST_REVERSE_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/list/fold_left.hpp>\n#\n# /* BOOST_PP_LIST_REVERSE */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_LIST_REVERSE(list) BOOST_PP_LIST_FOLD_LEFT(BOOST_PP_LIST_REVERSE_O, BOOST_PP_NIL, list)\n# else\n#    define BOOST_PP_LIST_REVERSE(list) BOOST_PP_LIST_REVERSE_I(list)\n#    define BOOST_PP_LIST_REVERSE_I(list) BOOST_PP_LIST_FOLD_LEFT(BOOST_PP_LIST_REVERSE_O, BOOST_PP_NIL, list)\n# endif\n#\n# define BOOST_PP_LIST_REVERSE_O(d, s, x) (x, s)\n#\n# /* BOOST_PP_LIST_REVERSE_D */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_LIST_REVERSE_D(d, list) BOOST_PP_LIST_FOLD_LEFT_ ## d(BOOST_PP_LIST_REVERSE_O, BOOST_PP_NIL, list)\n# else\n#    define BOOST_PP_LIST_REVERSE_D(d, list) BOOST_PP_LIST_REVERSE_D_I(d, list)\n#    define BOOST_PP_LIST_REVERSE_D_I(d, list) BOOST_PP_LIST_FOLD_LEFT_ ## d(BOOST_PP_LIST_REVERSE_O, BOOST_PP_NIL, list)\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/logical/and.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LOGICAL_AND_HPP\n# define BOOST_PREPROCESSOR_LOGICAL_AND_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n# include <boost/preprocessor/logical/bitand.hpp>\n#\n# /* BOOST_PP_AND */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_AND(p, q) BOOST_PP_BITAND(BOOST_PP_BOOL(p), BOOST_PP_BOOL(q))\n# else\n#    define BOOST_PP_AND(p, q) BOOST_PP_AND_I(p, q)\n#    define BOOST_PP_AND_I(p, q) BOOST_PP_BITAND(BOOST_PP_BOOL(p), BOOST_PP_BOOL(q))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/logical/bitand.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LOGICAL_BITAND_HPP\n# define BOOST_PREPROCESSOR_LOGICAL_BITAND_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_BITAND */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_BITAND(x, y) BOOST_PP_BITAND_I(x, y)\n# else\n#    define BOOST_PP_BITAND(x, y) BOOST_PP_BITAND_OO((x, y))\n#    define BOOST_PP_BITAND_OO(par) BOOST_PP_BITAND_I ## par\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_BITAND_I(x, y) BOOST_PP_BITAND_ ## x ## y\n# else\n#    define BOOST_PP_BITAND_I(x, y) BOOST_PP_BITAND_ID(BOOST_PP_BITAND_ ## x ## y)\n#    define BOOST_PP_BITAND_ID(res) res\n# endif\n#\n# define BOOST_PP_BITAND_00 0\n# define BOOST_PP_BITAND_01 0\n# define BOOST_PP_BITAND_10 0\n# define BOOST_PP_BITAND_11 1\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/logical/bool.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LOGICAL_BOOL_HPP\n# define BOOST_PREPROCESSOR_LOGICAL_BOOL_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_BOOL */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_BOOL(x) BOOST_PP_BOOL_I(x)\n# else\n#    define BOOST_PP_BOOL(x) BOOST_PP_BOOL_OO((x))\n#    define BOOST_PP_BOOL_OO(par) BOOST_PP_BOOL_I ## par\n# endif\n#\n# define BOOST_PP_BOOL_I(x) BOOST_PP_BOOL_ ## x\n#\n# define BOOST_PP_BOOL_0 0\n# define BOOST_PP_BOOL_1 1\n# define BOOST_PP_BOOL_2 1\n# define BOOST_PP_BOOL_3 1\n# define BOOST_PP_BOOL_4 1\n# define BOOST_PP_BOOL_5 1\n# define BOOST_PP_BOOL_6 1\n# define BOOST_PP_BOOL_7 1\n# define BOOST_PP_BOOL_8 1\n# define BOOST_PP_BOOL_9 1\n# define BOOST_PP_BOOL_10 1\n# define BOOST_PP_BOOL_11 1\n# define BOOST_PP_BOOL_12 1\n# define BOOST_PP_BOOL_13 1\n# define BOOST_PP_BOOL_14 1\n# define BOOST_PP_BOOL_15 1\n# define BOOST_PP_BOOL_16 1\n# define BOOST_PP_BOOL_17 1\n# define BOOST_PP_BOOL_18 1\n# define BOOST_PP_BOOL_19 1\n# define BOOST_PP_BOOL_20 1\n# define BOOST_PP_BOOL_21 1\n# define BOOST_PP_BOOL_22 1\n# define BOOST_PP_BOOL_23 1\n# define BOOST_PP_BOOL_24 1\n# define BOOST_PP_BOOL_25 1\n# define BOOST_PP_BOOL_26 1\n# define BOOST_PP_BOOL_27 1\n# define BOOST_PP_BOOL_28 1\n# define BOOST_PP_BOOL_29 1\n# define BOOST_PP_BOOL_30 1\n# define BOOST_PP_BOOL_31 1\n# define BOOST_PP_BOOL_32 1\n# define BOOST_PP_BOOL_33 1\n# define BOOST_PP_BOOL_34 1\n# define BOOST_PP_BOOL_35 1\n# define BOOST_PP_BOOL_36 1\n# define BOOST_PP_BOOL_37 1\n# define BOOST_PP_BOOL_38 1\n# define BOOST_PP_BOOL_39 1\n# 
define BOOST_PP_BOOL_40 1\n# define BOOST_PP_BOOL_41 1\n# define BOOST_PP_BOOL_42 1\n# define BOOST_PP_BOOL_43 1\n# define BOOST_PP_BOOL_44 1\n# define BOOST_PP_BOOL_45 1\n# define BOOST_PP_BOOL_46 1\n# define BOOST_PP_BOOL_47 1\n# define BOOST_PP_BOOL_48 1\n# define BOOST_PP_BOOL_49 1\n# define BOOST_PP_BOOL_50 1\n# define BOOST_PP_BOOL_51 1\n# define BOOST_PP_BOOL_52 1\n# define BOOST_PP_BOOL_53 1\n# define BOOST_PP_BOOL_54 1\n# define BOOST_PP_BOOL_55 1\n# define BOOST_PP_BOOL_56 1\n# define BOOST_PP_BOOL_57 1\n# define BOOST_PP_BOOL_58 1\n# define BOOST_PP_BOOL_59 1\n# define BOOST_PP_BOOL_60 1\n# define BOOST_PP_BOOL_61 1\n# define BOOST_PP_BOOL_62 1\n# define BOOST_PP_BOOL_63 1\n# define BOOST_PP_BOOL_64 1\n# define BOOST_PP_BOOL_65 1\n# define BOOST_PP_BOOL_66 1\n# define BOOST_PP_BOOL_67 1\n# define BOOST_PP_BOOL_68 1\n# define BOOST_PP_BOOL_69 1\n# define BOOST_PP_BOOL_70 1\n# define BOOST_PP_BOOL_71 1\n# define BOOST_PP_BOOL_72 1\n# define BOOST_PP_BOOL_73 1\n# define BOOST_PP_BOOL_74 1\n# define BOOST_PP_BOOL_75 1\n# define BOOST_PP_BOOL_76 1\n# define BOOST_PP_BOOL_77 1\n# define BOOST_PP_BOOL_78 1\n# define BOOST_PP_BOOL_79 1\n# define BOOST_PP_BOOL_80 1\n# define BOOST_PP_BOOL_81 1\n# define BOOST_PP_BOOL_82 1\n# define BOOST_PP_BOOL_83 1\n# define BOOST_PP_BOOL_84 1\n# define BOOST_PP_BOOL_85 1\n# define BOOST_PP_BOOL_86 1\n# define BOOST_PP_BOOL_87 1\n# define BOOST_PP_BOOL_88 1\n# define BOOST_PP_BOOL_89 1\n# define BOOST_PP_BOOL_90 1\n# define BOOST_PP_BOOL_91 1\n# define BOOST_PP_BOOL_92 1\n# define BOOST_PP_BOOL_93 1\n# define BOOST_PP_BOOL_94 1\n# define BOOST_PP_BOOL_95 1\n# define BOOST_PP_BOOL_96 1\n# define BOOST_PP_BOOL_97 1\n# define BOOST_PP_BOOL_98 1\n# define BOOST_PP_BOOL_99 1\n# define BOOST_PP_BOOL_100 1\n# define BOOST_PP_BOOL_101 1\n# define BOOST_PP_BOOL_102 1\n# define BOOST_PP_BOOL_103 1\n# define BOOST_PP_BOOL_104 1\n# define BOOST_PP_BOOL_105 1\n# define BOOST_PP_BOOL_106 1\n# define BOOST_PP_BOOL_107 1\n# define 
BOOST_PP_BOOL_108 1\n# define BOOST_PP_BOOL_109 1\n# define BOOST_PP_BOOL_110 1\n# define BOOST_PP_BOOL_111 1\n# define BOOST_PP_BOOL_112 1\n# define BOOST_PP_BOOL_113 1\n# define BOOST_PP_BOOL_114 1\n# define BOOST_PP_BOOL_115 1\n# define BOOST_PP_BOOL_116 1\n# define BOOST_PP_BOOL_117 1\n# define BOOST_PP_BOOL_118 1\n# define BOOST_PP_BOOL_119 1\n# define BOOST_PP_BOOL_120 1\n# define BOOST_PP_BOOL_121 1\n# define BOOST_PP_BOOL_122 1\n# define BOOST_PP_BOOL_123 1\n# define BOOST_PP_BOOL_124 1\n# define BOOST_PP_BOOL_125 1\n# define BOOST_PP_BOOL_126 1\n# define BOOST_PP_BOOL_127 1\n# define BOOST_PP_BOOL_128 1\n# define BOOST_PP_BOOL_129 1\n# define BOOST_PP_BOOL_130 1\n# define BOOST_PP_BOOL_131 1\n# define BOOST_PP_BOOL_132 1\n# define BOOST_PP_BOOL_133 1\n# define BOOST_PP_BOOL_134 1\n# define BOOST_PP_BOOL_135 1\n# define BOOST_PP_BOOL_136 1\n# define BOOST_PP_BOOL_137 1\n# define BOOST_PP_BOOL_138 1\n# define BOOST_PP_BOOL_139 1\n# define BOOST_PP_BOOL_140 1\n# define BOOST_PP_BOOL_141 1\n# define BOOST_PP_BOOL_142 1\n# define BOOST_PP_BOOL_143 1\n# define BOOST_PP_BOOL_144 1\n# define BOOST_PP_BOOL_145 1\n# define BOOST_PP_BOOL_146 1\n# define BOOST_PP_BOOL_147 1\n# define BOOST_PP_BOOL_148 1\n# define BOOST_PP_BOOL_149 1\n# define BOOST_PP_BOOL_150 1\n# define BOOST_PP_BOOL_151 1\n# define BOOST_PP_BOOL_152 1\n# define BOOST_PP_BOOL_153 1\n# define BOOST_PP_BOOL_154 1\n# define BOOST_PP_BOOL_155 1\n# define BOOST_PP_BOOL_156 1\n# define BOOST_PP_BOOL_157 1\n# define BOOST_PP_BOOL_158 1\n# define BOOST_PP_BOOL_159 1\n# define BOOST_PP_BOOL_160 1\n# define BOOST_PP_BOOL_161 1\n# define BOOST_PP_BOOL_162 1\n# define BOOST_PP_BOOL_163 1\n# define BOOST_PP_BOOL_164 1\n# define BOOST_PP_BOOL_165 1\n# define BOOST_PP_BOOL_166 1\n# define BOOST_PP_BOOL_167 1\n# define BOOST_PP_BOOL_168 1\n# define BOOST_PP_BOOL_169 1\n# define BOOST_PP_BOOL_170 1\n# define BOOST_PP_BOOL_171 1\n# define BOOST_PP_BOOL_172 1\n# define BOOST_PP_BOOL_173 1\n# define BOOST_PP_BOOL_174 
1\n# define BOOST_PP_BOOL_175 1\n# define BOOST_PP_BOOL_176 1\n# define BOOST_PP_BOOL_177 1\n# define BOOST_PP_BOOL_178 1\n# define BOOST_PP_BOOL_179 1\n# define BOOST_PP_BOOL_180 1\n# define BOOST_PP_BOOL_181 1\n# define BOOST_PP_BOOL_182 1\n# define BOOST_PP_BOOL_183 1\n# define BOOST_PP_BOOL_184 1\n# define BOOST_PP_BOOL_185 1\n# define BOOST_PP_BOOL_186 1\n# define BOOST_PP_BOOL_187 1\n# define BOOST_PP_BOOL_188 1\n# define BOOST_PP_BOOL_189 1\n# define BOOST_PP_BOOL_190 1\n# define BOOST_PP_BOOL_191 1\n# define BOOST_PP_BOOL_192 1\n# define BOOST_PP_BOOL_193 1\n# define BOOST_PP_BOOL_194 1\n# define BOOST_PP_BOOL_195 1\n# define BOOST_PP_BOOL_196 1\n# define BOOST_PP_BOOL_197 1\n# define BOOST_PP_BOOL_198 1\n# define BOOST_PP_BOOL_199 1\n# define BOOST_PP_BOOL_200 1\n# define BOOST_PP_BOOL_201 1\n# define BOOST_PP_BOOL_202 1\n# define BOOST_PP_BOOL_203 1\n# define BOOST_PP_BOOL_204 1\n# define BOOST_PP_BOOL_205 1\n# define BOOST_PP_BOOL_206 1\n# define BOOST_PP_BOOL_207 1\n# define BOOST_PP_BOOL_208 1\n# define BOOST_PP_BOOL_209 1\n# define BOOST_PP_BOOL_210 1\n# define BOOST_PP_BOOL_211 1\n# define BOOST_PP_BOOL_212 1\n# define BOOST_PP_BOOL_213 1\n# define BOOST_PP_BOOL_214 1\n# define BOOST_PP_BOOL_215 1\n# define BOOST_PP_BOOL_216 1\n# define BOOST_PP_BOOL_217 1\n# define BOOST_PP_BOOL_218 1\n# define BOOST_PP_BOOL_219 1\n# define BOOST_PP_BOOL_220 1\n# define BOOST_PP_BOOL_221 1\n# define BOOST_PP_BOOL_222 1\n# define BOOST_PP_BOOL_223 1\n# define BOOST_PP_BOOL_224 1\n# define BOOST_PP_BOOL_225 1\n# define BOOST_PP_BOOL_226 1\n# define BOOST_PP_BOOL_227 1\n# define BOOST_PP_BOOL_228 1\n# define BOOST_PP_BOOL_229 1\n# define BOOST_PP_BOOL_230 1\n# define BOOST_PP_BOOL_231 1\n# define BOOST_PP_BOOL_232 1\n# define BOOST_PP_BOOL_233 1\n# define BOOST_PP_BOOL_234 1\n# define BOOST_PP_BOOL_235 1\n# define BOOST_PP_BOOL_236 1\n# define BOOST_PP_BOOL_237 1\n# define BOOST_PP_BOOL_238 1\n# define BOOST_PP_BOOL_239 1\n# define BOOST_PP_BOOL_240 1\n# define 
BOOST_PP_BOOL_241 1\n# define BOOST_PP_BOOL_242 1\n# define BOOST_PP_BOOL_243 1\n# define BOOST_PP_BOOL_244 1\n# define BOOST_PP_BOOL_245 1\n# define BOOST_PP_BOOL_246 1\n# define BOOST_PP_BOOL_247 1\n# define BOOST_PP_BOOL_248 1\n# define BOOST_PP_BOOL_249 1\n# define BOOST_PP_BOOL_250 1\n# define BOOST_PP_BOOL_251 1\n# define BOOST_PP_BOOL_252 1\n# define BOOST_PP_BOOL_253 1\n# define BOOST_PP_BOOL_254 1\n# define BOOST_PP_BOOL_255 1\n# define BOOST_PP_BOOL_256 1\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/logical/compl.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LOGICAL_COMPL_HPP\n# define BOOST_PREPROCESSOR_LOGICAL_COMPL_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_COMPL */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_COMPL(x) BOOST_PP_COMPL_I(x)\n# else\n#    define BOOST_PP_COMPL(x) BOOST_PP_COMPL_OO((x))\n#    define BOOST_PP_COMPL_OO(par) BOOST_PP_COMPL_I ## par\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_COMPL_I(x) BOOST_PP_COMPL_ ## x\n# else\n#    define BOOST_PP_COMPL_I(x) BOOST_PP_COMPL_ID(BOOST_PP_COMPL_ ## x)\n#    define BOOST_PP_COMPL_ID(id) id\n# endif\n#\n# define BOOST_PP_COMPL_0 1\n# define BOOST_PP_COMPL_1 0\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/logical/not.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_LOGICAL_NOT_HPP\n# define BOOST_PREPROCESSOR_LOGICAL_NOT_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n# include <boost/preprocessor/logical/compl.hpp>\n#\n# /* BOOST_PP_NOT */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_NOT(x) BOOST_PP_COMPL(BOOST_PP_BOOL(x))\n# else\n#    define BOOST_PP_NOT(x) BOOST_PP_NOT_I(x)\n#    define BOOST_PP_NOT_I(x) BOOST_PP_COMPL(BOOST_PP_BOOL(x))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/punctuation/comma.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_PUNCTUATION_COMMA_HPP\n# define BOOST_PREPROCESSOR_PUNCTUATION_COMMA_HPP\n#\n# /* BOOST_PP_COMMA */\n#\n# define BOOST_PP_COMMA() ,\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/punctuation/comma_if.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_PUNCTUATION_COMMA_IF_HPP\n# define BOOST_PREPROCESSOR_PUNCTUATION_COMMA_IF_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/facilities/empty.hpp>\n# include <boost/preprocessor/punctuation/comma.hpp>\n#\n# /* BOOST_PP_COMMA_IF */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_COMMA_IF(cond) BOOST_PP_IF(cond, BOOST_PP_COMMA, BOOST_PP_EMPTY)()\n# else\n#    define BOOST_PP_COMMA_IF(cond) BOOST_PP_COMMA_IF_I(cond)\n#    define BOOST_PP_COMMA_IF_I(cond) BOOST_PP_IF(cond, BOOST_PP_COMMA, BOOST_PP_EMPTY)()\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/punctuation/detail/is_begin_parens.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2014.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n#ifndef BOOST_PREPROCESSOR_DETAIL_IS_BEGIN_PARENS_HPP\n#define BOOST_PREPROCESSOR_DETAIL_IS_BEGIN_PARENS_HPP\n\n#if BOOST_PP_VARIADICS_MSVC\n\n#include <boost/preprocessor/facilities/empty.hpp>\n\n#define BOOST_PP_DETAIL_VD_IBP_CAT(a, b) BOOST_PP_DETAIL_VD_IBP_CAT_I(a, b)\n#define BOOST_PP_DETAIL_VD_IBP_CAT_I(a, b) BOOST_PP_DETAIL_VD_IBP_CAT_II(a ## b)\n#define BOOST_PP_DETAIL_VD_IBP_CAT_II(res) res\n\n#define BOOST_PP_DETAIL_IBP_SPLIT(i, ...) \\\n    BOOST_PP_DETAIL_VD_IBP_CAT(BOOST_PP_DETAIL_IBP_PRIMITIVE_CAT(BOOST_PP_DETAIL_IBP_SPLIT_,i)(__VA_ARGS__),BOOST_PP_EMPTY()) \\\n/**/\n\n#define BOOST_PP_DETAIL_IBP_IS_VARIADIC_C(...) 1 1\n\n#else\n\n#define BOOST_PP_DETAIL_IBP_SPLIT(i, ...) \\\n    BOOST_PP_DETAIL_IBP_PRIMITIVE_CAT(BOOST_PP_DETAIL_IBP_SPLIT_,i)(__VA_ARGS__) \\\n/**/\n\n#define BOOST_PP_DETAIL_IBP_IS_VARIADIC_C(...) 1\n\n#endif /* BOOST_PP_VARIADICS_MSVC */\n\n#define BOOST_PP_DETAIL_IBP_SPLIT_0(a, ...) a\n#define BOOST_PP_DETAIL_IBP_SPLIT_1(a, ...) __VA_ARGS__\n\n#define BOOST_PP_DETAIL_IBP_CAT(a, ...) BOOST_PP_DETAIL_IBP_PRIMITIVE_CAT(a,__VA_ARGS__)\n#define BOOST_PP_DETAIL_IBP_PRIMITIVE_CAT(a, ...) a ## __VA_ARGS__\n\n#define BOOST_PP_DETAIL_IBP_IS_VARIADIC_R_1 1,\n#define BOOST_PP_DETAIL_IBP_IS_VARIADIC_R_BOOST_PP_DETAIL_IBP_IS_VARIADIC_C 0,\n\n#endif /* BOOST_PREPROCESSOR_DETAIL_IS_BEGIN_PARENS_HPP */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/punctuation/is_begin_parens.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2014.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_IS_BEGIN_PARENS_HPP\n# define BOOST_PREPROCESSOR_IS_BEGIN_PARENS_HPP\n\n# include <boost/preprocessor/config/config.hpp>\n\n#if BOOST_PP_VARIADICS\n\n#include <boost/preprocessor/punctuation/detail/is_begin_parens.hpp>\n\n#if BOOST_PP_VARIADICS_MSVC && _MSC_VER <= 1400\n\n#define BOOST_PP_IS_BEGIN_PARENS(param) \\\n    BOOST_PP_DETAIL_IBP_SPLIT \\\n      ( \\\n      0, \\\n      BOOST_PP_DETAIL_IBP_CAT \\\n        ( \\\n        BOOST_PP_DETAIL_IBP_IS_VARIADIC_R_, \\\n        BOOST_PP_DETAIL_IBP_IS_VARIADIC_C param \\\n        ) \\\n      ) \\\n/**/\n\n#else\n\n#define BOOST_PP_IS_BEGIN_PARENS(...) \\\n    BOOST_PP_DETAIL_IBP_SPLIT \\\n      ( \\\n      0, \\\n      BOOST_PP_DETAIL_IBP_CAT \\\n        ( \\\n        BOOST_PP_DETAIL_IBP_IS_VARIADIC_R_, \\\n        BOOST_PP_DETAIL_IBP_IS_VARIADIC_C __VA_ARGS__ \\\n        ) \\\n      ) \\\n/**/\n\n#endif /* BOOST_PP_VARIADICS_MSVC && _MSC_VER <= 1400 */\n#endif /* BOOST_PP_VARIADICS */\n#endif /* BOOST_PREPROCESSOR_IS_BEGIN_PARENS_HPP */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repeat.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPEAT_HPP\n# define BOOST_PREPROCESSOR_REPEAT_HPP\n#\n# include <boost/preprocessor/repetition/repeat.hpp>\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/deduce_r.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_DEDUCE_R_HPP\n# define BOOST_PREPROCESSOR_REPETITION_DEDUCE_R_HPP\n#\n# include <boost/preprocessor/detail/auto_rec.hpp>\n# include <boost/preprocessor/repetition/for.hpp>\n#\n# /* BOOST_PP_DEDUCE_R */\n#\n# define BOOST_PP_DEDUCE_R() BOOST_PP_AUTO_REC(BOOST_PP_FOR_P, 256)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/detail/dmc/for.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_DETAIL_FOR_HPP\n# define BOOST_PREPROCESSOR_REPETITION_DETAIL_FOR_HPP\n#\n# include <boost/preprocessor/control/expr_iif.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_FOR_1(s, p, o, m) BOOST_PP_FOR_1_C(BOOST_PP_BOOL(p##(2, s)), s, p, o, m)\n# define BOOST_PP_FOR_2(s, p, o, m) BOOST_PP_FOR_2_C(BOOST_PP_BOOL(p##(3, s)), s, p, o, m)\n# define BOOST_PP_FOR_3(s, p, o, m) BOOST_PP_FOR_3_C(BOOST_PP_BOOL(p##(4, s)), s, p, o, m)\n# define BOOST_PP_FOR_4(s, p, o, m) BOOST_PP_FOR_4_C(BOOST_PP_BOOL(p##(5, s)), s, p, o, m)\n# define BOOST_PP_FOR_5(s, p, o, m) BOOST_PP_FOR_5_C(BOOST_PP_BOOL(p##(6, s)), s, p, o, m)\n# define BOOST_PP_FOR_6(s, p, o, m) BOOST_PP_FOR_6_C(BOOST_PP_BOOL(p##(7, s)), s, p, o, m)\n# define BOOST_PP_FOR_7(s, p, o, m) BOOST_PP_FOR_7_C(BOOST_PP_BOOL(p##(8, s)), s, p, o, m)\n# define BOOST_PP_FOR_8(s, p, o, m) BOOST_PP_FOR_8_C(BOOST_PP_BOOL(p##(9, s)), s, p, o, m)\n# define BOOST_PP_FOR_9(s, p, o, m) BOOST_PP_FOR_9_C(BOOST_PP_BOOL(p##(10, s)), s, p, o, m)\n# define BOOST_PP_FOR_10(s, p, o, m) BOOST_PP_FOR_10_C(BOOST_PP_BOOL(p##(11, s)), s, p, o, m)\n# define BOOST_PP_FOR_11(s, p, o, m) BOOST_PP_FOR_11_C(BOOST_PP_BOOL(p##(12, s)), s, p, o, m)\n# define BOOST_PP_FOR_12(s, p, o, m) BOOST_PP_FOR_12_C(BOOST_PP_BOOL(p##(13, s)), s, p, o, m)\n# define BOOST_PP_FOR_13(s, p, o, m) BOOST_PP_FOR_13_C(BOOST_PP_BOOL(p##(14, s)), s, p, o, m)\n# define BOOST_PP_FOR_14(s, p, o, m) BOOST_PP_FOR_14_C(BOOST_PP_BOOL(p##(15, s)), s, p, o, 
m)\n# define BOOST_PP_FOR_15(s, p, o, m) BOOST_PP_FOR_15_C(BOOST_PP_BOOL(p##(16, s)), s, p, o, m)\n# define BOOST_PP_FOR_16(s, p, o, m) BOOST_PP_FOR_16_C(BOOST_PP_BOOL(p##(17, s)), s, p, o, m)\n# define BOOST_PP_FOR_17(s, p, o, m) BOOST_PP_FOR_17_C(BOOST_PP_BOOL(p##(18, s)), s, p, o, m)\n# define BOOST_PP_FOR_18(s, p, o, m) BOOST_PP_FOR_18_C(BOOST_PP_BOOL(p##(19, s)), s, p, o, m)\n# define BOOST_PP_FOR_19(s, p, o, m) BOOST_PP_FOR_19_C(BOOST_PP_BOOL(p##(20, s)), s, p, o, m)\n# define BOOST_PP_FOR_20(s, p, o, m) BOOST_PP_FOR_20_C(BOOST_PP_BOOL(p##(21, s)), s, p, o, m)\n# define BOOST_PP_FOR_21(s, p, o, m) BOOST_PP_FOR_21_C(BOOST_PP_BOOL(p##(22, s)), s, p, o, m)\n# define BOOST_PP_FOR_22(s, p, o, m) BOOST_PP_FOR_22_C(BOOST_PP_BOOL(p##(23, s)), s, p, o, m)\n# define BOOST_PP_FOR_23(s, p, o, m) BOOST_PP_FOR_23_C(BOOST_PP_BOOL(p##(24, s)), s, p, o, m)\n# define BOOST_PP_FOR_24(s, p, o, m) BOOST_PP_FOR_24_C(BOOST_PP_BOOL(p##(25, s)), s, p, o, m)\n# define BOOST_PP_FOR_25(s, p, o, m) BOOST_PP_FOR_25_C(BOOST_PP_BOOL(p##(26, s)), s, p, o, m)\n# define BOOST_PP_FOR_26(s, p, o, m) BOOST_PP_FOR_26_C(BOOST_PP_BOOL(p##(27, s)), s, p, o, m)\n# define BOOST_PP_FOR_27(s, p, o, m) BOOST_PP_FOR_27_C(BOOST_PP_BOOL(p##(28, s)), s, p, o, m)\n# define BOOST_PP_FOR_28(s, p, o, m) BOOST_PP_FOR_28_C(BOOST_PP_BOOL(p##(29, s)), s, p, o, m)\n# define BOOST_PP_FOR_29(s, p, o, m) BOOST_PP_FOR_29_C(BOOST_PP_BOOL(p##(30, s)), s, p, o, m)\n# define BOOST_PP_FOR_30(s, p, o, m) BOOST_PP_FOR_30_C(BOOST_PP_BOOL(p##(31, s)), s, p, o, m)\n# define BOOST_PP_FOR_31(s, p, o, m) BOOST_PP_FOR_31_C(BOOST_PP_BOOL(p##(32, s)), s, p, o, m)\n# define BOOST_PP_FOR_32(s, p, o, m) BOOST_PP_FOR_32_C(BOOST_PP_BOOL(p##(33, s)), s, p, o, m)\n# define BOOST_PP_FOR_33(s, p, o, m) BOOST_PP_FOR_33_C(BOOST_PP_BOOL(p##(34, s)), s, p, o, m)\n# define BOOST_PP_FOR_34(s, p, o, m) BOOST_PP_FOR_34_C(BOOST_PP_BOOL(p##(35, s)), s, p, o, m)\n# define BOOST_PP_FOR_35(s, p, o, m) BOOST_PP_FOR_35_C(BOOST_PP_BOOL(p##(36, s)), s, p, o, 
m)\n# define BOOST_PP_FOR_36(s, p, o, m) BOOST_PP_FOR_36_C(BOOST_PP_BOOL(p##(37, s)), s, p, o, m)\n# define BOOST_PP_FOR_37(s, p, o, m) BOOST_PP_FOR_37_C(BOOST_PP_BOOL(p##(38, s)), s, p, o, m)\n# define BOOST_PP_FOR_38(s, p, o, m) BOOST_PP_FOR_38_C(BOOST_PP_BOOL(p##(39, s)), s, p, o, m)\n# define BOOST_PP_FOR_39(s, p, o, m) BOOST_PP_FOR_39_C(BOOST_PP_BOOL(p##(40, s)), s, p, o, m)\n# define BOOST_PP_FOR_40(s, p, o, m) BOOST_PP_FOR_40_C(BOOST_PP_BOOL(p##(41, s)), s, p, o, m)\n# define BOOST_PP_FOR_41(s, p, o, m) BOOST_PP_FOR_41_C(BOOST_PP_BOOL(p##(42, s)), s, p, o, m)\n# define BOOST_PP_FOR_42(s, p, o, m) BOOST_PP_FOR_42_C(BOOST_PP_BOOL(p##(43, s)), s, p, o, m)\n# define BOOST_PP_FOR_43(s, p, o, m) BOOST_PP_FOR_43_C(BOOST_PP_BOOL(p##(44, s)), s, p, o, m)\n# define BOOST_PP_FOR_44(s, p, o, m) BOOST_PP_FOR_44_C(BOOST_PP_BOOL(p##(45, s)), s, p, o, m)\n# define BOOST_PP_FOR_45(s, p, o, m) BOOST_PP_FOR_45_C(BOOST_PP_BOOL(p##(46, s)), s, p, o, m)\n# define BOOST_PP_FOR_46(s, p, o, m) BOOST_PP_FOR_46_C(BOOST_PP_BOOL(p##(47, s)), s, p, o, m)\n# define BOOST_PP_FOR_47(s, p, o, m) BOOST_PP_FOR_47_C(BOOST_PP_BOOL(p##(48, s)), s, p, o, m)\n# define BOOST_PP_FOR_48(s, p, o, m) BOOST_PP_FOR_48_C(BOOST_PP_BOOL(p##(49, s)), s, p, o, m)\n# define BOOST_PP_FOR_49(s, p, o, m) BOOST_PP_FOR_49_C(BOOST_PP_BOOL(p##(50, s)), s, p, o, m)\n# define BOOST_PP_FOR_50(s, p, o, m) BOOST_PP_FOR_50_C(BOOST_PP_BOOL(p##(51, s)), s, p, o, m)\n# define BOOST_PP_FOR_51(s, p, o, m) BOOST_PP_FOR_51_C(BOOST_PP_BOOL(p##(52, s)), s, p, o, m)\n# define BOOST_PP_FOR_52(s, p, o, m) BOOST_PP_FOR_52_C(BOOST_PP_BOOL(p##(53, s)), s, p, o, m)\n# define BOOST_PP_FOR_53(s, p, o, m) BOOST_PP_FOR_53_C(BOOST_PP_BOOL(p##(54, s)), s, p, o, m)\n# define BOOST_PP_FOR_54(s, p, o, m) BOOST_PP_FOR_54_C(BOOST_PP_BOOL(p##(55, s)), s, p, o, m)\n# define BOOST_PP_FOR_55(s, p, o, m) BOOST_PP_FOR_55_C(BOOST_PP_BOOL(p##(56, s)), s, p, o, m)\n# define BOOST_PP_FOR_56(s, p, o, m) BOOST_PP_FOR_56_C(BOOST_PP_BOOL(p##(57, s)), s, p, o, 
m)\n# define BOOST_PP_FOR_57(s, p, o, m) BOOST_PP_FOR_57_C(BOOST_PP_BOOL(p##(58, s)), s, p, o, m)\n# define BOOST_PP_FOR_58(s, p, o, m) BOOST_PP_FOR_58_C(BOOST_PP_BOOL(p##(59, s)), s, p, o, m)\n# define BOOST_PP_FOR_59(s, p, o, m) BOOST_PP_FOR_59_C(BOOST_PP_BOOL(p##(60, s)), s, p, o, m)\n# define BOOST_PP_FOR_60(s, p, o, m) BOOST_PP_FOR_60_C(BOOST_PP_BOOL(p##(61, s)), s, p, o, m)\n# define BOOST_PP_FOR_61(s, p, o, m) BOOST_PP_FOR_61_C(BOOST_PP_BOOL(p##(62, s)), s, p, o, m)\n# define BOOST_PP_FOR_62(s, p, o, m) BOOST_PP_FOR_62_C(BOOST_PP_BOOL(p##(63, s)), s, p, o, m)\n# define BOOST_PP_FOR_63(s, p, o, m) BOOST_PP_FOR_63_C(BOOST_PP_BOOL(p##(64, s)), s, p, o, m)\n# define BOOST_PP_FOR_64(s, p, o, m) BOOST_PP_FOR_64_C(BOOST_PP_BOOL(p##(65, s)), s, p, o, m)\n# define BOOST_PP_FOR_65(s, p, o, m) BOOST_PP_FOR_65_C(BOOST_PP_BOOL(p##(66, s)), s, p, o, m)\n# define BOOST_PP_FOR_66(s, p, o, m) BOOST_PP_FOR_66_C(BOOST_PP_BOOL(p##(67, s)), s, p, o, m)\n# define BOOST_PP_FOR_67(s, p, o, m) BOOST_PP_FOR_67_C(BOOST_PP_BOOL(p##(68, s)), s, p, o, m)\n# define BOOST_PP_FOR_68(s, p, o, m) BOOST_PP_FOR_68_C(BOOST_PP_BOOL(p##(69, s)), s, p, o, m)\n# define BOOST_PP_FOR_69(s, p, o, m) BOOST_PP_FOR_69_C(BOOST_PP_BOOL(p##(70, s)), s, p, o, m)\n# define BOOST_PP_FOR_70(s, p, o, m) BOOST_PP_FOR_70_C(BOOST_PP_BOOL(p##(71, s)), s, p, o, m)\n# define BOOST_PP_FOR_71(s, p, o, m) BOOST_PP_FOR_71_C(BOOST_PP_BOOL(p##(72, s)), s, p, o, m)\n# define BOOST_PP_FOR_72(s, p, o, m) BOOST_PP_FOR_72_C(BOOST_PP_BOOL(p##(73, s)), s, p, o, m)\n# define BOOST_PP_FOR_73(s, p, o, m) BOOST_PP_FOR_73_C(BOOST_PP_BOOL(p##(74, s)), s, p, o, m)\n# define BOOST_PP_FOR_74(s, p, o, m) BOOST_PP_FOR_74_C(BOOST_PP_BOOL(p##(75, s)), s, p, o, m)\n# define BOOST_PP_FOR_75(s, p, o, m) BOOST_PP_FOR_75_C(BOOST_PP_BOOL(p##(76, s)), s, p, o, m)\n# define BOOST_PP_FOR_76(s, p, o, m) BOOST_PP_FOR_76_C(BOOST_PP_BOOL(p##(77, s)), s, p, o, m)\n# define BOOST_PP_FOR_77(s, p, o, m) BOOST_PP_FOR_77_C(BOOST_PP_BOOL(p##(78, s)), s, p, o, 
m)\n# define BOOST_PP_FOR_78(s, p, o, m) BOOST_PP_FOR_78_C(BOOST_PP_BOOL(p##(79, s)), s, p, o, m)\n# define BOOST_PP_FOR_79(s, p, o, m) BOOST_PP_FOR_79_C(BOOST_PP_BOOL(p##(80, s)), s, p, o, m)\n# define BOOST_PP_FOR_80(s, p, o, m) BOOST_PP_FOR_80_C(BOOST_PP_BOOL(p##(81, s)), s, p, o, m)\n# define BOOST_PP_FOR_81(s, p, o, m) BOOST_PP_FOR_81_C(BOOST_PP_BOOL(p##(82, s)), s, p, o, m)\n# define BOOST_PP_FOR_82(s, p, o, m) BOOST_PP_FOR_82_C(BOOST_PP_BOOL(p##(83, s)), s, p, o, m)\n# define BOOST_PP_FOR_83(s, p, o, m) BOOST_PP_FOR_83_C(BOOST_PP_BOOL(p##(84, s)), s, p, o, m)\n# define BOOST_PP_FOR_84(s, p, o, m) BOOST_PP_FOR_84_C(BOOST_PP_BOOL(p##(85, s)), s, p, o, m)\n# define BOOST_PP_FOR_85(s, p, o, m) BOOST_PP_FOR_85_C(BOOST_PP_BOOL(p##(86, s)), s, p, o, m)\n# define BOOST_PP_FOR_86(s, p, o, m) BOOST_PP_FOR_86_C(BOOST_PP_BOOL(p##(87, s)), s, p, o, m)\n# define BOOST_PP_FOR_87(s, p, o, m) BOOST_PP_FOR_87_C(BOOST_PP_BOOL(p##(88, s)), s, p, o, m)\n# define BOOST_PP_FOR_88(s, p, o, m) BOOST_PP_FOR_88_C(BOOST_PP_BOOL(p##(89, s)), s, p, o, m)\n# define BOOST_PP_FOR_89(s, p, o, m) BOOST_PP_FOR_89_C(BOOST_PP_BOOL(p##(90, s)), s, p, o, m)\n# define BOOST_PP_FOR_90(s, p, o, m) BOOST_PP_FOR_90_C(BOOST_PP_BOOL(p##(91, s)), s, p, o, m)\n# define BOOST_PP_FOR_91(s, p, o, m) BOOST_PP_FOR_91_C(BOOST_PP_BOOL(p##(92, s)), s, p, o, m)\n# define BOOST_PP_FOR_92(s, p, o, m) BOOST_PP_FOR_92_C(BOOST_PP_BOOL(p##(93, s)), s, p, o, m)\n# define BOOST_PP_FOR_93(s, p, o, m) BOOST_PP_FOR_93_C(BOOST_PP_BOOL(p##(94, s)), s, p, o, m)\n# define BOOST_PP_FOR_94(s, p, o, m) BOOST_PP_FOR_94_C(BOOST_PP_BOOL(p##(95, s)), s, p, o, m)\n# define BOOST_PP_FOR_95(s, p, o, m) BOOST_PP_FOR_95_C(BOOST_PP_BOOL(p##(96, s)), s, p, o, m)\n# define BOOST_PP_FOR_96(s, p, o, m) BOOST_PP_FOR_96_C(BOOST_PP_BOOL(p##(97, s)), s, p, o, m)\n# define BOOST_PP_FOR_97(s, p, o, m) BOOST_PP_FOR_97_C(BOOST_PP_BOOL(p##(98, s)), s, p, o, m)\n# define BOOST_PP_FOR_98(s, p, o, m) BOOST_PP_FOR_98_C(BOOST_PP_BOOL(p##(99, s)), s, p, o, 
m)\n# define BOOST_PP_FOR_99(s, p, o, m) BOOST_PP_FOR_99_C(BOOST_PP_BOOL(p##(100, s)), s, p, o, m)\n# define BOOST_PP_FOR_100(s, p, o, m) BOOST_PP_FOR_100_C(BOOST_PP_BOOL(p##(101, s)), s, p, o, m)\n# define BOOST_PP_FOR_101(s, p, o, m) BOOST_PP_FOR_101_C(BOOST_PP_BOOL(p##(102, s)), s, p, o, m)\n# define BOOST_PP_FOR_102(s, p, o, m) BOOST_PP_FOR_102_C(BOOST_PP_BOOL(p##(103, s)), s, p, o, m)\n# define BOOST_PP_FOR_103(s, p, o, m) BOOST_PP_FOR_103_C(BOOST_PP_BOOL(p##(104, s)), s, p, o, m)\n# define BOOST_PP_FOR_104(s, p, o, m) BOOST_PP_FOR_104_C(BOOST_PP_BOOL(p##(105, s)), s, p, o, m)\n# define BOOST_PP_FOR_105(s, p, o, m) BOOST_PP_FOR_105_C(BOOST_PP_BOOL(p##(106, s)), s, p, o, m)\n# define BOOST_PP_FOR_106(s, p, o, m) BOOST_PP_FOR_106_C(BOOST_PP_BOOL(p##(107, s)), s, p, o, m)\n# define BOOST_PP_FOR_107(s, p, o, m) BOOST_PP_FOR_107_C(BOOST_PP_BOOL(p##(108, s)), s, p, o, m)\n# define BOOST_PP_FOR_108(s, p, o, m) BOOST_PP_FOR_108_C(BOOST_PP_BOOL(p##(109, s)), s, p, o, m)\n# define BOOST_PP_FOR_109(s, p, o, m) BOOST_PP_FOR_109_C(BOOST_PP_BOOL(p##(110, s)), s, p, o, m)\n# define BOOST_PP_FOR_110(s, p, o, m) BOOST_PP_FOR_110_C(BOOST_PP_BOOL(p##(111, s)), s, p, o, m)\n# define BOOST_PP_FOR_111(s, p, o, m) BOOST_PP_FOR_111_C(BOOST_PP_BOOL(p##(112, s)), s, p, o, m)\n# define BOOST_PP_FOR_112(s, p, o, m) BOOST_PP_FOR_112_C(BOOST_PP_BOOL(p##(113, s)), s, p, o, m)\n# define BOOST_PP_FOR_113(s, p, o, m) BOOST_PP_FOR_113_C(BOOST_PP_BOOL(p##(114, s)), s, p, o, m)\n# define BOOST_PP_FOR_114(s, p, o, m) BOOST_PP_FOR_114_C(BOOST_PP_BOOL(p##(115, s)), s, p, o, m)\n# define BOOST_PP_FOR_115(s, p, o, m) BOOST_PP_FOR_115_C(BOOST_PP_BOOL(p##(116, s)), s, p, o, m)\n# define BOOST_PP_FOR_116(s, p, o, m) BOOST_PP_FOR_116_C(BOOST_PP_BOOL(p##(117, s)), s, p, o, m)\n# define BOOST_PP_FOR_117(s, p, o, m) BOOST_PP_FOR_117_C(BOOST_PP_BOOL(p##(118, s)), s, p, o, m)\n# define BOOST_PP_FOR_118(s, p, o, m) BOOST_PP_FOR_118_C(BOOST_PP_BOOL(p##(119, s)), s, p, o, m)\n# define BOOST_PP_FOR_119(s, p, o, m) 
BOOST_PP_FOR_119_C(BOOST_PP_BOOL(p##(120, s)), s, p, o, m)\n# define BOOST_PP_FOR_120(s, p, o, m) BOOST_PP_FOR_120_C(BOOST_PP_BOOL(p##(121, s)), s, p, o, m)\n# define BOOST_PP_FOR_121(s, p, o, m) BOOST_PP_FOR_121_C(BOOST_PP_BOOL(p##(122, s)), s, p, o, m)\n# define BOOST_PP_FOR_122(s, p, o, m) BOOST_PP_FOR_122_C(BOOST_PP_BOOL(p##(123, s)), s, p, o, m)\n# define BOOST_PP_FOR_123(s, p, o, m) BOOST_PP_FOR_123_C(BOOST_PP_BOOL(p##(124, s)), s, p, o, m)\n# define BOOST_PP_FOR_124(s, p, o, m) BOOST_PP_FOR_124_C(BOOST_PP_BOOL(p##(125, s)), s, p, o, m)\n# define BOOST_PP_FOR_125(s, p, o, m) BOOST_PP_FOR_125_C(BOOST_PP_BOOL(p##(126, s)), s, p, o, m)\n# define BOOST_PP_FOR_126(s, p, o, m) BOOST_PP_FOR_126_C(BOOST_PP_BOOL(p##(127, s)), s, p, o, m)\n# define BOOST_PP_FOR_127(s, p, o, m) BOOST_PP_FOR_127_C(BOOST_PP_BOOL(p##(128, s)), s, p, o, m)\n# define BOOST_PP_FOR_128(s, p, o, m) BOOST_PP_FOR_128_C(BOOST_PP_BOOL(p##(129, s)), s, p, o, m)\n# define BOOST_PP_FOR_129(s, p, o, m) BOOST_PP_FOR_129_C(BOOST_PP_BOOL(p##(130, s)), s, p, o, m)\n# define BOOST_PP_FOR_130(s, p, o, m) BOOST_PP_FOR_130_C(BOOST_PP_BOOL(p##(131, s)), s, p, o, m)\n# define BOOST_PP_FOR_131(s, p, o, m) BOOST_PP_FOR_131_C(BOOST_PP_BOOL(p##(132, s)), s, p, o, m)\n# define BOOST_PP_FOR_132(s, p, o, m) BOOST_PP_FOR_132_C(BOOST_PP_BOOL(p##(133, s)), s, p, o, m)\n# define BOOST_PP_FOR_133(s, p, o, m) BOOST_PP_FOR_133_C(BOOST_PP_BOOL(p##(134, s)), s, p, o, m)\n# define BOOST_PP_FOR_134(s, p, o, m) BOOST_PP_FOR_134_C(BOOST_PP_BOOL(p##(135, s)), s, p, o, m)\n# define BOOST_PP_FOR_135(s, p, o, m) BOOST_PP_FOR_135_C(BOOST_PP_BOOL(p##(136, s)), s, p, o, m)\n# define BOOST_PP_FOR_136(s, p, o, m) BOOST_PP_FOR_136_C(BOOST_PP_BOOL(p##(137, s)), s, p, o, m)\n# define BOOST_PP_FOR_137(s, p, o, m) BOOST_PP_FOR_137_C(BOOST_PP_BOOL(p##(138, s)), s, p, o, m)\n# define BOOST_PP_FOR_138(s, p, o, m) BOOST_PP_FOR_138_C(BOOST_PP_BOOL(p##(139, s)), s, p, o, m)\n# define BOOST_PP_FOR_139(s, p, o, m) 
BOOST_PP_FOR_139_C(BOOST_PP_BOOL(p##(140, s)), s, p, o, m)\n# define BOOST_PP_FOR_140(s, p, o, m) BOOST_PP_FOR_140_C(BOOST_PP_BOOL(p##(141, s)), s, p, o, m)\n# define BOOST_PP_FOR_141(s, p, o, m) BOOST_PP_FOR_141_C(BOOST_PP_BOOL(p##(142, s)), s, p, o, m)\n# define BOOST_PP_FOR_142(s, p, o, m) BOOST_PP_FOR_142_C(BOOST_PP_BOOL(p##(143, s)), s, p, o, m)\n# define BOOST_PP_FOR_143(s, p, o, m) BOOST_PP_FOR_143_C(BOOST_PP_BOOL(p##(144, s)), s, p, o, m)\n# define BOOST_PP_FOR_144(s, p, o, m) BOOST_PP_FOR_144_C(BOOST_PP_BOOL(p##(145, s)), s, p, o, m)\n# define BOOST_PP_FOR_145(s, p, o, m) BOOST_PP_FOR_145_C(BOOST_PP_BOOL(p##(146, s)), s, p, o, m)\n# define BOOST_PP_FOR_146(s, p, o, m) BOOST_PP_FOR_146_C(BOOST_PP_BOOL(p##(147, s)), s, p, o, m)\n# define BOOST_PP_FOR_147(s, p, o, m) BOOST_PP_FOR_147_C(BOOST_PP_BOOL(p##(148, s)), s, p, o, m)\n# define BOOST_PP_FOR_148(s, p, o, m) BOOST_PP_FOR_148_C(BOOST_PP_BOOL(p##(149, s)), s, p, o, m)\n# define BOOST_PP_FOR_149(s, p, o, m) BOOST_PP_FOR_149_C(BOOST_PP_BOOL(p##(150, s)), s, p, o, m)\n# define BOOST_PP_FOR_150(s, p, o, m) BOOST_PP_FOR_150_C(BOOST_PP_BOOL(p##(151, s)), s, p, o, m)\n# define BOOST_PP_FOR_151(s, p, o, m) BOOST_PP_FOR_151_C(BOOST_PP_BOOL(p##(152, s)), s, p, o, m)\n# define BOOST_PP_FOR_152(s, p, o, m) BOOST_PP_FOR_152_C(BOOST_PP_BOOL(p##(153, s)), s, p, o, m)\n# define BOOST_PP_FOR_153(s, p, o, m) BOOST_PP_FOR_153_C(BOOST_PP_BOOL(p##(154, s)), s, p, o, m)\n# define BOOST_PP_FOR_154(s, p, o, m) BOOST_PP_FOR_154_C(BOOST_PP_BOOL(p##(155, s)), s, p, o, m)\n# define BOOST_PP_FOR_155(s, p, o, m) BOOST_PP_FOR_155_C(BOOST_PP_BOOL(p##(156, s)), s, p, o, m)\n# define BOOST_PP_FOR_156(s, p, o, m) BOOST_PP_FOR_156_C(BOOST_PP_BOOL(p##(157, s)), s, p, o, m)\n# define BOOST_PP_FOR_157(s, p, o, m) BOOST_PP_FOR_157_C(BOOST_PP_BOOL(p##(158, s)), s, p, o, m)\n# define BOOST_PP_FOR_158(s, p, o, m) BOOST_PP_FOR_158_C(BOOST_PP_BOOL(p##(159, s)), s, p, o, m)\n# define BOOST_PP_FOR_159(s, p, o, m) 
BOOST_PP_FOR_159_C(BOOST_PP_BOOL(p##(160, s)), s, p, o, m)\n# define BOOST_PP_FOR_160(s, p, o, m) BOOST_PP_FOR_160_C(BOOST_PP_BOOL(p##(161, s)), s, p, o, m)\n# define BOOST_PP_FOR_161(s, p, o, m) BOOST_PP_FOR_161_C(BOOST_PP_BOOL(p##(162, s)), s, p, o, m)\n# define BOOST_PP_FOR_162(s, p, o, m) BOOST_PP_FOR_162_C(BOOST_PP_BOOL(p##(163, s)), s, p, o, m)\n# define BOOST_PP_FOR_163(s, p, o, m) BOOST_PP_FOR_163_C(BOOST_PP_BOOL(p##(164, s)), s, p, o, m)\n# define BOOST_PP_FOR_164(s, p, o, m) BOOST_PP_FOR_164_C(BOOST_PP_BOOL(p##(165, s)), s, p, o, m)\n# define BOOST_PP_FOR_165(s, p, o, m) BOOST_PP_FOR_165_C(BOOST_PP_BOOL(p##(166, s)), s, p, o, m)\n# define BOOST_PP_FOR_166(s, p, o, m) BOOST_PP_FOR_166_C(BOOST_PP_BOOL(p##(167, s)), s, p, o, m)\n# define BOOST_PP_FOR_167(s, p, o, m) BOOST_PP_FOR_167_C(BOOST_PP_BOOL(p##(168, s)), s, p, o, m)\n# define BOOST_PP_FOR_168(s, p, o, m) BOOST_PP_FOR_168_C(BOOST_PP_BOOL(p##(169, s)), s, p, o, m)\n# define BOOST_PP_FOR_169(s, p, o, m) BOOST_PP_FOR_169_C(BOOST_PP_BOOL(p##(170, s)), s, p, o, m)\n# define BOOST_PP_FOR_170(s, p, o, m) BOOST_PP_FOR_170_C(BOOST_PP_BOOL(p##(171, s)), s, p, o, m)\n# define BOOST_PP_FOR_171(s, p, o, m) BOOST_PP_FOR_171_C(BOOST_PP_BOOL(p##(172, s)), s, p, o, m)\n# define BOOST_PP_FOR_172(s, p, o, m) BOOST_PP_FOR_172_C(BOOST_PP_BOOL(p##(173, s)), s, p, o, m)\n# define BOOST_PP_FOR_173(s, p, o, m) BOOST_PP_FOR_173_C(BOOST_PP_BOOL(p##(174, s)), s, p, o, m)\n# define BOOST_PP_FOR_174(s, p, o, m) BOOST_PP_FOR_174_C(BOOST_PP_BOOL(p##(175, s)), s, p, o, m)\n# define BOOST_PP_FOR_175(s, p, o, m) BOOST_PP_FOR_175_C(BOOST_PP_BOOL(p##(176, s)), s, p, o, m)\n# define BOOST_PP_FOR_176(s, p, o, m) BOOST_PP_FOR_176_C(BOOST_PP_BOOL(p##(177, s)), s, p, o, m)\n# define BOOST_PP_FOR_177(s, p, o, m) BOOST_PP_FOR_177_C(BOOST_PP_BOOL(p##(178, s)), s, p, o, m)\n# define BOOST_PP_FOR_178(s, p, o, m) BOOST_PP_FOR_178_C(BOOST_PP_BOOL(p##(179, s)), s, p, o, m)\n# define BOOST_PP_FOR_179(s, p, o, m) 
BOOST_PP_FOR_179_C(BOOST_PP_BOOL(p##(180, s)), s, p, o, m)\n# define BOOST_PP_FOR_180(s, p, o, m) BOOST_PP_FOR_180_C(BOOST_PP_BOOL(p##(181, s)), s, p, o, m)\n# define BOOST_PP_FOR_181(s, p, o, m) BOOST_PP_FOR_181_C(BOOST_PP_BOOL(p##(182, s)), s, p, o, m)\n# define BOOST_PP_FOR_182(s, p, o, m) BOOST_PP_FOR_182_C(BOOST_PP_BOOL(p##(183, s)), s, p, o, m)\n# define BOOST_PP_FOR_183(s, p, o, m) BOOST_PP_FOR_183_C(BOOST_PP_BOOL(p##(184, s)), s, p, o, m)\n# define BOOST_PP_FOR_184(s, p, o, m) BOOST_PP_FOR_184_C(BOOST_PP_BOOL(p##(185, s)), s, p, o, m)\n# define BOOST_PP_FOR_185(s, p, o, m) BOOST_PP_FOR_185_C(BOOST_PP_BOOL(p##(186, s)), s, p, o, m)\n# define BOOST_PP_FOR_186(s, p, o, m) BOOST_PP_FOR_186_C(BOOST_PP_BOOL(p##(187, s)), s, p, o, m)\n# define BOOST_PP_FOR_187(s, p, o, m) BOOST_PP_FOR_187_C(BOOST_PP_BOOL(p##(188, s)), s, p, o, m)\n# define BOOST_PP_FOR_188(s, p, o, m) BOOST_PP_FOR_188_C(BOOST_PP_BOOL(p##(189, s)), s, p, o, m)\n# define BOOST_PP_FOR_189(s, p, o, m) BOOST_PP_FOR_189_C(BOOST_PP_BOOL(p##(190, s)), s, p, o, m)\n# define BOOST_PP_FOR_190(s, p, o, m) BOOST_PP_FOR_190_C(BOOST_PP_BOOL(p##(191, s)), s, p, o, m)\n# define BOOST_PP_FOR_191(s, p, o, m) BOOST_PP_FOR_191_C(BOOST_PP_BOOL(p##(192, s)), s, p, o, m)\n# define BOOST_PP_FOR_192(s, p, o, m) BOOST_PP_FOR_192_C(BOOST_PP_BOOL(p##(193, s)), s, p, o, m)\n# define BOOST_PP_FOR_193(s, p, o, m) BOOST_PP_FOR_193_C(BOOST_PP_BOOL(p##(194, s)), s, p, o, m)\n# define BOOST_PP_FOR_194(s, p, o, m) BOOST_PP_FOR_194_C(BOOST_PP_BOOL(p##(195, s)), s, p, o, m)\n# define BOOST_PP_FOR_195(s, p, o, m) BOOST_PP_FOR_195_C(BOOST_PP_BOOL(p##(196, s)), s, p, o, m)\n# define BOOST_PP_FOR_196(s, p, o, m) BOOST_PP_FOR_196_C(BOOST_PP_BOOL(p##(197, s)), s, p, o, m)\n# define BOOST_PP_FOR_197(s, p, o, m) BOOST_PP_FOR_197_C(BOOST_PP_BOOL(p##(198, s)), s, p, o, m)\n# define BOOST_PP_FOR_198(s, p, o, m) BOOST_PP_FOR_198_C(BOOST_PP_BOOL(p##(199, s)), s, p, o, m)\n# define BOOST_PP_FOR_199(s, p, o, m) 
BOOST_PP_FOR_199_C(BOOST_PP_BOOL(p##(200, s)), s, p, o, m)\n# define BOOST_PP_FOR_200(s, p, o, m) BOOST_PP_FOR_200_C(BOOST_PP_BOOL(p##(201, s)), s, p, o, m)\n# define BOOST_PP_FOR_201(s, p, o, m) BOOST_PP_FOR_201_C(BOOST_PP_BOOL(p##(202, s)), s, p, o, m)\n# define BOOST_PP_FOR_202(s, p, o, m) BOOST_PP_FOR_202_C(BOOST_PP_BOOL(p##(203, s)), s, p, o, m)\n# define BOOST_PP_FOR_203(s, p, o, m) BOOST_PP_FOR_203_C(BOOST_PP_BOOL(p##(204, s)), s, p, o, m)\n# define BOOST_PP_FOR_204(s, p, o, m) BOOST_PP_FOR_204_C(BOOST_PP_BOOL(p##(205, s)), s, p, o, m)\n# define BOOST_PP_FOR_205(s, p, o, m) BOOST_PP_FOR_205_C(BOOST_PP_BOOL(p##(206, s)), s, p, o, m)\n# define BOOST_PP_FOR_206(s, p, o, m) BOOST_PP_FOR_206_C(BOOST_PP_BOOL(p##(207, s)), s, p, o, m)\n# define BOOST_PP_FOR_207(s, p, o, m) BOOST_PP_FOR_207_C(BOOST_PP_BOOL(p##(208, s)), s, p, o, m)\n# define BOOST_PP_FOR_208(s, p, o, m) BOOST_PP_FOR_208_C(BOOST_PP_BOOL(p##(209, s)), s, p, o, m)\n# define BOOST_PP_FOR_209(s, p, o, m) BOOST_PP_FOR_209_C(BOOST_PP_BOOL(p##(210, s)), s, p, o, m)\n# define BOOST_PP_FOR_210(s, p, o, m) BOOST_PP_FOR_210_C(BOOST_PP_BOOL(p##(211, s)), s, p, o, m)\n# define BOOST_PP_FOR_211(s, p, o, m) BOOST_PP_FOR_211_C(BOOST_PP_BOOL(p##(212, s)), s, p, o, m)\n# define BOOST_PP_FOR_212(s, p, o, m) BOOST_PP_FOR_212_C(BOOST_PP_BOOL(p##(213, s)), s, p, o, m)\n# define BOOST_PP_FOR_213(s, p, o, m) BOOST_PP_FOR_213_C(BOOST_PP_BOOL(p##(214, s)), s, p, o, m)\n# define BOOST_PP_FOR_214(s, p, o, m) BOOST_PP_FOR_214_C(BOOST_PP_BOOL(p##(215, s)), s, p, o, m)\n# define BOOST_PP_FOR_215(s, p, o, m) BOOST_PP_FOR_215_C(BOOST_PP_BOOL(p##(216, s)), s, p, o, m)\n# define BOOST_PP_FOR_216(s, p, o, m) BOOST_PP_FOR_216_C(BOOST_PP_BOOL(p##(217, s)), s, p, o, m)\n# define BOOST_PP_FOR_217(s, p, o, m) BOOST_PP_FOR_217_C(BOOST_PP_BOOL(p##(218, s)), s, p, o, m)\n# define BOOST_PP_FOR_218(s, p, o, m) BOOST_PP_FOR_218_C(BOOST_PP_BOOL(p##(219, s)), s, p, o, m)\n# define BOOST_PP_FOR_219(s, p, o, m) 
BOOST_PP_FOR_219_C(BOOST_PP_BOOL(p##(220, s)), s, p, o, m)\n# define BOOST_PP_FOR_220(s, p, o, m) BOOST_PP_FOR_220_C(BOOST_PP_BOOL(p##(221, s)), s, p, o, m)\n# define BOOST_PP_FOR_221(s, p, o, m) BOOST_PP_FOR_221_C(BOOST_PP_BOOL(p##(222, s)), s, p, o, m)\n# define BOOST_PP_FOR_222(s, p, o, m) BOOST_PP_FOR_222_C(BOOST_PP_BOOL(p##(223, s)), s, p, o, m)\n# define BOOST_PP_FOR_223(s, p, o, m) BOOST_PP_FOR_223_C(BOOST_PP_BOOL(p##(224, s)), s, p, o, m)\n# define BOOST_PP_FOR_224(s, p, o, m) BOOST_PP_FOR_224_C(BOOST_PP_BOOL(p##(225, s)), s, p, o, m)\n# define BOOST_PP_FOR_225(s, p, o, m) BOOST_PP_FOR_225_C(BOOST_PP_BOOL(p##(226, s)), s, p, o, m)\n# define BOOST_PP_FOR_226(s, p, o, m) BOOST_PP_FOR_226_C(BOOST_PP_BOOL(p##(227, s)), s, p, o, m)\n# define BOOST_PP_FOR_227(s, p, o, m) BOOST_PP_FOR_227_C(BOOST_PP_BOOL(p##(228, s)), s, p, o, m)\n# define BOOST_PP_FOR_228(s, p, o, m) BOOST_PP_FOR_228_C(BOOST_PP_BOOL(p##(229, s)), s, p, o, m)\n# define BOOST_PP_FOR_229(s, p, o, m) BOOST_PP_FOR_229_C(BOOST_PP_BOOL(p##(230, s)), s, p, o, m)\n# define BOOST_PP_FOR_230(s, p, o, m) BOOST_PP_FOR_230_C(BOOST_PP_BOOL(p##(231, s)), s, p, o, m)\n# define BOOST_PP_FOR_231(s, p, o, m) BOOST_PP_FOR_231_C(BOOST_PP_BOOL(p##(232, s)), s, p, o, m)\n# define BOOST_PP_FOR_232(s, p, o, m) BOOST_PP_FOR_232_C(BOOST_PP_BOOL(p##(233, s)), s, p, o, m)\n# define BOOST_PP_FOR_233(s, p, o, m) BOOST_PP_FOR_233_C(BOOST_PP_BOOL(p##(234, s)), s, p, o, m)\n# define BOOST_PP_FOR_234(s, p, o, m) BOOST_PP_FOR_234_C(BOOST_PP_BOOL(p##(235, s)), s, p, o, m)\n# define BOOST_PP_FOR_235(s, p, o, m) BOOST_PP_FOR_235_C(BOOST_PP_BOOL(p##(236, s)), s, p, o, m)\n# define BOOST_PP_FOR_236(s, p, o, m) BOOST_PP_FOR_236_C(BOOST_PP_BOOL(p##(237, s)), s, p, o, m)\n# define BOOST_PP_FOR_237(s, p, o, m) BOOST_PP_FOR_237_C(BOOST_PP_BOOL(p##(238, s)), s, p, o, m)\n# define BOOST_PP_FOR_238(s, p, o, m) BOOST_PP_FOR_238_C(BOOST_PP_BOOL(p##(239, s)), s, p, o, m)\n# define BOOST_PP_FOR_239(s, p, o, m) 
BOOST_PP_FOR_239_C(BOOST_PP_BOOL(p##(240, s)), s, p, o, m)\n# define BOOST_PP_FOR_240(s, p, o, m) BOOST_PP_FOR_240_C(BOOST_PP_BOOL(p##(241, s)), s, p, o, m)\n# define BOOST_PP_FOR_241(s, p, o, m) BOOST_PP_FOR_241_C(BOOST_PP_BOOL(p##(242, s)), s, p, o, m)\n# define BOOST_PP_FOR_242(s, p, o, m) BOOST_PP_FOR_242_C(BOOST_PP_BOOL(p##(243, s)), s, p, o, m)\n# define BOOST_PP_FOR_243(s, p, o, m) BOOST_PP_FOR_243_C(BOOST_PP_BOOL(p##(244, s)), s, p, o, m)\n# define BOOST_PP_FOR_244(s, p, o, m) BOOST_PP_FOR_244_C(BOOST_PP_BOOL(p##(245, s)), s, p, o, m)\n# define BOOST_PP_FOR_245(s, p, o, m) BOOST_PP_FOR_245_C(BOOST_PP_BOOL(p##(246, s)), s, p, o, m)\n# define BOOST_PP_FOR_246(s, p, o, m) BOOST_PP_FOR_246_C(BOOST_PP_BOOL(p##(247, s)), s, p, o, m)\n# define BOOST_PP_FOR_247(s, p, o, m) BOOST_PP_FOR_247_C(BOOST_PP_BOOL(p##(248, s)), s, p, o, m)\n# define BOOST_PP_FOR_248(s, p, o, m) BOOST_PP_FOR_248_C(BOOST_PP_BOOL(p##(249, s)), s, p, o, m)\n# define BOOST_PP_FOR_249(s, p, o, m) BOOST_PP_FOR_249_C(BOOST_PP_BOOL(p##(250, s)), s, p, o, m)\n# define BOOST_PP_FOR_250(s, p, o, m) BOOST_PP_FOR_250_C(BOOST_PP_BOOL(p##(251, s)), s, p, o, m)\n# define BOOST_PP_FOR_251(s, p, o, m) BOOST_PP_FOR_251_C(BOOST_PP_BOOL(p##(252, s)), s, p, o, m)\n# define BOOST_PP_FOR_252(s, p, o, m) BOOST_PP_FOR_252_C(BOOST_PP_BOOL(p##(253, s)), s, p, o, m)\n# define BOOST_PP_FOR_253(s, p, o, m) BOOST_PP_FOR_253_C(BOOST_PP_BOOL(p##(254, s)), s, p, o, m)\n# define BOOST_PP_FOR_254(s, p, o, m) BOOST_PP_FOR_254_C(BOOST_PP_BOOL(p##(255, s)), s, p, o, m)\n# define BOOST_PP_FOR_255(s, p, o, m) BOOST_PP_FOR_255_C(BOOST_PP_BOOL(p##(256, s)), s, p, o, m)\n# define BOOST_PP_FOR_256(s, p, o, m) BOOST_PP_FOR_256_C(BOOST_PP_BOOL(p##(257, s)), s, p, o, m)\n#\n# define BOOST_PP_FOR_1_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(2, s) BOOST_PP_IIF(c, BOOST_PP_FOR_2, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(2, s), p, o, m)\n# define BOOST_PP_FOR_2_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(3, s) 
BOOST_PP_IIF(c, BOOST_PP_FOR_3, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(3, s), p, o, m)\n# define BOOST_PP_FOR_3_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(4, s) BOOST_PP_IIF(c, BOOST_PP_FOR_4, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(4, s), p, o, m)\n# define BOOST_PP_FOR_4_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(5, s) BOOST_PP_IIF(c, BOOST_PP_FOR_5, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(5, s), p, o, m)\n# define BOOST_PP_FOR_5_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(6, s) BOOST_PP_IIF(c, BOOST_PP_FOR_6, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(6, s), p, o, m)\n# define BOOST_PP_FOR_6_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(7, s) BOOST_PP_IIF(c, BOOST_PP_FOR_7, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(7, s), p, o, m)\n# define BOOST_PP_FOR_7_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(8, s) BOOST_PP_IIF(c, BOOST_PP_FOR_8, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(8, s), p, o, m)\n# define BOOST_PP_FOR_8_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(9, s) BOOST_PP_IIF(c, BOOST_PP_FOR_9, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(9, s), p, o, m)\n# define BOOST_PP_FOR_9_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(10, s) BOOST_PP_IIF(c, BOOST_PP_FOR_10, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(10, s), p, o, m)\n# define BOOST_PP_FOR_10_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(11, s) BOOST_PP_IIF(c, BOOST_PP_FOR_11, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(11, s), p, o, m)\n# define BOOST_PP_FOR_11_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(12, s) BOOST_PP_IIF(c, BOOST_PP_FOR_12, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(12, s), p, o, m)\n# define BOOST_PP_FOR_12_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(13, s) BOOST_PP_IIF(c, BOOST_PP_FOR_13, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(13, s), p, o, m)\n# define BOOST_PP_FOR_13_C(c, s, p, o, m) BOOST_PP_IIF(c, 
m, BOOST_PP_TUPLE_EAT_2)(14, s) BOOST_PP_IIF(c, BOOST_PP_FOR_14, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(14, s), p, o, m)\n# define BOOST_PP_FOR_14_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(15, s) BOOST_PP_IIF(c, BOOST_PP_FOR_15, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(15, s), p, o, m)\n# define BOOST_PP_FOR_15_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(16, s) BOOST_PP_IIF(c, BOOST_PP_FOR_16, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(16, s), p, o, m)\n# define BOOST_PP_FOR_16_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(17, s) BOOST_PP_IIF(c, BOOST_PP_FOR_17, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(17, s), p, o, m)\n# define BOOST_PP_FOR_17_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(18, s) BOOST_PP_IIF(c, BOOST_PP_FOR_18, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(18, s), p, o, m)\n# define BOOST_PP_FOR_18_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(19, s) BOOST_PP_IIF(c, BOOST_PP_FOR_19, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(19, s), p, o, m)\n# define BOOST_PP_FOR_19_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(20, s) BOOST_PP_IIF(c, BOOST_PP_FOR_20, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(20, s), p, o, m)\n# define BOOST_PP_FOR_20_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(21, s) BOOST_PP_IIF(c, BOOST_PP_FOR_21, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(21, s), p, o, m)\n# define BOOST_PP_FOR_21_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(22, s) BOOST_PP_IIF(c, BOOST_PP_FOR_22, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(22, s), p, o, m)\n# define BOOST_PP_FOR_22_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(23, s) BOOST_PP_IIF(c, BOOST_PP_FOR_23, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(23, s), p, o, m)\n# define BOOST_PP_FOR_23_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(24, s) BOOST_PP_IIF(c, BOOST_PP_FOR_24, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(24, s), p, o, 
m)\n# define BOOST_PP_FOR_24_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(25, s) BOOST_PP_IIF(c, BOOST_PP_FOR_25, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(25, s), p, o, m)\n# define BOOST_PP_FOR_25_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(26, s) BOOST_PP_IIF(c, BOOST_PP_FOR_26, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(26, s), p, o, m)\n# define BOOST_PP_FOR_26_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(27, s) BOOST_PP_IIF(c, BOOST_PP_FOR_27, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(27, s), p, o, m)\n# define BOOST_PP_FOR_27_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(28, s) BOOST_PP_IIF(c, BOOST_PP_FOR_28, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(28, s), p, o, m)\n# define BOOST_PP_FOR_28_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(29, s) BOOST_PP_IIF(c, BOOST_PP_FOR_29, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(29, s), p, o, m)\n# define BOOST_PP_FOR_29_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(30, s) BOOST_PP_IIF(c, BOOST_PP_FOR_30, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(30, s), p, o, m)\n# define BOOST_PP_FOR_30_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(31, s) BOOST_PP_IIF(c, BOOST_PP_FOR_31, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(31, s), p, o, m)\n# define BOOST_PP_FOR_31_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(32, s) BOOST_PP_IIF(c, BOOST_PP_FOR_32, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(32, s), p, o, m)\n# define BOOST_PP_FOR_32_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(33, s) BOOST_PP_IIF(c, BOOST_PP_FOR_33, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(33, s), p, o, m)\n# define BOOST_PP_FOR_33_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(34, s) BOOST_PP_IIF(c, BOOST_PP_FOR_34, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(34, s), p, o, m)\n# define BOOST_PP_FOR_34_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(35, s) BOOST_PP_IIF(c, BOOST_PP_FOR_35, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(35, s), p, o, m)\n# define BOOST_PP_FOR_35_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(36, s) BOOST_PP_IIF(c, BOOST_PP_FOR_36, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(36, s), p, o, m)\n# define BOOST_PP_FOR_36_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(37, s) BOOST_PP_IIF(c, BOOST_PP_FOR_37, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(37, s), p, o, m)\n# define BOOST_PP_FOR_37_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(38, s) BOOST_PP_IIF(c, BOOST_PP_FOR_38, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(38, s), p, o, m)\n# define BOOST_PP_FOR_38_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(39, s) BOOST_PP_IIF(c, BOOST_PP_FOR_39, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(39, s), p, o, m)\n# define BOOST_PP_FOR_39_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(40, s) BOOST_PP_IIF(c, BOOST_PP_FOR_40, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(40, s), p, o, m)\n# define BOOST_PP_FOR_40_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(41, s) BOOST_PP_IIF(c, BOOST_PP_FOR_41, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(41, s), p, o, m)\n# define BOOST_PP_FOR_41_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(42, s) BOOST_PP_IIF(c, BOOST_PP_FOR_42, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(42, s), p, o, m)\n# define BOOST_PP_FOR_42_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(43, s) BOOST_PP_IIF(c, BOOST_PP_FOR_43, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(43, s), p, o, m)\n# define BOOST_PP_FOR_43_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(44, s) BOOST_PP_IIF(c, BOOST_PP_FOR_44, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(44, s), p, o, m)\n# define BOOST_PP_FOR_44_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(45, s) BOOST_PP_IIF(c, BOOST_PP_FOR_45, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(45, s), p, o, m)\n# define BOOST_PP_FOR_45_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(46, s) BOOST_PP_IIF(c, BOOST_PP_FOR_46, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(46, s), p, o, m)\n# define BOOST_PP_FOR_46_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(47, s) BOOST_PP_IIF(c, BOOST_PP_FOR_47, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(47, s), p, o, m)\n# define BOOST_PP_FOR_47_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(48, s) BOOST_PP_IIF(c, BOOST_PP_FOR_48, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(48, s), p, o, m)\n# define BOOST_PP_FOR_48_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(49, s) BOOST_PP_IIF(c, BOOST_PP_FOR_49, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(49, s), p, o, m)\n# define BOOST_PP_FOR_49_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(50, s) BOOST_PP_IIF(c, BOOST_PP_FOR_50, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(50, s), p, o, m)\n# define BOOST_PP_FOR_50_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(51, s) BOOST_PP_IIF(c, BOOST_PP_FOR_51, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(51, s), p, o, m)\n# define BOOST_PP_FOR_51_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(52, s) BOOST_PP_IIF(c, BOOST_PP_FOR_52, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(52, s), p, o, m)\n# define BOOST_PP_FOR_52_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(53, s) BOOST_PP_IIF(c, BOOST_PP_FOR_53, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(53, s), p, o, m)\n# define BOOST_PP_FOR_53_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(54, s) BOOST_PP_IIF(c, BOOST_PP_FOR_54, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(54, s), p, o, m)\n# define BOOST_PP_FOR_54_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(55, s) BOOST_PP_IIF(c, BOOST_PP_FOR_55, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(55, s), p, o, m)\n# define BOOST_PP_FOR_55_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(56, s) BOOST_PP_IIF(c, BOOST_PP_FOR_56, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(56, s), p, o, m)\n# 
define BOOST_PP_FOR_56_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(57, s) BOOST_PP_IIF(c, BOOST_PP_FOR_57, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(57, s), p, o, m)\n# define BOOST_PP_FOR_57_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(58, s) BOOST_PP_IIF(c, BOOST_PP_FOR_58, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(58, s), p, o, m)\n# define BOOST_PP_FOR_58_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(59, s) BOOST_PP_IIF(c, BOOST_PP_FOR_59, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(59, s), p, o, m)\n# define BOOST_PP_FOR_59_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(60, s) BOOST_PP_IIF(c, BOOST_PP_FOR_60, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(60, s), p, o, m)\n# define BOOST_PP_FOR_60_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(61, s) BOOST_PP_IIF(c, BOOST_PP_FOR_61, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(61, s), p, o, m)\n# define BOOST_PP_FOR_61_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(62, s) BOOST_PP_IIF(c, BOOST_PP_FOR_62, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(62, s), p, o, m)\n# define BOOST_PP_FOR_62_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(63, s) BOOST_PP_IIF(c, BOOST_PP_FOR_63, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(63, s), p, o, m)\n# define BOOST_PP_FOR_63_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(64, s) BOOST_PP_IIF(c, BOOST_PP_FOR_64, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(64, s), p, o, m)\n# define BOOST_PP_FOR_64_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(65, s) BOOST_PP_IIF(c, BOOST_PP_FOR_65, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(65, s), p, o, m)\n# define BOOST_PP_FOR_65_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(66, s) BOOST_PP_IIF(c, BOOST_PP_FOR_66, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(66, s), p, o, m)\n# define BOOST_PP_FOR_66_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(67, s) BOOST_PP_IIF(c, BOOST_PP_FOR_67, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(67, s), p, o, m)\n# define BOOST_PP_FOR_67_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(68, s) BOOST_PP_IIF(c, BOOST_PP_FOR_68, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(68, s), p, o, m)\n# define BOOST_PP_FOR_68_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(69, s) BOOST_PP_IIF(c, BOOST_PP_FOR_69, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(69, s), p, o, m)\n# define BOOST_PP_FOR_69_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(70, s) BOOST_PP_IIF(c, BOOST_PP_FOR_70, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(70, s), p, o, m)\n# define BOOST_PP_FOR_70_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(71, s) BOOST_PP_IIF(c, BOOST_PP_FOR_71, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(71, s), p, o, m)\n# define BOOST_PP_FOR_71_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(72, s) BOOST_PP_IIF(c, BOOST_PP_FOR_72, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(72, s), p, o, m)\n# define BOOST_PP_FOR_72_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(73, s) BOOST_PP_IIF(c, BOOST_PP_FOR_73, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(73, s), p, o, m)\n# define BOOST_PP_FOR_73_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(74, s) BOOST_PP_IIF(c, BOOST_PP_FOR_74, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(74, s), p, o, m)\n# define BOOST_PP_FOR_74_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(75, s) BOOST_PP_IIF(c, BOOST_PP_FOR_75, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(75, s), p, o, m)\n# define BOOST_PP_FOR_75_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(76, s) BOOST_PP_IIF(c, BOOST_PP_FOR_76, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(76, s), p, o, m)\n# define BOOST_PP_FOR_76_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(77, s) BOOST_PP_IIF(c, BOOST_PP_FOR_77, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(77, s), p, o, m)\n# define BOOST_PP_FOR_77_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(78, s) BOOST_PP_IIF(c, BOOST_PP_FOR_78, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(78, s), p, o, m)\n# define BOOST_PP_FOR_78_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(79, s) BOOST_PP_IIF(c, BOOST_PP_FOR_79, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(79, s), p, o, m)\n# define BOOST_PP_FOR_79_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(80, s) BOOST_PP_IIF(c, BOOST_PP_FOR_80, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(80, s), p, o, m)\n# define BOOST_PP_FOR_80_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(81, s) BOOST_PP_IIF(c, BOOST_PP_FOR_81, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(81, s), p, o, m)\n# define BOOST_PP_FOR_81_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(82, s) BOOST_PP_IIF(c, BOOST_PP_FOR_82, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(82, s), p, o, m)\n# define BOOST_PP_FOR_82_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(83, s) BOOST_PP_IIF(c, BOOST_PP_FOR_83, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(83, s), p, o, m)\n# define BOOST_PP_FOR_83_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(84, s) BOOST_PP_IIF(c, BOOST_PP_FOR_84, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(84, s), p, o, m)\n# define BOOST_PP_FOR_84_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(85, s) BOOST_PP_IIF(c, BOOST_PP_FOR_85, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(85, s), p, o, m)\n# define BOOST_PP_FOR_85_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(86, s) BOOST_PP_IIF(c, BOOST_PP_FOR_86, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(86, s), p, o, m)\n# define BOOST_PP_FOR_86_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(87, s) BOOST_PP_IIF(c, BOOST_PP_FOR_87, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(87, s), p, o, m)\n# define BOOST_PP_FOR_87_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(88, s) BOOST_PP_IIF(c, BOOST_PP_FOR_88, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(88, s), p, o, m)\n# 
define BOOST_PP_FOR_88_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(89, s) BOOST_PP_IIF(c, BOOST_PP_FOR_89, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(89, s), p, o, m)\n# define BOOST_PP_FOR_89_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(90, s) BOOST_PP_IIF(c, BOOST_PP_FOR_90, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(90, s), p, o, m)\n# define BOOST_PP_FOR_90_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(91, s) BOOST_PP_IIF(c, BOOST_PP_FOR_91, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(91, s), p, o, m)\n# define BOOST_PP_FOR_91_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(92, s) BOOST_PP_IIF(c, BOOST_PP_FOR_92, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(92, s), p, o, m)\n# define BOOST_PP_FOR_92_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(93, s) BOOST_PP_IIF(c, BOOST_PP_FOR_93, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(93, s), p, o, m)\n# define BOOST_PP_FOR_93_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(94, s) BOOST_PP_IIF(c, BOOST_PP_FOR_94, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(94, s), p, o, m)\n# define BOOST_PP_FOR_94_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(95, s) BOOST_PP_IIF(c, BOOST_PP_FOR_95, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(95, s), p, o, m)\n# define BOOST_PP_FOR_95_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(96, s) BOOST_PP_IIF(c, BOOST_PP_FOR_96, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(96, s), p, o, m)\n# define BOOST_PP_FOR_96_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(97, s) BOOST_PP_IIF(c, BOOST_PP_FOR_97, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(97, s), p, o, m)\n# define BOOST_PP_FOR_97_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(98, s) BOOST_PP_IIF(c, BOOST_PP_FOR_98, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(98, s), p, o, m)\n# define BOOST_PP_FOR_98_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(99, s) BOOST_PP_IIF(c, BOOST_PP_FOR_99, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(99, s), p, o, m)\n# define BOOST_PP_FOR_99_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(100, s) BOOST_PP_IIF(c, BOOST_PP_FOR_100, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(100, s), p, o, m)\n# define BOOST_PP_FOR_100_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(101, s) BOOST_PP_IIF(c, BOOST_PP_FOR_101, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(101, s), p, o, m)\n# define BOOST_PP_FOR_101_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(102, s) BOOST_PP_IIF(c, BOOST_PP_FOR_102, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(102, s), p, o, m)\n# define BOOST_PP_FOR_102_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(103, s) BOOST_PP_IIF(c, BOOST_PP_FOR_103, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(103, s), p, o, m)\n# define BOOST_PP_FOR_103_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(104, s) BOOST_PP_IIF(c, BOOST_PP_FOR_104, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(104, s), p, o, m)\n# define BOOST_PP_FOR_104_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(105, s) BOOST_PP_IIF(c, BOOST_PP_FOR_105, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(105, s), p, o, m)\n# define BOOST_PP_FOR_105_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(106, s) BOOST_PP_IIF(c, BOOST_PP_FOR_106, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(106, s), p, o, m)\n# define BOOST_PP_FOR_106_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(107, s) BOOST_PP_IIF(c, BOOST_PP_FOR_107, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(107, s), p, o, m)\n# define BOOST_PP_FOR_107_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(108, s) BOOST_PP_IIF(c, BOOST_PP_FOR_108, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(108, s), p, o, m)\n# define BOOST_PP_FOR_108_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(109, s) BOOST_PP_IIF(c, BOOST_PP_FOR_109, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(109, s), p, o, m)\n# define 
BOOST_PP_FOR_109_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(110, s) BOOST_PP_IIF(c, BOOST_PP_FOR_110, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(110, s), p, o, m)\n# define BOOST_PP_FOR_110_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(111, s) BOOST_PP_IIF(c, BOOST_PP_FOR_111, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(111, s), p, o, m)\n# define BOOST_PP_FOR_111_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(112, s) BOOST_PP_IIF(c, BOOST_PP_FOR_112, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(112, s), p, o, m)\n# define BOOST_PP_FOR_112_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(113, s) BOOST_PP_IIF(c, BOOST_PP_FOR_113, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(113, s), p, o, m)\n# define BOOST_PP_FOR_113_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(114, s) BOOST_PP_IIF(c, BOOST_PP_FOR_114, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(114, s), p, o, m)\n# define BOOST_PP_FOR_114_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(115, s) BOOST_PP_IIF(c, BOOST_PP_FOR_115, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(115, s), p, o, m)\n# define BOOST_PP_FOR_115_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(116, s) BOOST_PP_IIF(c, BOOST_PP_FOR_116, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(116, s), p, o, m)\n# define BOOST_PP_FOR_116_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(117, s) BOOST_PP_IIF(c, BOOST_PP_FOR_117, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(117, s), p, o, m)\n# define BOOST_PP_FOR_117_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(118, s) BOOST_PP_IIF(c, BOOST_PP_FOR_118, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(118, s), p, o, m)\n# define BOOST_PP_FOR_118_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(119, s) BOOST_PP_IIF(c, BOOST_PP_FOR_119, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(119, s), p, o, m)\n# define BOOST_PP_FOR_119_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(120, s) 
BOOST_PP_IIF(c, BOOST_PP_FOR_120, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(120, s), p, o, m)\n# define BOOST_PP_FOR_120_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(121, s) BOOST_PP_IIF(c, BOOST_PP_FOR_121, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(121, s), p, o, m)\n# define BOOST_PP_FOR_121_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(122, s) BOOST_PP_IIF(c, BOOST_PP_FOR_122, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(122, s), p, o, m)\n# define BOOST_PP_FOR_122_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(123, s) BOOST_PP_IIF(c, BOOST_PP_FOR_123, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(123, s), p, o, m)\n# define BOOST_PP_FOR_123_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(124, s) BOOST_PP_IIF(c, BOOST_PP_FOR_124, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(124, s), p, o, m)\n# define BOOST_PP_FOR_124_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(125, s) BOOST_PP_IIF(c, BOOST_PP_FOR_125, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(125, s), p, o, m)\n# define BOOST_PP_FOR_125_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(126, s) BOOST_PP_IIF(c, BOOST_PP_FOR_126, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(126, s), p, o, m)\n# define BOOST_PP_FOR_126_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(127, s) BOOST_PP_IIF(c, BOOST_PP_FOR_127, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(127, s), p, o, m)\n# define BOOST_PP_FOR_127_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(128, s) BOOST_PP_IIF(c, BOOST_PP_FOR_128, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(128, s), p, o, m)\n# define BOOST_PP_FOR_128_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(129, s) BOOST_PP_IIF(c, BOOST_PP_FOR_129, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(129, s), p, o, m)\n# define BOOST_PP_FOR_129_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(130, s) BOOST_PP_IIF(c, BOOST_PP_FOR_130, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(130, s), 
p, o, m)\n# define BOOST_PP_FOR_130_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(131, s) BOOST_PP_IIF(c, BOOST_PP_FOR_131, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(131, s), p, o, m)\n# define BOOST_PP_FOR_131_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(132, s) BOOST_PP_IIF(c, BOOST_PP_FOR_132, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(132, s), p, o, m)\n# define BOOST_PP_FOR_132_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(133, s) BOOST_PP_IIF(c, BOOST_PP_FOR_133, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(133, s), p, o, m)\n# define BOOST_PP_FOR_133_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(134, s) BOOST_PP_IIF(c, BOOST_PP_FOR_134, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(134, s), p, o, m)\n# define BOOST_PP_FOR_134_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(135, s) BOOST_PP_IIF(c, BOOST_PP_FOR_135, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(135, s), p, o, m)\n# define BOOST_PP_FOR_135_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(136, s) BOOST_PP_IIF(c, BOOST_PP_FOR_136, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(136, s), p, o, m)\n# define BOOST_PP_FOR_136_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(137, s) BOOST_PP_IIF(c, BOOST_PP_FOR_137, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(137, s), p, o, m)\n# define BOOST_PP_FOR_137_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(138, s) BOOST_PP_IIF(c, BOOST_PP_FOR_138, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(138, s), p, o, m)\n# define BOOST_PP_FOR_138_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(139, s) BOOST_PP_IIF(c, BOOST_PP_FOR_139, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(139, s), p, o, m)\n# define BOOST_PP_FOR_139_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(140, s) BOOST_PP_IIF(c, BOOST_PP_FOR_140, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(140, s), p, o, m)\n# define BOOST_PP_FOR_140_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(141, s) BOOST_PP_IIF(c, BOOST_PP_FOR_141, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(141, s), p, o, m)\n# define BOOST_PP_FOR_141_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(142, s) BOOST_PP_IIF(c, BOOST_PP_FOR_142, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(142, s), p, o, m)\n# define BOOST_PP_FOR_142_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(143, s) BOOST_PP_IIF(c, BOOST_PP_FOR_143, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(143, s), p, o, m)\n# define BOOST_PP_FOR_143_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(144, s) BOOST_PP_IIF(c, BOOST_PP_FOR_144, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(144, s), p, o, m)\n# define BOOST_PP_FOR_144_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(145, s) BOOST_PP_IIF(c, BOOST_PP_FOR_145, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(145, s), p, o, m)\n# define BOOST_PP_FOR_145_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(146, s) BOOST_PP_IIF(c, BOOST_PP_FOR_146, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(146, s), p, o, m)\n# define BOOST_PP_FOR_146_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(147, s) BOOST_PP_IIF(c, BOOST_PP_FOR_147, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(147, s), p, o, m)\n# define BOOST_PP_FOR_147_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(148, s) BOOST_PP_IIF(c, BOOST_PP_FOR_148, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(148, s), p, o, m)\n# define BOOST_PP_FOR_148_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(149, s) BOOST_PP_IIF(c, BOOST_PP_FOR_149, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(149, s), p, o, m)\n# define BOOST_PP_FOR_149_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(150, s) BOOST_PP_IIF(c, BOOST_PP_FOR_150, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(150, s), p, o, m)\n# define BOOST_PP_FOR_150_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(151, s) BOOST_PP_IIF(c, BOOST_PP_FOR_151, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(151, s), p, o, m)\n# define BOOST_PP_FOR_151_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(152, s) BOOST_PP_IIF(c, BOOST_PP_FOR_152, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(152, s), p, o, m)\n# define BOOST_PP_FOR_152_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(153, s) BOOST_PP_IIF(c, BOOST_PP_FOR_153, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(153, s), p, o, m)\n# define BOOST_PP_FOR_153_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(154, s) BOOST_PP_IIF(c, BOOST_PP_FOR_154, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(154, s), p, o, m)\n# define BOOST_PP_FOR_154_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(155, s) BOOST_PP_IIF(c, BOOST_PP_FOR_155, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(155, s), p, o, m)\n# define BOOST_PP_FOR_155_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(156, s) BOOST_PP_IIF(c, BOOST_PP_FOR_156, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(156, s), p, o, m)\n# define BOOST_PP_FOR_156_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(157, s) BOOST_PP_IIF(c, BOOST_PP_FOR_157, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(157, s), p, o, m)\n# define BOOST_PP_FOR_157_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(158, s) BOOST_PP_IIF(c, BOOST_PP_FOR_158, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(158, s), p, o, m)\n# define BOOST_PP_FOR_158_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(159, s) BOOST_PP_IIF(c, BOOST_PP_FOR_159, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(159, s), p, o, m)\n# define BOOST_PP_FOR_159_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(160, s) BOOST_PP_IIF(c, BOOST_PP_FOR_160, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(160, s), p, o, m)\n# define BOOST_PP_FOR_160_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(161, s) BOOST_PP_IIF(c, BOOST_PP_FOR_161, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(161, s), p, o, m)\n# define 
BOOST_PP_FOR_161_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(162, s) BOOST_PP_IIF(c, BOOST_PP_FOR_162, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(162, s), p, o, m)\n# define BOOST_PP_FOR_162_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(163, s) BOOST_PP_IIF(c, BOOST_PP_FOR_163, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(163, s), p, o, m)\n# define BOOST_PP_FOR_163_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(164, s) BOOST_PP_IIF(c, BOOST_PP_FOR_164, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(164, s), p, o, m)\n# define BOOST_PP_FOR_164_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(165, s) BOOST_PP_IIF(c, BOOST_PP_FOR_165, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(165, s), p, o, m)\n# define BOOST_PP_FOR_165_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(166, s) BOOST_PP_IIF(c, BOOST_PP_FOR_166, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(166, s), p, o, m)\n# define BOOST_PP_FOR_166_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(167, s) BOOST_PP_IIF(c, BOOST_PP_FOR_167, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(167, s), p, o, m)\n# define BOOST_PP_FOR_167_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(168, s) BOOST_PP_IIF(c, BOOST_PP_FOR_168, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(168, s), p, o, m)\n# define BOOST_PP_FOR_168_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(169, s) BOOST_PP_IIF(c, BOOST_PP_FOR_169, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(169, s), p, o, m)\n# define BOOST_PP_FOR_169_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(170, s) BOOST_PP_IIF(c, BOOST_PP_FOR_170, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(170, s), p, o, m)\n# define BOOST_PP_FOR_170_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(171, s) BOOST_PP_IIF(c, BOOST_PP_FOR_171, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(171, s), p, o, m)\n# define BOOST_PP_FOR_171_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(172, s) 
BOOST_PP_IIF(c, BOOST_PP_FOR_172, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(172, s), p, o, m)\n# define BOOST_PP_FOR_172_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(173, s) BOOST_PP_IIF(c, BOOST_PP_FOR_173, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(173, s), p, o, m)\n# define BOOST_PP_FOR_173_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(174, s) BOOST_PP_IIF(c, BOOST_PP_FOR_174, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(174, s), p, o, m)\n# define BOOST_PP_FOR_174_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(175, s) BOOST_PP_IIF(c, BOOST_PP_FOR_175, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(175, s), p, o, m)\n# define BOOST_PP_FOR_175_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(176, s) BOOST_PP_IIF(c, BOOST_PP_FOR_176, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(176, s), p, o, m)\n# define BOOST_PP_FOR_176_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(177, s) BOOST_PP_IIF(c, BOOST_PP_FOR_177, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(177, s), p, o, m)\n# define BOOST_PP_FOR_177_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(178, s) BOOST_PP_IIF(c, BOOST_PP_FOR_178, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(178, s), p, o, m)\n# define BOOST_PP_FOR_178_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(179, s) BOOST_PP_IIF(c, BOOST_PP_FOR_179, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(179, s), p, o, m)\n# define BOOST_PP_FOR_179_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(180, s) BOOST_PP_IIF(c, BOOST_PP_FOR_180, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(180, s), p, o, m)\n# define BOOST_PP_FOR_180_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(181, s) BOOST_PP_IIF(c, BOOST_PP_FOR_181, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(181, s), p, o, m)\n# define BOOST_PP_FOR_181_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(182, s) BOOST_PP_IIF(c, BOOST_PP_FOR_182, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(182, s), 
p, o, m)\n# define BOOST_PP_FOR_182_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(183, s) BOOST_PP_IIF(c, BOOST_PP_FOR_183, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(183, s), p, o, m)\n# define BOOST_PP_FOR_183_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(184, s) BOOST_PP_IIF(c, BOOST_PP_FOR_184, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(184, s), p, o, m)\n# define BOOST_PP_FOR_184_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(185, s) BOOST_PP_IIF(c, BOOST_PP_FOR_185, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(185, s), p, o, m)\n# define BOOST_PP_FOR_185_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(186, s) BOOST_PP_IIF(c, BOOST_PP_FOR_186, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(186, s), p, o, m)\n# define BOOST_PP_FOR_186_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(187, s) BOOST_PP_IIF(c, BOOST_PP_FOR_187, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(187, s), p, o, m)\n# define BOOST_PP_FOR_187_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(188, s) BOOST_PP_IIF(c, BOOST_PP_FOR_188, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(188, s), p, o, m)\n# define BOOST_PP_FOR_188_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(189, s) BOOST_PP_IIF(c, BOOST_PP_FOR_189, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(189, s), p, o, m)\n# define BOOST_PP_FOR_189_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(190, s) BOOST_PP_IIF(c, BOOST_PP_FOR_190, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(190, s), p, o, m)\n# define BOOST_PP_FOR_190_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(191, s) BOOST_PP_IIF(c, BOOST_PP_FOR_191, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(191, s), p, o, m)\n# define BOOST_PP_FOR_191_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(192, s) BOOST_PP_IIF(c, BOOST_PP_FOR_192, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(192, s), p, o, m)\n# define BOOST_PP_FOR_192_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(193, s) BOOST_PP_IIF(c, BOOST_PP_FOR_193, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(193, s), p, o, m)\n# define BOOST_PP_FOR_193_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(194, s) BOOST_PP_IIF(c, BOOST_PP_FOR_194, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(194, s), p, o, m)\n# define BOOST_PP_FOR_194_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(195, s) BOOST_PP_IIF(c, BOOST_PP_FOR_195, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(195, s), p, o, m)\n# define BOOST_PP_FOR_195_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(196, s) BOOST_PP_IIF(c, BOOST_PP_FOR_196, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(196, s), p, o, m)\n# define BOOST_PP_FOR_196_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(197, s) BOOST_PP_IIF(c, BOOST_PP_FOR_197, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(197, s), p, o, m)\n# define BOOST_PP_FOR_197_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(198, s) BOOST_PP_IIF(c, BOOST_PP_FOR_198, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(198, s), p, o, m)\n# define BOOST_PP_FOR_198_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(199, s) BOOST_PP_IIF(c, BOOST_PP_FOR_199, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(199, s), p, o, m)\n# define BOOST_PP_FOR_199_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(200, s) BOOST_PP_IIF(c, BOOST_PP_FOR_200, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(200, s), p, o, m)\n# define BOOST_PP_FOR_200_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(201, s) BOOST_PP_IIF(c, BOOST_PP_FOR_201, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(201, s), p, o, m)\n# define BOOST_PP_FOR_201_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(202, s) BOOST_PP_IIF(c, BOOST_PP_FOR_202, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(202, s), p, o, m)\n# define BOOST_PP_FOR_202_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(203, s) BOOST_PP_IIF(c, BOOST_PP_FOR_203, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(203, s), p, o, m)\n# define BOOST_PP_FOR_203_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(204, s) BOOST_PP_IIF(c, BOOST_PP_FOR_204, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(204, s), p, o, m)\n# define BOOST_PP_FOR_204_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(205, s) BOOST_PP_IIF(c, BOOST_PP_FOR_205, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(205, s), p, o, m)\n# define BOOST_PP_FOR_205_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(206, s) BOOST_PP_IIF(c, BOOST_PP_FOR_206, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(206, s), p, o, m)\n# define BOOST_PP_FOR_206_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(207, s) BOOST_PP_IIF(c, BOOST_PP_FOR_207, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(207, s), p, o, m)\n# define BOOST_PP_FOR_207_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(208, s) BOOST_PP_IIF(c, BOOST_PP_FOR_208, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(208, s), p, o, m)\n# define BOOST_PP_FOR_208_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(209, s) BOOST_PP_IIF(c, BOOST_PP_FOR_209, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(209, s), p, o, m)\n# define BOOST_PP_FOR_209_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(210, s) BOOST_PP_IIF(c, BOOST_PP_FOR_210, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(210, s), p, o, m)\n# define BOOST_PP_FOR_210_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(211, s) BOOST_PP_IIF(c, BOOST_PP_FOR_211, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(211, s), p, o, m)\n# define BOOST_PP_FOR_211_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(212, s) BOOST_PP_IIF(c, BOOST_PP_FOR_212, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(212, s), p, o, m)\n# define BOOST_PP_FOR_212_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(213, s) BOOST_PP_IIF(c, BOOST_PP_FOR_213, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(213, s), p, o, m)\n# define 
BOOST_PP_FOR_213_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(214, s) BOOST_PP_IIF(c, BOOST_PP_FOR_214, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(214, s), p, o, m)\n# define BOOST_PP_FOR_214_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(215, s) BOOST_PP_IIF(c, BOOST_PP_FOR_215, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(215, s), p, o, m)\n# define BOOST_PP_FOR_215_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(216, s) BOOST_PP_IIF(c, BOOST_PP_FOR_216, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(216, s), p, o, m)\n# define BOOST_PP_FOR_216_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(217, s) BOOST_PP_IIF(c, BOOST_PP_FOR_217, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(217, s), p, o, m)\n# define BOOST_PP_FOR_217_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(218, s) BOOST_PP_IIF(c, BOOST_PP_FOR_218, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(218, s), p, o, m)\n# define BOOST_PP_FOR_218_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(219, s) BOOST_PP_IIF(c, BOOST_PP_FOR_219, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(219, s), p, o, m)\n# define BOOST_PP_FOR_219_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(220, s) BOOST_PP_IIF(c, BOOST_PP_FOR_220, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(220, s), p, o, m)\n# define BOOST_PP_FOR_220_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(221, s) BOOST_PP_IIF(c, BOOST_PP_FOR_221, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(221, s), p, o, m)\n# define BOOST_PP_FOR_221_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(222, s) BOOST_PP_IIF(c, BOOST_PP_FOR_222, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(222, s), p, o, m)\n# define BOOST_PP_FOR_222_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(223, s) BOOST_PP_IIF(c, BOOST_PP_FOR_223, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(223, s), p, o, m)\n# define BOOST_PP_FOR_223_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(224, s) 
BOOST_PP_IIF(c, BOOST_PP_FOR_224, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(224, s), p, o, m)\n# define BOOST_PP_FOR_224_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(225, s) BOOST_PP_IIF(c, BOOST_PP_FOR_225, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(225, s), p, o, m)\n# define BOOST_PP_FOR_225_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(226, s) BOOST_PP_IIF(c, BOOST_PP_FOR_226, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(226, s), p, o, m)\n# define BOOST_PP_FOR_226_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(227, s) BOOST_PP_IIF(c, BOOST_PP_FOR_227, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(227, s), p, o, m)\n# define BOOST_PP_FOR_227_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(228, s) BOOST_PP_IIF(c, BOOST_PP_FOR_228, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(228, s), p, o, m)\n# define BOOST_PP_FOR_228_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(229, s) BOOST_PP_IIF(c, BOOST_PP_FOR_229, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(229, s), p, o, m)\n# define BOOST_PP_FOR_229_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(230, s) BOOST_PP_IIF(c, BOOST_PP_FOR_230, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(230, s), p, o, m)\n# define BOOST_PP_FOR_230_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(231, s) BOOST_PP_IIF(c, BOOST_PP_FOR_231, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(231, s), p, o, m)\n# define BOOST_PP_FOR_231_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(232, s) BOOST_PP_IIF(c, BOOST_PP_FOR_232, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(232, s), p, o, m)\n# define BOOST_PP_FOR_232_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(233, s) BOOST_PP_IIF(c, BOOST_PP_FOR_233, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(233, s), p, o, m)\n# define BOOST_PP_FOR_233_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(234, s) BOOST_PP_IIF(c, BOOST_PP_FOR_234, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(234, s), 
p, o, m)\n# define BOOST_PP_FOR_234_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(235, s) BOOST_PP_IIF(c, BOOST_PP_FOR_235, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(235, s), p, o, m)\n# define BOOST_PP_FOR_235_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(236, s) BOOST_PP_IIF(c, BOOST_PP_FOR_236, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(236, s), p, o, m)\n# define BOOST_PP_FOR_236_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(237, s) BOOST_PP_IIF(c, BOOST_PP_FOR_237, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(237, s), p, o, m)\n# define BOOST_PP_FOR_237_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(238, s) BOOST_PP_IIF(c, BOOST_PP_FOR_238, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(238, s), p, o, m)\n# define BOOST_PP_FOR_238_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(239, s) BOOST_PP_IIF(c, BOOST_PP_FOR_239, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(239, s), p, o, m)\n# define BOOST_PP_FOR_239_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(240, s) BOOST_PP_IIF(c, BOOST_PP_FOR_240, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(240, s), p, o, m)\n# define BOOST_PP_FOR_240_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(241, s) BOOST_PP_IIF(c, BOOST_PP_FOR_241, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(241, s), p, o, m)\n# define BOOST_PP_FOR_241_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(242, s) BOOST_PP_IIF(c, BOOST_PP_FOR_242, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(242, s), p, o, m)\n# define BOOST_PP_FOR_242_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(243, s) BOOST_PP_IIF(c, BOOST_PP_FOR_243, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(243, s), p, o, m)\n# define BOOST_PP_FOR_243_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(244, s) BOOST_PP_IIF(c, BOOST_PP_FOR_244, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(244, s), p, o, m)\n# define BOOST_PP_FOR_244_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(245, s) BOOST_PP_IIF(c, BOOST_PP_FOR_245, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(245, s), p, o, m)\n# define BOOST_PP_FOR_245_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(246, s) BOOST_PP_IIF(c, BOOST_PP_FOR_246, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(246, s), p, o, m)\n# define BOOST_PP_FOR_246_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(247, s) BOOST_PP_IIF(c, BOOST_PP_FOR_247, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(247, s), p, o, m)\n# define BOOST_PP_FOR_247_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(248, s) BOOST_PP_IIF(c, BOOST_PP_FOR_248, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(248, s), p, o, m)\n# define BOOST_PP_FOR_248_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(249, s) BOOST_PP_IIF(c, BOOST_PP_FOR_249, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(249, s), p, o, m)\n# define BOOST_PP_FOR_249_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(250, s) BOOST_PP_IIF(c, BOOST_PP_FOR_250, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(250, s), p, o, m)\n# define BOOST_PP_FOR_250_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(251, s) BOOST_PP_IIF(c, BOOST_PP_FOR_251, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(251, s), p, o, m)\n# define BOOST_PP_FOR_251_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(252, s) BOOST_PP_IIF(c, BOOST_PP_FOR_252, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(252, s), p, o, m)\n# define BOOST_PP_FOR_252_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(253, s) BOOST_PP_IIF(c, BOOST_PP_FOR_253, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(253, s), p, o, m)\n# define BOOST_PP_FOR_253_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(254, s) BOOST_PP_IIF(c, BOOST_PP_FOR_254, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(254, s), p, o, m)\n# define BOOST_PP_FOR_254_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(255, s) BOOST_PP_IIF(c, BOOST_PP_FOR_255, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(255, s), p, o, m)\n# define BOOST_PP_FOR_255_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(256, s) BOOST_PP_IIF(c, BOOST_PP_FOR_256, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(256, s), p, o, m)\n# define BOOST_PP_FOR_256_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(257, s) BOOST_PP_IIF(c, BOOST_PP_FOR_257, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(257, s), p, o, m)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/detail/edg/for.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_DETAIL_EDG_FOR_HPP\n# define BOOST_PREPROCESSOR_REPETITION_DETAIL_EDG_FOR_HPP\n#\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_FOR_1(s, p, o, m) BOOST_PP_FOR_1_I(s, p, o, m)\n# define BOOST_PP_FOR_2(s, p, o, m) BOOST_PP_FOR_2_I(s, p, o, m)\n# define BOOST_PP_FOR_3(s, p, o, m) BOOST_PP_FOR_3_I(s, p, o, m)\n# define BOOST_PP_FOR_4(s, p, o, m) BOOST_PP_FOR_4_I(s, p, o, m)\n# define BOOST_PP_FOR_5(s, p, o, m) BOOST_PP_FOR_5_I(s, p, o, m)\n# define BOOST_PP_FOR_6(s, p, o, m) BOOST_PP_FOR_6_I(s, p, o, m)\n# define BOOST_PP_FOR_7(s, p, o, m) BOOST_PP_FOR_7_I(s, p, o, m)\n# define BOOST_PP_FOR_8(s, p, o, m) BOOST_PP_FOR_8_I(s, p, o, m)\n# define BOOST_PP_FOR_9(s, p, o, m) BOOST_PP_FOR_9_I(s, p, o, m)\n# define BOOST_PP_FOR_10(s, p, o, m) BOOST_PP_FOR_10_I(s, p, o, m)\n# define BOOST_PP_FOR_11(s, p, o, m) BOOST_PP_FOR_11_I(s, p, o, m)\n# define BOOST_PP_FOR_12(s, p, o, m) BOOST_PP_FOR_12_I(s, p, o, m)\n# define BOOST_PP_FOR_13(s, p, o, m) BOOST_PP_FOR_13_I(s, p, o, m)\n# define BOOST_PP_FOR_14(s, p, o, m) BOOST_PP_FOR_14_I(s, p, o, m)\n# define BOOST_PP_FOR_15(s, p, o, m) BOOST_PP_FOR_15_I(s, p, o, m)\n# define BOOST_PP_FOR_16(s, p, o, m) BOOST_PP_FOR_16_I(s, p, o, m)\n# define BOOST_PP_FOR_17(s, p, o, m) BOOST_PP_FOR_17_I(s, p, o, m)\n# define BOOST_PP_FOR_18(s, p, o, m) BOOST_PP_FOR_18_I(s, p, o, m)\n# define BOOST_PP_FOR_19(s, p, o, m) BOOST_PP_FOR_19_I(s, p, o, m)\n# define BOOST_PP_FOR_20(s, p, o, m) BOOST_PP_FOR_20_I(s, p, o, m)\n# define BOOST_PP_FOR_21(s, p, o, m) 
BOOST_PP_FOR_21_I(s, p, o, m)\n# define BOOST_PP_FOR_22(s, p, o, m) BOOST_PP_FOR_22_I(s, p, o, m)\n# define BOOST_PP_FOR_23(s, p, o, m) BOOST_PP_FOR_23_I(s, p, o, m)\n# define BOOST_PP_FOR_24(s, p, o, m) BOOST_PP_FOR_24_I(s, p, o, m)\n# define BOOST_PP_FOR_25(s, p, o, m) BOOST_PP_FOR_25_I(s, p, o, m)\n# define BOOST_PP_FOR_26(s, p, o, m) BOOST_PP_FOR_26_I(s, p, o, m)\n# define BOOST_PP_FOR_27(s, p, o, m) BOOST_PP_FOR_27_I(s, p, o, m)\n# define BOOST_PP_FOR_28(s, p, o, m) BOOST_PP_FOR_28_I(s, p, o, m)\n# define BOOST_PP_FOR_29(s, p, o, m) BOOST_PP_FOR_29_I(s, p, o, m)\n# define BOOST_PP_FOR_30(s, p, o, m) BOOST_PP_FOR_30_I(s, p, o, m)\n# define BOOST_PP_FOR_31(s, p, o, m) BOOST_PP_FOR_31_I(s, p, o, m)\n# define BOOST_PP_FOR_32(s, p, o, m) BOOST_PP_FOR_32_I(s, p, o, m)\n# define BOOST_PP_FOR_33(s, p, o, m) BOOST_PP_FOR_33_I(s, p, o, m)\n# define BOOST_PP_FOR_34(s, p, o, m) BOOST_PP_FOR_34_I(s, p, o, m)\n# define BOOST_PP_FOR_35(s, p, o, m) BOOST_PP_FOR_35_I(s, p, o, m)\n# define BOOST_PP_FOR_36(s, p, o, m) BOOST_PP_FOR_36_I(s, p, o, m)\n# define BOOST_PP_FOR_37(s, p, o, m) BOOST_PP_FOR_37_I(s, p, o, m)\n# define BOOST_PP_FOR_38(s, p, o, m) BOOST_PP_FOR_38_I(s, p, o, m)\n# define BOOST_PP_FOR_39(s, p, o, m) BOOST_PP_FOR_39_I(s, p, o, m)\n# define BOOST_PP_FOR_40(s, p, o, m) BOOST_PP_FOR_40_I(s, p, o, m)\n# define BOOST_PP_FOR_41(s, p, o, m) BOOST_PP_FOR_41_I(s, p, o, m)\n# define BOOST_PP_FOR_42(s, p, o, m) BOOST_PP_FOR_42_I(s, p, o, m)\n# define BOOST_PP_FOR_43(s, p, o, m) BOOST_PP_FOR_43_I(s, p, o, m)\n# define BOOST_PP_FOR_44(s, p, o, m) BOOST_PP_FOR_44_I(s, p, o, m)\n# define BOOST_PP_FOR_45(s, p, o, m) BOOST_PP_FOR_45_I(s, p, o, m)\n# define BOOST_PP_FOR_46(s, p, o, m) BOOST_PP_FOR_46_I(s, p, o, m)\n# define BOOST_PP_FOR_47(s, p, o, m) BOOST_PP_FOR_47_I(s, p, o, m)\n# define BOOST_PP_FOR_48(s, p, o, m) BOOST_PP_FOR_48_I(s, p, o, m)\n# define BOOST_PP_FOR_49(s, p, o, m) BOOST_PP_FOR_49_I(s, p, o, m)\n# define BOOST_PP_FOR_50(s, p, o, m) BOOST_PP_FOR_50_I(s, p, o, 
m)\n# define BOOST_PP_FOR_51(s, p, o, m) BOOST_PP_FOR_51_I(s, p, o, m)\n# define BOOST_PP_FOR_52(s, p, o, m) BOOST_PP_FOR_52_I(s, p, o, m)\n# define BOOST_PP_FOR_53(s, p, o, m) BOOST_PP_FOR_53_I(s, p, o, m)\n# define BOOST_PP_FOR_54(s, p, o, m) BOOST_PP_FOR_54_I(s, p, o, m)\n# define BOOST_PP_FOR_55(s, p, o, m) BOOST_PP_FOR_55_I(s, p, o, m)\n# define BOOST_PP_FOR_56(s, p, o, m) BOOST_PP_FOR_56_I(s, p, o, m)\n# define BOOST_PP_FOR_57(s, p, o, m) BOOST_PP_FOR_57_I(s, p, o, m)\n# define BOOST_PP_FOR_58(s, p, o, m) BOOST_PP_FOR_58_I(s, p, o, m)\n# define BOOST_PP_FOR_59(s, p, o, m) BOOST_PP_FOR_59_I(s, p, o, m)\n# define BOOST_PP_FOR_60(s, p, o, m) BOOST_PP_FOR_60_I(s, p, o, m)\n# define BOOST_PP_FOR_61(s, p, o, m) BOOST_PP_FOR_61_I(s, p, o, m)\n# define BOOST_PP_FOR_62(s, p, o, m) BOOST_PP_FOR_62_I(s, p, o, m)\n# define BOOST_PP_FOR_63(s, p, o, m) BOOST_PP_FOR_63_I(s, p, o, m)\n# define BOOST_PP_FOR_64(s, p, o, m) BOOST_PP_FOR_64_I(s, p, o, m)\n# define BOOST_PP_FOR_65(s, p, o, m) BOOST_PP_FOR_65_I(s, p, o, m)\n# define BOOST_PP_FOR_66(s, p, o, m) BOOST_PP_FOR_66_I(s, p, o, m)\n# define BOOST_PP_FOR_67(s, p, o, m) BOOST_PP_FOR_67_I(s, p, o, m)\n# define BOOST_PP_FOR_68(s, p, o, m) BOOST_PP_FOR_68_I(s, p, o, m)\n# define BOOST_PP_FOR_69(s, p, o, m) BOOST_PP_FOR_69_I(s, p, o, m)\n# define BOOST_PP_FOR_70(s, p, o, m) BOOST_PP_FOR_70_I(s, p, o, m)\n# define BOOST_PP_FOR_71(s, p, o, m) BOOST_PP_FOR_71_I(s, p, o, m)\n# define BOOST_PP_FOR_72(s, p, o, m) BOOST_PP_FOR_72_I(s, p, o, m)\n# define BOOST_PP_FOR_73(s, p, o, m) BOOST_PP_FOR_73_I(s, p, o, m)\n# define BOOST_PP_FOR_74(s, p, o, m) BOOST_PP_FOR_74_I(s, p, o, m)\n# define BOOST_PP_FOR_75(s, p, o, m) BOOST_PP_FOR_75_I(s, p, o, m)\n# define BOOST_PP_FOR_76(s, p, o, m) BOOST_PP_FOR_76_I(s, p, o, m)\n# define BOOST_PP_FOR_77(s, p, o, m) BOOST_PP_FOR_77_I(s, p, o, m)\n# define BOOST_PP_FOR_78(s, p, o, m) BOOST_PP_FOR_78_I(s, p, o, m)\n# define BOOST_PP_FOR_79(s, p, o, m) BOOST_PP_FOR_79_I(s, p, o, m)\n# define 
BOOST_PP_FOR_80(s, p, o, m) BOOST_PP_FOR_80_I(s, p, o, m)\n# define BOOST_PP_FOR_81(s, p, o, m) BOOST_PP_FOR_81_I(s, p, o, m)\n# define BOOST_PP_FOR_82(s, p, o, m) BOOST_PP_FOR_82_I(s, p, o, m)\n# define BOOST_PP_FOR_83(s, p, o, m) BOOST_PP_FOR_83_I(s, p, o, m)\n# define BOOST_PP_FOR_84(s, p, o, m) BOOST_PP_FOR_84_I(s, p, o, m)\n# define BOOST_PP_FOR_85(s, p, o, m) BOOST_PP_FOR_85_I(s, p, o, m)\n# define BOOST_PP_FOR_86(s, p, o, m) BOOST_PP_FOR_86_I(s, p, o, m)\n# define BOOST_PP_FOR_87(s, p, o, m) BOOST_PP_FOR_87_I(s, p, o, m)\n# define BOOST_PP_FOR_88(s, p, o, m) BOOST_PP_FOR_88_I(s, p, o, m)\n# define BOOST_PP_FOR_89(s, p, o, m) BOOST_PP_FOR_89_I(s, p, o, m)\n# define BOOST_PP_FOR_90(s, p, o, m) BOOST_PP_FOR_90_I(s, p, o, m)\n# define BOOST_PP_FOR_91(s, p, o, m) BOOST_PP_FOR_91_I(s, p, o, m)\n# define BOOST_PP_FOR_92(s, p, o, m) BOOST_PP_FOR_92_I(s, p, o, m)\n# define BOOST_PP_FOR_93(s, p, o, m) BOOST_PP_FOR_93_I(s, p, o, m)\n# define BOOST_PP_FOR_94(s, p, o, m) BOOST_PP_FOR_94_I(s, p, o, m)\n# define BOOST_PP_FOR_95(s, p, o, m) BOOST_PP_FOR_95_I(s, p, o, m)\n# define BOOST_PP_FOR_96(s, p, o, m) BOOST_PP_FOR_96_I(s, p, o, m)\n# define BOOST_PP_FOR_97(s, p, o, m) BOOST_PP_FOR_97_I(s, p, o, m)\n# define BOOST_PP_FOR_98(s, p, o, m) BOOST_PP_FOR_98_I(s, p, o, m)\n# define BOOST_PP_FOR_99(s, p, o, m) BOOST_PP_FOR_99_I(s, p, o, m)\n# define BOOST_PP_FOR_100(s, p, o, m) BOOST_PP_FOR_100_I(s, p, o, m)\n# define BOOST_PP_FOR_101(s, p, o, m) BOOST_PP_FOR_101_I(s, p, o, m)\n# define BOOST_PP_FOR_102(s, p, o, m) BOOST_PP_FOR_102_I(s, p, o, m)\n# define BOOST_PP_FOR_103(s, p, o, m) BOOST_PP_FOR_103_I(s, p, o, m)\n# define BOOST_PP_FOR_104(s, p, o, m) BOOST_PP_FOR_104_I(s, p, o, m)\n# define BOOST_PP_FOR_105(s, p, o, m) BOOST_PP_FOR_105_I(s, p, o, m)\n# define BOOST_PP_FOR_106(s, p, o, m) BOOST_PP_FOR_106_I(s, p, o, m)\n# define BOOST_PP_FOR_107(s, p, o, m) BOOST_PP_FOR_107_I(s, p, o, m)\n# define BOOST_PP_FOR_108(s, p, o, m) BOOST_PP_FOR_108_I(s, p, o, m)\n# define 
BOOST_PP_FOR_109(s, p, o, m) BOOST_PP_FOR_109_I(s, p, o, m)\n# define BOOST_PP_FOR_110(s, p, o, m) BOOST_PP_FOR_110_I(s, p, o, m)\n# define BOOST_PP_FOR_111(s, p, o, m) BOOST_PP_FOR_111_I(s, p, o, m)\n# define BOOST_PP_FOR_112(s, p, o, m) BOOST_PP_FOR_112_I(s, p, o, m)\n# define BOOST_PP_FOR_113(s, p, o, m) BOOST_PP_FOR_113_I(s, p, o, m)\n# define BOOST_PP_FOR_114(s, p, o, m) BOOST_PP_FOR_114_I(s, p, o, m)\n# define BOOST_PP_FOR_115(s, p, o, m) BOOST_PP_FOR_115_I(s, p, o, m)\n# define BOOST_PP_FOR_116(s, p, o, m) BOOST_PP_FOR_116_I(s, p, o, m)\n# define BOOST_PP_FOR_117(s, p, o, m) BOOST_PP_FOR_117_I(s, p, o, m)\n# define BOOST_PP_FOR_118(s, p, o, m) BOOST_PP_FOR_118_I(s, p, o, m)\n# define BOOST_PP_FOR_119(s, p, o, m) BOOST_PP_FOR_119_I(s, p, o, m)\n# define BOOST_PP_FOR_120(s, p, o, m) BOOST_PP_FOR_120_I(s, p, o, m)\n# define BOOST_PP_FOR_121(s, p, o, m) BOOST_PP_FOR_121_I(s, p, o, m)\n# define BOOST_PP_FOR_122(s, p, o, m) BOOST_PP_FOR_122_I(s, p, o, m)\n# define BOOST_PP_FOR_123(s, p, o, m) BOOST_PP_FOR_123_I(s, p, o, m)\n# define BOOST_PP_FOR_124(s, p, o, m) BOOST_PP_FOR_124_I(s, p, o, m)\n# define BOOST_PP_FOR_125(s, p, o, m) BOOST_PP_FOR_125_I(s, p, o, m)\n# define BOOST_PP_FOR_126(s, p, o, m) BOOST_PP_FOR_126_I(s, p, o, m)\n# define BOOST_PP_FOR_127(s, p, o, m) BOOST_PP_FOR_127_I(s, p, o, m)\n# define BOOST_PP_FOR_128(s, p, o, m) BOOST_PP_FOR_128_I(s, p, o, m)\n# define BOOST_PP_FOR_129(s, p, o, m) BOOST_PP_FOR_129_I(s, p, o, m)\n# define BOOST_PP_FOR_130(s, p, o, m) BOOST_PP_FOR_130_I(s, p, o, m)\n# define BOOST_PP_FOR_131(s, p, o, m) BOOST_PP_FOR_131_I(s, p, o, m)\n# define BOOST_PP_FOR_132(s, p, o, m) BOOST_PP_FOR_132_I(s, p, o, m)\n# define BOOST_PP_FOR_133(s, p, o, m) BOOST_PP_FOR_133_I(s, p, o, m)\n# define BOOST_PP_FOR_134(s, p, o, m) BOOST_PP_FOR_134_I(s, p, o, m)\n# define BOOST_PP_FOR_135(s, p, o, m) BOOST_PP_FOR_135_I(s, p, o, m)\n# define BOOST_PP_FOR_136(s, p, o, m) BOOST_PP_FOR_136_I(s, p, o, m)\n# define BOOST_PP_FOR_137(s, p, o, m) 
BOOST_PP_FOR_137_I(s, p, o, m)\n# define BOOST_PP_FOR_138(s, p, o, m) BOOST_PP_FOR_138_I(s, p, o, m)\n# define BOOST_PP_FOR_139(s, p, o, m) BOOST_PP_FOR_139_I(s, p, o, m)\n# define BOOST_PP_FOR_140(s, p, o, m) BOOST_PP_FOR_140_I(s, p, o, m)\n# define BOOST_PP_FOR_141(s, p, o, m) BOOST_PP_FOR_141_I(s, p, o, m)\n# define BOOST_PP_FOR_142(s, p, o, m) BOOST_PP_FOR_142_I(s, p, o, m)\n# define BOOST_PP_FOR_143(s, p, o, m) BOOST_PP_FOR_143_I(s, p, o, m)\n# define BOOST_PP_FOR_144(s, p, o, m) BOOST_PP_FOR_144_I(s, p, o, m)\n# define BOOST_PP_FOR_145(s, p, o, m) BOOST_PP_FOR_145_I(s, p, o, m)\n# define BOOST_PP_FOR_146(s, p, o, m) BOOST_PP_FOR_146_I(s, p, o, m)\n# define BOOST_PP_FOR_147(s, p, o, m) BOOST_PP_FOR_147_I(s, p, o, m)\n# define BOOST_PP_FOR_148(s, p, o, m) BOOST_PP_FOR_148_I(s, p, o, m)\n# define BOOST_PP_FOR_149(s, p, o, m) BOOST_PP_FOR_149_I(s, p, o, m)\n# define BOOST_PP_FOR_150(s, p, o, m) BOOST_PP_FOR_150_I(s, p, o, m)\n# define BOOST_PP_FOR_151(s, p, o, m) BOOST_PP_FOR_151_I(s, p, o, m)\n# define BOOST_PP_FOR_152(s, p, o, m) BOOST_PP_FOR_152_I(s, p, o, m)\n# define BOOST_PP_FOR_153(s, p, o, m) BOOST_PP_FOR_153_I(s, p, o, m)\n# define BOOST_PP_FOR_154(s, p, o, m) BOOST_PP_FOR_154_I(s, p, o, m)\n# define BOOST_PP_FOR_155(s, p, o, m) BOOST_PP_FOR_155_I(s, p, o, m)\n# define BOOST_PP_FOR_156(s, p, o, m) BOOST_PP_FOR_156_I(s, p, o, m)\n# define BOOST_PP_FOR_157(s, p, o, m) BOOST_PP_FOR_157_I(s, p, o, m)\n# define BOOST_PP_FOR_158(s, p, o, m) BOOST_PP_FOR_158_I(s, p, o, m)\n# define BOOST_PP_FOR_159(s, p, o, m) BOOST_PP_FOR_159_I(s, p, o, m)\n# define BOOST_PP_FOR_160(s, p, o, m) BOOST_PP_FOR_160_I(s, p, o, m)\n# define BOOST_PP_FOR_161(s, p, o, m) BOOST_PP_FOR_161_I(s, p, o, m)\n# define BOOST_PP_FOR_162(s, p, o, m) BOOST_PP_FOR_162_I(s, p, o, m)\n# define BOOST_PP_FOR_163(s, p, o, m) BOOST_PP_FOR_163_I(s, p, o, m)\n# define BOOST_PP_FOR_164(s, p, o, m) BOOST_PP_FOR_164_I(s, p, o, m)\n# define BOOST_PP_FOR_165(s, p, o, m) BOOST_PP_FOR_165_I(s, p, o, m)\n# 
define BOOST_PP_FOR_166(s, p, o, m) BOOST_PP_FOR_166_I(s, p, o, m)\n# define BOOST_PP_FOR_167(s, p, o, m) BOOST_PP_FOR_167_I(s, p, o, m)\n# define BOOST_PP_FOR_168(s, p, o, m) BOOST_PP_FOR_168_I(s, p, o, m)\n# define BOOST_PP_FOR_169(s, p, o, m) BOOST_PP_FOR_169_I(s, p, o, m)\n# define BOOST_PP_FOR_170(s, p, o, m) BOOST_PP_FOR_170_I(s, p, o, m)\n# define BOOST_PP_FOR_171(s, p, o, m) BOOST_PP_FOR_171_I(s, p, o, m)\n# define BOOST_PP_FOR_172(s, p, o, m) BOOST_PP_FOR_172_I(s, p, o, m)\n# define BOOST_PP_FOR_173(s, p, o, m) BOOST_PP_FOR_173_I(s, p, o, m)\n# define BOOST_PP_FOR_174(s, p, o, m) BOOST_PP_FOR_174_I(s, p, o, m)\n# define BOOST_PP_FOR_175(s, p, o, m) BOOST_PP_FOR_175_I(s, p, o, m)\n# define BOOST_PP_FOR_176(s, p, o, m) BOOST_PP_FOR_176_I(s, p, o, m)\n# define BOOST_PP_FOR_177(s, p, o, m) BOOST_PP_FOR_177_I(s, p, o, m)\n# define BOOST_PP_FOR_178(s, p, o, m) BOOST_PP_FOR_178_I(s, p, o, m)\n# define BOOST_PP_FOR_179(s, p, o, m) BOOST_PP_FOR_179_I(s, p, o, m)\n# define BOOST_PP_FOR_180(s, p, o, m) BOOST_PP_FOR_180_I(s, p, o, m)\n# define BOOST_PP_FOR_181(s, p, o, m) BOOST_PP_FOR_181_I(s, p, o, m)\n# define BOOST_PP_FOR_182(s, p, o, m) BOOST_PP_FOR_182_I(s, p, o, m)\n# define BOOST_PP_FOR_183(s, p, o, m) BOOST_PP_FOR_183_I(s, p, o, m)\n# define BOOST_PP_FOR_184(s, p, o, m) BOOST_PP_FOR_184_I(s, p, o, m)\n# define BOOST_PP_FOR_185(s, p, o, m) BOOST_PP_FOR_185_I(s, p, o, m)\n# define BOOST_PP_FOR_186(s, p, o, m) BOOST_PP_FOR_186_I(s, p, o, m)\n# define BOOST_PP_FOR_187(s, p, o, m) BOOST_PP_FOR_187_I(s, p, o, m)\n# define BOOST_PP_FOR_188(s, p, o, m) BOOST_PP_FOR_188_I(s, p, o, m)\n# define BOOST_PP_FOR_189(s, p, o, m) BOOST_PP_FOR_189_I(s, p, o, m)\n# define BOOST_PP_FOR_190(s, p, o, m) BOOST_PP_FOR_190_I(s, p, o, m)\n# define BOOST_PP_FOR_191(s, p, o, m) BOOST_PP_FOR_191_I(s, p, o, m)\n# define BOOST_PP_FOR_192(s, p, o, m) BOOST_PP_FOR_192_I(s, p, o, m)\n# define BOOST_PP_FOR_193(s, p, o, m) BOOST_PP_FOR_193_I(s, p, o, m)\n# define BOOST_PP_FOR_194(s, p, o, m) 
BOOST_PP_FOR_194_I(s, p, o, m)\n# define BOOST_PP_FOR_195(s, p, o, m) BOOST_PP_FOR_195_I(s, p, o, m)\n# define BOOST_PP_FOR_196(s, p, o, m) BOOST_PP_FOR_196_I(s, p, o, m)\n# define BOOST_PP_FOR_197(s, p, o, m) BOOST_PP_FOR_197_I(s, p, o, m)\n# define BOOST_PP_FOR_198(s, p, o, m) BOOST_PP_FOR_198_I(s, p, o, m)\n# define BOOST_PP_FOR_199(s, p, o, m) BOOST_PP_FOR_199_I(s, p, o, m)\n# define BOOST_PP_FOR_200(s, p, o, m) BOOST_PP_FOR_200_I(s, p, o, m)\n# define BOOST_PP_FOR_201(s, p, o, m) BOOST_PP_FOR_201_I(s, p, o, m)\n# define BOOST_PP_FOR_202(s, p, o, m) BOOST_PP_FOR_202_I(s, p, o, m)\n# define BOOST_PP_FOR_203(s, p, o, m) BOOST_PP_FOR_203_I(s, p, o, m)\n# define BOOST_PP_FOR_204(s, p, o, m) BOOST_PP_FOR_204_I(s, p, o, m)\n# define BOOST_PP_FOR_205(s, p, o, m) BOOST_PP_FOR_205_I(s, p, o, m)\n# define BOOST_PP_FOR_206(s, p, o, m) BOOST_PP_FOR_206_I(s, p, o, m)\n# define BOOST_PP_FOR_207(s, p, o, m) BOOST_PP_FOR_207_I(s, p, o, m)\n# define BOOST_PP_FOR_208(s, p, o, m) BOOST_PP_FOR_208_I(s, p, o, m)\n# define BOOST_PP_FOR_209(s, p, o, m) BOOST_PP_FOR_209_I(s, p, o, m)\n# define BOOST_PP_FOR_210(s, p, o, m) BOOST_PP_FOR_210_I(s, p, o, m)\n# define BOOST_PP_FOR_211(s, p, o, m) BOOST_PP_FOR_211_I(s, p, o, m)\n# define BOOST_PP_FOR_212(s, p, o, m) BOOST_PP_FOR_212_I(s, p, o, m)\n# define BOOST_PP_FOR_213(s, p, o, m) BOOST_PP_FOR_213_I(s, p, o, m)\n# define BOOST_PP_FOR_214(s, p, o, m) BOOST_PP_FOR_214_I(s, p, o, m)\n# define BOOST_PP_FOR_215(s, p, o, m) BOOST_PP_FOR_215_I(s, p, o, m)\n# define BOOST_PP_FOR_216(s, p, o, m) BOOST_PP_FOR_216_I(s, p, o, m)\n# define BOOST_PP_FOR_217(s, p, o, m) BOOST_PP_FOR_217_I(s, p, o, m)\n# define BOOST_PP_FOR_218(s, p, o, m) BOOST_PP_FOR_218_I(s, p, o, m)\n# define BOOST_PP_FOR_219(s, p, o, m) BOOST_PP_FOR_219_I(s, p, o, m)\n# define BOOST_PP_FOR_220(s, p, o, m) BOOST_PP_FOR_220_I(s, p, o, m)\n# define BOOST_PP_FOR_221(s, p, o, m) BOOST_PP_FOR_221_I(s, p, o, m)\n# define BOOST_PP_FOR_222(s, p, o, m) BOOST_PP_FOR_222_I(s, p, o, m)\n# 
define BOOST_PP_FOR_223(s, p, o, m) BOOST_PP_FOR_223_I(s, p, o, m)\n# define BOOST_PP_FOR_224(s, p, o, m) BOOST_PP_FOR_224_I(s, p, o, m)\n# define BOOST_PP_FOR_225(s, p, o, m) BOOST_PP_FOR_225_I(s, p, o, m)\n# define BOOST_PP_FOR_226(s, p, o, m) BOOST_PP_FOR_226_I(s, p, o, m)\n# define BOOST_PP_FOR_227(s, p, o, m) BOOST_PP_FOR_227_I(s, p, o, m)\n# define BOOST_PP_FOR_228(s, p, o, m) BOOST_PP_FOR_228_I(s, p, o, m)\n# define BOOST_PP_FOR_229(s, p, o, m) BOOST_PP_FOR_229_I(s, p, o, m)\n# define BOOST_PP_FOR_230(s, p, o, m) BOOST_PP_FOR_230_I(s, p, o, m)\n# define BOOST_PP_FOR_231(s, p, o, m) BOOST_PP_FOR_231_I(s, p, o, m)\n# define BOOST_PP_FOR_232(s, p, o, m) BOOST_PP_FOR_232_I(s, p, o, m)\n# define BOOST_PP_FOR_233(s, p, o, m) BOOST_PP_FOR_233_I(s, p, o, m)\n# define BOOST_PP_FOR_234(s, p, o, m) BOOST_PP_FOR_234_I(s, p, o, m)\n# define BOOST_PP_FOR_235(s, p, o, m) BOOST_PP_FOR_235_I(s, p, o, m)\n# define BOOST_PP_FOR_236(s, p, o, m) BOOST_PP_FOR_236_I(s, p, o, m)\n# define BOOST_PP_FOR_237(s, p, o, m) BOOST_PP_FOR_237_I(s, p, o, m)\n# define BOOST_PP_FOR_238(s, p, o, m) BOOST_PP_FOR_238_I(s, p, o, m)\n# define BOOST_PP_FOR_239(s, p, o, m) BOOST_PP_FOR_239_I(s, p, o, m)\n# define BOOST_PP_FOR_240(s, p, o, m) BOOST_PP_FOR_240_I(s, p, o, m)\n# define BOOST_PP_FOR_241(s, p, o, m) BOOST_PP_FOR_241_I(s, p, o, m)\n# define BOOST_PP_FOR_242(s, p, o, m) BOOST_PP_FOR_242_I(s, p, o, m)\n# define BOOST_PP_FOR_243(s, p, o, m) BOOST_PP_FOR_243_I(s, p, o, m)\n# define BOOST_PP_FOR_244(s, p, o, m) BOOST_PP_FOR_244_I(s, p, o, m)\n# define BOOST_PP_FOR_245(s, p, o, m) BOOST_PP_FOR_245_I(s, p, o, m)\n# define BOOST_PP_FOR_246(s, p, o, m) BOOST_PP_FOR_246_I(s, p, o, m)\n# define BOOST_PP_FOR_247(s, p, o, m) BOOST_PP_FOR_247_I(s, p, o, m)\n# define BOOST_PP_FOR_248(s, p, o, m) BOOST_PP_FOR_248_I(s, p, o, m)\n# define BOOST_PP_FOR_249(s, p, o, m) BOOST_PP_FOR_249_I(s, p, o, m)\n# define BOOST_PP_FOR_250(s, p, o, m) BOOST_PP_FOR_250_I(s, p, o, m)\n# define BOOST_PP_FOR_251(s, p, o, m) 
BOOST_PP_FOR_251_I(s, p, o, m)\n# define BOOST_PP_FOR_252(s, p, o, m) BOOST_PP_FOR_252_I(s, p, o, m)\n# define BOOST_PP_FOR_253(s, p, o, m) BOOST_PP_FOR_253_I(s, p, o, m)\n# define BOOST_PP_FOR_254(s, p, o, m) BOOST_PP_FOR_254_I(s, p, o, m)\n# define BOOST_PP_FOR_255(s, p, o, m) BOOST_PP_FOR_255_I(s, p, o, m)\n# define BOOST_PP_FOR_256(s, p, o, m) BOOST_PP_FOR_256_I(s, p, o, m)\n#\n# define BOOST_PP_FOR_1_I(s, p, o, m) BOOST_PP_IF(p(2, s), m, BOOST_PP_TUPLE_EAT_2)(2, s) BOOST_PP_IF(p(2, s), BOOST_PP_FOR_2, BOOST_PP_TUPLE_EAT_4)(o(2, s), p, o, m)\n# define BOOST_PP_FOR_2_I(s, p, o, m) BOOST_PP_IF(p(3, s), m, BOOST_PP_TUPLE_EAT_2)(3, s) BOOST_PP_IF(p(3, s), BOOST_PP_FOR_3, BOOST_PP_TUPLE_EAT_4)(o(3, s), p, o, m)\n# define BOOST_PP_FOR_3_I(s, p, o, m) BOOST_PP_IF(p(4, s), m, BOOST_PP_TUPLE_EAT_2)(4, s) BOOST_PP_IF(p(4, s), BOOST_PP_FOR_4, BOOST_PP_TUPLE_EAT_4)(o(4, s), p, o, m)\n# define BOOST_PP_FOR_4_I(s, p, o, m) BOOST_PP_IF(p(5, s), m, BOOST_PP_TUPLE_EAT_2)(5, s) BOOST_PP_IF(p(5, s), BOOST_PP_FOR_5, BOOST_PP_TUPLE_EAT_4)(o(5, s), p, o, m)\n# define BOOST_PP_FOR_5_I(s, p, o, m) BOOST_PP_IF(p(6, s), m, BOOST_PP_TUPLE_EAT_2)(6, s) BOOST_PP_IF(p(6, s), BOOST_PP_FOR_6, BOOST_PP_TUPLE_EAT_4)(o(6, s), p, o, m)\n# define BOOST_PP_FOR_6_I(s, p, o, m) BOOST_PP_IF(p(7, s), m, BOOST_PP_TUPLE_EAT_2)(7, s) BOOST_PP_IF(p(7, s), BOOST_PP_FOR_7, BOOST_PP_TUPLE_EAT_4)(o(7, s), p, o, m)\n# define BOOST_PP_FOR_7_I(s, p, o, m) BOOST_PP_IF(p(8, s), m, BOOST_PP_TUPLE_EAT_2)(8, s) BOOST_PP_IF(p(8, s), BOOST_PP_FOR_8, BOOST_PP_TUPLE_EAT_4)(o(8, s), p, o, m)\n# define BOOST_PP_FOR_8_I(s, p, o, m) BOOST_PP_IF(p(9, s), m, BOOST_PP_TUPLE_EAT_2)(9, s) BOOST_PP_IF(p(9, s), BOOST_PP_FOR_9, BOOST_PP_TUPLE_EAT_4)(o(9, s), p, o, m)\n# define BOOST_PP_FOR_9_I(s, p, o, m) BOOST_PP_IF(p(10, s), m, BOOST_PP_TUPLE_EAT_2)(10, s) BOOST_PP_IF(p(10, s), BOOST_PP_FOR_10, BOOST_PP_TUPLE_EAT_4)(o(10, s), p, o, m)\n# define BOOST_PP_FOR_10_I(s, p, o, m) BOOST_PP_IF(p(11, s), m, BOOST_PP_TUPLE_EAT_2)(11, s) 
BOOST_PP_IF(p(11, s), BOOST_PP_FOR_11, BOOST_PP_TUPLE_EAT_4)(o(11, s), p, o, m)\n# define BOOST_PP_FOR_11_I(s, p, o, m) BOOST_PP_IF(p(12, s), m, BOOST_PP_TUPLE_EAT_2)(12, s) BOOST_PP_IF(p(12, s), BOOST_PP_FOR_12, BOOST_PP_TUPLE_EAT_4)(o(12, s), p, o, m)\n# define BOOST_PP_FOR_12_I(s, p, o, m) BOOST_PP_IF(p(13, s), m, BOOST_PP_TUPLE_EAT_2)(13, s) BOOST_PP_IF(p(13, s), BOOST_PP_FOR_13, BOOST_PP_TUPLE_EAT_4)(o(13, s), p, o, m)\n# define BOOST_PP_FOR_13_I(s, p, o, m) BOOST_PP_IF(p(14, s), m, BOOST_PP_TUPLE_EAT_2)(14, s) BOOST_PP_IF(p(14, s), BOOST_PP_FOR_14, BOOST_PP_TUPLE_EAT_4)(o(14, s), p, o, m)\n# define BOOST_PP_FOR_14_I(s, p, o, m) BOOST_PP_IF(p(15, s), m, BOOST_PP_TUPLE_EAT_2)(15, s) BOOST_PP_IF(p(15, s), BOOST_PP_FOR_15, BOOST_PP_TUPLE_EAT_4)(o(15, s), p, o, m)\n# define BOOST_PP_FOR_15_I(s, p, o, m) BOOST_PP_IF(p(16, s), m, BOOST_PP_TUPLE_EAT_2)(16, s) BOOST_PP_IF(p(16, s), BOOST_PP_FOR_16, BOOST_PP_TUPLE_EAT_4)(o(16, s), p, o, m)\n# define BOOST_PP_FOR_16_I(s, p, o, m) BOOST_PP_IF(p(17, s), m, BOOST_PP_TUPLE_EAT_2)(17, s) BOOST_PP_IF(p(17, s), BOOST_PP_FOR_17, BOOST_PP_TUPLE_EAT_4)(o(17, s), p, o, m)\n# define BOOST_PP_FOR_17_I(s, p, o, m) BOOST_PP_IF(p(18, s), m, BOOST_PP_TUPLE_EAT_2)(18, s) BOOST_PP_IF(p(18, s), BOOST_PP_FOR_18, BOOST_PP_TUPLE_EAT_4)(o(18, s), p, o, m)\n# define BOOST_PP_FOR_18_I(s, p, o, m) BOOST_PP_IF(p(19, s), m, BOOST_PP_TUPLE_EAT_2)(19, s) BOOST_PP_IF(p(19, s), BOOST_PP_FOR_19, BOOST_PP_TUPLE_EAT_4)(o(19, s), p, o, m)\n# define BOOST_PP_FOR_19_I(s, p, o, m) BOOST_PP_IF(p(20, s), m, BOOST_PP_TUPLE_EAT_2)(20, s) BOOST_PP_IF(p(20, s), BOOST_PP_FOR_20, BOOST_PP_TUPLE_EAT_4)(o(20, s), p, o, m)\n# define BOOST_PP_FOR_20_I(s, p, o, m) BOOST_PP_IF(p(21, s), m, BOOST_PP_TUPLE_EAT_2)(21, s) BOOST_PP_IF(p(21, s), BOOST_PP_FOR_21, BOOST_PP_TUPLE_EAT_4)(o(21, s), p, o, m)\n# define BOOST_PP_FOR_21_I(s, p, o, m) BOOST_PP_IF(p(22, s), m, BOOST_PP_TUPLE_EAT_2)(22, s) BOOST_PP_IF(p(22, s), BOOST_PP_FOR_22, BOOST_PP_TUPLE_EAT_4)(o(22, s), p, o, m)\n# 
define BOOST_PP_FOR_22_I(s, p, o, m) BOOST_PP_IF(p(23, s), m, BOOST_PP_TUPLE_EAT_2)(23, s) BOOST_PP_IF(p(23, s), BOOST_PP_FOR_23, BOOST_PP_TUPLE_EAT_4)(o(23, s), p, o, m)\n# define BOOST_PP_FOR_23_I(s, p, o, m) BOOST_PP_IF(p(24, s), m, BOOST_PP_TUPLE_EAT_2)(24, s) BOOST_PP_IF(p(24, s), BOOST_PP_FOR_24, BOOST_PP_TUPLE_EAT_4)(o(24, s), p, o, m)\n# define BOOST_PP_FOR_24_I(s, p, o, m) BOOST_PP_IF(p(25, s), m, BOOST_PP_TUPLE_EAT_2)(25, s) BOOST_PP_IF(p(25, s), BOOST_PP_FOR_25, BOOST_PP_TUPLE_EAT_4)(o(25, s), p, o, m)\n# define BOOST_PP_FOR_25_I(s, p, o, m) BOOST_PP_IF(p(26, s), m, BOOST_PP_TUPLE_EAT_2)(26, s) BOOST_PP_IF(p(26, s), BOOST_PP_FOR_26, BOOST_PP_TUPLE_EAT_4)(o(26, s), p, o, m)\n# define BOOST_PP_FOR_26_I(s, p, o, m) BOOST_PP_IF(p(27, s), m, BOOST_PP_TUPLE_EAT_2)(27, s) BOOST_PP_IF(p(27, s), BOOST_PP_FOR_27, BOOST_PP_TUPLE_EAT_4)(o(27, s), p, o, m)\n# define BOOST_PP_FOR_27_I(s, p, o, m) BOOST_PP_IF(p(28, s), m, BOOST_PP_TUPLE_EAT_2)(28, s) BOOST_PP_IF(p(28, s), BOOST_PP_FOR_28, BOOST_PP_TUPLE_EAT_4)(o(28, s), p, o, m)\n# define BOOST_PP_FOR_28_I(s, p, o, m) BOOST_PP_IF(p(29, s), m, BOOST_PP_TUPLE_EAT_2)(29, s) BOOST_PP_IF(p(29, s), BOOST_PP_FOR_29, BOOST_PP_TUPLE_EAT_4)(o(29, s), p, o, m)\n# define BOOST_PP_FOR_29_I(s, p, o, m) BOOST_PP_IF(p(30, s), m, BOOST_PP_TUPLE_EAT_2)(30, s) BOOST_PP_IF(p(30, s), BOOST_PP_FOR_30, BOOST_PP_TUPLE_EAT_4)(o(30, s), p, o, m)\n# define BOOST_PP_FOR_30_I(s, p, o, m) BOOST_PP_IF(p(31, s), m, BOOST_PP_TUPLE_EAT_2)(31, s) BOOST_PP_IF(p(31, s), BOOST_PP_FOR_31, BOOST_PP_TUPLE_EAT_4)(o(31, s), p, o, m)\n# define BOOST_PP_FOR_31_I(s, p, o, m) BOOST_PP_IF(p(32, s), m, BOOST_PP_TUPLE_EAT_2)(32, s) BOOST_PP_IF(p(32, s), BOOST_PP_FOR_32, BOOST_PP_TUPLE_EAT_4)(o(32, s), p, o, m)\n# define BOOST_PP_FOR_32_I(s, p, o, m) BOOST_PP_IF(p(33, s), m, BOOST_PP_TUPLE_EAT_2)(33, s) BOOST_PP_IF(p(33, s), BOOST_PP_FOR_33, BOOST_PP_TUPLE_EAT_4)(o(33, s), p, o, m)\n# define BOOST_PP_FOR_33_I(s, p, o, m) BOOST_PP_IF(p(34, s), m, 
BOOST_PP_TUPLE_EAT_2)(34, s) BOOST_PP_IF(p(34, s), BOOST_PP_FOR_34, BOOST_PP_TUPLE_EAT_4)(o(34, s), p, o, m)\n# define BOOST_PP_FOR_34_I(s, p, o, m) BOOST_PP_IF(p(35, s), m, BOOST_PP_TUPLE_EAT_2)(35, s) BOOST_PP_IF(p(35, s), BOOST_PP_FOR_35, BOOST_PP_TUPLE_EAT_4)(o(35, s), p, o, m)\n# define BOOST_PP_FOR_35_I(s, p, o, m) BOOST_PP_IF(p(36, s), m, BOOST_PP_TUPLE_EAT_2)(36, s) BOOST_PP_IF(p(36, s), BOOST_PP_FOR_36, BOOST_PP_TUPLE_EAT_4)(o(36, s), p, o, m)\n# define BOOST_PP_FOR_36_I(s, p, o, m) BOOST_PP_IF(p(37, s), m, BOOST_PP_TUPLE_EAT_2)(37, s) BOOST_PP_IF(p(37, s), BOOST_PP_FOR_37, BOOST_PP_TUPLE_EAT_4)(o(37, s), p, o, m)\n# define BOOST_PP_FOR_37_I(s, p, o, m) BOOST_PP_IF(p(38, s), m, BOOST_PP_TUPLE_EAT_2)(38, s) BOOST_PP_IF(p(38, s), BOOST_PP_FOR_38, BOOST_PP_TUPLE_EAT_4)(o(38, s), p, o, m)\n# define BOOST_PP_FOR_38_I(s, p, o, m) BOOST_PP_IF(p(39, s), m, BOOST_PP_TUPLE_EAT_2)(39, s) BOOST_PP_IF(p(39, s), BOOST_PP_FOR_39, BOOST_PP_TUPLE_EAT_4)(o(39, s), p, o, m)\n# define BOOST_PP_FOR_39_I(s, p, o, m) BOOST_PP_IF(p(40, s), m, BOOST_PP_TUPLE_EAT_2)(40, s) BOOST_PP_IF(p(40, s), BOOST_PP_FOR_40, BOOST_PP_TUPLE_EAT_4)(o(40, s), p, o, m)\n# define BOOST_PP_FOR_40_I(s, p, o, m) BOOST_PP_IF(p(41, s), m, BOOST_PP_TUPLE_EAT_2)(41, s) BOOST_PP_IF(p(41, s), BOOST_PP_FOR_41, BOOST_PP_TUPLE_EAT_4)(o(41, s), p, o, m)\n# define BOOST_PP_FOR_41_I(s, p, o, m) BOOST_PP_IF(p(42, s), m, BOOST_PP_TUPLE_EAT_2)(42, s) BOOST_PP_IF(p(42, s), BOOST_PP_FOR_42, BOOST_PP_TUPLE_EAT_4)(o(42, s), p, o, m)\n# define BOOST_PP_FOR_42_I(s, p, o, m) BOOST_PP_IF(p(43, s), m, BOOST_PP_TUPLE_EAT_2)(43, s) BOOST_PP_IF(p(43, s), BOOST_PP_FOR_43, BOOST_PP_TUPLE_EAT_4)(o(43, s), p, o, m)\n# define BOOST_PP_FOR_43_I(s, p, o, m) BOOST_PP_IF(p(44, s), m, BOOST_PP_TUPLE_EAT_2)(44, s) BOOST_PP_IF(p(44, s), BOOST_PP_FOR_44, BOOST_PP_TUPLE_EAT_4)(o(44, s), p, o, m)\n# define BOOST_PP_FOR_44_I(s, p, o, m) BOOST_PP_IF(p(45, s), m, BOOST_PP_TUPLE_EAT_2)(45, s) BOOST_PP_IF(p(45, s), BOOST_PP_FOR_45, 
BOOST_PP_TUPLE_EAT_4)(o(45, s), p, o, m)\n# define BOOST_PP_FOR_45_I(s, p, o, m) BOOST_PP_IF(p(46, s), m, BOOST_PP_TUPLE_EAT_2)(46, s) BOOST_PP_IF(p(46, s), BOOST_PP_FOR_46, BOOST_PP_TUPLE_EAT_4)(o(46, s), p, o, m)\n# define BOOST_PP_FOR_46_I(s, p, o, m) BOOST_PP_IF(p(47, s), m, BOOST_PP_TUPLE_EAT_2)(47, s) BOOST_PP_IF(p(47, s), BOOST_PP_FOR_47, BOOST_PP_TUPLE_EAT_4)(o(47, s), p, o, m)\n# define BOOST_PP_FOR_47_I(s, p, o, m) BOOST_PP_IF(p(48, s), m, BOOST_PP_TUPLE_EAT_2)(48, s) BOOST_PP_IF(p(48, s), BOOST_PP_FOR_48, BOOST_PP_TUPLE_EAT_4)(o(48, s), p, o, m)\n# define BOOST_PP_FOR_48_I(s, p, o, m) BOOST_PP_IF(p(49, s), m, BOOST_PP_TUPLE_EAT_2)(49, s) BOOST_PP_IF(p(49, s), BOOST_PP_FOR_49, BOOST_PP_TUPLE_EAT_4)(o(49, s), p, o, m)\n# define BOOST_PP_FOR_49_I(s, p, o, m) BOOST_PP_IF(p(50, s), m, BOOST_PP_TUPLE_EAT_2)(50, s) BOOST_PP_IF(p(50, s), BOOST_PP_FOR_50, BOOST_PP_TUPLE_EAT_4)(o(50, s), p, o, m)\n# define BOOST_PP_FOR_50_I(s, p, o, m) BOOST_PP_IF(p(51, s), m, BOOST_PP_TUPLE_EAT_2)(51, s) BOOST_PP_IF(p(51, s), BOOST_PP_FOR_51, BOOST_PP_TUPLE_EAT_4)(o(51, s), p, o, m)\n# define BOOST_PP_FOR_51_I(s, p, o, m) BOOST_PP_IF(p(52, s), m, BOOST_PP_TUPLE_EAT_2)(52, s) BOOST_PP_IF(p(52, s), BOOST_PP_FOR_52, BOOST_PP_TUPLE_EAT_4)(o(52, s), p, o, m)\n# define BOOST_PP_FOR_52_I(s, p, o, m) BOOST_PP_IF(p(53, s), m, BOOST_PP_TUPLE_EAT_2)(53, s) BOOST_PP_IF(p(53, s), BOOST_PP_FOR_53, BOOST_PP_TUPLE_EAT_4)(o(53, s), p, o, m)\n# define BOOST_PP_FOR_53_I(s, p, o, m) BOOST_PP_IF(p(54, s), m, BOOST_PP_TUPLE_EAT_2)(54, s) BOOST_PP_IF(p(54, s), BOOST_PP_FOR_54, BOOST_PP_TUPLE_EAT_4)(o(54, s), p, o, m)\n# define BOOST_PP_FOR_54_I(s, p, o, m) BOOST_PP_IF(p(55, s), m, BOOST_PP_TUPLE_EAT_2)(55, s) BOOST_PP_IF(p(55, s), BOOST_PP_FOR_55, BOOST_PP_TUPLE_EAT_4)(o(55, s), p, o, m)\n# define BOOST_PP_FOR_55_I(s, p, o, m) BOOST_PP_IF(p(56, s), m, BOOST_PP_TUPLE_EAT_2)(56, s) BOOST_PP_IF(p(56, s), BOOST_PP_FOR_56, BOOST_PP_TUPLE_EAT_4)(o(56, s), p, o, m)\n# define BOOST_PP_FOR_56_I(s, p, o, m) 
BOOST_PP_IF(p(57, s), m, BOOST_PP_TUPLE_EAT_2)(57, s) BOOST_PP_IF(p(57, s), BOOST_PP_FOR_57, BOOST_PP_TUPLE_EAT_4)(o(57, s), p, o, m)\n# define BOOST_PP_FOR_57_I(s, p, o, m) BOOST_PP_IF(p(58, s), m, BOOST_PP_TUPLE_EAT_2)(58, s) BOOST_PP_IF(p(58, s), BOOST_PP_FOR_58, BOOST_PP_TUPLE_EAT_4)(o(58, s), p, o, m)\n# define BOOST_PP_FOR_58_I(s, p, o, m) BOOST_PP_IF(p(59, s), m, BOOST_PP_TUPLE_EAT_2)(59, s) BOOST_PP_IF(p(59, s), BOOST_PP_FOR_59, BOOST_PP_TUPLE_EAT_4)(o(59, s), p, o, m)\n# define BOOST_PP_FOR_59_I(s, p, o, m) BOOST_PP_IF(p(60, s), m, BOOST_PP_TUPLE_EAT_2)(60, s) BOOST_PP_IF(p(60, s), BOOST_PP_FOR_60, BOOST_PP_TUPLE_EAT_4)(o(60, s), p, o, m)\n# define BOOST_PP_FOR_60_I(s, p, o, m) BOOST_PP_IF(p(61, s), m, BOOST_PP_TUPLE_EAT_2)(61, s) BOOST_PP_IF(p(61, s), BOOST_PP_FOR_61, BOOST_PP_TUPLE_EAT_4)(o(61, s), p, o, m)\n# define BOOST_PP_FOR_61_I(s, p, o, m) BOOST_PP_IF(p(62, s), m, BOOST_PP_TUPLE_EAT_2)(62, s) BOOST_PP_IF(p(62, s), BOOST_PP_FOR_62, BOOST_PP_TUPLE_EAT_4)(o(62, s), p, o, m)\n# define BOOST_PP_FOR_62_I(s, p, o, m) BOOST_PP_IF(p(63, s), m, BOOST_PP_TUPLE_EAT_2)(63, s) BOOST_PP_IF(p(63, s), BOOST_PP_FOR_63, BOOST_PP_TUPLE_EAT_4)(o(63, s), p, o, m)\n# define BOOST_PP_FOR_63_I(s, p, o, m) BOOST_PP_IF(p(64, s), m, BOOST_PP_TUPLE_EAT_2)(64, s) BOOST_PP_IF(p(64, s), BOOST_PP_FOR_64, BOOST_PP_TUPLE_EAT_4)(o(64, s), p, o, m)\n# define BOOST_PP_FOR_64_I(s, p, o, m) BOOST_PP_IF(p(65, s), m, BOOST_PP_TUPLE_EAT_2)(65, s) BOOST_PP_IF(p(65, s), BOOST_PP_FOR_65, BOOST_PP_TUPLE_EAT_4)(o(65, s), p, o, m)\n# define BOOST_PP_FOR_65_I(s, p, o, m) BOOST_PP_IF(p(66, s), m, BOOST_PP_TUPLE_EAT_2)(66, s) BOOST_PP_IF(p(66, s), BOOST_PP_FOR_66, BOOST_PP_TUPLE_EAT_4)(o(66, s), p, o, m)\n# define BOOST_PP_FOR_66_I(s, p, o, m) BOOST_PP_IF(p(67, s), m, BOOST_PP_TUPLE_EAT_2)(67, s) BOOST_PP_IF(p(67, s), BOOST_PP_FOR_67, BOOST_PP_TUPLE_EAT_4)(o(67, s), p, o, m)\n# define BOOST_PP_FOR_67_I(s, p, o, m) BOOST_PP_IF(p(68, s), m, BOOST_PP_TUPLE_EAT_2)(68, s) BOOST_PP_IF(p(68, s), 
BOOST_PP_FOR_68, BOOST_PP_TUPLE_EAT_4)(o(68, s), p, o, m)\n# define BOOST_PP_FOR_68_I(s, p, o, m) BOOST_PP_IF(p(69, s), m, BOOST_PP_TUPLE_EAT_2)(69, s) BOOST_PP_IF(p(69, s), BOOST_PP_FOR_69, BOOST_PP_TUPLE_EAT_4)(o(69, s), p, o, m)\n# define BOOST_PP_FOR_69_I(s, p, o, m) BOOST_PP_IF(p(70, s), m, BOOST_PP_TUPLE_EAT_2)(70, s) BOOST_PP_IF(p(70, s), BOOST_PP_FOR_70, BOOST_PP_TUPLE_EAT_4)(o(70, s), p, o, m)\n# define BOOST_PP_FOR_70_I(s, p, o, m) BOOST_PP_IF(p(71, s), m, BOOST_PP_TUPLE_EAT_2)(71, s) BOOST_PP_IF(p(71, s), BOOST_PP_FOR_71, BOOST_PP_TUPLE_EAT_4)(o(71, s), p, o, m)\n# define BOOST_PP_FOR_71_I(s, p, o, m) BOOST_PP_IF(p(72, s), m, BOOST_PP_TUPLE_EAT_2)(72, s) BOOST_PP_IF(p(72, s), BOOST_PP_FOR_72, BOOST_PP_TUPLE_EAT_4)(o(72, s), p, o, m)\n# define BOOST_PP_FOR_72_I(s, p, o, m) BOOST_PP_IF(p(73, s), m, BOOST_PP_TUPLE_EAT_2)(73, s) BOOST_PP_IF(p(73, s), BOOST_PP_FOR_73, BOOST_PP_TUPLE_EAT_4)(o(73, s), p, o, m)\n# define BOOST_PP_FOR_73_I(s, p, o, m) BOOST_PP_IF(p(74, s), m, BOOST_PP_TUPLE_EAT_2)(74, s) BOOST_PP_IF(p(74, s), BOOST_PP_FOR_74, BOOST_PP_TUPLE_EAT_4)(o(74, s), p, o, m)\n# define BOOST_PP_FOR_74_I(s, p, o, m) BOOST_PP_IF(p(75, s), m, BOOST_PP_TUPLE_EAT_2)(75, s) BOOST_PP_IF(p(75, s), BOOST_PP_FOR_75, BOOST_PP_TUPLE_EAT_4)(o(75, s), p, o, m)\n# define BOOST_PP_FOR_75_I(s, p, o, m) BOOST_PP_IF(p(76, s), m, BOOST_PP_TUPLE_EAT_2)(76, s) BOOST_PP_IF(p(76, s), BOOST_PP_FOR_76, BOOST_PP_TUPLE_EAT_4)(o(76, s), p, o, m)\n# define BOOST_PP_FOR_76_I(s, p, o, m) BOOST_PP_IF(p(77, s), m, BOOST_PP_TUPLE_EAT_2)(77, s) BOOST_PP_IF(p(77, s), BOOST_PP_FOR_77, BOOST_PP_TUPLE_EAT_4)(o(77, s), p, o, m)\n# define BOOST_PP_FOR_77_I(s, p, o, m) BOOST_PP_IF(p(78, s), m, BOOST_PP_TUPLE_EAT_2)(78, s) BOOST_PP_IF(p(78, s), BOOST_PP_FOR_78, BOOST_PP_TUPLE_EAT_4)(o(78, s), p, o, m)\n# define BOOST_PP_FOR_78_I(s, p, o, m) BOOST_PP_IF(p(79, s), m, BOOST_PP_TUPLE_EAT_2)(79, s) BOOST_PP_IF(p(79, s), BOOST_PP_FOR_79, BOOST_PP_TUPLE_EAT_4)(o(79, s), p, o, m)\n# define 
BOOST_PP_FOR_79_I(s, p, o, m) BOOST_PP_IF(p(80, s), m, BOOST_PP_TUPLE_EAT_2)(80, s) BOOST_PP_IF(p(80, s), BOOST_PP_FOR_80, BOOST_PP_TUPLE_EAT_4)(o(80, s), p, o, m)\n# define BOOST_PP_FOR_80_I(s, p, o, m) BOOST_PP_IF(p(81, s), m, BOOST_PP_TUPLE_EAT_2)(81, s) BOOST_PP_IF(p(81, s), BOOST_PP_FOR_81, BOOST_PP_TUPLE_EAT_4)(o(81, s), p, o, m)\n# define BOOST_PP_FOR_81_I(s, p, o, m) BOOST_PP_IF(p(82, s), m, BOOST_PP_TUPLE_EAT_2)(82, s) BOOST_PP_IF(p(82, s), BOOST_PP_FOR_82, BOOST_PP_TUPLE_EAT_4)(o(82, s), p, o, m)\n# define BOOST_PP_FOR_82_I(s, p, o, m) BOOST_PP_IF(p(83, s), m, BOOST_PP_TUPLE_EAT_2)(83, s) BOOST_PP_IF(p(83, s), BOOST_PP_FOR_83, BOOST_PP_TUPLE_EAT_4)(o(83, s), p, o, m)\n# define BOOST_PP_FOR_83_I(s, p, o, m) BOOST_PP_IF(p(84, s), m, BOOST_PP_TUPLE_EAT_2)(84, s) BOOST_PP_IF(p(84, s), BOOST_PP_FOR_84, BOOST_PP_TUPLE_EAT_4)(o(84, s), p, o, m)\n# define BOOST_PP_FOR_84_I(s, p, o, m) BOOST_PP_IF(p(85, s), m, BOOST_PP_TUPLE_EAT_2)(85, s) BOOST_PP_IF(p(85, s), BOOST_PP_FOR_85, BOOST_PP_TUPLE_EAT_4)(o(85, s), p, o, m)\n# define BOOST_PP_FOR_85_I(s, p, o, m) BOOST_PP_IF(p(86, s), m, BOOST_PP_TUPLE_EAT_2)(86, s) BOOST_PP_IF(p(86, s), BOOST_PP_FOR_86, BOOST_PP_TUPLE_EAT_4)(o(86, s), p, o, m)\n# define BOOST_PP_FOR_86_I(s, p, o, m) BOOST_PP_IF(p(87, s), m, BOOST_PP_TUPLE_EAT_2)(87, s) BOOST_PP_IF(p(87, s), BOOST_PP_FOR_87, BOOST_PP_TUPLE_EAT_4)(o(87, s), p, o, m)\n# define BOOST_PP_FOR_87_I(s, p, o, m) BOOST_PP_IF(p(88, s), m, BOOST_PP_TUPLE_EAT_2)(88, s) BOOST_PP_IF(p(88, s), BOOST_PP_FOR_88, BOOST_PP_TUPLE_EAT_4)(o(88, s), p, o, m)\n# define BOOST_PP_FOR_88_I(s, p, o, m) BOOST_PP_IF(p(89, s), m, BOOST_PP_TUPLE_EAT_2)(89, s) BOOST_PP_IF(p(89, s), BOOST_PP_FOR_89, BOOST_PP_TUPLE_EAT_4)(o(89, s), p, o, m)\n# define BOOST_PP_FOR_89_I(s, p, o, m) BOOST_PP_IF(p(90, s), m, BOOST_PP_TUPLE_EAT_2)(90, s) BOOST_PP_IF(p(90, s), BOOST_PP_FOR_90, BOOST_PP_TUPLE_EAT_4)(o(90, s), p, o, m)\n# define BOOST_PP_FOR_90_I(s, p, o, m) BOOST_PP_IF(p(91, s), m, BOOST_PP_TUPLE_EAT_2)(91, s) 
BOOST_PP_IF(p(91, s), BOOST_PP_FOR_91, BOOST_PP_TUPLE_EAT_4)(o(91, s), p, o, m)\n# define BOOST_PP_FOR_91_I(s, p, o, m) BOOST_PP_IF(p(92, s), m, BOOST_PP_TUPLE_EAT_2)(92, s) BOOST_PP_IF(p(92, s), BOOST_PP_FOR_92, BOOST_PP_TUPLE_EAT_4)(o(92, s), p, o, m)\n# define BOOST_PP_FOR_92_I(s, p, o, m) BOOST_PP_IF(p(93, s), m, BOOST_PP_TUPLE_EAT_2)(93, s) BOOST_PP_IF(p(93, s), BOOST_PP_FOR_93, BOOST_PP_TUPLE_EAT_4)(o(93, s), p, o, m)\n# define BOOST_PP_FOR_93_I(s, p, o, m) BOOST_PP_IF(p(94, s), m, BOOST_PP_TUPLE_EAT_2)(94, s) BOOST_PP_IF(p(94, s), BOOST_PP_FOR_94, BOOST_PP_TUPLE_EAT_4)(o(94, s), p, o, m)\n# define BOOST_PP_FOR_94_I(s, p, o, m) BOOST_PP_IF(p(95, s), m, BOOST_PP_TUPLE_EAT_2)(95, s) BOOST_PP_IF(p(95, s), BOOST_PP_FOR_95, BOOST_PP_TUPLE_EAT_4)(o(95, s), p, o, m)\n# define BOOST_PP_FOR_95_I(s, p, o, m) BOOST_PP_IF(p(96, s), m, BOOST_PP_TUPLE_EAT_2)(96, s) BOOST_PP_IF(p(96, s), BOOST_PP_FOR_96, BOOST_PP_TUPLE_EAT_4)(o(96, s), p, o, m)\n# define BOOST_PP_FOR_96_I(s, p, o, m) BOOST_PP_IF(p(97, s), m, BOOST_PP_TUPLE_EAT_2)(97, s) BOOST_PP_IF(p(97, s), BOOST_PP_FOR_97, BOOST_PP_TUPLE_EAT_4)(o(97, s), p, o, m)\n# define BOOST_PP_FOR_97_I(s, p, o, m) BOOST_PP_IF(p(98, s), m, BOOST_PP_TUPLE_EAT_2)(98, s) BOOST_PP_IF(p(98, s), BOOST_PP_FOR_98, BOOST_PP_TUPLE_EAT_4)(o(98, s), p, o, m)\n# define BOOST_PP_FOR_98_I(s, p, o, m) BOOST_PP_IF(p(99, s), m, BOOST_PP_TUPLE_EAT_2)(99, s) BOOST_PP_IF(p(99, s), BOOST_PP_FOR_99, BOOST_PP_TUPLE_EAT_4)(o(99, s), p, o, m)\n# define BOOST_PP_FOR_99_I(s, p, o, m) BOOST_PP_IF(p(100, s), m, BOOST_PP_TUPLE_EAT_2)(100, s) BOOST_PP_IF(p(100, s), BOOST_PP_FOR_100, BOOST_PP_TUPLE_EAT_4)(o(100, s), p, o, m)\n# define BOOST_PP_FOR_100_I(s, p, o, m) BOOST_PP_IF(p(101, s), m, BOOST_PP_TUPLE_EAT_2)(101, s) BOOST_PP_IF(p(101, s), BOOST_PP_FOR_101, BOOST_PP_TUPLE_EAT_4)(o(101, s), p, o, m)\n# define BOOST_PP_FOR_101_I(s, p, o, m) BOOST_PP_IF(p(102, s), m, BOOST_PP_TUPLE_EAT_2)(102, s) BOOST_PP_IF(p(102, s), BOOST_PP_FOR_102, BOOST_PP_TUPLE_EAT_4)(o(102, 
s), p, o, m)\n# define BOOST_PP_FOR_102_I(s, p, o, m) BOOST_PP_IF(p(103, s), m, BOOST_PP_TUPLE_EAT_2)(103, s) BOOST_PP_IF(p(103, s), BOOST_PP_FOR_103, BOOST_PP_TUPLE_EAT_4)(o(103, s), p, o, m)\n# define BOOST_PP_FOR_103_I(s, p, o, m) BOOST_PP_IF(p(104, s), m, BOOST_PP_TUPLE_EAT_2)(104, s) BOOST_PP_IF(p(104, s), BOOST_PP_FOR_104, BOOST_PP_TUPLE_EAT_4)(o(104, s), p, o, m)\n# define BOOST_PP_FOR_104_I(s, p, o, m) BOOST_PP_IF(p(105, s), m, BOOST_PP_TUPLE_EAT_2)(105, s) BOOST_PP_IF(p(105, s), BOOST_PP_FOR_105, BOOST_PP_TUPLE_EAT_4)(o(105, s), p, o, m)\n# define BOOST_PP_FOR_105_I(s, p, o, m) BOOST_PP_IF(p(106, s), m, BOOST_PP_TUPLE_EAT_2)(106, s) BOOST_PP_IF(p(106, s), BOOST_PP_FOR_106, BOOST_PP_TUPLE_EAT_4)(o(106, s), p, o, m)\n# define BOOST_PP_FOR_106_I(s, p, o, m) BOOST_PP_IF(p(107, s), m, BOOST_PP_TUPLE_EAT_2)(107, s) BOOST_PP_IF(p(107, s), BOOST_PP_FOR_107, BOOST_PP_TUPLE_EAT_4)(o(107, s), p, o, m)\n# define BOOST_PP_FOR_107_I(s, p, o, m) BOOST_PP_IF(p(108, s), m, BOOST_PP_TUPLE_EAT_2)(108, s) BOOST_PP_IF(p(108, s), BOOST_PP_FOR_108, BOOST_PP_TUPLE_EAT_4)(o(108, s), p, o, m)\n# define BOOST_PP_FOR_108_I(s, p, o, m) BOOST_PP_IF(p(109, s), m, BOOST_PP_TUPLE_EAT_2)(109, s) BOOST_PP_IF(p(109, s), BOOST_PP_FOR_109, BOOST_PP_TUPLE_EAT_4)(o(109, s), p, o, m)\n# define BOOST_PP_FOR_109_I(s, p, o, m) BOOST_PP_IF(p(110, s), m, BOOST_PP_TUPLE_EAT_2)(110, s) BOOST_PP_IF(p(110, s), BOOST_PP_FOR_110, BOOST_PP_TUPLE_EAT_4)(o(110, s), p, o, m)\n# define BOOST_PP_FOR_110_I(s, p, o, m) BOOST_PP_IF(p(111, s), m, BOOST_PP_TUPLE_EAT_2)(111, s) BOOST_PP_IF(p(111, s), BOOST_PP_FOR_111, BOOST_PP_TUPLE_EAT_4)(o(111, s), p, o, m)\n# define BOOST_PP_FOR_111_I(s, p, o, m) BOOST_PP_IF(p(112, s), m, BOOST_PP_TUPLE_EAT_2)(112, s) BOOST_PP_IF(p(112, s), BOOST_PP_FOR_112, BOOST_PP_TUPLE_EAT_4)(o(112, s), p, o, m)\n# define BOOST_PP_FOR_112_I(s, p, o, m) BOOST_PP_IF(p(113, s), m, BOOST_PP_TUPLE_EAT_2)(113, s) BOOST_PP_IF(p(113, s), BOOST_PP_FOR_113, BOOST_PP_TUPLE_EAT_4)(o(113, s), p, o, m)\n# 
define BOOST_PP_FOR_113_I(s, p, o, m) BOOST_PP_IF(p(114, s), m, BOOST_PP_TUPLE_EAT_2)(114, s) BOOST_PP_IF(p(114, s), BOOST_PP_FOR_114, BOOST_PP_TUPLE_EAT_4)(o(114, s), p, o, m)\n# define BOOST_PP_FOR_114_I(s, p, o, m) BOOST_PP_IF(p(115, s), m, BOOST_PP_TUPLE_EAT_2)(115, s) BOOST_PP_IF(p(115, s), BOOST_PP_FOR_115, BOOST_PP_TUPLE_EAT_4)(o(115, s), p, o, m)\n# define BOOST_PP_FOR_115_I(s, p, o, m) BOOST_PP_IF(p(116, s), m, BOOST_PP_TUPLE_EAT_2)(116, s) BOOST_PP_IF(p(116, s), BOOST_PP_FOR_116, BOOST_PP_TUPLE_EAT_4)(o(116, s), p, o, m)\n# define BOOST_PP_FOR_116_I(s, p, o, m) BOOST_PP_IF(p(117, s), m, BOOST_PP_TUPLE_EAT_2)(117, s) BOOST_PP_IF(p(117, s), BOOST_PP_FOR_117, BOOST_PP_TUPLE_EAT_4)(o(117, s), p, o, m)\n# define BOOST_PP_FOR_117_I(s, p, o, m) BOOST_PP_IF(p(118, s), m, BOOST_PP_TUPLE_EAT_2)(118, s) BOOST_PP_IF(p(118, s), BOOST_PP_FOR_118, BOOST_PP_TUPLE_EAT_4)(o(118, s), p, o, m)\n# define BOOST_PP_FOR_118_I(s, p, o, m) BOOST_PP_IF(p(119, s), m, BOOST_PP_TUPLE_EAT_2)(119, s) BOOST_PP_IF(p(119, s), BOOST_PP_FOR_119, BOOST_PP_TUPLE_EAT_4)(o(119, s), p, o, m)\n# define BOOST_PP_FOR_119_I(s, p, o, m) BOOST_PP_IF(p(120, s), m, BOOST_PP_TUPLE_EAT_2)(120, s) BOOST_PP_IF(p(120, s), BOOST_PP_FOR_120, BOOST_PP_TUPLE_EAT_4)(o(120, s), p, o, m)\n# define BOOST_PP_FOR_120_I(s, p, o, m) BOOST_PP_IF(p(121, s), m, BOOST_PP_TUPLE_EAT_2)(121, s) BOOST_PP_IF(p(121, s), BOOST_PP_FOR_121, BOOST_PP_TUPLE_EAT_4)(o(121, s), p, o, m)\n# define BOOST_PP_FOR_121_I(s, p, o, m) BOOST_PP_IF(p(122, s), m, BOOST_PP_TUPLE_EAT_2)(122, s) BOOST_PP_IF(p(122, s), BOOST_PP_FOR_122, BOOST_PP_TUPLE_EAT_4)(o(122, s), p, o, m)\n# define BOOST_PP_FOR_122_I(s, p, o, m) BOOST_PP_IF(p(123, s), m, BOOST_PP_TUPLE_EAT_2)(123, s) BOOST_PP_IF(p(123, s), BOOST_PP_FOR_123, BOOST_PP_TUPLE_EAT_4)(o(123, s), p, o, m)\n# define BOOST_PP_FOR_123_I(s, p, o, m) BOOST_PP_IF(p(124, s), m, BOOST_PP_TUPLE_EAT_2)(124, s) BOOST_PP_IF(p(124, s), BOOST_PP_FOR_124, BOOST_PP_TUPLE_EAT_4)(o(124, s), p, o, m)\n# define 
BOOST_PP_FOR_124_I(s, p, o, m) BOOST_PP_IF(p(125, s), m, BOOST_PP_TUPLE_EAT_2)(125, s) BOOST_PP_IF(p(125, s), BOOST_PP_FOR_125, BOOST_PP_TUPLE_EAT_4)(o(125, s), p, o, m)\n# define BOOST_PP_FOR_125_I(s, p, o, m) BOOST_PP_IF(p(126, s), m, BOOST_PP_TUPLE_EAT_2)(126, s) BOOST_PP_IF(p(126, s), BOOST_PP_FOR_126, BOOST_PP_TUPLE_EAT_4)(o(126, s), p, o, m)\n# define BOOST_PP_FOR_126_I(s, p, o, m) BOOST_PP_IF(p(127, s), m, BOOST_PP_TUPLE_EAT_2)(127, s) BOOST_PP_IF(p(127, s), BOOST_PP_FOR_127, BOOST_PP_TUPLE_EAT_4)(o(127, s), p, o, m)\n# define BOOST_PP_FOR_127_I(s, p, o, m) BOOST_PP_IF(p(128, s), m, BOOST_PP_TUPLE_EAT_2)(128, s) BOOST_PP_IF(p(128, s), BOOST_PP_FOR_128, BOOST_PP_TUPLE_EAT_4)(o(128, s), p, o, m)\n# define BOOST_PP_FOR_128_I(s, p, o, m) BOOST_PP_IF(p(129, s), m, BOOST_PP_TUPLE_EAT_2)(129, s) BOOST_PP_IF(p(129, s), BOOST_PP_FOR_129, BOOST_PP_TUPLE_EAT_4)(o(129, s), p, o, m)\n# define BOOST_PP_FOR_129_I(s, p, o, m) BOOST_PP_IF(p(130, s), m, BOOST_PP_TUPLE_EAT_2)(130, s) BOOST_PP_IF(p(130, s), BOOST_PP_FOR_130, BOOST_PP_TUPLE_EAT_4)(o(130, s), p, o, m)\n# define BOOST_PP_FOR_130_I(s, p, o, m) BOOST_PP_IF(p(131, s), m, BOOST_PP_TUPLE_EAT_2)(131, s) BOOST_PP_IF(p(131, s), BOOST_PP_FOR_131, BOOST_PP_TUPLE_EAT_4)(o(131, s), p, o, m)\n# define BOOST_PP_FOR_131_I(s, p, o, m) BOOST_PP_IF(p(132, s), m, BOOST_PP_TUPLE_EAT_2)(132, s) BOOST_PP_IF(p(132, s), BOOST_PP_FOR_132, BOOST_PP_TUPLE_EAT_4)(o(132, s), p, o, m)\n# define BOOST_PP_FOR_132_I(s, p, o, m) BOOST_PP_IF(p(133, s), m, BOOST_PP_TUPLE_EAT_2)(133, s) BOOST_PP_IF(p(133, s), BOOST_PP_FOR_133, BOOST_PP_TUPLE_EAT_4)(o(133, s), p, o, m)\n# define BOOST_PP_FOR_133_I(s, p, o, m) BOOST_PP_IF(p(134, s), m, BOOST_PP_TUPLE_EAT_2)(134, s) BOOST_PP_IF(p(134, s), BOOST_PP_FOR_134, BOOST_PP_TUPLE_EAT_4)(o(134, s), p, o, m)\n# define BOOST_PP_FOR_134_I(s, p, o, m) BOOST_PP_IF(p(135, s), m, BOOST_PP_TUPLE_EAT_2)(135, s) BOOST_PP_IF(p(135, s), BOOST_PP_FOR_135, BOOST_PP_TUPLE_EAT_4)(o(135, s), p, o, m)\n# define 
BOOST_PP_FOR_135_I(s, p, o, m) BOOST_PP_IF(p(136, s), m, BOOST_PP_TUPLE_EAT_2)(136, s) BOOST_PP_IF(p(136, s), BOOST_PP_FOR_136, BOOST_PP_TUPLE_EAT_4)(o(136, s), p, o, m)\n# define BOOST_PP_FOR_136_I(s, p, o, m) BOOST_PP_IF(p(137, s), m, BOOST_PP_TUPLE_EAT_2)(137, s) BOOST_PP_IF(p(137, s), BOOST_PP_FOR_137, BOOST_PP_TUPLE_EAT_4)(o(137, s), p, o, m)\n# define BOOST_PP_FOR_137_I(s, p, o, m) BOOST_PP_IF(p(138, s), m, BOOST_PP_TUPLE_EAT_2)(138, s) BOOST_PP_IF(p(138, s), BOOST_PP_FOR_138, BOOST_PP_TUPLE_EAT_4)(o(138, s), p, o, m)\n# define BOOST_PP_FOR_138_I(s, p, o, m) BOOST_PP_IF(p(139, s), m, BOOST_PP_TUPLE_EAT_2)(139, s) BOOST_PP_IF(p(139, s), BOOST_PP_FOR_139, BOOST_PP_TUPLE_EAT_4)(o(139, s), p, o, m)\n# define BOOST_PP_FOR_139_I(s, p, o, m) BOOST_PP_IF(p(140, s), m, BOOST_PP_TUPLE_EAT_2)(140, s) BOOST_PP_IF(p(140, s), BOOST_PP_FOR_140, BOOST_PP_TUPLE_EAT_4)(o(140, s), p, o, m)\n# define BOOST_PP_FOR_140_I(s, p, o, m) BOOST_PP_IF(p(141, s), m, BOOST_PP_TUPLE_EAT_2)(141, s) BOOST_PP_IF(p(141, s), BOOST_PP_FOR_141, BOOST_PP_TUPLE_EAT_4)(o(141, s), p, o, m)\n# define BOOST_PP_FOR_141_I(s, p, o, m) BOOST_PP_IF(p(142, s), m, BOOST_PP_TUPLE_EAT_2)(142, s) BOOST_PP_IF(p(142, s), BOOST_PP_FOR_142, BOOST_PP_TUPLE_EAT_4)(o(142, s), p, o, m)\n# define BOOST_PP_FOR_142_I(s, p, o, m) BOOST_PP_IF(p(143, s), m, BOOST_PP_TUPLE_EAT_2)(143, s) BOOST_PP_IF(p(143, s), BOOST_PP_FOR_143, BOOST_PP_TUPLE_EAT_4)(o(143, s), p, o, m)\n# define BOOST_PP_FOR_143_I(s, p, o, m) BOOST_PP_IF(p(144, s), m, BOOST_PP_TUPLE_EAT_2)(144, s) BOOST_PP_IF(p(144, s), BOOST_PP_FOR_144, BOOST_PP_TUPLE_EAT_4)(o(144, s), p, o, m)\n# define BOOST_PP_FOR_144_I(s, p, o, m) BOOST_PP_IF(p(145, s), m, BOOST_PP_TUPLE_EAT_2)(145, s) BOOST_PP_IF(p(145, s), BOOST_PP_FOR_145, BOOST_PP_TUPLE_EAT_4)(o(145, s), p, o, m)\n# define BOOST_PP_FOR_145_I(s, p, o, m) BOOST_PP_IF(p(146, s), m, BOOST_PP_TUPLE_EAT_2)(146, s) BOOST_PP_IF(p(146, s), BOOST_PP_FOR_146, BOOST_PP_TUPLE_EAT_4)(o(146, s), p, o, m)\n# define 
BOOST_PP_FOR_146_I(s, p, o, m) BOOST_PP_IF(p(147, s), m, BOOST_PP_TUPLE_EAT_2)(147, s) BOOST_PP_IF(p(147, s), BOOST_PP_FOR_147, BOOST_PP_TUPLE_EAT_4)(o(147, s), p, o, m)\n# define BOOST_PP_FOR_147_I(s, p, o, m) BOOST_PP_IF(p(148, s), m, BOOST_PP_TUPLE_EAT_2)(148, s) BOOST_PP_IF(p(148, s), BOOST_PP_FOR_148, BOOST_PP_TUPLE_EAT_4)(o(148, s), p, o, m)\n# define BOOST_PP_FOR_148_I(s, p, o, m) BOOST_PP_IF(p(149, s), m, BOOST_PP_TUPLE_EAT_2)(149, s) BOOST_PP_IF(p(149, s), BOOST_PP_FOR_149, BOOST_PP_TUPLE_EAT_4)(o(149, s), p, o, m)\n# define BOOST_PP_FOR_149_I(s, p, o, m) BOOST_PP_IF(p(150, s), m, BOOST_PP_TUPLE_EAT_2)(150, s) BOOST_PP_IF(p(150, s), BOOST_PP_FOR_150, BOOST_PP_TUPLE_EAT_4)(o(150, s), p, o, m)\n# define BOOST_PP_FOR_150_I(s, p, o, m) BOOST_PP_IF(p(151, s), m, BOOST_PP_TUPLE_EAT_2)(151, s) BOOST_PP_IF(p(151, s), BOOST_PP_FOR_151, BOOST_PP_TUPLE_EAT_4)(o(151, s), p, o, m)\n# define BOOST_PP_FOR_151_I(s, p, o, m) BOOST_PP_IF(p(152, s), m, BOOST_PP_TUPLE_EAT_2)(152, s) BOOST_PP_IF(p(152, s), BOOST_PP_FOR_152, BOOST_PP_TUPLE_EAT_4)(o(152, s), p, o, m)\n# define BOOST_PP_FOR_152_I(s, p, o, m) BOOST_PP_IF(p(153, s), m, BOOST_PP_TUPLE_EAT_2)(153, s) BOOST_PP_IF(p(153, s), BOOST_PP_FOR_153, BOOST_PP_TUPLE_EAT_4)(o(153, s), p, o, m)\n# define BOOST_PP_FOR_153_I(s, p, o, m) BOOST_PP_IF(p(154, s), m, BOOST_PP_TUPLE_EAT_2)(154, s) BOOST_PP_IF(p(154, s), BOOST_PP_FOR_154, BOOST_PP_TUPLE_EAT_4)(o(154, s), p, o, m)\n# define BOOST_PP_FOR_154_I(s, p, o, m) BOOST_PP_IF(p(155, s), m, BOOST_PP_TUPLE_EAT_2)(155, s) BOOST_PP_IF(p(155, s), BOOST_PP_FOR_155, BOOST_PP_TUPLE_EAT_4)(o(155, s), p, o, m)\n# define BOOST_PP_FOR_155_I(s, p, o, m) BOOST_PP_IF(p(156, s), m, BOOST_PP_TUPLE_EAT_2)(156, s) BOOST_PP_IF(p(156, s), BOOST_PP_FOR_156, BOOST_PP_TUPLE_EAT_4)(o(156, s), p, o, m)\n# define BOOST_PP_FOR_156_I(s, p, o, m) BOOST_PP_IF(p(157, s), m, BOOST_PP_TUPLE_EAT_2)(157, s) BOOST_PP_IF(p(157, s), BOOST_PP_FOR_157, BOOST_PP_TUPLE_EAT_4)(o(157, s), p, o, m)\n# define 
BOOST_PP_FOR_157_I(s, p, o, m) BOOST_PP_IF(p(158, s), m, BOOST_PP_TUPLE_EAT_2)(158, s) BOOST_PP_IF(p(158, s), BOOST_PP_FOR_158, BOOST_PP_TUPLE_EAT_4)(o(158, s), p, o, m)\n# define BOOST_PP_FOR_158_I(s, p, o, m) BOOST_PP_IF(p(159, s), m, BOOST_PP_TUPLE_EAT_2)(159, s) BOOST_PP_IF(p(159, s), BOOST_PP_FOR_159, BOOST_PP_TUPLE_EAT_4)(o(159, s), p, o, m)\n# define BOOST_PP_FOR_159_I(s, p, o, m) BOOST_PP_IF(p(160, s), m, BOOST_PP_TUPLE_EAT_2)(160, s) BOOST_PP_IF(p(160, s), BOOST_PP_FOR_160, BOOST_PP_TUPLE_EAT_4)(o(160, s), p, o, m)\n# define BOOST_PP_FOR_160_I(s, p, o, m) BOOST_PP_IF(p(161, s), m, BOOST_PP_TUPLE_EAT_2)(161, s) BOOST_PP_IF(p(161, s), BOOST_PP_FOR_161, BOOST_PP_TUPLE_EAT_4)(o(161, s), p, o, m)\n# define BOOST_PP_FOR_161_I(s, p, o, m) BOOST_PP_IF(p(162, s), m, BOOST_PP_TUPLE_EAT_2)(162, s) BOOST_PP_IF(p(162, s), BOOST_PP_FOR_162, BOOST_PP_TUPLE_EAT_4)(o(162, s), p, o, m)\n# define BOOST_PP_FOR_162_I(s, p, o, m) BOOST_PP_IF(p(163, s), m, BOOST_PP_TUPLE_EAT_2)(163, s) BOOST_PP_IF(p(163, s), BOOST_PP_FOR_163, BOOST_PP_TUPLE_EAT_4)(o(163, s), p, o, m)\n# define BOOST_PP_FOR_163_I(s, p, o, m) BOOST_PP_IF(p(164, s), m, BOOST_PP_TUPLE_EAT_2)(164, s) BOOST_PP_IF(p(164, s), BOOST_PP_FOR_164, BOOST_PP_TUPLE_EAT_4)(o(164, s), p, o, m)\n# define BOOST_PP_FOR_164_I(s, p, o, m) BOOST_PP_IF(p(165, s), m, BOOST_PP_TUPLE_EAT_2)(165, s) BOOST_PP_IF(p(165, s), BOOST_PP_FOR_165, BOOST_PP_TUPLE_EAT_4)(o(165, s), p, o, m)\n# define BOOST_PP_FOR_165_I(s, p, o, m) BOOST_PP_IF(p(166, s), m, BOOST_PP_TUPLE_EAT_2)(166, s) BOOST_PP_IF(p(166, s), BOOST_PP_FOR_166, BOOST_PP_TUPLE_EAT_4)(o(166, s), p, o, m)\n# define BOOST_PP_FOR_166_I(s, p, o, m) BOOST_PP_IF(p(167, s), m, BOOST_PP_TUPLE_EAT_2)(167, s) BOOST_PP_IF(p(167, s), BOOST_PP_FOR_167, BOOST_PP_TUPLE_EAT_4)(o(167, s), p, o, m)\n# define BOOST_PP_FOR_167_I(s, p, o, m) BOOST_PP_IF(p(168, s), m, BOOST_PP_TUPLE_EAT_2)(168, s) BOOST_PP_IF(p(168, s), BOOST_PP_FOR_168, BOOST_PP_TUPLE_EAT_4)(o(168, s), p, o, m)\n# define 
BOOST_PP_FOR_168_I(s, p, o, m) BOOST_PP_IF(p(169, s), m, BOOST_PP_TUPLE_EAT_2)(169, s) BOOST_PP_IF(p(169, s), BOOST_PP_FOR_169, BOOST_PP_TUPLE_EAT_4)(o(169, s), p, o, m)\n# define BOOST_PP_FOR_169_I(s, p, o, m) BOOST_PP_IF(p(170, s), m, BOOST_PP_TUPLE_EAT_2)(170, s) BOOST_PP_IF(p(170, s), BOOST_PP_FOR_170, BOOST_PP_TUPLE_EAT_4)(o(170, s), p, o, m)\n# define BOOST_PP_FOR_170_I(s, p, o, m) BOOST_PP_IF(p(171, s), m, BOOST_PP_TUPLE_EAT_2)(171, s) BOOST_PP_IF(p(171, s), BOOST_PP_FOR_171, BOOST_PP_TUPLE_EAT_4)(o(171, s), p, o, m)\n# define BOOST_PP_FOR_171_I(s, p, o, m) BOOST_PP_IF(p(172, s), m, BOOST_PP_TUPLE_EAT_2)(172, s) BOOST_PP_IF(p(172, s), BOOST_PP_FOR_172, BOOST_PP_TUPLE_EAT_4)(o(172, s), p, o, m)\n# define BOOST_PP_FOR_172_I(s, p, o, m) BOOST_PP_IF(p(173, s), m, BOOST_PP_TUPLE_EAT_2)(173, s) BOOST_PP_IF(p(173, s), BOOST_PP_FOR_173, BOOST_PP_TUPLE_EAT_4)(o(173, s), p, o, m)\n# define BOOST_PP_FOR_173_I(s, p, o, m) BOOST_PP_IF(p(174, s), m, BOOST_PP_TUPLE_EAT_2)(174, s) BOOST_PP_IF(p(174, s), BOOST_PP_FOR_174, BOOST_PP_TUPLE_EAT_4)(o(174, s), p, o, m)\n# define BOOST_PP_FOR_174_I(s, p, o, m) BOOST_PP_IF(p(175, s), m, BOOST_PP_TUPLE_EAT_2)(175, s) BOOST_PP_IF(p(175, s), BOOST_PP_FOR_175, BOOST_PP_TUPLE_EAT_4)(o(175, s), p, o, m)\n# define BOOST_PP_FOR_175_I(s, p, o, m) BOOST_PP_IF(p(176, s), m, BOOST_PP_TUPLE_EAT_2)(176, s) BOOST_PP_IF(p(176, s), BOOST_PP_FOR_176, BOOST_PP_TUPLE_EAT_4)(o(176, s), p, o, m)\n# define BOOST_PP_FOR_176_I(s, p, o, m) BOOST_PP_IF(p(177, s), m, BOOST_PP_TUPLE_EAT_2)(177, s) BOOST_PP_IF(p(177, s), BOOST_PP_FOR_177, BOOST_PP_TUPLE_EAT_4)(o(177, s), p, o, m)\n# define BOOST_PP_FOR_177_I(s, p, o, m) BOOST_PP_IF(p(178, s), m, BOOST_PP_TUPLE_EAT_2)(178, s) BOOST_PP_IF(p(178, s), BOOST_PP_FOR_178, BOOST_PP_TUPLE_EAT_4)(o(178, s), p, o, m)\n# define BOOST_PP_FOR_178_I(s, p, o, m) BOOST_PP_IF(p(179, s), m, BOOST_PP_TUPLE_EAT_2)(179, s) BOOST_PP_IF(p(179, s), BOOST_PP_FOR_179, BOOST_PP_TUPLE_EAT_4)(o(179, s), p, o, m)\n# define 
BOOST_PP_FOR_179_I(s, p, o, m) BOOST_PP_IF(p(180, s), m, BOOST_PP_TUPLE_EAT_2)(180, s) BOOST_PP_IF(p(180, s), BOOST_PP_FOR_180, BOOST_PP_TUPLE_EAT_4)(o(180, s), p, o, m)\n# define BOOST_PP_FOR_180_I(s, p, o, m) BOOST_PP_IF(p(181, s), m, BOOST_PP_TUPLE_EAT_2)(181, s) BOOST_PP_IF(p(181, s), BOOST_PP_FOR_181, BOOST_PP_TUPLE_EAT_4)(o(181, s), p, o, m)\n# define BOOST_PP_FOR_181_I(s, p, o, m) BOOST_PP_IF(p(182, s), m, BOOST_PP_TUPLE_EAT_2)(182, s) BOOST_PP_IF(p(182, s), BOOST_PP_FOR_182, BOOST_PP_TUPLE_EAT_4)(o(182, s), p, o, m)\n# define BOOST_PP_FOR_182_I(s, p, o, m) BOOST_PP_IF(p(183, s), m, BOOST_PP_TUPLE_EAT_2)(183, s) BOOST_PP_IF(p(183, s), BOOST_PP_FOR_183, BOOST_PP_TUPLE_EAT_4)(o(183, s), p, o, m)\n# define BOOST_PP_FOR_183_I(s, p, o, m) BOOST_PP_IF(p(184, s), m, BOOST_PP_TUPLE_EAT_2)(184, s) BOOST_PP_IF(p(184, s), BOOST_PP_FOR_184, BOOST_PP_TUPLE_EAT_4)(o(184, s), p, o, m)\n# define BOOST_PP_FOR_184_I(s, p, o, m) BOOST_PP_IF(p(185, s), m, BOOST_PP_TUPLE_EAT_2)(185, s) BOOST_PP_IF(p(185, s), BOOST_PP_FOR_185, BOOST_PP_TUPLE_EAT_4)(o(185, s), p, o, m)\n# define BOOST_PP_FOR_185_I(s, p, o, m) BOOST_PP_IF(p(186, s), m, BOOST_PP_TUPLE_EAT_2)(186, s) BOOST_PP_IF(p(186, s), BOOST_PP_FOR_186, BOOST_PP_TUPLE_EAT_4)(o(186, s), p, o, m)\n# define BOOST_PP_FOR_186_I(s, p, o, m) BOOST_PP_IF(p(187, s), m, BOOST_PP_TUPLE_EAT_2)(187, s) BOOST_PP_IF(p(187, s), BOOST_PP_FOR_187, BOOST_PP_TUPLE_EAT_4)(o(187, s), p, o, m)\n# define BOOST_PP_FOR_187_I(s, p, o, m) BOOST_PP_IF(p(188, s), m, BOOST_PP_TUPLE_EAT_2)(188, s) BOOST_PP_IF(p(188, s), BOOST_PP_FOR_188, BOOST_PP_TUPLE_EAT_4)(o(188, s), p, o, m)\n# define BOOST_PP_FOR_188_I(s, p, o, m) BOOST_PP_IF(p(189, s), m, BOOST_PP_TUPLE_EAT_2)(189, s) BOOST_PP_IF(p(189, s), BOOST_PP_FOR_189, BOOST_PP_TUPLE_EAT_4)(o(189, s), p, o, m)\n# define BOOST_PP_FOR_189_I(s, p, o, m) BOOST_PP_IF(p(190, s), m, BOOST_PP_TUPLE_EAT_2)(190, s) BOOST_PP_IF(p(190, s), BOOST_PP_FOR_190, BOOST_PP_TUPLE_EAT_4)(o(190, s), p, o, m)\n# define 
BOOST_PP_FOR_190_I(s, p, o, m) BOOST_PP_IF(p(191, s), m, BOOST_PP_TUPLE_EAT_2)(191, s) BOOST_PP_IF(p(191, s), BOOST_PP_FOR_191, BOOST_PP_TUPLE_EAT_4)(o(191, s), p, o, m)\n# define BOOST_PP_FOR_191_I(s, p, o, m) BOOST_PP_IF(p(192, s), m, BOOST_PP_TUPLE_EAT_2)(192, s) BOOST_PP_IF(p(192, s), BOOST_PP_FOR_192, BOOST_PP_TUPLE_EAT_4)(o(192, s), p, o, m)\n# define BOOST_PP_FOR_192_I(s, p, o, m) BOOST_PP_IF(p(193, s), m, BOOST_PP_TUPLE_EAT_2)(193, s) BOOST_PP_IF(p(193, s), BOOST_PP_FOR_193, BOOST_PP_TUPLE_EAT_4)(o(193, s), p, o, m)\n# define BOOST_PP_FOR_193_I(s, p, o, m) BOOST_PP_IF(p(194, s), m, BOOST_PP_TUPLE_EAT_2)(194, s) BOOST_PP_IF(p(194, s), BOOST_PP_FOR_194, BOOST_PP_TUPLE_EAT_4)(o(194, s), p, o, m)\n# define BOOST_PP_FOR_194_I(s, p, o, m) BOOST_PP_IF(p(195, s), m, BOOST_PP_TUPLE_EAT_2)(195, s) BOOST_PP_IF(p(195, s), BOOST_PP_FOR_195, BOOST_PP_TUPLE_EAT_4)(o(195, s), p, o, m)\n# define BOOST_PP_FOR_195_I(s, p, o, m) BOOST_PP_IF(p(196, s), m, BOOST_PP_TUPLE_EAT_2)(196, s) BOOST_PP_IF(p(196, s), BOOST_PP_FOR_196, BOOST_PP_TUPLE_EAT_4)(o(196, s), p, o, m)\n# define BOOST_PP_FOR_196_I(s, p, o, m) BOOST_PP_IF(p(197, s), m, BOOST_PP_TUPLE_EAT_2)(197, s) BOOST_PP_IF(p(197, s), BOOST_PP_FOR_197, BOOST_PP_TUPLE_EAT_4)(o(197, s), p, o, m)\n# define BOOST_PP_FOR_197_I(s, p, o, m) BOOST_PP_IF(p(198, s), m, BOOST_PP_TUPLE_EAT_2)(198, s) BOOST_PP_IF(p(198, s), BOOST_PP_FOR_198, BOOST_PP_TUPLE_EAT_4)(o(198, s), p, o, m)\n# define BOOST_PP_FOR_198_I(s, p, o, m) BOOST_PP_IF(p(199, s), m, BOOST_PP_TUPLE_EAT_2)(199, s) BOOST_PP_IF(p(199, s), BOOST_PP_FOR_199, BOOST_PP_TUPLE_EAT_4)(o(199, s), p, o, m)\n# define BOOST_PP_FOR_199_I(s, p, o, m) BOOST_PP_IF(p(200, s), m, BOOST_PP_TUPLE_EAT_2)(200, s) BOOST_PP_IF(p(200, s), BOOST_PP_FOR_200, BOOST_PP_TUPLE_EAT_4)(o(200, s), p, o, m)\n# define BOOST_PP_FOR_200_I(s, p, o, m) BOOST_PP_IF(p(201, s), m, BOOST_PP_TUPLE_EAT_2)(201, s) BOOST_PP_IF(p(201, s), BOOST_PP_FOR_201, BOOST_PP_TUPLE_EAT_4)(o(201, s), p, o, m)\n# define 
BOOST_PP_FOR_201_I(s, p, o, m) BOOST_PP_IF(p(202, s), m, BOOST_PP_TUPLE_EAT_2)(202, s) BOOST_PP_IF(p(202, s), BOOST_PP_FOR_202, BOOST_PP_TUPLE_EAT_4)(o(202, s), p, o, m)\n# define BOOST_PP_FOR_202_I(s, p, o, m) BOOST_PP_IF(p(203, s), m, BOOST_PP_TUPLE_EAT_2)(203, s) BOOST_PP_IF(p(203, s), BOOST_PP_FOR_203, BOOST_PP_TUPLE_EAT_4)(o(203, s), p, o, m)\n# define BOOST_PP_FOR_203_I(s, p, o, m) BOOST_PP_IF(p(204, s), m, BOOST_PP_TUPLE_EAT_2)(204, s) BOOST_PP_IF(p(204, s), BOOST_PP_FOR_204, BOOST_PP_TUPLE_EAT_4)(o(204, s), p, o, m)\n# define BOOST_PP_FOR_204_I(s, p, o, m) BOOST_PP_IF(p(205, s), m, BOOST_PP_TUPLE_EAT_2)(205, s) BOOST_PP_IF(p(205, s), BOOST_PP_FOR_205, BOOST_PP_TUPLE_EAT_4)(o(205, s), p, o, m)\n# define BOOST_PP_FOR_205_I(s, p, o, m) BOOST_PP_IF(p(206, s), m, BOOST_PP_TUPLE_EAT_2)(206, s) BOOST_PP_IF(p(206, s), BOOST_PP_FOR_206, BOOST_PP_TUPLE_EAT_4)(o(206, s), p, o, m)\n# define BOOST_PP_FOR_206_I(s, p, o, m) BOOST_PP_IF(p(207, s), m, BOOST_PP_TUPLE_EAT_2)(207, s) BOOST_PP_IF(p(207, s), BOOST_PP_FOR_207, BOOST_PP_TUPLE_EAT_4)(o(207, s), p, o, m)\n# define BOOST_PP_FOR_207_I(s, p, o, m) BOOST_PP_IF(p(208, s), m, BOOST_PP_TUPLE_EAT_2)(208, s) BOOST_PP_IF(p(208, s), BOOST_PP_FOR_208, BOOST_PP_TUPLE_EAT_4)(o(208, s), p, o, m)\n# define BOOST_PP_FOR_208_I(s, p, o, m) BOOST_PP_IF(p(209, s), m, BOOST_PP_TUPLE_EAT_2)(209, s) BOOST_PP_IF(p(209, s), BOOST_PP_FOR_209, BOOST_PP_TUPLE_EAT_4)(o(209, s), p, o, m)\n# define BOOST_PP_FOR_209_I(s, p, o, m) BOOST_PP_IF(p(210, s), m, BOOST_PP_TUPLE_EAT_2)(210, s) BOOST_PP_IF(p(210, s), BOOST_PP_FOR_210, BOOST_PP_TUPLE_EAT_4)(o(210, s), p, o, m)\n# define BOOST_PP_FOR_210_I(s, p, o, m) BOOST_PP_IF(p(211, s), m, BOOST_PP_TUPLE_EAT_2)(211, s) BOOST_PP_IF(p(211, s), BOOST_PP_FOR_211, BOOST_PP_TUPLE_EAT_4)(o(211, s), p, o, m)\n# define BOOST_PP_FOR_211_I(s, p, o, m) BOOST_PP_IF(p(212, s), m, BOOST_PP_TUPLE_EAT_2)(212, s) BOOST_PP_IF(p(212, s), BOOST_PP_FOR_212, BOOST_PP_TUPLE_EAT_4)(o(212, s), p, o, m)\n# define 
BOOST_PP_FOR_212_I(s, p, o, m) BOOST_PP_IF(p(213, s), m, BOOST_PP_TUPLE_EAT_2)(213, s) BOOST_PP_IF(p(213, s), BOOST_PP_FOR_213, BOOST_PP_TUPLE_EAT_4)(o(213, s), p, o, m)\n# define BOOST_PP_FOR_213_I(s, p, o, m) BOOST_PP_IF(p(214, s), m, BOOST_PP_TUPLE_EAT_2)(214, s) BOOST_PP_IF(p(214, s), BOOST_PP_FOR_214, BOOST_PP_TUPLE_EAT_4)(o(214, s), p, o, m)\n# define BOOST_PP_FOR_214_I(s, p, o, m) BOOST_PP_IF(p(215, s), m, BOOST_PP_TUPLE_EAT_2)(215, s) BOOST_PP_IF(p(215, s), BOOST_PP_FOR_215, BOOST_PP_TUPLE_EAT_4)(o(215, s), p, o, m)\n# define BOOST_PP_FOR_215_I(s, p, o, m) BOOST_PP_IF(p(216, s), m, BOOST_PP_TUPLE_EAT_2)(216, s) BOOST_PP_IF(p(216, s), BOOST_PP_FOR_216, BOOST_PP_TUPLE_EAT_4)(o(216, s), p, o, m)\n# define BOOST_PP_FOR_216_I(s, p, o, m) BOOST_PP_IF(p(217, s), m, BOOST_PP_TUPLE_EAT_2)(217, s) BOOST_PP_IF(p(217, s), BOOST_PP_FOR_217, BOOST_PP_TUPLE_EAT_4)(o(217, s), p, o, m)\n# define BOOST_PP_FOR_217_I(s, p, o, m) BOOST_PP_IF(p(218, s), m, BOOST_PP_TUPLE_EAT_2)(218, s) BOOST_PP_IF(p(218, s), BOOST_PP_FOR_218, BOOST_PP_TUPLE_EAT_4)(o(218, s), p, o, m)\n# define BOOST_PP_FOR_218_I(s, p, o, m) BOOST_PP_IF(p(219, s), m, BOOST_PP_TUPLE_EAT_2)(219, s) BOOST_PP_IF(p(219, s), BOOST_PP_FOR_219, BOOST_PP_TUPLE_EAT_4)(o(219, s), p, o, m)\n# define BOOST_PP_FOR_219_I(s, p, o, m) BOOST_PP_IF(p(220, s), m, BOOST_PP_TUPLE_EAT_2)(220, s) BOOST_PP_IF(p(220, s), BOOST_PP_FOR_220, BOOST_PP_TUPLE_EAT_4)(o(220, s), p, o, m)\n# define BOOST_PP_FOR_220_I(s, p, o, m) BOOST_PP_IF(p(221, s), m, BOOST_PP_TUPLE_EAT_2)(221, s) BOOST_PP_IF(p(221, s), BOOST_PP_FOR_221, BOOST_PP_TUPLE_EAT_4)(o(221, s), p, o, m)\n# define BOOST_PP_FOR_221_I(s, p, o, m) BOOST_PP_IF(p(222, s), m, BOOST_PP_TUPLE_EAT_2)(222, s) BOOST_PP_IF(p(222, s), BOOST_PP_FOR_222, BOOST_PP_TUPLE_EAT_4)(o(222, s), p, o, m)\n# define BOOST_PP_FOR_222_I(s, p, o, m) BOOST_PP_IF(p(223, s), m, BOOST_PP_TUPLE_EAT_2)(223, s) BOOST_PP_IF(p(223, s), BOOST_PP_FOR_223, BOOST_PP_TUPLE_EAT_4)(o(223, s), p, o, m)\n# define 
BOOST_PP_FOR_223_I(s, p, o, m) BOOST_PP_IF(p(224, s), m, BOOST_PP_TUPLE_EAT_2)(224, s) BOOST_PP_IF(p(224, s), BOOST_PP_FOR_224, BOOST_PP_TUPLE_EAT_4)(o(224, s), p, o, m)\n# define BOOST_PP_FOR_224_I(s, p, o, m) BOOST_PP_IF(p(225, s), m, BOOST_PP_TUPLE_EAT_2)(225, s) BOOST_PP_IF(p(225, s), BOOST_PP_FOR_225, BOOST_PP_TUPLE_EAT_4)(o(225, s), p, o, m)\n# define BOOST_PP_FOR_225_I(s, p, o, m) BOOST_PP_IF(p(226, s), m, BOOST_PP_TUPLE_EAT_2)(226, s) BOOST_PP_IF(p(226, s), BOOST_PP_FOR_226, BOOST_PP_TUPLE_EAT_4)(o(226, s), p, o, m)\n# define BOOST_PP_FOR_226_I(s, p, o, m) BOOST_PP_IF(p(227, s), m, BOOST_PP_TUPLE_EAT_2)(227, s) BOOST_PP_IF(p(227, s), BOOST_PP_FOR_227, BOOST_PP_TUPLE_EAT_4)(o(227, s), p, o, m)\n# define BOOST_PP_FOR_227_I(s, p, o, m) BOOST_PP_IF(p(228, s), m, BOOST_PP_TUPLE_EAT_2)(228, s) BOOST_PP_IF(p(228, s), BOOST_PP_FOR_228, BOOST_PP_TUPLE_EAT_4)(o(228, s), p, o, m)\n# define BOOST_PP_FOR_228_I(s, p, o, m) BOOST_PP_IF(p(229, s), m, BOOST_PP_TUPLE_EAT_2)(229, s) BOOST_PP_IF(p(229, s), BOOST_PP_FOR_229, BOOST_PP_TUPLE_EAT_4)(o(229, s), p, o, m)\n# define BOOST_PP_FOR_229_I(s, p, o, m) BOOST_PP_IF(p(230, s), m, BOOST_PP_TUPLE_EAT_2)(230, s) BOOST_PP_IF(p(230, s), BOOST_PP_FOR_230, BOOST_PP_TUPLE_EAT_4)(o(230, s), p, o, m)\n# define BOOST_PP_FOR_230_I(s, p, o, m) BOOST_PP_IF(p(231, s), m, BOOST_PP_TUPLE_EAT_2)(231, s) BOOST_PP_IF(p(231, s), BOOST_PP_FOR_231, BOOST_PP_TUPLE_EAT_4)(o(231, s), p, o, m)\n# define BOOST_PP_FOR_231_I(s, p, o, m) BOOST_PP_IF(p(232, s), m, BOOST_PP_TUPLE_EAT_2)(232, s) BOOST_PP_IF(p(232, s), BOOST_PP_FOR_232, BOOST_PP_TUPLE_EAT_4)(o(232, s), p, o, m)\n# define BOOST_PP_FOR_232_I(s, p, o, m) BOOST_PP_IF(p(233, s), m, BOOST_PP_TUPLE_EAT_2)(233, s) BOOST_PP_IF(p(233, s), BOOST_PP_FOR_233, BOOST_PP_TUPLE_EAT_4)(o(233, s), p, o, m)\n# define BOOST_PP_FOR_233_I(s, p, o, m) BOOST_PP_IF(p(234, s), m, BOOST_PP_TUPLE_EAT_2)(234, s) BOOST_PP_IF(p(234, s), BOOST_PP_FOR_234, BOOST_PP_TUPLE_EAT_4)(o(234, s), p, o, m)\n# define 
BOOST_PP_FOR_234_I(s, p, o, m) BOOST_PP_IF(p(235, s), m, BOOST_PP_TUPLE_EAT_2)(235, s) BOOST_PP_IF(p(235, s), BOOST_PP_FOR_235, BOOST_PP_TUPLE_EAT_4)(o(235, s), p, o, m)\n# define BOOST_PP_FOR_235_I(s, p, o, m) BOOST_PP_IF(p(236, s), m, BOOST_PP_TUPLE_EAT_2)(236, s) BOOST_PP_IF(p(236, s), BOOST_PP_FOR_236, BOOST_PP_TUPLE_EAT_4)(o(236, s), p, o, m)\n# define BOOST_PP_FOR_236_I(s, p, o, m) BOOST_PP_IF(p(237, s), m, BOOST_PP_TUPLE_EAT_2)(237, s) BOOST_PP_IF(p(237, s), BOOST_PP_FOR_237, BOOST_PP_TUPLE_EAT_4)(o(237, s), p, o, m)\n# define BOOST_PP_FOR_237_I(s, p, o, m) BOOST_PP_IF(p(238, s), m, BOOST_PP_TUPLE_EAT_2)(238, s) BOOST_PP_IF(p(238, s), BOOST_PP_FOR_238, BOOST_PP_TUPLE_EAT_4)(o(238, s), p, o, m)\n# define BOOST_PP_FOR_238_I(s, p, o, m) BOOST_PP_IF(p(239, s), m, BOOST_PP_TUPLE_EAT_2)(239, s) BOOST_PP_IF(p(239, s), BOOST_PP_FOR_239, BOOST_PP_TUPLE_EAT_4)(o(239, s), p, o, m)\n# define BOOST_PP_FOR_239_I(s, p, o, m) BOOST_PP_IF(p(240, s), m, BOOST_PP_TUPLE_EAT_2)(240, s) BOOST_PP_IF(p(240, s), BOOST_PP_FOR_240, BOOST_PP_TUPLE_EAT_4)(o(240, s), p, o, m)\n# define BOOST_PP_FOR_240_I(s, p, o, m) BOOST_PP_IF(p(241, s), m, BOOST_PP_TUPLE_EAT_2)(241, s) BOOST_PP_IF(p(241, s), BOOST_PP_FOR_241, BOOST_PP_TUPLE_EAT_4)(o(241, s), p, o, m)\n# define BOOST_PP_FOR_241_I(s, p, o, m) BOOST_PP_IF(p(242, s), m, BOOST_PP_TUPLE_EAT_2)(242, s) BOOST_PP_IF(p(242, s), BOOST_PP_FOR_242, BOOST_PP_TUPLE_EAT_4)(o(242, s), p, o, m)\n# define BOOST_PP_FOR_242_I(s, p, o, m) BOOST_PP_IF(p(243, s), m, BOOST_PP_TUPLE_EAT_2)(243, s) BOOST_PP_IF(p(243, s), BOOST_PP_FOR_243, BOOST_PP_TUPLE_EAT_4)(o(243, s), p, o, m)\n# define BOOST_PP_FOR_243_I(s, p, o, m) BOOST_PP_IF(p(244, s), m, BOOST_PP_TUPLE_EAT_2)(244, s) BOOST_PP_IF(p(244, s), BOOST_PP_FOR_244, BOOST_PP_TUPLE_EAT_4)(o(244, s), p, o, m)\n# define BOOST_PP_FOR_244_I(s, p, o, m) BOOST_PP_IF(p(245, s), m, BOOST_PP_TUPLE_EAT_2)(245, s) BOOST_PP_IF(p(245, s), BOOST_PP_FOR_245, BOOST_PP_TUPLE_EAT_4)(o(245, s), p, o, m)\n# define 
BOOST_PP_FOR_245_I(s, p, o, m) BOOST_PP_IF(p(246, s), m, BOOST_PP_TUPLE_EAT_2)(246, s) BOOST_PP_IF(p(246, s), BOOST_PP_FOR_246, BOOST_PP_TUPLE_EAT_4)(o(246, s), p, o, m)\n# define BOOST_PP_FOR_246_I(s, p, o, m) BOOST_PP_IF(p(247, s), m, BOOST_PP_TUPLE_EAT_2)(247, s) BOOST_PP_IF(p(247, s), BOOST_PP_FOR_247, BOOST_PP_TUPLE_EAT_4)(o(247, s), p, o, m)\n# define BOOST_PP_FOR_247_I(s, p, o, m) BOOST_PP_IF(p(248, s), m, BOOST_PP_TUPLE_EAT_2)(248, s) BOOST_PP_IF(p(248, s), BOOST_PP_FOR_248, BOOST_PP_TUPLE_EAT_4)(o(248, s), p, o, m)\n# define BOOST_PP_FOR_248_I(s, p, o, m) BOOST_PP_IF(p(249, s), m, BOOST_PP_TUPLE_EAT_2)(249, s) BOOST_PP_IF(p(249, s), BOOST_PP_FOR_249, BOOST_PP_TUPLE_EAT_4)(o(249, s), p, o, m)\n# define BOOST_PP_FOR_249_I(s, p, o, m) BOOST_PP_IF(p(250, s), m, BOOST_PP_TUPLE_EAT_2)(250, s) BOOST_PP_IF(p(250, s), BOOST_PP_FOR_250, BOOST_PP_TUPLE_EAT_4)(o(250, s), p, o, m)\n# define BOOST_PP_FOR_250_I(s, p, o, m) BOOST_PP_IF(p(251, s), m, BOOST_PP_TUPLE_EAT_2)(251, s) BOOST_PP_IF(p(251, s), BOOST_PP_FOR_251, BOOST_PP_TUPLE_EAT_4)(o(251, s), p, o, m)\n# define BOOST_PP_FOR_251_I(s, p, o, m) BOOST_PP_IF(p(252, s), m, BOOST_PP_TUPLE_EAT_2)(252, s) BOOST_PP_IF(p(252, s), BOOST_PP_FOR_252, BOOST_PP_TUPLE_EAT_4)(o(252, s), p, o, m)\n# define BOOST_PP_FOR_252_I(s, p, o, m) BOOST_PP_IF(p(253, s), m, BOOST_PP_TUPLE_EAT_2)(253, s) BOOST_PP_IF(p(253, s), BOOST_PP_FOR_253, BOOST_PP_TUPLE_EAT_4)(o(253, s), p, o, m)\n# define BOOST_PP_FOR_253_I(s, p, o, m) BOOST_PP_IF(p(254, s), m, BOOST_PP_TUPLE_EAT_2)(254, s) BOOST_PP_IF(p(254, s), BOOST_PP_FOR_254, BOOST_PP_TUPLE_EAT_4)(o(254, s), p, o, m)\n# define BOOST_PP_FOR_254_I(s, p, o, m) BOOST_PP_IF(p(255, s), m, BOOST_PP_TUPLE_EAT_2)(255, s) BOOST_PP_IF(p(255, s), BOOST_PP_FOR_255, BOOST_PP_TUPLE_EAT_4)(o(255, s), p, o, m)\n# define BOOST_PP_FOR_255_I(s, p, o, m) BOOST_PP_IF(p(256, s), m, BOOST_PP_TUPLE_EAT_2)(256, s) BOOST_PP_IF(p(256, s), BOOST_PP_FOR_256, BOOST_PP_TUPLE_EAT_4)(o(256, s), p, o, m)\n# define 
BOOST_PP_FOR_256_I(s, p, o, m) BOOST_PP_IF(p(257, s), m, BOOST_PP_TUPLE_EAT_2)(257, s) BOOST_PP_IF(p(257, s), BOOST_PP_FOR_257, BOOST_PP_TUPLE_EAT_4)(o(257, s), p, o, m)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/detail/for.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_DETAIL_FOR_HPP\n# define BOOST_PREPROCESSOR_REPETITION_DETAIL_FOR_HPP\n#\n# include <boost/preprocessor/control/expr_iif.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_FOR_1(s, p, o, m) BOOST_PP_FOR_1_C(BOOST_PP_BOOL(p(2, s)), s, p, o, m)\n# define BOOST_PP_FOR_2(s, p, o, m) BOOST_PP_FOR_2_C(BOOST_PP_BOOL(p(3, s)), s, p, o, m)\n# define BOOST_PP_FOR_3(s, p, o, m) BOOST_PP_FOR_3_C(BOOST_PP_BOOL(p(4, s)), s, p, o, m)\n# define BOOST_PP_FOR_4(s, p, o, m) BOOST_PP_FOR_4_C(BOOST_PP_BOOL(p(5, s)), s, p, o, m)\n# define BOOST_PP_FOR_5(s, p, o, m) BOOST_PP_FOR_5_C(BOOST_PP_BOOL(p(6, s)), s, p, o, m)\n# define BOOST_PP_FOR_6(s, p, o, m) BOOST_PP_FOR_6_C(BOOST_PP_BOOL(p(7, s)), s, p, o, m)\n# define BOOST_PP_FOR_7(s, p, o, m) BOOST_PP_FOR_7_C(BOOST_PP_BOOL(p(8, s)), s, p, o, m)\n# define BOOST_PP_FOR_8(s, p, o, m) BOOST_PP_FOR_8_C(BOOST_PP_BOOL(p(9, s)), s, p, o, m)\n# define BOOST_PP_FOR_9(s, p, o, m) BOOST_PP_FOR_9_C(BOOST_PP_BOOL(p(10, s)), s, p, o, m)\n# define BOOST_PP_FOR_10(s, p, o, m) BOOST_PP_FOR_10_C(BOOST_PP_BOOL(p(11, s)), s, p, o, m)\n# define BOOST_PP_FOR_11(s, p, o, m) BOOST_PP_FOR_11_C(BOOST_PP_BOOL(p(12, s)), s, p, o, m)\n# define BOOST_PP_FOR_12(s, p, o, m) BOOST_PP_FOR_12_C(BOOST_PP_BOOL(p(13, s)), s, p, o, m)\n# define BOOST_PP_FOR_13(s, p, o, m) BOOST_PP_FOR_13_C(BOOST_PP_BOOL(p(14, s)), s, p, o, m)\n# define BOOST_PP_FOR_14(s, p, o, m) BOOST_PP_FOR_14_C(BOOST_PP_BOOL(p(15, s)), s, p, o, m)\n# define BOOST_PP_FOR_15(s, 
p, o, m) BOOST_PP_FOR_15_C(BOOST_PP_BOOL(p(16, s)), s, p, o, m)\n# define BOOST_PP_FOR_16(s, p, o, m) BOOST_PP_FOR_16_C(BOOST_PP_BOOL(p(17, s)), s, p, o, m)\n# define BOOST_PP_FOR_17(s, p, o, m) BOOST_PP_FOR_17_C(BOOST_PP_BOOL(p(18, s)), s, p, o, m)\n# define BOOST_PP_FOR_18(s, p, o, m) BOOST_PP_FOR_18_C(BOOST_PP_BOOL(p(19, s)), s, p, o, m)\n# define BOOST_PP_FOR_19(s, p, o, m) BOOST_PP_FOR_19_C(BOOST_PP_BOOL(p(20, s)), s, p, o, m)\n# define BOOST_PP_FOR_20(s, p, o, m) BOOST_PP_FOR_20_C(BOOST_PP_BOOL(p(21, s)), s, p, o, m)\n# define BOOST_PP_FOR_21(s, p, o, m) BOOST_PP_FOR_21_C(BOOST_PP_BOOL(p(22, s)), s, p, o, m)\n# define BOOST_PP_FOR_22(s, p, o, m) BOOST_PP_FOR_22_C(BOOST_PP_BOOL(p(23, s)), s, p, o, m)\n# define BOOST_PP_FOR_23(s, p, o, m) BOOST_PP_FOR_23_C(BOOST_PP_BOOL(p(24, s)), s, p, o, m)\n# define BOOST_PP_FOR_24(s, p, o, m) BOOST_PP_FOR_24_C(BOOST_PP_BOOL(p(25, s)), s, p, o, m)\n# define BOOST_PP_FOR_25(s, p, o, m) BOOST_PP_FOR_25_C(BOOST_PP_BOOL(p(26, s)), s, p, o, m)\n# define BOOST_PP_FOR_26(s, p, o, m) BOOST_PP_FOR_26_C(BOOST_PP_BOOL(p(27, s)), s, p, o, m)\n# define BOOST_PP_FOR_27(s, p, o, m) BOOST_PP_FOR_27_C(BOOST_PP_BOOL(p(28, s)), s, p, o, m)\n# define BOOST_PP_FOR_28(s, p, o, m) BOOST_PP_FOR_28_C(BOOST_PP_BOOL(p(29, s)), s, p, o, m)\n# define BOOST_PP_FOR_29(s, p, o, m) BOOST_PP_FOR_29_C(BOOST_PP_BOOL(p(30, s)), s, p, o, m)\n# define BOOST_PP_FOR_30(s, p, o, m) BOOST_PP_FOR_30_C(BOOST_PP_BOOL(p(31, s)), s, p, o, m)\n# define BOOST_PP_FOR_31(s, p, o, m) BOOST_PP_FOR_31_C(BOOST_PP_BOOL(p(32, s)), s, p, o, m)\n# define BOOST_PP_FOR_32(s, p, o, m) BOOST_PP_FOR_32_C(BOOST_PP_BOOL(p(33, s)), s, p, o, m)\n# define BOOST_PP_FOR_33(s, p, o, m) BOOST_PP_FOR_33_C(BOOST_PP_BOOL(p(34, s)), s, p, o, m)\n# define BOOST_PP_FOR_34(s, p, o, m) BOOST_PP_FOR_34_C(BOOST_PP_BOOL(p(35, s)), s, p, o, m)\n# define BOOST_PP_FOR_35(s, p, o, m) BOOST_PP_FOR_35_C(BOOST_PP_BOOL(p(36, s)), s, p, o, m)\n# define BOOST_PP_FOR_36(s, p, o, m) BOOST_PP_FOR_36_C(BOOST_PP_BOOL(p(37, 
s)), s, p, o, m)\n# define BOOST_PP_FOR_37(s, p, o, m) BOOST_PP_FOR_37_C(BOOST_PP_BOOL(p(38, s)), s, p, o, m)\n# define BOOST_PP_FOR_38(s, p, o, m) BOOST_PP_FOR_38_C(BOOST_PP_BOOL(p(39, s)), s, p, o, m)\n# define BOOST_PP_FOR_39(s, p, o, m) BOOST_PP_FOR_39_C(BOOST_PP_BOOL(p(40, s)), s, p, o, m)\n# define BOOST_PP_FOR_40(s, p, o, m) BOOST_PP_FOR_40_C(BOOST_PP_BOOL(p(41, s)), s, p, o, m)\n# define BOOST_PP_FOR_41(s, p, o, m) BOOST_PP_FOR_41_C(BOOST_PP_BOOL(p(42, s)), s, p, o, m)\n# define BOOST_PP_FOR_42(s, p, o, m) BOOST_PP_FOR_42_C(BOOST_PP_BOOL(p(43, s)), s, p, o, m)\n# define BOOST_PP_FOR_43(s, p, o, m) BOOST_PP_FOR_43_C(BOOST_PP_BOOL(p(44, s)), s, p, o, m)\n# define BOOST_PP_FOR_44(s, p, o, m) BOOST_PP_FOR_44_C(BOOST_PP_BOOL(p(45, s)), s, p, o, m)\n# define BOOST_PP_FOR_45(s, p, o, m) BOOST_PP_FOR_45_C(BOOST_PP_BOOL(p(46, s)), s, p, o, m)\n# define BOOST_PP_FOR_46(s, p, o, m) BOOST_PP_FOR_46_C(BOOST_PP_BOOL(p(47, s)), s, p, o, m)\n# define BOOST_PP_FOR_47(s, p, o, m) BOOST_PP_FOR_47_C(BOOST_PP_BOOL(p(48, s)), s, p, o, m)\n# define BOOST_PP_FOR_48(s, p, o, m) BOOST_PP_FOR_48_C(BOOST_PP_BOOL(p(49, s)), s, p, o, m)\n# define BOOST_PP_FOR_49(s, p, o, m) BOOST_PP_FOR_49_C(BOOST_PP_BOOL(p(50, s)), s, p, o, m)\n# define BOOST_PP_FOR_50(s, p, o, m) BOOST_PP_FOR_50_C(BOOST_PP_BOOL(p(51, s)), s, p, o, m)\n# define BOOST_PP_FOR_51(s, p, o, m) BOOST_PP_FOR_51_C(BOOST_PP_BOOL(p(52, s)), s, p, o, m)\n# define BOOST_PP_FOR_52(s, p, o, m) BOOST_PP_FOR_52_C(BOOST_PP_BOOL(p(53, s)), s, p, o, m)\n# define BOOST_PP_FOR_53(s, p, o, m) BOOST_PP_FOR_53_C(BOOST_PP_BOOL(p(54, s)), s, p, o, m)\n# define BOOST_PP_FOR_54(s, p, o, m) BOOST_PP_FOR_54_C(BOOST_PP_BOOL(p(55, s)), s, p, o, m)\n# define BOOST_PP_FOR_55(s, p, o, m) BOOST_PP_FOR_55_C(BOOST_PP_BOOL(p(56, s)), s, p, o, m)\n# define BOOST_PP_FOR_56(s, p, o, m) BOOST_PP_FOR_56_C(BOOST_PP_BOOL(p(57, s)), s, p, o, m)\n# define BOOST_PP_FOR_57(s, p, o, m) BOOST_PP_FOR_57_C(BOOST_PP_BOOL(p(58, s)), s, p, o, m)\n# define BOOST_PP_FOR_58(s, 
p, o, m) BOOST_PP_FOR_58_C(BOOST_PP_BOOL(p(59, s)), s, p, o, m)\n# define BOOST_PP_FOR_59(s, p, o, m) BOOST_PP_FOR_59_C(BOOST_PP_BOOL(p(60, s)), s, p, o, m)\n# define BOOST_PP_FOR_60(s, p, o, m) BOOST_PP_FOR_60_C(BOOST_PP_BOOL(p(61, s)), s, p, o, m)\n# define BOOST_PP_FOR_61(s, p, o, m) BOOST_PP_FOR_61_C(BOOST_PP_BOOL(p(62, s)), s, p, o, m)\n# define BOOST_PP_FOR_62(s, p, o, m) BOOST_PP_FOR_62_C(BOOST_PP_BOOL(p(63, s)), s, p, o, m)\n# define BOOST_PP_FOR_63(s, p, o, m) BOOST_PP_FOR_63_C(BOOST_PP_BOOL(p(64, s)), s, p, o, m)\n# define BOOST_PP_FOR_64(s, p, o, m) BOOST_PP_FOR_64_C(BOOST_PP_BOOL(p(65, s)), s, p, o, m)\n# define BOOST_PP_FOR_65(s, p, o, m) BOOST_PP_FOR_65_C(BOOST_PP_BOOL(p(66, s)), s, p, o, m)\n# define BOOST_PP_FOR_66(s, p, o, m) BOOST_PP_FOR_66_C(BOOST_PP_BOOL(p(67, s)), s, p, o, m)\n# define BOOST_PP_FOR_67(s, p, o, m) BOOST_PP_FOR_67_C(BOOST_PP_BOOL(p(68, s)), s, p, o, m)\n# define BOOST_PP_FOR_68(s, p, o, m) BOOST_PP_FOR_68_C(BOOST_PP_BOOL(p(69, s)), s, p, o, m)\n# define BOOST_PP_FOR_69(s, p, o, m) BOOST_PP_FOR_69_C(BOOST_PP_BOOL(p(70, s)), s, p, o, m)\n# define BOOST_PP_FOR_70(s, p, o, m) BOOST_PP_FOR_70_C(BOOST_PP_BOOL(p(71, s)), s, p, o, m)\n# define BOOST_PP_FOR_71(s, p, o, m) BOOST_PP_FOR_71_C(BOOST_PP_BOOL(p(72, s)), s, p, o, m)\n# define BOOST_PP_FOR_72(s, p, o, m) BOOST_PP_FOR_72_C(BOOST_PP_BOOL(p(73, s)), s, p, o, m)\n# define BOOST_PP_FOR_73(s, p, o, m) BOOST_PP_FOR_73_C(BOOST_PP_BOOL(p(74, s)), s, p, o, m)\n# define BOOST_PP_FOR_74(s, p, o, m) BOOST_PP_FOR_74_C(BOOST_PP_BOOL(p(75, s)), s, p, o, m)\n# define BOOST_PP_FOR_75(s, p, o, m) BOOST_PP_FOR_75_C(BOOST_PP_BOOL(p(76, s)), s, p, o, m)\n# define BOOST_PP_FOR_76(s, p, o, m) BOOST_PP_FOR_76_C(BOOST_PP_BOOL(p(77, s)), s, p, o, m)\n# define BOOST_PP_FOR_77(s, p, o, m) BOOST_PP_FOR_77_C(BOOST_PP_BOOL(p(78, s)), s, p, o, m)\n# define BOOST_PP_FOR_78(s, p, o, m) BOOST_PP_FOR_78_C(BOOST_PP_BOOL(p(79, s)), s, p, o, m)\n# define BOOST_PP_FOR_79(s, p, o, m) BOOST_PP_FOR_79_C(BOOST_PP_BOOL(p(80, 
s)), s, p, o, m)\n# define BOOST_PP_FOR_80(s, p, o, m) BOOST_PP_FOR_80_C(BOOST_PP_BOOL(p(81, s)), s, p, o, m)\n# define BOOST_PP_FOR_81(s, p, o, m) BOOST_PP_FOR_81_C(BOOST_PP_BOOL(p(82, s)), s, p, o, m)\n# define BOOST_PP_FOR_82(s, p, o, m) BOOST_PP_FOR_82_C(BOOST_PP_BOOL(p(83, s)), s, p, o, m)\n# define BOOST_PP_FOR_83(s, p, o, m) BOOST_PP_FOR_83_C(BOOST_PP_BOOL(p(84, s)), s, p, o, m)\n# define BOOST_PP_FOR_84(s, p, o, m) BOOST_PP_FOR_84_C(BOOST_PP_BOOL(p(85, s)), s, p, o, m)\n# define BOOST_PP_FOR_85(s, p, o, m) BOOST_PP_FOR_85_C(BOOST_PP_BOOL(p(86, s)), s, p, o, m)\n# define BOOST_PP_FOR_86(s, p, o, m) BOOST_PP_FOR_86_C(BOOST_PP_BOOL(p(87, s)), s, p, o, m)\n# define BOOST_PP_FOR_87(s, p, o, m) BOOST_PP_FOR_87_C(BOOST_PP_BOOL(p(88, s)), s, p, o, m)\n# define BOOST_PP_FOR_88(s, p, o, m) BOOST_PP_FOR_88_C(BOOST_PP_BOOL(p(89, s)), s, p, o, m)\n# define BOOST_PP_FOR_89(s, p, o, m) BOOST_PP_FOR_89_C(BOOST_PP_BOOL(p(90, s)), s, p, o, m)\n# define BOOST_PP_FOR_90(s, p, o, m) BOOST_PP_FOR_90_C(BOOST_PP_BOOL(p(91, s)), s, p, o, m)\n# define BOOST_PP_FOR_91(s, p, o, m) BOOST_PP_FOR_91_C(BOOST_PP_BOOL(p(92, s)), s, p, o, m)\n# define BOOST_PP_FOR_92(s, p, o, m) BOOST_PP_FOR_92_C(BOOST_PP_BOOL(p(93, s)), s, p, o, m)\n# define BOOST_PP_FOR_93(s, p, o, m) BOOST_PP_FOR_93_C(BOOST_PP_BOOL(p(94, s)), s, p, o, m)\n# define BOOST_PP_FOR_94(s, p, o, m) BOOST_PP_FOR_94_C(BOOST_PP_BOOL(p(95, s)), s, p, o, m)\n# define BOOST_PP_FOR_95(s, p, o, m) BOOST_PP_FOR_95_C(BOOST_PP_BOOL(p(96, s)), s, p, o, m)\n# define BOOST_PP_FOR_96(s, p, o, m) BOOST_PP_FOR_96_C(BOOST_PP_BOOL(p(97, s)), s, p, o, m)\n# define BOOST_PP_FOR_97(s, p, o, m) BOOST_PP_FOR_97_C(BOOST_PP_BOOL(p(98, s)), s, p, o, m)\n# define BOOST_PP_FOR_98(s, p, o, m) BOOST_PP_FOR_98_C(BOOST_PP_BOOL(p(99, s)), s, p, o, m)\n# define BOOST_PP_FOR_99(s, p, o, m) BOOST_PP_FOR_99_C(BOOST_PP_BOOL(p(100, s)), s, p, o, m)\n# define BOOST_PP_FOR_100(s, p, o, m) BOOST_PP_FOR_100_C(BOOST_PP_BOOL(p(101, s)), s, p, o, m)\n# define 
BOOST_PP_FOR_101(s, p, o, m) BOOST_PP_FOR_101_C(BOOST_PP_BOOL(p(102, s)), s, p, o, m)\n# define BOOST_PP_FOR_102(s, p, o, m) BOOST_PP_FOR_102_C(BOOST_PP_BOOL(p(103, s)), s, p, o, m)\n# define BOOST_PP_FOR_103(s, p, o, m) BOOST_PP_FOR_103_C(BOOST_PP_BOOL(p(104, s)), s, p, o, m)\n# define BOOST_PP_FOR_104(s, p, o, m) BOOST_PP_FOR_104_C(BOOST_PP_BOOL(p(105, s)), s, p, o, m)\n# define BOOST_PP_FOR_105(s, p, o, m) BOOST_PP_FOR_105_C(BOOST_PP_BOOL(p(106, s)), s, p, o, m)\n# define BOOST_PP_FOR_106(s, p, o, m) BOOST_PP_FOR_106_C(BOOST_PP_BOOL(p(107, s)), s, p, o, m)\n# define BOOST_PP_FOR_107(s, p, o, m) BOOST_PP_FOR_107_C(BOOST_PP_BOOL(p(108, s)), s, p, o, m)\n# define BOOST_PP_FOR_108(s, p, o, m) BOOST_PP_FOR_108_C(BOOST_PP_BOOL(p(109, s)), s, p, o, m)\n# define BOOST_PP_FOR_109(s, p, o, m) BOOST_PP_FOR_109_C(BOOST_PP_BOOL(p(110, s)), s, p, o, m)\n# define BOOST_PP_FOR_110(s, p, o, m) BOOST_PP_FOR_110_C(BOOST_PP_BOOL(p(111, s)), s, p, o, m)\n# define BOOST_PP_FOR_111(s, p, o, m) BOOST_PP_FOR_111_C(BOOST_PP_BOOL(p(112, s)), s, p, o, m)\n# define BOOST_PP_FOR_112(s, p, o, m) BOOST_PP_FOR_112_C(BOOST_PP_BOOL(p(113, s)), s, p, o, m)\n# define BOOST_PP_FOR_113(s, p, o, m) BOOST_PP_FOR_113_C(BOOST_PP_BOOL(p(114, s)), s, p, o, m)\n# define BOOST_PP_FOR_114(s, p, o, m) BOOST_PP_FOR_114_C(BOOST_PP_BOOL(p(115, s)), s, p, o, m)\n# define BOOST_PP_FOR_115(s, p, o, m) BOOST_PP_FOR_115_C(BOOST_PP_BOOL(p(116, s)), s, p, o, m)\n# define BOOST_PP_FOR_116(s, p, o, m) BOOST_PP_FOR_116_C(BOOST_PP_BOOL(p(117, s)), s, p, o, m)\n# define BOOST_PP_FOR_117(s, p, o, m) BOOST_PP_FOR_117_C(BOOST_PP_BOOL(p(118, s)), s, p, o, m)\n# define BOOST_PP_FOR_118(s, p, o, m) BOOST_PP_FOR_118_C(BOOST_PP_BOOL(p(119, s)), s, p, o, m)\n# define BOOST_PP_FOR_119(s, p, o, m) BOOST_PP_FOR_119_C(BOOST_PP_BOOL(p(120, s)), s, p, o, m)\n# define BOOST_PP_FOR_120(s, p, o, m) BOOST_PP_FOR_120_C(BOOST_PP_BOOL(p(121, s)), s, p, o, m)\n# define BOOST_PP_FOR_121(s, p, o, m) BOOST_PP_FOR_121_C(BOOST_PP_BOOL(p(122, s)), s, p, 
o, m)\n# define BOOST_PP_FOR_122(s, p, o, m) BOOST_PP_FOR_122_C(BOOST_PP_BOOL(p(123, s)), s, p, o, m)\n# define BOOST_PP_FOR_123(s, p, o, m) BOOST_PP_FOR_123_C(BOOST_PP_BOOL(p(124, s)), s, p, o, m)\n# define BOOST_PP_FOR_124(s, p, o, m) BOOST_PP_FOR_124_C(BOOST_PP_BOOL(p(125, s)), s, p, o, m)\n# define BOOST_PP_FOR_125(s, p, o, m) BOOST_PP_FOR_125_C(BOOST_PP_BOOL(p(126, s)), s, p, o, m)\n# define BOOST_PP_FOR_126(s, p, o, m) BOOST_PP_FOR_126_C(BOOST_PP_BOOL(p(127, s)), s, p, o, m)\n# define BOOST_PP_FOR_127(s, p, o, m) BOOST_PP_FOR_127_C(BOOST_PP_BOOL(p(128, s)), s, p, o, m)\n# define BOOST_PP_FOR_128(s, p, o, m) BOOST_PP_FOR_128_C(BOOST_PP_BOOL(p(129, s)), s, p, o, m)\n# define BOOST_PP_FOR_129(s, p, o, m) BOOST_PP_FOR_129_C(BOOST_PP_BOOL(p(130, s)), s, p, o, m)\n# define BOOST_PP_FOR_130(s, p, o, m) BOOST_PP_FOR_130_C(BOOST_PP_BOOL(p(131, s)), s, p, o, m)\n# define BOOST_PP_FOR_131(s, p, o, m) BOOST_PP_FOR_131_C(BOOST_PP_BOOL(p(132, s)), s, p, o, m)\n# define BOOST_PP_FOR_132(s, p, o, m) BOOST_PP_FOR_132_C(BOOST_PP_BOOL(p(133, s)), s, p, o, m)\n# define BOOST_PP_FOR_133(s, p, o, m) BOOST_PP_FOR_133_C(BOOST_PP_BOOL(p(134, s)), s, p, o, m)\n# define BOOST_PP_FOR_134(s, p, o, m) BOOST_PP_FOR_134_C(BOOST_PP_BOOL(p(135, s)), s, p, o, m)\n# define BOOST_PP_FOR_135(s, p, o, m) BOOST_PP_FOR_135_C(BOOST_PP_BOOL(p(136, s)), s, p, o, m)\n# define BOOST_PP_FOR_136(s, p, o, m) BOOST_PP_FOR_136_C(BOOST_PP_BOOL(p(137, s)), s, p, o, m)\n# define BOOST_PP_FOR_137(s, p, o, m) BOOST_PP_FOR_137_C(BOOST_PP_BOOL(p(138, s)), s, p, o, m)\n# define BOOST_PP_FOR_138(s, p, o, m) BOOST_PP_FOR_138_C(BOOST_PP_BOOL(p(139, s)), s, p, o, m)\n# define BOOST_PP_FOR_139(s, p, o, m) BOOST_PP_FOR_139_C(BOOST_PP_BOOL(p(140, s)), s, p, o, m)\n# define BOOST_PP_FOR_140(s, p, o, m) BOOST_PP_FOR_140_C(BOOST_PP_BOOL(p(141, s)), s, p, o, m)\n# define BOOST_PP_FOR_141(s, p, o, m) BOOST_PP_FOR_141_C(BOOST_PP_BOOL(p(142, s)), s, p, o, m)\n# define BOOST_PP_FOR_142(s, p, o, m) 
BOOST_PP_FOR_142_C(BOOST_PP_BOOL(p(143, s)), s, p, o, m)\n# define BOOST_PP_FOR_143(s, p, o, m) BOOST_PP_FOR_143_C(BOOST_PP_BOOL(p(144, s)), s, p, o, m)\n# define BOOST_PP_FOR_144(s, p, o, m) BOOST_PP_FOR_144_C(BOOST_PP_BOOL(p(145, s)), s, p, o, m)\n# define BOOST_PP_FOR_145(s, p, o, m) BOOST_PP_FOR_145_C(BOOST_PP_BOOL(p(146, s)), s, p, o, m)\n# define BOOST_PP_FOR_146(s, p, o, m) BOOST_PP_FOR_146_C(BOOST_PP_BOOL(p(147, s)), s, p, o, m)\n# define BOOST_PP_FOR_147(s, p, o, m) BOOST_PP_FOR_147_C(BOOST_PP_BOOL(p(148, s)), s, p, o, m)\n# define BOOST_PP_FOR_148(s, p, o, m) BOOST_PP_FOR_148_C(BOOST_PP_BOOL(p(149, s)), s, p, o, m)\n# define BOOST_PP_FOR_149(s, p, o, m) BOOST_PP_FOR_149_C(BOOST_PP_BOOL(p(150, s)), s, p, o, m)\n# define BOOST_PP_FOR_150(s, p, o, m) BOOST_PP_FOR_150_C(BOOST_PP_BOOL(p(151, s)), s, p, o, m)\n# define BOOST_PP_FOR_151(s, p, o, m) BOOST_PP_FOR_151_C(BOOST_PP_BOOL(p(152, s)), s, p, o, m)\n# define BOOST_PP_FOR_152(s, p, o, m) BOOST_PP_FOR_152_C(BOOST_PP_BOOL(p(153, s)), s, p, o, m)\n# define BOOST_PP_FOR_153(s, p, o, m) BOOST_PP_FOR_153_C(BOOST_PP_BOOL(p(154, s)), s, p, o, m)\n# define BOOST_PP_FOR_154(s, p, o, m) BOOST_PP_FOR_154_C(BOOST_PP_BOOL(p(155, s)), s, p, o, m)\n# define BOOST_PP_FOR_155(s, p, o, m) BOOST_PP_FOR_155_C(BOOST_PP_BOOL(p(156, s)), s, p, o, m)\n# define BOOST_PP_FOR_156(s, p, o, m) BOOST_PP_FOR_156_C(BOOST_PP_BOOL(p(157, s)), s, p, o, m)\n# define BOOST_PP_FOR_157(s, p, o, m) BOOST_PP_FOR_157_C(BOOST_PP_BOOL(p(158, s)), s, p, o, m)\n# define BOOST_PP_FOR_158(s, p, o, m) BOOST_PP_FOR_158_C(BOOST_PP_BOOL(p(159, s)), s, p, o, m)\n# define BOOST_PP_FOR_159(s, p, o, m) BOOST_PP_FOR_159_C(BOOST_PP_BOOL(p(160, s)), s, p, o, m)\n# define BOOST_PP_FOR_160(s, p, o, m) BOOST_PP_FOR_160_C(BOOST_PP_BOOL(p(161, s)), s, p, o, m)\n# define BOOST_PP_FOR_161(s, p, o, m) BOOST_PP_FOR_161_C(BOOST_PP_BOOL(p(162, s)), s, p, o, m)\n# define BOOST_PP_FOR_162(s, p, o, m) BOOST_PP_FOR_162_C(BOOST_PP_BOOL(p(163, s)), s, p, o, m)\n# define 
BOOST_PP_FOR_163(s, p, o, m) BOOST_PP_FOR_163_C(BOOST_PP_BOOL(p(164, s)), s, p, o, m)\n# define BOOST_PP_FOR_164(s, p, o, m) BOOST_PP_FOR_164_C(BOOST_PP_BOOL(p(165, s)), s, p, o, m)\n# define BOOST_PP_FOR_165(s, p, o, m) BOOST_PP_FOR_165_C(BOOST_PP_BOOL(p(166, s)), s, p, o, m)\n# define BOOST_PP_FOR_166(s, p, o, m) BOOST_PP_FOR_166_C(BOOST_PP_BOOL(p(167, s)), s, p, o, m)\n# define BOOST_PP_FOR_167(s, p, o, m) BOOST_PP_FOR_167_C(BOOST_PP_BOOL(p(168, s)), s, p, o, m)\n# define BOOST_PP_FOR_168(s, p, o, m) BOOST_PP_FOR_168_C(BOOST_PP_BOOL(p(169, s)), s, p, o, m)\n# define BOOST_PP_FOR_169(s, p, o, m) BOOST_PP_FOR_169_C(BOOST_PP_BOOL(p(170, s)), s, p, o, m)\n# define BOOST_PP_FOR_170(s, p, o, m) BOOST_PP_FOR_170_C(BOOST_PP_BOOL(p(171, s)), s, p, o, m)\n# define BOOST_PP_FOR_171(s, p, o, m) BOOST_PP_FOR_171_C(BOOST_PP_BOOL(p(172, s)), s, p, o, m)\n# define BOOST_PP_FOR_172(s, p, o, m) BOOST_PP_FOR_172_C(BOOST_PP_BOOL(p(173, s)), s, p, o, m)\n# define BOOST_PP_FOR_173(s, p, o, m) BOOST_PP_FOR_173_C(BOOST_PP_BOOL(p(174, s)), s, p, o, m)\n# define BOOST_PP_FOR_174(s, p, o, m) BOOST_PP_FOR_174_C(BOOST_PP_BOOL(p(175, s)), s, p, o, m)\n# define BOOST_PP_FOR_175(s, p, o, m) BOOST_PP_FOR_175_C(BOOST_PP_BOOL(p(176, s)), s, p, o, m)\n# define BOOST_PP_FOR_176(s, p, o, m) BOOST_PP_FOR_176_C(BOOST_PP_BOOL(p(177, s)), s, p, o, m)\n# define BOOST_PP_FOR_177(s, p, o, m) BOOST_PP_FOR_177_C(BOOST_PP_BOOL(p(178, s)), s, p, o, m)\n# define BOOST_PP_FOR_178(s, p, o, m) BOOST_PP_FOR_178_C(BOOST_PP_BOOL(p(179, s)), s, p, o, m)\n# define BOOST_PP_FOR_179(s, p, o, m) BOOST_PP_FOR_179_C(BOOST_PP_BOOL(p(180, s)), s, p, o, m)\n# define BOOST_PP_FOR_180(s, p, o, m) BOOST_PP_FOR_180_C(BOOST_PP_BOOL(p(181, s)), s, p, o, m)\n# define BOOST_PP_FOR_181(s, p, o, m) BOOST_PP_FOR_181_C(BOOST_PP_BOOL(p(182, s)), s, p, o, m)\n# define BOOST_PP_FOR_182(s, p, o, m) BOOST_PP_FOR_182_C(BOOST_PP_BOOL(p(183, s)), s, p, o, m)\n# define BOOST_PP_FOR_183(s, p, o, m) BOOST_PP_FOR_183_C(BOOST_PP_BOOL(p(184, s)), s, p, 
o, m)\n# define BOOST_PP_FOR_184(s, p, o, m) BOOST_PP_FOR_184_C(BOOST_PP_BOOL(p(185, s)), s, p, o, m)\n# define BOOST_PP_FOR_185(s, p, o, m) BOOST_PP_FOR_185_C(BOOST_PP_BOOL(p(186, s)), s, p, o, m)\n# define BOOST_PP_FOR_186(s, p, o, m) BOOST_PP_FOR_186_C(BOOST_PP_BOOL(p(187, s)), s, p, o, m)\n# define BOOST_PP_FOR_187(s, p, o, m) BOOST_PP_FOR_187_C(BOOST_PP_BOOL(p(188, s)), s, p, o, m)\n# define BOOST_PP_FOR_188(s, p, o, m) BOOST_PP_FOR_188_C(BOOST_PP_BOOL(p(189, s)), s, p, o, m)\n# define BOOST_PP_FOR_189(s, p, o, m) BOOST_PP_FOR_189_C(BOOST_PP_BOOL(p(190, s)), s, p, o, m)\n# define BOOST_PP_FOR_190(s, p, o, m) BOOST_PP_FOR_190_C(BOOST_PP_BOOL(p(191, s)), s, p, o, m)\n# define BOOST_PP_FOR_191(s, p, o, m) BOOST_PP_FOR_191_C(BOOST_PP_BOOL(p(192, s)), s, p, o, m)\n# define BOOST_PP_FOR_192(s, p, o, m) BOOST_PP_FOR_192_C(BOOST_PP_BOOL(p(193, s)), s, p, o, m)\n# define BOOST_PP_FOR_193(s, p, o, m) BOOST_PP_FOR_193_C(BOOST_PP_BOOL(p(194, s)), s, p, o, m)\n# define BOOST_PP_FOR_194(s, p, o, m) BOOST_PP_FOR_194_C(BOOST_PP_BOOL(p(195, s)), s, p, o, m)\n# define BOOST_PP_FOR_195(s, p, o, m) BOOST_PP_FOR_195_C(BOOST_PP_BOOL(p(196, s)), s, p, o, m)\n# define BOOST_PP_FOR_196(s, p, o, m) BOOST_PP_FOR_196_C(BOOST_PP_BOOL(p(197, s)), s, p, o, m)\n# define BOOST_PP_FOR_197(s, p, o, m) BOOST_PP_FOR_197_C(BOOST_PP_BOOL(p(198, s)), s, p, o, m)\n# define BOOST_PP_FOR_198(s, p, o, m) BOOST_PP_FOR_198_C(BOOST_PP_BOOL(p(199, s)), s, p, o, m)\n# define BOOST_PP_FOR_199(s, p, o, m) BOOST_PP_FOR_199_C(BOOST_PP_BOOL(p(200, s)), s, p, o, m)\n# define BOOST_PP_FOR_200(s, p, o, m) BOOST_PP_FOR_200_C(BOOST_PP_BOOL(p(201, s)), s, p, o, m)\n# define BOOST_PP_FOR_201(s, p, o, m) BOOST_PP_FOR_201_C(BOOST_PP_BOOL(p(202, s)), s, p, o, m)\n# define BOOST_PP_FOR_202(s, p, o, m) BOOST_PP_FOR_202_C(BOOST_PP_BOOL(p(203, s)), s, p, o, m)\n# define BOOST_PP_FOR_203(s, p, o, m) BOOST_PP_FOR_203_C(BOOST_PP_BOOL(p(204, s)), s, p, o, m)\n# define BOOST_PP_FOR_204(s, p, o, m) 
BOOST_PP_FOR_204_C(BOOST_PP_BOOL(p(205, s)), s, p, o, m)\n# define BOOST_PP_FOR_205(s, p, o, m) BOOST_PP_FOR_205_C(BOOST_PP_BOOL(p(206, s)), s, p, o, m)\n# define BOOST_PP_FOR_206(s, p, o, m) BOOST_PP_FOR_206_C(BOOST_PP_BOOL(p(207, s)), s, p, o, m)\n# define BOOST_PP_FOR_207(s, p, o, m) BOOST_PP_FOR_207_C(BOOST_PP_BOOL(p(208, s)), s, p, o, m)\n# define BOOST_PP_FOR_208(s, p, o, m) BOOST_PP_FOR_208_C(BOOST_PP_BOOL(p(209, s)), s, p, o, m)\n# define BOOST_PP_FOR_209(s, p, o, m) BOOST_PP_FOR_209_C(BOOST_PP_BOOL(p(210, s)), s, p, o, m)\n# define BOOST_PP_FOR_210(s, p, o, m) BOOST_PP_FOR_210_C(BOOST_PP_BOOL(p(211, s)), s, p, o, m)\n# define BOOST_PP_FOR_211(s, p, o, m) BOOST_PP_FOR_211_C(BOOST_PP_BOOL(p(212, s)), s, p, o, m)\n# define BOOST_PP_FOR_212(s, p, o, m) BOOST_PP_FOR_212_C(BOOST_PP_BOOL(p(213, s)), s, p, o, m)\n# define BOOST_PP_FOR_213(s, p, o, m) BOOST_PP_FOR_213_C(BOOST_PP_BOOL(p(214, s)), s, p, o, m)\n# define BOOST_PP_FOR_214(s, p, o, m) BOOST_PP_FOR_214_C(BOOST_PP_BOOL(p(215, s)), s, p, o, m)\n# define BOOST_PP_FOR_215(s, p, o, m) BOOST_PP_FOR_215_C(BOOST_PP_BOOL(p(216, s)), s, p, o, m)\n# define BOOST_PP_FOR_216(s, p, o, m) BOOST_PP_FOR_216_C(BOOST_PP_BOOL(p(217, s)), s, p, o, m)\n# define BOOST_PP_FOR_217(s, p, o, m) BOOST_PP_FOR_217_C(BOOST_PP_BOOL(p(218, s)), s, p, o, m)\n# define BOOST_PP_FOR_218(s, p, o, m) BOOST_PP_FOR_218_C(BOOST_PP_BOOL(p(219, s)), s, p, o, m)\n# define BOOST_PP_FOR_219(s, p, o, m) BOOST_PP_FOR_219_C(BOOST_PP_BOOL(p(220, s)), s, p, o, m)\n# define BOOST_PP_FOR_220(s, p, o, m) BOOST_PP_FOR_220_C(BOOST_PP_BOOL(p(221, s)), s, p, o, m)\n# define BOOST_PP_FOR_221(s, p, o, m) BOOST_PP_FOR_221_C(BOOST_PP_BOOL(p(222, s)), s, p, o, m)\n# define BOOST_PP_FOR_222(s, p, o, m) BOOST_PP_FOR_222_C(BOOST_PP_BOOL(p(223, s)), s, p, o, m)\n# define BOOST_PP_FOR_223(s, p, o, m) BOOST_PP_FOR_223_C(BOOST_PP_BOOL(p(224, s)), s, p, o, m)\n# define BOOST_PP_FOR_224(s, p, o, m) BOOST_PP_FOR_224_C(BOOST_PP_BOOL(p(225, s)), s, p, o, m)\n# define 
BOOST_PP_FOR_225(s, p, o, m) BOOST_PP_FOR_225_C(BOOST_PP_BOOL(p(226, s)), s, p, o, m)\n# define BOOST_PP_FOR_226(s, p, o, m) BOOST_PP_FOR_226_C(BOOST_PP_BOOL(p(227, s)), s, p, o, m)\n# define BOOST_PP_FOR_227(s, p, o, m) BOOST_PP_FOR_227_C(BOOST_PP_BOOL(p(228, s)), s, p, o, m)\n# define BOOST_PP_FOR_228(s, p, o, m) BOOST_PP_FOR_228_C(BOOST_PP_BOOL(p(229, s)), s, p, o, m)\n# define BOOST_PP_FOR_229(s, p, o, m) BOOST_PP_FOR_229_C(BOOST_PP_BOOL(p(230, s)), s, p, o, m)\n# define BOOST_PP_FOR_230(s, p, o, m) BOOST_PP_FOR_230_C(BOOST_PP_BOOL(p(231, s)), s, p, o, m)\n# define BOOST_PP_FOR_231(s, p, o, m) BOOST_PP_FOR_231_C(BOOST_PP_BOOL(p(232, s)), s, p, o, m)\n# define BOOST_PP_FOR_232(s, p, o, m) BOOST_PP_FOR_232_C(BOOST_PP_BOOL(p(233, s)), s, p, o, m)\n# define BOOST_PP_FOR_233(s, p, o, m) BOOST_PP_FOR_233_C(BOOST_PP_BOOL(p(234, s)), s, p, o, m)\n# define BOOST_PP_FOR_234(s, p, o, m) BOOST_PP_FOR_234_C(BOOST_PP_BOOL(p(235, s)), s, p, o, m)\n# define BOOST_PP_FOR_235(s, p, o, m) BOOST_PP_FOR_235_C(BOOST_PP_BOOL(p(236, s)), s, p, o, m)\n# define BOOST_PP_FOR_236(s, p, o, m) BOOST_PP_FOR_236_C(BOOST_PP_BOOL(p(237, s)), s, p, o, m)\n# define BOOST_PP_FOR_237(s, p, o, m) BOOST_PP_FOR_237_C(BOOST_PP_BOOL(p(238, s)), s, p, o, m)\n# define BOOST_PP_FOR_238(s, p, o, m) BOOST_PP_FOR_238_C(BOOST_PP_BOOL(p(239, s)), s, p, o, m)\n# define BOOST_PP_FOR_239(s, p, o, m) BOOST_PP_FOR_239_C(BOOST_PP_BOOL(p(240, s)), s, p, o, m)\n# define BOOST_PP_FOR_240(s, p, o, m) BOOST_PP_FOR_240_C(BOOST_PP_BOOL(p(241, s)), s, p, o, m)\n# define BOOST_PP_FOR_241(s, p, o, m) BOOST_PP_FOR_241_C(BOOST_PP_BOOL(p(242, s)), s, p, o, m)\n# define BOOST_PP_FOR_242(s, p, o, m) BOOST_PP_FOR_242_C(BOOST_PP_BOOL(p(243, s)), s, p, o, m)\n# define BOOST_PP_FOR_243(s, p, o, m) BOOST_PP_FOR_243_C(BOOST_PP_BOOL(p(244, s)), s, p, o, m)\n# define BOOST_PP_FOR_244(s, p, o, m) BOOST_PP_FOR_244_C(BOOST_PP_BOOL(p(245, s)), s, p, o, m)\n# define BOOST_PP_FOR_245(s, p, o, m) BOOST_PP_FOR_245_C(BOOST_PP_BOOL(p(246, s)), s, p, 
o, m)\n# define BOOST_PP_FOR_246(s, p, o, m) BOOST_PP_FOR_246_C(BOOST_PP_BOOL(p(247, s)), s, p, o, m)\n# define BOOST_PP_FOR_247(s, p, o, m) BOOST_PP_FOR_247_C(BOOST_PP_BOOL(p(248, s)), s, p, o, m)\n# define BOOST_PP_FOR_248(s, p, o, m) BOOST_PP_FOR_248_C(BOOST_PP_BOOL(p(249, s)), s, p, o, m)\n# define BOOST_PP_FOR_249(s, p, o, m) BOOST_PP_FOR_249_C(BOOST_PP_BOOL(p(250, s)), s, p, o, m)\n# define BOOST_PP_FOR_250(s, p, o, m) BOOST_PP_FOR_250_C(BOOST_PP_BOOL(p(251, s)), s, p, o, m)\n# define BOOST_PP_FOR_251(s, p, o, m) BOOST_PP_FOR_251_C(BOOST_PP_BOOL(p(252, s)), s, p, o, m)\n# define BOOST_PP_FOR_252(s, p, o, m) BOOST_PP_FOR_252_C(BOOST_PP_BOOL(p(253, s)), s, p, o, m)\n# define BOOST_PP_FOR_253(s, p, o, m) BOOST_PP_FOR_253_C(BOOST_PP_BOOL(p(254, s)), s, p, o, m)\n# define BOOST_PP_FOR_254(s, p, o, m) BOOST_PP_FOR_254_C(BOOST_PP_BOOL(p(255, s)), s, p, o, m)\n# define BOOST_PP_FOR_255(s, p, o, m) BOOST_PP_FOR_255_C(BOOST_PP_BOOL(p(256, s)), s, p, o, m)\n# define BOOST_PP_FOR_256(s, p, o, m) BOOST_PP_FOR_256_C(BOOST_PP_BOOL(p(257, s)), s, p, o, m)\n#\n# define BOOST_PP_FOR_1_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(2, s) BOOST_PP_IIF(c, BOOST_PP_FOR_2, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(2, s), p, o, m)\n# define BOOST_PP_FOR_2_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(3, s) BOOST_PP_IIF(c, BOOST_PP_FOR_3, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(3, s), p, o, m)\n# define BOOST_PP_FOR_3_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(4, s) BOOST_PP_IIF(c, BOOST_PP_FOR_4, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(4, s), p, o, m)\n# define BOOST_PP_FOR_4_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(5, s) BOOST_PP_IIF(c, BOOST_PP_FOR_5, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(5, s), p, o, m)\n# define BOOST_PP_FOR_5_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(6, s) BOOST_PP_IIF(c, BOOST_PP_FOR_6, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(6, s), p, o, m)\n# define 
BOOST_PP_FOR_6_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(7, s) BOOST_PP_IIF(c, BOOST_PP_FOR_7, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(7, s), p, o, m)\n# define BOOST_PP_FOR_7_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(8, s) BOOST_PP_IIF(c, BOOST_PP_FOR_8, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(8, s), p, o, m)\n# define BOOST_PP_FOR_8_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(9, s) BOOST_PP_IIF(c, BOOST_PP_FOR_9, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(9, s), p, o, m)\n# define BOOST_PP_FOR_9_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(10, s) BOOST_PP_IIF(c, BOOST_PP_FOR_10, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(10, s), p, o, m)\n# define BOOST_PP_FOR_10_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(11, s) BOOST_PP_IIF(c, BOOST_PP_FOR_11, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(11, s), p, o, m)\n# define BOOST_PP_FOR_11_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(12, s) BOOST_PP_IIF(c, BOOST_PP_FOR_12, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(12, s), p, o, m)\n# define BOOST_PP_FOR_12_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(13, s) BOOST_PP_IIF(c, BOOST_PP_FOR_13, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(13, s), p, o, m)\n# define BOOST_PP_FOR_13_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(14, s) BOOST_PP_IIF(c, BOOST_PP_FOR_14, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(14, s), p, o, m)\n# define BOOST_PP_FOR_14_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(15, s) BOOST_PP_IIF(c, BOOST_PP_FOR_15, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(15, s), p, o, m)\n# define BOOST_PP_FOR_15_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(16, s) BOOST_PP_IIF(c, BOOST_PP_FOR_16, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(16, s), p, o, m)\n# define BOOST_PP_FOR_16_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(17, s) BOOST_PP_IIF(c, BOOST_PP_FOR_17, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(17, s), p, o, m)\n# define BOOST_PP_FOR_17_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(18, s) BOOST_PP_IIF(c, BOOST_PP_FOR_18, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(18, s), p, o, m)\n# define BOOST_PP_FOR_18_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(19, s) BOOST_PP_IIF(c, BOOST_PP_FOR_19, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(19, s), p, o, m)\n# define BOOST_PP_FOR_19_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(20, s) BOOST_PP_IIF(c, BOOST_PP_FOR_20, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(20, s), p, o, m)\n# define BOOST_PP_FOR_20_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(21, s) BOOST_PP_IIF(c, BOOST_PP_FOR_21, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(21, s), p, o, m)\n# define BOOST_PP_FOR_21_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(22, s) BOOST_PP_IIF(c, BOOST_PP_FOR_22, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(22, s), p, o, m)\n# define BOOST_PP_FOR_22_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(23, s) BOOST_PP_IIF(c, BOOST_PP_FOR_23, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(23, s), p, o, m)\n# define BOOST_PP_FOR_23_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(24, s) BOOST_PP_IIF(c, BOOST_PP_FOR_24, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(24, s), p, o, m)\n# define BOOST_PP_FOR_24_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(25, s) BOOST_PP_IIF(c, BOOST_PP_FOR_25, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(25, s), p, o, m)\n# define BOOST_PP_FOR_25_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(26, s) BOOST_PP_IIF(c, BOOST_PP_FOR_26, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(26, s), p, o, m)\n# define BOOST_PP_FOR_26_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(27, s) BOOST_PP_IIF(c, BOOST_PP_FOR_27, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(27, s), p, o, m)\n# define BOOST_PP_FOR_27_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(28, s) BOOST_PP_IIF(c, BOOST_PP_FOR_28, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(28, s), p, o, m)\n# define BOOST_PP_FOR_28_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(29, s) BOOST_PP_IIF(c, BOOST_PP_FOR_29, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(29, s), p, o, m)\n# define BOOST_PP_FOR_29_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(30, s) BOOST_PP_IIF(c, BOOST_PP_FOR_30, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(30, s), p, o, m)\n# define BOOST_PP_FOR_30_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(31, s) BOOST_PP_IIF(c, BOOST_PP_FOR_31, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(31, s), p, o, m)\n# define BOOST_PP_FOR_31_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(32, s) BOOST_PP_IIF(c, BOOST_PP_FOR_32, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(32, s), p, o, m)\n# define BOOST_PP_FOR_32_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(33, s) BOOST_PP_IIF(c, BOOST_PP_FOR_33, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(33, s), p, o, m)\n# define BOOST_PP_FOR_33_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(34, s) BOOST_PP_IIF(c, BOOST_PP_FOR_34, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(34, s), p, o, m)\n# define BOOST_PP_FOR_34_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(35, s) BOOST_PP_IIF(c, BOOST_PP_FOR_35, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(35, s), p, o, m)\n# define BOOST_PP_FOR_35_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(36, s) BOOST_PP_IIF(c, BOOST_PP_FOR_36, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(36, s), p, o, m)\n# define BOOST_PP_FOR_36_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(37, s) BOOST_PP_IIF(c, BOOST_PP_FOR_37, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(37, s), p, o, m)\n# define BOOST_PP_FOR_37_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(38, s) BOOST_PP_IIF(c, BOOST_PP_FOR_38, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(38, s), p, o, m)\n# 
define BOOST_PP_FOR_38_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(39, s) BOOST_PP_IIF(c, BOOST_PP_FOR_39, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(39, s), p, o, m)\n# define BOOST_PP_FOR_39_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(40, s) BOOST_PP_IIF(c, BOOST_PP_FOR_40, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(40, s), p, o, m)\n# define BOOST_PP_FOR_40_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(41, s) BOOST_PP_IIF(c, BOOST_PP_FOR_41, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(41, s), p, o, m)\n# define BOOST_PP_FOR_41_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(42, s) BOOST_PP_IIF(c, BOOST_PP_FOR_42, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(42, s), p, o, m)\n# define BOOST_PP_FOR_42_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(43, s) BOOST_PP_IIF(c, BOOST_PP_FOR_43, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(43, s), p, o, m)\n# define BOOST_PP_FOR_43_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(44, s) BOOST_PP_IIF(c, BOOST_PP_FOR_44, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(44, s), p, o, m)\n# define BOOST_PP_FOR_44_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(45, s) BOOST_PP_IIF(c, BOOST_PP_FOR_45, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(45, s), p, o, m)\n# define BOOST_PP_FOR_45_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(46, s) BOOST_PP_IIF(c, BOOST_PP_FOR_46, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(46, s), p, o, m)\n# define BOOST_PP_FOR_46_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(47, s) BOOST_PP_IIF(c, BOOST_PP_FOR_47, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(47, s), p, o, m)\n# define BOOST_PP_FOR_47_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(48, s) BOOST_PP_IIF(c, BOOST_PP_FOR_48, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(48, s), p, o, m)\n# define BOOST_PP_FOR_48_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(49, s) BOOST_PP_IIF(c, BOOST_PP_FOR_49, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(49, s), p, o, m)\n# define BOOST_PP_FOR_49_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(50, s) BOOST_PP_IIF(c, BOOST_PP_FOR_50, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(50, s), p, o, m)\n# define BOOST_PP_FOR_50_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(51, s) BOOST_PP_IIF(c, BOOST_PP_FOR_51, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(51, s), p, o, m)\n# define BOOST_PP_FOR_51_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(52, s) BOOST_PP_IIF(c, BOOST_PP_FOR_52, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(52, s), p, o, m)\n# define BOOST_PP_FOR_52_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(53, s) BOOST_PP_IIF(c, BOOST_PP_FOR_53, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(53, s), p, o, m)\n# define BOOST_PP_FOR_53_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(54, s) BOOST_PP_IIF(c, BOOST_PP_FOR_54, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(54, s), p, o, m)\n# define BOOST_PP_FOR_54_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(55, s) BOOST_PP_IIF(c, BOOST_PP_FOR_55, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(55, s), p, o, m)\n# define BOOST_PP_FOR_55_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(56, s) BOOST_PP_IIF(c, BOOST_PP_FOR_56, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(56, s), p, o, m)\n# define BOOST_PP_FOR_56_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(57, s) BOOST_PP_IIF(c, BOOST_PP_FOR_57, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(57, s), p, o, m)\n# define BOOST_PP_FOR_57_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(58, s) BOOST_PP_IIF(c, BOOST_PP_FOR_58, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(58, s), p, o, m)\n# define BOOST_PP_FOR_58_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(59, s) BOOST_PP_IIF(c, BOOST_PP_FOR_59, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(59, s), p, o, m)\n# define BOOST_PP_FOR_59_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(60, s) BOOST_PP_IIF(c, BOOST_PP_FOR_60, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(60, s), p, o, m)\n# define BOOST_PP_FOR_60_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(61, s) BOOST_PP_IIF(c, BOOST_PP_FOR_61, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(61, s), p, o, m)\n# define BOOST_PP_FOR_61_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(62, s) BOOST_PP_IIF(c, BOOST_PP_FOR_62, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(62, s), p, o, m)\n# define BOOST_PP_FOR_62_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(63, s) BOOST_PP_IIF(c, BOOST_PP_FOR_63, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(63, s), p, o, m)\n# define BOOST_PP_FOR_63_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(64, s) BOOST_PP_IIF(c, BOOST_PP_FOR_64, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(64, s), p, o, m)\n# define BOOST_PP_FOR_64_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(65, s) BOOST_PP_IIF(c, BOOST_PP_FOR_65, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(65, s), p, o, m)\n# define BOOST_PP_FOR_65_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(66, s) BOOST_PP_IIF(c, BOOST_PP_FOR_66, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(66, s), p, o, m)\n# define BOOST_PP_FOR_66_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(67, s) BOOST_PP_IIF(c, BOOST_PP_FOR_67, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(67, s), p, o, m)\n# define BOOST_PP_FOR_67_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(68, s) BOOST_PP_IIF(c, BOOST_PP_FOR_68, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(68, s), p, o, m)\n# define BOOST_PP_FOR_68_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(69, s) BOOST_PP_IIF(c, BOOST_PP_FOR_69, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(69, s), p, o, m)\n# define BOOST_PP_FOR_69_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(70, s) BOOST_PP_IIF(c, BOOST_PP_FOR_70, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(70, s), p, o, m)\n# 
define BOOST_PP_FOR_70_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(71, s) BOOST_PP_IIF(c, BOOST_PP_FOR_71, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(71, s), p, o, m)\n# define BOOST_PP_FOR_71_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(72, s) BOOST_PP_IIF(c, BOOST_PP_FOR_72, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(72, s), p, o, m)\n# define BOOST_PP_FOR_72_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(73, s) BOOST_PP_IIF(c, BOOST_PP_FOR_73, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(73, s), p, o, m)\n# define BOOST_PP_FOR_73_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(74, s) BOOST_PP_IIF(c, BOOST_PP_FOR_74, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(74, s), p, o, m)\n# define BOOST_PP_FOR_74_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(75, s) BOOST_PP_IIF(c, BOOST_PP_FOR_75, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(75, s), p, o, m)\n# define BOOST_PP_FOR_75_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(76, s) BOOST_PP_IIF(c, BOOST_PP_FOR_76, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(76, s), p, o, m)\n# define BOOST_PP_FOR_76_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(77, s) BOOST_PP_IIF(c, BOOST_PP_FOR_77, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(77, s), p, o, m)\n# define BOOST_PP_FOR_77_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(78, s) BOOST_PP_IIF(c, BOOST_PP_FOR_78, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(78, s), p, o, m)\n# define BOOST_PP_FOR_78_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(79, s) BOOST_PP_IIF(c, BOOST_PP_FOR_79, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(79, s), p, o, m)\n# define BOOST_PP_FOR_79_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(80, s) BOOST_PP_IIF(c, BOOST_PP_FOR_80, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(80, s), p, o, m)\n# define BOOST_PP_FOR_80_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(81, s) BOOST_PP_IIF(c, BOOST_PP_FOR_81, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(81, s), p, o, m)\n# define BOOST_PP_FOR_81_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(82, s) BOOST_PP_IIF(c, BOOST_PP_FOR_82, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(82, s), p, o, m)\n# define BOOST_PP_FOR_82_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(83, s) BOOST_PP_IIF(c, BOOST_PP_FOR_83, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(83, s), p, o, m)\n# define BOOST_PP_FOR_83_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(84, s) BOOST_PP_IIF(c, BOOST_PP_FOR_84, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(84, s), p, o, m)\n# define BOOST_PP_FOR_84_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(85, s) BOOST_PP_IIF(c, BOOST_PP_FOR_85, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(85, s), p, o, m)\n# define BOOST_PP_FOR_85_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(86, s) BOOST_PP_IIF(c, BOOST_PP_FOR_86, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(86, s), p, o, m)\n# define BOOST_PP_FOR_86_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(87, s) BOOST_PP_IIF(c, BOOST_PP_FOR_87, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(87, s), p, o, m)\n# define BOOST_PP_FOR_87_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(88, s) BOOST_PP_IIF(c, BOOST_PP_FOR_88, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(88, s), p, o, m)\n# define BOOST_PP_FOR_88_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(89, s) BOOST_PP_IIF(c, BOOST_PP_FOR_89, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(89, s), p, o, m)\n# define BOOST_PP_FOR_89_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(90, s) BOOST_PP_IIF(c, BOOST_PP_FOR_90, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(90, s), p, o, m)\n# define BOOST_PP_FOR_90_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(91, s) BOOST_PP_IIF(c, BOOST_PP_FOR_91, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(91, s), p, o, m)\n# define BOOST_PP_FOR_91_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(92, s) BOOST_PP_IIF(c, BOOST_PP_FOR_92, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(92, s), p, o, m)\n# define BOOST_PP_FOR_92_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(93, s) BOOST_PP_IIF(c, BOOST_PP_FOR_93, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(93, s), p, o, m)\n# define BOOST_PP_FOR_93_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(94, s) BOOST_PP_IIF(c, BOOST_PP_FOR_94, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(94, s), p, o, m)\n# define BOOST_PP_FOR_94_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(95, s) BOOST_PP_IIF(c, BOOST_PP_FOR_95, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(95, s), p, o, m)\n# define BOOST_PP_FOR_95_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(96, s) BOOST_PP_IIF(c, BOOST_PP_FOR_96, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(96, s), p, o, m)\n# define BOOST_PP_FOR_96_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(97, s) BOOST_PP_IIF(c, BOOST_PP_FOR_97, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(97, s), p, o, m)\n# define BOOST_PP_FOR_97_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(98, s) BOOST_PP_IIF(c, BOOST_PP_FOR_98, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(98, s), p, o, m)\n# define BOOST_PP_FOR_98_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(99, s) BOOST_PP_IIF(c, BOOST_PP_FOR_99, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(99, s), p, o, m)\n# define BOOST_PP_FOR_99_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(100, s) BOOST_PP_IIF(c, BOOST_PP_FOR_100, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(100, s), p, o, m)\n# define BOOST_PP_FOR_100_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(101, s) BOOST_PP_IIF(c, BOOST_PP_FOR_101, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(101, s), p, o, m)\n# define BOOST_PP_FOR_101_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(102, s) BOOST_PP_IIF(c, BOOST_PP_FOR_102, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(102, s), p, 
o, m)\n# define BOOST_PP_FOR_102_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(103, s) BOOST_PP_IIF(c, BOOST_PP_FOR_103, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(103, s), p, o, m)\n# define BOOST_PP_FOR_103_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(104, s) BOOST_PP_IIF(c, BOOST_PP_FOR_104, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(104, s), p, o, m)\n# define BOOST_PP_FOR_104_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(105, s) BOOST_PP_IIF(c, BOOST_PP_FOR_105, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(105, s), p, o, m)\n# define BOOST_PP_FOR_105_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(106, s) BOOST_PP_IIF(c, BOOST_PP_FOR_106, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(106, s), p, o, m)\n# define BOOST_PP_FOR_106_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(107, s) BOOST_PP_IIF(c, BOOST_PP_FOR_107, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(107, s), p, o, m)\n# define BOOST_PP_FOR_107_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(108, s) BOOST_PP_IIF(c, BOOST_PP_FOR_108, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(108, s), p, o, m)\n# define BOOST_PP_FOR_108_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(109, s) BOOST_PP_IIF(c, BOOST_PP_FOR_109, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(109, s), p, o, m)\n# define BOOST_PP_FOR_109_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(110, s) BOOST_PP_IIF(c, BOOST_PP_FOR_110, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(110, s), p, o, m)\n# define BOOST_PP_FOR_110_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(111, s) BOOST_PP_IIF(c, BOOST_PP_FOR_111, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(111, s), p, o, m)\n# define BOOST_PP_FOR_111_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(112, s) BOOST_PP_IIF(c, BOOST_PP_FOR_112, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(112, s), p, o, m)\n# define BOOST_PP_FOR_112_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(113, s) BOOST_PP_IIF(c, BOOST_PP_FOR_113, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(113, s), p, o, m)\n# define BOOST_PP_FOR_113_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(114, s) BOOST_PP_IIF(c, BOOST_PP_FOR_114, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(114, s), p, o, m)\n# define BOOST_PP_FOR_114_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(115, s) BOOST_PP_IIF(c, BOOST_PP_FOR_115, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(115, s), p, o, m)\n# define BOOST_PP_FOR_115_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(116, s) BOOST_PP_IIF(c, BOOST_PP_FOR_116, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(116, s), p, o, m)\n# define BOOST_PP_FOR_116_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(117, s) BOOST_PP_IIF(c, BOOST_PP_FOR_117, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(117, s), p, o, m)\n# define BOOST_PP_FOR_117_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(118, s) BOOST_PP_IIF(c, BOOST_PP_FOR_118, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(118, s), p, o, m)\n# define BOOST_PP_FOR_118_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(119, s) BOOST_PP_IIF(c, BOOST_PP_FOR_119, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(119, s), p, o, m)\n# define BOOST_PP_FOR_119_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(120, s) BOOST_PP_IIF(c, BOOST_PP_FOR_120, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(120, s), p, o, m)\n# define BOOST_PP_FOR_120_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(121, s) BOOST_PP_IIF(c, BOOST_PP_FOR_121, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(121, s), p, o, m)\n# define BOOST_PP_FOR_121_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(122, s) BOOST_PP_IIF(c, BOOST_PP_FOR_122, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(122, s), p, o, m)\n# define BOOST_PP_FOR_122_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(123, s) BOOST_PP_IIF(c, BOOST_PP_FOR_123, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(123, s), p, o, m)\n# define BOOST_PP_FOR_123_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(124, s) BOOST_PP_IIF(c, BOOST_PP_FOR_124, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(124, s), p, o, m)\n# define BOOST_PP_FOR_124_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(125, s) BOOST_PP_IIF(c, BOOST_PP_FOR_125, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(125, s), p, o, m)\n# define BOOST_PP_FOR_125_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(126, s) BOOST_PP_IIF(c, BOOST_PP_FOR_126, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(126, s), p, o, m)\n# define BOOST_PP_FOR_126_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(127, s) BOOST_PP_IIF(c, BOOST_PP_FOR_127, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(127, s), p, o, m)\n# define BOOST_PP_FOR_127_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(128, s) BOOST_PP_IIF(c, BOOST_PP_FOR_128, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(128, s), p, o, m)\n# define BOOST_PP_FOR_128_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(129, s) BOOST_PP_IIF(c, BOOST_PP_FOR_129, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(129, s), p, o, m)\n# define BOOST_PP_FOR_129_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(130, s) BOOST_PP_IIF(c, BOOST_PP_FOR_130, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(130, s), p, o, m)\n# define BOOST_PP_FOR_130_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(131, s) BOOST_PP_IIF(c, BOOST_PP_FOR_131, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(131, s), p, o, m)\n# define BOOST_PP_FOR_131_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(132, s) BOOST_PP_IIF(c, BOOST_PP_FOR_132, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(132, s), p, o, m)\n# define BOOST_PP_FOR_132_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(133, s) BOOST_PP_IIF(c, BOOST_PP_FOR_133, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(133, s), p, o, m)\n# define 
BOOST_PP_FOR_133_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(134, s) BOOST_PP_IIF(c, BOOST_PP_FOR_134, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(134, s), p, o, m)\n# define BOOST_PP_FOR_134_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(135, s) BOOST_PP_IIF(c, BOOST_PP_FOR_135, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(135, s), p, o, m)\n# define BOOST_PP_FOR_135_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(136, s) BOOST_PP_IIF(c, BOOST_PP_FOR_136, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(136, s), p, o, m)\n# define BOOST_PP_FOR_136_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(137, s) BOOST_PP_IIF(c, BOOST_PP_FOR_137, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(137, s), p, o, m)\n# define BOOST_PP_FOR_137_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(138, s) BOOST_PP_IIF(c, BOOST_PP_FOR_138, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(138, s), p, o, m)\n# define BOOST_PP_FOR_138_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(139, s) BOOST_PP_IIF(c, BOOST_PP_FOR_139, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(139, s), p, o, m)\n# define BOOST_PP_FOR_139_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(140, s) BOOST_PP_IIF(c, BOOST_PP_FOR_140, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(140, s), p, o, m)\n# define BOOST_PP_FOR_140_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(141, s) BOOST_PP_IIF(c, BOOST_PP_FOR_141, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(141, s), p, o, m)\n# define BOOST_PP_FOR_141_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(142, s) BOOST_PP_IIF(c, BOOST_PP_FOR_142, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(142, s), p, o, m)\n# define BOOST_PP_FOR_142_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(143, s) BOOST_PP_IIF(c, BOOST_PP_FOR_143, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(143, s), p, o, m)\n# define BOOST_PP_FOR_143_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(144, s) 
BOOST_PP_IIF(c, BOOST_PP_FOR_144, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(144, s), p, o, m)\n# define BOOST_PP_FOR_144_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(145, s) BOOST_PP_IIF(c, BOOST_PP_FOR_145, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(145, s), p, o, m)\n# define BOOST_PP_FOR_145_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(146, s) BOOST_PP_IIF(c, BOOST_PP_FOR_146, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(146, s), p, o, m)\n# define BOOST_PP_FOR_146_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(147, s) BOOST_PP_IIF(c, BOOST_PP_FOR_147, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(147, s), p, o, m)\n# define BOOST_PP_FOR_147_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(148, s) BOOST_PP_IIF(c, BOOST_PP_FOR_148, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(148, s), p, o, m)\n# define BOOST_PP_FOR_148_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(149, s) BOOST_PP_IIF(c, BOOST_PP_FOR_149, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(149, s), p, o, m)\n# define BOOST_PP_FOR_149_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(150, s) BOOST_PP_IIF(c, BOOST_PP_FOR_150, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(150, s), p, o, m)\n# define BOOST_PP_FOR_150_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(151, s) BOOST_PP_IIF(c, BOOST_PP_FOR_151, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(151, s), p, o, m)\n# define BOOST_PP_FOR_151_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(152, s) BOOST_PP_IIF(c, BOOST_PP_FOR_152, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(152, s), p, o, m)\n# define BOOST_PP_FOR_152_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(153, s) BOOST_PP_IIF(c, BOOST_PP_FOR_153, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(153, s), p, o, m)\n# define BOOST_PP_FOR_153_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(154, s) BOOST_PP_IIF(c, BOOST_PP_FOR_154, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(154, s), 
p, o, m)\n# define BOOST_PP_FOR_154_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(155, s) BOOST_PP_IIF(c, BOOST_PP_FOR_155, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(155, s), p, o, m)\n# define BOOST_PP_FOR_155_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(156, s) BOOST_PP_IIF(c, BOOST_PP_FOR_156, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(156, s), p, o, m)\n# define BOOST_PP_FOR_156_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(157, s) BOOST_PP_IIF(c, BOOST_PP_FOR_157, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(157, s), p, o, m)\n# define BOOST_PP_FOR_157_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(158, s) BOOST_PP_IIF(c, BOOST_PP_FOR_158, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(158, s), p, o, m)\n# define BOOST_PP_FOR_158_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(159, s) BOOST_PP_IIF(c, BOOST_PP_FOR_159, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(159, s), p, o, m)\n# define BOOST_PP_FOR_159_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(160, s) BOOST_PP_IIF(c, BOOST_PP_FOR_160, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(160, s), p, o, m)\n# define BOOST_PP_FOR_160_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(161, s) BOOST_PP_IIF(c, BOOST_PP_FOR_161, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(161, s), p, o, m)\n# define BOOST_PP_FOR_161_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(162, s) BOOST_PP_IIF(c, BOOST_PP_FOR_162, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(162, s), p, o, m)\n# define BOOST_PP_FOR_162_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(163, s) BOOST_PP_IIF(c, BOOST_PP_FOR_163, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(163, s), p, o, m)\n# define BOOST_PP_FOR_163_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(164, s) BOOST_PP_IIF(c, BOOST_PP_FOR_164, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(164, s), p, o, m)\n# define BOOST_PP_FOR_164_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(165, s) BOOST_PP_IIF(c, BOOST_PP_FOR_165, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(165, s), p, o, m)\n# define BOOST_PP_FOR_165_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(166, s) BOOST_PP_IIF(c, BOOST_PP_FOR_166, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(166, s), p, o, m)\n# define BOOST_PP_FOR_166_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(167, s) BOOST_PP_IIF(c, BOOST_PP_FOR_167, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(167, s), p, o, m)\n# define BOOST_PP_FOR_167_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(168, s) BOOST_PP_IIF(c, BOOST_PP_FOR_168, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(168, s), p, o, m)\n# define BOOST_PP_FOR_168_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(169, s) BOOST_PP_IIF(c, BOOST_PP_FOR_169, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(169, s), p, o, m)\n# define BOOST_PP_FOR_169_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(170, s) BOOST_PP_IIF(c, BOOST_PP_FOR_170, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(170, s), p, o, m)\n# define BOOST_PP_FOR_170_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(171, s) BOOST_PP_IIF(c, BOOST_PP_FOR_171, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(171, s), p, o, m)\n# define BOOST_PP_FOR_171_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(172, s) BOOST_PP_IIF(c, BOOST_PP_FOR_172, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(172, s), p, o, m)\n# define BOOST_PP_FOR_172_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(173, s) BOOST_PP_IIF(c, BOOST_PP_FOR_173, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(173, s), p, o, m)\n# define BOOST_PP_FOR_173_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(174, s) BOOST_PP_IIF(c, BOOST_PP_FOR_174, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(174, s), p, o, m)\n# define BOOST_PP_FOR_174_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(175, s) BOOST_PP_IIF(c, BOOST_PP_FOR_175, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(175, s), p, o, m)\n# define BOOST_PP_FOR_175_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(176, s) BOOST_PP_IIF(c, BOOST_PP_FOR_176, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(176, s), p, o, m)\n# define BOOST_PP_FOR_176_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(177, s) BOOST_PP_IIF(c, BOOST_PP_FOR_177, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(177, s), p, o, m)\n# define BOOST_PP_FOR_177_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(178, s) BOOST_PP_IIF(c, BOOST_PP_FOR_178, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(178, s), p, o, m)\n# define BOOST_PP_FOR_178_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(179, s) BOOST_PP_IIF(c, BOOST_PP_FOR_179, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(179, s), p, o, m)\n# define BOOST_PP_FOR_179_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(180, s) BOOST_PP_IIF(c, BOOST_PP_FOR_180, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(180, s), p, o, m)\n# define BOOST_PP_FOR_180_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(181, s) BOOST_PP_IIF(c, BOOST_PP_FOR_181, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(181, s), p, o, m)\n# define BOOST_PP_FOR_181_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(182, s) BOOST_PP_IIF(c, BOOST_PP_FOR_182, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(182, s), p, o, m)\n# define BOOST_PP_FOR_182_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(183, s) BOOST_PP_IIF(c, BOOST_PP_FOR_183, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(183, s), p, o, m)\n# define BOOST_PP_FOR_183_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(184, s) BOOST_PP_IIF(c, BOOST_PP_FOR_184, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(184, s), p, o, m)\n# define BOOST_PP_FOR_184_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(185, s) BOOST_PP_IIF(c, BOOST_PP_FOR_185, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(185, s), p, o, m)\n# define 
BOOST_PP_FOR_185_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(186, s) BOOST_PP_IIF(c, BOOST_PP_FOR_186, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(186, s), p, o, m)\n# define BOOST_PP_FOR_186_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(187, s) BOOST_PP_IIF(c, BOOST_PP_FOR_187, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(187, s), p, o, m)\n# define BOOST_PP_FOR_187_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(188, s) BOOST_PP_IIF(c, BOOST_PP_FOR_188, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(188, s), p, o, m)\n# define BOOST_PP_FOR_188_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(189, s) BOOST_PP_IIF(c, BOOST_PP_FOR_189, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(189, s), p, o, m)\n# define BOOST_PP_FOR_189_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(190, s) BOOST_PP_IIF(c, BOOST_PP_FOR_190, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(190, s), p, o, m)\n# define BOOST_PP_FOR_190_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(191, s) BOOST_PP_IIF(c, BOOST_PP_FOR_191, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(191, s), p, o, m)\n# define BOOST_PP_FOR_191_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(192, s) BOOST_PP_IIF(c, BOOST_PP_FOR_192, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(192, s), p, o, m)\n# define BOOST_PP_FOR_192_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(193, s) BOOST_PP_IIF(c, BOOST_PP_FOR_193, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(193, s), p, o, m)\n# define BOOST_PP_FOR_193_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(194, s) BOOST_PP_IIF(c, BOOST_PP_FOR_194, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(194, s), p, o, m)\n# define BOOST_PP_FOR_194_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(195, s) BOOST_PP_IIF(c, BOOST_PP_FOR_195, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(195, s), p, o, m)\n# define BOOST_PP_FOR_195_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(196, s) 
BOOST_PP_IIF(c, BOOST_PP_FOR_196, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(196, s), p, o, m)\n# define BOOST_PP_FOR_196_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(197, s) BOOST_PP_IIF(c, BOOST_PP_FOR_197, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(197, s), p, o, m)\n# define BOOST_PP_FOR_197_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(198, s) BOOST_PP_IIF(c, BOOST_PP_FOR_198, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(198, s), p, o, m)\n# define BOOST_PP_FOR_198_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(199, s) BOOST_PP_IIF(c, BOOST_PP_FOR_199, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(199, s), p, o, m)\n# define BOOST_PP_FOR_199_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(200, s) BOOST_PP_IIF(c, BOOST_PP_FOR_200, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(200, s), p, o, m)\n# define BOOST_PP_FOR_200_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(201, s) BOOST_PP_IIF(c, BOOST_PP_FOR_201, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(201, s), p, o, m)\n# define BOOST_PP_FOR_201_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(202, s) BOOST_PP_IIF(c, BOOST_PP_FOR_202, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(202, s), p, o, m)\n# define BOOST_PP_FOR_202_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(203, s) BOOST_PP_IIF(c, BOOST_PP_FOR_203, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(203, s), p, o, m)\n# define BOOST_PP_FOR_203_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(204, s) BOOST_PP_IIF(c, BOOST_PP_FOR_204, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(204, s), p, o, m)\n# define BOOST_PP_FOR_204_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(205, s) BOOST_PP_IIF(c, BOOST_PP_FOR_205, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(205, s), p, o, m)\n# define BOOST_PP_FOR_205_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(206, s) BOOST_PP_IIF(c, BOOST_PP_FOR_206, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(206, s), 
p, o, m)\n# define BOOST_PP_FOR_206_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(207, s) BOOST_PP_IIF(c, BOOST_PP_FOR_207, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(207, s), p, o, m)\n# define BOOST_PP_FOR_207_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(208, s) BOOST_PP_IIF(c, BOOST_PP_FOR_208, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(208, s), p, o, m)\n# define BOOST_PP_FOR_208_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(209, s) BOOST_PP_IIF(c, BOOST_PP_FOR_209, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(209, s), p, o, m)\n# define BOOST_PP_FOR_209_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(210, s) BOOST_PP_IIF(c, BOOST_PP_FOR_210, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(210, s), p, o, m)\n# define BOOST_PP_FOR_210_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(211, s) BOOST_PP_IIF(c, BOOST_PP_FOR_211, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(211, s), p, o, m)\n# define BOOST_PP_FOR_211_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(212, s) BOOST_PP_IIF(c, BOOST_PP_FOR_212, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(212, s), p, o, m)\n# define BOOST_PP_FOR_212_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(213, s) BOOST_PP_IIF(c, BOOST_PP_FOR_213, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(213, s), p, o, m)\n# define BOOST_PP_FOR_213_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(214, s) BOOST_PP_IIF(c, BOOST_PP_FOR_214, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(214, s), p, o, m)\n# define BOOST_PP_FOR_214_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(215, s) BOOST_PP_IIF(c, BOOST_PP_FOR_215, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(215, s), p, o, m)\n# define BOOST_PP_FOR_215_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(216, s) BOOST_PP_IIF(c, BOOST_PP_FOR_216, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(216, s), p, o, m)\n# define BOOST_PP_FOR_216_C(c, s, p, o, m) BOOST_PP_IIF(c, m, 
BOOST_PP_TUPLE_EAT_2)(217, s) BOOST_PP_IIF(c, BOOST_PP_FOR_217, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(217, s), p, o, m)\n# define BOOST_PP_FOR_217_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(218, s) BOOST_PP_IIF(c, BOOST_PP_FOR_218, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(218, s), p, o, m)\n# define BOOST_PP_FOR_218_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(219, s) BOOST_PP_IIF(c, BOOST_PP_FOR_219, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(219, s), p, o, m)\n# define BOOST_PP_FOR_219_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(220, s) BOOST_PP_IIF(c, BOOST_PP_FOR_220, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(220, s), p, o, m)\n# define BOOST_PP_FOR_220_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(221, s) BOOST_PP_IIF(c, BOOST_PP_FOR_221, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(221, s), p, o, m)\n# define BOOST_PP_FOR_221_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(222, s) BOOST_PP_IIF(c, BOOST_PP_FOR_222, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(222, s), p, o, m)\n# define BOOST_PP_FOR_222_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(223, s) BOOST_PP_IIF(c, BOOST_PP_FOR_223, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(223, s), p, o, m)\n# define BOOST_PP_FOR_223_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(224, s) BOOST_PP_IIF(c, BOOST_PP_FOR_224, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(224, s), p, o, m)\n# define BOOST_PP_FOR_224_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(225, s) BOOST_PP_IIF(c, BOOST_PP_FOR_225, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(225, s), p, o, m)\n# define BOOST_PP_FOR_225_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(226, s) BOOST_PP_IIF(c, BOOST_PP_FOR_226, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(226, s), p, o, m)\n# define BOOST_PP_FOR_226_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(227, s) BOOST_PP_IIF(c, BOOST_PP_FOR_227, 
BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(227, s), p, o, m)\n# define BOOST_PP_FOR_227_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(228, s) BOOST_PP_IIF(c, BOOST_PP_FOR_228, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(228, s), p, o, m)\n# define BOOST_PP_FOR_228_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(229, s) BOOST_PP_IIF(c, BOOST_PP_FOR_229, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(229, s), p, o, m)\n# define BOOST_PP_FOR_229_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(230, s) BOOST_PP_IIF(c, BOOST_PP_FOR_230, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(230, s), p, o, m)\n# define BOOST_PP_FOR_230_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(231, s) BOOST_PP_IIF(c, BOOST_PP_FOR_231, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(231, s), p, o, m)\n# define BOOST_PP_FOR_231_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(232, s) BOOST_PP_IIF(c, BOOST_PP_FOR_232, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(232, s), p, o, m)\n# define BOOST_PP_FOR_232_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(233, s) BOOST_PP_IIF(c, BOOST_PP_FOR_233, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(233, s), p, o, m)\n# define BOOST_PP_FOR_233_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(234, s) BOOST_PP_IIF(c, BOOST_PP_FOR_234, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(234, s), p, o, m)\n# define BOOST_PP_FOR_234_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(235, s) BOOST_PP_IIF(c, BOOST_PP_FOR_235, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(235, s), p, o, m)\n# define BOOST_PP_FOR_235_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(236, s) BOOST_PP_IIF(c, BOOST_PP_FOR_236, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(236, s), p, o, m)\n# define BOOST_PP_FOR_236_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(237, s) BOOST_PP_IIF(c, BOOST_PP_FOR_237, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(237, s), p, o, m)\n# define 
BOOST_PP_FOR_237_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(238, s) BOOST_PP_IIF(c, BOOST_PP_FOR_238, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(238, s), p, o, m)\n# define BOOST_PP_FOR_238_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(239, s) BOOST_PP_IIF(c, BOOST_PP_FOR_239, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(239, s), p, o, m)\n# define BOOST_PP_FOR_239_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(240, s) BOOST_PP_IIF(c, BOOST_PP_FOR_240, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(240, s), p, o, m)\n# define BOOST_PP_FOR_240_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(241, s) BOOST_PP_IIF(c, BOOST_PP_FOR_241, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(241, s), p, o, m)\n# define BOOST_PP_FOR_241_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(242, s) BOOST_PP_IIF(c, BOOST_PP_FOR_242, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(242, s), p, o, m)\n# define BOOST_PP_FOR_242_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(243, s) BOOST_PP_IIF(c, BOOST_PP_FOR_243, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(243, s), p, o, m)\n# define BOOST_PP_FOR_243_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(244, s) BOOST_PP_IIF(c, BOOST_PP_FOR_244, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(244, s), p, o, m)\n# define BOOST_PP_FOR_244_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(245, s) BOOST_PP_IIF(c, BOOST_PP_FOR_245, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(245, s), p, o, m)\n# define BOOST_PP_FOR_245_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(246, s) BOOST_PP_IIF(c, BOOST_PP_FOR_246, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(246, s), p, o, m)\n# define BOOST_PP_FOR_246_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(247, s) BOOST_PP_IIF(c, BOOST_PP_FOR_247, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(247, s), p, o, m)\n# define BOOST_PP_FOR_247_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(248, s) 
BOOST_PP_IIF(c, BOOST_PP_FOR_248, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(248, s), p, o, m)\n# define BOOST_PP_FOR_248_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(249, s) BOOST_PP_IIF(c, BOOST_PP_FOR_249, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(249, s), p, o, m)\n# define BOOST_PP_FOR_249_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(250, s) BOOST_PP_IIF(c, BOOST_PP_FOR_250, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(250, s), p, o, m)\n# define BOOST_PP_FOR_250_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(251, s) BOOST_PP_IIF(c, BOOST_PP_FOR_251, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(251, s), p, o, m)\n# define BOOST_PP_FOR_251_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(252, s) BOOST_PP_IIF(c, BOOST_PP_FOR_252, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(252, s), p, o, m)\n# define BOOST_PP_FOR_252_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(253, s) BOOST_PP_IIF(c, BOOST_PP_FOR_253, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(253, s), p, o, m)\n# define BOOST_PP_FOR_253_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(254, s) BOOST_PP_IIF(c, BOOST_PP_FOR_254, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(254, s), p, o, m)\n# define BOOST_PP_FOR_254_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(255, s) BOOST_PP_IIF(c, BOOST_PP_FOR_255, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(255, s), p, o, m)\n# define BOOST_PP_FOR_255_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(256, s) BOOST_PP_IIF(c, BOOST_PP_FOR_256, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(256, s), p, o, m)\n# define BOOST_PP_FOR_256_C(c, s, p, o, m) BOOST_PP_IIF(c, m, BOOST_PP_TUPLE_EAT_2)(257, s) BOOST_PP_IIF(c, BOOST_PP_FOR_257, BOOST_PP_TUPLE_EAT_4)(BOOST_PP_EXPR_IIF(c, o)(257, s), p, o, m)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/detail/msvc/for.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_DETAIL_MSVC_FOR_HPP\n# define BOOST_PREPROCESSOR_REPETITION_DETAIL_MSVC_FOR_HPP\n#\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# define BOOST_PP_FOR_1(s, p, o, m) BOOST_PP_IF(p(2, s), m, BOOST_PP_TUPLE_EAT_2)(2, s) BOOST_PP_IF(p(2, s), BOOST_PP_FOR_2, BOOST_PP_TUPLE_EAT_4)(o(2, s), p, o, m)\n# define BOOST_PP_FOR_2(s, p, o, m) BOOST_PP_IF(p(3, s), m, BOOST_PP_TUPLE_EAT_2)(3, s) BOOST_PP_IF(p(3, s), BOOST_PP_FOR_3, BOOST_PP_TUPLE_EAT_4)(o(3, s), p, o, m)\n# define BOOST_PP_FOR_3(s, p, o, m) BOOST_PP_IF(p(4, s), m, BOOST_PP_TUPLE_EAT_2)(4, s) BOOST_PP_IF(p(4, s), BOOST_PP_FOR_4, BOOST_PP_TUPLE_EAT_4)(o(4, s), p, o, m)\n# define BOOST_PP_FOR_4(s, p, o, m) BOOST_PP_IF(p(5, s), m, BOOST_PP_TUPLE_EAT_2)(5, s) BOOST_PP_IF(p(5, s), BOOST_PP_FOR_5, BOOST_PP_TUPLE_EAT_4)(o(5, s), p, o, m)\n# define BOOST_PP_FOR_5(s, p, o, m) BOOST_PP_IF(p(6, s), m, BOOST_PP_TUPLE_EAT_2)(6, s) BOOST_PP_IF(p(6, s), BOOST_PP_FOR_6, BOOST_PP_TUPLE_EAT_4)(o(6, s), p, o, m)\n# define BOOST_PP_FOR_6(s, p, o, m) BOOST_PP_IF(p(7, s), m, BOOST_PP_TUPLE_EAT_2)(7, s) BOOST_PP_IF(p(7, s), BOOST_PP_FOR_7, BOOST_PP_TUPLE_EAT_4)(o(7, s), p, o, m)\n# define BOOST_PP_FOR_7(s, p, o, m) BOOST_PP_IF(p(8, s), m, BOOST_PP_TUPLE_EAT_2)(8, s) BOOST_PP_IF(p(8, s), BOOST_PP_FOR_8, BOOST_PP_TUPLE_EAT_4)(o(8, s), p, o, m)\n# define BOOST_PP_FOR_8(s, p, o, m) BOOST_PP_IF(p(9, s), m, BOOST_PP_TUPLE_EAT_2)(9, s) BOOST_PP_IF(p(9, s), BOOST_PP_FOR_9, BOOST_PP_TUPLE_EAT_4)(o(9, s), p, o, m)\n# define BOOST_PP_FOR_9(s, p, o, m) BOOST_PP_IF(p(10, s), m, 
BOOST_PP_TUPLE_EAT_2)(10, s) BOOST_PP_IF(p(10, s), BOOST_PP_FOR_10, BOOST_PP_TUPLE_EAT_4)(o(10, s), p, o, m)\n# define BOOST_PP_FOR_10(s, p, o, m) BOOST_PP_IF(p(11, s), m, BOOST_PP_TUPLE_EAT_2)(11, s) BOOST_PP_IF(p(11, s), BOOST_PP_FOR_11, BOOST_PP_TUPLE_EAT_4)(o(11, s), p, o, m)\n# define BOOST_PP_FOR_11(s, p, o, m) BOOST_PP_IF(p(12, s), m, BOOST_PP_TUPLE_EAT_2)(12, s) BOOST_PP_IF(p(12, s), BOOST_PP_FOR_12, BOOST_PP_TUPLE_EAT_4)(o(12, s), p, o, m)\n# define BOOST_PP_FOR_12(s, p, o, m) BOOST_PP_IF(p(13, s), m, BOOST_PP_TUPLE_EAT_2)(13, s) BOOST_PP_IF(p(13, s), BOOST_PP_FOR_13, BOOST_PP_TUPLE_EAT_4)(o(13, s), p, o, m)\n# define BOOST_PP_FOR_13(s, p, o, m) BOOST_PP_IF(p(14, s), m, BOOST_PP_TUPLE_EAT_2)(14, s) BOOST_PP_IF(p(14, s), BOOST_PP_FOR_14, BOOST_PP_TUPLE_EAT_4)(o(14, s), p, o, m)\n# define BOOST_PP_FOR_14(s, p, o, m) BOOST_PP_IF(p(15, s), m, BOOST_PP_TUPLE_EAT_2)(15, s) BOOST_PP_IF(p(15, s), BOOST_PP_FOR_15, BOOST_PP_TUPLE_EAT_4)(o(15, s), p, o, m)\n# define BOOST_PP_FOR_15(s, p, o, m) BOOST_PP_IF(p(16, s), m, BOOST_PP_TUPLE_EAT_2)(16, s) BOOST_PP_IF(p(16, s), BOOST_PP_FOR_16, BOOST_PP_TUPLE_EAT_4)(o(16, s), p, o, m)\n# define BOOST_PP_FOR_16(s, p, o, m) BOOST_PP_IF(p(17, s), m, BOOST_PP_TUPLE_EAT_2)(17, s) BOOST_PP_IF(p(17, s), BOOST_PP_FOR_17, BOOST_PP_TUPLE_EAT_4)(o(17, s), p, o, m)\n# define BOOST_PP_FOR_17(s, p, o, m) BOOST_PP_IF(p(18, s), m, BOOST_PP_TUPLE_EAT_2)(18, s) BOOST_PP_IF(p(18, s), BOOST_PP_FOR_18, BOOST_PP_TUPLE_EAT_4)(o(18, s), p, o, m)\n# define BOOST_PP_FOR_18(s, p, o, m) BOOST_PP_IF(p(19, s), m, BOOST_PP_TUPLE_EAT_2)(19, s) BOOST_PP_IF(p(19, s), BOOST_PP_FOR_19, BOOST_PP_TUPLE_EAT_4)(o(19, s), p, o, m)\n# define BOOST_PP_FOR_19(s, p, o, m) BOOST_PP_IF(p(20, s), m, BOOST_PP_TUPLE_EAT_2)(20, s) BOOST_PP_IF(p(20, s), BOOST_PP_FOR_20, BOOST_PP_TUPLE_EAT_4)(o(20, s), p, o, m)\n# define BOOST_PP_FOR_20(s, p, o, m) BOOST_PP_IF(p(21, s), m, BOOST_PP_TUPLE_EAT_2)(21, s) BOOST_PP_IF(p(21, s), BOOST_PP_FOR_21, BOOST_PP_TUPLE_EAT_4)(o(21, s), p, o, 
m)\n# define BOOST_PP_FOR_21(s, p, o, m) BOOST_PP_IF(p(22, s), m, BOOST_PP_TUPLE_EAT_2)(22, s) BOOST_PP_IF(p(22, s), BOOST_PP_FOR_22, BOOST_PP_TUPLE_EAT_4)(o(22, s), p, o, m)\n# define BOOST_PP_FOR_22(s, p, o, m) BOOST_PP_IF(p(23, s), m, BOOST_PP_TUPLE_EAT_2)(23, s) BOOST_PP_IF(p(23, s), BOOST_PP_FOR_23, BOOST_PP_TUPLE_EAT_4)(o(23, s), p, o, m)\n# define BOOST_PP_FOR_23(s, p, o, m) BOOST_PP_IF(p(24, s), m, BOOST_PP_TUPLE_EAT_2)(24, s) BOOST_PP_IF(p(24, s), BOOST_PP_FOR_24, BOOST_PP_TUPLE_EAT_4)(o(24, s), p, o, m)\n# define BOOST_PP_FOR_24(s, p, o, m) BOOST_PP_IF(p(25, s), m, BOOST_PP_TUPLE_EAT_2)(25, s) BOOST_PP_IF(p(25, s), BOOST_PP_FOR_25, BOOST_PP_TUPLE_EAT_4)(o(25, s), p, o, m)\n# define BOOST_PP_FOR_25(s, p, o, m) BOOST_PP_IF(p(26, s), m, BOOST_PP_TUPLE_EAT_2)(26, s) BOOST_PP_IF(p(26, s), BOOST_PP_FOR_26, BOOST_PP_TUPLE_EAT_4)(o(26, s), p, o, m)\n# define BOOST_PP_FOR_26(s, p, o, m) BOOST_PP_IF(p(27, s), m, BOOST_PP_TUPLE_EAT_2)(27, s) BOOST_PP_IF(p(27, s), BOOST_PP_FOR_27, BOOST_PP_TUPLE_EAT_4)(o(27, s), p, o, m)\n# define BOOST_PP_FOR_27(s, p, o, m) BOOST_PP_IF(p(28, s), m, BOOST_PP_TUPLE_EAT_2)(28, s) BOOST_PP_IF(p(28, s), BOOST_PP_FOR_28, BOOST_PP_TUPLE_EAT_4)(o(28, s), p, o, m)\n# define BOOST_PP_FOR_28(s, p, o, m) BOOST_PP_IF(p(29, s), m, BOOST_PP_TUPLE_EAT_2)(29, s) BOOST_PP_IF(p(29, s), BOOST_PP_FOR_29, BOOST_PP_TUPLE_EAT_4)(o(29, s), p, o, m)\n# define BOOST_PP_FOR_29(s, p, o, m) BOOST_PP_IF(p(30, s), m, BOOST_PP_TUPLE_EAT_2)(30, s) BOOST_PP_IF(p(30, s), BOOST_PP_FOR_30, BOOST_PP_TUPLE_EAT_4)(o(30, s), p, o, m)\n# define BOOST_PP_FOR_30(s, p, o, m) BOOST_PP_IF(p(31, s), m, BOOST_PP_TUPLE_EAT_2)(31, s) BOOST_PP_IF(p(31, s), BOOST_PP_FOR_31, BOOST_PP_TUPLE_EAT_4)(o(31, s), p, o, m)\n# define BOOST_PP_FOR_31(s, p, o, m) BOOST_PP_IF(p(32, s), m, BOOST_PP_TUPLE_EAT_2)(32, s) BOOST_PP_IF(p(32, s), BOOST_PP_FOR_32, BOOST_PP_TUPLE_EAT_4)(o(32, s), p, o, m)\n# define BOOST_PP_FOR_32(s, p, o, m) BOOST_PP_IF(p(33, s), m, BOOST_PP_TUPLE_EAT_2)(33, s) 
BOOST_PP_IF(p(33, s), BOOST_PP_FOR_33, BOOST_PP_TUPLE_EAT_4)(o(33, s), p, o, m)\n# define BOOST_PP_FOR_33(s, p, o, m) BOOST_PP_IF(p(34, s), m, BOOST_PP_TUPLE_EAT_2)(34, s) BOOST_PP_IF(p(34, s), BOOST_PP_FOR_34, BOOST_PP_TUPLE_EAT_4)(o(34, s), p, o, m)\n# define BOOST_PP_FOR_34(s, p, o, m) BOOST_PP_IF(p(35, s), m, BOOST_PP_TUPLE_EAT_2)(35, s) BOOST_PP_IF(p(35, s), BOOST_PP_FOR_35, BOOST_PP_TUPLE_EAT_4)(o(35, s), p, o, m)\n# define BOOST_PP_FOR_35(s, p, o, m) BOOST_PP_IF(p(36, s), m, BOOST_PP_TUPLE_EAT_2)(36, s) BOOST_PP_IF(p(36, s), BOOST_PP_FOR_36, BOOST_PP_TUPLE_EAT_4)(o(36, s), p, o, m)\n# define BOOST_PP_FOR_36(s, p, o, m) BOOST_PP_IF(p(37, s), m, BOOST_PP_TUPLE_EAT_2)(37, s) BOOST_PP_IF(p(37, s), BOOST_PP_FOR_37, BOOST_PP_TUPLE_EAT_4)(o(37, s), p, o, m)\n# define BOOST_PP_FOR_37(s, p, o, m) BOOST_PP_IF(p(38, s), m, BOOST_PP_TUPLE_EAT_2)(38, s) BOOST_PP_IF(p(38, s), BOOST_PP_FOR_38, BOOST_PP_TUPLE_EAT_4)(o(38, s), p, o, m)\n# define BOOST_PP_FOR_38(s, p, o, m) BOOST_PP_IF(p(39, s), m, BOOST_PP_TUPLE_EAT_2)(39, s) BOOST_PP_IF(p(39, s), BOOST_PP_FOR_39, BOOST_PP_TUPLE_EAT_4)(o(39, s), p, o, m)\n# define BOOST_PP_FOR_39(s, p, o, m) BOOST_PP_IF(p(40, s), m, BOOST_PP_TUPLE_EAT_2)(40, s) BOOST_PP_IF(p(40, s), BOOST_PP_FOR_40, BOOST_PP_TUPLE_EAT_4)(o(40, s), p, o, m)\n# define BOOST_PP_FOR_40(s, p, o, m) BOOST_PP_IF(p(41, s), m, BOOST_PP_TUPLE_EAT_2)(41, s) BOOST_PP_IF(p(41, s), BOOST_PP_FOR_41, BOOST_PP_TUPLE_EAT_4)(o(41, s), p, o, m)\n# define BOOST_PP_FOR_41(s, p, o, m) BOOST_PP_IF(p(42, s), m, BOOST_PP_TUPLE_EAT_2)(42, s) BOOST_PP_IF(p(42, s), BOOST_PP_FOR_42, BOOST_PP_TUPLE_EAT_4)(o(42, s), p, o, m)\n# define BOOST_PP_FOR_42(s, p, o, m) BOOST_PP_IF(p(43, s), m, BOOST_PP_TUPLE_EAT_2)(43, s) BOOST_PP_IF(p(43, s), BOOST_PP_FOR_43, BOOST_PP_TUPLE_EAT_4)(o(43, s), p, o, m)\n# define BOOST_PP_FOR_43(s, p, o, m) BOOST_PP_IF(p(44, s), m, BOOST_PP_TUPLE_EAT_2)(44, s) BOOST_PP_IF(p(44, s), BOOST_PP_FOR_44, BOOST_PP_TUPLE_EAT_4)(o(44, s), p, o, m)\n# define 
BOOST_PP_FOR_44(s, p, o, m) BOOST_PP_IF(p(45, s), m, BOOST_PP_TUPLE_EAT_2)(45, s) BOOST_PP_IF(p(45, s), BOOST_PP_FOR_45, BOOST_PP_TUPLE_EAT_4)(o(45, s), p, o, m)\n# define BOOST_PP_FOR_45(s, p, o, m) BOOST_PP_IF(p(46, s), m, BOOST_PP_TUPLE_EAT_2)(46, s) BOOST_PP_IF(p(46, s), BOOST_PP_FOR_46, BOOST_PP_TUPLE_EAT_4)(o(46, s), p, o, m)\n# define BOOST_PP_FOR_46(s, p, o, m) BOOST_PP_IF(p(47, s), m, BOOST_PP_TUPLE_EAT_2)(47, s) BOOST_PP_IF(p(47, s), BOOST_PP_FOR_47, BOOST_PP_TUPLE_EAT_4)(o(47, s), p, o, m)\n# define BOOST_PP_FOR_47(s, p, o, m) BOOST_PP_IF(p(48, s), m, BOOST_PP_TUPLE_EAT_2)(48, s) BOOST_PP_IF(p(48, s), BOOST_PP_FOR_48, BOOST_PP_TUPLE_EAT_4)(o(48, s), p, o, m)\n# define BOOST_PP_FOR_48(s, p, o, m) BOOST_PP_IF(p(49, s), m, BOOST_PP_TUPLE_EAT_2)(49, s) BOOST_PP_IF(p(49, s), BOOST_PP_FOR_49, BOOST_PP_TUPLE_EAT_4)(o(49, s), p, o, m)\n# define BOOST_PP_FOR_49(s, p, o, m) BOOST_PP_IF(p(50, s), m, BOOST_PP_TUPLE_EAT_2)(50, s) BOOST_PP_IF(p(50, s), BOOST_PP_FOR_50, BOOST_PP_TUPLE_EAT_4)(o(50, s), p, o, m)\n# define BOOST_PP_FOR_50(s, p, o, m) BOOST_PP_IF(p(51, s), m, BOOST_PP_TUPLE_EAT_2)(51, s) BOOST_PP_IF(p(51, s), BOOST_PP_FOR_51, BOOST_PP_TUPLE_EAT_4)(o(51, s), p, o, m)\n# define BOOST_PP_FOR_51(s, p, o, m) BOOST_PP_IF(p(52, s), m, BOOST_PP_TUPLE_EAT_2)(52, s) BOOST_PP_IF(p(52, s), BOOST_PP_FOR_52, BOOST_PP_TUPLE_EAT_4)(o(52, s), p, o, m)\n# define BOOST_PP_FOR_52(s, p, o, m) BOOST_PP_IF(p(53, s), m, BOOST_PP_TUPLE_EAT_2)(53, s) BOOST_PP_IF(p(53, s), BOOST_PP_FOR_53, BOOST_PP_TUPLE_EAT_4)(o(53, s), p, o, m)\n# define BOOST_PP_FOR_53(s, p, o, m) BOOST_PP_IF(p(54, s), m, BOOST_PP_TUPLE_EAT_2)(54, s) BOOST_PP_IF(p(54, s), BOOST_PP_FOR_54, BOOST_PP_TUPLE_EAT_4)(o(54, s), p, o, m)\n# define BOOST_PP_FOR_54(s, p, o, m) BOOST_PP_IF(p(55, s), m, BOOST_PP_TUPLE_EAT_2)(55, s) BOOST_PP_IF(p(55, s), BOOST_PP_FOR_55, BOOST_PP_TUPLE_EAT_4)(o(55, s), p, o, m)\n# define BOOST_PP_FOR_55(s, p, o, m) BOOST_PP_IF(p(56, s), m, BOOST_PP_TUPLE_EAT_2)(56, s) BOOST_PP_IF(p(56, s), 
BOOST_PP_FOR_56, BOOST_PP_TUPLE_EAT_4)(o(56, s), p, o, m)\n# define BOOST_PP_FOR_56(s, p, o, m) BOOST_PP_IF(p(57, s), m, BOOST_PP_TUPLE_EAT_2)(57, s) BOOST_PP_IF(p(57, s), BOOST_PP_FOR_57, BOOST_PP_TUPLE_EAT_4)(o(57, s), p, o, m)\n# define BOOST_PP_FOR_57(s, p, o, m) BOOST_PP_IF(p(58, s), m, BOOST_PP_TUPLE_EAT_2)(58, s) BOOST_PP_IF(p(58, s), BOOST_PP_FOR_58, BOOST_PP_TUPLE_EAT_4)(o(58, s), p, o, m)\n# define BOOST_PP_FOR_58(s, p, o, m) BOOST_PP_IF(p(59, s), m, BOOST_PP_TUPLE_EAT_2)(59, s) BOOST_PP_IF(p(59, s), BOOST_PP_FOR_59, BOOST_PP_TUPLE_EAT_4)(o(59, s), p, o, m)\n# define BOOST_PP_FOR_59(s, p, o, m) BOOST_PP_IF(p(60, s), m, BOOST_PP_TUPLE_EAT_2)(60, s) BOOST_PP_IF(p(60, s), BOOST_PP_FOR_60, BOOST_PP_TUPLE_EAT_4)(o(60, s), p, o, m)\n# define BOOST_PP_FOR_60(s, p, o, m) BOOST_PP_IF(p(61, s), m, BOOST_PP_TUPLE_EAT_2)(61, s) BOOST_PP_IF(p(61, s), BOOST_PP_FOR_61, BOOST_PP_TUPLE_EAT_4)(o(61, s), p, o, m)\n# define BOOST_PP_FOR_61(s, p, o, m) BOOST_PP_IF(p(62, s), m, BOOST_PP_TUPLE_EAT_2)(62, s) BOOST_PP_IF(p(62, s), BOOST_PP_FOR_62, BOOST_PP_TUPLE_EAT_4)(o(62, s), p, o, m)\n# define BOOST_PP_FOR_62(s, p, o, m) BOOST_PP_IF(p(63, s), m, BOOST_PP_TUPLE_EAT_2)(63, s) BOOST_PP_IF(p(63, s), BOOST_PP_FOR_63, BOOST_PP_TUPLE_EAT_4)(o(63, s), p, o, m)\n# define BOOST_PP_FOR_63(s, p, o, m) BOOST_PP_IF(p(64, s), m, BOOST_PP_TUPLE_EAT_2)(64, s) BOOST_PP_IF(p(64, s), BOOST_PP_FOR_64, BOOST_PP_TUPLE_EAT_4)(o(64, s), p, o, m)\n# define BOOST_PP_FOR_64(s, p, o, m) BOOST_PP_IF(p(65, s), m, BOOST_PP_TUPLE_EAT_2)(65, s) BOOST_PP_IF(p(65, s), BOOST_PP_FOR_65, BOOST_PP_TUPLE_EAT_4)(o(65, s), p, o, m)\n# define BOOST_PP_FOR_65(s, p, o, m) BOOST_PP_IF(p(66, s), m, BOOST_PP_TUPLE_EAT_2)(66, s) BOOST_PP_IF(p(66, s), BOOST_PP_FOR_66, BOOST_PP_TUPLE_EAT_4)(o(66, s), p, o, m)\n# define BOOST_PP_FOR_66(s, p, o, m) BOOST_PP_IF(p(67, s), m, BOOST_PP_TUPLE_EAT_2)(67, s) BOOST_PP_IF(p(67, s), BOOST_PP_FOR_67, BOOST_PP_TUPLE_EAT_4)(o(67, s), p, o, m)\n# define BOOST_PP_FOR_67(s, p, o, m) 
BOOST_PP_IF(p(68, s), m, BOOST_PP_TUPLE_EAT_2)(68, s) BOOST_PP_IF(p(68, s), BOOST_PP_FOR_68, BOOST_PP_TUPLE_EAT_4)(o(68, s), p, o, m)\n# define BOOST_PP_FOR_68(s, p, o, m) BOOST_PP_IF(p(69, s), m, BOOST_PP_TUPLE_EAT_2)(69, s) BOOST_PP_IF(p(69, s), BOOST_PP_FOR_69, BOOST_PP_TUPLE_EAT_4)(o(69, s), p, o, m)\n# define BOOST_PP_FOR_69(s, p, o, m) BOOST_PP_IF(p(70, s), m, BOOST_PP_TUPLE_EAT_2)(70, s) BOOST_PP_IF(p(70, s), BOOST_PP_FOR_70, BOOST_PP_TUPLE_EAT_4)(o(70, s), p, o, m)\n# define BOOST_PP_FOR_70(s, p, o, m) BOOST_PP_IF(p(71, s), m, BOOST_PP_TUPLE_EAT_2)(71, s) BOOST_PP_IF(p(71, s), BOOST_PP_FOR_71, BOOST_PP_TUPLE_EAT_4)(o(71, s), p, o, m)\n# define BOOST_PP_FOR_71(s, p, o, m) BOOST_PP_IF(p(72, s), m, BOOST_PP_TUPLE_EAT_2)(72, s) BOOST_PP_IF(p(72, s), BOOST_PP_FOR_72, BOOST_PP_TUPLE_EAT_4)(o(72, s), p, o, m)\n# define BOOST_PP_FOR_72(s, p, o, m) BOOST_PP_IF(p(73, s), m, BOOST_PP_TUPLE_EAT_2)(73, s) BOOST_PP_IF(p(73, s), BOOST_PP_FOR_73, BOOST_PP_TUPLE_EAT_4)(o(73, s), p, o, m)\n# define BOOST_PP_FOR_73(s, p, o, m) BOOST_PP_IF(p(74, s), m, BOOST_PP_TUPLE_EAT_2)(74, s) BOOST_PP_IF(p(74, s), BOOST_PP_FOR_74, BOOST_PP_TUPLE_EAT_4)(o(74, s), p, o, m)\n# define BOOST_PP_FOR_74(s, p, o, m) BOOST_PP_IF(p(75, s), m, BOOST_PP_TUPLE_EAT_2)(75, s) BOOST_PP_IF(p(75, s), BOOST_PP_FOR_75, BOOST_PP_TUPLE_EAT_4)(o(75, s), p, o, m)\n# define BOOST_PP_FOR_75(s, p, o, m) BOOST_PP_IF(p(76, s), m, BOOST_PP_TUPLE_EAT_2)(76, s) BOOST_PP_IF(p(76, s), BOOST_PP_FOR_76, BOOST_PP_TUPLE_EAT_4)(o(76, s), p, o, m)\n# define BOOST_PP_FOR_76(s, p, o, m) BOOST_PP_IF(p(77, s), m, BOOST_PP_TUPLE_EAT_2)(77, s) BOOST_PP_IF(p(77, s), BOOST_PP_FOR_77, BOOST_PP_TUPLE_EAT_4)(o(77, s), p, o, m)\n# define BOOST_PP_FOR_77(s, p, o, m) BOOST_PP_IF(p(78, s), m, BOOST_PP_TUPLE_EAT_2)(78, s) BOOST_PP_IF(p(78, s), BOOST_PP_FOR_78, BOOST_PP_TUPLE_EAT_4)(o(78, s), p, o, m)\n# define BOOST_PP_FOR_78(s, p, o, m) BOOST_PP_IF(p(79, s), m, BOOST_PP_TUPLE_EAT_2)(79, s) BOOST_PP_IF(p(79, s), BOOST_PP_FOR_79, 
BOOST_PP_TUPLE_EAT_4)(o(79, s), p, o, m)\n# define BOOST_PP_FOR_79(s, p, o, m) BOOST_PP_IF(p(80, s), m, BOOST_PP_TUPLE_EAT_2)(80, s) BOOST_PP_IF(p(80, s), BOOST_PP_FOR_80, BOOST_PP_TUPLE_EAT_4)(o(80, s), p, o, m)\n# define BOOST_PP_FOR_80(s, p, o, m) BOOST_PP_IF(p(81, s), m, BOOST_PP_TUPLE_EAT_2)(81, s) BOOST_PP_IF(p(81, s), BOOST_PP_FOR_81, BOOST_PP_TUPLE_EAT_4)(o(81, s), p, o, m)\n# define BOOST_PP_FOR_81(s, p, o, m) BOOST_PP_IF(p(82, s), m, BOOST_PP_TUPLE_EAT_2)(82, s) BOOST_PP_IF(p(82, s), BOOST_PP_FOR_82, BOOST_PP_TUPLE_EAT_4)(o(82, s), p, o, m)\n# define BOOST_PP_FOR_82(s, p, o, m) BOOST_PP_IF(p(83, s), m, BOOST_PP_TUPLE_EAT_2)(83, s) BOOST_PP_IF(p(83, s), BOOST_PP_FOR_83, BOOST_PP_TUPLE_EAT_4)(o(83, s), p, o, m)\n# define BOOST_PP_FOR_83(s, p, o, m) BOOST_PP_IF(p(84, s), m, BOOST_PP_TUPLE_EAT_2)(84, s) BOOST_PP_IF(p(84, s), BOOST_PP_FOR_84, BOOST_PP_TUPLE_EAT_4)(o(84, s), p, o, m)\n# define BOOST_PP_FOR_84(s, p, o, m) BOOST_PP_IF(p(85, s), m, BOOST_PP_TUPLE_EAT_2)(85, s) BOOST_PP_IF(p(85, s), BOOST_PP_FOR_85, BOOST_PP_TUPLE_EAT_4)(o(85, s), p, o, m)\n# define BOOST_PP_FOR_85(s, p, o, m) BOOST_PP_IF(p(86, s), m, BOOST_PP_TUPLE_EAT_2)(86, s) BOOST_PP_IF(p(86, s), BOOST_PP_FOR_86, BOOST_PP_TUPLE_EAT_4)(o(86, s), p, o, m)\n# define BOOST_PP_FOR_86(s, p, o, m) BOOST_PP_IF(p(87, s), m, BOOST_PP_TUPLE_EAT_2)(87, s) BOOST_PP_IF(p(87, s), BOOST_PP_FOR_87, BOOST_PP_TUPLE_EAT_4)(o(87, s), p, o, m)\n# define BOOST_PP_FOR_87(s, p, o, m) BOOST_PP_IF(p(88, s), m, BOOST_PP_TUPLE_EAT_2)(88, s) BOOST_PP_IF(p(88, s), BOOST_PP_FOR_88, BOOST_PP_TUPLE_EAT_4)(o(88, s), p, o, m)\n# define BOOST_PP_FOR_88(s, p, o, m) BOOST_PP_IF(p(89, s), m, BOOST_PP_TUPLE_EAT_2)(89, s) BOOST_PP_IF(p(89, s), BOOST_PP_FOR_89, BOOST_PP_TUPLE_EAT_4)(o(89, s), p, o, m)\n# define BOOST_PP_FOR_89(s, p, o, m) BOOST_PP_IF(p(90, s), m, BOOST_PP_TUPLE_EAT_2)(90, s) BOOST_PP_IF(p(90, s), BOOST_PP_FOR_90, BOOST_PP_TUPLE_EAT_4)(o(90, s), p, o, m)\n# define BOOST_PP_FOR_90(s, p, o, m) BOOST_PP_IF(p(91, s), m, 
BOOST_PP_TUPLE_EAT_2)(91, s) BOOST_PP_IF(p(91, s), BOOST_PP_FOR_91, BOOST_PP_TUPLE_EAT_4)(o(91, s), p, o, m)\n# define BOOST_PP_FOR_91(s, p, o, m) BOOST_PP_IF(p(92, s), m, BOOST_PP_TUPLE_EAT_2)(92, s) BOOST_PP_IF(p(92, s), BOOST_PP_FOR_92, BOOST_PP_TUPLE_EAT_4)(o(92, s), p, o, m)\n# define BOOST_PP_FOR_92(s, p, o, m) BOOST_PP_IF(p(93, s), m, BOOST_PP_TUPLE_EAT_2)(93, s) BOOST_PP_IF(p(93, s), BOOST_PP_FOR_93, BOOST_PP_TUPLE_EAT_4)(o(93, s), p, o, m)\n# define BOOST_PP_FOR_93(s, p, o, m) BOOST_PP_IF(p(94, s), m, BOOST_PP_TUPLE_EAT_2)(94, s) BOOST_PP_IF(p(94, s), BOOST_PP_FOR_94, BOOST_PP_TUPLE_EAT_4)(o(94, s), p, o, m)\n# define BOOST_PP_FOR_94(s, p, o, m) BOOST_PP_IF(p(95, s), m, BOOST_PP_TUPLE_EAT_2)(95, s) BOOST_PP_IF(p(95, s), BOOST_PP_FOR_95, BOOST_PP_TUPLE_EAT_4)(o(95, s), p, o, m)\n# define BOOST_PP_FOR_95(s, p, o, m) BOOST_PP_IF(p(96, s), m, BOOST_PP_TUPLE_EAT_2)(96, s) BOOST_PP_IF(p(96, s), BOOST_PP_FOR_96, BOOST_PP_TUPLE_EAT_4)(o(96, s), p, o, m)\n# define BOOST_PP_FOR_96(s, p, o, m) BOOST_PP_IF(p(97, s), m, BOOST_PP_TUPLE_EAT_2)(97, s) BOOST_PP_IF(p(97, s), BOOST_PP_FOR_97, BOOST_PP_TUPLE_EAT_4)(o(97, s), p, o, m)\n# define BOOST_PP_FOR_97(s, p, o, m) BOOST_PP_IF(p(98, s), m, BOOST_PP_TUPLE_EAT_2)(98, s) BOOST_PP_IF(p(98, s), BOOST_PP_FOR_98, BOOST_PP_TUPLE_EAT_4)(o(98, s), p, o, m)\n# define BOOST_PP_FOR_98(s, p, o, m) BOOST_PP_IF(p(99, s), m, BOOST_PP_TUPLE_EAT_2)(99, s) BOOST_PP_IF(p(99, s), BOOST_PP_FOR_99, BOOST_PP_TUPLE_EAT_4)(o(99, s), p, o, m)\n# define BOOST_PP_FOR_99(s, p, o, m) BOOST_PP_IF(p(100, s), m, BOOST_PP_TUPLE_EAT_2)(100, s) BOOST_PP_IF(p(100, s), BOOST_PP_FOR_100, BOOST_PP_TUPLE_EAT_4)(o(100, s), p, o, m)\n# define BOOST_PP_FOR_100(s, p, o, m) BOOST_PP_IF(p(101, s), m, BOOST_PP_TUPLE_EAT_2)(101, s) BOOST_PP_IF(p(101, s), BOOST_PP_FOR_101, BOOST_PP_TUPLE_EAT_4)(o(101, s), p, o, m)\n# define BOOST_PP_FOR_101(s, p, o, m) BOOST_PP_IF(p(102, s), m, BOOST_PP_TUPLE_EAT_2)(102, s) BOOST_PP_IF(p(102, s), BOOST_PP_FOR_102, 
BOOST_PP_TUPLE_EAT_4)(o(102, s), p, o, m)\n# define BOOST_PP_FOR_102(s, p, o, m) BOOST_PP_IF(p(103, s), m, BOOST_PP_TUPLE_EAT_2)(103, s) BOOST_PP_IF(p(103, s), BOOST_PP_FOR_103, BOOST_PP_TUPLE_EAT_4)(o(103, s), p, o, m)\n# define BOOST_PP_FOR_103(s, p, o, m) BOOST_PP_IF(p(104, s), m, BOOST_PP_TUPLE_EAT_2)(104, s) BOOST_PP_IF(p(104, s), BOOST_PP_FOR_104, BOOST_PP_TUPLE_EAT_4)(o(104, s), p, o, m)\n# define BOOST_PP_FOR_104(s, p, o, m) BOOST_PP_IF(p(105, s), m, BOOST_PP_TUPLE_EAT_2)(105, s) BOOST_PP_IF(p(105, s), BOOST_PP_FOR_105, BOOST_PP_TUPLE_EAT_4)(o(105, s), p, o, m)\n# define BOOST_PP_FOR_105(s, p, o, m) BOOST_PP_IF(p(106, s), m, BOOST_PP_TUPLE_EAT_2)(106, s) BOOST_PP_IF(p(106, s), BOOST_PP_FOR_106, BOOST_PP_TUPLE_EAT_4)(o(106, s), p, o, m)\n# define BOOST_PP_FOR_106(s, p, o, m) BOOST_PP_IF(p(107, s), m, BOOST_PP_TUPLE_EAT_2)(107, s) BOOST_PP_IF(p(107, s), BOOST_PP_FOR_107, BOOST_PP_TUPLE_EAT_4)(o(107, s), p, o, m)\n# define BOOST_PP_FOR_107(s, p, o, m) BOOST_PP_IF(p(108, s), m, BOOST_PP_TUPLE_EAT_2)(108, s) BOOST_PP_IF(p(108, s), BOOST_PP_FOR_108, BOOST_PP_TUPLE_EAT_4)(o(108, s), p, o, m)\n# define BOOST_PP_FOR_108(s, p, o, m) BOOST_PP_IF(p(109, s), m, BOOST_PP_TUPLE_EAT_2)(109, s) BOOST_PP_IF(p(109, s), BOOST_PP_FOR_109, BOOST_PP_TUPLE_EAT_4)(o(109, s), p, o, m)\n# define BOOST_PP_FOR_109(s, p, o, m) BOOST_PP_IF(p(110, s), m, BOOST_PP_TUPLE_EAT_2)(110, s) BOOST_PP_IF(p(110, s), BOOST_PP_FOR_110, BOOST_PP_TUPLE_EAT_4)(o(110, s), p, o, m)\n# define BOOST_PP_FOR_110(s, p, o, m) BOOST_PP_IF(p(111, s), m, BOOST_PP_TUPLE_EAT_2)(111, s) BOOST_PP_IF(p(111, s), BOOST_PP_FOR_111, BOOST_PP_TUPLE_EAT_4)(o(111, s), p, o, m)\n# define BOOST_PP_FOR_111(s, p, o, m) BOOST_PP_IF(p(112, s), m, BOOST_PP_TUPLE_EAT_2)(112, s) BOOST_PP_IF(p(112, s), BOOST_PP_FOR_112, BOOST_PP_TUPLE_EAT_4)(o(112, s), p, o, m)\n# define BOOST_PP_FOR_112(s, p, o, m) BOOST_PP_IF(p(113, s), m, BOOST_PP_TUPLE_EAT_2)(113, s) BOOST_PP_IF(p(113, s), BOOST_PP_FOR_113, BOOST_PP_TUPLE_EAT_4)(o(113, s), p, o, 
m)\n# define BOOST_PP_FOR_113(s, p, o, m) BOOST_PP_IF(p(114, s), m, BOOST_PP_TUPLE_EAT_2)(114, s) BOOST_PP_IF(p(114, s), BOOST_PP_FOR_114, BOOST_PP_TUPLE_EAT_4)(o(114, s), p, o, m)\n# define BOOST_PP_FOR_114(s, p, o, m) BOOST_PP_IF(p(115, s), m, BOOST_PP_TUPLE_EAT_2)(115, s) BOOST_PP_IF(p(115, s), BOOST_PP_FOR_115, BOOST_PP_TUPLE_EAT_4)(o(115, s), p, o, m)\n# define BOOST_PP_FOR_115(s, p, o, m) BOOST_PP_IF(p(116, s), m, BOOST_PP_TUPLE_EAT_2)(116, s) BOOST_PP_IF(p(116, s), BOOST_PP_FOR_116, BOOST_PP_TUPLE_EAT_4)(o(116, s), p, o, m)\n# define BOOST_PP_FOR_116(s, p, o, m) BOOST_PP_IF(p(117, s), m, BOOST_PP_TUPLE_EAT_2)(117, s) BOOST_PP_IF(p(117, s), BOOST_PP_FOR_117, BOOST_PP_TUPLE_EAT_4)(o(117, s), p, o, m)\n# define BOOST_PP_FOR_117(s, p, o, m) BOOST_PP_IF(p(118, s), m, BOOST_PP_TUPLE_EAT_2)(118, s) BOOST_PP_IF(p(118, s), BOOST_PP_FOR_118, BOOST_PP_TUPLE_EAT_4)(o(118, s), p, o, m)\n# define BOOST_PP_FOR_118(s, p, o, m) BOOST_PP_IF(p(119, s), m, BOOST_PP_TUPLE_EAT_2)(119, s) BOOST_PP_IF(p(119, s), BOOST_PP_FOR_119, BOOST_PP_TUPLE_EAT_4)(o(119, s), p, o, m)\n# define BOOST_PP_FOR_119(s, p, o, m) BOOST_PP_IF(p(120, s), m, BOOST_PP_TUPLE_EAT_2)(120, s) BOOST_PP_IF(p(120, s), BOOST_PP_FOR_120, BOOST_PP_TUPLE_EAT_4)(o(120, s), p, o, m)\n# define BOOST_PP_FOR_120(s, p, o, m) BOOST_PP_IF(p(121, s), m, BOOST_PP_TUPLE_EAT_2)(121, s) BOOST_PP_IF(p(121, s), BOOST_PP_FOR_121, BOOST_PP_TUPLE_EAT_4)(o(121, s), p, o, m)\n# define BOOST_PP_FOR_121(s, p, o, m) BOOST_PP_IF(p(122, s), m, BOOST_PP_TUPLE_EAT_2)(122, s) BOOST_PP_IF(p(122, s), BOOST_PP_FOR_122, BOOST_PP_TUPLE_EAT_4)(o(122, s), p, o, m)\n# define BOOST_PP_FOR_122(s, p, o, m) BOOST_PP_IF(p(123, s), m, BOOST_PP_TUPLE_EAT_2)(123, s) BOOST_PP_IF(p(123, s), BOOST_PP_FOR_123, BOOST_PP_TUPLE_EAT_4)(o(123, s), p, o, m)\n# define BOOST_PP_FOR_123(s, p, o, m) BOOST_PP_IF(p(124, s), m, BOOST_PP_TUPLE_EAT_2)(124, s) BOOST_PP_IF(p(124, s), BOOST_PP_FOR_124, BOOST_PP_TUPLE_EAT_4)(o(124, s), p, o, m)\n# define BOOST_PP_FOR_124(s, p, o, m) 
BOOST_PP_IF(p(125, s), m, BOOST_PP_TUPLE_EAT_2)(125, s) BOOST_PP_IF(p(125, s), BOOST_PP_FOR_125, BOOST_PP_TUPLE_EAT_4)(o(125, s), p, o, m)\n# define BOOST_PP_FOR_125(s, p, o, m) BOOST_PP_IF(p(126, s), m, BOOST_PP_TUPLE_EAT_2)(126, s) BOOST_PP_IF(p(126, s), BOOST_PP_FOR_126, BOOST_PP_TUPLE_EAT_4)(o(126, s), p, o, m)\n# define BOOST_PP_FOR_126(s, p, o, m) BOOST_PP_IF(p(127, s), m, BOOST_PP_TUPLE_EAT_2)(127, s) BOOST_PP_IF(p(127, s), BOOST_PP_FOR_127, BOOST_PP_TUPLE_EAT_4)(o(127, s), p, o, m)\n# define BOOST_PP_FOR_127(s, p, o, m) BOOST_PP_IF(p(128, s), m, BOOST_PP_TUPLE_EAT_2)(128, s) BOOST_PP_IF(p(128, s), BOOST_PP_FOR_128, BOOST_PP_TUPLE_EAT_4)(o(128, s), p, o, m)\n# define BOOST_PP_FOR_128(s, p, o, m) BOOST_PP_IF(p(129, s), m, BOOST_PP_TUPLE_EAT_2)(129, s) BOOST_PP_IF(p(129, s), BOOST_PP_FOR_129, BOOST_PP_TUPLE_EAT_4)(o(129, s), p, o, m)\n# define BOOST_PP_FOR_129(s, p, o, m) BOOST_PP_IF(p(130, s), m, BOOST_PP_TUPLE_EAT_2)(130, s) BOOST_PP_IF(p(130, s), BOOST_PP_FOR_130, BOOST_PP_TUPLE_EAT_4)(o(130, s), p, o, m)\n# define BOOST_PP_FOR_130(s, p, o, m) BOOST_PP_IF(p(131, s), m, BOOST_PP_TUPLE_EAT_2)(131, s) BOOST_PP_IF(p(131, s), BOOST_PP_FOR_131, BOOST_PP_TUPLE_EAT_4)(o(131, s), p, o, m)\n# define BOOST_PP_FOR_131(s, p, o, m) BOOST_PP_IF(p(132, s), m, BOOST_PP_TUPLE_EAT_2)(132, s) BOOST_PP_IF(p(132, s), BOOST_PP_FOR_132, BOOST_PP_TUPLE_EAT_4)(o(132, s), p, o, m)\n# define BOOST_PP_FOR_132(s, p, o, m) BOOST_PP_IF(p(133, s), m, BOOST_PP_TUPLE_EAT_2)(133, s) BOOST_PP_IF(p(133, s), BOOST_PP_FOR_133, BOOST_PP_TUPLE_EAT_4)(o(133, s), p, o, m)\n# define BOOST_PP_FOR_133(s, p, o, m) BOOST_PP_IF(p(134, s), m, BOOST_PP_TUPLE_EAT_2)(134, s) BOOST_PP_IF(p(134, s), BOOST_PP_FOR_134, BOOST_PP_TUPLE_EAT_4)(o(134, s), p, o, m)\n# define BOOST_PP_FOR_134(s, p, o, m) BOOST_PP_IF(p(135, s), m, BOOST_PP_TUPLE_EAT_2)(135, s) BOOST_PP_IF(p(135, s), BOOST_PP_FOR_135, BOOST_PP_TUPLE_EAT_4)(o(135, s), p, o, m)\n# define BOOST_PP_FOR_135(s, p, o, m) BOOST_PP_IF(p(136, s), m, 
BOOST_PP_TUPLE_EAT_2)(136, s) BOOST_PP_IF(p(136, s), BOOST_PP_FOR_136, BOOST_PP_TUPLE_EAT_4)(o(136, s), p, o, m)\n# define BOOST_PP_FOR_136(s, p, o, m) BOOST_PP_IF(p(137, s), m, BOOST_PP_TUPLE_EAT_2)(137, s) BOOST_PP_IF(p(137, s), BOOST_PP_FOR_137, BOOST_PP_TUPLE_EAT_4)(o(137, s), p, o, m)\n# define BOOST_PP_FOR_137(s, p, o, m) BOOST_PP_IF(p(138, s), m, BOOST_PP_TUPLE_EAT_2)(138, s) BOOST_PP_IF(p(138, s), BOOST_PP_FOR_138, BOOST_PP_TUPLE_EAT_4)(o(138, s), p, o, m)\n# define BOOST_PP_FOR_138(s, p, o, m) BOOST_PP_IF(p(139, s), m, BOOST_PP_TUPLE_EAT_2)(139, s) BOOST_PP_IF(p(139, s), BOOST_PP_FOR_139, BOOST_PP_TUPLE_EAT_4)(o(139, s), p, o, m)\n# define BOOST_PP_FOR_139(s, p, o, m) BOOST_PP_IF(p(140, s), m, BOOST_PP_TUPLE_EAT_2)(140, s) BOOST_PP_IF(p(140, s), BOOST_PP_FOR_140, BOOST_PP_TUPLE_EAT_4)(o(140, s), p, o, m)\n# define BOOST_PP_FOR_140(s, p, o, m) BOOST_PP_IF(p(141, s), m, BOOST_PP_TUPLE_EAT_2)(141, s) BOOST_PP_IF(p(141, s), BOOST_PP_FOR_141, BOOST_PP_TUPLE_EAT_4)(o(141, s), p, o, m)\n# define BOOST_PP_FOR_141(s, p, o, m) BOOST_PP_IF(p(142, s), m, BOOST_PP_TUPLE_EAT_2)(142, s) BOOST_PP_IF(p(142, s), BOOST_PP_FOR_142, BOOST_PP_TUPLE_EAT_4)(o(142, s), p, o, m)\n# define BOOST_PP_FOR_142(s, p, o, m) BOOST_PP_IF(p(143, s), m, BOOST_PP_TUPLE_EAT_2)(143, s) BOOST_PP_IF(p(143, s), BOOST_PP_FOR_143, BOOST_PP_TUPLE_EAT_4)(o(143, s), p, o, m)\n# define BOOST_PP_FOR_143(s, p, o, m) BOOST_PP_IF(p(144, s), m, BOOST_PP_TUPLE_EAT_2)(144, s) BOOST_PP_IF(p(144, s), BOOST_PP_FOR_144, BOOST_PP_TUPLE_EAT_4)(o(144, s), p, o, m)\n# define BOOST_PP_FOR_144(s, p, o, m) BOOST_PP_IF(p(145, s), m, BOOST_PP_TUPLE_EAT_2)(145, s) BOOST_PP_IF(p(145, s), BOOST_PP_FOR_145, BOOST_PP_TUPLE_EAT_4)(o(145, s), p, o, m)\n# define BOOST_PP_FOR_145(s, p, o, m) BOOST_PP_IF(p(146, s), m, BOOST_PP_TUPLE_EAT_2)(146, s) BOOST_PP_IF(p(146, s), BOOST_PP_FOR_146, BOOST_PP_TUPLE_EAT_4)(o(146, s), p, o, m)\n# define BOOST_PP_FOR_146(s, p, o, m) BOOST_PP_IF(p(147, s), m, BOOST_PP_TUPLE_EAT_2)(147, s) 
BOOST_PP_IF(p(147, s), BOOST_PP_FOR_147, BOOST_PP_TUPLE_EAT_4)(o(147, s), p, o, m)\n# define BOOST_PP_FOR_147(s, p, o, m) BOOST_PP_IF(p(148, s), m, BOOST_PP_TUPLE_EAT_2)(148, s) BOOST_PP_IF(p(148, s), BOOST_PP_FOR_148, BOOST_PP_TUPLE_EAT_4)(o(148, s), p, o, m)\n# define BOOST_PP_FOR_148(s, p, o, m) BOOST_PP_IF(p(149, s), m, BOOST_PP_TUPLE_EAT_2)(149, s) BOOST_PP_IF(p(149, s), BOOST_PP_FOR_149, BOOST_PP_TUPLE_EAT_4)(o(149, s), p, o, m)\n# define BOOST_PP_FOR_149(s, p, o, m) BOOST_PP_IF(p(150, s), m, BOOST_PP_TUPLE_EAT_2)(150, s) BOOST_PP_IF(p(150, s), BOOST_PP_FOR_150, BOOST_PP_TUPLE_EAT_4)(o(150, s), p, o, m)\n# define BOOST_PP_FOR_150(s, p, o, m) BOOST_PP_IF(p(151, s), m, BOOST_PP_TUPLE_EAT_2)(151, s) BOOST_PP_IF(p(151, s), BOOST_PP_FOR_151, BOOST_PP_TUPLE_EAT_4)(o(151, s), p, o, m)\n# define BOOST_PP_FOR_151(s, p, o, m) BOOST_PP_IF(p(152, s), m, BOOST_PP_TUPLE_EAT_2)(152, s) BOOST_PP_IF(p(152, s), BOOST_PP_FOR_152, BOOST_PP_TUPLE_EAT_4)(o(152, s), p, o, m)\n# define BOOST_PP_FOR_152(s, p, o, m) BOOST_PP_IF(p(153, s), m, BOOST_PP_TUPLE_EAT_2)(153, s) BOOST_PP_IF(p(153, s), BOOST_PP_FOR_153, BOOST_PP_TUPLE_EAT_4)(o(153, s), p, o, m)\n# define BOOST_PP_FOR_153(s, p, o, m) BOOST_PP_IF(p(154, s), m, BOOST_PP_TUPLE_EAT_2)(154, s) BOOST_PP_IF(p(154, s), BOOST_PP_FOR_154, BOOST_PP_TUPLE_EAT_4)(o(154, s), p, o, m)\n# define BOOST_PP_FOR_154(s, p, o, m) BOOST_PP_IF(p(155, s), m, BOOST_PP_TUPLE_EAT_2)(155, s) BOOST_PP_IF(p(155, s), BOOST_PP_FOR_155, BOOST_PP_TUPLE_EAT_4)(o(155, s), p, o, m)\n# define BOOST_PP_FOR_155(s, p, o, m) BOOST_PP_IF(p(156, s), m, BOOST_PP_TUPLE_EAT_2)(156, s) BOOST_PP_IF(p(156, s), BOOST_PP_FOR_156, BOOST_PP_TUPLE_EAT_4)(o(156, s), p, o, m)\n# define BOOST_PP_FOR_156(s, p, o, m) BOOST_PP_IF(p(157, s), m, BOOST_PP_TUPLE_EAT_2)(157, s) BOOST_PP_IF(p(157, s), BOOST_PP_FOR_157, BOOST_PP_TUPLE_EAT_4)(o(157, s), p, o, m)\n# define BOOST_PP_FOR_157(s, p, o, m) BOOST_PP_IF(p(158, s), m, BOOST_PP_TUPLE_EAT_2)(158, s) BOOST_PP_IF(p(158, s), BOOST_PP_FOR_158, 
BOOST_PP_TUPLE_EAT_4)(o(158, s), p, o, m)\n# define BOOST_PP_FOR_158(s, p, o, m) BOOST_PP_IF(p(159, s), m, BOOST_PP_TUPLE_EAT_2)(159, s) BOOST_PP_IF(p(159, s), BOOST_PP_FOR_159, BOOST_PP_TUPLE_EAT_4)(o(159, s), p, o, m)\n# define BOOST_PP_FOR_159(s, p, o, m) BOOST_PP_IF(p(160, s), m, BOOST_PP_TUPLE_EAT_2)(160, s) BOOST_PP_IF(p(160, s), BOOST_PP_FOR_160, BOOST_PP_TUPLE_EAT_4)(o(160, s), p, o, m)\n# define BOOST_PP_FOR_160(s, p, o, m) BOOST_PP_IF(p(161, s), m, BOOST_PP_TUPLE_EAT_2)(161, s) BOOST_PP_IF(p(161, s), BOOST_PP_FOR_161, BOOST_PP_TUPLE_EAT_4)(o(161, s), p, o, m)\n# define BOOST_PP_FOR_161(s, p, o, m) BOOST_PP_IF(p(162, s), m, BOOST_PP_TUPLE_EAT_2)(162, s) BOOST_PP_IF(p(162, s), BOOST_PP_FOR_162, BOOST_PP_TUPLE_EAT_4)(o(162, s), p, o, m)\n# define BOOST_PP_FOR_162(s, p, o, m) BOOST_PP_IF(p(163, s), m, BOOST_PP_TUPLE_EAT_2)(163, s) BOOST_PP_IF(p(163, s), BOOST_PP_FOR_163, BOOST_PP_TUPLE_EAT_4)(o(163, s), p, o, m)\n# define BOOST_PP_FOR_163(s, p, o, m) BOOST_PP_IF(p(164, s), m, BOOST_PP_TUPLE_EAT_2)(164, s) BOOST_PP_IF(p(164, s), BOOST_PP_FOR_164, BOOST_PP_TUPLE_EAT_4)(o(164, s), p, o, m)\n# define BOOST_PP_FOR_164(s, p, o, m) BOOST_PP_IF(p(165, s), m, BOOST_PP_TUPLE_EAT_2)(165, s) BOOST_PP_IF(p(165, s), BOOST_PP_FOR_165, BOOST_PP_TUPLE_EAT_4)(o(165, s), p, o, m)\n# define BOOST_PP_FOR_165(s, p, o, m) BOOST_PP_IF(p(166, s), m, BOOST_PP_TUPLE_EAT_2)(166, s) BOOST_PP_IF(p(166, s), BOOST_PP_FOR_166, BOOST_PP_TUPLE_EAT_4)(o(166, s), p, o, m)\n# define BOOST_PP_FOR_166(s, p, o, m) BOOST_PP_IF(p(167, s), m, BOOST_PP_TUPLE_EAT_2)(167, s) BOOST_PP_IF(p(167, s), BOOST_PP_FOR_167, BOOST_PP_TUPLE_EAT_4)(o(167, s), p, o, m)\n# define BOOST_PP_FOR_167(s, p, o, m) BOOST_PP_IF(p(168, s), m, BOOST_PP_TUPLE_EAT_2)(168, s) BOOST_PP_IF(p(168, s), BOOST_PP_FOR_168, BOOST_PP_TUPLE_EAT_4)(o(168, s), p, o, m)\n# define BOOST_PP_FOR_168(s, p, o, m) BOOST_PP_IF(p(169, s), m, BOOST_PP_TUPLE_EAT_2)(169, s) BOOST_PP_IF(p(169, s), BOOST_PP_FOR_169, BOOST_PP_TUPLE_EAT_4)(o(169, s), p, o, 
m)\n# define BOOST_PP_FOR_169(s, p, o, m) BOOST_PP_IF(p(170, s), m, BOOST_PP_TUPLE_EAT_2)(170, s) BOOST_PP_IF(p(170, s), BOOST_PP_FOR_170, BOOST_PP_TUPLE_EAT_4)(o(170, s), p, o, m)\n# define BOOST_PP_FOR_170(s, p, o, m) BOOST_PP_IF(p(171, s), m, BOOST_PP_TUPLE_EAT_2)(171, s) BOOST_PP_IF(p(171, s), BOOST_PP_FOR_171, BOOST_PP_TUPLE_EAT_4)(o(171, s), p, o, m)\n# define BOOST_PP_FOR_171(s, p, o, m) BOOST_PP_IF(p(172, s), m, BOOST_PP_TUPLE_EAT_2)(172, s) BOOST_PP_IF(p(172, s), BOOST_PP_FOR_172, BOOST_PP_TUPLE_EAT_4)(o(172, s), p, o, m)\n# define BOOST_PP_FOR_172(s, p, o, m) BOOST_PP_IF(p(173, s), m, BOOST_PP_TUPLE_EAT_2)(173, s) BOOST_PP_IF(p(173, s), BOOST_PP_FOR_173, BOOST_PP_TUPLE_EAT_4)(o(173, s), p, o, m)\n# define BOOST_PP_FOR_173(s, p, o, m) BOOST_PP_IF(p(174, s), m, BOOST_PP_TUPLE_EAT_2)(174, s) BOOST_PP_IF(p(174, s), BOOST_PP_FOR_174, BOOST_PP_TUPLE_EAT_4)(o(174, s), p, o, m)\n# define BOOST_PP_FOR_174(s, p, o, m) BOOST_PP_IF(p(175, s), m, BOOST_PP_TUPLE_EAT_2)(175, s) BOOST_PP_IF(p(175, s), BOOST_PP_FOR_175, BOOST_PP_TUPLE_EAT_4)(o(175, s), p, o, m)\n# define BOOST_PP_FOR_175(s, p, o, m) BOOST_PP_IF(p(176, s), m, BOOST_PP_TUPLE_EAT_2)(176, s) BOOST_PP_IF(p(176, s), BOOST_PP_FOR_176, BOOST_PP_TUPLE_EAT_4)(o(176, s), p, o, m)\n# define BOOST_PP_FOR_176(s, p, o, m) BOOST_PP_IF(p(177, s), m, BOOST_PP_TUPLE_EAT_2)(177, s) BOOST_PP_IF(p(177, s), BOOST_PP_FOR_177, BOOST_PP_TUPLE_EAT_4)(o(177, s), p, o, m)\n# define BOOST_PP_FOR_177(s, p, o, m) BOOST_PP_IF(p(178, s), m, BOOST_PP_TUPLE_EAT_2)(178, s) BOOST_PP_IF(p(178, s), BOOST_PP_FOR_178, BOOST_PP_TUPLE_EAT_4)(o(178, s), p, o, m)\n# define BOOST_PP_FOR_178(s, p, o, m) BOOST_PP_IF(p(179, s), m, BOOST_PP_TUPLE_EAT_2)(179, s) BOOST_PP_IF(p(179, s), BOOST_PP_FOR_179, BOOST_PP_TUPLE_EAT_4)(o(179, s), p, o, m)\n# define BOOST_PP_FOR_179(s, p, o, m) BOOST_PP_IF(p(180, s), m, BOOST_PP_TUPLE_EAT_2)(180, s) BOOST_PP_IF(p(180, s), BOOST_PP_FOR_180, BOOST_PP_TUPLE_EAT_4)(o(180, s), p, o, m)\n# define BOOST_PP_FOR_180(s, p, o, m) 
BOOST_PP_IF(p(181, s), m, BOOST_PP_TUPLE_EAT_2)(181, s) BOOST_PP_IF(p(181, s), BOOST_PP_FOR_181, BOOST_PP_TUPLE_EAT_4)(o(181, s), p, o, m)\n# define BOOST_PP_FOR_181(s, p, o, m) BOOST_PP_IF(p(182, s), m, BOOST_PP_TUPLE_EAT_2)(182, s) BOOST_PP_IF(p(182, s), BOOST_PP_FOR_182, BOOST_PP_TUPLE_EAT_4)(o(182, s), p, o, m)\n# define BOOST_PP_FOR_182(s, p, o, m) BOOST_PP_IF(p(183, s), m, BOOST_PP_TUPLE_EAT_2)(183, s) BOOST_PP_IF(p(183, s), BOOST_PP_FOR_183, BOOST_PP_TUPLE_EAT_4)(o(183, s), p, o, m)\n# define BOOST_PP_FOR_183(s, p, o, m) BOOST_PP_IF(p(184, s), m, BOOST_PP_TUPLE_EAT_2)(184, s) BOOST_PP_IF(p(184, s), BOOST_PP_FOR_184, BOOST_PP_TUPLE_EAT_4)(o(184, s), p, o, m)\n# define BOOST_PP_FOR_184(s, p, o, m) BOOST_PP_IF(p(185, s), m, BOOST_PP_TUPLE_EAT_2)(185, s) BOOST_PP_IF(p(185, s), BOOST_PP_FOR_185, BOOST_PP_TUPLE_EAT_4)(o(185, s), p, o, m)\n# define BOOST_PP_FOR_185(s, p, o, m) BOOST_PP_IF(p(186, s), m, BOOST_PP_TUPLE_EAT_2)(186, s) BOOST_PP_IF(p(186, s), BOOST_PP_FOR_186, BOOST_PP_TUPLE_EAT_4)(o(186, s), p, o, m)\n# define BOOST_PP_FOR_186(s, p, o, m) BOOST_PP_IF(p(187, s), m, BOOST_PP_TUPLE_EAT_2)(187, s) BOOST_PP_IF(p(187, s), BOOST_PP_FOR_187, BOOST_PP_TUPLE_EAT_4)(o(187, s), p, o, m)\n# define BOOST_PP_FOR_187(s, p, o, m) BOOST_PP_IF(p(188, s), m, BOOST_PP_TUPLE_EAT_2)(188, s) BOOST_PP_IF(p(188, s), BOOST_PP_FOR_188, BOOST_PP_TUPLE_EAT_4)(o(188, s), p, o, m)\n# define BOOST_PP_FOR_188(s, p, o, m) BOOST_PP_IF(p(189, s), m, BOOST_PP_TUPLE_EAT_2)(189, s) BOOST_PP_IF(p(189, s), BOOST_PP_FOR_189, BOOST_PP_TUPLE_EAT_4)(o(189, s), p, o, m)\n# define BOOST_PP_FOR_189(s, p, o, m) BOOST_PP_IF(p(190, s), m, BOOST_PP_TUPLE_EAT_2)(190, s) BOOST_PP_IF(p(190, s), BOOST_PP_FOR_190, BOOST_PP_TUPLE_EAT_4)(o(190, s), p, o, m)\n# define BOOST_PP_FOR_190(s, p, o, m) BOOST_PP_IF(p(191, s), m, BOOST_PP_TUPLE_EAT_2)(191, s) BOOST_PP_IF(p(191, s), BOOST_PP_FOR_191, BOOST_PP_TUPLE_EAT_4)(o(191, s), p, o, m)\n# define BOOST_PP_FOR_191(s, p, o, m) BOOST_PP_IF(p(192, s), m, 
BOOST_PP_TUPLE_EAT_2)(192, s) BOOST_PP_IF(p(192, s), BOOST_PP_FOR_192, BOOST_PP_TUPLE_EAT_4)(o(192, s), p, o, m)\n# define BOOST_PP_FOR_192(s, p, o, m) BOOST_PP_IF(p(193, s), m, BOOST_PP_TUPLE_EAT_2)(193, s) BOOST_PP_IF(p(193, s), BOOST_PP_FOR_193, BOOST_PP_TUPLE_EAT_4)(o(193, s), p, o, m)\n# define BOOST_PP_FOR_193(s, p, o, m) BOOST_PP_IF(p(194, s), m, BOOST_PP_TUPLE_EAT_2)(194, s) BOOST_PP_IF(p(194, s), BOOST_PP_FOR_194, BOOST_PP_TUPLE_EAT_4)(o(194, s), p, o, m)\n# define BOOST_PP_FOR_194(s, p, o, m) BOOST_PP_IF(p(195, s), m, BOOST_PP_TUPLE_EAT_2)(195, s) BOOST_PP_IF(p(195, s), BOOST_PP_FOR_195, BOOST_PP_TUPLE_EAT_4)(o(195, s), p, o, m)\n# define BOOST_PP_FOR_195(s, p, o, m) BOOST_PP_IF(p(196, s), m, BOOST_PP_TUPLE_EAT_2)(196, s) BOOST_PP_IF(p(196, s), BOOST_PP_FOR_196, BOOST_PP_TUPLE_EAT_4)(o(196, s), p, o, m)\n# define BOOST_PP_FOR_196(s, p, o, m) BOOST_PP_IF(p(197, s), m, BOOST_PP_TUPLE_EAT_2)(197, s) BOOST_PP_IF(p(197, s), BOOST_PP_FOR_197, BOOST_PP_TUPLE_EAT_4)(o(197, s), p, o, m)\n# define BOOST_PP_FOR_197(s, p, o, m) BOOST_PP_IF(p(198, s), m, BOOST_PP_TUPLE_EAT_2)(198, s) BOOST_PP_IF(p(198, s), BOOST_PP_FOR_198, BOOST_PP_TUPLE_EAT_4)(o(198, s), p, o, m)\n# define BOOST_PP_FOR_198(s, p, o, m) BOOST_PP_IF(p(199, s), m, BOOST_PP_TUPLE_EAT_2)(199, s) BOOST_PP_IF(p(199, s), BOOST_PP_FOR_199, BOOST_PP_TUPLE_EAT_4)(o(199, s), p, o, m)\n# define BOOST_PP_FOR_199(s, p, o, m) BOOST_PP_IF(p(200, s), m, BOOST_PP_TUPLE_EAT_2)(200, s) BOOST_PP_IF(p(200, s), BOOST_PP_FOR_200, BOOST_PP_TUPLE_EAT_4)(o(200, s), p, o, m)\n# define BOOST_PP_FOR_200(s, p, o, m) BOOST_PP_IF(p(201, s), m, BOOST_PP_TUPLE_EAT_2)(201, s) BOOST_PP_IF(p(201, s), BOOST_PP_FOR_201, BOOST_PP_TUPLE_EAT_4)(o(201, s), p, o, m)\n# define BOOST_PP_FOR_201(s, p, o, m) BOOST_PP_IF(p(202, s), m, BOOST_PP_TUPLE_EAT_2)(202, s) BOOST_PP_IF(p(202, s), BOOST_PP_FOR_202, BOOST_PP_TUPLE_EAT_4)(o(202, s), p, o, m)\n# define BOOST_PP_FOR_202(s, p, o, m) BOOST_PP_IF(p(203, s), m, BOOST_PP_TUPLE_EAT_2)(203, s) 
BOOST_PP_IF(p(203, s), BOOST_PP_FOR_203, BOOST_PP_TUPLE_EAT_4)(o(203, s), p, o, m)\n# define BOOST_PP_FOR_203(s, p, o, m) BOOST_PP_IF(p(204, s), m, BOOST_PP_TUPLE_EAT_2)(204, s) BOOST_PP_IF(p(204, s), BOOST_PP_FOR_204, BOOST_PP_TUPLE_EAT_4)(o(204, s), p, o, m)\n# define BOOST_PP_FOR_204(s, p, o, m) BOOST_PP_IF(p(205, s), m, BOOST_PP_TUPLE_EAT_2)(205, s) BOOST_PP_IF(p(205, s), BOOST_PP_FOR_205, BOOST_PP_TUPLE_EAT_4)(o(205, s), p, o, m)\n# define BOOST_PP_FOR_205(s, p, o, m) BOOST_PP_IF(p(206, s), m, BOOST_PP_TUPLE_EAT_2)(206, s) BOOST_PP_IF(p(206, s), BOOST_PP_FOR_206, BOOST_PP_TUPLE_EAT_4)(o(206, s), p, o, m)\n# define BOOST_PP_FOR_206(s, p, o, m) BOOST_PP_IF(p(207, s), m, BOOST_PP_TUPLE_EAT_2)(207, s) BOOST_PP_IF(p(207, s), BOOST_PP_FOR_207, BOOST_PP_TUPLE_EAT_4)(o(207, s), p, o, m)\n# define BOOST_PP_FOR_207(s, p, o, m) BOOST_PP_IF(p(208, s), m, BOOST_PP_TUPLE_EAT_2)(208, s) BOOST_PP_IF(p(208, s), BOOST_PP_FOR_208, BOOST_PP_TUPLE_EAT_4)(o(208, s), p, o, m)\n# define BOOST_PP_FOR_208(s, p, o, m) BOOST_PP_IF(p(209, s), m, BOOST_PP_TUPLE_EAT_2)(209, s) BOOST_PP_IF(p(209, s), BOOST_PP_FOR_209, BOOST_PP_TUPLE_EAT_4)(o(209, s), p, o, m)\n# define BOOST_PP_FOR_209(s, p, o, m) BOOST_PP_IF(p(210, s), m, BOOST_PP_TUPLE_EAT_2)(210, s) BOOST_PP_IF(p(210, s), BOOST_PP_FOR_210, BOOST_PP_TUPLE_EAT_4)(o(210, s), p, o, m)\n# define BOOST_PP_FOR_210(s, p, o, m) BOOST_PP_IF(p(211, s), m, BOOST_PP_TUPLE_EAT_2)(211, s) BOOST_PP_IF(p(211, s), BOOST_PP_FOR_211, BOOST_PP_TUPLE_EAT_4)(o(211, s), p, o, m)\n# define BOOST_PP_FOR_211(s, p, o, m) BOOST_PP_IF(p(212, s), m, BOOST_PP_TUPLE_EAT_2)(212, s) BOOST_PP_IF(p(212, s), BOOST_PP_FOR_212, BOOST_PP_TUPLE_EAT_4)(o(212, s), p, o, m)\n# define BOOST_PP_FOR_212(s, p, o, m) BOOST_PP_IF(p(213, s), m, BOOST_PP_TUPLE_EAT_2)(213, s) BOOST_PP_IF(p(213, s), BOOST_PP_FOR_213, BOOST_PP_TUPLE_EAT_4)(o(213, s), p, o, m)\n# define BOOST_PP_FOR_213(s, p, o, m) BOOST_PP_IF(p(214, s), m, BOOST_PP_TUPLE_EAT_2)(214, s) BOOST_PP_IF(p(214, s), BOOST_PP_FOR_214, 
BOOST_PP_TUPLE_EAT_4)(o(214, s), p, o, m)\n# define BOOST_PP_FOR_214(s, p, o, m) BOOST_PP_IF(p(215, s), m, BOOST_PP_TUPLE_EAT_2)(215, s) BOOST_PP_IF(p(215, s), BOOST_PP_FOR_215, BOOST_PP_TUPLE_EAT_4)(o(215, s), p, o, m)\n# define BOOST_PP_FOR_215(s, p, o, m) BOOST_PP_IF(p(216, s), m, BOOST_PP_TUPLE_EAT_2)(216, s) BOOST_PP_IF(p(216, s), BOOST_PP_FOR_216, BOOST_PP_TUPLE_EAT_4)(o(216, s), p, o, m)\n# define BOOST_PP_FOR_216(s, p, o, m) BOOST_PP_IF(p(217, s), m, BOOST_PP_TUPLE_EAT_2)(217, s) BOOST_PP_IF(p(217, s), BOOST_PP_FOR_217, BOOST_PP_TUPLE_EAT_4)(o(217, s), p, o, m)\n# define BOOST_PP_FOR_217(s, p, o, m) BOOST_PP_IF(p(218, s), m, BOOST_PP_TUPLE_EAT_2)(218, s) BOOST_PP_IF(p(218, s), BOOST_PP_FOR_218, BOOST_PP_TUPLE_EAT_4)(o(218, s), p, o, m)\n# define BOOST_PP_FOR_218(s, p, o, m) BOOST_PP_IF(p(219, s), m, BOOST_PP_TUPLE_EAT_2)(219, s) BOOST_PP_IF(p(219, s), BOOST_PP_FOR_219, BOOST_PP_TUPLE_EAT_4)(o(219, s), p, o, m)\n# define BOOST_PP_FOR_219(s, p, o, m) BOOST_PP_IF(p(220, s), m, BOOST_PP_TUPLE_EAT_2)(220, s) BOOST_PP_IF(p(220, s), BOOST_PP_FOR_220, BOOST_PP_TUPLE_EAT_4)(o(220, s), p, o, m)\n# define BOOST_PP_FOR_220(s, p, o, m) BOOST_PP_IF(p(221, s), m, BOOST_PP_TUPLE_EAT_2)(221, s) BOOST_PP_IF(p(221, s), BOOST_PP_FOR_221, BOOST_PP_TUPLE_EAT_4)(o(221, s), p, o, m)\n# define BOOST_PP_FOR_221(s, p, o, m) BOOST_PP_IF(p(222, s), m, BOOST_PP_TUPLE_EAT_2)(222, s) BOOST_PP_IF(p(222, s), BOOST_PP_FOR_222, BOOST_PP_TUPLE_EAT_4)(o(222, s), p, o, m)\n# define BOOST_PP_FOR_222(s, p, o, m) BOOST_PP_IF(p(223, s), m, BOOST_PP_TUPLE_EAT_2)(223, s) BOOST_PP_IF(p(223, s), BOOST_PP_FOR_223, BOOST_PP_TUPLE_EAT_4)(o(223, s), p, o, m)\n# define BOOST_PP_FOR_223(s, p, o, m) BOOST_PP_IF(p(224, s), m, BOOST_PP_TUPLE_EAT_2)(224, s) BOOST_PP_IF(p(224, s), BOOST_PP_FOR_224, BOOST_PP_TUPLE_EAT_4)(o(224, s), p, o, m)\n# define BOOST_PP_FOR_224(s, p, o, m) BOOST_PP_IF(p(225, s), m, BOOST_PP_TUPLE_EAT_2)(225, s) BOOST_PP_IF(p(225, s), BOOST_PP_FOR_225, BOOST_PP_TUPLE_EAT_4)(o(225, s), p, o, 
m)\n# define BOOST_PP_FOR_225(s, p, o, m) BOOST_PP_IF(p(226, s), m, BOOST_PP_TUPLE_EAT_2)(226, s) BOOST_PP_IF(p(226, s), BOOST_PP_FOR_226, BOOST_PP_TUPLE_EAT_4)(o(226, s), p, o, m)\n# define BOOST_PP_FOR_226(s, p, o, m) BOOST_PP_IF(p(227, s), m, BOOST_PP_TUPLE_EAT_2)(227, s) BOOST_PP_IF(p(227, s), BOOST_PP_FOR_227, BOOST_PP_TUPLE_EAT_4)(o(227, s), p, o, m)\n# define BOOST_PP_FOR_227(s, p, o, m) BOOST_PP_IF(p(228, s), m, BOOST_PP_TUPLE_EAT_2)(228, s) BOOST_PP_IF(p(228, s), BOOST_PP_FOR_228, BOOST_PP_TUPLE_EAT_4)(o(228, s), p, o, m)\n# define BOOST_PP_FOR_228(s, p, o, m) BOOST_PP_IF(p(229, s), m, BOOST_PP_TUPLE_EAT_2)(229, s) BOOST_PP_IF(p(229, s), BOOST_PP_FOR_229, BOOST_PP_TUPLE_EAT_4)(o(229, s), p, o, m)\n# define BOOST_PP_FOR_229(s, p, o, m) BOOST_PP_IF(p(230, s), m, BOOST_PP_TUPLE_EAT_2)(230, s) BOOST_PP_IF(p(230, s), BOOST_PP_FOR_230, BOOST_PP_TUPLE_EAT_4)(o(230, s), p, o, m)\n# define BOOST_PP_FOR_230(s, p, o, m) BOOST_PP_IF(p(231, s), m, BOOST_PP_TUPLE_EAT_2)(231, s) BOOST_PP_IF(p(231, s), BOOST_PP_FOR_231, BOOST_PP_TUPLE_EAT_4)(o(231, s), p, o, m)\n# define BOOST_PP_FOR_231(s, p, o, m) BOOST_PP_IF(p(232, s), m, BOOST_PP_TUPLE_EAT_2)(232, s) BOOST_PP_IF(p(232, s), BOOST_PP_FOR_232, BOOST_PP_TUPLE_EAT_4)(o(232, s), p, o, m)\n# define BOOST_PP_FOR_232(s, p, o, m) BOOST_PP_IF(p(233, s), m, BOOST_PP_TUPLE_EAT_2)(233, s) BOOST_PP_IF(p(233, s), BOOST_PP_FOR_233, BOOST_PP_TUPLE_EAT_4)(o(233, s), p, o, m)\n# define BOOST_PP_FOR_233(s, p, o, m) BOOST_PP_IF(p(234, s), m, BOOST_PP_TUPLE_EAT_2)(234, s) BOOST_PP_IF(p(234, s), BOOST_PP_FOR_234, BOOST_PP_TUPLE_EAT_4)(o(234, s), p, o, m)\n# define BOOST_PP_FOR_234(s, p, o, m) BOOST_PP_IF(p(235, s), m, BOOST_PP_TUPLE_EAT_2)(235, s) BOOST_PP_IF(p(235, s), BOOST_PP_FOR_235, BOOST_PP_TUPLE_EAT_4)(o(235, s), p, o, m)\n# define BOOST_PP_FOR_235(s, p, o, m) BOOST_PP_IF(p(236, s), m, BOOST_PP_TUPLE_EAT_2)(236, s) BOOST_PP_IF(p(236, s), BOOST_PP_FOR_236, BOOST_PP_TUPLE_EAT_4)(o(236, s), p, o, m)\n# define BOOST_PP_FOR_236(s, p, o, m) 
BOOST_PP_IF(p(237, s), m, BOOST_PP_TUPLE_EAT_2)(237, s) BOOST_PP_IF(p(237, s), BOOST_PP_FOR_237, BOOST_PP_TUPLE_EAT_4)(o(237, s), p, o, m)\n# define BOOST_PP_FOR_237(s, p, o, m) BOOST_PP_IF(p(238, s), m, BOOST_PP_TUPLE_EAT_2)(238, s) BOOST_PP_IF(p(238, s), BOOST_PP_FOR_238, BOOST_PP_TUPLE_EAT_4)(o(238, s), p, o, m)\n# define BOOST_PP_FOR_238(s, p, o, m) BOOST_PP_IF(p(239, s), m, BOOST_PP_TUPLE_EAT_2)(239, s) BOOST_PP_IF(p(239, s), BOOST_PP_FOR_239, BOOST_PP_TUPLE_EAT_4)(o(239, s), p, o, m)\n# define BOOST_PP_FOR_239(s, p, o, m) BOOST_PP_IF(p(240, s), m, BOOST_PP_TUPLE_EAT_2)(240, s) BOOST_PP_IF(p(240, s), BOOST_PP_FOR_240, BOOST_PP_TUPLE_EAT_4)(o(240, s), p, o, m)\n# define BOOST_PP_FOR_240(s, p, o, m) BOOST_PP_IF(p(241, s), m, BOOST_PP_TUPLE_EAT_2)(241, s) BOOST_PP_IF(p(241, s), BOOST_PP_FOR_241, BOOST_PP_TUPLE_EAT_4)(o(241, s), p, o, m)\n# define BOOST_PP_FOR_241(s, p, o, m) BOOST_PP_IF(p(242, s), m, BOOST_PP_TUPLE_EAT_2)(242, s) BOOST_PP_IF(p(242, s), BOOST_PP_FOR_242, BOOST_PP_TUPLE_EAT_4)(o(242, s), p, o, m)\n# define BOOST_PP_FOR_242(s, p, o, m) BOOST_PP_IF(p(243, s), m, BOOST_PP_TUPLE_EAT_2)(243, s) BOOST_PP_IF(p(243, s), BOOST_PP_FOR_243, BOOST_PP_TUPLE_EAT_4)(o(243, s), p, o, m)\n# define BOOST_PP_FOR_243(s, p, o, m) BOOST_PP_IF(p(244, s), m, BOOST_PP_TUPLE_EAT_2)(244, s) BOOST_PP_IF(p(244, s), BOOST_PP_FOR_244, BOOST_PP_TUPLE_EAT_4)(o(244, s), p, o, m)\n# define BOOST_PP_FOR_244(s, p, o, m) BOOST_PP_IF(p(245, s), m, BOOST_PP_TUPLE_EAT_2)(245, s) BOOST_PP_IF(p(245, s), BOOST_PP_FOR_245, BOOST_PP_TUPLE_EAT_4)(o(245, s), p, o, m)\n# define BOOST_PP_FOR_245(s, p, o, m) BOOST_PP_IF(p(246, s), m, BOOST_PP_TUPLE_EAT_2)(246, s) BOOST_PP_IF(p(246, s), BOOST_PP_FOR_246, BOOST_PP_TUPLE_EAT_4)(o(246, s), p, o, m)\n# define BOOST_PP_FOR_246(s, p, o, m) BOOST_PP_IF(p(247, s), m, BOOST_PP_TUPLE_EAT_2)(247, s) BOOST_PP_IF(p(247, s), BOOST_PP_FOR_247, BOOST_PP_TUPLE_EAT_4)(o(247, s), p, o, m)\n# define BOOST_PP_FOR_247(s, p, o, m) BOOST_PP_IF(p(248, s), m, 
BOOST_PP_TUPLE_EAT_2)(248, s) BOOST_PP_IF(p(248, s), BOOST_PP_FOR_248, BOOST_PP_TUPLE_EAT_4)(o(248, s), p, o, m)\n# define BOOST_PP_FOR_248(s, p, o, m) BOOST_PP_IF(p(249, s), m, BOOST_PP_TUPLE_EAT_2)(249, s) BOOST_PP_IF(p(249, s), BOOST_PP_FOR_249, BOOST_PP_TUPLE_EAT_4)(o(249, s), p, o, m)\n# define BOOST_PP_FOR_249(s, p, o, m) BOOST_PP_IF(p(250, s), m, BOOST_PP_TUPLE_EAT_2)(250, s) BOOST_PP_IF(p(250, s), BOOST_PP_FOR_250, BOOST_PP_TUPLE_EAT_4)(o(250, s), p, o, m)\n# define BOOST_PP_FOR_250(s, p, o, m) BOOST_PP_IF(p(251, s), m, BOOST_PP_TUPLE_EAT_2)(251, s) BOOST_PP_IF(p(251, s), BOOST_PP_FOR_251, BOOST_PP_TUPLE_EAT_4)(o(251, s), p, o, m)\n# define BOOST_PP_FOR_251(s, p, o, m) BOOST_PP_IF(p(252, s), m, BOOST_PP_TUPLE_EAT_2)(252, s) BOOST_PP_IF(p(252, s), BOOST_PP_FOR_252, BOOST_PP_TUPLE_EAT_4)(o(252, s), p, o, m)\n# define BOOST_PP_FOR_252(s, p, o, m) BOOST_PP_IF(p(253, s), m, BOOST_PP_TUPLE_EAT_2)(253, s) BOOST_PP_IF(p(253, s), BOOST_PP_FOR_253, BOOST_PP_TUPLE_EAT_4)(o(253, s), p, o, m)\n# define BOOST_PP_FOR_253(s, p, o, m) BOOST_PP_IF(p(254, s), m, BOOST_PP_TUPLE_EAT_2)(254, s) BOOST_PP_IF(p(254, s), BOOST_PP_FOR_254, BOOST_PP_TUPLE_EAT_4)(o(254, s), p, o, m)\n# define BOOST_PP_FOR_254(s, p, o, m) BOOST_PP_IF(p(255, s), m, BOOST_PP_TUPLE_EAT_2)(255, s) BOOST_PP_IF(p(255, s), BOOST_PP_FOR_255, BOOST_PP_TUPLE_EAT_4)(o(255, s), p, o, m)\n# define BOOST_PP_FOR_255(s, p, o, m) BOOST_PP_IF(p(256, s), m, BOOST_PP_TUPLE_EAT_2)(256, s) BOOST_PP_IF(p(256, s), BOOST_PP_FOR_256, BOOST_PP_TUPLE_EAT_4)(o(256, s), p, o, m)\n# define BOOST_PP_FOR_256(s, p, o, m) BOOST_PP_IF(p(257, s), m, BOOST_PP_TUPLE_EAT_2)(257, s) BOOST_PP_IF(p(257, s), BOOST_PP_FOR_257, BOOST_PP_TUPLE_EAT_4)(o(257, s), p, o, m)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/enum.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_ENUM_HPP\n# define BOOST_PREPROCESSOR_REPETITION_ENUM_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n# include <boost/preprocessor/punctuation/comma_if.hpp>\n# include <boost/preprocessor/repetition/repeat.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_ENUM */\n#\n# if 0\n#    define BOOST_PP_ENUM(count, macro, data)\n# endif\n#\n# define BOOST_PP_ENUM BOOST_PP_CAT(BOOST_PP_ENUM_, BOOST_PP_AUTO_REC(BOOST_PP_REPEAT_P, 4))\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_1(c, m, d) BOOST_PP_REPEAT_1(c, BOOST_PP_ENUM_M_1, (m, d))\n#    define BOOST_PP_ENUM_2(c, m, d) BOOST_PP_REPEAT_2(c, BOOST_PP_ENUM_M_2, (m, d))\n#    define BOOST_PP_ENUM_3(c, m, d) BOOST_PP_REPEAT_3(c, BOOST_PP_ENUM_M_3, (m, d))\n# else\n#    define BOOST_PP_ENUM_1(c, m, d) BOOST_PP_ENUM_1_I(c, m, d)\n#    define BOOST_PP_ENUM_2(c, m, d) BOOST_PP_ENUM_2_I(c, m, d)\n#    define BOOST_PP_ENUM_3(c, m, d) BOOST_PP_ENUM_3_I(c, m, d)\n#    define BOOST_PP_ENUM_1_I(c, m, d) BOOST_PP_REPEAT_1(c, BOOST_PP_ENUM_M_1, (m, d))\n#    define BOOST_PP_ENUM_2_I(c, m, d) BOOST_PP_REPEAT_2(c, BOOST_PP_ENUM_M_2, (m, d))\n#    define BOOST_PP_ENUM_3_I(c, m, d) BOOST_PP_REPEAT_3(c, BOOST_PP_ENUM_M_3, (m, d))\n# endif\n#\n# define BOOST_PP_ENUM_4(c, m, d) BOOST_PP_ERROR(0x0003)\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define 
BOOST_PP_ENUM_M_1(z, n, md) BOOST_PP_ENUM_M_1_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_M_2(z, n, md) BOOST_PP_ENUM_M_2_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_M_3(z, n, md) BOOST_PP_ENUM_M_3_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_M_1_IM(z, n, im) BOOST_PP_ENUM_M_1_I(z, n, im)\n#    define BOOST_PP_ENUM_M_2_IM(z, n, im) BOOST_PP_ENUM_M_2_I(z, n, im)\n#    define BOOST_PP_ENUM_M_3_IM(z, n, im) BOOST_PP_ENUM_M_3_I(z, n, im)\n# else\n#    define BOOST_PP_ENUM_M_1(z, n, md) BOOST_PP_ENUM_M_1_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n#    define BOOST_PP_ENUM_M_2(z, n, md) BOOST_PP_ENUM_M_2_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n#    define BOOST_PP_ENUM_M_3(z, n, md) BOOST_PP_ENUM_M_3_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n# endif\n#\n# define BOOST_PP_ENUM_M_1_I(z, n, m, d) BOOST_PP_COMMA_IF(n) m(z, n, d)\n# define BOOST_PP_ENUM_M_2_I(z, n, m, d) BOOST_PP_COMMA_IF(n) m(z, n, d)\n# define BOOST_PP_ENUM_M_3_I(z, n, m, d) BOOST_PP_COMMA_IF(n) m(z, n, d)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/enum_binary_params.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_ENUM_BINARY_PARAMS_HPP\n# define BOOST_PREPROCESSOR_REPETITION_ENUM_BINARY_PARAMS_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/punctuation/comma_if.hpp>\n# include <boost/preprocessor/repetition/repeat.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_ENUM_BINARY_PARAMS */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_BINARY_PARAMS(count, p1, p2) BOOST_PP_REPEAT(count, BOOST_PP_ENUM_BINARY_PARAMS_M, (p1, p2))\n# else\n#    define BOOST_PP_ENUM_BINARY_PARAMS(count, p1, p2) BOOST_PP_ENUM_BINARY_PARAMS_I(count, p1, p2)\n#    define BOOST_PP_ENUM_BINARY_PARAMS_I(count, p1, p2) BOOST_PP_REPEAT(count, BOOST_PP_ENUM_BINARY_PARAMS_M, (p1, p2))\n# endif\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_ENUM_BINARY_PARAMS_M(z, n, pp) BOOST_PP_ENUM_BINARY_PARAMS_M_IM(z, n, BOOST_PP_TUPLE_REM_2 pp)\n#    define BOOST_PP_ENUM_BINARY_PARAMS_M_IM(z, n, im) BOOST_PP_ENUM_BINARY_PARAMS_M_I(z, n, im)\n# else\n#    define BOOST_PP_ENUM_BINARY_PARAMS_M(z, n, pp) BOOST_PP_ENUM_BINARY_PARAMS_M_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, pp), BOOST_PP_TUPLE_ELEM(2, 1, pp))\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & 
BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_ENUM_BINARY_PARAMS_M_I(z, n, p1, p2) BOOST_PP_ENUM_BINARY_PARAMS_M_II(z, n, p1, p2)\n#    define BOOST_PP_ENUM_BINARY_PARAMS_M_II(z, n, p1, p2) BOOST_PP_COMMA_IF(n) p1 ## n p2 ## n\n# else\n#    define BOOST_PP_ENUM_BINARY_PARAMS_M_I(z, n, p1, p2) BOOST_PP_COMMA_IF(n) BOOST_PP_CAT(p1, n) BOOST_PP_CAT(p2, n)\n# endif\n#\n# /* BOOST_PP_ENUM_BINARY_PARAMS_Z */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_BINARY_PARAMS_Z(z, count, p1, p2) BOOST_PP_REPEAT_ ## z(count, BOOST_PP_ENUM_BINARY_PARAMS_M, (p1, p2))\n# else\n#    define BOOST_PP_ENUM_BINARY_PARAMS_Z(z, count, p1, p2) BOOST_PP_ENUM_BINARY_PARAMS_Z_I(z, count, p1, p2)\n#    define BOOST_PP_ENUM_BINARY_PARAMS_Z_I(z, count, p1, p2) BOOST_PP_REPEAT_ ## z(count, BOOST_PP_ENUM_BINARY_PARAMS_M, (p1, p2))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/enum_params.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_ENUM_PARAMS_HPP\n# define BOOST_PREPROCESSOR_REPETITION_ENUM_PARAMS_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/punctuation/comma_if.hpp>\n# include <boost/preprocessor/repetition/repeat.hpp>\n#\n# /* BOOST_PP_ENUM_PARAMS */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_PARAMS(count, param) BOOST_PP_REPEAT(count, BOOST_PP_ENUM_PARAMS_M, param)\n# else\n#    define BOOST_PP_ENUM_PARAMS(count, param) BOOST_PP_ENUM_PARAMS_I(count, param)\n#    define BOOST_PP_ENUM_PARAMS_I(count, param) BOOST_PP_REPEAT(count, BOOST_PP_ENUM_PARAMS_M, param)\n# endif\n#\n# define BOOST_PP_ENUM_PARAMS_M(z, n, param) BOOST_PP_COMMA_IF(n) param ## n\n#\n# /* BOOST_PP_ENUM_PARAMS_Z */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_PARAMS_Z(z, count, param) BOOST_PP_REPEAT_ ## z(count, BOOST_PP_ENUM_PARAMS_M, param)\n# else\n#    define BOOST_PP_ENUM_PARAMS_Z(z, count, param) BOOST_PP_ENUM_PARAMS_Z_I(z, count, param)\n#    define BOOST_PP_ENUM_PARAMS_Z_I(z, count, param) BOOST_PP_REPEAT_ ## z(count, BOOST_PP_ENUM_PARAMS_M, param)\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/enum_params_with_a_default.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_ENUM_PARAMS_WITH_A_DEFAULT_HPP\n# define BOOST_PREPROCESSOR_REPETITION_ENUM_PARAMS_WITH_A_DEFAULT_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/facilities/intercept.hpp>\n# include <boost/preprocessor/repetition/enum_binary_params.hpp>\n#\n# /* BOOST_PP_ENUM_PARAMS_WITH_A_DEFAULT */\n#\n# define BOOST_PP_ENUM_PARAMS_WITH_A_DEFAULT(count, param, def) BOOST_PP_ENUM_BINARY_PARAMS(count, param, = def BOOST_PP_INTERCEPT)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/enum_shifted.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_ENUM_SHIFTED_HPP\n# define BOOST_PREPROCESSOR_REPETITION_ENUM_SHIFTED_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/arithmetic/inc.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n# include <boost/preprocessor/punctuation/comma_if.hpp>\n# include <boost/preprocessor/repetition/repeat.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_ENUM_SHIFTED */\n#\n# if 0\n#    define BOOST_PP_ENUM_SHIFTED(count, macro, data)\n# endif\n#\n# define BOOST_PP_ENUM_SHIFTED BOOST_PP_CAT(BOOST_PP_ENUM_SHIFTED_, BOOST_PP_AUTO_REC(BOOST_PP_REPEAT_P, 4))\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_SHIFTED_1(c, m, d) BOOST_PP_REPEAT_1(BOOST_PP_DEC(c), BOOST_PP_ENUM_SHIFTED_M_1, (m, d))\n#    define BOOST_PP_ENUM_SHIFTED_2(c, m, d) BOOST_PP_REPEAT_2(BOOST_PP_DEC(c), BOOST_PP_ENUM_SHIFTED_M_2, (m, d))\n#    define BOOST_PP_ENUM_SHIFTED_3(c, m, d) BOOST_PP_REPEAT_3(BOOST_PP_DEC(c), BOOST_PP_ENUM_SHIFTED_M_3, (m, d))\n# else\n#    define BOOST_PP_ENUM_SHIFTED_1(c, m, d) BOOST_PP_ENUM_SHIFTED_1_I(c, m, d)\n#    define BOOST_PP_ENUM_SHIFTED_2(c, m, d) BOOST_PP_ENUM_SHIFTED_1_2(c, m, d)\n#    define BOOST_PP_ENUM_SHIFTED_3(c, m, d) BOOST_PP_ENUM_SHIFTED_1_3(c, m, d)\n#    define BOOST_PP_ENUM_SHIFTED_1_I(c, m, d) BOOST_PP_REPEAT_1(BOOST_PP_DEC(c), BOOST_PP_ENUM_SHIFTED_M_1, (m, d))\n#    
define BOOST_PP_ENUM_SHIFTED_2_I(c, m, d) BOOST_PP_REPEAT_2(BOOST_PP_DEC(c), BOOST_PP_ENUM_SHIFTED_M_2, (m, d))\n#    define BOOST_PP_ENUM_SHIFTED_3_I(c, m, d) BOOST_PP_REPEAT_3(BOOST_PP_DEC(c), BOOST_PP_ENUM_SHIFTED_M_3, (m, d))\n# endif\n#\n# define BOOST_PP_ENUM_SHIFTED_4(c, m, d) BOOST_PP_ERROR(0x0003)\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_ENUM_SHIFTED_M_1(z, n, md) BOOST_PP_ENUM_SHIFTED_M_1_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_SHIFTED_M_2(z, n, md) BOOST_PP_ENUM_SHIFTED_M_2_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_SHIFTED_M_3(z, n, md) BOOST_PP_ENUM_SHIFTED_M_3_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_SHIFTED_M_1_IM(z, n, im) BOOST_PP_ENUM_SHIFTED_M_1_I(z, n, im)\n#    define BOOST_PP_ENUM_SHIFTED_M_2_IM(z, n, im) BOOST_PP_ENUM_SHIFTED_M_2_I(z, n, im)\n#    define BOOST_PP_ENUM_SHIFTED_M_3_IM(z, n, im) BOOST_PP_ENUM_SHIFTED_M_3_I(z, n, im)\n# else\n#    define BOOST_PP_ENUM_SHIFTED_M_1(z, n, md) BOOST_PP_ENUM_SHIFTED_M_1_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n#    define BOOST_PP_ENUM_SHIFTED_M_2(z, n, md) BOOST_PP_ENUM_SHIFTED_M_2_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n#    define BOOST_PP_ENUM_SHIFTED_M_3(z, n, md) BOOST_PP_ENUM_SHIFTED_M_3_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n# endif\n#\n# define BOOST_PP_ENUM_SHIFTED_M_1_I(z, n, m, d) BOOST_PP_COMMA_IF(n) m(z, BOOST_PP_INC(n), d)\n# define BOOST_PP_ENUM_SHIFTED_M_2_I(z, n, m, d) BOOST_PP_COMMA_IF(n) m(z, BOOST_PP_INC(n), d)\n# define BOOST_PP_ENUM_SHIFTED_M_3_I(z, n, m, d) BOOST_PP_COMMA_IF(n) m(z, BOOST_PP_INC(n), d)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/enum_shifted_params.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_ENUM_SHIFTED_PARAMS_HPP\n# define BOOST_PREPROCESSOR_REPETITION_ENUM_SHIFTED_PARAMS_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/arithmetic/inc.hpp>\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/punctuation/comma_if.hpp>\n# include <boost/preprocessor/repetition/repeat.hpp>\n#\n# /* BOOST_PP_ENUM_SHIFTED_PARAMS */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_SHIFTED_PARAMS(count, param) BOOST_PP_REPEAT(BOOST_PP_DEC(count), BOOST_PP_ENUM_SHIFTED_PARAMS_M, param)\n# else\n#    define BOOST_PP_ENUM_SHIFTED_PARAMS(count, param) BOOST_PP_ENUM_SHIFTED_PARAMS_I(count, param)\n#    define BOOST_PP_ENUM_SHIFTED_PARAMS_I(count, param) BOOST_PP_REPEAT(BOOST_PP_DEC(count), BOOST_PP_ENUM_SHIFTED_PARAMS_M, param)\n# endif\n#\n# define BOOST_PP_ENUM_SHIFTED_PARAMS_M(z, n, param) BOOST_PP_COMMA_IF(n) BOOST_PP_CAT(param, BOOST_PP_INC(n))\n#\n# /* BOOST_PP_ENUM_SHIFTED_PARAMS_Z */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_SHIFTED_PARAMS_Z(z, count, param) BOOST_PP_REPEAT_ ## z(BOOST_PP_DEC(count), BOOST_PP_ENUM_SHIFTED_PARAMS_M, param)\n# else\n#    define BOOST_PP_ENUM_SHIFTED_PARAMS_Z(z, count, param) BOOST_PP_ENUM_SHIFTED_PARAMS_Z_I(z, count, param)\n#    define BOOST_PP_ENUM_SHIFTED_PARAMS_Z_I(z, count, param) BOOST_PP_REPEAT_ ## z(BOOST_PP_DEC(count), BOOST_PP_ENUM_SHIFTED_PARAMS_M, param)\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/enum_trailing.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_ENUM_TRAILING_HPP\n# define BOOST_PREPROCESSOR_REPETITION_ENUM_TRAILING_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n# include <boost/preprocessor/repetition/repeat.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_ENUM_TRAILING */\n#\n# if 0\n#    define BOOST_PP_ENUM_TRAILING(count, macro, data)\n# endif\n#\n# define BOOST_PP_ENUM_TRAILING BOOST_PP_CAT(BOOST_PP_ENUM_TRAILING_, BOOST_PP_AUTO_REC(BOOST_PP_REPEAT_P, 4))\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_TRAILING_1(c, m, d) BOOST_PP_REPEAT_1(c, BOOST_PP_ENUM_TRAILING_M_1, (m, d))\n#    define BOOST_PP_ENUM_TRAILING_2(c, m, d) BOOST_PP_REPEAT_2(c, BOOST_PP_ENUM_TRAILING_M_2, (m, d))\n#    define BOOST_PP_ENUM_TRAILING_3(c, m, d) BOOST_PP_REPEAT_3(c, BOOST_PP_ENUM_TRAILING_M_3, (m, d))\n# else\n#    define BOOST_PP_ENUM_TRAILING_1(c, m, d) BOOST_PP_ENUM_TRAILING_1_I(c, m, d)\n#    define BOOST_PP_ENUM_TRAILING_2(c, m, d) BOOST_PP_ENUM_TRAILING_2_I(c, m, d)\n#    define BOOST_PP_ENUM_TRAILING_3(c, m, d) BOOST_PP_ENUM_TRAILING_3_I(c, m, d)\n#    define BOOST_PP_ENUM_TRAILING_1_I(c, m, d) 
BOOST_PP_REPEAT_1(c, BOOST_PP_ENUM_TRAILING_M_1, (m, d))\n#    define BOOST_PP_ENUM_TRAILING_2_I(c, m, d) BOOST_PP_REPEAT_2(c, BOOST_PP_ENUM_TRAILING_M_2, (m, d))\n#    define BOOST_PP_ENUM_TRAILING_3_I(c, m, d) BOOST_PP_REPEAT_3(c, BOOST_PP_ENUM_TRAILING_M_3, (m, d))\n# endif\n#\n# define BOOST_PP_ENUM_TRAILING_4(c, m, d) BOOST_PP_ERROR(0x0003)\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_ENUM_TRAILING_M_1(z, n, md) BOOST_PP_ENUM_TRAILING_M_1_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_TRAILING_M_2(z, n, md) BOOST_PP_ENUM_TRAILING_M_2_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_TRAILING_M_3(z, n, md) BOOST_PP_ENUM_TRAILING_M_3_IM(z, n, BOOST_PP_TUPLE_REM_2 md)\n#    define BOOST_PP_ENUM_TRAILING_M_1_IM(z, n, im) BOOST_PP_ENUM_TRAILING_M_1_I(z, n, im)\n#    define BOOST_PP_ENUM_TRAILING_M_2_IM(z, n, im) BOOST_PP_ENUM_TRAILING_M_2_I(z, n, im)\n#    define BOOST_PP_ENUM_TRAILING_M_3_IM(z, n, im) BOOST_PP_ENUM_TRAILING_M_3_I(z, n, im)\n# else\n#    define BOOST_PP_ENUM_TRAILING_M_1(z, n, md) BOOST_PP_ENUM_TRAILING_M_1_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n#    define BOOST_PP_ENUM_TRAILING_M_2(z, n, md) BOOST_PP_ENUM_TRAILING_M_2_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n#    define BOOST_PP_ENUM_TRAILING_M_3(z, n, md) BOOST_PP_ENUM_TRAILING_M_3_I(z, n, BOOST_PP_TUPLE_ELEM(2, 0, md), BOOST_PP_TUPLE_ELEM(2, 1, md))\n# endif\n#\n# define BOOST_PP_ENUM_TRAILING_M_1_I(z, n, m, d) , m(z, n, d)\n# define BOOST_PP_ENUM_TRAILING_M_2_I(z, n, m, d) , m(z, n, d)\n# define BOOST_PP_ENUM_TRAILING_M_3_I(z, n, m, d) , m(z, n, d)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/enum_trailing_params.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_ENUM_TRAILING_PARAMS_HPP\n# define BOOST_PREPROCESSOR_REPETITION_ENUM_TRAILING_PARAMS_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/repetition/repeat.hpp>\n#\n# /* BOOST_PP_ENUM_TRAILING_PARAMS */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_TRAILING_PARAMS(count, param) BOOST_PP_REPEAT(count, BOOST_PP_ENUM_TRAILING_PARAMS_M, param)\n# else\n#    define BOOST_PP_ENUM_TRAILING_PARAMS(count, param) BOOST_PP_ENUM_TRAILING_PARAMS_I(count, param)\n#    define BOOST_PP_ENUM_TRAILING_PARAMS_I(count, param) BOOST_PP_REPEAT(count, BOOST_PP_ENUM_TRAILING_PARAMS_M, param)\n# endif\n#\n# define BOOST_PP_ENUM_TRAILING_PARAMS_M(z, n, param) , param ## n\n#\n# /* BOOST_PP_ENUM_TRAILING_PARAMS_Z */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_ENUM_TRAILING_PARAMS_Z(z, count, param) BOOST_PP_REPEAT_ ## z(count, BOOST_PP_ENUM_TRAILING_PARAMS_M, param)\n# else\n#    define BOOST_PP_ENUM_TRAILING_PARAMS_Z(z, count, param) BOOST_PP_ENUM_TRAILING_PARAMS_Z_I(z, count, param)\n#    define BOOST_PP_ENUM_TRAILING_PARAMS_Z_I(z, count, param) BOOST_PP_REPEAT_ ## z(count, BOOST_PP_ENUM_TRAILING_PARAMS_M, param)\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/for.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_FOR_HPP\n# define BOOST_PREPROCESSOR_REPETITION_FOR_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/facilities/empty.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n#\n# /* BOOST_PP_FOR */\n#\n# if 0\n#    define BOOST_PP_FOR(state, pred, op, macro)\n# endif\n#\n# define BOOST_PP_FOR BOOST_PP_CAT(BOOST_PP_FOR_, BOOST_PP_AUTO_REC(BOOST_PP_FOR_P, 256))\n#\n# define BOOST_PP_FOR_P(n) BOOST_PP_CAT(BOOST_PP_FOR_CHECK_, BOOST_PP_FOR_ ## n(1, BOOST_PP_FOR_SR_P, BOOST_PP_FOR_SR_O, BOOST_PP_FOR_SR_M))\n#\n# define BOOST_PP_FOR_SR_P(r, s) s\n# define BOOST_PP_FOR_SR_O(r, s) 0\n# define BOOST_PP_FOR_SR_M(r, s) BOOST_PP_NIL\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    include <boost/preprocessor/repetition/detail/edg/for.hpp>\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    include <boost/preprocessor/repetition/detail/msvc/for.hpp>\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n#    include <boost/preprocessor/repetition/detail/dmc/for.hpp>\n# else\n#    include <boost/preprocessor/repetition/detail/for.hpp>\n# endif\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n# define BOOST_PP_FOR_257_PR(s, p) BOOST_PP_BOOL(p##(257, s))\n# else\n# define BOOST_PP_FOR_257_PR(s, p) BOOST_PP_BOOL(p(257, s))\n# endif\n\n# define BOOST_PP_FOR_257_ERROR() BOOST_PP_ERROR(0x0002)\n# define BOOST_PP_FOR_257(s, p, o, m) \\\n\tBOOST_PP_IIF \\\n\t\t( \\\n\t\tBOOST_PP_FOR_257_PR(s,p), 
\\\n\t\tBOOST_PP_FOR_257_ERROR, \\\n\t\tBOOST_PP_EMPTY \\\n\t\t) \\\n\t() \\\n/**/\n// # define BOOST_PP_FOR_257(s, p, o, m) BOOST_PP_ERROR(0x0002)\n#\n# define BOOST_PP_FOR_CHECK_BOOST_PP_NIL 1\n#\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_1(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_2(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_3(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_4(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_5(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_6(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_7(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_8(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_9(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_10(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_11(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_12(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_13(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_14(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_15(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_16(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_17(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_18(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_19(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_20(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_21(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_22(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_23(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_24(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_25(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_26(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_27(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_28(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_29(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_30(s, p, o, m) 0\n# define 
BOOST_PP_FOR_CHECK_BOOST_PP_FOR_31(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_32(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_33(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_34(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_35(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_36(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_37(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_38(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_39(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_40(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_41(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_42(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_43(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_44(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_45(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_46(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_47(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_48(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_49(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_50(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_51(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_52(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_53(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_54(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_55(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_56(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_57(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_58(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_59(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_60(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_61(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_62(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_63(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_64(s, p, o, m) 0\n# 
define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_65(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_66(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_67(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_68(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_69(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_70(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_71(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_72(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_73(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_74(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_75(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_76(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_77(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_78(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_79(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_80(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_81(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_82(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_83(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_84(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_85(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_86(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_87(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_88(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_89(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_90(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_91(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_92(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_93(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_94(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_95(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_96(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_97(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_98(s, p, o, 
m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_99(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_100(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_101(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_102(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_103(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_104(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_105(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_106(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_107(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_108(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_109(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_110(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_111(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_112(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_113(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_114(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_115(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_116(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_117(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_118(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_119(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_120(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_121(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_122(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_123(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_124(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_125(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_126(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_127(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_128(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_129(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_130(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_131(s, p, o, m) 0\n# define 
BOOST_PP_FOR_CHECK_BOOST_PP_FOR_132(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_133(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_134(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_135(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_136(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_137(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_138(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_139(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_140(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_141(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_142(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_143(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_144(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_145(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_146(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_147(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_148(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_149(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_150(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_151(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_152(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_153(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_154(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_155(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_156(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_157(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_158(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_159(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_160(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_161(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_162(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_163(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_164(s, p, o, m) 0\n# define 
BOOST_PP_FOR_CHECK_BOOST_PP_FOR_165(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_166(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_167(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_168(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_169(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_170(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_171(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_172(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_173(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_174(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_175(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_176(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_177(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_178(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_179(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_180(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_181(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_182(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_183(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_184(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_185(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_186(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_187(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_188(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_189(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_190(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_191(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_192(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_193(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_194(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_195(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_196(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_197(s, p, o, m) 0\n# define 
BOOST_PP_FOR_CHECK_BOOST_PP_FOR_198(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_199(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_200(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_201(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_202(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_203(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_204(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_205(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_206(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_207(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_208(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_209(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_210(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_211(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_212(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_213(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_214(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_215(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_216(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_217(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_218(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_219(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_220(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_221(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_222(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_223(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_224(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_225(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_226(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_227(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_228(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_229(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_230(s, p, o, m) 0\n# define 
BOOST_PP_FOR_CHECK_BOOST_PP_FOR_231(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_232(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_233(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_234(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_235(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_236(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_237(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_238(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_239(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_240(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_241(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_242(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_243(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_244(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_245(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_246(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_247(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_248(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_249(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_250(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_251(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_252(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_253(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_254(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_255(s, p, o, m) 0\n# define BOOST_PP_FOR_CHECK_BOOST_PP_FOR_256(s, p, o, m) 0\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/repeat.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_REPEAT_HPP\n# define BOOST_PREPROCESSOR_REPETITION_REPEAT_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n#\n# /* BOOST_PP_REPEAT */\n#\n# if 0\n#    define BOOST_PP_REPEAT(count, macro, data)\n# endif\n#\n# define BOOST_PP_REPEAT BOOST_PP_CAT(BOOST_PP_REPEAT_, BOOST_PP_AUTO_REC(BOOST_PP_REPEAT_P, 4))\n#\n# define BOOST_PP_REPEAT_P(n) BOOST_PP_CAT(BOOST_PP_REPEAT_CHECK_, BOOST_PP_REPEAT_ ## n(1, BOOST_PP_NIL BOOST_PP_TUPLE_EAT_3, BOOST_PP_NIL))\n#\n# define BOOST_PP_REPEAT_CHECK_BOOST_PP_NIL 1\n# define BOOST_PP_REPEAT_CHECK_BOOST_PP_REPEAT_1(c, m, d) 0\n# define BOOST_PP_REPEAT_CHECK_BOOST_PP_REPEAT_2(c, m, d) 0\n# define BOOST_PP_REPEAT_CHECK_BOOST_PP_REPEAT_3(c, m, d) 0\n#\n# define BOOST_PP_REPEAT_1(c, m, d) BOOST_PP_REPEAT_1_I(c, m, d)\n# define BOOST_PP_REPEAT_2(c, m, d) BOOST_PP_REPEAT_2_I(c, m, d)\n# define BOOST_PP_REPEAT_3(c, m, d) BOOST_PP_REPEAT_3_I(c, m, d)\n# define BOOST_PP_REPEAT_4(c, m, d) BOOST_PP_ERROR(0x0003)\n#\n# define BOOST_PP_REPEAT_1_I(c, m, d) BOOST_PP_REPEAT_1_ ## c(m, d)\n# define BOOST_PP_REPEAT_2_I(c, m, d) BOOST_PP_REPEAT_2_ ## c(m, d)\n# define BOOST_PP_REPEAT_3_I(c, m, d) BOOST_PP_REPEAT_3_ ## c(m, d)\n#\n# define BOOST_PP_REPEAT_1ST BOOST_PP_REPEAT_1\n# define BOOST_PP_REPEAT_2ND BOOST_PP_REPEAT_2\n# define BOOST_PP_REPEAT_3RD BOOST_PP_REPEAT_3\n#\n# define BOOST_PP_REPEAT_1_0(m, d)\n# define BOOST_PP_REPEAT_1_1(m, d) m(2, 
0, d)\n# define BOOST_PP_REPEAT_1_2(m, d) BOOST_PP_REPEAT_1_1(m, d) m(2, 1, d)\n# define BOOST_PP_REPEAT_1_3(m, d) BOOST_PP_REPEAT_1_2(m, d) m(2, 2, d)\n# define BOOST_PP_REPEAT_1_4(m, d) BOOST_PP_REPEAT_1_3(m, d) m(2, 3, d)\n# define BOOST_PP_REPEAT_1_5(m, d) BOOST_PP_REPEAT_1_4(m, d) m(2, 4, d)\n# define BOOST_PP_REPEAT_1_6(m, d) BOOST_PP_REPEAT_1_5(m, d) m(2, 5, d)\n# define BOOST_PP_REPEAT_1_7(m, d) BOOST_PP_REPEAT_1_6(m, d) m(2, 6, d)\n# define BOOST_PP_REPEAT_1_8(m, d) BOOST_PP_REPEAT_1_7(m, d) m(2, 7, d)\n# define BOOST_PP_REPEAT_1_9(m, d) BOOST_PP_REPEAT_1_8(m, d) m(2, 8, d)\n# define BOOST_PP_REPEAT_1_10(m, d) BOOST_PP_REPEAT_1_9(m, d) m(2, 9, d)\n# define BOOST_PP_REPEAT_1_11(m, d) BOOST_PP_REPEAT_1_10(m, d) m(2, 10, d)\n# define BOOST_PP_REPEAT_1_12(m, d) BOOST_PP_REPEAT_1_11(m, d) m(2, 11, d)\n# define BOOST_PP_REPEAT_1_13(m, d) BOOST_PP_REPEAT_1_12(m, d) m(2, 12, d)\n# define BOOST_PP_REPEAT_1_14(m, d) BOOST_PP_REPEAT_1_13(m, d) m(2, 13, d)\n# define BOOST_PP_REPEAT_1_15(m, d) BOOST_PP_REPEAT_1_14(m, d) m(2, 14, d)\n# define BOOST_PP_REPEAT_1_16(m, d) BOOST_PP_REPEAT_1_15(m, d) m(2, 15, d)\n# define BOOST_PP_REPEAT_1_17(m, d) BOOST_PP_REPEAT_1_16(m, d) m(2, 16, d)\n# define BOOST_PP_REPEAT_1_18(m, d) BOOST_PP_REPEAT_1_17(m, d) m(2, 17, d)\n# define BOOST_PP_REPEAT_1_19(m, d) BOOST_PP_REPEAT_1_18(m, d) m(2, 18, d)\n# define BOOST_PP_REPEAT_1_20(m, d) BOOST_PP_REPEAT_1_19(m, d) m(2, 19, d)\n# define BOOST_PP_REPEAT_1_21(m, d) BOOST_PP_REPEAT_1_20(m, d) m(2, 20, d)\n# define BOOST_PP_REPEAT_1_22(m, d) BOOST_PP_REPEAT_1_21(m, d) m(2, 21, d)\n# define BOOST_PP_REPEAT_1_23(m, d) BOOST_PP_REPEAT_1_22(m, d) m(2, 22, d)\n# define BOOST_PP_REPEAT_1_24(m, d) BOOST_PP_REPEAT_1_23(m, d) m(2, 23, d)\n# define BOOST_PP_REPEAT_1_25(m, d) BOOST_PP_REPEAT_1_24(m, d) m(2, 24, d)\n# define BOOST_PP_REPEAT_1_26(m, d) BOOST_PP_REPEAT_1_25(m, d) m(2, 25, d)\n# define BOOST_PP_REPEAT_1_27(m, d) BOOST_PP_REPEAT_1_26(m, d) m(2, 26, d)\n# define BOOST_PP_REPEAT_1_28(m, d) 
BOOST_PP_REPEAT_1_27(m, d) m(2, 27, d)\n# define BOOST_PP_REPEAT_1_29(m, d) BOOST_PP_REPEAT_1_28(m, d) m(2, 28, d)\n# define BOOST_PP_REPEAT_1_30(m, d) BOOST_PP_REPEAT_1_29(m, d) m(2, 29, d)\n# define BOOST_PP_REPEAT_1_31(m, d) BOOST_PP_REPEAT_1_30(m, d) m(2, 30, d)\n# define BOOST_PP_REPEAT_1_32(m, d) BOOST_PP_REPEAT_1_31(m, d) m(2, 31, d)\n# define BOOST_PP_REPEAT_1_33(m, d) BOOST_PP_REPEAT_1_32(m, d) m(2, 32, d)\n# define BOOST_PP_REPEAT_1_34(m, d) BOOST_PP_REPEAT_1_33(m, d) m(2, 33, d)\n# define BOOST_PP_REPEAT_1_35(m, d) BOOST_PP_REPEAT_1_34(m, d) m(2, 34, d)\n# define BOOST_PP_REPEAT_1_36(m, d) BOOST_PP_REPEAT_1_35(m, d) m(2, 35, d)\n# define BOOST_PP_REPEAT_1_37(m, d) BOOST_PP_REPEAT_1_36(m, d) m(2, 36, d)\n# define BOOST_PP_REPEAT_1_38(m, d) BOOST_PP_REPEAT_1_37(m, d) m(2, 37, d)\n# define BOOST_PP_REPEAT_1_39(m, d) BOOST_PP_REPEAT_1_38(m, d) m(2, 38, d)\n# define BOOST_PP_REPEAT_1_40(m, d) BOOST_PP_REPEAT_1_39(m, d) m(2, 39, d)\n# define BOOST_PP_REPEAT_1_41(m, d) BOOST_PP_REPEAT_1_40(m, d) m(2, 40, d)\n# define BOOST_PP_REPEAT_1_42(m, d) BOOST_PP_REPEAT_1_41(m, d) m(2, 41, d)\n# define BOOST_PP_REPEAT_1_43(m, d) BOOST_PP_REPEAT_1_42(m, d) m(2, 42, d)\n# define BOOST_PP_REPEAT_1_44(m, d) BOOST_PP_REPEAT_1_43(m, d) m(2, 43, d)\n# define BOOST_PP_REPEAT_1_45(m, d) BOOST_PP_REPEAT_1_44(m, d) m(2, 44, d)\n# define BOOST_PP_REPEAT_1_46(m, d) BOOST_PP_REPEAT_1_45(m, d) m(2, 45, d)\n# define BOOST_PP_REPEAT_1_47(m, d) BOOST_PP_REPEAT_1_46(m, d) m(2, 46, d)\n# define BOOST_PP_REPEAT_1_48(m, d) BOOST_PP_REPEAT_1_47(m, d) m(2, 47, d)\n# define BOOST_PP_REPEAT_1_49(m, d) BOOST_PP_REPEAT_1_48(m, d) m(2, 48, d)\n# define BOOST_PP_REPEAT_1_50(m, d) BOOST_PP_REPEAT_1_49(m, d) m(2, 49, d)\n# define BOOST_PP_REPEAT_1_51(m, d) BOOST_PP_REPEAT_1_50(m, d) m(2, 50, d)\n# define BOOST_PP_REPEAT_1_52(m, d) BOOST_PP_REPEAT_1_51(m, d) m(2, 51, d)\n# define BOOST_PP_REPEAT_1_53(m, d) BOOST_PP_REPEAT_1_52(m, d) m(2, 52, d)\n# define BOOST_PP_REPEAT_1_54(m, d) BOOST_PP_REPEAT_1_53(m, 
d) m(2, 53, d)\n# define BOOST_PP_REPEAT_1_55(m, d) BOOST_PP_REPEAT_1_54(m, d) m(2, 54, d)\n# define BOOST_PP_REPEAT_1_56(m, d) BOOST_PP_REPEAT_1_55(m, d) m(2, 55, d)\n# define BOOST_PP_REPEAT_1_57(m, d) BOOST_PP_REPEAT_1_56(m, d) m(2, 56, d)\n# define BOOST_PP_REPEAT_1_58(m, d) BOOST_PP_REPEAT_1_57(m, d) m(2, 57, d)\n# define BOOST_PP_REPEAT_1_59(m, d) BOOST_PP_REPEAT_1_58(m, d) m(2, 58, d)\n# define BOOST_PP_REPEAT_1_60(m, d) BOOST_PP_REPEAT_1_59(m, d) m(2, 59, d)\n# define BOOST_PP_REPEAT_1_61(m, d) BOOST_PP_REPEAT_1_60(m, d) m(2, 60, d)\n# define BOOST_PP_REPEAT_1_62(m, d) BOOST_PP_REPEAT_1_61(m, d) m(2, 61, d)\n# define BOOST_PP_REPEAT_1_63(m, d) BOOST_PP_REPEAT_1_62(m, d) m(2, 62, d)\n# define BOOST_PP_REPEAT_1_64(m, d) BOOST_PP_REPEAT_1_63(m, d) m(2, 63, d)\n# define BOOST_PP_REPEAT_1_65(m, d) BOOST_PP_REPEAT_1_64(m, d) m(2, 64, d)\n# define BOOST_PP_REPEAT_1_66(m, d) BOOST_PP_REPEAT_1_65(m, d) m(2, 65, d)\n# define BOOST_PP_REPEAT_1_67(m, d) BOOST_PP_REPEAT_1_66(m, d) m(2, 66, d)\n# define BOOST_PP_REPEAT_1_68(m, d) BOOST_PP_REPEAT_1_67(m, d) m(2, 67, d)\n# define BOOST_PP_REPEAT_1_69(m, d) BOOST_PP_REPEAT_1_68(m, d) m(2, 68, d)\n# define BOOST_PP_REPEAT_1_70(m, d) BOOST_PP_REPEAT_1_69(m, d) m(2, 69, d)\n# define BOOST_PP_REPEAT_1_71(m, d) BOOST_PP_REPEAT_1_70(m, d) m(2, 70, d)\n# define BOOST_PP_REPEAT_1_72(m, d) BOOST_PP_REPEAT_1_71(m, d) m(2, 71, d)\n# define BOOST_PP_REPEAT_1_73(m, d) BOOST_PP_REPEAT_1_72(m, d) m(2, 72, d)\n# define BOOST_PP_REPEAT_1_74(m, d) BOOST_PP_REPEAT_1_73(m, d) m(2, 73, d)\n# define BOOST_PP_REPEAT_1_75(m, d) BOOST_PP_REPEAT_1_74(m, d) m(2, 74, d)\n# define BOOST_PP_REPEAT_1_76(m, d) BOOST_PP_REPEAT_1_75(m, d) m(2, 75, d)\n# define BOOST_PP_REPEAT_1_77(m, d) BOOST_PP_REPEAT_1_76(m, d) m(2, 76, d)\n# define BOOST_PP_REPEAT_1_78(m, d) BOOST_PP_REPEAT_1_77(m, d) m(2, 77, d)\n# define BOOST_PP_REPEAT_1_79(m, d) BOOST_PP_REPEAT_1_78(m, d) m(2, 78, d)\n# define BOOST_PP_REPEAT_1_80(m, d) BOOST_PP_REPEAT_1_79(m, d) m(2, 79, d)\n# 
define BOOST_PP_REPEAT_1_81(m, d) BOOST_PP_REPEAT_1_80(m, d) m(2, 80, d)\n# define BOOST_PP_REPEAT_1_82(m, d) BOOST_PP_REPEAT_1_81(m, d) m(2, 81, d)\n# define BOOST_PP_REPEAT_1_83(m, d) BOOST_PP_REPEAT_1_82(m, d) m(2, 82, d)\n# define BOOST_PP_REPEAT_1_84(m, d) BOOST_PP_REPEAT_1_83(m, d) m(2, 83, d)\n# define BOOST_PP_REPEAT_1_85(m, d) BOOST_PP_REPEAT_1_84(m, d) m(2, 84, d)\n# define BOOST_PP_REPEAT_1_86(m, d) BOOST_PP_REPEAT_1_85(m, d) m(2, 85, d)\n# define BOOST_PP_REPEAT_1_87(m, d) BOOST_PP_REPEAT_1_86(m, d) m(2, 86, d)\n# define BOOST_PP_REPEAT_1_88(m, d) BOOST_PP_REPEAT_1_87(m, d) m(2, 87, d)\n# define BOOST_PP_REPEAT_1_89(m, d) BOOST_PP_REPEAT_1_88(m, d) m(2, 88, d)\n# define BOOST_PP_REPEAT_1_90(m, d) BOOST_PP_REPEAT_1_89(m, d) m(2, 89, d)\n# define BOOST_PP_REPEAT_1_91(m, d) BOOST_PP_REPEAT_1_90(m, d) m(2, 90, d)\n# define BOOST_PP_REPEAT_1_92(m, d) BOOST_PP_REPEAT_1_91(m, d) m(2, 91, d)\n# define BOOST_PP_REPEAT_1_93(m, d) BOOST_PP_REPEAT_1_92(m, d) m(2, 92, d)\n# define BOOST_PP_REPEAT_1_94(m, d) BOOST_PP_REPEAT_1_93(m, d) m(2, 93, d)\n# define BOOST_PP_REPEAT_1_95(m, d) BOOST_PP_REPEAT_1_94(m, d) m(2, 94, d)\n# define BOOST_PP_REPEAT_1_96(m, d) BOOST_PP_REPEAT_1_95(m, d) m(2, 95, d)\n# define BOOST_PP_REPEAT_1_97(m, d) BOOST_PP_REPEAT_1_96(m, d) m(2, 96, d)\n# define BOOST_PP_REPEAT_1_98(m, d) BOOST_PP_REPEAT_1_97(m, d) m(2, 97, d)\n# define BOOST_PP_REPEAT_1_99(m, d) BOOST_PP_REPEAT_1_98(m, d) m(2, 98, d)\n# define BOOST_PP_REPEAT_1_100(m, d) BOOST_PP_REPEAT_1_99(m, d) m(2, 99, d)\n# define BOOST_PP_REPEAT_1_101(m, d) BOOST_PP_REPEAT_1_100(m, d) m(2, 100, d)\n# define BOOST_PP_REPEAT_1_102(m, d) BOOST_PP_REPEAT_1_101(m, d) m(2, 101, d)\n# define BOOST_PP_REPEAT_1_103(m, d) BOOST_PP_REPEAT_1_102(m, d) m(2, 102, d)\n# define BOOST_PP_REPEAT_1_104(m, d) BOOST_PP_REPEAT_1_103(m, d) m(2, 103, d)\n# define BOOST_PP_REPEAT_1_105(m, d) BOOST_PP_REPEAT_1_104(m, d) m(2, 104, d)\n# define BOOST_PP_REPEAT_1_106(m, d) BOOST_PP_REPEAT_1_105(m, d) m(2, 105, d)\n# 
define BOOST_PP_REPEAT_1_107(m, d) BOOST_PP_REPEAT_1_106(m, d) m(2, 106, d)\n# define BOOST_PP_REPEAT_1_108(m, d) BOOST_PP_REPEAT_1_107(m, d) m(2, 107, d)\n# define BOOST_PP_REPEAT_1_109(m, d) BOOST_PP_REPEAT_1_108(m, d) m(2, 108, d)\n# define BOOST_PP_REPEAT_1_110(m, d) BOOST_PP_REPEAT_1_109(m, d) m(2, 109, d)\n# define BOOST_PP_REPEAT_1_111(m, d) BOOST_PP_REPEAT_1_110(m, d) m(2, 110, d)\n# define BOOST_PP_REPEAT_1_112(m, d) BOOST_PP_REPEAT_1_111(m, d) m(2, 111, d)\n# define BOOST_PP_REPEAT_1_113(m, d) BOOST_PP_REPEAT_1_112(m, d) m(2, 112, d)\n# define BOOST_PP_REPEAT_1_114(m, d) BOOST_PP_REPEAT_1_113(m, d) m(2, 113, d)\n# define BOOST_PP_REPEAT_1_115(m, d) BOOST_PP_REPEAT_1_114(m, d) m(2, 114, d)\n# define BOOST_PP_REPEAT_1_116(m, d) BOOST_PP_REPEAT_1_115(m, d) m(2, 115, d)\n# define BOOST_PP_REPEAT_1_117(m, d) BOOST_PP_REPEAT_1_116(m, d) m(2, 116, d)\n# define BOOST_PP_REPEAT_1_118(m, d) BOOST_PP_REPEAT_1_117(m, d) m(2, 117, d)\n# define BOOST_PP_REPEAT_1_119(m, d) BOOST_PP_REPEAT_1_118(m, d) m(2, 118, d)\n# define BOOST_PP_REPEAT_1_120(m, d) BOOST_PP_REPEAT_1_119(m, d) m(2, 119, d)\n# define BOOST_PP_REPEAT_1_121(m, d) BOOST_PP_REPEAT_1_120(m, d) m(2, 120, d)\n# define BOOST_PP_REPEAT_1_122(m, d) BOOST_PP_REPEAT_1_121(m, d) m(2, 121, d)\n# define BOOST_PP_REPEAT_1_123(m, d) BOOST_PP_REPEAT_1_122(m, d) m(2, 122, d)\n# define BOOST_PP_REPEAT_1_124(m, d) BOOST_PP_REPEAT_1_123(m, d) m(2, 123, d)\n# define BOOST_PP_REPEAT_1_125(m, d) BOOST_PP_REPEAT_1_124(m, d) m(2, 124, d)\n# define BOOST_PP_REPEAT_1_126(m, d) BOOST_PP_REPEAT_1_125(m, d) m(2, 125, d)\n# define BOOST_PP_REPEAT_1_127(m, d) BOOST_PP_REPEAT_1_126(m, d) m(2, 126, d)\n# define BOOST_PP_REPEAT_1_128(m, d) BOOST_PP_REPEAT_1_127(m, d) m(2, 127, d)\n# define BOOST_PP_REPEAT_1_129(m, d) BOOST_PP_REPEAT_1_128(m, d) m(2, 128, d)\n# define BOOST_PP_REPEAT_1_130(m, d) BOOST_PP_REPEAT_1_129(m, d) m(2, 129, d)\n# define BOOST_PP_REPEAT_1_131(m, d) BOOST_PP_REPEAT_1_130(m, d) m(2, 130, d)\n# define 
BOOST_PP_REPEAT_1_132(m, d) BOOST_PP_REPEAT_1_131(m, d) m(2, 131, d)\n# define BOOST_PP_REPEAT_1_133(m, d) BOOST_PP_REPEAT_1_132(m, d) m(2, 132, d)\n# define BOOST_PP_REPEAT_1_134(m, d) BOOST_PP_REPEAT_1_133(m, d) m(2, 133, d)\n# define BOOST_PP_REPEAT_1_135(m, d) BOOST_PP_REPEAT_1_134(m, d) m(2, 134, d)\n# define BOOST_PP_REPEAT_1_136(m, d) BOOST_PP_REPEAT_1_135(m, d) m(2, 135, d)\n# define BOOST_PP_REPEAT_1_137(m, d) BOOST_PP_REPEAT_1_136(m, d) m(2, 136, d)\n# define BOOST_PP_REPEAT_1_138(m, d) BOOST_PP_REPEAT_1_137(m, d) m(2, 137, d)\n# define BOOST_PP_REPEAT_1_139(m, d) BOOST_PP_REPEAT_1_138(m, d) m(2, 138, d)\n# define BOOST_PP_REPEAT_1_140(m, d) BOOST_PP_REPEAT_1_139(m, d) m(2, 139, d)\n# define BOOST_PP_REPEAT_1_141(m, d) BOOST_PP_REPEAT_1_140(m, d) m(2, 140, d)\n# define BOOST_PP_REPEAT_1_142(m, d) BOOST_PP_REPEAT_1_141(m, d) m(2, 141, d)\n# define BOOST_PP_REPEAT_1_143(m, d) BOOST_PP_REPEAT_1_142(m, d) m(2, 142, d)\n# define BOOST_PP_REPEAT_1_144(m, d) BOOST_PP_REPEAT_1_143(m, d) m(2, 143, d)\n# define BOOST_PP_REPEAT_1_145(m, d) BOOST_PP_REPEAT_1_144(m, d) m(2, 144, d)\n# define BOOST_PP_REPEAT_1_146(m, d) BOOST_PP_REPEAT_1_145(m, d) m(2, 145, d)\n# define BOOST_PP_REPEAT_1_147(m, d) BOOST_PP_REPEAT_1_146(m, d) m(2, 146, d)\n# define BOOST_PP_REPEAT_1_148(m, d) BOOST_PP_REPEAT_1_147(m, d) m(2, 147, d)\n# define BOOST_PP_REPEAT_1_149(m, d) BOOST_PP_REPEAT_1_148(m, d) m(2, 148, d)\n# define BOOST_PP_REPEAT_1_150(m, d) BOOST_PP_REPEAT_1_149(m, d) m(2, 149, d)\n# define BOOST_PP_REPEAT_1_151(m, d) BOOST_PP_REPEAT_1_150(m, d) m(2, 150, d)\n# define BOOST_PP_REPEAT_1_152(m, d) BOOST_PP_REPEAT_1_151(m, d) m(2, 151, d)\n# define BOOST_PP_REPEAT_1_153(m, d) BOOST_PP_REPEAT_1_152(m, d) m(2, 152, d)\n# define BOOST_PP_REPEAT_1_154(m, d) BOOST_PP_REPEAT_1_153(m, d) m(2, 153, d)\n# define BOOST_PP_REPEAT_1_155(m, d) BOOST_PP_REPEAT_1_154(m, d) m(2, 154, d)\n# define BOOST_PP_REPEAT_1_156(m, d) BOOST_PP_REPEAT_1_155(m, d) m(2, 155, d)\n# define BOOST_PP_REPEAT_1_157(m, 
d) BOOST_PP_REPEAT_1_156(m, d) m(2, 156, d)\n# define BOOST_PP_REPEAT_1_158(m, d) BOOST_PP_REPEAT_1_157(m, d) m(2, 157, d)\n# define BOOST_PP_REPEAT_1_159(m, d) BOOST_PP_REPEAT_1_158(m, d) m(2, 158, d)\n# define BOOST_PP_REPEAT_1_160(m, d) BOOST_PP_REPEAT_1_159(m, d) m(2, 159, d)\n# define BOOST_PP_REPEAT_1_161(m, d) BOOST_PP_REPEAT_1_160(m, d) m(2, 160, d)\n# define BOOST_PP_REPEAT_1_162(m, d) BOOST_PP_REPEAT_1_161(m, d) m(2, 161, d)\n# define BOOST_PP_REPEAT_1_163(m, d) BOOST_PP_REPEAT_1_162(m, d) m(2, 162, d)\n# define BOOST_PP_REPEAT_1_164(m, d) BOOST_PP_REPEAT_1_163(m, d) m(2, 163, d)\n# define BOOST_PP_REPEAT_1_165(m, d) BOOST_PP_REPEAT_1_164(m, d) m(2, 164, d)\n# define BOOST_PP_REPEAT_1_166(m, d) BOOST_PP_REPEAT_1_165(m, d) m(2, 165, d)\n# define BOOST_PP_REPEAT_1_167(m, d) BOOST_PP_REPEAT_1_166(m, d) m(2, 166, d)\n# define BOOST_PP_REPEAT_1_168(m, d) BOOST_PP_REPEAT_1_167(m, d) m(2, 167, d)\n# define BOOST_PP_REPEAT_1_169(m, d) BOOST_PP_REPEAT_1_168(m, d) m(2, 168, d)\n# define BOOST_PP_REPEAT_1_170(m, d) BOOST_PP_REPEAT_1_169(m, d) m(2, 169, d)\n# define BOOST_PP_REPEAT_1_171(m, d) BOOST_PP_REPEAT_1_170(m, d) m(2, 170, d)\n# define BOOST_PP_REPEAT_1_172(m, d) BOOST_PP_REPEAT_1_171(m, d) m(2, 171, d)\n# define BOOST_PP_REPEAT_1_173(m, d) BOOST_PP_REPEAT_1_172(m, d) m(2, 172, d)\n# define BOOST_PP_REPEAT_1_174(m, d) BOOST_PP_REPEAT_1_173(m, d) m(2, 173, d)\n# define BOOST_PP_REPEAT_1_175(m, d) BOOST_PP_REPEAT_1_174(m, d) m(2, 174, d)\n# define BOOST_PP_REPEAT_1_176(m, d) BOOST_PP_REPEAT_1_175(m, d) m(2, 175, d)\n# define BOOST_PP_REPEAT_1_177(m, d) BOOST_PP_REPEAT_1_176(m, d) m(2, 176, d)\n# define BOOST_PP_REPEAT_1_178(m, d) BOOST_PP_REPEAT_1_177(m, d) m(2, 177, d)\n# define BOOST_PP_REPEAT_1_179(m, d) BOOST_PP_REPEAT_1_178(m, d) m(2, 178, d)\n# define BOOST_PP_REPEAT_1_180(m, d) BOOST_PP_REPEAT_1_179(m, d) m(2, 179, d)\n# define BOOST_PP_REPEAT_1_181(m, d) BOOST_PP_REPEAT_1_180(m, d) m(2, 180, d)\n# define BOOST_PP_REPEAT_1_182(m, d) 
BOOST_PP_REPEAT_1_181(m, d) m(2, 181, d)\n# define BOOST_PP_REPEAT_1_183(m, d) BOOST_PP_REPEAT_1_182(m, d) m(2, 182, d)\n# define BOOST_PP_REPEAT_1_184(m, d) BOOST_PP_REPEAT_1_183(m, d) m(2, 183, d)\n# define BOOST_PP_REPEAT_1_185(m, d) BOOST_PP_REPEAT_1_184(m, d) m(2, 184, d)\n# define BOOST_PP_REPEAT_1_186(m, d) BOOST_PP_REPEAT_1_185(m, d) m(2, 185, d)\n# define BOOST_PP_REPEAT_1_187(m, d) BOOST_PP_REPEAT_1_186(m, d) m(2, 186, d)\n# define BOOST_PP_REPEAT_1_188(m, d) BOOST_PP_REPEAT_1_187(m, d) m(2, 187, d)\n# define BOOST_PP_REPEAT_1_189(m, d) BOOST_PP_REPEAT_1_188(m, d) m(2, 188, d)\n# define BOOST_PP_REPEAT_1_190(m, d) BOOST_PP_REPEAT_1_189(m, d) m(2, 189, d)\n# define BOOST_PP_REPEAT_1_191(m, d) BOOST_PP_REPEAT_1_190(m, d) m(2, 190, d)\n# define BOOST_PP_REPEAT_1_192(m, d) BOOST_PP_REPEAT_1_191(m, d) m(2, 191, d)\n# define BOOST_PP_REPEAT_1_193(m, d) BOOST_PP_REPEAT_1_192(m, d) m(2, 192, d)\n# define BOOST_PP_REPEAT_1_194(m, d) BOOST_PP_REPEAT_1_193(m, d) m(2, 193, d)\n# define BOOST_PP_REPEAT_1_195(m, d) BOOST_PP_REPEAT_1_194(m, d) m(2, 194, d)\n# define BOOST_PP_REPEAT_1_196(m, d) BOOST_PP_REPEAT_1_195(m, d) m(2, 195, d)\n# define BOOST_PP_REPEAT_1_197(m, d) BOOST_PP_REPEAT_1_196(m, d) m(2, 196, d)\n# define BOOST_PP_REPEAT_1_198(m, d) BOOST_PP_REPEAT_1_197(m, d) m(2, 197, d)\n# define BOOST_PP_REPEAT_1_199(m, d) BOOST_PP_REPEAT_1_198(m, d) m(2, 198, d)\n# define BOOST_PP_REPEAT_1_200(m, d) BOOST_PP_REPEAT_1_199(m, d) m(2, 199, d)\n# define BOOST_PP_REPEAT_1_201(m, d) BOOST_PP_REPEAT_1_200(m, d) m(2, 200, d)\n# define BOOST_PP_REPEAT_1_202(m, d) BOOST_PP_REPEAT_1_201(m, d) m(2, 201, d)\n# define BOOST_PP_REPEAT_1_203(m, d) BOOST_PP_REPEAT_1_202(m, d) m(2, 202, d)\n# define BOOST_PP_REPEAT_1_204(m, d) BOOST_PP_REPEAT_1_203(m, d) m(2, 203, d)\n# define BOOST_PP_REPEAT_1_205(m, d) BOOST_PP_REPEAT_1_204(m, d) m(2, 204, d)\n# define BOOST_PP_REPEAT_1_206(m, d) BOOST_PP_REPEAT_1_205(m, d) m(2, 205, d)\n# define BOOST_PP_REPEAT_1_207(m, d) BOOST_PP_REPEAT_1_206(m, 
d) m(2, 206, d)\n# define BOOST_PP_REPEAT_1_208(m, d) BOOST_PP_REPEAT_1_207(m, d) m(2, 207, d)\n# define BOOST_PP_REPEAT_1_209(m, d) BOOST_PP_REPEAT_1_208(m, d) m(2, 208, d)\n# define BOOST_PP_REPEAT_1_210(m, d) BOOST_PP_REPEAT_1_209(m, d) m(2, 209, d)\n# define BOOST_PP_REPEAT_1_211(m, d) BOOST_PP_REPEAT_1_210(m, d) m(2, 210, d)\n# define BOOST_PP_REPEAT_1_212(m, d) BOOST_PP_REPEAT_1_211(m, d) m(2, 211, d)\n# define BOOST_PP_REPEAT_1_213(m, d) BOOST_PP_REPEAT_1_212(m, d) m(2, 212, d)\n# define BOOST_PP_REPEAT_1_214(m, d) BOOST_PP_REPEAT_1_213(m, d) m(2, 213, d)\n# define BOOST_PP_REPEAT_1_215(m, d) BOOST_PP_REPEAT_1_214(m, d) m(2, 214, d)\n# define BOOST_PP_REPEAT_1_216(m, d) BOOST_PP_REPEAT_1_215(m, d) m(2, 215, d)\n# define BOOST_PP_REPEAT_1_217(m, d) BOOST_PP_REPEAT_1_216(m, d) m(2, 216, d)\n# define BOOST_PP_REPEAT_1_218(m, d) BOOST_PP_REPEAT_1_217(m, d) m(2, 217, d)\n# define BOOST_PP_REPEAT_1_219(m, d) BOOST_PP_REPEAT_1_218(m, d) m(2, 218, d)\n# define BOOST_PP_REPEAT_1_220(m, d) BOOST_PP_REPEAT_1_219(m, d) m(2, 219, d)\n# define BOOST_PP_REPEAT_1_221(m, d) BOOST_PP_REPEAT_1_220(m, d) m(2, 220, d)\n# define BOOST_PP_REPEAT_1_222(m, d) BOOST_PP_REPEAT_1_221(m, d) m(2, 221, d)\n# define BOOST_PP_REPEAT_1_223(m, d) BOOST_PP_REPEAT_1_222(m, d) m(2, 222, d)\n# define BOOST_PP_REPEAT_1_224(m, d) BOOST_PP_REPEAT_1_223(m, d) m(2, 223, d)\n# define BOOST_PP_REPEAT_1_225(m, d) BOOST_PP_REPEAT_1_224(m, d) m(2, 224, d)\n# define BOOST_PP_REPEAT_1_226(m, d) BOOST_PP_REPEAT_1_225(m, d) m(2, 225, d)\n# define BOOST_PP_REPEAT_1_227(m, d) BOOST_PP_REPEAT_1_226(m, d) m(2, 226, d)\n# define BOOST_PP_REPEAT_1_228(m, d) BOOST_PP_REPEAT_1_227(m, d) m(2, 227, d)\n# define BOOST_PP_REPEAT_1_229(m, d) BOOST_PP_REPEAT_1_228(m, d) m(2, 228, d)\n# define BOOST_PP_REPEAT_1_230(m, d) BOOST_PP_REPEAT_1_229(m, d) m(2, 229, d)\n# define BOOST_PP_REPEAT_1_231(m, d) BOOST_PP_REPEAT_1_230(m, d) m(2, 230, d)\n# define BOOST_PP_REPEAT_1_232(m, d) BOOST_PP_REPEAT_1_231(m, d) m(2, 231, d)\n# 
define BOOST_PP_REPEAT_1_233(m, d) BOOST_PP_REPEAT_1_232(m, d) m(2, 232, d)\n# define BOOST_PP_REPEAT_1_234(m, d) BOOST_PP_REPEAT_1_233(m, d) m(2, 233, d)\n# define BOOST_PP_REPEAT_1_235(m, d) BOOST_PP_REPEAT_1_234(m, d) m(2, 234, d)\n# define BOOST_PP_REPEAT_1_236(m, d) BOOST_PP_REPEAT_1_235(m, d) m(2, 235, d)\n# define BOOST_PP_REPEAT_1_237(m, d) BOOST_PP_REPEAT_1_236(m, d) m(2, 236, d)\n# define BOOST_PP_REPEAT_1_238(m, d) BOOST_PP_REPEAT_1_237(m, d) m(2, 237, d)\n# define BOOST_PP_REPEAT_1_239(m, d) BOOST_PP_REPEAT_1_238(m, d) m(2, 238, d)\n# define BOOST_PP_REPEAT_1_240(m, d) BOOST_PP_REPEAT_1_239(m, d) m(2, 239, d)\n# define BOOST_PP_REPEAT_1_241(m, d) BOOST_PP_REPEAT_1_240(m, d) m(2, 240, d)\n# define BOOST_PP_REPEAT_1_242(m, d) BOOST_PP_REPEAT_1_241(m, d) m(2, 241, d)\n# define BOOST_PP_REPEAT_1_243(m, d) BOOST_PP_REPEAT_1_242(m, d) m(2, 242, d)\n# define BOOST_PP_REPEAT_1_244(m, d) BOOST_PP_REPEAT_1_243(m, d) m(2, 243, d)\n# define BOOST_PP_REPEAT_1_245(m, d) BOOST_PP_REPEAT_1_244(m, d) m(2, 244, d)\n# define BOOST_PP_REPEAT_1_246(m, d) BOOST_PP_REPEAT_1_245(m, d) m(2, 245, d)\n# define BOOST_PP_REPEAT_1_247(m, d) BOOST_PP_REPEAT_1_246(m, d) m(2, 246, d)\n# define BOOST_PP_REPEAT_1_248(m, d) BOOST_PP_REPEAT_1_247(m, d) m(2, 247, d)\n# define BOOST_PP_REPEAT_1_249(m, d) BOOST_PP_REPEAT_1_248(m, d) m(2, 248, d)\n# define BOOST_PP_REPEAT_1_250(m, d) BOOST_PP_REPEAT_1_249(m, d) m(2, 249, d)\n# define BOOST_PP_REPEAT_1_251(m, d) BOOST_PP_REPEAT_1_250(m, d) m(2, 250, d)\n# define BOOST_PP_REPEAT_1_252(m, d) BOOST_PP_REPEAT_1_251(m, d) m(2, 251, d)\n# define BOOST_PP_REPEAT_1_253(m, d) BOOST_PP_REPEAT_1_252(m, d) m(2, 252, d)\n# define BOOST_PP_REPEAT_1_254(m, d) BOOST_PP_REPEAT_1_253(m, d) m(2, 253, d)\n# define BOOST_PP_REPEAT_1_255(m, d) BOOST_PP_REPEAT_1_254(m, d) m(2, 254, d)\n# define BOOST_PP_REPEAT_1_256(m, d) BOOST_PP_REPEAT_1_255(m, d) m(2, 255, d)\n#\n# define BOOST_PP_REPEAT_2_0(m, d)\n# define BOOST_PP_REPEAT_2_1(m, d) m(3, 0, d)\n# define 
BOOST_PP_REPEAT_2_2(m, d) BOOST_PP_REPEAT_2_1(m, d) m(3, 1, d)\n# define BOOST_PP_REPEAT_2_3(m, d) BOOST_PP_REPEAT_2_2(m, d) m(3, 2, d)\n# define BOOST_PP_REPEAT_2_4(m, d) BOOST_PP_REPEAT_2_3(m, d) m(3, 3, d)\n# define BOOST_PP_REPEAT_2_5(m, d) BOOST_PP_REPEAT_2_4(m, d) m(3, 4, d)\n# define BOOST_PP_REPEAT_2_6(m, d) BOOST_PP_REPEAT_2_5(m, d) m(3, 5, d)\n# define BOOST_PP_REPEAT_2_7(m, d) BOOST_PP_REPEAT_2_6(m, d) m(3, 6, d)\n# define BOOST_PP_REPEAT_2_8(m, d) BOOST_PP_REPEAT_2_7(m, d) m(3, 7, d)\n# define BOOST_PP_REPEAT_2_9(m, d) BOOST_PP_REPEAT_2_8(m, d) m(3, 8, d)\n# define BOOST_PP_REPEAT_2_10(m, d) BOOST_PP_REPEAT_2_9(m, d) m(3, 9, d)\n# define BOOST_PP_REPEAT_2_11(m, d) BOOST_PP_REPEAT_2_10(m, d) m(3, 10, d)\n# define BOOST_PP_REPEAT_2_12(m, d) BOOST_PP_REPEAT_2_11(m, d) m(3, 11, d)\n# define BOOST_PP_REPEAT_2_13(m, d) BOOST_PP_REPEAT_2_12(m, d) m(3, 12, d)\n# define BOOST_PP_REPEAT_2_14(m, d) BOOST_PP_REPEAT_2_13(m, d) m(3, 13, d)\n# define BOOST_PP_REPEAT_2_15(m, d) BOOST_PP_REPEAT_2_14(m, d) m(3, 14, d)\n# define BOOST_PP_REPEAT_2_16(m, d) BOOST_PP_REPEAT_2_15(m, d) m(3, 15, d)\n# define BOOST_PP_REPEAT_2_17(m, d) BOOST_PP_REPEAT_2_16(m, d) m(3, 16, d)\n# define BOOST_PP_REPEAT_2_18(m, d) BOOST_PP_REPEAT_2_17(m, d) m(3, 17, d)\n# define BOOST_PP_REPEAT_2_19(m, d) BOOST_PP_REPEAT_2_18(m, d) m(3, 18, d)\n# define BOOST_PP_REPEAT_2_20(m, d) BOOST_PP_REPEAT_2_19(m, d) m(3, 19, d)\n# define BOOST_PP_REPEAT_2_21(m, d) BOOST_PP_REPEAT_2_20(m, d) m(3, 20, d)\n# define BOOST_PP_REPEAT_2_22(m, d) BOOST_PP_REPEAT_2_21(m, d) m(3, 21, d)\n# define BOOST_PP_REPEAT_2_23(m, d) BOOST_PP_REPEAT_2_22(m, d) m(3, 22, d)\n# define BOOST_PP_REPEAT_2_24(m, d) BOOST_PP_REPEAT_2_23(m, d) m(3, 23, d)\n# define BOOST_PP_REPEAT_2_25(m, d) BOOST_PP_REPEAT_2_24(m, d) m(3, 24, d)\n# define BOOST_PP_REPEAT_2_26(m, d) BOOST_PP_REPEAT_2_25(m, d) m(3, 25, d)\n# define BOOST_PP_REPEAT_2_27(m, d) BOOST_PP_REPEAT_2_26(m, d) m(3, 26, d)\n# define BOOST_PP_REPEAT_2_28(m, d) 
BOOST_PP_REPEAT_2_27(m, d) m(3, 27, d)\n# define BOOST_PP_REPEAT_2_29(m, d) BOOST_PP_REPEAT_2_28(m, d) m(3, 28, d)\n# define BOOST_PP_REPEAT_2_30(m, d) BOOST_PP_REPEAT_2_29(m, d) m(3, 29, d)\n# define BOOST_PP_REPEAT_2_31(m, d) BOOST_PP_REPEAT_2_30(m, d) m(3, 30, d)\n# define BOOST_PP_REPEAT_2_32(m, d) BOOST_PP_REPEAT_2_31(m, d) m(3, 31, d)\n# define BOOST_PP_REPEAT_2_33(m, d) BOOST_PP_REPEAT_2_32(m, d) m(3, 32, d)\n# define BOOST_PP_REPEAT_2_34(m, d) BOOST_PP_REPEAT_2_33(m, d) m(3, 33, d)\n# define BOOST_PP_REPEAT_2_35(m, d) BOOST_PP_REPEAT_2_34(m, d) m(3, 34, d)\n# define BOOST_PP_REPEAT_2_36(m, d) BOOST_PP_REPEAT_2_35(m, d) m(3, 35, d)\n# define BOOST_PP_REPEAT_2_37(m, d) BOOST_PP_REPEAT_2_36(m, d) m(3, 36, d)\n# define BOOST_PP_REPEAT_2_38(m, d) BOOST_PP_REPEAT_2_37(m, d) m(3, 37, d)\n# define BOOST_PP_REPEAT_2_39(m, d) BOOST_PP_REPEAT_2_38(m, d) m(3, 38, d)\n# define BOOST_PP_REPEAT_2_40(m, d) BOOST_PP_REPEAT_2_39(m, d) m(3, 39, d)\n# define BOOST_PP_REPEAT_2_41(m, d) BOOST_PP_REPEAT_2_40(m, d) m(3, 40, d)\n# define BOOST_PP_REPEAT_2_42(m, d) BOOST_PP_REPEAT_2_41(m, d) m(3, 41, d)\n# define BOOST_PP_REPEAT_2_43(m, d) BOOST_PP_REPEAT_2_42(m, d) m(3, 42, d)\n# define BOOST_PP_REPEAT_2_44(m, d) BOOST_PP_REPEAT_2_43(m, d) m(3, 43, d)\n# define BOOST_PP_REPEAT_2_45(m, d) BOOST_PP_REPEAT_2_44(m, d) m(3, 44, d)\n# define BOOST_PP_REPEAT_2_46(m, d) BOOST_PP_REPEAT_2_45(m, d) m(3, 45, d)\n# define BOOST_PP_REPEAT_2_47(m, d) BOOST_PP_REPEAT_2_46(m, d) m(3, 46, d)\n# define BOOST_PP_REPEAT_2_48(m, d) BOOST_PP_REPEAT_2_47(m, d) m(3, 47, d)\n# define BOOST_PP_REPEAT_2_49(m, d) BOOST_PP_REPEAT_2_48(m, d) m(3, 48, d)\n# define BOOST_PP_REPEAT_2_50(m, d) BOOST_PP_REPEAT_2_49(m, d) m(3, 49, d)\n# define BOOST_PP_REPEAT_2_51(m, d) BOOST_PP_REPEAT_2_50(m, d) m(3, 50, d)\n# define BOOST_PP_REPEAT_2_52(m, d) BOOST_PP_REPEAT_2_51(m, d) m(3, 51, d)\n# define BOOST_PP_REPEAT_2_53(m, d) BOOST_PP_REPEAT_2_52(m, d) m(3, 52, d)\n# define BOOST_PP_REPEAT_2_54(m, d) BOOST_PP_REPEAT_2_53(m, 
d) m(3, 53, d)\n# define BOOST_PP_REPEAT_2_55(m, d) BOOST_PP_REPEAT_2_54(m, d) m(3, 54, d)\n# define BOOST_PP_REPEAT_2_56(m, d) BOOST_PP_REPEAT_2_55(m, d) m(3, 55, d)\n# define BOOST_PP_REPEAT_2_57(m, d) BOOST_PP_REPEAT_2_56(m, d) m(3, 56, d)\n# define BOOST_PP_REPEAT_2_58(m, d) BOOST_PP_REPEAT_2_57(m, d) m(3, 57, d)\n# define BOOST_PP_REPEAT_2_59(m, d) BOOST_PP_REPEAT_2_58(m, d) m(3, 58, d)\n# define BOOST_PP_REPEAT_2_60(m, d) BOOST_PP_REPEAT_2_59(m, d) m(3, 59, d)\n# define BOOST_PP_REPEAT_2_61(m, d) BOOST_PP_REPEAT_2_60(m, d) m(3, 60, d)\n# define BOOST_PP_REPEAT_2_62(m, d) BOOST_PP_REPEAT_2_61(m, d) m(3, 61, d)\n# define BOOST_PP_REPEAT_2_63(m, d) BOOST_PP_REPEAT_2_62(m, d) m(3, 62, d)\n# define BOOST_PP_REPEAT_2_64(m, d) BOOST_PP_REPEAT_2_63(m, d) m(3, 63, d)\n# define BOOST_PP_REPEAT_2_65(m, d) BOOST_PP_REPEAT_2_64(m, d) m(3, 64, d)\n# define BOOST_PP_REPEAT_2_66(m, d) BOOST_PP_REPEAT_2_65(m, d) m(3, 65, d)\n# define BOOST_PP_REPEAT_2_67(m, d) BOOST_PP_REPEAT_2_66(m, d) m(3, 66, d)\n# define BOOST_PP_REPEAT_2_68(m, d) BOOST_PP_REPEAT_2_67(m, d) m(3, 67, d)\n# define BOOST_PP_REPEAT_2_69(m, d) BOOST_PP_REPEAT_2_68(m, d) m(3, 68, d)\n# define BOOST_PP_REPEAT_2_70(m, d) BOOST_PP_REPEAT_2_69(m, d) m(3, 69, d)\n# define BOOST_PP_REPEAT_2_71(m, d) BOOST_PP_REPEAT_2_70(m, d) m(3, 70, d)\n# define BOOST_PP_REPEAT_2_72(m, d) BOOST_PP_REPEAT_2_71(m, d) m(3, 71, d)\n# define BOOST_PP_REPEAT_2_73(m, d) BOOST_PP_REPEAT_2_72(m, d) m(3, 72, d)\n# define BOOST_PP_REPEAT_2_74(m, d) BOOST_PP_REPEAT_2_73(m, d) m(3, 73, d)\n# define BOOST_PP_REPEAT_2_75(m, d) BOOST_PP_REPEAT_2_74(m, d) m(3, 74, d)\n# define BOOST_PP_REPEAT_2_76(m, d) BOOST_PP_REPEAT_2_75(m, d) m(3, 75, d)\n# define BOOST_PP_REPEAT_2_77(m, d) BOOST_PP_REPEAT_2_76(m, d) m(3, 76, d)\n# define BOOST_PP_REPEAT_2_78(m, d) BOOST_PP_REPEAT_2_77(m, d) m(3, 77, d)\n# define BOOST_PP_REPEAT_2_79(m, d) BOOST_PP_REPEAT_2_78(m, d) m(3, 78, d)\n# define BOOST_PP_REPEAT_2_80(m, d) BOOST_PP_REPEAT_2_79(m, d) m(3, 79, d)\n# 
define BOOST_PP_REPEAT_2_81(m, d) BOOST_PP_REPEAT_2_80(m, d) m(3, 80, d)\n# define BOOST_PP_REPEAT_2_82(m, d) BOOST_PP_REPEAT_2_81(m, d) m(3, 81, d)\n# define BOOST_PP_REPEAT_2_83(m, d) BOOST_PP_REPEAT_2_82(m, d) m(3, 82, d)\n# define BOOST_PP_REPEAT_2_84(m, d) BOOST_PP_REPEAT_2_83(m, d) m(3, 83, d)\n# define BOOST_PP_REPEAT_2_85(m, d) BOOST_PP_REPEAT_2_84(m, d) m(3, 84, d)\n# define BOOST_PP_REPEAT_2_86(m, d) BOOST_PP_REPEAT_2_85(m, d) m(3, 85, d)\n# define BOOST_PP_REPEAT_2_87(m, d) BOOST_PP_REPEAT_2_86(m, d) m(3, 86, d)\n# define BOOST_PP_REPEAT_2_88(m, d) BOOST_PP_REPEAT_2_87(m, d) m(3, 87, d)\n# define BOOST_PP_REPEAT_2_89(m, d) BOOST_PP_REPEAT_2_88(m, d) m(3, 88, d)\n# define BOOST_PP_REPEAT_2_90(m, d) BOOST_PP_REPEAT_2_89(m, d) m(3, 89, d)\n# define BOOST_PP_REPEAT_2_91(m, d) BOOST_PP_REPEAT_2_90(m, d) m(3, 90, d)\n# define BOOST_PP_REPEAT_2_92(m, d) BOOST_PP_REPEAT_2_91(m, d) m(3, 91, d)\n# define BOOST_PP_REPEAT_2_93(m, d) BOOST_PP_REPEAT_2_92(m, d) m(3, 92, d)\n# define BOOST_PP_REPEAT_2_94(m, d) BOOST_PP_REPEAT_2_93(m, d) m(3, 93, d)\n# define BOOST_PP_REPEAT_2_95(m, d) BOOST_PP_REPEAT_2_94(m, d) m(3, 94, d)\n# define BOOST_PP_REPEAT_2_96(m, d) BOOST_PP_REPEAT_2_95(m, d) m(3, 95, d)\n# define BOOST_PP_REPEAT_2_97(m, d) BOOST_PP_REPEAT_2_96(m, d) m(3, 96, d)\n# define BOOST_PP_REPEAT_2_98(m, d) BOOST_PP_REPEAT_2_97(m, d) m(3, 97, d)\n# define BOOST_PP_REPEAT_2_99(m, d) BOOST_PP_REPEAT_2_98(m, d) m(3, 98, d)\n# define BOOST_PP_REPEAT_2_100(m, d) BOOST_PP_REPEAT_2_99(m, d) m(3, 99, d)\n# define BOOST_PP_REPEAT_2_101(m, d) BOOST_PP_REPEAT_2_100(m, d) m(3, 100, d)\n# define BOOST_PP_REPEAT_2_102(m, d) BOOST_PP_REPEAT_2_101(m, d) m(3, 101, d)\n# define BOOST_PP_REPEAT_2_103(m, d) BOOST_PP_REPEAT_2_102(m, d) m(3, 102, d)\n# define BOOST_PP_REPEAT_2_104(m, d) BOOST_PP_REPEAT_2_103(m, d) m(3, 103, d)\n# define BOOST_PP_REPEAT_2_105(m, d) BOOST_PP_REPEAT_2_104(m, d) m(3, 104, d)\n# define BOOST_PP_REPEAT_2_106(m, d) BOOST_PP_REPEAT_2_105(m, d) m(3, 105, d)\n# 
define BOOST_PP_REPEAT_2_107(m, d) BOOST_PP_REPEAT_2_106(m, d) m(3, 106, d)\n# define BOOST_PP_REPEAT_2_108(m, d) BOOST_PP_REPEAT_2_107(m, d) m(3, 107, d)\n# define BOOST_PP_REPEAT_2_109(m, d) BOOST_PP_REPEAT_2_108(m, d) m(3, 108, d)\n# define BOOST_PP_REPEAT_2_110(m, d) BOOST_PP_REPEAT_2_109(m, d) m(3, 109, d)\n# define BOOST_PP_REPEAT_2_111(m, d) BOOST_PP_REPEAT_2_110(m, d) m(3, 110, d)\n# define BOOST_PP_REPEAT_2_112(m, d) BOOST_PP_REPEAT_2_111(m, d) m(3, 111, d)\n# define BOOST_PP_REPEAT_2_113(m, d) BOOST_PP_REPEAT_2_112(m, d) m(3, 112, d)\n# define BOOST_PP_REPEAT_2_114(m, d) BOOST_PP_REPEAT_2_113(m, d) m(3, 113, d)\n# define BOOST_PP_REPEAT_2_115(m, d) BOOST_PP_REPEAT_2_114(m, d) m(3, 114, d)\n# define BOOST_PP_REPEAT_2_116(m, d) BOOST_PP_REPEAT_2_115(m, d) m(3, 115, d)\n# define BOOST_PP_REPEAT_2_117(m, d) BOOST_PP_REPEAT_2_116(m, d) m(3, 116, d)\n# define BOOST_PP_REPEAT_2_118(m, d) BOOST_PP_REPEAT_2_117(m, d) m(3, 117, d)\n# define BOOST_PP_REPEAT_2_119(m, d) BOOST_PP_REPEAT_2_118(m, d) m(3, 118, d)\n# define BOOST_PP_REPEAT_2_120(m, d) BOOST_PP_REPEAT_2_119(m, d) m(3, 119, d)\n# define BOOST_PP_REPEAT_2_121(m, d) BOOST_PP_REPEAT_2_120(m, d) m(3, 120, d)\n# define BOOST_PP_REPEAT_2_122(m, d) BOOST_PP_REPEAT_2_121(m, d) m(3, 121, d)\n# define BOOST_PP_REPEAT_2_123(m, d) BOOST_PP_REPEAT_2_122(m, d) m(3, 122, d)\n# define BOOST_PP_REPEAT_2_124(m, d) BOOST_PP_REPEAT_2_123(m, d) m(3, 123, d)\n# define BOOST_PP_REPEAT_2_125(m, d) BOOST_PP_REPEAT_2_124(m, d) m(3, 124, d)\n# define BOOST_PP_REPEAT_2_126(m, d) BOOST_PP_REPEAT_2_125(m, d) m(3, 125, d)\n# define BOOST_PP_REPEAT_2_127(m, d) BOOST_PP_REPEAT_2_126(m, d) m(3, 126, d)\n# define BOOST_PP_REPEAT_2_128(m, d) BOOST_PP_REPEAT_2_127(m, d) m(3, 127, d)\n# define BOOST_PP_REPEAT_2_129(m, d) BOOST_PP_REPEAT_2_128(m, d) m(3, 128, d)\n# define BOOST_PP_REPEAT_2_130(m, d) BOOST_PP_REPEAT_2_129(m, d) m(3, 129, d)\n# define BOOST_PP_REPEAT_2_131(m, d) BOOST_PP_REPEAT_2_130(m, d) m(3, 130, d)\n# define 
BOOST_PP_REPEAT_2_132(m, d) BOOST_PP_REPEAT_2_131(m, d) m(3, 131, d)\n# define BOOST_PP_REPEAT_2_133(m, d) BOOST_PP_REPEAT_2_132(m, d) m(3, 132, d)\n# define BOOST_PP_REPEAT_2_134(m, d) BOOST_PP_REPEAT_2_133(m, d) m(3, 133, d)\n# define BOOST_PP_REPEAT_2_135(m, d) BOOST_PP_REPEAT_2_134(m, d) m(3, 134, d)\n# define BOOST_PP_REPEAT_2_136(m, d) BOOST_PP_REPEAT_2_135(m, d) m(3, 135, d)\n# define BOOST_PP_REPEAT_2_137(m, d) BOOST_PP_REPEAT_2_136(m, d) m(3, 136, d)\n# define BOOST_PP_REPEAT_2_138(m, d) BOOST_PP_REPEAT_2_137(m, d) m(3, 137, d)\n# define BOOST_PP_REPEAT_2_139(m, d) BOOST_PP_REPEAT_2_138(m, d) m(3, 138, d)\n# define BOOST_PP_REPEAT_2_140(m, d) BOOST_PP_REPEAT_2_139(m, d) m(3, 139, d)\n# define BOOST_PP_REPEAT_2_141(m, d) BOOST_PP_REPEAT_2_140(m, d) m(3, 140, d)\n# define BOOST_PP_REPEAT_2_142(m, d) BOOST_PP_REPEAT_2_141(m, d) m(3, 141, d)\n# define BOOST_PP_REPEAT_2_143(m, d) BOOST_PP_REPEAT_2_142(m, d) m(3, 142, d)\n# define BOOST_PP_REPEAT_2_144(m, d) BOOST_PP_REPEAT_2_143(m, d) m(3, 143, d)\n# define BOOST_PP_REPEAT_2_145(m, d) BOOST_PP_REPEAT_2_144(m, d) m(3, 144, d)\n# define BOOST_PP_REPEAT_2_146(m, d) BOOST_PP_REPEAT_2_145(m, d) m(3, 145, d)\n# define BOOST_PP_REPEAT_2_147(m, d) BOOST_PP_REPEAT_2_146(m, d) m(3, 146, d)\n# define BOOST_PP_REPEAT_2_148(m, d) BOOST_PP_REPEAT_2_147(m, d) m(3, 147, d)\n# define BOOST_PP_REPEAT_2_149(m, d) BOOST_PP_REPEAT_2_148(m, d) m(3, 148, d)\n# define BOOST_PP_REPEAT_2_150(m, d) BOOST_PP_REPEAT_2_149(m, d) m(3, 149, d)\n# define BOOST_PP_REPEAT_2_151(m, d) BOOST_PP_REPEAT_2_150(m, d) m(3, 150, d)\n# define BOOST_PP_REPEAT_2_152(m, d) BOOST_PP_REPEAT_2_151(m, d) m(3, 151, d)\n# define BOOST_PP_REPEAT_2_153(m, d) BOOST_PP_REPEAT_2_152(m, d) m(3, 152, d)\n# define BOOST_PP_REPEAT_2_154(m, d) BOOST_PP_REPEAT_2_153(m, d) m(3, 153, d)\n# define BOOST_PP_REPEAT_2_155(m, d) BOOST_PP_REPEAT_2_154(m, d) m(3, 154, d)\n# define BOOST_PP_REPEAT_2_156(m, d) BOOST_PP_REPEAT_2_155(m, d) m(3, 155, d)\n# define BOOST_PP_REPEAT_2_157(m, 
d) BOOST_PP_REPEAT_2_156(m, d) m(3, 156, d)\n# define BOOST_PP_REPEAT_2_158(m, d) BOOST_PP_REPEAT_2_157(m, d) m(3, 157, d)\n# define BOOST_PP_REPEAT_2_159(m, d) BOOST_PP_REPEAT_2_158(m, d) m(3, 158, d)\n# define BOOST_PP_REPEAT_2_160(m, d) BOOST_PP_REPEAT_2_159(m, d) m(3, 159, d)\n# define BOOST_PP_REPEAT_2_161(m, d) BOOST_PP_REPEAT_2_160(m, d) m(3, 160, d)\n# define BOOST_PP_REPEAT_2_162(m, d) BOOST_PP_REPEAT_2_161(m, d) m(3, 161, d)\n# define BOOST_PP_REPEAT_2_163(m, d) BOOST_PP_REPEAT_2_162(m, d) m(3, 162, d)\n# define BOOST_PP_REPEAT_2_164(m, d) BOOST_PP_REPEAT_2_163(m, d) m(3, 163, d)\n# define BOOST_PP_REPEAT_2_165(m, d) BOOST_PP_REPEAT_2_164(m, d) m(3, 164, d)\n# define BOOST_PP_REPEAT_2_166(m, d) BOOST_PP_REPEAT_2_165(m, d) m(3, 165, d)\n# define BOOST_PP_REPEAT_2_167(m, d) BOOST_PP_REPEAT_2_166(m, d) m(3, 166, d)\n# define BOOST_PP_REPEAT_2_168(m, d) BOOST_PP_REPEAT_2_167(m, d) m(3, 167, d)\n# define BOOST_PP_REPEAT_2_169(m, d) BOOST_PP_REPEAT_2_168(m, d) m(3, 168, d)\n# define BOOST_PP_REPEAT_2_170(m, d) BOOST_PP_REPEAT_2_169(m, d) m(3, 169, d)\n# define BOOST_PP_REPEAT_2_171(m, d) BOOST_PP_REPEAT_2_170(m, d) m(3, 170, d)\n# define BOOST_PP_REPEAT_2_172(m, d) BOOST_PP_REPEAT_2_171(m, d) m(3, 171, d)\n# define BOOST_PP_REPEAT_2_173(m, d) BOOST_PP_REPEAT_2_172(m, d) m(3, 172, d)\n# define BOOST_PP_REPEAT_2_174(m, d) BOOST_PP_REPEAT_2_173(m, d) m(3, 173, d)\n# define BOOST_PP_REPEAT_2_175(m, d) BOOST_PP_REPEAT_2_174(m, d) m(3, 174, d)\n# define BOOST_PP_REPEAT_2_176(m, d) BOOST_PP_REPEAT_2_175(m, d) m(3, 175, d)\n# define BOOST_PP_REPEAT_2_177(m, d) BOOST_PP_REPEAT_2_176(m, d) m(3, 176, d)\n# define BOOST_PP_REPEAT_2_178(m, d) BOOST_PP_REPEAT_2_177(m, d) m(3, 177, d)\n# define BOOST_PP_REPEAT_2_179(m, d) BOOST_PP_REPEAT_2_178(m, d) m(3, 178, d)\n# define BOOST_PP_REPEAT_2_180(m, d) BOOST_PP_REPEAT_2_179(m, d) m(3, 179, d)\n# define BOOST_PP_REPEAT_2_181(m, d) BOOST_PP_REPEAT_2_180(m, d) m(3, 180, d)\n# define BOOST_PP_REPEAT_2_182(m, d) 
BOOST_PP_REPEAT_2_181(m, d) m(3, 181, d)\n# define BOOST_PP_REPEAT_2_183(m, d) BOOST_PP_REPEAT_2_182(m, d) m(3, 182, d)\n# define BOOST_PP_REPEAT_2_184(m, d) BOOST_PP_REPEAT_2_183(m, d) m(3, 183, d)\n# define BOOST_PP_REPEAT_2_185(m, d) BOOST_PP_REPEAT_2_184(m, d) m(3, 184, d)\n# define BOOST_PP_REPEAT_2_186(m, d) BOOST_PP_REPEAT_2_185(m, d) m(3, 185, d)\n# define BOOST_PP_REPEAT_2_187(m, d) BOOST_PP_REPEAT_2_186(m, d) m(3, 186, d)\n# define BOOST_PP_REPEAT_2_188(m, d) BOOST_PP_REPEAT_2_187(m, d) m(3, 187, d)\n# define BOOST_PP_REPEAT_2_189(m, d) BOOST_PP_REPEAT_2_188(m, d) m(3, 188, d)\n# define BOOST_PP_REPEAT_2_190(m, d) BOOST_PP_REPEAT_2_189(m, d) m(3, 189, d)\n# define BOOST_PP_REPEAT_2_191(m, d) BOOST_PP_REPEAT_2_190(m, d) m(3, 190, d)\n# define BOOST_PP_REPEAT_2_192(m, d) BOOST_PP_REPEAT_2_191(m, d) m(3, 191, d)\n# define BOOST_PP_REPEAT_2_193(m, d) BOOST_PP_REPEAT_2_192(m, d) m(3, 192, d)\n# define BOOST_PP_REPEAT_2_194(m, d) BOOST_PP_REPEAT_2_193(m, d) m(3, 193, d)\n# define BOOST_PP_REPEAT_2_195(m, d) BOOST_PP_REPEAT_2_194(m, d) m(3, 194, d)\n# define BOOST_PP_REPEAT_2_196(m, d) BOOST_PP_REPEAT_2_195(m, d) m(3, 195, d)\n# define BOOST_PP_REPEAT_2_197(m, d) BOOST_PP_REPEAT_2_196(m, d) m(3, 196, d)\n# define BOOST_PP_REPEAT_2_198(m, d) BOOST_PP_REPEAT_2_197(m, d) m(3, 197, d)\n# define BOOST_PP_REPEAT_2_199(m, d) BOOST_PP_REPEAT_2_198(m, d) m(3, 198, d)\n# define BOOST_PP_REPEAT_2_200(m, d) BOOST_PP_REPEAT_2_199(m, d) m(3, 199, d)\n# define BOOST_PP_REPEAT_2_201(m, d) BOOST_PP_REPEAT_2_200(m, d) m(3, 200, d)\n# define BOOST_PP_REPEAT_2_202(m, d) BOOST_PP_REPEAT_2_201(m, d) m(3, 201, d)\n# define BOOST_PP_REPEAT_2_203(m, d) BOOST_PP_REPEAT_2_202(m, d) m(3, 202, d)\n# define BOOST_PP_REPEAT_2_204(m, d) BOOST_PP_REPEAT_2_203(m, d) m(3, 203, d)\n# define BOOST_PP_REPEAT_2_205(m, d) BOOST_PP_REPEAT_2_204(m, d) m(3, 204, d)\n# define BOOST_PP_REPEAT_2_206(m, d) BOOST_PP_REPEAT_2_205(m, d) m(3, 205, d)\n# define BOOST_PP_REPEAT_2_207(m, d) BOOST_PP_REPEAT_2_206(m, 
d) m(3, 206, d)\n# define BOOST_PP_REPEAT_2_208(m, d) BOOST_PP_REPEAT_2_207(m, d) m(3, 207, d)\n# define BOOST_PP_REPEAT_2_209(m, d) BOOST_PP_REPEAT_2_208(m, d) m(3, 208, d)\n# define BOOST_PP_REPEAT_2_210(m, d) BOOST_PP_REPEAT_2_209(m, d) m(3, 209, d)\n# define BOOST_PP_REPEAT_2_211(m, d) BOOST_PP_REPEAT_2_210(m, d) m(3, 210, d)\n# define BOOST_PP_REPEAT_2_212(m, d) BOOST_PP_REPEAT_2_211(m, d) m(3, 211, d)\n# define BOOST_PP_REPEAT_2_213(m, d) BOOST_PP_REPEAT_2_212(m, d) m(3, 212, d)\n# define BOOST_PP_REPEAT_2_214(m, d) BOOST_PP_REPEAT_2_213(m, d) m(3, 213, d)\n# define BOOST_PP_REPEAT_2_215(m, d) BOOST_PP_REPEAT_2_214(m, d) m(3, 214, d)\n# define BOOST_PP_REPEAT_2_216(m, d) BOOST_PP_REPEAT_2_215(m, d) m(3, 215, d)\n# define BOOST_PP_REPEAT_2_217(m, d) BOOST_PP_REPEAT_2_216(m, d) m(3, 216, d)\n# define BOOST_PP_REPEAT_2_218(m, d) BOOST_PP_REPEAT_2_217(m, d) m(3, 217, d)\n# define BOOST_PP_REPEAT_2_219(m, d) BOOST_PP_REPEAT_2_218(m, d) m(3, 218, d)\n# define BOOST_PP_REPEAT_2_220(m, d) BOOST_PP_REPEAT_2_219(m, d) m(3, 219, d)\n# define BOOST_PP_REPEAT_2_221(m, d) BOOST_PP_REPEAT_2_220(m, d) m(3, 220, d)\n# define BOOST_PP_REPEAT_2_222(m, d) BOOST_PP_REPEAT_2_221(m, d) m(3, 221, d)\n# define BOOST_PP_REPEAT_2_223(m, d) BOOST_PP_REPEAT_2_222(m, d) m(3, 222, d)\n# define BOOST_PP_REPEAT_2_224(m, d) BOOST_PP_REPEAT_2_223(m, d) m(3, 223, d)\n# define BOOST_PP_REPEAT_2_225(m, d) BOOST_PP_REPEAT_2_224(m, d) m(3, 224, d)\n# define BOOST_PP_REPEAT_2_226(m, d) BOOST_PP_REPEAT_2_225(m, d) m(3, 225, d)\n# define BOOST_PP_REPEAT_2_227(m, d) BOOST_PP_REPEAT_2_226(m, d) m(3, 226, d)\n# define BOOST_PP_REPEAT_2_228(m, d) BOOST_PP_REPEAT_2_227(m, d) m(3, 227, d)\n# define BOOST_PP_REPEAT_2_229(m, d) BOOST_PP_REPEAT_2_228(m, d) m(3, 228, d)\n# define BOOST_PP_REPEAT_2_230(m, d) BOOST_PP_REPEAT_2_229(m, d) m(3, 229, d)\n# define BOOST_PP_REPEAT_2_231(m, d) BOOST_PP_REPEAT_2_230(m, d) m(3, 230, d)\n# define BOOST_PP_REPEAT_2_232(m, d) BOOST_PP_REPEAT_2_231(m, d) m(3, 231, d)\n# 
define BOOST_PP_REPEAT_2_233(m, d) BOOST_PP_REPEAT_2_232(m, d) m(3, 232, d)\n# define BOOST_PP_REPEAT_2_234(m, d) BOOST_PP_REPEAT_2_233(m, d) m(3, 233, d)\n# define BOOST_PP_REPEAT_2_235(m, d) BOOST_PP_REPEAT_2_234(m, d) m(3, 234, d)\n# define BOOST_PP_REPEAT_2_236(m, d) BOOST_PP_REPEAT_2_235(m, d) m(3, 235, d)\n# define BOOST_PP_REPEAT_2_237(m, d) BOOST_PP_REPEAT_2_236(m, d) m(3, 236, d)\n# define BOOST_PP_REPEAT_2_238(m, d) BOOST_PP_REPEAT_2_237(m, d) m(3, 237, d)\n# define BOOST_PP_REPEAT_2_239(m, d) BOOST_PP_REPEAT_2_238(m, d) m(3, 238, d)\n# define BOOST_PP_REPEAT_2_240(m, d) BOOST_PP_REPEAT_2_239(m, d) m(3, 239, d)\n# define BOOST_PP_REPEAT_2_241(m, d) BOOST_PP_REPEAT_2_240(m, d) m(3, 240, d)\n# define BOOST_PP_REPEAT_2_242(m, d) BOOST_PP_REPEAT_2_241(m, d) m(3, 241, d)\n# define BOOST_PP_REPEAT_2_243(m, d) BOOST_PP_REPEAT_2_242(m, d) m(3, 242, d)\n# define BOOST_PP_REPEAT_2_244(m, d) BOOST_PP_REPEAT_2_243(m, d) m(3, 243, d)\n# define BOOST_PP_REPEAT_2_245(m, d) BOOST_PP_REPEAT_2_244(m, d) m(3, 244, d)\n# define BOOST_PP_REPEAT_2_246(m, d) BOOST_PP_REPEAT_2_245(m, d) m(3, 245, d)\n# define BOOST_PP_REPEAT_2_247(m, d) BOOST_PP_REPEAT_2_246(m, d) m(3, 246, d)\n# define BOOST_PP_REPEAT_2_248(m, d) BOOST_PP_REPEAT_2_247(m, d) m(3, 247, d)\n# define BOOST_PP_REPEAT_2_249(m, d) BOOST_PP_REPEAT_2_248(m, d) m(3, 248, d)\n# define BOOST_PP_REPEAT_2_250(m, d) BOOST_PP_REPEAT_2_249(m, d) m(3, 249, d)\n# define BOOST_PP_REPEAT_2_251(m, d) BOOST_PP_REPEAT_2_250(m, d) m(3, 250, d)\n# define BOOST_PP_REPEAT_2_252(m, d) BOOST_PP_REPEAT_2_251(m, d) m(3, 251, d)\n# define BOOST_PP_REPEAT_2_253(m, d) BOOST_PP_REPEAT_2_252(m, d) m(3, 252, d)\n# define BOOST_PP_REPEAT_2_254(m, d) BOOST_PP_REPEAT_2_253(m, d) m(3, 253, d)\n# define BOOST_PP_REPEAT_2_255(m, d) BOOST_PP_REPEAT_2_254(m, d) m(3, 254, d)\n# define BOOST_PP_REPEAT_2_256(m, d) BOOST_PP_REPEAT_2_255(m, d) m(3, 255, d)\n#\n# define BOOST_PP_REPEAT_3_0(m, d)\n# define BOOST_PP_REPEAT_3_1(m, d) m(4, 0, d)\n# define 
BOOST_PP_REPEAT_3_2(m, d) BOOST_PP_REPEAT_3_1(m, d) m(4, 1, d)\n# define BOOST_PP_REPEAT_3_3(m, d) BOOST_PP_REPEAT_3_2(m, d) m(4, 2, d)\n# define BOOST_PP_REPEAT_3_4(m, d) BOOST_PP_REPEAT_3_3(m, d) m(4, 3, d)\n# define BOOST_PP_REPEAT_3_5(m, d) BOOST_PP_REPEAT_3_4(m, d) m(4, 4, d)\n# define BOOST_PP_REPEAT_3_6(m, d) BOOST_PP_REPEAT_3_5(m, d) m(4, 5, d)\n# define BOOST_PP_REPEAT_3_7(m, d) BOOST_PP_REPEAT_3_6(m, d) m(4, 6, d)\n# define BOOST_PP_REPEAT_3_8(m, d) BOOST_PP_REPEAT_3_7(m, d) m(4, 7, d)\n# define BOOST_PP_REPEAT_3_9(m, d) BOOST_PP_REPEAT_3_8(m, d) m(4, 8, d)\n# define BOOST_PP_REPEAT_3_10(m, d) BOOST_PP_REPEAT_3_9(m, d) m(4, 9, d)\n# define BOOST_PP_REPEAT_3_11(m, d) BOOST_PP_REPEAT_3_10(m, d) m(4, 10, d)\n# define BOOST_PP_REPEAT_3_12(m, d) BOOST_PP_REPEAT_3_11(m, d) m(4, 11, d)\n# define BOOST_PP_REPEAT_3_13(m, d) BOOST_PP_REPEAT_3_12(m, d) m(4, 12, d)\n# define BOOST_PP_REPEAT_3_14(m, d) BOOST_PP_REPEAT_3_13(m, d) m(4, 13, d)\n# define BOOST_PP_REPEAT_3_15(m, d) BOOST_PP_REPEAT_3_14(m, d) m(4, 14, d)\n# define BOOST_PP_REPEAT_3_16(m, d) BOOST_PP_REPEAT_3_15(m, d) m(4, 15, d)\n# define BOOST_PP_REPEAT_3_17(m, d) BOOST_PP_REPEAT_3_16(m, d) m(4, 16, d)\n# define BOOST_PP_REPEAT_3_18(m, d) BOOST_PP_REPEAT_3_17(m, d) m(4, 17, d)\n# define BOOST_PP_REPEAT_3_19(m, d) BOOST_PP_REPEAT_3_18(m, d) m(4, 18, d)\n# define BOOST_PP_REPEAT_3_20(m, d) BOOST_PP_REPEAT_3_19(m, d) m(4, 19, d)\n# define BOOST_PP_REPEAT_3_21(m, d) BOOST_PP_REPEAT_3_20(m, d) m(4, 20, d)\n# define BOOST_PP_REPEAT_3_22(m, d) BOOST_PP_REPEAT_3_21(m, d) m(4, 21, d)\n# define BOOST_PP_REPEAT_3_23(m, d) BOOST_PP_REPEAT_3_22(m, d) m(4, 22, d)\n# define BOOST_PP_REPEAT_3_24(m, d) BOOST_PP_REPEAT_3_23(m, d) m(4, 23, d)\n# define BOOST_PP_REPEAT_3_25(m, d) BOOST_PP_REPEAT_3_24(m, d) m(4, 24, d)\n# define BOOST_PP_REPEAT_3_26(m, d) BOOST_PP_REPEAT_3_25(m, d) m(4, 25, d)\n# define BOOST_PP_REPEAT_3_27(m, d) BOOST_PP_REPEAT_3_26(m, d) m(4, 26, d)\n# define BOOST_PP_REPEAT_3_28(m, d) 
BOOST_PP_REPEAT_3_27(m, d) m(4, 27, d)\n# define BOOST_PP_REPEAT_3_29(m, d) BOOST_PP_REPEAT_3_28(m, d) m(4, 28, d)\n# define BOOST_PP_REPEAT_3_30(m, d) BOOST_PP_REPEAT_3_29(m, d) m(4, 29, d)\n# define BOOST_PP_REPEAT_3_31(m, d) BOOST_PP_REPEAT_3_30(m, d) m(4, 30, d)\n# define BOOST_PP_REPEAT_3_32(m, d) BOOST_PP_REPEAT_3_31(m, d) m(4, 31, d)\n# define BOOST_PP_REPEAT_3_33(m, d) BOOST_PP_REPEAT_3_32(m, d) m(4, 32, d)\n# define BOOST_PP_REPEAT_3_34(m, d) BOOST_PP_REPEAT_3_33(m, d) m(4, 33, d)\n# define BOOST_PP_REPEAT_3_35(m, d) BOOST_PP_REPEAT_3_34(m, d) m(4, 34, d)\n# define BOOST_PP_REPEAT_3_36(m, d) BOOST_PP_REPEAT_3_35(m, d) m(4, 35, d)\n# define BOOST_PP_REPEAT_3_37(m, d) BOOST_PP_REPEAT_3_36(m, d) m(4, 36, d)\n# define BOOST_PP_REPEAT_3_38(m, d) BOOST_PP_REPEAT_3_37(m, d) m(4, 37, d)\n# define BOOST_PP_REPEAT_3_39(m, d) BOOST_PP_REPEAT_3_38(m, d) m(4, 38, d)\n# define BOOST_PP_REPEAT_3_40(m, d) BOOST_PP_REPEAT_3_39(m, d) m(4, 39, d)\n# define BOOST_PP_REPEAT_3_41(m, d) BOOST_PP_REPEAT_3_40(m, d) m(4, 40, d)\n# define BOOST_PP_REPEAT_3_42(m, d) BOOST_PP_REPEAT_3_41(m, d) m(4, 41, d)\n# define BOOST_PP_REPEAT_3_43(m, d) BOOST_PP_REPEAT_3_42(m, d) m(4, 42, d)\n# define BOOST_PP_REPEAT_3_44(m, d) BOOST_PP_REPEAT_3_43(m, d) m(4, 43, d)\n# define BOOST_PP_REPEAT_3_45(m, d) BOOST_PP_REPEAT_3_44(m, d) m(4, 44, d)\n# define BOOST_PP_REPEAT_3_46(m, d) BOOST_PP_REPEAT_3_45(m, d) m(4, 45, d)\n# define BOOST_PP_REPEAT_3_47(m, d) BOOST_PP_REPEAT_3_46(m, d) m(4, 46, d)\n# define BOOST_PP_REPEAT_3_48(m, d) BOOST_PP_REPEAT_3_47(m, d) m(4, 47, d)\n# define BOOST_PP_REPEAT_3_49(m, d) BOOST_PP_REPEAT_3_48(m, d) m(4, 48, d)\n# define BOOST_PP_REPEAT_3_50(m, d) BOOST_PP_REPEAT_3_49(m, d) m(4, 49, d)\n# define BOOST_PP_REPEAT_3_51(m, d) BOOST_PP_REPEAT_3_50(m, d) m(4, 50, d)\n# define BOOST_PP_REPEAT_3_52(m, d) BOOST_PP_REPEAT_3_51(m, d) m(4, 51, d)\n# define BOOST_PP_REPEAT_3_53(m, d) BOOST_PP_REPEAT_3_52(m, d) m(4, 52, d)\n# define BOOST_PP_REPEAT_3_54(m, d) BOOST_PP_REPEAT_3_53(m, 
d) m(4, 53, d)\n# define BOOST_PP_REPEAT_3_55(m, d) BOOST_PP_REPEAT_3_54(m, d) m(4, 54, d)\n# define BOOST_PP_REPEAT_3_56(m, d) BOOST_PP_REPEAT_3_55(m, d) m(4, 55, d)\n# define BOOST_PP_REPEAT_3_57(m, d) BOOST_PP_REPEAT_3_56(m, d) m(4, 56, d)\n# define BOOST_PP_REPEAT_3_58(m, d) BOOST_PP_REPEAT_3_57(m, d) m(4, 57, d)\n# define BOOST_PP_REPEAT_3_59(m, d) BOOST_PP_REPEAT_3_58(m, d) m(4, 58, d)\n# define BOOST_PP_REPEAT_3_60(m, d) BOOST_PP_REPEAT_3_59(m, d) m(4, 59, d)\n# define BOOST_PP_REPEAT_3_61(m, d) BOOST_PP_REPEAT_3_60(m, d) m(4, 60, d)\n# define BOOST_PP_REPEAT_3_62(m, d) BOOST_PP_REPEAT_3_61(m, d) m(4, 61, d)\n# define BOOST_PP_REPEAT_3_63(m, d) BOOST_PP_REPEAT_3_62(m, d) m(4, 62, d)\n# define BOOST_PP_REPEAT_3_64(m, d) BOOST_PP_REPEAT_3_63(m, d) m(4, 63, d)\n# define BOOST_PP_REPEAT_3_65(m, d) BOOST_PP_REPEAT_3_64(m, d) m(4, 64, d)\n# define BOOST_PP_REPEAT_3_66(m, d) BOOST_PP_REPEAT_3_65(m, d) m(4, 65, d)\n# define BOOST_PP_REPEAT_3_67(m, d) BOOST_PP_REPEAT_3_66(m, d) m(4, 66, d)\n# define BOOST_PP_REPEAT_3_68(m, d) BOOST_PP_REPEAT_3_67(m, d) m(4, 67, d)\n# define BOOST_PP_REPEAT_3_69(m, d) BOOST_PP_REPEAT_3_68(m, d) m(4, 68, d)\n# define BOOST_PP_REPEAT_3_70(m, d) BOOST_PP_REPEAT_3_69(m, d) m(4, 69, d)\n# define BOOST_PP_REPEAT_3_71(m, d) BOOST_PP_REPEAT_3_70(m, d) m(4, 70, d)\n# define BOOST_PP_REPEAT_3_72(m, d) BOOST_PP_REPEAT_3_71(m, d) m(4, 71, d)\n# define BOOST_PP_REPEAT_3_73(m, d) BOOST_PP_REPEAT_3_72(m, d) m(4, 72, d)\n# define BOOST_PP_REPEAT_3_74(m, d) BOOST_PP_REPEAT_3_73(m, d) m(4, 73, d)\n# define BOOST_PP_REPEAT_3_75(m, d) BOOST_PP_REPEAT_3_74(m, d) m(4, 74, d)\n# define BOOST_PP_REPEAT_3_76(m, d) BOOST_PP_REPEAT_3_75(m, d) m(4, 75, d)\n# define BOOST_PP_REPEAT_3_77(m, d) BOOST_PP_REPEAT_3_76(m, d) m(4, 76, d)\n# define BOOST_PP_REPEAT_3_78(m, d) BOOST_PP_REPEAT_3_77(m, d) m(4, 77, d)\n# define BOOST_PP_REPEAT_3_79(m, d) BOOST_PP_REPEAT_3_78(m, d) m(4, 78, d)\n# define BOOST_PP_REPEAT_3_80(m, d) BOOST_PP_REPEAT_3_79(m, d) m(4, 79, d)\n# 
define BOOST_PP_REPEAT_3_81(m, d) BOOST_PP_REPEAT_3_80(m, d) m(4, 80, d)\n# define BOOST_PP_REPEAT_3_82(m, d) BOOST_PP_REPEAT_3_81(m, d) m(4, 81, d)\n# define BOOST_PP_REPEAT_3_83(m, d) BOOST_PP_REPEAT_3_82(m, d) m(4, 82, d)\n# define BOOST_PP_REPEAT_3_84(m, d) BOOST_PP_REPEAT_3_83(m, d) m(4, 83, d)\n# define BOOST_PP_REPEAT_3_85(m, d) BOOST_PP_REPEAT_3_84(m, d) m(4, 84, d)\n# define BOOST_PP_REPEAT_3_86(m, d) BOOST_PP_REPEAT_3_85(m, d) m(4, 85, d)\n# define BOOST_PP_REPEAT_3_87(m, d) BOOST_PP_REPEAT_3_86(m, d) m(4, 86, d)\n# define BOOST_PP_REPEAT_3_88(m, d) BOOST_PP_REPEAT_3_87(m, d) m(4, 87, d)\n# define BOOST_PP_REPEAT_3_89(m, d) BOOST_PP_REPEAT_3_88(m, d) m(4, 88, d)\n# define BOOST_PP_REPEAT_3_90(m, d) BOOST_PP_REPEAT_3_89(m, d) m(4, 89, d)\n# define BOOST_PP_REPEAT_3_91(m, d) BOOST_PP_REPEAT_3_90(m, d) m(4, 90, d)\n# define BOOST_PP_REPEAT_3_92(m, d) BOOST_PP_REPEAT_3_91(m, d) m(4, 91, d)\n# define BOOST_PP_REPEAT_3_93(m, d) BOOST_PP_REPEAT_3_92(m, d) m(4, 92, d)\n# define BOOST_PP_REPEAT_3_94(m, d) BOOST_PP_REPEAT_3_93(m, d) m(4, 93, d)\n# define BOOST_PP_REPEAT_3_95(m, d) BOOST_PP_REPEAT_3_94(m, d) m(4, 94, d)\n# define BOOST_PP_REPEAT_3_96(m, d) BOOST_PP_REPEAT_3_95(m, d) m(4, 95, d)\n# define BOOST_PP_REPEAT_3_97(m, d) BOOST_PP_REPEAT_3_96(m, d) m(4, 96, d)\n# define BOOST_PP_REPEAT_3_98(m, d) BOOST_PP_REPEAT_3_97(m, d) m(4, 97, d)\n# define BOOST_PP_REPEAT_3_99(m, d) BOOST_PP_REPEAT_3_98(m, d) m(4, 98, d)\n# define BOOST_PP_REPEAT_3_100(m, d) BOOST_PP_REPEAT_3_99(m, d) m(4, 99, d)\n# define BOOST_PP_REPEAT_3_101(m, d) BOOST_PP_REPEAT_3_100(m, d) m(4, 100, d)\n# define BOOST_PP_REPEAT_3_102(m, d) BOOST_PP_REPEAT_3_101(m, d) m(4, 101, d)\n# define BOOST_PP_REPEAT_3_103(m, d) BOOST_PP_REPEAT_3_102(m, d) m(4, 102, d)\n# define BOOST_PP_REPEAT_3_104(m, d) BOOST_PP_REPEAT_3_103(m, d) m(4, 103, d)\n# define BOOST_PP_REPEAT_3_105(m, d) BOOST_PP_REPEAT_3_104(m, d) m(4, 104, d)\n# define BOOST_PP_REPEAT_3_106(m, d) BOOST_PP_REPEAT_3_105(m, d) m(4, 105, d)\n# 
define BOOST_PP_REPEAT_3_107(m, d) BOOST_PP_REPEAT_3_106(m, d) m(4, 106, d)\n# define BOOST_PP_REPEAT_3_108(m, d) BOOST_PP_REPEAT_3_107(m, d) m(4, 107, d)\n# define BOOST_PP_REPEAT_3_109(m, d) BOOST_PP_REPEAT_3_108(m, d) m(4, 108, d)\n# define BOOST_PP_REPEAT_3_110(m, d) BOOST_PP_REPEAT_3_109(m, d) m(4, 109, d)\n# define BOOST_PP_REPEAT_3_111(m, d) BOOST_PP_REPEAT_3_110(m, d) m(4, 110, d)\n# define BOOST_PP_REPEAT_3_112(m, d) BOOST_PP_REPEAT_3_111(m, d) m(4, 111, d)\n# define BOOST_PP_REPEAT_3_113(m, d) BOOST_PP_REPEAT_3_112(m, d) m(4, 112, d)\n# define BOOST_PP_REPEAT_3_114(m, d) BOOST_PP_REPEAT_3_113(m, d) m(4, 113, d)\n# define BOOST_PP_REPEAT_3_115(m, d) BOOST_PP_REPEAT_3_114(m, d) m(4, 114, d)\n# define BOOST_PP_REPEAT_3_116(m, d) BOOST_PP_REPEAT_3_115(m, d) m(4, 115, d)\n# define BOOST_PP_REPEAT_3_117(m, d) BOOST_PP_REPEAT_3_116(m, d) m(4, 116, d)\n# define BOOST_PP_REPEAT_3_118(m, d) BOOST_PP_REPEAT_3_117(m, d) m(4, 117, d)\n# define BOOST_PP_REPEAT_3_119(m, d) BOOST_PP_REPEAT_3_118(m, d) m(4, 118, d)\n# define BOOST_PP_REPEAT_3_120(m, d) BOOST_PP_REPEAT_3_119(m, d) m(4, 119, d)\n# define BOOST_PP_REPEAT_3_121(m, d) BOOST_PP_REPEAT_3_120(m, d) m(4, 120, d)\n# define BOOST_PP_REPEAT_3_122(m, d) BOOST_PP_REPEAT_3_121(m, d) m(4, 121, d)\n# define BOOST_PP_REPEAT_3_123(m, d) BOOST_PP_REPEAT_3_122(m, d) m(4, 122, d)\n# define BOOST_PP_REPEAT_3_124(m, d) BOOST_PP_REPEAT_3_123(m, d) m(4, 123, d)\n# define BOOST_PP_REPEAT_3_125(m, d) BOOST_PP_REPEAT_3_124(m, d) m(4, 124, d)\n# define BOOST_PP_REPEAT_3_126(m, d) BOOST_PP_REPEAT_3_125(m, d) m(4, 125, d)\n# define BOOST_PP_REPEAT_3_127(m, d) BOOST_PP_REPEAT_3_126(m, d) m(4, 126, d)\n# define BOOST_PP_REPEAT_3_128(m, d) BOOST_PP_REPEAT_3_127(m, d) m(4, 127, d)\n# define BOOST_PP_REPEAT_3_129(m, d) BOOST_PP_REPEAT_3_128(m, d) m(4, 128, d)\n# define BOOST_PP_REPEAT_3_130(m, d) BOOST_PP_REPEAT_3_129(m, d) m(4, 129, d)\n# define BOOST_PP_REPEAT_3_131(m, d) BOOST_PP_REPEAT_3_130(m, d) m(4, 130, d)\n# define 
BOOST_PP_REPEAT_3_132(m, d) BOOST_PP_REPEAT_3_131(m, d) m(4, 131, d)\n# define BOOST_PP_REPEAT_3_133(m, d) BOOST_PP_REPEAT_3_132(m, d) m(4, 132, d)\n# define BOOST_PP_REPEAT_3_134(m, d) BOOST_PP_REPEAT_3_133(m, d) m(4, 133, d)\n# define BOOST_PP_REPEAT_3_135(m, d) BOOST_PP_REPEAT_3_134(m, d) m(4, 134, d)\n# define BOOST_PP_REPEAT_3_136(m, d) BOOST_PP_REPEAT_3_135(m, d) m(4, 135, d)\n# define BOOST_PP_REPEAT_3_137(m, d) BOOST_PP_REPEAT_3_136(m, d) m(4, 136, d)\n# define BOOST_PP_REPEAT_3_138(m, d) BOOST_PP_REPEAT_3_137(m, d) m(4, 137, d)\n# define BOOST_PP_REPEAT_3_139(m, d) BOOST_PP_REPEAT_3_138(m, d) m(4, 138, d)\n# define BOOST_PP_REPEAT_3_140(m, d) BOOST_PP_REPEAT_3_139(m, d) m(4, 139, d)\n# define BOOST_PP_REPEAT_3_141(m, d) BOOST_PP_REPEAT_3_140(m, d) m(4, 140, d)\n# define BOOST_PP_REPEAT_3_142(m, d) BOOST_PP_REPEAT_3_141(m, d) m(4, 141, d)\n# define BOOST_PP_REPEAT_3_143(m, d) BOOST_PP_REPEAT_3_142(m, d) m(4, 142, d)\n# define BOOST_PP_REPEAT_3_144(m, d) BOOST_PP_REPEAT_3_143(m, d) m(4, 143, d)\n# define BOOST_PP_REPEAT_3_145(m, d) BOOST_PP_REPEAT_3_144(m, d) m(4, 144, d)\n# define BOOST_PP_REPEAT_3_146(m, d) BOOST_PP_REPEAT_3_145(m, d) m(4, 145, d)\n# define BOOST_PP_REPEAT_3_147(m, d) BOOST_PP_REPEAT_3_146(m, d) m(4, 146, d)\n# define BOOST_PP_REPEAT_3_148(m, d) BOOST_PP_REPEAT_3_147(m, d) m(4, 147, d)\n# define BOOST_PP_REPEAT_3_149(m, d) BOOST_PP_REPEAT_3_148(m, d) m(4, 148, d)\n# define BOOST_PP_REPEAT_3_150(m, d) BOOST_PP_REPEAT_3_149(m, d) m(4, 149, d)\n# define BOOST_PP_REPEAT_3_151(m, d) BOOST_PP_REPEAT_3_150(m, d) m(4, 150, d)\n# define BOOST_PP_REPEAT_3_152(m, d) BOOST_PP_REPEAT_3_151(m, d) m(4, 151, d)\n# define BOOST_PP_REPEAT_3_153(m, d) BOOST_PP_REPEAT_3_152(m, d) m(4, 152, d)\n# define BOOST_PP_REPEAT_3_154(m, d) BOOST_PP_REPEAT_3_153(m, d) m(4, 153, d)\n# define BOOST_PP_REPEAT_3_155(m, d) BOOST_PP_REPEAT_3_154(m, d) m(4, 154, d)\n# define BOOST_PP_REPEAT_3_156(m, d) BOOST_PP_REPEAT_3_155(m, d) m(4, 155, d)\n# define BOOST_PP_REPEAT_3_157(m, 
d) BOOST_PP_REPEAT_3_156(m, d) m(4, 156, d)\n# define BOOST_PP_REPEAT_3_158(m, d) BOOST_PP_REPEAT_3_157(m, d) m(4, 157, d)\n# define BOOST_PP_REPEAT_3_159(m, d) BOOST_PP_REPEAT_3_158(m, d) m(4, 158, d)\n# define BOOST_PP_REPEAT_3_160(m, d) BOOST_PP_REPEAT_3_159(m, d) m(4, 159, d)\n# define BOOST_PP_REPEAT_3_161(m, d) BOOST_PP_REPEAT_3_160(m, d) m(4, 160, d)\n# define BOOST_PP_REPEAT_3_162(m, d) BOOST_PP_REPEAT_3_161(m, d) m(4, 161, d)\n# define BOOST_PP_REPEAT_3_163(m, d) BOOST_PP_REPEAT_3_162(m, d) m(4, 162, d)\n# define BOOST_PP_REPEAT_3_164(m, d) BOOST_PP_REPEAT_3_163(m, d) m(4, 163, d)\n# define BOOST_PP_REPEAT_3_165(m, d) BOOST_PP_REPEAT_3_164(m, d) m(4, 164, d)\n# define BOOST_PP_REPEAT_3_166(m, d) BOOST_PP_REPEAT_3_165(m, d) m(4, 165, d)\n# define BOOST_PP_REPEAT_3_167(m, d) BOOST_PP_REPEAT_3_166(m, d) m(4, 166, d)\n# define BOOST_PP_REPEAT_3_168(m, d) BOOST_PP_REPEAT_3_167(m, d) m(4, 167, d)\n# define BOOST_PP_REPEAT_3_169(m, d) BOOST_PP_REPEAT_3_168(m, d) m(4, 168, d)\n# define BOOST_PP_REPEAT_3_170(m, d) BOOST_PP_REPEAT_3_169(m, d) m(4, 169, d)\n# define BOOST_PP_REPEAT_3_171(m, d) BOOST_PP_REPEAT_3_170(m, d) m(4, 170, d)\n# define BOOST_PP_REPEAT_3_172(m, d) BOOST_PP_REPEAT_3_171(m, d) m(4, 171, d)\n# define BOOST_PP_REPEAT_3_173(m, d) BOOST_PP_REPEAT_3_172(m, d) m(4, 172, d)\n# define BOOST_PP_REPEAT_3_174(m, d) BOOST_PP_REPEAT_3_173(m, d) m(4, 173, d)\n# define BOOST_PP_REPEAT_3_175(m, d) BOOST_PP_REPEAT_3_174(m, d) m(4, 174, d)\n# define BOOST_PP_REPEAT_3_176(m, d) BOOST_PP_REPEAT_3_175(m, d) m(4, 175, d)\n# define BOOST_PP_REPEAT_3_177(m, d) BOOST_PP_REPEAT_3_176(m, d) m(4, 176, d)\n# define BOOST_PP_REPEAT_3_178(m, d) BOOST_PP_REPEAT_3_177(m, d) m(4, 177, d)\n# define BOOST_PP_REPEAT_3_179(m, d) BOOST_PP_REPEAT_3_178(m, d) m(4, 178, d)\n# define BOOST_PP_REPEAT_3_180(m, d) BOOST_PP_REPEAT_3_179(m, d) m(4, 179, d)\n# define BOOST_PP_REPEAT_3_181(m, d) BOOST_PP_REPEAT_3_180(m, d) m(4, 180, d)\n# define BOOST_PP_REPEAT_3_182(m, d) 
BOOST_PP_REPEAT_3_181(m, d) m(4, 181, d)\n# define BOOST_PP_REPEAT_3_183(m, d) BOOST_PP_REPEAT_3_182(m, d) m(4, 182, d)\n# define BOOST_PP_REPEAT_3_184(m, d) BOOST_PP_REPEAT_3_183(m, d) m(4, 183, d)\n# define BOOST_PP_REPEAT_3_185(m, d) BOOST_PP_REPEAT_3_184(m, d) m(4, 184, d)\n# define BOOST_PP_REPEAT_3_186(m, d) BOOST_PP_REPEAT_3_185(m, d) m(4, 185, d)\n# define BOOST_PP_REPEAT_3_187(m, d) BOOST_PP_REPEAT_3_186(m, d) m(4, 186, d)\n# define BOOST_PP_REPEAT_3_188(m, d) BOOST_PP_REPEAT_3_187(m, d) m(4, 187, d)\n# define BOOST_PP_REPEAT_3_189(m, d) BOOST_PP_REPEAT_3_188(m, d) m(4, 188, d)\n# define BOOST_PP_REPEAT_3_190(m, d) BOOST_PP_REPEAT_3_189(m, d) m(4, 189, d)\n# define BOOST_PP_REPEAT_3_191(m, d) BOOST_PP_REPEAT_3_190(m, d) m(4, 190, d)\n# define BOOST_PP_REPEAT_3_192(m, d) BOOST_PP_REPEAT_3_191(m, d) m(4, 191, d)\n# define BOOST_PP_REPEAT_3_193(m, d) BOOST_PP_REPEAT_3_192(m, d) m(4, 192, d)\n# define BOOST_PP_REPEAT_3_194(m, d) BOOST_PP_REPEAT_3_193(m, d) m(4, 193, d)\n# define BOOST_PP_REPEAT_3_195(m, d) BOOST_PP_REPEAT_3_194(m, d) m(4, 194, d)\n# define BOOST_PP_REPEAT_3_196(m, d) BOOST_PP_REPEAT_3_195(m, d) m(4, 195, d)\n# define BOOST_PP_REPEAT_3_197(m, d) BOOST_PP_REPEAT_3_196(m, d) m(4, 196, d)\n# define BOOST_PP_REPEAT_3_198(m, d) BOOST_PP_REPEAT_3_197(m, d) m(4, 197, d)\n# define BOOST_PP_REPEAT_3_199(m, d) BOOST_PP_REPEAT_3_198(m, d) m(4, 198, d)\n# define BOOST_PP_REPEAT_3_200(m, d) BOOST_PP_REPEAT_3_199(m, d) m(4, 199, d)\n# define BOOST_PP_REPEAT_3_201(m, d) BOOST_PP_REPEAT_3_200(m, d) m(4, 200, d)\n# define BOOST_PP_REPEAT_3_202(m, d) BOOST_PP_REPEAT_3_201(m, d) m(4, 201, d)\n# define BOOST_PP_REPEAT_3_203(m, d) BOOST_PP_REPEAT_3_202(m, d) m(4, 202, d)\n# define BOOST_PP_REPEAT_3_204(m, d) BOOST_PP_REPEAT_3_203(m, d) m(4, 203, d)\n# define BOOST_PP_REPEAT_3_205(m, d) BOOST_PP_REPEAT_3_204(m, d) m(4, 204, d)\n# define BOOST_PP_REPEAT_3_206(m, d) BOOST_PP_REPEAT_3_205(m, d) m(4, 205, d)\n# define BOOST_PP_REPEAT_3_207(m, d) BOOST_PP_REPEAT_3_206(m, 
d) m(4, 206, d)\n# define BOOST_PP_REPEAT_3_208(m, d) BOOST_PP_REPEAT_3_207(m, d) m(4, 207, d)\n# define BOOST_PP_REPEAT_3_209(m, d) BOOST_PP_REPEAT_3_208(m, d) m(4, 208, d)\n# define BOOST_PP_REPEAT_3_210(m, d) BOOST_PP_REPEAT_3_209(m, d) m(4, 209, d)\n# define BOOST_PP_REPEAT_3_211(m, d) BOOST_PP_REPEAT_3_210(m, d) m(4, 210, d)\n# define BOOST_PP_REPEAT_3_212(m, d) BOOST_PP_REPEAT_3_211(m, d) m(4, 211, d)\n# define BOOST_PP_REPEAT_3_213(m, d) BOOST_PP_REPEAT_3_212(m, d) m(4, 212, d)\n# define BOOST_PP_REPEAT_3_214(m, d) BOOST_PP_REPEAT_3_213(m, d) m(4, 213, d)\n# define BOOST_PP_REPEAT_3_215(m, d) BOOST_PP_REPEAT_3_214(m, d) m(4, 214, d)\n# define BOOST_PP_REPEAT_3_216(m, d) BOOST_PP_REPEAT_3_215(m, d) m(4, 215, d)\n# define BOOST_PP_REPEAT_3_217(m, d) BOOST_PP_REPEAT_3_216(m, d) m(4, 216, d)\n# define BOOST_PP_REPEAT_3_218(m, d) BOOST_PP_REPEAT_3_217(m, d) m(4, 217, d)\n# define BOOST_PP_REPEAT_3_219(m, d) BOOST_PP_REPEAT_3_218(m, d) m(4, 218, d)\n# define BOOST_PP_REPEAT_3_220(m, d) BOOST_PP_REPEAT_3_219(m, d) m(4, 219, d)\n# define BOOST_PP_REPEAT_3_221(m, d) BOOST_PP_REPEAT_3_220(m, d) m(4, 220, d)\n# define BOOST_PP_REPEAT_3_222(m, d) BOOST_PP_REPEAT_3_221(m, d) m(4, 221, d)\n# define BOOST_PP_REPEAT_3_223(m, d) BOOST_PP_REPEAT_3_222(m, d) m(4, 222, d)\n# define BOOST_PP_REPEAT_3_224(m, d) BOOST_PP_REPEAT_3_223(m, d) m(4, 223, d)\n# define BOOST_PP_REPEAT_3_225(m, d) BOOST_PP_REPEAT_3_224(m, d) m(4, 224, d)\n# define BOOST_PP_REPEAT_3_226(m, d) BOOST_PP_REPEAT_3_225(m, d) m(4, 225, d)\n# define BOOST_PP_REPEAT_3_227(m, d) BOOST_PP_REPEAT_3_226(m, d) m(4, 226, d)\n# define BOOST_PP_REPEAT_3_228(m, d) BOOST_PP_REPEAT_3_227(m, d) m(4, 227, d)\n# define BOOST_PP_REPEAT_3_229(m, d) BOOST_PP_REPEAT_3_228(m, d) m(4, 228, d)\n# define BOOST_PP_REPEAT_3_230(m, d) BOOST_PP_REPEAT_3_229(m, d) m(4, 229, d)\n# define BOOST_PP_REPEAT_3_231(m, d) BOOST_PP_REPEAT_3_230(m, d) m(4, 230, d)\n# define BOOST_PP_REPEAT_3_232(m, d) BOOST_PP_REPEAT_3_231(m, d) m(4, 231, d)\n# 
define BOOST_PP_REPEAT_3_233(m, d) BOOST_PP_REPEAT_3_232(m, d) m(4, 232, d)\n# define BOOST_PP_REPEAT_3_234(m, d) BOOST_PP_REPEAT_3_233(m, d) m(4, 233, d)\n# define BOOST_PP_REPEAT_3_235(m, d) BOOST_PP_REPEAT_3_234(m, d) m(4, 234, d)\n# define BOOST_PP_REPEAT_3_236(m, d) BOOST_PP_REPEAT_3_235(m, d) m(4, 235, d)\n# define BOOST_PP_REPEAT_3_237(m, d) BOOST_PP_REPEAT_3_236(m, d) m(4, 236, d)\n# define BOOST_PP_REPEAT_3_238(m, d) BOOST_PP_REPEAT_3_237(m, d) m(4, 237, d)\n# define BOOST_PP_REPEAT_3_239(m, d) BOOST_PP_REPEAT_3_238(m, d) m(4, 238, d)\n# define BOOST_PP_REPEAT_3_240(m, d) BOOST_PP_REPEAT_3_239(m, d) m(4, 239, d)\n# define BOOST_PP_REPEAT_3_241(m, d) BOOST_PP_REPEAT_3_240(m, d) m(4, 240, d)\n# define BOOST_PP_REPEAT_3_242(m, d) BOOST_PP_REPEAT_3_241(m, d) m(4, 241, d)\n# define BOOST_PP_REPEAT_3_243(m, d) BOOST_PP_REPEAT_3_242(m, d) m(4, 242, d)\n# define BOOST_PP_REPEAT_3_244(m, d) BOOST_PP_REPEAT_3_243(m, d) m(4, 243, d)\n# define BOOST_PP_REPEAT_3_245(m, d) BOOST_PP_REPEAT_3_244(m, d) m(4, 244, d)\n# define BOOST_PP_REPEAT_3_246(m, d) BOOST_PP_REPEAT_3_245(m, d) m(4, 245, d)\n# define BOOST_PP_REPEAT_3_247(m, d) BOOST_PP_REPEAT_3_246(m, d) m(4, 246, d)\n# define BOOST_PP_REPEAT_3_248(m, d) BOOST_PP_REPEAT_3_247(m, d) m(4, 247, d)\n# define BOOST_PP_REPEAT_3_249(m, d) BOOST_PP_REPEAT_3_248(m, d) m(4, 248, d)\n# define BOOST_PP_REPEAT_3_250(m, d) BOOST_PP_REPEAT_3_249(m, d) m(4, 249, d)\n# define BOOST_PP_REPEAT_3_251(m, d) BOOST_PP_REPEAT_3_250(m, d) m(4, 250, d)\n# define BOOST_PP_REPEAT_3_252(m, d) BOOST_PP_REPEAT_3_251(m, d) m(4, 251, d)\n# define BOOST_PP_REPEAT_3_253(m, d) BOOST_PP_REPEAT_3_252(m, d) m(4, 252, d)\n# define BOOST_PP_REPEAT_3_254(m, d) BOOST_PP_REPEAT_3_253(m, d) m(4, 253, d)\n# define BOOST_PP_REPEAT_3_255(m, d) BOOST_PP_REPEAT_3_254(m, d) m(4, 254, d)\n# define BOOST_PP_REPEAT_3_256(m, d) BOOST_PP_REPEAT_3_255(m, d) m(4, 255, d)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/repetition/repeat_from_to.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_REPETITION_REPEAT_FROM_TO_HPP\n# define BOOST_PREPROCESSOR_REPETITION_REPEAT_FROM_TO_HPP\n#\n# include <boost/preprocessor/arithmetic/add.hpp>\n# include <boost/preprocessor/arithmetic/sub.hpp>\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/while.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n# include <boost/preprocessor/repetition/repeat.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_REPEAT_FROM_TO */\n#\n# if 0\n#    define BOOST_PP_REPEAT_FROM_TO(first, last, macro, data)\n# endif\n#\n# define BOOST_PP_REPEAT_FROM_TO BOOST_PP_CAT(BOOST_PP_REPEAT_FROM_TO_, BOOST_PP_AUTO_REC(BOOST_PP_REPEAT_P, 4))\n#\n# define BOOST_PP_REPEAT_FROM_TO_1(f, l, m, dt) BOOST_PP_REPEAT_FROM_TO_D_1(BOOST_PP_AUTO_REC(BOOST_PP_WHILE_P, 256), f, l, m, dt)\n# define BOOST_PP_REPEAT_FROM_TO_2(f, l, m, dt) BOOST_PP_REPEAT_FROM_TO_D_2(BOOST_PP_AUTO_REC(BOOST_PP_WHILE_P, 256), f, l, m, dt)\n# define BOOST_PP_REPEAT_FROM_TO_3(f, l, m, dt) BOOST_PP_REPEAT_FROM_TO_D_3(BOOST_PP_AUTO_REC(BOOST_PP_WHILE_P, 256), f, l, m, dt)\n# define BOOST_PP_REPEAT_FROM_TO_4(f, l, m, dt) BOOST_PP_ERROR(0x0003)\n#\n# define BOOST_PP_REPEAT_FROM_TO_1ST BOOST_PP_REPEAT_FROM_TO_1\n# define BOOST_PP_REPEAT_FROM_TO_2ND BOOST_PP_REPEAT_FROM_TO_2\n# define BOOST_PP_REPEAT_FROM_TO_3RD BOOST_PP_REPEAT_FROM_TO_3\n#\n# /* BOOST_PP_REPEAT_FROM_TO_D */\n#\n# if 0\n#    define BOOST_PP_REPEAT_FROM_TO_D(d, first, last, 
macro, data)\n# endif\n#\n# define BOOST_PP_REPEAT_FROM_TO_D BOOST_PP_CAT(BOOST_PP_REPEAT_FROM_TO_D_, BOOST_PP_AUTO_REC(BOOST_PP_REPEAT_P, 4))\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_REPEAT_FROM_TO_D_1(d, f, l, m, dt) BOOST_PP_REPEAT_1(BOOST_PP_SUB_D(d, l, f), BOOST_PP_REPEAT_FROM_TO_M_1, (d, f, m, dt))\n#    define BOOST_PP_REPEAT_FROM_TO_D_2(d, f, l, m, dt) BOOST_PP_REPEAT_2(BOOST_PP_SUB_D(d, l, f), BOOST_PP_REPEAT_FROM_TO_M_2, (d, f, m, dt))\n#    define BOOST_PP_REPEAT_FROM_TO_D_3(d, f, l, m, dt) BOOST_PP_REPEAT_3(BOOST_PP_SUB_D(d, l, f), BOOST_PP_REPEAT_FROM_TO_M_3, (d, f, m, dt))\n# else\n#    define BOOST_PP_REPEAT_FROM_TO_D_1(d, f, l, m, dt) BOOST_PP_REPEAT_FROM_TO_D_1_I(d, f, l, m, dt)\n#    define BOOST_PP_REPEAT_FROM_TO_D_2(d, f, l, m, dt) BOOST_PP_REPEAT_FROM_TO_D_2_I(d, f, l, m, dt)\n#    define BOOST_PP_REPEAT_FROM_TO_D_3(d, f, l, m, dt) BOOST_PP_REPEAT_FROM_TO_D_3_I(d, f, l, m, dt)\n#    define BOOST_PP_REPEAT_FROM_TO_D_1_I(d, f, l, m, dt) BOOST_PP_REPEAT_1(BOOST_PP_SUB_D(d, l, f), BOOST_PP_REPEAT_FROM_TO_M_1, (d, f, m, dt))\n#    define BOOST_PP_REPEAT_FROM_TO_D_2_I(d, f, l, m, dt) BOOST_PP_REPEAT_2(BOOST_PP_SUB_D(d, l, f), BOOST_PP_REPEAT_FROM_TO_M_2, (d, f, m, dt))\n#    define BOOST_PP_REPEAT_FROM_TO_D_3_I(d, f, l, m, dt) BOOST_PP_REPEAT_3(BOOST_PP_SUB_D(d, l, f), BOOST_PP_REPEAT_FROM_TO_M_3, (d, f, m, dt))\n# endif\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_REPEAT_FROM_TO_M_1(z, n, dfmd) BOOST_PP_REPEAT_FROM_TO_M_1_IM(z, n, BOOST_PP_TUPLE_REM_4 dfmd)\n#    define BOOST_PP_REPEAT_FROM_TO_M_2(z, n, dfmd) BOOST_PP_REPEAT_FROM_TO_M_2_IM(z, n, BOOST_PP_TUPLE_REM_4 dfmd)\n#    define BOOST_PP_REPEAT_FROM_TO_M_3(z, n, dfmd) BOOST_PP_REPEAT_FROM_TO_M_3_IM(z, n, BOOST_PP_TUPLE_REM_4 dfmd)\n#    define BOOST_PP_REPEAT_FROM_TO_M_1_IM(z, n, im) BOOST_PP_REPEAT_FROM_TO_M_1_I(z, n, im)\n#    define BOOST_PP_REPEAT_FROM_TO_M_2_IM(z, n, im) BOOST_PP_REPEAT_FROM_TO_M_2_I(z, n, im)\n#  
  define BOOST_PP_REPEAT_FROM_TO_M_3_IM(z, n, im) BOOST_PP_REPEAT_FROM_TO_M_3_I(z, n, im)\n# else\n#    define BOOST_PP_REPEAT_FROM_TO_M_1(z, n, dfmd) BOOST_PP_REPEAT_FROM_TO_M_1_I(z, n, BOOST_PP_TUPLE_ELEM(4, 0, dfmd), BOOST_PP_TUPLE_ELEM(4, 1, dfmd), BOOST_PP_TUPLE_ELEM(4, 2, dfmd), BOOST_PP_TUPLE_ELEM(4, 3, dfmd))\n#    define BOOST_PP_REPEAT_FROM_TO_M_2(z, n, dfmd) BOOST_PP_REPEAT_FROM_TO_M_2_I(z, n, BOOST_PP_TUPLE_ELEM(4, 0, dfmd), BOOST_PP_TUPLE_ELEM(4, 1, dfmd), BOOST_PP_TUPLE_ELEM(4, 2, dfmd), BOOST_PP_TUPLE_ELEM(4, 3, dfmd))\n#    define BOOST_PP_REPEAT_FROM_TO_M_3(z, n, dfmd) BOOST_PP_REPEAT_FROM_TO_M_3_I(z, n, BOOST_PP_TUPLE_ELEM(4, 0, dfmd), BOOST_PP_TUPLE_ELEM(4, 1, dfmd), BOOST_PP_TUPLE_ELEM(4, 2, dfmd), BOOST_PP_TUPLE_ELEM(4, 3, dfmd))\n# endif\n#\n# define BOOST_PP_REPEAT_FROM_TO_M_1_I(z, n, d, f, m, dt) BOOST_PP_REPEAT_FROM_TO_M_1_II(z, BOOST_PP_ADD_D(d, n, f), m, dt)\n# define BOOST_PP_REPEAT_FROM_TO_M_2_I(z, n, d, f, m, dt) BOOST_PP_REPEAT_FROM_TO_M_2_II(z, BOOST_PP_ADD_D(d, n, f), m, dt)\n# define BOOST_PP_REPEAT_FROM_TO_M_3_I(z, n, d, f, m, dt) BOOST_PP_REPEAT_FROM_TO_M_3_II(z, BOOST_PP_ADD_D(d, n, f), m, dt)\n#\n# define BOOST_PP_REPEAT_FROM_TO_M_1_II(z, n, m, dt) m(z, n, dt)\n# define BOOST_PP_REPEAT_FROM_TO_M_2_II(z, n, m, dt) m(z, n, dt)\n# define BOOST_PP_REPEAT_FROM_TO_M_3_II(z, n, m, dt) m(z, n, dt)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/selection/max.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SELECTION_MAX_HPP\n# define BOOST_PREPROCESSOR_SELECTION_MAX_HPP\n#\n# include <boost/preprocessor/comparison/less_equal.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n#\n# /* BOOST_PP_MAX */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_MAX(x, y) BOOST_PP_IIF(BOOST_PP_LESS_EQUAL(x, y), y, x)\n# else\n#    define BOOST_PP_MAX(x, y) BOOST_PP_MAX_I(x, y)\n#    define BOOST_PP_MAX_I(x, y) BOOST_PP_IIF(BOOST_PP_LESS_EQUAL(x, y), y, x)\n# endif\n#\n# /* BOOST_PP_MAX_D */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_MAX_D(d, x, y) BOOST_PP_IIF(BOOST_PP_LESS_EQUAL_D(d, x, y), y, x)\n# else\n#    define BOOST_PP_MAX_D(d, x, y) BOOST_PP_MAX_D_I(d, x, y)\n#    define BOOST_PP_MAX_D_I(d, x, y) BOOST_PP_IIF(BOOST_PP_LESS_EQUAL_D(d, x, y), y, x)\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/detail/is_empty.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2015.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_DETAIL_IS_EMPTY_HPP\n# define BOOST_PREPROCESSOR_SEQ_DETAIL_IS_EMPTY_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/logical/bool.hpp>\n# include <boost/preprocessor/logical/compl.hpp>\n# include <boost/preprocessor/seq/size.hpp>\n#\n/* An empty seq is one that is just BOOST_PP_SEQ_NIL */\n#\n# define BOOST_PP_SEQ_DETAIL_IS_EMPTY(seq) \\\n\tBOOST_PP_COMPL \\\n\t\t( \\\n\t\tBOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY(seq) \\\n\t\t) \\\n/**/\n#\n# define BOOST_PP_SEQ_DETAIL_IS_EMPTY_SIZE(size) \\\n\tBOOST_PP_COMPL \\\n\t\t( \\\n\t\tBOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY_SIZE(size) \\\n\t\t) \\\n/**/\n#\n# define BOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY(seq) \\\n\tBOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY_SIZE(BOOST_PP_SEQ_DETAIL_EMPTY_SIZE(seq)) \\\n/**/\n#\n# define BOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY_SIZE(size) \\\n\tBOOST_PP_BOOL(size) \\\n/**/\n#\n# define BOOST_PP_SEQ_DETAIL_EMPTY_SIZE(seq) \\\n\tBOOST_PP_DEC(BOOST_PP_SEQ_SIZE(seq (nil))) \\\n/**/\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/detail/split.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_DETAIL_SPLIT_HPP\n# define BOOST_PREPROCESSOR_SEQ_DETAIL_SPLIT_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_SEQ_SPLIT */\n#\n# define BOOST_PP_SEQ_SPLIT(n, seq) BOOST_PP_SEQ_SPLIT_D(n, seq)\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SEQ_SPLIT_D(n, seq) (BOOST_PP_SEQ_SPLIT_ ## n seq)\n# else\n#    define BOOST_PP_SEQ_SPLIT_D(n, seq) (BOOST_PP_SEQ_SPLIT_ ## n ## seq)\n# endif\n#\n# define BOOST_PP_SEQ_SPLIT_1(x) (x),\n# define BOOST_PP_SEQ_SPLIT_2(x) (x) BOOST_PP_SEQ_SPLIT_1\n# define BOOST_PP_SEQ_SPLIT_3(x) (x) BOOST_PP_SEQ_SPLIT_2\n# define BOOST_PP_SEQ_SPLIT_4(x) (x) BOOST_PP_SEQ_SPLIT_3\n# define BOOST_PP_SEQ_SPLIT_5(x) (x) BOOST_PP_SEQ_SPLIT_4\n# define BOOST_PP_SEQ_SPLIT_6(x) (x) BOOST_PP_SEQ_SPLIT_5\n# define BOOST_PP_SEQ_SPLIT_7(x) (x) BOOST_PP_SEQ_SPLIT_6\n# define BOOST_PP_SEQ_SPLIT_8(x) (x) BOOST_PP_SEQ_SPLIT_7\n# define BOOST_PP_SEQ_SPLIT_9(x) (x) BOOST_PP_SEQ_SPLIT_8\n# define BOOST_PP_SEQ_SPLIT_10(x) (x) BOOST_PP_SEQ_SPLIT_9\n# define BOOST_PP_SEQ_SPLIT_11(x) (x) BOOST_PP_SEQ_SPLIT_10\n# define BOOST_PP_SEQ_SPLIT_12(x) (x) BOOST_PP_SEQ_SPLIT_11\n# define BOOST_PP_SEQ_SPLIT_13(x) (x) BOOST_PP_SEQ_SPLIT_12\n# define BOOST_PP_SEQ_SPLIT_14(x) (x) BOOST_PP_SEQ_SPLIT_13\n# define BOOST_PP_SEQ_SPLIT_15(x) (x) BOOST_PP_SEQ_SPLIT_14\n# define 
BOOST_PP_SEQ_SPLIT_16(x) (x) BOOST_PP_SEQ_SPLIT_15\n# define BOOST_PP_SEQ_SPLIT_17(x) (x) BOOST_PP_SEQ_SPLIT_16\n# define BOOST_PP_SEQ_SPLIT_18(x) (x) BOOST_PP_SEQ_SPLIT_17\n# define BOOST_PP_SEQ_SPLIT_19(x) (x) BOOST_PP_SEQ_SPLIT_18\n# define BOOST_PP_SEQ_SPLIT_20(x) (x) BOOST_PP_SEQ_SPLIT_19\n# define BOOST_PP_SEQ_SPLIT_21(x) (x) BOOST_PP_SEQ_SPLIT_20\n# define BOOST_PP_SEQ_SPLIT_22(x) (x) BOOST_PP_SEQ_SPLIT_21\n# define BOOST_PP_SEQ_SPLIT_23(x) (x) BOOST_PP_SEQ_SPLIT_22\n# define BOOST_PP_SEQ_SPLIT_24(x) (x) BOOST_PP_SEQ_SPLIT_23\n# define BOOST_PP_SEQ_SPLIT_25(x) (x) BOOST_PP_SEQ_SPLIT_24\n# define BOOST_PP_SEQ_SPLIT_26(x) (x) BOOST_PP_SEQ_SPLIT_25\n# define BOOST_PP_SEQ_SPLIT_27(x) (x) BOOST_PP_SEQ_SPLIT_26\n# define BOOST_PP_SEQ_SPLIT_28(x) (x) BOOST_PP_SEQ_SPLIT_27\n# define BOOST_PP_SEQ_SPLIT_29(x) (x) BOOST_PP_SEQ_SPLIT_28\n# define BOOST_PP_SEQ_SPLIT_30(x) (x) BOOST_PP_SEQ_SPLIT_29\n# define BOOST_PP_SEQ_SPLIT_31(x) (x) BOOST_PP_SEQ_SPLIT_30\n# define BOOST_PP_SEQ_SPLIT_32(x) (x) BOOST_PP_SEQ_SPLIT_31\n# define BOOST_PP_SEQ_SPLIT_33(x) (x) BOOST_PP_SEQ_SPLIT_32\n# define BOOST_PP_SEQ_SPLIT_34(x) (x) BOOST_PP_SEQ_SPLIT_33\n# define BOOST_PP_SEQ_SPLIT_35(x) (x) BOOST_PP_SEQ_SPLIT_34\n# define BOOST_PP_SEQ_SPLIT_36(x) (x) BOOST_PP_SEQ_SPLIT_35\n# define BOOST_PP_SEQ_SPLIT_37(x) (x) BOOST_PP_SEQ_SPLIT_36\n# define BOOST_PP_SEQ_SPLIT_38(x) (x) BOOST_PP_SEQ_SPLIT_37\n# define BOOST_PP_SEQ_SPLIT_39(x) (x) BOOST_PP_SEQ_SPLIT_38\n# define BOOST_PP_SEQ_SPLIT_40(x) (x) BOOST_PP_SEQ_SPLIT_39\n# define BOOST_PP_SEQ_SPLIT_41(x) (x) BOOST_PP_SEQ_SPLIT_40\n# define BOOST_PP_SEQ_SPLIT_42(x) (x) BOOST_PP_SEQ_SPLIT_41\n# define BOOST_PP_SEQ_SPLIT_43(x) (x) BOOST_PP_SEQ_SPLIT_42\n# define BOOST_PP_SEQ_SPLIT_44(x) (x) BOOST_PP_SEQ_SPLIT_43\n# define BOOST_PP_SEQ_SPLIT_45(x) (x) BOOST_PP_SEQ_SPLIT_44\n# define BOOST_PP_SEQ_SPLIT_46(x) (x) BOOST_PP_SEQ_SPLIT_45\n# define BOOST_PP_SEQ_SPLIT_47(x) (x) BOOST_PP_SEQ_SPLIT_46\n# define BOOST_PP_SEQ_SPLIT_48(x) (x) 
BOOST_PP_SEQ_SPLIT_47\n# define BOOST_PP_SEQ_SPLIT_49(x) (x) BOOST_PP_SEQ_SPLIT_48\n# define BOOST_PP_SEQ_SPLIT_50(x) (x) BOOST_PP_SEQ_SPLIT_49\n# define BOOST_PP_SEQ_SPLIT_51(x) (x) BOOST_PP_SEQ_SPLIT_50\n# define BOOST_PP_SEQ_SPLIT_52(x) (x) BOOST_PP_SEQ_SPLIT_51\n# define BOOST_PP_SEQ_SPLIT_53(x) (x) BOOST_PP_SEQ_SPLIT_52\n# define BOOST_PP_SEQ_SPLIT_54(x) (x) BOOST_PP_SEQ_SPLIT_53\n# define BOOST_PP_SEQ_SPLIT_55(x) (x) BOOST_PP_SEQ_SPLIT_54\n# define BOOST_PP_SEQ_SPLIT_56(x) (x) BOOST_PP_SEQ_SPLIT_55\n# define BOOST_PP_SEQ_SPLIT_57(x) (x) BOOST_PP_SEQ_SPLIT_56\n# define BOOST_PP_SEQ_SPLIT_58(x) (x) BOOST_PP_SEQ_SPLIT_57\n# define BOOST_PP_SEQ_SPLIT_59(x) (x) BOOST_PP_SEQ_SPLIT_58\n# define BOOST_PP_SEQ_SPLIT_60(x) (x) BOOST_PP_SEQ_SPLIT_59\n# define BOOST_PP_SEQ_SPLIT_61(x) (x) BOOST_PP_SEQ_SPLIT_60\n# define BOOST_PP_SEQ_SPLIT_62(x) (x) BOOST_PP_SEQ_SPLIT_61\n# define BOOST_PP_SEQ_SPLIT_63(x) (x) BOOST_PP_SEQ_SPLIT_62\n# define BOOST_PP_SEQ_SPLIT_64(x) (x) BOOST_PP_SEQ_SPLIT_63\n# define BOOST_PP_SEQ_SPLIT_65(x) (x) BOOST_PP_SEQ_SPLIT_64\n# define BOOST_PP_SEQ_SPLIT_66(x) (x) BOOST_PP_SEQ_SPLIT_65\n# define BOOST_PP_SEQ_SPLIT_67(x) (x) BOOST_PP_SEQ_SPLIT_66\n# define BOOST_PP_SEQ_SPLIT_68(x) (x) BOOST_PP_SEQ_SPLIT_67\n# define BOOST_PP_SEQ_SPLIT_69(x) (x) BOOST_PP_SEQ_SPLIT_68\n# define BOOST_PP_SEQ_SPLIT_70(x) (x) BOOST_PP_SEQ_SPLIT_69\n# define BOOST_PP_SEQ_SPLIT_71(x) (x) BOOST_PP_SEQ_SPLIT_70\n# define BOOST_PP_SEQ_SPLIT_72(x) (x) BOOST_PP_SEQ_SPLIT_71\n# define BOOST_PP_SEQ_SPLIT_73(x) (x) BOOST_PP_SEQ_SPLIT_72\n# define BOOST_PP_SEQ_SPLIT_74(x) (x) BOOST_PP_SEQ_SPLIT_73\n# define BOOST_PP_SEQ_SPLIT_75(x) (x) BOOST_PP_SEQ_SPLIT_74\n# define BOOST_PP_SEQ_SPLIT_76(x) (x) BOOST_PP_SEQ_SPLIT_75\n# define BOOST_PP_SEQ_SPLIT_77(x) (x) BOOST_PP_SEQ_SPLIT_76\n# define BOOST_PP_SEQ_SPLIT_78(x) (x) BOOST_PP_SEQ_SPLIT_77\n# define BOOST_PP_SEQ_SPLIT_79(x) (x) BOOST_PP_SEQ_SPLIT_78\n# define BOOST_PP_SEQ_SPLIT_80(x) (x) BOOST_PP_SEQ_SPLIT_79\n# define 
BOOST_PP_SEQ_SPLIT_81(x) (x) BOOST_PP_SEQ_SPLIT_80\n# define BOOST_PP_SEQ_SPLIT_82(x) (x) BOOST_PP_SEQ_SPLIT_81\n# define BOOST_PP_SEQ_SPLIT_83(x) (x) BOOST_PP_SEQ_SPLIT_82\n# define BOOST_PP_SEQ_SPLIT_84(x) (x) BOOST_PP_SEQ_SPLIT_83\n# define BOOST_PP_SEQ_SPLIT_85(x) (x) BOOST_PP_SEQ_SPLIT_84\n# define BOOST_PP_SEQ_SPLIT_86(x) (x) BOOST_PP_SEQ_SPLIT_85\n# define BOOST_PP_SEQ_SPLIT_87(x) (x) BOOST_PP_SEQ_SPLIT_86\n# define BOOST_PP_SEQ_SPLIT_88(x) (x) BOOST_PP_SEQ_SPLIT_87\n# define BOOST_PP_SEQ_SPLIT_89(x) (x) BOOST_PP_SEQ_SPLIT_88\n# define BOOST_PP_SEQ_SPLIT_90(x) (x) BOOST_PP_SEQ_SPLIT_89\n# define BOOST_PP_SEQ_SPLIT_91(x) (x) BOOST_PP_SEQ_SPLIT_90\n# define BOOST_PP_SEQ_SPLIT_92(x) (x) BOOST_PP_SEQ_SPLIT_91\n# define BOOST_PP_SEQ_SPLIT_93(x) (x) BOOST_PP_SEQ_SPLIT_92\n# define BOOST_PP_SEQ_SPLIT_94(x) (x) BOOST_PP_SEQ_SPLIT_93\n# define BOOST_PP_SEQ_SPLIT_95(x) (x) BOOST_PP_SEQ_SPLIT_94\n# define BOOST_PP_SEQ_SPLIT_96(x) (x) BOOST_PP_SEQ_SPLIT_95\n# define BOOST_PP_SEQ_SPLIT_97(x) (x) BOOST_PP_SEQ_SPLIT_96\n# define BOOST_PP_SEQ_SPLIT_98(x) (x) BOOST_PP_SEQ_SPLIT_97\n# define BOOST_PP_SEQ_SPLIT_99(x) (x) BOOST_PP_SEQ_SPLIT_98\n# define BOOST_PP_SEQ_SPLIT_100(x) (x) BOOST_PP_SEQ_SPLIT_99\n# define BOOST_PP_SEQ_SPLIT_101(x) (x) BOOST_PP_SEQ_SPLIT_100\n# define BOOST_PP_SEQ_SPLIT_102(x) (x) BOOST_PP_SEQ_SPLIT_101\n# define BOOST_PP_SEQ_SPLIT_103(x) (x) BOOST_PP_SEQ_SPLIT_102\n# define BOOST_PP_SEQ_SPLIT_104(x) (x) BOOST_PP_SEQ_SPLIT_103\n# define BOOST_PP_SEQ_SPLIT_105(x) (x) BOOST_PP_SEQ_SPLIT_104\n# define BOOST_PP_SEQ_SPLIT_106(x) (x) BOOST_PP_SEQ_SPLIT_105\n# define BOOST_PP_SEQ_SPLIT_107(x) (x) BOOST_PP_SEQ_SPLIT_106\n# define BOOST_PP_SEQ_SPLIT_108(x) (x) BOOST_PP_SEQ_SPLIT_107\n# define BOOST_PP_SEQ_SPLIT_109(x) (x) BOOST_PP_SEQ_SPLIT_108\n# define BOOST_PP_SEQ_SPLIT_110(x) (x) BOOST_PP_SEQ_SPLIT_109\n# define BOOST_PP_SEQ_SPLIT_111(x) (x) BOOST_PP_SEQ_SPLIT_110\n# define BOOST_PP_SEQ_SPLIT_112(x) (x) BOOST_PP_SEQ_SPLIT_111\n# define 
BOOST_PP_SEQ_SPLIT_113(x) (x) BOOST_PP_SEQ_SPLIT_112\n# define BOOST_PP_SEQ_SPLIT_114(x) (x) BOOST_PP_SEQ_SPLIT_113\n# define BOOST_PP_SEQ_SPLIT_115(x) (x) BOOST_PP_SEQ_SPLIT_114\n# define BOOST_PP_SEQ_SPLIT_116(x) (x) BOOST_PP_SEQ_SPLIT_115\n# define BOOST_PP_SEQ_SPLIT_117(x) (x) BOOST_PP_SEQ_SPLIT_116\n# define BOOST_PP_SEQ_SPLIT_118(x) (x) BOOST_PP_SEQ_SPLIT_117\n# define BOOST_PP_SEQ_SPLIT_119(x) (x) BOOST_PP_SEQ_SPLIT_118\n# define BOOST_PP_SEQ_SPLIT_120(x) (x) BOOST_PP_SEQ_SPLIT_119\n# define BOOST_PP_SEQ_SPLIT_121(x) (x) BOOST_PP_SEQ_SPLIT_120\n# define BOOST_PP_SEQ_SPLIT_122(x) (x) BOOST_PP_SEQ_SPLIT_121\n# define BOOST_PP_SEQ_SPLIT_123(x) (x) BOOST_PP_SEQ_SPLIT_122\n# define BOOST_PP_SEQ_SPLIT_124(x) (x) BOOST_PP_SEQ_SPLIT_123\n# define BOOST_PP_SEQ_SPLIT_125(x) (x) BOOST_PP_SEQ_SPLIT_124\n# define BOOST_PP_SEQ_SPLIT_126(x) (x) BOOST_PP_SEQ_SPLIT_125\n# define BOOST_PP_SEQ_SPLIT_127(x) (x) BOOST_PP_SEQ_SPLIT_126\n# define BOOST_PP_SEQ_SPLIT_128(x) (x) BOOST_PP_SEQ_SPLIT_127\n# define BOOST_PP_SEQ_SPLIT_129(x) (x) BOOST_PP_SEQ_SPLIT_128\n# define BOOST_PP_SEQ_SPLIT_130(x) (x) BOOST_PP_SEQ_SPLIT_129\n# define BOOST_PP_SEQ_SPLIT_131(x) (x) BOOST_PP_SEQ_SPLIT_130\n# define BOOST_PP_SEQ_SPLIT_132(x) (x) BOOST_PP_SEQ_SPLIT_131\n# define BOOST_PP_SEQ_SPLIT_133(x) (x) BOOST_PP_SEQ_SPLIT_132\n# define BOOST_PP_SEQ_SPLIT_134(x) (x) BOOST_PP_SEQ_SPLIT_133\n# define BOOST_PP_SEQ_SPLIT_135(x) (x) BOOST_PP_SEQ_SPLIT_134\n# define BOOST_PP_SEQ_SPLIT_136(x) (x) BOOST_PP_SEQ_SPLIT_135\n# define BOOST_PP_SEQ_SPLIT_137(x) (x) BOOST_PP_SEQ_SPLIT_136\n# define BOOST_PP_SEQ_SPLIT_138(x) (x) BOOST_PP_SEQ_SPLIT_137\n# define BOOST_PP_SEQ_SPLIT_139(x) (x) BOOST_PP_SEQ_SPLIT_138\n# define BOOST_PP_SEQ_SPLIT_140(x) (x) BOOST_PP_SEQ_SPLIT_139\n# define BOOST_PP_SEQ_SPLIT_141(x) (x) BOOST_PP_SEQ_SPLIT_140\n# define BOOST_PP_SEQ_SPLIT_142(x) (x) BOOST_PP_SEQ_SPLIT_141\n# define BOOST_PP_SEQ_SPLIT_143(x) (x) BOOST_PP_SEQ_SPLIT_142\n# define BOOST_PP_SEQ_SPLIT_144(x) (x) 
BOOST_PP_SEQ_SPLIT_143\n# define BOOST_PP_SEQ_SPLIT_145(x) (x) BOOST_PP_SEQ_SPLIT_144\n# define BOOST_PP_SEQ_SPLIT_146(x) (x) BOOST_PP_SEQ_SPLIT_145\n# define BOOST_PP_SEQ_SPLIT_147(x) (x) BOOST_PP_SEQ_SPLIT_146\n# define BOOST_PP_SEQ_SPLIT_148(x) (x) BOOST_PP_SEQ_SPLIT_147\n# define BOOST_PP_SEQ_SPLIT_149(x) (x) BOOST_PP_SEQ_SPLIT_148\n# define BOOST_PP_SEQ_SPLIT_150(x) (x) BOOST_PP_SEQ_SPLIT_149\n# define BOOST_PP_SEQ_SPLIT_151(x) (x) BOOST_PP_SEQ_SPLIT_150\n# define BOOST_PP_SEQ_SPLIT_152(x) (x) BOOST_PP_SEQ_SPLIT_151\n# define BOOST_PP_SEQ_SPLIT_153(x) (x) BOOST_PP_SEQ_SPLIT_152\n# define BOOST_PP_SEQ_SPLIT_154(x) (x) BOOST_PP_SEQ_SPLIT_153\n# define BOOST_PP_SEQ_SPLIT_155(x) (x) BOOST_PP_SEQ_SPLIT_154\n# define BOOST_PP_SEQ_SPLIT_156(x) (x) BOOST_PP_SEQ_SPLIT_155\n# define BOOST_PP_SEQ_SPLIT_157(x) (x) BOOST_PP_SEQ_SPLIT_156\n# define BOOST_PP_SEQ_SPLIT_158(x) (x) BOOST_PP_SEQ_SPLIT_157\n# define BOOST_PP_SEQ_SPLIT_159(x) (x) BOOST_PP_SEQ_SPLIT_158\n# define BOOST_PP_SEQ_SPLIT_160(x) (x) BOOST_PP_SEQ_SPLIT_159\n# define BOOST_PP_SEQ_SPLIT_161(x) (x) BOOST_PP_SEQ_SPLIT_160\n# define BOOST_PP_SEQ_SPLIT_162(x) (x) BOOST_PP_SEQ_SPLIT_161\n# define BOOST_PP_SEQ_SPLIT_163(x) (x) BOOST_PP_SEQ_SPLIT_162\n# define BOOST_PP_SEQ_SPLIT_164(x) (x) BOOST_PP_SEQ_SPLIT_163\n# define BOOST_PP_SEQ_SPLIT_165(x) (x) BOOST_PP_SEQ_SPLIT_164\n# define BOOST_PP_SEQ_SPLIT_166(x) (x) BOOST_PP_SEQ_SPLIT_165\n# define BOOST_PP_SEQ_SPLIT_167(x) (x) BOOST_PP_SEQ_SPLIT_166\n# define BOOST_PP_SEQ_SPLIT_168(x) (x) BOOST_PP_SEQ_SPLIT_167\n# define BOOST_PP_SEQ_SPLIT_169(x) (x) BOOST_PP_SEQ_SPLIT_168\n# define BOOST_PP_SEQ_SPLIT_170(x) (x) BOOST_PP_SEQ_SPLIT_169\n# define BOOST_PP_SEQ_SPLIT_171(x) (x) BOOST_PP_SEQ_SPLIT_170\n# define BOOST_PP_SEQ_SPLIT_172(x) (x) BOOST_PP_SEQ_SPLIT_171\n# define BOOST_PP_SEQ_SPLIT_173(x) (x) BOOST_PP_SEQ_SPLIT_172\n# define BOOST_PP_SEQ_SPLIT_174(x) (x) BOOST_PP_SEQ_SPLIT_173\n# define BOOST_PP_SEQ_SPLIT_175(x) (x) BOOST_PP_SEQ_SPLIT_174\n# define 
BOOST_PP_SEQ_SPLIT_176(x) (x) BOOST_PP_SEQ_SPLIT_175\n# define BOOST_PP_SEQ_SPLIT_177(x) (x) BOOST_PP_SEQ_SPLIT_176\n# define BOOST_PP_SEQ_SPLIT_178(x) (x) BOOST_PP_SEQ_SPLIT_177\n# define BOOST_PP_SEQ_SPLIT_179(x) (x) BOOST_PP_SEQ_SPLIT_178\n# define BOOST_PP_SEQ_SPLIT_180(x) (x) BOOST_PP_SEQ_SPLIT_179\n# define BOOST_PP_SEQ_SPLIT_181(x) (x) BOOST_PP_SEQ_SPLIT_180\n# define BOOST_PP_SEQ_SPLIT_182(x) (x) BOOST_PP_SEQ_SPLIT_181\n# define BOOST_PP_SEQ_SPLIT_183(x) (x) BOOST_PP_SEQ_SPLIT_182\n# define BOOST_PP_SEQ_SPLIT_184(x) (x) BOOST_PP_SEQ_SPLIT_183\n# define BOOST_PP_SEQ_SPLIT_185(x) (x) BOOST_PP_SEQ_SPLIT_184\n# define BOOST_PP_SEQ_SPLIT_186(x) (x) BOOST_PP_SEQ_SPLIT_185\n# define BOOST_PP_SEQ_SPLIT_187(x) (x) BOOST_PP_SEQ_SPLIT_186\n# define BOOST_PP_SEQ_SPLIT_188(x) (x) BOOST_PP_SEQ_SPLIT_187\n# define BOOST_PP_SEQ_SPLIT_189(x) (x) BOOST_PP_SEQ_SPLIT_188\n# define BOOST_PP_SEQ_SPLIT_190(x) (x) BOOST_PP_SEQ_SPLIT_189\n# define BOOST_PP_SEQ_SPLIT_191(x) (x) BOOST_PP_SEQ_SPLIT_190\n# define BOOST_PP_SEQ_SPLIT_192(x) (x) BOOST_PP_SEQ_SPLIT_191\n# define BOOST_PP_SEQ_SPLIT_193(x) (x) BOOST_PP_SEQ_SPLIT_192\n# define BOOST_PP_SEQ_SPLIT_194(x) (x) BOOST_PP_SEQ_SPLIT_193\n# define BOOST_PP_SEQ_SPLIT_195(x) (x) BOOST_PP_SEQ_SPLIT_194\n# define BOOST_PP_SEQ_SPLIT_196(x) (x) BOOST_PP_SEQ_SPLIT_195\n# define BOOST_PP_SEQ_SPLIT_197(x) (x) BOOST_PP_SEQ_SPLIT_196\n# define BOOST_PP_SEQ_SPLIT_198(x) (x) BOOST_PP_SEQ_SPLIT_197\n# define BOOST_PP_SEQ_SPLIT_199(x) (x) BOOST_PP_SEQ_SPLIT_198\n# define BOOST_PP_SEQ_SPLIT_200(x) (x) BOOST_PP_SEQ_SPLIT_199\n# define BOOST_PP_SEQ_SPLIT_201(x) (x) BOOST_PP_SEQ_SPLIT_200\n# define BOOST_PP_SEQ_SPLIT_202(x) (x) BOOST_PP_SEQ_SPLIT_201\n# define BOOST_PP_SEQ_SPLIT_203(x) (x) BOOST_PP_SEQ_SPLIT_202\n# define BOOST_PP_SEQ_SPLIT_204(x) (x) BOOST_PP_SEQ_SPLIT_203\n# define BOOST_PP_SEQ_SPLIT_205(x) (x) BOOST_PP_SEQ_SPLIT_204\n# define BOOST_PP_SEQ_SPLIT_206(x) (x) BOOST_PP_SEQ_SPLIT_205\n# define BOOST_PP_SEQ_SPLIT_207(x) (x) 
BOOST_PP_SEQ_SPLIT_206\n# define BOOST_PP_SEQ_SPLIT_208(x) (x) BOOST_PP_SEQ_SPLIT_207\n# define BOOST_PP_SEQ_SPLIT_209(x) (x) BOOST_PP_SEQ_SPLIT_208\n# define BOOST_PP_SEQ_SPLIT_210(x) (x) BOOST_PP_SEQ_SPLIT_209\n# define BOOST_PP_SEQ_SPLIT_211(x) (x) BOOST_PP_SEQ_SPLIT_210\n# define BOOST_PP_SEQ_SPLIT_212(x) (x) BOOST_PP_SEQ_SPLIT_211\n# define BOOST_PP_SEQ_SPLIT_213(x) (x) BOOST_PP_SEQ_SPLIT_212\n# define BOOST_PP_SEQ_SPLIT_214(x) (x) BOOST_PP_SEQ_SPLIT_213\n# define BOOST_PP_SEQ_SPLIT_215(x) (x) BOOST_PP_SEQ_SPLIT_214\n# define BOOST_PP_SEQ_SPLIT_216(x) (x) BOOST_PP_SEQ_SPLIT_215\n# define BOOST_PP_SEQ_SPLIT_217(x) (x) BOOST_PP_SEQ_SPLIT_216\n# define BOOST_PP_SEQ_SPLIT_218(x) (x) BOOST_PP_SEQ_SPLIT_217\n# define BOOST_PP_SEQ_SPLIT_219(x) (x) BOOST_PP_SEQ_SPLIT_218\n# define BOOST_PP_SEQ_SPLIT_220(x) (x) BOOST_PP_SEQ_SPLIT_219\n# define BOOST_PP_SEQ_SPLIT_221(x) (x) BOOST_PP_SEQ_SPLIT_220\n# define BOOST_PP_SEQ_SPLIT_222(x) (x) BOOST_PP_SEQ_SPLIT_221\n# define BOOST_PP_SEQ_SPLIT_223(x) (x) BOOST_PP_SEQ_SPLIT_222\n# define BOOST_PP_SEQ_SPLIT_224(x) (x) BOOST_PP_SEQ_SPLIT_223\n# define BOOST_PP_SEQ_SPLIT_225(x) (x) BOOST_PP_SEQ_SPLIT_224\n# define BOOST_PP_SEQ_SPLIT_226(x) (x) BOOST_PP_SEQ_SPLIT_225\n# define BOOST_PP_SEQ_SPLIT_227(x) (x) BOOST_PP_SEQ_SPLIT_226\n# define BOOST_PP_SEQ_SPLIT_228(x) (x) BOOST_PP_SEQ_SPLIT_227\n# define BOOST_PP_SEQ_SPLIT_229(x) (x) BOOST_PP_SEQ_SPLIT_228\n# define BOOST_PP_SEQ_SPLIT_230(x) (x) BOOST_PP_SEQ_SPLIT_229\n# define BOOST_PP_SEQ_SPLIT_231(x) (x) BOOST_PP_SEQ_SPLIT_230\n# define BOOST_PP_SEQ_SPLIT_232(x) (x) BOOST_PP_SEQ_SPLIT_231\n# define BOOST_PP_SEQ_SPLIT_233(x) (x) BOOST_PP_SEQ_SPLIT_232\n# define BOOST_PP_SEQ_SPLIT_234(x) (x) BOOST_PP_SEQ_SPLIT_233\n# define BOOST_PP_SEQ_SPLIT_235(x) (x) BOOST_PP_SEQ_SPLIT_234\n# define BOOST_PP_SEQ_SPLIT_236(x) (x) BOOST_PP_SEQ_SPLIT_235\n# define BOOST_PP_SEQ_SPLIT_237(x) (x) BOOST_PP_SEQ_SPLIT_236\n# define BOOST_PP_SEQ_SPLIT_238(x) (x) BOOST_PP_SEQ_SPLIT_237\n# define 
BOOST_PP_SEQ_SPLIT_239(x) (x) BOOST_PP_SEQ_SPLIT_238\n# define BOOST_PP_SEQ_SPLIT_240(x) (x) BOOST_PP_SEQ_SPLIT_239\n# define BOOST_PP_SEQ_SPLIT_241(x) (x) BOOST_PP_SEQ_SPLIT_240\n# define BOOST_PP_SEQ_SPLIT_242(x) (x) BOOST_PP_SEQ_SPLIT_241\n# define BOOST_PP_SEQ_SPLIT_243(x) (x) BOOST_PP_SEQ_SPLIT_242\n# define BOOST_PP_SEQ_SPLIT_244(x) (x) BOOST_PP_SEQ_SPLIT_243\n# define BOOST_PP_SEQ_SPLIT_245(x) (x) BOOST_PP_SEQ_SPLIT_244\n# define BOOST_PP_SEQ_SPLIT_246(x) (x) BOOST_PP_SEQ_SPLIT_245\n# define BOOST_PP_SEQ_SPLIT_247(x) (x) BOOST_PP_SEQ_SPLIT_246\n# define BOOST_PP_SEQ_SPLIT_248(x) (x) BOOST_PP_SEQ_SPLIT_247\n# define BOOST_PP_SEQ_SPLIT_249(x) (x) BOOST_PP_SEQ_SPLIT_248\n# define BOOST_PP_SEQ_SPLIT_250(x) (x) BOOST_PP_SEQ_SPLIT_249\n# define BOOST_PP_SEQ_SPLIT_251(x) (x) BOOST_PP_SEQ_SPLIT_250\n# define BOOST_PP_SEQ_SPLIT_252(x) (x) BOOST_PP_SEQ_SPLIT_251\n# define BOOST_PP_SEQ_SPLIT_253(x) (x) BOOST_PP_SEQ_SPLIT_252\n# define BOOST_PP_SEQ_SPLIT_254(x) (x) BOOST_PP_SEQ_SPLIT_253\n# define BOOST_PP_SEQ_SPLIT_255(x) (x) BOOST_PP_SEQ_SPLIT_254\n# define BOOST_PP_SEQ_SPLIT_256(x) (x) BOOST_PP_SEQ_SPLIT_255\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/elem.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_ELEM_HPP\n# define BOOST_PREPROCESSOR_SEQ_ELEM_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/facilities/empty.hpp>\n#\n# /* BOOST_PP_SEQ_ELEM */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SEQ_ELEM(i, seq) BOOST_PP_SEQ_ELEM_I(i, seq)\n# else\n#    define BOOST_PP_SEQ_ELEM(i, seq) BOOST_PP_SEQ_ELEM_I((i, seq))\n# endif\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_SEQ_ELEM_I(i, seq) BOOST_PP_SEQ_ELEM_II((BOOST_PP_SEQ_ELEM_ ## i seq))\n#    define BOOST_PP_SEQ_ELEM_II(res) BOOST_PP_SEQ_ELEM_IV(BOOST_PP_SEQ_ELEM_III res)\n#    define BOOST_PP_SEQ_ELEM_III(x, _) x BOOST_PP_EMPTY()\n#    define BOOST_PP_SEQ_ELEM_IV(x) x\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SEQ_ELEM_I(par) BOOST_PP_SEQ_ELEM_II ## par\n#    define BOOST_PP_SEQ_ELEM_II(i, seq) BOOST_PP_SEQ_ELEM_III(BOOST_PP_SEQ_ELEM_ ## i ## seq)\n#    define BOOST_PP_SEQ_ELEM_III(im) BOOST_PP_SEQ_ELEM_IV(im)\n#    define BOOST_PP_SEQ_ELEM_IV(x, _) x\n# else\n#    if defined(__IBMC__) || defined(__IBMCPP__)\n#        define BOOST_PP_SEQ_ELEM_I(i, seq) BOOST_PP_SEQ_ELEM_II(BOOST_PP_CAT(BOOST_PP_SEQ_ELEM_ ## i, seq))\n#    else\n#        define BOOST_PP_SEQ_ELEM_I(i, seq) 
BOOST_PP_SEQ_ELEM_II(BOOST_PP_SEQ_ELEM_ ## i seq)\n#    endif\n#    define BOOST_PP_SEQ_ELEM_II(im) BOOST_PP_SEQ_ELEM_III(im)\n#    define BOOST_PP_SEQ_ELEM_III(x, _) x\n# endif\n#\n# define BOOST_PP_SEQ_ELEM_0(x) x, BOOST_PP_NIL\n# define BOOST_PP_SEQ_ELEM_1(_) BOOST_PP_SEQ_ELEM_0\n# define BOOST_PP_SEQ_ELEM_2(_) BOOST_PP_SEQ_ELEM_1\n# define BOOST_PP_SEQ_ELEM_3(_) BOOST_PP_SEQ_ELEM_2\n# define BOOST_PP_SEQ_ELEM_4(_) BOOST_PP_SEQ_ELEM_3\n# define BOOST_PP_SEQ_ELEM_5(_) BOOST_PP_SEQ_ELEM_4\n# define BOOST_PP_SEQ_ELEM_6(_) BOOST_PP_SEQ_ELEM_5\n# define BOOST_PP_SEQ_ELEM_7(_) BOOST_PP_SEQ_ELEM_6\n# define BOOST_PP_SEQ_ELEM_8(_) BOOST_PP_SEQ_ELEM_7\n# define BOOST_PP_SEQ_ELEM_9(_) BOOST_PP_SEQ_ELEM_8\n# define BOOST_PP_SEQ_ELEM_10(_) BOOST_PP_SEQ_ELEM_9\n# define BOOST_PP_SEQ_ELEM_11(_) BOOST_PP_SEQ_ELEM_10\n# define BOOST_PP_SEQ_ELEM_12(_) BOOST_PP_SEQ_ELEM_11\n# define BOOST_PP_SEQ_ELEM_13(_) BOOST_PP_SEQ_ELEM_12\n# define BOOST_PP_SEQ_ELEM_14(_) BOOST_PP_SEQ_ELEM_13\n# define BOOST_PP_SEQ_ELEM_15(_) BOOST_PP_SEQ_ELEM_14\n# define BOOST_PP_SEQ_ELEM_16(_) BOOST_PP_SEQ_ELEM_15\n# define BOOST_PP_SEQ_ELEM_17(_) BOOST_PP_SEQ_ELEM_16\n# define BOOST_PP_SEQ_ELEM_18(_) BOOST_PP_SEQ_ELEM_17\n# define BOOST_PP_SEQ_ELEM_19(_) BOOST_PP_SEQ_ELEM_18\n# define BOOST_PP_SEQ_ELEM_20(_) BOOST_PP_SEQ_ELEM_19\n# define BOOST_PP_SEQ_ELEM_21(_) BOOST_PP_SEQ_ELEM_20\n# define BOOST_PP_SEQ_ELEM_22(_) BOOST_PP_SEQ_ELEM_21\n# define BOOST_PP_SEQ_ELEM_23(_) BOOST_PP_SEQ_ELEM_22\n# define BOOST_PP_SEQ_ELEM_24(_) BOOST_PP_SEQ_ELEM_23\n# define BOOST_PP_SEQ_ELEM_25(_) BOOST_PP_SEQ_ELEM_24\n# define BOOST_PP_SEQ_ELEM_26(_) BOOST_PP_SEQ_ELEM_25\n# define BOOST_PP_SEQ_ELEM_27(_) BOOST_PP_SEQ_ELEM_26\n# define BOOST_PP_SEQ_ELEM_28(_) BOOST_PP_SEQ_ELEM_27\n# define BOOST_PP_SEQ_ELEM_29(_) BOOST_PP_SEQ_ELEM_28\n# define BOOST_PP_SEQ_ELEM_30(_) BOOST_PP_SEQ_ELEM_29\n# define BOOST_PP_SEQ_ELEM_31(_) BOOST_PP_SEQ_ELEM_30\n# define BOOST_PP_SEQ_ELEM_32(_) BOOST_PP_SEQ_ELEM_31\n# define 
BOOST_PP_SEQ_ELEM_33(_) BOOST_PP_SEQ_ELEM_32\n# define BOOST_PP_SEQ_ELEM_34(_) BOOST_PP_SEQ_ELEM_33\n# define BOOST_PP_SEQ_ELEM_35(_) BOOST_PP_SEQ_ELEM_34\n# define BOOST_PP_SEQ_ELEM_36(_) BOOST_PP_SEQ_ELEM_35\n# define BOOST_PP_SEQ_ELEM_37(_) BOOST_PP_SEQ_ELEM_36\n# define BOOST_PP_SEQ_ELEM_38(_) BOOST_PP_SEQ_ELEM_37\n# define BOOST_PP_SEQ_ELEM_39(_) BOOST_PP_SEQ_ELEM_38\n# define BOOST_PP_SEQ_ELEM_40(_) BOOST_PP_SEQ_ELEM_39\n# define BOOST_PP_SEQ_ELEM_41(_) BOOST_PP_SEQ_ELEM_40\n# define BOOST_PP_SEQ_ELEM_42(_) BOOST_PP_SEQ_ELEM_41\n# define BOOST_PP_SEQ_ELEM_43(_) BOOST_PP_SEQ_ELEM_42\n# define BOOST_PP_SEQ_ELEM_44(_) BOOST_PP_SEQ_ELEM_43\n# define BOOST_PP_SEQ_ELEM_45(_) BOOST_PP_SEQ_ELEM_44\n# define BOOST_PP_SEQ_ELEM_46(_) BOOST_PP_SEQ_ELEM_45\n# define BOOST_PP_SEQ_ELEM_47(_) BOOST_PP_SEQ_ELEM_46\n# define BOOST_PP_SEQ_ELEM_48(_) BOOST_PP_SEQ_ELEM_47\n# define BOOST_PP_SEQ_ELEM_49(_) BOOST_PP_SEQ_ELEM_48\n# define BOOST_PP_SEQ_ELEM_50(_) BOOST_PP_SEQ_ELEM_49\n# define BOOST_PP_SEQ_ELEM_51(_) BOOST_PP_SEQ_ELEM_50\n# define BOOST_PP_SEQ_ELEM_52(_) BOOST_PP_SEQ_ELEM_51\n# define BOOST_PP_SEQ_ELEM_53(_) BOOST_PP_SEQ_ELEM_52\n# define BOOST_PP_SEQ_ELEM_54(_) BOOST_PP_SEQ_ELEM_53\n# define BOOST_PP_SEQ_ELEM_55(_) BOOST_PP_SEQ_ELEM_54\n# define BOOST_PP_SEQ_ELEM_56(_) BOOST_PP_SEQ_ELEM_55\n# define BOOST_PP_SEQ_ELEM_57(_) BOOST_PP_SEQ_ELEM_56\n# define BOOST_PP_SEQ_ELEM_58(_) BOOST_PP_SEQ_ELEM_57\n# define BOOST_PP_SEQ_ELEM_59(_) BOOST_PP_SEQ_ELEM_58\n# define BOOST_PP_SEQ_ELEM_60(_) BOOST_PP_SEQ_ELEM_59\n# define BOOST_PP_SEQ_ELEM_61(_) BOOST_PP_SEQ_ELEM_60\n# define BOOST_PP_SEQ_ELEM_62(_) BOOST_PP_SEQ_ELEM_61\n# define BOOST_PP_SEQ_ELEM_63(_) BOOST_PP_SEQ_ELEM_62\n# define BOOST_PP_SEQ_ELEM_64(_) BOOST_PP_SEQ_ELEM_63\n# define BOOST_PP_SEQ_ELEM_65(_) BOOST_PP_SEQ_ELEM_64\n# define BOOST_PP_SEQ_ELEM_66(_) BOOST_PP_SEQ_ELEM_65\n# define BOOST_PP_SEQ_ELEM_67(_) BOOST_PP_SEQ_ELEM_66\n# define BOOST_PP_SEQ_ELEM_68(_) BOOST_PP_SEQ_ELEM_67\n# define 
BOOST_PP_SEQ_ELEM_69(_) BOOST_PP_SEQ_ELEM_68\n# define BOOST_PP_SEQ_ELEM_70(_) BOOST_PP_SEQ_ELEM_69\n# define BOOST_PP_SEQ_ELEM_71(_) BOOST_PP_SEQ_ELEM_70\n# define BOOST_PP_SEQ_ELEM_72(_) BOOST_PP_SEQ_ELEM_71\n# define BOOST_PP_SEQ_ELEM_73(_) BOOST_PP_SEQ_ELEM_72\n# define BOOST_PP_SEQ_ELEM_74(_) BOOST_PP_SEQ_ELEM_73\n# define BOOST_PP_SEQ_ELEM_75(_) BOOST_PP_SEQ_ELEM_74\n# define BOOST_PP_SEQ_ELEM_76(_) BOOST_PP_SEQ_ELEM_75\n# define BOOST_PP_SEQ_ELEM_77(_) BOOST_PP_SEQ_ELEM_76\n# define BOOST_PP_SEQ_ELEM_78(_) BOOST_PP_SEQ_ELEM_77\n# define BOOST_PP_SEQ_ELEM_79(_) BOOST_PP_SEQ_ELEM_78\n# define BOOST_PP_SEQ_ELEM_80(_) BOOST_PP_SEQ_ELEM_79\n# define BOOST_PP_SEQ_ELEM_81(_) BOOST_PP_SEQ_ELEM_80\n# define BOOST_PP_SEQ_ELEM_82(_) BOOST_PP_SEQ_ELEM_81\n# define BOOST_PP_SEQ_ELEM_83(_) BOOST_PP_SEQ_ELEM_82\n# define BOOST_PP_SEQ_ELEM_84(_) BOOST_PP_SEQ_ELEM_83\n# define BOOST_PP_SEQ_ELEM_85(_) BOOST_PP_SEQ_ELEM_84\n# define BOOST_PP_SEQ_ELEM_86(_) BOOST_PP_SEQ_ELEM_85\n# define BOOST_PP_SEQ_ELEM_87(_) BOOST_PP_SEQ_ELEM_86\n# define BOOST_PP_SEQ_ELEM_88(_) BOOST_PP_SEQ_ELEM_87\n# define BOOST_PP_SEQ_ELEM_89(_) BOOST_PP_SEQ_ELEM_88\n# define BOOST_PP_SEQ_ELEM_90(_) BOOST_PP_SEQ_ELEM_89\n# define BOOST_PP_SEQ_ELEM_91(_) BOOST_PP_SEQ_ELEM_90\n# define BOOST_PP_SEQ_ELEM_92(_) BOOST_PP_SEQ_ELEM_91\n# define BOOST_PP_SEQ_ELEM_93(_) BOOST_PP_SEQ_ELEM_92\n# define BOOST_PP_SEQ_ELEM_94(_) BOOST_PP_SEQ_ELEM_93\n# define BOOST_PP_SEQ_ELEM_95(_) BOOST_PP_SEQ_ELEM_94\n# define BOOST_PP_SEQ_ELEM_96(_) BOOST_PP_SEQ_ELEM_95\n# define BOOST_PP_SEQ_ELEM_97(_) BOOST_PP_SEQ_ELEM_96\n# define BOOST_PP_SEQ_ELEM_98(_) BOOST_PP_SEQ_ELEM_97\n# define BOOST_PP_SEQ_ELEM_99(_) BOOST_PP_SEQ_ELEM_98\n# define BOOST_PP_SEQ_ELEM_100(_) BOOST_PP_SEQ_ELEM_99\n# define BOOST_PP_SEQ_ELEM_101(_) BOOST_PP_SEQ_ELEM_100\n# define BOOST_PP_SEQ_ELEM_102(_) BOOST_PP_SEQ_ELEM_101\n# define BOOST_PP_SEQ_ELEM_103(_) BOOST_PP_SEQ_ELEM_102\n# define BOOST_PP_SEQ_ELEM_104(_) BOOST_PP_SEQ_ELEM_103\n# define 
BOOST_PP_SEQ_ELEM_105(_) BOOST_PP_SEQ_ELEM_104\n# define BOOST_PP_SEQ_ELEM_106(_) BOOST_PP_SEQ_ELEM_105\n# define BOOST_PP_SEQ_ELEM_107(_) BOOST_PP_SEQ_ELEM_106\n# define BOOST_PP_SEQ_ELEM_108(_) BOOST_PP_SEQ_ELEM_107\n# define BOOST_PP_SEQ_ELEM_109(_) BOOST_PP_SEQ_ELEM_108\n# define BOOST_PP_SEQ_ELEM_110(_) BOOST_PP_SEQ_ELEM_109\n# define BOOST_PP_SEQ_ELEM_111(_) BOOST_PP_SEQ_ELEM_110\n# define BOOST_PP_SEQ_ELEM_112(_) BOOST_PP_SEQ_ELEM_111\n# define BOOST_PP_SEQ_ELEM_113(_) BOOST_PP_SEQ_ELEM_112\n# define BOOST_PP_SEQ_ELEM_114(_) BOOST_PP_SEQ_ELEM_113\n# define BOOST_PP_SEQ_ELEM_115(_) BOOST_PP_SEQ_ELEM_114\n# define BOOST_PP_SEQ_ELEM_116(_) BOOST_PP_SEQ_ELEM_115\n# define BOOST_PP_SEQ_ELEM_117(_) BOOST_PP_SEQ_ELEM_116\n# define BOOST_PP_SEQ_ELEM_118(_) BOOST_PP_SEQ_ELEM_117\n# define BOOST_PP_SEQ_ELEM_119(_) BOOST_PP_SEQ_ELEM_118\n# define BOOST_PP_SEQ_ELEM_120(_) BOOST_PP_SEQ_ELEM_119\n# define BOOST_PP_SEQ_ELEM_121(_) BOOST_PP_SEQ_ELEM_120\n# define BOOST_PP_SEQ_ELEM_122(_) BOOST_PP_SEQ_ELEM_121\n# define BOOST_PP_SEQ_ELEM_123(_) BOOST_PP_SEQ_ELEM_122\n# define BOOST_PP_SEQ_ELEM_124(_) BOOST_PP_SEQ_ELEM_123\n# define BOOST_PP_SEQ_ELEM_125(_) BOOST_PP_SEQ_ELEM_124\n# define BOOST_PP_SEQ_ELEM_126(_) BOOST_PP_SEQ_ELEM_125\n# define BOOST_PP_SEQ_ELEM_127(_) BOOST_PP_SEQ_ELEM_126\n# define BOOST_PP_SEQ_ELEM_128(_) BOOST_PP_SEQ_ELEM_127\n# define BOOST_PP_SEQ_ELEM_129(_) BOOST_PP_SEQ_ELEM_128\n# define BOOST_PP_SEQ_ELEM_130(_) BOOST_PP_SEQ_ELEM_129\n# define BOOST_PP_SEQ_ELEM_131(_) BOOST_PP_SEQ_ELEM_130\n# define BOOST_PP_SEQ_ELEM_132(_) BOOST_PP_SEQ_ELEM_131\n# define BOOST_PP_SEQ_ELEM_133(_) BOOST_PP_SEQ_ELEM_132\n# define BOOST_PP_SEQ_ELEM_134(_) BOOST_PP_SEQ_ELEM_133\n# define BOOST_PP_SEQ_ELEM_135(_) BOOST_PP_SEQ_ELEM_134\n# define BOOST_PP_SEQ_ELEM_136(_) BOOST_PP_SEQ_ELEM_135\n# define BOOST_PP_SEQ_ELEM_137(_) BOOST_PP_SEQ_ELEM_136\n# define BOOST_PP_SEQ_ELEM_138(_) BOOST_PP_SEQ_ELEM_137\n# define BOOST_PP_SEQ_ELEM_139(_) BOOST_PP_SEQ_ELEM_138\n# define 
BOOST_PP_SEQ_ELEM_140(_) BOOST_PP_SEQ_ELEM_139\n# define BOOST_PP_SEQ_ELEM_141(_) BOOST_PP_SEQ_ELEM_140\n# define BOOST_PP_SEQ_ELEM_142(_) BOOST_PP_SEQ_ELEM_141\n# define BOOST_PP_SEQ_ELEM_143(_) BOOST_PP_SEQ_ELEM_142\n# define BOOST_PP_SEQ_ELEM_144(_) BOOST_PP_SEQ_ELEM_143\n# define BOOST_PP_SEQ_ELEM_145(_) BOOST_PP_SEQ_ELEM_144\n# define BOOST_PP_SEQ_ELEM_146(_) BOOST_PP_SEQ_ELEM_145\n# define BOOST_PP_SEQ_ELEM_147(_) BOOST_PP_SEQ_ELEM_146\n# define BOOST_PP_SEQ_ELEM_148(_) BOOST_PP_SEQ_ELEM_147\n# define BOOST_PP_SEQ_ELEM_149(_) BOOST_PP_SEQ_ELEM_148\n# define BOOST_PP_SEQ_ELEM_150(_) BOOST_PP_SEQ_ELEM_149\n# define BOOST_PP_SEQ_ELEM_151(_) BOOST_PP_SEQ_ELEM_150\n# define BOOST_PP_SEQ_ELEM_152(_) BOOST_PP_SEQ_ELEM_151\n# define BOOST_PP_SEQ_ELEM_153(_) BOOST_PP_SEQ_ELEM_152\n# define BOOST_PP_SEQ_ELEM_154(_) BOOST_PP_SEQ_ELEM_153\n# define BOOST_PP_SEQ_ELEM_155(_) BOOST_PP_SEQ_ELEM_154\n# define BOOST_PP_SEQ_ELEM_156(_) BOOST_PP_SEQ_ELEM_155\n# define BOOST_PP_SEQ_ELEM_157(_) BOOST_PP_SEQ_ELEM_156\n# define BOOST_PP_SEQ_ELEM_158(_) BOOST_PP_SEQ_ELEM_157\n# define BOOST_PP_SEQ_ELEM_159(_) BOOST_PP_SEQ_ELEM_158\n# define BOOST_PP_SEQ_ELEM_160(_) BOOST_PP_SEQ_ELEM_159\n# define BOOST_PP_SEQ_ELEM_161(_) BOOST_PP_SEQ_ELEM_160\n# define BOOST_PP_SEQ_ELEM_162(_) BOOST_PP_SEQ_ELEM_161\n# define BOOST_PP_SEQ_ELEM_163(_) BOOST_PP_SEQ_ELEM_162\n# define BOOST_PP_SEQ_ELEM_164(_) BOOST_PP_SEQ_ELEM_163\n# define BOOST_PP_SEQ_ELEM_165(_) BOOST_PP_SEQ_ELEM_164\n# define BOOST_PP_SEQ_ELEM_166(_) BOOST_PP_SEQ_ELEM_165\n# define BOOST_PP_SEQ_ELEM_167(_) BOOST_PP_SEQ_ELEM_166\n# define BOOST_PP_SEQ_ELEM_168(_) BOOST_PP_SEQ_ELEM_167\n# define BOOST_PP_SEQ_ELEM_169(_) BOOST_PP_SEQ_ELEM_168\n# define BOOST_PP_SEQ_ELEM_170(_) BOOST_PP_SEQ_ELEM_169\n# define BOOST_PP_SEQ_ELEM_171(_) BOOST_PP_SEQ_ELEM_170\n# define BOOST_PP_SEQ_ELEM_172(_) BOOST_PP_SEQ_ELEM_171\n# define BOOST_PP_SEQ_ELEM_173(_) BOOST_PP_SEQ_ELEM_172\n# define BOOST_PP_SEQ_ELEM_174(_) BOOST_PP_SEQ_ELEM_173\n# define 
BOOST_PP_SEQ_ELEM_175(_) BOOST_PP_SEQ_ELEM_174\n# define BOOST_PP_SEQ_ELEM_176(_) BOOST_PP_SEQ_ELEM_175\n# define BOOST_PP_SEQ_ELEM_177(_) BOOST_PP_SEQ_ELEM_176\n# define BOOST_PP_SEQ_ELEM_178(_) BOOST_PP_SEQ_ELEM_177\n# define BOOST_PP_SEQ_ELEM_179(_) BOOST_PP_SEQ_ELEM_178\n# define BOOST_PP_SEQ_ELEM_180(_) BOOST_PP_SEQ_ELEM_179\n# define BOOST_PP_SEQ_ELEM_181(_) BOOST_PP_SEQ_ELEM_180\n# define BOOST_PP_SEQ_ELEM_182(_) BOOST_PP_SEQ_ELEM_181\n# define BOOST_PP_SEQ_ELEM_183(_) BOOST_PP_SEQ_ELEM_182\n# define BOOST_PP_SEQ_ELEM_184(_) BOOST_PP_SEQ_ELEM_183\n# define BOOST_PP_SEQ_ELEM_185(_) BOOST_PP_SEQ_ELEM_184\n# define BOOST_PP_SEQ_ELEM_186(_) BOOST_PP_SEQ_ELEM_185\n# define BOOST_PP_SEQ_ELEM_187(_) BOOST_PP_SEQ_ELEM_186\n# define BOOST_PP_SEQ_ELEM_188(_) BOOST_PP_SEQ_ELEM_187\n# define BOOST_PP_SEQ_ELEM_189(_) BOOST_PP_SEQ_ELEM_188\n# define BOOST_PP_SEQ_ELEM_190(_) BOOST_PP_SEQ_ELEM_189\n# define BOOST_PP_SEQ_ELEM_191(_) BOOST_PP_SEQ_ELEM_190\n# define BOOST_PP_SEQ_ELEM_192(_) BOOST_PP_SEQ_ELEM_191\n# define BOOST_PP_SEQ_ELEM_193(_) BOOST_PP_SEQ_ELEM_192\n# define BOOST_PP_SEQ_ELEM_194(_) BOOST_PP_SEQ_ELEM_193\n# define BOOST_PP_SEQ_ELEM_195(_) BOOST_PP_SEQ_ELEM_194\n# define BOOST_PP_SEQ_ELEM_196(_) BOOST_PP_SEQ_ELEM_195\n# define BOOST_PP_SEQ_ELEM_197(_) BOOST_PP_SEQ_ELEM_196\n# define BOOST_PP_SEQ_ELEM_198(_) BOOST_PP_SEQ_ELEM_197\n# define BOOST_PP_SEQ_ELEM_199(_) BOOST_PP_SEQ_ELEM_198\n# define BOOST_PP_SEQ_ELEM_200(_) BOOST_PP_SEQ_ELEM_199\n# define BOOST_PP_SEQ_ELEM_201(_) BOOST_PP_SEQ_ELEM_200\n# define BOOST_PP_SEQ_ELEM_202(_) BOOST_PP_SEQ_ELEM_201\n# define BOOST_PP_SEQ_ELEM_203(_) BOOST_PP_SEQ_ELEM_202\n# define BOOST_PP_SEQ_ELEM_204(_) BOOST_PP_SEQ_ELEM_203\n# define BOOST_PP_SEQ_ELEM_205(_) BOOST_PP_SEQ_ELEM_204\n# define BOOST_PP_SEQ_ELEM_206(_) BOOST_PP_SEQ_ELEM_205\n# define BOOST_PP_SEQ_ELEM_207(_) BOOST_PP_SEQ_ELEM_206\n# define BOOST_PP_SEQ_ELEM_208(_) BOOST_PP_SEQ_ELEM_207\n# define BOOST_PP_SEQ_ELEM_209(_) BOOST_PP_SEQ_ELEM_208\n# define 
BOOST_PP_SEQ_ELEM_210(_) BOOST_PP_SEQ_ELEM_209\n# define BOOST_PP_SEQ_ELEM_211(_) BOOST_PP_SEQ_ELEM_210\n# define BOOST_PP_SEQ_ELEM_212(_) BOOST_PP_SEQ_ELEM_211\n# define BOOST_PP_SEQ_ELEM_213(_) BOOST_PP_SEQ_ELEM_212\n# define BOOST_PP_SEQ_ELEM_214(_) BOOST_PP_SEQ_ELEM_213\n# define BOOST_PP_SEQ_ELEM_215(_) BOOST_PP_SEQ_ELEM_214\n# define BOOST_PP_SEQ_ELEM_216(_) BOOST_PP_SEQ_ELEM_215\n# define BOOST_PP_SEQ_ELEM_217(_) BOOST_PP_SEQ_ELEM_216\n# define BOOST_PP_SEQ_ELEM_218(_) BOOST_PP_SEQ_ELEM_217\n# define BOOST_PP_SEQ_ELEM_219(_) BOOST_PP_SEQ_ELEM_218\n# define BOOST_PP_SEQ_ELEM_220(_) BOOST_PP_SEQ_ELEM_219\n# define BOOST_PP_SEQ_ELEM_221(_) BOOST_PP_SEQ_ELEM_220\n# define BOOST_PP_SEQ_ELEM_222(_) BOOST_PP_SEQ_ELEM_221\n# define BOOST_PP_SEQ_ELEM_223(_) BOOST_PP_SEQ_ELEM_222\n# define BOOST_PP_SEQ_ELEM_224(_) BOOST_PP_SEQ_ELEM_223\n# define BOOST_PP_SEQ_ELEM_225(_) BOOST_PP_SEQ_ELEM_224\n# define BOOST_PP_SEQ_ELEM_226(_) BOOST_PP_SEQ_ELEM_225\n# define BOOST_PP_SEQ_ELEM_227(_) BOOST_PP_SEQ_ELEM_226\n# define BOOST_PP_SEQ_ELEM_228(_) BOOST_PP_SEQ_ELEM_227\n# define BOOST_PP_SEQ_ELEM_229(_) BOOST_PP_SEQ_ELEM_228\n# define BOOST_PP_SEQ_ELEM_230(_) BOOST_PP_SEQ_ELEM_229\n# define BOOST_PP_SEQ_ELEM_231(_) BOOST_PP_SEQ_ELEM_230\n# define BOOST_PP_SEQ_ELEM_232(_) BOOST_PP_SEQ_ELEM_231\n# define BOOST_PP_SEQ_ELEM_233(_) BOOST_PP_SEQ_ELEM_232\n# define BOOST_PP_SEQ_ELEM_234(_) BOOST_PP_SEQ_ELEM_233\n# define BOOST_PP_SEQ_ELEM_235(_) BOOST_PP_SEQ_ELEM_234\n# define BOOST_PP_SEQ_ELEM_236(_) BOOST_PP_SEQ_ELEM_235\n# define BOOST_PP_SEQ_ELEM_237(_) BOOST_PP_SEQ_ELEM_236\n# define BOOST_PP_SEQ_ELEM_238(_) BOOST_PP_SEQ_ELEM_237\n# define BOOST_PP_SEQ_ELEM_239(_) BOOST_PP_SEQ_ELEM_238\n# define BOOST_PP_SEQ_ELEM_240(_) BOOST_PP_SEQ_ELEM_239\n# define BOOST_PP_SEQ_ELEM_241(_) BOOST_PP_SEQ_ELEM_240\n# define BOOST_PP_SEQ_ELEM_242(_) BOOST_PP_SEQ_ELEM_241\n# define BOOST_PP_SEQ_ELEM_243(_) BOOST_PP_SEQ_ELEM_242\n# define BOOST_PP_SEQ_ELEM_244(_) BOOST_PP_SEQ_ELEM_243\n# define 
BOOST_PP_SEQ_ELEM_245(_) BOOST_PP_SEQ_ELEM_244\n# define BOOST_PP_SEQ_ELEM_246(_) BOOST_PP_SEQ_ELEM_245\n# define BOOST_PP_SEQ_ELEM_247(_) BOOST_PP_SEQ_ELEM_246\n# define BOOST_PP_SEQ_ELEM_248(_) BOOST_PP_SEQ_ELEM_247\n# define BOOST_PP_SEQ_ELEM_249(_) BOOST_PP_SEQ_ELEM_248\n# define BOOST_PP_SEQ_ELEM_250(_) BOOST_PP_SEQ_ELEM_249\n# define BOOST_PP_SEQ_ELEM_251(_) BOOST_PP_SEQ_ELEM_250\n# define BOOST_PP_SEQ_ELEM_252(_) BOOST_PP_SEQ_ELEM_251\n# define BOOST_PP_SEQ_ELEM_253(_) BOOST_PP_SEQ_ELEM_252\n# define BOOST_PP_SEQ_ELEM_254(_) BOOST_PP_SEQ_ELEM_253\n# define BOOST_PP_SEQ_ELEM_255(_) BOOST_PP_SEQ_ELEM_254\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/enum.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_ENUM_HPP\n# define BOOST_PREPROCESSOR_SEQ_ENUM_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/seq/size.hpp>\n#\n# /* BOOST_PP_SEQ_ENUM */\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_ENUM(seq) BOOST_PP_SEQ_ENUM_I(seq)\n#    define BOOST_PP_SEQ_ENUM_I(seq) BOOST_PP_CAT(BOOST_PP_SEQ_ENUM_, BOOST_PP_SEQ_SIZE(seq)) seq\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SEQ_ENUM(seq) BOOST_PP_SEQ_ENUM_I(BOOST_PP_SEQ_SIZE(seq), seq)\n#    define BOOST_PP_SEQ_ENUM_I(size, seq) BOOST_PP_CAT(BOOST_PP_SEQ_ENUM_, size) seq\n# else\n#    define BOOST_PP_SEQ_ENUM(seq) BOOST_PP_CAT(BOOST_PP_SEQ_ENUM_, BOOST_PP_SEQ_SIZE(seq)) seq\n# endif\n#\n# define BOOST_PP_SEQ_ENUM_1(x) x\n# define BOOST_PP_SEQ_ENUM_2(x) x, BOOST_PP_SEQ_ENUM_1\n# define BOOST_PP_SEQ_ENUM_3(x) x, BOOST_PP_SEQ_ENUM_2\n# define BOOST_PP_SEQ_ENUM_4(x) x, BOOST_PP_SEQ_ENUM_3\n# define BOOST_PP_SEQ_ENUM_5(x) x, BOOST_PP_SEQ_ENUM_4\n# define BOOST_PP_SEQ_ENUM_6(x) x, BOOST_PP_SEQ_ENUM_5\n# define BOOST_PP_SEQ_ENUM_7(x) x, BOOST_PP_SEQ_ENUM_6\n# define BOOST_PP_SEQ_ENUM_8(x) x, BOOST_PP_SEQ_ENUM_7\n# define BOOST_PP_SEQ_ENUM_9(x) x, BOOST_PP_SEQ_ENUM_8\n# define BOOST_PP_SEQ_ENUM_10(x) x, BOOST_PP_SEQ_ENUM_9\n# define 
BOOST_PP_SEQ_ENUM_11(x) x, BOOST_PP_SEQ_ENUM_10\n# define BOOST_PP_SEQ_ENUM_12(x) x, BOOST_PP_SEQ_ENUM_11\n# define BOOST_PP_SEQ_ENUM_13(x) x, BOOST_PP_SEQ_ENUM_12\n# define BOOST_PP_SEQ_ENUM_14(x) x, BOOST_PP_SEQ_ENUM_13\n# define BOOST_PP_SEQ_ENUM_15(x) x, BOOST_PP_SEQ_ENUM_14\n# define BOOST_PP_SEQ_ENUM_16(x) x, BOOST_PP_SEQ_ENUM_15\n# define BOOST_PP_SEQ_ENUM_17(x) x, BOOST_PP_SEQ_ENUM_16\n# define BOOST_PP_SEQ_ENUM_18(x) x, BOOST_PP_SEQ_ENUM_17\n# define BOOST_PP_SEQ_ENUM_19(x) x, BOOST_PP_SEQ_ENUM_18\n# define BOOST_PP_SEQ_ENUM_20(x) x, BOOST_PP_SEQ_ENUM_19\n# define BOOST_PP_SEQ_ENUM_21(x) x, BOOST_PP_SEQ_ENUM_20\n# define BOOST_PP_SEQ_ENUM_22(x) x, BOOST_PP_SEQ_ENUM_21\n# define BOOST_PP_SEQ_ENUM_23(x) x, BOOST_PP_SEQ_ENUM_22\n# define BOOST_PP_SEQ_ENUM_24(x) x, BOOST_PP_SEQ_ENUM_23\n# define BOOST_PP_SEQ_ENUM_25(x) x, BOOST_PP_SEQ_ENUM_24\n# define BOOST_PP_SEQ_ENUM_26(x) x, BOOST_PP_SEQ_ENUM_25\n# define BOOST_PP_SEQ_ENUM_27(x) x, BOOST_PP_SEQ_ENUM_26\n# define BOOST_PP_SEQ_ENUM_28(x) x, BOOST_PP_SEQ_ENUM_27\n# define BOOST_PP_SEQ_ENUM_29(x) x, BOOST_PP_SEQ_ENUM_28\n# define BOOST_PP_SEQ_ENUM_30(x) x, BOOST_PP_SEQ_ENUM_29\n# define BOOST_PP_SEQ_ENUM_31(x) x, BOOST_PP_SEQ_ENUM_30\n# define BOOST_PP_SEQ_ENUM_32(x) x, BOOST_PP_SEQ_ENUM_31\n# define BOOST_PP_SEQ_ENUM_33(x) x, BOOST_PP_SEQ_ENUM_32\n# define BOOST_PP_SEQ_ENUM_34(x) x, BOOST_PP_SEQ_ENUM_33\n# define BOOST_PP_SEQ_ENUM_35(x) x, BOOST_PP_SEQ_ENUM_34\n# define BOOST_PP_SEQ_ENUM_36(x) x, BOOST_PP_SEQ_ENUM_35\n# define BOOST_PP_SEQ_ENUM_37(x) x, BOOST_PP_SEQ_ENUM_36\n# define BOOST_PP_SEQ_ENUM_38(x) x, BOOST_PP_SEQ_ENUM_37\n# define BOOST_PP_SEQ_ENUM_39(x) x, BOOST_PP_SEQ_ENUM_38\n# define BOOST_PP_SEQ_ENUM_40(x) x, BOOST_PP_SEQ_ENUM_39\n# define BOOST_PP_SEQ_ENUM_41(x) x, BOOST_PP_SEQ_ENUM_40\n# define BOOST_PP_SEQ_ENUM_42(x) x, BOOST_PP_SEQ_ENUM_41\n# define BOOST_PP_SEQ_ENUM_43(x) x, BOOST_PP_SEQ_ENUM_42\n# define BOOST_PP_SEQ_ENUM_44(x) x, BOOST_PP_SEQ_ENUM_43\n# define BOOST_PP_SEQ_ENUM_45(x) x, 
BOOST_PP_SEQ_ENUM_44\n# define BOOST_PP_SEQ_ENUM_46(x) x, BOOST_PP_SEQ_ENUM_45\n# define BOOST_PP_SEQ_ENUM_47(x) x, BOOST_PP_SEQ_ENUM_46\n# define BOOST_PP_SEQ_ENUM_48(x) x, BOOST_PP_SEQ_ENUM_47\n# define BOOST_PP_SEQ_ENUM_49(x) x, BOOST_PP_SEQ_ENUM_48\n# define BOOST_PP_SEQ_ENUM_50(x) x, BOOST_PP_SEQ_ENUM_49\n# define BOOST_PP_SEQ_ENUM_51(x) x, BOOST_PP_SEQ_ENUM_50\n# define BOOST_PP_SEQ_ENUM_52(x) x, BOOST_PP_SEQ_ENUM_51\n# define BOOST_PP_SEQ_ENUM_53(x) x, BOOST_PP_SEQ_ENUM_52\n# define BOOST_PP_SEQ_ENUM_54(x) x, BOOST_PP_SEQ_ENUM_53\n# define BOOST_PP_SEQ_ENUM_55(x) x, BOOST_PP_SEQ_ENUM_54\n# define BOOST_PP_SEQ_ENUM_56(x) x, BOOST_PP_SEQ_ENUM_55\n# define BOOST_PP_SEQ_ENUM_57(x) x, BOOST_PP_SEQ_ENUM_56\n# define BOOST_PP_SEQ_ENUM_58(x) x, BOOST_PP_SEQ_ENUM_57\n# define BOOST_PP_SEQ_ENUM_59(x) x, BOOST_PP_SEQ_ENUM_58\n# define BOOST_PP_SEQ_ENUM_60(x) x, BOOST_PP_SEQ_ENUM_59\n# define BOOST_PP_SEQ_ENUM_61(x) x, BOOST_PP_SEQ_ENUM_60\n# define BOOST_PP_SEQ_ENUM_62(x) x, BOOST_PP_SEQ_ENUM_61\n# define BOOST_PP_SEQ_ENUM_63(x) x, BOOST_PP_SEQ_ENUM_62\n# define BOOST_PP_SEQ_ENUM_64(x) x, BOOST_PP_SEQ_ENUM_63\n# define BOOST_PP_SEQ_ENUM_65(x) x, BOOST_PP_SEQ_ENUM_64\n# define BOOST_PP_SEQ_ENUM_66(x) x, BOOST_PP_SEQ_ENUM_65\n# define BOOST_PP_SEQ_ENUM_67(x) x, BOOST_PP_SEQ_ENUM_66\n# define BOOST_PP_SEQ_ENUM_68(x) x, BOOST_PP_SEQ_ENUM_67\n# define BOOST_PP_SEQ_ENUM_69(x) x, BOOST_PP_SEQ_ENUM_68\n# define BOOST_PP_SEQ_ENUM_70(x) x, BOOST_PP_SEQ_ENUM_69\n# define BOOST_PP_SEQ_ENUM_71(x) x, BOOST_PP_SEQ_ENUM_70\n# define BOOST_PP_SEQ_ENUM_72(x) x, BOOST_PP_SEQ_ENUM_71\n# define BOOST_PP_SEQ_ENUM_73(x) x, BOOST_PP_SEQ_ENUM_72\n# define BOOST_PP_SEQ_ENUM_74(x) x, BOOST_PP_SEQ_ENUM_73\n# define BOOST_PP_SEQ_ENUM_75(x) x, BOOST_PP_SEQ_ENUM_74\n# define BOOST_PP_SEQ_ENUM_76(x) x, BOOST_PP_SEQ_ENUM_75\n# define BOOST_PP_SEQ_ENUM_77(x) x, BOOST_PP_SEQ_ENUM_76\n# define BOOST_PP_SEQ_ENUM_78(x) x, BOOST_PP_SEQ_ENUM_77\n# define BOOST_PP_SEQ_ENUM_79(x) x, BOOST_PP_SEQ_ENUM_78\n# 
define BOOST_PP_SEQ_ENUM_80(x) x, BOOST_PP_SEQ_ENUM_79\n# define BOOST_PP_SEQ_ENUM_81(x) x, BOOST_PP_SEQ_ENUM_80\n# define BOOST_PP_SEQ_ENUM_82(x) x, BOOST_PP_SEQ_ENUM_81\n# define BOOST_PP_SEQ_ENUM_83(x) x, BOOST_PP_SEQ_ENUM_82\n# define BOOST_PP_SEQ_ENUM_84(x) x, BOOST_PP_SEQ_ENUM_83\n# define BOOST_PP_SEQ_ENUM_85(x) x, BOOST_PP_SEQ_ENUM_84\n# define BOOST_PP_SEQ_ENUM_86(x) x, BOOST_PP_SEQ_ENUM_85\n# define BOOST_PP_SEQ_ENUM_87(x) x, BOOST_PP_SEQ_ENUM_86\n# define BOOST_PP_SEQ_ENUM_88(x) x, BOOST_PP_SEQ_ENUM_87\n# define BOOST_PP_SEQ_ENUM_89(x) x, BOOST_PP_SEQ_ENUM_88\n# define BOOST_PP_SEQ_ENUM_90(x) x, BOOST_PP_SEQ_ENUM_89\n# define BOOST_PP_SEQ_ENUM_91(x) x, BOOST_PP_SEQ_ENUM_90\n# define BOOST_PP_SEQ_ENUM_92(x) x, BOOST_PP_SEQ_ENUM_91\n# define BOOST_PP_SEQ_ENUM_93(x) x, BOOST_PP_SEQ_ENUM_92\n# define BOOST_PP_SEQ_ENUM_94(x) x, BOOST_PP_SEQ_ENUM_93\n# define BOOST_PP_SEQ_ENUM_95(x) x, BOOST_PP_SEQ_ENUM_94\n# define BOOST_PP_SEQ_ENUM_96(x) x, BOOST_PP_SEQ_ENUM_95\n# define BOOST_PP_SEQ_ENUM_97(x) x, BOOST_PP_SEQ_ENUM_96\n# define BOOST_PP_SEQ_ENUM_98(x) x, BOOST_PP_SEQ_ENUM_97\n# define BOOST_PP_SEQ_ENUM_99(x) x, BOOST_PP_SEQ_ENUM_98\n# define BOOST_PP_SEQ_ENUM_100(x) x, BOOST_PP_SEQ_ENUM_99\n# define BOOST_PP_SEQ_ENUM_101(x) x, BOOST_PP_SEQ_ENUM_100\n# define BOOST_PP_SEQ_ENUM_102(x) x, BOOST_PP_SEQ_ENUM_101\n# define BOOST_PP_SEQ_ENUM_103(x) x, BOOST_PP_SEQ_ENUM_102\n# define BOOST_PP_SEQ_ENUM_104(x) x, BOOST_PP_SEQ_ENUM_103\n# define BOOST_PP_SEQ_ENUM_105(x) x, BOOST_PP_SEQ_ENUM_104\n# define BOOST_PP_SEQ_ENUM_106(x) x, BOOST_PP_SEQ_ENUM_105\n# define BOOST_PP_SEQ_ENUM_107(x) x, BOOST_PP_SEQ_ENUM_106\n# define BOOST_PP_SEQ_ENUM_108(x) x, BOOST_PP_SEQ_ENUM_107\n# define BOOST_PP_SEQ_ENUM_109(x) x, BOOST_PP_SEQ_ENUM_108\n# define BOOST_PP_SEQ_ENUM_110(x) x, BOOST_PP_SEQ_ENUM_109\n# define BOOST_PP_SEQ_ENUM_111(x) x, BOOST_PP_SEQ_ENUM_110\n# define BOOST_PP_SEQ_ENUM_112(x) x, BOOST_PP_SEQ_ENUM_111\n# define BOOST_PP_SEQ_ENUM_113(x) x, BOOST_PP_SEQ_ENUM_112\n# 
define BOOST_PP_SEQ_ENUM_114(x) x, BOOST_PP_SEQ_ENUM_113\n# define BOOST_PP_SEQ_ENUM_115(x) x, BOOST_PP_SEQ_ENUM_114\n# define BOOST_PP_SEQ_ENUM_116(x) x, BOOST_PP_SEQ_ENUM_115\n# define BOOST_PP_SEQ_ENUM_117(x) x, BOOST_PP_SEQ_ENUM_116\n# define BOOST_PP_SEQ_ENUM_118(x) x, BOOST_PP_SEQ_ENUM_117\n# define BOOST_PP_SEQ_ENUM_119(x) x, BOOST_PP_SEQ_ENUM_118\n# define BOOST_PP_SEQ_ENUM_120(x) x, BOOST_PP_SEQ_ENUM_119\n# define BOOST_PP_SEQ_ENUM_121(x) x, BOOST_PP_SEQ_ENUM_120\n# define BOOST_PP_SEQ_ENUM_122(x) x, BOOST_PP_SEQ_ENUM_121\n# define BOOST_PP_SEQ_ENUM_123(x) x, BOOST_PP_SEQ_ENUM_122\n# define BOOST_PP_SEQ_ENUM_124(x) x, BOOST_PP_SEQ_ENUM_123\n# define BOOST_PP_SEQ_ENUM_125(x) x, BOOST_PP_SEQ_ENUM_124\n# define BOOST_PP_SEQ_ENUM_126(x) x, BOOST_PP_SEQ_ENUM_125\n# define BOOST_PP_SEQ_ENUM_127(x) x, BOOST_PP_SEQ_ENUM_126\n# define BOOST_PP_SEQ_ENUM_128(x) x, BOOST_PP_SEQ_ENUM_127\n# define BOOST_PP_SEQ_ENUM_129(x) x, BOOST_PP_SEQ_ENUM_128\n# define BOOST_PP_SEQ_ENUM_130(x) x, BOOST_PP_SEQ_ENUM_129\n# define BOOST_PP_SEQ_ENUM_131(x) x, BOOST_PP_SEQ_ENUM_130\n# define BOOST_PP_SEQ_ENUM_132(x) x, BOOST_PP_SEQ_ENUM_131\n# define BOOST_PP_SEQ_ENUM_133(x) x, BOOST_PP_SEQ_ENUM_132\n# define BOOST_PP_SEQ_ENUM_134(x) x, BOOST_PP_SEQ_ENUM_133\n# define BOOST_PP_SEQ_ENUM_135(x) x, BOOST_PP_SEQ_ENUM_134\n# define BOOST_PP_SEQ_ENUM_136(x) x, BOOST_PP_SEQ_ENUM_135\n# define BOOST_PP_SEQ_ENUM_137(x) x, BOOST_PP_SEQ_ENUM_136\n# define BOOST_PP_SEQ_ENUM_138(x) x, BOOST_PP_SEQ_ENUM_137\n# define BOOST_PP_SEQ_ENUM_139(x) x, BOOST_PP_SEQ_ENUM_138\n# define BOOST_PP_SEQ_ENUM_140(x) x, BOOST_PP_SEQ_ENUM_139\n# define BOOST_PP_SEQ_ENUM_141(x) x, BOOST_PP_SEQ_ENUM_140\n# define BOOST_PP_SEQ_ENUM_142(x) x, BOOST_PP_SEQ_ENUM_141\n# define BOOST_PP_SEQ_ENUM_143(x) x, BOOST_PP_SEQ_ENUM_142\n# define BOOST_PP_SEQ_ENUM_144(x) x, BOOST_PP_SEQ_ENUM_143\n# define BOOST_PP_SEQ_ENUM_145(x) x, BOOST_PP_SEQ_ENUM_144\n# define BOOST_PP_SEQ_ENUM_146(x) x, BOOST_PP_SEQ_ENUM_145\n# define 
BOOST_PP_SEQ_ENUM_147(x) x, BOOST_PP_SEQ_ENUM_146\n# define BOOST_PP_SEQ_ENUM_148(x) x, BOOST_PP_SEQ_ENUM_147\n# define BOOST_PP_SEQ_ENUM_149(x) x, BOOST_PP_SEQ_ENUM_148\n# define BOOST_PP_SEQ_ENUM_150(x) x, BOOST_PP_SEQ_ENUM_149\n# define BOOST_PP_SEQ_ENUM_151(x) x, BOOST_PP_SEQ_ENUM_150\n# define BOOST_PP_SEQ_ENUM_152(x) x, BOOST_PP_SEQ_ENUM_151\n# define BOOST_PP_SEQ_ENUM_153(x) x, BOOST_PP_SEQ_ENUM_152\n# define BOOST_PP_SEQ_ENUM_154(x) x, BOOST_PP_SEQ_ENUM_153\n# define BOOST_PP_SEQ_ENUM_155(x) x, BOOST_PP_SEQ_ENUM_154\n# define BOOST_PP_SEQ_ENUM_156(x) x, BOOST_PP_SEQ_ENUM_155\n# define BOOST_PP_SEQ_ENUM_157(x) x, BOOST_PP_SEQ_ENUM_156\n# define BOOST_PP_SEQ_ENUM_158(x) x, BOOST_PP_SEQ_ENUM_157\n# define BOOST_PP_SEQ_ENUM_159(x) x, BOOST_PP_SEQ_ENUM_158\n# define BOOST_PP_SEQ_ENUM_160(x) x, BOOST_PP_SEQ_ENUM_159\n# define BOOST_PP_SEQ_ENUM_161(x) x, BOOST_PP_SEQ_ENUM_160\n# define BOOST_PP_SEQ_ENUM_162(x) x, BOOST_PP_SEQ_ENUM_161\n# define BOOST_PP_SEQ_ENUM_163(x) x, BOOST_PP_SEQ_ENUM_162\n# define BOOST_PP_SEQ_ENUM_164(x) x, BOOST_PP_SEQ_ENUM_163\n# define BOOST_PP_SEQ_ENUM_165(x) x, BOOST_PP_SEQ_ENUM_164\n# define BOOST_PP_SEQ_ENUM_166(x) x, BOOST_PP_SEQ_ENUM_165\n# define BOOST_PP_SEQ_ENUM_167(x) x, BOOST_PP_SEQ_ENUM_166\n# define BOOST_PP_SEQ_ENUM_168(x) x, BOOST_PP_SEQ_ENUM_167\n# define BOOST_PP_SEQ_ENUM_169(x) x, BOOST_PP_SEQ_ENUM_168\n# define BOOST_PP_SEQ_ENUM_170(x) x, BOOST_PP_SEQ_ENUM_169\n# define BOOST_PP_SEQ_ENUM_171(x) x, BOOST_PP_SEQ_ENUM_170\n# define BOOST_PP_SEQ_ENUM_172(x) x, BOOST_PP_SEQ_ENUM_171\n# define BOOST_PP_SEQ_ENUM_173(x) x, BOOST_PP_SEQ_ENUM_172\n# define BOOST_PP_SEQ_ENUM_174(x) x, BOOST_PP_SEQ_ENUM_173\n# define BOOST_PP_SEQ_ENUM_175(x) x, BOOST_PP_SEQ_ENUM_174\n# define BOOST_PP_SEQ_ENUM_176(x) x, BOOST_PP_SEQ_ENUM_175\n# define BOOST_PP_SEQ_ENUM_177(x) x, BOOST_PP_SEQ_ENUM_176\n# define BOOST_PP_SEQ_ENUM_178(x) x, BOOST_PP_SEQ_ENUM_177\n# define BOOST_PP_SEQ_ENUM_179(x) x, BOOST_PP_SEQ_ENUM_178\n# define 
BOOST_PP_SEQ_ENUM_180(x) x, BOOST_PP_SEQ_ENUM_179\n# define BOOST_PP_SEQ_ENUM_181(x) x, BOOST_PP_SEQ_ENUM_180\n# define BOOST_PP_SEQ_ENUM_182(x) x, BOOST_PP_SEQ_ENUM_181\n# define BOOST_PP_SEQ_ENUM_183(x) x, BOOST_PP_SEQ_ENUM_182\n# define BOOST_PP_SEQ_ENUM_184(x) x, BOOST_PP_SEQ_ENUM_183\n# define BOOST_PP_SEQ_ENUM_185(x) x, BOOST_PP_SEQ_ENUM_184\n# define BOOST_PP_SEQ_ENUM_186(x) x, BOOST_PP_SEQ_ENUM_185\n# define BOOST_PP_SEQ_ENUM_187(x) x, BOOST_PP_SEQ_ENUM_186\n# define BOOST_PP_SEQ_ENUM_188(x) x, BOOST_PP_SEQ_ENUM_187\n# define BOOST_PP_SEQ_ENUM_189(x) x, BOOST_PP_SEQ_ENUM_188\n# define BOOST_PP_SEQ_ENUM_190(x) x, BOOST_PP_SEQ_ENUM_189\n# define BOOST_PP_SEQ_ENUM_191(x) x, BOOST_PP_SEQ_ENUM_190\n# define BOOST_PP_SEQ_ENUM_192(x) x, BOOST_PP_SEQ_ENUM_191\n# define BOOST_PP_SEQ_ENUM_193(x) x, BOOST_PP_SEQ_ENUM_192\n# define BOOST_PP_SEQ_ENUM_194(x) x, BOOST_PP_SEQ_ENUM_193\n# define BOOST_PP_SEQ_ENUM_195(x) x, BOOST_PP_SEQ_ENUM_194\n# define BOOST_PP_SEQ_ENUM_196(x) x, BOOST_PP_SEQ_ENUM_195\n# define BOOST_PP_SEQ_ENUM_197(x) x, BOOST_PP_SEQ_ENUM_196\n# define BOOST_PP_SEQ_ENUM_198(x) x, BOOST_PP_SEQ_ENUM_197\n# define BOOST_PP_SEQ_ENUM_199(x) x, BOOST_PP_SEQ_ENUM_198\n# define BOOST_PP_SEQ_ENUM_200(x) x, BOOST_PP_SEQ_ENUM_199\n# define BOOST_PP_SEQ_ENUM_201(x) x, BOOST_PP_SEQ_ENUM_200\n# define BOOST_PP_SEQ_ENUM_202(x) x, BOOST_PP_SEQ_ENUM_201\n# define BOOST_PP_SEQ_ENUM_203(x) x, BOOST_PP_SEQ_ENUM_202\n# define BOOST_PP_SEQ_ENUM_204(x) x, BOOST_PP_SEQ_ENUM_203\n# define BOOST_PP_SEQ_ENUM_205(x) x, BOOST_PP_SEQ_ENUM_204\n# define BOOST_PP_SEQ_ENUM_206(x) x, BOOST_PP_SEQ_ENUM_205\n# define BOOST_PP_SEQ_ENUM_207(x) x, BOOST_PP_SEQ_ENUM_206\n# define BOOST_PP_SEQ_ENUM_208(x) x, BOOST_PP_SEQ_ENUM_207\n# define BOOST_PP_SEQ_ENUM_209(x) x, BOOST_PP_SEQ_ENUM_208\n# define BOOST_PP_SEQ_ENUM_210(x) x, BOOST_PP_SEQ_ENUM_209\n# define BOOST_PP_SEQ_ENUM_211(x) x, BOOST_PP_SEQ_ENUM_210\n# define BOOST_PP_SEQ_ENUM_212(x) x, BOOST_PP_SEQ_ENUM_211\n# define 
BOOST_PP_SEQ_ENUM_213(x) x, BOOST_PP_SEQ_ENUM_212\n# define BOOST_PP_SEQ_ENUM_214(x) x, BOOST_PP_SEQ_ENUM_213\n# define BOOST_PP_SEQ_ENUM_215(x) x, BOOST_PP_SEQ_ENUM_214\n# define BOOST_PP_SEQ_ENUM_216(x) x, BOOST_PP_SEQ_ENUM_215\n# define BOOST_PP_SEQ_ENUM_217(x) x, BOOST_PP_SEQ_ENUM_216\n# define BOOST_PP_SEQ_ENUM_218(x) x, BOOST_PP_SEQ_ENUM_217\n# define BOOST_PP_SEQ_ENUM_219(x) x, BOOST_PP_SEQ_ENUM_218\n# define BOOST_PP_SEQ_ENUM_220(x) x, BOOST_PP_SEQ_ENUM_219\n# define BOOST_PP_SEQ_ENUM_221(x) x, BOOST_PP_SEQ_ENUM_220\n# define BOOST_PP_SEQ_ENUM_222(x) x, BOOST_PP_SEQ_ENUM_221\n# define BOOST_PP_SEQ_ENUM_223(x) x, BOOST_PP_SEQ_ENUM_222\n# define BOOST_PP_SEQ_ENUM_224(x) x, BOOST_PP_SEQ_ENUM_223\n# define BOOST_PP_SEQ_ENUM_225(x) x, BOOST_PP_SEQ_ENUM_224\n# define BOOST_PP_SEQ_ENUM_226(x) x, BOOST_PP_SEQ_ENUM_225\n# define BOOST_PP_SEQ_ENUM_227(x) x, BOOST_PP_SEQ_ENUM_226\n# define BOOST_PP_SEQ_ENUM_228(x) x, BOOST_PP_SEQ_ENUM_227\n# define BOOST_PP_SEQ_ENUM_229(x) x, BOOST_PP_SEQ_ENUM_228\n# define BOOST_PP_SEQ_ENUM_230(x) x, BOOST_PP_SEQ_ENUM_229\n# define BOOST_PP_SEQ_ENUM_231(x) x, BOOST_PP_SEQ_ENUM_230\n# define BOOST_PP_SEQ_ENUM_232(x) x, BOOST_PP_SEQ_ENUM_231\n# define BOOST_PP_SEQ_ENUM_233(x) x, BOOST_PP_SEQ_ENUM_232\n# define BOOST_PP_SEQ_ENUM_234(x) x, BOOST_PP_SEQ_ENUM_233\n# define BOOST_PP_SEQ_ENUM_235(x) x, BOOST_PP_SEQ_ENUM_234\n# define BOOST_PP_SEQ_ENUM_236(x) x, BOOST_PP_SEQ_ENUM_235\n# define BOOST_PP_SEQ_ENUM_237(x) x, BOOST_PP_SEQ_ENUM_236\n# define BOOST_PP_SEQ_ENUM_238(x) x, BOOST_PP_SEQ_ENUM_237\n# define BOOST_PP_SEQ_ENUM_239(x) x, BOOST_PP_SEQ_ENUM_238\n# define BOOST_PP_SEQ_ENUM_240(x) x, BOOST_PP_SEQ_ENUM_239\n# define BOOST_PP_SEQ_ENUM_241(x) x, BOOST_PP_SEQ_ENUM_240\n# define BOOST_PP_SEQ_ENUM_242(x) x, BOOST_PP_SEQ_ENUM_241\n# define BOOST_PP_SEQ_ENUM_243(x) x, BOOST_PP_SEQ_ENUM_242\n# define BOOST_PP_SEQ_ENUM_244(x) x, BOOST_PP_SEQ_ENUM_243\n# define BOOST_PP_SEQ_ENUM_245(x) x, BOOST_PP_SEQ_ENUM_244\n# define 
BOOST_PP_SEQ_ENUM_246(x) x, BOOST_PP_SEQ_ENUM_245\n# define BOOST_PP_SEQ_ENUM_247(x) x, BOOST_PP_SEQ_ENUM_246\n# define BOOST_PP_SEQ_ENUM_248(x) x, BOOST_PP_SEQ_ENUM_247\n# define BOOST_PP_SEQ_ENUM_249(x) x, BOOST_PP_SEQ_ENUM_248\n# define BOOST_PP_SEQ_ENUM_250(x) x, BOOST_PP_SEQ_ENUM_249\n# define BOOST_PP_SEQ_ENUM_251(x) x, BOOST_PP_SEQ_ENUM_250\n# define BOOST_PP_SEQ_ENUM_252(x) x, BOOST_PP_SEQ_ENUM_251\n# define BOOST_PP_SEQ_ENUM_253(x) x, BOOST_PP_SEQ_ENUM_252\n# define BOOST_PP_SEQ_ENUM_254(x) x, BOOST_PP_SEQ_ENUM_253\n# define BOOST_PP_SEQ_ENUM_255(x) x, BOOST_PP_SEQ_ENUM_254\n# define BOOST_PP_SEQ_ENUM_256(x) x, BOOST_PP_SEQ_ENUM_255\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/first_n.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_FIRST_N_HPP\n# define BOOST_PREPROCESSOR_SEQ_FIRST_N_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/seq/detail/split.hpp>\n# include <boost/preprocessor/tuple/eat.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_SEQ_FIRST_N */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_FIRST_N(n, seq) BOOST_PP_IF(n, BOOST_PP_TUPLE_ELEM, BOOST_PP_TUPLE_EAT_3)(2, 0, BOOST_PP_SEQ_SPLIT(n, seq (nil)))\n# else\n#    define BOOST_PP_SEQ_FIRST_N(n, seq) BOOST_PP_SEQ_FIRST_N_I(n, seq)\n#    define BOOST_PP_SEQ_FIRST_N_I(n, seq) BOOST_PP_IF(n, BOOST_PP_TUPLE_ELEM, BOOST_PP_TUPLE_EAT_3)(2, 0, BOOST_PP_SEQ_SPLIT(n, seq (nil)))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/fold_left.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_FOLD_LEFT_HPP\n# define BOOST_PREPROCESSOR_SEQ_FOLD_LEFT_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/debug/error.hpp>\n# include <boost/preprocessor/detail/auto_rec.hpp>\n# include <boost/preprocessor/seq/seq.hpp>\n# include <boost/preprocessor/seq/size.hpp>\n#\n# /* BOOST_PP_SEQ_FOLD_LEFT */\n#\n# if 0\n#    define BOOST_PP_SEQ_FOLD_LEFT(op, state, seq) ...\n# endif\n#\n# define BOOST_PP_SEQ_FOLD_LEFT BOOST_PP_CAT(BOOST_PP_SEQ_FOLD_LEFT_, BOOST_PP_AUTO_REC(BOOST_PP_SEQ_FOLD_LEFT_P, 256))\n# define BOOST_PP_SEQ_FOLD_LEFT_P(n) BOOST_PP_CAT(BOOST_PP_SEQ_FOLD_LEFT_CHECK_, BOOST_PP_SEQ_FOLD_LEFT_I_ ## n(BOOST_PP_SEQ_FOLD_LEFT_O, BOOST_PP_NIL, (nil), 1))\n# define BOOST_PP_SEQ_FOLD_LEFT_O(s, st, _) st\n#\n# define BOOST_PP_SEQ_FOLD_LEFT_257(op, st, ss) BOOST_PP_ERROR(0x0005)\n# define BOOST_PP_SEQ_FOLD_LEFT_I_257(op, st, ss, sz) BOOST_PP_ERROR(0x0005)\n#\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_NIL 1\n#\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_1(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_2(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_3(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_4(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_5(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_6(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_7(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_8(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_9(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_10(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_11(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_12(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_13(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_14(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_15(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_16(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_17(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_18(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_19(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_20(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_21(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_22(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_23(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_24(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_25(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_26(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_27(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_28(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_29(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_30(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_31(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_32(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_33(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_34(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_35(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_36(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_37(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_38(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_39(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_40(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_41(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_42(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_43(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_44(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_45(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_46(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_47(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_48(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_49(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_50(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_51(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_52(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_53(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_54(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_55(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_56(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_57(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_58(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_59(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_60(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_61(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_62(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_63(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_64(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_65(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_66(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_67(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_68(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_69(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_70(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_71(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_72(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_73(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_74(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_75(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_76(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_77(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_78(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_79(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_80(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_81(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_82(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_83(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_84(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_85(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_86(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_87(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_88(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_89(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_90(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_91(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_92(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_93(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_94(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_95(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_96(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_97(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_98(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_99(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_100(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_101(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_102(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_103(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_104(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_105(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_106(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_107(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_108(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_109(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_110(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_111(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_112(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_113(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_114(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_115(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_116(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_117(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_118(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_119(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_120(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_121(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_122(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_123(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_124(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_125(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_126(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_127(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_128(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_129(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_130(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_131(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_132(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_133(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_134(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_135(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_136(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_137(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_138(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_139(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_140(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_141(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_142(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_143(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_144(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_145(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_146(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_147(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_148(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_149(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_150(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_151(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_152(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_153(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_154(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_155(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_156(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_157(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_158(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_159(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_160(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_161(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_162(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_163(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_164(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_165(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_166(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_167(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_168(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_169(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_170(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_171(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_172(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_173(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_174(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_175(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_176(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_177(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_178(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_179(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_180(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_181(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_182(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_183(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_184(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_185(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_186(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_187(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_188(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_189(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_190(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_191(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_192(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_193(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_194(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_195(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_196(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_197(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_198(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_199(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_200(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_201(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_202(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_203(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_204(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_205(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_206(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_207(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_208(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_209(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_210(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_211(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_212(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_213(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_214(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_215(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_216(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_217(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_218(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_219(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_220(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_221(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_222(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_223(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_224(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_225(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_226(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_227(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_228(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_229(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_230(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_231(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_232(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_233(op, st, ss, sz) 0\n# define 
BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_234(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_235(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_236(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_237(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_238(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_239(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_240(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_241(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_242(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_243(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_244(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_245(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_246(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_247(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_248(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_249(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_250(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_251(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_252(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_253(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_254(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_255(op, st, ss, sz) 0\n# define BOOST_PP_SEQ_FOLD_LEFT_CHECK_BOOST_PP_SEQ_FOLD_LEFT_I_256(op, st, ss, sz) 0\n#\n# define 
BOOST_PP_SEQ_FOLD_LEFT_F(op, st, ss, sz) st\n#\n# define BOOST_PP_SEQ_FOLD_LEFT_1(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_1(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_2(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_2(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_3(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_3(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_4(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_4(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_5(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_5(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_6(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_6(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_7(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_7(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_8(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_8(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_9(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_9(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_10(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_10(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_11(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_11(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_12(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_12(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_13(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_13(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_14(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_14(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_15(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_15(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_16(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_16(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_17(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_17(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_18(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_18(op, st, 
ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_19(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_19(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_20(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_20(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_21(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_21(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_22(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_22(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_23(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_23(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_24(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_24(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_25(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_25(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_26(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_26(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_27(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_27(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_28(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_28(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_29(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_29(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_30(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_30(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_31(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_31(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_32(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_32(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_33(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_33(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_34(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_34(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_35(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_35(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_36(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_36(op, st, 
ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_37(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_37(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_38(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_38(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_39(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_39(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_40(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_40(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_41(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_41(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_42(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_42(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_43(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_43(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_44(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_44(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_45(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_45(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_46(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_46(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_47(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_47(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_48(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_48(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_49(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_49(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_50(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_50(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_51(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_51(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_52(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_52(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_53(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_53(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_54(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_54(op, st, 
ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_55(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_55(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_56(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_56(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_57(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_57(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_58(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_58(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_59(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_59(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_60(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_60(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_61(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_61(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_62(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_62(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_63(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_63(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_64(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_64(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_65(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_65(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_66(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_66(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_67(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_67(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_68(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_68(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_69(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_69(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_70(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_70(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_71(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_71(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_72(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_72(op, st, 
ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_73(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_73(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_74(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_74(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_75(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_75(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_76(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_76(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_77(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_77(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_78(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_78(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_79(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_79(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_80(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_80(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_81(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_81(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_82(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_82(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_83(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_83(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_84(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_84(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_85(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_85(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_86(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_86(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_87(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_87(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_88(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_88(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_89(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_89(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_90(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_90(op, st, 
ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_91(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_91(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_92(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_92(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_93(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_93(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_94(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_94(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_95(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_95(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_96(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_96(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_97(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_97(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_98(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_98(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_99(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_99(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_100(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_100(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_101(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_101(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_102(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_102(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_103(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_103(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_104(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_104(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_105(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_105(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_106(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_106(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_107(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_107(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_108(op, st, ss) 
BOOST_PP_SEQ_FOLD_LEFT_I_108(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_109(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_109(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_110(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_110(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_111(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_111(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_112(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_112(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_113(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_113(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_114(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_114(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_115(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_115(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_116(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_116(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_117(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_117(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_118(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_118(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_119(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_119(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_120(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_120(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_121(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_121(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_122(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_122(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_123(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_123(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_124(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_124(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_125(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_125(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define 
BOOST_PP_SEQ_FOLD_LEFT_126(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_126(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_127(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_127(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_128(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_128(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_129(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_129(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_130(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_130(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_131(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_131(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_132(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_132(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_133(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_133(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_134(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_134(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_135(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_135(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_136(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_136(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_137(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_137(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_138(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_138(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_139(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_139(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_140(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_140(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_141(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_141(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_142(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_142(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_143(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_143(op, st, 
ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_144(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_144(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_145(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_145(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_146(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_146(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_147(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_147(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_148(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_148(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_149(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_149(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_150(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_150(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_151(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_151(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_152(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_152(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_153(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_153(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_154(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_154(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_155(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_155(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_156(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_156(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_157(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_157(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_158(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_158(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_159(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_159(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_160(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_160(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_161(op, st, ss) 
BOOST_PP_SEQ_FOLD_LEFT_I_161(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_162(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_162(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_163(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_163(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_164(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_164(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_165(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_165(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_166(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_166(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_167(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_167(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_168(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_168(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_169(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_169(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_170(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_170(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_171(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_171(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_172(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_172(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_173(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_173(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_174(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_174(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_175(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_175(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_176(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_176(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_177(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_177(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_178(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_178(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define 
BOOST_PP_SEQ_FOLD_LEFT_179(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_179(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_180(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_180(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_181(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_181(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_182(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_182(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_183(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_183(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_184(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_184(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_185(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_185(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_186(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_186(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_187(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_187(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_188(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_188(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_189(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_189(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_190(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_190(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_191(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_191(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_192(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_192(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_193(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_193(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_194(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_194(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_195(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_195(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_196(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_196(op, st, 
ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_197(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_197(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_198(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_198(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_199(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_199(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_200(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_200(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_201(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_201(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_202(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_202(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_203(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_203(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_204(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_204(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_205(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_205(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_206(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_206(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_207(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_207(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_208(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_208(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_209(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_209(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_210(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_210(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_211(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_211(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_212(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_212(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_213(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_213(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_214(op, st, ss) 
BOOST_PP_SEQ_FOLD_LEFT_I_214(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_215(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_215(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_216(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_216(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_217(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_217(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_218(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_218(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_219(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_219(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_220(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_220(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_221(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_221(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_222(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_222(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_223(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_223(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_224(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_224(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_225(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_225(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_226(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_226(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_227(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_227(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_228(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_228(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_229(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_229(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_230(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_230(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_231(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_231(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define 
BOOST_PP_SEQ_FOLD_LEFT_232(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_232(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_233(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_233(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_234(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_234(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_235(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_235(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_236(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_236(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_237(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_237(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_238(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_238(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_239(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_239(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_240(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_240(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_241(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_241(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_242(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_242(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_243(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_243(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_244(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_244(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_245(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_245(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_246(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_246(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_247(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_247(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_248(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_248(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_249(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_249(op, st, 
ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_250(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_250(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_251(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_251(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_252(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_252(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_253(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_253(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_254(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_254(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_255(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_255(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n# define BOOST_PP_SEQ_FOLD_LEFT_256(op, st, ss) BOOST_PP_SEQ_FOLD_LEFT_I_256(op, st, ss, BOOST_PP_SEQ_SIZE(ss))\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_1(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_2, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(2, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_2(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_3, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(3, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_3(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_4, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(4, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_4(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_5, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(5, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_5(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_6, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(6, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_6(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_7, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(7, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_7(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_8, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(8, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_8(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_9, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(9, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_9(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_10, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(10, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_10(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_11, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(11, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_11(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_12, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(12, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_12(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_13, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(13, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_13(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_14, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(14, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_14(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_15, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(15, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_15(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_16, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(16, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_16(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_17, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(17, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_17(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_18, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(18, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_18(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_19, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(19, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_19(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_20, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(20, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_20(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_21, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(21, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_21(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_22, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(22, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_22(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_23, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(23, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_23(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_24, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(24, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_24(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_25, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(25, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_25(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_26, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(26, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_26(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_27, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(27, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_27(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_28, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(28, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_28(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_29, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(29, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_29(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_30, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(30, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_30(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_31, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(31, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_31(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_32, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(32, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_32(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_33, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(33, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_33(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_34, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(34, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_34(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_35, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(35, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_35(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_36, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(36, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_36(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_37, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(37, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_37(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_38, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(38, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_38(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_39, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(39, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_39(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_40, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(40, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_40(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_41, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(41, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_41(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_42, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(42, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_42(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_43, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(43, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_43(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_44, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(44, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_44(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_45, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(45, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_45(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_46, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(46, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_46(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_47, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(47, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_47(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_48, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(48, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_48(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_49, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(49, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_49(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_50, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(50, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_50(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_51, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(51, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_51(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_52, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(52, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_52(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_53, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(53, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_53(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_54, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(54, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_54(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_55, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(55, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_55(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_56, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(56, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_56(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_57, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(57, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_57(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_58, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(58, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_58(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_59, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(59, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_59(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_60, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(60, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_60(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_61, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(61, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_61(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_62, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(62, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_62(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_63, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(63, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_63(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_64, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(64, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_64(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_65, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(65, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_65(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_66, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(66, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_66(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_67, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(67, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_67(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_68, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(68, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_68(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_69, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(69, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_69(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_70, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(70, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_70(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_71, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(71, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_71(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_72, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(72, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_72(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_73, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(73, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_73(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_74, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(74, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_74(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_75, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(75, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_75(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_76, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(76, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_76(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_77, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(77, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_77(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_78, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(78, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_78(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_79, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(79, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_79(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_80, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(80, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_80(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_81, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(81, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_81(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_82, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(82, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_82(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_83, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(83, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_83(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_84, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(84, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_84(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_85, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(85, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_85(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_86, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(86, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_86(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_87, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(87, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_87(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_88, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(88, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_88(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_89, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(89, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_89(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_90, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(90, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_90(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_91, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(91, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_91(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_92, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(92, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_92(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_93, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(93, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_93(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_94, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(94, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_94(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_95, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(95, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_95(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_96, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(96, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_96(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_97, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(97, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_97(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_98, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(98, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_98(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_99, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(99, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_99(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_100, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(100, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_100(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_101, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(101, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_101(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_102, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(102, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_102(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_103, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(103, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_103(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_104, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(104, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_104(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_105, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(105, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), 
BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_105(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_106, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(106, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_106(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_107, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(107, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_107(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_108, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(108, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_108(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_109, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(109, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_109(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_110, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(110, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_110(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_111, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(111, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_111(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_112, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(112, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_112(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_113, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(113, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_113(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_114, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(114, st, 
BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_114(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_115, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(115, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_115(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_116, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(116, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_116(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_117, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(117, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_117(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_118, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(118, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_118(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_119, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(119, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_119(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_120, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(120, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_120(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_121, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(121, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_121(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_122, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(122, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_122(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_123, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(123, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_123(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_124, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(124, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_124(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_125, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(125, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_125(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_126, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(126, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_126(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_127, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(127, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_127(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_128, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(128, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_128(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_129, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(129, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_129(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_130, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(130, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_130(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_131, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(131, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_131(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), 
BOOST_PP_SEQ_FOLD_LEFT_I_132, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(132, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_132(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_133, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(133, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_133(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_134, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(134, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_134(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_135, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(135, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_135(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_136, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(136, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_136(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_137, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(137, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_137(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_138, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(138, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_138(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_139, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(139, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_139(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_140, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(140, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_140(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_141, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(141, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_141(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_142, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(142, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_142(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_143, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(143, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_143(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_144, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(144, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_144(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_145, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(145, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_145(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_146, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(146, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_146(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_147, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(147, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_147(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_148, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(148, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_148(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_149, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(149, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_149(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_150, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(150, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_150(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_151, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(151, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_151(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_152, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(152, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_152(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_153, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(153, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_153(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_154, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(154, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_154(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_155, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(155, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_155(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_156, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(156, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_156(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_157, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(157, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_157(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_158, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(158, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), 
BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_158(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_159, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(159, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_159(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_160, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(160, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_160(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_161, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(161, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_161(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_162, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(162, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_162(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_163, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(163, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_163(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_164, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(164, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_164(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_165, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(165, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_165(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_166, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(166, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_166(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_167, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(167, st, 
BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_167(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_168, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(168, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_168(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_169, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(169, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_169(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_170, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(170, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_170(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_171, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(171, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_171(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_172, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(172, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_172(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_173, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(173, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_173(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_174, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(174, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_174(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_175, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(175, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_175(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_176, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(176, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_176(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_177, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(177, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_177(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_178, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(178, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_178(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_179, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(179, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_179(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_180, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(180, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_180(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_181, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(181, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_181(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_182, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(182, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_182(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_183, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(183, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_183(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_184, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(184, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_184(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), 
BOOST_PP_SEQ_FOLD_LEFT_I_185, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(185, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_185(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_186, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(186, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_186(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_187, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(187, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_187(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_188, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(188, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_188(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_189, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(189, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_189(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_190, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(190, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_190(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_191, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(191, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_191(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_192, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(192, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_192(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_193, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(193, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_193(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_194, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(194, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_194(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_195, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(195, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_195(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_196, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(196, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_196(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_197, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(197, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_197(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_198, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(198, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_198(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_199, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(199, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_199(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_200, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(200, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_200(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_201, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(201, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_201(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_202, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(202, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_202(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_203, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(203, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_203(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_204, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(204, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_204(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_205, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(205, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_205(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_206, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(206, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_206(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_207, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(207, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_207(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_208, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(208, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_208(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_209, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(209, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_209(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_210, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(210, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_210(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_211, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(211, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), 
BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_211(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_212, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(212, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_212(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_213, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(213, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_213(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_214, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(214, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_214(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_215, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(215, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_215(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_216, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(216, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_216(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_217, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(217, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_217(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_218, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(218, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_218(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_219, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(219, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_219(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_220, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(220, st, 
BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_220(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_221, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(221, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_221(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_222, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(222, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_222(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_223, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(223, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_223(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_224, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(224, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_224(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_225, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(225, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_225(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_226, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(226, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_226(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_227, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(227, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_227(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_228, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(228, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_228(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_229, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(229, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_229(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_230, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(230, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_230(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_231, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(231, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_231(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_232, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(232, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_232(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_233, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(233, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_233(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_234, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(234, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_234(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_235, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(235, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_235(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_236, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(236, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_236(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_237, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(237, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_237(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), 
BOOST_PP_SEQ_FOLD_LEFT_I_238, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(238, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_238(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_239, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(239, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_239(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_240, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(240, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_240(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_241, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(241, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_241(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_242, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(242, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_242(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_243, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(243, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_243(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_244, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(244, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_244(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_245, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(245, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_245(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_246, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(246, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_246(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_247, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(247, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_247(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_248, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(248, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_248(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_249, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(249, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_249(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_250, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(250, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_250(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_251, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(251, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_251(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_252, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(252, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_252(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_253, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(253, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_253(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_254, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(254, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_254(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_255, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(255, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_255(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_256, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(256, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_256(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_257, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op(257, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n# else\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_1(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_2, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(2, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_2(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_3, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(3, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_3(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_4, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(4, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_4(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_5, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(5, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_5(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_6, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(6, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_6(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_7, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(7, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_7(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_8, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(8, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    
define BOOST_PP_SEQ_FOLD_LEFT_I_8(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_9, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(9, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_9(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_10, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(10, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_10(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_11, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(11, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_11(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_12, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(12, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_12(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_13, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(13, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_13(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_14, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(14, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_14(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_15, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(15, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_15(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_16, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(16, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_16(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_17, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(17, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), 
BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_17(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_18, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(18, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_18(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_19, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(19, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_19(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_20, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(20, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_20(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_21, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(21, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_21(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_22, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(22, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_22(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_23, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(23, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_23(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_24, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(24, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_24(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_25, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(25, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_25(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_26, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(26, st, BOOST_PP_SEQ_HEAD(ss)), 
BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_26(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_27, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(27, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_27(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_28, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(28, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_28(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_29, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(29, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_29(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_30, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(30, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_30(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_31, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(31, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_31(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_32, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(32, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_32(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_33, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(33, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_33(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_34, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(34, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_34(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_35, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(35, st, 
BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_35(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_36, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(36, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_36(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_37, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(37, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_37(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_38, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(38, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_38(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_39, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(39, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_39(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_40, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(40, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_40(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_41, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(41, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_41(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_42, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(42, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_42(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_43, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(43, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_43(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_44, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(44, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_44(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_45, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(45, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_45(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_46, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(46, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_46(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_47, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(47, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_47(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_48, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(48, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_48(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_49, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(49, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_49(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_50, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(50, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_50(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_51, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(51, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_51(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_52, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(52, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_52(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), 
BOOST_PP_SEQ_FOLD_LEFT_I_53, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(53, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_53(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_54, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(54, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_54(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_55, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(55, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_55(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_56, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(56, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_56(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_57, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(57, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_57(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_58, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(58, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_58(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_59, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(59, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_59(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_60, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(60, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_60(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_61, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(61, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_61(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_62, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(62, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_62(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_63, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(63, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_63(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_64, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(64, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_64(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_65, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(65, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_65(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_66, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(66, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_66(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_67, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(67, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_67(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_68, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(68, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_68(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_69, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(69, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_69(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_70, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(70, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_70(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_71, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(71, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_71(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_72, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(72, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_72(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_73, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(73, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_73(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_74, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(74, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_74(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_75, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(75, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_75(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_76, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(76, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_76(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_77, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(77, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_77(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_78, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(78, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_78(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_79, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(79, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), 
BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_79(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_80, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(80, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_80(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_81, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(81, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_81(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_82, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(82, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_82(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_83, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(83, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_83(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_84, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(84, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_84(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_85, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(85, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_85(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_86, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(86, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_86(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_87, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(87, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_87(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_88, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(88, st, BOOST_PP_SEQ_HEAD(ss)), 
BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_88(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_89, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(89, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_89(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_90, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(90, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_90(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_91, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(91, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_91(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_92, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(92, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_92(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_93, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(93, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_93(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_94, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(94, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_94(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_95, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(95, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_95(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_96, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(96, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_96(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_97, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(97, st, 
BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_97(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_98, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(98, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_98(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_99, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(99, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_99(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_100, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(100, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_100(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_101, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(101, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_101(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_102, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(102, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_102(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_103, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(103, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_103(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_104, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(104, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_104(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_105, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(105, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_105(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_106, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(106, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_106(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_107, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(107, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_107(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_108, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(108, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_108(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_109, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(109, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_109(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_110, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(110, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_110(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_111, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(111, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_111(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_112, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(112, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_112(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_113, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(113, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_113(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_114, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(114, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_114(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_115, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(115, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_115(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_116, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(116, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_116(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_117, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(117, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_117(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_118, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(118, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_118(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_119, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(119, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_119(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_120, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(120, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_120(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_121, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(121, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_121(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_122, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(122, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_122(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_123, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(123, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_123(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_124, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(124, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_124(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_125, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(125, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_125(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_126, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(126, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_126(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_127, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(127, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_127(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_128, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(128, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_128(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_129, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(129, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_129(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_130, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(130, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_130(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_131, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(131, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_131(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_132, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(132, st, BOOST_PP_SEQ_HEAD(ss)), 
BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_132(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_133, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(133, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_133(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_134, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(134, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_134(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_135, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(135, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_135(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_136, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(136, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_136(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_137, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(137, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_137(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_138, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(138, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_138(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_139, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(139, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_139(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_140, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(140, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_140(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_141, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(141, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_141(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_142, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(142, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_142(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_143, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(143, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_143(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_144, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(144, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_144(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_145, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(145, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_145(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_146, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(146, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_146(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_147, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(147, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_147(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_148, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(148, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_148(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_149, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(149, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_149(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_150, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(150, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_150(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_151, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(151, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_151(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_152, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(152, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_152(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_153, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(153, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_153(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_154, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(154, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_154(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_155, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(155, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_155(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_156, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(156, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_156(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_157, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(157, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_157(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_158, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(158, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_158(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_159, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(159, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_159(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_160, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(160, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_160(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_161, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(161, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_161(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_162, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(162, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_162(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_163, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(163, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_163(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_164, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(164, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_164(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_165, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(165, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_165(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_166, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(166, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_166(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_167, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(167, st, BOOST_PP_SEQ_HEAD(ss)), 
BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_167(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_168, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(168, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_168(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_169, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(169, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_169(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_170, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(170, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_170(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_171, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(171, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_171(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_172, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(172, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_172(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_173, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(173, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_173(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_174, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(174, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_174(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_175, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(175, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_175(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_176, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(176, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_176(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_177, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(177, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_177(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_178, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(178, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_178(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_179, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(179, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_179(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_180, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(180, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_180(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_181, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(181, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_181(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_182, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(182, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_182(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_183, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(183, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_183(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_184, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(184, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_184(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_185, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(185, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_185(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_186, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(186, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_186(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_187, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(187, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_187(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_188, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(188, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_188(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_189, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(189, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_189(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_190, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(190, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_190(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_191, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(191, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_191(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_192, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(192, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_192(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_193, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(193, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_193(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_194, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(194, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_194(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_195, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(195, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_195(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_196, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(196, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_196(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_197, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(197, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_197(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_198, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(198, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_198(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_199, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(199, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_199(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_200, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(200, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_200(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_201, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(201, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_201(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_202, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(202, st, BOOST_PP_SEQ_HEAD(ss)), 
BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_202(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_203, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(203, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_203(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_204, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(204, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_204(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_205, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(205, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_205(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_206, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(206, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_206(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_207, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(207, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_207(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_208, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(208, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_208(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_209, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(209, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_209(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_210, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(210, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_210(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_211, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(211, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_211(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_212, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(212, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_212(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_213, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(213, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_213(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_214, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(214, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_214(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_215, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(215, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_215(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_216, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(216, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_216(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_217, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(217, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_217(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_218, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(218, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_218(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_219, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(219, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_219(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_220, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(220, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_220(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_221, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(221, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_221(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_222, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(222, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_222(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_223, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(223, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_223(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_224, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(224, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_224(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_225, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(225, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_225(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_226, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(226, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_226(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_227, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(227, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_227(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_228, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(228, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define 
BOOST_PP_SEQ_FOLD_LEFT_I_228(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_229, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(229, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_229(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_230, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(230, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_230(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_231, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(231, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_231(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_232, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(232, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_232(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_233, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(233, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_233(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_234, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(234, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_234(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_235, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(235, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_235(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_236, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(236, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_236(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_237, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(237, st, BOOST_PP_SEQ_HEAD(ss)), 
BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_237(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_238, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(238, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_238(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_239, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(239, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_239(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_240, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(240, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_240(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_241, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(241, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_241(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_242, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(242, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_242(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_243, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(243, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_243(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_244, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(244, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_244(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_245, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(245, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_245(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_246, 
BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(246, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_246(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_247, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(247, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_247(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_248, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(248, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_248(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_249, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(249, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_249(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_250, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(250, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_250(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_251, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(251, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_251(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_252, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(252, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_252(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_253, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(253, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_253(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_254, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(254, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_254(op, st, ss, sz) 
BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_255, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(255, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_255(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_256, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(256, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n#    define BOOST_PP_SEQ_FOLD_LEFT_I_256(op, st, ss, sz) BOOST_PP_IF(BOOST_PP_DEC(sz), BOOST_PP_SEQ_FOLD_LEFT_I_257, BOOST_PP_SEQ_FOLD_LEFT_F)(op, op##(257, st, BOOST_PP_SEQ_HEAD(ss)), BOOST_PP_SEQ_TAIL(ss), BOOST_PP_DEC(sz))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/for_each.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_FOR_EACH_HPP\n# define BOOST_PREPROCESSOR_SEQ_FOR_EACH_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/repetition/for.hpp>\n# include <boost/preprocessor/seq/seq.hpp>\n# include <boost/preprocessor/seq/size.hpp>\n# include <boost/preprocessor/seq/detail/is_empty.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_SEQ_FOR_EACH */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_FOR_EACH(macro, data, seq) BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK(macro, data, seq)\n# else\n#    define BOOST_PP_SEQ_FOR_EACH(macro, data, seq) BOOST_PP_SEQ_FOR_EACH_D(macro, data, seq)\n#    define BOOST_PP_SEQ_FOR_EACH_D(macro, data, seq) BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK(macro, data, seq)\n# endif\n#\n#    define BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_EXEC(macro, data, seq) BOOST_PP_FOR((macro, data, seq, BOOST_PP_SEQ_SIZE(seq)), BOOST_PP_SEQ_FOR_EACH_P, BOOST_PP_SEQ_FOR_EACH_O, BOOST_PP_SEQ_FOR_EACH_M)\n#    define BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_EMPTY(macro, data, seq)\n#\n#    define BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK(macro, data, seq) \\\n\t\tBOOST_PP_IIF 
\\\n\t\t\t( \\\n\t\t\tBOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY(seq), \\\n\t\t\tBOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_EXEC, \\\n\t\t\tBOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_EMPTY \\\n\t\t\t) \\\n\t\t(macro, data, seq) \\\n/**/\n#\n# define BOOST_PP_SEQ_FOR_EACH_P(r, x) BOOST_PP_TUPLE_ELEM(4, 3, x)\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_SEQ_FOR_EACH_O(r, x) BOOST_PP_SEQ_FOR_EACH_O_I x\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_O(r, x) BOOST_PP_SEQ_FOR_EACH_O_I(BOOST_PP_TUPLE_ELEM(4, 0, x), BOOST_PP_TUPLE_ELEM(4, 1, x), BOOST_PP_TUPLE_ELEM(4, 2, x), BOOST_PP_TUPLE_ELEM(4, 3, x))\n# endif\n#\n# define BOOST_PP_SEQ_FOR_EACH_O_I(macro, data, seq, sz) \\\n\tBOOST_PP_SEQ_FOR_EACH_O_I_DEC(macro, data, seq, BOOST_PP_DEC(sz)) \\\n/**/\n# define BOOST_PP_SEQ_FOR_EACH_O_I_DEC(macro, data, seq, sz) \\\n\t( \\\n\tmacro, \\\n\tdata, \\\n\tBOOST_PP_IF \\\n\t\t( \\\n\t\tsz, \\\n\t\tBOOST_PP_SEQ_FOR_EACH_O_I_TAIL, \\\n\t\tBOOST_PP_SEQ_FOR_EACH_O_I_NIL \\\n\t\t) \\\n\t(seq), \\\n\tsz \\\n\t) \\\n/**/\n# define BOOST_PP_SEQ_FOR_EACH_O_I_TAIL(seq) BOOST_PP_SEQ_TAIL(seq)\n# define BOOST_PP_SEQ_FOR_EACH_O_I_NIL(seq) BOOST_PP_NIL\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_SEQ_FOR_EACH_M(r, x) BOOST_PP_SEQ_FOR_EACH_M_IM(r, BOOST_PP_TUPLE_REM_4 x)\n#    define BOOST_PP_SEQ_FOR_EACH_M_IM(r, im) BOOST_PP_SEQ_FOR_EACH_M_I(r, im)\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_M(r, x) BOOST_PP_SEQ_FOR_EACH_M_I(r, BOOST_PP_TUPLE_ELEM(4, 0, x), BOOST_PP_TUPLE_ELEM(4, 1, x), BOOST_PP_TUPLE_ELEM(4, 2, x), BOOST_PP_TUPLE_ELEM(4, 3, x))\n# endif\n#\n# define BOOST_PP_SEQ_FOR_EACH_M_I(r, macro, data, seq, sz) macro(r, data, BOOST_PP_SEQ_HEAD(seq))\n#\n# /* BOOST_PP_SEQ_FOR_EACH_R */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_FOR_EACH_R(r, macro, data, seq) BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_R(r, macro, data, seq)\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_R(r, macro, data, seq) 
BOOST_PP_SEQ_FOR_EACH_R_I(r, macro, data, seq)\n#    define BOOST_PP_SEQ_FOR_EACH_R_I(r, macro, data, seq) BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_R(r, macro, data, seq)\n# endif\n#\n#    define BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_EXEC_R(r, macro, data, seq) BOOST_PP_FOR_ ## r((macro, data, seq, BOOST_PP_SEQ_SIZE(seq)), BOOST_PP_SEQ_FOR_EACH_P, BOOST_PP_SEQ_FOR_EACH_O, BOOST_PP_SEQ_FOR_EACH_M)\n#    define BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_EMPTY_R(r, macro, data, seq)\n#\n#    define BOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_R(r, macro, data, seq) \\\n\t\tBOOST_PP_IIF \\\n\t\t\t( \\\n\t\t\tBOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY(seq), \\\n\t\t\tBOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_EXEC_R, \\\n\t\t\tBOOST_PP_SEQ_FOR_EACH_DETAIL_CHECK_EMPTY_R \\\n\t\t\t) \\\n\t\t(r, macro, data, seq) \\\n/**/\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/for_each_i.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_FOR_EACH_I_HPP\n# define BOOST_PREPROCESSOR_SEQ_FOR_EACH_I_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/arithmetic/inc.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/repetition/for.hpp>\n# include <boost/preprocessor/seq/seq.hpp>\n# include <boost/preprocessor/seq/size.hpp>\n# include <boost/preprocessor/seq/detail/is_empty.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_SEQ_FOR_EACH_I */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_FOR_EACH_I(macro, data, seq) BOOST_PP_SEQ_FOR_EACH_I_DETAIL_CHECK(macro, data, seq)\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_I(macro, data, seq) BOOST_PP_SEQ_FOR_EACH_I_I(macro, data, seq)\n#    define BOOST_PP_SEQ_FOR_EACH_I_I(macro, data, seq) BOOST_PP_SEQ_FOR_EACH_I_DETAIL_CHECK(macro, data, seq)\n# endif\n#\n#    define BOOST_PP_SEQ_FOR_EACH_I_DETAIL_CHECK_EXEC(macro, data, seq) BOOST_PP_FOR((macro, data, seq, 0, BOOST_PP_SEQ_SIZE(seq)), BOOST_PP_SEQ_FOR_EACH_I_P, BOOST_PP_SEQ_FOR_EACH_I_O, BOOST_PP_SEQ_FOR_EACH_I_M)\n#    define BOOST_PP_SEQ_FOR_EACH_I_DETAIL_CHECK_EMPTY(macro, data, seq)\n#\n#    
define BOOST_PP_SEQ_FOR_EACH_I_DETAIL_CHECK(macro, data, seq) \\\n\t\tBOOST_PP_IIF \\\n\t\t\t( \\\n\t\t\tBOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY(seq), \\\n\t\t\tBOOST_PP_SEQ_FOR_EACH_I_DETAIL_CHECK_EXEC, \\\n\t\t\tBOOST_PP_SEQ_FOR_EACH_I_DETAIL_CHECK_EMPTY \\\n\t\t\t) \\\n\t\t(macro, data, seq) \\\n/**/\n#\n# define BOOST_PP_SEQ_FOR_EACH_I_P(r, x) BOOST_PP_TUPLE_ELEM(5, 4, x)\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_SEQ_FOR_EACH_I_O(r, x) BOOST_PP_SEQ_FOR_EACH_I_O_I x\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_I_O(r, x) BOOST_PP_SEQ_FOR_EACH_I_O_I(BOOST_PP_TUPLE_ELEM(5, 0, x), BOOST_PP_TUPLE_ELEM(5, 1, x), BOOST_PP_TUPLE_ELEM(5, 2, x), BOOST_PP_TUPLE_ELEM(5, 3, x), BOOST_PP_TUPLE_ELEM(5, 4, x))\n# endif\n#\n# define BOOST_PP_SEQ_FOR_EACH_I_O_I(macro, data, seq, i, sz) \\\n\tBOOST_PP_SEQ_FOR_EACH_I_O_I_DEC(macro, data, seq, i, BOOST_PP_DEC(sz)) \\\n/**/\n# define BOOST_PP_SEQ_FOR_EACH_I_O_I_DEC(macro, data, seq, i, sz) \\\n\t( \\\n\tmacro, \\\n\tdata, \\\n\tBOOST_PP_IF \\\n\t\t( \\\n\t\tsz, \\\n\t\tBOOST_PP_SEQ_FOR_EACH_I_O_I_TAIL, \\\n\t\tBOOST_PP_SEQ_FOR_EACH_I_O_I_NIL \\\n\t\t) \\\n\t(seq), \\\n\tBOOST_PP_INC(i), \\\n\tsz \\\n\t) \\\n/**/\n# define BOOST_PP_SEQ_FOR_EACH_I_O_I_TAIL(seq) BOOST_PP_SEQ_TAIL(seq)\n# define BOOST_PP_SEQ_FOR_EACH_I_O_I_NIL(seq) BOOST_PP_NIL\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_SEQ_FOR_EACH_I_M(r, x) BOOST_PP_SEQ_FOR_EACH_I_M_IM(r, BOOST_PP_TUPLE_REM_5 x)\n#    define BOOST_PP_SEQ_FOR_EACH_I_M_IM(r, im) BOOST_PP_SEQ_FOR_EACH_I_M_I(r, im)\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_I_M(r, x) BOOST_PP_SEQ_FOR_EACH_I_M_I(r, BOOST_PP_TUPLE_ELEM(5, 0, x), BOOST_PP_TUPLE_ELEM(5, 1, x), BOOST_PP_TUPLE_ELEM(5, 2, x), BOOST_PP_TUPLE_ELEM(5, 3, x), BOOST_PP_TUPLE_ELEM(5, 4, x))\n# endif\n#\n# define BOOST_PP_SEQ_FOR_EACH_I_M_I(r, macro, data, seq, i, sz) macro(r, data, i, BOOST_PP_SEQ_HEAD(seq))\n#\n# /* BOOST_PP_SEQ_FOR_EACH_I_R */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & 
BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_FOR_EACH_I_R(r, macro, data, seq) BOOST_PP_SEQ_FOR_EACH_I_R_DETAIL_CHECK(r, macro, data, seq)\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_I_R(r, macro, data, seq) BOOST_PP_SEQ_FOR_EACH_I_R_I(r, macro, data, seq)\n#    define BOOST_PP_SEQ_FOR_EACH_I_R_I(r, macro, data, seq) BOOST_PP_SEQ_FOR_EACH_I_R_DETAIL_CHECK(r, macro, data, seq)\n# endif\n#\n#    define BOOST_PP_SEQ_FOR_EACH_I_R_DETAIL_CHECK_EXEC(r, macro, data, seq) BOOST_PP_FOR_ ## r((macro, data, seq, 0, BOOST_PP_SEQ_SIZE(seq)), BOOST_PP_SEQ_FOR_EACH_I_P, BOOST_PP_SEQ_FOR_EACH_I_O, BOOST_PP_SEQ_FOR_EACH_I_M)\n#    define BOOST_PP_SEQ_FOR_EACH_I_R_DETAIL_CHECK_EMPTY(r, macro, data, seq)\n#\n#    define BOOST_PP_SEQ_FOR_EACH_I_R_DETAIL_CHECK(r, macro, data, seq) \\\n\t\tBOOST_PP_IIF \\\n\t\t\t( \\\n\t\t\tBOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY(seq), \\\n\t\t\tBOOST_PP_SEQ_FOR_EACH_I_R_DETAIL_CHECK_EXEC, \\\n\t\t\tBOOST_PP_SEQ_FOR_EACH_I_R_DETAIL_CHECK_EMPTY \\\n\t\t\t) \\\n\t\t(r, macro, data, seq) \\\n/**/\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/for_each_product.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_FOR_EACH_PRODUCT_HPP\n# define BOOST_PREPROCESSOR_SEQ_FOR_EACH_PRODUCT_HPP\n#\n# include <boost/preprocessor/arithmetic/dec.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/if.hpp>\n# include <boost/preprocessor/repetition/for.hpp>\n# include <boost/preprocessor/seq/seq.hpp>\n# include <boost/preprocessor/seq/size.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n#\n# /* BOOST_PP_SEQ_FOR_EACH_PRODUCT */\n#\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT(macro, sets) BOOST_PP_SEQ_FOR_EACH_PRODUCT_E(BOOST_PP_FOR, macro, sets)\n#\n# /* BOOST_PP_SEQ_FOR_EACH_PRODUCT_R */\n#\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_R(r, macro, sets) BOOST_PP_SEQ_FOR_EACH_PRODUCT_E(BOOST_PP_FOR_ ## r, macro, sets)\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_E(impl, macro, sets) impl((BOOST_PP_SEQ_HEAD(sets)(nil), BOOST_PP_SEQ_TAIL(sets)(nil), (nil), macro), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_0)\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_E(impl, macro, sets) BOOST_PP_SEQ_FOR_EACH_PRODUCT_E_I(impl, macro, sets)\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_E_I(impl, macro, sets) impl((BOOST_PP_SEQ_HEAD(sets)(nil), 
BOOST_PP_SEQ_TAIL(sets)(nil), (nil), macro), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_0)\n# endif\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_STRICT()\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_P(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_P_I data\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_P_I(cset, rset, res, macro) BOOST_PP_DEC(BOOST_PP_SEQ_SIZE(cset))\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_P(r, data) BOOST_PP_DEC(BOOST_PP_SEQ_SIZE(BOOST_PP_TUPLE_ELEM(4, 0, data)))\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_O(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_O_I data\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_O_I(cset, rset, res, macro) (BOOST_PP_SEQ_TAIL(cset), rset, res, macro)\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_O(r, data) (BOOST_PP_SEQ_TAIL(BOOST_PP_TUPLE_ELEM(4, 0, data)), BOOST_PP_TUPLE_ELEM(4, 1, data), BOOST_PP_TUPLE_ELEM(4, 2, data), BOOST_PP_TUPLE_ELEM(4, 3, data))\n# endif\n#\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, i) BOOST_PP_IF(BOOST_PP_DEC(BOOST_PP_SEQ_SIZE(BOOST_PP_TUPLE_ELEM(4, 1, data))), BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_ ## i, BOOST_PP_SEQ_FOR_EACH_PRODUCT_I)\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_I(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_I_I(r, BOOST_PP_TUPLE_ELEM(4, 0, data), BOOST_PP_TUPLE_ELEM(4, 1, data), BOOST_PP_TUPLE_ELEM(4, 2, data), BOOST_PP_TUPLE_ELEM(4, 3, data))\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_I(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_I_IM(r, BOOST_PP_TUPLE_REM_4 data)\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_I_IM(r, im) BOOST_PP_SEQ_FOR_EACH_PRODUCT_I_I(r, im)\n# endif\n#\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_I_I(r, cset, rset, res, macro) macro(r, BOOST_PP_SEQ_TAIL(res (BOOST_PP_SEQ_HEAD(cset))))\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define 
BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_H_I data\n# else\n#    define BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_H_I(BOOST_PP_TUPLE_ELEM(4, 0, data), BOOST_PP_TUPLE_ELEM(4, 1, data), BOOST_PP_TUPLE_ELEM(4, 2, data), BOOST_PP_TUPLE_ELEM(4, 3, data))\n# endif\n#\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_H_I(cset, rset, res, macro) (BOOST_PP_SEQ_HEAD(rset)(nil), BOOST_PP_SEQ_TAIL(rset), res (BOOST_PP_SEQ_HEAD(cset)), macro)\n#\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_0(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 0)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_1(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 1)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_2(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 2)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_3(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 3)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_4(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 4)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_5(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 5)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_6(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 6)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_7(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 7)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_8(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 8)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_9(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 9)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_10(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 10)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_11(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 11)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_12(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 12)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_13(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 13)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_14(r, data) 
BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 14)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_15(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 15)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_16(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 16)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_17(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 17)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_18(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 18)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_19(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 19)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_20(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 20)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_21(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 21)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_22(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 22)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_23(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 23)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_24(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 24)(r, data)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_25(r, data) BOOST_PP_SEQ_FOR_EACH_PRODUCT_C(data, 25)(r, data)\n#\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_0(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_1)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_1(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_2)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_2(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_3)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_3(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, 
BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_4)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_4(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_5)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_5(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_6)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_6(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_7)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_7(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_8)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_8(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_9)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_9(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_10)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_10(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_11)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_11(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_12)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_12(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_13)\n# define 
BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_13(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_14)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_14(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_15)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_15(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_16)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_16(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_17)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_17(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_18)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_18(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_19)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_19(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_20)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_20(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_21)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_21(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_22)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_22(r, data) BOOST_PP_FOR_ ## 
r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_23)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_23(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_24)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_24(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_25)\n# define BOOST_PP_SEQ_FOR_EACH_PRODUCT_N_25(r, data) BOOST_PP_FOR_ ## r(BOOST_PP_SEQ_FOR_EACH_PRODUCT_H(data), BOOST_PP_SEQ_FOR_EACH_PRODUCT_P, BOOST_PP_SEQ_FOR_EACH_PRODUCT_O, BOOST_PP_SEQ_FOR_EACH_PRODUCT_M_26)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/push_back.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_PUSH_BACK_HPP\n# define BOOST_PREPROCESSOR_SEQ_PUSH_BACK_HPP\n#\n# /* BOOST_PP_SEQ_PUSH_BACK */\n#\n# define BOOST_PP_SEQ_PUSH_BACK(seq, elem) seq(elem)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/rest_n.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_REST_N_HPP\n# define BOOST_PREPROCESSOR_SEQ_REST_N_HPP\n#\n# include <boost/preprocessor/arithmetic/inc.hpp>\n# include <boost/preprocessor/comparison/not_equal.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/control/expr_iif.hpp>\n# include <boost/preprocessor/facilities/identity.hpp>\n# include <boost/preprocessor/logical/bitand.hpp>\n# include <boost/preprocessor/seq/detail/is_empty.hpp>\n# include <boost/preprocessor/seq/detail/split.hpp>\n# include <boost/preprocessor/tuple/elem.hpp>\n#\n# /* BOOST_PP_SEQ_REST_N */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_REST_N(n, seq) BOOST_PP_SEQ_REST_N_DETAIL_EXEC(n, seq, BOOST_PP_SEQ_DETAIL_EMPTY_SIZE(seq))\n# else\n#    define BOOST_PP_SEQ_REST_N(n, seq) BOOST_PP_SEQ_REST_N_I(n, seq)\n#    define BOOST_PP_SEQ_REST_N_I(n, seq) BOOST_PP_SEQ_REST_N_DETAIL_EXEC(n, seq, BOOST_PP_SEQ_DETAIL_EMPTY_SIZE(seq))\n# endif\n#\n#    define BOOST_PP_SEQ_REST_N_DETAIL_EXEC(n, seq, size) \\\n\t\tBOOST_PP_EXPR_IIF \\\n\t\t\t( \\\n\t\t\tBOOST_PP_BITAND \\\n\t\t\t\t( \\\n\t\t\t\tBOOST_PP_SEQ_DETAIL_IS_NOT_EMPTY_SIZE(size), \\\n\t\t\t\tBOOST_PP_NOT_EQUAL(n,size) \\\n\t\t\t\t), \\\n\t\t\tBOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_SEQ_SPLIT(BOOST_PP_INC(n), BOOST_PP_IDENTITY( (nil) seq )))() \\\n\t\t\t) \\\n/**/\n#\n# 
endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/seq.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_SEQ_HPP\n# define BOOST_PREPROCESSOR_SEQ_SEQ_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/seq/elem.hpp>\n#\n# /* BOOST_PP_SEQ_HEAD */\n#\n# define BOOST_PP_SEQ_HEAD(seq) BOOST_PP_SEQ_ELEM(0, seq)\n#\n# /* BOOST_PP_SEQ_TAIL */\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SEQ_TAIL(seq) BOOST_PP_SEQ_TAIL_1((seq))\n#    define BOOST_PP_SEQ_TAIL_1(par) BOOST_PP_SEQ_TAIL_2 ## par\n#    define BOOST_PP_SEQ_TAIL_2(seq) BOOST_PP_SEQ_TAIL_I ## seq\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_SEQ_TAIL(seq) BOOST_PP_SEQ_TAIL_ID(BOOST_PP_SEQ_TAIL_I seq)\n#    define BOOST_PP_SEQ_TAIL_ID(id) id\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_TAIL(seq) BOOST_PP_SEQ_TAIL_D(seq)\n#    define BOOST_PP_SEQ_TAIL_D(seq) BOOST_PP_SEQ_TAIL_I seq\n# else\n#    define BOOST_PP_SEQ_TAIL(seq) BOOST_PP_SEQ_TAIL_I seq\n# endif\n#\n# define BOOST_PP_SEQ_TAIL_I(x)\n#\n# /* BOOST_PP_SEQ_NIL */\n#\n# define BOOST_PP_SEQ_NIL(x) (x)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/size.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_SIZE_HPP\n# define BOOST_PREPROCESSOR_SEQ_SIZE_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_SEQ_SIZE(seq) BOOST_PP_SEQ_SIZE_I((seq))\n#    define BOOST_PP_SEQ_SIZE_I(par) BOOST_PP_SEQ_SIZE_II ## par\n#    define BOOST_PP_SEQ_SIZE_II(seq) BOOST_PP_CAT(BOOST_PP_SEQ_SIZE_, BOOST_PP_SEQ_SIZE_0 ## seq)\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG() || BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_SEQ_SIZE(seq) BOOST_PP_SEQ_SIZE_I(seq)\n#    define BOOST_PP_SEQ_SIZE_I(seq) BOOST_PP_CAT(BOOST_PP_SEQ_SIZE_, BOOST_PP_SEQ_SIZE_0 seq)\n# elif defined(__IBMC__) || defined(__IBMCPP__)\n#    define BOOST_PP_SEQ_SIZE(seq) BOOST_PP_CAT(BOOST_PP_SEQ_SIZE_, BOOST_PP_CAT(BOOST_PP_SEQ_SIZE_0, seq))\n# else\n#    define BOOST_PP_SEQ_SIZE(seq) BOOST_PP_CAT(BOOST_PP_SEQ_SIZE_, BOOST_PP_SEQ_SIZE_0 seq)\n# endif\n#\n# define BOOST_PP_SEQ_SIZE_0(_) BOOST_PP_SEQ_SIZE_1\n# define BOOST_PP_SEQ_SIZE_1(_) BOOST_PP_SEQ_SIZE_2\n# define BOOST_PP_SEQ_SIZE_2(_) BOOST_PP_SEQ_SIZE_3\n# define BOOST_PP_SEQ_SIZE_3(_) BOOST_PP_SEQ_SIZE_4\n# define BOOST_PP_SEQ_SIZE_4(_) BOOST_PP_SEQ_SIZE_5\n# define BOOST_PP_SEQ_SIZE_5(_) BOOST_PP_SEQ_SIZE_6\n# define BOOST_PP_SEQ_SIZE_6(_) 
BOOST_PP_SEQ_SIZE_7\n# define BOOST_PP_SEQ_SIZE_7(_) BOOST_PP_SEQ_SIZE_8\n# define BOOST_PP_SEQ_SIZE_8(_) BOOST_PP_SEQ_SIZE_9\n# define BOOST_PP_SEQ_SIZE_9(_) BOOST_PP_SEQ_SIZE_10\n# define BOOST_PP_SEQ_SIZE_10(_) BOOST_PP_SEQ_SIZE_11\n# define BOOST_PP_SEQ_SIZE_11(_) BOOST_PP_SEQ_SIZE_12\n# define BOOST_PP_SEQ_SIZE_12(_) BOOST_PP_SEQ_SIZE_13\n# define BOOST_PP_SEQ_SIZE_13(_) BOOST_PP_SEQ_SIZE_14\n# define BOOST_PP_SEQ_SIZE_14(_) BOOST_PP_SEQ_SIZE_15\n# define BOOST_PP_SEQ_SIZE_15(_) BOOST_PP_SEQ_SIZE_16\n# define BOOST_PP_SEQ_SIZE_16(_) BOOST_PP_SEQ_SIZE_17\n# define BOOST_PP_SEQ_SIZE_17(_) BOOST_PP_SEQ_SIZE_18\n# define BOOST_PP_SEQ_SIZE_18(_) BOOST_PP_SEQ_SIZE_19\n# define BOOST_PP_SEQ_SIZE_19(_) BOOST_PP_SEQ_SIZE_20\n# define BOOST_PP_SEQ_SIZE_20(_) BOOST_PP_SEQ_SIZE_21\n# define BOOST_PP_SEQ_SIZE_21(_) BOOST_PP_SEQ_SIZE_22\n# define BOOST_PP_SEQ_SIZE_22(_) BOOST_PP_SEQ_SIZE_23\n# define BOOST_PP_SEQ_SIZE_23(_) BOOST_PP_SEQ_SIZE_24\n# define BOOST_PP_SEQ_SIZE_24(_) BOOST_PP_SEQ_SIZE_25\n# define BOOST_PP_SEQ_SIZE_25(_) BOOST_PP_SEQ_SIZE_26\n# define BOOST_PP_SEQ_SIZE_26(_) BOOST_PP_SEQ_SIZE_27\n# define BOOST_PP_SEQ_SIZE_27(_) BOOST_PP_SEQ_SIZE_28\n# define BOOST_PP_SEQ_SIZE_28(_) BOOST_PP_SEQ_SIZE_29\n# define BOOST_PP_SEQ_SIZE_29(_) BOOST_PP_SEQ_SIZE_30\n# define BOOST_PP_SEQ_SIZE_30(_) BOOST_PP_SEQ_SIZE_31\n# define BOOST_PP_SEQ_SIZE_31(_) BOOST_PP_SEQ_SIZE_32\n# define BOOST_PP_SEQ_SIZE_32(_) BOOST_PP_SEQ_SIZE_33\n# define BOOST_PP_SEQ_SIZE_33(_) BOOST_PP_SEQ_SIZE_34\n# define BOOST_PP_SEQ_SIZE_34(_) BOOST_PP_SEQ_SIZE_35\n# define BOOST_PP_SEQ_SIZE_35(_) BOOST_PP_SEQ_SIZE_36\n# define BOOST_PP_SEQ_SIZE_36(_) BOOST_PP_SEQ_SIZE_37\n# define BOOST_PP_SEQ_SIZE_37(_) BOOST_PP_SEQ_SIZE_38\n# define BOOST_PP_SEQ_SIZE_38(_) BOOST_PP_SEQ_SIZE_39\n# define BOOST_PP_SEQ_SIZE_39(_) BOOST_PP_SEQ_SIZE_40\n# define BOOST_PP_SEQ_SIZE_40(_) BOOST_PP_SEQ_SIZE_41\n# define BOOST_PP_SEQ_SIZE_41(_) BOOST_PP_SEQ_SIZE_42\n# define BOOST_PP_SEQ_SIZE_42(_) BOOST_PP_SEQ_SIZE_43\n# 
define BOOST_PP_SEQ_SIZE_43(_) BOOST_PP_SEQ_SIZE_44\n# define BOOST_PP_SEQ_SIZE_44(_) BOOST_PP_SEQ_SIZE_45\n# define BOOST_PP_SEQ_SIZE_45(_) BOOST_PP_SEQ_SIZE_46\n# define BOOST_PP_SEQ_SIZE_46(_) BOOST_PP_SEQ_SIZE_47\n# define BOOST_PP_SEQ_SIZE_47(_) BOOST_PP_SEQ_SIZE_48\n# define BOOST_PP_SEQ_SIZE_48(_) BOOST_PP_SEQ_SIZE_49\n# define BOOST_PP_SEQ_SIZE_49(_) BOOST_PP_SEQ_SIZE_50\n# define BOOST_PP_SEQ_SIZE_50(_) BOOST_PP_SEQ_SIZE_51\n# define BOOST_PP_SEQ_SIZE_51(_) BOOST_PP_SEQ_SIZE_52\n# define BOOST_PP_SEQ_SIZE_52(_) BOOST_PP_SEQ_SIZE_53\n# define BOOST_PP_SEQ_SIZE_53(_) BOOST_PP_SEQ_SIZE_54\n# define BOOST_PP_SEQ_SIZE_54(_) BOOST_PP_SEQ_SIZE_55\n# define BOOST_PP_SEQ_SIZE_55(_) BOOST_PP_SEQ_SIZE_56\n# define BOOST_PP_SEQ_SIZE_56(_) BOOST_PP_SEQ_SIZE_57\n# define BOOST_PP_SEQ_SIZE_57(_) BOOST_PP_SEQ_SIZE_58\n# define BOOST_PP_SEQ_SIZE_58(_) BOOST_PP_SEQ_SIZE_59\n# define BOOST_PP_SEQ_SIZE_59(_) BOOST_PP_SEQ_SIZE_60\n# define BOOST_PP_SEQ_SIZE_60(_) BOOST_PP_SEQ_SIZE_61\n# define BOOST_PP_SEQ_SIZE_61(_) BOOST_PP_SEQ_SIZE_62\n# define BOOST_PP_SEQ_SIZE_62(_) BOOST_PP_SEQ_SIZE_63\n# define BOOST_PP_SEQ_SIZE_63(_) BOOST_PP_SEQ_SIZE_64\n# define BOOST_PP_SEQ_SIZE_64(_) BOOST_PP_SEQ_SIZE_65\n# define BOOST_PP_SEQ_SIZE_65(_) BOOST_PP_SEQ_SIZE_66\n# define BOOST_PP_SEQ_SIZE_66(_) BOOST_PP_SEQ_SIZE_67\n# define BOOST_PP_SEQ_SIZE_67(_) BOOST_PP_SEQ_SIZE_68\n# define BOOST_PP_SEQ_SIZE_68(_) BOOST_PP_SEQ_SIZE_69\n# define BOOST_PP_SEQ_SIZE_69(_) BOOST_PP_SEQ_SIZE_70\n# define BOOST_PP_SEQ_SIZE_70(_) BOOST_PP_SEQ_SIZE_71\n# define BOOST_PP_SEQ_SIZE_71(_) BOOST_PP_SEQ_SIZE_72\n# define BOOST_PP_SEQ_SIZE_72(_) BOOST_PP_SEQ_SIZE_73\n# define BOOST_PP_SEQ_SIZE_73(_) BOOST_PP_SEQ_SIZE_74\n# define BOOST_PP_SEQ_SIZE_74(_) BOOST_PP_SEQ_SIZE_75\n# define BOOST_PP_SEQ_SIZE_75(_) BOOST_PP_SEQ_SIZE_76\n# define BOOST_PP_SEQ_SIZE_76(_) BOOST_PP_SEQ_SIZE_77\n# define BOOST_PP_SEQ_SIZE_77(_) BOOST_PP_SEQ_SIZE_78\n# define BOOST_PP_SEQ_SIZE_78(_) BOOST_PP_SEQ_SIZE_79\n# define 
BOOST_PP_SEQ_SIZE_79(_) BOOST_PP_SEQ_SIZE_80\n# define BOOST_PP_SEQ_SIZE_80(_) BOOST_PP_SEQ_SIZE_81\n# define BOOST_PP_SEQ_SIZE_81(_) BOOST_PP_SEQ_SIZE_82\n# define BOOST_PP_SEQ_SIZE_82(_) BOOST_PP_SEQ_SIZE_83\n# define BOOST_PP_SEQ_SIZE_83(_) BOOST_PP_SEQ_SIZE_84\n# define BOOST_PP_SEQ_SIZE_84(_) BOOST_PP_SEQ_SIZE_85\n# define BOOST_PP_SEQ_SIZE_85(_) BOOST_PP_SEQ_SIZE_86\n# define BOOST_PP_SEQ_SIZE_86(_) BOOST_PP_SEQ_SIZE_87\n# define BOOST_PP_SEQ_SIZE_87(_) BOOST_PP_SEQ_SIZE_88\n# define BOOST_PP_SEQ_SIZE_88(_) BOOST_PP_SEQ_SIZE_89\n# define BOOST_PP_SEQ_SIZE_89(_) BOOST_PP_SEQ_SIZE_90\n# define BOOST_PP_SEQ_SIZE_90(_) BOOST_PP_SEQ_SIZE_91\n# define BOOST_PP_SEQ_SIZE_91(_) BOOST_PP_SEQ_SIZE_92\n# define BOOST_PP_SEQ_SIZE_92(_) BOOST_PP_SEQ_SIZE_93\n# define BOOST_PP_SEQ_SIZE_93(_) BOOST_PP_SEQ_SIZE_94\n# define BOOST_PP_SEQ_SIZE_94(_) BOOST_PP_SEQ_SIZE_95\n# define BOOST_PP_SEQ_SIZE_95(_) BOOST_PP_SEQ_SIZE_96\n# define BOOST_PP_SEQ_SIZE_96(_) BOOST_PP_SEQ_SIZE_97\n# define BOOST_PP_SEQ_SIZE_97(_) BOOST_PP_SEQ_SIZE_98\n# define BOOST_PP_SEQ_SIZE_98(_) BOOST_PP_SEQ_SIZE_99\n# define BOOST_PP_SEQ_SIZE_99(_) BOOST_PP_SEQ_SIZE_100\n# define BOOST_PP_SEQ_SIZE_100(_) BOOST_PP_SEQ_SIZE_101\n# define BOOST_PP_SEQ_SIZE_101(_) BOOST_PP_SEQ_SIZE_102\n# define BOOST_PP_SEQ_SIZE_102(_) BOOST_PP_SEQ_SIZE_103\n# define BOOST_PP_SEQ_SIZE_103(_) BOOST_PP_SEQ_SIZE_104\n# define BOOST_PP_SEQ_SIZE_104(_) BOOST_PP_SEQ_SIZE_105\n# define BOOST_PP_SEQ_SIZE_105(_) BOOST_PP_SEQ_SIZE_106\n# define BOOST_PP_SEQ_SIZE_106(_) BOOST_PP_SEQ_SIZE_107\n# define BOOST_PP_SEQ_SIZE_107(_) BOOST_PP_SEQ_SIZE_108\n# define BOOST_PP_SEQ_SIZE_108(_) BOOST_PP_SEQ_SIZE_109\n# define BOOST_PP_SEQ_SIZE_109(_) BOOST_PP_SEQ_SIZE_110\n# define BOOST_PP_SEQ_SIZE_110(_) BOOST_PP_SEQ_SIZE_111\n# define BOOST_PP_SEQ_SIZE_111(_) BOOST_PP_SEQ_SIZE_112\n# define BOOST_PP_SEQ_SIZE_112(_) BOOST_PP_SEQ_SIZE_113\n# define BOOST_PP_SEQ_SIZE_113(_) BOOST_PP_SEQ_SIZE_114\n# define BOOST_PP_SEQ_SIZE_114(_) 
BOOST_PP_SEQ_SIZE_115\n# define BOOST_PP_SEQ_SIZE_115(_) BOOST_PP_SEQ_SIZE_116\n# define BOOST_PP_SEQ_SIZE_116(_) BOOST_PP_SEQ_SIZE_117\n# define BOOST_PP_SEQ_SIZE_117(_) BOOST_PP_SEQ_SIZE_118\n# define BOOST_PP_SEQ_SIZE_118(_) BOOST_PP_SEQ_SIZE_119\n# define BOOST_PP_SEQ_SIZE_119(_) BOOST_PP_SEQ_SIZE_120\n# define BOOST_PP_SEQ_SIZE_120(_) BOOST_PP_SEQ_SIZE_121\n# define BOOST_PP_SEQ_SIZE_121(_) BOOST_PP_SEQ_SIZE_122\n# define BOOST_PP_SEQ_SIZE_122(_) BOOST_PP_SEQ_SIZE_123\n# define BOOST_PP_SEQ_SIZE_123(_) BOOST_PP_SEQ_SIZE_124\n# define BOOST_PP_SEQ_SIZE_124(_) BOOST_PP_SEQ_SIZE_125\n# define BOOST_PP_SEQ_SIZE_125(_) BOOST_PP_SEQ_SIZE_126\n# define BOOST_PP_SEQ_SIZE_126(_) BOOST_PP_SEQ_SIZE_127\n# define BOOST_PP_SEQ_SIZE_127(_) BOOST_PP_SEQ_SIZE_128\n# define BOOST_PP_SEQ_SIZE_128(_) BOOST_PP_SEQ_SIZE_129\n# define BOOST_PP_SEQ_SIZE_129(_) BOOST_PP_SEQ_SIZE_130\n# define BOOST_PP_SEQ_SIZE_130(_) BOOST_PP_SEQ_SIZE_131\n# define BOOST_PP_SEQ_SIZE_131(_) BOOST_PP_SEQ_SIZE_132\n# define BOOST_PP_SEQ_SIZE_132(_) BOOST_PP_SEQ_SIZE_133\n# define BOOST_PP_SEQ_SIZE_133(_) BOOST_PP_SEQ_SIZE_134\n# define BOOST_PP_SEQ_SIZE_134(_) BOOST_PP_SEQ_SIZE_135\n# define BOOST_PP_SEQ_SIZE_135(_) BOOST_PP_SEQ_SIZE_136\n# define BOOST_PP_SEQ_SIZE_136(_) BOOST_PP_SEQ_SIZE_137\n# define BOOST_PP_SEQ_SIZE_137(_) BOOST_PP_SEQ_SIZE_138\n# define BOOST_PP_SEQ_SIZE_138(_) BOOST_PP_SEQ_SIZE_139\n# define BOOST_PP_SEQ_SIZE_139(_) BOOST_PP_SEQ_SIZE_140\n# define BOOST_PP_SEQ_SIZE_140(_) BOOST_PP_SEQ_SIZE_141\n# define BOOST_PP_SEQ_SIZE_141(_) BOOST_PP_SEQ_SIZE_142\n# define BOOST_PP_SEQ_SIZE_142(_) BOOST_PP_SEQ_SIZE_143\n# define BOOST_PP_SEQ_SIZE_143(_) BOOST_PP_SEQ_SIZE_144\n# define BOOST_PP_SEQ_SIZE_144(_) BOOST_PP_SEQ_SIZE_145\n# define BOOST_PP_SEQ_SIZE_145(_) BOOST_PP_SEQ_SIZE_146\n# define BOOST_PP_SEQ_SIZE_146(_) BOOST_PP_SEQ_SIZE_147\n# define BOOST_PP_SEQ_SIZE_147(_) BOOST_PP_SEQ_SIZE_148\n# define BOOST_PP_SEQ_SIZE_148(_) BOOST_PP_SEQ_SIZE_149\n# define BOOST_PP_SEQ_SIZE_149(_) 
BOOST_PP_SEQ_SIZE_150\n# define BOOST_PP_SEQ_SIZE_150(_) BOOST_PP_SEQ_SIZE_151\n# define BOOST_PP_SEQ_SIZE_151(_) BOOST_PP_SEQ_SIZE_152\n# define BOOST_PP_SEQ_SIZE_152(_) BOOST_PP_SEQ_SIZE_153\n# define BOOST_PP_SEQ_SIZE_153(_) BOOST_PP_SEQ_SIZE_154\n# define BOOST_PP_SEQ_SIZE_154(_) BOOST_PP_SEQ_SIZE_155\n# define BOOST_PP_SEQ_SIZE_155(_) BOOST_PP_SEQ_SIZE_156\n# define BOOST_PP_SEQ_SIZE_156(_) BOOST_PP_SEQ_SIZE_157\n# define BOOST_PP_SEQ_SIZE_157(_) BOOST_PP_SEQ_SIZE_158\n# define BOOST_PP_SEQ_SIZE_158(_) BOOST_PP_SEQ_SIZE_159\n# define BOOST_PP_SEQ_SIZE_159(_) BOOST_PP_SEQ_SIZE_160\n# define BOOST_PP_SEQ_SIZE_160(_) BOOST_PP_SEQ_SIZE_161\n# define BOOST_PP_SEQ_SIZE_161(_) BOOST_PP_SEQ_SIZE_162\n# define BOOST_PP_SEQ_SIZE_162(_) BOOST_PP_SEQ_SIZE_163\n# define BOOST_PP_SEQ_SIZE_163(_) BOOST_PP_SEQ_SIZE_164\n# define BOOST_PP_SEQ_SIZE_164(_) BOOST_PP_SEQ_SIZE_165\n# define BOOST_PP_SEQ_SIZE_165(_) BOOST_PP_SEQ_SIZE_166\n# define BOOST_PP_SEQ_SIZE_166(_) BOOST_PP_SEQ_SIZE_167\n# define BOOST_PP_SEQ_SIZE_167(_) BOOST_PP_SEQ_SIZE_168\n# define BOOST_PP_SEQ_SIZE_168(_) BOOST_PP_SEQ_SIZE_169\n# define BOOST_PP_SEQ_SIZE_169(_) BOOST_PP_SEQ_SIZE_170\n# define BOOST_PP_SEQ_SIZE_170(_) BOOST_PP_SEQ_SIZE_171\n# define BOOST_PP_SEQ_SIZE_171(_) BOOST_PP_SEQ_SIZE_172\n# define BOOST_PP_SEQ_SIZE_172(_) BOOST_PP_SEQ_SIZE_173\n# define BOOST_PP_SEQ_SIZE_173(_) BOOST_PP_SEQ_SIZE_174\n# define BOOST_PP_SEQ_SIZE_174(_) BOOST_PP_SEQ_SIZE_175\n# define BOOST_PP_SEQ_SIZE_175(_) BOOST_PP_SEQ_SIZE_176\n# define BOOST_PP_SEQ_SIZE_176(_) BOOST_PP_SEQ_SIZE_177\n# define BOOST_PP_SEQ_SIZE_177(_) BOOST_PP_SEQ_SIZE_178\n# define BOOST_PP_SEQ_SIZE_178(_) BOOST_PP_SEQ_SIZE_179\n# define BOOST_PP_SEQ_SIZE_179(_) BOOST_PP_SEQ_SIZE_180\n# define BOOST_PP_SEQ_SIZE_180(_) BOOST_PP_SEQ_SIZE_181\n# define BOOST_PP_SEQ_SIZE_181(_) BOOST_PP_SEQ_SIZE_182\n# define BOOST_PP_SEQ_SIZE_182(_) BOOST_PP_SEQ_SIZE_183\n# define BOOST_PP_SEQ_SIZE_183(_) BOOST_PP_SEQ_SIZE_184\n# define BOOST_PP_SEQ_SIZE_184(_) 
BOOST_PP_SEQ_SIZE_185\n# define BOOST_PP_SEQ_SIZE_185(_) BOOST_PP_SEQ_SIZE_186\n# define BOOST_PP_SEQ_SIZE_186(_) BOOST_PP_SEQ_SIZE_187\n# define BOOST_PP_SEQ_SIZE_187(_) BOOST_PP_SEQ_SIZE_188\n# define BOOST_PP_SEQ_SIZE_188(_) BOOST_PP_SEQ_SIZE_189\n# define BOOST_PP_SEQ_SIZE_189(_) BOOST_PP_SEQ_SIZE_190\n# define BOOST_PP_SEQ_SIZE_190(_) BOOST_PP_SEQ_SIZE_191\n# define BOOST_PP_SEQ_SIZE_191(_) BOOST_PP_SEQ_SIZE_192\n# define BOOST_PP_SEQ_SIZE_192(_) BOOST_PP_SEQ_SIZE_193\n# define BOOST_PP_SEQ_SIZE_193(_) BOOST_PP_SEQ_SIZE_194\n# define BOOST_PP_SEQ_SIZE_194(_) BOOST_PP_SEQ_SIZE_195\n# define BOOST_PP_SEQ_SIZE_195(_) BOOST_PP_SEQ_SIZE_196\n# define BOOST_PP_SEQ_SIZE_196(_) BOOST_PP_SEQ_SIZE_197\n# define BOOST_PP_SEQ_SIZE_197(_) BOOST_PP_SEQ_SIZE_198\n# define BOOST_PP_SEQ_SIZE_198(_) BOOST_PP_SEQ_SIZE_199\n# define BOOST_PP_SEQ_SIZE_199(_) BOOST_PP_SEQ_SIZE_200\n# define BOOST_PP_SEQ_SIZE_200(_) BOOST_PP_SEQ_SIZE_201\n# define BOOST_PP_SEQ_SIZE_201(_) BOOST_PP_SEQ_SIZE_202\n# define BOOST_PP_SEQ_SIZE_202(_) BOOST_PP_SEQ_SIZE_203\n# define BOOST_PP_SEQ_SIZE_203(_) BOOST_PP_SEQ_SIZE_204\n# define BOOST_PP_SEQ_SIZE_204(_) BOOST_PP_SEQ_SIZE_205\n# define BOOST_PP_SEQ_SIZE_205(_) BOOST_PP_SEQ_SIZE_206\n# define BOOST_PP_SEQ_SIZE_206(_) BOOST_PP_SEQ_SIZE_207\n# define BOOST_PP_SEQ_SIZE_207(_) BOOST_PP_SEQ_SIZE_208\n# define BOOST_PP_SEQ_SIZE_208(_) BOOST_PP_SEQ_SIZE_209\n# define BOOST_PP_SEQ_SIZE_209(_) BOOST_PP_SEQ_SIZE_210\n# define BOOST_PP_SEQ_SIZE_210(_) BOOST_PP_SEQ_SIZE_211\n# define BOOST_PP_SEQ_SIZE_211(_) BOOST_PP_SEQ_SIZE_212\n# define BOOST_PP_SEQ_SIZE_212(_) BOOST_PP_SEQ_SIZE_213\n# define BOOST_PP_SEQ_SIZE_213(_) BOOST_PP_SEQ_SIZE_214\n# define BOOST_PP_SEQ_SIZE_214(_) BOOST_PP_SEQ_SIZE_215\n# define BOOST_PP_SEQ_SIZE_215(_) BOOST_PP_SEQ_SIZE_216\n# define BOOST_PP_SEQ_SIZE_216(_) BOOST_PP_SEQ_SIZE_217\n# define BOOST_PP_SEQ_SIZE_217(_) BOOST_PP_SEQ_SIZE_218\n# define BOOST_PP_SEQ_SIZE_218(_) BOOST_PP_SEQ_SIZE_219\n# define BOOST_PP_SEQ_SIZE_219(_) 
BOOST_PP_SEQ_SIZE_220\n# define BOOST_PP_SEQ_SIZE_220(_) BOOST_PP_SEQ_SIZE_221\n# define BOOST_PP_SEQ_SIZE_221(_) BOOST_PP_SEQ_SIZE_222\n# define BOOST_PP_SEQ_SIZE_222(_) BOOST_PP_SEQ_SIZE_223\n# define BOOST_PP_SEQ_SIZE_223(_) BOOST_PP_SEQ_SIZE_224\n# define BOOST_PP_SEQ_SIZE_224(_) BOOST_PP_SEQ_SIZE_225\n# define BOOST_PP_SEQ_SIZE_225(_) BOOST_PP_SEQ_SIZE_226\n# define BOOST_PP_SEQ_SIZE_226(_) BOOST_PP_SEQ_SIZE_227\n# define BOOST_PP_SEQ_SIZE_227(_) BOOST_PP_SEQ_SIZE_228\n# define BOOST_PP_SEQ_SIZE_228(_) BOOST_PP_SEQ_SIZE_229\n# define BOOST_PP_SEQ_SIZE_229(_) BOOST_PP_SEQ_SIZE_230\n# define BOOST_PP_SEQ_SIZE_230(_) BOOST_PP_SEQ_SIZE_231\n# define BOOST_PP_SEQ_SIZE_231(_) BOOST_PP_SEQ_SIZE_232\n# define BOOST_PP_SEQ_SIZE_232(_) BOOST_PP_SEQ_SIZE_233\n# define BOOST_PP_SEQ_SIZE_233(_) BOOST_PP_SEQ_SIZE_234\n# define BOOST_PP_SEQ_SIZE_234(_) BOOST_PP_SEQ_SIZE_235\n# define BOOST_PP_SEQ_SIZE_235(_) BOOST_PP_SEQ_SIZE_236\n# define BOOST_PP_SEQ_SIZE_236(_) BOOST_PP_SEQ_SIZE_237\n# define BOOST_PP_SEQ_SIZE_237(_) BOOST_PP_SEQ_SIZE_238\n# define BOOST_PP_SEQ_SIZE_238(_) BOOST_PP_SEQ_SIZE_239\n# define BOOST_PP_SEQ_SIZE_239(_) BOOST_PP_SEQ_SIZE_240\n# define BOOST_PP_SEQ_SIZE_240(_) BOOST_PP_SEQ_SIZE_241\n# define BOOST_PP_SEQ_SIZE_241(_) BOOST_PP_SEQ_SIZE_242\n# define BOOST_PP_SEQ_SIZE_242(_) BOOST_PP_SEQ_SIZE_243\n# define BOOST_PP_SEQ_SIZE_243(_) BOOST_PP_SEQ_SIZE_244\n# define BOOST_PP_SEQ_SIZE_244(_) BOOST_PP_SEQ_SIZE_245\n# define BOOST_PP_SEQ_SIZE_245(_) BOOST_PP_SEQ_SIZE_246\n# define BOOST_PP_SEQ_SIZE_246(_) BOOST_PP_SEQ_SIZE_247\n# define BOOST_PP_SEQ_SIZE_247(_) BOOST_PP_SEQ_SIZE_248\n# define BOOST_PP_SEQ_SIZE_248(_) BOOST_PP_SEQ_SIZE_249\n# define BOOST_PP_SEQ_SIZE_249(_) BOOST_PP_SEQ_SIZE_250\n# define BOOST_PP_SEQ_SIZE_250(_) BOOST_PP_SEQ_SIZE_251\n# define BOOST_PP_SEQ_SIZE_251(_) BOOST_PP_SEQ_SIZE_252\n# define BOOST_PP_SEQ_SIZE_252(_) BOOST_PP_SEQ_SIZE_253\n# define BOOST_PP_SEQ_SIZE_253(_) BOOST_PP_SEQ_SIZE_254\n# define BOOST_PP_SEQ_SIZE_254(_) 
BOOST_PP_SEQ_SIZE_255\n# define BOOST_PP_SEQ_SIZE_255(_) BOOST_PP_SEQ_SIZE_256\n# define BOOST_PP_SEQ_SIZE_256(_) BOOST_PP_SEQ_SIZE_257\n#\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_0 0\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_1 1\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_2 2\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_3 3\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_4 4\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_5 5\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_6 6\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_7 7\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_8 8\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_9 9\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_10 10\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_11 11\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_12 12\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_13 13\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_14 14\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_15 15\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_16 16\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_17 17\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_18 18\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_19 19\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_20 20\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_21 21\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_22 22\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_23 23\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_24 24\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_25 25\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_26 26\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_27 27\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_28 28\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_29 29\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_30 30\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_31 31\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_32 32\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_33 33\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_34 34\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_35 35\n# 
define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_36 36\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_37 37\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_38 38\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_39 39\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_40 40\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_41 41\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_42 42\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_43 43\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_44 44\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_45 45\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_46 46\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_47 47\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_48 48\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_49 49\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_50 50\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_51 51\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_52 52\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_53 53\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_54 54\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_55 55\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_56 56\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_57 57\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_58 58\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_59 59\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_60 60\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_61 61\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_62 62\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_63 63\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_64 64\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_65 65\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_66 66\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_67 67\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_68 68\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_69 69\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_70 70\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_71 71\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_72 72\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_73 73\n# define 
BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_74 74\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_75 75\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_76 76\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_77 77\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_78 78\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_79 79\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_80 80\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_81 81\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_82 82\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_83 83\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_84 84\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_85 85\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_86 86\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_87 87\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_88 88\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_89 89\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_90 90\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_91 91\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_92 92\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_93 93\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_94 94\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_95 95\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_96 96\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_97 97\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_98 98\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_99 99\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_100 100\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_101 101\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_102 102\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_103 103\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_104 104\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_105 105\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_106 106\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_107 107\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_108 108\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_109 109\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_110 110\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_111 111\n# define 
BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_112 112\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_113 113\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_114 114\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_115 115\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_116 116\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_117 117\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_118 118\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_119 119\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_120 120\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_121 121\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_122 122\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_123 123\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_124 124\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_125 125\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_126 126\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_127 127\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_128 128\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_129 129\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_130 130\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_131 131\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_132 132\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_133 133\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_134 134\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_135 135\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_136 136\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_137 137\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_138 138\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_139 139\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_140 140\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_141 141\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_142 142\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_143 143\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_144 144\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_145 145\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_146 146\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_147 147\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_148 148\n# define 
BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_149 149\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_150 150\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_151 151\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_152 152\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_153 153\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_154 154\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_155 155\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_156 156\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_157 157\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_158 158\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_159 159\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_160 160\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_161 161\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_162 162\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_163 163\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_164 164\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_165 165\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_166 166\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_167 167\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_168 168\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_169 169\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_170 170\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_171 171\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_172 172\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_173 173\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_174 174\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_175 175\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_176 176\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_177 177\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_178 178\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_179 179\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_180 180\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_181 181\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_182 182\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_183 183\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_184 184\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_185 185\n# define 
BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_186 186\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_187 187\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_188 188\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_189 189\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_190 190\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_191 191\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_192 192\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_193 193\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_194 194\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_195 195\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_196 196\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_197 197\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_198 198\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_199 199\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_200 200\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_201 201\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_202 202\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_203 203\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_204 204\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_205 205\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_206 206\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_207 207\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_208 208\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_209 209\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_210 210\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_211 211\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_212 212\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_213 213\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_214 214\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_215 215\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_216 216\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_217 217\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_218 218\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_219 219\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_220 220\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_221 221\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_222 222\n# define 
BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_223 223\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_224 224\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_225 225\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_226 226\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_227 227\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_228 228\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_229 229\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_230 230\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_231 231\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_232 232\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_233 233\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_234 234\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_235 235\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_236 236\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_237 237\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_238 238\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_239 239\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_240 240\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_241 241\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_242 242\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_243 243\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_244 244\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_245 245\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_246 246\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_247 247\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_248 248\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_249 249\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_250 250\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_251 251\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_252 252\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_253 253\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_254 254\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_255 255\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_256 256\n# define BOOST_PP_SEQ_SIZE_BOOST_PP_SEQ_SIZE_257 257\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/seq/subseq.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SEQ_SUBSEQ_HPP\n# define BOOST_PREPROCESSOR_SEQ_SUBSEQ_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/seq/first_n.hpp>\n# include <boost/preprocessor/seq/rest_n.hpp>\n#\n# /* BOOST_PP_SEQ_SUBSEQ */\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#    define BOOST_PP_SEQ_SUBSEQ(seq, i, len) BOOST_PP_SEQ_FIRST_N(len, BOOST_PP_SEQ_REST_N(i, seq))\n# else\n#    define BOOST_PP_SEQ_SUBSEQ(seq, i, len) BOOST_PP_SEQ_SUBSEQ_I(seq, i, len)\n#    define BOOST_PP_SEQ_SUBSEQ_I(seq, i, len) BOOST_PP_SEQ_FIRST_N(len, BOOST_PP_SEQ_REST_N(i, seq))\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/detail/counter.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2005.                                  *\n#  *     Distributed under the Boost Software License, Version 1.0. (See      *\n#  *     accompanying file LICENSE_1_0.txt or copy at                         *\n#  *     http://www.boost.org/LICENSE_1_0.txt)                                *\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# define BOOST_PP_VALUE BOOST_PP_COUNTER + 1\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_COUNTER\n#\n# undef BOOST_PP_COUNTER_DIGIT_1\n# undef BOOST_PP_COUNTER_DIGIT_2\n# undef BOOST_PP_COUNTER_DIGIT_3\n# undef BOOST_PP_COUNTER_DIGIT_4\n# undef BOOST_PP_COUNTER_DIGIT_5\n# undef BOOST_PP_COUNTER_DIGIT_6\n# undef BOOST_PP_COUNTER_DIGIT_7\n# undef BOOST_PP_COUNTER_DIGIT_8\n# undef BOOST_PP_COUNTER_DIGIT_9\n# undef BOOST_PP_COUNTER_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_10 == 0\n#    define BOOST_PP_COUNTER_DIGIT_10 0\n# elif BOOST_PP_SLOT_TEMP_10 == 1\n#    define BOOST_PP_COUNTER_DIGIT_10 1\n# elif BOOST_PP_SLOT_TEMP_10 == 2\n#    define BOOST_PP_COUNTER_DIGIT_10 2\n# elif BOOST_PP_SLOT_TEMP_10 == 3\n#    define BOOST_PP_COUNTER_DIGIT_10 3\n# elif BOOST_PP_SLOT_TEMP_10 == 4\n#    define BOOST_PP_COUNTER_DIGIT_10 4\n# elif BOOST_PP_SLOT_TEMP_10 == 5\n#    define BOOST_PP_COUNTER_DIGIT_10 5\n# elif BOOST_PP_SLOT_TEMP_10 == 6\n#    define BOOST_PP_COUNTER_DIGIT_10 6\n# elif BOOST_PP_SLOT_TEMP_10 == 7\n#    define BOOST_PP_COUNTER_DIGIT_10 7\n# elif BOOST_PP_SLOT_TEMP_10 == 8\n#    define BOOST_PP_COUNTER_DIGIT_10 8\n# elif BOOST_PP_SLOT_TEMP_10 == 9\n#    define BOOST_PP_COUNTER_DIGIT_10 9\n# endif\n#\n# if 
BOOST_PP_SLOT_TEMP_9 == 0\n#    define BOOST_PP_COUNTER_DIGIT_9 0\n# elif BOOST_PP_SLOT_TEMP_9 == 1\n#    define BOOST_PP_COUNTER_DIGIT_9 1\n# elif BOOST_PP_SLOT_TEMP_9 == 2\n#    define BOOST_PP_COUNTER_DIGIT_9 2\n# elif BOOST_PP_SLOT_TEMP_9 == 3\n#    define BOOST_PP_COUNTER_DIGIT_9 3\n# elif BOOST_PP_SLOT_TEMP_9 == 4\n#    define BOOST_PP_COUNTER_DIGIT_9 4\n# elif BOOST_PP_SLOT_TEMP_9 == 5\n#    define BOOST_PP_COUNTER_DIGIT_9 5\n# elif BOOST_PP_SLOT_TEMP_9 == 6\n#    define BOOST_PP_COUNTER_DIGIT_9 6\n# elif BOOST_PP_SLOT_TEMP_9 == 7\n#    define BOOST_PP_COUNTER_DIGIT_9 7\n# elif BOOST_PP_SLOT_TEMP_9 == 8\n#    define BOOST_PP_COUNTER_DIGIT_9 8\n# elif BOOST_PP_SLOT_TEMP_9 == 9\n#    define BOOST_PP_COUNTER_DIGIT_9 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_8 == 0\n#    define BOOST_PP_COUNTER_DIGIT_8 0\n# elif BOOST_PP_SLOT_TEMP_8 == 1\n#    define BOOST_PP_COUNTER_DIGIT_8 1\n# elif BOOST_PP_SLOT_TEMP_8 == 2\n#    define BOOST_PP_COUNTER_DIGIT_8 2\n# elif BOOST_PP_SLOT_TEMP_8 == 3\n#    define BOOST_PP_COUNTER_DIGIT_8 3\n# elif BOOST_PP_SLOT_TEMP_8 == 4\n#    define BOOST_PP_COUNTER_DIGIT_8 4\n# elif BOOST_PP_SLOT_TEMP_8 == 5\n#    define BOOST_PP_COUNTER_DIGIT_8 5\n# elif BOOST_PP_SLOT_TEMP_8 == 6\n#    define BOOST_PP_COUNTER_DIGIT_8 6\n# elif BOOST_PP_SLOT_TEMP_8 == 7\n#    define BOOST_PP_COUNTER_DIGIT_8 7\n# elif BOOST_PP_SLOT_TEMP_8 == 8\n#    define BOOST_PP_COUNTER_DIGIT_8 8\n# elif BOOST_PP_SLOT_TEMP_8 == 9\n#    define BOOST_PP_COUNTER_DIGIT_8 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_7 == 0\n#    define BOOST_PP_COUNTER_DIGIT_7 0\n# elif BOOST_PP_SLOT_TEMP_7 == 1\n#    define BOOST_PP_COUNTER_DIGIT_7 1\n# elif BOOST_PP_SLOT_TEMP_7 == 2\n#    define BOOST_PP_COUNTER_DIGIT_7 2\n# elif BOOST_PP_SLOT_TEMP_7 == 3\n#    define BOOST_PP_COUNTER_DIGIT_7 3\n# elif BOOST_PP_SLOT_TEMP_7 == 4\n#    define BOOST_PP_COUNTER_DIGIT_7 4\n# elif BOOST_PP_SLOT_TEMP_7 == 5\n#    define BOOST_PP_COUNTER_DIGIT_7 5\n# elif BOOST_PP_SLOT_TEMP_7 == 6\n#    define 
BOOST_PP_COUNTER_DIGIT_7 6\n# elif BOOST_PP_SLOT_TEMP_7 == 7\n#    define BOOST_PP_COUNTER_DIGIT_7 7\n# elif BOOST_PP_SLOT_TEMP_7 == 8\n#    define BOOST_PP_COUNTER_DIGIT_7 8\n# elif BOOST_PP_SLOT_TEMP_7 == 9\n#    define BOOST_PP_COUNTER_DIGIT_7 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_6 == 0\n#    define BOOST_PP_COUNTER_DIGIT_6 0\n# elif BOOST_PP_SLOT_TEMP_6 == 1\n#    define BOOST_PP_COUNTER_DIGIT_6 1\n# elif BOOST_PP_SLOT_TEMP_6 == 2\n#    define BOOST_PP_COUNTER_DIGIT_6 2\n# elif BOOST_PP_SLOT_TEMP_6 == 3\n#    define BOOST_PP_COUNTER_DIGIT_6 3\n# elif BOOST_PP_SLOT_TEMP_6 == 4\n#    define BOOST_PP_COUNTER_DIGIT_6 4\n# elif BOOST_PP_SLOT_TEMP_6 == 5\n#    define BOOST_PP_COUNTER_DIGIT_6 5\n# elif BOOST_PP_SLOT_TEMP_6 == 6\n#    define BOOST_PP_COUNTER_DIGIT_6 6\n# elif BOOST_PP_SLOT_TEMP_6 == 7\n#    define BOOST_PP_COUNTER_DIGIT_6 7\n# elif BOOST_PP_SLOT_TEMP_6 == 8\n#    define BOOST_PP_COUNTER_DIGIT_6 8\n# elif BOOST_PP_SLOT_TEMP_6 == 9\n#    define BOOST_PP_COUNTER_DIGIT_6 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_5 == 0\n#    define BOOST_PP_COUNTER_DIGIT_5 0\n# elif BOOST_PP_SLOT_TEMP_5 == 1\n#    define BOOST_PP_COUNTER_DIGIT_5 1\n# elif BOOST_PP_SLOT_TEMP_5 == 2\n#    define BOOST_PP_COUNTER_DIGIT_5 2\n# elif BOOST_PP_SLOT_TEMP_5 == 3\n#    define BOOST_PP_COUNTER_DIGIT_5 3\n# elif BOOST_PP_SLOT_TEMP_5 == 4\n#    define BOOST_PP_COUNTER_DIGIT_5 4\n# elif BOOST_PP_SLOT_TEMP_5 == 5\n#    define BOOST_PP_COUNTER_DIGIT_5 5\n# elif BOOST_PP_SLOT_TEMP_5 == 6\n#    define BOOST_PP_COUNTER_DIGIT_5 6\n# elif BOOST_PP_SLOT_TEMP_5 == 7\n#    define BOOST_PP_COUNTER_DIGIT_5 7\n# elif BOOST_PP_SLOT_TEMP_5 == 8\n#    define BOOST_PP_COUNTER_DIGIT_5 8\n# elif BOOST_PP_SLOT_TEMP_5 == 9\n#    define BOOST_PP_COUNTER_DIGIT_5 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_4 == 0\n#    define BOOST_PP_COUNTER_DIGIT_4 0\n# elif BOOST_PP_SLOT_TEMP_4 == 1\n#    define BOOST_PP_COUNTER_DIGIT_4 1\n# elif BOOST_PP_SLOT_TEMP_4 == 2\n#    define BOOST_PP_COUNTER_DIGIT_4 2\n# elif 
BOOST_PP_SLOT_TEMP_4 == 3\n#    define BOOST_PP_COUNTER_DIGIT_4 3\n# elif BOOST_PP_SLOT_TEMP_4 == 4\n#    define BOOST_PP_COUNTER_DIGIT_4 4\n# elif BOOST_PP_SLOT_TEMP_4 == 5\n#    define BOOST_PP_COUNTER_DIGIT_4 5\n# elif BOOST_PP_SLOT_TEMP_4 == 6\n#    define BOOST_PP_COUNTER_DIGIT_4 6\n# elif BOOST_PP_SLOT_TEMP_4 == 7\n#    define BOOST_PP_COUNTER_DIGIT_4 7\n# elif BOOST_PP_SLOT_TEMP_4 == 8\n#    define BOOST_PP_COUNTER_DIGIT_4 8\n# elif BOOST_PP_SLOT_TEMP_4 == 9\n#    define BOOST_PP_COUNTER_DIGIT_4 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_COUNTER_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_COUNTER_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_COUNTER_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_COUNTER_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_COUNTER_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_COUNTER_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_COUNTER_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_COUNTER_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_COUNTER_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define BOOST_PP_COUNTER_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_COUNTER_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_COUNTER_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_COUNTER_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_COUNTER_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_COUNTER_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_COUNTER_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_COUNTER_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_COUNTER_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_COUNTER_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define 
BOOST_PP_COUNTER_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_COUNTER_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_COUNTER_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_COUNTER_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#    define BOOST_PP_COUNTER_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_COUNTER_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_COUNTER_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_COUNTER_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_COUNTER_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_COUNTER_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_COUNTER_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_COUNTER_DIGIT_10\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_10(BOOST_PP_COUNTER_DIGIT_10, BOOST_PP_COUNTER_DIGIT_9, BOOST_PP_COUNTER_DIGIT_8, BOOST_PP_COUNTER_DIGIT_7, BOOST_PP_COUNTER_DIGIT_6, BOOST_PP_COUNTER_DIGIT_5, BOOST_PP_COUNTER_DIGIT_4, BOOST_PP_COUNTER_DIGIT_3, BOOST_PP_COUNTER_DIGIT_2, BOOST_PP_COUNTER_DIGIT_1)\n# elif BOOST_PP_COUNTER_DIGIT_9\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_9(BOOST_PP_COUNTER_DIGIT_9, BOOST_PP_COUNTER_DIGIT_8, BOOST_PP_COUNTER_DIGIT_7, BOOST_PP_COUNTER_DIGIT_6, BOOST_PP_COUNTER_DIGIT_5, BOOST_PP_COUNTER_DIGIT_4, BOOST_PP_COUNTER_DIGIT_3, BOOST_PP_COUNTER_DIGIT_2, BOOST_PP_COUNTER_DIGIT_1)\n# elif BOOST_PP_COUNTER_DIGIT_8\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_8(BOOST_PP_COUNTER_DIGIT_8, BOOST_PP_COUNTER_DIGIT_7, BOOST_PP_COUNTER_DIGIT_6, BOOST_PP_COUNTER_DIGIT_5, BOOST_PP_COUNTER_DIGIT_4, BOOST_PP_COUNTER_DIGIT_3, BOOST_PP_COUNTER_DIGIT_2, BOOST_PP_COUNTER_DIGIT_1)\n# elif BOOST_PP_COUNTER_DIGIT_7\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_7(BOOST_PP_COUNTER_DIGIT_7, BOOST_PP_COUNTER_DIGIT_6, BOOST_PP_COUNTER_DIGIT_5, BOOST_PP_COUNTER_DIGIT_4, BOOST_PP_COUNTER_DIGIT_3, BOOST_PP_COUNTER_DIGIT_2, 
BOOST_PP_COUNTER_DIGIT_1)\n# elif BOOST_PP_COUNTER_DIGIT_6\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_6(BOOST_PP_COUNTER_DIGIT_6, BOOST_PP_COUNTER_DIGIT_5, BOOST_PP_COUNTER_DIGIT_4, BOOST_PP_COUNTER_DIGIT_3, BOOST_PP_COUNTER_DIGIT_2, BOOST_PP_COUNTER_DIGIT_1)\n# elif BOOST_PP_COUNTER_DIGIT_5\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_5(BOOST_PP_COUNTER_DIGIT_5, BOOST_PP_COUNTER_DIGIT_4, BOOST_PP_COUNTER_DIGIT_3, BOOST_PP_COUNTER_DIGIT_2, BOOST_PP_COUNTER_DIGIT_1)\n# elif BOOST_PP_COUNTER_DIGIT_4\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_4(BOOST_PP_COUNTER_DIGIT_4, BOOST_PP_COUNTER_DIGIT_3, BOOST_PP_COUNTER_DIGIT_2, BOOST_PP_COUNTER_DIGIT_1)\n# elif BOOST_PP_COUNTER_DIGIT_3\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_3(BOOST_PP_COUNTER_DIGIT_3, BOOST_PP_COUNTER_DIGIT_2, BOOST_PP_COUNTER_DIGIT_1)\n# elif BOOST_PP_COUNTER_DIGIT_2\n#    define BOOST_PP_COUNTER BOOST_PP_SLOT_CC_2(BOOST_PP_COUNTER_DIGIT_2, BOOST_PP_COUNTER_DIGIT_1)\n# else\n#    define BOOST_PP_COUNTER BOOST_PP_COUNTER_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/detail/def.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SLOT_DETAIL_DEF_HPP\n# define BOOST_PREPROCESSOR_SLOT_DETAIL_DEF_HPP\n#\n# /* BOOST_PP_SLOT_OFFSET_x */\n#\n# define BOOST_PP_SLOT_OFFSET_10(x) (x) % 1000000000UL\n# define BOOST_PP_SLOT_OFFSET_9(x) BOOST_PP_SLOT_OFFSET_10(x) % 100000000UL\n# define BOOST_PP_SLOT_OFFSET_8(x) BOOST_PP_SLOT_OFFSET_9(x) % 10000000UL\n# define BOOST_PP_SLOT_OFFSET_7(x) BOOST_PP_SLOT_OFFSET_8(x) % 1000000UL\n# define BOOST_PP_SLOT_OFFSET_6(x) BOOST_PP_SLOT_OFFSET_7(x) % 100000UL\n# define BOOST_PP_SLOT_OFFSET_5(x) BOOST_PP_SLOT_OFFSET_6(x) % 10000UL\n# define BOOST_PP_SLOT_OFFSET_4(x) BOOST_PP_SLOT_OFFSET_5(x) % 1000UL\n# define BOOST_PP_SLOT_OFFSET_3(x) BOOST_PP_SLOT_OFFSET_4(x) % 100UL\n# define BOOST_PP_SLOT_OFFSET_2(x) BOOST_PP_SLOT_OFFSET_3(x) % 10UL\n#\n# /* BOOST_PP_SLOT_CC_x */\n#\n# define BOOST_PP_SLOT_CC_2(a, b) BOOST_PP_SLOT_CC_2_D(a, b)\n# define BOOST_PP_SLOT_CC_3(a, b, c) BOOST_PP_SLOT_CC_3_D(a, b, c)\n# define BOOST_PP_SLOT_CC_4(a, b, c, d) BOOST_PP_SLOT_CC_4_D(a, b, c, d)\n# define BOOST_PP_SLOT_CC_5(a, b, c, d, e) BOOST_PP_SLOT_CC_5_D(a, b, c, d, e)\n# define BOOST_PP_SLOT_CC_6(a, b, c, d, e, f) BOOST_PP_SLOT_CC_6_D(a, b, c, d, e, f)\n# define BOOST_PP_SLOT_CC_7(a, b, c, d, e, f, g) BOOST_PP_SLOT_CC_7_D(a, b, c, d, e, f, g)\n# define BOOST_PP_SLOT_CC_8(a, b, c, d, e, f, g, h) BOOST_PP_SLOT_CC_8_D(a, b, c, d, e, f, g, h)\n# 
define BOOST_PP_SLOT_CC_9(a, b, c, d, e, f, g, h, i) BOOST_PP_SLOT_CC_9_D(a, b, c, d, e, f, g, h, i)\n# define BOOST_PP_SLOT_CC_10(a, b, c, d, e, f, g, h, i, j) BOOST_PP_SLOT_CC_10_D(a, b, c, d, e, f, g, h, i, j)\n#\n# define BOOST_PP_SLOT_CC_2_D(a, b) a ## b\n# define BOOST_PP_SLOT_CC_3_D(a, b, c) a ## b ## c\n# define BOOST_PP_SLOT_CC_4_D(a, b, c, d) a ## b ## c ## d\n# define BOOST_PP_SLOT_CC_5_D(a, b, c, d, e) a ## b ## c ## d ## e\n# define BOOST_PP_SLOT_CC_6_D(a, b, c, d, e, f) a ## b ## c ## d ## e ## f\n# define BOOST_PP_SLOT_CC_7_D(a, b, c, d, e, f, g) a ## b ## c ## d ## e ## f ## g\n# define BOOST_PP_SLOT_CC_8_D(a, b, c, d, e, f, g, h) a ## b ## c ## d ## e ## f ## g ## h\n# define BOOST_PP_SLOT_CC_9_D(a, b, c, d, e, f, g, h, i) a ## b ## c ## d ## e ## f ## g ## h ## i\n# define BOOST_PP_SLOT_CC_10_D(a, b, c, d, e, f, g, h, i, j) a ## b ## c ## d ## e ## f ## g ## h ## i ## j\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/detail/shared.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PP_VALUE\n#    error BOOST_PP_ERROR:  BOOST_PP_VALUE is not defined\n# endif\n#\n# undef BOOST_PP_SLOT_TEMP_1\n# undef BOOST_PP_SLOT_TEMP_2\n# undef BOOST_PP_SLOT_TEMP_3\n# undef BOOST_PP_SLOT_TEMP_4\n# undef BOOST_PP_SLOT_TEMP_5\n# undef BOOST_PP_SLOT_TEMP_6\n# undef BOOST_PP_SLOT_TEMP_7\n# undef BOOST_PP_SLOT_TEMP_8\n# undef BOOST_PP_SLOT_TEMP_9\n# undef BOOST_PP_SLOT_TEMP_10\n#\n# if (BOOST_PP_VALUE) / 1000000000UL == 0\n#    define BOOST_PP_SLOT_TEMP_10 0\n# elif (BOOST_PP_VALUE) / 1000000000UL == 1\n#    define BOOST_PP_SLOT_TEMP_10 1\n# elif (BOOST_PP_VALUE) / 1000000000UL == 2\n#    define BOOST_PP_SLOT_TEMP_10 2\n# elif (BOOST_PP_VALUE) / 1000000000UL == 3\n#    define BOOST_PP_SLOT_TEMP_10 3\n# elif (BOOST_PP_VALUE) / 1000000000UL == 4\n#    define BOOST_PP_SLOT_TEMP_10 4\n# elif (BOOST_PP_VALUE) / 1000000000UL == 5\n#    define BOOST_PP_SLOT_TEMP_10 5\n# elif (BOOST_PP_VALUE) / 1000000000UL == 6\n#    define BOOST_PP_SLOT_TEMP_10 6\n# elif (BOOST_PP_VALUE) / 1000000000UL == 7\n#    define BOOST_PP_SLOT_TEMP_10 7\n# elif (BOOST_PP_VALUE) / 1000000000UL == 8\n#    define BOOST_PP_SLOT_TEMP_10 8\n# elif (BOOST_PP_VALUE) / 1000000000UL == 9\n#    define BOOST_PP_SLOT_TEMP_10 9\n# endif\n#\n# if BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 0\n#    define BOOST_PP_SLOT_TEMP_9 0\n# elif 
BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 1\n#    define BOOST_PP_SLOT_TEMP_9 1\n# elif BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 2\n#    define BOOST_PP_SLOT_TEMP_9 2\n# elif BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 3\n#    define BOOST_PP_SLOT_TEMP_9 3\n# elif BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 4\n#    define BOOST_PP_SLOT_TEMP_9 4\n# elif BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 5\n#    define BOOST_PP_SLOT_TEMP_9 5\n# elif BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 6\n#    define BOOST_PP_SLOT_TEMP_9 6\n# elif BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 7\n#    define BOOST_PP_SLOT_TEMP_9 7\n# elif BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 8\n#    define BOOST_PP_SLOT_TEMP_9 8\n# elif BOOST_PP_SLOT_OFFSET_10(BOOST_PP_VALUE) / 100000000UL == 9\n#    define BOOST_PP_SLOT_TEMP_9 9\n# endif\n#\n# if BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 0\n#    define BOOST_PP_SLOT_TEMP_8 0\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 1\n#    define BOOST_PP_SLOT_TEMP_8 1\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 2\n#    define BOOST_PP_SLOT_TEMP_8 2\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 3\n#    define BOOST_PP_SLOT_TEMP_8 3\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 4\n#    define BOOST_PP_SLOT_TEMP_8 4\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 5\n#    define BOOST_PP_SLOT_TEMP_8 5\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 6\n#    define BOOST_PP_SLOT_TEMP_8 6\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 7\n#    define BOOST_PP_SLOT_TEMP_8 7\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 8\n#    define BOOST_PP_SLOT_TEMP_8 8\n# elif BOOST_PP_SLOT_OFFSET_9(BOOST_PP_VALUE) / 10000000UL == 9\n#    define BOOST_PP_SLOT_TEMP_8 9\n# endif\n#\n# if BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 
1000000UL == 0\n#    define BOOST_PP_SLOT_TEMP_7 0\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 1\n#    define BOOST_PP_SLOT_TEMP_7 1\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 2\n#    define BOOST_PP_SLOT_TEMP_7 2\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 3\n#    define BOOST_PP_SLOT_TEMP_7 3\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 4\n#    define BOOST_PP_SLOT_TEMP_7 4\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 5\n#    define BOOST_PP_SLOT_TEMP_7 5\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 6\n#    define BOOST_PP_SLOT_TEMP_7 6\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 7\n#    define BOOST_PP_SLOT_TEMP_7 7\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 8\n#    define BOOST_PP_SLOT_TEMP_7 8\n# elif BOOST_PP_SLOT_OFFSET_8(BOOST_PP_VALUE) / 1000000UL == 9\n#    define BOOST_PP_SLOT_TEMP_7 9\n# endif\n#\n# if BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 0\n#    define BOOST_PP_SLOT_TEMP_6 0\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 1\n#    define BOOST_PP_SLOT_TEMP_6 1\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 2\n#    define BOOST_PP_SLOT_TEMP_6 2\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 3\n#    define BOOST_PP_SLOT_TEMP_6 3\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 4\n#    define BOOST_PP_SLOT_TEMP_6 4\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 5\n#    define BOOST_PP_SLOT_TEMP_6 5\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 6\n#    define BOOST_PP_SLOT_TEMP_6 6\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 7\n#    define BOOST_PP_SLOT_TEMP_6 7\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 8\n#    define BOOST_PP_SLOT_TEMP_6 8\n# elif BOOST_PP_SLOT_OFFSET_7(BOOST_PP_VALUE) / 100000UL == 9\n#    define BOOST_PP_SLOT_TEMP_6 9\n# endif\n#\n# if 
BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 0\n#    define BOOST_PP_SLOT_TEMP_5 0\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 1\n#    define BOOST_PP_SLOT_TEMP_5 1\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 2\n#    define BOOST_PP_SLOT_TEMP_5 2\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 3\n#    define BOOST_PP_SLOT_TEMP_5 3\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 4\n#    define BOOST_PP_SLOT_TEMP_5 4\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 5\n#    define BOOST_PP_SLOT_TEMP_5 5\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 6\n#    define BOOST_PP_SLOT_TEMP_5 6\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 7\n#    define BOOST_PP_SLOT_TEMP_5 7\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 8\n#    define BOOST_PP_SLOT_TEMP_5 8\n# elif BOOST_PP_SLOT_OFFSET_6(BOOST_PP_VALUE) / 10000UL == 9\n#    define BOOST_PP_SLOT_TEMP_5 9\n# endif\n#\n# if BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 0\n#    define BOOST_PP_SLOT_TEMP_4 0\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 1\n#    define BOOST_PP_SLOT_TEMP_4 1\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 2\n#    define BOOST_PP_SLOT_TEMP_4 2\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 3\n#    define BOOST_PP_SLOT_TEMP_4 3\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 4\n#    define BOOST_PP_SLOT_TEMP_4 4\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 5\n#    define BOOST_PP_SLOT_TEMP_4 5\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 6\n#    define BOOST_PP_SLOT_TEMP_4 6\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 7\n#    define BOOST_PP_SLOT_TEMP_4 7\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 8\n#    define BOOST_PP_SLOT_TEMP_4 8\n# elif BOOST_PP_SLOT_OFFSET_5(BOOST_PP_VALUE) / 1000UL == 9\n#    define BOOST_PP_SLOT_TEMP_4 9\n# endif\n#\n# if 
BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 0\n#    define BOOST_PP_SLOT_TEMP_3 0\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 1\n#    define BOOST_PP_SLOT_TEMP_3 1\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 2\n#    define BOOST_PP_SLOT_TEMP_3 2\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 3\n#    define BOOST_PP_SLOT_TEMP_3 3\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 4\n#    define BOOST_PP_SLOT_TEMP_3 4\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 5\n#    define BOOST_PP_SLOT_TEMP_3 5\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 6\n#    define BOOST_PP_SLOT_TEMP_3 6\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 7\n#    define BOOST_PP_SLOT_TEMP_3 7\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 8\n#    define BOOST_PP_SLOT_TEMP_3 8\n# elif BOOST_PP_SLOT_OFFSET_4(BOOST_PP_VALUE) / 100UL == 9\n#    define BOOST_PP_SLOT_TEMP_3 9\n# endif\n#\n# if BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 0\n#    define BOOST_PP_SLOT_TEMP_2 0\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 1\n#    define BOOST_PP_SLOT_TEMP_2 1\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 2\n#    define BOOST_PP_SLOT_TEMP_2 2\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 3\n#    define BOOST_PP_SLOT_TEMP_2 3\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 4\n#    define BOOST_PP_SLOT_TEMP_2 4\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 5\n#    define BOOST_PP_SLOT_TEMP_2 5\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 6\n#    define BOOST_PP_SLOT_TEMP_2 6\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 7\n#    define BOOST_PP_SLOT_TEMP_2 7\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 8\n#    define BOOST_PP_SLOT_TEMP_2 8\n# elif BOOST_PP_SLOT_OFFSET_3(BOOST_PP_VALUE) / 10UL == 9\n#    define BOOST_PP_SLOT_TEMP_2 9\n# endif\n#\n# if BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 0\n#    define 
BOOST_PP_SLOT_TEMP_1 0\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 1\n#    define BOOST_PP_SLOT_TEMP_1 1\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 2\n#    define BOOST_PP_SLOT_TEMP_1 2\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 3\n#    define BOOST_PP_SLOT_TEMP_1 3\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 4\n#    define BOOST_PP_SLOT_TEMP_1 4\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 5\n#    define BOOST_PP_SLOT_TEMP_1 5\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 6\n#    define BOOST_PP_SLOT_TEMP_1 6\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 7\n#    define BOOST_PP_SLOT_TEMP_1 7\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 8\n#    define BOOST_PP_SLOT_TEMP_1 8\n# elif BOOST_PP_SLOT_OFFSET_2(BOOST_PP_VALUE) == 9\n#    define BOOST_PP_SLOT_TEMP_1 9\n# endif\n#\n# undef BOOST_PP_VALUE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/detail/slot1.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_SLOT_1\n#\n# undef BOOST_PP_SLOT_1_DIGIT_1\n# undef BOOST_PP_SLOT_1_DIGIT_2\n# undef BOOST_PP_SLOT_1_DIGIT_3\n# undef BOOST_PP_SLOT_1_DIGIT_4\n# undef BOOST_PP_SLOT_1_DIGIT_5\n# undef BOOST_PP_SLOT_1_DIGIT_6\n# undef BOOST_PP_SLOT_1_DIGIT_7\n# undef BOOST_PP_SLOT_1_DIGIT_8\n# undef BOOST_PP_SLOT_1_DIGIT_9\n# undef BOOST_PP_SLOT_1_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_10 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_10 0\n# elif BOOST_PP_SLOT_TEMP_10 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_10 1\n# elif BOOST_PP_SLOT_TEMP_10 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_10 2\n# elif BOOST_PP_SLOT_TEMP_10 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_10 3\n# elif BOOST_PP_SLOT_TEMP_10 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_10 4\n# elif BOOST_PP_SLOT_TEMP_10 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_10 5\n# elif BOOST_PP_SLOT_TEMP_10 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_10 6\n# elif BOOST_PP_SLOT_TEMP_10 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_10 7\n# elif BOOST_PP_SLOT_TEMP_10 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_10 8\n# elif BOOST_PP_SLOT_TEMP_10 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_10 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_9 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_9 0\n# elif BOOST_PP_SLOT_TEMP_9 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_9 1\n# elif BOOST_PP_SLOT_TEMP_9 == 2\n#    define 
BOOST_PP_SLOT_1_DIGIT_9 2\n# elif BOOST_PP_SLOT_TEMP_9 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_9 3\n# elif BOOST_PP_SLOT_TEMP_9 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_9 4\n# elif BOOST_PP_SLOT_TEMP_9 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_9 5\n# elif BOOST_PP_SLOT_TEMP_9 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_9 6\n# elif BOOST_PP_SLOT_TEMP_9 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_9 7\n# elif BOOST_PP_SLOT_TEMP_9 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_9 8\n# elif BOOST_PP_SLOT_TEMP_9 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_9 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_8 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_8 0\n# elif BOOST_PP_SLOT_TEMP_8 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_8 1\n# elif BOOST_PP_SLOT_TEMP_8 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_8 2\n# elif BOOST_PP_SLOT_TEMP_8 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_8 3\n# elif BOOST_PP_SLOT_TEMP_8 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_8 4\n# elif BOOST_PP_SLOT_TEMP_8 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_8 5\n# elif BOOST_PP_SLOT_TEMP_8 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_8 6\n# elif BOOST_PP_SLOT_TEMP_8 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_8 7\n# elif BOOST_PP_SLOT_TEMP_8 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_8 8\n# elif BOOST_PP_SLOT_TEMP_8 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_8 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_7 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_7 0\n# elif BOOST_PP_SLOT_TEMP_7 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_7 1\n# elif BOOST_PP_SLOT_TEMP_7 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_7 2\n# elif BOOST_PP_SLOT_TEMP_7 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_7 3\n# elif BOOST_PP_SLOT_TEMP_7 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_7 4\n# elif BOOST_PP_SLOT_TEMP_7 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_7 5\n# elif BOOST_PP_SLOT_TEMP_7 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_7 6\n# elif BOOST_PP_SLOT_TEMP_7 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_7 7\n# elif BOOST_PP_SLOT_TEMP_7 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_7 8\n# elif BOOST_PP_SLOT_TEMP_7 == 9\n#    define 
BOOST_PP_SLOT_1_DIGIT_7 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_6 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_6 0\n# elif BOOST_PP_SLOT_TEMP_6 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_6 1\n# elif BOOST_PP_SLOT_TEMP_6 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_6 2\n# elif BOOST_PP_SLOT_TEMP_6 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_6 3\n# elif BOOST_PP_SLOT_TEMP_6 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_6 4\n# elif BOOST_PP_SLOT_TEMP_6 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_6 5\n# elif BOOST_PP_SLOT_TEMP_6 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_6 6\n# elif BOOST_PP_SLOT_TEMP_6 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_6 7\n# elif BOOST_PP_SLOT_TEMP_6 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_6 8\n# elif BOOST_PP_SLOT_TEMP_6 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_6 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_5 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_5 0\n# elif BOOST_PP_SLOT_TEMP_5 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_5 1\n# elif BOOST_PP_SLOT_TEMP_5 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_5 2\n# elif BOOST_PP_SLOT_TEMP_5 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_5 3\n# elif BOOST_PP_SLOT_TEMP_5 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_5 4\n# elif BOOST_PP_SLOT_TEMP_5 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_5 5\n# elif BOOST_PP_SLOT_TEMP_5 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_5 6\n# elif BOOST_PP_SLOT_TEMP_5 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_5 7\n# elif BOOST_PP_SLOT_TEMP_5 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_5 8\n# elif BOOST_PP_SLOT_TEMP_5 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_5 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_4 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_4 0\n# elif BOOST_PP_SLOT_TEMP_4 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_4 1\n# elif BOOST_PP_SLOT_TEMP_4 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_4 2\n# elif BOOST_PP_SLOT_TEMP_4 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_4 3\n# elif BOOST_PP_SLOT_TEMP_4 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_4 4\n# elif BOOST_PP_SLOT_TEMP_4 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_4 5\n# elif BOOST_PP_SLOT_TEMP_4 == 6\n#    
define BOOST_PP_SLOT_1_DIGIT_4 6\n# elif BOOST_PP_SLOT_TEMP_4 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_4 7\n# elif BOOST_PP_SLOT_TEMP_4 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_4 8\n# elif BOOST_PP_SLOT_TEMP_4 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_4 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_SLOT_1_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_SLOT_1_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_SLOT_1_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_SLOT_1_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#   
 define BOOST_PP_SLOT_1_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_SLOT_1_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_SLOT_1_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_SLOT_1_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_SLOT_1_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_SLOT_1_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_SLOT_1_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_SLOT_1_DIGIT_10\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_CC_10(BOOST_PP_SLOT_1_DIGIT_10, BOOST_PP_SLOT_1_DIGIT_9, BOOST_PP_SLOT_1_DIGIT_8, BOOST_PP_SLOT_1_DIGIT_7, BOOST_PP_SLOT_1_DIGIT_6, BOOST_PP_SLOT_1_DIGIT_5, BOOST_PP_SLOT_1_DIGIT_4, BOOST_PP_SLOT_1_DIGIT_3, BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# elif BOOST_PP_SLOT_1_DIGIT_9\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_CC_9(BOOST_PP_SLOT_1_DIGIT_9, BOOST_PP_SLOT_1_DIGIT_8, BOOST_PP_SLOT_1_DIGIT_7, BOOST_PP_SLOT_1_DIGIT_6, BOOST_PP_SLOT_1_DIGIT_5, BOOST_PP_SLOT_1_DIGIT_4, BOOST_PP_SLOT_1_DIGIT_3, BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# elif BOOST_PP_SLOT_1_DIGIT_8\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_CC_8(BOOST_PP_SLOT_1_DIGIT_8, BOOST_PP_SLOT_1_DIGIT_7, BOOST_PP_SLOT_1_DIGIT_6, BOOST_PP_SLOT_1_DIGIT_5, BOOST_PP_SLOT_1_DIGIT_4, BOOST_PP_SLOT_1_DIGIT_3, BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# elif BOOST_PP_SLOT_1_DIGIT_7\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_CC_7(BOOST_PP_SLOT_1_DIGIT_7, BOOST_PP_SLOT_1_DIGIT_6, BOOST_PP_SLOT_1_DIGIT_5, BOOST_PP_SLOT_1_DIGIT_4, BOOST_PP_SLOT_1_DIGIT_3, BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# elif BOOST_PP_SLOT_1_DIGIT_6\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_CC_6(BOOST_PP_SLOT_1_DIGIT_6, BOOST_PP_SLOT_1_DIGIT_5, BOOST_PP_SLOT_1_DIGIT_4, BOOST_PP_SLOT_1_DIGIT_3, BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# elif BOOST_PP_SLOT_1_DIGIT_5\n#    define BOOST_PP_SLOT_1() 
BOOST_PP_SLOT_CC_5(BOOST_PP_SLOT_1_DIGIT_5, BOOST_PP_SLOT_1_DIGIT_4, BOOST_PP_SLOT_1_DIGIT_3, BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# elif BOOST_PP_SLOT_1_DIGIT_4\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_CC_4(BOOST_PP_SLOT_1_DIGIT_4, BOOST_PP_SLOT_1_DIGIT_3, BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# elif BOOST_PP_SLOT_1_DIGIT_3\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_CC_3(BOOST_PP_SLOT_1_DIGIT_3, BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# elif BOOST_PP_SLOT_1_DIGIT_2\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_CC_2(BOOST_PP_SLOT_1_DIGIT_2, BOOST_PP_SLOT_1_DIGIT_1)\n# else\n#    define BOOST_PP_SLOT_1() BOOST_PP_SLOT_1_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/detail/slot2.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_SLOT_2\n#\n# undef BOOST_PP_SLOT_2_DIGIT_1\n# undef BOOST_PP_SLOT_2_DIGIT_2\n# undef BOOST_PP_SLOT_2_DIGIT_3\n# undef BOOST_PP_SLOT_2_DIGIT_4\n# undef BOOST_PP_SLOT_2_DIGIT_5\n# undef BOOST_PP_SLOT_2_DIGIT_6\n# undef BOOST_PP_SLOT_2_DIGIT_7\n# undef BOOST_PP_SLOT_2_DIGIT_8\n# undef BOOST_PP_SLOT_2_DIGIT_9\n# undef BOOST_PP_SLOT_2_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_10 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_10 0\n# elif BOOST_PP_SLOT_TEMP_10 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_10 1\n# elif BOOST_PP_SLOT_TEMP_10 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_10 2\n# elif BOOST_PP_SLOT_TEMP_10 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_10 3\n# elif BOOST_PP_SLOT_TEMP_10 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_10 4\n# elif BOOST_PP_SLOT_TEMP_10 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_10 5\n# elif BOOST_PP_SLOT_TEMP_10 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_10 6\n# elif BOOST_PP_SLOT_TEMP_10 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_10 7\n# elif BOOST_PP_SLOT_TEMP_10 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_10 8\n# elif BOOST_PP_SLOT_TEMP_10 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_10 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_9 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_9 0\n# elif BOOST_PP_SLOT_TEMP_9 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_9 1\n# elif BOOST_PP_SLOT_TEMP_9 == 2\n#    define 
BOOST_PP_SLOT_2_DIGIT_9 2\n# elif BOOST_PP_SLOT_TEMP_9 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_9 3\n# elif BOOST_PP_SLOT_TEMP_9 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_9 4\n# elif BOOST_PP_SLOT_TEMP_9 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_9 5\n# elif BOOST_PP_SLOT_TEMP_9 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_9 6\n# elif BOOST_PP_SLOT_TEMP_9 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_9 7\n# elif BOOST_PP_SLOT_TEMP_9 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_9 8\n# elif BOOST_PP_SLOT_TEMP_9 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_9 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_8 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_8 0\n# elif BOOST_PP_SLOT_TEMP_8 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_8 1\n# elif BOOST_PP_SLOT_TEMP_8 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_8 2\n# elif BOOST_PP_SLOT_TEMP_8 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_8 3\n# elif BOOST_PP_SLOT_TEMP_8 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_8 4\n# elif BOOST_PP_SLOT_TEMP_8 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_8 5\n# elif BOOST_PP_SLOT_TEMP_8 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_8 6\n# elif BOOST_PP_SLOT_TEMP_8 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_8 7\n# elif BOOST_PP_SLOT_TEMP_8 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_8 8\n# elif BOOST_PP_SLOT_TEMP_8 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_8 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_7 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_7 0\n# elif BOOST_PP_SLOT_TEMP_7 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_7 1\n# elif BOOST_PP_SLOT_TEMP_7 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_7 2\n# elif BOOST_PP_SLOT_TEMP_7 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_7 3\n# elif BOOST_PP_SLOT_TEMP_7 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_7 4\n# elif BOOST_PP_SLOT_TEMP_7 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_7 5\n# elif BOOST_PP_SLOT_TEMP_7 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_7 6\n# elif BOOST_PP_SLOT_TEMP_7 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_7 7\n# elif BOOST_PP_SLOT_TEMP_7 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_7 8\n# elif BOOST_PP_SLOT_TEMP_7 == 9\n#    define 
BOOST_PP_SLOT_2_DIGIT_7 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_6 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_6 0\n# elif BOOST_PP_SLOT_TEMP_6 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_6 1\n# elif BOOST_PP_SLOT_TEMP_6 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_6 2\n# elif BOOST_PP_SLOT_TEMP_6 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_6 3\n# elif BOOST_PP_SLOT_TEMP_6 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_6 4\n# elif BOOST_PP_SLOT_TEMP_6 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_6 5\n# elif BOOST_PP_SLOT_TEMP_6 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_6 6\n# elif BOOST_PP_SLOT_TEMP_6 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_6 7\n# elif BOOST_PP_SLOT_TEMP_6 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_6 8\n# elif BOOST_PP_SLOT_TEMP_6 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_6 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_5 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_5 0\n# elif BOOST_PP_SLOT_TEMP_5 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_5 1\n# elif BOOST_PP_SLOT_TEMP_5 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_5 2\n# elif BOOST_PP_SLOT_TEMP_5 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_5 3\n# elif BOOST_PP_SLOT_TEMP_5 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_5 4\n# elif BOOST_PP_SLOT_TEMP_5 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_5 5\n# elif BOOST_PP_SLOT_TEMP_5 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_5 6\n# elif BOOST_PP_SLOT_TEMP_5 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_5 7\n# elif BOOST_PP_SLOT_TEMP_5 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_5 8\n# elif BOOST_PP_SLOT_TEMP_5 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_5 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_4 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_4 0\n# elif BOOST_PP_SLOT_TEMP_4 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_4 1\n# elif BOOST_PP_SLOT_TEMP_4 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_4 2\n# elif BOOST_PP_SLOT_TEMP_4 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_4 3\n# elif BOOST_PP_SLOT_TEMP_4 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_4 4\n# elif BOOST_PP_SLOT_TEMP_4 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_4 5\n# elif BOOST_PP_SLOT_TEMP_4 == 6\n#    
define BOOST_PP_SLOT_2_DIGIT_4 6\n# elif BOOST_PP_SLOT_TEMP_4 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_4 7\n# elif BOOST_PP_SLOT_TEMP_4 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_4 8\n# elif BOOST_PP_SLOT_TEMP_4 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_4 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_SLOT_2_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_SLOT_2_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_SLOT_2_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_SLOT_2_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#   
 define BOOST_PP_SLOT_2_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_SLOT_2_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_SLOT_2_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_SLOT_2_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_SLOT_2_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_SLOT_2_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_SLOT_2_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_SLOT_2_DIGIT_10\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_CC_10(BOOST_PP_SLOT_2_DIGIT_10, BOOST_PP_SLOT_2_DIGIT_9, BOOST_PP_SLOT_2_DIGIT_8, BOOST_PP_SLOT_2_DIGIT_7, BOOST_PP_SLOT_2_DIGIT_6, BOOST_PP_SLOT_2_DIGIT_5, BOOST_PP_SLOT_2_DIGIT_4, BOOST_PP_SLOT_2_DIGIT_3, BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# elif BOOST_PP_SLOT_2_DIGIT_9\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_CC_9(BOOST_PP_SLOT_2_DIGIT_9, BOOST_PP_SLOT_2_DIGIT_8, BOOST_PP_SLOT_2_DIGIT_7, BOOST_PP_SLOT_2_DIGIT_6, BOOST_PP_SLOT_2_DIGIT_5, BOOST_PP_SLOT_2_DIGIT_4, BOOST_PP_SLOT_2_DIGIT_3, BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# elif BOOST_PP_SLOT_2_DIGIT_8\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_CC_8(BOOST_PP_SLOT_2_DIGIT_8, BOOST_PP_SLOT_2_DIGIT_7, BOOST_PP_SLOT_2_DIGIT_6, BOOST_PP_SLOT_2_DIGIT_5, BOOST_PP_SLOT_2_DIGIT_4, BOOST_PP_SLOT_2_DIGIT_3, BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# elif BOOST_PP_SLOT_2_DIGIT_7\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_CC_7(BOOST_PP_SLOT_2_DIGIT_7, BOOST_PP_SLOT_2_DIGIT_6, BOOST_PP_SLOT_2_DIGIT_5, BOOST_PP_SLOT_2_DIGIT_4, BOOST_PP_SLOT_2_DIGIT_3, BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# elif BOOST_PP_SLOT_2_DIGIT_6\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_CC_6(BOOST_PP_SLOT_2_DIGIT_6, BOOST_PP_SLOT_2_DIGIT_5, BOOST_PP_SLOT_2_DIGIT_4, BOOST_PP_SLOT_2_DIGIT_3, BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# elif BOOST_PP_SLOT_2_DIGIT_5\n#    define BOOST_PP_SLOT_2() 
BOOST_PP_SLOT_CC_5(BOOST_PP_SLOT_2_DIGIT_5, BOOST_PP_SLOT_2_DIGIT_4, BOOST_PP_SLOT_2_DIGIT_3, BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# elif BOOST_PP_SLOT_2_DIGIT_4\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_CC_4(BOOST_PP_SLOT_2_DIGIT_4, BOOST_PP_SLOT_2_DIGIT_3, BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# elif BOOST_PP_SLOT_2_DIGIT_3\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_CC_3(BOOST_PP_SLOT_2_DIGIT_3, BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# elif BOOST_PP_SLOT_2_DIGIT_2\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_CC_2(BOOST_PP_SLOT_2_DIGIT_2, BOOST_PP_SLOT_2_DIGIT_1)\n# else\n#    define BOOST_PP_SLOT_2() BOOST_PP_SLOT_2_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/detail/slot3.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_SLOT_3\n#\n# undef BOOST_PP_SLOT_3_DIGIT_1\n# undef BOOST_PP_SLOT_3_DIGIT_2\n# undef BOOST_PP_SLOT_3_DIGIT_3\n# undef BOOST_PP_SLOT_3_DIGIT_4\n# undef BOOST_PP_SLOT_3_DIGIT_5\n# undef BOOST_PP_SLOT_3_DIGIT_6\n# undef BOOST_PP_SLOT_3_DIGIT_7\n# undef BOOST_PP_SLOT_3_DIGIT_8\n# undef BOOST_PP_SLOT_3_DIGIT_9\n# undef BOOST_PP_SLOT_3_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_10 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_10 0\n# elif BOOST_PP_SLOT_TEMP_10 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_10 1\n# elif BOOST_PP_SLOT_TEMP_10 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_10 2\n# elif BOOST_PP_SLOT_TEMP_10 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_10 3\n# elif BOOST_PP_SLOT_TEMP_10 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_10 4\n# elif BOOST_PP_SLOT_TEMP_10 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_10 5\n# elif BOOST_PP_SLOT_TEMP_10 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_10 6\n# elif BOOST_PP_SLOT_TEMP_10 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_10 7\n# elif BOOST_PP_SLOT_TEMP_10 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_10 8\n# elif BOOST_PP_SLOT_TEMP_10 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_10 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_9 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_9 0\n# elif BOOST_PP_SLOT_TEMP_9 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_9 1\n# elif BOOST_PP_SLOT_TEMP_9 == 2\n#    define 
BOOST_PP_SLOT_3_DIGIT_9 2\n# elif BOOST_PP_SLOT_TEMP_9 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_9 3\n# elif BOOST_PP_SLOT_TEMP_9 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_9 4\n# elif BOOST_PP_SLOT_TEMP_9 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_9 5\n# elif BOOST_PP_SLOT_TEMP_9 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_9 6\n# elif BOOST_PP_SLOT_TEMP_9 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_9 7\n# elif BOOST_PP_SLOT_TEMP_9 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_9 8\n# elif BOOST_PP_SLOT_TEMP_9 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_9 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_8 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_8 0\n# elif BOOST_PP_SLOT_TEMP_8 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_8 1\n# elif BOOST_PP_SLOT_TEMP_8 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_8 2\n# elif BOOST_PP_SLOT_TEMP_8 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_8 3\n# elif BOOST_PP_SLOT_TEMP_8 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_8 4\n# elif BOOST_PP_SLOT_TEMP_8 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_8 5\n# elif BOOST_PP_SLOT_TEMP_8 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_8 6\n# elif BOOST_PP_SLOT_TEMP_8 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_8 7\n# elif BOOST_PP_SLOT_TEMP_8 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_8 8\n# elif BOOST_PP_SLOT_TEMP_8 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_8 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_7 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_7 0\n# elif BOOST_PP_SLOT_TEMP_7 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_7 1\n# elif BOOST_PP_SLOT_TEMP_7 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_7 2\n# elif BOOST_PP_SLOT_TEMP_7 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_7 3\n# elif BOOST_PP_SLOT_TEMP_7 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_7 4\n# elif BOOST_PP_SLOT_TEMP_7 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_7 5\n# elif BOOST_PP_SLOT_TEMP_7 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_7 6\n# elif BOOST_PP_SLOT_TEMP_7 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_7 7\n# elif BOOST_PP_SLOT_TEMP_7 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_7 8\n# elif BOOST_PP_SLOT_TEMP_7 == 9\n#    define 
BOOST_PP_SLOT_3_DIGIT_7 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_6 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_6 0\n# elif BOOST_PP_SLOT_TEMP_6 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_6 1\n# elif BOOST_PP_SLOT_TEMP_6 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_6 2\n# elif BOOST_PP_SLOT_TEMP_6 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_6 3\n# elif BOOST_PP_SLOT_TEMP_6 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_6 4\n# elif BOOST_PP_SLOT_TEMP_6 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_6 5\n# elif BOOST_PP_SLOT_TEMP_6 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_6 6\n# elif BOOST_PP_SLOT_TEMP_6 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_6 7\n# elif BOOST_PP_SLOT_TEMP_6 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_6 8\n# elif BOOST_PP_SLOT_TEMP_6 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_6 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_5 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_5 0\n# elif BOOST_PP_SLOT_TEMP_5 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_5 1\n# elif BOOST_PP_SLOT_TEMP_5 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_5 2\n# elif BOOST_PP_SLOT_TEMP_5 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_5 3\n# elif BOOST_PP_SLOT_TEMP_5 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_5 4\n# elif BOOST_PP_SLOT_TEMP_5 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_5 5\n# elif BOOST_PP_SLOT_TEMP_5 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_5 6\n# elif BOOST_PP_SLOT_TEMP_5 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_5 7\n# elif BOOST_PP_SLOT_TEMP_5 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_5 8\n# elif BOOST_PP_SLOT_TEMP_5 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_5 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_4 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_4 0\n# elif BOOST_PP_SLOT_TEMP_4 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_4 1\n# elif BOOST_PP_SLOT_TEMP_4 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_4 2\n# elif BOOST_PP_SLOT_TEMP_4 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_4 3\n# elif BOOST_PP_SLOT_TEMP_4 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_4 4\n# elif BOOST_PP_SLOT_TEMP_4 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_4 5\n# elif BOOST_PP_SLOT_TEMP_4 == 6\n#    
define BOOST_PP_SLOT_3_DIGIT_4 6\n# elif BOOST_PP_SLOT_TEMP_4 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_4 7\n# elif BOOST_PP_SLOT_TEMP_4 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_4 8\n# elif BOOST_PP_SLOT_TEMP_4 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_4 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_SLOT_3_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_SLOT_3_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_SLOT_3_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_SLOT_3_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#   
 define BOOST_PP_SLOT_3_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_SLOT_3_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_SLOT_3_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_SLOT_3_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_SLOT_3_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_SLOT_3_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_SLOT_3_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_SLOT_3_DIGIT_10\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_CC_10(BOOST_PP_SLOT_3_DIGIT_10, BOOST_PP_SLOT_3_DIGIT_9, BOOST_PP_SLOT_3_DIGIT_8, BOOST_PP_SLOT_3_DIGIT_7, BOOST_PP_SLOT_3_DIGIT_6, BOOST_PP_SLOT_3_DIGIT_5, BOOST_PP_SLOT_3_DIGIT_4, BOOST_PP_SLOT_3_DIGIT_3, BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# elif BOOST_PP_SLOT_3_DIGIT_9\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_CC_9(BOOST_PP_SLOT_3_DIGIT_9, BOOST_PP_SLOT_3_DIGIT_8, BOOST_PP_SLOT_3_DIGIT_7, BOOST_PP_SLOT_3_DIGIT_6, BOOST_PP_SLOT_3_DIGIT_5, BOOST_PP_SLOT_3_DIGIT_4, BOOST_PP_SLOT_3_DIGIT_3, BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# elif BOOST_PP_SLOT_3_DIGIT_8\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_CC_8(BOOST_PP_SLOT_3_DIGIT_8, BOOST_PP_SLOT_3_DIGIT_7, BOOST_PP_SLOT_3_DIGIT_6, BOOST_PP_SLOT_3_DIGIT_5, BOOST_PP_SLOT_3_DIGIT_4, BOOST_PP_SLOT_3_DIGIT_3, BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# elif BOOST_PP_SLOT_3_DIGIT_7\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_CC_7(BOOST_PP_SLOT_3_DIGIT_7, BOOST_PP_SLOT_3_DIGIT_6, BOOST_PP_SLOT_3_DIGIT_5, BOOST_PP_SLOT_3_DIGIT_4, BOOST_PP_SLOT_3_DIGIT_3, BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# elif BOOST_PP_SLOT_3_DIGIT_6\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_CC_6(BOOST_PP_SLOT_3_DIGIT_6, BOOST_PP_SLOT_3_DIGIT_5, BOOST_PP_SLOT_3_DIGIT_4, BOOST_PP_SLOT_3_DIGIT_3, BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# elif BOOST_PP_SLOT_3_DIGIT_5\n#    define BOOST_PP_SLOT_3() 
BOOST_PP_SLOT_CC_5(BOOST_PP_SLOT_3_DIGIT_5, BOOST_PP_SLOT_3_DIGIT_4, BOOST_PP_SLOT_3_DIGIT_3, BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# elif BOOST_PP_SLOT_3_DIGIT_4\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_CC_4(BOOST_PP_SLOT_3_DIGIT_4, BOOST_PP_SLOT_3_DIGIT_3, BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# elif BOOST_PP_SLOT_3_DIGIT_3\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_CC_3(BOOST_PP_SLOT_3_DIGIT_3, BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# elif BOOST_PP_SLOT_3_DIGIT_2\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_CC_2(BOOST_PP_SLOT_3_DIGIT_2, BOOST_PP_SLOT_3_DIGIT_1)\n# else\n#    define BOOST_PP_SLOT_3() BOOST_PP_SLOT_3_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/detail/slot4.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_SLOT_4\n#\n# undef BOOST_PP_SLOT_4_DIGIT_1\n# undef BOOST_PP_SLOT_4_DIGIT_2\n# undef BOOST_PP_SLOT_4_DIGIT_3\n# undef BOOST_PP_SLOT_4_DIGIT_4\n# undef BOOST_PP_SLOT_4_DIGIT_5\n# undef BOOST_PP_SLOT_4_DIGIT_6\n# undef BOOST_PP_SLOT_4_DIGIT_7\n# undef BOOST_PP_SLOT_4_DIGIT_8\n# undef BOOST_PP_SLOT_4_DIGIT_9\n# undef BOOST_PP_SLOT_4_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_10 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_10 0\n# elif BOOST_PP_SLOT_TEMP_10 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_10 1\n# elif BOOST_PP_SLOT_TEMP_10 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_10 2\n# elif BOOST_PP_SLOT_TEMP_10 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_10 3\n# elif BOOST_PP_SLOT_TEMP_10 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_10 4\n# elif BOOST_PP_SLOT_TEMP_10 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_10 5\n# elif BOOST_PP_SLOT_TEMP_10 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_10 6\n# elif BOOST_PP_SLOT_TEMP_10 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_10 7\n# elif BOOST_PP_SLOT_TEMP_10 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_10 8\n# elif BOOST_PP_SLOT_TEMP_10 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_10 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_9 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_9 0\n# elif BOOST_PP_SLOT_TEMP_9 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_9 1\n# elif BOOST_PP_SLOT_TEMP_9 == 2\n#    define 
BOOST_PP_SLOT_4_DIGIT_9 2\n# elif BOOST_PP_SLOT_TEMP_9 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_9 3\n# elif BOOST_PP_SLOT_TEMP_9 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_9 4\n# elif BOOST_PP_SLOT_TEMP_9 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_9 5\n# elif BOOST_PP_SLOT_TEMP_9 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_9 6\n# elif BOOST_PP_SLOT_TEMP_9 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_9 7\n# elif BOOST_PP_SLOT_TEMP_9 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_9 8\n# elif BOOST_PP_SLOT_TEMP_9 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_9 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_8 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_8 0\n# elif BOOST_PP_SLOT_TEMP_8 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_8 1\n# elif BOOST_PP_SLOT_TEMP_8 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_8 2\n# elif BOOST_PP_SLOT_TEMP_8 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_8 3\n# elif BOOST_PP_SLOT_TEMP_8 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_8 4\n# elif BOOST_PP_SLOT_TEMP_8 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_8 5\n# elif BOOST_PP_SLOT_TEMP_8 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_8 6\n# elif BOOST_PP_SLOT_TEMP_8 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_8 7\n# elif BOOST_PP_SLOT_TEMP_8 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_8 8\n# elif BOOST_PP_SLOT_TEMP_8 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_8 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_7 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_7 0\n# elif BOOST_PP_SLOT_TEMP_7 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_7 1\n# elif BOOST_PP_SLOT_TEMP_7 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_7 2\n# elif BOOST_PP_SLOT_TEMP_7 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_7 3\n# elif BOOST_PP_SLOT_TEMP_7 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_7 4\n# elif BOOST_PP_SLOT_TEMP_7 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_7 5\n# elif BOOST_PP_SLOT_TEMP_7 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_7 6\n# elif BOOST_PP_SLOT_TEMP_7 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_7 7\n# elif BOOST_PP_SLOT_TEMP_7 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_7 8\n# elif BOOST_PP_SLOT_TEMP_7 == 9\n#    define 
BOOST_PP_SLOT_4_DIGIT_7 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_6 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_6 0\n# elif BOOST_PP_SLOT_TEMP_6 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_6 1\n# elif BOOST_PP_SLOT_TEMP_6 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_6 2\n# elif BOOST_PP_SLOT_TEMP_6 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_6 3\n# elif BOOST_PP_SLOT_TEMP_6 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_6 4\n# elif BOOST_PP_SLOT_TEMP_6 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_6 5\n# elif BOOST_PP_SLOT_TEMP_6 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_6 6\n# elif BOOST_PP_SLOT_TEMP_6 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_6 7\n# elif BOOST_PP_SLOT_TEMP_6 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_6 8\n# elif BOOST_PP_SLOT_TEMP_6 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_6 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_5 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_5 0\n# elif BOOST_PP_SLOT_TEMP_5 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_5 1\n# elif BOOST_PP_SLOT_TEMP_5 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_5 2\n# elif BOOST_PP_SLOT_TEMP_5 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_5 3\n# elif BOOST_PP_SLOT_TEMP_5 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_5 4\n# elif BOOST_PP_SLOT_TEMP_5 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_5 5\n# elif BOOST_PP_SLOT_TEMP_5 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_5 6\n# elif BOOST_PP_SLOT_TEMP_5 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_5 7\n# elif BOOST_PP_SLOT_TEMP_5 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_5 8\n# elif BOOST_PP_SLOT_TEMP_5 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_5 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_4 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_4 0\n# elif BOOST_PP_SLOT_TEMP_4 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_4 1\n# elif BOOST_PP_SLOT_TEMP_4 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_4 2\n# elif BOOST_PP_SLOT_TEMP_4 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_4 3\n# elif BOOST_PP_SLOT_TEMP_4 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_4 4\n# elif BOOST_PP_SLOT_TEMP_4 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_4 5\n# elif BOOST_PP_SLOT_TEMP_4 == 6\n#    
define BOOST_PP_SLOT_4_DIGIT_4 6\n# elif BOOST_PP_SLOT_TEMP_4 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_4 7\n# elif BOOST_PP_SLOT_TEMP_4 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_4 8\n# elif BOOST_PP_SLOT_TEMP_4 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_4 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_SLOT_4_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_SLOT_4_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_SLOT_4_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_SLOT_4_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#   
 define BOOST_PP_SLOT_4_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_SLOT_4_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_SLOT_4_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_SLOT_4_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_SLOT_4_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_SLOT_4_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_SLOT_4_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_SLOT_4_DIGIT_10\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_CC_10(BOOST_PP_SLOT_4_DIGIT_10, BOOST_PP_SLOT_4_DIGIT_9, BOOST_PP_SLOT_4_DIGIT_8, BOOST_PP_SLOT_4_DIGIT_7, BOOST_PP_SLOT_4_DIGIT_6, BOOST_PP_SLOT_4_DIGIT_5, BOOST_PP_SLOT_4_DIGIT_4, BOOST_PP_SLOT_4_DIGIT_3, BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# elif BOOST_PP_SLOT_4_DIGIT_9\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_CC_9(BOOST_PP_SLOT_4_DIGIT_9, BOOST_PP_SLOT_4_DIGIT_8, BOOST_PP_SLOT_4_DIGIT_7, BOOST_PP_SLOT_4_DIGIT_6, BOOST_PP_SLOT_4_DIGIT_5, BOOST_PP_SLOT_4_DIGIT_4, BOOST_PP_SLOT_4_DIGIT_3, BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# elif BOOST_PP_SLOT_4_DIGIT_8\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_CC_8(BOOST_PP_SLOT_4_DIGIT_8, BOOST_PP_SLOT_4_DIGIT_7, BOOST_PP_SLOT_4_DIGIT_6, BOOST_PP_SLOT_4_DIGIT_5, BOOST_PP_SLOT_4_DIGIT_4, BOOST_PP_SLOT_4_DIGIT_3, BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# elif BOOST_PP_SLOT_4_DIGIT_7\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_CC_7(BOOST_PP_SLOT_4_DIGIT_7, BOOST_PP_SLOT_4_DIGIT_6, BOOST_PP_SLOT_4_DIGIT_5, BOOST_PP_SLOT_4_DIGIT_4, BOOST_PP_SLOT_4_DIGIT_3, BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# elif BOOST_PP_SLOT_4_DIGIT_6\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_CC_6(BOOST_PP_SLOT_4_DIGIT_6, BOOST_PP_SLOT_4_DIGIT_5, BOOST_PP_SLOT_4_DIGIT_4, BOOST_PP_SLOT_4_DIGIT_3, BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# elif BOOST_PP_SLOT_4_DIGIT_5\n#    define BOOST_PP_SLOT_4() 
BOOST_PP_SLOT_CC_5(BOOST_PP_SLOT_4_DIGIT_5, BOOST_PP_SLOT_4_DIGIT_4, BOOST_PP_SLOT_4_DIGIT_3, BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# elif BOOST_PP_SLOT_4_DIGIT_4\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_CC_4(BOOST_PP_SLOT_4_DIGIT_4, BOOST_PP_SLOT_4_DIGIT_3, BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# elif BOOST_PP_SLOT_4_DIGIT_3\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_CC_3(BOOST_PP_SLOT_4_DIGIT_3, BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# elif BOOST_PP_SLOT_4_DIGIT_2\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_CC_2(BOOST_PP_SLOT_4_DIGIT_2, BOOST_PP_SLOT_4_DIGIT_1)\n# else\n#    define BOOST_PP_SLOT_4() BOOST_PP_SLOT_4_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/detail/slot5.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# include <boost/preprocessor/slot/detail/shared.hpp>\n#\n# undef BOOST_PP_SLOT_5\n#\n# undef BOOST_PP_SLOT_5_DIGIT_1\n# undef BOOST_PP_SLOT_5_DIGIT_2\n# undef BOOST_PP_SLOT_5_DIGIT_3\n# undef BOOST_PP_SLOT_5_DIGIT_4\n# undef BOOST_PP_SLOT_5_DIGIT_5\n# undef BOOST_PP_SLOT_5_DIGIT_6\n# undef BOOST_PP_SLOT_5_DIGIT_7\n# undef BOOST_PP_SLOT_5_DIGIT_8\n# undef BOOST_PP_SLOT_5_DIGIT_9\n# undef BOOST_PP_SLOT_5_DIGIT_10\n#\n# if BOOST_PP_SLOT_TEMP_10 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_10 0\n# elif BOOST_PP_SLOT_TEMP_10 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_10 1\n# elif BOOST_PP_SLOT_TEMP_10 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_10 2\n# elif BOOST_PP_SLOT_TEMP_10 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_10 3\n# elif BOOST_PP_SLOT_TEMP_10 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_10 4\n# elif BOOST_PP_SLOT_TEMP_10 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_10 5\n# elif BOOST_PP_SLOT_TEMP_10 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_10 6\n# elif BOOST_PP_SLOT_TEMP_10 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_10 7\n# elif BOOST_PP_SLOT_TEMP_10 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_10 8\n# elif BOOST_PP_SLOT_TEMP_10 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_10 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_9 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_9 0\n# elif BOOST_PP_SLOT_TEMP_9 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_9 1\n# elif BOOST_PP_SLOT_TEMP_9 == 2\n#    define 
BOOST_PP_SLOT_5_DIGIT_9 2\n# elif BOOST_PP_SLOT_TEMP_9 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_9 3\n# elif BOOST_PP_SLOT_TEMP_9 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_9 4\n# elif BOOST_PP_SLOT_TEMP_9 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_9 5\n# elif BOOST_PP_SLOT_TEMP_9 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_9 6\n# elif BOOST_PP_SLOT_TEMP_9 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_9 7\n# elif BOOST_PP_SLOT_TEMP_9 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_9 8\n# elif BOOST_PP_SLOT_TEMP_9 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_9 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_8 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_8 0\n# elif BOOST_PP_SLOT_TEMP_8 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_8 1\n# elif BOOST_PP_SLOT_TEMP_8 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_8 2\n# elif BOOST_PP_SLOT_TEMP_8 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_8 3\n# elif BOOST_PP_SLOT_TEMP_8 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_8 4\n# elif BOOST_PP_SLOT_TEMP_8 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_8 5\n# elif BOOST_PP_SLOT_TEMP_8 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_8 6\n# elif BOOST_PP_SLOT_TEMP_8 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_8 7\n# elif BOOST_PP_SLOT_TEMP_8 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_8 8\n# elif BOOST_PP_SLOT_TEMP_8 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_8 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_7 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_7 0\n# elif BOOST_PP_SLOT_TEMP_7 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_7 1\n# elif BOOST_PP_SLOT_TEMP_7 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_7 2\n# elif BOOST_PP_SLOT_TEMP_7 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_7 3\n# elif BOOST_PP_SLOT_TEMP_7 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_7 4\n# elif BOOST_PP_SLOT_TEMP_7 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_7 5\n# elif BOOST_PP_SLOT_TEMP_7 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_7 6\n# elif BOOST_PP_SLOT_TEMP_7 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_7 7\n# elif BOOST_PP_SLOT_TEMP_7 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_7 8\n# elif BOOST_PP_SLOT_TEMP_7 == 9\n#    define 
BOOST_PP_SLOT_5_DIGIT_7 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_6 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_6 0\n# elif BOOST_PP_SLOT_TEMP_6 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_6 1\n# elif BOOST_PP_SLOT_TEMP_6 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_6 2\n# elif BOOST_PP_SLOT_TEMP_6 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_6 3\n# elif BOOST_PP_SLOT_TEMP_6 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_6 4\n# elif BOOST_PP_SLOT_TEMP_6 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_6 5\n# elif BOOST_PP_SLOT_TEMP_6 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_6 6\n# elif BOOST_PP_SLOT_TEMP_6 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_6 7\n# elif BOOST_PP_SLOT_TEMP_6 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_6 8\n# elif BOOST_PP_SLOT_TEMP_6 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_6 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_5 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_5 0\n# elif BOOST_PP_SLOT_TEMP_5 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_5 1\n# elif BOOST_PP_SLOT_TEMP_5 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_5 2\n# elif BOOST_PP_SLOT_TEMP_5 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_5 3\n# elif BOOST_PP_SLOT_TEMP_5 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_5 4\n# elif BOOST_PP_SLOT_TEMP_5 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_5 5\n# elif BOOST_PP_SLOT_TEMP_5 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_5 6\n# elif BOOST_PP_SLOT_TEMP_5 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_5 7\n# elif BOOST_PP_SLOT_TEMP_5 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_5 8\n# elif BOOST_PP_SLOT_TEMP_5 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_5 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_4 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_4 0\n# elif BOOST_PP_SLOT_TEMP_4 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_4 1\n# elif BOOST_PP_SLOT_TEMP_4 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_4 2\n# elif BOOST_PP_SLOT_TEMP_4 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_4 3\n# elif BOOST_PP_SLOT_TEMP_4 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_4 4\n# elif BOOST_PP_SLOT_TEMP_4 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_4 5\n# elif BOOST_PP_SLOT_TEMP_4 == 6\n#    
define BOOST_PP_SLOT_5_DIGIT_4 6\n# elif BOOST_PP_SLOT_TEMP_4 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_4 7\n# elif BOOST_PP_SLOT_TEMP_4 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_4 8\n# elif BOOST_PP_SLOT_TEMP_4 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_4 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_3 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_3 0\n# elif BOOST_PP_SLOT_TEMP_3 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_3 1\n# elif BOOST_PP_SLOT_TEMP_3 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_3 2\n# elif BOOST_PP_SLOT_TEMP_3 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_3 3\n# elif BOOST_PP_SLOT_TEMP_3 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_3 4\n# elif BOOST_PP_SLOT_TEMP_3 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_3 5\n# elif BOOST_PP_SLOT_TEMP_3 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_3 6\n# elif BOOST_PP_SLOT_TEMP_3 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_3 7\n# elif BOOST_PP_SLOT_TEMP_3 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_3 8\n# elif BOOST_PP_SLOT_TEMP_3 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_3 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_2 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_2 0\n# elif BOOST_PP_SLOT_TEMP_2 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_2 1\n# elif BOOST_PP_SLOT_TEMP_2 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_2 2\n# elif BOOST_PP_SLOT_TEMP_2 == 3\n#    define BOOST_PP_SLOT_5_DIGIT_2 3\n# elif BOOST_PP_SLOT_TEMP_2 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_2 4\n# elif BOOST_PP_SLOT_TEMP_2 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_2 5\n# elif BOOST_PP_SLOT_TEMP_2 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_2 6\n# elif BOOST_PP_SLOT_TEMP_2 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_2 7\n# elif BOOST_PP_SLOT_TEMP_2 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_2 8\n# elif BOOST_PP_SLOT_TEMP_2 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_2 9\n# endif\n#\n# if BOOST_PP_SLOT_TEMP_1 == 0\n#    define BOOST_PP_SLOT_5_DIGIT_1 0\n# elif BOOST_PP_SLOT_TEMP_1 == 1\n#    define BOOST_PP_SLOT_5_DIGIT_1 1\n# elif BOOST_PP_SLOT_TEMP_1 == 2\n#    define BOOST_PP_SLOT_5_DIGIT_1 2\n# elif BOOST_PP_SLOT_TEMP_1 == 3\n#   
 define BOOST_PP_SLOT_5_DIGIT_1 3\n# elif BOOST_PP_SLOT_TEMP_1 == 4\n#    define BOOST_PP_SLOT_5_DIGIT_1 4\n# elif BOOST_PP_SLOT_TEMP_1 == 5\n#    define BOOST_PP_SLOT_5_DIGIT_1 5\n# elif BOOST_PP_SLOT_TEMP_1 == 6\n#    define BOOST_PP_SLOT_5_DIGIT_1 6\n# elif BOOST_PP_SLOT_TEMP_1 == 7\n#    define BOOST_PP_SLOT_5_DIGIT_1 7\n# elif BOOST_PP_SLOT_TEMP_1 == 8\n#    define BOOST_PP_SLOT_5_DIGIT_1 8\n# elif BOOST_PP_SLOT_TEMP_1 == 9\n#    define BOOST_PP_SLOT_5_DIGIT_1 9\n# endif\n#\n# if BOOST_PP_SLOT_5_DIGIT_10\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_CC_10(BOOST_PP_SLOT_5_DIGIT_10, BOOST_PP_SLOT_5_DIGIT_9, BOOST_PP_SLOT_5_DIGIT_8, BOOST_PP_SLOT_5_DIGIT_7, BOOST_PP_SLOT_5_DIGIT_6, BOOST_PP_SLOT_5_DIGIT_5, BOOST_PP_SLOT_5_DIGIT_4, BOOST_PP_SLOT_5_DIGIT_3, BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# elif BOOST_PP_SLOT_5_DIGIT_9\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_CC_9(BOOST_PP_SLOT_5_DIGIT_9, BOOST_PP_SLOT_5_DIGIT_8, BOOST_PP_SLOT_5_DIGIT_7, BOOST_PP_SLOT_5_DIGIT_6, BOOST_PP_SLOT_5_DIGIT_5, BOOST_PP_SLOT_5_DIGIT_4, BOOST_PP_SLOT_5_DIGIT_3, BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# elif BOOST_PP_SLOT_5_DIGIT_8\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_CC_8(BOOST_PP_SLOT_5_DIGIT_8, BOOST_PP_SLOT_5_DIGIT_7, BOOST_PP_SLOT_5_DIGIT_6, BOOST_PP_SLOT_5_DIGIT_5, BOOST_PP_SLOT_5_DIGIT_4, BOOST_PP_SLOT_5_DIGIT_3, BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# elif BOOST_PP_SLOT_5_DIGIT_7\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_CC_7(BOOST_PP_SLOT_5_DIGIT_7, BOOST_PP_SLOT_5_DIGIT_6, BOOST_PP_SLOT_5_DIGIT_5, BOOST_PP_SLOT_5_DIGIT_4, BOOST_PP_SLOT_5_DIGIT_3, BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# elif BOOST_PP_SLOT_5_DIGIT_6\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_CC_6(BOOST_PP_SLOT_5_DIGIT_6, BOOST_PP_SLOT_5_DIGIT_5, BOOST_PP_SLOT_5_DIGIT_4, BOOST_PP_SLOT_5_DIGIT_3, BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# elif BOOST_PP_SLOT_5_DIGIT_5\n#    define BOOST_PP_SLOT_5() 
BOOST_PP_SLOT_CC_5(BOOST_PP_SLOT_5_DIGIT_5, BOOST_PP_SLOT_5_DIGIT_4, BOOST_PP_SLOT_5_DIGIT_3, BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# elif BOOST_PP_SLOT_5_DIGIT_4\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_CC_4(BOOST_PP_SLOT_5_DIGIT_4, BOOST_PP_SLOT_5_DIGIT_3, BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# elif BOOST_PP_SLOT_5_DIGIT_3\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_CC_3(BOOST_PP_SLOT_5_DIGIT_3, BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# elif BOOST_PP_SLOT_5_DIGIT_2\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_CC_2(BOOST_PP_SLOT_5_DIGIT_2, BOOST_PP_SLOT_5_DIGIT_1)\n# else\n#    define BOOST_PP_SLOT_5() BOOST_PP_SLOT_5_DIGIT_1\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/slot/slot.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002.\n#  *     Distributed under the Boost Software License, Version 1.0. (See\n#  *     accompanying file LICENSE_1_0.txt or copy at\n#  *     http://www.boost.org/LICENSE_1_0.txt)\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_SLOT_SLOT_HPP\n# define BOOST_PREPROCESSOR_SLOT_SLOT_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/slot/detail/def.hpp>\n#\n# /* BOOST_PP_ASSIGN_SLOT */\n#\n# define BOOST_PP_ASSIGN_SLOT(i) BOOST_PP_CAT(BOOST_PP_ASSIGN_SLOT_, i)\n#\n# define BOOST_PP_ASSIGN_SLOT_1 <boost/preprocessor/slot/detail/slot1.hpp>\n# define BOOST_PP_ASSIGN_SLOT_2 <boost/preprocessor/slot/detail/slot2.hpp>\n# define BOOST_PP_ASSIGN_SLOT_3 <boost/preprocessor/slot/detail/slot3.hpp>\n# define BOOST_PP_ASSIGN_SLOT_4 <boost/preprocessor/slot/detail/slot4.hpp>\n# define BOOST_PP_ASSIGN_SLOT_5 <boost/preprocessor/slot/detail/slot5.hpp>\n#\n# /* BOOST_PP_SLOT */\n#\n# define BOOST_PP_SLOT(i) BOOST_PP_CAT(BOOST_PP_SLOT_, i)()\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/stringize.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_STRINGIZE_HPP\n# define BOOST_PREPROCESSOR_STRINGIZE_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_STRINGIZE */\n#\n# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#    define BOOST_PP_STRINGIZE(text) BOOST_PP_STRINGIZE_A((text))\n#    define BOOST_PP_STRINGIZE_A(arg) BOOST_PP_STRINGIZE_I arg\n# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#    define BOOST_PP_STRINGIZE(text) BOOST_PP_STRINGIZE_OO((text))\n#    define BOOST_PP_STRINGIZE_OO(par) BOOST_PP_STRINGIZE_I ## par\n# else\n#    define BOOST_PP_STRINGIZE(text) BOOST_PP_STRINGIZE_I(text)\n# endif\n#\n# define BOOST_PP_STRINGIZE_I(text) #text\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/tuple/detail/is_single_return.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2014.                                    *\n#  *     Distributed under the Boost Software License, Version 1.0. (See      *\n#  *     accompanying file LICENSE_1_0.txt or copy at                         *\n#  *     http://www.boost.org/LICENSE_1_0.txt)                                *\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_TUPLE_DETAIL_IS_SINGLE_RETURN_HPP\n# define BOOST_PREPROCESSOR_TUPLE_DETAIL_IS_SINGLE_RETURN_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_TUPLE_IS_SINGLE_RETURN */\n#\n# if BOOST_PP_VARIADICS && BOOST_PP_VARIADICS_MSVC\n# include <boost/preprocessor/control/iif.hpp>\n# include <boost/preprocessor/facilities/is_1.hpp>\n# include <boost/preprocessor/tuple/size.hpp>\n# define BOOST_PP_TUPLE_IS_SINGLE_RETURN(sr,nsr,tuple)\t\\\n\tBOOST_PP_IIF(BOOST_PP_IS_1(BOOST_PP_TUPLE_SIZE(tuple)),sr,nsr) \\\n\t/**/\n# endif /* BOOST_PP_VARIADICS && BOOST_PP_VARIADICS_MSVC */\n#\n# endif /* BOOST_PREPROCESSOR_TUPLE_DETAIL_IS_SINGLE_RETURN_HPP */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/tuple/eat.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002-2011) */\n# /* Revised by Edward Diener (2011,2015) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_TUPLE_EAT_HPP\n# define BOOST_PREPROCESSOR_TUPLE_EAT_HPP\n#\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_EAT */\n#\n# if BOOST_PP_VARIADICS\n#    define BOOST_PP_EAT(...)\n# else\n#    define BOOST_PP_EAT(x)\n# endif\n#\n# /* BOOST_PP_TUPLE_EAT */\n#\n# if BOOST_PP_VARIADICS\n#    define BOOST_PP_TUPLE_EAT(size) BOOST_PP_EAT\n# else\n#    if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#        define BOOST_PP_TUPLE_EAT(size) BOOST_PP_TUPLE_EAT_I(size)\n#    else\n#        define BOOST_PP_TUPLE_EAT(size) BOOST_PP_TUPLE_EAT_OO((size))\n#        define BOOST_PP_TUPLE_EAT_OO(par) BOOST_PP_TUPLE_EAT_I ## par\n#    endif\n#    define BOOST_PP_TUPLE_EAT_I(size) BOOST_PP_TUPLE_EAT_ ## size\n# endif\n#\n# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#     define BOOST_PP_TUPLE_EAT_N(size) BOOST_PP_TUPLE_EAT_N_I(size)\n# else\n#     define BOOST_PP_TUPLE_EAT_N(size) BOOST_PP_TUPLE_EAT_N_OO((size))\n#     define BOOST_PP_TUPLE_EAT_N_OO(par) BOOST_PP_TUPLE_EAT_N_I ## par\n# endif\n# define BOOST_PP_TUPLE_EAT_N_I(size) BOOST_PP_TUPLE_EAT_ ## size\n#\n# define BOOST_PP_TUPLE_EAT_1(e0)\n# define BOOST_PP_TUPLE_EAT_2(e0, e1)\n# define BOOST_PP_TUPLE_EAT_3(e0, e1, e2)\n# define BOOST_PP_TUPLE_EAT_4(e0, e1, e2, e3)\n# define BOOST_PP_TUPLE_EAT_5(e0, e1, e2, e3, e4)\n# define BOOST_PP_TUPLE_EAT_6(e0, e1, e2, e3, e4, e5)\n# define BOOST_PP_TUPLE_EAT_7(e0, e1, e2, e3, e4, e5, e6)\n# define BOOST_PP_TUPLE_EAT_8(e0, e1, e2, e3, e4, e5, e6, e7)\n# define BOOST_PP_TUPLE_EAT_9(e0, e1, e2, e3, e4, 
e5, e6, e7, e8)\n# define BOOST_PP_TUPLE_EAT_10(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9)\n# define BOOST_PP_TUPLE_EAT_11(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10)\n# define BOOST_PP_TUPLE_EAT_12(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11)\n# define BOOST_PP_TUPLE_EAT_13(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12)\n# define BOOST_PP_TUPLE_EAT_14(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13)\n# define BOOST_PP_TUPLE_EAT_15(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14)\n# define BOOST_PP_TUPLE_EAT_16(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15)\n# define BOOST_PP_TUPLE_EAT_17(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16)\n# define BOOST_PP_TUPLE_EAT_18(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17)\n# define BOOST_PP_TUPLE_EAT_19(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18)\n# define BOOST_PP_TUPLE_EAT_20(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19)\n# define BOOST_PP_TUPLE_EAT_21(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20)\n# define BOOST_PP_TUPLE_EAT_22(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21)\n# define BOOST_PP_TUPLE_EAT_23(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22)\n# define BOOST_PP_TUPLE_EAT_24(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23)\n# define BOOST_PP_TUPLE_EAT_25(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24)\n# define BOOST_PP_TUPLE_EAT_26(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25)\n# define BOOST_PP_TUPLE_EAT_27(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, 
e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26)\n# define BOOST_PP_TUPLE_EAT_28(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27)\n# define BOOST_PP_TUPLE_EAT_29(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28)\n# define BOOST_PP_TUPLE_EAT_30(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29)\n# define BOOST_PP_TUPLE_EAT_31(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30)\n# define BOOST_PP_TUPLE_EAT_32(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31)\n# define BOOST_PP_TUPLE_EAT_33(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32)\n# define BOOST_PP_TUPLE_EAT_34(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33)\n# define BOOST_PP_TUPLE_EAT_35(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34)\n# define BOOST_PP_TUPLE_EAT_36(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35)\n# define BOOST_PP_TUPLE_EAT_37(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36)\n# define BOOST_PP_TUPLE_EAT_38(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, 
e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37)\n# define BOOST_PP_TUPLE_EAT_39(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38)\n# define BOOST_PP_TUPLE_EAT_40(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39)\n# define BOOST_PP_TUPLE_EAT_41(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40)\n# define BOOST_PP_TUPLE_EAT_42(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41)\n# define BOOST_PP_TUPLE_EAT_43(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42)\n# define BOOST_PP_TUPLE_EAT_44(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43)\n# define BOOST_PP_TUPLE_EAT_45(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44)\n# define BOOST_PP_TUPLE_EAT_46(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, 
e42, e43, e44, e45)\n# define BOOST_PP_TUPLE_EAT_47(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46)\n# define BOOST_PP_TUPLE_EAT_48(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47)\n# define BOOST_PP_TUPLE_EAT_49(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48)\n# define BOOST_PP_TUPLE_EAT_50(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49)\n# define BOOST_PP_TUPLE_EAT_51(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50)\n# define BOOST_PP_TUPLE_EAT_52(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51)\n# define BOOST_PP_TUPLE_EAT_53(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52)\n# define BOOST_PP_TUPLE_EAT_54(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, 
e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53)\n# define BOOST_PP_TUPLE_EAT_55(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54)\n# define BOOST_PP_TUPLE_EAT_56(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55)\n# define BOOST_PP_TUPLE_EAT_57(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56)\n# define BOOST_PP_TUPLE_EAT_58(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57)\n# define BOOST_PP_TUPLE_EAT_59(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58)\n# define BOOST_PP_TUPLE_EAT_60(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, 
e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59)\n# define BOOST_PP_TUPLE_EAT_61(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60)\n# define BOOST_PP_TUPLE_EAT_62(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61)\n# define BOOST_PP_TUPLE_EAT_63(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62)\n# define BOOST_PP_TUPLE_EAT_64(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63)\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/tuple/elem.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002-2011) */\n# /* Revised by Edward Diener (2011,2014) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_TUPLE_ELEM_HPP\n# define BOOST_PREPROCESSOR_TUPLE_ELEM_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/facilities/expand.hpp>\n# include <boost/preprocessor/facilities/overload.hpp>\n# include <boost/preprocessor/tuple/rem.hpp>\n# include <boost/preprocessor/variadic/elem.hpp>\n# include <boost/preprocessor/tuple/detail/is_single_return.hpp>\n#\n# if BOOST_PP_VARIADICS\n#    if BOOST_PP_VARIADICS_MSVC\n#        define BOOST_PP_TUPLE_ELEM(...) BOOST_PP_TUPLE_ELEM_I(BOOST_PP_OVERLOAD(BOOST_PP_TUPLE_ELEM_O_, __VA_ARGS__), (__VA_ARGS__))\n#        define BOOST_PP_TUPLE_ELEM_I(m, args) BOOST_PP_TUPLE_ELEM_II(m, args)\n#        define BOOST_PP_TUPLE_ELEM_II(m, args) BOOST_PP_CAT(m ## args,)\n/*\n  Use BOOST_PP_REM_CAT if it is a single element tuple ( which might be empty )\n  else use BOOST_PP_REM. This fixes a VC++ problem with an empty tuple and BOOST_PP_TUPLE_ELEM\n  functionality. See tuple_elem_bug_test.cxx.\n*/\n#    \t define BOOST_PP_TUPLE_ELEM_O_2(n, tuple) \\\n\t\t\tBOOST_PP_VARIADIC_ELEM(n, BOOST_PP_EXPAND(BOOST_PP_TUPLE_IS_SINGLE_RETURN(BOOST_PP_REM_CAT,BOOST_PP_REM,tuple) tuple)) \\\n\t\t\t/**/\n#    else\n#        define BOOST_PP_TUPLE_ELEM(...) 
BOOST_PP_OVERLOAD(BOOST_PP_TUPLE_ELEM_O_, __VA_ARGS__)(__VA_ARGS__)\n#    \t define BOOST_PP_TUPLE_ELEM_O_2(n, tuple) BOOST_PP_VARIADIC_ELEM(n, BOOST_PP_REM tuple)\n#    endif\n#    define BOOST_PP_TUPLE_ELEM_O_3(size, n, tuple) BOOST_PP_TUPLE_ELEM_O_2(n, tuple)\n# else\n#    if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#        define BOOST_PP_TUPLE_ELEM(size, n, tuple) BOOST_PP_TUPLE_ELEM_I(BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM_, n), BOOST_PP_CAT(BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM_E_, size), tuple))\n#        define BOOST_PP_TUPLE_ELEM_I(m, args) BOOST_PP_TUPLE_ELEM_II(m, args)\n#        define BOOST_PP_TUPLE_ELEM_II(m, args) BOOST_PP_CAT(m ## args,)\n#    elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#        define BOOST_PP_TUPLE_ELEM(size, n, tuple) BOOST_PP_TUPLE_ELEM_I_OO((size, n, tuple))\n#        define BOOST_PP_TUPLE_ELEM_I_OO(par) BOOST_PP_TUPLE_ELEM_I ## par\n#        define BOOST_PP_TUPLE_ELEM_I(size, n, tuple) BOOST_PP_TUPLE_ELEM_II((n, BOOST_PP_TUPLE_ELEM_E_ ## size ## tuple))\n#        define BOOST_PP_TUPLE_ELEM_II(par) BOOST_PP_TUPLE_ELEM_III_OO(par)\n#        define BOOST_PP_TUPLE_ELEM_III_OO(par) BOOST_PP_TUPLE_ELEM_III ## par\n#        define BOOST_PP_TUPLE_ELEM_III(n, etuple) BOOST_PP_TUPLE_ELEM_ ## n ## etuple\n#    else\n#        define BOOST_PP_TUPLE_ELEM(size, n, tuple) BOOST_PP_TUPLE_ELEM_I(BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM_, n) BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM_E_, size) tuple)\n#        define BOOST_PP_TUPLE_ELEM_I(x) x\n#    endif\n#    define BOOST_PP_TUPLE_ELEM_E_1(e0) (e0, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_2(e0, e1) (e0, e1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define 
BOOST_PP_TUPLE_ELEM_E_3(e0, e1, e2) (e0, e1, e2, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_4(e0, e1, e2, e3) (e0, e1, e2, e3, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_5(e0, e1, e2, e3, e4) (e0, e1, e2, e3, e4, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_6(e0, e1, e2, e3, e4, e5) (e0, e1, e2, e3, e4, e5, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_7(e0, e1, e2, e3, e4, e5, e6) (e0, e1, e2, e3, e4, e5, e6, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_8(e0, e1, e2, e3, e4, e5, e6, e7) (e0, e1, e2, e3, e4, e5, e6, e7, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_9(e0, e1, e2, e3, e4, e5, e6, e7, e8) (e0, e1, e2, e3, e4, e5, e6, e7, e8, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_10(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_11(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_12(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_13(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_14(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_15(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_16(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_17(e0, e1, e2, e3, e4, e5, e6, e7, e8, 
e9, e10, e11, e12, e13, e14, e15, e16) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_18(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_19(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_20(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_21(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_22(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 
?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_23(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_24(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_25(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_26(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_27(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_28(e0, e1, e2, e3, e4, 
e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_29(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_30(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_31(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_32(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_33(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_34(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_35(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_36(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_37(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, 
e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_38(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_39(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_40(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_41(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, 
e37, e38, e39, e40) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_42(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_43(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_44(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_45(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, 
e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_46(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_47(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_48(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define 
BOOST_PP_TUPLE_ELEM_E_49(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_50(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_51(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_52(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, 
e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_53(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_54(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_55(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, ?, ?, ?, ?, 
?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_56(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, ?, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_57(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, ?, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_58(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, ?, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_59(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, 
e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, ?, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_60(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, ?, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_61(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, ?, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_62(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, 
e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, ?, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_63(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62) (e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, ?)\n#    define BOOST_PP_TUPLE_ELEM_E_64\n#    define BOOST_PP_TUPLE_ELEM_0(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e0\n#    define BOOST_PP_TUPLE_ELEM_1(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e1\n#    define BOOST_PP_TUPLE_ELEM_2(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, 
e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e2\n#    define BOOST_PP_TUPLE_ELEM_3(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e3\n#    define BOOST_PP_TUPLE_ELEM_4(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e4\n#    define BOOST_PP_TUPLE_ELEM_5(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e5\n#    define BOOST_PP_TUPLE_ELEM_6(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e6\n#    define BOOST_PP_TUPLE_ELEM_7(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e7\n#    define BOOST_PP_TUPLE_ELEM_8(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, 
e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e8\n#    define BOOST_PP_TUPLE_ELEM_9(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e9\n#    define BOOST_PP_TUPLE_ELEM_10(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e10\n#    define BOOST_PP_TUPLE_ELEM_11(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e11\n#    define BOOST_PP_TUPLE_ELEM_12(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e12\n#    define BOOST_PP_TUPLE_ELEM_13(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e13\n#    define 
BOOST_PP_TUPLE_ELEM_14(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e14\n#    define BOOST_PP_TUPLE_ELEM_15(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e15\n#    define BOOST_PP_TUPLE_ELEM_16(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e16\n#    define BOOST_PP_TUPLE_ELEM_17(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e17\n#    define BOOST_PP_TUPLE_ELEM_18(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e18\n#    define BOOST_PP_TUPLE_ELEM_19(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, 
e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e19\n#    define BOOST_PP_TUPLE_ELEM_20(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e20\n#    define BOOST_PP_TUPLE_ELEM_21(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e21\n#    define BOOST_PP_TUPLE_ELEM_22(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e22\n#    define BOOST_PP_TUPLE_ELEM_23(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e23\n#    define BOOST_PP_TUPLE_ELEM_24(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e24\n#    define BOOST_PP_TUPLE_ELEM_25(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, 
e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e25\n#    define BOOST_PP_TUPLE_ELEM_26(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e26\n#    define BOOST_PP_TUPLE_ELEM_27(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e27\n#    define BOOST_PP_TUPLE_ELEM_28(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e28\n#    define BOOST_PP_TUPLE_ELEM_29(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e29\n#    define BOOST_PP_TUPLE_ELEM_30(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e30\n#    define BOOST_PP_TUPLE_ELEM_31(e0, e1, e2, e3, e4, e5, 
e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e31\n#    define BOOST_PP_TUPLE_ELEM_32(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e32\n#    define BOOST_PP_TUPLE_ELEM_33(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e33\n#    define BOOST_PP_TUPLE_ELEM_34(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e34\n#    define BOOST_PP_TUPLE_ELEM_35(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e35\n#    define BOOST_PP_TUPLE_ELEM_36(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, 
e56, e57, e58, e59, e60, e61, e62, e63) e36\n#    define BOOST_PP_TUPLE_ELEM_37(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e37\n#    define BOOST_PP_TUPLE_ELEM_38(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e38\n#    define BOOST_PP_TUPLE_ELEM_39(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e39\n#    define BOOST_PP_TUPLE_ELEM_40(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e40\n#    define BOOST_PP_TUPLE_ELEM_41(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e41\n#    define BOOST_PP_TUPLE_ELEM_42(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, 
e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e42\n#    define BOOST_PP_TUPLE_ELEM_43(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e43\n#    define BOOST_PP_TUPLE_ELEM_44(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e44\n#    define BOOST_PP_TUPLE_ELEM_45(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e45\n#    define BOOST_PP_TUPLE_ELEM_46(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e46\n#    define BOOST_PP_TUPLE_ELEM_47(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e47\n#    define BOOST_PP_TUPLE_ELEM_48(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, 
e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e48\n#    define BOOST_PP_TUPLE_ELEM_49(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e49\n#    define BOOST_PP_TUPLE_ELEM_50(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e50\n#    define BOOST_PP_TUPLE_ELEM_51(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e51\n#    define BOOST_PP_TUPLE_ELEM_52(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e52\n#    define BOOST_PP_TUPLE_ELEM_53(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e53\n#    
define BOOST_PP_TUPLE_ELEM_54(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e54\n#    define BOOST_PP_TUPLE_ELEM_55(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e55\n#    define BOOST_PP_TUPLE_ELEM_56(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e56\n#    define BOOST_PP_TUPLE_ELEM_57(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e57\n#    define BOOST_PP_TUPLE_ELEM_58(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e58\n#    define BOOST_PP_TUPLE_ELEM_59(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, 
e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e59\n#    define BOOST_PP_TUPLE_ELEM_60(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e60\n#    define BOOST_PP_TUPLE_ELEM_61(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e61\n#    define BOOST_PP_TUPLE_ELEM_62(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e62\n#    define BOOST_PP_TUPLE_ELEM_63(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e63\n# endif\n#\n# /* directly used elsewhere in Boost... */\n#\n# define BOOST_PP_TUPLE_ELEM_1_0(a) a\n#\n# define BOOST_PP_TUPLE_ELEM_2_0(a, b) a\n# define BOOST_PP_TUPLE_ELEM_2_1(a, b) b\n#\n# define BOOST_PP_TUPLE_ELEM_3_0(a, b, c) a\n# define BOOST_PP_TUPLE_ELEM_3_1(a, b, c) b\n# define BOOST_PP_TUPLE_ELEM_3_2(a, b, c) c\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/tuple/rem.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Paul Mensonides 2002-2011.                             *\n#  *     (C) Copyright Edward Diener 2011,2013.                               *\n#  *     Distributed under the Boost Software License, Version 1.0. (See      *\n#  *     accompanying file LICENSE_1_0.txt or copy at                         *\n#  *     http://www.boost.org/LICENSE_1_0.txt)                                *\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_TUPLE_REM_HPP\n# define BOOST_PREPROCESSOR_TUPLE_REM_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/facilities/expand.hpp>\n# include <boost/preprocessor/facilities/overload.hpp>\n# include <boost/preprocessor/tuple/detail/is_single_return.hpp>\n#\n# /* BOOST_PP_REM */\n#\n# if BOOST_PP_VARIADICS\n# \t if BOOST_PP_VARIADICS_MSVC\n\t\t/* To be used internally when __VA_ARGS__ could be empty ( or is a single element ) */\n#    \tdefine BOOST_PP_REM_CAT(...) BOOST_PP_CAT(__VA_ARGS__,)\n# \t endif\n#    define BOOST_PP_REM(...) 
__VA_ARGS__\n# else\n#    define BOOST_PP_REM(x) x\n# endif\n#\n# /* BOOST_PP_TUPLE_REM */\n#\n/*\n  VC++8.0 cannot handle the variadic version of BOOST_PP_TUPLE_REM(size)\n*/\n# if BOOST_PP_VARIADICS && !(BOOST_PP_VARIADICS_MSVC && _MSC_VER <= 1400)\n# \t if BOOST_PP_VARIADICS_MSVC\n\t\t/* To be used internally when the size could be 0 ( or 1 ) */\n#    \tdefine BOOST_PP_TUPLE_REM_CAT(size) BOOST_PP_REM_CAT\n# \t endif\n#    define BOOST_PP_TUPLE_REM(size) BOOST_PP_REM\n# else\n#    if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#        define BOOST_PP_TUPLE_REM(size) BOOST_PP_TUPLE_REM_I(size)\n#    else\n#        define BOOST_PP_TUPLE_REM(size) BOOST_PP_TUPLE_REM_OO((size))\n#        define BOOST_PP_TUPLE_REM_OO(par) BOOST_PP_TUPLE_REM_I ## par\n#    endif\n#    define BOOST_PP_TUPLE_REM_I(size) BOOST_PP_TUPLE_REM_ ## size\n# endif\n# define BOOST_PP_TUPLE_REM_0()\n# define BOOST_PP_TUPLE_REM_1(e0) e0\n# define BOOST_PP_TUPLE_REM_2(e0, e1) e0, e1\n# define BOOST_PP_TUPLE_REM_3(e0, e1, e2) e0, e1, e2\n# define BOOST_PP_TUPLE_REM_4(e0, e1, e2, e3) e0, e1, e2, e3\n# define BOOST_PP_TUPLE_REM_5(e0, e1, e2, e3, e4) e0, e1, e2, e3, e4\n# define BOOST_PP_TUPLE_REM_6(e0, e1, e2, e3, e4, e5) e0, e1, e2, e3, e4, e5\n# define BOOST_PP_TUPLE_REM_7(e0, e1, e2, e3, e4, e5, e6) e0, e1, e2, e3, e4, e5, e6\n# define BOOST_PP_TUPLE_REM_8(e0, e1, e2, e3, e4, e5, e6, e7) e0, e1, e2, e3, e4, e5, e6, e7\n# define BOOST_PP_TUPLE_REM_9(e0, e1, e2, e3, e4, e5, e6, e7, e8) e0, e1, e2, e3, e4, e5, e6, e7, e8\n# define BOOST_PP_TUPLE_REM_10(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9\n# define BOOST_PP_TUPLE_REM_11(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10\n# define BOOST_PP_TUPLE_REM_12(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11\n# define BOOST_PP_TUPLE_REM_13(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12) e0, e1, e2, e3, e4, e5, e6, e7, 
e8, e9, e10, e11, e12\n# define BOOST_PP_TUPLE_REM_14(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13\n# define BOOST_PP_TUPLE_REM_15(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14\n# define BOOST_PP_TUPLE_REM_16(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15\n# define BOOST_PP_TUPLE_REM_17(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16\n# define BOOST_PP_TUPLE_REM_18(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17\n# define BOOST_PP_TUPLE_REM_19(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18\n# define BOOST_PP_TUPLE_REM_20(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19\n# define BOOST_PP_TUPLE_REM_21(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20\n# define BOOST_PP_TUPLE_REM_22(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21\n# define BOOST_PP_TUPLE_REM_23(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22\n# define 
BOOST_PP_TUPLE_REM_24(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23\n# define BOOST_PP_TUPLE_REM_25(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24\n# define BOOST_PP_TUPLE_REM_26(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25\n# define BOOST_PP_TUPLE_REM_27(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26\n# define BOOST_PP_TUPLE_REM_28(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27\n# define BOOST_PP_TUPLE_REM_29(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28\n# define BOOST_PP_TUPLE_REM_30(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29\n# define BOOST_PP_TUPLE_REM_31(e0, e1, 
e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30\n# define BOOST_PP_TUPLE_REM_32(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31\n# define BOOST_PP_TUPLE_REM_33(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32\n# define BOOST_PP_TUPLE_REM_34(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33\n# define BOOST_PP_TUPLE_REM_35(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34\n# define BOOST_PP_TUPLE_REM_36(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, 
e28, e29, e30, e31, e32, e33, e34, e35\n# define BOOST_PP_TUPLE_REM_37(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36\n# define BOOST_PP_TUPLE_REM_38(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37\n# define BOOST_PP_TUPLE_REM_39(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38\n# define BOOST_PP_TUPLE_REM_40(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39\n# define BOOST_PP_TUPLE_REM_41(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, 
e32, e33, e34, e35, e36, e37, e38, e39, e40\n# define BOOST_PP_TUPLE_REM_42(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41\n# define BOOST_PP_TUPLE_REM_43(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42\n# define BOOST_PP_TUPLE_REM_44(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43\n# define BOOST_PP_TUPLE_REM_45(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44\n# define BOOST_PP_TUPLE_REM_46(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, 
e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45\n# define BOOST_PP_TUPLE_REM_47(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46\n# define BOOST_PP_TUPLE_REM_48(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47\n# define BOOST_PP_TUPLE_REM_49(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48\n# define BOOST_PP_TUPLE_REM_50(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, 
e41, e42, e43, e44, e45, e46, e47, e48, e49) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49\n# define BOOST_PP_TUPLE_REM_51(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50\n# define BOOST_PP_TUPLE_REM_52(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51\n# define BOOST_PP_TUPLE_REM_53(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52\n# define BOOST_PP_TUPLE_REM_54(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, 
e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53\n# define BOOST_PP_TUPLE_REM_55(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54\n# define BOOST_PP_TUPLE_REM_56(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55\n# define BOOST_PP_TUPLE_REM_57(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, 
e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56\n# define BOOST_PP_TUPLE_REM_58(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57\n# define BOOST_PP_TUPLE_REM_59(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58\n# define BOOST_PP_TUPLE_REM_60(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59\n# define BOOST_PP_TUPLE_REM_61(e0, e1, e2, e3, e4, 
e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60\n# define BOOST_PP_TUPLE_REM_62(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61\n# define BOOST_PP_TUPLE_REM_63(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62\n# define BOOST_PP_TUPLE_REM_64(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, 
e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63\n#\n# /* BOOST_PP_TUPLE_REM_CTOR */\n#\n# if BOOST_PP_VARIADICS\n#    if BOOST_PP_VARIADICS_MSVC\n#        define BOOST_PP_TUPLE_REM_CTOR(...) BOOST_PP_TUPLE_REM_CTOR_I(BOOST_PP_OVERLOAD(BOOST_PP_TUPLE_REM_CTOR_O_, __VA_ARGS__), (__VA_ARGS__))\n#        define BOOST_PP_TUPLE_REM_CTOR_I(m, args) BOOST_PP_TUPLE_REM_CTOR_II(m, args)\n#        define BOOST_PP_TUPLE_REM_CTOR_II(m, args) BOOST_PP_CAT(m ## args,)\n#    \t define BOOST_PP_TUPLE_REM_CTOR_O_1(tuple) BOOST_PP_EXPAND(BOOST_PP_TUPLE_IS_SINGLE_RETURN(BOOST_PP_REM_CAT,BOOST_PP_REM,tuple) tuple)\n#    else\n#        define BOOST_PP_TUPLE_REM_CTOR(...) 
BOOST_PP_OVERLOAD(BOOST_PP_TUPLE_REM_CTOR_O_, __VA_ARGS__)(__VA_ARGS__)\n#    \t define BOOST_PP_TUPLE_REM_CTOR_O_1(tuple) BOOST_PP_REM tuple\n#    endif\n#    define BOOST_PP_TUPLE_REM_CTOR_O_2(size, tuple) BOOST_PP_TUPLE_REM_CTOR_O_1(tuple)\n# else\n#    if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()\n#        define BOOST_PP_TUPLE_REM_CTOR(size, tuple) BOOST_PP_TUPLE_REM_CTOR_I(BOOST_PP_TUPLE_REM(size), tuple)\n#    else\n#        define BOOST_PP_TUPLE_REM_CTOR(size, tuple) BOOST_PP_TUPLE_REM_CTOR_D(size, tuple)\n#        define BOOST_PP_TUPLE_REM_CTOR_D(size, tuple) BOOST_PP_TUPLE_REM_CTOR_I(BOOST_PP_TUPLE_REM(size), tuple)\n#    endif\n#    if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#        define BOOST_PP_TUPLE_REM_CTOR_I(ext, tuple) ext tuple\n#    else\n#        define BOOST_PP_TUPLE_REM_CTOR_I(ext, tuple) BOOST_PP_TUPLE_REM_CTOR_OO((ext, tuple))\n#        define BOOST_PP_TUPLE_REM_CTOR_OO(par) BOOST_PP_TUPLE_REM_CTOR_II ## par\n#        define BOOST_PP_TUPLE_REM_CTOR_II(ext, tuple) ext ## tuple\n#    endif\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/tuple/size.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2011.                                    *\n#  *     (C) Copyright Paul Mensonides 2011.                                  *\n#  *     Distributed under the Boost Software License, Version 1.0. (See      *\n#  *     accompanying file LICENSE_1_0.txt or copy at                         *\n#  *     http://www.boost.org/LICENSE_1_0.txt)                                *\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_TUPLE_SIZE_HPP\n# define BOOST_PREPROCESSOR_TUPLE_SIZE_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/variadic/size.hpp>\n#\n# if BOOST_PP_VARIADICS\n#    if BOOST_PP_VARIADICS_MSVC\n#        define BOOST_PP_TUPLE_SIZE(tuple) BOOST_PP_CAT(BOOST_PP_VARIADIC_SIZE tuple,)\n#    else\n#        define BOOST_PP_TUPLE_SIZE(tuple) BOOST_PP_VARIADIC_SIZE tuple\n#    endif\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/tuple/to_list.hpp",
    "content": "# /* Copyright (C) 2001\n#  * Housemarque Oy\n#  * http://www.housemarque.com\n#  *\n#  * Distributed under the Boost Software License, Version 1.0. (See\n#  * accompanying file LICENSE_1_0.txt or copy at\n#  * http://www.boost.org/LICENSE_1_0.txt)\n#  */\n#\n# /* Revised by Paul Mensonides (2002-2011) */\n# /* Revised by Edward Diener (2011) */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_TUPLE_TO_LIST_HPP\n# define BOOST_PREPROCESSOR_TUPLE_TO_LIST_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n# include <boost/preprocessor/facilities/overload.hpp>\n# include <boost/preprocessor/tuple/size.hpp>\n# include <boost/preprocessor/variadic/size.hpp>\n#\n# /* BOOST_PP_TUPLE_TO_LIST */\n#\n# if BOOST_PP_VARIADICS\n#    if BOOST_PP_VARIADICS_MSVC\n#        define BOOST_PP_TUPLE_TO_LIST(...) BOOST_PP_TUPLE_TO_LIST_I(BOOST_PP_OVERLOAD(BOOST_PP_TUPLE_TO_LIST_O_, __VA_ARGS__), (__VA_ARGS__))\n#        define BOOST_PP_TUPLE_TO_LIST_I(m, args) BOOST_PP_TUPLE_TO_LIST_II(m, args)\n#        define BOOST_PP_TUPLE_TO_LIST_II(m, args) BOOST_PP_CAT(m ## args,)\n#    \t define BOOST_PP_TUPLE_TO_LIST_O_1(tuple) BOOST_PP_CAT(BOOST_PP_TUPLE_TO_LIST_, BOOST_PP_TUPLE_SIZE(tuple)) tuple\n#    else\n#        define BOOST_PP_TUPLE_TO_LIST(...) 
BOOST_PP_OVERLOAD(BOOST_PP_TUPLE_TO_LIST_O_, __VA_ARGS__)(__VA_ARGS__)\n#    \t define BOOST_PP_TUPLE_TO_LIST_O_1(tuple) BOOST_PP_CAT(BOOST_PP_TUPLE_TO_LIST_, BOOST_PP_VARIADIC_SIZE tuple) tuple\n#    endif\n#    define BOOST_PP_TUPLE_TO_LIST_O_2(size, tuple) BOOST_PP_TUPLE_TO_LIST_O_1(tuple)\n# else\n#    if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MWCC()\n#        define BOOST_PP_TUPLE_TO_LIST(size, tuple) BOOST_PP_TUPLE_TO_LIST_I(size, tuple)\n#        if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()\n#            define BOOST_PP_TUPLE_TO_LIST_I(s, t) BOOST_PP_TUPLE_TO_LIST_ ## s t\n#        else\n#            define BOOST_PP_TUPLE_TO_LIST_I(s, t) BOOST_PP_TUPLE_TO_LIST_II(BOOST_PP_TUPLE_TO_LIST_ ## s t)\n#            define BOOST_PP_TUPLE_TO_LIST_II(res) res\n#        endif\n#    else\n#        define BOOST_PP_TUPLE_TO_LIST(size, tuple) BOOST_PP_TUPLE_TO_LIST_OO((size, tuple))\n#        define BOOST_PP_TUPLE_TO_LIST_OO(par) BOOST_PP_TUPLE_TO_LIST_I ## par\n#        define BOOST_PP_TUPLE_TO_LIST_I(s, t) BOOST_PP_TUPLE_TO_LIST_ ## s ## t\n#    endif\n# endif\n#\n# define BOOST_PP_TUPLE_TO_LIST_1(e0) (e0, BOOST_PP_NIL)\n# define BOOST_PP_TUPLE_TO_LIST_2(e0, e1) (e0, (e1, BOOST_PP_NIL))\n# define BOOST_PP_TUPLE_TO_LIST_3(e0, e1, e2) (e0, (e1, (e2, BOOST_PP_NIL)))\n# define BOOST_PP_TUPLE_TO_LIST_4(e0, e1, e2, e3) (e0, (e1, (e2, (e3, BOOST_PP_NIL))))\n# define BOOST_PP_TUPLE_TO_LIST_5(e0, e1, e2, e3, e4) (e0, (e1, (e2, (e3, (e4, BOOST_PP_NIL)))))\n# define BOOST_PP_TUPLE_TO_LIST_6(e0, e1, e2, e3, e4, e5) (e0, (e1, (e2, (e3, (e4, (e5, BOOST_PP_NIL))))))\n# define BOOST_PP_TUPLE_TO_LIST_7(e0, e1, e2, e3, e4, e5, e6) (e0, (e1, (e2, (e3, (e4, (e5, (e6, BOOST_PP_NIL)))))))\n# define BOOST_PP_TUPLE_TO_LIST_8(e0, e1, e2, e3, e4, e5, e6, e7) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, BOOST_PP_NIL))))))))\n# define BOOST_PP_TUPLE_TO_LIST_9(e0, e1, e2, e3, e4, e5, e6, e7, e8) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, BOOST_PP_NIL)))))))))\n# define 
BOOST_PP_TUPLE_TO_LIST_10(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, BOOST_PP_NIL))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_11(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, BOOST_PP_NIL)))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_12(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, BOOST_PP_NIL))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_13(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, BOOST_PP_NIL)))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_14(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, BOOST_PP_NIL))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_15(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, BOOST_PP_NIL)))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_16(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, BOOST_PP_NIL))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_17(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, BOOST_PP_NIL)))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_18(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, BOOST_PP_NIL))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_19(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, 
BOOST_PP_NIL)))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_20(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, BOOST_PP_NIL))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_21(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, BOOST_PP_NIL)))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_22(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, BOOST_PP_NIL))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_23(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, BOOST_PP_NIL)))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_24(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, BOOST_PP_NIL))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_25(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, BOOST_PP_NIL)))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_26(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25) (e0, (e1, (e2, 
(e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, BOOST_PP_NIL))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_27(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, BOOST_PP_NIL)))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_28(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, BOOST_PP_NIL))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_29(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, BOOST_PP_NIL)))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_30(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, BOOST_PP_NIL))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_31(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, 
(e29, (e30, BOOST_PP_NIL)))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_32(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, BOOST_PP_NIL))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_33(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, BOOST_PP_NIL)))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_34(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, BOOST_PP_NIL))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_35(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_36(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35) (e0, (e1, 
(e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_37(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_38(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_39(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_40(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, 
(e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_41(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_42(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_43(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_44(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, 
e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_45(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_46(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_47(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, 
(e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_48(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_49(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_50(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))))\n# define 
BOOST_PP_TUPLE_TO_LIST_51(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_52(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_53(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_54(e0, e1, e2, 
e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_55(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_56(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, 
BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_57(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, (e56, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_58(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, (e56, (e57, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_59(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, 
(e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, (e56, (e57, (e58, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_60(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, (e56, (e57, (e58, (e59, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_61(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, (e56, (e57, (e58, (e59, (e60, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_62(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, 
e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, (e56, (e57, (e58, (e59, (e60, (e61, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_63(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, (e56, (e57, (e58, (e59, (e60, (e61, (e62, BOOST_PP_NIL)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n# define BOOST_PP_TUPLE_TO_LIST_64(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63) (e0, (e1, (e2, (e3, (e4, (e5, (e6, (e7, (e8, (e9, (e10, (e11, (e12, (e13, (e14, (e15, (e16, (e17, (e18, (e19, (e20, (e21, (e22, (e23, (e24, (e25, (e26, (e27, (e28, (e29, (e30, (e31, (e32, (e33, (e34, (e35, (e36, (e37, (e38, (e39, (e40, (e41, (e42, (e43, (e44, (e45, (e46, (e47, (e48, (e49, (e50, (e51, (e52, (e53, (e54, (e55, (e56, 
(e57, (e58, (e59, (e60, (e61, (e62, (e63, BOOST_PP_NIL))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/variadic/elem.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2011.                                    *\n#  *     (C) Copyright Paul Mensonides 2011.                                  *\n#  *     Distributed under the Boost Software License, Version 1.0. (See      *\n#  *     accompanying file LICENSE_1_0.txt or copy at                         *\n#  *     http://www.boost.org/LICENSE_1_0.txt)                                *\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_VARIADIC_ELEM_HPP\n# define BOOST_PREPROCESSOR_VARIADIC_ELEM_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_VARIADIC_ELEM */\n#\n# if BOOST_PP_VARIADICS\n#    if BOOST_PP_VARIADICS_MSVC\n#        define BOOST_PP_VARIADIC_ELEM(n, ...) BOOST_PP_VARIADIC_ELEM_I(n,__VA_ARGS__)\n#        define BOOST_PP_VARIADIC_ELEM_I(n, ...) BOOST_PP_CAT(BOOST_PP_CAT(BOOST_PP_VARIADIC_ELEM_, n)(__VA_ARGS__,),)\n#    else\n#        define BOOST_PP_VARIADIC_ELEM(n, ...) BOOST_PP_CAT(BOOST_PP_VARIADIC_ELEM_, n)(__VA_ARGS__,)\n#    endif\n#    define BOOST_PP_VARIADIC_ELEM_0(e0, ...) e0\n#    define BOOST_PP_VARIADIC_ELEM_1(e0, e1, ...) e1\n#    define BOOST_PP_VARIADIC_ELEM_2(e0, e1, e2, ...) e2\n#    define BOOST_PP_VARIADIC_ELEM_3(e0, e1, e2, e3, ...) e3\n#    define BOOST_PP_VARIADIC_ELEM_4(e0, e1, e2, e3, e4, ...) e4\n#    define BOOST_PP_VARIADIC_ELEM_5(e0, e1, e2, e3, e4, e5, ...) e5\n#    define BOOST_PP_VARIADIC_ELEM_6(e0, e1, e2, e3, e4, e5, e6, ...) e6\n#    define BOOST_PP_VARIADIC_ELEM_7(e0, e1, e2, e3, e4, e5, e6, e7, ...) 
e7\n#    define BOOST_PP_VARIADIC_ELEM_8(e0, e1, e2, e3, e4, e5, e6, e7, e8, ...) e8\n#    define BOOST_PP_VARIADIC_ELEM_9(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, ...) e9\n#    define BOOST_PP_VARIADIC_ELEM_10(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, ...) e10\n#    define BOOST_PP_VARIADIC_ELEM_11(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, ...) e11\n#    define BOOST_PP_VARIADIC_ELEM_12(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, ...) e12\n#    define BOOST_PP_VARIADIC_ELEM_13(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, ...) e13\n#    define BOOST_PP_VARIADIC_ELEM_14(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, ...) e14\n#    define BOOST_PP_VARIADIC_ELEM_15(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, ...) e15\n#    define BOOST_PP_VARIADIC_ELEM_16(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, ...) e16\n#    define BOOST_PP_VARIADIC_ELEM_17(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, ...) e17\n#    define BOOST_PP_VARIADIC_ELEM_18(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, ...) e18\n#    define BOOST_PP_VARIADIC_ELEM_19(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, ...) e19\n#    define BOOST_PP_VARIADIC_ELEM_20(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, ...) e20\n#    define BOOST_PP_VARIADIC_ELEM_21(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, ...) e21\n#    define BOOST_PP_VARIADIC_ELEM_22(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, ...) e22\n#    define BOOST_PP_VARIADIC_ELEM_23(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, ...) 
e23\n#    define BOOST_PP_VARIADIC_ELEM_24(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, ...) e24\n#    define BOOST_PP_VARIADIC_ELEM_25(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, ...) e25\n#    define BOOST_PP_VARIADIC_ELEM_26(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, ...) e26\n#    define BOOST_PP_VARIADIC_ELEM_27(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, ...) e27\n#    define BOOST_PP_VARIADIC_ELEM_28(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, ...) e28\n#    define BOOST_PP_VARIADIC_ELEM_29(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, ...) e29\n#    define BOOST_PP_VARIADIC_ELEM_30(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, ...) e30\n#    define BOOST_PP_VARIADIC_ELEM_31(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, ...) e31\n#    define BOOST_PP_VARIADIC_ELEM_32(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, ...) e32\n#    define BOOST_PP_VARIADIC_ELEM_33(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, ...) 
e33\n#    define BOOST_PP_VARIADIC_ELEM_34(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, ...) e34\n#    define BOOST_PP_VARIADIC_ELEM_35(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, ...) e35\n#    define BOOST_PP_VARIADIC_ELEM_36(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, ...) e36\n#    define BOOST_PP_VARIADIC_ELEM_37(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, ...) e37\n#    define BOOST_PP_VARIADIC_ELEM_38(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, ...) e38\n#    define BOOST_PP_VARIADIC_ELEM_39(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, ...) e39\n#    define BOOST_PP_VARIADIC_ELEM_40(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, ...) e40\n#    define BOOST_PP_VARIADIC_ELEM_41(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, ...) 
e41\n#    define BOOST_PP_VARIADIC_ELEM_42(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, ...) e42\n#    define BOOST_PP_VARIADIC_ELEM_43(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, ...) e43\n#    define BOOST_PP_VARIADIC_ELEM_44(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, ...) e44\n#    define BOOST_PP_VARIADIC_ELEM_45(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, ...) e45\n#    define BOOST_PP_VARIADIC_ELEM_46(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, ...) e46\n#    define BOOST_PP_VARIADIC_ELEM_47(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, ...) e47\n#    define BOOST_PP_VARIADIC_ELEM_48(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, ...) 
e48\n#    define BOOST_PP_VARIADIC_ELEM_49(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, ...) e49\n#    define BOOST_PP_VARIADIC_ELEM_50(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, ...) e50\n#    define BOOST_PP_VARIADIC_ELEM_51(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, ...) e51\n#    define BOOST_PP_VARIADIC_ELEM_52(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, ...) e52\n#    define BOOST_PP_VARIADIC_ELEM_53(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, ...) e53\n#    define BOOST_PP_VARIADIC_ELEM_54(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, ...) 
e54\n#    define BOOST_PP_VARIADIC_ELEM_55(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, ...) e55\n#    define BOOST_PP_VARIADIC_ELEM_56(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, ...) e56\n#    define BOOST_PP_VARIADIC_ELEM_57(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, ...) e57\n#    define BOOST_PP_VARIADIC_ELEM_58(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, ...) e58\n#    define BOOST_PP_VARIADIC_ELEM_59(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, ...) e59\n#    define BOOST_PP_VARIADIC_ELEM_60(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, ...) 
e60\n#    define BOOST_PP_VARIADIC_ELEM_61(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, ...) e61\n#    define BOOST_PP_VARIADIC_ELEM_62(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, ...) e62\n#    define BOOST_PP_VARIADIC_ELEM_63(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63, ...) e63\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/preprocessor/variadic/size.hpp",
    "content": "# /* **************************************************************************\n#  *                                                                          *\n#  *     (C) Copyright Edward Diener 2011.                                    *\n#  *     (C) Copyright Paul Mensonides 2011.                                  *\n#  *     Distributed under the Boost Software License, Version 1.0. (See      *\n#  *     accompanying file LICENSE_1_0.txt or copy at                         *\n#  *     http://www.boost.org/LICENSE_1_0.txt)                                *\n#  *                                                                          *\n#  ************************************************************************** */\n#\n# /* See http://www.boost.org for most recent version. */\n#\n# ifndef BOOST_PREPROCESSOR_VARIADIC_SIZE_HPP\n# define BOOST_PREPROCESSOR_VARIADIC_SIZE_HPP\n#\n# include <boost/preprocessor/cat.hpp>\n# include <boost/preprocessor/config/config.hpp>\n#\n# /* BOOST_PP_VARIADIC_SIZE */\n#\n# if BOOST_PP_VARIADICS\n#    if BOOST_PP_VARIADICS_MSVC\n#        define BOOST_PP_VARIADIC_SIZE(...) BOOST_PP_CAT(BOOST_PP_VARIADIC_SIZE_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1,),)\n#    else\n#        define BOOST_PP_VARIADIC_SIZE(...) 
BOOST_PP_VARIADIC_SIZE_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1,)\n#    endif\n#    define BOOST_PP_VARIADIC_SIZE_I(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63, size, ...) size\n# endif\n#\n# endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/static_assert.hpp",
    "content": "//  (C) Copyright John Maddock 2000.\n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org/libs/static_assert for documentation.\n\n/*\n Revision history:\n   02 August 2000\n      Initial version.\n*/\n\n#ifndef BOOST_STATIC_ASSERT_HPP\n#define BOOST_STATIC_ASSERT_HPP\n\n#include <boost/config.hpp>\n#include <boost/detail/workaround.hpp>\n\n#if defined(__GNUC__) && !defined(__GXX_EXPERIMENTAL_CXX0X__)\n//\n// This is horrible, but it seems to be the only we can shut up the\n// \"anonymous variadic macros were introduced in C99 [-Wvariadic-macros]\"\n// warning that get spewed out otherwise in non-C++11 mode.\n//\n#pragma GCC system_header\n#endif\n\n#ifndef BOOST_NO_CXX11_STATIC_ASSERT\n#  ifndef BOOST_NO_CXX11_VARIADIC_MACROS\n#     define BOOST_STATIC_ASSERT_MSG( ... ) static_assert(__VA_ARGS__)\n#  else\n#     define BOOST_STATIC_ASSERT_MSG( B, Msg ) static_assert( B, Msg )\n#  endif\n#else\n#     define BOOST_STATIC_ASSERT_MSG( B, Msg ) BOOST_STATIC_ASSERT( B )\n#endif\n\n#ifdef __BORLANDC__\n//\n// workaround for buggy integral-constant expression support:\n#define BOOST_BUGGY_INTEGRAL_CONSTANT_EXPRESSIONS\n#endif\n\n#if defined(__GNUC__) && (__GNUC__ == 3) && ((__GNUC_MINOR__ == 3) || (__GNUC_MINOR__ == 4))\n// gcc 3.3 and 3.4 don't produce good error messages with the default version:\n#  define BOOST_SA_GCC_WORKAROUND\n#endif\n\n//\n// If the compiler issues warnings about old C style casts,\n// then enable this:\n//\n#if defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)))\n#  ifndef BOOST_NO_CXX11_VARIADIC_MACROS\n#     define BOOST_STATIC_ASSERT_BOOL_CAST( ... ) ((__VA_ARGS__) == 0 ? false : true)\n#  else\n#     define BOOST_STATIC_ASSERT_BOOL_CAST( x ) ((x) == 0 ? 
false : true)\n#  endif\n#else\n#  ifndef BOOST_NO_CXX11_VARIADIC_MACROS\n#     define BOOST_STATIC_ASSERT_BOOL_CAST( ... ) (bool)(__VA_ARGS__)\n#  else\n#     define BOOST_STATIC_ASSERT_BOOL_CAST(x) (bool)(x)\n#  endif\n#endif\n\n#ifndef BOOST_NO_CXX11_STATIC_ASSERT\n#  ifndef BOOST_NO_CXX11_VARIADIC_MACROS\n#     define BOOST_STATIC_ASSERT( ... ) static_assert(__VA_ARGS__, #__VA_ARGS__)\n#  else\n#     define BOOST_STATIC_ASSERT( B ) static_assert(B, #B)\n#  endif\n#else\n\nnamespace boost{\n\n// HP aCC cannot deal with missing names for template value parameters\ntemplate <bool x> struct STATIC_ASSERTION_FAILURE;\n\ntemplate <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; };\n\n// HP aCC cannot deal with missing names for template value parameters\ntemplate<int x> struct static_assert_test{};\n\n}\n\n//\n// Implicit instantiation requires that all member declarations be\n// instantiated, but that the definitions are *not* instantiated.\n//\n// It's not particularly clear how this applies to enum's or typedefs;\n// both are described as declarations [7.1.3] and [7.2] in the standard,\n// however some compilers use \"delayed evaluation\" of one or more of\n// these when implicitly instantiating templates.  We use typedef declarations\n// by default, but try defining BOOST_USE_ENUM_STATIC_ASSERT if the enum\n// version gets better results from your compiler...\n//\n// Implementation:\n// Both of these versions rely on sizeof(incomplete_type) generating an error\n// message containing the name of the incomplete type.  We use\n// \"STATIC_ASSERTION_FAILURE\" as the type name here to generate\n// an eye catching error message.  
The result of the sizeof expression is either\n// used as an enum initialiser, or as a template argument depending which version\n// is in use...\n// Note that the argument to the assert is explicitly cast to bool using old-\n// style casts: too many compilers currently have problems with static_cast\n// when used inside integral constant expressions.\n//\n#if !defined(BOOST_BUGGY_INTEGRAL_CONSTANT_EXPRESSIONS)\n\n#if defined(BOOST_MSVC) && defined(BOOST_NO_CXX11_VARIADIC_MACROS)\n#define BOOST_STATIC_ASSERT( B ) \\\n   typedef ::boost::static_assert_test<\\\n      sizeof(::boost::STATIC_ASSERTION_FAILURE< BOOST_STATIC_ASSERT_BOOL_CAST ( B ) >)>\\\n         BOOST_JOIN(boost_static_assert_typedef_, __COUNTER__)\n#elif defined(BOOST_MSVC)\n#define BOOST_STATIC_ASSERT(...) \\\n   typedef ::boost::static_assert_test<\\\n      sizeof(::boost::STATIC_ASSERTION_FAILURE< BOOST_STATIC_ASSERT_BOOL_CAST (__VA_ARGS__) >)>\\\n         BOOST_JOIN(boost_static_assert_typedef_, __COUNTER__)\n#elif (defined(BOOST_INTEL_CXX_VERSION) || defined(BOOST_SA_GCC_WORKAROUND))  && defined(BOOST_NO_CXX11_VARIADIC_MACROS)\n// agurt 15/sep/02: a special care is needed to force Intel C++ issue an error \n// instead of warning in case of failure\n# define BOOST_STATIC_ASSERT( B ) \\\n    typedef char BOOST_JOIN(boost_static_assert_typedef_, __LINE__) \\\n        [ ::boost::STATIC_ASSERTION_FAILURE< BOOST_STATIC_ASSERT_BOOL_CAST( B ) >::value ]\n#elif (defined(BOOST_INTEL_CXX_VERSION) || defined(BOOST_SA_GCC_WORKAROUND))  && !defined(BOOST_NO_CXX11_VARIADIC_MACROS)\n// agurt 15/sep/02: a special care is needed to force Intel C++ issue an error \n// instead of warning in case of failure\n# define BOOST_STATIC_ASSERT(...) 
\\\n    typedef char BOOST_JOIN(boost_static_assert_typedef_, __LINE__) \\\n        [ ::boost::STATIC_ASSERTION_FAILURE< BOOST_STATIC_ASSERT_BOOL_CAST( __VA_ARGS__ ) >::value ]\n#elif defined(__sgi)\n// special version for SGI MIPSpro compiler\n#define BOOST_STATIC_ASSERT( B ) \\\n   BOOST_STATIC_CONSTANT(bool, \\\n     BOOST_JOIN(boost_static_assert_test_, __LINE__) = ( B )); \\\n   typedef ::boost::static_assert_test<\\\n     sizeof(::boost::STATIC_ASSERTION_FAILURE< \\\n       BOOST_JOIN(boost_static_assert_test_, __LINE__) >)>\\\n         BOOST_JOIN(boost_static_assert_typedef_, __LINE__)\n#elif BOOST_WORKAROUND(__MWERKS__, <= 0x3003)\n// special version for CodeWarrior <= 8.x\n#define BOOST_STATIC_ASSERT( B ) \\\n   BOOST_STATIC_CONSTANT(int, \\\n     BOOST_JOIN(boost_static_assert_test_, __LINE__) = \\\n       sizeof(::boost::STATIC_ASSERTION_FAILURE< BOOST_STATIC_ASSERT_BOOL_CAST( B ) >) )\n#else\n// generic version\n#  ifndef BOOST_NO_CXX11_VARIADIC_MACROS\n#     define BOOST_STATIC_ASSERT( ... ) \\\n         typedef ::boost::static_assert_test<\\\n            sizeof(::boost::STATIC_ASSERTION_FAILURE< BOOST_STATIC_ASSERT_BOOL_CAST( __VA_ARGS__ ) >)>\\\n               BOOST_JOIN(boost_static_assert_typedef_, __LINE__) BOOST_ATTRIBUTE_UNUSED\n#  else\n#     define BOOST_STATIC_ASSERT( B ) \\\n         typedef ::boost::static_assert_test<\\\n            sizeof(::boost::STATIC_ASSERTION_FAILURE< BOOST_STATIC_ASSERT_BOOL_CAST( B ) >)>\\\n               BOOST_JOIN(boost_static_assert_typedef_, __LINE__) BOOST_ATTRIBUTE_UNUSED\n#  endif\n#endif\n\n#else\n// alternative enum based implementation:\n#  ifndef BOOST_NO_CXX11_VARIADIC_MACROS\n#    define BOOST_STATIC_ASSERT( ... 
) \\\n         enum { BOOST_JOIN(boost_static_assert_enum_, __LINE__) \\\n            = sizeof(::boost::STATIC_ASSERTION_FAILURE< (bool)( __VA_ARGS__ ) >) }\n#  else\n#    define BOOST_STATIC_ASSERT(B) \\\n         enum { BOOST_JOIN(boost_static_assert_enum_, __LINE__) \\\n            = sizeof(::boost::STATIC_ASSERTION_FAILURE< (bool)( B ) >) }\n#  endif\n#endif\n#endif // defined(BOOST_NO_CXX11_STATIC_ASSERT)\n\n#endif // BOOST_STATIC_ASSERT_HPP\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/swap.hpp",
    "content": "/*\n * Copyright (c) 2014 Glen Fernandes\n *\n * Distributed under the Boost Software License, Version 1.0. (See\n * accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n */\n\n#ifndef BOOST_SWAP_HPP\n#define BOOST_SWAP_HPP\n\n// The header file at this path is deprecated;\n// use boost/core/swap.hpp instead.\n\n#include <boost/core/swap.hpp>\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/throw_exception.hpp",
    "content": "#ifndef UUID_AA15E74A856F11E08B8D93F24824019B\n#define UUID_AA15E74A856F11E08B8D93F24824019B\n#if (__GNUC__*100+__GNUC_MINOR__>301) && !defined(BOOST_EXCEPTION_ENABLE_WARNINGS)\n#pragma GCC system_header\n#endif\n#if defined(_MSC_VER) && !defined(BOOST_EXCEPTION_ENABLE_WARNINGS)\n#pragma warning(push,1)\n#endif\n\n// MS compatible compilers support #pragma once\n\n#if defined(_MSC_VER) && (_MSC_VER >= 1020)\n# pragma once\n#endif\n\n//\n//  boost/throw_exception.hpp\n//\n//  Copyright (c) 2002 Peter Dimov and Multi Media Ltd.\n//  Copyright (c) 2008-2009 Emil Dotchevski and Reverge Studios, Inc.\n//\n//  Distributed under the Boost Software License, Version 1.0. (See\n//  accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n//\n//  http://www.boost.org/libs/utility/throw_exception.html\n//\n\n#include <boost/detail/workaround.hpp>\n#include <boost/config.hpp>\n#include <exception>\n\n#if !defined( BOOST_EXCEPTION_DISABLE ) && defined( __BORLANDC__ ) && BOOST_WORKAROUND( __BORLANDC__, BOOST_TESTED_AT(0x593) )\n# define BOOST_EXCEPTION_DISABLE\n#endif\n\n#if !defined( BOOST_EXCEPTION_DISABLE ) && defined( BOOST_MSVC ) && BOOST_WORKAROUND( BOOST_MSVC, < 1310 )\n# define BOOST_EXCEPTION_DISABLE\n#endif\n\n#if !defined( BOOST_EXCEPTION_DISABLE )\n# include <boost/exception/exception.hpp>\n#if !defined(BOOST_THROW_EXCEPTION_CURRENT_FUNCTION)\n# include <boost/current_function.hpp>\n# define BOOST_THROW_EXCEPTION_CURRENT_FUNCTION BOOST_CURRENT_FUNCTION\n#endif\n# define BOOST_THROW_EXCEPTION(x) ::boost::exception_detail::throw_exception_(x,BOOST_THROW_EXCEPTION_CURRENT_FUNCTION,__FILE__,__LINE__)\n#else\n# define BOOST_THROW_EXCEPTION(x) ::boost::throw_exception(x)\n#endif\n\nnamespace boost\n{\n#ifdef BOOST_NO_EXCEPTIONS\n\nvoid throw_exception( std::exception const & e ); // user defined\n\n#else\n\ninline void throw_exception_assert_compatibility( std::exception const & ) { }\n\ntemplate<class E> BOOST_NORETURN inline 
void throw_exception( E const & e )\n{\n    //All boost exceptions are required to derive from std::exception,\n    //to ensure compatibility with BOOST_NO_EXCEPTIONS.\n    throw_exception_assert_compatibility(e);\n\n#ifndef BOOST_EXCEPTION_DISABLE\n    throw enable_current_exception(enable_error_info(e));\n#else\n    throw e;\n#endif\n}\n\n#endif\n\n#if !defined( BOOST_EXCEPTION_DISABLE )\n    namespace\n    exception_detail\n    {\n        template <class E>\n        BOOST_NORETURN\n        void\n        throw_exception_( E const & x, char const * current_function, char const * file, int line )\n        {\n            boost::throw_exception(\n                set_info(\n                    set_info(\n                        set_info(\n                            enable_error_info(x),\n                            throw_function(current_function)),\n                        throw_file(file)),\n                    throw_line(line)));\n        }\n    }\n#endif\n} // namespace boost\n\n#if defined(_MSC_VER) && !defined(BOOST_EXCEPTION_ENABLE_WARNINGS)\n#pragma warning(pop)\n#endif\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/add_const.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_ADD_CONST_HPP_INCLUDED\n#define BOOST_TT_ADD_CONST_HPP_INCLUDED\n\n#include <boost/type_traits/detail/config.hpp>\n\nnamespace boost {\n\n// * convert a type T to const type - add_const<T>\n// this is not required since the result is always\n// the same as \"T const\", but it does suppress warnings\n// from some compilers:\n\n#if defined(BOOST_MSVC)\n// This bogus warning will appear when add_const is applied to a\n// const volatile reference because we can't detect const volatile\n// references with MSVC6.\n#   pragma warning(push)\n#   pragma warning(disable:4181) // warning C4181: qualifier applied to reference type ignored\n#endif \n\n   template <class T> struct add_const\n   {\n      typedef T const type;\n   };\n\n#if defined(BOOST_MSVC)\n#   pragma warning(pop)\n#endif \n\n   template <class T> struct add_const<T&>\n   {\n      typedef T& type;\n   };\n\n} // namespace boost\n\n#endif // BOOST_TT_ADD_CONST_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/add_lvalue_reference.hpp",
    "content": "//  Copyright 2010 John Maddock\n\n//  Distributed under the Boost Software License, Version 1.0.\n//  See http://www.boost.org/LICENSE_1_0.txt\n\n#ifndef BOOST_TYPE_TRAITS_EXT_ADD_LVALUE_REFERENCE__HPP\n#define BOOST_TYPE_TRAITS_EXT_ADD_LVALUE_REFERENCE__HPP\n\n#include <boost/type_traits/add_reference.hpp>\n\nnamespace boost{\n\ntemplate <class T> struct add_lvalue_reference\n{\n   typedef typename boost::add_reference<T>::type type; \n};\n\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\ntemplate <class T> struct add_lvalue_reference<T&&>\n{\n   typedef T& type;\n};\n#endif\n\n}\n\n#endif  // BOOST_TYPE_TRAITS_EXT_ADD_LVALUE_REFERENCE__HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/add_reference.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_ADD_REFERENCE_HPP_INCLUDED\n#define BOOST_TT_ADD_REFERENCE_HPP_INCLUDED\n\n#include <boost/detail/workaround.hpp>\n#include <boost/config.hpp>\n\nnamespace boost {\n\nnamespace detail {\n\n//\n// We can't filter out rvalue_references at the same level as\n// references or we get ambiguities from msvc:\n//\n\ntemplate <typename T>\nstruct add_reference_impl\n{\n    typedef T& type;\n};\n\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\ntemplate <typename T>\nstruct add_reference_impl<T&&>\n{\n    typedef T&& type;\n};\n#endif\n\n} // namespace detail\n\ntemplate <class T> struct add_reference\n{\n   typedef typename boost::detail::add_reference_impl<T>::type type;\n};\ntemplate <class T> struct add_reference<T&>\n{\n   typedef T& type;\n};\n\n// these full specialisations are always required:\ntemplate <> struct add_reference<void> { typedef void type; };\n#ifndef BOOST_NO_CV_VOID_SPECIALIZATIONS\ntemplate <> struct add_reference<const void> { typedef void type; };\ntemplate <> struct add_reference<const volatile void> { typedef void type; };\ntemplate <> struct add_reference<volatile void> { typedef void type; };\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_ADD_REFERENCE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/add_rvalue_reference.hpp",
    "content": "//  add_rvalue_reference.hpp  ---------------------------------------------------------//\n\n//  Copyright 2010 Vicente J. Botet Escriba\n\n//  Distributed under the Boost Software License, Version 1.0.\n//  See http://www.boost.org/LICENSE_1_0.txt\n\n#ifndef BOOST_TYPE_TRAITS_EXT_ADD_RVALUE_REFERENCE__HPP\n#define BOOST_TYPE_TRAITS_EXT_ADD_RVALUE_REFERENCE__HPP\n\n#include <boost/config.hpp>\n\n//----------------------------------------------------------------------------//\n\n#include <boost/type_traits/is_void.hpp>\n#include <boost/type_traits/is_reference.hpp>\n\n//----------------------------------------------------------------------------//\n//                                                                            //\n//                           C++03 implementation of                          //\n//             20.9.7.2 Reference modifications [meta.trans.ref]              //\n//                          Written by Vicente J. Botet Escriba               //\n//                                                                            //\n// If T names an object or function type then the member typedef type\n// shall name T&&; otherwise, type shall name T. [ Note: This rule reflects\n// the semantics of reference collapsing. For example, when a type T names\n// a type T1&, the type add_rvalue_reference<T>::type is not an rvalue\n// reference. 
-end note ]\n//----------------------------------------------------------------------------//\n\nnamespace boost {\n\nnamespace type_traits_detail {\n\n    template <typename T, bool b>\n    struct add_rvalue_reference_helper\n    { typedef T   type; };\n\n#if !defined(BOOST_NO_CXX11_RVALUE_REFERENCES)\n    template <typename T>\n    struct add_rvalue_reference_helper<T, true>\n    {\n        typedef T&&   type;\n    };\n#endif\n\n    template <typename T>\n    struct add_rvalue_reference_imp\n    {\n       typedef typename boost::type_traits_detail::add_rvalue_reference_helper\n                  <T, (is_void<T>::value == false && is_reference<T>::value == false) >::type type;\n    };\n\n}\n\ntemplate <class T> struct add_rvalue_reference\n{\n   typedef typename boost::type_traits_detail::add_rvalue_reference_imp<T>::type type;\n};\n\n}  // namespace boost\n\n#endif  // BOOST_TYPE_TRAITS_EXT_ADD_RVALUE_REFERENCE__HPP\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/add_volatile.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_ADD_VOLATILE_HPP_INCLUDED\n#define BOOST_TT_ADD_VOLATILE_HPP_INCLUDED\n\n#include <boost/config.hpp>\n\nnamespace boost {\n\n// * convert a type T to volatile type - add_volatile<T>\n// this is not required since the result is always\n// the same as \"T volatile\", but it does suppress warnings\n// from some compilers:\n\n#if defined(BOOST_MSVC)\n// This bogus warning will appear when add_volatile is applied to a\n// const volatile reference because we can't detect const volatile\n// references with MSVC6.\n#   pragma warning(push)\n#   pragma warning(disable:4181) // warning C4181: qualifier applied to reference type ignored\n#endif \n\ntemplate <class T> struct add_volatile{ typedef T volatile type; };\n\n#if defined(BOOST_MSVC)\n#   pragma warning(pop)\n#endif \n\ntemplate <class T> struct add_volatile<T&>{ typedef T& type; };\n\n} // namespace boost\n\n#endif // BOOST_TT_ADD_VOLATILE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/conditional.hpp",
    "content": "//  (C) Copyright John Maddock 2010.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_CONDITIONAL_HPP_INCLUDED\n#define BOOST_TT_CONDITIONAL_HPP_INCLUDED\n\nnamespace boost {\n\ntemplate <bool b, class T, class U> struct conditional { typedef T type; };\ntemplate <class T, class U> struct conditional<false, T, U> { typedef U type; };\n\n} // namespace boost\n\n\n#endif // BOOST_TT_CONDITIONAL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/declval.hpp",
    "content": "//  declval.hpp  -------------------------------------------------------------//\n\n//  Copyright 2010 Vicente J. Botet Escriba\n\n//  Distributed under the Boost Software License, Version 1.0.\n//  See http://www.boost.org/LICENSE_1_0.txt\n\n#ifndef BOOST_TYPE_TRAITS_DECLVAL_HPP_INCLUDED\n#define BOOST_TYPE_TRAITS_DECLVAL_HPP_INCLUDED\n\n#include <boost/config.hpp>\n\n//----------------------------------------------------------------------------//\n\n#include <boost/type_traits/add_rvalue_reference.hpp>\n\n//----------------------------------------------------------------------------//\n//                                                                            //\n//                           C++03 implementation of                          //\n//                   20.2.4 Function template declval [declval]               //\n//                          Written by Vicente J. Botet Escriba               //\n//                                                                            //\n// 1 The library provides the function template declval to simplify the\n// definition of expressions which occur as unevaluated operands.\n// 2 Remarks: If this function is used, the program is ill-formed.\n// 3 Remarks: The template parameter T of declval may be an incomplete type.\n// [ Example:\n//\n// template <class To, class From>\n// decltype(static_cast<To>(declval<From>())) convert(From&&);\n//\n// declares a function template convert which only participates in overloading\n// if the type From can be explicitly converted to type To. For another example\n// see class template common_type (20.9.7.6). -end example ]\n//----------------------------------------------------------------------------//\n\nnamespace boost {\n\n    template <typename T>\n    typename add_rvalue_reference<T>::type declval() BOOST_NOEXCEPT; // as unevaluated operand\n\n}  // namespace boost\n\n#endif  // BOOST_TYPE_TRAITS_DECLVAL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/detail/config.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_CONFIG_HPP_INCLUDED\n#define BOOST_TT_CONFIG_HPP_INCLUDED\n\n#ifndef BOOST_CONFIG_HPP\n#include <boost/config.hpp>\n#endif\n#include <boost/version.hpp>\n#include <boost/detail/workaround.hpp>\n\n//\n// whenever we have a conversion function with ellipses\n// it needs to be declared __cdecl to suppress compiler\n// warnings from MS and Borland compilers (this *must*\n// appear before we include is_same.hpp below):\n#if defined(BOOST_MSVC) || (defined(__BORLANDC__) && !defined(BOOST_DISABLE_WIN32))\n#   define BOOST_TT_DECL __cdecl\n#else\n#   define BOOST_TT_DECL /**/\n#endif\n\n# if (BOOST_WORKAROUND(__MWERKS__, < 0x3000)                         \\\n    || BOOST_WORKAROUND(__IBMCPP__, < 600 )                         \\\n    || BOOST_WORKAROUND(__BORLANDC__, < 0x5A0)                      \\\n    || defined(__ghs)                                               \\\n    || BOOST_WORKAROUND(__HP_aCC, < 60700)           \\\n    || BOOST_WORKAROUND(MPW_CPLUS, BOOST_TESTED_AT(0x890))          \\\n    || BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x580)))       \\\n    && defined(BOOST_NO_IS_ABSTRACT)\n\n#   define BOOST_TT_NO_CONFORMING_IS_CLASS_IMPLEMENTATION 1\n\n#endif\n\n#ifndef BOOST_TT_NO_CONFORMING_IS_CLASS_IMPLEMENTATION\n# define BOOST_TT_HAS_CONFORMING_IS_CLASS_IMPLEMENTATION 1\n#endif\n\n//\n// define BOOST_TT_TEST_MS_FUNC_SIGS\n// when we want to test __stdcall etc function types with is_function etc\n// (Note, does not work with Borland, even though it does support __stdcall etc):\n//\n#if defined(_MSC_EXTENSIONS) && !defined(__BORLANDC__)\n#  define 
BOOST_TT_TEST_MS_FUNC_SIGS\n#endif\n\n//\n// define BOOST_TT_NO_CV_FUNC_TEST\n// if tests for cv-qualified member functions don't \n// work in is_member_function_pointer\n//\n#if BOOST_WORKAROUND(__MWERKS__, < 0x3000) || BOOST_WORKAROUND(__IBMCPP__, <= 600)\n#  define BOOST_TT_NO_CV_FUNC_TEST\n#endif\n\n//\n// Macros that have been deprecated, defined here for backwards compatibility:\n//\n#define BOOST_BROKEN_COMPILER_TYPE_TRAITS_SPECIALIZATION(x)\n#define BOOST_TT_BROKEN_COMPILER_SPEC(x)\n\n#endif // BOOST_TT_CONFIG_HPP_INCLUDED\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/detail/is_function_ptr_helper.hpp",
    "content": "\n//  Copyright 2000 John Maddock (john@johnmaddock.co.uk)\n//  Copyright 2002 Aleksey Gurtovoy (agurtovoy@meta-comm.com)\n//\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_TT_DETAIL_IS_FUNCTION_PTR_HELPER_HPP_INCLUDED\n#define BOOST_TT_DETAIL_IS_FUNCTION_PTR_HELPER_HPP_INCLUDED\n\n#if defined(BOOST_TT_PREPROCESSING_MODE)\n//\n// Hide these #include from dependency analysers as\n// these are required in maintenance mode only:\n//\n#define PP1 <boost/preprocessor/iterate.hpp>\n#include PP1\n#undef PP1\n#define PP1 <boost/preprocessor/enum_params.hpp>\n#include PP1\n#undef PP1\n#define PP1 <boost/preprocessor/comma_if.hpp>\n#include PP1\n#undef PP1\n#endif\n\nnamespace boost {\nnamespace type_traits {\n\ntemplate <class R>\nstruct is_function_ptr_helper\n{\n    BOOST_STATIC_CONSTANT(bool, value = false);\n};\n\n#if !defined(BOOST_TT_PREPROCESSING_MODE)\n// preprocessor-generated part, don't edit by hand!\n\ntemplate <class R >\nstruct is_function_ptr_helper<R (*)()> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R >\nstruct is_function_ptr_helper<R (*)( ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0>\nstruct is_function_ptr_helper<R (*)( T0)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0>\nstruct is_function_ptr_helper<R (*)( T0 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1>\nstruct is_function_ptr_helper<R (*)( T0 , T1)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1>\nstruct is_function_ptr_helper<R (*)( T0 , T1 ...)> { BOOST_STATIC_CONSTANT(bool, value = 
true); };\ntemplate <class R , class T0 , class T1 , class T2>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class 
T7>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R 
, class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , 
class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct 
is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 
, class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_function_ptr_helper<R (*)( 
T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_function_ptr_helper<R (*)( T0 
, T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24)> { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_function_ptr_helper<R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\n#else\n\n#undef BOOST_STATIC_CONSTANT\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3, (0, 25, \"boost/type_traits/detail/is_function_ptr_helper.hpp\"))\n#include BOOST_PP_ITERATE()\n\n#endif // 
BOOST_TT_PREPROCESSING_MODE\n\n} // namespace type_traits\n} // namespace boost\n\n#endif // BOOST_TT_DETAIL_IS_FUNCTION_PTR_HELPER_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define BOOST_PP_COUNTER BOOST_PP_FRAME_ITERATION(1)\n\ntemplate <class R BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_function_ptr_helper<R (*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T))> { BOOST_STATIC_CONSTANT(bool, value = true); };\n@#ifndef BOOST_TT_NO_ELLIPSIS_IN_FUNC_TESTING\ntemplate <class R BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_function_ptr_helper<R (*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...)> { BOOST_STATIC_CONSTANT(bool, value = true); };\n@#endif\n#undef BOOST_PP_COUNTER\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/detail/is_function_ptr_tester.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//  Aleksey Gurtovoy, Howard Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_TT_DETAIL_IS_FUNCTION_PTR_TESTER_HPP_INCLUDED\n#define BOOST_TT_DETAIL_IS_FUNCTION_PTR_TESTER_HPP_INCLUDED\n\n#include <boost/type_traits/detail/yes_no_type.hpp>\n\n#if defined(BOOST_TT_PREPROCESSING_MODE)\n//\n// Hide include dependencies from analysers since they're\n// only require in maintenance mode:\n//\n#define PP1 <boost/preprocessor/iterate.hpp>\n#define PP2 <boost/preprocessor/enum_params.hpp>\n#define PP3 <boost/preprocessor/comma_if.hpp>\n#include PP1\n#include PP2\n#include PP3\n#undef PP1\n#undef PP2\n#undef PP3\n#endif\n\nnamespace boost {\nnamespace type_traits {\n\n// Note it is acceptable to use ellipsis here, since the argument will\n// always be a pointer type of some sort (JM 2005/06/04):\nno_type BOOST_TT_DECL is_function_ptr_tester(...);\n\n#if !defined(BOOST_TT_PREPROCESSING_MODE)\n// pre-processed code, don't edit, try GNU cpp with \n// cpp -I../../../ -DBOOST_TT_PREPROCESSING_MODE -x c++ -P filename\n\ntemplate <class R >\nyes_type is_function_ptr_tester(R (*)());\ntemplate <class R >\nyes_type is_function_ptr_tester(R (*)( ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R >\nyes_type is_function_ptr_tester(R (__stdcall*)());\n#ifndef _MANAGED\ntemplate <class R >\nyes_type is_function_ptr_tester(R (__fastcall*)());\n#endif\ntemplate <class R >\nyes_type is_function_ptr_tester(R (__cdecl*)());\n#endif\ntemplate <class R , class T0 >\nyes_type is_function_ptr_tester(R (*)( T0));\ntemplate <class R , class T0 >\nyes_type 
is_function_ptr_tester(R (*)( T0 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0));\n#ifndef _MANAGED\ntemplate <class R , class T0 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0));\n#endif\ntemplate <class R , class T0 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0));\n#endif\ntemplate <class R , class T0 , class T1 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1));\ntemplate <class R , class T0 , class T1 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1));\n#endif\ntemplate <class R , class T0 , class T1 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2));\ntemplate <class R , class T0 , class T1 , class T2 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 
, T2 , T3));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , 
T5));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , 
T3 , T4 , T5 , T6 , T7));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class 
T7 , class T8 , class T9 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , 
class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class 
T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type 
is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type 
is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class 
T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type 
is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , 
T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 
>\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , 
class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , 
T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21));\n#endif\ntemplate <class R , class T0 , class T1 , class 
T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 
, class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , 
class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24));\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_function_ptr_tester(R (*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...));\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class 
T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_function_ptr_tester(R (__stdcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24));\n#ifndef _MANAGED\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_function_ptr_tester(R (__fastcall*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24));\n#endif\ntemplate <class R , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_function_ptr_tester(R (__cdecl*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24));\n#endif\n#else\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3, (0, 25, \"boost/type_traits/detail/is_function_ptr_tester.hpp\"))\n#include BOOST_PP_ITERATE()\n\n#endif // BOOST_TT_PREPROCESSING_MODE\n\n} // namespace type_traits\n} // namespace boost\n\n#endif // BOOST_TT_DETAIL_IS_FUNCTION_PTR_TESTER_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define BOOST_PP_COUNTER BOOST_PP_FRAME_ITERATION(1)\n#undef __stdcall\n#undef __fastcall\n#undef __cdecl\n\ntemplate <class R BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_function_ptr_tester(R 
(*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)));\n@#ifndef BOOST_TT_NO_ELLIPSIS_IN_FUNC_TESTING\ntemplate <class R BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_function_ptr_tester(R (*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...));\n@#endif\n@#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_function_ptr_tester(R (__stdcall*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)));\n@#ifndef _MANAGED\ntemplate <class R BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_function_ptr_tester(R (__fastcall*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)));\n@#endif\ntemplate <class R BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_function_ptr_tester(R (__cdecl*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)));\n@#endif\n\n#undef BOOST_PP_COUNTER\n#endif // BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/detail/is_mem_fun_pointer_impl.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//  Aleksey Gurtovoy, Howard Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_TT_DETAIL_IS_MEM_FUN_POINTER_IMPL_HPP_INCLUDED\n#define BOOST_TT_DETAIL_IS_MEM_FUN_POINTER_IMPL_HPP_INCLUDED\n\n#include <boost/config.hpp>\n\n#if defined(BOOST_TT_PREPROCESSING_MODE)\n//\n// Maintenance mode, hide include dependencies\n// from trackers:\n//\n#define PPI <boost/preprocessor/iterate.hpp>\n#include PPI\n#undef PPI\n#define PPI <boost/preprocessor/enum_params.hpp>\n#include PPI\n#undef PPI\n#define PPI <boost/preprocessor/comma_if.hpp>\n#include PPI\n#undef PPI\n#endif\n\nnamespace boost {\nnamespace type_traits {\n\ntemplate <typename T>\nstruct is_mem_fun_pointer_impl\n{\n    BOOST_STATIC_CONSTANT(bool, value = false);\n};\n\n#if !defined(BOOST_TT_PREPROCESSING_MODE)\n// pre-processed code, don't edit, try GNU cpp with \n// cpp -I../../../ -DBOOST_TT_PREPROCESSING_MODE -x c++ -P filename\n\ntemplate <class R, class T >\nstruct is_mem_fun_pointer_impl<R (T::*)() > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T >\nstruct is_mem_fun_pointer_impl<R (T::*)( ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T >\nstruct is_mem_fun_pointer_impl<R (T::*)() const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T >\nstruct is_mem_fun_pointer_impl<R (T::*)() volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T >\nstruct is_mem_fun_pointer_impl<R (T::*)() const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T >\nstruct is_mem_fun_pointer_impl<R (T::*)( ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T >\nstruct is_mem_fun_pointer_impl<R (T::*)( ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T >\nstruct is_mem_fun_pointer_impl<R (T::*)( ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 ...) > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 ...) 
volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 ...) > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...) 
const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...) > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...) 
volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...) > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...) 
const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...) 
const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...) > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...) 
const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...) 
volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...) 
volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...) 
volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...) 
volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...) 
const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...) 
const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...) 
const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...) 
const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...) 
const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...) 
const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 
...) > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 
, T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...) 
const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_mem_fun_pointer_impl<R 
(T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...) 
const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , 
class T21 , class T22 , class T23>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...) 
const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) > { BOOST_STATIC_CONSTANT(bool, value = true); };\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...) 
> { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 
, class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24>\nstruct is_mem_fun_pointer_impl<R (T::*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...) 
const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n#endif\n\n#else\n\n#undef BOOST_STATIC_CONSTANT\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3, (0, 25, \"boost/type_traits/detail/is_mem_fun_pointer_impl.hpp\"))\n#include BOOST_PP_ITERATE()\n\n#endif // BOOST_TT_PREPROCESSING_MODE\n\n} // namespace type_traits\n} // namespace boost\n\n#endif // BOOST_TT_DETAIL_IS_MEM_FUN_POINTER_IMPL_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define BOOST_PP_COUNTER BOOST_PP_FRAME_ITERATION(1)\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_mem_fun_pointer_impl<R (T::*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) > { BOOST_STATIC_CONSTANT(bool, value = true); };\n@#ifndef BOOST_TT_NO_ELLIPSIS_IN_FUNC_TESTING\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_mem_fun_pointer_impl<R (T::*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...) > { BOOST_STATIC_CONSTANT(bool, value = true); };\n@#endif\n\n@#if !defined(BOOST_TT_NO_CV_FUNC_TEST)\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_mem_fun_pointer_impl<R (T::*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_mem_fun_pointer_impl<R (T::*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_mem_fun_pointer_impl<R (T::*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\n@#ifndef BOOST_TT_NO_ELLIPSIS_IN_FUNC_TESTING\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) 
BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_mem_fun_pointer_impl<R (T::*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...) const > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_mem_fun_pointer_impl<R (T::*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...) volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T)>\nstruct is_mem_fun_pointer_impl<R (T::*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...) const volatile > { BOOST_STATIC_CONSTANT(bool, value = true); };\n@#endif\n@#endif\n\n#undef BOOST_PP_COUNTER\n#endif // BOOST_PP_IS_ITERATING\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/detail/is_mem_fun_pointer_tester.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//  Aleksey Gurtovoy, Howard Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#if !defined(BOOST_PP_IS_ITERATING)\n\n///// header body\n\n#ifndef BOOST_TT_DETAIL_IS_MEM_FUN_POINTER_TESTER_HPP_INCLUDED\n#define BOOST_TT_DETAIL_IS_MEM_FUN_POINTER_TESTER_HPP_INCLUDED\n\n#include <boost/type_traits/detail/yes_no_type.hpp>\n#include <boost/type_traits/detail/config.hpp>\n\n#if defined(BOOST_TT_PREPROCESSING_MODE)\n//\n// Maintentance mode, hide include dependencies\n// from dependency trackers:\n//\n#define PPI <boost/preprocessor/iterate.hpp>\n#include PPI\n#undef PPI\n#define PPI <boost/preprocessor/enum_params.hpp>\n#include PPI\n#undef PPI\n#define <boost/preprocessor/comma_if.hpp>\n#include PPI\n#undef\n#endif\n\nnamespace boost {\nnamespace type_traits {\n\nno_type BOOST_TT_DECL is_mem_fun_pointer_tester(...);\n\n#if !defined(BOOST_TT_PREPROCESSING_MODE)\n// pre-processed code, don't edit, try GNU cpp with \n// cpp -I../../../ -DBOOST_TT_PREPROCESSING_MODE -x c++ -P filename\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)());\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)() const);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)() volatile);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)() const volatile);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( ...));\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( ...) 
const);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( ...) volatile);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)());\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)() const);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)() volatile);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)() const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)());\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)() const);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)() volatile);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)() const volatile);\n\n#endif\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)());\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)() const);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)() volatile);\n\ntemplate <class R, class T >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)() const volatile);\n\n#endif\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0));\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0) const);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0) 
volatile);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0) const volatile);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 ...));\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 ...) const);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 ...) volatile);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0));\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0) const);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0) volatile);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0));\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0) const);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0) volatile);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0));\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0) const);\n\ntemplate <class R, class T , class T0 
>\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0) volatile);\n\ntemplate <class R, class T , class T0 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1));\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1) const);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1) volatile);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 ...));\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1));\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1) const);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1) volatile);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1));\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1) const);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1) volatile);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1));\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1) const);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1) volatile);\n\ntemplate <class R, class T , class T0 , class T1 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2));\n\ntemplate <class R, class T , class T0 , class T1 , 
class T2 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , 
T1 , T2) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 ...) 
const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , 
T3));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4) const);\n\ntemplate 
<class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5));\n\ntemplate <class R, class T , class T0 , class T1 , class 
T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...) 
const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 ...) const volatile);\n\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall 
T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class 
T7 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const 
volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const volatile);\n\ntemplate <class R, class T , class 
T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) 
const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 >\nyes_type is_mem_fun_pointer_tester(R 
(__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...) 
volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const);\n\ntemplate <class R, class T , class T0 , 
class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , 
class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R 
(__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , 
class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class 
T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (T::*const 
volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...) 
volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , 
T11 , T12));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) volatile);\n\ntemplate <class R, 
class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...));\n\ntemplate <class 
R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , 
T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class 
T13 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class 
T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...) 
volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 
, class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , 
T9 , T10 , T11 , T12 , T13 , T14) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , 
class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15));\n\ntemplate <class R, class T , class T0 
, class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , 
class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , 
class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 
, T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 
, T11 , T12 , T13 , T14 , T15 , T16) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , 
T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...) 
volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const 
volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , 
class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , 
class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , 
T16 , T17 , T18 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , 
class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) volatile);\n\ntemplate <class R, 
class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 >\nyes_type 
is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const volatile);\n\ntemplate <class R, class T , class T0 , 
class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class 
T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R 
(__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20));\n\ntemplate <class R, class T , class T0 
, class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class 
T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , 
class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class 
T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class 
T21 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 
, T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const 
volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const 
volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21) const volatile);\n\n#endif\ntemplate <class R, class 
T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const volatile);\n\ntemplate <class R, class T , class T0 , 
class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 
, T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 
, T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 
, T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 
>\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...) 
volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 ...) const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , 
T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , 
class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class 
T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23) const volatile);\n\n#endif\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const);\n\ntemplate <class R, class T , class T0 , class T1 , 
class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , 
T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24 ...) 
const volatile);\n#ifdef BOOST_TT_TEST_MS_FUNC_SIGS\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type 
is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const volatile);\n\n#ifndef _MANAGED\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , 
class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const volatile);\n\n#endif\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24));\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 
, T22 , T23 , T24) volatile);\n\ntemplate <class R, class T , class T0 , class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 , class T17 , class T18 , class T19 , class T20 , class T21 , class T22 , class T23 , class T24 >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)( T0 , T1 , T2 , T3 , T4 , T5 , T6 , T7 , T8 , T9 , T10 , T11 , T12 , T13 , T14 , T15 , T16 , T17 , T18 , T19 , T20 , T21 , T22 , T23 , T24) const volatile);\n\n#endif\n\n#else\n\n#define BOOST_PP_ITERATION_PARAMS_1 \\\n    (3, (0, 25, \"boost/type_traits/detail/is_mem_fun_pointer_tester.hpp\"))\n#include BOOST_PP_ITERATE()\n\n#endif // BOOST_TT_PREPROCESSING_MODE\n\n} // namespace type_traits\n} // namespace boost\n\n#endif // BOOST_TT_DETAIL_IS_MEM_FUN_POINTER_TESTER_HPP_INCLUDED\n\n///// iteration\n\n#else\n#define BOOST_PP_COUNTER BOOST_PP_FRAME_ITERATION(1)\n#undef __stdcall\n#undef __fastcall\n#undef __cdecl\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)));\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) volatile);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const volatile);\n\n@#ifndef 
BOOST_TT_NO_ELLIPSIS_IN_FUNC_TESTING\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...));\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...) const);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...) volatile);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T) ...) const volatile);\n@#endif\n@#ifdef BOOST_TT_TEST_MS_FUNC_SIGS // Other calling conventions used by MS compatible compilers:\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)));\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) volatile);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__stdcall T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const 
volatile);\n\n@#ifndef _MANAGED\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)));\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) volatile);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__fastcall T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const volatile);\n\n@#endif\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)));\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) volatile);\n\ntemplate <class R, class T BOOST_PP_COMMA_IF(BOOST_PP_COUNTER) BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,class T) >\nyes_type is_mem_fun_pointer_tester(R (__cdecl T::*const volatile*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_COUNTER,T)) const volatile);\n\n@#endif\n\n#undef BOOST_PP_COUNTER\n#endif // 
BOOST_PP_IS_ITERATING\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/detail/yes_no_type.hpp",
    "content": "\n//  (C) Copyright John Maddock and Steve Cleary 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n//\n//  macros and helpers for working with integral-constant-expressions.\n\n#ifndef BOOST_TT_DETAIL_YES_NO_TYPE_HPP_INCLUDED\n#define BOOST_TT_DETAIL_YES_NO_TYPE_HPP_INCLUDED\n\nnamespace boost {\nnamespace type_traits {\n\ntypedef char yes_type;\nstruct no_type\n{\n   char padding[8];\n};\n\n} // namespace type_traits\n} // namespace boost\n\n#endif // BOOST_TT_DETAIL_YES_NO_TYPE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/has_trivial_assign.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_HAS_TRIVIAL_ASSIGN_HPP_INCLUDED\n#define BOOST_TT_HAS_TRIVIAL_ASSIGN_HPP_INCLUDED\n\n#include <boost/type_traits/detail/config.hpp>\n#include <boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n\n#if !defined(BOOST_HAS_TRIVIAL_ASSIGN) || defined(BOOST_MSVC) || defined(__GNUC__) || defined(BOOST_INTEL) || defined(__SUNPRO_CC) || defined(__clang)\n#include <boost/type_traits/is_pod.hpp>\n#include <boost/type_traits/is_const.hpp>\n#include <boost/type_traits/is_volatile.hpp>\n#include <boost/type_traits/is_assignable.hpp>\n#endif\n\nnamespace boost {\n\n   template <typename T>\n   struct has_trivial_assign : public integral_constant < bool,\n#ifdef BOOST_HAS_TRIVIAL_ASSIGN\n      BOOST_HAS_TRIVIAL_ASSIGN(T)\n#else\n      ::boost::is_pod<T>::value && !::boost::is_const<T>::value && !::boost::is_volatile<T>::value\n#endif\n   > {};\n\n   template<> struct has_trivial_assign<void> : public false_type{};\n#ifndef BOOST_NO_CV_VOID_SPECIALIZATIONS\n   template<> struct has_trivial_assign<void const> : public false_type{};\n   template<> struct has_trivial_assign<void const volatile> : public false_type{};\n   template<> struct has_trivial_assign<void volatile> : public false_type{};\n#endif\n   template <class T> struct has_trivial_assign<T volatile> : public false_type{};\n   template <class T> struct has_trivial_assign<T&> : public false_type{};\n#if !defined(BOOST_NO_CXX11_RVALUE_REFERENCES)\n   template <class T> struct has_trivial_assign<T&&> : public false_type{};\n#endif\n   // Arrays are not explictly assignable:\n   
template <typename T, std::size_t N> struct has_trivial_assign<T[N]> : public false_type{};\n   template <typename T> struct has_trivial_assign<T[]> : public false_type{};\n\n} // namespace boost\n\n#endif // BOOST_TT_HAS_TRIVIAL_ASSIGN_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/has_trivial_destructor.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_HAS_TRIVIAL_DESTRUCTOR_HPP_INCLUDED\n#define BOOST_TT_HAS_TRIVIAL_DESTRUCTOR_HPP_INCLUDED\n\n#include <boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n\n#ifdef BOOST_HAS_TRIVIAL_DESTRUCTOR\n\n#if defined(BOOST_INTEL) || defined(BOOST_MSVC)\n#include <boost/type_traits/is_pod.hpp>\n#endif\n#ifdef BOOST_HAS_SGI_TYPE_TRAITS\n#include <boost/type_traits/is_same.hpp>\n#endif\n\n#if defined(__GNUC__) || defined(__clang) || defined(__SUNPRO_CC)\n#include <boost/type_traits/is_destructible.hpp>\n#endif\n\nnamespace boost {\n\ntemplate <typename T> struct has_trivial_destructor : public integral_constant<bool, BOOST_HAS_TRIVIAL_DESTRUCTOR(T)>{};\n#else\n#include <boost/type_traits/is_pod.hpp>\n\nnamespace boost{\n\ntemplate <typename T> struct has_trivial_destructor : public integral_constant<bool, ::boost::is_pod<T>::value>{};\n#endif\n\ntemplate <> struct has_trivial_destructor<void> : public false_type{};\n#ifndef BOOST_NO_CV_VOID_SPECIALIZATIONS\ntemplate <> struct has_trivial_destructor<void const> : public false_type{};\ntemplate <> struct has_trivial_destructor<void const volatile> : public false_type{};\ntemplate <> struct has_trivial_destructor<void volatile> : public false_type{};\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_HAS_TRIVIAL_DESTRUCTOR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/integral_constant.hpp",
    "content": "//  (C) Copyright John Maddock 2015. \n//  Use, modification and distribution are subject to the \n//  Boost Software License, Version 1.0. (See accompanying file \n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_TYPE_TRAITS_INTEGRAL_CONSTANT_HPP\n#define BOOST_TYPE_TRAITS_INTEGRAL_CONSTANT_HPP\n\n#include <boost/config.hpp>\n#include <boost/detail/workaround.hpp>\n\n#if (BOOST_WORKAROUND(BOOST_MSVC, BOOST_TESTED_AT(1400)) \\\n   || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x610)) \\\n   || BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840)) \\\n   || BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3202)) \\\n   || BOOST_WORKAROUND(BOOST_INTEL_CXX_VERSION, BOOST_TESTED_AT(810)) )\n\n\nnamespace boost{\n   namespace mpl\n   {\n      template <bool B> struct bool_;\n      template <class I, I val> struct integral_c;\n      struct integral_c_tag;\n   }\n}\n\n#else\n\nnamespace mpl_{\n\n   template <bool B> struct bool_;\n   template <class I, I val> struct integral_c;\n   struct integral_c_tag;\n}\n\nnamespace boost\n{\n   namespace mpl\n   {\n      using ::mpl_::bool_;\n      using ::mpl_::integral_c;\n      using ::mpl_::integral_c_tag;\n   }\n}\n\n#endif\n\nnamespace boost{\n\n   template <class T, T val>\n   struct integral_constant\n   {\n      typedef mpl::integral_c_tag tag;\n      typedef T value_type;\n      typedef integral_constant<T, val> type;\n      static const T value = val;\n      //\n      // This helper function is just to disable type-punning \n      // warnings from GCC:\n      //\n      template <class U>\n      static U& dereference(U* p) { return *p; }\n\n      operator const mpl::integral_c<T, val>& ()const\n      {\n         static const char data[sizeof(long)] = { 0 };\n         return dereference(reinterpret_cast<const mpl::integral_c<T, val>*>(&data));\n      }\n      BOOST_CONSTEXPR operator T()const { return val; }\n   };\n\n   template <class T, T val>\n   T const 
integral_constant<T, val>::value;\n      \n   template <bool val>\n   struct integral_constant<bool, val>\n   {\n      typedef mpl::integral_c_tag tag;\n      typedef bool value_type;\n      typedef integral_constant<bool, val> type;\n      static const bool value = val;\n      //\n      // This helper function is just to disable type-punning \n      // warnings from GCC:\n      //\n      template <class T>\n      static T& dereference(T* p) { return *p; }\n\n      operator const mpl::bool_<val>& ()const\n      {\n         static const char data = 0;\n         return dereference(reinterpret_cast<const mpl::bool_<val>*>(&data));\n      }\n      BOOST_CONSTEXPR operator bool()const { return val; }\n   };\n\n   template <bool val>\n   bool const integral_constant<bool, val>::value;\n\n   typedef integral_constant<bool, true> true_type;\n   typedef integral_constant<bool, false> false_type;\n\n}\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/intrinsics.hpp",
    "content": "//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_INTRINSICS_HPP_INCLUDED\n#define BOOST_TT_INTRINSICS_HPP_INCLUDED\n\n#ifndef BOOST_TT_DISABLE_INTRINSICS\n\n#include <boost/config.hpp>\n\n#ifndef BOOST_TT_CONFIG_HPP_INCLUDED\n#include <boost/type_traits/detail/config.hpp>\n#endif\n\n//\n// Helper macros for builtin compiler support.\n// If your compiler has builtin support for any of the following\n// traits concepts, then redefine the appropriate macros to pick\n// up on the compiler support:\n//\n// (these should largely ignore cv-qualifiers)\n// BOOST_IS_UNION(T) should evaluate to true if T is a union type\n// BOOST_IS_POD(T) should evaluate to true if T is a POD type\n// BOOST_IS_EMPTY(T) should evaluate to true if T is an empty class type (and not a union)\n// BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) should evaluate to true if \"T x;\" has no effect\n// BOOST_HAS_TRIVIAL_COPY(T) should evaluate to true if T(t) <==> memcpy\n// BOOST_HAS_TRIVIAL_MOVE_CONSTRUCTOR(T) should evaluate to true if T(boost::move(t)) <==> memcpy\n// BOOST_HAS_TRIVIAL_ASSIGN(T) should evaluate to true if t = u <==> memcpy\n// BOOST_HAS_TRIVIAL_MOVE_ASSIGN(T) should evaluate to true if t = boost::move(u) <==> memcpy\n// BOOST_HAS_TRIVIAL_DESTRUCTOR(T) should evaluate to true if ~T() has no effect\n// BOOST_HAS_NOTHROW_CONSTRUCTOR(T) should evaluate to true if \"T x;\" can not throw\n// BOOST_HAS_NOTHROW_COPY(T) should evaluate to true if T(t) can not throw\n// BOOST_HAS_NOTHROW_ASSIGN(T) should evaluate to true if t = u can not throw\n// BOOST_HAS_VIRTUAL_DESTRUCTOR(T) should evaluate to true T has a virtual destructor\n// 
BOOST_IS_NOTHROW_MOVE_CONSTRUCT(T) should evaluate to true if T has a non-throwing move constructor.\n// BOOST_IS_NOTHROW_MOVE_ASSIGN(T) should evaluate to true if T has a non-throwing move assignment operator.\n//\n// The following can also be defined: when detected our implementation is greatly simplified.\n//\n// BOOST_IS_ABSTRACT(T) true if T is an abstract type\n// BOOST_IS_BASE_OF(T,U) true if T is a base class of U\n// BOOST_IS_CLASS(T) true if T is a class type (and not a union)\n// BOOST_IS_CONVERTIBLE(T,U) true if T is convertible to U\n// BOOST_IS_ENUM(T) true is T is an enum\n// BOOST_IS_POLYMORPHIC(T) true if T is a polymorphic type\n// BOOST_ALIGNMENT_OF(T) should evaluate to the alignment requirements of type T.\n//\n// define BOOST_TT_DISABLE_INTRINSICS to prevent any intrinsics being used (mostly used when testing)\n//\n\n#ifdef BOOST_HAS_SGI_TYPE_TRAITS\n    // Hook into SGI's __type_traits class, this will pick up user supplied\n    // specializations as well as SGI - compiler supplied specializations.\n#   include <boost/type_traits/is_same.hpp>\n#   ifdef __NetBSD__\n      // There are two different versions of type_traits.h on NetBSD on Spark\n      // use an implicit include via algorithm instead, to make sure we get\n      // the same version as the std lib:\n#     include <algorithm>\n#   else\n#    include <type_traits.h>\n#   endif\n#   define BOOST_IS_POD(T) ::boost::is_same< typename ::__type_traits<T>::is_POD_type, ::__true_type>::value\n#   define BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) ::boost::is_same< typename ::__type_traits<T>::has_trivial_default_constructor, ::__true_type>::value\n#   define BOOST_HAS_TRIVIAL_COPY(T) ::boost::is_same< typename ::__type_traits<T>::has_trivial_copy_constructor, ::__true_type>::value\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) ::boost::is_same< typename ::__type_traits<T>::has_trivial_assignment_operator, ::__true_type>::value\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) ::boost::is_same< typename 
::__type_traits<T>::has_trivial_destructor, ::__true_type>::value\n\n#   ifdef __sgi\n#      define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#   endif\n#endif\n\n#if defined(__MSL_CPP__) && (__MSL_CPP__ >= 0x8000)\n    // Metrowerks compiler is acquiring intrinsic type traits support\n    // post version 8.  We hook into the published interface to pick up\n    // user defined specializations as well as compiler intrinsics as \n    // and when they become available:\n#   include <msl_utility>\n#   define BOOST_IS_UNION(T) BOOST_STD_EXTENSION_NAMESPACE::is_union<T>::value\n#   define BOOST_IS_POD(T) BOOST_STD_EXTENSION_NAMESPACE::is_POD<T>::value\n#   define BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) BOOST_STD_EXTENSION_NAMESPACE::has_trivial_default_ctor<T>::value\n#   define BOOST_HAS_TRIVIAL_COPY(T) BOOST_STD_EXTENSION_NAMESPACE::has_trivial_copy_ctor<T>::value\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) BOOST_STD_EXTENSION_NAMESPACE::has_trivial_assignment<T>::value\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) BOOST_STD_EXTENSION_NAMESPACE::has_trivial_dtor<T>::value\n#   define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#endif\n\n#if (defined(BOOST_MSVC) && defined(BOOST_MSVC_FULL_VER) && (BOOST_MSVC_FULL_VER >=140050215))\\\n         || (defined(BOOST_INTEL) && defined(_MSC_VER) && (_MSC_VER >= 1500))\n//\n// Note that even though these intrinsics rely on other type traits classes\n// we do not #include those here as it produces cyclic dependencies and\n// can cause the intrinsics to not even be used at all!\n//\n#   define BOOST_IS_UNION(T) __is_union(T)\n#   define BOOST_IS_POD(T) (__is_pod(T) && __has_trivial_constructor(T))\n#   define BOOST_IS_EMPTY(T) __is_empty(T)\n#   define BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) __has_trivial_constructor(T)\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) (__has_trivial_assign(T) || ( ::boost::is_pod<T>::value && ! 
::boost::is_const<T>::value && !::boost::is_volatile<T>::value))\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) (__has_trivial_destructor(T) || ::boost::is_pod<T>::value)\n#   define BOOST_HAS_NOTHROW_CONSTRUCTOR(T) (__has_nothrow_constructor(T) || ::boost::has_trivial_constructor<T>::value)\n#if !defined(BOOST_INTEL)\n#   define BOOST_HAS_NOTHROW_COPY(T) ((__has_nothrow_copy(T) || ::boost::has_trivial_copy<T>::value) && !is_array<T>::value)\n#   define BOOST_HAS_TRIVIAL_COPY(T) (__has_trivial_copy(T) || ::boost::is_pod<T>::value)\n#elif (_MSC_VER >= 1900)\n#   define BOOST_HAS_NOTHROW_COPY(T) ((__is_nothrow_constructible(T, typename add_lvalue_reference<typename add_const<T>::type>::type)) && !is_array<T>::value)\n#   define BOOST_HAS_TRIVIAL_COPY(T) (__is_trivially_constructible(T, typename add_lvalue_reference<typename add_const<T>::type>::type))\n#endif\n#   define BOOST_HAS_NOTHROW_ASSIGN(T) (__has_nothrow_assign(T) || ::boost::has_trivial_assign<T>::value)\n#   define BOOST_HAS_VIRTUAL_DESTRUCTOR(T) __has_virtual_destructor(T)\n\n#   define BOOST_IS_ABSTRACT(T) __is_abstract(T)\n#   define BOOST_IS_BASE_OF(T,U) (__is_base_of(T,U) && !is_same<T,U>::value)\n#   define BOOST_IS_CLASS(T) __is_class(T)\n#   define BOOST_IS_CONVERTIBLE(T,U) ((__is_convertible_to(T,U) || (is_same<T,U>::value && !is_function<U>::value)) && !__is_abstract(U))\n#   define BOOST_IS_ENUM(T) __is_enum(T)\n//  This one fails if the default alignment has been changed with /Zp:\n//  #   define BOOST_ALIGNMENT_OF(T) __alignof(T)\n\n#   if defined(_MSC_VER) && (_MSC_VER >= 1700)\n#       define BOOST_HAS_TRIVIAL_MOVE_CONSTRUCTOR(T) ((__has_trivial_move_constructor(T) || boost::is_pod<T>::value) && ! ::boost::is_volatile<T>::value && ! ::boost::is_reference<T>::value)\n#       define BOOST_HAS_TRIVIAL_MOVE_ASSIGN(T) ((__has_trivial_move_assign(T) || boost::is_pod<T>::value) && ! ::boost::is_const<T>::value && !::boost::is_volatile<T>::value && ! 
::boost::is_reference<T>::value)\n#   endif\n#ifndef BOOST_NO_CXX11_FINAL\n//  This one doesn't quite always do the right thing on older VC++ versions\n//  we really need it when the final keyword is supporyted though:\n#   define BOOST_IS_POLYMORPHIC(T) __is_polymorphic(T)\n#endif\n#if _MSC_FULL_VER >= 180020827\n#   define BOOST_IS_NOTHROW_MOVE_ASSIGN(T) (__is_nothrow_assignable(T&, T&&))\n#   define BOOST_IS_NOTHROW_MOVE_CONSTRUCT(T) (__is_nothrow_constructible(T, T&&))\n#endif\n#   define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#endif\n\n#if defined(__DMC__) && (__DMC__ >= 0x848)\n// For Digital Mars C++, www.digitalmars.com\n#   define BOOST_IS_UNION(T) (__typeinfo(T) & 0x400)\n#   define BOOST_IS_POD(T) (__typeinfo(T) & 0x800)\n#   define BOOST_IS_EMPTY(T) (__typeinfo(T) & 0x1000)\n#   define BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) (__typeinfo(T) & 0x10)\n#   define BOOST_HAS_TRIVIAL_COPY(T) (__typeinfo(T) & 0x20)\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) (__typeinfo(T) & 0x40)\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) (__typeinfo(T) & 0x8)\n#   define BOOST_HAS_NOTHROW_CONSTRUCTOR(T) (__typeinfo(T) & 0x80)\n#   define BOOST_HAS_NOTHROW_COPY(T) (__typeinfo(T) & 0x100)\n#   define BOOST_HAS_NOTHROW_ASSIGN(T) (__typeinfo(T) & 0x200)\n#   define BOOST_HAS_VIRTUAL_DESTRUCTOR(T) (__typeinfo(T) & 0x4)\n#   define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#endif\n\n#if defined(BOOST_CLANG) && defined(__has_feature) && !defined(__CUDACC__)\n//\n// Note that these intrinsics are disabled for the CUDA meta-compiler as it appears\n// to not support them, even though the underlying clang compiler does so.\n// This is a rubbish fix as it basically stops type traits from working correctly, \n// but maybe the best we can do for now.  
See https://svn.boost.org/trac/boost/ticket/10694\n//\n//\n// Note that even though these intrinsics rely on other type traits classes\n// we do not #include those here as it produces cyclic dependencies and\n// can cause the intrinsics to not even be used at all!\n//\n#   include <cstddef>\n\n#   if __has_feature(is_union)\n#     define BOOST_IS_UNION(T) __is_union(T)\n#   endif\n#   if (!defined(__GLIBCXX__) || (__GLIBCXX__ >= 20080306 && __GLIBCXX__ != 20080519)) && __has_feature(is_pod)\n#     define BOOST_IS_POD(T) __is_pod(T)\n#   endif\n#   if (!defined(__GLIBCXX__) || (__GLIBCXX__ >= 20080306 && __GLIBCXX__ != 20080519)) && __has_feature(is_empty)\n#     define BOOST_IS_EMPTY(T) __is_empty(T)\n#   endif\n#   if __has_feature(has_trivial_constructor)\n#     define BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) __has_trivial_constructor(T)\n#   endif\n#   if __has_feature(has_trivial_copy)\n#     define BOOST_HAS_TRIVIAL_COPY(T) (__has_trivial_copy(T) && !is_reference<T>::value)\n#   endif\n#   if __has_feature(has_trivial_assign)\n#     define BOOST_HAS_TRIVIAL_ASSIGN(T) (__has_trivial_assign(T) && !is_volatile<T>::value && is_assignable<T&, const T&>::value)\n#   endif\n#   if __has_feature(has_trivial_destructor)\n#     define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) (__has_trivial_destructor(T)  && is_destructible<T>::value)\n#   endif\n#   if __has_feature(has_nothrow_constructor)\n#     define BOOST_HAS_NOTHROW_CONSTRUCTOR(T) (__has_nothrow_constructor(T) && is_default_constructible<T>::value)\n#   endif\n#   if __has_feature(has_nothrow_copy)\n#     define BOOST_HAS_NOTHROW_COPY(T) (__has_nothrow_copy(T) && !is_volatile<T>::value && !is_reference<T>::value && is_copy_constructible<T>::value)\n#   endif\n#   if __has_feature(has_nothrow_assign)\n#     define BOOST_HAS_NOTHROW_ASSIGN(T) (__has_nothrow_assign(T) && !is_volatile<T>::value && is_assignable<T&, const T&>::value)\n#   endif\n#   if __has_feature(has_virtual_destructor)\n#     define BOOST_HAS_VIRTUAL_DESTRUCTOR(T) 
__has_virtual_destructor(T)\n#   endif\n#   if __has_feature(is_abstract)\n#     define BOOST_IS_ABSTRACT(T) __is_abstract(T)\n#   endif\n#   if __has_feature(is_base_of)\n#     define BOOST_IS_BASE_OF(T,U) (__is_base_of(T,U) && !is_same<T,U>::value)\n#   endif\n#   if __has_feature(is_class)\n#     define BOOST_IS_CLASS(T) __is_class(T)\n#   endif\n#   if __has_feature(is_convertible_to)\n#     define BOOST_IS_CONVERTIBLE(T,U) __is_convertible_to(T,U)\n#   endif\n#   if __has_feature(is_enum)\n#     define BOOST_IS_ENUM(T) __is_enum(T)\n#   endif\n#   if __has_feature(is_polymorphic)\n#     define BOOST_IS_POLYMORPHIC(T) __is_polymorphic(T)\n#   endif\n#   if __has_feature(has_trivial_move_constructor)\n#     define BOOST_HAS_TRIVIAL_MOVE_CONSTRUCTOR(T) (__has_trivial_move_constructor(T)  && is_constructible<T, T&&>::value && !::boost::is_volatile<T>::value)\n#   endif\n#   if __has_feature(has_trivial_move_assign)\n#     define BOOST_HAS_TRIVIAL_MOVE_ASSIGN(T) (__has_trivial_move_assign(T) && is_assignable<T&, T&&>::value && !::boost::is_volatile<T>::value)\n#   endif\n#   if (!defined(unix) && !defined(__unix__)) || defined(__LP64__) || !defined(__GNUC__)\n// GCC sometimes lies about alignment requirements\n// of type double on 32-bit unix platforms, use the\n// old implementation instead in that case:\n#     define BOOST_ALIGNMENT_OF(T) __alignof(T)\n#   endif\n#   if __has_feature(is_final)\n#     define BOOST_IS_FINAL(T) __is_final(T)\n#   endif\n\n#   define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#endif\n\n#if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3) && !defined(__GCCXML__))) && !defined(BOOST_CLANG)\n//\n// Note that even though these intrinsics rely on other type traits classes\n// we do not #include those here as it produces cyclic dependencies and\n// can cause the intrinsics to not even be used at all!\n//\n\n#ifdef BOOST_INTEL\n#  define BOOST_INTEL_TT_OPTS || is_pod<T>::value\n#else\n#  define 
BOOST_INTEL_TT_OPTS\n#endif\n\n#   define BOOST_IS_UNION(T) __is_union(T)\n#   define BOOST_IS_POD(T) __is_pod(T)\n#   define BOOST_IS_EMPTY(T) __is_empty(T)\n#   define BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) ((__has_trivial_constructor(T) BOOST_INTEL_TT_OPTS) && ! ::boost::is_volatile<T>::value)\n#   define BOOST_HAS_TRIVIAL_COPY(T) ((__has_trivial_copy(T) BOOST_INTEL_TT_OPTS) && !is_reference<T>::value)\n#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) ((__has_trivial_assign(T) BOOST_INTEL_TT_OPTS) && ! ::boost::is_volatile<T>::value && ! ::boost::is_const<T>::value && is_assignable<T&, const T&>::value)\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) (__has_trivial_destructor(T) BOOST_INTEL_TT_OPTS && is_destructible<T>::value)\n#   define BOOST_HAS_NOTHROW_CONSTRUCTOR(T) (__has_nothrow_constructor(T) && is_default_constructible<T>::value BOOST_INTEL_TT_OPTS)\n#   define BOOST_HAS_NOTHROW_COPY(T) ((__has_nothrow_copy(T) BOOST_INTEL_TT_OPTS) && !is_volatile<T>::value && !is_reference<T>::value && is_copy_constructible<T>::value)\n#   define BOOST_HAS_NOTHROW_ASSIGN(T) ((__has_nothrow_assign(T) BOOST_INTEL_TT_OPTS) && !is_volatile<T>::value && !is_const<T>::value && is_assignable<T&, const T&>::value)\n#else\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) ((__has_trivial_assign(T) BOOST_INTEL_TT_OPTS) && ! ::boost::is_volatile<T>::value && ! 
::boost::is_const<T>::value)\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) (__has_trivial_destructor(T) BOOST_INTEL_TT_OPTS)\n#   define BOOST_HAS_NOTHROW_CONSTRUCTOR(T) (__has_nothrow_constructor(T) BOOST_INTEL_TT_OPTS)\n#   define BOOST_HAS_NOTHROW_COPY(T) ((__has_nothrow_copy(T) BOOST_INTEL_TT_OPTS) && !is_volatile<T>::value && !is_reference<T>::value && !is_array<T>::value)\n#   define BOOST_HAS_NOTHROW_ASSIGN(T) ((__has_nothrow_assign(T) BOOST_INTEL_TT_OPTS) && !is_volatile<T>::value && !is_const<T>::value && !is_array<T>::value)\n#endif\n#   define BOOST_HAS_VIRTUAL_DESTRUCTOR(T) __has_virtual_destructor(T)\n\n#   define BOOST_IS_ABSTRACT(T) __is_abstract(T)\n#   define BOOST_IS_BASE_OF(T,U) (__is_base_of(T,U) && !is_same<T,U>::value)\n#   define BOOST_IS_CLASS(T) __is_class(T)\n#   define BOOST_IS_ENUM(T) __is_enum(T)\n#   define BOOST_IS_POLYMORPHIC(T) __is_polymorphic(T)\n#   if (!defined(unix) && !defined(__unix__)) || defined(__LP64__)\n      // GCC sometimes lies about alignment requirements\n      // of type double on 32-bit unix platforms, use the\n      // old implementation instead in that case:\n#     define BOOST_ALIGNMENT_OF(T) __alignof__(T)\n#   endif\n#   if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))\n#     define BOOST_IS_FINAL(T) __is_final(T)\n#   endif\n\n#   if (__GNUC__ >= 5) && (__cplusplus >= 201103)\n#     define BOOST_HAS_TRIVIAL_MOVE_ASSIGN(T) (__is_trivially_assignable(T&, T&&) && is_assignable<T&, T&&>::value && !::boost::is_volatile<T>::value)\n#     define BOOST_HAS_TRIVIAL_MOVE_CONSTRUCTOR(T) (__is_trivially_constructible(T, T&&) && is_constructible<T, T&&>::value && !::boost::is_volatile<T>::value)\n#   endif\n\n#   define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#endif\n\n#if defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x5130)\n#   define BOOST_IS_UNION(T) __oracle_is_union(T)\n#   define BOOST_IS_POD(T) (__oracle_is_pod(T) && !is_function<T>::value)\n#   define BOOST_IS_EMPTY(T) __oracle_is_empty(T)\n#   define 
BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) (__oracle_has_trivial_constructor(T) && ! ::boost::is_volatile<T>::value)\n#   define BOOST_HAS_TRIVIAL_COPY(T) (__oracle_has_trivial_copy(T) && !is_reference<T>::value)\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) ((__oracle_has_trivial_assign(T) || __oracle_is_trivial(T)) && ! ::boost::is_volatile<T>::value && ! ::boost::is_const<T>::value && is_assignable<T&, const T&>::value)\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) (__oracle_has_trivial_destructor(T) && is_destructible<T>::value)\n#   define BOOST_HAS_NOTHROW_CONSTRUCTOR(T) ((__oracle_has_nothrow_constructor(T) || __oracle_has_trivial_constructor(T) || __oracle_is_trivial(T)) && is_default_constructible<T>::value)\n//  __oracle_has_nothrow_copy appears to behave the same as __oracle_has_nothrow_assign, disabled for now:\n//#   define BOOST_HAS_NOTHROW_COPY(T) ((__oracle_has_nothrow_copy(T) || __oracle_has_trivial_copy(T) || __oracle_is_trivial(T)) && !is_volatile<T>::value && !is_reference<T>::value && is_copy_constructible<T>::value)\n#   define BOOST_HAS_NOTHROW_ASSIGN(T) ((__oracle_has_nothrow_assign(T) || __oracle_has_trivial_assign(T) || __oracle_is_trivial(T)) && !is_volatile<T>::value && !is_const<T>::value && is_assignable<T&, const T&>::value)\n#   define BOOST_HAS_VIRTUAL_DESTRUCTOR(T) __oracle_has_virtual_destructor(T)\n\n#   define BOOST_IS_ABSTRACT(T) __oracle_is_abstract(T)\n//#   define BOOST_IS_BASE_OF(T,U) (__is_base_of(T,U) && !is_same<T,U>::value)\n#   define BOOST_IS_CLASS(T) __oracle_is_class(T)\n#   define BOOST_IS_ENUM(T) __oracle_is_enum(T)\n#   define BOOST_IS_POLYMORPHIC(T) __oracle_is_polymorphic(T)\n#   define BOOST_ALIGNMENT_OF(T) __alignof__(T)\n#   define BOOST_IS_FINAL(T) __oracle_is_final(T)\n\n#   define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#endif\n\n#if defined(__ghs__) && (__GHS_VERSION_NUMBER >= 600)\n#   include <boost/type_traits/is_same.hpp>\n#   include <boost/type_traits/is_reference.hpp>\n#   include <boost/type_traits/is_volatile.hpp>\n\n# 
  define BOOST_IS_UNION(T) __is_union(T)\n#   define BOOST_IS_POD(T) __is_pod(T)\n#   define BOOST_IS_EMPTY(T) __is_empty(T)\n#   define BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) __has_trivial_constructor(T)\n#   define BOOST_HAS_TRIVIAL_COPY(T) (__has_trivial_copy(T) && !is_reference<T>::value)\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) (__has_trivial_assign(T) && !is_volatile<T>::value)\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T)\n#   define BOOST_HAS_NOTHROW_CONSTRUCTOR(T) __has_nothrow_constructor(T)\n#   define BOOST_HAS_NOTHROW_COPY(T) (__has_nothrow_copy(T) && !is_volatile<T>::value && !is_reference<T>::value)\n#   define BOOST_HAS_NOTHROW_ASSIGN(T) (__has_nothrow_assign(T) && !is_volatile<T>::value)\n#   define BOOST_HAS_VIRTUAL_DESTRUCTOR(T) __has_virtual_destructor(T)\n\n#   define BOOST_IS_ABSTRACT(T) __is_abstract(T)\n#   define BOOST_IS_BASE_OF(T,U) (__is_base_of(T,U) && !is_same<T,U>::value)\n#   define BOOST_IS_CLASS(T) __is_class(T)\n#   define BOOST_IS_ENUM(T) __is_enum(T)\n#   define BOOST_IS_POLYMORPHIC(T) __is_polymorphic(T)\n#   define BOOST_ALIGNMENT_OF(T) __alignof__(T)\n#   define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#endif\n\n# if defined(__CODEGEARC__)\n#   include <boost/type_traits/is_same.hpp>\n#   include <boost/type_traits/is_reference.hpp>\n#   include <boost/type_traits/is_volatile.hpp>\n#   include <boost/type_traits/is_void.hpp>\n\n#   define BOOST_IS_UNION(T) __is_union(T)\n#   define BOOST_IS_POD(T) __is_pod(T)\n#   define BOOST_IS_EMPTY(T) __is_empty(T)\n#   define BOOST_HAS_TRIVIAL_CONSTRUCTOR(T) (__has_trivial_default_constructor(T))\n#   define BOOST_HAS_TRIVIAL_COPY(T) (__has_trivial_copy_constructor(T) && !is_reference<T>::value)\n#   define BOOST_HAS_TRIVIAL_ASSIGN(T) (__has_trivial_assign(T) && !is_volatile<T>::value)\n#   define BOOST_HAS_TRIVIAL_DESTRUCTOR(T) (__has_trivial_destructor(T))\n#   define BOOST_HAS_NOTHROW_CONSTRUCTOR(T) (__has_nothrow_default_constructor(T))\n#   define 
BOOST_HAS_NOTHROW_COPY(T) (__has_nothrow_copy_constructor(T) && !is_volatile<T>::value && !is_reference<T>::value)\n#   define BOOST_HAS_NOTHROW_ASSIGN(T) (__has_nothrow_assign(T) && !is_volatile<T>::value)\n#   define BOOST_HAS_VIRTUAL_DESTRUCTOR(T) __has_virtual_destructor(T)\n\n#   define BOOST_IS_ABSTRACT(T) __is_abstract(T)\n#   define BOOST_IS_BASE_OF(T,U) (__is_base_of(T,U) && !is_void<T>::value && !is_void<U>::value)\n#   define BOOST_IS_CLASS(T) __is_class(T)\n#   define BOOST_IS_CONVERTIBLE(T,U) (__is_convertible(T,U) || is_void<U>::value)\n#   define BOOST_IS_ENUM(T) __is_enum(T)\n#   define BOOST_IS_POLYMORPHIC(T) __is_polymorphic(T)\n#   define BOOST_ALIGNMENT_OF(T) alignof(T)\n\n#   define BOOST_HAS_TYPE_TRAITS_INTRINSICS\n#endif\n\n#endif // BOOST_TT_DISABLE_INTRINSICS\n\n#endif // BOOST_TT_INTRINSICS_HPP_INCLUDED\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_abstract.hpp",
    "content": "#ifndef BOOST_TT_IS_ABSTRACT_CLASS_HPP\n#define BOOST_TT_IS_ABSTRACT_CLASS_HPP\n\n#if defined(_MSC_VER)\n# pragma once\n#endif\n\n/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8\n// is_abstract_class.hpp:\n//\n//  (C) Copyright 2002 Rani Sharoni (rani_sharoni@hotmail.com) and Robert Ramey\n//  Use, modification and distribution is subject to the Boost Software\n//  License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n//  \n//  See http://www.boost.org for updates, documentation, and revision history.\n//\n\n// Compile type discovery whether given type is abstract class or not.\n//\n//   Requires DR 337 to be supported by compiler\n//   (http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#337).\n//\n//\n// Believed (Jan 2004) to work on:\n//  - GCC 3.4\n//  - VC++ 7.1\n//  - compilers with new EDG frontend (Intel C++ 7, Comeau 4.3.2)\n//\n// Doesn't work on:\n//  - VC++6, VC++7.0 and less\n//  - GCC 3.3.X and less\n//  - Borland C++ 6 and less\n//      \n//\n// History:\n//  - Originally written by Rani Sharoni, see\n//    http://groups.google.com/groups?selm=df893da6.0207110613.75b2fe90%40posting.google.com\n//    At this time supported by EDG (Intel C++ 7, Comeau 4.3.2) and VC7.1.\n//  - Adapted and added into Boost.Serialization library by Robert Ramey \n//    (starting with submission #10).\n//  - Jan 2004: GCC 3.4 fixed to support DR337 (Giovanni Bajo).\n//  - Jan 2004: modified to be part of Boost.TypeTraits (Pavel Vozenilek).\n//  - Nov 2004: Christoph Ludwig found that the implementation did not work with\n//              template types and gcc-3.4 or VC7.1, fix due to Christoph Ludwig\n//              and John Maddock.\n//  - Dec 2004: Added new config macro BOOST_NO_IS_ABSTRACT which causes the template\n//              to degrade gracefully, rather than trash the compiler (John Maddock).\n//\n\n#include 
<boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n#ifndef BOOST_IS_ABSTRACT\n#include <boost/static_assert.hpp>\n#include <boost/type_traits/detail/yes_no_type.hpp>\n#include <boost/type_traits/is_class.hpp>\n#ifdef BOOST_NO_IS_ABSTRACT\n#include <boost/type_traits/is_polymorphic.hpp>\n#endif\n#endif\n\nnamespace boost {\n\nnamespace detail{\n\n#ifdef BOOST_IS_ABSTRACT\ntemplate <class T>\nstruct is_abstract_imp\n{\n   BOOST_STATIC_CONSTANT(bool, value = BOOST_IS_ABSTRACT(T));\n};\n#elif !defined(BOOST_NO_IS_ABSTRACT)\ntemplate<class T>\nstruct is_abstract_imp2\n{\n   // Deduction fails if T is void, function type, \n   // reference type (14.8.2/2)or an abstract class type \n   // according to review status issue #337\n   //\n   template<class U>\n   static type_traits::no_type check_sig(U (*)[1]);\n   template<class U>\n   static type_traits::yes_type check_sig(...);\n   //\n   // T must be a complete type, further if T is a template then\n   // it must be instantiated in order for us to get the right answer:\n   //\n   BOOST_STATIC_ASSERT(sizeof(T) != 0);\n\n   // GCC2 won't even parse this template if we embed the computation\n   // of s1 in the computation of value.\n#ifdef __GNUC__\n   BOOST_STATIC_CONSTANT(std::size_t, s1 = sizeof(is_abstract_imp2<T>::template check_sig<T>(0)));\n#else\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(push)\n#pragma warning(disable:6334)\n#endif\n   BOOST_STATIC_CONSTANT(std::size_t, s1 = sizeof(check_sig<T>(0)));\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(pop)\n#endif\n#endif\n    \n   BOOST_STATIC_CONSTANT(bool, value = \n      (s1 == sizeof(type_traits::yes_type)));\n};\n\ntemplate <bool v>\nstruct is_abstract_select\n{\n   template <class T>\n   struct rebind\n   {\n      typedef is_abstract_imp2<T> type;\n   };\n};\ntemplate <>\nstruct is_abstract_select<false>\n{\n   template <class T>\n   struct rebind\n   {\n      typedef 
false_type type;\n   };\n};\n\ntemplate <class T>\nstruct is_abstract_imp\n{\n   typedef is_abstract_select< ::boost::is_class<T>::value> selector;\n   typedef typename selector::template rebind<T> binder;\n   typedef typename binder::type type;\n\n   BOOST_STATIC_CONSTANT(bool, value = type::value);\n};\n\n#endif\n}\n\n#ifndef BOOST_NO_IS_ABSTRACT\ntemplate <class T> struct is_abstract : public integral_constant<bool, ::boost::detail::is_abstract_imp<T>::value> {};\n#else\ntemplate <class T> struct is_abstract : public integral_constant<bool, ::boost::detail::is_polymorphic_imp<T>::value> {};\n#endif\n\n} // namespace boost\n\n#endif //BOOST_TT_IS_ABSTRACT_CLASS_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_arithmetic.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_ARITHMETIC_HPP_INCLUDED\n#define BOOST_TT_IS_ARITHMETIC_HPP_INCLUDED\n\n#include <boost/type_traits/is_integral.hpp>\n#include <boost/type_traits/is_floating_point.hpp>\n\nnamespace boost {\n\ntemplate <class T>\nstruct is_arithmetic : public integral_constant<bool, is_integral<T>::value || is_floating_point<T>::value> {};\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_ARITHMETIC_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_array.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n// Some fixes for is_array are based on a newsgroup posting by Jonathan Lundquist.\n\n\n#ifndef BOOST_TT_IS_ARRAY_HPP_INCLUDED\n#define BOOST_TT_IS_ARRAY_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n#include <cstddef>\n\nnamespace boost {\n\n#if defined( __CODEGEARC__ )\n   template <class T> struct is_array : public integral_constant<bool, __is_array(T)> {};\n#else\n   template <class T> struct is_array : public false_type {};\n#if !defined(BOOST_NO_ARRAY_TYPE_SPECIALIZATIONS)\n   template <class T, std::size_t N> struct is_array<T[N]> : public true_type {};\n   template <class T, std::size_t N> struct is_array<T const[N]> : public true_type{};\n   template <class T, std::size_t N> struct is_array<T volatile[N]> : public true_type{};\n   template <class T, std::size_t N> struct is_array<T const volatile[N]> : public true_type{};\n#if !BOOST_WORKAROUND(__BORLANDC__, < 0x600) && !defined(__IBMCPP__) &&  !BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n   template <class T> struct is_array<T[]> : public true_type{};\n   template <class T> struct is_array<T const[]> : public true_type{};\n   template <class T> struct is_array<T const volatile[]> : public true_type{};\n   template <class T> struct is_array<T volatile[]> : public true_type{};\n#endif\n#endif\n\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_ARRAY_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_assignable.hpp",
    "content": "\n//  (C) Copyright John Maddock 2015.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_ASSIGNABLE_HPP_INCLUDED\n#define BOOST_TT_IS_ASSIGNABLE_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n#include <boost/detail/workaround.hpp>\n\nnamespace boost{\n\n   template <class T, class U = T> struct is_assignable;\n\n}\n\n#if !defined(BOOST_NO_CXX11_DECLTYPE) && !BOOST_WORKAROUND(BOOST_MSVC, < 1800)\n\n#include <boost/type_traits/detail/yes_no_type.hpp>\n#include <boost/type_traits/declval.hpp>\n\nnamespace boost{\n\n   namespace detail{\n\n      struct is_assignable_imp\n      {\n         template<typename T, typename U, typename = decltype(boost::declval<T>() = boost::declval<U>())>\n         static boost::type_traits::yes_type test(int);\n\n         template<typename, typename>\n         static boost::type_traits::no_type test(...);\n      };\n\n   }\n\n   template <class T, class U> struct is_assignable : public integral_constant<bool, sizeof(detail::is_assignable_imp::test<T, U>(0)) == sizeof(boost::type_traits::yes_type)>{};\n   template <class T, std::size_t N, class U> struct is_assignable<T[N], U> : public is_assignable<T, U>{};\n   template <class T, std::size_t N, class U> struct is_assignable<T(&)[N], U> : public is_assignable<T&, U>{};\n   template <class T, class U> struct is_assignable<T[], U> : public is_assignable<T, U>{};\n   template <class T, class U> struct is_assignable<T(&)[], U> : public is_assignable<T&, U>{};\n   template <class U> struct is_assignable<void, U> : public integral_constant<bool, false>{};\n   template <class U> struct is_assignable<void const, U> : public integral_constant<bool, false>{};\n   template <class U> struct 
is_assignable<void volatile, U> : public integral_constant<bool, false>{};\n   template <class U> struct is_assignable<void const volatile, U> : public integral_constant<bool, false>{};\n\n#else\n\n#include <boost/type_traits/has_trivial_assign.hpp>\n#include <boost/type_traits/remove_reference.hpp>\n\nnamespace boost{\n\n   // We don't know how to implement this:\n   template <class T, class U> struct is_assignable : public integral_constant<bool, false>{};\n   template <class T, class U> struct is_assignable<T&, U> : public integral_constant<bool, is_pod<T>::value && is_pod<typename remove_reference<U>::type>::value>{};\n   template <class T, class U> struct is_assignable<const T&, U> : public integral_constant<bool, false>{};\n   template <class U> struct is_assignable<void, U> : public integral_constant<bool, false>{};\n   template <class U> struct is_assignable<void const, U> : public integral_constant<bool, false>{};\n   template <class U> struct is_assignable<void volatile, U> : public integral_constant<bool, false>{};\n   template <class U> struct is_assignable<void const volatile, U> : public integral_constant<bool, false>{};\n   /*\n   template <> struct is_assignable<void, void> : public integral_constant<bool, false>{};\n   template <> struct is_assignable<void const, void const> : public integral_constant<bool, false>{};\n   template <> struct is_assignable<void volatile, void volatile> : public integral_constant<bool, false>{};\n   template <> struct is_assignable<void const volatile, void const volatile> : public integral_constant<bool, false>{};\n   */\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_ASSIGNABLE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_base_and_derived.hpp",
    "content": "\n//  (C) Copyright Rani Sharoni 2003.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n \n#ifndef BOOST_TT_IS_BASE_AND_DERIVED_HPP_INCLUDED\n#define BOOST_TT_IS_BASE_AND_DERIVED_HPP_INCLUDED\n\n#include <boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n#ifndef BOOST_IS_BASE_OF\n#include <boost/type_traits/is_class.hpp>\n#include <boost/type_traits/is_same.hpp>\n#include <boost/type_traits/is_convertible.hpp>\n#include <boost/config.hpp>\n#include <boost/static_assert.hpp>\n#endif\n#include <boost/type_traits/remove_cv.hpp>\n#include <boost/type_traits/is_same.hpp>\n\nnamespace boost {\n\nnamespace detail {\n\n#ifndef BOOST_IS_BASE_OF\n#if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x581)) \\\n && !BOOST_WORKAROUND(__SUNPRO_CC , <= 0x540) \\\n && !BOOST_WORKAROUND(__EDG_VERSION__, <= 243) \\\n && !BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n\n                             // The EDG version number is a lower estimate.\n                             // It is not currently known which EDG version\n                             // exactly fixes the problem.\n\n/*************************************************************************\n\nThis version detects ambiguous base classes and private base classes\ncorrectly, and was devised by Rani Sharoni.\n\nExplanation by Terje Slettebo and Rani Sharoni.\n\nLet's take the multiple base class below as an example, and the following\nwill also show why there's not a problem with private or ambiguous base\nclass:\n\nstruct B {};\nstruct B1 : B {};\nstruct B2 : B {};\nstruct D : private B1, private B2 {};\n\nis_base_and_derived<B, D>::value;\n\nFirst, some terminology:\n\nSC  - Standard conversion\nUDC - User-defined 
conversion\n\nA user-defined conversion sequence consists of an SC, followed by an UDC,\nfollowed by another SC. Either SC may be the identity conversion.\n\nWhen passing the default-constructed Host object to the overloaded check_sig()\nfunctions (initialization 8.5/14/4/3), we have several viable implicit\nconversion sequences:\n\nFor \"static no_type check_sig(B const volatile *, int)\" we have the conversion\nsequences:\n\nC -> C const (SC - Qualification Adjustment) -> B const volatile* (UDC)\nC -> D const volatile* (UDC) -> B1 const volatile* / B2 const volatile* ->\n     B const volatile* (SC - Conversion)\n\nFor \"static yes_type check_sig(D const volatile *, T)\" we have the conversion\nsequence:\n\nC -> D const volatile* (UDC)\n\nAccording to 13.3.3.1/4, in context of user-defined conversion only the\nstandard conversion sequence is considered when selecting the best viable\nfunction, so it only considers up to the user-defined conversion. For the\nfirst function this means choosing between C -> C const and C -> C, and it\nchooses the latter, because it's a proper subset (13.3.3.2/3/2) of the\nformer. Therefore, we have:\n\nC -> D const volatile* (UDC) -> B1 const volatile* / B2 const volatile* ->\n     B const volatile* (SC - Conversion)\nC -> D const volatile* (UDC)\n\nHere, the principle of the \"shortest subsequence\" applies again, and it\nchooses C -> D const volatile*. 
This shows that it doesn't even need to\nconsider the multiple paths to B, or accessibility, as that possibility is\neliminated before it could possibly cause ambiguity or access violation.\n\nIf D is not derived from B, it has to choose between C -> C const -> B const\nvolatile* for the first function, and C -> D const volatile* for the second\nfunction, which are just as good (both requires a UDC, 13.3.3.2), had it not\nbeen for the fact that \"static no_type check_sig(B const volatile *, int)\" is\nnot templated, which makes C -> C const -> B const volatile* the best choice\n(13.3.3/1/4), resulting in \"no\".\n\nAlso, if Host::operator B const volatile* hadn't been const, the two\nconversion sequences for \"static no_type check_sig(B const volatile *, int)\", in\nthe case where D is derived from B, would have been ambiguous.\n\nSee also\nhttp://groups.google.com/groups?selm=df893da6.0301280859.522081f7%40posting.\ngoogle.com and links therein.\n\n*************************************************************************/\n\ntemplate <typename B, typename D>\nstruct bd_helper\n{\n   //\n   // This VC7.1 specific workaround stops the compiler from generating\n   // an internal compiler error when compiling with /vmg (thanks to\n   // Aleksey Gurtovoy for figuring out the workaround).\n   //\n#if !BOOST_WORKAROUND(BOOST_MSVC, == 1310)\n    template <typename T>\n    static type_traits::yes_type check_sig(D const volatile *, T);\n    static type_traits::no_type  check_sig(B const volatile *, int);\n#else\n    static type_traits::yes_type check_sig(D const volatile *, long);\n    static type_traits::no_type  check_sig(B const volatile * const&, int);\n#endif\n};\n\ntemplate<typename B, typename D>\nstruct is_base_and_derived_impl2\n{\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(push)\n#pragma warning(disable:6334)\n#endif\n    //\n    // May silently do the wrong thing with incomplete types\n    // unless we trap them here:\n    //\n    
BOOST_STATIC_ASSERT(sizeof(B) != 0);\n    BOOST_STATIC_ASSERT(sizeof(D) != 0);\n\n    struct Host\n    {\n#if !BOOST_WORKAROUND(BOOST_MSVC, == 1310)\n        operator B const volatile *() const;\n#else\n        operator B const volatile * const&() const;\n#endif\n        operator D const volatile *();\n    };\n\n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof(bd_helper<B,D>::check_sig(Host(), 0)) == sizeof(type_traits::yes_type));\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(pop)\n#endif\n};\n\n#else\n\n//\n// broken version:\n//\ntemplate<typename B, typename D>\nstruct is_base_and_derived_impl2\n{\n    BOOST_STATIC_CONSTANT(bool, value =\n        (::boost::is_convertible<D*,B*>::value));\n};\n\n#define BOOST_BROKEN_IS_BASE_AND_DERIVED\n\n#endif\n\ntemplate <typename B, typename D>\nstruct is_base_and_derived_impl3\n{\n    BOOST_STATIC_CONSTANT(bool, value = false);\n};\n\ntemplate <bool ic1, bool ic2, bool iss>\nstruct is_base_and_derived_select\n{\n   template <class T, class U>\n   struct rebind\n   {\n      typedef is_base_and_derived_impl3<T,U> type;\n   };\n};\n\ntemplate <>\nstruct is_base_and_derived_select<true,true,false>\n{\n   template <class T, class U>\n   struct rebind\n   {\n      typedef is_base_and_derived_impl2<T,U> type;\n   };\n};\n\ntemplate <typename B, typename D>\nstruct is_base_and_derived_impl\n{\n    typedef typename remove_cv<B>::type ncvB;\n    typedef typename remove_cv<D>::type ncvD;\n\n    typedef is_base_and_derived_select<\n       ::boost::is_class<B>::value,\n       ::boost::is_class<D>::value,\n       ::boost::is_same<ncvB,ncvD>::value> selector;\n    typedef typename selector::template rebind<ncvB,ncvD> binder;\n    typedef typename binder::type bound_type;\n\n    BOOST_STATIC_CONSTANT(bool, value = bound_type::value);\n};\n#else\ntemplate <typename B, typename D>\nstruct is_base_and_derived_impl\n{\n    typedef typename remove_cv<B>::type ncvB;\n    typedef typename remove_cv<D>::type 
ncvD;\n\n    BOOST_STATIC_CONSTANT(bool, value = (BOOST_IS_BASE_OF(B,D) && ! ::boost::is_same<ncvB,ncvD>::value));\n};\n#endif\n} // namespace detail\n\ntemplate <class Base, class Derived> struct is_base_and_derived\n   : public integral_constant<bool, (::boost::detail::is_base_and_derived_impl<Base, Derived>::value)> {};\n\ntemplate <class Base, class Derived> struct is_base_and_derived<Base&, Derived> : public false_type{};\ntemplate <class Base, class Derived> struct is_base_and_derived<Base, Derived&> : public false_type{};\ntemplate <class Base, class Derived> struct is_base_and_derived<Base&, Derived&> : public false_type{};\n\n#if BOOST_WORKAROUND(__CODEGEARC__, BOOST_TESTED_AT(0x610))\ntemplate <class Base> struct is_base_and_derived<Base, Base> : public true_type{};\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_BASE_AND_DERIVED_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_class.hpp",
    "content": "//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000-2003.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_IS_CLASS_HPP_INCLUDED\n#define BOOST_TT_IS_CLASS_HPP_INCLUDED\n\n#include <boost/type_traits/detail/config.hpp>\n#include <boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n#ifndef BOOST_IS_CLASS\n#   include <boost/type_traits/is_union.hpp>\n\n#ifdef BOOST_TT_HAS_CONFORMING_IS_CLASS_IMPLEMENTATION\n#   include <boost/type_traits/detail/yes_no_type.hpp>\n#else\n#   include <boost/type_traits/is_scalar.hpp>\n#   include <boost/type_traits/is_array.hpp>\n#   include <boost/type_traits/is_reference.hpp>\n#   include <boost/type_traits/is_void.hpp>\n#   include <boost/type_traits/is_function.hpp>\n#endif\n\n#endif // BOOST_IS_CLASS\n\nnamespace boost {\n\nnamespace detail {\n\n#ifndef BOOST_IS_CLASS\n#ifdef BOOST_TT_HAS_CONFORMING_IS_CLASS_IMPLEMENTATION\n\n// This is actually the conforming implementation which works with\n// abstract classes.  However, enough compilers have trouble with\n// it that most will use the one in\n// boost/type_traits/object_traits.hpp. This implementation\n// actually works with VC7.0, but other interactions seem to fail\n// when we use it.\n\n// is_class<> metafunction due to Paul Mensonides\n// (leavings@attbi.com). 
For more details:\n// http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1\n#if defined(__GNUC__)  && !defined(__EDG_VERSION__)\n\ntemplate <class U> ::boost::type_traits::yes_type is_class_tester(void(U::*)(void));\ntemplate <class U> ::boost::type_traits::no_type is_class_tester(...);\n\ntemplate <typename T>\nstruct is_class_impl\n{\n\n    BOOST_STATIC_CONSTANT(bool, value =\n            sizeof(is_class_tester<T>(0)) == sizeof(::boost::type_traits::yes_type)\n            && ! ::boost::is_union<T>::value\n        );\n};\n\n#else\n\ntemplate <typename T>\nstruct is_class_impl\n{\n    template <class U> static ::boost::type_traits::yes_type is_class_tester(void(U::*)(void));\n    template <class U> static ::boost::type_traits::no_type is_class_tester(...);\n\n    BOOST_STATIC_CONSTANT(bool, value =\n            sizeof(is_class_tester<T>(0)) == sizeof(::boost::type_traits::yes_type)\n            && ! ::boost::is_union<T>::value\n        );\n};\n\n#endif\n\n#else\n\ntemplate <typename T>\nstruct is_class_impl\n{\n    BOOST_STATIC_CONSTANT(bool, value =\n        ! ::boost::is_union<T>::value >::value\n        && ! ::boost::is_scalar<T>::value\n        && ! ::boost::is_array<T>::value\n        && ! ::boost::is_reference<T>::value\n        && ! ::boost::is_void<T>::value\n        && ! 
::boost::is_function<T>::value\n        );\n};\n\n# endif // BOOST_TT_HAS_CONFORMING_IS_CLASS_IMPLEMENTATION\n# else // BOOST_IS_CLASS\ntemplate <typename T>\nstruct is_class_impl\n{\n    BOOST_STATIC_CONSTANT(bool, value = BOOST_IS_CLASS(T));\n};\n# endif // BOOST_IS_CLASS\n\n} // namespace detail\n\ntemplate <class T> struct is_class : public integral_constant<bool, ::boost::detail::is_class_impl<T>::value> {};\n# ifdef __EDG_VERSION__\ntemplate <class T> struct is_class<const T> : public is_class<T>{};\ntemplate <class T> struct is_class<const volatile T> : public is_class<T>{};\ntemplate <class T> struct is_class<volatile T> : public is_class<T>{};\n# endif\n    \n} // namespace boost\n\n#endif // BOOST_TT_IS_CLASS_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_const.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//      Howard Hinnant and John Maddock 2000. \n//  (C) Copyright Mat Marcus, Jesse Jones and Adobe Systems Inc 2001\n\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n//    Fixed is_pointer, is_reference, is_const, is_volatile, is_same, \n//    is_member_pointer based on the Simulated Partial Specialization work \n//    of Mat Marcus and Jesse Jones. See  http://opensource.adobe.com or \n//    http://groups.yahoo.com/group/boost/message/5441 \n//    Some workarounds in here use ideas suggested from \"Generic<Programming>: \n//    Mappings between Types and Values\" \n//    by Andrei Alexandrescu (see http://www.cuj.com/experts/1810/alexandr.html).\n\n\n#ifndef BOOST_TT_IS_CONST_HPP_INCLUDED\n#define BOOST_TT_IS_CONST_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\n#if defined( __CODEGEARC__ )\n\n   template <class T>\n   struct is_const : public integral_constant<bool, __is_const(T)> {};\n\n#else\n\n   template <class T>\n   struct is_const : public false_type {};\n   template <class T> struct is_const<T const> : public true_type{};\n   template <class T, size_t N> struct is_const<T const[N]> : public true_type{};\n   template <class T> struct is_const<T const[]> : public true_type{};\n\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_CONST_HPP_INCLUDED\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_convertible.hpp",
    "content": "\n// Copyright 2000 John Maddock (john@johnmaddock.co.uk)\n// Copyright 2000 Jeremy Siek (jsiek@lsc.nd.edu)\n// Copyright 1999, 2000 Jaakko Jarvi (jaakko.jarvi@cs.utu.fi)\n//\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_CONVERTIBLE_HPP_INCLUDED\n#define BOOST_TT_IS_CONVERTIBLE_HPP_INCLUDED\n\n#include <boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n#ifndef BOOST_IS_CONVERTIBLE\n#include <boost/type_traits/detail/yes_no_type.hpp>\n#include <boost/type_traits/detail/config.hpp>\n#include <boost/type_traits/is_array.hpp>\n#include <boost/type_traits/is_arithmetic.hpp>\n#include <boost/type_traits/is_void.hpp>\n#if !defined(BOOST_NO_IS_ABSTRACT)\n#include <boost/type_traits/is_abstract.hpp>\n#endif\n#include <boost/type_traits/add_lvalue_reference.hpp>\n#include <boost/type_traits/add_rvalue_reference.hpp>\n#include <boost/type_traits/is_function.hpp>\n\n#if defined(__MWERKS__)\n#include <boost/type_traits/remove_reference.hpp>\n#endif\n#if !defined(BOOST_NO_SFINAE_EXPR) && !defined(BOOST_NO_CXX11_RVALUE_REFERENCES)\n#  include <boost/type_traits/declval.hpp>\n#endif\n#elif defined(BOOST_MSVC) || defined(BOOST_INTEL)\n#include <boost/type_traits/is_function.hpp>\n#include <boost/type_traits/is_same.hpp>\n#endif // BOOST_IS_CONVERTIBLE\n\nnamespace boost {\n\n#ifndef BOOST_IS_CONVERTIBLE\n\n// is one type convertible to another?\n//\n// there are multiple versions of the is_convertible\n// template, almost every compiler seems to require its\n// own version.\n//\n// Thanks to Andrei Alexandrescu for the original version of the\n// conversion detection technique!\n//\n\nnamespace detail {\n\n#if !defined(BOOST_NO_SFINAE_EXPR) && 
!defined(BOOST_NO_CXX11_RVALUE_REFERENCES) && !(defined(BOOST_GCC) && (BOOST_GCC < 40700))\n\n   // This is a C++11 conforming version, place this first and use it wherever possible:\n\n#  define BOOST_TT_CXX11_IS_CONVERTIBLE\n\n   template <class A, class B, class C>\n   struct or_helper\n   {\n      static const bool value = (A::value || B::value || C::value);\n   };\n\n   template<typename From, typename To, bool b = or_helper<boost::is_void<From>, boost::is_function<To>, boost::is_array<To> >::value>\n   struct is_convertible_basic_impl\n   {\n      // Nothing converts to function or array, but void converts to void:\n      static const bool value = is_void<To>::value; \n   };\n\n   template<typename From, typename To>\n   class is_convertible_basic_impl<From, To, false>\n   {\n      typedef char one;\n      typedef int  two;\n\n      template<typename To1>\n      static void test_aux(To1);\n\n      template<typename From1, typename To1>\n      static decltype(test_aux<To1>(boost::declval<From1>()), one()) test(int);\n\n      template<typename, typename>\n      static two test(...);\n\n   public:\n      static const bool value = sizeof(test<From, To>(0)) == 1;\n   };\n\n#elif defined(__BORLANDC__) && (__BORLANDC__ < 0x560)\n//\n// special version for Borland compilers\n// this version breaks when used for some\n// UDT conversions:\n//\ntemplate <typename From, typename To>\nstruct is_convertible_impl\n{\n#pragma option push -w-8074\n    // This workaround for Borland breaks the EDG C++ frontend,\n    // so we only use it for Borland.\n    template <typename T> struct checker\n    {\n        static ::boost::type_traits::no_type BOOST_TT_DECL _m_check(...);\n        static ::boost::type_traits::yes_type BOOST_TT_DECL _m_check(T);\n    };\n\n    static typename add_lvalue_reference<From>::type  _m_from;\n    static bool const value = sizeof( checker<To>::_m_check(_m_from) )\n        == sizeof(::boost::type_traits::yes_type);\n#pragma option pop\n};\n\n#elif 
defined(__GNUC__) || defined(__BORLANDC__) && (__BORLANDC__ < 0x600)\n// special version for gcc compiler + recent Borland versions\n// note that this does not pass UDT's through (...)\n\nstruct any_conversion\n{\n    template <typename T> any_conversion(const volatile T&);\n    template <typename T> any_conversion(const T&);\n    template <typename T> any_conversion(volatile T&);\n    template <typename T> any_conversion(T&);\n};\n\ntemplate <typename T> struct checker\n{\n    static boost::type_traits::no_type _m_check(any_conversion ...);\n    static boost::type_traits::yes_type _m_check(T, int);\n};\n\ntemplate <typename From, typename To>\nstruct is_convertible_basic_impl\n{\n    typedef typename add_lvalue_reference<From>::type lvalue_type;\n    typedef typename add_rvalue_reference<From>::type rvalue_type;\n    static lvalue_type _m_from;\n#if !defined(BOOST_NO_CXX11_RVALUE_REFERENCES) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6)))\n    static bool const value =\n        sizeof( boost::detail::checker<To>::_m_check(static_cast<rvalue_type>(_m_from), 0) )\n        == sizeof(::boost::type_traits::yes_type);\n#else\n    static bool const value =\n        sizeof( boost::detail::checker<To>::_m_check(_m_from, 0) )\n        == sizeof(::boost::type_traits::yes_type);\n#endif\n};\n\n#elif (defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 245) && !defined(__ICL)) \\\n      || defined(__IBMCPP__) || defined(__HP_aCC)\n//\n// This is *almost* an ideal world implementation as it doesn't rely\n// on undefined behaviour by passing UDT's through (...).\n// Unfortunately it doesn't quite pass all the tests for most compilers (sigh...)\n// Enable this for your compiler if is_convertible_test.cpp will compile it...\n//\n// Note we do not enable this for VC7.1, because even though it passes all the\n// type_traits tests it is known to cause problems when instantiation occurs\n// deep within the instantiation tree :-(\n//\nstruct any_conversion\n{\n    template 
<typename T> any_conversion(const volatile T&);\n    template <typename T> any_conversion(const T&);\n    template <typename T> any_conversion(volatile T&);\n    // we need this constructor to catch references to functions\n    // (which can not be cv-qualified):\n    template <typename T> any_conversion(T&);\n};\n\ntemplate <typename From, typename To>\nstruct is_convertible_basic_impl\n{\n    static ::boost::type_traits::no_type BOOST_TT_DECL _m_check(any_conversion ...);\n    static ::boost::type_traits::yes_type BOOST_TT_DECL _m_check(To, int);\n    typedef typename add_lvalue_reference<From>::type lvalue_type;\n    typedef typename add_rvalue_reference<From>::type rvalue_type; \n    static lvalue_type _m_from;\n\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof( _m_check(static_cast<rvalue_type>(_m_from), 0) ) == sizeof(::boost::type_traits::yes_type)\n        );\n#else\n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof( _m_check(_m_from, 0) ) == sizeof(::boost::type_traits::yes_type)\n        );\n#endif\n};\n\n#elif defined(__DMC__)\n\nstruct any_conversion\n{\n    template <typename T> any_conversion(const volatile T&);\n    template <typename T> any_conversion(const T&);\n    template <typename T> any_conversion(volatile T&);\n    // we need this constructor to catch references to functions\n    // (which can not be cv-qualified):\n    template <typename T> any_conversion(T&);\n};\n\ntemplate <typename From, typename To>\nstruct is_convertible_basic_impl\n{\n    // Using '...' doesn't always work on Digital Mars. 
This version seems to.\n    template <class T>\n    static ::boost::type_traits::no_type BOOST_TT_DECL _m_check(any_conversion,  float, T);\n    static ::boost::type_traits::yes_type BOOST_TT_DECL _m_check(To, int, int);\n    typedef typename add_lvalue_reference<From>::type lvalue_type;\n    typedef typename add_rvalue_reference<From>::type rvalue_type;\n    static lvalue_type _m_from;\n\n    // Static constants sometime cause the conversion of _m_from to To to be\n    // called. This doesn't happen with an enum.\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\n    enum { value =\n        sizeof( _m_check(static_cast<rvalue_type>(_m_from), 0, 0) ) == sizeof(::boost::type_traits::yes_type)\n        };\n#else\n    enum { value =\n        sizeof( _m_check(_m_from, 0, 0) ) == sizeof(::boost::type_traits::yes_type)\n        };\n#endif\n};\n\n#elif defined(__MWERKS__)\n// \n// CW works with the technique implemented above for EDG, except when From\n// is a function type (or a reference to such a type), in which case\n// any_conversion won't be accepted as a valid conversion. 
We detect this\n// exceptional situation and channel it through an alternative algorithm.\n//\n\ntemplate <typename From, typename To,bool FromIsFunctionRef>\nstruct is_convertible_basic_impl_aux;\n\nstruct any_conversion\n{\n    template <typename T> any_conversion(const volatile T&);\n    template <typename T> any_conversion(const T&);\n    template <typename T> any_conversion(volatile T&);\n    template <typename T> any_conversion(T&);\n};\n\ntemplate <typename From, typename To>\nstruct is_convertible_basic_impl_aux<From,To,false /*FromIsFunctionRef*/>\n{\n    static ::boost::type_traits::no_type BOOST_TT_DECL _m_check(any_conversion ...);\n    static ::boost::type_traits::yes_type BOOST_TT_DECL _m_check(To, int);\n    typedef typename add_lvalue_reference<From>::type lvalue_type;\n    typedef typename add_rvalue_reference<From>::type rvalue_type; \n    static lvalue_type _m_from;\n\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof( _m_check(static_cast<rvalue_type>(_m_from), 0) ) == sizeof(::boost::type_traits::yes_type)\n        );\n#else\n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof( _m_check(_m_from, 0) ) == sizeof(::boost::type_traits::yes_type)\n        );\n#endif\n};\n\ntemplate <typename From, typename To>\nstruct is_convertible_basic_impl_aux<From,To,true /*FromIsFunctionRef*/>\n{\n    static ::boost::type_traits::no_type BOOST_TT_DECL _m_check(...);\n    static ::boost::type_traits::yes_type BOOST_TT_DECL _m_check(To);\n    typedef typename add_lvalue_reference<From>::type lvalue_type;\n    typedef typename add_rvalue_reference<From>::type rvalue_type;\n    static lvalue_type _m_from;\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof( _m_check(static_cast<rvalue_type>(_m_from)) ) == sizeof(::boost::type_traits::yes_type)\n        );\n#else\n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof( _m_check(_m_from) ) == 
sizeof(::boost::type_traits::yes_type)\n        );\n#endif\n};\n\ntemplate <typename From, typename To>\nstruct is_convertible_basic_impl:\n  is_convertible_basic_impl_aux<\n    From,To,\n    ::boost::is_function<typename ::boost::remove_reference<From>::type>::value\n  >\n{};\n\n#else\n//\n// This version seems to work pretty well for a wide spectrum of compilers,\n// however it does rely on undefined behaviour by passing UDT's through (...).\n//\n\n//Workaround for old compilers like MSVC 7.1 to avoid\n//forming a reference to an array of unknown bound\ntemplate <typename From>\nstruct is_convertible_basic_impl_add_lvalue_reference\n   : add_lvalue_reference<From>\n{};\n\ntemplate <typename From>\nstruct is_convertible_basic_impl_add_lvalue_reference<From[]>\n{\n    typedef From type [];\n};\n\ntemplate <typename From, typename To>\nstruct is_convertible_basic_impl\n{\n    static ::boost::type_traits::no_type BOOST_TT_DECL _m_check(...);\n    static ::boost::type_traits::yes_type BOOST_TT_DECL _m_check(To);\n    typedef typename is_convertible_basic_impl_add_lvalue_reference<From>::type lvalue_type;\n    static lvalue_type _m_from;\n#ifdef BOOST_MSVC\n#pragma warning(push)\n#pragma warning(disable:4244)\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(disable:6334)\n#endif\n#endif\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\n    typedef typename add_rvalue_reference<From>::type rvalue_type; \n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof( _m_check(static_cast<rvalue_type>(_m_from)) ) == sizeof(::boost::type_traits::yes_type)\n        );\n#else\n    BOOST_STATIC_CONSTANT(bool, value =\n        sizeof( _m_check(_m_from) ) == sizeof(::boost::type_traits::yes_type)\n        );\n#endif\n#ifdef BOOST_MSVC\n#pragma warning(pop)\n#endif\n};\n\n#endif // is_convertible_impl\n\n#if defined(__DMC__)\n// As before, a static constant sometimes causes errors on Digital Mars.\ntemplate <typename From, typename To>\nstruct 
is_convertible_impl\n{\n    enum { \n       value = ( ::boost::detail::is_convertible_basic_impl<From,To>::value && ! ::boost::is_array<To>::value && ! ::boost::is_function<To>::value) \n    };\n};\n#elif !defined(__BORLANDC__) || __BORLANDC__ > 0x551\ntemplate <typename From, typename To>\nstruct is_convertible_impl\n{\n   BOOST_STATIC_CONSTANT(bool, value = ( ::boost::detail::is_convertible_basic_impl<From, To>::value && !::boost::is_array<To>::value && !::boost::is_function<To>::value));\n};\n#endif\n\ntemplate <bool trivial1, bool trivial2, bool abstract_target>\nstruct is_convertible_impl_select\n{\n   template <class From, class To>\n   struct rebind\n   {\n      typedef is_convertible_impl<From, To> type;\n   };\n};\n\ntemplate <>\nstruct is_convertible_impl_select<true, true, false>\n{\n   template <class From, class To>\n   struct rebind\n   {\n      typedef true_type type;\n   };\n};\n\ntemplate <>\nstruct is_convertible_impl_select<false, false, true>\n{\n   template <class From, class To>\n   struct rebind\n   {\n      typedef false_type type;\n   };\n};\n\ntemplate <>\nstruct is_convertible_impl_select<true, false, true>\n{\n   template <class From, class To>\n   struct rebind\n   {\n      typedef false_type type;\n   };\n};\n\ntemplate <typename From, typename To>\nstruct is_convertible_impl_dispatch_base\n{\n#if !BOOST_WORKAROUND(__HP_aCC, < 60700)\n   typedef is_convertible_impl_select< \n      ::boost::is_arithmetic<From>::value, \n      ::boost::is_arithmetic<To>::value,\n#if !defined(BOOST_NO_IS_ABSTRACT) && !defined(BOOST_TT_CXX11_IS_CONVERTIBLE)\n      // We need to filter out abstract types, only if we don't have a strictly conforming C++11 version:\n      ::boost::is_abstract<To>::value\n#else\n      false\n#endif\n   > selector;\n#else\n   typedef is_convertible_impl_select<false, false, false> selector;\n#endif\n   typedef typename selector::template rebind<From, To> isc_binder;\n   typedef typename isc_binder::type type;\n};\n\ntemplate 
<typename From, typename To>\nstruct is_convertible_impl_dispatch \n   : public is_convertible_impl_dispatch_base<From, To>::type\n{};\n\n//\n// Now add the full and partial specialisations\n// for void types, these are common to all the\n// implementation above:\n//\n#ifndef BOOST_NO_CV_VOID_SPECIALIZATIONS\n\ntemplate <> struct is_convertible_impl_dispatch<void, void> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void, void const> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void, void const volatile> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void, void volatile> : public true_type{};\n\ntemplate <> struct is_convertible_impl_dispatch<void const, void> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void const, void const> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void const, void const volatile> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void const, void volatile> : public true_type{};\n\ntemplate <> struct is_convertible_impl_dispatch<void const volatile, void> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void const volatile, void const> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void const volatile, void const volatile> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void const volatile, void volatile> : public true_type{};\n\ntemplate <> struct is_convertible_impl_dispatch<void volatile, void> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void volatile, void const> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void volatile, void const volatile> : public true_type{};\ntemplate <> struct is_convertible_impl_dispatch<void volatile, void volatile> : public true_type{};\n\n#else\ntemplate <> struct is_convertible_impl_dispatch<void, void> : public true_type{};\n#endif // 
BOOST_NO_CV_VOID_SPECIALIZATIONS\n\ntemplate <class To> struct is_convertible_impl_dispatch<void, To> : public false_type{};\ntemplate <class From> struct is_convertible_impl_dispatch<From, void> : public false_type{};\n\n#ifndef BOOST_NO_CV_VOID_SPECIALIZATIONS\ntemplate <class To> struct is_convertible_impl_dispatch<void const, To> : public false_type{};\ntemplate <class From> struct is_convertible_impl_dispatch<From, void const> : public false_type{};\ntemplate <class To> struct is_convertible_impl_dispatch<void const volatile, To> : public false_type{};\ntemplate <class From> struct is_convertible_impl_dispatch<From, void const volatile> : public false_type{};\ntemplate <class To> struct is_convertible_impl_dispatch<void volatile, To> : public false_type{};\ntemplate <class From> struct is_convertible_impl_dispatch<From, void volatile> : public false_type{};\n#endif\n\n} // namespace detail\n\ntemplate <class From, class To> \nstruct is_convertible : public integral_constant<bool, ::boost::detail::is_convertible_impl_dispatch<From, To>::value> {};\n\n#else\n\ntemplate <class From, class To>\nstruct is_convertible : public integral_constant<bool, BOOST_IS_CONVERTIBLE(From, To)> {};\n\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_CONVERTIBLE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_destructible.hpp",
    "content": "\n//  (C) Copyright John Maddock 2015.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_DESTRUCTIBLE_HPP_INCLUDED\n#define BOOST_TT_IS_DESTRUCTIBLE_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n#include <boost/detail/workaround.hpp>\n\n#if !defined(BOOST_NO_CXX11_DECLTYPE) && !BOOST_WORKAROUND(BOOST_MSVC, < 1800)\n\n#include <boost/type_traits/detail/yes_no_type.hpp>\n#include <boost/type_traits/declval.hpp>\n\nnamespace boost{\n\n   namespace detail{\n\n      struct is_destructible_imp\n      {\n         template<typename T, typename = decltype(boost::declval<T&>().~T())>\n         static boost::type_traits::yes_type test(int);\n         template<typename>\n         static boost::type_traits::no_type test(...);\n      };\n\n   }\n\n   template <class T> struct is_destructible : public integral_constant<bool, sizeof(detail::is_destructible_imp::test<T>(0)) == sizeof(boost::type_traits::yes_type)>{};\n\n#else\n\n#include <boost/type_traits/is_pod.hpp>\n#include <boost/type_traits/is_class.hpp>\n\nnamespace boost{\n\n   // We don't know how to implement this:\n   template <class T> struct is_destructible : public integral_constant<bool, is_pod<T>::value || is_class<T>::value>{};\n#endif\n\n   template <> struct is_destructible<void> : public false_type{};\n   template <> struct is_destructible<void const> : public false_type{};\n   template <> struct is_destructible<void volatile> : public false_type{};\n   template <> struct is_destructible<void const volatile> : public false_type{};\n   template <class T> struct is_destructible<T&> : public is_destructible<T>{};\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\n   template <class T> struct is_destructible<T&&> : public 
is_destructible<T>{};\n#endif\n   template <class T, std::size_t N> struct is_destructible<T[N]> : public is_destructible<T>{};\n   template <class T> struct is_destructible<T[]> : public is_destructible<T>{};\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_DESTRUCTIBLE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_enum.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_IS_ENUM_HPP_INCLUDED\n#define BOOST_TT_IS_ENUM_HPP_INCLUDED\n\n#include <boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n#ifndef BOOST_IS_ENUM\n#include <boost/type_traits/add_reference.hpp>\n#include <boost/type_traits/is_arithmetic.hpp>\n#include <boost/type_traits/is_reference.hpp>\n#include <boost/type_traits/is_convertible.hpp>\n#include <boost/type_traits/is_array.hpp>\n#ifdef __GNUC__\n#include <boost/type_traits/is_function.hpp>\n#endif\n#include <boost/type_traits/detail/config.hpp>\n#if defined(BOOST_TT_HAS_CONFORMING_IS_CLASS_IMPLEMENTATION) \n#  include <boost/type_traits/is_class.hpp>\n#  include <boost/type_traits/is_union.hpp>\n#endif\n#endif\n\nnamespace boost {\n\n#ifndef BOOST_IS_ENUM\n#if !(defined(__BORLANDC__) && (__BORLANDC__ <= 0x551))\n\nnamespace detail {\n\n#if defined(BOOST_TT_HAS_CONFORMING_IS_CLASS_IMPLEMENTATION) \n\ntemplate <typename T>\nstruct is_class_or_union\n{\n   BOOST_STATIC_CONSTANT(bool, value = ::boost::is_class<T>::value || ::boost::is_union<T>::value);\n};\n\n#else\n\ntemplate <typename T>\nstruct is_class_or_union\n{\n# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x581))// we simply can't detect it this way.\n    BOOST_STATIC_CONSTANT(bool, value = false);\n# else\n    template <class U> static ::boost::type_traits::yes_type is_class_or_union_tester(void(U::*)(void));\n\n#  if BOOST_WORKAROUND(__MWERKS__, <= 0x3000) // no SFINAE\n    static ::boost::type_traits::no_type is_class_or_union_tester(...);\n    BOOST_STATIC_CONSTANT(\n        bool, 
value = sizeof(is_class_or_union_tester(0)) == sizeof(::boost::type_traits::yes_type));\n#  else\n    template <class U>\n    static ::boost::type_traits::no_type is_class_or_union_tester(...);\n    BOOST_STATIC_CONSTANT(\n        bool, value = sizeof(is_class_or_union_tester<T>(0)) == sizeof(::boost::type_traits::yes_type));\n#  endif\n# endif\n};\n#endif\n\nstruct int_convertible\n{\n    int_convertible(int);\n};\n\n// Don't evaluate convertibility to int_convertible unless the type\n// is non-arithmetic. This suppresses warnings with GCC.\ntemplate <bool is_typename_arithmetic_or_reference = true>\nstruct is_enum_helper\n{\n    template <typename T> struct type\n    {\n        BOOST_STATIC_CONSTANT(bool, value = false);\n    };\n};\n\ntemplate <>\nstruct is_enum_helper<false>\n{\n    template <typename T> struct type\n    {\n       static const bool value = ::boost::is_convertible<typename boost::add_reference<T>::type, ::boost::detail::int_convertible>::value;\n    };\n};\n\ntemplate <typename T> struct is_enum_impl\n{\n   //typedef ::boost::add_reference<T> ar_t;\n   //typedef typename ar_t::type r_type;\n\n#if defined(__GNUC__)\n\n#ifdef BOOST_TT_HAS_CONFORMING_IS_CLASS_IMPLEMENTATION\n    \n   // We MUST check for is_class_or_union on conforming compilers in\n   // order to correctly deduce that noncopyable types are not enums\n   // (dwa 2002/04/15)...\n   BOOST_STATIC_CONSTANT(bool, selector =\n           ::boost::is_arithmetic<T>::value\n         || ::boost::is_reference<T>::value\n         || ::boost::is_function<T>::value\n         || is_class_or_union<T>::value\n         || is_array<T>::value);\n#else\n   // ...however, not checking is_class_or_union on non-conforming\n   // compilers prevents a dependency recursion.\n   BOOST_STATIC_CONSTANT(bool, selector =\n           ::boost::is_arithmetic<T>::value\n         || ::boost::is_reference<T>::value\n         || ::boost::is_function<T>::value\n         || is_array<T>::value);\n#endif // 
BOOST_TT_HAS_CONFORMING_IS_CLASS_IMPLEMENTATION\n\n#else // !defined(__GNUC__):\n    \n   BOOST_STATIC_CONSTANT(bool, selector =\n           ::boost::is_arithmetic<T>::value\n         || ::boost::is_reference<T>::value\n         || is_class_or_union<T>::value\n         || is_array<T>::value);\n    \n#endif\n\n#if BOOST_WORKAROUND(__BORLANDC__, < 0x600)\n    typedef ::boost::detail::is_enum_helper<\n          ::boost::detail::is_enum_impl<T>::selector\n        > se_t;\n#else\n    typedef ::boost::detail::is_enum_helper<selector> se_t;\n#endif\n\n    typedef typename se_t::template type<T> helper;\n    BOOST_STATIC_CONSTANT(bool, value = helper::value);\n};\n\n} // namespace detail\n\ntemplate <class T> struct is_enum : public integral_constant<bool, ::boost::detail::is_enum_impl<T>::value> {};\n\n#else // __BORLANDC__\n//\n// buggy is_convertible prevents working\n// implementation of is_enum:\ntemplate <class T> struct is_enum : public integral_constant<bool, false> {};\n\n#endif\n\n#else // BOOST_IS_ENUM\n\ntemplate <class T> struct is_enum : public integral_constant<bool, BOOST_IS_ENUM(T)> {};\n\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_ENUM_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_floating_point.hpp",
    "content": "//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000-2005.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TYPE_TRAITS_IS_FLOATING_HPP_INCLUDED\n#define BOOST_TYPE_TRAITS_IS_FLOATING_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\n//* is a type T a floating-point type described in the standard (3.9.1p8)\n   template <class T> struct is_floating_point : public false_type{};\n   template <class T> struct is_floating_point<const T> : public is_floating_point<T>{};\n   template <class T> struct is_floating_point<volatile const T> : public is_floating_point<T>{};\n   template <class T> struct is_floating_point<volatile T> : public is_floating_point<T>{};\n   template<> struct is_floating_point<float> : public true_type{};\n   template<> struct is_floating_point<double> : public true_type{};\n   template<> struct is_floating_point<long double> : public true_type{};\n   \n#if defined(BOOST_HAS_FLOAT128)\n   template<> struct is_floating_point<__float128> : public true_type{};\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TYPE_TRAITS_IS_FLOAT_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_function.hpp",
    "content": "\n//  Copyright 2000 John Maddock (john@johnmaddock.co.uk)\n//  Copyright 2002 Aleksey Gurtovoy (agurtovoy@meta-comm.com)\n//\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_FUNCTION_HPP_INCLUDED\n#define BOOST_TT_IS_FUNCTION_HPP_INCLUDED\n\n#include <boost/type_traits/is_reference.hpp>\n#include <boost/type_traits/detail/config.hpp>\n\n#if !defined(BOOST_TT_TEST_MS_FUNC_SIGS)\n#   include <boost/type_traits/detail/is_function_ptr_helper.hpp>\n#else\n#   include <boost/type_traits/detail/is_function_ptr_tester.hpp>\n#   include <boost/type_traits/detail/yes_no_type.hpp>\n#endif\n\n// is a type a function?\n// Please note that this implementation is unnecessarily complex:\n// we could just use !is_convertible<T*, const volatile void*>::value,\n// except that some compilers erroneously allow conversions from\n// function pointers to void*.\n\nnamespace boost {\n\n#if !defined( __CODEGEARC__ )\n\nnamespace detail {\n\n#if !defined(BOOST_TT_TEST_MS_FUNC_SIGS)\ntemplate<bool is_ref = true>\nstruct is_function_chooser\n{\n   template< typename T > struct result_\n      : public false_type {};\n};\n\ntemplate <>\nstruct is_function_chooser<false>\n{\n    template< typename T > struct result_\n        : public ::boost::type_traits::is_function_ptr_helper<T*> {};\n};\n\ntemplate <typename T>\nstruct is_function_impl\n    : public is_function_chooser< ::boost::is_reference<T>::value >\n        ::BOOST_NESTED_TEMPLATE result_<T>\n{\n};\n\n#else\n\ntemplate <typename T>\nstruct is_function_impl\n{\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(push)\n#pragma warning(disable:6334)\n#endif\n    static T* t;\n    BOOST_STATIC_CONSTANT(\n        bool, value = 
sizeof(::boost::type_traits::is_function_ptr_tester(t))\n        == sizeof(::boost::type_traits::yes_type)\n        );\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(pop)\n#endif\n};\n\ntemplate <typename T>\nstruct is_function_impl<T&> : public false_type\n{};\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\ntemplate <typename T>\nstruct is_function_impl<T&&> : public false_type\n{};\n#endif\n\n#endif\n\n} // namespace detail\n\n#endif // !defined( __CODEGEARC__ )\n\n#if defined( __CODEGEARC__ )\ntemplate <class T> struct is_function : integral_constant<bool, __is_function(T)> {};\n#else\ntemplate <class T> struct is_function : integral_constant<bool, ::boost::detail::is_function_impl<T>::value> {};\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\ntemplate <class T> struct is_function<T&&> : public false_type {};\n#endif\n#endif\n} // namespace boost\n\n#endif // BOOST_TT_IS_FUNCTION_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_integral.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_INTEGRAL_HPP_INCLUDED\n#define BOOST_TT_IS_INTEGRAL_HPP_INCLUDED\n\n#include <boost/config.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\n#if defined( __CODEGEARC__ )\n   template <class T>\n   struct is_integral : public integral_constant<bool, __is_integral(T)> {};\n#else\n\ntemplate <class T> struct is_integral : public false_type {};\ntemplate <class T> struct is_integral<const T> : public is_integral<T> {};\ntemplate <class T> struct is_integral<volatile const T> : public is_integral<T>{};\ntemplate <class T> struct is_integral<volatile T> : public is_integral<T>{};\n\n//* is a type T an [cv-qualified-] integral type described in the standard (3.9.1p3)\n// as an extension we include long long, as this is likely to be added to the\n// standard at a later date\ntemplate<> struct is_integral<unsigned char> : public true_type {};\ntemplate<> struct is_integral<unsigned short> : public true_type{};\ntemplate<> struct is_integral<unsigned int> : public true_type{};\ntemplate<> struct is_integral<unsigned long> : public true_type{};\n\ntemplate<> struct is_integral<signed char> : public true_type{};\ntemplate<> struct is_integral<short> : public true_type{};\ntemplate<> struct is_integral<int> : public true_type{};\ntemplate<> struct is_integral<long> : public true_type{};\n\ntemplate<> struct is_integral<char> : public true_type{};\ntemplate<> struct is_integral<bool> : public true_type{};\n\n#ifndef BOOST_NO_INTRINSIC_WCHAR_T\n// If the following line fails to compile and you're using the Intel\n// compiler, see 
http://lists.boost.org/MailArchives/boost-users/msg06567.php,\n// and define BOOST_NO_INTRINSIC_WCHAR_T on the command line.\ntemplate<> struct is_integral<wchar_t> : public true_type{};\n#endif\n\n// Same set of integral types as in boost/type_traits/integral_promotion.hpp.\n// Please, keep in sync. -- Alexander Nasonov\n#if (defined(BOOST_INTEL_CXX_VERSION) && defined(_MSC_VER) && (BOOST_INTEL_CXX_VERSION <= 600)) \\\n    || (defined(__BORLANDC__) && (__BORLANDC__ == 0x600) && (_MSC_VER < 1300))\ntemplate<> struct is_integral<unsigned __int8> : public true_type{};\ntemplate<> struct is_integral<unsigned __int16> : public true_type{};\ntemplate<> struct is_integral<unsigned __int32> : public true_type{};\ntemplate<> struct is_integral<__int8> : public true_type{};\ntemplate<> struct is_integral<__int16> : public true_type{};\ntemplate<> struct is_integral<__int32> : public true_type{};\n#ifdef __BORLANDC__\ntemplate<> struct is_integral<unsigned __int64> : public true_type{};\ntemplate<> struct is_integral<__int64> : public true_type{};\n#endif\n#endif\n\n# if defined(BOOST_HAS_LONG_LONG)\ntemplate<> struct is_integral< ::boost::ulong_long_type> : public true_type{};\ntemplate<> struct is_integral< ::boost::long_long_type> : public true_type{};\n#elif defined(BOOST_HAS_MS_INT64)\ntemplate<> struct is_integral<unsigned __int64> : public true_type{};\ntemplate<> struct is_integral<__int64> : public true_type{};\n#endif\n        \n#ifdef BOOST_HAS_INT128\ntemplate<> struct is_integral<boost::int128_type> : public true_type{};\ntemplate<> struct is_integral<boost::uint128_type> : public true_type{};\n#endif\n#ifndef BOOST_NO_CXX11_CHAR16_T\ntemplate<> struct is_integral<char16_t> : public true_type{};\n#endif\n#ifndef BOOST_NO_CXX11_CHAR32_T\ntemplate<> struct is_integral<char32_t> : public true_type{};\n#endif\n\n#endif  // non-CodeGear implementation\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_INTEGRAL_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_lvalue_reference.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//      Howard Hinnant and John Maddock 2000. \n//  (C) Copyright Mat Marcus, Jesse Jones and Adobe Systems Inc 2001\n\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n//    Fixed is_pointer, is_lvalue_reference, is_const, is_volatile, is_same, \n//    is_member_pointer based on the Simulated Partial Specialization work \n//    of Mat Marcus and Jesse Jones. See  http://opensource.adobe.com or \n//    http://groups.yahoo.com/group/boost/message/5441 \n//    Some workarounds in here use ideas suggested from \"Generic<Programming>: \n//    Mappings between Types and Values\" \n//    by Andrei Alexandrescu (see http://www.cuj.com/experts/1810/alexandr.html).\n\n\n#ifndef BOOST_TT_IS_LVALUE_REFERENCE_HPP_INCLUDED\n#define BOOST_TT_IS_LVALUE_REFERENCE_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\n#if defined( __CODEGEARC__ )\n   template <class T> struct is_lvalue_reference : public integral_constant<bool, __is_reference(T)>{};\n#else\n\n   template <class T> struct is_lvalue_reference : public false_type{};\n   template <class T> struct is_lvalue_reference<T&> : public true_type{};\n\n#if  defined(BOOST_ILLEGAL_CV_REFERENCES)\n// these are illegal specialisations; cv-qualifies applied to\n// references have no effect according to [8.3.2p1],\n// C++ Builder requires them though as it treats cv-qualified\n// references as distinct types...\n   template <class T> struct is_lvalue_reference<T&const> : public true_type{};\n   template <class T> struct is_lvalue_reference<T&volatile> : public true_type{};\n   template <class T> struct is_lvalue_reference<T&const volatile> : public 
true_type{};\n#endif\n\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_REFERENCE_HPP_INCLUDED\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_member_function_pointer.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_IS_MEMBER_FUNCTION_POINTER_HPP_INCLUDED\n#define BOOST_TT_IS_MEMBER_FUNCTION_POINTER_HPP_INCLUDED\n\n#include <boost/type_traits/detail/config.hpp>\n#include <boost/detail/workaround.hpp>\n\n#if !BOOST_WORKAROUND(__BORLANDC__, < 0x600) && !defined(BOOST_TT_TEST_MS_FUNC_SIGS)\n   //\n   // Note: we use the \"workaround\" version for MSVC because it works for \n   // __stdcall etc function types, where as the partial specialisation\n   // version does not do so.\n   //\n#   include <boost/type_traits/detail/is_mem_fun_pointer_impl.hpp>\n#   include <boost/type_traits/remove_cv.hpp>\n#   include <boost/type_traits/integral_constant.hpp>\n#else\n#   include <boost/type_traits/is_reference.hpp>\n#   include <boost/type_traits/is_array.hpp>\n#   include <boost/type_traits/detail/yes_no_type.hpp>\n#   include <boost/type_traits/detail/is_mem_fun_pointer_tester.hpp>\n#endif\n\nnamespace boost {\n\n#if defined( __CODEGEARC__ )\ntemplate <class T> struct is_member_function_pointer : public integral_constant<bool, __is_member_function_pointer( T )> {};\n#elif !BOOST_WORKAROUND(__BORLANDC__, < 0x600) && !defined(BOOST_TT_TEST_MS_FUNC_SIGS)\n\ntemplate <class T> struct is_member_function_pointer \n   : public ::boost::integral_constant<bool, ::boost::type_traits::is_mem_fun_pointer_impl<typename remove_cv<T>::type>::value>{};\n\n#else\n\nnamespace detail {\n\n#ifndef __BORLANDC__\n\ntemplate <bool>\nstruct is_mem_fun_pointer_select\n{\n   template <class T> struct result_ : public false_type{};\n};\n\ntemplate <>\nstruct 
is_mem_fun_pointer_select<false>\n{\n    template <typename T> struct result_\n    {\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(push)\n#pragma warning(disable:6334)\n#endif\n        static T* make_t;\n        typedef result_<T> self_type;\n\n        BOOST_STATIC_CONSTANT(\n            bool, value = (\n                1 == sizeof(::boost::type_traits::is_mem_fun_pointer_tester(self_type::make_t))\n            ));\n#if BOOST_WORKAROUND(BOOST_MSVC_FULL_VER, >= 140050000)\n#pragma warning(pop)\n#endif\n    };\n};\n\ntemplate <typename T>\nstruct is_member_function_pointer_impl\n    : public is_mem_fun_pointer_select< \n      ::boost::is_reference<T>::value || ::boost::is_array<T>::value>::template result_<T>{};\n\ntemplate <typename T>\nstruct is_member_function_pointer_impl<T&> : public false_type{};\n\n#else // Borland C++\n\ntemplate <typename T>\nstruct is_member_function_pointer_impl\n{\n   static T* m_t;\n   BOOST_STATIC_CONSTANT(\n              bool, value =\n               (1 == sizeof(type_traits::is_mem_fun_pointer_tester(m_t))) );\n};\n\ntemplate <typename T>\nstruct is_member_function_pointer_impl<T&>\n{\n   BOOST_STATIC_CONSTANT(bool, value = false);\n};\n\n#endif\n\ntemplate<> struct is_member_function_pointer_impl<void> : public false_type{};\n#ifndef BOOST_NO_CV_VOID_SPECIALIZATIONS\ntemplate<> struct is_member_function_pointer_impl<void const> : public false_type{};\ntemplate<> struct is_member_function_pointer_impl<void const volatile> : public false_type{};\ntemplate<> struct is_member_function_pointer_impl<void volatile> : public false_type{};\n#endif\n\n} // namespace detail\n\ntemplate <class T>\nstruct is_member_function_pointer\n   : public integral_constant<bool, ::boost::detail::is_member_function_pointer_impl<T>::value>{};\n\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_MEMBER_FUNCTION_POINTER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_member_pointer.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//      Howard Hinnant and John Maddock 2000. \n//  (C) Copyright Mat Marcus, Jesse Jones and Adobe Systems Inc 2001\n\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n//    Fixed is_pointer, is_reference, is_const, is_volatile, is_same, \n//    is_member_pointer based on the Simulated Partial Specialization work \n//    of Mat Marcus and Jesse Jones. See  http://opensource.adobe.com or \n//    http://groups.yahoo.com/group/boost/message/5441 \n//    Some workarounds in here use ideas suggested from \"Generic<Programming>: \n//    Mappings between Types and Values\" \n//    by Andrei Alexandrescu (see http://www.cuj.com/experts/1810/alexandr.html).\n\n\n#ifndef BOOST_TT_IS_MEMBER_POINTER_HPP_INCLUDED\n#define BOOST_TT_IS_MEMBER_POINTER_HPP_INCLUDED\n\n#include <boost/detail/workaround.hpp>\n#include <boost/type_traits/is_member_function_pointer.hpp>\n\nnamespace boost {\n\n#if defined( __CODEGEARC__ )\ntemplate <class T> struct is_member_pointer : public integral_constant<bool, __is_member_pointer(T)>{};\n#else\ntemplate <class T> struct is_member_pointer : public integral_constant<bool, ::boost::is_member_function_pointer<T>::value>{};\ntemplate <class T, class U> struct is_member_pointer<U T::* > : public true_type{};\n\n#if !BOOST_WORKAROUND(__MWERKS__,<=0x3003) && !BOOST_WORKAROUND(__IBMCPP__, <=600)\ntemplate <class T, class U> struct is_member_pointer<U T::*const> : public true_type{};\ntemplate <class T, class U> struct is_member_pointer<U T::*const volatile> : public true_type{};\ntemplate <class T, class U> struct is_member_pointer<U T::*volatile> : public true_type{};\n#endif\n\n#endif\n\n} // namespace boost\n\n#endif // 
BOOST_TT_IS_MEMBER_POINTER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_pod.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_POD_HPP_INCLUDED\n#define BOOST_TT_IS_POD_HPP_INCLUDED\n\n#include <boost/type_traits/detail/config.hpp>\n#include <boost/type_traits/is_void.hpp>\n#include <boost/type_traits/is_scalar.hpp>\n#include <boost/type_traits/intrinsics.hpp>\n\n#ifdef __SUNPRO_CC\n#include <boost/type_traits/is_function.hpp>\n#endif\n\n#include <cstddef>\n\n#ifndef BOOST_IS_POD\n#define BOOST_INTERNAL_IS_POD(T) false\n#else\n#define BOOST_INTERNAL_IS_POD(T) BOOST_IS_POD(T)\n#endif\n\nnamespace boost {\n\n// forward declaration, needed by 'is_pod_array_helper' template below\ntemplate< typename T > struct is_POD;\n\ntemplate <typename T> struct is_pod\n: public integral_constant<bool, ::boost::is_scalar<T>::value || ::boost::is_void<T>::value || BOOST_INTERNAL_IS_POD(T)>\n{};\n\n#if !defined(BOOST_NO_ARRAY_TYPE_SPECIALIZATIONS)\ntemplate <typename T, std::size_t sz> struct is_pod<T[sz]> : public is_pod<T>{};\n#endif\n\n\n// the following help compilers without partial specialization support:\ntemplate<> struct is_pod<void> : public true_type{};\n\n#ifndef BOOST_NO_CV_VOID_SPECIALIZATIONS\ntemplate<> struct is_pod<void const> : public true_type{};\ntemplate<> struct is_pod<void const volatile> : public true_type{};\ntemplate<> struct is_pod<void volatile> : public true_type{};\n#endif\n\ntemplate<class T> struct is_POD : public is_pod<T>{};\n\n} // namespace boost\n\n#undef BOOST_INTERNAL_IS_POD\n\n#endif // BOOST_TT_IS_POD_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_pointer.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//      Howard Hinnant and John Maddock 2000. \n//  (C) Copyright Mat Marcus, Jesse Jones and Adobe Systems Inc 2001\n\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n//    Fixed is_pointer, is_reference, is_const, is_volatile, is_same, \n//    is_member_pointer based on the Simulated Partial Specialization work \n//    of Mat Marcus and Jesse Jones. See  http://opensource.adobe.com or \n//    http://groups.yahoo.com/group/boost/message/5441 \n//    Some workarounds in here use ideas suggested from \"Generic<Programming>: \n//    Mappings between Types and Values\" \n//    by Andrei Alexandrescu (see http://www.cuj.com/experts/1810/alexandr.html).\n\n\n#ifndef BOOST_TT_IS_POINTER_HPP_INCLUDED\n#define BOOST_TT_IS_POINTER_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\n#if defined( __CODEGEARC__ )\ntemplate <class T> struct is_pointer : public integral_constant<bool, __is_pointer(T)>{};\n#else\ntemplate <class T> struct is_pointer : public false_type{};\ntemplate <class T> struct is_pointer<T*> : public true_type{};\ntemplate <class T> struct is_pointer<T*const> : public true_type{};\ntemplate <class T> struct is_pointer<T*const volatile> : public true_type{};\ntemplate <class T> struct is_pointer<T*volatile> : public true_type{};\n\n#ifdef BOOST_MSVC\ntemplate <class T> struct is_pointer<T const> : public is_pointer<T>{};\ntemplate <class T> struct is_pointer<T const volatile> : public is_pointer<T>{};\ntemplate <class T> struct is_pointer<T volatile> : public is_pointer<T>{};\n#endif\n\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_POINTER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_polymorphic.hpp",
    "content": "//  (C) Copyright John Maddock 2000. \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_POLYMORPHIC_HPP\n#define BOOST_TT_IS_POLYMORPHIC_HPP\n\n#include <boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n#ifndef BOOST_IS_POLYMORPHIC\n#include <boost/type_traits/is_class.hpp>\n#endif\n#include <boost/detail/workaround.hpp>\n\n#if defined(BOOST_MSVC) && (BOOST_MSVC >= 1700)\n#pragma warning(push)\n#pragma warning(disable:4250)\n#endif\n\nnamespace boost{\n\n#ifndef BOOST_IS_POLYMORPHIC\n\nnamespace detail{\n\ntemplate <class T>\nstruct is_polymorphic_imp1\n{\n# if BOOST_WORKAROUND(__MWERKS__, <= 0x2407) // CWPro7 should return false always.\n    typedef char d1, (&d2)[2];\n# else \n   struct d1 : public T\n   {\n      d1();\n#  if !defined(__GNUC__) // this raises warnings with some classes, and buys nothing with GCC\n      ~d1()throw();\n#  endif \n      char padding[256];\n   private:\n      // keep some picky compilers happy:\n      d1(const d1&);\n      d1& operator=(const d1&);\n   };\n   struct d2 : public T\n   {\n      d2();\n      virtual ~d2()throw();\n#  if !defined(BOOST_MSVC) && !defined(__ICL)\n      // for some reason this messes up VC++ when T has virtual bases,\n      // probably likewise for compilers that use the same ABI:\n      struct unique{};\n      virtual void unique_name_to_boost5487629(unique*);\n#  endif\n      char padding[256];\n   private:\n      // keep some picky compilers happy:\n      d2(const d2&);\n      d2& operator=(const d2&);\n   };\n# endif \n   BOOST_STATIC_CONSTANT(bool, value = (sizeof(d2) == sizeof(d1)));\n};\n\ntemplate <class T> struct is_polymorphic_imp1<T const> : public 
is_polymorphic_imp1<T>{};\ntemplate <class T> struct is_polymorphic_imp1<T const volatile> : public is_polymorphic_imp1<T>{};\ntemplate <class T> struct is_polymorphic_imp1<T volatile> : public is_polymorphic_imp1<T>{};\n\ntemplate <class T>\nstruct is_polymorphic_imp2\n{\n   BOOST_STATIC_CONSTANT(bool, value = false);\n};\n\ntemplate <bool is_class>\nstruct is_polymorphic_selector\n{\n   template <class T>\n   struct rebind\n   {\n      typedef is_polymorphic_imp2<T> type;\n   };\n};\n\ntemplate <>\nstruct is_polymorphic_selector<true>\n{\n   template <class T>\n   struct rebind\n   {\n      typedef is_polymorphic_imp1<T> type;\n   };\n};\n\ntemplate <class T>\nstruct is_polymorphic_imp\n{\n   typedef is_polymorphic_selector< ::boost::is_class<T>::value> selector;\n   typedef typename selector::template rebind<T> binder;\n   typedef typename binder::type imp_type;\n   BOOST_STATIC_CONSTANT(bool, value = imp_type::value);\n};\n\n} // namespace detail\n\ntemplate <class T> struct is_polymorphic : public integral_constant<bool, ::boost::detail::is_polymorphic_imp<T>::value> {};\n\n#else // BOOST_IS_POLYMORPHIC\n\ntemplate <class T> struct is_polymorphic : public integral_constant<bool, BOOST_IS_POLYMORPHIC(T)> {};\n\n#endif\n\n} // namespace boost\n\n#if defined(BOOST_MSVC) && (BOOST_MSVC >= 1700)\n#pragma warning(pop)\n#endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_reference.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//      Howard Hinnant and John Maddock 2000, 2010. \n//  (C) Copyright Mat Marcus, Jesse Jones and Adobe Systems Inc 2001\n\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_REFERENCE_HPP_INCLUDED\n#define BOOST_TT_IS_REFERENCE_HPP_INCLUDED\n\n#include <boost/type_traits/is_lvalue_reference.hpp>\n#include <boost/type_traits/is_rvalue_reference.hpp>\n\nnamespace boost {\n\ntemplate <class T> struct is_reference \n   : public \n   integral_constant<\n      bool, \n      ::boost::is_lvalue_reference<T>::value || ::boost::is_rvalue_reference<T>::value>\n{};\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_REFERENCE_HPP_INCLUDED\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_rvalue_reference.hpp",
    "content": "\n//  (C) John Maddock 2010. \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_RVALUE_REFERENCE_HPP_INCLUDED\n#define BOOST_TT_IS_RVALUE_REFERENCE_HPP_INCLUDED\n\n#include <boost/config.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\ntemplate <class T> struct is_rvalue_reference : public false_type {};\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\ntemplate <class T> struct is_rvalue_reference<T&&> : public true_type {};\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_REFERENCE_HPP_INCLUDED\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_same.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//      Howard Hinnant and John Maddock 2000. \n//  (C) Copyright Mat Marcus, Jesse Jones and Adobe Systems Inc 2001\n\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n//    Fixed is_pointer, is_reference, is_const, is_volatile, is_same, \n//    is_member_pointer based on the Simulated Partial Specialization work \n//    of Mat Marcus and Jesse Jones. See  http://opensource.adobe.com or \n//    http://groups.yahoo.com/group/boost/message/5441 \n//    Some workarounds in here use ideas suggested from \"Generic<Programming>: \n//    Mappings between Types and Values\" \n//    by Andrei Alexandrescu (see http://www.cuj.com/experts/1810/alexandr.html).\n\n\n#ifndef BOOST_TT_IS_SAME_HPP_INCLUDED\n#define BOOST_TT_IS_SAME_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\n\n   template <class T, class U> struct is_same : public false_type {};\n   template <class T> struct is_same<T,T> : public true_type {};\n#if BOOST_WORKAROUND(__BORLANDC__, < 0x600)\n// without this, Borland's compiler gives the wrong answer for\n// references to arrays:\n   template <class T> struct is_same<T&, T&> : public true_type{};\n#endif\n\n\n} // namespace boost\n\n#endif  // BOOST_TT_IS_SAME_HPP_INCLUDED\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_scalar.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_SCALAR_HPP_INCLUDED\n#define BOOST_TT_IS_SCALAR_HPP_INCLUDED\n\n#include <boost/type_traits/is_arithmetic.hpp>\n#include <boost/type_traits/is_enum.hpp>\n#include <boost/type_traits/is_pointer.hpp>\n#include <boost/type_traits/is_member_pointer.hpp>\n#include <boost/config.hpp>\n\nnamespace boost {\n\ntemplate <typename T>\nstruct is_scalar\n   : public integral_constant<bool, ::boost::is_arithmetic<T>::value || ::boost::is_enum<T>::value || ::boost::is_pointer<T>::value || ::boost::is_member_pointer<T>::value>\n{};\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_SCALAR_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_signed.hpp",
    "content": "\n//  (C) Copyright John Maddock 2005.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_IS_SIGNED_HPP_INCLUDED\n#define BOOST_TT_IS_SIGNED_HPP_INCLUDED\n\n#include <boost/type_traits/is_integral.hpp>\n#include <boost/type_traits/remove_cv.hpp>\n#include <boost/type_traits/is_enum.hpp>\n#include <climits>\n\nnamespace boost {\n\n#if !defined( __CODEGEARC__ )\n\n#if !(defined(BOOST_MSVC) && BOOST_MSVC <= 1310) && \\\n    !(defined(__EDG_VERSION__) && __EDG_VERSION__ <= 238) &&\\\n    !defined(BOOST_NO_INCLASS_MEMBER_INITIALIZATION)\n\nnamespace detail{\n\ntemplate <class T>\nstruct is_signed_values\n{\n   //\n   // Note that we cannot use BOOST_STATIC_CONSTANT here, using enum's\n   // rather than \"real\" static constants simply doesn't work or give\n   // the correct answer.\n   //\n   typedef typename remove_cv<T>::type no_cv_t;\n   static const no_cv_t minus_one = (static_cast<no_cv_t>(-1));\n   static const no_cv_t zero = (static_cast<no_cv_t>(0));\n};\n\ntemplate <class T>\nstruct is_signed_helper\n{\n   typedef typename remove_cv<T>::type no_cv_t;\n   BOOST_STATIC_CONSTANT(bool, value = (!(::boost::detail::is_signed_values<T>::minus_one  > boost::detail::is_signed_values<T>::zero)));\n};\n\ntemplate <bool integral_type>\nstruct is_signed_select_helper\n{\n   template <class T>\n   struct rebind\n   {\n      typedef is_signed_helper<T> type;\n   };\n};\n\ntemplate <>\nstruct is_signed_select_helper<false>\n{\n   template <class T>\n   struct rebind\n   {\n      typedef false_type type;\n   };\n};\n\ntemplate <class T>\nstruct is_signed_impl\n{\n   typedef ::boost::detail::is_signed_select_helper< ::boost::is_integral<T>::value || ::boost::is_enum<T>::value> selector;\n   typedef 
typename selector::template rebind<T> binder;\n   typedef typename binder::type type;\n   BOOST_STATIC_CONSTANT(bool, value = type::value);\n};\n\n}\n\ntemplate <class T> struct is_signed : public integral_constant<bool, boost::detail::is_signed_impl<T>::value> {};\n\n#else\n\ntemplate <class T> struct is_signed : public false_type{};\n\n#endif\n\n#else //defined( __CODEGEARC__ )\n   template <class T> struct is_signed : public integral_constant<bool, __is_signed(T)>{};\n#endif\n\ntemplate <> struct is_signed<signed char> : public true_type{};\ntemplate <> struct is_signed<const signed char> : public true_type{};\ntemplate <> struct is_signed<volatile signed char> : public true_type{};\ntemplate <> struct is_signed<const volatile signed char> : public true_type{};\ntemplate <> struct is_signed<short> : public true_type{};\ntemplate <> struct is_signed<const short> : public true_type{};\ntemplate <> struct is_signed<volatile short> : public true_type{};\ntemplate <> struct is_signed<const volatile short> : public true_type{};\ntemplate <> struct is_signed<int> : public true_type{};\ntemplate <> struct is_signed<const int> : public true_type{};\ntemplate <> struct is_signed<volatile int> : public true_type{};\ntemplate <> struct is_signed<const volatile int> : public true_type{};\ntemplate <> struct is_signed<long> : public true_type{};\ntemplate <> struct is_signed<const long> : public true_type{};\ntemplate <> struct is_signed<volatile long> : public true_type{};\ntemplate <> struct is_signed<const volatile long> : public true_type{};\n\ntemplate <> struct is_signed<unsigned char> : public false_type{};\ntemplate <> struct is_signed<const unsigned char> : public false_type{};\ntemplate <> struct is_signed<volatile unsigned char> : public false_type{};\ntemplate <> struct is_signed<const volatile unsigned char> : public false_type{};\ntemplate <> struct is_signed<unsigned short> : public false_type{};\ntemplate <> struct is_signed<const unsigned short> : public 
false_type{};\ntemplate <> struct is_signed<volatile unsigned short> : public false_type{};\ntemplate <> struct is_signed<const volatile unsigned short> : public false_type{};\ntemplate <> struct is_signed<unsigned int> : public false_type{};\ntemplate <> struct is_signed<const unsigned int> : public false_type{};\ntemplate <> struct is_signed<volatile unsigned int> : public false_type{};\ntemplate <> struct is_signed<const volatile unsigned int> : public false_type{};\ntemplate <> struct is_signed<unsigned long> : public false_type{};\ntemplate <> struct is_signed<const unsigned long> : public false_type{};\ntemplate <> struct is_signed<volatile unsigned long> : public false_type{};\ntemplate <> struct is_signed<const volatile unsigned long> : public false_type{};\n#ifdef BOOST_HAS_LONG_LONG\ntemplate <> struct is_signed< ::boost::long_long_type> : public true_type{};\ntemplate <> struct is_signed<const ::boost::long_long_type> : public true_type{};\ntemplate <> struct is_signed<volatile ::boost::long_long_type> : public true_type{};\ntemplate <> struct is_signed<const volatile ::boost::long_long_type> : public true_type{};\n\ntemplate <> struct is_signed< ::boost::ulong_long_type> : public false_type{};\ntemplate <> struct is_signed<const ::boost::ulong_long_type> : public false_type{};\ntemplate <> struct is_signed<volatile ::boost::ulong_long_type> : public false_type{};\ntemplate <> struct is_signed<const volatile ::boost::ulong_long_type> : public false_type{};\n#endif\n#if defined(CHAR_MIN) \n#if CHAR_MIN != 0\ntemplate <> struct is_signed<char> : public true_type{};\ntemplate <> struct is_signed<const char> : public true_type{};\ntemplate <> struct is_signed<volatile char> : public true_type{};\ntemplate <> struct is_signed<const volatile char> : public true_type{};\n#else\ntemplate <> struct is_signed<char> : public false_type{};\ntemplate <> struct is_signed<const char> : public false_type{};\ntemplate <> struct is_signed<volatile char> : public 
false_type{};\ntemplate <> struct is_signed<const volatile char> : public false_type{};\n#endif\n#endif\n#if defined(WCHAR_MIN) && !defined(BOOST_NO_INTRINSIC_WCHAR_T)\n#if WCHAR_MIN != 0\ntemplate <> struct is_signed<wchar_t> : public true_type{};\ntemplate <> struct is_signed<const wchar_t> : public true_type{};\ntemplate <> struct is_signed<volatile wchar_t> : public true_type{};\ntemplate <> struct is_signed<const volatile wchar_t> : public true_type{};\n#else\ntemplate <> struct is_signed<wchar_t> : public false_type{};\ntemplate <> struct is_signed<const wchar_t> : public false_type{};\ntemplate <> struct is_signed<volatile wchar_t> : public false_type{};\ntemplate <> struct is_signed<const volatile wchar_t> : public false_type{};\n#endif\n#endif\n} // namespace boost\n\n#endif // BOOST_TT_IS_MEMBER_FUNCTION_POINTER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_union.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_IS_UNION_HPP_INCLUDED\n#define BOOST_TT_IS_UNION_HPP_INCLUDED\n\n#include <boost/type_traits/intrinsics.hpp>\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\n#ifdef BOOST_IS_UNION\ntemplate <class T> struct is_union : public integral_constant<bool, BOOST_IS_UNION(T)> {};\n#else\ntemplate <class T> struct is_union : public integral_constant<bool, false> {};\n#endif\n\ntemplate <class T> struct is_union<T const> : public is_union<T>{};\ntemplate <class T> struct is_union<T volatile const> : public is_union<T>{};\ntemplate <class T> struct is_union<T volatile> : public is_union<T>{};\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_UNION_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_unsigned.hpp",
    "content": "\n//  (C) Copyright John Maddock 2005.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_IS_UNSIGNED_HPP_INCLUDED\n#define BOOST_TT_IS_UNSIGNED_HPP_INCLUDED\n\n#include <boost/type_traits/is_integral.hpp>\n#include <boost/type_traits/is_enum.hpp>\n#include <boost/type_traits/remove_cv.hpp>\n\n#include <climits>\n\nnamespace boost {\n\n#if !defined( __CODEGEARC__ )\n\n#if !(defined(BOOST_MSVC) && BOOST_MSVC <= 1310) &&\\\n    !(defined(__EDG_VERSION__) && __EDG_VERSION__ <= 238) &&\\\n    !defined(BOOST_NO_INCLASS_MEMBER_INITIALIZATION)\n\nnamespace detail{\n\ntemplate <class T>\nstruct is_unsigned_values\n{\n   //\n   // Note that we cannot use BOOST_STATIC_CONSTANT here, using enum's\n   // rather than \"real\" static constants simply doesn't work or give\n   // the correct answer.\n   //\n   typedef typename remove_cv<T>::type no_cv_t;\n   static const no_cv_t minus_one = (static_cast<no_cv_t>(-1));\n   static const no_cv_t zero = (static_cast<no_cv_t>(0));\n};\n\ntemplate <class T>\nstruct is_ununsigned_helper\n{\n   BOOST_STATIC_CONSTANT(bool, value = (::boost::detail::is_unsigned_values<T>::minus_one > ::boost::detail::is_unsigned_values<T>::zero));\n};\n\ntemplate <bool integral_type>\nstruct is_unsigned_select_helper\n{\n   template <class T>\n   struct rebind\n   {\n      typedef is_ununsigned_helper<T> type;\n   };\n};\n\ntemplate <>\nstruct is_unsigned_select_helper<false>\n{\n   template <class T>\n   struct rebind\n   {\n      typedef false_type type;\n   };\n};\n\ntemplate <class T>\nstruct is_unsigned\n{\n   typedef ::boost::detail::is_unsigned_select_helper< ::boost::is_integral<T>::value || ::boost::is_enum<T>::value > selector;\n   typedef typename selector::template 
rebind<T> binder;\n   typedef typename binder::type type;\n   BOOST_STATIC_CONSTANT(bool, value = type::value);\n};\n\n} // namespace detail\n\ntemplate <class T> struct is_unsigned : public integral_constant<bool, boost::detail::is_unsigned<T>::value> {};\n\n#else\n\ntemplate <class T> struct is_unsigned : public false_type{};\n\n#endif\n\n#else // defined( __CODEGEARC__ )\ntemplate <class T> struct is_unsigned : public integral_constant<bool, __is_unsigned(T)> {};\n#endif\n\ntemplate <> struct is_unsigned<unsigned char> : public true_type{};\ntemplate <> struct is_unsigned<const unsigned char> : public true_type{};\ntemplate <> struct is_unsigned<volatile unsigned char> : public true_type{};\ntemplate <> struct is_unsigned<const volatile unsigned char> : public true_type{};\ntemplate <> struct is_unsigned<unsigned short> : public true_type{};\ntemplate <> struct is_unsigned<const unsigned short> : public true_type{};\ntemplate <> struct is_unsigned<volatile unsigned short> : public true_type{};\ntemplate <> struct is_unsigned<const volatile unsigned short> : public true_type{};\ntemplate <> struct is_unsigned<unsigned int> : public true_type{};\ntemplate <> struct is_unsigned<const unsigned int> : public true_type{};\ntemplate <> struct is_unsigned<volatile unsigned int> : public true_type{};\ntemplate <> struct is_unsigned<const volatile unsigned int> : public true_type{};\ntemplate <> struct is_unsigned<unsigned long> : public true_type{};\ntemplate <> struct is_unsigned<const unsigned long> : public true_type{};\ntemplate <> struct is_unsigned<volatile unsigned long> : public true_type{};\ntemplate <> struct is_unsigned<const volatile unsigned long> : public true_type{};\n\ntemplate <> struct is_unsigned<signed char> : public false_type{};\ntemplate <> struct is_unsigned<const signed char> : public false_type{};\ntemplate <> struct is_unsigned<volatile signed char> : public false_type{};\ntemplate <> struct is_unsigned<const volatile signed char> : public 
false_type{};\ntemplate <> struct is_unsigned< short> : public false_type{};\ntemplate <> struct is_unsigned<const  short> : public false_type{};\ntemplate <> struct is_unsigned<volatile  short> : public false_type{};\ntemplate <> struct is_unsigned<const volatile  short> : public false_type{};\ntemplate <> struct is_unsigned< int> : public false_type{};\ntemplate <> struct is_unsigned<const  int> : public false_type{};\ntemplate <> struct is_unsigned<volatile  int> : public false_type{};\ntemplate <> struct is_unsigned<const volatile  int> : public false_type{};\ntemplate <> struct is_unsigned< long> : public false_type{};\ntemplate <> struct is_unsigned<const  long> : public false_type{};\ntemplate <> struct is_unsigned<volatile  long> : public false_type{};\ntemplate <> struct is_unsigned<const volatile  long> : public false_type{};\n#ifdef BOOST_HAS_LONG_LONG\ntemplate <> struct is_unsigned< ::boost::ulong_long_type> : public true_type{};\ntemplate <> struct is_unsigned<const ::boost::ulong_long_type> : public true_type{};\ntemplate <> struct is_unsigned<volatile ::boost::ulong_long_type> : public true_type{};\ntemplate <> struct is_unsigned<const volatile ::boost::ulong_long_type> : public true_type{};\n\ntemplate <> struct is_unsigned< ::boost::long_long_type> : public false_type{};\ntemplate <> struct is_unsigned<const ::boost::long_long_type> : public false_type{};\ntemplate <> struct is_unsigned<volatile ::boost::long_long_type> : public false_type{};\ntemplate <> struct is_unsigned<const volatile ::boost::long_long_type> : public false_type{};\n#endif\n#if defined(CHAR_MIN) \n#if CHAR_MIN == 0\ntemplate <> struct is_unsigned<char> : public true_type{};\ntemplate <> struct is_unsigned<const char> : public true_type{};\ntemplate <> struct is_unsigned<volatile char> : public true_type{};\ntemplate <> struct is_unsigned<const volatile char> : public true_type{};\n#else\ntemplate <> struct is_unsigned<char> : public false_type{};\ntemplate <> struct 
is_unsigned<const char> : public false_type{};\ntemplate <> struct is_unsigned<volatile char> : public false_type{};\ntemplate <> struct is_unsigned<const volatile char> : public false_type{};\n#endif\n#endif\n#if !defined(BOOST_NO_INTRINSIC_WCHAR_T) && defined(WCHAR_MIN)\n#if WCHAR_MIN == 0\ntemplate <> struct is_unsigned<wchar_t> : public true_type{};\ntemplate <> struct is_unsigned<const wchar_t> : public true_type{};\ntemplate <> struct is_unsigned<volatile wchar_t> : public true_type{};\ntemplate <> struct is_unsigned<const volatile wchar_t> : public true_type{};\n#else\ntemplate <> struct is_unsigned<wchar_t> : public false_type{};\ntemplate <> struct is_unsigned<const wchar_t> : public false_type{};\ntemplate <> struct is_unsigned<volatile wchar_t> : public false_type{};\ntemplate <> struct is_unsigned<const volatile wchar_t> : public false_type{};\n#endif\n#endif\n} // namespace boost\n\n#endif // BOOST_TT_IS_MEMBER_FUNCTION_POINTER_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_void.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_IS_VOID_HPP_INCLUDED\n#define BOOST_TT_IS_VOID_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\ntemplate <class T>\nstruct is_void : public false_type {};\n\ntemplate<> struct is_void<void> : public true_type {};\ntemplate<> struct is_void<const void> : public true_type{};\ntemplate<> struct is_void<const volatile void> : public true_type{};\ntemplate<> struct is_void<volatile void> : public true_type{};\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_VOID_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/is_volatile.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, \n//      Howard Hinnant and John Maddock 2000. \n//  (C) Copyright Mat Marcus, Jesse Jones and Adobe Systems Inc 2001\n\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n//    Fixed is_pointer, is_reference, is_const, is_volatile, is_same, \n//    is_member_pointer based on the Simulated Partial Specialization work \n//    of Mat Marcus and Jesse Jones. See  http://opensource.adobe.com or \n//    http://groups.yahoo.com/group/boost/message/5441 \n//    Some workarounds in here use ideas suggested from \"Generic<Programming>: \n//    Mappings between Types and Values\" \n//    by Andrei Alexandrescu (see http://www.cuj.com/experts/1810/alexandr.html).\n\n\n#ifndef BOOST_TT_IS_VOLATILE_HPP_INCLUDED\n#define BOOST_TT_IS_VOLATILE_HPP_INCLUDED\n\n#include <boost/type_traits/integral_constant.hpp>\n\nnamespace boost {\n\n#if defined( __CODEGEARC__ )\n\n   template <class T>\n   struct is_volatile : public integral_constant<bool, __is_volatile(T)> {};\n\n#else\n\n   template <class T>\n   struct is_volatile : public false_type {};\n   template <class T> struct is_volatile<T volatile> : public true_type{};\n   template <class T, size_t N> struct is_volatile<T volatile[N]> : public true_type{};\n   template <class T> struct is_volatile<T volatile[]> : public true_type{};\n\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_IS_VOLATILE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/make_signed.hpp",
    "content": "\n//  (C) Copyright John Maddock 2007.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_MAKE_SIGNED_HPP_INCLUDED\n#define BOOST_TT_MAKE_SIGNED_HPP_INCLUDED\n\n#include <boost/type_traits/conditional.hpp>\n#include <boost/type_traits/is_integral.hpp>\n#include <boost/type_traits/is_signed.hpp>\n#include <boost/type_traits/is_unsigned.hpp>\n#include <boost/type_traits/is_enum.hpp>\n#include <boost/type_traits/is_same.hpp>\n#include <boost/type_traits/remove_cv.hpp>\n#include <boost/type_traits/is_const.hpp>\n#include <boost/type_traits/is_volatile.hpp>\n#include <boost/type_traits/add_const.hpp>\n#include <boost/type_traits/add_volatile.hpp>\n#include <boost/static_assert.hpp>\n\nnamespace boost {\n\ntemplate <class T>\nstruct make_signed\n{\nprivate:\n   BOOST_STATIC_ASSERT_MSG(( ::boost::is_integral<T>::value || ::boost::is_enum<T>::value), \"The template argument to make_signed must be an integer or enum type.\");\n   BOOST_STATIC_ASSERT_MSG(!(::boost::is_same<typename remove_cv<T>::type, bool>::value), \"The template argument to make_signed must not be the type bool.\");\n\n   typedef typename remove_cv<T>::type t_no_cv;\n   typedef typename conditional<\n      (::boost::is_signed<T>::value\n      && ::boost::is_integral<T>::value\n      && ! ::boost::is_same<t_no_cv, char>::value\n      && ! ::boost::is_same<t_no_cv, wchar_t>::value\n      && ! ::boost::is_same<t_no_cv, bool>::value),\n      T,\n      typename conditional<\n         (::boost::is_integral<T>::value\n         && ! ::boost::is_same<t_no_cv, char>::value\n         && ! ::boost::is_same<t_no_cv, wchar_t>::value\n         && ! 
::boost::is_same<t_no_cv, bool>::value),\n         typename conditional<\n            is_same<t_no_cv, unsigned char>::value,\n            signed char,\n            typename conditional<\n               is_same<t_no_cv, unsigned short>::value,\n               signed short,\n               typename conditional<\n                  is_same<t_no_cv, unsigned int>::value,\n                  int,\n                  typename conditional<\n                     is_same<t_no_cv, unsigned long>::value,\n                     long,\n#if defined(BOOST_HAS_LONG_LONG)\n#ifdef BOOST_HAS_INT128\n                     typename conditional<\n                        sizeof(t_no_cv) == sizeof(boost::long_long_type), \n                        boost::long_long_type, \n                        boost::int128_type\n                     >::type\n#else\n                     boost::long_long_type\n#endif\n#elif defined(BOOST_HAS_MS_INT64)\n                     __int64\n#else\n                     long\n#endif\n                  >::type\n               >::type\n            >::type\n         >::type,\n         // Not a regular integer type:\n         typename conditional<\n            sizeof(t_no_cv) == sizeof(unsigned char),\n            signed char,\n            typename conditional<\n               sizeof(t_no_cv) == sizeof(unsigned short),\n               signed short,\n               typename conditional<\n                  sizeof(t_no_cv) == sizeof(unsigned int),\n                  int,\n                  typename conditional<\n                     sizeof(t_no_cv) == sizeof(unsigned long),\n                     long,\n#if defined(BOOST_HAS_LONG_LONG)\n#ifdef BOOST_HAS_INT128\n                     typename conditional<\n                        sizeof(t_no_cv) == sizeof(boost::long_long_type), \n                        boost::long_long_type, \n                        boost::int128_type\n                     >::type\n#else\n                     boost::long_long_type\n#endif\n#elif 
defined(BOOST_HAS_MS_INT64)\n                     __int64\n#else\n                     long\n#endif\n                  >::type\n               >::type\n            >::type\n         >::type\n      >::type\n   >::type base_integer_type;\n   \n   // Add back any const qualifier:\n   typedef typename conditional<\n      is_const<T>::value,\n      typename add_const<base_integer_type>::type,\n      base_integer_type\n   >::type const_base_integer_type;\npublic:\n   // Add back any volatile qualifier:\n   typedef typename conditional<\n      is_volatile<T>::value,\n      typename add_volatile<const_base_integer_type>::type,\n      const_base_integer_type\n   >::type type;\n};\n\n} // namespace boost\n\n#endif // BOOST_TT_ADD_REFERENCE_HPP_INCLUDED\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/remove_const.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_REMOVE_CONST_HPP_INCLUDED\n#define BOOST_TT_REMOVE_CONST_HPP_INCLUDED\n\n#include <boost/config.hpp>\n#include <cstddef>\n#include <boost/detail/workaround.hpp>\n\nnamespace boost {\n\n   //  convert a type T to a non-cv-qualified type - remove_const<T>\n   template <class T> struct remove_const{ typedef T type; };\n   template <class T> struct remove_const<T const>{ typedef T type; };\n\n#if !defined(BOOST_NO_ARRAY_TYPE_SPECIALIZATIONS)\n   template <class T, std::size_t N> struct remove_const<T const[N]>{ typedef T type[N]; };\n#if !BOOST_WORKAROUND(__BORLANDC__, < 0x600) && !defined(__IBMCPP__) &&  !BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\n   template <class T> struct remove_const<T const[]>{ typedef T type[]; };\n#endif\n#endif\n\n} // namespace boost\n\n#endif // BOOST_TT_REMOVE_CONST_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/remove_cv.hpp",
    "content": "\n//  (C) Copyright Dave Abrahams, Steve Cleary, Beman Dawes, Howard\n//  Hinnant & John Maddock 2000.  \n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n\n#ifndef BOOST_TT_REMOVE_CV_HPP_INCLUDED\n#define BOOST_TT_REMOVE_CV_HPP_INCLUDED\n\n#include <boost/config.hpp>\n#include <boost/detail/workaround.hpp>\n#include <cstddef>\n\nnamespace boost {\n\n   //  convert a type T to a non-cv-qualified type - remove_cv<T>\ntemplate <class T> struct remove_cv{ typedef T type; };\ntemplate <class T> struct remove_cv<T const>{ typedef T type;  };\ntemplate <class T> struct remove_cv<T volatile>{ typedef T type; };\ntemplate <class T> struct remove_cv<T const volatile>{ typedef T type; };\n\n#if !defined(BOOST_NO_ARRAY_TYPE_SPECIALIZATIONS)\ntemplate <class T, std::size_t N> struct remove_cv<T const[N]>{ typedef T type[N]; };\ntemplate <class T, std::size_t N> struct remove_cv<T const volatile[N]>{ typedef T type[N]; };\ntemplate <class T, std::size_t N> struct remove_cv<T volatile[N]>{ typedef T type[N]; };\n#if !BOOST_WORKAROUND(__BORLANDC__, < 0x600) && !defined(__IBMCPP__) &&  !BOOST_WORKAROUND(__DMC__, BOOST_TESTED_AT(0x840))\ntemplate <class T> struct remove_cv<T const[]>{ typedef T type[]; };\ntemplate <class T> struct remove_cv<T const volatile[]>{ typedef T type[]; };\ntemplate <class T> struct remove_cv<T volatile[]>{ typedef T type[]; };\n#endif\n#endif\n\n\n} // namespace boost\n\n#endif // BOOST_TT_REMOVE_CV_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/type_traits/remove_reference.hpp",
    "content": "\n//  (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.\n//  Use, modification and distribution are subject to the Boost Software License,\n//  Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt).\n//\n//  See http://www.boost.org/libs/type_traits for most recent version including documentation.\n\n#ifndef BOOST_TT_REMOVE_REFERENCE_HPP_INCLUDED\n#define BOOST_TT_REMOVE_REFERENCE_HPP_INCLUDED\n\n#include <boost/config.hpp>\n#include <boost/detail/workaround.hpp>\n\nnamespace boost {\n\n\nnamespace detail{\n//\n// We can't filter out rvalue_references at the same level as\n// references or we get ambiguities from msvc:\n//\ntemplate <class T>\nstruct remove_rvalue_ref\n{\n   typedef T type;\n};\n#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES\ntemplate <class T>\nstruct remove_rvalue_ref<T&&>\n{\n   typedef T type;\n};\n#endif\n\n} // namespace detail\n\ntemplate <class T> struct remove_reference{ typedef typename boost::detail::remove_rvalue_ref<T>::type type; };\ntemplate <class T> struct remove_reference<T&>{ typedef T type; };\n\n#if defined(BOOST_ILLEGAL_CV_REFERENCES)\n// these are illegal specialisations; cv-qualifies applied to\n// references have no effect according to [8.3.2p1],\n// C++ Builder requires them though as it treats cv-qualified\n// references as distinct types...\ntemplate <class T> struct remove_reference<T&const>{ typedef T type; };\ntemplate <class T> struct remove_reference<T&volatile>{ typedef T type; };\ntemplate <class T> struct remove_reference<T&const volatile>{ typedef T type; };\n#endif\n\n\n} // namespace boost\n\n#endif // BOOST_TT_REMOVE_REFERENCE_HPP_INCLUDED\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/utility/declval.hpp",
    "content": "//  declval.hpp  -------------------------------------------------------------//\n\n//  Copyright 2010 Vicente J. Botet Escriba\n\n//  Distributed under the Boost Software License, Version 1.0.\n//  See http://www.boost.org/LICENSE_1_0.txt\n\n#ifndef BOOST_UTILITY_DECLVAL_HPP\n#define BOOST_UTILITY_DECLVAL_HPP\n\n#include <boost/type_traits/declval.hpp>\n\n#endif  // BOOST_UTILITY_DECLVAL_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/utility/detail/result_of_iterate.hpp",
    "content": "// Boost result_of library\n\n//  Copyright Douglas Gregor 2004. Use, modification and\n//  distribution is subject to the Boost Software License, Version\n//  1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n//  Copyright Daniel Walker, Eric Niebler, Michel Morin 2008-2012.\n//  Use, modification and distribution is subject to the Boost Software\n//  License, Version 1.0. (See accompanying file LICENSE_1_0.txt or\n//  copy at http://www.boost.org/LICENSE_1_0.txt)\n\n// For more information, see http://www.boost.org/libs/utility\n#if !defined(BOOST_PP_IS_ITERATING)\n# error Boost result_of - do not include this file!\n#endif\n\n// CWPro8 requires an argument in a function type specialization\n#if BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3002)) && BOOST_PP_ITERATION() == 0\n# define BOOST_RESULT_OF_ARGS void\n#else\n# define BOOST_RESULT_OF_ARGS BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T)\n#endif\n\n#if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x551))\ntemplate<typename F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct tr1_result_of<F(BOOST_RESULT_OF_ARGS)>\n    : mpl::if_<\n          mpl::or_< is_pointer<F>, is_member_function_pointer<F> >\n        , boost::detail::tr1_result_of_impl<\n            typename remove_cv<F>::type,\n            typename remove_cv<F>::type(BOOST_RESULT_OF_ARGS),\n            (boost::detail::has_result_type<F>::value)>\n        , boost::detail::tr1_result_of_impl<\n            F,\n            F(BOOST_RESULT_OF_ARGS),\n            (boost::detail::has_result_type<F>::value)> >::type { };\n#endif\n\n#ifdef BOOST_RESULT_OF_USE_DECLTYPE\ntemplate<typename F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct result_of<F(BOOST_RESULT_OF_ARGS)>\n    : detail::cpp0x_result_of<F(BOOST_RESULT_OF_ARGS)> { };\n#endif // BOOST_RESULT_OF_USE_DECLTYPE\n\n#ifdef BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK\ntemplate<typename F 
BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct result_of<F(BOOST_RESULT_OF_ARGS)>\n    : mpl::if_<mpl::or_<detail::has_result_type<F>, detail::has_result<F> >,\n               tr1_result_of<F(BOOST_RESULT_OF_ARGS)>,\n               detail::cpp0x_result_of<F(BOOST_RESULT_OF_ARGS)> >::type { };\n#endif // BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK\n\n#if defined(BOOST_RESULT_OF_USE_DECLTYPE) || defined(BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK)\n\nnamespace detail {\n\ntemplate<typename F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct cpp0x_result_of<F(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T))>\n    : mpl::if_<\n          is_member_function_pointer<F>\n        , detail::tr1_result_of_impl<\n            typename remove_cv<F>::type,\n            typename remove_cv<F>::type(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T)), false\n          >\n        , detail::cpp0x_result_of_impl<\n              F(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T))\n          >\n      >::type\n{};\n\n#ifdef BOOST_NO_SFINAE_EXPR\n\ntemplate<typename F>\nstruct BOOST_PP_CAT(result_of_callable_fun_2_, BOOST_PP_ITERATION());\n\ntemplate<typename R BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(), typename T)>\nstruct BOOST_PP_CAT(result_of_callable_fun_2_, BOOST_PP_ITERATION())<R(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(), T))> {\n    R operator()(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(), T)) const;\n    typedef result_of_private_type const &(*pfn_t)(...);\n    operator pfn_t() const volatile;\n};\n\ntemplate<typename F>\nstruct BOOST_PP_CAT(result_of_callable_fun_, BOOST_PP_ITERATION());\n\ntemplate<typename F>\nstruct BOOST_PP_CAT(result_of_callable_fun_, BOOST_PP_ITERATION())<F *>\n  : BOOST_PP_CAT(result_of_callable_fun_2_, BOOST_PP_ITERATION())<F>\n{};\n\ntemplate<typename F>\nstruct BOOST_PP_CAT(result_of_callable_fun_, BOOST_PP_ITERATION())<F &>\n  : BOOST_PP_CAT(result_of_callable_fun_2_, 
BOOST_PP_ITERATION())<F>\n{};\n\ntemplate<typename F>\nstruct BOOST_PP_CAT(result_of_select_call_wrapper_type_, BOOST_PP_ITERATION())\n  : mpl::eval_if<\n        is_class<typename remove_reference<F>::type>,\n        result_of_wrap_callable_class<F>,\n        mpl::identity<BOOST_PP_CAT(result_of_callable_fun_, BOOST_PP_ITERATION())<typename remove_cv<F>::type> >\n    >\n{};\n\ntemplate<typename F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(), typename T)>\nstruct BOOST_PP_CAT(result_of_is_callable_, BOOST_PP_ITERATION()) {\n    typedef typename BOOST_PP_CAT(result_of_select_call_wrapper_type_, BOOST_PP_ITERATION())<F>::type wrapper_t;\n    static const bool value = (\n        sizeof(result_of_no_type) == sizeof(detail::result_of_is_private_type(\n            (boost::declval<wrapper_t>()(BOOST_PP_ENUM_BINARY_PARAMS(BOOST_PP_ITERATION(), boost::declval<T, >() BOOST_PP_INTERCEPT)), result_of_weird_type())\n        ))\n    );\n    typedef mpl::bool_<value> type;\n};\n\ntemplate<typename F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct cpp0x_result_of_impl<F(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T)), true>\n    : lazy_enable_if<\n          BOOST_PP_CAT(result_of_is_callable_, BOOST_PP_ITERATION())<F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(), T)>\n        , cpp0x_result_of_impl<F(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T)), false>\n      >\n{};\n\ntemplate<typename F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct cpp0x_result_of_impl<F(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T)), false>\n{\n  typedef decltype(\n    boost::declval<F>()(\n      BOOST_PP_ENUM_BINARY_PARAMS(BOOST_PP_ITERATION(), boost::declval<T, >() BOOST_PP_INTERCEPT)\n    )\n  ) type;\n};\n\n#else // BOOST_NO_SFINAE_EXPR\n\ntemplate<typename F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct cpp0x_result_of_impl<F(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T)),\n                            typename 
result_of_always_void<decltype(\n                                boost::declval<F>()(\n                                    BOOST_PP_ENUM_BINARY_PARAMS(BOOST_PP_ITERATION(), boost::declval<T, >() BOOST_PP_INTERCEPT)\n                                )\n                            )>::type> {\n  typedef decltype(\n    boost::declval<F>()(\n      BOOST_PP_ENUM_BINARY_PARAMS(BOOST_PP_ITERATION(), boost::declval<T, >() BOOST_PP_INTERCEPT)\n    )\n  ) type;\n};\n\n#endif // BOOST_NO_SFINAE_EXPR\n\n} // namespace detail\n\n#else // defined(BOOST_RESULT_OF_USE_DECLTYPE) || defined(BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK)\n\n#if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x551))\ntemplate<typename F BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct result_of<F(BOOST_RESULT_OF_ARGS)>\n    : tr1_result_of<F(BOOST_RESULT_OF_ARGS)> { };\n#endif\n\n#endif // defined(BOOST_RESULT_OF_USE_DECLTYPE)\n\n#undef BOOST_RESULT_OF_ARGS\n\n#if BOOST_PP_ITERATION() >= 1\n\nnamespace detail {\n\ntemplate<typename R,  typename FArgs BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct tr1_result_of_impl<R (*)(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T)), FArgs, false>\n{\n  typedef R type;\n};\n\ntemplate<typename R,  typename FArgs BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct tr1_result_of_impl<R (&)(BOOST_PP_ENUM_PARAMS(BOOST_PP_ITERATION(),T)), FArgs, false>\n{\n  typedef R type;\n};\n\n#if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x551))\ntemplate<typename R, typename FArgs BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct tr1_result_of_impl<R (T0::*)\n                     (BOOST_PP_ENUM_SHIFTED_PARAMS(BOOST_PP_ITERATION(),T)),\n                 FArgs, false>\n{\n  typedef R type;\n};\n\ntemplate<typename R, typename FArgs BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct tr1_result_of_impl<R (T0::*)\n                     
(BOOST_PP_ENUM_SHIFTED_PARAMS(BOOST_PP_ITERATION(),T))\n                     const,\n                 FArgs, false>\n{\n  typedef R type;\n};\n\ntemplate<typename R, typename FArgs BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct tr1_result_of_impl<R (T0::*)\n                     (BOOST_PP_ENUM_SHIFTED_PARAMS(BOOST_PP_ITERATION(),T))\n                     volatile,\n                 FArgs, false>\n{\n  typedef R type;\n};\n\ntemplate<typename R, typename FArgs BOOST_PP_ENUM_TRAILING_PARAMS(BOOST_PP_ITERATION(),typename T)>\nstruct tr1_result_of_impl<R (T0::*)\n                     (BOOST_PP_ENUM_SHIFTED_PARAMS(BOOST_PP_ITERATION(),T))\n                     const volatile,\n                 FArgs, false>\n{\n  typedef R type;\n};\n#endif\n\n}\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/utility/enable_if.hpp",
    "content": "/*\n * Copyright (c) 2014 Glen Fernandes\n *\n * Distributed under the Boost Software License, Version 1.0. (See\n * accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt)\n */\n\n#ifndef BOOST_UTILITY_ENABLE_IF_HPP\n#define BOOST_UTILITY_ENABLE_IF_HPP\n\n// The header file at this path is deprecated;\n// use boost/core/enable_if.hpp instead.\n\n#include <boost/core/enable_if.hpp>\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/utility/result_of.hpp",
    "content": "// Boost result_of library\n\n//  Copyright Douglas Gregor 2004. Use, modification and\n//  distribution is subject to the Boost Software License, Version\n//  1.0. (See accompanying file LICENSE_1_0.txt or copy at\n//  http://www.boost.org/LICENSE_1_0.txt)\n\n// For more information, see http://www.boost.org/libs/utility\n#ifndef BOOST_RESULT_OF_HPP\n#define BOOST_RESULT_OF_HPP\n\n#include <boost/config.hpp>\n#include <boost/preprocessor/cat.hpp>\n#include <boost/preprocessor/iteration/iterate.hpp>\n#include <boost/preprocessor/repetition/enum_params.hpp>\n#include <boost/preprocessor/repetition/enum_trailing_params.hpp>\n#include <boost/preprocessor/repetition/enum_binary_params.hpp>\n#include <boost/preprocessor/repetition/enum_shifted_params.hpp>\n#include <boost/preprocessor/facilities/intercept.hpp>\n#include <boost/detail/workaround.hpp>\n#include <boost/mpl/has_xxx.hpp>\n#include <boost/mpl/if.hpp>\n#include <boost/mpl/eval_if.hpp>\n#include <boost/mpl/bool.hpp>\n#include <boost/mpl/identity.hpp>\n#include <boost/mpl/or.hpp>\n#include <boost/type_traits/is_class.hpp>\n#include <boost/type_traits/is_pointer.hpp>\n#include <boost/type_traits/is_member_function_pointer.hpp>\n#include <boost/type_traits/remove_cv.hpp>\n#include <boost/type_traits/remove_reference.hpp>\n#include <boost/utility/declval.hpp>\n#include <boost/utility/enable_if.hpp>\n\n#ifndef BOOST_RESULT_OF_NUM_ARGS\n#  define BOOST_RESULT_OF_NUM_ARGS 16\n#endif\n\n// Use the decltype-based version of result_of by default if the compiler\n// supports N3276 <http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2011/n3276.pdf>.\n// The user can force the choice by defining BOOST_RESULT_OF_USE_DECLTYPE,\n// BOOST_RESULT_OF_USE_TR1, or BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK but not more than one!\n#if (defined(BOOST_RESULT_OF_USE_DECLTYPE) && defined(BOOST_RESULT_OF_USE_TR1)) || \\\n    (defined(BOOST_RESULT_OF_USE_DECLTYPE) && 
defined(BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK)) || \\\n    (defined(BOOST_RESULT_OF_USE_TR1) && defined(BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK))\n#  error More than one of BOOST_RESULT_OF_USE_DECLTYPE, BOOST_RESULT_OF_USE_TR1 and \\\n  BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK cannot be defined at the same time.\n#endif\n\n#if defined(BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK) && defined(BOOST_MPL_CFG_NO_HAS_XXX_TEMPLATE)\n#  error Cannot fallback to decltype if BOOST_MPL_CFG_NO_HAS_XXX_TEMPLATE is not defined.\n#endif\n\n#ifndef BOOST_RESULT_OF_USE_TR1\n#  ifndef BOOST_RESULT_OF_USE_DECLTYPE\n#    ifndef BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK\n#      ifndef BOOST_NO_CXX11_DECLTYPE_N3276 // this implies !defined(BOOST_NO_CXX11_DECLTYPE)\n#        define BOOST_RESULT_OF_USE_DECLTYPE\n#      else\n#        define BOOST_RESULT_OF_USE_TR1\n#      endif\n#    endif\n#  endif\n#endif\n\nnamespace boost {\n\ntemplate<typename F> struct result_of;\ntemplate<typename F> struct tr1_result_of; // a TR1-style implementation of result_of\n\n#if !defined(BOOST_NO_SFINAE)\nnamespace detail {\n\nBOOST_MPL_HAS_XXX_TRAIT_DEF(result_type)\n\n// Work around a nvcc bug by only defining has_result when it's needed.\n#ifdef BOOST_RESULT_OF_USE_TR1_WITH_DECLTYPE_FALLBACK\nBOOST_MPL_HAS_XXX_TEMPLATE_DEF(result)\n#endif\n\ntemplate<typename F, typename FArgs, bool HasResultType> struct tr1_result_of_impl;\n\ntemplate<typename F> struct cpp0x_result_of;\n\n#ifdef BOOST_NO_SFINAE_EXPR\n\n// There doesn't seem to be any other way to turn this off such that the presence of\n// the user-defined operator,() below doesn't cause spurious warning all over the place,\n// so unconditionally turn it off.\n#if BOOST_MSVC\n#  pragma warning(disable: 4913) // user defined binary operator ',' exists but no overload could convert all operands, default built-in binary operator ',' used\n#endif\n\nstruct result_of_private_type {};\n\nstruct result_of_weird_type {\n  friend 
result_of_private_type operator,(result_of_private_type, result_of_weird_type);\n};\n\ntypedef char result_of_yes_type;      // sizeof(result_of_yes_type) == 1\ntypedef char (&result_of_no_type)[2]; // sizeof(result_of_no_type)  == 2\n\ntemplate<typename T>\nresult_of_no_type result_of_is_private_type(T const &);\nresult_of_yes_type result_of_is_private_type(result_of_private_type);\n\ntemplate<typename C>\nstruct result_of_callable_class : C {\n    result_of_callable_class();\n    typedef result_of_private_type const &(*pfn_t)(...);\n    operator pfn_t() const volatile;\n};\n\ntemplate<typename C>\nstruct result_of_wrap_callable_class {\n  typedef result_of_callable_class<C> type;\n};\n\ntemplate<typename C>\nstruct result_of_wrap_callable_class<C const> {\n  typedef result_of_callable_class<C> const type;\n};\n\ntemplate<typename C>\nstruct result_of_wrap_callable_class<C volatile> {\n  typedef result_of_callable_class<C> volatile type;\n};\n\ntemplate<typename C>\nstruct result_of_wrap_callable_class<C const volatile> {\n  typedef result_of_callable_class<C> const volatile type;\n};\n\ntemplate<typename C>\nstruct result_of_wrap_callable_class<C &> {\n  typedef typename result_of_wrap_callable_class<C>::type &type;\n};\n\ntemplate<typename F, bool TestCallability = true> struct cpp0x_result_of_impl;\n\n#else // BOOST_NO_SFINAE_EXPR\n\ntemplate<typename T>\nstruct result_of_always_void\n{\n  typedef void type;\n};\n\ntemplate<typename F, typename Enable = void> struct cpp0x_result_of_impl {};\n\n#endif // BOOST_NO_SFINAE_EXPR\n\ntemplate<typename F>\nstruct result_of_void_impl\n{\n  typedef void type;\n};\n\ntemplate<typename R>\nstruct result_of_void_impl<R (*)(void)>\n{\n  typedef R type;\n};\n\ntemplate<typename R>\nstruct result_of_void_impl<R (&)(void)>\n{\n  typedef R type;\n};\n\n// Determine the return type of a function pointer or pointer to member.\ntemplate<typename F, typename FArgs>\nstruct result_of_pointer\n  : tr1_result_of_impl<typename 
remove_cv<F>::type, FArgs, false> { };\n\ntemplate<typename F, typename FArgs>\nstruct tr1_result_of_impl<F, FArgs, true>\n{\n  typedef typename F::result_type type;\n};\n\ntemplate<typename FArgs>\nstruct is_function_with_no_args : mpl::false_ {};\n\ntemplate<typename F>\nstruct is_function_with_no_args<F(void)> : mpl::true_ {};\n\ntemplate<typename F, typename FArgs>\nstruct result_of_nested_result : F::template result<FArgs>\n{};\n\ntemplate<typename F, typename FArgs>\nstruct tr1_result_of_impl<F, FArgs, false>\n  : mpl::if_<is_function_with_no_args<FArgs>,\n             result_of_void_impl<F>,\n             result_of_nested_result<F, FArgs> >::type\n{};\n\n} // end namespace detail\n\n#define BOOST_PP_ITERATION_PARAMS_1 (3,(0,BOOST_RESULT_OF_NUM_ARGS,<boost/utility/detail/result_of_iterate.hpp>))\n#include BOOST_PP_ITERATE()\n\n#else\n#  define BOOST_NO_RESULT_OF 1\n#endif\n\n}\n\n#endif // BOOST_RESULT_OF_HPP\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boost/version.hpp",
    "content": "//  Boost version.hpp configuration header file  ------------------------------//\n\n//  (C) Copyright John maddock 1999. Distributed under the Boost\n//  Software License, Version 1.0. (See accompanying file\n//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n//  See http://www.boost.org/libs/config for documentation\n\n#ifndef BOOST_VERSION_HPP\n#define BOOST_VERSION_HPP\n\n//\n//  Caution: this is the only Boost header that is guaranteed\n//  to change with every Boost release. Including this header\n//  will cause a recompile every time a new Boost version is\n//  used.\n//\n//  BOOST_VERSION % 100 is the patch level\n//  BOOST_VERSION / 100 % 1000 is the minor version\n//  BOOST_VERSION / 100000 is the major version\n\n#define BOOST_VERSION 106000\n\n//\n//  BOOST_LIB_VERSION must be defined to be the same as BOOST_VERSION\n//  but as a *string* in the form \"x_y[_z]\" where x is the major version\n//  number, y is the minor version number, and z is the patch level if not 0.\n//  This is used by <config/auto_link.hpp> to select which library version to link to.\n\n#define BOOST_LIB_VERSION \"1_60\"\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/boostqueue.h",
    "content": "#pragma once\n\n#include <utility>\n\n#include \"boost/lockfree/queue.hpp\"\n#include \"wrappers.h\"\n\ntemplate<typename T>\nstruct BoostQueueWrapper\n{\n\tpublic:\n\ttypedef DummyToken producer_token_t;\n\ttypedef DummyToken consumer_token_t;\n\t\npublic:\n  BoostQueueWrapper() : q(/* starting capacity */ 16384) { }\n\n\ttemplate<typename U>\n\tinline bool enqueue(U&& item)\n\t{\n\t\treturn q.push(std::forward<U>(item));\n\t}\n\t\n\tinline bool try_dequeue(T& item)\n\t{\n\t\treturn q.pop(item);\n\t}\n\t\n\t// Dummy token methods (not used)\n\tbool enqueue(producer_token_t const&, T const&) { return false; }\n\tbool try_enqueue(producer_token_t, T const&) { return false; }\n\tbool try_dequeue(consumer_token_t, T& item) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(It, size_t) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(producer_token_t const&, It, size_t) { return false; }\n\ttemplate<typename It> size_t try_dequeue_bulk(It, size_t) { return 0; }\n\ttemplate<typename It> size_t try_dequeue_bulk(consumer_token_t, It, size_t) { return 0; }\n\t\nprivate:\n\tboost::lockfree::queue<T> q;\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/cpuid.cpp",
    "content": "#include <cstdint>\n#include <cstring>\n#include <cstdio>\n#include <cstdlib>\n\n#include \"cpuid.h\"\n\n#ifdef _WIN32\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\n\n// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683194(v=vs.85).aspx\ntypedef BOOL (WINAPI *LPFN_GLPI)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);\n\n// Helper function to count set bits in the processor mask.\nint countBitsSet(ULONG_PTR bitMask)\n{\n\tint result = 0;\n\twhile (bitMask != 0) {\n\t\tresult += (int)(bitMask & 1);\n\t\tbitMask >>= 1;\n\t}\n\treturn result;\n}\n\nbool getProcessorInfoFromOS(int& cpus, int& cores, int& logicalCores, double& clockSpeed)\n{\n\tcpus = 0;\n\tcores = 0;\n\tlogicalCores = 0;\n\tclockSpeed = 0;\n\t\n\t// Clock speed\n\tHKEY hKey;\n\tif (RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT(\"HARDWARE\\\\DESCRIPTION\\\\System\\\\CentralProcessor\\\\0\"), 0, KEY_EXECUTE, &hKey) == ERROR_SUCCESS) {\n\t\tDWORD type = REG_DWORD;\n\t\tDWORD val;\n\t\tDWORD cbData = sizeof(val);\n\t\tif (RegQueryValueEx(hKey, TEXT(\"~MHz\"), NULL, &type, (LPBYTE)&val, &cbData) == ERROR_SUCCESS) {\n\t\t\tif (type == REG_DWORD && cbData == sizeof(DWORD)) {\n\t\t\t\tclockSpeed = val / 1000.0;\n\t\t\t}\n\t\t}\n\t\t\n\t}\n\tif (clockSpeed == 0) {\n\t\t// Can't access registry, try QueryPerformanceFrequency (nearly always same speed as CPU)\n\t\tLARGE_INTEGER f;\n\t\tif (!QueryPerformanceFrequency(&f)) {\n\t\t\treturn false;\n\t\t}\n\t\tclockSpeed = f.QuadPart / 1000.0 / 1000.0;\n\t}\n\t\n\t// Everything else\n\tLPFN_GLPI glpi;\n\tglpi = (LPFN_GLPI)GetProcAddress(GetModuleHandle(TEXT(\"kernel32\")), \"GetLogicalProcessorInformation\");\n\tif (glpi == NULL) {\n\t\treturn false;\n\t}\n\t\n\tPSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;\n\tDWORD bufferLength = 0;\n\tif (glpi(buffer, &bufferLength) == TRUE) {\n    \treturn false;\n    }\n    \n\twhile (GetLastError() == ERROR_INSUFFICIENT_BUFFER) {\n\t\tif (buffer != NULL) 
{\n\t\t\tstd::free(buffer);\n\t\t}\n\t\tbuffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)std::malloc(bufferLength);\n\t\tif (buffer == NULL) {\n\t\t\treturn false;\n\t\t}\n\t\tif (glpi(buffer, &bufferLength) == TRUE) {\n\t\t\tif (bufferLength / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) != bufferLength) {\n\t\t\t\t// sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) must have changed (different from at compile time)\n\t\t\t\tstd::free(buffer);\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\t\n\t\t\tauto end = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)((char*)buffer + bufferLength);\n\t\t\tfor (auto ptr = buffer; ptr != end; ++ptr) {\n\t\t\t\tswitch (ptr->Relationship) {\n\t\t\t\tcase RelationProcessorCore:\n\t\t\t\t\t++cores;\n\t\t\t\t\tlogicalCores += countBitsSet(ptr->ProcessorMask);\n\t\t\t\t\tbreak;\n\t\t\t\tcase RelationProcessorPackage:\n\t\t\t\t\t++cpus;\n\t\t\t\t\tbreak;\n\t\t\t\tdefault:\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tstd::free(buffer);\n\t\t\treturn true;\n\t\t}\n\t}\n\tif (buffer != NULL) {\n\t\tstd::free(buffer);\n\t}\n\treturn false;\n}\n#else\n// TODO\nbool getProcessorInfoFromOS(int& cpus, int& cores, int& logicalCores, double& clockSpeed)\n{\n\treturn false;\n}\n#endif\n\n\n#if defined(__x86_64__) || defined(_M_AMD64) || defined(__amd64__) || defined (_M_X64) || defined(_M_IX86) || defined(__i386__)\n#define MOODYCAMEL_X86_OR_X64\n#endif\n\n#ifdef MOODYCAMEL_X86_OR_X64\nstruct CPUIDInfo\n{\n\tstd::uint32_t data[4];\n};\n\n#ifdef _MSC_VER\n#include <intrin.h>\n\ninline CPUIDInfo cpuid(std::uint32_t eax)\n{\n\tCPUIDInfo info;\n\t__cpuidex((int*)&info.data[0], eax, 0);\n\treturn info;\n}\n#else\n// Assume GCC-compatible inline assembly syntax\ninline CPUIDInfo cpuid(std::uint32_t eax)\n{\n\tCPUIDInfo info;\n\tasm volatile(\"cpuid\"\n\t\t: \"=a\" (info.data[0]), \"=b\" (info.data[1]), \"=c\" (info.data[2]), \"=d\" (info.data[3])\n\t\t: \"a\" (eax), \"c\" (0));\n\treturn info;\n}\n#endif\n#endif\t\t// 
MOODYCAMEL_X86_OR_X64\n\nnamespace moodycamel\n{\n\tconst char* getCPUString()\n\t{\n\t\t// TODO: Support non-x86/-x64 architectures\n#ifdef MOODYCAMEL_X86_OR_X64\n\t\tstatic char buf[128] = { 0 };\n\t\tif (buf[0] != 0) {\n\t\t\treturn buf;\n\t\t}\n\t\t\n\t\tCPUIDInfo info = cpuid(0x80000000);\n\t\tstd::uint32_t ex = info.data[0];\n\t\tfor (std::uint32_t i = 0; i + 0x80000002 <= ex && i != 3; ++i) {\n\t\t\t*(reinterpret_cast<CPUIDInfo*>(buf) + i) = cpuid(i + 0x80000002);\n\t\t}\n\t\t\n\t\tif (buf[0] == 0) {\n\t\t\tstrcpy(buf, UNKNOWN_CPU_STRING);\n\t\t\treturn buf;\n\t\t}\n\t\t\n\t\tinfo = cpuid(0);\n\t\tif (info.data[0] < 1) {\n\t\t\t// cpuid(1) not supported\n\t\t\treturn buf;\n\t\t}\n\t\t\n\t\t// Add number of CPUs, cores, HT, and GHz\n\t\tinfo = cpuid(1);\n\t\tbool ht = ((info.data[3] >> 28) & 1) == 1;\t// Note: This is also 1 on most multi-core systems, even if there's no HT\n\t\tint cpus, cores, logicalCores;\n\t\tdouble clockSpeed;\n\t\tif (!getProcessorInfoFromOS(cpus, cores, logicalCores, clockSpeed)) {\n\t\t\treturn buf;\n\t\t}\n\t\t// Strip @ nGHz if any, since we re-calculate this ourselves\n\t\tint atIndex;\n\t\tfor (atIndex = (int)std::strlen(buf) - 1; atIndex != -1; --atIndex) {\n\t\t\tif (buf[atIndex] == '@') {\n\t\t\t\tif (atIndex > 0 && buf[atIndex - 1] == ' ') {\n\t\t\t\t\t--atIndex;\n\t\t\t\t}\n\t\t\t\tbuf[atIndex] = '\\0';\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tchar* str = buf + std::strlen(buf);\n\t\tif (cpus > 1) {\n\t\t\t// Assume identical CPUs\n\t\t\tlogicalCores /= cpus;\n\t\t\tcores /= cpus;\n\t\t\tstd::sprintf(str, \" x%d\", cpus);\n\t\t\tstr += strlen(str);\n\t\t}\n\t\tht = ht && logicalCores != cores;\n\t\tstd::sprintf(str, \" with %d core%s%s @ %.1fGHz%s\", cores, cores == 1 ? \"\" : \"s\", ht ? \" (HyperThreaded)\" : \"\", clockSpeed, cpus > 1 ? \" each\" : \"\");\n\t\t\n\t\treturn buf;\n#else\n\t\treturn UNKNOWN_CPU_STRING;\n#endif\n\t}\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/cpuid.h",
    "content": "#pragma once\n\nnamespace moodycamel\n{\n\tstatic const char UNKNOWN_CPU_STRING[] = \"unknown processor\";\n\t\n\t// Returns a string representing the system's CPU info.\n\t// Assumes an x86/x64 architecture (returns UNKNOWN_CPU_STRING otherwise).\n\t// Returned string is valid in perpetuity.\n\t// Not thread safe.\n\tconst char* getCPUString();\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/extract_graph_data.py",
    "content": "#!/usr/bin/python\n\n# A simple script that reads the last run from the benchmark log file,\n# and creates two CSV files containing the data required to make pretty\n# performance graphs for enqueuing and dequeueing.\n# The x-axis of the graph is meant to be the number of threads (first column), with\n# the y-axis representing thousands of operations/second/thread (one column per queue).\n\nimport sys\nimport re\n\n\ndef extract(bench, log, data, hasBulk = True):\n\t# data = { thread_count: [ locked, boost, tbb, moodycamel, moodycamel_tok, moodycamel_bulk ], ... }\n\t\n\tdef do_extract(bench, queue_header):\n\t\tblock = re.search(r'^' + bench + r':.*?' + queue_header + r'\\s*(.*?)\\s*^\\s*Operations per second', log, re.S | re.M | re.I).group(1)\n\t\tfor threads, opsst in re.findall(r'^\\s*(\\d+)\\s+thread.*?([0-9\\.]+[kMG]?\\s*$)', block, re.M | re.I):\n\t\t\tthreads = int(threads)\n\t\t\tmultiplier = 1\n\t\t\tif opsst[-1] in 'kMG':\n\t\t\t\tmultiplier = { 'k': 1000, 'M': 1000000, 'G': 1000000000 }[opsst[-1]]\n\t\t\t\topsst = opsst[:-1]\n\t\t\topsst = int(float(opsst) * multiplier)\n\t\t\tif threads not in data:\n\t\t\t\tdata[threads] = []\n\t\t\tdata[threads].append(opsst)\n\t\n\tdo_extract(bench, 'LockBasedQueue')\n\tdo_extract(bench, 'boost::lockfree::queue')\n\tdo_extract(bench, 'tbb::concurrent_queue')\n\tdo_extract(bench, 'Without tokens')\n\tdo_extract(bench, 'With tokens')\n\tif hasBulk:\n\t\tdo_extract(bench + ' bulk', 'With tokens')\n\n\ndef write_csv(data, path, hasBulk = True):\n\twith open(path, 'w') as f:\n\t\tf.write('threads,\"std::queue + std::mutex\",\"boost::lockfree::queue\",\"tbb::concurrent_queue\",\"moodycamel::ConcurrentQueue (no tokens)\",\"moodycamel::ConcurrentQueue\",' + ('\"moodycamel::ConcurrentQueue (bulk)\"' if hasBulk else '') + '\\n')\n\t\tfor threads in sorted(data.keys()):\n\t\t\tf.write(str(threads))\n\t\t\tfor opsst in data[threads]:\n\t\t\t\tf.write(',' + str(opsst))\n\t\t\tf.write('\\n')\n\n\ntry:\n\tfilename 
= 'benchmarks.log' if len(sys.argv) < 2 else sys.argv[1]\n\twith open(filename, 'r') as f:\n\t\tpieces = f.read().split('--- New run')\n\t\tlog = pieces[-1]\n\t\t\n\t\tenq_data = { }\n\t\textract('only enqueue', log, enq_data)\n\t\t\n\t\tdeq_data = { }\n\t\textract('only dequeue', log, deq_data)\n\t\t\n\t\theavy_data = { }\n\t\textract('heavy concurrent', log, heavy_data, False)\n\t\t\n\t\twrite_csv(enq_data, 'enqueue.csv')\n\t\twrite_csv(deq_data, 'dequeue.csv')\n\t\twrite_csv(heavy_data, 'heavy.csv', False)\nexcept IOError:\n\tprint 'Usage: ' + sys.argv[0] + ' path/to/benchmarks.log'\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/lockbasedqueue.h",
    "content": "// ©2013-2014 Cameron Desrochers.\n// Distributed under the simplified BSD license (see the LICENSE file that\n// should have come with this file).\n\n#pragma once\n\n#include \"wrappers.h\"\n#include <mutex>\n\n// Naïve implementation of a simple lock-based queue. A std::mutex is obtained for every\n// method. Note that while the queue size is not fixed, each enqueue operation allocates\n// memory, and each dequeue frees memory.\ntemplate<typename T>\nclass LockBasedQueue\n{\npublic:\n\ttypedef DummyToken producer_token_t;\n\ttypedef DummyToken consumer_token_t;\n\t\npublic:\n\tLockBasedQueue()\n\t{\n\t\ttail = nullptr;\n\t\thead = nullptr;\n\t}\n\t\n\t~LockBasedQueue()\n\t{\n\t\twhile (head != nullptr) {\n\t\t\tNode* next = head->next;\n\t\t\tdelete head;\n\t\t\thead = next;\n\t\t}\n\t}\n\t\n\ttemplate<typename U>\n\tinline bool enqueue(U&& item)\n\t{\n\t\tNode* node = new Node(item);\n\t\t\n\t\tstd::lock_guard<std::mutex> guard(mutex);\n\t\tif (tail == nullptr) {\n\t\t\thead = tail = node;\n\t\t}\n\t\telse {\n\t\t\ttail->next = node;\n\t\t\ttail = node;\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tinline bool try_dequeue(T& item)\n\t{\n\t\tstd::lock_guard<std::mutex> guard(mutex);\n\t\tif (head == nullptr) {\n\t\t\treturn false;\n\t\t}\n\t\telse {\n\t\t\titem = std::move(head->item);\n\t\t\tNode* next = head->next;\n\t\t\tdelete head;\n\t\t\thead = next;\n\t\t\tif (head == nullptr) {\n\t\t\t\ttail = nullptr;\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t}\n\t\n\t// Dummy token methods (not used)\n\tbool enqueue(producer_token_t const&, T const&) { return false; }\n\tbool try_enqueue(producer_token_t, T const&) { return false; }\n\tbool try_dequeue(consumer_token_t, T& item) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(It, size_t) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(producer_token_t const&, It, size_t) { return false; }\n\ttemplate<typename It> size_t try_dequeue_bulk(It, size_t) { return 0; }\n\ttemplate<typename 
It> size_t try_dequeue_bulk(consumer_token_t, It, size_t) { return 0; }\n\t\nprivate:\n\tstruct Node\n\t{\n\t\tT item;\n\t\tNode* next;\n\t\t\n\t\ttemplate<typename U>\n\t\tNode(U&& item)\n\t\t\t: item(std::forward<U>(item)), next(nullptr)\n\t\t{\n\t\t}\n\t};\n\t\n\tstd::mutex mutex;\n\tNode* head;\n\tNode* tail;\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/simplelockfree.h",
    "content": "// ©2013-2014 Cameron Desrochers.\n// Distributed under the simplified BSD license (see the LICENSE file that\n// should have come with this file).\n\n#pragma once\n\n#include \"wrappers.h\"\n#include <atomic>\n#include <cstdint>\n\n#if defined(_MSC_VER) && _MSC_VER < 1900\n#define alignas(T)\n#endif\n\n// Fairly simple, yet correct, implementation of a simple lock-free queue based on linked pointers with CAS\ntemplate<typename T>\nclass SimpleLockFreeQueue\n{\npublic:\n\ttypedef DummyToken producer_token_t;\n\ttypedef DummyToken consumer_token_t;\n\t\n\t// Total maximum capacity: 2**39 (half a terabyte's worth -- off-by-one aligned indices)\n\tstatic const int UBER_BLOCKS = 256;\n\tstatic const int UBER_BLOCK_SIZE = 256;\n\tstatic const int ULTRA_BLOCK_SIZE = 256;\n\tstatic const int SUPER_BLOCK_SIZE = 256;\n\tstatic const int BLOCK_SIZE = 128;\n\t\nprivate:\n\tstatic const uint64_t VERSION_MASK = 0xFFFFFF0000000000ULL;\n\tstatic const uint64_t VERSION_INCR = 0x0000010000000000ULL;\n\tstatic const uint64_t UBER_BLOCK_IDX_MASK\t= 0xFF00000000ULL;\n\tstatic const uint64_t UBER_BLOCK_MASK \t\t= 0x00FF000000ULL;\n\tstatic const uint64_t ULTRA_BLOCK_MASK \t\t= 0x0000FF0000ULL;\n\tstatic const uint64_t SUPER_BLOCK_MASK \t\t= 0x000000FF00ULL;\n\tstatic const uint64_t BLOCK_MASK \t\t\t= 0x00000000FEULL;\n\t\n\tstatic const uint64_t UBER_BLOCK_IDX_SHIFT\t= 32;\n\tstatic const uint64_t UBER_BLOCK_SHIFT \t\t= 24;\n\tstatic const uint64_t ULTRA_BLOCK_SHIFT\t\t= 16;\n\tstatic const uint64_t SUPER_BLOCK_SHIFT\t\t= 8;\n\tstatic const uint64_t BLOCK_SHIFT \t\t\t= 1;\n\t\n\ttypedef std::uint64_t idx_t;\n\t\npublic:\n\tSimpleLockFreeQueue()\n\t\t: nextNodeIdx(2), freeListHead(0)\n\t{\n\t\t// Invariants: Head and tail are never null\n\t\tauto initialNode = allocate_blank_node();\n\t\thead.store(set_consumed_flag(initialNode), std::memory_order_relaxed);\n\t\ttail.store(initialNode, 
std::memory_order_relaxed);\n\t\tstd::atomic_thread_fence(std::memory_order_seq_cst);\n\t}\n\t\n\t~SimpleLockFreeQueue()\n\t{\n\t\tstd::atomic_thread_fence(std::memory_order_seq_cst);\n\t\tidx_t idx = head.load(std::memory_order_relaxed);\n\t\tif (is_consumed(idx)) {\n\t\t\tidx = clear_consumed_flag(idx);\n\t\t\tauto node = get_node_at(idx);\n\t\t\tauto next = node->next.load(std::memory_order_relaxed);\n\t\t\tnode->~Node();\n\t\t\tidx = next;\n\t\t}\n\t\twhile (idx != 0) {\n\t\t\tauto node = get_node_at(idx);\n\t\t\tauto next = node->next.load(std::memory_order_relaxed);\n\t\t\tnode->item()->~T();\n\t\t\tnode->~Node();\n\t\t\tidx = next;\n\t\t}\n\t\t\n\t\tidx = freeListHead.load(std::memory_order_relaxed);\n\t\twhile (idx != 0) {\n\t\t\tauto node = get_node_at(idx);\n\t\t\tauto next = node->next.load(std::memory_order_relaxed);\n\t\t\tnode->~Node();\n\t\t\tidx = next;\n\t\t}\n\t}\n\t\n\t\n\ttemplate<typename U>\n\tinline bool enqueue(U&& item)\n\t{\n\t\tidx_t nodeIdx = allocate_node_for(std::forward<U>(item));\n\t\t\n\t\tauto tail_ = tail.load(std::memory_order_relaxed);\n\t\twhile (!tail.compare_exchange_weak(tail_, nodeIdx, std::memory_order_release, std::memory_order_relaxed))\n\t\t\tcontinue;\n\t\tget_node_at(tail_)->next.store(nodeIdx, std::memory_order_release);\n\t\t\n\t\treturn true;\n\t}\n\t\n\tinline bool try_dequeue(T& item)\n\t{\n\t\twhile (true) {\n\t\t\tauto rawHead_ = head.load(std::memory_order_acquire);\n\t\t\tauto head_ = clear_consumed_flag(rawHead_);\n\t\t\tauto headNode = get_node_at(head_);\n\t\t\tauto next = headNode->next.load(std::memory_order_relaxed);\n\t\t\tif (next == 0) {\n\t\t\t\t// Can't move head (that would make head null), but can try to dequeue the node at head anyway\n\t\t\t\tif (is_consumed(rawHead_)) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (head.compare_exchange_strong(head_, set_consumed_flag(head_), std::memory_order_release, std::memory_order_relaxed)) {\n\t\t\t\t\t// Whee, we own the right to dequeue 
this item\n\t\t\t\t\titem = std::move(*headNode->item());\n\t\t\t\t\theadNode->item()->~T();\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Remove node whether it's already been consumed or not; if it hasn't been consumed, consume it!\n\t\t\t\t\n\t\t\t\t// head_->next can't possibly change, since once it's not null nobody writes to it (and ABA is avoided with versioning)\n\t\t\t\tif (head.compare_exchange_weak(rawHead_, next, std::memory_order_acq_rel, std::memory_order_relaxed)) {\n\t\t\t\t\t// Aha, we successfully moved the head. But does it have anything in it?\n\t\t\t\t\tif (!is_consumed(rawHead_)) {\n\t\t\t\t\t\titem = std::move(*headNode->item());\n\t\t\t\t\t\theadNode->item()->~T();\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tadd_node_to_free_list(head_, headNode);\n\t\t\t\t\t\n\t\t\t\t\tif (!is_consumed(rawHead_)) {\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\t// Dummy token methods (not used)\n\tbool enqueue(producer_token_t const&, T const&) { return false; }\n\tbool try_enqueue(producer_token_t, T const&) { return false; }\n\tbool try_dequeue(consumer_token_t, T& item) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(It, size_t) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(producer_token_t const&, It, size_t) { return false; }\n\ttemplate<typename It> size_t try_dequeue_bulk(It, size_t) { return 0; }\n\ttemplate<typename It> size_t try_dequeue_bulk(consumer_token_t, It, size_t) { return 0; }\n\t\nprivate:\n\tstruct Node\n\t{\n\t\tstd::atomic<idx_t> next;\n\t\t\n\t\talignas(T)\n\t\tchar rawItem[sizeof(T)];\n\t\t\n\t\ttemplate<typename U>\n\t\tNode(U&& item)\n\t\t\t: next(0)\n\t\t{\n\t\t\tnew (this->item()) T(std::forward<U>(item));\n\t\t}\n\t\t\n\t\tNode()\n\t\t\t: next(0)\n\t\t{\n\t\t}\n\t\t\n\t\tinline T* item() { return reinterpret_cast<T*>(rawItem); }\n\t};\n\t\n\tstruct Block\n\t{\n\t\talignas(Node)\n\t\tchar nodes[sizeof(Node) * BLOCK_SIZE];\n\t\t\n\t\tinline char* 
node_pos(idx_t idx) { return nodes + ((idx & BLOCK_MASK) >> BLOCK_SHIFT) * sizeof(Node); }\n\t};\n\t\n\ttemplate<typename TSubBlock, int BlockSize>\n\tstruct HigherOrderBlock\n\t{\n\t\tstd::atomic<TSubBlock*> subblocks[BlockSize];\n\t\t\n\t\tHigherOrderBlock()\n\t\t{\n\t\t\tfor (int i = 0; i != BlockSize; ++i) {\n\t\t\t\tsubblocks[i].store(nullptr, std::memory_order_release);\n\t\t\t}\n\t\t}\n\t\t\n\t\t~HigherOrderBlock()\n\t\t{\n\t\t\tfor (int i = 0; i != BlockSize; ++i) {\n\t\t\t\tif (subblocks[i].load(std::memory_order_relaxed) != nullptr) {\n\t\t\t\t\tdelete subblocks[i].load(std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\t\n\ttypedef HigherOrderBlock<Block, SUPER_BLOCK_SIZE> SuperBlock;\n\ttypedef HigherOrderBlock<SuperBlock, ULTRA_BLOCK_SIZE> UltraBlock;\n\ttypedef HigherOrderBlock<UltraBlock, UBER_BLOCK_SIZE> UberBlock;\n\ttypedef HigherOrderBlock<UberBlock, UBER_BLOCKS> UberBlockContainer;\n\t\n\t\nprivate:\n\tinline idx_t set_consumed_flag(idx_t idx)\n\t{\n\t\treturn idx | (idx_t)1;\n\t}\n\t\n\tinline idx_t clear_consumed_flag(idx_t idx)\n\t{\n\t\treturn idx & ~(idx_t)1;\n\t}\n\t\n\tinline bool is_consumed(idx_t idx)\n\t{\n\t\treturn (idx & 1) != 0;\n\t}\n\t\n\t\n\tinline void add_node_to_free_list(idx_t idx, Node* node)\n\t{\n\t\tauto head = freeListHead.load(std::memory_order_relaxed);\n\t\tdo {\n\t\t\tnode->next.store(head, std::memory_order_relaxed);\n\t\t} while (!freeListHead.compare_exchange_weak(head, idx, std::memory_order_release, std::memory_order_relaxed));\n\t}\n\t\n\tinline idx_t try_get_node_from_free_list()\n\t{\n\t\tauto head = freeListHead.load(std::memory_order_acquire);\n\t\twhile (head != 0 && !freeListHead.compare_exchange_weak(head, get_node_at(head)->next.load(std::memory_order_relaxed), std::memory_order_acquire)) {\n\t\t\tcontinue;\n\t\t}\n\t\t\n\t\tif (head != 0) {\n\t\t\t// Increment version\n\t\t\thead = (head & ~VERSION_MASK) | ((head + VERSION_INCR) & VERSION_MASK);\n\t\t}\n\t\treturn 
head;\n\t}\n\t\n\t\n\tinline Node* get_node_at(idx_t idx)\n\t{\n\t\tauto uberBlock = uberBlockContainer.subblocks[(idx & UBER_BLOCK_IDX_MASK) >> UBER_BLOCK_IDX_SHIFT].load(std::memory_order_relaxed);\n\t\tauto ultraBlock = uberBlock->subblocks[(idx & UBER_BLOCK_MASK) >> UBER_BLOCK_SHIFT].load(std::memory_order_relaxed);\n\t\tauto superBlock = ultraBlock->subblocks[(idx & ULTRA_BLOCK_MASK) >> ULTRA_BLOCK_SHIFT].load(std::memory_order_relaxed);\n\t\tauto block = superBlock->subblocks[(idx & SUPER_BLOCK_MASK) >> SUPER_BLOCK_SHIFT].load(std::memory_order_relaxed);\n\t\treturn reinterpret_cast<Node*>(block->node_pos(idx));\n\t}\n\t\n\ttemplate<typename U>\n\tinline idx_t allocate_node_for(U&& item)\n\t{\n\t\tauto idx = try_get_node_from_free_list();\n\t\tif (idx != 0) {\n\t\t\tauto node = get_node_at(idx);\n\t\t\tnode->next.store(0, std::memory_order_relaxed);\n\t\t\tnew (node->item()) T(std::forward<U>(item));\n\t\t\treturn idx;\n\t\t}\n\t\tnew (new_node_address(idx)) Node(std::forward<U>(item));\n\t\treturn idx;\n\t}\n\t\n\tinline idx_t allocate_blank_node()\n\t{\n\t\tidx_t idx;\n\t\tnew (new_node_address(idx)) Node();\n\t\treturn idx;\n\t}\n\t\n\tinline char* new_node_address(idx_t& idx)\n\t{\n\t\tidx = nextNodeIdx.fetch_add(static_cast<idx_t>(1) << BLOCK_SHIFT, std::memory_order_relaxed);\n\t\t\n\t\tstd::size_t uberBlockContainerIdx = (idx & UBER_BLOCK_IDX_MASK) >> UBER_BLOCK_IDX_SHIFT;\n\t\tstd::size_t uberBlockIdx = (idx & UBER_BLOCK_MASK) >> UBER_BLOCK_SHIFT;\n\t\tstd::size_t ultraBlockIdx = (idx & ULTRA_BLOCK_MASK) >> ULTRA_BLOCK_SHIFT;\n\t\tstd::size_t superBlockIdx = (idx & SUPER_BLOCK_MASK) >> SUPER_BLOCK_SHIFT;\n\t\t\n\t\tauto uberBlock = lookup_subblock<UberBlockContainer, UberBlock>(&uberBlockContainer, uberBlockContainerIdx);\n\t\tauto ultraBlock = lookup_subblock<UberBlock, UltraBlock>(uberBlock, uberBlockIdx);\n\t\tauto superBlock = lookup_subblock<UltraBlock, SuperBlock>(ultraBlock, ultraBlockIdx);\n\t\tauto block = lookup_subblock<SuperBlock, 
Block>(superBlock, superBlockIdx);\n\t\treturn block->node_pos(idx);\n\t}\n\t\n\ttemplate<typename TBlock, typename TSubBlock>\n\tinline TSubBlock* lookup_subblock(TBlock* block, std::size_t idx)\n\t{\n\t\tauto ptr = block->subblocks[idx].load(std::memory_order_acquire);\n\t\tif (ptr == nullptr) {\n\t\t\tauto newBlock = new TSubBlock();\n\t\t\tif (!block->subblocks[idx].compare_exchange_strong(ptr, newBlock, std::memory_order_release, std::memory_order_acquire)) {\n\t\t\t\tdelete newBlock;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tptr = newBlock;\n\t\t\t}\n\t\t}\n\t\treturn ptr;\n\t}\n\t\nprivate:\n\tstd::atomic<idx_t> nextNodeIdx;\n\tstd::atomic<idx_t> head;\n\tstd::atomic<idx_t> tail;\n\tstd::atomic<idx_t> freeListHead;\n\t\n\tUberBlockContainer uberBlockContainer;\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/stdqueue.h",
    "content": "// ©2014 Cameron Desrochers.\n\n#pragma once\n\n#include <queue>\n\n#include \"wrappers.h\"\n\n// Simple wrapper around std::queue (not thread safe)\ntemplate<typename T>\nclass StdQueueWrapper\n{\npublic:\n\ttypedef DummyToken producer_token_t;\n\ttypedef DummyToken consumer_token_t;\n\t\npublic:\n\ttemplate<typename U>\n\tinline bool enqueue(U&& item)\n\t{\n\t\tq.push(std::forward<U>(item));\n\t\treturn true;\n\t}\n\t\n\tinline bool try_dequeue(T& item)\n\t{\n\t\tif (q.empty()) {\n\t\t\treturn false;\n\t\t}\n\t\t\n\t\titem = std::move(q.front());\n\t\tq.pop();\n\t\treturn true;\n\t}\n\t\n\t// Dummy token methods (not used)\n\tbool enqueue(producer_token_t const&, T const&) { return false; }\n\tbool try_enqueue(producer_token_t, T const&) { return false; }\n\tbool try_dequeue(consumer_token_t, T& item) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(It, size_t) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(producer_token_t const&, It, size_t) { return false; }\n\ttemplate<typename It> size_t try_dequeue_bulk(It, size_t) { return 0; }\n\ttemplate<typename It> size_t try_dequeue_bulk(consumer_token_t, It, size_t) { return 0; }\n\nprivate:\n\tstd::queue<T> q;\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/COPYING",
    "content": "\t\t    GNU GENERAL PUBLIC LICENSE\n\t\t       Version 2, June 1991\n\n Copyright (C) 1989, 1991 Free Software Foundation, Inc.,\n 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n\t\t\t    Preamble\n\n  The licenses for most software are designed to take away your\nfreedom to share and change it.  By contrast, the GNU General Public\nLicense is intended to guarantee your freedom to share and change free\nsoftware--to make sure the software is free for all its users.  This\nGeneral Public License applies to most of the Free Software\nFoundation's software and to any other program whose authors commit to\nusing it.  (Some other Free Software Foundation software is covered by\nthe GNU Lesser General Public License instead.)  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthis service if you wish), that you receive source code or can get it\nif you want it, that you can change the software or use pieces of it\nin new free programs; and that you know you can do these things.\n\n  To protect your rights, we need to make restrictions that forbid\nanyone to deny you these rights or to ask you to surrender the rights.\nThese restrictions translate to certain responsibilities for you if you\ndistribute copies of the software, or if you modify it.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must give the recipients all the rights that\nyou have.  You must make sure that they, too, receive or can get the\nsource code.  
And you must show them these terms so they know their\nrights.\n\n  We protect your rights with two steps: (1) copyright the software, and\n(2) offer you this license which gives you legal permission to copy,\ndistribute and/or modify the software.\n\n  Also, for each author's protection and ours, we want to make certain\nthat everyone understands that there is no warranty for this free\nsoftware.  If the software is modified by someone else and passed on, we\nwant its recipients to know that what they have is not the original, so\nthat any problems introduced by others will not reflect on the original\nauthors' reputations.\n\n  Finally, any free program is threatened constantly by software\npatents.  We wish to avoid the danger that redistributors of a free\nprogram will individually obtain patent licenses, in effect making the\nprogram proprietary.  To prevent this, we have made it clear that any\npatent must be licensed for everyone's free use or not licensed at all.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n\t\t    GNU GENERAL PUBLIC LICENSE\n   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n  0. This License applies to any program or other work which contains\na notice placed by the copyright holder saying it may be distributed\nunder the terms of this General Public License.  The \"Program\", below,\nrefers to any such program or work, and a \"work based on the Program\"\nmeans either the Program or any derivative work under copyright law:\nthat is to say, a work containing the Program or a portion of it,\neither verbatim or with modifications and/or translated into another\nlanguage.  (Hereinafter, translation is included without limitation in\nthe term \"modification\".)  Each licensee is addressed as \"you\".\n\nActivities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope.  
The act of\nrunning the Program is not restricted, and the output from the Program\nis covered only if its contents constitute a work based on the\nProgram (independent of having been made by running the Program).\nWhether that is true depends on what the Program does.\n\n  1. You may copy and distribute verbatim copies of the Program's\nsource code as you receive it, in any medium, provided that you\nconspicuously and appropriately publish on each copy an appropriate\ncopyright notice and disclaimer of warranty; keep intact all the\nnotices that refer to this License and to the absence of any warranty;\nand give any other recipients of the Program a copy of this License\nalong with the Program.\n\nYou may charge a fee for the physical act of transferring a copy, and\nyou may at your option offer warranty protection in exchange for a fee.\n\n  2. You may modify your copy or copies of the Program or any portion\nof it, thus forming a work based on the Program, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n    a) You must cause the modified files to carry prominent notices\n    stating that you changed the files and the date of any change.\n\n    b) You must cause any work that you distribute or publish, that in\n    whole or in part contains or is derived from the Program or any\n    part thereof, to be licensed as a whole at no charge to all third\n    parties under the terms of this License.\n\n    c) If the modified program normally reads commands interactively\n    when run, you must cause it, when started running for such\n    interactive use in the most ordinary way, to print or display an\n    announcement including an appropriate copyright notice and a\n    notice that there is no warranty (or else, saying that you provide\n    a warranty) and that users may redistribute the program under\n    these conditions, and telling the user how to view a copy of this\n  
  License.  (Exception: if the Program itself is interactive but\n    does not normally print such an announcement, your work based on\n    the Program is not required to print an announcement.)\n\nThese requirements apply to the modified work as a whole.  If\nidentifiable sections of that work are not derived from the Program,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works.  But when you\ndistribute the same sections as part of a whole which is a work based\non the Program, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote it.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Program.\n\nIn addition, mere aggregation of another work not based on the Program\nwith the Program (or with a work based on the Program) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n  3. 
You may copy and distribute the Program (or a work based on it,\nunder Section 2) in object code or executable form under the terms of\nSections 1 and 2 above provided that you also do one of the following:\n\n    a) Accompany it with the complete corresponding machine-readable\n    source code, which must be distributed under the terms of Sections\n    1 and 2 above on a medium customarily used for software interchange; or,\n\n    b) Accompany it with a written offer, valid for at least three\n    years, to give any third party, for a charge no more than your\n    cost of physically performing source distribution, a complete\n    machine-readable copy of the corresponding source code, to be\n    distributed under the terms of Sections 1 and 2 above on a medium\n    customarily used for software interchange; or,\n\n    c) Accompany it with the information you received as to the offer\n    to distribute corresponding source code.  (This alternative is\n    allowed only for noncommercial distribution and only if you\n    received the program in object code or executable form with such\n    an offer, in accord with Subsection b above.)\n\nThe source code for a work means the preferred form of the work for\nmaking modifications to it.  For an executable work, complete source\ncode means all the source code for all modules it contains, plus any\nassociated interface definition files, plus the scripts used to\ncontrol compilation and installation of the executable.  
However, as a\nspecial exception, the source code distributed need not include\nanything that is normally distributed (in either source or binary\nform) with the major components (compiler, kernel, and so on) of the\noperating system on which the executable runs, unless that component\nitself accompanies the executable.\n\nIf distribution of executable or object code is made by offering\naccess to copy from a designated place, then offering equivalent\naccess to copy the source code from the same place counts as\ndistribution of the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n  4. You may not copy, modify, sublicense, or distribute the Program\nexcept as expressly provided under this License.  Any attempt\notherwise to copy, modify, sublicense or distribute the Program is\nvoid, and will automatically terminate your rights under this License.\nHowever, parties who have received copies, or rights, from you under\nthis License will not have their licenses terminated so long as such\nparties remain in full compliance.\n\n  5. You are not required to accept this License, since you have not\nsigned it.  However, nothing else grants you permission to modify or\ndistribute the Program or its derivative works.  These actions are\nprohibited by law if you do not accept this License.  Therefore, by\nmodifying or distributing the Program (or any work based on the\nProgram), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Program or works based on it.\n\n  6. Each time you redistribute the Program (or any work based on the\nProgram), the recipient automatically receives a license from the\noriginal licensor to copy, distribute or modify the Program subject to\nthese terms and conditions.  
You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties to\nthis License.\n\n  7. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Program at all.  For example, if a patent\nlicense would not permit royalty-free redistribution of the Program by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Program.\n\nIf any portion of this section is held invalid or unenforceable under\nany particular circumstance, the balance of the section is intended to\napply and the section as a whole is intended to apply in other\ncircumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system, which is\nimplemented by public license practices.  Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n  8. 
If the distribution and/or use of the Program is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Program under this License\nmay add an explicit geographical distribution limitation excluding\nthose countries, so that distribution is permitted only in or among\ncountries not thus excluded.  In such case, this License incorporates\nthe limitation as if written in the body of this License.\n\n  9. The Free Software Foundation may publish revised and/or new versions\nof the General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\nEach version is given a distinguishing version number.  If the Program\nspecifies a version number of this License which applies to it and \"any\nlater version\", you have the option of following the terms and conditions\neither of that version or of any later version published by the Free\nSoftware Foundation.  If the Program does not specify a version number of\nthis License, you may choose any version ever published by the Free Software\nFoundation.\n\n  10. If you wish to incorporate parts of the Program into other free\nprograms whose distribution conditions are different, write to the author\nto ask for permission.  For software which is copyrighted by the Free\nSoftware Foundation, write to the Free Software Foundation; we sometimes\nmake exceptions for this.  Our decision will be guided by the two goals\nof preserving the free status of all derivatives of our free software and\nof promoting the sharing and reuse of software generally.\n\n\t\t\t    NO WARRANTY\n\n  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\nFOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  
EXCEPT WHEN\nOTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\nPROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\nOR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS\nTO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE\nPROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\nREPAIR OR CORRECTION.\n\n  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\nREDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\nINCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\nOUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\nTO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\nYOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\nPROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n\t\t     END OF TERMS AND CONDITIONS\n\n\t    How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software; you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation; either version 2 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License along\n    with this program; if not, write to the Free Software Foundation, Inc.,\n    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nAlso add information on how to contact you by electronic and paper mail.\n\nIf the program is interactive, make it output a short notice like this\nwhen it starts in an interactive mode:\n\n    Gnomovision version 69, Copyright (C) year name of author\n    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  Of course, the commands you use may\nbe called something other than `show w' and `show c'; they could even be\nmouse-clicks or menu items--whatever suits your program.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the program, if\nnecessary.  
Here is a sample; alter the names:\n\n  Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n  `Gnomovision' (which makes passes at compilers) written by James Hacker.\n\n  <signature of Ty Coon>, 1 April 1989\n  Ty Coon, President of Vice\n\nThis General Public License does not permit incorporating your program into\nproprietary programs.  If your program is a subroutine library, you may\nconsider it more useful to permit linking proprietary applications with the\nlibrary.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.\n----------------     END OF Gnu General Public License     ----------------\n\nThe source code of Threading Building Blocks is distributed under version 2\nof the GNU General Public License, with the so-called \"runtime exception,\"\nas follows (or see any header or implementation file):\n\n   As a special exception, you may use this file as part of a free software\n   library without restriction.  Specifically, if other files instantiate\n   templates or use macros or inline functions from this file, or you compile\n   this file and link it with other files to produce an executable, this\n   file does not by itself cause the resulting executable to be covered by\n   the GNU General Public License.  This exception does not however\n   invalidate any other reasons why the executable file might be covered by\n   the GNU General Public License.\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/README.txt",
    "content": "This is a partial copy of the Intel TBB open source version.\r\nThe version taken is 4.3, obtained from https://www.threadingbuildingblocks.org/download\r\nThe files in this directory consist of the files taken from src/tbb and include/tbb"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/aggregator.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__aggregator_H\n#define __TBB__aggregator_H\n\n#if !TBB_PREVIEW_AGGREGATOR\n#error Set TBB_PREVIEW_AGGREGATOR before including aggregator.h\n#endif\n\n#include \"atomic.h\"\n#include \"tbb_profiling.h\"\n\nnamespace tbb {\nnamespace interface6 {\n\nusing namespace tbb::internal;\n\nclass aggregator_operation {\n    template<typename handler_type> friend class aggregator_ext;\n    uintptr_t status;\n    aggregator_operation* my_next;\npublic:\n    enum aggregator_operation_status { agg_waiting=0, agg_finished };\n    aggregator_operation() : status(agg_waiting), my_next(NULL) {}\n    /// Call start before handling this operation\n    void start() { call_itt_notify(acquired, &status); }\n    /// Call finish when done handling this operation\n    /** The operation will be released to its originating thread, and possibly deleted. */\n    void finish() { itt_store_word_with_release(status, uintptr_t(agg_finished)); }\n    aggregator_operation* next() { return itt_hide_load_word(my_next);}\n    void set_next(aggregator_operation* n) { itt_hide_store_word(my_next, n); }\n};\n\nnamespace internal {\n\nclass basic_operation_base : public aggregator_operation {\n    friend class basic_handler;\n    virtual void apply_body() = 0;\npublic:\n    basic_operation_base() : aggregator_operation() {}\n    virtual ~basic_operation_base() {}\n};\n\ntemplate<typename Body>\nclass basic_operation : public basic_operation_base, no_assign {\n    const Body& my_body;\n    /*override*/ void apply_body() { my_body(); }\npublic:\n    basic_operation(const Body& b) : basic_operation_base(), my_body(b) {}\n};\n\nclass basic_handler {\npublic:\n    basic_handler() {}\n    void operator()(aggregator_operation* op_list) const { \n        while (op_list) {\n            // ITT note: &(op_list->status) tag is used to cover accesses to the 
operation data.\n            // The executing thread \"acquires\" the tag (see start()) and then performs\n            // the associated operation w/o triggering a race condition diagnostics.\n            // A thread that created the operation is waiting for its status (see execute_impl()),\n            // so when this thread is done with the operation, it will \"release\" the tag \n            // and update the status (see finish()) to give control back to the waiting thread.\n            basic_operation_base& request = static_cast<basic_operation_base&>(*op_list);\n            // IMPORTANT: need to advance op_list to op_list->next() before calling request.finish()\n            op_list = op_list->next();\n            request.start();\n            request.apply_body();\n            request.finish();\n        }\n    }\n};\n\n} // namespace internal\n\n//! Aggregator base class and expert interface\n/** An aggregator for collecting operations coming from multiple sources and executing\n    them serially on a single thread. */\ntemplate <typename handler_type>\nclass aggregator_ext : tbb::internal::no_copy {\npublic:\n    aggregator_ext(const handler_type& h) : handler_busy(0), handle_operations(h) { mailbox = NULL; }\n\n    //! EXPERT INTERFACE: Enter a user-made operation into the aggregator's mailbox.\n    /** Details of user-made operations must be handled by user-provided handler */\n    void process(aggregator_operation *op) { execute_impl(*op); }\n\n protected:\n    /** Place operation in mailbox, then either handle mailbox or wait for the operation \n        to be completed by a different thread. */\n    void execute_impl(aggregator_operation& op) {\n        aggregator_operation* res;\n\n        // ITT note: &(op.status) tag is used to cover accesses to this operation. 
This\n        // thread has created the operation, and now releases it so that the handler\n        // thread may handle the associated operation w/o triggering a race condition;\n        // thus this tag will be acquired just before the operation is handled in the\n        // handle_operations functor.\n        call_itt_notify(releasing, &(op.status));\n        // insert the operation in the queue\n        do {\n            // ITT may flag the following line as a race; it is a false positive:\n            // This is an atomic read; we don't provide itt_hide_load_word for atomics\n            op.my_next = res = mailbox; // NOT A RACE \n        } while (mailbox.compare_and_swap(&op, res) != res);\n        if (!res) { // first in the list; handle the operations\n            // ITT note: &mailbox tag covers access to the handler_busy flag, which this\n            // waiting handler thread will try to set before entering handle_operations.\n            call_itt_notify(acquired, &mailbox);\n            start_handle_operations();\n            __TBB_ASSERT(op.status, NULL);\n        }\n        else { // not first; wait for op to be ready\n            call_itt_notify(prepare, &(op.status));\n            spin_wait_while_eq(op.status, uintptr_t(aggregator_operation::agg_waiting));\n            itt_load_word_with_acquire(op.status);\n        }\n    }\n\n\n private:\n    //! An atomically updated list (aka mailbox) of aggregator_operations\n    atomic<aggregator_operation *> mailbox;\n\n    //! Controls thread access to handle_operations\n    /** Behaves as boolean flag where 0=false, 1=true */\n    uintptr_t handler_busy;\n\n    handler_type handle_operations;\n\n    //! Trigger the handling of operations when the handler is free\n    void start_handle_operations() {\n        aggregator_operation *pending_operations;\n\n        // ITT note: &handler_busy tag covers access to mailbox as it is passed\n        // between active and waiting handlers.  
Below, the waiting handler waits until\n        // the active handler releases, and the waiting handler acquires &handler_busy as\n        // it becomes the active_handler. The release point is at the end of this\n        // function, when all operations in mailbox have been handled by the\n        // owner of this aggregator.\n        call_itt_notify(prepare, &handler_busy);\n        // get handler_busy: only one thread can possibly spin here at a time\n        spin_wait_until_eq(handler_busy, uintptr_t(0));\n        call_itt_notify(acquired, &handler_busy);\n        // acquire fence not necessary here due to causality rule and surrounding atomics\n        __TBB_store_with_release(handler_busy, uintptr_t(1));\n\n        // ITT note: &mailbox tag covers access to the handler_busy flag itself. \n        // Capturing the state of the mailbox signifies that handler_busy has been \n        // set and a new active handler will now process that list's operations.\n        call_itt_notify(releasing, &mailbox);\n        // grab pending_operations\n        pending_operations = mailbox.fetch_and_store(NULL);\n\n        // handle all the operations\n        handle_operations(pending_operations);\n\n        // release the handler\n        itt_store_word_with_release(handler_busy, uintptr_t(0));\n    }\n};\n\n//! Basic aggregator interface\nclass aggregator : private aggregator_ext<internal::basic_handler> {\npublic:\n    aggregator() : aggregator_ext<internal::basic_handler>(internal::basic_handler()) {}\n    //! 
BASIC INTERFACE: Enter a function for exclusive execution by the aggregator.\n    /** The calling thread stores the function object in a basic_operation and\n        places the operation in the aggregator's mailbox */\n    template<typename Body>\n    void execute(const Body& b) {\n        internal::basic_operation<Body> op(b);\n        this->execute_impl(op);\n    }\n};\n\n} // namespace interface6\n\nusing interface6::aggregator;\nusing interface6::aggregator_ext;\nusing interface6::aggregator_operation;\n\n} // namespace tbb\n\n#endif  // __TBB__aggregator_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/aligned_space.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_aligned_space_H\n#define __TBB_aligned_space_H\n\n#include \"tbb_stddef.h\"\n#include \"tbb_machine.h\"\n\nnamespace tbb {\n\n//! Block of space aligned sufficiently to construct an array T with N elements.\n/** The elements are not constructed or destroyed by this class.\n    @ingroup memory_allocation */\ntemplate<typename T,size_t N=1>\nclass aligned_space {\nprivate:\n    typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type;\n    element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)];\npublic:\n    //! 
Pointer to beginning of array\n    T* begin() {return internal::punned_cast<T*>(this);}\n\n    //! Pointer to one past last element in array.\n    T* end() {return begin()+N;}\n};\n\n} // namespace tbb \n\n#endif /* __TBB_aligned_space_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/arena.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"scheduler.h\"\n#include \"governor.h\"\n#include \"arena.h\"\n#include \"itt_notify.h\"\n#include \"semaphore.h\"\n\n#include <functional>\n\n#if __TBB_STATISTICS_STDOUT\n#include <cstdio>\n#endif\n\nnamespace tbb {\nnamespace internal {\n\nvoid arena::process( generic_scheduler& s ) {\n    __TBB_ASSERT( is_alive(my_guard), NULL );\n    __TBB_ASSERT( governor::is_set(&s), NULL );\n    __TBB_ASSERT( !s.my_innermost_running_task, NULL );\n    __TBB_ASSERT( !s.my_dispatching_task, NULL );\n\n    __TBB_ASSERT( my_num_slots != 1, NULL );\n    // Start search for an empty slot from the one we occupied the last time\n    unsigned index = s.my_arena_index < my_num_slots ? s.my_arena_index : s.my_random.get() % (my_num_slots - 1) + 1,\n             end = index;\n    __TBB_ASSERT( index != 0, \"A worker cannot occupy slot 0\" );\n    __TBB_ASSERT( index < my_num_slots, NULL );\n\n    // Find a vacant slot\n    for ( ;; ) {\n        if ( !my_slots[index].my_scheduler && as_atomic(my_slots[index].my_scheduler).compare_and_swap(&s, NULL ) == NULL )\n            break;\n        if ( ++index == my_num_slots )\n            index = 1;\n        if ( index == end ) {\n            // Likely this arena is already saturated\n            goto quit;\n        }\n    }\n    ITT_NOTIFY(sync_acquired, my_slots + index);\n    s.my_arena = this;\n    s.my_arena_index = index;\n    s.my_arena_slot = my_slots + index;\n#if __TBB_TASK_PRIORITY\n    s.my_local_reload_epoch = *s.my_ref_reload_epoch;\n    __TBB_ASSERT( !s.my_offloaded_tasks, NULL );\n#endif /* __TBB_TASK_PRIORITY */\n    s.attach_mailbox( affinity_id(index+1) );\n\n    s.my_arena_slot->hint_for_pop  = index; // initial value for round-robin\n\n#if !__TBB_FP_CONTEXT\n    my_cpu_ctl_env.set_env();\n#endif\n\n#if __TBB_SCHEDULER_OBSERVER\n    __TBB_ASSERT( 
!s.my_last_local_observer, \"There cannot be notified local observers when entering arena\" );\n    my_observers.notify_entry_observers( s.my_last_local_observer, /*worker=*/true );\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n    atomic_update( my_limit, index + 1, std::less<unsigned>() );\n\n    for ( ;; ) {\n        // Try to steal a task.\n        // Passing reference count is technically unnecessary in this context,\n        // but omitting it here would add checks inside the function.\n        __TBB_ASSERT( is_alive(my_guard), NULL );\n        task* t = s.receive_or_steal_task( s.my_dummy_task->prefix().ref_count, /*return_if_no_work=*/true );\n        if (t) {\n            // A side effect of receive_or_steal_task is that my_innermost_running_task can be set.\n            // But for the outermost dispatch loop of a worker it has to be NULL.\n            s.my_innermost_running_task = NULL;\n            __TBB_ASSERT( !s.my_dispatching_task, NULL );\n            s.local_wait_for_all(*s.my_dummy_task,t);\n        }\n        __TBB_ASSERT ( __TBB_load_relaxed(s.my_arena_slot->head) == __TBB_load_relaxed(s.my_arena_slot->tail),\n                       \"Worker cannot leave arena while its task pool is not empty\" );\n        __TBB_ASSERT( s.my_arena_slot->task_pool == EmptyTaskPool, \"Empty task pool is not marked appropriately\" );\n        // This check prevents relinquishing more than necessary workers because\n        // of the non-atomicity of the decision making procedure\n        if (num_workers_active() > my_num_workers_allotted)\n            break;\n    }\n#if __TBB_SCHEDULER_OBSERVER\n    my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/true );\n    s.my_last_local_observer = NULL;\n#endif /* __TBB_SCHEDULER_OBSERVER */\n#if __TBB_TASK_PRIORITY\n    if ( s.my_offloaded_tasks )\n        orphan_offloaded_tasks( s );\n#endif /* __TBB_TASK_PRIORITY */\n#if __TBB_STATISTICS\n    ++s.my_counters.arena_roundtrips;\n    
*my_slots[index].my_counters += s.my_counters;\n    s.my_counters.reset();\n#endif /* __TBB_STATISTICS */\n    __TBB_store_with_release( my_slots[index].my_scheduler, (generic_scheduler*)NULL );\n    s.my_arena_slot = 0; // detached from slot\n    s.my_inbox.detach();\n    __TBB_ASSERT( s.my_inbox.is_idle_state(true), NULL );\n    __TBB_ASSERT( !s.my_innermost_running_task, NULL );\n    __TBB_ASSERT( !s.my_dispatching_task, NULL );\n    __TBB_ASSERT( is_alive(my_guard), NULL );\nquit:\n    // In contrast to earlier versions of TBB (before 3.0 U5) now it is possible\n    // that arena may be temporarily left unpopulated by threads. See comments in\n    // arena::on_thread_leaving() for more details.\n#if !__TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    on_thread_leaving</*is_master*/false>();\n#endif /* !__TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n}\n\narena::arena ( market& m, unsigned max_num_workers ) {\n    __TBB_ASSERT( !my_guard, \"improperly allocated arena?\" );\n    __TBB_ASSERT( sizeof(my_slots[0]) % NFS_GetLineSize()==0, \"arena::slot size not multiple of cache line size\" );\n    __TBB_ASSERT( (uintptr_t)this % NFS_GetLineSize()==0, \"arena misaligned\" );\n#if __TBB_TASK_PRIORITY\n    __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority, \"New arena object is not zeroed\" );\n#endif /* __TBB_TASK_PRIORITY */\n    my_market = &m;\n    my_limit = 1;\n    // Two slots are mandatory: for the master, and for 1 worker (required to support starvation resistant tasks).\n    my_num_slots = num_slots_to_reserve(max_num_workers);\n    my_max_num_workers = max_num_workers;\n    my_references = 1; // accounts for the master\n#if __TBB_TASK_PRIORITY\n    my_bottom_priority = my_top_priority = normalized_normal_priority;\n#endif /* __TBB_TASK_PRIORITY */\n    my_aba_epoch = m.my_arenas_aba_epoch;\n#if __TBB_SCHEDULER_OBSERVER\n    my_observers.my_arena = this;\n#endif /* __TBB_SCHEDULER_OBSERVER */\n    __TBB_ASSERT ( my_max_num_workers < 
my_num_slots, NULL );\n    // Construct slots. Mark internal synchronization elements for the tools.\n    for( unsigned i = 0; i < my_num_slots; ++i ) {\n        __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, NULL );\n        __TBB_ASSERT( !my_slots[i].task_pool_ptr, NULL );\n        __TBB_ASSERT( !my_slots[i].my_task_pool_size, NULL );\n        ITT_SYNC_CREATE(my_slots + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);\n        mailbox(i+1).construct();\n        ITT_SYNC_CREATE(&mailbox(i+1), SyncType_Scheduler, SyncObj_Mailbox);\n        my_slots[i].hint_for_pop = i;\n#if __TBB_STATISTICS\n        my_slots[i].my_counters = new ( NFS_Allocate(1, sizeof(statistics_counters), NULL) ) statistics_counters;\n#endif /* __TBB_STATISTICS */\n    }\n#if __TBB_TASK_PRIORITY\n    for ( intptr_t i = 0; i < num_priority_levels; ++i ) {\n        my_task_stream[i].initialize(my_num_slots);\n        ITT_SYNC_CREATE(my_task_stream + i, SyncType_Scheduler, SyncObj_TaskStream);\n    }\n#else /* !__TBB_TASK_PRIORITY */\n    my_task_stream.initialize(my_num_slots);\n    ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);\n#endif /* !__TBB_TASK_PRIORITY */\n    my_mandatory_concurrency = false;\n#if __TBB_TASK_GROUP_CONTEXT\n    // Context to be used by root tasks by default (if the user has not specified one).\n    // The arena's context should not capture fp settings for the sake of backward compatibility.\n    my_default_ctx =\n            new ( NFS_Allocate(1, sizeof(task_group_context), NULL) ) task_group_context(task_group_context::isolated, task_group_context::default_traits);\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n#if __TBB_FP_CONTEXT\n    my_default_ctx->capture_fp_settings();\n#else\n    my_cpu_ctl_env.get_env();\n#endif\n}\n\narena& arena::allocate_arena( market& m, unsigned max_num_workers ) {\n    __TBB_ASSERT( sizeof(base_type) + sizeof(arena_slot) == sizeof(arena), \"All arena data fields must go to arena_base\" );\n    
__TBB_ASSERT( sizeof(base_type) % NFS_GetLineSize() == 0, \"arena slots area misaligned: wrong padding\" );\n    __TBB_ASSERT( sizeof(mail_outbox) == NFS_MaxLineSize, \"Mailbox padding is wrong\" );\n    size_t n = allocation_size(max_num_workers);\n    unsigned char* storage = (unsigned char*)NFS_Allocate( 1, n, NULL );\n    // Zero all slots to indicate that they are empty\n    memset( storage, 0, n );\n    return *new( storage + num_slots_to_reserve(max_num_workers) * sizeof(mail_outbox) ) arena(m, max_num_workers);\n}\n\nvoid arena::free_arena () {\n    __TBB_ASSERT( is_alive(my_guard), NULL );\n    __TBB_ASSERT( !my_references, \"There are threads in the dying arena\" );\n    __TBB_ASSERT( !my_num_workers_requested && !my_num_workers_allotted, \"Dying arena requests workers\" );\n    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, \"Inconsistent state of a dying arena\" );\n#if !__TBB_STATISTICS_EARLY_DUMP\n    GATHER_STATISTIC( dump_arena_statistics() );\n#endif\n    poison_value( my_guard );\n    intptr_t drained = 0;\n    for ( unsigned i = 0; i < my_num_slots; ++i ) {\n        __TBB_ASSERT( !my_slots[i].my_scheduler, \"arena slot is not empty\" );\n#if !__TBB_TASK_ARENA\n        __TBB_ASSERT( my_slots[i].task_pool == EmptyTaskPool, NULL );\n#else\n        //TODO: understand the assertion and modify\n#endif\n        __TBB_ASSERT( my_slots[i].head == my_slots[i].tail, NULL ); // TODO: replace by is_quiescent_local_task_pool_empty\n        my_slots[i].free_task_pool();\n#if __TBB_STATISTICS\n        NFS_Free( my_slots[i].my_counters );\n#endif /* __TBB_STATISTICS */\n        drained += mailbox(i+1).drain();\n    }\n#if __TBB_TASK_PRIORITY && TBB_USE_ASSERT\n    for ( intptr_t i = 0; i < num_priority_levels; ++i )\n        __TBB_ASSERT(my_task_stream[i].empty() && my_task_stream[i].drain()==0, \"Not all enqueued tasks were executed\");\n#elif !__TBB_TASK_PRIORITY\n    __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, \"Not 
all enqueued tasks were executed\");\n#endif /* !__TBB_TASK_PRIORITY */\n#if __TBB_COUNT_TASK_NODES\n    my_market->update_task_node_count( -drained );\n#endif /* __TBB_COUNT_TASK_NODES */\n    my_market->release();\n#if __TBB_TASK_GROUP_CONTEXT\n    __TBB_ASSERT( my_default_ctx, \"Master thread never entered the arena?\" );\n    my_default_ctx->~task_group_context();\n    NFS_Free(my_default_ctx);\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n#if __TBB_SCHEDULER_OBSERVER\n    if ( !my_observers.empty() )\n        my_observers.clear();\n#endif /* __TBB_SCHEDULER_OBSERVER */\n    void* storage  = &mailbox(my_num_slots);\n    __TBB_ASSERT( my_references == 0, NULL );\n    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );\n    this->~arena();\n#if TBB_USE_ASSERT > 1\n    memset( storage, 0, allocation_size(my_max_num_workers) );\n#endif /* TBB_USE_ASSERT */\n    NFS_Free( storage );\n}\n\n#if __TBB_STATISTICS\nvoid arena::dump_arena_statistics () {\n    statistics_counters total;\n    for( unsigned i = 0; i < my_num_slots; ++i ) {\n#if __TBB_STATISTICS_EARLY_DUMP\n        generic_scheduler* s = my_slots[i].my_scheduler;\n        if ( s )\n            *my_slots[i].my_counters += s->my_counters;\n#else\n        __TBB_ASSERT( !my_slots[i].my_scheduler, NULL );\n#endif\n        if ( i != 0 ) {\n            total += *my_slots[i].my_counters;\n            dump_statistics( *my_slots[i].my_counters, i );\n        }\n    }\n    dump_statistics( *my_slots[0].my_counters, 0 );\n#if __TBB_STATISTICS_STDOUT\n#if !__TBB_STATISTICS_TOTALS_ONLY\n    printf( \"----------------------------------------------\\n\" );\n#endif\n    dump_statistics( total, workers_counters_total );\n    total += *my_slots[0].my_counters;\n    dump_statistics( total, arena_counters_total );\n#if !__TBB_STATISTICS_TOTALS_ONLY\n    printf( \"==============================================\\n\" );\n#endif\n#endif /* __TBB_STATISTICS_STDOUT */\n}\n#endif /* __TBB_STATISTICS */\n\n#if 
__TBB_TASK_PRIORITY\n// The method inspects a scheduler to determine:\n// 1. if it has tasks that can be retrieved and executed (via the return value);\n// 2. if it has any tasks at all, including those of lower priority (via tasks_present);\n// 3. if it is able to work with enqueued tasks (via dequeuing_possible).\ninline bool arena::may_have_tasks ( generic_scheduler* s, bool& tasks_present, bool& dequeuing_possible ) {\n    if ( !s\n#if __TBB_TASK_ARENA\n            || s->my_arena != this\n#endif\n            ) return false;\n    dequeuing_possible |= s->worker_outermost_level();\n    if ( s->my_pool_reshuffling_pending ) {\n        // This primary task pool is nonempty and may contain tasks at the current\n        // priority level. Its owner is winnowing lower priority tasks at the moment.\n        tasks_present = true;\n        return true;\n    }\n    if ( s->my_offloaded_tasks ) {\n        tasks_present = true;\n        if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {\n            // This scheduler's offload area is nonempty and may contain tasks at the\n            // current priority level.\n            return true;\n        }\n    }\n    return false;\n}\n\nvoid arena::orphan_offloaded_tasks(generic_scheduler& s) {\n    __TBB_ASSERT( s.my_offloaded_tasks, NULL );\n    GATHER_STATISTIC( ++s.my_counters.prio_orphanings );\n    ++my_abandonment_epoch;\n    __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL );\n    task* orphans;\n    do {\n        orphans = const_cast<task*>(my_orphaned_tasks);\n        *s.my_offloaded_task_list_tail_link = orphans;\n    } while ( as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans );\n    s.my_offloaded_tasks = NULL;\n#if TBB_USE_ASSERT\n    s.my_offloaded_task_list_tail_link = NULL;\n#endif /* TBB_USE_ASSERT */\n}\n#endif /* __TBB_TASK_PRIORITY */\n\nbool arena::is_out_of_work() {\n    // TODO: rework it to return at least a hint 
about where a task was found; better if the task itself.\n    for(;;) {\n        pool_state_t snapshot = my_pool_state;\n        switch( snapshot ) {\n            case SNAPSHOT_EMPTY:\n                return true;\n            case SNAPSHOT_FULL: {\n                // Use unique id for \"busy\" in order to avoid ABA problems.\n                const pool_state_t busy = pool_state_t(&busy);\n                // Request permission to take snapshot\n                if( my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) {\n                    // Got permission. Take the snapshot.\n                    // NOTE: This is not a lock, as the state can be set to FULL at\n                    //       any moment by a thread that spawns/enqueues new task.\n                    size_t n = my_limit;\n                    // Make local copies of volatile parameters. Their change during\n                    // snapshot taking procedure invalidates the attempt, and returns\n                    // this thread into the dispatch loop.\n#if __TBB_TASK_PRIORITY\n                    intptr_t top_priority = my_top_priority;\n                    uintptr_t reload_epoch = my_reload_epoch;\n                    // Inspect primary task pools first\n#endif /* __TBB_TASK_PRIORITY */\n                    size_t k;\n                    for( k=0; k<n; ++k ) {\n                        if( my_slots[k].task_pool != EmptyTaskPool &&\n                            __TBB_load_relaxed(my_slots[k].head) < __TBB_load_relaxed(my_slots[k].tail) )\n                        {\n                            // k-th primary task pool is nonempty and does contain tasks.\n                            break;\n                        }\n                        if( my_pool_state!=busy )\n                            return false; // the work was published\n                    }\n                    __TBB_ASSERT( k <= n, NULL );\n                    bool work_absent = k == n;\n#if __TBB_TASK_PRIORITY\n            
        // Variable tasks_present indicates presence of tasks at any priority\n                    // level, while work_absent refers only to the current priority.\n                    bool tasks_present = !work_absent || my_orphaned_tasks;\n                    bool dequeuing_possible = false;\n                    if ( work_absent ) {\n                        // Check for the possibility that recent priority changes\n                        // brought some tasks to the current priority level\n\n                        uintptr_t abandonment_epoch = my_abandonment_epoch;\n                        // Master thread's scheduler needs special handling as it\n                        // may be destroyed at any moment (workers' schedulers are\n                        // guaranteed to be alive while at least one thread is in arena).\n                        // Have to exclude concurrency with task group state change propagation too.\n                        // TODO: check whether it is still necessary since some pools belong to slots now\n                        my_market->my_arenas_list_mutex.lock();\n                        generic_scheduler *s = my_slots[0].my_scheduler;\n                        if ( s && as_atomic(my_slots[0].my_scheduler).compare_and_swap(LockedMaster, s) == s ) { //TODO: remove need to lock\n                            __TBB_ASSERT( my_slots[0].my_scheduler == LockedMaster && s != LockedMaster, NULL );\n                            work_absent = !may_have_tasks( s, tasks_present, dequeuing_possible );\n                            __TBB_store_with_release( my_slots[0].my_scheduler, s );\n                        }\n                        my_market->my_arenas_list_mutex.unlock();\n                        // The following loop is subject to data races. 
While k-th slot's\n                        // scheduler is being examined, corresponding worker can either\n                        // leave to RML or migrate to another arena.\n                        // But the races are not prevented because all of them are benign.\n                        // First, the code relies on the fact that worker thread's scheduler\n                        // object persists until the whole library is deinitialized.\n                        // Second, in the worst case the races can only cause another\n                        // round of stealing attempts to be undertaken. Introducing complex\n                        // synchronization into this coldest part of the scheduler's control\n                        // flow does not seem to make sense because it both is unlikely to\n                        // ever have any observable performance effect, and will require\n                        // additional synchronization code on the hotter paths.\n                        for( k = 1; work_absent && k < n; ++k ) {\n                            if( my_pool_state!=busy )\n                                return false; // the work was published\n                            work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible );\n                        }\n                        // Preclude premature switching arena off because of a race in the previous loop.\n                        work_absent = work_absent\n                                      && !__TBB_load_with_acquire(my_orphaned_tasks)\n                                      && abandonment_epoch == my_abandonment_epoch;\n                    }\n#endif /* __TBB_TASK_PRIORITY */\n                    // Test and test-and-set.\n                    if( my_pool_state==busy ) {\n#if __TBB_TASK_PRIORITY\n                        bool no_fifo_tasks = my_task_stream[top_priority].empty();\n                        work_absent = work_absent && (!dequeuing_possible || 
no_fifo_tasks)\n                                      && top_priority == my_top_priority && reload_epoch == my_reload_epoch;\n#else\n                        bool no_fifo_tasks = my_task_stream.empty();\n                        work_absent = work_absent && no_fifo_tasks;\n#endif /* __TBB_TASK_PRIORITY */\n                        if( work_absent ) {\n#if __TBB_TASK_PRIORITY\n                            if ( top_priority > my_bottom_priority ) {\n                                if ( my_market->lower_arena_priority(*this, top_priority - 1, reload_epoch)\n                                     && !my_task_stream[top_priority].empty() )\n                                {\n                                    atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>());\n                                }\n                            }\n                            else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {\n#endif /* __TBB_TASK_PRIORITY */\n                                // save current demand value before setting SNAPSHOT_EMPTY,\n                                // to avoid race with advertise_new_work.\n                                int current_demand = (int)my_max_num_workers;\n                                if( my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) {\n                                    // This thread transitioned pool to empty state, and thus is\n                                    // responsible for telling RML that there is no other work to do.\n                                    my_market->adjust_demand( *this, -current_demand );\n#if __TBB_TASK_PRIORITY\n                                    // Check for the presence of enqueued tasks \"lost\" on some of\n                                    // priority levels because updating arena priority and switching\n                                    // arena into \"populated\" (FULL) state happen non-atomically.\n                                    // 
Imposing atomicity would require task::enqueue() to use a lock,\n                                    // which is unacceptable.\n                                    bool switch_back = false;\n                                    for ( int p = 0; p < num_priority_levels; ++p ) {\n                                        if ( !my_task_stream[p].empty() ) {\n                                            switch_back = true;\n                                            if ( p < my_bottom_priority || p > my_top_priority )\n                                                my_market->update_arena_priority(*this, p);\n                                        }\n                                    }\n                                    if ( switch_back )\n                                        advertise_new_work</*Spawned*/false>();\n#endif /* __TBB_TASK_PRIORITY */\n                                    return true;\n                                }\n                                return false;\n#if __TBB_TASK_PRIORITY\n                            }\n#endif /* __TBB_TASK_PRIORITY */\n                        }\n                        // Undo previous transition SNAPSHOT_FULL-->busy, unless another thread undid it.\n                        my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );\n                    }\n                }\n                return false;\n            }\n            default:\n                // Another thread is taking a snapshot.\n                return false;\n        }\n    }\n}\n\n#if __TBB_COUNT_TASK_NODES\nintptr_t arena::workers_task_node_count() {\n    intptr_t result = 0;\n    for( unsigned i = 1; i < my_num_slots; ++i ) {\n        generic_scheduler* s = my_slots[i].my_scheduler;\n        if( s )\n            result += s->my_task_node_count;\n    }\n    return result;\n}\n#endif /* __TBB_COUNT_TASK_NODES */\n\nvoid arena::enqueue_task( task& t, intptr_t prio, FastRandom &random )\n{\n#if __TBB_RECYCLE_TO_ENQUEUE\n    __TBB_ASSERT( 
t.state()==task::allocated || t.state()==task::to_enqueue, \"attempt to enqueue task with inappropriate state\" );\n#else\n    __TBB_ASSERT( t.state()==task::allocated, \"attempt to enqueue task that is not in 'allocated' state\" );\n#endif\n    t.prefix().state = task::ready;\n    t.prefix().extra_state |= es_task_enqueued; // enqueued task marker\n\n#if TBB_USE_ASSERT\n    if( task* parent = t.parent() ) {\n        internal::reference_count ref_count = parent->prefix().ref_count;\n        __TBB_ASSERT( ref_count!=0, \"attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)\" );\n        __TBB_ASSERT( ref_count>0, \"attempt to enqueue task whose parent has a ref_count<0\" );\n        parent->prefix().extra_state |= es_ref_count_active;\n    }\n    __TBB_ASSERT(t.prefix().affinity==affinity_id(0), \"affinity is ignored for enqueued tasks\");\n#endif /* TBB_USE_ASSERT */\n\n#if __TBB_TASK_PRIORITY\n    intptr_t p = prio ? normalize_priority(priority_t(prio)) : normalized_normal_priority;\n    assert_priority_valid(p);\n    task_stream &ts = my_task_stream[p];\n#else /* !__TBB_TASK_PRIORITY */\n    __TBB_ASSERT_EX(prio == 0, \"the library is not configured to respect the task priority\");\n    task_stream &ts = my_task_stream;\n#endif /* !__TBB_TASK_PRIORITY */\n    ITT_NOTIFY(sync_releasing, &ts);\n    ts.push( &t, random );\n#if __TBB_TASK_PRIORITY\n    if ( p != my_top_priority )\n        my_market->update_arena_priority( *this, p );\n#endif /* __TBB_TASK_PRIORITY */\n    advertise_new_work< /*Spawned=*/ false >();\n#if __TBB_TASK_PRIORITY\n    if ( p != my_top_priority )\n        my_market->update_arena_priority( *this, p );\n#endif /* __TBB_TASK_PRIORITY */\n}\n\n#if __TBB_TASK_ARENA\nstruct nested_arena_context : no_copy {\n    generic_scheduler &my_scheduler;\n    scheduler_state const my_orig_state;\n    void *my_orig_ptr;\n    bool my_adjusting;\n    nested_arena_context(generic_scheduler *s, arena* a, bool needs_adjusting, bool 
as_worker = false)\n        : my_scheduler(*s), my_orig_state(*s), my_orig_ptr(NULL), my_adjusting(needs_adjusting) {\n        s->nested_arena_entry(a, *this, as_worker);\n    }\n    ~nested_arena_context() {\n        my_scheduler.nested_arena_exit(*this);\n        (scheduler_state&)my_scheduler = my_orig_state; // restore arena settings\n    }\n};\n\nvoid generic_scheduler::nested_arena_entry(arena* a, nested_arena_context& c, bool as_worker) {\n    if( a == my_arena ) {\n#if __TBB_TASK_GROUP_CONTEXT\n        c.my_orig_ptr = my_innermost_running_task =\n                new(&allocate_task(sizeof(empty_task), NULL, a->my_default_ctx)) empty_task;\n#endif\n        return;\n    }\n    __TBB_ASSERT( is_alive(a->my_guard), NULL );\n    // overwrite arena settings\n#if __TBB_TASK_PRIORITY\n    if ( my_offloaded_tasks )\n        my_arena->orphan_offloaded_tasks( *this );\n    my_ref_top_priority = &a->my_top_priority;\n    my_ref_reload_epoch = &a->my_reload_epoch;\n    my_local_reload_epoch = a->my_reload_epoch;\n#endif /* __TBB_TASK_PRIORITY */\n    my_arena = a;\n    my_arena_index = 0;\n    my_arena_slot = my_arena->my_slots + my_arena_index;\n    my_inbox.detach(); // TODO: mailboxes were not designed for switching, add copy constructor?\n    attach_mailbox( affinity_id(my_arena_index+1) );\n    my_innermost_running_task = my_dispatching_task = as_worker? NULL : my_dummy_task;\n#if __TBB_TASK_GROUP_CONTEXT\n    // save dummy's context and replace it by arena's context\n    c.my_orig_ptr = my_dummy_task->prefix().context;\n    my_dummy_task->prefix().context = a->my_default_ctx;\n#endif\n#if __TBB_ARENA_OBSERVER\n    my_last_local_observer = 0; // TODO: try optimize number of calls\n    my_arena->my_observers.notify_entry_observers( my_last_local_observer, /*worker=*/false );\n#endif\n    // TODO? 
ITT_NOTIFY(sync_acquired, a->my_slots + index);\n    // TODO: it requires market to have P workers (not P-1)\n    // TODO: it still allows temporary oversubscription by 1 worker (due to my_max_num_workers)\n    // TODO: a preempted worker should be excluded from assignment to other arenas e.g. my_slack--\n    if( c.my_adjusting ) my_arena->my_market->adjust_demand(*my_arena, -1);\n}\n\nvoid generic_scheduler::nested_arena_exit(nested_arena_context& c) {\n    if( my_arena == c.my_orig_state.my_arena ) {\n#if __TBB_TASK_GROUP_CONTEXT\n        free_task<small_local_task>(*(task*)c.my_orig_ptr); // TODO: use scoped_task instead?\n#endif\n        return;\n    }\n    if( c.my_adjusting ) my_arena->my_market->adjust_demand(*my_arena, 1);\n#if __TBB_ARENA_OBSERVER\n    my_arena->my_observers.notify_exit_observers( my_last_local_observer, /*worker=*/false );\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n#if __TBB_TASK_PRIORITY\n    if ( my_offloaded_tasks )\n        my_arena->orphan_offloaded_tasks( *this );\n    my_local_reload_epoch = *c.my_orig_state.my_ref_reload_epoch;\n    while ( as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap( NULL, this) != this )\n        __TBB_Yield(); // TODO: task priority can use master slot for locking while accessing the scheduler\n#else\n    // Free the master slot. 
TODO: support multiple masters\n    __TBB_store_with_release(my_arena->my_slots[0].my_scheduler, (generic_scheduler*)NULL);\n#endif\n    my_arena->my_exit_monitors.notify_all_relaxed(); // TODO: fix concurrent monitor to use notify_one (test MultipleMastersPart4 fails)\n#if __TBB_TASK_GROUP_CONTEXT\n    // restore context of dummy task\n    my_dummy_task->prefix().context = (task_group_context*)c.my_orig_ptr;\n#endif\n}\n\nvoid generic_scheduler::wait_until_empty() {\n    my_dummy_task->prefix().ref_count++; // prevents exit from local_wait_for_all when local work is done enforcing the stealing\n    while( my_arena->my_pool_state != arena::SNAPSHOT_EMPTY )\n        local_wait_for_all(*my_dummy_task, NULL);\n    my_dummy_task->prefix().ref_count--;\n}\n\n#endif /* __TBB_TASK_ARENA */\n\n} // namespace internal\n} // namespace tbb\n\n#if __TBB_TASK_ARENA\n#include \"scheduler_utility.h\"\n\nnamespace tbb {\nnamespace interface7 {\nnamespace internal {\n\nvoid task_arena_base::internal_initialize( ) {\n    __TBB_ASSERT( my_master_slots <= 1, \"Number of slots reserved for master can be only [0,1]\");\n    if( my_master_slots > 1 ) my_master_slots = 1; // TODO: make more masters\n    if( my_max_concurrency < 1 )\n        my_max_concurrency = (int)governor::default_num_threads();\n    // TODO: reimplement in an efficient way. We need a scheduler instance in this thread\n    // but the scheduler is only required for task allocation and fifo random seeds until\n    // master wants to join the arena. (Idea - to create a restricted specialization)\n    // It is excessive to create an implicit arena for master here anyway. 
But scheduler\n    // instance implies master thread to be always connected with arena.\n    // browse recursively into init_scheduler and arena::process for details\n    if( !governor::local_scheduler_if_initialized() )\n        governor::init_scheduler( (unsigned)my_max_concurrency - my_master_slots + 1/*TODO: address in market instead*/, 0, true );\n    // TODO: we will need to introduce a mechanism for global settings, including stack size, used by all arenas\n    arena* new_arena = &market::create_arena( my_max_concurrency - my_master_slots/*it's +1 slot for num_masters=0*/, ThreadStackSize );\n    if(as_atomic(my_arena).compare_and_swap(new_arena, NULL) != NULL) { // there is a race possible on my_initialized\n        __TBB_ASSERT(my_arena, NULL);                             // other thread was the first\n        new_arena->on_thread_leaving</*is_master*/true>(); // deallocate new arena\n    }\n#if __TBB_TASK_GROUP_CONTEXT\n    else {\n        my_context = new_arena->my_default_ctx;\n        my_context->my_version_and_traits |= my_version_and_traits & exact_exception_flag;\n    }\n#endif\n}\n\nvoid task_arena_base::internal_terminate( ) {\n    if( my_arena ) {// task_arena was initialized\n#if __TBB_STATISTICS_EARLY_DUMP\n        GATHER_STATISTIC( my_arena->dump_arena_statistics() );\n#endif\n        my_arena->on_thread_leaving</*is_master*/true>();\n        my_arena = 0;\n#if __TBB_TASK_GROUP_CONTEXT\n        my_context = 0;\n#endif\n    }\n}\n\nvoid task_arena_base::internal_enqueue( task& t, intptr_t prio ) const {\n    __TBB_ASSERT(my_arena, NULL);\n    generic_scheduler* s = governor::local_scheduler_if_initialized();\n    __TBB_ASSERT(s, \"Scheduler is not initialized\"); // we allocated a task so can expect the scheduler\n#if __TBB_TASK_GROUP_CONTEXT\n    __TBB_ASSERT(my_arena->my_default_ctx == t.prefix().context, NULL);\n    __TBB_ASSERT(!my_arena->my_default_ctx->is_group_execution_cancelled(), // TODO: any better idea?\n                 \"The task 
will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?\");\n#endif\n    my_arena->enqueue_task( t, prio, s->my_random );\n}\n\nclass delegated_task : public task {\n    internal::delegate_base & my_delegate;\n    concurrent_monitor & my_monitor;\n    task * my_root;\n    /*override*/ task* execute() {\n        generic_scheduler& s = *(generic_scheduler*)prefix().owner;\n        __TBB_ASSERT(s.worker_outermost_level() || s.master_outermost_level(), \"expected to be enqueued and received on the outermost level\");\n        // but this task can mimics outermost level, detect it\n        if( s.master_outermost_level() && s.my_dummy_task->state() == task::executing ) {\n#if TBB_USE_EXCEPTIONS\n            // RTTI is available, check whether the cast is valid\n            __TBB_ASSERT(dynamic_cast<delegated_task*>(s.my_dummy_task), 0);\n#endif\n            set_ref_count(1); // required by the semantics of recycle_to_enqueue()\n            recycle_to_enqueue();\n            return NULL;\n        }\n        struct outermost_context : internal::no_copy {\n            delegated_task * t;\n            generic_scheduler & s;\n            task * orig_dummy;\n            task_group_context * orig_ctx;\n            outermost_context(delegated_task *_t, generic_scheduler &_s) : t(_t), s(_s) {\n                orig_dummy = s.my_dummy_task;\n#if __TBB_TASK_GROUP_CONTEXT\n                orig_ctx = t->prefix().context;\n                t->prefix().context = s.my_arena->my_default_ctx;\n#endif\n                s.my_dummy_task = t; // mimics outermost master\n                __TBB_ASSERT(s.my_innermost_running_task == t, NULL);\n            }\n            ~outermost_context() {\n                s.my_dummy_task = orig_dummy;\n#if TBB_USE_EXCEPTIONS\n                // restore context for sake of registering potential exception\n                t->prefix().context = orig_ctx;\n#endif\n            }\n        } 
scope(this, s);\n        my_delegate();\n        return NULL;\n    }\n    ~delegated_task() {\n        // potential exception was already registered. It must happen before the notification\n        __TBB_ASSERT(my_root->ref_count()==2, NULL);\n        __TBB_store_with_release(my_root->prefix().ref_count, 1); // must precede the wakeup\n        my_monitor.notify_relaxed(*this);\n    }\npublic:\n    delegated_task( internal::delegate_base & d, concurrent_monitor & s, task * t )\n        : my_delegate(d), my_monitor(s), my_root(t) {}\n    // predicate for concurrent_monitor notification\n    bool operator()(uintptr_t ctx) const { return (void*)ctx == (void*)&my_delegate; }\n};\n\nvoid task_arena_base::internal_execute( internal::delegate_base& d) const {\n    __TBB_ASSERT(my_arena, NULL);\n    generic_scheduler* s = governor::local_scheduler();\n    __TBB_ASSERT(s, \"Scheduler is not initialized\");\n    // TODO: is it safe to assign slot to a scheduler which is not yet switched?\n    // TODO TEMP: one master, make more masters\n    if( s->my_arena == my_arena || (!__TBB_load_with_acquire(my_arena->my_slots[0].my_scheduler)\n            && as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL ) == NULL) ) {\n        cpu_ctl_env_helper cpu_ctl_helper;\n        cpu_ctl_helper.set_env( __TBB_CONTEXT_ARG1(my_context) );\n#if TBB_USE_EXCEPTIONS\n        try {\n#endif\n        //TODO: replace dummy tasks for workers as well to avoid using of the_dummy_context\n        nested_arena_context scope(s, my_arena, !my_master_slots);\n        d();\n#if TBB_USE_EXCEPTIONS\n        } catch(...) 
{\n            cpu_ctl_helper.restore_default(); // TODO: is it needed on Windows?\n            if( my_version_and_traits & exact_exception_flag ) throw;\n            else {\n                task_group_context exception_container( task_group_context::isolated,\n                    task_group_context::default_traits & ~task_group_context::exact_exception );\n                exception_container.register_pending_exception();\n                __TBB_ASSERT(exception_container.my_exception, NULL);\n                exception_container.my_exception->throw_self();\n            }\n        }\n#endif\n    } else {\n        concurrent_monitor::thread_context waiter;\n#if __TBB_TASK_GROUP_CONTEXT\n        task_group_context exec_context( task_group_context::isolated, my_version_and_traits & exact_exception_flag );\n#if __TBB_FP_CONTEXT\n        exec_context.copy_fp_settings( *my_context );\n#endif\n#endif\n        auto_empty_task root(__TBB_CONTEXT_ARG(s, &exec_context));\n        root.prefix().ref_count = 2;\n        my_arena->enqueue_task( *new( task::allocate_root(__TBB_CONTEXT_ARG1(exec_context)) )\n                                delegated_task(d, my_arena->my_exit_monitors, &root),\n                                0, s->my_random ); // TODO: priority?\n        do {\n            my_arena->my_exit_monitors.prepare_wait(waiter, (uintptr_t)&d);\n            if( __TBB_load_with_acquire(root.prefix().ref_count) < 2 ) {\n                my_arena->my_exit_monitors.cancel_wait(waiter);\n                break;\n            }\n            else if( !__TBB_load_with_acquire(my_arena->my_slots[0].my_scheduler) // TODO: refactor into a function?\n                    && as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL ) == NULL ) {\n                my_arena->my_exit_monitors.cancel_wait(waiter);\n                nested_arena_context scope(s, my_arena, !my_master_slots);\n                s->local_wait_for_all(root, NULL);\n#if TBB_USE_EXCEPTIONS\n                
__TBB_ASSERT( !exec_context.my_exception, NULL ); // exception can be thrown above, not deferred\n#endif\n                __TBB_ASSERT( root.prefix().ref_count == 0, NULL );\n                break;\n            } else {\n                my_arena->my_exit_monitors.commit_wait(waiter);\n            }\n        } while( __TBB_load_with_acquire(root.prefix().ref_count) == 2 );\n#if TBB_USE_EXCEPTIONS\n        // process possible exception\n        if( task_group_context::exception_container_type *pe = exec_context.my_exception )\n            pe->throw_self();\n#endif\n    }\n}\n\n// this wait task is a temporary approach to wait for arena emptiness for masters without slots\n// TODO: it will be rather reworked for one source of notification from is_out_of_work\nclass wait_task : public task {\n    binary_semaphore & my_signal;\n    /*override*/ task* execute() {\n        generic_scheduler* s = governor::local_scheduler_if_initialized();\n        __TBB_ASSERT( s, NULL );\n        if( s->my_arena_index && s->worker_outermost_level() ) {// on outermost level of workers only\n            s->local_wait_for_all( *s->my_dummy_task, NULL ); // run remaining tasks\n        } else s->my_arena->is_out_of_work(); // avoids starvation of internal_wait: issuing this task makes arena full\n        my_signal.V();\n        return NULL;\n    }\npublic:\n    wait_task ( binary_semaphore & sema ) : my_signal(sema) {}\n};\n\nvoid task_arena_base::internal_wait() const {\n    __TBB_ASSERT(my_arena, NULL);\n    generic_scheduler* s = governor::local_scheduler();\n    __TBB_ASSERT(s, \"Scheduler is not initialized\");\n    __TBB_ASSERT(s->my_arena != my_arena || s->my_arena_index == 0, \"task_arena::wait_until_empty() is not supported within a worker context\" );\n    if( s->my_arena == my_arena ) {\n        //unsupported, but try do something for outermost master\n        __TBB_ASSERT(s->master_outermost_level(), \"unsupported\");\n        if( !s->my_arena_index )\n            while( 
my_arena->num_workers_active() )\n                s->wait_until_empty();\n    } else for(;;) {\n        while( my_arena->my_pool_state != arena::SNAPSHOT_EMPTY ) {\n            if( !__TBB_load_with_acquire(my_arena->my_slots[0].my_scheduler) // TODO TEMP: one master, make more masters\n                && as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL) == NULL ) {\n                nested_arena_context a(s, my_arena, !my_master_slots, true);\n                s->wait_until_empty();\n            } else {\n                binary_semaphore waiter; // TODO: replace by a single event notification from is_out_of_work\n                internal_enqueue( *new( task::allocate_root(__TBB_CONTEXT_ARG1(*my_context)) ) wait_task(waiter), 0 ); // TODO: priority?\n                waiter.P(); // TODO: concurrent_monitor\n            }\n        }\n        if( !my_arena->num_workers_active() && !my_arena->my_slots[0].my_scheduler) // no activity\n            break; // spin until workers active but avoid spinning in a worker\n        __TBB_Yield(); // wait until workers and master leave\n    }\n}\n\n/*static*/ int task_arena_base::internal_current_slot() {\n    generic_scheduler* s = governor::local_scheduler_if_initialized();\n    return s? int(s->my_arena_index) : -1;\n}\n\n\n} // tbb::interfaceX::internal\n} // tbb::interfaceX\n} // tbb\n#endif /* __TBB_TASK_ARENA */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/arena.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_arena_H\n#define _TBB_arena_H\n\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb/atomic.h\"\n\n#include \"tbb/tbb_machine.h\"\n\n#include \"scheduler_common.h\"\n#include \"intrusive_list.h\"\n#include \"task_stream.h\"\n#include \"../rml/include/rml_tbb.h\"\n#include \"mailbox.h\"\n#include \"observer_proxy.h\"\n#include \"market.h\"\n#include \"governor.h\"\n#if __TBB_TASK_ARENA\n#include \"concurrent_monitor.h\"\n#endif\n\nnamespace tbb {\n\nclass task_group_context;\nclass allocate_root_with_context_proxy;\n\nnamespace internal {\n\n//! 
arena data except the array of slots\n/** Separated in order to simplify padding. \n    Intrusive list node base class is used by market to form a list of arenas. **/\nstruct arena_base : padded<intrusive_list_node> {\n    //! Number of workers that have been marked out by the resource manager to service the arena\n    unsigned my_num_workers_allotted;   // heavy use in stealing loop\n\n    //! References of the arena\n    /** Counts workers and master references separately. Bit 0 indicates reference from implicit\n        master or explicit task_arena; the next bits contain number of workers servicing the arena.*/\n    atomic<unsigned> my_references;     // heavy use in stealing loop\n\n#if __TBB_TASK_PRIORITY\n    //! Highest priority of recently spawned or enqueued tasks.\n    volatile intptr_t my_top_priority;  // heavy use in stealing loop\n\n    //! Maximal currently busy slot.\n    atomic<unsigned> my_limit;          // heavy use in stealing loop\n\n    //! Task pool for the tasks scheduled via task::enqueue() method\n    /** Such scheduling guarantees eventual execution even if\n        - new tasks are constantly coming (by extracting scheduled tasks in\n          relaxed FIFO order);\n        - the enqueuing thread does not call any of wait_for_all methods. **/\n    task_stream my_task_stream[num_priority_levels]; // heavy use in stealing loop\n#else /* !__TBB_TASK_PRIORITY */\n    //! Task pool for the tasks scheduled via task::enqueue() method\n    /** Such scheduling guarantees eventual execution even if\n        - new tasks are constantly coming (by extracting scheduled tasks in\n          relaxed FIFO order);\n        - the enqueuing thread does not call any of wait_for_all methods. **/\n    task_stream my_task_stream;         // heavy use in stealing loop\n\n    //! Maximal currently busy slot.\n    atomic<unsigned> my_limit;          // heavy use in stealing loop\n#endif /* !__TBB_TASK_PRIORITY */\n\n    //! 
Number of workers that are currently requested from the resource manager\n    int my_num_workers_requested;\n\n    //! Number of slots in the arena\n    unsigned my_num_slots;\n\n    //! Number of workers requested by the master thread owning the arena\n    unsigned my_max_num_workers;\n\n    //! Market owning this arena\n    market* my_market;\n\n    //! ABA prevention marker\n    uintptr_t my_aba_epoch;\n\n#if !__TBB_FP_CONTEXT\n    //! FPU control settings of arena's master thread captured at the moment of arena instantiation.\n    __TBB_cpu_ctl_env_t my_cpu_ctl_env;\n#endif\n\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    int my_num_workers_present;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n\n    //! Current task pool state and estimate of available tasks amount.\n    /** The estimate is either 0 (SNAPSHOT_EMPTY) or infinity (SNAPSHOT_FULL). \n        Special state is \"busy\" (any other unsigned value). \n        Note that the implementation of arena::is_busy_or_empty() requires \n        my_pool_state to be unsigned. */\n    tbb::atomic<uintptr_t> my_pool_state;\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Default task group context.\n    /** Used by root tasks allocated directly by the master thread (not from inside\n        a TBB task) without explicit context specification. **/\n    task_group_context* my_default_ctx;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n#if __TBB_SCHEDULER_OBSERVER\n    //! List of local observers attached to this arena.\n    observer_list my_observers;\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n#if __TBB_TASK_PRIORITY\n    //! Lowest normalized priority of available spawned or enqueued tasks.\n    intptr_t my_bottom_priority;\n\n    //! Tracks events that may bring tasks in offload areas to the top priority level.\n    /** Incremented when arena top priority changes or a task group priority\n        is elevated to the current arena's top level. **/\n    uintptr_t my_reload_epoch;\n\n    //! 
List of offloaded tasks abandoned by workers revoked by the market\n    task* my_orphaned_tasks;\n\n    //! Counter used to track the occurrence of recent orphaning and re-sharing operations.\n    tbb::atomic<uintptr_t> my_abandonment_epoch;\n\n    //! Highest priority level containing enqueued tasks\n    /** It being greater than 0 means that high priority enqueued tasks had to be\n        bypassed because all workers were blocked in nested dispatch loops and\n        were unable to progress at then current priority level. **/\n    tbb::atomic<intptr_t> my_skipped_fifo_priority;\n#endif /* !__TBB_TASK_PRIORITY */\n\n    //! Indicates if there is an oversubscribing worker created to service enqueued tasks.\n    bool my_mandatory_concurrency;\n\n#if __TBB_TASK_ARENA\n    //! exit notifications after arena slot is released\n    concurrent_monitor my_exit_monitors;\n#endif\n\n#if TBB_USE_ASSERT\n    //! Used to trap accesses to the object after its destruction.\n    uintptr_t my_guard;\n#endif /* TBB_USE_ASSERT */\n}; // struct arena_base\n\nclass arena\n#if (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__==0) && !__INTEL_COMPILER\n    : public padded<arena_base>\n#else\n    : private padded<arena_base>\n#endif\n{\nprivate:\n    friend class generic_scheduler;\n    template<typename SchedulerTraits> friend class custom_scheduler;\n    friend class governor;\n    friend class task_scheduler_observer_v3;\n    friend class market;\n    friend class tbb::task;\n    friend class tbb::task_group_context;\n    friend class allocate_root_with_context_proxy;\n    friend class intrusive_list<arena>;\n    friend class interface7::internal::task_arena_base; // declared in scheduler_common.h\n    friend class interface7::internal::delegated_task;\n    friend class interface7::internal::wait_task;\n\n    typedef padded<arena_base> base_type;\n\n    //! Constructor\n    arena ( market&, unsigned max_num_workers );\n\n    //! 
Allocate an instance of arena.\n    static arena& allocate_arena( market&, unsigned max_num_workers );\n\n    static int unsigned num_slots_to_reserve ( unsigned max_num_workers ) {\n        return max(2u, max_num_workers + 1);\n    }\n\n    static int allocation_size ( unsigned max_num_workers ) {\n        return sizeof(base_type) + num_slots_to_reserve(max_num_workers) * (sizeof(mail_outbox) + sizeof(arena_slot));\n    }\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Finds all contexts affected by the state change and propagates the new state to them.\n    /** The propagation is relayed to the market because tasks created by one \n        master thread can be passed to and executed by other masters. This means \n        that context trees can span several arenas at once and thus state change\n        propagation cannot be generally localized to one arena only. **/\n    template <typename T>\n    bool propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n    //! Get reference to mailbox corresponding to given affinity_id.\n    mail_outbox& mailbox( affinity_id id ) {\n        __TBB_ASSERT( 0<id, \"affinity id must be positive integer\" );\n        __TBB_ASSERT( id <= my_num_slots, \"affinity id out of bounds\" );\n\n        return ((mail_outbox*)this)[-(int)id];\n    }\n\n    //! Completes arena shutdown, destructs and deallocates it.\n    void free_arena ();\n\n    typedef uintptr_t pool_state_t;\n\n    //! No tasks to steal since last snapshot was taken\n    static const pool_state_t SNAPSHOT_EMPTY = 0;\n\n    //! At least one task has been offered for stealing since the last snapshot started\n    static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1);\n\n    //! No tasks to steal or snapshot is being taken.\n    static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_FULL; }\n\n    //! 
The number of workers active in the arena.\n    unsigned num_workers_active( ) {\n        return my_references >> 1;\n    }\n\n    //! If necessary, raise a flag that there is new job in arena.\n    template<bool Spawned> void advertise_new_work();\n\n    //! Check if there is job anywhere in arena.\n    /** Return true if no job or if arena is being cleaned up. */\n    bool is_out_of_work();\n\n    //! enqueue a task into starvation-resistance queue\n    void enqueue_task( task&, intptr_t, FastRandom & );\n\n    //! Registers the worker with the arena and enters TBB scheduler dispatch loop\n    void process( generic_scheduler& );\n\n    //! Notification that worker or master leaves its arena\n    template<bool is_master>\n    inline void on_thread_leaving ( );\n\n#if __TBB_STATISTICS\n    //! Outputs internal statistics accumulated by the arena\n    void dump_arena_statistics ();\n#endif /* __TBB_STATISTICS */\n\n#if __TBB_TASK_PRIORITY\n    //! Check if recent priority changes may bring some tasks to the current priority level soon\n    /** /param tasks_present indicates presence of tasks at any priority level. **/\n    inline bool may_have_tasks ( generic_scheduler*, bool& tasks_present, bool& dequeuing_possible );\n\n    //! Puts offloaded tasks into global list of orphaned tasks\n    void orphan_offloaded_tasks ( generic_scheduler& s );\n#endif /* __TBB_TASK_PRIORITY */\n\n#if __TBB_COUNT_TASK_NODES\n    //! 
Returns the number of task objects \"living\" in worker threads\n    intptr_t workers_task_node_count();\n#endif\n\n    /** Must be the last data field */\n    arena_slot my_slots[1];\n}; // class arena\n\n\ntemplate<bool is_master>\ninline void arena::on_thread_leaving ( ) {\n    //\n    // Implementation of arena destruction synchronization logic contained various\n    // bugs/flaws at the different stages of its evolution, so below is a detailed\n    // description of the issues taken into consideration in the framework of the\n    // current design.\n    //\n    // In case of using fire-and-forget tasks (scheduled via task::enqueue())\n    // master thread is allowed to leave its arena before all its work is executed,\n    // and market may temporarily revoke all workers from this arena. Since revoked\n    // workers never attempt to reset arena state to EMPTY and cancel its request\n    // to RML for threads, the arena object is destroyed only when both the last\n    // thread is leaving it and arena's state is EMPTY (that is its master thread\n    // left and it does not contain any work).\n    //\n    // A worker that checks for work presence and transitions arena to the EMPTY\n    // state (in snapshot taking procedure arena::is_out_of_work()) updates\n    // arena::my_pool_state first and only then arena::my_num_workers_requested.\n    // So the check for work absence must be done against the latter field.\n    //\n    // In a time window between decrementing the active threads count and checking\n    // if there is an outstanding request for workers. New worker thread may arrive,\n    // finish remaining work, set arena state to empty, and leave decrementing its\n    // refcount and destroying. Then the current thread will destroy the arena\n    // the second time. To preclude it a local copy of the outstanding request\n    // value can be stored before decrementing active threads count.\n    //\n    // But this technique may cause two other problem. 
When the stored request is\n    // zero, it is possible that arena still has threads and they can generate new\n    // tasks and thus re-establish non-zero requests. Then all the threads can be\n    // revoked (as described above) leaving this thread the last one, and causing\n    // it to destroy non-empty arena.\n    //\n    // The other problem takes place when the stored request is non-zero. Another\n    // thread may complete the work, set arena state to empty, and leave without\n    // arena destruction before this thread decrements the refcount. This thread\n    // cannot destroy the arena either. Thus the arena may be \"orphaned\".\n    //\n    // In both cases we cannot dereference arena pointer after the refcount is\n    // decremented, as our arena may already be destroyed.\n    //\n    // If this is the master thread, market can be concurrently destroyed.\n    // In case of workers market's liveness is ensured by the RML connection\n    // rundown protocol, according to which the client (i.e. the market) lives\n    // until RML server notifies it about connection termination, and this\n    // notification is fired only after all workers return into RML.\n    //\n    // Thus if we decremented refcount to zero we ask the market to check arena\n    // state (including the fact if it is alive) under the lock.\n    //\n    uintptr_t aba_epoch = my_aba_epoch;\n    market* m = my_market;\n    __TBB_ASSERT(my_references > int(!is_master), \"broken arena reference counter\");\n    if ( (my_references -= is_master? 1:2 ) == 0 ) // worker's counter starts from bit 1\n        market::try_destroy_arena( m, this, aba_epoch, is_master );\n}\n\ntemplate<bool Spawned> void arena::advertise_new_work() {\n    if( !Spawned ) { // i.e. 
the work was enqueued\n        if( my_max_num_workers==0 ) {\n            my_max_num_workers = 1;\n            __TBB_ASSERT(!my_mandatory_concurrency, \"\");\n            my_mandatory_concurrency = true;\n            __TBB_ASSERT(!num_workers_active(), \"\");\n            my_pool_state = SNAPSHOT_FULL;\n            my_market->adjust_demand( *this, 1 );\n            return;\n        }\n        // Local memory fence is required to avoid missed wakeups; see the comment below.\n        // Starvation resistant tasks require mandatory concurrency, so missed wakeups are unacceptable.\n        atomic_fence(); \n    }\n    // Double-check idiom that, in case of spawning, is deliberately sloppy about memory fences.\n    // Technically, to avoid missed wakeups, there should be a full memory fence between the point we \n    // released the task pool (i.e. spawned task) and read the arena's state.  However, adding such a \n    // fence might hurt overall performance more than it helps, because the fence would be executed \n    // on every task pool release, even when stealing does not occur.  Since TBB allows parallelism, \n    // but never promises parallelism, the missed wakeup is not a correctness problem.\n    pool_state_t snapshot = my_pool_state;\n    if( is_busy_or_empty(snapshot) ) {\n        // Attempt to mark as full.  The compare_and_swap below is a little unusual because the \n        // result is compared to a value that can be different than the comparand argument.\n        if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) {\n            if( snapshot!=SNAPSHOT_EMPTY ) {\n                // This thread read \"busy\" into snapshot, and then another thread transitioned \n                // my_pool_state to \"empty\" in the meantime, which caused the compare_and_swap above \n                // to fail.  
Attempt to transition my_pool_state from \"empty\" to \"full\".\n                if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) {\n                    // Some other thread transitioned my_pool_state from \"empty\", and hence became\n                    // responsible for waking up workers.\n                    return;\n                }\n            }\n            // This thread transitioned pool from empty to full state, and thus is responsible for\n            // telling RML that there is work to do.\n            if( Spawned ) {\n                if( my_mandatory_concurrency ) {\n                    __TBB_ASSERT(my_max_num_workers==1, \"\");\n                    __TBB_ASSERT(!governor::local_scheduler()->is_worker(), \"\");\n                    // There was deliberate oversubscription on 1 core for sake of starvation-resistant tasks.\n                    // Now a single active thread (must be the master) supposedly starts a new parallel region\n                    // with relaxed sequential semantics, and oversubscription should be avoided.\n                    // Demand for workers has been decreased to 0 during SNAPSHOT_EMPTY, so just keep it.\n                    my_max_num_workers = 0;\n                    my_mandatory_concurrency = false;\n                    return;\n                }\n            }\n            my_market->adjust_demand( *this, my_max_num_workers );\n        }\n    }\n}\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_arena_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/atomic.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_atomic_H\n#define __TBB_atomic_H\n\n#include <cstddef>\n\n#if _MSC_VER\n#define __TBB_LONG_LONG __int64\n#else\n#define __TBB_LONG_LONG long long\n#endif /* _MSC_VER */\n\n#include \"tbb_machine.h\"\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warnings\n    #pragma warning (push)\n    #pragma warning (disable: 4244 4267 4512)\n#endif\n\nnamespace tbb {\n\n//! Specifies memory semantics.\nenum memory_semantics {\n    //! Sequential consistency\n    full_fence,\n    //! Acquire\n    acquire,\n    //! 
Release\n    release,\n    //! No ordering\n    relaxed\n};\n\n//! @cond INTERNAL\nnamespace internal {\n\n#if __TBB_ATTRIBUTE_ALIGNED_PRESENT\n    #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));\n#elif __TBB_DECLSPEC_ALIGN_PRESENT\n    #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;\n#else\n    #error Do not know syntax for forcing alignment.\n#endif\n\ntemplate<size_t S>\nstruct atomic_rep;           // Primary template declared, but never defined.\n\ntemplate<>\nstruct atomic_rep<1> {       // Specialization\n    typedef int8_t word;\n};\ntemplate<>\nstruct atomic_rep<2> {       // Specialization\n    typedef int16_t word;\n};\ntemplate<>\nstruct atomic_rep<4> {       // Specialization\n#if _MSC_VER && !_WIN64\n    // Work-around that avoids spurious /Wp64 warnings\n    typedef intptr_t word;\n#else\n    typedef int32_t word;\n#endif\n};\n#if __TBB_64BIT_ATOMICS\ntemplate<>\nstruct atomic_rep<8> {       // Specialization\n    typedef int64_t word;\n};\n#endif\n\ntemplate<typename value_type, size_t size>\nstruct aligned_storage;\n\n//the specializations are needed to please MSVC syntax of __declspec(align()) which accept _literal_ constants only\n#if __TBB_ATOMIC_CTORS\n    #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S)                  \\\n    template<typename value_type>                                     \\\n    struct aligned_storage<value_type,S> {                            \\\n        __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S)                \\\n        aligned_storage() = default ;                                 \\\n        constexpr aligned_storage(value_type value):my_value(value){} \\\n    };                                                                \\\n\n#else\n    #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S)                  \\\n    template<typename value_type>                                     \\\n    struct aligned_storage<value_type,S> {                            \\\n        
__TBB_DECL_ATOMIC_FIELD(value_type,my_value,S)                \\\n    };                                                                \\\n\n#endif\n\ntemplate<typename value_type>\nstruct aligned_storage<value_type,1> {\n    value_type my_value;\n#if __TBB_ATOMIC_CTORS\n    aligned_storage() = default ;\n    constexpr aligned_storage(value_type value):my_value(value){}\n#endif\n};\n\nATOMIC_STORAGE_PARTIAL_SPECIALIZATION(2)\nATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4)\n#if __TBB_64BIT_ATOMICS\nATOMIC_STORAGE_PARTIAL_SPECIALIZATION(8)\n#endif\n\ntemplate<size_t Size, memory_semantics M>\nstruct atomic_traits;        // Primary template declared, but not defined.\n\n#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                                                         \\\n    template<> struct atomic_traits<S,M> {                                                               \\\n        typedef atomic_rep<S>::word word;                                                                \\\n        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \\\n            return __TBB_machine_cmpswp##S##M(location,new_value,comparand);                             \\\n        }                                                                                                \\\n        inline static word fetch_and_add( volatile void* location, word addend ) {                       \\\n            return __TBB_machine_fetchadd##S##M(location,addend);                                        \\\n        }                                                                                                \\\n        inline static word fetch_and_store( volatile void* location, word value ) {                      \\\n            return __TBB_machine_fetchstore##S##M(location,value);                                       \\\n        }                                                                                                \\\n    };\n\n#define 
__TBB_DECL_ATOMIC_PRIMITIVES(S)                                                                  \\\n    template<memory_semantics M>                                                                         \\\n    struct atomic_traits<S,M> {                                                                          \\\n        typedef atomic_rep<S>::word word;                                                                \\\n        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \\\n            return __TBB_machine_cmpswp##S(location,new_value,comparand);                                \\\n        }                                                                                                \\\n        inline static word fetch_and_add( volatile void* location, word addend ) {                       \\\n            return __TBB_machine_fetchadd##S(location,addend);                                           \\\n        }                                                                                                \\\n        inline static word fetch_and_store( volatile void* location, word value ) {                      \\\n            return __TBB_machine_fetchstore##S(location,value);                                          \\\n        }                                                                                                \\\n    };\n\ntemplate<memory_semantics M>\nstruct atomic_load_store_traits;    // Primary template declaration\n\n#define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M)                      \\\n    template<> struct atomic_load_store_traits<M> {                     \\\n        template <typename T>                                           \\\n        inline static T load( const volatile T& location ) {            \\\n            return __TBB_load_##M( location );                          \\\n        }                                                               \\\n        template 
<typename T>                                           \\\n        inline static void store( volatile T& location, T value ) {     \\\n            __TBB_store_##M( location, value );                         \\\n        }                                                               \\\n    }\n\n#if __TBB_USE_FENCED_ATOMICS\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed)\n#if __TBB_64BIT_ATOMICS\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)\n__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed)\n#endif\n#else /* !__TBB_USE_FENCED_ATOMICS */\n__TBB_DECL_ATOMIC_PRIMITIVES(1)\n__TBB_DECL_ATOMIC_PRIMITIVES(2)\n__TBB_DECL_ATOMIC_PRIMITIVES(4)\n#if __TBB_64BIT_ATOMICS\n__TBB_DECL_ATOMIC_PRIMITIVES(8)\n#endif\n#endif /* !__TBB_USE_FENCED_ATOMICS */\n\n__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence);\n__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire);\n__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release);\n__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed);\n\n//! Additive inverse of 1 for type T.\n/** Various compilers issue various warnings if -1 is used with various integer types.\n    The baroque expression below avoids all the warnings (we hope). */\n#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))\n\n//! 
Base class that provides basic functionality for atomic<T> without fetch_and_add.\n/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor,\n    and can be copied/compared by memcpy/memcmp. */\ntemplate<typename T>\nstruct atomic_impl {\nprotected:\n    aligned_storage<T,sizeof(T)> my_storage;\nprivate:\n    //TODO: rechecks on recent versions of gcc if union is still the _only_ way to do a conversion without warnings\n    //! Union type used to convert type T to underlying integral type.\n    template<typename value_type>\n    union converter {\n        typedef typename atomic_rep<sizeof(value_type)>::word bits_type;\n        converter(){}\n        converter(value_type a_value) : value(a_value) {}\n        value_type value;\n        bits_type bits;\n    };\n\n    template<typename value_t>\n    static typename converter<value_t>::bits_type to_bits(value_t value){\n        return converter<value_t>(value).bits;\n    }\n    template<typename value_t>\n    static value_t to_value(typename converter<value_t>::bits_type bits){\n        converter<value_t> u;\n        u.bits = bits;\n        return u.value;\n    }\n\n    template<typename value_t>\n    union ptr_converter;            //Primary template declared, but never defined.\n\n    template<typename value_t>\n    union ptr_converter<value_t *> {\n        ptr_converter(){}\n        ptr_converter(value_t* a_value) : value(a_value) {}\n        value_t* value;\n        uintptr_t bits;\n    };\n    //TODO: check if making to_bits accepting reference (thus unifying it with to_bits_ref)\n    //does not hurt performance\n    template<typename value_t>\n    static typename converter<value_t>::bits_type & to_bits_ref(value_t& value){\n        //TODO: this #ifdef is temporary workaround, as union conversion seems to fail\n        //on suncc for 64 bit types for 32 bit target\n        #if !__SUNPRO_CC\n            return *(typename 
converter<value_t>::bits_type*)ptr_converter<value_t*>(&value).bits;\n        #else\n            return *(typename converter<value_t>::bits_type*)(&value);\n        #endif\n    }\n\n\npublic:\n    typedef T value_type;\n\n#if __TBB_ATOMIC_CTORS\n    atomic_impl() = default ;\n    constexpr atomic_impl(value_type value):my_storage(value){}\n#endif\n    template<memory_semantics M>\n    value_type fetch_and_store( value_type value ) {\n          return to_value<value_type>(\n                  internal::atomic_traits<sizeof(value_type),M>::fetch_and_store( &my_storage.my_value, to_bits(value) )\n          );\n    }\n\n    value_type fetch_and_store( value_type value ) {\n        return fetch_and_store<full_fence>(value);\n    }\n\n    template<memory_semantics M>\n    value_type compare_and_swap( value_type value, value_type comparand ) {\n        return to_value<value_type>(\n                internal::atomic_traits<sizeof(value_type),M>::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) )\n        );\n    }\n\n    value_type compare_and_swap( value_type value, value_type comparand ) {\n        return compare_and_swap<full_fence>(value,comparand);\n    }\n\n    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility\n        return  to_value<value_type>(\n                __TBB_load_with_acquire( to_bits_ref(my_storage.my_value) )\n        );\n    }\n\n    template<memory_semantics M>\n    value_type load () const {\n        return to_value<value_type>(\n                internal::atomic_load_store_traits<M>::load( to_bits_ref(my_storage.my_value) )\n        );\n    }\n\n    value_type load () const {\n        return load<acquire>();\n    }\n\n    template<memory_semantics M>\n    void store ( value_type value ) {\n        internal::atomic_load_store_traits<M>::store( to_bits_ref(my_storage.my_value), to_bits(value));\n    }\n\n    void store ( value_type value ) {\n        store<release>( 
value );\n    }\n\nprotected:\n    value_type store_with_release( value_type rhs ) {\n       //TODO: unify with store<release>\n        __TBB_store_with_release( to_bits_ref(my_storage.my_value), to_bits(rhs) );\n        return rhs;\n    }\n};\n\n//! Base class that provides basic functionality for atomic<T> with fetch_and_add.\n/** I is the underlying type.\n    D is the difference type.\n    StepType should be char if I is an integral type, and T if I is a T*. */\ntemplate<typename I, typename D, typename StepType>\nstruct atomic_impl_with_arithmetic: atomic_impl<I> {\npublic:\n    typedef I value_type;\n#if    __TBB_ATOMIC_CTORS\n    atomic_impl_with_arithmetic() = default ;\n    constexpr atomic_impl_with_arithmetic(value_type value): atomic_impl<I>(value){}\n#endif\n    template<memory_semantics M>\n    value_type fetch_and_add( D addend ) {\n        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->my_storage.my_value, addend*sizeof(StepType) ));\n    }\n\n    value_type fetch_and_add( D addend ) {\n        return fetch_and_add<full_fence>(addend);\n    }\n\n    template<memory_semantics M>\n    value_type fetch_and_increment() {\n        return fetch_and_add<M>(1);\n    }\n\n    value_type fetch_and_increment() {\n        return fetch_and_add(1);\n    }\n\n    template<memory_semantics M>\n    value_type fetch_and_decrement() {\n        return fetch_and_add<M>(__TBB_MINUS_ONE(D));\n    }\n\n    value_type fetch_and_decrement() {\n        return fetch_and_add(__TBB_MINUS_ONE(D));\n    }\n\npublic:\n    value_type operator+=( D value ) {\n        return fetch_and_add(value)+value;\n    }\n\n    value_type operator-=( D value ) {\n        // Additive inverse of value computed using binary minus,\n        // instead of unary minus, for sake of avoiding compiler warnings.\n        return operator+=(D(0)-value);\n    }\n\n    value_type operator++() {\n        return fetch_and_add(1)+1;\n    }\n\n    value_type operator--() {\n 
       return fetch_and_add(__TBB_MINUS_ONE(D))-1;\n    }\n\n    value_type operator++(int) {\n        return fetch_and_add(1);\n    }\n\n    value_type operator--(int) {\n        return fetch_and_add(__TBB_MINUS_ONE(D));\n    }\n};\n\n} /* Internal */\n//! @endcond\n\n//! Primary template for atomic.\n/** See the Reference for details.\n    @ingroup synchronization */\ntemplate<typename T>\nstruct atomic: internal::atomic_impl<T> {\n#if __TBB_ATOMIC_CTORS\n    atomic() = default;\n    constexpr atomic(T arg): internal::atomic_impl<T>(arg) {}\n#endif\n    T operator=( T rhs ) {\n        // \"this\" required here in strict ISO C++ because store_with_release is a dependent name\n        return this->store_with_release(rhs);\n    }\n    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}\n};\n\n#if __TBB_ATOMIC_CTORS\n    #define __TBB_DECL_ATOMIC(T)                                                                    \\\n        template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {              \\\n            atomic() = default;                                                                     \\\n            constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {}        \\\n                                                                                                    \\\n            T operator=( T rhs ) {return store_with_release(rhs);}                                  \\\n            atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}   \\\n        };\n#else\n    #define __TBB_DECL_ATOMIC(T)                                                                    \\\n        template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {              \\\n            T operator=( T rhs ) {return store_with_release(rhs);}                                  \\\n            atomic<T>& operator=( const atomic<T>& rhs ) 
{store_with_release(rhs); return *this;}   \\\n        };\n#endif\n\n#if __TBB_64BIT_ATOMICS\n//TODO: consider adding non-default (and atomic) copy constructor for 32bit platform\n__TBB_DECL_ATOMIC(__TBB_LONG_LONG)\n__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)\n#else\n// test_atomic will verify that sizeof(long long)==8\n#endif\n__TBB_DECL_ATOMIC(long)\n__TBB_DECL_ATOMIC(unsigned long)\n\n#if _MSC_VER && !_WIN64\n#if __TBB_ATOMIC_CTORS\n/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.\n   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)\n   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be\n   type synonyms on the platform.  Type U should be the wider variant of T from the\n   perspective of /Wp64. */\n#define __TBB_DECL_ATOMIC_ALT(T,U) \\\n    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {             \\\n        atomic() = default ;                                                                   \\\n        constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {}       \\\n        T operator=( U rhs ) {return store_with_release(T(rhs));}                              \\\n        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \\\n    };\n#else\n#define __TBB_DECL_ATOMIC_ALT(T,U) \\\n    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {             \\\n        T operator=( U rhs ) {return store_with_release(T(rhs));}                              \\\n        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \\\n    };\n#endif\n__TBB_DECL_ATOMIC_ALT(unsigned,size_t)\n__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)\n#else\n__TBB_DECL_ATOMIC(unsigned)\n__TBB_DECL_ATOMIC(int)\n#endif /* _MSC_VER && !_WIN64 */\n\n__TBB_DECL_ATOMIC(unsigned 
short)\n__TBB_DECL_ATOMIC(short)\n__TBB_DECL_ATOMIC(char)\n__TBB_DECL_ATOMIC(signed char)\n__TBB_DECL_ATOMIC(unsigned char)\n\n#if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED)\n__TBB_DECL_ATOMIC(wchar_t)\n#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */\n\n//! Specialization for atomic<T*> with arithmetic and operator->.\ntemplate<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {\n#if __TBB_ATOMIC_CTORS\n    atomic() = default ;\n    constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(arg) {}\n#endif\n    T* operator=( T* rhs ) {\n        // \"this\" required here in strict ISO C++ because store_with_release is a dependent name\n        return this->store_with_release(rhs);\n    }\n    atomic<T*>& operator=( const atomic<T*>& rhs ) {\n        this->store_with_release(rhs); return *this;\n    }\n    T* operator->() const {\n        return (*this);\n    }\n};\n\n//! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.\ntemplate<> struct atomic<void*>: internal::atomic_impl<void*> {\n#if __TBB_ATOMIC_CTORS\n    atomic() = default ;\n    constexpr atomic(void* arg): internal::atomic_impl<void*>(arg) {}\n#endif\n    void* operator=( void* rhs ) {\n        // \"this\" required here in strict ISO C++ because store_with_release is a dependent name\n        return this->store_with_release(rhs);\n    }\n    atomic<void*>& operator=( const atomic<void*>& rhs ) {\n        this->store_with_release(rhs); return *this;\n    }\n};\n\n// Helpers to workaround ugly syntax of calling template member function of a\n// template class with template argument dependent on template parameters.\n\ntemplate <memory_semantics M, typename T>\nT load ( const atomic<T>& a ) { return a.template load<M>(); }\n\ntemplate <memory_semantics M, typename T>\nvoid store ( atomic<T>& a, T value ) { a.template store<M>(value); }\n\nnamespace interface6{\n//! 
Make an atomic for use in an initialization (list), as an alternative to zero-initialization or normal assignment.\ntemplate<typename T>\natomic<T> make_atomic(T t) {\n    atomic<T> a;\n    store<relaxed>(a,t);\n    return a;\n}\n}\nusing interface6::make_atomic;\n\nnamespace internal {\ntemplate<memory_semantics M, typename T >\nvoid swap(atomic<T> & lhs, atomic<T> & rhs){\n    T tmp = load<M>(lhs);\n    store<M>(lhs,load<M>(rhs));\n    store<M>(rhs,tmp);\n}\n\n// only to aid in the gradual conversion of ordinary variables to proper atomics\ntemplate<typename T>\ninline atomic<T>& as_atomic( T& t ) {\n    return (atomic<T>&)t;\n}\n} // namespace tbb::internal\n\n} // namespace tbb\n\n#if _MSC_VER && !__INTEL_COMPILER\n    #pragma warning (pop)\n#endif // warnings 4244, 4267 are back\n\n#endif /* __TBB_atomic_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/blocked_range.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_blocked_range_H\n#define __TBB_blocked_range_H\n\n#include \"tbb_stddef.h\"\n\nnamespace tbb {\n\n/** \\page range_req Requirements on range concept\n    Class \\c R implementing the concept of range must define:\n    - \\code R::R( const R& ); \\endcode               Copy constructor\n    - \\code R::~R(); \\endcode                        Destructor\n    - \\code bool R::is_divisible() const; \\endcode   True if range can be partitioned into two subranges\n    - \\code bool R::empty() const; \\endcode          True if range is empty\n    - \\code R::R( R& r, split ); \\endcode            Split range \\c r into two subranges.\n**/\n\n//! A range over which to iterate.\n/** @ingroup algorithms */\ntemplate<typename Value>\nclass blocked_range {\npublic:\n    //! Type of a value\n    /** Called a const_iterator for sake of algorithms that need to treat a blocked_range\n        as an STL container. */\n    typedef Value const_iterator;\n\n    //! Type for size of a range\n    typedef std::size_t size_type;\n\n    //! Construct range with default-constructed values for begin and end.\n    /** Requires that Value have a default constructor. */\n    blocked_range() : my_end(), my_begin() {}\n\n    //! Construct range over half-open interval [begin,end), with the given grainsize.\n    blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) :\n        my_end(end_), my_begin(begin_), my_grainsize(grainsize_)\n    {\n        __TBB_ASSERT( my_grainsize>0, \"grainsize must be positive\" );\n    }\n\n    //! Beginning of range.\n    const_iterator begin() const {return my_begin;}\n\n    //! One past last value in range.\n    const_iterator end() const {return my_end;}\n\n    //! Size of the range\n    /** Unspecified if end()<begin(). 
*/\n    size_type size() const {\n        __TBB_ASSERT( !(end()<begin()), \"size() unspecified if end()<begin()\" );\n        return size_type(my_end-my_begin);\n    }\n\n    //! The grain size for this range.\n    size_type grainsize() const {return my_grainsize;}\n\n    //------------------------------------------------------------------------\n    // Methods that implement Range concept\n    //------------------------------------------------------------------------\n\n    //! True if range is empty.\n    bool empty() const {return !(my_begin<my_end);}\n\n    //! True if range is divisible.\n    /** Unspecified if end()<begin(). */\n    bool is_divisible() const {return my_grainsize<size();}\n\n    //! Split range.\n    /** The new Range *this has the second part, the old range r has the first part.\n        Unspecified if end()<begin() or !is_divisible(). */\n    blocked_range( blocked_range& r, split ) :\n        my_end(r.my_end),\n        my_begin(do_split(r, split())),\n        my_grainsize(r.my_grainsize)\n    {\n        // only comparison 'less than' is required from values of blocked_range objects\n        __TBB_ASSERT( !(my_begin < r.my_end) && !(r.my_end < my_begin), \"blocked_range has been split incorrectly\" );\n    }\n\n#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES\n    //! Static field to support proportional split\n    static const bool is_divisible_in_proportion = true;\n\n    //! Split range.\n    /** The new Range *this has the second part split according to specified proportion, the old range r has the first part.\n        Unspecified if end()<begin() or !is_divisible(). 
*/\n    blocked_range( blocked_range& r, proportional_split& proportion ) :\n        my_end(r.my_end),\n        my_begin(do_split(r, proportion)),\n        my_grainsize(r.my_grainsize)\n    {\n        // only comparison 'less than' is required from values of blocked_range objects\n        __TBB_ASSERT( !(my_begin < r.my_end) && !(r.my_end < my_begin), \"blocked_range has been split incorrectly\" );\n    }\n#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */\n\nprivate:\n    /** NOTE: my_end MUST be declared before my_begin, otherwise the forking constructor will break. */\n    Value my_end;\n    Value my_begin;\n    size_type my_grainsize;\n\n    //! Auxiliary function used by forking constructor.\n    /** Using this function lets us not require that Value support assignment or default construction. */\n    static Value do_split( blocked_range& r, split )\n    {\n        __TBB_ASSERT( r.is_divisible(), \"cannot split blocked_range that is not divisible\" );\n        Value middle = r.my_begin + (r.my_end - r.my_begin) / 2u;\n        r.my_end = middle;\n        return middle;\n    }\n\n#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES\n    static Value do_split( blocked_range& r, proportional_split& proportion )\n    {\n        __TBB_ASSERT( r.is_divisible(), \"cannot split blocked_range that is not divisible\" );\n\n        // usage of 32-bit floating point arithmetic is not enough to handle ranges of\n        // more than 2^24 iterations accurately. However, even on ranges with 2^64\n        // iterations the computational error approximately equals to 0.000001% which\n        // makes small impact on uniform distribution of such range's iterations (assuming\n        // all iterations take equal time to complete). 
See 'test_partitioner_whitebox'\n        // for implementation of an exact split algorithm\n        size_type right_part = size_type(float(r.size()) * float(proportion.right())\n                                         / float(proportion.left() + proportion.right()) + 0.5f);\n        return r.my_end = Value(r.my_end - right_part);\n    }\n#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */\n\n    template<typename RowValue, typename ColValue>\n    friend class blocked_range2d;\n\n    template<typename RowValue, typename ColValue, typename PageValue>\n    friend class blocked_range3d;\n};\n\n} // namespace tbb\n\n#endif /* __TBB_blocked_range_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/blocked_range2d.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_blocked_range2d_H\n#define __TBB_blocked_range2d_H\n\n#include \"tbb_stddef.h\"\n#include \"blocked_range.h\"\n\nnamespace tbb {\n\n//! A 2-dimensional range that models the Range concept.\n/** @ingroup algorithms */\ntemplate<typename RowValue, typename ColValue=RowValue>\nclass blocked_range2d {\npublic:\n    //! 
Type for size of an iteration range\n    typedef blocked_range<RowValue> row_range_type;\n    typedef blocked_range<ColValue> col_range_type;\n\nprivate:\n    row_range_type my_rows;\n    col_range_type my_cols;\n\npublic:\n\n    blocked_range2d( RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize,\n                     ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) :\n        my_rows(row_begin,row_end,row_grainsize),\n        my_cols(col_begin,col_end,col_grainsize)\n    {\n    }\n\n    blocked_range2d( RowValue row_begin, RowValue row_end,\n                     ColValue col_begin, ColValue col_end ) :\n        my_rows(row_begin,row_end),\n        my_cols(col_begin,col_end)\n    {\n    }\n\n    //! True if range is empty\n    bool empty() const {\n        // Yes, it is a logical OR here, not AND.\n        return my_rows.empty() || my_cols.empty();\n    }\n\n    //! True if range is divisible into two pieces.\n    bool is_divisible() const {\n        return my_rows.is_divisible() || my_cols.is_divisible();\n    }\n\n    blocked_range2d( blocked_range2d& r, split ) :\n        my_rows(r.my_rows),\n        my_cols(r.my_cols)\n    {\n        split split_obj;\n        do_split(r, split_obj);\n    }\n\n#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES\n    //! 
Static field to support proportional split\n    static const bool is_divisible_in_proportion = true;\n\n    blocked_range2d( blocked_range2d& r, proportional_split& proportion ) :\n        my_rows(r.my_rows),\n        my_cols(r.my_cols)\n    {\n        do_split(r, proportion);\n    }\n#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */\n\n    template <typename Split>\n    void do_split( blocked_range2d& r, Split& split_obj )\n    {\n        if( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) {\n            my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj);\n        } else {\n            my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj);\n        }\n    }\n\n    //! The rows of the iteration space\n    const row_range_type& rows() const {return my_rows;}\n\n    //! The columns of the iteration space\n    const col_range_type& cols() const {return my_cols;}\n};\n\n} // namespace tbb\n\n#endif /* __TBB_blocked_range2d_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/blocked_range3d.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_blocked_range3d_H\n#define __TBB_blocked_range3d_H\n\n#include \"tbb_stddef.h\"\n#include \"blocked_range.h\"\n\nnamespace tbb {\n\n//! A 3-dimensional range that models the Range concept.\n/** @ingroup algorithms */\ntemplate<typename PageValue, typename RowValue=PageValue, typename ColValue=RowValue>\nclass blocked_range3d {\npublic:\n    //! 
Type for size of an iteration range\n    typedef blocked_range<PageValue> page_range_type;\n    typedef blocked_range<RowValue>  row_range_type;\n    typedef blocked_range<ColValue>  col_range_type;\n\nprivate:\n    page_range_type my_pages;\n    row_range_type  my_rows;\n    col_range_type  my_cols;\n\npublic:\n\n    blocked_range3d( PageValue page_begin, PageValue page_end,\n                     RowValue  row_begin,  RowValue row_end,\n                     ColValue  col_begin,  ColValue col_end ) :\n        my_pages(page_begin,page_end),\n        my_rows(row_begin,row_end),\n        my_cols(col_begin,col_end)\n    {\n    }\n\n    blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize,\n                     RowValue  row_begin,  RowValue row_end,   typename row_range_type::size_type row_grainsize,\n                     ColValue  col_begin,  ColValue col_end,   typename col_range_type::size_type col_grainsize ) :\n        my_pages(page_begin,page_end,page_grainsize),\n        my_rows(row_begin,row_end,row_grainsize),\n        my_cols(col_begin,col_end,col_grainsize)\n    {\n    }\n\n    //! True if range is empty\n    bool empty() const {\n        // Yes, it is a logical OR here, not AND.\n        return my_pages.empty() || my_rows.empty() || my_cols.empty();\n    }\n\n    //! True if range is divisible into two pieces.\n    bool is_divisible() const {\n        return  my_pages.is_divisible() || my_rows.is_divisible() || my_cols.is_divisible();\n    }\n\n    blocked_range3d( blocked_range3d& r, split ) :\n        my_pages(r.my_pages),\n        my_rows(r.my_rows),\n        my_cols(r.my_cols)\n    {\n        split split_obj;\n        do_split(r, split_obj);\n    }\n\n#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES\n    //! 
Static field to support proportional split\n    static const bool is_divisible_in_proportion = true;\n\n    blocked_range3d( blocked_range3d& r, proportional_split& proportion ) :\n        my_pages(r.my_pages),\n        my_rows(r.my_rows),\n        my_cols(r.my_cols)\n    {\n        do_split(r, proportion);\n    }\n#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */\n\n    template <typename Split>\n    void do_split( blocked_range3d& r, Split& split_obj)\n    {\n        if ( my_pages.size()*double(my_rows.grainsize()) < my_rows.size()*double(my_pages.grainsize()) ) {\n            if ( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) {\n                my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj);\n            } else {\n                my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj);\n            }\n\t} else {\n            if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) {\n                my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj);\n            } else {\n                my_pages.my_begin = page_range_type::do_split(r.my_pages, split_obj);\n            }\n        }\n    }\n\n    //! The pages of the iteration space\n    const page_range_type& pages() const {return my_pages;}\n\n    //! The rows of the iteration space\n    const row_range_type& rows() const {return my_rows;}\n\n    //! The columns of the iteration space\n    const col_range_type& cols() const {return my_cols;}\n\n};\n\n} // namespace tbb\n\n#endif /* __TBB_blocked_range3d_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/cache_aligned_allocator.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n#include \"tbb/cache_aligned_allocator.h\"\n#include \"tbb/tbb_allocator.h\"\n#include \"tbb/tbb_exception.h\"\n#include \"tbb_misc.h\"\n#include \"dynamic_link.h\"\n#include <cstdlib>\n\n#if _WIN32||_WIN64\n#include \"tbb/machine/windows_api.h\"\n#else\n#include <dlfcn.h>\n#endif /* _WIN32||_WIN64 */\n\nusing namespace std;\n\n#if __TBB_WEAK_SYMBOLS_PRESENT\n\n#pragma weak scalable_malloc\n#pragma weak scalable_free\n#pragma weak scalable_aligned_malloc\n#pragma weak scalable_aligned_free\n\nextern \"C\" {\n    void* scalable_malloc( size_t );\n    void  scalable_free( void* );\n    void* scalable_aligned_malloc( size_t, size_t );\n    void  scalable_aligned_free( void* );\n}\n\n#endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n\nnamespace tbb {\n\nnamespace internal {\n\n//! Dummy routine used for first indirect call via MallocHandler.\nstatic void* DummyMalloc( size_t size );\n\n//! Dummy routine used for first indirect call via FreeHandler.\nstatic void DummyFree( void * ptr );\n\n//! Handler for memory allocation\nstatic void* (*MallocHandler)( size_t size ) = &DummyMalloc;\n\n//! Handler for memory deallocation\nstatic void (*FreeHandler)( void* pointer ) = &DummyFree;\n\n//! Dummy routine used for first indirect call via padded_allocate_handler.\nstatic void* dummy_padded_allocate( size_t bytes, size_t alignment );\n\n//! Dummy routine used for first indirect call via padded_free_handler.\nstatic void dummy_padded_free( void * ptr );\n\n// ! Allocates memory using standard malloc. It is used when scalable_allocator is not available\nstatic void* padded_allocate( size_t bytes, size_t alignment );\n\n// ! Allocates memory using standard free. It is used when scalable_allocator is not available\nstatic void padded_free( void* p );\n\n//! 
Handler for padded memory allocation\nstatic void* (*padded_allocate_handler)( size_t bytes, size_t alignment ) = &dummy_padded_allocate;\n\n//! Handler for padded memory deallocation\nstatic void (*padded_free_handler)( void* p ) = &dummy_padded_free;\n\n//! Table describing how to link the handlers.\nstatic const dynamic_link_descriptor MallocLinkTable[] = {\n    DLD(scalable_malloc, MallocHandler),\n    DLD(scalable_free, FreeHandler),\n    DLD(scalable_aligned_malloc, padded_allocate_handler),\n    DLD(scalable_aligned_free, padded_free_handler),\n};\n\n\n#if TBB_USE_DEBUG\n#define DEBUG_SUFFIX \"_debug\"\n#else\n#define DEBUG_SUFFIX\n#endif /* TBB_USE_DEBUG */\n\n// MALLOCLIB_NAME is the name of the TBB memory allocator library.\n#if _WIN32||_WIN64\n#define MALLOCLIB_NAME \"tbbmalloc\" DEBUG_SUFFIX \".dll\"\n#elif __APPLE__\n#define MALLOCLIB_NAME \"libtbbmalloc\" DEBUG_SUFFIX \".dylib\"\n#elif __FreeBSD__ || __NetBSD__ || __sun || _AIX || __ANDROID__\n#define MALLOCLIB_NAME \"libtbbmalloc\" DEBUG_SUFFIX \".so\"\n#elif __linux__  // Note that order of these #elif's is important!\n#define MALLOCLIB_NAME \"libtbbmalloc\" DEBUG_SUFFIX  __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION)\n#else\n#error Unknown OS\n#endif\n\n//! Initialize the allocation/free handler pointers.\n/** Caller is responsible for ensuring this routine is called exactly once.\n    The routine attempts to dynamically link with the TBB memory allocator.\n    If that allocator is not found, it links to malloc and free. 
*/\nvoid initialize_handler_pointers() {\n    __TBB_ASSERT( MallocHandler==&DummyMalloc, NULL );\n    bool success = dynamic_link( MALLOCLIB_NAME, MallocLinkTable, 4 );\n    if( !success ) {\n        // If unsuccessful, set the handlers to the default routines.\n        // This must be done now, and not before FillDynamicLinks runs, because if other\n        // threads call the handlers, we want them to go through the DoOneTimeInitializations logic,\n        // which forces them to wait.\n        FreeHandler = &free;\n        MallocHandler = &malloc;\n        padded_allocate_handler = &padded_allocate;\n        padded_free_handler = &padded_free;\n    }\n#if !__TBB_RML_STATIC\n    PrintExtraVersionInfo( \"ALLOCATOR\", success?\"scalable_malloc\":\"malloc\" );\n#endif\n}\n\nstatic tbb::atomic<do_once_state> initialization_state;\nvoid initialize_cache_aligned_allocator() {\n    atomic_do_once( &initialize_handler_pointers, initialization_state );\n}\n\n//! Executed on very first call through MallocHandler\nstatic void* DummyMalloc( size_t size ) {\n    initialize_cache_aligned_allocator();\n    __TBB_ASSERT( MallocHandler!=&DummyMalloc, NULL );\n    return (*MallocHandler)( size );\n}\n\n//! Executed on very first call through FreeHandler\nstatic void DummyFree( void * ptr ) {\n    initialize_cache_aligned_allocator();\n    __TBB_ASSERT( FreeHandler!=&DummyFree, NULL );\n    (*FreeHandler)( ptr );\n}\n\n//! Executed on very first call through padded_allocate_handler\nstatic void* dummy_padded_allocate( size_t bytes, size_t alignment ) {\n    initialize_cache_aligned_allocator();\n    __TBB_ASSERT( padded_allocate_handler!=&dummy_padded_allocate, NULL );\n    return (*padded_allocate_handler)(bytes, alignment);\n}\n\n//! 
Executed on very first call through padded_free_handler\nstatic void dummy_padded_free( void * ptr ) {\n    initialize_cache_aligned_allocator();\n    __TBB_ASSERT( padded_free_handler!=&dummy_padded_free, NULL );\n    (*padded_free_handler)( ptr );\n}    \n\nstatic size_t NFS_LineSize = 128;\n\nsize_t NFS_GetLineSize() {\n    return NFS_LineSize;\n}\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // unary minus operator applied to unsigned type, result still unsigned\n    #pragma warning( disable: 4146 4706 )\n#endif\n\nvoid* NFS_Allocate( size_t n, size_t element_size, void* /*hint*/ ) {\n    size_t m = NFS_LineSize;\n    __TBB_ASSERT( m<=NFS_MaxLineSize, \"illegal value for NFS_LineSize\" );\n    __TBB_ASSERT( (m & (m-1))==0, \"must be power of two\" );\n    size_t bytes = n*element_size;\n\n    if (bytes<n || bytes+m<bytes) {\n        // Overflow\n        throw_exception(eid_bad_alloc);\n    }\n    // scalable_aligned_malloc considers zero size request an error, and returns NULL\n    if (bytes==0) bytes = 1;\n    \n    void* result = (*padded_allocate_handler)( bytes, m );\n    if (!result)\n        throw_exception(eid_bad_alloc);\n\n    __TBB_ASSERT( ((size_t)result&(m-1)) == 0, \"The address returned isn't aligned to cache line size\" );\n    return result;\n}\n\nvoid NFS_Free( void* p ) {\n    (*padded_free_handler)( p );\n}\n\nstatic void* padded_allocate( size_t bytes, size_t alignment ) {    \n    unsigned char* result = NULL;\n    unsigned char* base = (unsigned char*)malloc(alignment+bytes);\n    if( base ) {        \n        // Round up to the next line\n        result = (unsigned char*)((uintptr_t)(base+alignment)&-alignment);\n        // Record where block actually starts.\n        ((uintptr_t*)result)[-1] = uintptr_t(base);\n    }\n    return result;    \n}\n\nstatic void padded_free( void* p ) {\n    if( p ) {\n        __TBB_ASSERT( (uintptr_t)p>=0x4096, \"attempt to free block not obtained from cache_aligned_allocator\" );\n        // Recover 
where block actually starts\n        unsigned char* base = ((unsigned char**)p)[-1];\n        __TBB_ASSERT( (void*)((uintptr_t)(base+NFS_LineSize)&-NFS_LineSize)==p, \"not allocated by NFS_Allocate?\" );\n        free(base);\n    }\n}\n\nvoid* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n ) {    \n    void* result = (*MallocHandler) (n);\n    if (!result) {\n        throw_exception(eid_bad_alloc);\n    }\n    return result;\n}\n\nvoid __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p ) {\n    if( p ) {        \n        (*FreeHandler)( p );\n    }\n}\n\nbool __TBB_EXPORTED_FUNC is_malloc_used_v3() {\n    if (MallocHandler == &DummyMalloc) {\n        void* void_ptr = (*MallocHandler)(1);\n        (*FreeHandler)(void_ptr);\n    }\n    __TBB_ASSERT( MallocHandler!=&DummyMalloc && FreeHandler!=&DummyFree, NULL );\n    // Cast to void avoids type mismatch errors on some compilers (e.g. __IBMCPP__)\n    __TBB_ASSERT( !(((void*)MallocHandler==(void*)&malloc) ^ ((void*)FreeHandler==(void*)&free)),\n                  \"Both shim pointers must refer to routines from the same package (either TBB or CRT)\" );\n    return (void*)MallocHandler == (void*)&malloc;\n}\n\n} // namespace internal\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/cache_aligned_allocator.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_cache_aligned_allocator_H\n#define __TBB_cache_aligned_allocator_H\n\n#include <new>\n#include \"tbb_stddef.h\"\n#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n #include <utility> // std::forward\n#endif\n\nnamespace tbb {\n\n//! @cond INTERNAL\nnamespace internal {\n    //! Cache/sector line size.\n    /** @ingroup memory_allocation */\n    size_t __TBB_EXPORTED_FUNC NFS_GetLineSize();\n\n    //! 
Allocate memory on cache/sector line boundary.\n    /** @ingroup memory_allocation */\n    void* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, void* hint );\n\n    //! Free memory allocated by NFS_Allocate.\n    /** Freeing a NULL pointer is allowed, but has no effect.\n        @ingroup memory_allocation */\n    void __TBB_EXPORTED_FUNC NFS_Free( void* );\n}\n//! @endcond\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Workaround for erroneous \"unreferenced parameter\" warning in method destroy.\n    #pragma warning (push)\n    #pragma warning (disable: 4100)\n#endif\n\n//! Meets \"allocator\" requirements of ISO C++ Standard, Section 20.1.5\n/** The members are ordered the same way they are in section 20.4.1\n    of the ISO C++ standard.\n    @ingroup memory_allocation */\ntemplate<typename T>\nclass cache_aligned_allocator {\npublic:\n    typedef typename internal::allocator_type<T>::value_type value_type;\n    typedef value_type* pointer;\n    typedef const value_type* const_pointer;\n    typedef value_type& reference;\n    typedef const value_type& const_reference;\n    typedef size_t size_type;\n    typedef ptrdiff_t difference_type;\n    template<typename U> struct rebind {\n        typedef cache_aligned_allocator<U> other;\n    };\n\n    cache_aligned_allocator() throw() {}\n    cache_aligned_allocator( const cache_aligned_allocator& ) throw() {}\n    template<typename U> cache_aligned_allocator(const cache_aligned_allocator<U>&) throw() {}\n\n    pointer address(reference x) const {return &x;}\n    const_pointer address(const_reference x) const {return &x;}\n    \n    //! Allocate space for n objects, starting on a cache/sector line.\n    pointer allocate( size_type n, const void* hint=0 ) {\n        // The \"hint\" argument is always ignored in NFS_Allocate thus const_cast shouldn't hurt\n        return pointer(internal::NFS_Allocate( n, sizeof(value_type), const_cast<void*>(hint) ));\n    }\n\n    //! 
Free block of memory that starts on a cache line\n    void deallocate( pointer p, size_type ) {\n        internal::NFS_Free(p);\n    }\n\n    //! Largest value for which method allocate might succeed.\n    size_type max_size() const throw() {\n        return (~size_t(0)-internal::NFS_MaxLineSize)/sizeof(value_type);\n    }\n\n    //! Copy-construct value at location pointed to by p.\n#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n    template<typename U, typename... Args>\n    void construct(U *p, Args&&... args)\n        { ::new((void *)p) U(std::forward<Args>(args)...); }\n#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));}\n#endif\n    void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);}\n#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n\n    //! Destroy value at location pointed to by p.\n    void destroy( pointer p ) {p->~value_type();}\n};\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning (pop)\n#endif // warning 4100 is back\n\n//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1\n/** @ingroup memory_allocation */\ntemplate<> \nclass cache_aligned_allocator<void> {\npublic:\n    typedef void* pointer;\n    typedef const void* const_pointer;\n    typedef void value_type;\n    template<typename U> struct rebind {\n        typedef cache_aligned_allocator<U> other;\n    };\n};\n\ntemplate<typename T, typename U>\ninline bool operator==( const cache_aligned_allocator<T>&, const cache_aligned_allocator<U>& ) {return true;}\n\ntemplate<typename T, typename U>\ninline bool operator!=( const cache_aligned_allocator<T>&, const cache_aligned_allocator<U>& ) {return false;}\n\n} // namespace tbb\n\n#endif /* __TBB_cache_aligned_allocator_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/cilk-tbb-interop.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n/* The API to enable interoperability between Intel(R) Cilk(TM) Plus and \n   Intel(R) Threading Building Blocks. */\n\n#ifndef CILK_TBB_INTEROP_H\n#define CILK_TBB_INTEROP_H\n\n#ifndef _WIN32\n#ifdef IN_CILK_RUNTIME\n#define CILK_EXPORT __attribute__((visibility(\"protected\")))\n#else\n#define CILK_EXPORT /* nothing */\n#endif\n#else\n#ifdef IN_CILK_RUNTIME\n#define CILK_EXPORT __declspec(dllexport)\n#else\n#define CILK_EXPORT __declspec(dllimport)\n#endif  // IN_CILK_RUNTIME\n#endif // _WIN32\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/* A return code.  
0 indicates success */\ntypedef int __cilk_tbb_retcode;\n\nenum __cilk_tbb_stack_op {\n    CILK_TBB_STACK_ORPHAN, // disconnecting stack from a thread\n    CILK_TBB_STACK_ADOPT,  // reconnecting orphaned stack to a trhead\n    CILK_TBB_STACK_RELEASE // releasing stack\n};\n\ntypedef __cilk_tbb_retcode (*__cilk_tbb_pfn_stack_op)(enum __cilk_tbb_stack_op, void* data);\n\ntypedef __cilk_tbb_retcode (*__cilk_tbb_pfn_unwatch_stacks)(void *data);\n\n/* Each thunk structure has two pointers: \"routine\" and \"data\".\n   The caller of the thunk invokes *routine, passing \"data\" as the void* parameter. */\n\n/* Thunk invoked by Intel Cilk Plus runtime (cilkrts) when it changes the relationship\n   between a stack and a thread. It does not matter what stack the thunk runs on.\n   The thread (not fiber) on which the thunk runs is important.\n\n   CILK_TBB_STACK_ORPHAN\n      The thunk must be invoked on the thread disconnecting itself from the stack.\n      Must \"happen before\" the stack is adopted elsewhere.\n   CILK_TBB_STACK_ADOPT\n      The thunk must be invoked on the thread adopting the stack.\n   CILK_TBB_STACK_RELEASE\n      The thunk must be invoked on the thread doing the releasing,\n      Must \"happen before\" the stack is used elsewhere.\n\n   When a non-empty stack is transfered between threads, the first thread must orphan it \n   and the second thread must adopt it.\n\n   An empty stack can be transfered similarly, or simply released by the first thread.\n\n   Here is a summary of the actions as transitions on a state machine.\n\n                       watch                                    ORPHAN\n                       -->-->                                   -->--\n                      /      \\                                 /     \\\n   (freed empty stack)       (TBB sees stack running on thread)      (stack in limbo)\n                |     \\     /                                  \\     /     |\n                |      --<--                       
             --<--      |\n                ^      RELEASE or                              ADOPT       V\n                 \\     unwatch                                            / \n                  \\                                                      /\n                   --------------------------<---------------------------\n                                          RELEASE\n*/\nstruct __cilk_tbb_stack_op_thunk {\n    __cilk_tbb_pfn_stack_op routine;\n    void* data;                 /* Set by TBB */\n};\n\n/* Thunk invoked by TBB when it is no longer interested in watching the stack bound to the current thread. */\nstruct __cilk_tbb_unwatch_thunk {\n    __cilk_tbb_pfn_unwatch_stacks routine;\n    void* data;      \n};\n\n/* Defined by cilkrts, called by TBB.\n   Requests that cilkrts invoke __cilk_tbb_stack_op_thunk when it orphans a stack. \n   cilkrts sets *u to a thunk that TBB should call when it is no longer interested in watching the stack. */\nCILK_EXPORT\n__cilk_tbb_retcode __cilkrts_watch_stack(struct __cilk_tbb_unwatch_thunk* u,\n                                         struct __cilk_tbb_stack_op_thunk o);\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif  // CILK_TBB_INTEROP_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/combinable.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_combinable_H\n#define __TBB_combinable_H\n\n#include \"enumerable_thread_specific.h\"\n#include \"cache_aligned_allocator.h\"\n\nnamespace tbb {\n/** \\name combinable\n    **/\n//@{\n//! 
Thread-local storage with optional reduction\n/** @ingroup containers */\n    template <typename T>\n        class combinable {\n    private:\n        typedef typename tbb::cache_aligned_allocator<T> my_alloc;\n\n        typedef typename tbb::enumerable_thread_specific<T, my_alloc, ets_no_key> my_ets_type;\n        my_ets_type my_ets; \n \n    public:\n\n        combinable() { }\n\n        template <typename finit>\n        combinable( finit _finit) : my_ets(_finit) { }\n\n        //! destructor\n        ~combinable() { \n        }\n\n        combinable(const combinable& other) : my_ets(other.my_ets) { }\n\n        combinable & operator=( const combinable & other) { my_ets = other.my_ets; return *this; }\n\n        void clear() { my_ets.clear(); }\n\n        T& local() { return my_ets.local(); }\n\n        T& local(bool & exists) { return my_ets.local(exists); }\n\n        // combine_func_t has signature T(T,T) or T(const T&, const T&)\n        template <typename combine_func_t>\n        T combine(combine_func_t f_combine) { return my_ets.combine(f_combine); }\n\n        // combine_func_t has signature void(T) or void(const T&)\n        template <typename combine_func_t>\n        void combine_each(combine_func_t f_combine) { my_ets.combine_each(f_combine); }\n\n    };\n} // namespace tbb\n#endif /* __TBB_combinable_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/compat/condition_variable",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_condition_variable_H\n#define __TBB_condition_variable_H\n\n#if _WIN32||_WIN64\n#include \"../machine/windows_api.h\"\n\nnamespace tbb { \nnamespace interface5 {\nnamespace internal { \nstruct condition_variable_using_event\n{\n    //! Event for blocking waiting threads.\n    HANDLE event;\n    //! Protects invariants involving n_waiters, release_count, and epoch.\n    CRITICAL_SECTION mutex;\n    //! Number of threads waiting on this condition variable\n    int n_waiters;\n    //! 
Number of threads remaining that should no longer wait on this condition variable.\n    int release_count;\n    //! To keep threads from waking up prematurely with earlier signals.\n    unsigned epoch;\n};\n}}} // namespace tbb::interface5::internal\n\n#ifndef CONDITION_VARIABLE_INIT\ntypedef void* CONDITION_VARIABLE;\ntypedef CONDITION_VARIABLE* PCONDITION_VARIABLE;\n#endif\n\n#else /* if not _WIN32||_WIN64 */\n#include <errno.h> // some systems need it for ETIMEDOUT\n#include <pthread.h>\n#if __linux__\n#include <ctime>\n#else /* generic Unix */\n#include <sys/time.h>\n#endif\n#endif /* _WIN32||_WIN64 */\n\n#include \"../tbb_stddef.h\"\n#include \"../mutex.h\"\n#include \"../tbb_thread.h\"\n#include \"../tbb_exception.h\"\n#include \"../tbb_profiling.h\"\n\nnamespace tbb {\n\nnamespace interface5 {\n\n// C++0x standard working draft 30.4.3\n// Lock tag types\nstruct defer_lock_t { }; //! do not acquire ownership of the mutex\nstruct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking\nstruct adopt_lock_t { }; //! assume the calling thread has already\nconst defer_lock_t defer_lock = {};\nconst try_to_lock_t try_to_lock = {};\nconst adopt_lock_t adopt_lock = {};\n\n// C++0x standard working draft 30.4.3.1\n//! lock_guard \ntemplate<typename M>\nclass lock_guard : tbb::internal::no_copy {\npublic:\n    //! mutex type\n    typedef M mutex_type;\n\n    //! Constructor\n    /** precondition: If mutex_type is not a recursive mutex, the calling thread\n        does not own the mutex m. */\n    explicit lock_guard(mutex_type& m) : pm(m) {m.lock();}\n    \n    //! Adopt_lock constructor\n    /** precondition: the calling thread owns the mutex m. */\n    lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {}\n\n    //! Destructor\n    ~lock_guard() { pm.unlock(); }\nprivate:\n    mutex_type& pm;\n};\n\n// C++0x standard working draft 30.4.3.2\n//! 
unique_lock \ntemplate<typename M>\nclass unique_lock : tbb::internal::no_copy {\n    friend class condition_variable;\npublic:\n    typedef M mutex_type;\n\n    // 30.4.3.2.1 construct/copy/destroy\n    // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use.\n    //! Constructor\n    /** postcondition: pm==0 && owns==false */\n    unique_lock() : pm(NULL), owns(false) {}\n\n    //! Constructor\n    /** precondition: if mutex_type is not a recursive mutex, the  calling thread\n        does not own the mutex m.  If the precondition is not met, a deadlock occurs.\n        postcondition: pm==&m and owns==true */\n    explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;}\n\n    //! Defer_lock constructor\n    /** postcondition: pm==&m and owns==false */\n    unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {}\n\n    //! Try_to_lock constructor\n    /** precondition: if mutex_type is not a recursive mutex, the  calling thread\n       does not own the mutex m.  If the precondition is not met, a deadlock occurs.\n       postcondition: pm==&m and owns==res where res is the value returned by\n       the call to m.try_lock(). */\n    unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();}\n\n    //! Adopt_lock constructor\n    /** precondition: the calling thread owns the mutex. If it does not, mutex->unlock() would fail.\n        postcondition: pm==&m and owns==true */\n    unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {}\n\n    //! Timed unique_lock acquisition.\n    /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that \n        it uses tbb::tick_count::interval_t to specify the time duration. */\n    unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );}\n\n    //! 
Destructor\n    ~unique_lock() { if( owns ) pm->unlock(); }\n\n    // 30.4.3.2.2 locking\n    //! Lock the mutex and own it.\n    void lock() {\n        if( pm ) {\n            if( !owns ) {\n                pm->lock();\n                owns = true;\n            } else \n                throw_exception_v4( tbb::internal::eid_possible_deadlock );\n        } else \n            throw_exception_v4( tbb::internal::eid_operation_not_permitted );\n        __TBB_ASSERT( owns, NULL );\n    }\n\n    //! Try to lock the mutex. \n    /** If successful, note that this lock owns it. Otherwise, set it false. */\n    bool try_lock() {\n        if( pm ) {\n            if( !owns )\n                owns = pm->try_lock();\n            else\n                throw_exception_v4( tbb::internal::eid_possible_deadlock );\n        } else \n            throw_exception_v4( tbb::internal::eid_operation_not_permitted );\n        return owns;\n    }\n \n    //! Try to lock the mutex. \n    bool try_lock_for( const tick_count::interval_t &i );\n\n    //! Unlock the mutex\n    /** And note that this lock no longer owns it. */\n    void unlock() { \n        if( owns ) {\n            pm->unlock();\n            owns = false;\n        } else\n            throw_exception_v4( tbb::internal::eid_operation_not_permitted );\n        __TBB_ASSERT( !owns, NULL );\n    }\n\n    // 30.4.3.2.3 modifiers\n    //! Swap the two unique locks\n    void swap(unique_lock& u) {\n        mutex_type* t_pm = u.pm;    u.pm   = pm;    pm   = t_pm;\n        bool t_owns      = u.owns;  u.owns = owns;  owns = t_owns;\n    }\n\n    //! Release control over the mutex.\n    mutex_type* release() {\n        mutex_type* o_pm = pm; \n        pm = NULL; \n        owns = false; \n        return o_pm; \n    }\n\n    // 30.4.3.2.4 observers\n    //! Does this lock own the mutex?\n    bool owns_lock() const { return owns; }\n\n    // TODO: Un-comment 'explicit' when the last non-C++0x compiler support is dropped\n    //! 
Does this lock own the mutex?\n    /*explicit*/ operator bool() const { return owns; }\n\n    //! Return the mutex that this lock currently has.\n    mutex_type* mutex() const { return pm; }\n\nprivate:\n    mutex_type* pm;\n    bool owns;\n};\n\ntemplate<typename M>\nbool unique_lock<M>::try_lock_for( const tick_count::interval_t &i)\n{ \n    const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */\n    // the smallest wait-time is 0.1 milliseconds.\n    bool res = pm->try_lock();\n    int duration_in_micro; \n    if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) {\n        tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3\n        do {\n            this_tbb_thread::sleep(i_100); // sleep for 100 micro seconds\n            duration_in_micro -= unique_lock_tick;\n            res = pm->try_lock();\n        } while( !res && duration_in_micro>unique_lock_tick );\n    }\n    return (owns=res);\n}\n\n//! Swap the two unique locks that have the mutexes of same type \ntemplate<typename M>\nvoid swap(unique_lock<M>& x, unique_lock<M>& y) { x.swap( y ); }\n\nnamespace internal {\n\n#if _WIN32||_WIN64\nunion condvar_impl_t {\n    condition_variable_using_event cv_event;\n    CONDITION_VARIABLE             cv_native;\n};\nvoid __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv );\nvoid __TBB_EXPORTED_FUNC internal_destroy_condition_variable(    condvar_impl_t& cv );\nvoid __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv );\nvoid __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv );\nbool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL );\n\n#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */\ntypedef pthread_cond_t condvar_impl_t;\n#endif\n\n} // namespace internal\n\n//! 
cv_status\n/** C++0x standard working draft 30.5 */\nenum cv_status { no_timeout, timeout }; \n\n//! condition variable\n/** C++0x standard working draft 30.5.1 \n    @ingroup synchronization */\nclass condition_variable : tbb::internal::no_copy {\npublic:\n    //! Constructor\n    condition_variable() { \n#if _WIN32||_WIN64\n        internal_initialize_condition_variable( my_cv ); \n#else\n        pthread_cond_init( &my_cv, NULL );\n#endif\n    }\n\n    //! Destructor\n    ~condition_variable() { \n        //precondition: There shall be no thread blocked on *this.\n#if _WIN32||_WIN64\n        internal_destroy_condition_variable( my_cv );\n#else\n        pthread_cond_destroy( &my_cv );\n#endif\n    }\n\n    //! Notify one thread and wake it up\n    void notify_one() { \n#if _WIN32||_WIN64\n        internal_condition_variable_notify_one( my_cv ); \n#else\n        pthread_cond_signal( &my_cv );\n#endif\n    }\n\n    //! Notify all threads \n    void notify_all() { \n#if _WIN32||_WIN64\n        internal_condition_variable_notify_all( my_cv ); \n#else\n        pthread_cond_broadcast( &my_cv );\n#endif\n    }\n\n    //! Release the mutex associated with the lock and wait on this condition variable\n    void wait(unique_lock<mutex>& lock);\n\n    //! Wait on this condition variable while pred is false\n    template <class Predicate>\n    void wait(unique_lock<mutex>& lock, Predicate pred) {\n        while( !pred() )\n            wait( lock );\n    }\n\n    //! Timed version of wait()\n    cv_status wait_for(unique_lock<mutex>& lock, const tick_count::interval_t &i );\n\n    //! Timed version of the predicated wait\n    /** The loop terminates when pred() returns true or when the time duration specified by rel_time (i) has elapsed. 
*/\n    template<typename Predicate>\n    bool wait_for(unique_lock<mutex>& lock, const tick_count::interval_t &i, Predicate pred)\n    {\n        while( !pred() ) {\n            cv_status st = wait_for( lock, i );\n            if( st==timeout )\n                return pred();\n        }\n        return true;\n    }\n\n    // C++0x standard working draft. 30.2.3\n    typedef internal::condvar_impl_t* native_handle_type;\n\n    native_handle_type native_handle() { return (native_handle_type) &my_cv; }\n\nprivate:\n    internal::condvar_impl_t my_cv;\n};\n\n\n#if _WIN32||_WIN64\ninline void condition_variable::wait( unique_lock<mutex>& lock )\n{\n    __TBB_ASSERT( lock.owns, NULL );\n    lock.owns = false;\n    if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) {\n        int ec = GetLastError();\n        // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT\n        __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL );\n        lock.owns = true;\n        throw_exception_v4( tbb::internal::eid_condvar_wait_failed );\n    }\n    lock.owns = true;\n}\n\ninline cv_status condition_variable::wait_for( unique_lock<mutex>& lock, const tick_count::interval_t& i )\n{\n    cv_status rc = no_timeout;\n    __TBB_ASSERT( lock.owns, NULL );\n    lock.owns = false;\n    // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait()\n    if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) {\n        int ec = GetLastError();\n        if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT )\n            rc = timeout;\n        else {\n            lock.owns = true;\n            throw_exception_v4( tbb::internal::eid_condvar_wait_failed );\n        }\n    }\n    lock.owns = true;\n    return rc;\n}\n\n#else /* !(_WIN32||_WIN64) */\ninline void condition_variable::wait( unique_lock<mutex>& lock )\n{\n    __TBB_ASSERT( lock.owns, NULL );\n    lock.owns = 
false;\n    if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) {\n        lock.owns = true;\n        throw_exception_v4( tbb::internal::eid_condvar_wait_failed );\n    }\n    // upon successful return, the mutex has been locked and is owned by the calling thread.\n    lock.owns = true;\n}\n\ninline cv_status condition_variable::wait_for( unique_lock<mutex>& lock, const tick_count::interval_t& i )\n{\n#if __linux__\n    struct timespec req;\n    double sec = i.seconds();\n    clock_gettime( CLOCK_REALTIME, &req );\n    req.tv_sec  += static_cast<long>(sec);\n    req.tv_nsec += static_cast<long>( (sec - static_cast<long>(sec))*1e9 );\n#else /* generic Unix */\n    struct timeval tv;\n    struct timespec req;\n    double sec = i.seconds();\n    int status = gettimeofday(&tv, NULL);\n    __TBB_ASSERT_EX( status==0, \"gettimeofday failed\" );\n    req.tv_sec  = tv.tv_sec + static_cast<long>(sec);\n    req.tv_nsec = tv.tv_usec*1000 + static_cast<long>( (sec - static_cast<long>(sec))*1e9 );\n#endif /*(choice of OS) */\n    if( req.tv_nsec>=1e9 ) {\n        req.tv_sec  += 1;\n        req.tv_nsec -= static_cast<long int>(1e9);\n    }\n    __TBB_ASSERT( 0<=req.tv_nsec && req.tv_nsec<1e9, NULL );\n\n    int ec;\n    cv_status rc = no_timeout;\n    __TBB_ASSERT( lock.owns, NULL );\n    lock.owns = false;\n    if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) {\n        if( ec==ETIMEDOUT )\n            rc = timeout;\n        else {\n            __TBB_ASSERT( lock.try_lock()==false, NULL );\n            lock.owns = true;\n            throw_exception_v4( tbb::internal::eid_condvar_wait_failed );\n        }\n    }\n    lock.owns = true;\n    return rc;\n}\n#endif /* !(_WIN32||_WIN64) */\n\n} // namespace interface5\n\n__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable)\n\n} // namespace tbb \n\n#if TBB_IMPLEMENT_CPP0X\n\nnamespace std {\n\nusing tbb::interface5::defer_lock_t;\nusing tbb::interface5::try_to_lock_t;\nusing 
tbb::interface5::adopt_lock_t;\nusing tbb::interface5::defer_lock;\nusing tbb::interface5::try_to_lock;\nusing tbb::interface5::adopt_lock;\nusing tbb::interface5::lock_guard;\nusing tbb::interface5::unique_lock;\nusing tbb::interface5::swap;   /* this is for void std::swap(unique_lock<M>&,unique_lock<M>&) */\nusing tbb::interface5::condition_variable;\nusing tbb::interface5::cv_status;\nusing tbb::interface5::timeout;\nusing tbb::interface5::no_timeout;\n\n} // namespace std \n\n#endif /* TBB_IMPLEMENT_CPP0X */\n\n#endif /* __TBB_condition_variable_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/compat/ppl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_compat_ppl_H\n#define __TBB_compat_ppl_H\n\n#include \"../task_group.h\"\n#include \"../parallel_invoke.h\"\n#include \"../parallel_for_each.h\"\n#include \"../parallel_for.h\"\n#include \"../tbb_exception.h\"\n#include \"../critical_section.h\"\n#include \"../reader_writer_lock.h\"\n#include \"../combinable.h\"\n\nnamespace Concurrency {\n\n#if __TBB_TASK_GROUP_CONTEXT\n    using tbb::task_handle;\n    using tbb::task_group_status;\n    using tbb::task_group;\n    using tbb::structured_task_group;\n    using tbb::invalid_multiple_scheduling;\n    using tbb::missing_wait;\n    using tbb::make_task;\n\n    using tbb::not_complete;\n    using tbb::complete;\n    using tbb::canceled;\n\n    using tbb::is_current_task_group_canceling;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n    using tbb::parallel_invoke;\n    using tbb::strict_ppl::parallel_for;\n    using tbb::parallel_for_each;\n    using tbb::critical_section;\n    using tbb::reader_writer_lock;\n    using tbb::combinable;\n\n    using tbb::improper_lock;\n\n} // namespace Concurrency\n\n#endif /* __TBB_compat_ppl_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/compat/thread",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_thread_H\n#define __TBB_thread_H\n\n#include \"../tbb_thread.h\"\n\n#if TBB_IMPLEMENT_CPP0X\n\nnamespace std {\n\ntypedef tbb::tbb_thread thread;\n\nnamespace this_thread {\n    using tbb::this_tbb_thread::get_id;\n    using tbb::this_tbb_thread::yield;\n\n    inline void sleep_for(const tbb::tick_count::interval_t& rel_time) {\n        tbb::internal::thread_sleep_v3( rel_time );\n    }\n\n}\n\n}\n\n#endif /* TBB_IMPLEMENT_CPP0X */\n\n#endif /* __TBB_thread_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/compat/tuple",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tuple_H\n#define __TBB_tuple_H\n\n#include <utility>\n#include \"../tbb_stddef.h\"\n\n// build preprocessor variables for varying number of arguments\n// Need the leading comma so the empty __TBB_T_PACK will not cause a syntax error.\n#if __TBB_VARIADIC_MAX <= 5\n#define __TBB_T_PACK\n#define __TBB_U_PACK\n#define __TBB_TYPENAME_T_PACK\n#define __TBB_TYPENAME_U_PACK\n#define __TBB_NULL_TYPE_PACK\n#define __TBB_REF_T_PARAM_PACK\n#define __TBB_CONST_REF_T_PARAM_PACK\n#define __TBB_T_PARAM_LIST_PACK\n#define __TBB_CONST_NULL_REF_PACK\n//\n#elif __TBB_VARIADIC_MAX == 6\n#define __TBB_T_PACK ,__T5\n#define __TBB_U_PACK ,__U5\n#define __TBB_TYPENAME_T_PACK , typename __T5\n#define __TBB_TYPENAME_U_PACK , typename __U5\n#define __TBB_NULL_TYPE_PACK , null_type\n#define __TBB_REF_T_PARAM_PACK ,__T5& t5\n#define __TBB_CONST_REF_T_PARAM_PACK ,const __T5& t5\n#define __TBB_T_PARAM_LIST_PACK ,t5\n#define __TBB_CONST_NULL_REF_PACK , const null_type&\n//\n#elif __TBB_VARIADIC_MAX == 7\n#define __TBB_T_PACK ,__T5, __T6\n#define __TBB_U_PACK ,__U5, __U6\n#define __TBB_TYPENAME_T_PACK , typename __T5 , typename __T6\n#define __TBB_TYPENAME_U_PACK , typename __U5 , typename __U6\n#define __TBB_NULL_TYPE_PACK , null_type, null_type\n#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6\n#define __TBB_CONST_REF_T_PARAM_PACK ,const __T5& t5, const __T6& t6\n#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6\n#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&\n//\n#elif __TBB_VARIADIC_MAX == 8\n#define __TBB_T_PACK ,__T5, __T6, __T7\n#define __TBB_U_PACK ,__U5, __U6, __U7\n#define __TBB_TYPENAME_T_PACK , typename __T5 , typename __T6, typename __T7\n#define __TBB_TYPENAME_U_PACK , typename __U5 , typename __U6, typename __U7\n#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type\n#define __TBB_REF_T_PARAM_PACK 
,__T5& t5, __T6& t6, __T7& t7\n#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7\n#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7\n#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&\n//\n#elif __TBB_VARIADIC_MAX == 9\n#define __TBB_T_PACK ,__T5, __T6, __T7, __T8\n#define __TBB_U_PACK ,__U5, __U6, __U7, __U8\n#define __TBB_TYPENAME_T_PACK , typename __T5, typename __T6, typename __T7, typename __T8\n#define __TBB_TYPENAME_U_PACK , typename __U5, typename __U6, typename __U7, typename __U8\n#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type, null_type\n#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7, __T8& t8\n#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7, const __T8& t8\n#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 ,t8\n#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&, const null_type&\n//\n#elif __TBB_VARIADIC_MAX >= 10\n#define __TBB_T_PACK ,__T5, __T6, __T7, __T8, __T9\n#define __TBB_U_PACK ,__U5, __U6, __U7, __U8, __U9\n#define __TBB_TYPENAME_T_PACK , typename __T5, typename __T6, typename __T7, typename __T8, typename __T9\n#define __TBB_TYPENAME_U_PACK , typename __U5, typename __U6, typename __U7, typename __U8, typename __U9\n#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type, null_type, null_type\n#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7, __T8& t8, __T9& t9\n#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7, const __T8& t8, const __T9& t9\n#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 ,t8 ,t9\n#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&, const null_type&, const null_type&\n#endif\n\n\n\nnamespace tbb {\nnamespace interface5 {\n\nnamespace internal {\nstruct null_type { };\n}\nusing internal::null_type;\n\n// tuple forward declaration\ntemplate <typename __T0=null_type, typename 
__T1=null_type, typename __T2=null_type,\n          typename __T3=null_type, typename __T4=null_type\n#if __TBB_VARIADIC_MAX >= 6\n, typename __T5=null_type\n#if __TBB_VARIADIC_MAX >= 7\n, typename __T6=null_type\n#if __TBB_VARIADIC_MAX >= 8\n, typename __T7=null_type\n#if __TBB_VARIADIC_MAX >= 9\n, typename __T8=null_type\n#if __TBB_VARIADIC_MAX >= 10\n, typename __T9=null_type\n#endif\n#endif\n#endif\n#endif\n#endif\n>\nclass tuple;\n\nnamespace internal {\n\n// const null_type temp\ninline const null_type cnull() { return null_type(); }\n\n// cons forward declaration\ntemplate <typename __HT, typename __TT> struct cons;\n\n// type of a component of the cons\ntemplate<int __N, typename __T>\nstruct component {\n    typedef typename __T::tail_type next;\n    typedef typename component<__N-1,next>::type type;\n};\n\ntemplate<typename __T>\nstruct component<0,__T> {\n    typedef typename __T::head_type type;\n};\n\ntemplate<>\nstruct component<0,null_type> {\n    typedef null_type type;\n};\n\n// const version of component\n\ntemplate<int __N, typename __T>\nstruct component<__N, const __T>\n{\n    typedef typename __T::tail_type next;\n    typedef const typename component<__N-1,next>::type type;\n};\n\ntemplate<typename __T>\nstruct component<0, const __T>\n{\n    typedef const typename __T::head_type type;\n};\n\n\n// helper class for getting components of cons\ntemplate< int __N>\nstruct get_helper {\ntemplate<typename __HT, typename __TT>\ninline static typename component<__N, cons<__HT,__TT> >::type& get(cons<__HT,__TT>& ti) {\n    return get_helper<__N-1>::get(ti.tail);\n}\ntemplate<typename __HT, typename __TT>\ninline static typename component<__N, cons<__HT,__TT> >::type const& get(const cons<__HT,__TT>& ti) {\n    return get_helper<__N-1>::get(ti.tail);\n}\n};\n\ntemplate<>\nstruct get_helper<0> {\ntemplate<typename __HT, typename __TT>\ninline static typename component<0, cons<__HT,__TT> >::type& get(cons<__HT,__TT>& ti) {\n    return 
ti.head;\n}\ntemplate<typename __HT, typename __TT>\ninline static typename component<0, cons<__HT,__TT> >::type const& get(const cons<__HT,__TT>& ti) {\n    return ti.head;\n}\n};\n\n// traits adaptor\ntemplate <typename __T0, typename __T1, typename __T2, typename __T3, typename __T4 __TBB_TYPENAME_T_PACK>\nstruct tuple_traits {\n    typedef cons <__T0, typename tuple_traits<__T1, __T2, __T3, __T4 __TBB_T_PACK , null_type>::U > U;\n};\n\ntemplate <typename __T0>\nstruct tuple_traits<__T0, null_type, null_type, null_type, null_type __TBB_NULL_TYPE_PACK > {\n    typedef cons<__T0, null_type> U;\n};\n\ntemplate<>\nstruct tuple_traits<null_type, null_type, null_type, null_type, null_type __TBB_NULL_TYPE_PACK > {\n    typedef null_type U;\n};\n\n\n// core cons defs\ntemplate <typename __HT, typename __TT>\nstruct cons{\n\n    typedef __HT head_type;\n    typedef __TT tail_type;\n\n    head_type head; \n    tail_type tail;\n\n    static const int length = 1 + tail_type::length;\n\n    // default constructors\n    explicit cons() : head(), tail() { }\n\n    // non-default constructors\n    cons(head_type& h, const tail_type& t) : head(h), tail(t) { }\n\n    template <typename __T0, typename __T1, typename __T2, typename __T3, typename __T4 __TBB_TYPENAME_T_PACK >\n    cons(const __T0& t0, const __T1& t1, const __T2& t2, const __T3& t3, const __T4& t4 __TBB_CONST_REF_T_PARAM_PACK) :\n        head(t0), tail(t1, t2, t3, t4 __TBB_T_PARAM_LIST_PACK, cnull()) { }\n\n    template <typename __T0, typename __T1, typename __T2, typename __T3, typename __T4 __TBB_TYPENAME_T_PACK >\n    cons(__T0& t0, __T1& t1, __T2& t2, __T3& t3, __T4& t4 __TBB_REF_T_PARAM_PACK) :\n        head(t0), tail(t1, t2, t3, t4 __TBB_T_PARAM_LIST_PACK , cnull()) { }\n\n    template <typename __HT1, typename __TT1>\n    cons(const cons<__HT1,__TT1>& other) : head(other.head), tail(other.tail) { }\n\n    cons& operator=(const cons& other) { head = other.head; tail = other.tail; return *this; }\n\n    friend 
bool operator==(const cons& me, const cons& other) {\n        return me.head == other.head && me.tail == other.tail;\n    }\n    friend bool operator<(const cons& me, const cons& other)  {\n        return me.head < other.head || (!(other.head < me.head) && me.tail < other.tail);\n    }\n    friend bool operator>(const cons& me, const cons& other)  { return other<me; }\n    friend bool operator!=(const cons& me, const cons& other) { return !(me==other); }\n    friend bool operator>=(const cons& me, const cons& other) { return !(me<other); }\n    friend bool operator<=(const cons& me, const cons& other) { return !(me>other); }\n\n    template<typename __HT1, typename __TT1>\n    friend bool operator==(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) {\n        return me.head == other.head && me.tail == other.tail;\n    }\n\n    template<typename __HT1, typename __TT1>\n    friend bool operator<(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) {\n        return me.head < other.head || (!(other.head < me.head) && me.tail < other.tail);\n    }\n\n    template<typename __HT1, typename __TT1>\n    friend bool operator>(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return other<me; }\n\n    template<typename __HT1, typename __TT1>\n    friend bool operator!=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me==other); }\n\n    template<typename __HT1, typename __TT1>\n    friend bool operator>=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me<other); }\n\n    template<typename __HT1, typename __TT1>\n    friend bool operator<=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me>other); }\n\n\n};  // cons\n\n\ntemplate <typename __HT>\nstruct cons<__HT,null_type> { \n\n    typedef __HT head_type;\n    typedef null_type tail_type;\n\n    head_type head; \n\n    static const int length = 1;\n\n    // default constructor\n    cons() : head() { /*std::cout << \"default 
constructor 1\\n\";*/ }\n\n    cons(const null_type&, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head() { /*std::cout << \"default constructor 2\\n\";*/ }\n\n    // non-default constructor\n    template<typename __T1>\n    cons(__T1& t1, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(t1) { /*std::cout << \"non-default a1, t1== \" << t1 << \"\\n\";*/}\n\n    cons(head_type& h, const null_type& = null_type() ) : head(h) { }\n    cons(const head_type& t0, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(t0) { }\n\n    // converting constructor\n    template<typename __HT1>\n    cons(__HT1 h1, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(h1) { }\n\n    // copy constructor\n    template<typename __HT1>\n    cons( const cons<__HT1, null_type>& other) : head(other.head) { }\n\n    // assignment operator\n    cons& operator=(const cons& other) { head = other.head; return *this; }\n\n    friend bool operator==(const cons& me, const cons& other) { return me.head == other.head; }\n    friend bool operator<(const cons& me, const cons& other) { return me.head < other.head; }\n    friend bool operator>(const cons& me, const cons& other) { return other<me; }\n    friend bool operator!=(const cons& me, const cons& other) {return !(me==other); }\n    friend bool operator<=(const cons& me, const cons& other) {return !(me>other); }\n    friend bool operator>=(const cons& me, const cons& other) {return !(me<other); }\n\n    template<typename __HT1>\n    friend bool operator==(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) {\n        return me.head == other.head;\n    }\n\n    template<typename __HT1>\n    friend bool operator<(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) {\n        return me.head < other.head;\n    
}\n\n    template<typename __HT1>\n    friend bool operator>(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return other<me; }\n\n    template<typename __HT1>\n    friend bool operator!=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me==other); }\n\n    template<typename __HT1>\n    friend bool operator<=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me>other); }\n\n    template<typename __HT1>\n    friend bool operator>=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me<other); }\n\n};  // cons\n\ntemplate <>\nstruct cons<null_type,null_type> { typedef null_type tail_type; static const int length = 0; };\n\n// wrapper for default constructor\ntemplate<typename __T>\ninline const __T wrap_dcons(__T*) { return __T(); }\n\n} // namespace internal\n\n// tuple definition\ntemplate<typename __T0, typename __T1, typename __T2, typename __T3, typename __T4 __TBB_TYPENAME_T_PACK >\nclass tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T_PACK >::U {\n    // friends\n    template <typename __T> friend class tuple_size;\n    template<int __N, typename __T> friend struct tuple_element;\n\n    // stl components\n    typedef tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > value_type;\n    typedef value_type *pointer;\n    typedef const value_type *const_pointer;\n    typedef value_type &reference;\n    typedef const value_type &const_reference;\n    typedef size_t size_type;\n\n    typedef typename internal::tuple_traits<__T0,__T1,__T2,__T3, __T4 __TBB_T_PACK >::U my_cons;\n\npublic:\n    tuple(const __T0& t0=internal::wrap_dcons((__T0*)NULL)\n          ,const __T1& t1=internal::wrap_dcons((__T1*)NULL)\n          ,const __T2& t2=internal::wrap_dcons((__T2*)NULL)\n          ,const __T3& t3=internal::wrap_dcons((__T3*)NULL)\n          ,const __T4& t4=internal::wrap_dcons((__T4*)NULL)\n#if __TBB_VARIADIC_MAX >= 6\n          ,const __T5& 
t5=internal::wrap_dcons((__T5*)NULL)\n#if __TBB_VARIADIC_MAX >= 7\n          ,const __T6& t6=internal::wrap_dcons((__T6*)NULL)\n#if __TBB_VARIADIC_MAX >= 8\n          ,const __T7& t7=internal::wrap_dcons((__T7*)NULL)\n#if __TBB_VARIADIC_MAX >= 9\n          ,const __T8& t8=internal::wrap_dcons((__T8*)NULL)\n#if __TBB_VARIADIC_MAX >= 10\n          ,const __T9& t9=internal::wrap_dcons((__T9*)NULL)\n#endif\n#endif\n#endif\n#endif\n#endif\n          ) :\n        my_cons(t0,t1,t2,t3,t4 __TBB_T_PARAM_LIST_PACK) { }\n\n    template<int __N>\n    struct internal_tuple_element {\n        typedef typename internal::component<__N,my_cons>::type type;\n    };\n\n    template<int __N>\n    typename internal_tuple_element<__N>::type& get() { return internal::get_helper<__N>::get(*this); }\n\n    template<int __N>\n    typename internal_tuple_element<__N>::type const& get() const { return internal::get_helper<__N>::get(*this); }\n\n    template<typename __U1, typename __U2>\n    tuple& operator=(const internal::cons<__U1,__U2>& other) {\n        my_cons::operator=(other);\n        return *this;\n    }\n\n    template<typename __U1, typename __U2>\n    tuple& operator=(const std::pair<__U1,__U2>& other) {\n        // __TBB_ASSERT(tuple_size<value_type>::value == 2, \"Invalid size for pair to tuple assignment\");\n        this->head = other.first;\n        this->tail.head = other.second;\n        return *this;\n    }\n\n    friend bool operator==(const tuple& me, const tuple& other) {return static_cast<const my_cons &>(me)==(other);}\n    friend bool operator<(const tuple& me,  const tuple& other) {return static_cast<const my_cons &>(me)<(other);}\n    friend bool operator>(const tuple& me,  const tuple& other) {return static_cast<const my_cons &>(me)>(other);}\n    friend bool operator!=(const tuple& me, const tuple& other) {return static_cast<const my_cons &>(me)!=(other);}\n    friend bool operator>=(const tuple& me, const tuple& other) {return static_cast<const my_cons 
&>(me)>=(other);}\n    friend bool operator<=(const tuple& me, const tuple& other) {return static_cast<const my_cons &>(me)<=(other);}\n\n};  // tuple\n\n// empty tuple\ntemplate<>\nclass tuple<null_type, null_type, null_type, null_type, null_type __TBB_NULL_TYPE_PACK > : public null_type {\n};\n\n// helper classes\n\ntemplate < typename __T>\nclass tuple_size {\npublic:\n    static const size_t value = 1 + tuple_size<typename __T::tail_type>::value;\n};\n\ntemplate <>\nclass tuple_size<tuple<> > { \npublic:\n    static const size_t value = 0;\n};\n\ntemplate <>\nclass tuple_size<null_type> {\npublic:\n    static const size_t value = 0;\n};\n\ntemplate<int __N, typename __T>\nstruct tuple_element {\n    typedef typename internal::component<__N, typename __T::my_cons>::type type;\n};\n\ntemplate<int __N, typename __T0, typename __T1, typename __T2, typename __T3, typename __T4 __TBB_TYPENAME_T_PACK >\ninline static typename tuple_element<__N,tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > >::type&\n    get(tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK >& t) { return internal::get_helper<__N>::get(t); }\n\ntemplate<int __N, typename __T0, typename __T1, typename __T2, typename __T3, typename __T4 __TBB_TYPENAME_T_PACK >\ninline static typename tuple_element<__N,tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > >::type const&\n    get(const tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK >& t) { return internal::get_helper<__N>::get(t); }\n\n}  // interface5\n} // tbb\n\n#if !__TBB_CPP11_TUPLE_PRESENT\nnamespace tbb {\n    namespace flow {\n        using tbb::interface5::tuple;\n        using tbb::interface5::tuple_size;\n        using tbb::interface5::tuple_element;\n        using tbb::interface5::get;\n    }\n}\n#endif\n\n#undef __TBB_T_PACK\n#undef __TBB_U_PACK\n#undef __TBB_TYPENAME_T_PACK\n#undef __TBB_TYPENAME_U_PACK\n#undef __TBB_NULL_TYPE_PACK\n#undef __TBB_REF_T_PARAM_PACK\n#undef __TBB_CONST_REF_T_PARAM_PACK\n#undef __TBB_T_PARAM_LIST_PACK\n#undef 
__TBB_CONST_NULL_REF_PACK\n \n#endif /* __TBB_tuple_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_hash_map.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/concurrent_hash_map.h\"\n\nnamespace tbb {\n\nnamespace internal {\n#if !TBB_NO_LEGACY\nstruct hash_map_segment_base {\n    typedef spin_rw_mutex segment_mutex_t;\n    //! Type of a hash code.\n    typedef size_t hashcode_t;\n    //! Log2 of n_segment\n    static const size_t n_segment_bits = 6;\n    //! Maximum size of array of chains\n    static const size_t max_physical_size = size_t(1)<<(8*sizeof(hashcode_t)-n_segment_bits);\n    //! 
Mutex that protects this segment\n    segment_mutex_t my_mutex;\n    // Number of nodes\n    atomic<size_t> my_logical_size;\n    // Size of chains\n    /** Always zero or a power of two */\n    size_t my_physical_size;\n    //! True if my_logical_size>=my_physical_size.\n    /** Used to support Intel(R) Thread Checker. */\n    bool __TBB_EXPORTED_METHOD internal_grow_predicate() const;\n};\n\nbool hash_map_segment_base::internal_grow_predicate() const {\n    // Intel(R) Thread Checker considers the following reads to be races, so we hide them in the \n    // library so that Intel(R) Thread Checker will ignore them.  The reads are used in a double-check\n    // context, so the program is nonetheless correct despite the race.\n    return my_logical_size >= my_physical_size && my_physical_size < max_physical_size;\n}\n#endif//!TBB_NO_LEGACY\n\n} // namespace internal\n\n} // namespace tbb\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_hash_map.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_concurrent_hash_map_H\n#define __TBB_concurrent_hash_map_H\n\n#include \"tbb_stddef.h\"\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <iterator>\n#include <utility>      // Need std::pair\n#include <cstring>      // Need std::memset\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\n#include \"cache_aligned_allocator.h\"\n#include \"tbb_allocator.h\"\n#include \"spin_rw_mutex.h\"\n#include \"atomic.h\"\n#include \"tbb_exception.h\"\n#include \"tbb_profiling.h\"\n#include \"internal/_concurrent_unordered_impl.h\" // Need tbb_hasher\n#if __TBB_INITIALIZER_LISTS_PRESENT\n#include <initializer_list>\n#endif\n#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS\n#include <typeinfo>\n#endif\n#if __TBB_STATISTICS\n#include <stdio.h>\n#endif\n\nnamespace tbb {\n\n//! hash_compare that is default argument for concurrent_hash_map\ntemplate<typename Key>\nstruct tbb_hash_compare {\n    static size_t hash( const Key& a ) { return tbb_hasher(a); }\n    static bool equal( const Key& a, const Key& b ) { return a == b; }\n};\n\nnamespace interface5 {\n\n    template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >\n    class concurrent_hash_map;\n\n    //! @cond INTERNAL\n    namespace internal {\n    using namespace tbb::internal;\n\n\n    //! Type of a hash code.\n    typedef size_t hashcode_t;\n    //! Node base type\n    struct hash_map_node_base : tbb::internal::no_copy {\n        //! Mutex type\n        typedef spin_rw_mutex mutex_t;\n        //! Scoped lock type for mutex\n        typedef mutex_t::scoped_lock scoped_t;\n        //! 
Next node in chain\n        hash_map_node_base *next;\n        mutex_t mutex;\n    };\n    //! Incompleteness flag value\n    static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));\n    //! Rehashed empty bucket flag\n    static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));\n    //! base class of concurrent_hash_map\n    class hash_map_base {\n    public:\n        //! Size type\n        typedef size_t size_type;\n        //! Type of a hash code.\n        typedef size_t hashcode_t;\n        //! Segment index type\n        typedef size_t segment_index_t;\n        //! Node base type\n        typedef hash_map_node_base node_base;\n        //! Bucket type\n        struct bucket : tbb::internal::no_copy {\n            //! Mutex type for buckets\n            typedef spin_rw_mutex mutex_t;\n            //! Scoped lock type for mutex\n            typedef mutex_t::scoped_lock scoped_t;\n            mutex_t mutex;\n            node_base *node_list;\n        };\n        //! Count of segments in the first block\n        static size_type const embedded_block = 1;\n        //! Count of segments in the first block\n        static size_type const embedded_buckets = 1<<embedded_block;\n        //! Count of segments in the first block\n        static size_type const first_block = 8; //including embedded_block. perfect with bucket size 16, so the allocations are power of 4096\n        //! Size of a pointer / table size\n        static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit\n        //! Segment pointer\n        typedef bucket *segment_ptr_t;\n        //! Segment pointers table type\n        typedef segment_ptr_t segments_table_t[pointers_per_table];\n        //! Hash mask = sum of allocated segment sizes - 1\n        atomic<hashcode_t> my_mask;\n        //! Segment pointers table. 
Also prevents false sharing between my_mask and my_size\n        segments_table_t my_table;\n        //! Size of container in stored items\n        atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects\n        //! Zero segment\n        bucket my_embedded_segment[embedded_buckets];\n#if __TBB_STATISTICS\n        atomic<unsigned> my_info_resizes; // concurrent ones\n        mutable atomic<unsigned> my_info_restarts; // race collisions\n        atomic<unsigned> my_info_rehashes;  // invocations of rehash_bucket\n#endif\n        //! Constructor\n        hash_map_base() {\n            std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128   or 64*8=512\n                + sizeof(my_size) + sizeof(my_mask)  // 4+4 or 8+8\n                + embedded_buckets*sizeof(bucket) ); // n*8 or n*16\n            for( size_type i = 0; i < embedded_block; i++ ) // fill the table\n                my_table[i] = my_embedded_segment + segment_base(i);\n            my_mask = embedded_buckets - 1;\n            __TBB_ASSERT( embedded_block <= first_block, \"The first block number must include embedded blocks\");\n#if __TBB_STATISTICS\n            my_info_resizes = 0; // concurrent ones\n            my_info_restarts = 0; // race collisions\n            my_info_rehashes = 0;  // invocations of rehash_bucket\n#endif\n        }\n\n        //! @return segment index of given index in the array\n        static segment_index_t segment_index_of( size_type index ) {\n            return segment_index_t( __TBB_Log2( index|1 ) );\n        }\n\n        //! @return the first array index of given segment\n        static segment_index_t segment_base( segment_index_t k ) {\n            return (segment_index_t(1)<<k & ~segment_index_t(1));\n        }\n\n        //! 
@return segment size except for @arg k == 0\n        static size_type segment_size( segment_index_t k ) {\n            return size_type(1)<<k; // fake value for k==0\n        }\n\n        //! @return true if @arg ptr is valid pointer\n        static bool is_valid( void *ptr ) {\n            return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);\n        }\n\n        //! Initialize buckets\n        static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {\n            if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );\n            else for(size_type i = 0; i < sz; i++, ptr++) {\n                *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;\n                ptr->node_list = rehash_req;\n            }\n        }\n\n        //! Add node @arg n to bucket @arg b\n        static void add_to_bucket( bucket *b, node_base *n ) {\n            __TBB_ASSERT(b->node_list != rehash_req, NULL);\n            n->next = b->node_list;\n            b->node_list = n; // its under lock and flag is set\n        }\n\n        //! Exception safety helper\n        struct enable_segment_failsafe : tbb::internal::no_copy {\n            segment_ptr_t *my_segment_ptr;\n            enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}\n            ~enable_segment_failsafe() {\n                if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress\n            }\n        };\n\n        //! 
Enable segment\n        void enable_segment( segment_index_t k, bool is_initial = false ) {\n            __TBB_ASSERT( k, \"Zero segment must be embedded\" );\n            enable_segment_failsafe watchdog( my_table, k );\n            cache_aligned_allocator<bucket> alloc;\n            size_type sz;\n            __TBB_ASSERT( !is_valid(my_table[k]), \"Wrong concurrent assignment\");\n            if( k >= first_block ) {\n                sz = segment_size( k );\n                segment_ptr_t ptr = alloc.allocate( sz );\n                init_buckets( ptr, sz, is_initial );\n                itt_hide_store_word( my_table[k], ptr );\n                sz <<= 1;// double it to get entire capacity of the container\n            } else { // the first block\n                __TBB_ASSERT( k == embedded_block, \"Wrong segment index\" );\n                sz = segment_size( first_block );\n                segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );\n                init_buckets( ptr, sz - embedded_buckets, is_initial );\n                ptr -= segment_base(embedded_block);\n                for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets\n                    itt_hide_store_word( my_table[i], ptr + segment_base(i) );\n            }\n            itt_store_word_with_release( my_mask, sz-1 );\n            watchdog.my_segment_ptr = 0;\n        }\n\n        //! 
Get bucket by (masked) hashcode\n        bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?\n            segment_index_t s = segment_index_of( h );\n            h -= segment_base(s);\n            segment_ptr_t seg = my_table[s];\n            __TBB_ASSERT( is_valid(seg), \"hashcode must be cut by valid mask for allocated segments\" );\n            return &seg[h];\n        }\n\n        // internal serial rehashing helper\n        void mark_rehashed_levels( hashcode_t h ) throw () {\n            segment_index_t s = segment_index_of( h );\n            while( segment_ptr_t seg = my_table[++s] )\n                if( seg[h].node_list == rehash_req ) {\n                    seg[h].node_list = empty_rehashed;\n                    mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)\n                }\n        }\n\n        //! Check for mask race\n        // Splitting into two functions should help inlining\n        inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {\n            hashcode_t m_now, m_old = m;\n            m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );\n            if( m_old != m_now )\n                return check_rehashing_collision( h, m_old, m = m_now );\n            return false;\n        }\n\n        //! 
Process mask race, check for rehashing collision\n        bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {\n            __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m\n            if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event\n                // condition above proves that 'h' has some other bits set beside 'm_old'\n                // find next applicable mask after m_old    //TODO: look at bsl instruction\n                for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size\n                    ;\n                m_old = (m_old<<1) - 1; // get full mask from a bit\n                __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);\n                // check whether it is rehashing/ed\n                if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )\n                {\n#if __TBB_STATISTICS\n                    my_info_restarts++; // race collisions\n#endif\n                    return true;\n                }\n            }\n            return false;\n        }\n\n        //! Insert a node and check for load factor. 
@return segment index to enable.\n        segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {\n            size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted\n            add_to_bucket( b, n );\n            // check load factor\n            if( sz >= mask ) { // TODO: add custom load_factor\n                segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of\n                __TBB_ASSERT( is_valid(my_table[new_seg-1]), \"new allocations must not publish new mask until segment has allocated\");\n                static const segment_ptr_t is_allocating = (segment_ptr_t)2;\n                if( !itt_hide_load_word(my_table[new_seg])\n                  && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )\n                    return new_seg; // The value must be processed\n            }\n            return 0;\n        }\n\n        //! Prepare enough segments for number of buckets\n        void reserve(size_type buckets) {\n            if( !buckets-- ) return;\n            bool is_initial = !my_size;\n            for( size_type m = my_mask; buckets > m; m = my_mask )\n                enable_segment( segment_index_of( m+1 ), is_initial );\n        }\n        //! Swap hash_map_bases\n        void internal_swap(hash_map_base &table) {\n            using std::swap;\n            swap(this->my_mask, table.my_mask);\n            swap(this->my_size, table.my_size);\n            for(size_type i = 0; i < embedded_buckets; i++)\n                swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);\n            for(size_type i = embedded_block; i < pointers_per_table; i++)\n                swap(this->my_table[i], table.my_table[i]);\n        }\n    };\n\n    template<typename Iterator>\n    class hash_map_range;\n\n    //! 
Meets requirements of a forward iterator for STL */\n    /** Value is either the T or const T type of the container.\n        @ingroup containers */\n    template<typename Container, typename Value>\n    class hash_map_iterator\n        : public std::iterator<std::forward_iterator_tag,Value>\n    {\n        typedef Container map_type;\n        typedef typename Container::node node;\n        typedef hash_map_base::node_base node_base;\n        typedef hash_map_base::bucket bucket;\n\n        template<typename C, typename T, typename U>\n        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );\n\n        template<typename C, typename T, typename U>\n        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );\n\n        template<typename C, typename T, typename U>\n        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );\n\n        template<typename C, typename U>\n        friend class hash_map_iterator;\n\n        template<typename I>\n        friend class hash_map_range;\n\n        void advance_to_next_bucket() { // TODO?: refactor to iterator_base class\n            size_t k = my_index+1;\n            while( my_bucket && k <= my_map->my_mask ) {\n                // Following test uses 2's-complement wizardry\n                if( k& (k-2) ) // not the beginning of a segment\n                    ++my_bucket;\n                else my_bucket = my_map->get_bucket( k );\n                my_node = static_cast<node*>( my_bucket->node_list );\n                if( hash_map_base::is_valid(my_node) ) {\n                    my_index = k; return;\n                }\n                ++k;\n            }\n            my_bucket = 0; my_node = 0; my_index = k; // the end\n        }\n#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)\n        template<typename Key, typename T, typename HashCompare, typename A>\n        friend class 
interface5::concurrent_hash_map;\n#else\n    public: // workaround\n#endif\n        //! concurrent_hash_map over which we are iterating.\n        const Container *my_map;\n\n        //! Index in hash table for current item\n        size_t my_index;\n\n        //! Pointer to bucket\n        const bucket *my_bucket;\n\n        //! Pointer to node that has current item\n        node *my_node;\n\n        hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );\n\n    public:\n        //! Construct undefined iterator\n        hash_map_iterator() {}\n        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :\n            my_map(other.my_map),\n            my_index(other.my_index),\n            my_bucket(other.my_bucket),\n            my_node(other.my_node)\n        {}\n        Value& operator*() const {\n            __TBB_ASSERT( hash_map_base::is_valid(my_node), \"iterator uninitialized or at end of container?\" );\n            return my_node->item;\n        }\n        Value* operator->() const {return &operator*();}\n        hash_map_iterator& operator++();\n\n        //! 
Post increment\n        hash_map_iterator operator++(int) {\n            hash_map_iterator old(*this);\n            operator++();\n            return old;\n        }\n    };\n\n    template<typename Container, typename Value>\n    hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :\n        my_map(&map),\n        my_index(index),\n        my_bucket(b),\n        my_node( static_cast<node*>(n) )\n    {\n        if( b && !hash_map_base::is_valid(n) )\n            advance_to_next_bucket();\n    }\n\n    template<typename Container, typename Value>\n    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {\n        my_node = static_cast<node*>( my_node->next );\n        if( !my_node ) advance_to_next_bucket();\n        return *this;\n    }\n\n    template<typename Container, typename T, typename U>\n    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {\n        return i.my_node == j.my_node && i.my_map == j.my_map;\n    }\n\n    template<typename Container, typename T, typename U>\n    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {\n        return i.my_node != j.my_node || i.my_map != j.my_map;\n    }\n\n    //! Range class used with concurrent_hash_map\n    /** @ingroup containers */\n    template<typename Iterator>\n    class hash_map_range {\n        typedef typename Iterator::map_type map_type;\n        Iterator my_begin;\n        Iterator my_end;\n        mutable Iterator my_midpoint;\n        size_t my_grainsize;\n        //! Set my_midpoint to point approximately half way between my_begin and my_end.\n        void set_midpoint() const;\n        template<typename U> friend class hash_map_range;\n    public:\n        //! 
Type for size of a range\n        typedef std::size_t size_type;\n        typedef typename Iterator::value_type value_type;\n        typedef typename Iterator::reference reference;\n        typedef typename Iterator::difference_type difference_type;\n        typedef Iterator iterator;\n\n        //! True if range is empty.\n        bool empty() const {return my_begin==my_end;}\n\n        //! True if range can be partitioned into two subranges.\n        bool is_divisible() const {\n            return my_midpoint!=my_end;\n        }\n        //! Split range.\n        hash_map_range( hash_map_range& r, split ) :\n            my_end(r.my_end),\n            my_grainsize(r.my_grainsize)\n        {\n            r.my_end = my_begin = r.my_midpoint;\n            __TBB_ASSERT( !empty(), \"Splitting despite the range is not divisible\" );\n            __TBB_ASSERT( !r.empty(), \"Splitting despite the range is not divisible\" );\n            set_midpoint();\n            r.set_midpoint();\n        }\n        //! type conversion\n        template<typename U>\n        hash_map_range( hash_map_range<U>& r) :\n            my_begin(r.my_begin),\n            my_end(r.my_end),\n            my_midpoint(r.my_midpoint),\n            my_grainsize(r.my_grainsize)\n        {}\n        //! Init range with container and grainsize specified\n        hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :\n            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),\n            my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),\n            my_grainsize( grainsize_ )\n        {\n            __TBB_ASSERT( grainsize_>0, \"grainsize must be positive\" );\n            set_midpoint();\n        }\n        const Iterator& begin() const {return my_begin;}\n        const Iterator& end() const {return my_end;}\n        //! 
The grain size for this range.\n        size_type grainsize() const {return my_grainsize;}\n    };\n\n    template<typename Iterator>\n    void hash_map_range<Iterator>::set_midpoint() const {\n        // Split by groups of nodes\n        size_t m = my_end.my_index-my_begin.my_index;\n        if( m > my_grainsize ) {\n            m = my_begin.my_index + m/2u;\n            hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);\n            my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);\n        } else {\n            my_midpoint = my_end;\n        }\n        __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,\n            \"my_begin is after my_midpoint\" );\n        __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,\n            \"my_midpoint is after my_end\" );\n        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,\n            \"[my_begin, my_midpoint) range should not be empty\" );\n    }\n\n    } // internal\n//! @endcond\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Suppress \"conditional expression is constant\" warning.\n    #pragma warning( push )\n    #pragma warning( disable: 4127 )\n#endif\n\n//! Unordered map from Key to T.\n/** concurrent_hash_map is associative container with concurrent access.\n\n@par Compatibility\n    The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1).\n\n@par Exception Safety\n    - Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors.\n    - If exception happens during insert() operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment).\n    - If exception happens during operator=() operation, the container can have a part of source items, and methods size() and empty() can return wrong results.\n\n@par Changes since TBB 2.1\n    - Replaced internal algorithm and data structure. 
Patent is pending.\n    - Added buckets number argument for constructor\n\n@par Changes since TBB 2.0\n    - Fixed exception-safety\n    - Added template argument for allocator\n    - Added allocator argument in constructors\n    - Added constructor from a range of iterators\n    - Added several new overloaded insert() methods\n    - Added get_allocator()\n    - Added swap()\n    - Added count()\n    - Added overloaded erase(accessor &) and erase(const_accessor&)\n    - Added equal_range() [const]\n    - Added [const_]pointer, [const_]reference, and allocator_type types\n    - Added global functions: operator==(), operator!=(), and swap()\n\n    @ingroup containers */\ntemplate<typename Key, typename T, typename HashCompare, typename Allocator>\nclass concurrent_hash_map : protected internal::hash_map_base {\n    template<typename Container, typename Value>\n    friend class internal::hash_map_iterator;\n\n    template<typename I>\n    friend class internal::hash_map_range;\n\npublic:\n    typedef Key key_type;\n    typedef T mapped_type;\n    typedef std::pair<const Key,T> value_type;\n    typedef hash_map_base::size_type size_type;\n    typedef ptrdiff_t difference_type;\n    typedef value_type *pointer;\n    typedef const value_type *const_pointer;\n    typedef value_type &reference;\n    typedef const value_type &const_reference;\n    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;\n    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;\n    typedef internal::hash_map_range<iterator> range_type;\n    typedef internal::hash_map_range<const_iterator> const_range_type;\n    typedef Allocator allocator_type;\n\nprotected:\n    friend class const_accessor;\n    struct node;\n    typedef typename Allocator::template rebind<node>::other node_allocator_type;\n    node_allocator_type my_allocator;\n    HashCompare my_hash_compare;\n\n    struct node : public node_base {\n        value_type item;\n      
  node( const Key &key ) : item(key, T()) {}\n        node( const Key &key, const T &t ) : item(key, t) {}\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n        node( value_type&& i ) : item(std::move(i)){}\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n        node( const value_type& i ) : item(i) {}\n\n        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17\n        void *operator new( size_t /*size*/, node_allocator_type &a ) {\n            void *ptr = a.allocate(1);\n            if(!ptr)\n                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);\n            return ptr;\n        }\n        // match placement-new form above to be called if exception thrown in constructor\n        void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast<node*>(ptr),1); }\n    };\n\n    void delete_node( node_base *n ) {\n        my_allocator.destroy( static_cast<node*>(n) );\n        my_allocator.deallocate( static_cast<node*>(n), 1);\n    }\n\n    static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){\n        return  new( allocator ) node(key, *t);\n    }\n\n    static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){\n        return  new( allocator ) node(key);\n    }\n\n    static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){\n        __TBB_ASSERT(false,\"this dummy function should not be called\");\n        return NULL;\n    }\n\n    node *search_bucket( const key_type &key, bucket *b ) const {\n        node *n = static_cast<node*>( b->node_list );\n        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )\n            n = static_cast<node*>( n->next );\n        __TBB_ASSERT(n != internal::rehash_req, \"Search can be executed only for rehashed bucket\");\n        return n;\n    }\n\n    //! 
bucket accessor is to find, rehash, acquire a lock, and access a bucket\n    class bucket_accessor : public bucket::scoped_t {\n        bucket *my_b;\n    public:\n        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }\n        //! find a bucket by masked hashcode, optionally rehash, and acquire the lock\n        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {\n            my_b = base->get_bucket( h );\n            // TODO: actually, notification is unnecessary here, just hiding double-check\n            if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req\n                && try_acquire( my_b->mutex, /*write=*/true ) )\n            {\n                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing\n            }\n            else bucket::scoped_t::acquire( my_b->mutex, writer );\n            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);\n        }\n        //! check whether bucket is locked for write\n        bool is_writer() { return bucket::scoped_t::is_writer; }\n        //! 
get bucket pointer\n        bucket *operator() () { return my_b; }\n    };\n\n    // TODO refactor to hash_base\n    void rehash_bucket( bucket *b_new, const hashcode_t h ) {\n        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), \"b_new must be locked (for write)\");\n        __TBB_ASSERT( h > 1, \"The lowermost buckets can't be rehashed\" );\n        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed\n        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit\n#if __TBB_STATISTICS\n        my_info_rehashes++; // invocations of rehash_bucket\n#endif\n\n        bucket_accessor b_old( this, h & mask );\n\n        mask = (mask<<1) | 1; // get full mask for new bucket\n        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );\n    restart:\n        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {\n            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );\n#if TBB_USE_ASSERT\n            hashcode_t bmask = h & (mask>>1);\n            bmask = bmask==0? 
1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket\n            __TBB_ASSERT( (c & bmask) == (h & bmask), \"hash() function changed for key in table\" );\n#endif\n            if( (c & mask) == h ) {\n                if( !b_old.is_writer() )\n                    if( !b_old.upgrade_to_writer() ) {\n                        goto restart; // node ptr can be invalid due to concurrent erase\n                    }\n                *p = n->next; // exclude from b_old\n                add_to_bucket( b_new, n );\n            } else p = &n->next; // iterate to next item\n        }\n    }\n\n    struct call_clear_on_leave {\n        concurrent_hash_map* my_ch_map;\n        call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {}\n        void dismiss() {my_ch_map = 0;}\n        ~call_clear_on_leave(){\n            if (my_ch_map){\n                my_ch_map->clear();\n            }\n        }\n    };\npublic:\n\n    class accessor;\n    //! Combines data access, locking, and garbage collection.\n    class const_accessor : private node::scoped_t /*which derived from no_copy*/ {\n        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;\n        friend class accessor;\n    public:\n        //! Type of value\n        typedef const typename concurrent_hash_map::value_type value_type;\n\n        //! True if result is empty.\n        bool empty() const { return !my_node; }\n\n        //! Set to null\n        void release() {\n            if( my_node ) {\n                node::scoped_t::release();\n                my_node = 0;\n            }\n        }\n\n        //! Return reference to associated value in hash table.\n        const_reference operator*() const {\n            __TBB_ASSERT( my_node, \"attempt to dereference empty accessor\" );\n            return my_node->item;\n        }\n\n        //! 
Return pointer to associated value in hash table.\n        const_pointer operator->() const {\n            return &operator*();\n        }\n\n        //! Create empty result\n        const_accessor() : my_node(NULL) {}\n\n        //! Destroy result after releasing the underlying reference.\n        ~const_accessor() {\n            my_node = NULL; // scoped lock's release() is called in its destructor\n        }\n    protected:\n        bool is_writer() { return node::scoped_t::is_writer; }\n        node *my_node;\n        hashcode_t my_hash;\n    };\n\n    //! Allows write access to elements and combines data access, locking, and garbage collection.\n    class accessor: public const_accessor {\n    public:\n        //! Type of value\n        typedef typename concurrent_hash_map::value_type value_type;\n\n        //! Return reference to associated value in hash table.\n        reference operator*() const {\n            __TBB_ASSERT( this->my_node, \"attempt to dereference empty accessor\" );\n            return this->my_node->item;\n        }\n\n        //! Return pointer to associated value in hash table.\n        pointer operator->() const {\n            return &operator*();\n        }\n    };\n\n    //! Construct empty table.\n    concurrent_hash_map( const allocator_type &a = allocator_type() )\n        : internal::hash_map_base(), my_allocator(a)\n    {}\n\n    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.\n    concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )\n        : my_allocator(a)\n    {\n        reserve( n );\n    }\n\n    //! Copy constructor\n    concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a = allocator_type() )\n        : internal::hash_map_base(), my_allocator(a)\n    {\n        internal_copy(table);\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! 
Move constructor\n    concurrent_hash_map( concurrent_hash_map &&table )\n        : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))\n    {\n        swap(table);\n    }\n\n    //! Move constructor \n    concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )\n        : internal::hash_map_base(), my_allocator(a)\n    {\n        if (a == table.get_allocator()){\n            this->swap(table);\n        }else{\n            call_clear_on_leave scope_guard(this);\n            internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()));\n            scope_guard.dismiss();\n        }\n    }\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n\n    //! Construction with copying iteration range and given allocator instance\n    template<typename I>\n    concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )\n        : my_allocator(a)\n    {\n        reserve( std::distance(first, last) ); // TODO: load_factor?\n        internal_copy(first, last);\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.\n    concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )\n        : my_allocator(a)\n    {\n        reserve(il.size());\n        internal_copy(il.begin(), il.end());\n    }\n\n#endif //__TBB_INITIALIZER_LISTS_PRESENT\n\n    //! Assignment\n    concurrent_hash_map& operator=( const concurrent_hash_map &table ) {\n        if( this!=&table ) {\n            clear();\n            internal_copy(table);\n        }\n        return *this;\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! 
Move Assignment\n    concurrent_hash_map& operator=( concurrent_hash_map &&table ) {\n        if(this != &table){\n            typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;\n            if(pocma_t::value || this->my_allocator == table.my_allocator) {\n                concurrent_hash_map trash (std::move(*this));\n                //TODO: swapping allocators here may be a problem, replace with single direction moving iff pocma is set\n                this->swap(table);\n            } else {\n                //do per element move\n                concurrent_hash_map moved_copy(std::move(table), this->my_allocator);\n                this->swap(moved_copy);\n            }\n        }\n        return *this;\n    }\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Assignment\n    concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {\n        clear();\n        reserve(il.size());\n        internal_copy(il.begin(), il.end());\n        return *this;\n    }\n#endif //__TBB_INITIALIZER_LISTS_PRESENT\n\n\n    //! Rehashes and optionally resizes the whole table.\n    /** Useful to optimize performance before or after concurrent operations.\n        Also enables using of find() and count() concurrent methods in serial context. */\n    void rehash(size_type n = 0);\n\n    //! Clear table\n    void clear();\n\n    //! 
Clear table and destroy it.\n    ~concurrent_hash_map() { clear(); }\n\n    //------------------------------------------------------------------------\n    // Parallel algorithm support\n    //------------------------------------------------------------------------\n    range_type range( size_type grainsize=1 ) {\n        return range_type( *this, grainsize );\n    }\n    const_range_type range( size_type grainsize=1 ) const {\n        return const_range_type( *this, grainsize );\n    }\n\n    //------------------------------------------------------------------------\n    // STL support - not thread-safe methods\n    //------------------------------------------------------------------------\n    iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }\n    iterator end() { return iterator( *this, 0, 0, 0 ); }\n    const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }\n    const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }\n    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }\n    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }\n\n    //! Number of items in table.\n    size_type size() const { return my_size; }\n\n    //! True if size()==0.\n    bool empty() const { return my_size == 0; }\n\n    //! Upper bound on size.\n    size_type max_size() const {return (~size_type(0))/sizeof(node);}\n\n    //! Returns the current number of buckets\n    size_type bucket_count() const { return my_mask+1; }\n\n    //! return allocator object\n    allocator_type get_allocator() const { return this->my_allocator; }\n\n    //! swap two instances. 
Iterators are invalidated\n    void swap( concurrent_hash_map &table );\n\n    //------------------------------------------------------------------------\n    // concurrent map operations\n    //------------------------------------------------------------------------\n\n    //! Return count of items (0 or 1)\n    size_type count( const Key &key ) const {\n        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );\n    }\n\n    //! Find item and acquire a read lock on the item.\n    /** Return true if item is found, false otherwise. */\n    bool find( const_accessor &result, const Key &key ) const {\n        result.release();\n        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );\n    }\n\n    //! Find item and acquire a write lock on the item.\n    /** Return true if item is found, false otherwise. */\n    bool find( accessor &result, const Key &key ) {\n        result.release();\n        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );\n    }\n\n    //! Insert item (if not already present) and acquire a read lock on the item.\n    /** Returns true if item is new. */\n    bool insert( const_accessor &result, const Key &key ) {\n        result.release();\n        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );\n    }\n\n    //! Insert item (if not already present) and acquire a write lock on the item.\n    /** Returns true if item is new. */\n    bool insert( accessor &result, const Key &key ) {\n        result.release();\n        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );\n    }\n\n    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.\n    /** Returns true if item is new. 
*/\n    bool insert( const_accessor &result, const value_type &value ) {\n        result.release();\n        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );\n    }\n\n    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.\n    /** Returns true if item is new. */\n    bool insert( accessor &result, const value_type &value ) {\n        result.release();\n        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );\n    }\n\n    //! Insert item by copying if there is no such key present already\n    /** Returns true if item is inserted. */\n    bool insert( const value_type &value ) {\n        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );\n    }\n\n    //! Insert range [first, last)\n    template<typename I>\n    void insert( I first, I last ) {\n        for ( ; first != last; ++first )\n            insert( *first );\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Insert initializer list\n    void insert( std::initializer_list<value_type> il ) {\n        insert( il.begin(), il.end() );\n    }\n#endif //__TBB_INITIALIZER_LISTS_PRESENT\n\n    //! Erase item.\n    /** Return true if item was erased by particularly this call. */\n    bool erase( const Key& key );\n\n    //! Erase item by const_accessor.\n    /** Return true if item was erased by particularly this call. */\n    bool erase( const_accessor& item_accessor ) {\n        return exclude( item_accessor );\n    }\n\n    //! Erase item by accessor.\n    /** Return true if item was erased by particularly this call. */\n    bool erase( accessor& item_accessor ) {\n        return exclude( item_accessor );\n    }\n\nprotected:\n    //! 
Insert or find item and optionally acquire a lock on the item.\n    bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write,  node* (*allocate_node)(node_allocator_type& ,  const Key &, const T * )  ) ;\n\n    //! delete item by accessor\n    bool exclude( const_accessor &item_accessor );\n\n    //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)\n    template<typename I>\n    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;\n\n    //! Copy \"source\" to *this, where *this must start out empty.\n    void internal_copy( const concurrent_hash_map& source );\n\n    template<typename I>\n    void internal_copy( I first, I last );\n\n    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!\n    /** Return pointer to item with given key, or NULL if no such item exists.\n        Must not be called concurrently with erasure operations. */\n    const_pointer internal_fast_find( const Key& key ) const {\n        hashcode_t h = my_hash_compare.hash( key );\n        hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );\n        node *n;\n    restart:\n        __TBB_ASSERT((m&(m+1))==0, \"data structure is invalid\");\n        bucket *b = get_bucket( h & m );\n        // TODO: actually, notification is unnecessary here, just hiding double-check\n        if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )\n        {\n            bucket::scoped_t lock;\n            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {\n                if( b->node_list == internal::rehash_req)\n                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing\n            }\n            else lock.acquire( b->mutex, /*write=*/false );\n            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);\n        }\n        n = search_bucket( key, b );\n        if( n )\n            return 
&n->item;\n        else if( check_mask_race( h, m ) )\n            goto restart;\n        return 0;\n    }\n};\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\nbool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*) ) {\n    __TBB_ASSERT( !result || !result->my_node, NULL );\n    bool return_value;\n    hashcode_t const h = my_hash_compare.hash( key );\n    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );\n    segment_index_t grow_segment = 0;\n    node *n, *tmp_n = 0;\n    restart:\n    {//lock scope\n        __TBB_ASSERT((m&(m+1))==0, \"data structure is invalid\");\n        return_value = false;\n        // get bucket\n        bucket_accessor b( this, h & m );\n\n        // find a node\n        n = search_bucket( key, b() );\n        if( op_insert ) {\n            // [opt] insert a key\n            if( !n ) {\n                if( !tmp_n ) {\n                    tmp_n = allocate_node(my_allocator, key, t);\n                }\n                if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion\n                    // Rerun search_list, in case another thread inserted the item during the upgrade.\n                    n = search_bucket( key, b() );\n                    if( is_valid(n) ) { // unfortunately, it did\n                        b.downgrade_to_reader();\n                        goto exists;\n                    }\n                }\n                if( check_mask_race(h, m) )\n                    goto restart; // b.release() is done in ~b().\n                // insert and set flag to grow the container\n                grow_segment = insert_new_node( b(), n = tmp_n, m );\n                tmp_n = 0;\n                return_value = true;\n            }\n        } else { // find or count\n            if( !n ) {\n                if( check_mask_race( 
h, m ) )\n                    goto restart; // b.release() is done in ~b(). TODO: replace by continue\n                return false;\n            }\n            return_value = true;\n        }\n    exists:\n        if( !result ) goto check_growth;\n        // TODO: the following seems as generic/regular operation\n        // acquire the item\n        if( !result->try_acquire( n->mutex, write ) ) {\n            for( tbb::internal::atomic_backoff backoff(true);; ) {\n                if( result->try_acquire( n->mutex, write ) ) break;\n                if( !backoff.bounded_pause() ) {\n                    // the wait takes really long, restart the operation\n                    b.release();\n                    __TBB_ASSERT( !op_insert || !return_value, \"Can't acquire new item in locked bucket?\" );\n                    __TBB_Yield();\n                    m = (hashcode_t) itt_load_word_with_acquire( my_mask );\n                    goto restart;\n                }\n            }\n        }\n    }//lock scope\n    result->my_node = n;\n    result->my_hash = h;\ncheck_growth:\n    // [opt] grow the container\n    if( grow_segment ) {\n#if __TBB_STATISTICS\n        my_info_resizes++; // concurrent ones\n#endif\n        enable_segment( grow_segment );\n    }\n    if( tmp_n ) // if op_insert only\n        delete_node( tmp_n );\n    return return_value;\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\ntemplate<typename I>\nstd::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {\n    hashcode_t h = my_hash_compare.hash( key );\n    hashcode_t m = my_mask;\n    __TBB_ASSERT((m&(m+1))==0, \"data structure is invalid\");\n    h &= m;\n    bucket *b = get_bucket( h );\n    while( b->node_list == internal::rehash_req ) {\n        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit\n        b = get_bucket( h &= m );\n    }\n    node *n = search_bucket( key, b );\n    if( !n )\n  
      return std::make_pair(end_, end_);\n    iterator lower(*this, h, b, n), upper(lower);\n    return std::make_pair(lower, ++upper);\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\nbool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {\n    __TBB_ASSERT( item_accessor.my_node, NULL );\n    node_base *const n = item_accessor.my_node;\n    hashcode_t const h = item_accessor.my_hash;\n    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );\n    do {\n        // get bucket\n        bucket_accessor b( this, h & m, /*writer=*/true );\n        node_base **p = &b()->node_list;\n        while( *p && *p != n )\n            p = &(*p)->next;\n        if( !*p ) { // someone else was first\n            if( check_mask_race( h, m ) )\n                continue;\n            item_accessor.release();\n            return false;\n        }\n        __TBB_ASSERT( *p == n, NULL );\n        *p = n->next; // remove from container\n        my_size--;\n        break;\n    } while(true);\n    if( !item_accessor.is_writer() ) // need to get exclusive lock\n        item_accessor.upgrade_to_writer(); // return value means nothing here\n    item_accessor.release();\n    delete_node( n ); // Only one thread can delete it\n    return true;\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\nbool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {\n    node_base *n;\n    hashcode_t const h = my_hash_compare.hash( key );\n    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );\nrestart:\n    {//lock scope\n        // get bucket\n        bucket_accessor b( this, h & m );\n    search:\n        node_base **p = &b()->node_list;\n        n = *p;\n        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {\n            p = &n->next;\n            n = *p;\n        }\n        if( !n ) { // not found, but mask could be changed\n            
if( check_mask_race( h, m ) )\n                goto restart;\n            return false;\n        }\n        else if( !b.is_writer() && !b.upgrade_to_writer() ) {\n            if( check_mask_race( h, m ) ) // contended upgrade, check mask\n                goto restart;\n            goto search;\n        }\n        *p = n->next;\n        my_size--;\n    }\n    {\n        typename node::scoped_t item_locker( n->mutex, /*write=*/true );\n    }\n    // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!\n    delete_node( n ); // Only one thread can delete it due to write lock on the bucket\n    return true;\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\nvoid concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {\n    //TODO: respect C++11 allocator_traits<A>::propogate_on_constainer_swap\n    using std::swap;\n    swap(this->my_allocator, table.my_allocator);\n    swap(this->my_hash_compare, table.my_hash_compare);\n    internal_swap(table);\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\nvoid concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {\n    reserve( sz ); // TODO: add reduction of number of buckets as well\n    hashcode_t mask = my_mask;\n    hashcode_t b = (mask+1)>>1; // size or first index of the last segment\n    __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2\n    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing\n    for(; b <= mask; b++, bp++ ) {\n        node_base *n = bp->node_list;\n        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, \"Broken internal structure\" );\n        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, \"concurrent or unexpectedly terminated operation during rehash() execution\" );\n        if( n == internal::rehash_req ) { // rehash bucket, conditional because 
rehashing of a previous bucket may affect this one\n            hashcode_t h = b; bucket *b_old = bp;\n            do {\n                __TBB_ASSERT( h > 1, \"The lowermost buckets can't be rehashed\" );\n                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit\n                b_old = get_bucket( h &= m );\n            } while( b_old->node_list == internal::rehash_req );\n            // now h - is index of the root rehashed bucket b_old\n            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments\n            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {\n                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );\n                if( (c & mask) != h ) { // should be rehashed\n                    *p = q->next; // exclude from b_old\n                    bucket *b_new = get_bucket( c & mask );\n                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, \"hash() function changed for key in table or internal error\" );\n                    add_to_bucket( b_new, q );\n                } else p = &q->next; // iterate to next item\n            }\n        }\n    }\n#if TBB_USE_PERFORMANCE_WARNINGS\n    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics\n    static bool reported = false;\n#endif\n#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS\n    for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing\n        if( b & (b-2) ) ++bp; // not the beginning of a segment\n        else bp = get_bucket( b );\n        node_base *n = bp->node_list;\n        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, \"concurrent or unexpectedly terminated operation during rehash() execution\" );\n        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, \"Broken internal structure\" );\n#if 
TBB_USE_PERFORMANCE_WARNINGS\n        if( n == internal::empty_rehashed ) empty_buckets++;\n        else if( n->next ) overpopulated_buckets++;\n#endif\n#if TBB_USE_ASSERT\n        for( ; is_valid(n); n = n->next ) {\n            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;\n            __TBB_ASSERT( h == b, \"hash() function changed for key in table or internal error\" );\n        }\n#endif\n    }\n#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS\n#if TBB_USE_PERFORMANCE_WARNINGS\n    if( buckets > current_size) empty_buckets -= buckets - current_size;\n    else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?\n    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {\n        tbb::internal::runtime_warning(\n            \"Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\\nSize: %d  Empties: %d  Overlaps: %d\",\n            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );\n        reported = true;\n    }\n#endif\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\nvoid concurrent_hash_map<Key,T,HashCompare,A>::clear() {\n    hashcode_t m = my_mask;\n    __TBB_ASSERT((m&(m+1))==0, \"data structure is invalid\");\n#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS\n#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS\n    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics\n    static bool reported = false;\n#endif\n    bucket *bp = 0;\n    // check consistency\n    for( segment_index_t b = 0; b <= m; b++ ) {\n        if( b & (b-2) ) ++bp; // not the beginning of a segment\n        else bp = get_bucket( b );\n        node_base *n = bp->node_list;\n        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, \"Broken 
internal structure\" );\n        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, \"concurrent or unexpectedly terminated operation during clear() execution\" );\n#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS\n        if( n == internal::empty_rehashed ) empty_buckets++;\n        else if( n == internal::rehash_req ) buckets--;\n        else if( n->next ) overpopulated_buckets++;\n#endif\n#if __TBB_EXTRA_DEBUG\n        for(; is_valid(n); n = n->next ) {\n            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );\n            h &= m;\n            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, \"hash() function changed for key in table or internal error\" );\n        }\n#endif\n    }\n#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS\n#if __TBB_STATISTICS\n    printf( \"items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d\"\n        \" concurrent: resizes=%u rehashes=%u restarts=%u\\n\",\n        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,\n        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );\n    my_info_resizes = 0; // concurrent ones\n    my_info_restarts = 0; // race collisions\n    my_info_rehashes = 0;  // invocations of rehash_bucket\n#endif\n    if( buckets > current_size) empty_buckets -= buckets - current_size;\n    else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?\n    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {\n        tbb::internal::runtime_warning(\n            \"Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\\nSize: %d  Empties: %d  Overlaps: %d\",\n            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );\n        reported = true;\n    }\n#endif\n#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS\n 
   my_size = 0;\n    segment_index_t s = segment_index_of( m );\n    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], \"wrong mask or concurrent grow\" );\n    cache_aligned_allocator<bucket> alloc;\n    do {\n        __TBB_ASSERT( is_valid( my_table[s] ), \"wrong mask or concurrent grow\" );\n        segment_ptr_t buckets_ptr = my_table[s];\n        size_type sz = segment_size( s ? s : 1 );\n        for( segment_index_t i = 0; i < sz; i++ )\n            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {\n                buckets_ptr[i].node_list = n->next;\n                delete_node( n );\n            }\n        if( s >= first_block) // the first segment or the next\n            alloc.deallocate( buckets_ptr, sz );\n        else if( s == embedded_block && embedded_block != first_block )\n            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );\n        if( s >= embedded_block ) my_table[s] = 0;\n    } while(s-- > 0);\n    my_mask = embedded_buckets - 1;\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\nvoid concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {\n    reserve( source.my_size ); // TODO: load_factor?\n    hashcode_t mask = source.my_mask;\n    if( my_mask == mask ) { // optimized version\n        bucket *dst = 0, *src = 0;\n        bool rehash_required = false;\n        for( hashcode_t k = 0; k <= mask; k++ ) {\n            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment\n            else { dst = get_bucket( k ); src = source.get_bucket( k ); }\n            __TBB_ASSERT( dst->node_list != internal::rehash_req, \"Invalid bucket in destination table\");\n            node *n = static_cast<node*>( src->node_list );\n            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets\n                rehash_required = true;\n                dst->node_list = 
internal::rehash_req;\n            } else for(; n; n = static_cast<node*>( n->next ) ) {\n                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );\n                ++my_size; // TODO: replace by non-atomic op\n            }\n        }\n        if( rehash_required ) rehash();\n    } else internal_copy( source.begin(), source.end() );\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A>\ntemplate<typename I>\nvoid concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {\n    hashcode_t m = my_mask;\n    for(; first != last; ++first) {\n        hashcode_t h = my_hash_compare.hash( (*first).first );\n        bucket *b = get_bucket( h & m );\n        __TBB_ASSERT( b->node_list != internal::rehash_req, \"Invalid bucket in destination table\");\n        node *n = new( my_allocator ) node(*first);\n        add_to_bucket( b, n );\n        ++my_size; // TODO: replace by non-atomic op\n    }\n}\n\n} // namespace interface5\n\nusing interface5::concurrent_hash_map;\n\n\ntemplate<typename Key, typename T, typename HashCompare, typename A1, typename A2>\ninline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {\n    if(a.size() != b.size()) return false;\n    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());\n    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());\n    for(; i != i_end; ++i) {\n        j = b.equal_range(i->first).first;\n        if( j == j_end || !(i->second == j->second) ) return false;\n    }\n    return true;\n}\n\ntemplate<typename Key, typename T, typename HashCompare, typename A1, typename A2>\ninline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)\n{    return !(a == b); }\n\ntemplate<typename Key, typename T, typename HashCompare, typename 
A>\ninline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)\n{    a.swap( b ); }\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning( pop )\n#endif // warning 4127 is back\n\n} // namespace tbb\n\n#endif /* __TBB_concurrent_hash_map_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_lru_cache.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_concurrent_lru_cache_H\n#define __TBB_concurrent_lru_cache_H\n\n#if ! 
TBB_PREVIEW_CONCURRENT_LRU_CACHE\n    #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h\n#endif\n\n#include <map>\n#include <list>\n\n#include \"tbb_stddef.h\"\n#include \"atomic.h\"\n#include \"internal/_aggregator_impl.h\"\n\nnamespace tbb{\nnamespace interface6 {\n\n\ntemplate <typename key_type, typename value_type, typename value_functor_type = value_type (*)(key_type) >\nclass concurrent_lru_cache : internal::no_assign{\nprivate:\n    typedef concurrent_lru_cache self_type;\n    typedef value_functor_type value_function_type;\n    typedef std::size_t ref_counter_type;\n    struct map_value_type;\n    typedef std::map<key_type, map_value_type> map_storage_type;\n    typedef std::list<typename map_storage_type::iterator> lru_list_type;\n    struct map_value_type {\n        value_type my_value;\n        ref_counter_type my_ref_counter;\n        typename lru_list_type::iterator my_lru_list_iterator;\n        bool my_is_ready;\n\n        map_value_type (value_type const& a_value,  ref_counter_type a_ref_counter,    typename lru_list_type::iterator a_lru_list_iterator, bool a_is_ready)\n            : my_value(a_value), my_ref_counter(a_ref_counter), my_lru_list_iterator (a_lru_list_iterator), my_is_ready(a_is_ready)\n        {}\n    };\n\n    class handle_object;\n\n    struct aggregator_operation;\n    typedef aggregator_operation aggregated_operation_type;\n    typedef tbb::internal::aggregating_functor<self_type,aggregated_operation_type> aggregator_function_type;\n    friend class tbb::internal::aggregating_functor<self_type,aggregated_operation_type>;\n    typedef tbb::internal::aggregator<aggregator_function_type, aggregated_operation_type> aggregator_type;\n\nprivate:\n    value_function_type my_value_function;\n    std::size_t const my_number_of_lru_history_items;\n    map_storage_type my_map_storage;\n    lru_list_type my_lru_list;\n    aggregator_type my_aggregator;\n\npublic:\n    typedef handle_object handle;\n\npublic:\n    
concurrent_lru_cache(value_function_type f, std::size_t number_of_lru_history_items)\n        : my_value_function(f),my_number_of_lru_history_items(number_of_lru_history_items)\n    {\n        my_aggregator.initialize_handler(aggregator_function_type(this));\n    }\n\n    handle_object operator[](key_type k){\n        retrieve_aggregator_operation op(k);\n        my_aggregator.execute(&op);\n        if (op.is_new_value_needed()){\n             op.result().second.my_value = my_value_function(k);\n             __TBB_store_with_release(op.result().second.my_is_ready, true);\n        }else{\n            tbb::internal::spin_wait_while_eq(op.result().second.my_is_ready,false);\n        }\n        return handle_object(*this,op.result());\n    }\nprivate:\n    void signal_end_of_usage(typename map_storage_type::reference value_ref){\n        signal_end_of_usage_aggregator_operation op(value_ref);\n        my_aggregator.execute(&op);\n    }\n\nprivate:\n    struct handle_move_t:no_assign{\n        concurrent_lru_cache & my_cache_ref;\n        typename map_storage_type::reference my_map_record_ref;\n        handle_move_t(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_map_record_ref(value_ref) {};\n    };\n    class handle_object {\n        concurrent_lru_cache * my_cache_pointer;\n        typename map_storage_type::reference my_map_record_ref;\n    public:\n        handle_object(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_pointer(&cache_ref), my_map_record_ref(value_ref) {}\n        handle_object(handle_move_t m):my_cache_pointer(&m.my_cache_ref), my_map_record_ref(m.my_map_record_ref){}\n        operator handle_move_t(){ return move(*this);}\n        value_type& value(){\n            __TBB_ASSERT(my_cache_pointer,\"get value from moved from object?\");\n            return my_map_record_ref.second.my_value;\n        }\n        ~handle_object(){\n            if 
(my_cache_pointer){\n                my_cache_pointer->signal_end_of_usage(my_map_record_ref);\n            }\n        }\n    private:\n        friend handle_move_t move(handle_object& h){\n            return handle_object::move(h);\n        }\n        static handle_move_t move(handle_object& h){\n            __TBB_ASSERT(h.my_cache_pointer,\"move from the same object twice ?\");\n            concurrent_lru_cache * cache_pointer = NULL;\n            std::swap(cache_pointer,h.my_cache_pointer);\n            return handle_move_t(*cache_pointer,h.my_map_record_ref);\n        }\n    private:\n        void operator=(handle_object&);\n#if __SUNPRO_CC\n    // Presumably due to a compiler error, private copy constructor\n    // breaks expressions like handle h = cache[key];\n    public:\n#endif\n        handle_object(handle_object &);\n    };\nprivate:\n    //TODO: looks like aggregator_operation is a perfect match for statically typed variant type\n    struct aggregator_operation : tbb::internal::aggregated_operation<aggregator_operation>{\n        enum e_op_type {op_retive, op_signal_end_of_usage};\n        //TODO: try to use pointer to function apply_visitor here\n        //TODO: try virtual functions and measure the difference\n        e_op_type my_operation_type;\n        aggregator_operation(e_op_type operation_type): my_operation_type(operation_type) {}\n        void cast_and_handle(self_type& container ){\n            if (my_operation_type==op_retive){\n                static_cast<retrieve_aggregator_operation*>(this)->handle(container);\n            }else{\n                static_cast<signal_end_of_usage_aggregator_operation*>(this)->handle(container);\n            }\n        }\n    };\n    struct retrieve_aggregator_operation : aggregator_operation, private internal::no_assign {\n        key_type my_key;\n        typename map_storage_type::pointer my_result_map_record_pointer;\n        bool my_is_new_value_needed;\n        retrieve_aggregator_operation(key_type 
key):aggregator_operation(aggregator_operation::op_retive),my_key(key),my_is_new_value_needed(false){}\n        void handle(self_type& container ){\n            my_result_map_record_pointer = & container.retrieve_serial(my_key,my_is_new_value_needed);\n        }\n        typename map_storage_type::reference result(){ return * my_result_map_record_pointer; }\n        bool is_new_value_needed(){return my_is_new_value_needed;}\n    };\n    struct signal_end_of_usage_aggregator_operation : aggregator_operation, private internal::no_assign {\n        typename map_storage_type::reference my_map_record_ref;\n        signal_end_of_usage_aggregator_operation(typename map_storage_type::reference map_record_ref):aggregator_operation(aggregator_operation::op_signal_end_of_usage),my_map_record_ref(map_record_ref){}\n        void handle(self_type& container ){\n            container.signal_end_of_usage_serial(my_map_record_ref);\n        }\n    };\n\nprivate:\n   void handle_operations(aggregator_operation* op_list){\n       while(op_list){\n           op_list->cast_and_handle(*this);\n           aggregator_operation* tmp = op_list;\n           op_list=op_list->next;\n           tbb::internal::itt_store_word_with_release(tmp->status, uintptr_t(1));\n       }\n   }\n\nprivate:\n   typename map_storage_type::reference retrieve_serial(key_type k, bool& is_new_value_needed){\n        typename map_storage_type::iterator it = my_map_storage.find(k);\n        if (it == my_map_storage.end()){\n            it = my_map_storage.insert(it,std::make_pair(k,map_value_type(value_type(),0,my_lru_list.end(),false)));\n            is_new_value_needed = true;\n        }else {\n            typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator;\n            if (list_it!=my_lru_list.end()) {\n                __TBB_ASSERT(!it->second.my_ref_counter,\"item to be evicted should not have a live references\");\n                //item is going to be used. 
Therefore it is not a subject for eviction\n                //so - remove it from LRU history.\n                my_lru_list.erase(list_it);\n                it->second.my_lru_list_iterator= my_lru_list.end();\n            }\n        }\n        ++(it->second.my_ref_counter);\n        return *it;\n    }\n\n    void signal_end_of_usage_serial(typename map_storage_type::reference map_record_ref){\n        typename map_storage_type::iterator it = my_map_storage.find(map_record_ref.first);\n        __TBB_ASSERT(it!=my_map_storage.end(),\"cache should not return past-end iterators to outer world\");\n        __TBB_ASSERT(&(*it) == &map_record_ref,\"dangling reference has been returned to outside world? data race ?\");\n        __TBB_ASSERT( my_lru_list.end()== std::find(my_lru_list.begin(),my_lru_list.end(),it),\n                \"object in use should not be in list of unused objects \");\n        if (! --(it->second.my_ref_counter)){\n            //it was the last reference so put it to the LRU history\n            if (my_lru_list.size()>=my_number_of_lru_history_items){\n                //evict items in order to get a space\n                size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items;\n                for (size_t i=0; i<number_of_elements_to_evict; ++i){\n                    typename map_storage_type::iterator it_to_evict = my_lru_list.back();\n                    __TBB_ASSERT(!it_to_evict->second.my_ref_counter,\"item to be evicted should not have a live references\");\n                    my_lru_list.pop_back();\n                    my_map_storage.erase(it_to_evict);\n                }\n            }\n            my_lru_list.push_front(it);\n            it->second.my_lru_list_iterator = my_lru_list.begin();\n        }\n    }\n};\n} // namespace interface6\n\nusing interface6::concurrent_lru_cache;\n\n} // namespace tbb\n#endif //__TBB_concurrent_lru_cache_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_monitor.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"concurrent_monitor.h\"\n\nnamespace tbb {\nnamespace internal {\n\nvoid concurrent_monitor::thread_context::init() {\n    new (sema.begin()) binary_semaphore;\n    ready = true;\n}\n\nconcurrent_monitor::~concurrent_monitor() {\n    abort_all();\n    __TBB_ASSERT( waitset_ec.empty(), \"waitset not empty?\" );\n}\n\nvoid concurrent_monitor::prepare_wait( thread_context& thr, uintptr_t ctx ) {\n    if( !thr.ready )\n        thr.init();\n    // this is good place to pump previous spurious wakeup\n    else if( thr.spurious ) {\n        thr.spurious = false;\n        thr.semaphore().P();\n    }\n    thr.context = ctx;\n    thr.in_waitset = true;\n    {\n        tbb::spin_mutex::scoped_lock l( mutex_ec );\n        __TBB_store_relaxed( thr.epoch, __TBB_load_relaxed(epoch) );\n        waitset_ec.add( (waitset_t::node_t*)&thr );\n    }\n    atomic_fence();\n}\n\nvoid concurrent_monitor::cancel_wait( thread_context& thr ) {\n    // spurious wakeup will be pumped in the following prepare_wait()\n    thr.spurious = true;\n    // try to remove node from waitset\n    bool th_in_waitset = thr.in_waitset;\n    if( th_in_waitset ) {\n        tbb::spin_mutex::scoped_lock l( mutex_ec );\n        if (thr.in_waitset) {\n            // successfully removed from waitset,\n            // so there will be no spurious wakeup\n            thr.in_waitset = false;\n            thr.spurious = false;\n            waitset_ec.remove( (waitset_t::node_t&)thr );\n        }\n    }\n}\n\nvoid concurrent_monitor::notify_one_relaxed() {\n    if( waitset_ec.empty() )\n        return;\n    waitset_node_t* n;\n    const waitset_node_t* end = waitset_ec.end();\n    {\n        tbb::spin_mutex::scoped_lock l( mutex_ec );\n        __TBB_store_relaxed( epoch, __TBB_load_relaxed(epoch) + 1 );\n        n = waitset_ec.front();\n        if( n!=end ) {\n      
      waitset_ec.remove( *n );\n            to_thread_context(n)->in_waitset = false;\n        }\n    }\n    if( n!=end )\n        to_thread_context(n)->semaphore().V();\n}\n\nvoid concurrent_monitor::notify_all_relaxed() {\n    if( waitset_ec.empty() )\n        return;\n    dllist_t temp;\n    const waitset_node_t* end;\n    {\n        tbb::spin_mutex::scoped_lock l( mutex_ec );\n        __TBB_store_relaxed( epoch, __TBB_load_relaxed(epoch) + 1 );\n        waitset_ec.flush_to( temp );\n        end = temp.end();\n        for( waitset_node_t* n=temp.front(); n!=end; n=n->next )\n            to_thread_context(n)->in_waitset = false;\n    }\n    waitset_node_t* nxt;\n    for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {\n        nxt = n->next;\n        to_thread_context(n)->semaphore().V();\n    }\n#if TBB_USE_ASSERT\n    temp.clear();\n#endif\n}\n\nvoid concurrent_monitor::abort_all_relaxed() {\n    if( waitset_ec.empty() )\n        return;\n    dllist_t temp;\n    const waitset_node_t* end;\n    {\n        tbb::spin_mutex::scoped_lock l( mutex_ec );\n        __TBB_store_relaxed( epoch, __TBB_load_relaxed(epoch) + 1 );\n        waitset_ec.flush_to( temp );\n        end = temp.end();\n        for( waitset_node_t* n=temp.front(); n!=end; n=n->next )\n            to_thread_context(n)->in_waitset = false;\n    }\n    waitset_node_t* nxt;\n    for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {\n        nxt = n->next;\n        to_thread_context(n)->aborted = true;\n        to_thread_context(n)->semaphore().V();\n    }\n#if TBB_USE_ASSERT\n    temp.clear();\n#endif\n}\n\n} // namespace internal\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_monitor.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_concurrent_monitor_H\n#define __TBB_concurrent_monitor_H\n\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb/atomic.h\"\n#include \"tbb/spin_mutex.h\"\n#include \"tbb/tbb_exception.h\"\n#include \"tbb/aligned_space.h\"\n\n#include \"semaphore.h\"\n\nnamespace tbb {\nnamespace internal {\n\n//! 
Circular doubly-linked list with sentinel\n/** head.next points to the front and head.prev points to the back */\nclass circular_doubly_linked_list_with_sentinel : no_copy {\npublic:\n    struct node_t {\n        node_t* next;\n        node_t* prev;\n        explicit node_t() : next((node_t*)(uintptr_t)0xcdcdcdcd), prev((node_t*)(uintptr_t)0xcdcdcdcd) {}\n    };\n\n    // ctor\n    circular_doubly_linked_list_with_sentinel() {clear();}\n    // dtor\n    ~circular_doubly_linked_list_with_sentinel() {__TBB_ASSERT( head.next==&head && head.prev==&head, \"the list is not empty\" );}\n\n    inline size_t  size()  const {return count;}\n    inline bool    empty() const {return size()==0;}\n    inline node_t* front() const {return head.next;}\n    inline node_t* last()  const {return head.prev;}\n    inline node_t* begin() const {return front();}\n    inline const node_t* end() const {return &head;}\n\n    //! add to the back of the list\n    inline void add( node_t* n ) {\n        __TBB_store_relaxed(count, __TBB_load_relaxed(count) + 1);\n        n->prev = head.prev;\n        n->next = &head;\n        head.prev->next = n;\n        head.prev = n;\n    }\n\n    //! remove node 'n'\n    inline void remove( node_t& n ) {\n        __TBB_store_relaxed(count, __TBB_load_relaxed(count) - 1);\n        n.prev->next = n.next;\n        n.next->prev = n.prev;\n    }\n\n    //! 
move all elements to 'lst' and initialize the 'this' list\n    inline void flush_to( circular_doubly_linked_list_with_sentinel& lst ) {\n        if( const size_t l_count = __TBB_load_relaxed(count) ) {\n            __TBB_store_relaxed(lst.count, l_count);\n            lst.head.next = head.next;\n            lst.head.prev = head.prev;\n            head.next->prev = &lst.head;\n            head.prev->next = &lst.head;\n            clear();\n        }\n    }\n\n    void clear() {head.next = head.prev = &head; __TBB_store_relaxed(count, 0);}\nprivate:\n    __TBB_atomic size_t count;\n    node_t head;\n};\n\ntypedef circular_doubly_linked_list_with_sentinel waitset_t;\ntypedef circular_doubly_linked_list_with_sentinel dllist_t;\ntypedef circular_doubly_linked_list_with_sentinel::node_t waitset_node_t;\n\n//! concurrent_monitor\n/** fine-grained concurrent_monitor implementation */\nclass concurrent_monitor : no_copy {\npublic:\n    /** per-thread descriptor for concurrent_monitor */\n    class thread_context : waitset_node_t, no_copy {\n        friend class concurrent_monitor;\n    public:\n        thread_context() : spurious(false), aborted(false), ready(false), context(0) {\n            epoch = 0;\n            in_waitset = false;\n        }\n        ~thread_context() {\n            if (ready) {\n                if( spurious ) semaphore().P();\n                semaphore().~binary_semaphore();\n            }\n        }\n        binary_semaphore& semaphore() { return *sema.begin(); }\n    private:\n        //! 
The method for lazy initialization of the thread_context's semaphore.\n        //  Inlining of the method is undesirable, due to extra instructions for\n        //  exception support added at caller side.\n        __TBB_NOINLINE( void init() );\n        tbb::aligned_space<binary_semaphore> sema;\n        __TBB_atomic unsigned epoch;\n        tbb::atomic<bool> in_waitset;\n        bool  spurious;\n        bool  aborted;\n        bool  ready;\n        uintptr_t context;\n    };\n\n    //! ctor\n    concurrent_monitor() {__TBB_store_relaxed(epoch, 0);}\n\n    //! dtor\n    ~concurrent_monitor() ; \n\n    //! prepare wait by inserting 'thr' into the wait queue\n    void prepare_wait( thread_context& thr, uintptr_t ctx = 0 );\n\n    //! Commit wait if event count has not changed; otherwise, cancel wait.\n    /** Returns true if committed, false if canceled. */\n    inline bool commit_wait( thread_context& thr ) {\n        const bool do_it = thr.epoch == __TBB_load_relaxed(epoch);\n        // this check is just an optimization\n        if( do_it ) {\n            __TBB_ASSERT( thr.ready, \"use of commit_wait() without prior prepare_wait()\");\n            thr.semaphore().P();\n            __TBB_ASSERT( !thr.in_waitset, \"still in the queue?\" );\n            if( thr.aborted )\n                throw_exception( eid_user_abort );\n        } else {\n            cancel_wait( thr );\n        }\n        return do_it;\n    }\n    //! Cancel the wait. Removes the thread from the wait queue if not removed yet.\n    void cancel_wait( thread_context& thr );\n\n    //! Wait for a condition to be satisfied with waiting-on context\n    template<typename WaitUntil, typename Context>\n    void wait( WaitUntil until, Context on );\n\n    //! Notify one thread about the event\n    void notify_one() {atomic_fence(); notify_one_relaxed();}\n\n    //! Notify one thread about the event. Relaxed version.\n    void notify_one_relaxed();\n\n    //! 
Notify all waiting threads of the event\n    void notify_all() {atomic_fence(); notify_all_relaxed();}\n\n    //! Notify all waiting threads of the event; Relaxed version\n    void notify_all_relaxed();\n\n    //! Notify waiting threads of the event that satisfies the given predicate\n    template<typename P> void notify( const P& predicate ) {atomic_fence(); notify_relaxed( predicate );}\n\n    //! Notify waiting threads of the event that satisfies the given predicate; Relaxed version\n    template<typename P> void notify_relaxed( const P& predicate );\n\n    //! Abort any sleeping threads at the time of the call\n    void abort_all() {atomic_fence(); abort_all_relaxed(); }\n \n    //! Abort any sleeping threads at the time of the call; Relaxed version\n    void abort_all_relaxed();\n\nprivate:\n    tbb::spin_mutex mutex_ec;\n    waitset_t       waitset_ec;\n    __TBB_atomic unsigned epoch;\n    thread_context* to_thread_context( waitset_node_t* n ) { return static_cast<thread_context*>(n); }\n};\n\ntemplate<typename WaitUntil, typename Context>\nvoid concurrent_monitor::wait( WaitUntil until, Context on )\n{\n    bool slept = false;\n    thread_context thr_ctx;\n    prepare_wait( thr_ctx, on() );\n    while( !until() ) {\n        if( (slept = commit_wait( thr_ctx ) )==true )\n            if( until() ) break;\n        slept = false;\n        prepare_wait( thr_ctx, on() );\n    }\n    if( !slept )\n        cancel_wait( thr_ctx );\n}\n\ntemplate<typename P>\nvoid concurrent_monitor::notify_relaxed( const P& predicate ) {\n        if( waitset_ec.empty() )\n            return;\n        dllist_t temp;\n        waitset_node_t* nxt;\n        const waitset_node_t* end = waitset_ec.end();\n        {\n            tbb::spin_mutex::scoped_lock l( mutex_ec );\n            __TBB_store_relaxed(epoch, __TBB_load_relaxed(epoch) + 1);\n            for( waitset_node_t* n=waitset_ec.last(); n!=end; n=nxt ) {\n                nxt = n->prev;\n                thread_context* thr = 
to_thread_context( n );\n                if( predicate( thr->context ) ) {\n                    waitset_ec.remove( *n );\n                    thr->in_waitset = false;\n                    temp.add( n );\n                }\n            }\n        }\n\n        end = temp.end();\n        for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {\n            nxt = n->next;\n            to_thread_context(n)->semaphore().V();\n        }\n#if TBB_USE_ASSERT\n        temp.clear();\n#endif\n}\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* __TBB_concurrent_monitor_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_priority_queue.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_concurrent_priority_queue_H\n#define __TBB_concurrent_priority_queue_H\n\n#include \"atomic.h\"\n#include \"cache_aligned_allocator.h\"\n#include \"tbb_exception.h\"\n#include \"tbb_stddef.h\"\n#include \"tbb_profiling.h\"\n#include \"internal/_aggregator_impl.h\"\n#include <vector>\n#include <iterator>\n#include <functional>\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    #include <initializer_list>\n#endif\n\nnamespace tbb {\nnamespace interface5 {\n\nusing namespace tbb::internal;\n\n//! 
Concurrent priority queue\ntemplate <typename T, typename Compare=std::less<T>, typename A=cache_aligned_allocator<T> >\nclass concurrent_priority_queue {\n public:\n    //! Element type in the queue.\n    typedef T value_type;\n\n    //! Reference type\n    typedef T& reference;\n\n    //! Const reference type\n    typedef const T& const_reference;\n\n    //! Integral type for representing size of the queue.\n    typedef size_t size_type;\n\n    //! Difference type for iterator\n    typedef ptrdiff_t difference_type;\n\n    //! Allocator type\n    typedef A allocator_type;\n\n    //! Constructs a new concurrent_priority_queue with default capacity\n    explicit concurrent_priority_queue(const allocator_type& a = allocator_type()) : mark(0), my_size(0), data(a)\n    {\n        my_aggregator.initialize_handler(my_functor_t(this));\n    }\n\n    //! Constructs a new concurrent_priority_queue with init_sz capacity\n    explicit concurrent_priority_queue(size_type init_capacity, const allocator_type& a = allocator_type()) :\n        mark(0), my_size(0), data(a)\n    {\n        data.reserve(init_capacity);\n        my_aggregator.initialize_handler(my_functor_t(this));\n    }\n\n    //! [begin,end) constructor\n    template<typename InputIterator>\n    concurrent_priority_queue(InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) :\n        mark(0), data(begin, end, a)\n    {\n        my_aggregator.initialize_handler(my_functor_t(this));\n        heapify();\n        my_size = data.size();\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Constructor from std::initializer_list\n    concurrent_priority_queue(std::initializer_list<T> init_list, const allocator_type &a = allocator_type()) :\n        mark(0),data(init_list.begin(), init_list.end(), a)\n    {\n        my_aggregator.initialize_handler(my_functor_t(this));\n        heapify();\n        my_size = data.size();\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT\n\n    //! 
Copy constructor\n    /** This operation is unsafe if there are pending concurrent operations on the src queue. */\n    explicit concurrent_priority_queue(const concurrent_priority_queue& src) : mark(src.mark),\n        my_size(src.my_size), data(src.data.begin(), src.data.end(), src.data.get_allocator())\n    {\n        my_aggregator.initialize_handler(my_functor_t(this));\n        heapify();\n    }\n\n    //! Copy constructor with specific allocator\n    /** This operation is unsafe if there are pending concurrent operations on the src queue. */\n    concurrent_priority_queue(const concurrent_priority_queue& src, const allocator_type& a) : mark(src.mark),\n        my_size(src.my_size), data(src.data.begin(), src.data.end(), a)\n    {\n        my_aggregator.initialize_handler(my_functor_t(this));\n        heapify();\n    }\n\n    //! Assignment operator\n    /** This operation is unsafe if there are pending concurrent operations on the src queue. */\n    concurrent_priority_queue& operator=(const concurrent_priority_queue& src) {\n        if (this != &src) {\n            vector_t(src.data.begin(), src.data.end(), src.data.get_allocator()).swap(data);\n            mark = src.mark;\n            my_size = src.my_size;\n        }\n        return *this;\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! Move constructor\n    /** This operation is unsafe if there are pending concurrent operations on the src queue. */\n    concurrent_priority_queue(concurrent_priority_queue&& src) : mark(src.mark),\n        my_size(src.my_size), data(std::move(src.data))\n    {\n        my_aggregator.initialize_handler(my_functor_t(this));\n    }\n\n    //! Move constructor with specific allocator\n    /** This operation is unsafe if there are pending concurrent operations on the src queue. 
*/\n    concurrent_priority_queue(concurrent_priority_queue&& src, const allocator_type& a) : mark(src.mark),\n        my_size(src.my_size),\n#if __TBB_ALLOCATOR_TRAITS_PRESENT\n        data(std::move(src.data), a)\n#else\n    // Some early version of C++11 STL vector does not have a constructor of vector(vector&& , allocator).\n    // It seems that the reason is absence of support of allocator_traits (stateful allocators).\n        data(a)\n#endif //__TBB_ALLOCATOR_TRAITS_PRESENT\n    {\n        my_aggregator.initialize_handler(my_functor_t(this));\n#if !__TBB_ALLOCATOR_TRAITS_PRESENT\n        if (a != src.data.get_allocator()){\n            data.reserve(src.data.size());\n            data.assign(std::make_move_iterator(src.data.begin()), std::make_move_iterator(src.data.end()));\n        }else{\n            data = std::move(src.data);\n        }\n#endif //!__TBB_ALLOCATOR_TRAITS_PRESENT\n    }\n\n    //! Move assignment operator\n    /** This operation is unsafe if there are pending concurrent operations on the src queue. */\n    concurrent_priority_queue& operator=( concurrent_priority_queue&& src) {\n        if (this != &src) {\n            mark = src.mark;\n            my_size = src.my_size;\n#if !__TBB_ALLOCATOR_TRAITS_PRESENT\n            if (data.get_allocator() != src.data.get_allocator()){\n                vector_t(std::make_move_iterator(src.data.begin()), std::make_move_iterator(src.data.end()), data.get_allocator()).swap(data);\n            }else\n#endif //!__TBB_ALLOCATOR_TRAITS_PRESENT\n            {\n                data = std::move(src.data);\n            }\n        }\n        return *this;\n    }\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n\n    //! 
Assign the queue from [begin,end) range, not thread-safe\n    template<typename InputIterator>\n    void assign(InputIterator begin, InputIterator end) {\n        vector_t(begin, end, data.get_allocator()).swap(data);\n        mark = 0;\n        my_size = data.size();\n        heapify();\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Assign the queue from std::initializer_list, not thread-safe\n    void assign(std::initializer_list<T> il) { this->assign(il.begin(), il.end()); }\n\n    //! Assign from std::initializer_list, not thread-safe\n    concurrent_priority_queue& operator=(std::initializer_list<T> il) {\n        this->assign(il.begin(), il.end());\n        return *this;\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT\n\n    //! Returns true if empty, false otherwise\n    /** Returned value may not reflect results of pending operations.\n        This operation reads shared data and will trigger a race condition. */\n    bool empty() const { return size()==0; }\n\n    //! Returns the current number of elements contained in the queue\n    /** Returned value may not reflect results of pending operations.\n        This operation reads shared data and will trigger a race condition. */\n    size_type size() const { return __TBB_load_with_acquire(my_size); }\n\n    //! Pushes elem onto the queue, increasing capacity of queue if necessary\n    /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */\n    void push(const_reference elem) {\n        cpq_operation op_data(elem, PUSH_OP);\n        my_aggregator.execute(&op_data);\n        if (op_data.status == FAILED) // exception thrown\n            throw_exception(eid_bad_alloc);\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! Pushes elem onto the queue, increasing capacity of queue if necessary\n    /** This operation can be safely used concurrently with other push, try_pop or emplace operations. 
*/\n    void push(value_type &&elem) {\n        cpq_operation op_data(elem, PUSH_RVALUE_OP);\n        my_aggregator.execute(&op_data);\n        if (op_data.status == FAILED) // exception thrown\n            throw_exception(eid_bad_alloc);\n    }\n\n#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT\n    //! Constructs a new element using args as the arguments for its construction and pushes it onto the queue */\n    /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */\n    template<typename... Args>\n    void emplace(Args&&... args) {\n        push(value_type(std::forward<Args>(args)...));\n    }\n#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\n\n    //! Gets a reference to and removes highest priority element\n    /** If a highest priority element was found, sets elem and returns true,\n        otherwise returns false.\n        This operation can be safely used concurrently with other push, try_pop or emplace operations. */\n    bool try_pop(reference elem) {\n        cpq_operation op_data(POP_OP);\n        op_data.elem = &elem;\n        my_aggregator.execute(&op_data);\n        return op_data.status==SUCCEEDED;\n    }\n\n    //! Clear the queue; not thread-safe\n    /** This operation is unsafe if there are pending concurrent operations on the queue.\n        Resets size, effectively emptying queue; does not free space.\n        May not clear elements added in pending operations. */\n    void clear() {\n        data.clear();\n        mark = 0;\n        my_size = 0;\n    }\n\n    //! Swap this queue with another; not thread-safe\n    /** This operation is unsafe if there are pending concurrent operations on the queue. */\n    void swap(concurrent_priority_queue& q) {\n        using std::swap;\n        data.swap(q.data);\n        swap(mark, q.mark);\n        swap(my_size, q.my_size);\n    }\n\n    //! 
Return allocator object\n    allocator_type get_allocator() const { return data.get_allocator(); }\n\n private:\n    enum operation_type {INVALID_OP, PUSH_OP, POP_OP, PUSH_RVALUE_OP};\n    enum operation_status { WAIT=0, SUCCEEDED, FAILED };\n\n    class cpq_operation : public aggregated_operation<cpq_operation> {\n     public:\n        operation_type type;\n        union {\n            value_type *elem;\n            size_type sz;\n        };\n        cpq_operation(const_reference e, operation_type t) :\n            type(t), elem(const_cast<value_type*>(&e)) {}\n        cpq_operation(operation_type t) : type(t) {}\n    };\n\n    class my_functor_t {\n        concurrent_priority_queue<T, Compare, A> *cpq;\n     public:\n        my_functor_t() {}\n        my_functor_t(concurrent_priority_queue<T, Compare, A> *cpq_) : cpq(cpq_) {}\n        void operator()(cpq_operation* op_list) {\n            cpq->handle_operations(op_list);\n        }\n    };\n\n    typedef tbb::internal::aggregator< my_functor_t, cpq_operation > aggregator_t;\n    aggregator_t my_aggregator;\n    //! Padding added to avoid false sharing\n    char padding1[NFS_MaxLineSize - sizeof(aggregator_t)];\n    //! The point at which unsorted elements begin\n    size_type mark;\n    __TBB_atomic size_type my_size;\n    Compare compare;\n    //! Padding added to avoid false sharing\n    char padding2[NFS_MaxLineSize - (2*sizeof(size_type)) - sizeof(Compare)];\n    //! Storage for the heap of elements in queue, plus unheapified elements\n    /** data has the following structure:\n\n         binary unheapified\n          heap   elements\n        ____|_______|____\n        |       |       |\n        v       v       v\n        [_|...|_|_|...|_| |...| ]\n         0       ^       ^       ^\n                 |       |       |__capacity\n                 |       |__my_size\n                 |__mark\n\n        Thus, data stores the binary heap starting at position 0 through\n        mark-1 (it may be empty).  
Then there are 0 or more elements\n        that have not yet been inserted into the heap, in positions\n        mark through my_size-1. */\n    typedef std::vector<value_type, allocator_type> vector_t;\n    vector_t data;\n\n    void handle_operations(cpq_operation *op_list) {\n        cpq_operation *tmp, *pop_list=NULL;\n\n        __TBB_ASSERT(mark == data.size(), NULL);\n\n        // First pass processes all constant (amortized; reallocation may happen) time pushes and pops.\n        while (op_list) {\n            // ITT note: &(op_list->status) tag is used to cover accesses to op_list\n            // node. This thread is going to handle the operation, and so will acquire it\n            // and perform the associated operation w/o triggering a race condition; the\n            // thread that created the operation is waiting on the status field, so when\n            // this thread is done with the operation, it will perform a\n            // store_with_release to give control back to the waiting thread in\n            // aggregator::insert_operation.\n            call_itt_notify(acquired, &(op_list->status));\n            __TBB_ASSERT(op_list->type != INVALID_OP, NULL);\n            tmp = op_list;\n            op_list = itt_hide_load_word(op_list->next);\n            if (tmp->type == POP_OP) {\n                if (mark < data.size() &&\n                    compare(data[0], data[data.size()-1])) {\n                    // there are newly pushed elems and the last one\n                    // is higher than top\n                    *(tmp->elem) = move(data[data.size()-1]);\n                    __TBB_store_with_release(my_size, my_size-1);\n                    itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED));\n                    data.pop_back();\n                    __TBB_ASSERT(mark<=data.size(), NULL);\n                }\n                else { // no convenient item to pop; postpone\n                    itt_hide_store_word(tmp->next, pop_list);\n        
            pop_list = tmp;\n                }\n            } else { // PUSH_OP or PUSH_RVALUE_OP\n                __TBB_ASSERT(tmp->type == PUSH_OP || tmp->type == PUSH_RVALUE_OP, \"Unknown operation\" );\n                __TBB_TRY{\n                    if (tmp->type == PUSH_OP) {\n                        data.push_back(*(tmp->elem));\n                    } else {\n                        data.push_back(move(*(tmp->elem)));\n                    }\n                    __TBB_store_with_release(my_size, my_size + 1);\n                    itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED));\n                } __TBB_CATCH(...) {\n                    itt_store_word_with_release(tmp->status, uintptr_t(FAILED));\n                }\n            }\n        }\n\n        // second pass processes pop operations\n        while (pop_list) {\n            tmp = pop_list;\n            pop_list = itt_hide_load_word(pop_list->next);\n            __TBB_ASSERT(tmp->type == POP_OP, NULL);\n            if (data.empty()) {\n                itt_store_word_with_release(tmp->status, uintptr_t(FAILED));\n            }\n            else {\n                __TBB_ASSERT(mark<=data.size(), NULL);\n                if (mark < data.size() &&\n                    compare(data[0], data[data.size()-1])) {\n                    // there are newly pushed elems and the last one is\n                    // higher than top\n                    *(tmp->elem) = move(data[data.size()-1]);\n                    __TBB_store_with_release(my_size, my_size-1);\n                    itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED));\n                    data.pop_back();\n                }\n                else { // extract top and push last element down heap\n                    *(tmp->elem) = move(data[0]);\n                    __TBB_store_with_release(my_size, my_size-1);\n                    itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED));\n                    reheap();\n         
       }\n            }\n        }\n\n        // heapify any leftover pushed elements before doing the next\n        // batch of operations\n        if (mark<data.size()) heapify();\n        __TBB_ASSERT(mark == data.size(), NULL);\n    }\n\n    //! Merge unsorted elements into heap\n    void heapify() {\n        if (!mark && data.size()>0) mark = 1;\n        for (; mark<data.size(); ++mark) {\n            // for each unheapified element under size\n            size_type cur_pos = mark;\n            value_type to_place = move(data[mark]);\n            do { // push to_place up the heap\n                size_type parent = (cur_pos-1)>>1;\n                if (!compare(data[parent], to_place)) break;\n                data[cur_pos] = move(data[parent]);\n                cur_pos = parent;\n            } while( cur_pos );\n            data[cur_pos] = move(to_place);\n        }\n    }\n\n    //! Re-heapify after an extraction\n    /** Re-heapify by pushing last element down the heap from the root. */\n    void reheap() {\n        size_type cur_pos=0, child=1;\n\n        while (child < mark) {\n            size_type target = child;\n            if (child+1 < mark && compare(data[child], data[child+1]))\n                ++target;\n            // target now has the higher priority child\n            if (compare(data[target], data[data.size()-1])) break;\n            data[cur_pos] = move(data[target]);\n            cur_pos = target;\n            child = (cur_pos<<1)+1;\n        }\n        if (cur_pos != data.size()-1)\n            data[cur_pos] = move(data[data.size()-1]);\n        data.pop_back();\n        if (mark > data.size()) mark = data.size();\n    }\n};\n\n} // namespace interface5\n\nusing interface5::concurrent_priority_queue;\n\n} // namespace tbb\n\n#endif /* __TBB_concurrent_priority_queue_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_queue.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/tbb_exception.h\"\n// Define required to satisfy test in internal file.\n#define  __TBB_concurrent_queue_H\n#include \"tbb/internal/_concurrent_queue_impl.h\"\n#include \"concurrent_monitor.h\"\n#include \"itt_notify.h\"\n#include <new>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <cstring>   // for memset()\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\nusing namespace std;\n\n#if defined(_MSC_VER) && defined(_Wp64)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (disable: 4267)\n#endif\n\n#define RECORD_EVENTS 0\n\n\nnamespace tbb {\n\nnamespace internal {\n\ntypedef concurrent_queue_base_v3 concurrent_queue_base;\n\ntypedef size_t ticket;\n\n//! A queue using simple locking.\n/** For efficiency, this class has no constructor.\n    The caller is expected to zero-initialize it. 
*/\nstruct micro_queue {\n    typedef concurrent_queue_base::page page;\n\n    friend class micro_queue_pop_finalizer;\n\n    atomic<page*> head_page;\n    atomic<ticket> head_counter;\n\n    atomic<page*> tail_page;\n    atomic<ticket> tail_counter;\n\n    spin_mutex page_mutex;\n\n    void push( const void* item, ticket k, concurrent_queue_base& base,\n               concurrent_queue_base::copy_specifics op_type );\n\n    void abort_push( ticket k, concurrent_queue_base& base );\n\n    bool pop( void* dst, ticket k, concurrent_queue_base& base );\n\n    micro_queue& assign( const micro_queue& src, concurrent_queue_base& base,\n                         concurrent_queue_base::copy_specifics op_type );\n\n    page* make_copy ( concurrent_queue_base& base, const page* src_page, size_t begin_in_page,\n                      size_t end_in_page, ticket& g_index, concurrent_queue_base::copy_specifics op_type ) ;\n\n    void make_invalid( ticket k );\n};\n\n// we need to yank it out of micro_queue because of concurrent_queue_base::deallocate_page being virtual.\nclass micro_queue_pop_finalizer: no_copy {\n    typedef concurrent_queue_base::page page;\n    ticket my_ticket;\n    micro_queue& my_queue;\n    page* my_page;\n    concurrent_queue_base &base;\npublic:\n    micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base& b, ticket k, page* p ) :\n        my_ticket(k), my_queue(queue), my_page(p), base(b)\n    {}\n    ~micro_queue_pop_finalizer() {\n        page* p = my_page;\n        if( p ) {\n            spin_mutex::scoped_lock lock( my_queue.page_mutex );\n            page* q = p->next;\n            my_queue.head_page = q;\n            if( !q ) {\n                my_queue.tail_page = NULL;\n            }\n        }\n        my_queue.head_counter = my_ticket;\n        if( p )\n           base.deallocate_page( p );\n    }\n};\n\nstruct predicate_leq {\n    ticket t;\n    predicate_leq( ticket t_ ) : t(t_) {}\n    bool operator() ( uintptr_t p ) const 
{return (ticket)p<=t;}\n};\n\n//! Internal representation of a ConcurrentQueue.\n/** For efficiency, this class has no constructor.\n    The caller is expected to zero-initialize it. */\nclass concurrent_queue_rep {\npublic:\nprivate:\n    friend struct micro_queue;\n\n    //! Approximately n_queue/golden ratio\n    static const size_t phi = 3;\n\npublic:\n    //! Must be power of 2\n    static const size_t n_queue = 8;\n\n    //! Map ticket to an array index\n    static size_t index( ticket k ) {\n        return k*phi%n_queue;\n    }\n\n    atomic<ticket> head_counter;\n    concurrent_monitor items_avail;\n    atomic<size_t> n_invalid_entries;\n    char pad1[NFS_MaxLineSize-((sizeof(atomic<ticket>)+sizeof(concurrent_monitor)+sizeof(atomic<size_t>))&(NFS_MaxLineSize-1))];\n\n    atomic<ticket> tail_counter;\n    concurrent_monitor slots_avail;\n    char pad2[NFS_MaxLineSize-((sizeof(atomic<ticket>)+sizeof(concurrent_monitor))&(NFS_MaxLineSize-1))];\n    micro_queue array[n_queue];\n\n    micro_queue& choose( ticket k ) {\n        // The formula here approximates LRU in a cache-oblivious way.\n        return array[index(k)];\n    }\n\n    //! 
Value for effective_capacity that denotes unbounded queue.\n    static const ptrdiff_t infinite_capacity = ptrdiff_t(~size_t(0)/2);\n};\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // unary minus operator applied to unsigned type, result still unsigned\n    #pragma warning( push )\n    #pragma warning( disable: 4146 )\n#endif\n\nstatic void* invalid_page;\n\n//------------------------------------------------------------------------\n// micro_queue\n//------------------------------------------------------------------------\nvoid micro_queue::push( const void* item, ticket k, concurrent_queue_base& base,\n                        concurrent_queue_base::copy_specifics op_type ) {\n    k &= -concurrent_queue_rep::n_queue;\n    page* p = NULL;\n    // find index on page where we would put the data\n    size_t index = modulo_power_of_two( k/concurrent_queue_rep::n_queue, base.items_per_page );\n    if( !index ) {  // make a new page\n        __TBB_TRY {\n            p = base.allocate_page();\n        } __TBB_CATCH(...) {\n            ++base.my_rep->n_invalid_entries;\n            make_invalid( k );\n        }\n        p->mask = 0;\n        p->next = NULL;\n    }\n\n    // wait for my turn\n    if( tail_counter!=k ) // The developer insisted on keeping first check out of the backoff loop\n        for( atomic_backoff b(true);;b.pause() ) {\n            ticket tail = tail_counter;\n            if( tail==k ) break;\n            else if( tail&0x1 ) {\n                // no memory. 
throws an exception; assumes concurrent_queue_rep::n_queue>1\n                ++base.my_rep->n_invalid_entries;\n                throw_exception( eid_bad_last_alloc );\n            }\n        }\n\n    if( p ) { // page is newly allocated; insert in micro_queue\n        spin_mutex::scoped_lock lock( page_mutex );\n        if( page* q = tail_page )\n            q->next = p;\n        else\n            head_page = p;\n        tail_page = p;\n    }\n\n    if (item) {\n        p = tail_page;\n        ITT_NOTIFY( sync_acquired, p );\n        __TBB_TRY {\n            if( concurrent_queue_base::copy == op_type ) {\n                base.copy_item( *p, index, item );\n            } else {\n                __TBB_ASSERT( concurrent_queue_base::move == op_type, NULL );\n                static_cast<concurrent_queue_base_v8&>(base).move_item( *p, index, item );\n            }\n        }  __TBB_CATCH(...) {\n            ++base.my_rep->n_invalid_entries;\n            tail_counter += concurrent_queue_rep::n_queue;\n            __TBB_RETHROW();\n        }\n        ITT_NOTIFY( sync_releasing, p );\n        // If no exception was thrown, mark item as present.\n        p->mask |= uintptr_t(1)<<index;\n    }\n    else // no item; this was called from abort_push\n        ++base.my_rep->n_invalid_entries;\n\n    tail_counter += concurrent_queue_rep::n_queue;\n}\n\n\nvoid micro_queue::abort_push( ticket k, concurrent_queue_base& base ) {\n    push(NULL, k, base, concurrent_queue_base::copy);\n}\n\nbool micro_queue::pop( void* dst, ticket k, concurrent_queue_base& base ) {\n    k &= -concurrent_queue_rep::n_queue;\n    spin_wait_until_eq( head_counter, k );\n    spin_wait_while_eq( tail_counter, k );\n    page& p = *head_page;\n    __TBB_ASSERT( &p, NULL );\n    size_t index = modulo_power_of_two( k/concurrent_queue_rep::n_queue, base.items_per_page );\n    bool success = false;\n    {\n        micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep::n_queue, 
index==base.items_per_page-1 ? &p : NULL );\n        if( p.mask & uintptr_t(1)<<index ) {\n            success = true;\n            ITT_NOTIFY( sync_acquired, dst );\n            ITT_NOTIFY( sync_acquired, head_page );\n            base.assign_and_destroy_item( dst, p, index );\n            ITT_NOTIFY( sync_releasing, head_page );\n        } else {\n            --base.my_rep->n_invalid_entries;\n        }\n    }\n    return success;\n}\n\nmicro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base& base,\n                                  concurrent_queue_base::copy_specifics op_type )\n{\n    head_counter = src.head_counter;\n    tail_counter = src.tail_counter;\n\n    const page* srcp = src.head_page;\n    if( srcp ) {\n        ticket g_index = head_counter;\n        __TBB_TRY {\n            size_t n_items  = (tail_counter-head_counter)/concurrent_queue_rep::n_queue;\n            size_t index = modulo_power_of_two( head_counter/concurrent_queue_rep::n_queue, base.items_per_page );\n            size_t end_in_first_page = (index+n_items<base.items_per_page)?(index+n_items):base.items_per_page;\n\n            head_page = make_copy( base, srcp, index, end_in_first_page, g_index, op_type );\n            page* cur_page = head_page;\n\n            if( srcp != src.tail_page ) {\n                for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) {\n                    cur_page->next = make_copy( base, srcp, 0, base.items_per_page, g_index, op_type );\n                    cur_page = cur_page->next;\n                }\n\n                __TBB_ASSERT( srcp==src.tail_page, NULL );\n\n                size_t last_index = modulo_power_of_two( tail_counter/concurrent_queue_rep::n_queue, base.items_per_page );\n                if( last_index==0 ) last_index = base.items_per_page;\n\n                cur_page->next = make_copy( base, srcp, 0, last_index, g_index, op_type );\n                cur_page = cur_page->next;\n            }\n            
tail_page = cur_page;\n        } __TBB_CATCH(...) {\n            make_invalid( g_index );\n        }\n    } else {\n        head_page = tail_page = NULL;\n    }\n    return *this;\n}\n\nconcurrent_queue_base::page* micro_queue::make_copy( concurrent_queue_base& base,\n    const concurrent_queue_base::page* src_page, size_t begin_in_page, size_t end_in_page,\n    ticket& g_index, concurrent_queue_base::copy_specifics op_type )\n{\n    page* new_page = base.allocate_page();\n    new_page->next = NULL;\n    new_page->mask = src_page->mask;\n    for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index )\n        if( new_page->mask & uintptr_t(1)<<begin_in_page )\n            if( concurrent_queue_base::copy == op_type ) {\n                base.copy_page_item( *new_page, begin_in_page, *src_page, begin_in_page );\n            } else {\n                __TBB_ASSERT( concurrent_queue_base::move == op_type, NULL );\n                static_cast<concurrent_queue_base_v8&>(base).move_page_item( *new_page, begin_in_page, *src_page, begin_in_page );\n            }\n    return new_page;\n}\n\nvoid micro_queue::make_invalid( ticket k )\n{\n    static concurrent_queue_base::page dummy = {static_cast<page*>((void*)1), 0};\n    // mark it so that no more pushes are allowed.\n    invalid_page = &dummy;\n    {\n        spin_mutex::scoped_lock lock( page_mutex );\n        tail_counter = k+concurrent_queue_rep::n_queue+1;\n        if( page* q = tail_page )\n            q->next = static_cast<page*>(invalid_page);\n        else\n            head_page = static_cast<page*>(invalid_page);\n        tail_page = static_cast<page*>(invalid_page);\n    }\n    __TBB_RETHROW();\n}\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning( pop )\n#endif // warning 4146 is back\n\n//------------------------------------------------------------------------\n// 
concurrent_queue_base\n//------------------------------------------------------------------------\nconcurrent_queue_base_v3::concurrent_queue_base_v3( size_t item_sz ) {\n    items_per_page = item_sz<=  8 ? 32 :\n                     item_sz<= 16 ? 16 :\n                     item_sz<= 32 ?  8 :\n                     item_sz<= 64 ?  4 :\n                     item_sz<=128 ?  2 :\n                     1;\n    my_capacity = size_t(-1)/(item_sz>1 ? item_sz : 2);\n    my_rep = cache_aligned_allocator<concurrent_queue_rep>().allocate(1);\n    __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, \"alignment error\" );\n    __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, \"alignment error\" );\n    __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, \"alignment error\" );\n    __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, \"alignment error\" );\n    memset(my_rep,0,sizeof(concurrent_queue_rep));\n    new ( &my_rep->items_avail ) concurrent_monitor();\n    new ( &my_rep->slots_avail ) concurrent_monitor();\n    this->item_size = item_sz;\n}\n\nconcurrent_queue_base_v3::~concurrent_queue_base_v3() {\n    size_t nq = my_rep->n_queue;\n    for( size_t i=0; i<nq; i++ )\n        __TBB_ASSERT( my_rep->array[i].tail_page==NULL, \"pages were not freed properly\" );\n    cache_aligned_allocator<concurrent_queue_rep>().deallocate(my_rep,1);\n}\n\nvoid concurrent_queue_base_v3::internal_push( const void* src ) {\n    internal_insert_item( src, copy );\n}\n\nvoid concurrent_queue_base_v8::internal_push_move( const void* src ) {\n   internal_insert_item( src, move );\n}\n\nvoid concurrent_queue_base_v3::internal_insert_item( const void* src, copy_specifics op_type ) {\n    concurrent_queue_rep& r = *my_rep;\n    ticket k = r.tail_counter++;\n    ptrdiff_t e = my_capacity;\n#if DO_ITT_NOTIFY\n    bool sync_prepare_done = false;\n#endif\n    if( (ptrdiff_t)(k-r.head_counter)>=e ) { // queue is full\n#if DO_ITT_NOTIFY\n        if( 
!sync_prepare_done ) {\n            ITT_NOTIFY( sync_prepare, &sync_prepare_done );\n            sync_prepare_done = true;\n        }\n#endif\n        bool slept = false;\n        concurrent_monitor::thread_context thr_ctx;\n        r.slots_avail.prepare_wait( thr_ctx, ((ptrdiff_t)(k-e)) );\n        while( (ptrdiff_t)(k-r.head_counter)>=const_cast<volatile ptrdiff_t&>(e = my_capacity) ) {\n            __TBB_TRY {\n                slept = r.slots_avail.commit_wait( thr_ctx );\n            } __TBB_CATCH( tbb::user_abort& ) {\n                r.choose(k).abort_push(k, *this);\n                __TBB_RETHROW();\n            } __TBB_CATCH(...) {\n                __TBB_RETHROW();\n            }\n            if (slept == true) break;\n            r.slots_avail.prepare_wait( thr_ctx, ((ptrdiff_t)(k-e)) );\n        }\n        if( !slept )\n            r.slots_avail.cancel_wait( thr_ctx );\n    }\n    ITT_NOTIFY( sync_acquired, &sync_prepare_done );\n    __TBB_ASSERT( (ptrdiff_t)(k-r.head_counter)<my_capacity, NULL);\n    r.choose( k ).push( src, k, *this, op_type );\n    r.items_avail.notify( predicate_leq(k) );\n}\n\nvoid concurrent_queue_base_v3::internal_pop( void* dst ) {\n    concurrent_queue_rep& r = *my_rep;\n    ticket k;\n#if DO_ITT_NOTIFY\n    bool sync_prepare_done = false;\n#endif\n    do {\n        k=r.head_counter++;\n        if ( (ptrdiff_t)(r.tail_counter-k)<=0 ) { // queue is empty\n#if DO_ITT_NOTIFY\n            if( !sync_prepare_done ) {\n                ITT_NOTIFY( sync_prepare, dst );\n                sync_prepare_done = true;\n            }\n#endif\n            bool slept = false;\n            concurrent_monitor::thread_context thr_ctx;\n            r.items_avail.prepare_wait( thr_ctx, k );\n            while( (ptrdiff_t)(r.tail_counter-k)<=0 ) {\n                __TBB_TRY {\n                    slept = r.items_avail.commit_wait( thr_ctx );\n                } __TBB_CATCH( tbb::user_abort& ) {\n                    r.head_counter--;\n                    
__TBB_RETHROW();\n                } __TBB_CATCH(...) {\n                    __TBB_RETHROW();\n                }\n                if (slept == true) break;\n                r.items_avail.prepare_wait( thr_ctx, k );\n            }\n            if( !slept )\n                r.items_avail.cancel_wait( thr_ctx );\n        }\n        __TBB_ASSERT((ptrdiff_t)(r.tail_counter-k)>0, NULL);\n    } while( !r.choose(k).pop(dst,k,*this) );\n\n    // wake up a producer..\n    r.slots_avail.notify( predicate_leq(k) );\n}\n\nvoid concurrent_queue_base_v3::internal_abort() {\n    concurrent_queue_rep& r = *my_rep;\n    r.items_avail.abort_all();\n    r.slots_avail.abort_all();\n}\n\nbool concurrent_queue_base_v3::internal_pop_if_present( void* dst ) {\n    concurrent_queue_rep& r = *my_rep;\n    ticket k;\n    do {\n        k = r.head_counter;\n        for(;;) {\n            if( (ptrdiff_t)(r.tail_counter-k)<=0 ) {\n                // Queue is empty\n                return false;\n            }\n            // Queue had item with ticket k when we looked.  
Attempt to get that item.\n            ticket tk=k;\n            k = r.head_counter.compare_and_swap( tk+1, tk );\n            if( k==tk )\n                break;\n            // Another thread snatched the item, retry.\n        }\n    } while( !r.choose( k ).pop( dst, k, *this ) );\n\n    r.slots_avail.notify( predicate_leq(k) );\n\n    return true;\n}\n\nbool concurrent_queue_base_v3::internal_push_if_not_full( const void* src ) {\n    return internal_insert_if_not_full( src, copy );\n}\n\nbool concurrent_queue_base_v8::internal_push_move_if_not_full( const void* src ) {\n    return internal_insert_if_not_full( src, move );\n}\n\nbool concurrent_queue_base_v3::internal_insert_if_not_full( const void* src, copy_specifics op_type ) {\n    concurrent_queue_rep& r = *my_rep;\n    ticket k = r.tail_counter;\n    for(;;) {\n        if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) {\n            // Queue is full\n            return false;\n        }\n        // Queue had empty slot with ticket k when we looked.  
Attempt to claim that slot.\n        ticket tk=k;\n        k = r.tail_counter.compare_and_swap( tk+1, tk );\n        if( k==tk )\n            break;\n        // Another thread claimed the slot, so retry.\n    }\n    r.choose(k).push(src, k, *this, op_type);\n    r.items_avail.notify( predicate_leq(k) );\n    return true;\n}\n\nptrdiff_t concurrent_queue_base_v3::internal_size() const {\n    __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL );\n    return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter-my_rep->n_invalid_entries);\n}\n\nbool concurrent_queue_base_v3::internal_empty() const {\n    ticket tc = my_rep->tail_counter;\n    ticket hc = my_rep->head_counter;\n    // if tc!=r.tail_counter, the queue was not empty at some point between the two reads.\n    return ( tc==my_rep->tail_counter && ptrdiff_t(tc-hc-my_rep->n_invalid_entries)<=0 );\n}\n\nvoid concurrent_queue_base_v3::internal_set_capacity( ptrdiff_t capacity, size_t /*item_sz*/ ) {\n    my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity;\n}\n\nvoid concurrent_queue_base_v3::internal_finish_clear() {\n    size_t nq = my_rep->n_queue;\n    for( size_t i=0; i<nq; ++i ) {\n        page* tp = my_rep->array[i].tail_page;\n        __TBB_ASSERT( my_rep->array[i].head_page==tp, \"at most one page should remain\" );\n        if( tp!=NULL) {\n            if( tp!=invalid_page ) deallocate_page( tp );\n            my_rep->array[i].tail_page = NULL;\n        }\n    }\n}\n\nvoid concurrent_queue_base_v3::internal_throw_exception() const {\n    throw_exception( eid_bad_alloc );\n}\n\nvoid concurrent_queue_base_v3::internal_assign( const concurrent_queue_base& src, copy_specifics op_type ) {\n    items_per_page = src.items_per_page;\n    my_capacity = src.my_capacity;\n\n    // copy concurrent_queue_rep.\n    my_rep->head_counter = src.my_rep->head_counter;\n    my_rep->tail_counter = src.my_rep->tail_counter;\n    my_rep->n_invalid_entries = src.my_rep->n_invalid_entries;\n\n    // 
copy micro_queues\n    for( size_t i = 0; i<my_rep->n_queue; ++i )\n        my_rep->array[i].assign( src.my_rep->array[i], *this, op_type );\n\n    __TBB_ASSERT( my_rep->head_counter==src.my_rep->head_counter && my_rep->tail_counter==src.my_rep->tail_counter,\n            \"the source concurrent queue should not be concurrently modified.\" );\n}\n\nvoid concurrent_queue_base_v3::assign( const concurrent_queue_base& src ) {\n    internal_assign( src, copy );\n}\n\nvoid concurrent_queue_base_v8::move_content( concurrent_queue_base_v8& src ) {\n    internal_assign( src, move );\n}\n\n//------------------------------------------------------------------------\n// concurrent_queue_iterator_rep\n//------------------------------------------------------------------------\nclass concurrent_queue_iterator_rep: no_assign {\npublic:\n    ticket head_counter;\n    const concurrent_queue_base& my_queue;\n    const size_t offset_of_last;\n    concurrent_queue_base::page* array[concurrent_queue_rep::n_queue];\n    concurrent_queue_iterator_rep( const concurrent_queue_base& queue, size_t offset_of_last_ ) :\n        head_counter(queue.my_rep->head_counter),\n        my_queue(queue),\n        offset_of_last(offset_of_last_)\n    {\n        const concurrent_queue_rep& rep = *queue.my_rep;\n        for( size_t k=0; k<concurrent_queue_rep::n_queue; ++k )\n            array[k] = rep.array[k].head_page;\n    }\n    //! Set item to point to kth element.  
Return true if at end of queue or item is marked valid; false otherwise.\n    bool get_item( void*& item, size_t k ) {\n        if( k==my_queue.my_rep->tail_counter ) {\n            item = NULL;\n            return true;\n        } else {\n            concurrent_queue_base::page* p = array[concurrent_queue_rep::index(k)];\n            __TBB_ASSERT(p,NULL);\n            size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, my_queue.items_per_page );\n            item = static_cast<unsigned char*>(static_cast<void*>(p)) + offset_of_last + my_queue.item_size*i;\n            return (p->mask & uintptr_t(1)<<i)!=0;\n        }\n    }\n};\n\n//------------------------------------------------------------------------\n// concurrent_queue_iterator_base\n//------------------------------------------------------------------------\n\nvoid concurrent_queue_iterator_base_v3::initialize( const concurrent_queue_base& queue, size_t offset_of_last ) {\n    my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep>().allocate(1);\n    new( my_rep ) concurrent_queue_iterator_rep(queue,offset_of_last);\n    size_t k = my_rep->head_counter;\n    if( !my_rep->get_item(my_item, k) ) advance();\n}\n\nconcurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base& queue ) {\n    initialize(queue,0);\n}\n\nconcurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base& queue, size_t offset_of_last ) {\n    initialize(queue,offset_of_last);\n}\n\nvoid concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base& other ) {\n    if( my_rep!=other.my_rep ) {\n        if( my_rep ) {\n            cache_aligned_allocator<concurrent_queue_iterator_rep>().deallocate(my_rep, 1);\n            my_rep = NULL;\n        }\n        if( other.my_rep ) {\n            my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep>().allocate(1);\n            new( my_rep ) concurrent_queue_iterator_rep( 
*other.my_rep );\n        }\n    }\n    my_item = other.my_item;\n}\n\nvoid concurrent_queue_iterator_base_v3::advance() {\n    __TBB_ASSERT( my_item, \"attempt to increment iterator past end of queue\" );\n    size_t k = my_rep->head_counter;\n    const concurrent_queue_base& queue = my_rep->my_queue;\n#if TBB_USE_ASSERT\n    void* tmp;\n    my_rep->get_item(tmp,k);\n    __TBB_ASSERT( my_item==tmp, NULL );\n#endif /* TBB_USE_ASSERT */\n    size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, queue.items_per_page );\n    if( i==queue.items_per_page-1 ) {\n        concurrent_queue_base::page*& root = my_rep->array[concurrent_queue_rep::index(k)];\n        root = root->next;\n    }\n    // advance k\n    my_rep->head_counter = ++k;\n    if( !my_rep->get_item(my_item, k) ) advance();\n}\n\nconcurrent_queue_iterator_base_v3::~concurrent_queue_iterator_base_v3() {\n    //delete my_rep;\n    cache_aligned_allocator<concurrent_queue_iterator_rep>().deallocate(my_rep, 1);\n    my_rep = NULL;\n}\n\n} // namespace internal\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_queue.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_concurrent_queue_H\n#define __TBB_concurrent_queue_H\n\n#include \"internal/_concurrent_queue_impl.h\"\n\nnamespace tbb {\n\nnamespace strict_ppl {\n\n//! 
A high-performance thread-safe non-blocking concurrent queue.\n/** Multiple threads may each push and pop concurrently.\n    Assignment construction is not allowed.\n    @ingroup containers */\ntemplate<typename T, typename A = cache_aligned_allocator<T> > \nclass concurrent_queue: public internal::concurrent_queue_base_v3<T> {\n    template<typename Container, typename Value> friend class internal::concurrent_queue_iterator;\n\n    //! Allocator type\n    typedef typename A::template rebind<char>::other page_allocator_type;\n    page_allocator_type my_allocator;\n\n    //! Allocates a block of size n (bytes)\n    /*override*/ virtual void *allocate_block( size_t n ) {\n        void *b = reinterpret_cast<void*>(my_allocator.allocate( n ));\n        if( !b )\n            internal::throw_exception(internal::eid_bad_alloc); \n        return b;\n    }\n\n    //! Deallocates block created by allocate_block.\n    /*override*/ virtual void deallocate_block( void *b, size_t n ) {\n        my_allocator.deallocate( reinterpret_cast<char*>(b), n );\n    }\n\n    static void copy_construct_item(T* location, const void* src){\n        new (location) T(*static_cast<const T*>(src));\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    static void move_construct_item(T* location, const void* src) {\n        new (location) T( std::move(*static_cast<T*>(const_cast<void*>(src))) );\n    }\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\npublic:\n    //! Element type in the queue.\n    typedef T value_type;\n\n    //! Reference type\n    typedef T& reference;\n\n    //! Const reference type\n    typedef const T& const_reference;\n\n    //! Integral type for representing size of the queue.\n    typedef size_t size_type;\n\n    //! Difference type for iterator\n    typedef ptrdiff_t difference_type;\n\n    //! Allocator type\n    typedef A allocator_type;\n\n    //! 
Construct empty queue\n    explicit concurrent_queue(const allocator_type& a = allocator_type()) :\n        my_allocator( a )\n    {\n    }\n\n    //! [begin,end) constructor\n    template<typename InputIterator>\n    concurrent_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) :\n        my_allocator( a )\n    {\n        for( ; begin != end; ++begin )\n            this->push(*begin);\n    }\n\n    //! Copy constructor\n    concurrent_queue( const concurrent_queue& src, const allocator_type& a = allocator_type()) :\n        internal::concurrent_queue_base_v3<T>(), my_allocator( a )\n    {\n        this->assign( src, copy_construct_item );\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! Move constructors\n    concurrent_queue( concurrent_queue&& src ) :\n        internal::concurrent_queue_base_v3<T>(), my_allocator( std::move(src.my_allocator) )\n    {\n        this->internal_swap( src );\n    }\n\n    concurrent_queue( concurrent_queue&& src, const allocator_type& a ) :\n        internal::concurrent_queue_base_v3<T>(), my_allocator( a )\n    {\n        // checking that memory allocated by one instance of allocator can be deallocated\n        // with another\n        if( my_allocator == src.my_allocator) {\n            this->internal_swap( src );\n        } else {\n            // allocators are different => performing per-element move\n            this->assign( src, move_construct_item );\n            src.clear();\n        }\n    }\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\n\n    //! Destroy queue\n    ~concurrent_queue();\n\n    //! Enqueue an item at tail of queue.\n    void push( const T& source ) {\n        this->internal_push( &source, copy_construct_item );\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    void push( T&& source ) {\n        this->internal_push( &source, move_construct_item );\n    }\n\n#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT\n    template<typename... 
Arguments>\n    void emplace( Arguments&&... args ) {\n        push( T(std::forward<Arguments>( args )...) );\n    }\n#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\n\n    //! Attempt to dequeue an item from head of queue.\n    /** Does not wait for item to become available.\n        Returns true if successful; false otherwise. */\n    bool try_pop( T& result ) {\n        return this->internal_try_pop( &result );\n    }\n\n    //! Return the number of items in the queue; thread unsafe\n    size_type unsafe_size() const {return this->internal_size();}\n\n    //! Equivalent to size()==0.\n    bool empty() const {return this->internal_empty();}\n\n    //! Clear the queue. not thread-safe.\n    void clear() ;\n\n    //! Return allocator object\n    allocator_type get_allocator() const { return this->my_allocator; }\n\n    typedef internal::concurrent_queue_iterator<concurrent_queue,T> iterator;\n    typedef internal::concurrent_queue_iterator<concurrent_queue,const T> const_iterator;\n\n    //------------------------------------------------------------------------\n    // The iterators are intended only for debugging.  They are slow and not thread safe.\n    //------------------------------------------------------------------------\n    iterator unsafe_begin() {return iterator(*this);}\n    iterator unsafe_end() {return iterator();}\n    const_iterator unsafe_begin() const {return const_iterator(*this);}\n    const_iterator unsafe_end() const {return const_iterator();}\n} ;\n\ntemplate<typename T, class A>\nconcurrent_queue<T,A>::~concurrent_queue() {\n    clear();\n    this->internal_finish_clear();\n}\n\ntemplate<typename T, class A>\nvoid concurrent_queue<T,A>::clear() {\n    while( !empty() ) {\n        T value;\n        this->internal_try_pop(&value);\n    }\n}\n\n} // namespace strict_ppl\n\n//! 
A high-performance thread-safe blocking concurrent bounded queue.\n/** This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics.\n    Note that method names agree with the PPL-style concurrent queue.\n    Multiple threads may each push and pop concurrently.\n    Assignment construction is not allowed.\n    @ingroup containers */\ntemplate<typename T, class A = cache_aligned_allocator<T> >\nclass concurrent_bounded_queue: public internal::concurrent_queue_base_v8 {\n    template<typename Container, typename Value> friend class internal::concurrent_queue_iterator;\n\n    //! Allocator type\n    typedef typename A::template rebind<char>::other page_allocator_type;\n    page_allocator_type my_allocator;\n\n    typedef typename concurrent_queue_base_v3::padded_page<T> padded_page;\n    typedef typename concurrent_queue_base_v3::copy_specifics copy_specifics;\n\n    //! Class used to ensure exception-safety of method \"pop\"\n    class destroyer: internal::no_copy {\n        T& my_value;\n    public:\n        destroyer( T& value ) : my_value(value) {}\n        ~destroyer() {my_value.~T();}\n    };\n\n    T& get_ref( page& p, size_t index ) {\n        __TBB_ASSERT( index<items_per_page, NULL );\n        return (&static_cast<padded_page*>(static_cast<void*>(&p))->last)[index];\n    }\n\n    /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) {\n        new( &get_ref(dst,index) ) T(*static_cast<const T*>(src));\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    /*override*/ virtual void move_item( page& dst, size_t index, const void* src ) {\n        new( &get_ref(dst,index) ) T( std::move(*static_cast<T*>(const_cast<void*>(src))) );\n    }\n#else\n    /*override*/ virtual void move_item( page&, size_t, const void* ) {\n        __TBB_ASSERT( false, \"Unreachable code\" );\n    }\n#endif\n\n    /*override*/ virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) {\n        new( 
&get_ref(dst,dindex) ) T( get_ref( const_cast<page&>(src), sindex ) );\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    /*override*/ virtual void move_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) {\n        new( &get_ref(dst,dindex) ) T( std::move(get_ref( const_cast<page&>(src), sindex )) );\n    }\n#else\n    /*override*/ virtual void move_page_item( page&, size_t, const page&, size_t ) {\n        __TBB_ASSERT( false, \"Unreachable code\" );\n    }\n#endif\n\n    /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) {\n        T& from = get_ref(src,index);\n        destroyer d(from);\n        *static_cast<T*>(dst) = tbb::internal::move( from );\n    }\n\n    /*override*/ virtual page *allocate_page() {\n        size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T);\n        page *p = reinterpret_cast<page*>(my_allocator.allocate( n ));\n        if( !p )\n            internal::throw_exception(internal::eid_bad_alloc);\n        return p;\n    }\n\n    /*override*/ virtual void deallocate_page( page *p ) {\n        size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T);\n        my_allocator.deallocate( reinterpret_cast<char*>(p), n );\n    }\n\npublic:\n    //! Element type in the queue.\n    typedef T value_type;\n\n    //! Allocator type\n    typedef A allocator_type;\n\n    //! Reference type\n    typedef T& reference;\n\n    //! Const reference type\n    typedef const T& const_reference;\n\n    //! Integral type for representing size of the queue.\n    /** Note that the size_type is a signed integral type.\n        This is because the size can be negative if there are pending pops without corresponding pushes. */\n    typedef std::ptrdiff_t size_type;\n\n    //! Difference type for iterator\n    typedef std::ptrdiff_t difference_type;\n\n    //! 
Construct empty queue\n    explicit concurrent_bounded_queue(const allocator_type& a = allocator_type()) : \n        concurrent_queue_base_v8( sizeof(T) ), my_allocator( a )\n    {\n    }\n\n    //! Copy constructor\n    concurrent_bounded_queue( const concurrent_bounded_queue& src, const allocator_type& a = allocator_type())\n        : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a )\n    {\n        assign( src );\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! Move constructors\n    concurrent_bounded_queue( concurrent_bounded_queue&& src )\n        : concurrent_queue_base_v8( sizeof(T) ), my_allocator( std::move(src.my_allocator) )\n    {\n        internal_swap( src );\n    }\n\n    concurrent_bounded_queue( concurrent_bounded_queue&& src, const allocator_type& a )\n        : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a )\n    {\n        // checking that memory allocated by one instance of allocator can be deallocated\n        // with another\n        if( my_allocator == src.my_allocator) {\n            this->internal_swap( src );\n        } else {\n            // allocators are different => performing per-element move\n            this->move_content( src );\n            src.clear();\n        }\n    }\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\n\n    //! [begin,end) constructor\n    template<typename InputIterator>\n    concurrent_bounded_queue( InputIterator begin, InputIterator end,\n                              const allocator_type& a = allocator_type())\n        : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a )\n    {\n        for( ; begin != end; ++begin )\n            internal_push_if_not_full(&*begin);\n    }\n\n    //! Destroy queue\n    ~concurrent_bounded_queue();\n\n    //! Enqueue an item at tail of queue.\n    void push( const T& source ) {\n        internal_push( &source );\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! 
Move an item at tail of queue.\n    void push( T&& source ) {\n        internal_push_move( &source );\n    }\n\n#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT\n    template<typename... Arguments>\n    void emplace( Arguments&&... args ) {\n        push( T(std::forward<Arguments>( args )...) );\n    }\n#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\n\n    //! Dequeue item from head of queue.\n    /** Block until an item becomes available, and then dequeue it. */\n    void pop( T& destination ) {\n        internal_pop( &destination );\n    }\n\n#if TBB_USE_EXCEPTIONS\n    //! Abort all pending queue operations\n    void abort() {\n        internal_abort();\n    }\n#endif\n\n    //! Enqueue an item at tail of queue if queue is not already full.\n    /** Does not wait for queue to become not full.\n        Returns true if item is pushed; false if queue was already full. */\n    bool try_push( const T& source ) {\n        return internal_push_if_not_full( &source );\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! Move an item at tail of queue if queue is not already full.\n    /** Does not wait for queue to become not full.\n        Returns true if item is pushed; false if queue was already full. */\n    bool try_push( T&& source ) {\n        return internal_push_move_if_not_full( &source );\n    }\n#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT\n    template<typename... Arguments>\n    bool try_emplace( Arguments&&... args ) {\n        return try_push( T(std::forward<Arguments>( args )...) );\n    }\n#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\n\n    //! Attempt to dequeue an item from head of queue.\n    /** Does not wait for item to become available.\n        Returns true if successful; false otherwise. */\n    bool try_pop( T& destination ) {\n        return internal_pop_if_present( &destination );\n    }\n\n    //! 
Return number of pushes minus number of pops.\n    /** Note that the result can be negative if there are pops waiting for the \n        corresponding pushes.  The result can also exceed capacity() if there \n        are push operations in flight. */\n    size_type size() const {return internal_size();}\n\n    //! Equivalent to size()<=0.\n    bool empty() const {return internal_empty();}\n\n    //! Maximum number of allowed elements\n    size_type capacity() const {\n        return my_capacity;\n    }\n\n    //! Set the capacity\n    /** Setting the capacity to 0 causes subsequent try_push operations to always fail,\n        and subsequent push operations to block forever. */\n    void set_capacity( size_type new_capacity ) {\n        internal_set_capacity( new_capacity, sizeof(T) );\n    }\n\n    //! return allocator object\n    allocator_type get_allocator() const { return this->my_allocator; }\n\n    //! clear the queue. not thread-safe.\n    void clear() ;\n\n    typedef internal::concurrent_queue_iterator<concurrent_bounded_queue,T> iterator;\n    typedef internal::concurrent_queue_iterator<concurrent_bounded_queue,const T> const_iterator;\n\n    //------------------------------------------------------------------------\n    // The iterators are intended only for debugging.  
They are slow and not thread safe.\n    //------------------------------------------------------------------------\n    iterator unsafe_begin() {return iterator(*this);}\n    iterator unsafe_end() {return iterator();}\n    const_iterator unsafe_begin() const {return const_iterator(*this);}\n    const_iterator unsafe_end() const {return const_iterator();}\n\n}; \n\ntemplate<typename T, class A>\nconcurrent_bounded_queue<T,A>::~concurrent_bounded_queue() {\n    clear();\n    internal_finish_clear();\n}\n\ntemplate<typename T, class A>\nvoid concurrent_bounded_queue<T,A>::clear() {\n    while( !empty() ) {\n        T value;\n        internal_pop_if_present(&value);\n    }\n}\n\nusing strict_ppl::concurrent_queue;\n\n} // namespace tbb\n\n#endif /* __TBB_concurrent_queue_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_unordered_map.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n/* Container implementations in this header are based on PPL implementations\n   provided by Microsoft. 
*/\n\n#ifndef __TBB_concurrent_unordered_map_H\n#define __TBB_concurrent_unordered_map_H\n\n#include \"internal/_concurrent_unordered_impl.h\"\n\nnamespace tbb\n{\n\nnamespace interface5 {\n\n// Template class for hash map traits\ntemplate<typename Key, typename T, typename Hash_compare, typename Allocator, bool Allow_multimapping>\nclass concurrent_unordered_map_traits\n{\nprotected:\n    typedef std::pair<const Key, T> value_type;\n    typedef Key key_type;\n    typedef Hash_compare hash_compare;\n    typedef typename Allocator::template rebind<value_type>::other allocator_type;\n    enum { allow_multimapping = Allow_multimapping };\n\n    concurrent_unordered_map_traits() : my_hash_compare() {}\n    concurrent_unordered_map_traits(const hash_compare& hc) : my_hash_compare(hc) {}\n\n    class value_compare : public std::binary_function<value_type, value_type, bool>\n    {\n        friend class concurrent_unordered_map_traits<Key, T, Hash_compare, Allocator, Allow_multimapping>;\n\n    public:\n        bool operator()(const value_type& left, const value_type& right) const\n        {\n            return (my_hash_compare(left.first, right.first));\n        }\n\n        value_compare(const hash_compare& comparator) : my_hash_compare(comparator) {}\n\n    protected:\n        hash_compare my_hash_compare;    // the comparator predicate for keys\n    };\n\n    template<class Type1, class Type2>\n    static const Key& get_key(const std::pair<Type1, Type2>& value) {\n        return (value.first);\n    }\n\n    hash_compare my_hash_compare; // the comparator predicate for keys\n};\n\ntemplate <typename Key, typename T, typename Hasher = tbb::tbb_hash<Key>, typename Key_equality = std::equal_to<Key>,\n         typename Allocator = tbb::tbb_allocator<std::pair<const Key, T> > >\nclass concurrent_unordered_map :\n    public internal::concurrent_unordered_base< concurrent_unordered_map_traits<Key, T,\n    internal::hash_compare<Key, Hasher, Key_equality>, Allocator, false> 
>\n{\n    // Base type definitions\n    typedef internal::hash_compare<Key, Hasher, Key_equality> hash_compare;\n    typedef concurrent_unordered_map_traits<Key, T, hash_compare, Allocator, false> traits_type;\n    typedef internal::concurrent_unordered_base< traits_type > base_type;\n#if __TBB_EXTRA_DEBUG\npublic:\n#endif\n    using traits_type::allow_multimapping;\npublic:\n    using base_type::end;\n    using base_type::find;\n    using base_type::insert;\n\n    // Type definitions\n    typedef Key key_type;\n    typedef typename base_type::value_type value_type;\n    typedef T mapped_type;\n    typedef Hasher hasher;\n    typedef Key_equality key_equal;\n    typedef hash_compare key_compare;\n\n    typedef typename base_type::allocator_type allocator_type;\n    typedef typename base_type::pointer pointer;\n    typedef typename base_type::const_pointer const_pointer;\n    typedef typename base_type::reference reference;\n    typedef typename base_type::const_reference const_reference;\n\n    typedef typename base_type::size_type size_type;\n    typedef typename base_type::difference_type difference_type;\n\n    typedef typename base_type::iterator iterator;\n    typedef typename base_type::const_iterator const_iterator;\n    typedef typename base_type::iterator local_iterator;\n    typedef typename base_type::const_iterator const_local_iterator;\n\n    // Construction/destruction/copying\n    explicit concurrent_unordered_map(size_type n_of_buckets = base_type::initial_bucket_number,\n        const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),\n        const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)\n    {\n    }\n\n    concurrent_unordered_map(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a)\n    {\n    }\n\n    template <typename Iterator>\n    concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = 
base_type::initial_bucket_number,\n        const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),\n        const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)\n    {\n        insert(first, last);\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Constructor from initializer_list\n    concurrent_unordered_map(std::initializer_list<value_type> il, size_type n_of_buckets = base_type::initial_bucket_number,\n        const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),\n        const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)\n    {\n        this->insert(il.begin(),il.end());\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN\n    concurrent_unordered_map(const concurrent_unordered_map& table)\n        : base_type(table)\n    {\n    }\n\n    concurrent_unordered_map& operator=(const concurrent_unordered_map& table)\n    {\n        return static_cast<concurrent_unordered_map&>(base_type::operator=(table));\n    }\n\n    concurrent_unordered_map(concurrent_unordered_map&& table)\n        : base_type(std::move(table))\n    {\n    }\n\n    concurrent_unordered_map& operator=(concurrent_unordered_map&& table)\n    {\n        return static_cast<concurrent_unordered_map&>(base_type::operator=(std::move(table)));\n    }\n#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN\n\n    concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a)\n        : base_type(table, a)\n    {\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    concurrent_unordered_map(concurrent_unordered_map&& table, const Allocator& a) : base_type(std::move(table), a)\n    {\n    }\n#endif\n    // Observers\n    mapped_type& operator[](const key_type& key)\n    {\n        
iterator where = find(key);\n\n        if (where == end())\n        {\n            where = insert(std::pair<key_type, mapped_type>(key, mapped_type())).first;\n        }\n\n        return ((*where).second);\n    }\n\n    mapped_type& at(const key_type& key)\n    {\n        iterator where = find(key);\n\n        if (where == end())\n        {\n            tbb::internal::throw_exception(tbb::internal::eid_invalid_key);\n        }\n\n        return ((*where).second);\n    }\n\n    const mapped_type& at(const key_type& key) const\n    {\n        const_iterator where = find(key);\n\n        if (where == end())\n        {\n            tbb::internal::throw_exception(tbb::internal::eid_invalid_key);\n        }\n\n        return ((*where).second);\n    }\n};\n\ntemplate < typename Key, typename T, typename Hasher = tbb::tbb_hash<Key>, typename Key_equality = std::equal_to<Key>,\n        typename Allocator = tbb::tbb_allocator<std::pair<const Key, T> > >\nclass concurrent_unordered_multimap :\n    public internal::concurrent_unordered_base< concurrent_unordered_map_traits< Key, T,\n    internal::hash_compare<Key, Hasher, Key_equality>, Allocator, true> >\n{\n    // Base type definitions\n    typedef internal::hash_compare<Key, Hasher, Key_equality> hash_compare;\n    typedef concurrent_unordered_map_traits<Key, T, hash_compare, Allocator, true> traits_type;\n    typedef internal::concurrent_unordered_base<traits_type> base_type;\n#if __TBB_EXTRA_DEBUG\npublic:\n#endif\n    using traits_type::allow_multimapping;\npublic:\n    using base_type::insert;\n\n    // Type definitions\n    typedef Key key_type;\n    typedef typename base_type::value_type value_type;\n    typedef T mapped_type;\n    typedef Hasher hasher;\n    typedef Key_equality key_equal;\n    typedef hash_compare key_compare;\n\n    typedef typename base_type::allocator_type allocator_type;\n    typedef typename base_type::pointer pointer;\n    typedef typename base_type::const_pointer const_pointer;\n    typedef 
typename base_type::reference reference;\n    typedef typename base_type::const_reference const_reference;\n\n    typedef typename base_type::size_type size_type;\n    typedef typename base_type::difference_type difference_type;\n\n    typedef typename base_type::iterator iterator;\n    typedef typename base_type::const_iterator const_iterator;\n    typedef typename base_type::iterator local_iterator;\n    typedef typename base_type::const_iterator const_local_iterator;\n\n    // Construction/destruction/copying\n    explicit concurrent_unordered_multimap(size_type n_of_buckets = base_type::initial_bucket_number,\n        const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),\n        const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)\n    {\n    }\n\n    concurrent_unordered_multimap(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a)\n    {\n    }\n\n    template <typename Iterator>\n    concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number,\n        const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),\n        const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets,key_compare(_Hasher,_Key_equality), a)\n    {\n        insert(first, last);\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! 
Constructor from initializer_list\n    concurrent_unordered_multimap(std::initializer_list<value_type> il, size_type n_of_buckets = base_type::initial_bucket_number,\n        const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),\n        const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)\n    {\n        this->insert(il.begin(),il.end());\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN\n    concurrent_unordered_multimap(const concurrent_unordered_multimap& table)\n        : base_type(table)\n    {\n    }\n\n    concurrent_unordered_multimap& operator=(const concurrent_unordered_multimap& table)\n    {\n        return static_cast<concurrent_unordered_multimap&>(base_type::operator=(table));\n    }\n\n    concurrent_unordered_multimap(concurrent_unordered_multimap&& table)\n        : base_type(std::move(table))\n    {\n    }\n\n    concurrent_unordered_multimap& operator=(concurrent_unordered_multimap&& table)\n    {\n        return static_cast<concurrent_unordered_multimap&>(base_type::operator=(std::move(table)));\n    }\n#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN\n\n    concurrent_unordered_multimap(const concurrent_unordered_multimap& table, const Allocator& a)\n        : base_type(table, a)\n    {\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    concurrent_unordered_multimap(concurrent_unordered_multimap&& table, const Allocator& a) : base_type(std::move(table), a)\n    {\n    }\n#endif\n};\n} // namespace interface5\n\nusing interface5::concurrent_unordered_map;\nusing interface5::concurrent_unordered_multimap;\n\n} // namespace tbb\n\n#endif// __TBB_concurrent_unordered_map_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_unordered_set.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n/* Container implementations in this header are based on PPL implementations\n   provided by Microsoft. 
*/\n\n#ifndef __TBB_concurrent_unordered_set_H\n#define __TBB_concurrent_unordered_set_H\n\n#include \"internal/_concurrent_unordered_impl.h\"\n\nnamespace tbb\n{\n\nnamespace interface5 {\n\n// Template class for hash set traits\ntemplate<typename Key, typename Hash_compare, typename Allocator, bool Allow_multimapping>\nclass concurrent_unordered_set_traits\n{\nprotected:\n    typedef Key value_type;\n    typedef Key key_type;\n    typedef Hash_compare hash_compare;\n    typedef typename Allocator::template rebind<value_type>::other allocator_type;\n    enum { allow_multimapping = Allow_multimapping };\n\n    concurrent_unordered_set_traits() : my_hash_compare() {}\n    concurrent_unordered_set_traits(const hash_compare& hc) : my_hash_compare(hc) {}\n\n    typedef hash_compare value_compare;\n\n    static const Key& get_key(const value_type& value) {\n        return value;\n    }\n\n    hash_compare my_hash_compare; // the comparator predicate for keys\n};\n\ntemplate <typename Key, typename Hasher = tbb::tbb_hash<Key>, typename Key_equality = std::equal_to<Key>, typename Allocator = tbb::tbb_allocator<Key> >\nclass concurrent_unordered_set : public internal::concurrent_unordered_base< concurrent_unordered_set_traits<Key, internal::hash_compare<Key, Hasher, Key_equality>, Allocator, false> >\n{\n    // Base type definitions\n    typedef internal::hash_compare<Key, Hasher, Key_equality> hash_compare;\n    typedef internal::concurrent_unordered_base< concurrent_unordered_set_traits<Key, hash_compare, Allocator, false> > base_type;\n    typedef concurrent_unordered_set_traits<Key, internal::hash_compare<Key, Hasher, Key_equality>, Allocator, false> traits_type;\n#if __TBB_EXTRA_DEBUG\npublic:\n#endif\n    using traits_type::allow_multimapping;\npublic:\n    using base_type::insert;\n\n    // Type definitions\n    typedef Key key_type;\n    typedef typename base_type::value_type value_type;\n    typedef Key mapped_type;\n    typedef Hasher hasher;\n    typedef 
Key_equality key_equal;\n    typedef hash_compare key_compare;\n\n    typedef typename base_type::allocator_type allocator_type;\n    typedef typename base_type::pointer pointer;\n    typedef typename base_type::const_pointer const_pointer;\n    typedef typename base_type::reference reference;\n    typedef typename base_type::const_reference const_reference;\n\n    typedef typename base_type::size_type size_type;\n    typedef typename base_type::difference_type difference_type;\n\n    typedef typename base_type::iterator iterator;\n    typedef typename base_type::const_iterator const_iterator;\n    typedef typename base_type::iterator local_iterator;\n    typedef typename base_type::const_iterator const_local_iterator;\n\n    // Construction/destruction/copying\n    explicit concurrent_unordered_set(size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(),\n        const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)\n    {\n    }\n\n    concurrent_unordered_set(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a)\n    {\n    }\n\n    template <typename Iterator>\n    concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(),\n        const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)\n    {\n        insert(first, last);\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! 
Constructor from initializer_list\n   concurrent_unordered_set(std::initializer_list<value_type> il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(),\n        const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)\n    {\n        this->insert(il.begin(),il.end());\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN\n    concurrent_unordered_set(const concurrent_unordered_set& table)\n        : base_type(table)\n    {\n    }\n\n    concurrent_unordered_set& operator=(const concurrent_unordered_set& table)\n    {\n        return static_cast<concurrent_unordered_set&>(base_type::operator=(table));\n    }\n\n    concurrent_unordered_set(concurrent_unordered_set&& table)\n        : base_type(std::move(table))\n    {\n    }\n\n    concurrent_unordered_set& operator=(concurrent_unordered_set&& table)\n    {\n        return static_cast<concurrent_unordered_set&>(base_type::operator=(std::move(table)));\n    }\n#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN\n\n    concurrent_unordered_set(const concurrent_unordered_set& table, const Allocator& a)\n        : base_type(table, a)\n    {\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    concurrent_unordered_set(concurrent_unordered_set&& table, const Allocator& a)\n        : base_type(std::move(table), a)\n    {\n    }\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n\n};\n\ntemplate <typename Key, typename Hasher = tbb::tbb_hash<Key>, typename Key_equality = std::equal_to<Key>,\n         typename Allocator = tbb::tbb_allocator<Key> >\nclass concurrent_unordered_multiset :\n    public internal::concurrent_unordered_base< concurrent_unordered_set_traits<Key,\n    internal::hash_compare<Key, Hasher, Key_equality>, Allocator, true> >\n{\n    // Base type definitions\n  
  typedef internal::hash_compare<Key, Hasher, Key_equality> hash_compare;\n    typedef concurrent_unordered_set_traits<Key, hash_compare, Allocator, true> traits_type;\n    typedef internal::concurrent_unordered_base< traits_type > base_type;\n#if __TBB_EXTRA_DEBUG\npublic:\n#endif\n    using traits_type::allow_multimapping;\npublic:\n    using base_type::insert;\n\n    // Type definitions\n    typedef Key key_type;\n    typedef typename base_type::value_type value_type;\n    typedef Key mapped_type;\n    typedef Hasher hasher;\n    typedef Key_equality key_equal;\n    typedef hash_compare key_compare;\n\n    typedef typename base_type::allocator_type allocator_type;\n    typedef typename base_type::pointer pointer;\n    typedef typename base_type::const_pointer const_pointer;\n    typedef typename base_type::reference reference;\n    typedef typename base_type::const_reference const_reference;\n\n    typedef typename base_type::size_type size_type;\n    typedef typename base_type::difference_type difference_type;\n\n    typedef typename base_type::iterator iterator;\n    typedef typename base_type::const_iterator const_iterator;\n    typedef typename base_type::iterator local_iterator;\n    typedef typename base_type::const_iterator const_local_iterator;\n\n    // Construction/destruction/copying\n    explicit concurrent_unordered_multiset(size_type n_of_buckets = base_type::initial_bucket_number,\n        const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),\n        const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)\n    {\n    }\n\n    concurrent_unordered_multiset(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a)\n    {\n    }\n\n    template <typename Iterator>\n    concurrent_unordered_multiset(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number,\n        const hasher& _Hasher = hasher(), const 
key_equal& _Key_equality = key_equal(),\n        const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)\n    {\n        insert(first, last);\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Constructor from initializer_list\n   concurrent_unordered_multiset(std::initializer_list<value_type> il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(),\n        const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())\n        : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)\n    {\n        this->insert(il.begin(),il.end());\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT    \n\n#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN\n   concurrent_unordered_multiset(const concurrent_unordered_multiset& table)\n        : base_type(table)\n    {\n    }\n\n   concurrent_unordered_multiset& operator=(const concurrent_unordered_multiset& table)\n    {\n        return static_cast<concurrent_unordered_multiset&>(base_type::operator=(table));\n    }\n\n   concurrent_unordered_multiset(concurrent_unordered_multiset&& table)\n        : base_type(std::move(table))\n    {\n    }\n\n   concurrent_unordered_multiset& operator=(concurrent_unordered_multiset&& table)\n    {\n        return static_cast<concurrent_unordered_multiset&>(base_type::operator=(std::move(table)));\n    }\n#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN\n\n    concurrent_unordered_multiset(const concurrent_unordered_multiset& table, const Allocator& a)\n        : base_type(table, a)\n    {\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    concurrent_unordered_multiset(concurrent_unordered_multiset&& table, const Allocator& a)\n        : base_type(std::move(table), a)\n    {\n    }\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n};\n} // namespace interface5\n\nusing 
interface5::concurrent_unordered_set;\nusing interface5::concurrent_unordered_multiset;\n\n} // namespace tbb\n\n#endif// __TBB_concurrent_unordered_set_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_vector.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if (_MSC_VER)\n    //MSVC 10 \"deprecated\" application of some std:: algorithms to raw pointers as not safe.\n    //The reason is that destination is not checked against bounds/having enough place.\n    #define _SCL_SECURE_NO_WARNINGS\n#endif\n\n#include \"tbb/concurrent_vector.h\"\n#include \"tbb/cache_aligned_allocator.h\"\n#include \"tbb/tbb_exception.h\"\n#include \"tbb_misc.h\"\n#include \"itt_notify.h\"\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <cstring>\n#include <memory> //for uninitialized_fill_n\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\n#if defined(_MSC_VER) && defined(_Wp64)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (disable: 4267)\n#endif\n\nusing namespace std;\n\nnamespace tbb {\n\nnamespace internal {\n    class concurrent_vector_base_v3::helper :no_assign {\npublic:\n    //! memory page size\n    static const size_type page_size = 4096;\n\n    inline static bool incompact_predicate(size_type size) { // assert size != 0, see source/test/test_vector_layout.cpp\n        return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128); // for more details\n    }\n\n    inline static size_type find_segment_end(const concurrent_vector_base_v3 &v) {\n        segment_t *s = v.my_segment;\n        segment_index_t u = s==v.my_storage? pointers_per_short_table : pointers_per_long_table;\n        segment_index_t k = 0;\n        while( k < u && (s[k].load<relaxed>()==segment_allocated() ))\n            ++k;\n        return k;\n    }\n\n    // TODO: optimize accesses to my_first_block\n    //! assign first segment size. 
k - is index of last segment to be allocated, not a count of segments\n    inline static void assign_first_segment_if_necessary(concurrent_vector_base_v3 &v, segment_index_t k) {\n        if( !v.my_first_block ) {\n            /* There was a suggestion to set first segment according to incompact_predicate:\n            while( k && !helper::incompact_predicate(segment_size( k ) * element_size) )\n                --k; // while previous vector size is compact, decrement\n            // reasons to not do it:\n            // * constructor(n) is not ready to accept fragmented segments\n            // * backward compatibility due to that constructor\n            // * current version gives additional guarantee and faster init.\n            // * two calls to reserve() will give the same effect.\n            */\n            v.my_first_block.compare_and_swap(k+1, 0); // store number of segments\n        }\n    }\n\n    inline static void *allocate_segment(concurrent_vector_base_v3 &v, size_type n) {\n        void *ptr = v.vector_allocator_ptr(v, n);\n        if(!ptr) throw_exception(eid_bad_alloc); // check for bad allocation, throw exception\n        return ptr;\n    }\n\n    //! 
Publish segment so other threads can see it.\n    template<typename argument_type>\n    inline static void publish_segment( segment_t& s, argument_type rhs ) {\n        // see also itt_store_pointer_with_release_v3()\n        ITT_NOTIFY( sync_releasing, &s );\n        s.store<release>(rhs);\n    }\n\n    static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure = false);\n\n    // TODO: rename as get_segments_table() and return segment pointer\n    inline static void extend_table_if_necessary(concurrent_vector_base_v3 &v, size_type k, size_type start ) {\n        if(k >= pointers_per_short_table && v.my_segment == v.my_storage)\n            extend_segment_table(v, start );\n    }\n\n    static void extend_segment_table(concurrent_vector_base_v3 &v, size_type start);\n\n    struct segment_not_used_predicate: no_assign {\n        segment_t &s;\n        segment_not_used_predicate(segment_t &segment) : s(segment) {}\n        bool operator()() const { return s.load<relaxed>() == segment_not_used ();}\n    };\n    inline static segment_t& acquire_segment(concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner) {\n        segment_t &s = v.my_segment[index]; // TODO: pass v.my_segment as argument\n        if( s.load<acquire>() == segment_not_used() ) { // do not check for segment_allocation_failed state\n            if( owner ) {\n                enable_segment( v, index, element_size );\n            } else {\n                ITT_NOTIFY(sync_prepare, &s);\n                spin_wait_while(segment_not_used_predicate(s));\n                ITT_NOTIFY(sync_acquired, &s);\n            }\n        } else {\n            ITT_NOTIFY(sync_acquired, &s);\n        }\n        if(s.load<relaxed>() != segment_allocated())\n            throw_exception(eid_bad_last_alloc); // throw custom exception, because it's hard to recover correctly after segment_allocation_failed state\n        return 
s;\n    }\n\n    ///// non-static fields of helper for exception-safe iteration across segments\n    segment_t *table;// TODO: review all segment_index_t as just short type\n    size_type first_block, k, sz, start, finish, element_size;\n    helper(segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f) throw()\n        : table(segments), first_block(fb), k(index), sz(0), start(s), finish(f), element_size(esize) {}\n    inline void first_segment() throw() {\n        __TBB_ASSERT( start <= finish, NULL );\n        __TBB_ASSERT( first_block || !finish, NULL );\n        if( k < first_block ) k = 0; // process solid segment at a time\n        size_type base = segment_base( k );\n        __TBB_ASSERT( base <= start, NULL );\n        finish -= base; start -= base; // rebase as offsets from segment k\n        sz = k ? base : segment_size( first_block ); // sz==base for k>0\n    }\n    inline void next_segment() throw() {\n        finish -= sz; start = 0; // offsets from next segment\n        if( !k ) k = first_block;\n        else { ++k; sz = segment_size( k ); }\n    }\n    template<typename F>\n    inline size_type apply(const F &func) {\n        first_segment();\n        while( sz < finish ) { // work for more than one segment\n            //TODO: remove extra load() of table[k] inside func\n            func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, sz - start );\n            next_segment();\n        }\n        func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, finish - start );\n        return k;\n    }\n    inline segment_value_t get_segment_value(size_type index, bool wait) {\n        segment_t &s = table[index];\n        if( wait && (s.load<acquire>() == segment_not_used()) ) {\n            ITT_NOTIFY(sync_prepare, &s);\n            spin_wait_while(segment_not_used_predicate(s));\n            ITT_NOTIFY(sync_acquired, &s);\n        }\n        return 
s.load<relaxed>();\n    }\n    ~helper() {\n        if( sz >= finish ) return; // the work is done correctly\n        cleanup();\n    }\n\n    //! Out of line code to assists destructor in infrequent cases.\n    void cleanup();\n\n    /// TODO: turn into lambda functions when available\n    struct init_body {\n        internal_array_op2 func;\n        const void *arg;\n        init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {}\n        void operator()(segment_t &, void *begin, size_type n) const {\n            func( begin, arg, n );\n        }\n    };\n    struct safe_init_body {\n        internal_array_op2 func;\n        const void *arg;\n        safe_init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {}\n        void operator()(segment_t &s, void *begin, size_type n) const {\n            if(s.load<relaxed>() != segment_allocated())\n                throw_exception(eid_bad_last_alloc); // throw custom exception\n            func( begin, arg, n );\n        }\n    };\n    struct destroy_body {\n        internal_array_op1 func;\n        destroy_body(internal_array_op1 destroy) : func(destroy) {}\n        void operator()(segment_t &s, void *begin, size_type n) const {\n            if(s.load<relaxed>() == segment_allocated())\n                func( begin, n );\n        }\n    };\n};\n\nvoid concurrent_vector_base_v3::helper::extend_segment_table(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type start) {\n    if( start > segment_size(pointers_per_short_table) ) start = segment_size(pointers_per_short_table);\n    // If other threads are trying to set pointers in the short segment, wait for them to finish their\n    // assignments before we copy the short segment to the long segment. 
Note: grow_to_at_least depends on it\n    for( segment_index_t i = 0; segment_base(i) < start && v.my_segment == v.my_storage; i++ ){\n        if(v.my_storage[i].load<relaxed>() == segment_not_used()) {\n            ITT_NOTIFY(sync_prepare, &v.my_storage[i]);\n            atomic_backoff backoff(true);\n            while( v.my_segment == v.my_storage && (v.my_storage[i].load<relaxed>() == segment_not_used()) )\n                backoff.pause();\n            ITT_NOTIFY(sync_acquired, &v.my_storage[i]);\n        }\n    }\n    if( v.my_segment != v.my_storage ) return;\n\n    segment_t* new_segment_table = (segment_t*)NFS_Allocate( pointers_per_long_table, sizeof(segment_t), NULL );\n    __TBB_ASSERT(new_segment_table, \"NFS_Allocate should throws exception if it cannot allocate the requested storage, and not returns zero pointer\" );\n    std::uninitialized_fill_n(new_segment_table,size_t(pointers_per_long_table),segment_t()); //init newly allocated table\n   //TODO: replace with static assert\n    __TBB_STATIC_ASSERT(pointers_per_long_table >= pointers_per_short_table, \"size of the big table should be not lesser than of the small one, as we copy values to it\" );\n    std::copy(v.my_storage, v.my_storage+pointers_per_short_table, new_segment_table);//copy values from old table, here operator= of segment_t is used\n    if( v.my_segment.compare_and_swap( new_segment_table, v.my_storage ) != v.my_storage )\n        NFS_Free( new_segment_table );\n    // else TODO: add ITT_NOTIFY signals for v.my_segment?\n}\n\nconcurrent_vector_base_v3::size_type concurrent_vector_base_v3::helper::enable_segment(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type k, concurrent_vector_base_v3::size_type element_size,\n        bool mark_as_not_used_on_failure ) {\n\n    struct segment_scope_guard : no_copy{\n        segment_t* my_segment_ptr;\n        bool my_mark_as_not_used;\n        segment_scope_guard(segment_t& segment, bool mark_as_not_used) : 
my_segment_ptr(&segment), my_mark_as_not_used(mark_as_not_used){}\n        void dismiss(){ my_segment_ptr = 0;}\n        ~segment_scope_guard(){\n            if (my_segment_ptr){\n                if (!my_mark_as_not_used){\n                    publish_segment(*my_segment_ptr, segment_allocation_failed());\n                }else{\n                    publish_segment(*my_segment_ptr, segment_not_used());\n                }\n            }\n        }\n    };\n\n    segment_t* s = v.my_segment; // TODO: optimize out as argument? Optimize accesses to my_first_block\n    __TBB_ASSERT(s[k].load<relaxed>() != segment_allocated(), \"concurrent operation during growth?\");\n\n    size_type size_of_enabled_segment =  segment_size(k);\n    size_type size_to_allocate = size_of_enabled_segment;\n    if( !k ) {\n        assign_first_segment_if_necessary(v, default_initial_segments-1);\n        size_of_enabled_segment =  2 ;\n        size_to_allocate = segment_size(v.my_first_block);\n\n    } else  {\n        spin_wait_while_eq( v.my_first_block, segment_index_t(0) );\n    }\n\n    if( k && (k < v.my_first_block)){ //no need to allocate anything\n        // s[0].array is changed only once ( 0 -> !0 ) and points to uninitialized memory\n        segment_value_t array0 = s[0].load<acquire>();\n        if(array0 == segment_not_used()){\n            // sync_prepare called only if there is a wait\n            ITT_NOTIFY(sync_prepare, &s[0]);\n            spin_wait_while( segment_not_used_predicate(s[0]));\n            array0 = s[0].load<acquire>();\n        }\n        ITT_NOTIFY(sync_acquired, &s[0]);\n        if(array0 != segment_allocated()) { // check for segment_allocation_failed state of initial segment\n            publish_segment(s[k], segment_allocation_failed()); // and assign segment_allocation_failed state here\n            throw_exception(eid_bad_last_alloc); // throw custom exception\n        }\n        publish_segment( s[k],\n            
static_cast<void*>(array0.pointer<char>() + segment_base(k)*element_size )\n        );\n    } else {\n        segment_scope_guard k_segment_guard(s[k], mark_as_not_used_on_failure);\n        publish_segment(s[k], allocate_segment(v, size_to_allocate));\n        k_segment_guard.dismiss();\n    }\n    return size_of_enabled_segment;\n}\n\nvoid concurrent_vector_base_v3::helper::cleanup() {\n    if( !sz ) { // allocation failed, restore the table\n        segment_index_t k_start = k, k_end = segment_index_of(finish-1);\n        if( segment_base( k_start ) < start )\n            get_segment_value(k_start++, true); // wait\n        if( k_start < first_block ) {\n            segment_value_t segment0 = get_segment_value(0, start>0); // wait if necessary\n            if((segment0 != segment_not_used()) && !k_start ) ++k_start;\n            if(segment0 != segment_allocated())\n                for(; k_start < first_block && k_start <= k_end; ++k_start )\n                    publish_segment(table[k_start], segment_allocation_failed());\n            else for(; k_start < first_block && k_start <= k_end; ++k_start )\n                    publish_segment(table[k_start], static_cast<void*>(\n                        (segment0.pointer<char>()) + segment_base(k_start)*element_size) );\n        }\n        for(; k_start <= k_end; ++k_start ) // not in first block\n            if(table[k_start].load<acquire>() == segment_not_used())\n                publish_segment(table[k_start], segment_allocation_failed());\n        // fill allocated items\n        first_segment();\n        goto recover;\n    }\n    while( sz <= finish ) { // there is still work for at least one segment\n        next_segment();\nrecover:\n        segment_value_t array = table[k].load<relaxed>();\n        if(array == segment_allocated())\n            std::memset( (array.pointer<char>()) + element_size*start, 0, ((sz<finish?sz:finish) - start)*element_size );\n        else __TBB_ASSERT( array == 
segment_allocation_failed(), NULL );\n    }\n}\n\nconcurrent_vector_base_v3::~concurrent_vector_base_v3() {\n    segment_t* s = my_segment;\n    if( s != my_storage ) {\n#if TBB_USE_ASSERT\n        //to please assert in segment_t destructor\n        std::fill_n(my_storage,size_t(pointers_per_short_table),segment_t());\n#endif /* TBB_USE_ASSERT */\n#if TBB_USE_DEBUG\n        for( segment_index_t i = 0; i < pointers_per_long_table; i++)\n            __TBB_ASSERT( my_segment[i].load<relaxed>() != segment_allocated(), \"Segment should have been freed. Please recompile with new TBB before using exceptions.\");\n#endif\n        my_segment = my_storage;\n        NFS_Free( s );\n    }\n}\n\nconcurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_capacity() const {\n    return segment_base( helper::find_segment_end(*this) );\n}\n\nvoid concurrent_vector_base_v3::internal_throw_exception(size_type t) const {\n    switch(t) {\n        case 0: throw_exception(eid_out_of_range);\n        case 1: throw_exception(eid_segment_range_error);\n        case 2: throw_exception(eid_index_range_error);\n    }\n}\n\nvoid concurrent_vector_base_v3::internal_reserve( size_type n, size_type element_size, size_type max_size ) {\n    if( n>max_size )\n        throw_exception(eid_reservation_length_error);\n    __TBB_ASSERT( n, NULL );\n    helper::assign_first_segment_if_necessary(*this, segment_index_of(n-1));\n    segment_index_t k = helper::find_segment_end(*this);\n\n    for( ; segment_base(k)<n; ++k ) {\n        helper::extend_table_if_necessary(*this, k, 0);\n        if(my_segment[k].load<relaxed>() != segment_allocated())\n            helper::enable_segment(*this, k, element_size, true ); //in case of failure mark segments as not used\n    }\n}\n\n//TODO: Looks like atomic loads can be done relaxed here, as the only place this method is called from\n//is the constructor, which does not require synchronization (for more details see comment in the\n// 
concurrent_vector_base constructor).\nvoid concurrent_vector_base_v3::internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy ) {\n    size_type n = src.my_early_size;\n    __TBB_ASSERT( my_segment == my_storage, NULL);\n    if( n ) {\n        helper::assign_first_segment_if_necessary(*this, segment_index_of(n-1));\n        size_type b;\n        for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {\n            if( (src.my_segment.load<acquire>() == src.my_storage && k >= pointers_per_short_table)\n                || (src.my_segment[k].load<relaxed>() != segment_allocated())) {\n                my_early_size = b; break;\n            }\n            helper::extend_table_if_necessary(*this, k, 0);\n            size_type m = helper::enable_segment(*this, k, element_size);\n            if( m > n-b ) m = n-b;\n            my_early_size = b+m;\n            copy( my_segment[k].load<relaxed>().pointer<void>(), src.my_segment[k].load<relaxed>().pointer<void>(), m );\n        }\n    }\n}\n\nvoid concurrent_vector_base_v3::internal_assign( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) {\n    size_type n = src.my_early_size;\n    while( my_early_size>n ) { // TODO: improve\n        segment_index_t k = segment_index_of( my_early_size-1 );\n        size_type b=segment_base(k);\n        size_type new_end = b>=n ? 
b : n;\n        __TBB_ASSERT( my_early_size>new_end, NULL );\n        if( my_segment[k].load<relaxed>() != segment_allocated()) // check vector was broken before\n            throw_exception(eid_bad_last_alloc); // throw custom exception\n        // destructors are supposed to not throw any exceptions\n        destroy( my_segment[k].load<relaxed>().pointer<char>() + element_size*(new_end-b), my_early_size-new_end );\n        my_early_size = new_end;\n    }\n    size_type dst_initialized_size = my_early_size;\n    my_early_size = n;\n    helper::assign_first_segment_if_necessary(*this, segment_index_of(n));\n    size_type b;\n    for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {\n        if( (src.my_segment.load<acquire>() == src.my_storage && k >= pointers_per_short_table)\n            || src.my_segment[k].load<relaxed>() != segment_allocated() ) { // if source is damaged\n                my_early_size = b; break; // TODO: it may cause undestructed items\n        }\n        helper::extend_table_if_necessary(*this, k, 0);\n        if( my_segment[k].load<relaxed>() == segment_not_used())\n            helper::enable_segment(*this, k, element_size);\n        else if( my_segment[k].load<relaxed>() != segment_allocated() )\n            throw_exception(eid_bad_last_alloc); // throw custom exception\n        size_type m = k? 
segment_size(k) : 2;\n        if( m > n-b ) m = n-b;\n        size_type a = 0;\n        if( dst_initialized_size>b ) {\n            a = dst_initialized_size-b;\n            if( a>m ) a = m;\n            assign( my_segment[k].load<relaxed>().pointer<void>(), src.my_segment[k].load<relaxed>().pointer<void>(), a );\n            m -= a;\n            a *= element_size;\n        }\n        if( m>0 )\n            copy( my_segment[k].load<relaxed>().pointer<char>() + a, src.my_segment[k].load<relaxed>().pointer<char>() + a, m );\n    }\n    __TBB_ASSERT( src.my_early_size==n, \"detected use of concurrent_vector::operator= with right side that was concurrently modified\" );\n}\n\nvoid* concurrent_vector_base_v3::internal_push_back( size_type element_size, size_type& index ) {\n    __TBB_ASSERT( sizeof(my_early_size)==sizeof(uintptr_t), NULL );\n    size_type tmp = my_early_size.fetch_and_increment<acquire>();\n    index = tmp;\n    segment_index_t k_old = segment_index_of( tmp );\n    size_type base = segment_base(k_old);\n    helper::extend_table_if_necessary(*this, k_old, tmp);\n    segment_t& s = helper::acquire_segment(*this, k_old, element_size, base==tmp);\n    size_type j_begin = tmp-base;\n    return (void*)(s.load<relaxed>().pointer<char>() + element_size*j_begin);\n}\n\nvoid concurrent_vector_base_v3::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) {\n    internal_grow_to_at_least_with_result( new_size, element_size, init, src );\n}\n\nconcurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) {\n    size_type e = my_early_size;\n    while( e<new_size ) {\n        size_type f = my_early_size.compare_and_swap(new_size,e);\n        if( f==e ) {\n            internal_grow( e, new_size, element_size, init, src );\n            break;\n        }\n        e = f;\n    }\n    // 
Check/wait for segments allocation completes\n    segment_index_t i, k_old = segment_index_of( new_size-1 );\n    if( k_old >= pointers_per_short_table && my_segment == my_storage ) {\n        spin_wait_while_eq( my_segment, my_storage );\n    }\n    for( i = 0; i <= k_old; ++i ) {\n        segment_t &s = my_segment[i];\n        if(s.load<relaxed>() == segment_not_used()) {\n            ITT_NOTIFY(sync_prepare, &s);\n            atomic_backoff backoff(true);\n            while( my_segment[i].load<acquire>() == segment_not_used() ) // my_segment may change concurrently\n                backoff.pause();\n            ITT_NOTIFY(sync_acquired, &s);\n        }\n        if( my_segment[i].load<relaxed>() != segment_allocated() )\n            throw_exception(eid_bad_last_alloc);\n    }\n#if TBB_USE_DEBUG\n    size_type capacity = internal_capacity();\n    __TBB_ASSERT( capacity >= new_size, NULL);\n#endif\n    return e;\n}\n\nconcurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src ) {\n    size_type result = my_early_size.fetch_and_add(delta);\n    internal_grow( result, result+delta, element_size, init, src );\n    return result;\n}\n\nvoid concurrent_vector_base_v3::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src ) {\n    __TBB_ASSERT( start<finish, \"start must be less than finish\" );\n    segment_index_t k_start = segment_index_of(start), k_end = segment_index_of(finish-1);\n    helper::assign_first_segment_if_necessary(*this, k_end);\n    helper::extend_table_if_necessary(*this, k_end, start);\n    helper range(my_segment, my_first_block, element_size, k_start, start, finish);\n    for(; k_end > k_start && k_end >= range.first_block; --k_end ) // allocate segments in reverse order\n        helper::acquire_segment(*this, k_end, element_size, true/*for k_end>k_start*/);\n    for(; k_start <= 
k_end; ++k_start ) // but allocate first block in straight order\n        helper::acquire_segment(*this, k_start, element_size, segment_base( k_start ) >= start );\n    range.apply( helper::init_body(init, src) );\n}\n\nvoid concurrent_vector_base_v3::internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,\n                                                internal_array_op1 destroy, internal_array_op2 init ) {\n    size_type j = my_early_size;\n    if( n > j ) { // construct items\n        internal_reserve(n, element_size, max_size);\n        my_early_size = n;\n        helper for_each(my_segment, my_first_block, element_size, segment_index_of(j), j, n);\n        for_each.apply( helper::safe_init_body(init, src) );\n    } else {\n        my_early_size = n;\n        helper for_each(my_segment, my_first_block, element_size, segment_index_of(n), n, j);\n        for_each.apply( helper::destroy_body(destroy) );\n    }\n}\n\nconcurrent_vector_base_v3::segment_index_t concurrent_vector_base_v3::internal_clear( internal_array_op1 destroy ) {\n    __TBB_ASSERT( my_segment, NULL );\n    size_type j = my_early_size;\n    my_early_size = 0;\n    helper for_each(my_segment, my_first_block, 0, 0, 0, j); // element_size is safe to be zero if 'start' is zero\n    j = for_each.apply( helper::destroy_body(destroy) );\n    size_type i = helper::find_segment_end(*this);\n    return j < i? i : j+1;\n}\n\nvoid *concurrent_vector_base_v3::internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy )\n{\n    const size_type my_size = my_early_size;\n    const segment_index_t k_end = helper::find_segment_end(*this); // allocated segments\n    const segment_index_t k_stop = my_size? 
segment_index_of(my_size-1) + 1 : 0; // number of segments to store existing items: 0=>0; 1,2=>1; 3,4=>2; [5-8]=>3;..\n    const segment_index_t first_block = my_first_block; // number of merged segments, getting values from atomics\n\n    segment_index_t k = first_block;\n    if(k_stop < first_block)\n        k = k_stop;\n    else\n        while (k < k_stop && helper::incompact_predicate(segment_size( k ) * element_size) ) k++;\n    if(k_stop == k_end && k == first_block)\n        return NULL;\n\n    segment_t *const segment_table = my_segment;\n    internal_segments_table &old = *static_cast<internal_segments_table*>( table );\n    //this call is left here for sake of backward compatibility, and as a placeholder for table initialization\n    std::fill_n(old.table,sizeof(old.table)/sizeof(old.table[0]),segment_t());\n    old.first_block=0;\n\n    if ( k != first_block && k ) // first segment optimization\n    {\n        // exception can occur here\n        void *seg = helper::allocate_segment(*this, segment_size(k));\n        old.table[0].store<relaxed>(seg);\n        old.first_block = k; // fill info for freeing new segment if exception occurs\n        // copy items to the new segment\n        size_type my_segment_size = segment_size( first_block );\n        for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) {\n            __TBB_ASSERT( segment_table[i].load<relaxed>() == segment_allocated(), NULL);\n            void *s = static_cast<void*>(\n                static_cast<char*>(seg) + segment_base(i)*element_size );\n            //TODO: refactor to use std::min\n            if(j + my_segment_size >= my_size) my_segment_size = my_size - j;\n            __TBB_TRY { // exception can occur here\n                copy( s, segment_table[i].load<relaxed>().pointer<void>(), my_segment_size );\n            } __TBB_CATCH(...) 
{ // destroy all the already copied items\n                helper for_each(&old.table[0], old.first_block, element_size,\n                    0, 0, segment_base(i)+ my_segment_size);\n                for_each.apply( helper::destroy_body(destroy) );\n                __TBB_RETHROW();\n            }\n            my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block );\n        }\n        // commit the changes\n        std::copy(segment_table,segment_table + k,old.table);\n        for (segment_index_t i = 0; i < k; i++) {\n            segment_table[i].store<relaxed>(static_cast<void*>(\n                static_cast<char*>(seg) + segment_base(i)*element_size ));\n        }\n        old.first_block = first_block; my_first_block = k; // now, first_block != my_first_block\n        // destroy original copies\n        my_segment_size = segment_size( first_block ); // old.first_block actually\n        for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) {\n            if(j + my_segment_size >= my_size) my_segment_size = my_size - j;\n            // destructors are supposed to not throw any exceptions\n            destroy( old.table[i].load<relaxed>().pointer<void>(), my_segment_size );\n            my_segment_size = i? 
segment_size( ++i ) : segment_size( i = first_block );\n        }\n    }\n    // free unnecessary segments allocated by reserve() call\n    if ( k_stop < k_end ) {\n        old.first_block = first_block;\n        std::copy(segment_table+k_stop, segment_table+k_end, old.table+k_stop );\n        std::fill_n(segment_table+k_stop, (k_end-k_stop), segment_t());\n        if( !k ) my_first_block = 0;\n    }\n    return table;\n}\n\nvoid concurrent_vector_base_v3::internal_swap(concurrent_vector_base_v3& v)\n{\n    size_type my_sz = my_early_size.load<acquire>();\n    size_type v_sz = v.my_early_size.load<relaxed>();\n    if(!my_sz && !v_sz) return;\n\n    bool my_was_short = (my_segment.load<relaxed>() == my_storage);\n    bool v_was_short  = (v.my_segment.load<relaxed>() == v.my_storage);\n\n    //In C++11, this would be: swap(my_storage, v.my_storage);\n    for (int i=0; i < pointers_per_short_table; ++i){\n        swap(my_storage[i], v.my_storage[i]);\n    }\n    tbb::internal::swap<relaxed>(my_first_block, v.my_first_block);\n    tbb::internal::swap<relaxed>(my_segment, v.my_segment);\n    if (my_was_short){\n        v.my_segment.store<relaxed>(v.my_storage);\n    }\n    if(v_was_short){\n        my_segment.store<relaxed>(my_storage);\n    }\n\n    my_early_size.store<relaxed>(v_sz);\n    v.my_early_size.store<release>(my_sz);\n}\n\n} // namespace internal\n\n} // tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/concurrent_vector.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_concurrent_vector_H\n#define __TBB_concurrent_vector_H\n\n#include \"tbb_stddef.h\"\n#include \"tbb_exception.h\"\n#include \"atomic.h\"\n#include \"cache_aligned_allocator.h\"\n#include \"blocked_range.h\"\n#include \"tbb_machine.h\"\n#include \"tbb_profiling.h\"\n#include <new>\n#include <cstring>   // for memset()\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <algorithm>\n#include <iterator>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\n#if _MSC_VER==1500 && !__INTEL_COMPILER\n    // VS2008/VC9 seems to have an issue; limits pull in math.h\n    #pragma warning( push )\n    #pragma warning( disable: 4985 )\n#endif\n#include <limits> /* std::numeric_limits */\n#if _MSC_VER==1500 && !__INTEL_COMPILER\n    #pragma warning( pop )\n#endif\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    #include <initializer_list>\n#endif\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (push)\n#if defined(_Wp64)\n    #pragma warning (disable: 4267)\n#endif\n    #pragma warning (disable: 4127) //warning C4127: conditional expression is constant\n#endif\n\nnamespace tbb {\n\ntemplate<typename T, class A = cache_aligned_allocator<T> >\nclass concurrent_vector;\n\ntemplate<typename Container, typename Value>\nclass vector_iterator;\n\n//! @cond INTERNAL\nnamespace internal {\n\n    //! Bad allocation marker\n    static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));\n\n    //! 
Exception helper function\n    template<typename T>\n    void handle_unconstructed_elements(T* array, size_t n_of_elements){\n        std::memset(array, 0, n_of_elements * sizeof(T));\n    }\n\n    //! Base class of concurrent vector implementation.\n    /** @ingroup containers */\n    class concurrent_vector_base_v3 {\n    protected:\n\n        // Basic types declarations\n        typedef size_t segment_index_t;\n        typedef size_t size_type;\n\n        // Using enumerations due to Mac linking problems of static const variables\n        enum {\n            // Size constants\n            default_initial_segments = 1, // 2 initial items\n            //! Number of slots for segment pointers inside the class\n            pointers_per_short_table = 3, // to fit into 8 words of entire structure\n            pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit\n        };\n\n        struct segment_not_used {};\n        struct segment_allocated {};\n        struct segment_allocation_failed {};\n\n        class segment_t;\n        class segment_value_t {\n            void* array;\n        private:\n            //TODO: More elegant way to grant access to selected functions _only_?\n            friend class segment_t;\n            explicit segment_value_t(void* an_array):array(an_array) {}\n        public:\n            friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}\n            friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;}\n            friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;}\n            template<typename argument_type>\n            friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! 
(lhs == arg);}\n\n            template<typename T>\n            T* pointer() const {  return static_cast<T*>(const_cast<void*>(array)); }\n        };\n\n        // Segment pointer.\n        class segment_t {\n            atomic<void*> array;\n        public:\n            segment_t(){ store<relaxed>(segment_not_used());}\n            //Copy ctor and assignment operator are defined to ease using of stl algorithms.\n            //These algorithms usually not a synchronization point, so, semantic is\n            //intentionally relaxed here.\n            segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());}\n\n            void swap(segment_t & rhs ){\n                tbb::internal::swap<relaxed>(array, rhs.array);\n            }\n\n            segment_t& operator=(segment_t const& rhs ){\n                array.store<relaxed>(rhs.array.load<relaxed>());\n                return *this;\n            }\n\n            template<memory_semantics M>\n            segment_value_t load() const { return segment_value_t(array.load<M>());}\n\n            template<memory_semantics M>\n            void store(segment_not_used) {\n                array.store<M>(0);\n            }\n\n            template<memory_semantics M>\n            void store(segment_allocation_failed) {\n                __TBB_ASSERT(load<relaxed>() != segment_allocated(),\"transition from \\\"allocated\\\" to \\\"allocation failed\\\" state looks non-logical\");\n                array.store<M>(internal::vector_allocation_error_flag);\n            }\n\n            template<memory_semantics M>\n            void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {\n                __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),\n                     \"other overloads of store should be used for marking segment as not_used or allocation_failed\" );\n                array.store<M>(allocated_segment_pointer);\n            }\n\n#if TBB_USE_ASSERT\n      
      ~segment_t() {\n                __TBB_ASSERT(load<relaxed>() != segment_allocated(), \"should have been freed by clear\" );\n            }\n#endif /* TBB_USE_ASSERT */\n        };\n        friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);\n\n        // Data fields\n\n        //! allocator function pointer\n        void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);\n\n        //! count of segments in the first block\n        atomic<size_type> my_first_block;\n\n        //! Requested size of vector\n        atomic<size_type> my_early_size;\n\n        //! Pointer to the segments table\n        atomic<segment_t*> my_segment;\n\n        //! embedded storage of segment pointers\n        segment_t my_storage[pointers_per_short_table];\n\n        // Methods\n\n        concurrent_vector_base_v3() {\n            //Here the semantic is intentionally relaxed.\n            //The reason this is next:\n            //Object that is in middle of construction (i.e. its constructor is not yet finished)\n            //cannot be used concurrently until the construction is finished.\n            //Thus to flag other threads that construction is finished, some synchronization with\n            //acquire-release semantic should be done by the (external) code that uses the vector.\n            //So, no need to do the synchronization inside the vector.\n\n            my_early_size.store<relaxed>(0);\n            my_first_block.store<relaxed>(0); // here is not default_initial_segments\n            my_segment.store<relaxed>(my_storage);\n        }\n\n        __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();\n\n        //these helpers methods use the fact that segments are allocated so\n        //that every segment size is a (increasing) power of 2.\n        //with one exception 0 segment has size of 2 as well segment 1;\n        //e.g. 
size of segment with index of 3 is 2^3=8;\n        static segment_index_t segment_index_of( size_type index ) {\n            return segment_index_t( __TBB_Log2( index|1 ) );\n        }\n\n        static segment_index_t segment_base( segment_index_t k ) {\n            return (segment_index_t(1)<<k & ~segment_index_t(1));\n        }\n\n        static inline segment_index_t segment_base_index_of( segment_index_t &index ) {\n            segment_index_t k = segment_index_of( index );\n            index -= segment_base(k);\n            return k;\n        }\n\n        static size_type segment_size( segment_index_t k ) {\n            return segment_index_t(1)<<k; // fake value for k==0\n        }\n\n\n        static bool is_first_element_in_segment(size_type element_index){\n            //check if element_index is a power of 2 that is at least 2.\n            //The idea is to detect if the iterator crosses a segment boundary,\n            //and 2 is the minimal index for which it's true\n            __TBB_ASSERT(element_index, \"there should be no need to call \"\n                                        \"is_first_element_in_segment for 0th element\" );\n            return is_power_of_two_factor( element_index, 2 );\n        }\n\n        //! An operation on an n-element array starting at begin.\n        typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );\n\n        //! An operation on n-element destination array and n-element source array.\n        typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );\n\n        //! 
Internal structure for compact()\n        struct internal_segments_table {\n            segment_index_t first_block;\n            segment_t table[pointers_per_long_table];\n        };\n\n        void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );\n        size_type __TBB_EXPORTED_METHOD internal_capacity() const;\n        void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );\n        size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );\n        void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );\n        segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );\n        void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );\n        void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );\n        void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,\n                              internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );\n        //! Obsolete\n        void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;\n        void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);\n\n        void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,\n                                                    internal_array_op1 destroy, internal_array_op2 init );\n        size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );\n\n        //! 
Deprecated entry point for backwards compatibility to TBB 2.1.\n        void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );\nprivate:\n        //! Private functionality\n        class helper;\n        friend class helper;\n\n        template<typename Container, typename Value>\n        friend class vector_iterator;\n\n    };\n\n    inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) {\n        lhs.swap(rhs);\n    }\n\n    typedef concurrent_vector_base_v3 concurrent_vector_base;\n\n    //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/\n    /** Value is either the T or const T type of the container.\n        @ingroup containers */\n    template<typename Container, typename Value>\n    class vector_iterator\n    {\n        //! concurrent_vector over which we are iterating.\n        Container* my_vector;\n\n        //! Index into the vector\n        size_t my_index;\n\n        //! 
Caches my_vector-&gt;internal_subscript(my_index)\n        /** NULL if cached value is not available */\n        mutable Value* my_item;\n\n        template<typename C, typename T>\n        friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );\n\n        template<typename C, typename T, typename U>\n        friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );\n\n        template<typename C, typename T, typename U>\n        friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );\n\n        template<typename C, typename T, typename U>\n        friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );\n\n        template<typename C, typename U>\n        friend class internal::vector_iterator;\n\n#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)\n        template<typename T, class A>\n        friend class tbb::concurrent_vector;\n#else\npublic: // workaround for MSVC\n#endif\n\n        vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :\n            my_vector(const_cast<Container*>(&vector)),\n            my_index(index),\n            my_item(static_cast<Value*>(ptr))\n        {}\n\n    public:\n        //! 
Default constructor\n        vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}\n\n        vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :\n            my_vector(other.my_vector),\n            my_index(other.my_index),\n            my_item(other.my_item)\n        {}\n\n        vector_iterator operator+( ptrdiff_t offset ) const {\n            return vector_iterator( *my_vector, my_index+offset );\n        }\n        vector_iterator &operator+=( ptrdiff_t offset ) {\n            my_index+=offset;\n            my_item = NULL;\n            return *this;\n        }\n        vector_iterator operator-( ptrdiff_t offset ) const {\n            return vector_iterator( *my_vector, my_index-offset );\n        }\n        vector_iterator &operator-=( ptrdiff_t offset ) {\n            my_index-=offset;\n            my_item = NULL;\n            return *this;\n        }\n        Value& operator*() const {\n            Value* item = my_item;\n            if( !item ) {\n                item = my_item = &my_vector->internal_subscript(my_index);\n            }\n            __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), \"corrupt cache\" );\n            return *item;\n        }\n        Value& operator[]( ptrdiff_t k ) const {\n            return my_vector->internal_subscript(my_index+k);\n        }\n        Value* operator->() const {return &operator*();}\n\n        //! 
Pre increment\n        vector_iterator& operator++() {\n            size_t element_index = ++my_index;\n            if( my_item ) {\n                //TODO: consider using of knowledge about \"first_block optimization\" here as well?\n                if( concurrent_vector_base::is_first_element_in_segment(element_index)) {\n                    //if the iterator crosses a segment boundary, the pointer become invalid\n                    //as possibly next segment is in another memory location\n                    my_item= NULL;\n                } else {\n                    ++my_item;\n                }\n            }\n            return *this;\n        }\n\n        //! Pre decrement\n        vector_iterator& operator--() {\n            __TBB_ASSERT( my_index>0, \"operator--() applied to iterator already at beginning of concurrent_vector\" );\n            size_t element_index = my_index--;\n            if( my_item ) {\n                if(concurrent_vector_base::is_first_element_in_segment(element_index)) {\n                    //if the iterator crosses a segment boundary, the pointer become invalid\n                    //as possibly next segment is in another memory location\n                    my_item= NULL;\n                } else {\n                    --my_item;\n                }\n            }\n            return *this;\n        }\n\n        //! Post increment\n        vector_iterator operator++(int) {\n            vector_iterator result = *this;\n            operator++();\n            return result;\n        }\n\n        //! 
Post decrement\n        vector_iterator operator--(int) {\n            vector_iterator result = *this;\n            operator--();\n            return result;\n        }\n\n        // STL support\n\n        typedef ptrdiff_t difference_type;\n        typedef Value value_type;\n        typedef Value* pointer;\n        typedef Value& reference;\n        typedef std::random_access_iterator_tag iterator_category;\n    };\n\n    template<typename Container, typename T>\n    vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {\n        return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );\n    }\n\n    template<typename Container, typename T, typename U>\n    bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {\n        return i.my_index==j.my_index && i.my_vector == j.my_vector;\n    }\n\n    template<typename Container, typename T, typename U>\n    bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {\n        return !(i==j);\n    }\n\n    template<typename Container, typename T, typename U>\n    bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {\n        return i.my_index<j.my_index;\n    }\n\n    template<typename Container, typename T, typename U>\n    bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {\n        return j<i;\n    }\n\n    template<typename Container, typename T, typename U>\n    bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {\n        return !(i<j);\n    }\n\n    template<typename Container, typename T, typename U>\n    bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {\n        return !(j<i);\n    }\n\n    template<typename Container, typename T, typename U>\n    ptrdiff_t operator-( const 
vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {\n        return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);\n    }\n\n    template<typename T, class A>\n    class allocator_base {\n    public:\n        typedef typename A::template\n            rebind<T>::other allocator_type;\n        allocator_type my_allocator;\n\n        allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}\n\n    };\n\n} // namespace internal\n//! @endcond\n\n//! Concurrent vector container\n/** concurrent_vector is a container having the following main properties:\n    - It provides random indexed access to its elements. The index of the first element is 0.\n    - It ensures safe concurrent growing its size (different threads can safely append new elements).\n    - Adding new elements does not invalidate existing iterators and does not change indices of existing items.\n\n@par Compatibility\n    The class meets all Container Requirements and Reversible Container Requirements from\n    C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). 
But it doesn't meet\n    Sequence Requirements due to absence of insert() and erase() methods.\n\n@par Exception Safety\n    Methods working with memory allocation and/or new elements construction can throw an\n    exception if allocator fails to allocate memory or element's default constructor throws one.\n    Concurrent vector's element of type T must conform to the following requirements:\n    - Throwing an exception is forbidden for destructor of T.\n    - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized.\n    .\n    Otherwise, the program's behavior is undefined.\n@par\n    If an exception happens inside growth or assignment operation, an instance of the vector becomes invalid unless it is stated otherwise in the method documentation.\n    Invalid state means:\n    - There are no guarantees that all items were initialized by a constructor. The rest of items is zero-filled, including item where exception happens.\n    - An invalid vector instance cannot be repaired; it is unable to grow anymore.\n    - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful.\n    - Attempt to access not allocated elements using operator[] or iterators results in access violation or segmentation fault exception, and in case of using at() method a C++ exception is thrown.\n    .\n    If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if one of subsequent grow operations fails.\n\n@par Fragmentation\n    Unlike an STL vector, a concurrent_vector does not move existing elements if it needs\n    to allocate more memory. The container is divided into a series of contiguous arrays of\n    elements. The first reservation, growth, or assignment operation determines the size of\n    the first array. 
Using small number of elements as initial size incurs fragmentation that\n    may increase element access time. Internal layout can be optimized by method compact() that\n    merges several smaller arrays into one solid.\n\n@par Changes since TBB 2.1\n    - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated.\n    - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of vector\n    - Added resize() methods (not thread-safe)\n    - Added cbegin/cend/crbegin/crend methods\n    - Changed return type of methods grow* and push_back to iterator\n\n@par Changes since TBB 2.0\n    - Implemented exception-safety guarantees\n    - Added template argument for allocator\n    - Added allocator argument in constructors\n    - Faster index calculation\n    - First growth call specifies a number of segments to be merged in the first allocation.\n    - Fixed memory blow up for swarm of vector's instances of small size\n    - Added grow_by(size_type n, const_reference t) growth using copying constructor to init new items.\n    - Added STL-like constructors.\n    - Added operators ==, < and derivatives\n    - Added at() method, approved for using after an exception was thrown inside the vector\n    - Added get_allocator() method.\n    - Added assign() methods\n    - Added compact() method to defragment first segments\n    - Added swap() method\n    - range() defaults on grainsize = 1 supporting auto grainsize algorithms.\n\n    @ingroup containers */\ntemplate<typename T, class A>\nclass concurrent_vector: protected internal::allocator_base<T, A>,\n                         private internal::concurrent_vector_base {\nprivate:\n    template<typename I>\n    class generic_range_type: public blocked_range<I> {\n    public:\n        typedef T value_type;\n        typedef T& reference;\n        typedef const T& const_reference;\n        typedef I iterator;\n        typedef ptrdiff_t 
difference_type;\n        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}\n        template<typename U>\n        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}\n        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}\n    };\n\n    template<typename C, typename U>\n    friend class internal::vector_iterator;\n\npublic:\n    //------------------------------------------------------------------------\n    // STL compatible types\n    //------------------------------------------------------------------------\n    typedef internal::concurrent_vector_base_v3::size_type size_type;\n    typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;\n\n    typedef T value_type;\n    typedef ptrdiff_t difference_type;\n    typedef T& reference;\n    typedef const T& const_reference;\n    typedef T *pointer;\n    typedef const T *const_pointer;\n\n    typedef internal::vector_iterator<concurrent_vector,T> iterator;\n    typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;\n\n#if !defined(_MSC_VER) || _CPPLIB_VER>=300\n    // Assume ISO standard definition of std::reverse_iterator\n    typedef std::reverse_iterator<iterator> reverse_iterator;\n    typedef std::reverse_iterator<const_iterator> const_reverse_iterator;\n#else\n    // Use non-standard std::reverse_iterator\n    typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;\n    typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;\n#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */\n\n    //------------------------------------------------------------------------\n    // Parallel algorithm support\n    //------------------------------------------------------------------------\n    typedef generic_range_type<iterator> range_type;\n    typedef generic_range_type<const_iterator> 
const_range_type;\n\n    //------------------------------------------------------------------------\n    // STL compatible constructors & destructors\n    //------------------------------------------------------------------------\n\n    //! Construct empty vector.\n    explicit concurrent_vector(const allocator_type &a = allocator_type())\n        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()\n    {\n        vector_allocator_ptr = &internal_allocator;\n    }\n\n    //Constructors are not required to have synchronization\n    //(for more details see comment in the concurrent_vector_base constructor).\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Constructor from initializer_list\n    concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())\n        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()\n    {\n        vector_allocator_ptr = &internal_allocator;\n        __TBB_TRY {\n            internal_assign_iterators(init_list.begin(), init_list.end());\n        } __TBB_CATCH(...) {\n            segment_t *table = my_segment.load<relaxed>();;\n            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());\n            __TBB_RETHROW();\n        }\n\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT\n\n    //! Copying constructor\n    concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )\n        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()\n    {\n        vector_allocator_ptr = &internal_allocator;\n        __TBB_TRY {\n            internal_copy(vector, sizeof(T), &copy_array);\n        } __TBB_CATCH(...) {\n            segment_t *table = my_segment.load<relaxed>();\n            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());\n            __TBB_RETHROW();\n        }\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! 
Move constructor\n    //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)\n    concurrent_vector( concurrent_vector&& source)\n        : internal::allocator_base<T, A>(std::move(source)), internal::concurrent_vector_base()\n    {\n        vector_allocator_ptr = &internal_allocator;\n        concurrent_vector_base_v3::internal_swap(source);\n    }\n\n    concurrent_vector( concurrent_vector&& source, const allocator_type& a)\n        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()\n    {\n        vector_allocator_ptr = &internal_allocator;\n        //C++ standard requires instances of an allocator being compared for equality,\n        //which means that memory allocated by one instance is possible to deallocate with the other one.\n        if (a == source.my_allocator) {\n            concurrent_vector_base_v3::internal_swap(source);\n        } else {\n            __TBB_TRY {\n                internal_copy(source, sizeof(T), &move_array);\n            } __TBB_CATCH(...) {\n                segment_t *table = my_segment.load<relaxed>();\n                internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());\n                __TBB_RETHROW();\n            }\n        }\n    }\n\n#endif\n\n    //! Copying constructor for vector with different allocator type\n    template<class M>\n    concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )\n        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()\n    {\n        vector_allocator_ptr = &internal_allocator;\n        __TBB_TRY {\n            internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);\n        } __TBB_CATCH(...) 
{\n            segment_t *table = my_segment.load<relaxed>();\n            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );\n            __TBB_RETHROW();\n        }\n    }\n\n    //! Construction with initial size specified by argument n\n    explicit concurrent_vector(size_type n)\n    {\n        vector_allocator_ptr = &internal_allocator;\n        __TBB_TRY {\n            internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );\n        } __TBB_CATCH(...) {\n            segment_t *table = my_segment.load<relaxed>();\n            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );\n            __TBB_RETHROW();\n        }\n    }\n\n    //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance\n    concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())\n        : internal::allocator_base<T, A>(a)\n    {\n        vector_allocator_ptr = &internal_allocator;\n        __TBB_TRY {\n            internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );\n        } __TBB_CATCH(...) {\n            segment_t *table = my_segment.load<relaxed>();\n            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );\n            __TBB_RETHROW();\n        }\n    }\n\n    //! Construction with copying iteration range and given allocator instance\n    template<class I>\n    concurrent_vector(I first, I last, const allocator_type &a = allocator_type())\n        : internal::allocator_base<T, A>(a)\n    {\n        vector_allocator_ptr = &internal_allocator;\n        __TBB_TRY {\n            internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );\n        } __TBB_CATCH(...) 
{\n            segment_t *table = my_segment.load<relaxed>();\n            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );\n            __TBB_RETHROW();\n        }\n    }\n\n    //! Assignment\n    concurrent_vector& operator=( const concurrent_vector& vector ) {\n        if( this != &vector )\n            internal_assign(vector, sizeof(T), &destroy_array, &assign_array, &copy_array);\n        return *this;\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //TODO: add __TBB_NOEXCEPT()\n    //! Move assignment\n    concurrent_vector& operator=( concurrent_vector&& other ) {\n        __TBB_ASSERT(this != &other, \"Move assignment to itself is prohibited \");\n        typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_move_assignment pocma_t;\n        if(pocma_t::value || this->my_allocator == other.my_allocator) {\n            concurrent_vector trash (std::move(*this));\n            internal_swap(other);\n            if (pocma_t::value) {\n                this->my_allocator = std::move(other.my_allocator);\n            }\n        } else {\n            internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array);\n        }\n        return *this;\n    }\n#endif\n    //TODO: add an template assignment operator? (i.e. with different element type)\n\n    //! Assignment for vector with different allocator type\n    template<class M>\n    concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {\n        if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )\n            internal_assign(vector.internal_vector_base(),\n                sizeof(T), &destroy_array, &assign_array, &copy_array);\n        return *this;\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! 
Assignment for initializer_list\n    concurrent_vector& operator=( std::initializer_list<T> init_list ) {\n        internal_clear(&destroy_array);\n        internal_assign_iterators(init_list.begin(), init_list.end());\n        return *this;\n    }\n#endif //#if __TBB_INITIALIZER_LISTS_PRESENT\n\n    //------------------------------------------------------------------------\n    // Concurrent operations\n    //------------------------------------------------------------------------\n    //! Grow by \"delta\" elements.\n    /** Returns iterator pointing to the first new element. */\n    iterator grow_by( size_type delta ) {\n        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());\n    }\n\n    //! Grow by \"delta\" elements using copying constructor.\n    /** Returns iterator pointing to the first new element. */\n    iterator grow_by( size_type delta, const_reference t ) {\n        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());\n    }\n\n    /** Returns iterator pointing to the first new element. */\n    template<typename I>\n    iterator grow_by( I first, I last ) {\n        typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);\n        __TBB_ASSERT( delta >= 0, NULL);\n\n        return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), &copy_range<I>, static_cast<const void*>(&first)) : my_early_size.load());\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    /** Returns iterator pointing to the first new element. */\n    iterator grow_by( std::initializer_list<T> init_list ) {\n        return grow_by( init_list.begin(), init_list.end() );\n    }\n#endif //#if __TBB_INITIALIZER_LISTS_PRESENT\n\n    //! Append minimal sequence of elements such that size()>=n.\n    /** The new elements are default constructed.  
Blocks until all elements in range [0..n) are allocated.\n        May return while other elements are being constructed by other threads.\n        Returns iterator that points to beginning of appended sequence.\n        If no elements were appended, returns iterator pointing to nth element. */\n    iterator grow_to_at_least( size_type n ) {\n        size_type m=0;\n        if( n ) {\n            m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );\n            if( m>n ) m=n;\n        }\n        return iterator(*this, m);\n    };\n\n    /** Analogous to grow_to_at_least( size_type n ) with exception that the new\n        elements are initialized by copying of t instead of default construction. */\n    iterator grow_to_at_least( size_type n, const_reference t ) {\n        size_type m=0;\n        if( n ) {\n            m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t);\n            if( m>n ) m=n;\n        }\n        return iterator(*this, m);\n    };\n\n    //! Push item\n    /** Returns iterator pointing to the new element. */\n    iterator push_back( const_reference item )\n    {\n        size_type k;\n        T* ptr = static_cast<T*>(internal_push_back(sizeof(T),k));\n        element_construction_guard g(ptr);\n        new(ptr) T(item);\n        g.dismiss();\n        return iterator(*this, k, ptr);\n    }\n\n#if    __TBB_CPP11_RVALUE_REF_PRESENT\n    //! Push item, move-aware\n    /** Returns iterator pointing to the new element. */\n    iterator push_back(  T&& item )\n    {\n        size_type k;\n        T* ptr = static_cast<T*>(internal_push_back(sizeof(T),k));\n        element_construction_guard g(ptr);\n        new(ptr) T(std::move(item));\n        g.dismiss();\n        return iterator(*this, k, ptr);\n    }\n#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT\n    //! Push item, create item \"in place\" with provided arguments\n    /** Returns iterator pointing to the new element. 
*/\n    template<typename... Args>\n    iterator emplace_back(  Args&&... args)\n    {\n        size_type k;\n        T* ptr = static_cast<T*>(internal_push_back(sizeof(T),k));\n        element_construction_guard g(ptr);\n        new(ptr) T( std::forward<Args>(args)...);\n        g.dismiss();\n        return iterator(*this, k, ptr);\n    }\n#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n    //! Get reference to element at given index.\n    /** This method is thread-safe for concurrent reads, and also while growing the vector,\n        as long as the calling thread has checked that index < size(). */\n    reference operator[]( size_type index ) {\n        return internal_subscript(index);\n    }\n\n    //! Get const reference to element at given index.\n    const_reference operator[]( size_type index ) const {\n        return internal_subscript(index);\n    }\n\n    //! Get reference to element at given index. Throws exceptions on errors.\n    reference at( size_type index ) {\n        return internal_subscript_with_exceptions(index);\n    }\n\n    //! Get const reference to element at given index. Throws exceptions on errors.\n    const_reference at( size_type index ) const {\n        return internal_subscript_with_exceptions(index);\n    }\n\n    //! Get range for iterating with parallel algorithms\n    range_type range( size_t grainsize = 1 ) {\n        return range_type( begin(), end(), grainsize );\n    }\n\n    //! Get const range for iterating with parallel algorithms\n    const_range_type range( size_t grainsize = 1 ) const {\n        return const_range_type( begin(), end(), grainsize );\n    }\n\n    //------------------------------------------------------------------------\n    // Capacity\n    //------------------------------------------------------------------------\n    //! Return size of vector. 
It may include elements under construction\n    size_type size() const {\n        size_type sz = my_early_size, cp = internal_capacity();\n        return cp < sz ? cp : sz;\n    }\n\n    //! Return false if vector is not empty or has elements under construction at least.\n    bool empty() const {return !my_early_size;}\n\n    //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value.\n    size_type capacity() const {return internal_capacity();}\n\n    //! Allocate enough space to grow to size n without having to allocate more memory later.\n    /** Like most of the methods provided for STL compatibility, this method is *not* thread safe.\n        The capacity afterwards may be bigger than the requested reservation. */\n    void reserve( size_type n ) {\n        if( n )\n            internal_reserve(n, sizeof(T), max_size());\n    }\n\n    //! Resize the vector. Not thread-safe.\n    void resize( size_type n ) {\n        internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );\n    }\n\n    //! Resize the vector, copy t for new elements. Not thread-safe.\n    void resize( size_type n, const_reference t ) {\n        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );\n    }\n\n    //! Optimize memory usage and fragmentation.\n    void shrink_to_fit();\n\n    //! Upper bound on argument to reserve.\n    size_type max_size() const {return (~size_type(0))/sizeof(T);}\n\n    //------------------------------------------------------------------------\n    // STL support\n    //------------------------------------------------------------------------\n\n    //! start iterator\n    iterator begin() {return iterator(*this,0);}\n    //! end iterator\n    iterator end() {return iterator(*this,size());}\n    //! start const iterator\n    const_iterator begin() const {return const_iterator(*this,0);}\n    //! 
end const iterator\n    const_iterator end() const {return const_iterator(*this,size());}\n    //! start const iterator\n    const_iterator cbegin() const {return const_iterator(*this,0);}\n    //! end const iterator\n    const_iterator cend() const {return const_iterator(*this,size());}\n    //! reverse start iterator\n    reverse_iterator rbegin() {return reverse_iterator(end());}\n    //! reverse end iterator\n    reverse_iterator rend() {return reverse_iterator(begin());}\n    //! reverse start const iterator\n    const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}\n    //! reverse end const iterator\n    const_reverse_iterator rend() const {return const_reverse_iterator(begin());}\n    //! reverse start const iterator\n    const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}\n    //! reverse end const iterator\n    const_reverse_iterator crend() const {return const_reverse_iterator(begin());}\n    //! the first item\n    reference front() {\n        __TBB_ASSERT( size()>0, NULL);\n        return (my_segment[0].template load<relaxed>().template pointer<T>())[0];\n    }\n    //! the first item const\n    const_reference front() const {\n        __TBB_ASSERT( size()>0, NULL);\n        return static_cast<const T*>(my_segment[0].array)[0];\n    }\n    //! the last item\n    reference back() {\n        __TBB_ASSERT( size()>0, NULL);\n        return internal_subscript( size()-1 );\n    }\n    //! the last item const\n    const_reference back() const {\n        __TBB_ASSERT( size()>0, NULL);\n        return internal_subscript( size()-1 );\n    }\n    //! return allocator object\n    allocator_type get_allocator() const { return this->my_allocator; }\n\n    //! assign n items by copying t item\n    void assign(size_type n, const_reference t) {\n        clear();\n        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );\n    }\n\n    //! 
assign range [first, last)\n    template<class I>\n    void assign(I first, I last) {\n        clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! assigns an initializer list\n    void assign(std::initializer_list<T> init_list) {\n        clear(); internal_assign_iterators( init_list.begin(), init_list.end());\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT\n\n    //! swap two instances\n    void swap(concurrent_vector &vector) {\n        using std::swap;\n        if( this != &vector ) {\n            concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));\n            swap(this->my_allocator, vector.my_allocator);\n        }\n    }\n\n    //! Clear container while keeping memory allocated.\n    /** To free up the memory, use in conjunction with method compact(). Not thread safe **/\n    void clear() {\n        internal_clear(&destroy_array);\n    }\n\n    //! Clear and destroy vector.\n    ~concurrent_vector() {\n        segment_t *table = my_segment.load<relaxed>();\n        internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );\n        // base class destructor call should be then\n    }\n\n    const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }\nprivate:\n    //! Allocate k items\n    static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {\n        return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);\n    }\n    //! Free k segments from table\n    void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);\n\n    //! Get reference to element at given index.\n    T& internal_subscript( size_type index ) const;\n\n    //! 
Get reference to element at given index with errors checks\n    T& internal_subscript_with_exceptions( size_type index ) const;\n\n    //! assign n items by copying t\n    void internal_assign_n(size_type n, const_pointer p) {\n        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );\n    }\n\n    //! helper class\n    template<bool B> class is_integer_tag;\n\n    //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9\n    template<class I>\n    void internal_assign_range(I first, I last, is_integer_tag<true> *) {\n        internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));\n    }\n    //! inline proxy assign by iterators\n    template<class I>\n    void internal_assign_range(I first, I last, is_integer_tag<false> *) {\n        internal_assign_iterators(first, last);\n    }\n    //! assign by iterators\n    template<class I>\n    void internal_assign_iterators(I first, I last);\n\n    //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library\n\n    //! Construct n instances of T, starting at \"begin\".\n    static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );\n\n    //! Copy-construct n instances of T, starting at \"begin\".\n    static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );\n\n    //! Copy-construct n instances of T by copying single element pointed to by src, starting at \"dst\".\n    static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! Move-construct n instances of T, starting at \"dst\" by copying according element of src array.\n    static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );\n    //! 
Move-assign (using operator=) n instances of T, starting at \"dst\" by assigning according element of src array.\n    static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );\n#endif\n    //! Copy-construct n instances of T, starting at \"dst\" by iterator range of [p_type_erased_iterator, p_type_erased_iterator+n).\n    template<typename Iterator>\n    static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );\n\n    //! Assign (using operator=) n instances of T, starting at \"dst\" by assigning according element of src array.\n    static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );\n\n    //! Destroy n instances of T, starting at \"begin\".\n    static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );\n\n    //! Exception-aware helper class for filling a segment by exception-danger operators of user class\n    class internal_loop_guide : internal::no_copy {\n    public:\n        const pointer array;\n        const size_type n;\n        size_type i;\n\n        static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }\n        static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }\n\n        internal_loop_guide(size_type ntrials, void *ptr)\n            : array(as_pointer(ptr)), n(ntrials), i(0) {}\n        void init() {   for(; i < n; ++i) new( &array[i] ) T(); }\n        void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); }\n        void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); }\n        void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n        void move_assign(const void *src)       { for(; i < n; ++i) array[i]         =  std::move(as_pointer(src)[i]);   }\n        void move_construct(const void 
*src)    { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }\n#endif\n        //TODO: rename to construct_range\n        template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }\n        ~internal_loop_guide() {\n            if(i < n) {// if an exception was raised, fill the rest of items with zeros\n                internal::handle_unconstructed_elements(array+i, n-i);\n            }\n        }\n    };\n\n    class element_construction_guard : internal::no_copy{\n        pointer element;\n    public:\n        element_construction_guard(pointer an_element) : element (an_element){}\n        void dismiss(){ element = NULL; }\n        ~element_construction_guard(){\n            if (element){\n                internal::handle_unconstructed_elements(element, 1);\n            }\n        }\n    };\n};\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n#pragma warning (push)\n#pragma warning (disable: 4701) // potentially uninitialized local variable \"old\"\n#endif\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::shrink_to_fit() {\n    internal_segments_table old;\n    __TBB_TRY {\n        if( internal_compact( sizeof(T), &old, &destroy_array, &copy_array ) )\n            internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments\n    } __TBB_CATCH(...) {\n        if( old.first_block ) // free segment allocated for compacting. 
Only for support of exceptions in ctor of user T[ype]\n            internal_free_segments( old.table, 1, old.first_block );\n        __TBB_RETHROW();\n    }\n}\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n#pragma warning (pop)\n#endif // warning 4701 is back\n\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block) {\n    // Free the arrays\n    while( k > first_block ) {\n        --k;\n        segment_value_t segment_value = table[k].load<relaxed>();\n        table[k].store<relaxed>(segment_not_used());\n        if( segment_value == segment_allocated() ) // check for correct segment pointer\n            this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );\n    }\n    segment_value_t segment_value = table[0].load<relaxed>();\n    if( segment_value == segment_allocated() ) {\n        __TBB_ASSERT( first_block > 0, NULL );\n        while(k > 0) table[--k].store<relaxed>(segment_not_used());\n        this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );\n    }\n}\n\ntemplate<typename T, class A>\nT& concurrent_vector<T, A>::internal_subscript( size_type index ) const {\n    //TODO: unify both versions of internal_subscript\n    __TBB_ASSERT( index < my_early_size, \"index out of bounds\" );\n    size_type j = index;\n    segment_index_t k = segment_base_index_of( j );\n    __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, \"index is being allocated\" );\n    //no need in load with acquire (load<acquire>) since thread works in own space or gets\n    //the information about added elements via some form of external synchronization\n    //TODO: why not make a load of my_segment relaxed as well ?\n    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT\n    segment_value_t segment_value =  my_segment[k].template load<relaxed>();\n    
__TBB_ASSERT( segment_value != segment_allocation_failed(), \"the instance is broken by bad allocation. Use at() instead\" );\n    __TBB_ASSERT( segment_value != segment_not_used(), \"index is being allocated\" );\n    return (( segment_value.pointer<T>()))[j];\n}\n\ntemplate<typename T, class A>\nT& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {\n    if( index >= my_early_size )\n        internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range\n    size_type j = index;\n    segment_index_t k = segment_base_index_of( j );\n    //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table\n    if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )\n        internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error\n    // no need in load with acquire (load<acquire>) since thread works in own space or gets\n    //the information about added elements via some form of external synchronization\n    //TODO: why not make a load of my_segment relaxed as well ?\n    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT\n    segment_value_t segment_value =  my_segment[k].template load<relaxed>();\n    if( segment_value != segment_allocated() ) // check for correct segment pointer\n        internal::throw_exception(internal::eid_index_range_error); // throw std::range_error\n    return (segment_value.pointer<T>())[j];\n}\n\ntemplate<typename T, class A> template<class I>\nvoid concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {\n    __TBB_ASSERT(my_early_size == 0, NULL);\n    size_type n = std::distance(first, last);\n    if( !n ) return;\n    internal_reserve(n, sizeof(T), max_size());\n    my_early_size = n;\n    segment_index_t k = 0;\n    //TODO: unify segment iteration code with concurrent_base_v3::helper\n    size_type sz = segment_size( my_first_block );\n    while( sz < n ) {\n     
   internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());\n        loop.iterate(first);\n        n -= sz;\n        if( !k ) k = my_first_block;\n        else { ++k; sz <<= 1; }\n    }\n    internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());\n    loop.iterate(first);\n}\n\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {\n    internal_loop_guide loop(n, begin); loop.init();\n}\n\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {\n    internal_loop_guide loop(n, begin); loop.init(src);\n}\n\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {\n    internal_loop_guide loop(n, dst); loop.copy(src);\n}\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {\n    internal_loop_guide loop(n, dst); loop.move_construct(src);\n}\n\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {\n    internal_loop_guide loop(n, dst); loop.move_assign(src);\n}\n#endif\n\ntemplate<typename T, class A>\ntemplate<typename I>\nvoid concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){\n    I & iterator ((*const_cast<I*>(static_cast<const I*>(p_type_erased_iterator))));\n    internal_loop_guide loop(n, dst); loop.iterate(iterator);\n}\n\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {\n    internal_loop_guide loop(n, dst); loop.assign(src);\n}\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warning\n    #pragma warning (push)\n    #pragma warning (disable: 
4189)\n#endif\ntemplate<typename T, class A>\nvoid concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {\n    T* array = static_cast<T*>(begin);\n    for( size_type j=n; j>0; --j )\n        array[j-1].~T(); // destructors are supposed to not throw any exceptions\n}\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    #pragma warning (pop)\n#endif // warning 4189 is back\n\n// concurrent_vector's template functions\ntemplate<typename T, class A1, class A2>\ninline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {\n    //TODO: call size() only once per vector (in operator==)\n    // Simply:    return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());\n    if(a.size() != b.size()) return false;\n    typename concurrent_vector<T, A1>::const_iterator i(a.begin());\n    typename concurrent_vector<T, A2>::const_iterator j(b.begin());\n    for(; i != a.end(); ++i, ++j)\n        if( !(*i == *j) ) return false;\n    return true;\n}\n\ntemplate<typename T, class A1, class A2>\ninline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)\n{    return !(a == b); }\n\ntemplate<typename T, class A1, class A2>\ninline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)\n{    return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }\n\ntemplate<typename T, class A1, class A2>\ninline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)\n{    return b < a; }\n\ntemplate<typename T, class A1, class A2>\ninline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)\n{    return !(b < a); }\n\ntemplate<typename T, class A1, class A2>\ninline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)\n{    return !(a < b); }\n\ntemplate<typename T, class A>\ninline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)\n{    
a.swap( b ); }\n\n} // namespace tbb\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    #pragma warning (pop)\n#endif // warning 4267,4127 are back\n\n#endif /* __TBB_concurrent_vector_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/condition_variable.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n#include \"tbb/compat/condition_variable\"\n#include \"tbb/atomic.h\"\n#include \"tbb_misc.h\"\n#include \"dynamic_link.h\"\n#include \"itt_notify.h\"\n\nnamespace tbb {\n\nnamespace internal {\n\n//condition_variable\n#if _WIN32||_WIN64\nusing tbb::interface5::internal::condition_variable_using_event;\n\nstatic atomic<do_once_state> condvar_api_state;\n\nvoid WINAPI init_condvar_using_event( condition_variable_using_event* cv_event )\n{\n    // TODO: For Metro port, we can always use the API for condition variables, without dynamic_link etc.\n    cv_event->event = CreateEventEx(NULL, NULL, 0x1 /*CREATE_EVENT_MANUAL_RESET*/, EVENT_ALL_ACCESS );\n    InitializeCriticalSectionEx( &cv_event->mutex, 4000, 0 );\n    cv_event->n_waiters = 0;\n    cv_event->release_count = 0;\n    cv_event->epoch = 0;\n}\n\nBOOL WINAPI sleep_condition_variable_cs_using_event( condition_variable_using_event* cv_event, LPCRITICAL_SECTION cs, DWORD dwMilliseconds )\n{\n    EnterCriticalSection( &cv_event->mutex );\n    ++cv_event->n_waiters;\n    unsigned my_generation = cv_event->epoch;\n    LeaveCriticalSection( &cv_event->mutex );\n    LeaveCriticalSection( cs );\n    for (;;) {\n        // should come here at least once\n        DWORD rc = WaitForSingleObjectEx( cv_event->event, dwMilliseconds, FALSE );\n        EnterCriticalSection( &cv_event->mutex );\n        if( rc!=WAIT_OBJECT_0 ) {\n            --cv_event->n_waiters;\n            LeaveCriticalSection( &cv_event->mutex );\n            if( rc==WAIT_TIMEOUT ) {\n                SetLastError( WAIT_TIMEOUT );\n                EnterCriticalSection( cs );\n            }\n            return false;\n        }\n        __TBB_ASSERT( rc==WAIT_OBJECT_0, NULL );\n        if( cv_event->release_count>0 && cv_event->epoch!=my_generation )\n            break;\n        
LeaveCriticalSection( &cv_event->mutex );\n    }\n\n    // still in the critical section\n    --cv_event->n_waiters;\n    int count = --cv_event->release_count;\n    LeaveCriticalSection( &cv_event->mutex );\n\n    if( count==0 ) {\n        __TBB_ASSERT( cv_event->event, \"Premature destruction of condition variable?\" );\n        ResetEvent( cv_event->event );\n    }\n    EnterCriticalSection( cs );\n    return true;\n}\n\nvoid WINAPI wake_condition_variable_using_event( condition_variable_using_event* cv_event )\n{\n    EnterCriticalSection( &cv_event->mutex );\n    if( cv_event->n_waiters>cv_event->release_count ) {\n        SetEvent( cv_event->event ); // Signal the manual-reset event.\n        ++cv_event->release_count;\n        ++cv_event->epoch;\n    }\n    LeaveCriticalSection( &cv_event->mutex );\n}\n\nvoid WINAPI wake_all_condition_variable_using_event( condition_variable_using_event* cv_event )\n{\n    EnterCriticalSection( &cv_event->mutex );\n    if( cv_event->n_waiters>0 ) {\n        SetEvent( cv_event->event );\n        cv_event->release_count = cv_event->n_waiters;\n        ++cv_event->epoch;\n    }\n    LeaveCriticalSection( &cv_event->mutex );\n}\n\nvoid WINAPI destroy_condvar_using_event( condition_variable_using_event* cv_event )\n{\n    HANDLE my_event = cv_event->event;\n    EnterCriticalSection( &cv_event->mutex );\n    // NULL is an invalid HANDLE value\n    cv_event->event = NULL;\n    if( cv_event->n_waiters>0 ) {\n        LeaveCriticalSection( &cv_event->mutex );\n        spin_wait_until_eq( cv_event->n_waiters, 0 );\n        // make sure the last thread completes its access to cv\n        EnterCriticalSection( &cv_event->mutex );\n    }\n    LeaveCriticalSection( &cv_event->mutex );\n    CloseHandle( my_event );\n}\n\nvoid WINAPI destroy_condvar_noop( CONDITION_VARIABLE* /*cv*/ ) { /*no op*/ }\n\nstatic void (WINAPI *__TBB_init_condvar)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&init_condvar_using_event;\nstatic BOOL 
(WINAPI *__TBB_condvar_wait)( PCONDITION_VARIABLE, LPCRITICAL_SECTION, DWORD ) = (BOOL (WINAPI *)(PCONDITION_VARIABLE,LPCRITICAL_SECTION, DWORD))&sleep_condition_variable_cs_using_event;\nstatic void (WINAPI *__TBB_condvar_notify_one)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&wake_condition_variable_using_event;\nstatic void (WINAPI *__TBB_condvar_notify_all)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&wake_all_condition_variable_using_event;\nstatic void (WINAPI *__TBB_destroy_condvar)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_using_event;\n\n//! Table describing how to link the handlers.\nstatic const dynamic_link_descriptor CondVarLinkTable[] = {\n    DLD(InitializeConditionVariable, __TBB_init_condvar),\n    DLD(SleepConditionVariableCS,    __TBB_condvar_wait),\n    DLD(WakeConditionVariable,       __TBB_condvar_notify_one),\n    DLD(WakeAllConditionVariable,    __TBB_condvar_notify_all)\n};\n\nvoid init_condvar_module()\n{\n    __TBB_ASSERT( (uintptr_t)__TBB_init_condvar==(uintptr_t)&init_condvar_using_event, NULL );\n    if( dynamic_link( \"Kernel32.dll\", CondVarLinkTable, 4 ) )\n        __TBB_destroy_condvar = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_noop;\n}\n#endif /* _WIN32||_WIN64 */\n\n} // namespace internal\n\n#if _WIN32||_WIN64\n\nnamespace interface5 {\nnamespace internal {\n\nusing tbb::internal::condvar_api_state;\nusing tbb::internal::__TBB_init_condvar;\nusing tbb::internal::__TBB_condvar_wait;\nusing tbb::internal::__TBB_condvar_notify_one;\nusing tbb::internal::__TBB_condvar_notify_all;\nusing tbb::internal::__TBB_destroy_condvar;\nusing tbb::internal::init_condvar_module;\n\nvoid internal_initialize_condition_variable( condvar_impl_t& cv )\n{\n    atomic_do_once( &init_condvar_module, condvar_api_state );\n    __TBB_init_condvar( &cv.cv_native );\n}\n\nvoid internal_destroy_condition_variable( condvar_impl_t& cv )\n{\n    __TBB_destroy_condvar( 
&cv.cv_native );\n}\n\nvoid internal_condition_variable_notify_one( condvar_impl_t& cv )\n{\n    __TBB_condvar_notify_one ( &cv.cv_native );\n}\n\nvoid internal_condition_variable_notify_all( condvar_impl_t& cv )\n{\n    __TBB_condvar_notify_all( &cv.cv_native );\n}\n\nbool internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i )\n{\n    DWORD duration = i ? DWORD((i->seconds()*1000)) : INFINITE;\n    mtx->set_state( mutex::INITIALIZED );\n    BOOL res = __TBB_condvar_wait( &cv.cv_native, mtx->native_handle(), duration );\n    mtx->set_state( mutex::HELD );\n    return res?true:false;\n}\n\n} // namespace internal\n} // nameespace interface5\n\n#endif /* _WIN32||_WIN64 */\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/critical_section.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/critical_section.h\"\n#include \"itt_notify.h\"\n\nnamespace tbb {\n    namespace internal {\n\nvoid critical_section_v4::internal_construct() {\n    ITT_SYNC_CREATE(&my_impl, _T(\"ppl::critical_section\"), _T(\"\"));\n}\n}  // namespace internal\n}  // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/critical_section.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_CRITICAL_SECTION_H_\n#define _TBB_CRITICAL_SECTION_H_\n\n#if _WIN32||_WIN64\n#include \"machine/windows_api.h\"\n#else\n#include <pthread.h>\n#include <errno.h>\n#endif  // _WIN32||WIN64\n\n#include \"tbb_stddef.h\"\n#include \"tbb_thread.h\"\n#include \"tbb_exception.h\"\n\n#include \"tbb_profiling.h\"\n\nnamespace tbb {\n\n    namespace internal {\nclass critical_section_v4 : internal::no_copy {\n#if _WIN32||_WIN64\n    CRITICAL_SECTION my_impl;\n#else\n    pthread_mutex_t my_impl;\n#endif\n    tbb_thread::id my_tid;\npublic:\n\n    void __TBB_EXPORTED_METHOD internal_construct();\n\n    critical_section_v4() { \n#if _WIN32||_WIN64\n        InitializeCriticalSectionEx( &my_impl, 4000, 0 );\n#else\n        pthread_mutex_init(&my_impl, NULL);\n#endif\n        internal_construct();\n    }\n\n    ~critical_section_v4() {\n        __TBB_ASSERT(my_tid == tbb_thread::id(), \"Destroying a still-held critical section\");\n#if _WIN32||_WIN64\n        DeleteCriticalSection(&my_impl); \n#else\n        pthread_mutex_destroy(&my_impl);\n#endif\n    }\n\n    class scoped_lock : internal::no_copy {\n    private:\n        critical_section_v4 &my_crit;\n    public:\n        scoped_lock( critical_section_v4& lock_me) :my_crit(lock_me) {\n            my_crit.lock();\n        }\n\n        ~scoped_lock() {\n            my_crit.unlock();\n        }\n    };\n\n    void lock() { \n        tbb_thread::id local_tid = this_tbb_thread::get_id();\n        if(local_tid == my_tid) throw_exception( eid_improper_lock );\n#if _WIN32||_WIN64\n        EnterCriticalSection( &my_impl );\n#else\n        int rval = pthread_mutex_lock(&my_impl);\n        __TBB_ASSERT_EX(!rval, \"critical_section::lock: pthread_mutex_lock failed\");\n#endif\n        __TBB_ASSERT(my_tid == tbb_thread::id(), NULL);\n        my_tid = local_tid;\n    }\n\n    bool 
try_lock() {\n        bool gotlock;\n        tbb_thread::id local_tid = this_tbb_thread::get_id();\n        if(local_tid == my_tid) return false;\n#if _WIN32||_WIN64\n        gotlock = TryEnterCriticalSection( &my_impl ) != 0;\n#else\n        int rval = pthread_mutex_trylock(&my_impl);\n        // valid returns are 0 (locked) and [EBUSY]\n        __TBB_ASSERT(rval == 0 || rval == EBUSY, \"critical_section::trylock: pthread_mutex_trylock failed\");\n        gotlock = rval == 0;\n#endif\n        if(gotlock)  {\n            my_tid = local_tid;\n        }\n        return gotlock;\n    }\n\n    void unlock() {\n        __TBB_ASSERT(this_tbb_thread::get_id() == my_tid, \"thread unlocking critical_section is not thread that locked it\");\n        my_tid = tbb_thread::id();\n#if _WIN32||_WIN64\n        LeaveCriticalSection( &my_impl );\n#else\n        int rval = pthread_mutex_unlock(&my_impl);\n        __TBB_ASSERT_EX(!rval, \"critical_section::unlock: pthread_mutex_unlock failed\");\n#endif\n    }\n\n    static const bool is_rw_mutex = false;\n    static const bool is_recursive_mutex = false;\n    static const bool is_fair_mutex = true;\n}; // critical_section_v4\n} // namespace internal\ntypedef internal::critical_section_v4 critical_section;\n\n__TBB_DEFINE_PROFILING_SET_NAME(critical_section)\n} // namespace tbb\n#endif  // _TBB_CRITICAL_SECTION_H_\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/custom_scheduler.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_custom_scheduler_H\n#define _TBB_custom_scheduler_H\n\n#include \"scheduler.h\"\n#include \"observer_proxy.h\"\n#include \"itt_notify.h\"\n\nnamespace tbb {\nnamespace internal {\n\n//! Amount of time to pause between steals.\n/** The default values below were found to be best empirically for K-Means\n    on the 32-way Altix and 4-way (*2 for HT) fxqlin04. 
*/\n#ifdef __TBB_STEALING_PAUSE\nstatic const long PauseTime = __TBB_STEALING_PAUSE;\n#elif __TBB_ipf\nstatic const long PauseTime = 1500;\n#else\nstatic const long PauseTime = 80;\n#endif\n\n//------------------------------------------------------------------------\n//! Traits classes for scheduler\n//------------------------------------------------------------------------\n\nstruct DefaultSchedulerTraits {\n    static const bool itt_possible = true;\n    static const bool has_slow_atomic = false;\n};\n\nstruct IntelSchedulerTraits {\n    static const bool itt_possible = false;\n#if __TBB_x86_32||__TBB_x86_64\n    static const bool has_slow_atomic = true;\n#else\n    static const bool has_slow_atomic = false;\n#endif /* __TBB_x86_32||__TBB_x86_64 */\n};\n\n//------------------------------------------------------------------------\n// custom_scheduler\n//------------------------------------------------------------------------\n\n//! A scheduler with a customized evaluation loop.\n/** The customization can use SchedulerTraits to make decisions without needing a run-time check. */\ntemplate<typename SchedulerTraits>\nclass custom_scheduler: private generic_scheduler {\n    typedef custom_scheduler<SchedulerTraits> scheduler_type;\n\n    //! Scheduler loop that dispatches tasks.\n    /** If child is non-NULL, it is dispatched first.\n        Then, until \"parent\" has a reference count of 1, other task are dispatched or stolen. */\n    /*override*/\n    void local_wait_for_all( task& parent, task* child );\n\n    //! Entry point from client code to the scheduler loop that dispatches tasks.\n    /** The method is virtual, but the *this object is used only for sake of dispatching on the correct vtable,\n        not necessarily the correct *this object.  The correct *this object is looked up in TLS. 
*/\n    /*override*/\n    void wait_for_all( task& parent, task* child ) {\n        static_cast<custom_scheduler*>(governor::local_scheduler())->scheduler_type::local_wait_for_all( parent, child );\n    }\n\n    //! Construct a custom_scheduler\n    custom_scheduler( arena* a, size_t index ) : generic_scheduler(a, index) {}\n\n    //! Decrements ref_count of a predecessor.\n    /** If it achieves 0, the predecessor is scheduled for execution.\n        When changing, remember that this is a hot path function. */\n    void tally_completion_of_predecessor( task& s, task*& bypass_slot ) {\n        task_prefix& p = s.prefix();\n        if( SchedulerTraits::itt_possible )\n            ITT_NOTIFY(sync_releasing, &p.ref_count);\n        if( SchedulerTraits::has_slow_atomic && p.ref_count==1 )\n            p.ref_count=0;\n        else if( __TBB_FetchAndDecrementWrelease(&p.ref_count) > 1 ) {// more references exist\n            // '__TBB_cl_evict(&p)' degraded performance of parallel_preorder example\n            return;\n        }\n\n        // Ordering on p.ref_count (superfluous if SchedulerTraits::has_slow_atomic)\n        __TBB_control_consistency_helper();\n        __TBB_ASSERT(p.ref_count==0, \"completion of task caused predecessor's reference count to underflow\");\n        if( SchedulerTraits::itt_possible )\n            ITT_NOTIFY(sync_acquired, &p.ref_count);\n#if TBB_USE_ASSERT\n        p.extra_state &= ~es_ref_count_active;\n#endif /* TBB_USE_ASSERT */\n\n#if __TBB_RECYCLE_TO_ENQUEUE\n        if (p.state==task::to_enqueue) {\n            // related to __TBB_TASK_ARENA TODO: try keep priority of the task\n            // e.g. 
rework task_prefix to remember priority of received task and use here\n            my_arena->enqueue_task(s, 0, my_random );\n        } else\n#endif /*__TBB_RECYCLE_TO_ENQUEUE*/\n        if( bypass_slot==NULL )\n            bypass_slot = &s;\n        else\n            local_spawn( s, s.prefix().next );\n    }\n\npublic:\n    static generic_scheduler* allocate_scheduler( arena* a, size_t index ) {\n        scheduler_type* s = (scheduler_type*)NFS_Allocate(1,sizeof(scheduler_type),NULL);\n        new( s ) scheduler_type( a, index );\n        s->assert_task_pool_valid();\n        ITT_SYNC_CREATE(s, SyncType_Scheduler, SyncObj_TaskPoolSpinning);\n        return s;\n    }\n\n    //! Try getting a task from the mailbox or stealing from another scheduler.\n    /** Returns the stolen task or NULL if all attempts fail. */\n    /* override */ task* receive_or_steal_task( __TBB_atomic reference_count& completion_ref_count, bool return_if_no_work );\n\n}; // class custom_scheduler<>\n\n//------------------------------------------------------------------------\n// custom_scheduler methods\n//------------------------------------------------------------------------\ntemplate<typename SchedulerTraits>\ntask* custom_scheduler<SchedulerTraits>::receive_or_steal_task( __TBB_atomic reference_count& completion_ref_count,\n                                                                bool return_if_no_work ) {\n    task* t = NULL;\n    bool outermost_dispatch_level = return_if_no_work || master_outermost_level();\n    bool can_steal_here = can_steal();\n    my_inbox.set_is_idle( true );\n#if __TBB_HOARD_NONLOCAL_TASKS\n    __TBB_ASSERT(!my_nonlocal_free_list, NULL);\n#endif\n#if __TBB_TASK_PRIORITY\n    if ( return_if_no_work && my_arena->my_skipped_fifo_priority ) {\n        // This thread can dequeue FIFO tasks, and some priority levels of\n        // FIFO tasks have been bypassed (to prevent deadlock caused by\n        // dynamic priority changes in nested task group hierarchy).\n  
      intptr_t skipped_priority = my_arena->my_skipped_fifo_priority;\n        if ( my_arena->my_skipped_fifo_priority.compare_and_swap(0, skipped_priority) == skipped_priority &&\n             skipped_priority > my_arena->my_top_priority )\n        {\n            my_market->update_arena_priority( *my_arena, skipped_priority );\n        }\n    }\n    task_stream *ts;\n#else /* !__TBB_TASK_PRIORITY */\n    task_stream *ts = &my_arena->my_task_stream;\n#endif /* !__TBB_TASK_PRIORITY */\n    // TODO: Try to find a place to reset my_limit (under market's lock)\n    // The number of slots potentially used in the arena. Updated once in a while, as my_limit changes rarely.\n    size_t n = my_arena->my_limit-1;\n    int yield_count = 0;\n    // The state \"failure_count==-1\" is used only when itt_possible is true,\n    // and denotes that a sync_prepare has not yet been issued.\n    for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count) {\n        __TBB_ASSERT( my_arena->my_limit > 0, NULL );\n        __TBB_ASSERT( my_arena_index <= n, NULL );\n        if( completion_ref_count==1 ) {\n            if( SchedulerTraits::itt_possible ) {\n                if( failure_count!=-1 ) {\n                    ITT_NOTIFY(sync_prepare, &completion_ref_count);\n                    // Notify Intel(R) Thread Profiler that thread has stopped spinning.\n                    ITT_NOTIFY(sync_acquired, this);\n                }\n                ITT_NOTIFY(sync_acquired, &completion_ref_count);\n            }\n            __TBB_ASSERT( !t, NULL );\n            __TBB_control_consistency_helper(); // on ref_count\n            break; // exit stealing loop and return;\n        }\n        // Check if the resource manager requires our arena to relinquish some threads\n        if ( return_if_no_work && my_arena->my_num_workers_allotted < my_arena->num_workers_active() ) {\n#if !__TBB_TASK_ARENA\n            __TBB_ASSERT( is_worker(), NULL );\n#endif\n            if( 
SchedulerTraits::itt_possible && failure_count != -1 )\n                ITT_NOTIFY(sync_cancel, this);\n            return NULL;\n        }\n#if __TBB_TASK_PRIORITY\n        ts = &my_arena->my_task_stream[my_arena->my_top_priority];\n#endif\n        // Check if there are tasks mailed to this thread via task-to-thread affinity mechanism.\n        __TBB_ASSERT(my_affinity_id, NULL);\n        if ( n && !my_inbox.empty() && (t = get_mailbox_task()) ) {\n            GATHER_STATISTIC( ++my_counters.mails_received );\n        }\n        // Check if there are tasks in starvation-resistant stream.\n        // Only allowed for workers with empty stack, which is identified by return_if_no_work.\n        else if ( outermost_dispatch_level && !ts->empty() && (t = ts->pop( my_arena_slot->hint_for_pop)) ) {\n            ITT_NOTIFY(sync_acquired, ts);\n            // just proceed with the obtained task\n        }\n#if __TBB_TASK_PRIORITY\n        // Check if any earlier offloaded non-top priority tasks become returned to the top level\n        else if ( my_offloaded_tasks && (t=reload_tasks()) ) {\n            // just proceed with the obtained task\n        }\n#endif /* __TBB_TASK_PRIORITY */\n        else if ( can_steal_here && n ) {\n            // Try to steal a task from a random victim.\n            size_t k = my_random.get() % n;\n            arena_slot* victim = &my_arena->my_slots[k];\n            // The following condition excludes the master that might have\n            // already taken our previous place in the arena from the list .\n            // of potential victims. 
But since such a situation can take\n            // place only in case of significant oversubscription, keeping\n            // the checks simple seems to be preferable to complicating the code.\n            if( k >= my_arena_index )\n                ++victim;               // Adjusts random distribution to exclude self\n            task **pool = victim->task_pool;\n            if( pool == EmptyTaskPool || !(t = steal_task( *victim )) )\n                goto fail;\n            if( is_proxy(*t) ) {\n                task_proxy &tp = *(task_proxy*)t;\n                t = tp.extract_task<task_proxy::pool_bit>();\n                if ( !t ) {\n                    // Proxy was empty, so it's our responsibility to free it\n                    free_task<no_cache_small_task>(tp);\n                    goto fail;\n                }\n                GATHER_STATISTIC( ++my_counters.proxies_stolen );\n            }\n            t->prefix().extra_state |= es_task_is_stolen;\n            if( is_version_3_task(*t) ) {\n                my_innermost_running_task = t;\n                t->prefix().owner = this;\n                t->note_affinity( my_affinity_id );\n            }\n            GATHER_STATISTIC( ++my_counters.steals_committed );\n        } // end of stealing branch\n        else\n            goto fail;\n        // A task was successfully obtained somewhere\n        __TBB_ASSERT(t,NULL);\n#if __TBB_SCHEDULER_OBSERVER\n        my_arena->my_observers.notify_entry_observers( my_last_local_observer, is_worker() );\n        the_global_observer_list.notify_entry_observers( my_last_global_observer, is_worker() );\n#endif /* __TBB_SCHEDULER_OBSERVER */\n        if ( SchedulerTraits::itt_possible && failure_count != -1 ) {\n            // FIXME - might be victim, or might be selected from a mailbox\n            // Notify Intel(R) Thread Profiler that thread has stopped spinning.\n            ITT_NOTIFY(sync_acquired, this);\n        }\n        break; // exit stealing loop and 
return\nfail:\n        GATHER_STATISTIC( ++my_counters.steals_failed );\n        if( SchedulerTraits::itt_possible && failure_count==-1 ) {\n            // The first attempt to steal work failed, so notify Intel(R) Thread Profiler that\n            // the thread has started spinning.  Ideally, we would do this notification\n            // *before* the first failed attempt to steal, but at that point we do not\n            // know that the steal will fail.\n            ITT_NOTIFY(sync_prepare, this);\n            failure_count = 0;\n        }\n        // Pause, even if we are going to yield, because the yield might return immediately.\n        __TBB_Pause(PauseTime);\n        const int failure_threshold = 2*int(n+1);\n        if( failure_count>=failure_threshold ) {\n#if __TBB_YIELD2P\n            failure_count = 0;\n#else\n            failure_count = failure_threshold;\n#endif\n            __TBB_Yield();\n#if __TBB_TASK_PRIORITY\n            // Check if there are tasks abandoned by other workers\n            if ( my_arena->my_orphaned_tasks ) {\n                // Epoch must be advanced before seizing the list pointer\n                ++my_arena->my_abandonment_epoch;\n                task* orphans = (task*)__TBB_FetchAndStoreW( &my_arena->my_orphaned_tasks, 0 );\n                if ( orphans ) {\n                    task** link = NULL;\n                    // Get local counter out of the way (we've just brought in external tasks)\n                    my_local_reload_epoch--;\n                    t = reload_tasks( orphans, link, effective_reference_priority() );\n                    if ( orphans ) {\n                        *link = my_offloaded_tasks;\n                        if ( !my_offloaded_tasks )\n                            my_offloaded_task_list_tail_link = link;\n                        my_offloaded_tasks = orphans;\n                    }\n                    __TBB_ASSERT( !my_offloaded_tasks == !my_offloaded_task_list_tail_link, NULL );\n                  
  if ( t ) {\n                        if( SchedulerTraits::itt_possible )\n                            ITT_NOTIFY(sync_cancel, this);\n                        break; // exit stealing loop and return\n                    }\n                }\n            }\n#endif /* __TBB_TASK_PRIORITY */\n            const int yield_threshold = 100;\n            if( yield_count++ >= yield_threshold ) {\n                // When a worker thread has nothing to do, return it to RML.\n                // For purposes of affinity support, the thread is considered idle while in RML.\n#if __TBB_TASK_PRIORITY\n                if( return_if_no_work || my_arena->my_top_priority > my_arena->my_bottom_priority ) {\n                    if ( my_arena->is_out_of_work() && return_if_no_work ) {\n#else /* !__TBB_TASK_PRIORITY */\n                    if ( return_if_no_work && my_arena->is_out_of_work() ) {\n#endif /* !__TBB_TASK_PRIORITY */\n                        if( SchedulerTraits::itt_possible )\n                            ITT_NOTIFY(sync_cancel, this);\n                        return NULL;\n                    }\n#if __TBB_TASK_PRIORITY\n                }\n                if ( my_offloaded_tasks ) {\n                    // Safeguard against any sloppiness in managing reload epoch\n                    // counter (e.g. on the hot path because of performance reasons).\n                    my_local_reload_epoch--;\n                    // Break the deadlock caused by a higher priority dispatch loop\n                    // stealing and offloading a lower priority task. 
Priority check\n                    // at the stealing moment cannot completely preclude such cases\n                    // because priorities can changes dynamically.\n                    if ( !return_if_no_work && *my_ref_top_priority > my_arena->my_top_priority ) {\n                        GATHER_STATISTIC( ++my_counters.prio_ref_fixups );\n                        my_ref_top_priority = &my_arena->my_top_priority;\n                        // it's expected that only outermost workers can use global reload epoch\n                        __TBB_ASSERT(!worker_outermost_level(), NULL);\n                        __TBB_ASSERT(my_ref_reload_epoch == &my_arena->my_reload_epoch, NULL);\n                    }\n                }\n#endif /* __TBB_TASK_PRIORITY */\n            } // end of arena snapshot branch\n            // If several attempts did not find work, re-read the arena limit.\n            n = my_arena->my_limit-1;\n        } // end of yielding branch\n    } // end of nonlocal task retrieval loop\n    my_inbox.set_is_idle( false );\n    return t;\n}\n\ntemplate<typename SchedulerTraits>\nvoid custom_scheduler<SchedulerTraits>::local_wait_for_all( task& parent, task* child ) {\n    __TBB_ASSERT( governor::is_set(this), NULL );\n    __TBB_ASSERT( parent.ref_count() >= (child && child->parent() == &parent ? 2 : 1), \"ref_count is too small\" );\n    assert_task_pool_valid();\n    // Using parent's refcount in sync_prepare (in the stealing loop below) is\n    // a workaround for TP. 
We need to name it here to display correctly in Ampl.\n    if( SchedulerTraits::itt_possible )\n        ITT_SYNC_CREATE(&parent.prefix().ref_count, SyncType_Scheduler, SyncObj_TaskStealingLoop);\n#if __TBB_TASK_GROUP_CONTEXT\n    __TBB_ASSERT( parent.prefix().context || (is_worker() && &parent == my_dummy_task), \"parent task does not have context\" );\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    task* t = child;\n    // Constant all_local_work_done is an unreachable refcount value that prevents\n    // early quitting the dispatch loop. It is defined to be in the middle of the range\n    // of negative values representable by the reference_count type.\n    static const reference_count\n        // For normal dispatch loops\n        parents_work_done = 1,\n        // For termination dispatch loops in masters\n        all_local_work_done = (reference_count)3 << (sizeof(reference_count) * 8 - 2);\n    reference_count quit_point;\n#if __TBB_TASK_PRIORITY\n    __TBB_ASSERT( (uintptr_t)*my_ref_top_priority < (uintptr_t)num_priority_levels, NULL );\n    volatile intptr_t *old_ref_top_priority = my_ref_top_priority;\n    // When entering nested parallelism level market level counter\n    // must be replaced with the one local to this arena.\n    volatile uintptr_t *old_ref_reload_epoch = my_ref_reload_epoch;\n#endif /* __TBB_TASK_PRIORITY */\n    task* old_dispatching_task = my_dispatching_task;\n    my_dispatching_task = my_innermost_running_task;\n    if( master_outermost_level() ) {\n        // We are in the outermost task dispatch loop of a master thread or a worker which mimics master\n        __TBB_ASSERT( !is_worker() || my_dispatching_task != old_dispatching_task, NULL );\n        quit_point = &parent == my_dummy_task ? 
all_local_work_done : parents_work_done;\n    } else {\n        quit_point = parents_work_done;\n#if __TBB_TASK_PRIORITY\n        if ( &parent != my_dummy_task ) {\n            // We are in a nested dispatch loop.\n            // Market or arena priority must not prevent child tasks from being\n            // executed so that dynamic priority changes did not cause deadlock.\n            my_ref_top_priority = &parent.prefix().context->my_priority;\n            my_ref_reload_epoch = &my_arena->my_reload_epoch;\n            if(my_ref_reload_epoch != old_ref_reload_epoch)\n                my_local_reload_epoch = *my_ref_reload_epoch-1;\n        }\n#endif /* __TBB_TASK_PRIORITY */\n    }\n\n    cpu_ctl_env_helper cpu_ctl_helper;\n    if ( t )\n        cpu_ctl_helper.set_env( __TBB_CONTEXT_ARG1(t->prefix().context) );\n\n#if TBB_USE_EXCEPTIONS\n    // Infinite safeguard EH loop\n    for (;;) {\n    try {\n#endif /* TBB_USE_EXCEPTIONS */\n    // Outer loop receives tasks from global environment (via mailbox, FIFO queue(s),\n    // and by  stealing from other threads' task pools).\n    // All exit points from the dispatch loop are located in its immediate scope.\n    for(;;) {\n        // Middle loop retrieves tasks from the local task pool.\n        for(;;) {\n            // Inner loop evaluates tasks coming from nesting loops and those returned\n            // by just executed tasks (bypassing spawn or enqueue calls).\n            while(t) {\n                __TBB_ASSERT( my_inbox.is_idle_state(false), NULL );\n                __TBB_ASSERT(!is_proxy(*t),\"unexpected proxy\");\n                __TBB_ASSERT( t->prefix().owner, NULL );\n                assert_task_valid(*t);\n#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_ASSERT\n                assert_context_valid(t->prefix().context);\n                if ( !t->prefix().context->my_cancellation_requested )\n#endif\n                __TBB_ASSERT( 1L<<t->state() & (1L<<task::allocated|1L<<task::ready|1L<<task::reexecute), NULL );\n 
               assert_task_pool_valid();\n#if __TBB_TASK_PRIORITY\n                intptr_t p = priority(*t);\n                if ( p != *my_ref_top_priority && (t->prefix().extra_state & es_task_enqueued) == 0) {\n                    assert_priority_valid(p);\n                    if ( p != my_arena->my_top_priority ) {\n                        my_market->update_arena_priority( *my_arena, p );\n                    }\n                    if ( p < effective_reference_priority() ) {\n                        if ( !my_offloaded_tasks ) {\n                            my_offloaded_task_list_tail_link = &t->prefix().next_offloaded;\n                            // Erase possible reference to the owner scheduler (next_offloaded is a union member)\n                            *my_offloaded_task_list_tail_link = NULL;\n                        }\n                        offload_task( *t, p );\n                        if ( in_arena() ) {\n                            t = winnow_task_pool();\n                            if ( t )\n                                continue;\n                        }\n                        else {\n                            // Mark arena as full to unlock arena priority level adjustment\n                            // by arena::is_out_of_work(), and ensure worker's presence.\n                            my_arena->advertise_new_work<false>();\n                        }\n                        goto stealing_ground;\n                    }\n                }\n#endif /* __TBB_TASK_PRIORITY */\n                task* t_next = NULL;\n                my_innermost_running_task = t;\n                t->prefix().owner = this;\n                t->prefix().state = task::executing;\n#if __TBB_TASK_GROUP_CONTEXT\n                if ( !t->prefix().context->my_cancellation_requested )\n#endif\n                {\n                    GATHER_STATISTIC( ++my_counters.tasks_executed );\n                    GATHER_STATISTIC( my_counters.avg_arena_concurrency += 
my_arena->num_workers_active() );\n                    GATHER_STATISTIC( my_counters.avg_assigned_workers += my_arena->my_num_workers_allotted );\n#if __TBB_TASK_PRIORITY\n                    GATHER_STATISTIC( my_counters.avg_arena_prio += p );\n                    GATHER_STATISTIC( my_counters.avg_market_prio += my_market->my_global_top_priority );\n#endif /* __TBB_TASK_PRIORITY */\n                    ITT_STACK(SchedulerTraits::itt_possible, callee_enter, t->prefix().context->itt_caller);\n                    t_next = t->execute();\n                    ITT_STACK(SchedulerTraits::itt_possible, callee_leave, t->prefix().context->itt_caller);\n                    if (t_next) {\n                        __TBB_ASSERT( t_next->state()==task::allocated,\n                                \"if task::execute() returns task, it must be marked as allocated\" );\n                        reset_extra_state(t_next);\n#if TBB_USE_ASSERT\n                        affinity_id next_affinity=t_next->prefix().affinity;\n                        if (next_affinity != 0 && next_affinity != my_affinity_id)\n                            GATHER_STATISTIC( ++my_counters.affinity_ignored );\n#endif\n                    }\n                }\n                assert_task_pool_valid();\n                switch( t->state() ) {\n                    case task::executing: {\n                        task* s = t->parent();\n                        __TBB_ASSERT( my_innermost_running_task==t, NULL );\n                        __TBB_ASSERT( t->prefix().ref_count==0, \"Task still has children after it has been executed\" );\n                        t->~task();\n                        if( s )\n                            tally_completion_of_predecessor(*s, t_next);\n                        free_task<no_hint>( *t );\n                        assert_task_pool_valid();\n                        break;\n                    }\n\n                    case task::recycle: // set by recycle_as_safe_continuation()\n           
             t->prefix().state = task::allocated;\n#if __TBB_RECYCLE_TO_ENQUEUE\n                    case task::to_enqueue: // set by recycle_to_enqueue()\n#endif\n                        __TBB_ASSERT( t_next != t, \"a task returned from method execute() can not be recycled in another way\" );\n                        reset_extra_state(t);\n                        // for safe continuation, need atomically decrement ref_count;\n                        tally_completion_of_predecessor(*t, t_next);\n                        assert_task_pool_valid();\n                        break;\n\n                    case task::reexecute: // set by recycle_to_reexecute()\n                        __TBB_ASSERT( t_next, \"reexecution requires that method execute() return another task\" );\n                        __TBB_ASSERT( t_next != t, \"a task returned from method execute() can not be recycled in another way\" );\n                        t->prefix().state = task::allocated;\n                        reset_extra_state(t);\n                        local_spawn( *t, t->prefix().next );\n                        assert_task_pool_valid();\n                        break;\n                    case task::allocated:\n                        reset_extra_state(t);\n                        break;\n#if TBB_USE_ASSERT\n                    case task::ready:\n                        __TBB_ASSERT( false, \"task is in READY state upon return from method execute()\" );\n                        break;\n                    default:\n                        __TBB_ASSERT( false, \"illegal state\" );\n#else\n                    default: // just to shut up some compilation warnings\n                        break;\n#endif /* TBB_USE_ASSERT */\n                }\n                GATHER_STATISTIC( t_next ? 
++my_counters.spawns_bypassed : 0 );\n                t = t_next;\n            } // end of scheduler bypass loop\n\n            assert_task_pool_valid();\n            if ( parent.prefix().ref_count == quit_point ) {\n                __TBB_ASSERT( quit_point != all_local_work_done, NULL );\n                __TBB_control_consistency_helper(); // on ref_count\n                ITT_NOTIFY(sync_acquired, &parent.prefix().ref_count);\n                goto done;\n            }\n            if ( in_arena() ) {\n                t = get_task();\n            }\n            else {\n                __TBB_ASSERT( is_quiescent_local_task_pool_reset(), NULL );\n                break;\n            }\n            __TBB_ASSERT(!t || !is_proxy(*t),\"unexpected proxy\");\n            assert_task_pool_valid();\n\n            if ( !t ) break;\n\n            cpu_ctl_helper.set_env( __TBB_CONTEXT_ARG1(t->prefix().context) );\n        }; // end of local task pool retrieval loop\n\n#if __TBB_TASK_PRIORITY\nstealing_ground:\n#endif /* __TBB_TASK_PRIORITY */\n#if __TBB_HOARD_NONLOCAL_TASKS\n        // before stealing, previously stolen task objects are returned\n        for (; my_nonlocal_free_list; my_nonlocal_free_list = t ) {\n            t = my_nonlocal_free_list->prefix().next;\n            free_nonlocal_small_task( *my_nonlocal_free_list );\n        }\n#endif\n        if ( quit_point == all_local_work_done ) {\n            __TBB_ASSERT( !in_arena() && is_quiescent_local_task_pool_reset(), NULL );\n            __TBB_ASSERT( !worker_outermost_level(), NULL );\n            my_innermost_running_task = my_dispatching_task;\n            my_dispatching_task = old_dispatching_task;\n#if __TBB_TASK_PRIORITY\n            my_ref_top_priority = old_ref_top_priority;\n            if(my_ref_reload_epoch != old_ref_reload_epoch)\n                my_local_reload_epoch = *old_ref_reload_epoch-1;\n            my_ref_reload_epoch = old_ref_reload_epoch;\n#endif /* __TBB_TASK_PRIORITY */\n            
return;\n        }\n        // The following assertion may be falsely triggered in the presence of enqueued tasks\n        //__TBB_ASSERT( my_arena->my_max_num_workers > 0 || my_market->my_ref_count > 1\n        //              || parent.prefix().ref_count == 1, \"deadlock detected\" );\n\n        // Dispatching task pointer is NULL *iff* this is a worker thread in its outermost\n        // dispatch loop (i.e. its execution stack is empty). In this case it should exit it\n        // either when there is no more work in the current arena, or when revoked by the market.\n        \n        t = receive_or_steal_task( parent.prefix().ref_count, worker_outermost_level() );\n        if ( !t )\n            goto done;\n        __TBB_ASSERT(!is_proxy(*t),\"unexpected proxy\");\n\n        // The user can capture another the FPU settings to the context so the\n        // cached data in the helper can be out-of-date and we cannot do fast\n        // check.\n        cpu_ctl_helper.set_env( __TBB_CONTEXT_ARG1(t->prefix().context) );\n    } // end of infinite stealing loop\n#if TBB_USE_EXCEPTIONS\n    __TBB_ASSERT( false, \"Must never get here\" );\n    } // end of try-block\n    TbbCatchAll( t->prefix().context );\n    // Complete post-processing ...\n    if( t->state() == task::recycle\n#if __TBB_RECYCLE_TO_ENQUEUE\n        // TODO: the enqueue semantics gets lost below, consider reimplementing\n        ||  t->state() == task::to_enqueue\n#endif\n      ) {\n        // ... 
for recycled tasks to atomically decrement ref_count\n        t->prefix().state = task::allocated;\n        if( SchedulerTraits::itt_possible )\n            ITT_NOTIFY(sync_releasing, &t->prefix().ref_count);\n        if( __TBB_FetchAndDecrementWrelease(&t->prefix().ref_count)==1 ) {\n            if( SchedulerTraits::itt_possible )\n                ITT_NOTIFY(sync_acquired, &t->prefix().ref_count);\n        }else{\n            t = NULL;\n        }\n    }\n    } // end of infinite EH loop\n    __TBB_ASSERT( false, \"Must never get here too\" );\n#endif /* TBB_USE_EXCEPTIONS */\ndone:\n    my_innermost_running_task = my_dispatching_task;\n    my_dispatching_task = old_dispatching_task;\n#if __TBB_TASK_PRIORITY\n    my_ref_top_priority = old_ref_top_priority;\n    if(my_ref_reload_epoch != old_ref_reload_epoch)\n        my_local_reload_epoch = *old_ref_reload_epoch-1;\n    my_ref_reload_epoch = old_ref_reload_epoch;\n#endif /* __TBB_TASK_PRIORITY */\n    if ( !ConcurrentWaitsEnabled(parent) ) {\n        if ( parent.prefix().ref_count != parents_work_done ) {\n            // This is a worker that was revoked by the market.\n#if __TBB_TASK_ARENA\n            __TBB_ASSERT( worker_outermost_level(),\n                \"Worker thread exits nested dispatch loop prematurely\" );\n#else\n            __TBB_ASSERT( is_worker() && worker_outermost_level(),\n                \"Worker thread exits nested dispatch loop prematurely\" );\n#endif\n            return;\n        }\n        parent.prefix().ref_count = 0;\n    }\n#if TBB_USE_ASSERT\n    parent.prefix().extra_state &= ~es_ref_count_active;\n#endif /* TBB_USE_ASSERT */\n#if __TBB_TASK_GROUP_CONTEXT\n    __TBB_ASSERT(parent.prefix().context && default_context(), NULL);\n    task_group_context* parent_ctx = parent.prefix().context;\n    if ( parent_ctx->my_cancellation_requested ) {\n        task_group_context::exception_container_type *pe = parent_ctx->my_exception;\n        if ( master_outermost_level() && parent_ctx == 
default_context() ) {\n            // We are in the outermost task dispatch loop of a master thread, and\n            // the whole task tree has been collapsed. So we may clear cancellation data.\n            parent_ctx->my_cancellation_requested = 0;\n            // TODO: Add assertion that master's dummy task context does not have children\n            parent_ctx->my_state &= ~(uintptr_t)task_group_context::may_have_children;\n        }\n        if ( pe ) {\n            // On Windows, FPU control settings changed in the helper destructor are not visible\n            // outside a catch block. So restore the default settings manually before rethrowing\n            // the exception.\n            cpu_ctl_helper.restore_default();\n            pe->throw_self();\n        }\n    }\n    __TBB_ASSERT(!is_worker() || !CancellationInfoPresent(*my_dummy_task),\n        \"Worker's dummy task context modified\");\n    __TBB_ASSERT(!master_outermost_level() || !CancellationInfoPresent(*my_dummy_task),\n        \"Unexpected exception or cancellation data in the master's dummy task\");\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    assert_task_pool_valid();\n}\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_custom_scheduler_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/dynamic_link.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"dynamic_link.h\"\n#include \"tbb/tbb_config.h\"\n\n/*\n    This file is used by both TBB and OpenMP RTL. Do not use __TBB_ASSERT() macro\n    and runtime_warning() function because they are not available in OpenMP. Use\n    LIBRARY_ASSERT and DYNAMIC_LINK_WARNING instead.\n*/\n\n#include <cstdarg>          // va_list etc.\n#if _WIN32\n    #include <malloc.h>\n\n    // Unify system calls\n    #define dlopen( name, flags )   LoadLibraryA( name )\n    #define dlsym( handle, name )   GetProcAddress( handle, name )\n    #define dlclose( handle )       ( ! 
FreeLibrary( handle ) )\n    #define dlerror()               GetLastError()\n#ifndef PATH_MAX\n    #define PATH_MAX                MAX_PATH\n#endif\n#else /* _WIN32 */\n    #include <dlfcn.h>\n    #include <string.h>\n    #include <unistd.h>\n    #include <limits.h>\n    #include <stdlib.h>\n#endif /* _WIN32 */\n\n#if __TBB_WEAK_SYMBOLS_PRESENT\n    //TODO: use function attribute for weak symbols instead of the pragma.\n    #pragma weak dlopen\n    #pragma weak dlsym\n    #pragma weak dlclose\n    #pragma weak dlerror\n    #pragma weak dladdr\n#endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n\n#include \"tbb/tbb_misc.h\"\n\n#define __USE_TBB_ATOMICS       ( !(__linux__&&__ia64__) || __TBB_BUILD )\n#define __USE_STATIC_DL_INIT    (!__ANDROID__)\n\n#if !__USE_TBB_ATOMICS\n#include <pthread.h>\n#endif\n\n/*\ndynamic_link is a common interface for searching for required symbols in an\nexecutable and dynamic libraries.\n\ndynamic_link provides certain guarantees:\n  1. Either all or none of the requested symbols are resolved. Moreover, if\n  symbols are not resolved, the dynamic_link_descriptor table is not modified;\n  2. All returned symbols have secured life time: this means that none of them\n  can be invalidated until dynamic_unlink is called;\n  3. Any loaded library is loaded only via the full path. The full path is that\n  from which the runtime itself was loaded. (This is done to avoid security\n  issues caused by loading libraries from insecure paths).\n\ndynamic_link searches for the requested symbols in three stages, stopping as\nsoon as all of the symbols have been resolved.\n\n  1. Search the global scope:\n    a. On Windows: dynamic_link tries to obtain the handle of the requested\n    library and if it succeeds it resolves the symbols via that handle.\n    b. On Linux: dynamic_link tries to search for the symbols in the global\n    scope via the main program handle. 
If the symbols are present in the global\n    scope their life time is not guaranteed (since dynamic_link does not know\n    anything about the library from which they are exported). Therefore it\n    tries to \"pin\" the symbols by obtaining the library name and reopening it.\n    dlopen may fail to reopen the library in two cases:\n       i. The symbols are exported from the executable. Currently dynamic _link\n      cannot handle this situation, so it will not find these symbols in this\n      step.\n      ii. The necessary library has been unloaded and cannot be reloaded. It\n      seems there is nothing that can be done in this case. No symbols are\n      returned.\n\n  2. Dynamic load: an attempt is made to load the requested library via the\n  full path.\n    The full path used is that from which the runtime itself was loaded. If the\n    library can be loaded, then an attempt is made to resolve the requested\n    symbols in the newly loaded library.\n    If the symbols are not found the library is unloaded.\n\n  3. Weak symbols: if weak symbols are available they are returned.\n*/\n\nOPEN_INTERNAL_NAMESPACE\n\n#if __TBB_WEAK_SYMBOLS_PRESENT || __TBB_DYNAMIC_LOAD_ENABLED\n\n#if !defined(DYNAMIC_LINK_WARNING) && !__TBB_WIN8UI_SUPPORT\n    // Report runtime errors and continue.\n    #define DYNAMIC_LINK_WARNING dynamic_link_warning\n    static void dynamic_link_warning( dynamic_link_error_t code, ... 
) {\n        (void) code;\n    } // library_warning\n#endif /* DYNAMIC_LINK_WARNING */\n    static bool resolve_symbols( dynamic_link_handle module, const dynamic_link_descriptor descriptors[], size_t required )\n    {\n        LIBRARY_ASSERT( module != NULL, \"Module handle is NULL\" );\n        if ( module == NULL )\n            return false;\n\n        #if __TBB_WEAK_SYMBOLS_PRESENT\n            if ( !dlsym ) return false;\n        #endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n\n        const size_t n_desc=20; // Usually we don't have more than 20 descriptors per library\n        LIBRARY_ASSERT( required <= n_desc, \"Too many descriptors is required\" );\n        if ( required > n_desc ) return false;\n        pointer_to_handler h[n_desc];\n\n        for ( size_t k = 0; k < required; ++k ) {\n            dynamic_link_descriptor const & desc = descriptors[k];\n            pointer_to_handler addr = (pointer_to_handler)dlsym( module, desc.name );\n            if ( !addr ) {\n                return false;\n            }\n            h[k] = addr;\n        }\n\n        // Commit the entry points.\n        // Cannot use memset here, because the writes must be atomic.\n        for( size_t k = 0; k < required; ++k )\n            *descriptors[k].handler = h[k];\n        return true;\n    }\n\n#if __TBB_WIN8UI_SUPPORT\n    bool dynamic_link( const char*  library, const dynamic_link_descriptor descriptors[], size_t required, dynamic_link_handle*, int flags ) {\n        dynamic_link_handle tmp_handle = NULL;\n        TCHAR wlibrary[256];\n        if ( MultiByteToWideChar(CP_UTF8, 0, library, -1, wlibrary, 255) == 0 ) return false;\n        if ( flags & DYNAMIC_LINK_LOAD )\n            tmp_handle = LoadPackagedLibrary( wlibrary, 0 );\n        if (tmp_handle != NULL){\n            return resolve_symbols(tmp_handle, descriptors, required);\n        }else{\n            return false;\n        }\n    }\n    void dynamic_unlink( dynamic_link_handle ) {\n    }\n    void 
dynamic_unlink_all() {\n    }\n#else\n/*\n    There is a security issue on Windows: LoadLibrary() may load and execute malicious code.\n    See http://www.microsoft.com/technet/security/advisory/2269637.mspx for details.\n    To avoid the issue, we have to pass full path (not just library name) to LoadLibrary. This\n    function constructs full path to the specified library (it is assumed the library located\n    side-by-side with the tbb.dll.\n\n    The function constructs absolute path for given relative path. Important: Base directory is not\n    current one, it is the directory tbb.dll loaded from.\n\n    Example:\n        Let us assume \"tbb.dll\" is located in \"c:\\program files\\common\\intel\\\" directory, e. g.\n        absolute path of tbb library is \"c:\\program files\\common\\intel\\tbb.dll\". Absolute path for\n        \"tbbmalloc.dll\" would be \"c:\\program files\\common\\intel\\tbbmalloc.dll\". Absolute path for\n        \"malloc\\tbbmalloc.dll\" would be \"c:\\program files\\common\\intel\\malloc\\tbbmalloc.dll\".\n*/\n\n    // Struct handle_storage is used by dynamic_link routine to store handles of\n    // all loaded or pinned dynamic libraries. 
When TBB is shut down, it calls\n    // dynamic_unlink_all() that unloads modules referenced by handle_storage.\n    // This struct should not have any constructors since it may be used before\n    // the constructor is called.\n    #define MAX_LOADED_MODULES 8 // The number of maximum possible modules which can be loaded\n\n    struct handle_storage {\n    #if __USE_TBB_ATOMICS\n        ::tbb::atomic<size_t> my_size;\n    #else\n        size_t my_size;\n        pthread_spinlock_t my_lock;\n    #endif\n        dynamic_link_handle my_handles[MAX_LOADED_MODULES];\n\n        void add_handle(const dynamic_link_handle &handle) {\n        #if !__USE_TBB_ATOMICS\n            int res = pthread_spin_lock( &my_lock );\n            LIBRARY_ASSERT( res==0, \"pthread_spin_lock failed\" );\n        #endif\n            const size_t ind = my_size++;\n        #if !__USE_TBB_ATOMICS\n            res = pthread_spin_unlock( &my_lock );\n            LIBRARY_ASSERT( res==0, \"pthread_spin_unlock failed\" );\n        #endif\n            LIBRARY_ASSERT( ind < MAX_LOADED_MODULES, \"Too many modules are loaded\" );\n            my_handles[ind] = handle;\n        }\n\n        void free_handles() {\n            const size_t size = my_size;\n            for (size_t i=0; i<size; ++i)\n                dynamic_unlink( my_handles[i] );\n        }\n    };\n\n    handle_storage handles;\n\n#if __USE_TBB_ATOMICS\n    static void atomic_once ( void (*func) (void), tbb::atomic< tbb::internal::do_once_state > &once_state ) {\n        tbb::internal::atomic_do_once( func, once_state );\n    }\n#define ATOMIC_ONCE_DECL( var ) tbb::atomic< tbb::internal::do_once_state > var\n#else\n    static void atomic_once ( void (*func) (), pthread_once_t &once_state ) {\n        pthread_once( &once_state, func );\n    }\n#define ATOMIC_ONCE_DECL( var ) pthread_once_t var = PTHREAD_ONCE_INIT\n#endif\n\n    ATOMIC_ONCE_DECL( init_dl_data_state );\n\n    static struct _ap_data {\n        char _path[PATH_MAX+1];\n        
size_t _len;\n    } ap_data;\n\n    static void init_ap_data() {\n    #if _WIN32\n        // Get handle of our DLL first.\n        HMODULE handle;\n        BOOL brc = GetModuleHandleExA(\n            GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,\n            (LPSTR)( & dynamic_link ), // any function inside the library can be used for the address\n            & handle\n            );\n        if ( !brc ) { // Error occurred.\n            int err = GetLastError();\n            DYNAMIC_LINK_WARNING( dl_sys_fail, \"GetModuleHandleEx\", err );\n            return;\n        }\n        // Now get path to our DLL.\n        DWORD drc = GetModuleFileNameA( handle, ap_data._path, static_cast< DWORD >( PATH_MAX ) );\n        if ( drc == 0 ) { // Error occurred.\n            int err = GetLastError();\n            DYNAMIC_LINK_WARNING( dl_sys_fail, \"GetModuleFileName\", err );\n            return;\n        }\n        if ( drc >= PATH_MAX ) { // Buffer too short.\n            DYNAMIC_LINK_WARNING( dl_buff_too_small );\n            return;\n        }\n        // Find the position of the last backslash.\n        char *backslash = strrchr( ap_data._path, '\\\\' );\n\n        if ( !backslash ) {    // Backslash not found.\n            LIBRARY_ASSERT( backslash!=NULL, \"Unbelievable.\");\n            return;\n        }\n        LIBRARY_ASSERT( backslash >= ap_data._path, \"Unbelievable.\");\n        ap_data._len = (size_t)(backslash - ap_data._path) + 1;\n        *(backslash+1) = 0;\n    #else\n        // Get the library path\n        #if __TBB_WEAK_SYMBOLS_PRESENT\n            if ( !dladdr || !dlerror ) return;\n        #endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n        Dl_info dlinfo;\n        int res = dladdr( (void*)&dynamic_link, &dlinfo ); // any function inside the library can be used for the address\n        if ( !res ) {\n            char const * err = dlerror();\n            DYNAMIC_LINK_WARNING( dl_sys_fail, \"dladdr\", err );\n      
      return;\n        } else {\n            LIBRARY_ASSERT( dlinfo.dli_fname!=NULL, \"Unbelievable.\" );\n        }\n\n        char const *slash = strrchr( dlinfo.dli_fname, '/' );\n        size_t fname_len=0;\n        if ( slash ) {\n            LIBRARY_ASSERT( slash >= dlinfo.dli_fname, \"Unbelievable.\");\n            fname_len = (size_t)(slash - dlinfo.dli_fname) + 1;\n        }\n\n        size_t rc;\n        if ( dlinfo.dli_fname[0]=='/' ) {\n            // The library path is absolute\n            rc = 0;\n            ap_data._len = 0;\n        } else {\n            // The library path is relative so get the current working directory\n            if ( !getcwd( ap_data._path, sizeof(ap_data._path)/sizeof(ap_data._path[0]) ) ) {\n                DYNAMIC_LINK_WARNING( dl_buff_too_small );\n                return;\n            }\n            ap_data._len = strlen( ap_data._path );\n            ap_data._path[ap_data._len++]='/';\n            rc = ap_data._len;\n        }\n\n        if ( fname_len>0 ) {\n            if ( ap_data._len>PATH_MAX ) {\n                DYNAMIC_LINK_WARNING( dl_buff_too_small );\n                ap_data._len=0;\n                return;\n            }\n            strncpy( ap_data._path+rc, dlinfo.dli_fname, fname_len );\n            ap_data._len += fname_len;\n            ap_data._path[ap_data._len]=0;\n        }\n    #endif /* _WIN32 */\n    }\n\n    static void init_dl_data() {\n        init_ap_data();\n    #if !__USE_TBB_ATOMICS\n        int res;\n        res = pthread_spin_init( &handles.my_lock, PTHREAD_PROCESS_SHARED );\n        LIBRARY_ASSERT( res==0, \"pthread_spin_init failed\" );\n    #endif\n    }\n\n    // ap_data structure is initialized with current directory on Linux.\n    // So it should be initialized as soon as possible since the current directory may be changed.\n    // static_init_ap_data object provides this initialization during library loading.\n    static class _static_init_dl_data {\n    public:\n        
_static_init_dl_data() {\n    #if __USE_STATIC_DL_INIT\n            atomic_once( &init_dl_data, init_dl_data_state );\n    #endif\n        }\n    #if !__USE_TBB_ATOMICS\n        ~_static_init_dl_data() {\n            int res;\n            res = pthread_spin_destroy( &handles.my_lock );\n            LIBRARY_ASSERT( res==0, \"pthread_spin_destroy failed\" );\n        }\n    #endif\n    } static_init_dl_data;\n\n    /*\n        The function constructs absolute path for given relative path. Important: Base directory is not\n        current one, it is the directory libtbb.so loaded from.\n\n        Arguments:\n        in  name -- Name of a file (may be with relative path; it must not be an absolute one).\n        out path -- Buffer to save result (absolute path) to.\n        in  len  -- Size of buffer.\n        ret      -- 0         -- Error occurred.\n                    > len     -- Buffer too short, required size returned.\n                    otherwise -- Ok, number of characters (not counting terminating null) written to\n                    buffer.\n    */\n    #if __TBB_DYNAMIC_LOAD_ENABLED\n    static size_t abs_path( char const * name, char * path, size_t len ) {\n        atomic_once( &init_dl_data, init_dl_data_state );\n\n        if ( !ap_data._len )\n            return 0;\n\n        size_t name_len = strlen( name );\n        size_t full_len = name_len+ap_data._len;\n        if ( full_len < len ) {\n            strncpy( path, ap_data._path, ap_data._len );\n            strncpy( path+ap_data._len, name, name_len );\n            path[full_len] = 0;\n        }\n        return full_len;\n    }\n    #endif  // __TBB_DYNAMIC_LOAD_ENABLED\n\n    #if __TBB_WEAK_SYMBOLS_PRESENT\n    static bool weak_symbol_link( const dynamic_link_descriptor descriptors[], size_t required )\n    {\n        // Check if the required entries are present in what was loaded into our process.\n        for ( size_t k = 0; k < required; ++k )\n            if ( !descriptors[k].ptr )\n          
      return false;\n        // Commit the entry points.\n        for ( size_t k = 0; k < required; ++k )\n            *descriptors[k].handler = (pointer_to_handler) descriptors[k].ptr;\n        return true;\n    }\n    #else\n    static bool weak_symbol_link( const dynamic_link_descriptor[], size_t ) {\n        return false;\n    }\n    #endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n\n    void dynamic_unlink( dynamic_link_handle handle ) {\n        if ( handle ) {\n    #if __TBB_WEAK_SYMBOLS_PRESENT\n        LIBRARY_ASSERT( dlclose != NULL, \"dlopen is present but dlclose is NOT present!?\" );\n    #endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n    #if __TBB_DYNAMIC_LOAD_ENABLED\n            dlclose( handle );\n    #endif /* __TBB_DYNAMIC_LOAD_ENABLED */\n        }\n    }\n\n    void dynamic_unlink_all() {\n        handles.free_handles();\n    }\n\n    #if _WIN32\n    static dynamic_link_handle global_symbols_link( const char* library, const dynamic_link_descriptor descriptors[], size_t required ) {\n        dynamic_link_handle library_handle;\n        if ( GetModuleHandleExA( 0, library, &library_handle ) ) {\n            if ( resolve_symbols( library_handle, descriptors, required ) )\n                return library_handle;\n            else\n                FreeLibrary( library_handle );\n        }\n        return 0;\n    }\n    #else /* _WIN32 */\n    // It is supposed that all symbols are from the only one library\n    static dynamic_link_handle pin_symbols( dynamic_link_descriptor desc, const dynamic_link_descriptor descriptors[], size_t required ) {\n        // The library has been loaded by another module and contains at least one requested symbol.\n        // But after we obtained the symbol the library can be unloaded by another thread\n        // invalidating our symbol. 
Therefore we need to pin the library in memory.\n        dynamic_link_handle library_handle;\n        Dl_info info;\n        // Get library's name from earlier found symbol\n        if ( dladdr( (void*)*desc.handler, &info ) ) {\n            // Pin the library\n            library_handle = dlopen( info.dli_fname, RTLD_LAZY );\n            if ( library_handle ) {\n                // If original library was unloaded before we pinned it\n                // and then another module loaded in its place, the earlier\n                // found symbol would become invalid. So revalidate them.\n                if ( !resolve_symbols( library_handle, descriptors, required ) ) {\n                    // Wrong library.\n                    dynamic_unlink(library_handle);\n                    library_handle = 0;\n                }\n            } else {\n                char const * err = dlerror();\n                DYNAMIC_LINK_WARNING( dl_lib_not_found, info.dli_fname, err );\n            }\n        }\n        else {\n            // The library have been unloaded by another thread\n            library_handle = 0;\n        }\n        return library_handle;\n    }\n\n    static dynamic_link_handle global_symbols_link( const char*, const dynamic_link_descriptor descriptors[], size_t required ) {\n    #if __TBB_WEAK_SYMBOLS_PRESENT\n        if ( !dlopen ) return 0;\n    #endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n        dynamic_link_handle library_handle = dlopen( NULL, RTLD_LAZY );\n    #if __ANDROID__\n        // On Android dlopen( NULL ) returns NULL if it is called during dynamic module initialization.\n        if ( !library_handle )\n            return 0;\n    #endif\n        // Check existence of only the first symbol, then use it to find the library and load all necessary symbols\n        pointer_to_handler handler;\n        dynamic_link_descriptor desc = { descriptors[0].name, &handler };\n        if ( resolve_symbols( library_handle, &desc, 1 ) )\n                return 
pin_symbols( desc, descriptors, required );\n        return 0;\n    }\n    #endif /* _WIN32 */\n\n    static void save_library_handle( dynamic_link_handle src, dynamic_link_handle *dst ) {\n        if ( dst )\n            *dst = src;\n        else\n            handles.add_handle( src );\n    }\n\n    dynamic_link_handle dynamic_load( const char* library, const dynamic_link_descriptor descriptors[], size_t required ) {\n    #if __TBB_DYNAMIC_LOAD_ENABLED\n    #if _XBOX\n        return LoadLibrary (library);\n    #else /* _XBOX */\n        size_t const len = PATH_MAX + 1;\n        char path[ len ];\n        size_t rc = abs_path( library, path, len );\n        if ( 0 < rc && rc < len ) {\n    #if _WIN32\n            // Prevent Windows from displaying silly message boxes if it fails to load library\n            // (e.g. because of MS runtime problems - one of those crazy manifest related ones)\n            UINT prev_mode = SetErrorMode (SEM_FAILCRITICALERRORS);\n    #endif /* _WIN32 */\n    #if __TBB_WEAK_SYMBOLS_PRESENT\n        if ( !dlopen ) return 0;\n    #endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n            dynamic_link_handle library_handle = dlopen( path, RTLD_LAZY );\n    #if _WIN32\n            SetErrorMode (prev_mode);\n    #endif /* _WIN32 */\n            if( library_handle ) {\n                if( !resolve_symbols( library_handle, descriptors, required ) ) {\n                    // The loaded library does not contain all the expected entry points\n                    dynamic_unlink( library_handle );\n                    library_handle = NULL;\n                }\n            } else\n                DYNAMIC_LINK_WARNING( dl_lib_not_found, path, dlerror() );\n            return library_handle;\n        } else if ( rc>=len )\n                DYNAMIC_LINK_WARNING( dl_buff_too_small );\n                // rc == 0 means failing of init_ap_data so the warning has already been issued.\n    #endif /* _XBOX */\n    #endif /* __TBB_DYNAMIC_LOAD_ENABLED */\n        
return 0;\n    }\n\n    bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], size_t required, dynamic_link_handle *handle, int flags ) {\n        // TODO: May global_symbols_link find weak symbols?\n        dynamic_link_handle library_handle = ( flags & DYNAMIC_LINK_GLOBAL ) ? global_symbols_link( library, descriptors, required ) : 0;\n\n        if ( !library_handle && ( flags & DYNAMIC_LINK_LOAD ) )\n            library_handle = dynamic_load( library, descriptors, required );\n\n        if ( !library_handle && ( flags & DYNAMIC_LINK_WEAK ) )\n            return weak_symbol_link( descriptors, required );\n\n        save_library_handle( library_handle, handle );\n        return true;\n    }\n\n#endif /*__TBB_WIN8UI_SUPPORT*/\n#else /* __TBB_WEAK_SYMBOLS_PRESENT || __TBB_DYNAMIC_LOAD_ENABLED */\n    bool dynamic_link( const char*, const dynamic_link_descriptor*, size_t, dynamic_link_handle *handle, int ) {\n        if ( handle )\n            *handle=0;\n        return false;\n    }\n\n    void dynamic_unlink( dynamic_link_handle ) {\n    }\n\n    void dynamic_unlink_all() {\n    }\n#endif /* __TBB_WEAK_SYMBOLS_PRESENT || __TBB_DYNAMIC_LOAD_ENABLED */\n\nCLOSE_INTERNAL_NAMESPACE\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/dynamic_link.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_dynamic_link\n#define __TBB_dynamic_link\n\n// Support for dynamic loading entry points from other shared libraries.\n\n#include \"tbb/tbb_stddef.h\"\n\n#ifdef LIBRARY_ASSERT\n    #undef __TBB_ASSERT\n    #define __TBB_ASSERT(x,y) LIBRARY_ASSERT(x,y)\n#else\n    #define LIBRARY_ASSERT(x,y) __TBB_ASSERT_EX(x,y)\n#endif /* !LIBRARY_ASSERT */\n\n/** By default, symbols declared and defined here go into namespace tbb::internal.\n    To put them in other namespace, define macros OPEN_INTERNAL_NAMESPACE\n    and CLOSE_INTERNAL_NAMESPACE to override the following default definitions. **/\n#ifndef OPEN_INTERNAL_NAMESPACE\n#define OPEN_INTERNAL_NAMESPACE namespace tbb { namespace internal {\n#define CLOSE_INTERNAL_NAMESPACE }}\n#endif /* OPEN_INTERNAL_NAMESPACE */\n\n#include <stddef.h>\n#if _WIN32\n#include \"tbb/machine/windows_api.h\"\n#endif /* _WIN32 */\n\nOPEN_INTERNAL_NAMESPACE\n\n//! Type definition for a pointer to a void somefunc(void)\ntypedef void (*pointer_to_handler)();\n\n//! The helper to construct dynamic_link_descriptor structure\n// Double cast through the void* in DLD macro is necessary to\n// prevent warnings from some compilers (g++ 4.1)\n#if __TBB_WEAK_SYMBOLS_PRESENT\n#define DLD(s,h) {#s, (pointer_to_handler*)(void*)(&h), (pointer_to_handler)&s}\n#else \n#define DLD(s,h) {#s, (pointer_to_handler*)(void*)(&h)}\n#endif /* __TBB_WEAK_SYMBOLS_PRESENT */\n//! Association between a handler name and location of pointer to it.\nstruct dynamic_link_descriptor {\n    //! Name of the handler\n    const char* name;\n    //! Pointer to the handler\n    pointer_to_handler* handler;\n#if __TBB_WEAK_SYMBOLS_PRESENT\n    //! 
Weak symbol\n    pointer_to_handler ptr;\n#endif\n};\n\n#if _WIN32\ntypedef HMODULE dynamic_link_handle;\n#else\ntypedef void* dynamic_link_handle;\n#endif /* _WIN32 */\n\nconst int DYNAMIC_LINK_GLOBAL = 0x01;\nconst int DYNAMIC_LINK_LOAD   = 0x02;\nconst int DYNAMIC_LINK_WEAK   = 0x04;\nconst int DYNAMIC_LINK_ALL    = DYNAMIC_LINK_GLOBAL | DYNAMIC_LINK_LOAD | DYNAMIC_LINK_WEAK;\n\n//! Fill in dynamically linked handlers.\n/** 'library' is the name of the requested library. It should not contain a full\n    path since dynamic_link adds the full path (from which the runtime itself\n    was loaded) to the library name.\n    'required' is the number of the initial entries in the array descriptors[]\n    that have to be found in order for the call to succeed. If the library and\n    all the required handlers are found, then the corresponding handler\n    pointers are set, and the return value is true.  Otherwise the original\n    array of descriptors is left untouched and the return value is false.\n    'required' is limited by 20 (exceeding of this value will result in failure\n    to load the symbols and the return value will be false).\n    'handle' is the handle of the library if it is loaded. Otherwise it is left\n    untouched.\n    'flags' is the set of DYNAMIC_LINK_* flags. 
Each of the DYNAMIC_LINK_* flags\n    allows its corresponding linking stage.\n**/\nbool dynamic_link( const char* library,\n                   const dynamic_link_descriptor descriptors[],\n                   size_t required,\n                   dynamic_link_handle* handle = 0,\n                   int flags = DYNAMIC_LINK_ALL );\n\nvoid dynamic_unlink( dynamic_link_handle handle );\n\nvoid dynamic_unlink_all();\n\nenum dynamic_link_error_t {\n    dl_success = 0,\n    dl_lib_not_found,     // char const * lib, dlerr_t err\n    dl_sym_not_found,     // char const * sym, dlerr_t err\n                          // Note: dlerr_t depends on OS: it is char const * on Linux* and OS X*, int on Windows*.\n    dl_sys_fail,          // char const * func, int err\n    dl_buff_too_small     // none\n}; // dynamic_link_error_t\n\nCLOSE_INTERNAL_NAMESPACE\n\n#endif /* __TBB_dynamic_link */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/enumerable_thread_specific.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_enumerable_thread_specific_H\n#define __TBB_enumerable_thread_specific_H\n\n#include \"concurrent_vector.h\"\n#include \"tbb_thread.h\"\n#include \"tbb_allocator.h\"\n#include \"tbb_profiling.h\"\n#include \"cache_aligned_allocator.h\"\n#include \"aligned_space.h\"\n#include <string.h>  // for memcpy\n\n#if _WIN32||_WIN64\n#include \"machine/windows_api.h\"\n#else\n#include <pthread.h>\n#endif\n\nnamespace tbb {\n\n//! 
enum for selecting between single key and key-per-instance versions\nenum ets_key_usage_type { ets_key_per_instance, ets_no_key };\n\nnamespace interface6 {\n\n    //! @cond\n    namespace internal {\n\n        using namespace tbb::internal;\n\n        template<ets_key_usage_type ETS_key_type>\n        class ets_base: tbb::internal::no_copy {\n        protected:\n#if _WIN32||_WIN64\n            typedef DWORD key_type;\n#else\n            typedef pthread_t key_type;\n#endif\n#if __TBB_PROTECTED_NESTED_CLASS_BROKEN\n        public:\n#endif\n            struct slot;\n\n            struct array {\n                array* next;\n                size_t lg_size;\n                slot& at( size_t k ) {\n                    return ((slot*)(void*)(this+1))[k];\n                }\n                size_t size() const {return (size_t)1<<lg_size;}\n                size_t mask() const {return size()-1;}\n                size_t start( size_t h ) const {\n                    return h>>(8*sizeof(size_t)-lg_size);\n                }\n            };\n            struct slot {\n                key_type key;\n                void* ptr;\n                bool empty() const {return !key;}\n                bool match( key_type k ) const {return key==k;}\n                bool claim( key_type k ) {\n                    __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);\n                    return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;\n                }\n            };\n#if __TBB_PROTECTED_NESTED_CLASS_BROKEN\n        protected:\n#endif\n\n            static key_type key_of_current_thread() {\n               tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();\n               key_type k;\n               memcpy( &k, &id, sizeof(k) );\n               return k;\n            }\n\n            //! 
Root of linked list of arrays of decreasing size.\n            /** NULL if and only if my_count==0.\n                Each array in the list is half the size of its predecessor. */\n            atomic<array*> my_root;\n            atomic<size_t> my_count;\n            virtual void* create_local() = 0;\n            virtual void* create_array(size_t _size) = 0;  // _size in bytes\n            virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes\n            array* allocate( size_t lg_size ) {\n                size_t n = 1<<lg_size;\n                array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));\n                a->lg_size = lg_size;\n                std::memset( a+1, 0, n*sizeof(slot) );\n                return a;\n            }\n            void free(array* a) {\n                size_t n = 1<<(a->lg_size);\n                free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );\n            }\n            static size_t hash( key_type k ) {\n                // Multiplicative hashing.  Client should use *upper* bits.\n                // casts required for Mac gcc4.* compiler\n                return uintptr_t(k)*tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value;\n            }\n\n            ets_base() {my_root=NULL; my_count=0;}\n            virtual ~ets_base();  // g++ complains if this is not virtual...\n            void* table_lookup( bool& exists );\n            void table_clear();\n            // table_find is used in copying ETS, so is not used in concurrent context.  
So\n            // we don't need itt annotations for it.\n            slot& table_find( key_type k ) {\n                size_t h = hash(k);\n                array* r = my_root;\n                size_t mask = r->mask();\n                for(size_t i = r->start(h);;i=(i+1)&mask) {\n                    slot& s = r->at(i);\n                    if( s.empty() || s.match(k) )\n                        return s;\n                }\n            }\n            void table_reserve_for_copy( const ets_base& other ) {\n                __TBB_ASSERT(!my_root,NULL);\n                __TBB_ASSERT(!my_count,NULL);\n                if( other.my_root ) {\n                    array* a = allocate(other.my_root->lg_size);\n                    a->next = NULL;\n                    my_root = a;\n                    my_count = other.my_count;\n                }\n            }\n        };\n\n        template<ets_key_usage_type ETS_key_type>\n        ets_base<ETS_key_type>::~ets_base() {\n            __TBB_ASSERT(!my_root, NULL);\n        }\n\n        template<ets_key_usage_type ETS_key_type>\n        void ets_base<ETS_key_type>::table_clear() {\n            while( array* r = my_root ) {\n                my_root = r->next;\n                free(r);\n            }\n            my_count = 0;\n        }\n\n        template<ets_key_usage_type ETS_key_type>\n        void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {\n            const key_type k = key_of_current_thread();\n\n            __TBB_ASSERT(k!=0,NULL);\n            void* found;\n            size_t h = hash(k);\n            for( array* r=my_root; r; r=r->next ) {\n                call_itt_notify(acquired,r);\n                size_t mask=r->mask();\n                for(size_t i = r->start(h); ;i=(i+1)&mask) {\n                    slot& s = r->at(i);\n                    if( s.empty() ) break;\n                    if( s.match(k) ) {\n                        if( r==my_root ) {\n                            // Success at top level\n      
                      exists = true;\n                            return s.ptr;\n                        } else {\n                            // Success at some other level.  Need to insert at top level.\n                            exists = true;\n                            found = s.ptr;\n                            goto insert;\n                        }\n                    }\n                }\n            }\n            // Key does not yet exist.  The density of slots in the table does not exceed 0.5,\n            // for if this will occur a new table is allocated with double the current table\n            // size, which is swapped in as the new root table.  So an empty slot is guaranteed.\n            exists = false;\n            found = create_local();\n            {\n                size_t c = ++my_count;\n                array* r = my_root;\n                call_itt_notify(acquired,r);\n                if( !r || c>r->size()/2 ) {\n                    size_t s = r ? r->lg_size : 2;\n                    while( c>size_t(1)<<(s-1) ) ++s;\n                    array* a = allocate(s);\n                    for(;;) {\n                        a->next = r;\n                        call_itt_notify(releasing,a);\n                        array* new_r = my_root.compare_and_swap(a,r);\n                        if( new_r==r ) break;\n                        call_itt_notify(acquired, new_r);\n                        if( new_r->lg_size>=s ) {\n                            // Another thread inserted an equal or  bigger array, so our array is superfluous.\n                            free(a);\n                            break;\n                        }\n                        r = new_r;\n                    }\n                }\n            }\n        insert:\n        // Whether a slot has been found in an older table, or if it has been inserted at this level,\n        // it has already been accounted for in the total.  
Guaranteed to be room for it, and it is\n        // not present, so search for empty slot and use it.\n            array* ir = my_root;\n            call_itt_notify(acquired, ir);\n            size_t mask = ir->mask();\n            for(size_t i = ir->start(h);;i=(i+1)&mask) {\n                slot& s = ir->at(i);\n                if( s.empty() ) {\n                    if( s.claim(k) ) {\n                        s.ptr = found;\n                        return found;\n                    }\n                }\n            }\n        }\n\n        //! Specialization that exploits native TLS\n        template <>\n        class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {\n            typedef ets_base<ets_no_key> super;\n#if _WIN32||_WIN64\n#if __TBB_WIN8UI_SUPPORT\n            typedef DWORD tls_key_t;\n            void create_key() { my_key = FlsAlloc(NULL); }\n            void destroy_key() { FlsFree(my_key); }\n            void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); }\n            void* get_tls() { return (void *)FlsGetValue(my_key); }\n#else\n            typedef DWORD tls_key_t;\n            void create_key() { my_key = TlsAlloc(); }\n            void destroy_key() { TlsFree(my_key); }\n            void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }\n            void* get_tls() { return (void *)TlsGetValue(my_key); }\n#endif\n#else\n            typedef pthread_key_t tls_key_t;\n            void create_key() { pthread_key_create(&my_key, NULL); }\n            void destroy_key() { pthread_key_delete(my_key); }\n            void set_tls( void * value ) const { pthread_setspecific(my_key, value); }\n            void* get_tls() const { return pthread_getspecific(my_key); }\n#endif\n            tls_key_t my_key;\n            virtual void* create_local() = 0;\n            virtual void* create_array(size_t _size) = 0;  // _size in bytes\n            virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes\n  
      public:\n            ets_base() {create_key();}\n            ~ets_base() {destroy_key();}\n            void* table_lookup( bool& exists ) {\n                void* found = get_tls();\n                if( found ) {\n                    exists=true;\n                } else {\n                    found = super::table_lookup(exists);\n                    set_tls(found);\n                }\n                return found;\n            }\n            void table_clear() {\n                destroy_key();\n                create_key();\n                super::table_clear();\n            }\n        };\n\n        //! Random access iterator for traversing the thread local copies.\n        template< typename Container, typename Value >\n        class enumerable_thread_specific_iterator\n#if defined(_WIN64) && defined(_MSC_VER)\n            // Ensure that Microsoft's internal template function _Val_type works correctly.\n            : public std::iterator<std::random_access_iterator_tag,Value>\n#endif /* defined(_WIN64) && defined(_MSC_VER) */\n        {\n            //! 
current position in the concurrent_vector\n\n            Container *my_container;\n            typename Container::size_type my_index;\n            mutable Value *my_value;\n\n            template<typename C, typename T>\n            friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset,\n                                                                       const enumerable_thread_specific_iterator<C,T>& v );\n\n            template<typename C, typename T, typename U>\n            friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,\n                                    const enumerable_thread_specific_iterator<C,U>& j );\n\n            template<typename C, typename T, typename U>\n            friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,\n                                   const enumerable_thread_specific_iterator<C,U>& j );\n\n            template<typename C, typename T, typename U>\n            friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );\n\n            template<typename C, typename U>\n            friend class enumerable_thread_specific_iterator;\n\n            public:\n\n            enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :\n                my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}\n\n            //! 
Default constructor\n            enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}\n\n            template<typename U>\n            enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :\n                    my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}\n\n            enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {\n                return enumerable_thread_specific_iterator(*my_container, my_index + offset);\n            }\n\n            enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {\n                my_index += offset;\n                my_value = NULL;\n                return *this;\n            }\n\n            enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {\n                return enumerable_thread_specific_iterator( *my_container, my_index-offset );\n            }\n\n            enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {\n                my_index -= offset;\n                my_value = NULL;\n                return *this;\n            }\n\n            Value& operator*() const {\n                Value* value = my_value;\n                if( !value ) {\n                    value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);\n                }\n                __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), \"corrupt cache\" );\n                return *value;\n            }\n\n            Value& operator[]( ptrdiff_t k ) const {\n               return (*my_container)[my_index + k].value;\n            }\n\n            Value* operator->() const {return &operator*();}\n\n            enumerable_thread_specific_iterator& operator++() {\n                ++my_index;\n                my_value = NULL;\n                return *this;\n            }\n\n         
   enumerable_thread_specific_iterator& operator--() {\n                --my_index;\n                my_value = NULL;\n                return *this;\n            }\n\n            //! Post increment\n            enumerable_thread_specific_iterator operator++(int) {\n                enumerable_thread_specific_iterator result = *this;\n                ++my_index;\n                my_value = NULL;\n                return result;\n            }\n\n            //! Post decrement\n            enumerable_thread_specific_iterator operator--(int) {\n                enumerable_thread_specific_iterator result = *this;\n                --my_index;\n                my_value = NULL;\n                return result;\n            }\n\n            // STL support\n            typedef ptrdiff_t difference_type;\n            typedef Value value_type;\n            typedef Value* pointer;\n            typedef Value& reference;\n            typedef std::random_access_iterator_tag iterator_category;\n        };\n\n        template<typename Container, typename T>\n        enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset,\n                                                                    const enumerable_thread_specific_iterator<Container,T>& v ) {\n            return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );\n        }\n\n        template<typename Container, typename T, typename U>\n        bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,\n                         const enumerable_thread_specific_iterator<Container,U>& j ) {\n            return i.my_index==j.my_index && i.my_container == j.my_container;\n        }\n\n        template<typename Container, typename T, typename U>\n        bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,\n                         const enumerable_thread_specific_iterator<Container,U>& j ) {\n            return !(i==j);\n        }\n\n 
       template<typename Container, typename T, typename U>\n        bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,\n                        const enumerable_thread_specific_iterator<Container,U>& j ) {\n            return i.my_index<j.my_index;\n        }\n\n        template<typename Container, typename T, typename U>\n        bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,\n                        const enumerable_thread_specific_iterator<Container,U>& j ) {\n            return j<i;\n        }\n\n        template<typename Container, typename T, typename U>\n        bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,\n                         const enumerable_thread_specific_iterator<Container,U>& j ) {\n            return !(i<j);\n        }\n\n        template<typename Container, typename T, typename U>\n        bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,\n                         const enumerable_thread_specific_iterator<Container,U>& j ) {\n            return !(j<i);\n        }\n\n        template<typename Container, typename T, typename U>\n        ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,\n                             const enumerable_thread_specific_iterator<Container,U>& j ) {\n            return i.my_index-j.my_index;\n        }\n\n    template<typename SegmentedContainer, typename Value >\n        class segmented_iterator\n#if defined(_WIN64) && defined(_MSC_VER)\n        : public std::iterator<std::input_iterator_tag, Value>\n#endif\n        {\n            template<typename C, typename T, typename U>\n            friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);\n\n            template<typename C, typename T, typename U>\n            friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);\n\n            template<typename C, 
typename U>\n            friend class segmented_iterator;\n\n            public:\n\n                segmented_iterator() {my_segcont = NULL;}\n\n                segmented_iterator( const SegmentedContainer& _segmented_container ) :\n                    my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),\n                    outer_iter(my_segcont->end()) { }\n\n                ~segmented_iterator() {}\n\n                typedef typename SegmentedContainer::iterator outer_iterator;\n                typedef typename SegmentedContainer::value_type InnerContainer;\n                typedef typename InnerContainer::iterator inner_iterator;\n\n                // STL support\n                typedef ptrdiff_t difference_type;\n                typedef Value value_type;\n                typedef typename SegmentedContainer::size_type size_type;\n                typedef Value* pointer;\n                typedef Value& reference;\n                typedef std::input_iterator_tag iterator_category;\n\n                // Copy Constructor\n                template<typename U>\n                segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :\n                    my_segcont(other.my_segcont),\n                    outer_iter(other.outer_iter),\n                    // can we assign a default-constructed iterator to inner if we're at the end?\n                    inner_iter(other.inner_iter)\n                {}\n\n                // assignment\n                template<typename U>\n                segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {\n                    if(this != &other) {\n                        my_segcont = other.my_segcont;\n                        outer_iter = other.outer_iter;\n                        if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;\n                    }\n                    return *this;\n                }\n\n                // allow assignment of 
outer iterator to segmented iterator.  Once it is\n                // assigned, move forward until a non-empty inner container is found or\n                // the end of the outer container is reached.\n                segmented_iterator& operator=(const outer_iterator& new_outer_iter) {\n                    __TBB_ASSERT(my_segcont != NULL, NULL);\n                    // check that this iterator points to something inside the segmented container\n                    for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {\n                        if( !outer_iter->empty() ) {\n                            inner_iter = outer_iter->begin();\n                            break;\n                        }\n                    }\n                    return *this;\n                }\n\n                // pre-increment\n                segmented_iterator& operator++() {\n                    advance_me();\n                    return *this;\n                }\n\n                // post-increment\n                segmented_iterator operator++(int) {\n                    segmented_iterator tmp = *this;\n                    operator++();\n                    return tmp;\n                }\n\n                bool operator==(const outer_iterator& other_outer) const {\n                    __TBB_ASSERT(my_segcont != NULL, NULL);\n                    return (outer_iter == other_outer &&\n                            (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));\n                }\n\n                bool operator!=(const outer_iterator& other_outer) const {\n                    return !operator==(other_outer);\n\n                }\n\n                // (i)* RHS\n                reference operator*() const {\n                    __TBB_ASSERT(my_segcont != NULL, NULL);\n                    __TBB_ASSERT(outer_iter != my_segcont->end(), \"Dereferencing a pointer at end of container\");\n                    __TBB_ASSERT(inner_iter != 
outer_iter->end(), NULL); // should never happen\n                    return *inner_iter;\n                }\n\n                // i->\n                pointer operator->() const { return &operator*();}\n\n            private:\n                SegmentedContainer*             my_segcont;\n                outer_iterator outer_iter;\n                inner_iterator inner_iter;\n\n                void advance_me() {\n                    __TBB_ASSERT(my_segcont != NULL, NULL);\n                    __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers\n                    __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.\n                    ++inner_iter;\n                    while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {\n                        inner_iter = outer_iter->begin();\n                    }\n                }\n        };    // segmented_iterator\n\n        template<typename SegmentedContainer, typename T, typename U>\n        bool operator==( const segmented_iterator<SegmentedContainer,T>& i,\n                         const segmented_iterator<SegmentedContainer,U>& j ) {\n            if(i.my_segcont != j.my_segcont) return false;\n            if(i.my_segcont == NULL) return true;\n            if(i.outer_iter != j.outer_iter) return false;\n            if(i.outer_iter == i.my_segcont->end()) return true;\n            return i.inner_iter == j.inner_iter;\n        }\n\n        // !=\n        template<typename SegmentedContainer, typename T, typename U>\n        bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,\n                         const segmented_iterator<SegmentedContainer,U>& j ) {\n            return !(i==j);\n        }\n\n        template<typename T>\n        struct destruct_only: tbb::internal::no_copy {\n            tbb::aligned_space<T> value;\n            ~destruct_only() {value.begin()[0].~T();}\n        
};\n\n        template<typename T>\n        struct construct_by_default: tbb::internal::no_assign {\n            void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization.\n            construct_by_default( int ) {}\n        };\n\n        template<typename T>\n        struct construct_by_exemplar: tbb::internal::no_assign {\n            const T exemplar;\n            void construct(void*where) {new(where) T(exemplar);}\n            construct_by_exemplar( const T& t ) : exemplar(t) {}\n        };\n\n        template<typename T, typename Finit>\n        struct construct_by_finit: tbb::internal::no_assign {\n            Finit f;\n            void construct(void* where) {new(where) T(f());}\n            construct_by_finit( const Finit& f_ ) : f(f_) {}\n        };\n\n        // storage for initialization function pointer\n        template<typename T>\n        class callback_base {\n        public:\n            // Clone *this\n            virtual callback_base* clone() = 0;\n            // Destruct and free *this\n            virtual void destroy() = 0;\n            // Need virtual destructor to satisfy GCC compiler warning\n            virtual ~callback_base() { }\n            // Construct T at where\n            virtual void construct(void* where) = 0;\n        };\n\n        template <typename T, typename Constructor>\n        class callback_leaf: public callback_base<T>, Constructor {\n            template<typename X> callback_leaf( const X& x ) : Constructor(x) {}\n\n            typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;\n\n            /*override*/ callback_base<T>* clone() {\n                void* where = my_allocator_type().allocate(1);\n                return new(where) callback_leaf(*this);\n            }\n\n            /*override*/ void destroy() {\n                my_allocator_type().destroy(this);\n                my_allocator_type().deallocate(this,1);\n            }\n\n            
/*override*/ void construct(void* where) {\n                Constructor::construct(where);\n            }\n        public:\n            template<typename X>\n            static callback_base<T>* make( const X& x ) {\n                void* where = my_allocator_type().allocate(1);\n                return new(where) callback_leaf(x);\n            }\n        };\n\n        //! Template for adding padding in order to avoid false sharing\n        /** ModularSize should be sizeof(U) modulo the cache line size.\n            All maintenance of the space will be done explicitly on push_back,\n            and all thread local copies must be destroyed before the concurrent\n            vector is deleted.\n        */\n        template<typename U, size_t ModularSize>\n        struct ets_element {\n            ets_element() { /* avoid cl warning C4345 about default initialization of POD types */ }\n            char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];\n            void unconstruct() {\n                tbb::internal::punned_cast<U*>(&value)->~U();\n            }\n        };\n\n    } // namespace internal\n    //! @endcond\n\n    //! 
The enumerable_thread_specific container\n    /** enumerable_thread_specific has the following properties:\n        - thread-local copies are lazily created, with default, exemplar or function initialization.\n        - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant.\n        - the contained objects need not have operator=() defined if combine is not used.\n        - enumerable_thread_specific containers may be copy-constructed or assigned.\n        - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed.\n        - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods\n\n    @par Segmented iterator\n        When the thread-local objects are containers with input_iterators defined, a segmented iterator may\n        be used to iterate over all the elements of all thread-local copies.\n\n    @par combine and combine_each\n        - Both methods are defined for enumerable_thread_specific.\n        - combine() requires the the type T have operator=() defined.\n        - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.)\n        - Both are evaluated in serial context (the methods are assumed to be non-benign.)\n\n    @ingroup containers */\n    template <typename T,\n              typename Allocator=cache_aligned_allocator<T>,\n              ets_key_usage_type ETS_key_type=ets_no_key >\n    class enumerable_thread_specific: internal::ets_base<ETS_key_type> {\n\n        template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;\n\n        typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;\n\n        //! 
A generic range, used to create range objects from the iterators\n        template<typename I>\n        class generic_range_type: public blocked_range<I> {\n        public:\n            typedef T value_type;\n            typedef T& reference;\n            typedef const T& const_reference;\n            typedef I iterator;\n            typedef ptrdiff_t difference_type;\n            generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}\n            template<typename U>\n            generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}\n            generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}\n        };\n\n        typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;\n        typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;\n\n        internal::callback_base<T> *my_construct_callback;\n\n        internal_collection_type my_locals;\n\n        /*override*/ void* create_local() {\n            void* lref = &*my_locals.grow_by(1);\n            my_construct_callback->construct(lref);\n            return lref;\n        }\n\n        void unconstruct_locals() {\n            for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {\n                cvi->unconstruct();\n            }\n        }\n\n        typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;\n\n        // _size is in bytes\n        /*override*/ void* create_array(size_t _size) {\n            size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);\n            return array_allocator_type().allocate(nelements);\n        }\n\n        /*override*/ void free_array( void* _ptr, size_t _size) {\n            size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);\n            
array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);\n        }\n\n    public:\n\n        //! Basic types\n        typedef Allocator allocator_type;\n        typedef T value_type;\n        typedef T& reference;\n        typedef const T& const_reference;\n        typedef T* pointer;\n        typedef const T* const_pointer;\n        typedef typename internal_collection_type::size_type size_type;\n        typedef typename internal_collection_type::difference_type difference_type;\n\n        // Iterator types\n        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;\n        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;\n\n        // Parallel range types\n        typedef generic_range_type< iterator > range_type;\n        typedef generic_range_type< const_iterator > const_range_type;\n\n        //! Default constructor.  Each local instance of T is default constructed.\n        enumerable_thread_specific() :\n            my_construct_callback( internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0) )\n        {}\n\n        //! Constructor with initializer functor.  Each local instance of T is constructed by T(finit()).\n        template <typename Finit>\n        enumerable_thread_specific( Finit finit ) :\n            my_construct_callback( internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit ) )\n        {}\n\n        //! Constructor with exemplar.  Each local instance of T is copied-constructed from the exemplar.\n        enumerable_thread_specific(const T& exemplar) :\n            my_construct_callback( internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar ) )\n        {}\n\n        //! 
Destructor\n        ~enumerable_thread_specific() {\n            my_construct_callback->destroy();\n            this->clear();  // deallocation before the derived class is finished destructing\n            // So free(array *) is still accessible\n        }\n\n        //! returns reference to local, discarding exists\n        reference local() {\n            bool exists;\n            return local(exists);\n        }\n\n        //! Returns reference to calling thread's local copy, creating one if necessary\n        reference local(bool& exists)  {\n            void* ptr = this->table_lookup(exists);\n            return *(T*)ptr;\n        }\n\n        //! Get the number of local copies\n        size_type size() const { return my_locals.size(); }\n\n        //! true if there have been no local copies created\n        bool empty() const { return my_locals.empty(); }\n\n        //! begin iterator\n        iterator begin() { return iterator( my_locals, 0 ); }\n        //! end iterator\n        iterator end() { return iterator(my_locals, my_locals.size() ); }\n\n        //! begin const iterator\n        const_iterator begin() const { return const_iterator(my_locals, 0); }\n\n        //! end const iterator\n        const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }\n\n        //! Get range for parallel algorithms\n        range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }\n\n        //! Get const range for parallel algorithms\n        const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }\n\n        //! 
Destroys local copies\n        void clear() {\n            unconstruct_locals();\n            my_locals.clear();\n            this->table_clear();\n            // callback is not destroyed\n            // exemplar is not destroyed\n        }\n\n    private:\n\n        template<typename U, typename A2, ets_key_usage_type C2>\n        void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);\n\n    public:\n\n        template<typename U, typename Alloc, ets_key_usage_type Cachetype>\n        enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()\n        {\n            internal_copy(other);\n        }\n\n        enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()\n        {\n            internal_copy(other);\n        }\n\n    private:\n\n        template<typename U, typename A2, ets_key_usage_type C2>\n        enumerable_thread_specific &\n        internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {\n            if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {\n                this->clear();\n                my_construct_callback->destroy();\n                my_construct_callback = 0;\n                internal_copy( other );\n            }\n            return *this;\n        }\n\n    public:\n\n        // assignment\n        enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {\n            return internal_assign(other);\n        }\n\n        template<typename U, typename Alloc, ets_key_usage_type Cachetype>\n        enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)\n        {\n            return internal_assign(other);\n        }\n\n        // combine_func_t has signature T(T,T) or T(const T&, const T&)\n        template <typename combine_func_t>\n        T combine(combine_func_t f_combine) {\n            
if(begin() == end()) {\n                internal::destruct_only<T> location;\n                my_construct_callback->construct(location.value.begin());\n                return *location.value.begin();\n            }\n            const_iterator ci = begin();\n            T my_result = *ci;\n            while(++ci != end())\n                my_result = f_combine( my_result, *ci );\n            return my_result;\n        }\n\n        // combine_func_t has signature void(T) or void(const T&)\n        template <typename combine_func_t>\n        void combine_each(combine_func_t f_combine) {\n            for(const_iterator ci = begin(); ci != end(); ++ci) {\n                f_combine( *ci );\n            }\n        }\n\n    }; // enumerable_thread_specific\n\n    template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>\n    template<typename U, typename A2, ets_key_usage_type C2>\n    void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {\n        // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.\n        my_construct_callback = other.my_construct_callback->clone();\n\n        typedef internal::ets_base<ets_no_key> base;\n        __TBB_ASSERT(my_locals.size()==0,NULL);\n        this->table_reserve_for_copy( other );\n        for( base::array* r=other.my_root; r; r=r->next ) {\n            for( size_t i=0; i<r->size(); ++i ) {\n                base::slot& s1 = r->at(i);\n                if( !s1.empty() ) {\n                    base::slot& s2 = this->table_find(s1.key);\n                    if( s2.empty() ) {\n                        void* lref = &*my_locals.grow_by(1);\n                        s2.ptr = new(lref) T(*(U*)s1.ptr);\n                        s2.key = s1.key;\n                    } else {\n                        // Skip the duplicate\n                    }\n                }\n            }\n        }\n    }\n\n 
   template< typename Container >\n    class flattened2d {\n\n        // This intermediate typedef is to address issues with VC7.1 compilers\n        typedef typename Container::value_type conval_type;\n\n    public:\n\n        //! Basic types\n        typedef typename conval_type::size_type size_type;\n        typedef typename conval_type::difference_type difference_type;\n        typedef typename conval_type::allocator_type allocator_type;\n        typedef typename conval_type::value_type value_type;\n        typedef typename conval_type::reference reference;\n        typedef typename conval_type::const_reference const_reference;\n        typedef typename conval_type::pointer pointer;\n        typedef typename conval_type::const_pointer const_pointer;\n\n        typedef typename internal::segmented_iterator<Container, value_type> iterator;\n        typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;\n\n        flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :\n            my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }\n\n        flattened2d( const Container &c ) :\n            my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }\n\n        iterator begin() { return iterator(*my_container) = my_begin; }\n        iterator end() { return iterator(*my_container) = my_end; }\n        const_iterator begin() const { return const_iterator(*my_container) = my_begin; }\n        const_iterator end() const { return const_iterator(*my_container) = my_end; }\n\n        size_type size() const {\n            size_type tot_size = 0;\n            for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {\n                tot_size += i->size();\n            }\n            return tot_size;\n        }\n\n    private:\n\n        Container *my_container;\n        typename Container::const_iterator my_begin;\n        
typename Container::const_iterator my_end;\n\n    };\n\n    template <typename Container>\n    flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {\n        return flattened2d<Container>(c, b, e);\n    }\n\n    template <typename Container>\n    flattened2d<Container> flatten2d(const Container &c) {\n        return flattened2d<Container>(c);\n    }\n\n} // interface6\n\nnamespace internal {\nusing interface6::internal::segmented_iterator;\n}\n\nusing interface6::enumerable_thread_specific;\nusing interface6::flattened2d;\nusing interface6::flatten2d;\n\n} // namespace tbb\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/flow_graph.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_flow_graph_H\n#define __TBB_flow_graph_H\n\n#include \"tbb_stddef.h\"\n#include \"atomic.h\"\n#include \"spin_mutex.h\"\n#include \"null_mutex.h\"\n#include \"spin_rw_mutex.h\"\n#include \"null_rw_mutex.h\"\n#include \"task.h\"\n#include \"cache_aligned_allocator.h\"\n#include \"tbb_exception.h\"\n#include \"internal/_aggregator_impl.h\"\n#include \"tbb_profiling.h\"\n\n#if TBB_DEPRECATED_FLOW_ENQUEUE\n#define FLOW_SPAWN(a) tbb::task::enqueue((a))\n#else\n#define FLOW_SPAWN(a) tbb::task::spawn((a))\n#endif\n\n// use the VC10 or gcc version of tuple if it is available.\n#if __TBB_CPP11_TUPLE_PRESENT\n    #include <tuple>\nnamespace tbb {\n    namespace flow {\n        using std::tuple;\n        using std::tuple_size;\n        using std::tuple_element;\n        using std::get;\n    }\n}\n#else\n    #include \"compat/tuple\"\n#endif\n\n#include<list>\n#include<queue>\n\n/** @file\n  \\brief The graph related classes and functions\n\n  There are some applications that best express dependencies as messages\n  passed between nodes in a graph.  These messages may contain data or\n  simply act as signals that a predecessors has completed. The graph\n  class and its associated node classes can be used to express such\n  applications.\n*/\n\nnamespace tbb {\nnamespace flow {\n\n//! An enumeration the provides the two most common concurrency levels: unlimited and serial\nenum concurrency { unlimited = 0, serial = 1 };\n\nnamespace interface7 {\n\nnamespace internal {\n    template<typename T, typename M> class successor_cache;\n    template<typename T, typename M> class broadcast_cache;\n    template<typename T, typename M> class round_robin_cache;\n}\n\n//! 
An empty class used for messages that mean \"I'm done\"\nclass continue_msg {};\n\ntemplate< typename T > class sender;\ntemplate< typename T > class receiver;\nclass continue_receiver;\n\n//! Pure virtual template class that defines a sender of messages of type T\ntemplate< typename T >\nclass sender {\npublic:\n    //! The output type of this sender\n    typedef T output_type;\n\n    //! The successor type for this node\n    typedef receiver<T> successor_type;\n\n    virtual ~sender() {}\n\n    //! Add a new successor to this node\n    virtual bool register_successor( successor_type &r ) = 0;\n\n    //! Removes a successor from this node\n    virtual bool remove_successor( successor_type &r ) = 0;\n\n    //! Request an item from the sender\n    virtual bool try_get( T & ) { return false; }\n\n    //! Reserves an item in the sender\n    virtual bool try_reserve( T & ) { return false; }\n\n    //! Releases the reserved item\n    virtual bool try_release( ) { return false; }\n\n    //! Consumes the reserved item\n    virtual bool try_consume( ) { return false; }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    //! interface to record edges for traversal & deletion\n    virtual void    internal_add_built_successor( successor_type & )    = 0;\n    virtual void    internal_delete_built_successor( successor_type & ) = 0;\n    virtual void    copy_successors( std::vector<successor_type *> &)   = 0;\n    virtual size_t  successor_count()                                   = 0;\n#endif\n};\n\ntemplate< typename T > class limiter_node;  // needed for resetting decrementer\ntemplate< typename R, typename B > class run_and_put_task;\n\nstatic tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1;\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n// flags to modify the behavior of the graph reset().  
Can be combined.\nenum reset_flags {\n    rf_reset_protocol   = 0,\n    rf_reset_bodies     = 1<<0,  // delete the current node body, reset to a copy of the initial node body.\n    rf_extract          = 1<<1   // delete edges (extract() for single node, reset() for graph.)\n};\n\n#define __TBB_PFG_RESET_ARG(exp) exp\n#define __TBB_COMMA ,\n#else\n#define __TBB_PFG_RESET_ARG(exp)  /* nothing */\n#define __TBB_COMMA /* nothing */\n#endif\n\n// enqueue left task if necessary.  Returns the non-enqueued task if there is one.\nstatic inline tbb::task *combine_tasks( tbb::task * left, tbb::task * right) {\n    // if no RHS task, don't change left.\n    if(right == NULL) return left;\n    // right != NULL\n    if(left == NULL) return right;\n    if(left == SUCCESSFULLY_ENQUEUED) return right;\n    // left contains a task\n    if(right != SUCCESSFULLY_ENQUEUED) {\n        // both are valid tasks\n        FLOW_SPAWN(*left);\n        return right;\n    }\n    return left;\n}\n\n//! Pure virtual template class that defines a receiver of messages of type T\ntemplate< typename T >\nclass receiver {\npublic:\n    //! The input type of this receiver\n    typedef T input_type;\n\n    //! The predecessor type for this node\n    typedef sender<T> predecessor_type;\n\n    //! Destructor\n    virtual ~receiver() {}\n\n    //! Put an item to the receiver\n    bool try_put( const T& t ) {\n        task *res = try_put_task(t);\n        if(!res) return false;\n        if (res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res);\n        return true;\n    }\n\n    //! put item to successor; return task to run the successor if possible.\nprotected:\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    virtual task *try_put_task(const T& t) = 0;\npublic:\n\n    //! 
Add a predecessor to the node\n    virtual bool register_predecessor( predecessor_type & ) { return false; }\n\n    //! Remove a predecessor from the node\n    virtual bool remove_predecessor( predecessor_type & ) { return false; }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    virtual void   internal_add_built_predecessor( predecessor_type & )    = 0;\n    virtual void   internal_delete_built_predecessor( predecessor_type & ) = 0;\n    virtual void   copy_predecessors( std::vector<predecessor_type *> & )  = 0;\n    virtual size_t predecessor_count()                                     = 0;\n#endif\n\nprotected:\n    //! put receiver back in initial state\n    template<typename U> friend class limiter_node;\n    virtual void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f = rf_reset_protocol ) ) = 0;\n\n    template<typename TT, typename M>\n        friend class internal::successor_cache;\n    virtual bool is_continue_receiver() { return false; }\n};\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n//* holder of edges both for caches and for those nodes which do not have predecessor caches.\n// C == receiver< ... > or sender< ... 
>, depending.\ntemplate<typename C>\nclass edge_container {\n\npublic:\n    typedef std::vector<C *> edge_vector;\n\n    void add_edge( C &s) {\n        built_edges.push_back( &s );\n    }\n\n    void delete_edge( C &s) {\n        for ( typename edge_vector::iterator i = built_edges.begin(); i != built_edges.end(); ++i ) {\n            if ( *i == &s )  {\n                (void)built_edges.erase(i);\n                return;  // only remove one predecessor per request\n            }\n        }\n    }\n\n    void copy_edges( edge_vector &v) {\n        v = built_edges;\n    }\n\n    size_t edge_count() {\n        return (size_t)(built_edges.size());\n    }\n\n    void clear() {\n        built_edges.clear();\n    }\n\n    template< typename S > void sender_extract( S &s ); \n    template< typename R > void receiver_extract( R &r ); \n    \nprivate: \n    edge_vector built_edges;\n};\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n//! Base class for receivers of completion messages\n/** These receivers automatically reset, but cannot be explicitly waited on */\nclass continue_receiver : public receiver< continue_msg > {\npublic:\n\n    //! The input type\n    typedef continue_msg input_type;\n\n    //! The predecessor type for this node\n    typedef sender< continue_msg > predecessor_type;\n\n    //! Constructor\n    continue_receiver( int number_of_predecessors = 0 ) {\n        my_predecessor_count = my_initial_predecessor_count = number_of_predecessors;\n        my_current_count = 0;\n    }\n\n    //! Copy constructor\n    continue_receiver( const continue_receiver& src ) : receiver<continue_msg>() {\n        my_predecessor_count = my_initial_predecessor_count = src.my_initial_predecessor_count;\n        my_current_count = 0;\n    }\n\n    //! Destructor\n    virtual ~continue_receiver() { }\n\n    //! 
Increments the trigger threshold\n    /* override */ bool register_predecessor( predecessor_type & ) {\n        spin_mutex::scoped_lock l(my_mutex);\n        ++my_predecessor_count;\n        return true;\n    }\n\n    //! Decrements the trigger threshold\n    /** Does not check to see if the removal of the predecessor now makes the current count\n        exceed the new threshold.  So removing a predecessor while the graph is active can cause\n        unexpected results. */\n    /* override */ bool remove_predecessor( predecessor_type & ) {\n        spin_mutex::scoped_lock l(my_mutex);\n        --my_predecessor_count;\n        return true;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    typedef std::vector<predecessor_type *> predecessor_vector_type;\n\n    /*override*/ void internal_add_built_predecessor( predecessor_type &s) {\n        spin_mutex::scoped_lock l(my_mutex);\n        my_built_predecessors.add_edge( s );\n    }\n\n    /*override*/ void internal_delete_built_predecessor( predecessor_type &s) {\n        spin_mutex::scoped_lock l(my_mutex);\n        my_built_predecessors.delete_edge(s);\n    }\n\n    /*override*/ void copy_predecessors( predecessor_vector_type &v) {\n        spin_mutex::scoped_lock l(my_mutex);\n        my_built_predecessors.copy_edges(v);\n    }\n\n    /*override*/ size_t predecessor_count() {\n        spin_mutex::scoped_lock l(my_mutex);\n        return my_built_predecessors.edge_count();\n    }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n    \nprotected:\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    // execute body is supposed to be too small to create a task for.\n    /* override */ task *try_put_task( const input_type & ) {\n        {\n            spin_mutex::scoped_lock l(my_mutex);\n            if ( ++my_current_count < 
my_predecessor_count )\n                return SUCCESSFULLY_ENQUEUED;\n            else\n                my_current_count = 0;\n        }\n        task * res = execute();\n        if(!res) return SUCCESSFULLY_ENQUEUED;\n        return res;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    edge_container<predecessor_type> my_built_predecessors;\n#endif\n    spin_mutex my_mutex;\n    int my_predecessor_count;\n    int my_current_count;\n    int my_initial_predecessor_count;\n    // the friend declaration in the base class did not eliminate the \"protected class\"\n    // error in gcc 4.1.2\n    template<typename U> friend class limiter_node;\n    /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f) )\n    {\n        my_current_count = 0;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        if(f & rf_extract) {\n            my_built_predecessors.receiver_extract(*this);\n            my_predecessor_count = my_initial_predecessor_count;\n        }\n#endif\n    }\n\n    //! Does whatever should happen when the threshold is reached\n    /** This should be very fast or else spawn a task.  This is\n        called while the sender is blocked in the try_put(). 
*/\n    virtual task * execute() = 0;\n    template<typename TT, typename M>\n        friend class internal::successor_cache;\n    /*override*/ bool is_continue_receiver() { return true; }\n};\n}  // interface7\n}  // flow\n}  // tbb\n\n#include \"internal/_flow_graph_trace_impl.h\"\n\nnamespace tbb {\nnamespace flow {\nnamespace interface7 {\n\n#include \"internal/_flow_graph_types_impl.h\"\n#include \"internal/_flow_graph_impl.h\"\nusing namespace internal::graph_policy_namespace;\n\nclass graph;\nclass graph_node;\n\ntemplate <typename GraphContainerType, typename GraphNodeType>\nclass graph_iterator {\n    friend class graph;\n    friend class graph_node;\npublic:\n    typedef size_t size_type;\n    typedef GraphNodeType value_type;\n    typedef GraphNodeType* pointer;\n    typedef GraphNodeType& reference;\n    typedef const GraphNodeType& const_reference;\n    typedef std::forward_iterator_tag iterator_category;\n\n    //! Default constructor\n    graph_iterator() : my_graph(NULL), current_node(NULL) {}\n\n    //! Copy constructor\n    graph_iterator(const graph_iterator& other) :\n        my_graph(other.my_graph), current_node(other.current_node)\n    {}\n\n    //! Assignment\n    graph_iterator& operator=(const graph_iterator& other) {\n        if (this != &other) {\n            my_graph = other.my_graph;\n            current_node = other.current_node;\n        }\n        return *this;\n    }\n\n    //! Dereference\n    reference operator*() const;\n\n    //! Dereference\n    pointer operator->() const;\n\n    //! Equality\n    bool operator==(const graph_iterator& other) const {\n        return ((my_graph == other.my_graph) && (current_node == other.current_node));\n    }\n\n    //! Inequality\n    bool operator!=(const graph_iterator& other) const { return !(operator==(other)); }\n\n    //! Pre-increment\n    graph_iterator& operator++() {\n        internal_forward();\n        return *this;\n    }\n\n    //! 
Post-increment\n    graph_iterator operator++(int) {\n        graph_iterator result = *this;\n        operator++();\n        return result;\n    }\n\nprivate:\n    // the graph over which we are iterating\n    GraphContainerType *my_graph;\n    // pointer into my_graph's my_nodes list\n    pointer current_node;\n\n    //! Private initializing constructor for begin() and end() iterators\n    graph_iterator(GraphContainerType *g, bool begin);\n    void internal_forward();\n};\n\n//! The graph class\n/** This class serves as a handle to the graph */\nclass graph : tbb::internal::no_copy {\n    friend class graph_node;\n\n    template< typename Body >\n    class run_task : public task {\n    public:\n        run_task( Body& body ) : my_body(body) {}\n        task *execute() {\n            my_body();\n            return NULL;\n        }\n    private:\n        Body my_body;\n    };\n\n    template< typename Receiver, typename Body >\n    class run_and_put_task : public task {\n    public:\n        run_and_put_task( Receiver &r, Body& body ) : my_receiver(r), my_body(body) {}\n        task *execute() {\n            task *res = my_receiver.try_put_task( my_body() );\n            if(res == SUCCESSFULLY_ENQUEUED) res = NULL;\n            return res;\n        }\n    private:\n        Receiver &my_receiver;\n        Body my_body;\n    };\n\npublic:\n    //! Constructs a graph with isolated task_group_context\n    explicit graph() : my_nodes(NULL), my_nodes_last(NULL)\n    {\n        own_context = true;\n        cancelled = false;\n        caught_exception = false;\n        my_context = new task_group_context();\n        my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task );\n        my_root_task->set_ref_count(1);\n        tbb::internal::fgt_graph( this );\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        my_is_active = true;\n#endif\n    }\n\n    //! 
Constructs a graph with use_this_context as context\n    explicit graph(task_group_context& use_this_context) :\n    my_context(&use_this_context), my_nodes(NULL), my_nodes_last(NULL)\n    {\n        own_context = false;\n        my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task );\n        my_root_task->set_ref_count(1);\n        tbb::internal::fgt_graph( this );\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        my_is_active = true;\n#endif\n    }\n\n    //! Destroys the graph.\n    /** Calls wait_for_all, then destroys the root task and context. */\n    ~graph() {\n        wait_for_all();\n        my_root_task->set_ref_count(0);\n        task::destroy( *my_root_task );\n        if (own_context) delete my_context;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    void set_name( const char *name ) {\n        tbb::internal::fgt_graph_desc( this, name );\n    }\n#endif\n\n    //! Used to register that an external entity may still interact with the graph.\n    /** The graph will not return from wait_for_all until a matching number of decrement_wait_count calls\n        is made. */\n    void increment_wait_count() {\n        if (my_root_task)\n            my_root_task->increment_ref_count();\n    }\n\n    //! Deregisters an external entity that may have interacted with the graph.\n    /** The graph will not return from wait_for_all until all the number of decrement_wait_count calls\n        matches the number of increment_wait_count calls. */\n    void decrement_wait_count() {\n        if (my_root_task)\n            my_root_task->decrement_ref_count();\n    }\n\n    //! Spawns a task that runs a body and puts its output to a specific receiver\n    /** The task is spawned as a child of the graph. This is useful for running tasks\n        that need to block a wait_for_all() on the graph.  For example a one-off source. 
*/\n    template< typename Receiver, typename Body >\n        void run( Receiver &r, Body body ) {\n       FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *my_root_task ) )\n                   run_and_put_task< Receiver, Body >( r, body )) );\n    }\n\n    //! Spawns a task that runs a function object\n    /** The task is spawned as a child of the graph. This is useful for running tasks\n        that need to block a wait_for_all() on the graph. For example a one-off source. */\n    template< typename Body >\n    void run( Body body ) {\n       FLOW_SPAWN( * new ( task::allocate_additional_child_of( *my_root_task ) ) run_task< Body >( body ) );\n    }\n\n    //! Wait until graph is idle and decrement_wait_count calls equals increment_wait_count calls.\n    /** The waiting thread will go off and steal work while it is block in the wait_for_all. */\n    void wait_for_all() {\n        cancelled = false;\n        caught_exception = false;\n        if (my_root_task) {\n#if TBB_USE_EXCEPTIONS\n            try {\n#endif\n                my_root_task->wait_for_all();\n                cancelled = my_context->is_group_execution_cancelled();\n#if TBB_USE_EXCEPTIONS\n            }\n            catch(...) {\n                my_root_task->set_ref_count(1);\n                my_context->reset();\n                caught_exception = true;\n                cancelled = true;\n                throw;\n            }\n#endif\n            my_context->reset();  // consistent with behavior in catch()\n            my_root_task->set_ref_count(1);\n        }\n    }\n\n    //! 
Returns the root task of the graph\n    task * root_task() {\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        if (!my_is_active) \n            return NULL;\n        else\n#endif\n            return my_root_task;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    void set_active(bool a = true) {\n       my_is_active = a;\n    }\n\n    bool is_active() {\n       return my_is_active;\n    }\n#endif\n\n    // ITERATORS\n    template<typename C, typename N>\n    friend class graph_iterator;\n\n    // Graph iterator typedefs\n    typedef graph_iterator<graph,graph_node> iterator;\n    typedef graph_iterator<const graph,const graph_node> const_iterator;\n\n    // Graph iterator constructors\n    //! start iterator\n    iterator begin() { return iterator(this, true); }\n    //! end iterator\n    iterator end() { return iterator(this, false); }\n     //! start const iterator\n    const_iterator begin() const { return const_iterator(this, true); }\n    //! end const iterator\n    const_iterator end() const { return const_iterator(this, false); }\n    //! start const iterator\n    const_iterator cbegin() const { return const_iterator(this, true); }\n    //! end const iterator\n    const_iterator cend() const { return const_iterator(this, false); }\n\n    //! 
return status of graph execution\n    bool is_cancelled() { return cancelled; }\n    bool exception_thrown() { return caught_exception; }\n\n    // thread-unsafe state reset.\n    void reset(__TBB_PFG_RESET_ARG(reset_flags f = rf_reset_protocol));\n\nprivate:\n    task *my_root_task;\n    task_group_context *my_context;\n    bool own_context;\n    bool cancelled;\n    bool caught_exception;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    bool my_is_active;\n#endif\n\n\n    graph_node *my_nodes, *my_nodes_last;\n\n    spin_mutex nodelist_mutex;\n    void register_node(graph_node *n);\n    void remove_node(graph_node *n);\n\n};  // class graph\n\ntemplate <typename C, typename N>\ngraph_iterator<C,N>::graph_iterator(C *g, bool begin) : my_graph(g), current_node(NULL)\n{\n    if (begin) current_node = my_graph->my_nodes;\n    //else it is an end iterator by default\n}\n\ntemplate <typename C, typename N>\ntypename graph_iterator<C,N>::reference graph_iterator<C,N>::operator*() const {\n    __TBB_ASSERT(current_node, \"graph_iterator at end\");\n    return *operator->();\n}\n\ntemplate <typename C, typename N>\ntypename graph_iterator<C,N>::pointer graph_iterator<C,N>::operator->() const {\n    return current_node;\n}\n\n\ntemplate <typename C, typename N>\nvoid graph_iterator<C,N>::internal_forward() {\n    if (current_node) current_node = current_node->next;\n}\n\n//! 
The base of all graph nodes.\nclass graph_node : tbb::internal::no_assign {\n    friend class graph;\n    template<typename C, typename N>\n    friend class graph_iterator;\nprotected:\n    graph& my_graph;\n    graph_node *next, *prev;\npublic:\n    graph_node(graph& g) : my_graph(g) {\n        my_graph.register_node(this);\n    }\n    virtual ~graph_node() {\n        my_graph.remove_node(this);\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    virtual void set_name( const char *name ) = 0;\n#endif\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    virtual void extract( reset_flags f=rf_extract ) {\n        bool a = my_graph.is_active();\n        my_graph.set_active(false);\n        reset((reset_flags)(f|rf_extract));\n        my_graph.set_active(a);\n    }\n#endif\n\nprotected:\n    virtual void reset(__TBB_PFG_RESET_ARG(reset_flags f=rf_reset_protocol)) = 0;\n};\n\ninline void graph::register_node(graph_node *n) {\n    n->next = NULL;\n    {\n        spin_mutex::scoped_lock lock(nodelist_mutex);\n        n->prev = my_nodes_last;\n        if (my_nodes_last) my_nodes_last->next = n;\n        my_nodes_last = n;\n        if (!my_nodes) my_nodes = n;\n    }\n}\n\ninline void graph::remove_node(graph_node *n) {\n    {\n        spin_mutex::scoped_lock lock(nodelist_mutex);\n        __TBB_ASSERT(my_nodes && my_nodes_last, \"graph::remove_node: Error: no registered nodes\");\n        if (n->prev) n->prev->next = n->next;\n        if (n->next) n->next->prev = n->prev;\n        if (my_nodes_last == n) my_nodes_last = n->prev;\n        if (my_nodes == n) my_nodes = n->next;\n    }\n    n->prev = n->next = NULL;\n}\n\ninline void graph::reset( __TBB_PFG_RESET_ARG( reset_flags f )) {\n    // reset context\n    task *saved_my_root_task = my_root_task;\n    my_root_task = NULL;\n    if(my_context) my_context->reset();\n    cancelled = false;\n    caught_exception = false;\n    // reset all the nodes comprising the graph\n    for(iterator ii = begin(); ii != end(); ++ii) {\n        
graph_node *my_p = &(*ii);\n        my_p->reset(__TBB_PFG_RESET_ARG(f));\n    }\n    my_root_task = saved_my_root_task;\n}\n\n\n#include \"internal/_flow_graph_node_impl.h\"\n\n//! An executable node that acts as a source, i.e. it has no predecessors\ntemplate < typename Output >\nclass source_node : public graph_node, public sender< Output > {\nprotected:\n    using graph_node::my_graph;\npublic:\n    //! The type of the output message, which is complete\n    typedef Output output_type;\n\n    //! The type of successors of this node\n    typedef receiver< Output > successor_type;\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    typedef std::vector<successor_type *> successor_vector_type;\n#endif\n\n    //! Constructor for a node with a successor\n    template< typename Body >\n    source_node( graph &g, Body body, bool is_active = true )\n        : graph_node(g), my_active(is_active), init_my_active(is_active),\n        my_body( new internal::source_body_leaf< output_type, Body>(body) ),\n        my_reserved(false), my_has_cached_item(false)\n    {\n        my_successors.set_owner(this);\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_SOURCE_NODE, &this->my_graph,\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n    //! Copy constructor\n    source_node( const source_node& src ) :\n        graph_node(src.my_graph), sender<Output>(),\n        my_active(src.init_my_active),\n        init_my_active(src.init_my_active), my_body( src.my_body->clone() ),\n        my_reserved(false), my_has_cached_item(false)\n    {\n        my_successors.set_owner(this);\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_SOURCE_NODE, &this->my_graph,\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n    //! 
The destructor\n    ~source_node() { delete my_body; }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n    //! Add a new successor to this node\n    /* override */ bool register_successor( successor_type &r ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        my_successors.register_successor(r);\n        if ( my_active )\n            spawn_put();\n        return true;\n    }\n\n    //! Removes a successor from this node\n    /* override */ bool remove_successor( successor_type &r ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        my_successors.remove_successor(r);\n        return true;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    /*override*/void internal_add_built_successor( successor_type &r) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        my_successors.internal_add_built_successor(r);\n    }\n\n    /*override*/void internal_delete_built_successor( successor_type &r) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        my_successors.internal_delete_built_successor(r);\n    }\n\n    /*override*/size_t successor_count() {\n        spin_mutex::scoped_lock lock(my_mutex);\n        return my_successors.successor_count();\n    }\n\n    /*override*/void copy_successors(successor_vector_type &v) {\n        spin_mutex::scoped_lock l(my_mutex);\n        my_successors.copy_successors(v);\n    }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n    //! Request an item from the node\n    /*override */ bool try_get( output_type &v ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        if ( my_reserved )\n            return false;\n\n        if ( my_has_cached_item ) {\n            v = my_cached_item;\n            my_has_cached_item = false;\n            return true;\n        }\n        // we've been asked to provide an item, but we have none.  
enqueue a task to\n        // provide one.\n        spawn_put();\n        return false;\n    }\n\n    //! Reserves an item.\n    /* override */ bool try_reserve( output_type &v ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        if ( my_reserved ) {\n            return false;\n        }\n\n        if ( my_has_cached_item ) {\n            v = my_cached_item;\n            my_reserved = true;\n            return true;\n        } else {\n            return false;\n        }\n    }\n\n    //! Release a reserved item.\n    /** true = item has been released and so remains in sender, dest must request or reserve future items */\n    /* override */ bool try_release( ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        __TBB_ASSERT( my_reserved && my_has_cached_item, \"releasing non-existent reservation\" );\n        my_reserved = false;\n        if(!my_successors.empty())\n            spawn_put();\n        return true;\n    }\n\n    //! Consumes a reserved item\n    /* override */ bool try_consume( ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        __TBB_ASSERT( my_reserved && my_has_cached_item, \"consuming non-existent reservation\" );\n        my_reserved = false;\n        my_has_cached_item = false;\n        if ( !my_successors.empty() ) {\n            spawn_put();\n        }\n        return true;\n    }\n\n    //! Activates a node that was created in the inactive state\n    void activate() {\n        spin_mutex::scoped_lock lock(my_mutex);\n        my_active = true;\n        if ( !my_successors.empty() )\n            spawn_put();\n    }\n\n    template<typename Body>\n    Body copy_function_object() {\n        internal::source_body<output_type> &body_ref = *this->my_body;\n        return dynamic_cast< internal::source_body_leaf<output_type, Body> & >(body_ref).get_body();\n    }\n\nprotected:\n\n    //! 
resets the source_node to its initial state\n    void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n        my_active = init_my_active;\n        my_reserved =false;\n        if(my_has_cached_item) {\n            my_has_cached_item = false;\n        }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        my_successors.reset(f);\n        if(f & rf_reset_bodies) my_body->reset_body();\n#endif\n    }\n\nprivate:\n    spin_mutex my_mutex;\n    bool my_active;\n    bool init_my_active;\n    internal::source_body<output_type> *my_body;\n    internal::broadcast_cache< output_type > my_successors;\n    bool my_reserved;\n    bool my_has_cached_item;\n    output_type my_cached_item;\n\n    // used by apply_body, can invoke body of node.\n    bool try_reserve_apply_body(output_type &v) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        if ( my_reserved ) {\n            return false;\n        }\n        if ( !my_has_cached_item ) {\n            tbb::internal::fgt_begin_body( my_body );\n            bool r = (*my_body)(my_cached_item);\n            tbb::internal::fgt_end_body( my_body );\n            if (r) {\n                my_has_cached_item = true;\n            }\n        }\n        if ( my_has_cached_item ) {\n            v = my_cached_item;\n            my_reserved = true;\n            return true;\n        } else {\n            return false;\n        }\n    }\n\n    //! Spawns a task that applies the body\n    /* override */ void spawn_put( ) {\n        task* tp = this->my_graph.root_task();\n        if(tp) {\n            FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) )\n                        internal:: source_task_bypass < source_node< output_type > >( *this ) ) );\n        }\n    }\n\n    friend class internal::source_task_bypass< source_node< output_type > >;\n    //! Applies the body.  
Returning SUCCESSFULLY_ENQUEUED okay; forward_task_bypass will handle it.\n    /* override */ task * apply_body_bypass( ) {\n        output_type v;\n        if ( !try_reserve_apply_body(v) )\n            return NULL;\n\n        task *last_task = my_successors.try_put_task(v);\n        if ( last_task )\n            try_consume();\n        else\n            try_release();\n        return last_task;\n    }\n};  // source_node\n\n//! Implements a function node that supports Input -> Output\ntemplate < typename Input, typename Output = continue_msg, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >\nclass function_node : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {\nprotected:\n    using graph_node::my_graph;\npublic:\n    typedef Input input_type;\n    typedef Output output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n    typedef internal::function_input<input_type,output_type,Allocator> fInput_type;\n    typedef internal::function_output<output_type> fOutput_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEAURES\n    typedef std::vector<predecessor_type *> predecessor_vector_type;\n    typedef std::vector<successor_type *> successor_vector_type;\n#endif\n\n    //! Constructor\n    template< typename Body >\n    function_node( graph &g, size_t concurrency, Body body ) :\n        graph_node(g), internal::function_input<input_type,output_type,Allocator>(g, concurrency, body) {\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n    //! 
Copy constructor\n    function_node( const function_node& src ) :\n        graph_node(src.my_graph), internal::function_input<input_type,output_type,Allocator>( src ),\n        fOutput_type() {\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, static_cast<receiver<input_type> *>(this),\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\nprotected:\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    using fInput_type::try_put_task;\n\n    // override of graph_node's reset.\n    /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {\n        fInput_type::reset_function_input(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        successors().reset(f);\n        __TBB_ASSERT(!(f & rf_extract) || successors().empty(), \"function_node successors not empty\");\n        __TBB_ASSERT(this->my_predecessors.empty(), \"function_node predecessors not empty\");\n#endif\n    }\n\n    /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }\n};\n\n//! 
Implements a function node that supports Input -> Output\ntemplate < typename Input, typename Output, typename Allocator >\nclass function_node<Input,Output,queueing,Allocator> : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {\nprotected:\n    using graph_node::my_graph;\npublic:\n    typedef Input input_type;\n    typedef Output output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n    typedef internal::function_input<input_type,output_type,Allocator> fInput_type;\n    typedef internal::function_input_queue<input_type, Allocator> queue_type;\n    typedef internal::function_output<output_type> fOutput_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    typedef std::vector<predecessor_type *> predecessor_vector_type;\n    typedef std::vector<successor_type *> successor_vector_type;\n#endif\n\n    //! Constructor\n    template< typename Body >\n    function_node( graph &g, size_t concurrency, Body body ) :\n        graph_node(g), fInput_type( g, concurrency, body, new queue_type() ) {\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n    //! 
Copy constructor\n    function_node( const function_node& src ) :\n        graph_node(src.graph_node::my_graph), fInput_type( src, new queue_type() ), fOutput_type() {\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\nprotected:\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    using fInput_type::try_put_task;\n\n    /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n        fInput_type::reset_function_input(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        successors().reset(f);\n        __TBB_ASSERT(!(f & rf_extract) || successors().empty(), \"function_node successors not empty\");\n        __TBB_ASSERT(!(f & rf_extract) || this->my_predecessors.empty(), \"function_node predecessors not empty\");\n#endif\n\n    }\n\n    /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }\n};\n\n//! 
implements a function node that supports Input -> (set of outputs)\n// Output is a tuple of output types.\ntemplate < typename Input, typename Output, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >\nclass multifunction_node :\n    public graph_node,\n    public internal::multifunction_input\n    <\n        Input,\n        typename internal::wrap_tuple_elements<\n            tbb::flow::tuple_size<Output>::value,  // #elements in tuple\n            internal::multifunction_output,  // wrap this around each element\n            Output // the tuple providing the types\n        >::type,\n        Allocator\n    > {\nprotected:\n    using graph_node::my_graph;\nprivate:\n    static const int N = tbb::flow::tuple_size<Output>::value;\npublic:\n    typedef Input input_type;\n    typedef typename internal::wrap_tuple_elements<N,internal::multifunction_output, Output>::type output_ports_type;\nprivate:\n    typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type;\n    typedef typename internal::function_input_queue<input_type,Allocator> queue_type;\npublic:\n    template<typename Body>\n    multifunction_node( graph &g, size_t concurrency, Body body ) :\n        graph_node(g), base_type(g,concurrency, body) {\n        tbb::internal::fgt_multioutput_node_with_body<Output,N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,\n                                                                 &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),\n                                                                 this->output_ports(), this->my_body );\n    }\n\n    multifunction_node( const multifunction_node &other) :\n        graph_node(other.graph_node::my_graph), base_type(other) {\n        tbb::internal::fgt_multioutput_node_with_body<Output,N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,\n                                                                 &this->graph_node::my_graph, 
static_cast<receiver<input_type> *>(this),\n                                                                 this->output_ports(), this->my_body );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_multioutput_node_desc( this, name );\n    }\n#endif\n\n    // all the guts are in multifunction_input...\nprotected:\n    /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { base_type::reset(__TBB_PFG_RESET_ARG(f)); }\n};  // multifunction_node\n\ntemplate < typename Input, typename Output, typename Allocator >\nclass multifunction_node<Input,Output,queueing,Allocator> : public graph_node, public internal::multifunction_input<Input,\n    typename internal::wrap_tuple_elements<tbb::flow::tuple_size<Output>::value, internal::multifunction_output, Output>::type, Allocator> {\nprotected:\n    using graph_node::my_graph;\n    static const int N = tbb::flow::tuple_size<Output>::value;\npublic:\n    typedef Input input_type;\n    typedef typename internal::wrap_tuple_elements<N, internal::multifunction_output, Output>::type output_ports_type;\nprivate:\n    typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type;\n    typedef typename internal::function_input_queue<input_type,Allocator> queue_type;\npublic:\n    template<typename Body>\n    multifunction_node( graph &g, size_t concurrency, Body body) :\n        graph_node(g), base_type(g,concurrency, body, new queue_type()) {\n        tbb::internal::fgt_multioutput_node_with_body<Output,N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,\n                                                                 &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),\n                                                                 this->output_ports(), this->my_body );\n    }\n\n    multifunction_node( const multifunction_node &other) :\n        graph_node(other.graph_node::my_graph), base_type(other, 
new queue_type()) {\n        tbb::internal::fgt_multioutput_node_with_body<Output,N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,\n                                                                 &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),\n                                                                 this->output_ports(), this->my_body );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_multioutput_node_desc( this, name );\n    }\n#endif\n\n    // all the guts are in multifunction_input...\nprotected:\n    /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { base_type::reset(__TBB_PFG_RESET_ARG(f)); }\n};  // multifunction_node\n\n//! split_node: accepts a tuple as input, forwards each element of the tuple to its\n//  successors.  The node has unlimited concurrency, so though it is marked as\n//  \"rejecting\" it does not reject inputs.\ntemplate<typename TupleType, typename Allocator=cache_aligned_allocator<TupleType> >\nclass split_node : public multifunction_node<TupleType, TupleType, rejecting, Allocator> {\n    static const int N = tbb::flow::tuple_size<TupleType>::value;\n    typedef multifunction_node<TupleType,TupleType,rejecting,Allocator> base_type;\npublic:\n    typedef typename base_type::output_ports_type output_ports_type;\nprivate:\n    struct splitting_body {\n        void operator()(const TupleType& t, output_ports_type &p) {\n            internal::emit_element<N>::emit_this(t, p);\n        }\n    };\npublic:\n    typedef TupleType input_type;\n    typedef Allocator allocator_type;\n    split_node(graph &g) : base_type(g, unlimited, splitting_body()) {\n        tbb::internal::fgt_multioutput_node<TupleType,N>( tbb::internal::FLOW_SPLIT_NODE, &this->graph_node::my_graph,\n                                                          static_cast<receiver<input_type> *>(this), this->output_ports() );\n    }\n\n    split_node( const split_node 
& other) : base_type(other) {\n        tbb::internal::fgt_multioutput_node<TupleType,N>( tbb::internal::FLOW_SPLIT_NODE, &this->graph_node::my_graph,\n                                                          static_cast<receiver<input_type> *>(this), this->output_ports() );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_multioutput_node_desc( this, name );\n    }\n#endif\n\n};\n\n//! Implements an executable node that supports continue_msg -> Output\ntemplate <typename Output>\nclass continue_node : public graph_node, public internal::continue_input<Output>, public internal::function_output<Output> {\nprotected:\n    using graph_node::my_graph;\npublic:\n    typedef continue_msg input_type;\n    typedef Output output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n    typedef internal::continue_input<Output> fInput_type;\n    typedef internal::function_output<output_type> fOutput_type;\n\n    //! Constructor for executable node with continue_msg -> Output\n    template <typename Body >\n    continue_node( graph &g, Body body ) :\n        graph_node(g), internal::continue_input<output_type>( g, body ) {\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph,\n                                           static_cast<receiver<input_type> *>(this),\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n\n    //! 
Constructor for executable node with continue_msg -> Output\n    template <typename Body >\n    continue_node( graph &g, int number_of_predecessors, Body body ) :\n        graph_node(g), internal::continue_input<output_type>( g, number_of_predecessors, body ) {\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph,\n                                           static_cast<receiver<input_type> *>(this),\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n    //! Copy constructor\n    continue_node( const continue_node& src ) :\n        graph_node(src.graph_node::my_graph), internal::continue_input<output_type>(src),\n        internal::function_output<Output>() {\n        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph,\n                                           static_cast<receiver<input_type> *>(this),\n                                           static_cast<sender<output_type> *>(this), this->my_body );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\nprotected:\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    using fInput_type::try_put_task;\n\n    /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {\n        fInput_type::reset_receiver(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        successors().reset(f);\n        __TBB_ASSERT(!(f & rf_extract) || successors().empty(), \"continue_node not reset\");\n#endif\n    }\n\n    /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }\n};  // continue_node\n\ntemplate< typename T >\nclass overwrite_node 
: public graph_node, public receiver<T>, public sender<T> {\nprotected:\n    using graph_node::my_graph;\npublic:\n    typedef T input_type;\n    typedef T output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    typedef std::vector<predecessor_type *> predecessor_vector_type;\n    typedef std::vector<successor_type *> successor_vector_type;\n#endif\n\n    overwrite_node(graph &g) : graph_node(g), my_buffer_is_valid(false) {\n        my_successors.set_owner( this );\n        tbb::internal::fgt_node( tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph,\n                                 static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this) );\n    }\n\n    // Copy constructor; doesn't take anything from src; default won't work\n    overwrite_node( const overwrite_node& src ) :\n        graph_node(src.my_graph), receiver<T>(), sender<T>(), my_buffer_is_valid(false)\n    {\n        my_successors.set_owner( this );\n        tbb::internal::fgt_node( tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph,\n                                 static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this) );\n    }\n\n    ~overwrite_node() {}\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n    /* override */ bool register_successor( successor_type &s ) {\n        spin_mutex::scoped_lock l( my_mutex );\n        task* tp = this->my_graph.root_task();  // just to test if we are resetting\n        if (my_buffer_is_valid && tp) {\n            // We have a valid value that must be forwarded immediately.\n            if ( s.try_put( my_buffer ) || !s.register_predecessor( *this  ) ) {\n                // We add the successor: it accepted our put or it rejected it but won't let us become a predecessor\n                
my_successors.register_successor( s );\n            } else {\n                // We don't add the successor: it rejected our put and we became its predecessor instead\n                return false;\n            }\n        } else {\n            // No valid value yet, just add as successor\n            my_successors.register_successor( s );\n        }\n        return true;\n    }\n\n    /* override */ bool remove_successor( successor_type &s ) {\n        spin_mutex::scoped_lock l( my_mutex );\n        my_successors.remove_successor(s);\n        return true;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    /*override*/void internal_add_built_successor( successor_type &s) {\n        spin_mutex::scoped_lock l( my_mutex );\n        my_successors.internal_add_built_successor(s);\n    }\n\n    /*override*/void internal_delete_built_successor( successor_type &s) {\n        spin_mutex::scoped_lock l( my_mutex );\n        my_successors.internal_delete_built_successor(s);\n    }\n\n    /*override*/size_t successor_count() {\n        spin_mutex::scoped_lock l( my_mutex );\n        return my_successors.successor_count();\n    }\n\n    /*override*/ void copy_successors(successor_vector_type &v) {\n        spin_mutex::scoped_lock l( my_mutex );\n        my_successors.copy_successors(v);\n    }\n\n    /*override*/ void internal_add_built_predecessor( predecessor_type &p) {\n        spin_mutex::scoped_lock l( my_mutex );\n        my_built_predecessors.add_edge(p);\n    }\n\n    /*override*/ void internal_delete_built_predecessor( predecessor_type &p) {\n        spin_mutex::scoped_lock l( my_mutex );\n        my_built_predecessors.delete_edge(p);\n    }\n\n    /*override*/size_t predecessor_count() {\n        spin_mutex::scoped_lock l( my_mutex );\n        return my_built_predecessors.edge_count();\n    }\n\n    /*override*/void copy_predecessors(predecessor_vector_type &v) {\n        spin_mutex::scoped_lock l( my_mutex );\n        my_built_predecessors.copy_edges(v);\n    
}\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n    /* override */ bool try_get( input_type &v ) {\n        spin_mutex::scoped_lock l( my_mutex );\n        if ( my_buffer_is_valid ) {\n            v = my_buffer;\n            return true;\n        }\n        return false;\n    }\n\n    bool is_valid() {\n       spin_mutex::scoped_lock l( my_mutex );\n       return my_buffer_is_valid;\n    }\n\n    void clear() {\n       spin_mutex::scoped_lock l( my_mutex );\n       my_buffer_is_valid = false;\n    }\n\nprotected:\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    /* override */ task * try_put_task( const input_type &v ) {\n        spin_mutex::scoped_lock l( my_mutex );\n        my_buffer = v;\n        my_buffer_is_valid = true;\n        task * rtask = my_successors.try_put_task(v);\n        if(!rtask) rtask = SUCCESSFULLY_ENQUEUED;\n        return rtask;\n    }\n\n    /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n        my_buffer_is_valid = false;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        my_successors.reset(f);\n       if (f&rf_extract) {\n           my_built_predecessors.receiver_extract(*this);\n       }\n#endif\n    }\n\n    spin_mutex my_mutex;\n    internal::broadcast_cache< input_type, null_rw_mutex > my_successors;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    edge_container<sender<input_type> > my_built_predecessors;\n#endif\n    input_type my_buffer;\n    bool my_buffer_is_valid;\n    /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) {}\n};  // overwrite_node\n\ntemplate< typename T >\nclass write_once_node : public overwrite_node<T> {\npublic:\n    typedef T input_type;\n    typedef T output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n\n    //! 
Constructor\n    write_once_node(graph& g) : overwrite_node<T>(g) {\n        tbb::internal::fgt_node( tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph),\n                                 static_cast<receiver<input_type> *>(this),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n    //! Copy constructor: call base class copy constructor\n    write_once_node( const write_once_node& src ) : overwrite_node<T>(src) {\n        tbb::internal::fgt_node( tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph),\n                                 static_cast<receiver<input_type> *>(this),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\nprotected:\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    /* override */ task *try_put_task( const T &v ) {\n        spin_mutex::scoped_lock l( this->my_mutex );\n        if ( this->my_buffer_is_valid ) {\n            return NULL;\n        } else {\n            this->my_buffer = v;\n            this->my_buffer_is_valid = true;\n            task *res = this->my_successors.try_put_task(v);\n            if(!res) res = SUCCESSFULLY_ENQUEUED;\n            return res;\n        }\n    }\n};\n\n//! 
Forwards messages of type T to all successors\ntemplate <typename T>\nclass broadcast_node : public graph_node, public receiver<T>, public sender<T> {\nprotected:\n    using graph_node::my_graph;\npublic:\n    typedef T input_type;\n    typedef T output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    typedef std::vector<predecessor_type *> predecessor_vector_type;\n    typedef std::vector<successor_type *> successor_vector_type;\n#endif\nprivate:\n    internal::broadcast_cache<input_type> my_successors;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    edge_container<predecessor_type> my_built_predecessors;\n    spin_mutex pred_mutex;\n#endif\npublic:\n\n    broadcast_node(graph& g) : graph_node(g) {\n        my_successors.set_owner( this );\n        tbb::internal::fgt_node( tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph,\n                                 static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this) );\n    }\n\n    // Copy constructor\n    broadcast_node( const broadcast_node& src ) :\n        graph_node(src.my_graph), receiver<T>(), sender<T>()\n    {\n        my_successors.set_owner( this );\n        tbb::internal::fgt_node( tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph,\n                                 static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n    //! Adds a successor\n    virtual bool register_successor( receiver<T> &r ) {\n        my_successors.register_successor( r );\n        return true;\n    }\n\n    //! 
Removes s as a successor\n    virtual bool remove_successor( receiver<T> &r ) {\n        my_successors.remove_successor( r );\n        return true;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    /*override*/ void internal_add_built_successor(successor_type &r) {\n        my_successors.internal_add_built_successor(r);\n    }\n\n    /*override*/ void internal_delete_built_successor(successor_type &r) {\n        my_successors.internal_delete_built_successor(r);\n    }\n\n    /*override*/ size_t successor_count() {\n        return my_successors.successor_count();\n    }\n\n    /*override*/ void copy_successors(successor_vector_type &v) {\n        my_successors.copy_successors(v);\n    }\n\n    /*override*/ void internal_add_built_predecessor( predecessor_type &p) {\n        my_built_predecessors.add_edge(p);\n    }\n\n    /*override*/ void internal_delete_built_predecessor( predecessor_type &p) {\n        my_built_predecessors.delete_edge(p);\n    }\n\n    /*override*/ size_t predecessor_count() {\n        return my_built_predecessors.edge_count();\n    }\n\n    /*override*/ void copy_predecessors(predecessor_vector_type &v) {\n        my_built_predecessors.copy_edges(v);\n    }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\nprotected:\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    //! build a task to run the successor if possible.  
Default is old behavior.\n    /*override*/ task *try_put_task(const T& t) {\n        task *new_task = my_successors.try_put_task(t);\n        if(!new_task) new_task = SUCCESSFULLY_ENQUEUED;\n        return new_task;\n    }\n\n    /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        my_successors.reset(f);\n        if (f&rf_extract) {\n           my_built_predecessors.receiver_extract(*this);\n        }\n        __TBB_ASSERT(!(f & rf_extract) || my_successors.empty(), \"Error resetting broadcast_node\");\n#endif\n    }\n    /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) {}\n};  // broadcast_node\n\n//! Forwards messages in arbitrary order\ntemplate <typename T, typename A=cache_aligned_allocator<T> >\nclass buffer_node : public graph_node, public internal::reservable_item_buffer<T, A>, public receiver<T>, public sender<T> {\nprotected:\n    using graph_node::my_graph;\npublic:\n    typedef T input_type;\n    typedef T output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n    typedef buffer_node<T, A> my_class;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    typedef std::vector<predecessor_type *> predecessor_vector_type;\n    typedef std::vector<successor_type *> successor_vector_type;\n#endif\nprotected:\n    typedef size_t size_type;\n    internal::round_robin_cache< T, null_rw_mutex > my_successors;\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    edge_container<predecessor_type> my_built_predecessors;\n#endif\n\n    friend class internal::forward_task_bypass< buffer_node< T, A > >;\n\n    enum op_type {reg_succ, rem_succ, req_item, res_item, rel_res, con_res, put_item, try_fwd_task\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        , add_blt_succ, del_blt_succ,\n        add_blt_pred, del_blt_pred,\n        blt_succ_cnt, blt_pred_cnt,\n        blt_succ_cpy, blt_pred_cpy   // create vector copies of preds and succs\n#endif\n    };\n    
enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n\n    // implements the aggregator_operation concept\n    class buffer_operation : public internal::aggregated_operation< buffer_operation > {\n    public:\n        char type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        task * ltask;\n        union {\n            input_type *elem;\n            successor_type *r;\n            predecessor_type *p;\n            size_t cnt_val;\n            successor_vector_type *svec;\n            predecessor_vector_type *pvec;\n        };\n#else\n        T *elem;\n        task * ltask;\n        successor_type *r;\n#endif\n        buffer_operation(const T& e, op_type t) : type(char(t))\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                                                  , ltask(NULL), elem(const_cast<T*>(&e))\n#else\n                                                  , elem(const_cast<T*>(&e)) , ltask(NULL)\n#endif\n        {}\n        buffer_operation(op_type t) : type(char(t)),  ltask(NULL) {}\n    };\n\n    bool forwarder_busy;\n    typedef internal::aggregating_functor<my_class, buffer_operation> my_handler;\n    friend class internal::aggregating_functor<my_class, buffer_operation>;\n    internal::aggregator< my_handler, buffer_operation> my_aggregator;\n\n    virtual void handle_operations(buffer_operation *op_list) {\n        buffer_operation *tmp = NULL;\n        bool try_forwarding=false;\n        while (op_list) {\n            tmp = op_list;\n            op_list = op_list->next;\n            switch (tmp->type) {\n            case reg_succ: internal_reg_succ(tmp);  try_forwarding = true; break;\n            case rem_succ: internal_rem_succ(tmp); break;\n            case req_item: internal_pop(tmp); break;\n            case res_item: internal_reserve(tmp); break;\n            case rel_res:  internal_release(tmp);  try_forwarding = true; break;\n            case con_res:  internal_consume(tmp);  try_forwarding = true; break;\n            case put_item: internal_push(tmp);  
try_forwarding = (tmp->status == SUCCEEDED); break;\n            case try_fwd_task: internal_forward_task(tmp); break;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            // edge recording\n            case add_blt_succ: internal_add_built_succ(tmp); break;\n            case del_blt_succ: internal_del_built_succ(tmp); break;\n            case add_blt_pred: internal_add_built_pred(tmp); break;\n            case del_blt_pred: internal_del_built_pred(tmp); break;\n            case blt_succ_cnt: internal_succ_cnt(tmp); break;\n            case blt_pred_cnt: internal_pred_cnt(tmp); break;\n            case blt_succ_cpy: internal_copy_succs(tmp); break;\n            case blt_pred_cpy: internal_copy_preds(tmp); break;\n#endif\n            }\n        }\n        if (try_forwarding && !forwarder_busy) {\n            task* tp = this->my_graph.root_task();\n            if(tp) {\n                forwarder_busy = true;\n                task *new_task = new(task::allocate_additional_child_of(*tp)) internal::\n                        forward_task_bypass\n                        < buffer_node<input_type, A> >(*this);\n                // tmp should point to the last item handled by the aggregator.  This is the operation\n                // the handling thread enqueued.  So modifying that record will be okay.\n                tbb::task *z = tmp->ltask;\n                tmp->ltask = combine_tasks(z, new_task);  // in case the op generated a task\n            }\n        }\n    }\n\n    inline task *grab_forwarding_task( buffer_operation &op_data) {\n        return op_data.ltask;\n    }\n\n    inline bool enqueue_forwarding_task(buffer_operation &op_data) {\n        task *ft = grab_forwarding_task(op_data);\n        if(ft) {\n            FLOW_SPAWN(*ft);\n            return true;\n        }\n        return false;\n    }\n\n    //! 
This is executed by an enqueued task, the \"forwarder\"\n    virtual task *forward_task() {\n        buffer_operation op_data(try_fwd_task);\n        task *last_task = NULL;\n        do {\n            op_data.status = WAIT;\n            op_data.ltask = NULL;\n            my_aggregator.execute(&op_data);\n            tbb::task *xtask = op_data.ltask;\n            last_task = combine_tasks(last_task, xtask);\n        } while (op_data.status == SUCCEEDED);\n        return last_task;\n    }\n\n    //! Register successor\n    virtual void internal_reg_succ(buffer_operation *op) {\n        my_successors.register_successor(*(op->r));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    //! Remove successor\n    virtual void internal_rem_succ(buffer_operation *op) {\n        my_successors.remove_successor(*(op->r));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    virtual void internal_add_built_succ(buffer_operation *op) {\n        my_successors.internal_add_built_successor(*(op->r));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_del_built_succ(buffer_operation *op) {\n        my_successors.internal_delete_built_successor(*(op->r));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_add_built_pred(buffer_operation *op) {\n        my_built_predecessors.add_edge(*(op->p));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_del_built_pred(buffer_operation *op) {\n        my_built_predecessors.delete_edge(*(op->p));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_succ_cnt(buffer_operation *op) {\n        op->cnt_val = my_successors.successor_count();\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_pred_cnt(buffer_operation *op) {\n        op->cnt_val = 
my_built_predecessors.edge_count();\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_copy_succs(buffer_operation *op) {\n        my_successors.copy_successors(*(op->svec));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_copy_preds(buffer_operation *op) {\n        my_built_predecessors.copy_edges(*(op->pvec));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n    //! Tries to forward valid items to successors\n    virtual void internal_forward_task(buffer_operation *op) {\n        if (this->my_reserved || !this->my_item_valid(this->my_tail-1)) {\n            __TBB_store_with_release(op->status, FAILED);\n            this->forwarder_busy = false;\n            return;\n        }\n        T i_copy;\n        task * last_task = NULL;\n        size_type counter = my_successors.size();\n        // Try forwarding, giving each successor a chance\n        while (counter>0 && !this->buffer_empty() && this->my_item_valid(this->my_tail-1)) {\n            this->copy_back(i_copy);\n            task *new_task = my_successors.try_put_task(i_copy);\n            if(new_task) {\n                last_task = combine_tasks(last_task, new_task);\n                this->destroy_back();\n            }\n            --counter;\n        }\n        op->ltask = last_task;  // return task\n        if (last_task && !counter) {\n            __TBB_store_with_release(op->status, SUCCEEDED);\n        }\n        else {\n            __TBB_store_with_release(op->status, FAILED);\n            forwarder_busy = false;\n        }\n    }\n\n    virtual void internal_push(buffer_operation *op) {\n        this->push_back(*(op->elem));\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_pop(buffer_operation *op) {\n        if(this->pop_back(*(op->elem))) {\n            __TBB_store_with_release(op->status, SUCCEEDED);\n 
       }\n        else {\n            __TBB_store_with_release(op->status, FAILED);\n        }\n    }\n\n    virtual void internal_reserve(buffer_operation *op) {\n        if(this->reserve_front(*(op->elem))) {\n            __TBB_store_with_release(op->status, SUCCEEDED);\n        }\n        else {\n            __TBB_store_with_release(op->status, FAILED);\n        }\n    }\n\n    virtual void internal_consume(buffer_operation *op) {\n        this->consume_front();\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    virtual void internal_release(buffer_operation *op) {\n        this->release_front();\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\npublic:\n    //! Constructor\n    buffer_node( graph &g ) : graph_node(g), internal::reservable_item_buffer<T>(),\n        forwarder_busy(false) {\n        my_successors.set_owner(this);\n        my_aggregator.initialize_handler(my_handler(this));\n        tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph,\n                                 static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this) );\n    }\n\n    //! Copy constructor\n    buffer_node( const buffer_node& src ) : graph_node(src.my_graph),\n        internal::reservable_item_buffer<T>(), receiver<T>(), sender<T>() {\n        forwarder_busy = false;\n        my_successors.set_owner(this);\n        my_aggregator.initialize_handler(my_handler(this));\n        tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph,\n                                 static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this) );\n    }\n\n    virtual ~buffer_node() {}\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n    //\n    // message sender implementation\n    //\n\n    //! 
Adds a new successor.\n    /** Adds successor r to the list of successors; may forward tasks.  */\n    /* override */ bool register_successor( successor_type &r ) {\n        buffer_operation op_data(reg_succ);\n        op_data.r = &r;\n        my_aggregator.execute(&op_data);\n        (void)enqueue_forwarding_task(op_data);\n        return true;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    /*override*/ void internal_add_built_successor( successor_type &r) {\n        buffer_operation op_data(add_blt_succ);\n        op_data.r = &r;\n        my_aggregator.execute(&op_data);\n    }\n\n    /*override*/ void internal_delete_built_successor( successor_type &r) {\n        buffer_operation op_data(del_blt_succ);\n        op_data.r = &r;\n        my_aggregator.execute(&op_data);\n    }\n\n    /*override*/ void internal_add_built_predecessor( predecessor_type &p) {\n        buffer_operation op_data(add_blt_pred);\n        op_data.p = &p;\n        my_aggregator.execute(&op_data);\n    }\n\n    /*override*/ void internal_delete_built_predecessor( predecessor_type &p) {\n        buffer_operation op_data(del_blt_pred);\n        op_data.p = &p;\n        my_aggregator.execute(&op_data);\n    }\n\n    /*override*/ size_t predecessor_count() {\n        buffer_operation op_data(blt_pred_cnt);\n        my_aggregator.execute(&op_data);\n        return op_data.cnt_val;\n    }\n\n    /*override*/ size_t successor_count() {\n        buffer_operation op_data(blt_succ_cnt);\n        my_aggregator.execute(&op_data);\n        return op_data.cnt_val;\n    }\n\n    /*override*/ void copy_predecessors( predecessor_vector_type &v ) {\n        buffer_operation op_data(blt_pred_cpy);\n        op_data.pvec = &v;\n        my_aggregator.execute(&op_data);\n    }\n\n    /*override*/ void copy_successors( successor_vector_type &v ) {\n        buffer_operation op_data(blt_succ_cpy);\n        op_data.svec = &v;\n        my_aggregator.execute(&op_data);\n    }\n#endif\n\n    //! 
Removes a successor.\n    /** Removes successor r from the list of successors.\n        It also calls r.remove_predecessor(*this) to remove this node as a predecessor. */\n    /* override */ bool remove_successor( successor_type &r ) {\n        r.remove_predecessor(*this);\n        buffer_operation op_data(rem_succ);\n        op_data.r = &r;\n        my_aggregator.execute(&op_data);\n        // even though this operation does not cause a forward, if we are the handler, and\n        // a forward is scheduled, we may be the first to reach this point after the aggregator,\n        // and so should check for the task.\n        (void)enqueue_forwarding_task(op_data);\n        return true;\n    }\n\n    //! Request an item from the buffer_node\n    /**  true = v contains the returned item<BR>\n         false = no item has been returned */\n    /* override */ bool try_get( T &v ) {\n        buffer_operation op_data(req_item);\n        op_data.elem = &v;\n        my_aggregator.execute(&op_data);\n        (void)enqueue_forwarding_task(op_data);\n        return (op_data.status==SUCCEEDED);\n    }\n\n    //! Reserves an item.\n    /**  false = no item can be reserved<BR>\n         true = an item is reserved */\n    /* override */ bool try_reserve( T &v ) {\n        buffer_operation op_data(res_item);\n        op_data.elem = &v;\n        my_aggregator.execute(&op_data);\n        (void)enqueue_forwarding_task(op_data);\n        return (op_data.status==SUCCEEDED);\n    }\n\n    //! Release a reserved item.\n    /**  true = item has been released and so remains in sender */\n    /* override */ bool try_release() {\n        buffer_operation op_data(rel_res);\n        my_aggregator.execute(&op_data);\n        (void)enqueue_forwarding_task(op_data);\n        return true;\n    }\n\n    //! 
Consumes a reserved item.\n    /** true = item is removed from sender and reservation removed */\n    /* override */ bool try_consume() {\n        buffer_operation op_data(con_res);\n        my_aggregator.execute(&op_data);\n        (void)enqueue_forwarding_task(op_data);\n        return true;\n    }\n\nprotected:\n\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    //! receive an item, return a task *if possible\n    /* override */ task *try_put_task(const T &t) {\n        buffer_operation op_data(t, put_item);\n        my_aggregator.execute(&op_data);\n        task *ft = grab_forwarding_task(op_data);\n        // sequencer_nodes can return failure (if an item has been previously inserted)\n        // We have to spawn the returned task if our own operation fails.\n\n        if(ft && op_data.status == FAILED) {\n            // we haven't succeeded queueing the item, but for some reason the\n            // call returned a task (if another request resulted in a successful\n            // forward this could happen.)  Queue the task and reset the pointer.\n            FLOW_SPAWN(*ft); ft = NULL;\n        }\n        else if(!ft && op_data.status == SUCCEEDED) {\n            ft = SUCCESSFULLY_ENQUEUED;\n        }\n        return ft;\n    }\n\n    /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n        internal::reservable_item_buffer<T, A>::reset();\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        my_successors.reset(f);\n        if (f&rf_extract) {\n            my_built_predecessors.receiver_extract(*this);\n        }\n#endif\n        forwarder_busy = false;\n    }\n\n    /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { }\n\n};  // buffer_node\n\n//! 
Forwards messages in FIFO order\ntemplate <typename T, typename A=cache_aligned_allocator<T> >\nclass queue_node : public buffer_node<T, A> {\nprotected:\n    typedef buffer_node<T, A> base_type;\n    typedef typename base_type::size_type size_type;\n    typedef typename base_type::buffer_operation queue_operation;\n\n    enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n\n    /* override */ void internal_forward_task(queue_operation *op) {\n        if (this->my_reserved || !this->my_item_valid(this->my_head)) {\n            __TBB_store_with_release(op->status, FAILED);\n            this->forwarder_busy = false;\n            return;\n        }\n        T i_copy;\n        task *last_task = NULL;\n        size_type counter = this->my_successors.size();\n        // Keep trying to send items while there is at least one accepting successor\n        while (counter>0 && this->my_item_valid(this->my_head)) {\n            this->copy_front(i_copy);\n            task *new_task = this->my_successors.try_put_task(i_copy);\n            if(new_task) {\n                this->destroy_front();\n                last_task = combine_tasks(last_task, new_task);\n            }\n            --counter;\n        }\n        op->ltask = last_task;\n        if (last_task && !counter)\n            __TBB_store_with_release(op->status, SUCCEEDED);\n        else {\n            __TBB_store_with_release(op->status, FAILED);\n            this->forwarder_busy = false;\n        }\n    }\n\n    /* override */ void internal_pop(queue_operation *op) {\n        if ( this->my_reserved || !this->my_item_valid(this->my_head)){\n            __TBB_store_with_release(op->status, FAILED);\n        }\n        else {\n            this->pop_front(*(op->elem));\n            __TBB_store_with_release(op->status, SUCCEEDED);\n        }\n    }\n    /* override */ void internal_reserve(queue_operation *op) {\n        if (this->my_reserved || !this->my_item_valid(this->my_head)) {\n            __TBB_store_with_release(op->status, 
FAILED);\n        }\n        else {\n            this->reserve_front(*(op->elem));\n            __TBB_store_with_release(op->status, SUCCEEDED);\n        }\n    }\n    /* override */ void internal_consume(queue_operation *op) {\n        this->consume_front();\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\npublic:\n    typedef T input_type;\n    typedef T output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n\n    //! Constructor\n    queue_node( graph &g ) : base_type(g) {\n        tbb::internal::fgt_node( tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph),\n                                 static_cast<receiver<input_type> *>(this),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n    //! Copy constructor\n    queue_node( const queue_node& src) : base_type(src) {\n        tbb::internal::fgt_node( tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph),\n                                 static_cast<receiver<input_type> *>(this),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n    /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n        base_type::reset(__TBB_PFG_RESET_ARG(f));\n    }\n};  // queue_node\n\n//! Forwards messages in sequence order\ntemplate< typename T, typename A=cache_aligned_allocator<T> >\nclass sequencer_node : public queue_node<T, A> {\n    internal::function_body< T, size_t > *my_sequencer;\n    // my_sequencer should be a benign function and must be callable\n    // from a parallel context.  Does this mean it needn't be reset?\npublic:\n    typedef T input_type;\n    typedef T output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n\n    //! 
Constructor\n    template< typename Sequencer >\n    sequencer_node( graph &g, const Sequencer& s ) : queue_node<T, A>(g),\n        my_sequencer(new internal::function_body_leaf< T, size_t, Sequencer>(s) ) {\n        tbb::internal::fgt_node( tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph),\n                                 static_cast<receiver<input_type> *>(this),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n    //! Copy constructor\n    sequencer_node( const sequencer_node& src ) : queue_node<T, A>(src),\n        my_sequencer( src.my_sequencer->clone() ) {\n        tbb::internal::fgt_node( tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph),\n                                 static_cast<receiver<input_type> *>(this),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n    //! Destructor\n    ~sequencer_node() { delete my_sequencer; }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\nprotected:\n    typedef typename buffer_node<T, A>::size_type size_type;\n    typedef typename buffer_node<T, A>::buffer_operation sequencer_operation;\n\n    enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n\nprivate:\n    /* override */ void internal_push(sequencer_operation *op) {\n        size_type tag = (*my_sequencer)(*(op->elem));\n#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES\n        if(tag < this->my_head) {\n            // have already emitted a message with this tag\n            __TBB_store_with_release(op->status, FAILED);\n            return;\n        }\n#endif\n        // cannot modify this->my_tail now; the buffer would be inconsistent.\n        size_t new_tail = (tag+1 > this->my_tail) ? 
tag+1 : this->my_tail;\n\n        if(this->size(new_tail) > this->capacity()) {\n            this->grow_my_array(this->size(new_tail));\n        }\n        this->my_tail = new_tail;\n        if(this->place_item(tag,*(op->elem))) {\n            __TBB_store_with_release(op->status, SUCCEEDED);\n        }\n        else {\n            // already have a message with this tag\n            __TBB_store_with_release(op->status, FAILED);\n        }\n    }\n};  // sequencer_node\n\n//! Forwards messages in priority order\ntemplate< typename T, typename Compare = std::less<T>, typename A=cache_aligned_allocator<T> >\nclass priority_queue_node : public buffer_node<T, A> {\npublic:\n    typedef T input_type;\n    typedef T output_type;\n    typedef buffer_node<T,A> base_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n\n    //! Constructor\n    priority_queue_node( graph &g ) : buffer_node<T, A>(g), mark(0) {\n        tbb::internal::fgt_node( tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph),\n                                 static_cast<receiver<input_type> *>(this),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n    //! 
Copy constructor\n    priority_queue_node( const priority_queue_node &src ) : buffer_node<T, A>(src), mark(0) {\n        tbb::internal::fgt_node( tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph),\n                                 static_cast<receiver<input_type> *>(this),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n\nprotected:\n\n    /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n        mark = 0;\n        base_type::reset(__TBB_PFG_RESET_ARG(f));\n    }\n\n    typedef typename buffer_node<T, A>::size_type size_type;\n    typedef typename buffer_node<T, A>::item_type item_type;\n    typedef typename buffer_node<T, A>::buffer_operation prio_operation;\n\n    enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n\n    /* override */ void handle_operations(prio_operation *op_list) {\n        prio_operation *tmp = op_list /*, *pop_list*/ ;\n        bool try_forwarding=false;\n        while (op_list) {\n            tmp = op_list;\n            op_list = op_list->next;\n            switch (tmp->type) {\n            case buffer_node<T, A>::reg_succ: this->internal_reg_succ(tmp); try_forwarding = true; break;\n            case buffer_node<T, A>::rem_succ: this->internal_rem_succ(tmp); break;\n            case buffer_node<T, A>::put_item: internal_push(tmp); try_forwarding = true; break;\n            case buffer_node<T, A>::try_fwd_task: internal_forward_task(tmp); break;\n            case buffer_node<T, A>::rel_res: internal_release(tmp); try_forwarding = true; break;\n            case buffer_node<T, A>::con_res: internal_consume(tmp); try_forwarding = true; break;\n            case buffer_node<T, A>::req_item: internal_pop(tmp); break;\n            case buffer_node<T, A>::res_item: internal_reserve(tmp); break;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n         
   case buffer_node<T, A>::add_blt_succ: this->internal_add_built_succ(tmp); break;\n            case buffer_node<T, A>::del_blt_succ: this->internal_del_built_succ(tmp); break;\n            case buffer_node<T, A>::add_blt_pred: this->internal_add_built_pred(tmp); break;\n            case buffer_node<T, A>::del_blt_pred: this->internal_del_built_pred(tmp); break;\n            case buffer_node<T, A>::blt_succ_cnt: this->internal_succ_cnt(tmp); break;\n            case buffer_node<T, A>::blt_pred_cnt: this->internal_pred_cnt(tmp); break;\n            case buffer_node<T, A>::blt_succ_cpy: this->internal_copy_succs(tmp); break;\n            case buffer_node<T, A>::blt_pred_cpy: this->internal_copy_preds(tmp); break;\n#endif\n            }\n        }\n        // process pops!  for now, no special pop processing\n        if (mark<this->my_tail) heapify();\n        if (try_forwarding && !this->forwarder_busy) {\n            task* tp = this->my_graph.root_task();\n            if(tp) {\n                this->forwarder_busy = true;\n                task *new_task = new(task::allocate_additional_child_of(*tp)) internal::\n                        forward_task_bypass\n                        < buffer_node<input_type, A> >(*this);\n                // tmp should point to the last item handled by the aggregator.  This is the operation\n                // the handling thread enqueued.  So modifying that record will be okay.\n                tbb::task *tmp1 = tmp->ltask;\n                tmp->ltask = combine_tasks(tmp1, new_task);\n            }\n        }\n    }\n\n    //! 
Tries to forward valid items to successors\n    /* override */ void internal_forward_task(prio_operation *op) {\n        T i_copy;\n        task * last_task = NULL; // flagged when a successor accepts\n        size_type counter = this->my_successors.size();\n\n        if (this->my_reserved || this->my_tail == 0) {\n            __TBB_store_with_release(op->status, FAILED);\n            this->forwarder_busy = false;\n            return;\n        }\n        // Keep trying to send while there exists an accepting successor\n        while (counter>0 && this->my_tail > 0) {\n            i_copy = this->get_my_item(0);\n            task * new_task = this->my_successors.try_put_task(i_copy);\n            if ( new_task ) {\n                last_task = combine_tasks(last_task, new_task);\n                this->destroy_item(0);  // we've forwarded this item\n                if (mark == this->my_tail) --mark;\n                if(--(this->my_tail)) { // didn't consume last item on heap\n                    this->move_item(0,this->my_tail);\n                }\n                if (this->my_tail > 1) // don't reheap for heap of size 1\n                    reheap();\n            }\n            --counter;\n        }\n        op->ltask = last_task;\n        if (last_task && !counter)\n            __TBB_store_with_release(op->status, SUCCEEDED);\n        else {\n            __TBB_store_with_release(op->status, FAILED);\n            this->forwarder_busy = false;\n        }\n    }\n\n    /* override */ void internal_push(prio_operation *op) {\n        if ( this->my_tail >= this->my_array_size )\n            this->grow_my_array( this->my_tail + 1 );\n        (void) this->place_item(this->my_tail, *(op->elem));\n        ++(this->my_tail);\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n\n    /* override */ void internal_pop(prio_operation *op) {\n        // if empty or already reserved, don't pop\n        if ( this->my_reserved == true || this->my_tail == 0 ) {\n           
 __TBB_store_with_release(op->status, FAILED);\n            return;\n        }\n        if (mark<this->my_tail &&  // item pushed, no re-heap\n            compare(this->get_my_item(0),\n                    this->get_my_item(this->my_tail-1))) {\n            // there are newly pushed elems; last one higher than top\n            // copy the data\n            this->fetch_item(this->my_tail-1, *(op->elem));\n            __TBB_store_with_release(op->status, SUCCEEDED);\n            --(this->my_tail);\n            return;\n        }\n        // extract and push the last element down heap\n        *(op->elem) = this->get_my_item(0); // copy the data, item 0 still valid\n        __TBB_store_with_release(op->status, SUCCEEDED);\n        if (mark == this->my_tail) --mark;\n        __TBB_ASSERT(this->my_item_valid(this->my_tail - 1), NULL);\n        if(--(this->my_tail)) {\n            // there were two or more items in heap.  Move the\n            // last item to the top of the heap\n            this->set_my_item(0,this->get_my_item(this->my_tail));\n        }\n        this->destroy_item(this->my_tail);\n        if (this->my_tail > 1) // don't reheap for heap of size 1\n            reheap();\n    }\n\n    /* override */ void internal_reserve(prio_operation *op) {\n        if (this->my_reserved == true || this->my_tail == 0) {\n            __TBB_store_with_release(op->status, FAILED);\n            return;\n        }\n        this->my_reserved = true;\n        *(op->elem) = reserved_item = this->get_my_item(0);\n        if (mark == this->my_tail) --mark;\n        --(this->my_tail);\n        __TBB_store_with_release(op->status, SUCCEEDED);\n        this->set_my_item(0, this->get_my_item(this->my_tail));\n        this->destroy_item(this->my_tail);\n        if (this->my_tail > 1)\n            reheap();\n    }\n\n    /* override */ void internal_consume(prio_operation *op) {\n        this->my_reserved = false;\n        __TBB_store_with_release(op->status, SUCCEEDED);\n    }\n    
/* override */ void internal_release(prio_operation *op) {\n        if (this->my_tail >= this->my_array_size)\n            this->grow_my_array( this->my_tail + 1 );\n        this->set_my_item(this->my_tail, reserved_item);\n        ++(this->my_tail);\n        this->my_reserved = false;\n        __TBB_store_with_release(op->status, SUCCEEDED);\n        heapify();\n    }\nprivate:\n    Compare compare;\n    size_type mark;\n    input_type reserved_item;\n\n    // turn array into heap\n    void heapify() {\n        if (!mark) mark = 1;\n        for (; mark<this->my_tail; ++mark) { // for each unheaped element\n            size_type cur_pos = mark;\n            input_type to_place;\n            this->fetch_item(mark,to_place);\n            do { // push to_place up the heap\n                size_type parent = (cur_pos-1)>>1;\n                if (!compare(this->get_my_item(parent), to_place))\n                    break;\n                this->move_item(cur_pos, parent);\n                cur_pos = parent;\n            } while( cur_pos );\n            (void) this->place_item(cur_pos, to_place);\n        }\n    }\n\n    // otherwise heapified array with new root element; rearrange to heap\n    void reheap() {\n        size_type cur_pos=0, child=1;\n        while (child < mark) {\n            size_type target = child;\n            if (child+1<mark &&\n                compare(this->get_my_item(child),\n                        this->get_my_item(child+1)))\n                ++target;\n            // target now has the higher priority child\n            if (compare(this->get_my_item(target),\n                        this->get_my_item(cur_pos)))\n                break;\n            // swap\n            this->swap_items(cur_pos, target);\n            cur_pos = target;\n            child = (cur_pos<<1)+1;\n        }\n    }\n};  // priority_queue_node\n\n//! 
Forwards messages only if the threshold has not been reached\n/** This node forwards items until its threshold is reached.\n    It contains no buffering.  If the downstream node rejects, the\n    message is dropped. */\ntemplate< typename T >\nclass limiter_node : public graph_node, public receiver< T >, public sender< T > {\nprotected:\n    using graph_node::my_graph;\npublic:\n    typedef T input_type;\n    typedef T output_type;\n    typedef sender< input_type > predecessor_type;\n    typedef receiver< output_type > successor_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    typedef std::vector<successor_type *> successor_vector_type;\n    typedef std::vector<predecessor_type *> predecessor_vector_type;\n#endif\n\nprivate:\n    size_t my_threshold;\n    size_t my_count; //number of successful puts\n    size_t my_tries; //number of active put attempts\n    internal::reservable_predecessor_cache< T, spin_mutex > my_predecessors;\n    spin_mutex my_mutex;\n    internal::broadcast_cache< T > my_successors;\n    int init_decrement_predecessors;\n\n    friend class internal::forward_task_bypass< limiter_node<T> >;\n\n    // Let decrementer call decrement_counter()\n    friend class internal::decrementer< limiter_node<T> >;\n\n    bool check_conditions() {  // always called under lock\n        return ( my_count + my_tries < my_threshold && !my_predecessors.empty() && !my_successors.empty() );\n    }\n\n    // only returns a valid task pointer or NULL, never SUCCESSFULLY_ENQUEUED\n    task *forward_task() {\n        input_type v;\n        task *rval = NULL;\n        bool reserved = false;\n            {\n                spin_mutex::scoped_lock lock(my_mutex);\n                if ( check_conditions() )\n                    ++my_tries;\n                else\n                    return NULL;\n            }\n\n        //SUCCESS \n        // if we can reserve and can put, we consume the reservation \n        // we increment the count and decrement the tries\n        if ( 
(my_predecessors.try_reserve(v)) == true ){\n            reserved=true;\n            if ( (rval = my_successors.try_put_task(v)) != NULL ){\n                {\n                    spin_mutex::scoped_lock lock(my_mutex);\n                    ++my_count;\n                    --my_tries;\n                    my_predecessors.try_consume();\n                    if ( check_conditions() ) {\n                        task* tp = this->my_graph.root_task();\n                        if ( tp ) {\n                            task *rtask = new ( task::allocate_additional_child_of( *tp ) )\n                                internal::forward_task_bypass< limiter_node<T> >( *this );\n                            FLOW_SPAWN (*rtask);\n                        }\n                    }\n                }\n                return rval;\n            }\n        }\n        //FAILURE\n        //if we can't reserve, we decrement the tries\n        //if we can reserve but can't put, we decrement the tries and release the reservation\n        {\n            spin_mutex::scoped_lock lock(my_mutex);\n            --my_tries;\n            if (reserved) my_predecessors.try_release();\n            if ( check_conditions() ) {\n                task* tp = this->my_graph.root_task();\n                if ( tp ) {\n                    task *rtask = new ( task::allocate_additional_child_of( *tp ) )\n                        internal::forward_task_bypass< limiter_node<T> >( *this );\n                    __TBB_ASSERT(!rval, \"Have two tasks to handle\");\n                    return rtask;\n                }\n            }\n            return rval;\n        }\n    }\n\n    void forward() {\n        __TBB_ASSERT(false, \"Should never be called\");\n        return;\n    }\n\n    task * decrement_counter() {\n        {\n            spin_mutex::scoped_lock lock(my_mutex);\n            if(my_count) --my_count;\n        }\n        return forward_task();\n    }\n\npublic:\n    //! 
The internal receiver< continue_msg > that decrements the count\n    internal::decrementer< limiter_node<T> > decrement;\n\n    //! Constructor\n    limiter_node(graph &g, size_t threshold, int num_decrement_predecessors=0) :\n        graph_node(g), my_threshold(threshold), my_count(0), my_tries(0),\n        init_decrement_predecessors(num_decrement_predecessors),\n        decrement(num_decrement_predecessors)\n    {\n        my_predecessors.set_owner(this);\n        my_successors.set_owner(this);\n        decrement.set_owner(this);\n        tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph,\n                                 static_cast<receiver<input_type> *>(this), static_cast<receiver<continue_msg> *>(&decrement),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n    //! Copy constructor\n    limiter_node( const limiter_node& src ) :\n        graph_node(src.my_graph), receiver<T>(), sender<T>(),\n        my_threshold(src.my_threshold), my_count(0), my_tries(0),\n        init_decrement_predecessors(src.init_decrement_predecessors),\n        decrement(src.init_decrement_predecessors)\n    {\n        my_predecessors.set_owner(this);\n        my_successors.set_owner(this);\n        decrement.set_owner(this);\n        tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph,\n                                 static_cast<receiver<input_type> *>(this), static_cast<receiver<continue_msg> *>(&decrement),\n                                 static_cast<sender<output_type> *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n    //! 
Replace the current successor with this new successor\n    /* override */ bool register_successor( receiver<output_type> &r ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        bool was_empty = my_successors.empty();\n        my_successors.register_successor(r);\n        //spawn a forward task if this is the only successor\n        if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) {\n            task* tp = this->my_graph.root_task();\n            if ( tp ) {\n                FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) )\n                            internal::forward_task_bypass < limiter_node<T> >( *this ) ) );\n            }\n        }\n        return true;\n    }\n\n    //! Removes a successor from this node\n    /** r.remove_predecessor(*this) is also called. */\n    /* override */ bool remove_successor( receiver<output_type> &r ) {\n        r.remove_predecessor(*this);\n        my_successors.remove_successor(r);\n        return true;\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    /*override*/void internal_add_built_successor(receiver<output_type> &src) {\n        my_successors.internal_add_built_successor(src);\n    }\n\n    /*override*/void internal_delete_built_successor(receiver<output_type> &src) {\n        my_successors.internal_delete_built_successor(src);\n    }\n\n    /*override*/size_t successor_count() { return my_successors.successor_count(); }\n\n    /*override*/ void copy_successors(successor_vector_type &v) {\n        my_successors.copy_successors(v);\n    }\n\n    /*override*/void internal_add_built_predecessor(sender<output_type> &src) {\n        my_predecessors.internal_add_built_predecessor(src);\n    }\n\n    /*override*/void internal_delete_built_predecessor(sender<output_type> &src) {\n        my_predecessors.internal_delete_built_predecessor(src);\n    }\n\n    /*override*/size_t predecessor_count() { return my_predecessors.predecessor_count(); }\n\n    /*override*/ void 
copy_predecessors(predecessor_vector_type &v) {\n        my_predecessors.copy_predecessors(v);\n    }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n    //! Adds src to the list of cached predecessors.\n    /* override */ bool register_predecessor( predecessor_type &src ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        my_predecessors.add( src );\n        task* tp = this->my_graph.root_task();\n        if ( my_count + my_tries < my_threshold && !my_successors.empty() && tp ) {\n            FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) )\n                        internal::forward_task_bypass < limiter_node<T> >( *this ) ) );\n        }\n        return true;\n    }\n\n    //! Removes src from the list of cached predecessors.\n    /* override */ bool remove_predecessor( predecessor_type &src ) {\n        my_predecessors.remove( src );\n        return true;\n    }\n\nprotected:\n\n    template< typename R, typename B > friend class run_and_put_task;\n    template<typename X, typename Y> friend class internal::broadcast_cache;\n    template<typename X, typename Y> friend class internal::round_robin_cache;\n    //! 
Puts an item to this receiver\n    /* override */ task *try_put_task( const T &t ) {\n        {\n            spin_mutex::scoped_lock lock(my_mutex);\n            if ( my_count + my_tries >= my_threshold )\n                return NULL;\n            else\n                ++my_tries;\n        }\n\n        task * rtask = my_successors.try_put_task(t);\n\n        if ( !rtask ) {  // try_put_task failed.\n            spin_mutex::scoped_lock lock(my_mutex);\n            --my_tries;\n            task* tp = this->my_graph.root_task();\n            if ( check_conditions() && tp ) {\n                rtask = new ( task::allocate_additional_child_of( *tp ) )\n                    internal::forward_task_bypass< limiter_node<T> >( *this );\n            }\n        }\n        else {\n            spin_mutex::scoped_lock lock(my_mutex);\n            ++my_count;\n            --my_tries;\n             }\n        return rtask;\n    }\n\n    /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n        my_count = 0;\n        my_predecessors.reset(__TBB_PFG_RESET_ARG(f));\n        decrement.reset_receiver(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        my_successors.reset(f);\n#endif\n    }\n\n    /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); }\n};  // limiter_node\n\n#include \"internal/_flow_graph_join_impl.h\"\n\nusing internal::reserving_port;\nusing internal::queueing_port;\nusing internal::tag_matching_port;\nusing internal::input_port;\nusing internal::tag_value;\nusing internal::NO_TAG;\n\ntemplate<typename OutputTuple, graph_buffer_policy JP=queueing> class join_node;\n\ntemplate<typename OutputTuple>\nclass join_node<OutputTuple,reserving>: public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value, reserving_port, OutputTuple, reserving> {\nprivate:\n    static const int N = tbb::flow::tuple_size<OutputTuple>::value;\n    typedef typename 
internal::unfolded_join_node<N, reserving_port, OutputTuple, reserving> unfolded_type;\npublic:\n    typedef OutputTuple output_type;\n    typedef typename unfolded_type::input_ports_type input_ports_type;\n    join_node(graph &g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph,\n                                            this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    join_node(const join_node &other) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph,\n                                            this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n};\n\ntemplate<typename OutputTuple>\nclass join_node<OutputTuple,queueing>: public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value, queueing_port, OutputTuple, queueing> {\nprivate:\n    static const int N = tbb::flow::tuple_size<OutputTuple>::value;\n    typedef typename internal::unfolded_join_node<N, queueing_port, OutputTuple, queueing> unfolded_type;\npublic:\n    typedef OutputTuple output_type;\n    typedef typename unfolded_type::input_ports_type input_ports_type;\n    join_node(graph &g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph,\n                                            this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    join_node(const join_node &other) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph,\n                                            this->input_ports(), static_cast< 
sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n};\n\n// template for tag_matching join_node\ntemplate<typename OutputTuple>\nclass join_node<OutputTuple, tag_matching> : public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value,\n      tag_matching_port, OutputTuple, tag_matching> {\nprivate:\n    static const int N = tbb::flow::tuple_size<OutputTuple>::value;\n    typedef typename internal::unfolded_join_node<N, tag_matching_port, OutputTuple, tag_matching> unfolded_type;\npublic:\n    typedef OutputTuple output_type;\n    typedef typename unfolded_type::input_ports_type input_ports_type;\n\n    template<typename __TBB_B0, typename __TBB_B1>\n    join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1) : unfolded_type(g, b0, b1) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2>\n    join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2) : unfolded_type(g, b0, b1, b2) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3>\n    join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3) : unfolded_type(g, b0, b1, b2, b3) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), 
static_cast< sender< output_type > *>(this) );\n    }\n    template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3, typename __TBB_B4>\n    join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4) :\n            unfolded_type(g, b0, b1, b2, b3, b4) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n#if __TBB_VARIADIC_MAX >= 6\n    template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3, typename __TBB_B4,\n        typename __TBB_B5>\n    join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5) :\n            unfolded_type(g, b0, b1, b2, b3, b4, b5) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n#endif\n#if __TBB_VARIADIC_MAX >= 7\n    template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3, typename __TBB_B4,\n        typename __TBB_B5, typename __TBB_B6>\n    join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6) :\n            unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n#endif\n#if __TBB_VARIADIC_MAX >= 8\n    template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3, typename __TBB_B4,\n        typename __TBB_B5, typename __TBB_B6, typename __TBB_B7>\n    
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6,\n            __TBB_B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n#endif\n#if __TBB_VARIADIC_MAX >= 9\n    template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3, typename __TBB_B4,\n        typename __TBB_B5, typename __TBB_B6, typename __TBB_B7, typename __TBB_B8>\n    join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6,\n            __TBB_B7 b7, __TBB_B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n#endif\n#if __TBB_VARIADIC_MAX >= 10\n    template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3, typename __TBB_B4,\n        typename __TBB_B5, typename __TBB_B6, typename __TBB_B7, typename __TBB_B8, typename __TBB_B9>\n    join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6,\n            __TBB_B7 b7, __TBB_B8 b8, __TBB_B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) {\n        tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n#endif\n    join_node(const join_node &other) : unfolded_type(other) {\n        
tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,\n                                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n\n};\n\n// indexer node\n#include \"internal/_flow_graph_indexer_impl.h\"\n\nstruct indexer_null_type {};\n\ntemplate<typename T0, typename T1=indexer_null_type, typename T2=indexer_null_type, typename T3=indexer_null_type,\n                      typename T4=indexer_null_type, typename T5=indexer_null_type, typename T6=indexer_null_type,\n                      typename T7=indexer_null_type, typename T8=indexer_null_type, typename T9=indexer_null_type> class indexer_node;\n\n//indexer node specializations\ntemplate<typename T0>\nclass indexer_node<T0> : public internal::unfolded_indexer_node<tuple<T0> > {\nprivate:\n    static const int N = 1;\npublic:\n    typedef tuple<T0> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n     void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    
}\n#endif\n};\n\ntemplate<typename T0, typename T1>\nclass indexer_node<T0, T1> : public internal::unfolded_indexer_node<tuple<T0, T1> > {\nprivate:\n    static const int N = 2;\npublic:\n    typedef tuple<T0, T1> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n     void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n\ntemplate<typename T0, typename T1, typename T2>\nclass indexer_node<T0, T1, T2> : public internal::unfolded_indexer_node<tuple<T0, T1, T2> > {\nprivate:\n    static const int N = 3;\npublic:\n    typedef tuple<T0, T1, T2> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1, T2> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, 
&this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n        void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n\ntemplate<typename T0, typename T1, typename T2, typename T3>\nclass indexer_node<T0, T1, T2, T3> : public internal::unfolded_indexer_node<tuple<T0, T1, T2, T3> > {\nprivate:\n    static const int N = 4;\npublic:\n    typedef tuple<T0, T1, T2, T3> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n\ntemplate<typename T0, typename T1, typename T2, typename T3, typename T4>\nclass indexer_node<T0, T1, T2, T3, T4> : public internal::unfolded_indexer_node<tuple<T0, T1, T2, T3, T4> > {\nprivate:\n    static const int N = 5;\npublic:\n    typedef tuple<T0, T1, T2, T3, T4> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        
tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n\n#if __TBB_VARIADIC_MAX >= 6\ntemplate<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>\nclass indexer_node<T0, T1, T2, T3, T4, T5> : public internal::unfolded_indexer_node<tuple<T0, T1, T2, T3, T4, T5> > {\nprivate:\n    static const int N = 6;\npublic:\n    typedef tuple<T0, T1, T2, T3, T4, T5> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n#endif 
//variadic max 6\n\n#if __TBB_VARIADIC_MAX >= 7\ntemplate<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5,\n         typename T6>\nclass indexer_node<T0, T1, T2, T3, T4, T5, T6> : public internal::unfolded_indexer_node<tuple<T0, T1, T2, T3, T4, T5, T6> > {\nprivate:\n    static const int N = 7;\npublic:\n    typedef tuple<T0, T1, T2, T3, T4, T5, T6> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T6> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n#endif //variadic max 7\n\n#if __TBB_VARIADIC_MAX >= 8\ntemplate<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5,\n         typename T6, typename T7>\nclass indexer_node<T0, T1, T2, T3, T4, T5, T6, T7> : public internal::unfolded_indexer_node<tuple<T0, T1, T2, T3, T4, T5, T6, T7> > {\nprivate:\n    static const int N = 8;\npublic:\n    typedef tuple<T0, T1, T2, T3, T4, T5, T6, T7> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T6, T7> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        
tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n#endif //variadic max 8\n\n#if __TBB_VARIADIC_MAX >= 9\ntemplate<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5,\n         typename T6, typename T7, typename T8>\nclass indexer_node<T0, T1, T2, T3, T4, T5, T6, T7, T8> : public internal::unfolded_indexer_node<tuple<T0, T1, T2, T3, T4, T5, T6, T7, T8> > {\nprivate:\n    static const int N = 9;\npublic:\n    typedef tuple<T0, T1, T2, T3, T4, T5, T6, T7, T8> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T6, T7, T8> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override 
*/ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n#endif //variadic max 9\n\n#if __TBB_VARIADIC_MAX >= 10\ntemplate<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5,\n         typename T6, typename T7, typename T8, typename T9>\nclass indexer_node/*default*/ : public internal::unfolded_indexer_node<tuple<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> > {\nprivate:\n    static const int N = 10;\npublic:\n    typedef tuple<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> InputTuple;\n    typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> output_type;\n    typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;\n    indexer_node(graph& g) : unfolded_type(g) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n    // Copy constructor\n    indexer_node( const indexer_node& other ) : unfolded_type(other) {\n        tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,\n                                           this->input_ports(), static_cast< sender< output_type > *>(this) );\n    }\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n    /* override */ void set_name( const char *name ) {\n        tbb::internal::fgt_node_desc( this, name );\n    }\n#endif\n};\n#endif //variadic max 10\n\n//! Makes an edge between a single predecessor and a single successor\ntemplate< typename T >\ninline void make_edge( sender<T> &p, receiver<T> &s ) {\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    s.internal_add_built_predecessor(p);\n    p.internal_add_built_successor(s);\n#endif\n    p.register_successor( s );\n    tbb::internal::fgt_make_edge( &p, &s );\n}\n\n//! 
Makes an edge between a single predecessor and a single successor\ntemplate< typename T >\ninline void remove_edge( sender<T> &p, receiver<T> &s ) {\n    p.remove_successor( s );\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    // TODO: should we try to remove p from the predecessor list of s, in case the edge is reversed?\n    p.internal_delete_built_successor(s);\n    s.internal_delete_built_predecessor(p);\n#endif\n    tbb::internal::fgt_remove_edge( &p, &s );\n}\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\ntemplate<typename C >\ntemplate< typename S >\nvoid edge_container<C>::sender_extract( S &s ) {\n    edge_vector e = built_edges;\n    for ( typename edge_vector::iterator i = e.begin(); i != e.end(); ++i ) {\n        remove_edge(s, **i);\n    }\n}\n\ntemplate<typename C >\ntemplate< typename R >\nvoid edge_container<C>::receiver_extract( R &r ) {\n    edge_vector e = built_edges;\n    for ( typename edge_vector::iterator i = e.begin(); i != e.end(); ++i ) {\n        remove_edge(**i, r);\n    }\n}\n#endif\n\n//! 
Returns a copy of the body from a function or continue node\ntemplate< typename Body, typename Node >\nBody copy_body( Node &n ) {\n    return n.template copy_function_object<Body>();\n}\n\n} // interface7\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    using interface7::reset_flags;\n    using interface7::rf_reset_protocol;\n    using interface7::rf_reset_bodies;\n    using interface7::rf_extract;\n#endif\n\n    using interface7::graph;\n    using interface7::graph_node;\n    using interface7::continue_msg;\n    using interface7::sender;\n    using interface7::receiver;\n    using interface7::continue_receiver;\n\n    using interface7::source_node;\n    using interface7::function_node;\n    using interface7::multifunction_node;\n    using interface7::split_node;\n    using interface7::internal::output_port;\n    using interface7::indexer_node;\n    using interface7::internal::tagged_msg;\n    using interface7::internal::cast_to;\n    using interface7::internal::is_a;\n    using interface7::continue_node;\n    using interface7::overwrite_node;\n    using interface7::write_once_node;\n    using interface7::broadcast_node;\n    using interface7::buffer_node;\n    using interface7::queue_node;\n    using interface7::sequencer_node;\n    using interface7::priority_queue_node;\n    using interface7::limiter_node;\n    using namespace interface7::internal::graph_policy_namespace;\n    using interface7::join_node;\n    using interface7::input_port;\n    using interface7::copy_body;\n    using interface7::make_edge;\n    using interface7::remove_edge;\n    using interface7::internal::NO_TAG;\n    using interface7::internal::tag_value;\n\n} // flow\n} // tbb\n\n#undef __TBB_PFG_RESET_ARG\n#undef __TBB_COMMA\n\n#endif // __TBB_flow_graph_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/governor.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include <stdio.h>\n#include <stdlib.h>\n#include \"governor.h\"\n#include \"tbb_main.h\"\n#include \"scheduler.h\"\n#include \"market.h\"\n#include \"arena.h\"\n\n#include \"tbb/task_scheduler_init.h\"\n\n#include \"dynamic_link.h\"\n\nnamespace tbb {\nnamespace internal {\n\n//------------------------------------------------------------------------\n// governor\n//------------------------------------------------------------------------\n\n#if __TBB_SURVIVE_THREAD_SWITCH\n// Support for interoperability with Intel(R) Cilk(TM) Plus.\n\n#if _WIN32\n#define CILKLIB_NAME \"cilkrts20.dll\"\n#else\n#define CILKLIB_NAME \"libcilkrts.so\"\n#endif\n\n//! Handler for interoperation with cilkrts library.\nstatic __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u,\n                                                 struct __cilk_tbb_stack_op_thunk o);\n\n//! Table describing how to link the handlers.\nstatic const dynamic_link_descriptor CilkLinkTable[] = {\n    { \"__cilkrts_watch_stack\", (pointer_to_handler*)(void*)(&watch_stack_handler) }\n};\n\nstatic atomic<do_once_state> cilkrts_load_state;\n\nbool initialize_cilk_interop() {\n    // Pinning can fail. 
This is a normal situation, and means that the current\n    // thread does not use cilkrts and consequently does not need interop.\n    return dynamic_link( CILKLIB_NAME, CilkLinkTable, 1,  /*handle=*/0, DYNAMIC_LINK_GLOBAL );\n}\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n\nnamespace rml {\n    tbb_server* make_private_server( tbb_client& client );\n}\n\nvoid governor::acquire_resources () {\n#if USE_PTHREAD\n    int status = theTLS.create(auto_terminate);\n#else\n    int status = theTLS.create();\n#endif\n    if( status )\n        handle_perror(status, \"TBB failed to initialize task scheduler TLS\\n\");\n    is_speculation_enabled = cpu_has_speculation();\n}\n\nvoid governor::release_resources () {\n    theRMLServerFactory.close();\n#if TBB_USE_ASSERT\n    if( __TBB_InitOnce::initialization_done() && theTLS.get() ) \n        runtime_warning( \"TBB is unloaded while tbb::task_scheduler_init object is alive?\" );\n#endif\n    int status = theTLS.destroy();\n    if( status )\n        runtime_warning(\"failed to destroy task scheduler TLS: %s\", strerror(status));\n    dynamic_unlink_all();\n}\n\nrml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) {\n    rml::tbb_server* server = NULL;\n    if( !UsePrivateRML ) {\n        ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client );\n        if( status != ::rml::factory::st_success ) {\n            UsePrivateRML = true;\n            runtime_warning( \"rml::tbb_factory::make_server failed with status %x, falling back on private rml\", status );\n        }\n    }\n    if ( !server ) {\n        __TBB_ASSERT( UsePrivateRML, NULL );\n        server = rml::make_private_server( client );\n    }\n    __TBB_ASSERT( server, \"Failed to create RML server\" );\n    return server;\n}\n\nvoid governor::sign_on(generic_scheduler* s) {\n    __TBB_ASSERT( !theTLS.get(), NULL );\n    theTLS.set(s);\n#if __TBB_SURVIVE_THREAD_SWITCH\n    if( watch_stack_handler ) {\n        
__cilk_tbb_stack_op_thunk o;\n        o.routine = &stack_op_handler;\n        o.data = s;\n        if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) {\n            // Failed to register with cilkrts, make sure we are clean\n            s->my_cilk_unwatch_thunk.routine = NULL;\n        }\n#if TBB_USE_ASSERT\n        else\n            s->my_cilk_state = generic_scheduler::cs_running;\n#endif /* TBB_USE_ASSERT */\n    }\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n}\n\nvoid governor::sign_off(generic_scheduler* s) {\n    suppress_unused_warning(s);\n    __TBB_ASSERT( theTLS.get()==s, \"attempt to unregister a wrong scheduler instance\" );\n    theTLS.set(NULL);\n#if __TBB_SURVIVE_THREAD_SWITCH\n    __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk;\n    if ( ut.routine )\n       (*ut.routine)(ut.data);\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n}\n\nvoid governor::setBlockingTerminate(const task_scheduler_init *tsi) {\n    __TBB_ASSERT(!IsBlockingTerminationInProgress, \"It's impossible to create task_scheduler_init while blocking termination is in progress.\");\n    if (BlockingTSI)\n        throw_exception(eid_blocking_sch_init);\n    BlockingTSI = tsi;\n}\n\ngeneric_scheduler* governor::init_scheduler( unsigned num_threads, stack_size_type stack_size, bool auto_init ) {\n    if( !__TBB_InitOnce::initialization_done() )\n        DoOneTimeInitializations();\n    generic_scheduler* s = theTLS.get();\n    if( s ) {\n        s->my_ref_count += 1;\n        return s;\n    }\n#if __TBB_SURVIVE_THREAD_SWITCH\n    atomic_do_once( &initialize_cilk_interop, cilkrts_load_state );\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n    if( (int)num_threads == task_scheduler_init::automatic )\n        num_threads = default_num_threads();\n    s = generic_scheduler::create_master( \n            market::create_arena( num_threads - 1, stack_size ? 
stack_size : ThreadStackSize ) );\n    __TBB_ASSERT(s, \"Somehow a local scheduler creation for a master thread failed\");\n    s->my_auto_initialized = auto_init;\n    return s;\n}\n\nvoid governor::terminate_scheduler( generic_scheduler* s, const task_scheduler_init* tsi_ptr ) {\n    __TBB_ASSERT( s == theTLS.get(), \"Attempt to terminate non-local scheduler instance\" );\n    if (--(s->my_ref_count)) {\n        if (BlockingTSI && BlockingTSI==tsi_ptr) {\n            // can't throw exception, because this is on dtor's call chain\n            fprintf(stderr, \"Attempt to terminate nested scheduler in blocking mode\\n\");\n            exit(1);\n        }\n    } else {\n#if TBB_USE_ASSERT\n        if (BlockingTSI) {\n            __TBB_ASSERT( BlockingTSI == tsi_ptr, \"For blocking termination last terminate_scheduler must be blocking.\" );\n            IsBlockingTerminationInProgress = true;\n        }\n#endif\n        s->cleanup_master();\n        BlockingTSI = NULL;\n#if TBB_USE_ASSERT\n        IsBlockingTerminationInProgress = false;\n#endif\n    }\n}\n\nvoid governor::auto_terminate(void* arg){\n    generic_scheduler* s = static_cast<generic_scheduler*>(arg);\n    if( s && s->my_auto_initialized ) {\n        if( !--(s->my_ref_count) ) {\n            __TBB_ASSERT( !BlockingTSI, \"Blocking auto-terminate is not supported.\" );\n            // If the TLS slot is already cleared by OS or underlying concurrency\n            // runtime, restore its value.\n            if ( !theTLS.get() )\n                theTLS.set(s);\n            else __TBB_ASSERT( s == theTLS.get(), NULL );\n            s->cleanup_master();\n            __TBB_ASSERT( !theTLS.get(), \"cleanup_master has not cleared its TLS slot\" );\n        }\n    }\n}\n\nvoid governor::print_version_info () {\n    if ( UsePrivateRML )\n        PrintExtraVersionInfo( \"RML\", \"private\" );\n    else {\n        PrintExtraVersionInfo( \"RML\", \"shared\" );\n        theRMLServerFactory.call_with_server_info( 
PrintRMLVersionInfo, (void*)\"\" );\n    }\n#if __TBB_SURVIVE_THREAD_SWITCH\n    if( watch_stack_handler )\n        PrintExtraVersionInfo( \"CILK\", CILKLIB_NAME );\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n}\n\nvoid governor::initialize_rml_factory () {\n    ::rml::factory::status_type res = theRMLServerFactory.open(); \n    UsePrivateRML = res != ::rml::factory::st_success;\n}\n\n#if __TBB_SURVIVE_THREAD_SWITCH\n__cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) {\n    __TBB_ASSERT(data,NULL);\n    generic_scheduler* s = static_cast<generic_scheduler*>(data);\n#if TBB_USE_ASSERT\n    void* current = theTLS.get();\n#if _WIN32||_WIN64\n    uintptr_t thread_id = GetCurrentThreadId();\n#else\n    uintptr_t thread_id = uintptr_t(pthread_self());\n#endif\n\n#endif /* TBB_USE_ASSERT */\n    switch( op ) {\n        default:\n            __TBB_ASSERT( 0, \"invalid op\" );\n        case CILK_TBB_STACK_ADOPT: {\n            __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo || \n                          current==s && s->my_cilk_state==generic_scheduler::cs_running, \"invalid adoption\" );\n#if TBB_USE_ASSERT\n            if( current==s ) \n                runtime_warning( \"redundant adoption of %p by thread %p\\n\", s, (void*)thread_id );\n            s->my_cilk_state = generic_scheduler::cs_running;\n#endif /* TBB_USE_ASSERT */\n            theTLS.set(s);\n            break;\n        }\n        case CILK_TBB_STACK_ORPHAN: {\n            __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, \"invalid orphaning\" ); \n#if TBB_USE_ASSERT\n            s->my_cilk_state = generic_scheduler::cs_limbo;\n#endif /* TBB_USE_ASSERT */\n            theTLS.set(NULL);\n            break;\n        }\n        case CILK_TBB_STACK_RELEASE: {\n            __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo || \n                          current==s && 
s->my_cilk_state==generic_scheduler::cs_running, \"invalid release\" );\n#if TBB_USE_ASSERT\n            s->my_cilk_state = generic_scheduler::cs_freed;\n#endif /* TBB_USE_ASSERT */\n            s->my_cilk_unwatch_thunk.routine = NULL;\n            auto_terminate( s );\n        } \n    }\n    return 0;\n}\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n\n} // namespace internal\n\n//------------------------------------------------------------------------\n// task_scheduler_init\n//------------------------------------------------------------------------\n\nusing namespace internal;\n\n/** Left out-of-line for the sake of the backward binary compatibility **/\nvoid task_scheduler_init::initialize( int number_of_threads ) {\n    initialize( number_of_threads, 0 );\n}\n\nvoid task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) {\n#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS\n    uintptr_t new_mode = thread_stack_size & propagation_mode_mask;\n#endif\n    thread_stack_size &= ~(stack_size_type)propagation_mode_mask;\n    if( number_of_threads!=deferred ) {\n        bool blocking_terminate = false;\n        if (my_scheduler == (scheduler*)wait_workers_in_terminate_flag) {\n            blocking_terminate = true;\n            my_scheduler = NULL;\n        }\n        __TBB_ASSERT( !my_scheduler, \"task_scheduler_init already initialized\" );\n        __TBB_ASSERT( number_of_threads==-1 || number_of_threads>=1,\n                    \"number_of_threads for task_scheduler_init must be -1 or positive\" );\n        if (blocking_terminate)\n            governor::setBlockingTerminate(this);\n        internal::generic_scheduler *s = governor::init_scheduler( number_of_threads, thread_stack_size, /*auto_init=*/false );\n#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS\n        if ( s->master_outermost_level() ) {\n            uintptr_t &vt = s->default_context()->my_version_and_traits;\n            uintptr_t prev_mode = vt & 
task_group_context::exact_exception ? propagation_mode_exact : 0;\n            vt = new_mode & propagation_mode_exact ? vt | task_group_context::exact_exception\n                    : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception : vt;\n            // Use least significant bit of the scheduler pointer to store previous mode.\n            // This is necessary when components compiled with different compilers and/or\n            // TBB versions initialize the \n            my_scheduler = static_cast<scheduler*>((generic_scheduler*)((uintptr_t)s | prev_mode));\n        }\n        else\n#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */\n            my_scheduler = s;\n    } else {\n        __TBB_ASSERT( !thread_stack_size, \"deferred initialization ignores stack size setting\" );\n    }\n}\n\nvoid task_scheduler_init::terminate() {\n#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS\n    uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact;\n    my_scheduler = (scheduler*)((uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact);\n#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */\n    generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler);\n    my_scheduler = NULL;\n    __TBB_ASSERT( s, \"task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()\");\n#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS\n    if ( s->master_outermost_level() ) {\n        uintptr_t &vt = s->default_context()->my_version_and_traits;\n        vt = prev_mode & propagation_mode_exact ? vt | task_group_context::exact_exception\n                                        : vt & ~task_group_context::exact_exception;\n    }\n#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */\n    governor::terminate_scheduler(s, this);\n}\n\nint task_scheduler_init::default_num_threads() {\n    return governor::default_num_threads();\n}\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/governor.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_governor_H\n#define _TBB_governor_H\n\n#include \"tbb/task_scheduler_init.h\"\n#include \"../rml/include/rml_tbb.h\"\n\n#include \"tbb_misc.h\" // for AvailableHwConcurrency and ThreadStackSize\n#include \"tls.h\"\n\n#if __TBB_SURVIVE_THREAD_SWITCH\n#include \"cilk-tbb-interop.h\"\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n\nnamespace tbb {\nnamespace internal {\n\nclass market;\nclass generic_scheduler;\nclass __TBB_InitOnce;\n\n//------------------------------------------------------------------------\n// Class governor\n//------------------------------------------------------------------------\n\n//! The class handles access to the single instance of market, and to TLS to keep scheduler instances.\n/** It also supports automatic on-demand initialization of the TBB scheduler.\n    The class contains only static data members and methods.*/\nclass governor {\n    friend class __TBB_InitOnce;\n    friend class market;\n\n    //! TLS for scheduler instances associated with individual threads\n    static basic_tls<generic_scheduler*> theTLS;\n\n    //! Caches the maximal level of parallelism supported by the hardware\n    static unsigned DefaultNumberOfThreads;\n\n    static rml::tbb_factory theRMLServerFactory;\n\n    static bool UsePrivateRML;\n\n    //! Instance of task_scheduler_init that requested blocking termination.\n    static const task_scheduler_init *BlockingTSI;\n\n#if TBB_USE_ASSERT\n    static bool IsBlockingTerminationInProgress;\n#endif\n\n    static bool is_speculation_enabled;\n\n    //! Create key for thread-local storage and initialize RML.\n    static void acquire_resources ();\n\n    //! Destroy the thread-local storage key and deinitialize RML.\n    static void release_resources ();\n\n    static rml::tbb_server* create_rml_server ( rml::tbb_client& );\n\n    //! 
The internal routine to undo automatic initialization.\n    /** The signature is written with void* so that the routine\n        can be the destructor argument to pthread_key_create. */\n    static void auto_terminate(void* scheduler);\n\npublic:\n    static unsigned default_num_threads () {\n        // No memory fence required, because at worst each invoking thread calls AvailableHwConcurrency once.\n        return DefaultNumberOfThreads ? DefaultNumberOfThreads :\n                                        DefaultNumberOfThreads = AvailableHwConcurrency();\n    }\n    //! Processes scheduler initialization request (possibly nested) in a master thread\n    /** If necessary creates new instance of arena and/or local scheduler.\n        The auto_init argument specifies if the call is due to automatic initialization. **/\n    static generic_scheduler* init_scheduler( unsigned num_threads, stack_size_type stack_size, bool auto_init = false );\n\n    //! Processes scheduler termination request (possibly nested) in a master thread\n    static void terminate_scheduler( generic_scheduler* s, const task_scheduler_init *tsi_ptr );\n\n    //! Register TBB scheduler instance in thread-local storage.\n    static void sign_on(generic_scheduler* s);\n\n    //! Unregister TBB scheduler instance from thread-local storage.\n    static void sign_off(generic_scheduler* s);\n\n    //! Used to check validity of the local scheduler TLS contents.\n    static bool is_set ( generic_scheduler* s ) { return theTLS.get() == s; }\n\n    //! Temporarily set TLS slot to the given scheduler\n    static void assume_scheduler( generic_scheduler* s ) { theTLS.set( s ); }\n\n    //! Obtain the thread-local instance of the TBB scheduler.\n    /** If the scheduler has not been initialized yet, initialization is done automatically.\n        Note that auto-initialized scheduler instance is destroyed only when its thread terminates. 
**/\n    static generic_scheduler* local_scheduler () {\n        generic_scheduler* s = theTLS.get();\n        return s ? s : init_scheduler( (unsigned)task_scheduler_init::automatic, 0, true );\n    }\n\n    static generic_scheduler* local_scheduler_if_initialized () {\n        return theTLS.get();\n    }\n\n    //! Undo automatic initialization if necessary; call when a thread exits.\n    static void terminate_auto_initialized_scheduler() {\n        auto_terminate( theTLS.get() );\n    }\n\n    static void print_version_info ();\n\n    static void initialize_rml_factory ();\n\n    static bool needsWaitWorkers () { return BlockingTSI!=NULL; }\n\n    //! Must be called before init_scheduler\n    static void setBlockingTerminate(const task_scheduler_init *tsi);\n\n#if __TBB_SURVIVE_THREAD_SWITCH\n    static __cilk_tbb_retcode stack_op_handler( __cilk_tbb_stack_op op, void* );\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n    static bool speculation_enabled() { return is_speculation_enabled; }\n\n}; // class governor\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_governor_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ia32-masm/atomic_support.asm",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. 
This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n.686\n.model flat,c\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_fetchadd1\n__TBB_machine_fetchadd1:\n\tmov edx,4[esp]\n\tmov eax,8[esp]\n\tlock xadd [edx],al\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_fetchstore1\n__TBB_machine_fetchstore1:\n\tmov edx,4[esp]\n\tmov eax,8[esp]\n\tlock xchg [edx],al\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_cmpswp1\n__TBB_machine_cmpswp1:\n\tmov edx,4[esp]\n\tmov ecx,8[esp]\n\tmov eax,12[esp]\n\tlock cmpxchg [edx],cl\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_fetchadd2\n__TBB_machine_fetchadd2:\n\tmov edx,4[esp]\n\tmov eax,8[esp]\n\tlock xadd [edx],ax\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_fetchstore2\n__TBB_machine_fetchstore2:\n\tmov edx,4[esp]\n\tmov eax,8[esp]\n\tlock xchg [edx],ax\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_cmpswp2\n__TBB_machine_cmpswp2:\n\tmov edx,4[esp]\n\tmov ecx,8[esp]\n\tmov eax,12[esp]\n\tlock cmpxchg [edx],cx\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_fetchadd4\n__TBB_machine_fetchadd4:\n\tmov edx,4[esp]\n\tmov eax,8[esp]\n\tlock xadd [edx],eax\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_fetchstore4\n__TBB_machine_fetchstore4:\n\tmov edx,4[esp]\n\tmov eax,8[esp]\n\tlock xchg [edx],eax\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_cmpswp4\n__TBB_machine_cmpswp4:\n\tmov edx,4[esp]\n\tmov ecx,8[esp]\n\tmov eax,12[esp]\n\tlock cmpxchg [edx],ecx\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_fetchadd8\n__TBB_machine_fetchadd8:\n\tpush ebx\n\tpush edi\n\tmov edi,12[esp]\n\tmov eax,[edi]\n\tmov edx,4[edi]\n__TBB_machine_fetchadd8_loop:\n\tmov ebx,16[esp]\n\tmov ecx,20[esp]\n\tadd ebx,eax\n\tadc ecx,edx\n\tlock cmpxchg8b qword ptr [edi]\n\tjnz __TBB_machine_fetchadd8_loop\n\tpop edi\n\tpop ebx\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_fetchstore8\n__TBB_machine_fetchstore8:\n\tpush 
ebx\n\tpush edi\n\tmov edi,12[esp]\n\tmov ebx,16[esp]\n\tmov ecx,20[esp]\n\tmov eax,[edi]\n\tmov edx,4[edi]\n__TBB_machine_fetchstore8_loop:\n\tlock cmpxchg8b qword ptr [edi]\n\tjnz __TBB_machine_fetchstore8_loop\n\tpop edi\n\tpop ebx\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_cmpswp8\n__TBB_machine_cmpswp8:\n\tpush ebx\n\tpush edi\n\tmov edi,12[esp]\n\tmov ebx,16[esp]\n\tmov ecx,20[esp]\n\tmov eax,24[esp]\n\tmov edx,28[esp]\n\tlock cmpxchg8b qword ptr [edi]\n\tpop edi\n\tpop ebx\n\tret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_load8\n__TBB_machine_Load8:\n\t; If location is on stack, compiler may have failed to align it correctly, so we do dynamic check.\n\tmov ecx,4[esp]\n\ttest ecx,7\n\tjne load_slow\n\t; Load within a cache line\n\tsub esp,12\n\tfild qword ptr [ecx]\n\tfistp qword ptr [esp]\n\tmov eax,[esp]\n\tmov edx,4[esp]\n\tadd esp,12\n\tret\nload_slow:\n\t; Load is misaligned. Use cmpxchg8b.\n\tpush ebx\n\tpush edi\n\tmov edi,ecx\n\txor eax,eax\n\txor ebx,ebx\n\txor ecx,ecx\n\txor edx,edx\n\tlock cmpxchg8b qword ptr [edi]\n\tpop edi\n\tpop ebx\n\tret\nEXTRN __TBB_machine_store8_slow:PROC\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_store8\n__TBB_machine_Store8:\n\t; If location is on stack, compiler may have failed to align it correctly, so we do dynamic check.\n\tmov ecx,4[esp]\n\ttest ecx,7\n\tjne __TBB_machine_store8_slow ;; tail call to tbb_misc.cpp\n\tfild qword ptr 8[esp]\n\tfistp qword ptr [ecx]\n\tret\nend\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ia32-masm/itsx.asm",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. 
This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n.686\n.model flat,c\n.code\n        ALIGN 4\n        PUBLIC c __TBB_machine_try_lock_elided\n__TBB_machine_try_lock_elided:\n        mov ecx, 4[esp]\n        xor eax, eax\n        mov al, 1\n        BYTE 0F2H\n        xchg al, byte ptr [ecx]\n        xor  al, 1\n        ret\n.code\n        ALIGN 4\n        PUBLIC c __TBB_machine_unlock_elided\n__TBB_machine_unlock_elided:\n        mov ecx, 4[esp]\n        BYTE 0F3H\n        mov byte ptr [ecx], 0\n        ret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_begin_transaction\n__TBB_machine_begin_transaction:\n        mov  eax, -1\n        BYTE 0C7H\n        BYTE 0F8H\n        BYTE 000H\n        BYTE 000H\n        BYTE 000H\n        BYTE 000H\n        ret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_end_transaction\n__TBB_machine_end_transaction:\n        BYTE 00FH\n        BYTE 001H\n        BYTE 0D5H\n        ret\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_transaction_conflict_abort\n__TBB_machine_transaction_conflict_abort:\n        BYTE 0C6H\n        BYTE 0F8H\n        BYTE 0FFH  ; 12.4.5 Abort argument: lock not free when tested\n        ret\n.code \n        ALIGN 4\n\tPUBLIC c __TBB_machine_is_in_transaction\n__TBB_machine_is_in_transaction:\n        xor eax, eax\n        BYTE 00FH\n        BYTE 001H\n        BYTE 0D6H\n        JZ   rset\n        MOV  al,1\nrset:\n        RET\nend\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ia32-masm/lock_byte.asm",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n; DO NOT EDIT - AUTOMATICALLY GENERATED FROM .s FILE\n.686\n.model flat,c\n.code \n\tALIGN 4\n\tPUBLIC c __TBB_machine_trylockbyte\n__TBB_machine_trylockbyte:\n\tmov edx,4[esp]\n\tmov al,[edx]\n\tmov cl,1\n\ttest al,1\n\tjnz __TBB_machine_trylockbyte_contended\n\tlock cmpxchg [edx],cl\n\tjne __TBB_machine_trylockbyte_contended\n\tmov eax,1\n\tret\n__TBB_machine_trylockbyte_contended:\n\txor eax,eax\n\tret\nend\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ia64-gas/atomic_support.s",
    "content": "// Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n//\n// This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n// you can redistribute it and/or modify it under the terms of the GNU General Public License\n// version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n// See  the GNU General Public License for more details.   You should have received a copy of\n// the  GNU General Public License along with Threading Building Blocks; if not, write to the\n// Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n//\n// As a special exception,  you may use this file  as part of a free software library without\n// restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n// functions from this file, or you compile this file and link it with other files to produce\n// an executable,  this file does not by itself cause the resulting executable to be covered\n// by the GNU General Public License. 
This exception does not however invalidate any other\n// reasons why the executable file might be covered by the GNU General Public License.\n\n// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh\n# 1 \"<stdin>\"\n# 1 \"<built-in>\"\n# 1 \"<command line>\"\n# 1 \"<stdin>\"\n\n\n\n\n\n        .section .text\n        .align 16\n\n\n        .proc __TBB_machine_fetchadd1__TBB_full_fence#\n        .global __TBB_machine_fetchadd1__TBB_full_fence#\n__TBB_machine_fetchadd1__TBB_full_fence:\n{\n        mf\n        br __TBB_machine_fetchadd1acquire\n}\n        .endp __TBB_machine_fetchadd1__TBB_full_fence#\n\n        .proc __TBB_machine_fetchadd1acquire#\n        .global __TBB_machine_fetchadd1acquire#\n__TBB_machine_fetchadd1acquire:\n\n\n\n\n\n\n\n        ld1 r9=[r32]\n;;\nRetry_1acquire:\n        mov ar.ccv=r9\n        mov r8=r9;\n        add r10=r9,r33\n;;\n        cmpxchg1.acq r9=[r32],r10,ar.ccv\n;;\n        cmp.ne p7,p0=r8,r9\n  (p7) br.cond.dpnt Retry_1acquire\n        br.ret.sptk.many b0\n# 49 \"<stdin>\"\n        .endp __TBB_machine_fetchadd1acquire#\n# 62 \"<stdin>\"\n        .section .text\n        .align 16\n        .proc __TBB_machine_fetchstore1__TBB_full_fence#\n        .global __TBB_machine_fetchstore1__TBB_full_fence#\n__TBB_machine_fetchstore1__TBB_full_fence:\n        mf\n;;\n        xchg1 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore1__TBB_full_fence#\n\n\n        .proc __TBB_machine_fetchstore1acquire#\n        .global __TBB_machine_fetchstore1acquire#\n__TBB_machine_fetchstore1acquire:\n        xchg1 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore1acquire#\n# 88 \"<stdin>\"\n        .section .text\n        .align 16\n\n\n        .proc __TBB_machine_cmpswp1__TBB_full_fence#\n        .global __TBB_machine_cmpswp1__TBB_full_fence#\n__TBB_machine_cmpswp1__TBB_full_fence:\n{\n        mf\n        br __TBB_machine_cmpswp1acquire\n}\n        .endp 
__TBB_machine_cmpswp1__TBB_full_fence#\n\n        .proc __TBB_machine_cmpswp1acquire#\n        .global __TBB_machine_cmpswp1acquire#\n__TBB_machine_cmpswp1acquire:\n\n        zxt1 r34=r34\n;;\n\n        mov ar.ccv=r34\n;;\n        cmpxchg1.acq r8=[r32],r33,ar.ccv\n        br.ret.sptk.many b0\n        .endp __TBB_machine_cmpswp1acquire#\n// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh\n# 1 \"<stdin>\"\n# 1 \"<built-in>\"\n# 1 \"<command line>\"\n# 1 \"<stdin>\"\n\n\n\n\n\n        .section .text\n        .align 16\n\n\n        .proc __TBB_machine_fetchadd2__TBB_full_fence#\n        .global __TBB_machine_fetchadd2__TBB_full_fence#\n__TBB_machine_fetchadd2__TBB_full_fence:\n{\n        mf\n        br __TBB_machine_fetchadd2acquire\n}\n        .endp __TBB_machine_fetchadd2__TBB_full_fence#\n\n        .proc __TBB_machine_fetchadd2acquire#\n        .global __TBB_machine_fetchadd2acquire#\n__TBB_machine_fetchadd2acquire:\n\n\n\n\n\n\n\n        ld2 r9=[r32]\n;;\nRetry_2acquire:\n        mov ar.ccv=r9\n        mov r8=r9;\n        add r10=r9,r33\n;;\n        cmpxchg2.acq r9=[r32],r10,ar.ccv\n;;\n        cmp.ne p7,p0=r8,r9\n  (p7) br.cond.dpnt Retry_2acquire\n        br.ret.sptk.many b0\n# 49 \"<stdin>\"\n        .endp __TBB_machine_fetchadd2acquire#\n# 62 \"<stdin>\"\n        .section .text\n        .align 16\n        .proc __TBB_machine_fetchstore2__TBB_full_fence#\n        .global __TBB_machine_fetchstore2__TBB_full_fence#\n__TBB_machine_fetchstore2__TBB_full_fence:\n        mf\n;;\n        xchg2 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore2__TBB_full_fence#\n\n\n        .proc __TBB_machine_fetchstore2acquire#\n        .global __TBB_machine_fetchstore2acquire#\n__TBB_machine_fetchstore2acquire:\n        xchg2 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore2acquire#\n# 88 \"<stdin>\"\n        .section .text\n        .align 16\n\n\n        .proc 
__TBB_machine_cmpswp2__TBB_full_fence#\n        .global __TBB_machine_cmpswp2__TBB_full_fence#\n__TBB_machine_cmpswp2__TBB_full_fence:\n{\n        mf\n        br __TBB_machine_cmpswp2acquire\n}\n        .endp __TBB_machine_cmpswp2__TBB_full_fence#\n\n        .proc __TBB_machine_cmpswp2acquire#\n        .global __TBB_machine_cmpswp2acquire#\n__TBB_machine_cmpswp2acquire:\n\n        zxt2 r34=r34\n;;\n\n        mov ar.ccv=r34\n;;\n        cmpxchg2.acq r8=[r32],r33,ar.ccv\n        br.ret.sptk.many b0\n        .endp __TBB_machine_cmpswp2acquire#\n// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh\n# 1 \"<stdin>\"\n# 1 \"<built-in>\"\n# 1 \"<command line>\"\n# 1 \"<stdin>\"\n\n\n\n\n\n        .section .text\n        .align 16\n\n\n        .proc __TBB_machine_fetchadd4__TBB_full_fence#\n        .global __TBB_machine_fetchadd4__TBB_full_fence#\n__TBB_machine_fetchadd4__TBB_full_fence:\n{\n        mf\n        br __TBB_machine_fetchadd4acquire\n}\n        .endp __TBB_machine_fetchadd4__TBB_full_fence#\n\n        .proc __TBB_machine_fetchadd4acquire#\n        .global __TBB_machine_fetchadd4acquire#\n__TBB_machine_fetchadd4acquire:\n\n        cmp.eq p6,p0=1,r33\n        cmp.eq p8,p0=-1,r33\n  (p6) br.cond.dptk Inc_4acquire\n  (p8) br.cond.dpnt Dec_4acquire\n;;\n\n        ld4 r9=[r32]\n;;\nRetry_4acquire:\n        mov ar.ccv=r9\n        mov r8=r9;\n        add r10=r9,r33\n;;\n        cmpxchg4.acq r9=[r32],r10,ar.ccv\n;;\n        cmp.ne p7,p0=r8,r9\n  (p7) br.cond.dpnt Retry_4acquire\n        br.ret.sptk.many b0\n\nInc_4acquire:\n        fetchadd4.acq r8=[r32],1\n        br.ret.sptk.many b0\nDec_4acquire:\n        fetchadd4.acq r8=[r32],-1\n        br.ret.sptk.many b0\n\n        .endp __TBB_machine_fetchadd4acquire#\n# 62 \"<stdin>\"\n        .section .text\n        .align 16\n        .proc __TBB_machine_fetchstore4__TBB_full_fence#\n        .global __TBB_machine_fetchstore4__TBB_full_fence#\n__TBB_machine_fetchstore4__TBB_full_fence:\n        
mf\n;;\n        xchg4 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore4__TBB_full_fence#\n\n\n        .proc __TBB_machine_fetchstore4acquire#\n        .global __TBB_machine_fetchstore4acquire#\n__TBB_machine_fetchstore4acquire:\n        xchg4 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore4acquire#\n# 88 \"<stdin>\"\n        .section .text\n        .align 16\n\n\n        .proc __TBB_machine_cmpswp4__TBB_full_fence#\n        .global __TBB_machine_cmpswp4__TBB_full_fence#\n__TBB_machine_cmpswp4__TBB_full_fence:\n{\n        mf\n        br __TBB_machine_cmpswp4acquire\n}\n        .endp __TBB_machine_cmpswp4__TBB_full_fence#\n\n        .proc __TBB_machine_cmpswp4acquire#\n        .global __TBB_machine_cmpswp4acquire#\n__TBB_machine_cmpswp4acquire:\n\n        zxt4 r34=r34\n;;\n\n        mov ar.ccv=r34\n;;\n        cmpxchg4.acq r8=[r32],r33,ar.ccv\n        br.ret.sptk.many b0\n        .endp __TBB_machine_cmpswp4acquire#\n// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh\n# 1 \"<stdin>\"\n# 1 \"<built-in>\"\n# 1 \"<command line>\"\n# 1 \"<stdin>\"\n\n\n\n\n\n        .section .text\n        .align 16\n\n\n        .proc __TBB_machine_fetchadd8__TBB_full_fence#\n        .global __TBB_machine_fetchadd8__TBB_full_fence#\n__TBB_machine_fetchadd8__TBB_full_fence:\n{\n        mf\n        br __TBB_machine_fetchadd8acquire\n}\n        .endp __TBB_machine_fetchadd8__TBB_full_fence#\n\n        .proc __TBB_machine_fetchadd8acquire#\n        .global __TBB_machine_fetchadd8acquire#\n__TBB_machine_fetchadd8acquire:\n\n        cmp.eq p6,p0=1,r33\n        cmp.eq p8,p0=-1,r33\n  (p6) br.cond.dptk Inc_8acquire\n  (p8) br.cond.dpnt Dec_8acquire\n;;\n\n        ld8 r9=[r32]\n;;\nRetry_8acquire:\n        mov ar.ccv=r9\n        mov r8=r9;\n        add r10=r9,r33\n;;\n        cmpxchg8.acq r9=[r32],r10,ar.ccv\n;;\n        cmp.ne p7,p0=r8,r9\n  (p7) br.cond.dpnt Retry_8acquire\n        
br.ret.sptk.many b0\n\nInc_8acquire:\n        fetchadd8.acq r8=[r32],1\n        br.ret.sptk.many b0\nDec_8acquire:\n        fetchadd8.acq r8=[r32],-1\n        br.ret.sptk.many b0\n\n        .endp __TBB_machine_fetchadd8acquire#\n# 62 \"<stdin>\"\n        .section .text\n        .align 16\n        .proc __TBB_machine_fetchstore8__TBB_full_fence#\n        .global __TBB_machine_fetchstore8__TBB_full_fence#\n__TBB_machine_fetchstore8__TBB_full_fence:\n        mf\n;;\n        xchg8 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore8__TBB_full_fence#\n\n\n        .proc __TBB_machine_fetchstore8acquire#\n        .global __TBB_machine_fetchstore8acquire#\n__TBB_machine_fetchstore8acquire:\n        xchg8 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore8acquire#\n# 88 \"<stdin>\"\n        .section .text\n        .align 16\n\n\n        .proc __TBB_machine_cmpswp8__TBB_full_fence#\n        .global __TBB_machine_cmpswp8__TBB_full_fence#\n__TBB_machine_cmpswp8__TBB_full_fence:\n{\n        mf\n        br __TBB_machine_cmpswp8acquire\n}\n        .endp __TBB_machine_cmpswp8__TBB_full_fence#\n\n        .proc __TBB_machine_cmpswp8acquire#\n        .global __TBB_machine_cmpswp8acquire#\n__TBB_machine_cmpswp8acquire:\n\n\n\n\n        mov ar.ccv=r34\n;;\n        cmpxchg8.acq r8=[r32],r33,ar.ccv\n        br.ret.sptk.many b0\n        .endp __TBB_machine_cmpswp8acquire#\n// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh\n# 1 \"<stdin>\"\n# 1 \"<built-in>\"\n# 1 \"<command line>\"\n# 1 \"<stdin>\"\n\n\n\n\n\n        .section .text\n        .align 16\n# 19 \"<stdin>\"\n        .proc __TBB_machine_fetchadd1release#\n        .global __TBB_machine_fetchadd1release#\n__TBB_machine_fetchadd1release:\n\n\n\n\n\n\n\n        ld1 r9=[r32]\n;;\nRetry_1release:\n        mov ar.ccv=r9\n        mov r8=r9;\n        add r10=r9,r33\n;;\n        cmpxchg1.rel r9=[r32],r10,ar.ccv\n;;\n        cmp.ne p7,p0=r8,r9\n  
(p7) br.cond.dpnt Retry_1release\n        br.ret.sptk.many b0\n# 49 \"<stdin>\"\n        .endp __TBB_machine_fetchadd1release#\n# 62 \"<stdin>\"\n        .section .text\n        .align 16\n        .proc __TBB_machine_fetchstore1release#\n        .global __TBB_machine_fetchstore1release#\n__TBB_machine_fetchstore1release:\n        mf\n;;\n        xchg1 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore1release#\n# 88 \"<stdin>\"\n        .section .text\n        .align 16\n# 101 \"<stdin>\"\n        .proc __TBB_machine_cmpswp1release#\n        .global __TBB_machine_cmpswp1release#\n__TBB_machine_cmpswp1release:\n\n        zxt1 r34=r34\n;;\n\n        mov ar.ccv=r34\n;;\n        cmpxchg1.rel r8=[r32],r33,ar.ccv\n        br.ret.sptk.many b0\n        .endp __TBB_machine_cmpswp1release#\n// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh\n# 1 \"<stdin>\"\n# 1 \"<built-in>\"\n# 1 \"<command line>\"\n# 1 \"<stdin>\"\n\n\n\n\n\n        .section .text\n        .align 16\n# 19 \"<stdin>\"\n        .proc __TBB_machine_fetchadd2release#\n        .global __TBB_machine_fetchadd2release#\n__TBB_machine_fetchadd2release:\n\n\n\n\n\n\n\n        ld2 r9=[r32]\n;;\nRetry_2release:\n        mov ar.ccv=r9\n        mov r8=r9;\n        add r10=r9,r33\n;;\n        cmpxchg2.rel r9=[r32],r10,ar.ccv\n;;\n        cmp.ne p7,p0=r8,r9\n  (p7) br.cond.dpnt Retry_2release\n        br.ret.sptk.many b0\n# 49 \"<stdin>\"\n        .endp __TBB_machine_fetchadd2release#\n# 62 \"<stdin>\"\n        .section .text\n        .align 16\n        .proc __TBB_machine_fetchstore2release#\n        .global __TBB_machine_fetchstore2release#\n__TBB_machine_fetchstore2release:\n        mf\n;;\n        xchg2 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore2release#\n# 88 \"<stdin>\"\n        .section .text\n        .align 16\n# 101 \"<stdin>\"\n        .proc __TBB_machine_cmpswp2release#\n        .global 
__TBB_machine_cmpswp2release#\n__TBB_machine_cmpswp2release:\n\n        zxt2 r34=r34\n;;\n\n        mov ar.ccv=r34\n;;\n        cmpxchg2.rel r8=[r32],r33,ar.ccv\n        br.ret.sptk.many b0\n        .endp __TBB_machine_cmpswp2release#\n// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh\n# 1 \"<stdin>\"\n# 1 \"<built-in>\"\n# 1 \"<command line>\"\n# 1 \"<stdin>\"\n\n\n\n\n\n        .section .text\n        .align 16\n# 19 \"<stdin>\"\n        .proc __TBB_machine_fetchadd4release#\n        .global __TBB_machine_fetchadd4release#\n__TBB_machine_fetchadd4release:\n\n        cmp.eq p6,p0=1,r33\n        cmp.eq p8,p0=-1,r33\n  (p6) br.cond.dptk Inc_4release\n  (p8) br.cond.dpnt Dec_4release\n;;\n\n        ld4 r9=[r32]\n;;\nRetry_4release:\n        mov ar.ccv=r9\n        mov r8=r9;\n        add r10=r9,r33\n;;\n        cmpxchg4.rel r9=[r32],r10,ar.ccv\n;;\n        cmp.ne p7,p0=r8,r9\n  (p7) br.cond.dpnt Retry_4release\n        br.ret.sptk.many b0\n\nInc_4release:\n        fetchadd4.rel r8=[r32],1\n        br.ret.sptk.many b0\nDec_4release:\n        fetchadd4.rel r8=[r32],-1\n        br.ret.sptk.many b0\n\n        .endp __TBB_machine_fetchadd4release#\n# 62 \"<stdin>\"\n        .section .text\n        .align 16\n        .proc __TBB_machine_fetchstore4release#\n        .global __TBB_machine_fetchstore4release#\n__TBB_machine_fetchstore4release:\n        mf\n;;\n        xchg4 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore4release#\n# 88 \"<stdin>\"\n        .section .text\n        .align 16\n# 101 \"<stdin>\"\n        .proc __TBB_machine_cmpswp4release#\n        .global __TBB_machine_cmpswp4release#\n__TBB_machine_cmpswp4release:\n\n        zxt4 r34=r34\n;;\n\n        mov ar.ccv=r34\n;;\n        cmpxchg4.rel r8=[r32],r33,ar.ccv\n        br.ret.sptk.many b0\n        .endp __TBB_machine_cmpswp4release#\n// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh\n# 1 \"<stdin>\"\n# 1 
\"<built-in>\"\n# 1 \"<command line>\"\n# 1 \"<stdin>\"\n\n\n\n\n\n        .section .text\n        .align 16\n# 19 \"<stdin>\"\n        .proc __TBB_machine_fetchadd8release#\n        .global __TBB_machine_fetchadd8release#\n__TBB_machine_fetchadd8release:\n\n        cmp.eq p6,p0=1,r33\n        cmp.eq p8,p0=-1,r33\n  (p6) br.cond.dptk Inc_8release\n  (p8) br.cond.dpnt Dec_8release\n;;\n\n        ld8 r9=[r32]\n;;\nRetry_8release:\n        mov ar.ccv=r9\n        mov r8=r9;\n        add r10=r9,r33\n;;\n        cmpxchg8.rel r9=[r32],r10,ar.ccv\n;;\n        cmp.ne p7,p0=r8,r9\n  (p7) br.cond.dpnt Retry_8release\n        br.ret.sptk.many b0\n\nInc_8release:\n        fetchadd8.rel r8=[r32],1\n        br.ret.sptk.many b0\nDec_8release:\n        fetchadd8.rel r8=[r32],-1\n        br.ret.sptk.many b0\n\n        .endp __TBB_machine_fetchadd8release#\n# 62 \"<stdin>\"\n        .section .text\n        .align 16\n        .proc __TBB_machine_fetchstore8release#\n        .global __TBB_machine_fetchstore8release#\n__TBB_machine_fetchstore8release:\n        mf\n;;\n        xchg8 r8=[r32],r33\n        br.ret.sptk.many b0\n        .endp __TBB_machine_fetchstore8release#\n# 88 \"<stdin>\"\n        .section .text\n        .align 16\n# 101 \"<stdin>\"\n        .proc __TBB_machine_cmpswp8release#\n        .global __TBB_machine_cmpswp8release#\n__TBB_machine_cmpswp8release:\n\n\n\n\n        mov ar.ccv=r34\n;;\n        cmpxchg8.rel r8=[r32],r33,ar.ccv\n        br.ret.sptk.many b0\n        .endp __TBB_machine_cmpswp8release#\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ia64-gas/ia64_misc.s",
    "content": "// Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n//\n// This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n// you can redistribute it and/or modify it under the terms of the GNU General Public License\n// version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n// See  the GNU General Public License for more details.   You should have received a copy of\n// the  GNU General Public License along with Threading Building Blocks; if not, write to the\n// Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n//\n// As a special exception,  you may use this file  as part of a free software library without\n// restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n// functions from this file, or you compile this file and link it with other files to produce\n// an executable,  this file does not by itself cause the resulting executable to be covered\n// by the GNU General Public License. 
This exception does not however invalidate any other\n// reasons why the executable file might be covered by the GNU General Public License.\n\n\t// RSE backing store pointer retrieval\n    .section .text\n    .align 16\n    .proc __TBB_get_bsp#\n    .global __TBB_get_bsp#\n__TBB_get_bsp:\n        mov r8=ar.bsp\n        br.ret.sptk.many b0\n    .endp __TBB_get_bsp#\n\n    .section .text\n    .align 16\n    .proc __TBB_machine_load8_relaxed#\n    .global __TBB_machine_load8_relaxed#\n__TBB_machine_load8_relaxed:\n        ld8 r8=[r32]\n        br.ret.sptk.many b0\n    .endp __TBB_machine_load8_relaxed#\n\n    .section .text\n    .align 16\n    .proc __TBB_machine_store8_relaxed#\n    .global __TBB_machine_store8_relaxed#\n__TBB_machine_store8_relaxed:\n        st8 [r32]=r33\n        br.ret.sptk.many b0\n    .endp __TBB_machine_store8_relaxed#\n\n    .section .text\n    .align 16\n    .proc __TBB_machine_load4_relaxed#\n    .global __TBB_machine_load4_relaxed#\n__TBB_machine_load4_relaxed:\n        ld4 r8=[r32]\n        br.ret.sptk.many b0\n    .endp __TBB_machine_load4_relaxed#\n\n    .section .text\n    .align 16\n    .proc __TBB_machine_store4_relaxed#\n    .global __TBB_machine_store4_relaxed#\n__TBB_machine_store4_relaxed:\n        st4 [r32]=r33\n        br.ret.sptk.many b0\n    .endp __TBB_machine_store4_relaxed#\n\n    .section .text\n    .align 16\n    .proc __TBB_machine_load2_relaxed#\n    .global __TBB_machine_load2_relaxed#\n__TBB_machine_load2_relaxed:\n        ld2 r8=[r32]\n        br.ret.sptk.many b0\n    .endp __TBB_machine_load2_relaxed#\n\n    .section .text\n    .align 16\n    .proc __TBB_machine_store2_relaxed#\n    .global __TBB_machine_store2_relaxed#\n__TBB_machine_store2_relaxed:\n        st2 [r32]=r33\n        br.ret.sptk.many b0\n    .endp __TBB_machine_store2_relaxed#\n\n    .section .text\n    .align 16\n    .proc __TBB_machine_load1_relaxed#\n    .global __TBB_machine_load1_relaxed#\n__TBB_machine_load1_relaxed:\n        ld1 r8=[r32]\n     
   br.ret.sptk.many b0\n    .endp __TBB_machine_load1_relaxed#\n\n    .section .text\n    .align 16\n    .proc __TBB_machine_store1_relaxed#\n    .global __TBB_machine_store1_relaxed#\n__TBB_machine_store1_relaxed:\n        st1 [r32]=r33\n        br.ret.sptk.many b0\n    .endp __TBB_machine_store1_relaxed#\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ia64-gas/lock_byte.s",
    "content": "// Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n//\n// This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n// you can redistribute it and/or modify it under the terms of the GNU General Public License\n// version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n// See  the GNU General Public License for more details.   You should have received a copy of\n// the  GNU General Public License along with Threading Building Blocks; if not, write to the\n// Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n//\n// As a special exception,  you may use this file  as part of a free software library without\n// restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n// functions from this file, or you compile this file and link it with other files to produce\n// an executable,  this file does not by itself cause the resulting executable to be covered\n// by the GNU General Public License. 
This exception does not however invalidate any other\n// reasons why the executable file might be covered by the GNU General Public License.\n\n\t// Support for class TinyLock\n\t.section .text\n\t.align 16\n\t// unsigned int __TBB_machine_trylockbyte( byte& flag );\n\t// r32 = address of flag \n\t.proc  __TBB_machine_trylockbyte#\n\t.global __TBB_machine_trylockbyte#\nADDRESS_OF_FLAG=r32\nRETCODE=r8\nFLAG=r9\nBUSY=r10\nSCRATCH=r11\n__TBB_machine_trylockbyte:\n        ld1.acq FLAG=[ADDRESS_OF_FLAG]\n        mov BUSY=1\n        mov RETCODE=0\n;;\n        cmp.ne p6,p0=0,FLAG\n        mov ar.ccv=r0\n(p6)    br.ret.sptk.many b0\n;;\n        cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv  // Try to acquire lock\n;;\n        cmp.eq p6,p0=0,SCRATCH\n;;\n(p6)    mov RETCODE=1\n   \tbr.ret.sptk.many b0\t\n\t.endp __TBB_machine_trylockbyte#\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ia64-gas/log2.s",
    "content": "// Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n//\n// This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n// you can redistribute it and/or modify it under the terms of the GNU General Public License\n// version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n// See  the GNU General Public License for more details.   You should have received a copy of\n// the  GNU General Public License along with Threading Building Blocks; if not, write to the\n// Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n//\n// As a special exception,  you may use this file  as part of a free software library without\n// restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n// functions from this file, or you compile this file and link it with other files to produce\n// an executable,  this file does not by itself cause the resulting executable to be covered\n// by the GNU General Public License. 
This exception does not however invalidate any other\n// reasons why the executable file might be covered by the GNU General Public License.\n\n\t.section .text\n\t.align 16\n\t// unsigned long __TBB_machine_lg( unsigned long x );\n\t// r32 = x\n\t.proc  __TBB_machine_lg#\n\t.global __TBB_machine_lg#\n__TBB_machine_lg:\n        shr r16=r32,1\t// .x\n;;\n        shr r17=r32,2\t// ..x\n\tor r32=r32,r16\t// xx\n;;\n\tshr r16=r32,3\t// ...xx\n\tor r32=r32,r17  // xxx\n;;\n\tshr r17=r32,5\t// .....xxx\n\tor r32=r32,r16  // xxxxx\n;;\n\tshr r16=r32,8\t// ........xxxxx\n\tor r32=r32,r17\t// xxxxxxxx\n;;\n\tshr r17=r32,13\n\tor r32=r32,r16\t// 13x\n;;\n\tshr r16=r32,21\n\tor r32=r32,r17\t// 21x\n;;\n\tshr r17=r32,34  \n\tor r32=r32,r16\t// 34x\n;;\n\tshr r16=r32,55\n\tor r32=r32,r17  // 55x\n;;\n\tor r32=r32,r16  // 64x\n;;\n\tpopcnt r8=r32\n;;\n\tadd r8=-1,r8\n   \tbr.ret.sptk.many b0\t\n\t.endp __TBB_machine_lg#\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ia64-gas/pause.s",
    "content": "// Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n//\n// This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n// you can redistribute it and/or modify it under the terms of the GNU General Public License\n// version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n// See  the GNU General Public License for more details.   You should have received a copy of\n// the  GNU General Public License along with Threading Building Blocks; if not, write to the\n// Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n//\n// As a special exception,  you may use this file  as part of a free software library without\n// restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n// functions from this file, or you compile this file and link it with other files to produce\n// an executable,  this file does not by itself cause the resulting executable to be covered\n// by the GNU General Public License. This exception does not however invalidate any other\n// reasons why the executable file might be covered by the GNU General Public License.\n\n\t.section .text\n\t.align 16\n\t// void __TBB_machine_pause( long count );\n\t// r32 = count\n\t.proc  __TBB_machine_pause#\n\t.global __TBB_machine_pause#\ncount = r32\n__TBB_machine_pause:\n        hint.m 0\n\tadd count=-1,count\n;;\n\tcmp.eq p6,p7=0,count\n(p7)\tbr.cond.dpnt __TBB_machine_pause\n(p6)   \tbr.ret.sptk.many b0\t\n\t.endp __TBB_machine_pause#\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/ibm_aix51/atomic_support.c",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include <stdint.h>\n#include <sys/atomic_op.h>\n\n/* This file must be compiled with gcc.  The IBM compiler doesn't seem to\n   support inline assembly statements (October 2007). 
*/\n\n#ifdef __GNUC__\n\nint32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand) { \n    __asm__ __volatile__ (\"sync\\n\");  /* memory release operation */\n    compare_and_swap ((atomic_p) ptr, &comparand, value);\n    __asm__ __volatile__ (\"isync\\n\");  /* memory acquire operation */\n    return comparand;\n}\n\nint64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand) { \n    __asm__ __volatile__ (\"sync\\n\");  /* memory release operation */\n    compare_and_swaplp ((atomic_l) ptr, &comparand, value);\n    __asm__ __volatile__ (\"isync\\n\");  /* memory acquire operation */\n    return comparand;\n}\n\nvoid __TBB_machine_flush () { \n    __asm__ __volatile__ (\"sync\\n\");\n}\n\nvoid __TBB_machine_lwsync () { \n    __asm__ __volatile__ (\"lwsync\\n\");\n}\n\nvoid __TBB_machine_isync () { \n    __asm__ __volatile__ (\"isync\\n\");\n}\n\n#endif /* __GNUC__ */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/intel64-masm/atomic_support.asm",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. 
This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n; DO NOT EDIT - AUTOMATICALLY GENERATED FROM .s FILE\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_fetchadd1\n__TBB_machine_fetchadd1:\n\tmov rax,rdx\n\tlock xadd [rcx],al\n\tret\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_fetchstore1\n__TBB_machine_fetchstore1:\n\tmov rax,rdx\n\tlock xchg [rcx],al\n\tret\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_cmpswp1\n__TBB_machine_cmpswp1:\n\tmov rax,r8\n\tlock cmpxchg [rcx],dl\n\tret\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_fetchadd2\n__TBB_machine_fetchadd2:\n\tmov rax,rdx\n\tlock xadd [rcx],ax\n\tret\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_fetchstore2\n__TBB_machine_fetchstore2:\n\tmov rax,rdx\n\tlock xchg [rcx],ax\n\tret\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_cmpswp2\n__TBB_machine_cmpswp2:\n\tmov rax,r8\n\tlock cmpxchg [rcx],dx\n\tret\n.code\n        ALIGN 8\n        PUBLIC __TBB_machine_pause\n__TBB_machine_pause:\nL1:\n        dw 090f3H; pause\n        add ecx,-1\n        jne L1\n        ret\nend\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/intel64-masm/intel64_misc.asm",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n.code\n\tALIGN 8\n\tPUBLIC __TBB_get_cpu_ctl_env\n__TBB_get_cpu_ctl_env:\n    stmxcsr [rcx]\n    fstcw   [rcx+4]\n\tret\n.code\n\tALIGN 8\n\tPUBLIC __TBB_set_cpu_ctl_env\n__TBB_set_cpu_ctl_env:\n    ldmxcsr [rcx]\n    fldcw   [rcx+4]\n\tret\nend\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/intel64-masm/itsx.asm",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. 
This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n.code\n        ALIGN 8\n        PUBLIC __TBB_machine_try_lock_elided\n__TBB_machine_try_lock_elided:\n        xor  rax, rax\n        mov  al, 1\n        BYTE 0F2H\n        xchg al, byte ptr [rcx]\n        xor  al, 1\n        ret\n.code\n        ALIGN 8\n        PUBLIC __TBB_machine_unlock_elided\n__TBB_machine_unlock_elided:\n        BYTE 0F3H\n        mov  byte ptr [rcx], 0\n        ret\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_begin_transaction\n__TBB_machine_begin_transaction:\n        mov  eax, -1\n        BYTE 0C7H\n        BYTE 0F8H\n        BYTE 000H\n        BYTE 000H\n        BYTE 000H\n        BYTE 000H\n        ret\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_end_transaction\n__TBB_machine_end_transaction:\n        BYTE 00FH\n        BYTE 001H\n        BYTE 0D5H\n        ret\n.code \n\tALIGN 8\n\tPUBLIC __TBB_machine_transaction_conflict_abort\n__TBB_machine_transaction_conflict_abort:\n        BYTE 0C6H\n        BYTE 0F8H\n        BYTE 0FFH  ; 12.4.5 Abort argument: lock not free when tested\n        ret\n.code \n        ALIGN 8\n\tPUBLIC __TBB_machine_is_in_transaction\n__TBB_machine_is_in_transaction:\n        xor eax, eax\n        BYTE 00FH  ; _xtest sets or clears ZF\n        BYTE 001H\n        BYTE 0D6H\n        jz   rset\n        mov  al,1\nrset:\n        ret\nend\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_aggregator_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__aggregator_impl_H\n#define __TBB__aggregator_impl_H\n\n#include \"../atomic.h\"\n#if !__TBBMALLOC_BUILD\n#include \"../tbb_profiling.h\"\n#endif\n\nnamespace tbb {\nnamespace interface6 {\nnamespace internal {\n\nusing namespace tbb::internal;\n\n//! aggregated_operation base class\ntemplate <typename Derived>\nclass aggregated_operation {\n public:\n    uintptr_t status;\n    Derived *next;\n    aggregated_operation() : status(0), next(NULL) {}\n};\n\n//! 
Aggregator base class\n/** An aggregator for collecting operations coming from multiple sources and executing\n    them serially on a single thread.  operation_type must be derived from\n    aggregated_operation. The parameter handler_type is a functor that will be passed the\n    list of operations and is expected to handle each operation appropriately, setting the\n    status of each operation to non-zero.*/\ntemplate < typename operation_type >\nclass aggregator_generic {\npublic:\n    aggregator_generic() : handler_busy(false) { pending_operations = NULL; }\n\n    //! Place operation in list\n    /** Place operation in list and either handle list or wait for operation to\n        complete.\n        long_life_time specifies life time of an operation inserting in an aggregator.\n        \"Long\" (long_life_time == true) life time operation can be accessed\n        even after executing it.\n        \"Short\" (long_life_time == false) life time operations can be destroyed\n        during executing so any access to it after executing is invalid.*/\n    template < typename handler_type >\n    void execute(operation_type *op, handler_type &handle_operations, bool long_life_time = true) {\n        operation_type *res;\n        // op->status should be read before inserting the operation in the\n        // aggregator queue since it can become invalid after executing a\n        // handler (if the operation has 'short' life time.)\n        const uintptr_t status = op->status;\n\n        // ITT note: &(op->status) tag is used to cover accesses to this op node. 
This\n        // thread has created the operation, and now releases it so that the handler\n        // thread may handle the associated operation w/o triggering a race condition;\n        // thus this tag will be acquired just before the operation is handled in the\n        // handle_operations functor.\n        call_itt_notify(releasing, &(op->status));\n        // insert the operation in the queue.\n        do {\n            // ITT may flag the following line as a race; it is a false positive:\n            // This is an atomic read; we don't provide itt_hide_load_word for atomics\n            op->next = res = pending_operations; // NOT A RACE\n        } while (pending_operations.compare_and_swap(op, res) != res);\n        if (!res) { // first in the list; handle the operations.\n            // ITT note: &pending_operations tag covers access to the handler_busy flag,\n            // which this waiting handler thread will try to set before entering\n            // handle_operations.\n            call_itt_notify(acquired, &pending_operations);\n            start_handle_operations(handle_operations);\n            // The operation with 'short' life time can already be destroyed.\n            if (long_life_time)\n                __TBB_ASSERT(op->status, NULL);\n        }\n        // not first; wait for op to be ready.\n        else if (!status) { // operation is blocking here.\n            __TBB_ASSERT(long_life_time, \"The blocking operation cannot have 'short' life time. Since it can already be destroyed.\");\n            call_itt_notify(prepare, &(op->status));\n            spin_wait_while_eq(op->status, uintptr_t(0));\n            itt_load_word_with_acquire(op->status);\n        }\n    }\n\n private:\n    //! An atomically updated list (aka mailbox) of pending operations\n    atomic<operation_type *> pending_operations;\n    //! Controls thread access to handle_operations\n    uintptr_t handler_busy;\n\n    //! 
Trigger the handling of operations when the handler is free\n    template < typename handler_type >\n    void start_handle_operations( handler_type &handle_operations ) {\n        operation_type *op_list;\n\n        // ITT note: &handler_busy tag covers access to pending_operations as it is passed\n        // between active and waiting handlers.  Below, the waiting handler waits until\n        // the active handler releases, and the waiting handler acquires &handler_busy as\n        // it becomes the active_handler. The release point is at the end of this\n        // function, when all operations in pending_operations have been handled by the\n        // owner of this aggregator.\n        call_itt_notify(prepare, &handler_busy);\n        // get the handler_busy:\n        // only one thread can possibly spin here at a time\n        spin_wait_until_eq(handler_busy, uintptr_t(0));\n        call_itt_notify(acquired, &handler_busy);\n        // acquire fence not necessary here due to causality rule and surrounding atomics\n        __TBB_store_with_release(handler_busy, uintptr_t(1));\n\n        // ITT note: &pending_operations tag covers access to the handler_busy flag\n        // itself. 
Capturing the state of the pending_operations signifies that\n        // handler_busy has been set and a new active handler will now process that list's\n        // operations.\n        call_itt_notify(releasing, &pending_operations);\n        // grab pending_operations\n        op_list = pending_operations.fetch_and_store(NULL);\n\n        // handle all the operations\n        handle_operations(op_list);\n\n        // release the handler\n        itt_store_word_with_release(handler_busy, uintptr_t(0));\n    }\n};\n\ntemplate < typename handler_type, typename operation_type >\nclass aggregator : public aggregator_generic<operation_type> {\n    handler_type handle_operations;\npublic:\n    aggregator() {}\n    explicit aggregator(handler_type h) : handle_operations(h) {}\n\n    void initialize_handler(handler_type h) { handle_operations = h; }\n\n    void execute(operation_type *op) {\n        aggregator_generic<operation_type>::execute(op, handle_operations);\n    }\n};\n\n// the most-compatible friend declaration (vs, gcc, icc) is\n//    template<class U, class V> friend class aggregating_functor;\ntemplate<typename aggregating_class, typename operation_list>\nclass aggregating_functor {\n    aggregating_class *fi;\npublic:\n    aggregating_functor() {}\n    aggregating_functor(aggregating_class *fi_) : fi(fi_) {}\n    void operator()(operation_list* op_list) { fi->handle_operations(op_list); }\n};\n\n} // namespace internal\n} // namespace interface6\n\nnamespace internal {\n    using interface6::internal::aggregated_operation;\n    using interface6::internal::aggregator_generic;\n    using interface6::internal::aggregator;\n    using interface6::internal::aggregating_functor;\n} // namespace internal\n\n} // namespace tbb\n\n#endif  // __TBB__aggregator_impl_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_concurrent_queue_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__concurrent_queue_impl_H\n#define __TBB__concurrent_queue_impl_H\n\n#ifndef __TBB_concurrent_queue_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#include \"../tbb_stddef.h\"\n#include \"../tbb_machine.h\"\n#include \"../atomic.h\"\n#include \"../spin_mutex.h\"\n#include \"../cache_aligned_allocator.h\"\n#include \"../tbb_exception.h\"\n#include \"../tbb_profiling.h\"\n#include <new>\n#include <utility>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <iterator>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\nnamespace tbb {\n\n#if !__TBB_TEMPLATE_FRIENDS_BROKEN\n\n// forward declaration\nnamespace strict_ppl {\ntemplate<typename T, typename A> class concurrent_queue;\n}\n\ntemplate<typename T, typename A> class concurrent_bounded_queue;\n\n#endif\n\n//! For internal use only.\nnamespace strict_ppl {\n\n//! @cond INTERNAL\nnamespace internal {\n\nusing namespace tbb::internal;\n\ntypedef size_t ticket;\n\ntemplate<typename T> class micro_queue ;\ntemplate<typename T> class micro_queue_pop_finalizer ;\ntemplate<typename T> class concurrent_queue_base_v3;\ntemplate<typename T> struct concurrent_queue_rep;\n\n//! parts of concurrent_queue_rep that do not have references to micro_queue\n/**\n * For internal use only.\n */\nstruct concurrent_queue_rep_base : no_copy {\n    template<typename T> friend class micro_queue;\n    template<typename T> friend class concurrent_queue_base_v3;\n\nprotected:\n    //! 
Approximately n_queue/golden ratio\n    static const size_t phi = 3;\n\npublic:\n    // must be power of 2\n    static const size_t n_queue = 8;\n\n    //! Prefix on a page\n    struct page {\n        page* next;\n        uintptr_t mask;\n    };\n\n    atomic<ticket> head_counter;\n    char pad1[NFS_MaxLineSize-sizeof(atomic<ticket>)];\n    atomic<ticket> tail_counter;\n    char pad2[NFS_MaxLineSize-sizeof(atomic<ticket>)];\n\n    //! Always a power of 2\n    size_t items_per_page;\n\n    //! Size of an item\n    size_t item_size;\n\n    //! number of invalid entries in the queue\n    atomic<size_t> n_invalid_entries;\n\n    char pad3[NFS_MaxLineSize-sizeof(size_t)-sizeof(size_t)-sizeof(atomic<size_t>)];\n} ;\n\ninline bool is_valid_page(const concurrent_queue_rep_base::page* p) {\n    return uintptr_t(p)>1;\n}\n\n//! Abstract class to define interface for page allocation/deallocation\n/**\n * For internal use only.\n */\nclass concurrent_queue_page_allocator\n{\n    template<typename T> friend class micro_queue ;\n    template<typename T> friend class micro_queue_pop_finalizer ;\nprotected:\n    virtual ~concurrent_queue_page_allocator() {}\nprivate:\n    virtual concurrent_queue_rep_base::page* allocate_page() = 0;\n    virtual void deallocate_page( concurrent_queue_rep_base::page* p ) = 0;\n} ;\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n// unary minus operator applied to unsigned type, result still unsigned\n#pragma warning( push )\n#pragma warning( disable: 4146 )\n#endif\n\n//! A queue using simple locking.\n/** For efficiency, this class has no constructor.\n    The caller is expected to zero-initialize it. */\ntemplate<typename T>\nclass micro_queue : no_copy {\npublic:\n    typedef void (*item_constructor_t)(T* location, const void* src);\nprivate:\n    typedef concurrent_queue_rep_base::page page;\n\n    //! 
Class used to ensure exception-safety of method \"pop\"\n    class destroyer: no_copy {\n        T& my_value;\n    public:\n        destroyer( T& value ) : my_value(value) {}\n        ~destroyer() {my_value.~T();}\n    };\n\n    void copy_item( page& dst, size_t dindex, const void* src, item_constructor_t construct_item ) {\n        construct_item( &get_ref(dst, dindex), src );\n    }\n\n    void copy_item( page& dst, size_t dindex, const page& src, size_t sindex,\n        item_constructor_t construct_item )\n    {\n        T& src_item = get_ref( const_cast<page&>(src), sindex );\n        construct_item( &get_ref(dst, dindex), static_cast<const void*>(&src_item) );\n    }\n\n    void assign_and_destroy_item( void* dst, page& src, size_t index ) {\n        T& from = get_ref(src,index);\n        destroyer d(from);\n        *static_cast<T*>(dst) = tbb::internal::move( from );\n    }\n\n    void spin_wait_until_my_turn( atomic<ticket>& counter, ticket k, concurrent_queue_rep_base& rb ) const ;\n\npublic:\n    friend class micro_queue_pop_finalizer<T>;\n\n    struct padded_page: page {\n        //! Not defined anywhere - exists to quiet warnings.\n        padded_page();\n        //! Not defined anywhere - exists to quiet warnings.\n        void operator=( const padded_page& );\n        //! 
Must be last field.\n        T last;\n    };\n\n    static T& get_ref( page& p, size_t index ) {\n        return (&static_cast<padded_page*>(static_cast<void*>(&p))->last)[index];\n    }\n\n    atomic<page*> head_page;\n    atomic<ticket> head_counter;\n\n    atomic<page*> tail_page;\n    atomic<ticket> tail_counter;\n\n    spin_mutex page_mutex;\n\n    void push( const void* item, ticket k, concurrent_queue_base_v3<T>& base,\n        item_constructor_t construct_item ) ;\n\n    bool pop( void* dst, ticket k, concurrent_queue_base_v3<T>& base ) ;\n\n    micro_queue& assign( const micro_queue& src, concurrent_queue_base_v3<T>& base,\n        item_constructor_t construct_item ) ;\n\n    page* make_copy( concurrent_queue_base_v3<T>& base, const page* src_page, size_t begin_in_page,\n        size_t end_in_page, ticket& g_index, item_constructor_t construct_item ) ;\n\n    void invalidate_page_and_rethrow( ticket k ) ;\n};\n\ntemplate<typename T>\nvoid micro_queue<T>::spin_wait_until_my_turn( atomic<ticket>& counter, ticket k, concurrent_queue_rep_base& rb ) const {\n    for( atomic_backoff b(true);;b.pause() ) {\n        ticket c = counter;\n        if( c==k ) return;\n        else if( c&1 ) {\n            ++rb.n_invalid_entries;\n            throw_exception( eid_bad_last_alloc );\n        }\n    }\n}\n\ntemplate<typename T>\nvoid micro_queue<T>::push( const void* item, ticket k, concurrent_queue_base_v3<T>& base,\n    item_constructor_t construct_item )\n{\n    k &= -concurrent_queue_rep_base::n_queue;\n    page* p = NULL;\n    size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page);\n    if( !index ) {\n        __TBB_TRY {\n            concurrent_queue_page_allocator& pa = base;\n            p = pa.allocate_page();\n        } __TBB_CATCH (...) 
{\n            ++base.my_rep->n_invalid_entries;\n            invalidate_page_and_rethrow( k );\n        }\n        p->mask = 0;\n        p->next = NULL;\n    }\n\n    if( tail_counter != k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep );\n    call_itt_notify(acquired, &tail_counter);\n\n    if( p ) {\n        spin_mutex::scoped_lock lock( page_mutex );\n        page* q = tail_page;\n        if( is_valid_page(q) )\n            q->next = p;\n        else\n            head_page = p;\n        tail_page = p;\n    } else {\n        p = tail_page;\n    }\n\n    __TBB_TRY {\n        copy_item( *p, index, item, construct_item );\n        // If no exception was thrown, mark item as present.\n        itt_hide_store_word(p->mask,  p->mask | uintptr_t(1)<<index);\n        call_itt_notify(releasing, &tail_counter);\n        tail_counter += concurrent_queue_rep_base::n_queue;\n    } __TBB_CATCH (...) {\n        ++base.my_rep->n_invalid_entries;\n        call_itt_notify(releasing, &tail_counter);\n        tail_counter += concurrent_queue_rep_base::n_queue;\n        __TBB_RETHROW();\n    }\n}\n\ntemplate<typename T>\nbool micro_queue<T>::pop( void* dst, ticket k, concurrent_queue_base_v3<T>& base ) {\n    k &= -concurrent_queue_rep_base::n_queue;\n    if( head_counter!=k ) spin_wait_until_eq( head_counter, k );\n    call_itt_notify(acquired, &head_counter);\n    if( tail_counter==k ) spin_wait_while_eq( tail_counter, k );\n    call_itt_notify(acquired, &tail_counter);\n    page& p = *head_page;\n    __TBB_ASSERT( &p, NULL );\n    size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page );\n    bool success = false;\n    {\n        micro_queue_pop_finalizer<T> finalizer( *this, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? 
&p : NULL );\n        if( p.mask & uintptr_t(1)<<index ) {\n            success = true;\n            assign_and_destroy_item( dst, p, index );\n        } else {\n            --base.my_rep->n_invalid_entries;\n        }\n    }\n    return success;\n}\n\ntemplate<typename T>\nmicro_queue<T>& micro_queue<T>::assign( const micro_queue<T>& src, concurrent_queue_base_v3<T>& base,\n    item_constructor_t construct_item )\n{\n    head_counter = src.head_counter;\n    tail_counter = src.tail_counter;\n\n    const page* srcp = src.head_page;\n    if( is_valid_page(srcp) ) {\n        ticket g_index = head_counter;\n        __TBB_TRY {\n            size_t n_items  = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue;\n            size_t index = modulo_power_of_two( head_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page );\n            size_t end_in_first_page = (index+n_items<base.my_rep->items_per_page)?(index+n_items):base.my_rep->items_per_page;\n\n            head_page = make_copy( base, srcp, index, end_in_first_page, g_index, construct_item );\n            page* cur_page = head_page;\n\n            if( srcp != src.tail_page ) {\n                for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) {\n                    cur_page->next = make_copy( base, srcp, 0, base.my_rep->items_per_page, g_index, construct_item );\n                    cur_page = cur_page->next;\n                }\n\n                __TBB_ASSERT( srcp==src.tail_page, NULL );\n                size_t last_index = modulo_power_of_two( tail_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page );\n                if( last_index==0 ) last_index = base.my_rep->items_per_page;\n\n                cur_page->next = make_copy( base, srcp, 0, last_index, g_index, construct_item );\n                cur_page = cur_page->next;\n            }\n            tail_page = cur_page;\n        } __TBB_CATCH (...) 
{\n            invalidate_page_and_rethrow( g_index );\n        }\n    } else {\n        head_page = tail_page = NULL;\n    }\n    return *this;\n}\n\ntemplate<typename T>\nvoid micro_queue<T>::invalidate_page_and_rethrow( ticket k ) {\n    // Append an invalid page at address 1 so that no more pushes are allowed.\n    page* invalid_page = (page*)uintptr_t(1);\n    {\n        spin_mutex::scoped_lock lock( page_mutex );\n        itt_store_word_with_release(tail_counter, k+concurrent_queue_rep_base::n_queue+1);\n        page* q = tail_page;\n        if( is_valid_page(q) )\n            q->next = invalid_page;\n        else\n            head_page = invalid_page;\n        tail_page = invalid_page;\n    }\n    __TBB_RETHROW();\n}\n\ntemplate<typename T>\nconcurrent_queue_rep_base::page* micro_queue<T>::make_copy( concurrent_queue_base_v3<T>& base,\n    const concurrent_queue_rep_base::page* src_page, size_t begin_in_page, size_t end_in_page,\n    ticket& g_index, item_constructor_t construct_item )\n{\n    concurrent_queue_page_allocator& pa = base;\n    page* new_page = pa.allocate_page();\n    new_page->next = NULL;\n    new_page->mask = src_page->mask;\n    for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index )\n        if( new_page->mask & uintptr_t(1)<<begin_in_page )\n            copy_item( *new_page, begin_in_page, *src_page, begin_in_page, construct_item );\n    return new_page;\n}\n\ntemplate<typename T>\nclass micro_queue_pop_finalizer: no_copy {\n    typedef concurrent_queue_rep_base::page page;\n    ticket my_ticket;\n    micro_queue<T>& my_queue;\n    page* my_page;\n    concurrent_queue_page_allocator& allocator;\npublic:\n    micro_queue_pop_finalizer( micro_queue<T>& queue, concurrent_queue_base_v3<T>& b, ticket k, page* p ) :\n        my_ticket(k), my_queue(queue), my_page(p), allocator(b)\n    {}\n    ~micro_queue_pop_finalizer() ;\n};\n\ntemplate<typename T>\nmicro_queue_pop_finalizer<T>::~micro_queue_pop_finalizer() {\n    page* p = 
my_page;\n    if( is_valid_page(p) ) {\n        spin_mutex::scoped_lock lock( my_queue.page_mutex );\n        page* q = p->next;\n        my_queue.head_page = q;\n        if( !is_valid_page(q) ) {\n            my_queue.tail_page = NULL;\n        }\n    }\n    itt_store_word_with_release(my_queue.head_counter, my_ticket);\n    if( is_valid_page(p) ) {\n        allocator.deallocate_page( p );\n    }\n}\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n#pragma warning( pop )\n#endif // warning 4146 is back\n\ntemplate<typename T> class concurrent_queue_iterator_rep ;\ntemplate<typename T> class concurrent_queue_iterator_base_v3;\n\n//! representation of concurrent_queue_base\n/**\n * the class inherits from concurrent_queue_rep_base and defines an array of micro_queue<T>'s\n */\ntemplate<typename T>\nstruct concurrent_queue_rep : public concurrent_queue_rep_base {\n    micro_queue<T> array[n_queue];\n\n    //! Map ticket to an array index\n    static size_t index( ticket k ) {\n        return k*phi%n_queue;\n    }\n\n    micro_queue<T>& choose( ticket k ) {\n        // The formula here approximates LRU in a cache-oblivious way.\n        return array[index(k)];\n    }\n};\n\n//! base class of concurrent_queue\n/**\n * The class implements the interface defined by concurrent_queue_page_allocator\n * and has a pointer to an instance of concurrent_queue_rep.\n */\ntemplate<typename T>\nclass concurrent_queue_base_v3: public concurrent_queue_page_allocator {\n    //! 
Internal representation\n    concurrent_queue_rep<T>* my_rep;\n\n    friend struct concurrent_queue_rep<T>;\n    friend class micro_queue<T>;\n    friend class concurrent_queue_iterator_rep<T>;\n    friend class concurrent_queue_iterator_base_v3<T>;\n\nprotected:\n    typedef typename concurrent_queue_rep<T>::page page;\n\nprivate:\n    typedef typename micro_queue<T>::padded_page padded_page;\n    typedef typename micro_queue<T>::item_constructor_t item_constructor_t;\n\n    /* override */ virtual page *allocate_page() {\n        concurrent_queue_rep<T>& r = *my_rep;\n        size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T);\n        return reinterpret_cast<page*>(allocate_block ( n ));\n    }\n\n    /* override */ virtual void deallocate_page( concurrent_queue_rep_base::page *p ) {\n        concurrent_queue_rep<T>& r = *my_rep;\n        size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T);\n        deallocate_block( reinterpret_cast<void*>(p), n );\n    }\n\n    //! custom allocator\n    virtual void *allocate_block( size_t n ) = 0;\n\n    //! custom de-allocator\n    virtual void deallocate_block( void *p, size_t n ) = 0;\n\nprotected:\n    concurrent_queue_base_v3();\n\n    /* override */ virtual ~concurrent_queue_base_v3() {\n#if TBB_USE_ASSERT\n        size_t nq = my_rep->n_queue;\n        for( size_t i=0; i<nq; i++ )\n            __TBB_ASSERT( my_rep->array[i].tail_page==NULL, \"pages were not freed properly\" );\n#endif /* TBB_USE_ASSERT */\n        cache_aligned_allocator<concurrent_queue_rep<T> >().deallocate(my_rep,1);\n    }\n\n    //! Enqueue item at tail of queue\n    void internal_push( const void* src, item_constructor_t construct_item ) {\n         concurrent_queue_rep<T>& r = *my_rep;\n         ticket k = r.tail_counter++;\n         r.choose(k).push( src, k, *this, construct_item );\n    }\n\n    //! Attempt to dequeue item from queue.\n    /** NULL if there was no item to dequeue. 
*/\n    bool internal_try_pop( void* dst ) ;\n\n    //! Get size of queue; result may be invalid if queue is modified concurrently\n    size_t internal_size() const ;\n\n    //! check if the queue is empty; thread safe\n    bool internal_empty() const ;\n\n    //! free any remaining pages\n    /* note that the name may be misleading, but it remains so due to a historical accident. */\n    void internal_finish_clear() ;\n\n    //! Obsolete\n    void internal_throw_exception() const {\n        throw_exception( eid_bad_alloc );\n    }\n\n    //! copy or move internal representation\n    void assign( const concurrent_queue_base_v3& src, item_constructor_t construct_item ) ;\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! swap internal representation\n    void internal_swap( concurrent_queue_base_v3& src ) {\n        std::swap( my_rep, src.my_rep );\n    }\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\n};\n\ntemplate<typename T>\nconcurrent_queue_base_v3<T>::concurrent_queue_base_v3() {\n    const size_t item_size = sizeof(T);\n    my_rep = cache_aligned_allocator<concurrent_queue_rep<T> >().allocate(1);\n    __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, \"alignment error\" );\n    __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, \"alignment error\" );\n    __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, \"alignment error\" );\n    __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, \"alignment error\" );\n    memset(my_rep,0,sizeof(concurrent_queue_rep<T>));\n    my_rep->item_size = item_size;\n    my_rep->items_per_page = item_size<=  8 ? 32 :\n                             item_size<= 16 ? 16 :\n                             item_size<= 32 ?  8 :\n                             item_size<= 64 ?  4 :\n                             item_size<=128 ?  
2 :\n                             1;\n}\n\ntemplate<typename T>\nbool concurrent_queue_base_v3<T>::internal_try_pop( void* dst ) {\n    concurrent_queue_rep<T>& r = *my_rep;\n    ticket k;\n    do {\n        k = r.head_counter;\n        for(;;) {\n            if( (ptrdiff_t)(r.tail_counter-k)<=0 ) {\n                // Queue is empty\n                return false;\n            }\n            // Queue had item with ticket k when we looked.  Attempt to get that item.\n            ticket tk=k;\n#if defined(_MSC_VER) && defined(_Wp64)\n    #pragma warning (push)\n    #pragma warning (disable: 4267)\n#endif\n            k = r.head_counter.compare_and_swap( tk+1, tk );\n#if defined(_MSC_VER) && defined(_Wp64)\n    #pragma warning (pop)\n#endif\n            if( k==tk )\n                break;\n            // Another thread snatched the item, retry.\n        }\n    } while( !r.choose( k ).pop( dst, k, *this ) );\n    return true;\n}\n\ntemplate<typename T>\nsize_t concurrent_queue_base_v3<T>::internal_size() const {\n    concurrent_queue_rep<T>& r = *my_rep;\n    __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL );\n    ticket hc = r.head_counter;\n    size_t nie = r.n_invalid_entries;\n    ticket tc = r.tail_counter;\n    __TBB_ASSERT( hc!=tc || !nie, NULL );\n    ptrdiff_t sz = tc-hc-nie;\n    return sz<0 ? 
0 :  size_t(sz);\n}\n\ntemplate<typename T>\nbool concurrent_queue_base_v3<T>::internal_empty() const {\n    concurrent_queue_rep<T>& r = *my_rep;\n    ticket tc = r.tail_counter;\n    ticket hc = r.head_counter;\n    // if tc!=r.tail_counter, the queue was not empty at some point between the two reads.\n    return tc==r.tail_counter && tc==hc+r.n_invalid_entries ;\n}\n\ntemplate<typename T>\nvoid concurrent_queue_base_v3<T>::internal_finish_clear() {\n    concurrent_queue_rep<T>& r = *my_rep;\n    size_t nq = r.n_queue;\n    for( size_t i=0; i<nq; ++i ) {\n        page* tp = r.array[i].tail_page;\n        if( is_valid_page(tp) ) {\n            __TBB_ASSERT( r.array[i].head_page==tp, \"at most one page should remain\" );\n            deallocate_page( tp );\n            r.array[i].tail_page = NULL;\n        } else\n            __TBB_ASSERT( !is_valid_page(r.array[i].head_page), \"head page pointer corrupt?\" );\n    }\n}\n\ntemplate<typename T>\nvoid concurrent_queue_base_v3<T>::assign( const concurrent_queue_base_v3& src,\n    item_constructor_t construct_item )\n{\n    concurrent_queue_rep<T>& r = *my_rep;\n    r.items_per_page = src.my_rep->items_per_page;\n\n    // copy concurrent_queue_rep data\n    r.head_counter = src.my_rep->head_counter;\n    r.tail_counter = src.my_rep->tail_counter;\n    r.n_invalid_entries = src.my_rep->n_invalid_entries;\n\n    // copy or move micro_queues\n    for( size_t i = 0; i < r.n_queue; ++i )\n        r.array[i].assign( src.my_rep->array[i], *this, construct_item);\n\n    __TBB_ASSERT( r.head_counter==src.my_rep->head_counter && r.tail_counter==src.my_rep->tail_counter,\n            \"the source concurrent queue should not be concurrently modified.\" );\n}\n\ntemplate<typename Container, typename Value> class concurrent_queue_iterator;\n\ntemplate<typename T>\nclass concurrent_queue_iterator_rep: no_assign {\n    typedef typename micro_queue<T>::padded_page padded_page;\npublic:\n    ticket head_counter;\n    const 
concurrent_queue_base_v3<T>& my_queue;\n    typename concurrent_queue_base_v3<T>::page* array[concurrent_queue_rep<T>::n_queue];\n    concurrent_queue_iterator_rep( const concurrent_queue_base_v3<T>& queue ) :\n        head_counter(queue.my_rep->head_counter),\n        my_queue(queue)\n    {\n        for( size_t k=0; k<concurrent_queue_rep<T>::n_queue; ++k )\n            array[k] = queue.my_rep->array[k].head_page;\n    }\n\n    //! Set item to point to kth element.  Return true if at end of queue or item is marked valid; false otherwise.\n    bool get_item( T*& item, size_t k ) ;\n};\n\ntemplate<typename T>\nbool concurrent_queue_iterator_rep<T>::get_item( T*& item, size_t k ) {\n    if( k==my_queue.my_rep->tail_counter ) {\n        item = NULL;\n        return true;\n    } else {\n        typename concurrent_queue_base_v3<T>::page* p = array[concurrent_queue_rep<T>::index(k)];\n        __TBB_ASSERT(p,NULL);\n        size_t i = modulo_power_of_two( k/concurrent_queue_rep<T>::n_queue, my_queue.my_rep->items_per_page );\n        item = &micro_queue<T>::get_ref(*p,i);\n        return (p->mask & uintptr_t(1)<<i)!=0;\n    }\n}\n\n//! Constness-independent portion of concurrent_queue_iterator.\n/** @ingroup containers */\ntemplate<typename Value>\nclass concurrent_queue_iterator_base_v3 : no_assign {\n    //! Represents concurrent_queue over which we are iterating.\n    /** NULL if one past last element in queue. */\n    concurrent_queue_iterator_rep<Value>* my_rep;\n\n    template<typename C, typename T, typename U>\n    friend bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );\n\n    template<typename C, typename T, typename U>\n    friend bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );\nprotected:\n    //! Pointer to current item\n    Value* my_item;\n\n    //! 
Default constructor\n    concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {\n#if __TBB_GCC_OPTIMIZER_ORDERING_BROKEN\n        __TBB_compiler_fence();\n#endif\n    }\n\n    //! Copy constructor\n    concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i )\n    : no_assign(), my_rep(NULL), my_item(NULL) {\n        assign(i);\n    }\n\n    //! Construct iterator pointing to head of queue.\n    concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3<Value>& queue ) ;\n\n    //! Assignment\n    void assign( const concurrent_queue_iterator_base_v3<Value>& other ) ;\n\n    //! Advance iterator one step towards tail of queue.\n    void advance() ;\n\n    //! Destructor\n    ~concurrent_queue_iterator_base_v3() {\n        cache_aligned_allocator<concurrent_queue_iterator_rep<Value> >().deallocate(my_rep, 1);\n        my_rep = NULL;\n    }\n};\n\ntemplate<typename Value>\nconcurrent_queue_iterator_base_v3<Value>::concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3<Value>& queue ) {\n    my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep<Value> >().allocate(1);\n    new( my_rep ) concurrent_queue_iterator_rep<Value>(queue);\n    size_t k = my_rep->head_counter;\n    if( !my_rep->get_item(my_item, k) ) advance();\n}\n\ntemplate<typename Value>\nvoid concurrent_queue_iterator_base_v3<Value>::assign( const concurrent_queue_iterator_base_v3<Value>& other ) {\n    if( my_rep!=other.my_rep ) {\n        if( my_rep ) {\n            cache_aligned_allocator<concurrent_queue_iterator_rep<Value> >().deallocate(my_rep, 1);\n            my_rep = NULL;\n        }\n        if( other.my_rep ) {\n            my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep<Value> >().allocate(1);\n            new( my_rep ) concurrent_queue_iterator_rep<Value>( *other.my_rep );\n        }\n    }\n    my_item = other.my_item;\n}\n\ntemplate<typename Value>\nvoid concurrent_queue_iterator_base_v3<Value>::advance() {\n    
__TBB_ASSERT( my_item, \"attempt to increment iterator past end of queue\" );\n    size_t k = my_rep->head_counter;\n    const concurrent_queue_base_v3<Value>& queue = my_rep->my_queue;\n#if TBB_USE_ASSERT\n    Value* tmp;\n    my_rep->get_item(tmp,k);\n    __TBB_ASSERT( my_item==tmp, NULL );\n#endif /* TBB_USE_ASSERT */\n    size_t i = modulo_power_of_two( k/concurrent_queue_rep<Value>::n_queue, queue.my_rep->items_per_page );\n    if( i==queue.my_rep->items_per_page-1 ) {\n        typename concurrent_queue_base_v3<Value>::page*& root = my_rep->array[concurrent_queue_rep<Value>::index(k)];\n        root = root->next;\n    }\n    // advance k\n    my_rep->head_counter = ++k;\n    if( !my_rep->get_item(my_item, k) ) advance();\n}\n\n//! Similar to C++0x std::remove_cv\n/** \"tbb_\" prefix added to avoid overload confusion with C++0x implementations. */\ntemplate<typename T> struct tbb_remove_cv {typedef T type;};\ntemplate<typename T> struct tbb_remove_cv<const T> {typedef T type;};\ntemplate<typename T> struct tbb_remove_cv<volatile T> {typedef T type;};\ntemplate<typename T> struct tbb_remove_cv<const volatile T> {typedef T type;};\n\n//! Meets requirements of a forward iterator for STL.\n/** Value is either the T or const T type of the container.\n    @ingroup containers */\ntemplate<typename Container, typename Value>\nclass concurrent_queue_iterator: public concurrent_queue_iterator_base_v3<typename tbb_remove_cv<Value>::type>,\n        public std::iterator<std::forward_iterator_tag,Value> {\n#if !__TBB_TEMPLATE_FRIENDS_BROKEN\n    template<typename T, class A>\n    friend class ::tbb::strict_ppl::concurrent_queue;\n#else\npublic: // workaround for MSVC\n#endif\n    //! 
Construct iterator pointing to head of queue.\n    concurrent_queue_iterator( const concurrent_queue_base_v3<Value>& queue ) :\n        concurrent_queue_iterator_base_v3<typename tbb_remove_cv<Value>::type>(queue)\n    {\n    }\n\npublic:\n    concurrent_queue_iterator() {}\n\n    concurrent_queue_iterator( const concurrent_queue_iterator<Container,typename Container::value_type>& other ) :\n        concurrent_queue_iterator_base_v3<typename tbb_remove_cv<Value>::type>(other)\n    {}\n\n    //! Iterator assignment\n    concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) {\n        this->assign(other);\n        return *this;\n    }\n\n    //! Reference to current item\n    Value& operator*() const {\n        return *static_cast<Value*>(this->my_item);\n    }\n\n    Value* operator->() const {return &operator*();}\n\n    //! Advance to next item in queue\n    concurrent_queue_iterator& operator++() {\n        this->advance();\n        return *this;\n    }\n\n    //! Post increment\n    Value* operator++(int) {\n        Value* result = &operator*();\n        operator++();\n        return result;\n    }\n}; // concurrent_queue_iterator\n\n\ntemplate<typename C, typename T, typename U>\nbool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {\n    return i.my_item==j.my_item;\n}\n\ntemplate<typename C, typename T, typename U>\nbool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {\n    return i.my_item!=j.my_item;\n}\n\n} // namespace internal\n\n//! @endcond\n\n} // namespace strict_ppl\n\n//! @cond INTERNAL\nnamespace internal {\n\nclass concurrent_queue_rep;\nclass concurrent_queue_iterator_rep;\nclass concurrent_queue_iterator_base_v3;\ntemplate<typename Container, typename Value> class concurrent_queue_iterator;\n\n//! 
For internal use only.\n/** Type-independent portion of concurrent_queue.\n    @ingroup containers */\nclass concurrent_queue_base_v3: no_copy {\n    //! Internal representation\n    concurrent_queue_rep* my_rep;\n\n    friend class concurrent_queue_rep;\n    friend struct micro_queue;\n    friend class micro_queue_pop_finalizer;\n    friend class concurrent_queue_iterator_rep;\n    friend class concurrent_queue_iterator_base_v3;\nprotected:\n    //! Prefix on a page\n    struct page {\n        page* next;\n        uintptr_t mask;\n    };\n\n    //! Capacity of the queue\n    ptrdiff_t my_capacity;\n\n    //! Always a power of 2\n    size_t items_per_page;\n\n    //! Size of an item\n    size_t item_size;\n\n    enum copy_specifics { copy, move };\n\n#if __TBB_PROTECTED_NESTED_CLASS_BROKEN\npublic:\n#endif\n    template<typename T>\n    struct padded_page: page {\n        //! Not defined anywhere - exists to quiet warnings.\n        padded_page();\n        //! Not defined anywhere - exists to quiet warnings.\n        void operator=( const padded_page& );\n        //! Must be last field.\n        T last;\n    };\n\nprivate:\n    virtual void copy_item( page& dst, size_t index, const void* src ) = 0;\n    virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0;\nprotected:\n    __TBB_EXPORTED_METHOD concurrent_queue_base_v3( size_t item_size );\n    virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3();\n\n    //! Enqueue item at tail of queue using copy operation\n    void __TBB_EXPORTED_METHOD internal_push( const void* src );\n\n    //! Dequeue item from head of queue\n    void __TBB_EXPORTED_METHOD internal_pop( void* dst );\n\n    //! Abort all pending queue operations\n    void __TBB_EXPORTED_METHOD internal_abort();\n\n    //! Attempt to enqueue item onto queue using copy operation\n    bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src );\n\n    //! 
Attempt to dequeue item from queue.\n    /** NULL if there was no item to dequeue. */\n    bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst );\n\n    //! Get size of queue\n    ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const;\n\n    //! Check if the queue is emtpy\n    bool __TBB_EXPORTED_METHOD internal_empty() const;\n\n    //! Set the queue capacity\n    void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size );\n\n    //! custom allocator\n    virtual page *allocate_page() = 0;\n\n    //! custom de-allocator\n    virtual void deallocate_page( page *p ) = 0;\n\n    //! free any remaining pages\n    /* note that the name may be misleading, but it remains so due to a historical accident. */\n    void __TBB_EXPORTED_METHOD internal_finish_clear() ;\n\n    //! throw an exception\n    void __TBB_EXPORTED_METHOD internal_throw_exception() const;\n\n    //! copy internal representation\n    void __TBB_EXPORTED_METHOD assign( const concurrent_queue_base_v3& src ) ;\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //! swap queues\n    void internal_swap( concurrent_queue_base_v3& src ) {\n        std::swap( my_capacity, src.my_capacity );\n        std::swap( items_per_page, src.items_per_page );\n        std::swap( item_size, src.item_size );\n        std::swap( my_rep, src.my_rep );\n    }\n#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */\n\n    //! Enqueues item at tail of queue using specified operation (copy or move)\n    void internal_insert_item( const void* src, copy_specifics op_type );\n\n    //! Attempts to enqueue at tail of queue using specified operation (copy or move)\n    bool internal_insert_if_not_full( const void* src, copy_specifics op_type );\n\n    //! 
Assigns one queue to another using specified operation (copy or move)\n    void internal_assign( const concurrent_queue_base_v3& src, copy_specifics op_type );\nprivate:\n    virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0;\n};\n\n//! For internal use only.\n/** Backward compatible modification of concurrent_queue_base_v3\n    @ingroup containers */\nclass concurrent_queue_base_v8: public concurrent_queue_base_v3 {\nprotected:\n    concurrent_queue_base_v8( size_t item_sz ) : concurrent_queue_base_v3( item_sz ) {}\n\n    //! move items\n    void __TBB_EXPORTED_METHOD move_content( concurrent_queue_base_v8& src ) ;\n\n    //! Attempt to enqueue item onto queue using move operation\n    bool __TBB_EXPORTED_METHOD internal_push_move_if_not_full( const void* src );\n\n    //! Enqueue item at tail of queue using move operation\n    void __TBB_EXPORTED_METHOD internal_push_move( const void* src );\nprivate:\n    friend struct micro_queue;\n    virtual void move_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0;\n    virtual void move_item( page& dst, size_t index, const void* src ) = 0;\n};\n\n//! Type-independent portion of concurrent_queue_iterator.\n/** @ingroup containers */\nclass concurrent_queue_iterator_base_v3 {\n    //! concurrent_queue over which we are iterating.\n    /** NULL if one past last element in queue. */\n    concurrent_queue_iterator_rep* my_rep;\n\n    template<typename C, typename T, typename U>\n    friend bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );\n\n    template<typename C, typename T, typename U>\n    friend bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );\n\n    void initialize( const concurrent_queue_base_v3& queue, size_t offset_of_data );\nprotected:\n    //! Pointer to current item\n    void* my_item;\n\n    //! 
Default constructor\n    concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {}\n\n    //! Copy constructor\n    concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) : my_rep(NULL), my_item(NULL) {\n        assign(i);\n    }\n\n    //! Obsolete entry point for constructing iterator pointing to head of queue.\n    /** Does not work correctly for SSE types. */\n    __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue );\n\n    //! Construct iterator pointing to head of queue.\n    __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue, size_t offset_of_data );\n\n    //! Assignment\n    void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base_v3& i );\n\n    //! Advance iterator one step towards tail of queue.\n    void __TBB_EXPORTED_METHOD advance();\n\n    //! Destructor\n    __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base_v3();\n};\n\ntypedef concurrent_queue_iterator_base_v3 concurrent_queue_iterator_base;\n\n//! Meets requirements of a forward iterator for STL.\n/** Value is either the T or const T type of the container.\n    @ingroup containers */\ntemplate<typename Container, typename Value>\nclass concurrent_queue_iterator: public concurrent_queue_iterator_base,\n        public std::iterator<std::forward_iterator_tag,Value> {\n\n#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)\n    template<typename T, class A>\n    friend class ::tbb::concurrent_bounded_queue;\n#else\npublic: // workaround for MSVC\n#endif\n\n    //! 
Construct iterator pointing to head of queue.\n    concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) :\n        concurrent_queue_iterator_base_v3(queue,__TBB_offsetof(concurrent_queue_base_v3::padded_page<Value>,last))\n    {\n    }\n\npublic:\n    concurrent_queue_iterator() {}\n\n    /** If Value==Container::value_type, then this routine is the copy constructor.\n        If Value==const Container::value_type, then this routine is a conversion constructor. */\n    concurrent_queue_iterator( const concurrent_queue_iterator<Container,typename Container::value_type>& other ) :\n        concurrent_queue_iterator_base_v3(other)\n    {}\n\n    //! Iterator assignment\n    concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) {\n        assign(other);\n        return *this;\n    }\n\n    //! Reference to current item\n    Value& operator*() const {\n        return *static_cast<Value*>(my_item);\n    }\n\n    Value* operator->() const {return &operator*();}\n\n    //! Advance to next item in queue\n    concurrent_queue_iterator& operator++() {\n        advance();\n        return *this;\n    }\n\n    //! Post increment\n    Value* operator++(int) {\n        Value* result = &operator*();\n        operator++();\n        return result;\n    }\n}; // concurrent_queue_iterator\n\n\ntemplate<typename C, typename T, typename U>\nbool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {\n    return i.my_item==j.my_item;\n}\n\ntemplate<typename C, typename T, typename U>\nbool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {\n    return i.my_item!=j.my_item;\n}\n\n} // namespace internal;\n\n//! @endcond\n\n} // namespace tbb\n\n#endif /* __TBB__concurrent_queue_impl_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_concurrent_unordered_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n/* Container implementations in this header are based on PPL implementations \n   provided by Microsoft. 
*/\n\n#ifndef __TBB__concurrent_unordered_impl_H\n#define __TBB__concurrent_unordered_impl_H\n#if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#include \"../tbb_stddef.h\"\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <iterator>\n#include <utility>      // Need std::pair\n#include <functional>   // Need std::equal_to (in ../concurrent_unordered_*.h)\n#include <string>       // For tbb_hasher\n#include <cstring>      // Need std::memset\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\n#include \"../atomic.h\"\n#include \"../tbb_exception.h\"\n#include \"../tbb_allocator.h\"\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    #include <initializer_list>\n#endif\n\nnamespace tbb {\nnamespace interface5 {\n//! 
@cond INTERNAL\nnamespace internal {\n\ntemplate <typename T, typename Allocator>\nclass split_ordered_list;\ntemplate <typename Traits>\nclass concurrent_unordered_base;\n\n// Forward list iterators (without skipping dummy elements)\ntemplate<class Solist, typename Value>\nclass flist_iterator : public std::iterator<std::forward_iterator_tag, Value>\n{\n    template <typename T, typename Allocator>\n    friend class split_ordered_list;\n    template <typename Traits>\n    friend class concurrent_unordered_base;\n    template<class M, typename V>\n    friend class flist_iterator;\n\n    typedef typename Solist::nodeptr_t nodeptr_t;\npublic:\n    typedef typename Solist::value_type value_type;\n    typedef typename Solist::difference_type difference_type;\n    typedef typename Solist::pointer pointer;\n    typedef typename Solist::reference reference;\n\n    flist_iterator() : my_node_ptr(0) {}\n    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )\n        : my_node_ptr(other.my_node_ptr) {}\n\n    reference operator*() const { return my_node_ptr->my_element; }\n    pointer operator->() const { return &**this; }\n\n    flist_iterator& operator++() {\n        my_node_ptr = my_node_ptr->my_next;\n        return *this;\n    }\n\n    flist_iterator operator++(int) {\n        flist_iterator tmp = *this;\n        ++*this;\n        return tmp;\n    }\n\nprotected:\n    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}\n    nodeptr_t get_node_ptr() const { return my_node_ptr; }\n\n    nodeptr_t my_node_ptr;\n\n    template<typename M, typename T, typename U>\n    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );\n    template<typename M, typename T, typename U>\n    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );\n};\n\ntemplate<typename Solist, typename T, typename U>\nbool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {\n   
 return i.my_node_ptr == j.my_node_ptr;\n}\ntemplate<typename Solist, typename T, typename U>\nbool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {\n    return i.my_node_ptr != j.my_node_ptr;\n}\n\n// Split-order list iterators, needed to skip dummy elements\ntemplate<class Solist, typename Value>\nclass solist_iterator : public flist_iterator<Solist, Value>\n{\n    typedef flist_iterator<Solist, Value> base_type;\n    typedef typename Solist::nodeptr_t nodeptr_t;\n    using base_type::get_node_ptr;\n    template <typename T, typename Allocator>\n    friend class split_ordered_list;\n    template<class M, typename V>\n    friend class solist_iterator;\n    template<typename M, typename T, typename U>\n    friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );\n    template<typename M, typename T, typename U>\n    friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );\n\n    const Solist *my_list_ptr;\n    solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}\n\npublic:\n    typedef typename Solist::value_type value_type;\n    typedef typename Solist::difference_type difference_type;\n    typedef typename Solist::pointer pointer;\n    typedef typename Solist::reference reference;\n\n    solist_iterator() {}\n    solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )\n        : base_type(other), my_list_ptr(other.my_list_ptr) {}\n\n    reference operator*() const {\n        return this->base_type::operator*();\n    }\n\n    pointer operator->() const {\n        return (&**this);\n    }\n\n    solist_iterator& operator++() {\n        do ++(*(base_type *)this);\n        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());\n\n        return (*this);\n    }\n\n    solist_iterator operator++(int) {\n        solist_iterator tmp = *this;\n        do ++*this;\n        while (get_node_ptr() != 
NULL && get_node_ptr()->is_dummy());\n\n        return (tmp);\n    }\n};\n\ntemplate<typename Solist, typename T, typename U>\nbool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {\n    return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;\n}\ntemplate<typename Solist, typename T, typename U>\nbool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {\n    return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;\n}\n\n// Forward type and class definitions\ntypedef size_t sokey_t;\n\n\n// Forward list in which elements are sorted in a split-order\ntemplate <typename T, typename Allocator>\nclass split_ordered_list\n{\npublic:\n    typedef split_ordered_list<T, Allocator> self_type;\n    typedef typename Allocator::template rebind<T>::other allocator_type;\n    struct node;\n    typedef node *nodeptr_t;\n\n    typedef typename allocator_type::size_type size_type;\n    typedef typename allocator_type::difference_type difference_type;\n    typedef typename allocator_type::pointer pointer;\n    typedef typename allocator_type::const_pointer const_pointer;\n    typedef typename allocator_type::reference reference;\n    typedef typename allocator_type::const_reference const_reference;\n    typedef typename allocator_type::value_type value_type;\n\n    typedef solist_iterator<self_type, const value_type> const_iterator;\n    typedef solist_iterator<self_type, value_type> iterator;\n    typedef flist_iterator<self_type, const value_type> raw_const_iterator;\n    typedef flist_iterator<self_type, value_type> raw_iterator;\n\n    // Node that holds the element in a split-ordered list\n    struct node : tbb::internal::no_assign\n    {\n    private:\n        // for compilers that try to generate default constructors though they are not needed.\n        node();  // VS 2008, 2010, 2012\n    public:\n        // Initialize the node with the given order key\n        void 
init(sokey_t order_key) {\n            my_order_key = order_key;\n            my_next = NULL;\n        }\n\n        // Return the order key (needed for hashing)\n        sokey_t get_order_key() const { // TODO: remove\n            return my_order_key;\n        }\n\n        // Inserts the new element in the list in an atomic fashion\n        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)\n        {\n            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next\n            nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);\n\n            if (exchange_node == current_node) // TODO: why this branch?\n            {\n                // Operation succeeded, return the new node\n                return new_node;\n            }\n            else\n            {\n                // Operation failed, return the \"interfering\" node\n                return exchange_node;\n            }\n        }\n\n        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets\n        // in the hash table to quickly index into the right subsection of the split-ordered list.\n        bool is_dummy() const {\n            return (my_order_key & 0x1) == 0;\n        }\n\n\n        nodeptr_t  my_next;      // Next element in the list\n        value_type my_element;   // Element storage\n        sokey_t    my_order_key; // Order key for this element\n    };\n\n    // Allocate a new node with the given order key and value\n    nodeptr_t create_node(sokey_t order_key, const T &value) {\n        nodeptr_t pnode = my_node_allocator.allocate(1);\n\n        __TBB_TRY {\n            new(static_cast<void*>(&pnode->my_element)) T(value);\n            pnode->init(order_key);\n        } __TBB_CATCH(...) 
{\n            my_node_allocator.deallocate(pnode, 1);\n            __TBB_RETHROW();\n        }\n\n        return (pnode);\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    //TODO: try to combine both implementations using poor man forward\n    //TODO: use RAII scoped guard instead of explicit catch\n    // Allocate a new node with the given order key and value\n    nodeptr_t create_node(sokey_t order_key, T &&value) {\n        nodeptr_t pnode = my_node_allocator.allocate(1);\n\n        __TBB_TRY {\n            new(static_cast<void*>(&pnode->my_element)) T(std::move(value));\n            pnode->init(order_key);\n        } __TBB_CATCH(...) {\n            my_node_allocator.deallocate(pnode, 1);\n            __TBB_RETHROW();\n        }\n\n        return (pnode);\n    }\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n\n    // Allocate a new node with the given order key; used to allocate dummy nodes\n    nodeptr_t create_node(sokey_t order_key) {\n        nodeptr_t pnode = my_node_allocator.allocate(1);\n        pnode->init(order_key);\n        return (pnode);\n    }\n\n   split_ordered_list(allocator_type a = allocator_type())\n       : my_node_allocator(a), my_element_count(0)\n    {\n        // Immediately allocate a dummy node with order key of 0. 
This node\n        // will always be the head of the list.\n        my_head = create_node(0);\n    }\n\n    ~split_ordered_list()\n    {\n        // Clear the list\n        clear();\n\n        // Remove the head element which is not cleared by clear()\n        nodeptr_t pnode = my_head;\n        my_head = NULL;\n\n        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, \"Invalid head list node\");\n\n        destroy_node(pnode);\n    }\n\n    // Common forward list functions\n\n    allocator_type get_allocator() const {\n        return (my_node_allocator);\n    }\n\n    void clear() {\n        nodeptr_t pnext;\n        nodeptr_t pnode = my_head;\n\n        __TBB_ASSERT(my_head != NULL, \"Invalid head list node\");\n        pnext = pnode->my_next;\n        pnode->my_next = NULL;\n        pnode = pnext;\n\n        while (pnode != NULL)\n        {\n            pnext = pnode->my_next;\n            destroy_node(pnode);\n            pnode = pnext;\n        }\n\n        my_element_count = 0;\n    }\n\n    // Returns a first non-dummy element in the SOL\n    iterator begin() {\n        return first_real_iterator(raw_begin());\n    }\n\n    // Returns a first non-dummy element in the SOL\n    const_iterator begin() const {\n        return first_real_iterator(raw_begin());\n    }\n\n    iterator end() {\n        return (iterator(0, this));\n    }\n\n    const_iterator end() const {\n        return (const_iterator(0, this));\n    }\n\n    const_iterator cbegin() const {\n        return (((const self_type *)this)->begin());\n    }\n\n    const_iterator cend() const {\n        return (((const self_type *)this)->end());\n    }\n\n    // Checks if the number of elements (non-dummy) is 0\n    bool empty() const {\n        return (my_element_count == 0);\n    }\n\n    // Returns the number of non-dummy elements in the list\n    size_type size() const {\n        return my_element_count;\n    }\n\n    // Returns the maximum size of the list, determined by the allocator\n    
size_type max_size() const {\n        return my_node_allocator.max_size();\n    }\n\n    // Swaps 'this' list with the passed in one\n    void swap(self_type& other)\n    {\n        if (this == &other)\n        {\n            // Nothing to do\n            return;\n        }\n\n            std::swap(my_element_count, other.my_element_count);\n            std::swap(my_head, other.my_head);\n    }\n\n    // Split-order list functions\n\n    // Returns a first element in the SOL, which is always a dummy\n    raw_iterator raw_begin() {\n        return raw_iterator(my_head);\n    }\n\n    // Returns a first element in the SOL, which is always a dummy\n    raw_const_iterator raw_begin() const {\n        return raw_const_iterator(my_head);\n    }\n\n    raw_iterator raw_end() {\n        return raw_iterator(0);\n    }\n\n    raw_const_iterator raw_end() const {\n        return raw_const_iterator(0);\n    }\n\n    static sokey_t get_order_key(const raw_const_iterator& it) {\n        return it.get_node_ptr()->get_order_key();\n    }\n\n    static sokey_t get_safe_order_key(const raw_const_iterator& it) {\n        if( !it.get_node_ptr() )  return ~sokey_t(0);\n        return it.get_node_ptr()->get_order_key();\n    }\n\n    // Returns a public iterator version of the internal iterator. Public iterator must not\n    // be a dummy private iterator.\n    iterator get_iterator(raw_iterator it) {\n        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), \"Invalid user node (dummy)\");\n        return iterator(it.get_node_ptr(), this);\n    }\n\n    // Returns a public iterator version of the internal iterator. 
Public iterator must not\n    // be a dummy private iterator.\n    const_iterator get_iterator(raw_const_iterator it) const {\n        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), \"Invalid user node (dummy)\");\n        return const_iterator(it.get_node_ptr(), this);\n    }\n\n    // Returns a non-const version of the raw_iterator\n    raw_iterator get_iterator(raw_const_iterator it) {\n        return raw_iterator(it.get_node_ptr());\n    }\n\n    // Returns a non-const version of the iterator\n    static iterator get_iterator(const_iterator it) {\n        return iterator(it.my_node_ptr, it.my_list_ptr);\n    }\n\n    // Returns a public iterator version of a first non-dummy internal iterator at or after\n    // the passed in internal iterator.\n    iterator first_real_iterator(raw_iterator it)\n    {\n        // Skip all dummy, internal only iterators\n        while (it != raw_end() && it.get_node_ptr()->is_dummy())\n            ++it;\n\n        return iterator(it.get_node_ptr(), this);\n    }\n\n    // Returns a public iterator version of a first non-dummy internal iterator at or after\n    // the passed in internal iterator.\n    const_iterator first_real_iterator(raw_const_iterator it) const\n    {\n        // Skip all dummy, internal only iterators\n        while (it != raw_end() && it.get_node_ptr()->is_dummy())\n            ++it;\n\n        return const_iterator(it.get_node_ptr(), this);\n    }\n\n    // Erase an element using the allocator\n    void destroy_node(nodeptr_t pnode) {\n        if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);\n        my_node_allocator.deallocate(pnode, 1);\n    }\n\n    // Try to insert a new element in the list. 
If insert fails, return the node that\n    // was inserted instead.\n    nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {\n        new_node->my_next = current_node;\n        return previous->atomic_set_next(new_node, current_node);\n    }\n\n    // Insert a new element between passed in iterators\n    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)\n    {\n        nodeptr_t pnode = create_node(order_key, value);\n        nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());\n\n        if (inserted_node == pnode)\n        {\n            // If the insert succeeded, check that the order is correct and increment the element count\n            check_range();\n            *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));\n            return std::pair<iterator, bool>(iterator(pnode, this), true);\n        }\n        else\n        {\n            // If the insert failed (element already there), then delete the new one\n            destroy_node(pnode);\n            return std::pair<iterator, bool>(end(), false);\n        }\n    }\n\n    // Insert a new dummy element, starting search at a parent dummy element\n    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)\n    {\n        raw_iterator last = raw_end();\n        raw_iterator where = it;\n\n        __TBB_ASSERT(where != last, \"Invalid head node\");\n\n        ++where;\n\n        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)\n        nodeptr_t dummy_node = create_node(order_key);\n\n        for (;;)\n        {\n            __TBB_ASSERT(it != last, \"Invalid head list node\");\n\n            // If the head iterator is at the end of the list, or past the point where this dummy\n            // node needs to be inserted, then try to insert it.\n            if (where == last || 
get_order_key(where) > order_key)\n            {\n                __TBB_ASSERT(get_order_key(it) < order_key, \"Invalid node order in the list\");\n\n                // Try to insert it in the right place\n                nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());\n\n                if (inserted_node == dummy_node)\n                {\n                    // Insertion succeeded, check the list for order violations\n                    check_range();\n                    return raw_iterator(dummy_node);\n                }\n                else\n                {\n                    // Insertion failed: either dummy node was inserted by another thread, or\n                    // a real element was inserted at exactly the same place as dummy node.\n                    // Proceed with the search from the previous location where order key was\n                    // known to be larger (note: this is legal only because there is no safe\n                    // concurrent erase operation supported).\n                    where = it;\n                    ++where;\n                    continue;\n                }\n            }\n            else if (get_order_key(where) == order_key)\n            {\n                // Another dummy node with the same value found, discard the new one.\n                destroy_node(dummy_node);\n                return where;\n            }\n\n            // Move the iterator forward\n            it = where;\n            ++where;\n        }\n\n    }\n\n    // This erase function can handle both real and dummy nodes\n    void erase_node(raw_iterator previous, raw_const_iterator& where)\n    {\n        nodeptr_t pnode = (where++).get_node_ptr();\n        nodeptr_t prevnode = previous.get_node_ptr();\n        __TBB_ASSERT(prevnode->my_next == pnode, \"Erase must take consecutive iterators\");\n        prevnode->my_next = pnode->my_next;\n\n        destroy_node(pnode);\n    }\n\n    // Erase the 
element (previous node needs to be passed because this is a forward only list)\n    iterator erase_node(raw_iterator previous, const_iterator where)\n    {\n        raw_const_iterator it = where;\n        erase_node(previous, it);\n        my_element_count--;\n\n        return get_iterator(first_real_iterator(it));\n    }\n\n    // Move all elements from the passed in split-ordered list to this one\n    void move_all(self_type& source)\n    {\n        raw_const_iterator first = source.raw_begin();\n        raw_const_iterator last = source.raw_end();\n\n        if (first == last)\n            return;\n\n        nodeptr_t previous_node = my_head;\n        raw_const_iterator begin_iterator = first++;\n\n        // Move all elements one by one, including dummy ones\n        for (raw_const_iterator it = first; it != last;)\n        {\n            nodeptr_t pnode = it.get_node_ptr();\n\n            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);\n            previous_node = try_insert(previous_node, dummy_node, NULL);\n            __TBB_ASSERT(previous_node != NULL, \"Insertion must succeed\");\n            raw_const_iterator where = it++;\n            source.erase_node(get_iterator(begin_iterator), where);\n        }\n        check_range();\n    }\n\n\nprivate:\n    //Need to setup private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base\n    template <typename Traits>\n    friend class concurrent_unordered_base;\n\n    // Check the list for order violations\n    void check_range()\n    {\n#if TBB_USE_ASSERT\n        for (raw_iterator it = raw_begin(); it != raw_end(); ++it)\n        {\n            raw_iterator next_iterator = it;\n            ++next_iterator;\n\n            __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), \"!!! 
List order inconsistency !!!\");\n        }\n#endif\n    }\n\n    typename allocator_type::template rebind<node>::other my_node_allocator;  // allocator object for nodes\n    size_type                                             my_element_count;   // Total item count, not counting dummy nodes\n    nodeptr_t                                             my_head;            // pointer to head node\n};\n\n// Template class for hash compare\ntemplate<typename Key, typename Hasher, typename Key_equality>\nclass hash_compare\n{\npublic:\n    typedef Hasher hasher;\n    typedef Key_equality key_equal;\n\n    hash_compare() {}\n\n    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}\n\n    hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}\n\n    size_t operator()(const Key& key) const {\n        return ((size_t)my_hash_object(key));\n    }\n\n    bool operator()(const Key& key1, const Key& key2) const {\n        return (!my_key_compare_object(key1, key2));\n    }\n\n    Hasher       my_hash_object;        // The hash object\n    Key_equality my_key_compare_object; // The equality comparator object\n};\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n#pragma warning(push)\n#pragma warning(disable: 4127) // warning C4127: conditional expression is constant\n#endif\n\ntemplate <typename Traits>\nclass concurrent_unordered_base : public Traits\n{\nprotected:\n    // Type definitions\n    typedef concurrent_unordered_base<Traits> self_type;\n    typedef typename Traits::value_type value_type;\n    typedef typename Traits::key_type key_type;\n    typedef typename Traits::hash_compare hash_compare;\n    typedef typename Traits::value_compare value_compare;\n    typedef typename Traits::allocator_type allocator_type;\n    typedef typename hash_compare::hasher hasher;\n    typedef typename hash_compare::key_equal key_equal;\n    typedef typename allocator_type::pointer pointer;\n    typedef typename 
allocator_type::const_pointer const_pointer;\n    typedef typename allocator_type::reference reference;\n    typedef typename allocator_type::const_reference const_reference;\n    typedef typename allocator_type::size_type size_type;\n    typedef typename allocator_type::difference_type difference_type;\n    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;\n    typedef typename solist_t::nodeptr_t nodeptr_t;\n    // Iterators that walk the entire split-order list, including dummy nodes\n    typedef typename solist_t::raw_iterator raw_iterator;\n    typedef typename solist_t::raw_const_iterator raw_const_iterator;\n    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets\n    typedef typename solist_t::const_iterator const_iterator;\n    typedef iterator local_iterator;\n    typedef const_iterator const_local_iterator;\n    using Traits::my_hash_compare;\n    using Traits::get_key;\n    using Traits::allow_multimapping;\n\n    static const size_type initial_bucket_number = 8;                               // Initial number of buckets\nprivate:\n    typedef std::pair<iterator, iterator> pairii_t;\n    typedef std::pair<const_iterator, const_iterator> paircc_t;\n\n    static size_type const pointers_per_table = sizeof(size_type) * 8;              // One bucket segment per bit\n    static const size_type initial_bucket_load = 4;                                // Initial maximum number of elements per bucket\n\n    struct call_internal_clear_on_exit{\n        concurrent_unordered_base* my_instance;\n        call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}\n        void dismiss(){ my_instance = NULL;}\n        ~call_internal_clear_on_exit(){\n            if (my_instance){\n                my_instance->internal_clear();\n            }\n        }\n    };\nprotected:\n    // Constructors/Destructors\n    concurrent_unordered_base(size_type n_of_buckets = 
initial_bucket_number,\n        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())\n        : Traits(hc), my_solist(a),\n          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)\n    {\n        if( n_of_buckets == 0) ++n_of_buckets;\n        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2\n        internal_init();\n    }\n\n    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)\n        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)\n    {\n        internal_init();\n        internal_copy(right);\n    }\n\n    concurrent_unordered_base(const concurrent_unordered_base& right)\n        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())\n    {\n        //FIXME:exception safety seems to be broken here\n        internal_init();\n        internal_copy(right);\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    concurrent_unordered_base(concurrent_unordered_base&& right)\n        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())\n    {\n        internal_init();\n        swap(right);\n    }\n\n    concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a)\n        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)\n    {\n        call_internal_clear_on_exit clear_buckets_on_exception(this);\n\n        internal_init();\n        if (a == right.get_allocator()){\n            this->swap(right);\n        }else{\n            my_maximum_bucket_size = right.my_maximum_bucket_size;\n            my_number_of_buckets = right.my_number_of_buckets;\n            my_solist.my_element_count = right.my_solist.my_element_count;\n\n            if (! 
right.my_solist.empty()){\n                nodeptr_t previous_node = my_solist.my_head;\n\n                // Move all elements one by one, including dummy ones\n                for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)\n                {\n                    const nodeptr_t pnode = it.get_node_ptr();\n                    nodeptr_t node;\n                    if (pnode->is_dummy()) {\n                        node = my_solist.create_node(pnode->get_order_key());\n                        size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;\n                        set_bucket(bucket, node);\n                    }else{\n                        node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));\n                    }\n\n                    previous_node = my_solist.try_insert(previous_node, node, NULL);\n                    __TBB_ASSERT(previous_node != NULL, \"Insertion of node failed. 
Concurrent inserts in constructor ?\");\n                }\n                my_solist.check_range();\n            }\n        }\n\n        clear_buckets_on_exception.dismiss();\n    }\n\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n\n    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {\n        if (this != &right)\n            internal_copy(right);\n        return (*this);\n    }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    concurrent_unordered_base& operator=(concurrent_unordered_base&& other)\n    {\n        if(this != &other){\n            typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;\n            if(pocma_t::value || this->my_allocator == other.my_allocator) {\n                concurrent_unordered_base trash (std::move(*this));\n                swap(other);\n                if (pocma_t::value) {\n                    using std::swap;\n                    //TODO: swapping allocators here may be a problem, replace with single direction moving\n                    swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);\n                    swap(this->my_allocator, other.my_allocator);\n                }\n            } else {\n                concurrent_unordered_base moved_copy(std::move(other),this->my_allocator);\n                this->swap(moved_copy);\n            }\n        }\n        return *this;\n    }\n\n#endif //__TBB_CPP11_RVALUE_REF_PRESENT\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! 
assignment operator from initializer_list\n    concurrent_unordered_base& operator=(std::initializer_list<value_type> il)\n    {\n        this->clear();\n        this->insert(il.begin(),il.end());\n        return (*this);\n    }\n#endif //# __TBB_INITIALIZER_LISTS_PRESENT\n\n\n    ~concurrent_unordered_base() {\n        // Delete all node segments\n        internal_clear();\n    }\n\npublic:\n    allocator_type get_allocator() const {\n        return my_solist.get_allocator();\n    }\n\n    // Size and capacity function\n    bool empty() const {\n        return my_solist.empty();\n    }\n\n    size_type size() const {\n        return my_solist.size();\n    }\n\n    size_type max_size() const {\n        return my_solist.max_size();\n    }\n\n    // Iterators \n    iterator begin() {\n        return my_solist.begin();\n    }\n\n    const_iterator begin() const {\n        return my_solist.begin();\n    }\n\n    iterator end() {\n        return my_solist.end();\n    }\n\n    const_iterator end() const {\n        return my_solist.end();\n    }\n\n    const_iterator cbegin() const {\n        return my_solist.cbegin();\n    }\n\n    const_iterator cend() const {\n        return my_solist.cend();\n    }\n\n    // Parallel traversal support\n    class const_range_type : tbb::internal::no_assign {\n        const concurrent_unordered_base &my_table;\n        raw_const_iterator my_begin_node;\n        raw_const_iterator my_end_node;\n        mutable raw_const_iterator my_midpoint_node;\n    public:\n        //! Type for size of a range\n        typedef typename concurrent_unordered_base::size_type size_type;\n        typedef typename concurrent_unordered_base::value_type value_type;\n        typedef typename concurrent_unordered_base::reference reference;\n        typedef typename concurrent_unordered_base::difference_type difference_type;\n        typedef typename concurrent_unordered_base::const_iterator iterator;\n\n        //! 
True if range is empty.\n        bool empty() const {return my_begin_node == my_end_node;}\n\n        //! True if range can be partitioned into two subranges.\n        bool is_divisible() const {\n            return my_midpoint_node != my_end_node;\n        }\n        //! Split range.\n        const_range_type( const_range_type &r, split ) : \n            my_table(r.my_table), my_end_node(r.my_end_node)\n        {\n            r.my_end_node = my_begin_node = r.my_midpoint_node;\n            __TBB_ASSERT( !empty(), \"Splitting despite the range is not divisible\" );\n            __TBB_ASSERT( !r.empty(), \"Splitting despite the range is not divisible\" );\n            set_midpoint();\n            r.set_midpoint();\n        }\n        //! Init range with container and grainsize specified\n        const_range_type( const concurrent_unordered_base &a_table ) : \n            my_table(a_table), my_begin_node(a_table.my_solist.begin()),\n            my_end_node(a_table.my_solist.end())\n        {\n            set_midpoint();\n        }\n        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }\n        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }\n        //! The grain size for this range.\n        size_type grainsize() const { return 1; }\n\n        //! 
Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.\n        void set_midpoint() const {\n            if( my_begin_node == my_end_node ) // not divisible\n                my_midpoint_node = my_end_node;\n            else {\n                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);\n                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);\n                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;\n                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);\n                if(__TBB_ReverseBits(mid_bucket) > begin_key) {\n                    // found a dummy_node between begin and end\n                    my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));\n                }\n                else {\n                    // didn't find a dummy node between begin and end.\n                    my_midpoint_node = my_end_node;\n                }\n#if TBB_USE_ASSERT\n                {\n                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);\n                    __TBB_ASSERT( begin_key < mid_key, \"my_begin_node is after my_midpoint_node\" );\n                    __TBB_ASSERT( mid_key <= end_key, \"my_midpoint_node is after my_end_node\" );\n                }\n#endif // TBB_USE_ASSERT\n            }\n        }\n    };\n\n    class range_type : public const_range_type {\n    public:\n        typedef typename concurrent_unordered_base::iterator iterator;\n        //! Split range.\n        range_type( range_type &r, split ) : const_range_type( r, split() ) {}\n        //! 
Init range with container and grainsize specified\n        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}\n\n        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }\n        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }\n    };\n\n    range_type range() {\n        return range_type( *this );\n    }\n\n    const_range_type range() const {\n        return const_range_type( *this );\n    }\n\n    // Modifiers\n    std::pair<iterator, bool> insert(const value_type& value) {\n        return internal_insert(value);\n    }\n\n    iterator insert(const_iterator, const value_type& value) {\n        // Ignore hint\n        return insert(value).first;\n    }\n\n    template<class Iterator>\n    void insert(Iterator first, Iterator last) {\n        for (Iterator it = first; it != last; ++it)\n            insert(*it);\n    }\n\n#if __TBB_INITIALIZER_LISTS_PRESENT\n    //! Insert initializer list\n    void insert(std::initializer_list<value_type> il) {\n        insert(il.begin(), il.end());\n    }\n#endif\n\n    iterator unsafe_erase(const_iterator where) {\n        return internal_erase(where);\n    }\n\n    iterator unsafe_erase(const_iterator first, const_iterator last) {\n        while (first != last)\n            unsafe_erase(first++);\n        return my_solist.get_iterator(first);\n    }\n\n    size_type unsafe_erase(const key_type& key) {\n        pairii_t where = equal_range(key);\n        size_type item_count = internal_distance(where.first, where.second);\n        unsafe_erase(where.first, where.second);\n        return item_count;\n    }\n\n    void swap(concurrent_unordered_base& right) {\n        if (this != &right) {\n            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here\n            my_solist.swap(right.my_solist);\n            internal_swap_buckets(right);\n            std::swap(my_number_of_buckets, 
right.my_number_of_buckets);\n            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);\n        }\n    }\n\n    // Observers\n    hasher hash_function() const {\n        return my_hash_compare.my_hash_object;\n    }\n\n    key_equal key_eq() const {\n        return my_hash_compare.my_key_compare_object;\n    }\n\n    void clear() {\n        // Clear list\n        my_solist.clear();\n\n        // Clear buckets\n        internal_clear();\n\n        // Initialize bucket 0\n        __TBB_ASSERT(my_buckets[0] == NULL, NULL);\n        raw_iterator dummy_node = my_solist.raw_begin();\n        set_bucket(0, dummy_node);\n    }\n\n    // Lookup\n    iterator find(const key_type& key) {\n        return internal_find(key);\n    }\n\n    const_iterator find(const key_type& key) const {\n        return const_cast<self_type*>(this)->internal_find(key);\n    }\n\n    size_type count(const key_type& key) const {\n        if(allow_multimapping) {\n            paircc_t answer = equal_range(key);\n            size_type item_count = internal_distance(answer.first, answer.second);\n            return item_count;\n        } else {\n            return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;\n        }\n    }\n\n    std::pair<iterator, iterator> equal_range(const key_type& key) {\n        return internal_equal_range(key);\n    }\n\n    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {\n        return const_cast<self_type*>(this)->internal_equal_range(key);\n    }\n\n    // Bucket interface - for debugging \n    size_type unsafe_bucket_count() const {\n        return my_number_of_buckets;\n    }\n\n    size_type unsafe_max_bucket_count() const {\n        return segment_size(pointers_per_table-1);\n    }\n\n    size_type unsafe_bucket_size(size_type bucket) {\n        size_type item_count = 0;\n        if (is_initialized(bucket)) {\n            raw_iterator it = get_bucket(bucket);\n            ++it;\n            
for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)\n                ++item_count;\n        }\n        return item_count;\n    }\n\n    size_type unsafe_bucket(const key_type& key) const {\n        sokey_t order_key = (sokey_t) my_hash_compare(key);\n        size_type bucket = order_key % my_number_of_buckets;\n        return bucket;\n    }\n\n    // If the bucket is initialized, return a first non-dummy element in it\n    local_iterator unsafe_begin(size_type bucket) {\n        if (!is_initialized(bucket))\n            return end();\n\n        raw_iterator it = get_bucket(bucket);\n        return my_solist.first_real_iterator(it);\n    }\n\n    // If the bucket is initialized, return a first non-dummy element in it\n    const_local_iterator unsafe_begin(size_type bucket) const\n    {\n        if (!is_initialized(bucket))\n            return end();\n\n        raw_const_iterator it = get_bucket(bucket);\n        return my_solist.first_real_iterator(it);\n    }\n\n    // @REVIEW: Takes O(n)\n    // Returns the iterator after the last non-dummy element in the bucket\n    local_iterator unsafe_end(size_type bucket)\n    {\n        if (!is_initialized(bucket))\n            return end();\n\n        raw_iterator it = get_bucket(bucket);\n    \n        // Find the end of the bucket, denoted by the dummy element\n        do ++it;\n        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());\n\n        // Return the first real element past the end of the bucket\n        return my_solist.first_real_iterator(it);\n    }\n\n    // @REVIEW: Takes O(n)\n    // Returns the iterator after the last non-dummy element in the bucket\n    const_local_iterator unsafe_end(size_type bucket) const\n    {\n        if (!is_initialized(bucket))\n            return end();\n\n        raw_const_iterator it = get_bucket(bucket);\n    \n        // Find the end of the bucket, denoted by the dummy element\n        do ++it;\n        while(it != my_solist.raw_end() 
&& !it.get_node_ptr()->is_dummy());\n\n        // Return the first real element past the end of the bucket\n        return my_solist.first_real_iterator(it);\n    }\n\n    const_local_iterator unsafe_cbegin(size_type bucket) const {\n        return ((const self_type *) this)->unsafe_begin(bucket);\n    }\n\n    const_local_iterator unsafe_cend(size_type bucket) const {\n        return ((const self_type *) this)->unsafe_end(bucket);\n    }\n\n    // Hash policy\n    float load_factor() const {\n        return (float) size() / (float) unsafe_bucket_count();\n    }\n\n    float max_load_factor() const {\n        return my_maximum_bucket_size;\n    }\n\n    void max_load_factor(float newmax) {\n        if (newmax != newmax || newmax < 0)\n            tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);\n        my_maximum_bucket_size = newmax;\n    }\n\n    // This function is a noop, because the underlying split-ordered list\n    // is already sorted, so an increase in the bucket number will be\n    // reflected next time this bucket is touched.\n    void rehash(size_type buckets) {\n        size_type current_buckets = my_number_of_buckets;\n        if (current_buckets >= buckets)\n            return;\n        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2\n    }\n\nprivate:\n\n    // Initialize the hash and keep the first bucket open\n    void internal_init() {\n        // Allocate an array of segment pointers\n        memset(my_buckets, 0, pointers_per_table * sizeof(void *));\n\n        // Initialize bucket 0\n        raw_iterator dummy_node = my_solist.raw_begin();\n        set_bucket(0, dummy_node);\n    }\n\n    void internal_clear() {\n        for (size_type index = 0; index < pointers_per_table; ++index) {\n            if (my_buckets[index] != NULL) {\n                size_type sz = segment_size(index);\n                for (size_type index2 = 0; index2 < sz; ++index2)\n                    
my_allocator.destroy(&my_buckets[index][index2]);\n                my_allocator.deallocate(my_buckets[index], sz);\n                my_buckets[index] = 0;\n            }\n        }\n    }\n\n    void internal_copy(const self_type& right) {\n        clear();\n\n        my_maximum_bucket_size = right.my_maximum_bucket_size;\n        my_number_of_buckets = right.my_number_of_buckets;\n\n        __TBB_TRY {\n            insert(right.begin(), right.end());\n            my_hash_compare = right.my_hash_compare;\n        } __TBB_CATCH(...) {\n            my_solist.clear();\n            __TBB_RETHROW();\n        }\n    }\n\n    void internal_swap_buckets(concurrent_unordered_base& right)\n    {\n        // Swap all node segments\n        for (size_type index = 0; index < pointers_per_table; ++index)\n        {\n            raw_iterator * iterator_pointer = my_buckets[index];\n            my_buckets[index] = right.my_buckets[index];\n            right.my_buckets[index] = iterator_pointer;\n        }\n    }\n\n    //TODO: why not use std::distance?\n    // Hash APIs\n    size_type internal_distance(const_iterator first, const_iterator last) const\n    {\n        size_type num = 0;\n\n        for (const_iterator it = first; it != last; ++it)\n            ++num;\n\n        return num;\n    }\n\n    // Insert an element in the hash given its value\n    std::pair<iterator, bool> internal_insert(const value_type& value)\n    {\n        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));\n        size_type bucket = order_key % my_number_of_buckets;\n\n        // If bucket is empty, initialize it first\n        if (!is_initialized(bucket))\n            init_bucket(bucket);\n\n        size_type new_count = 0;\n        order_key = split_order_key_regular(order_key);\n        raw_iterator it = get_bucket(bucket);\n        raw_iterator last = my_solist.raw_end();\n        raw_iterator where = it;\n\n        __TBB_ASSERT(where != last, \"Invalid head node\");\n\n        // 
First node is a dummy node\n        ++where;\n\n        for (;;)\n        {\n            if (where == last || solist_t::get_order_key(where) > order_key)\n            {\n                // Try to insert it in the right place\n                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);\n                \n                if (result.second)\n                {\n                    // Insertion succeeded, adjust the table size, if needed\n                    adjust_table_size(new_count, my_number_of_buckets);\n                    return result;\n                }\n                else\n                {\n                    // Insertion failed: either the same node was inserted by another thread, or\n                    // another element was inserted at exactly the same place as this node.\n                    // Proceed with the search from the previous location where order key was\n                    // known to be larger (note: this is legal only because there is no safe\n                    // concurrent erase operation supported).\n                    where = it;\n                    ++where;\n                    continue;\n                }\n            }\n            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)\n            {\n                // Element already in the list, return it\n                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);\n            }\n\n            // Move the iterator forward\n            it = where;\n            ++where;\n        }\n    }\n\n    // Find the element in the split-ordered list\n    iterator internal_find(const key_type& key)\n    {\n        sokey_t order_key = (sokey_t) my_hash_compare(key);\n        size_type bucket = order_key % my_number_of_buckets;\n\n        // If bucket is empty, initialize it first\n        if (!is_initialized(bucket))\n          
  init_bucket(bucket);\n\n        order_key = split_order_key_regular(order_key);\n        raw_iterator last = my_solist.raw_end();\n\n        for (raw_iterator it = get_bucket(bucket); it != last; ++it)\n        {\n            if (solist_t::get_order_key(it) > order_key)\n            {\n                // If the order key is smaller than the current order key, the element\n                // is not in the hash.\n                return end();\n            }\n            else if (solist_t::get_order_key(it) == order_key)\n            {\n                // The fact that order keys match does not mean that the element is found.\n                // Key function comparison has to be performed to check whether this is the\n                // right element. If not, keep searching while order key is the same.\n                if (!my_hash_compare(get_key(*it), key))\n                    return my_solist.get_iterator(it);\n            }\n        }\n\n        return end();\n    }\n\n    // Erase an element from the list. 
This is not a concurrency safe function.\n    iterator internal_erase(const_iterator it)\n    {\n        key_type key = get_key(*it);\n        sokey_t order_key = (sokey_t) my_hash_compare(key);\n        size_type bucket = order_key % my_number_of_buckets;\n\n        // If bucket is empty, initialize it first\n        if (!is_initialized(bucket))\n            init_bucket(bucket);\n\n        order_key = split_order_key_regular(order_key);\n\n        raw_iterator previous = get_bucket(bucket);\n        raw_iterator last = my_solist.raw_end();\n        raw_iterator where = previous;\n\n        __TBB_ASSERT(where != last, \"Invalid head node\");\n\n        // First node is a dummy node\n        ++where;\n\n        for (;;) {\n            if (where == last)\n                return end();\n            else if (my_solist.get_iterator(where) == it)\n                return my_solist.erase_node(previous, it);\n\n            // Move the iterator forward\n            previous = where;\n            ++where;\n        }\n    }\n\n    // Return the [begin, end) pair of iterators with the same key values.\n    // This operation makes sense only if mapping is many-to-one.\n    pairii_t internal_equal_range(const key_type& key)\n    {\n        sokey_t order_key = (sokey_t) my_hash_compare(key);\n        size_type bucket = order_key % my_number_of_buckets;\n\n        // If bucket is empty, initialize it first\n        if (!is_initialized(bucket))\n            init_bucket(bucket);\n\n        order_key = split_order_key_regular(order_key);\n        raw_iterator end_it = my_solist.raw_end();\n\n        for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)\n        {\n            if (solist_t::get_order_key(it) > order_key)\n            {\n                // There is no element with the given key\n                return pairii_t(end(), end());\n            }\n            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))\n            {\n     
           iterator first = my_solist.get_iterator(it);\n                iterator last = first;\n                do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );\n                return pairii_t(first, last);\n            }\n        }\n\n        return pairii_t(end(), end());\n    }\n\n    // Bucket APIs\n    void init_bucket(size_type bucket)\n    {\n        // Bucket 0 has no parent.\n        __TBB_ASSERT( bucket != 0, \"The first bucket must always be initialized\");\n\n        size_type parent_bucket = get_parent(bucket);\n\n        // All parent_bucket buckets have to be initialized before this bucket is\n        if (!is_initialized(parent_bucket))\n            init_bucket(parent_bucket);\n\n        raw_iterator parent = get_bucket(parent_bucket);\n\n        // Create a dummy first node in this bucket\n        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));\n        set_bucket(bucket, dummy_node);\n    }\n\n    void adjust_table_size(size_type total_elements, size_type current_size)\n    {\n        // Grow the table by a factor of 2 if possible and needed\n        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )\n        {\n            // Double the size of the hash only if size has not changed in between loads\n            my_number_of_buckets.compare_and_swap(2u*current_size, current_size);\n            //Simple \"my_number_of_buckets.compare_and_swap( current_size<<1, current_size );\" does not work for VC8\n            //due to overzealous compiler warnings in /Wp64 mode\n        }\n    }\n\n    size_type get_parent(size_type bucket) const\n    {\n        // Unsets bucket's most significant turned-on bit\n        size_type msb = __TBB_Log2((uintptr_t)bucket);\n        return bucket & ~(size_type(1) << msb);\n    }\n\n\n    // Dynamic sized array (segments)\n    //! 
@return segment index of given index in the array\n    static size_type segment_index_of( size_type index ) {\n        return size_type( __TBB_Log2( uintptr_t(index|1) ) );\n    }\n\n    //! @return the first array index of given segment\n    static size_type segment_base( size_type k ) {\n        return (size_type(1)<<k & ~size_type(1));\n    }\n\n    //! @return segment size\n    static size_type segment_size( size_type k ) {\n        return k? size_type(1)<<k : 2;\n    }\n\n    raw_iterator get_bucket(size_type bucket) const {\n        size_type segment = segment_index_of(bucket);\n        bucket -= segment_base(segment);\n        __TBB_ASSERT( my_buckets[segment], \"bucket must be in an allocated segment\" );\n        return my_buckets[segment][bucket];\n    }\n\n    void set_bucket(size_type bucket, raw_iterator dummy_head) {\n        size_type segment = segment_index_of(bucket);\n        bucket -= segment_base(segment);\n\n        if (my_buckets[segment] == NULL) {\n            size_type sz = segment_size(segment);\n            raw_iterator * new_segment = my_allocator.allocate(sz);\n            std::memset(new_segment, 0, sz*sizeof(raw_iterator));\n\n            if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)\n                my_allocator.deallocate(new_segment, sz);\n        }\n\n        my_buckets[segment][bucket] = dummy_head;\n    }\n\n    bool is_initialized(size_type bucket) const {\n        size_type segment = segment_index_of(bucket);\n        bucket -= segment_base(segment);\n\n        if (my_buckets[segment] == NULL)\n            return false;\n\n        raw_iterator it = my_buckets[segment][bucket];\n        return (it.get_node_ptr() != NULL);\n    }\n\n    // Utilities for keys\n\n    // A regular order key has its original hash value reversed and the last bit set\n    sokey_t split_order_key_regular(sokey_t order_key) const {\n        return __TBB_ReverseBits(order_key) | 0x1;\n    }\n\n    // A dummy order key has its 
original hash value reversed and the last bit unset\n    sokey_t split_order_key_dummy(sokey_t order_key) const {\n        return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);\n    }\n\n    // Shared variables\n    atomic<size_type>                                             my_number_of_buckets;       // Current table size\n    solist_t                                                      my_solist;                  // List where all the elements are kept\n    typename allocator_type::template rebind<raw_iterator>::other my_allocator;               // Allocator object for segments\n    float                                                         my_maximum_bucket_size;     // Maximum size of the bucket\n    atomic<raw_iterator*>                                         my_buckets[pointers_per_table]; // The segment table\n};\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n#pragma warning(pop) // warning 4127 is back\n#endif\n\n//! Hash multiplier\nstatic const size_t hash_multiplier = tbb::internal::select_size_t_constant<2654435769U, 11400714819323198485ULL>::value;\n} // namespace internal\n//! @endcond\n//! 
Hasher functions\ntemplate<typename T>\ninline size_t tbb_hasher( const T& t ) {\n    return static_cast<size_t>( t ) * internal::hash_multiplier;\n}\ntemplate<typename P>\ninline size_t tbb_hasher( P* ptr ) {\n    size_t const h = reinterpret_cast<size_t>( ptr );\n    return (h >> 3) ^ h;\n}\ntemplate<typename E, typename S, typename A>\ninline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {\n    size_t h = 0;\n    for( const E* c = s.c_str(); *c; ++c )\n        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);\n    return h;\n}\ntemplate<typename F, typename S>\ninline size_t tbb_hasher( const std::pair<F,S>& p ) {\n    return tbb_hasher(p.first) ^ tbb_hasher(p.second);\n}\n} // namespace interface5\nusing interface5::tbb_hasher;\n\n\n// Template class for hash compare\ntemplate<typename Key>\nclass tbb_hash\n{\npublic:\n    tbb_hash() {}\n\n    size_t operator()(const Key& key) const\n    {\n        return tbb_hasher(key);\n    }\n};\n\n} // namespace tbb\n#endif// __TBB__concurrent_unordered_impl_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_flow_graph_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__flow_graph_impl_H\n#define __TBB__flow_graph_impl_H\n\n#ifndef __TBB_flow_graph_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\nnamespace internal {\n\n    namespace graph_policy_namespace {\n        enum graph_buffer_policy { rejecting, reserving, queueing, tag_matching };\n    }\n\n// -------------- function_body containers ----------------------\n\n    //! 
A functor that takes no input and generates a value of type Output\n    template< typename Output >\n    class source_body : tbb::internal::no_assign {\n    public:\n        virtual ~source_body() {}\n        virtual bool operator()(Output &output) = 0;\n        virtual source_body* clone() = 0;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        virtual void reset_body() = 0;\n#endif\n    };\n\n    //! The leaf for source_body\n    template< typename Output, typename Body>\n    class source_body_leaf : public source_body<Output> {\n    public:\n        source_body_leaf( const Body &_body ) : body(_body), init_body(_body) { }\n        /*override*/ bool operator()(Output &output) { return body( output ); }\n        /*override*/ source_body_leaf* clone() {\n            return new source_body_leaf< Output, Body >(init_body);\n        }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/ void reset_body() {\n            body = init_body;\n        }\n#endif\n        Body get_body() { return body; }\n    private:\n        Body body;\n        Body init_body;\n    };\n\n    //! A functor that takes an Input and generates an Output\n    template< typename Input, typename Output >\n    class function_body : tbb::internal::no_assign {\n    public:\n        virtual ~function_body() {}\n        virtual Output operator()(const Input &input) = 0;\n        virtual function_body* clone() = 0;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        virtual void reset_body() = 0;\n#endif\n    };\n\n    //! 
the leaf for function_body\n    template <typename Input, typename Output, typename B>\n    class function_body_leaf : public function_body< Input, Output > {\n    public:\n        function_body_leaf( const B &_body ) : body(_body), init_body(_body) { }\n        Output operator()(const Input &i) { return body(i); }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/ void reset_body() {\n            body = init_body;\n        }\n#endif\n        B get_body() { return body; }\n        /*override*/ function_body_leaf* clone() {\n            return new function_body_leaf< Input, Output, B >(init_body);\n        }\n    private:\n        B body;\n        B init_body;\n    };\n\n    //! the leaf for function_body specialized for Input and output of continue_msg\n    template <typename B>\n    class function_body_leaf< continue_msg, continue_msg, B> : public function_body< continue_msg, continue_msg > {\n    public:\n        function_body_leaf( const B &_body ) : body(_body), init_body(_body) { }\n        continue_msg operator()( const continue_msg &i ) {\n            body(i);\n            return i;\n        }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/ void reset_body() {\n            body = init_body;\n        }\n#endif\n        B get_body() { return body; }\n        /*override*/ function_body_leaf* clone() {\n           return new function_body_leaf< continue_msg, continue_msg, B >(init_body);\n        }\n    private:\n        B body;\n        B init_body;\n    };\n\n    //! 
the leaf for function_body specialized for Output of continue_msg\n    template <typename Input, typename B>\n    class function_body_leaf< Input, continue_msg, B> : public function_body< Input, continue_msg > {\n    public:\n        function_body_leaf( const B &_body ) : body(_body), init_body(_body) { }\n        continue_msg operator()(const Input &i) {\n            body(i);\n            return continue_msg();\n        }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/ void reset_body() {\n            body = init_body;\n        }\n#endif\n        B get_body() { return body; }\n        /*override*/ function_body_leaf* clone() {\n            return new function_body_leaf< Input, continue_msg, B >(init_body);\n        }\n    private:\n        B body;\n        B init_body;\n    };\n\n    //! the leaf for function_body specialized for Input of continue_msg\n    template <typename Output, typename B>\n    class function_body_leaf< continue_msg, Output, B > : public function_body< continue_msg, Output > {\n    public:\n        function_body_leaf( const B &_body ) : body(_body), init_body(_body) { }\n        Output operator()(const continue_msg &i) {\n            return body(i);\n        }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/ void reset_body() {\n            body = init_body;\n        }\n#endif\n        B get_body() { return body; }\n        /*override*/ function_body_leaf* clone() {\n            return new function_body_leaf< continue_msg, Output, B >(init_body);\n        }\n    private:\n        B body;\n        B init_body;\n    };\n\n    //! 
function_body that takes an Input and a set of output ports\n    template<typename Input, typename OutputSet>\n    class multifunction_body : tbb::internal::no_assign {\n    public:\n        virtual ~multifunction_body () {}\n        virtual void operator()(const Input &/* input*/, OutputSet &/*oset*/) = 0;\n        virtual multifunction_body* clone() = 0;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        virtual void reset_body() = 0;\n#endif\n    };\n\n    //! leaf for multifunction.  OutputSet can be a std::tuple or a vector.\n    template<typename Input, typename OutputSet, typename B>\n    class multifunction_body_leaf : public multifunction_body<Input, OutputSet> {\n    public:\n        multifunction_body_leaf(const B &_body) : body(_body), init_body(_body) { }\n        void operator()(const Input &input, OutputSet &oset) {\n            body(input, oset); // body may explicitly put() to one or more of oset.\n        }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/ void reset_body() {\n            body = init_body;\n        }\n#endif\n        B get_body() { return body; }\n        /*override*/ multifunction_body_leaf* clone() {\n            return new multifunction_body_leaf<Input, OutputSet,B>(init_body);\n        }\n    private:\n        B body;\n        B init_body;\n    };\n\n// --------------------------- end of function_body containers ------------------------\n\n// --------------------------- node task bodies ---------------------------------------\n\n    //! A task that calls a node's forward_task function\n    template< typename NodeType >\n    class forward_task_bypass : public task {\n\n        NodeType &my_node;\n\n    public:\n\n        forward_task_bypass( NodeType &n ) : my_node(n) {}\n\n        task *execute() {\n            task * new_task = my_node.forward_task();\n            if (new_task == SUCCESSFULLY_ENQUEUED) new_task = NULL;\n            return new_task;\n        }\n    };\n\n    //! 
A task that calls a node's apply_body_bypass function, passing in an input of type Input\n    //  return the task* unless it is SUCCESSFULLY_ENQUEUED, in which case return NULL\n    template< typename NodeType, typename Input >\n    class apply_body_task_bypass : public task {\n\n        NodeType &my_node;\n        Input my_input;\n\n    public:\n\n        apply_body_task_bypass( NodeType &n, const Input &i ) : my_node(n), my_input(i) {}\n\n        task *execute() {\n            task * next_task = my_node.apply_body_bypass( my_input );\n            if(next_task == SUCCESSFULLY_ENQUEUED) next_task = NULL;\n            return next_task;\n        }\n    };\n\n    //! A task that calls a node's apply_body function with no input\n    template< typename NodeType >\n    class source_task_bypass : public task {\n\n        NodeType &my_node;\n\n    public:\n\n        source_task_bypass( NodeType &n ) : my_node(n) {}\n\n        task *execute() {\n            task *new_task = my_node.apply_body_bypass( );\n            if(new_task == SUCCESSFULLY_ENQUEUED) return NULL;\n            return new_task;\n        }\n    };\n\n// ------------------------ end of node task bodies -----------------------------------\n\n    //! An empty functor that takes an Input and returns a default constructed Output\n    template< typename Input, typename Output >\n    struct empty_body {\n       Output operator()( const Input & ) const { return Output(); }\n    };\n\n    //! A node_cache maintains a std::queue of elements of type T.  
Each operation is protected by a lock.\n    template< typename T, typename M=spin_mutex >\n    class node_cache {\n        public:\n\n        typedef size_t size_type;\n\n        bool empty() {\n            typename my_mutex_type::scoped_lock lock( my_mutex );\n            return internal_empty();\n        }\n\n        void add( T &n ) {\n            typename my_mutex_type::scoped_lock lock( my_mutex );\n            internal_push(n);\n        }\n\n        void remove( T &n ) {\n            typename my_mutex_type::scoped_lock lock( my_mutex );\n            for ( size_t i = internal_size(); i != 0; --i ) {\n                T &s = internal_pop();\n                if ( &s == &n )  return;  // only remove one predecessor per request\n                internal_push(s);\n            }\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<T *> predecessor_vector_type;\n        void internal_add_built_predecessor( T &n ) {\n            typename my_mutex_type::scoped_lock lock( my_mutex );\n            my_built_predecessors.add_edge(n);\n        }\n\n        void internal_delete_built_predecessor( T &n ) {\n            typename my_mutex_type::scoped_lock lock( my_mutex );\n            my_built_predecessors.delete_edge(n);\n        }\n\n        void copy_predecessors( predecessor_vector_type &v) {\n            typename my_mutex_type::scoped_lock lock( my_mutex );\n            my_built_predecessors.copy_edges(v);\n        }\n\n        size_t predecessor_count() {\n            typename my_mutex_type::scoped_lock lock(my_mutex);\n            return (size_t)(my_built_predecessors.edge_count());\n        }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ \n\n    protected:\n\n        typedef M my_mutex_type;\n        my_mutex_type my_mutex;\n        std::queue< T * > my_q;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        edge_container<T> my_built_predecessors;\n#endif\n\n        // Assumes lock is held\n        inline bool internal_empty( )  {\n            
return my_q.empty();\n        }\n\n        // Assumes lock is held\n        inline size_type internal_size( )  {\n            return my_q.size();\n        }\n\n        // Assumes lock is held\n        inline void internal_push( T &n )  {\n            my_q.push(&n);\n        }\n\n        // Assumes lock is held\n        inline T &internal_pop() {\n            T *v = my_q.front();\n            my_q.pop();\n            return *v;\n        }\n\n    };\n\n    //! A cache of predecessors that only supports try_get\n    template< typename T, typename M=spin_mutex >\n    class predecessor_cache : public node_cache< sender<T>, M > {\n    public:\n        typedef M my_mutex_type;\n        typedef T output_type;\n        typedef sender<output_type> predecessor_type;\n        typedef receiver<output_type> successor_type;\n\n        predecessor_cache( ) : my_owner( NULL ) { }\n\n        void set_owner( successor_type *owner ) { my_owner = owner; }\n\n        bool get_item( output_type &v ) {\n\n            bool msg = false;\n\n            do {\n                predecessor_type *src;\n                {\n                    typename my_mutex_type::scoped_lock lock(this->my_mutex);\n                    if ( this->internal_empty() ) {\n                        break;\n                    }\n                    src = &this->internal_pop();\n                }\n\n                // Try to get from this sender\n                msg = src->try_get( v );\n\n                if (msg == false) {\n                    // Relinquish ownership of the edge\n                    if ( my_owner)\n                        src->register_successor( *my_owner );\n                } else {\n                    // Retain ownership of the edge\n                    this->add(*src);\n                }\n            } while ( msg == false );\n            return msg;\n        }\n\n        void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n            if(my_owner) {\n                for(;;) {\n                    
predecessor_type *src;\n                    {\n                        if(this->internal_empty()) break;\n                        src = &this->internal_pop();\n                    }\n                        src->register_successor( *my_owner);\n                }\n            }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            if (f&rf_extract && my_owner) \n                my_built_predecessors.receiver_extract(*my_owner);\n            __TBB_ASSERT(!(f&rf_extract) || this->internal_empty(), \"predecessor cache not empty\");\n#endif\n        }\n\n    protected:\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        using node_cache< sender<T>, M >::my_built_predecessors;\n#endif\n        successor_type *my_owner;\n    };\n\n    //! An cache of predecessors that supports requests and reservations\n    template< typename T, typename M=spin_mutex >\n    class reservable_predecessor_cache : public predecessor_cache< T, M > {\n    public:\n        typedef M my_mutex_type;\n        typedef T output_type;\n        typedef sender<T> predecessor_type;\n        typedef receiver<T> successor_type;\n\n        reservable_predecessor_cache( ) : reserved_src(NULL) { }\n\n        bool\n        try_reserve( output_type &v ) {\n            bool msg = false;\n\n            do {\n                {\n                    typename my_mutex_type::scoped_lock lock(this->my_mutex);\n                    if ( reserved_src || this->internal_empty() )\n                        return false;\n\n                    reserved_src = &this->internal_pop();\n                }\n\n                // Try to get from this sender\n                msg = reserved_src->try_reserve( v );\n\n                if (msg == false) {\n                    typename my_mutex_type::scoped_lock lock(this->my_mutex);\n                    // Relinquish ownership of the edge\n                    reserved_src->register_successor( *this->my_owner );\n                    reserved_src = NULL;\n                } else {\n                    
// Retain ownership of the edge\n                    this->add( *reserved_src );\n                }\n            } while ( msg == false );\n\n            return msg;\n        }\n\n        bool\n        try_release( ) {\n            reserved_src->try_release( );\n            reserved_src = NULL;\n            return true;\n        }\n\n        bool\n        try_consume( ) {\n            reserved_src->try_consume( );\n            reserved_src = NULL;\n            return true;\n        }\n\n        void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n            reserved_src = NULL;\n            predecessor_cache<T,M>::reset(__TBB_PFG_RESET_ARG(f));\n        }\n\n    private:\n        predecessor_type *reserved_src;\n    };\n\n\n    //! An abstract cache of successors\n    template<typename T, typename M=spin_rw_mutex >\n    class successor_cache : tbb::internal::no_copy {\n    protected:\n\n        typedef M my_mutex_type;\n        my_mutex_type my_mutex;\n\n        typedef receiver<T> *pointer_type;\n        typedef std::list< pointer_type > my_successors_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        edge_container<receiver<T> > my_built_successors;\n#endif\n        my_successors_type my_successors;\n\n        sender<T> *my_owner;\n\n    public:\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<pointer_type> successor_vector_type;\n        void internal_add_built_successor( receiver<T> &r) {\n            typename my_mutex_type::scoped_lock l(my_mutex, true);\n            my_built_successors.add_edge( r );\n        }\n\n        void internal_delete_built_successor( receiver<T> &r) {\n            typename my_mutex_type::scoped_lock l(my_mutex, true);\n            my_built_successors.delete_edge(r);\n        }\n\n        void copy_successors( successor_vector_type &v) {\n            typename my_mutex_type::scoped_lock l(my_mutex, false);\n            my_built_successors.copy_edges(v);\n        }\n\n        size_t successor_count() {\n            
typename my_mutex_type::scoped_lock l(my_mutex,false);\n            return my_built_successors.edge_count();\n        }\n\n        void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n            if (f&rf_extract && my_owner) \n                my_built_successors.sender_extract(*my_owner);\n        }\n#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n        successor_cache( ) : my_owner(NULL) {}\n\n        void set_owner( sender<T> *owner ) { my_owner = owner; }\n\n        virtual ~successor_cache() {}\n\n        void register_successor( receiver<T> &r ) {\n            typename my_mutex_type::scoped_lock l(my_mutex, true);\n            my_successors.push_back( &r );\n        }\n\n        void remove_successor( receiver<T> &r ) {\n            typename my_mutex_type::scoped_lock l(my_mutex, true);\n            for ( typename my_successors_type::iterator i = my_successors.begin();\n                  i != my_successors.end(); ++i ) {\n                if ( *i == & r ) {\n                    my_successors.erase(i);\n                    break;\n                }\n            }\n        }\n\n        bool empty() {\n            typename my_mutex_type::scoped_lock l(my_mutex, false);\n            return my_successors.empty();\n        }\n\n        void clear() {\n            my_successors.clear();\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            my_built_successors.clear();\n#endif\n        }\n\n        virtual task * try_put_task( const T &t ) = 0;\n     };\n\n    //! 
An abstract cache of successors, specialized to continue_msg\n    template<>\n    class successor_cache< continue_msg > : tbb::internal::no_copy {\n    protected:\n\n        typedef spin_rw_mutex my_mutex_type;\n        my_mutex_type my_mutex;\n\n        typedef receiver<continue_msg> *pointer_type;\n        typedef std::list< pointer_type > my_successors_type;\n        my_successors_type my_successors;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        edge_container<receiver<continue_msg> > my_built_successors;\n#endif\n\n        sender<continue_msg> *my_owner;\n\n    public:\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<pointer_type> successor_vector_type;\n        void internal_add_built_successor( receiver<continue_msg> &r) {\n            my_mutex_type::scoped_lock l(my_mutex, true);\n            my_built_successors.add_edge( r );\n        }\n\n        void internal_delete_built_successor( receiver<continue_msg> &r) {\n            my_mutex_type::scoped_lock l(my_mutex, true);\n            my_built_successors.delete_edge(r);\n        }\n\n        void copy_successors( successor_vector_type &v) {\n            my_mutex_type::scoped_lock l(my_mutex, false);\n            my_built_successors.copy_edges(v);\n        }\n\n        size_t successor_count() {\n            my_mutex_type::scoped_lock l(my_mutex,false);\n            return my_built_successors.edge_count();\n        }\n\n        void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {\n            if (f&rf_extract && my_owner) \n                my_built_successors.sender_extract(*my_owner);\n        }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n        successor_cache( ) : my_owner(NULL) {}\n\n        void set_owner( sender<continue_msg> *owner ) { my_owner = owner; }\n\n        virtual ~successor_cache() {}\n\n        void register_successor( receiver<continue_msg> &r ) {\n            my_mutex_type::scoped_lock l(my_mutex, true);\n            my_successors.push_back( &r );\n            if ( 
my_owner && r.is_continue_receiver() ) {\n                r.register_predecessor( *my_owner );\n            }\n        }\n\n        void remove_successor( receiver<continue_msg> &r ) {\n            my_mutex_type::scoped_lock l(my_mutex, true);\n            for ( my_successors_type::iterator i = my_successors.begin();\n                  i != my_successors.end(); ++i ) {\n                if ( *i == & r ) {\n                    // TODO: Check if we need to test for continue_receiver before\n                    // removing from r.\n                    if ( my_owner )\n                        r.remove_predecessor( *my_owner );\n                    my_successors.erase(i);\n                    break;\n                }\n            }\n        }\n\n        bool empty() {\n            my_mutex_type::scoped_lock l(my_mutex, false);\n            return my_successors.empty();\n        }\n\n        void clear() {\n            my_successors.clear();\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            my_built_successors.clear();\n#endif\n        }\n\n        virtual task * try_put_task( const continue_msg &t ) = 0;\n\n     };\n\n    //! 
A cache of successors that are broadcast to\n    template<typename T, typename M=spin_rw_mutex>\n    class broadcast_cache : public successor_cache<T, M> {\n        typedef M my_mutex_type;\n        typedef std::list< receiver<T> * > my_successors_type;\n\n    public:\n\n        broadcast_cache( ) {}\n\n        // as above, but call try_put_task instead, and return the last task we received (if any)\n        /*override*/ task * try_put_task( const T &t ) {\n            task * last_task = NULL;\n            bool upgraded = true;\n            typename my_mutex_type::scoped_lock l(this->my_mutex, upgraded);\n            typename my_successors_type::iterator i = this->my_successors.begin();\n            while ( i != this->my_successors.end() ) {\n                task *new_task = (*i)->try_put_task(t);\n                last_task = combine_tasks(last_task, new_task);  // enqueue if necessary\n                if(new_task) {\n                    ++i;\n                }\n                else {  // failed\n                    if ( (*i)->register_predecessor(*this->my_owner) ) {\n                        if (!upgraded) {\n                            l.upgrade_to_writer();\n                            upgraded = true;\n                        }\n                        i = this->my_successors.erase(i);\n                    } else {\n                        ++i;\n                    }\n                }\n            }\n            return last_task;\n        }\n\n    };\n\n    //! 
A cache of successors that are put in a round-robin fashion\n    template<typename T, typename M=spin_rw_mutex >\n    class round_robin_cache : public successor_cache<T, M> {\n        typedef size_t size_type;\n        typedef M my_mutex_type;\n        typedef std::list< receiver<T> * > my_successors_type;\n\n    public:\n\n        round_robin_cache( ) {}\n\n        size_type size() {\n            typename my_mutex_type::scoped_lock l(this->my_mutex, false);\n            return this->my_successors.size();\n        }\n\n        /*override*/task *try_put_task( const T &t ) {\n            bool upgraded = true;\n            typename my_mutex_type::scoped_lock l(this->my_mutex, upgraded);\n            typename my_successors_type::iterator i = this->my_successors.begin();\n            while ( i != this->my_successors.end() ) {\n                task *new_task = (*i)->try_put_task(t);\n                if ( new_task ) {\n                    return new_task;\n                } else {\n                   if ( (*i)->register_predecessor(*this->my_owner) ) {\n                       if (!upgraded) {\n                           l.upgrade_to_writer();\n                           upgraded = true;\n                       }\n                       i = this->my_successors.erase(i);\n                   }\n                   else {\n                       ++i;\n                   }\n                }\n            }\n            return NULL;\n        }\n    };\n\n    template<typename T>\n    class decrementer : public continue_receiver, tbb::internal::no_copy {\n\n        T *my_node;\n\n        task *execute() {\n            return my_node->decrement_counter();\n        }\n\n    public:\n\n        typedef continue_msg input_type;\n        typedef continue_msg output_type;\n        decrementer( int number_of_predecessors = 0 ) : continue_receiver( number_of_predecessors ) { }\n        void set_owner( T *node ) { my_node = node; }\n    };\n\n}\n\n#endif // __TBB__flow_graph_impl_H\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_flow_graph_indexer_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__flow_graph_indexer_impl_H\n#define __TBB__flow_graph_indexer_impl_H\n\n#ifndef __TBB_flow_graph_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#include \"tbb/internal/_flow_graph_types_impl.h\"\n\nnamespace internal {\n\n    // Output of the indexer_node is a tbb::flow::tagged_msg, and will be of\n    // the form  tagged_msg<tag, result>\n    // where the value of tag will indicate which result was put to the\n    // successor.  
\n    \n    template<typename IndexerNodeBaseType, typename T, size_t K>\n    task* do_try_put(const T &v, void *p) {\n        typename IndexerNodeBaseType::output_type o(K, v);\n        return reinterpret_cast<IndexerNodeBaseType *>(p)->try_put_task(&o);\n    }\n\n    template<typename TupleTypes,int N>\n    struct indexer_helper {\n        template<typename IndexerNodeBaseType, typename PortTuple>\n        static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p) {\n            typedef typename tuple_element<N-1, TupleTypes>::type T;\n            task *(*indexer_node_put_task)(const T&, void *) = do_try_put<IndexerNodeBaseType, T, N-1>;\n            tbb::flow::get<N-1>(my_input).set_up(p, indexer_node_put_task);\n            indexer_helper<TupleTypes,N-1>::template set_indexer_node_pointer<IndexerNodeBaseType,PortTuple>(my_input, p);\n        }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        template<typename InputTuple>\n        static inline void reset_inputs(InputTuple &my_input, reset_flags f) {\n            join_helper<N-1>::reset_inputs(my_input, f);\n            tbb::flow::get<N-1>(my_input).reset_receiver(f);\n        }\n#endif\n    };\n\n    template<typename TupleTypes>\n    struct indexer_helper<TupleTypes,1> {\n        template<typename IndexerNodeBaseType, typename PortTuple>\n        static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p) {\n            typedef typename tuple_element<0, TupleTypes>::type T;\n            task *(*indexer_node_put_task)(const T&, void *) = do_try_put<IndexerNodeBaseType, T, 0>;\n            tbb::flow::get<0>(my_input).set_up(p, indexer_node_put_task);\n        }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        template<typename InputTuple>\n        static inline void reset_inputs(InputTuple &my_input, reset_flags f) {\n            tbb::flow::get<0>(my_input).reset_receiver(f);\n        }\n#endif\n    };\n\n    template<typename T>\n    class indexer_input_port 
: public receiver<T> {\n    private:\n        void* my_indexer_ptr;\n        typedef task* (* forward_function_ptr)(T const &, void* );\n        forward_function_ptr my_try_put_task;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        spin_mutex my_pred_mutex;\n        edge_container<sender<T> > my_built_predecessors;\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n    public:\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        indexer_input_port() : my_pred_mutex() {}\n        indexer_input_port( const indexer_input_port & /*other*/ ) : receiver<T>(), my_pred_mutex() {\n        }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n        void set_up(void *p, forward_function_ptr f) {\n                my_indexer_ptr = p;\n                my_try_put_task = f;\n            }\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<sender<T> *> predecessor_vector_type;\n        /*override*/size_t predecessor_count() {\n            spin_mutex::scoped_lock l(my_pred_mutex);\n            return my_built_predecessors.edge_count();\n        }\n        /*override*/void internal_add_built_predecessor(sender<T> &p) {\n            spin_mutex::scoped_lock l(my_pred_mutex);\n            my_built_predecessors.add_edge(p);\n        }\n        /*override*/void internal_delete_built_predecessor(sender<T> &p) {\n            spin_mutex::scoped_lock l(my_pred_mutex);\n            my_built_predecessors.delete_edge(p);\n        }\n        /*override*/void copy_predecessors( predecessor_vector_type &v) {\n            spin_mutex::scoped_lock l(my_pred_mutex);\n            return my_built_predecessors.copy_edges(v);\n        }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n    protected:\n        template< typename R, typename B > friend class run_and_put_task;\n        template<typename X, typename Y> friend class internal::broadcast_cache;\n        template<typename X, typename Y> friend class internal::round_robin_cache;\n        task *try_put_task(const T &v) {\n            return 
my_try_put_task(v, my_indexer_ptr);\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    public:\n        /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) {\n            if(f&rf_extract) my_built_predecessors.receiver_extract(*this);\n        }\n#else\n        /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { }\n#endif\n\n    };\n\n    template<typename InputTuple, typename OutputType, typename StructTypes>\n    class indexer_node_FE {\n    public:\n        static const int N = tbb::flow::tuple_size<InputTuple>::value;\n        typedef OutputType output_type;\n        typedef InputTuple input_type;\n\n        input_type &input_ports() { return my_inputs; }\n    protected:\n        input_type my_inputs;\n    };\n\n    //! indexer_node_base\n    template<typename InputTuple, typename OutputType, typename StructTypes>\n    class indexer_node_base : public graph_node, public indexer_node_FE<InputTuple, OutputType,StructTypes>,\n                           public sender<OutputType> {\n    protected:\n       using graph_node::my_graph;\n    public:\n        static const size_t N = tbb::flow::tuple_size<InputTuple>::value;\n        typedef OutputType output_type;\n        typedef StructTypes tuple_types;\n        typedef receiver<output_type> successor_type;\n        typedef indexer_node_FE<InputTuple, output_type,StructTypes> input_ports_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<successor_type *> successor_vector_type;\n#endif\n\n    private:\n        // ----------- Aggregator ------------\n        enum op_type { reg_succ, rem_succ, try__put_task\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            , add_blt_succ, del_blt_succ,\n             blt_succ_cnt, blt_succ_cpy\n#endif\n        };\n        enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n        typedef indexer_node_base<InputTuple,output_type,StructTypes> my_class;\n\n        class indexer_node_base_operation : public 
aggregated_operation<indexer_node_base_operation> {\n        public:\n            char type;\n            union {\n                output_type const *my_arg;\n                successor_type *my_succ;\n                task *bypass_t;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                size_t cnt_val;\n                successor_vector_type *succv;\n#endif\n            };\n            indexer_node_base_operation(const output_type* e, op_type t) :\n                type(char(t)), my_arg(e) {}\n            indexer_node_base_operation(const successor_type &s, op_type t) : type(char(t)), \n                my_succ(const_cast<successor_type *>(&s)) {}\n            indexer_node_base_operation(op_type t) : type(char(t)) {}\n        };\n\n        typedef internal::aggregating_functor<my_class, indexer_node_base_operation> my_handler;\n        friend class internal::aggregating_functor<my_class, indexer_node_base_operation>;\n        aggregator<my_handler, indexer_node_base_operation> my_aggregator;\n\n        void handle_operations(indexer_node_base_operation* op_list) {\n            indexer_node_base_operation *current;\n            while(op_list) {\n                current = op_list;\n                op_list = op_list->next;\n                switch(current->type) {\n\n                case reg_succ:\n                    my_successors.register_successor(*(current->my_succ));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n\n                case rem_succ:\n                    my_successors.remove_successor(*(current->my_succ));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case try__put_task: {\n                        current->bypass_t = my_successors.try_put_task(*(current->my_arg));\n                        __TBB_store_with_release(current->status, SUCCEEDED);  // return of try_put_task actual return value\n                    }\n                 
   break;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                case add_blt_succ:\n                    my_successors.internal_add_built_successor(*(current->my_succ));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case del_blt_succ:\n                    my_successors.internal_delete_built_successor(*(current->my_succ));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_succ_cnt:\n                    current->cnt_val = my_successors.successor_count();\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_succ_cpy:\n                    my_successors.copy_successors(*(current->succv));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n                }\n            }\n        }\n        // ---------- end aggregator -----------\n    public:\n        indexer_node_base(graph& g) : graph_node(g), input_ports_type() {\n            indexer_helper<StructTypes,N>::set_indexer_node_pointer(this->my_inputs, this);\n            my_successors.set_owner(this);\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        indexer_node_base(const indexer_node_base& other) : graph_node(other.my_graph), input_ports_type(), sender<output_type>() {\n            indexer_helper<StructTypes,N>::set_indexer_node_pointer(this->my_inputs, this);\n            my_successors.set_owner(this);\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        bool register_successor(successor_type &r) {\n            indexer_node_base_operation op_data(r, reg_succ);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        bool remove_successor( successor_type &r) {\n      
      indexer_node_base_operation op_data(r, rem_succ);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        task * try_put_task(output_type const *v) {\n            indexer_node_base_operation op_data(v, try__put_task);\n            my_aggregator.execute(&op_data);\n            return op_data.bypass_t;\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        void internal_add_built_successor( successor_type &r) {\n            indexer_node_base_operation op_data(r, add_blt_succ);\n            my_aggregator.execute(&op_data);\n        }\n\n        void internal_delete_built_successor( successor_type &r) {\n            indexer_node_base_operation op_data(r, del_blt_succ);\n            my_aggregator.execute(&op_data);\n        }\n\n        size_t successor_count() {\n            indexer_node_base_operation op_data(blt_succ_cnt);\n            my_aggregator.execute(&op_data);\n            return op_data.cnt_val;\n        }\n\n        void copy_successors( successor_vector_type &v) {\n            indexer_node_base_operation op_data(blt_succ_cpy);\n            op_data.succv = &v;\n            my_aggregator.execute(&op_data);\n        } \n#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n    protected:\n        /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            my_successors.reset(f);\n            indexer_helper<StructTypes,N>::reset_inputs(this->my_inputs, f);\n#endif\n        }\n\n    private:\n        broadcast_cache<output_type, null_rw_mutex> my_successors;\n    };  //indexer_node_base\n\n\n    template<int N, typename InputTuple> struct input_types;\n\n    template<typename InputTuple>\n    struct input_types<1, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename internal::tagged_msg<size_t, first_type > type;\n    };\n\n    template<typename InputTuple>\n    struct input_types<2, 
InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type> type;\n    };\n\n    template<typename InputTuple>\n    struct input_types<3, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename tuple_element<2, InputTuple>::type third_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type, third_type> type;\n    };\n    \n    template<typename InputTuple>\n    struct input_types<4, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename tuple_element<2, InputTuple>::type third_type;\n        typedef typename tuple_element<3, InputTuple>::type fourth_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type, third_type,\n                                                      fourth_type> type;\n    };\n    \n    template<typename InputTuple>\n    struct input_types<5, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename tuple_element<2, InputTuple>::type third_type;\n        typedef typename tuple_element<3, InputTuple>::type fourth_type;\n        typedef typename tuple_element<4, InputTuple>::type fifth_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type, third_type,\n                                                      fourth_type, fifth_type> type;\n    };\n    \n    template<typename InputTuple>\n    struct input_types<6, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type 
first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename tuple_element<2, InputTuple>::type third_type;\n        typedef typename tuple_element<3, InputTuple>::type fourth_type;\n        typedef typename tuple_element<4, InputTuple>::type fifth_type;\n        typedef typename tuple_element<5, InputTuple>::type sixth_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type, third_type,\n                                                      fourth_type, fifth_type, sixth_type> type;\n    };\n    \n    template<typename InputTuple>\n    struct input_types<7, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename tuple_element<2, InputTuple>::type third_type;\n        typedef typename tuple_element<3, InputTuple>::type fourth_type;\n        typedef typename tuple_element<4, InputTuple>::type fifth_type;\n        typedef typename tuple_element<5, InputTuple>::type sixth_type;\n        typedef typename tuple_element<6, InputTuple>::type seventh_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type, third_type,\n                                                      fourth_type, fifth_type, sixth_type,\n                                                      seventh_type> type;\n    };\n\n\n    template<typename InputTuple>\n    struct input_types<8, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename tuple_element<2, InputTuple>::type third_type;\n        typedef typename tuple_element<3, InputTuple>::type fourth_type;\n        typedef typename tuple_element<4, InputTuple>::type fifth_type;\n        typedef typename tuple_element<5, InputTuple>::type sixth_type;\n        typedef typename 
tuple_element<6, InputTuple>::type seventh_type;\n        typedef typename tuple_element<7, InputTuple>::type eighth_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type, third_type,\n                                                      fourth_type, fifth_type, sixth_type,\n                                                      seventh_type, eighth_type> type;\n    };\n\n \n    template<typename InputTuple>\n    struct input_types<9, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename tuple_element<2, InputTuple>::type third_type;\n        typedef typename tuple_element<3, InputTuple>::type fourth_type;\n        typedef typename tuple_element<4, InputTuple>::type fifth_type;\n        typedef typename tuple_element<5, InputTuple>::type sixth_type;\n        typedef typename tuple_element<6, InputTuple>::type seventh_type;\n        typedef typename tuple_element<7, InputTuple>::type eighth_type;\n        typedef typename tuple_element<8, InputTuple>::type nineth_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type, third_type,\n                                                      fourth_type, fifth_type, sixth_type,\n                                                      seventh_type, eighth_type, nineth_type> type;\n    };\n\n    template<typename InputTuple>\n    struct input_types<10, InputTuple> {\n        typedef typename tuple_element<0, InputTuple>::type first_type;\n        typedef typename tuple_element<1, InputTuple>::type second_type;\n        typedef typename tuple_element<2, InputTuple>::type third_type;\n        typedef typename tuple_element<3, InputTuple>::type fourth_type;\n        typedef typename tuple_element<4, InputTuple>::type fifth_type;\n        typedef typename tuple_element<5, InputTuple>::type sixth_type;\n        typedef typename 
tuple_element<6, InputTuple>::type seventh_type;\n        typedef typename tuple_element<7, InputTuple>::type eighth_type;\n        typedef typename tuple_element<8, InputTuple>::type nineth_type;\n        typedef typename tuple_element<9, InputTuple>::type tenth_type;\n        typedef typename internal::tagged_msg<size_t, first_type, second_type, third_type,\n                                                      fourth_type, fifth_type, sixth_type,\n                                                      seventh_type, eighth_type, nineth_type,\n                                                      tenth_type> type;\n    };\n\n    // type generators\n    template<typename OutputTuple>\n    struct indexer_types : public input_types<tuple_size<OutputTuple>::value, OutputTuple> {\n        static const int N = tbb::flow::tuple_size<OutputTuple>::value;\n        typedef typename input_types<N, OutputTuple>::type output_type;\n        typedef typename wrap_tuple_elements<N,indexer_input_port,OutputTuple>::type input_ports_type;\n        typedef internal::indexer_node_FE<input_ports_type,output_type,OutputTuple> indexer_FE_type;\n        typedef internal::indexer_node_base<input_ports_type, output_type, OutputTuple> indexer_base_type;\n    };\n\n    template<class OutputTuple>\n    class unfolded_indexer_node : public indexer_types<OutputTuple>::indexer_base_type {\n    public:\n        typedef typename indexer_types<OutputTuple>::input_ports_type input_ports_type;\n        typedef OutputTuple tuple_types;\n        typedef typename indexer_types<OutputTuple>::output_type output_type;\n    private:\n        typedef typename indexer_types<OutputTuple>::indexer_base_type base_type;\n    public:\n        unfolded_indexer_node(graph& g) : base_type(g) {}\n        unfolded_indexer_node(const unfolded_indexer_node &other) : base_type(other) {}\n    };\n\n} /* namespace internal */\n\n#endif  /* __TBB__flow_graph_indexer_impl_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_flow_graph_item_buffer_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__flow_graph_item_buffer_impl_H\n#define __TBB__flow_graph_item_buffer_impl_H\n\n#ifndef __TBB_flow_graph_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#include \"tbb/internal/_flow_graph_types_impl.h\"  // for aligned_pair\n\n// in namespace tbb::flow::interface7 (included in _flow_graph_node_impl.h)\n\n    //! Expandable buffer of items.  The possible operations are push, pop,\n    //* tests for empty and so forth.  No mutual exclusion is built in.\n    //* objects are constructed into and explicitly-destroyed.  
get_my_item gives\n    // a read-only reference to the item in the buffer.  set_my_item may be called\n    // with either an empty or occupied slot.\n\n    using internal::aligned_pair;\n    using internal::alignment_of;\n\nnamespace internal {\n\n    template <typename T, typename A=cache_aligned_allocator<T> >\n    class item_buffer {\n    public:\n        typedef T item_type;\n        enum buffer_item_state { no_item=0, has_item=1, reserved_item=2 };\n    protected:\n        typedef size_t size_type;\n        typedef typename aligned_pair<item_type, buffer_item_state>::type buffer_item_type;\n        typedef typename A::template rebind<buffer_item_type>::other allocator_type;\n\n        buffer_item_type *my_array;\n        size_type my_array_size;\n        static const size_type initial_buffer_size = 4;\n        size_type my_head;\n        size_type my_tail;\n\n        bool buffer_empty() { return my_head == my_tail; }\n\n        buffer_item_type &item(size_type i) {\n            __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].second))%alignment_of<buffer_item_state>::value),NULL);\n            __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].first))%alignment_of<item_type>::value), NULL);\n            return my_array[i & (my_array_size - 1) ];\n        }\n\n        bool my_item_valid(size_type i) { return item(i).second != no_item; }\n        bool my_item_reserved(size_type i) { return item(i).second == reserved_item; }\n\n        // object management in buffer\n        const item_type &get_my_item(size_t i) {\n            __TBB_ASSERT(my_item_valid(i),\"attempt to get invalid item\");\n            item_type *itm = (tbb::internal::punned_cast<item_type *>(&(item(i).first)));\n            return *(const item_type *)itm;\n        }\n\n        // may be called with an empty slot or a slot that has already been constructed into.\n        void set_my_item(size_t i, const item_type &o) { \n            if(item(i).second != no_item) {\n                
destroy_item(i);\n            }\n            new(&(item(i).first)) item_type(o);\n            item(i).second = has_item;\n        }\n\n        // destructively-fetch an object from the buffer\n        void fetch_item(size_t i, item_type &o) {\n            __TBB_ASSERT(my_item_valid(i), \"Trying to fetch an empty slot\");\n            o = get_my_item(i);  // could have std::move assign semantics\n            destroy_item(i);\n        }\n\n        // move an existing item from one slot to another.  The moved-to slot must be unoccupied,\n        // the moved-from slot must exist and not be reserved.  The after, from will be empty,\n        // to will be occupied but not reserved\n        void move_item(size_t to, size_t from) {\n            __TBB_ASSERT(!my_item_valid(to), \"Trying to move to a non-empty slot\");\n            __TBB_ASSERT(my_item_valid(from), \"Trying to move from an empty slot\");\n            set_my_item(to, get_my_item(from));   // could have std::move semantics\n            destroy_item(from);\n\n        }\n\n        // put an item in an empty slot.  
Return true if successful, else false\n        bool place_item(size_t here, const item_type &me) {\n#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES\n            if(my_item_valid(here)) return false;\n#endif\n            set_my_item(here, me);\n            return true;\n        }\n\n        // could be implemented with std::move semantics\n        void swap_items(size_t i, size_t j) {\n            __TBB_ASSERT(my_item_valid(i) && my_item_valid(j), \"attempt to swap invalid item(s)\");\n            item_type temp = get_my_item(i);\n            set_my_item(i, get_my_item(j));\n            set_my_item(j, temp);\n        }\n\n        void destroy_item(size_type i) {\n            __TBB_ASSERT(my_item_valid(i), \"destruction of invalid item\");\n            (tbb::internal::punned_cast<item_type *>(&(item(i).first)))->~item_type();\n            item(i).second = no_item;\n        }\n\n        // returns a copy of the front\n        void copy_front(item_type &v) {\n            __TBB_ASSERT(my_item_valid(my_head), \"attempt to fetch head non-item\");\n            v = get_my_item(my_head);\n        }\n        // returns a copy of the back\n        void copy_back(item_type &v) {\n            __TBB_ASSERT(my_item_valid(my_tail-1), \"attempt to fetch head non-item\");\n            v = get_my_item(my_tail-1);\n        }\n\n        // following methods are for reservation of the front of a bufffer. 
\n        void reserve_item(size_type i) { __TBB_ASSERT(my_item_valid(i) && !my_item_reserved(i), \"item cannot be reserved\"); item(i).second = reserved_item; }\n        void release_item(size_type i) { __TBB_ASSERT(my_item_reserved(i), \"item is not reserved\"); item(i).second = has_item; }\n\n        void destroy_front() { destroy_item(my_head); ++my_head; }\n        void destroy_back() { destroy_item(my_tail-1); --my_tail; }\n\n        // we have to be able to test against a new tail value without changing my_tail\n        // grow_array doesn't work if we change my_tail when the old array is too small\n        size_type size(size_t new_tail = 0) { return (new_tail ? new_tail : my_tail) - my_head; }\n        size_type capacity() { return my_array_size; }\n        // sequencer_node does not use this method, so we don't \n        // need a version that passes in the new_tail value.\n        bool buffer_full() { return size() >= capacity(); }\n\n        //! Grows the internal array.\n        void grow_my_array( size_t minimum_size ) {\n            // test that we haven't made the structure inconsistent.\n            __TBB_ASSERT(capacity() >= my_tail - my_head, \"total items exceed capacity\");\n            size_type new_size = my_array_size ? 
2*my_array_size : initial_buffer_size;\n            while( new_size<minimum_size )\n                new_size*=2;\n\n            buffer_item_type* new_array = allocator_type().allocate(new_size);\n\n            // initialize validity to \"no\"\n            for( size_type i=0; i<new_size; ++i ) { new_array[i].second = no_item; }\n\n            for( size_type i=my_head; i<my_tail; ++i) {\n                if(my_item_valid(i)) {  // sequencer_node may have empty slots\n                    // placement-new copy-construct; could be std::move\n                    char *new_space = (char *)&(new_array[i&(new_size-1)].first);\n                    (void)new(new_space) item_type(get_my_item(i));\n                    new_array[i&(new_size-1)].second = item(i).second;\n                }\n            }\n\n            clean_up_buffer(/*reset_pointers*/false);\n\n            my_array = new_array;\n            my_array_size = new_size;\n        }\n\n        bool push_back(item_type &v) {\n            if(buffer_full()) {\n                grow_my_array(size() + 1);\n            }\n            set_my_item(my_tail, v);\n            ++my_tail;\n            return true;\n        }\n\n        bool pop_back(item_type &v) {\n            if (!my_item_valid(my_tail-1)) {\n                return false;\n            }\n            copy_back(v);\n            destroy_back();\n            return true;\n        }\n\n        bool pop_front(item_type &v) {\n            if(!my_item_valid(my_head)) {\n                return false;\n            }\n            copy_front(v);\n            destroy_front();\n            return true;\n        }\n\n        // This is used both for reset and for grow_my_array.  
In the case of grow_my_array\n        // we want to retain the values of the head and tail.\n        void clean_up_buffer(bool reset_pointers) {\n            if (my_array) {\n                for( size_type i=0; i<my_array_size; ++i ) {\n                    if(my_item_valid(i))\n                        destroy_item(i);\n                }\n                allocator_type().deallocate(my_array,my_array_size); \n            }\n            my_array = NULL;\n            if(reset_pointers) {\n                my_head = my_tail = my_array_size = 0;\n            }\n        }\n\n    public:\n        //! Constructor\n        item_buffer( ) : my_array(NULL), my_array_size(0),\n            my_head(0), my_tail(0) {\n            grow_my_array(initial_buffer_size);\n        }\n\n        ~item_buffer() {\n            clean_up_buffer(/*reset_pointers*/true);\n        }\n\n        void reset() { clean_up_buffer(/*reset_pointers*/true); grow_my_array(initial_buffer_size); }\n\n    };\n\n    //! item_buffer with reservable front-end.  NOTE: if reserving, do not\n    //* complete operation with pop_front(); use consume_front().  
\n    //* No synchronization built-in.\n    template<typename T, typename A=cache_aligned_allocator<T> >\n    class reservable_item_buffer : public item_buffer<T, A> {\n    protected:\n        using item_buffer<T, A>::my_item_valid;\n        using item_buffer<T, A>::my_head;\n\n    public:\n        reservable_item_buffer() : item_buffer<T, A>(), my_reserved(false) {}\n        void reset() {my_reserved = false; item_buffer<T,A>::reset(); }\n    protected:\n\n        bool reserve_front(T &v) {\n            if(my_reserved || !my_item_valid(my_head)) return false;\n            my_reserved = true;\n            // reserving the head\n            this->copy_front(v);\n            this->reserve_item(this->my_head);\n            return true;\n        }\n\n        void consume_front() {\n            __TBB_ASSERT(my_reserved, \"Attempt to consume a non-reserved item\");\n            this->destroy_front();\n            my_reserved = false;\n        }\n\n        void release_front() {\n            __TBB_ASSERT(my_reserved, \"Attempt to release a non-reserved item\");\n            this->release_item(this->my_head);\n            my_reserved = false;\n        }\n\n        bool my_reserved;\n    };\n\n}  // namespace internal\n\n#endif // __TBB__flow_graph_item_buffer_impl_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_flow_graph_join_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__flow_graph_join_impl_H\n#define __TBB__flow_graph_join_impl_H\n\n#ifndef __TBB_flow_graph_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#include \"_flow_graph_types_impl.h\"\n\nnamespace internal {\n\n    typedef size_t tag_value;\n    static const tag_value NO_TAG = tag_value(-1);\n\n    struct forwarding_base {\n        forwarding_base(graph &g) : my_graph_ptr(&g), current_tag(NO_TAG) {}\n        virtual ~forwarding_base() {}\n        // decrement_port_count may create a forwarding task.  
If we cannot handle the task\n        // ourselves, ask decrement_port_count to deal with it.\n        virtual task * decrement_port_count(bool handle_task) = 0;\n        virtual void increment_port_count() = 0;\n        virtual task * increment_tag_count(tag_value /*t*/, bool /*handle_task*/) {return NULL;}\n        // moved here so input ports can queue tasks\n        graph* my_graph_ptr;\n        tag_value current_tag; // so ports can refer to FE's desired items\n    };\n\n    template< int N >\n    struct join_helper {\n\n        template< typename TupleType, typename PortType >\n        static inline void set_join_node_pointer(TupleType &my_input, PortType *port) {\n            tbb::flow::get<N-1>( my_input ).set_join_node_pointer(port);\n            join_helper<N-1>::set_join_node_pointer( my_input, port );\n        }\n        template< typename TupleType >\n        static inline void consume_reservations( TupleType &my_input ) {\n            tbb::flow::get<N-1>( my_input ).consume();\n            join_helper<N-1>::consume_reservations( my_input );\n        }\n\n        template< typename TupleType >\n        static inline void release_my_reservation( TupleType &my_input ) {\n            tbb::flow::get<N-1>( my_input ).release();\n        }\n\n        template <typename TupleType>\n        static inline void release_reservations( TupleType &my_input) {\n            join_helper<N-1>::release_reservations(my_input);\n            release_my_reservation(my_input);\n        }\n\n        template< typename InputTuple, typename OutputTuple >\n        static inline bool reserve( InputTuple &my_input, OutputTuple &out) {\n            if ( !tbb::flow::get<N-1>( my_input ).reserve( tbb::flow::get<N-1>( out ) ) ) return false;\n            if ( !join_helper<N-1>::reserve( my_input, out ) ) {\n                release_my_reservation( my_input );\n                return false;\n            }\n            return true;\n        }\n\n        template<typename InputTuple, 
typename OutputTuple>\n        static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) {\n            bool res = tbb::flow::get<N-1>(my_input).get_item(tbb::flow::get<N-1>(out) ); // may fail\n            return join_helper<N-1>::get_my_item(my_input, out) && res;       // do get on other inputs before returning\n        }\n\n        template<typename InputTuple, typename OutputTuple>\n        static inline bool get_items(InputTuple &my_input, OutputTuple &out) {\n            return get_my_item(my_input, out);\n        }\n\n        template<typename InputTuple>\n        static inline void reset_my_port(InputTuple &my_input) {\n            join_helper<N-1>::reset_my_port(my_input);\n            tbb::flow::get<N-1>(my_input).reset_port();\n        }\n\n        template<typename InputTuple>\n        static inline void reset_ports(InputTuple& my_input) {\n            reset_my_port(my_input);\n        }\n\n        template<typename InputTuple, typename TagFuncTuple>\n        static inline void set_tag_func(InputTuple &my_input, TagFuncTuple &my_tag_funcs) {\n            tbb::flow::get<N-1>(my_input).set_my_original_tag_func(tbb::flow::get<N-1>(my_tag_funcs));\n            tbb::flow::get<N-1>(my_input).set_my_tag_func(tbb::flow::get<N-1>(my_input).my_original_func()->clone());\n            tbb::flow::get<N-1>(my_tag_funcs) = NULL;\n            join_helper<N-1>::set_tag_func(my_input, my_tag_funcs);\n        }\n\n        template< typename TagFuncTuple1, typename TagFuncTuple2>\n        static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) {\n            if(tbb::flow::get<N-1>(other_inputs).my_original_func()) {\n                tbb::flow::get<N-1>(my_inputs).set_my_tag_func(tbb::flow::get<N-1>(other_inputs).my_original_func()->clone());\n                tbb::flow::get<N-1>(my_inputs).set_my_original_tag_func(tbb::flow::get<N-1>(other_inputs).my_original_func()->clone());\n            }\n            
join_helper<N-1>::copy_tag_functors(my_inputs, other_inputs);\n        }\n\n        template<typename InputTuple>\n        static inline void reset_inputs(InputTuple &my_input __TBB_PFG_RESET_ARG(__TBB_COMMA reset_flags f)) {\n            join_helper<N-1>::reset_inputs(my_input __TBB_PFG_RESET_ARG(__TBB_COMMA f));\n            tbb::flow::get<N-1>(my_input).reset_receiver(__TBB_PFG_RESET_ARG(f));\n        }\n    };\n\n    template< >\n    struct join_helper<1> {\n\n        template< typename TupleType, typename PortType >\n        static inline void set_join_node_pointer(TupleType &my_input, PortType *port) {\n            tbb::flow::get<0>( my_input ).set_join_node_pointer(port);\n        }\n\n        template< typename TupleType >\n        static inline void consume_reservations( TupleType &my_input ) {\n            tbb::flow::get<0>( my_input ).consume();\n        }\n\n        template< typename TupleType >\n        static inline void release_my_reservation( TupleType &my_input ) {\n            tbb::flow::get<0>( my_input ).release();\n        }\n\n        template<typename TupleType>\n        static inline void release_reservations( TupleType &my_input) {\n            release_my_reservation(my_input);\n        }\n\n        template< typename InputTuple, typename OutputTuple >\n        static inline bool reserve( InputTuple &my_input, OutputTuple &out) {\n            return tbb::flow::get<0>( my_input ).reserve( tbb::flow::get<0>( out ) );\n        }\n\n        template<typename InputTuple, typename OutputTuple>\n        static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) {\n            return tbb::flow::get<0>(my_input).get_item(tbb::flow::get<0>(out));\n        }\n\n        template<typename InputTuple, typename OutputTuple>\n        static inline bool get_items(InputTuple &my_input, OutputTuple &out) {\n            return get_my_item(my_input, out);\n        }\n\n        template<typename InputTuple>\n        static inline void 
reset_my_port(InputTuple &my_input) {\n            tbb::flow::get<0>(my_input).reset_port();\n        }\n\n        template<typename InputTuple>\n        static inline void reset_ports(InputTuple& my_input) {\n            reset_my_port(my_input);\n        }\n\n        template<typename InputTuple, typename TagFuncTuple>\n        static inline void set_tag_func(InputTuple &my_input, TagFuncTuple &my_tag_funcs) {\n            tbb::flow::get<0>(my_input).set_my_original_tag_func(tbb::flow::get<0>(my_tag_funcs));\n            tbb::flow::get<0>(my_input).set_my_tag_func(tbb::flow::get<0>(my_input).my_original_func()->clone());\n            tbb::flow::get<0>(my_tag_funcs) = NULL;\n        }\n\n        template< typename TagFuncTuple1, typename TagFuncTuple2>\n        static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) {\n            if(tbb::flow::get<0>(other_inputs).my_original_func()) {\n                tbb::flow::get<0>(my_inputs).set_my_tag_func(tbb::flow::get<0>(other_inputs).my_original_func()->clone());\n                tbb::flow::get<0>(my_inputs).set_my_original_tag_func(tbb::flow::get<0>(other_inputs).my_original_func()->clone());\n            }\n        }\n        template<typename InputTuple>\n        static inline void reset_inputs(InputTuple &my_input __TBB_PFG_RESET_ARG(__TBB_COMMA reset_flags f)) {\n            tbb::flow::get<0>(my_input).reset_receiver(__TBB_PFG_RESET_ARG(f));\n        }\n    };\n\n    //! 
The two-phase join port\n    template< typename T >\n    class reserving_port : public receiver<T> {\n    public:\n        typedef T input_type;\n        typedef sender<T> predecessor_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<predecessor_type *> predecessor_vector_type;\n#endif\n    private:\n        // ----------- Aggregator ------------\n        enum op_type { reg_pred, rem_pred, res_item, rel_res, con_res\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy\n#endif\n        };\n        enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n        typedef reserving_port<T> my_class;\n\n        class reserving_port_operation : public aggregated_operation<reserving_port_operation> {\n        public:\n            char type;\n            union {\n                T *my_arg;\n                predecessor_type *my_pred;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                size_t cnt_val;\n                predecessor_vector_type *pvec;\n#endif\n            };\n            reserving_port_operation(const T& e, op_type t) :\n                type(char(t)), my_arg(const_cast<T*>(&e)) {}\n            reserving_port_operation(const predecessor_type &s, op_type t) : type(char(t)),\n                my_pred(const_cast<predecessor_type *>(&s)) {}\n            reserving_port_operation(op_type t) : type(char(t)) {}\n        };\n\n        typedef internal::aggregating_functor<my_class, reserving_port_operation> my_handler;\n        friend class internal::aggregating_functor<my_class, reserving_port_operation>;\n        aggregator<my_handler, reserving_port_operation> my_aggregator;\n\n        void handle_operations(reserving_port_operation* op_list) {\n            reserving_port_operation *current;\n            bool no_predecessors;\n            while(op_list) {\n                current = op_list;\n                op_list = op_list->next;\n                switch(current->type) {\n                case reg_pred:\n  
                  no_predecessors = my_predecessors.empty();\n                    my_predecessors.add(*(current->my_pred));\n                    if ( no_predecessors ) {\n                        (void) my_join->decrement_port_count(true); // may try to forward\n                    }\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case rem_pred:\n                    my_predecessors.remove(*(current->my_pred));\n                    if(my_predecessors.empty()) my_join->increment_port_count();\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case res_item:\n                    if ( reserved ) {\n                        __TBB_store_with_release(current->status, FAILED);\n                    }\n                    else if ( my_predecessors.try_reserve( *(current->my_arg) ) ) {\n                        reserved = true;\n                        __TBB_store_with_release(current->status, SUCCEEDED);\n                    } else {\n                        if ( my_predecessors.empty() ) {\n                            my_join->increment_port_count();\n                        }\n                        __TBB_store_with_release(current->status, FAILED);\n                    }\n                    break;\n                case rel_res:\n                    reserved = false;\n                    my_predecessors.try_release( );\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case con_res:\n                    reserved = false;\n                    my_predecessors.try_consume( );\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                case add_blt_pred:\n                    my_predecessors.internal_add_built_predecessor(*(current->my_pred));\n                    
__TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case del_blt_pred:\n                    my_predecessors.internal_delete_built_predecessor(*(current->my_pred));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_pred_cnt:\n                    current->cnt_val = my_predecessors.predecessor_count();\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_pred_cpy:\n                    my_predecessors.copy_predecessors(*(current->pvec));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n                }\n            }\n        }\n\n    protected:\n        template< typename R, typename B > friend class run_and_put_task;\n        template<typename X, typename Y> friend class internal::broadcast_cache;\n        template<typename X, typename Y> friend class internal::round_robin_cache;\n        task *try_put_task( const T & ) {\n            return NULL;\n        }\n\n    public:\n\n        //! Constructor\n        reserving_port() : reserved(false) {\n            my_join = NULL;\n            my_predecessors.set_owner( this );\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        // copy constructor\n        reserving_port(const reserving_port& /* other */) : receiver<T>() {\n            reserved = false;\n            my_join = NULL;\n            my_predecessors.set_owner( this );\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        void set_join_node_pointer(forwarding_base *join) {\n            my_join = join;\n        }\n\n        //! 
Add a predecessor\n        bool register_predecessor( sender<T> &src ) {\n            reserving_port_operation op_data(src, reg_pred);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        //! Remove a predecessor\n        bool remove_predecessor( sender<T> &src ) {\n            reserving_port_operation op_data(src, rem_pred);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        //! Reserve an item from the port\n        bool reserve( T &v ) {\n            reserving_port_operation op_data(v, res_item);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        //! Release the port\n        void release( ) {\n            reserving_port_operation op_data(rel_res);\n            my_aggregator.execute(&op_data);\n        }\n\n        //! Complete use of the port\n        void consume( ) {\n            reserving_port_operation op_data(con_res);\n            my_aggregator.execute(&op_data);\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/void internal_add_built_predecessor(predecessor_type &src) {\n            reserving_port_operation op_data(src, add_blt_pred);\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/void internal_delete_built_predecessor(predecessor_type &src) {\n            reserving_port_operation op_data(src, del_blt_pred);\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/size_t predecessor_count() {\n            reserving_port_operation op_data(blt_pred_cnt);\n            my_aggregator.execute(&op_data);\n            return op_data.cnt_val;\n        }\n\n        /*override*/void copy_predecessors(predecessor_vector_type &v) {\n            reserving_port_operation op_data(blt_pred_cpy);\n            op_data.pvec = &v;\n            my_aggregator.execute(&op_data);\n        }\n#endif  /* 
TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n        /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) {\n            my_predecessors.reset(__TBB_PFG_RESET_ARG(f));\n            reserved = false;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            __TBB_ASSERT(!(f&rf_extract) || my_predecessors.empty(), \"port edges not removed\");\n#endif\n        }\n\n    private:\n        forwarding_base *my_join;\n        reservable_predecessor_cache< T, null_mutex > my_predecessors;\n        bool reserved;\n    };\n\n    //! queueing join_port\n    template<typename T>\n    class queueing_port : public receiver<T>, public item_buffer<T> {\n    public:\n        typedef T input_type;\n        typedef sender<T> predecessor_type;\n        typedef queueing_port<T> my_node_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<predecessor_type *> predecessor_vector_type;\n#endif\n\n    // ----------- Aggregator ------------\n    private:\n        enum op_type { get__item, res_port, try__put_task\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy \n#endif\n        };\n        enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n        typedef queueing_port<T> my_class;\n\n        class queueing_port_operation : public aggregated_operation<queueing_port_operation> {\n        public:\n            char type;\n            T my_val;\n            T *my_arg;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            sender<T> *pred;\n            size_t cnt_val;\n            predecessor_vector_type *pvec;\n#endif\n            task * bypass_t;\n            // constructor for value parameter\n            queueing_port_operation(const T& e, op_type t) :\n                type(char(t)), my_val(e)\n                , bypass_t(NULL)\n            {}\n            // constructor for pointer parameter\n            queueing_port_operation(const T* p, op_type t) :\n                type(char(t)), my_arg(const_cast<T*>(p))\n                , 
bypass_t(NULL)\n            {}\n            // constructor with no parameter\n            queueing_port_operation(op_type t) : type(char(t))\n                , bypass_t(NULL)\n            {}\n        };\n\n        typedef internal::aggregating_functor<my_class, queueing_port_operation> my_handler;\n        friend class internal::aggregating_functor<my_class, queueing_port_operation>;\n        aggregator<my_handler, queueing_port_operation> my_aggregator;\n\n        void handle_operations(queueing_port_operation* op_list) {\n            queueing_port_operation *current;\n            bool was_empty;\n            while(op_list) {\n                current = op_list;\n                op_list = op_list->next;\n                switch(current->type) {\n                case try__put_task: {\n                        task *rtask = NULL;\n                        was_empty = this->buffer_empty();\n                        this->push_back(current->my_val);\n                        if (was_empty) rtask = my_join->decrement_port_count(false);\n                        else\n                            rtask = SUCCESSFULLY_ENQUEUED;\n                        current->bypass_t = rtask;\n                        __TBB_store_with_release(current->status, SUCCEEDED);\n                    }\n                    break;\n                case get__item:\n                    if(!this->buffer_empty()) {\n                        this->copy_front(*(current->my_arg));\n                        __TBB_store_with_release(current->status, SUCCEEDED);\n                    }\n                    else {\n                        __TBB_store_with_release(current->status, FAILED);\n                    }\n                    break;\n                case res_port:\n                    __TBB_ASSERT(this->my_item_valid(this->my_head), \"No item to reset\");\n                    this->destroy_front();\n                    if(this->my_item_valid(this->my_head)) {\n                        
(void)my_join->decrement_port_count(true);\n                    }\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                case add_blt_pred:\n                    my_built_predecessors.add_edge(*(current->pred));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case del_blt_pred:\n                    my_built_predecessors.delete_edge(*(current->pred));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_pred_cnt:\n                    current->cnt_val = my_built_predecessors.edge_count();\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_pred_cpy:\n                    my_built_predecessors.copy_edges(*(current->pvec));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n                }\n            }\n        }\n    // ------------ End Aggregator ---------------\n\n    protected:\n        template< typename R, typename B > friend class run_and_put_task;\n        template<typename X, typename Y> friend class internal::broadcast_cache;\n        template<typename X, typename Y> friend class internal::round_robin_cache;\n        /*override*/task *try_put_task(const T &v) {\n            queueing_port_operation op_data(v, try__put_task);\n            my_aggregator.execute(&op_data);\n            __TBB_ASSERT(op_data.status == SUCCEEDED || !op_data.bypass_t, \"inconsistent return from aggregator\");\n            if(!op_data.bypass_t) return SUCCESSFULLY_ENQUEUED;\n            return op_data.bypass_t;\n        }\n\n    public:\n\n        //! 
Constructor\n        queueing_port() : item_buffer<T>() {\n            my_join = NULL;\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        //! copy constructor\n        queueing_port(const queueing_port& /* other */) : receiver<T>(), item_buffer<T>() {\n            my_join = NULL;\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        //! record parent for tallying available items\n        void set_join_node_pointer(forwarding_base *join) {\n            my_join = join;\n        }\n\n        bool get_item( T &v ) {\n            queueing_port_operation op_data(&v, get__item);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        // reset_port is called when item is accepted by successor, but\n        // is initiated by join_node.\n        void reset_port() {\n            queueing_port_operation op_data(res_port);\n            my_aggregator.execute(&op_data);\n            return;\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/void internal_add_built_predecessor(sender<T> &p) {\n            queueing_port_operation op_data(add_blt_pred);\n            op_data.pred = &p;\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/void internal_delete_built_predecessor(sender<T> &p) {\n            queueing_port_operation op_data(del_blt_pred);\n            op_data.pred = &p;\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/size_t predecessor_count() {\n            queueing_port_operation op_data(blt_pred_cnt);\n            my_aggregator.execute(&op_data);\n            return op_data.cnt_val;\n        }\n\n        /*override*/void copy_predecessors(predecessor_vector_type &v) {\n            queueing_port_operation op_data(blt_pred_cpy);\n            op_data.pvec = &v;\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/void 
reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { \n            item_buffer<T>::reset(); \n            if (f & rf_extract)\n                my_built_predecessors.receiver_extract(*this);\n        }\n#else\n        /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { item_buffer<T>::reset(); }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n    private:\n        forwarding_base *my_join;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        edge_container<sender<T> > my_built_predecessors;\n#endif\n    };\n\n#include \"_flow_graph_tagged_buffer_impl.h\"\n\n    template< typename T >\n    class tag_matching_port : public receiver<T>, public tagged_buffer< tag_value, T, NO_TAG > {\n    public:\n        typedef T input_type;\n        typedef sender<T> predecessor_type;\n        typedef tag_matching_port<T> my_node_type;  // for forwarding, if needed\n        typedef function_body<input_type, tag_value> my_tag_func_type;\n        typedef tagged_buffer<tag_value,T,NO_TAG> my_buffer_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<predecessor_type *> predecessor_vector_type;\n#endif\n    private:\n// ----------- Aggregator ------------\n    private:\n        enum op_type { try__put, get__item, res_port,\n            add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy\n        };\n        enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n        typedef tag_matching_port<T> my_class;\n\n        class tag_matching_port_operation : public aggregated_operation<tag_matching_port_operation> {\n        public:\n            char type;\n            T my_val;\n            T *my_arg;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            predecessor_type *pred;\n            size_t cnt_val;\n            predecessor_vector_type *pvec;\n#endif\n            tag_value my_tag_value;\n            // constructor for value parameter\n            tag_matching_port_operation(const T& e, op_type t) :\n                type(char(t)), my_val(e) {}\n            // 
constructor for pointer parameter\n            tag_matching_port_operation(const T* p, op_type t) :\n                type(char(t)), my_arg(const_cast<T*>(p)) {}\n            // constructor with no parameter\n            tag_matching_port_operation(op_type t) : type(char(t)) {}\n        };\n\n        typedef internal::aggregating_functor<my_class, tag_matching_port_operation> my_handler;\n        friend class internal::aggregating_functor<my_class, tag_matching_port_operation>;\n        aggregator<my_handler, tag_matching_port_operation> my_aggregator;\n\n        void handle_operations(tag_matching_port_operation* op_list) {\n            tag_matching_port_operation *current;\n            while(op_list) {\n                current = op_list;\n                op_list = op_list->next;\n                switch(current->type) {\n                case try__put: {\n                        bool was_inserted = this->tagged_insert(current->my_tag_value, current->my_val);\n                        // return failure if a duplicate insertion occurs\n                        __TBB_store_with_release(current->status, was_inserted ? 
SUCCEEDED : FAILED);\n                    }\n                    break;\n                case get__item:\n                    // use current_tag from FE for item\n                    if(!this->tagged_find(my_join->current_tag, *(current->my_arg))) {\n                        __TBB_ASSERT(false, \"Failed to find item corresponding to current_tag.\");\n                    }\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case res_port:\n                    // use current_tag from FE for item\n                    this->tagged_delete(my_join->current_tag);\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                case add_blt_pred:\n                    my_built_predecessors.add_edge(*(current->pred));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case del_blt_pred:\n                    my_built_predecessors.delete_edge(*(current->pred));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_pred_cnt:\n                    current->cnt_val = my_built_predecessors.edge_count();\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_pred_cpy:\n                    my_built_predecessors.copy_edges(*(current->pvec));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n#endif\n                }\n            }\n        }\n// ------------ End Aggregator ---------------\n    protected:\n        template< typename R, typename B > friend class run_and_put_task;\n        template<typename X, typename Y> friend class internal::broadcast_cache;\n        template<typename X, typename Y> friend class internal::round_robin_cache;\n        
/*override*/task *try_put_task(const T& v) {\n            tag_matching_port_operation op_data(v, try__put);\n            op_data.my_tag_value = (*my_tag_func)(v);\n            task *rtask = NULL;\n            my_aggregator.execute(&op_data);\n            if(op_data.status == SUCCEEDED) {\n                rtask = my_join->increment_tag_count(op_data.my_tag_value, false);  // may spawn\n                // rtask has to reflect the return status of the try_put\n                if(!rtask) rtask = SUCCESSFULLY_ENQUEUED;\n            }\n            return rtask;\n        }\n\n    public:\n\n        tag_matching_port() : receiver<T>(), tagged_buffer<tag_value, T, NO_TAG>() {\n            my_join = NULL;\n            my_tag_func = NULL;\n            my_original_tag_func = NULL;\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        // copy constructor\n        tag_matching_port(const tag_matching_port& /*other*/) : receiver<T>(), tagged_buffer<tag_value,T, NO_TAG>() {\n            my_join = NULL;\n            // setting the tag methods is done in the copy-constructor for the front-end.\n            my_tag_func = NULL;\n            my_original_tag_func = NULL;\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        ~tag_matching_port() {\n            if (my_tag_func) delete my_tag_func;\n            if (my_original_tag_func) delete my_original_tag_func;\n        }\n\n        void set_join_node_pointer(forwarding_base *join) {\n            my_join = join;\n        }\n\n        void set_my_original_tag_func(my_tag_func_type *f) {\n            my_original_tag_func = f;\n        }\n\n        void set_my_tag_func(my_tag_func_type *f) {\n            my_tag_func = f;\n        }\n\n        bool get_item( T &v ) {\n            tag_matching_port_operation op_data(&v, get__item);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n 
       /*override*/void internal_add_built_predecessor(sender<T> &p) {\n            tag_matching_port_operation op_data(add_blt_pred);\n            op_data.pred = &p;\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/void internal_delete_built_predecessor(sender<T> &p) {\n            tag_matching_port_operation op_data(del_blt_pred);\n            op_data.pred = &p;\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/size_t predecessor_count() {\n            tag_matching_port_operation op_data(blt_pred_cnt);\n            my_aggregator.execute(&op_data);\n            return op_data.cnt_val;\n        }\n\n        /*override*/void copy_predecessors(predecessor_vector_type &v) {\n            tag_matching_port_operation op_data(blt_pred_cpy);\n            op_data.pvec = &v;\n            my_aggregator.execute(&op_data);\n        }\n#endif\n\n        // reset_port is called when item is accepted by successor, but\n        // is initiated by join_node.\n        void reset_port() {\n            tag_matching_port_operation op_data(res_port);\n            my_aggregator.execute(&op_data);\n            return;\n        }\n\n        my_tag_func_type *my_func() { return my_tag_func; }\n        my_tag_func_type *my_original_func() { return my_original_tag_func; }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { \n            my_buffer_type::reset(); \n           if (f & rf_extract)\n              my_built_predecessors.receiver_extract(*this);\n        }\n#else\n        /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { my_buffer_type::reset(); }\n#endif\n\n    private:\n        // need map of tags to values\n        forwarding_base *my_join;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        edge_container<predecessor_type> my_built_predecessors;\n#endif\n        my_tag_func_type *my_tag_func;\n        my_tag_func_type 
*my_original_tag_func;\n    };  // tag_matching_port\n\n    using namespace graph_policy_namespace;\n\n    template<graph_buffer_policy JP, typename InputTuple, typename OutputTuple>\n    class join_node_base;\n\n    //! join_node_FE : implements input port policy\n    template<graph_buffer_policy JP, typename InputTuple, typename OutputTuple>\n    class join_node_FE;\n\n    template<typename InputTuple, typename OutputTuple>\n    class join_node_FE<reserving, InputTuple, OutputTuple> : public forwarding_base {\n    public:\n        static const int N = tbb::flow::tuple_size<OutputTuple>::value;\n        typedef OutputTuple output_type;\n        typedef InputTuple input_type;\n        typedef join_node_base<reserving, InputTuple, OutputTuple> my_node_type; // for forwarding\n\n        join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) {\n            ports_with_no_inputs = N;\n            join_helper<N>::set_join_node_pointer(my_inputs, this);\n        }\n\n        join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_node(NULL) {\n            ports_with_no_inputs = N;\n            join_helper<N>::set_join_node_pointer(my_inputs, this);\n        }\n\n        void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; }\n\n       void increment_port_count() {\n            ++ports_with_no_inputs;\n        }\n\n        // if all input_ports have predecessors, spawn forward to try and consume tuples\n        task * decrement_port_count(bool handle_task) {\n            if(ports_with_no_inputs.fetch_and_decrement() == 1) {\n                task* tp = this->my_graph_ptr->root_task();\n                if(tp) {\n                    task *rtask = new ( task::allocate_additional_child_of( *tp ) )\n                        forward_task_bypass<my_node_type>(*my_node);\n                    if(!handle_task) return rtask;\n                    FLOW_SPAWN(*rtask);\n                }\n            }\n            return 
NULL;\n        }\n\n        input_type &input_ports() { return my_inputs; }\n\n    protected:\n\n        void reset( __TBB_PFG_RESET_ARG( reset_flags f)) {\n            // called outside of parallel contexts\n            ports_with_no_inputs = N;\n            join_helper<N>::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f));\n        }\n\n        // all methods on input ports should be called under mutual exclusion from join_node_base.\n\n        bool tuple_build_may_succeed() {\n            return !ports_with_no_inputs;\n        }\n\n        bool try_to_make_tuple(output_type &out) {\n            if(ports_with_no_inputs) return false;\n            return join_helper<N>::reserve(my_inputs, out);\n        }\n\n        void tuple_accepted() {\n            join_helper<N>::consume_reservations(my_inputs);\n        }\n        void tuple_rejected() {\n            join_helper<N>::release_reservations(my_inputs);\n        }\n\n        input_type my_inputs;\n        my_node_type *my_node;\n        atomic<size_t> ports_with_no_inputs;\n    };\n\n    template<typename InputTuple, typename OutputTuple>\n    class join_node_FE<queueing, InputTuple, OutputTuple> : public forwarding_base {\n    public:\n        static const int N = tbb::flow::tuple_size<OutputTuple>::value;\n        typedef OutputTuple output_type;\n        typedef InputTuple input_type;\n        typedef join_node_base<queueing, InputTuple, OutputTuple> my_node_type; // for forwarding\n\n        join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) {\n            ports_with_no_items = N;\n            join_helper<N>::set_join_node_pointer(my_inputs, this);\n        }\n\n        join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_node(NULL) {\n            ports_with_no_items = N;\n            join_helper<N>::set_join_node_pointer(my_inputs, this);\n        }\n\n        // needed for forwarding\n        void set_my_node(my_node_type *new_my_node) { 
my_node = new_my_node; }\n\n        void reset_port_count() {\n            ports_with_no_items = N;\n        }\n\n        // if all input_ports have items, spawn forward to try and consume tuples\n        task * decrement_port_count(bool handle_task)\n        {\n            if(ports_with_no_items.fetch_and_decrement() == 1) {\n                task* tp = this->my_graph_ptr->root_task();\n                if(tp) {\n                    task *rtask = new ( task::allocate_additional_child_of( *tp ) )\n                        forward_task_bypass <my_node_type>(*my_node);\n                    if(!handle_task) return rtask;\n                    FLOW_SPAWN( *rtask);\n                }\n            }\n            return NULL;\n        }\n\n        void increment_port_count() { __TBB_ASSERT(false, NULL); }  // should never be called\n\n        input_type &input_ports() { return my_inputs; }\n\n    protected:\n\n        void reset( __TBB_PFG_RESET_ARG( reset_flags f)) {\n            reset_port_count();\n            join_helper<N>::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f) );\n        }\n\n        // all methods on input ports should be called under mutual exclusion from join_node_base.\n\n        bool tuple_build_may_succeed() {\n            return !ports_with_no_items;\n        }\n\n        bool try_to_make_tuple(output_type &out) {\n            if(ports_with_no_items) return false;\n            return join_helper<N>::get_items(my_inputs, out);\n        }\n\n        void tuple_accepted() {\n            reset_port_count();\n            join_helper<N>::reset_ports(my_inputs);\n        }\n        void tuple_rejected() {\n            // nothing to do.\n        }\n\n        input_type my_inputs;\n        my_node_type *my_node;\n        atomic<size_t> ports_with_no_items;\n    };\n\n    // tag_matching join input port.\n    template<typename InputTuple, typename OutputTuple>\n    class join_node_FE<tag_matching, InputTuple, OutputTuple> : public forwarding_base,\n   
          //     buffer of tag value counts                       buffer of output items\n             public tagged_buffer<tag_value, size_t, NO_TAG>, public item_buffer<OutputTuple> {\n    public:\n        static const int N = tbb::flow::tuple_size<OutputTuple>::value;\n        typedef OutputTuple output_type;\n        typedef InputTuple input_type;\n        typedef tagged_buffer<tag_value, size_t, NO_TAG> my_tag_buffer;\n        typedef item_buffer<output_type> output_buffer_type;\n        typedef join_node_base<tag_matching, InputTuple, OutputTuple> my_node_type; // for forwarding\n\n// ----------- Aggregator ------------\n        // the aggregator is only needed to serialize the access to the hash table.\n        // and the output_buffer_type base class\n    private:\n        enum op_type { res_count, inc_count, may_succeed, try_make };\n        enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n        typedef join_node_FE<tag_matching, InputTuple, OutputTuple> my_class;\n\n        class tag_matching_FE_operation : public aggregated_operation<tag_matching_FE_operation> {\n        public:\n            char type;\n            union {\n                tag_value my_val;\n                output_type* my_output;\n            };\n            task *bypass_t;\n            bool enqueue_task;\n            // constructor for value parameter\n            tag_matching_FE_operation(const tag_value& e , bool q_task , op_type t) : type(char(t)), my_val(e),\n                 bypass_t(NULL), enqueue_task(q_task) {}\n            tag_matching_FE_operation(output_type *p, op_type t) : type(char(t)), my_output(p), bypass_t(NULL),\n                 enqueue_task(true) {}\n            // constructor with no parameter\n            tag_matching_FE_operation(op_type t) : type(char(t)), bypass_t(NULL), enqueue_task(true) {}\n        };\n\n        typedef internal::aggregating_functor<my_class, tag_matching_FE_operation> my_handler;\n        friend class internal::aggregating_functor<my_class, 
tag_matching_FE_operation>;\n        aggregator<my_handler, tag_matching_FE_operation> my_aggregator;\n\n        // called from aggregator, so serialized\n        // construct as many output objects as possible.\n        // returns a task pointer if the a task would have been enqueued but we asked that\n        // it be returned.  Otherwise returns NULL.\n        task * fill_output_buffer(tag_value t, bool should_enqueue, bool handle_task) {\n            output_type l_out;\n            task *rtask = NULL;\n            task* tp = this->my_graph_ptr->root_task();\n            bool do_fwd = should_enqueue && this->buffer_empty() && tp;\n            this->current_tag = t;\n            this->tagged_delete(this->current_tag);   // remove the tag\n            if(join_helper<N>::get_items(my_inputs, l_out)) {  //  <== call back\n                this->push_back(l_out);\n                if(do_fwd) {  // we enqueue if receiving an item from predecessor, not if successor asks for item\n                    rtask = new ( task::allocate_additional_child_of( *tp ) )\n                        forward_task_bypass<my_node_type>(*my_node);\n                    if(handle_task) {\n                        FLOW_SPAWN(*rtask);\n                        rtask = NULL;\n                    }\n                    do_fwd = false;\n                }\n                // retire the input values\n                join_helper<N>::reset_ports(my_inputs);  //  <== call back\n                this->current_tag = NO_TAG;\n            }\n            else {\n                __TBB_ASSERT(false, \"should have had something to push\");\n            }\n            return rtask;\n        }\n\n        void handle_operations(tag_matching_FE_operation* op_list) {\n            tag_matching_FE_operation *current;\n            while(op_list) {\n                current = op_list;\n                op_list = op_list->next;\n                switch(current->type) {\n                case res_count:  // called from BE\n        
            {\n                        this->destroy_front();\n                        __TBB_store_with_release(current->status, SUCCEEDED);\n                    }\n                    break;\n                case inc_count: {  // called from input ports\n                        size_t *p = 0;\n                        tag_value t = current->my_val;\n                        bool do_enqueue = current->enqueue_task;\n                        if(!(this->tagged_find_ref(t,p))) {\n                            this->tagged_insert(t, 0);\n                            if(!(this->tagged_find_ref(t,p))) {\n                                __TBB_ASSERT(false, \"should find tag after inserting it\");\n                            }\n                        }\n                        if(++(*p) == size_t(N)) {\n                            task *rtask = fill_output_buffer(t, true, do_enqueue);\n                            __TBB_ASSERT(!rtask || !do_enqueue, \"task should not be returned\");\n                            current->bypass_t = rtask;\n                        }\n                    }\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case may_succeed:  // called from BE\n                    __TBB_store_with_release(current->status, this->buffer_empty() ? 
FAILED : SUCCEEDED);\n                    break;\n                case try_make:  // called from BE\n                    if(this->buffer_empty()) {\n                        __TBB_store_with_release(current->status, FAILED);\n                    }\n                    else {\n                        this->copy_front(*(current->my_output));\n                        __TBB_store_with_release(current->status, SUCCEEDED);\n                    }\n                    break;\n                }\n            }\n        }\n// ------------ End Aggregator ---------------\n\n    public:\n        template<typename FunctionTuple>\n        join_node_FE(graph &g, FunctionTuple tag_funcs) : forwarding_base(g), my_node(NULL) {\n            join_helper<N>::set_join_node_pointer(my_inputs, this);\n            join_helper<N>::set_tag_func(my_inputs, tag_funcs);\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_tag_buffer(),\n        output_buffer_type() {\n            my_node = NULL;\n            join_helper<N>::set_join_node_pointer(my_inputs, this);\n            join_helper<N>::copy_tag_functors(my_inputs, const_cast<input_type &>(other.my_inputs));\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        // needed for forwarding\n        void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; }\n\n        void reset_port_count() {  // called from BE\n            tag_matching_FE_operation op_data(res_count);\n            my_aggregator.execute(&op_data);\n            return;\n        }\n\n        // if all input_ports have items, spawn forward to try and consume tuples\n        // return a task if we are asked and did create one.\n        task *increment_tag_count(tag_value t, bool handle_task) {  // called from input_ports\n            tag_matching_FE_operation op_data(t, handle_task, inc_count);\n           
 my_aggregator.execute(&op_data);\n            return op_data.bypass_t;\n        }\n\n        /*override*/ task *decrement_port_count(bool /*handle_task*/) { __TBB_ASSERT(false, NULL); return NULL; }\n\n        void increment_port_count() { __TBB_ASSERT(false, NULL); }  // should never be called\n\n        input_type &input_ports() { return my_inputs; }\n\n    protected:\n\n        void reset( __TBB_PFG_RESET_ARG( reset_flags f )) {\n            // called outside of parallel contexts\n            join_helper<N>::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f));\n\n            my_tag_buffer::reset();  // have to reset the tag counts\n            output_buffer_type::reset();  // also the queue of outputs\n            my_node->current_tag = NO_TAG;\n        }\n\n        // all methods on input ports should be called under mutual exclusion from join_node_base.\n\n        bool tuple_build_may_succeed() {  // called from back-end\n            tag_matching_FE_operation op_data(may_succeed);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        // cannot lock while calling back to input_ports.  current_tag will only be set\n        // and reset under the aggregator, so it will remain consistent.\n        bool try_to_make_tuple(output_type &out) {\n            tag_matching_FE_operation op_data(&out,try_make);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        void tuple_accepted() {\n            reset_port_count();  // reset current_tag after ports reset.\n        }\n\n        void tuple_rejected() {\n            // nothing to do.\n        }\n\n        input_type my_inputs;  // input ports\n        my_node_type *my_node;\n    }; // join_node_FE<tag_matching, InputTuple, OutputTuple>\n\n    //! 
join_node_base\n    template<graph_buffer_policy JP, typename InputTuple, typename OutputTuple>\n    class join_node_base : public graph_node, public join_node_FE<JP, InputTuple, OutputTuple>,\n                           public sender<OutputTuple> {\n    protected:\n        using graph_node::my_graph;\n    public:\n        typedef OutputTuple output_type;\n\n        typedef receiver<output_type> successor_type;\n        typedef join_node_FE<JP, InputTuple, OutputTuple> input_ports_type;\n        using input_ports_type::tuple_build_may_succeed;\n        using input_ports_type::try_to_make_tuple;\n        using input_ports_type::tuple_accepted;\n        using input_ports_type::tuple_rejected;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<successor_type *> successor_vector_type;\n#endif\n\n    private:\n        // ----------- Aggregator ------------\n        enum op_type { reg_succ, rem_succ, try__get, do_fwrd, do_fwrd_bypass\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            , add_blt_succ, del_blt_succ, blt_succ_cnt, blt_succ_cpy\n#endif\n        };\n        enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n        typedef join_node_base<JP,InputTuple,OutputTuple> my_class;\n\n        class join_node_base_operation : public aggregated_operation<join_node_base_operation> {\n        public:\n            char type;\n            union {\n                output_type *my_arg;\n                successor_type *my_succ;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                size_t cnt_val;\n                successor_vector_type *svec;\n#endif\n            };\n            task *bypass_t;\n            join_node_base_operation(const output_type& e, op_type t) : type(char(t)),\n                my_arg(const_cast<output_type*>(&e)), bypass_t(NULL) {}\n            join_node_base_operation(const successor_type &s, op_type t) : type(char(t)),\n                my_succ(const_cast<successor_type *>(&s)), bypass_t(NULL) {}\n            join_node_base_operation(op_type t) : 
type(char(t)), bypass_t(NULL) {}\n        };\n\n        typedef internal::aggregating_functor<my_class, join_node_base_operation> my_handler;\n        friend class internal::aggregating_functor<my_class, join_node_base_operation>;\n        bool forwarder_busy;\n        aggregator<my_handler, join_node_base_operation> my_aggregator;\n\n        void handle_operations(join_node_base_operation* op_list) {\n            join_node_base_operation *current;\n            while(op_list) {\n                current = op_list;\n                op_list = op_list->next;\n                switch(current->type) {\n                case reg_succ: {\n                        my_successors.register_successor(*(current->my_succ));\n                        task* tp = this->graph_node::my_graph.root_task();\n                        if(tuple_build_may_succeed() && !forwarder_busy && tp) {\n                            task *rtask = new ( task::allocate_additional_child_of(*tp) )\n                                    forward_task_bypass\n                                    <join_node_base<JP,InputTuple,OutputTuple> >(*this);\n                            FLOW_SPAWN(*rtask);\n                            forwarder_busy = true;\n                        }\n                        __TBB_store_with_release(current->status, SUCCEEDED);\n                    }\n                    break;\n                case rem_succ:\n                    my_successors.remove_successor(*(current->my_succ));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case try__get:\n                    if(tuple_build_may_succeed()) {\n                        if(try_to_make_tuple(*(current->my_arg))) {\n                            tuple_accepted();\n                            __TBB_store_with_release(current->status, SUCCEEDED);\n                        }\n                        else __TBB_store_with_release(current->status, FAILED);\n                    }\n     
               else __TBB_store_with_release(current->status, FAILED);\n                    break;\n                case do_fwrd_bypass: {\n                        bool build_succeeded;\n                        task *last_task = NULL;\n                        output_type out;\n                        if(tuple_build_may_succeed()) {\n                            do {\n                                build_succeeded = try_to_make_tuple(out);\n                                if(build_succeeded) {\n                                    task *new_task = my_successors.try_put_task(out);\n                                    last_task = combine_tasks(last_task, new_task);\n                                    if(new_task) {\n                                        tuple_accepted();\n                                    }\n                                    else {\n                                        tuple_rejected();\n                                        build_succeeded = false;\n                                    }\n                                }\n                            } while(build_succeeded);\n                        }\n                        current->bypass_t = last_task;\n                        __TBB_store_with_release(current->status, SUCCEEDED);\n                        forwarder_busy = false;\n                    }\n                    break;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                case add_blt_succ:\n                    my_successors.internal_add_built_successor(*(current->my_succ));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case del_blt_succ:\n                    my_successors.internal_delete_built_successor(*(current->my_succ));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_succ_cnt:\n                    current->cnt_val = my_successors.successor_count();\n                    
__TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n                case blt_succ_cpy:\n                    my_successors.copy_successors(*(current->svec));\n                    __TBB_store_with_release(current->status, SUCCEEDED);\n                    break;\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n                }\n            }\n        }\n        // ---------- end aggregator -----------\n    public:\n        join_node_base(graph &g) : graph_node(g), input_ports_type(g), forwarder_busy(false) {\n            my_successors.set_owner(this);\n            input_ports_type::set_my_node(this);\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        join_node_base(const join_node_base& other) :\n            graph_node(other.graph_node::my_graph), input_ports_type(other),\n            sender<OutputTuple>(), forwarder_busy(false), my_successors() {\n            my_successors.set_owner(this);\n            input_ports_type::set_my_node(this);\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        template<typename FunctionTuple>\n        join_node_base(graph &g, FunctionTuple f) : graph_node(g), input_ports_type(g, f), forwarder_busy(false) {\n            my_successors.set_owner(this);\n            input_ports_type::set_my_node(this);\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        bool register_successor(successor_type &r) {\n            join_node_base_operation op_data(r, reg_succ);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        bool remove_successor( successor_type &r) {\n            join_node_base_operation op_data(r, rem_succ);\n            my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n        bool try_get( output_type &v) {\n            join_node_base_operation op_data(v, try__get);\n            
my_aggregator.execute(&op_data);\n            return op_data.status == SUCCEEDED;\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/void internal_add_built_successor( successor_type &r) {\n            join_node_base_operation op_data(r, add_blt_succ);\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/void internal_delete_built_successor( successor_type &r) {\n            join_node_base_operation op_data(r, del_blt_succ);\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/size_t successor_count() {\n            join_node_base_operation op_data(blt_succ_cnt);\n            my_aggregator.execute(&op_data);\n            return op_data.cnt_val;\n        }\n\n        /*override*/ void copy_successors(successor_vector_type &v) {\n            join_node_base_operation op_data(blt_succ_cpy);\n            op_data.svec = &v;\n            my_aggregator.execute(&op_data);\n        }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n    protected:\n\n        /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {\n            input_ports_type::reset(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            my_successors.reset(f);\n#endif\n        }\n\n    private:\n        broadcast_cache<output_type, null_rw_mutex> my_successors;\n\n        friend class forward_task_bypass< join_node_base<JP, InputTuple, OutputTuple> >;\n        task *forward_task() {\n            join_node_base_operation op_data(do_fwrd_bypass);\n            my_aggregator.execute(&op_data);\n            return op_data.bypass_t;\n        }\n\n    };\n\n    // join base class type generator\n    template<int N, template<class> class PT, typename OutputTuple, graph_buffer_policy JP>\n    struct join_base {\n        typedef typename internal::join_node_base<JP, typename wrap_tuple_elements<N,PT,OutputTuple>::type, OutputTuple> type;\n    };\n\n    //! unfolded_join_node : passes input_ports_type to join_node_base.  
We build the input port type\n    //  using tuple_element.  The class PT is the port type (reserving_port, queueing_port, tag_matching_port)\n    //  and should match the graph_buffer_policy.\n\n    template<int N, template<class> class PT, typename OutputTuple, graph_buffer_policy JP>\n    class unfolded_join_node : public join_base<N,PT,OutputTuple,JP>::type {\n    public:\n        typedef typename wrap_tuple_elements<N, PT, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<JP, input_ports_type, output_type > base_type;\n    public:\n        unfolded_join_node(graph &g) : base_type(g) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n\n    // tag_matching unfolded_join_node.  This must be a separate specialization because the constructors\n    // differ.\n\n    template<typename OutputTuple>\n    class unfolded_join_node<2,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<2,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n    public:\n        typedef typename wrap_tuple_elements<2,tag_matching_port,OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1>\n        unfolded_join_node(graph &g, B0 b0, B1 b1) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new 
internal::function_body_leaf<T1, tag_value, B1>(b1)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n\n    template<typename OutputTuple>\n    class unfolded_join_node<3,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<3,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n        typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;\n    public:\n        typedef typename wrap_tuple_elements<3, tag_matching_port, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename internal::function_body<T2, tag_value> *f2_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1, typename B2>\n        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new internal::function_body_leaf<T1, tag_value, B1>(b1),\n                    new internal::function_body_leaf<T2, tag_value, B2>(b2)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n\n    template<typename OutputTuple>\n    class unfolded_join_node<4,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<4,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef 
typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n        typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;\n        typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3;\n    public:\n        typedef typename wrap_tuple_elements<4, tag_matching_port, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename internal::function_body<T2, tag_value> *f2_p;\n        typedef typename internal::function_body<T3, tag_value> *f3_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1, typename B2, typename B3>\n        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new internal::function_body_leaf<T1, tag_value, B1>(b1),\n                    new internal::function_body_leaf<T2, tag_value, B2>(b2),\n                    new internal::function_body_leaf<T3, tag_value, B3>(b3)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n\n    template<typename OutputTuple>\n    class unfolded_join_node<5,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<5,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n        typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;\n        typedef typename tbb::flow::tuple_element<3, OutputTuple>::type 
T3;\n        typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4;\n    public:\n        typedef typename wrap_tuple_elements<5, tag_matching_port, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename internal::function_body<T2, tag_value> *f2_p;\n        typedef typename internal::function_body<T3, tag_value> *f3_p;\n        typedef typename internal::function_body<T4, tag_value> *f4_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1, typename B2, typename B3, typename B4>\n        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new internal::function_body_leaf<T1, tag_value, B1>(b1),\n                    new internal::function_body_leaf<T2, tag_value, B2>(b2),\n                    new internal::function_body_leaf<T3, tag_value, B3>(b3),\n                    new internal::function_body_leaf<T4, tag_value, B4>(b4)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n\n#if __TBB_VARIADIC_MAX >= 6\n    template<typename OutputTuple>\n    class unfolded_join_node<6,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<6,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n        typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;\n     
   typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3;\n        typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4;\n        typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5;\n    public:\n        typedef typename wrap_tuple_elements<6, tag_matching_port, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename internal::function_body<T2, tag_value> *f2_p;\n        typedef typename internal::function_body<T3, tag_value> *f3_p;\n        typedef typename internal::function_body<T4, tag_value> *f4_p;\n        typedef typename internal::function_body<T5, tag_value> *f5_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5>\n        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new internal::function_body_leaf<T1, tag_value, B1>(b1),\n                    new internal::function_body_leaf<T2, tag_value, B2>(b2),\n                    new internal::function_body_leaf<T3, tag_value, B3>(b3),\n                    new internal::function_body_leaf<T4, tag_value, B4>(b4),\n                    new internal::function_body_leaf<T5, tag_value, B5>(b5)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n#endif\n\n#if __TBB_VARIADIC_MAX >= 7\n    template<typename OutputTuple>\n    class 
unfolded_join_node<7,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<7,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n        typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;\n        typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3;\n        typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4;\n        typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5;\n        typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6;\n    public:\n        typedef typename wrap_tuple_elements<7, tag_matching_port, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename internal::function_body<T2, tag_value> *f2_p;\n        typedef typename internal::function_body<T3, tag_value> *f3_p;\n        typedef typename internal::function_body<T4, tag_value> *f4_p;\n        typedef typename internal::function_body<T5, tag_value> *f5_p;\n        typedef typename internal::function_body<T6, tag_value> *f6_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6>\n        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new internal::function_body_leaf<T1, tag_value, B1>(b1),\n            
        new internal::function_body_leaf<T2, tag_value, B2>(b2),\n                    new internal::function_body_leaf<T3, tag_value, B3>(b3),\n                    new internal::function_body_leaf<T4, tag_value, B4>(b4),\n                    new internal::function_body_leaf<T5, tag_value, B5>(b5),\n                    new internal::function_body_leaf<T6, tag_value, B6>(b6)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n#endif\n\n#if __TBB_VARIADIC_MAX >= 8\n    template<typename OutputTuple>\n    class unfolded_join_node<8,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<8,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n        typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;\n        typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3;\n        typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4;\n        typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5;\n        typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6;\n        typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7;\n    public:\n        typedef typename wrap_tuple_elements<8, tag_matching_port, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename internal::function_body<T2, tag_value> *f2_p;\n        typedef typename internal::function_body<T3, tag_value> *f3_p;\n        typedef typename internal::function_body<T4, tag_value> *f4_p;\n        typedef typename 
internal::function_body<T5, tag_value> *f5_p;\n        typedef typename internal::function_body<T6, tag_value> *f6_p;\n        typedef typename internal::function_body<T7, tag_value> *f7_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7>\n        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new internal::function_body_leaf<T1, tag_value, B1>(b1),\n                    new internal::function_body_leaf<T2, tag_value, B2>(b2),\n                    new internal::function_body_leaf<T3, tag_value, B3>(b3),\n                    new internal::function_body_leaf<T4, tag_value, B4>(b4),\n                    new internal::function_body_leaf<T5, tag_value, B5>(b5),\n                    new internal::function_body_leaf<T6, tag_value, B6>(b6),\n                    new internal::function_body_leaf<T7, tag_value, B7>(b7)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n#endif\n\n#if __TBB_VARIADIC_MAX >= 9\n    template<typename OutputTuple>\n    class unfolded_join_node<9,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<9,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n        typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;\n        typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3;\n        typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4;\n        typedef typename 
tbb::flow::tuple_element<5, OutputTuple>::type T5;\n        typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6;\n        typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7;\n        typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8;\n    public:\n        typedef typename wrap_tuple_elements<9, tag_matching_port, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename internal::function_body<T2, tag_value> *f2_p;\n        typedef typename internal::function_body<T3, tag_value> *f3_p;\n        typedef typename internal::function_body<T4, tag_value> *f4_p;\n        typedef typename internal::function_body<T5, tag_value> *f5_p;\n        typedef typename internal::function_body<T6, tag_value> *f6_p;\n        typedef typename internal::function_body<T7, tag_value> *f7_p;\n        typedef typename internal::function_body<T8, tag_value> *f8_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7, typename B8>\n        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new internal::function_body_leaf<T1, tag_value, B1>(b1),\n                    new internal::function_body_leaf<T2, tag_value, B2>(b2),\n                    new internal::function_body_leaf<T3, tag_value, B3>(b3),\n                    new internal::function_body_leaf<T4, tag_value, 
B4>(b4),\n                    new internal::function_body_leaf<T5, tag_value, B5>(b5),\n                    new internal::function_body_leaf<T6, tag_value, B6>(b6),\n                    new internal::function_body_leaf<T7, tag_value, B7>(b7),\n                    new internal::function_body_leaf<T8, tag_value, B8>(b8)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n#endif\n\n#if __TBB_VARIADIC_MAX >= 10\n    template<typename OutputTuple>\n    class unfolded_join_node<10,tag_matching_port,OutputTuple,tag_matching> : public\n            join_base<10,tag_matching_port,OutputTuple,tag_matching>::type {\n        typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;\n        typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;\n        typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;\n        typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3;\n        typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4;\n        typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5;\n        typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6;\n        typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7;\n        typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8;\n        typedef typename tbb::flow::tuple_element<9, OutputTuple>::type T9;\n    public:\n        typedef typename wrap_tuple_elements<10, tag_matching_port, OutputTuple>::type input_ports_type;\n        typedef OutputTuple output_type;\n    private:\n        typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;\n        typedef typename internal::function_body<T0, tag_value> *f0_p;\n        typedef typename internal::function_body<T1, tag_value> *f1_p;\n        typedef typename internal::function_body<T2, tag_value> *f2_p;\n        typedef typename internal::function_body<T3, tag_value> 
*f3_p;\n        typedef typename internal::function_body<T4, tag_value> *f4_p;\n        typedef typename internal::function_body<T5, tag_value> *f5_p;\n        typedef typename internal::function_body<T6, tag_value> *f6_p;\n        typedef typename internal::function_body<T7, tag_value> *f7_p;\n        typedef typename internal::function_body<T8, tag_value> *f8_p;\n        typedef typename internal::function_body<T9, tag_value> *f9_p;\n        typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p, f9_p > func_initializer_type;\n    public:\n        template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7, typename B8, typename B9>\n        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8, B9 b9) : base_type(g,\n                func_initializer_type(\n                    new internal::function_body_leaf<T0, tag_value, B0>(b0),\n                    new internal::function_body_leaf<T1, tag_value, B1>(b1),\n                    new internal::function_body_leaf<T2, tag_value, B2>(b2),\n                    new internal::function_body_leaf<T3, tag_value, B3>(b3),\n                    new internal::function_body_leaf<T4, tag_value, B4>(b4),\n                    new internal::function_body_leaf<T5, tag_value, B5>(b5),\n                    new internal::function_body_leaf<T6, tag_value, B6>(b6),\n                    new internal::function_body_leaf<T7, tag_value, B7>(b7),\n                    new internal::function_body_leaf<T8, tag_value, B8>(b8),\n                    new internal::function_body_leaf<T9, tag_value, B9>(b9)\n                    ) ) {}\n        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}\n    };\n#endif\n\n    //! 
templated function to refer to input ports of the join node\n    template<size_t N, typename JNT>\n    typename tbb::flow::tuple_element<N, typename JNT::input_ports_type>::type &input_port(JNT &jn) {\n        return tbb::flow::get<N>(jn.input_ports());\n    }\n\n}\n#endif // __TBB__flow_graph_join_impl_H\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_flow_graph_node_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__flow_graph_node_impl_H\n#define __TBB__flow_graph_node_impl_H\n\n#ifndef __TBB_flow_graph_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#include \"_flow_graph_item_buffer_impl.h\"\n\n//! 
@cond INTERNAL\nnamespace internal {\n\n    using tbb::internal::aggregated_operation;\n    using tbb::internal::aggregating_functor;\n    using tbb::internal::aggregator;\n\n     template< typename T, typename A >\n     class function_input_queue : public item_buffer<T,A> {\n     public:\n         bool pop( T& t ) {\n             return this->pop_front( t );\n         }\n\n         bool push( T& t ) {\n             return this->push_back( t );\n         }\n     };\n\n    //! Input and scheduling for a function node that takes a type Input as input\n    //  The only up-ref is apply_body_impl, which should implement the function \n    //  call and any handling of the result.\n    template< typename Input, typename A, typename ImplType >\n    class function_input_base : public receiver<Input>, tbb::internal::no_assign {\n        enum op_stat {WAIT=0, SUCCEEDED, FAILED};\n        enum op_type {reg_pred, rem_pred, app_body, try_fwd, tryput_bypass, app_body_bypass\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            , add_blt_pred, del_blt_pred,\n            blt_pred_cnt, blt_pred_cpy   // create vector copies of preds and succs\n#endif \n        };\n        typedef function_input_base<Input, A, ImplType> my_class;\n        \n    public:\n\n        //! The input type of this receiver\n        typedef Input input_type;\n        typedef sender<Input> predecessor_type;\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<predecessor_type *> predecessor_vector_type;\n#endif\n\n        //! Constructor for function_input_base\n        function_input_base( graph &g, size_t max_concurrency, function_input_queue<input_type,A> *q = NULL )\n            : my_graph(g), my_max_concurrency(max_concurrency), my_concurrency(0),\n              my_queue(q), forwarder_busy(false) {\n            my_predecessors.set_owner(this);\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n        \n        //! 
Copy constructor\n        function_input_base( const function_input_base& src, function_input_queue<input_type,A> *q = NULL ) :\n            receiver<Input>(), tbb::internal::no_assign(),\n            my_graph(src.my_graph), my_max_concurrency(src.my_max_concurrency),\n            my_concurrency(0), my_queue(q), forwarder_busy(false)\n        {\n            my_predecessors.set_owner(this);\n            my_aggregator.initialize_handler(my_handler(this));\n        }\n\n        //! Destructor\n        virtual ~function_input_base() { \n            if ( my_queue ) delete my_queue;\n        }\n        \n        //! Put to the node, returning a task if available\n        virtual task * try_put_task( const input_type &t ) {\n           if ( my_max_concurrency == 0 ) {\n               return create_body_task( t );\n           } else {\n               my_operation op_data(t, tryput_bypass);\n               my_aggregator.execute(&op_data);\n               if(op_data.status == SUCCEEDED ) {\n                   return op_data.bypass_t;\n               }\n               return NULL;\n           }\n        }\n\n        //! Adds src to the list of cached predecessors.\n        /* override */ bool register_predecessor( predecessor_type &src ) {\n            my_operation op_data(reg_pred);\n            op_data.r = &src;\n            my_aggregator.execute(&op_data);\n            return true;\n        }\n        \n        //! Removes src from the list of cached predecessors.\n        /* override */ bool remove_predecessor( predecessor_type &src ) {\n            my_operation op_data(rem_pred);\n            op_data.r = &src;\n            my_aggregator.execute(&op_data);\n            return true;\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        //! 
Adds to list of predecessors added by make_edge\n        /*override*/ void internal_add_built_predecessor( predecessor_type &src) {\n            my_operation op_data(add_blt_pred);\n            op_data.r = &src;\n            my_aggregator.execute(&op_data);\n        }\n\n        //! removes from to list of predecessors (used by remove_edge)\n        /*override*/ void internal_delete_built_predecessor( predecessor_type &src) {\n            my_operation op_data(del_blt_pred);\n            op_data.r = &src;\n            my_aggregator.execute(&op_data);\n        }\n\n        /*override*/ size_t predecessor_count() {\n            my_operation op_data(blt_pred_cnt);\n            my_aggregator.execute(&op_data);\n            return op_data.cnt_val;\n        }\n\n        /*override*/ void copy_predecessors(predecessor_vector_type &v) {\n            my_operation op_data(blt_pred_cpy);\n            op_data.predv = &v;\n            my_aggregator.execute(&op_data);\n        }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n    protected:\n\n        void reset_function_input_base( __TBB_PFG_RESET_ARG(reset_flags f)) {\n            my_concurrency = 0;\n            if(my_queue) {\n                my_queue->reset();\n            }\n            reset_receiver(__TBB_PFG_RESET_ARG(f));\n            forwarder_busy = false;\n        }\n\n        graph& my_graph;\n        const size_t my_max_concurrency;\n        size_t my_concurrency;\n        function_input_queue<input_type, A> *my_queue;\n        predecessor_cache<input_type, null_mutex > my_predecessors;\n        \n        /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) {\n            my_predecessors.reset(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            __TBB_ASSERT(!(f & rf_extract) || my_predecessors.empty(), \"function_input_base reset failed\");\n#endif\n        }\n\n    private:\n\n        friend class apply_body_task_bypass< my_class, input_type >;\n        friend class 
forward_task_bypass< my_class >;\n        \n        class my_operation : public aggregated_operation< my_operation > {\n        public:\n            char type;\n            union {\n                input_type *elem;\n                predecessor_type *r;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                size_t cnt_val;\n                predecessor_vector_type *predv;\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n            };\n            tbb::task *bypass_t;\n            my_operation(const input_type& e, op_type t) :\n                type(char(t)), elem(const_cast<input_type*>(&e)) {}\n            my_operation(op_type t) : type(char(t)), r(NULL) {}\n        };\n        \n        bool forwarder_busy;\n        typedef internal::aggregating_functor<my_class, my_operation> my_handler;\n        friend class internal::aggregating_functor<my_class, my_operation>;\n        aggregator< my_handler, my_operation > my_aggregator;\n        \n        void handle_operations(my_operation *op_list) {\n            my_operation *tmp;\n            while (op_list) {\n                tmp = op_list;\n                op_list = op_list->next;\n                switch (tmp->type) {\n                case reg_pred:\n                    my_predecessors.add(*(tmp->r));\n                    __TBB_store_with_release(tmp->status, SUCCEEDED);\n                    if (!forwarder_busy) {\n                        forwarder_busy = true;\n                        spawn_forward_task();\n                    }\n                    break;\n                case rem_pred:\n                    my_predecessors.remove(*(tmp->r));\n                    __TBB_store_with_release(tmp->status, SUCCEEDED);\n                    break;\n                case app_body:\n                    __TBB_ASSERT(my_max_concurrency != 0, NULL);\n                    --my_concurrency;\n                    __TBB_store_with_release(tmp->status, SUCCEEDED);\n                    if (my_concurrency<my_max_concurrency) {\n               
         input_type i;\n                        bool item_was_retrieved = false;\n                        if ( my_queue )\n                            item_was_retrieved = my_queue->pop(i);\n                        else\n                            item_was_retrieved = my_predecessors.get_item(i);\n                        if (item_was_retrieved) {\n                            ++my_concurrency;\n                            spawn_body_task(i);\n                        }\n                    }\n                    break;\n                case app_body_bypass: {\n                        task * new_task = NULL;\n                        __TBB_ASSERT(my_max_concurrency != 0, NULL);\n                        --my_concurrency;\n                        if (my_concurrency<my_max_concurrency) {\n                            input_type i;\n                            bool item_was_retrieved = false;\n                            if ( my_queue )\n                                item_was_retrieved = my_queue->pop(i);\n                            else \n                                item_was_retrieved = my_predecessors.get_item(i);\n                            if (item_was_retrieved) {\n                                ++my_concurrency;\n                                new_task = create_body_task(i);\n                            }\n                        }\n                        tmp->bypass_t = new_task;\n                        __TBB_store_with_release(tmp->status, SUCCEEDED);\n                    }\n                    break;\n                case tryput_bypass: internal_try_put_task(tmp);  break;\n                case try_fwd: internal_forward(tmp);  break;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n                case add_blt_pred: {\n                         my_predecessors.internal_add_built_predecessor(*(tmp->r));\n                        __TBB_store_with_release(tmp->status, SUCCEEDED);\n                    }\n                    break;\n                case del_blt_pred:\n   
                 my_predecessors.internal_delete_built_predecessor(*(tmp->r));\n                    __TBB_store_with_release(tmp->status, SUCCEEDED);\n                    break;\n                case blt_pred_cnt:\n                    tmp->cnt_val = my_predecessors.predecessor_count();\n                    __TBB_store_with_release(tmp->status, SUCCEEDED);\n                    break;\n                case blt_pred_cpy:\n                    my_predecessors.copy_predecessors( *(tmp->predv) );\n                    __TBB_store_with_release(tmp->status, SUCCEEDED);\n                    break;\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n                }\n            }\n        }\n        \n        //! Put to the node, but return the task instead of enqueueing it\n        void internal_try_put_task(my_operation *op) {\n            __TBB_ASSERT(my_max_concurrency != 0, NULL);\n            if (my_concurrency < my_max_concurrency) {\n               ++my_concurrency;\n               task * new_task = create_body_task(*(op->elem));\n               op->bypass_t = new_task;\n               __TBB_store_with_release(op->status, SUCCEEDED);\n           } else if ( my_queue && my_queue->push(*(op->elem)) ) { \n               op->bypass_t = SUCCESSFULLY_ENQUEUED;\n               __TBB_store_with_release(op->status, SUCCEEDED);\n           } else {\n               op->bypass_t = NULL;\n               __TBB_store_with_release(op->status, FAILED);\n           }\n        }\n        \n        //! 
Tries to spawn bodies if available and if concurrency allows\n        void internal_forward(my_operation *op) {\n            op->bypass_t = NULL;\n            if (my_concurrency<my_max_concurrency || !my_max_concurrency) {\n                input_type i;\n                bool item_was_retrieved = false;\n                if ( my_queue )\n                    item_was_retrieved = my_queue->pop(i);\n                else\n                    item_was_retrieved = my_predecessors.get_item(i);\n                if (item_was_retrieved) {\n                    ++my_concurrency;\n                    op->bypass_t = create_body_task(i);\n                    __TBB_store_with_release(op->status, SUCCEEDED);\n                    return;\n                }\n            }\n            __TBB_store_with_release(op->status, FAILED);\n            forwarder_busy = false;\n        }\n        \n        //! Applies the body to the provided input\n        //  then decides if more work is available \n        void apply_body( input_type &i ) {\n            task *new_task = apply_body_bypass(i);\n            if(!new_task) return;\n            if(new_task == SUCCESSFULLY_ENQUEUED) return;\n            FLOW_SPAWN(*new_task);\n            return;\n        }\n        \n        //! Applies the body to the provided input\n        //  then decides if more work is available \n        task * apply_body_bypass( input_type &i ) {\n            task * new_task = static_cast<ImplType *>(this)->apply_body_impl_bypass(i);\n            if ( my_max_concurrency != 0 ) {\n                my_operation op_data(app_body_bypass);  // tries to pop an item or get_item, enqueues another apply_body\n                my_aggregator.execute(&op_data);\n                tbb::task *ttask = op_data.bypass_t;\n                new_task = combine_tasks(new_task, ttask);\n            }\n            return new_task;\n        }\n        \n        //! 
allocates a task to call apply_body( input )\n        inline task * create_body_task( const input_type &input ) {\n            \n            task* tp = my_graph.root_task();\n            return (tp) ?\n                new(task::allocate_additional_child_of(*tp))\n                    apply_body_task_bypass < my_class, input_type >(*this, input) :\n                NULL;\n        }\n\n       //! Spawns a task that calls apply_body( input )\n       inline void spawn_body_task( const input_type &input ) {\n           task* tp = create_body_task(input);\n           // tp == NULL => g.reset(), which shouldn't occur in concurrent context\n           if(tp) {\n               FLOW_SPAWN(*tp);\n           }\n       }\n        \n       //! This is executed by an enqueued task, the \"forwarder\"\n       task *forward_task() {\n           my_operation op_data(try_fwd);\n           task *rval = NULL;\n           do {\n               op_data.status = WAIT;\n               my_aggregator.execute(&op_data);\n               if(op_data.status == SUCCEEDED) {\n                   tbb::task *ttask = op_data.bypass_t;\n                   rval = combine_tasks(rval, ttask);\n               }\n           } while (op_data.status == SUCCEEDED);\n           return rval;\n       }\n        \n       inline task *create_forward_task() {\n           task* tp = my_graph.root_task();\n           return (tp) ?\n               new(task::allocate_additional_child_of(*tp)) forward_task_bypass< my_class >(*this) :\n               NULL;\n       }\n\n       //! Spawns a task that calls forward()\n       inline void spawn_forward_task() {\n           task* tp = create_forward_task();\n           if(tp) {\n               FLOW_SPAWN(*tp);\n           }\n       }\n    };  // function_input_base\n\n    //! 
Implements methods for a function node that takes a type Input as input and sends\n    //  a type Output to its successors.\n    template< typename Input, typename Output, typename A>\n    class function_input : public function_input_base<Input, A, function_input<Input,Output,A> > {\n    public:\n        typedef Input input_type;\n        typedef Output output_type;\n        typedef function_input<Input,Output,A> my_class;\n        typedef function_input_base<Input, A, my_class> base_type;\n        typedef function_input_queue<input_type, A> input_queue_type;\n\n\n        // constructor\n        template<typename Body>\n        function_input( graph &g, size_t max_concurrency, Body& body, function_input_queue<input_type,A> *q = NULL ) :\n            base_type(g, max_concurrency, q),\n            my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) {\n        }\n\n        //! Copy constructor\n        function_input( const function_input& src, input_queue_type *q = NULL ) : \n                base_type(src, q),\n                my_body( src.my_body->clone() ) {\n        }\n\n        ~function_input() {\n            delete my_body;\n        }\n\n        template< typename Body >\n        Body copy_function_object() {\n            internal::function_body<input_type, output_type> &body_ref = *this->my_body;\n            return dynamic_cast< internal::function_body_leaf<input_type, output_type, Body> & >(body_ref).get_body(); \n        } \n\n        task * apply_body_impl_bypass( const input_type &i) {\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n            // There is an extra copied needed to capture the\n            // body execution without the try_put\n            tbb::internal::fgt_begin_body( my_body );\n            output_type v = (*my_body)(i);\n            tbb::internal::fgt_end_body( my_body );\n            task * new_task = successors().try_put_task( v );\n#else       \n            task * new_task = successors().try_put_task( (*my_body)(i) 
);\n#endif\n            return new_task;\n        }\n\n    protected:\n\n        void reset_function_input(__TBB_PFG_RESET_ARG(reset_flags f)) {\n            base_type::reset_function_input_base(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            if(f & rf_reset_bodies) my_body->reset_body();\n#endif\n        }\n\n        function_body<input_type, output_type> *my_body;\n        virtual broadcast_cache<output_type > &successors() = 0;\n\n    };  // function_input\n\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n    // helper templates to reset the successor edges of the output ports of an multifunction_node\n    template<int N>\n    struct reset_element {\n        template<typename P>\n        static void reset_this(P &p, reset_flags f) {\n            (void)tbb::flow::get<N-1>(p).successors().reset(f);\n            reset_element<N-1>::reset_this(p, f);\n        }\n        template<typename P>\n        static bool this_empty(P &p) {\n            if(tbb::flow::get<N-1>(p).successors().empty()) \n                return reset_element<N-1>::this_empty(p);\n            return false;\n        }\n    };\n\n    template<>\n    struct reset_element<1> {\n        template<typename P>\n        static void reset_this(P &p, reset_flags f) {\n            (void)tbb::flow::get<0>(p).successors().reset(f);\n        }\n        template<typename P>\n        static bool this_empty(P &p) {\n            return tbb::flow::get<0>(p).successors().empty();\n        }\n    };\n#endif\n\n    //! Implements methods for a function node that takes a type Input as input\n    //  and has a tuple of output ports specified.  
\n    template< typename Input, typename OutputPortSet, typename A>\n    class multifunction_input : public function_input_base<Input, A, multifunction_input<Input,OutputPortSet,A> > {\n    public:\n        static const int N = tbb::flow::tuple_size<OutputPortSet>::value;\n        typedef Input input_type;\n        typedef OutputPortSet output_ports_type;\n        typedef multifunction_input<Input,OutputPortSet,A> my_class;\n        typedef function_input_base<Input, A, my_class> base_type;\n        typedef function_input_queue<input_type, A> input_queue_type;\n\n\n        // constructor\n        template<typename Body>\n        multifunction_input( \n                graph &g, \n                size_t max_concurrency, \n                Body& body,\n                function_input_queue<input_type,A> *q = NULL ) :\n            base_type(g, max_concurrency, q),\n            my_body( new internal::multifunction_body_leaf<input_type, output_ports_type, Body>(body) ) {\n        }\n\n        //! Copy constructor\n        multifunction_input( const multifunction_input& src, input_queue_type *q = NULL ) : \n                base_type(src, q),\n                my_body( src.my_body->clone() ) {\n        }\n\n        ~multifunction_input() {\n            delete my_body;\n        }\n\n        template< typename Body >\n        Body copy_function_object() {\n            internal::multifunction_body<input_type, output_ports_type> &body_ref = *this->my_body;\n            return dynamic_cast< internal::multifunction_body_leaf<input_type, output_ports_type, Body> & >(body_ref).get_body(); \n        } \n\n        // for multifunction nodes we do not have a single successor as such.  
So we just tell\n        // the task we were successful.\n        task * apply_body_impl_bypass( const input_type &i) {\n            tbb::internal::fgt_begin_body( my_body );\n            (*my_body)(i, my_output_ports);\n            tbb::internal::fgt_end_body( my_body );\n            task * new_task = SUCCESSFULLY_ENQUEUED;\n            return new_task;\n        }\n\n        output_ports_type &output_ports(){ return my_output_ports; }\n\n    protected:\n\n        /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { \n            base_type::reset_function_input_base(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            reset_element<N>::reset_this(my_output_ports, f);\n            if(f & rf_reset_bodies) my_body->reset_body();\n            __TBB_ASSERT(!(f & rf_extract) || reset_element<N>::this_empty(my_output_ports), \"multifunction_node reset failed\");\n#endif\n        }\n\n        multifunction_body<input_type, output_ports_type> *my_body;\n        output_ports_type my_output_ports;\n\n    };  // multifunction_input\n\n    // template to refer to an output port of a multifunction_node\n    template<size_t N, typename MOP>\n    typename tbb::flow::tuple_element<N, typename MOP::output_ports_type>::type &output_port(MOP &op) {\n        return tbb::flow::get<N>(op.output_ports()); \n    }\n\n// helper structs for split_node\n    template<int N>\n    struct emit_element {\n        template<typename T, typename P>\n        static void emit_this(const T &t, P &p) {\n            (void)tbb::flow::get<N-1>(p).try_put(tbb::flow::get<N-1>(t));\n            emit_element<N-1>::emit_this(t,p);\n        }\n    };\n\n    template<>\n    struct emit_element<1> {\n        template<typename T, typename P>\n        static void emit_this(const T &t, P &p) {\n            (void)tbb::flow::get<0>(p).try_put(tbb::flow::get<0>(t));\n        }\n    };\n\n    //! 
Implements methods for an executable node that takes continue_msg as input\n    template< typename Output >\n    class continue_input : public continue_receiver {\n    public:\n        \n        //! The input type of this receiver\n        typedef continue_msg input_type;\n            \n        //! The output type of this receiver\n        typedef Output output_type;\n        \n        template< typename Body >\n        continue_input( graph &g, Body& body )\n            : my_graph_ptr(&g), \n             my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { }\n        \n        template< typename Body >\n        continue_input( graph &g, int number_of_predecessors, Body& body )\n            : continue_receiver( number_of_predecessors ), my_graph_ptr(&g), \n             my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { }\n\n        continue_input( const continue_input& src ) : continue_receiver(src), \n            my_graph_ptr(src.my_graph_ptr), my_body( src.my_body->clone() ) {}\n\n        ~continue_input() {\n            delete my_body;\n        }\n\n        template< typename Body >\n        Body copy_function_object() {\n            internal::function_body<input_type, output_type> &body_ref = *my_body;\n            return dynamic_cast< internal::function_body_leaf<input_type, output_type, Body> & >(body_ref).get_body(); \n        } \n\n        /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) {\n            continue_receiver::reset_receiver(__TBB_PFG_RESET_ARG(f));\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n            if(f & rf_reset_bodies) my_body->reset_body();\n#endif\n        }\n\n    protected:\n        \n        graph* my_graph_ptr;\n        function_body<input_type, output_type> *my_body;\n        \n        virtual broadcast_cache<output_type > &successors() = 0; \n        \n        friend class apply_body_task_bypass< continue_input< Output >, continue_msg >;\n        \n     
   //! Applies the body to the provided input\n        /* override */ task *apply_body_bypass( input_type ) {\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n            // There is an extra copied needed to capture the\n            // body execution without the try_put\n            tbb::internal::fgt_begin_body( my_body );\n            output_type v = (*my_body)( continue_msg() );\n            tbb::internal::fgt_end_body( my_body );\n            return successors().try_put_task( v );\n#else   \n            return successors().try_put_task( (*my_body)( continue_msg() ) );\n#endif\n        }\n        \n        //! Spawns a task that applies the body\n        /* override */ task *execute( ) {\n            task* tp = my_graph_ptr->root_task();\n            return (tp) ?\n                new ( task::allocate_additional_child_of( *tp ) ) \n                    apply_body_task_bypass< continue_input< Output >, continue_msg >( *this, continue_msg() ) :\n                NULL;\n        }\n\n    };  // continue_input\n        \n    //! Implements methods for both executable and function nodes that puts Output to its successors\n    template< typename Output >\n    class function_output : public sender<Output> {\n    public:\n        \n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        template<int N> friend struct reset_element;\n#endif\n        typedef Output output_type;\n        typedef receiver<output_type> successor_type;\n        typedef broadcast_cache<output_type> broadcast_cache_type;\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        typedef std::vector<successor_type *> successor_vector_type;\n#endif\n        \n        function_output() { my_successors.set_owner(this); }\n        function_output(const function_output & /*other*/) : sender<output_type>() {\n            my_successors.set_owner(this);\n        }\n        \n        //! 
Adds a new successor to this node\n        /* override */ bool register_successor( receiver<output_type> &r ) {\n            successors().register_successor( r );\n            return true;\n        }\n        \n        //! Removes a successor from this node\n        /* override */ bool remove_successor( receiver<output_type> &r ) {\n            successors().remove_successor( r );\n            return true;\n        }\n\n#if TBB_PREVIEW_FLOW_GRAPH_FEATURES\n        /*override*/ void internal_add_built_successor( receiver<output_type> &r) {\n            successors().internal_add_built_successor( r );\n        }\n\n        /*override*/ void internal_delete_built_successor( receiver<output_type> &r) {\n            successors().internal_delete_built_successor( r );\n        }\n\n        /*override*/ size_t successor_count() {\n            return successors().successor_count();\n        }\n\n        /*override*/ void  copy_successors( successor_vector_type &v) {\n            successors().copy_successors(v);\n        }\n#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */\n\n        // for multifunction_node.  The function_body that implements\n        // the node will have an input and an output tuple of ports.  
To put\n        // an item to a successor, the body should\n        //\n        //    get<I>(output_ports).try_put(output_value);\n        //\n        // return value will be bool returned from successors.try_put.\n        task *try_put_task(const output_type &i) { return my_successors.try_put_task(i); }\n          \n    protected:\n        broadcast_cache_type my_successors;\n        broadcast_cache_type &successors() { return my_successors; } \n        \n    };  // function_output\n\n    template< typename Output >\n    class multifunction_output : public function_output<Output> {\n    public:\n        typedef Output output_type;\n        typedef function_output<output_type> base_type;\n        using base_type::my_successors;\n        \n        multifunction_output() : base_type() {my_successors.set_owner(this);}\n        multifunction_output( const multifunction_output &/*other*/) : base_type() { my_successors.set_owner(this); }\n\n        bool try_put(const output_type &i) {\n            task *res = my_successors.try_put_task(i);\n            if(!res) return false;\n            if(res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res);\n            return true;\n        }\n    };  // multifunction_output\n\n}  // internal\n\n#endif // __TBB__flow_graph_node_impl_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_flow_graph_tagged_buffer_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n// tagged buffer that can expand, and can support as many deletions as additions\n// list-based, with elements of list held in array (for destruction management),\n// multiplicative hashing (like ets).  
No synchronization built-in.\n//\n\n#ifndef __TBB__flow_graph_tagged_buffer_impl_H\n#define __TBB__flow_graph_tagged_buffer_impl_H\n\n#ifndef __TBB_flow_graph_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n// included in namespace tbb::flow::interface7::internal\n\ntemplate<typename T, typename U, size_t NoTagMark>\nstruct otherData {\n    T t;\n    U next;\n    otherData() : t(NoTagMark), next(NULL) {}\n};\n\ntemplate<typename TagType, typename ValueType, size_t NoTagMark>\nstruct buffer_element_type {\n    // the second parameter below is void * because we can't forward-declare the type\n    // itself, so we just reinterpret_cast below.\n    typedef typename aligned_pair<ValueType, otherData<TagType, void *, NoTagMark> >::type type;\n};\n\ntemplate\n    <\n     typename TagType, \n     typename ValueType, \n     size_t   NoTagMark = 0,\n     typename Allocator=tbb::cache_aligned_allocator< typename buffer_element_type<TagType, ValueType, NoTagMark>::type >\n    >\nclass tagged_buffer {\npublic:\n    static const size_t INITIAL_SIZE = 8;  // initial size of the hash pointer table\n    static const TagType NO_TAG = TagType(NoTagMark);\n    typedef ValueType value_type;\n    typedef typename buffer_element_type<TagType, ValueType, NO_TAG>::type element_type;\n    typedef value_type *pointer_type;\n    typedef element_type *list_array_type;  // array we manage manually\n    typedef list_array_type *pointer_array_type;\n    typedef typename Allocator::template rebind<list_array_type>::other pointer_array_allocator_type;\n    typedef typename Allocator::template rebind<element_type>::other elements_array_allocator;\nprivate:\n    size_t my_size;\n    size_t nelements;\n    pointer_array_type pointer_array;    // pointer_array[my_size]\n    list_array_type elements_array;      // elements_array[my_size / 2]\n    element_type* free_list;\n\n    size_t mask() { return my_size - 1; }\n\n    static size_t hash(TagType t) 
{\n        return uintptr_t(t)*tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value;\n    }\n\n    void set_up_free_list( element_type **p_free_list, list_array_type la, size_t sz) {\n        for(size_t i=0; i < sz - 1; ++i ) {  // construct free list\n            la[i].second.next = &(la[i+1]);\n            la[i].second.t = NO_TAG;\n        }\n        la[sz-1].second.next = NULL;\n        *p_free_list = &(la[0]);\n    }\n\n    // cleanup for exceptions\n    struct DoCleanup {\n        pointer_array_type *my_pa;\n        list_array_type *my_elements;\n        size_t my_size;\n\n        DoCleanup(pointer_array_type &pa, list_array_type &my_els, size_t sz) :\n            my_pa(&pa), my_elements(&my_els), my_size(sz) {  }\n        ~DoCleanup() {\n            if(my_pa) {\n                size_t dont_care = 0;\n                internal_free_buffer(*my_pa, *my_elements, my_size, dont_care);\n            }\n        }\n    };\n\n    // exception-safety requires we do all the potentially-throwing operations first\n    void grow_array() {\n        size_t new_size = my_size*2;\n        size_t new_nelements = nelements;  // internal_free_buffer zeroes this\n        list_array_type new_elements_array = NULL;\n        pointer_array_type new_pointer_array = NULL;\n        list_array_type new_free_list = NULL;\n        {\n            DoCleanup my_cleanup(new_pointer_array, new_elements_array, new_size);\n            new_elements_array = elements_array_allocator().allocate(my_size);\n            new_pointer_array = pointer_array_allocator_type().allocate(new_size);\n            for(size_t i=0; i < new_size; ++i) new_pointer_array[i] = NULL;\n            set_up_free_list(&new_free_list, new_elements_array, my_size );\n\n            for(size_t i=0; i < my_size; ++i) {\n                for( element_type* op = pointer_array[i]; op; op = (element_type *)(op->second.next)) {\n                    value_type *ov = reinterpret_cast<value_type *>(&(op->first));\n    
                // could have std::move semantics\n                    internal_tagged_insert(new_pointer_array, new_size, new_free_list, op->second.t, *ov);\n                }\n            }\n            my_cleanup.my_pa = NULL;\n            my_cleanup.my_elements = NULL;\n        }\n\n        internal_free_buffer(pointer_array, elements_array, my_size, nelements);\n        free_list = new_free_list;\n        pointer_array = new_pointer_array;\n        elements_array = new_elements_array;\n        my_size = new_size;\n        nelements = new_nelements;\n    }\n\n    // v should have perfect forwarding if std::move implemented.\n    // we use this method to move elements in grow_array, so can't use class fields\n    void internal_tagged_insert( element_type **p_pointer_array, size_t p_sz, list_array_type &p_free_list,\n            const TagType t, const value_type &v) {\n        size_t l_mask = p_sz-1;\n        size_t h = hash(t) & l_mask;\n        __TBB_ASSERT(p_free_list, \"Error: free list not set up.\");\n        element_type* my_elem = p_free_list; p_free_list = (element_type *)(p_free_list->second.next);\n        my_elem->second.t = t;\n        (void) new(&(my_elem->first)) value_type(v);\n        my_elem->second.next = p_pointer_array[h];\n        p_pointer_array[h] = my_elem;\n    }\n\n    void internal_initialize_buffer() {\n        pointer_array = pointer_array_allocator_type().allocate(my_size);\n        for(size_t i = 0; i < my_size; ++i) pointer_array[i] = NULL;\n        elements_array = elements_array_allocator().allocate(my_size / 2);\n        set_up_free_list(&free_list, elements_array, my_size / 2);\n    }\n\n    // made static so an enclosed class can use to properly dispose of the internals\n    static void internal_free_buffer( pointer_array_type &pa, list_array_type &el, size_t &sz, size_t &ne ) {\n        if(pa) {\n            for(size_t i = 0; i < sz; ++i ) {\n                element_type *p_next;\n                for( element_type *p = 
pa[i]; p; p = p_next) {\n                    p_next = (element_type *)p->second.next;\n                    value_type *vp = reinterpret_cast<value_type *>(&(p->first));\n                    vp->~value_type();\n                }\n            }\n            pointer_array_allocator_type().deallocate(pa, sz); \n            pa = NULL;\n        }\n        // Separate test (if allocation of pa throws, el may be allocated.\n        // but no elements will be constructed.)\n        if(el) {\n            elements_array_allocator().deallocate(el, sz / 2);\n            el = NULL;\n        }\n        sz = INITIAL_SIZE;\n        ne = 0;\n    }\n\npublic:\n    tagged_buffer() : my_size(INITIAL_SIZE), nelements(0) {\n        internal_initialize_buffer();\n    }\n\n    ~tagged_buffer() {\n        internal_free_buffer(pointer_array, elements_array, my_size, nelements);\n    }\n\n    void reset() {\n        internal_free_buffer(pointer_array, elements_array, my_size, nelements);\n        internal_initialize_buffer();\n    }\n\n    bool tagged_insert(const TagType t, const value_type &v) {\n        pointer_type p;\n        if(tagged_find_ref(t, p)) {\n            p->~value_type();\n            (void) new(p) value_type(v);  // copy-construct into the space\n            return false;\n        }\n        ++nelements;\n        if(nelements*2 > my_size) grow_array();\n        internal_tagged_insert(pointer_array, my_size, free_list, t, v);\n        return true;\n    }\n\n    // returns reference to array element.v\n    bool tagged_find_ref(const TagType t, pointer_type &v) {\n        size_t i = hash(t) & mask();\n        for(element_type* p = pointer_array[i]; p; p = (element_type *)(p->second.next)) {\n            if(p->second.t == t) {\n                v = reinterpret_cast<pointer_type>(&(p->first));\n                return true;\n            }\n        }\n        return false;\n    }\n\n    bool tagged_find( const TagType t, value_type &v) {\n        value_type *p;\n        
if(tagged_find_ref(t, p)) {\n            v = *p;\n            return true;\n        }\n        else\n            return false;\n    }\n\n    void tagged_delete(const TagType t) {\n        size_t h = hash(t) & mask();\n        element_type* prev = NULL;\n        for(element_type* p = pointer_array[h]; p; prev = p, p = (element_type *)(p->second.next)) {\n            if(p->second.t == t) {\n                value_type *vp = reinterpret_cast<value_type *>(&(p->first));\n                vp->~value_type();\n                p->second.t = NO_TAG;\n                if(prev) prev->second.next = p->second.next;\n                else pointer_array[h] = (element_type *)(p->second.next);\n                p->second.next = free_list;\n                free_list = p;\n                --nelements;\n                return;\n            }\n        }\n        __TBB_ASSERT(false, \"tag not found for delete\");\n    }\n};\n#endif // __TBB__flow_graph_tagged_buffer_impl_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_flow_graph_trace_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _FGT_GRAPH_TRACE_IMPL_H\n#define _FGT_GRAPH_TRACE_IMPL_H\n\n#include \"../tbb_profiling.h\"\n\nnamespace tbb {\n    namespace internal {\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n\nstatic inline void fgt_internal_create_input_port( void *node, void *p, string_index name_index ) {\n    itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index );\n}\n\nstatic inline void fgt_internal_create_output_port( void *node, void *p, string_index name_index ) {\n    itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index );\n}\n\ntemplate < typename TypesTuple, typename PortsTuple, int N >\nstruct fgt_internal_input_helper {\n    static void register_port( void *node, PortsTuple &ports ) {\n        fgt_internal_create_input_port( node, (void*)static_cast< tbb::flow::interface7::receiver< typename tbb::flow::tuple_element<N-1,TypesTuple>::type > * >(&(tbb::flow::get<N-1>(ports))),\n                                        static_cast<tbb::internal::string_index>(FLOW_INPUT_PORT_0 + N - 1) );\n        fgt_internal_input_helper<TypesTuple, PortsTuple, N-1>::register_port( node, ports );\n    } \n};\n\ntemplate < typename TypesTuple, typename PortsTuple >\nstruct fgt_internal_input_helper<TypesTuple,PortsTuple,1> {\n    static void register_port( void *node, PortsTuple &ports ) {\n        fgt_internal_create_input_port( node, (void*)static_cast< tbb::flow::interface7::receiver< typename tbb::flow::tuple_element<0,TypesTuple>::type > * >(&(tbb::flow::get<0>(ports))),\n                                        FLOW_INPUT_PORT_0 );\n    } \n};\n\ntemplate < typename TypesTuple, typename PortsTuple, int N >\nstruct fgt_internal_output_helper {\n    static void register_port( void *node, PortsTuple &ports ) {\n        fgt_internal_create_output_port( node, (void*)static_cast< 
tbb::flow::interface7::sender< typename tbb::flow::tuple_element<N-1,TypesTuple>::type > * >(&(tbb::flow::get<N-1>(ports))),\n                                         static_cast<tbb::internal::string_index>(FLOW_OUTPUT_PORT_0 + N - 1) ); \n        fgt_internal_output_helper<TypesTuple, PortsTuple, N-1>::register_port( node, ports );\n    } \n};\n\ntemplate < typename TypesTuple, typename PortsTuple >\nstruct fgt_internal_output_helper<TypesTuple,PortsTuple,1> {\n    static void register_port( void *node, PortsTuple &ports ) {\n        fgt_internal_create_output_port( node, (void*)static_cast< tbb::flow::interface7::sender< typename tbb::flow::tuple_element<0,TypesTuple>::type > * >(&(tbb::flow::get<0>(ports))),\n                                         FLOW_OUTPUT_PORT_0 ); \n    } \n};\n\ntemplate< typename NodeType >\nvoid fgt_multioutput_node_desc( const NodeType *node, const char *desc ) {\n    void *addr =  (void *)( static_cast< tbb::flow::interface7::receiver< typename NodeType::input_type > * >(const_cast< NodeType *>(node)) ); \n    itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); \n}\n\ntemplate< typename NodeType >\nstatic inline void fgt_node_desc( const NodeType *node, const char *desc ) {\n    void *addr =  (void *)( static_cast< tbb::flow::interface7::sender< typename NodeType::output_type > * >(const_cast< NodeType *>(node)) ); \n    itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); \n}\n\nstatic inline void fgt_graph_desc( void *g, const char *desc ) {\n    itt_metadata_str_add( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, FLOW_OBJECT_NAME, desc ); \n}\n\nstatic inline void fgt_body( void *node, void *body ) {\n    itt_relation_add( ITT_DOMAIN_FLOW, body, FLOW_BODY, __itt_relation_is_child_of, node, FLOW_NODE );\n}\n\ntemplate< typename OutputTuple, int N, typename PortsTuple >\nstatic inline void fgt_multioutput_node( string_index t, void *g, void *input_port, PortsTuple &ports ) {\n    
itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); \n    fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); \n    fgt_internal_output_helper<OutputTuple, PortsTuple, N>::register_port( input_port, ports ); \n}\n\ntemplate< typename OutputTuple, int N, typename PortsTuple >\nstatic inline void fgt_multioutput_node_with_body( string_index t, void *g, void *input_port, PortsTuple &ports, void *body ) {\n    itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); \n    fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); \n    fgt_internal_output_helper<OutputTuple, PortsTuple, N>::register_port( input_port, ports ); \n    fgt_body( input_port, body );\n}\n\n\ntemplate< typename InputTuple, int N, typename PortsTuple >\nstatic inline void fgt_multiinput_node( string_index t, void *g, PortsTuple &ports, void *output_port) {\n    itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); \n    fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); \n    fgt_internal_input_helper<InputTuple, PortsTuple, N>::register_port( output_port, ports ); \n}\n\nstatic inline void fgt_node( string_index t, void *g, void *output_port ) {\n    itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); \n    fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); \n}\n\nstatic inline void fgt_node_with_body( string_index t, void *g, void *output_port, void *body ) {\n    itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); \n    fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); \n    fgt_body( output_port, body );\n}\n\n\nstatic inline void fgt_node( string_index t, void *g, void *input_port, void *output_port ) {\n    fgt_node( t, g, output_port );\n    fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 
);\n}\n\nstatic inline void fgt_node_with_body( string_index t, void *g, void *input_port, void *output_port, void *body ) {\n    fgt_node_with_body( t, g, output_port, body );\n    fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); \n}\n\n\nstatic inline void  fgt_node( string_index t, void *g, void *input_port, void *decrement_port, void *output_port ) {\n    fgt_node( t, g, input_port, output_port );\n    fgt_internal_create_input_port( output_port, decrement_port, FLOW_INPUT_PORT_1 ); \n}\n\nstatic inline void fgt_make_edge( void *output_port, void *input_port ) {\n    itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_predecessor_to, input_port, FLOW_INPUT_PORT);\n}\n\nstatic inline void fgt_remove_edge( void *output_port, void *input_port ) {\n    itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_sibling_of, input_port, FLOW_INPUT_PORT);\n}\n\nstatic inline void fgt_graph( void *g ) {\n    itt_make_task_group( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, NULL, FLOW_NULL, FLOW_GRAPH ); \n}\n\nstatic inline void fgt_begin_body( void *body ) {\n    itt_task_begin( ITT_DOMAIN_FLOW, body, FLOW_BODY, NULL, FLOW_NULL, FLOW_NULL );\n}\n\nstatic inline void fgt_end_body( void * ) {\n    itt_task_end( ITT_DOMAIN_FLOW );\n}\n\n#else // TBB_PREVIEW_FLOW_GRAPH_TRACE\n\nstatic inline void fgt_graph( void * /*g*/ ) { }\n\ntemplate< typename NodeType >\nstatic inline void fgt_multioutput_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { }\n\ntemplate< typename NodeType >\nstatic inline void fgt_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { }\n\nstatic inline void fgt_graph_desc( void * /*g*/, const char * /*desc*/ ) { }\n\nstatic inline void fgt_body( void * /*node*/, void * /*body*/ ) { }\n\ntemplate< typename OutputTuple, int N, typename PortsTuple > \nstatic inline void fgt_multioutput_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & 
/*ports*/ ) { }\n\ntemplate< typename OutputTuple, int N, typename PortsTuple >\nstatic inline void fgt_multioutput_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/, void * /*body*/ ) { }\n\ntemplate< typename InputTuple, int N, typename PortsTuple >\nstatic inline void fgt_multiinput_node( string_index /*t*/, void * /*g*/, PortsTuple & /*ports*/, void * /*output_port*/ ) { }\n\nstatic inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*output_port*/ ) { } \nstatic inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/ ) { } \nstatic inline void  fgt_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*decrement_port*/, void * /*output_port*/ ) { }\n\nstatic inline void fgt_node_with_body( string_index /*t*/, void * /*g*/, void * /*output_port*/, void * /*body*/ ) { }\nstatic inline void fgt_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/, void * /*body*/ ) { }\n\nstatic inline void fgt_make_edge( void * /*output_port*/, void * /*input_port*/ ) { }\nstatic inline void fgt_remove_edge( void * /*output_port*/, void * /*input_port*/ ) { }\n\nstatic inline void fgt_begin_body( void * /*body*/ ) { }\nstatic inline void fgt_end_body( void *  /*body*/) { }\n\n#endif // TBB_PREVIEW_FLOW_GRAPH_TRACE\n\n    } // namespace internal\n} // namespace tbb\n\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_flow_graph_types_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__flow_graph_types_impl_H\n#define __TBB__flow_graph_types_impl_H\n\n#ifndef __TBB_flow_graph_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n// included in namespace tbb::flow::interface7\n\nnamespace internal {\n// wrap each element of a tuple in a template, and make a tuple of the result.\n\n    template<int N, template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements;\n\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<1, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type> >\n            type;\n    };\n\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<2, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type> >\n            type;\n    };\n\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<3, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<2,TypeTuple>::type> >\n            type;\n    };\n\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<4, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<2,TypeTuple>::type>,\n 
               PT<typename tbb::flow::tuple_element<3,TypeTuple>::type> >\n            type;\n    };\n\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<5, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<2,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<3,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<4,TypeTuple>::type> >\n            type;\n    };\n\n#if __TBB_VARIADIC_MAX >= 6\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<6, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<2,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<3,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<4,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<5,TypeTuple>::type> >\n            type;\n    };\n#endif\n\n#if __TBB_VARIADIC_MAX >= 7\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<7, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<2,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<3,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<4,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<5,TypeTuple>::type>,\n                
PT<typename tbb::flow::tuple_element<6,TypeTuple>::type> >\n            type;\n    };\n#endif\n\n#if __TBB_VARIADIC_MAX >= 8\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<8, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<2,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<3,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<4,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<5,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<6,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<7,TypeTuple>::type> >\n            type;\n    };\n#endif\n\n#if __TBB_VARIADIC_MAX >= 9\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<9, PT, TypeTuple> {\n        typedef typename tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<2,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<3,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<4,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<5,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<6,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<7,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<8,TypeTuple>::type> >\n            type;\n    };\n#endif\n\n#if __TBB_VARIADIC_MAX >= 10\n    template<template<class> class PT, typename TypeTuple>\n    struct wrap_tuple_elements<10, PT, TypeTuple> {\n        typedef typename 
tbb::flow::tuple<\n                PT<typename tbb::flow::tuple_element<0,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<1,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<2,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<3,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<4,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<5,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<6,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<7,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<8,TypeTuple>::type>,\n                PT<typename tbb::flow::tuple_element<9,TypeTuple>::type> >\n            type;\n    };\n#endif\n\n//! type mimicking std::pair but with trailing fill to ensure each element of an array\n//* will have the correct alignment\n    template<typename T1, typename T2, size_t REM>\n    struct type_plus_align {\n        char first[sizeof(T1)];\n        T2 second;\n        char fill1[REM];\n    };\n\n    template<typename T1, typename T2>\n    struct type_plus_align<T1,T2,0> {\n        char first[sizeof(T1)];\n        T2 second;\n    };\n\n    template<class U> struct alignment_of {\n        typedef struct { char t; U    padded; } test_alignment;\n        static const size_t value = sizeof(test_alignment) - sizeof(U);\n    };\n\n    // T1, T2 are actual types stored.  The space defined for T1 in the type returned\n    // is a char array of the correct size.  Type T2 should be trivially-constructible,\n    // T1 must be explicitly managed.\n    template<typename T1, typename T2>\n    struct aligned_pair {\n        static const size_t t1_align = alignment_of<T1>::value;\n        static const size_t t2_align = alignment_of<T2>::value;\n        typedef type_plus_align<T1, T2, 0 > just_pair;\n        static const size_t max_align = t1_align < t2_align ? 
t2_align : t1_align;\n        static const size_t extra_bytes = sizeof(just_pair) % max_align;\n        static const size_t remainder = extra_bytes ? max_align - extra_bytes : 0;\n    public:\n        typedef type_plus_align<T1,T2,remainder> type;\n    };  // aligned_pair\n\n// support for variant type\n// type we use when we're not storing a value\nstruct default_constructed { };\n\n// type which contains another type, tests for what type is contained, and references to it.\n// internal::Wrapper<T>\n//     void CopyTo( void *newSpace) : builds a Wrapper<T> copy of itself in newSpace\n\n// struct to allow us to copy and test the type of objects\nstruct WrapperBase {\n    virtual ~WrapperBase() {}\n    virtual void CopyTo(void* /*newSpace*/) const {  }\n};\n\n// Wrapper<T> contains a T, with the ability to test what T is.  The Wrapper<T> can be\n// constructed from a T, can be copy-constructed from another Wrapper<T>, and can be\n// examined via value(), but not modified.\ntemplate<typename T>\nstruct Wrapper: public WrapperBase {\n    typedef T value_type;\n    typedef T* pointer_type;\nprivate:\n    T value_space;\npublic:\n    const value_type &value() const { return value_space; }\n\nprivate:\n    Wrapper();\n\n    // on exception will ensure the Wrapper will contain only a trivially-constructed object\n    struct _unwind_space {\n        pointer_type space;\n        _unwind_space(pointer_type p) : space(p) {}\n        ~_unwind_space() {\n            if(space) (void) new (space) Wrapper<default_constructed>(default_constructed());\n        }\n    };\npublic:\n    explicit Wrapper( const T& other ) : value_space(other) { }\n    explicit Wrapper(const Wrapper& other) : value_space(other.value_space) { }\n\n    /*override*/void CopyTo(void* newSpace) const {\n        _unwind_space guard((pointer_type)newSpace);\n        (void) new(newSpace) Wrapper(value_space);\n        guard.space = NULL;\n    }\n    /*override*/~Wrapper() { }\n};\n\n// specialization for array 
objects\ntemplate<typename T, size_t N>\nstruct Wrapper<T[N]> : public WrapperBase {\n    typedef T value_type;\n    typedef T* pointer_type;\n    // space must be untyped.\n    typedef T ArrayType[N];\nprivate:\n    // The space is not of type T[N] because when copy-constructing, it would be\n    // default-initialized and then copied to in some fashion, resulting in two\n    // constructions and one destruction per element.  If the type is char[ ], we\n    // placement new into each element, resulting in one construction per element.\n    static const size_t space_size = sizeof(ArrayType) / sizeof(char);\n    char value_space[space_size];\n\n\n    // on exception will ensure the already-built objects will be destructed\n    // (the value_space is a char array, so it is already trivially-destructible.)\n    struct _unwind_class {\n        pointer_type space;\n        int    already_built;\n        _unwind_class(pointer_type p) : space(p), already_built(0) {}\n        ~_unwind_class() {\n            if(space) {\n                for(size_t i = already_built; i > 0 ; --i ) space[i-1].~value_type();\n                (void) new(space) Wrapper<default_constructed>(default_constructed());\n            }\n        }\n    };\npublic:\n    const ArrayType &value() const {\n        char *vp = const_cast<char *>(value_space);\n        return reinterpret_cast<ArrayType &>(*vp);\n    }\n\nprivate:\n    Wrapper();\npublic:\n    // have to explicitly construct because other decays to a const value_type*\n    explicit Wrapper(const ArrayType& other) {\n        _unwind_class guard((pointer_type)value_space);\n        pointer_type vp = reinterpret_cast<pointer_type>(&value_space);\n        for(size_t i = 0; i < N; ++i ) {\n            (void) new(vp++) value_type(other[i]);\n            ++(guard.already_built);\n        }\n        guard.space = NULL;\n    }\n    explicit Wrapper(const Wrapper& other) : WrapperBase() {\n        // we have to do the heavy lifting to copy contents\n      
  _unwind_class guard((pointer_type)value_space);\n        pointer_type dp = reinterpret_cast<pointer_type>(value_space);\n        pointer_type sp = reinterpret_cast<pointer_type>(const_cast<char *>(other.value_space));\n        for(size_t i = 0; i < N; ++i, ++dp, ++sp) {\n            (void) new(dp) value_type(*sp);\n            ++(guard.already_built);\n        }\n        guard.space = NULL;\n    }\n\n    /*override*/void CopyTo(void* newSpace) const {\n        (void) new(newSpace) Wrapper(*this);  // exceptions handled in copy constructor\n    }\n\n    /*override*/~Wrapper() {\n        // have to destroy explicitly in reverse order\n        pointer_type vp = reinterpret_cast<pointer_type>(&value_space);\n        for(size_t i = N; i > 0 ; --i ) vp[i-1].~value_type();\n    }\n};\n\n// given a tuple, return the type of the element that has the maximum alignment requirement.\n// Given a tuple and that type, return the number of elements of the object with the max\n// alignment requirement that is at least as big as the largest object in the tuple.\n\ntemplate<bool, class T1, class T2> struct pick_one;\ntemplate<class T1, class T2> struct pick_one<true , T1, T2> { typedef T1 type; };\ntemplate<class T1, class T2> struct pick_one<false, T1, T2> { typedef T2 type; };\n\ntemplate< template<class> class Selector, typename T1, typename T2 >\nstruct pick_max {\n    typedef typename pick_one< (Selector<T1>::value > Selector<T2>::value), T1, T2 >::type type;\n};\n\ntemplate<typename T> struct size_of { static const int value = sizeof(T); };\n\ntemplate< size_t N, class Tuple, template<class> class Selector > struct pick_tuple_max {\n    typedef typename pick_tuple_max<N-1, Tuple, Selector>::type LeftMaxType;\n    typedef typename tbb::flow::tuple_element<N-1, Tuple>::type ThisType;\n    typedef typename pick_max<Selector, LeftMaxType, ThisType>::type type;\n};\n\ntemplate< class Tuple, template<class> class Selector > struct pick_tuple_max<0, Tuple, Selector> {\n    typedef 
typename tbb::flow::tuple_element<0, Tuple>::type type;\n};\n\n// is the specified type included in a tuple?\n\ntemplate<class U, class V> struct is_same_type      { static const bool value = false; };\ntemplate<class W>          struct is_same_type<W,W> { static const bool value = true; };\n\ntemplate<class Q, size_t N, class Tuple>\nstruct is_element_of {\n    typedef typename tbb::flow::tuple_element<N-1, Tuple>::type T_i;\n    static const bool value = is_same_type<Q,T_i>::value || is_element_of<Q,N-1,Tuple>::value;\n};\n\ntemplate<class Q, class Tuple>\nstruct is_element_of<Q,0,Tuple> {\n    typedef typename tbb::flow::tuple_element<0, Tuple>::type T_i;\n    static const bool value = is_same_type<Q,T_i>::value;\n};\n\n// allow the construction of types that are listed tuple.  If a disallowed type\n// construction is written, a method involving this type is created.  The\n// type has no definition, so a syntax error is generated.\ntemplate<typename T> struct ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple;\n\ntemplate<typename T, bool BUILD_IT> struct do_if;\ntemplate<typename T>\nstruct do_if<T, true> {\n    static void construct(void *mySpace, const T& x) {\n        (void) new(mySpace) Wrapper<T>(x);\n    }\n};\ntemplate<typename T>\nstruct do_if<T, false> {\n    static void construct(void * /*mySpace*/, const T& x) {\n        // This method is instantiated when the type T does not match any of the\n        // element types in the Tuple in variant<Tuple>.\n        ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple<T>::bad_type(x);\n    }\n};\n\n// Tuple tells us the allowed types that variant can hold.  
It determines the alignment of the space in\n// Wrapper, and how big Wrapper is.\n//\n// the object can only be tested for type, and a read-only reference can be fetched by cast_to<T>().\n\nusing tbb::internal::punned_cast;\nstruct tagged_null_type {};\ntemplate<typename TagType, typename T0, typename T1=tagged_null_type, typename T2=tagged_null_type, typename T3=tagged_null_type, \n                           typename T4=tagged_null_type, typename T5=tagged_null_type, typename T6=tagged_null_type,\n                           typename T7=tagged_null_type, typename T8=tagged_null_type, typename T9=tagged_null_type>\nclass tagged_msg {\n    typedef tbb::flow::tuple<T0, T1, T2, T3, T4\n                  #if __TBB_VARIADIC_MAX >= 6\n                  , T5\n                  #endif\n                  #if __TBB_VARIADIC_MAX >= 7\n                  , T6\n                  #endif\n                  #if __TBB_VARIADIC_MAX >= 8\n                  , T7\n                  #endif\n                  #if __TBB_VARIADIC_MAX >= 9\n                  , T8\n                  #endif\n                  #if __TBB_VARIADIC_MAX >= 10\n                  , T9\n                  #endif\n                  > Tuple;   \n\nprivate:\n    class variant {\n        static const size_t N = tbb::flow::tuple_size<Tuple>::value;\n        typedef typename pick_tuple_max<N, Tuple, alignment_of>::type AlignType;\n        typedef typename pick_tuple_max<N, Tuple, size_of>::type MaxSizeType;\n        static const size_t MaxNBytes = (sizeof(Wrapper<MaxSizeType>)+sizeof(AlignType)-1);\n        static const size_t MaxNElements = MaxNBytes/sizeof(AlignType);\n        typedef typename tbb::aligned_space<AlignType, MaxNElements> SpaceType;\n        SpaceType my_space;\n        static const size_t MaxSize = sizeof(SpaceType);\n\n    public:\n        variant() { (void) new(&my_space) Wrapper<default_constructed>(default_constructed()); }\n\n        template<typename T>\n        variant( const T& x ) {\n            
do_if<T, is_element_of<T, N, Tuple>::value>::construct(&my_space,x);\n        }\n\n        variant(const variant& other) {\n            const WrapperBase * h = punned_cast<const WrapperBase *>(&(other.my_space));\n            h->CopyTo(&my_space);\n        }\n\n        // assignment must destroy and re-create the Wrapper type, as there is no way\n        // to create a Wrapper-to-Wrapper assign even if we find they agree in type.\n        void operator=( const variant& rhs ) {\n            if(&rhs != this) {\n                WrapperBase *h = punned_cast<WrapperBase *>(&my_space);\n                h->~WrapperBase();\n                const WrapperBase *ch = punned_cast<const WrapperBase *>(&(rhs.my_space));\n                ch->CopyTo(&my_space);\n            }\n        }\n\n        template<typename U>\n        const U& variant_cast_to() const {\n            const Wrapper<U> *h = dynamic_cast<const Wrapper<U>*>(punned_cast<const WrapperBase *>(&my_space));\n            if(!h) {\n                tbb::internal::throw_exception(tbb::internal::eid_bad_tagged_msg_cast);\n            }\n            return h->value();\n        }\n        template<typename U>\n        bool variant_is_a() const { return dynamic_cast<const Wrapper<U>*>(punned_cast<const WrapperBase *>(&my_space)) != NULL; }\n\n        bool variant_is_default_constructed() const {return variant_is_a<default_constructed>();}\n\n        ~variant() {\n            WrapperBase *h = punned_cast<WrapperBase *>(&my_space);\n            h->~WrapperBase();\n        }\n    }; //class variant\n\n    TagType my_tag;\n    variant my_msg;\n\npublic:\n    tagged_msg(): my_tag(TagType(~0)), my_msg(){} \n\n    template<typename T, typename R>\n    tagged_msg(T const &index, R const &value) : my_tag(index), my_msg(value) {}\n    \n    #if __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN\n    template<typename T, typename R, size_t N>\n    tagged_msg(T const &index,  R (&value)[N]) : my_tag(index), my_msg(value) {}\n    #endif\n\n  
  void set_tag(TagType const &index) {my_tag = index;}\n    TagType tag() const {return my_tag;}\n\n    template<typename V>\n    const V& cast_to() const {return my_msg.template variant_cast_to<V>();}\n\n    template<typename V>\n    bool is_a() const {return my_msg.template variant_is_a<V>();}\n\n    bool is_default_constructed() const {return my_msg.variant_is_default_constructed();}\n}; //class tagged_msg\n\n// template to simplify cast and test for tagged_msg in template contexts\ntemplate<typename T, typename V>\nconst T& cast_to(V const &v) { return v.template cast_to<T>(); }\n\ntemplate<typename T, typename V>\nbool is_a(V const &v) { return v.template is_a<T>(); }\n\n}  // namespace internal\n\n#endif  /* __TBB__flow_graph_types_impl_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_mutex_padding.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_mutex_padding_H\n#define __TBB_mutex_padding_H\n\n// wrapper for padding mutexes to be alone on a cache line, without requiring they be allocated\n// from a pool.  
Because we allow them to be defined anywhere they must be two cache lines in size.\n\n\nnamespace tbb {\nnamespace interface7 {\nnamespace internal {\n\nstatic const size_t cache_line_size = 64;\n\n// Pad a mutex to occupy a number of full cache lines sufficient to avoid false sharing\n// with other data; space overhead is up to 2*cache_line_size-1.\ntemplate<typename Mutex, bool is_rw> class padded_mutex;\n\ntemplate<typename Mutex>\nclass padded_mutex<Mutex,false> : tbb::internal::mutex_copy_deprecated_and_disabled {\n    typedef long pad_type;\n    pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)];\n\n    Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);}\n\npublic:\n    static const bool is_rw_mutex = Mutex::is_rw_mutex;\n    static const bool is_recursive_mutex = Mutex::is_recursive_mutex;\n    static const bool is_fair_mutex = Mutex::is_fair_mutex;\n\n    padded_mutex() { new(impl()) Mutex(); }\n    ~padded_mutex() { impl()->~Mutex(); }\n\n    //! 
Represents acquisition of a mutex.\n    class scoped_lock :  tbb::internal::no_copy {\n        typename Mutex::scoped_lock my_scoped_lock;\n    public:\n        scoped_lock() : my_scoped_lock() {}\n        scoped_lock( padded_mutex& m ) : my_scoped_lock(*m.impl()) { }\n        ~scoped_lock() {  }\n\n        void acquire( padded_mutex& m ) { my_scoped_lock.acquire(*m.impl()); }\n        bool try_acquire( padded_mutex& m ) { return my_scoped_lock.try_acquire(*m.impl()); }\n        void release() { my_scoped_lock.release(); }\n    };\n};\n\ntemplate<typename Mutex>\nclass padded_mutex<Mutex,true> : tbb::internal::mutex_copy_deprecated_and_disabled {\n    typedef long pad_type;\n    pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)];\n\n    Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);}\n\npublic:\n    static const bool is_rw_mutex = Mutex::is_rw_mutex;\n    static const bool is_recursive_mutex = Mutex::is_recursive_mutex;\n    static const bool is_fair_mutex = Mutex::is_fair_mutex;\n\n    padded_mutex() { new(impl()) Mutex(); }\n    ~padded_mutex() { impl()->~Mutex(); }\n\n    //! 
Represents acquisition of a mutex.\n    class scoped_lock :  tbb::internal::no_copy {\n        typename Mutex::scoped_lock my_scoped_lock;\n    public:\n        scoped_lock() : my_scoped_lock() {}\n        scoped_lock( padded_mutex& m, bool write = true ) : my_scoped_lock(*m.impl(),write) { }\n        ~scoped_lock() {  }\n\n        void acquire( padded_mutex& m, bool write = true ) { my_scoped_lock.acquire(*m.impl(),write); }\n        bool try_acquire( padded_mutex& m, bool write = true ) { return my_scoped_lock.try_acquire(*m.impl(),write); }\n        bool upgrade_to_writer() { return my_scoped_lock.upgrade_to_writer(); }\n        bool downgrade_to_reader() { return my_scoped_lock.downgrade_to_reader(); }\n        void release() { my_scoped_lock.release(); }\n    };\n};\n\n} // namespace internal\n} // namespace interface7\n} // namespace tbb\n\n#endif /* __TBB_mutex_padding_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_range_iterator.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_range_iterator_H\n#define __TBB_range_iterator_H\n\n#include \"../tbb_stddef.h\"\n\n#if __TBB_CPP11_STD_BEGIN_END_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_DECLTYPE_PRESENT\n    #include <iterator>\n#endif\n\nnamespace tbb {\n    // iterators to first and last elements of container\n    namespace internal {\n\n#if __TBB_CPP11_STD_BEGIN_END_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_DECLTYPE_PRESENT\n        using std::begin;\n        using std::end;\n        template<typename Container>\n        auto first(Container& c)-> decltype(begin(c))  {return begin(c);}\n\n        template<typename Container>\n        auto first(const Container& c)-> decltype(begin(c))  {return begin(c);}\n\n        template<typename Container>\n        auto last(Container& c)-> decltype(begin(c))  {return end(c);}\n\n        template<typename Container>\n        auto last(const Container& c)-> decltype(begin(c)) {return end(c);}\n#else\n        template<typename Container>\n        typename Container::iterator first(Container& c) {return c.begin();}\n\n        template<typename Container>\n        typename Container::const_iterator first(const Container& c) {return c.begin();}\n\n        template<typename Container>\n        typename Container::iterator last(Container& c) {return c.end();}\n\n        template<typename Container>\n        typename Container::const_iterator last(const Container& c) {return c.end();}\n#endif\n\n        template<typename T, size_t size>\n        T* first(T (&arr) [size]) {return arr;}\n\n        template<typename T, size_t size>\n        T* last(T (&arr) [size]) {return arr + size;}\n    } //namespace internal\n}  //namespace tbb\n\n#endif // __TBB_range_iterator_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_tbb_strings.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\nTBB_STRING_RESOURCE(FLOW_BROADCAST_NODE, \"broadcast_node\")\nTBB_STRING_RESOURCE(FLOW_BUFFER_NODE, \"buffer_node\")\nTBB_STRING_RESOURCE(FLOW_CONTINUE_NODE, \"continue_node\")\nTBB_STRING_RESOURCE(FLOW_FUNCTION_NODE, \"function_node\")\nTBB_STRING_RESOURCE(FLOW_JOIN_NODE_QUEUEING, \"join_node (queueing)\")\nTBB_STRING_RESOURCE(FLOW_JOIN_NODE_RESERVING, \"join_node (reserving)\")\nTBB_STRING_RESOURCE(FLOW_JOIN_NODE_TAG_MATCHING, \"join_node (tag_matching)\")\nTBB_STRING_RESOURCE(FLOW_LIMITER_NODE, \"limiter_node\")\nTBB_STRING_RESOURCE(FLOW_MULTIFUNCTION_NODE, \"multifunction_node\")\nTBB_STRING_RESOURCE(FLOW_OR_NODE, \"or_node\") //no longer in use, kept for backward compatibilty\nTBB_STRING_RESOURCE(FLOW_OVERWRITE_NODE, \"overwrite_node\")\nTBB_STRING_RESOURCE(FLOW_PRIORITY_QUEUE_NODE, \"priority_queue_node\")\nTBB_STRING_RESOURCE(FLOW_QUEUE_NODE, \"queue_node\")\nTBB_STRING_RESOURCE(FLOW_SEQUENCER_NODE, \"sequencer_node\")\nTBB_STRING_RESOURCE(FLOW_SOURCE_NODE, \"source_node\")\nTBB_STRING_RESOURCE(FLOW_SPLIT_NODE, \"split_node\")\nTBB_STRING_RESOURCE(FLOW_WRITE_ONCE_NODE, \"write_once_node\")\nTBB_STRING_RESOURCE(FLOW_BODY, \"body\")\nTBB_STRING_RESOURCE(FLOW_GRAPH, \"graph\")\nTBB_STRING_RESOURCE(FLOW_NODE, \"node\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT, \"input_port\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_0, \"input_port_0\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_1, \"input_port_1\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_2, \"input_port_2\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_3, \"input_port_3\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_4, \"input_port_4\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_5, \"input_port_5\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_6, \"input_port_6\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_7, \"input_port_7\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_8, 
\"input_port_8\")\nTBB_STRING_RESOURCE(FLOW_INPUT_PORT_9, \"input_port_9\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT, \"output_port\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_0, \"output_port_0\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_1, \"output_port_1\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_2, \"output_port_2\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_3, \"output_port_3\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_4, \"output_port_4\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_5, \"output_port_5\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_6, \"output_port_6\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_7, \"output_port_7\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_8, \"output_port_8\")\nTBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_9, \"output_port_9\")\nTBB_STRING_RESOURCE(FLOW_OBJECT_NAME, \"object_name\")\nTBB_STRING_RESOURCE(FLOW_NULL, \"null\")\nTBB_STRING_RESOURCE(FLOW_INDEXER_NODE, \"indexer_node\")\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_tbb_windef.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tbb_windef_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif /* __TBB_tbb_windef_H */\n\n// Check that the target Windows version has all API calls requried for TBB.\n// Do not increase the version in condition beyond 0x0500 without prior discussion!\n#if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0501\n#error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0501 or greater.\n#endif\n\n#if !defined(_MT)\n#error TBB requires linkage with multithreaded C/C++ runtime library. 
\\\n       Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch.\n#endif\n\n// Workaround for the problem with MVSC headers failing to define namespace std\nnamespace std {\n  using ::size_t; using ::ptrdiff_t;\n}\n\n#define __TBB_STRING_AUX(x) #x\n#define __TBB_STRING(x) __TBB_STRING_AUX(x)\n\n// Default setting of TBB_USE_DEBUG\n#ifdef TBB_USE_DEBUG\n#    if TBB_USE_DEBUG \n#        if !defined(_DEBUG)\n#            pragma message(__FILE__ \"(\" __TBB_STRING(__LINE__) \") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0\")\n#        endif\n#    else\n#        if defined(_DEBUG)\n#            pragma message(__FILE__ \"(\" __TBB_STRING(__LINE__) \") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0\")\n#        endif\n#    endif\n#endif\n\n#if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE)\n#define __TBB_NO_IMPLICIT_LINKAGE 1\n#endif\n\n#if _MSC_VER\n    #if !__TBB_NO_IMPLICIT_LINKAGE\n        #ifdef __TBB_LIB_NAME\n\t        #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME))\n        #else\n\t\t\t#ifdef _DEBUG\n\t\t\t\t#pragma comment(lib, \"tbb_debug.lib\")\n\t\t\t#else\n\t\t\t\t#pragma comment(lib, \"tbb.lib\")\n\t\t\t#endif\n        #endif\n    #endif\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_x86_eliding_mutex_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__x86_eliding_mutex_impl_H\n#define __TBB__x86_eliding_mutex_impl_H\n\n#ifndef __TBB_spin_mutex_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#if ( __TBB_x86_32 || __TBB_x86_64 )\n\nnamespace tbb {\nnamespace interface7 {\nnamespace internal {\n\ntemplate<typename Mutex, bool is_rw>\nclass padded_mutex;\n\n//! An eliding lock that occupies a single byte.\n/** A x86_eliding_mutex is an HLE-enabled spin mutex. 
It is recommended to\n    put the mutex on a cache line that is not shared by the data it protects.\n    It should be used for locking short critical sections where the lock is\n    contended but the data it protects are not.  If zero-initialized, the\n    mutex is considered unheld.\n    @ingroup synchronization */\nclass x86_eliding_mutex : tbb::internal::mutex_copy_deprecated_and_disabled {\n    //! 0 if lock is released, 1 if lock is acquired.\n    __TBB_atomic_flag flag;\n\n    friend class padded_mutex<x86_eliding_mutex, false>;\n\npublic:\n    //! Construct unacquired lock.\n    /** Equivalent to zero-initialization of *this. */\n    x86_eliding_mutex() : flag(0) {}\n\n// bug in gcc 3.x.x causes syntax error in spite of the friend declaration above.\n// Make the scoped_lock public in that case.\n#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000\n#else\n    // by default we will not provide the scoped_lock interface.  The user\n    // should use the padded version of the mutex.  scoped_lock is used in\n    // padded_mutex template.\nprivate:\n#endif\n    // scoped_lock in padded_mutex<> is the interface to use.\n    //! Represents acquisition of a mutex.\n    class scoped_lock : tbb::internal::no_copy {\n    private:\n        //! Points to currently held mutex, or NULL if no lock is held.\n        x86_eliding_mutex* my_mutex;\n\n    public:\n        //! Construct without acquiring a mutex.\n        scoped_lock() : my_mutex(NULL) {}\n\n        //! Construct and acquire lock on a mutex.\n        scoped_lock( x86_eliding_mutex& m ) : my_mutex(NULL) { acquire(m); }\n\n        //! Acquire lock.\n        void acquire( x86_eliding_mutex& m ) {\n            __TBB_ASSERT( !my_mutex, \"already holding a lock\" );\n\n            my_mutex=&m;\n            my_mutex->lock();\n        }\n\n        //! Try acquiring lock (non-blocking)\n        /** Return true if lock acquired; false otherwise. 
*/\n        bool try_acquire( x86_eliding_mutex& m ) {\n            __TBB_ASSERT( !my_mutex, \"already holding a lock\" );\n\n            bool result = m.try_lock();\n            if( result ) {\n                my_mutex = &m;\n            }\n            return result;\n        }\n\n        //! Release lock\n        void release() {\n            __TBB_ASSERT( my_mutex, \"release on scoped_lock that is not holding a lock\" );\n\n            my_mutex->unlock();\n            my_mutex = NULL;\n        }\n\n        //! Destroy lock.  If holding a lock, releases the lock first.\n        ~scoped_lock() {\n            if( my_mutex ) {\n                release();\n            }\n        }\n    };\n#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000\n#else\npublic:\n#endif  /* __TBB_USE_X86_ELIDING_MUTEX */\n\n    // Mutex traits\n    static const bool is_rw_mutex = false;\n    static const bool is_recursive_mutex = false;\n    static const bool is_fair_mutex = false;\n\n    // ISO C++0x compatibility methods\n\n    //! Acquire lock\n    void lock() {\n        __TBB_LockByteElided(flag);\n    }\n\n    //! Try acquiring lock (non-blocking)\n    /** Return true if lock acquired; false otherwise. */\n    bool try_lock() {\n        return __TBB_TryLockByteElided(flag);\n    }\n\n    //! Release lock\n    void unlock() {\n        __TBB_UnlockByteElided( flag );\n    }\n}; // end of x86_eliding_mutex\n\n} // namespace internal\n} // namespace interface7\n} // namespace tbb\n\n#endif /* ( __TBB_x86_32 || __TBB_x86_64 ) */\n\n#endif /* __TBB__x86_eliding_mutex_impl_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/internal/_x86_rtm_rw_mutex_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB__x86_rtm_rw_mutex_impl_H\n#define __TBB__x86_rtm_rw_mutex_impl_H\n\n#ifndef __TBB_spin_rw_mutex_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#if __TBB_TSX_AVAILABLE\n\n#include \"../tbb_stddef.h\"\n#include \"../tbb_machine.h\"\n#include \"../tbb_profiling.h\"\n#include \"../spin_rw_mutex.h\"\n\nnamespace tbb {\nnamespace interface8 {\nnamespace internal {\n\nenum RTM_type {\n    RTM_not_in_mutex,\n    RTM_transacting_reader,\n    RTM_transacting_writer,\n    RTM_real_reader,\n    RTM_real_writer\n};\n\nstatic const unsigned long speculation_granularity = 64;\n\n//! Fast, unfair, spinning speculation-enabled reader-writer lock with backoff and\n//  writer-preference\n/** @ingroup synchronization */\nclass x86_rtm_rw_mutex: private spin_rw_mutex {\n#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000\n// bug in gcc 3.x.x causes syntax error in spite of the friend declaration below.\n// Make the scoped_lock public in that case.\npublic:\n#else\nprivate:\n#endif\n    friend class interface7::internal::padded_mutex<x86_rtm_rw_mutex,true>;\n    class scoped_lock;   // should be private \n    friend class scoped_lock;\nprivate:\n    //! @cond INTERNAL\n\n    //! Internal construct unacquired mutex.\n    void __TBB_EXPORTED_METHOD internal_construct();\n\n    //! Internal acquire write lock.\n    // only_speculate == true if we're doing a try_lock, else false.\n    void __TBB_EXPORTED_METHOD internal_acquire_writer(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false);\n\n    //! Internal acquire read lock.\n    // only_speculate == true if we're doing a try_lock, else false.\n    void __TBB_EXPORTED_METHOD internal_acquire_reader(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false);\n\n    //! 
Internal upgrade reader to become a writer.\n    bool __TBB_EXPORTED_METHOD internal_upgrade( x86_rtm_rw_mutex::scoped_lock& );\n\n    //! Out of line code for downgrading a writer to a reader.\n    bool __TBB_EXPORTED_METHOD internal_downgrade( x86_rtm_rw_mutex::scoped_lock& );\n\n    //! Internal try_acquire write lock.\n    bool __TBB_EXPORTED_METHOD internal_try_acquire_writer( x86_rtm_rw_mutex::scoped_lock& );\n\n    //! Internal release lock.\n    void __TBB_EXPORTED_METHOD internal_release( x86_rtm_rw_mutex::scoped_lock& );\n\n    static x86_rtm_rw_mutex* internal_get_mutex( const spin_rw_mutex::scoped_lock& lock )\n    {\n        return static_cast<x86_rtm_rw_mutex*>( lock.internal_get_mutex() );\n    }\n    static void internal_set_mutex( spin_rw_mutex::scoped_lock& lock, spin_rw_mutex* mtx )\n    {\n        lock.internal_set_mutex( mtx );\n    }\n    //! @endcond\npublic:\n    //! Construct unacquired mutex.\n    x86_rtm_rw_mutex() {\n        w_flag = false;\n#if TBB_USE_THREADING_TOOLS\n        internal_construct();\n#endif\n    }\n\n#if TBB_USE_ASSERT\n    //! Empty destructor.\n    ~x86_rtm_rw_mutex() {}\n#endif /* TBB_USE_ASSERT */\n\n    // Mutex traits\n    static const bool is_rw_mutex = true;\n    static const bool is_recursive_mutex = false;\n    static const bool is_fair_mutex = false;\n\n#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000\n#else\n    // by default we will not provide the scoped_lock interface.  The user\n    // should use the padded version of the mutex.  scoped_lock is used in\n    // padded_mutex template.\nprivate:\n#endif\n    //! The scoped locking pattern\n    /** It helps to avoid the common problem of forgetting to release lock.\n        It also nicely provides the \"node\" for queuing locks. */\n    // Speculation-enabled scoped lock for spin_rw_mutex\n    // The idea is to be able to reuse the acquire/release methods of spin_rw_mutex\n    // and its scoped lock wherever possible.  
The only way to use a speculative lock is to use\n    // a scoped_lock. (because transaction_state must be local)\n\n    class scoped_lock : tbb::internal::no_copy {\n        friend class x86_rtm_rw_mutex;\n        spin_rw_mutex::scoped_lock my_scoped_lock;\n\n        RTM_type transaction_state;\n\n    public:\n        //! Construct lock that has not acquired a mutex.\n        /** Equivalent to zero-initialization of *this. */\n        scoped_lock() : my_scoped_lock(), transaction_state(RTM_not_in_mutex) {\n        }\n\n        //! Acquire lock on given mutex.\n        scoped_lock( x86_rtm_rw_mutex& m, bool write = true ) : my_scoped_lock(),\n            transaction_state(RTM_not_in_mutex) {\n            acquire(m, write);\n        }\n\n        //! Release lock (if lock is held).\n        ~scoped_lock() {\n            if(transaction_state != RTM_not_in_mutex) release();\n        }\n\n        //! Acquire lock on given mutex.\n        void acquire( x86_rtm_rw_mutex& m, bool write = true ) {\n            if( write ) m.internal_acquire_writer(*this);\n            else        m.internal_acquire_reader(*this);\n        }\n\n        //! Release lock\n        void release() {\n            x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock);\n            __TBB_ASSERT( mutex, \"lock is not acquired\" );\n            __TBB_ASSERT( transaction_state!=RTM_not_in_mutex, \"lock is not acquired\" );\n            return mutex->internal_release(*this);\n        }\n\n        //! 
Upgrade reader to become a writer.\n        /** Returns whether the upgrade happened without releasing and re-acquiring the lock */\n        bool upgrade_to_writer() {\n            x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock);\n            __TBB_ASSERT( mutex, \"lock is not acquired\" );\n            __TBB_ASSERT( transaction_state==RTM_transacting_reader || transaction_state==RTM_real_reader, \"Invalid state for upgrade\" );\n            return mutex->internal_upgrade(*this);\n        }\n\n        //! Downgrade writer to become a reader.\n        /** Returns whether the downgrade happened without releasing and re-acquiring the lock */\n        bool downgrade_to_reader() {\n            x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock);\n            __TBB_ASSERT( mutex, \"lock is not acquired\" );\n            __TBB_ASSERT( transaction_state==RTM_transacting_writer || transaction_state==RTM_real_writer, \"Invalid state for downgrade\" );\n            return mutex->internal_downgrade(*this);\n        }\n\n        //! Attempt to acquire mutex.\n        /** returns true if successful.  */\n        bool try_acquire( x86_rtm_rw_mutex& m, bool write = true ) {\n#if TBB_USE_ASSERT\n            x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock);\n            __TBB_ASSERT( !mutex, \"lock is already acquired\" );\n#endif\n            // have to assign m to our mutex.\n            // cannot set the mutex, because try_acquire in spin_rw_mutex depends on it being NULL.\n            if(write) return m.internal_try_acquire_writer(*this);\n            // speculatively acquire the lock.  
If this fails, do try_acquire on the spin_rw_mutex.\n            m.internal_acquire_reader(*this, /*only_speculate=*/true);\n            if(transaction_state == RTM_transacting_reader) return true;\n            if( my_scoped_lock.try_acquire(m, false)) {\n                transaction_state = RTM_real_reader;\n                return true;\n            }\n            return false;\n        }\n\n        };  // class x86_rtm_rw_mutex::scoped_lock\n\n    // ISO C++0x compatibility methods not provided because we cannot maintain\n    // state about whether a thread is in a transaction.\n\nprivate:\n    char pad[speculation_granularity-sizeof(spin_rw_mutex)]; // padding\n\n    // If true, writer holds the spin_rw_mutex.\n    tbb::atomic<bool> w_flag;  // want this on a separate cache line\n\n};  // x86_rtm_rw_mutex\n\n}  // namespace internal\n}  // namespace interface8\n}  // namespace tbb\n\n#endif  /* __TBB_TSX_AVAILABLE */\n#endif /* __TBB__x86_rtm_rw_mutex_impl_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/intrusive_list.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_intrusive_list_H\n#define _TBB_intrusive_list_H\n\n#include \"tbb/tbb_stddef.h\"\n\nnamespace tbb {\nnamespace internal {\n\n//! Data structure to be inherited by the types that can form intrusive lists.\n/** Intrusive list is formed by means of the member_intrusive_list<T> template class.\n    Note that type T must derive from intrusive_list_node either publicly or \n    declare instantiation member_intrusive_list<T> as a friend.\n    This class implements a limited subset of std::list interface. 
**/\nstruct intrusive_list_node {\n    intrusive_list_node *my_prev_node,\n                        *my_next_node;\n#if TBB_USE_ASSERT\n    intrusive_list_node () { my_prev_node = my_next_node = this; }\n#endif /* TBB_USE_ASSERT */\n};\n\n//! List of element of type T, where T is derived from intrusive_list_node\n/** The class is not thread safe. **/\ntemplate <class List, class T>\nclass intrusive_list_base {\n    //! Pointer to the head node\n    intrusive_list_node my_head;\n\n    //! Number of list elements\n    size_t my_size;\n\n    static intrusive_list_node& node ( T& item ) { return List::node(item); }\n\n    static T& item ( intrusive_list_node* node ) { return List::item(node); }\n\n    template<class Iterator>\n    class iterator_impl {\n        Iterator& self () { return *static_cast<Iterator*>(this); }\n\n        //! Node the iterator points to at the moment\n        intrusive_list_node *my_pos;\n\n    protected:\n        iterator_impl (intrusive_list_node* pos )\n            :  my_pos(pos)\n        {}\n\n        T& item () const {\n            return intrusive_list_base::item(my_pos);\n        }\n\n    public:\n        iterator_impl () :  my_pos(NULL) {}\n\n        Iterator& operator = ( const Iterator& it ) {\n            return my_pos = it.my_pos;\n        }\n\n        Iterator& operator = ( const T& val ) {\n            return my_pos = &node(val);\n        }\n\n        bool operator == ( const Iterator& it ) const {\n            return my_pos == it.my_pos;\n        }\n\n        bool operator != ( const Iterator& it ) const {\n            return my_pos != it.my_pos;\n        }\n\n        Iterator& operator++ () {\n            my_pos = my_pos->my_next_node;\n            return self();\n        }\n\n        Iterator& operator-- () {\n            my_pos = my_pos->my_prev_node;\n            return self();\n        }\n\n        Iterator operator++ ( int ) {\n            Iterator result = self();\n            ++(*this);\n            return result;\n       
 }\n\n        Iterator operator-- ( int ) {\n            Iterator result = self();\n            --(*this);\n            return result;\n        }\n    }; // intrusive_list_base::iterator_impl\n\n    void assert_ok () const {\n        __TBB_ASSERT( (my_head.my_prev_node == &my_head && !my_size) || \n                      (my_head.my_next_node != &my_head && my_size >0), \"intrusive_list_base corrupted\" );\n#if TBB_USE_ASSERT >= 2\n        size_t i = 0;\n        for ( intrusive_list_node *n = my_head.my_next_node; n != &my_head; n = n->my_next_node )\n            ++i;\n        __TBB_ASSERT( my_size == i, \"Wrong size\" );\n#endif /* TBB_USE_ASSERT >= 2 */\n    }\n\npublic:\n    class iterator : public iterator_impl<iterator> {\n        template <class U, class V> friend class intrusive_list_base;\n    public:\n        iterator (intrusive_list_node* pos )\n            : iterator_impl<iterator>(pos )\n        {}\n        iterator () {}\n\n        T* operator-> () const { return &this->item(); }\n\n        T& operator* () const { return this->item(); }\n    }; // class iterator\n\n    class const_iterator : public iterator_impl<const_iterator> {\n        template <class U, class V> friend class intrusive_list_base;\n    public:\n        const_iterator (const intrusive_list_node* pos )\n            : iterator_impl<const_iterator>(const_cast<intrusive_list_node*>(pos) )\n        {}\n        const_iterator () {}\n\n        const T* operator-> () const { return &this->item(); }\n\n        const T& operator* () const { return this->item(); }\n    }; // class iterator\n\n    intrusive_list_base () : my_size(0) {\n        my_head.my_prev_node = &my_head;\n        my_head.my_next_node = &my_head;\n    }\n\n    bool empty () const { return my_head.my_next_node == &my_head; }\n\n    size_t size () const { return my_size; }\n\n    iterator begin () { return iterator(my_head.my_next_node); }\n\n    iterator end () { return iterator(&my_head); }\n\n    const_iterator begin () const 
{ return const_iterator(my_head.my_next_node); }\n\n    const_iterator end () const { return const_iterator(&my_head); }\n\n    void push_front ( T& val ) {\n        __TBB_ASSERT( node(val).my_prev_node == &node(val) && node(val).my_next_node == &node(val), \n                    \"Object with intrusive list node can be part of only one intrusive list simultaneously\" );\n        // An object can be part of only one intrusive list at the given moment via the given node member \n        node(val).my_prev_node = &my_head;\n        node(val).my_next_node = my_head.my_next_node;\n        my_head.my_next_node->my_prev_node = &node(val);\n        my_head.my_next_node = &node(val);\n        ++my_size;\n        assert_ok();\n    }\n\n    void remove( T& val ) {\n        __TBB_ASSERT( node(val).my_prev_node != &node(val) && node(val).my_next_node != &node(val), \"Element to remove is not in the list\" );\n        __TBB_ASSERT( node(val).my_prev_node->my_next_node == &node(val) && node(val).my_next_node->my_prev_node == &node(val), \"Element to remove is not in the list\" );\n        --my_size;\n        node(val).my_next_node->my_prev_node = node(val).my_prev_node;\n        node(val).my_prev_node->my_next_node = node(val).my_next_node;\n#if TBB_USE_ASSERT\n        node(val).my_prev_node = node(val).my_next_node = &node(val);\n#endif\n        assert_ok();\n    }\n\n    iterator erase ( iterator it ) {\n        T& val = *it;\n        ++it;\n        remove( val );\n        return it;\n    }\n\n}; // intrusive_list_base\n\n\n//! Double linked list of items of type T containing a member of type intrusive_list_node.\n/** NodePtr is a member pointer to the node data field. Class U is either T or \n    a base class of T containing the node member. Default values exist for the sake\n    of a partial specialization working with inheritance case.\n\n    The list does not have ownership of its items. 
Its purpose is to avoid dynamic \n    memory allocation when forming lists of existing objects.\n\n    The class is not thread safe. **/\ntemplate <class T, class U, intrusive_list_node U::*NodePtr>\nclass memptr_intrusive_list : public intrusive_list_base<memptr_intrusive_list<T, U, NodePtr>, T>\n{\n    friend class intrusive_list_base<memptr_intrusive_list<T, U, NodePtr>, T>;\n\n    static intrusive_list_node& node ( T& val ) { return val.*NodePtr; }\n\n    static T& item ( intrusive_list_node* node ) {\n        // Cannot use __TBB_offsetof (and consequently __TBB_get_object_ref) macro \n        // with *NodePtr argument because gcc refuses to interpret pasted \"->\" and \"*\"\n        // as member pointer dereferencing operator, and explicit usage of ## in \n        // __TBB_offsetof implementation breaks operations with normal member names.\n        return *reinterpret_cast<T*>((char*)node - ((ptrdiff_t)&(reinterpret_cast<T*>(0x1000)->*NodePtr) - 0x1000));\n    }\n}; // intrusive_list<T, U, NodePtr>\n\n//! Double linked list of items of type T that is derived from intrusive_list_node class.\n/** The list does not have ownership of its items. Its purpose is to avoid dynamic \n    memory allocation when forming lists of existing objects.\n\n    The class is not thread safe. **/\ntemplate <class T>\nclass intrusive_list : public intrusive_list_base<intrusive_list<T>, T>\n{\n    friend class intrusive_list_base<intrusive_list<T>, T>;\n\n    static intrusive_list_node& node ( T& val ) { return val; }\n\n    static T& item ( intrusive_list_node* node ) { return *static_cast<T*>(node); }\n}; // intrusive_list<T>\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_intrusive_list_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/itt_notify.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if DO_ITT_NOTIFY\n\n#if _WIN32||_WIN64\n    #ifndef UNICODE\n        #define UNICODE\n    #endif\n#else\n    #pragma weak dlopen\n    #pragma weak dlsym\n    #pragma weak dlerror\n#endif /* WIN */\n\n#if __TBB_BUILD\n\nextern \"C\" void ITT_DoOneTimeInitialization();\n#define __itt_init_ittlib_name(x,y) (ITT_DoOneTimeInitialization(), true)\n\n#elif __TBBMALLOC_BUILD\n\nextern \"C\" void MallocInitializeITT();\n#define __itt_init_ittlib_name(x,y) (MallocInitializeITT(), true)\n\n#else\n#error This file is expected to be used for either TBB or TBB allocator build.\n#endif // __TBB_BUILD\n\n#include \"tools_api/ittnotify_static.c\"\n\nnamespace tbb {\nnamespace internal {\nint __TBB_load_ittnotify() {\n    return __itt_init_ittlib(NULL,          // groups for:\n      (__itt_group_id)(__itt_group_sync     // prepare/cancel/acquired/releasing\n                       | __itt_group_thread // name threads\n                       | __itt_group_stitch // stack stitching\n#if __TBB_CPF_BUILD\n                       | __itt_group_structure\n#endif\n                           ));\n}\n\n}} // namespaces\n\n#endif /* DO_ITT_NOTIFY */\n\n#define __TBB_NO_IMPLICIT_LINKAGE 1\n#include \"itt_notify.h\"\n\nnamespace tbb {\n\n#if DO_ITT_NOTIFY\n    const tchar \n            *SyncType_GlobalLock = _T(\"TbbGlobalLock\"),\n            *SyncType_Scheduler = _T(\"%Constant\")\n            ;\n    const tchar \n            *SyncObj_SchedulerInitialization = _T(\"TbbSchedulerInitialization\"),\n            *SyncObj_SchedulersList = _T(\"TbbSchedulersList\"),\n            *SyncObj_WorkerLifeCycleMgmt = _T(\"TBB Scheduler\"),\n            *SyncObj_TaskStealingLoop = _T(\"TBB Scheduler\"),\n            *SyncObj_WorkerTaskPool = _T(\"TBB Scheduler\"),\n            *SyncObj_MasterTaskPool = _T(\"TBB Scheduler\"),\n            
*SyncObj_TaskPoolSpinning = _T(\"TBB Scheduler\"),\n            *SyncObj_Mailbox = _T(\"TBB Scheduler\"),\n            *SyncObj_TaskReturnList = _T(\"TBB Scheduler\"),\n            *SyncObj_TaskStream = _T(\"TBB Scheduler\"),\n            *SyncObj_ContextsList = _T(\"TBB Scheduler\")\n            ;\n#endif /* DO_ITT_NOTIFY */\n\n} // namespace tbb\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/itt_notify.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_ITT_NOTIFY\n#define _TBB_ITT_NOTIFY\n\n#include \"tbb/tbb_stddef.h\"\n\n#if DO_ITT_NOTIFY\n\n#if _WIN32||_WIN64\n    #ifndef UNICODE\n        #define UNICODE\n    #endif\n#endif /* WIN */\n\n#ifndef INTEL_ITTNOTIFY_API_PRIVATE\n#define INTEL_ITTNOTIFY_API_PRIVATE\n#endif\n\n#include \"tools_api/ittnotify.h\"\n#include \"tools_api/legacy/ittnotify.h\"\nextern \"C\" void __itt_fini_ittlib(void);\n\n#if _WIN32||_WIN64\n    #undef _T\n    #undef __itt_event_create\n    #define __itt_event_create __itt_event_createA\n#endif /* WIN */\n\n\n#endif /* DO_ITT_NOTIFY */\n\n#if !ITT_CALLER_NULL\n#define ITT_CALLER_NULL ((__itt_caller)0)\n#endif\n\nnamespace tbb {\n//! Unicode support\n#if (_WIN32||_WIN64) && !__MINGW32__\n    //! Unicode character type. Always wchar_t on Windows.\n    /** We do not use typedefs from Windows TCHAR family to keep consistence of TBB coding style. **/\n    typedef wchar_t tchar;\n    //! Standard Windows macro to markup the string literals. \n    #define _T(string_literal) L ## string_literal\n#else /* !WIN */\n    typedef char tchar;\n    //! Standard Windows style macro to markup the string literals.\n    #define _T(string_literal) string_literal\n#endif /* !WIN */\n} // namespace tbb\n\n#if DO_ITT_NOTIFY\nnamespace tbb {\n    //! Display names of internal synchronization types\n    extern const tchar \n            *SyncType_GlobalLock,\n            *SyncType_Scheduler;\n    //! 
Display names of internal synchronization components/scenarios\n    extern const tchar \n            *SyncObj_SchedulerInitialization,\n            *SyncObj_SchedulersList,\n            *SyncObj_WorkerLifeCycleMgmt,\n            *SyncObj_TaskStealingLoop,\n            *SyncObj_WorkerTaskPool,\n            *SyncObj_MasterTaskPool,\n            *SyncObj_TaskPoolSpinning,\n            *SyncObj_Mailbox,\n            *SyncObj_TaskReturnList,\n            *SyncObj_TaskStream,\n            *SyncObj_ContextsList\n            ;\n\n    namespace internal {\n        void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void* obj, const tchar* name); \n\n    } // namespace internal\n\n} // namespace tbb\n\n// const_cast<void*>() is necessary to cast off volatility\n#define ITT_NOTIFY(name,obj)            __itt_notify_##name(const_cast<void*>(static_cast<volatile void*>(obj)))\n#define ITT_THREAD_SET_NAME(name)       __itt_thread_set_name(name)\n#define ITT_FINI_ITTLIB()               __itt_fini_ittlib()\n#define ITT_SYNC_CREATE(obj, type, name) __itt_sync_create((void*)(obj), type, name, 2)\n#define ITT_SYNC_RENAME(obj, name)      __itt_sync_rename(obj, name)\n#define ITT_STACK_CREATE(obj)           obj = __itt_stack_caller_create()\n#if __TBB_TASK_GROUP_CONTEXT\n#define ITT_STACK(precond, name, obj)   (precond) ? __itt_stack_##name(obj) : ((void)0);\n#else\n#define ITT_STACK(precond, name, obj)      ((void)0)\n#endif /* !__TBB_TASK_GROUP_CONTEXT */\n\n#else /* !DO_ITT_NOTIFY */\n\n#define ITT_NOTIFY(name,obj)            ((void)0)\n#define ITT_THREAD_SET_NAME(name)       ((void)0)\n#define ITT_FINI_ITTLIB()               ((void)0)\n#define ITT_SYNC_CREATE(obj, type, name) ((void)0)\n#define ITT_SYNC_RENAME(obj, name)      ((void)0)\n#define ITT_STACK_CREATE(obj)           ((void)0)\n#define ITT_STACK(precond, name, obj)   ((void)0)\n\n#endif /* !DO_ITT_NOTIFY */\n\nnamespace tbb {\nnamespace internal {\nint __TBB_load_ittnotify();\n}}\n\n#endif /* _TBB_ITT_NOTIFY */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/lin32-tbb-export.def",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n{\nglobal:\n\n#define __TBB_SYMBOL( sym ) sym;\n#include \"lin32-tbb-export.lst\"\n\nlocal:\n\n/* TBB symbols */\n*3tbb*;\n*__TBB*;\n\n/* ITT symbols */\n__itt_*;\n\n/* Intel Compiler (libirc) symbols */\n__intel_*;\n_intel_*;\nget_memcpy_largest_cachelinesize;\nget_memcpy_largest_cache_size;\nget_mem_ops_method;\ninit_mem_ops_method;\nirc__get_msg;\nirc__print;\noverride_mem_ops_method;\nset_memcpy_largest_cachelinesize;\nset_memcpy_largest_cache_size;\n\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/lin32-tbb-export.lst",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n\n/* cache_aligned_allocator.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEjjPv )\n__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv )\n__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv )\n__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Ej )\n__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev )\n\n/* task.cpp v3 */\n__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt )\n__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi )\n__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv )\n__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE )\n__TBB_SYMBOL( _ZN3tbb4task4selfEv )\n__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEj )\n__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEj )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEj )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEj )\n__TBB_SYMBOL( _ZTIN3tbb4taskE )\n__TBB_SYMBOL( _ZTSN3tbb4taskE )\n__TBB_SYMBOL( _ZTVN3tbb4taskE )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEij )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv 
)\n#if __TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev )\n__TBB_SYMBOL( _ZTIN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTSN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTVN3tbb10empty_taskE )\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEi )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n/* task_v2.cpp */\n__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ )\n#endif /* !TBB_NO_LEGACY */\n\n/* Exception handling in task scheduler */\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEj )\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( 
_ZN3tbb18task_group_context12set_priorityENS_10priority_tE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv )\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZN3tbb13tbb_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n/* Symbols for exceptions thrown from TBB */\n__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev )\n__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev )\n__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev )\n__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTSN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTVN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev )\n__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv 
)\n__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev )\n__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTSN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTVN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev )\n__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev )\n__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTSN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTVN3tbb10user_abortE )\n\n/* tbb_misc.cpp */\n__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ )\n__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E )\n__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv )\n__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc )\n__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz )\n#if __TBB_x86_32\n__TBB_SYMBOL( __TBB_machine_store8_slow_perf_warning )\n__TBB_SYMBOL( __TBB_machine_store8_slow )\n#endif\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n/* tbb_main.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv )\n__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv )\n__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc )\n__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv )\n#if __TBB_ITT_STRUCTURE_API\n__TBB_SYMBOL( _ZN3tbb8internal22itt_make_task_group_v7ENS0_15itt_domain_enumEPvyS2_yNS0_12string_indexE )\n__TBB_SYMBOL( _ZN3tbb8internal23itt_metadata_str_add_v7ENS0_15itt_domain_enumEPvyNS0_12string_indexEPKc )\n__TBB_SYMBOL( _ZN3tbb8internal19itt_relation_add_v7ENS0_15itt_domain_enumEPvyNS0_12itt_relationES2_y )\n__TBB_SYMBOL( _ZN3tbb8internal17itt_task_begin_v7ENS0_15itt_domain_enumEPvyS2_yNS0_12string_indexE )\n__TBB_SYMBOL( 
_ZN3tbb8internal15itt_task_end_v7ENS0_15itt_domain_enumE )\n#endif\n\n/* pipeline.cpp */\n__TBB_SYMBOL( _ZTIN3tbb6filterE )\n__TBB_SYMBOL( _ZTSN3tbb6filterE )\n__TBB_SYMBOL( _ZTVN3tbb6filterE )\n__TBB_SYMBOL( _ZN3tbb6filterD2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEj )\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEjRNS_18task_group_contextE )\n#endif\n__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv )\n__TBB_SYMBOL( _ZTIN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTSN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTVN3tbb8pipelineE )\n__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev )\n__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv )\n\n/* queuing_rw_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b )\n\n/* reader_writer_lock.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv )\n__TBB_SYMBOL( 
_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv )\n\n#if !TBB_NO_LEGACY\n/* spin_rw_mutex.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ )\n#endif\n\n/* spin_rw_mutex v3 */\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv )\n\n// x86_rtm_rw_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE )\n__TBB_SYMBOL( 
_ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE )\n\n/* spin_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ )\n\n/* mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv )\n\n/* recursive_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv )\n\n/* QueuingMutex.cpp */\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ )\n\n/* critical_section.cpp */\n__TBB_SYMBOL( 
_ZN3tbb8internal19critical_section_v418internal_constructEv )\n\n#if !TBB_NO_LEGACY\n/* concurrent_hash_map */\n__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv )\n\n/* concurrent_queue.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityEij )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Ej )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev )\n__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv )\n#endif\n\n/* concurrent_queue v3 */\n/* constructors */\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Ej )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Ej )\n/* destructors */\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev )\n/* typeinfo */\n__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E )\n/* vtable */\n__TBB_SYMBOL( 
_ZTVN3tbb8internal24concurrent_queue_base_v3E )\n/* methods */\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityEij )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ )\n\n#if !TBB_NO_LEGACY\n/* concurrent_vector.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_jPFvPvPKvjE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvjEb )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_jPFvPvjEPFvS4_PKvjESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEjjPFvPvjE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEjjj )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEjRj )\n__TBB_SYMBOL( 
_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEjjPFvPvjE )\n__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv )\n#endif\n\n/* concurrent_vector v3 */\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_jPFvPvPKvjE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvjE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_jPFvPvjEPFvS4_PKvjESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEjjPFvPvPKvjES4_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEjjj )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEjRj )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEjjPFvPvPKvjES4_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEjPvPFvS2_jEPFvS2_PKvjE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEj )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEjjjPKvPFvPvjEPFvS4_S3_jE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEjjPFvPvPKvjES4_ )\n\n/* tbb_thread */\n#if __MINGW32__\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFjPvES2_ )\n#else\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ )\n#endif\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv )\n__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv )\n__TBB_SYMBOL( 
_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Ej )\n__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ )\n\n#if __MINGW32__\n/* condition_variable */\n__TBB_SYMBOL( _ZN3tbb10interface58internal32internal_condition_variable_waitERNS1_14condvar_impl_tEPNS_5mutexEPKNS_10tick_count10interval_tE )\n__TBB_SYMBOL( _ZN3tbb10interface58internal35internal_destroy_condition_variableERNS1_14condvar_impl_tE )\n__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_condition_variable_notify_allERNS1_14condvar_impl_tE )\n__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_condition_variable_notify_oneERNS1_14condvar_impl_tE )\n__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_initialize_condition_variableERNS1_14condvar_impl_tE )\n#endif\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/lin64-tbb-export.def",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n{\nglobal:\n\n#define __TBB_SYMBOL( sym ) sym;\n#include \"lin64-tbb-export.lst\"\n\nlocal:\n\n/* TBB symbols */\n*3tbb*;\n*__TBB*;\n\n/* ITT symbols */\n__itt_*;\n\n/* Intel Compiler (libirc) symbols */\n__intel_*;\n_intel_*;\nget_msg_buf;\nget_text_buf;\nmessage_catalog;\nprint_buf;\nirc__get_msg;\nirc__print;\n\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/lin64-tbb-export.lst",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n\n/* cache_aligned_allocator.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEmmPv )\n__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv )\n__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv )\n__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev )\n\n/* task.cpp v3 */\n__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt )\n__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi )\n__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv )\n__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE )\n__TBB_SYMBOL( _ZN3tbb4task4selfEv )\n__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEm )\n__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm )\n__TBB_SYMBOL( _ZTIN3tbb4taskE )\n__TBB_SYMBOL( _ZTSN3tbb4taskE )\n__TBB_SYMBOL( _ZTVN3tbb4taskE )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEim )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv 
)\n#if __TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev )\n__TBB_SYMBOL( _ZTIN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTSN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTVN3tbb10empty_taskE )\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEl )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n/* task_v2.cpp */\n__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ )\n#endif /* !TBB_NO_LEGACY */\n\n/* Exception handling in task scheduler */\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( 
_ZN3tbb18task_group_context12set_priorityENS_10priority_tE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv )\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZN3tbb13tbb_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n/* Symbols for exceptions thrown from TBB */\n__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev )\n__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev )\n__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev )\n__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTSN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTVN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev )\n__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv 
)\n__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev )\n__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTSN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTVN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev )\n__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev )\n__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTSN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTVN3tbb10user_abortE )\n/* tbb_misc.cpp */\n__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ )\n__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E )\n__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv )\n__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc )\n__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz )\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n/* tbb_main.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv )\n__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv )\n__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc )\n__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv )\n#if __TBB_ITT_STRUCTURE_API\n__TBB_SYMBOL( _ZN3tbb8internal23itt_metadata_str_add_v7ENS0_15itt_domain_enumEPvyNS0_12string_indexEPKc )\n__TBB_SYMBOL( _ZN3tbb8internal22itt_make_task_group_v7ENS0_15itt_domain_enumEPvyS2_yNS0_12string_indexE )\n__TBB_SYMBOL( _ZN3tbb8internal17itt_task_begin_v7ENS0_15itt_domain_enumEPvyS2_yNS0_12string_indexE )\n__TBB_SYMBOL( _ZN3tbb8internal19itt_relation_add_v7ENS0_15itt_domain_enumEPvyNS0_12itt_relationES2_y )\n__TBB_SYMBOL( _ZN3tbb8internal15itt_task_end_v7ENS0_15itt_domain_enumE )\n#endif\n\n/* pipeline.cpp */\n__TBB_SYMBOL( _ZTIN3tbb6filterE 
)\n__TBB_SYMBOL( _ZTSN3tbb6filterE )\n__TBB_SYMBOL( _ZTVN3tbb6filterE )\n__TBB_SYMBOL( _ZN3tbb6filterD2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEm )\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEmRNS_18task_group_contextE )\n#endif\n__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv )\n__TBB_SYMBOL( _ZTIN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTSN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTVN3tbb8pipelineE )\n__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev )\n__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv )\n\n/* queuing_rw_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b )\n\n/* reader_writer_lock.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv )\n__TBB_SYMBOL( 
_ZN3tbb10interface518reader_writer_lock18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv )\n\n#if !TBB_NO_LEGACY\n/* spin_rw_mutex.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ )\n#endif\n\n// x86_rtm_rw_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE )\n\n/* spin_rw_mutex v3 */\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv 
)\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv )\n\n/* spin_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv )\n\n/* mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv )\n\n/* recursive_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv )\n\n/* QueuingMutex.cpp */\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ )\n\n/* critical_section.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv )\n\n#if !TBB_NO_LEGACY\n/* concurrent_hash_map */\n__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv )\n\n/* 
concurrent_queue.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Em )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev )\n__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv )\n#endif\n\n/* concurrent_queue v3 */\n/* constructors */\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Em )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em )\n/* destructors */\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev )\n/* typeinfo */\n__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E )\n/* vtable */\n__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E )\n/* methods */\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ )\n__TBB_SYMBOL( 
_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ )\n\n#if !TBB_NO_LEGACY\n/* concurrent_vector.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE )\n__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv )\n#endif\n\n/* 
concurrent_vector v3 */\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ )\n\n/* tbb_thread */\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv )\n__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE )\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/lin64ipf-tbb-export.def",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n{\nglobal:\n\n#define __TBB_SYMBOL( sym ) sym;\n#include \"lin64ipf-tbb-export.lst\"\n\nlocal:\n\n/* TBB symbols */\n*3tbb*;\n*__TBB*;\n\n/* ITT symbols */\n__itt_*;\n\n/* Intel Compiler (libirc) symbols */\n__intel_*;\n_intel_*;\n?0_memcopyA;\n?0_memcopyDu;\n?0_memcpyD;\n?1__memcpy;\n?1__memmove;\n?1__serial_memmove;\nmemcpy;\nmemset;\n\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/lin64ipf-tbb-export.lst",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n\n/* cache_aligned_allocator.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEmmPv )\n__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv )\n__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv )\n__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev )\n\n/* task.cpp v3 */\n__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt )\n__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi )\n__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv )\n__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE )\n__TBB_SYMBOL( _ZN3tbb4task4selfEv )\n__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEm )\n__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm )\n__TBB_SYMBOL( _ZTIN3tbb4taskE )\n__TBB_SYMBOL( _ZTSN3tbb4taskE )\n__TBB_SYMBOL( _ZTVN3tbb4taskE )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEim )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv 
)\n#if __TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev )\n__TBB_SYMBOL( _ZTIN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTSN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTVN3tbb10empty_taskE )\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEl )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n/* task_v2.cpp */\n__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ )\n#endif /* !TBB_NO_LEGACY */\n\n/* Exception handling in task scheduler */\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( 
_ZN3tbb18task_group_context12set_priorityENS_10priority_tE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv )\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZN3tbb13tbb_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n/* Symbols for exceptions thrown from TBB */\n__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev )\n__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev )\n__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev )\n__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTSN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTVN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev )\n__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv 
)\n__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev )\n__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTSN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTVN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev )\n__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev )\n__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTSN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTVN3tbb10user_abortE )\n\n/* tbb_misc.cpp */\n__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ )\n__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E )\n__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv )\n__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc )\n__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz )\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n/* tbb_main.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv )\n__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv )\n__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc )\n__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv )\n\n/* pipeline.cpp */\n__TBB_SYMBOL( _ZTIN3tbb6filterE )\n__TBB_SYMBOL( _ZTSN3tbb6filterE )\n__TBB_SYMBOL( _ZTVN3tbb6filterE )\n__TBB_SYMBOL( _ZN3tbb6filterD2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEm )\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEmRNS_18task_group_contextE )\n#endif\n__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv 
)\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv )\n__TBB_SYMBOL( _ZTIN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTSN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTVN3tbb8pipelineE )\n__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev )\n__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv )\n\n/* queuing_rw_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b )\n\n/* reader_writer_lock.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv )\n\n#if !TBB_NO_LEGACY\n/* spin_rw_mutex.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ )\n__TBB_SYMBOL( 
_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ )\n#endif\n\n/* spin_rw_mutex v3 */\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv )\n\n/* spin_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ )\n\n/* mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv )\n\n/* recursive_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( 
_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv )\n\n/* QueuingMutex.cpp */\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ )\n\n/* critical_section.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv )\n\n#if !TBB_NO_LEGACY\n/* concurrent_hash_map */\n__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv )\n\n/* concurrent_queue.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Em )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev )\n__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv )\n#endif\n\n/* concurrent_queue v3 */\n/* constructors */\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Em )\n__TBB_SYMBOL( 
_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em )\n/* destructors */\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev )\n/* typeinfo */\n__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E )\n/* vtable */\n__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E )\n/* methods */\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ )\n\n#if !TBB_NO_LEGACY\n/* concurrent_vector.cpp v2 */\n__TBB_SYMBOL( 
_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE )\n__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv )\n#endif\n\n/* concurrent_vector v3 */\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE )\n__TBB_SYMBOL( 
_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ )\n\n/* tbb_thread */\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv )\n__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE )\n\n/* asm functions */\n__TBB_SYMBOL( __TBB_machine_fetchadd1__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_fetchadd2__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_fetchadd4__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_fetchadd8__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_fetchstore1__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_fetchstore2__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_fetchstore4__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_fetchstore8__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_fetchadd1acquire )\n__TBB_SYMBOL( __TBB_machine_fetchadd1release )\n__TBB_SYMBOL( __TBB_machine_fetchadd2acquire )\n__TBB_SYMBOL( __TBB_machine_fetchadd2release )\n__TBB_SYMBOL( __TBB_machine_fetchadd4acquire )\n__TBB_SYMBOL( __TBB_machine_fetchadd4release )\n__TBB_SYMBOL( __TBB_machine_fetchadd8acquire )\n__TBB_SYMBOL( __TBB_machine_fetchadd8release )\n__TBB_SYMBOL( __TBB_machine_fetchstore1acquire )\n__TBB_SYMBOL( __TBB_machine_fetchstore1release )\n__TBB_SYMBOL( __TBB_machine_fetchstore2acquire )\n__TBB_SYMBOL( __TBB_machine_fetchstore2release )\n__TBB_SYMBOL( __TBB_machine_fetchstore4acquire )\n__TBB_SYMBOL( __TBB_machine_fetchstore4release )\n__TBB_SYMBOL( __TBB_machine_fetchstore8acquire )\n__TBB_SYMBOL( 
__TBB_machine_fetchstore8release )\n__TBB_SYMBOL( __TBB_machine_cmpswp1acquire )\n__TBB_SYMBOL( __TBB_machine_cmpswp1release )\n__TBB_SYMBOL( __TBB_machine_cmpswp1__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_cmpswp2acquire )\n__TBB_SYMBOL( __TBB_machine_cmpswp2release )\n__TBB_SYMBOL( __TBB_machine_cmpswp2__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_cmpswp4acquire )\n__TBB_SYMBOL( __TBB_machine_cmpswp4release )\n__TBB_SYMBOL( __TBB_machine_cmpswp4__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_cmpswp8acquire )\n__TBB_SYMBOL( __TBB_machine_cmpswp8release )\n__TBB_SYMBOL( __TBB_machine_cmpswp8__TBB_full_fence )\n__TBB_SYMBOL( __TBB_machine_lg )\n__TBB_SYMBOL( __TBB_machine_lockbyte )\n__TBB_SYMBOL( __TBB_machine_pause )\n__TBB_SYMBOL( __TBB_machine_trylockbyte )\n__TBB_SYMBOL( __TBB_machine_load8_relaxed )\n__TBB_SYMBOL( __TBB_machine_store8_relaxed )\n__TBB_SYMBOL( __TBB_machine_load4_relaxed )\n__TBB_SYMBOL( __TBB_machine_store4_relaxed )\n__TBB_SYMBOL( __TBB_machine_load2_relaxed )\n__TBB_SYMBOL( __TBB_machine_store2_relaxed )\n__TBB_SYMBOL( __TBB_machine_load1_relaxed )\n__TBB_SYMBOL( __TBB_machine_store1_relaxed )\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/mac32-tbb-export.def",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#define __TBB_SYMBOL( sym ) _##sym\n#include \"mac32-tbb-export.lst\"\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/mac32-tbb-export.lst",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n\n/*\n    Sometimes OS X* requires leading underscore (e. g. in export list file), but sometimes not\n    (e. g. when searching symbol in a dynamic library via dlsym()). Symbols in this file SHOULD\n    be listed WITHOUT one leading underscore. 
__TBB_SYMBOL macro should add underscore when\n    necessary, depending on the indended usage.\n*/\n\n// cache_aligned_allocator.cpp\n__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEmmPv )\n__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv )\n__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv )\n__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev )\n\n// task.cpp v3\n__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt )\n__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi )\n__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv )\n__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE )\n__TBB_SYMBOL( _ZN3tbb4task4selfEv )\n__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEm )\n__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj )\n__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm )\n__TBB_SYMBOL( _ZTIN3tbb4taskE )\n__TBB_SYMBOL( _ZTSN3tbb4taskE )\n__TBB_SYMBOL( _ZTVN3tbb4taskE )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEim )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv )\n#if 
__TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev )\n__TBB_SYMBOL( _ZTIN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTSN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTVN3tbb10empty_taskE )\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEl )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n// task_v2.cpp\n__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ )\n#endif\n\n// Exception handling in task scheduler\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE 
)\n__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv )\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n// Symbols for exceptions thrown from TBB\n__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev )\n__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE )\n__TBB_SYMBOL( _ZNSt13runtime_errorD1Ev )\n__TBB_SYMBOL( _ZTISt13runtime_error )\n__TBB_SYMBOL( _ZTSSt13runtime_error )\n__TBB_SYMBOL( _ZNSt16invalid_argumentD1Ev )\n__TBB_SYMBOL( _ZTISt16invalid_argument )\n__TBB_SYMBOL( _ZTSSt16invalid_argument )\n__TBB_SYMBOL( _ZNSt11range_errorD1Ev )\n__TBB_SYMBOL( _ZTISt11range_error )\n__TBB_SYMBOL( _ZTSSt11range_error )\n__TBB_SYMBOL( _ZNSt12length_errorD1Ev )\n__TBB_SYMBOL( _ZTISt12length_error )\n__TBB_SYMBOL( _ZTSSt12length_error )\n__TBB_SYMBOL( _ZNSt12out_of_rangeD1Ev )\n__TBB_SYMBOL( _ZTISt12out_of_range )\n__TBB_SYMBOL( _ZTSSt12out_of_range )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev )\n__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE )\n__TBB_SYMBOL( 
_ZTSN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev )\n__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTSN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTVN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev )\n__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev )\n__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTSN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTVN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev )\n__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev )\n__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTSN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTVN3tbb10user_abortE )\n\n// tbb_misc.cpp\n__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ )\n__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E )\n__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc )\n__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz )\n#if __TBB_x86_32\n__TBB_SYMBOL( __TBB_machine_store8_slow_perf_warning )\n__TBB_SYMBOL( __TBB_machine_store8_slow )\n#endif\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n// tbb_main.cpp\n__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv )\n__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv )\n__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv )\n__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc )\n\n// pipeline.cpp\n__TBB_SYMBOL( 
_ZTIN3tbb6filterE )\n__TBB_SYMBOL( _ZTSN3tbb6filterE )\n__TBB_SYMBOL( _ZTVN3tbb6filterE )\n__TBB_SYMBOL( _ZN3tbb6filterD2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEm )\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEmRNS_18task_group_contextE )\n#endif\n__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv )\n__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTSN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTVN3tbb8pipelineE )\n__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv )\n\n// queuing_rw_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv )\n\n// reader_writer_lock.cpp\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv 
)\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv )\n\n#if !TBB_NO_LEGACY\n// spin_rw_mutex.cpp v2\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ )\n#endif\n\n// spin_rw_mutex v3\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv )\n\n// x86_rtm_rw_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv 
)\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE )\n\n// spin_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv )\n\n// mutex.cpp\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv )\n\n// recursive_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv )\n\n// queuing_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv )\n\n// critical_section.cpp\n__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv )\n\n#if !TBB_NO_LEGACY\n// concurrent_hash_map\n__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv )\n\n// concurrent_queue.cpp 
v2\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityEim )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Em )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev )\n__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv )\n#endif\n\n// concurrent_queue v3\n// constructors\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Em )\n// destructors\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev )\n// typeinfo\n__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E )\n// vtable\n__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E )\n// methods\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ )\n__TBB_SYMBOL( 
_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityEim )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ )\n\n#if !TBB_NO_LEGACY\n// concurrent_vector.cpp v2\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE )\n__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv )\n#endif\n\n// concurrent_vector v3\n__TBB_SYMBOL( 
_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ )\n\n// tbb_thread\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv )\n__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ )\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/mac64-tbb-export.def",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#define __TBB_SYMBOL( sym ) _##sym\n#include \"mac64-tbb-export.lst\"\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/mac64-tbb-export.lst",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n\n/*\n    Sometimes OS X* requires leading underscore (e. g. in export list file), but sometimes not\n    (e. g. when searching symbol in a dynamic library via dlsym()). Symbols in this file SHOULD\n    be listed WITHOUT one leading underscore. 
__TBB_SYMBOL macro should add underscore when\n    necessary, depending on the indended usage.\n*/\n\n// cache_aligned_allocator.cpp\n__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEmmPv )\n__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv )\n__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv )\n__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev )\n\n// task.cpp v3\n__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt )\n__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi )\n__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv )\n__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE )\n__TBB_SYMBOL( _ZN3tbb4task4selfEv )\n__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEm )\n__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj )\n__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm )\n__TBB_SYMBOL( _ZTIN3tbb4taskE )\n__TBB_SYMBOL( _ZTSN3tbb4taskE )\n__TBB_SYMBOL( _ZTVN3tbb4taskE )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEim )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv )\n#if 
__TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev )\n__TBB_SYMBOL( _ZTIN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTSN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTVN3tbb10empty_taskE )\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEl )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n// task_v2.cpp\n__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ )\n#endif\n\n// Exception handling in task scheduler\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm )\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE 
)\n__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv )\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n// Symbols for exceptions thrown from TBB\n__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev )\n__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE )\n__TBB_SYMBOL( _ZNSt13runtime_errorD1Ev )\n__TBB_SYMBOL( _ZTISt13runtime_error )\n__TBB_SYMBOL( _ZTSSt13runtime_error )\n__TBB_SYMBOL( _ZNSt16invalid_argumentD1Ev )\n__TBB_SYMBOL( _ZTISt16invalid_argument )\n__TBB_SYMBOL( _ZTSSt16invalid_argument )\n__TBB_SYMBOL( _ZNSt11range_errorD1Ev )\n__TBB_SYMBOL( _ZTISt11range_error )\n__TBB_SYMBOL( _ZTSSt11range_error )\n__TBB_SYMBOL( _ZNSt12length_errorD1Ev )\n__TBB_SYMBOL( _ZTISt12length_error )\n__TBB_SYMBOL( _ZTSSt12length_error )\n__TBB_SYMBOL( _ZNSt12out_of_rangeD1Ev )\n__TBB_SYMBOL( _ZTISt12out_of_range )\n__TBB_SYMBOL( _ZTSSt12out_of_range )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev )\n__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE )\n__TBB_SYMBOL( 
_ZTSN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev )\n__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTSN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTVN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev )\n__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev )\n__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTSN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTVN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev )\n__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev )\n__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTSN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTVN3tbb10user_abortE )\n\n\n// tbb_misc.cpp\n__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ )\n__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E )\n__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc )\n__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz )\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n// tbb_main.cpp\n__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv )\n__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv )\n__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv )\n__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc )\n\n// pipeline.cpp\n__TBB_SYMBOL( _ZTIN3tbb6filterE )\n__TBB_SYMBOL( _ZTSN3tbb6filterE )\n__TBB_SYMBOL( _ZTVN3tbb6filterE )\n__TBB_SYMBOL( _ZN3tbb6filterD2Ev 
)\n__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEm )\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEmRNS_18task_group_contextE )\n#endif\n__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv )\n__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTSN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTVN3tbb8pipelineE )\n__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv )\n\n// queuing_rw_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv )\n\n// reader_writer_lock.cpp\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv )\n__TBB_SYMBOL( 
_ZN3tbb10interface518reader_writer_lock4lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv )\n\n#if !TBB_NO_LEGACY\n// spin_rw_mutex.cpp v2\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ )\n#endif\n\n// spin_rw_mutex v3\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv )\n\n// x86_rtm_rw_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE 
)\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE )\n\n// spin_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv )\n\n// mutex.cpp\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv )\n\n// recursive_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv )\n\n// queuing_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv )\n\n// critical_section.cpp\n__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv )\n\n#if !TBB_NO_LEGACY\n// concurrent_hash_map\n__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv )\n\n// concurrent_queue.cpp v2\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv )\n__TBB_SYMBOL( 
_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Em )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev )\n__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv )\n#endif\n\n// concurrent_queue v3\n// constructors\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Em )\n// destructors\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev )\n// typeinfo\n__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E )\n// vtable\n__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E )\n// methods\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv )\n__TBB_SYMBOL( 
_ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ )\n\n#if !TBB_NO_LEGACY\n// concurrent_vector.cpp v2\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm )\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE )\n__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv )\n#endif\n\n// concurrent_vector v3\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE )\n__TBB_SYMBOL( 
_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ )\n\n// tbb_thread\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv )\n__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Em )\n__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE )\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/gcc_armv7.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n/*\n    Platform isolation layer for the ARMv7-a architecture.\n*/\n\n#ifndef __TBB_machine_H\n#error Do not include this file directly; include tbb_machine.h instead\n#endif\n\n//TODO: is ARMv7 is the only version ever to support?\n#if !(__ARM_ARCH_7A__)\n#error compilation requires an ARMv7-a architecture.\n#endif\n\n#include <sys/param.h>\n#include <unistd.h>\n\n#define __TBB_WORDSIZE 4\n\n// Traditionally ARM is little-endian.\n// Note that, since only the layout of aligned 32-bit words is of interest,\n// any apparent PDP-endianness of 32-bit words at half-word alignment or\n// any little-endian ordering of big-endian 32-bit words in 64-bit quantities\n// may be disregarded for this setting.\n#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG\n#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n#elif defined(__BYTE_ORDER__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED\n#else\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT\n#endif\n\n\n#define __TBB_compiler_fence()    __asm__ __volatile__(\"\": : :\"memory\")\n#define __TBB_full_memory_fence() __asm__ __volatile__(\"dmb ish\": : :\"memory\")\n#define __TBB_control_consistency_helper() __TBB_full_memory_fence()\n#define __TBB_acquire_consistency_helper() __TBB_full_memory_fence()\n#define __TBB_release_consistency_helper() __TBB_full_memory_fence()\n\n//--------------------------------------------------\n// Compare and swap\n//--------------------------------------------------\n\n/**\n * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr\n * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand\n * @param value value to 
assign *ptr to if *ptr==comparand\n * @param comparand value to compare with *ptr\n * @return value originally in memory at ptr, regardless of success\n*/\nstatic inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand )\n{\n    int32_t oldval, res;\n\n    __TBB_full_memory_fence();\n\n    do {\n    __asm__ __volatile__(\n        \"ldrex      %1, [%3]\\n\"\n        \"mov        %0, #0\\n\"\n        \"cmp        %1, %4\\n\"\n        \"it         eq\\n\"\n        \"strexeq    %0, %5, [%3]\\n\"\n        : \"=&r\" (res), \"=&r\" (oldval), \"+Qo\" (*(volatile int32_t*)ptr)\n        : \"r\" ((int32_t *)ptr), \"Ir\" (comparand), \"r\" (value)\n        : \"cc\");\n    } while (res);\n\n    __TBB_full_memory_fence();\n\n    return oldval;\n}\n\n/**\n * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr\n * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand\n * @param value value to assign *ptr to if *ptr==comparand\n * @param comparand value to compare with *ptr\n * @return value originally in memory at ptr, regardless of success\n */\nstatic inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand )\n{\n    int64_t oldval;\n    int32_t res;\n\n    __TBB_full_memory_fence();\n\n    do {\n        __asm__ __volatile__(\n            \"mov        %0, #0\\n\"\n            \"ldrexd     %1, %H1, [%3]\\n\"\n            \"cmp        %1, %4\\n\"\n            \"it         eq\\n\"\n            \"cmpeq      %H1, %H4\\n\"\n            \"it         eq\\n\"\n            \"strexdeq   %0, %5, %H5, [%3]\"\n        : \"=&r\" (res), \"=&r\" (oldval), \"+Qo\" (*(volatile int64_t*)ptr)\n        : \"r\" ((int64_t *)ptr), \"r\" (comparand), \"r\" (value)\n        : \"cc\");\n    } while (res);\n\n    __TBB_full_memory_fence();\n\n    return oldval;\n}\n\nstatic inline int32_t __TBB_machine_fetchadd4(volatile void* ptr, int32_t addend)\n{\n    unsigned long tmp;\n    int32_t 
result, tmp2;\n\n    __TBB_full_memory_fence();\n\n    __asm__ __volatile__(\n\"1:     ldrex   %0, [%4]\\n\"\n\"       add     %3, %0, %5\\n\"\n\"       strex   %1, %3, [%4]\\n\"\n\"       cmp     %1, #0\\n\"\n\"       bne     1b\\n\"\n    : \"=&r\" (result), \"=&r\" (tmp), \"+Qo\" (*(volatile int32_t*)ptr), \"=&r\"(tmp2)\n    : \"r\" ((int32_t *)ptr), \"Ir\" (addend)\n    : \"cc\");\n\n    __TBB_full_memory_fence();\n\n    return result;\n}\n\nstatic inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend)\n{\n    unsigned long tmp;\n    int64_t result, tmp2;\n\n    __TBB_full_memory_fence();\n\n    __asm__ __volatile__(\n\"1:     ldrexd  %0, %H0, [%4]\\n\"\n\"       adds    %3, %0, %5\\n\"\n\"       adc     %H3, %H0, %H5\\n\"\n\"       strexd  %1, %3, %H3, [%4]\\n\"\n\"       cmp     %1, #0\\n\"\n\"       bne     1b\"\n    : \"=&r\" (result), \"=&r\" (tmp), \"+Qo\" (*(volatile int64_t*)ptr), \"=&r\"(tmp2)\n    : \"r\" ((int64_t *)ptr), \"r\" (addend)\n    : \"cc\");\n\n\n    __TBB_full_memory_fence();\n\n    return result;\n}\n\ninline void __TBB_machine_pause (int32_t delay )\n{\n    while(delay>0)\n    {\n\t__TBB_compiler_fence();\n        delay--;\n    }\n}\n\nnamespace tbb {\nnamespace internal {\n    template <typename T, size_t S>\n    struct machine_load_store_relaxed {\n        static inline T load ( const volatile T& location ) {\n            const T value = location;\n\n            /*\n            * An extra memory barrier is required for errata #761319\n            * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a\n            */\n            __TBB_acquire_consistency_helper();\n            return value;\n        }\n\n        static inline void store ( volatile T& location, T value ) {\n            location = value;\n        }\n    };\n}} // namespaces internal, tbb\n\n// Machine specific atomic operations\n\n#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)\n#define __TBB_CompareAndSwap8(P,V,C) 
__TBB_machine_cmpswp8(P,V,C)\n#define __TBB_Pause(V) __TBB_machine_pause(V)\n\n// Use generics for some things\n#define __TBB_USE_GENERIC_PART_WORD_CAS                         1\n#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD                   1\n#define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE                 1\n#define __TBB_USE_GENERIC_FETCH_STORE                           1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_DWORD_LOAD_STORE                      1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE     1\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/gcc_generic.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_generic_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_gcc_generic_H\n\n#include <stdint.h>\n#include <unistd.h>\n\n#define __TBB_WORDSIZE      __SIZEOF_POINTER__\n\n#if __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN\n    #define __TBB_64BIT_ATOMICS 0\n#endif\n\n/** FPU control setting not available for non-Intel architectures on Android **/\n#if __ANDROID__ && __TBB_generic_arch \n    #define __TBB_CPU_CTL_ENV_PRESENT 0\n#endif\n\n// __BYTE_ORDER__ is used in accordance with http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html,\n// but __BIG_ENDIAN__ or __LITTLE_ENDIAN__ may be more commonly found instead.\n#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG\n#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n#elif defined(__BYTE_ORDER__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED\n#else\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT\n#endif\n\n/** As this generic implementation has absolutely no information about underlying\n    hardware, its performance most likely will be sub-optimal because of full memory\n    fence usages where a more lightweight synchronization means (or none at all)\n    could suffice. Thus if you use this header to enable TBB on a new platform,\n    consider forking it and relaxing below helpers as appropriate. 
**/\n#define __TBB_acquire_consistency_helper()  __sync_synchronize()\n#define __TBB_release_consistency_helper()  __sync_synchronize()\n#define __TBB_full_memory_fence()           __sync_synchronize()\n#define __TBB_control_consistency_helper()  __sync_synchronize()\n\n#define __TBB_MACHINE_DEFINE_ATOMICS(S,T)                                                         \\\ninline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) {                    \\\n    return __sync_val_compare_and_swap(reinterpret_cast<volatile T *>(ptr),comparand,value);      \\\n}                                                                                                 \\\n                                                                                                  \\\ninline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) {                               \\\n    return __sync_fetch_and_add(reinterpret_cast<volatile T *>(ptr),value);                       \\\n}                                                                                                 \\\n\n__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t)\n__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t)\n__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t)\n__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t)\n\n#undef __TBB_MACHINE_DEFINE_ATOMICS\n\nnamespace tbb{ namespace internal { namespace gcc_builtins {\n    inline int clz(unsigned int x){ return __builtin_clz(x);};\n    inline int clz(unsigned long int x){ return __builtin_clzl(x);};\n    inline int clz(unsigned long long int x){ return __builtin_clzll(x);};\n}}}\n//gcc __builtin_clz builtin count _number_ of leading zeroes\nstatic inline intptr_t __TBB_machine_lg( uintptr_t x ) {\n    return sizeof(x)*8 - tbb::internal::gcc_builtins::clz(x) -1 ;\n}\n\nstatic inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) {\n    __sync_fetch_and_or(reinterpret_cast<volatile uintptr_t *>(ptr),addend);\n}\n\nstatic inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend 
) {\n    __sync_fetch_and_and(reinterpret_cast<volatile uintptr_t *>(ptr),addend);\n}\n\n\ntypedef unsigned char __TBB_Flag;\n\ntypedef __TBB_atomic __TBB_Flag __TBB_atomic_flag;\n\ninline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) {\n    return __sync_lock_test_and_set(&flag,1)==0;\n}\n\ninline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) {\n    __sync_lock_release(&flag);\n}\n\n// Machine specific atomic operations\n#define __TBB_AtomicOR(P,V)     __TBB_machine_or(P,V)\n#define __TBB_AtomicAND(P,V)    __TBB_machine_and(P,V)\n\n#define __TBB_TryLockByte   __TBB_machine_try_lock_byte\n#define __TBB_UnlockByte    __TBB_machine_unlock_byte\n\n// Definition of other functions\n#define __TBB_Log2(V)           __TBB_machine_lg(V)\n\n#define __TBB_USE_GENERIC_FETCH_STORE                       1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n#if __TBB_WORDSIZE==4\n    #define __TBB_USE_GENERIC_DWORD_LOAD_STORE              1\n#endif\n\n#if __TBB_x86_32 || __TBB_x86_64\n#include \"gcc_itsx.h\"\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/gcc_ia32_common.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_machine_gcc_ia32_common_H\n#define __TBB_machine_gcc_ia32_common_H\n\n//TODO: Add a higher-level function, e.g. 
tbb::interal::log2(), into tbb_stddef.h, which\n//uses __TBB_Log2 and contains the assert and remove the assert from here and all other\n//platform-specific headers.\n//TODO: Check if use of gcc intrinsic gives a better chance for cross call optimizations\ntemplate <typename T>\nstatic inline intptr_t __TBB_machine_lg( T x ) {\n    __TBB_ASSERT(x>0, \"The logarithm of a non-positive value is undefined.\");\n    uintptr_t j;\n    __asm__(\"bsr %1,%0\" : \"=r\"(j) : \"r\"((uintptr_t)x));\n    return j;\n}\n#define __TBB_Log2(V)  __TBB_machine_lg(V)\n\n#ifndef __TBB_Pause\n//TODO: check if raising a ratio of pause instructions to loop control instructions\n//(via e.g. loop unrolling) gives any benefit for HT.  E.g, the current implementation\n//does about 2 CPU-consuming instructions for every pause instruction.  Perhaps for\n//high pause counts it should use an unrolled loop to raise the ratio, and thus free\n//up more integer cycles for the other hyperthread.  On the other hand, if the loop is\n//unrolled too far, it won't fit in the core's loop cache, and thus take away\n//instruction decode slots from the other hyperthread.\n\n//TODO: check if use of gcc __builtin_ia32_pause intrinsic gives a \"some how\" better performing code\nstatic inline void __TBB_machine_pause( int32_t delay ) {\n    for (int32_t i = 0; i < delay; i++) {\n       __asm__ __volatile__(\"pause;\");\n    }\n    return;\n}\n#define __TBB_Pause(V) __TBB_machine_pause(V)\n#endif /* !__TBB_Pause */\n\n// API to retrieve/update FPU control setting\n#ifndef __TBB_CPU_CTL_ENV_PRESENT\n#define __TBB_CPU_CTL_ENV_PRESENT 1\nnamespace tbb {\nnamespace internal {\nclass cpu_ctl_env {\nprivate:\n    int     mxcsr;\n    short   x87cw;\n    static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */\npublic:\n    bool operator!=( const cpu_ctl_env& ctl ) const { return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw; }\n    void get_env() {\n    #if __TBB_ICC_12_0_INL_ASM_FSTCW_BROKEN\n        
cpu_ctl_env loc_ctl;\n        __asm__ __volatile__ (\n                \"stmxcsr %0\\n\\t\"\n                \"fstcw %1\"\n                : \"=m\"(loc_ctl.mxcsr), \"=m\"(loc_ctl.x87cw)\n        );\n        *this = loc_ctl;\n    #else\n        __asm__ __volatile__ (\n                \"stmxcsr %0\\n\\t\"\n                \"fstcw %1\"\n                : \"=m\"(mxcsr), \"=m\"(x87cw)\n        );\n    #endif\n        mxcsr &= MXCSR_CONTROL_MASK;\n    }\n    void set_env() const {\n        __asm__ __volatile__ (\n                \"ldmxcsr %0\\n\\t\"\n                \"fldcw %1\"\n                : : \"m\"(mxcsr), \"m\"(x87cw)\n        );\n    }\n};\n} // namespace internal\n} // namespace tbb\n#endif /* !__TBB_CPU_CTL_ENV_PRESENT */\n\n#include \"gcc_itsx.h\"\n\n#endif /* __TBB_machine_gcc_ia32_common_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/gcc_itsx.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_itsx_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_gcc_itsx_H\n\n#define __TBB_OP_XACQUIRE 0xF2\n#define __TBB_OP_XRELEASE 0xF3\n#define __TBB_OP_LOCK     0xF0\n\n#define __TBB_STRINGIZE_INTERNAL(arg) #arg\n#define __TBB_STRINGIZE(arg) __TBB_STRINGIZE_INTERNAL(arg)\n\n#ifdef __TBB_x86_64\n#define __TBB_r_out \"=r\"\n#else\n#define __TBB_r_out \"=q\"\n#endif\n\ninline static uint8_t __TBB_machine_try_lock_elided( volatile uint8_t* lk )\n{\n    uint8_t value = 1;\n    __asm__ volatile (\".byte \" __TBB_STRINGIZE(__TBB_OP_XACQUIRE)\"; lock; xchgb %0, %1;\"\n                      : __TBB_r_out(value), \"=m\"(*lk)  : \"0\"(value), \"m\"(*lk) : \"memory\" );\n    return uint8_t(value^1);\n}\n\ninline static void __TBB_machine_try_lock_elided_cancel()\n{\n    // 'pause' instruction aborts HLE/RTM transactions\n    __asm__ volatile (\"pause\\n\" : : : \"memory\" );\n}\n\ninline static void __TBB_machine_unlock_elided( volatile uint8_t* lk )\n{\n    __asm__ volatile (\".byte \" __TBB_STRINGIZE(__TBB_OP_XRELEASE)\"; movb $0, %0\" \n                      : \"=m\"(*lk) : \"m\"(*lk) : \"memory\" );\n}\n\n#if __TBB_TSX_INTRINSICS_PRESENT\n#include <immintrin.h>\n\n#define __TBB_machine_is_in_transaction _xtest\n#define __TBB_machine_begin_transaction _xbegin\n#define __TBB_machine_end_transaction   _xend\n#define __TBB_machine_transaction_conflict_abort() _xabort(0xff)\n\n#else\n\n/*!\n * Check if the instruction is executed in a transaction or not\n */\ninline static bool __TBB_machine_is_in_transaction()\n{\n    int8_t res = 0;\n#if __TBB_x86_32\n    __asm__ volatile (\".byte 0x0F; .byte 0x01; .byte 0xD6;\\n\"\n                      \"setz %0\" : \"=q\"(res) : : \"memory\" );\n#else\n    __asm__ 
volatile (\".byte 0x0F; .byte 0x01; .byte 0xD6;\\n\"\n                      \"setz %0\" : \"=r\"(res) : : \"memory\" );\n#endif\n    return res==0;\n}\n\n/*!\n * Enter speculative execution mode.\n * @return -1 on success\n *         abort cause ( or 0 ) on abort\n */\ninline static uint32_t __TBB_machine_begin_transaction()\n{\n    uint32_t res = ~uint32_t(0);   // success value\n    __asm__ volatile (\"1: .byte  0xC7; .byte 0xF8;\\n\"           //  XBEGIN <abort-offset>\n                      \"   .long  2f-1b-6\\n\"                     //  2f-1b == difference in addresses of start \n                                                                //  of XBEGIN and the MOVL\n                                                                //  2f - 1b - 6 == that difference minus the size of the\n                                                                //  XBEGIN instruction.  This is the abort offset to\n                                                                //  2: below.\n                      \"    jmp   3f\\n\"                          //  success (leave -1 in res)\n                      \"2:  movl  %%eax,%0\\n\"                    //  store failure code in res\n                      \"3:\"\n                      :\"=r\"(res):\"0\"(res):\"memory\",\"%eax\");\n    return res;\n}\n\n/*!\n * Attempt to commit/end transaction \n */\ninline static void __TBB_machine_end_transaction()\n{\n    __asm__ volatile (\".byte 0x0F; .byte 0x01; .byte 0xD5\" :::\"memory\");   // XEND\n}\n\n/*\n * aborts with code 0xFF (lock already held)\n */\ninline static void __TBB_machine_transaction_conflict_abort()\n{\n    __asm__ volatile (\".byte 0xC6; .byte 0xF8; .byte 0xFF\" :::\"memory\");\n}\n\n#endif /* __TBB_TSX_INTRINSICS_PRESENT */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/ibm_aix51.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n// TODO: revise by comparing with mac_ppc.h\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_ibm_aix51_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_ibm_aix51_H\n\n#define __TBB_WORDSIZE 8\n#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG // assumption based on operating system\n\n#include <stdint.h>\n#include <unistd.h>\n#include <sched.h>\n\nextern \"C\" {\nint32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand);\nint64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand);\nvoid __TBB_machine_flush ();\nvoid __TBB_machine_lwsync ();\nvoid __TBB_machine_isync ();\n}\n\n// Mapping of old entry point names retained for the sake of backward binary compatibility\n#define __TBB_machine_cmpswp4 __TBB_machine_cas_32\n#define __TBB_machine_cmpswp8 __TBB_machine_cas_64\n\n#define __TBB_Yield() sched_yield()\n\n#define __TBB_USE_GENERIC_PART_WORD_CAS                     1\n#define __TBB_USE_GENERIC_FETCH_ADD                         1\n#define __TBB_USE_GENERIC_FETCH_STORE                       1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n#if __GNUC__\n    #define __TBB_control_consistency_helper() __asm__ __volatile__( \"isync\": : :\"memory\")\n    #define __TBB_acquire_consistency_helper() __asm__ __volatile__(\"lwsync\": : :\"memory\")\n    #define __TBB_release_consistency_helper() __asm__ __volatile__(\"lwsync\": : :\"memory\")\n    #define __TBB_full_memory_fence()          __asm__ __volatile__(  \"sync\": : :\"memory\")\n#else\n    // IBM C++ Compiler does not support inline assembly\n    // TODO: Since XL 9.0 or earlier GCC syntax is supported. 
Replace with more\n    //       lightweight implementation (like in mac_ppc.h)\n    #define __TBB_control_consistency_helper() __TBB_machine_isync ()\n    #define __TBB_acquire_consistency_helper() __TBB_machine_lwsync ()\n    #define __TBB_release_consistency_helper() __TBB_machine_lwsync ()\n    #define __TBB_full_memory_fence()          __TBB_machine_flush ()\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/icc_generic.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#if ! 
__TBB_ICC_BUILTIN_ATOMICS_PRESENT\n    #error \"Intel C++ Compiler of at least 12.0 version is needed to use ICC intrinsics port\"\n#endif\n\n#define __TBB_machine_icc_generic_H\n\n//ICC mimics the \"native\" target compiler\n#if _MSC_VER\n    #include \"msvc_ia32_common.h\"\n#else\n    #include \"gcc_ia32_common.h\"\n#endif\n\n//TODO: Make __TBB_WORDSIZE macro optional for ICC intrinsics port.\n//As compiler intrinsics are used for all the operations it is possible to do.\n\n#if __TBB_x86_32\n    #define __TBB_WORDSIZE 4\n#else\n    #define __TBB_WORDSIZE 8\n#endif\n#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n\n//__TBB_compiler_fence() defined just in case, as it seems not to be used on its own anywhere else\n#if _MSC_VER\n    //TODO: any way to use same intrinsics on windows and linux?\n    #pragma intrinsic(_ReadWriteBarrier)\n    #define __TBB_compiler_fence()    _ReadWriteBarrier()\n#else\n    #define __TBB_compiler_fence()    __asm__ __volatile__(\"\": : :\"memory\")\n#endif\n\n#ifndef __TBB_full_memory_fence\n#if _MSC_VER \n    //TODO: any way to use same intrinsics on windows and linux?\n    #pragma intrinsic(_mm_mfence)\n    #define __TBB_full_memory_fence() _mm_mfence()\n#else\n    #define __TBB_full_memory_fence() __asm__ __volatile__(\"mfence\": : :\"memory\")\n#endif\n#endif\n\n#define __TBB_control_consistency_helper() __TBB_compiler_fence()\n\nnamespace tbb { namespace internal {\n//TODO: is there any way to reuse definition of memory_order enum from ICC instead of copy paste.\n//however it seems unlikely that ICC will silently change exact enum values, as they are defined\n//in the ISO exactly like this.\n//TODO: add test that exact values of the enum are same as in the ISO C++11\ntypedef enum memory_order {\n    memory_order_relaxed, memory_order_consume, memory_order_acquire,\n    memory_order_release, memory_order_acq_rel, memory_order_seq_cst\n} memory_order;\n\nnamespace icc_intrinsics_port {\n    template <typename T>\n    T 
convert_argument(T value){\n        return value;\n    }\n    //The overload below is needed to have explicit conversion of pointer to void* in argument list.\n    //compiler bug?\n    //TODO: add according broken macro and recheck with ICC 13.0 if the overload is still needed\n    template <typename T>\n    void* convert_argument(T* value){\n        return (void*)value;\n    }\n}\n//TODO: code below is a bit repetitive, consider simplifying it\ntemplate <typename T, size_t S>\nstruct machine_load_store {\n    static T load_with_acquire ( const volatile T& location ) {\n        return __atomic_load_explicit(&location, memory_order_acquire);\n    }\n    static void store_with_release ( volatile T &location, T value ) {\n        __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release);\n    }\n};\n\ntemplate <typename T, size_t S>\nstruct machine_load_store_relaxed {\n    static inline T load ( const T& location ) {\n        return __atomic_load_explicit(&location, memory_order_relaxed);\n    }\n    static inline void store (  T& location, T value ) {\n        __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed);\n    }\n};\n\ntemplate <typename T, size_t S>\nstruct machine_load_store_seq_cst {\n    static T load ( const volatile T& location ) {\n        return __atomic_load_explicit(&location, memory_order_seq_cst);\n    }\n\n    static void store ( volatile T &location, T value ) {\n        __atomic_store_explicit(&location, value, memory_order_seq_cst);\n    }\n};\n\n}} // namespace tbb::internal\n\nnamespace tbb{ namespace internal { namespace icc_intrinsics_port{\n    typedef enum memory_order_map {\n        relaxed = memory_order_relaxed,\n        acquire = memory_order_acquire,\n        release = memory_order_release,\n        full_fence=  memory_order_seq_cst\n    } memory_order_map;\n}}}// namespace tbb::internal\n\n#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,M)      
                                               \\\ninline T __TBB_machine_cmpswp##S##M( volatile void *ptr, T value, T comparand ) {               \\\n    __atomic_compare_exchange_strong_explicit(                                                  \\\n            (T*)ptr                                                                             \\\n            ,&comparand                                                                         \\\n            ,value                                                                              \\\n            , tbb::internal::icc_intrinsics_port::M                                             \\\n            , tbb::internal::icc_intrinsics_port::M);                                           \\\n    return comparand;                                                                           \\\n}                                                                                               \\\n                                                                                                \\\ninline T __TBB_machine_fetchstore##S##M(volatile void *ptr, T value) {                          \\\n    return __atomic_exchange_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M);   \\\n}                                                                                               \\\n                                                                                                \\\ninline T __TBB_machine_fetchadd##S##M(volatile void *ptr, T value) {                            \\\n    return __atomic_fetch_add_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M);  \\\n}                                                                                               \\\n\n__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, full_fence)\n__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, acquire)\n__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, 
release)\n__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, relaxed)\n\n__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, full_fence)\n__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, acquire)\n__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, release)\n__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, relaxed)\n\n__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, full_fence)\n__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, acquire)\n__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, release)\n__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, relaxed)\n\n__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, full_fence)\n__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, acquire)\n__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, release)\n__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, relaxed)\n\n\n#undef __TBB_MACHINE_DEFINE_ATOMICS\n\n#define __TBB_USE_FENCED_ATOMICS                            1\n\nnamespace tbb { namespace internal {\n#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN\n__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence)\n__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence)\n\n__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(acquire)\n__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(release)\n\n__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(relaxed)\n__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(relaxed)\n\ntemplate <typename T>\nstruct machine_load_store<T,8> {\n    static T load_with_acquire ( const volatile T& location ) {\n        if( tbb::internal::is_aligned(&location,8)) {\n            return __atomic_load_explicit(&location, memory_order_acquire);\n        } else {\n            return __TBB_machine_generic_load8acquire(&location);\n        }\n    }\n    static void store_with_release ( volatile T &location, T value ) {\n        if( tbb::internal::is_aligned(&location,8)) {\n            __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release);\n        } else 
{\n            return __TBB_machine_generic_store8release(&location,value);\n        }\n    }\n};\n\ntemplate <typename T>\nstruct machine_load_store_relaxed<T,8> {\n    static T load( const volatile T& location ) {\n        if( tbb::internal::is_aligned(&location,8)) {\n            return __atomic_load_explicit(&location, memory_order_relaxed);\n        } else {\n            return __TBB_machine_generic_load8relaxed(&location);\n        }\n    }\n    static void store( volatile T &location, T value ) {\n        if( tbb::internal::is_aligned(&location,8)) {\n            __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed);\n        } else {\n            return __TBB_machine_generic_store8relaxed(&location,value);\n        }\n    }\n};\n\ntemplate <typename T >\nstruct machine_load_store_seq_cst<T,8> {\n    static T load ( const volatile T& location ) {\n        if( tbb::internal::is_aligned(&location,8)) {\n            return __atomic_load_explicit(&location, memory_order_seq_cst);\n        } else {\n            return __TBB_machine_generic_load8full_fence(&location);\n        }\n\n    }\n\n    static void store ( volatile T &location, T value ) {\n        if( tbb::internal::is_aligned(&location,8)) {\n            __atomic_store_explicit(&location, value, memory_order_seq_cst);\n        } else {\n            return __TBB_machine_generic_store8full_fence(&location,value);\n        }\n\n    }\n};\n\n#endif\n}} // namespace tbb::internal\ntemplate <typename T>\ninline void __TBB_machine_OR( T *operand, T addend ) {\n    __atomic_fetch_or_explicit(operand, addend, tbb::internal::memory_order_seq_cst);\n}\n\ntemplate <typename T>\ninline void __TBB_machine_AND( T *operand, T addend ) {\n    __atomic_fetch_and_explicit(operand, addend, tbb::internal::memory_order_seq_cst);\n}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/linux_common.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_machine_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#include <sched.h>\n#define __TBB_Yield()  sched_yield()\n\n#include <unistd.h>\n/* Futex definitions */\n#include <sys/syscall.h>\n\n#if defined(SYS_futex)\n\n#define __TBB_USE_FUTEX 1\n#include <limits.h>\n#include <errno.h>\n// Unfortunately, some versions of Linux do not have a header that defines FUTEX_WAIT and FUTEX_WAKE.\n\n#ifdef FUTEX_WAIT\n#define __TBB_FUTEX_WAIT FUTEX_WAIT\n#else\n#define __TBB_FUTEX_WAIT 0\n#endif\n\n#ifdef FUTEX_WAKE\n#define __TBB_FUTEX_WAKE FUTEX_WAKE\n#else\n#define __TBB_FUTEX_WAKE 1\n#endif\n\n#ifndef __TBB_ASSERT\n#error machine specific headers must be included after tbb_stddef.h\n#endif\n\nnamespace tbb {\n\nnamespace internal {\n\ninline int futex_wait( void *futex, int comparand ) {\n    int r = syscall( SYS_futex,futex,__TBB_FUTEX_WAIT,comparand,NULL,NULL,0 );\n#if TBB_USE_ASSERT\n    int e = errno;\n    __TBB_ASSERT( r==0||r==EWOULDBLOCK||(r==-1&&(e==EAGAIN||e==EINTR)), \"futex_wait failed.\" );\n#endif /* TBB_USE_ASSERT */\n    return r;\n}\n\ninline int futex_wakeup_one( void *futex ) {\n    int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,1,NULL,NULL,0 );\n    __TBB_ASSERT( r==0||r==1, \"futex_wakeup_one: more than one thread woken up?\" );\n    return r;\n}\n\ninline int futex_wakeup_all( void *futex ) {\n    int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 );\n    __TBB_ASSERT( r>=0, \"futex_wakeup_all: error in waking up threads\" );\n    return r;\n}\n\n} /* namespace internal */\n\n} /* namespace tbb */\n\n#endif /* SYS_futex */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/linux_ia32.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia32_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_linux_ia32_H\n\n#include <stdint.h>\n#include \"gcc_ia32_common.h\"\n\n#define __TBB_WORDSIZE 4\n#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n\n#define __TBB_compiler_fence() __asm__ __volatile__(\"\": : :\"memory\")\n#define __TBB_control_consistency_helper() __TBB_compiler_fence()\n#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()\n#define __TBB_release_consistency_helper() __TBB_compiler_fence()\n#define __TBB_full_memory_fence()          __asm__ __volatile__(\"mfence\": : :\"memory\")\n\n#if __TBB_ICC_ASM_VOLATILE_BROKEN\n#define __TBB_VOLATILE\n#else\n#define __TBB_VOLATILE volatile\n#endif\n\n#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X,R)                                        \\\nstatic inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand )  \\\n{                                                                                    \\\n    T result;                                                                        \\\n                                                                                     \\\n    __asm__ __volatile__(\"lock\\ncmpxchg\" X \" %2,%1\"                                  \\\n                          : \"=a\"(result), \"=m\"(*(__TBB_VOLATILE T*)ptr)              \\\n                          : \"q\"(value), \"0\"(comparand), \"m\"(*(__TBB_VOLATILE T*)ptr) \\\n                          : \"memory\");                                               \\\n    return result;                                                                   \\\n}                                                                                    \\\n                                             
                                        \\\nstatic inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend)              \\\n{                                                                                    \\\n    T result;                                                                        \\\n    __asm__ __volatile__(\"lock\\nxadd\" X \" %0,%1\"                                     \\\n                          : R (result), \"=m\"(*(__TBB_VOLATILE T*)ptr)                \\\n                          : \"0\"(addend), \"m\"(*(__TBB_VOLATILE T*)ptr)                \\\n                          : \"memory\");                                               \\\n    return result;                                                                   \\\n}                                                                                    \\\n                                                                                     \\\nstatic inline  T __TBB_machine_fetchstore##S(volatile void *ptr, T value)            \\\n{                                                                                    \\\n    T result;                                                                        \\\n    __asm__ __volatile__(\"lock\\nxchg\" X \" %0,%1\"                                     \\\n                          : R (result), \"=m\"(*(__TBB_VOLATILE T*)ptr)                \\\n                          : \"0\"(value), \"m\"(*(__TBB_VOLATILE T*)ptr)                 \\\n                          : \"memory\");                                               \\\n    return result;                                                                   \\\n}                                                                                    \\\n                                                                                     
\n__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,\"\",\"=q\")\n__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,\"\",\"=r\")\n__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,\"l\",\"=r\")\n\n#if __INTEL_COMPILER\n#pragma warning( push )\n// reference to EBX in a function requiring stack alignment\n#pragma warning( disable: 998 )\n#endif\n\n#if __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN\n#define  __TBB_IA32_CAS8_NOINLINE  __attribute__ ((noinline))\n#else\n#define  __TBB_IA32_CAS8_NOINLINE\n#endif\n\nstatic inline __TBB_IA32_CAS8_NOINLINE int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand )  {\n//TODO: remove the extra part of condition once __TBB_GCC_BUILTIN_ATOMICS_PRESENT is lowered to gcc version 4.1.2\n#if (__TBB_GCC_BUILTIN_ATOMICS_PRESENT || (__TBB_GCC_VERSION >= 40102)) && !__TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN\n    return __sync_val_compare_and_swap( reinterpret_cast<volatile int64_t*>(ptr), comparand, value );\n#else /* !__TBB_GCC_BUILTIN_ATOMICS_PRESENT */\n    //TODO: look like ICC 13.0 has some issues with this code, investigate it more deeply\n    int64_t result;\n    union {\n        int64_t i64;\n        int32_t i32[2];\n    };\n    i64 = value;\n#if __PIC__ \n    /* compiling position-independent code */\n    // EBX register preserved for compliance with position-independent code rules on IA32\n    int32_t tmp;\n    __asm__ __volatile__ (\n            \"movl  %%ebx,%2\\n\\t\"\n            \"movl  %5,%%ebx\\n\\t\"\n#if __GNUC__==3\n            \"lock\\n\\t cmpxchg8b %1\\n\\t\"\n#else\n            \"lock\\n\\t cmpxchg8b (%3)\\n\\t\"\n#endif\n            \"movl  %2,%%ebx\"\n             : \"=A\"(result)\n             , \"=m\"(*(__TBB_VOLATILE int64_t *)ptr)\n             , \"=m\"(tmp)\n#if __GNUC__==3\n             : \"m\"(*(__TBB_VOLATILE int64_t *)ptr)\n#else\n             : \"SD\"(ptr)\n#endif\n             , \"0\"(comparand)\n             , \"m\"(i32[0]), \"c\"(i32[1])\n             : \"memory\"\n#if __INTEL_COMPILER\n             
,\"ebx\"\n#endif\n    );\n#else /* !__PIC__ */\n    __asm__ __volatile__ (\n            \"lock\\n\\t cmpxchg8b %1\\n\\t\"\n             : \"=A\"(result), \"=m\"(*(__TBB_VOLATILE int64_t *)ptr)\n             : \"m\"(*(__TBB_VOLATILE int64_t *)ptr)\n             , \"0\"(comparand)\n             , \"b\"(i32[0]), \"c\"(i32[1])\n             : \"memory\"\n    );\n#endif /* __PIC__ */\n    return result;\n#endif /* !__TBB_GCC_BUILTIN_ATOMICS_PRESENT */\n}\n\n#undef __TBB_IA32_CAS8_NOINLINE\n\n#if __INTEL_COMPILER\n#pragma warning( pop )\n#endif // warning 998 is back\n\nstatic inline void __TBB_machine_or( volatile void *ptr, uint32_t addend ) {\n    __asm__ __volatile__(\"lock\\norl %1,%0\" : \"=m\"(*(__TBB_VOLATILE uint32_t *)ptr) : \"r\"(addend), \"m\"(*(__TBB_VOLATILE uint32_t *)ptr) : \"memory\");\n}\n\nstatic inline void __TBB_machine_and( volatile void *ptr, uint32_t addend ) {\n    __asm__ __volatile__(\"lock\\nandl %1,%0\" : \"=m\"(*(__TBB_VOLATILE uint32_t *)ptr) : \"r\"(addend), \"m\"(*(__TBB_VOLATILE uint32_t *)ptr) : \"memory\");\n}\n\n//TODO: Check if it possible and profitable for IA-32 architecture on (Linux* and Windows*)\n//to use of 64-bit load/store via floating point registers together with full fence\n//for sequentially consistent load/store, instead of CAS.\n\n#if __clang__\n#define __TBB_fildq  \"fildll\"\n#define __TBB_fistpq \"fistpll\"\n#else\n#define __TBB_fildq  \"fildq\"\n#define __TBB_fistpq \"fistpq\"\n#endif\n\nstatic inline int64_t __TBB_machine_aligned_load8 (const volatile void *ptr) {\n    __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),\"__TBB_machine_aligned_load8 should be used with 8 byte aligned locations only \\n\");\n    int64_t result;\n    __asm__ __volatile__ ( __TBB_fildq  \" %1\\n\\t\"\n                           __TBB_fistpq \" %0\" :  \"=m\"(result) : \"m\"(*(const __TBB_VOLATILE uint64_t*)ptr) : \"memory\" );\n    return result;\n}\n\nstatic inline void __TBB_machine_aligned_store8 (volatile void *ptr, int64_t value ) {\n 
   __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),\"__TBB_machine_aligned_store8 should be used with 8 byte aligned locations only \\n\");\n    // Aligned store\n    __asm__ __volatile__ ( __TBB_fildq  \" %1\\n\\t\"\n                           __TBB_fistpq \" %0\" :  \"=m\"(*(__TBB_VOLATILE int64_t*)ptr) : \"m\"(value) : \"memory\" );\n}\n\nstatic inline int64_t __TBB_machine_load8 (const volatile void *ptr) {\n#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN\n    if( tbb::internal::is_aligned(ptr,8)) {\n#endif\n        return __TBB_machine_aligned_load8(ptr);\n#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN\n    } else {\n        // Unaligned load\n        return __TBB_machine_cmpswp8(const_cast<void*>(ptr),0,0);\n    }\n#endif\n}\n\n//! Handles misaligned 8-byte store\n/** Defined in tbb_misc.cpp */\nextern \"C\" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value );\nextern \"C\" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr );\n\nstatic inline void __TBB_machine_store8(volatile void *ptr, int64_t value) {\n#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN\n    if( tbb::internal::is_aligned(ptr,8)) {\n#endif\n        __TBB_machine_aligned_store8(ptr,value);\n#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN\n    } else {\n        // Unaligned store\n#if TBB_USE_PERFORMANCE_WARNINGS\n        __TBB_machine_store8_slow_perf_warning(ptr);\n#endif /* TBB_USE_PERFORMANCE_WARNINGS */\n        __TBB_machine_store8_slow(ptr,value);\n    }\n#endif\n}\n \n// Machine specific atomic operations\n#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)\n#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)\n\n#define __TBB_USE_GENERIC_DWORD_FETCH_ADD                   1\n#define __TBB_USE_GENERIC_DWORD_FETCH_STORE                 1\n#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE           1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/linux_ia64.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia64_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_linux_ia64_H\n\n#include <stdint.h>\n#include <ia64intrin.h>\n\n#define __TBB_WORDSIZE 8\n#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n\n#if __INTEL_COMPILER\n    #define __TBB_compiler_fence()\n    #define __TBB_control_consistency_helper() __TBB_compiler_fence()\n    #define __TBB_acquire_consistency_helper()\n    #define __TBB_release_consistency_helper()\n    #define __TBB_full_memory_fence()          __mf()\n#else\n    #define __TBB_compiler_fence() __asm__ __volatile__(\"\": : :\"memory\")\n    #define __TBB_control_consistency_helper() __TBB_compiler_fence()\n    // Even though GCC imbues volatile loads with acquire semantics, it sometimes moves \n    // loads over the acquire fence. The following helpers stop such incorrect code motion.\n    #define __TBB_acquire_consistency_helper() __TBB_compiler_fence()\n    #define __TBB_release_consistency_helper() __TBB_compiler_fence()\n    #define __TBB_full_memory_fence()          __asm__ __volatile__(\"mf\": : :\"memory\")\n#endif /* !__INTEL_COMPILER */\n\n// Most of the functions will be in a .s file\n// TODO: revise dynamic_link, memory pools and etc. 
if the library dependency is removed.\n\nextern \"C\" {\n    int8_t __TBB_machine_fetchadd1__TBB_full_fence (volatile void *ptr, int8_t addend);\n    int8_t __TBB_machine_fetchadd1acquire(volatile void *ptr, int8_t addend);\n    int8_t __TBB_machine_fetchadd1release(volatile void *ptr, int8_t addend);\n\n    int16_t __TBB_machine_fetchadd2__TBB_full_fence (volatile void *ptr, int16_t addend);\n    int16_t __TBB_machine_fetchadd2acquire(volatile void *ptr, int16_t addend);\n    int16_t __TBB_machine_fetchadd2release(volatile void *ptr, int16_t addend);\n\n    int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value);\n    int32_t __TBB_machine_fetchadd4acquire(volatile void *ptr, int32_t addend);\n    int32_t __TBB_machine_fetchadd4release(volatile void *ptr, int32_t addend);\n\n    int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value);\n    int64_t __TBB_machine_fetchadd8acquire(volatile void *ptr, int64_t addend);\n    int64_t __TBB_machine_fetchadd8release(volatile void *ptr, int64_t addend);\n\n    int8_t __TBB_machine_fetchstore1__TBB_full_fence (volatile void *ptr, int8_t value);\n    int8_t __TBB_machine_fetchstore1acquire(volatile void *ptr, int8_t value);\n    int8_t __TBB_machine_fetchstore1release(volatile void *ptr, int8_t value);\n\n    int16_t __TBB_machine_fetchstore2__TBB_full_fence (volatile void *ptr, int16_t value);\n    int16_t __TBB_machine_fetchstore2acquire(volatile void *ptr, int16_t value);\n    int16_t __TBB_machine_fetchstore2release(volatile void *ptr, int16_t value);\n\n    int32_t __TBB_machine_fetchstore4__TBB_full_fence (volatile void *ptr, int32_t value);\n    int32_t __TBB_machine_fetchstore4acquire(volatile void *ptr, int32_t value);\n    int32_t __TBB_machine_fetchstore4release(volatile void *ptr, int32_t value);\n\n    int64_t __TBB_machine_fetchstore8__TBB_full_fence (volatile void *ptr, int64_t value);\n    int64_t __TBB_machine_fetchstore8acquire(volatile void *ptr, 
int64_t value);\n    int64_t __TBB_machine_fetchstore8release(volatile void *ptr, int64_t value);\n\n    int8_t __TBB_machine_cmpswp1__TBB_full_fence (volatile void *ptr, int8_t value, int8_t comparand); \n    int8_t __TBB_machine_cmpswp1acquire(volatile void *ptr, int8_t value, int8_t comparand); \n    int8_t __TBB_machine_cmpswp1release(volatile void *ptr, int8_t value, int8_t comparand); \n\n    int16_t __TBB_machine_cmpswp2__TBB_full_fence (volatile void *ptr, int16_t value, int16_t comparand);\n    int16_t __TBB_machine_cmpswp2acquire(volatile void *ptr, int16_t value, int16_t comparand); \n    int16_t __TBB_machine_cmpswp2release(volatile void *ptr, int16_t value, int16_t comparand); \n\n    int32_t __TBB_machine_cmpswp4__TBB_full_fence (volatile void *ptr, int32_t value, int32_t comparand);\n    int32_t __TBB_machine_cmpswp4acquire(volatile void *ptr, int32_t value, int32_t comparand); \n    int32_t __TBB_machine_cmpswp4release(volatile void *ptr, int32_t value, int32_t comparand); \n\n    int64_t __TBB_machine_cmpswp8__TBB_full_fence (volatile void *ptr, int64_t value, int64_t comparand);\n    int64_t __TBB_machine_cmpswp8acquire(volatile void *ptr, int64_t value, int64_t comparand); \n    int64_t __TBB_machine_cmpswp8release(volatile void *ptr, int64_t value, int64_t comparand); \n\n    int64_t __TBB_machine_lg(uint64_t value);\n    void __TBB_machine_pause(int32_t delay);\n    bool __TBB_machine_trylockbyte( volatile unsigned char &ptr );\n    int64_t __TBB_machine_lockbyte( volatile unsigned char &ptr );\n\n    //! Retrieves the current RSE backing store pointer. 
IA64 specific.\n    void* __TBB_get_bsp();\n\n    int32_t __TBB_machine_load1_relaxed(const void *ptr);\n    int32_t __TBB_machine_load2_relaxed(const void *ptr);\n    int32_t __TBB_machine_load4_relaxed(const void *ptr);\n    int64_t __TBB_machine_load8_relaxed(const void *ptr);\n\n    void __TBB_machine_store1_relaxed(void *ptr, int32_t value);\n    void __TBB_machine_store2_relaxed(void *ptr, int32_t value);\n    void __TBB_machine_store4_relaxed(void *ptr, int32_t value);\n    void __TBB_machine_store8_relaxed(void *ptr, int64_t value);\n} // extern \"C\"\n\n// Mapping old entry points to the names corresponding to the new full_fence identifier.\n#define __TBB_machine_fetchadd1full_fence   __TBB_machine_fetchadd1__TBB_full_fence\n#define __TBB_machine_fetchadd2full_fence   __TBB_machine_fetchadd2__TBB_full_fence\n#define __TBB_machine_fetchadd4full_fence   __TBB_machine_fetchadd4__TBB_full_fence\n#define __TBB_machine_fetchadd8full_fence   __TBB_machine_fetchadd8__TBB_full_fence\n#define __TBB_machine_fetchstore1full_fence __TBB_machine_fetchstore1__TBB_full_fence\n#define __TBB_machine_fetchstore2full_fence __TBB_machine_fetchstore2__TBB_full_fence\n#define __TBB_machine_fetchstore4full_fence __TBB_machine_fetchstore4__TBB_full_fence\n#define __TBB_machine_fetchstore8full_fence __TBB_machine_fetchstore8__TBB_full_fence\n#define __TBB_machine_cmpswp1full_fence     __TBB_machine_cmpswp1__TBB_full_fence\n#define __TBB_machine_cmpswp2full_fence     __TBB_machine_cmpswp2__TBB_full_fence \n#define __TBB_machine_cmpswp4full_fence     __TBB_machine_cmpswp4__TBB_full_fence\n#define __TBB_machine_cmpswp8full_fence     __TBB_machine_cmpswp8__TBB_full_fence\n\n// Mapping relaxed operations to the entry points implementing them.\n/** On IA64 RMW operations implicitly have acquire semantics. Thus one cannot\n    actually have completely relaxed RMW operation here. 
**/\n#define __TBB_machine_fetchadd1relaxed      __TBB_machine_fetchadd1acquire\n#define __TBB_machine_fetchadd2relaxed      __TBB_machine_fetchadd2acquire\n#define __TBB_machine_fetchadd4relaxed      __TBB_machine_fetchadd4acquire\n#define __TBB_machine_fetchadd8relaxed      __TBB_machine_fetchadd8acquire\n#define __TBB_machine_fetchstore1relaxed    __TBB_machine_fetchstore1acquire\n#define __TBB_machine_fetchstore2relaxed    __TBB_machine_fetchstore2acquire\n#define __TBB_machine_fetchstore4relaxed    __TBB_machine_fetchstore4acquire\n#define __TBB_machine_fetchstore8relaxed    __TBB_machine_fetchstore8acquire\n#define __TBB_machine_cmpswp1relaxed        __TBB_machine_cmpswp1acquire\n#define __TBB_machine_cmpswp2relaxed        __TBB_machine_cmpswp2acquire \n#define __TBB_machine_cmpswp4relaxed        __TBB_machine_cmpswp4acquire\n#define __TBB_machine_cmpswp8relaxed        __TBB_machine_cmpswp8acquire\n\n#define __TBB_MACHINE_DEFINE_ATOMICS(S,V)                               \\\n    template <typename T>                                               \\\n    struct machine_load_store_relaxed<T,S> {                      \\\n        static inline T load ( const T& location ) {                    \\\n            return (T)__TBB_machine_load##S##_relaxed(&location);       \\\n        }                                                               \\\n        static inline void store ( T& location, T value ) {             \\\n            __TBB_machine_store##S##_relaxed(&location, (V)value);      \\\n        }                                                               \\\n    }\n\nnamespace tbb {\nnamespace internal {\n    __TBB_MACHINE_DEFINE_ATOMICS(1,int8_t);\n    __TBB_MACHINE_DEFINE_ATOMICS(2,int16_t);\n    __TBB_MACHINE_DEFINE_ATOMICS(4,int32_t);\n    __TBB_MACHINE_DEFINE_ATOMICS(8,int64_t);\n}} // namespaces internal, tbb\n\n#undef __TBB_MACHINE_DEFINE_ATOMICS\n\n#define __TBB_USE_FENCED_ATOMICS                            1\n#define 
__TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n// Definition of Lock functions\n#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P)\n#define __TBB_LockByte(P)    __TBB_machine_lockbyte(P)\n\n// Definition of other utility functions\n#define __TBB_Pause(V) __TBB_machine_pause(V)\n#define __TBB_Log2(V)  __TBB_machine_lg(V)\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/linux_intel64.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_intel64_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_linux_intel64_H\n\n#include <stdint.h>\n#include \"gcc_ia32_common.h\"\n\n#define __TBB_WORDSIZE 8\n#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n\n#define __TBB_compiler_fence() __asm__ __volatile__(\"\": : :\"memory\")\n#define __TBB_control_consistency_helper() __TBB_compiler_fence()\n#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()\n#define __TBB_release_consistency_helper() __TBB_compiler_fence()\n\n#ifndef __TBB_full_memory_fence\n#define __TBB_full_memory_fence() __asm__ __volatile__(\"mfence\": : :\"memory\")\n#endif\n\n#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X)                                          \\\nstatic inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand )  \\\n{                                                                                    \\\n    T result;                                                                        \\\n                                                                                     \\\n    __asm__ __volatile__(\"lock\\ncmpxchg\" X \" %2,%1\"                                  \\\n                          : \"=a\"(result), \"=m\"(*(volatile T*)ptr)                    \\\n                          : \"q\"(value), \"0\"(comparand), \"m\"(*(volatile T*)ptr)       \\\n                          : \"memory\");                                               \\\n    return result;                                                                   \\\n}                                                                                    \\\n                                                                                     \\\nstatic inline T 
__TBB_machine_fetchadd##S(volatile void *ptr, T addend)              \\\n{                                                                                    \\\n    T result;                                                                        \\\n    __asm__ __volatile__(\"lock\\nxadd\" X \" %0,%1\"                                     \\\n                          : \"=r\"(result),\"=m\"(*(volatile T*)ptr)                     \\\n                          : \"0\"(addend), \"m\"(*(volatile T*)ptr)                      \\\n                          : \"memory\");                                               \\\n    return result;                                                                   \\\n}                                                                                    \\\n                                                                                     \\\nstatic inline  T __TBB_machine_fetchstore##S(volatile void *ptr, T value)            \\\n{                                                                                    \\\n    T result;                                                                        \\\n    __asm__ __volatile__(\"lock\\nxchg\" X \" %0,%1\"                                     \\\n                          : \"=r\"(result),\"=m\"(*(volatile T*)ptr)                     \\\n                          : \"0\"(value), \"m\"(*(volatile T*)ptr)                       \\\n                          : \"memory\");                                               \\\n    return result;                                                                   \\\n}                                                                                    \\\n\n__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,\"\")\n__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,\"\")\n__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,\"\")\n__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t,\"q\")\n\n#undef __TBB_MACHINE_DEFINE_ATOMICS\n\nstatic inline void __TBB_machine_or( volatile void *ptr, uint64_t 
value ) {\n    __asm__ __volatile__(\"lock\\norq %1,%0\" : \"=m\"(*(volatile uint64_t*)ptr) : \"r\"(value), \"m\"(*(volatile uint64_t*)ptr) : \"memory\");\n}\n\nstatic inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) {\n    __asm__ __volatile__(\"lock\\nandq %1,%0\" : \"=m\"(*(volatile uint64_t*)ptr) : \"r\"(value), \"m\"(*(volatile uint64_t*)ptr) : \"memory\");\n}\n\n#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)\n#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)\n\n#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE           1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/mac_ppc.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_power_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_gcc_power_H\n\n#include <stdint.h>\n#include <unistd.h>\n\n// TODO: rename to gcc_power.h?\n// This file is for Power Architecture with compilers supporting GNU inline-assembler syntax (currently GNU g++ and IBM XL).\n// Note that XL V9.0 (sometimes?) 
has trouble dealing with empty input and/or clobber lists, so they should be avoided.\n\n#if __powerpc64__ || __ppc64__\n    // IBM XL documents __powerpc64__ (and __PPC64__).\n    // Apple documents __ppc64__ (with __ppc__ only on 32-bit).\n    #define __TBB_WORDSIZE 8\n#else\n    #define __TBB_WORDSIZE 4\n#endif\n\n// Traditionally Power Architecture is big-endian.\n// Little-endian could be just an address manipulation (compatibility with TBB not verified),\n// or normal little-endian (on more recent systems). Embedded PowerPC systems may support\n// page-specific endianness, but then one endianness must be hidden from TBB so that it still sees only one.\n#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG\n#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n#elif defined(__BYTE_ORDER__)\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED\n#else\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT\n#endif\n\n// On Power Architecture, (lock-free) 64-bit atomics require 64-bit hardware:\n#if __TBB_WORDSIZE==8\n    // Do not change the following definition, because TBB itself will use 64-bit atomics in 64-bit builds.\n    #define __TBB_64BIT_ATOMICS 1\n#elif __bgp__\n    // Do not change the following definition, because this is known 32-bit hardware.\n    #define __TBB_64BIT_ATOMICS 0\n#else\n    // To enable 64-bit atomics in 32-bit builds, set the value below to 1 instead of 0.\n    // You must make certain that the program will only use them on actual 64-bit hardware\n    // (which typically means that the entire program is only executed on such hardware),\n    // because their implementation involves machine instructions that are illegal elsewhere.\n    // The setting can be chosen independently per compilation unit,\n    // which also means that TBB itself does not need to be 
rebuilt.\n    // Alternatively (but only for the current architecture and TBB version),\n    // override the default as a predefined macro when invoking the compiler.\n    #ifndef __TBB_64BIT_ATOMICS\n    #define __TBB_64BIT_ATOMICS 0\n    #endif\n#endif\n\ninline int32_t __TBB_machine_cmpswp4 (volatile void *ptr, int32_t value, int32_t comparand )\n{\n    int32_t result;\n\n    __asm__ __volatile__(\"sync\\n\"\n                         \"0:\\n\\t\"\n                         \"lwarx %[res],0,%[ptr]\\n\\t\"     /* load w/ reservation */\n                         \"cmpw %[res],%[cmp]\\n\\t\"        /* compare against comparand */\n                         \"bne- 1f\\n\\t\"                   /* exit if not same */\n                         \"stwcx. %[val],0,%[ptr]\\n\\t\"    /* store new value */\n                         \"bne- 0b\\n\"                     /* retry if reservation lost */\n                         \"1:\\n\\t\"                        /* the exit */\n                         \"isync\"\n                         : [res]\"=&r\"(result)\n                         , \"+m\"(* (int32_t*) ptr)        /* redundant with \"memory\" */\n                         : [ptr]\"r\"(ptr)\n                         , [val]\"r\"(value)\n                         , [cmp]\"r\"(comparand)\n                         : \"memory\"                      /* compiler full fence */\n                         , \"cr0\"                         /* clobbered by cmp and/or stwcx. 
*/\n                         );\n    return result;\n}\n\n#if __TBB_WORDSIZE==8\n\ninline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand )\n{\n    int64_t result;\n    __asm__ __volatile__(\"sync\\n\"\n                         \"0:\\n\\t\"\n                         \"ldarx %[res],0,%[ptr]\\n\\t\"     /* load w/ reservation */\n                         \"cmpd %[res],%[cmp]\\n\\t\"        /* compare against comparand */\n                         \"bne- 1f\\n\\t\"                   /* exit if not same */\n                         \"stdcx. %[val],0,%[ptr]\\n\\t\"    /* store new value */\n                         \"bne- 0b\\n\"                     /* retry if reservation lost */\n                         \"1:\\n\\t\"                        /* the exit */\n                         \"isync\"\n                         : [res]\"=&r\"(result)\n                         , \"+m\"(* (int64_t*) ptr)        /* redundant with \"memory\" */\n                         : [ptr]\"r\"(ptr)\n                         , [val]\"r\"(value)\n                         , [cmp]\"r\"(comparand)\n                         : \"memory\"                      /* compiler full fence */\n                         , \"cr0\"                         /* clobbered by cmp and/or stdcx. 
*/\n                         );\n    return result;\n}\n\n#elif __TBB_64BIT_ATOMICS /* && __TBB_WORDSIZE==4 */\n\ninline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand )\n{\n    int64_t result;\n    int64_t value_register, comparand_register, result_register; // dummy variables to allocate registers\n    __asm__ __volatile__(\"sync\\n\\t\"\n                         \"ld %[val],%[valm]\\n\\t\"\n                         \"ld %[cmp],%[cmpm]\\n\"\n                         \"0:\\n\\t\"\n                         \"ldarx %[res],0,%[ptr]\\n\\t\"     /* load w/ reservation */\n                         \"cmpd %[res],%[cmp]\\n\\t\"        /* compare against comparand */\n                         \"bne- 1f\\n\\t\"                   /* exit if not same */\n                         \"stdcx. %[val],0,%[ptr]\\n\\t\"    /* store new value */\n                         \"bne- 0b\\n\"                     /* retry if reservation lost */\n                         \"1:\\n\\t\"                        /* the exit */\n                         \"std %[res],%[resm]\\n\\t\"\n                         \"isync\"\n                         : [resm]\"=m\"(result)\n                         , [res] \"=&r\"(   result_register)\n                         , [val] \"=&r\"(    value_register)\n                         , [cmp] \"=&r\"(comparand_register)\n                         , \"+m\"(* (int64_t*) ptr)        /* redundant with \"memory\" */\n                         : [ptr] \"r\"(ptr)\n                         , [valm]\"m\"(value)\n                         , [cmpm]\"m\"(comparand)\n                         : \"memory\"                      /* compiler full fence */\n                         , \"cr0\"                         /* clobbered by cmpd and/or stdcx. 
*/\n                         );\n    return result;\n}\n\n#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */\n\n#define __TBB_MACHINE_DEFINE_LOAD_STORE(S,ldx,stx,cmpx)                                                       \\\n    template <typename T>                                                                                     \\\n    struct machine_load_store<T,S> {                                                                          \\\n        static inline T load_with_acquire(const volatile T& location) {                                       \\\n            T result;                                                                                         \\\n            __asm__ __volatile__(ldx \" %[res],0(%[ptr])\\n\"                                                    \\\n                                 \"0:\\n\\t\"                                                                     \\\n                                 cmpx \" %[res],%[res]\\n\\t\"                                                    \\\n                                 \"bne- 0b\\n\\t\"                                                                \\\n                                 \"isync\"                                                                      \\\n                                 : [res]\"=r\"(result)                                                          \\\n                                 : [ptr]\"b\"(&location) /* cannot use register 0 here */                       \\\n                                 , \"m\"(location)       /* redundant with \"memory\" */                          \\\n                                 : \"memory\"            /* compiler acquire fence */                           \\\n                                 , \"cr0\"               /* clobbered by cmpw/cmpd */);                         \\\n            return result;                                                                                    \\\n        }                       
                                                                              \\\n        static inline void store_with_release(volatile T &location, T value) {                                \\\n            __asm__ __volatile__(\"lwsync\\n\\t\"                                                                 \\\n                                 stx \" %[val],0(%[ptr])\"                                                      \\\n                                 : \"=m\"(location)      /* redundant with \"memory\" */                          \\\n                                 : [ptr]\"b\"(&location) /* cannot use register 0 here */                       \\\n                                 , [val]\"r\"(value)                                                            \\\n                                 : \"memory\"/*compiler release fence*/ /*(cr0 not affected)*/);                \\\n        }                                                                                                     \\\n    };                                                                                                        \\\n                                                                                                              \\\n    template <typename T>                                                                                     \\\n    struct machine_load_store_relaxed<T,S> {                                                                  \\\n        static inline T load (const __TBB_atomic T& location) {                                               \\\n            T result;                                                                                         \\\n            __asm__ __volatile__(ldx \" %[res],0(%[ptr])\"                                                      \\\n                                 : [res]\"=r\"(result)                                                          \\\n                                 : [ptr]\"b\"(&location) /* cannot use 
register 0 here */                       \\\n                                 , \"m\"(location)                                                              \\\n                                 ); /*(no compiler fence)*/ /*(cr0 not affected)*/                            \\\n            return result;                                                                                    \\\n        }                                                                                                     \\\n        static inline void store (__TBB_atomic T &location, T value) {                                        \\\n            __asm__ __volatile__(stx \" %[val],0(%[ptr])\"                                                      \\\n                                 : \"=m\"(location)                                                             \\\n                                 : [ptr]\"b\"(&location) /* cannot use register 0 here */                       \\\n                                 , [val]\"r\"(value)                                                            \\\n                                 ); /*(no compiler fence)*/ /*(cr0 not affected)*/                            \\\n        }                                                                                                     \\\n    };\n\nnamespace tbb {\nnamespace internal {\n    __TBB_MACHINE_DEFINE_LOAD_STORE(1,\"lbz\",\"stb\",\"cmpw\")\n    __TBB_MACHINE_DEFINE_LOAD_STORE(2,\"lhz\",\"sth\",\"cmpw\")\n    __TBB_MACHINE_DEFINE_LOAD_STORE(4,\"lwz\",\"stw\",\"cmpw\")\n\n#if __TBB_WORDSIZE==8\n\n    __TBB_MACHINE_DEFINE_LOAD_STORE(8,\"ld\" ,\"std\",\"cmpd\")\n\n#elif __TBB_64BIT_ATOMICS /* && __TBB_WORDSIZE==4 */\n\n    template <typename T>\n    struct machine_load_store<T,8> {\n        static inline T load_with_acquire(const volatile T& location) {\n            T result;\n            T result_register; // dummy variable to allocate a register\n            __asm__ __volatile__(\"ld 
%[res],0(%[ptr])\\n\\t\"\n                                 \"std %[res],%[resm]\\n\"\n                                 \"0:\\n\\t\"\n                                 \"cmpd %[res],%[res]\\n\\t\"\n                                 \"bne- 0b\\n\\t\"\n                                 \"isync\"\n                                 : [resm]\"=m\"(result)\n                                 , [res]\"=&r\"(result_register)\n                                 : [ptr]\"b\"(&location) /* cannot use register 0 here */\n                                 , \"m\"(location)       /* redundant with \"memory\" */\n                                 : \"memory\"            /* compiler acquire fence */\n                                 , \"cr0\"               /* clobbered by cmpd */);\n            return result;\n        }\n\n        static inline void store_with_release(volatile T &location, T value) {\n            T value_register; // dummy variable to allocate a register\n            __asm__ __volatile__(\"lwsync\\n\\t\"\n                                 \"ld %[val],%[valm]\\n\\t\"\n                                 \"std %[val],0(%[ptr])\"\n                                 : \"=m\"(location)      /* redundant with \"memory\" */\n                                 , [val]\"=&r\"(value_register)\n                                 : [ptr]\"b\"(&location) /* cannot use register 0 here */\n                                 , [valm]\"m\"(value)\n                                 : \"memory\"/*compiler release fence*/ /*(cr0 not affected)*/);\n        }\n    };\n\n    struct machine_load_store_relaxed<T,8> {\n        static inline T load (const volatile T& location) {\n            T result;\n            T result_register; // dummy variable to allocate a register\n            __asm__ __volatile__(\"ld %[res],0(%[ptr])\\n\\t\"\n                                 \"std %[res],%[resm]\"\n                                 : [resm]\"=m\"(result)\n                                 , [res]\"=&r\"(result_register)\n 
                                : [ptr]\"b\"(&location) /* cannot use register 0 here */\n                                 , \"m\"(location)\n                                 ); /*(no compiler fence)*/ /*(cr0 not affected)*/\n            return result;\n        }\n\n        static inline void store (volatile T &location, T value) {\n            T value_register; // dummy variable to allocate a register\n            __asm__ __volatile__(\"ld %[val],%[valm]\\n\\t\"\n                                 \"std %[val],0(%[ptr])\"\n                                 : \"=m\"(location)\n                                 , [val]\"=&r\"(value_register)\n                                 : [ptr]\"b\"(&location) /* cannot use register 0 here */\n                                 , [valm]\"m\"(value)\n                                 ); /*(no compiler fence)*/ /*(cr0 not affected)*/\n        }\n    };\n    #define __TBB_machine_load_store_relaxed_8\n\n#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */\n\n}} // namespaces internal, tbb\n\n#undef __TBB_MACHINE_DEFINE_LOAD_STORE\n\n#define __TBB_USE_GENERIC_PART_WORD_CAS                     1\n#define __TBB_USE_GENERIC_FETCH_ADD                         1\n#define __TBB_USE_GENERIC_FETCH_STORE                       1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n#define __TBB_control_consistency_helper() __asm__ __volatile__(\"isync\": : :\"memory\")\n#define __TBB_full_memory_fence()          __asm__ __volatile__( \"sync\": : :\"memory\")\n\nstatic inline intptr_t __TBB_machine_lg( uintptr_t x ) {\n    __TBB_ASSERT(x, \"__TBB_Log2(0) undefined\");\n    // cntlzd/cntlzw starts counting at 2^63/2^31 (ignoring any higher-order bits), and does not affect cr0\n#if __TBB_WORDSIZE==8\n    __asm__ __volatile__ (\"cntlzd %0,%0\" : \"+r\"(x));\n    return 63-static_cast<intptr_t>(x);\n#else\n    __asm__ __volatile__ (\"cntlzw %0,%0\" : \"+r\"(x));\n    return 31-static_cast<intptr_t>(x);\n#endif\n}\n#define __TBB_Log2(V) 
__TBB_machine_lg(V)\n\n// Assumes implicit alignment for any 32-bit value\ntypedef uint32_t __TBB_Flag;\n#define __TBB_Flag __TBB_Flag\n\ninline bool __TBB_machine_trylockbyte( __TBB_atomic __TBB_Flag &flag ) {\n    return __TBB_machine_cmpswp4(&flag,1,0)==0;\n}\n#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P)\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/macos_common.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_macos_common_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_macos_common_H\n\n#include <sched.h>\n#define __TBB_Yield()  sched_yield()\n\n// __TBB_HardwareConcurrency\n\n#include <sys/types.h>\n#include <sys/sysctl.h>\n\nstatic inline int __TBB_macos_available_cpu() {\n    int name[2] = {CTL_HW, HW_AVAILCPU};\n    int ncpu;\n    size_t size = sizeof(ncpu);\n    sysctl( name, 2, &ncpu, &size, NULL, 0 );\n    return ncpu;\n}\n\n#define __TBB_HardwareConcurrency() __TBB_macos_available_cpu()\n\n#ifndef __TBB_full_memory_fence\n    // TBB has not recognized the architecture (none of the architecture abstraction\n    // headers was included).\n    #define __TBB_UnknownArchitecture 1\n#endif\n\n#if __TBB_UnknownArchitecture\n// Implementation of atomic operations based on OS provided primitives\n#include <libkern/OSAtomic.h>\n\nstatic inline int64_t __TBB_machine_cmpswp8_OsX(volatile void *ptr, int64_t value, int64_t comparand)\n{\n    __TBB_ASSERT( tbb::internal::is_aligned(ptr,8), \"address not properly aligned for OS X* atomics\");\n    int64_t* address = (int64_t*)ptr;\n    while( !OSAtomicCompareAndSwap64Barrier(comparand, value, address) ){\n#if __TBB_WORDSIZE==8\n        int64_t snapshot = *address;\n#else\n        int64_t snapshot = OSAtomicAdd64( 0, address );\n#endif\n        if( snapshot!=comparand ) return snapshot;\n    }\n    return comparand;\n}\n\n#define __TBB_machine_cmpswp8 __TBB_machine_cmpswp8_OsX\n\n#endif /* __TBB_UnknownArchitecture */\n\n#if __TBB_UnknownArchitecture\n\n#ifndef __TBB_WORDSIZE\n#define __TBB_WORDSIZE 4\n#endif\n\n#ifdef __TBB_ENDIANNESS\n    // Already determined based on hardware architecture.\n#elif __BIG_ENDIAN__\n    #define __TBB_ENDIANNESS 
__TBB_ENDIAN_BIG\n#elif __LITTLE_ENDIAN__\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n#else\n    #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED\n#endif\n\n/** As this generic implementation has absolutely no information about underlying\n    hardware, its performance most likely will be sub-optimal because of full memory\n    fence usages where a more lightweight synchronization means (or none at all)\n    could suffice. Thus if you use this header to enable TBB on a new platform,\n    consider forking it and relaxing below helpers as appropriate. **/\n#define __TBB_control_consistency_helper() OSMemoryBarrier()\n#define __TBB_acquire_consistency_helper() OSMemoryBarrier()\n#define __TBB_release_consistency_helper() OSMemoryBarrier()\n#define __TBB_full_memory_fence()          OSMemoryBarrier()\n\nstatic inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand)\n{\n    __TBB_ASSERT( tbb::internal::is_aligned(ptr,4), \"address not properly aligned for OS X* atomics\");\n    int32_t* address = (int32_t*)ptr;\n    while( !OSAtomicCompareAndSwap32Barrier(comparand, value, address) ){\n        int32_t snapshot = *address;\n        if( snapshot!=comparand ) return snapshot;\n    }\n    return comparand;\n}\n\nstatic inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend)\n{\n    __TBB_ASSERT( tbb::internal::is_aligned(ptr,4), \"address not properly aligned for OS X* atomics\");\n    return OSAtomicAdd32Barrier(addend, (int32_t*)ptr) - addend;\n}\n\nstatic inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend)\n{\n    __TBB_ASSERT( tbb::internal::is_aligned(ptr,8), \"address not properly aligned for OS X* atomics\");\n    return OSAtomicAdd64Barrier(addend, (int64_t*)ptr) - addend;\n}\n\n#define __TBB_USE_GENERIC_PART_WORD_CAS                     1\n#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD               1\n#define __TBB_USE_GENERIC_FETCH_STORE                       1\n#define 
__TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#if __TBB_WORDSIZE == 4\n    #define __TBB_USE_GENERIC_DWORD_LOAD_STORE              1\n#endif\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n#endif /* __TBB_UnknownArchitecture */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/mic_common.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_mic_common_H\n#define __TBB_mic_common_H\n\n#ifndef __TBB_machine_H\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#if ! 
__TBB_DEFINE_MIC\n    #error mic_common.h should be included only when building for Intel(R) Many Integrated Core Architecture\n#endif\n\n#ifndef __TBB_PREFETCHING\n#define __TBB_PREFETCHING 1\n#endif\n#if __TBB_PREFETCHING\n#include <immintrin.h>\n#define __TBB_cl_prefetch(p) _mm_prefetch((const char*)p, _MM_HINT_T1)\n#define __TBB_cl_evict(p) _mm_clevict(p, _MM_HINT_T1)\n#endif\n\n/** Intel(R) Many Integrated Core Architecture does not support mfence and pause instructions **/\n#define __TBB_full_memory_fence() __asm__ __volatile__(\"lock; addl $0,(%%rsp)\":::\"memory\")\n#define __TBB_Pause(x) _mm_delay_32(16*(x))\n#define __TBB_STEALING_PAUSE 1500/16\n#include <sched.h>\n#define __TBB_Yield() sched_yield()\n\n// low-level timing intrinsic and its type\n#define __TBB_machine_time_stamp() _rdtsc()\ntypedef uint64_t machine_tsc_t;\n\n/** Specifics **/\n#define __TBB_STEALING_ABORT_ON_CONTENTION 1\n#define __TBB_YIELD2P 1\n#define __TBB_HOARD_NONLOCAL_TASKS 1\n\n#if ! ( __FreeBSD__ || __linux__ )\n    #error Intel(R) Many Integrated Core Compiler does not define __FreeBSD__ or __linux__ anymore. Check for the __TBB_XXX_BROKEN defined under __FreeBSD__ or __linux__.\n#endif /* ! ( __FreeBSD__ || __linux__ ) */\n\n#endif /* __TBB_mic_common_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/msvc_armv7.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_msvc_armv7_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_msvc_armv7_H\n\n#include <intrin.h>\n#include <float.h>\n\n#define __TBB_WORDSIZE 4\n\n#define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED\n\n#if defined(TBB_WIN32_USE_CL_BUILTINS)\n// We can test this on _M_IX86\n#pragma intrinsic(_ReadWriteBarrier)\n#pragma intrinsic(_mm_mfence)\n#define __TBB_compiler_fence()    _ReadWriteBarrier()\n#define __TBB_full_memory_fence() _mm_mfence()\n#define __TBB_control_consistency_helper() __TBB_compiler_fence()\n#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()\n#define __TBB_release_consistency_helper() __TBB_compiler_fence()\n#else\n//Now __dmb(_ARM_BARRIER_SY) is used for both compiler and memory fences\n//This might be changed later after testing\n#define __TBB_compiler_fence()    __dmb(_ARM_BARRIER_SY)\n#define __TBB_full_memory_fence() __dmb(_ARM_BARRIER_SY)\n#define __TBB_control_consistency_helper() __TBB_compiler_fence()\n#define __TBB_acquire_consistency_helper() __TBB_full_memory_fence()\n#define __TBB_release_consistency_helper() __TBB_full_memory_fence()\n#endif\n\n//--------------------------------------------------\n// Compare and swap\n//--------------------------------------------------\n\n/**\n * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr\n * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand\n * @param value value to assign *ptr to if *ptr==comparand\n * @param comparand value to compare with *ptr\n * @return value originally in memory at ptr, regardless of success\n*/\n\n#define __TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(S,T,F)                                               \\\ninline T __TBB_machine_cmpswp##S( volatile 
void *ptr, T value, T comparand ) {                   \\\n    return _InterlockedCompareExchange##F(reinterpret_cast<volatile T *>(ptr),value,comparand);  \\\n}                                                                                                \\\n\n#define __TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(S,T,F)                                             \\\ninline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) {                              \\\n    return _InterlockedExchangeAdd##F(reinterpret_cast<volatile T *>(ptr),value);                \\\n}                                                                                                \\\n\n__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(1,char,8)\n__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(2,short,16)\n__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(4,long,)\n__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(8,__int64,64)\n__TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(4,long,)\n#if defined(TBB_WIN32_USE_CL_BUILTINS)\n// No _InterlockedExchangeAdd64 intrinsic on _M_IX86\n#define __TBB_64BIT_ATOMICS 0\n#else\n__TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(8,__int64,64)\n#endif\n\ninline void __TBB_machine_pause (int32_t delay )\n{\n    while(delay>0)\n    {\n        __TBB_compiler_fence();\n        delay--;\n    }\n}\n\n// API to retrieve/update FPU control setting\n#define __TBB_CPU_CTL_ENV_PRESENT 1\n\nnamespace tbb {\nnamespace internal {\n\ntemplate <typename T, size_t S>\nstruct machine_load_store_relaxed {\n    static inline T load ( const volatile T& location ) {\n        const T value = location;\n\n        /*\n        * An extra memory barrier is required for errata #761319\n        * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a\n        */\n        __TBB_acquire_consistency_helper();\n        return value;\n    }\n\n    static inline void store ( volatile T& location, T value ) {\n        location = value;\n    }\n};\n\nclass cpu_ctl_env {\nprivate:\n    unsigned int my_ctl;\npublic:\n    bool operator!=( const cpu_ctl_env& 
ctl ) const { return my_ctl != ctl.my_ctl; }\n    void get_env() { my_ctl = _control87(0, 0); }\n    void set_env() const { _control87( my_ctl, ~0U ); }\n};\n\n} // namespace internal\n} // namespaces tbb\n\n// Machine specific atomic operations\n#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)\n#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)\n#define __TBB_Pause(V) __TBB_machine_pause(V)\n\n// Use generics for some things\n#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE               1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD                   1\n#define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE                 1\n#define __TBB_USE_GENERIC_FETCH_STORE                           1\n#define __TBB_USE_GENERIC_DWORD_LOAD_STORE                      1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE     1\n\n#if defined(TBB_WIN32_USE_CL_BUILTINS)\n#if !__TBB_WIN8UI_SUPPORT\nextern \"C\" __declspec(dllimport) int __stdcall SwitchToThread( void );\n#define __TBB_Yield()  SwitchToThread()\n#else\n#include<thread>\n#define __TBB_Yield()  std::this_thread::yield()\n#endif\n#else\n#define __TBB_Yield() __yield()\n#endif\n\n// Machine specific atomic operations\n#define __TBB_AtomicOR(P,V)     __TBB_machine_OR(P,V)\n#define __TBB_AtomicAND(P,V)    __TBB_machine_AND(P,V)\n\ntemplate <typename T1,typename T2>\ninline void __TBB_machine_OR( T1 *operand, T2 addend ) {\n    _InterlockedOr((long volatile *)operand, (long)addend);\n}\n\ntemplate <typename T1,typename T2>\ninline void __TBB_machine_AND( T1 *operand, T2 addend ) {\n    _InterlockedAnd((long volatile *)operand, (long)addend);\n}\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/msvc_ia32_common.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_machine_msvc_ia32_common_H\n#define __TBB_machine_msvc_ia32_common_H\n\n#include <intrin.h>\n\n//TODO: consider moving this macro to tbb_config.h and used there MSVC asm is used\n#if  !_M_X64 || __INTEL_COMPILER\n    #define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 1\n\n    #if _M_X64\n        #define __TBB_r(reg_name) r##reg_name\n    #else\n        #define __TBB_r(reg_name) e##reg_name\n    #endif\n#else\n    //MSVC in x64 mode does not accept inline assembler\n    #define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 0\n#endif\n\n#define __TBB_NO_X86_MSVC_INLINE_ASM_MSG \"The compiler being used is not supported (outdated?)\"\n\n#if (_MSC_VER >= 1300) || (__INTEL_COMPILER) //Use compiler intrinsic when available\n    #define __TBB_PAUSE_USE_INTRINSIC 1\n    #pragma intrinsic(_mm_pause)\n    namespace tbb { namespace internal { namespace intrinsics { namespace msvc {\n        static inline void __TBB_machine_pause (uintptr_t delay ) {\n            for (;delay>0; --delay )\n                _mm_pause();\n        }\n    }}}}\n#else\n    #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE\n        #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG\n    #endif\n\n    namespace tbb { namespace internal { namespace inline_asm { namespace msvc {\n        static inline void __TBB_machine_pause (uintptr_t delay ) {\n            _asm\n            {\n                mov __TBB_r(ax), delay\n              __TBB_L1:\n                pause\n                add __TBB_r(ax), -1\n                jne __TBB_L1\n            }\n            return;\n        }\n    }}}}\n#endif\n\nstatic inline void __TBB_machine_pause (uintptr_t delay ){\n    #if __TBB_PAUSE_USE_INTRINSIC\n        tbb::internal::intrinsics::msvc::__TBB_machine_pause(delay);\n    #else\n        tbb::internal::inline_asm::msvc::__TBB_machine_pause(delay);\n    #endif\n}\n\n//TODO: move this 
function to windows_api.h or to place where it is used\n#if (_MSC_VER<1400) && (!_WIN64) && (__TBB_X86_MSVC_INLINE_ASM_AVAILABLE)\n    static inline void* __TBB_machine_get_current_teb () {\n        void* pteb;\n        __asm mov eax, fs:[0x18]\n        __asm mov pteb, eax\n        return pteb;\n    }\n#endif\n\n#if ( _MSC_VER>=1400 && !defined(__INTEL_COMPILER) ) ||  (__INTEL_COMPILER>=1200)\n// MSVC did not have this intrinsic prior to VC8.\n// ICL 11.1 fails to compile a TBB example if __TBB_Log2 uses the intrinsic.\n    #define __TBB_LOG2_USE_BSR_INTRINSIC 1\n    #if _M_X64\n        #define __TBB_BSR_INTRINSIC _BitScanReverse64\n    #else\n        #define __TBB_BSR_INTRINSIC _BitScanReverse\n    #endif\n    #pragma intrinsic(__TBB_BSR_INTRINSIC)\n\n    namespace tbb { namespace internal { namespace intrinsics { namespace msvc {\n        inline uintptr_t __TBB_machine_lg( uintptr_t i ){\n            unsigned long j;\n            __TBB_BSR_INTRINSIC( &j, i );\n            return j;\n        }\n    }}}}\n#else\n    #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE\n        #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG\n    #endif\n\n    namespace tbb { namespace internal { namespace inline_asm { namespace msvc {\n        inline uintptr_t __TBB_machine_lg( uintptr_t i ){\n            uintptr_t j;\n            __asm\n            {\n                bsr __TBB_r(ax), i\n                mov j, __TBB_r(ax)\n            }\n            return j;\n        }\n    }}}}\n#endif\n\nstatic inline intptr_t __TBB_machine_lg( uintptr_t i ) {\n#if __TBB_LOG2_USE_BSR_INTRINSIC\n    return tbb::internal::intrinsics::msvc::__TBB_machine_lg(i);\n#else\n    return tbb::internal::inline_asm::msvc::__TBB_machine_lg(i);\n#endif\n}\n\n// API to retrieve/update FPU control setting\n#define __TBB_CPU_CTL_ENV_PRESENT 1\n\nnamespace tbb { namespace internal { class cpu_ctl_env; } }\n#if __TBB_X86_MSVC_INLINE_ASM_AVAILABLE\n    inline void __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* ctl ) {\n        
__asm {\n            __asm mov     __TBB_r(ax), ctl\n            __asm stmxcsr [__TBB_r(ax)]\n            __asm fstcw   [__TBB_r(ax)+4]\n        }\n    }\n    inline void __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* ctl ) {\n        __asm {\n            __asm mov     __TBB_r(ax), ctl\n            __asm ldmxcsr [__TBB_r(ax)]\n            __asm fldcw   [__TBB_r(ax)+4]\n        }\n    }\n#else\n    extern \"C\" {\n        void __TBB_EXPORTED_FUNC __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* );\n        void __TBB_EXPORTED_FUNC __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* );\n    }\n#endif\n\nnamespace tbb {\nnamespace internal {\nclass cpu_ctl_env {\nprivate:\n    int         mxcsr;\n    short       x87cw;\n    static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */\npublic:\n    bool operator!=( const cpu_ctl_env& ctl ) const { return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw; }\n    void get_env() {\n        __TBB_get_cpu_ctl_env( this );\n        mxcsr &= MXCSR_CONTROL_MASK;\n    }\n    void set_env() const { __TBB_set_cpu_ctl_env( this ); }\n};\n} // namespace internal\n} // namespace tbb\n\n#if !__TBB_WIN8UI_SUPPORT\nextern \"C\" __declspec(dllimport) int __stdcall SwitchToThread( void );\n#define __TBB_Yield()  SwitchToThread()\n#else\n#include<thread>\n#define __TBB_Yield()  std::this_thread::yield()\n#endif\n\n#define __TBB_Pause(V) __TBB_machine_pause(V)\n#define __TBB_Log2(V)  __TBB_machine_lg(V)\n\n#undef __TBB_r\n\nextern \"C\" {\n    __int8 __TBB_EXPORTED_FUNC __TBB_machine_try_lock_elided (volatile void* ptr);\n    void   __TBB_EXPORTED_FUNC __TBB_machine_unlock_elided (volatile void* ptr);\n\n    // 'pause' instruction aborts HLE/RTM transactions\n#if __TBB_PAUSE_USE_INTRINSIC\n    inline static void __TBB_machine_try_lock_elided_cancel() { _mm_pause(); }\n#else\n    inline static void __TBB_machine_try_lock_elided_cancel() { _asm pause; }\n#endif\n\n#if __TBB_TSX_INTRINSICS_PRESENT\n    #define 
__TBB_machine_is_in_transaction _xtest\n    #define __TBB_machine_begin_transaction _xbegin\n    #define __TBB_machine_end_transaction   _xend\n    // The value (0xFF) below comes from the\n    // Intel(R) 64 and IA-32 Architectures Optimization Reference Manual 12.4.5 lock not free\n    #define __TBB_machine_transaction_conflict_abort() _xabort(0xFF)\n#else\n    __int8           __TBB_EXPORTED_FUNC __TBB_machine_is_in_transaction();\n    unsigned __int32 __TBB_EXPORTED_FUNC __TBB_machine_begin_transaction();\n    void             __TBB_EXPORTED_FUNC __TBB_machine_end_transaction();\n    void             __TBB_EXPORTED_FUNC __TBB_machine_transaction_conflict_abort();\n#endif /* __TBB_TSX_INTRINSICS_PRESENT */\n}\n\n#endif /* __TBB_machine_msvc_ia32_common_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/sunos_sparc.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_sunos_sparc_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_sunos_sparc_H\n\n#include <stdint.h>\n#include <unistd.h>\n\n#define __TBB_WORDSIZE 8\n// Big endian is assumed for SPARC.\n// While hardware may support page-specific bi-endianness, only big endian pages may be exposed to TBB  \n#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG\n\n/** To those working on SPARC hardware. 
Consider relaxing acquire and release\n    consistency helpers to no-op (as this port covers TSO mode only). **/\n#define __TBB_compiler_fence()             __asm__ __volatile__ (\"\": : :\"memory\")\n#define __TBB_control_consistency_helper() __TBB_compiler_fence()\n#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()\n#define __TBB_release_consistency_helper() __TBB_compiler_fence()\n#define __TBB_full_memory_fence()          __asm__ __volatile__(\"membar #LoadLoad|#LoadStore|#StoreStore|#StoreLoad\": : : \"memory\")\n\n//--------------------------------------------------\n// Compare and swap\n//--------------------------------------------------\n\n/**\n * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr\n * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand\n * @param value value to assign *ptr to if *ptr==comparand\n * @param comparand value to compare with *ptr\n ( @return value originally in memory at ptr, regardless of success\n*/\nstatic inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ){\n  int32_t result;\n  __asm__ __volatile__(\n                       \"cas\\t[%5],%4,%1\"\n                       : \"=m\"(*(int32_t *)ptr), \"=r\"(result)\n                       : \"m\"(*(int32_t *)ptr), \"1\"(value), \"r\"(comparand), \"r\"(ptr)\n                       : \"memory\");\n  return result;\n}\n\n/**\n * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr\n * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand\n * @param value value to assign *ptr to if *ptr==comparand\n * @param comparand value to compare with *ptr\n ( @return value originally in memory at ptr, regardless of success\n */\nstatic inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ){\n  int64_t result;\n  __asm__ __volatile__(\n                       \"casx\\t[%5],%4,%1\"\n             
  : \"=m\"(*(int64_t *)ptr), \"=r\"(result)\n               : \"m\"(*(int64_t *)ptr), \"1\"(value), \"r\"(comparand), \"r\"(ptr)\n               : \"memory\");\n  return result;\n}\n\n//---------------------------------------------------\n// Fetch and add\n//---------------------------------------------------\n\n/**\n * Atomic fetch and add for 32 bit values, in this case implemented by continuously checking success of atomicity\n * @param ptr pointer to value to add addend to\n * @param addened value to add to *ptr\n * @return value at ptr before addened was added\n */\nstatic inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend){\n  int32_t result;\n  __asm__ __volatile__ (\n                        \"0:\\t add\\t %3, %4, %0\\n\"           // do addition\n                        \"\\t cas\\t [%2], %3, %0\\n\"           // cas to store result in memory\n                        \"\\t cmp\\t %3, %0\\n\"                 // check if value from memory is original\n                        \"\\t bne,a,pn\\t %%icc, 0b\\n\"         // if not try again\n                        \"\\t mov %0, %3\\n\"                   // use branch delay slot to move new value in memory to be added\n               : \"=&r\"(result), \"=m\"(*(int32_t *)ptr)\n               : \"r\"(ptr), \"r\"(*(int32_t *)ptr), \"r\"(addend), \"m\"(*(int32_t *)ptr)\n               : \"ccr\", \"memory\");\n  return result;\n}\n\n/**\n * Atomic fetch and add for 64 bit values, in this case implemented by continuously checking success of atomicity\n * @param ptr pointer to value to add addend to\n * @param addened value to add to *ptr\n * @return value at ptr before addened was added\n */\nstatic inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend){\n  int64_t result;\n  __asm__ __volatile__ (\n                        \"0:\\t add\\t %3, %4, %0\\n\"           // do addition\n                        \"\\t casx\\t [%2], %3, %0\\n\"          // cas to store result in memory\n  
                      \"\\t cmp\\t %3, %0\\n\"                 // check if value from memory is original\n                        \"\\t bne,a,pn\\t %%xcc, 0b\\n\"         // if not try again\n                        \"\\t mov %0, %3\\n\"                   // use branch delay slot to move new value in memory to be added\n                : \"=&r\"(result), \"=m\"(*(int64_t *)ptr)\n                : \"r\"(ptr), \"r\"(*(int64_t *)ptr), \"r\"(addend), \"m\"(*(int64_t *)ptr)\n                : \"ccr\", \"memory\");\n  return result;\n}\n\n//--------------------------------------------------------\n// Logarithm (base two, integer)\n//--------------------------------------------------------\n\nstatic inline int64_t __TBB_machine_lg( uint64_t x ) {\n    __TBB_ASSERT(x, \"__TBB_Log2(0) undefined\");\n    uint64_t count;\n    // one hot encode\n    x |= (x >> 1);\n    x |= (x >> 2);\n    x |= (x >> 4);\n    x |= (x >> 8);\n    x |= (x >> 16);\n    x |= (x >> 32);\n    // count 1's\n    __asm__ (\"popc %1, %0\" : \"=r\"(count) : \"r\"(x) );\n    return count-1;\n}\n\n//--------------------------------------------------------\n\nstatic inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) {\n  __asm__ __volatile__ (\n                        \"0:\\t or\\t %2, %3, %%g1\\n\"          // do operation\n                        \"\\t casx\\t [%1], %2, %%g1\\n\"        // cas to store result in memory\n                        \"\\t cmp\\t %2, %%g1\\n\"               // check if value from memory is original\n                        \"\\t bne,a,pn\\t %%xcc, 0b\\n\"         // if not try again\n                        \"\\t mov %%g1, %2\\n\"                 // use branch delay slot to move new value in memory to be added\n                : \"=m\"(*(int64_t *)ptr)\n                : \"r\"(ptr), \"r\"(*(int64_t *)ptr), \"r\"(value), \"m\"(*(int64_t *)ptr)\n                : \"ccr\", \"g1\", \"memory\");\n}\n\nstatic inline void __TBB_machine_and( volatile void *ptr, uint64_t 
value ) {\n  __asm__ __volatile__ (\n                        \"0:\\t and\\t %2, %3, %%g1\\n\"         // do operation\n                        \"\\t casx\\t [%1], %2, %%g1\\n\"        // cas to store result in memory\n                        \"\\t cmp\\t %2, %%g1\\n\"               // check if value from memory is original\n                        \"\\t bne,a,pn\\t %%xcc, 0b\\n\"         // if not try again\n                        \"\\t mov %%g1, %2\\n\"                 // use branch delay slot to move new value in memory to be added\n                : \"=m\"(*(int64_t *)ptr)\n                : \"r\"(ptr), \"r\"(*(int64_t *)ptr), \"r\"(value), \"m\"(*(int64_t *)ptr)\n                : \"ccr\", \"g1\", \"memory\");\n}\n\n\nstatic inline void __TBB_machine_pause( int32_t delay ) {\n    // do nothing, inlined, doesn't matter\n}\n\n// put 0xff in memory location, return memory value,\n//  generic trylockbyte puts 0x01, however this is fine\n//  because all that matters is that 0 is unlocked\nstatic inline bool __TBB_machine_trylockbyte(unsigned char &flag){\n    unsigned char result;\n    __asm__ __volatile__ (\n            \"ldstub\\t [%2], %0\\n\"\n        : \"=r\"(result), \"=m\"(flag)\n        : \"r\"(&flag), \"m\"(flag)\n        : \"memory\");\n    return result == 0;\n}\n\n#define __TBB_USE_GENERIC_PART_WORD_CAS                     1\n#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD               1\n#define __TBB_USE_GENERIC_FETCH_STORE                       1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)\n#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)\n\n// Definition of other functions\n#define __TBB_Pause(V) __TBB_machine_pause(V)\n#define __TBB_Log2(V)  __TBB_machine_lg(V)\n\n#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P)\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/windows_api.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_machine_windows_api_H\n#define __TBB_machine_windows_api_H\n\n#if _WIN32 || _WIN64\n\n#if _XBOX\n\n#define NONET\n#define NOD3D\n#include <xtl.h>\n\n#else // Assume \"usual\" Windows\n\n#include <windows.h>\n\n#endif // _XBOX\n\n#if _WIN32_WINNT < 0x0600\n// The following Windows API function is declared explicitly;\n// otherwise it fails to compile by VS2005.\n#if !defined(WINBASEAPI) || (_WIN32_WINNT < 0x0501 && _MSC_VER == 1400)\n#define __TBB_WINBASEAPI extern \"C\"\n#else\n#define __TBB_WINBASEAPI WINBASEAPI\n#endif\n__TBB_WINBASEAPI BOOL WINAPI TryEnterCriticalSection( LPCRITICAL_SECTION );\n__TBB_WINBASEAPI BOOL WINAPI InitializeCriticalSectionAndSpinCount( LPCRITICAL_SECTION, DWORD );\n// Overloading WINBASEAPI macro and using local functions missing in Windows XP/2003\n#define InitializeCriticalSectionEx inlineInitializeCriticalSectionEx\n#define CreateSemaphoreEx inlineCreateSemaphoreEx\n#define CreateEventEx inlineCreateEventEx\ninline BOOL WINAPI inlineInitializeCriticalSectionEx( LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD )\n{\n    return InitializeCriticalSectionAndSpinCount( lpCriticalSection, dwSpinCount );\n}\ninline HANDLE WINAPI inlineCreateSemaphoreEx( LPSECURITY_ATTRIBUTES lpSemaphoreAttributes, LONG lInitialCount, LONG lMaximumCount, LPCTSTR lpName, DWORD, DWORD )\n{\n    return CreateSemaphore( lpSemaphoreAttributes, lInitialCount, lMaximumCount, lpName );\n}\ninline HANDLE WINAPI inlineCreateEventEx( LPSECURITY_ATTRIBUTES lpEventAttributes, LPCTSTR lpName, DWORD dwFlags, DWORD )\n{\n    BOOL manual_reset = dwFlags&0x00000001 ? TRUE : FALSE; // CREATE_EVENT_MANUAL_RESET\n    BOOL initial_set  = dwFlags&0x00000002 ? 
TRUE : FALSE; // CREATE_EVENT_INITIAL_SET\n    return CreateEvent( lpEventAttributes, manual_reset, initial_set, lpName );\n}\n#endif\n\n#if defined(RTL_SRWLOCK_INIT)\n#ifndef __TBB_USE_SRWLOCK\n// TODO: turn it on when bug 1952 will be fixed\n#define __TBB_USE_SRWLOCK 0\n#endif\n#endif\n\n#else\n#error tbb/machine/windows_api.h should only be used for Windows based platforms\n#endif // _WIN32 || _WIN64\n\n#endif // __TBB_machine_windows_api_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/windows_ia32.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_ia32_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_windows_ia32_H\n\n#include \"msvc_ia32_common.h\"\n\n#define __TBB_WORDSIZE 4\n#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n\n#if __INTEL_COMPILER && (__INTEL_COMPILER < 1100)\n    #define __TBB_compiler_fence()    __asm { __asm nop }\n    #define __TBB_full_memory_fence() __asm { __asm mfence }\n#elif _MSC_VER >= 1300 || __INTEL_COMPILER\n    #pragma intrinsic(_ReadWriteBarrier)\n    #pragma intrinsic(_mm_mfence)\n    #define __TBB_compiler_fence()    _ReadWriteBarrier()\n    #define __TBB_full_memory_fence() _mm_mfence()\n#else\n    #error Unsupported compiler - need to define __TBB_{control,acquire,release}_consistency_helper to support it\n#endif\n\n#define __TBB_control_consistency_helper() __TBB_compiler_fence()\n#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()\n#define __TBB_release_consistency_helper() __TBB_compiler_fence()\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (push)\n    #pragma warning (disable: 4244 4267)\n#endif\n\nextern \"C\" {\n    __int64 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand );\n    __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend );\n    __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value );\n    void __TBB_EXPORTED_FUNC __TBB_machine_store8 (volatile void *ptr, __int64 value );\n    __int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr);\n}\n\n//TODO: use _InterlockedXXX intrinsics as they available since VC 2005\n#define 
__TBB_MACHINE_DEFINE_ATOMICS(S,T,U,A,C) \\\nstatic inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \\\n    T result; \\\n    volatile T *p = (T *)ptr; \\\n    __asm \\\n    { \\\n       __asm mov edx, p \\\n       __asm mov C , value \\\n       __asm mov A , comparand \\\n       __asm lock cmpxchg [edx], C \\\n       __asm mov result, A \\\n    } \\\n    return result; \\\n} \\\n\\\nstatic inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \\\n    T result; \\\n    volatile T *p = (T *)ptr; \\\n    __asm \\\n    { \\\n        __asm mov edx, p \\\n        __asm mov A, addend \\\n        __asm lock xadd [edx], A \\\n        __asm mov result, A \\\n    } \\\n    return result; \\\n}\\\n\\\nstatic inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \\\n    T result; \\\n    volatile T *p = (T *)ptr; \\\n    __asm \\\n    { \\\n        __asm mov edx, p \\\n        __asm mov A, value \\\n        __asm lock xchg [edx], A \\\n        __asm mov result, A \\\n    } \\\n    return result; \\\n}\n\n\n__TBB_MACHINE_DEFINE_ATOMICS(1, __int8, __int8, al, cl)\n__TBB_MACHINE_DEFINE_ATOMICS(2, __int16, __int16, ax, cx)\n__TBB_MACHINE_DEFINE_ATOMICS(4, ptrdiff_t, ptrdiff_t, eax, ecx)\n\n#undef __TBB_MACHINE_DEFINE_ATOMICS\n\nstatic inline void __TBB_machine_OR( volatile void *operand, __int32 addend ) {\n   __asm \n   {\n       mov eax, addend\n       mov edx, [operand]\n       lock or [edx], eax\n   }\n}\n\nstatic inline void __TBB_machine_AND( volatile void *operand, __int32 addend ) {\n   __asm \n   {\n       mov eax, addend\n       mov edx, [operand]\n       lock and [edx], eax\n   }\n}\n\n#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V)\n#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V)\n\n//TODO: Check if it possible and profitable for IA-32 architecture on (Linux and Windows)\n//to use of 64-bit load/store via floating point registers together with full fence\n//for sequentially consistent load/store, 
instead of CAS.\n#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE           1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    #pragma warning (pop)\n#endif // warnings 4244, 4267 are back\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/windows_intel64.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_intel64_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_windows_intel64_H\n\n#define __TBB_WORDSIZE 8\n#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE\n\n#include <intrin.h>\n#include \"msvc_ia32_common.h\"\n\n//TODO: Use _InterlockedXXX16 intrinsics for 2 byte operations\n#if !__INTEL_COMPILER\n    #pragma intrinsic(_InterlockedOr64)\n    #pragma intrinsic(_InterlockedAnd64)\n    #pragma intrinsic(_InterlockedCompareExchange)\n    #pragma intrinsic(_InterlockedCompareExchange64)\n    #pragma intrinsic(_InterlockedExchangeAdd)\n    #pragma intrinsic(_InterlockedExchangeAdd64)\n    #pragma intrinsic(_InterlockedExchange)\n    #pragma intrinsic(_InterlockedExchange64)\n#endif /* !(__INTEL_COMPILER) */\n\n#if __INTEL_COMPILER && (__INTEL_COMPILER < 1100)\n    #define __TBB_compiler_fence()    __asm { __asm nop }\n    #define __TBB_full_memory_fence() __asm { __asm mfence }\n#elif _MSC_VER >= 1300 || __INTEL_COMPILER\n    #pragma intrinsic(_ReadWriteBarrier)\n    #pragma intrinsic(_mm_mfence)\n    #define __TBB_compiler_fence()    _ReadWriteBarrier()\n    #define __TBB_full_memory_fence() _mm_mfence()\n#endif\n\n#define __TBB_control_consistency_helper() __TBB_compiler_fence()\n#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()\n#define __TBB_release_consistency_helper() __TBB_compiler_fence()\n\n// ATTENTION: if you ever change argument types in machine-specific primitives,\n// please take care of atomic_word<> specializations in tbb/atomic.h\nextern \"C\" {\n    __int8 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp1 (volatile void *ptr, __int8 value, __int8 comparand );\n    __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd1 (volatile void *ptr, __int8 addend );\n    __int8 
__TBB_EXPORTED_FUNC __TBB_machine_fetchstore1 (volatile void *ptr, __int8 value );\n    __int16 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp2 (volatile void *ptr, __int16 value, __int16 comparand );\n    __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd2 (volatile void *ptr, __int16 addend );\n    __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore2 (volatile void *ptr, __int16 value );\n}\n\ninline long __TBB_machine_cmpswp4 (volatile void *ptr, __int32 value, __int32 comparand ) {\n    return _InterlockedCompareExchange( (long*)ptr, value, comparand );\n}\ninline long __TBB_machine_fetchadd4 (volatile void *ptr, __int32 addend ) {\n    return _InterlockedExchangeAdd( (long*)ptr, addend );\n}\ninline long __TBB_machine_fetchstore4 (volatile void *ptr, __int32 value ) {\n    return _InterlockedExchange( (long*)ptr, value );\n}\n\ninline __int64 __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ) {\n    return _InterlockedCompareExchange64( (__int64*)ptr, value, comparand );\n}\ninline __int64 __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ) {\n    return _InterlockedExchangeAdd64( (__int64*)ptr, addend );\n}\ninline __int64 __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ) {\n    return _InterlockedExchange64( (__int64*)ptr, value );\n}\n\n#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE           1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\ninline void __TBB_machine_OR( volatile void *operand, intptr_t addend ) {\n    _InterlockedOr64((__int64*)operand, addend); \n}\n\ninline void __TBB_machine_AND( volatile void *operand, intptr_t addend ) {\n    _InterlockedAnd64((__int64*)operand, addend); \n}\n\n#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V)\n#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V)\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/machine/xbox360_ppc.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n// TODO: revise by comparing with mac_ppc.h\n\n#if !defined(__TBB_machine_H) || defined(__TBB_machine_xbox360_ppc_H)\n#error Do not #include this internal file directly; use public TBB headers instead.\n#endif\n\n#define __TBB_machine_xbox360_ppc_H\n\n#define NONET\n#define NOD3D\n#include \"xtl.h\"    \n#include \"ppcintrinsics.h\"\n\n#if _MSC_VER >= 1300\nextern \"C\" void _MemoryBarrier();\n#pragma intrinsic(_MemoryBarrier)\n#define __TBB_control_consistency_helper() __isync()\n#define __TBB_acquire_consistency_helper() _MemoryBarrier()\n#define __TBB_release_consistency_helper() _MemoryBarrier()\n#endif\n\n#define __TBB_full_memory_fence() __sync()\n\n#define __TBB_WORDSIZE 4\n#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG\n\n//todo: define __TBB_USE_FENCED_ATOMICS and define acquire/release primitives to maximize performance\n\ninline __int32 __TBB_machine_cmpswp4(volatile void *ptr, __int32 value, __int32 comparand ) {                               \n __sync();\n __int32 result = InterlockedCompareExchange((volatile LONG*)ptr, value, comparand);\n __isync();\n return result;\n}\n\ninline __int64 __TBB_machine_cmpswp8(volatile void *ptr, __int64 value, __int64 comparand )\n{\n __sync();\n __int64 result = InterlockedCompareExchange64((volatile LONG64*)ptr, value, comparand);\n __isync();\n return result;\n}\n\n#define __TBB_USE_GENERIC_PART_WORD_CAS                     1\n#define __TBB_USE_GENERIC_FETCH_ADD                         1\n#define __TBB_USE_GENERIC_FETCH_STORE                       1\n#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1\n#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1\n#define __TBB_USE_GENERIC_DWORD_LOAD_STORE                  1\n#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1\n\n#pragma optimize( \"\", off )\ninline void __TBB_machine_pause (__int32 
delay ) \n{\n for (__int32 i=0; i<delay; i++) {;};\n}\n#pragma optimize( \"\", on ) \n\n#define __TBB_Yield()  Sleep(0)\n#define __TBB_Pause(V) __TBB_machine_pause(V)\n\n// This port uses only 2 hardware threads for TBB on XBOX 360. \n// Others are left to sound etc.\n// Change the following mask to allow TBB use more HW threads.\nstatic const int __TBB_XBOX360_HARDWARE_THREAD_MASK = 0x0C;\n\nstatic inline int __TBB_XBOX360_DetectNumberOfWorkers() \n{\n     char a[__TBB_XBOX360_HARDWARE_THREAD_MASK];  //compile time assert - at least one bit should be set always\n     a[0]=0;\n\n     return ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 0) & 1) +\n            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 1) & 1) +\n            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 2) & 1) +\n            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 3) & 1) +\n            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 4) & 1) +\n            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 5) & 1) + 1;  // +1 accomodates for the master thread\n}\n\nstatic inline int __TBB_XBOX360_GetHardwareThreadIndex(int workerThreadIndex)\n{\n    workerThreadIndex %= __TBB_XBOX360_DetectNumberOfWorkers()-1;\n    int m = __TBB_XBOX360_HARDWARE_THREAD_MASK;\n    int index = 0;\n    int skipcount = workerThreadIndex;\n    while (true)\n    {\n        if ((m & 1)!=0) \n        {\n            if (skipcount==0) break;\n            skipcount--;\n        }\n        m >>= 1;\n       index++;\n    }\n    return index; \n}\n\n#define __TBB_HardwareConcurrency() __TBB_XBOX360_DetectNumberOfWorkers()\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/mailbox.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_mailbox_H\n#define _TBB_mailbox_H\n\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb/cache_aligned_allocator.h\"\n\n#include \"scheduler_common.h\"\n#include \"tbb/atomic.h\"\n\nnamespace tbb {\nnamespace internal {\n\nstruct task_proxy : public task {\n    static const intptr_t      pool_bit = 1<<0;\n    static const intptr_t   mailbox_bit = 1<<1;\n    static const intptr_t location_mask = pool_bit | mailbox_bit;\n    /* All but two low-order bits represent a (task*).\n       Two low-order bits mean:\n       1 = proxy is/was/will be in task pool\n       2 = proxy is/was/will be in mailbox */\n    intptr_t task_and_tag;\n\n    //! Pointer to next task_proxy in a mailbox\n    task_proxy *__TBB_atomic next_in_mailbox;\n\n    //! Mailbox to which this was mailed.\n    mail_outbox* outbox;\n\n    //! True if the proxy is stored both in its sender's pool and in the destination mailbox.\n    static bool is_shared ( intptr_t tat ) {\n        return (tat & location_mask) == location_mask;\n    }\n\n    //! Returns a pointer to the encapsulated task or NULL.\n    static task* task_ptr ( intptr_t tat ) {\n        return (task*)(tat & ~location_mask);\n    }\n\n    //! 
Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary.\n    template<intptr_t from_bit>\n    inline task* extract_task () {\n        __TBB_ASSERT( prefix().extra_state == es_task_proxy, \"Normal task misinterpreted as a proxy?\" );\n        intptr_t tat = __TBB_load_with_acquire(task_and_tag);\n        __TBB_ASSERT( tat == from_bit || (is_shared(tat) && task_ptr(tat)),\n            \"Proxy's tag cannot specify both locations if the proxy \"\n            \"was retrieved from one of its original locations\" );\n        if ( tat != from_bit ) {\n            const intptr_t cleaner_bit = location_mask & ~from_bit;\n            // Attempt to transition the proxy to the \"empty\" state with\n            // cleaner_bit specifying entity responsible for its eventual freeing.\n            // Explicit cast to void* is to work around a seeming ICC 11.1 bug.\n            if ( as_atomic(task_and_tag).compare_and_swap(cleaner_bit, tat) == tat ) {\n                // Successfully grabbed the task, and left new owner with the job of freeing the proxy\n                return task_ptr(tat);\n            }\n        }\n        // Proxied task has already been claimed from another proxy location.\n        __TBB_ASSERT( task_and_tag == from_bit, \"Empty proxy cannot contain non-zero task pointer\" );\n        poison_pointer(outbox);\n        poison_pointer(next_in_mailbox);\n        poison_value(task_and_tag);\n        return NULL;\n    }\n}; // struct task_proxy\n\n//! Internal representation of mail_outbox, without padding.\nclass unpadded_mail_outbox {\nprotected:\n    typedef task_proxy*__TBB_atomic proxy_ptr;\n\n    //! Pointer to first task_proxy in mailbox, or NULL if box is empty. \n    proxy_ptr my_first;\n\n    //! Pointer to pointer that will point to next item in the queue.  Never NULL.\n    proxy_ptr* __TBB_atomic my_last;\n\n    //! Owner of mailbox is not executing a task, and has drained its own task pool.\n    bool my_is_idle;\n};\n\n//! 
Class representing where mail is put.\n/** Padded to occupy a cache line. */\nclass mail_outbox : padded<unpadded_mail_outbox> {\n\n    task_proxy* internal_pop() {\n        task_proxy* const first = __TBB_load_relaxed(my_first);\n        if( !first )\n            return NULL;\n        __TBB_control_consistency_helper(); // on my_first\n        // There is a first item in the mailbox.  See if there is a second.\n        if( task_proxy* second = first->next_in_mailbox ) {\n            // There are at least two items, so first item can be popped easily.\n            my_first = second;\n        } else {\n            // There is only one item.  Some care is required to pop it.\n            my_first = NULL;\n            if( as_atomic(my_last).compare_and_swap(&my_first,&first->next_in_mailbox) == &first->next_in_mailbox )\n            {\n                // Successfully transitioned mailbox from having one item to having none.\n                __TBB_ASSERT(!first->next_in_mailbox,NULL);\n            } else {\n                // Some other thread updated my_last but has not filled in first->next_in_mailbox\n                // Wait until first item points to second item.\n                atomic_backoff backoff;\n                while( !(second = first->next_in_mailbox) ) backoff.pause();\n                my_first = second;\n            }\n        }\n        return first;\n    }\npublic:\n    friend class mail_inbox;\n\n    //! Push task_proxy onto the mailbox queue of another thread.\n    /** Implementation is wait-free. */\n    void push( task_proxy& t ) {\n        __TBB_ASSERT(&t, NULL);\n        t.next_in_mailbox = NULL; \n        proxy_ptr * const link = (proxy_ptr *)__TBB_FetchAndStoreW(&my_last,(intptr_t)&t.next_in_mailbox);\n        // No release fence required for the next store, because there are no memory operations \n        // between the previous fully fenced atomic operation and the store.\n        __TBB_store_relaxed(*link, &t);\n    }\n\n    //! 
Return true if mailbox is empty\n    bool empty() {\n        return __TBB_load_relaxed(my_first) == NULL;\n    }\n\n    //! Construct *this as a mailbox from zeroed memory.\n    /** Raise assertion if *this is not previously zeroed, or sizeof(this) is wrong.\n        This method is provided instead of a full constructor since we know the object\n        will be constructed in zeroed memory. */\n    void construct() {\n        __TBB_ASSERT( sizeof(*this)==NFS_MaxLineSize, NULL );\n        __TBB_ASSERT( !my_first, NULL );\n        __TBB_ASSERT( !my_last, NULL );\n        __TBB_ASSERT( !my_is_idle, NULL );\n        my_last=&my_first;\n        suppress_unused_warning(pad);\n    }\n\n    //! Drain the mailbox \n    intptr_t drain() {\n        intptr_t k = 0;\n        // No fences here because other threads have already quit.\n        for( ; task_proxy* t = my_first; ++k ) {\n            my_first = t->next_in_mailbox;\n            NFS_Free((char*)t - task_prefix_reservation_size);\n        }\n        return k;  \n    }\n\n    //! True if thread that owns this mailbox is looking for work.\n    bool recipient_is_idle() {\n        return my_is_idle;\n    }\n}; // class mail_outbox\n\n//! Class representing source of mail.\nclass mail_inbox {\n    //! Corresponding sink where mail that we receive will be put.\n    mail_outbox* my_putter;\npublic:\n    //! Construct unattached inbox\n    mail_inbox() : my_putter(NULL) {}\n\n    //! Attach inbox to a corresponding outbox. \n    void attach( mail_outbox& putter ) {\n        __TBB_ASSERT(!my_putter,\"already attached\");\n        my_putter = &putter;\n    }\n    //! Detach inbox from its outbox\n    void detach() {\n        __TBB_ASSERT(my_putter,\"not attached\");\n        my_putter = NULL;\n    }\n    //! Get next piece of mail, or NULL if mailbox is empty.\n    task_proxy* pop() {\n        return my_putter->internal_pop();\n    }\n    //! 
Return true if mailbox is empty\n    bool empty() {\n        return my_putter->empty();\n    }\n    //! Indicate whether thread that reads this mailbox is idle.\n    /** Raises assertion failure if mailbox is redundantly marked as not idle. */\n    void set_is_idle( bool value ) {\n        if( my_putter ) {\n            __TBB_ASSERT( my_putter->my_is_idle || value, \"attempt to redundantly mark mailbox as not idle\" );\n            my_putter->my_is_idle = value;\n        }\n    }\n    //! Indicate whether thread that reads this mailbox is idle.\n    bool is_idle_state ( bool value ) const {\n        return !my_putter || my_putter->my_is_idle == value;\n    }\n\n#if DO_ITT_NOTIFY\n    //! Get pointer to corresponding outbox used for ITT_NOTIFY calls.\n    void* outbox() const {return my_putter;}\n#endif /* DO_ITT_NOTIFY */ \n}; // class mail_inbox\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_mailbox_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/market.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_stddef.h\"\n\n#include \"market.h\"\n#include \"tbb_main.h\"\n#include \"governor.h\"\n#include \"scheduler.h\"\n#include \"itt_notify.h\"\n\nnamespace tbb {\nnamespace internal {\n\nvoid market::insert_arena_into_list ( arena& a ) {\n#if __TBB_TASK_PRIORITY\n    arena_list_type &arenas = my_priority_levels[a.my_top_priority].arenas;\n    arena *&next = my_priority_levels[a.my_top_priority].next_arena;\n#else /* !__TBB_TASK_PRIORITY */\n    arena_list_type &arenas = my_arenas;\n    arena *&next = my_next_arena;\n#endif /* !__TBB_TASK_PRIORITY */\n    arenas.push_front( a );\n    if ( arenas.size() == 1 )\n        next = &*arenas.begin();\n}\n\nvoid market::remove_arena_from_list ( arena& a ) {\n#if __TBB_TASK_PRIORITY\n    arena_list_type &arenas = my_priority_levels[a.my_top_priority].arenas;\n    arena *&next = my_priority_levels[a.my_top_priority].next_arena;\n#else /* !__TBB_TASK_PRIORITY */\n    arena_list_type &arenas = my_arenas;\n    arena *&next = my_next_arena;\n#endif /* !__TBB_TASK_PRIORITY */\n    arena_list_type::iterator it = next;\n    __TBB_ASSERT( it != arenas.end(), NULL );\n    if ( next == &a ) {\n        if ( ++it == arenas.end() && arenas.size() > 1 )\n            it = arenas.begin();\n        next = &*it;\n    }\n    arenas.remove( a );\n}\n\n//------------------------------------------------------------------------\n// market\n//------------------------------------------------------------------------\n\nmarket::market ( unsigned max_num_workers, size_t stack_size )\n    : my_ref_count(1)\n    , my_stack_size(stack_size)\n    , my_max_num_workers(max_num_workers)\n#if __TBB_TASK_PRIORITY\n    , my_global_top_priority(normalized_normal_priority)\n    , my_global_bottom_priority(normalized_normal_priority)\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    , 
my_lowest_populated_level(normalized_normal_priority)\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n#endif /* __TBB_TASK_PRIORITY */\n{\n#if __TBB_TASK_PRIORITY\n    __TBB_ASSERT( my_global_reload_epoch == 0, NULL );\n    my_priority_levels[normalized_normal_priority].workers_available = max_num_workers;\n#endif /* __TBB_TASK_PRIORITY */\n\n    // Once created RML server will start initializing workers that will need \n    // global market instance to get worker stack size\n    my_server = governor::create_rml_server( *this );\n    __TBB_ASSERT( my_server, \"Failed to create RML server\" );\n}\n\n\nmarket& market::global_market ( unsigned max_num_workers, size_t stack_size ) {\n    global_market_mutex_type::scoped_lock lock( theMarketMutex );\n    market *m = theMarket;\n    if ( m ) {\n        ++m->my_ref_count;\n        if ( m->my_stack_size < stack_size )\n            runtime_warning( \"Newer master request for larger stack cannot be satisfied\\n\" );\n    }\n    else {\n        max_num_workers = max( governor::default_num_threads() - 1, max_num_workers );\n        // at least 1 worker is required to support starvation resistant tasks\n        if( max_num_workers==0 ) max_num_workers = 1;\n        // Create the global market instance\n        size_t size = sizeof(market);\n#if __TBB_TASK_GROUP_CONTEXT\n        __TBB_ASSERT( __TBB_offsetof(market, my_workers) + sizeof(generic_scheduler*) == sizeof(market),\n                      \"my_workers must be the last data field of the market class\");\n        size += sizeof(generic_scheduler*) * (max_num_workers - 1);\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n        __TBB_InitOnce::add_ref();\n        void* storage = NFS_Allocate(1, size, NULL);\n        memset( storage, 0, size );\n        // Initialize and publish global market\n        m = new (storage) market( max_num_workers, stack_size );\n        theMarket = m;\n    }\n    return *m;\n}\n\nvoid market::destroy () {\n#if __TBB_COUNT_TASK_NODES\n    if ( 
my_task_node_count )\n        runtime_warning( \"Leaked %ld task objects\\n\", (long)my_task_node_count );\n#endif /* __TBB_COUNT_TASK_NODES */\n    this->~market();\n    NFS_Free( this );\n    __TBB_InitOnce::remove_ref();\n}\n\nvoid market::release () {\n    __TBB_ASSERT( theMarket == this, \"Global market instance was destroyed prematurely?\" );\n    bool do_release = false;\n    {\n        global_market_mutex_type::scoped_lock lock(theMarketMutex);\n        if ( --my_ref_count == 0 ) {\n            do_release = true;\n            theMarket = NULL;\n        }\n    }\n    if( do_release )\n        my_server->request_close_connection();\n}\n\nvoid market::wait_workers () {\n    // usable for this kind of scheduler only\n    __TBB_ASSERT(governor::needsWaitWorkers(), NULL);\n    // wait till terminating last worker decresed my_ref_count\n    while (__TBB_load_with_acquire(my_ref_count) > 1)\n        __TBB_Yield();\n    __TBB_ASSERT(1 == my_ref_count, NULL);\n    release();\n}\n\narena& market::create_arena ( unsigned max_num_workers, size_t stack_size ) {\n    market &m = global_market( max_num_workers, stack_size ); // increases market's ref count\n#if __TBB_TASK_ARENA\n    // Prevent cutting an extra slot for task_arena(p,0) with default market (p-1 workers).\n    // This is a temporary workaround for 1968 until (TODO:) master slot reservation is reworked\n    arena& a = arena::allocate_arena( m, min(max_num_workers, m.my_max_num_workers+1) );\n#else\n    arena& a = arena::allocate_arena( m, min(max_num_workers, m.my_max_num_workers) );\n#endif\n    // Add newly created arena into the existing market's list.\n    arenas_list_mutex_type::scoped_lock lock(m.my_arenas_list_mutex);\n    m.insert_arena_into_list(a);\n    return a;\n}\n\n/** This method must be invoked under my_arenas_list_mutex. 
**/\nvoid market::detach_arena ( arena& a ) {\n    __TBB_ASSERT( theMarket == this, \"Global market instance was destroyed prematurely?\" );\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    __TBB_ASSERT( !a.my_num_workers_present, NULL );\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n    __TBB_ASSERT( !a.my_slots[0].my_scheduler, NULL );\n    remove_arena_from_list(a);\n    if ( a.my_aba_epoch == my_arenas_aba_epoch )\n        ++my_arenas_aba_epoch;\n}\n\nvoid market::try_destroy_arena ( arena* a, uintptr_t aba_epoch ) {\n    __TBB_ASSERT ( a, NULL );\n    arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex);\n    assert_market_valid();\n#if __TBB_TASK_PRIORITY\n    for ( int p = my_global_top_priority; p >= my_global_bottom_priority; --p ) {\n        priority_level_info &pl = my_priority_levels[p];\n        arena_list_type &my_arenas = pl.arenas;\n#endif /* __TBB_TASK_PRIORITY */\n        arena_list_type::iterator it = my_arenas.begin();\n        for ( ; it != my_arenas.end(); ++it ) {\n            if ( a == &*it ) {\n                if ( it->my_aba_epoch == aba_epoch ) {\n                    // Arena is alive\n                    if ( !a->my_num_workers_requested && !a->my_references ) {\n                        __TBB_ASSERT( !a->my_num_workers_allotted && (a->my_pool_state == arena::SNAPSHOT_EMPTY || !a->my_max_num_workers), \"Inconsistent arena state\" );\n                        // Arena is abandoned. Destroy it.\n                        detach_arena( *a );\n                        lock.release();\n                        a->free_arena();\n                    }\n                }\n                return;\n            }\n        }\n#if __TBB_TASK_PRIORITY\n    }\n#endif /* __TBB_TASK_PRIORITY */\n}\n\nvoid market::try_destroy_arena ( market* m, arena* a, uintptr_t aba_epoch, bool master ) {\n    // Arena may have been orphaned. 
Or it may have been destroyed.\n    // Thus we cannot dereference the pointer to it until its liveness is verified.\n    // Arena is alive if it is found in the market's list.\n\n    if ( m != theMarket ) {\n        // The market has already been emptied.\n        return;\n    }\n    else if ( master ) {\n        // If this is a master thread, market can be destroyed at any moment.\n        // So protect it with an extra refcount.\n        global_market_mutex_type::scoped_lock lock(theMarketMutex);\n        if ( m != theMarket )\n            return;\n        ++m->my_ref_count;\n    }\n    m->try_destroy_arena( a, aba_epoch );\n    if ( master )\n        m->release();\n}\n\n/** This method must be invoked under my_arenas_list_mutex. **/\narena* market::arena_in_need ( arena_list_type &arenas, arena *&next ) {\n    if ( arenas.empty() )\n        return NULL;\n    arena_list_type::iterator it = next;\n    __TBB_ASSERT( it != arenas.end(), NULL );\n    do {\n        arena& a = *it;\n        if ( ++it == arenas.end() )\n            it = arenas.begin();\n        if ( a.num_workers_active() < a.my_num_workers_allotted ) {\n            a.my_references += 2; // add a worker\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n            ++a.my_num_workers_present;\n            ++my_priority_levels[a.my_top_priority].workers_present;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n            as_atomic(next) = &*it; // a subject for innocent data race under the reader lock\n            // TODO: rework global round robin policy to local or random to avoid this write\n            return &a;\n        }\n    } while ( it != next );\n    return NULL;\n}\n\nvoid market::update_allotment ( arena_list_type& arenas, int workers_demand, int max_workers ) {\n    __TBB_ASSERT( workers_demand, NULL );\n    max_workers = min(workers_demand, max_workers);\n    int carry = 0;\n#if TBB_USE_ASSERT\n    int assigned = 0;\n#endif /* TBB_USE_ASSERT */\n    arena_list_type::iterator it = 
arenas.begin();\n    for ( ; it != arenas.end(); ++it ) {\n        arena& a = *it;\n        if ( a.my_num_workers_requested <= 0 ) {\n            __TBB_ASSERT( !a.my_num_workers_allotted, NULL );\n            continue;\n        }\n        int tmp = a.my_num_workers_requested * max_workers + carry;\n        int allotted = tmp / workers_demand;\n        carry = tmp % workers_demand;\n        // a.my_num_workers_requested may temporarily exceed a.my_max_num_workers\n        a.my_num_workers_allotted = min( allotted, (int)a.my_max_num_workers );\n#if TBB_USE_ASSERT\n        assigned += a.my_num_workers_allotted;\n#endif /* TBB_USE_ASSERT */\n    }\n    __TBB_ASSERT( assigned <= workers_demand, NULL );\n}\n\n#if __TBB_TASK_PRIORITY\ninline void market::update_global_top_priority ( intptr_t newPriority ) {\n    GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.market_prio_switches );\n    my_global_top_priority = newPriority;\n    my_priority_levels[newPriority].workers_available = my_max_num_workers;\n    advance_global_reload_epoch();\n}\n\ninline void market::reset_global_priority () {\n    my_global_bottom_priority = normalized_normal_priority;\n    update_global_top_priority(normalized_normal_priority);\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    my_lowest_populated_level = normalized_normal_priority;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n}\n\narena* market::arena_in_need ( arena* prev_arena )\n{\n    if( !has_any_demand() )\n        return NULL;\n    arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex, /*is_writer=*/false);\n    assert_market_valid();\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    if ( prev_arena ) {\n        priority_level_info &pl = my_priority_levels[prev_arena->my_top_priority];\n        --prev_arena->my_num_workers_present;\n        --pl.workers_present;\n        if ( !--prev_arena->my_references && !prev_arena->my_num_workers_requested ) {\n            detach_arena( *a );\n            
lock.release();\n            a->free_arena();\n            lock.acquire();\n        }\n    }\n#else\n    suppress_unused_warning(prev_arena);\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n    int p = my_global_top_priority;\n    arena *a = NULL;\n    do {\n        priority_level_info &pl = my_priority_levels[p];\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n        __TBB_ASSERT( p >= my_lowest_populated_level, NULL );\n        if ( pl.workers_present >= pl.workers_requested )\n            continue;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n        a = arena_in_need( pl.arenas, pl.next_arena );\n    } while ( !a && --p >= my_global_bottom_priority );\n    return a;\n}\n\nvoid market::update_allotment ( intptr_t highest_affected_priority ) {\n    intptr_t i = highest_affected_priority;\n    int available = my_priority_levels[i].workers_available;\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    my_lowest_populated_level = my_global_bottom_priority;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n    for ( ; i >= my_global_bottom_priority; --i ) {\n        priority_level_info &pl = my_priority_levels[i];\n        pl.workers_available = available;\n        if ( pl.workers_requested ) {\n            update_allotment( pl.arenas, pl.workers_requested, available );\n            available -= pl.workers_requested;\n            if ( available < 0 ) {\n                available = 0;\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n                my_lowest_populated_level = i;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n                break;\n            }\n        }\n    }\n    __TBB_ASSERT( i <= my_global_bottom_priority || !available, NULL );\n    for ( --i; i >= my_global_bottom_priority; --i ) {\n        priority_level_info &pl = my_priority_levels[i];\n        pl.workers_available = 0;\n        arena_list_type::iterator it = pl.arenas.begin();\n        for ( ; it != pl.arenas.end(); ++it ) {\n            __TBB_ASSERT( it->my_num_workers_requested 
|| !it->my_num_workers_allotted, NULL );\n            it->my_num_workers_allotted = 0;\n        }\n    }\n}\n#endif /* __TBB_TASK_PRIORITY */\n\nvoid market::adjust_demand ( arena& a, int delta ) {\n    __TBB_ASSERT( theMarket, \"market instance was destroyed prematurely?\" );\n    if ( !delta )\n        return;\n    my_arenas_list_mutex.lock();\n    int prev_req = a.my_num_workers_requested;\n    a.my_num_workers_requested += delta;\n    if ( a.my_num_workers_requested <= 0 ) {\n        a.my_num_workers_allotted = 0;\n        if ( prev_req <= 0 ) {\n            my_arenas_list_mutex.unlock();\n            return;\n        }\n        delta = -prev_req;\n    }\n#if __TBB_TASK_ARENA\n    else if ( prev_req < 0 ) {\n        delta = a.my_num_workers_requested;\n    }\n#else  /* __TBB_TASK_ARENA */\n    __TBB_ASSERT( prev_req >= 0, \"Part-size request to RML?\" );\n#endif /* __TBB_TASK_ARENA */\n#if __TBB_TASK_PRIORITY\n    intptr_t p = a.my_top_priority;\n    priority_level_info &pl = my_priority_levels[p];\n    pl.workers_requested += delta;\n    __TBB_ASSERT( pl.workers_requested >= 0, NULL );\n#if !__TBB_TASK_ARENA\n    __TBB_ASSERT( a.my_num_workers_requested >= 0, NULL );\n#else\n    //TODO: understand the assertion and modify\n#endif\n    if ( a.my_num_workers_requested <= 0 ) {\n        if ( a.my_top_priority != normalized_normal_priority ) {\n            GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.arena_prio_resets );\n            update_arena_top_priority( a, normalized_normal_priority );\n        }\n        a.my_bottom_priority = normalized_normal_priority;\n    }\n    if ( p == my_global_top_priority ) {\n        if ( !pl.workers_requested ) {\n            while ( --p >= my_global_bottom_priority && !my_priority_levels[p].workers_requested )\n                continue;\n            if ( p < my_global_bottom_priority )\n                reset_global_priority();\n            else\n                update_global_top_priority(p);\n    
    }\n        update_allotment( my_global_top_priority );\n    }\n    else if ( p > my_global_top_priority ) {\n#if !__TBB_TASK_ARENA\n        __TBB_ASSERT( pl.workers_requested > 0, NULL );\n#else\n        //TODO: understand the assertion and modify\n#endif\n        update_global_top_priority(p);\n        a.my_num_workers_allotted = min( (int)my_max_num_workers, a.my_num_workers_requested );\n        my_priority_levels[p - 1].workers_available = my_max_num_workers - a.my_num_workers_allotted;\n        update_allotment( p - 1 );\n    }\n    else if ( p == my_global_bottom_priority ) {\n        if ( !pl.workers_requested ) {\n            while ( ++p <= my_global_top_priority && !my_priority_levels[p].workers_requested )\n                continue;\n            if ( p > my_global_top_priority )\n                reset_global_priority();\n            else {\n                my_global_bottom_priority = p;\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n                my_lowest_populated_level = max( my_lowest_populated_level, p );\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n            }\n        }\n        else\n            update_allotment( p );\n    }\n    else if ( p < my_global_bottom_priority ) {\n        __TBB_ASSERT( a.my_num_workers_requested > 0, NULL );\n        int prev_bottom = my_global_bottom_priority;\n        my_global_bottom_priority = p;\n        update_allotment( prev_bottom );\n    }\n    else {\n        __TBB_ASSERT( my_global_bottom_priority < p && p < my_global_top_priority, NULL );\n        update_allotment( p );\n    }\n    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority || a.my_num_workers_requested<=0, NULL );\n    assert_market_valid();\n#else /* !__TBB_TASK_PRIORITY */\n    my_total_demand += delta;\n    update_allotment();\n#endif /* !__TBB_TASK_PRIORITY */\n    my_arenas_list_mutex.unlock();\n    // Must be called outside of any locks\n    my_server->adjust_job_count_estimate( delta );\n    GATHER_STATISTIC( 
governor::local_scheduler_if_initialized() ? ++governor::local_scheduler_if_initialized()->my_counters.gate_switches : 0 );\n}\n\nvoid market::process( job& j ) {\n    generic_scheduler& s = static_cast<generic_scheduler&>(j);\n    arena *a = NULL;\n    __TBB_ASSERT( governor::is_set(&s), NULL );\n#if !__TBB_SLEEP_PERMISSION\n    while ( (a = arena_in_need(a)) )\n        a->process(s);\n#else//__TBB_SLEEP_PERMISSION\n    enum {\n        query_interval = 1000,\n        first_interval = 1,\n        pause_time = 100 // similar to PauseTime used for the stealing loop\n    };\n    for(int i = first_interval; ; i--) {\n        while ( (a = arena_in_need(a)) )\n        {\n            a->process(s);\n            i = first_interval;\n        }\n        if( i == 0 ) {\n#if __TBB_TASK_PRIORITY\n            arena_list_type &al = my_priority_levels[my_global_top_priority].arenas;\n#else /* __TBB_TASK_PRIORITY */\n            arena_list_type &al = my_arenas;\n#endif /* __TBB_TASK_PRIORITY */\n            if( al.empty() ) // races if any are innocent TODO: replace by an RML query interface\n                break; // no arenas left, perhaps going to shut down\n            if( the_global_observer_list.ask_permission_to_leave() )\n                break; // go sleep\n            __TBB_Yield();\n            i = query_interval;\n        } else __TBB_Pause(pause_time);\n    }\n#endif//__TBB_SLEEP_PERMISSION\n    GATHER_STATISTIC( ++s.my_counters.market_roundtrips );\n}\n\nvoid market::cleanup( job& j ) {\n    __TBB_ASSERT( theMarket != this, NULL );\n    generic_scheduler& s = static_cast<generic_scheduler&>(j);\n    generic_scheduler* mine = governor::local_scheduler_if_initialized();\n    __TBB_ASSERT( !mine || mine->my_arena_index!=0, NULL );\n    if( mine!=&s ) {\n        governor::assume_scheduler( &s );\n        generic_scheduler::cleanup_worker( &s, mine!=NULL );\n        governor::assume_scheduler( mine );\n    } else {\n        generic_scheduler::cleanup_worker( &s, true );\n   
 }\n}\n\nvoid market::acknowledge_close_connection() {\n    destroy();\n}\n\n::rml::job* market::create_one_job() {\n    unsigned index = ++my_num_workers;\n    __TBB_ASSERT( index > 0, NULL );\n    ITT_THREAD_SET_NAME(_T(\"TBB Worker Thread\"));\n    // index serves as a hint decreasing conflicts between workers when they migrate between arenas\n    generic_scheduler* s = generic_scheduler::create_worker( *this, index );\n#if __TBB_TASK_GROUP_CONTEXT\n    __TBB_ASSERT( index <= my_max_num_workers, NULL );\n    __TBB_ASSERT( !my_workers[index - 1], NULL );\n    my_workers[index - 1] = s;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    governor::sign_on(s);\n    return s;\n}\n\n#if __TBB_TASK_PRIORITY\nvoid market::update_arena_top_priority ( arena& a, intptr_t new_priority ) {\n    GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.arena_prio_switches );\n    __TBB_ASSERT( a.my_top_priority != new_priority, NULL );\n    priority_level_info &prev_level = my_priority_levels[a.my_top_priority],\n                        &new_level = my_priority_levels[new_priority];\n    remove_arena_from_list(a);\n    a.my_top_priority = new_priority;\n    insert_arena_into_list(a);\n    ++a.my_reload_epoch; // TODO: synch with global reload epoch in order to optimize usage of local reload epoch\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    // Arena's my_num_workers_present may remain positive for some time after its\n    // my_num_workers_requested becomes zero. 
Thus the following two lines are\n    // executed unconditionally.\n    prev_level.workers_present -= a.my_num_workers_present;\n    new_level.workers_present += a.my_num_workers_present;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n    prev_level.workers_requested -= a.my_num_workers_requested;\n    new_level.workers_requested += a.my_num_workers_requested;\n    __TBB_ASSERT( prev_level.workers_requested >= 0 && new_level.workers_requested >= 0, NULL );\n}\n\nbool market::lower_arena_priority ( arena& a, intptr_t new_priority, uintptr_t old_reload_epoch ) {\n    // TODO: replace the lock with a try_lock loop which performs a double check of the epoch\n    arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex);\n    if ( a.my_reload_epoch != old_reload_epoch ) {\n        assert_market_valid();\n        return false;\n    }\n    __TBB_ASSERT( a.my_top_priority > new_priority, NULL );\n    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );\n\n    intptr_t p = a.my_top_priority;\n    update_arena_top_priority( a, new_priority );\n    if ( a.my_num_workers_requested > 0 ) {\n        if ( my_global_bottom_priority > new_priority ) {\n            my_global_bottom_priority = new_priority;\n        }\n        if ( p == my_global_top_priority && !my_priority_levels[p].workers_requested ) {\n            // Global top level became empty\n            for ( --p; !my_priority_levels[p].workers_requested; --p ) continue;\n            __TBB_ASSERT( p >= my_global_bottom_priority, NULL );\n            update_global_top_priority(p);\n        }\n        update_allotment( p );\n    }\n\n    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );\n    assert_market_valid();\n    return true;\n}\n\nbool market::update_arena_priority ( arena& a, intptr_t new_priority ) {\n    arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex);\n\n    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority || a.my_num_workers_requested <= 0, NULL 
);\n    assert_market_valid();\n    if ( a.my_top_priority == new_priority ) {\n        return false;\n    }\n    else if ( a.my_top_priority > new_priority ) {\n        if ( a.my_bottom_priority > new_priority )\n            a.my_bottom_priority = new_priority;\n        return false;\n    }\n    else if ( a.my_num_workers_requested <= 0 ) {\n        return false;\n    }\n\n    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );\n\n    intptr_t p = a.my_top_priority;\n    intptr_t highest_affected_level = max(p, new_priority);\n    update_arena_top_priority( a, new_priority );\n\n    if ( my_global_top_priority < new_priority ) {\n        update_global_top_priority(new_priority);\n    }\n    else if ( my_global_top_priority == new_priority ) {\n        advance_global_reload_epoch();\n    }\n    else {\n        __TBB_ASSERT( new_priority < my_global_top_priority, NULL );\n        __TBB_ASSERT( new_priority > my_global_bottom_priority, NULL );\n        if ( p == my_global_top_priority && !my_priority_levels[p].workers_requested ) {\n            // Global top level became empty\n            __TBB_ASSERT( my_global_bottom_priority < p, NULL );\n            for ( --p; !my_priority_levels[p].workers_requested; --p ) continue;\n            __TBB_ASSERT( p >= new_priority, NULL );\n            update_global_top_priority(p);\n            highest_affected_level = p;\n        }\n    }\n    if ( p == my_global_bottom_priority ) {\n        // Arena priority was increased from the global bottom level.\n        __TBB_ASSERT( p < new_priority, NULL );                     // n\n        __TBB_ASSERT( new_priority <= my_global_top_priority, NULL );\n        while ( !my_priority_levels[my_global_bottom_priority].workers_requested )\n            ++my_global_bottom_priority;\n        __TBB_ASSERT( my_global_bottom_priority <= new_priority, NULL );\n        __TBB_ASSERT( my_priority_levels[my_global_bottom_priority].workers_requested > 0, NULL );\n    }\n    
update_allotment( highest_affected_level );\n\n    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );\n    assert_market_valid();\n    return true;\n}\n#endif /* __TBB_TASK_PRIORITY */\n\n#if __TBB_COUNT_TASK_NODES \nintptr_t market::workers_task_node_count() {\n    intptr_t result = 0;\n    ForEachArena(a) {\n        result += a.workers_task_node_count();\n    } EndForEach();\n    return result;\n}\n#endif /* __TBB_COUNT_TASK_NODES */\n\n} // namespace internal\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/market.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_market_H\n#define _TBB_market_H\n\n#include \"tbb/tbb_stddef.h\"\n\n#include \"scheduler_common.h\"\n#include \"tbb/atomic.h\"\n#include \"tbb/spin_rw_mutex.h\"\n#include \"../rml/include/rml_tbb.h\"\n\n#include \"intrusive_list.h\"\n\n#if defined(_MSC_VER) && defined(_Wp64)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (push)\n    #pragma warning (disable: 4244)\n#endif\n\nnamespace tbb {\n\nclass task_group_context;\n\nnamespace internal {\n\n//------------------------------------------------------------------------\n// Class market\n//------------------------------------------------------------------------\n\nclass market : no_copy, rml::tbb_client {\n    friend class generic_scheduler;\n    friend class arena;\n    template<typename SchedulerTraits> friend class custom_scheduler;\n    friend class tbb::task_group_context;\nprivate:\n    friend void ITT_DoUnsafeOneTimeInitialization ();\n\n    typedef intrusive_list<arena> arena_list_type;\n\n    //! Currently active global market\n    static market* theMarket;\n\n    typedef scheduler_mutex_type global_market_mutex_type;\n\n    //! Mutex guarding creation/destruction of theMarket, insertions/deletions in my_arenas, and cancellation propagation\n    static global_market_mutex_type  theMarketMutex;\n\n    //! Reference count controlling market object lifetime\n    intptr_t my_ref_count;\n\n    //! Lightweight mutex guarding accounting operations with arenas list\n    typedef spin_rw_mutex arenas_list_mutex_type;\n    arenas_list_mutex_type my_arenas_list_mutex;\n\n    //! Pointer to the RML server object that services this TBB instance.\n    rml::tbb_server* my_server;\n\n    //! Stack size of worker threads\n    size_t my_stack_size;\n\n    //! 
Number of workers requested from the underlying resource manager\n    unsigned my_max_num_workers;\n\n    //! Number of workers that have been delivered by RML\n    /** Used to assign indices to the new workers coming from RML, and busy part\n        of my_workers array. **/\n    atomic<unsigned> my_num_workers;\n\n#if __TBB_TASK_PRIORITY\n    //! Highest priority among active arenas in the market.\n    /** Arena priority level is its tasks highest priority (specified by arena's\n        my_top_priority member).\n        Arena is active when it has outstanding request for workers. Note that \n        inactive arena may have workers lingering there for some time. **/\n    intptr_t my_global_top_priority;\n\n    //! Lowest priority among active arenas in the market.\n    /** See also my_global_top_priority **/\n    intptr_t my_global_bottom_priority;\n\n    //! Tracks events that may bring tasks in offload areas to the top priority level.\n    /** Incremented when global top priority is decremented or a task group priority\n        is elevated to the current top level. **/\n    uintptr_t my_global_reload_epoch;\n\n    //! Information about arenas at a particular priority level\n    struct priority_level_info {\n        //! List of arenas at this priority level\n        arena_list_type arenas;\n\n        //! The first arena to be checked when idle worker seeks for an arena to enter\n        /** The check happens in round-robin fashion. **/\n        arena *next_arena;\n\n        //! Total amount of workers requested by arenas at this priority level.\n        int workers_requested;\n\n        //! Maximal amount of workers the market can tell off to this priority level.\n        int workers_available;\n\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n        //! Total amount of workers that are in arenas at this priority level.\n        int workers_present;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n    }; // struct priority_level_info\n\n    //! 
Information about arenas at different priority levels\n    priority_level_info my_priority_levels[num_priority_levels];\n\n#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION\n    //! Lowest priority level having workers available.\n    intptr_t my_lowest_populated_level;\n#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */\n\n#else /* !__TBB_TASK_PRIORITY */\n\n    //! List of registered arenas\n    arena_list_type my_arenas;\n\n    //! The first arena to be checked when idle worker seeks for an arena to enter\n    /** The check happens in round-robin fashion. **/\n    arena *my_next_arena;\n\n    //! Number of workers that were requested by all arenas\n    int my_total_demand;\n#endif /* !__TBB_TASK_PRIORITY */\n\n    //! ABA prevention marker to assign to newly created arenas\n    uintptr_t my_arenas_aba_epoch;\n\n#if __TBB_COUNT_TASK_NODES\n    //! Net number of nodes that have been allocated from heap.\n    /** Updated each time a scheduler or arena is destroyed. */\n    atomic<intptr_t> my_task_node_count;\n#endif /* __TBB_COUNT_TASK_NODES */\n\n    //! Constructor\n    market ( unsigned max_num_workers, size_t stack_size );\n\n    //! Factory method creating new market object\n    static market& global_market ( unsigned max_num_workers, size_t stack_size );\n\n    //! Destroys and deallocates market object created by market::create()\n    void destroy ();\n\n    void try_destroy_arena ( arena*, uintptr_t aba_epoch );\n\n#if __TBB_TASK_PRIORITY\n    //! Returns next arena that needs more workers, or NULL.\n    arena* arena_in_need ( arena* prev_arena );\n\n    //! Recalculates the number of workers assigned to each arena at and below the specified priority.\n    /** The actual number of workers servicing a particular arena may temporarily \n        deviate from the calculated value. **/\n    void update_allotment ( intptr_t highest_affected_priority );\n\n    //! 
Changes arena's top priority and updates affected priority levels info in the market.\n    void update_arena_top_priority ( arena& a, intptr_t newPriority );\n\n    //! Changes market's global top priority and related settings.\n    inline void update_global_top_priority ( intptr_t newPriority );\n\n    //! Resets empty market's global top and bottom priority to the normal level.\n    inline void reset_global_priority ();\n\n    inline void advance_global_reload_epoch () {\n        __TBB_store_with_release( my_global_reload_epoch, my_global_reload_epoch + 1 );\n    }\n\n    void assert_market_valid () const {\n        __TBB_ASSERT( (my_priority_levels[my_global_top_priority].workers_requested > 0\n                           && !my_priority_levels[my_global_top_priority].arenas.empty())\n                       || (my_global_top_priority == my_global_bottom_priority &&\n                           my_global_top_priority == normalized_normal_priority), NULL );\n    }\n\n    bool has_any_demand() const {\n        for(int p = 0; p < num_priority_levels; p++)\n            if( __TBB_load_with_acquire(my_priority_levels[p].workers_requested) > 0 ) // TODO: use as_atomic here and below\n                return true;\n        return false;\n    }\n\n#else /* !__TBB_TASK_PRIORITY */\n\n    //! Recalculates the number of workers assigned to each arena in the list.\n    /** The actual number of workers servicing a particular arena may temporarily \n        deviate from the calculated value. **/\n    void update_allotment () {\n        if ( my_total_demand )\n            update_allotment( my_arenas, my_total_demand, (int)my_max_num_workers );\n    }\n\n    //! 
Returns next arena that needs more workers, or NULL.\n    arena* arena_in_need (arena*) {\n        if(__TBB_load_with_acquire(my_total_demand) <= 0)\n            return NULL;\n        arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex, /*is_writer=*/false);\n        return arena_in_need(my_arenas, my_next_arena);\n    }\n    void assert_market_valid () const {}\n#endif /* !__TBB_TASK_PRIORITY */\n\n    //! Returns number of masters doing computational (CPU-intensive) work\n    int num_active_masters () { return 1; }  // APM TODO: replace with a real mechanism\n\n\n    ////////////////////////////////////////////////////////////////////////////////\n    // Helpers to unify code branches dependent on priority feature presence\n\n    void insert_arena_into_list ( arena& a );\n\n    void remove_arena_from_list ( arena& a );\n\n    arena* arena_in_need ( arena_list_type &arenas, arena *&next );\n\n    static void update_allotment ( arena_list_type& arenas, int total_demand, int max_workers );\n\n\n    ////////////////////////////////////////////////////////////////////////////////\n    // Implementation of rml::tbb_client interface methods\n\n    /*override*/ version_type version () const { return 0; }\n\n    /*override*/ unsigned max_job_count () const { return my_max_num_workers; }\n\n    /*override*/ size_t min_stack_size () const { return worker_stack_size(); }\n\n    /*override*/ policy_type policy () const { return throughput; }\n\n    /*override*/ job* create_one_job ();\n\n    /*override*/ void cleanup( job& j );\n\n    /*override*/ void acknowledge_close_connection ();\n\n    /*override*/ void process( job& j );\n\npublic:\n    //! Creates an arena object\n    /** If necessary, also creates global market instance, and boosts its ref count.\n        Each call to create_arena() must be matched by the call to arena::free_arena(). **/\n    static arena& create_arena ( unsigned max_num_workers, size_t stack_size );\n\n    //! 
Removes the arena from the market's list\n    static void try_destroy_arena ( market*, arena*, uintptr_t aba_epoch, bool master );\n\n    //! Removes the arena from the market's list\n    void detach_arena ( arena& );\n\n    //! Decrements market's refcount and destroys it in the end\n    void release ();\n\n    //! Request that arena's need in workers should be adjusted.\n    /** Concurrent invocations are possible only on behalf of different arenas. **/\n    void adjust_demand ( arena&, int delta );\n\n    //! Guarantee that request_close_connection() is called by master, not some worker\n    /** Must be called before arena::on_thread_leaving() **/\n    void prepare_wait_workers() { ++my_ref_count; }\n\n    //! Wait workers termination\n    void wait_workers ();\n\n    //! Returns the requested stack size of worker threads.\n    size_t worker_stack_size () const { return my_stack_size; }\n\n#if _WIN32||_WIN64\n    //! register master with the resource manager\n    void register_master( ::rml::server::execution_resource_t& rsc_handle ) {\n        __TBB_ASSERT( my_server, \"RML server not defined?\" );\n        // the server may ignore registration and set master_exec_resource to NULL.\n        my_server->register_master( rsc_handle );\n    }\n\n    //! unregister master with the resource manager\n    void unregister_master( ::rml::server::execution_resource_t& rsc_handle ) const {\n        my_server->unregister_master( rsc_handle );\n    }\n#endif /* WIN */\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Finds all contexts affected by the state change and propagates the new state to them.\n    template <typename T>\n    bool propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n#if __TBB_TASK_PRIORITY\n    //! Lowers arena's priority is not higher than newPriority \n    /** Returns true if arena priority was actually elevated. 
**/ \n    bool lower_arena_priority ( arena& a, intptr_t new_priority, uintptr_t old_reload_epoch );\n\n    //! Makes sure arena's priority is not lower than newPriority \n    /** Returns true if arena priority was elevated. Also updates arena's bottom\n        priority boundary if necessary.\n\n        This method is called whenever a user changes priority, because whether\n        it was hiked or sunk can be determined for sure only under the lock used\n        by this function. **/\n    bool update_arena_priority ( arena& a, intptr_t new_priority );\n#endif /* __TBB_TASK_PRIORITY */\n\n#if __TBB_COUNT_TASK_NODES\n    //! Returns the number of task objects \"living\" in worker threads\n    intptr_t workers_task_node_count();\n\n    //! Net number of nodes that have been allocated from heap.\n    /** Updated each time a scheduler or arena is destroyed. */\n    void update_task_node_count( intptr_t delta ) { my_task_node_count += delta; }\n#endif /* __TBB_COUNT_TASK_NODES */\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Array of pointers to the registered workers\n    /** Used by cancellation propagation mechanism.\n        Must be the last data member of the class market. 
**/\n    generic_scheduler* my_workers[1];\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n}; // class market\n\n#if __TBB_TASK_PRIORITY\n    #define BeginForEachArena(a)    \\\n        arenas_list_mutex_type::scoped_lock arena_list_lock(my_arenas_list_mutex);  \\\n        for ( intptr_t i = my_global_top_priority; i >= my_global_bottom_priority; --i ) {  \\\n            /*arenas_list_mutex_type::scoped_lock arena_list_lock(my_priority_levels[i].my_arenas_list_mutex);*/ \\\n            arena_list_type &arenas = my_priority_levels[i].arenas;\n#else /* !__TBB_TASK_PRIORITY */\n    #define BeginForEachArena(a)    \\\n        arena_list_type &arenas = my_arenas; {\n#endif /* !__TBB_TASK_PRIORITY */\n\n#define ForEachArena(a)     \\\n    BeginForEachArena(a)    \\\n        arena_list_type::iterator it = arenas.begin();  \\\n        for ( ; it != arenas.end(); ++it ) {            \\\n            arena &a = *it;\n\n#define EndForEach() }}\n\n\n} // namespace internal\n} // namespace tbb\n\n#if defined(_MSC_VER) && defined(_Wp64)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (pop)\n#endif // warning 4244 is back\n\n#endif /* _TBB_market_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/memory_pool.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_memory_pool_H\n#define __TBB_memory_pool_H\n\n#if !TBB_PREVIEW_MEMORY_POOL\n#error Set TBB_PREVIEW_MEMORY_POOL to include memory_pool.h\n#endif\n/** @file */\n\n#include \"scalable_allocator.h\"\n#include <new> // std::bad_alloc\n#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n#include <utility> // std::forward\n#endif\n\n#if __TBB_EXTRA_DEBUG\n#define __TBBMALLOC_ASSERT ASSERT\n#else\n#define __TBBMALLOC_ASSERT(a,b) ((void)0)\n#endif\n\nnamespace tbb {\nnamespace interface6 {\n//! @cond INTERNAL\nnamespace internal {\n\n//! 
Base of thread-safe pool allocator for variable-size requests\nclass pool_base : tbb::internal::no_copy {\n    // Pool interface is separate from standard allocator classes because it has\n    // to maintain internal state, no copy or assignment. Move and swap are possible.\npublic:\n    //! Reset pool to reuse its memory (free all objects at once)\n    void recycle() { rml::pool_reset(my_pool); }\n\n    //! The \"malloc\" analogue to allocate block of memory of size bytes\n    void *malloc(size_t size) { return rml::pool_malloc(my_pool, size); }\n\n    //! The \"free\" analogue to discard a previously allocated piece of memory.\n    void free(void* ptr) { rml::pool_free(my_pool, ptr); }\n\n    //! The \"realloc\" analogue complementing pool_malloc.\n    // Enables some low-level optimization possibilities\n    void *realloc(void* ptr, size_t size) {\n        return rml::pool_realloc(my_pool, ptr, size);\n    }\n\nprotected:\n    //! destroy pool - must be called in a child class\n    void destroy() { rml::pool_destroy(my_pool); }\n\n    rml::MemoryPool *my_pool;\n};\n\n} // namespace internal\n//! @endcond\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Workaround for erroneous \"unreferenced parameter\" warning in method destroy.\n    #pragma warning (push)\n    #pragma warning (disable: 4100)\n#endif\n\n//! 
Meets \"allocator\" requirements of ISO C++ Standard, Section 20.1.5\n/** @ingroup memory_allocation */\ntemplate<typename T, typename P = internal::pool_base>\nclass memory_pool_allocator {\nprotected:\n    typedef P pool_type;\n    pool_type *my_pool;\n    template<typename U, typename R>\n    friend class memory_pool_allocator;\n    template<typename V, typename U, typename R>\n    friend bool operator==( const memory_pool_allocator<V,R>& a, const memory_pool_allocator<U,R>& b);\n    template<typename V, typename U, typename R>\n    friend bool operator!=( const memory_pool_allocator<V,R>& a, const memory_pool_allocator<U,R>& b);\npublic:\n    typedef typename tbb::internal::allocator_type<T>::value_type value_type;\n    typedef value_type* pointer;\n    typedef const value_type* const_pointer;\n    typedef value_type& reference;\n    typedef const value_type& const_reference;\n    typedef size_t size_type;\n    typedef ptrdiff_t difference_type;\n    template<typename U> struct rebind {\n        typedef memory_pool_allocator<U, P> other;\n    };\n\n    memory_pool_allocator(pool_type &pool) throw() : my_pool(&pool) {}\n    memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {}\n    template<typename U>\n    memory_pool_allocator(const memory_pool_allocator<U,P>& src) throw() : my_pool(src.my_pool) {}\n\n    pointer address(reference x) const { return &x; }\n    const_pointer address(const_reference x) const { return &x; }\n    \n    //! Allocate space for n objects.\n    pointer allocate( size_type n, const void* /*hint*/ = 0) {\n        return static_cast<pointer>( my_pool->malloc( n*sizeof(value_type) ) );\n    }\n    //! Free previously allocated block of memory.\n    void deallocate( pointer p, size_type ) {\n        my_pool->free(p);\n    }\n    //! 
Largest value for which method allocate might succeed.\n    size_type max_size() const throw() {\n        size_type max = static_cast<size_type>(-1) / sizeof (value_type);\n        return (max > 0 ? max : 1);\n    }\n    //! Copy-construct value at location pointed to by p.\n#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n    template<typename U, typename... Args>\n    void construct(U *p, Args&&... args)\n        { ::new((void *)p) U(std::forward<Args>(args)...); }\n#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));}\n#endif\n    void construct( pointer p, const value_type& value ) { ::new((void*)(p)) value_type(value); }\n#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n\n    //! Destroy value at location pointed to by p.\n    void destroy( pointer p ) { p->~value_type(); }\n\n};\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning (pop)\n#endif // warning 4100 is back\n\n//! 
Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1\n/** @ingroup memory_allocation */\ntemplate<typename P> \nclass memory_pool_allocator<void, P> {\npublic:\n    typedef P pool_type;\n    typedef void* pointer;\n    typedef const void* const_pointer;\n    typedef void value_type;\n    template<typename U> struct rebind {\n        typedef memory_pool_allocator<U, P> other;\n    };\n\n    memory_pool_allocator( pool_type &pool) throw() : my_pool(&pool) {}\n    memory_pool_allocator( const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {}\n    template<typename U>\n    memory_pool_allocator(const memory_pool_allocator<U,P>& src) throw() : my_pool(src.my_pool) {}\n\nprotected:\n    pool_type *my_pool;\n    template<typename U, typename R>\n    friend class memory_pool_allocator;\n    template<typename V, typename U, typename R>\n    friend bool operator==( const memory_pool_allocator<V,R>& a, const memory_pool_allocator<U,R>& b);\n    template<typename V, typename U, typename R>\n    friend bool operator!=( const memory_pool_allocator<V,R>& a, const memory_pool_allocator<U,R>& b);\n};\n\ntemplate<typename T, typename U, typename P>\ninline bool operator==( const memory_pool_allocator<T,P>& a, const memory_pool_allocator<U,P>& b) {return a.my_pool==b.my_pool;}\n\ntemplate<typename T, typename U, typename P>\ninline bool operator!=( const memory_pool_allocator<T,P>& a, const memory_pool_allocator<U,P>& b) {return a.my_pool!=b.my_pool;}\n\n\n//! Thread-safe growable pool allocator for variable-size requests\ntemplate <typename Alloc>\nclass memory_pool : public internal::pool_base {\n    Alloc my_alloc; // TODO: base-class optimization\n    static void *allocate_request(intptr_t pool_id, size_t & bytes);\n    static int deallocate_request(intptr_t pool_id, void*, size_t raw_bytes);\n\npublic:\n    //! construct pool with underlying allocator\n    memory_pool(const Alloc &src = Alloc());\n\n    //! 
destroy pool\n    ~memory_pool() { destroy(); } // call the callbacks first and destroy my_alloc latter\n\n};\n\nclass fixed_pool : public internal::pool_base {\n    void *my_buffer;\n    size_t my_size;\n    inline static void *allocate_request(intptr_t pool_id, size_t & bytes);\n\npublic:\n    //! construct pool with underlying allocator\n    inline fixed_pool(void *buf, size_t size);\n    //! destroy pool\n    ~fixed_pool() { destroy(); }\n};\n\n//////////////// Implementation ///////////////\n\ntemplate <typename Alloc>\nmemory_pool<Alloc>::memory_pool(const Alloc &src) : my_alloc(src) {\n    rml::MemPoolPolicy args(allocate_request, deallocate_request,\n                            sizeof(typename Alloc::value_type));\n    rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool);\n    if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc());\n}\ntemplate <typename Alloc>\nvoid *memory_pool<Alloc>::allocate_request(intptr_t pool_id, size_t & bytes) {\n    memory_pool<Alloc> &self = *reinterpret_cast<memory_pool<Alloc>*>(pool_id);\n    const size_t unit_size = sizeof(typename Alloc::value_type);\n    __TBBMALLOC_ASSERT( 0 == bytes%unit_size, NULL);\n    void *ptr;\n    __TBB_TRY { ptr = self.my_alloc.allocate( bytes/unit_size ); }\n    __TBB_CATCH(...) 
{ return 0; }\n    return ptr;\n}\n#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED\n    // Workaround for erroneous \"unreachable code\" warning in the template below.\n    // Specific for VC++ 17-18 compiler\n    #pragma warning (push)\n    #pragma warning (disable: 4702)\n#endif\ntemplate <typename Alloc>\nint memory_pool<Alloc>::deallocate_request(intptr_t pool_id, void* raw_ptr, size_t raw_bytes) {\n    memory_pool<Alloc> &self = *reinterpret_cast<memory_pool<Alloc>*>(pool_id);\n    const size_t unit_size = sizeof(typename Alloc::value_type);\n    __TBBMALLOC_ASSERT( 0 == raw_bytes%unit_size, NULL);\n    self.my_alloc.deallocate( static_cast<typename Alloc::value_type*>(raw_ptr), raw_bytes/unit_size );\n    return 0;\n}\n#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED\n    #pragma warning (pop)\n#endif\ninline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_size(size) {\n    if( !buf || !size ) __TBB_THROW(std::bad_alloc());\n    rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true);\n    rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool);\n    if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc());\n}\ninline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes) {\n    fixed_pool &self = *reinterpret_cast<fixed_pool*>(pool_id);\n    __TBBMALLOC_ASSERT(0 != self.my_size, \"The buffer must not be used twice.\");\n    bytes = self.my_size;\n    self.my_size = 0; // remember that buffer has been used\n    return self.my_buffer;\n}\n\n} //namespace interface6\nusing interface6::memory_pool_allocator;\nusing interface6::memory_pool;\nusing interface6::fixed_pool;\n} //namespace tbb\n\n#undef __TBBMALLOC_ASSERT\n#endif// __TBB_memory_pool_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/mutex.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if _WIN32||_WIN64\n#include <errno.h> // EDEADLK\n#endif\n#include \"tbb/mutex.h\"\n#include \"itt_notify.h\"\n\nnamespace tbb {\n    void mutex::scoped_lock::internal_acquire( mutex& m ) {\n\n#if _WIN32||_WIN64\n        switch( m.state ) {\n        case INITIALIZED: \n        case HELD:\n            EnterCriticalSection( &m.impl );\n            // If a thread comes here, and another thread holds the lock, it will block\n            // in EnterCriticalSection.  When it returns from EnterCriticalSection,\n            // m.state must be set to INITIALIZED.  
If the same thread tries to acquire a lock it\n            // aleady holds, the lock is in HELD state, thus will cause throwing the exception.\n            if (m.state==HELD)\n                tbb::internal::handle_perror(EDEADLK,\"mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex\");\n            m.state = HELD;\n            break;\n        case DESTROYED:\n            __TBB_ASSERT(false,\"mutex::scoped_lock: mutex already destroyed\");\n            break;\n        default:\n            __TBB_ASSERT(false,\"mutex::scoped_lock: illegal mutex state\");\n            break;\n        }\n#else\n        int error_code = pthread_mutex_lock(&m.impl);\n        if( error_code )\n            tbb::internal::handle_perror(error_code,\"mutex::scoped_lock: pthread_mutex_lock failed\");\n#endif /* _WIN32||_WIN64 */\n        my_mutex = &m;\n    }\n\nvoid mutex::scoped_lock::internal_release() {\n    __TBB_ASSERT( my_mutex, \"mutex::scoped_lock: not holding a mutex\" );\n#if _WIN32||_WIN64    \n     switch( my_mutex->state ) {\n        case INITIALIZED: \n            __TBB_ASSERT(false,\"mutex::scoped_lock: try to release the lock without acquisition\");\n            break;\n        case HELD:\n            my_mutex->state = INITIALIZED;\n            LeaveCriticalSection(&my_mutex->impl);\n            break;\n        case DESTROYED: \n            __TBB_ASSERT(false,\"mutex::scoped_lock: mutex already destroyed\"); \n            break;\n        default: \n            __TBB_ASSERT(false,\"mutex::scoped_lock: illegal mutex state\");\n            break;\n    }\n#else\n     int error_code = pthread_mutex_unlock(&my_mutex->impl);\n     __TBB_ASSERT_EX(!error_code, \"mutex::scoped_lock: pthread_mutex_unlock failed\");\n#endif /* _WIN32||_WIN64 */\n     my_mutex = NULL;\n}\n\nbool mutex::scoped_lock::internal_try_acquire( mutex& m ) {\n#if _WIN32||_WIN64\n    switch( m.state ) {\n        case INITIALIZED: \n        case HELD:\n            break;\n        case DESTROYED: 
\n            __TBB_ASSERT(false,\"mutex::scoped_lock: mutex already destroyed\"); \n            break;\n        default: \n            __TBB_ASSERT(false,\"mutex::scoped_lock: illegal mutex state\");\n            break;\n    }\n#endif /* _WIN32||_WIN64 */\n\n    bool result;\n#if _WIN32||_WIN64\n    result = TryEnterCriticalSection(&m.impl)!=0;\n    if( result ) {\n        __TBB_ASSERT(m.state!=HELD, \"mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex\");\n        m.state = HELD;\n    }\n#else\n    result = pthread_mutex_trylock(&m.impl)==0;\n#endif /* _WIN32||_WIN64 */\n    if( result ) \n        my_mutex = &m;\n    return result;\n}\n\nvoid mutex::internal_construct() {\n#if _WIN32||_WIN64\n    InitializeCriticalSectionEx(&impl, 4000, 0);\n    state = INITIALIZED;  \n#else\n    int error_code = pthread_mutex_init(&impl,NULL);\n    if( error_code )\n        tbb::internal::handle_perror(error_code,\"mutex: pthread_mutex_init failed\");\n#endif /* _WIN32||_WIN64*/    \n    ITT_SYNC_CREATE(&impl, _T(\"tbb::mutex\"), _T(\"\"));\n}\n\nvoid mutex::internal_destroy() {\n#if _WIN32||_WIN64\n    switch( state ) {\n      case INITIALIZED:\n        DeleteCriticalSection(&impl);\n       break;\n      case DESTROYED: \n        __TBB_ASSERT(false,\"mutex: already destroyed\");\n        break;\n      default: \n        __TBB_ASSERT(false,\"mutex: illegal state for destruction\");\n        break;\n    }\n    state = DESTROYED;\n#else\n    int error_code = pthread_mutex_destroy(&impl); \n    __TBB_ASSERT_EX(!error_code,\"mutex: pthread_mutex_destroy failed\");\n#endif /* _WIN32||_WIN64 */\n}\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/mutex.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_mutex_H\n#define __TBB_mutex_H\n\n#if _WIN32||_WIN64\n#include \"machine/windows_api.h\"\n#else\n#include <pthread.h>\n#endif /* _WIN32||_WIN64 */\n\n#include <new>\n#include \"aligned_space.h\"\n#include \"tbb_stddef.h\"\n#include \"tbb_profiling.h\"\n\nnamespace tbb {\n\n//! Wrapper around the platform's native reader-writer lock.\n/** For testing purposes only.\n    @ingroup synchronization */\nclass mutex : internal::mutex_copy_deprecated_and_disabled {\npublic:\n    //! 
Construct unacquired mutex.\n    mutex() {\n#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS\n    internal_construct();\n#else\n  #if _WIN32||_WIN64\n        InitializeCriticalSectionEx(&impl, 4000, 0);\n  #else\n        int error_code = pthread_mutex_init(&impl,NULL);\n        if( error_code )\n            tbb::internal::handle_perror(error_code,\"mutex: pthread_mutex_init failed\");\n  #endif /* _WIN32||_WIN64*/\n#endif /* TBB_USE_ASSERT */\n    };\n\n    ~mutex() {\n#if TBB_USE_ASSERT\n        internal_destroy();\n#else\n  #if _WIN32||_WIN64\n        DeleteCriticalSection(&impl);\n  #else\n        pthread_mutex_destroy(&impl); \n\n  #endif /* _WIN32||_WIN64 */\n#endif /* TBB_USE_ASSERT */\n    };\n\n    class scoped_lock;\n    friend class scoped_lock;\n\n    //! The scoped locking pattern\n    /** It helps to avoid the common problem of forgetting to release lock.\n        It also nicely provides the \"node\" for queuing locks. */\n    class scoped_lock : internal::no_copy {\n    public:\n        //! Construct lock that has not acquired a mutex. \n        scoped_lock() : my_mutex(NULL) {};\n\n        //! Acquire lock on given mutex.\n        scoped_lock( mutex& mutex ) {\n            acquire( mutex );\n        }\n\n        //! Release lock (if lock is held).\n        ~scoped_lock() {\n            if( my_mutex ) \n                release();\n        }\n\n        //! Acquire lock on given mutex.\n        void acquire( mutex& mutex ) {\n#if TBB_USE_ASSERT\n            internal_acquire(mutex);\n#else\n            mutex.lock();\n            my_mutex = &mutex;\n#endif /* TBB_USE_ASSERT */\n        }\n\n        //! Try acquire lock on given mutex.\n        bool try_acquire( mutex& mutex ) {\n#if TBB_USE_ASSERT\n            return internal_try_acquire (mutex);\n#else\n            bool result = mutex.try_lock();\n            if( result )\n                my_mutex = &mutex;\n            return result;\n#endif /* TBB_USE_ASSERT */\n        }\n\n        //! 
Release lock\n        void release() {\n#if TBB_USE_ASSERT\n            internal_release ();\n#else\n            my_mutex->unlock();\n            my_mutex = NULL;\n#endif /* TBB_USE_ASSERT */\n        }\n\n    private:\n        //! The pointer to the current mutex to work\n        mutex* my_mutex;\n\n        //! All checks from acquire using mutex.state were moved here\n        void __TBB_EXPORTED_METHOD internal_acquire( mutex& m );\n\n        //! All checks from try_acquire using mutex.state were moved here\n        bool __TBB_EXPORTED_METHOD internal_try_acquire( mutex& m );\n\n        //! All checks from release using mutex.state were moved here\n        void __TBB_EXPORTED_METHOD internal_release();\n\n        friend class mutex;\n    };\n\n    // Mutex traits\n    static const bool is_rw_mutex = false;\n    static const bool is_recursive_mutex = false;\n    static const bool is_fair_mutex = false;\n\n    // ISO C++0x compatibility methods\n\n    //! Acquire lock\n    void lock() {\n#if TBB_USE_ASSERT\n        aligned_space<scoped_lock> tmp;\n        new(tmp.begin()) scoped_lock(*this);\n#else\n  #if _WIN32||_WIN64\n        EnterCriticalSection(&impl);\n  #else\n        int error_code = pthread_mutex_lock(&impl);\n        if( error_code )\n            tbb::internal::handle_perror(error_code,\"mutex: pthread_mutex_lock failed\");\n  #endif /* _WIN32||_WIN64 */\n#endif /* TBB_USE_ASSERT */\n    }\n\n    //! Try acquiring lock (non-blocking)\n    /** Return true if lock acquired; false otherwise. */\n    bool try_lock() {\n#if TBB_USE_ASSERT\n        aligned_space<scoped_lock> tmp;\n        scoped_lock& s = *tmp.begin();\n        s.my_mutex = NULL;\n        return s.internal_try_acquire(*this);\n#else\n  #if _WIN32||_WIN64\n        return TryEnterCriticalSection(&impl)!=0;\n  #else\n        return pthread_mutex_trylock(&impl)==0;\n  #endif /* _WIN32||_WIN64 */\n#endif /* TBB_USE_ASSERT */\n    }\n\n    //! 
Release lock\n    void unlock() {\n#if TBB_USE_ASSERT\n        aligned_space<scoped_lock> tmp;\n        scoped_lock& s = *tmp.begin();\n        s.my_mutex = this;\n        s.internal_release();\n#else\n  #if _WIN32||_WIN64\n        LeaveCriticalSection(&impl);\n  #else\n        pthread_mutex_unlock(&impl);\n  #endif /* _WIN32||_WIN64 */\n#endif /* TBB_USE_ASSERT */\n    }\n\n    //! Return native_handle\n  #if _WIN32||_WIN64\n    typedef LPCRITICAL_SECTION native_handle_type;\n  #else\n    typedef pthread_mutex_t* native_handle_type;\n  #endif\n    native_handle_type native_handle() { return (native_handle_type) &impl; }\n\n    enum state_t {\n        INITIALIZED=0x1234,\n        DESTROYED=0x789A,\n        HELD=0x56CD\n    };\nprivate:\n#if _WIN32||_WIN64\n    CRITICAL_SECTION impl;    \n    enum state_t state;\n#else\n    pthread_mutex_t impl;\n#endif /* _WIN32||_WIN64 */\n\n    //! All checks from mutex constructor using mutex.state were moved here\n    void __TBB_EXPORTED_METHOD internal_construct();\n\n    //! All checks from mutex destructor using mutex.state were moved here\n    void __TBB_EXPORTED_METHOD internal_destroy();\n\n#if _WIN32||_WIN64\npublic:\n    //!  Set the internal state\n    void set_state( state_t to ) { state = to; }\n#endif\n};\n\n__TBB_DEFINE_PROFILING_SET_NAME(mutex)\n\n} // namespace tbb \n\n#endif /* __TBB_mutex_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/null_mutex.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_null_mutex_H\n#define __TBB_null_mutex_H\n\n#include \"tbb_stddef.h\"\n\nnamespace tbb {\n    \n//! A mutex which does nothing\n/** A null_mutex does no operation and simulates success.\n    @ingroup synchronization */\nclass null_mutex : internal::mutex_copy_deprecated_and_disabled {\npublic:   \n    //! 
Represents acquisition of a mutex.\n    class scoped_lock : internal::no_copy {   \n    public:   \n        scoped_lock() {}\n        scoped_lock( null_mutex& ) {}   \n        ~scoped_lock() {}\n        void acquire( null_mutex& ) {}\n        bool try_acquire( null_mutex& ) { return true; }\n        void release() {}\n    };\n  \n    null_mutex() {}\n    \n    // Mutex traits   \n    static const bool is_rw_mutex = false;   \n    static const bool is_recursive_mutex = true;\n    static const bool is_fair_mutex = true;\n};  \n\n}\n\n#endif /* __TBB_null_mutex_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/null_rw_mutex.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_null_rw_mutex_H\n#define __TBB_null_rw_mutex_H\n\n#include \"tbb_stddef.h\"\n\nnamespace tbb {\n    \n//! A rw mutex which does nothing\n/** A null_rw_mutex is a rw mutex that does nothing and simulates successful operation.\n    @ingroup synchronization */\nclass null_rw_mutex : internal::mutex_copy_deprecated_and_disabled {\npublic:   \n    //! 
Represents acquisition of a mutex.\n    class scoped_lock : internal::no_copy {   \n    public:   \n        scoped_lock() {}\n        scoped_lock( null_rw_mutex& , bool = true ) {}\n        ~scoped_lock() {}\n        void acquire( null_rw_mutex& , bool = true ) {}\n        bool upgrade_to_writer() { return true; }\n        bool downgrade_to_reader() { return true; }\n        bool try_acquire( null_rw_mutex& , bool = true ) { return true; }\n        void release() {}\n    };\n  \n    null_rw_mutex() {}\n    \n    // Mutex traits   \n    static const bool is_rw_mutex = true;   \n    static const bool is_recursive_mutex = true;\n    static const bool is_fair_mutex = true;\n};  \n\n}\n\n#endif /* __TBB_null_rw_mutex_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/observer_proxy.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n#if !__TBB_ARENA_OBSERVER\n    #error __TBB_ARENA_OBSERVER must be defined\n#endif\n\n#if __TBB_SCHEDULER_OBSERVER\n\n#include \"observer_proxy.h\"\n#include \"tbb_main.h\"\n#include \"governor.h\"\n#include \"scheduler.h\"\n#include \"arena.h\"\n\nnamespace tbb {\nnamespace internal {\n\npadded<observer_list> the_global_observer_list;\n\n#if TBB_USE_ASSERT\nstatic atomic<int> observer_proxy_count;\n\nstruct check_observer_proxy_count {\n    ~check_observer_proxy_count() {\n        if( observer_proxy_count!=0 ) {\n            runtime_warning( \"Leaked %ld observer_proxy objects\\n\", long(observer_proxy_count) );\n        }\n    }\n};\n\nstatic check_observer_proxy_count the_check_observer_proxy_count;\n#endif /* TBB_USE_ASSERT */\n\ninterface6::task_scheduler_observer* observer_proxy::get_v6_observer() {\n    if(my_version != 6) return NULL;\n    return static_cast<interface6::task_scheduler_observer*>(my_observer);\n}\n\nbool observer_proxy::is_global() {\n    return !get_v6_observer() || get_v6_observer()->my_context_tag == interface6::task_scheduler_observer::global_tag;\n}\n\nobserver_proxy::observer_proxy( task_scheduler_observer_v3& tso )\n    : my_list(NULL), my_next(NULL), my_prev(NULL), my_observer(&tso)\n{\n#if TBB_USE_ASSERT\n    ++observer_proxy_count;\n#endif /* TBB_USE_ASSERT */\n    // 1 for observer\n    my_ref_count = 1;\n    my_version = load<relaxed>(my_observer->my_busy_count)\n                 == interface6::task_scheduler_observer::v6_trait ? 
6 : 0;\n    __TBB_ASSERT( my_version >= 6 || !load<relaxed>(my_observer->my_busy_count), NULL );\n}\n\n#if TBB_USE_ASSERT\nobserver_proxy::~observer_proxy () {\n    __TBB_ASSERT( !my_ref_count, \"Attempt to destroy proxy still in use\" );\n    poison_value(my_ref_count);\n    poison_pointer(my_prev);\n    poison_pointer(my_next);\n    --observer_proxy_count;\n}\n#endif /* TBB_USE_ASSERT */\n\ntemplate<memory_semantics M, class T, class V>\nT atomic_fetch_and_store ( T* addr, const V& val ) {\n    return (T)atomic_traits<sizeof(T), M>::fetch_and_store( addr, (T)val );\n}\n\nvoid observer_list::clear () {\n    __TBB_ASSERT( this != &the_global_observer_list, \"Method clear() cannot be used on the list of global observers\" );\n    // Though the method will work fine for the empty list, we require the caller\n    // to check for the list emptiness before invoking it to avoid extra overhead.\n    __TBB_ASSERT( !empty(), NULL );\n    {\n        scoped_lock lock(mutex(), /*is_writer=*/true);\n        observer_proxy *next = my_head;\n        while ( observer_proxy *p = next ) {\n            __TBB_ASSERT( p->my_version >= 6, NULL );\n            next = p->my_next;\n            // Both proxy p and observer p->my_observer (if non-null) are guaranteed\n            // to be alive while the list is locked.\n            task_scheduler_observer_v3 *obs = p->my_observer;\n            // Make sure that possible concurrent observer destruction does not\n            // conflict with the proxy list cleanup.\n            if ( !obs || !(p = (observer_proxy*)__TBB_FetchAndStoreW(&obs->my_proxy, 0)) )\n                continue;\n            // accessing 'obs' after detaching of obs->my_proxy leads to the race with observer destruction\n            __TBB_ASSERT( !next || p == next->my_prev, NULL );\n            __TBB_ASSERT( is_alive(p->my_ref_count), \"Observer's proxy died prematurely\" );\n            __TBB_ASSERT( p->my_ref_count == 1, \"Reference for observer is missing\" );\n#if 
TBB_USE_ASSERT\n            p->my_observer = NULL;\n            p->my_ref_count = 0;\n#endif /* TBB_USE_ASSERT */\n            remove(p);\n            delete p;\n        }\n    }\n    while( my_head )\n        __TBB_Yield();\n}\n\nvoid observer_list::insert ( observer_proxy* p ) {\n    scoped_lock lock(mutex(), /*is_writer=*/true);\n    if ( my_head ) {\n        p->my_prev = my_tail;\n        my_tail->my_next = p;\n    }\n    else\n        my_head = p;\n    my_tail = p;\n}\n\nvoid observer_list::remove ( observer_proxy* p ) {\n    __TBB_ASSERT( my_head, \"Attempt to remove an item from an empty list\" );\n    __TBB_ASSERT( !my_tail->my_next, \"Last item's my_next must be NULL\" );\n    if( p == my_tail ) {\n        __TBB_ASSERT( !p->my_next, NULL );\n        my_tail = p->my_prev;\n    }\n    else {\n        __TBB_ASSERT( p->my_next, NULL );\n        p->my_next->my_prev = p->my_prev;\n    }\n    if ( p == my_head ) {\n        __TBB_ASSERT( !p->my_prev, NULL );\n        my_head = p->my_next;\n    }\n    else {\n        __TBB_ASSERT( p->my_prev, NULL );\n        p->my_prev->my_next = p->my_next;\n    }\n    __TBB_ASSERT( (my_head && my_tail) || (!my_head && !my_tail), NULL );\n}\n\nvoid observer_list::remove_ref( observer_proxy* p ) {\n    int r = p->my_ref_count;\n    __TBB_ASSERT( is_alive(r), NULL );\n    while(r>1) {\n        __TBB_ASSERT( r!=0, NULL );\n        int r_old = p->my_ref_count.compare_and_swap(r-1,r);\n        if( r_old==r ) {\n            // Successfully decremented count.\n            return;\n        }\n        r = r_old;\n    }\n    __TBB_ASSERT( r==1, NULL );\n    // Reference count might go to zero\n    {\n        // Use lock to avoid resurrection by a thread concurrently walking the list\n        observer_list::scoped_lock lock(mutex(), /*is_writer=*/true);\n        r = --p->my_ref_count;\n        if( !r )\n            remove(p);\n    }\n    __TBB_ASSERT( r || !p->my_ref_count, NULL );\n    if( !r )\n        delete p;\n}\n\nvoid 
observer_list::do_notify_entry_observers( observer_proxy*& last, bool worker ) {\n    // Pointer p marches though the list from last (exclusively) to the end.\n    observer_proxy *p = last, *prev = p;\n    for(;;) {\n        task_scheduler_observer_v3* tso=NULL;\n        // Hold lock on list only long enough to advance to the next proxy in the list.\n        {\n            scoped_lock lock(mutex(), /*is_writer=*/false);\n            do {\n                if( p ) {\n                    // We were already processing the list.\n                    if( observer_proxy* q = p->my_next ) {\n                        if( p == prev )\n                            remove_ref_fast(prev); // sets prev to NULL if successful\n                        p = q;\n                    }\n                    else {\n                        // Reached the end of the list.\n                        if( p == prev ) {\n                            // Keep the reference as we store the 'last' pointer in scheduler\n                            __TBB_ASSERT(p->my_ref_count >= 1 + (p->my_observer?1:0), NULL);\n                        } else {\n                            // The last few proxies were empty\n                            __TBB_ASSERT(p->my_ref_count, NULL);\n                            ++p->my_ref_count;\n                            if( prev ) {\n                                lock.release();\n                                remove_ref(prev);\n                            }\n                        }\n                        last = p;\n                        return;\n                    }\n                } else {\n                    // Starting pass through the list\n                    p = my_head;\n                    if( !p )\n                        return;\n                }\n                tso = p->my_observer;\n            } while( !tso );\n            ++p->my_ref_count;\n            ++tso->my_busy_count;\n        }\n        __TBB_ASSERT( !prev || p!=prev, NULL );\n        // 
Release the proxy pinned before p\n        if( prev )\n            remove_ref(prev);\n        // Do not hold any locks on the list while calling user's code.\n        // Do not intercept any exceptions that may escape the callback so that\n        // they are either handled by the TBB scheduler or passed to the debugger.\n        tso->on_scheduler_entry(worker);\n        __TBB_ASSERT(p->my_ref_count, NULL);\n        intptr_t bc = --tso->my_busy_count;\n        __TBB_ASSERT_EX( bc>=0, \"my_busy_count underflowed\" );\n        prev = p;\n    }\n}\n\nvoid observer_list::do_notify_exit_observers( observer_proxy* last, bool worker ) {\n    // Pointer p marches though the list from the beginning to last (inclusively).\n    observer_proxy *p = NULL, *prev = NULL;\n    for(;;) {\n        task_scheduler_observer_v3* tso=NULL;\n        // Hold lock on list only long enough to advance to the next proxy in the list.\n        {\n            scoped_lock lock(mutex(), /*is_writer=*/false);\n            do {\n                if( p ) {\n                    // We were already processing the list.\n                    if( p != last ) {\n                        __TBB_ASSERT( p->my_next, \"List items before 'last' must have valid my_next pointer\" );\n                        if( p == prev )\n                            remove_ref_fast(prev); // sets prev to NULL if successful\n                        p = p->my_next;\n                    } else {\n                        // remove the reference from the last item\n                        remove_ref_fast(p);\n                        if( p ) {\n                            lock.release();\n                            remove_ref(p);\n                        }\n                        return;\n                    }\n                } else {\n                    // Starting pass through the list\n                    p = my_head;\n                    __TBB_ASSERT( p, \"Nonzero 'last' must guarantee that the global list is non-empty\" );\n      
          }\n                tso = p->my_observer;\n            } while( !tso );\n            // The item is already refcounted\n            if ( p != last ) // the last is already referenced since entry notification\n                ++p->my_ref_count;\n            ++tso->my_busy_count;\n        }\n        __TBB_ASSERT( !prev || p!=prev, NULL );\n        if( prev )\n            remove_ref(prev);\n        // Do not hold any locks on the list while calling user's code.\n        // Do not intercept any exceptions that may escape the callback so that\n        // they are either handled by the TBB scheduler or passed to the debugger.\n        tso->on_scheduler_exit(worker);\n        __TBB_ASSERT(p->my_ref_count || p == last, NULL);\n        intptr_t bc = --tso->my_busy_count;\n        __TBB_ASSERT_EX( bc>=0, \"my_busy_count underflowed\" );\n        prev = p;\n    }\n}\n\n#if __TBB_SLEEP_PERMISSION\nbool observer_list::ask_permission_to_leave() {\n    __TBB_ASSERT( this == &the_global_observer_list, \"This method cannot be used on lists of arena observers\" );\n    if( !my_head ) return true;\n    // Pointer p marches though the list\n    observer_proxy *p = NULL, *prev = NULL;\n    bool result = true;\n    while( result ) {\n        task_scheduler_observer* tso = NULL;\n        // Hold lock on list only long enough to advance to the next proxy in the list.\n        {\n            scoped_lock lock(mutex(), /*is_writer=*/false);\n            do {\n                if( p ) {\n                    // We were already processing the list.\n                    observer_proxy* q = p->my_next;\n                    // read next, remove the previous reference\n                    if( p == prev )\n                        remove_ref_fast(prev); // sets prev to NULL if successful\n                    if( q ) p = q;\n                    else {\n                        // Reached the end of the list.\n                        if( prev ) {\n                            lock.release();\n    
                        remove_ref(prev);\n                        }\n                        return result;\n                    }\n                } else {\n                    // Starting pass through the list\n                    p = my_head;\n                    if( !p )\n                        return result;\n                }\n                tso = p->get_v6_observer();\n            } while( !tso );\n            ++p->my_ref_count;\n            ++tso->my_busy_count;\n        }\n        __TBB_ASSERT( !prev || p!=prev, NULL );\n        // Release the proxy pinned before p\n        if( prev )\n            remove_ref(prev);\n        // Do not hold any locks on the list while calling user's code.\n        // Do not intercept any exceptions that may escape the callback so that\n        // they are either handled by the TBB scheduler or passed to the debugger.\n        result = tso->may_sleep();\n        __TBB_ASSERT(p->my_ref_count, NULL);\n        intptr_t bc = --tso->my_busy_count;\n        __TBB_ASSERT_EX( bc>=0, \"my_busy_count underflowed\" );\n        prev = p;\n    }\n    if( prev )\n        remove_ref(prev);\n    return result;\n}\n#endif//__TBB_SLEEP_PERMISSION\n\nvoid task_scheduler_observer_v3::observe( bool enable ) {\n    if( enable ) {\n        if( !my_proxy ) {\n            my_proxy = new observer_proxy( *this );\n            my_busy_count = 0; // proxy stores versioning information, clear it\n            if ( !my_proxy->is_global() ) {\n                // Local observer activation\n                generic_scheduler* s = governor::local_scheduler_if_initialized();\n#if __TBB_TASK_ARENA\n                __TBB_ASSERT( my_proxy->get_v6_observer(), NULL );\n                intptr_t tag = my_proxy->get_v6_observer()->my_context_tag;\n                if( tag != interface6::task_scheduler_observer::implicit_tag ) { // explicit arena\n                    task_arena *a = reinterpret_cast<task_arena*>(tag);\n                    a->initialize();\n              
      my_proxy->my_list = &a->my_arena->my_observers;\n                } else\n#endif\n                {\n                    if( !s ) s = governor::init_scheduler( (unsigned)task_scheduler_init::automatic, 0, true );\n                    __TBB_ASSERT( __TBB_InitOnce::initialization_done(), NULL );\n                    __TBB_ASSERT( s && s->my_arena, NULL );\n                    my_proxy->my_list = &s->my_arena->my_observers;\n                }\n                my_proxy->my_list->insert(my_proxy);\n                // Notify newly activated observer and other pending ones if it belongs to current arena\n                if(s && &s->my_arena->my_observers == my_proxy->my_list )\n                    my_proxy->my_list->notify_entry_observers( s->my_last_local_observer, s->is_worker() );\n            } else {\n                // Obsolete. Global observer activation\n                if( !__TBB_InitOnce::initialization_done() )\n                    DoOneTimeInitializations();\n                my_proxy->my_list = &the_global_observer_list;\n                my_proxy->my_list->insert(my_proxy);\n                if( generic_scheduler* s = governor::local_scheduler_if_initialized() ) {\n                    // Notify newly created observer of its own thread.\n                    // Any other pending observers are notified too.\n                    the_global_observer_list.notify_entry_observers( s->my_last_global_observer, s->is_worker() );\n                }\n            }\n        }\n    } else {\n        // Make sure that possible concurrent proxy list cleanup does not conflict\n        // with the observer destruction here.\n        if ( observer_proxy* proxy = (observer_proxy*)__TBB_FetchAndStoreW(&my_proxy, 0) ) {\n            // List destruction should not touch this proxy after we've won the above interlocked exchange.\n            __TBB_ASSERT( proxy->my_observer == this, NULL );\n            __TBB_ASSERT( is_alive(proxy->my_ref_count), \"Observer's proxy died 
prematurely\" );\n            __TBB_ASSERT( proxy->my_ref_count >= 1, \"reference for observer missing\" );\n            observer_list &list = *proxy->my_list;\n            {\n                // Ensure that none of the list walkers relies on observer pointer validity\n                observer_list::scoped_lock lock(list.mutex(), /*is_writer=*/true);\n                proxy->my_observer = NULL;\n                // Proxy may still be held by other threads (to track the last notified observer)\n                if( !--proxy->my_ref_count ) {// nobody can increase it under exclusive lock\n                    list.remove(proxy);\n                    __TBB_ASSERT( !proxy->my_ref_count, NULL );\n                    delete proxy;\n                }\n            }\n            while( my_busy_count ) // other threads are still accessing the callback\n                __TBB_Yield();\n        }\n    }\n}\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* __TBB_SCHEDULER_OBSERVER */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/observer_proxy.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_observer_proxy_H\n#define _TBB_observer_proxy_H\n\n#if __TBB_SCHEDULER_OBSERVER\n\n#include \"scheduler_common.h\" // to include task.h\n#include \"tbb/task_scheduler_observer.h\"\n#include \"tbb/spin_rw_mutex.h\"\n#include \"tbb/aligned_space.h\"\n\nnamespace tbb {\nnamespace internal {\n\nclass observer_list {\n    friend class arena;\n\n    // Mutex is wrapped with aligned_space to shut up warnings when its destructor\n    // is called while threads are still using it.\n    typedef aligned_space<spin_rw_mutex>  my_mutex_type;\n\n    //! 
Pointer to the head of this list.\n    observer_proxy* my_head;\n\n    //! Pointer to the tail of this list.\n    observer_proxy* my_tail;\n\n    //! Mutex protecting this list.\n    my_mutex_type my_mutex;\n\n    //! Back-pointer to the arena this list belongs to.\n    arena* my_arena;\n\n    //! Decrement refcount of the proxy p if there are other outstanding references.\n    /** In case of success sets p to NULL. Must be invoked from under the list lock. **/\n    inline static void remove_ref_fast( observer_proxy*& p );\n\n    //! Implements notify_entry_observers functionality.\n    void do_notify_entry_observers( observer_proxy*& last, bool worker );\n\n    //! Implements notify_exit_observers functionality.\n    void do_notify_exit_observers( observer_proxy* last, bool worker );\n\npublic:\n    observer_list () : my_head(NULL), my_tail(NULL) {}\n\n    //! Removes and destroys all observer proxies from the list.\n    /** Cannot be used concurrently with other methods. **/\n    void clear ();\n\n    //! Add observer proxy to the tail of the list.\n    void insert ( observer_proxy* p );\n\n    //! Remove observer proxy from the list.\n    void remove ( observer_proxy* p );\n\n    //! Decrement refcount of the proxy and destroy it if necessary.\n    /** When refcount reaches zero removes the proxy from the list and destructs it. **/\n    void remove_ref( observer_proxy* p );\n\n    //! Type of the scoped lock for the reader-writer mutex associated with the list.\n    typedef spin_rw_mutex::scoped_lock scoped_lock;\n\n    //! Accessor to the reader-writer mutex associated with the list.\n    spin_rw_mutex& mutex () { return my_mutex.begin()[0]; }\n\n    bool empty () const { return my_head == NULL; }\n\n    //! Call entry notifications on observers added after last was notified.\n    /** Updates last to become the last notified observer proxy (in the global list)\n        or leaves it to be NULL. The proxy has its refcount incremented. 
**/\n    inline void notify_entry_observers( observer_proxy*& last, bool worker );\n\n    //! Call exit notifications on last and observers added before it.\n    inline void notify_exit_observers( observer_proxy*& last, bool worker );\n\n    //! Call may_sleep callbacks to ask for permission for a worker thread to leave market\n    bool ask_permission_to_leave();\n}; // class observer_list\n\n//! Wrapper for an observer object\n/** To maintain shared lists of observers the scheduler first wraps each observer\n    object into a proxy so that a list item remained valid even after the corresponding\n    proxy object is destroyed by the user code. **/\nclass observer_proxy {\n    friend class task_scheduler_observer_v3;\n    friend class observer_list;\n    //! Reference count used for garbage collection.\n    /** 1 for reference from my task_scheduler_observer.\n        1 for each task dispatcher's last observer pointer. \n        No accounting for neighbors in the shared list. */\n    atomic<int> my_ref_count;\n    //! Reference to the list this observer belongs to.\n    observer_list* my_list;\n    //! Pointer to next observer in the list specified by my_head.\n    /** NULL for the last item in the list. **/\n    observer_proxy* my_next;\n    //! Pointer to the previous observer in the list specified by my_head.\n    /** For the head of the list points to the last item. **/\n    observer_proxy* my_prev;\n    //! Associated observer\n    task_scheduler_observer_v3* my_observer;\n    //! Version\n    char my_version;\n\n    interface6::task_scheduler_observer* get_v6_observer();\n    bool is_global(); //TODO: move them back inline when un-CPF'ing\n\n    //! Constructs proxy for the given observer and adds it to the specified list.\n    observer_proxy( task_scheduler_observer_v3& );\n\n#if TBB_USE_ASSERT\n    ~observer_proxy();\n#endif /* TBB_USE_ASSERT */\n\n    //! 
Shut up the warning\n    observer_proxy& operator = ( const observer_proxy& );\n}; // class observer_proxy\n\ninline void observer_list::remove_ref_fast( observer_proxy*& p ) {\n    if( p->my_observer ) {\n        // Can decrement refcount quickly, as it cannot drop to zero while under the lock.\n        int r = --p->my_ref_count;\n        __TBB_ASSERT_EX( r, NULL );\n        p = NULL;\n    } else {\n        // Use slow form of refcount decrementing, after the lock is released.\n    }\n}\n\ninline void observer_list::notify_entry_observers( observer_proxy*& last, bool worker ) {\n    if ( last == my_tail )\n        return;\n    do_notify_entry_observers( last, worker );\n}\n\ninline void observer_list::notify_exit_observers( observer_proxy*& last, bool worker ) {\n    if ( !last )\n        return;\n    __TBB_ASSERT(is_alive((uintptr_t)last), NULL);\n    do_notify_exit_observers( last, worker );\n    __TBB_ASSERT(last, NULL);\n    poison_value(last);\n}\n\nextern padded<observer_list> the_global_observer_list;\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n#endif /* _TBB_observer_proxy_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/parallel_do.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_parallel_do_H\n#define __TBB_parallel_do_H\n\n#include \"internal/_range_iterator.h\"\n#include \"task.h\"\n#include \"aligned_space.h\"\n#include <iterator>\n\nnamespace tbb {\n\n//! @cond INTERNAL\nnamespace internal {\n    template<typename Body, typename Item> class parallel_do_feeder_impl;\n    template<typename Body> class do_group_task;\n\n    //! 
Strips its template type argument from 'cv' and '&' qualifiers\n    template<typename T>\n    struct strip { typedef T type; };\n    template<typename T>\n    struct strip<T&> { typedef T type; };\n    template<typename T>\n    struct strip<const T&> { typedef T type; };\n    template<typename T>\n    struct strip<volatile T&> { typedef T type; };\n    template<typename T>\n    struct strip<const volatile T&> { typedef T type; };\n    // Most of the compilers remove cv-qualifiers from non-reference function argument types. \n    // But unfortunately there are those that don't.\n    template<typename T>\n    struct strip<const T> { typedef T type; };\n    template<typename T>\n    struct strip<volatile T> { typedef T type; };\n    template<typename T>\n    struct strip<const volatile T> { typedef T type; };\n} // namespace internal\n//! @endcond\n\n//! Class the user supplied algorithm body uses to add new tasks\n/** \\param Item Work item type **/\ntemplate<typename Item>\nclass parallel_do_feeder: internal::no_copy\n{\n    parallel_do_feeder() {}\n    virtual ~parallel_do_feeder () {}\n    virtual void internal_add( const Item& item ) = 0;\n    template<typename Body_, typename Item_> friend class internal::parallel_do_feeder_impl;\npublic:\n    //! Add a work item to a running parallel_do.\n    void add( const Item& item ) {internal_add(item);}\n};\n\n//! @cond INTERNAL\nnamespace internal {\n    //! 
For internal use only.\n    /** Selects one of the two possible forms of function call member operator.\n        @ingroup algorithms **/\n    template<class Body, typename Item>\n    class parallel_do_operator_selector\n    {\n        typedef parallel_do_feeder<Item> Feeder;\n        template<typename A1, typename A2, typename CvItem >\n        static void internal_call( const Body& obj, A1& arg1, A2&, void (Body::*)(CvItem) const ) {\n            obj(arg1);\n        }\n        template<typename A1, typename A2, typename CvItem >\n        static void internal_call( const Body& obj, A1& arg1, A2& arg2, void (Body::*)(CvItem, parallel_do_feeder<Item>&) const ) {\n            obj(arg1, arg2);\n        }\n\n    public:\n        template<typename A1, typename A2 >\n        static void call( const Body& obj, A1& arg1, A2& arg2 )\n        {\n            internal_call( obj, arg1, arg2, &Body::operator() );\n        }\n    };\n\n    //! For internal use only.\n    /** Executes one iteration of a do.\n        @ingroup algorithms */\n    template<typename Body, typename Item>\n    class do_iteration_task: public task\n    {\n        typedef parallel_do_feeder_impl<Body, Item> feeder_type;\n\n        Item my_value;\n        feeder_type& my_feeder;\n\n        do_iteration_task( const Item& value, feeder_type& feeder ) : \n            my_value(value), my_feeder(feeder)\n        {}\n\n        /*override*/ \n        task* execute()\n        {\n            parallel_do_operator_selector<Body, Item>::call(*my_feeder.my_body, my_value, my_feeder);\n            return NULL;\n        }\n\n        template<typename Body_, typename Item_> friend class parallel_do_feeder_impl;\n    }; // class do_iteration_task\n\n    template<typename Iterator, typename Body, typename Item>\n    class do_iteration_task_iter: public task\n    {\n        typedef parallel_do_feeder_impl<Body, Item> feeder_type;\n\n        Iterator my_iter;\n        feeder_type& my_feeder;\n\n        do_iteration_task_iter( 
const Iterator& iter, feeder_type& feeder ) : \n            my_iter(iter), my_feeder(feeder)\n        {}\n\n        /*override*/ \n        task* execute()\n        {\n            parallel_do_operator_selector<Body, Item>::call(*my_feeder.my_body, *my_iter, my_feeder);\n            return NULL;\n        }\n\n        template<typename Iterator_, typename Body_, typename Item_> friend class do_group_task_forward;    \n        template<typename Body_, typename Item_> friend class do_group_task_input;    \n        template<typename Iterator_, typename Body_, typename Item_> friend class do_task_iter;    \n    }; // class do_iteration_task_iter\n\n    //! For internal use only.\n    /** Implements new task adding procedure.\n        @ingroup algorithms **/\n    template<class Body, typename Item>\n    class parallel_do_feeder_impl : public parallel_do_feeder<Item>\n    {\n        /*override*/ \n        void internal_add( const Item& item )\n        {\n            typedef do_iteration_task<Body, Item> iteration_type;\n\n            iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this);\n\n            t.spawn( t );\n        }\n    public:\n        const Body* my_body;\n        empty_task* my_barrier;\n\n        parallel_do_feeder_impl()\n        {\n            my_barrier = new( task::allocate_root() ) empty_task();\n            __TBB_ASSERT(my_barrier, \"root task allocation failed\");\n        }\n\n#if __TBB_TASK_GROUP_CONTEXT\n        parallel_do_feeder_impl(tbb::task_group_context &context)\n        {\n            my_barrier = new( task::allocate_root(context) ) empty_task();\n            __TBB_ASSERT(my_barrier, \"root task allocation failed\");\n        }\n#endif\n\n        ~parallel_do_feeder_impl()\n        {\n            my_barrier->destroy(*my_barrier);\n        }\n    }; // class parallel_do_feeder_impl\n\n\n    //! 
For internal use only\n    /** Unpacks a block of iterations.\n        @ingroup algorithms */\n    \n    template<typename Iterator, typename Body, typename Item>\n    class do_group_task_forward: public task\n    {\n        static const size_t max_arg_size = 4;         \n\n        typedef parallel_do_feeder_impl<Body, Item> feeder_type;\n\n        feeder_type& my_feeder;\n        Iterator my_first;\n        size_t my_size;\n        \n        do_group_task_forward( Iterator first, size_t size, feeder_type& feeder ) \n            : my_feeder(feeder), my_first(first), my_size(size)\n        {}\n\n        /*override*/ task* execute()\n        {\n            typedef do_iteration_task_iter<Iterator, Body, Item> iteration_type;\n            __TBB_ASSERT( my_size>0, NULL );\n            task_list list;\n            task* t; \n            size_t k=0; \n            for(;;) {\n                t = new( allocate_child() ) iteration_type( my_first, my_feeder );\n                ++my_first;\n                if( ++k==my_size ) break;\n                list.push_back(*t);\n            }\n            set_ref_count(int(k+1));\n            spawn(list);\n            spawn_and_wait_for_all(*t);\n            return NULL;\n        }\n\n        template<typename Iterator_, typename Body_, typename _Item> friend class do_task_iter;\n    }; // class do_group_task_forward\n\n    template<typename Body, typename Item>\n    class do_group_task_input: public task\n    {\n        static const size_t max_arg_size = 4;         \n        \n        typedef parallel_do_feeder_impl<Body, Item> feeder_type;\n\n        feeder_type& my_feeder;\n        size_t my_size;\n        aligned_space<Item, max_arg_size> my_arg;\n\n        do_group_task_input( feeder_type& feeder ) \n            : my_feeder(feeder), my_size(0)\n        {}\n\n        /*override*/ task* execute()\n        {\n            typedef do_iteration_task_iter<Item*, Body, Item> iteration_type;\n            __TBB_ASSERT( my_size>0, NULL );\n    
        task_list list;\n            task* t; \n            size_t k=0; \n            for(;;) {\n                t = new( allocate_child() ) iteration_type( my_arg.begin() + k, my_feeder );\n                if( ++k==my_size ) break;\n                list.push_back(*t);\n            }\n            set_ref_count(int(k+1));\n            spawn(list);\n            spawn_and_wait_for_all(*t);\n            return NULL;\n        }\n\n        ~do_group_task_input(){\n            for( size_t k=0; k<my_size; ++k)\n                (my_arg.begin() + k)->~Item();\n        }\n\n        template<typename Iterator_, typename Body_, typename Item_> friend class do_task_iter;\n    }; // class do_group_task_input\n    \n    //! For internal use only.\n    /** Gets block of iterations and packages them into a do_group_task.\n        @ingroup algorithms */\n    template<typename Iterator, typename Body, typename Item>\n    class do_task_iter: public task\n    {\n        typedef parallel_do_feeder_impl<Body, Item> feeder_type;\n\n    public:\n        do_task_iter( Iterator first, Iterator last , feeder_type& feeder ) : \n            my_first(first), my_last(last), my_feeder(feeder)\n        {}\n\n    private:\n        Iterator my_first;\n        Iterator my_last;\n        feeder_type& my_feeder;\n\n        /* Do not merge run(xxx) and run_xxx() methods. They are separated in order\n            to make sure that compilers will eliminate unused argument of type xxx\n            (that is will not put it on stack). The sole purpose of this argument \n            is overload resolution.\n            \n            An alternative could be using template functions, but explicit specialization \n            of member function templates is not supported for non specialized class \n            templates. 
Besides template functions would always fall back to the least \n            efficient variant (the one for input iterators) in case of iterators having \n            custom tags derived from basic ones. */\n        /*override*/ task* execute()\n        {\n            typedef typename std::iterator_traits<Iterator>::iterator_category iterator_tag;\n            return run( (iterator_tag*)NULL );\n        }\n\n        /** This is the most restricted variant that operates on input iterators or\n            iterators with unknown tags (tags not derived from the standard ones). **/\n        inline task* run( void* ) { return run_for_input_iterator(); }\n        \n        task* run_for_input_iterator() {\n            typedef do_group_task_input<Body, Item> block_type;\n\n            block_type& t = *new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(my_feeder);\n            size_t k=0; \n            while( !(my_first == my_last) ) {\n                new (t.my_arg.begin() + k) Item(*my_first);\n                ++my_first;\n                if( ++k==block_type::max_arg_size ) {\n                    if ( !(my_first == my_last) )\n                        recycle_to_reexecute();\n                    break;\n                }\n            }\n            if( k==0 ) {\n                destroy(t);\n                return NULL;\n            } else {\n                t.my_size = k;\n                return &t;\n            }\n        }\n\n        inline task* run( std::forward_iterator_tag* ) { return run_for_forward_iterator(); }\n\n        task* run_for_forward_iterator() {\n            typedef do_group_task_forward<Iterator, Body, Item> block_type;\n\n            Iterator first = my_first;\n            size_t k=0; \n            while( !(my_first==my_last) ) {\n                ++my_first;\n                if( ++k==block_type::max_arg_size ) {\n                    if ( !(my_first==my_last) )\n                        recycle_to_reexecute();\n                    
break;\n                }\n            }\n            return k==0 ? NULL : new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(first, k, my_feeder);\n        }\n        \n        inline task* run( std::random_access_iterator_tag* ) { return run_for_random_access_iterator(); }\n\n        task* run_for_random_access_iterator() {\n            typedef do_group_task_forward<Iterator, Body, Item> block_type;\n            typedef do_iteration_task_iter<Iterator, Body, Item> iteration_type;\n            \n            size_t k = static_cast<size_t>(my_last-my_first); \n            if( k > block_type::max_arg_size ) {\n                Iterator middle = my_first + k/2;\n\n                empty_task& c = *new( allocate_continuation() ) empty_task;\n                do_task_iter& b = *new( c.allocate_child() ) do_task_iter(middle, my_last, my_feeder);\n                recycle_as_child_of(c);\n\n                my_last = middle;\n                c.set_ref_count(2);\n                c.spawn(b);\n                return this;\n            }else if( k != 0 ) {\n                task_list list;\n                task* t; \n                size_t k1=0; \n                for(;;) {\n                    t = new( allocate_child() ) iteration_type(my_first, my_feeder);\n                    ++my_first;\n                    if( ++k1==k ) break;\n                    list.push_back(*t);\n                }\n                set_ref_count(int(k+1));\n                spawn(list);\n                spawn_and_wait_for_all(*t);\n            }\n            return NULL;\n        }\n    }; // class do_task_iter\n\n    //! 
For internal use only.\n    /** Implements parallel iteration over a range.\n        @ingroup algorithms */\n    template<typename Iterator, typename Body, typename Item> \n    void run_parallel_do( Iterator first, Iterator last, const Body& body\n#if __TBB_TASK_GROUP_CONTEXT\n        , task_group_context& context\n#endif\n        )\n    {\n        typedef do_task_iter<Iterator, Body, Item> root_iteration_task;\n#if __TBB_TASK_GROUP_CONTEXT\n        parallel_do_feeder_impl<Body, Item> feeder(context);\n#else\n        parallel_do_feeder_impl<Body, Item> feeder;\n#endif\n        feeder.my_body = &body;\n\n        root_iteration_task &t = *new( feeder.my_barrier->allocate_child() ) root_iteration_task(first, last, feeder);\n\n        feeder.my_barrier->set_ref_count(2);\n        feeder.my_barrier->spawn_and_wait_for_all(t);\n    }\n\n    //! For internal use only.\n    /** Detects types of Body's operator function arguments.\n        @ingroup algorithms **/\n    template<typename Iterator, typename Body, typename Item> \n    void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item) const\n#if __TBB_TASK_GROUP_CONTEXT\n        , task_group_context& context \n#endif // __TBB_TASK_GROUP_CONTEXT \n        )\n    {\n        run_parallel_do<Iterator, Body, typename strip<Item>::type>( first, last, body\n#if __TBB_TASK_GROUP_CONTEXT\n            , context\n#endif // __TBB_TASK_GROUP_CONTEXT \n            );\n    }\n\n    //! 
For internal use only.\n    /** Detects types of Body's operator function arguments.\n        @ingroup algorithms **/\n    template<typename Iterator, typename Body, typename Item, typename _Item> \n    void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item, parallel_do_feeder<_Item>&) const\n#if __TBB_TASK_GROUP_CONTEXT\n        , task_group_context& context \n#endif // __TBB_TASK_GROUP_CONTEXT\n        )\n    {\n        run_parallel_do<Iterator, Body, typename strip<Item>::type>( first, last, body\n#if __TBB_TASK_GROUP_CONTEXT\n            , context\n#endif // __TBB_TASK_GROUP_CONTEXT\n            );\n    }\n\n} // namespace internal\n//! @endcond\n\n\n/** \\page parallel_do_body_req Requirements on parallel_do body\n    Class \\c Body implementing the concept of parallel_do body must define:\n    - \\code \n        B::operator()( \n                cv_item_type item,\n                parallel_do_feeder<item_type>& feeder\n        ) const\n        \n        OR\n\n        B::operator()( cv_item_type& item ) const\n      \\endcode                                                      Process item. \n                                                                    May be invoked concurrently  for the same \\c this but different \\c item.\n                                                        \n    - \\code item_type( const item_type& ) \\endcode \n                                                                    Copy a work item.\n    - \\code ~item_type() \\endcode                            Destroy a work item\n**/\n\n/** \\name parallel_do\n    See also requirements on \\ref parallel_do_body_req \"parallel_do Body\". **/\n//@{\n//! 
Parallel iteration over a range, with optional addition of more work.\n/** @ingroup algorithms */\ntemplate<typename Iterator, typename Body> \nvoid parallel_do( Iterator first, Iterator last, const Body& body )\n{\n    if ( first == last )\n        return;\n#if __TBB_TASK_GROUP_CONTEXT\n    task_group_context context;\n#endif // __TBB_TASK_GROUP_CONTEXT\n    internal::select_parallel_do( first, last, body, &Body::operator()\n#if __TBB_TASK_GROUP_CONTEXT\n        , context\n#endif // __TBB_TASK_GROUP_CONTEXT\n        );\n}\n\ntemplate<typename Range, typename Body>\nvoid parallel_do(Range& rng, const Body& body) {\n    parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body);\n}\n\ntemplate<typename Range, typename Body>\nvoid parallel_do(const Range& rng, const Body& body) {\n    parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body);\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! Parallel iteration over a range, with optional addition of more work and user-supplied context\n/** @ingroup algorithms */\ntemplate<typename Iterator, typename Body> \nvoid parallel_do( Iterator first, Iterator last, const Body& body, task_group_context& context  )\n{\n    if ( first == last )\n        return;\n    internal::select_parallel_do( first, last, body, &Body::operator(), context );\n}\n\ntemplate<typename Range, typename Body>\nvoid parallel_do(Range& rng, const Body& body, task_group_context& context) {\n    parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body, context);\n}\n\ntemplate<typename Range, typename Body>\nvoid parallel_do(const Range& rng, const Body& body, task_group_context& context) {\n    parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body, context);\n}\n\n#endif // __TBB_TASK_GROUP_CONTEXT\n\n//@}\n\n} // namespace \n\n#endif /* __TBB_parallel_do_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/parallel_for.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_parallel_for_H\n#define __TBB_parallel_for_H\n\n#include <new>\n#include \"task.h\"\n#include \"partitioner.h\"\n#include \"blocked_range.h\"\n#include \"tbb_exception.h\"\n\nnamespace tbb {\n\nnamespace interface7 {\n//! @cond INTERNAL\nnamespace internal {\n\n    //! allocate right task with new parent\n    void* allocate_sibling(task* start_for_task, size_t bytes);\n\n    //! 
Task type used in parallel_for\n    /** @ingroup algorithms */\n    template<typename Range, typename Body, typename Partitioner>\n    class start_for: public task {\n        Range my_range;\n        const Body my_body;\n        typename Partitioner::task_partition_type my_partition;\n        /*override*/ task* execute();\n\n        //! Update affinity info, if any.\n        /*override*/ void note_affinity( affinity_id id ) {\n            my_partition.note_affinity( id );\n        }\n\n    public:\n        //! Constructor for root task.\n        start_for( const Range& range, const Body& body, Partitioner& partitioner ) :\n            my_range(range),\n            my_body(body),\n            my_partition(partitioner)\n        {\n        }\n        //! Splitting constructor used to generate children.\n        /** parent_ becomes left child.  Newly constructed object is right child. */\n        start_for( start_for& parent_, typename Partitioner::split_type& split_obj) :\n            my_range(parent_.my_range, split_obj),\n            my_body(parent_.my_body),\n            my_partition(parent_.my_partition, split_obj)\n        {\n            my_partition.set_affinity(*this);\n        }\n        //! Construct right child from the given range as response to the demand.\n        /** parent_ remains left child.  Newly constructed object is right child. 
*/\n        start_for( start_for& parent_, const Range& r, depth_t d ) :\n            my_range(r),\n            my_body(parent_.my_body),\n            my_partition(parent_.my_partition, split())\n        {\n            my_partition.set_affinity(*this);\n            my_partition.align_depth( d );\n        }\n        static void run(  const Range& range, const Body& body, Partitioner& partitioner ) {\n            if( !range.empty() ) {\n#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP\n                start_for& a = *new(task::allocate_root()) start_for(range,body,partitioner);\n#else\n                // Bound context prevents exceptions from body to affect nesting or sibling algorithms,\n                // and allows users to handle exceptions safely by wrapping parallel_for in the try-block.\n                task_group_context context;\n                start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner);\n#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */\n                task::spawn_root_and_wait(a);\n            }\n        }\n#if __TBB_TASK_GROUP_CONTEXT\n        static void run(  const Range& range, const Body& body, Partitioner& partitioner, task_group_context& context ) {\n            if( !range.empty() ) {\n                start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner);\n                task::spawn_root_and_wait(a);\n            }\n        }\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n        //! Run body for range, serves as callback for partitioner\n        void run_body( Range &r ) { my_body( r ); }\n\n        //! spawn right task, serves as callback for partitioner\n        void offer_work(typename Partitioner::split_type& split_obj) {\n            spawn( *new( allocate_sibling(static_cast<task*>(this), sizeof(start_for)) ) start_for(*this, split_obj) );\n        }\n        //! 
spawn right task, serves as callback for partitioner\n        void offer_work(const Range& r, depth_t d = 0) {\n            spawn( *new( allocate_sibling(static_cast<task*>(this), sizeof(start_for)) ) start_for(*this, r, d) );\n        }\n    };\n\n    //! allocate right task with new parent\n    // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined\n    inline void* allocate_sibling(task* start_for_task, size_t bytes) {\n        task* parent_ptr = new( start_for_task->allocate_continuation() ) flag_task();\n        start_for_task->set_parent(parent_ptr);\n        parent_ptr->set_ref_count(2);\n        return &parent_ptr->allocate_child().allocate(bytes);\n    }\n\n    //! execute task for parallel_for\n    template<typename Range, typename Body, typename Partitioner>\n    task* start_for<Range,Body,Partitioner>::execute() {\n        my_partition.check_being_stolen( *this );\n        my_partition.execute(*this, my_range);\n        return NULL;\n    }\n} // namespace internal\n//! @endcond\n} // namespace interfaceX\n\n//! @cond INTERNAL\nnamespace internal {\n    using interface7::internal::start_for;\n\n    //! 
Calls the function with values from range [begin, end) with a step provided\n    template<typename Function, typename Index>\n    class parallel_for_body : internal::no_assign {\n        const Function &my_func;\n        const Index my_begin;\n        const Index my_step;\n    public:\n        parallel_for_body( const Function& _func, Index& _begin, Index& _step )\n            : my_func(_func), my_begin(_begin), my_step(_step) {}\n\n        void operator()( const tbb::blocked_range<Index>& r ) const {\n            // A set of local variables to help the compiler with vectorization of the following loop.\n            Index b = r.begin();\n            Index e = r.end();\n            Index ms = my_step;\n            Index k = my_begin + b*ms;\n\n#if __INTEL_COMPILER\n#pragma ivdep\n#if __TBB_ASSERT_ON_VECTORIZATION_FAILURE\n#pragma vector always assert\n#endif\n#endif\n            for ( Index i = b; i < e; ++i, k += ms ) {\n                my_func( k );\n            }\n        }\n    };\n} // namespace internal\n//! @endcond\n\n// Requirements on Range concept are documented in blocked_range.h\n\n/** \\page parallel_for_body_req Requirements on parallel_for body\n    Class \\c Body implementing the concept of parallel_for body must define:\n    - \\code Body::Body( const Body& ); \\endcode                 Copy constructor\n    - \\code Body::~Body(); \\endcode                             Destructor\n    - \\code void Body::operator()( Range& r ) const; \\endcode   Function call operator applying the body to range \\c r.\n**/\n\n/** \\name parallel_for\n    See also requirements on \\ref range_req \"Range\" and \\ref parallel_for_body_req \"parallel_for Body\". **/\n//@{\n\n//! 
Parallel iteration over range with default partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_for( const Range& range, const Body& body ) {\n    internal::start_for<Range,Body,const __TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());\n}\n\n//! Parallel iteration over range with simple partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) {\n    internal::start_for<Range,Body,const simple_partitioner>::run(range,body,partitioner);\n}\n\n//! Parallel iteration over range with auto_partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) {\n    internal::start_for<Range,Body,const auto_partitioner>::run(range,body,partitioner);\n}\n\n//! Parallel iteration over range with affinity_partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) {\n    internal::start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner);\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! Parallel iteration over range with default partitioner and user-supplied context.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_for( const Range& range, const Body& body, task_group_context& context ) {\n    internal::start_for<Range,Body,const __TBB_DEFAULT_PARTITIONER>::run(range, body, __TBB_DEFAULT_PARTITIONER(), context);\n}\n\n//! 
Parallel iteration over range with simple partitioner and user-supplied context.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner, task_group_context& context ) {\n    internal::start_for<Range,Body,const simple_partitioner>::run(range, body, partitioner, context);\n}\n\n//! Parallel iteration over range with auto_partitioner and user-supplied context.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner, task_group_context& context ) {\n    internal::start_for<Range,Body,const auto_partitioner>::run(range, body, partitioner, context);\n}\n\n//! Parallel iteration over range with affinity_partitioner and user-supplied context.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner, task_group_context& context ) {\n    internal::start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner, context);\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n//@}\n\nnamespace strict_ppl {\n\n//@{\n//! 
Implementation of parallel iteration over stepped range of integers with explicit step and partitioner\ntemplate <typename Index, typename Function, typename Partitioner>\nvoid parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner) {\n    if (step <= 0 )\n        internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument\n    else if (last > first) {\n        // Above \"else\" avoids \"potential divide by zero\" warning on some platforms\n        Index end = (last - first - Index(1)) / step + Index(1);\n        tbb::blocked_range<Index> range(static_cast<Index>(0), end);\n        internal::parallel_for_body<Function, Index> body(f, first, step);\n        tbb::parallel_for(range, body, partitioner);\n    }\n}\n\n//! Parallel iteration over a range of integers with a step provided and default partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, Index step, const Function& f) {\n    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, auto_partitioner());\n}\n//! Parallel iteration over a range of integers with a step provided and simple partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner) {\n    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, step, f, partitioner);\n}\n//! Parallel iteration over a range of integers with a step provided and auto partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner) {\n    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, partitioner);\n}\n//! 
Parallel iteration over a range of integers with a step provided and affinity partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner) {\n    parallel_for_impl(first, last, step, f, partitioner);\n}\n\n//! Parallel iteration over a range of integers with a default step value and default partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, const Function& f) {\n    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, auto_partitioner());\n}\n//! Parallel iteration over a range of integers with a default step value and simple partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner) {\n    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, static_cast<Index>(1), f, partitioner);\n}\n//! Parallel iteration over a range of integers with a default step value and auto partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner) {\n    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, partitioner);\n}\n//! Parallel iteration over a range of integers with a default step value and affinity partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner) {\n    parallel_for_impl(first, last, static_cast<Index>(1), f, partitioner);\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! 
Implementation of parallel iteration over stepped range of integers with explicit step, task group context, and partitioner\ntemplate <typename Index, typename Function, typename Partitioner>\nvoid parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner, tbb::task_group_context &context) {\n    if (step <= 0 )\n        internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument\n    else if (last > first) {\n        // Above \"else\" avoids \"potential divide by zero\" warning on some platforms\n        Index end = (last - first - Index(1)) / step + Index(1);\n        tbb::blocked_range<Index> range(static_cast<Index>(0), end);\n        internal::parallel_for_body<Function, Index> body(f, first, step);\n        tbb::parallel_for(range, body, partitioner, context);\n    }\n}\n\n//! Parallel iteration over a range of integers with explicit step, task group context, and default partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, Index step, const Function& f, tbb::task_group_context &context) {\n    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, auto_partitioner(), context);\n}\n//! Parallel iteration over a range of integers with explicit step, task group context, and simple partitioner\n template <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner, tbb::task_group_context &context) {\n    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, step, f, partitioner, context);\n}\n//! 
Parallel iteration over a range of integers with explicit step, task group context, and auto partitioner\n template <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner, tbb::task_group_context &context) {\n    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, partitioner, context);\n}\n//! Parallel iteration over a range of integers with explicit step, task group context, and affinity partitioner\n template <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner, tbb::task_group_context &context) {\n    parallel_for_impl(first, last, step, f, partitioner, context);\n}\n\n\n//! Parallel iteration over a range of integers with a default step value, explicit task group context, and default partitioner\ntemplate <typename Index, typename Function>\nvoid parallel_for(Index first, Index last, const Function& f, tbb::task_group_context &context) {\n    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, auto_partitioner(), context);\n}\n//! Parallel iteration over a range of integers with a default step value, explicit task group context, and simple partitioner\n template <typename Index, typename Function, typename Partitioner>\nvoid parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner, tbb::task_group_context &context) {\n    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, static_cast<Index>(1), f, partitioner, context);\n}\n//! 
Parallel iteration over a range of integers with a default step value, explicit task group context, and auto partitioner\n template <typename Index, typename Function, typename Partitioner>\nvoid parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner, tbb::task_group_context &context) {\n    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, partitioner, context);\n}\n//! Parallel iteration over a range of integers with a default step value, explicit task group context, and affinity_partitioner\n template <typename Index, typename Function, typename Partitioner>\nvoid parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner, tbb::task_group_context &context) {\n    parallel_for_impl(first, last, static_cast<Index>(1), f, partitioner, context);\n}\n\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n//@}\n\n} // namespace strict_ppl\n\nusing strict_ppl::parallel_for;\n\n} // namespace tbb\n\n#if TBB_PREVIEW_SERIAL_SUBSET\n#define __TBB_NORMAL_EXECUTION\n#include \"../serial/tbb/parallel_for.h\"\n#undef __TBB_NORMAL_EXECUTION\n#endif\n\n#endif /* __TBB_parallel_for_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/parallel_for_each.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_parallel_for_each_H\n#define __TBB_parallel_for_each_H\n\n#include \"parallel_do.h\"\n\nnamespace tbb {\n\n//! 
@cond INTERNAL\nnamespace internal {\n    // The class calls user function in operator()\n    template <typename Function, typename Iterator>\n    class parallel_for_each_body : internal::no_assign {\n        const Function &my_func;\n    public:\n        parallel_for_each_body(const Function &_func) : my_func(_func) {}\n        parallel_for_each_body(const parallel_for_each_body<Function, Iterator> &_caller) : my_func(_caller.my_func) {}\n\n        void operator() ( typename std::iterator_traits<Iterator>::reference value ) const {\n            my_func(value);\n        }\n    };\n} // namespace internal\n//! @endcond\n\n/** \\name parallel_for_each\n    **/\n//@{\n//! Calls function f for all items from [first, last) interval using user-supplied context\n/** @ingroup algorithms */\n#if __TBB_TASK_GROUP_CONTEXT\ntemplate<typename InputIterator, typename Function>\nvoid parallel_for_each(InputIterator first, InputIterator last, const Function& f, task_group_context &context) {\n    internal::parallel_for_each_body<Function, InputIterator> body(f);\n    tbb::parallel_do (first, last, body, context);\n}\n\n//! Calls function f for all items from rng using user-supplied context\n/** @ingroup algorithms */\ntemplate<typename Range, typename Function>\nvoid parallel_for_each(Range& rng, const Function& f, task_group_context& context) {\n    parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f, context);\n}\n\n//! Calls function f for all items from const rng user-supplied context\n/** @ingroup algorithms */\ntemplate<typename Range, typename Function>\nvoid parallel_for_each(const Range& rng, const Function& f, task_group_context& context) {\n    parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f, context);\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n//! 
Uses default context\ntemplate<typename InputIterator, typename Function>\nvoid parallel_for_each(InputIterator first, InputIterator last, const Function& f) {\n    internal::parallel_for_each_body<Function, InputIterator> body(f);\n    tbb::parallel_do (first, last, body);\n}\n\n//! Uses default context\ntemplate<typename Range, typename Function>\nvoid parallel_for_each(Range& rng, const Function& f) {\n    parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f);\n}\n\n//! Uses default context\ntemplate<typename Range, typename Function>\nvoid parallel_for_each(const Range& rng, const Function& f) {\n    parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f);\n}\n\n//@}\n\n} // namespace\n\n#endif /* __TBB_parallel_for_each_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/parallel_invoke.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_parallel_invoke_H\n#define __TBB_parallel_invoke_H\n\n#include \"task.h\"\n\n#if __TBB_VARIADIC_PARALLEL_INVOKE\n    #include <utility>\n#endif\n\nnamespace tbb {\n\n#if !__TBB_TASK_GROUP_CONTEXT\n    /** Dummy to avoid cluttering the bulk of the header with enormous amount of ifdefs. **/\n    struct task_group_context {};\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n//! 
@cond INTERNAL\nnamespace internal {\n    // Simple task object, executing user method\n    template<typename function>\n    class function_invoker : public task{\n    public:\n        function_invoker(const function& _function) : my_function(_function) {}\n    private:\n        const function &my_function;\n        /*override*/\n        task* execute()\n        {\n            my_function();\n            return NULL;\n        }\n    };\n\n    // The class spawns two or three child tasks\n    template <size_t N, typename function1, typename function2, typename function3>\n    class spawner : public task {\n    private:\n        const function1& my_func1;\n        const function2& my_func2;\n        const function3& my_func3;\n        bool is_recycled;\n\n        task* execute (){\n            if(is_recycled){\n                return NULL;\n            }else{\n                __TBB_ASSERT(N==2 || N==3, \"Number of arguments passed to spawner is wrong\");\n                set_ref_count(N);\n                recycle_as_safe_continuation();\n                internal::function_invoker<function2>* invoker2 = new (allocate_child()) internal::function_invoker<function2>(my_func2);\n                __TBB_ASSERT(invoker2, \"Child task allocation failed\");\n                spawn(*invoker2);\n                size_t n = N; // To prevent compiler warnings\n                if (n>2) {\n                    internal::function_invoker<function3>* invoker3 = new (allocate_child()) internal::function_invoker<function3>(my_func3);\n                    __TBB_ASSERT(invoker3, \"Child task allocation failed\");\n                    spawn(*invoker3);\n                }\n                my_func1();\n                is_recycled = true;\n                return NULL;\n            }\n        } // execute\n\n    public:\n        spawner(const function1& _func1, const function2& _func2, const function3& _func3) : my_func1(_func1), my_func2(_func2), my_func3(_func3), is_recycled(false) {}\n    
};\n\n    // Creates and spawns child tasks\n    class parallel_invoke_helper : public empty_task {\n    public:\n        // Dummy functor class\n        class parallel_invoke_noop {\n        public:\n            void operator() () const {}\n        };\n        // Creates a helper object with user-defined number of children expected\n        parallel_invoke_helper(int number_of_children)\n        {\n            set_ref_count(number_of_children + 1);\n        }\n\n#if __TBB_VARIADIC_PARALLEL_INVOKE\n        void add_children() {}\n        void add_children(tbb::task_group_context&) {}\n\n        template <typename function>\n        void add_children(function&& _func)\n        {\n            internal::function_invoker<function>* invoker = new (allocate_child()) internal::function_invoker<function>(std::forward<function>(_func));\n            __TBB_ASSERT(invoker, \"Child task allocation failed\");\n            spawn(*invoker);\n        }\n\n        template<typename function>\n        void add_children(function&& _func, tbb::task_group_context&)\n        {\n            add_children(std::forward<function>(_func));\n        }\n\n        // Adds child(ren) task(s) and spawns them\n        template <typename function1, typename function2, typename... function>\n        void add_children(function1&& _func1, function2&& _func2, function&&... 
_func)\n        {\n            // The third argument is dummy, it is ignored actually.\n            parallel_invoke_noop noop;\n            typedef internal::spawner<2, function1, function2, parallel_invoke_noop> spawner_type;\n            spawner_type & sub_root = *new(allocate_child()) spawner_type(std::forward<function1>(_func1), std::forward<function2>(_func2), noop);\n            spawn(sub_root);\n            add_children(std::forward<function>(_func)...);\n        }\n#else\n        // Adds child task and spawns it\n        template <typename function>\n        void add_children (const function &_func)\n        {\n            internal::function_invoker<function>* invoker = new (allocate_child()) internal::function_invoker<function>(_func);\n            __TBB_ASSERT(invoker, \"Child task allocation failed\");\n            spawn(*invoker);\n        }\n\n        // Adds a task with multiple child tasks and spawns it\n        // two arguments\n        template <typename function1, typename function2>\n        void add_children (const function1& _func1, const function2& _func2)\n        {\n            // The third argument is dummy, it is ignored actually.\n            parallel_invoke_noop noop;\n            internal::spawner<2, function1, function2, parallel_invoke_noop>& sub_root = *new(allocate_child())internal::spawner<2, function1, function2, parallel_invoke_noop>(_func1, _func2, noop);\n            spawn(sub_root);\n        }\n        // three arguments\n        template <typename function1, typename function2, typename function3>\n        void add_children (const function1& _func1, const function2& _func2, const function3& _func3)\n        {\n            internal::spawner<3, function1, function2, function3>& sub_root = *new(allocate_child())internal::spawner<3, function1, function2, function3>(_func1, _func2, _func3);\n            spawn(sub_root);\n        }\n#endif // __TBB_VARIADIC_PARALLEL_INVOKE\n\n        // Waits for all child tasks\n        template 
<typename F0>\n        void run_and_finish(const F0& f0)\n        {\n            internal::function_invoker<F0>* invoker = new (allocate_child()) internal::function_invoker<F0>(f0);\n            __TBB_ASSERT(invoker, \"Child task allocation failed\");\n            spawn_and_wait_for_all(*invoker);\n        }\n    };\n    // The class destroys root if exception occurred as well as in normal case\n    class parallel_invoke_cleaner: internal::no_copy {\n    public:\n#if __TBB_TASK_GROUP_CONTEXT\n        parallel_invoke_cleaner(int number_of_children, tbb::task_group_context& context)\n            : root(*new(task::allocate_root(context)) internal::parallel_invoke_helper(number_of_children))\n#else\n        parallel_invoke_cleaner(int number_of_children, tbb::task_group_context&)\n            : root(*new(task::allocate_root()) internal::parallel_invoke_helper(number_of_children))\n#endif /* !__TBB_TASK_GROUP_CONTEXT */\n        {}\n\n        ~parallel_invoke_cleaner(){\n            root.destroy(root);\n        }\n        internal::parallel_invoke_helper& root;\n    };\n\n#if __TBB_VARIADIC_PARALLEL_INVOKE\n//  Determine whether the last parameter in a pack is task_group_context\n    template<typename... T> struct impl_selector; // to workaround a GCC bug\n\n    template<typename T1, typename... T> struct impl_selector<T1, T...> {\n        typedef typename impl_selector<T...>::type type;\n    };\n\n    template<typename T> struct impl_selector<T> {\n        typedef false_type type;\n    };\n    template<> struct impl_selector<task_group_context&> {\n        typedef true_type  type;\n    };\n\n    // Select task_group_context parameter from the back of a pack\n    task_group_context& get_context( task_group_context& tgc ) { return tgc; }\n\n    template<typename T1, typename... T>\n    task_group_context& get_context( T1&& /*ignored*/, T&&... t )\n    { return get_context( std::forward<T>(t)... 
); }\n\n    // task_group_context is known to be at the back of the parameter pack\n    template<typename F0, typename F1, typename... F>\n    void parallel_invoke_impl(true_type, F0&& f0, F1&& f1, F&&... f) {\n        __TBB_STATIC_ASSERT(sizeof...(F)>0, \"Variadic parallel_invoke implementation broken?\");\n        // # of child tasks: f0, f1, and a task for each two elements of the pack except the last\n        const size_t number_of_children = 2 + sizeof...(F)/2;\n        parallel_invoke_cleaner cleaner(number_of_children, get_context(std::forward<F>(f)...));\n        parallel_invoke_helper& root = cleaner.root;\n\n        root.add_children(std::forward<F>(f)...);\n        root.add_children(std::forward<F1>(f1));\n        root.run_and_finish(std::forward<F0>(f0));\n    }\n\n    // task_group_context is not in the pack, needs to be added\n    template<typename F0, typename F1, typename... F>\n    void parallel_invoke_impl(false_type, F0&& f0, F1&& f1, F&&... f) {\n        tbb::task_group_context context;\n        // Add context to the arguments, and redirect to the other overload\n        parallel_invoke_impl(true_type(), std::forward<F0>(f0), std::forward<F1>(f1), std::forward<F>(f)..., context);\n    }\n#endif\n} // namespace internal\n//! @endcond\n\n/** \\name parallel_invoke\n    **/\n//@{\n//! Executes a list of tasks in parallel and waits for all tasks to complete.\n/** @ingroup algorithms */\n\n#if __TBB_VARIADIC_PARALLEL_INVOKE\n\n// parallel_invoke for two or more arguments via variadic templates\n// presence of task_group_context is defined automatically\ntemplate<typename F0, typename F1, typename... F>\nvoid parallel_invoke(F0&& f0, F1&& f1, F&&... 
f) {\n    typedef typename internal::impl_selector<internal::false_type, F...>::type selector_type;\n    internal::parallel_invoke_impl(selector_type(), std::forward<F0>(f0), std::forward<F1>(f1), std::forward<F>(f)...);\n}\n\n#else\n\n// parallel_invoke with user-defined context\n// two arguments\ntemplate<typename F0, typename F1 >\nvoid parallel_invoke(const F0& f0, const F1& f1, tbb::task_group_context& context) {\n    internal::parallel_invoke_cleaner cleaner(2, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f1);\n\n    root.run_and_finish(f0);\n}\n\n// three arguments\ntemplate<typename F0, typename F1, typename F2 >\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, tbb::task_group_context& context) {\n    internal::parallel_invoke_cleaner cleaner(3, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f2);\n    root.add_children(f1);\n\n    root.run_and_finish(f0);\n}\n\n// four arguments\ntemplate<typename F0, typename F1, typename F2, typename F3>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3,\n                     tbb::task_group_context& context)\n{\n    internal::parallel_invoke_cleaner cleaner(4, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f3);\n    root.add_children(f2);\n    root.add_children(f1);\n\n    root.run_and_finish(f0);\n}\n\n// five arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4 >\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     tbb::task_group_context& context)\n{\n    internal::parallel_invoke_cleaner cleaner(3, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f4, f3);\n    root.add_children(f2, f1);\n\n    root.run_and_finish(f0);\n}\n\n// six arguments\ntemplate<typename F0, typename F1, typename F2, typename 
F3, typename F4, typename F5>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5,\n                     tbb::task_group_context& context)\n{\n    internal::parallel_invoke_cleaner cleaner(3, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f5, f4, f3);\n    root.add_children(f2, f1);\n\n    root.run_and_finish(f0);\n}\n\n// seven arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     const F5& f5, const F6& f6,\n                     tbb::task_group_context& context)\n{\n    internal::parallel_invoke_cleaner cleaner(3, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f6, f5, f4);\n    root.add_children(f3, f2, f1);\n\n    root.run_and_finish(f0);\n}\n\n// eight arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4,\n         typename F5, typename F6, typename F7>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     const F5& f5, const F6& f6, const F7& f7,\n                     tbb::task_group_context& context)\n{\n    internal::parallel_invoke_cleaner cleaner(4, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f7, f6, f5);\n    root.add_children(f4, f3);\n    root.add_children(f2, f1);\n\n    root.run_and_finish(f0);\n}\n\n// nine arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4,\n         typename F5, typename F6, typename F7, typename F8>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     const F5& f5, const F6& f6, const F7& f7, const F8& f8,\n                     tbb::task_group_context& context)\n{\n    
internal::parallel_invoke_cleaner cleaner(4, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f8, f7, f6);\n    root.add_children(f5, f4, f3);\n    root.add_children(f2, f1);\n\n    root.run_and_finish(f0);\n}\n\n// ten arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4,\n         typename F5, typename F6, typename F7, typename F8, typename F9>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9,\n                     tbb::task_group_context& context)\n{\n    internal::parallel_invoke_cleaner cleaner(4, context);\n    internal::parallel_invoke_helper& root = cleaner.root;\n\n    root.add_children(f9, f8, f7);\n    root.add_children(f6, f5, f4);\n    root.add_children(f3, f2, f1);\n\n    root.run_and_finish(f0);\n}\n\n// two arguments\ntemplate<typename F0, typename F1>\nvoid parallel_invoke(const F0& f0, const F1& f1) {\n    task_group_context context;\n    parallel_invoke<F0, F1>(f0, f1, context);\n}\n// three arguments\ntemplate<typename F0, typename F1, typename F2>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2) {\n    task_group_context context;\n    parallel_invoke<F0, F1, F2>(f0, f1, f2, context);\n}\n// four arguments\ntemplate<typename F0, typename F1, typename F2, typename F3 >\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3) {\n    task_group_context context;\n    parallel_invoke<F0, F1, F2, F3>(f0, f1, f2, f3, context);\n}\n// five arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4) {\n    task_group_context context;\n    parallel_invoke<F0, F1, F2, F3, F4>(f0, f1, f2, f3, f4, context);\n}\n// six arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4, 
typename F5>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5) {\n    task_group_context context;\n    parallel_invoke<F0, F1, F2, F3, F4, F5>(f0, f1, f2, f3, f4, f5, context);\n}\n// seven arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     const F5& f5, const F6& f6)\n{\n    task_group_context context;\n    parallel_invoke<F0, F1, F2, F3, F4, F5, F6>(f0, f1, f2, f3, f4, f5, f6, context);\n}\n// eight arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4,\n         typename F5, typename F6, typename F7>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     const F5& f5, const F6& f6, const F7& f7)\n{\n    task_group_context context;\n    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7>(f0, f1, f2, f3, f4, f5, f6, f7, context);\n}\n// nine arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4,\n         typename F5, typename F6, typename F7, typename F8>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     const F5& f5, const F6& f6, const F7& f7, const F8& f8)\n{\n    task_group_context context;\n    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7, F8>(f0, f1, f2, f3, f4, f5, f6, f7, f8, context);\n}\n// ten arguments\ntemplate<typename F0, typename F1, typename F2, typename F3, typename F4,\n         typename F5, typename F6, typename F7, typename F8, typename F9>\nvoid parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,\n                     const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9)\n{\n    task_group_context context;\n    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7, F8, F9>(f0, f1, f2, f3, f4, f5, 
f6, f7, f8, f9, context);\n}\n#endif // __TBB_VARIADIC_PARALLEL_INVOKE\n//@}\n\n} // namespace\n\n#endif /* __TBB_parallel_invoke_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/parallel_reduce.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_parallel_reduce_H\n#define __TBB_parallel_reduce_H\n\n#include <new>\n#include \"task.h\"\n#include \"aligned_space.h\"\n#include \"partitioner.h\"\n#include \"tbb_profiling.h\"\n\nnamespace tbb {\n\nnamespace interface7 {\n//! @cond INTERNAL\nnamespace internal {\n\n    using namespace tbb::internal;\n\n    /** Values for reduction_context. */\n    enum {\n        root_task, left_child, right_child\n    };\n\n    /** Represented as a char, not enum, for compactness. */\n    typedef char reduction_context;\n\n    //! 
Task type used to combine the partial results of parallel_reduce.\n    /** @ingroup algorithms */\n    template<typename Body>\n    class finish_reduce: public flag_task {\n        //! Pointer to body, or NULL if the left child has not yet finished.\n        bool has_right_zombie;\n        const reduction_context my_context;\n        Body* my_body;\n        aligned_space<Body> zombie_space;\n        finish_reduce( reduction_context context_ ) :\n            has_right_zombie(false), // TODO: substitute by flag_task::child_stolen?\n            my_context(context_),\n            my_body(NULL)\n        {\n        }\n        ~finish_reduce() {\n            if( has_right_zombie )\n                zombie_space.begin()->~Body();\n        }\n        task* execute() {\n            if( has_right_zombie ) {\n                // Right child was stolen.\n                Body* s = zombie_space.begin();\n                my_body->join( *s );\n                // Body::join() won't be called if canceled. Defer destruction to destructor\n            }\n            if( my_context==left_child )\n                itt_store_word_with_release( static_cast<finish_reduce*>(parent())->my_body, my_body );\n            return NULL;\n        }\n        template<typename Range,typename Body_, typename Partitioner>\n        friend class start_reduce;\n    };\n\n    //! allocate right task with new parent\n    void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes);\n\n    //! Task type used to split the work of parallel_reduce.\n    /** @ingroup algorithms */\n    template<typename Range, typename Body, typename Partitioner>\n    class start_reduce: public task {\n        typedef finish_reduce<Body> finish_type;\n        Body* my_body;\n        Range my_range;\n        typename Partitioner::task_partition_type my_partition;\n        reduction_context my_context;\n        /*override*/ task* execute();\n        //! 
Update affinity info, if any\n        /*override*/ void note_affinity( affinity_id id ) {\n            my_partition.note_affinity( id );\n        }\n        template<typename Body_>\n        friend class finish_reduce;\n\npublic:\n        //! Constructor used for root task\n        start_reduce( const Range& range, Body* body, Partitioner& partitioner ) :\n            my_body(body),\n            my_range(range),\n            my_partition(partitioner),\n            my_context(root_task)\n        {\n        }\n        //! Splitting constructor used to generate children.\n        /** parent_ becomes left child.  Newly constructed object is right child. */\n        start_reduce( start_reduce& parent_, typename Partitioner::split_type& split_obj ) :\n            my_body(parent_.my_body),\n            my_range(parent_.my_range, split_obj),\n            my_partition(parent_.my_partition, split_obj),\n            my_context(right_child)\n        {\n            my_partition.set_affinity(*this);\n            parent_.my_context = left_child;\n        }\n        //! Construct right child from the given range as response to the demand.\n        /** parent_ remains left child.  Newly constructed object is right child. 
*/\n        start_reduce( start_reduce& parent_, const Range& r, depth_t d ) :\n            my_body(parent_.my_body),\n            my_range(r),\n            my_partition(parent_.my_partition, split()),\n            my_context(right_child)\n        {\n            my_partition.set_affinity(*this);\n            my_partition.align_depth( d ); // TODO: move into constructor of partitioner\n            parent_.my_context = left_child;\n        }\n        static void run( const Range& range, Body& body, Partitioner& partitioner ) {\n            if( !range.empty() ) {\n#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP\n                task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) );\n#else\n                // Bound context prevents exceptions from body to affect nesting or sibling algorithms,\n                // and allows users to handle exceptions safely by wrapping parallel_for in the try-block.\n                task_group_context context;\n                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );\n#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */\n            }\n        }\n#if __TBB_TASK_GROUP_CONTEXT\n        static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) {\n            if( !range.empty() )\n                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );\n        }\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n        //! Run body for range\n        void run_body( Range &r ) { (*my_body)( r ); }\n\n        //! 
spawn right task, serves as callback for partitioner\n        // TODO: remove code duplication from 'offer_work' methods\n        void offer_work(typename Partitioner::split_type& split_obj) {\n            task *tasks[2];\n            allocate_sibling(static_cast<task*>(this), tasks, sizeof(start_reduce), sizeof(finish_type));\n            new((void*)tasks[0]) finish_type(my_context);\n            new((void*)tasks[1]) start_reduce(*this, split_obj);\n            spawn(*tasks[1]);\n        }\n        //! spawn right task, serves as callback for partitioner\n        void offer_work(const Range& r, depth_t d = 0) {\n            task *tasks[2];\n            allocate_sibling(static_cast<task*>(this), tasks, sizeof(start_reduce), sizeof(finish_type));\n            new((void*)tasks[0]) finish_type(my_context);\n            new((void*)tasks[1]) start_reduce(*this, r, d);\n            spawn(*tasks[1]);\n        }\n    };\n\n    //! allocate right task with new parent\n    // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined\n    inline void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes) {\n        tasks[0] = &start_reduce_task->allocate_continuation().allocate(finish_bytes);\n        start_reduce_task->set_parent(tasks[0]);\n        tasks[0]->set_ref_count(2);\n        tasks[1] = &tasks[0]->allocate_child().allocate(start_bytes);\n    }\n\n    template<typename Range, typename Body, typename Partitioner>\n    task* start_reduce<Range,Body,Partitioner>::execute() {\n        my_partition.check_being_stolen( *this );\n        if( my_context==right_child ) {\n            finish_type* parent_ptr = static_cast<finish_type*>(parent());\n            if( !itt_load_word_with_acquire(parent_ptr->my_body) ) { // TODO: replace by is_stolen_task() or by parent_ptr->ref_count() == 2???\n                my_body = new( parent_ptr->zombie_space.begin() ) Body(*my_body,split());\n   
             parent_ptr->has_right_zombie = true;\n            }\n        } else __TBB_ASSERT(my_context==root_task,NULL);// because left leaf spawns right leafs without recycling\n        my_partition.execute(*this, my_range);\n        if( my_context==left_child ) {\n            finish_type* parent_ptr = static_cast<finish_type*>(parent());\n            __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL);\n            itt_store_word_with_release(parent_ptr->my_body, my_body );\n        }\n        return NULL;\n    }\n\n    //! Task type used to combine the partial results of parallel_deterministic_reduce.\n    /** @ingroup algorithms */\n    template<typename Body>\n    class finish_deterministic_reduce: public task {\n        Body &my_left_body;\n        Body my_right_body;\n\n        finish_deterministic_reduce( Body &body ) :\n            my_left_body( body ),\n            my_right_body( body, split() )\n        {\n        }\n        task* execute() {\n            my_left_body.join( my_right_body );\n            return NULL;\n        }\n        template<typename Range,typename Body_>\n        friend class start_deterministic_reduce;\n    };\n\n    //! Task type used to split the work of parallel_deterministic_reduce.\n    /** @ingroup algorithms */\n    template<typename Range, typename Body>\n    class start_deterministic_reduce: public task {\n        typedef finish_deterministic_reduce<Body> finish_type;\n        Body &my_body;\n        Range my_range;\n        /*override*/ task* execute();\n\n        //! Constructor used for root task\n        start_deterministic_reduce( const Range& range, Body& body ) :\n            my_body( body ),\n            my_range( range )\n        {\n        }\n        //! Splitting constructor used to generate children.\n        /** parent_ becomes left child.  Newly constructed object is right child. 
*/\n        start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c ) :\n            my_body( c.my_right_body ),\n            my_range( parent_.my_range, split() )\n        {\n        }\n\npublic:\n        static void run( const Range& range, Body& body ) {\n            if( !range.empty() ) {\n#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP\n                task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,&body) );\n#else\n                // Bound context prevents exceptions from body to affect nesting or sibling algorithms,\n                // and allows users to handle exceptions safely by wrapping parallel_for in the try-block.\n                task_group_context context;\n                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) );\n#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */\n            }\n        }\n#if __TBB_TASK_GROUP_CONTEXT\n        static void run( const Range& range, Body& body, task_group_context& context ) {\n            if( !range.empty() )\n                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) );\n        }\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    };\n\n    template<typename Range, typename Body>\n    task* start_deterministic_reduce<Range,Body>::execute() {\n        if( !my_range.is_divisible() ) {\n            my_body( my_range );\n            return NULL;\n        } else {\n            finish_type& c = *new( allocate_continuation() ) finish_type( my_body );\n            recycle_as_child_of(c);\n            c.set_ref_count(2);\n            start_deterministic_reduce& b = *new( c.allocate_child() ) start_deterministic_reduce( *this, c );\n            task::spawn(b);\n            return this;\n        }\n    }\n} // namespace internal\n//! @endcond\n} //namespace interfaceX\n\n//! 
@cond INTERNAL\nnamespace internal {\n    using interface7::internal::start_reduce;\n    using interface7::internal::start_deterministic_reduce;\n    //! Auxiliary class for parallel_reduce; for internal use only.\n    /** The adaptor class that implements \\ref parallel_reduce_body_req \"parallel_reduce Body\"\n        using given \\ref parallel_reduce_lambda_req \"anonymous function objects\".\n     **/\n    /** @ingroup algorithms */\n    template<typename Range, typename Value, typename RealBody, typename Reduction>\n    class lambda_reduce_body {\n\n//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced\n//       (might require some performance measurements)\n\n        const Value&     identity_element;\n        const RealBody&  my_real_body;\n        const Reduction& my_reduction;\n        Value            my_value;\n        lambda_reduce_body& operator= ( const lambda_reduce_body& other );\n    public:\n        lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction )\n            : identity_element(identity)\n            , my_real_body(body)\n            , my_reduction(reduction)\n            , my_value(identity)\n        { }\n        lambda_reduce_body( const lambda_reduce_body& other )\n            : identity_element(other.identity_element)\n            , my_real_body(other.my_real_body)\n            , my_reduction(other.my_reduction)\n            , my_value(other.my_value)\n        { }\n        lambda_reduce_body( lambda_reduce_body& other, tbb::split )\n            : identity_element(other.identity_element)\n            , my_real_body(other.my_real_body)\n            , my_reduction(other.my_reduction)\n            , my_value(other.identity_element)\n        { }\n        void operator()(Range& range) {\n            my_value = my_real_body(range, const_cast<const Value&>(my_value));\n        }\n        void join( lambda_reduce_body& rhs ) {\n            my_value = 
my_reduction(const_cast<const Value&>(my_value), const_cast<const Value&>(rhs.my_value));\n        }\n        Value result() const {\n            return my_value;\n        }\n    };\n\n} // namespace internal\n//! @endcond\n\n// Requirements on Range concept are documented in blocked_range.h\n\n/** \\page parallel_reduce_body_req Requirements on parallel_reduce body\n    Class \\c Body implementing the concept of parallel_reduce body must define:\n    - \\code Body::Body( Body&, split ); \\endcode        Splitting constructor.\n                                                        Must be able to run concurrently with operator() and method \\c join\n    - \\code Body::~Body(); \\endcode                     Destructor\n    - \\code void Body::operator()( Range& r ); \\endcode Function call operator applying body to range \\c r\n                                                        and accumulating the result\n    - \\code void Body::join( Body& b ); \\endcode        Join results.\n                                                        The result in \\c b should be merged into the result of \\c this\n**/\n\n/** \\page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions)\n    TO BE DOCUMENTED\n**/\n\n/** \\name parallel_reduce\n    See also requirements on \\ref range_req \"Range\" and \\ref parallel_reduce_body_req \"parallel_reduce Body\". **/\n//@{\n\n//! Parallel iteration with reduction and default partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_reduce( const Range& range, Body& body ) {\n    internal::start_reduce<Range,Body, const __TBB_DEFAULT_PARTITIONER>::run( range, body, __TBB_DEFAULT_PARTITIONER() );\n}\n\n//! 
Parallel iteration with reduction and simple_partitioner\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) {\n    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner );\n}\n\n//! Parallel iteration with reduction and auto_partitioner\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) {\n    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner );\n}\n\n//! Parallel iteration with reduction and affinity_partitioner\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) {\n    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner );\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! Parallel iteration with reduction, simple partitioner and user-supplied context.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) {\n    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner, context );\n}\n\n//! Parallel iteration with reduction, auto_partitioner and user-supplied context\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) {\n    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner, context );\n}\n\n//! 
Parallel iteration with reduction, affinity_partitioner and user-supplied context\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) {\n    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner, context );\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n/** parallel_reduce overloads that work with anonymous function objects\n    (see also \\ref parallel_reduce_lambda_req \"requirements on parallel_reduce anonymous function objects\"). **/\n\n//! Parallel iteration with reduction and default partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const __TBB_DEFAULT_PARTITIONER>\n                          ::run(range, body, __TBB_DEFAULT_PARTITIONER() );\n    return body.result();\n}\n\n//! Parallel iteration with reduction and simple_partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,\n                       const simple_partitioner& partitioner ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>\n                          ::run(range, body, partitioner );\n    return body.result();\n}\n\n//! 
Parallel iteration with reduction and auto_partitioner\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,\n                       const auto_partitioner& partitioner ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>\n                          ::run( range, body, partitioner );\n    return body.result();\n}\n\n//! Parallel iteration with reduction and affinity_partitioner\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,\n                       affinity_partitioner& partitioner ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>\n                                        ::run( range, body, partitioner );\n    return body.result();\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! 
Parallel iteration with reduction, simple partitioner and user-supplied context.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,\n                       const simple_partitioner& partitioner, task_group_context& context ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>\n                          ::run( range, body, partitioner, context );\n    return body.result();\n}\n\n//! Parallel iteration with reduction, auto_partitioner and user-supplied context\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,\n                       const auto_partitioner& partitioner, task_group_context& context ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>\n                          ::run( range, body, partitioner, context );\n    return body.result();\n}\n\n//! 
Parallel iteration with reduction, affinity_partitioner and user-supplied context\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,\n                       affinity_partitioner& partitioner, task_group_context& context ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>\n                                        ::run( range, body, partitioner, context );\n    return body.result();\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n//! Parallel iteration with deterministic reduction and default partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_deterministic_reduce( const Range& range, Body& body ) {\n    internal::start_deterministic_reduce<Range,Body>::run( range, body );\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) {\n    internal::start_deterministic_reduce<Range,Body>::run( range, body, context );\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n/** parallel_reduce overloads that work with anonymous function objects\n    (see also \\ref parallel_reduce_lambda_req \"requirements on parallel_reduce anonymous function objects\"). **/\n\n//! 
Parallel iteration with deterministic reduction and default partitioner.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_deterministic_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction> >\n                          ::run(range, body);\n    return body.result();\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context.\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Value, typename RealBody, typename Reduction>\nValue parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,\n                       task_group_context& context ) {\n    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);\n    internal::start_deterministic_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction> >\n                          ::run( range, body, context );\n    return body.result();\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n//@}\n\n} // namespace tbb\n\n#endif /* __TBB_parallel_reduce_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/parallel_scan.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_parallel_scan_H\n#define __TBB_parallel_scan_H\n\n#include \"task.h\"\n#include \"aligned_space.h\"\n#include <new>\n#include \"partitioner.h\"\n\nnamespace tbb {\n\n//! Used to indicate that the initial scan is being performed.\n/** @ingroup algorithms */\nstruct pre_scan_tag {\n    static bool is_final_scan() {return false;}\n};\n\n//! Used to indicate that the final scan is being performed.\n/** @ingroup algorithms */\nstruct final_scan_tag {\n    static bool is_final_scan() {return true;}\n};\n\n//! @cond INTERNAL\nnamespace internal {\n\n    //! 
Performs final scan for a leaf \n    /** @ingroup algorithms */\n    template<typename Range, typename Body>\n    class final_sum: public task {\n    public:\n        Body my_body;\n    private:\n        aligned_space<Range> my_range;\n        //! Where to put result of last subrange, or NULL if not last subrange.\n        Body* my_stuff_last;\n    public:\n        final_sum( Body& body_ ) :\n            my_body(body_,split())\n        {\n            poison_pointer(my_stuff_last);\n        }\n        ~final_sum() {\n            my_range.begin()->~Range();\n        }     \n        void finish_construction( const Range& range_, Body* stuff_last_ ) {\n            new( my_range.begin() ) Range(range_);\n            my_stuff_last = stuff_last_;\n        }\n    private:\n        /*override*/ task* execute() {\n            my_body( *my_range.begin(), final_scan_tag() );\n            if( my_stuff_last )\n                my_stuff_last->assign(my_body);\n            return NULL;\n        }\n    };       \n\n    //! 
Split work to be done in the scan.\n    /** @ingroup algorithms */\n    template<typename Range, typename Body>\n    class sum_node: public task {\n        typedef final_sum<Range,Body> final_sum_type;\n    public:\n        final_sum_type *my_incoming; \n        final_sum_type *my_body;\n        Body *my_stuff_last;\n    private:\n        final_sum_type *my_left_sum;\n        sum_node *my_left;\n        sum_node *my_right;     \n        bool my_left_is_final;\n        Range my_range;\n        sum_node( const Range range_, bool left_is_final_ ) : \n            my_left_sum(NULL), \n            my_left(NULL), \n            my_right(NULL), \n            my_left_is_final(left_is_final_), \n            my_range(range_)\n        {\n            // Poison fields that will be set by second pass.\n            poison_pointer(my_body);\n            poison_pointer(my_incoming);\n        }\n        task* create_child( const Range& range_, final_sum_type& f, sum_node* n, final_sum_type* incoming_, Body* stuff_last_ ) {\n            if( !n ) {\n                f.recycle_as_child_of( *this );\n                f.finish_construction( range_, stuff_last_ );\n                return &f;\n            } else {\n                n->my_body = &f;\n                n->my_incoming = incoming_;\n                n->my_stuff_last = stuff_last_;\n                return n;\n            }\n        }\n        /*override*/ task* execute() {\n            if( my_body ) {\n                if( my_incoming )\n                    my_left_sum->my_body.reverse_join( my_incoming->my_body );\n                recycle_as_continuation();\n                sum_node& c = *this;\n                task* b = c.create_child(Range(my_range,split()),*my_left_sum,my_right,my_left_sum,my_stuff_last);\n                task* a = my_left_is_final ? 
NULL : c.create_child(my_range,*my_body,my_left,my_incoming,NULL);\n                set_ref_count( (a!=NULL)+(b!=NULL) );\n                my_body = NULL; \n                if( a ) spawn(*b);\n                else a = b;\n                return a;\n            } else {\n                return NULL;\n            }\n        }\n        template<typename Range_,typename Body_,typename Partitioner_>\n        friend class start_scan;\n\n        template<typename Range_,typename Body_>\n        friend class finish_scan;\n    };\n\n    //! Combine partial results\n    /** @ingroup algorithms */\n    template<typename Range, typename Body>\n    class finish_scan: public task {\n        typedef sum_node<Range,Body> sum_node_type;\n        typedef final_sum<Range,Body> final_sum_type;\n        final_sum_type** const my_sum;\n        sum_node_type*& my_return_slot;\n    public:\n        final_sum_type* my_right_zombie;\n        sum_node_type& my_result;\n\n        /*override*/ task* execute() {\n            __TBB_ASSERT( my_result.ref_count()==(my_result.my_left!=NULL)+(my_result.my_right!=NULL), NULL );\n            if( my_result.my_left )\n                my_result.my_left_is_final = false;\n            if( my_right_zombie && my_sum ) \n                ((*my_sum)->my_body).reverse_join(my_result.my_left_sum->my_body);\n            __TBB_ASSERT( !my_return_slot, NULL );\n            if( my_right_zombie || my_result.my_right ) {\n                my_return_slot = &my_result;\n            } else {\n                destroy( my_result );\n            }\n            if( my_right_zombie && !my_sum && !my_result.my_right ) {\n                destroy(*my_right_zombie);\n                my_right_zombie = NULL;\n            }\n            return NULL;\n        }\n\n        finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) : \n            my_sum(sum_),\n            my_return_slot(return_slot_), \n            my_right_zombie(NULL),\n            
my_result(result_)\n        {\n            __TBB_ASSERT( !my_return_slot, NULL );\n        }\n    };\n\n    //! Initial task to split the work\n    /** @ingroup algorithms */\n    template<typename Range, typename Body, typename Partitioner=simple_partitioner>\n    class start_scan: public task {\n        typedef sum_node<Range,Body> sum_node_type;\n        typedef final_sum<Range,Body> final_sum_type;\n        final_sum_type* my_body;\n        /** Non-null if caller is requesting total. */\n        final_sum_type** my_sum; \n        sum_node_type** my_return_slot;\n        /** Null if computing root. */\n        sum_node_type* my_parent_sum;\n        bool my_is_final;\n        bool my_is_right_child;\n        Range my_range;\n        typename Partitioner::partition_type my_partition;\n        /*override*/ task* execute();\n    public:\n        start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ ) :\n            my_body(parent_.my_body),\n            my_sum(parent_.my_sum),\n            my_return_slot(&return_slot_),\n            my_parent_sum(parent_sum_),\n            my_is_final(parent_.my_is_final),\n            my_is_right_child(false),\n            my_range(parent_.my_range,split()),\n            my_partition(parent_.my_partition,split())\n        {\n            __TBB_ASSERT( !*my_return_slot, NULL );\n        }\n\n        start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_) :\n            my_body(&body_),\n            my_sum(NULL),\n            my_return_slot(&return_slot_),\n            my_parent_sum(NULL),\n            my_is_final(true),\n            my_is_right_child(false),\n            my_range(range_),\n            my_partition(partitioner_)\n        {\n            __TBB_ASSERT( !*my_return_slot, NULL );\n        }\n\n        static void run( const Range& range_, Body& body_, const Partitioner& partitioner_ ) {\n            if( !range_.empty() ) {\n  
              typedef internal::start_scan<Range,Body,Partitioner> start_pass1_type;\n                internal::sum_node<Range,Body>* root = NULL;\n                typedef internal::final_sum<Range,Body> final_sum_type;\n                final_sum_type* temp_body = new(task::allocate_root()) final_sum_type( body_ );\n                start_pass1_type& pass1 = *new(task::allocate_root()) start_pass1_type(\n                    /*my_return_slot=*/root,\n                    range_,\n                    *temp_body,\n                    partitioner_ );\n                task::spawn_root_and_wait( pass1 );\n                if( root ) {\n                    root->my_body = temp_body;\n                    root->my_incoming = NULL;\n                    root->my_stuff_last = &body_;\n                    task::spawn_root_and_wait( *root );\n                } else {\n                    body_.assign(temp_body->my_body);\n                    temp_body->finish_construction( range_, NULL );\n                    temp_body->destroy(*temp_body);\n                }\n            }\n        }\n    };\n\n    template<typename Range, typename Body, typename Partitioner>\n    task* start_scan<Range,Body,Partitioner>::execute() {\n        typedef internal::finish_scan<Range,Body> finish_pass1_type;\n        finish_pass1_type* p = my_parent_sum ? 
static_cast<finish_pass1_type*>( parent() ) : NULL;\n        // Inspecting p->result.left_sum would ordinarily be a race condition.\n        // But we inspect it only if we are not a stolen task, in which case we\n        // know that task assigning to p->result.left_sum has completed.\n        bool treat_as_stolen = my_is_right_child && (is_stolen_task() || my_body!=p->my_result.my_left_sum);\n        if( treat_as_stolen ) {\n            // Invocation is for right child that has been really stolen or needs to be virtually stolen\n            p->my_right_zombie = my_body = new( allocate_root() ) final_sum_type(my_body->my_body);\n            my_is_final = false;\n        }\n        task* next_task = NULL;\n        if( (my_is_right_child && !treat_as_stolen) || !my_range.is_divisible() || my_partition.should_execute_range(*this) ) {\n            if( my_is_final )\n                (my_body->my_body)( my_range, final_scan_tag() );\n            else if( my_sum )\n                (my_body->my_body)( my_range, pre_scan_tag() );\n            if( my_sum ) \n                *my_sum = my_body;\n            __TBB_ASSERT( !*my_return_slot, NULL );\n        } else {\n            sum_node_type* result;\n            if( my_parent_sum ) \n                result = new(allocate_additional_child_of(*my_parent_sum)) sum_node_type(my_range,/*my_left_is_final=*/my_is_final);\n            else\n                result = new(task::allocate_root()) sum_node_type(my_range,/*my_left_is_final=*/my_is_final);\n            finish_pass1_type& c = *new( allocate_continuation()) finish_pass1_type(*my_return_slot,my_sum,*result);\n            // Split off right child\n            start_scan& b = *new( c.allocate_child() ) start_scan( /*my_return_slot=*/result->my_right, *this, result );\n            b.my_is_right_child = true;    \n            // Left child is recycling of *this.  
Must recycle this before spawning b, \n            // otherwise b might complete and decrement c.ref_count() to zero, which\n            // would cause c.execute() to run prematurely.\n            recycle_as_child_of(c);\n            c.set_ref_count(2);\n            c.spawn(b);\n            my_sum = &result->my_left_sum;\n            my_return_slot = &result->my_left;\n            my_is_right_child = false;\n            next_task = this;\n            my_parent_sum = result; \n            __TBB_ASSERT( !*my_return_slot, NULL );\n        }\n        return next_task;\n    } \n} // namespace internal\n//! @endcond\n\n// Requirements on Range concept are documented in blocked_range.h\n\n/** \\page parallel_scan_body_req Requirements on parallel_scan body\n    Class \\c Body implementing the concept of parallel_scan body must define:\n    - \\code Body::Body( Body&, split ); \\endcode    Splitting constructor.\n                                                    Split \\c b so that \\c this and \\c b can accumulate separately\n    - \\code Body::~Body(); \\endcode                 Destructor\n    - \\code void Body::operator()( const Range& r, pre_scan_tag ); \\endcode\n                                                    Preprocess iterations for range \\c r\n    - \\code void Body::operator()( const Range& r, final_scan_tag ); \\endcode \n                                                    Do final processing for iterations of range \\c r\n    - \\code void Body::reverse_join( Body& a ); \\endcode\n                                                    Merge preprocessing state of \\c a into \\c this, where \\c a was \n                                                    created earlier from \\c b by b's splitting constructor\n**/\n\n/** \\name parallel_scan\n    See also requirements on \\ref range_req \"Range\" and \\ref parallel_scan_body_req \"parallel_scan Body\". **/\n//@{\n\n//! 
Parallel prefix with default partitioner\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_scan( const Range& range, Body& body ) {\n    internal::start_scan<Range,Body,__TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());\n}\n\n//! Parallel prefix with simple_partitioner\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner ) {\n    internal::start_scan<Range,Body,simple_partitioner>::run(range,body,partitioner);\n}\n\n//! Parallel prefix with auto_partitioner\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Body>\nvoid parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) {\n    internal::start_scan<Range,Body,auto_partitioner>::run(range,body,partitioner);\n}\n//@}\n\n} // namespace tbb\n\n#endif /* __TBB_parallel_scan_H */\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/parallel_sort.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_parallel_sort_H\n#define __TBB_parallel_sort_H\n\n#include \"parallel_for.h\"\n#include \"blocked_range.h\"\n#include \"internal/_range_iterator.h\"\n#include <algorithm>\n#include <iterator>\n#include <functional>\n\nnamespace tbb {\n\n//! @cond INTERNAL\nnamespace internal {\n\n//! 
Range used in quicksort to split elements into subranges based on a value.\n/** The split operation selects a splitter and places all elements less than or equal \n    to the value in the first range and the remaining elements in the second range.\n    @ingroup algorithms */\ntemplate<typename RandomAccessIterator, typename Compare>\nclass quick_sort_range: private no_assign {\n\n    inline size_t median_of_three(const RandomAccessIterator &array, size_t l, size_t m, size_t r) const {\n        return comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp( array[l], array[r]) ? r : l ) ) \n                                        : ( comp(array[r], array[m]) ? m : ( comp( array[r], array[l] ) ? r : l ) );\n    }\n\n    inline size_t pseudo_median_of_nine( const RandomAccessIterator &array, const quick_sort_range &range ) const {\n        size_t offset = range.size/8u;\n        return median_of_three(array, \n                               median_of_three(array, 0, offset, offset*2),\n                               median_of_three(array, offset*3, offset*4, offset*5),\n                               median_of_three(array, offset*6, offset*7, range.size - 1) );\n\n    }\n\npublic:\n\n    static const size_t grainsize = 500;\n    const Compare &comp;\n    RandomAccessIterator begin;\n    size_t size;\n\n    quick_sort_range( RandomAccessIterator begin_, size_t size_, const Compare &comp_ ) :\n        comp(comp_), begin(begin_), size(size_) {}\n\n    bool empty() const {return size==0;}\n    bool is_divisible() const {return size>=grainsize;}\n\n    quick_sort_range( quick_sort_range& range, split ) : comp(range.comp) {\n        using std::swap;\n        RandomAccessIterator array = range.begin;\n        RandomAccessIterator key0 = range.begin; \n        size_t m = pseudo_median_of_nine(array, range);\n        if (m) swap ( array[0], array[m] );\n\n        size_t i=0;\n        size_t j=range.size;\n        // Partition interval [i+1,j-1] with key *key0.\n     
   for(;;) {\n            __TBB_ASSERT( i<j, NULL );\n            // Loop must terminate since array[l]==*key0.\n            do {\n                --j;\n                __TBB_ASSERT( i<=j, \"bad ordering relation?\" );\n            } while( comp( *key0, array[j] ));\n            do {\n                __TBB_ASSERT( i<=j, NULL );\n                if( i==j ) goto partition;\n                ++i;\n            } while( comp( array[i],*key0 ));\n            if( i==j ) goto partition;\n            swap( array[i], array[j] );\n        }\npartition:\n        // Put the partition key were it belongs\n        swap( array[j], *key0 );\n        // array[l..j) is less or equal to key.\n        // array(j..r) is greater or equal to key.\n        // array[j] is equal to key\n        i=j+1;\n        begin = array+i;\n        size = range.size-i;\n        range.size = j;\n    }\n};\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! Body class used to test if elements in a range are presorted\n/** @ingroup algorithms */\ntemplate<typename RandomAccessIterator, typename Compare>\nclass quick_sort_pretest_body : internal::no_assign {\n    const Compare &comp;\n\npublic:\n    quick_sort_pretest_body(const Compare &_comp) : comp(_comp) {}\n\n    void operator()( const blocked_range<RandomAccessIterator>& range ) const {\n        task &my_task = task::self();\n        RandomAccessIterator my_end = range.end();\n\n        int i = 0;\n        for (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i) {\n            if ( i%64 == 0 && my_task.is_cancelled() ) break;\n          \n            // The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1\n            if ( comp( *(k), *(k-1) ) ) {\n                my_task.cancel_group_execution();\n                break;\n            }\n        }\n    }\n\n};\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n//! 
Body class used to sort elements in a range that is smaller than the grainsize.\n/** @ingroup algorithms */\ntemplate<typename RandomAccessIterator, typename Compare>\nstruct quick_sort_body {\n    void operator()( const quick_sort_range<RandomAccessIterator,Compare>& range ) const {\n        //SerialQuickSort( range.begin, range.size, range.comp );\n        std::sort( range.begin, range.begin + range.size, range.comp );\n    }\n};\n\n//! Wrapper method to initiate the sort by calling parallel_for.\n/** @ingroup algorithms */\ntemplate<typename RandomAccessIterator, typename Compare>\nvoid parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) {\n#if __TBB_TASK_GROUP_CONTEXT\n    task_group_context my_context;\n    const int serial_cutoff = 9;\n\n    __TBB_ASSERT( begin + serial_cutoff < end, \"min_parallel_size is smaller than serial cutoff?\" );\n    RandomAccessIterator k;\n    for ( k = begin ; k != begin + serial_cutoff; ++k ) {\n        if ( comp( *(k+1), *k ) ) {\n            goto do_parallel_quick_sort;\n        }\n    }\n\n    parallel_for( blocked_range<RandomAccessIterator>(k+1, end),\n                  quick_sort_pretest_body<RandomAccessIterator,Compare>(comp),\n                  auto_partitioner(),\n                  my_context);\n\n    if (my_context.is_group_execution_cancelled())\ndo_parallel_quick_sort:\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n        parallel_for( quick_sort_range<RandomAccessIterator,Compare>(begin, end-begin, comp ), \n                      quick_sort_body<RandomAccessIterator,Compare>(),\n                      auto_partitioner() );\n}\n\n} // namespace internal\n//! 
@endcond\n\n/** \\page parallel_sort_iter_req Requirements on iterators for parallel_sort\n    Requirements on value type \\c T of \\c RandomAccessIterator for \\c parallel_sort:\n    - \\code void swap( T& x, T& y ) \\endcode        Swaps \\c x and \\c y\n    - \\code bool Compare::operator()( const T& x, const T& y ) \\endcode\n                                                    True if x comes before y;\n**/\n\n/** \\name parallel_sort\n    See also requirements on \\ref parallel_sort_iter_req \"iterators for parallel_sort\". **/\n//@{\n\n//! Sorts the data in [begin,end) using the given comparator \n/** The compare function object is used for all comparisons between elements during sorting.\n    The compare object must define a bool operator() function.\n    @ingroup algorithms **/\ntemplate<typename RandomAccessIterator, typename Compare>\nvoid parallel_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp) { \n    const int min_parallel_size = 500; \n    if( end > begin ) {\n        if (end - begin < min_parallel_size) { \n            std::sort(begin, end, comp);\n        } else {\n            internal::parallel_quick_sort(begin, end, comp);\n        }\n    }\n}\n\n//! Sorts the data in [begin,end) with a default comparator \\c std::less<RandomAccessIterator>\n/** @ingroup algorithms **/\ntemplate<typename RandomAccessIterator>\ninline void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end ) { \n    parallel_sort( begin, end, std::less< typename std::iterator_traits<RandomAccessIterator>::value_type >() );\n}\n\n//! Sorts the data in rng using the given comparator\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Compare>\nvoid parallel_sort(Range& rng, const Compare& comp) {\n    parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp);\n}\n\n//! 
Sorts the data in const rng using the given comparator\n/** @ingroup algorithms **/\ntemplate<typename Range, typename Compare>\nvoid parallel_sort(const Range& rng, const Compare& comp) {\n    parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp);\n}\n\n//! Sorts the data in rng with a default comparator \\c std::less<RandomAccessIterator>\n/** @ingroup algorithms **/\ntemplate<typename Range>\nvoid parallel_sort(Range& rng) {\n    parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng));\n}\n\n//! Sorts the data in const rng with a default comparator \\c std::less<RandomAccessIterator>\n/** @ingroup algorithms **/\ntemplate<typename Range>\nvoid parallel_sort(const Range& rng) {\n    parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng));\n}\n\n//! Sorts the data in the range \\c [begin,end) with a default comparator \\c std::less<T>\n/** @ingroup algorithms **/\ntemplate<typename T>\ninline void parallel_sort( T * begin, T * end ) {\n    parallel_sort( begin, end, std::less< T >() );\n}   \n//@}\n\n\n} // namespace tbb\n\n#endif\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/parallel_while.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_parallel_while\n#define __TBB_parallel_while\n\n#include \"task.h\"\n#include <new>\n\nnamespace tbb {\n\ntemplate<typename Body>\nclass parallel_while;\n\n//! @cond INTERNAL\nnamespace internal {\n\n    template<typename Stream, typename Body> class while_task;\n\n    //! 
For internal use only.\n    /** Executes one iteration of a while.\n        @ingroup algorithms */\n    template<typename Body>\n    class while_iteration_task: public task {\n        const Body& my_body;\n        typename Body::argument_type my_value;\n        /*override*/ task* execute() {\n            my_body(my_value); \n            return NULL;\n        }\n        while_iteration_task( const typename Body::argument_type& value, const Body& body ) : \n            my_body(body), my_value(value)\n        {}\n        template<typename Body_> friend class while_group_task;\n        friend class tbb::parallel_while<Body>;\n    };\n\n    //! For internal use only\n    /** Unpacks a block of iterations.\n        @ingroup algorithms */\n    template<typename Body>\n    class while_group_task: public task {\n        static const size_t max_arg_size = 4;         \n        const Body& my_body;\n        size_t size;\n        typename Body::argument_type my_arg[max_arg_size];\n        while_group_task( const Body& body ) : my_body(body), size(0) {} \n        /*override*/ task* execute() {\n            typedef while_iteration_task<Body> iteration_type;\n            __TBB_ASSERT( size>0, NULL );\n            task_list list;\n            task* t; \n            size_t k=0; \n            for(;;) {\n                t = new( allocate_child() ) iteration_type(my_arg[k],my_body); \n                if( ++k==size ) break;\n                list.push_back(*t);\n            }\n            set_ref_count(int(k+1));\n            spawn(list);\n            spawn_and_wait_for_all(*t);\n            return NULL;\n        }\n        template<typename Stream, typename Body_> friend class while_task;\n    };\n    \n    //! 
For internal use only.\n    /** Gets block of iterations from a stream and packages them into a while_group_task.\n        @ingroup algorithms */\n    template<typename Stream, typename Body>\n    class while_task: public task {\n        Stream& my_stream;\n        const Body& my_body;\n        empty_task& my_barrier;\n        /*override*/ task* execute() {\n            typedef while_group_task<Body> block_type;\n            block_type& t = *new( allocate_additional_child_of(my_barrier) ) block_type(my_body);\n            size_t k=0; \n            while( my_stream.pop_if_present(t.my_arg[k]) ) {\n                if( ++k==block_type::max_arg_size ) {\n                    // There might be more iterations.\n                    recycle_to_reexecute();\n                    break;\n                }\n            }\n            if( k==0 ) {\n                destroy(t);\n                return NULL;\n            } else {\n                t.size = k;\n                return &t;\n            }\n        }\n        while_task( Stream& stream, const Body& body, empty_task& barrier ) : \n            my_stream(stream),\n            my_body(body),\n            my_barrier(barrier)\n        {} \n        friend class tbb::parallel_while<Body>;\n    };\n\n} // namespace internal\n//! @endcond\n\n//! Parallel iteration over a stream, with optional addition of more work.\n/** The Body b has the requirement: \\n\n        \"b(v)\"                      \\n\n        \"b.argument_type\"           \\n\n    where v is an argument_type\n    @ingroup algorithms */\ntemplate<typename Body>\nclass parallel_while: internal::no_copy {\npublic:\n    //! Construct empty non-running parallel while.\n    parallel_while() : my_body(NULL), my_barrier(NULL) {}\n\n    //! Destructor cleans up data members before returning.\n    ~parallel_while() {\n        if( my_barrier ) {\n            my_barrier->destroy(*my_barrier);    \n            my_barrier = NULL;\n        }\n    }\n\n    //! 
Type of items\n    typedef typename Body::argument_type value_type;\n\n    //! Apply body.apply to each item in the stream.\n    /** A Stream s has the requirements \\n\n         \"S::value_type\"                \\n\n         \"s.pop_if_present(value) is convertible to bool */\n    template<typename Stream>\n    void run( Stream& stream, const Body& body );\n\n    //! Add a work item while running.\n    /** Should be executed only by body.apply or a thread spawned therefrom. */\n    void add( const value_type& item );\n\nprivate:\n    const Body* my_body;\n    empty_task* my_barrier;\n};\n\ntemplate<typename Body>\ntemplate<typename Stream>\nvoid parallel_while<Body>::run( Stream& stream, const Body& body ) {\n    using namespace internal;\n    empty_task& barrier = *new( task::allocate_root() ) empty_task();\n    my_body = &body;\n    my_barrier = &barrier;\n    my_barrier->set_ref_count(2);\n    while_task<Stream,Body>& w = *new( my_barrier->allocate_child() ) while_task<Stream,Body>( stream, body, barrier );\n    my_barrier->spawn_and_wait_for_all(w);\n    my_barrier->destroy(*my_barrier);\n    my_barrier = NULL;\n    my_body = NULL;\n}\n\ntemplate<typename Body>\nvoid parallel_while<Body>::add( const value_type& item ) {\n    __TBB_ASSERT(my_barrier,\"attempt to add to parallel_while that is not running\");\n    typedef internal::while_iteration_task<Body> iteration_type;\n    iteration_type& i = *new( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body);\n    task::self().spawn( i );\n}\n\n} // namespace \n\n#endif /* __TBB_parallel_while */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/partitioner.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_partitioner_H\n#define __TBB_partitioner_H\n\n#ifndef __TBB_INITIAL_CHUNKS\n// initial task divisions per thread\n#define __TBB_INITIAL_CHUNKS 2\n#endif\n#ifndef __TBB_RANGE_POOL_CAPACITY\n// maximum number of elements in range pool\n#define __TBB_RANGE_POOL_CAPACITY 8\n#endif\n#ifndef __TBB_INIT_DEPTH\n// initial value for depth of range pool\n#define __TBB_INIT_DEPTH 5\n#endif\n#ifndef __TBB_DEMAND_DEPTH_ADD\n// when imbalance is found range splits this value times more\n#define __TBB_DEMAND_DEPTH_ADD 2\n#endif\n#ifndef __TBB_STATIC_THRESHOLD\n// necessary number of clocks for the work to be distributed among all tasks\n#define __TBB_STATIC_THRESHOLD 40000\n#endif\n#if __TBB_DEFINE_MIC\n#define __TBB_NONUNIFORM_TASK_CREATION 1\n#ifdef __TBB_machine_time_stamp\n#define __TBB_USE_MACHINE_TIME_STAMPS 1\n#define __TBB_task_duration() __TBB_STATIC_THRESHOLD\n#endif // __TBB_machine_time_stamp\n#endif // __TBB_DEFINE_MIC\n\n#include \"task.h\"\n#include \"aligned_space.h\"\n#include \"atomic.h\"\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warnings\n    #pragma warning (push)\n    #pragma warning (disable: 4244)\n#endif\n\nnamespace tbb {\n\nclass auto_partitioner;\nclass simple_partitioner;\nclass affinity_partitioner;\nnamespace interface7 {\n    namespace internal {\n        class affinity_partition_type;\n    }\n}\n\nnamespace internal { //< @cond INTERNAL\nsize_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor();\n\n//! Defines entry point for affinity partitioner into tbb run-time library.\nclass affinity_partitioner_base_v3: no_copy {\n    friend class tbb::affinity_partitioner;\n    friend class tbb::interface7::internal::affinity_partition_type;\n    //! 
Array that remembers affinities of tree positions to affinity_id.\n    /** NULL if my_size==0. */\n    affinity_id* my_array;\n    //! Number of elements in my_array.\n    size_t my_size;\n    //! Zeros the fields.\n    affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {}\n    //! Deallocates my_array.\n    ~affinity_partitioner_base_v3() {resize(0);}\n    //! Resize my_array.\n    /** Retains values if resulting size is the same. */\n    void __TBB_EXPORTED_METHOD resize( unsigned factor );\n};\n\n//! Provides backward-compatible methods for partition objects without affinity.\nclass partition_type_base {\npublic:\n    void set_affinity( task & ) {}\n    void note_affinity( task::affinity_id ) {}\n    task* continue_after_execute_range() {return NULL;}\n    bool decide_whether_to_delay() {return false;}\n    void spawn_or_delay( bool, task& b ) {\n        task::spawn(b);\n    }\n};\n\ntemplate<typename Range, typename Body, typename Partitioner> class start_scan;\n\n} //< namespace internal @endcond\n\nnamespace serial {\nnamespace interface7 {\ntemplate<typename Range, typename Body, typename Partitioner> class start_for;\n}\n}\n\nnamespace interface7 {\n//! @cond INTERNAL\nnamespace internal {\nusing namespace tbb::internal;\ntemplate<typename Range, typename Body, typename Partitioner> class start_for;\ntemplate<typename Range, typename Body, typename Partitioner> class start_reduce;\n\n//! 
Join task node that contains shared flag for stealing feedback\nclass flag_task: public task {\npublic:\n    tbb::atomic<bool> my_child_stolen;\n    flag_task() { my_child_stolen = false; }\n    task* execute() { return NULL; }\n    static void mark_task_stolen(task &t) {\n        tbb::atomic<bool> &flag = static_cast<flag_task*>(t.parent())->my_child_stolen;\n#if TBB_USE_THREADING_TOOLS\n        // Threading tools respect lock prefix but report false-positive data-race via plain store\n        flag.fetch_and_store<release>(true);\n#else\n        flag = true;\n#endif //TBB_USE_THREADING_TOOLS\n    }\n    static bool is_peer_stolen(task &t) {\n        return static_cast<flag_task*>(t.parent())->my_child_stolen;\n    }\n};\n\n//! Depth is a relative depth of recursive division inside a range pool. Relative depth allows\n//! infinite absolute depth of the recursion for heavily unbalanced workloads with range represented\n//! by a number that cannot fit into machine word.\ntypedef unsigned char depth_t;\n\n//! Range pool stores ranges of type T in a circular buffer with MaxCapacity\ntemplate <typename T, depth_t MaxCapacity>\nclass range_vector {\n    depth_t my_head;\n    depth_t my_tail;\n    depth_t my_size;\n    depth_t my_depth[MaxCapacity]; // relative depths of stored ranges\n    tbb::aligned_space<T, MaxCapacity> my_pool;\n\npublic:\n    //! initialize via first range in pool\n    range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) {\n        my_depth[0] = 0;\n        new( static_cast<void *>(my_pool.begin()) ) T(elem);//TODO: std::move?\n    }\n    ~range_vector() {\n        while( !empty() ) pop_back();\n    }\n    bool empty() const { return my_size == 0; }\n    depth_t size() const { return my_size; }\n    //! Populates range pool via ranges up to max depth or while divisible\n    //! max_depth starts from 0, e.g. 
value 2 makes 3 ranges in the pool up to two 1/4 pieces\n    void split_to_fill(depth_t max_depth) {\n        while( my_size < MaxCapacity && is_divisible(max_depth) ) {\n            depth_t prev = my_head;\n            my_head = (my_head + 1) % MaxCapacity;\n            new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move?\n            my_pool.begin()[prev].~T(); // instead of assignment\n            new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split()); // do 'inverse' split\n            my_depth[my_head] = ++my_depth[prev];\n            my_size++;\n        }\n    }\n    void pop_back() {\n        __TBB_ASSERT(my_size > 0, \"range_vector::pop_back() with empty size\");\n        my_pool.begin()[my_head].~T();\n        my_size--;\n        my_head = (my_head + MaxCapacity - 1) % MaxCapacity;\n    }\n    void pop_front() {\n        __TBB_ASSERT(my_size > 0, \"range_vector::pop_front() with empty size\");\n        my_pool.begin()[my_tail].~T();\n        my_size--;\n        my_tail = (my_tail + 1) % MaxCapacity;\n    }\n    T& back() {\n        __TBB_ASSERT(my_size > 0, \"range_vector::back() with empty size\");\n        return my_pool.begin()[my_head];\n    }\n    T& front() {\n        __TBB_ASSERT(my_size > 0, \"range_vector::front() with empty size\");\n        return my_pool.begin()[my_tail];\n    }\n    //! similarly to front(), returns depth of the first range in the pool\n    depth_t front_depth() {\n        __TBB_ASSERT(my_size > 0, \"range_vector::front_depth() with empty size\");\n        return my_depth[my_tail];\n    }\n    depth_t back_depth() {\n        __TBB_ASSERT(my_size > 0, \"range_vector::back_depth() with empty size\");\n        return my_depth[my_head];\n    }\n    bool is_divisible(depth_t max_depth) {\n        return back_depth() < max_depth && back().is_divisible();\n    }\n};\n\n//! 
Provides default methods for partition objects and common algorithm blocks.\ntemplate <typename Partition>\nstruct partition_type_base {\n    typedef split split_type;\n    // decision makers\n    void set_affinity( task & ) {}\n    void note_affinity( task::affinity_id ) {}\n    bool check_being_stolen(task &) { return false; } // part of old should_execute_range()\n    bool check_for_demand(task &) { return false; }\n    bool is_divisible() { return true; } // part of old should_execute_range()\n    depth_t max_depth() { return 0; }\n    void align_depth(depth_t) { }\n    template <typename Range> split_type get_split() { return split(); }\n\n    // common function blocks\n    Partition& self() { return *static_cast<Partition*>(this); } // CRTP helper\n    template<typename StartType, typename Range>\n    void execute(StartType &start, Range &range) {\n        // The algorithm in a few words ([]-denotes calls to decision methods of partitioner):\n        // [If this task is stolen, adjust depth and divisions if necessary, set flag].\n        // If range is divisible {\n        //    Spread the work while [initial divisions left];\n        //    Create trap task [if necessary];\n        // }\n        // If not divisible or [max depth is reached], execute, else do the range pool part\n        if ( range.is_divisible() ) {\n            if ( self().is_divisible() ) {\n                do { // split until is divisible\n                    typename Partition::split_type split_obj = self().template get_split<Range>();\n                    start.offer_work( split_obj );\n                } while ( range.is_divisible() && self().is_divisible() );\n            }\n        }\n        if( !range.is_divisible() || !self().max_depth() )\n            start.run_body( range ); // simple partitioner goes always here\n        else { // do range pool\n            internal::range_vector<Range, Partition::range_pool_size> range_pool(range);\n            do {\n                
range_pool.split_to_fill(self().max_depth()); // fill range pool\n                if( self().check_for_demand( start ) ) {\n                    if( range_pool.size() > 1 ) {\n                        start.offer_work( range_pool.front(), range_pool.front_depth() );\n                        range_pool.pop_front();\n                        continue;\n                    }\n                    if( range_pool.is_divisible(self().max_depth()) ) // was not enough depth to fork a task\n                        continue; // note: next split_to_fill() should split range at least once\n                }\n                start.run_body( range_pool.back() );\n                range_pool.pop_back();\n            } while( !range_pool.empty() && !start.is_cancelled() );\n        }\n    }\n};\n\n//! Provides default methods for auto (adaptive) partition objects.\ntemplate <typename Partition>\nstruct adaptive_partition_type_base : partition_type_base<Partition> {\n    size_t my_divisor;\n    depth_t my_max_depth;\n    adaptive_partition_type_base() : my_max_depth(__TBB_INIT_DEPTH) {\n        my_divisor = tbb::internal::get_initial_auto_partitioner_divisor() / 4;\n        __TBB_ASSERT(my_divisor, \"initial value of get_initial_auto_partitioner_divisor() is not valid\");\n    }\n    adaptive_partition_type_base(adaptive_partition_type_base &src, split) {\n        my_max_depth = src.my_max_depth;\n#if TBB_USE_ASSERT\n        size_t old_divisor = src.my_divisor;\n#endif\n\n#if __TBB_INITIAL_TASK_IMBALANCE\n        if( src.my_divisor <= 1 ) my_divisor = 0;\n        else my_divisor = src.my_divisor = (src.my_divisor + 1u) / 2u;\n#else\n        my_divisor = src.my_divisor / 2u;\n        src.my_divisor = src.my_divisor - my_divisor; // TODO: check the effect separately\n        if (my_divisor) src.my_max_depth += static_cast<depth_t>(__TBB_Log2(src.my_divisor / my_divisor));\n#endif\n        // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task 
reserves.\n        // A task which has only one index must produce the right split without reserved index in order to avoid\n        // it to be overwritten in note_affinity() of the created (right) task.\n        // I.e. a task created deeper than the affinity array can remember must not save its affinity (LIFO order)\n        __TBB_ASSERT( (old_divisor <= 1 && my_divisor == 0) ||\n                      (old_divisor > 1 && my_divisor != 0), NULL);\n    }\n    adaptive_partition_type_base(adaptive_partition_type_base &src, const proportional_split& split_obj) {\n        my_max_depth = src.my_max_depth;\n        my_divisor = size_t(float(src.my_divisor) * float(split_obj.right())\n                            / float(split_obj.left() + split_obj.right()));\n        src.my_divisor -= my_divisor;\n    }\n    bool check_being_stolen( task &t) { // part of old should_execute_range()\n        if( !my_divisor ) { // if not from the top P tasks of binary tree\n            my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)?\n            if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // runs concurrently with the left task\n#if TBB_USE_EXCEPTIONS\n                // RTTI is available, check whether the cast is valid\n                __TBB_ASSERT(dynamic_cast<flag_task*>(t.parent()), 0);\n                // correctness of the cast relies on avoiding the root task for which:\n                // - initial value of my_divisor != 0 (protected by separate assertion)\n                // - is_stolen_task() always returns false for the root task.\n#endif\n                flag_task::mark_task_stolen(t);\n                if( !my_max_depth ) my_max_depth++;\n                my_max_depth += __TBB_DEMAND_DEPTH_ADD;\n                return true;\n            }\n        }\n        return false;\n    }\n    void align_depth(depth_t base) {\n        __TBB_ASSERT(base <= my_max_depth, 0);\n        my_max_depth -= base;\n    }\n    depth_t max_depth() { 
return my_max_depth; }\n};\n\n//! Helper that enables one or the other code branches (see example in is_range_divisible_in_proportion)\ntemplate<bool C, typename T = void> struct enable_if { typedef T type; };\ntemplate<typename T> struct enable_if<false, T> { };\n\n//! Class determines whether template parameter has static boolean\n//! constant 'is_divisible_in_proportion' initialized with value of\n//! 'true' or not.\n/** If template parameter has such field that has been initialized\n *  with non-zero value then class field will be set to 'true',\n *  otherwise - 'false'\n */\ntemplate <typename Range>\nclass is_range_divisible_in_proportion {\nprivate:\n    typedef char yes[1];\n    typedef char no [2];\n\n    template <typename range_type> static yes& decide(typename enable_if<range_type::is_divisible_in_proportion>::type *);\n    template <typename range_type> static no& decide(...);\npublic:\n    // equals to 'true' if and only if static const variable 'is_divisible_in_proportion' of template parameter\n    // initialized with the value of 'true'\n    static const bool value = (sizeof(decide<Range>(0)) == sizeof(yes));\n};\n\n//! 
Provides default methods for affinity (adaptive) partition objects.\nclass affinity_partition_type : public adaptive_partition_type_base<affinity_partition_type> {\n    static const unsigned factor_power = 4;\n    static const unsigned factor = 1<<factor_power;  // number of slots in affinity array per task\n    enum {\n        start = 0,\n        run,\n        pass\n    } my_delay;\n#ifdef __TBB_USE_MACHINE_TIME_STAMPS\n    machine_tsc_t my_dst_tsc;\n#endif\n    size_t my_begin;\n    tbb::internal::affinity_id* my_array;\npublic:\n    typedef proportional_split split_type;\n\n    affinity_partition_type( tbb::internal::affinity_partitioner_base_v3& ap )\n        : adaptive_partition_type_base<affinity_partition_type>(),\n          my_delay(start)\n#ifdef __TBB_USE_MACHINE_TIME_STAMPS\n        , my_dst_tsc(0)\n#endif\n        {\n        __TBB_ASSERT( (factor&(factor-1))==0, \"factor must be power of two\" );\n        my_divisor *= factor;\n        ap.resize(factor);\n        my_array = ap.my_array;\n        my_begin = 0;\n        my_max_depth = factor_power + 1; // the first factor_power ranges will be spawned, and >=1 ranges should be left\n        __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 );\n    }\n    affinity_partition_type(affinity_partition_type& p, split)\n        : adaptive_partition_type_base<affinity_partition_type>(p, split()),\n          my_delay(pass),\n#ifdef __TBB_USE_MACHINE_TIME_STAMPS\n          my_dst_tsc(0),\n#endif\n          my_array(p.my_array) {\n        // the sum of the divisors represents original value of p.my_divisor before split\n        __TBB_ASSERT(my_divisor + p.my_divisor <= factor, NULL);\n        my_begin = p.my_begin + p.my_divisor;\n    }\n    affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj)\n        : adaptive_partition_type_base<affinity_partition_type>(p, split_obj),\n          my_delay(start),\n#ifdef __TBB_USE_MACHINE_TIME_STAMPS\n          my_dst_tsc(0),\n#endif\n   
       my_array(p.my_array) {\n        size_t total_divisor = my_divisor + p.my_divisor;\n        __TBB_ASSERT(total_divisor % factor == 0, NULL);\n        my_divisor = (my_divisor + factor/2) & (0u - factor);\n        if (!my_divisor)\n            my_divisor = factor;\n        else if (my_divisor == total_divisor)\n            my_divisor = total_divisor - factor;\n        p.my_divisor = total_divisor - my_divisor;\n        __TBB_ASSERT(my_divisor && p.my_divisor, NULL);\n        my_begin = p.my_begin + p.my_divisor;\n    }\n    void set_affinity( task &t ) {\n        if( my_divisor ) {\n            if( !my_array[my_begin] ) {\n                // TODO: consider code reuse for static_paritioner\n                my_array[my_begin] = affinity_id(my_begin / factor + 1);\n            }\n            t.set_affinity( my_array[my_begin] );\n        }\n    }\n    void note_affinity( task::affinity_id id ) {\n        if( my_divisor )\n            my_array[my_begin] = id;\n    }\n    bool check_for_demand( task &t ) {\n        if( pass == my_delay ) {\n            if( my_divisor > 1 ) // produce affinitized tasks while they have slot in array\n                return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more\n            else if( my_divisor && my_max_depth ) { // make balancing task\n                my_divisor = 0; // once for each task; depth will be decreased in align_depth()\n                return true;\n            }\n            else if( flag_task::is_peer_stolen(t) ) {\n                my_max_depth += __TBB_DEMAND_DEPTH_ADD;\n                return true;\n            }\n        } else if( start == my_delay ) {\n#ifndef __TBB_USE_MACHINE_TIME_STAMPS\n            my_delay = pass;\n#else\n            my_dst_tsc = __TBB_machine_time_stamp() + __TBB_task_duration();\n            my_delay = run;\n        } else if( run == my_delay ) {\n            if( __TBB_machine_time_stamp() < my_dst_tsc ) {\n                
__TBB_ASSERT(my_max_depth > 0, NULL);\n                return false;\n            }\n            my_delay = pass;\n            return true;\n#endif // __TBB_USE_MACHINE_TIME_STAMPS\n        }\n        return false;\n    }\n    bool is_divisible() { // part of old should_execute_range()\n        return my_divisor > factor;\n    }\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Suppress \"conditional expression is constant\" warning.\n    #pragma warning( push )\n    #pragma warning( disable: 4127 )\n#endif\n    template <typename Range>\n    split_type get_split() {\n        if (is_range_divisible_in_proportion<Range>::value) {\n            size_t size = my_divisor / factor;\n#if __TBB_NONUNIFORM_TASK_CREATION\n            size_t right = (size + 2) / 3;\n#else\n            size_t right = size / 2;\n#endif\n            size_t left = size - right;\n            return split_type(left, right);\n        } else {\n            return split_type(1, 1);\n        }\n    }\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning( pop )\n#endif // warning 4127 is back\n\n    static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY;\n};\n\nclass auto_partition_type: public adaptive_partition_type_base<auto_partition_type> {\npublic:\n    auto_partition_type( const auto_partitioner& ) {\n        my_divisor *= __TBB_INITIAL_CHUNKS;\n    }\n    auto_partition_type( auto_partition_type& src, split)\n      : adaptive_partition_type_base<auto_partition_type>(src, split()) {}\n\n    bool is_divisible() { // part of old should_execute_range()\n        if( my_divisor > 1 ) return true;\n        if( my_divisor && my_max_depth ) { // can split the task. 
TODO: on-stack flag instead\n            // keep same fragmentation while splitting for the local task pool\n            my_max_depth--;\n            my_divisor = 0; // decrease max_depth once per task\n            return true;\n        } else return false;\n    }\n    bool check_for_demand(task &t) {\n        if( flag_task::is_peer_stolen(t) ) {\n            my_max_depth += __TBB_DEMAND_DEPTH_ADD;\n            return true;\n        } else return false;\n    }\n\n    static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY;\n};\n\nclass simple_partition_type: public partition_type_base<simple_partition_type> {\npublic:\n    simple_partition_type( const simple_partitioner& ) {}\n    simple_partition_type( const simple_partition_type&, split ) {}\n    //! simplified algorithm\n    template<typename StartType, typename Range>\n    void execute(StartType &start, Range &range) {\n        split_type split_obj = split(); // start.offer_work accepts split_type as reference\n        while( range.is_divisible() )\n            start.offer_work( split_obj );\n        start.run_body( range );\n    }\n    //static const unsigned range_pool_size = 1; - not necessary because execute() is overridden\n};\n\n//! 
Backward-compatible partition for auto and affinity partition objects.\nclass old_auto_partition_type: public tbb::internal::partition_type_base {\n    size_t num_chunks;\n    static const size_t VICTIM_CHUNKS = 4;\npublic:\n    bool should_execute_range(const task &t) {\n        if( num_chunks<VICTIM_CHUNKS && t.is_stolen_task() )\n            num_chunks = VICTIM_CHUNKS;\n        return num_chunks==1;\n    }\n    old_auto_partition_type( const auto_partitioner& )\n      : num_chunks(internal::get_initial_auto_partitioner_divisor()*__TBB_INITIAL_CHUNKS/4) {}\n    old_auto_partition_type( const affinity_partitioner& )\n      : num_chunks(internal::get_initial_auto_partitioner_divisor()*__TBB_INITIAL_CHUNKS/4) {}\n    old_auto_partition_type( old_auto_partition_type& pt, split ) {\n        num_chunks = pt.num_chunks = (pt.num_chunks+1u) / 2u;\n    }\n};\n\n} // namespace interfaceX::internal\n//! @endcond\n} // namespace interfaceX\n\n//! A simple partitioner\n/** Divides the range until the range is not divisible.\n    @ingroup algorithms */\nclass simple_partitioner {\npublic:\n    simple_partitioner() {}\nprivate:\n    template<typename Range, typename Body, typename Partitioner> friend class serial::interface7::start_for;\n    template<typename Range, typename Body, typename Partitioner> friend class interface7::internal::start_for;\n    template<typename Range, typename Body, typename Partitioner> friend class interface7::internal::start_reduce;\n    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;\n    // backward compatibility\n    class partition_type: public internal::partition_type_base {\n    public:\n        bool should_execute_range(const task& ) {return false;}\n        partition_type( const simple_partitioner& ) {}\n        partition_type( const partition_type&, split ) {}\n    };\n    // new implementation just extends existing interface\n    typedef interface7::internal::simple_partition_type 
task_partition_type;\n\n    // TODO: consider to make split_type public\n    typedef interface7::internal::simple_partition_type::split_type split_type;\n};\n\n//! An auto partitioner\n/** The range is initial divided into several large chunks.\n    Chunks are further subdivided into smaller pieces if demand detected and they are divisible.\n    @ingroup algorithms */\nclass auto_partitioner {\npublic:\n    auto_partitioner() {}\n\nprivate:\n    template<typename Range, typename Body, typename Partitioner> friend class serial::interface7::start_for;\n    template<typename Range, typename Body, typename Partitioner> friend class interface7::internal::start_for;\n    template<typename Range, typename Body, typename Partitioner> friend class interface7::internal::start_reduce;\n    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;\n    // backward compatibility\n    typedef interface7::internal::old_auto_partition_type partition_type;\n    // new implementation just extends existing interface\n    typedef interface7::internal::auto_partition_type task_partition_type;\n\n    // TODO: consider to make split_type public\n    typedef interface7::internal::auto_partition_type::split_type split_type;\n};\n\n//! 
An affinity partitioner\nclass affinity_partitioner: internal::affinity_partitioner_base_v3 {\npublic:\n    affinity_partitioner() {}\n\nprivate:\n    template<typename Range, typename Body, typename Partitioner> friend class serial::interface7::start_for;\n    template<typename Range, typename Body, typename Partitioner> friend class interface7::internal::start_for;\n    template<typename Range, typename Body, typename Partitioner> friend class interface7::internal::start_reduce;\n    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;\n    // backward compatibility - for parallel_scan only\n    typedef interface7::internal::old_auto_partition_type partition_type;\n    // new implementation just extends existing interface\n    typedef interface7::internal::affinity_partition_type task_partition_type;\n\n    // TODO: consider to make split_type public\n    typedef interface7::internal::affinity_partition_type::split_type split_type;\n};\n\n} // namespace tbb\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    #pragma warning (pop)\n#endif // warning 4244 is back\n#undef __TBB_INITIAL_CHUNKS\n#undef __TBB_RANGE_POOL_CAPACITY\n#undef __TBB_INIT_DEPTH\n#endif /* __TBB_partitioner_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/pipeline.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/pipeline.h\"\n#include \"tbb/spin_mutex.h\"\n#include \"tbb/cache_aligned_allocator.h\"\n#include \"itt_notify.h\"\n#include \"semaphore.h\"\n#include \"tls.h\"  // for parallel filters that do not use NULL as end_of_input\n\n\nnamespace tbb {\n\nnamespace internal {\n\n//! This structure is used to store task information in a input buffer\nstruct task_info {\n    void* my_object;\n    //! Invalid unless a task went through an ordered stage.\n    Token my_token;\n    //! False until my_token is set.\n    bool my_token_ready;\n    //! 
True if my_object is valid.\n    bool is_valid;\n    //! Set to initial state (no object, no token)\n    void reset() {\n        my_object = NULL;\n        my_token = 0;\n        my_token_ready = false;\n        is_valid = false;\n    }\n};\n//! A buffer of input items for a filter.\n/** Each item is a task_info, inserted into a position in the buffer corresponding to a Token. */\nclass input_buffer : no_copy {\n    friend class tbb::internal::pipeline_root_task;\n    friend class tbb::filter;\n    friend class tbb::thread_bound_filter;\n    friend class tbb::internal::stage_task;\n    friend class tbb::pipeline;\n\n    typedef  Token  size_type;\n\n    //! Array of deferred tasks that cannot yet start executing.\n    task_info* array;\n\n    //! for thread-bound filter, semaphore for waiting, NULL otherwise.\n    semaphore* my_sem;\n\n    //! Size of array\n    /** Always 0 or a power of 2 */\n    size_type array_size;\n\n    //! Lowest token that can start executing.\n    /** All prior Token have already been seen. */\n    Token low_token;\n\n    //! Serializes updates.\n    spin_mutex array_mutex;\n\n    //! Resize \"array\".\n    /** Caller is responsible to acquiring a lock on \"array_mutex\". */\n    void grow( size_type minimum_size );\n\n    //! Initial size for \"array\"\n    /** Must be a power of 2 */\n    static const size_type initial_buffer_size = 4;\n\n    //! Used for out of order buffer, and for assigning my_token if is_ordered and my_token not already assigned\n    Token high_token;\n\n    //! True for ordered filter, false otherwise.\n    bool is_ordered;\n\n    //! True for thread-bound filter, false otherwise.\n    bool is_bound;\n\n    //! 
for parallel filters that accepts NULLs, thread-local flag for reaching end_of_input\n    typedef basic_tls<intptr_t> end_of_input_tls_t;\n    end_of_input_tls_t end_of_input_tls;\n    bool end_of_input_tls_allocated; // no way to test pthread creation of TLS\n\n    void create_sema(size_t initial_tokens) { __TBB_ASSERT(!my_sem,NULL); my_sem = new internal::semaphore(initial_tokens); }\n    void free_sema() { __TBB_ASSERT(my_sem,NULL); delete my_sem; }\n    void sema_P() { __TBB_ASSERT(my_sem,NULL); my_sem->P(); }\n    void sema_V() { __TBB_ASSERT(my_sem,NULL); my_sem->V(); }\n\npublic:\n    //! Construct empty buffer.\n    input_buffer( bool is_ordered_, bool is_bound_ ) :\n            array(NULL), my_sem(NULL), array_size(0),\n            low_token(0), high_token(0),\n            is_ordered(is_ordered_), is_bound(is_bound_),\n            end_of_input_tls_allocated(false) {\n        grow(initial_buffer_size);\n        __TBB_ASSERT( array, NULL );\n        if(is_bound) create_sema(0);\n    }\n\n    //! Destroy the buffer.\n    ~input_buffer() {\n        __TBB_ASSERT( array, NULL );\n        cache_aligned_allocator<task_info>().deallocate(array,array_size);\n        poison_pointer( array );\n        if(my_sem) {\n            free_sema();\n        }\n        if(end_of_input_tls_allocated) {\n            destroy_my_tls();\n        }\n    }\n\n    //! 
Put a token into the buffer.\n    /** If task information was placed into buffer, returns true;\n        otherwise returns false, informing the caller to create and spawn a task.\n        If input buffer owned by thread-bound filter and the item at\n        low_token was not valid, issue a V()\n        If the input_buffer is owned by a successor to a thread-bound filter,\n        the force_put parameter should be true to ensure the token is inserted\n        in the buffer.\n    */\n    bool put_token( task_info& info_, bool force_put = false ) {\n        {\n            info_.is_valid = true;\n            spin_mutex::scoped_lock lock( array_mutex );\n            Token token;\n            bool was_empty = !array[low_token&(array_size-1)].is_valid;\n            if( is_ordered ) {\n                if( !info_.my_token_ready ) {\n                    info_.my_token = high_token++;\n                    info_.my_token_ready = true;\n                }\n                token = info_.my_token;\n            } else\n                token = high_token++;\n            __TBB_ASSERT( (tokendiff_t)(token-low_token)>=0, NULL );\n            if( token!=low_token || is_bound || force_put ) {\n                // Trying to put token that is beyond low_token.\n                // Need to wait until low_token catches up before dispatching.\n                if( token-low_token>=array_size )\n                    grow( token-low_token+1 );\n                ITT_NOTIFY( sync_releasing, this );\n                array[token&(array_size-1)] = info_;\n                if(was_empty && is_bound) {\n                    sema_V();\n                }\n                return true;\n            }\n        }\n        return false;\n    }\n\n    //! Note that processing of a token is finished.\n    /** Fires up processing of the next token, if processing was deferred. 
*/\n    // Using template to avoid explicit dependency on stage_task\n    // this is only called for serial filters, and is the reason for the\n    // advance parameter in return_item (we're incrementing low_token here.)\n    // Non-TBF serial stages don't advance the token at the start because the presence\n    // of the current token in the buffer keeps another stage from being spawned.\n    template<typename StageTask>\n    void note_done( Token token, StageTask& spawner ) {\n        task_info wakee;\n        wakee.reset();\n        {\n            spin_mutex::scoped_lock lock( array_mutex );\n            if( !is_ordered || token==low_token ) {\n                // Wake the next task\n                task_info& item = array[++low_token & (array_size-1)];\n                ITT_NOTIFY( sync_acquired, this );\n                wakee = item;\n                item.is_valid = false;\n            }\n        }\n        if( wakee.is_valid )\n            spawner.spawn_stage_task(wakee);\n    }\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! The method destroys all data in filters to prevent memory leaks\n    void clear( filter* my_filter ) {\n        long t=low_token;\n        for( size_type i=0; i<array_size; ++i, ++t ){\n            task_info& temp = array[t&(array_size-1)];\n            if (temp.is_valid ) {\n                my_filter->finalize(temp.my_object);\n                temp.is_valid = false;\n            }\n        }\n    }\n#endif\n\n    //! return an item, invalidate the queued item, but only advance if advance\n    //  advance == true for parallel filters.  
If the filter is serial, leave the\n    // item in the buffer to keep another stage from being spawned.\n    bool return_item(task_info& info, bool advance) {\n        spin_mutex::scoped_lock lock( array_mutex );\n        task_info& item = array[low_token&(array_size-1)];\n        ITT_NOTIFY( sync_acquired, this );\n        if( item.is_valid ) {\n            info = item;\n            item.is_valid = false;\n            if (advance) low_token++;\n            return true;\n        }\n        return false;\n    }\n\n    //! true if the current low_token is valid.\n    bool has_item() { spin_mutex::scoped_lock lock(array_mutex); return array[low_token&(array_size -1)].is_valid; }\n\n    // end_of_input signal for parallel_pipeline, parallel input filters with 0 tokens allowed.\n    void create_my_tls() { int status = end_of_input_tls.create(); if(status) handle_perror(status, \"TLS not allocated for filter\"); end_of_input_tls_allocated = true; }\n    void destroy_my_tls() { int status = end_of_input_tls.destroy(); if(status) handle_perror(status, \"Failed to destroy filter TLS\"); }\n    bool my_tls_end_of_input() { return end_of_input_tls.get() != 0; }\n    void set_my_tls_end_of_input() { end_of_input_tls.set(1); }\n};\n\nvoid input_buffer::grow( size_type minimum_size ) {\n    size_type old_size = array_size;\n    size_type new_size = old_size ? 
2*old_size : initial_buffer_size;\n    while( new_size<minimum_size )\n        new_size*=2;\n    task_info* new_array = cache_aligned_allocator<task_info>().allocate(new_size);\n    task_info* old_array = array;\n    for( size_type i=0; i<new_size; ++i )\n        new_array[i].is_valid = false;\n    long t=low_token;\n    for( size_type i=0; i<old_size; ++i, ++t )\n        new_array[t&(new_size-1)] = old_array[t&(old_size-1)];\n    array = new_array;\n    array_size = new_size;\n    if( old_array )\n        cache_aligned_allocator<task_info>().deallocate(old_array,old_size);\n}\n\nclass stage_task: public task, public task_info {\nprivate:\n    friend class tbb::pipeline;\n    pipeline& my_pipeline;\n    filter* my_filter;\n    //! True if this task has not yet read the input.\n    bool my_at_start;\n\npublic:\n    //! Construct stage_task for first stage in a pipeline.\n    /** Such a stage has not read any input yet. */\n    stage_task( pipeline& pipeline ) :\n        my_pipeline(pipeline),\n        my_filter(pipeline.filter_list),\n        my_at_start(true)\n    {\n        task_info::reset();\n    }\n    //! Construct stage_task for a subsequent stage in a pipeline.\n    stage_task( pipeline& pipeline, filter* filter_, const task_info& info ) :\n        task_info(info),\n        my_pipeline(pipeline),\n        my_filter(filter_),\n        my_at_start(false)\n    {}\n    //! Roughly equivalent to the constructor of input stage task\n    void reset() {\n        task_info::reset();\n        my_filter = my_pipeline.filter_list;\n        my_at_start = true;\n    }\n    //! 
The virtual task execution method\n    /*override*/ task* execute();\n#if __TBB_TASK_GROUP_CONTEXT\n    ~stage_task()\n    {\n        if (my_filter && my_object && (my_filter->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(4)) {\n            __TBB_ASSERT(is_cancelled(), \"Trying to finalize the task that wasn't cancelled\");\n            my_filter->finalize(my_object);\n            my_object = NULL;\n        }\n    }\n#endif // __TBB_TASK_GROUP_CONTEXT\n    //! Creates and spawns stage_task from task_info\n    void spawn_stage_task(const task_info& info)\n    {\n        stage_task* clone = new (allocate_additional_child_of(*parent()))\n                                stage_task( my_pipeline, my_filter, info );\n        spawn(*clone);\n    }\n};\n\ntask* stage_task::execute() {\n    __TBB_ASSERT( !my_at_start || !my_object, NULL );\n    __TBB_ASSERT( !my_filter->is_bound(), NULL );\n    if( my_at_start ) {\n        if( my_filter->is_serial() ) {\n            my_object = (*my_filter)(my_object);\n            if( my_object || ( my_filter->object_may_be_null() && !my_pipeline.end_of_input) )\n            {\n                if( my_filter->is_ordered() ) {\n                    my_token = my_pipeline.token_counter++; // ideally, with relaxed semantics\n                    my_token_ready = true;\n                } else if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) {\n                    if( my_pipeline.has_thread_bound_filters )\n                        my_pipeline.token_counter++; // ideally, with relaxed semantics\n                }\n                if( !my_filter->next_filter_in_pipeline ) { // we're only filter in pipeline\n                    reset();\n                    goto process_another_stage;\n                } else {\n                    ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens );\n                    if( --my_pipeline.input_tokens>0 )\n                        spawn( *new( 
allocate_additional_child_of(*parent()) ) stage_task( my_pipeline ) );\n                }\n            } else {\n                my_pipeline.end_of_input = true;\n                return NULL;\n            }\n        } else /*not is_serial*/ {\n            if( my_pipeline.end_of_input )\n                return NULL;\n            if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) {\n                if( my_pipeline.has_thread_bound_filters )\n                    my_pipeline.token_counter++;\n            }\n            ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens );\n            if( --my_pipeline.input_tokens>0 )\n                spawn( *new( allocate_additional_child_of(*parent()) ) stage_task( my_pipeline ) );\n            my_object = (*my_filter)(my_object);\n            if( !my_object && (!my_filter->object_may_be_null() || my_filter->my_input_buffer->my_tls_end_of_input()) )\n            {\n                my_pipeline.end_of_input = true;\n                if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) {\n                    if( my_pipeline.has_thread_bound_filters )\n                        my_pipeline.token_counter--;  // fix token_counter\n                }\n                return NULL;\n            }\n        }\n        my_at_start = false;\n    } else {\n        my_object = (*my_filter)(my_object);\n        if( my_filter->is_serial() )\n            my_filter->my_input_buffer->note_done(my_token, *this);\n    }\n    my_filter = my_filter->next_filter_in_pipeline;\n    if( my_filter ) {\n        // There is another filter to execute.\n        if( my_filter->is_serial() ) {\n            // The next filter must execute tokens in order\n            if( my_filter->my_input_buffer->put_token(*this) ){\n                // Can't proceed with the same item\n                if( my_filter->is_bound() ) {\n                    // Find the next non-thread-bound filter\n                   
 do {\n                        my_filter = my_filter->next_filter_in_pipeline;\n                    } while( my_filter && my_filter->is_bound() );\n                    // Check if there is an item ready to process\n                    if( my_filter && my_filter->my_input_buffer->return_item(*this, !my_filter->is_serial()))\n                        goto process_another_stage;\n                }\n                my_filter = NULL; // To prevent deleting my_object twice if exception occurs\n                return NULL;\n            }\n        }\n    } else {\n        // Reached end of the pipe.\n        size_t ntokens_avail = ++my_pipeline.input_tokens;\n        if(my_pipeline.filter_list->is_bound() ) {\n            if(ntokens_avail == 1) {\n                my_pipeline.filter_list->my_input_buffer->sema_V();\n            }\n            return NULL;\n        }\n        if( ntokens_avail>1  // Only recycle if there is one available token\n                || my_pipeline.end_of_input ) {\n            return NULL; // No need to recycle for new input\n        }\n        ITT_NOTIFY( sync_acquired, &my_pipeline.input_tokens );\n        // Recycle as an input stage task.\n        reset();\n    }\nprocess_another_stage:\n    /* A semi-hackish way to reexecute the same task object immediately without spawning.\n       recycle_as_continuation marks the task for future execution,\n       and then 'this' pointer is returned to bypass spawning. 
*/\n    recycle_as_continuation();\n    return this;\n}\n\nclass pipeline_root_task: public task {\n    pipeline& my_pipeline;\n    bool do_segment_scanning;\n\n    /*override*/ task* execute() {\n        if( !my_pipeline.end_of_input )\n            if( !my_pipeline.filter_list->is_bound() )\n                if( my_pipeline.input_tokens > 0 ) {\n                    recycle_as_continuation();\n                    set_ref_count(1);\n                    return new( allocate_child() ) stage_task( my_pipeline );\n                }\n        if( do_segment_scanning ) {\n            filter* current_filter = my_pipeline.filter_list->next_segment;\n            /* first non-thread-bound filter that follows thread-bound one\n            and may have valid items to process */\n            filter* first_suitable_filter = current_filter;\n            while( current_filter ) {\n                __TBB_ASSERT( !current_filter->is_bound(), \"filter is thread-bound?\" );\n                __TBB_ASSERT( current_filter->prev_filter_in_pipeline->is_bound(), \"previous filter is not thread-bound?\" );\n                if( !my_pipeline.end_of_input || current_filter->has_more_work())\n                {\n                    task_info info;\n                    info.reset();\n                    if( current_filter->my_input_buffer->return_item(info, !current_filter->is_serial()) ) {\n                        set_ref_count(1);\n                        recycle_as_continuation();\n                        return new( allocate_child() ) stage_task( my_pipeline, current_filter, info);\n                    }\n                    current_filter = current_filter->next_segment;\n                    if( !current_filter ) {\n                        if( !my_pipeline.end_of_input ) {\n                            recycle_as_continuation();\n                            return this;\n                        }\n                        current_filter = first_suitable_filter;\n                        
__TBB_Yield();\n                    }\n                } else {\n                    /* The preceding pipeline segment is empty.\n                    Fast-forward to the next post-TBF segment. */\n                    first_suitable_filter = first_suitable_filter->next_segment;\n                    current_filter = first_suitable_filter;\n                }\n            } /* while( current_filter ) */\n            return NULL;\n        } else {\n            if( !my_pipeline.end_of_input ) {\n                recycle_as_continuation();\n                return this;\n            }\n            return NULL;\n        }\n    }\npublic:\n    pipeline_root_task( pipeline& pipeline ): my_pipeline(pipeline), do_segment_scanning(false)\n    {\n        __TBB_ASSERT( my_pipeline.filter_list, NULL );\n        filter* first = my_pipeline.filter_list;\n        if( (first->my_filter_mode & first->version_mask) >= __TBB_PIPELINE_VERSION(5) ) {\n            // Scanning the pipeline for segments\n            filter* head_of_previous_segment = first;\n            for(  filter* subfilter=first->next_filter_in_pipeline;\n                  subfilter!=NULL;\n                  subfilter=subfilter->next_filter_in_pipeline )\n            {\n                if( subfilter->prev_filter_in_pipeline->is_bound() && !subfilter->is_bound() ) {\n                    do_segment_scanning = true;\n                    head_of_previous_segment->next_segment = subfilter;\n                    head_of_previous_segment = subfilter;\n                }\n            }\n        }\n    }\n};\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warnings\n    // Suppress compiler warning about constant conditional expression\n    #pragma warning (disable: 4127)\n#endif\n\n// The class destroys end_counter and clears all input buffers if pipeline was cancelled.\nclass pipeline_cleaner: internal::no_copy {\n    pipeline& my_pipeline;\npublic:\n    pipeline_cleaner(pipeline& _pipeline) 
:\n        my_pipeline(_pipeline)\n    {}\n    ~pipeline_cleaner(){\n#if __TBB_TASK_GROUP_CONTEXT\n        if (my_pipeline.end_counter->is_cancelled()) // Pipeline was cancelled\n            my_pipeline.clear_filters();\n#endif\n        my_pipeline.end_counter = NULL;\n    }\n};\n\n} // namespace internal\n\nvoid pipeline::inject_token( task& ) {\n    __TBB_ASSERT(false,\"illegal call to inject_token\");\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\nvoid pipeline::clear_filters() {\n    for( filter* f = filter_list; f; f = f->next_filter_in_pipeline ) {\n        if ((f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(4))\n            if( internal::input_buffer* b = f->my_input_buffer )\n                b->clear(f);\n    }\n}\n#endif\n\npipeline::pipeline() :\n    filter_list(NULL),\n    filter_end(NULL),\n    end_counter(NULL),\n    end_of_input(false),\n    has_thread_bound_filters(false)\n{\n    token_counter = 0;\n    input_tokens = 0;\n}\n\npipeline::~pipeline() {\n    clear();\n}\n\nvoid pipeline::clear() {\n    filter* next;\n    for( filter* f = filter_list; f; f=next ) {\n        if( internal::input_buffer* b = f->my_input_buffer ) {\n            delete b;\n            f->my_input_buffer = NULL;\n        }\n        next=f->next_filter_in_pipeline;\n        f->next_filter_in_pipeline = filter::not_in_pipeline();\n        if ( (f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) {\n            f->prev_filter_in_pipeline = filter::not_in_pipeline();\n            f->my_pipeline = NULL;\n        }\n        if ( (f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(5) )\n            f->next_segment = NULL;\n    }\n    filter_list = filter_end = NULL;\n}\n\nvoid pipeline::add_filter( filter& filter_ ) {\n#if TBB_USE_ASSERT\n    if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) )\n        __TBB_ASSERT( filter_.prev_filter_in_pipeline==filter::not_in_pipeline(), \"filter already part of 
pipeline?\" );\n    __TBB_ASSERT( filter_.next_filter_in_pipeline==filter::not_in_pipeline(), \"filter already part of pipeline?\" );\n    __TBB_ASSERT( !end_counter, \"invocation of add_filter on running pipeline\" );\n#endif\n    if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) {\n        filter_.my_pipeline = this;\n        filter_.prev_filter_in_pipeline = filter_end;\n        if ( filter_list == NULL)\n            filter_list = &filter_;\n        else\n            filter_end->next_filter_in_pipeline = &filter_;\n        filter_.next_filter_in_pipeline = NULL;\n        filter_end = &filter_;\n    }\n    else\n    {\n        if( !filter_end )\n            filter_end = reinterpret_cast<filter*>(&filter_list);\n\n        *reinterpret_cast<filter**>(filter_end) = &filter_;\n        filter_end = reinterpret_cast<filter*>(&filter_.next_filter_in_pipeline);\n        *reinterpret_cast<filter**>(filter_end) = NULL;\n    }\n    if( (filter_.my_filter_mode & filter_.version_mask) >= __TBB_PIPELINE_VERSION(5) ) {\n        if( filter_.is_serial() ) {\n            if( filter_.is_bound() )\n                has_thread_bound_filters = true;\n            filter_.my_input_buffer = new internal::input_buffer( filter_.is_ordered(), filter_.is_bound() );\n        }\n        else {\n            if(filter_.prev_filter_in_pipeline) {\n                if(filter_.prev_filter_in_pipeline->is_bound()) {\n                    // successors to bound filters must have an input_buffer\n                    filter_.my_input_buffer = new internal::input_buffer( /*is_ordered*/false, false );\n                }\n            }\n            else {  // input filter\n                if(filter_.object_may_be_null() ) {\n                    //TODO: buffer only needed to hold TLS; could improve\n                    filter_.my_input_buffer = new internal::input_buffer( /*is_ordered*/false, false );\n                    filter_.my_input_buffer->create_my_tls();\n            
    }\n            }\n        }\n    } else {\n        if( filter_.is_serial() ) {\n            filter_.my_input_buffer = new internal::input_buffer( filter_.is_ordered(), false );\n        }\n    }\n\n}\n\nvoid pipeline::remove_filter( filter& filter_ ) {\n    __TBB_ASSERT( filter_.prev_filter_in_pipeline!=filter::not_in_pipeline(), \"filter not part of pipeline\" );\n    __TBB_ASSERT( filter_.next_filter_in_pipeline!=filter::not_in_pipeline(), \"filter not part of pipeline\" );\n    __TBB_ASSERT( !end_counter, \"invocation of remove_filter on running pipeline\" );\n    if (&filter_ == filter_list)\n        filter_list = filter_.next_filter_in_pipeline;\n    else {\n        __TBB_ASSERT( filter_.prev_filter_in_pipeline, \"filter list broken?\" );\n        filter_.prev_filter_in_pipeline->next_filter_in_pipeline = filter_.next_filter_in_pipeline;\n    }\n    if (&filter_ == filter_end)\n        filter_end = filter_.prev_filter_in_pipeline;\n    else {\n        __TBB_ASSERT( filter_.next_filter_in_pipeline, \"filter list broken?\" );\n        filter_.next_filter_in_pipeline->prev_filter_in_pipeline = filter_.prev_filter_in_pipeline;\n    }\n    if( internal::input_buffer* b = filter_.my_input_buffer ) {\n        delete b;\n        filter_.my_input_buffer = NULL;\n    }\n    filter_.next_filter_in_pipeline = filter_.prev_filter_in_pipeline = filter::not_in_pipeline();\n    if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(5) )\n        filter_.next_segment = NULL;\n    filter_.my_pipeline = NULL;\n}\n\nvoid pipeline::run( size_t max_number_of_live_tokens\n#if __TBB_TASK_GROUP_CONTEXT\n    , tbb::task_group_context& context\n#endif\n    ) {\n    __TBB_ASSERT( max_number_of_live_tokens>0, \"pipeline::run must have at least one token\" );\n    __TBB_ASSERT( !end_counter, \"pipeline already running?\" );\n    if( filter_list ) {\n        internal::pipeline_cleaner my_pipeline_cleaner(*this);\n        end_of_input = false;\n        
input_tokens = internal::Token(max_number_of_live_tokens);\n        if(has_thread_bound_filters) {\n            // release input filter if thread-bound\n            if(filter_list->is_bound()) {\n                filter_list->my_input_buffer->sema_V();\n            }\n        }\n#if __TBB_TASK_GROUP_CONTEXT\n        end_counter = new( task::allocate_root(context) ) internal::pipeline_root_task( *this );\n#else\n        end_counter = new( task::allocate_root() ) internal::pipeline_root_task( *this );\n#endif\n        // Start execution of tasks\n        task::spawn_root_and_wait( *end_counter );\n\n        if(has_thread_bound_filters) {\n            for(filter* f = filter_list->next_filter_in_pipeline; f; f=f->next_filter_in_pipeline) {\n                if(f->is_bound()) {\n                    f->my_input_buffer->sema_V(); // wake to end\n                }\n            }\n        }\n    }\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\nvoid pipeline::run( size_t max_number_of_live_tokens ) {\n    if( filter_list ) {\n        // Construct task group context with the exception propagation mode expected\n        // by the pipeline caller.\n        uintptr_t ctx_traits = filter_list->my_filter_mode & filter::exact_exception_propagation ?\n                task_group_context::default_traits :\n                task_group_context::default_traits & ~task_group_context::exact_exception;\n        task_group_context context(task_group_context::bound, ctx_traits);\n        run(max_number_of_live_tokens, context);\n    }\n}\n#endif // __TBB_TASK_GROUP_CONTEXT\n\nbool filter::has_more_work() {\n    __TBB_ASSERT(my_pipeline, NULL);\n    __TBB_ASSERT(my_input_buffer, \"has_more_work() called for filter with no input buffer\");\n    return (internal::tokendiff_t)(my_pipeline->token_counter - my_input_buffer->low_token) != 0;\n}\n\nfilter::~filter() {\n    if ( (my_filter_mode & version_mask) >= __TBB_PIPELINE_VERSION(3) ) {\n        if ( next_filter_in_pipeline != filter::not_in_pipeline() )\n    
        my_pipeline->remove_filter(*this);\n        else\n            __TBB_ASSERT( prev_filter_in_pipeline == filter::not_in_pipeline(), \"probably filter list is broken\" );\n    } else {\n        __TBB_ASSERT( next_filter_in_pipeline==filter::not_in_pipeline(), \"cannot destroy filter that is part of pipeline\" );\n    }\n}\n\nvoid\nfilter::set_end_of_input() {\n    __TBB_ASSERT(my_input_buffer, NULL);\n    __TBB_ASSERT(object_may_be_null(), NULL);\n    if(is_serial()) {\n        my_pipeline->end_of_input = true;\n    }\n    else {\n        __TBB_ASSERT(my_input_buffer->end_of_input_tls_allocated, NULL);\n        my_input_buffer->set_my_tls_end_of_input();\n    }\n}\n\nthread_bound_filter::result_type thread_bound_filter::process_item() {\n    return internal_process_item(true);\n}\n\nthread_bound_filter::result_type thread_bound_filter::try_process_item() {\n    return internal_process_item(false);\n}\n\nthread_bound_filter::result_type thread_bound_filter::internal_process_item(bool is_blocking) {\n    __TBB_ASSERT(my_pipeline != NULL,\"It's not supposed that process_item is called for a filter that is not in a pipeline.\");\n    internal::task_info info;\n    info.reset();\n\n    if( my_pipeline->end_of_input && !has_more_work() )\n        return end_of_stream;\n\n    if( !prev_filter_in_pipeline ) {\n        if( my_pipeline->end_of_input )\n            return end_of_stream;\n        while( my_pipeline->input_tokens == 0 ) {\n            if( !is_blocking )\n                return item_not_available;\n            my_input_buffer->sema_P();\n        }\n        info.my_object = (*this)(info.my_object);\n        if( info.my_object ) {\n            __TBB_ASSERT(my_pipeline->input_tokens > 0, \"Token failed in thread-bound filter\");\n            my_pipeline->input_tokens--;\n            if( is_ordered() ) {\n                info.my_token = my_pipeline->token_counter;\n                info.my_token_ready = true;\n            }\n            
my_pipeline->token_counter++; // ideally, with relaxed semantics\n        } else {\n            my_pipeline->end_of_input = true;\n            return end_of_stream;\n        }\n    } else { /* this is not an input filter */\n        while( !my_input_buffer->has_item() ) {\n            if( !is_blocking ) {\n                return item_not_available;\n            }\n            my_input_buffer->sema_P();\n            if( my_pipeline->end_of_input && !has_more_work() ) {\n                return end_of_stream;\n            }\n        }\n        if( !my_input_buffer->return_item(info, /*advance*/true) ) {\n            __TBB_ASSERT(false,\"return_item failed\");\n        }\n        info.my_object = (*this)(info.my_object);\n    }\n    if( next_filter_in_pipeline ) {\n        if ( !next_filter_in_pipeline->my_input_buffer->put_token(info,/*force_put=*/true) ) {\n            __TBB_ASSERT(false, \"Couldn't put token after thread-bound buffer\");\n        }\n    } else {\n        size_t ntokens_avail = ++(my_pipeline->input_tokens);\n        if( my_pipeline->filter_list->is_bound() ) {\n            if( ntokens_avail == 1 ) {\n                my_pipeline->filter_list->my_input_buffer->sema_V();\n            }\n        }\n    }\n\n    return success;\n}\n\n} // tbb\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/pipeline.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_pipeline_H \n#define __TBB_pipeline_H \n\n#include \"atomic.h\"\n#include \"task.h\"\n#include \"tbb_allocator.h\"\n#include <cstddef>\n\n#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT || __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT\n#include <type_traits>\n#endif\n\nnamespace tbb {\n\nclass pipeline;\nclass filter;\n\n//! 
@cond INTERNAL\nnamespace internal {\n\n// The argument for PIPELINE_VERSION should be an integer between 2 and 9\n#define __TBB_PIPELINE_VERSION(x) ((unsigned char)(x-2)<<1)\n\ntypedef unsigned long Token;\ntypedef long tokendiff_t;\nclass stage_task;\nclass input_buffer;\nclass pipeline_root_task;\nclass pipeline_cleaner;\n\n} // namespace internal\n\nnamespace interface6 {\n    template<typename T, typename U> class filter_t;\n\n    namespace internal {\n        class pipeline_proxy;\n    }\n}\n\n//! @endcond\n\n//! A stage in a pipeline.\n/** @ingroup algorithms */\nclass filter: internal::no_copy {\nprivate:\n    //! Value used to mark \"not in pipeline\"\n    static filter* not_in_pipeline() {return reinterpret_cast<filter*>(intptr_t(-1));}\nprotected:    \n    //! The lowest bit 0 is for parallel vs. serial\n    static const unsigned char filter_is_serial = 0x1; \n\n    //! 4th bit distinguishes ordered vs unordered filters.\n    /** The bit was not set for parallel filters in TBB 2.1 and earlier,\n        but is_ordered() function always treats parallel filters as out of order. */\n    static const unsigned char filter_is_out_of_order = 0x1<<4;  \n\n    //! 5th bit distinguishes thread-bound and regular filters.\n    static const unsigned char filter_is_bound = 0x1<<5;  \n\n    //! 6th bit marks input filters emitting small objects\n    static const unsigned char filter_may_emit_null = 0x1<<6;\n\n    //! 7th bit defines exception propagation mode expected by the application.\n    static const unsigned char exact_exception_propagation =\n#if TBB_USE_CAPTURED_EXCEPTION\n            0x0;\n#else\n            0x1<<7;\n#endif /* TBB_USE_CAPTURED_EXCEPTION */\n\n    static const unsigned char current_version = __TBB_PIPELINE_VERSION(5);\n    static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for version\npublic:\n    enum mode {\n        //! 
processes multiple items in parallel and in no particular order\n        parallel = current_version | filter_is_out_of_order, \n        //! processes items one at a time; all such filters process items in the same order\n        serial_in_order = current_version | filter_is_serial,\n        //! processes items one at a time and in no particular order\n        serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order,\n        //! @deprecated use serial_in_order instead\n        serial = serial_in_order\n    };\nprotected:\n    filter( bool is_serial_ ) : \n        next_filter_in_pipeline(not_in_pipeline()),\n        my_input_buffer(NULL),\n        my_filter_mode(static_cast<unsigned char>((is_serial_ ? serial : parallel) | exact_exception_propagation)),\n        prev_filter_in_pipeline(not_in_pipeline()),\n        my_pipeline(NULL),\n        next_segment(NULL)\n    {}\n    \n    filter( mode filter_mode ) :\n        next_filter_in_pipeline(not_in_pipeline()),\n        my_input_buffer(NULL),\n        my_filter_mode(static_cast<unsigned char>(filter_mode | exact_exception_propagation)),\n        prev_filter_in_pipeline(not_in_pipeline()),\n        my_pipeline(NULL),\n        next_segment(NULL)\n    {}\n\n    // signal end-of-input for concrete_filters\n    void __TBB_EXPORTED_METHOD set_end_of_input();\n\npublic:\n    //! True if filter is serial.\n    bool is_serial() const {\n        return bool( my_filter_mode & filter_is_serial );\n    }  \n    \n    //! True if filter must receive stream in order.\n    bool is_ordered() const {\n        return (my_filter_mode & (filter_is_out_of_order|filter_is_serial))==filter_is_serial;\n    }\n\n    //! True if filter is thread-bound.\n    bool is_bound() const {\n        return ( my_filter_mode & filter_is_bound )==filter_is_bound;\n    }\n\n    //! 
true if an input filter can emit null\n    bool object_may_be_null() { \n        return ( my_filter_mode & filter_may_emit_null ) == filter_may_emit_null;\n    }\n\n    //! Operate on an item from the input stream, and return item for output stream.\n    /** Returns NULL if filter is a sink. */\n    virtual void* operator()( void* item ) = 0;\n\n    //! Destroy filter.  \n    /** If the filter was added to a pipeline, the pipeline must be destroyed first. */\n    virtual __TBB_EXPORTED_METHOD ~filter();\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Destroys item if pipeline was cancelled.\n    /** Required to prevent memory leaks.\n        Note it can be called concurrently even for serial filters.*/\n    virtual void finalize( void* /*item*/ ) {};\n#endif\n\nprivate:\n    //! Pointer to next filter in the pipeline.\n    filter* next_filter_in_pipeline;\n\n    //! has the filter not yet processed all the tokens it will ever see?  \n    //  (pipeline has not yet reached end_of_input or this filter has not yet\n    //  seen the last token produced by input_filter)\n    bool has_more_work();\n\n    //! Buffer for incoming tokens, or NULL if not required.\n    /** The buffer is required if the filter is serial or follows a thread-bound one. */\n    internal::input_buffer* my_input_buffer;\n\n    friend class internal::stage_task;\n    friend class internal::pipeline_root_task;\n    friend class pipeline;\n    friend class thread_bound_filter;\n\n    //! Storage for filter mode and dynamically checked implementation version.\n    const unsigned char my_filter_mode;\n\n    //! Pointer to previous filter in the pipeline.\n    filter* prev_filter_in_pipeline;\n\n    //! Pointer to the pipeline.\n    pipeline* my_pipeline;\n\n    //! Pointer to the next \"segment\" of filters, or NULL if not required.\n    /** In each segment, the first filter is not thread-bound but follows a thread-bound one. */\n    filter* next_segment;\n};\n\n//! 
A stage in a pipeline served by a user thread.\n/** @ingroup algorithms */\nclass thread_bound_filter: public filter {\npublic:\n    enum result_type {\n        // item was processed\n        success,\n        // item is currently not available\n        item_not_available,\n        // there are no more items to process\n        end_of_stream\n    };\nprotected:\n    thread_bound_filter(mode filter_mode): \n         filter(static_cast<mode>(filter_mode | filter::filter_is_bound))\n    {\n        __TBB_ASSERT(filter_mode & filter::filter_is_serial, \"thread-bound filters must be serial\");\n    }\npublic:\n    //! If a data item is available, invoke operator() on that item.  \n    /** This interface is non-blocking.\n        Returns 'success' if an item was processed.\n        Returns 'item_not_available' if no item can be processed now \n        but more may arrive in the future, or if token limit is reached. \n        Returns 'end_of_stream' if there are no more items to process. */\n    result_type __TBB_EXPORTED_METHOD try_process_item(); \n\n    //! Wait until a data item becomes available, and invoke operator() on that item.\n    /** This interface is blocking.\n        Returns 'success' if an item was processed.\n        Returns 'end_of_stream' if there are no more items to process.\n        Never returns 'item_not_available', as it blocks until another return condition applies. */\n    result_type __TBB_EXPORTED_METHOD process_item();\n\nprivate:\n    //! Internal routine for item processing\n    result_type internal_process_item(bool is_blocking);\n};\n\n//! A processing pipeline that applies filters to items.\n/** @ingroup algorithms */\nclass pipeline {\npublic:\n    //! Construct empty pipeline.\n    __TBB_EXPORTED_METHOD pipeline();\n\n    /** Though the current implementation declares the destructor virtual, do not rely on this \n        detail.  The virtualness is deprecated and may disappear in future versions of TBB. 
*/\n    virtual __TBB_EXPORTED_METHOD ~pipeline();\n\n    //! Add filter to end of pipeline.\n    void __TBB_EXPORTED_METHOD add_filter( filter& filter_ );\n\n    //! Run the pipeline to completion.\n    void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens );\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Run the pipeline to completion with user-supplied context.\n    void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context& context );\n#endif\n\n    //! Remove all filters from the pipeline.\n    void __TBB_EXPORTED_METHOD clear();\n\nprivate:\n    friend class internal::stage_task;\n    friend class internal::pipeline_root_task;\n    friend class filter;\n    friend class thread_bound_filter;\n    friend class internal::pipeline_cleaner;\n    friend class tbb::interface6::internal::pipeline_proxy;\n\n    //! Pointer to first filter in the pipeline.\n    filter* filter_list;\n\n    //! Pointer to location where address of next filter to be added should be stored.\n    filter* filter_end;\n\n    //! task who's reference count is used to determine when all stages are done.\n    task* end_counter;\n\n    //! Number of idle tokens waiting for input stage.\n    atomic<internal::Token> input_tokens;\n\n    //! Global counter of tokens \n    atomic<internal::Token> token_counter;\n\n    //! False until fetch_input returns NULL.\n    bool end_of_input;\n\n    //! True if the pipeline contains a thread-bound filter; false otherwise.\n    bool has_thread_bound_filters;\n\n    //! Remove filter from pipeline.\n    void remove_filter( filter& filter_ );\n\n    //! Not used, but retained to satisfy old export files.\n    void __TBB_EXPORTED_METHOD inject_token( task& self );\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! 
Does clean up if pipeline is cancelled or exception occurred\n    void clear_filters();\n#endif\n};\n\n//------------------------------------------------------------------------\n// Support for lambda-friendly parallel_pipeline interface\n//------------------------------------------------------------------------\n\nnamespace interface6 {\n\nnamespace internal {\n    template<typename T, typename U, typename Body> class concrete_filter;\n}\n\n//! input_filter control to signal end-of-input for parallel_pipeline\nclass flow_control {\n    bool is_pipeline_stopped;\n    flow_control() { is_pipeline_stopped = false; }\n    template<typename T, typename U, typename Body> friend class internal::concrete_filter;\npublic:\n    void stop() { is_pipeline_stopped = true; }\n};\n\n//! @cond INTERNAL\nnamespace internal {\n\ntemplate<typename T> struct tbb_large_object {enum { value = sizeof(T) > sizeof(void *) }; };\n\n// Obtain type properties in one or another way\n#if   __TBB_CPP11_TYPE_PROPERTIES_PRESENT\ntemplate<typename T> struct tbb_trivially_copyable { enum { value = std::is_trivially_copyable<T>::value }; };\n#elif __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT\ntemplate<typename T> struct tbb_trivially_copyable { enum { value = std::has_trivial_copy_constructor<T>::value }; };\n#else\n// Explicitly list the types we wish to be placed as-is in the pipeline input_buffers.\ntemplate<typename T> struct tbb_trivially_copyable { enum { value = false }; };\ntemplate<typename T> struct tbb_trivially_copyable <T*> { enum { value = true }; };\ntemplate<> struct tbb_trivially_copyable <short> { enum { value = true }; };\ntemplate<> struct tbb_trivially_copyable <unsigned short> { enum { value = true }; };\ntemplate<> struct tbb_trivially_copyable <int> { enum { value = !tbb_large_object<int>::value }; };\ntemplate<> struct tbb_trivially_copyable <unsigned int> { enum { value = !tbb_large_object<int>::value }; };\ntemplate<> struct tbb_trivially_copyable <long> { enum { value = 
!tbb_large_object<long>::value }; };\ntemplate<> struct tbb_trivially_copyable <unsigned long> { enum { value = !tbb_large_object<long>::value }; };\ntemplate<> struct tbb_trivially_copyable <float> { enum { value = !tbb_large_object<float>::value }; };\ntemplate<> struct tbb_trivially_copyable <double> { enum { value = !tbb_large_object<double>::value }; };\n#endif // Obtaining type properties\n\ntemplate<typename T> struct is_large_object {enum { value = tbb_large_object<T>::value || !tbb_trivially_copyable<T>::value }; };\n\ntemplate<typename T, bool> class token_helper;\n\n// large object helper (uses tbb_allocator)\ntemplate<typename T>\nclass token_helper<T, true> {\n    public:\n    typedef typename tbb::tbb_allocator<T> allocator;\n    typedef T* pointer;\n    typedef T value_type;\n    static pointer create_token(const value_type & source) {\n        pointer output_t = allocator().allocate(1);\n        return new (output_t) T(source);\n    }\n    static value_type & token(pointer & t) { return *t;}\n    static void * cast_to_void_ptr(pointer ref) { return (void *) ref; }\n    static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; }\n    static void destroy_token(pointer token) {\n        allocator().destroy(token);\n        allocator().deallocate(token,1);\n    }\n};\n\n// pointer specialization\ntemplate<typename T>\nclass token_helper<T*, false > {\n    public:\n    typedef T* pointer;\n    typedef T* value_type;\n    static pointer create_token(const value_type & source) { return source; }\n    static value_type & token(pointer & t) { return t;}\n    static void * cast_to_void_ptr(pointer ref) { return (void *)ref; }\n    static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; }\n    static void destroy_token( pointer /*token*/) {}\n};\n\n// small object specialization (converts void* to the correct type, passes objects directly.)\ntemplate<typename T>\nclass token_helper<T, false> {\n    typedef union {\n        T 
actual_value;\n        void * void_overlay;\n    } type_to_void_ptr_map;\n    public:\n    typedef T pointer;  // not really a pointer in this case.\n    typedef T value_type;\n    static pointer create_token(const value_type & source) {\n        return source; }\n    static value_type & token(pointer & t) { return t;}\n    static void * cast_to_void_ptr(pointer ref) { \n        type_to_void_ptr_map mymap; \n        mymap.void_overlay = NULL;\n        mymap.actual_value = ref; \n        return mymap.void_overlay; \n    }\n    static pointer cast_from_void_ptr(void * ref) { \n        type_to_void_ptr_map mymap;\n        mymap.void_overlay = ref;\n        return mymap.actual_value;\n    }\n    static void destroy_token( pointer /*token*/) {}\n};\n\ntemplate<typename T, typename U, typename Body>\nclass concrete_filter: public tbb::filter {\n    const Body& my_body;\n    typedef token_helper<T,is_large_object<T>::value > t_helper;\n    typedef typename t_helper::pointer t_pointer;\n    typedef token_helper<U,is_large_object<U>::value > u_helper;\n    typedef typename u_helper::pointer u_pointer;\n\n    /*override*/ void* operator()(void* input) {\n        t_pointer temp_input = t_helper::cast_from_void_ptr(input);\n        u_pointer output_u = u_helper::create_token(my_body(t_helper::token(temp_input)));\n        t_helper::destroy_token(temp_input);\n        return u_helper::cast_to_void_ptr(output_u);\n    }\n\n    /*override*/ void finalize(void * input) {\n        t_pointer temp_input = t_helper::cast_from_void_ptr(input);\n        t_helper::destroy_token(temp_input);\n    }\n\npublic:\n    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}\n};\n\n// input \ntemplate<typename U, typename Body>\nclass concrete_filter<void,U,Body>: public filter {\n    const Body& my_body;\n    typedef token_helper<U, is_large_object<U>::value > u_helper;\n    typedef typename u_helper::pointer u_pointer;\n\n    /*override*/void* 
operator()(void*) {\n        flow_control control;\n        u_pointer output_u = u_helper::create_token(my_body(control));\n        if(control.is_pipeline_stopped) {\n            u_helper::destroy_token(output_u);\n            set_end_of_input();\n            return NULL;\n        }\n        return u_helper::cast_to_void_ptr(output_u);\n    }\n\npublic:\n    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : \n        filter(static_cast<tbb::filter::mode>(filter_mode | filter_may_emit_null)),\n        my_body(body)\n    {}\n};\n\ntemplate<typename T, typename Body>\nclass concrete_filter<T,void,Body>: public filter {\n    const Body& my_body;\n    typedef token_helper<T, is_large_object<T>::value > t_helper;\n    typedef typename t_helper::pointer t_pointer;\n   \n    /*override*/ void* operator()(void* input) {\n        t_pointer temp_input = t_helper::cast_from_void_ptr(input);\n        my_body(t_helper::token(temp_input));\n        t_helper::destroy_token(temp_input);\n        return NULL;\n    }\n    /*override*/ void finalize(void* input) {\n        t_pointer temp_input = t_helper::cast_from_void_ptr(input);\n        t_helper::destroy_token(temp_input);\n    }\n\npublic:\n    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}\n};\n\ntemplate<typename Body>\nclass concrete_filter<void,void,Body>: public filter {\n    const Body& my_body;\n    \n    /** Override privately because it is always called virtually */\n    /*override*/ void* operator()(void*) {\n        flow_control control;\n        my_body(control);\n        void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1; \n        return output;\n    }\npublic:\n    concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}\n};\n\n//! 
The class that represents an object of the pipeline for parallel_pipeline().\n/** It primarily serves as RAII class that deletes heap-allocated filter instances. */\nclass pipeline_proxy {\n    tbb::pipeline my_pipe;\npublic:\n    pipeline_proxy( const filter_t<void,void>& filter_chain );\n    ~pipeline_proxy() {\n        while( filter* f = my_pipe.filter_list ) \n            delete f; // filter destructor removes it from the pipeline\n    }\n    tbb::pipeline* operator->() { return &my_pipe; }\n};\n\n//! Abstract base class that represents a node in a parse tree underlying a filter_t.\n/** These nodes are always heap-allocated and can be shared by filter_t objects. */\nclass filter_node: tbb::internal::no_copy {\n    /** Count must be atomic because it is hidden state for user, but might be shared by threads. */\n    tbb::atomic<intptr_t> ref_count;\nprotected:\n    filter_node() {\n        ref_count = 0;\n#ifdef __TBB_TEST_FILTER_NODE_COUNT\n        ++(__TBB_TEST_FILTER_NODE_COUNT);\n#endif\n    }\npublic:\n    //! Add concrete_filter to pipeline \n    virtual void add_to( pipeline& ) = 0;\n    //! Increment reference count\n    void add_ref() {++ref_count;}\n    //! Decrement reference count and delete if it becomes zero.\n    void remove_ref() {\n        __TBB_ASSERT(ref_count>0,\"ref_count underflow\");\n        if( --ref_count==0 ) \n            delete this;\n    }\n    virtual ~filter_node() {\n#ifdef __TBB_TEST_FILTER_NODE_COUNT\n        --(__TBB_TEST_FILTER_NODE_COUNT);\n#endif\n    }\n};\n\n//! 
Node in parse tree representing result of make_filter.\ntemplate<typename T, typename U, typename Body>\nclass filter_node_leaf: public filter_node  {\n    const tbb::filter::mode mode;\n    const Body body;\n    /*override*/void add_to( pipeline& p ) {\n        concrete_filter<T,U,Body>* f = new concrete_filter<T,U,Body>(mode,body);\n        p.add_filter( *f );\n    }\npublic:\n    filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {}\n};\n\n//! Node in parse tree representing join of two filters.\nclass filter_node_join: public filter_node {\n    friend class filter_node; // to suppress GCC 3.2 warnings\n    filter_node& left;\n    filter_node& right;\n    /*override*/~filter_node_join() {\n       left.remove_ref();\n       right.remove_ref();\n    }\n    /*override*/void add_to( pipeline& p ) {\n        left.add_to(p);\n        right.add_to(p);\n    }\npublic:\n    filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) {\n       left.add_ref();\n       right.add_ref();\n    }\n};\n\n} // namespace internal\n//! @endcond\n\n//! Create a filter to participate in parallel_pipeline\ntemplate<typename T, typename U, typename Body>\nfilter_t<T,U> make_filter(tbb::filter::mode mode, const Body& body) {\n    return new internal::filter_node_leaf<T,U,Body>(mode, body);\n}\n\ntemplate<typename T, typename V, typename U>\nfilter_t<T,U> operator& (const filter_t<T,V>& left, const filter_t<V,U>& right) {\n    __TBB_ASSERT(left.root,\"cannot use default-constructed filter_t as left argument of '&'\");\n    __TBB_ASSERT(right.root,\"cannot use default-constructed filter_t as right argument of '&'\");\n    return new internal::filter_node_join(*left.root,*right.root);\n}\n\n//! 
Class representing a chain of type-safe pipeline filters\ntemplate<typename T, typename U>\nclass filter_t {\n    typedef internal::filter_node filter_node;\n    filter_node* root;\n    filter_t( filter_node* root_ ) : root(root_) {\n        root->add_ref();\n    }\n    friend class internal::pipeline_proxy;\n    template<typename T_, typename U_, typename Body>\n    friend filter_t<T_,U_> make_filter(tbb::filter::mode, const Body& );\n    template<typename T_, typename V_, typename U_>\n    friend filter_t<T_,U_> operator& (const filter_t<T_,V_>& , const filter_t<V_,U_>& );\npublic:\n    filter_t() : root(NULL) {}\n    filter_t( const filter_t<T,U>& rhs ) : root(rhs.root) {\n        if( root ) root->add_ref();\n    }\n    template<typename Body>\n    filter_t( tbb::filter::mode mode, const Body& body ) :\n        root( new internal::filter_node_leaf<T,U,Body>(mode, body) ) {\n        root->add_ref();\n    }\n\n    void operator=( const filter_t<T,U>& rhs ) {\n        // Order of operations below carefully chosen so that reference counts remain correct\n        // in unlikely event that remove_ref throws exception.\n        filter_node* old = root;\n        root = rhs.root; \n        if( root ) root->add_ref();\n        if( old ) old->remove_ref();\n    }\n    ~filter_t() {\n        if( root ) root->remove_ref();\n    }\n    void clear() {\n        // Like operator= with filter_t() on right side.\n        if( root ) {\n            filter_node* old = root;\n            root = NULL;\n            old->remove_ref();\n        }\n    }\n};\n\ninline internal::pipeline_proxy::pipeline_proxy( const filter_t<void,void>& filter_chain ) : my_pipe() {\n    __TBB_ASSERT( filter_chain.root, \"cannot apply parallel_pipeline to default-constructed filter_t\"  );\n    filter_chain.root->add_to(my_pipe);\n}\n\ninline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain\n#if __TBB_TASK_GROUP_CONTEXT\n    , tbb::task_group_context& 
context\n#endif\n    ) {\n    internal::pipeline_proxy pipe(filter_chain);\n    // tbb::pipeline::run() is called via the proxy\n    pipe->run(max_number_of_live_tokens\n#if __TBB_TASK_GROUP_CONTEXT\n              , context\n#endif\n    );\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\ninline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain) {\n    tbb::task_group_context context;\n    parallel_pipeline(max_number_of_live_tokens, filter_chain, context);\n}\n#endif // __TBB_TASK_GROUP_CONTEXT\n\n} // interface6\n\nusing interface6::flow_control;\nusing interface6::filter_t;\nusing interface6::make_filter;\nusing interface6::parallel_pipeline;\n\n} // tbb\n\n#endif /* __TBB_pipeline_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/private_server.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"rml_tbb.h\"\n#include \"../server/thread_monitor.h\"\n#include \"tbb/atomic.h\"\n#include \"tbb/cache_aligned_allocator.h\"\n#include \"scheduler_common.h\"\n#include \"governor.h\"\n#include \"tbb_misc.h\"\n\nusing rml::internal::thread_monitor;\n\nnamespace tbb {\nnamespace internal {\nnamespace rml {\n\ntypedef thread_monitor::handle_type thread_handle;\n\nclass private_server;\n\nclass private_worker: no_copy {\n    //! 
State in finite-state machine that controls the worker.\n    /** State diagram:\n        init --> starting --> normal\n          |         |           |\n          |         V           |\n          \\------> quit <------/\n      */\n    enum state_t {\n        //! *this is initialized\n        st_init,\n        //! *this has associated thread that is starting up.\n        st_starting,\n        //! Associated thread is doing normal life sequence.\n        st_normal,\n        //! Associated thread has ended normal life sequence and promises to never touch *this again.\n        st_quit\n    };\n    atomic<state_t> my_state;\n    \n    //! Associated server\n    private_server& my_server; \n\n    //! Associated client\n    tbb_client& my_client; \n\n    //! index used for avoiding the 64K aliasing problem\n    const size_t my_index;\n\n    //! Monitor for sleeping when there is no work to do.\n    /** The invariant that holds for sleeping workers is:\n        \"my_slack<=0 && my_state==st_normal && I am on server's list of asleep threads\" */\n    thread_monitor my_thread_monitor;\n\n    //! Handle of the OS thread associated with this worker\n    thread_handle my_handle;\n\n    //! Link for list of workers that are sleeping or have no associated thread.\n    private_worker* my_next;\n\n    friend class private_server;\n\n    //! Actions executed by the associated thread \n    void run();\n\n    //! Wake up associated thread (or launch a thread if there is none)\n    void wake_or_launch();\n\n    //! 
Called by a thread (usually not the associated thread) to commence termination.\n    void start_shutdown();\n\n    static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg );\n\n    static void release_handle(thread_handle my_handle);\n\nprotected:\n    private_worker( private_server& server, tbb_client& client, const size_t i ) : \n        my_server(server),\n        my_client(client),\n        my_index(i)\n    {\n        my_state = st_init;\n    }\n};\n\nstatic const size_t cache_line_size = tbb::internal::NFS_MaxLineSize;\n\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Suppress overzealous compiler warnings about uninstantiable class\n    #pragma warning(push)\n    #pragma warning(disable:4510 4610)\n#endif\nclass padded_private_worker: public private_worker {\n    char pad[cache_line_size - sizeof(private_worker)%cache_line_size];\npublic:\n    padded_private_worker( private_server& server, tbb_client& client, const size_t i )\n    : private_worker(server,client,i) { suppress_unused_warning(pad); }\n};\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning(pop)\n#endif\n\nclass private_server: public tbb_server, no_copy {\n    tbb_client& my_client;\n    //! Maximum number of threads to be created.\n    /** Threads are created lazily, so maximum might not actually be reached. */\n    const tbb_client::size_type my_n_thread;\n\n    //! Stack size for each thread. */\n    const size_t my_stack_size;\n\n    //! Number of jobs that could use their associated thread minus number of active threads.\n    /** If negative, indicates oversubscription.\n        If positive, indicates that more threads should run. \n        Can be lowered asynchronously, but must be raised only while holding my_asleep_list_mutex,\n        because raising it impacts the invariant for sleeping threads. */\n    atomic<int> my_slack;\n\n    //! 
Counter used to determine when to delete this.\n    atomic<int> my_ref_count;\n\n    padded_private_worker* my_thread_array;\n\n    //! List of workers that are asleep or committed to sleeping until notified by another thread.\n    tbb::atomic<private_worker*> my_asleep_list_root;\n\n    //! Protects my_asleep_list_root\n    typedef scheduler_mutex_type asleep_list_mutex_type;\n    asleep_list_mutex_type my_asleep_list_mutex;\n\n#if TBB_USE_ASSERT\n    atomic<int> my_net_slack_requests;\n#endif /* TBB_USE_ASSERT */\n\n    //! Wake up to two sleeping workers, if there are any sleeping.\n    /** The call is used to propagate a chain reaction where each thread wakes up two threads,\n        which in turn each wake up two threads, etc. */\n    void propagate_chain_reaction() {\n        // First test of a double-check idiom.  Second test is inside wake_some(0).\n        if( my_asleep_list_root ) \n            wake_some(0);\n    }\n\n    //! Try to add t to list of sleeping workers\n    bool try_insert_in_asleep_list( private_worker& t );\n\n    //! 
Equivalent of adding additional_slack to my_slack and waking up to 2 threads if my_slack permits.\n    void wake_some( int additional_slack );\n\n    virtual ~private_server();\n    \n    void remove_server_ref() {\n        if( --my_ref_count==0 ) {\n            my_client.acknowledge_close_connection();\n            this->~private_server();\n            tbb::cache_aligned_allocator<private_server>().deallocate( this, 1 );\n        } \n    }\n\n    friend class private_worker;\npublic:\n    private_server( tbb_client& client );\n\n    /*override*/ version_type version() const {\n        return 0;\n    } \n\n    /*override*/ void request_close_connection( bool /*exiting*/ ) {\n        for( size_t i=0; i<my_n_thread; ++i )\n            my_thread_array[i].start_shutdown();\n        remove_server_ref();\n    }\n\n    /*override*/ void yield() {__TBB_Yield();}\n\n    /*override*/ void independent_thread_number_changed( int ) {__TBB_ASSERT(false,NULL);}\n\n    /*override*/ unsigned default_concurrency() const { return governor::default_num_threads() - 1; }\n\n    /*override*/ void adjust_job_count_estimate( int delta );\n\n#if _WIN32||_WIN64\n    /*override*/ void register_master ( ::rml::server::execution_resource_t& ) {}\n    /*override*/ void unregister_master ( ::rml::server::execution_resource_t ) {}\n#endif /* _WIN32||_WIN64 */\n};\n\n//------------------------------------------------------------------------\n// Methods of private_worker\n//------------------------------------------------------------------------\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Suppress overzealous compiler warnings about an initialized variable 'sink_for_alloca' not referenced\n    #pragma warning(push)\n    #pragma warning(disable:4189)\n#endif\n#if __MINGW32__ && __GNUC__==4 &&__GNUC_MINOR__>=2 && !__MINGW64__\n// ensure that stack is properly aligned for TBB threads\n__attribute__((force_align_arg_pointer))\n#endif\n__RML_DECL_THREAD_ROUTINE private_worker::thread_routine( 
void* arg ) {\n    private_worker* self = static_cast<private_worker*>(arg);\n    AVOID_64K_ALIASING( self->my_index );\n#if _XBOX\n    int HWThreadIndex = __TBB_XBOX360_GetHardwareThreadIndex(i);\n    XSetThreadProcessor(GetCurrentThread(), HWThreadIndex);\n#endif\n    self->run();\n    return 0;\n}\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning(pop)\n#endif\n\nvoid private_worker::release_handle(thread_handle handle) {\n    if (governor::needsWaitWorkers())\n        thread_monitor::join(handle);\n    else\n        thread_monitor::detach_thread(handle);\n}\n\nvoid private_worker::start_shutdown() {\n    state_t s;\n\n    do {\n        s = my_state;\n        __TBB_ASSERT( s!=st_quit, NULL );\n    } while( my_state.compare_and_swap( st_quit, s )!=s );\n    if( s==st_normal || s==st_starting ) {\n        // May have invalidated invariant for sleeping, so wake up the thread.\n        // Note that the notify() here occurs without maintaining invariants for my_slack.\n        // It does not matter, because my_state==st_quit overrides checking of my_slack.\n        my_thread_monitor.notify();\n        // Do not need release handle in st_init state,\n        // because in this case the thread wasn't started yet.\n        // For st_starting release is done at launch site.\n        if (s==st_normal)\n            release_handle(my_handle);\n    } else if( s==st_init ) {\n        // Perform action that otherwise would be performed by associated thread when it quits.\n        my_server.remove_server_ref();\n    }\n}\n\nvoid private_worker::run() {\n    my_server.propagate_chain_reaction();\n\n    // Transiting to st_normal here would require setting my_handle,\n    // which would create race with the launching thread and \n    // complications in handle management on Windows.\n\n    ::rml::job& j = *my_client.create_one_job();\n    while( my_state!=st_quit ) {\n        if( my_server.my_slack>=0 ) {\n            my_client.process(j);\n        } else {\n        
    thread_monitor::cookie c;\n            // Prepare to wait\n            my_thread_monitor.prepare_wait(c);\n            // Check/set the invariant for sleeping\n            if( my_state!=st_quit && my_server.try_insert_in_asleep_list(*this) ) {\n                my_thread_monitor.commit_wait(c);\n                my_server.propagate_chain_reaction();\n            } else {\n                // Invariant broken\n                my_thread_monitor.cancel_wait();\n            }\n        }\n    }\n    my_client.cleanup(j);\n\n    ++my_server.my_slack;\n    my_server.remove_server_ref();\n}\n\ninline void private_worker::wake_or_launch() {\n    if( my_state==st_init && my_state.compare_and_swap( st_starting, st_init )==st_init ) {\n        // after this point, remove_server_ref() must be done by created thread\n#if USE_WINTHREAD\n        my_handle = thread_monitor::launch( thread_routine, this, my_server.my_stack_size, &this->my_index );\n#elif USE_PTHREAD\n        {\n        affinity_helper fpa;\n        fpa.protect_affinity_mask();\n        my_handle = thread_monitor::launch( thread_routine, this, my_server.my_stack_size );\n        // Implicit destruction of fpa resets original affinity mask.\n        }\n#endif /* USE_PTHREAD */\n        state_t s = my_state.compare_and_swap( st_normal, st_starting );\n        if (st_starting != s) {\n            // Do shutdown during startup. 
my_handle can't be released\n            // by start_shutdown, because my_handle value might be not set yet\n            // at time of transition from st_starting to st_quit.\n            __TBB_ASSERT( s==st_quit, NULL );\n            release_handle(my_handle);\n        }\n    }\n    else\n        my_thread_monitor.notify();\n}\n\n//------------------------------------------------------------------------\n// Methods of private_server\n//------------------------------------------------------------------------\nprivate_server::private_server( tbb_client& client ) : \n    my_client(client), \n    my_n_thread(client.max_job_count()),\n    my_stack_size(client.min_stack_size()),\n    my_thread_array(NULL) \n{\n    my_ref_count = my_n_thread+1;\n    my_slack = 0;\n#if TBB_USE_ASSERT\n    my_net_slack_requests = 0;\n#endif /* TBB_USE_ASSERT */\n    my_asleep_list_root = NULL;\n    my_thread_array = tbb::cache_aligned_allocator<padded_private_worker>().allocate( my_n_thread );\n    memset( my_thread_array, 0, sizeof(private_worker)*my_n_thread );\n    for( size_t i=0; i<my_n_thread; ++i ) {\n        private_worker* t = new( &my_thread_array[i] ) padded_private_worker( *this, client, i ); \n        t->my_next = my_asleep_list_root;\n        my_asleep_list_root = t;\n    } \n}\n\nprivate_server::~private_server() {\n    __TBB_ASSERT( my_net_slack_requests==0, NULL );\n    for( size_t i=my_n_thread; i--; ) \n        my_thread_array[i].~padded_private_worker();\n    tbb::cache_aligned_allocator<padded_private_worker>().deallocate( my_thread_array, my_n_thread );\n    tbb::internal::poison_pointer( my_thread_array );\n}\n\ninline bool private_server::try_insert_in_asleep_list( private_worker& t ) {\n    asleep_list_mutex_type::scoped_lock lock;\n    if( !lock.try_acquire(my_asleep_list_mutex) )\n        return false;\n    // Contribute to slack under lock so that if another takes that unit of slack,\n    // it sees us sleeping on the list and wakes us up.\n    int k = 
++my_slack;\n    if( k<=0 ) {\n        t.my_next = my_asleep_list_root;\n        my_asleep_list_root = &t;\n        return true;\n    } else {\n        --my_slack;\n        return false;\n    }\n}\n\nvoid private_server::wake_some( int additional_slack ) {\n    __TBB_ASSERT( additional_slack>=0, NULL );\n    private_worker* wakee[2];\n    private_worker**w = wakee;\n    {\n        asleep_list_mutex_type::scoped_lock lock(my_asleep_list_mutex);\n        while( my_asleep_list_root && w<wakee+2 ) {\n            if( additional_slack>0 ) {\n                if (additional_slack+my_slack<=0) // additional demand does not exceed surplus supply\n                    break;\n                --additional_slack;\n            } else {\n                // Chain reaction; Try to claim unit of slack\n                int old;\n                do {\n                    old = my_slack;\n                    if( old<=0 ) goto done;\n                } while( my_slack.compare_and_swap(old-1,old)!=old );\n            }\n            // Pop sleeping worker to combine with claimed unit of slack\n            my_asleep_list_root = (*w++ = my_asleep_list_root)->my_next;\n        }\n        if( additional_slack ) {\n            // Contribute our unused slack to my_slack.\n            my_slack += additional_slack;\n        }\n    }\ndone:\n    while( w>wakee ) \n        (*--w)->wake_or_launch();\n}\n\nvoid private_server::adjust_job_count_estimate( int delta ) {\n#if TBB_USE_ASSERT\n    my_net_slack_requests+=delta;\n#endif /* TBB_USE_ASSERT */\n    if( delta<0 ) {\n        my_slack+=delta;\n    } else if( delta>0 ) {\n        wake_some( delta );\n    }\n}\n\n//! Factory method called from task.cpp to create a private_server.\ntbb_server* make_private_server( tbb_client& client ) {\n    return new( tbb::cache_aligned_allocator<private_server>().allocate(1) ) private_server(client);\n}\n\n} // namespace rml\n} // namespace internal\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/queuing_mutex.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/queuing_mutex.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb_misc.h\"\n#include \"itt_notify.h\"\n\nnamespace tbb {\n\nusing namespace internal;\n\n//! 
A method to acquire queuing_mutex lock\nvoid queuing_mutex::scoped_lock::acquire( queuing_mutex& m )\n{\n    __TBB_ASSERT( !this->mutex, \"scoped_lock is already holding a mutex\");\n\n    // Must set all fields before the fetch_and_store, because once the\n    // fetch_and_store executes, *this becomes accessible to other threads.\n    mutex = &m;\n    next  = NULL;\n    going = 0;\n\n    // The fetch_and_store must have release semantics, because we are\n    // \"sending\" the fields initialized above to other processors.\n    scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);\n    if( pred ) {\n        ITT_NOTIFY(sync_prepare, mutex);\n#if TBB_USE_ASSERT\n        __TBB_control_consistency_helper(); // on \"m.q_tail\"\n        __TBB_ASSERT( !pred->next, \"the predecessor has another successor!\");\n#endif\n        pred->next = this;\n        spin_wait_while_eq( going, 0ul );\n    }\n    ITT_NOTIFY(sync_acquired, mutex);\n\n    // Force acquire so that user's critical section receives correct values\n    // from processor that was previously in the user's critical section.\n    __TBB_load_with_acquire(going);\n}\n\n//! 
A method to acquire queuing_mutex if it is free\nbool queuing_mutex::scoped_lock::try_acquire( queuing_mutex& m )\n{\n    __TBB_ASSERT( !this->mutex, \"scoped_lock is already holding a mutex\");\n\n    // Must set all fields before the fetch_and_store, because once the\n    // fetch_and_store executes, *this becomes accessible to other threads.\n    next  = NULL;\n    going = 0;\n\n    // The CAS must have release semantics, because we are\n    // \"sending\" the fields initialized above to other processors.\n    if( m.q_tail.compare_and_swap<tbb::release>(this, NULL) )\n        return false;\n\n    // Force acquire so that user's critical section receives correct values\n    // from processor that was previously in the user's critical section.\n    // try_acquire should always have acquire semantic, even if failed.\n    __TBB_load_with_acquire(going);\n    mutex = &m;\n    ITT_NOTIFY(sync_acquired, mutex);\n    return true;\n}\n\n//! A method to release queuing_mutex lock\nvoid queuing_mutex::scoped_lock::release( )\n{\n    __TBB_ASSERT(this->mutex!=NULL, \"no lock acquired\");\n\n    ITT_NOTIFY(sync_releasing, mutex);\n    if( !next ) {\n        if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {\n            // this was the only item in the queue, and the queue is now empty.\n            goto done;\n        }\n        // Someone in the queue\n        spin_wait_while_eq( next, (scoped_lock*)0 );\n    }\n    __TBB_ASSERT(next,NULL);\n    __TBB_store_with_release(next->going, 1);\ndone:\n    initialize();\n}\n\nvoid queuing_mutex::internal_construct() {\n    ITT_SYNC_CREATE(this, _T(\"tbb::queuing_mutex\"), _T(\"\"));\n}\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/queuing_mutex.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_queuing_mutex_H\n#define __TBB_queuing_mutex_H\n\n#include \"tbb_config.h\"\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <cstring>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\n#include \"atomic.h\"\n#include \"tbb_profiling.h\"\n\nnamespace tbb {\n\n//! 
Queuing mutex with local-only spinning.\n/** @ingroup synchronization */\nclass queuing_mutex : internal::mutex_copy_deprecated_and_disabled {\npublic:\n    //! Construct unacquired mutex.\n    queuing_mutex() {\n        q_tail = NULL;\n#if TBB_USE_THREADING_TOOLS\n        internal_construct();\n#endif\n    }\n\n    //! The scoped locking pattern\n    /** It helps to avoid the common problem of forgetting to release lock.\n        It also nicely provides the \"node\" for queuing locks. */\n    class scoped_lock: internal::no_copy {\n        //! Initialize fields to mean \"no lock held\".\n        void initialize() {\n            mutex = NULL;\n#if TBB_USE_ASSERT\n            internal::poison_pointer(next);\n#endif /* TBB_USE_ASSERT */\n        }\n\n    public:\n        //! Construct lock that has not acquired a mutex.\n        /** Equivalent to zero-initialization of *this. */\n        scoped_lock() {initialize();}\n\n        //! Acquire lock on given mutex.\n        scoped_lock( queuing_mutex& m ) {\n            initialize();\n            acquire(m);\n        }\n\n        //! Release lock (if lock is held).\n        ~scoped_lock() {\n            if( mutex ) release();\n        }\n\n        //! Acquire lock on given mutex.\n        void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m );\n\n        //! Acquire lock on given mutex if free (i.e. non-blocking)\n        bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m );\n\n        //! Release lock.\n        void __TBB_EXPORTED_METHOD release();\n\n    private:\n        //! The pointer to the mutex owned, or NULL if not holding a mutex.\n        queuing_mutex* mutex;\n\n        //! The pointer to the next competitor for a mutex\n        scoped_lock *next;\n\n        //! The local spin-wait variable\n        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of\n            zero-initialization.  Defining it as an entire word instead of\n            a byte seems to help performance slightly. 
*/\n        uintptr_t going;\n    };\n\n    void __TBB_EXPORTED_METHOD internal_construct();\n\n    // Mutex traits\n    static const bool is_rw_mutex = false;\n    static const bool is_recursive_mutex = false;\n    static const bool is_fair_mutex = true;\n\nprivate:\n    //! The last competitor requesting the lock\n    atomic<scoped_lock*> q_tail;\n\n};\n\n__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex)\n\n} // namespace tbb\n\n#endif /* __TBB_queuing_mutex_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/queuing_rw_mutex.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n/** Before making any changes in the implementation, please emulate algorithmic changes\n    with SPIN tool using <TBB directory>/tools/spin_models/ReaderWriterMutex.pml.\n    There could be some code looking as \"can be restructured\" but its structure does matter! */\n\n#include \"tbb/queuing_rw_mutex.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"itt_notify.h\"\n\n\nnamespace tbb {\n\nusing namespace internal;\n\n//! 
Flag bits in a state_t that specify information about a locking request.\nenum state_t_flags {\n    STATE_NONE                   = 0,\n    STATE_WRITER                 = 1<<0,\n    STATE_READER                 = 1<<1,\n    STATE_READER_UNBLOCKNEXT     = 1<<2,\n    STATE_ACTIVEREADER           = 1<<3,\n    STATE_UPGRADE_REQUESTED      = 1<<4,\n    STATE_UPGRADE_WAITING        = 1<<5,\n    STATE_UPGRADE_LOSER          = 1<<6,\n    STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT,\n    STATE_COMBINED_READER        = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER,\n    STATE_COMBINED_UPGRADING     = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER\n};\n\nconst unsigned char RELEASED = 0;\nconst unsigned char ACQUIRED = 1;\n\ninline bool queuing_rw_mutex::scoped_lock::try_acquire_internal_lock()\n{\n    return as_atomic(my_internal_lock).compare_and_swap<tbb::acquire>(ACQUIRED,RELEASED) == RELEASED;\n}\n\ninline void queuing_rw_mutex::scoped_lock::acquire_internal_lock()\n{\n    // Usually, we would use the test-test-and-set idiom here, with exponential backoff.\n    // But so far, experiments indicate there is no value in doing so here.\n    while( !try_acquire_internal_lock() ) {\n        __TBB_Pause(1);\n    }\n}\n\ninline void queuing_rw_mutex::scoped_lock::release_internal_lock()\n{\n    __TBB_store_with_release(my_internal_lock,RELEASED);\n}\n\ninline void queuing_rw_mutex::scoped_lock::wait_for_release_of_internal_lock()\n{\n    spin_wait_until_eq(my_internal_lock, RELEASED);\n}\n\ninline void queuing_rw_mutex::scoped_lock::unblock_or_wait_on_internal_lock( uintptr_t flag ) {\n    if( flag )\n        wait_for_release_of_internal_lock();\n    else\n        release_internal_lock();\n}\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warnings\n    #pragma warning (push)\n    #pragma warning (disable: 4311 4312)\n#endif\n\n//! 
A view of a T* with additional functionality for twiddling low-order bits.\ntemplate<typename T>\nclass tricky_atomic_pointer: no_copy {\npublic:\n    typedef typename atomic_selector<sizeof(T*)>::word word;\n\n    template<memory_semantics M>\n    static T* fetch_and_add( T* volatile * location, word addend ) {\n        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) );\n    }\n    template<memory_semantics M>\n    static T* fetch_and_store( T* volatile * location, T* value ) {\n        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) );\n    }\n    template<memory_semantics M>\n    static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) {\n        return reinterpret_cast<T*>(\n                 atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value),\n                                                              reinterpret_cast<word>(comparand))\n               );\n    }\n\n    T* & ref;\n    tricky_atomic_pointer( T*& original ) : ref(original) {};\n    tricky_atomic_pointer( T* volatile & original ) : ref(original) {};\n    T* operator&( word operand2 ) const {\n        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 );\n    }\n    T* operator|( word operand2 ) const {\n        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 );\n    }\n};\n\ntypedef tricky_atomic_pointer<queuing_rw_mutex::scoped_lock> tricky_pointer;\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warnings\n    #pragma warning (pop)\n#endif\n\n//! 
Mask for low order bit of a pointer.\nstatic const tricky_pointer::word FLAG = 0x1;\n\ninline\nuintptr_t get_flag( queuing_rw_mutex::scoped_lock* ptr ) {\n    return uintptr_t(ptr) & FLAG;\n}\n\n//------------------------------------------------------------------------\n// Methods of queuing_rw_mutex::scoped_lock\n//------------------------------------------------------------------------\n\n//! A method to acquire queuing_rw_mutex lock\nvoid queuing_rw_mutex::scoped_lock::acquire( queuing_rw_mutex& m, bool write )\n{\n    __TBB_ASSERT( !my_mutex, \"scoped_lock is already holding a mutex\");\n\n    // Must set all fields before the fetch_and_store, because once the\n    // fetch_and_store executes, *this becomes accessible to other threads.\n    my_mutex = &m;\n    __TBB_store_relaxed(my_prev , (scoped_lock*)0);\n    __TBB_store_relaxed(my_next , (scoped_lock*)0);\n    __TBB_store_relaxed(my_going, 0);\n    my_state = state_t(write ? STATE_WRITER : STATE_READER);\n    my_internal_lock = RELEASED;\n\n    queuing_rw_mutex::scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);\n\n    if( write ) {       // Acquiring for write\n\n        if( pred ) {\n            ITT_NOTIFY(sync_prepare, my_mutex);\n            pred = tricky_pointer(pred) & ~FLAG;\n            __TBB_ASSERT( !( uintptr_t(pred) & FLAG ), \"use of corrupted pointer!\" );\n#if TBB_USE_ASSERT\n            __TBB_control_consistency_helper(); // on \"m.q_tail\"\n            __TBB_ASSERT( !__TBB_load_relaxed(pred->my_next), \"the predecessor has another successor!\");\n#endif\n           __TBB_store_with_release(pred->my_next,this);\n            spin_wait_until_eq(my_going, 1);\n        }\n\n    } else {            // Acquiring for read\n#if DO_ITT_NOTIFY\n        bool sync_prepare_done = false;\n#endif\n        if( pred ) {\n            unsigned short pred_state;\n            __TBB_ASSERT( !__TBB_load_relaxed(my_prev), \"the predecessor is already set\" );\n            if( uintptr_t(pred) & FLAG ) 
{\n                /* this is only possible if pred is an upgrading reader and it signals us to wait */\n                pred_state = STATE_UPGRADE_WAITING;\n                pred = tricky_pointer(pred) & ~FLAG;\n            } else {\n                // Load pred->my_state now, because once pred->my_next becomes\n                // non-NULL, we must assume that *pred might be destroyed.\n                pred_state = pred->my_state.compare_and_swap<tbb::acquire>(STATE_READER_UNBLOCKNEXT, STATE_READER);\n            }\n            __TBB_store_relaxed(my_prev, pred);\n            __TBB_ASSERT( !( uintptr_t(pred) & FLAG ), \"use of corrupted pointer!\" );\n#if TBB_USE_ASSERT\n            __TBB_control_consistency_helper(); // on \"m.q_tail\"\n            __TBB_ASSERT( !__TBB_load_relaxed(pred->my_next), \"the predecessor has another successor!\");\n#endif\n           __TBB_store_with_release(pred->my_next,this);\n            if( pred_state != STATE_ACTIVEREADER ) {\n#if DO_ITT_NOTIFY\n                sync_prepare_done = true;\n                ITT_NOTIFY(sync_prepare, my_mutex);\n#endif\n                spin_wait_until_eq(my_going, 1);\n            }\n        }\n\n        // The protected state must have been acquired here before it can be further released to any other reader(s):\n        unsigned short old_state = my_state.compare_and_swap<tbb::acquire>(STATE_ACTIVEREADER, STATE_READER);\n        if( old_state!=STATE_READER ) {\n#if DO_ITT_NOTIFY\n            if( !sync_prepare_done )\n                ITT_NOTIFY(sync_prepare, my_mutex);\n#endif\n            // Failed to become active reader -> need to unblock the next waiting reader first\n            __TBB_ASSERT( my_state==STATE_READER_UNBLOCKNEXT, \"unexpected state\" );\n            spin_wait_while_eq(my_next, (scoped_lock*)NULL);\n            /* my_state should be changed before unblocking the next otherwise it might finish\n               and another thread can get our old state and left blocked */\n            
my_state = STATE_ACTIVEREADER;\n           __TBB_store_with_release(my_next->my_going,1);\n        }\n    }\n\n    ITT_NOTIFY(sync_acquired, my_mutex);\n\n    // Force acquire so that user's critical section receives correct values\n    // from processor that was previously in the user's critical section.\n    __TBB_load_with_acquire(my_going);\n}\n\n//! A method to acquire queuing_rw_mutex if it is free\nbool queuing_rw_mutex::scoped_lock::try_acquire( queuing_rw_mutex& m, bool write )\n{\n    __TBB_ASSERT( !my_mutex, \"scoped_lock is already holding a mutex\");\n\n    if( load<relaxed>(m.q_tail) )\n        return false; // Someone already took the lock\n\n    // Must set all fields before the fetch_and_store, because once the\n    // fetch_and_store executes, *this becomes accessible to other threads.\n    __TBB_store_relaxed(my_prev, (scoped_lock*)0);\n    __TBB_store_relaxed(my_next, (scoped_lock*)0);\n    __TBB_store_relaxed(my_going, 0); // TODO: remove dead assignment?\n    my_state = state_t(write ? STATE_WRITER : STATE_ACTIVEREADER);\n    my_internal_lock = RELEASED;\n\n    // The CAS must have release semantics, because we are\n    // \"sending\" the fields initialized above to other processors.\n    if( m.q_tail.compare_and_swap<tbb::release>(this, NULL) )\n        return false; // Someone already took the lock\n    // Force acquire so that user's critical section receives correct values\n    // from processor that was previously in the user's critical section.\n    // try_acquire should always have acquire semantic, even if failed.\n    __TBB_load_with_acquire(my_going);\n    my_mutex = &m;\n    ITT_NOTIFY(sync_acquired, my_mutex);\n    return true;\n}\n\n//! 
A method to release queuing_rw_mutex lock\nvoid queuing_rw_mutex::scoped_lock::release( )\n{\n    __TBB_ASSERT(my_mutex!=NULL, \"no lock acquired\");\n\n    ITT_NOTIFY(sync_releasing, my_mutex);\n\n    if( my_state == STATE_WRITER ) { // Acquired for write\n\n        // The logic below is the same as \"writerUnlock\", but elides\n        // \"return\" from the middle of the routine.\n        // In the statement below, acquire semantics of reading my_next is required\n        // so that following operations with fields of my_next are safe.\n        scoped_lock* n = __TBB_load_with_acquire(my_next);\n        if( !n ) {\n            if( this == my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {\n                // this was the only item in the queue, and the queue is now empty.\n                goto done;\n            }\n            spin_wait_while_eq( my_next, (scoped_lock*)NULL );\n            n = __TBB_load_with_acquire(my_next);\n        }\n        __TBB_store_relaxed(n->my_going, 2); // protect next queue node from being destroyed too early\n        if( n->my_state==STATE_UPGRADE_WAITING ) {\n            // the next waiting for upgrade means this writer was upgraded before.\n            acquire_internal_lock();\n            queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL);\n            n->my_state = STATE_UPGRADE_LOSER;\n            __TBB_store_with_release(n->my_going,1);\n            unblock_or_wait_on_internal_lock(get_flag(tmp));\n        } else {\n            __TBB_ASSERT( my_state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), \"unexpected state\" );\n            __TBB_ASSERT( !( uintptr_t(__TBB_load_relaxed(n->my_prev)) & FLAG ), \"use of corrupted pointer!\" );\n            __TBB_store_relaxed(n->my_prev, (scoped_lock*)0);\n            __TBB_store_with_release(n->my_going,1);\n        }\n\n    } else { // Acquired for read\n\n        queuing_rw_mutex::scoped_lock *tmp = NULL;\nretry:\n  
      // Addition to the original paper: Mark my_prev as in use\n        queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG);\n\n        if( pred ) {\n            if( !(pred->try_acquire_internal_lock()) )\n            {\n                // Failed to acquire the lock on pred. The predecessor either unlinks or upgrades.\n                // In the second case, it could or could not know my \"in use\" flag - need to check\n                tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred) | FLAG );\n                if( !(uintptr_t(tmp) & FLAG) ) {\n                    // Wait for the predecessor to change my_prev (e.g. during unlink)\n                    spin_wait_while_eq( my_prev, tricky_pointer(pred)|FLAG );\n                    // Now owner of pred is waiting for _us_ to release its lock\n                    pred->release_internal_lock();\n                }\n                // else the \"in use\" flag is back -> the predecessor didn't get it and will release itself; nothing to do\n\n                tmp = NULL;\n                goto retry;\n            }\n            __TBB_ASSERT(pred && pred->my_internal_lock==ACQUIRED, \"predecessor's lock is not acquired\");\n            __TBB_store_relaxed(my_prev, pred);\n            acquire_internal_lock();\n\n            __TBB_store_with_release(pred->my_next,reinterpret_cast<scoped_lock *>(NULL));\n\n            if( !__TBB_load_relaxed(my_next) && this != my_mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) {\n                spin_wait_while_eq( my_next, (void*)NULL );\n            }\n            __TBB_ASSERT( !get_flag(__TBB_load_relaxed(my_next)), \"use of corrupted pointer\" );\n\n            // ensure acquire semantics of reading 'my_next'\n            if( scoped_lock *const l_next = __TBB_load_with_acquire(my_next) ) { // I->next != nil, TODO: rename to n after clearing up and adapting the n in the comment two lines below\n    
            // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0\n                tmp = tricky_pointer::fetch_and_store<tbb::release>(&(l_next->my_prev), pred);\n                // I->prev->next = I->next;\n                __TBB_ASSERT(__TBB_load_relaxed(my_prev)==pred, NULL);\n                __TBB_store_with_release(pred->my_next, my_next);\n            }\n            // Safe to release in the order opposite to acquiring which makes the code simpler\n            pred->release_internal_lock();\n\n        } else { // No predecessor when we looked\n            acquire_internal_lock();  // \"exclusiveLock(&I->EL)\"\n            scoped_lock* n = __TBB_load_with_acquire(my_next);\n            if( !n ) {\n                if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {\n                    spin_wait_while_eq( my_next, (scoped_lock*)NULL );\n                    n = __TBB_load_relaxed(my_next);\n                } else {\n                    goto unlock_self;\n                }\n            }\n            __TBB_store_relaxed(n->my_going, 2); // protect next queue node from being destroyed too early\n            tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL);\n            __TBB_store_with_release(n->my_going,1);\n        }\nunlock_self:\n        unblock_or_wait_on_internal_lock(get_flag(tmp));\n    }\ndone:\n    spin_wait_while_eq( my_going, 2 );\n\n    initialize();\n}\n\nbool queuing_rw_mutex::scoped_lock::downgrade_to_reader()\n{\n    __TBB_ASSERT( my_state==STATE_WRITER, \"no sense to downgrade a reader\" );\n\n    ITT_NOTIFY(sync_releasing, my_mutex);\n\n    if( ! 
__TBB_load_with_acquire(my_next) ) {\n        my_state = STATE_READER;\n        if( this==my_mutex->q_tail ) {\n            unsigned short old_state = my_state.compare_and_swap<tbb::release>(STATE_ACTIVEREADER, STATE_READER);\n            if( old_state==STATE_READER ) {\n                // Downgrade completed\n                return true;\n            }\n        }\n        /* wait for the next to register */\n        spin_wait_while_eq( my_next, (void*)NULL );\n    }\n    scoped_lock *const n = __TBB_load_relaxed(my_next);\n    __TBB_ASSERT( n, \"still no successor at this point!\" );\n    if( n->my_state & STATE_COMBINED_WAITINGREADER )\n        __TBB_store_with_release(n->my_going,1);\n    else if( n->my_state==STATE_UPGRADE_WAITING )\n        // the next waiting for upgrade means this writer was upgraded before.\n        n->my_state = STATE_UPGRADE_LOSER;\n    my_state = STATE_ACTIVEREADER;\n    return true;\n}\n\nbool queuing_rw_mutex::scoped_lock::upgrade_to_writer()\n{\n    __TBB_ASSERT( my_state==STATE_ACTIVEREADER, \"only active reader can be upgraded\" );\n\n    queuing_rw_mutex::scoped_lock * tmp;\n    queuing_rw_mutex::scoped_lock * me = this;\n\n    ITT_NOTIFY(sync_releasing, my_mutex);\n    my_state = STATE_UPGRADE_REQUESTED;\nrequested:\n    __TBB_ASSERT( !(uintptr_t(__TBB_load_relaxed(my_next)) & FLAG), \"use of corrupted pointer!\" );\n    acquire_internal_lock();\n    if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(tricky_pointer(me)|FLAG, this) ) {\n        spin_wait_while_eq( my_next, (void*)NULL );\n        queuing_rw_mutex::scoped_lock * n;\n        n = tricky_pointer::fetch_and_add<tbb::acquire>(&my_next, FLAG);\n        unsigned short n_state = n->my_state;\n        /* the next reader can be blocked by our state. 
the best thing to do is to unblock it */\n        if( n_state & STATE_COMBINED_WAITINGREADER )\n            __TBB_store_with_release(n->my_going,1);\n        tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), this);\n        unblock_or_wait_on_internal_lock(get_flag(tmp));\n        if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) {\n            // save n|FLAG for simplicity of following comparisons\n            tmp = tricky_pointer(n)|FLAG;\n            for( atomic_backoff b; __TBB_load_relaxed(my_next)==tmp; b.pause() ) {\n                if( my_state & STATE_COMBINED_UPGRADING ) {\n                    if( __TBB_load_with_acquire(my_next)==tmp )\n                        __TBB_store_relaxed(my_next, n);\n                    goto waiting;\n                }\n            }\n            __TBB_ASSERT(__TBB_load_relaxed(my_next) != (tricky_pointer(n)|FLAG), NULL);\n            goto requested;\n        } else {\n            __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), \"unexpected state\");\n            __TBB_ASSERT( (tricky_pointer(n)|FLAG) == __TBB_load_relaxed(my_next), NULL);\n            __TBB_store_relaxed(my_next, n);\n        }\n    } else {\n        /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */\n        release_internal_lock();\n    } // if( this != my_mutex->q_tail... 
)\n    my_state.compare_and_swap<tbb::acquire>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);\n\nwaiting:\n    __TBB_ASSERT( !( intptr_t(__TBB_load_relaxed(my_next)) & FLAG ), \"use of corrupted pointer!\" );\n    __TBB_ASSERT( my_state & STATE_COMBINED_UPGRADING, \"wrong state at upgrade waiting_retry\" );\n    __TBB_ASSERT( me==this, NULL );\n    ITT_NOTIFY(sync_prepare, my_mutex);\n    /* if no one was blocked by the \"corrupted\" q_tail, turn it back */\n    my_mutex->q_tail.compare_and_swap<tbb::release>( this, tricky_pointer(me)|FLAG );\n    queuing_rw_mutex::scoped_lock * pred;\n    pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG);\n    if( pred ) {\n        bool success = pred->try_acquire_internal_lock();\n        pred->my_state.compare_and_swap<tbb::release>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);\n        if( !success ) {\n            tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred)|FLAG );\n            if( uintptr_t(tmp) & FLAG ) {\n                spin_wait_while_eq(my_prev, pred);\n                pred = __TBB_load_relaxed(my_prev);\n            } else {\n                spin_wait_while_eq( my_prev, tricky_pointer(pred)|FLAG );\n                pred->release_internal_lock();\n            }\n        } else {\n            __TBB_store_relaxed(my_prev, pred);\n            pred->release_internal_lock();\n            spin_wait_while_eq(my_prev, pred);\n            pred = __TBB_load_relaxed(my_prev);\n        }\n        if( pred )\n            goto waiting;\n    } else {\n        // restore the corrupted my_prev field for possible further use (e.g. 
if downgrade back to reader)\n        __TBB_store_relaxed(my_prev, pred);\n    }\n    __TBB_ASSERT( !pred && !__TBB_load_relaxed(my_prev), NULL );\n\n    // additional lifetime issue prevention checks\n    // wait for the successor to finish working with my fields\n    wait_for_release_of_internal_lock();\n    // now wait for the predecessor to finish working with my fields\n    spin_wait_while_eq( my_going, 2 );\n\n    // Acquire critical section indirectly from previous owner or directly from predecessor (TODO: not clear).\n    __TBB_control_consistency_helper(); // on either \"my_mutex->q_tail\" or \"my_going\" (TODO: not clear)\n\n    bool result = ( my_state != STATE_UPGRADE_LOSER );\n    my_state = STATE_WRITER;\n    __TBB_store_relaxed(my_going, 1);\n\n    ITT_NOTIFY(sync_acquired, my_mutex);\n    return result;\n}\n\nvoid queuing_rw_mutex::internal_construct() {\n    ITT_SYNC_CREATE(this, _T(\"tbb::queuing_rw_mutex\"), _T(\"\"));\n}\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/queuing_rw_mutex.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_queuing_rw_mutex_H\n#define __TBB_queuing_rw_mutex_H\n\n#include \"tbb_config.h\"\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <cstring>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\n#include \"atomic.h\"\n#include \"tbb_profiling.h\"\n\nnamespace tbb {\n\n//! 
Queuing reader-writer mutex with local-only spinning.\n/** Adapted from Krieger, Stumm, et al. pseudocode at\n    http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93\n    @ingroup synchronization */\nclass queuing_rw_mutex : internal::mutex_copy_deprecated_and_disabled {\npublic:\n    //! Construct unacquired mutex.\n    queuing_rw_mutex() {\n        q_tail = NULL;\n#if TBB_USE_THREADING_TOOLS\n        internal_construct();\n#endif\n    }\n\n    //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-NULL\n    ~queuing_rw_mutex() {\n#if TBB_USE_ASSERT\n        __TBB_ASSERT( !q_tail, \"destruction of an acquired mutex\");\n#endif\n    }\n\n    //! The scoped locking pattern\n    /** It helps to avoid the common problem of forgetting to release lock.\n        It also nicely provides the \"node\" for queuing locks. */\n    class scoped_lock: internal::no_copy {\n        //! Initialize fields to mean \"no lock held\".\n        void initialize() {\n            my_mutex = NULL;\n#if TBB_USE_ASSERT\n            my_state = 0xFF; // Set to invalid state\n            internal::poison_pointer(my_next);\n            internal::poison_pointer(my_prev);\n#endif /* TBB_USE_ASSERT */\n        }\n\n    public:\n        //! Construct lock that has not acquired a mutex.\n        /** Equivalent to zero-initialization of *this. */\n        scoped_lock() {initialize();}\n\n        //! Acquire lock on given mutex.\n        scoped_lock( queuing_rw_mutex& m, bool write=true ) {\n            initialize();\n            acquire(m,write);\n        }\n\n        //! Release lock (if lock is held).\n        ~scoped_lock() {\n            if( my_mutex ) release();\n        }\n\n        //! Acquire lock on given mutex.\n        void acquire( queuing_rw_mutex& m, bool write=true );\n\n        //! Acquire lock on given mutex if free (i.e. non-blocking)\n        bool try_acquire( queuing_rw_mutex& m, bool write=true );\n\n        //! 
Release lock.\n        void release();\n\n        //! Upgrade reader to become a writer.\n        /** Returns whether the upgrade happened without releasing and re-acquiring the lock */\n        bool upgrade_to_writer();\n\n        //! Downgrade writer to become a reader.\n        bool downgrade_to_reader();\n\n    private:\n        //! The pointer to the mutex owned, or NULL if not holding a mutex.\n        queuing_rw_mutex* my_mutex;\n\n        //! The pointer to the previous and next competitors for a mutex\n        scoped_lock *__TBB_atomic my_prev, *__TBB_atomic my_next;\n\n        typedef unsigned char state_t;\n\n        //! State of the request: reader, writer, active reader, other service states\n        atomic<state_t> my_state;\n\n        //! The local spin-wait variable\n        /** Corresponds to \"spin\" in the pseudocode but inverted for the sake of zero-initialization */\n        unsigned char __TBB_atomic my_going;\n\n        //! A tiny internal lock\n        unsigned char my_internal_lock;\n\n        //! Acquire the internal lock\n        void acquire_internal_lock();\n\n        //! Try to acquire the internal lock\n        /** Returns true if lock was successfully acquired. */\n        bool try_acquire_internal_lock();\n\n        //! Release the internal lock\n        void release_internal_lock();\n\n        //! Wait for internal lock to be released\n        void wait_for_release_of_internal_lock();\n\n        //! A helper function\n        void unblock_or_wait_on_internal_lock( uintptr_t );\n    };\n\n    void __TBB_EXPORTED_METHOD internal_construct();\n\n    // Mutex traits\n    static const bool is_rw_mutex = true;\n    static const bool is_recursive_mutex = false;\n    static const bool is_fair_mutex = true;\n\nprivate:\n    //! The last competitor requesting the lock\n    atomic<scoped_lock*> q_tail;\n\n};\n\n__TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex)\n\n} // namespace tbb\n\n#endif /* __TBB_queuing_rw_mutex_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/reader_writer_lock.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/reader_writer_lock.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/tbb_exception.h\"\n#include \"itt_notify.h\"\n\n#if defined(_MSC_VER) && defined(_Wp64)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (disable: 4244)\n#endif\n\nnamespace tbb {\nnamespace interface5 {\n\nconst uintptr_t WFLAG1 = 0x1;  // writer interested or active\nconst uintptr_t WFLAG2 = 0x2;  // writers interested, no entering readers\nconst uintptr_t RFLAG = 0x4;   // reader interested but not active\nconst uintptr_t RC_INCR = 0x8; // to adjust reader count\n\n\n// Perform an atomic bitwise-OR on the operand, and return its previous value.\ninline uintptr_t fetch_and_or(atomic<uintptr_t>& operand, uintptr_t value) {\n    for (tbb::internal::atomic_backoff b;;b.pause()) {\n        uintptr_t old = operand;\n        uintptr_t result = operand.compare_and_swap(old|value, old);\n        if (result==old) return result;\n    }\n}\n\n// Perform an atomic bitwise-AND on the operand, and return its previous value.\ninline uintptr_t fetch_and_and(atomic<uintptr_t>& operand, uintptr_t value) {\n    for (tbb::internal::atomic_backoff b;;b.pause()) {\n        uintptr_t old = operand;\n        uintptr_t result = operand.compare_and_swap(old&value, old);\n        if (result==old) return result;\n    }\n}\n\n//! Spin WHILE the value at the location is greater than or equal to a given value\n/** T and U should be comparable types. */\ntemplate<typename T, typename U>\nvoid spin_wait_while_geq( const volatile T& location, U value ) {\n    tbb::internal::atomic_backoff backoff;\n    while( location>=value ) backoff.pause();\n}\n\n//! Spin UNTIL (location & value) is true.\n/** T and U should be comparable types. 
*/\ntemplate<typename T, typename U>\nvoid spin_wait_until_and( const volatile T& location, U value ) {\n    tbb::internal::atomic_backoff backoff;\n    while( !(location & value) ) backoff.pause();\n}\n\n\nvoid reader_writer_lock::internal_construct() {\n    reader_head = NULL;\n    writer_head = NULL;\n    writer_tail = NULL;\n    rdr_count_and_flags = 0;\n    my_current_writer = tbb_thread::id();\n#if TBB_USE_THREADING_TOOLS\n    ITT_SYNC_CREATE(this, _T(\"tbb::reader_writer_lock\"), _T(\"\"));\n#endif /* TBB_USE_THREADING_TOOLS */\n}\n\nvoid reader_writer_lock::internal_destroy() {\n    __TBB_ASSERT(rdr_count_and_flags==0, \"reader_writer_lock destroyed with pending readers/writers.\");\n    __TBB_ASSERT(reader_head==NULL, \"reader_writer_lock destroyed with pending readers.\");\n    __TBB_ASSERT(writer_tail==NULL, \"reader_writer_lock destroyed with pending writers.\");\n    __TBB_ASSERT(writer_head==NULL, \"reader_writer_lock destroyed with pending/active writers.\");\n}\n\n// Acquires the reader_writer_lock for write.    If the lock is currently held in write\n// mode by another context, the writer will block by spinning on a local variable.\n// Throws exception improper_lock if the context tries to acquire a\n// reader_writer_lock that it already has write ownership of.\nvoid reader_writer_lock::lock() {\n    if (is_current_writer()) { // recursive lock attempt\n        // we don't support recursive writer locks; throw exception\n        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);\n    }\n    else {\n        scoped_lock *a_writer_lock = new scoped_lock();\n        (void) start_write(a_writer_lock);\n    }\n}\n\n// Tries to acquire the reader_writer_lock for write.    
This function does not block.\n// Return Value: True or false, depending on whether the lock is acquired or not.\n// If the lock is already held by this acquiring context, try_lock() returns false.\nbool reader_writer_lock::try_lock() {\n    if (is_current_writer()) { // recursive lock attempt\n        return false;\n    }\n    else {\n        scoped_lock *a_writer_lock = new scoped_lock();\n        a_writer_lock->status = waiting_nonblocking;\n        return start_write(a_writer_lock);\n    }\n}\n\nbool reader_writer_lock::start_write(scoped_lock *I) {\n    tbb_thread::id id = this_tbb_thread::get_id();\n    scoped_lock *pred = NULL;\n    if (I->status == waiting_nonblocking) {\n        if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) {\n            delete I;\n            return false;\n        }\n    }\n    else {\n        ITT_NOTIFY(sync_prepare, this);\n        pred = writer_tail.fetch_and_store(I);\n    }\n    if (pred)\n        pred->next = I;\n    else {\n        set_next_writer(I);\n        if (I->status == waiting_nonblocking) {\n            if (I->next) { // potentially more writers\n                set_next_writer(I->next);\n            }\n            else { // no more writers\n                writer_head.fetch_and_store(NULL);\n                if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added\n                    spin_wait_while_eq(I->next, (scoped_lock *)NULL);  // wait for new writer to be added\n                    __TBB_ASSERT(I->next, \"There should be a node following the last writer.\");\n                    set_next_writer(I->next);\n                }\n            }\n            delete I;\n            return false;\n        }\n    }\n    spin_wait_while_eq(I->status, waiting);\n    ITT_NOTIFY(sync_acquired, this);\n    my_current_writer = id;\n    return true;\n}\n\nvoid reader_writer_lock::set_next_writer(scoped_lock *W) {\n    writer_head = W;\n    if (W->status == 
waiting_nonblocking) {\n        if (rdr_count_and_flags.compare_and_swap(WFLAG1+WFLAG2, 0) == 0) {\n            W->status = active;\n        }\n    }\n    else {\n        if (fetch_and_or(rdr_count_and_flags, WFLAG1) & RFLAG) { // reader present\n            spin_wait_until_and(rdr_count_and_flags, WFLAG2); // block until readers set WFLAG2\n        }\n        else { // no reader in timing window\n            __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2);\n        }\n        spin_wait_while_geq(rdr_count_and_flags, RC_INCR); // block until readers finish\n        W->status = active;\n   }\n}\n\n// Acquires the reader_writer_lock for read.    If the lock is currently held by a writer,\n// this reader will block and wait until the writers are done.\n// Throws exception improper_lock when the context tries to acquire a reader_writer_lock\n// that it already has write ownership of.\nvoid reader_writer_lock::lock_read() {\n    if (is_current_writer()) { // recursive lock attempt\n        // we don't support writer->reader downgrade; throw exception\n        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);\n    }\n    else {\n        scoped_lock_read a_reader_lock;\n        start_read(&a_reader_lock);\n    }\n}\n\n// Tries to acquire the reader_writer_lock for read.    
This function does not block.\n// Return Value: True or false, depending on whether the lock is acquired or not.\nbool reader_writer_lock::try_lock_read() {\n    if (is_current_writer()) { // recursive lock attempt\n        return false;\n    }\n    else {\n        if (rdr_count_and_flags.fetch_and_add(RC_INCR) & (WFLAG1+WFLAG2)) { // writers present\n            rdr_count_and_flags -= RC_INCR;\n            return false;\n        }\n        else { // no writers\n            ITT_NOTIFY(sync_acquired, this);\n            return true;\n        }\n    }\n}\n\nvoid reader_writer_lock::start_read(scoped_lock_read *I) {\n    ITT_NOTIFY(sync_prepare, this);\n    I->next = reader_head.fetch_and_store(I);\n    if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags\n        // unblock and/or update statuses of non-blocking readers\n        if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers\n            unblock_readers();\n        }\n    }\n    __TBB_ASSERT(I->status == waiting || I->status == active, \"Lock requests should be waiting or active before blocking.\");\n    spin_wait_while_eq(I->status, waiting); // block\n    if (I->next) {\n        __TBB_ASSERT(I->next->status == waiting, NULL);\n        rdr_count_and_flags += RC_INCR;\n        I->next->status = active; // wake successor\n    }\n    ITT_NOTIFY(sync_acquired, this);\n}\n\nvoid reader_writer_lock::unblock_readers() {\n    // clear rdr interest flag, increment rdr count\n    __TBB_ASSERT(rdr_count_and_flags&RFLAG, NULL);\n    rdr_count_and_flags += RC_INCR-RFLAG;\n    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, NULL);\n    // indicate clear of window\n    if (rdr_count_and_flags & WFLAG1 && !(rdr_count_and_flags & WFLAG2)) {\n        __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2);\n    }\n    // unblock waiting readers\n    scoped_lock_read *head = reader_head.fetch_and_store(NULL);\n    __TBB_ASSERT(head, NULL);\n    __TBB_ASSERT(head->status == waiting, 
NULL);\n    head->status = active;\n}\n\n// Releases the reader_writer_lock\nvoid reader_writer_lock::unlock() {\n    if( my_current_writer!=tbb_thread::id() ) {\n        // A writer owns the lock\n        __TBB_ASSERT(is_current_writer(), \"caller of reader_writer_lock::unlock() does not own the lock.\");\n        __TBB_ASSERT(writer_head, NULL);\n        __TBB_ASSERT(writer_head->status==active, NULL);\n        scoped_lock *a_writer_lock = writer_head;\n        end_write(a_writer_lock);\n        __TBB_ASSERT(a_writer_lock != writer_head, \"Internal error: About to turn writer_head into dangling reference.\");\n        delete a_writer_lock;\n    } else {\n        end_read();\n    }\n}\n\nvoid reader_writer_lock::end_write(scoped_lock *I) {\n    __TBB_ASSERT(I==writer_head, \"Internal error: can't unlock a thread that is not holding the lock.\");\n    my_current_writer = tbb_thread::id();\n    ITT_NOTIFY(sync_releasing, this);\n    if (I->next) { // potentially more writers\n        writer_head = I->next;\n        writer_head->status = active;\n    }\n    else { // No more writers; clear writer flag, test reader interest flag\n        __TBB_ASSERT(writer_head, NULL);\n        if (fetch_and_and(rdr_count_and_flags, ~(WFLAG1+WFLAG2)) & RFLAG) {\n            unblock_readers();\n        }\n        writer_head.fetch_and_store(NULL);\n        if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added\n            spin_wait_while_eq(I->next, (scoped_lock *)NULL);  // wait for new writer to be added\n            __TBB_ASSERT(I->next, \"There should be a node following the last writer.\");\n            set_next_writer(I->next);\n        }\n    }\n}\n\nvoid reader_writer_lock::end_read() {\n    ITT_NOTIFY(sync_releasing, this);\n    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, \"unlock() called but no readers hold the lock.\");\n    rdr_count_and_flags -= RC_INCR;\n}\n\ninline bool reader_writer_lock::is_current_writer() {\n    
return my_current_writer==this_tbb_thread::get_id();\n}\n\n// Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock\nvoid reader_writer_lock::scoped_lock::internal_construct (reader_writer_lock& lock) {\n    mutex = &lock;\n    next = NULL;\n    status = waiting;\n    if (mutex->is_current_writer()) { // recursive lock attempt\n        // we don't support recursive writer locks; throw exception\n        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);\n    }\n    else { // this thread holds no locks\n        (void) mutex->start_write(this);\n    }\n}\n\ninline reader_writer_lock::scoped_lock::scoped_lock() : mutex(NULL), next(NULL) { \n    status = waiting;\n}\n\n// Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock\nvoid reader_writer_lock::scoped_lock_read::internal_construct (reader_writer_lock& lock) {\n    mutex = &lock;\n    next = NULL;\n    status = waiting;\n    if (mutex->is_current_writer()) { // recursive lock attempt\n        // we don't support writer->reader downgrade; throw exception\n        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);\n    }\n    else { // this thread holds no locks\n        mutex->start_read(this);\n    }\n}\n\ninline reader_writer_lock::scoped_lock_read::scoped_lock_read() : mutex(NULL), next(NULL) {\n    status = waiting;\n}\n\nvoid reader_writer_lock::scoped_lock::internal_destroy() {\n    if (mutex) {\n        __TBB_ASSERT(mutex->is_current_writer(), \"~scoped_lock() destroyed by thread different than thread that holds lock.\");\n        mutex->end_write(this);\n    }\n    status = invalid;\n}\n\nvoid reader_writer_lock::scoped_lock_read::internal_destroy() {\n    if (mutex)\n        mutex->end_read();\n    status = invalid;\n}\n\n} // namespace interface5\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/reader_writer_lock.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_reader_writer_lock_H\n#define __TBB_reader_writer_lock_H\n\n#include \"tbb_thread.h\"\n#include \"tbb_allocator.h\"\n#include \"atomic.h\"\n\nnamespace tbb {\nnamespace interface5 {\n//! 
Writer-preference reader-writer lock with local-only spinning on readers.\n/** Loosely adapted from Mellor-Crummey and Scott pseudocode at\n    http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp\n    @ingroup synchronization */\n    class reader_writer_lock : tbb::internal::no_copy {\n public:\n    friend class scoped_lock;\n    friend class scoped_lock_read;\n    //! Status type for nodes associated with lock instances\n    /** waiting_nonblocking: the wait state for nonblocking lock\n          instances; for writes, these transition straight to active\n          states; for reads, these are unused.\n\n        waiting: the start and spin state for all lock instances; these will\n          transition to active state when appropriate.  Non-blocking write locks\n          transition from this state to waiting_nonblocking immediately.\n\n        active: the active state means that the lock instance holds\n          the lock; it will transition to invalid state during node deletion\n\n        invalid: the end state for all nodes; this is set in the\n          destructor so if we encounter this state, we are looking at\n          memory that has already been freed\n\n        The state diagrams below describe the status transitions.\n        Single arrows indicate that the thread that owns the node is\n        responsible for the transition; double arrows indicate that\n        any thread could make the transition.\n\n        State diagram for scoped_lock status:\n\n        waiting ----------> waiting_nonblocking\n          |     _____________/       |\n          V    V                     V\n        active -----------------> invalid\n\n        State diagram for scoped_lock_read status:\n\n        waiting\n          |\n          V\n        active ----------------->invalid\n\n    */\n    enum status_t { waiting_nonblocking, waiting, active, invalid };\n\n    //! 
Constructs a new reader_writer_lock\n    reader_writer_lock() {\n        internal_construct();\n    }\n\n    //! Destructs a reader_writer_lock object\n    ~reader_writer_lock() {\n        internal_destroy();\n    }\n\n    //! The scoped lock pattern for write locks\n    /** Scoped locks help avoid the common problem of forgetting to release the lock.\n        This type also serves as the node for queuing locks. */\n    class scoped_lock : tbb::internal::no_copy {\n    public:\n        friend class reader_writer_lock;\n\n        //! Construct with blocking attempt to acquire write lock on the passed-in lock\n        scoped_lock(reader_writer_lock& lock) {\n            internal_construct(lock);\n        }\n\n        //! Destructor, releases the write lock\n        ~scoped_lock() {\n            internal_destroy();\n        }\n\n        void* operator new(size_t s) {\n            return tbb::internal::allocate_via_handler_v3(s);\n        }\n        void operator delete(void* p) {\n            tbb::internal::deallocate_via_handler_v3(p);\n        }\n\n    private:\n        //! The pointer to the mutex to lock\n        reader_writer_lock *mutex;\n        //! The next queued competitor for the mutex\n        scoped_lock* next;\n        //! Status flag of the thread associated with this node\n        atomic<status_t> status;\n\n        //! Construct scoped_lock that is not holding lock\n        scoped_lock();\n\n        void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&);\n        void __TBB_EXPORTED_METHOD internal_destroy();\n   };\n\n    //! The scoped lock pattern for read locks\n    class scoped_lock_read : tbb::internal::no_copy {\n    public:\n        friend class reader_writer_lock;\n\n        //! Construct with blocking attempt to acquire read lock on the passed-in lock\n        scoped_lock_read(reader_writer_lock& lock) {\n            internal_construct(lock);\n        }\n\n        //! 
Destructor, releases the read lock\n        ~scoped_lock_read() {\n            internal_destroy();\n        }\n\n        void* operator new(size_t s) {\n            return tbb::internal::allocate_via_handler_v3(s);\n        }\n        void operator delete(void* p) {\n            tbb::internal::deallocate_via_handler_v3(p);\n        }\n\n    private:\n        //! The pointer to the mutex to lock\n        reader_writer_lock *mutex;\n        //! The next queued competitor for the mutex\n        scoped_lock_read *next;\n        //! Status flag of the thread associated with this node\n        atomic<status_t> status;\n\n        //! Construct scoped_lock_read that is not holding lock\n        scoped_lock_read();\n\n        void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&);\n        void __TBB_EXPORTED_METHOD internal_destroy();\n    };\n\n    //! Acquires the reader_writer_lock for write.\n    /** If the lock is currently held in write mode by another\n        context, the writer will block by spinning on a local\n        variable.  Exceptions thrown: improper_lock The context tries\n        to acquire a reader_writer_lock that it already has write\n        ownership of.*/\n    void __TBB_EXPORTED_METHOD lock();\n\n    //! Tries to acquire the reader_writer_lock for write.\n    /** This function does not block.  Return Value: True or false,\n        depending on whether the lock is acquired or not.  If the lock\n        is already held by this acquiring context, try_lock() returns\n        false. */\n    bool __TBB_EXPORTED_METHOD try_lock();\n\n    //! Acquires the reader_writer_lock for read.\n    /** If the lock is currently held by a writer, this reader will\n        block and wait until the writers are done.  Exceptions thrown:\n        improper_lock The context tries to acquire a\n        reader_writer_lock that it already has write ownership of. */\n    void __TBB_EXPORTED_METHOD lock_read();\n\n    //! 
Tries to acquire the reader_writer_lock for read.\n    /** This function does not block.  Return Value: True or false,\n        depending on whether the lock is acquired or not.  */\n    bool __TBB_EXPORTED_METHOD try_lock_read();\n\n    //! Releases the reader_writer_lock\n    void __TBB_EXPORTED_METHOD unlock();\n\n private:\n    void __TBB_EXPORTED_METHOD internal_construct();\n    void __TBB_EXPORTED_METHOD internal_destroy();\n\n    //! Attempts to acquire write lock\n    /** If unavailable, spins in blocking case, returns false in non-blocking case. */\n    bool start_write(scoped_lock *);\n    //! Sets writer_head to w and attempts to unblock\n    void set_next_writer(scoped_lock *w);\n    //! Relinquishes write lock to next waiting writer or group of readers\n    void end_write(scoped_lock *);\n    //! Checks if current thread holds write lock\n    bool is_current_writer();\n\n    //! Attempts to acquire read lock\n    /** If unavailable, spins in blocking case, returns false in non-blocking case. */\n    void start_read(scoped_lock_read *);\n    //! Unblocks pending readers\n    void unblock_readers();\n    //! Relinquishes read lock by decrementing counter; last reader wakes pending writer\n    void end_read();\n\n    //! The list of pending readers\n    atomic<scoped_lock_read*> reader_head;\n    //! The list of pending writers\n    atomic<scoped_lock*> writer_head;\n    //! The last node in the list of pending writers\n    atomic<scoped_lock*> writer_tail;\n    //! Writer that owns the mutex; tbb_thread::id() otherwise.\n    tbb_thread::id my_current_writer;\n    //! Status of mutex\n    atomic<uintptr_t> rdr_count_and_flags; // used with __TBB_AtomicOR, which assumes uintptr_t\n};\n\n} // namespace interface5\n\nusing interface5::reader_writer_lock;\n\n} // namespace tbb\n\n#endif /* __TBB_reader_writer_lock_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/recursive_mutex.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/recursive_mutex.h\"\n#include \"itt_notify.h\"\n\nnamespace tbb {\n\nvoid recursive_mutex::scoped_lock::internal_acquire( recursive_mutex& m ) {\n#if _WIN32||_WIN64\n    switch( m.state ) {\n      case INITIALIZED:\n        // since we cannot look into the internal of the CriticalSection object\n        // we won't know how many times the lock has been acquired, and thus\n        // we won't know when we may safely set the state back to INITIALIZED\n        // if we change the state to HELD as in mutex.cpp.  
thus, we won't change\n        // the state for recursive_mutex\n        EnterCriticalSection( &m.impl );\n        break;\n      case DESTROYED:\n        __TBB_ASSERT(false,\"recursive_mutex::scoped_lock: mutex already destroyed\");\n        break;\n      default:\n        __TBB_ASSERT(false,\"recursive_mutex::scoped_lock: illegal mutex state\");\n        break;\n    }\n#else\n    int error_code = pthread_mutex_lock(&m.impl);\n    if( error_code )\n        tbb::internal::handle_perror(error_code,\"recursive_mutex::scoped_lock: pthread_mutex_lock failed\");\n#endif /* _WIN32||_WIN64 */\n    my_mutex = &m;\n}\n\nvoid recursive_mutex::scoped_lock::internal_release() {\n    __TBB_ASSERT( my_mutex, \"recursive_mutex::scoped_lock: not holding a mutex\" );\n#if _WIN32||_WIN64    \n    switch( my_mutex->state ) {\n      case INITIALIZED: \n        LeaveCriticalSection( &my_mutex->impl );\n        break;\n      case DESTROYED: \n        __TBB_ASSERT(false,\"recursive_mutex::scoped_lock: mutex already destroyed\"); \n        break;\n      default: \n        __TBB_ASSERT(false,\"recursive_mutex::scoped_lock: illegal mutex state\");\n        break;\n    }\n#else\n     int error_code = pthread_mutex_unlock(&my_mutex->impl);\n     __TBB_ASSERT_EX(!error_code, \"recursive_mutex::scoped_lock: pthread_mutex_unlock failed\");\n#endif /* _WIN32||_WIN64 */\n     my_mutex = NULL;\n}\n\nbool recursive_mutex::scoped_lock::internal_try_acquire( recursive_mutex& m ) {\n#if _WIN32||_WIN64\n    switch( m.state ) {\n      case INITIALIZED: \n        break;\n      case DESTROYED: \n        __TBB_ASSERT(false,\"recursive_mutex::scoped_lock: mutex already destroyed\"); \n        break;\n      default: \n        __TBB_ASSERT(false,\"recursive_mutex::scoped_lock: illegal mutex state\");\n        break;\n    }\n#endif /* _WIN32||_WIN64 */\n    bool result;\n#if _WIN32||_WIN64\n    result = TryEnterCriticalSection(&m.impl)!=0;\n#else\n    result = pthread_mutex_trylock(&m.impl)==0;\n#endif /* 
_WIN32||_WIN64 */\n    if( result )\n        my_mutex = &m;\n    return result;\n}\n\nvoid recursive_mutex::internal_construct() {\n#if _WIN32||_WIN64\n    InitializeCriticalSectionEx(&impl, 4000, 0);\n    state = INITIALIZED;\n#else\n    pthread_mutexattr_t mtx_attr;\n    int error_code = pthread_mutexattr_init( &mtx_attr );\n    if( error_code )\n        tbb::internal::handle_perror(error_code,\"recursive_mutex: pthread_mutexattr_init failed\");\n\n    pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE );\n    error_code = pthread_mutex_init( &impl, &mtx_attr );\n    if( error_code )\n        tbb::internal::handle_perror(error_code,\"recursive_mutex: pthread_mutex_init failed\");\n    pthread_mutexattr_destroy( &mtx_attr );\n#endif /* _WIN32||_WIN64*/    \n    ITT_SYNC_CREATE(&impl, _T(\"tbb::recursive_mutex\"), _T(\"\"));\n}\n\nvoid recursive_mutex::internal_destroy() {\n#if _WIN32||_WIN64\n    switch( state ) {\n      case INITIALIZED:\n        DeleteCriticalSection(&impl);\n        break;\n      case DESTROYED: \n        __TBB_ASSERT(false,\"recursive_mutex: already destroyed\");\n        break;\n      default: \n        __TBB_ASSERT(false,\"recursive_mutex: illegal state for destruction\");\n        break;\n    }\n    state = DESTROYED;\n#else\n    int error_code = pthread_mutex_destroy(&impl); \n    __TBB_ASSERT_EX(!error_code,\"recursive_mutex: pthread_mutex_destroy failed\");\n#endif /* _WIN32||_WIN64 */\n}\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/recursive_mutex.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_recursive_mutex_H\n#define __TBB_recursive_mutex_H\n\n#if _WIN32||_WIN64\n#include \"machine/windows_api.h\"\n#else\n#include <pthread.h>\n#endif /* _WIN32||_WIN64 */\n\n#include <new>\n#include \"aligned_space.h\"\n#include \"tbb_stddef.h\"\n#include \"tbb_profiling.h\"\n\nnamespace tbb {\n//! Mutex that allows recursive mutex acquisition.\n/** Mutex that allows recursive mutex acquisition.\n    @ingroup synchronization */\nclass recursive_mutex : internal::mutex_copy_deprecated_and_disabled {\npublic:\n    //! 
Construct unacquired recursive_mutex.\n    recursive_mutex() {\n#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS\n        internal_construct();\n#else\n  #if _WIN32||_WIN64\n        InitializeCriticalSectionEx(&impl, 4000, 0);\n  #else\n        pthread_mutexattr_t mtx_attr;\n        int error_code = pthread_mutexattr_init( &mtx_attr );\n        if( error_code )\n            tbb::internal::handle_perror(error_code,\"recursive_mutex: pthread_mutexattr_init failed\");\n\n        pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE );\n        error_code = pthread_mutex_init( &impl, &mtx_attr );\n        if( error_code )\n            tbb::internal::handle_perror(error_code,\"recursive_mutex: pthread_mutex_init failed\");\n\n        pthread_mutexattr_destroy( &mtx_attr );\n  #endif /* _WIN32||_WIN64*/\n#endif /* TBB_USE_ASSERT */\n    };\n\n    ~recursive_mutex() {\n#if TBB_USE_ASSERT\n        internal_destroy();\n#else\n  #if _WIN32||_WIN64\n        DeleteCriticalSection(&impl);\n  #else\n        pthread_mutex_destroy(&impl); \n\n  #endif /* _WIN32||_WIN64 */\n#endif /* TBB_USE_ASSERT */\n    };\n\n    class scoped_lock;\n    friend class scoped_lock;\n\n    //! The scoped locking pattern\n    /** It helps to avoid the common problem of forgetting to release lock.\n        It also nicely provides the \"node\" for queuing locks. */\n    class scoped_lock: internal::no_copy {\n    public:\n        //! Construct lock that has not acquired a recursive_mutex. \n        scoped_lock() : my_mutex(NULL) {};\n\n        //! Acquire lock on given mutex.\n        scoped_lock( recursive_mutex& mutex ) {\n#if TBB_USE_ASSERT\n            my_mutex = &mutex; \n#endif /* TBB_USE_ASSERT */\n            acquire( mutex );\n        }\n\n        //! Release lock (if lock is held).\n        ~scoped_lock() {\n            if( my_mutex ) \n                release();\n        }\n\n        //! 
Acquire lock on given mutex.\n        void acquire( recursive_mutex& mutex ) {\n#if TBB_USE_ASSERT\n            internal_acquire( mutex );\n#else\n            my_mutex = &mutex;\n            mutex.lock();\n#endif /* TBB_USE_ASSERT */\n        }\n\n        //! Try acquire lock on given recursive_mutex.\n        bool try_acquire( recursive_mutex& mutex ) {\n#if TBB_USE_ASSERT\n            return internal_try_acquire( mutex );\n#else\n            bool result = mutex.try_lock();\n            if( result )\n                my_mutex = &mutex;\n            return result;\n#endif /* TBB_USE_ASSERT */\n        }\n\n        //! Release lock\n        void release() {\n#if TBB_USE_ASSERT\n            internal_release();\n#else\n            my_mutex->unlock();\n            my_mutex = NULL;\n#endif /* TBB_USE_ASSERT */\n        }\n\n    private:\n        //! The pointer to the current recursive_mutex to work\n        recursive_mutex* my_mutex;\n\n        //! All checks from acquire using mutex.state were moved here\n        void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m );\n\n        //! All checks from try_acquire using mutex.state were moved here\n        bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m );\n\n        //! All checks from release using mutex.state were moved here\n        void __TBB_EXPORTED_METHOD internal_release();\n\n        friend class recursive_mutex;\n    };\n\n    // Mutex traits\n    static const bool is_rw_mutex = false;\n    static const bool is_recursive_mutex = true;\n    static const bool is_fair_mutex = false;\n\n    // C++0x compatibility interface\n    \n    //! 
Acquire lock\n    void lock() {\n#if TBB_USE_ASSERT\n        aligned_space<scoped_lock> tmp;\n        new(tmp.begin()) scoped_lock(*this);\n#else\n  #if _WIN32||_WIN64\n        EnterCriticalSection(&impl);\n  #else\n        int error_code = pthread_mutex_lock(&impl);\n        if( error_code )\n            tbb::internal::handle_perror(error_code,\"recursive_mutex: pthread_mutex_lock failed\");\n  #endif /* _WIN32||_WIN64 */\n#endif /* TBB_USE_ASSERT */\n    }\n\n    //! Try acquiring lock (non-blocking)\n    /** Return true if lock acquired; false otherwise. */\n    bool try_lock() {\n#if TBB_USE_ASSERT\n        aligned_space<scoped_lock> tmp;\n        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);\n#else        \n  #if _WIN32||_WIN64\n        return TryEnterCriticalSection(&impl)!=0;\n  #else\n        return pthread_mutex_trylock(&impl)==0;\n  #endif /* _WIN32||_WIN64 */\n#endif /* TBB_USE_ASSERT */\n    }\n\n    //! Release lock\n    void unlock() {\n#if TBB_USE_ASSERT\n        aligned_space<scoped_lock> tmp;\n        scoped_lock& s = *tmp.begin();\n        s.my_mutex = this;\n        s.internal_release();\n#else\n  #if _WIN32||_WIN64\n        LeaveCriticalSection(&impl);\n  #else\n        pthread_mutex_unlock(&impl);\n  #endif /* _WIN32||_WIN64 */\n#endif /* TBB_USE_ASSERT */\n    }\n\n    //! Return native_handle\n  #if _WIN32||_WIN64\n    typedef LPCRITICAL_SECTION native_handle_type;\n  #else\n    typedef pthread_mutex_t* native_handle_type;\n  #endif\n    native_handle_type native_handle() { return (native_handle_type) &impl; }\n\nprivate:\n#if _WIN32||_WIN64\n    CRITICAL_SECTION impl;\n    enum state_t {\n        INITIALIZED=0x1234,\n        DESTROYED=0x789A,\n    } state;\n#else\n    pthread_mutex_t impl;\n#endif /* _WIN32||_WIN64 */\n\n    //! All checks from mutex constructor using mutex.state were moved here\n    void __TBB_EXPORTED_METHOD internal_construct();\n\n    //! 
All checks from mutex destructor using mutex.state were moved here\n    void __TBB_EXPORTED_METHOD internal_destroy();\n};\n\n__TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex)\n\n} // namespace tbb \n\n#endif /* __TBB_recursive_mutex_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/runtime_loader.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_runtime_loader_H\n#define __TBB_runtime_loader_H\n\n#if ! TBB_PREVIEW_RUNTIME_LOADER\n    #error Set TBB_PREVIEW_RUNTIME_LOADER to include runtime_loader.h\n#endif\n\n#include \"tbb_stddef.h\"\n#include <climits>\n\n#if _MSC_VER\n    #if ! 
__TBB_NO_IMPLICIT_LINKAGE\n        #ifdef _DEBUG\n            #pragma comment( linker, \"/nodefaultlib:tbb_debug.lib\" )\n            #pragma comment( linker, \"/defaultlib:tbbproxy_debug.lib\" )\n        #else\n            #pragma comment( linker, \"/nodefaultlib:tbb.lib\" )\n            #pragma comment( linker, \"/defaultlib:tbbproxy.lib\" )\n        #endif\n    #endif\n#endif\n\nnamespace tbb {\n\nnamespace interface6 {\n\n//! Load TBB at runtime.\n/*!\n\n\\b Usage:\n\nIn source code:\n\n\\code\n#include \"tbb/runtime_loader.h\"\n\nchar const * path[] = { \"<install dir>/lib/ia32\", NULL };\ntbb::runtime_loader loader( path );\n\n// Now use TBB.\n\\endcode\n\nLink with \\c tbbproxy.lib (or \\c libtbbproxy.a) instead of \\c tbb.lib (\\c libtbb.dylib,\n\\c libtbb.so).\n\nTBB library will be loaded at runtime from \\c <install dir>/lib/ia32 directory.\n\n\\b Attention:\n\nAll \\c runtime_loader objects (in the same module, i.e. exe or dll) share some global state.\nThe most noticeable piece of global state is loaded TBB library.\nThere are some implications:\n\n    -   Only one TBB library can be loaded per module.\n\n    -   If one object has already loaded TBB library, another object will not load TBB.\n        If the loaded TBB library is suitable for the second object, both will use TBB\n        cooperatively, otherwise the second object will report an error.\n\n    -   \\c runtime_loader objects will not work (correctly) in parallel due to absence of\n        synchronization.\n\n*/\n\nclass runtime_loader : tbb::internal::no_copy {\n\n    public:\n\n        //! Error mode constants.\n        enum error_mode {\n            em_status,     //!< Save status of operation and continue.\n            em_throw,      //!< Throw an exception of tbb::runtime_loader::error_code type.\n            em_abort       //!< Print message to \\c stderr and call \\c abort().\n        }; // error_mode\n\n        //! 
Error codes.\n        enum error_code {\n            ec_ok,         //!< No errors.\n            ec_bad_call,   //!< Invalid function call (e. g. load() called when TBB is already loaded).\n            ec_bad_arg,    //!< Invalid argument passed.\n            ec_bad_lib,    //!< Invalid library found (e. g. \\c TBB_runtime_version symbol not found).\n            ec_bad_ver,    //!< TBB found but version is not suitable.\n            ec_no_lib      //!< No suitable TBB library found.\n        }; // error_code\n\n        //! Initialize object but do not load TBB.\n        runtime_loader( error_mode mode = em_abort );\n\n        //! Initialize object and load TBB.\n        /*!\n            See load() for details.\n\n            If error mode is \\c em_status, call status() to check whether TBB was loaded or not.\n        */\n        runtime_loader(\n            char const * path[],                           //!< List of directories to search TBB in.\n            int          min_ver = TBB_INTERFACE_VERSION,  //!< Minimal suitable version of TBB.\n            int          max_ver = INT_MAX,                //!< Maximal suitable version of TBB.\n            error_mode   mode    = em_abort                //!< Error mode for this object.\n        );\n\n        //! Destroy object.\n        ~runtime_loader();\n\n        //! Load TBB.\n        /*!\n            The method searches the directories specified in \\c path[] array for the TBB library.\n            When the library is found, it is loaded and its version is checked. If the version is\n            not suitable, the library is unloaded, and the search continues.\n\n            \\b Note:\n\n            For security reasons, avoid using relative directory names. For example, never load\n            TBB from current (\\c \".\"), parent (\\c \"..\") or any other relative directory (like\n            \\c \"lib\" ). Use only absolute directory names (e. g. 
\"/usr/local/lib\").\n\n            For the same security reasons, avoid using system default directories (\\c \"\") on\n            Windows. (See http://www.microsoft.com/technet/security/advisory/2269637.mspx for\n            details.)\n\n            Neglecting these rules may cause your program to execute 3-rd party malicious code.\n\n            \\b Errors:\n                -   \\c ec_bad_call - TBB already loaded by this object.\n                -   \\c ec_bad_arg - \\p min_ver and/or \\p max_ver negative or zero,\n                    or \\p min_ver > \\p max_ver.\n                -   \\c ec_bad_ver - TBB of unsuitable version already loaded by another object.\n                -   \\c ec_no_lib - No suitable library found.\n        */\n        error_code\n        load(\n            char const * path[],                           //!< List of directories to search TBB in.\n            int          min_ver = TBB_INTERFACE_VERSION,  //!< Minimal suitable version of TBB.\n            int          max_ver = INT_MAX                 //!< Maximal suitable version of TBB.\n\n        );\n\n\n        //! Report status.\n        /*!\n            If error mode is \\c em_status, the function returns status of the last operation.\n        */\n        error_code status();\n\n    private:\n\n        error_mode const my_mode;\n        error_code       my_status;\n        bool             my_loaded;\n\n}; // class runtime_loader\n\n} // namespace interface6\n\nusing interface6::runtime_loader;\n\n} // namespace tbb\n\n#endif /* __TBB_runtime_loader_H */\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/scalable_allocator.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_scalable_allocator_H\n#define __TBB_scalable_allocator_H\n/** @file */\n\n#include <stddef.h> /* Need ptrdiff_t and size_t from here. */\n#if !_MSC_VER\n#include <stdint.h> /* Need intptr_t from here. 
*/\n#endif\n\n#if !defined(__cplusplus) && __ICC==1100\n    #pragma warning (push)\n    #pragma warning (disable: 991)\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n#if _MSC_VER >= 1400\n#define __TBB_EXPORTED_FUNC   __cdecl\n#else\n#define __TBB_EXPORTED_FUNC\n#endif\n\n/** The \"malloc\" analogue to allocate block of memory of size bytes.\n  * @ingroup memory_allocation */\nvoid * __TBB_EXPORTED_FUNC scalable_malloc (size_t size);\n\n/** The \"free\" analogue to discard a previously allocated piece of memory.\n    @ingroup memory_allocation */\nvoid   __TBB_EXPORTED_FUNC scalable_free (void* ptr);\n\n/** The \"realloc\" analogue complementing scalable_malloc.\n    @ingroup memory_allocation */\nvoid * __TBB_EXPORTED_FUNC scalable_realloc (void* ptr, size_t size);\n\n/** The \"calloc\" analogue complementing scalable_malloc.\n    @ingroup memory_allocation */\nvoid * __TBB_EXPORTED_FUNC scalable_calloc (size_t nobj, size_t size);\n\n/** The \"posix_memalign\" analogue.\n    @ingroup memory_allocation */\nint __TBB_EXPORTED_FUNC scalable_posix_memalign (void** memptr, size_t alignment, size_t size);\n\n/** The \"_aligned_malloc\" analogue.\n    @ingroup memory_allocation */\nvoid * __TBB_EXPORTED_FUNC scalable_aligned_malloc (size_t size, size_t alignment);\n\n/** The \"_aligned_realloc\" analogue.\n    @ingroup memory_allocation */\nvoid * __TBB_EXPORTED_FUNC scalable_aligned_realloc (void* ptr, size_t size, size_t alignment);\n\n/** The \"_aligned_free\" analogue.\n    @ingroup memory_allocation */\nvoid __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr);\n\n/** The analogue of _msize/malloc_size/malloc_usable_size.\n    Returns the usable size of a memory block previously allocated by scalable_*,\n    or 0 (zero) if ptr does not point to such a block.\n    @ingroup memory_allocation */\nsize_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr);\n\n/* Results for scalable_allocation_* functions */\ntypedef enum {\n    TBBMALLOC_OK,\n   
 TBBMALLOC_INVALID_PARAM,\n    TBBMALLOC_UNSUPPORTED,\n    TBBMALLOC_NO_MEMORY,\n    TBBMALLOC_NO_EFFECT\n} ScalableAllocationResult;\n\n/* Setting TBB_MALLOC_USE_HUGE_PAGES environment variable to 1 enables huge pages.\n   scalable_allocation_mode call has priority over environment variable. */\ntypedef enum {\n    TBBMALLOC_USE_HUGE_PAGES,  /* value turns using huge pages on and off */\n    /* deprecated, kept for backward compatibility only */\n    USE_HUGE_PAGES = TBBMALLOC_USE_HUGE_PAGES,\n    /* try to limit memory consumption value Bytes, clean internal buffers\n       if limit is exceeded, but not prevents from requesting memory from OS */\n    TBBMALLOC_SET_SOFT_HEAP_LIMIT\n} AllocationModeParam;\n\n/** Set TBB allocator-specific allocation modes.\n    @ingroup memory_allocation */\nint __TBB_EXPORTED_FUNC scalable_allocation_mode(int param, intptr_t value);\n\ntypedef enum {\n    /* Clean internal allocator buffers for all threads.\n       Returns TBBMALLOC_NO_EFFECT if no buffers cleaned,\n       TBBMALLOC_OK if some memory released from buffers. */\n    TBBMALLOC_CLEAN_ALL_BUFFERS,\n    /* Clean internal allocator buffer for current thread only.\n       Return values same as for TBBMALLOC_CLEAN_ALL_BUFFERS. */\n    TBBMALLOC_CLEAN_THREAD_BUFFERS\n} ScalableAllocationCmd;\n\n/** Call TBB allocator-specific commands.\n    @ingroup memory_allocation */\nint __TBB_EXPORTED_FUNC scalable_allocation_command(int cmd, void *param);\n\n#ifdef __cplusplus\n} /* extern \"C\" */\n#endif /* __cplusplus */\n\n#ifdef __cplusplus\n\n//! 
The namespace rml contains components of low-level memory pool interface.\nnamespace rml {\nclass MemoryPool;\n\ntypedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes);\ntypedef int   (*rawFreeType)(intptr_t pool_id, void* raw_ptr, size_t raw_bytes);\n\n/*\nMemPoolPolicy extension must be compatible with such structure fields layout\n\nstruct MemPoolPolicy {\n    rawAllocType pAlloc;\n    rawFreeType  pFree;\n    size_t       granularity;   // granularity of pAlloc allocations\n};\n*/\n\nstruct MemPoolPolicy {\n    enum {\n        TBBMALLOC_POOL_VERSION = 1\n    };\n\n    rawAllocType pAlloc;\n    rawFreeType  pFree;\n                 // granularity of pAlloc allocations. 0 means default used.\n    size_t       granularity;\n    int          version;\n                 // all memory consumed at 1st pAlloc call and never returned,\n                 // no more pAlloc calls after 1st\n    unsigned     fixedPool : 1,\n                 // memory consumed but returned only at pool termination\n                 keepAllMemory : 1,\n                 reserved : 30;\n\n    MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_,\n                  size_t granularity_ = 0, bool fixedPool_ = false,\n                  bool keepAllMemory_ = false) :\n        pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(TBBMALLOC_POOL_VERSION),\n        fixedPool(fixedPool_), keepAllMemory(keepAllMemory_),\n        reserved(0) {}\n};\n\n// enums have same values as appropriate enums from ScalableAllocationResult\n// TODO: use ScalableAllocationResult in pool_create directly\nenum MemPoolError {\n    // pool created successfully\n    POOL_OK = TBBMALLOC_OK,\n    // invalid policy parameters found\n    INVALID_POLICY = TBBMALLOC_INVALID_PARAM,\n     // requested pool policy is not supported by allocator library\n    UNSUPPORTED_POLICY = TBBMALLOC_UNSUPPORTED,\n    // lack of memory during pool creation\n    NO_MEMORY = TBBMALLOC_NO_MEMORY,\n    // action takes no effect\n 
   NO_EFFECT = TBBMALLOC_NO_EFFECT\n};\n\nMemPoolError pool_create_v1(intptr_t pool_id, const MemPoolPolicy *policy,\n                            rml::MemoryPool **pool);\n\nbool  pool_destroy(MemoryPool* memPool);\nvoid *pool_malloc(MemoryPool* memPool, size_t size);\nvoid *pool_realloc(MemoryPool* memPool, void *object, size_t size);\nvoid *pool_aligned_malloc(MemoryPool* mPool, size_t size, size_t alignment);\nvoid *pool_aligned_realloc(MemoryPool* mPool, void *ptr, size_t size, size_t alignment);\nbool  pool_reset(MemoryPool* memPool);\nbool  pool_free(MemoryPool *memPool, void *object);\n}\n\n#include <new>      /* To use new with the placement argument */\n\n/* Ensure that including this header does not cause implicit linkage with TBB */\n#ifndef __TBB_NO_IMPLICIT_LINKAGE\n    #define __TBB_NO_IMPLICIT_LINKAGE 1\n    #include \"tbb_stddef.h\"\n    #undef  __TBB_NO_IMPLICIT_LINKAGE\n#else\n    #include \"tbb_stddef.h\"\n#endif\n\n#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n #include <utility> // std::forward\n#endif\n\nnamespace tbb {\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Workaround for erroneous \"unreferenced parameter\" warning in method destroy.\n    #pragma warning (push)\n    #pragma warning (disable: 4100)\n#endif\n\n//! 
Meets \"allocator\" requirements of ISO C++ Standard, Section 20.1.5\n/** The members are ordered the same way they are in section 20.4.1\n    of the ISO C++ standard.\n    @ingroup memory_allocation */\ntemplate<typename T>\nclass scalable_allocator {\npublic:\n    typedef typename internal::allocator_type<T>::value_type value_type;\n    typedef value_type* pointer;\n    typedef const value_type* const_pointer;\n    typedef value_type& reference;\n    typedef const value_type& const_reference;\n    typedef size_t size_type;\n    typedef ptrdiff_t difference_type;\n    template<class U> struct rebind {\n        typedef scalable_allocator<U> other;\n    };\n\n    scalable_allocator() throw() {}\n    scalable_allocator( const scalable_allocator& ) throw() {}\n    template<typename U> scalable_allocator(const scalable_allocator<U>&) throw() {}\n\n    pointer address(reference x) const {return &x;}\n    const_pointer address(const_reference x) const {return &x;}\n\n    //! Allocate space for n objects.\n    pointer allocate( size_type n, const void* /*hint*/ =0 ) {\n        return static_cast<pointer>( scalable_malloc( n * sizeof(value_type) ) );\n    }\n\n    //! Free previously allocated block of memory\n    void deallocate( pointer p, size_type ) {\n        scalable_free( p );\n    }\n\n    //! Largest value for which method allocate might succeed.\n    size_type max_size() const throw() {\n        size_type absolutemax = static_cast<size_type>(-1) / sizeof (value_type);\n        return (absolutemax > 0 ? absolutemax : 1);\n    }\n#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n    template<typename U, typename... Args>\n    void construct(U *p, Args&&... 
args)\n        { ::new((void *)p) U(std::forward<Args>(args)...); }\n#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    void construct( pointer p, value_type&& value ) { ::new((void*)(p)) value_type( std::move( value ) ); }\n#endif\n    void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);}\n#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n    void destroy( pointer p ) {p->~value_type();}\n};\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning (pop)\n#endif // warning 4100 is back\n\n//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1\n/** @ingroup memory_allocation */\ntemplate<>\nclass scalable_allocator<void> {\npublic:\n    typedef void* pointer;\n    typedef const void* const_pointer;\n    typedef void value_type;\n    template<class U> struct rebind {\n        typedef scalable_allocator<U> other;\n    };\n};\n\ntemplate<typename T, typename U>\ninline bool operator==( const scalable_allocator<T>&, const scalable_allocator<U>& ) {return true;}\n\ntemplate<typename T, typename U>\ninline bool operator!=( const scalable_allocator<T>&, const scalable_allocator<U>& ) {return false;}\n\n} // namespace tbb\n\n#if _MSC_VER\n    #if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBBMALLOC_NO_IMPLICIT_LINKAGE)\n        #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1\n    #endif\n\n    #if !__TBBMALLOC_NO_IMPLICIT_LINKAGE\n        #ifdef _DEBUG\n            #pragma comment(lib, \"tbbmalloc_debug.lib\")\n        #else\n            #pragma comment(lib, \"tbbmalloc.lib\")\n        #endif\n    #endif\n\n\n#endif\n\n#endif /* __cplusplus */\n\n#if !defined(__cplusplus) && __ICC==1100\n    #pragma warning (pop)\n#endif // ICC 11.0 warning 991 is back\n\n#endif /* __TBB_scalable_allocator_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/scheduler.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"custom_scheduler.h\"\n#include \"scheduler_utility.h\"\n#include \"governor.h\"\n#include \"market.h\"\n#include \"arena.h\"\n#include \"mailbox.h\"\n#include \"observer_proxy.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/atomic.h\"\n\nnamespace tbb {\nnamespace internal {\n\n//------------------------------------------------------------------------\n// Library initialization\n//------------------------------------------------------------------------\n\n/** Defined in tbb_main.cpp **/\nextern generic_scheduler* (*AllocateSchedulerPtr)( arena*, size_t index );\n\ninline generic_scheduler* allocate_scheduler ( arena* a, size_t index ) {\n    return AllocateSchedulerPtr(a, index);\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\ncontext_state_propagation_mutex_type the_context_state_propagation_mutex;\n\nuintptr_t the_context_state_propagation_epoch = 0;\n\n//! Context to be associated with dummy tasks of worker threads schedulers.\n/** It is never used for its direct purpose, and is introduced solely for the sake\n    of avoiding one extra conditional branch in the end of wait_for_all method. **/\nstatic task_group_context the_dummy_context(task_group_context::isolated);\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\nvoid Scheduler_OneTimeInitialization ( bool itt_present ) {\n    AllocateSchedulerPtr = itt_present ? &custom_scheduler<DefaultSchedulerTraits>::allocate_scheduler :\n                                      &custom_scheduler<IntelSchedulerTraits>::allocate_scheduler;\n#if __TBB_TASK_GROUP_CONTEXT\n    // There must be no tasks belonging to this fake task group. 
Mark invalid for the assert\n    __TBB_ASSERT(!(task_group_context::low_unused_state_bit & (task_group_context::low_unused_state_bit-1)), NULL);\n    the_dummy_context.my_state = task_group_context::low_unused_state_bit;\n#if __TBB_TASK_PRIORITY\n    // It should never prevent tasks from being passed to execution.\n    the_dummy_context.my_priority = num_priority_levels - 1;\n#endif /* __TBB_TASK_PRIORITY */\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n}\n\n//------------------------------------------------------------------------\n// scheduler interface\n//------------------------------------------------------------------------\n\n//  A pure virtual destructor should still have a body\n//  so the one for tbb::internal::scheduler::~scheduler() is provided here\nscheduler::~scheduler( ) {}\n\n//------------------------------------------------------------------------\n// generic_scheduler\n//------------------------------------------------------------------------\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Suppress overzealous compiler warning about using 'this' in base initializer list.\n    #pragma warning(push)\n    #pragma warning(disable:4355)\n#endif\n\ngeneric_scheduler::generic_scheduler( arena* a, size_t index )\n    : my_stealing_threshold(0)\n    , my_market(NULL)\n    , my_random( this )\n    , my_free_list(NULL)\n#if __TBB_HOARD_NONLOCAL_TASKS\n    , my_nonlocal_free_list(NULL)\n#endif\n    , my_dummy_task(NULL)\n    , my_ref_count(1)\n    , my_auto_initialized(false)\n#if __TBB_COUNT_TASK_NODES\n    , my_task_node_count(0)\n#endif /* __TBB_COUNT_TASK_NODES */\n    , my_small_task_count(1)   // Extra 1 is a guard reference\n    , my_return_list(NULL)\n#if __TBB_TASK_GROUP_CONTEXT\n    , my_local_ctx_list_update(make_atomic(uintptr_t(0)))\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n#if __TBB_TASK_PRIORITY\n    , my_offloaded_tasks(NULL)\n    , my_offloaded_task_list_tail_link(NULL)\n    , my_local_reload_epoch(0)\n    , 
my_pool_reshuffling_pending(false)\n#endif /* __TBB_TASK_PRIORITY */\n#if __TBB_TASK_GROUP_CONTEXT\n    , my_nonlocal_ctx_list_update(make_atomic(uintptr_t(0)))\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n#if __TBB_SURVIVE_THREAD_SWITCH && TBB_USE_ASSERT\n    , my_cilk_state(cs_none)\n#endif /* __TBB_SURVIVE_THREAD_SWITCH && TBB_USE_ASSERT */\n{\n    my_arena_index = index;\n    my_arena_slot = 0;\n    my_arena = a;\n    my_innermost_running_task = NULL;\n    my_dispatching_task = NULL;\n    my_affinity_id = 0;\n#if __TBB_SCHEDULER_OBSERVER\n    my_last_global_observer = NULL;\n    my_last_local_observer = NULL;\n#endif /* __TBB_SCHEDULER_OBSERVER */\n#if __TBB_TASK_PRIORITY\n    my_ref_top_priority = NULL;\n    my_ref_reload_epoch = NULL;\n#endif /* __TBB_TASK_PRIORITY */\n\n    my_dummy_task = &allocate_task( sizeof(task), __TBB_CONTEXT_ARG(NULL, NULL) );\n#if __TBB_TASK_GROUP_CONTEXT\n    my_context_list_head.my_prev = &my_context_list_head;\n    my_context_list_head.my_next = &my_context_list_head;\n    ITT_SYNC_CREATE(&my_context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList);\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    my_dummy_task->prefix().ref_count = 2;\n    ITT_SYNC_CREATE(&my_dummy_task->prefix().ref_count, SyncType_Scheduler, SyncObj_WorkerLifeCycleMgmt);\n    ITT_SYNC_CREATE(&my_return_list, SyncType_Scheduler, SyncObj_TaskReturnList);\n    assert_task_pool_valid();\n#if __TBB_SURVIVE_THREAD_SWITCH\n    my_cilk_unwatch_thunk.routine = NULL;\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n}\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning(pop)\n#endif // warning 4355 is back\n\n#if TBB_USE_ASSERT > 1\nvoid generic_scheduler::assert_task_pool_valid() const {\n    acquire_task_pool();\n    task** tp = my_arena_slot->task_pool_ptr;\n    __TBB_ASSERT( my_arena_slot->my_task_pool_size >= min_task_pool_size, NULL );\n    const size_t H = __TBB_load_relaxed(my_arena_slot->head); // mirror\n    const size_t T = 
__TBB_load_relaxed(my_arena_slot->tail); // mirror\n    __TBB_ASSERT( H <= T, NULL );\n    for ( size_t i = 0; i < H; ++i )\n        __TBB_ASSERT( tp[i] == poisoned_ptr, \"Task pool corrupted\" );\n    for ( size_t i = H; i < T; ++i ) {\n        __TBB_ASSERT( (uintptr_t)tp[i] + 1 > 1u, \"nil or invalid task pointer in the deque\" );\n        __TBB_ASSERT( tp[i]->prefix().state == task::ready ||\n                      tp[i]->prefix().extra_state == es_task_proxy, \"task in the deque has invalid state\" );\n    }\n    for ( size_t i = T; i < my_arena_slot->my_task_pool_size; ++i )\n        __TBB_ASSERT( tp[i] == poisoned_ptr, \"Task pool corrupted\" );\n    release_task_pool();\n}\n#endif /* TBB_USE_ASSERT > 1 */\n\nvoid generic_scheduler::init_stack_info () {\n    // Stacks are growing top-down. Highest address is called \"stack base\",\n    // and the lowest is \"stack limit\".\n    __TBB_ASSERT( !my_stealing_threshold, \"Stealing threshold has already been calculated\" );\n    size_t  stack_size = my_market->worker_stack_size();\n#if USE_WINTHREAD\n#if defined(_MSC_VER)&&_MSC_VER<1400 && !_WIN64\n    NT_TIB  *pteb = (NT_TIB*)__TBB_machine_get_current_teb();\n#else\n    NT_TIB  *pteb = (NT_TIB*)NtCurrentTeb();\n#endif\n    __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, \"invalid stack info in TEB\" );\n    __TBB_ASSERT( stack_size >0, \"stack_size not initialized?\" );\n    // When a thread is created with the attribute STACK_SIZE_PARAM_IS_A_RESERVATION, stack limit\n    // in the TIB points to the committed part of the stack only. 
This renders the expression\n    // \"(uintptr_t)pteb->StackBase / 2 + (uintptr_t)pteb->StackLimit / 2\" virtually useless.\n    // Thus for worker threads we use the explicit stack size we used while creating them.\n    // And for master threads we rely on the following fact and assumption:\n    // - the default stack size of a master thread on Windows is 1M;\n    // - if it was explicitly set by the application it is at least as large as the size of a worker stack.\n    if ( is_worker() || stack_size < MByte )\n        my_stealing_threshold = (uintptr_t)pteb->StackBase - stack_size / 2;\n    else\n        my_stealing_threshold = (uintptr_t)pteb->StackBase - MByte / 2;\n#else /* USE_PTHREAD */\n    // There is no portable way to get stack base address in Posix, so we use\n    // non-portable method (on all modern Linux) or the simplified approach\n    // based on the common sense assumptions. The most important assumption\n    // is that the main thread's stack size is not less than that of other threads.\n    // See also comment 3 at the end of this file\n    void    *stack_base = &stack_size;\n#if __linux__ && !__bg__\n#if __TBB_ipf\n    void    *rsb_base = __TBB_get_bsp();\n#endif\n    size_t  np_stack_size = 0;\n    void    *stack_limit = NULL;\n    pthread_attr_t  np_attr_stack;\n    if( 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) {\n        if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) {\n#if __TBB_ipf\n            pthread_attr_t  attr_stack;\n            if ( 0 == pthread_attr_init(&attr_stack) ) {\n                if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) ) {\n                    if ( np_stack_size < stack_size ) {\n                        // We are in a secondary thread. 
Use reliable data.\n                        // IA-64 architecture stack is split into RSE backup and memory parts\n                        rsb_base = stack_limit;\n                        stack_size = np_stack_size/2;\n                        // Limit of the memory part of the stack\n                        stack_limit = (char*)stack_limit + stack_size;\n                    }\n                    // We are either in the main thread or this thread stack\n                    // is bigger that that of the main one. As we cannot discern\n                    // these cases we fall back to the default (heuristic) values.\n                }\n                pthread_attr_destroy(&attr_stack);\n            }\n            // IA-64 architecture stack is split into RSE backup and memory parts\n            my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2);\n#endif /* __TBB_ipf */\n            // Size of the stack free part \n            stack_size = size_t((char*)stack_base - (char*)stack_limit);\n        }\n        pthread_attr_destroy(&np_attr_stack);\n    }\n#endif /* __linux__ */\n    __TBB_ASSERT( stack_size>0, \"stack size must be positive\" );\n    my_stealing_threshold = (uintptr_t)((char*)stack_base - stack_size/2);\n#endif /* USE_PTHREAD */\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n/** The function uses synchronization scheme similar to the one in the destructor\n    of task_group_context augmented with interlocked state change of each context\n    object. The purpose of this algo is to prevent threads doing nonlocal context\n    destruction from accessing destroyed owner-scheduler instance still pointed to\n    by the context object. 
**/\nvoid generic_scheduler::cleanup_local_context_list () {\n    // Detach contexts remaining in the local list\n    bool wait_for_concurrent_destroyers_to_leave = false;\n    uintptr_t local_count_snapshot = my_context_state_propagation_epoch;\n    my_local_ctx_list_update.store<relaxed>(1);\n    {\n        // This is just a definition. Actual lock is acquired only in case of conflict.\n        spin_mutex::scoped_lock lock;\n        // Full fence prevents reordering of store to my_local_ctx_list_update with\n        // load from my_nonlocal_ctx_list_update.\n        atomic_fence();\n        // Check for the conflict with concurrent destroyer or cancellation propagator\n        if ( my_nonlocal_ctx_list_update.load<relaxed>() || local_count_snapshot != the_context_state_propagation_epoch )\n            lock.acquire(my_context_list_mutex);\n        // No acquire fence is necessary for loading my_context_list_head.my_next,\n        // as the list can be updated by this thread only.\n        context_list_node_t *node = my_context_list_head.my_next;\n        while ( node != &my_context_list_head ) {\n            task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);\n            __TBB_ASSERT( __TBB_load_relaxed(ctx.my_kind) != task_group_context::binding_required, \"Only a context bound to a root task can be detached\" );\n            node = node->my_next;\n            __TBB_ASSERT( is_alive(ctx.my_version_and_traits), \"Walked into a destroyed context while detaching contexts from the local list\" );\n            // Synchronizes with ~task_group_context(). 
TODO: evaluate and perhaps relax\n            if ( internal::as_atomic(ctx.my_kind).fetch_and_store(task_group_context::detached) == task_group_context::dying )\n                wait_for_concurrent_destroyers_to_leave = true;\n        }\n    }\n    my_local_ctx_list_update.store<release>(0);\n    // Wait until other threads referencing this scheduler object finish with it\n    if ( wait_for_concurrent_destroyers_to_leave )\n        spin_wait_until_eq( my_nonlocal_ctx_list_update, 0u );\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\nvoid generic_scheduler::free_scheduler() {\n    __TBB_ASSERT( !my_arena_slot, NULL );\n#if __TBB_TASK_GROUP_CONTEXT\n    cleanup_local_context_list();\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    free_task<small_local_task>( *my_dummy_task );\n\n#if __TBB_HOARD_NONLOCAL_TASKS\n    while( task* t = my_nonlocal_free_list ) {\n        task_prefix& p = t->prefix();\n        my_nonlocal_free_list = p.next;\n        __TBB_ASSERT( p.origin && p.origin!=this, NULL );\n        free_nonlocal_small_task(*t);\n    }\n#endif\n    // k accounts for a guard reference and each task that we deallocate.\n    intptr_t k = 1;\n    for(;;) {\n        while( task* t = my_free_list ) {\n            my_free_list = t->prefix().next;\n            deallocate_task(*t);\n            ++k;\n        }\n        if( my_return_list==plugged_return_list() )\n            break;\n        my_free_list = (task*)__TBB_FetchAndStoreW( &my_return_list, (intptr_t)plugged_return_list() );\n    }\n#if __TBB_COUNT_TASK_NODES\n    my_market->update_task_node_count( my_task_node_count );\n#endif /* __TBB_COUNT_TASK_NODES */\n    // Update my_small_task_count last.  
Doing so sooner might cause another thread to free *this.\n    __TBB_ASSERT( my_small_task_count>=k, \"my_small_task_count corrupted\" );\n    governor::sign_off(this);\n    if( __TBB_FetchAndAddW( &my_small_task_count, -k )==k )\n        NFS_Free( this );\n}\n\ntask& generic_scheduler::allocate_task( size_t number_of_bytes,\n                                            __TBB_CONTEXT_ARG(task* parent, task_group_context* context) ) {\n    GATHER_STATISTIC(++my_counters.active_tasks);\n    task *t;\n    if( number_of_bytes<=quick_task_size ) {\n#if __TBB_HOARD_NONLOCAL_TASKS\n        if( (t = my_nonlocal_free_list) ) {\n            GATHER_STATISTIC(--my_counters.free_list_length);\n            __TBB_ASSERT( t->state()==task::freed, \"free list of tasks is corrupted\" );\n            my_nonlocal_free_list = t->prefix().next;\n        } else\n#endif\n        if( (t = my_free_list) ) {\n            GATHER_STATISTIC(--my_counters.free_list_length);\n            __TBB_ASSERT( t->state()==task::freed, \"free list of tasks is corrupted\" );\n            my_free_list = t->prefix().next;\n        } else if( my_return_list ) {\n            // No fence required for read of my_return_list above, because __TBB_FetchAndStoreW has a fence.\n            t = (task*)__TBB_FetchAndStoreW( &my_return_list, 0 ); // with acquire\n            __TBB_ASSERT( t, \"another thread emptied the my_return_list\" );\n            __TBB_ASSERT( t->prefix().origin==this, \"task returned to wrong my_return_list\" );\n            ITT_NOTIFY( sync_acquired, &my_return_list );\n            my_free_list = t->prefix().next;\n        } else {\n            t = (task*)((char*)NFS_Allocate( 1, task_prefix_reservation_size+quick_task_size, NULL ) + task_prefix_reservation_size );\n#if __TBB_COUNT_TASK_NODES\n            ++my_task_node_count;\n#endif /* __TBB_COUNT_TASK_NODES */\n            t->prefix().origin = this;\n            t->prefix().next = 0;\n            ++my_small_task_count;\n        }\n#if 
__TBB_PREFETCHING\n        task *t_next = t->prefix().next;\n        if( !t_next ) { // the task was last in the list\n#if __TBB_HOARD_NONLOCAL_TASKS\n            if( my_free_list )\n                t_next = my_free_list;\n            else\n#endif\n            if( my_return_list ) // enable prefetching, gives speedup\n                t_next = my_free_list = (task*)__TBB_FetchAndStoreW( &my_return_list, 0 );\n        }\n        if( t_next ) { // gives speedup for both cache lines\n            __TBB_cl_prefetch(t_next);\n            __TBB_cl_prefetch(&t_next->prefix());\n        }\n#endif /* __TBB_PREFETCHING */\n    } else {\n        GATHER_STATISTIC(++my_counters.big_tasks);\n        t = (task*)((char*)NFS_Allocate( 1, task_prefix_reservation_size+number_of_bytes, NULL ) + task_prefix_reservation_size );\n#if __TBB_COUNT_TASK_NODES\n        ++my_task_node_count;\n#endif /* __TBB_COUNT_TASK_NODES */\n        t->prefix().origin = NULL;\n    }\n    task_prefix& p = t->prefix();\n#if __TBB_TASK_GROUP_CONTEXT\n    p.context = context;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    // Obsolete. But still in use, so has to be assigned correct value here.\n    p.owner = this;\n    p.ref_count = 0;\n    // Obsolete. 
Assign some not outrageously out-of-place value for a while.\n    p.depth = 0;\n    p.parent = parent;\n    // In TBB 2.1 and later, the constructor for task sets extra_state to indicate the version of the tbb/task.h header.\n    // In TBB 2.0 and earlier, the constructor leaves extra_state as zero.\n    p.extra_state = 0;\n    p.affinity = 0;\n    p.state = task::allocated;\n    return *t;\n}\n\nvoid generic_scheduler::free_nonlocal_small_task( task& t ) {\n    __TBB_ASSERT( t.state()==task::freed, NULL );\n    generic_scheduler& s = *static_cast<generic_scheduler*>(t.prefix().origin);\n    __TBB_ASSERT( &s!=this, NULL );\n    for(;;) {\n        task* old = s.my_return_list;\n        if( old==plugged_return_list() )\n            break;\n        // Atomically insert t at head of s.my_return_list\n        t.prefix().next = old;\n        ITT_NOTIFY( sync_releasing, &s.my_return_list );\n        if( as_atomic(s.my_return_list).compare_and_swap(&t, old )==old ) {\n#if __TBB_PREFETCHING\n            __TBB_cl_evict(&t.prefix());\n            __TBB_cl_evict(&t);\n#endif\n            return;\n        }\n    }\n    deallocate_task(t);\n    if( __TBB_FetchAndDecrementWrelease( &s.my_small_task_count )==1 ) {\n        // We freed the last task allocated by scheduler s, so it's our responsibility\n        // to free the scheduler.\n        NFS_Free( &s );\n    }\n}\n\nsize_t generic_scheduler::prepare_task_pool ( size_t num_tasks ) {\n    size_t T = __TBB_load_relaxed(my_arena_slot->tail); // mirror\n    if ( T + num_tasks <= my_arena_slot->my_task_pool_size )\n        return T;\n    acquire_task_pool();\n    size_t H = __TBB_load_relaxed(my_arena_slot->head); // mirror\n    T -= H;\n    size_t new_size = T + num_tasks;\n    __TBB_ASSERT(!my_arena_slot->my_task_pool_size || my_arena_slot->my_task_pool_size >= min_task_pool_size, NULL);\n    if( !my_arena_slot->my_task_pool_size ) {\n        __TBB_ASSERT( !in_arena() && !my_arena_slot->task_pool_ptr, NULL );\n        if( 
new_size < min_task_pool_size ) new_size = min_task_pool_size;\n        my_arena_slot->allocate_task_pool( new_size );\n    }\n    // If the free space at the beginning of the task pool is too short, we\n    // are likely facing a pathological single-producer-multiple-consumers\n    // scenario, and thus it's better to expand the task pool\n    else if ( new_size <= my_arena_slot->my_task_pool_size - min_task_pool_size/4 ) {\n        // Relocate the busy part to the beginning of the deque\n        memmove( my_arena_slot->task_pool_ptr, my_arena_slot->task_pool_ptr + H, T * sizeof(task*) );\n        my_arena_slot->fill_with_canary_pattern( T, my_arena_slot->tail );\n        commit_relocated_tasks(T);\n    }\n    else {\n        // Grow task pool. As this operation is rare, and its cost is asymptotically\n        // amortizable, we can tolerate new task pool allocation done under the lock.\n        if ( new_size < 2 * my_arena_slot->my_task_pool_size )\n            new_size = 2 * my_arena_slot->my_task_pool_size;\n        task** old_pool = my_arena_slot->task_pool_ptr;\n        my_arena_slot->allocate_task_pool( new_size ); // updates my_task_pool_size\n        __TBB_ASSERT( T <= my_arena_slot->my_task_pool_size, \"new task pool is too short\" );\n        memcpy( my_arena_slot->task_pool_ptr, old_pool + H, T * sizeof(task*) );\n        commit_relocated_tasks(T);\n        __TBB_ASSERT( old_pool, \"attempt to free NULL TaskPool\" );\n        NFS_Free( old_pool );\n    }\n    assert_task_pool_valid();\n    return T;\n}\n\n/** ATTENTION:\n    This method is mostly the same as generic_scheduler::lock_task_pool(), with\n    a little different logic of slot state checks (slot is either locked or points\n    to our task pool).\n    Thus if either of them is changed, consider changing the counterpart as well. 
**/\ninline void generic_scheduler::acquire_task_pool() const {\n    if ( !in_arena() )\n        return; // we are not in arena - nothing to lock\n    bool sync_prepare_done = false;\n    for( atomic_backoff b;;b.pause() ) {\n#if TBB_USE_ASSERT\n        __TBB_ASSERT( my_arena_slot == my_arena->my_slots + my_arena_index, \"invalid arena slot index\" );\n        // Local copy of the arena slot task pool pointer is necessary for the next\n        // assertion to work correctly to exclude asynchronous state transition effect.\n        task** tp = my_arena_slot->task_pool;\n        __TBB_ASSERT( tp == LockedTaskPool || tp == my_arena_slot->task_pool_ptr, \"slot ownership corrupt?\" );\n#endif\n        if( my_arena_slot->task_pool != LockedTaskPool &&\n            as_atomic(my_arena_slot->task_pool).compare_and_swap(LockedTaskPool, my_arena_slot->task_pool_ptr ) == my_arena_slot->task_pool_ptr )\n        {\n            // We acquired our own slot\n            ITT_NOTIFY(sync_acquired, my_arena_slot);\n            break;\n        }\n        else if( !sync_prepare_done ) {\n            // Start waiting\n            ITT_NOTIFY(sync_prepare, my_arena_slot);\n            sync_prepare_done = true;\n        }\n        // Someone else acquired a lock, so pause and do exponential backoff.\n    }\n    __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, \"not really acquired task pool\" );\n} // generic_scheduler::acquire_task_pool\n\ninline void generic_scheduler::release_task_pool() const {\n    if ( !in_arena() )\n        return; // we are not in arena - nothing to unlock\n    __TBB_ASSERT( my_arena_slot, \"we are not in arena\" );\n    __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, \"arena slot is not locked\" );\n    ITT_NOTIFY(sync_releasing, my_arena_slot);\n    __TBB_store_with_release( my_arena_slot->task_pool, my_arena_slot->task_pool_ptr );\n}\n\n/** ATTENTION:\n    This method is mostly the same as generic_scheduler::acquire_task_pool(),\n    with a 
little different logic of slot state checks (slot can be empty, locked\n    or point to any task pool other than ours, and asynchronous transitions between\n    all these states are possible).\n    Thus if any of them is changed, consider changing the counterpart as well **/\ninline task** generic_scheduler::lock_task_pool( arena_slot* victim_arena_slot ) const {\n    task** victim_task_pool;\n    bool sync_prepare_done = false;\n    for( atomic_backoff backoff;; /*backoff pause embedded in the loop*/) {\n        victim_task_pool = victim_arena_slot->task_pool;\n        // NOTE: Do not use comparison of head and tail indices to check for\n        // the presence of work in the victim's task pool, as they may give\n        // incorrect indication because of task pool relocations and resizes.\n        if ( victim_task_pool == EmptyTaskPool ) {\n            // The victim thread emptied its task pool - nothing to lock\n            if( sync_prepare_done )\n                ITT_NOTIFY(sync_cancel, victim_arena_slot);\n            break;\n        }\n        if( victim_task_pool != LockedTaskPool &&\n            as_atomic(victim_arena_slot->task_pool).compare_and_swap(LockedTaskPool, victim_task_pool ) == victim_task_pool )\n        {\n            // We've locked victim's task pool\n            ITT_NOTIFY(sync_acquired, victim_arena_slot);\n            break;\n        }\n        else if( !sync_prepare_done ) {\n            // Start waiting\n            ITT_NOTIFY(sync_prepare, victim_arena_slot);\n            sync_prepare_done = true;\n        }\n        GATHER_STATISTIC( ++my_counters.thieves_conflicts );\n        // Someone else acquired a lock, so pause and do exponential backoff.\n#if __TBB_STEALING_ABORT_ON_CONTENTION\n        if(!backoff.bounded_pause()) {\n            // the 16 was acquired empirically and a theory behind it supposes\n            // that number of threads becomes much bigger than number of\n            // tasks which can be spawned by one thread 
causing excessive contention.\n            // TODO: However even small arenas can benefit from the abort on contention\n            //       if preemption of a thief is a problem\n            if(my_arena->my_limit >= 16)\n                return EmptyTaskPool;\n            __TBB_Yield();\n        }\n#else\n        backoff.pause();\n#endif\n    }\n    __TBB_ASSERT( victim_task_pool == EmptyTaskPool ||\n                  (victim_arena_slot->task_pool == LockedTaskPool && victim_task_pool != LockedTaskPool),\n                  \"not really locked victim's task pool?\" );\n    return victim_task_pool;\n} // generic_scheduler::lock_task_pool\n\ninline void generic_scheduler::unlock_task_pool( arena_slot* victim_arena_slot,\n                                                task** victim_task_pool ) const {\n    __TBB_ASSERT( victim_arena_slot, \"empty victim arena slot pointer\" );\n    __TBB_ASSERT( victim_arena_slot->task_pool == LockedTaskPool, \"victim arena slot is not locked\" );\n    ITT_NOTIFY(sync_releasing, victim_arena_slot);\n    __TBB_store_with_release( victim_arena_slot->task_pool, victim_task_pool );\n}\n\n\ninline task* generic_scheduler::prepare_for_spawning( task* t ) {\n    __TBB_ASSERT( t->state()==task::allocated, \"attempt to spawn task that is not in 'allocated' state\" );\n    t->prefix().state = task::ready;\n#if TBB_USE_ASSERT\n    if( task* parent = t->parent() ) {\n        internal::reference_count ref_count = parent->prefix().ref_count;\n        __TBB_ASSERT( ref_count>=0, \"attempt to spawn task whose parent has a ref_count<0\" );\n        __TBB_ASSERT( ref_count!=0, \"attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)\" );\n        parent->prefix().extra_state |= es_ref_count_active;\n    }\n#endif /* TBB_USE_ASSERT */\n    affinity_id dst_thread = t->prefix().affinity;\n    __TBB_ASSERT( dst_thread == 0 || is_version_3_task(*t),\n                  \"backwards compatibility to TBB 2.0 tasks is broken\" );\n    
if( dst_thread != 0 && dst_thread != my_affinity_id ) {\n        task_proxy& proxy = (task_proxy&)allocate_task( sizeof(task_proxy),\n                                                      __TBB_CONTEXT_ARG(NULL, NULL) );\n        // Mark as a proxy\n        proxy.prefix().extra_state = es_task_proxy;\n        proxy.outbox = &my_arena->mailbox(dst_thread);\n        // Mark proxy as present in both locations (sender's task pool and destination mailbox)\n        proxy.task_and_tag = intptr_t(t) | task_proxy::location_mask;\n#if __TBB_TASK_PRIORITY\n        proxy.prefix().context = t->prefix().context;\n#endif /* __TBB_TASK_PRIORITY */\n        ITT_NOTIFY( sync_releasing, proxy.outbox );\n        // Mail the proxy - after this point t may be destroyed by another thread at any moment.\n        proxy.outbox->push(proxy);\n        return &proxy;\n    }\n    return t;\n}\n\n/** Conceptually, this method should be a member of class scheduler.\n    But doing so would force us to publish class scheduler in the headers. 
*/\nvoid generic_scheduler::local_spawn( task& first, task*& next ) {\n    __TBB_ASSERT( governor::is_set(this), NULL );\n    if ( &first.prefix().next == &next ) {\n        // Single task is being spawned\n        size_t T = prepare_task_pool( 1 );\n        my_arena_slot->task_pool_ptr[T] = prepare_for_spawning( &first );\n        commit_spawned_tasks( T + 1 );\n    }\n    else {\n        // Task list is being spawned\n        task *arr[min_task_pool_size];\n        fast_reverse_vector<task*> tasks(arr, min_task_pool_size);\n        task *t_next = NULL;\n        for( task* t = &first; ; t = t_next ) {\n            // If t is affinitized to another thread, it may already be executed\n            // and destroyed by the time prepare_for_spawning returns.\n            // So milk it while it is alive.\n            bool end = &t->prefix().next == &next;\n            t_next = t->prefix().next;\n            tasks.push_back( prepare_for_spawning(t) );\n            if( end )\n                break;\n        }\n        size_t num_tasks = tasks.size();\n        size_t T = prepare_task_pool( num_tasks );\n        tasks.copy_memory( my_arena_slot->task_pool_ptr + T );\n        commit_spawned_tasks( T + num_tasks );\n    }\n    if ( !in_arena() )\n        enter_arena();\n    my_arena->advertise_new_work</*Spawned=*/true>();\n    assert_task_pool_valid();\n}\n\nvoid generic_scheduler::local_spawn_root_and_wait( task& first, task*& next ) {\n    __TBB_ASSERT( governor::is_set(this), NULL );\n    __TBB_ASSERT( &first, NULL );\n    auto_empty_task dummy( __TBB_CONTEXT_ARG(this, first.prefix().context) );\n    internal::reference_count n = 0;\n    for( task* t=&first; ; t=t->prefix().next ) {\n        ++n;\n        __TBB_ASSERT( !t->prefix().parent, \"not a root task, or already running\" );\n        t->prefix().parent = &dummy;\n        if( &t->prefix().next==&next ) break;\n#if __TBB_TASK_GROUP_CONTEXT\n        __TBB_ASSERT( t->prefix().context == 
t->prefix().next->prefix().context,\n                    \"all the root tasks in list must share the same context\");\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    }\n    dummy.prefix().ref_count = n+1;\n    if( n>1 )\n        local_spawn( *first.prefix().next, next );\n    local_wait_for_all( dummy, &first );\n}\n\nvoid tbb::internal::generic_scheduler::spawn( task& first, task*& next ) {\n    governor::local_scheduler()->local_spawn( first, next );\n}\n\nvoid tbb::internal::generic_scheduler::spawn_root_and_wait( task& first, task*& next ) {\n    governor::local_scheduler()->local_spawn_root_and_wait( first, next );\n}\n\nvoid tbb::internal::generic_scheduler::enqueue( task& t, void* prio ) {\n    generic_scheduler *s = governor::local_scheduler();\n    // these redirections are due to bw-compatibility, consider reworking some day\n    __TBB_ASSERT( s->my_arena, \"thread is not in any arena\" );\n    s->my_arena->enqueue_task(t, (intptr_t)prio, s->my_random );\n}\n\n#if __TBB_TASK_PRIORITY\nclass auto_indicator : no_copy {\n    volatile bool& my_indicator;\npublic:\n    auto_indicator ( volatile bool& indicator ) : my_indicator(indicator) { my_indicator = true ;}\n    ~auto_indicator () { my_indicator = false; }\n};\n\ntask* generic_scheduler::winnow_task_pool () {\n    GATHER_STATISTIC( ++my_counters.prio_winnowings );\n    __TBB_ASSERT( in_arena(), NULL );\n    __TBB_ASSERT( my_offloaded_tasks, \"At least one task is expected to be already offloaded\" );\n    // To eliminate possible sinking of the store to the indicator below the subsequent\n    // store to my_arena_slot->tail, the stores should have either been separated\n    // by full fence or both use release fences. And resetting indicator should have\n    // been done with release fence. 
But since this is just an optimization, and\n    // the corresponding checking sequence in arena::is_out_of_work() is not atomic\n    // anyway, fences aren't used, so that not to penalize warmer path.\n    auto_indicator indicator(my_pool_reshuffling_pending);\n    // The purpose of the synchronization algorithm here is for the owner thread\n    // to avoid locking task pool most of the time.\n    size_t T0 = __TBB_load_relaxed(my_arena_slot->tail);\n    __TBB_store_relaxed( my_arena_slot->tail, __TBB_load_relaxed(my_arena_slot->head) - 1 );\n    atomic_fence();\n    size_t H = __TBB_load_relaxed(my_arena_slot->head);\n    size_t T = __TBB_load_relaxed(my_arena_slot->tail);\n    __TBB_ASSERT( (intptr_t)T <= (intptr_t)T0, NULL);\n    __TBB_ASSERT( (intptr_t)H >= (intptr_t)T || (H == T0 && T == T0), NULL );\n    bool acquired = false;\n    if ( H == T ) {\n        // Either no contention with thieves during arbitration protocol execution or ...\n        if ( H >= T0 ) {\n            // ... the task pool got empty\n            reset_deque_and_leave_arena( /*locked=*/false );\n            return NULL;\n        }\n    }\n    else {\n        // Contention with thieves detected. 
Now without taking lock it is impossible\n        // to define the current head value because of its jitter caused by continuing\n        // stealing attempts (the pool is not locked so far).\n        acquired = true;\n        acquire_task_pool();\n        H = __TBB_load_relaxed(my_arena_slot->head);\n        if ( H >= T0 ) {\n            reset_deque_and_leave_arena( /*locked=*/true );\n            return NULL;\n        }\n    }\n    size_t src,\n           dst = T0;\n    // Find the first task to offload.\n    for ( src = H; src < T0; ++src ) {\n        task &t = *my_arena_slot->task_pool_ptr[src];\n        intptr_t p = priority(t);\n        if ( p < *my_ref_top_priority ) {\n            // Position of the first offloaded task will be the starting point\n            // for relocation of subsequent tasks that survive winnowing.\n            dst = src;\n            offload_task( t, p );\n            break;\n        }\n    }\n    for ( ++src; src < T0; ++src ) {\n        task &t = *my_arena_slot->task_pool_ptr[src];\n        intptr_t p = priority(t);\n        if ( p < *my_ref_top_priority )\n            offload_task( t, p );\n        else\n            my_arena_slot->task_pool_ptr[dst++] = &t;\n    }\n    __TBB_ASSERT( T0 >= dst, NULL );\n    task *t = H < dst ? 
my_arena_slot->task_pool_ptr[--dst] : NULL;\n    if ( H == dst ) {\n        // No tasks remain the primary pool\n        reset_deque_and_leave_arena( acquired );\n    }\n    else if ( acquired ) {\n        __TBB_ASSERT( !is_poisoned(my_arena_slot->task_pool_ptr[H]), NULL );\n        __TBB_store_relaxed( my_arena_slot->tail, dst );\n        release_task_pool();\n    }\n    else {\n        __TBB_ASSERT( !is_poisoned(my_arena_slot->task_pool_ptr[H]), NULL );\n        // Release fence is necessary to make sure possibly relocated task pointers\n        // become visible to potential thieves\n        __TBB_store_with_release( my_arena_slot->tail, dst );\n    }\n    my_arena_slot->fill_with_canary_pattern( dst, T0 );\n    assert_task_pool_valid();\n    return t;\n}\n\ntask* generic_scheduler::reload_tasks ( task*& offloaded_tasks, task**& offloaded_task_list_link, intptr_t top_priority ) {\n    GATHER_STATISTIC( ++my_counters.prio_reloads );\n    __TBB_ASSERT( !in_arena(), NULL );\n    task *arr[min_task_pool_size];\n    fast_reverse_vector<task*> tasks(arr, min_task_pool_size);\n    task **link = &offloaded_tasks;\n    task *t;\n    while ( (t = *link) ) {\n        task** next_ptr = &t->prefix().next_offloaded;\n        if ( priority(*t) >= top_priority ) {\n            tasks.push_back( t );\n            // Note that owner is an alias of next_offloaded. 
Thus the following\n            // assignment overwrites *next_ptr\n            task* next = *next_ptr;\n            t->prefix().owner = this;\n            __TBB_ASSERT( t->prefix().state == task::ready || t->prefix().extra_state == es_task_proxy, NULL );\n            *link = next;\n        }\n        else {\n            link = next_ptr;\n        }\n    }\n    if ( link == &offloaded_tasks ) {\n        offloaded_tasks = NULL;\n#if TBB_USE_ASSERT\n        offloaded_task_list_link = NULL;\n#endif /* TBB_USE_ASSERT */\n    }\n    else {\n        __TBB_ASSERT( link, NULL );\n        // Mark end of list\n        *link = NULL;\n        offloaded_task_list_link = link;\n    }\n    __TBB_ASSERT( link, NULL );\n    size_t num_tasks = tasks.size();\n    if ( num_tasks ) {\n        GATHER_STATISTIC( ++my_counters.prio_tasks_reloaded );\n        size_t T = prepare_task_pool( num_tasks );\n        tasks.copy_memory( my_arena_slot->task_pool_ptr + T );\n        if ( --num_tasks ) {\n            commit_spawned_tasks( T += num_tasks );\n            enter_arena();\n            my_arena->advertise_new_work</*Spawned=*/true>();\n        }\n        __TBB_ASSERT( T == __TBB_load_relaxed(my_arena_slot->tail), NULL );\n        __TBB_ASSERT( T < my_arena_slot->my_task_pool_size, NULL );\n        t = my_arena_slot->task_pool_ptr[T];\n        poison_pointer(my_arena_slot->task_pool_ptr[T]);\n        assert_task_pool_valid();\n    }\n    return t;\n}\n\ntask* generic_scheduler::reload_tasks () {\n    uintptr_t reload_epoch = *my_ref_reload_epoch;\n    __TBB_ASSERT( my_offloaded_tasks, NULL );\n    __TBB_ASSERT( my_local_reload_epoch <= reload_epoch\n                  || my_local_reload_epoch - reload_epoch > uintptr_t(-1)/2,\n                  \"Reload epoch counter overflow?\" );\n    if ( my_local_reload_epoch == reload_epoch )\n        return NULL;\n    __TBB_ASSERT( my_offloaded_tasks, NULL );\n    intptr_t top_priority = effective_reference_priority();\n    __TBB_ASSERT( 
(uintptr_t)top_priority < (uintptr_t)num_priority_levels, NULL );\n    task *t = reload_tasks( my_offloaded_tasks, my_offloaded_task_list_tail_link, top_priority );\n    if ( my_offloaded_tasks && (my_arena->my_bottom_priority >= top_priority || !my_arena->my_num_workers_requested) ) {\n        // Safeguard against deliberately relaxed synchronization while checking\n        // for the presence of work in arena (so that not to impact hot paths).\n        // Arena may be reset to empty state when offloaded low priority tasks\n        // are still present. This results in both bottom and top priority bounds\n        // becoming 'normal', which makes offloaded low priority tasks unreachable.\n        // Update arena's bottom priority to accommodate them.\n\n        // First indicate the presence of lower-priority tasks\n        my_market->update_arena_priority( *my_arena, priority(*my_offloaded_tasks) );\n        // Then mark arena as full to unlock arena priority level adjustment\n        // by arena::is_out_of_work(), and ensure worker's presence\n        my_arena->advertise_new_work</*Spawned=*/false>();\n    }\n    my_local_reload_epoch = reload_epoch;\n    return t;\n}\n#endif /* __TBB_TASK_PRIORITY */\n\ninline task* generic_scheduler::get_task() {\n    __TBB_ASSERT( in_arena(), NULL );\n    task* result = NULL;\n    size_t T = __TBB_load_relaxed(my_arena_slot->tail); // mirror\nretry:\n    __TBB_store_relaxed(my_arena_slot->tail, --T);\n    atomic_fence();\n    if ( (intptr_t)__TBB_load_relaxed(my_arena_slot->head) > (intptr_t)T ) {\n        acquire_task_pool();\n        size_t H = __TBB_load_relaxed(my_arena_slot->head); // mirror\n        if ( (intptr_t)H <= (intptr_t)T ) {\n            // The thief backed off - grab the task\n            result = my_arena_slot->task_pool_ptr[T];\n            __TBB_ASSERT( !is_poisoned(result), NULL );\n            poison_pointer( my_arena_slot->task_pool_ptr[T] );\n        }\n        else {\n            __TBB_ASSERT ( H == 
__TBB_load_relaxed(my_arena_slot->head)\n                        && T == __TBB_load_relaxed(my_arena_slot->tail)\n                        && H == T + 1, \"victim/thief arbitration algorithm failure\" );\n        }\n        if ( (intptr_t)H < (intptr_t)T )\n            release_task_pool();\n        else\n            reset_deque_and_leave_arena( /*locked=*/true );\n    }\n    else {\n        __TBB_control_consistency_helper(); // on my_arena_slot->head\n        result = my_arena_slot->task_pool_ptr[T];\n        __TBB_ASSERT( !is_poisoned(result), NULL );\n        poison_pointer( my_arena_slot->task_pool_ptr[T] );\n    }\n    if( result && is_proxy(*result) ) {\n        task_proxy &tp = *(task_proxy*)result;\n        result = tp.extract_task<task_proxy::pool_bit>();\n        if( !result ) {\n            // Proxy was empty, so it's our responsibility to free it\n            free_task<small_task>(tp);\n            if ( in_arena() )\n                goto retry;\n            __TBB_ASSERT( is_quiescent_local_task_pool_reset(), NULL );\n            return NULL;\n        }\n        GATHER_STATISTIC( ++my_counters.proxies_executed );\n        // Following assertion should be true because TBB 2.0 tasks never specify affinity, and hence are not proxied.\n        __TBB_ASSERT( is_version_3_task(*result), \"backwards compatibility with TBB 2.0 broken\" );\n        // Task affinity has changed.\n        my_innermost_running_task = result;\n        result->note_affinity(my_affinity_id);\n    }\n    __TBB_ASSERT( result || is_quiescent_local_task_pool_reset(), NULL );\n    return result;\n} // generic_scheduler::get_task\n\ntask* generic_scheduler::steal_task( arena_slot& victim_slot ) {\n    task** victim_pool = lock_task_pool( &victim_slot );\n    if ( !victim_pool )\n        return NULL;\n    task* result = NULL;\n    size_t H = __TBB_load_relaxed(victim_slot.head); // mirror\n    const size_t H0 = H;\n    int skip_and_bump = 0; // +1 for skipped task and +1 for bumped 
head&tail\nretry:\n    __TBB_store_relaxed( victim_slot.head, ++H );\n    atomic_fence();\n    if ( (intptr_t)H > (intptr_t)__TBB_load_relaxed(victim_slot.tail) ) {\n        // Stealing attempt failed, deque contents has not been changed by us\n        GATHER_STATISTIC( ++my_counters.thief_backoffs );\n        __TBB_store_relaxed( victim_slot.head, /*dead: H = */ H0 );\n        skip_and_bump++; // trigger that we bumped head and tail\n        __TBB_ASSERT ( !result, NULL );\n    }\n    else {\n        __TBB_control_consistency_helper(); // on victim_slot.tail\n        result = victim_pool[H-1];\n        __TBB_ASSERT( !is_poisoned(result), NULL );\n        if( is_proxy(*result) ) {\n            task_proxy& tp = *static_cast<task_proxy*>(result);\n            // If mailed task is likely to be grabbed by its destination thread, skip it.\n            if ( task_proxy::is_shared(tp.task_and_tag) && tp.outbox->recipient_is_idle() )\n            {\n                GATHER_STATISTIC( ++my_counters.proxies_bypassed );\n                result = NULL;\n                __TBB_ASSERT( skip_and_bump < 2, NULL );\n                skip_and_bump = 1; // note we skipped a task\n                goto retry;\n            }\n        }\n        __TBB_ASSERT( result, NULL );\n        // emit \"task was consumed\" signal\n        ITT_NOTIFY(sync_acquired, (void*)((uintptr_t)&victim_slot+sizeof(uintptr_t)));\n        const size_t H1 = H0 + 1;\n        if ( H1 < H ) {\n            // Some proxies in the task pool have been bypassed. Need to close\n            // the hole left by the stolen task. 
The following variant:\n            //     victim_pool[H-1] = victim_pool[H0];\n            // is of constant time, but creates a potential for degrading stealing\n            // mechanism efficiency and growing owner's stack size too much because\n            // of moving earlier split off (and thus larger) chunks closer to owner's\n            // end of the deque (tail).\n            // So we use linear time variant that is likely to be amortized to be\n            // near-constant time, though, and preserves stealing efficiency premises.\n            // These changes in the deque must be released to the owner.\n            memmove( victim_pool + H1, victim_pool + H0, (H - H1) * sizeof(task*) );\n            __TBB_store_with_release( victim_slot.head, /*dead: H = */ H1 );\n            if ( (intptr_t)H >= (intptr_t)__TBB_load_relaxed(victim_slot.tail) )\n                skip_and_bump++; // trigger that we bumped head and tail\n        }\n        poison_pointer( victim_pool[H0] );\n    }\n\n    unlock_task_pool( &victim_slot, victim_pool );\n    __TBB_ASSERT( skip_and_bump <= 2, NULL );\n#if __TBB_PREFETCHING\n    __TBB_cl_evict(&victim_slot.head);\n    __TBB_cl_evict(&victim_slot.tail);\n#endif\n    if( --skip_and_bump > 0 ) { // if both: task skipped and head&tail bumped\n        // Synchronize with snapshot as we bumped head and tail which can falsely trigger EMPTY state\n        atomic_fence();\n        my_arena->advertise_new_work</*Spawned=*/true>();\n    }\n    return result;\n}\n\ntask* generic_scheduler::get_mailbox_task() {\n    __TBB_ASSERT( my_affinity_id>0, \"not in arena\" );\n    while ( task_proxy* const tp = my_inbox.pop() ) {\n        if ( task* result = tp->extract_task<task_proxy::mailbox_bit>() ) {\n            ITT_NOTIFY( sync_acquired, my_inbox.outbox() );\n            result->prefix().extra_state |= es_task_is_stolen;\n            return result;\n        }\n        // We have exclusive access to the proxy, and can destroy it.\n        
free_task<no_cache_small_task>(*tp);\n    }\n    return NULL;\n}\n\n// TODO: Rename to publish_task_pool\nvoid generic_scheduler::enter_arena() {\n    __TBB_ASSERT ( my_arena, \"no arena: initialization not completed?\" );\n    __TBB_ASSERT ( my_arena_index < my_arena->my_num_slots, \"arena slot index is out-of-bound\" );\n    __TBB_ASSERT ( my_arena_slot == &my_arena->my_slots[my_arena_index], NULL);\n    __TBB_ASSERT ( my_arena_slot->task_pool == EmptyTaskPool, \"someone else grabbed my arena slot?\" );\n    __TBB_ASSERT ( __TBB_load_relaxed(my_arena_slot->head) < __TBB_load_relaxed(my_arena_slot->tail),\n                   \"entering arena without tasks to share\" );\n    // Release signal on behalf of previously spawned tasks (when this thread was not in arena yet)\n    ITT_NOTIFY(sync_releasing, my_arena_slot);\n    __TBB_store_with_release( my_arena_slot->task_pool, my_arena_slot->task_pool_ptr );\n}\n\nvoid generic_scheduler::leave_arena() {\n    __TBB_ASSERT( in_arena(), \"Not in arena\" );\n    // Do not reset my_arena_index. It will be used to (attempt to) re-acquire the slot next time\n    __TBB_ASSERT( &my_arena->my_slots[my_arena_index] == my_arena_slot, \"arena slot and slot index mismatch\" );\n    __TBB_ASSERT ( my_arena_slot->task_pool == LockedTaskPool, \"Task pool must be locked when leaving arena\" );\n    __TBB_ASSERT ( is_quiescent_local_task_pool_empty(), \"Cannot leave arena when the task pool is not empty\" );\n    ITT_NOTIFY(sync_releasing, &my_arena->my_slots[my_arena_index]);\n    // No release fence is necessary here as this assignment precludes external\n    // accesses to the local task pool when becomes visible. 
Thus it is harmless\n    // if it gets hoisted above preceding local bookkeeping manipulations.\n    __TBB_store_relaxed( my_arena_slot->task_pool, EmptyTaskPool );\n}\n\ngeneric_scheduler* generic_scheduler::create_worker( market& m, size_t index ) {\n    generic_scheduler* s = allocate_scheduler( NULL, index ); // index is not a real slot in arena\n#if __TBB_TASK_GROUP_CONTEXT\n    s->my_dummy_task->prefix().context = &the_dummy_context;\n    // Sync up the local cancellation state with the global one. No need for fence here.\n    s->my_context_state_propagation_epoch = the_context_state_propagation_epoch;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    s->my_market = &m;\n    s->init_stack_info();\n#if __TBB_TASK_PRIORITY\n    s->my_ref_top_priority = &s->my_market->my_global_top_priority;\n    s->my_ref_reload_epoch = &s->my_market->my_global_reload_epoch;\n#endif /* __TBB_TASK_PRIORITY */\n    return s;\n}\n\n// TODO: make it a member method\ngeneric_scheduler* generic_scheduler::create_master( arena& a ) {\n    generic_scheduler* s = allocate_scheduler( &a, 0 /*Master thread always occupies the first slot*/ );\n    task& t = *s->my_dummy_task;\n    s->my_innermost_running_task = &t;\n    s->my_dispatching_task = &t;\n    t.prefix().ref_count = 1;\n    governor::sign_on(s);\n    __TBB_ASSERT( &task::self()==&t, \"governor::sign_on failed?\" );\n#if __TBB_TASK_GROUP_CONTEXT\n    // Context to be used by root tasks by default (if the user has not specified one).\n    // Allocation is done by NFS allocator because we cannot reuse memory allocated\n    // for task objects since the free list is empty at the moment.\n    t.prefix().context = a.my_default_ctx;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    s->my_market = a.my_market;\n    __TBB_ASSERT( s->my_arena_index == 0, \"Master thread must occupy the first slot in its arena\" );\n    s->attach_mailbox(1);\n    s->my_arena_slot = a.my_slots + 0;\n    s->my_arena_slot->my_scheduler = s;\n#if _WIN32||_WIN64\n    
__TBB_ASSERT( s->my_market, NULL );\n    s->my_market->register_master( s->master_exec_resource );\n#endif /* _WIN32||_WIN64 */\n    s->init_stack_info();\n#if __TBB_TASK_GROUP_CONTEXT\n    // Sync up the local cancellation state with the global one. No need for fence here.\n    s->my_context_state_propagation_epoch = the_context_state_propagation_epoch;\n#endif\n#if __TBB_TASK_PRIORITY\n    // In the current implementation master threads continue processing even when\n    // there are other masters with higher priority. Only TBB worker threads are\n    // redistributed between arenas based on the latters' priority. Thus master\n    // threads use arena's top priority as a reference point (in contrast to workers\n    // that use my_market->my_global_top_priority).\n    s->my_ref_top_priority = &s->my_arena->my_top_priority;\n    s->my_ref_reload_epoch = &s->my_arena->my_reload_epoch;\n#endif /* __TBB_TASK_PRIORITY */\n#if __TBB_SCHEDULER_OBSERVER\n    // Process any existing observers.\n    __TBB_ASSERT( a.my_observers.empty(), \"Just created arena cannot have any observers associated with it\" );\n    the_global_observer_list.notify_entry_observers( s->my_last_global_observer, /*worker=*/false );\n#endif /* __TBB_SCHEDULER_OBSERVER */\n    return s;\n}\n\nvoid generic_scheduler::cleanup_worker( void* arg, bool worker ) {\n    generic_scheduler& s = *(generic_scheduler*)arg;\n    __TBB_ASSERT( !s.my_arena_slot, \"cleaning up attached worker\" );\n#if __TBB_SCHEDULER_OBSERVER\n    if ( worker ) // can be called by master for worker, do not notify master twice\n        the_global_observer_list.notify_exit_observers( s.my_last_global_observer, /*worker=*/true );\n#endif /* __TBB_SCHEDULER_OBSERVER */\n    s.free_scheduler();\n}\n\nvoid generic_scheduler::cleanup_master() {\n    generic_scheduler& s = *this; // for similarity with cleanup_worker\n    __TBB_ASSERT( s.my_arena_slot, NULL);\n#if __TBB_SCHEDULER_OBSERVER\n    s.my_arena->my_observers.notify_exit_observers( 
s.my_last_local_observer, /*worker=*/false );\n    the_global_observer_list.notify_exit_observers( s.my_last_global_observer, /*worker=*/false );\n#endif /* __TBB_SCHEDULER_OBSERVER */\n    if( in_arena() ) {\n        acquire_task_pool();\n        if ( my_arena_slot->task_pool == EmptyTaskPool ||\n             __TBB_load_relaxed(my_arena_slot->head) >= __TBB_load_relaxed(my_arena_slot->tail) )\n        {\n            // Local task pool is empty\n            leave_arena();\n        }\n        else {\n            // Master's local task pool may e.g. contain proxies of affinitized tasks.\n            release_task_pool();\n            __TBB_ASSERT ( governor::is_set(this), \"TLS slot is cleared before the task pool cleanup\" );\n            s.local_wait_for_all( *s.my_dummy_task, NULL );\n            __TBB_ASSERT( !in_arena(), NULL );\n            __TBB_ASSERT ( governor::is_set(this), \"Other thread reused our TLS key during the task pool cleanup\" );\n        }\n    }\n    __TBB_ASSERT( s.my_market, NULL );\n    market *my_market = s.my_market;\n#if _WIN32||_WIN64\n    s.my_market->unregister_master( s.master_exec_resource );\n#endif /* _WIN32||_WIN64 */\n    arena* a = s.my_arena;\n    __TBB_ASSERT(a->my_slots+0 == my_arena_slot, NULL);\n#if __TBB_STATISTICS\n    *my_arena_slot->my_counters += s.my_counters;\n#endif /* __TBB_STATISTICS */\n#if __TBB_TASK_PRIORITY\n    __TBB_ASSERT( my_arena_slot->my_scheduler, NULL );\n    // Master's scheduler may be locked by a worker taking arena snapshot or by\n    // a thread propagating task group state change across the context tree.\n    while ( as_atomic(my_arena_slot->my_scheduler).compare_and_swap(NULL, this) != this )\n        __TBB_Yield();\n    __TBB_ASSERT( !my_arena_slot->my_scheduler, NULL );\n#else /* !__TBB_TASK_PRIORITY */\n    __TBB_store_with_release(my_arena_slot->my_scheduler, (generic_scheduler*)NULL);\n#endif /* __TBB_TASK_PRIORITY */\n    my_arena_slot = NULL; // detached from slot\n    
s.free_scheduler();\n    // Resetting arena to EMPTY state (as earlier TBB versions did) should not be\n    // done here (or anywhere else in the master thread to that matter) because\n    // after introducing arena-per-master logic and fire-and-forget tasks doing\n    // so can result either in arena's premature destruction (at least without\n    // additional costly checks in workers) or in unnecessary arena state changes\n    // (and ensuing workers migration).\n#if __TBB_STATISTICS_EARLY_DUMP\n    GATHER_STATISTIC( a->dump_arena_statistics() );\n#endif\n    if (governor::needsWaitWorkers())\n        my_market->prepare_wait_workers();\n    a->on_thread_leaving</*is_master*/true>();\n    if (governor::needsWaitWorkers())\n        my_market->wait_workers();\n}\n\n} // namespace internal\n} // namespace tbb\n\n/*\n    Comments:\n\n1.  The premise of the cancellation support implementation is that cancellations are\n    not part of the hot path of the program execution. Therefore all changes in its\n    implementation in order to reduce the overhead of the cancellation control flow\n    should be done only in ways that do not increase overhead of the normal execution.\n\n    In general contexts are used by all threads and their descendants are created in\n    different threads as well. In order to minimize impact of the cross-thread tree\n    maintenance (first of all because of the synchronization), the tree of contexts\n    is split into pieces, each of which is handled by the only thread. Such pieces\n    are represented as lists of contexts, members of which are contexts that were\n    bound to their parents in the given thread.\n\n    The context tree maintenance and cancellation propagation algorithms is designed\n    in such a manner that cross-thread access to a context list will take place only\n    when cancellation signal is sent (by user or when an exception happens), and\n    synchronization is necessary only then. 
Thus the normal execution flow (without\n    exceptions and cancellation) remains free from any synchronization done on\n    behalf of exception handling and cancellation support.\n\n2.  Consider parallel cancellations at the different levels of the context tree:\n\n        Ctx1 <- Cancelled by Thread1            |- Thread2 started processing\n         |                                      |\n        Ctx2                                    |- Thread1 started processing\n         |                                   T1 |- Thread2 finishes and syncs up local counters\n        Ctx3 <- Cancelled by Thread2            |\n         |                                      |- Ctx5 is bound to Ctx2\n        Ctx4                                    |\n                                             T2 |- Thread1 reaches Ctx2\n\n    Thread-propagator of each cancellation increments global counter. However the thread\n    propagating the cancellation from the outermost context (Thread1) may be the last\n    to finish. Which means that the local counters may be synchronized earlier (by Thread2,\n    at Time1) than it propagated cancellation into Ctx2 (at time Time2). If a new context\n    (Ctx5) is created and bound to Ctx2 between Time1 and Time2, checking its parent only\n    (Ctx2) may result in cancellation request being lost.\n\n    This issue is solved by doing the whole propagation under the lock.\n\n    If we need more concurrency while processing parallel cancellations, we could try\n    the following modification of the propagation algorithm:\n\n    advance global counter and remember it\n    for each thread:\n        scan thread's list of contexts\n    for each thread:\n        sync up its local counter only if the global counter has not been changed\n\n    However this version of the algorithm requires more analysis and verification.\n\n3.  
There is no portable way to get stack base address in Posix, however the modern\n    Linux versions provide pthread_attr_np API that can be used  to obtain thread's\n    stack size and base address. Unfortunately even this function does not provide\n    enough information for the main thread on IA-64 architecture (RSE spill area\n    and memory stack are allocated as two separate discontinuous chunks of memory),\n    and there is no portable way to discern the main and the secondary threads.\n    Thus for OS X* and IA-64 Linux architecture we use the TBB worker stack size for \n    all threads and use the current stack top as the stack base. This simplified \n    approach is based on the following assumptions:\n    1) If the default stack size is insufficient for the user app needs, the\n    required amount will be explicitly specified by the user at the point of the\n    TBB scheduler initialization (as an argument to tbb::task_scheduler_init\n    constructor).\n    2) When a master thread initializes the scheduler, it has enough space on its\n    stack. Here \"enough\" means \"at least as much as worker threads have\".\n    3) If the user app strives to conserve the memory by cutting stack size, it\n    should do this for TBB workers too (as in the #1).\n*/\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/scheduler.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_scheduler_H\n#define _TBB_scheduler_H\n\n#include \"scheduler_common.h\"\n#include \"tbb/spin_mutex.h\"\n#include \"mailbox.h\"\n#include \"tbb_misc.h\" // for FastRandom\n#include \"itt_notify.h\"\n#include \"../rml/include/rml_tbb.h\"\n\n#if __TBB_SURVIVE_THREAD_SWITCH\n#include \"cilk-tbb-interop.h\"\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n\nnamespace tbb {\nnamespace internal {\n\ntemplate<typename SchedulerTraits> class custom_scheduler;\nstruct nested_arena_context;\n\n//------------------------------------------------------------------------\n// generic_scheduler\n//------------------------------------------------------------------------\n\n#if __TBB_TASK_GROUP_CONTEXT\nstruct scheduler_list_node_t {\n    scheduler_list_node_t *my_prev,\n                          *my_next;\n};\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n#define EmptyTaskPool ((task**)0)\n#define LockedTaskPool ((task**)~(intptr_t)0)\n\n#define LockedMaster ((generic_scheduler*)~(intptr_t)0)\n\nstruct scheduler_state {\n    //! Index of the arena slot the scheduler occupies now, or occupied last time.\n    size_t my_arena_index; // TODO: make it unsigned and pair with my_affinity_id to fit into cache line\n\n    //! Pointer to the slot in the arena we own at the moment.\n    arena_slot* my_arena_slot;\n\n    //! The arena that I own (if master) or am servicing at the moment (if worker)\n    arena* my_arena;\n\n    //! Innermost task whose task::execute() is running.\n    task* my_innermost_running_task;\n\n    //! Task, in the context of which the current TBB dispatch loop is running.\n    /** Outside of or in the outermost dispatch loop (not in a nested call to\n        wait_for_all) it is my_dummy_task for master threads, and NULL for workers. **/\n    task* my_dispatching_task;\n\n    mail_inbox my_inbox;\n\n    //! 
The mailbox id assigned to this scheduler.\n    /** The id is assigned upon first entry into the arena.\n        TODO: how are id's being garbage collected?\n        TODO: master thread may enter arena and leave and then reenter.\n                We want to give it the same affinity_id upon reentry, if practical.\n      */\n    affinity_id my_affinity_id;\n\n#if __TBB_SCHEDULER_OBSERVER\n    //! Last observer in the global observers list processed by this scheduler\n    observer_proxy* my_last_global_observer;\n\n    //! Last observer in the local observers list processed by this scheduler\n    observer_proxy* my_last_local_observer;\n#endif /* __TBB_SCHEDULER_OBSERVER */\n#if __TBB_TASK_PRIORITY\n    //! Latest known highest priority of tasks in the market or arena.\n    /** Master threads currently tracks only tasks in their arenas, while workers\n        take into account global top priority (among all arenas in the market). **/\n    volatile intptr_t *my_ref_top_priority;\n\n    //! Pointer to market's (for workers) or current arena's (for the master) reload epoch counter.\n    volatile uintptr_t *my_ref_reload_epoch;\n#endif /* __TBB_TASK_PRIORITY */\n};\n\n//! Work stealing task scheduler.\n/** None of the fields here are ever read or written by threads other than\n    the thread that creates the instance.\n\n    Class generic_scheduler is an abstract base class that contains most of the scheduler,\n    except for tweaks specific to processors and tools (e.g. VTune).\n    The derived template class custom_scheduler<SchedulerTraits> fills in the tweaks. */\nclass generic_scheduler: public scheduler, public ::rml::job, public scheduler_state {\npublic: // almost every class in TBB uses generic_scheduler\n\n    //! 
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.\n    static const size_t quick_task_size = 256-task_prefix_reservation_size;\n\n    static bool is_version_3_task( task& t ) {\n        return (t.prefix().extra_state & 0x0F)>=0x1;\n    }\n\n    //! Position in the call stack specifying its maximal filling when stealing is still allowed\n    uintptr_t my_stealing_threshold;\n#if __TBB_ipf\n    //! Position in the RSE backup area specifying its maximal filling when stealing is still allowed\n    uintptr_t my_rsb_stealing_threshold;\n#endif\n\n    static const size_t null_arena_index = ~size_t(0);\n\n    // TODO: Rename into is_task_pool_published()\n    inline bool in_arena () const;\n\n    inline bool is_local_task_pool_quiescent () const;\n\n    inline bool is_quiescent_local_task_pool_empty () const;\n\n    inline bool is_quiescent_local_task_pool_reset () const;\n\n    //! The market I am in\n    market* my_market;\n\n    //! Random number generator used for picking a random victim from which to steal.\n    FastRandom my_random;\n\n    //! Free list of small tasks that can be reused.\n    task* my_free_list;\n\n#if __TBB_HOARD_NONLOCAL_TASKS\n    //! Free list of small non-local tasks that should be returned or can be reused.\n    task* my_nonlocal_free_list;\n#endif\n    //! Fake root task created by slave threads.\n    /** The task is used as the \"parent\" argument to method wait_for_all. */\n    task* my_dummy_task;\n\n    //! Reference count for scheduler\n    /** Number of task_scheduler_init objects that point to this scheduler */\n    long my_ref_count;\n\n    inline void attach_mailbox( affinity_id id );\n\n    /* A couple of bools can be located here because space is otherwise just padding after my_affinity_id. */\n\n    //! True if *this was created by automatic TBB initialization\n    bool my_auto_initialized;\n\n#if __TBB_COUNT_TASK_NODES\n    //! 
Net number of big task objects that have been allocated but not yet freed.\n    intptr_t my_task_node_count;\n#endif /* __TBB_COUNT_TASK_NODES */\n\n    //! Sets up the data necessary for the stealing limiting heuristics\n    void init_stack_info ();\n\n    //! Returns true if stealing is allowed\n    bool can_steal () {\n        int anchor;\n        // TODO IDEA: Add performance warning?\n#if __TBB_ipf\n        return my_stealing_threshold < (uintptr_t)&anchor && (uintptr_t)__TBB_get_bsp() < my_rsb_stealing_threshold;\n#else\n        return my_stealing_threshold < (uintptr_t)&anchor;\n#endif\n    }\n\n    //! Actions common to enter_arena and try_enter_arena\n    void do_enter_arena();\n\n    //! Used by workers to enter the arena \n    /** Does not lock the task pool in case if arena slot has been successfully grabbed. **/\n    void enter_arena();\n\n    //! Leave the arena\n    /** Leaving arena automatically releases the task pool if it is locked. **/\n    void leave_arena();\n\n    //! Resets head and tail indices to 0, and leaves arena\n    /** Argument specifies whether the task pool is currently locked by the owner\n        (via acquire_task_pool).**/\n    inline void reset_deque_and_leave_arena ( bool locked );\n\n    //! Locks victim's task pool, and returns pointer to it. The pointer can be NULL.\n    /** Garbles victim_arena_slot->task_pool for the duration of the lock. **/\n    task** lock_task_pool( arena_slot* victim_arena_slot ) const;\n\n    //! Unlocks victim's task pool\n    /** Restores victim_arena_slot->task_pool munged by lock_task_pool. **/\n    void unlock_task_pool( arena_slot* victim_arena_slot, task** victim_task_pool ) const;\n\n    //! Locks the local task pool\n    /** Garbles my_arena_slot->task_pool for the duration of the lock. Requires\n        correctly set my_arena_slot->task_pool_ptr. **/\n    void acquire_task_pool() const;\n\n    //! 
Unlocks the local task pool\n    /** Restores my_arena_slot->task_pool munged by acquire_task_pool. Requires\n        correctly set my_arena_slot->task_pool_ptr. **/\n    void release_task_pool() const;\n\n    //! Checks if t is affinitized to another thread, and if so, bundles it as proxy.\n    /** Returns either t or proxy containing t. **/\n    task* prepare_for_spawning( task* t );\n\n    //! Makes newly spawned tasks visible to thieves\n    inline void commit_spawned_tasks( size_t new_tail );\n\n    //! Makes relocated tasks visible to thieves and releases the local task pool.\n    /** Obviously, the task pool must be locked when calling this method. **/\n    inline void commit_relocated_tasks( size_t new_tail );\n\n    //! Get a task from the local pool.\n    /** Called only by the pool owner.\n        Returns the pointer to the task or NULL if the pool is empty. \n        In the latter case compacts the pool. **/\n    task* get_task();\n\n    //! Attempt to get a task from the mailbox.\n    /** Gets a task only if it has not been executed by its sender or a thief \n        that has stolen it from the sender's task pool. Otherwise returns NULL.\n\n        This method is intended to be used only by the thread extracting the proxy \n        from its mailbox. (In contrast to local task pool, mailbox can be read only\n        by its owner). **/\n    task* get_mailbox_task();\n\n    //! True if t is a task_proxy\n    static bool is_proxy( const task& t ) {\n        return t.prefix().extra_state==es_task_proxy;\n    }\n\n    //! Steal task from another scheduler's ready pool.\n    task* steal_task( arena_slot& victim_arena_slot );\n\n    /** Initial size of the task deque sufficient to serve without reallocation\n        4 nested parallel_for calls with iteration space of 65535 grains each. **/\n    static const size_t min_task_pool_size = 64;\n\n    //! 
Makes sure that the task pool can accommodate at least n more elements\n    /** If necessary relocates existing task pointers or grows the ready task deque.\n        Returns (possible updated) tail index (not accounting for n). **/\n    size_t prepare_task_pool( size_t n );\n\n    //! Initialize a scheduler for a master thread.\n    static generic_scheduler* create_master( arena& a );\n\n    //! Perform necessary cleanup when a master thread stops using TBB.\n    void cleanup_master();\n\n    //! Initialize a scheduler for a worker thread.\n    static generic_scheduler* create_worker( market& m, size_t index );\n\n    //! Perform necessary cleanup when a worker thread finishes.\n    static void cleanup_worker( void* arg, bool worker );\n\nprotected:\n    template<typename SchedulerTraits> friend class custom_scheduler;\n    generic_scheduler( arena*, size_t index );\n\npublic:\n#if TBB_USE_ASSERT > 1\n    //! Check that internal data structures are in consistent state.\n    /** Raises __TBB_ASSERT failure if inconsistency is found. */\n    void assert_task_pool_valid () const;\n#else\n    void assert_task_pool_valid() const {}\n#endif /* TBB_USE_ASSERT <= 1 */\n\n#if __TBB_TASK_ARENA\n    void nested_arena_entry(arena*, nested_arena_context &, bool);\n    void nested_arena_exit(nested_arena_context &);\n    void wait_until_empty();\n#endif\n\n    /*override*/ \n    void spawn( task& first, task*& next );\n\n    /*override*/ \n    void spawn_root_and_wait( task& first, task*& next );\n\n    /*override*/ \n    void enqueue( task&, void* reserved );\n\n    void local_spawn( task& first, task*& next );\n    void local_spawn_root_and_wait( task& first, task*& next );\n    virtual void local_wait_for_all( task& parent, task* child ) = 0;\n\n    //! Destroy and deallocate this scheduler object\n    void free_scheduler();\n\n    //! Allocate task object, either from the heap or a free list.\n    /** Returns uninitialized task object with initialized prefix. 
*/\n    task& allocate_task( size_t number_of_bytes, \n                       __TBB_CONTEXT_ARG(task* parent, task_group_context* context) );\n\n    //! Put task on free list.\n    /** Does not call destructor. */\n    template<free_task_hint h>\n    void free_task( task& t );\n\n    //! Return task object to the memory allocator.\n    inline void deallocate_task( task& t );\n\n    //! True if running on a worker thread, false otherwise.\n    inline bool is_worker();\n\n    //! True if the scheduler is on the outermost dispatch level in a master thread.\n    /** Returns true when this scheduler instance is associated with an application\n        thread, and is not executing any TBB task. This includes being in a TBB \n        dispatch loop (one of wait_for_all methods) invoked directly from that thread. **/\n    inline bool master_outermost_level () const;\n\n    //! True if the scheduler is on the outermost dispatch level in a worker thread.\n    inline bool worker_outermost_level () const;\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Returns task group context used by this scheduler instance.\n    /** This context is associated with root tasks created by a master thread \n        without explicitly specified context object outside of any running task.\n\n        Note that the default context of a worker thread is never accessed by\n        user code (directly or indirectly). **/\n    inline task_group_context* default_context ();\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n    //! Returns number of worker threads in the arena this thread belongs to.\n    unsigned number_of_workers_in_my_arena();\n\n#if __TBB_COUNT_TASK_NODES\n    intptr_t get_task_node_count( bool count_arena_workers = false );\n#endif /* __TBB_COUNT_TASK_NODES */\n\n    //! Special value used to mark my_return_list as not taking any more entries.\n    static task* plugged_return_list() {return (task*)(intptr_t)(-1);}\n\n    //! Number of small tasks that have been allocated by this scheduler. 
\n    intptr_t my_small_task_count;\n\n    //! List of small tasks that have been returned to this scheduler by other schedulers.\n    task* my_return_list;\n\n    //! Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption).\n    /** Returns obtained task or NULL if all attempts fail. */\n    virtual task* receive_or_steal_task( __TBB_atomic reference_count& completion_ref_count,\n                                         bool return_if_no_work ) = 0;\n\n    //! Free a small task t that that was allocated by a different scheduler \n    void free_nonlocal_small_task( task& t ); \n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Padding isolating thread-local members from members that can be written to by other threads.\n    char _padding1[NFS_MaxLineSize - sizeof(context_list_node_t)];\n\n    //! Head of the thread specific list of task group contexts.\n    context_list_node_t my_context_list_head;\n\n    //! Mutex protecting access to the list of task group contexts.\n    // TODO: check whether it can be deadly preempted and replace by spinning/sleeping mutex\n    spin_mutex my_context_list_mutex;\n\n    //! Last state propagation epoch known to this thread \n    /** Together with the_context_state_propagation_epoch constitute synchronization protocol\n        that keeps hot path of task group context construction destruction mostly \n        lock-free.\n        When local epoch equals the global one, the state of task group contexts\n        registered with this thread is consistent with that of the task group trees\n        they belong to. **/\n    uintptr_t my_context_state_propagation_epoch;\n\n    //! Flag indicating that a context is being destructed by its owner thread \n    /** Together with my_nonlocal_ctx_list_update constitute synchronization protocol\n        that keeps hot path of context destruction (by the owner thread) mostly \n        lock-free. 
**/\n    tbb::atomic<uintptr_t> my_local_ctx_list_update;\n\n#if __TBB_TASK_PRIORITY\n    //! Returns reference priority used to decide whether a task should be offloaded.\n    inline intptr_t effective_reference_priority () const;\n\n    // TODO: move into slots and fix is_out_of_work\n    //! Task pool for offloading tasks with priorities lower than the current top priority.\n    task* my_offloaded_tasks;\n\n    //! Points to the last offloaded task in the my_offloaded_tasks list.\n    task** my_offloaded_task_list_tail_link;\n\n    //! Indicator of how recently the offload area was checked for the presence of top priority tasks.\n    uintptr_t my_local_reload_epoch;\n\n    //! Indicates that the pool is likely non-empty even if appears so from outside\n    volatile bool my_pool_reshuffling_pending;\n\n    //! Searches offload area for top priority tasks and reloads found ones into primary task pool.\n    /** Returns one of the found tasks or NULL. **/\n    task* reload_tasks ();\n\n    task* reload_tasks ( task*& offloaded_tasks, task**& offloaded_task_list_link, intptr_t top_priority );\n\n    //! Moves tasks with priority below the top one from primary task pool into offload area.\n    /** Returns the next execution candidate task or NULL. **/\n    task* winnow_task_pool ();\n\n    //! Unconditionally moves the task into offload area.\n    inline void offload_task ( task& t, intptr_t task_priority );\n#endif /* __TBB_TASK_PRIORITY */\n\n    //! Detaches abandoned contexts\n    /** These contexts must be destroyed by other threads. **/\n    void cleanup_local_context_list ();\n\n    //! Finds all contexts registered by this scheduler affected by the state change\n    //! 
and propagates the new state to them.\n    template <typename T>\n    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );\n\n    // check consistency\n    static void assert_context_valid(const task_group_context *tgc) {\n        suppress_unused_warning(tgc);\n#if TBB_USE_ASSERT\n        __TBB_ASSERT(tgc, NULL);\n        uintptr_t ctx = tgc->my_version_and_traits;\n        __TBB_ASSERT(is_alive(ctx), \"referenced task_group_context was destroyed\");\n        static const char *msg = \"task_group_context is invalid\";\n        __TBB_ASSERT(!(ctx&~(3|(7<<task_group_context::traits_offset))), msg); // the value fits known values of versions and traits\n        __TBB_ASSERT(tgc->my_kind < task_group_context::dying, msg);\n        __TBB_ASSERT(tgc->my_cancellation_requested == 0 || tgc->my_cancellation_requested == 1, msg);\n        __TBB_ASSERT(tgc->my_state < task_group_context::low_unused_state_bit, msg);\n        if(tgc->my_kind != task_group_context::isolated) {\n            __TBB_ASSERT(tgc->my_owner, msg);\n            __TBB_ASSERT(tgc->my_node.my_next && tgc->my_node.my_prev, msg);\n        }\n#if __TBB_TASK_PRIORITY\n        assert_priority_valid(tgc->my_priority);\n#endif\n        if(tgc->my_parent)\n#if TBB_USE_ASSERT > 1\n            assert_context_valid(tgc->my_parent);\n#else\n            __TBB_ASSERT(is_alive(tgc->my_parent->my_version_and_traits), msg);\n#endif\n#endif\n    }\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n#if _WIN32||_WIN64\nprivate:\n    //! Handle returned by RML when registering a master with RML\n    ::rml::server::execution_resource_t master_exec_resource;\npublic:\n#endif /* _WIN32||_WIN64 */\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Flag indicating that a context is being destructed by non-owner thread.\n    /** See also my_local_ctx_list_update. 
**/\n    tbb::atomic<uintptr_t> my_nonlocal_ctx_list_update;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n#if __TBB_SURVIVE_THREAD_SWITCH\n    __cilk_tbb_unwatch_thunk my_cilk_unwatch_thunk;\n#if TBB_USE_ASSERT\n    //! State values used to check interface contract with cilkrts.\n    /** Names of cs_running...cs_freed derived from state machine diagram in cilk-tbb-interop.h */\n    enum cilk_state_t {\n        cs_none=0xF000, // Start at nonzero value so that we can detect use of zeroed memory.\n        cs_running,\n        cs_limbo,\n        cs_freed\n    };\n    cilk_state_t my_cilk_state;\n#endif /* TBB_USE_ASSERT */\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n\n#if __TBB_STATISTICS\n    //! Set of counters to track internal statistics on per thread basis\n    /** Placed at the end of the class definition to minimize the disturbance of\n        the core logic memory operations. **/\n    mutable statistics_counters my_counters;\n#endif /* __TBB_STATISTICS */\n\n}; // class generic_scheduler\n\n\n} // namespace internal\n} // namespace tbb\n\n#include \"arena.h\"\n#include \"governor.h\"\n\nnamespace tbb {\nnamespace internal {\n\ninline bool generic_scheduler::in_arena () const {\n    __TBB_ASSERT(my_arena_slot, 0);\n    return my_arena_slot->task_pool != EmptyTaskPool;\n}\n\ninline bool generic_scheduler::is_local_task_pool_quiescent () const {\n    __TBB_ASSERT(my_arena_slot, 0);\n    task** tp = my_arena_slot->task_pool;\n    return tp == EmptyTaskPool || tp == LockedTaskPool;\n}\n\ninline bool generic_scheduler::is_quiescent_local_task_pool_empty () const {\n    __TBB_ASSERT( is_local_task_pool_quiescent(), \"Task pool is not quiescent\" );\n    return __TBB_load_relaxed(my_arena_slot->head) == __TBB_load_relaxed(my_arena_slot->tail);\n}\n\ninline bool generic_scheduler::is_quiescent_local_task_pool_reset () const {\n    __TBB_ASSERT( is_local_task_pool_quiescent(), \"Task pool is not quiescent\" );\n    return __TBB_load_relaxed(my_arena_slot->head) == 0 && 
__TBB_load_relaxed(my_arena_slot->tail) == 0;\n}\n\ninline bool generic_scheduler::master_outermost_level () const {\n    return my_dispatching_task == my_dummy_task;\n}\n\ninline bool generic_scheduler::worker_outermost_level () const {\n    return !my_dispatching_task;\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\ninline task_group_context* generic_scheduler::default_context () {\n    return my_dummy_task->prefix().context;\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\ninline void generic_scheduler::attach_mailbox( affinity_id id ) {\n    __TBB_ASSERT(id>0,NULL);\n    my_inbox.attach( my_arena->mailbox(id) );\n    my_affinity_id = id;\n}\n\ninline bool generic_scheduler::is_worker() {\n    return my_arena_index != 0; //TODO: rework for multiple master\n}\n\ninline unsigned generic_scheduler::number_of_workers_in_my_arena() {\n    return my_arena->my_max_num_workers;\n}\n\n//! Return task object to the memory allocator.\ninline void generic_scheduler::deallocate_task( task& t ) {\n#if TBB_USE_ASSERT\n    task_prefix& p = t.prefix();\n    p.state = 0xFF;\n    p.extra_state = 0xFF; \n    poison_pointer(p.next);\n#endif /* TBB_USE_ASSERT */\n    NFS_Free((char*)&t-task_prefix_reservation_size);\n#if __TBB_COUNT_TASK_NODES\n    --my_task_node_count;\n#endif /* __TBB_COUNT_TASK_NODES */\n}\n\n#if __TBB_COUNT_TASK_NODES\ninline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) {\n    return my_task_node_count + (count_arena_workers? 
my_arena->workers_task_node_count(): 0);\n}\n#endif /* __TBB_COUNT_TASK_NODES */\n\ninline void generic_scheduler::reset_deque_and_leave_arena ( bool locked ) {\n    if ( !locked )\n        acquire_task_pool();\n    __TBB_store_relaxed( my_arena_slot->tail, 0 );\n    __TBB_store_relaxed( my_arena_slot->head, 0 );\n    leave_arena();\n}\n\n//TODO: move to arena_slot\ninline void generic_scheduler::commit_spawned_tasks( size_t new_tail ) {\n    __TBB_ASSERT ( new_tail <= my_arena_slot->my_task_pool_size, \"task deque end was overwritten\" );\n    // emit \"task was released\" signal\n    ITT_NOTIFY(sync_releasing, (void*)((uintptr_t)my_arena_slot+sizeof(uintptr_t)));\n    // Release fence is necessary to make sure that previously stored task pointers\n    // are visible to thieves.\n    __TBB_store_with_release( my_arena_slot->tail, new_tail );\n}\n\nvoid generic_scheduler::commit_relocated_tasks ( size_t new_tail ) {\n    __TBB_ASSERT( is_local_task_pool_quiescent(),\n                  \"Task pool must be locked when calling commit_relocated_tasks()\" );\n    __TBB_store_relaxed( my_arena_slot->head, 0 );\n    // Tail is updated last to minimize probability of a thread making arena \n    // snapshot being misguided into thinking that this task pool is empty.\n    __TBB_store_relaxed( my_arena_slot->tail, new_tail );\n    release_task_pool();\n}\n\ntemplate<free_task_hint hint>\nvoid generic_scheduler::free_task( task& t ) {\n#if __TBB_HOARD_NONLOCAL_TASKS\n    static const int h = hint&(~local_task);\n#else\n    static const free_task_hint h = hint;\n#endif\n    GATHER_STATISTIC(--my_counters.active_tasks);\n    task_prefix& p = t.prefix();\n    // Verify that optimization hints are correct.\n    __TBB_ASSERT( h!=small_local_task || p.origin==this, NULL );\n    __TBB_ASSERT( !(h&small_task) || p.origin, NULL );\n    __TBB_ASSERT( !(h&local_task) || (!p.origin || uintptr_t(p.origin) > uintptr_t(4096)), \"local_task means allocated\");\n    poison_value(p.depth);\n    
poison_value(p.ref_count);\n    poison_pointer(p.owner);\n    __TBB_ASSERT( 1L<<t.state() & (1L<<task::executing|1L<<task::allocated), NULL );\n    p.state = task::freed;\n    if( h==small_local_task || p.origin==this ) {\n        GATHER_STATISTIC(++my_counters.free_list_length);\n        p.next = my_free_list;\n        my_free_list = &t;\n    } else if( !(h&local_task) && p.origin && uintptr_t(p.origin) < uintptr_t(4096) ) {\n        // a special value reserved for future use, do nothing since\n        // origin is not pointing to a scheduler instance\n    } else if( !(h&local_task) && p.origin ) {\n        GATHER_STATISTIC(++my_counters.free_list_length);\n#if __TBB_HOARD_NONLOCAL_TASKS\n        if( !(h&no_cache) ) {\n            p.next = my_nonlocal_free_list;\n            my_nonlocal_free_list = &t;\n        } else\n#endif\n        free_nonlocal_small_task(t);\n    } else {\n        GATHER_STATISTIC(--my_counters.big_tasks);\n        deallocate_task(t);\n    }\n}\n\n#if __TBB_TASK_PRIORITY\ninline intptr_t generic_scheduler::effective_reference_priority () const {\n    // Workers on the outermost dispatch level (i.e. with empty stack) use market's\n    // priority as a reference point (to speedup discovering process level priority\n    // changes). But when there are enough workers to service (even if only partially)\n    // a lower priority arena, they should use arena's priority as a reference, lest\n    // be trapped in a futile spinning (because market's priority would prohibit\n    // executing ANY tasks in this arena).\n    return !worker_outermost_level() || \n            my_arena->my_num_workers_allotted < my_arena->num_workers_active()\n            ? 
*my_ref_top_priority : my_arena->my_top_priority;\n}\n\ninline void generic_scheduler::offload_task ( task& t, intptr_t /*priority*/ ) {\n    GATHER_STATISTIC( ++my_counters.prio_tasks_offloaded );\n    __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL );\n#if TBB_USE_ASSERT\n    t.prefix().state = task::ready;\n#endif /* TBB_USE_ASSERT */\n    t.prefix().next_offloaded = my_offloaded_tasks;\n    my_offloaded_tasks = &t;\n}\n#endif /* __TBB_TASK_PRIORITY */\n\n#if __TBB_FP_CONTEXT\nclass cpu_ctl_env_helper {\n    cpu_ctl_env guard_cpu_ctl_env;\n    cpu_ctl_env curr_cpu_ctl_env;\npublic:\n    cpu_ctl_env_helper() {\n        guard_cpu_ctl_env.get_env();\n        curr_cpu_ctl_env = guard_cpu_ctl_env;\n    }\n    ~cpu_ctl_env_helper() {\n        if ( curr_cpu_ctl_env != guard_cpu_ctl_env )\n            guard_cpu_ctl_env.set_env();\n    }\n    void set_env( const task_group_context *ctx ) {\n        generic_scheduler::assert_context_valid(ctx);\n        const cpu_ctl_env &ctl = *punned_cast<cpu_ctl_env*>(&ctx->my_cpu_ctl_env);\n        if ( ctl != curr_cpu_ctl_env ) {\n            curr_cpu_ctl_env = ctl;\n            curr_cpu_ctl_env.set_env();\n        }\n    }\n    void restore_default() {\n        if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) {\n            guard_cpu_ctl_env.set_env();\n            curr_cpu_ctl_env = guard_cpu_ctl_env;\n        }\n    }\n};\n#else\nstruct cpu_ctl_env_helper {\n    void set_env( __TBB_CONTEXT_ARG1(task_group_context *) ) {}\n    void restore_default() {}\n};\n#endif /* __TBB_FP_CONTEXT */\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_scheduler_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/scheduler_common.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_scheduler_common_H\n#define _TBB_scheduler_common_H\n\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/cache_aligned_allocator.h\"\n\n#include <string.h>  // for memset, memcpy, memmove\n\n#include \"tbb_statistics.h\"\n\n#if TBB_USE_ASSERT > 1\n#include <stdio.h>\n#endif /* TBB_USE_ASSERT > 1 */\n\n/* Temporarily change \"private\" to \"public\" while including \"tbb/task.h\".\n   This hack allows us to avoid publishing internal types and methods\n   in the public header files just for sake of friend declarations. 
*/\n#ifndef private\n    #define private public\n    #define undef_private\n#endif\n\n#include \"tbb/task.h\"\n#include \"tbb/tbb_exception.h\"\n\n#ifdef undef_private\n    #undef private\n#endif\n\n#ifndef __TBB_SCHEDULER_MUTEX_TYPE\n#define __TBB_SCHEDULER_MUTEX_TYPE tbb::spin_mutex\n#endif\n// TODO: add conditional inclusion based on specified type\n#include \"tbb/spin_mutex.h\"\n\n// This macro is an attempt to get rid of ugly ifdefs in the shared parts of the code.\n// It drops the second argument depending on whether the controlling macro is defined.\n// The first argument is just a convenience allowing to keep comma before the macro usage.\n#if __TBB_TASK_GROUP_CONTEXT\n    #define __TBB_CONTEXT_ARG1(context) context\n    #define __TBB_CONTEXT_ARG(arg1, context) arg1, context\n#else /* !__TBB_TASK_GROUP_CONTEXT */\n    #define __TBB_CONTEXT_ARG1(context)\n    #define __TBB_CONTEXT_ARG(arg1, context) arg1\n#endif /* !__TBB_TASK_GROUP_CONTEXT */\n\n#if DO_TBB_TRACE\n#include <cstdio>\n#define TBB_TRACE(x) ((void)std::printf x)\n#else\n#define TBB_TRACE(x) ((void)(0))\n#endif /* DO_TBB_TRACE */\n\n#if !__TBB_CPU_CTL_ENV_PRESENT\n#include <fenv.h>\n#endif\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Workaround for overzealous compiler warnings\n    // These particular warnings are so ubiquitous that no attempt is made to narrow\n    // the scope of the warnings.\n    #pragma warning (disable: 4100 4127 4312 4244 4267 4706)\n#endif\n\nnamespace tbb {\nnamespace interface7 {\nnamespace internal {\nclass task_arena_base;\nclass delegated_task;\nclass wait_task;\n}}\nnamespace internal {\nusing namespace interface7::internal;\n\nclass arena;\ntemplate<typename SchedulerTraits> class custom_scheduler;\nclass generic_scheduler;\nclass governor;\nclass mail_outbox;\nclass market;\nclass observer_proxy;\nclass task_scheduler_observer_v3;\n\n#if __TBB_TASK_PRIORITY\nstatic const intptr_t num_priority_levels = 3;\nstatic const intptr_t normalized_normal_priority 
= (num_priority_levels - 1) / 2;\n\ninline intptr_t normalize_priority ( priority_t p ) {\n    return intptr_t(p - priority_low) / priority_stride_v4;\n}\n\nstatic const priority_t priority_from_normalized_rep[num_priority_levels] = {\n    priority_low, priority_normal, priority_high\n};\n\ninline void assert_priority_valid ( intptr_t p ) {\n    __TBB_ASSERT_EX( p >= 0 && p < num_priority_levels, NULL );\n}\n\ninline intptr_t& priority ( task& t ) {\n    return t.prefix().context->my_priority;\n}\n#endif /* __TBB_TASK_PRIORITY */\n\n//! Mutex type for global locks in the scheduler\ntypedef __TBB_SCHEDULER_MUTEX_TYPE scheduler_mutex_type;\n\n#if __TBB_TASK_GROUP_CONTEXT\n//! Task group state change propagation global epoch\n/** Together with generic_scheduler::my_context_state_propagation_epoch forms\n    cross-thread signaling mechanism that allows to avoid locking at the hot path\n    of normal execution flow.\n\n    When a descendant task group context is registered or unregistered, the global\n    and local epochs are compared. If they differ, a state change is being propagated,\n    and thus registration/deregistration routines take slower branch that may block\n    (at most one thread of the pool can be blocked at any moment). Otherwise the\n    control path is lock-free and fast. **/\nextern uintptr_t the_context_state_propagation_epoch;\n\n//! Mutex guarding state change propagation across task groups forest.\n/** Also protects modification of related data structures. **/\ntypedef scheduler_mutex_type context_state_propagation_mutex_type;\nextern context_state_propagation_mutex_type the_context_state_propagation_mutex;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n//! Alignment for a task object\nconst size_t task_alignment = 32;\n\n//! Number of bytes reserved for a task prefix\n/** If not exactly sizeof(task_prefix), the extra bytes *precede* the task_prefix. 
*/\nconst size_t task_prefix_reservation_size = ((sizeof(internal::task_prefix)-1)/task_alignment+1)*task_alignment;\n\n//! Definitions for bits in task_prefix::extra_state\nenum task_extra_state {\n    //! Tag for v1 tasks (i.e. tasks in TBB 1.0 and 2.0)\n    es_version_1_task = 0,\n    //! Tag for v3 tasks (i.e. tasks in TBB 2.1-2.2)\n    es_version_3_task = 1,\n    //! Tag for enqueued tasks\n    es_task_enqueued = 0x10,\n    //! Tag for v3 task_proxy.\n    es_task_proxy = 0x20,\n    //! Set if ref_count might be changed by another thread.  Used for debugging.\n    es_ref_count_active = 0x40,\n    //! Set if the task has been stolen\n    es_task_is_stolen = 0x80\n};\n\ninline void reset_extra_state ( task *t ) {\n    t->prefix().extra_state &= ~(es_task_is_stolen | es_task_enqueued);\n}\n\n//! Optimization hint to free_task that enables it omit unnecessary tests and code.\nenum free_task_hint {\n    //! No hint\n    no_hint=0,\n    //! Task is known to have been allocated by this scheduler\n    local_task=1,\n    //! Task is known to be a small task.\n    /** Task should be returned to the free list of *some* scheduler, possibly not this scheduler. */\n    small_task=2,\n    //! Bitwise-OR of local_task and small_task.\n    /** Task should be returned to free list of this scheduler. */\n    small_local_task=3,\n    //! Disable caching for a small task.\n    no_cache = 4,\n    //! Task is known to be a small task and must not be cached.\n    no_cache_small_task = no_cache | small_task\n};\n\n//------------------------------------------------------------------------\n// Debugging support\n//------------------------------------------------------------------------\n\n#if TBB_USE_ASSERT\n\nstatic const uintptr_t venom = tbb::internal::select_size_t_constant<0xDEADBEEFU,0xDDEEAADDDEADBEEFULL>::value;\n\ntemplate <typename T>\nvoid poison_value ( T& val ) { val = * punned_cast<T*>(&venom); }\n\n/** Expected to be used in assertions only, thus no empty form is defined. 
**/\ninline bool is_alive( uintptr_t v ) { return v != venom; }\n\n/** Logically, this method should be a member of class task.\n    But we do not want to publish it, so it is here instead. */\ninline void assert_task_valid( const task& task ) {\n    __TBB_ASSERT( &task!=NULL, NULL );\n    __TBB_ASSERT( !is_poisoned(&task), NULL );\n    __TBB_ASSERT( (uintptr_t)&task % task_alignment == 0, \"misaligned task\" );\n#if __TBB_RECYCLE_TO_ENQUEUE\n    __TBB_ASSERT( (unsigned)task.state()<=(unsigned)task::to_enqueue, \"corrupt task (invalid state)\" );\n#else\n    __TBB_ASSERT( (unsigned)task.state()<=(unsigned)task::recycle, \"corrupt task (invalid state)\" );\n#endif\n}\n\n#else /* !TBB_USE_ASSERT */\n\n/** In contrast to debug version poison_value() is a macro here because\n    the variable used as its argument may be undefined in release builds. **/\n#define poison_value(g) ((void)0)\n\ninline void assert_task_valid( const task& ) {}\n\n#endif /* !TBB_USE_ASSERT */\n\n//------------------------------------------------------------------------\n// Helpers\n//------------------------------------------------------------------------\n\n#if __TBB_TASK_GROUP_CONTEXT\ninline bool ConcurrentWaitsEnabled ( task& t ) {\n    return (t.prefix().context->my_version_and_traits & task_group_context::concurrent_wait) != 0;\n}\n\ninline bool CancellationInfoPresent ( task& t ) {\n    return t.prefix().context->my_cancellation_requested != 0;\n}\n\n#if TBB_USE_CAPTURED_EXCEPTION\n    inline tbb_exception* TbbCurrentException( task_group_context*, tbb_exception* src) { return src->move(); }\n    inline tbb_exception* TbbCurrentException( task_group_context*, captured_exception* src) { return src; }\n#else\n    // Using macro instead of an inline function here allows to avoid evaluation of the\n    // TbbCapturedException expression when exact propagation is enabled for the context.\n    #define TbbCurrentException(context, TbbCapturedException) \\\n        context->my_version_and_traits 
& task_group_context::exact_exception    \\\n            ? tbb_exception_ptr::allocate()    \\\n            : tbb_exception_ptr::allocate( *(TbbCapturedException) );\n#endif /* !TBB_USE_CAPTURED_EXCEPTION */\n\n#define TbbRegisterCurrentException(context, TbbCapturedException) \\\n    if ( context->cancel_group_execution() ) {  \\\n        /* We are the first to signal cancellation, so store the exception that caused it. */  \\\n        context->my_exception = TbbCurrentException( context, TbbCapturedException ); \\\n    }\n\n#define TbbCatchAll(context)  \\\n    catch ( tbb_exception& exc ) {  \\\n        TbbRegisterCurrentException( context, &exc );   \\\n    } catch ( std::exception& exc ) {   \\\n        TbbRegisterCurrentException( context, captured_exception::allocate(typeid(exc).name(), exc.what()) ); \\\n    } catch ( ... ) {   \\\n        TbbRegisterCurrentException( context, captured_exception::allocate(\"...\", \"Unidentified exception\") );\\\n    }\n\n#else /* !__TBB_TASK_GROUP_CONTEXT */\n\ninline bool ConcurrentWaitsEnabled ( task& t ) { return false; }\n\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n//------------------------------------------------------------------------\n// arena_slot\n//------------------------------------------------------------------------\nstruct arena_slot_line1 {\n    //TODO: make this tbb:atomic<>.\n    //! Scheduler of the thread attached to the slot\n    /** Marks the slot as busy, and is used to iterate through the schedulers belonging to this arena **/\n    generic_scheduler* my_scheduler;\n\n    // Synchronization of access to Task pool\n    /** Also is used to specify if the slot is empty or locked:\n         0 - empty\n        -1 - locked **/\n    task* *__TBB_atomic task_pool;\n\n    //! Index of the first ready task in the deque.\n    /** Modified by thieves, and by the owner during compaction/reallocation **/\n    __TBB_atomic size_t head;\n};\n\nstruct arena_slot_line2 {\n    //! 
Hint provided for operations with the container of starvation-resistant tasks.\n    /** Modified by the owner thread (during these operations). **/\n    unsigned hint_for_pop;\n\n    //! Index of the element following the last ready task in the deque.\n    /** Modified by the owner thread. **/\n    __TBB_atomic size_t tail;\n\n    //! Capacity of the primary task pool (number of elements - pointers to task).\n    size_t my_task_pool_size;\n\n    // Task pool of the scheduler that owns this slot\n    task* *__TBB_atomic task_pool_ptr;\n\n#if __TBB_STATISTICS\n    //! Set of counters to accumulate internal statistics related to this arena\n    statistics_counters *my_counters;\n#endif /* __TBB_STATISTICS */\n};\n\nstruct arena_slot : padded<arena_slot_line1>, padded<arena_slot_line2> {\n#if TBB_USE_ASSERT\n    void fill_with_canary_pattern ( size_t first, size_t last ) {\n        for ( size_t i = first; i < last; ++i )\n            poison_pointer(task_pool_ptr[i]);\n    }\n#else\n    void fill_with_canary_pattern ( size_t, size_t ) {}\n#endif /* TBB_USE_ASSERT */\n\n    void allocate_task_pool( size_t n ) {\n        size_t byte_size = ((n * sizeof(task*) + NFS_MaxLineSize - 1) / NFS_MaxLineSize) * NFS_MaxLineSize;\n        my_task_pool_size = byte_size / sizeof(task*);\n        task_pool_ptr = (task**)NFS_Allocate( 1, byte_size, NULL );\n        // No need to clear the fresh deque since valid items are designated by the head and tail members.\n        // But fill it with a canary pattern in the high vigilance debug mode.\n        fill_with_canary_pattern( 0, my_task_pool_size );\n    }\n\n    //! 
Deallocate task pool that was allocated by means of allocate_task_pool.\n    void free_task_pool( ) {\n#if !__TBB_TASK_ARENA\n        __TBB_ASSERT( !task_pool /*TODO: == EmptyTaskPool*/, NULL);\n#else\n        //TODO: understand the assertion and modify\n#endif\n        if( task_pool_ptr ) {\n           __TBB_ASSERT( my_task_pool_size, NULL);\n           NFS_Free( task_pool_ptr );\n           task_pool_ptr = NULL;\n           my_task_pool_size = 0;\n        }\n    }\n};\n\n#if !__TBB_CPU_CTL_ENV_PRESENT\nclass cpu_ctl_env {\n    fenv_t *my_fenv_ptr;\npublic:\n    cpu_ctl_env() : my_fenv_ptr(NULL) {}\n    ~cpu_ctl_env() {\n        if ( my_fenv_ptr )\n            tbb::internal::NFS_Free( (void*)my_fenv_ptr );\n    }\n    // It is possible not to copy memory but just to copy pointers but the following issues should be addressed:\n    //   1. The arena lifetime and the context lifetime are independent;\n    //   2. The user is allowed to recapture different FPU settings to context so 'current FPU settings' inside\n    //   dispatch loop may become invalid.\n    // But do we really want to improve the fenv implementation? 
It seems to be better to replace the fenv implementation\n    // with a platform specific implementation.\n    cpu_ctl_env( const cpu_ctl_env &src ) : my_fenv_ptr(NULL) {\n        *this = src;\n    }\n    cpu_ctl_env& operator=( const cpu_ctl_env &src ) {\n        __TBB_ASSERT( src.my_fenv_ptr, NULL );\n        if ( !my_fenv_ptr )\n            my_fenv_ptr = (fenv_t*)tbb::internal::NFS_Allocate(1, sizeof(fenv_t), NULL);\n        *my_fenv_ptr = *src.my_fenv_ptr;\n        return *this;\n    }\n    bool operator!=( const cpu_ctl_env &ctl ) const {\n        __TBB_ASSERT( my_fenv_ptr, \"cpu_ctl_env is not initialized.\" );\n        __TBB_ASSERT( ctl.my_fenv_ptr, \"cpu_ctl_env is not initialized.\" );\n        return memcmp( (void*)my_fenv_ptr, (void*)ctl.my_fenv_ptr, sizeof(fenv_t) );\n    }\n    void get_env () {\n        if ( !my_fenv_ptr )\n            my_fenv_ptr = (fenv_t*)tbb::internal::NFS_Allocate(1, sizeof(fenv_t), NULL);\n        fegetenv( my_fenv_ptr );\n    }\n    const cpu_ctl_env& set_env () const {\n        __TBB_ASSERT( my_fenv_ptr, \"cpu_ctl_env is not initialized.\" );\n        fesetenv( my_fenv_ptr );\n        return *this;\n    }\n};\n#endif /* !__TBB_CPU_CTL_ENV_PRESENT */\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_scheduler_common_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/scheduler_utility.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_scheduler_utility_H\n#define _TBB_scheduler_utility_H\n\n#include \"scheduler.h\"\n\nnamespace tbb {\nnamespace internal {\n\n//------------------------------------------------------------------------\n// auto_empty_task\n//------------------------------------------------------------------------\n\n//! 
Smart holder for the empty task class with automatic destruction\nclass auto_empty_task {\n    task* my_task;\n    generic_scheduler* my_scheduler;\npublic:\n    auto_empty_task ( __TBB_CONTEXT_ARG(generic_scheduler *s, task_group_context* context) ) \n        : my_task( new(&s->allocate_task(sizeof(empty_task), __TBB_CONTEXT_ARG(NULL, context))) empty_task )\n        , my_scheduler(s)\n    {}\n    // empty_task has trivial destructor, so there's no need to call it.\n    ~auto_empty_task () { my_scheduler->free_task<small_local_task>(*my_task); }\n\n    operator task& () { return *my_task; }\n    task* operator & () { return my_task; }\n    task_prefix& prefix () { return my_task->prefix(); }\n}; // class auto_empty_task\n\n//------------------------------------------------------------------------\n// fast_reverse_vector\n//------------------------------------------------------------------------\n\n//! Vector that grows without reallocations, and stores items in the reverse order.\n/** Requires to initialize its first segment with a preallocated memory chunk\n    (usually it is static array or an array allocated on the stack).\n    The second template parameter specifies maximal number of segments. Each next \n    segment is twice as large as the previous one. 
**/\ntemplate<typename T, size_t max_segments = 16>\nclass fast_reverse_vector\n{\npublic:\n    fast_reverse_vector ( T* initial_segment, size_t segment_size )\n        : m_cur_segment(initial_segment)\n        , m_cur_segment_size(segment_size)\n        , m_pos(segment_size)\n        , m_num_segments(0)\n        , m_size(0)\n    {\n        __TBB_ASSERT ( initial_segment && segment_size, \"Nonempty initial segment must be supplied\");\n    }\n\n    ~fast_reverse_vector ()\n    {\n        for ( size_t i = 1; i < m_num_segments; ++i )\n            NFS_Free( m_segments[i] );\n    }\n\n    size_t size () const { return m_size + m_cur_segment_size - m_pos; }\n\n    void push_back ( const T& val )\n    {\n        if ( !m_pos ) {\n            if ( !m_num_segments ) m_segments[m_num_segments++] = m_cur_segment;\n            m_size += m_cur_segment_size;\n            m_cur_segment_size *= 2;\n            m_pos = m_cur_segment_size;\n            m_segments[m_num_segments++] = m_cur_segment = (T*)NFS_Allocate( m_cur_segment_size, sizeof(T), NULL );\n            __TBB_ASSERT ( m_num_segments < max_segments, \"Maximal capacity exceeded\" );\n        }\n        m_cur_segment[--m_pos] = val;\n    }\n\n    //! Copies the contents of the vector into the dst array. \n    /** Can only be used when T is a POD type, as copying does not invoke copy constructors. **/\n    void copy_memory ( T* dst ) const\n    {\n        size_t sz = m_cur_segment_size - m_pos;\n        memcpy( dst, m_cur_segment + m_pos, sz * sizeof(T) );\n        dst += sz;\n        sz = m_cur_segment_size / 2;\n        for ( long i = (long)m_num_segments - 2; i >= 0; --i ) {\n            memcpy( dst, m_segments[i], sz * sizeof(T) );\n            dst += sz;\n            sz /= 2;\n        }\n    }\n\nprotected:\n    //! The current (not completely filled) segment\n    T       *m_cur_segment;\n\n    //! Capacity of m_cur_segment\n    size_t  m_cur_segment_size;\n\n    //! 
Insertion position in m_cur_segment\n    size_t  m_pos;\n\n    //! Array of segments (has fixed size specified by the second template parameter)\n    T       *m_segments[max_segments];\n    \n    //! Number of segments (the size of m_segments)\n    size_t  m_num_segments;\n\n    //! Number of items in the segments in m_segments\n    size_t  m_size;\n\n}; // class fast_reverse_vector\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_scheduler_utility_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/semaphore.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"semaphore.h\"\n#if __TBB_USE_SRWLOCK\n#include \"dynamic_link.h\" // Refers to src/tbb, not include/tbb\n#include \"tbb_misc.h\"\n#endif\n\nnamespace tbb {\nnamespace internal {\n\n// TODO: For new win UI port, we can use SRWLock API without dynamic_link etc.\n#if __TBB_USE_SRWLOCK\n\nstatic atomic<do_once_state> concmon_module_inited;\n\nvoid WINAPI init_binsem_using_event( SRWLOCK* h_ )\n{\n    srwl_or_handle* shptr = (srwl_or_handle*) h_;\n    shptr->h = CreateEventEx( NULL, NULL, 0, EVENT_ALL_ACCESS|SEMAPHORE_ALL_ACCESS );\n}\n\nvoid WINAPI acquire_binsem_using_event( SRWLOCK* h_ )\n{\n    srwl_or_handle* shptr = (srwl_or_handle*) h_;\n    WaitForSingleObjectEx( shptr->h, INFINITE, FALSE );\n}\n\nvoid WINAPI release_binsem_using_event( SRWLOCK* h_ )\n{\n    srwl_or_handle* shptr = (srwl_or_handle*) h_;\n    SetEvent( shptr->h );\n}\n\nstatic void (WINAPI *__TBB_init_binsem)( SRWLOCK* ) = (void (WINAPI *)(SRWLOCK*))&init_binsem_using_event;\nstatic void (WINAPI *__TBB_acquire_binsem)( SRWLOCK* ) = (void (WINAPI *)(SRWLOCK*))&acquire_binsem_using_event;\nstatic void (WINAPI *__TBB_release_binsem)( SRWLOCK* ) = (void (WINAPI *)(SRWLOCK*))&release_binsem_using_event;\n\n//! 
Table describing the how to link the handlers.\nstatic const dynamic_link_descriptor SRWLLinkTable[] = {\n    DLD(InitializeSRWLock,       __TBB_init_binsem),\n    DLD(AcquireSRWLockExclusive, __TBB_acquire_binsem),\n    DLD(ReleaseSRWLockExclusive, __TBB_release_binsem)\n};\n\ninline void init_concmon_module()\n{\n    __TBB_ASSERT( (uintptr_t)__TBB_init_binsem==(uintptr_t)&init_binsem_using_event, NULL );\n    if( dynamic_link( \"Kernel32.dll\", SRWLLinkTable, sizeof(SRWLLinkTable)/sizeof(dynamic_link_descriptor) ) ) {\n        __TBB_ASSERT( (uintptr_t)__TBB_init_binsem!=(uintptr_t)&init_binsem_using_event, NULL );\n        __TBB_ASSERT( (uintptr_t)__TBB_acquire_binsem!=(uintptr_t)&acquire_binsem_using_event, NULL );\n        __TBB_ASSERT( (uintptr_t)__TBB_release_binsem!=(uintptr_t)&release_binsem_using_event, NULL );\n    }\n}\n\nbinary_semaphore::binary_semaphore() {\n    atomic_do_once( &init_concmon_module, concmon_module_inited );\n\n    __TBB_init_binsem( &my_sem.lock ); \n    if( (uintptr_t)__TBB_init_binsem!=(uintptr_t)&init_binsem_using_event )\n        P();\n}\n\nbinary_semaphore::~binary_semaphore() {\n    if( (uintptr_t)__TBB_init_binsem==(uintptr_t)&init_binsem_using_event )\n        CloseHandle( my_sem.h );\n}\n\nvoid binary_semaphore::P() { __TBB_acquire_binsem( &my_sem.lock ); }\n\nvoid binary_semaphore::V() { __TBB_release_binsem( &my_sem.lock ); }\n\n#endif /* __TBB_USE_SRWLOCK */\n\n} // namespace internal\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/semaphore.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tbb_semaphore_H\n#define __TBB_tbb_semaphore_H\n\n#include \"tbb/tbb_stddef.h\"\n\n#if _WIN32||_WIN64\n#include \"tbb/machine/windows_api.h\"\n\n#elif __APPLE__\n#include <mach/semaphore.h>\n#include <mach/task.h>\n#include <mach/mach_init.h>\n#include <mach/error.h>\n\n#else\n#include <semaphore.h>\n#ifdef TBB_USE_DEBUG\n#include <errno.h>\n#endif\n#endif /*_WIN32||_WIN64*/\n\nnamespace tbb {\nnamespace internal {\n\n\n#if _WIN32||_WIN64\ntypedef LONG sem_count_t;\n//! 
Edsger Dijkstra's counting semaphore\nclass semaphore : no_copy {\n    static const int max_semaphore_cnt = MAXLONG;\npublic:\n    //! ctor\n    semaphore(size_t start_cnt_ = 0) {init_semaphore(start_cnt_);}\n    //! dtor\n    ~semaphore() {CloseHandle( sem );}\n    //! wait/acquire\n    void P() {WaitForSingleObjectEx( sem, INFINITE, FALSE );}\n    //! post/release \n    void V() {ReleaseSemaphore( sem, 1, NULL );}\nprivate:\n    HANDLE sem;\n    void init_semaphore(size_t start_cnt_) {\n        sem = CreateSemaphoreEx( NULL, LONG(start_cnt_), max_semaphore_cnt, NULL, 0, SEMAPHORE_ALL_ACCESS );\n    }\n};\n#elif __APPLE__\n//! Edsger Dijkstra's counting semaphore\nclass semaphore : no_copy {\npublic:\n    //! ctor\n    semaphore(int start_cnt_ = 0) : sem(start_cnt_) { init_semaphore(start_cnt_); }\n    //! dtor\n    ~semaphore() {\n        kern_return_t ret = semaphore_destroy( mach_task_self(), sem );\n        __TBB_ASSERT_EX( ret==err_none, NULL );\n    }\n    //! wait/acquire\n    void P() { \n        int ret;\n        do {\n            ret = semaphore_wait( sem );\n        } while( ret==KERN_ABORTED );\n        __TBB_ASSERT( ret==KERN_SUCCESS, \"semaphore_wait() failed\" );\n    }\n    //! post/release \n    void V() { semaphore_signal( sem ); }\nprivate:\n    semaphore_t sem;\n    void init_semaphore(int start_cnt_) {\n        kern_return_t ret = semaphore_create( mach_task_self(), &sem, SYNC_POLICY_FIFO, start_cnt_ );\n        __TBB_ASSERT_EX( ret==err_none, \"failed to create a semaphore\" );\n    }\n};\n#else /* Linux/Unix */\ntypedef uint32_t sem_count_t;\n//! Edsger Dijkstra's counting semaphore\nclass semaphore : no_copy {\npublic:\n    //! ctor\n    semaphore(int start_cnt_ = 0 ) { init_semaphore( start_cnt_ ); }\n\n    //! dtor\n    ~semaphore() {\n        int ret = sem_destroy( &sem );\n        __TBB_ASSERT_EX( !ret, NULL );\n    }\n    //! 
wait/acquire\n    void P() {\n        while( sem_wait( &sem )!=0 )\n            __TBB_ASSERT( errno==EINTR, NULL );\n    }\n    //! post/release \n    void V() { sem_post( &sem ); }\nprivate:\n    sem_t sem;\n    void init_semaphore(int start_cnt_) {\n        int ret = sem_init( &sem, /*shared among threads*/ 0, start_cnt_ );\n        __TBB_ASSERT_EX( !ret, NULL );\n    }\n};\n#endif /* _WIN32||_WIN64 */\n\n\n//! for performance reasons, we want specialized binary_semaphore\n#if _WIN32||_WIN64\n#if !__TBB_USE_SRWLOCK\n//! binary_semaphore for concurrent_monitor\nclass binary_semaphore : no_copy {\npublic:\n    //! ctor\n    binary_semaphore() { my_sem = CreateEventEx( NULL, NULL, 0, EVENT_ALL_ACCESS );  }\n    //! dtor\n    ~binary_semaphore() { CloseHandle( my_sem ); }\n    //! wait/acquire\n    void P() { WaitForSingleObjectEx( my_sem, INFINITE, FALSE ); }\n    //! post/release \n    void V() { SetEvent( my_sem ); }\nprivate:\n    HANDLE my_sem;\n};\n#else /* __TBB_USE_SRWLOCK */\n\nunion srwl_or_handle {\n    SRWLOCK lock;\n    HANDLE  h;\n};\n\n//! binary_semaphore for concurrent_monitor\nclass binary_semaphore : no_copy {\npublic:\n    //! ctor\n    binary_semaphore();\n    //! dtor\n    ~binary_semaphore();\n    //! wait/acquire\n    void P();\n    //! post/release \n    void V();\nprivate:\n    srwl_or_handle my_sem;\n};\n#endif /* !__TBB_USE_SRWLOCK */\n#elif __APPLE__\n//! binary_semaphore for concurrent monitor\nclass binary_semaphore : no_copy {\npublic:\n    //! ctor\n    binary_semaphore() : my_sem(0) {\n        kern_return_t ret = semaphore_create( mach_task_self(), &my_sem, SYNC_POLICY_FIFO, 0 );\n        __TBB_ASSERT_EX( ret==err_none, \"failed to create a semaphore\" );\n    }\n    //! dtor\n    ~binary_semaphore() {\n        kern_return_t ret = semaphore_destroy( mach_task_self(), my_sem );\n        __TBB_ASSERT_EX( ret==err_none, NULL );\n    }\n    //! 
wait/acquire\n    void P() { \n        int ret;\n        do {\n            ret = semaphore_wait( my_sem );\n        } while( ret==KERN_ABORTED );\n        __TBB_ASSERT( ret==KERN_SUCCESS, \"semaphore_wait() failed\" );\n    }\n    //! post/release \n    void V() { semaphore_signal( my_sem ); }\nprivate:\n    semaphore_t my_sem;\n};\n#else /* Linux/Unix */\n\n#if __TBB_USE_FUTEX\nclass binary_semaphore : no_copy {\npublic:\n    //! ctor\n    binary_semaphore() { my_sem = 1; }\n    //! dtor\n    ~binary_semaphore() {}\n    //! wait/acquire\n    void P() {\n        int s;\n        if( (s = my_sem.compare_and_swap( 1, 0 ))!=0 ) {\n            if( s!=2 )\n                s = my_sem.fetch_and_store( 2 );\n            while( s!=0 ) {\n                futex_wait( &my_sem, 2 );\n                s = my_sem.fetch_and_store( 2 );\n            }\n        }\n    }\n    //! post/release \n    void V() { \n        __TBB_ASSERT( my_sem>=1, \"multiple V()'s in a row?\" );\n        if( my_sem--!=1 ) {\n            //if old value was 2\n            my_sem = 0;\n            futex_wakeup_one( &my_sem );\n        }\n    }\nprivate:\n    atomic<int> my_sem;\n};\n#else\ntypedef uint32_t sem_count_t;\n//! binary_semaphore for concurrent monitor\nclass binary_semaphore : no_copy {\npublic:\n    //! ctor\n    binary_semaphore() {\n        int ret = sem_init( &my_sem, /*shared among threads*/ 0, 0 );\n        __TBB_ASSERT_EX( !ret, NULL );\n    }\n    //! dtor\n    ~binary_semaphore() {\n        int ret = sem_destroy( &my_sem );\n        __TBB_ASSERT_EX( !ret, NULL );\n    }\n    //! wait/acquire\n    void P() {\n        while( sem_wait( &my_sem )!=0 )\n            __TBB_ASSERT( errno==EINTR, NULL );\n    }\n    //! post/release \n    void V() { sem_post( &my_sem ); }\nprivate:\n    sem_t my_sem;\n};\n#endif /* __TBB_USE_FUTEX */\n#endif /* _WIN32||_WIN64 */\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* __TBB_tbb_semaphore_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/spin_mutex.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/spin_mutex.h\"\n#include \"itt_notify.h\"\n#include \"tbb_misc.h\"\n\nnamespace tbb {\n\nvoid spin_mutex::scoped_lock::internal_acquire( spin_mutex& m ) {\n    __TBB_ASSERT( !my_mutex, \"already holding a lock on a spin_mutex\" );\n    ITT_NOTIFY(sync_prepare, &m);\n    __TBB_LockByte(m.flag);\n    my_mutex = &m;\n    ITT_NOTIFY(sync_acquired, &m);\n}\n\nvoid spin_mutex::scoped_lock::internal_release() {\n    __TBB_ASSERT( my_mutex, \"release on spin_mutex::scoped_lock that is not holding a lock\" );\n\n    ITT_NOTIFY(sync_releasing, my_mutex);\n    __TBB_UnlockByte(my_mutex->flag);\n    my_mutex = NULL;\n}\n\nbool spin_mutex::scoped_lock::internal_try_acquire( spin_mutex& m ) {\n    __TBB_ASSERT( !my_mutex, \"already holding a lock on a spin_mutex\" );\n    bool result = bool( __TBB_TryLockByte(m.flag) );\n    if( result ) {\n        my_mutex = &m;\n        ITT_NOTIFY(sync_acquired, &m);\n    }\n    return result;\n}\n\nvoid spin_mutex::internal_construct() {\n    ITT_SYNC_CREATE(this, _T(\"tbb::spin_mutex\"), _T(\"\"));\n}\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/spin_mutex.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_spin_mutex_H\n#define __TBB_spin_mutex_H\n\n#include <cstddef>\n#include <new>\n#include \"aligned_space.h\"\n#include \"tbb_stddef.h\"\n#include \"tbb_machine.h\"\n#include \"tbb_profiling.h\"\n#include \"internal/_mutex_padding.h\"\n\nnamespace tbb {\n\n//! 
A lock that occupies a single byte.\n/** A spin_mutex is a spin mutex that fits in a single byte.\n    It should be used only for locking short critical sections\n    (typically less than 20 instructions) when fairness is not an issue.\n    If zero-initialized, the mutex is considered unheld.\n    @ingroup synchronization */\nclass spin_mutex : internal::mutex_copy_deprecated_and_disabled {\n    //! 0 if lock is released, 1 if lock is acquired.\n    __TBB_atomic_flag flag;\n\npublic:\n    //! Construct unacquired lock.\n    /** Equivalent to zero-initialization of *this. */\n    spin_mutex() : flag(0) {\n#if TBB_USE_THREADING_TOOLS\n        internal_construct();\n#endif\n    }\n\n    //! Represents acquisition of a mutex.\n    class scoped_lock : internal::no_copy {\n    private:\n        //! Points to currently held mutex, or NULL if no lock is held.\n        spin_mutex* my_mutex;\n\n        //! Value to store into spin_mutex::flag to unlock the mutex.\n        /** This variable is no longer used. Instead, 0 and 1 are used to\n            represent that the lock is free and acquired, respectively.\n            We keep the member variable here to ensure backward compatibility */\n        __TBB_Flag my_unlock_value;\n\n        //! Like acquire, but with ITT instrumentation.\n        void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m );\n\n        //! Like try_acquire, but with ITT instrumentation.\n        bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m );\n\n        //! Like release, but with ITT instrumentation.\n        void __TBB_EXPORTED_METHOD internal_release();\n\n        friend class spin_mutex;\n\n    public:\n        //! Construct without acquiring a mutex.\n        scoped_lock() : my_mutex(NULL), my_unlock_value(0) {}\n\n        //! 
Construct and acquire lock on a mutex.\n        scoped_lock( spin_mutex& m ) : my_unlock_value(0) {\n            internal::suppress_unused_warning(my_unlock_value);\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n            my_mutex=NULL;\n            internal_acquire(m);\n#else\n            my_mutex=&m;\n            __TBB_LockByte(m.flag);\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/\n        }\n\n        //! Acquire lock.\n        void acquire( spin_mutex& m ) {\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n            internal_acquire(m);\n#else\n            my_mutex = &m;\n            __TBB_LockByte(m.flag);\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/\n        }\n\n        //! Try acquiring lock (non-blocking)\n        /** Return true if lock acquired; false otherwise. */\n        bool try_acquire( spin_mutex& m ) {\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n            return internal_try_acquire(m);\n#else\n            bool result = __TBB_TryLockByte(m.flag);\n            if( result )\n                my_mutex = &m;\n            return result;\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/\n        }\n\n        //! Release lock\n        void release() {\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n            internal_release();\n#else\n            __TBB_UnlockByte(my_mutex->flag);\n            my_mutex = NULL;\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */\n        }\n\n        //! Destroy lock.  If holding a lock, releases the lock first.\n        ~scoped_lock() {\n            if( my_mutex ) {\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n                internal_release();\n#else\n                __TBB_UnlockByte(my_mutex->flag);\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */\n            }\n        }\n    };\n\n    //! 
Internal constructor with ITT instrumentation.\n    void __TBB_EXPORTED_METHOD internal_construct();\n\n    // Mutex traits\n    static const bool is_rw_mutex = false;\n    static const bool is_recursive_mutex = false;\n    static const bool is_fair_mutex = false;\n\n    // ISO C++0x compatibility methods\n\n    //! Acquire lock\n    void lock() {\n#if TBB_USE_THREADING_TOOLS\n        aligned_space<scoped_lock> tmp;\n        new(tmp.begin()) scoped_lock(*this);\n#else\n        __TBB_LockByte(flag);\n#endif /* TBB_USE_THREADING_TOOLS*/\n    }\n\n    //! Try acquiring lock (non-blocking)\n    /** Return true if lock acquired; false otherwise. */\n    bool try_lock() {\n#if TBB_USE_THREADING_TOOLS\n        aligned_space<scoped_lock> tmp;\n        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);\n#else\n        return __TBB_TryLockByte(flag);\n#endif /* TBB_USE_THREADING_TOOLS*/\n    }\n\n    //! Release lock\n    void unlock() {\n#if TBB_USE_THREADING_TOOLS\n        aligned_space<scoped_lock> tmp;\n        scoped_lock& s = *tmp.begin();\n        s.my_mutex = this;\n        s.internal_release();\n#else\n        __TBB_store_with_release(flag, 0);\n#endif /* TBB_USE_THREADING_TOOLS */\n    }\n\n    friend class scoped_lock;\n}; // end of spin_mutex\n\n__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex)\n\n} // namespace tbb\n\n#if ( __TBB_x86_32 || __TBB_x86_64 )\n#include \"internal/_x86_eliding_mutex_impl.h\"\n#endif\n\nnamespace tbb {\n//! A cross-platform spin mutex with speculative lock acquisition.\n/** On platforms with proper HW support, this lock may speculatively execute\n    its critical sections, using HW mechanisms to detect real data races and\n    ensure atomicity of the critical sections. 
In particular, it uses\n    Intel(R) Transactional Synchronization Extensions (Intel(R) TSX).\n    Without such HW support, it behaves like a spin_mutex.\n    It should be used for locking short critical sections where the lock is\n    contended but the data it protects are not.  If zero-initialized, the\n    mutex is considered unheld.\n    @ingroup synchronization */\n\n#if ( __TBB_x86_32 || __TBB_x86_64 )\ntypedef interface7::internal::padded_mutex<interface7::internal::x86_eliding_mutex,false> speculative_spin_mutex;\n#else\ntypedef interface7::internal::padded_mutex<spin_mutex,false> speculative_spin_mutex;\n#endif\n__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex)\n\n} // namespace tbb\n\n#endif /* __TBB_spin_mutex_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/spin_rw_mutex.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/spin_rw_mutex.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/atomic.h\"\n#include \"itt_notify.h\"\n\n#if defined(_MSC_VER) && defined(_Wp64)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (disable: 4244)\n#endif\n\nnamespace tbb {\n\ntemplate<typename T> // a template can work with private spin_rw_mutex::state_t\nstatic inline T CAS(volatile T &addr, T newv, T oldv) {\n    // ICC (9.1 and 10.1 tried) unable to do implicit conversion\n    // from \"volatile T*\" to \"volatile void*\", so explicit cast added.\n    return tbb::internal::as_atomic(addr).compare_and_swap( newv, oldv );\n}\n\n//! Acquire write lock on the given mutex.\nbool spin_rw_mutex_v3::internal_acquire_writer()\n{\n    ITT_NOTIFY(sync_prepare, this);\n    for( internal::atomic_backoff backoff;;backoff.pause() ){\n        state_t s = const_cast<volatile state_t&>(state); // ensure reloading\n        if( !(s & BUSY) ) { // no readers, no writers\n            if( CAS(state, WRITER, s)==s )\n                break; // successfully stored writer flag\n            backoff.reset(); // we could be very close to complete op.\n        } else if( !(s & WRITER_PENDING) ) { // no pending writers\n            __TBB_AtomicOR(&state, WRITER_PENDING);\n        }\n    }\n    ITT_NOTIFY(sync_acquired, this);\n    return false;\n}\n\n//! Release writer lock on the given mutex\nvoid spin_rw_mutex_v3::internal_release_writer()\n{\n    ITT_NOTIFY(sync_releasing, this);\n    __TBB_AtomicAND( &state, READERS );\n}\n\n//! 
Acquire read lock on given mutex.\nvoid spin_rw_mutex_v3::internal_acquire_reader()\n{\n    ITT_NOTIFY(sync_prepare, this);\n    for( internal::atomic_backoff b;;b.pause() ){\n        state_t s = const_cast<volatile state_t&>(state); // ensure reloading\n        if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests\n            state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER );\n            if( !( t&WRITER )) \n                break; // successfully stored increased number of readers\n            // writer got there first, undo the increment\n            __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER );\n        }\n    }\n\n    ITT_NOTIFY(sync_acquired, this);\n    __TBB_ASSERT( state & READERS, \"invalid state of a read lock: no readers\" );\n}\n\n//! Upgrade reader to become a writer.\n/** Returns whether the upgrade happened without releasing and re-acquiring the lock */\nbool spin_rw_mutex_v3::internal_upgrade()\n{\n    state_t s = state;\n    __TBB_ASSERT( s & READERS, \"invalid state before upgrade: no readers \" );\n    // check and set writer-pending flag\n    // required conditions: either no pending writers, or we are the only reader\n    // (with multiple readers and pending writer, another upgrade could have been requested)\n    while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) {\n        state_t old_s = s;\n        if( (s=CAS(state, s | WRITER | WRITER_PENDING, s))==old_s ) {\n            ITT_NOTIFY(sync_prepare, this);\n            internal::atomic_backoff backoff;\n            while( (state & READERS) != ONE_READER ) backoff.pause();\n            __TBB_ASSERT((state&(WRITER_PENDING|WRITER))==(WRITER_PENDING|WRITER),\"invalid state when upgrading to writer\");\n            // both new readers and writers are blocked at this time\n            __TBB_FetchAndAddW( &state,  - (intptr_t)(ONE_READER+WRITER_PENDING));\n            ITT_NOTIFY(sync_acquired, this);\n            return true; // successfully 
upgraded\n        }\n    }\n    // slow reacquire\n    internal_release_reader();\n    return internal_acquire_writer(); // always returns false\n}\n\n//! Downgrade writer to a reader\nvoid spin_rw_mutex_v3::internal_downgrade() {\n    ITT_NOTIFY(sync_releasing, this);\n    __TBB_FetchAndAddW( &state, (intptr_t)(ONE_READER-WRITER));\n    __TBB_ASSERT( state & READERS, \"invalid state after downgrade: no readers\" );\n}\n\n//! Release read lock on the given mutex\nvoid spin_rw_mutex_v3::internal_release_reader()\n{\n    __TBB_ASSERT( state & READERS, \"invalid state of a read lock: no readers\" );\n    ITT_NOTIFY(sync_releasing, this); // release reader\n    __TBB_FetchAndAddWrelease( &state,-(intptr_t)ONE_READER);\n}\n\n//! Try to acquire write lock on the given mutex\nbool spin_rw_mutex_v3::internal_try_acquire_writer()\n{\n    // for a writer: only possible to acquire if no active readers or writers\n    state_t s = state;\n    if( !(s & BUSY) ) // no readers, no writers; mask is 1..1101\n        if( CAS(state, WRITER, s)==s ) {\n            ITT_NOTIFY(sync_acquired, this);\n            return true; // successfully stored writer flag\n        }\n    return false;\n}\n\n//! Try to acquire read lock on the given mutex\nbool spin_rw_mutex_v3::internal_try_acquire_reader()\n{\n    // for a reader: acquire if no active or waiting writers\n    state_t s = state;\n    if( !(s & (WRITER|WRITER_PENDING)) ) { // no writers\n        state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER );\n        if( !( t&WRITER )) {  // got the lock\n            ITT_NOTIFY(sync_acquired, this);\n            return true; // successfully stored increased number of readers\n        }\n        // writer got there first, undo the increment\n        __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER );\n    }\n    return false;\n}\n\nvoid spin_rw_mutex_v3::internal_construct() {\n    ITT_SYNC_CREATE(this, _T(\"tbb::spin_rw_mutex\"), _T(\"\"));\n}\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/spin_rw_mutex.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_spin_rw_mutex_H\n#define __TBB_spin_rw_mutex_H\n\n#include \"tbb_stddef.h\"\n#include \"tbb_machine.h\"\n#include \"tbb_profiling.h\"\n#include \"internal/_mutex_padding.h\"\n\nnamespace tbb {\n\n#if __TBB_TSX_AVAILABLE\nnamespace interface8 { namespace internal {\n    class x86_rtm_rw_mutex;\n}}\n#endif\n\nclass spin_rw_mutex_v3;\ntypedef spin_rw_mutex_v3 spin_rw_mutex;\n\n//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference\n/** @ingroup synchronization */\nclass spin_rw_mutex_v3 : internal::mutex_copy_deprecated_and_disabled {\n    //! 
@cond INTERNAL\n\n    //! Internal acquire write lock.\n    bool __TBB_EXPORTED_METHOD internal_acquire_writer();\n\n    //! Out of line code for releasing a write lock.\n    /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */\n    void __TBB_EXPORTED_METHOD internal_release_writer();\n\n    //! Internal acquire read lock.\n    void __TBB_EXPORTED_METHOD internal_acquire_reader();\n\n    //! Internal upgrade reader to become a writer.\n    bool __TBB_EXPORTED_METHOD internal_upgrade();\n\n    //! Out of line code for downgrading a writer to a reader.\n    /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */\n    void __TBB_EXPORTED_METHOD internal_downgrade();\n\n    //! Internal release read lock.\n    void __TBB_EXPORTED_METHOD internal_release_reader();\n\n    //! Internal try_acquire write lock.\n    bool __TBB_EXPORTED_METHOD internal_try_acquire_writer();\n\n    //! Internal try_acquire read lock.\n    bool __TBB_EXPORTED_METHOD internal_try_acquire_reader();\n\n    //! @endcond\npublic:\n    //! Construct unacquired mutex.\n    spin_rw_mutex_v3() : state(0) {\n#if TBB_USE_THREADING_TOOLS\n        internal_construct();\n#endif\n    }\n\n#if TBB_USE_ASSERT\n    //! Destructor asserts if the mutex is acquired, i.e. state is zero.\n    ~spin_rw_mutex_v3() {\n        __TBB_ASSERT( !state, \"destruction of an acquired mutex\");\n    };\n#endif /* TBB_USE_ASSERT */\n\n    //! The scoped locking pattern\n    /** It helps to avoid the common problem of forgetting to release lock.\n        It also nicely provides the \"node\" for queuing locks. 
*/\n    class scoped_lock : internal::no_copy {\n#if __TBB_TSX_AVAILABLE\n        friend class tbb::interface8::internal::x86_rtm_rw_mutex;\n        // helper methods for x86_rtm_rw_mutex\n        spin_rw_mutex *internal_get_mutex() const { return mutex; }\n        void internal_set_mutex(spin_rw_mutex* m) { mutex = m; }\n#endif\n    public:\n        //! Construct lock that has not acquired a mutex.\n        /** Equivalent to zero-initialization of *this. */\n        scoped_lock() : mutex(NULL), is_writer(false) {}\n\n        //! Acquire lock on given mutex.\n        scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) {\n            acquire(m, write);\n        }\n\n        //! Release lock (if lock is held).\n        ~scoped_lock() {\n            if( mutex ) release();\n        }\n\n        //! Acquire lock on given mutex.\n        void acquire( spin_rw_mutex& m, bool write = true ) {\n            __TBB_ASSERT( !mutex, \"holding mutex already\" );\n            is_writer = write;\n            mutex = &m;\n            if( write ) mutex->internal_acquire_writer();\n            else        mutex->internal_acquire_reader();\n        }\n\n        //! Upgrade reader to become a writer.\n        /** Returns whether the upgrade happened without releasing and re-acquiring the lock */\n        bool upgrade_to_writer() {\n            __TBB_ASSERT( mutex, \"lock is not acquired\" );\n            __TBB_ASSERT( !is_writer, \"not a reader\" );\n            is_writer = true;\n            return mutex->internal_upgrade();\n        }\n\n        //! 
Release lock.\n        void release() {\n            __TBB_ASSERT( mutex, \"lock is not acquired\" );\n            spin_rw_mutex *m = mutex; \n            mutex = NULL;\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n            if( is_writer ) m->internal_release_writer();\n            else            m->internal_release_reader();\n#else\n            if( is_writer ) __TBB_AtomicAND( &m->state, READERS ); \n            else            __TBB_FetchAndAddWrelease( &m->state, -(intptr_t)ONE_READER);\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */\n        }\n\n        //! Downgrade writer to become a reader.\n        bool downgrade_to_reader() {\n            __TBB_ASSERT( mutex, \"lock is not acquired\" );\n            __TBB_ASSERT( is_writer, \"not a writer\" );\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n            mutex->internal_downgrade();\n#else\n            __TBB_FetchAndAddW( &mutex->state, ((intptr_t)ONE_READER-WRITER));\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */\n            is_writer = false;\n            return true;\n        }\n\n        //! Try acquire lock on given mutex.\n        bool try_acquire( spin_rw_mutex& m, bool write = true ) {\n            __TBB_ASSERT( !mutex, \"holding mutex already\" );\n            bool result;\n            is_writer = write; \n            result = write? m.internal_try_acquire_writer()\n                          : m.internal_try_acquire_reader();\n            if( result ) \n                mutex = &m;\n            return result;\n        }\n\n    protected:\n\n        //! The pointer to the current mutex that is held, or NULL if no mutex is held.\n        spin_rw_mutex* mutex;\n\n        //! If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock.\n        /** Not defined if not holding a lock. 
*/\n        bool is_writer;\n    };\n\n    // Mutex traits\n    static const bool is_rw_mutex = true;\n    static const bool is_recursive_mutex = false;\n    static const bool is_fair_mutex = false;\n\n    // ISO C++0x compatibility methods\n\n    //! Acquire writer lock\n    void lock() {internal_acquire_writer();}\n\n    //! Try acquiring writer lock (non-blocking)\n    /** Return true if lock acquired; false otherwise. */\n    bool try_lock() {return internal_try_acquire_writer();}\n\n    //! Release lock\n    void unlock() {\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n        if( state&WRITER ) internal_release_writer();\n        else               internal_release_reader();\n#else\n        if( state&WRITER ) __TBB_AtomicAND( &state, READERS ); \n        else               __TBB_FetchAndAddWrelease( &state, -(intptr_t)ONE_READER);\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */\n    }\n\n    // Methods for reader locks that resemble ISO C++0x compatibility methods.\n\n    //! Acquire reader lock\n    void lock_read() {internal_acquire_reader();}\n\n    //! Try acquiring reader lock (non-blocking)\n    /** Return true if reader lock acquired; false otherwise. */\n    bool try_lock_read() {return internal_try_acquire_reader();}\n\nprotected:\n    typedef intptr_t state_t;\n    static const state_t WRITER = 1;\n    static const state_t WRITER_PENDING = 2;\n    static const state_t READERS = ~(WRITER | WRITER_PENDING);\n    static const state_t ONE_READER = 4;\n    static const state_t BUSY = WRITER | READERS;\n    //! 
State of lock\n    /** Bit 0 = writer is holding lock\n        Bit 1 = request by a writer to acquire lock (hint to readers to wait)\n        Bit 2..N = number of readers holding lock */\n    state_t state;\n\nprivate:\n    void __TBB_EXPORTED_METHOD internal_construct();\n};\n\n__TBB_DEFINE_PROFILING_SET_NAME(spin_rw_mutex)\n\n} // namespace tbb\n\n#if __TBB_TSX_AVAILABLE\n#include \"internal/_x86_rtm_rw_mutex_impl.h\"\n#endif\n\nnamespace tbb {\nnamespace interface8 {\n//! A cross-platform spin reader/writer mutex with speculative lock acquisition.\n/** On platforms with proper HW support, this lock may speculatively execute\n    its critical sections, using HW mechanisms to detect real data races and\n    ensure atomicity of the critical sections. In particular, it uses\n    Intel(R) Transactional Synchronization Extensions (Intel(R) TSX).\n    Without such HW support, it behaves like a spin_rw_mutex.\n    It should be used for locking short critical sections where the lock is \n    contended but the data it protects are not.\n    @ingroup synchronization */\n#if __TBB_TSX_AVAILABLE\ntypedef interface7::internal::padded_mutex<tbb::interface8::internal::x86_rtm_rw_mutex,true> speculative_spin_rw_mutex;\n#else\ntypedef interface7::internal::padded_mutex<tbb::spin_rw_mutex,true> speculative_spin_rw_mutex;\n#endif\n}  // namespace interface8\n\nusing interface8::speculative_spin_rw_mutex;\n__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_rw_mutex)\n} // namespace tbb\n#endif /* __TBB_spin_rw_mutex_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/task.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n// Do not include task.h directly. 
Use scheduler_common.h instead\n#include \"scheduler_common.h\"\n#include \"governor.h\"\n#include \"scheduler.h\"\n#include \"itt_notify.h\"\n\n#include \"tbb/cache_aligned_allocator.h\"\n#include \"tbb/partitioner.h\"\n\n#include <new>\n\nnamespace tbb {\n\nusing namespace std;\n\nnamespace internal {\n\n//------------------------------------------------------------------------\n// Methods of allocate_root_proxy\n//------------------------------------------------------------------------\ntask& allocate_root_proxy::allocate( size_t size ) {\n    internal::generic_scheduler* v = governor::local_scheduler();\n    __TBB_ASSERT( v, \"thread did not activate a task_scheduler_init object?\" );\n#if __TBB_TASK_GROUP_CONTEXT\n    task_prefix& p = v->my_innermost_running_task->prefix();\n\n    ITT_STACK_CREATE(p.context->itt_caller);\n#endif\n    // New root task becomes part of the currently running task's cancellation context\n    return v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, p.context) );\n}\n\nvoid allocate_root_proxy::free( task& task ) {\n    internal::generic_scheduler* v = governor::local_scheduler();\n    __TBB_ASSERT( v, \"thread does not have initialized task_scheduler_init object?\" );\n#if __TBB_TASK_GROUP_CONTEXT\n    // No need to do anything here as long as there is no context -> task connection\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    v->free_task<local_task>( task );\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\n//------------------------------------------------------------------------\n// Methods of allocate_root_with_context_proxy\n//------------------------------------------------------------------------\ntask& allocate_root_with_context_proxy::allocate( size_t size ) const {\n    internal::generic_scheduler* s = governor::local_scheduler();\n    __TBB_ASSERT( s, \"Scheduler auto-initialization failed?\" );\n    task& t = s->allocate_task( size, NULL, &my_context );\n    // Supported usage model prohibits concurrent initial binding. 
Thus we do not\n    // need interlocked operations or fences to manipulate with my_context.my_kind\n    if ( __TBB_load_relaxed(my_context.my_kind) == task_group_context::binding_required ) {\n        // If we are in the outermost task dispatch loop of a master thread, then\n        // there is nothing to bind this context to, and we skip the binding part\n        // treating the context as isolated.\n        if ( s->my_innermost_running_task == s->my_dummy_task )\n            __TBB_store_relaxed(my_context.my_kind, task_group_context::isolated);\n        else\n            my_context.bind_to( s );\n    }\n#if __TBB_FP_CONTEXT\n    if ( __TBB_load_relaxed(my_context.my_kind) == task_group_context::isolated &&\n            !(my_context.my_version_and_traits & task_group_context::fp_settings) )\n        my_context.copy_fp_settings( *s->my_arena->my_default_ctx );\n#endif\n    ITT_STACK_CREATE(my_context.itt_caller);\n    return t;\n}\n\nvoid allocate_root_with_context_proxy::free( task& task ) const {\n    internal::generic_scheduler* v = governor::local_scheduler();\n    __TBB_ASSERT( v, \"thread does not have initialized task_scheduler_init object?\" );\n    // No need to do anything here as long as unbinding is performed by context destructor only.\n    v->free_task<local_task>( task );\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n//------------------------------------------------------------------------\n// Methods of allocate_continuation_proxy\n//------------------------------------------------------------------------\ntask& allocate_continuation_proxy::allocate( size_t size ) const {\n    task& t = *((task*)this);\n    assert_task_valid(t);\n    generic_scheduler* s = governor::local_scheduler();\n    task* parent = t.parent();\n    t.prefix().parent = NULL;\n    return s->allocate_task( size, __TBB_CONTEXT_ARG(parent, t.prefix().context) );\n}\n\nvoid allocate_continuation_proxy::free( task& mytask ) const {\n    // Restore the parent as it was before the 
corresponding allocate was called.\n    ((task*)this)->prefix().parent = mytask.parent();\n    governor::local_scheduler()->free_task<local_task>(mytask);\n}\n\n//------------------------------------------------------------------------\n// Methods of allocate_child_proxy\n//------------------------------------------------------------------------\ntask& allocate_child_proxy::allocate( size_t size ) const {\n    task& t = *((task*)this);\n    assert_task_valid(t);\n    generic_scheduler* s = governor::local_scheduler();\n    return s->allocate_task( size, __TBB_CONTEXT_ARG(&t, t.prefix().context) );\n}\n\nvoid allocate_child_proxy::free( task& mytask ) const {\n    governor::local_scheduler()->free_task<local_task>(mytask);\n}\n\n//------------------------------------------------------------------------\n// Methods of allocate_additional_child_of_proxy\n//------------------------------------------------------------------------\ntask& allocate_additional_child_of_proxy::allocate( size_t size ) const {\n    parent.increment_ref_count();\n    generic_scheduler* s = governor::local_scheduler();\n    return s->allocate_task( size, __TBB_CONTEXT_ARG(&parent, parent.prefix().context) );\n}\n\nvoid allocate_additional_child_of_proxy::free( task& task ) const {\n    // Undo the increment.  
We do not check the result of the fetch-and-decrement.\n    // We could consider be spawning the task if the fetch-and-decrement returns 1.\n    // But we do not know that was the programmer's intention.\n    // Furthermore, if it was the programmer's intention, the program has a fundamental\n    // race condition (that we warn about in Reference manual), because the\n    // reference count might have become zero before the corresponding call to\n    // allocate_additional_child_of_proxy::allocate.\n    parent.internal_decrement_ref_count();\n    governor::local_scheduler()->free_task<local_task>(task);\n}\n\n//------------------------------------------------------------------------\n// Support for auto_partitioner\n//------------------------------------------------------------------------\nsize_t get_initial_auto_partitioner_divisor() {\n    const size_t X_FACTOR = 4;\n    return X_FACTOR * (1+governor::local_scheduler()->number_of_workers_in_my_arena());\n}\n\n//------------------------------------------------------------------------\n// Methods of affinity_partitioner_base_v3\n//------------------------------------------------------------------------\nvoid affinity_partitioner_base_v3::resize( unsigned factor ) {\n    // Check factor to avoid asking for number of workers while there might be no arena.\n    size_t new_size = factor ? 
factor*(1+governor::local_scheduler()->number_of_workers_in_my_arena()) : 0;\n    if( new_size!=my_size ) {\n        if( my_array ) {\n            NFS_Free( my_array );\n            // Following two assignments must be done here for sake of exception safety.\n            my_array = NULL;\n            my_size = 0;\n        }\n        if( new_size ) {\n            my_array = static_cast<affinity_id*>(NFS_Allocate(new_size,sizeof(affinity_id), NULL ));\n            memset( my_array, 0, sizeof(affinity_id)*new_size );\n            my_size = new_size;\n        }\n    }\n}\n\n} // namespace internal\n\nusing namespace tbb::internal;\n\n//------------------------------------------------------------------------\n// task\n//------------------------------------------------------------------------\n\nvoid task::internal_set_ref_count( int count ) {\n    __TBB_ASSERT( count>=0, \"count must not be negative\" );\n    task_prefix &p = prefix();\n    __TBB_ASSERT(p.ref_count==1 && p.state==allocated && self().parent()==this\n        || !(p.extra_state & es_ref_count_active), \"ref_count race detected\");\n    ITT_NOTIFY(sync_releasing, &p.ref_count);\n    p.ref_count = count;\n}\n\ninternal::reference_count task::internal_decrement_ref_count() {\n    ITT_NOTIFY( sync_releasing, &prefix().ref_count );\n    internal::reference_count k = __TBB_FetchAndDecrementWrelease( &prefix().ref_count );\n    __TBB_ASSERT( k>=1, \"task's reference count underflowed\" );\n    if( k==1 )\n        ITT_NOTIFY( sync_acquired, &prefix().ref_count );\n    return k-1;\n}\n\ntask& task::self() {\n    generic_scheduler *v = governor::local_scheduler();\n    v->assert_task_pool_valid();\n    __TBB_ASSERT( v->my_innermost_running_task, NULL );\n    return *v->my_innermost_running_task;\n}\n\nbool task::is_owned_by_current_thread() const {\n    return true;\n}\n\nvoid interface5::internal::task_base::destroy( task& victim ) {\n    // 1 may be a guard reference for wait_for_all, which was not reset because\n 
   // of concurrent_wait mode or because prepared root task was not actually used\n    // for spawning tasks (as in structured_task_group).\n    __TBB_ASSERT( (intptr_t)victim.prefix().ref_count <= 1, \"Task being destroyed must not have children\" );\n    __TBB_ASSERT( victim.state()==task::allocated, \"illegal state for victim task\" );\n    task* parent = victim.parent();\n    victim.~task();\n    if( parent ) {\n        __TBB_ASSERT( parent->state()!=task::freed && parent->state()!=task::ready,\n                      \"attempt to destroy child of running or corrupted parent?\" );\n        // 'reexecute' and 'executing' are also signs of a race condition, since most tasks\n        // set their ref_count upon entry but \"es_ref_count_active\" should detect this\n        parent->internal_decrement_ref_count();\n        // Even if the last reference to *parent is removed, it should not be spawned (documented behavior).\n    }\n    governor::local_scheduler()->free_task<no_cache>( victim );\n}\n\nvoid task::spawn_and_wait_for_all( task_list& list ) {\n    generic_scheduler* s = governor::local_scheduler();\n    task* t = list.first;\n    if( t ) {\n        if( &t->prefix().next!=list.next_ptr )\n            s->local_spawn( *t->prefix().next, *list.next_ptr );\n        list.clear();\n    }\n    s->local_wait_for_all( *this, t );\n}\n\n/** Defined out of line so that compiler does not replicate task's vtable.\n    It's pointless to define it inline anyway, because all call sites to it are virtual calls\n    that the compiler is unlikely to optimize. 
*/\nvoid task::note_affinity( affinity_id ) {\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\nvoid task::change_group ( task_group_context& ctx ) {\n    prefix().context = &ctx;\n    internal::generic_scheduler* s = governor::local_scheduler();\n    if ( __TBB_load_relaxed(ctx.my_kind) == task_group_context::binding_required ) {\n        // If we are in the outermost task dispatch loop of a master thread, then\n        // there is nothing to bind this context to, and we skip the binding part\n        // treating the context as isolated.\n        if ( s->my_innermost_running_task == s->my_dummy_task )\n            __TBB_store_relaxed(ctx.my_kind, task_group_context::isolated);\n        else\n            ctx.bind_to( s );\n    }\n#if __TBB_FP_CONTEXT\n    if ( __TBB_load_relaxed(ctx.my_kind) == task_group_context::isolated &&\n            !(ctx.my_version_and_traits & task_group_context::fp_settings) )\n        ctx.copy_fp_settings( *s->my_arena->my_default_ctx );\n#endif\n    ITT_STACK_CREATE(ctx.itt_caller);\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n} // namespace tbb\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/task.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_task_H\n#define __TBB_task_H\n\n#include \"tbb_stddef.h\"\n#include \"tbb_machine.h\"\n#include <climits>\n\ntypedef struct ___itt_caller *__itt_caller;\n\nnamespace tbb {\n\nclass task;\nclass task_list;\nclass task_group_context;\n\n// MSVC does not allow taking the address of a member that was defined\n// privately in task_base and made public in class task via a using declaration.\n#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)\n#define __TBB_TASK_BASE_ACCESS public\n#else\n#define __TBB_TASK_BASE_ACCESS private\n#endif\n\nnamespace internal { //< @cond INTERNAL\n\n    class allocate_additional_child_of_proxy: no_assign {\n        //! No longer used, but retained for binary layout compatibility.  Always NULL.\n        task* self;\n        task& parent;\n    public:\n        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}\n        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;\n        void __TBB_EXPORTED_METHOD free( task& ) const;\n    };\n\n    struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };\n} //< namespace internal @endcond\n\nnamespace interface5 {\n    namespace internal {\n        //! Base class for methods that became static in TBB 3.0.\n        /** TBB's evolution caused the \"this\" argument for several methods to become obsolete.\n            However, for backwards binary compatibility, the new methods need distinct names,\n            otherwise the One Definition Rule would be broken.  Hence the new methods are\n            defined in this private base class, and then exposed in class task via\n            using declarations. */\n        class task_base: tbb::internal::no_copy {\n        __TBB_TASK_BASE_ACCESS:\n            friend class tbb::task;\n\n            //! 
Schedule task for execution when a worker becomes available.\n            static void spawn( task& t );\n\n            //! Spawn multiple tasks and clear list.\n            static void spawn( task_list& list );\n\n            //! Like allocate_child, except that task's parent becomes \"t\", not this.\n            /** Typically used in conjunction with schedule_to_reexecute to implement while loops.\n               Atomically increments the reference count of t.parent() */\n            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {\n                return tbb::internal::allocate_additional_child_of_proxy(t);\n            }\n\n            //! Destroy a task.\n            /** Usually, calling this method is unnecessary, because a task is\n                implicitly deleted after its execute() method runs.  However,\n                sometimes a task needs to be explicitly deallocated, such as\n                when a root task is used as the parent in spawn_and_wait_for_all. */\n            static void __TBB_EXPORTED_FUNC destroy( task& victim );\n        };\n    } // internal\n} // interface5\n\n//! @cond INTERNAL\nnamespace internal {\n\n    class scheduler: no_copy {\n    public:\n        //! For internal use only\n        virtual void spawn( task& first, task*& next ) = 0;\n\n        //! For internal use only\n        virtual void wait_for_all( task& parent, task* child ) = 0;\n\n        //! For internal use only\n        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;\n\n        //! Pure virtual destructor;\n        //  Have to have it just to shut up overzealous compilation warnings\n        virtual ~scheduler() = 0;\n\n        //! For internal use only\n        virtual void enqueue( task& t, void* reserved ) = 0;\n    };\n\n    //! A reference count\n    /** Should always be non-negative.  A signed type is used so that underflow can be detected. 
*/\n    typedef intptr_t reference_count;\n\n    //! An id as used for specifying affinity.\n    typedef unsigned short affinity_id;\n\n#if __TBB_TASK_GROUP_CONTEXT\n    class generic_scheduler;\n\n    struct context_list_node_t {\n        context_list_node_t *my_prev,\n                            *my_next;\n    };\n\n    class allocate_root_with_context_proxy: no_assign {\n        task_group_context& my_context;\n    public:\n        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}\n        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;\n        void __TBB_EXPORTED_METHOD free( task& ) const;\n    };\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n    class allocate_root_proxy: no_assign {\n    public:\n        static task& __TBB_EXPORTED_FUNC allocate( size_t size );\n        static void __TBB_EXPORTED_FUNC free( task& );\n    };\n\n    class allocate_continuation_proxy: no_assign {\n    public:\n        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;\n        void __TBB_EXPORTED_METHOD free( task& ) const;\n    };\n\n    class allocate_child_proxy: no_assign {\n    public:\n        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;\n        void __TBB_EXPORTED_METHOD free( task& ) const;\n    };\n\n    //! 
Memory prefix to a task object.\n    /** This class is internal to the library.\n        Do not reference it directly, except within the library itself.\n        Fields are ordered in way that preserves backwards compatibility and yields\n        good packing on typical 32-bit and 64-bit platforms.\n\n        In case task prefix size exceeds 32 or 64 bytes on IA32 and Intel64\n        architectures correspondingly, consider dynamic setting of task_alignment\n        and task_prefix_reservation_size based on the maximal operand size supported\n        by the current CPU.\n\n        @ingroup task_scheduling */\n    class task_prefix {\n    private:\n        friend class tbb::task;\n        friend class tbb::interface5::internal::task_base;\n        friend class tbb::task_list;\n        friend class internal::scheduler;\n        friend class internal::allocate_root_proxy;\n        friend class internal::allocate_child_proxy;\n        friend class internal::allocate_continuation_proxy;\n        friend class internal::allocate_additional_child_of_proxy;\n\n#if __TBB_TASK_GROUP_CONTEXT\n        //! Shared context that is used to communicate asynchronous state changes\n        /** Currently it is used to broadcast cancellation requests generated both\n            by users and as the result of unhandled exceptions in the task::execute()\n            methods. */\n        task_group_context  *context;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n        //! The scheduler that allocated the task, or NULL if the task is big.\n        /** Small tasks are pooled by the scheduler that allocated the task.\n            If a scheduler needs to free a small task allocated by another scheduler,\n            it returns the task to that other scheduler.  This policy avoids\n            memory space blowup issues for memory allocators that allocate from\n            thread-specific pools. 
*/\n        scheduler* origin;\n\n#if __TBB_TASK_PRIORITY\n        union {\n#endif /* __TBB_TASK_PRIORITY */\n        //! Obsolete. The scheduler that owns the task.\n        /** Retained only for the sake of backward binary compatibility.\n            Still used by inline methods in the task.h header. **/\n        scheduler* owner;\n\n#if __TBB_TASK_PRIORITY\n        //! Pointer to the next offloaded lower priority task.\n        /** Used to maintain a list of offloaded tasks inside the scheduler. **/\n        task* next_offloaded;\n        };\n#endif /* __TBB_TASK_PRIORITY */\n\n        //! The task whose reference count includes me.\n        /** In the \"blocking style\" of programming, this field points to the parent task.\n            In the \"continuation-passing style\" of programming, this field points to the\n            continuation of the parent. */\n        tbb::task* parent;\n\n        //! Reference count used for synchronization.\n        /** In the \"continuation-passing style\" of programming, this field is\n            the difference of the number of allocated children minus the\n            number of children that have completed.\n            In the \"blocking style\" of programming, this field is one more than the difference. */\n        __TBB_atomic reference_count ref_count;\n\n        //! Obsolete. Used to be scheduling depth before TBB 2.2\n        /** Retained only for the sake of backward binary compatibility.\n            Not used by TBB anymore. **/\n        int depth;\n\n        //! A task::state_type, stored as a byte for compactness.\n        /** This state is exposed to users via method task::state(). */\n        unsigned char state;\n\n        //! 
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.\n        /** 0x0 -> version 1.0 task\n            0x1 -> version >=2.1 task\n            0x10 -> task was enqueued\n            0x20 -> task_proxy\n            0x40 -> task has live ref_count\n            0x80 -> a stolen task */\n        unsigned char extra_state;\n\n        affinity_id affinity;\n\n        //! \"next\" field for list of task\n        tbb::task* next;\n\n        //! The task corresponding to this task_prefix.\n        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}\n    };\n\n} // namespace internal\n//! @endcond\n\n#if __TBB_TASK_GROUP_CONTEXT\n\n#if __TBB_TASK_PRIORITY\nnamespace internal {\n    static const int priority_stride_v4 = INT_MAX / 4;\n}\n\nenum priority_t {\n    priority_normal = internal::priority_stride_v4 * 2,\n    priority_low = priority_normal - internal::priority_stride_v4,\n    priority_high = priority_normal + internal::priority_stride_v4\n};\n\n#endif /* __TBB_TASK_PRIORITY */\n\n#if TBB_USE_CAPTURED_EXCEPTION\n    class tbb_exception;\n#else\n    namespace internal {\n        class tbb_exception_ptr;\n    }\n#endif /* !TBB_USE_CAPTURED_EXCEPTION */\n\nclass task_scheduler_init;\nnamespace interface7 { class task_arena; }\n\n//! Used to form groups of tasks\n/** @ingroup task_scheduling\n    The context services explicit cancellation requests from user code, and unhandled\n    exceptions intercepted during tasks execution. Intercepting an exception results\n    in generating internal cancellation requests (which is processed in exactly the\n    same way as external ones).\n\n    The context is associated with one or more root tasks and defines the cancellation\n    group that includes all the descendants of the corresponding root task(s). Association\n    is established when a context object is passed as an argument to the task::allocate_root()\n    method. 
See task_group_context::task_group_context for more details.\n\n    The context can be bound to another one, and other contexts can be bound to it,\n    forming a tree-like structure: parent -> this -> children. Arrows here designate\n    cancellation propagation direction. If a task in a cancellation group is cancelled\n    all the other tasks in this group and groups bound to it (as children) get cancelled too.\n\n    IMPLEMENTATION NOTE:\n    When adding new members to task_group_context or changing types of existing ones,\n    update the size of both padding buffers (_leading_padding and _trailing_padding)\n    appropriately. See also VERSIONING NOTE at the constructor definition below. **/\nclass task_group_context : internal::no_copy {\nprivate:\n    friend class internal::generic_scheduler;\n    friend class task_scheduler_init;\n    friend class interface7::task_arena;\n\n#if TBB_USE_CAPTURED_EXCEPTION\n    typedef tbb_exception exception_container_type;\n#else\n    typedef internal::tbb_exception_ptr exception_container_type;\n#endif\n\n    enum version_traits_word_layout {\n        traits_offset = 16,\n        version_mask = 0xFFFF,\n        traits_mask = 0xFFFFul << traits_offset\n    };\n\npublic:\n    enum kind_type {\n        isolated,\n        bound\n    };\n\n    enum traits_type {\n        exact_exception = 0x0001ul << traits_offset,\n#if __TBB_FP_CONTEXT\n        fp_settings     = 0x0002ul << traits_offset,\n#endif\n        concurrent_wait = 0x0004ul << traits_offset,\n#if TBB_USE_CAPTURED_EXCEPTION\n        default_traits = 0\n#else\n        default_traits = exact_exception\n#endif /* !TBB_USE_CAPTURED_EXCEPTION */\n    };\n\nprivate:\n    enum state {\n        may_have_children = 1,\n        // the following enumerations must be the last, new 2^x values must go above\n        next_state_value, low_unused_state_bit = (next_state_value-1)*2\n    };\n\n    union {\n        //! 
Flavor of this context: bound or isolated.\n        // TODO: describe asynchronous use, and whether any memory semantics are needed\n        __TBB_atomic kind_type my_kind;\n        uintptr_t _my_kind_aligner;\n    };\n\n    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.\n    task_group_context *my_parent;\n\n    //! Used to form the thread specific list of contexts without additional memory allocation.\n    /** A context is included into the list of the current thread when its binding to\n        its parent happens. Any context can be present in the list of one thread only. **/\n    internal::context_list_node_t my_node;\n\n    //! Used to set and maintain stack stitching point for Intel Performance Tools.\n    __itt_caller itt_caller;\n\n    //! Leading padding protecting accesses to frequently used members from false sharing.\n    /** Read accesses to the field my_cancellation_requested are on the hot path inside\n        the scheduler. This padding ensures that this field never shares the same cache\n        line with a local variable that is frequently written to. **/\n    char _leading_padding[internal::NFS_MaxLineSize\n                          - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)\n                          - sizeof(__itt_caller)\n#if __TBB_FP_CONTEXT\n                          - sizeof(internal::cpu_ctl_env_space)\n#endif\n                         ];\n\n#if __TBB_FP_CONTEXT\n    //! Space for platform-specific FPU settings.\n    /** Must only be accessed inside TBB binaries, and never directly in user\n        code or inline methods. */\n    internal::cpu_ctl_env_space my_cpu_ctl_env;\n#endif\n\n    //! Specifies whether cancellation was requested for this task group.\n    uintptr_t my_cancellation_requested;\n\n    //! 
Version for run-time checks and behavioral traits of the context.\n    /** Version occupies low 16 bits, and traits (zero or more ORed enumerators\n        from the traits_type enumerations) take the next 16 bits.\n        Original (zeroth) version of the context did not support any traits. **/\n    uintptr_t my_version_and_traits;\n\n    //! Pointer to the container storing exception being propagated across this task group.\n    exception_container_type *my_exception;\n\n    //! Scheduler instance that registered this context in its thread specific list.\n    internal::generic_scheduler *my_owner;\n\n    //! Internal state (combination of state flags, currently only may_have_children).\n    uintptr_t my_state;\n\n#if __TBB_TASK_PRIORITY\n    //! Priority level of the task group (in normalized representation)\n    intptr_t my_priority;\n#endif /* __TBB_TASK_PRIORITY */\n\n    //! Trailing padding protecting accesses to frequently used members from false sharing\n    /** \\sa _leading_padding **/\n    char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)\n#if __TBB_TASK_PRIORITY\n                            - sizeof(intptr_t)\n#endif /* __TBB_TASK_PRIORITY */\n                          ];\n\npublic:\n    //! Default & binding constructor.\n    /** By default a bound context is created. That is this context will be bound\n        (as child) to the context of the task calling task::allocate_root(this_context)\n        method. Cancellation requests passed to the parent context are propagated\n        to all the contexts bound to it. Similarly priority change is propagated\n        from the parent context to its children.\n\n        If task_group_context::isolated is used as the argument, then the tasks associated\n        with this context will never be affected by events in any other context.\n\n        Creating isolated contexts involve much less overhead, but they have limited\n        utility. 
Normally when an exception occurs in an algorithm that has nested\n        ones running, it is desirably to have all the nested algorithms cancelled\n        as well. Such a behavior requires nested algorithms to use bound contexts.\n\n        There is one good place where using isolated algorithms is beneficial. It is\n        a master thread. That is if a particular algorithm is invoked directly from\n        the master thread (not from a TBB task), supplying it with explicitly\n        created isolated context will result in a faster algorithm startup.\n\n        VERSIONING NOTE:\n        Implementation(s) of task_group_context constructor(s) cannot be made\n        entirely out-of-line because the run-time version must be set by the user\n        code. This will become critically important for binary compatibility, if\n        we ever have to change the size of the context object.\n\n        Boosting the runtime version will also be necessary if new data fields are\n        introduced in the currently unused padding areas and these fields are updated\n        by inline methods. **/\n    task_group_context ( kind_type relation_with_parent = bound,\n                         uintptr_t traits = default_traits )\n        : my_kind(relation_with_parent)\n        , my_version_and_traits(2 | traits)\n    {\n        init();\n    }\n\n    // Do not introduce standalone unbind method since it will break state propagation assumptions\n    __TBB_EXPORTED_METHOD ~task_group_context ();\n\n    //! Forcefully reinitializes the context after the task tree it was associated with is completed.\n    /** Because the method assumes that all the tasks that used to be associated with\n        this context have already finished, calling it while the context is still\n        in use somewhere in the task hierarchy leads to undefined behavior.\n\n        IMPORTANT: This method is not thread safe!\n\n        The method does not change the context's parent if it is set. 
**/\n    void __TBB_EXPORTED_METHOD reset ();\n\n    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.\n    /** \\return false if cancellation has already been requested, true otherwise.\n\n        Note that canceling never fails. When false is returned, it just means that\n        another thread (or this one) has already sent cancellation request to this\n        context or to one of its ancestors (if this context is bound). It is guaranteed\n        that when this method is concurrently called on the same not yet cancelled\n        context, true will be returned by one and only one invocation. **/\n    bool __TBB_EXPORTED_METHOD cancel_group_execution ();\n\n    //! Returns true if the context received cancellation request.\n    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;\n\n    //! Records the pending exception, and cancels the task group.\n    /** May be called only from inside a catch-block. If the context is already\n        cancelled, does nothing.\n        The method brings the task group associated with this context exactly into\n        the state it would be in, if one of its tasks threw the currently pending\n        exception during its execution. In other words, it emulates the actions\n        of the scheduler's dispatch loop exception handler. **/\n    void __TBB_EXPORTED_METHOD register_pending_exception ();\n\n#if __TBB_FP_CONTEXT\n    //! Captures the current FPU control settings to the context.\n    /** Because the method assumes that all the tasks that used to be associated with\n        this context have already finished, calling it while the context is still\n        in use somewhere in the task hierarchy leads to undefined behavior.\n\n        IMPORTANT: This method is not thread safe!\n\n        The method does not change the FPU control settings of the context's parent. **/\n    void __TBB_EXPORTED_METHOD capture_fp_settings ();\n#endif\n\n#if __TBB_TASK_PRIORITY\n    //! 
Changes priority of the task group\n    void set_priority ( priority_t );\n\n    //! Retrieves current priority of the current task group\n    priority_t priority () const;\n#endif /* __TBB_TASK_PRIORITY */\n\nprotected:\n    //! Out-of-line part of the constructor.\n    /** Singled out to ensure backward binary compatibility of the future versions. **/\n    void __TBB_EXPORTED_METHOD init ();\n\nprivate:\n    friend class task;\n    friend class internal::allocate_root_with_context_proxy;\n\n    static const kind_type binding_required = bound;\n    static const kind_type binding_completed = kind_type(bound+1);\n    static const kind_type detached = kind_type(binding_completed+1);\n    static const kind_type dying = kind_type(detached+1);\n\n    //! Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line.\n    template <typename T>\n    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );\n\n    //! Registers this context with the local scheduler and binds it to its parent context\n    void bind_to ( internal::generic_scheduler *local_sched );\n\n    //! Registers this context with the local scheduler\n    void register_with ( internal::generic_scheduler *local_sched );\n\n#if __TBB_FP_CONTEXT\n    //! Copies FPU control setting from another context\n    // TODO: Consider adding #else stub in order to omit #if sections in other code\n    void copy_fp_settings( const task_group_context &src );\n#endif /* __TBB_FP_CONTEXT */\n}; // class task_group_context\n\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n//! Base class for user-defined tasks.\n/** @ingroup task_scheduling */\nclass task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {\n\n    //! Set reference count\n    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );\n\n    //! 
Decrement reference count and return its new value.\n    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();\n\nprotected:\n    //! Default constructor.\n    task() {prefix().extra_state=1;}\n\npublic:\n    //! Destructor.\n    virtual ~task() {}\n\n    //! Should be overridden by derived classes.\n    virtual task* execute() = 0;\n\n    //! Enumeration of task states that the scheduler considers.\n    enum state_type {\n        //! task is running, and will be destroyed after method execute() completes.\n        executing,\n        //! task to be rescheduled.\n        reexecute,\n        //! task is in ready pool, or is going to be put there, or was just taken off.\n        ready,\n        //! task object is freshly allocated or recycled.\n        allocated,\n        //! task object is on free list, or is going to be put there, or was just taken off.\n        freed,\n        //! task to be recycled as continuation\n        recycle\n#if __TBB_RECYCLE_TO_ENQUEUE\n        //! task to be scheduled for starvation-resistant execution\n        ,to_enqueue\n#endif\n    };\n\n    //------------------------------------------------------------------------\n    // Allocating tasks\n    //------------------------------------------------------------------------\n\n    //! Returns proxy for overloaded new that allocates a root task.\n    static internal::allocate_root_proxy allocate_root() {\n        return internal::allocate_root_proxy();\n    }\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Returns proxy for overloaded new that allocates a root task associated with user supplied context.\n    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {\n        return internal::allocate_root_with_context_proxy(ctx);\n    }\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n    //! Returns proxy for overloaded new that allocates a continuation task of *this.\n    /** The continuation's parent becomes the parent of *this. 
*/\n    internal::allocate_continuation_proxy& allocate_continuation() {\n        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);\n    }\n\n    //! Returns proxy for overloaded new that allocates a child task of *this.\n    internal::allocate_child_proxy& allocate_child() {\n        return *reinterpret_cast<internal::allocate_child_proxy*>(this);\n    }\n\n    //! Define recommended static form via import from base class.\n    using task_base::allocate_additional_child_of;\n\n#if __TBB_DEPRECATED_TASK_INTERFACE\n    //! Destroy a task.\n    /** Usually, calling this method is unnecessary, because a task is\n        implicitly deleted after its execute() method runs.  However,\n        sometimes a task needs to be explicitly deallocated, such as\n        when a root task is used as the parent in spawn_and_wait_for_all. */\n    void __TBB_EXPORTED_METHOD destroy( task& t );\n#else /* !__TBB_DEPRECATED_TASK_INTERFACE */\n    //! Define recommended static form via import from base class.\n    using task_base::destroy;\n#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */\n\n    //------------------------------------------------------------------------\n    // Recycling of tasks\n    //------------------------------------------------------------------------\n\n    //! Change this to be a continuation of its former self.\n    /** The caller must guarantee that the task's refcount does not become zero until\n        after the method execute() returns.  Typically, this is done by having\n        method execute() return a pointer to a child of the task.  If the guarantee\n        cannot be made, use method recycle_as_safe_continuation instead.\n\n        Because of the hazard, this method may be deprecated in the future. */\n    void recycle_as_continuation() {\n        __TBB_ASSERT( prefix().state==executing, \"execute not running?\" );\n        prefix().state = allocated;\n    }\n\n    //! 
Recommended to use, safe variant of recycle_as_continuation\n    /** For safety, it requires additional increment of ref_count.\n        With no descendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */\n    void recycle_as_safe_continuation() {\n        __TBB_ASSERT( prefix().state==executing, \"execute not running?\" );\n        prefix().state = recycle;\n    }\n\n    //! Change this to be a child of new_parent.\n    void recycle_as_child_of( task& new_parent ) {\n        internal::task_prefix& p = prefix();\n        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, \"execute not running, or already recycled\" );\n        __TBB_ASSERT( prefix().ref_count==0, \"no child tasks allowed when recycled as a child\" );\n        __TBB_ASSERT( p.parent==NULL, \"parent must be null\" );\n        __TBB_ASSERT( new_parent.prefix().state<=recycle, \"corrupt parent's state\" );\n        __TBB_ASSERT( new_parent.prefix().state!=freed, \"parent already freed\" );\n        p.state = allocated;\n        p.parent = &new_parent;\n#if __TBB_TASK_GROUP_CONTEXT\n        p.context = new_parent.prefix().context;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    }\n\n    //! Schedule this for reexecution after current execute() returns.\n    /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */\n    void recycle_to_reexecute() {\n        __TBB_ASSERT( prefix().state==executing, \"execute not running, or already recycled\" );\n        __TBB_ASSERT( prefix().ref_count==0, \"no child tasks allowed when recycled for reexecution\" );\n        prefix().state = reexecute;\n    }\n\n#if __TBB_RECYCLE_TO_ENQUEUE\n    //! Schedule this to enqueue after descendant tasks complete.\n    /** Save enqueue/spawn difference, it has the semantics of recycle_as_safe_continuation. 
*/\n    void recycle_to_enqueue() {\n        __TBB_ASSERT( prefix().state==executing, \"execute not running, or already recycled\" );\n        prefix().state = to_enqueue;\n    }\n#endif /* __TBB_RECYCLE_TO_ENQUEUE */\n\n    //------------------------------------------------------------------------\n    // Spawning and blocking\n    //------------------------------------------------------------------------\n\n    //! Set reference count\n    void set_ref_count( int count ) {\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n        internal_set_ref_count(count);\n#else\n        prefix().ref_count = count;\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */\n    }\n\n    //! Atomically increment reference count and returns its old value.\n    /** Has acquire semantics */\n    void increment_ref_count() {\n        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );\n    }\n\n    //! Atomically decrement reference count and returns its new value.\n    /** Has release semantics. */\n    int decrement_ref_count() {\n#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT\n        return int(internal_decrement_ref_count());\n#else\n        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;\n#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */\n    }\n\n    //! Define recommended static forms via import from base class.\n    using task_base::spawn;\n\n    //! Similar to spawn followed by wait_for_all, but more efficient.\n    void spawn_and_wait_for_all( task& child ) {\n        prefix().owner->wait_for_all( *this, &child );\n    }\n\n    //! Similar to spawn followed by wait_for_all, but more efficient.\n    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );\n\n    //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.\n    static void spawn_root_and_wait( task& root ) {\n        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );\n    }\n\n    //! 
Spawn root tasks on list and wait for all of them to finish.\n    /** If there are more tasks than worker threads, the tasks are spawned in\n        order of front to back. */\n    static void spawn_root_and_wait( task_list& root_list );\n\n    //! Wait for reference count to become one, and set reference count to zero.\n    /** Works on tasks while waiting. */\n    void wait_for_all() {\n        prefix().owner->wait_for_all( *this, NULL );\n    }\n\n    //! Enqueue task for starvation-resistant execution.\n#if __TBB_TASK_PRIORITY\n    /** The task will be enqueued on the normal priority level disregarding the\n        priority of its task group.\n\n        The rationale of such semantics is that priority of an enqueued task is\n        statically fixed at the moment of its enqueuing, while task group priority\n        is dynamic. Thus automatic priority inheritance would be generally a subject\n        to the race, which may result in unexpected behavior.\n\n        Use enqueue() overload with explicit priority value and task::group_priority()\n        method to implement such priority inheritance when it is really necessary. **/\n#endif /* __TBB_TASK_PRIORITY */\n    static void enqueue( task& t ) {\n        t.prefix().owner->enqueue( t, NULL );\n    }\n\n#if __TBB_TASK_PRIORITY\n    //! Enqueue task for starvation-resistant execution on the specified priority level.\n    static void enqueue( task& t, priority_t p ) {\n        __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, \"Invalid priority level value\" );\n        t.prefix().owner->enqueue( t, (void*)p );\n    }\n#endif /* __TBB_TASK_PRIORITY */\n\n    //! The innermost task being executed or destroyed by the current thread at the moment.\n    static task& __TBB_EXPORTED_FUNC self();\n\n    //! task on whose behalf this task is working, or NULL if this is a root.\n    task* parent() const {return prefix().parent;}\n\n    //! 
sets parent task pointer to specified value\n    void set_parent(task* p) {\n#if __TBB_TASK_GROUP_CONTEXT\n        __TBB_ASSERT(prefix().context == p->prefix().context, \"The tasks must be in the same context\");\n#endif\n        prefix().parent = p;\n    }\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! This method is deprecated and will be removed in the future.\n    /** Use method group() instead. **/\n    task_group_context* context() {return prefix().context;}\n\n    //! Pointer to the task group descriptor.\n    task_group_context* group () { return prefix().context; }\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n    //! True if task was stolen from the task pool of another thread.\n    bool is_stolen_task() const {\n        return (prefix().extra_state & 0x80)!=0;\n    }\n\n    //------------------------------------------------------------------------\n    // Debugging\n    //------------------------------------------------------------------------\n\n    //! Current execution state\n    state_type state() const {return state_type(prefix().state);}\n\n    //! The internal reference count.\n    int ref_count() const {\n#if TBB_USE_ASSERT\n        internal::reference_count ref_count_ = prefix().ref_count;\n        __TBB_ASSERT( ref_count_==int(ref_count_), \"integer overflow error\");\n#endif\n        return int(prefix().ref_count);\n    }\n\n    //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.\n    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;\n\n    //------------------------------------------------------------------------\n    // Affinity\n    //------------------------------------------------------------------------\n\n    //! An id as used for specifying affinity.\n    /** Guaranteed to be integral type.  Value of 0 means no affinity. */\n    typedef internal::affinity_id affinity_id;\n\n    //! Set affinity for this task.\n    void set_affinity( affinity_id id ) {prefix().affinity = id;}\n\n    //! 
Current affinity of this task\n    affinity_id affinity() const {return prefix().affinity;}\n\n    //! Invoked by scheduler to notify task that it ran on unexpected thread.\n    /** Invoked before method execute() runs, if task is stolen, or task has\n        affinity but will be executed on another thread.\n\n        The default action does nothing. */\n    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! Moves this task from its current group into another one.\n    /** Argument ctx specifies the new group.\n\n        The primary purpose of this method is to associate unique task group context\n        with a task allocated for subsequent enqueuing. In contrast to spawned tasks\n        enqueued ones normally outlive the scope where they were created. This makes\n        traditional usage model where task group context are allocated locally on\n        the stack inapplicable. Dynamic allocation of context objects is performance\n        inefficient. Method change_group() allows to make task group context object\n        a member of the task class, and then associate it with its containing task\n        object in the latter's constructor. **/\n    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );\n\n    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.\n    /** \\return false if cancellation has already been requested, true otherwise. **/\n    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }\n\n    //! Returns true if the context has received cancellation request.\n    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }\n#else\n    bool is_cancelled () const { return false; }\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n#if __TBB_TASK_PRIORITY\n    //! 
Changes priority of the task group this task belongs to.\n    void set_group_priority ( priority_t p ) {  prefix().context->set_priority(p); }\n\n    //! Retrieves current priority of the task group this task belongs to.\n    priority_t group_priority () const { return prefix().context->priority(); }\n\n#endif /* __TBB_TASK_PRIORITY */\n\nprivate:\n    friend class interface5::internal::task_base;\n    friend class task_list;\n    friend class internal::scheduler;\n    friend class internal::allocate_root_proxy;\n#if __TBB_TASK_GROUP_CONTEXT\n    friend class internal::allocate_root_with_context_proxy;\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n    friend class internal::allocate_continuation_proxy;\n    friend class internal::allocate_child_proxy;\n    friend class internal::allocate_additional_child_of_proxy;\n\n    //! Get reference to corresponding task_prefix.\n    /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/\n    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {\n        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];\n    }\n}; // class task\n\n//! task that does nothing.  Useful for synchronization.\n/** @ingroup task_scheduling */\nclass empty_task: public task {\n    /*override*/ task* execute() {\n        return NULL;\n    }\n};\n\n//! @cond INTERNAL\nnamespace internal {\n    template<typename F>\n    class function_task : public task {\n        F my_func;\n        /*override*/ task* execute() {\n            my_func();\n            return NULL;\n        }\n    public:\n        function_task( const F& f ) : my_func(f) {}\n    };\n} // namespace internal\n//! @endcond\n\n//! A list of children.\n/** Used for method task::spawn_children\n    @ingroup task_scheduling */\nclass task_list: internal::no_copy {\nprivate:\n    task* first;\n    task** next_ptr;\n    friend class task;\n    friend class interface5::internal::task_base;\npublic:\n    //! 
Construct empty list\n    task_list() : first(NULL), next_ptr(&first) {}\n\n    //! Destroys the list, but does not destroy the task objects.\n    ~task_list() {}\n\n    //! True if list if empty; false otherwise.\n    bool empty() const {return !first;}\n\n    //! Push task onto back of list.\n    void push_back( task& task ) {\n        task.prefix().next = NULL;\n        *next_ptr = &task;\n        next_ptr = &task.prefix().next;\n    }\n\n    //! Pop the front task from the list.\n    task& pop_front() {\n        __TBB_ASSERT( !empty(), \"attempt to pop item from empty task_list\" );\n        task* result = first;\n        first = result->prefix().next;\n        if( !first ) next_ptr = &first;\n        return *result;\n    }\n\n    //! Clear the list\n    void clear() {\n        first=NULL;\n        next_ptr=&first;\n    }\n};\n\ninline void interface5::internal::task_base::spawn( task& t ) {\n    t.prefix().owner->spawn( t, t.prefix().next );\n}\n\ninline void interface5::internal::task_base::spawn( task_list& list ) {\n    if( task* t = list.first ) {\n        t->prefix().owner->spawn( *t, *list.next_ptr );\n        list.clear();\n    }\n}\n\ninline void task::spawn_root_and_wait( task_list& root_list ) {\n    if( task* t = root_list.first ) {\n        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );\n        root_list.clear();\n    }\n}\n\n} // namespace tbb\n\ninline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {\n    return &tbb::internal::allocate_root_proxy::allocate(bytes);\n}\n\ninline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {\n    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\ninline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {\n    return &p.allocate(bytes);\n}\n\ninline void operator delete( void* task, const 
tbb::internal::allocate_root_with_context_proxy& p ) {\n    p.free( *static_cast<tbb::task*>(task) );\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\ninline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {\n    return &p.allocate(bytes);\n}\n\ninline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {\n    p.free( *static_cast<tbb::task*>(task) );\n}\n\ninline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {\n    return &p.allocate(bytes);\n}\n\ninline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {\n    p.free( *static_cast<tbb::task*>(task) );\n}\n\ninline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {\n    return &p.allocate(bytes);\n}\n\ninline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {\n    p.free( *static_cast<tbb::task*>(task) );\n}\n\n#endif /* __TBB_task_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/task_arena.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_task_arena_H\n#define __TBB_task_arena_H\n\n#include \"task.h\"\n#include \"tbb_exception.h\"\n#if TBB_USE_THREADING_TOOLS\n#include \"atomic.h\" // for as_atomic\n#endif\n\n#if __TBB_TASK_ARENA\n\nnamespace tbb {\n\n//! @cond INTERNAL\nnamespace internal {\n    //! Internal to library. Should not be used by clients.\n    /** @ingroup task_scheduling */\n    class arena;\n    class task_scheduler_observer_v3;\n} // namespace internal\n//! @endcond\n\nnamespace interface7 {\n//! @cond INTERNAL\nnamespace internal {\nusing namespace tbb::internal; //e.g. 
function_task from task.h\n\nclass delegate_base : no_assign {\npublic:\n    virtual void operator()() const = 0;\n    virtual ~delegate_base() {}\n};\n\ntemplate<typename F>\nclass delegated_function : public delegate_base {\n    F &my_func;\n    /*override*/ void operator()() const {\n        my_func();\n    }\npublic:\n    delegated_function ( F& f ) : my_func(f) {}\n};\n\nclass task_arena_base {\nprotected:\n    //! NULL if not currently initialized.\n    internal::arena* my_arena;\n\n#if __TBB_TASK_GROUP_CONTEXT\n    //! default context of the arena\n    task_group_context *my_context;\n#endif\n\n    //! Concurrency level for deferred initialization\n    int my_max_concurrency;\n\n    //! Reserved master slots\n    unsigned my_master_slots;\n\n    //! Special settings\n    intptr_t my_version_and_traits;\n\n    enum {\n        default_flags = 0\n#if __TBB_TASK_GROUP_CONTEXT\n        | (task_group_context::default_traits & task_group_context::exact_exception)  // 0 or 1 << 16\n        , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly\n#endif\n    };\n\n    task_arena_base(int max_concurrency, unsigned reserved_for_masters)\n        : my_arena(0)\n#if __TBB_TASK_GROUP_CONTEXT\n        , my_context(0)\n#endif\n        , my_max_concurrency(max_concurrency)\n        , my_master_slots(reserved_for_masters)\n        , my_version_and_traits(default_flags)\n        {}\n\n    void __TBB_EXPORTED_METHOD internal_initialize( );\n    void __TBB_EXPORTED_METHOD internal_terminate( );\n    void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const;\n    void __TBB_EXPORTED_METHOD internal_execute( delegate_base& ) const;\n    void __TBB_EXPORTED_METHOD internal_wait() const;\n    static int __TBB_EXPORTED_FUNC internal_current_slot();\npublic:\n    //! Typedef for number of threads that is automatic.\n    static const int automatic = -1; // any value < 1 means 'automatic'\n\n};\n\n} // namespace internal\n//! 
@endcond\n\n/** 1-to-1 proxy representation class of scheduler's arena\n * Constructors set up settings only, real construction is deferred till the first method invocation\n * Destructor only removes one of the references to the inner arena representation.\n * Final destruction happens when all the references (and the work) are gone.\n */\nclass task_arena : public internal::task_arena_base {\n    friend class tbb::internal::task_scheduler_observer_v3;\n    bool my_initialized;\n\npublic:\n    //! Creates task_arena with certain concurrency limits\n    /** Sets up settings only, real construction is deferred till the first method invocation\n     *  @arg max_concurrency specifies total number of slots in arena where threads work\n     *  @arg reserved_for_masters specifies number of slots to be used by master threads only.\n     *       Value of 1 is default and reflects behavior of implicit arenas.\n     **/\n    task_arena(int max_concurrency = automatic, unsigned reserved_for_masters = 1)\n        : task_arena_base(max_concurrency, reserved_for_masters)\n        , my_initialized(false)\n    {}\n\n    //! Copies settings from another task_arena\n    task_arena(const task_arena &s) // copy settings but not the reference or instance\n        : task_arena_base(s.my_max_concurrency, s.my_master_slots)\n        , my_initialized(false)\n    {}\n\n    //! Forces allocation of the resources for the task_arena as specified in constructor arguments\n    inline void initialize() {\n        if( !my_initialized ) {\n            internal_initialize();\n#if TBB_USE_THREADING_TOOLS\n            // Threading tools respect lock prefix but report false-positive data-race via plain store\n            internal::as_atomic(my_initialized).fetch_and_store<release>(true);\n#else\n            my_initialized = true;\n#endif //TBB_USE_THREADING_TOOLS\n        }\n    }\n\n    //! 
Overrides concurrency level and forces initialization of internal representation\n    inline void initialize(int max_concurrency, unsigned reserved_for_masters = 1) {\n        __TBB_ASSERT( !my_arena, \"Impossible to modify settings of an already initialized task_arena\");\n        if( !my_initialized ) {\n            my_max_concurrency = max_concurrency;\n            my_master_slots = reserved_for_masters;\n            initialize();\n        }\n    }\n\n    //! Removes the reference to the internal arena representation.\n    //! Not thread safe wrt concurrent invocations of other methods.\n    inline void terminate() {\n        if( my_initialized ) {\n            internal_terminate();\n            my_initialized = false;\n        }\n    }\n\n    //! Removes the reference to the internal arena representation, and destroys the external object.\n    //! Not thread safe wrt concurrent invocations of other methods.\n    ~task_arena() {\n        terminate();\n    }\n\n    //! Returns true if the arena is active (initialized); false otherwise.\n    //! The name was chosen to match a task_scheduler_init method with the same semantics.\n    bool is_active() const { return my_initialized; }\n\n    //! Enqueues a task into the arena to process a functor, and immediately returns.\n    //! Does not require the calling thread to join the arena\n    template<typename F>\n    void enqueue( const F& f ) {\n        initialize();\n#if __TBB_TASK_GROUP_CONTEXT\n        internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task<F>(f), 0 );\n#else\n        internal_enqueue( *new( task::allocate_root() ) internal::function_task<F>(f), 0 );\n#endif\n    }\n\n#if __TBB_TASK_PRIORITY\n    //! Enqueues a task with priority p into the arena to process a functor f, and immediately returns.\n    //! 
Does not require the calling thread to join the arena\n    template<typename F>\n    void enqueue( const F& f, priority_t p ) {\n        __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, \"Invalid priority level value\" );\n        initialize();\n#if __TBB_TASK_GROUP_CONTEXT\n        internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task<F>(f), (intptr_t)p );\n#else\n        internal_enqueue( *new( task::allocate_root() ) internal::function_task<F>(f), (intptr_t)p );\n#endif\n    }\n#endif// __TBB_TASK_PRIORITY\n\n    //! Joins the arena and executes a functor, then returns\n    //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion\n    //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread\n    template<typename F>\n    void execute(F& f) {\n        initialize();\n        internal::delegated_function<F> d(f);\n        internal_execute( d );\n    }\n\n    //! Joins the arena and executes a functor, then returns\n    //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion\n    //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread\n    template<typename F>\n    void execute(const F& f) {\n        initialize();\n        internal::delegated_function<const F> d(f);\n        internal_execute( d );\n    }\n\n#if __TBB_EXTRA_DEBUG\n    //! Wait for all work in the arena to be completed\n    //! Even submitted by other application threads\n    //! Joins arena if/when possible (in the same way as execute())\n    void debug_wait_until_empty() {\n        initialize();\n        internal_wait();\n    }\n#endif //__TBB_EXTRA_DEBUG\n\n    //! 
Returns the index, aka slot number, of the calling thread in its current arena\n    inline static int current_thread_index() {\n        return internal_current_slot();\n    }\n};\n\n} // namespace interfaceX\n\nusing interface7::task_arena;\n\n} // namespace tbb\n\n#endif /* __TBB_TASK_ARENA */\n\n#endif /* __TBB_task_arena_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/task_group.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_task_group_H\n#define __TBB_task_group_H\n\n#include \"task.h\"\n#include \"tbb_exception.h\"\n\n#if __TBB_TASK_GROUP_CONTEXT\n\nnamespace tbb {\n\nnamespace internal {\n    template<typename F> class task_handle_task;\n}\n\nclass task_group;\nclass structured_task_group;\n\ntemplate<typename F>\nclass task_handle : internal::no_assign {\n    template<typename _F> friend class internal::task_handle_task;\n    friend class task_group;\n    friend class structured_task_group;\n\n    static const intptr_t scheduled = 0x1;\n\n    F my_func;\n    intptr_t my_state;\n\n    void mark_scheduled () {\n        // The check here is intentionally lax to avoid the impact of interlocked operation\n        if ( my_state & scheduled )\n            internal::throw_exception( internal::eid_invalid_multiple_scheduling );\n        my_state |= scheduled;\n    }\npublic:\n    task_handle( const F& f ) : my_func(f), my_state(0) {}\n\n    void operator() () const { my_func(); }\n};\n\nenum task_group_status {\n    not_complete,\n    complete,\n    canceled\n};\n\nnamespace internal {\n\ntemplate<typename F>\nclass task_handle_task : public task {\n    task_handle<F>& my_handle;\n    /*override*/ task* execute() {\n        my_handle();\n        return NULL;\n    }\npublic:\n    task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }\n};\n\nclass task_group_base : internal::no_copy {\nprotected:\n    empty_task* my_root;\n    task_group_context my_context;\n\n    task& owner () { return *my_root; }\n\n    template<typename F>\n    task_group_status internal_run_and_wait( F& f ) {\n        __TBB_TRY {\n            if ( !my_context.is_group_execution_cancelled() )\n                f();\n        } __TBB_CATCH( ... 
) {\n            my_context.register_pending_exception();\n        }\n        return wait();\n    }\n\n    template<typename F, typename Task>\n    void internal_run( F& f ) {\n        owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) );\n    }\n\npublic:\n    task_group_base( uintptr_t traits = 0 )\n        : my_context(task_group_context::bound, task_group_context::default_traits | traits)\n    {\n        my_root = new( task::allocate_root(my_context) ) empty_task;\n        my_root->set_ref_count(1);\n    }\n\n    ~task_group_base() __TBB_NOEXCEPT(false) {\n        if( my_root->ref_count() > 1 ) {\n            bool stack_unwinding_in_progress = std::uncaught_exception();\n            // Always attempt to do proper cleanup to avoid inevitable memory corruption \n            // in case of missing wait (for the sake of better testability & debuggability)\n            if ( !is_canceling() )\n                cancel();\n            __TBB_TRY {\n                my_root->wait_for_all();\n            } __TBB_CATCH (...) {\n                task::destroy(*my_root);\n                __TBB_RETHROW();\n            }\n            task::destroy(*my_root);\n            if ( !stack_unwinding_in_progress )\n                internal::throw_exception( internal::eid_missing_wait );\n        }\n        else {\n            task::destroy(*my_root);\n        }\n    }\n\n    template<typename F>\n    void run( task_handle<F>& h ) {\n        internal_run< task_handle<F>, internal::task_handle_task<F> >( h );\n    }\n\n    task_group_status wait() {\n        __TBB_TRY {\n            my_root->wait_for_all();\n        } __TBB_CATCH( ... 
) {\n            my_context.reset();\n            __TBB_RETHROW();\n        }\n        if ( my_context.is_group_execution_cancelled() ) {\n            my_context.reset();\n            return canceled;\n        }\n        return complete;\n    }\n\n    bool is_canceling() {\n        return my_context.is_group_execution_cancelled();\n    }\n\n    void cancel() {\n        my_context.cancel_group_execution();\n    }\n}; // class task_group_base\n\n} // namespace internal\n\nclass task_group : public internal::task_group_base {\npublic:\n    task_group () : task_group_base( task_group_context::concurrent_wait ) {}\n\n#if __SUNPRO_CC\n    template<typename F>\n    void run( task_handle<F>& h ) {\n        internal_run< task_handle<F>, internal::task_handle_task<F> >( h );\n    }\n#else\n    using task_group_base::run;\n#endif\n\n    template<typename F>\n    void run( const F& f ) {\n        internal_run< const F, internal::function_task<F> >( f );\n    }\n\n    template<typename F>\n    task_group_status run_and_wait( const F& f ) {\n        return internal_run_and_wait<const F>( f );\n    }\n\n    template<typename F>\n    task_group_status run_and_wait( task_handle<F>& h ) {\n      h.mark_scheduled();\n      return internal_run_and_wait< task_handle<F> >( h );\n    }\n}; // class task_group\n\nclass structured_task_group : public internal::task_group_base {\npublic:\n    template<typename F>\n    task_group_status run_and_wait ( task_handle<F>& h ) {\n        h.mark_scheduled();\n        return internal_run_and_wait< task_handle<F> >( h );\n    }\n\n    task_group_status wait() {\n        task_group_status res = task_group_base::wait();\n        my_root->set_ref_count(1);\n        return res;\n    }\n}; // class structured_task_group\n\ninline \nbool is_current_task_group_canceling() {\n    return task::self().is_cancelled();\n}\n\ntemplate<class F>\ntask_handle<F> make_task( const F& f ) {\n    return task_handle<F>( f );\n}\n\n} // namespace tbb\n\n#endif /* 
__TBB_TASK_GROUP_CONTEXT */\n\n#endif /* __TBB_task_group_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/task_group_context.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"scheduler.h\"\n\n#include \"itt_notify.h\"\n\nnamespace tbb {\n\n#if __TBB_TASK_GROUP_CONTEXT\n\nusing namespace internal;\n\n//------------------------------------------------------------------------\n// captured_exception\n//------------------------------------------------------------------------\n\ninline char* duplicate_string ( const char* src ) {\n    char* dst = NULL;\n    if ( src ) {\n        size_t len = strlen(src) + 1;\n        dst = (char*)allocate_via_handler_v3(len);\n        strncpy (dst, src, len);\n    }\n    return dst;\n}\n\ncaptured_exception::~captured_exception () throw() {\n    clear();\n}\n\nvoid captured_exception::set ( const char* a_name, const char* info ) throw() {\n    my_exception_name = duplicate_string( a_name );\n    my_exception_info = duplicate_string( info );\n}\n\nvoid captured_exception::clear () throw() {\n    deallocate_via_handler_v3 (const_cast<char*>(my_exception_name));\n    deallocate_via_handler_v3 (const_cast<char*>(my_exception_info));\n}\n\ncaptured_exception* captured_exception::move () throw() {\n    captured_exception *e = (captured_exception*)allocate_via_handler_v3(sizeof(captured_exception));\n    if ( e ) {\n        ::new (e) captured_exception();\n        e->my_exception_name = my_exception_name;\n        e->my_exception_info = my_exception_info;\n        e->my_dynamic = true;\n        my_exception_name = my_exception_info = NULL;\n    }\n    return e;\n}\n\nvoid captured_exception::destroy () throw() {\n    __TBB_ASSERT ( my_dynamic, \"Method destroy can be used only on objects created by clone or allocate\" );\n    if ( my_dynamic ) {\n        this->captured_exception::~captured_exception();\n        deallocate_via_handler_v3 (this);\n    }\n}\n\ncaptured_exception* captured_exception::allocate ( const char* a_name, const char* info ) {\n    
captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) );\n    if ( e ) {\n        ::new (e) captured_exception(a_name, info);\n        e->my_dynamic = true;\n    }\n    return e;\n}\n\nconst char* captured_exception::name() const throw() {\n    return my_exception_name;\n}\n\nconst char* captured_exception::what() const throw() {\n    return my_exception_info;\n}\n\n\n//------------------------------------------------------------------------\n// tbb_exception_ptr\n//------------------------------------------------------------------------\n\n#if !TBB_USE_CAPTURED_EXCEPTION\n\nnamespace internal {\n\ntemplate<typename T>\ntbb_exception_ptr* AllocateExceptionContainer( const T& src ) {\n    tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) );\n    if ( eptr )\n        new (eptr) tbb_exception_ptr(src);\n    return eptr;\n}\n\ntbb_exception_ptr* tbb_exception_ptr::allocate () {\n    return AllocateExceptionContainer( std::current_exception() );\n}\n\ntbb_exception_ptr* tbb_exception_ptr::allocate ( const tbb_exception& ) {\n    return AllocateExceptionContainer( std::current_exception() );\n}\n\ntbb_exception_ptr* tbb_exception_ptr::allocate ( captured_exception& src ) {\n    tbb_exception_ptr *res = AllocateExceptionContainer( src );\n    src.destroy();\n    return res;\n}\n\nvoid tbb_exception_ptr::destroy () throw() {\n    this->tbb_exception_ptr::~tbb_exception_ptr();\n    deallocate_via_handler_v3 (this);\n}\n\n} // namespace internal\n#endif /* !TBB_USE_CAPTURED_EXCEPTION */\n\n\n//------------------------------------------------------------------------\n// task_group_context\n//------------------------------------------------------------------------\n\ntask_group_context::~task_group_context () {\n    if ( __TBB_load_relaxed(my_kind) == binding_completed ) {\n        if ( governor::is_set(my_owner) ) {\n            // Local update of the context list\n            uintptr_t 
local_count_snapshot = my_owner->my_context_state_propagation_epoch;\n            my_owner->my_local_ctx_list_update.store<relaxed>(1);\n            // Prevent load of nonlocal update flag from being hoisted before the\n            // store to local update flag.\n            atomic_fence();\n            if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {\n                spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);\n                my_node.my_prev->my_next = my_node.my_next;\n                my_node.my_next->my_prev = my_node.my_prev;\n                my_owner->my_local_ctx_list_update.store<relaxed>(0);\n            }\n            else {\n                my_node.my_prev->my_next = my_node.my_next;\n                my_node.my_next->my_prev = my_node.my_prev;\n                // Release fence is necessary so that update of our neighbors in\n                // the context list was committed when possible concurrent destroyer\n                // proceeds after local update flag is reset by the following store.\n                my_owner->my_local_ctx_list_update.store<release>(0);\n                if ( local_count_snapshot != the_context_state_propagation_epoch ) {\n                    // Another thread was propagating cancellation request when we removed\n                    // ourselves from the list. We must ensure that it is not accessing us\n                    // when this destructor finishes. 
We'll be able to acquire the lock\n                    // below only after the other thread finishes with us.\n                    spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);\n                }\n            }\n        }\n        else {\n            // Nonlocal update of the context list\n            // Synchronizes with generic_scheduler::cleanup_local_context_list()\n            // TODO: evaluate and perhaps relax, or add some lock instead\n            if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) {\n                my_node.my_prev->my_next = my_node.my_next;\n                my_node.my_next->my_prev = my_node.my_prev;\n            }\n            else {\n                //TODO: evaluate and perhaps relax\n                my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();\n                //TODO: evaluate and perhaps remove\n                spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u );\n                my_owner->my_context_list_mutex.lock();\n                my_node.my_prev->my_next = my_node.my_next;\n                my_node.my_next->my_prev = my_node.my_prev;\n                my_owner->my_context_list_mutex.unlock();\n                //TODO: evaluate and perhaps relax\n                my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();\n            }\n        }\n    }\n#if __TBB_FP_CONTEXT\n    internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();\n#endif\n    poison_value(my_version_and_traits);\n    if ( my_exception )\n        my_exception->destroy();\n    ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller);\n}\n\nvoid task_group_context::init () {\n    __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, \"Layout of my_version_and_traits must be reconsidered on this platform\" );\n    __TBB_STATIC_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, \"Context class has wrong size - check padding and members 
alignment\" );\n    __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, \"Context is improperly aligned\" );\n    __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == isolated || __TBB_load_relaxed(my_kind) == bound, \"Context can be created only as isolated or bound\" );\n    my_parent = NULL;\n    my_cancellation_requested = 0;\n    my_exception = NULL;\n    my_owner = NULL;\n    my_state = 0;\n    itt_caller = ITT_CALLER_NULL;\n#if __TBB_TASK_PRIORITY\n    my_priority = normalized_normal_priority;\n#endif /* __TBB_TASK_PRIORITY */\n#if __TBB_FP_CONTEXT\n    __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), \"The reserved space for FPU settings are not equal sizeof(uint64_t)\" );\n    __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), \"FPU settings storage does not fit to uint64_t\" );\n    suppress_unused_warning( my_cpu_ctl_env.space );\n\n    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);\n    new ( &ctl ) cpu_ctl_env;\n    if ( my_version_and_traits & fp_settings )\n        ctl.get_env();\n#endif\n}\n\nvoid task_group_context::register_with ( generic_scheduler *local_sched ) {\n    __TBB_ASSERT( local_sched, NULL );\n    my_owner = local_sched;\n    // state propagation logic assumes new contexts are bound to head of the list\n    my_node.my_prev = &local_sched->my_context_list_head;\n    // Notify threads that may be concurrently destroying contexts registered\n    // in this scheduler's list that local list update is underway.\n    local_sched->my_local_ctx_list_update.store<relaxed>(1);\n    // Prevent load of global propagation epoch counter from being hoisted before\n    // speculative stores above, as well as load of nonlocal update flag from\n    // being hoisted before the store to local update flag.\n    atomic_fence();\n    // Finalize local context list update\n    if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) {\n        
spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);\n        local_sched->my_context_list_head.my_next->my_prev = &my_node;\n        my_node.my_next = local_sched->my_context_list_head.my_next;\n        my_owner->my_local_ctx_list_update.store<relaxed>(0);\n        local_sched->my_context_list_head.my_next = &my_node;\n    }\n    else {\n        local_sched->my_context_list_head.my_next->my_prev = &my_node;\n        my_node.my_next = local_sched->my_context_list_head.my_next;\n        my_owner->my_local_ctx_list_update.store<release>(0);\n        // Thread-local list of contexts allows concurrent traversal by another thread\n        // while propagating state change. To ensure visibility of my_node's members\n        // to the concurrently traversing thread, the list's head is updated by means\n        // of store-with-release.\n        __TBB_store_with_release(local_sched->my_context_list_head.my_next, &my_node);\n    }\n}\n\nvoid task_group_context::bind_to ( generic_scheduler *local_sched ) {\n    __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == binding_required, \"Already bound or isolated?\" );\n    __TBB_ASSERT ( !my_parent, \"Parent is set before initial binding\" );\n    my_parent = local_sched->my_innermost_running_task->prefix().context;\n#if __TBB_FP_CONTEXT\n    // Inherit FPU settings only if the context has not captured FPU settings yet.\n    if ( !(my_version_and_traits & fp_settings) )\n        copy_fp_settings(*my_parent);\n#endif\n\n    // Condition below prevents unnecessary thrashing parent context's cache line\n    if ( !(my_parent->my_state & may_have_children) )\n        my_parent->my_state |= may_have_children; // full fence is below\n    if ( my_parent->my_parent ) {\n        // Even if this context were made accessible for state change propagation\n        // (by placing __TBB_store_with_release(s->my_context_list_head.my_next, &my_node)\n        // above), it still could be missed if state propagation from a grand-ancestor\n       
 // was underway concurrently with binding.\n        // Speculative propagation from the parent together with epoch counters\n        // detecting possibility of such a race allow to avoid taking locks when\n        // there is no contention.\n\n        // Acquire fence is necessary to prevent reordering subsequent speculative\n        // loads of parent state data out of the scope where epoch counters comparison\n        // can reliably validate it.\n        uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );\n        // Speculative propagation of parent's state. The speculation will be\n        // validated by the epoch counters check further on.\n        my_cancellation_requested = my_parent->my_cancellation_requested;\n#if __TBB_TASK_PRIORITY\n        my_priority = my_parent->my_priority;\n#endif /* __TBB_TASK_PRIORITY */\n        register_with( local_sched ); // Issues full fence\n\n        // If no state propagation was detected by the following condition, the above\n        // full fence guarantees that the parent had correct state during speculative\n        // propagation before the fence. Otherwise the propagation from parent is\n        // repeated under the lock.\n        if ( local_count_snapshot != the_context_state_propagation_epoch ) {\n            // Another thread may be propagating state change right now. 
So resort to lock.\n            context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);\n            my_cancellation_requested = my_parent->my_cancellation_requested;\n#if __TBB_TASK_PRIORITY\n            my_priority = my_parent->my_priority;\n#endif /* __TBB_TASK_PRIORITY */\n        }\n    }\n    else {\n        register_with( local_sched ); // Issues full fence\n        // As we do not have grand-ancestors, concurrent state propagation (if any)\n        // may originate only from the parent context, and thus it is safe to directly\n        // copy the state from it.\n        my_cancellation_requested = my_parent->my_cancellation_requested;\n#if __TBB_TASK_PRIORITY\n        my_priority = my_parent->my_priority;\n#endif /* __TBB_TASK_PRIORITY */\n    }\n    __TBB_store_relaxed(my_kind, binding_completed);\n}\n\n#if __TBB_TASK_GROUP_CONTEXT\ntemplate <typename T>\nvoid task_group_context::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {\n    if (this->*mptr_state == new_state) {\n        // Nothing to do, whether descending from \"src\" or not, so no need to scan.\n        // Hopefully this happens often thanks to earlier invocations.\n        // This optimization is enabled by LIFO order in the context lists:\n        // - new contexts are bound to the beginning of lists;\n        // - descendants are newer than ancestors;\n        // - earlier invocations are therefore likely to \"paint\" long chains.\n    }\n    else if (this == &src) {\n        // This clause is disjunct from the traversal below, which skips src entirely.\n        // Note that src.*mptr_state is not necessarily still equal to new_state (another thread may have changed it again).\n        // Such interference is probably not frequent enough to aim for optimisation by writing new_state again (to make the other thread back down).\n        // Letting the other thread prevail may also be fairer.\n    }\n    
else {\n        for ( task_group_context *ancestor = my_parent; ancestor != NULL; ancestor = ancestor->my_parent ) {\n            __TBB_ASSERT(internal::is_alive(ancestor->my_version_and_traits), \"context tree was corrupted\");\n            if ( ancestor == &src ) {\n                for ( task_group_context *ctx = this; ctx != ancestor; ctx = ctx->my_parent )\n                    ctx->*mptr_state = new_state;\n                break;\n            }\n        }\n    }\n}\n\ntemplate <typename T>\nvoid generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {\n    spin_mutex::scoped_lock lock(my_context_list_mutex);\n    // Acquire fence is necessary to ensure that the subsequent node->my_next load\n    // returned the correct value in case it was just inserted in another thread.\n    // The fence also ensures visibility of the correct my_parent value.\n    context_list_node_t *node = __TBB_load_with_acquire(my_context_list_head.my_next);\n    while ( node != &my_context_list_head ) {\n        task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);\n        if ( ctx.*mptr_state != new_state )\n            ctx.propagate_task_group_state( mptr_state, src, new_state );\n        node = node->my_next;\n        __TBB_ASSERT( is_alive(ctx.my_version_and_traits), \"Local context list contains destroyed object\" );\n    }\n    // Sync up local propagation epoch with the global one. 
Release fence prevents\n    // reordering of possible store to *mptr_state after the sync point.\n    __TBB_store_with_release(my_context_state_propagation_epoch, the_context_state_propagation_epoch);\n}\n\ntemplate <typename T>\nbool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {\n    if ( !(src.my_state & task_group_context::may_have_children) )\n        return true;\n    // The whole propagation algorithm is under the lock in order to ensure correctness\n    // in case of concurrent state changes at the different levels of the context tree.\n    // See comment at the bottom of scheduler.cpp\n    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);\n    if ( src.*mptr_state != new_state )\n        // Another thread has concurrently changed the state. Back down.\n        return false;\n    // Advance global state propagation epoch\n    __TBB_FetchAndAddWrelease(&the_context_state_propagation_epoch, 1);\n    // Propagate to all workers and masters and sync up their local epochs with the global one\n    unsigned num_workers = my_num_workers;\n    for ( unsigned i = 0; i < num_workers; ++i ) {\n        generic_scheduler *s = my_workers[i];\n        // If the worker is only about to be registered, skip it.\n        if ( s )\n            s->propagate_task_group_state( mptr_state, src, new_state );\n    }\n    // Propagate to all master threads (under my_arenas_list_mutex lock)\n    ForEachArena(a) { // uses lock on my_arenas_list_mutex\n        arena_slot &slot = a.my_slots[0];\n        generic_scheduler *s = slot.my_scheduler;\n        // If the master is under construction, skip it. 
Otherwise make sure that it does not\n        // leave its arena and its scheduler get destroyed while we accessing its data.\n        if ( s && as_atomic(slot.my_scheduler).compare_and_swap(LockedMaster, s) == s ) { //TODO: remove need in lock\n            __TBB_ASSERT( slot.my_scheduler == LockedMaster, NULL );\n            // The whole propagation sequence is locked, thus no contention is expected\n            __TBB_ASSERT( s != LockedMaster, NULL );\n            s->propagate_task_group_state( mptr_state, src, new_state );\n            __TBB_store_with_release( slot.my_scheduler, s );\n        }\n    } EndForEach();\n    return true;\n}\n\ntemplate <typename T>\nbool arena::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {\n    return my_market->propagate_task_group_state( mptr_state, src, new_state );\n}\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\nbool task_group_context::cancel_group_execution () {\n    __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, \"Invalid cancellation state\");\n    if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {\n        // This task group and any descendants have already been canceled.\n        // (A newly added descendant would inherit its parent's my_cancellation_requested,\n        // not missing out on any cancellation still being propagated, and a context cannot be uncanceled.)\n        return false;\n    }\n    governor::local_scheduler()->my_arena->propagate_task_group_state( &task_group_context::my_cancellation_requested, *this, (uintptr_t)1 );\n    return true;\n}\n\nbool task_group_context::is_group_execution_cancelled () const {\n    return my_cancellation_requested != 0;\n}\n\n// IMPORTANT: It is assumed that this method is not used concurrently!\nvoid task_group_context::reset () {\n    //! 
TODO: Add assertion that this context does not have children\n    // No fences are necessary since this context can be accessed from another thread\n    // only after stealing happened (which means necessary fences were used).\n    if ( my_exception )  {\n        my_exception->destroy();\n        my_exception = NULL;\n    }\n    my_cancellation_requested = 0;\n}\n\n#if __TBB_FP_CONTEXT\n// IMPORTANT: It is assumed that this method is not used concurrently!\nvoid task_group_context::capture_fp_settings () {\n    //! TODO: Add assertion that this context does not have children\n    // No fences are necessary since this context can be accessed from another thread\n    // only after stealing happened (which means necessary fences were used).\n    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);\n    if ( !(my_version_and_traits & fp_settings) ) {\n        new ( &ctl ) cpu_ctl_env;\n        my_version_and_traits |= fp_settings;\n    }\n    ctl.get_env();\n}\n\nvoid task_group_context::copy_fp_settings( const task_group_context &src ) {\n    __TBB_ASSERT( !(my_version_and_traits & fp_settings), \"The context already has FPU settings.\" );\n    __TBB_ASSERT( src.my_version_and_traits & fp_settings, \"The source context does not have FPU settings.\" );\n\n    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);\n    cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env);\n    new (&ctl) cpu_ctl_env( src_ctl );\n    my_version_and_traits |= fp_settings;\n}\n#endif /* __TBB_FP_CONTEXT */\n\nvoid task_group_context::register_pending_exception () {\n    if ( my_cancellation_requested )\n        return;\n#if TBB_USE_EXCEPTIONS\n    try {\n        throw;\n    } TbbCatchAll( this );\n#endif /* TBB_USE_EXCEPTIONS */\n}\n\n#if __TBB_TASK_PRIORITY\nvoid task_group_context::set_priority ( priority_t prio ) {\n    __TBB_ASSERT( prio == priority_low || prio == priority_normal || prio == priority_high, \"Invalid 
priority level value\" );\n    intptr_t p = normalize_priority(prio);\n    if ( my_priority == p && !(my_state & task_group_context::may_have_children))\n        return;\n    my_priority = p;\n    internal::generic_scheduler* s = governor::local_scheduler_if_initialized();\n    if ( !s || !s->my_arena->propagate_task_group_state(&task_group_context::my_priority, *this, p) )\n        return;\n    // Updating arena priority here does not eliminate necessity of checking each\n    // task priority and updating arena priority if necessary before the task execution.\n    // These checks will be necessary because:\n    // a) set_priority() may be invoked before any tasks from this task group are spawned;\n    // b) all spawned tasks from this task group are retrieved from the task pools.\n    // These cases create a time window when arena priority may be lowered.\n    s->my_market->update_arena_priority( *s->my_arena, p );\n}\n\npriority_t task_group_context::priority () const {\n    return static_cast<priority_t>(priority_from_normalized_rep[my_priority]);\n}\n#endif /* __TBB_TASK_PRIORITY */\n\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/task_scheduler_init.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_task_scheduler_init_H\n#define __TBB_task_scheduler_init_H\n\n#include \"tbb_stddef.h\"\n#include \"limits.h\"\n\nnamespace tbb {\n\ntypedef std::size_t stack_size_type;\n\n//! @cond INTERNAL\nnamespace internal {\n    //! Internal to library. Should not be used by clients.\n    /** @ingroup task_scheduling */\n    class scheduler;\n} // namespace internal\n//! @endcond\n\n//! 
Class delimiting the scope of task scheduler activity.\n/** A thread can construct a task_scheduler_init object and keep it alive\n    while it uses TBB's tasking subsystem (including parallel algorithms).\n\n    This class allows to customize properties of the TBB task pool to some extent.\n    For example it can limit concurrency level of parallel work initiated by the\n    given thread. It also can be used to specify stack size of the TBB worker threads,\n    though this setting is not effective if the thread pool has already been created.\n\n    If a parallel construct is used without task_scheduler_init object previously\n    created, the scheduler will be initialized automatically with default settings,\n    and will persist until this thread exits. Default concurrency level is defined\n    as described in task_scheduler_init::initialize().\n    @ingroup task_scheduling */\nclass task_scheduler_init: internal::no_copy {\n    enum ExceptionPropagationMode {\n        propagation_mode_exact = 1u,\n        propagation_mode_captured = 2u,\n        propagation_mode_mask = propagation_mode_exact | propagation_mode_captured\n    };\n#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE\n    enum {\n        wait_workers_in_terminate_flag = 128u\n    };\n#endif\n\n    /** NULL if not currently initialized. */\n    internal::scheduler* my_scheduler;\npublic:\n\n    //! Typedef for number of threads that is automatic.\n    static const int automatic = -1;\n\n    //! Argument to initialize() or constructor that causes initialization to be deferred.\n    static const int deferred = -2;\n\n    //! Ensure that scheduler exists for this thread\n    /** A value of -1 lets TBB decide on the number of threads, which is usually\n        maximal hardware concurrency for this process, that is the number of logical\n        CPUs on the machine (possibly limited by the processor affinity mask of this\n        process (Windows) or of this thread (Linux, FreeBSD). 
It is preferable option\n        for production code because it helps to avoid nasty surprises when several\n        TBB based components run side-by-side or in a nested fashion inside the same\n        process.\n\n        The number_of_threads is ignored if any other task_scheduler_inits \n        currently exist.  A thread may construct multiple task_scheduler_inits.  \n        Doing so does no harm because the underlying scheduler is reference counted. */\n    void __TBB_EXPORTED_METHOD initialize( int number_of_threads=automatic );\n\n    //! The overloaded method with stack size parameter\n    /** Overloading is necessary to preserve ABI compatibility */\n    void __TBB_EXPORTED_METHOD initialize( int number_of_threads, stack_size_type thread_stack_size );\n\n    //! Inverse of method initialize.\n    void __TBB_EXPORTED_METHOD terminate();\n\n    //! Shorthand for default constructor followed by call to initialize(number_of_threads).\n#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE\n    task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0, bool wait_workers_in_terminate = false ) : my_scheduler(NULL)\n#else\n    task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0 ) : my_scheduler(NULL)\n#endif\n    {\n        // Two lowest order bits of the stack size argument may be taken to communicate\n        // default exception propagation mode of the client to be used when the\n        // client manually creates tasks in the master thread and does not use\n        // explicit task group context object. This is necessary because newer \n        // TBB binaries with exact propagation enabled by default may be used \n        // by older clients that expect tbb::captured_exception wrapper.\n        // All zeros mean old client - no preference. 
\n        __TBB_ASSERT( !(thread_stack_size & propagation_mode_mask), \"Requested stack size is not aligned\" );\n#if TBB_USE_EXCEPTIONS\n        thread_stack_size |= TBB_USE_CAPTURED_EXCEPTION ? propagation_mode_captured : propagation_mode_exact;\n#endif /* TBB_USE_EXCEPTIONS */\n#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE\n        if (wait_workers_in_terminate)\n            my_scheduler = (internal::scheduler*)wait_workers_in_terminate_flag;\n#endif\n        initialize( number_of_threads, thread_stack_size );\n    }\n\n    //! Destroy scheduler for this thread if thread has no other live task_scheduler_inits.\n    ~task_scheduler_init() {\n        if( my_scheduler ) \n            terminate();\n        internal::poison_pointer( my_scheduler );\n    }\n    //! Returns the number of threads TBB scheduler would create if initialized by default.\n    /** Result returned by this method does not depend on whether the scheduler \n        has already been initialized.\n        \n        Because tbb 2.0 does not support blocking tasks yet, you may use this method\n        to boost the number of threads in the tbb's internal pool, if your tasks are \n        doing I/O operations. The optimal number of additional threads depends on how\n        much time your tasks spend in the blocked state.\n        \n        Before TBB 3.0 U4 this method returned the number of logical CPU in the\n        system. Currently on Windows, Linux and FreeBSD it returns the number of\n        logical CPUs available to the current process in accordance with its affinity\n        mask.\n        \n        NOTE: The return value of this method never changes after its first invocation. \n        This means that changes in the process affinity mask that took place after\n        this method was first invoked will not affect the number of worker threads\n        in the TBB worker threads pool. */\n    static int __TBB_EXPORTED_FUNC default_num_threads ();\n\n    //! 
Returns true if scheduler is active (initialized); false otherwise\n    bool is_active() const { return my_scheduler != NULL; }\n};\n\n} // namespace tbb\n\n#endif /* __TBB_task_scheduler_init_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/task_scheduler_observer.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_task_scheduler_observer_H\n#define __TBB_task_scheduler_observer_H\n\n#include \"atomic.h\"\n#if __TBB_TASK_ARENA\n#include \"task_arena.h\"\n#endif //__TBB_TASK_ARENA\n\n#if __TBB_SCHEDULER_OBSERVER\n\nnamespace tbb {\nnamespace interface6 {\nclass task_scheduler_observer;\n}\nnamespace internal {\n\nclass observer_proxy;\nclass observer_list;\n\nclass task_scheduler_observer_v3 {\n    friend class observer_proxy;\n    friend class observer_list;\n    friend class interface6::task_scheduler_observer;\n\n    //! 
Pointer to the proxy holding this observer.\n    /** Observers are proxied by the scheduler to maintain persistent lists of them. **/\n    observer_proxy* my_proxy;\n\n    //! Counter preventing the observer from being destroyed while in use by the scheduler.\n    /** Valid only when observation is on. **/\n    atomic<intptr_t> my_busy_count;\n\npublic:\n    //! Enable or disable observation\n    /** For local observers the method can be used only when the current thread\n        has the task scheduler initialized or is attached to an arena.\n\n        Repeated calls with the same state are no-ops. **/\n    void __TBB_EXPORTED_METHOD observe( bool state=true );\n\n    //! Returns true if observation is enabled, false otherwise.\n    bool is_observing() const {return my_proxy!=NULL;}\n\n    //! Construct observer with observation disabled.\n    task_scheduler_observer_v3() : my_proxy(NULL) { my_busy_count.store<relaxed>(0); }\n\n    //! Entry notification\n    /** Invoked from inside observe(true) call and whenever a worker enters the arena \n        this observer is associated with. If a thread is already in the arena when\n        the observer is activated, the entry notification is called before it\n        executes the first stolen task.\n\n        Obsolete semantics. For global observers it is called by a thread before\n        the first steal since observation became enabled. **/\n    virtual void on_scheduler_entry( bool /*is_worker*/ ) {} \n\n    //! Exit notification\n    /** Invoked from inside observe(false) call and whenever a worker leaves the\n        arena this observer is associated with.\n\n        Obsolete semantics. For global observers it is called by a thread before\n        the first steal since observation became enabled. **/\n    virtual void on_scheduler_exit( bool /*is_worker*/ ) {}\n\n    //! 
Destructor automatically switches observation off if it is enabled.\n    virtual ~task_scheduler_observer_v3() { if(my_proxy) observe(false);}\n};\n\n} // namespace internal\n\n#if __TBB_ARENA_OBSERVER\nnamespace interface6 {\nclass task_scheduler_observer : public internal::task_scheduler_observer_v3 {\n    friend class internal::task_scheduler_observer_v3;\n    friend class internal::observer_proxy;\n    friend class internal::observer_list;\n\n    /** Negative numbers with the largest absolute value to minimize probability\n        of coincidence in case of a bug in busy count usage. **/\n    // TODO: take more high bits for version number\n    static const intptr_t v6_trait = (intptr_t)((~(uintptr_t)0 >> 1) + 1);\n\n    //! contains task_arena pointer or tag indicating local or global semantics of the observer\n    intptr_t my_context_tag;\n    enum { global_tag = 0, implicit_tag = 1 };\n\npublic:\n    //! Construct local or global observer in inactive state (observation disabled).\n    /** For a local observer entry/exit notifications are invoked whenever a worker\n        thread joins/leaves the arena of the observer's owner thread. If a thread is\n        already in the arena when the observer is activated, the entry notification is\n        called before it executes the first stolen task. **/\n    /** TODO: Obsolete.\n        Global observer semantics is obsolete as it violates master thread isolation\n        guarantees and is not composable. Thus the current default behavior of the\n        constructor is obsolete too and will be changed in one of the future versions\n        of the library. **/\n    task_scheduler_observer( bool local = false ) {\n        my_context_tag = local? implicit_tag : global_tag;\n    }\n\n#if __TBB_TASK_ARENA\n    //! 
Construct local observer for a given arena in inactive state (observation disabled).\n    /** entry/exit notifications are invoked whenever a thread joins/leaves arena.\n        If a thread is already in the arena when the observer is activated, the entry notification\n        is called before it executes the first stolen task. **/\n    task_scheduler_observer( task_arena & a) {\n        my_context_tag = (intptr_t)&a;\n    }\n#endif //__TBB_TASK_ARENA\n\n    /** Destructor protects instance of the observer from concurrent notification.\n       It is recommended to disable observation before destructor of a derived class starts,\n       otherwise it can lead to concurrent notification callback on partly destroyed object **/\n    virtual ~task_scheduler_observer() { if(my_proxy) observe(false); }\n\n    //! Enable or disable observation\n    /** Warning: concurrent invocations of this method are not safe.\n        Repeated calls with the same state are no-ops. **/\n    void observe( bool state=true ) {\n        if( state && !my_proxy ) {\n            __TBB_ASSERT( !my_busy_count, \"Inconsistent state of task_scheduler_observer instance\");\n            my_busy_count.store<relaxed>(v6_trait);\n        }\n        internal::task_scheduler_observer_v3::observe(state);\n    }\n\n    //! Return commands for may_sleep()\n    enum { keep_awake = false, allow_sleep = true };\n\n    //! The callback can be invoked by a worker thread before it goes to sleep.\n    /** If it returns false ('keep_awake'), the thread will keep spinning and looking for work.\n        It will not be called for master threads. 
**/\n    virtual bool may_sleep() { return allow_sleep; }\n};\n\n} //namespace interface6\nusing interface6::task_scheduler_observer;\n#else /*__TBB_ARENA_OBSERVER*/\ntypedef tbb::internal::task_scheduler_observer_v3 task_scheduler_observer;\n#endif /*__TBB_ARENA_OBSERVER*/\n\n} // namespace tbb\n\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n#endif /* __TBB_task_scheduler_observer_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/task_stream.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_task_stream_H\n#define _TBB_task_stream_H\n\n#include \"tbb/tbb_stddef.h\"\n#include <deque>\n#include <climits>\n#include \"tbb/atomic.h\" // for __TBB_Atomic*\n#include \"tbb/spin_mutex.h\"\n#include \"tbb/tbb_allocator.h\"\n#include \"scheduler_common.h\"\n#include \"tbb_misc.h\" // for FastRandom\n\nnamespace tbb {\nnamespace internal {\n\n//! Essentially, this is just a pair of a queue and a mutex to protect the queue.\n/** The reason std::pair is not used is that the code would look less clean\n    if field names were replaced with 'first' and 'second'. 
**/\ntemplate< typename T, typename mutex_t >\nstruct queue_and_mutex {\n    typedef std::deque< T, tbb_allocator<T> > queue_base_t;\n\n    queue_base_t my_queue;\n    mutex_t      my_mutex;\n\n    queue_and_mutex () : my_queue(), my_mutex() {}\n    ~queue_and_mutex () {}\n};\n\nconst uintptr_t one = 1;\n\ninline void set_one_bit( uintptr_t& dest, int pos ) {\n    __TBB_ASSERT( pos>=0, NULL );\n    __TBB_ASSERT( pos<32, NULL );\n    __TBB_AtomicOR( &dest, one<<pos );\n}\n\ninline void clear_one_bit( uintptr_t& dest, int pos ) {\n    __TBB_ASSERT( pos>=0, NULL );\n    __TBB_ASSERT( pos<32, NULL );\n    __TBB_AtomicAND( &dest, ~(one<<pos) );\n}\n\ninline bool is_bit_set( uintptr_t val, int pos ) {\n    __TBB_ASSERT( pos>=0, NULL );\n    __TBB_ASSERT( pos<32, NULL );\n    return (val & (one<<pos)) != 0;\n}\n\n//! The container for \"fairness-oriented\" aka \"enqueued\" tasks.\nclass task_stream : no_copy {\n    typedef queue_and_mutex <task*, spin_mutex> lane_t;\n    uintptr_t population;\n    padded<lane_t>* lanes;\n    unsigned N;\n\npublic:\n    task_stream() : population(), lanes()\n    {\n    }\n\n    void initialize( unsigned n_lanes ) {\n        const unsigned max_lanes =\n#if __TBB_MORE_FIFO_LANES\n                sizeof(population) * CHAR_BIT;\n#else\n                32;\n#endif\n        N = n_lanes>=max_lanes ? max_lanes : n_lanes>2 ? 1<<(__TBB_Log2(n_lanes-1)+1) : 2;\n        __TBB_ASSERT( N==max_lanes || N>=n_lanes && ((N-1)&N)==0, \"number of lanes miscalculated\");\n        __TBB_ASSERT( N <= sizeof(population) * CHAR_BIT, NULL );\n        lanes = new padded<lane_t>[N];\n        __TBB_ASSERT( !population, NULL );\n    }\n\n    ~task_stream() { if (lanes) delete[] lanes; }\n\n    //! Push a task into a lane.\n    void push( task* source, FastRandom& random ) {\n        // Lane selection is random. 
Each thread should keep a separate seed value.\n        unsigned idx;\n        for( ; ; ) {\n            idx = random.get() & (N-1);\n            spin_mutex::scoped_lock lock;\n            if( lock.try_acquire(lanes[idx].my_mutex) ) {\n                lanes[idx].my_queue.push_back(source);\n                set_one_bit( population, idx ); //TODO: avoid atomic op if the bit is already set\n                break;\n            }\n        }\n    }\n\n    //! Try finding and popping a task.\n    task* pop( unsigned& last_used_lane ) {\n        task* result = NULL;\n        // Lane selection is round-robin. Each thread should keep its last used lane.\n        unsigned idx = (last_used_lane+1)&(N-1);\n        for( ; population; idx=(idx+1)&(N-1) ) {\n            if( is_bit_set( population, idx ) ) {\n                lane_t& lane = lanes[idx];\n                spin_mutex::scoped_lock lock;\n                if( lock.try_acquire(lane.my_mutex) && !lane.my_queue.empty() ) {\n                    result = lane.my_queue.front();\n                    lane.my_queue.pop_front();\n                    if( lane.my_queue.empty() )\n                        clear_one_bit( population, idx );\n                    break;\n                }\n            }\n        }\n        last_used_lane = idx;\n        return result;\n    }\n\n    //! Checks existence of a task.\n    bool empty() {\n        return !population;\n    }\n\n    //! Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.\n    /** Tasks are not executed, because it would potentially create more tasks at a late stage.\n        The scheduler is really expected to execute all tasks before task_stream destruction. 
*/\n    intptr_t drain() {\n        intptr_t result = 0;\n        for(unsigned i=0; i<N; ++i) {\n            lane_t& lane = lanes[i];\n            spin_mutex::scoped_lock lock(lane.my_mutex);\n            for(lane_t::queue_base_t::iterator it=lane.my_queue.begin();\n                it!=lane.my_queue.end(); ++it, ++result)\n            {\n                task* t = *it;\n                tbb::task::destroy(*t);\n            }\n            lane.my_queue.clear();\n            clear_one_bit( population, i );\n        }\n        return result;\n    }\n}; // task_stream\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_task_stream_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tbb_H\n#define __TBB_tbb_H\n\n/** \n    This header bulk-includes declarations or definitions of all the functionality \n    provided by TBB (save for malloc dependent headers). 
\n\n    If you use only a few TBB constructs, consider including specific headers only.\n    Any header listed below can be included independently of others.\n**/\n\n#if TBB_PREVIEW_AGGREGATOR\n#include \"aggregator.h\"\n#endif\n#include \"aligned_space.h\"\n#include \"atomic.h\"\n#include \"blocked_range.h\"\n#include \"blocked_range2d.h\"\n#include \"blocked_range3d.h\"\n#include \"cache_aligned_allocator.h\"\n#include \"combinable.h\"\n#include \"concurrent_hash_map.h\"\n#if TBB_PREVIEW_CONCURRENT_LRU_CACHE\n#include \"concurrent_lru_cache.h\"\n#endif\n#include \"concurrent_priority_queue.h\"\n#include \"concurrent_queue.h\"\n#include \"concurrent_unordered_map.h\"\n#include \"concurrent_unordered_set.h\"\n#include \"concurrent_vector.h\"\n#include \"critical_section.h\"\n#include \"enumerable_thread_specific.h\"\n#include \"flow_graph.h\"\n#include \"mutex.h\"\n#include \"null_mutex.h\"\n#include \"null_rw_mutex.h\"\n#include \"parallel_do.h\"\n#include \"parallel_for.h\"\n#include \"parallel_for_each.h\"\n#include \"parallel_invoke.h\"\n#include \"parallel_reduce.h\"\n#include \"parallel_scan.h\"\n#include \"parallel_sort.h\"\n#include \"partitioner.h\"\n#include \"pipeline.h\"\n#include \"queuing_mutex.h\"\n#include \"queuing_rw_mutex.h\"\n#include \"reader_writer_lock.h\"\n#include \"recursive_mutex.h\"\n#include \"spin_mutex.h\"\n#include \"spin_rw_mutex.h\"\n#include \"task.h\"\n#include \"task_arena.h\"\n#include \"task_group.h\"\n#include \"task_scheduler_init.h\"\n#include \"task_scheduler_observer.h\"\n#include \"tbb_allocator.h\"\n#include \"tbb_exception.h\"\n#include \"tbb_thread.h\"\n#include \"tick_count.h\"\n\n#endif /* __TBB_tbb_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_allocator.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tbb_allocator_H\n#define __TBB_tbb_allocator_H\n\n#include \"tbb_stddef.h\"\n#include <new>\n#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n #include <utility> // std::forward\n#endif\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <cstring>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\nnamespace tbb {\n\n//! @cond INTERNAL\nnamespace internal {\n\n    //! 
Deallocates memory using FreeHandler\n    /** The function uses scalable_free if scalable allocator is available and free if not*/\n    void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p );\n\n    //! Allocates memory using MallocHandler\n    /** The function uses scalable_malloc if scalable allocator is available and malloc if not*/\n    void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n );\n\n    //! Returns true if standard malloc/free are used to work with memory.\n    bool __TBB_EXPORTED_FUNC is_malloc_used_v3();\n}\n//! @endcond\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    // Workaround for erroneous \"unreferenced parameter\" warning in method destroy.\n    #pragma warning (push)\n    #pragma warning (disable: 4100)\n#endif\n\n//! Meets \"allocator\" requirements of ISO C++ Standard, Section 20.1.5\n/** The class selects the best memory allocation mechanism available \n    from scalable_malloc and standard malloc.\n    The members are ordered the same way they are in section 20.4.1\n    of the ISO C++ standard.\n    @ingroup memory_allocation */\ntemplate<typename T>\nclass tbb_allocator {\npublic:\n    typedef typename internal::allocator_type<T>::value_type value_type;\n    typedef value_type* pointer;\n    typedef const value_type* const_pointer;\n    typedef value_type& reference;\n    typedef const value_type& const_reference;\n    typedef size_t size_type;\n    typedef ptrdiff_t difference_type;\n    template<typename U> struct rebind {\n        typedef tbb_allocator<U> other;\n    };\n\n    //! Specifies current allocator\n    enum malloc_type {\n        scalable, \n        standard\n    };\n\n    tbb_allocator() throw() {}\n    tbb_allocator( const tbb_allocator& ) throw() {}\n    template<typename U> tbb_allocator(const tbb_allocator<U>&) throw() {}\n\n    pointer address(reference x) const {return &x;}\n    const_pointer address(const_reference x) const {return &x;}\n    \n    //! 
Allocate space for n objects.\n    pointer allocate( size_type n, const void* /*hint*/ = 0) {\n        return pointer(internal::allocate_via_handler_v3( n * sizeof(value_type) ));\n    }\n\n    //! Free previously allocated block of memory.\n    void deallocate( pointer p, size_type ) {\n        internal::deallocate_via_handler_v3(p);        \n    }\n\n    //! Largest value for which method allocate might succeed.\n    size_type max_size() const throw() {\n        size_type max = static_cast<size_type>(-1) / sizeof (value_type);\n        return (max > 0 ? max : 1);\n    }\n    \n    //! Copy-construct value at location pointed to by p.\n#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n    template<typename U, typename... Args>\n    void construct(U *p, Args&&... args)\n        { ::new((void *)p) U(std::forward<Args>(args)...); }\n#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n    void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));}\n#endif\n    void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);}\n#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC\n\n    //! Destroy value at location pointed to by p.\n    void destroy( pointer p ) {p->~value_type();}\n\n    //! Returns current allocator\n    static malloc_type allocator_type() {\n        return internal::is_malloc_used_v3() ? standard : scalable;\n    }\n};\n\n#if _MSC_VER && !defined(__INTEL_COMPILER)\n    #pragma warning (pop)\n#endif // warning 4100 is back\n\n//! 
Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1\n/** @ingroup memory_allocation */\ntemplate<> \nclass tbb_allocator<void> {\npublic:\n    typedef void* pointer;\n    typedef const void* const_pointer;\n    typedef void value_type;\n    template<typename U> struct rebind {\n        typedef tbb_allocator<U> other;\n    };\n};\n\ntemplate<typename T, typename U>\ninline bool operator==( const tbb_allocator<T>&, const tbb_allocator<U>& ) {return true;}\n\ntemplate<typename T, typename U>\ninline bool operator!=( const tbb_allocator<T>&, const tbb_allocator<U>& ) {return false;}\n\n//! Meets \"allocator\" requirements of ISO C++ Standard, Section 20.1.5\n/** The class is an adapter over an actual allocator that fills the allocation\n    using memset function with template argument C as the value.\n    The members are ordered the same way they are in section 20.4.1\n    of the ISO C++ standard.\n    @ingroup memory_allocation */\ntemplate <typename T, template<typename X> class Allocator = tbb_allocator>\nclass zero_allocator : public Allocator<T>\n{\npublic:\n    typedef Allocator<T> base_allocator_type;\n    typedef typename base_allocator_type::value_type value_type;\n    typedef typename base_allocator_type::pointer pointer;\n    typedef typename base_allocator_type::const_pointer const_pointer;\n    typedef typename base_allocator_type::reference reference;\n    typedef typename base_allocator_type::const_reference const_reference;\n    typedef typename base_allocator_type::size_type size_type;\n    typedef typename base_allocator_type::difference_type difference_type;\n    template<typename U> struct rebind {\n        typedef zero_allocator<U, Allocator> other;\n    };\n\n    zero_allocator() throw() { }\n    zero_allocator(const zero_allocator &a) throw() : base_allocator_type( a ) { }\n    template<typename U>\n    zero_allocator(const zero_allocator<U> &a) throw() : base_allocator_type( Allocator<U>( a ) ) { }\n\n    pointer 
allocate(const size_type n, const void *hint = 0 ) {\n        pointer ptr = base_allocator_type::allocate( n, hint );\n        std::memset( ptr, 0, n * sizeof(value_type) );\n        return ptr;\n    }\n};\n\n//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1\n/** @ingroup memory_allocation */\ntemplate<template<typename T> class Allocator> \nclass zero_allocator<void, Allocator> : public Allocator<void> {\npublic:\n    typedef Allocator<void> base_allocator_type;\n    typedef typename base_allocator_type::value_type value_type;\n    typedef typename base_allocator_type::pointer pointer;\n    typedef typename base_allocator_type::const_pointer const_pointer;\n    template<typename U> struct rebind {\n        typedef zero_allocator<U, Allocator> other;\n    };\n};\n\ntemplate<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2>\ninline bool operator==( const zero_allocator<T1,B1> &a, const zero_allocator<T2,B2> &b) {\n    return static_cast< B1<T1> >(a) == static_cast< B2<T2> >(b);\n}\ntemplate<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2>\ninline bool operator!=( const zero_allocator<T1,B1> &a, const zero_allocator<T2,B2> &b) {\n    return static_cast< B1<T1> >(a) != static_cast< B2<T2> >(b);\n}\n\n} // namespace tbb \n\n#endif /* __TBB_tbb_allocator_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_assert_impl.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n// IMPORTANT: To use assertion handling in TBB, exactly one of the TBB source files\n// should #include tbb_assert_impl.h thus instantiating assertion handling routines.\n// The intent of putting it to a separate file is to allow some tests to use it\n// as well in order to avoid dependency on the library.\n\n// include headers for required function declarations\n#include <cstdlib>\n#include <stdio.h>\n#include <string.h>\n#include <stdarg.h>\n#if _MSC_VER\n#include <crtdbg.h>\n#endif\n\n#if _MSC_VER >= 1400\n#define __TBB_EXPORTED_FUNC   __cdecl\n#else\n#define __TBB_EXPORTED_FUNC\n#endif\n\nusing namespace std;\n\n#if __TBBMALLOC_BUILD\nnamespace rml { namespace internal {\n#else\nnamespace tbb {\n#endif\n    //! Type for an assertion handler\n    typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment );\n\n    static assertion_handler_type assertion_handler;\n\n    assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler ) {\n        assertion_handler_type old_handler = assertion_handler;\n        assertion_handler = new_handler;\n        return old_handler;\n    }\n\n    void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment ) {\n        if( assertion_handler_type a = assertion_handler ) {\n            (*a)(filename,line,expression,comment);\n        } else {\n            static bool already_failed;\n            if( !already_failed ) {\n                already_failed = true;\n                fprintf( stderr, \"Assertion %s failed on line %d of file %s\\n\",\n                         expression, line, filename );\n                if( comment )\n                    fprintf( stderr, \"Detailed description: %s\\n\", comment );\n#if _MSC_VER && _DEBUG\n 
               if(1 == _CrtDbgReport(_CRT_ASSERT, filename, line, \"tbb_debug.dll\", \"%s\\r\\n%s\", expression, comment?comment:\"\"))\n                        _CrtDbgBreak();\n#else\n                fflush(stderr);\n                abort();\n#endif\n            }\n        }\n    }\n\n#if defined(_MSC_VER)&&_MSC_VER<1400\n#   define vsnprintf _vsnprintf\n#endif\n\n#if !__TBBMALLOC_BUILD\n    namespace internal {\n        //! Report a runtime warning.\n        void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... )\n        {\n            char str[1024]; memset(str, 0, 1024);\n            va_list args; va_start(args, format);\n            vsnprintf( str, 1024-1, format, args);\n            va_end(args);\n            fprintf( stderr, \"TBB Warning: %s\\n\", str);\n        }\n    } // namespace internal\n#endif\n\n#if __TBBMALLOC_BUILD\n}} // namespaces rml::internal\n#else\n}  // namespace tbb\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_config.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tbb_config_H\n#define __TBB_tbb_config_H\n\n/** This header is supposed to contain macro definitions and C style comments only.\n    The macros defined here are intended to control such aspects of TBB build as\n    - presence of compiler features\n    - compilation modes\n    - feature sets\n    - known compiler/platform issues\n**/\n\n/*Check which standard library we use on OS X.*/\n/*__TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed.*/\n#if !defined(__TBB_SYMBOL) && __APPLE__\n    #include <cstddef>\n#endif\n\n// note that when ICC is in use __TBB_GCC_VERSION might not closely match GCC version on the machine\n#define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)\n\n#if __clang__\n    /**according to clang documentation version can be vendor specific **/\n    #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)\n#endif\n\n/** Preprocessor symbols to determine HW architecture **/\n\n#if _WIN32||_WIN64\n#   if defined(_M_X64)||defined(__x86_64__)  // the latter for MinGW support\n#       define __TBB_x86_64 1\n#   elif defined(_M_IA64)\n#       define __TBB_ipf 1\n#   elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support\n#       define __TBB_x86_32 1\n#   else\n#       define __TBB_generic_arch 1\n#   endif\n#else /* Assume generic Unix */\n#   if !__linux__ && !__APPLE__\n#       define __TBB_generic_os 1\n#   endif\n#   if __x86_64__\n#       define __TBB_x86_64 1\n#   elif __ia64__\n#       define __TBB_ipf 1\n#   elif __i386__||__i386  // __i386 is for Sun OS\n#       define __TBB_x86_32 1\n#   else\n#       define __TBB_generic_arch 1\n#   endif\n#endif\n\n#if __MIC__ || __MIC2__\n#define __TBB_DEFINE_MIC 1\n#endif\n\n#define __TBB_TSX_AVAILABLE  
(__TBB_x86_32 || __TBB_x86_64) && !__TBB_DEFINE_MIC\n\n/** Presence of compiler features **/\n\n#if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811\n/* Intel(R) Composer XE 2011 Update 6 incorrectly sets __INTEL_COMPILER. Fix it. */\n    #undef __INTEL_COMPILER\n    #define __INTEL_COMPILER 1210\n#endif\n\n#if __TBB_GCC_VERSION >= 40400 && !defined(__INTEL_COMPILER)\n    /** warning suppression pragmas available in GCC since 4.4 **/\n    #define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1\n#endif\n\n/* Select particular features of C++11 based on compiler version.\n   ICC 12.1 (Linux), GCC 4.3 and higher, clang 2.9 and higher\n   set __GXX_EXPERIMENTAL_CXX0X__ in c++11 mode.\n\n   Compilers that mimics other compilers (ICC, clang) must be processed before\n   compilers they mimic (GCC, MSVC).\n\n   TODO: The following conditions should be extended when new compilers/runtimes\n   support added.\n */\n\n#if __INTEL_COMPILER\n    /** C++11 mode detection macros for Intel C++ compiler (enabled by -std=c++0x option):\n          __INTEL_CXX11_MODE__ for version >=13.0\n          __STDC_HOSTED__ for version >=12.0 on Windows,\n          __GXX_EXPERIMENTAL_CXX0X__ for version >=12.0 on Linux and OS X. 
**/\n    //  On Windows, C++11 features supported by Visual Studio 2010 and higher are enabled by default\n    #ifndef __INTEL_CXX11_MODE__\n        #define __INTEL_CXX11_MODE__ ((_MSC_VER && __STDC_HOSTED__) || __GXX_EXPERIMENTAL_CXX0X__)\n        // TODO: check if more conditions can be simplified with the above macro\n    #endif\n    #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT    (__INTEL_CXX11_MODE__ && __VARIADIC_TEMPLATES)\n    // Both r-value reference support in compiler and std::move/std::forward\n    // presence in C++ standard library is checked.\n    #define __TBB_CPP11_RVALUE_REF_PRESENT            ((__GXX_EXPERIMENTAL_CXX0X__ && (__TBB_GCC_VERSION >= 40300 || _LIBCPP_VERSION) || _MSC_VER >= 1600) && __INTEL_COMPILER >= 1200)\n    #if  _MSC_VER >= 1600\n        #define __TBB_EXCEPTION_PTR_PRESENT           ( __INTEL_COMPILER > 1300                                                \\\n                                                      /*ICC 12.1 Upd 10 and 13 beta Upd 2 fixed exception_ptr linking  issue*/ \\\n                                                      || (__INTEL_COMPILER == 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \\\n                                                      || (__INTEL_COMPILER == 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) )\n    /** libstdc++ that comes with GCC 4.6 use C++11 features not supported by ICC 12.1.\n     *  Because of that ICC 12.1 does not support C++11 mode with with gcc 4.6 (or higher),\n     *  and therefore does not  define __GXX_EXPERIMENTAL_CXX0X__ macro **/\n    #elif __TBB_GCC_VERSION >= 40404 && __TBB_GCC_VERSION < 40600\n        #define __TBB_EXCEPTION_PTR_PRESENT           (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1200)\n    #elif __TBB_GCC_VERSION >= 40600\n        #define __TBB_EXCEPTION_PTR_PRESENT           (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1300)\n    #else\n        #define __TBB_EXCEPTION_PTR_PRESENT           0\n    #endif\n    #define 
__TBB_MAKE_EXCEPTION_PTR_PRESENT          (_MSC_VER >= 1700 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600))\n    #define __TBB_STATIC_ASSERT_PRESENT               (__INTEL_CXX11_MODE__ || _MSC_VER >= 1600)\n    #define __TBB_CPP11_TUPLE_PRESENT                 (_MSC_VER >= 1600 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300))\n    /**Intel C++ compiler 14.0 crashes on using __has_include. When it fixed, condition will need to be updated. **/\n    #if (__clang__ && __INTEL_COMPILER > 1400)\n        #if (__has_feature(__cxx_generalized_initializers__) && __has_include(<initializer_list>))\n            #define __TBB_INITIALIZER_LISTS_PRESENT   1\n        #endif\n    #else\n        /** TODO: when MSVC2013 is supported by Intel C++ compiler, it will be enabled silently by compiler, so rule will need to be updated.**/\n        #define __TBB_INITIALIZER_LISTS_PRESENT       __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1800 || __TBB_GCC_VERSION >= 40400 || _LIBCPP_VERSION)\n    #endif\n    \n    #define __TBB_CONSTEXPR_PRESENT                   __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400\n    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT  __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1200\n    /** ICC seems to disable support of noexcept event in c++11 when compiling in compatibility mode for gcc <4.6 **/\n    #define __TBB_NOEXCEPT_PRESENT                    __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1300 && (__TBB_GCC_VERSION >= 40600 || _LIBCPP_VERSION || _MSC_VER)\n    #define __TBB_CPP11_STD_BEGIN_END_PRESENT         (_MSC_VER >= 1700 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1310 && (__TBB_GCC_VERSION >= 40600 || _LIBCPP_VERSION))\n    #define __TBB_CPP11_AUTO_PRESENT                  (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210)\n    #define __TBB_CPP11_DECLTYPE_PRESENT              (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210)\n#elif 
__clang__\n//TODO: these options need to be rechecked\n/** on OS X* the only way to get C++11 is to use clang. For library features (e.g. exception_ptr) libc++ is also\n *  required. So there is no need to check GCC version for clang**/\n    #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT     (__has_feature(__cxx_variadic_templates__))\n    #define __TBB_CPP11_RVALUE_REF_PRESENT             (__has_feature(__cxx_rvalue_references__) && (__TBB_GCC_VERSION >= 40300 || _LIBCPP_VERSION))\n/** TODO: extend exception_ptr related conditions to cover libstdc++ **/\n    #define __TBB_EXCEPTION_PTR_PRESENT               (__cplusplus >= 201103L && _LIBCPP_VERSION)\n    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT          (__cplusplus >= 201103L && _LIBCPP_VERSION)\n    #define __TBB_STATIC_ASSERT_PRESENT               __has_feature(__cxx_static_assert__)\n    /**Clang (preprocessor) has problems with dealing with expression having __has_include in #ifs\n     * used inside C++ code. (At least version that comes with OS X 10.8 : Apple LLVM version 4.2 (clang-425.0.28) (based on LLVM 3.2svn)) **/\n    #if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include(<tuple>))\n        #define __TBB_CPP11_TUPLE_PRESENT             1\n    #endif\n    #if (__has_feature(__cxx_generalized_initializers__) && __has_include(<initializer_list>))\n        #define __TBB_INITIALIZER_LISTS_PRESENT       1\n    #endif\n    #define __TBB_CONSTEXPR_PRESENT                   __has_feature(__cxx_constexpr__)\n    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT  (__has_feature(__cxx_defaulted_functions__) && __has_feature(__cxx_deleted_functions__))\n    /**For some unknown reason  __has_feature(__cxx_noexcept) does not yield true for all cases. Compiler bug ? 
**/\n    #define __TBB_NOEXCEPT_PRESENT                    (__cplusplus >= 201103L)\n    #define __TBB_CPP11_STD_BEGIN_END_PRESENT         (__has_feature(__cxx_range_for__) && _LIBCPP_VERSION)\n    #define __TBB_CPP11_AUTO_PRESENT                  __has_feature(__cxx_auto_type__)\n    #define __TBB_CPP11_DECLTYPE_PRESENT              __has_feature(__cxx_decltype__)\n#elif __GNUC__\n    #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT    __GXX_EXPERIMENTAL_CXX0X__\n    #define __TBB_CPP11_RVALUE_REF_PRESENT            __GXX_EXPERIMENTAL_CXX0X__\n    /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIBCXX_ATOMIC_BUILTINS_4, which is a prerequisite \n        for exception_ptr but cannot be used in this file because it is defined in a header, not by the compiler. \n        If the compiler has no atomic intrinsics, the C++ library should not expect those as well. **/\n    #define __TBB_EXCEPTION_PTR_PRESENT               (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40404 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT          (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)\n    #define __TBB_STATIC_ASSERT_PRESENT               (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300)\n    #define __TBB_CPP11_TUPLE_PRESENT                 (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300)\n    #define __TBB_INITIALIZER_LISTS_PRESENT           (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)\n    /** gcc seems have to support constexpr from 4.4 but tests in (test_atomic) seeming reasonable fail to compile prior 4.6**/\n    #define __TBB_CONSTEXPR_PRESENT                   (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)\n    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT  (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)\n    #define __TBB_NOEXCEPT_PRESENT                    (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)\n    #define 
__TBB_CPP11_STD_BEGIN_END_PRESENT         (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)\n    #define __TBB_CPP11_AUTO_PRESENT                  (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)\n    #define __TBB_CPP11_DECLTYPE_PRESENT              (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)\n#elif _MSC_VER\n    #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT    (_MSC_VER >= 1800)\n    #define __TBB_CPP11_RVALUE_REF_PRESENT            (_MSC_VER >= 1600)\n    #define __TBB_EXCEPTION_PTR_PRESENT               (_MSC_VER >= 1600)\n    #define __TBB_STATIC_ASSERT_PRESENT               (_MSC_VER >= 1600)\n    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT          (_MSC_VER >= 1700)\n    #define __TBB_CPP11_TUPLE_PRESENT                 (_MSC_VER >= 1600)\n    #define __TBB_INITIALIZER_LISTS_PRESENT           (_MSC_VER >= 1800)\n    #define __TBB_CONSTEXPR_PRESENT                   0\n    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT  (_MSC_VER >= 1800)\n    #define __TBB_NOEXCEPT_PRESENT                    0 /*for _MSC_VER == 1800*/\n    #define __TBB_CPP11_STD_BEGIN_END_PRESENT         (_MSC_VER >= 1700)\n    #define __TBB_CPP11_AUTO_PRESENT                  (_MSC_VER >= 1600)\n    #define __TBB_CPP11_DECLTYPE_PRESENT              (_MSC_VER >= 1600)\n#else\n    #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT    0\n    #define __TBB_CPP11_RVALUE_REF_PRESENT            0\n    #define __TBB_EXCEPTION_PTR_PRESENT               0\n    #define __TBB_STATIC_ASSERT_PRESENT               0\n    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT          0\n    #define __TBB_CPP11_TUPLE_PRESENT                 0\n    #define __TBB_INITIALIZER_LISTS_PRESENT           0\n    #define __TBB_CONSTEXPR_PRESENT                   0\n    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT  0\n    #define __TBB_NOEXCEPT_PRESENT                    0\n    #define __TBB_CPP11_STD_BEGIN_END_PRESENT         0\n    #define __TBB_CPP11_AUTO_PRESENT                  0\n    
#define __TBB_CPP11_DECLTYPE_PRESENT              0\n#endif\n\n// C++11 standard library features\n\n#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT      (_LIBCPP_VERSION || _MSC_VER >= 1700)\n#define __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300 || _MSC_VER >= 1600)\n//TODO: Probably more accurate way is to analyze version of stdlibc++ via__GLIBCXX__ instead of __TBB_GCC_VERSION\n#define __TBB_ALLOCATOR_TRAITS_PRESENT           (__cplusplus >= 201103L && _LIBCPP_VERSION  || _MSC_VER >= 1700 ||                                             \\\n                                                  __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700 && !(__TBB_GCC_VERSION == 40700 && __TBB_DEFINE_MIC) \\\n                                                 )\n\n//TODO: not clear how exactly this macro affects exception_ptr - investigate\n// On linux ICC fails to find existing std::exception_ptr in libstdc++ without this define\n#if __INTEL_COMPILER && __GNUC__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\n    #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1\n#endif\n\n// Work around a bug in MinGW32\n#if __MINGW32__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(_GLIBCXX_ATOMIC_BUILTINS_4)\n    #define _GLIBCXX_ATOMIC_BUILTINS_4\n#endif\n\n#if __GNUC__ || __SUNPRO_CC || __IBMCPP__\n    /* ICC defines __GNUC__ and so is covered */\n    #define __TBB_ATTRIBUTE_ALIGNED_PRESENT 1\n#elif _MSC_VER && (_MSC_VER >= 1300 || __INTEL_COMPILER)\n    #define __TBB_DECLSPEC_ALIGN_PRESENT 1\n#endif\n\n/* Actually ICC supports gcc __sync_* intrinsics starting 11.1,\n * but 64 bit support for 32 bit target comes in later ones*/\n/* TODO: change the version back to 4.1.2 once macro __TBB_WORD_SIZE become optional */\n#if __TBB_GCC_VERSION >= 40306 || __INTEL_COMPILER >= 1200\n    /** built-in atomics available in GCC since 4.1.2 **/\n    #define __TBB_GCC_BUILTIN_ATOMICS_PRESENT 1\n#endif\n\n#if __INTEL_COMPILER >= 
1200\n    /** built-in C++11 style atomics available in ICC since 12.0 **/\n    #define __TBB_ICC_BUILTIN_ATOMICS_PRESENT 1\n#endif\n\n#define __TBB_TSX_INTRINSICS_PRESENT ((__RTM__ || _MSC_VER>=1700 || __INTEL_COMPILER>=1300) && !__TBB_DEFINE_MIC && !__ANDROID__)\n\n/** User controlled TBB features & modes **/\n\n#ifndef TBB_USE_DEBUG\n#ifdef _DEBUG\n#define TBB_USE_DEBUG _DEBUG\n#else\n#define TBB_USE_DEBUG 0\n#endif\n#endif /* TBB_USE_DEBUG */\n\n#ifndef TBB_USE_ASSERT\n#define TBB_USE_ASSERT TBB_USE_DEBUG\n#endif /* TBB_USE_ASSERT */\n\n#ifndef TBB_USE_THREADING_TOOLS\n#define TBB_USE_THREADING_TOOLS TBB_USE_DEBUG\n#endif /* TBB_USE_THREADING_TOOLS */\n\n#ifndef TBB_USE_PERFORMANCE_WARNINGS\n#ifdef TBB_PERFORMANCE_WARNINGS\n#define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS\n#else\n#define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG\n#endif /* TBB_PEFORMANCE_WARNINGS */\n#endif /* TBB_USE_PERFORMANCE_WARNINGS */\n\n#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX)\n    #if TBB_USE_EXCEPTIONS\n        #error Compilation settings do not support exception handling. 
Please do not set TBB_USE_EXCEPTIONS macro or set it to 0.\n    #elif !defined(TBB_USE_EXCEPTIONS)\n        #define TBB_USE_EXCEPTIONS 0\n    #endif\n#elif !defined(TBB_USE_EXCEPTIONS)\n    #if __TBB_DEFINE_MIC\n    #define TBB_USE_EXCEPTIONS 0\n    #else\n    #define TBB_USE_EXCEPTIONS 1\n    #endif\n#elif TBB_USE_EXCEPTIONS && __TBB_DEFINE_MIC\n    #error Please do not set TBB_USE_EXCEPTIONS macro or set it to 0.\n#endif\n\n#ifndef TBB_IMPLEMENT_CPP0X\n    /** By default, use C++11 classes if available **/\n    #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__\n        #define TBB_IMPLEMENT_CPP0X 0\n    #elif __clang__ && __cplusplus >= 201103L\n        //TODO: consider introducing separate macros for each file?\n        //prevent injection of corresponding tbb names into std:: namespace if native headers are present\n        #if __has_include(<thread>) || __has_include(<condition_variable>)\n            #define TBB_IMPLEMENT_CPP0X 0\n        #else\n            #define TBB_IMPLEMENT_CPP0X 1\n        #endif\n    #elif _MSC_VER>=1700\n        #define TBB_IMPLEMENT_CPP0X 0\n    #elif __STDCPP_THREADS__\n        #define TBB_IMPLEMENT_CPP0X 0\n    #else\n        #define TBB_IMPLEMENT_CPP0X 1\n    #endif\n#endif /* TBB_IMPLEMENT_CPP0X */\n\n/* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as it is used as C++ const */\n#ifndef TBB_USE_CAPTURED_EXCEPTION\n    /** IA-64 architecture pre-built TBB binaries do not support exception_ptr. **/\n    #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__ia64__)\n        #define TBB_USE_CAPTURED_EXCEPTION 0\n    #else\n        #define TBB_USE_CAPTURED_EXCEPTION 1\n    #endif\n#else /* defined TBB_USE_CAPTURED_EXCEPTION */\n    #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT\n        #error Current runtime does not support std::exception_ptr. 
Set TBB_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb::captured_exception.\n    #endif\n#endif /* defined TBB_USE_CAPTURED_EXCEPTION */\n\n/** Check whether the request to use GCC atomics can be satisfied **/\n#if TBB_USE_GCC_BUILTINS && !__TBB_GCC_BUILTIN_ATOMICS_PRESENT\n    #error \"GCC atomic built-ins are not supported.\"\n#endif\n\n/** Internal TBB features & modes **/\n\n/** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak symbol mechanism **/\n#ifndef __TBB_WEAK_SYMBOLS_PRESENT\n#define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && (__TBB_GCC_VERSION >= 40000 || __INTEL_COMPILER ) )\n#endif\n\n/** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load shared libraries at run time **/\n#ifndef __TBB_DYNAMIC_LOAD_ENABLED\n    #define __TBB_DYNAMIC_LOAD_ENABLED 1\n#endif\n\n/** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when\n    it's necessary to test internal functions not exported from TBB DLLs\n**/\n#if (_WIN32||_WIN64) && (__TBB_SOURCE_DIRECTLY_INCLUDED || TBB_USE_PREVIEW_BINARY)\n    #define __TBB_NO_IMPLICIT_LINKAGE 1\n    #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1\n#endif\n\n#ifndef __TBB_COUNT_TASK_NODES\n    #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT\n#endif\n\n#ifndef __TBB_TASK_GROUP_CONTEXT\n    #define __TBB_TASK_GROUP_CONTEXT 1\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n#ifndef __TBB_SCHEDULER_OBSERVER\n    #define __TBB_SCHEDULER_OBSERVER 1\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n#ifndef __TBB_FP_CONTEXT\n    #define __TBB_FP_CONTEXT __TBB_TASK_GROUP_CONTEXT\n#endif /* __TBB_FP_CONTEXT */\n\n#if __TBB_FP_CONTEXT && !__TBB_TASK_GROUP_CONTEXT\n    #error __TBB_FP_CONTEXT requires __TBB_TASK_GROUP_CONTEXT to be enabled\n#endif\n\n#ifndef __TBB_TASK_ARENA\n    #define __TBB_TASK_ARENA 1\n#endif /* __TBB_TASK_ARENA  */\n#if __TBB_TASK_ARENA\n    #define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official\n    #if !__TBB_SCHEDULER_OBSERVER\n 
       #error __TBB_TASK_ARENA requires __TBB_SCHEDULER_OBSERVER to be enabled\n    #endif\n#endif /* __TBB_TASK_ARENA */\n\n#ifndef __TBB_ARENA_OBSERVER\n    #define __TBB_ARENA_OBSERVER ((__TBB_BUILD||TBB_PREVIEW_LOCAL_OBSERVER)&& __TBB_SCHEDULER_OBSERVER)\n#endif /* __TBB_ARENA_OBSERVER */\n\n#ifndef __TBB_SLEEP_PERMISSION\n    #define __TBB_SLEEP_PERMISSION ((__TBB_CPF_BUILD||TBB_PREVIEW_LOCAL_OBSERVER)&& __TBB_SCHEDULER_OBSERVER)\n#endif /* __TBB_SLEEP_PERMISSION */\n\n#if TBB_PREVIEW_FLOW_GRAPH_TRACE\n#define __TBB_NO_IMPLICIT_LINKAGE 1\n#endif /* TBB_PREVIEW_FLOW_GRAPH_TRACE */\n\n#ifndef __TBB_ITT_STRUCTURE_API\n#define __TBB_ITT_STRUCTURE_API ( !__TBB_DEFINE_MIC && (__TBB_CPF_BUILD || TBB_PREVIEW_FLOW_GRAPH_TRACE) )\n#endif\n\n#if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT\n    #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabled\n#endif\n\n#ifndef __TBB_TASK_PRIORITY\n    #define __TBB_TASK_PRIORITY (!(__TBB_CPF_BUILD||TBB_USE_PREVIEW_BINARY)&&__TBB_TASK_GROUP_CONTEXT) // TODO: it will be enabled for CPF in the next versions\n#endif /* __TBB_TASK_PRIORITY */\n\n#if __TBB_TASK_PRIORITY && !__TBB_TASK_GROUP_CONTEXT\n    #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enabled\n#endif\n\n#if TBB_PREVIEW_WAITING_FOR_WORKERS || __TBB_BUILD\n    #define __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE 1\n#endif\n\n#if !defined(__TBB_SURVIVE_THREAD_SWITCH) && \\\n          (_WIN32 || _WIN64 || __APPLE__ || (__linux__ && !__ANDROID__))\n    #define __TBB_SURVIVE_THREAD_SWITCH 1\n#endif /* __TBB_SURVIVE_THREAD_SWITCH */\n\n#ifndef __TBB_DEFAULT_PARTITIONER\n#if TBB_DEPRECATED\n/** Default partitioner for parallel loop templates in TBB 1.0-2.1 */\n#define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner\n#else\n/** Default partitioner for parallel loop templates since TBB 2.2 */\n#define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner\n#endif /* TBB_DEPRECATED */\n#endif /* !defined(__TBB_DEFAULT_PARTITIONER */\n\n#ifndef 
__TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES\n#define __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES 1\n#endif\n\n#ifdef _VARIADIC_MAX\n#define __TBB_VARIADIC_MAX _VARIADIC_MAX\n#else\n#if _MSC_VER >= 1700\n#define __TBB_VARIADIC_MAX 5  /* current VS11 setting, may change. */\n#else\n#define __TBB_VARIADIC_MAX 10\n#endif\n#endif\n\n/** __TBB_WIN8UI_SUPPORT enables support of New Windows*8 Store Apps and limit a possibility to load\n    shared libraries at run time only from application container **/\n#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP\n    #define __TBB_WIN8UI_SUPPORT 1\n#else\n    #define __TBB_WIN8UI_SUPPORT 0\n#endif\n\n/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by\n    the bugs in compilers, standard or OS specific libraries. They should be\n    removed as soon as the corresponding bugs are fixed or the buggy OS/compiler\n    versions go out of the support list.\n**/\n\n#if __ANDROID__ && __TBB_GCC_VERSION <= 40403 && !__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8\n    /** Necessary because on Android 8-byte CAS and F&A are not available for some processor architectures,\n        but no mandatory warning message appears from GCC 4.4.3. Instead, only a linkage error occurs when\n        these atomic operations are used (such as in unit test test_atomic.exe). **/\n    #define __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 1\n#elif __TBB_x86_32 && __TBB_GCC_VERSION == 40102 && ! __GNUC_RH_RELEASE__\n    /** GCC 4.1.2 erroneously emit call to external function for 64 bit sync_ intrinsics.\n        However these functions are not defined anywhere. It seems that this problem was fixed later on\n        and RHEL got an updated version of gcc 4.1.2. 
**/\n    #define __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 1\n#endif\n\n#if __GNUC__ && __TBB_x86_64 && __INTEL_COMPILER == 1200\n    #define __TBB_ICC_12_0_INL_ASM_FSTCW_BROKEN 1\n#endif\n\n#if _MSC_VER && __INTEL_COMPILER && (__INTEL_COMPILER<1110 || __INTEL_COMPILER==1110 && __INTEL_COMPILER_BUILD_DATE < 20091012)\n    /** Necessary to avoid ICL error (or warning in non-strict mode):\n        \"exception specification for implicitly declared virtual destructor is\n        incompatible with that of overridden one\". **/\n    #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1\n#endif\n\n#if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER)\n    /** VS2005 and earlier do not allow declaring template class as a friend\n        of classes defined in other namespaces. **/\n    #define __TBB_TEMPLATE_FRIENDS_BROKEN 1\n#endif\n\n//TODO: recheck for different clang versions \n#if __GLIBC__==2 && __GLIBC_MINOR__==3 ||  (__APPLE__ && ( __INTEL_COMPILER==1200 && !TBB_USE_DEBUG))\n    /** Macro controlling EH usages in TBB tests.\n        Some older versions of glibc crash when exception handling happens concurrently. **/\n    #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1\n#else\n    #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 0\n#endif\n\n#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110\n    /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/\n    #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1\n#endif\n\n#if __clang__ || (__GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMPILER))\n    /** Bugs with access to nested classes declared in protected area */\n    #define __TBB_PROTECTED_NESTED_CLASS_BROKEN 1\n#endif\n\n#if __MINGW32__ && __TBB_GCC_VERSION < 40200\n    /** MinGW has a bug with stack alignment for routines invoked from MS RTLs.\n        Since GCC 4.2, the bug can be worked around via a special attribute. 
**/\n    #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1\n#else\n    #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 0\n#endif\n\n#if __GNUC__==4 && __GNUC_MINOR__==3 && __GNUC_PATCHLEVEL__==0\n    /* GCC of this version may rashly ignore control dependencies */\n    #define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1\n#endif\n\n#if __FreeBSD__\n    /** A bug in FreeBSD 8.0 results in kernel panic when there is contention\n        on a mutex created with this attribute. **/\n    #define __TBB_PRIO_INHERIT_BROKEN 1\n\n    /** A bug in FreeBSD 8.0 results in test hanging when an exception occurs\n        during (concurrent?) object construction by means of placement new operator. **/\n    #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1\n#endif /* __FreeBSD__ */\n\n#if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER)\n    /** The Intel compiler for IA-32 (Linux|OS X) crashes or generates\n        incorrect code when __asm__ arguments have a cast to volatile. **/\n    #define __TBB_ICC_ASM_VOLATILE_BROKEN 1\n#endif\n\n#if !__INTEL_COMPILER && (_MSC_VER || __GNUC__==3 && __GNUC_MINOR__<=2)\n    /** Bug in GCC 3.2 and MSVC compilers that sometimes return 0 for __alignof(T)\n        when T has not yet been instantiated. **/\n    #define __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN 1\n#endif\n\n#if __TBB_DEFINE_MIC\n    /** Main thread and user's thread have different default thread affinity masks. 
**/\n    #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1\n#endif\n\n#if __GXX_EXPERIMENTAL_CXX0X__ && !defined(__EXCEPTIONS) && \\\n    ((!__INTEL_COMPILER && !__clang__ && (__TBB_GCC_VERSION>=40400 && __TBB_GCC_VERSION<40600)) || \\\n     (__INTEL_COMPILER<=1400 && (__TBB_GCC_VERSION>=40400 && __TBB_GCC_VERSION<=40801)))\n/* There is an issue for specific GCC toolchain when C++11 is enabled\n   and exceptions are disabled:\n   exceprion_ptr.h/nested_exception.h use throw unconditionally.\n   GCC can ignore 'throw' since 4.6; but with ICC the issue still exists.\n */\n    #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 1\n#else\n    #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 0\n#endif\n\n#if __INTEL_COMPILER==1300 && __TBB_GCC_VERSION>=40700 && defined(__GXX_EXPERIMENTAL_CXX0X__)\n/* Some C++11 features used inside libstdc++ are not supported by Intel compiler.\n * Checking version of gcc instead of libstdc++ because\n *  - they are directly connected,\n *  - for now it is not possible to check version of any standard library in this file\n */\n    #define __TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN 1\n#else\n    #define __TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN 0\n#endif\n\n#if (__GNUC__==4 && __GNUC_MINOR__==4 ) && !defined(__INTEL_COMPILER) && !defined(__clang__)\n    /** excessive warnings related to strict aliasing rules in GCC 4.4 **/\n    #define __TBB_GCC_STRICT_ALIASING_BROKEN 1\n    /* topical remedy: #pragma GCC diagnostic ignored \"-Wstrict-aliasing\" */\n    #if !__TBB_GCC_WARNING_SUPPRESSION_PRESENT\n        #error Warning suppression is not supported, while should.\n    #endif\n#endif\n\n/*In a PIC mode some versions of GCC 4.1.2 generate incorrect inlined code for 8 byte __sync_val_compare_and_swap intrinsic */\n#if __TBB_GCC_VERSION == 40102 && __PIC__ && !defined(__INTEL_COMPILER) && !defined(__clang__)\n    #define __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN 1\n#endif\n\n#if __TBB_x86_32 && (__linux__ || __APPLE__ || _WIN32 || __sun || __ANDROID__) 
&&  (__INTEL_COMPILER || (__GNUC__==3 && __GNUC_MINOR__==3 ) || __SUNPRO_CC)\n    // Some compilers for IA-32 fail to provide 8-byte alignment of objects on the stack,\n    // even if the object specifies 8-byte alignment.  On such platforms, the IA-32 implementation\n    // of 64 bit atomics (e.g. atomic<long long>) use different tactics depending upon\n    // whether the object is properly aligned or not.\n    #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 1\n#else\n    #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 0\n#endif\n\n#if __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && __TBB_GCC_VERSION < 40700 && !defined(__INTEL_COMPILER) && !defined (__clang__)\n    #define __TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN 1\n#endif\n\n#if _MSC_VER && _MSC_VER <= 1800 && !__INTEL_COMPILER\n    // With MSVC, when an array is passed by const reference to a template function,\n    // constness from the function parameter may get propagated to the template parameter.\n    #define __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN 1\n#endif\n\n// A compiler bug: a disabled copy constructor prevents use of the moving constructor\n#define __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN (_MSC_VER && (__INTEL_COMPILER >= 1300 && __INTEL_COMPILER <= 1310) && !__INTEL_CXX11_MODE__)\n\n// MSVC 2013 and ICC 15 seems do not generate implicit move constructor for empty derived class while should\n#define __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN  (__TBB_CPP11_RVALUE_REF_PRESENT &&  \\\n      ( !__INTEL_COMPILER && _MSC_VER && _MSC_VER <=1800 || __INTEL_COMPILER && __INTEL_COMPILER <= 1500 ))\n\n/** End of __TBB_XXX_BROKEN macro section **/\n\n#if defined(_MSC_VER) && _MSC_VER>=1500 && !defined(__INTEL_COMPILER)\n    // A macro to suppress erroneous or benign \"unreachable code\" MSVC warning (4702)\n    #define __TBB_MSVC_UNREACHABLE_CODE_IGNORED 1\n#endif\n\n#define __TBB_ATOMIC_CTORS     (__TBB_CONSTEXPR_PRESENT && __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && 
(!__TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN))\n\n#define __TBB_ALLOCATOR_CONSTRUCT_VARIADIC      (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT)\n\n#define __TBB_VARIADIC_PARALLEL_INVOKE          (TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT)\n#endif /* __TBB_tbb_config_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_exception.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_exception_H\n#define __TBB_exception_H\n\n#include \"tbb_stddef.h\"\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <exception>\n#include <new>    //required for bad_alloc definition, operators new\n#include <string> // required to construct std exception classes\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\nnamespace tbb {\n\n//! 
Exception for concurrent containers\nclass bad_last_alloc : public std::bad_alloc {\npublic:\n    /*override*/ const char* what() const throw();\n#if __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN\n    /*override*/ ~bad_last_alloc() throw() {}\n#endif\n};\n\n//! Exception for PPL locks\nclass improper_lock : public std::exception {\npublic:\n    /*override*/ const char* what() const throw();\n};\n\n//! Exception for user-initiated abort\nclass user_abort : public std::exception {\npublic:\n    /*override*/ const char* what() const throw();\n};\n\n//! Exception for missing wait on structured_task_group\nclass missing_wait : public std::exception {\npublic:\n    /*override*/ const char* what() const throw();\n};\n\n//! Exception for repeated scheduling of the same task_handle\nclass invalid_multiple_scheduling : public std::exception {\npublic:\n    /*override*/ const char* what() const throw();\n};\n\nnamespace internal {\n//! Obsolete\nvoid __TBB_EXPORTED_FUNC throw_bad_last_alloc_exception_v4();\n\nenum exception_id {\n    eid_bad_alloc = 1,\n    eid_bad_last_alloc,\n    eid_nonpositive_step,\n    eid_out_of_range,\n    eid_segment_range_error,\n    eid_index_range_error,\n    eid_missing_wait,\n    eid_invalid_multiple_scheduling,\n    eid_improper_lock,\n    eid_possible_deadlock,\n    eid_operation_not_permitted,\n    eid_condvar_wait_failed,\n    eid_invalid_load_factor,\n    eid_reserved, // free slot for backward compatibility, can be reused.\n    eid_invalid_swap,\n    eid_reservation_length_error,\n    eid_invalid_key,\n    eid_user_abort,\n    eid_reserved1,\n#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE\n    // This id is used only inside library and only for support of CPF functionality.\n    // So, if we drop the functionality, eid_reserved1 can be safely renamed and reused.\n    eid_blocking_sch_init = eid_reserved1,\n#endif\n    eid_bad_tagged_msg_cast,\n    //! The last enumerator tracks the number of defined IDs. 
It must remain the last one.\n    /** When adding new IDs, place them immediately _before_ this comment (that is\n        _after_ all the existing IDs. NEVER insert new IDs between the existing ones. **/\n    eid_max\n};\n\n//! Gathers all throw operators in one place.\n/** Its purpose is to minimize code bloat that can be caused by throw operators\n    scattered in multiple places, especially in templates. **/\nvoid __TBB_EXPORTED_FUNC throw_exception_v4 ( exception_id );\n\n//! Versionless convenience wrapper for throw_exception_v4()\ninline void throw_exception ( exception_id eid ) { throw_exception_v4(eid); }\n\n} // namespace internal\n} // namespace tbb\n\n#if __TBB_TASK_GROUP_CONTEXT\n#include \"tbb_allocator.h\"\n#include <typeinfo> //for typeid\n\nnamespace tbb {\n\n//! Interface to be implemented by all exceptions TBB recognizes and propagates across the threads.\n/** If an unhandled exception of the type derived from tbb::tbb_exception is intercepted\n    by the TBB scheduler in one of the worker threads, it is delivered to and re-thrown in\n    the root thread. The root thread is the thread that has started the outermost algorithm\n    or root task sharing the same task_group_context with the guilty algorithm/task (the one\n    that threw the exception first).\n\n    Note: when documentation mentions workers with respect to exception handling,\n    masters are implied as well, because they are completely equivalent in this context.\n    Consequently a root thread can be master or worker thread.\n\n    NOTE: In case of nested algorithms or complex task hierarchies when the nested\n    levels share (explicitly or by means of implicit inheritance) the task group\n    context of the outermost level, the exception may be (re-)thrown multiple times\n    (ultimately - in each worker on each nesting level) before reaching the root\n    thread at the outermost level. 
IMPORTANT: if you intercept an exception derived\n    from this class on a nested level, you must re-throw it in the catch block by means\n    of the \"throw;\" operator.\n\n    TBB provides two implementations of this interface: tbb::captured_exception and\n    template class tbb::movable_exception. See their declarations for more info. **/\nclass tbb_exception : public std::exception\n{\n    /** No operator new is provided because the TBB usage model assumes dynamic\n        creation of the TBB exception objects only by means of applying move()\n        operation on an exception thrown out of TBB scheduler. **/\n    void* operator new ( size_t );\n\npublic:\n#if __clang__\n    // At -O3 or even -O2 optimization level, Clang may fully throw away an empty destructor\n    // of tbb_exception from destructors of derived classes. As a result, it does not create\n    // vtable for tbb_exception, which is a required part of TBB binary interface.\n    // Making the destructor non-empty (with just a semicolon) prevents that optimization.\n    ~tbb_exception() throw() { /* keep the semicolon! */ ; }\n#endif\n\n    //! Creates and returns pointer to the deep copy of this exception object.\n    /** Move semantics is allowed. **/\n    virtual tbb_exception* move () throw() = 0;\n\n    //! Destroys objects created by the move() method.\n    /** Frees memory and calls destructor for this exception object.\n        Can and must be used only on objects created by the move method. **/\n    virtual void destroy () throw() = 0;\n\n    //! Throws this exception object.\n    /** Make sure that if you have several levels of derivation from this interface\n        you implement or override this method on the most derived level. The implementation\n        is as simple as \"throw *this;\". Failure to do this will result in exception\n        of a base class type being thrown. **/\n    virtual void throw_self () = 0;\n\n    //! 
Returns RTTI name of the originally intercepted exception\n    virtual const char* name() const throw() = 0;\n\n    //! Returns the result of originally intercepted exception's what() method.\n    virtual const char* what() const throw() = 0;\n\n    /** Operator delete is provided only to allow using existing smart pointers\n        with TBB exception objects obtained as the result of applying move()\n        operation on an exception thrown out of TBB scheduler.\n\n        When overriding method move() make sure to override operator delete as well\n        if memory is allocated not by TBB's scalable allocator. **/\n    void operator delete ( void* p ) {\n        internal::deallocate_via_handler_v3(p);\n    }\n};\n\n//! This class is used by TBB to propagate information about unhandled exceptions into the root thread.\n/** Exception of this type is thrown by TBB in the root thread (thread that started a parallel\n    algorithm ) if an unhandled exception was intercepted during the algorithm execution in one\n    of the workers.\n    \\sa tbb::tbb_exception **/\nclass captured_exception : public tbb_exception\n{\npublic:\n    captured_exception ( const captured_exception& src )\n        : tbb_exception(src), my_dynamic(false)\n    {\n        set(src.my_exception_name, src.my_exception_info);\n    }\n\n    captured_exception ( const char* name_, const char* info )\n        : my_dynamic(false)\n    {\n        set(name_, info);\n    }\n\n    __TBB_EXPORTED_METHOD ~captured_exception () throw();\n\n    captured_exception& operator= ( const captured_exception& src ) {\n        if ( this != &src ) {\n            clear();\n            set(src.my_exception_name, src.my_exception_info);\n        }\n        return *this;\n    }\n\n    /*override*/\n    captured_exception* __TBB_EXPORTED_METHOD move () throw();\n\n    /*override*/\n    void __TBB_EXPORTED_METHOD destroy () throw();\n\n    /*override*/\n    void throw_self () { __TBB_THROW(*this); }\n\n    /*override*/\n    
const char* __TBB_EXPORTED_METHOD name() const throw();\n\n    /*override*/\n    const char* __TBB_EXPORTED_METHOD what() const throw();\n\n    void __TBB_EXPORTED_METHOD set ( const char* name, const char* info ) throw();\n    void __TBB_EXPORTED_METHOD clear () throw();\n\nprivate:\n    //! Used only by method clone().\n    captured_exception() {}\n\n    //! Functionally equivalent to {captured_exception e(name,info); return e.clone();}\n    static captured_exception* allocate ( const char* name, const char* info );\n\n    bool my_dynamic;\n    const char* my_exception_name;\n    const char* my_exception_info;\n};\n\n//! Template that can be used to implement exception that transfers arbitrary ExceptionData to the root thread\n/** Code using TBB can instantiate this template with an arbitrary ExceptionData type\n    and throw this exception object. Such exceptions are intercepted by the TBB scheduler\n    and delivered to the root thread ().\n    \\sa tbb::tbb_exception **/\ntemplate<typename ExceptionData>\nclass movable_exception : public tbb_exception\n{\n    typedef movable_exception<ExceptionData> self_type;\n\npublic:\n    movable_exception ( const ExceptionData& data_ )\n        : my_exception_data(data_)\n        , my_dynamic(false)\n        , my_exception_name(\n#if TBB_USE_EXCEPTIONS\n        typeid(self_type).name()\n#else /* !TBB_USE_EXCEPTIONS */\n        \"movable_exception\"\n#endif /* !TBB_USE_EXCEPTIONS */\n        )\n    {}\n\n    movable_exception ( const movable_exception& src ) throw ()\n        : tbb_exception(src)\n        , my_exception_data(src.my_exception_data)\n        , my_dynamic(false)\n        , my_exception_name(src.my_exception_name)\n    {}\n\n    ~movable_exception () throw() {}\n\n    const movable_exception& operator= ( const movable_exception& src ) {\n        if ( this != &src ) {\n            my_exception_data = src.my_exception_data;\n            my_exception_name = src.my_exception_name;\n        }\n        return 
*this;\n    }\n\n    ExceptionData& data () throw() { return my_exception_data; }\n\n    const ExceptionData& data () const throw() { return my_exception_data; }\n\n    /*override*/ const char* name () const throw() { return my_exception_name; }\n\n    /*override*/ const char* what () const throw() { return \"tbb::movable_exception\"; }\n\n    /*override*/\n    movable_exception* move () throw() {\n        void* e = internal::allocate_via_handler_v3(sizeof(movable_exception));\n        if ( e ) {\n            ::new (e) movable_exception(*this);\n            ((movable_exception*)e)->my_dynamic = true;\n        }\n        return (movable_exception*)e;\n    }\n    /*override*/\n    void destroy () throw() {\n        __TBB_ASSERT ( my_dynamic, \"Method destroy can be called only on dynamically allocated movable_exceptions\" );\n        if ( my_dynamic ) {\n            this->~movable_exception();\n            internal::deallocate_via_handler_v3(this);\n        }\n    }\n    /*override*/\n    void throw_self () { __TBB_THROW( *this ); }\n\nprotected:\n    //! User data\n    ExceptionData  my_exception_data;\n\nprivate:\n    //! Flag specifying whether this object has been dynamically allocated (by the move method)\n    bool my_dynamic;\n\n    //! RTTI name of this class\n    /** We rely on the fact that RTTI names are static string constants. **/\n    const char* my_exception_name;\n};\n\n#if !TBB_USE_CAPTURED_EXCEPTION\nnamespace internal {\n\n//! Exception container that preserves the exact copy of the original exception\n/** This class can be used only when the appropriate runtime support (mandated\n    by C++0x) is present **/\nclass tbb_exception_ptr {\n    std::exception_ptr  my_ptr;\n\npublic:\n    static tbb_exception_ptr* allocate ();\n    static tbb_exception_ptr* allocate ( const tbb_exception& tag );\n    //! This overload uses move semantics (i.e. it empties src)\n    static tbb_exception_ptr* allocate ( captured_exception& src );\n\n    //! 
Destroys this objects\n    /** Note that objects of this type can be created only by the allocate() method. **/\n    void destroy () throw();\n\n    //! Throws the contained exception .\n    void throw_self () { std::rethrow_exception(my_ptr); }\n\nprivate:\n    tbb_exception_ptr ( const std::exception_ptr& src ) : my_ptr(src) {}\n    tbb_exception_ptr ( const captured_exception& src ) :\n        #if __TBB_MAKE_EXCEPTION_PTR_PRESENT\n            my_ptr(std::make_exception_ptr(src))  // the final function name in C++11\n        #else\n            my_ptr(std::copy_exception(src))      // early C++0x drafts name\n        #endif\n    {}\n}; // class tbb::internal::tbb_exception_ptr\n\n} // namespace internal\n#endif /* !TBB_USE_CAPTURED_EXCEPTION */\n\n} // namespace tbb\n\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n#endif /* __TBB_exception_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_machine.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_machine_H\n#define __TBB_machine_H\n\n/** This header provides basic platform abstraction layer by hooking up appropriate\n    architecture/OS/compiler specific headers from the /include/tbb/machine directory.\n    If a plug-in header does not implement all the required APIs, it must specify\n    the missing ones by setting one or more of the following macros:\n\n    __TBB_USE_GENERIC_PART_WORD_CAS\n    __TBB_USE_GENERIC_PART_WORD_FETCH_ADD\n    __TBB_USE_GENERIC_PART_WORD_FETCH_STORE\n    __TBB_USE_GENERIC_FETCH_ADD\n    __TBB_USE_GENERIC_FETCH_STORE\n    __TBB_USE_GENERIC_DWORD_FETCH_ADD\n    __TBB_USE_GENERIC_DWORD_FETCH_STORE\n    __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE\n    __TBB_USE_GENERIC_FULL_FENCED_LOAD_STORE\n    __TBB_USE_GENERIC_RELAXED_LOAD_STORE\n    __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE\n\n    In this case tbb_machine.h will add missing functionality based on a minimal set \n    of APIs that are required to be implemented by all plug-n headers as described\n    further.\n    Note that these generic implementations may be sub-optimal for a particular\n    architecture, and thus should be relied upon only after careful evaluation\n    or as the last resort.\n\n    Additionally __TBB_64BIT_ATOMICS can be set to 0 on a 32-bit architecture to\n    indicate that the port is not going to support double word atomics. 
It may also\n    be set to 1 explicitly, though normally this is not necessary as tbb_machine.h\n    will set it automatically.\n\n    __TBB_ENDIANNESS macro can be defined by the implementation as well.\n    It is used only if __TBB_USE_GENERIC_PART_WORD_CAS is set (or for testing),\n    and must specify the layout of aligned 16-bit and 32-bit data anywhere within a process\n    (while the details of unaligned 16-bit or 32-bit data or of 64-bit data are irrelevant).\n    The layout must be the same at all relevant memory locations within the current process;\n    in case of page-specific endianness, one endianness must be kept \"out of sight\".\n    Possible settings, reflecting hardware and possibly O.S. convention, are:\n    -  __TBB_ENDIAN_BIG for big-endian data,\n    -  __TBB_ENDIAN_LITTLE for little-endian data,\n    -  __TBB_ENDIAN_DETECT for run-time detection iff exactly one of the above,\n    -  __TBB_ENDIAN_UNSUPPORTED to prevent undefined behavior if none of the above.\n\n    Prerequisites for each architecture port\n    ----------------------------------------\n    The following functions and macros have no generic implementation. Therefore they must be\n    implemented in each machine architecture specific header either as a conventional\n    function or as a functional macro.\n\n    __TBB_WORDSIZE\n        This is the size of machine word in bytes, i.e. for 32 bit systems it\n        should be defined to 4.\n\n    __TBB_Yield()\n        Signals OS that the current thread is willing to relinquish the remainder\n        of its time quantum.\n\n    __TBB_full_memory_fence()\n        Must prevent all memory operations from being reordered across it (both\n        by hardware and compiler). 
All such fences must be totally ordered (or\n        sequentially consistent).\n\n    __TBB_machine_cmpswp4( volatile void *ptr, int32_t value, int32_t comparand )\n        Must be provided if __TBB_USE_FENCED_ATOMICS is not set.\n\n    __TBB_machine_cmpswp8( volatile void *ptr, int32_t value, int64_t comparand )\n        Must be provided for 64-bit architectures if __TBB_USE_FENCED_ATOMICS is not set,\n        and for 32-bit architectures if __TBB_64BIT_ATOMICS is set\n\n    __TBB_machine_<op><S><fence>(...), where\n        <op> = {cmpswp, fetchadd, fetchstore}\n        <S> = {1, 2, 4, 8}\n        <fence> = {full_fence, acquire, release, relaxed}\n        Must be provided if __TBB_USE_FENCED_ATOMICS is set.\n\n    __TBB_control_consistency_helper()\n        Bridges the memory-semantics gap between architectures providing only\n        implicit C++0x \"consume\" semantics (like Power Architecture) and those\n        also implicitly obeying control dependencies (like IA-64 architecture).\n        It must be used only in conditional code where the condition is itself\n        data-dependent, and will then make subsequent code behave as if the\n        original data dependency were acquired.\n        It needs only a compiler fence where implied by the architecture\n        either specifically (like IA-64 architecture) or because generally stronger \n        \"acquire\" semantics are enforced (like x86).\n        It is always valid, though potentially suboptimal, to replace\n        control with acquire on the load and then remove the helper.\n\n    __TBB_acquire_consistency_helper(), __TBB_release_consistency_helper()\n        Must be provided if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE is set.\n        Enforce acquire and release semantics in generic implementations of fenced\n        store and load operations. 
Depending on the particular architecture/compiler\n        combination they may be a hardware fence, a compiler fence, both or nothing.\n **/\n\n#include \"tbb_stddef.h\"\n\nnamespace tbb {\nnamespace internal { //< @cond INTERNAL\n\n////////////////////////////////////////////////////////////////////////////////\n// Overridable helpers declarations\n//\n// A machine/*.h file may choose to define these templates, otherwise it must\n// request default implementation by setting appropriate __TBB_USE_GENERIC_XXX macro(s).\n//\ntemplate <typename T, std::size_t S>\nstruct machine_load_store;\n\ntemplate <typename T, std::size_t S>\nstruct machine_load_store_relaxed;\n\ntemplate <typename T, std::size_t S>\nstruct machine_load_store_seq_cst;\n//\n// End of overridable helpers declarations\n////////////////////////////////////////////////////////////////////////////////\n\ntemplate<size_t S> struct atomic_selector;\n\ntemplate<> struct atomic_selector<1> {\n    typedef int8_t word;\n    inline static word fetch_store ( volatile void* location, word value );\n};\n\ntemplate<> struct atomic_selector<2> {\n    typedef int16_t word;\n    inline static word fetch_store ( volatile void* location, word value );\n};\n\ntemplate<> struct atomic_selector<4> {\n#if _MSC_VER && !_WIN64\n    // Work-around that avoids spurious /Wp64 warnings\n    typedef intptr_t word;\n#else\n    typedef int32_t word;\n#endif\n    inline static word fetch_store ( volatile void* location, word value );\n};\n\ntemplate<> struct atomic_selector<8> {\n    typedef int64_t word;\n    inline static word fetch_store ( volatile void* location, word value );\n};\n\n}} //< namespaces internal @endcond, tbb\n\n#define __TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(M)                                        \\\n    inline void __TBB_machine_generic_store8##M(volatile void *ptr, int64_t value) {         \\\n        for(;;) {                                                                            \\\n            
int64_t result = *(volatile int64_t *)ptr;                                       \\\n            if( __TBB_machine_cmpswp8##M(ptr,value,result)==result ) break;                  \\\n        }                                                                                    \\\n    }                                                                                        \\\n\n#define __TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(M)                                         \\\n    inline int64_t __TBB_machine_generic_load8##M(const volatile void *ptr) {                \\\n        /* Comparand and new value may be anything, they only must be equal, and      */     \\\n        /* the value should have a low probability to be actually found in 'location'.*/     \\\n        const int64_t anyvalue = 2305843009213693951LL;                                      \\\n        return __TBB_machine_cmpswp8##M(const_cast<volatile void *>(ptr),anyvalue,anyvalue); \\\n    }                                                                                        \\\n\n// The set of allowed values for __TBB_ENDIANNESS (see above for details)\n#define __TBB_ENDIAN_UNSUPPORTED -1\n#define __TBB_ENDIAN_LITTLE       0\n#define __TBB_ENDIAN_BIG          1\n#define __TBB_ENDIAN_DETECT       2\n\n#if _WIN32||_WIN64\n\n#ifdef _MANAGED\n#pragma managed(push, off)\n#endif\n\n    #if __MINGW64__ || __MINGW32__\n        extern \"C\" __declspec(dllimport) int __stdcall SwitchToThread( void );\n        #define __TBB_Yield()  SwitchToThread()\n        #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT)\n            #include \"machine/gcc_generic.h\"\n        #elif __MINGW64__\n            #include \"machine/linux_intel64.h\"\n        #elif __MINGW32__\n            #include \"machine/linux_ia32.h\"\n        #endif\n    #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)\n        #include \"machine/icc_generic.h\"\n    #elif defined(_M_IX86) && 
!defined(__TBB_WIN32_USE_CL_BUILTINS)\n        #include \"machine/windows_ia32.h\"\n    #elif defined(_M_X64) \n        #include \"machine/windows_intel64.h\"\n    #elif defined(_XBOX)\n        #include \"machine/xbox360_ppc.h\"\n    #elif defined(_M_ARM) || defined(__TBB_WIN32_USE_CL_BUILTINS)\n        #include \"machine/msvc_armv7.h\"\n    #endif\n\n#ifdef _MANAGED\n#pragma managed(pop)\n#endif\n\n#elif __TBB_DEFINE_MIC\n\n    #include \"machine/mic_common.h\"\n    #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)\n        #include \"machine/icc_generic.h\"\n    #else\n        #include \"machine/linux_intel64.h\"\n    #endif\n\n#elif __linux__ || __FreeBSD__ || __NetBSD__\n\n    #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT)\n        #include \"machine/gcc_generic.h\"\n    #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)\n        #include \"machine/icc_generic.h\"\n    #elif __i386__\n        #include \"machine/linux_ia32.h\"\n    #elif __x86_64__\n        #include \"machine/linux_intel64.h\"\n    #elif __ia64__\n        #include \"machine/linux_ia64.h\"\n    #elif __powerpc__\n        #include \"machine/mac_ppc.h\"\n    #elif __arm__\n        #include \"machine/gcc_armv7.h\"\n    #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT\n        #include \"machine/gcc_generic.h\"\n    #endif\n    #include \"machine/linux_common.h\"\n\n#elif __APPLE__\n    //TODO:  TBB_USE_GCC_BUILTINS is not used for Mac, Sun, Aix\n    #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)\n        #include \"machine/icc_generic.h\"\n    #elif __i386__\n        #include \"machine/linux_ia32.h\"\n    #elif __x86_64__\n        #include \"machine/linux_intel64.h\"\n    #elif __POWERPC__\n        #include \"machine/mac_ppc.h\"\n    #endif\n    #include \"machine/macos_common.h\"\n\n#elif _AIX\n\n    #include \"machine/ibm_aix51.h\"\n\n#elif __sun || __SUNPRO_CC\n\n    #define __asm__ asm\n    #define __volatile__ volatile\n\n    #if __i386  || 
__i386__\n        #include \"machine/linux_ia32.h\"\n    #elif __x86_64__\n        #include \"machine/linux_intel64.h\"\n    #elif __sparc\n        #include \"machine/sunos_sparc.h\"\n    #endif\n    #include <sched.h>\n\n    #define __TBB_Yield() sched_yield()\n\n#endif /* OS selection */\n\n#ifndef __TBB_64BIT_ATOMICS\n    #define __TBB_64BIT_ATOMICS 1\n#endif\n\n//TODO: replace usage of these functions with usage of tbb::atomic, and then remove them\n//TODO: map functions with W suffix to use cast to tbb::atomic and according op, i.e. as_atomic().op()\n// Special atomic functions\n#if __TBB_USE_FENCED_ATOMICS\n    #define __TBB_machine_cmpswp1   __TBB_machine_cmpswp1full_fence\n    #define __TBB_machine_cmpswp2   __TBB_machine_cmpswp2full_fence\n    #define __TBB_machine_cmpswp4   __TBB_machine_cmpswp4full_fence\n    #define __TBB_machine_cmpswp8   __TBB_machine_cmpswp8full_fence\n\n    #if __TBB_WORDSIZE==8\n        #define __TBB_machine_fetchadd8             __TBB_machine_fetchadd8full_fence\n        #define __TBB_machine_fetchstore8           __TBB_machine_fetchstore8full_fence\n        #define __TBB_FetchAndAddWrelease(P,V)      __TBB_machine_fetchadd8release(P,V)\n        #define __TBB_FetchAndIncrementWacquire(P)  __TBB_machine_fetchadd8acquire(P,1)\n        #define __TBB_FetchAndDecrementWrelease(P)  __TBB_machine_fetchadd8release(P,(-1))\n    #else\n        #define __TBB_machine_fetchadd4             __TBB_machine_fetchadd4full_fence\n        #define __TBB_machine_fetchstore4           __TBB_machine_fetchstore4full_fence\n        #define __TBB_FetchAndAddWrelease(P,V)      __TBB_machine_fetchadd4release(P,V)\n        #define __TBB_FetchAndIncrementWacquire(P)  __TBB_machine_fetchadd4acquire(P,1)\n        #define __TBB_FetchAndDecrementWrelease(P)  __TBB_machine_fetchadd4release(P,(-1))\n    #endif /* __TBB_WORDSIZE==4 */\n#else /* !__TBB_USE_FENCED_ATOMICS */\n    #define __TBB_FetchAndAddWrelease(P,V)      __TBB_FetchAndAddW(P,V)\n    #define 
__TBB_FetchAndIncrementWacquire(P)  __TBB_FetchAndAddW(P,1)\n    #define __TBB_FetchAndDecrementWrelease(P)  __TBB_FetchAndAddW(P,(-1))\n#endif /* !__TBB_USE_FENCED_ATOMICS */\n\n#if __TBB_WORDSIZE==4\n    #define __TBB_CompareAndSwapW(P,V,C)    __TBB_machine_cmpswp4(P,V,C)\n    #define __TBB_FetchAndAddW(P,V)         __TBB_machine_fetchadd4(P,V)\n    #define __TBB_FetchAndStoreW(P,V)       __TBB_machine_fetchstore4(P,V)\n#elif  __TBB_WORDSIZE==8\n    #if __TBB_USE_GENERIC_DWORD_LOAD_STORE || __TBB_USE_GENERIC_DWORD_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_STORE\n        #error These macros should only be used on 32-bit platforms.\n    #endif\n\n    #define __TBB_CompareAndSwapW(P,V,C)    __TBB_machine_cmpswp8(P,V,C)\n    #define __TBB_FetchAndAddW(P,V)         __TBB_machine_fetchadd8(P,V)\n    #define __TBB_FetchAndStoreW(P,V)       __TBB_machine_fetchstore8(P,V)\n#else /* __TBB_WORDSIZE != 8 */\n    #error Unsupported machine word size.\n#endif /* __TBB_WORDSIZE */\n\n#ifndef __TBB_Pause\n    inline void __TBB_Pause(int32_t) {\n        __TBB_Yield();\n    }\n#endif\n\nnamespace tbb {\n\n//! Sequentially consistent full memory fence.\ninline void atomic_fence () { __TBB_full_memory_fence(); }\n\nnamespace internal { //< @cond INTERNAL\n\n//! Class that implements exponential backoff.\n/** See implementation of spin_wait_while_eq for an example. */\nclass atomic_backoff : no_copy {\n    //! Time delay, in units of \"pause\" instructions.\n    /** Should be equal to approximately the number of \"pause\" instructions\n        that take the same time as an context switch. 
*/\n    static const int32_t LOOPS_BEFORE_YIELD = 16;\n    int32_t count;\npublic:\n    // In many cases, an object of this type is initialized eagerly on hot path,\n    // as in for(atomic_backoff b; ; b.pause()) { /*loop body*/ }\n    // For this reason, the construction cost must be very small!\n    atomic_backoff() : count(1) {}\n    // This constructor pauses immediately; do not use on hot paths!\n    atomic_backoff( bool ) : count(1) { pause(); }\n\n    //! Pause for a while.\n    void pause() {\n        if( count<=LOOPS_BEFORE_YIELD ) {\n            __TBB_Pause(count);\n            // Pause twice as long the next time.\n            count*=2;\n        } else {\n            // Pause is so long that we might as well yield CPU to scheduler.\n            __TBB_Yield();\n        }\n    }\n\n    // pause for a few times and then return false immediately.\n    bool bounded_pause() {\n        if( count<=LOOPS_BEFORE_YIELD ) {\n            __TBB_Pause(count);\n            // Pause twice as long the next time.\n            count*=2;\n            return true;\n        } else {\n            return false;\n        }\n    }\n\n    void reset() {\n        count = 1;\n    }\n};\n\n//! Spin WHILE the value of the variable is equal to a given value\n/** T and U should be comparable types. */\ntemplate<typename T, typename U>\nvoid spin_wait_while_eq( const volatile T& location, U value ) {\n    atomic_backoff backoff;\n    while( location==value ) backoff.pause();\n}\n\n//! Spin UNTIL the value of the variable is equal to a given value\n/** T and U should be comparable types. 
*/\ntemplate<typename T, typename U>\nvoid spin_wait_until_eq( const volatile T& location, const U value ) {\n    atomic_backoff backoff;\n    while( location!=value ) backoff.pause();\n}\n\ntemplate <typename predicate_type>\nvoid spin_wait_while(predicate_type condition){\n    atomic_backoff backoff;\n    while( condition() ) backoff.pause();\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Generic compare-and-swap applied to only a part of a machine word.\n//\n#ifndef __TBB_ENDIANNESS\n#define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT\n#endif\n\n#if __TBB_USE_GENERIC_PART_WORD_CAS && __TBB_ENDIANNESS==__TBB_ENDIAN_UNSUPPORTED\n#error Generic implementation of part-word CAS may not be used with __TBB_ENDIAN_UNSUPPORTED\n#endif\n\n#if __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED\n//\n// This function is the only use of __TBB_ENDIANNESS.\n// The following restrictions/limitations apply for this operation:\n//  - T must be an integer type of at most 4 bytes for the casts and calculations to work\n//  - T must also be less than 4 bytes to avoid compiler warnings when computing mask\n//      (and for the operation to be useful at all, so no workaround is applied)\n//  - the architecture must consistently use either little-endian or big-endian (same for all locations)\n//\n// TODO: static_assert for the type requirements stated above\ntemplate<typename T>\ninline T __TBB_MaskedCompareAndSwap (volatile T * const ptr, const T value, const T comparand ) {\n    struct endianness{ static bool is_big_endian(){\n        #if __TBB_ENDIANNESS==__TBB_ENDIAN_DETECT\n            const uint32_t probe = 0x03020100;\n            return (((const char*)(&probe))[0]==0x03);\n        #elif __TBB_ENDIANNESS==__TBB_ENDIAN_BIG || __TBB_ENDIANNESS==__TBB_ENDIAN_LITTLE\n            return __TBB_ENDIANNESS==__TBB_ENDIAN_BIG;\n        #else\n            #error Unexpected value of __TBB_ENDIANNESS\n        #endif\n    }};\n\n    const uint32_t byte_offset       
     = (uint32_t) ((uintptr_t)ptr & 0x3);\n    volatile uint32_t * const aligned_ptr = (uint32_t*)((uintptr_t)ptr - byte_offset );\n\n    // location of T within uint32_t for a C++ shift operation\n    const uint32_t bits_to_shift     = 8*(endianness::is_big_endian() ? (4 - sizeof(T) - (byte_offset)) : byte_offset);\n    const uint32_t mask              = (((uint32_t)1<<(sizeof(T)*8)) - 1 )<<bits_to_shift;\n    // for signed T, any sign extension bits in cast value/comparand are immediately clipped by mask\n    const uint32_t shifted_comparand = ((uint32_t)comparand << bits_to_shift)&mask;\n    const uint32_t shifted_value     = ((uint32_t)value     << bits_to_shift)&mask;\n\n    for( atomic_backoff b;;b.pause() ) {\n        const uint32_t surroundings  = *aligned_ptr & ~mask ; // may have changed during the pause\n        const uint32_t big_comparand = surroundings | shifted_comparand ;\n        const uint32_t big_value     = surroundings | shifted_value     ;\n        // __TBB_machine_cmpswp4 presumed to have full fence.\n        // Cast shuts up /Wp64 warning\n        const uint32_t big_result = (uint32_t)__TBB_machine_cmpswp4( aligned_ptr, big_value, big_comparand );\n        if( big_result == big_comparand                    // CAS succeeded\n          || ((big_result ^ big_comparand) & mask) != 0)   // CAS failed and the bits of interest have changed\n        {\n            return T((big_result & mask) >> bits_to_shift);\n        }\n        else continue;                                     // CAS failed but the bits of interest were not changed\n    }\n}\n#endif // __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED\n////////////////////////////////////////////////////////////////////////////////\n\ntemplate<size_t S, typename T>\ninline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand );\n\ntemplate<>\ninline int8_t __TBB_CompareAndSwapGeneric <1,int8_t> (volatile void *ptr, int8_t value, int8_t comparand ) {\n#if 
__TBB_USE_GENERIC_PART_WORD_CAS\n    return __TBB_MaskedCompareAndSwap<int8_t>((volatile int8_t *)ptr,value,comparand);\n#else\n    return __TBB_machine_cmpswp1(ptr,value,comparand);\n#endif\n}\n\ntemplate<>\ninline int16_t __TBB_CompareAndSwapGeneric <2,int16_t> (volatile void *ptr, int16_t value, int16_t comparand ) {\n#if __TBB_USE_GENERIC_PART_WORD_CAS\n    return __TBB_MaskedCompareAndSwap<int16_t>((volatile int16_t *)ptr,value,comparand);\n#else\n    return __TBB_machine_cmpswp2(ptr,value,comparand);\n#endif\n}\n\ntemplate<>\ninline int32_t __TBB_CompareAndSwapGeneric <4,int32_t> (volatile void *ptr, int32_t value, int32_t comparand ) {\n    // Cast shuts up /Wp64 warning\n    return (int32_t)__TBB_machine_cmpswp4(ptr,value,comparand);\n}\n\n#if __TBB_64BIT_ATOMICS\ntemplate<>\ninline int64_t __TBB_CompareAndSwapGeneric <8,int64_t> (volatile void *ptr, int64_t value, int64_t comparand ) {\n    return __TBB_machine_cmpswp8(ptr,value,comparand);\n}\n#endif\n\ntemplate<size_t S, typename T>\ninline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) {\n    T result;\n    for( atomic_backoff b;;b.pause() ) {\n        result = *reinterpret_cast<volatile T *>(ptr);\n        // __TBB_CompareAndSwapGeneric presumed to have full fence.\n        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, result+addend, result )==result )\n            break;\n    }\n    return result;\n}\n\ntemplate<size_t S, typename T>\ninline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) {\n    T result;\n    for( atomic_backoff b;;b.pause() ) {\n        result = *reinterpret_cast<volatile T *>(ptr);\n        // __TBB_CompareAndSwapGeneric presumed to have full fence.\n        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, value, result )==result )\n            break;\n    }\n    return result;\n}\n\n#if __TBB_USE_GENERIC_PART_WORD_CAS\n#define __TBB_machine_cmpswp1 tbb::internal::__TBB_CompareAndSwapGeneric<1,int8_t>\n#define __TBB_machine_cmpswp2 
tbb::internal::__TBB_CompareAndSwapGeneric<2,int16_t>\n#endif\n\n#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_PART_WORD_FETCH_ADD\n#define __TBB_machine_fetchadd1 tbb::internal::__TBB_FetchAndAddGeneric<1,int8_t>\n#define __TBB_machine_fetchadd2 tbb::internal::__TBB_FetchAndAddGeneric<2,int16_t>\n#endif\n\n#if __TBB_USE_GENERIC_FETCH_ADD\n#define __TBB_machine_fetchadd4 tbb::internal::__TBB_FetchAndAddGeneric<4,int32_t>\n#endif\n\n#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_ADD\n#define __TBB_machine_fetchadd8 tbb::internal::__TBB_FetchAndAddGeneric<8,int64_t>\n#endif\n\n#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_PART_WORD_FETCH_STORE\n#define __TBB_machine_fetchstore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,int8_t>\n#define __TBB_machine_fetchstore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,int16_t>\n#endif\n\n#if __TBB_USE_GENERIC_FETCH_STORE\n#define __TBB_machine_fetchstore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,int32_t>\n#endif\n\n#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_DWORD_FETCH_STORE\n#define __TBB_machine_fetchstore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,int64_t>\n#endif\n\n#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE\n#define __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(S)                                             \\\n    atomic_selector<S>::word atomic_selector<S>::fetch_store ( volatile void* location, word value ) {  \\\n        return __TBB_machine_fetchstore##S( location, value );                                          \\\n    }\n\n__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(1)\n__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(2)\n__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(4)\n__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(8)\n\n#undef __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE\n#endif /* __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */\n\n#if __TBB_USE_GENERIC_DWORD_LOAD_STORE\n/*TODO: find a more elegant way to handle function names difference*/\n#if 
! __TBB_USE_FENCED_ATOMICS\n    /* This name forwarding is needed for generic implementation of\n     * load8/store8 defined below (via macro) to pick the right CAS function*/\n    #define   __TBB_machine_cmpswp8full_fence __TBB_machine_cmpswp8\n#endif\n__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence)\n__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence)\n\n#if ! __TBB_USE_FENCED_ATOMICS\n    #undef   __TBB_machine_cmpswp8full_fence\n#endif\n\n#define __TBB_machine_store8 tbb::internal::__TBB_machine_generic_store8full_fence\n#define __TBB_machine_load8  tbb::internal::__TBB_machine_generic_load8full_fence\n#endif /* __TBB_USE_GENERIC_DWORD_LOAD_STORE */\n\n#if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE\n/** Fenced operations use volatile qualifier to prevent compiler from optimizing\n    them out, and on architectures with weak memory ordering to induce compiler\n    to generate code with appropriate acquire/release semantics.\n    On architectures like IA32, Intel64 (and likely Sparc TSO) volatile has\n    no effect on code gen, and consistency helpers serve as a compiler fence (the\n    latter being true for IA64/gcc as well to fix a bug in some gcc versions).\n    This code assumes that the generated instructions will operate atomically,\n    which typically requires a type that can be moved in a single instruction,\n    cooperation from the compiler for effective use of such an instruction,\n    and appropriate alignment of the data. 
**/\ntemplate <typename T, size_t S>\nstruct machine_load_store {\n    static T load_with_acquire ( const volatile T& location ) {\n        T to_return = location;\n        __TBB_acquire_consistency_helper();\n        return to_return;\n    }\n    static void store_with_release ( volatile T &location, T value ) {\n        __TBB_release_consistency_helper();\n        location = value;\n    }\n};\n\n//in general, plain load and store of 32bit compiler is not atomic for 64bit types\n#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS\ntemplate <typename T>\nstruct machine_load_store<T,8> {\n    static T load_with_acquire ( const volatile T& location ) {\n        return (T)__TBB_machine_load8( (const volatile void*)&location );\n    }\n    static void store_with_release ( volatile T& location, T value ) {\n        __TBB_machine_store8( (volatile void*)&location, (int64_t)value );\n    }\n};\n#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */\n#endif /* __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE */\n\n#if __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE\ntemplate <typename T, size_t S>\nstruct machine_load_store_seq_cst {\n    static T load ( const volatile T& location ) {\n        __TBB_full_memory_fence();\n        return machine_load_store<T,S>::load_with_acquire( location );\n    }\n#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE\n    static void store ( volatile T &location, T value ) {\n        atomic_selector<S>::fetch_store( (volatile void*)&location, (typename atomic_selector<S>::word)value );\n    }\n#else /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */\n    static void store ( volatile T &location, T value ) {\n        machine_load_store<T,S>::store_with_release( location, value );\n        __TBB_full_memory_fence();\n    }\n#endif /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */\n};\n\n#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS\n/** The implementation does not use functions __TBB_machine_load8/store8 as they\n    are not required to be sequentially consistent. 
**/\ntemplate <typename T>\nstruct machine_load_store_seq_cst<T,8> {\n    static T load ( const volatile T& location ) {\n        // Comparand and new value may be anything, they only must be equal, and\n        // the value should have a low probability to be actually found in 'location'.\n        const int64_t anyvalue = 2305843009213693951LL;\n        return __TBB_machine_cmpswp8( (volatile void*)const_cast<volatile T*>(&location), anyvalue, anyvalue );\n    }\n    static void store ( volatile T &location, T value ) {\n        int64_t result = (volatile int64_t&)location;\n        while ( __TBB_machine_cmpswp8((volatile void*)&location, (int64_t)value, result) != result )\n            result = (volatile int64_t&)location;\n    }\n};\n#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */\n#endif /*__TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE */\n\n#if __TBB_USE_GENERIC_RELAXED_LOAD_STORE\n// Relaxed operations add volatile qualifier to prevent compiler from optimizing them out.\n/** Volatile should not incur any additional cost on IA32, Intel64, and Sparc TSO\n    architectures. However on architectures with weak memory ordering compiler may\n    generate code with acquire/release semantics for operations on volatile data. 
**/\ntemplate <typename T, size_t S>\nstruct machine_load_store_relaxed {\n    static inline T load ( const volatile T& location ) {\n        return location;\n    }\n    static inline void store ( volatile T& location, T value ) {\n        location = value;\n    }\n};\n\n#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS\ntemplate <typename T>\nstruct machine_load_store_relaxed<T,8> {\n    static inline T load ( const volatile T& location ) {\n        return (T)__TBB_machine_load8( (const volatile void*)&location );\n    }\n    static inline void store ( volatile T& location, T value ) {\n        __TBB_machine_store8( (volatile void*)&location, (int64_t)value );\n    }\n};\n#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */\n#endif /* __TBB_USE_GENERIC_RELAXED_LOAD_STORE */\n\n#undef __TBB_WORDSIZE //this macro is forbidden to use outside of atomic machinery\n\ntemplate<typename T>\ninline T __TBB_load_with_acquire(const volatile T &location) {\n    return machine_load_store<T,sizeof(T)>::load_with_acquire( location );\n}\ntemplate<typename T, typename V>\ninline void __TBB_store_with_release(volatile T& location, V value) {\n    machine_load_store<T,sizeof(T)>::store_with_release( location, T(value) );\n}\n//! Overload that exists solely to avoid /Wp64 warnings.\ninline void __TBB_store_with_release(volatile size_t& location, size_t value) {\n    machine_load_store<size_t,sizeof(size_t)>::store_with_release( location, value );\n}\n\ntemplate<typename T>\ninline T __TBB_load_full_fence(const volatile T &location) {\n    return machine_load_store_seq_cst<T,sizeof(T)>::load( location );\n}\ntemplate<typename T, typename V>\ninline void __TBB_store_full_fence(volatile T& location, V value) {\n    machine_load_store_seq_cst<T,sizeof(T)>::store( location, T(value) );\n}\n//! 
Overload that exists solely to avoid /Wp64 warnings.\ninline void __TBB_store_full_fence(volatile size_t& location, size_t value) {\n    machine_load_store_seq_cst<size_t,sizeof(size_t)>::store( location, value );\n}\n\ntemplate<typename T>\ninline T __TBB_load_relaxed (const volatile T& location) {\n    return machine_load_store_relaxed<T,sizeof(T)>::load( const_cast<T&>(location) );\n}\ntemplate<typename T, typename V>\ninline void __TBB_store_relaxed ( volatile T& location, V value ) {\n    machine_load_store_relaxed<T,sizeof(T)>::store( const_cast<T&>(location), T(value) );\n}\n//! Overload that exists solely to avoid /Wp64 warnings.\ninline void __TBB_store_relaxed ( volatile size_t& location, size_t value ) {\n    machine_load_store_relaxed<size_t,sizeof(size_t)>::store( const_cast<size_t&>(location), value );\n}\n\n// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as\n// strict as type T.  The type should have a trivial default constructor and destructor, so that\n// arrays of that type can be declared without initializers.\n// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands\n// to a type bigger than T.\n// The default definition here works on machines where integers are naturally aligned and the\n// strictest alignment is 64.\n#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict\n\n#if __TBB_ATTRIBUTE_ALIGNED_PRESENT\n\n#define __TBB_DefineTypeWithAlignment(PowerOf2)       \\\nstruct __TBB_machine_type_with_alignment_##PowerOf2 { \\\n    uint32_t member[PowerOf2/sizeof(uint32_t)];       \\\n} __attribute__((aligned(PowerOf2)));\n#define __TBB_alignof(T) __alignof__(T)\n\n#elif __TBB_DECLSPEC_ALIGN_PRESENT\n\n#define __TBB_DefineTypeWithAlignment(PowerOf2)       \\\n__declspec(align(PowerOf2))                           \\\nstruct __TBB_machine_type_with_alignment_##PowerOf2 { \\\n    uint32_t member[PowerOf2/sizeof(uint32_t)];       \\\n};\n#define __TBB_alignof(T) 
__alignof(T)\n\n#else /* A compiler with unknown syntax for data alignment */\n#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T)\n#endif\n\n/* Now declare types aligned to useful powers of two */\n// TODO: Is __TBB_DefineTypeWithAlignment(8) needed on 32 bit platforms?\n__TBB_DefineTypeWithAlignment(16)\n__TBB_DefineTypeWithAlignment(32)\n__TBB_DefineTypeWithAlignment(64)\n\ntypedef __TBB_machine_type_with_alignment_64 __TBB_machine_type_with_strictest_alignment;\n\n// Primary template is a declaration of incomplete type so that it fails with unknown alignments\ntemplate<size_t N> struct type_with_alignment;\n\n// Specializations for allowed alignments\ntemplate<> struct type_with_alignment<1> { char member; };\ntemplate<> struct type_with_alignment<2> { uint16_t member; };\ntemplate<> struct type_with_alignment<4> { uint32_t member; };\ntemplate<> struct type_with_alignment<8> { uint64_t member; };\ntemplate<> struct type_with_alignment<16> {__TBB_machine_type_with_alignment_16 member; };\ntemplate<> struct type_with_alignment<32> {__TBB_machine_type_with_alignment_32 member; };\ntemplate<> struct type_with_alignment<64> {__TBB_machine_type_with_alignment_64 member; };\n\n#if __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN\n//! Work around for bug in GNU 3.2 and MSVC compilers.\n/** Bug is that compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated.\n    The work-around forces instantiation by forcing computation of sizeof(T) before __alignof(T). 
*/\ntemplate<size_t Size, typename T>\nstruct work_around_alignment_bug {\n    static const size_t alignment = __TBB_alignof(T);\n};\n#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<tbb::internal::work_around_alignment_bug<sizeof(T),T>::alignment>\n#else\n#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__TBB_alignof(T)>\n#endif  /* __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN */\n\n#endif  /* __TBB_TypeWithAlignmentAtLeastAsStrict */\n\n// Template class here is to avoid instantiation of the static data for modules that don't use it\ntemplate<typename T>\nstruct reverse {\n    static const T byte_table[256];\n};\n// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed\n// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost.\ntemplate<typename T>\nconst T reverse<T>::byte_table[256] = {\n    0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,\n    0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,\n    0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,\n    0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,\n    0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,\n    0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,\n    0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,\n    0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,\n    0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,\n    0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,\n    0x05, 0x85, 0x45, 
0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,\n    0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,\n    0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,\n    0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,\n    0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,\n    0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF\n};\n\n} // namespace internal @endcond\n} // namespace tbb\n\n// Preserving access to legacy APIs\nusing tbb::internal::__TBB_load_with_acquire;\nusing tbb::internal::__TBB_store_with_release;\n\n// Mapping historically used names to the ones expected by atomic_load_store_traits\n#define __TBB_load_acquire  __TBB_load_with_acquire\n#define __TBB_store_release __TBB_store_with_release\n\n#ifndef __TBB_Log2\ninline intptr_t __TBB_Log2( uintptr_t x ) {\n    if( x==0 ) return -1;\n    intptr_t result = 0;\n\n#if !defined(_M_ARM) \n    uintptr_t tmp;\n    if( sizeof(x)>4 && (tmp = ((uint64_t)x)>>32) ) { x=tmp; result += 32; }\n#endif\n    if( uintptr_t tmp = x>>16 ) { x=tmp; result += 16; }\n    if( uintptr_t tmp = x>>8 )  { x=tmp; result += 8; }\n    if( uintptr_t tmp = x>>4 )  { x=tmp; result += 4; }\n    if( uintptr_t tmp = x>>2 )  { x=tmp; result += 2; }\n\n    return (x&2)? 
result+1: result;\n}\n#endif\n\n#ifndef __TBB_AtomicOR\ninline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) {\n    for( tbb::internal::atomic_backoff b;;b.pause() ) {\n        uintptr_t tmp = *(volatile uintptr_t *)operand;\n        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp);\n        if( result==tmp ) break;\n    }\n}\n#endif\n\n#ifndef __TBB_AtomicAND\ninline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) {\n    for( tbb::internal::atomic_backoff b;;b.pause() ) {\n        uintptr_t tmp = *(volatile uintptr_t *)operand;\n        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp);\n        if( result==tmp ) break;\n    }\n}\n#endif\n\n#if __TBB_PREFETCHING\n#ifndef __TBB_cl_prefetch\n#error This platform does not define cache management primitives required for __TBB_PREFETCHING\n#endif\n\n#ifndef __TBB_cl_evict\n#define __TBB_cl_evict(p)\n#endif\n#endif\n\n#ifndef __TBB_Flag\ntypedef unsigned char __TBB_Flag;\n#endif\ntypedef __TBB_atomic __TBB_Flag __TBB_atomic_flag;\n\n#ifndef __TBB_TryLockByte\ninline bool __TBB_TryLockByte( __TBB_atomic_flag &flag ) {\n    return __TBB_machine_cmpswp1(&flag,1,0)==0;\n}\n#endif\n\n#ifndef __TBB_LockByte\ninline __TBB_Flag __TBB_LockByte( __TBB_atomic_flag& flag ) {\n    tbb::internal::atomic_backoff backoff;\n    while( !__TBB_TryLockByte(flag) ) backoff.pause();\n    return 0;\n}\n#endif\n\n#ifndef  __TBB_UnlockByte\n#define __TBB_UnlockByte(addr) __TBB_store_with_release((addr),0)\n#endif\n\n// lock primitives with TSX\n#if ( __TBB_x86_32 || __TBB_x86_64 )  /* only on ia32/intel64 */\ninline void __TBB_TryLockByteElidedCancel() { __TBB_machine_try_lock_elided_cancel(); }\n\ninline bool __TBB_TryLockByteElided( __TBB_atomic_flag& flag ) {\n    bool res = __TBB_machine_try_lock_elided( &flag )!=0;\n    // to avoid the \"lemming\" effect, we need to abort the transaction\n    // if  __TBB_machine_try_lock_elided returns false (i.e., someone else\n    // 
has acquired the mutex non-speculatively).\n    if( !res ) __TBB_TryLockByteElidedCancel();\n    return res;\n}\n\ninline void __TBB_LockByteElided( __TBB_atomic_flag& flag )\n{\n    for(;;) {\n        tbb::internal::spin_wait_while_eq( flag, 1 );\n        if( __TBB_machine_try_lock_elided( &flag ) )\n            return;\n        // Another thread acquired the lock \"for real\".\n        // To avoid the \"lemming\" effect, we abort the transaction.\n        __TBB_TryLockByteElidedCancel();\n    }\n}\n\ninline void __TBB_UnlockByteElided( __TBB_atomic_flag& flag ) {\n    __TBB_machine_unlock_elided( &flag );\n}\n#endif\n\n#ifndef __TBB_ReverseByte\ninline unsigned char __TBB_ReverseByte(unsigned char src) {\n    return tbb::internal::reverse<unsigned char>::byte_table[src];\n}\n#endif\n\ntemplate<typename T>\nT __TBB_ReverseBits(T src) {\n    T dst;\n    unsigned char *original = (unsigned char *) &src;\n    unsigned char *reversed = (unsigned char *) &dst;\n\n    for( int i = sizeof(T)-1; i >= 0; i-- )\n        reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] );\n\n    return dst;\n}\n\n#endif /* __TBB_machine_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_main.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n#include \"tbb_main.h\"\n#include \"governor.h\"\n#include \"market.h\"\n#include \"tbb_misc.h\"\n#include \"itt_notify.h\"\n\nnamespace tbb {\nnamespace internal {\n\n//------------------------------------------------------------------------\n// Begin shared data layout.\n// The following global data items are mostly read-only after initialization.\n//------------------------------------------------------------------------\n\n//! 
Padding in order to prevent false sharing.\nstatic const char _pad[NFS_MaxLineSize - sizeof(int)] = {};\n\n//------------------------------------------------------------------------\n// governor data\nbasic_tls<generic_scheduler*> governor::theTLS;\nunsigned governor::DefaultNumberOfThreads;\nrml::tbb_factory governor::theRMLServerFactory;\nbool governor::UsePrivateRML;\nconst task_scheduler_init *governor::BlockingTSI;\n#if TBB_USE_ASSERT\nbool governor::IsBlockingTerminationInProgress;\n#endif\nbool governor::is_speculation_enabled;\n\n//------------------------------------------------------------------------\n// market data\nmarket* market::theMarket;\nmarket::global_market_mutex_type market::theMarketMutex;\n\n//------------------------------------------------------------------------\n// One time initialization data\n\n//! Counter of references to global shared resources such as TLS.\natomic<int> __TBB_InitOnce::count;\n\n__TBB_atomic_flag __TBB_InitOnce::InitializationLock;\n\n//! Flag that is set to true after one-time initializations are done.\nbool __TBB_InitOnce::InitializationDone;\n\n#if DO_ITT_NOTIFY\n    static bool ITT_Present;\n    static bool ITT_InitializationDone;\n#endif\n\n#if !(_WIN32||_WIN64) || __TBB_SOURCE_DIRECTLY_INCLUDED\n    static __TBB_InitOnce __TBB_InitOnceHiddenInstance;\n#endif\n\n//------------------------------------------------------------------------\n// generic_scheduler data\n\n//! Pointer to the scheduler factory function\ngeneric_scheduler* (*AllocateSchedulerPtr)( arena*, size_t index );\n\n#if __TBB_OLD_PRIMES_RNG\n//! Table of primes used by fast random-number generator (FastRandom).\n/** Also serves to keep anything else from being placed in the same\n    cache line as the global data items preceding it. 
*/\nstatic const unsigned Primes[] = {\n    0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5,\n    0xba5703f5, 0xb495a877, 0xe1626741, 0x79695e6b,\n    0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,\n    0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b,\n    0xbe4d6fe9, 0x5f15e201, 0x99afc3fd, 0xf3f16801,\n    0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,\n    0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed,\n    0x085a3d61, 0x46eb5ea7, 0x3d9910ed, 0x2e687b5b,\n    0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,\n    0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7,\n    0x54581edb, 0xf2480f45, 0x0bb9288f, 0xef1affc7,\n    0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,\n    0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b,\n    0xfc411073, 0xc3749363, 0xb892d829, 0x3549366b,\n    0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,\n    0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f\n};\n\n//------------------------------------------------------------------------\n// End of shared data layout\n//------------------------------------------------------------------------\n\n//------------------------------------------------------------------------\n// Shared data accessors\n//------------------------------------------------------------------------\n\nunsigned GetPrime ( unsigned seed ) {\n    return Primes[seed%(sizeof(Primes)/sizeof(Primes[0]))];\n}\n#endif //__TBB_OLD_PRIMES_RNG\n\n//------------------------------------------------------------------------\n// __TBB_InitOnce\n//------------------------------------------------------------------------\n\nvoid __TBB_InitOnce::add_ref() {\n    if( ++count==1 )\n        governor::acquire_resources();\n}\n\nvoid __TBB_InitOnce::remove_ref() {\n    int k = --count;\n    __TBB_ASSERT(k>=0,\"removed __TBB_InitOnce ref that was not added?\"); \n    if( k==0 ) {\n        governor::release_resources();\n        ITT_FINI_ITTLIB();\n    }\n}\n\n//------------------------------------------------------------------------\n// One-time 
Initializations\n//------------------------------------------------------------------------\n\n//! Defined in cache_aligned_allocator.cpp\nvoid initialize_cache_aligned_allocator();\n\n//! Defined in scheduler.cpp\nvoid Scheduler_OneTimeInitialization ( bool itt_present );\n\n#if DO_ITT_NOTIFY\n\n#if __TBB_ITT_STRUCTURE_API\n\nstatic __itt_domain *fgt_domain = NULL;\n\nstruct resource_string {\n    const char *str;\n    __itt_string_handle *itt_str_handle;\n};\n\n//\n// populate resource strings\n//\n#define TBB_STRING_RESOURCE( index_name, str ) { str, NULL },\nstatic resource_string strings_for_itt[] = {\n    #include \"tbb/internal/_tbb_strings.h\"\n    { \"num_resource_strings\", NULL } \n};\n#undef TBB_STRING_RESOURCE\n\nstatic __itt_string_handle *ITT_get_string_handle(int idx) {\n    __TBB_ASSERT(idx >= 0, NULL);\n    return idx < NUM_STRINGS ? strings_for_itt[idx].itt_str_handle : NULL;\n}\n\nstatic void ITT_init_domains() {\n    fgt_domain = __itt_domain_create( _T(\"tbb.flow\") );\n    fgt_domain->flags = 1;\n}\n\nstatic void ITT_init_strings() {\n    for ( int i = 0; i < NUM_STRINGS; ++i ) {\n#if _WIN32||_WIN64\n        strings_for_itt[i].itt_str_handle = __itt_string_handle_createA( strings_for_itt[i].str );\n#else\n        strings_for_itt[i].itt_str_handle = __itt_string_handle_create( strings_for_itt[i].str );\n#endif\n    }\n}\n\nstatic void ITT_init() {\n    ITT_init_domains();\n    ITT_init_strings();\n}\n\n#endif // __TBB_ITT_STRUCTURE_API\n\n/** Thread-unsafe lazy one-time initialization of tools interop.\n    Used by both dummy handlers and general TBB one-time initialization routine. 
**/\nvoid ITT_DoUnsafeOneTimeInitialization () {\n    if ( !ITT_InitializationDone ) {\n        ITT_Present = (__TBB_load_ittnotify()!=0);\n#if __TBB_ITT_STRUCTURE_API\n        if (ITT_Present) ITT_init();\n#endif\n        ITT_InitializationDone = true;\n        ITT_SYNC_CREATE(&market::theMarketMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization);\n    }\n}\n\n/** Thread-safe lazy one-time initialization of tools interop.\n    Used by dummy handlers only. **/\nextern \"C\"\nvoid ITT_DoOneTimeInitialization() {\n    __TBB_InitOnce::lock();\n    ITT_DoUnsafeOneTimeInitialization();\n    __TBB_InitOnce::unlock();\n}\n#endif /* DO_ITT_NOTIFY */\n\n//! Performs thread-safe lazy one-time general TBB initialization.\nvoid DoOneTimeInitializations() {\n    suppress_unused_warning(_pad);\n    __TBB_InitOnce::lock();\n    // No fence required for load of InitializationDone, because we are inside a critical section.\n    if( !__TBB_InitOnce::InitializationDone ) {\n        __TBB_InitOnce::add_ref();\n        if( GetBoolEnvironmentVariable(\"TBB_VERSION\") )\n            PrintVersion();\n        bool itt_present = false;\n#if DO_ITT_NOTIFY\n        ITT_DoUnsafeOneTimeInitialization();\n        itt_present = ITT_Present;\n#endif /* DO_ITT_NOTIFY */\n        initialize_cache_aligned_allocator();\n        governor::initialize_rml_factory();\n        Scheduler_OneTimeInitialization( itt_present );\n        // Force processor groups support detection\n        governor::default_num_threads();\n        // Dump version data\n        governor::print_version_info();\n        PrintExtraVersionInfo( \"Tools support\", itt_present ? \"enabled\" : \"disabled\" );\n        __TBB_InitOnce::InitializationDone = true;\n    }\n    __TBB_InitOnce::unlock();\n}\n\n#if (_WIN32||_WIN64) && !__TBB_SOURCE_DIRECTLY_INCLUDED\n//! 
Windows \"DllMain\" that handles startup and shutdown of dynamic library.\nextern \"C\" bool WINAPI DllMain( HANDLE /*hinstDLL*/, DWORD reason, LPVOID /*lpvReserved*/ ) {\n    switch( reason ) {\n        case DLL_PROCESS_ATTACH:\n            __TBB_InitOnce::add_ref();\n            break;\n        case DLL_PROCESS_DETACH:\n            __TBB_InitOnce::remove_ref();\n            // It is assumed that InitializationDone is not set after DLL_PROCESS_DETACH,\n            // and thus no race on InitializationDone is possible.\n            if( __TBB_InitOnce::initialization_done() ) {\n                // Remove reference that we added in DoOneTimeInitializations.\n                __TBB_InitOnce::remove_ref();\n            }\n            break;\n        case DLL_THREAD_DETACH:\n            governor::terminate_auto_initialized_scheduler();\n            break;\n    }\n    return true;\n}\n#endif /* (_WIN32||_WIN64) && !__TBB_SOURCE_DIRECTLY_INCLUDED */\n\nvoid itt_store_pointer_with_release_v3( void* dst, void* src ) {\n    ITT_NOTIFY(sync_releasing, dst);\n    __TBB_store_with_release(*static_cast<void**>(dst),src);\n}\n\nvoid* itt_load_pointer_with_acquire_v3( const void* src ) {\n    void* result = __TBB_load_with_acquire(*static_cast<void*const*>(src));\n    ITT_NOTIFY(sync_acquired, const_cast<void*>(src));\n    return result;\n}\n    \n#if DO_ITT_NOTIFY\nvoid call_itt_notify_v5(int t, void *ptr) {\n    switch (t) {\n    case 0: ITT_NOTIFY(sync_prepare, ptr); break;\n    case 1: ITT_NOTIFY(sync_cancel, ptr); break;\n    case 2: ITT_NOTIFY(sync_acquired, ptr); break;\n    case 3: ITT_NOTIFY(sync_releasing, ptr); break;\n    }\n}\n#else\nvoid call_itt_notify_v5(int /*t*/, void* /*ptr*/) {}\n#endif\n\n#if __TBB_ITT_STRUCTURE_API\n\n#if DO_ITT_NOTIFY\n\nconst __itt_id itt_null_id = {0, 0, 0};\n\nstatic inline __itt_domain* get_itt_domain( itt_domain_enum idx ) {\n    return ( idx == ITT_DOMAIN_FLOW ) ? 
fgt_domain : NULL;\n}\n\nstatic inline void itt_id_make(__itt_id *id, void* addr, unsigned long long extra) {\n    *id = __itt_id_make(addr, extra);\n}\n\nstatic inline void itt_id_create(const __itt_domain *domain, __itt_id id) {\n    ITTNOTIFY_VOID_D1(id_create, domain, id);\n}\n\nvoid itt_make_task_group_v7( itt_domain_enum domain, void *group, unsigned long long group_extra, \n                             void *parent, unsigned long long parent_extra, string_index name_index ) {\n    if ( __itt_domain *d = get_itt_domain( domain ) ) {\n        __itt_id group_id = itt_null_id;\n        __itt_id parent_id = itt_null_id;\n        itt_id_make( &group_id, group, group_extra );\n        itt_id_create( d, group_id );\n        if ( parent ) {\n            itt_id_make( &parent_id, parent, parent_extra );\n        }\n        __itt_string_handle *n = ITT_get_string_handle(name_index);\n        ITTNOTIFY_VOID_D3(task_group, d, group_id, parent_id, n);\n    }\n}\n\nvoid itt_metadata_str_add_v7( itt_domain_enum domain, void *addr, unsigned long long addr_extra, \n                              string_index key, const char *value ) {\n    if ( __itt_domain *d = get_itt_domain( domain ) ) {\n        __itt_id id = itt_null_id;\n        itt_id_make( &id, addr, addr_extra );\n        __itt_string_handle *k = ITT_get_string_handle(key);\n       size_t value_length = strlen( value );\n#if _WIN32||_WIN64\n        ITTNOTIFY_VOID_D4(metadata_str_addA, d, id, k, value, value_length);\n#else\n        ITTNOTIFY_VOID_D4(metadata_str_add, d, id, k, value, value_length);\n#endif\n    }\n}\n\nvoid itt_relation_add_v7( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, \n                          itt_relation relation, void *addr1, unsigned long long addr1_extra ) {\n    if ( __itt_domain *d = get_itt_domain( domain ) ) {\n        __itt_id id0 = itt_null_id; \n        __itt_id id1 = itt_null_id;\n        itt_id_make( &id0, addr0, addr0_extra );\n        itt_id_make( &id1, 
addr1, addr1_extra );\n        ITTNOTIFY_VOID_D3(relation_add, d, id0, (__itt_relation)relation, id1); \n    }\n}\n\nvoid itt_task_begin_v7( itt_domain_enum domain, void *task, unsigned long long task_extra, \n                        void *parent, unsigned long long parent_extra, string_index /* name_index */ ) {\n    if ( __itt_domain *d = get_itt_domain( domain ) ) {\n        __itt_id task_id = itt_null_id;\n        __itt_id parent_id = itt_null_id;\n        itt_id_make( &task_id, task, task_extra );\n        if ( parent ) {\n            itt_id_make( &parent_id, parent, parent_extra );\n        }\n        ITTNOTIFY_VOID_D3(task_begin, d, task_id, parent_id, NULL );\n    }\n}\n\nvoid itt_task_end_v7( itt_domain_enum domain ) {\n    if ( __itt_domain *d = get_itt_domain( domain ) ) {\n        ITTNOTIFY_VOID_D0(task_end, d);\n    }\n}\n\n#else // DO_ITT_NOTIFY\n\nvoid itt_make_task_group_v7( itt_domain_enum domain, void *group, unsigned long long group_extra, \n                             void *parent, unsigned long long parent_extra, string_index name_index ) { }\n\nvoid itt_metadata_str_add_v7( itt_domain_enum domain, void *addr, unsigned long long addr_extra, \n                              string_index key, const char *value ) { }\n\nvoid itt_relation_add_v7( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, \n                          itt_relation relation, void *addr1, unsigned long long addr1_extra ) { }\n\nvoid itt_task_begin_v7( itt_domain_enum domain, void *task, unsigned long long task_extra, \n                        void * /*parent*/, unsigned long long /* parent_extra */, string_index /* name_index */ ) { }\n\nvoid itt_task_end_v7( itt_domain_enum domain ) { }\n\n#endif // DO_ITT_NOTIFY\n\n#endif // __TBB_ITT_STRUCTURE_API\n\nvoid* itt_load_pointer_v3( const void* src ) {\n    //TODO: replace this with __TBB_load_relaxed\n    void* result = *static_cast<void*const*>(src);\n    return result;\n}\n\nvoid itt_set_sync_name_v3( void* 
obj, const tchar* name) {\n    ITT_SYNC_RENAME(obj, name);\n    suppress_unused_warning(obj && name);\n}\n\n\n} // namespace internal\n} // namespace tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_main.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_tbb_main_H\n#define _TBB_tbb_main_H\n\n#include \"tbb/atomic.h\"\n\nnamespace tbb {\n\nnamespace internal {\n\nvoid DoOneTimeInitializations ();\n\n//------------------------------------------------------------------------\n// __TBB_InitOnce\n//------------------------------------------------------------------------\n\n//! Class that supports TBB initialization. \n/** It handles acquisition and release of global resources (e.g. TLS) during startup and shutdown,\n    as well as synchronization for DoOneTimeInitializations. 
*/\nclass __TBB_InitOnce {\n    friend void DoOneTimeInitializations();\n    friend void ITT_DoUnsafeOneTimeInitialization ();\n\n    static atomic<int> count;\n\n    //! Platform specific code to acquire resources.\n    static void acquire_resources();\n\n    //! Platform specific code to release resources.\n    static void release_resources();\n\n    //! Specifies if the one-time initializations has been done.\n    static bool InitializationDone;\n\n    //! Global initialization lock\n    /** Scenarios are possible when tools interop has to be initialized before the\n        TBB itself. This imposes a requirement that the global initialization lock \n        has to support valid static initialization, and does not issue any tool\n        notifications in any build mode. **/\n    static __TBB_atomic_flag InitializationLock;\n\npublic:\n    static void lock()   { __TBB_LockByte( InitializationLock ); }\n\n    static void unlock() { __TBB_UnlockByte( InitializationLock ); }\n\n    static bool initialization_done() { return __TBB_load_with_acquire(InitializationDone); }\n\n    //! Add initial reference to resources. \n    /** We assume that dynamic loading of the library prevents any other threads \n        from entering the library until this constructor has finished running. **/\n    __TBB_InitOnce() { add_ref(); }\n\n    //! Remove the initial reference to resources.\n    /** This is not necessarily the last reference if other threads are still running. **/\n    ~__TBB_InitOnce() {\n        remove_ref();\n        // We assume that InitializationDone is not set after file-scope destructors\n        // start running, and thus no race on InitializationDone is possible.\n        if( initialization_done() ) {\n            // Remove an extra reference that was added in DoOneTimeInitializations.\n            remove_ref();  \n        }\n    } \n    //! Add reference to resources.  If first reference added, acquire the resources.\n    static void add_ref();\n\n    //! 
Remove reference to resources.  If last reference removed, release the resources.\n    static void remove_ref();\n}; // class __TBB_InitOnce\n\n\n} // namespace internal\n\n} // namespace tbb\n\n#endif /* _TBB_tbb_main_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_misc.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n// Source file for miscellaneous entities that are infrequently referenced by \n// an executing program.\n\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb_assert_impl.h\" // Out-of-line TBB assertion handling routines are instantiated here.\n#include \"tbb/tbb_exception.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"tbb_misc.h\"\n#include <cstdio>\n#include <cstdlib>\n#include <stdexcept>\n\n#if _WIN32||_WIN64\n#include \"tbb/machine/windows_api.h\"\n#endif\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <cstring>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\nusing namespace std;\n\nnamespace tbb {\n\nconst char* bad_last_alloc::what() const throw() { return \"bad allocation in previous or concurrent attempt\"; }\nconst char* improper_lock::what() const throw() { return \"attempted recursive lock on critical section or non-recursive mutex\"; }\nconst char* user_abort::what() const throw() { return \"User-initiated abort has terminated this operation\"; }\nconst char* invalid_multiple_scheduling::what() const throw() { return \"The same task_handle object cannot be executed more than once\"; }\nconst char* missing_wait::what() const throw() { return \"wait() was not called on the structured_task_group\"; }\n\nnamespace internal {\n\n#if TBB_USE_EXCEPTIONS\n    #define DO_THROW(exc, init_args) throw exc init_args;\n#else /* !TBB_USE_EXCEPTIONS */\n    #define PRINT_ERROR_AND_ABORT(exc_name, msg) \\\n        fprintf (stderr, \"Exception %s with message %s would've been thrown, \"  \\\n            \"if exception handling were not disabled. 
Aborting.\\n\", exc_name, msg); \\\n        fflush(stderr); \\\n        abort();\n    #define DO_THROW(exc, init_args) PRINT_ERROR_AND_ABORT(#exc, #init_args)\n#endif /* !TBB_USE_EXCEPTIONS */\n\n\n/* The \"what\" should be fairly short, not more than about 128 characters.\n   Because we control all the call sites to handle_perror, it is pointless\n   to bullet-proof it for very long strings.\n\n   Design note: ADR put this routine off to the side in tbb_misc.cpp instead of\n   Task.cpp because the throw generates a pathetic lot of code, and ADR wanted\n   this large chunk of code to be placed on a cold page. */\nvoid handle_perror( int error_code, const char* what ) {\n    char buf[256];\n#if _MSC_VER\n #define snprintf _snprintf\n#endif\n    int written = snprintf(buf, sizeof(buf), \"%s: %s\", what, strerror( error_code ));\n    // On overflow, the returned value exceeds sizeof(buf) (for GLIBC) or is negative (for MSVC).\n    __TBB_ASSERT_EX( written>0 && written<(int)sizeof(buf), \"Error description is too long\" );\n    // Ensure that buffer ends in terminator.\n    buf[sizeof(buf)-1] = 0;\n#if TBB_USE_EXCEPTIONS\n    throw runtime_error(buf);\n#else\n    PRINT_ERROR_AND_ABORT( \"runtime_error\", buf);\n#endif /* !TBB_USE_EXCEPTIONS */\n}\n\n#if _WIN32||_WIN64 \nvoid handle_win_error( int error_code ) {\n    char buf[512];\n#if !__TBB_WIN8UI_SUPPORT\n    FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,\n                    NULL, error_code, 0, buf, sizeof(buf), NULL );\n#else\n//TODO: update with right replacement for FormatMessageA\n    sprintf_s((char*)&buf, 512, \"error code %d\", error_code);\n#endif\n#if TBB_USE_EXCEPTIONS\n    throw runtime_error(buf);\n#else\n    PRINT_ERROR_AND_ABORT( \"runtime_error\", buf);\n#endif /* !TBB_USE_EXCEPTIONS */\n}\n#endif // _WIN32||_WIN64\n\nvoid throw_bad_last_alloc_exception_v4() {\n    throw_exception_v4(eid_bad_last_alloc);\n}\n\nvoid throw_exception_v4 ( exception_id eid ) {\n    
__TBB_ASSERT ( eid > 0 && eid < eid_max, \"Unknown exception ID\" );\n    switch ( eid ) {\n    case eid_bad_alloc: DO_THROW( bad_alloc, () );\n    case eid_bad_last_alloc: DO_THROW( bad_last_alloc, () );\n    case eid_nonpositive_step: DO_THROW( invalid_argument, (\"Step must be positive\") );\n    case eid_out_of_range: DO_THROW( out_of_range, (\"Index out of requested size range\") );\n    case eid_segment_range_error: DO_THROW( range_error, (\"Index out of allocated segment slots\") );\n    case eid_index_range_error: DO_THROW( range_error, (\"Index is not allocated\") );\n    case eid_missing_wait: DO_THROW( missing_wait, () );\n    case eid_invalid_multiple_scheduling: DO_THROW( invalid_multiple_scheduling, () );\n    case eid_improper_lock: DO_THROW( improper_lock, () );\n    case eid_possible_deadlock: DO_THROW( runtime_error, (\"Resource deadlock would occur\") );\n    case eid_operation_not_permitted: DO_THROW( runtime_error, (\"Operation not permitted\") );\n    case eid_condvar_wait_failed: DO_THROW( runtime_error, (\"Wait on condition variable failed\") );\n    case eid_invalid_load_factor: DO_THROW( out_of_range, (\"Invalid hash load factor\") );\n    case eid_reserved: DO_THROW( out_of_range, (\"[backward compatibility] Invalid number of buckets\") );\n    case eid_invalid_swap: DO_THROW( invalid_argument, (\"swap() is invalid on non-equal allocators\") );\n    case eid_reservation_length_error: DO_THROW( length_error, (\"reservation size exceeds permitted max size\") );\n    case eid_invalid_key: DO_THROW( out_of_range, (\"invalid key\") );\n    case eid_user_abort: DO_THROW( user_abort, () );\n    case eid_bad_tagged_msg_cast: DO_THROW( runtime_error, (\"Illegal tagged_msg cast\") );\n#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE\n    case eid_blocking_sch_init: DO_THROW( runtime_error, (\"Nesting of blocking termination is impossible\") );\n#endif\n    default: break;\n    }\n#if !TBB_USE_EXCEPTIONS && __APPLE__\n    out_of_range e1(\"\");\n    
length_error e2(\"\");\n    range_error e3(\"\");\n    invalid_argument e4(\"\");\n#endif /* !TBB_USE_EXCEPTIONS && __APPLE__ */\n}\n\n#if _XBOX || __TBB_WIN8UI_SUPPORT\nbool GetBoolEnvironmentVariable( const char * ) { return false;}\n#else  /* _XBOX || __TBB_WIN8UI_SUPPORT */\nbool GetBoolEnvironmentVariable( const char * name ) {\n    if( const char* s = getenv(name) )\n        return strcmp(s,\"0\") != 0;\n    return false;\n}\n#endif /* _XBOX || __TBB_WIN8UI_SUPPORT */\n\n#include \"tbb_version.h\"\n\n/** The leading \"\\0\" is here so that applying \"strings\" to the binary delivers a clean result. */\nstatic const char VersionString[] = \"\\0\" TBB_VERSION_STRINGS;\n\nstatic bool PrintVersionFlag = false;\n\nvoid PrintVersion() {\n    PrintVersionFlag = true;\n    fputs(VersionString+1,stderr);\n}\n\nvoid PrintExtraVersionInfo( const char* category, const char* format, ... ) {\n    if( PrintVersionFlag ) {\n        char str[1024]; memset(str, 0, 1024);\n        va_list args; va_start(args, format);\n        // Note: correct vsnprintf definition obtained from tbb_assert_impl.h\n        vsnprintf( str, 1024-1, format, args);\n        va_end(args);\n        fprintf(stderr, \"TBB: %s\\t%s\\n\", category, str );\n    }\n}\n\nvoid PrintRMLVersionInfo( void* arg, const char* server_info ) {\n    PrintExtraVersionInfo( server_info, (const char *)arg );\n}\n\n//! 
check for transaction support.\n#if _MSC_VER\n#include <intrin.h> // for __cpuid\n#endif\nbool cpu_has_speculation() {\n#if __TBB_TSX_AVAILABLE\n#if (__INTEL_COMPILER || __GNUC__ || _MSC_VER || __SUNPRO_CC)\n    bool result = false;\n    const int hle_ebx_mask = 1<<4;\n#if _MSC_VER\n    int info[4] = {0,0,0,0};\n    const int reg_ebx = 1;\n    __cpuidex(info, 7, 0);\n    result = (info[reg_ebx] & hle_ebx_mask)!=0;\n#elif __GNUC__ || __SUNPRO_CC\n    int32_t reg_ebx = 0;\n    int32_t reg_eax = 7;\n    int32_t reg_ecx = 0;\n    __asm__ __volatile__ ( \"movl %%ebx, %%esi\\n\"\n                           \"cpuid\\n\"\n                           \"movl %%ebx, %0\\n\"\n                           \"movl %%esi, %%ebx\\n\"\n                           : \"=a\"(reg_ebx) : \"0\" (reg_eax), \"c\" (reg_ecx) : \"esi\", \n#if __TBB_x86_64\n                           \"ebx\",\n#endif\n                           \"edx\"\n                           );\n    result = (reg_ebx & hle_ebx_mask)!=0 ;\n#endif\n    return result;\n#else\n    #error Speculation detection not enabled for compiler\n#endif /* __INTEL_COMPILER || __GNUC__ || _MSC_VER */\n#else  /* __TBB_TSX_AVAILABLE */\n    return false;\n#endif /* __TBB_TSX_AVAILABLE */\n}\n\n} // namespace internal\n\nextern \"C\" int TBB_runtime_interface_version() {\n    return TBB_INTERFACE_VERSION;\n}\n\n} // namespace tbb\n\n#if !__TBB_RML_STATIC\n#if __TBB_x86_32\n\n#include \"tbb/atomic.h\"\n\n// in MSVC environment, int64_t defined in tbb::internal namespace only (see tbb_stddef.h)\n#if _MSC_VER\nusing tbb::internal::int64_t;\n#endif\n\n//! 
Warn about 8-byte store that crosses a cache line.\nextern \"C\" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ) {\n    // Report run-time warning unless we have already recently reported warning for that address.\n    const unsigned n = 4;\n    static tbb::atomic<void*> cache[n];\n    static tbb::atomic<unsigned> k;\n    for( unsigned i=0; i<n; ++i ) \n        if( ptr==cache[i] ) \n            goto done;\n    cache[(k++)%n] = const_cast<void*>(ptr);\n    tbb::internal::runtime_warning( \"atomic store on misaligned 8-byte location %p is slow\", ptr );\ndone:;\n}\n\n//! Handle 8-byte store that crosses a cache line.\nextern \"C\" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ) {\n    for( tbb::internal::atomic_backoff b;;b.pause() ) {\n        int64_t tmp = *(int64_t*)ptr;\n        if( __TBB_machine_cmpswp8(ptr,value,tmp)==tmp ) \n            break;\n    }\n}\n\n#endif /* __TBB_x86_32 */\n#endif /* !__TBB_RML_STATIC */\n\n#if __TBB_ipf\n/* It was found that on IA-64 architecture inlining of __TBB_machine_lockbyte leads\n   to serious performance regression with ICC. So keep it out-of-line.\n */\nextern \"C\" intptr_t __TBB_machine_lockbyte( volatile unsigned char& flag ) {\n    tbb::internal::atomic_backoff backoff;\n    while( !__TBB_TryLockByte(flag) ) backoff.pause();\n    return 0;\n}\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_misc.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_tbb_misc_H\n#define _TBB_tbb_misc_H\n\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"tbb/atomic.h\"     // For atomic_xxx definitions\n\n#if __linux__ || __FreeBSD__\n#include <sys/param.h>  // __FreeBSD_version\n#if __FreeBSD_version >= 701000\n#include <sys/cpuset.h>\n#endif\n#endif\n\n// Does the operating system have a system call to pin a thread to a set of OS processors?\n#define __TBB_OS_AFFINITY_SYSCALL_PRESENT ((__linux__ && !__ANDROID__) || (__FreeBSD_version >= 701000))\n// On IBM* Blue Gene* CNK nodes, the affinity API has restrictions that prevent its usability for TBB,\n// and also sysconf(_SC_NPROCESSORS_ONLN) already takes process affinity into account.\n#define __TBB_USE_OS_AFFINITY_SYSCALL (__TBB_OS_AFFINITY_SYSCALL_PRESENT && !__bg__)\n\nnamespace tbb {\nnamespace internal {\n\nconst size_t MByte = 1024*1024;\n\n#if __TBB_WIN8UI_SUPPORT\n// In Win8UI mode, TBB uses a thread creation API that does not allow to specify the stack size.\n// Still, the thread stack size value, either explicit or default, is used by the scheduler.\n// So here we set the default value to match the platform's default of 1MB.\nconst size_t ThreadStackSize = 1*MByte;\n#else\nconst size_t ThreadStackSize = (sizeof(uintptr_t) <= 4 ? 2 : 4 )*MByte;\n#endif\n\n#ifndef __TBB_HardwareConcurrency\n\n//! Returns maximal parallelism level supported by the current OS configuration.\nint AvailableHwConcurrency();\n\n#else\n\ninline int AvailableHwConcurrency() {\n    int n = __TBB_HardwareConcurrency();\n    return n > 0 ? n : 1; // Fail safety strap\n}\n#endif /* __TBB_HardwareConcurrency */\n\n\n#if _WIN32||_WIN64\n\n//! Returns number of processor groups in the current OS configuration.\n/** AvailableHwConcurrency must be called at least once before calling this method. 
**/\nint NumberOfProcessorGroups();\n\n//! Retrieves index of processor group containing processor with the given index\nint FindProcessorGroupIndex ( int processorIndex );\n\n//! Affinitizes the thread to the specified processor group\nvoid MoveThreadIntoProcessorGroup( void* hThread, int groupIndex );\n\n#endif /* _WIN32||_WIN64 */\n\n//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info\nvoid handle_win_error( int error_code );\n\n//! True if environment variable with given name is set and not 0; otherwise false.\nbool GetBoolEnvironmentVariable( const char * name );\n\n//! Prints TBB version information on stderr\nvoid PrintVersion();\n\n//! Prints arbitrary extra TBB version information on stderr\nvoid PrintExtraVersionInfo( const char* category, const char* format, ... );\n\n//! A callback routine to print RML version information on stderr\nvoid PrintRMLVersionInfo( void* arg, const char* server_info );\n\n// For TBB compilation only; not to be used in public headers\n#if defined(min) || defined(max)\n#undef min\n#undef max\n#endif\n\n//! Utility template function returning lesser of the two values.\n/** Provided here to avoid including not strict safe <algorithm>.\\n\n    In case operands cause signed/unsigned or size mismatch warnings it is caller's\n    responsibility to do the appropriate cast before calling the function. **/\ntemplate<typename T1, typename T2>\nT1 min ( const T1& val1, const T2& val2 ) {\n    return val1 < val2 ? val1 : val2;\n}\n\n//! Utility template function returning greater of the two values.\n/** Provided here to avoid including not strict safe <algorithm>.\\n\n    In case operands cause signed/unsigned or size mismatch warnings it is caller's\n    responsibility to do the appropriate cast before calling the function. **/\ntemplate<typename T1, typename T2>\nT1 max ( const T1& val1, const T2& val2 ) {\n    return val1 < val2 ? val2 : val1;\n}\n\n//! 
Utility helper structure to ease overload resolution\ntemplate<int > struct int_to_type {};\n\n//------------------------------------------------------------------------\n// FastRandom\n//------------------------------------------------------------------------\n\n/** Defined in tbb_main.cpp **/\nunsigned GetPrime ( unsigned seed );\n\n//! A fast random number generator.\n/** Uses linear congruential method. */\nclass FastRandom {\nprivate:\n#if __TBB_OLD_PRIMES_RNG\n    unsigned x, a;\n    static const unsigned c = 1;\n#else\n    unsigned x, c;\n    static const unsigned a = 0x9e3779b1; // a big prime number\n#endif //__TBB_OLD_PRIMES_RNG\npublic:\n    //! Get a random number.\n    unsigned short get() {\n        return get(x);\n    }\n    //! Get a random number for the given seed; update the seed for next use.\n    unsigned short get( unsigned& seed ) {\n        unsigned short r = (unsigned short)(seed>>16);\n        __TBB_ASSERT(c&1, \"c must be odd for big rng period\");\n        seed = seed*a+c;\n        return r;\n    }\n    //! 
Construct a random number generator.\n    FastRandom( void* unique_ptr ) { init(uintptr_t(unique_ptr)); }\n    FastRandom( uint32_t seed) { init(seed); }\n    FastRandom( uint64_t seed) { init(seed); }\n    template <typename T>\n    void init( T seed ) {\n        init(seed,int_to_type<sizeof(seed)>());\n    }\n    void init( uint64_t seed , int_to_type<8> ) {\n        init(uint32_t((seed>>32)+seed), int_to_type<4>());\n    }\n    void init( uint32_t seed, int_to_type<4> ) {\n#if __TBB_OLD_PRIMES_RNG\n        x = seed;\n        a = GetPrime( seed );\n#else\n        // threads use different seeds for unique sequences\n        c = (seed|1)*0xba5703f5; // c must be odd, shuffle by a prime number\n        x = c^(seed>>1); // also shuffle x for the first get() invocation\n#endif\n    }\n};\n\n//------------------------------------------------------------------------\n// Atomic extensions\n//------------------------------------------------------------------------\n\n//! Atomically replaces value of dst with newValue if they satisfy condition of compare predicate\n/** Return value semantics is the same as for CAS. **/\ntemplate<typename T1, typename T2, class Pred>\nT1 atomic_update ( tbb::atomic<T1>& dst, T2 newValue, Pred compare ) {\n    T1 oldValue = dst;\n    while ( compare(oldValue, newValue) ) {\n        if ( dst.compare_and_swap((T1)newValue, oldValue) == oldValue )\n            break;\n        oldValue = dst;\n    }\n    return oldValue;\n}\n\n//! One-time initialization states\nenum do_once_state {\n    do_once_uninitialized = 0,  ///< No execution attempts have been undertaken yet\n    do_once_pending,            ///< A thread is executing associated do-once routine\n    do_once_executed,           ///< Do-once routine has been executed\n    initialization_complete = do_once_executed  ///< Convenience alias\n};\n\n//! 
One-time initialization function\n/** /param initializer Pointer to function without arguments\n           The variant that returns bool is used for cases when initialization can fail\n           and it is OK to continue execution, but the state should be reset so that\n           the initialization attempt was repeated the next time.\n    /param state Shared state associated with initializer that specifies its\n            initialization state. Must be initially set to #uninitialized value\n            (e.g. by means of default static zero initialization). **/\ntemplate <typename F>\nvoid atomic_do_once ( const F& initializer, atomic<do_once_state>& state ) {\n    // tbb::atomic provides necessary acquire and release fences.\n    // The loop in the implementation is necessary to avoid race when thread T2\n    // that arrived in the middle of initialization attempt by another thread T1\n    // has just made initialization possible.\n    // In such a case T2 has to rely on T1 to initialize, but T1 may already be past\n    // the point where it can recognize the changed conditions.\n    while ( state != do_once_executed ) {\n        if( state == do_once_uninitialized ) {\n            if( state.compare_and_swap( do_once_pending, do_once_uninitialized ) == do_once_uninitialized ) {\n                run_initializer( initializer, state );\n                break;\n            }\n        }\n        spin_wait_while_eq( state, do_once_pending );\n    }\n}\n\n// Run the initializer which can not fail\ninline void run_initializer( void (*f)(), atomic<do_once_state>& state ) {\n    f();\n    state = do_once_executed;\n}\n\n// Run the initializer which can require repeated call\ninline void run_initializer( bool (*f)(), atomic<do_once_state>& state ) {\n    state = f() ? 
do_once_executed : do_once_uninitialized;\n}\n\n#if __TBB_USE_OS_AFFINITY_SYSCALL\n  #if __linux__\n    typedef cpu_set_t basic_mask_t;\n  #elif __FreeBSD_version >= 701000\n    typedef cpuset_t basic_mask_t;\n  #else\n    #error affinity_helper is not implemented in this OS\n  #endif\n    class affinity_helper : no_copy {\n        basic_mask_t* threadMask;\n        int is_changed;\n    public:\n        affinity_helper() : threadMask(NULL), is_changed(0) {}\n        ~affinity_helper();\n        void protect_affinity_mask();\n    };\n#else\n    class affinity_helper : no_copy {\n    public:\n        void protect_affinity_mask() {}\n    };\n#endif /* __TBB_USE_OS_AFFINITY_SYSCALL */\n\nextern bool cpu_has_speculation();\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* _TBB_tbb_misc_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_misc_ex.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n// Source file for miscellaneous entities that are infrequently referenced by \n// an executing program, and implementation of which requires dynamic linking.\n\n#include \"tbb_misc.h\"\n\n#if !defined(__TBB_HardwareConcurrency)\n\n#include \"dynamic_link.h\"\n#include <stdio.h>\n#include <limits.h>\n\n#if _WIN32||_WIN64\n#include \"tbb/machine/windows_api.h\"\n#if __TBB_WIN8UI_SUPPORT\n#include <thread>\n#endif\n#else\n#include <unistd.h>\n#if __linux__\n#include <sys/sysinfo.h>\n#include <string.h>\n#include <sched.h>\n#include <errno.h>\n#elif __sun\n#include <sys/sysinfo.h>\n#elif __FreeBSD__\n#include <errno.h>\n#include <string.h>\n#include <sys/param.h>  // Required by <sys/cpuset.h>\n#include <sys/cpuset.h>\n#endif\n#endif\n\nnamespace tbb {\nnamespace internal {\n\n#if __TBB_USE_OS_AFFINITY_SYSCALL\n\nstatic void set_affinity_mask( size_t maskSize, const basic_mask_t* threadMask ) {\n#if __linux__\n    if( sched_setaffinity( 0, maskSize, threadMask ) )\n#else /* FreeBSD */\n    if( cpuset_setaffinity( CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, maskSize, threadMask ) )\n#endif\n        runtime_warning( \"setaffinity syscall failed\" );\n}\n\nstatic void get_affinity_mask( size_t maskSize, basic_mask_t* threadMask ) {\n#if __linux__\n    if( sched_getaffinity( 0, maskSize, threadMask ) )\n#else /* FreeBSD */\n    if( cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, maskSize, threadMask ) )\n#endif\n        runtime_warning( \"getaffinity syscall failed\" );\n}\n\nstatic basic_mask_t* process_mask;\nstatic int num_masks;\nstruct process_mask_cleanup_helper {\n    ~process_mask_cleanup_helper() {\n        if( process_mask ) {\n            delete [] process_mask;\n        }\n     }\n};\nstatic process_mask_cleanup_helper process_mask_cleanup;\n\n#define curMaskSize sizeof(basic_mask_t) * 
num_masks\naffinity_helper::~affinity_helper() {\n    if( threadMask ) {\n        if( is_changed ) {\n            set_affinity_mask( curMaskSize, threadMask );\n        }\n        delete [] threadMask;\n    }\n}\nvoid affinity_helper::protect_affinity_mask() {\n    if( threadMask == NULL && num_masks && process_mask ) {\n        threadMask = new basic_mask_t [num_masks];\n        memset( threadMask, 0, curMaskSize );\n        get_affinity_mask( curMaskSize, threadMask );\n        is_changed = memcmp( process_mask, threadMask, curMaskSize );\n        if( is_changed ) {\n            set_affinity_mask( curMaskSize, process_mask );\n        }\n    }\n}\n#undef curMaskSize\n\nstatic atomic<do_once_state> hardware_concurrency_info;\n\nstatic int theNumProcs;\n\nstatic void initialize_hardware_concurrency_info () {\n    int err;\n    int availableProcs = 0;\n    int numMasks = 1;\n#if __linux__\n#if __TBB_MAIN_THREAD_AFFINITY_BROKEN\n    int maxProcs = INT_MAX; // To check the entire mask.\n    int pid = 0; // Get the mask of the calling thread.\n#else\n    int maxProcs = sysconf(_SC_NPROCESSORS_ONLN);\n    int pid = getpid();\n#endif\n    cpu_set_t *processMask;\n    const size_t BasicMaskSize =  sizeof(cpu_set_t);\n    for (;;) {\n        int curMaskSize = BasicMaskSize * numMasks;\n        processMask = new cpu_set_t[numMasks];\n        memset( processMask, 0, curMaskSize );\n        err = sched_getaffinity( pid, curMaskSize, processMask );\n        if ( !err || errno != EINVAL || curMaskSize * CHAR_BIT >= 256 * 1024 )\n            break;\n        delete[] processMask;\n        numMasks <<= 1;\n    }\n#else /* FreeBSD >= 7.1 */\n    int maxProcs = sysconf(_SC_NPROCESSORS_ONLN);\n    cpuset_t *processMask;\n    const size_t BasicMaskSize = sizeof(cpuset_t);\n    for (;;) {\n        int curMaskSize = BasicMaskSize * numMasks;\n        processMask = new cpuset_t[numMasks];\n        memset( processMask, 0, curMaskSize );\n        // CPU_LEVEL_WHICH - anonymous (current) 
mask, CPU_LEVEL_CPUSET - assigned mask\n#if __TBB_MAIN_THREAD_AFFINITY_BROKEN\n        err = cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, curMaskSize, processMask );\n#else\n        err = cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, curMaskSize, processMask );\n#endif\n        if ( !err || errno != ERANGE || curMaskSize * CHAR_BIT >= 16 * 1024 )\n            break;\n        delete[] processMask;\n        numMasks <<= 1;\n    }\n#endif /* FreeBSD >= 7.1 */\n    if ( !err ) {\n        for ( int m = 0; availableProcs < maxProcs && m < numMasks; ++m ) {\n            for ( size_t i = 0; (availableProcs < maxProcs) && (i < BasicMaskSize * CHAR_BIT); ++i ) {\n                if ( CPU_ISSET( i, processMask + m ) )\n                    ++availableProcs;\n            }\n        }\n        num_masks = numMasks;\n        process_mask = processMask;\n    }\n    else {\n        availableProcs = (maxProcs == INT_MAX) ? sysconf(_SC_NPROCESSORS_ONLN) : maxProcs;\n        delete[] processMask;\n    }\n    theNumProcs = availableProcs > 0 ? availableProcs : 1; // Fail safety strap\n    __TBB_ASSERT( theNumProcs <= sysconf(_SC_NPROCESSORS_ONLN), NULL );\n}\n\nint AvailableHwConcurrency() {\n    atomic_do_once( &initialize_hardware_concurrency_info, hardware_concurrency_info );\n    return theNumProcs;\n}\n\n#elif __ANDROID__\n// Work-around for Android that reads the correct number of available CPUs since system calls are unreliable.\n// Format of \"present\" file is: ([<int>-<int>|<int>],)+\nint AvailableHwConcurrency() {\n    FILE *fp = fopen(\"/sys/devices/system/cpu/present\", \"r\");\n    if (fp == NULL) return 1;\n    int num_args, lower, upper, num_cpus=0;\n    while ((num_args = fscanf(fp, \"%u-%u\", &lower, &upper)) != EOF) {\n        switch(num_args) {\n            case 2: num_cpus += upper - lower + 1; break;\n            case 1: num_cpus += 1; break;\n        }\n        fscanf(fp, \",\");\n    }\n    return (num_cpus > 0) ? 
num_cpus : 1;\n}\n\n#elif defined(_SC_NPROCESSORS_ONLN)\nint AvailableHwConcurrency() {\n    int n = sysconf(_SC_NPROCESSORS_ONLN);\n    return (n > 0) ? n : 1;\n}\n\n#elif _WIN32||_WIN64\n\nstatic atomic<do_once_state> hardware_concurrency_info;\n\nstatic const WORD TBB_ALL_PROCESSOR_GROUPS = 0xffff;\n\n// Statically allocate an array for processor group information.\n// Windows 7 supports maximum 4 groups, but let's look ahead a little.\nstatic const WORD MaxProcessorGroups = 64;\n\nstruct ProcessorGroupInfo {\n    DWORD_PTR   mask;                   ///< Affinity mask covering the whole group\n    int         numProcs;               ///< Number of processors in the group\n    int         numProcsRunningTotal;   ///< Subtotal of processors in this and preceding groups\n\n    //! Total number of processor groups in the system\n    static int NumGroups; \n\n    //! Index of the group with a slot reserved for the first master thread\n    /** In the context of multiple processor groups support current implementation\n        defines \"the first master thread\" as the first thread to invoke\n        AvailableHwConcurrency(). \n\n        TODO:   Implement a dynamic scheme remapping workers depending on the pending\n                master threads affinity. 
**/\n    static int HoleIndex;\n};\n\nint ProcessorGroupInfo::NumGroups = 1;\nint ProcessorGroupInfo::HoleIndex = 0;\n\n\nProcessorGroupInfo theProcessorGroups[MaxProcessorGroups];\n\nstruct TBB_GROUP_AFFINITY {\n    DWORD_PTR Mask;\n    WORD   Group;\n    WORD   Reserved[3];\n};\n\nstatic DWORD (WINAPI *TBB_GetActiveProcessorCount)( WORD groupIndex ) = NULL;\nstatic WORD (WINAPI *TBB_GetActiveProcessorGroupCount)() = NULL;\nstatic BOOL (WINAPI *TBB_SetThreadGroupAffinity)( HANDLE hThread, \n                        const TBB_GROUP_AFFINITY* newAff, TBB_GROUP_AFFINITY *prevAff );\nstatic BOOL (WINAPI *TBB_GetThreadGroupAffinity)( HANDLE hThread, TBB_GROUP_AFFINITY* );\n\nstatic const dynamic_link_descriptor ProcessorGroupsApiLinkTable[] = {\n      DLD(GetActiveProcessorCount, TBB_GetActiveProcessorCount)\n    , DLD(GetActiveProcessorGroupCount, TBB_GetActiveProcessorGroupCount)\n    , DLD(SetThreadGroupAffinity, TBB_SetThreadGroupAffinity)\n    , DLD(GetThreadGroupAffinity, TBB_GetThreadGroupAffinity)\n};\n\nstatic void initialize_hardware_concurrency_info () {\n#if __TBB_WIN8UI_SUPPORT\n    // For these applications processor groups info is unavailable\n    // Setting up a number of processors for one processor group\n    theProcessorGroups[0].numProcs = theProcessorGroups[0].numProcsRunningTotal = std::thread::hardware_concurrency();\n#else /* __TBB_WIN8UI_SUPPORT */\n    dynamic_link( \"Kernel32.dll\", ProcessorGroupsApiLinkTable,\n                  sizeof(ProcessorGroupsApiLinkTable)/sizeof(dynamic_link_descriptor) );\n    SYSTEM_INFO si;\n    GetNativeSystemInfo(&si);\n    DWORD_PTR pam, sam, m = 1;\n    GetProcessAffinityMask( GetCurrentProcess(), &pam, &sam );\n    int nproc = 0;\n    for ( size_t i = 0; i < sizeof(DWORD_PTR) * CHAR_BIT; ++i, m <<= 1 ) {\n        if ( pam & m )\n            ++nproc;\n    }\n    __TBB_ASSERT( nproc <= (int)si.dwNumberOfProcessors, NULL );\n    // By default setting up a number of processors for one processor group\n    
theProcessorGroups[0].numProcs = theProcessorGroups[0].numProcsRunningTotal = nproc;\n    // Setting up processor groups in case the process does not restrict affinity mask and more than one processor group is present\n    if ( nproc == (int)si.dwNumberOfProcessors && TBB_GetActiveProcessorCount ) {\n        // The process does not have restricting affinity mask and multiple processor groups are possible\n        ProcessorGroupInfo::NumGroups = (int)TBB_GetActiveProcessorGroupCount();\n        __TBB_ASSERT( ProcessorGroupInfo::NumGroups <= MaxProcessorGroups, NULL );\n        // Fail safety bootstrap. Release versions will limit available concurrency\n        // level, while debug ones would assert.\n        if ( ProcessorGroupInfo::NumGroups > MaxProcessorGroups )\n            ProcessorGroupInfo::NumGroups = MaxProcessorGroups;\n        if ( ProcessorGroupInfo::NumGroups > 1 ) {\n            TBB_GROUP_AFFINITY ga;\n            if ( TBB_GetThreadGroupAffinity( GetCurrentThread(), &ga ) )\n                ProcessorGroupInfo::HoleIndex = ga.Group;\n            int nprocs = 0;\n            for ( WORD i = 0; i < ProcessorGroupInfo::NumGroups; ++i ) {\n                ProcessorGroupInfo  &pgi = theProcessorGroups[i];\n                pgi.numProcs = (int)TBB_GetActiveProcessorCount(i);\n                __TBB_ASSERT( pgi.numProcs <= (int)sizeof(DWORD_PTR) * CHAR_BIT, NULL );\n                pgi.mask = pgi.numProcs == sizeof(DWORD_PTR) * CHAR_BIT ? 
~(DWORD_PTR)0 : (DWORD_PTR(1) << pgi.numProcs) - 1;\n                pgi.numProcsRunningTotal = nprocs += pgi.numProcs;\n            }\n            __TBB_ASSERT( nprocs == (int)TBB_GetActiveProcessorCount( TBB_ALL_PROCESSOR_GROUPS ), NULL );\n        }\n    }\n#endif /* __TBB_WIN8UI_SUPPORT */\n\n    PrintExtraVersionInfo(\"Processor groups\", \"%d\", ProcessorGroupInfo::NumGroups);\n    if (ProcessorGroupInfo::NumGroups>1)\n        for (int i=0; i<ProcessorGroupInfo::NumGroups; ++i)\n            PrintExtraVersionInfo( \"----- Group\", \"%d: size %d\", i, theProcessorGroups[i].numProcs);\n}\n\nint AvailableHwConcurrency() {\n    atomic_do_once( &initialize_hardware_concurrency_info, hardware_concurrency_info );\n    return theProcessorGroups[ProcessorGroupInfo::NumGroups - 1].numProcsRunningTotal;\n}\n\nint NumberOfProcessorGroups() {\n    __TBB_ASSERT( hardware_concurrency_info == initialization_complete, \"NumberOfProcessorGroups is used before AvailableHwConcurrency\" );\n    return ProcessorGroupInfo::NumGroups;\n}\n\n// Offset for the slot reserved for the first master thread\n#define HoleAdjusted(procIdx, grpIdx) (procIdx + (holeIdx <= grpIdx))\n\nint FindProcessorGroupIndex ( int procIdx ) {\n    // In case of oversubscription spread extra workers in a round robin manner\n    int holeIdx;\n    const int numProcs = theProcessorGroups[ProcessorGroupInfo::NumGroups - 1].numProcsRunningTotal;\n    if ( procIdx >= numProcs - 1 ) {\n        holeIdx = INT_MAX;\n        procIdx = (procIdx - numProcs + 1) % numProcs;\n    }\n    else\n        holeIdx = ProcessorGroupInfo::HoleIndex;\n    __TBB_ASSERT( hardware_concurrency_info == initialization_complete, \"FindProcessorGroupIndex is used before AvailableHwConcurrency\" );\n    // Approximate the likely group index assuming all groups are of the same size\n    int i = procIdx / theProcessorGroups[0].numProcs;\n    // Make sure the approximation is a valid group index\n    if (i >= ProcessorGroupInfo::NumGroups) i = 
ProcessorGroupInfo::NumGroups-1;\n    // Now adjust the approximation up or down\n    if ( theProcessorGroups[i].numProcsRunningTotal > HoleAdjusted(procIdx, i) ) {\n        while ( theProcessorGroups[i].numProcsRunningTotal - theProcessorGroups[i].numProcs > HoleAdjusted(procIdx, i) ) {\n            __TBB_ASSERT( i > 0, NULL );\n            --i;\n        }\n    }\n    else {\n        do {\n            ++i;\n        } while ( theProcessorGroups[i].numProcsRunningTotal <= HoleAdjusted(procIdx, i) );\n    }\n    __TBB_ASSERT( i < ProcessorGroupInfo::NumGroups, NULL );\n    return i;\n}\n\nvoid MoveThreadIntoProcessorGroup( void* hThread, int groupIndex ) {\n    __TBB_ASSERT( hardware_concurrency_info == initialization_complete, \"MoveThreadIntoProcessorGroup is used before AvailableHwConcurrency\" );\n    if ( !TBB_SetThreadGroupAffinity )\n        return;\n    TBB_GROUP_AFFINITY ga = { theProcessorGroups[groupIndex].mask, (WORD)groupIndex, {0,0,0} };\n    TBB_SetThreadGroupAffinity( hThread, &ga, NULL );\n}\n\n#else\n    #error AvailableHwConcurrency is not implemented in this OS \n#endif /* OS */\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* !__TBB_HardwareConcurrency */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_profiling.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_profiling_H\n#define __TBB_profiling_H\n\nnamespace tbb {\n    namespace internal {\n\n        //\n        // This is not under __TBB_ITT_STRUCTURE_API because these values are used directly in flow_graph.h.  
\n        //\n       \n        // include list of index names\n        #define TBB_STRING_RESOURCE(index_name,str) index_name,\n        enum string_index {\n           #include \"internal/_tbb_strings.h\"\n           NUM_STRINGS\n        };\n        #undef TBB_STRING_RESOURCE\n\n        enum itt_relation\n        {\n        __itt_relation_is_unknown = 0,\n        __itt_relation_is_dependent_on,         /**< \"A is dependent on B\" means that A cannot start until B completes */\n        __itt_relation_is_sibling_of,           /**< \"A is sibling of B\" means that A and B were created as a group */\n        __itt_relation_is_parent_of,            /**< \"A is parent of B\" means that A created B */\n        __itt_relation_is_continuation_of,      /**< \"A is continuation of B\" means that A assumes the dependencies of B */\n        __itt_relation_is_child_of,             /**< \"A is child of B\" means that A was created by B (inverse of is_parent_of) */\n        __itt_relation_is_continued_by,         /**< \"A is continued by B\" means that B assumes the dependencies of A (inverse of is_continuation_of) */\n        __itt_relation_is_predecessor_to        /**< \"A is predecessor to B\" means that B cannot start until A completes (inverse of is_dependent_on) */\n        };\n    \n    }\n}\n\n// Check if the tools support is enabled\n#if (_WIN32||_WIN64||__linux__) && !__MINGW32__ && TBB_USE_THREADING_TOOLS\n\n#if _WIN32||_WIN64\n#include <stdlib.h>  /* mbstowcs_s */\n#endif\n#include \"tbb_stddef.h\"\n\nnamespace tbb {\n    namespace internal {\n\n#if _WIN32||_WIN64\n        void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name );\n        inline size_t multibyte_to_widechar( wchar_t* wcs, const char* mbs, size_t bufsize) {\n#if _MSC_VER>=1400\n            size_t len;\n            mbstowcs_s( &len, wcs, bufsize, mbs, _TRUNCATE );\n            return len;   // mbstowcs_s counts null terminator\n#else\n            size_t len = mbstowcs( wcs, mbs, 
bufsize );\n            if(wcs && len!=size_t(-1) )\n                wcs[len<bufsize-1? len: bufsize-1] = wchar_t('\\0');\n            return len+1; // mbstowcs does not count null terminator\n#endif\n        }\n#else\n        void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const char* name );\n#endif\n    } // namespace internal\n} // namespace tbb\n\n//! Macro __TBB_DEFINE_PROFILING_SET_NAME(T) defines \"set_name\" methods for sync objects of type T\n/** Should be used in the \"tbb\" namespace only.\n    Don't place semicolon after it to avoid compiler warnings. **/\n#if _WIN32||_WIN64\n    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)                       \\\n        namespace profiling {                                                       \\\n            inline void set_name( sync_object_type& obj, const wchar_t* name ) {    \\\n                tbb::internal::itt_set_sync_name_v3( &obj, name );                  \\\n            }                                                                       \\\n            inline void set_name( sync_object_type& obj, const char* name ) {       \\\n                size_t len = tbb::internal::multibyte_to_widechar(NULL, name, 0);   \\\n                wchar_t *wname = new wchar_t[len];                                  \\\n                tbb::internal::multibyte_to_widechar(wname, name, len);             \\\n                set_name( obj, wname );                                             \\\n                delete[] wname;                                                     \\\n            }                                                                       \\\n        }\n#else /* !WIN */\n    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)                       \\\n        namespace profiling {                                                       \\\n            inline void set_name( sync_object_type& obj, const char* name ) {       \\\n                
tbb::internal::itt_set_sync_name_v3( &obj, name );                  \\\n            }                                                                       \\\n        }\n#endif /* !WIN */\n\n#else /* no tools support */\n\n#if _WIN32||_WIN64\n    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)               \\\n        namespace profiling {                                               \\\n            inline void set_name( sync_object_type&, const wchar_t* ) {}    \\\n            inline void set_name( sync_object_type&, const char* ) {}       \\\n        }\n#else /* !WIN */\n    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)               \\\n        namespace profiling {                                               \\\n            inline void set_name( sync_object_type&, const char* ) {}       \\\n        }\n#endif /* !WIN */\n\n#endif /* no tools support */\n\n#include \"atomic.h\"\n// Need these to work regardless of tools support\nnamespace tbb {\n    namespace internal {\n\n        enum notify_type {prepare=0, cancel, acquired, releasing};\n\n        const uintptr_t NUM_NOTIFY_TYPES = 4; // set to # elements in enum above\n\n        void __TBB_EXPORTED_FUNC call_itt_notify_v5(int t, void *ptr);\n        void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3(void *dst, void *src);\n        void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3(const void *src);\n        void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );\n#if __TBB_ITT_STRUCTURE_API\n        enum itt_domain_enum { ITT_DOMAIN_FLOW=0 };\n\n        void __TBB_EXPORTED_FUNC itt_make_task_group_v7( itt_domain_enum domain, void *group, unsigned long long group_extra, \n                                                         void *parent, unsigned long long parent_extra, string_index name_index ); \n        void __TBB_EXPORTED_FUNC itt_metadata_str_add_v7( itt_domain_enum domain, void *addr, unsigned long long addr_extra, \n                                 
                         string_index key, const char *value ); \n        void __TBB_EXPORTED_FUNC itt_relation_add_v7( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, \n                                                      itt_relation relation, void *addr1, unsigned long long addr1_extra );\n        void __TBB_EXPORTED_FUNC itt_task_begin_v7( itt_domain_enum domain, void *task, unsigned long long task_extra, \n                                                    void *parent, unsigned long long parent_extra, string_index name_index );\n        void __TBB_EXPORTED_FUNC itt_task_end_v7( itt_domain_enum domain );\n#endif // __TBB_ITT_STRUCTURE_API\n\n        // two template arguments are to workaround /Wp64 warning with tbb::atomic specialized for unsigned type\n        template <typename T, typename U>\n        inline void itt_store_word_with_release(tbb::atomic<T>& dst, U src) {\n#if TBB_USE_THREADING_TOOLS\n            // This assertion should be replaced with static_assert\n            __TBB_ASSERT(sizeof(T) == sizeof(void *), \"Type must be word-sized.\");\n            itt_store_pointer_with_release_v3(&dst, (void *)uintptr_t(src));\n#else\n            dst = src;\n#endif // TBB_USE_THREADING_TOOLS\n        }\n\n        template <typename T>\n        inline T itt_load_word_with_acquire(const tbb::atomic<T>& src) {\n#if TBB_USE_THREADING_TOOLS\n            // This assertion should be replaced with static_assert\n            __TBB_ASSERT(sizeof(T) == sizeof(void *), \"Type must be word-sized.\");\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n            // Workaround for overzealous compiler warnings\n            #pragma warning (push)\n            #pragma warning (disable: 4311)\n#endif\n            T result = (T)itt_load_pointer_with_acquire_v3(&src);\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n            #pragma warning (pop)\n#endif\n            return result;\n#else\n            return src;\n#endif // 
TBB_USE_THREADING_TOOLS\n        }\n\n        template <typename T>\n        inline void itt_store_word_with_release(T& dst, T src) {\n#if TBB_USE_THREADING_TOOLS\n            // This assertion should be replaced with static_assert\n            __TBB_ASSERT(sizeof(T) == sizeof(void *), \"Type must be word-sized.\");\n            itt_store_pointer_with_release_v3(&dst, (void *)src);\n#else\n            __TBB_store_with_release(dst, src); \n#endif // TBB_USE_THREADING_TOOLS\n        }\n\n        template <typename T>\n        inline T itt_load_word_with_acquire(const T& src) {\n#if TBB_USE_THREADING_TOOLS\n            // This assertion should be replaced with static_assert\n            __TBB_ASSERT(sizeof(T) == sizeof(void *), \"Type must be word-sized\");\n            return (T)itt_load_pointer_with_acquire_v3(&src);\n#else\n            return __TBB_load_with_acquire(src);\n#endif // TBB_USE_THREADING_TOOLS\n        }\n\n        template <typename T>\n        inline void itt_hide_store_word(T& dst, T src) {\n#if TBB_USE_THREADING_TOOLS\n            //TODO: This assertion should be replaced with static_assert\n            __TBB_ASSERT(sizeof(T) == sizeof(void *), \"Type must be word-sized\");\n            itt_store_pointer_with_release_v3(&dst, (void *)src);\n#else\n            dst = src;\n#endif\n        }\n\n        //TODO: rename to itt_hide_load_word_relaxed\n        template <typename T>\n        inline T itt_hide_load_word(const T& src) {\n#if TBB_USE_THREADING_TOOLS\n            //TODO: This assertion should be replaced with static_assert\n            __TBB_ASSERT(sizeof(T) == sizeof(void *), \"Type must be word-sized.\");\n            return (T)itt_load_pointer_v3(&src);\n#else\n            return src;\n#endif\n        }\n\n#if TBB_USE_THREADING_TOOLS\n        inline void call_itt_notify(notify_type t, void *ptr) {\n            call_itt_notify_v5((int)t, ptr);\n        }\n\n#else\n        inline void call_itt_notify(notify_type /*t*/, void * /*ptr*/) 
{}\n\n#endif // TBB_USE_THREADING_TOOLS\n\n#if __TBB_ITT_STRUCTURE_API\n        inline void itt_make_task_group( itt_domain_enum domain, void *group, unsigned long long group_extra, \n                                         void *parent, unsigned long long parent_extra, string_index name_index ) {\n            itt_make_task_group_v7( domain, group, group_extra, parent, parent_extra, name_index ); \n        }\n\n        inline void itt_metadata_str_add( itt_domain_enum domain, void *addr, unsigned long long addr_extra, \n                                          string_index key, const char *value ) {\n            itt_metadata_str_add_v7( domain, addr, addr_extra, key, value ); \n        }\n\n        inline void itt_relation_add( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, \n                                      itt_relation relation, void *addr1, unsigned long long addr1_extra ) {\n            itt_relation_add_v7( domain, addr0, addr0_extra, relation, addr1, addr1_extra );\n        }\n\n        inline void itt_task_begin( itt_domain_enum domain, void *task, unsigned long long task_extra, \n                                                        void *parent, unsigned long long parent_extra, string_index name_index ) {\n            itt_task_begin_v7( domain, task, task_extra, parent, parent_extra, name_index );\n        }\n\n        inline void itt_task_end( itt_domain_enum domain ) {\n            itt_task_end_v7( domain );\n        }\n#endif // __TBB_ITT_STRUCTURE_API\n\n    } // namespace internal\n} // namespace tbb\n\n#endif /* __TBB_profiling_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_resource.rc",
    "content": "// Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n//\n// This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n// you can redistribute it and/or modify it under the terms of the GNU General Public License\n// version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n// See  the GNU General Public License for more details.   You should have received a copy of\n// the  GNU General Public License along with Threading Building Blocks; if not, write to the\n// Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n//\n// As a special exception,  you may use this file  as part of a free software library without\n// restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n// functions from this file, or you compile this file and link it with other files to produce\n// an executable,  this file does not by itself cause the resulting executable to be covered\n// by the GNU General Public License. 
This exception does not however invalidate any other\n// reasons why the executable file might be covered by the GNU General Public License.\n\n// Microsoft Visual C++ generated resource script.\n//\n#ifdef APSTUDIO_INVOKED\n#ifndef APSTUDIO_READONLY_SYMBOLS\n#define _APS_NO_MFC                     1\n#define _APS_NEXT_RESOURCE_VALUE        102\n#define _APS_NEXT_COMMAND_VALUE         40001\n#define _APS_NEXT_CONTROL_VALUE         1001\n#define _APS_NEXT_SYMED_VALUE           101\n#endif\n#endif\n\n#define APSTUDIO_READONLY_SYMBOLS\n/////////////////////////////////////////////////////////////////////////////\n//\n// Generated from the TEXTINCLUDE 2 resource.\n//\n#include <winresrc.h>\n#define ENDL \"\\r\\n\"\n#include \"tbb_version.h\"\n\n/////////////////////////////////////////////////////////////////////////////\n#undef APSTUDIO_READONLY_SYMBOLS\n\n/////////////////////////////////////////////////////////////////////////////\n// Neutral resources\n\n//#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU)\n#ifdef _WIN32\nLANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL\n#pragma code_page(1252)\n#endif //_WIN32\n\n/////////////////////////////////////////////////////////////////////////////\n// manifest integration\n#ifdef TBB_MANIFEST\n#include \"winuser.h\"\n2 RT_MANIFEST tbbmanifest.exe.manifest\n#endif\n\n/////////////////////////////////////////////////////////////////////////////\n//\n// Version\n//\n\nVS_VERSION_INFO VERSIONINFO\n FILEVERSION TBB_VERNUMBERS\n PRODUCTVERSION TBB_VERNUMBERS\n FILEFLAGSMASK 0x17L\n#ifdef _DEBUG\n FILEFLAGS 0x1L\n#else\n FILEFLAGS 0x0L\n#endif\n FILEOS 0x40004L\n FILETYPE 0x2L\n FILESUBTYPE 0x0L\nBEGIN\n    BLOCK \"StringFileInfo\"\n    BEGIN\n        BLOCK \"000004b0\"\n        BEGIN\n            VALUE \"CompanyName\", \"Intel Corporation\\0\"\n            VALUE \"FileDescription\", \"Intel(R) Threading Building Blocks library\\0\"\n            VALUE \"FileVersion\", TBB_VERSION \"\\0\"\n            VALUE \"LegalCopyright\", 
\"Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\\0\"\n            VALUE \"LegalTrademarks\", \"\\0\"\n#ifndef TBB_USE_DEBUG\n            VALUE \"OriginalFilename\", \"tbb.dll\\0\"\n#else\n            VALUE \"OriginalFilename\", \"tbb_debug.dll\\0\"\n#endif\n            VALUE \"ProductName\", \"Intel(R) Threading Building Blocks for Windows\\0\"\n            VALUE \"ProductVersion\", TBB_VERSION \"\\0\"\n            VALUE \"PrivateBuild\", \"\\0\"\n            VALUE \"SpecialBuild\", \"\\0\"\n        END\n    END\n    BLOCK \"VarFileInfo\"\n    BEGIN\n        VALUE \"Translation\", 0x0, 1200\n    END\nEND\n\n//#endif    // Neutral resources\n/////////////////////////////////////////////////////////////////////////////\n\n\n#ifndef APSTUDIO_INVOKED\n/////////////////////////////////////////////////////////////////////////////\n//\n// Generated from the TEXTINCLUDE 3 resource.\n//\n\n\n/////////////////////////////////////////////////////////////////////////////\n#endif    // not APSTUDIO_INVOKED\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_statistics.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb_statistics.h\"\n\n#if __TBB_STATISTICS\n\n#include <climits>\n#include <cstdarg>\n#if __TBB_STATISTICS_STDOUT\n#include <cstdio>\n#endif\n\n#include \"tbb/spin_mutex.h\"\n\nnamespace tbb {\nnamespace internal {\n\n//! Human readable titles of statistics groups defined by statistics_groups enum.\n/** The order of this vector elements must correspond to the statistics_counters \n    structure layout. 
**/\nconst char* StatGroupTitles[] = { \n    \"task objects\", \"tasks executed\", \"stealing attempts\", \"task proxies\", \"arena\", \"market\", \"priority ops\", \"prio ops details\"\n};\n\n//! Human readable titles of statistics elements defined by statistics_counters struct.\n/** The order of this vector elements must correspond to the statistics_counters \n    structure layout (with NULLs interspersed to separate groups). **/\nconst char* StatFieldTitles[] = {\n    /*task objects*/        \"active\", \"freed\", \"big\", NULL,\n    /*tasks executed*/      \"total\", \"w/o spawn\", NULL,\n    /*stealing attempts*/   \"succeeded\", \"failed\", \"conflicts\", \"backoffs\", NULL,\n    /*task proxies*/        \"mailed\", \"revoked\", \"stolen\", \"bypassed\", \"ignored\", NULL,\n    /*arena*/               \"switches\", \"roundtrips\", \"avg.conc\", \"avg.allot\", NULL,\n    /*market*/              \"roundtrips\", NULL,\n    /*priority ops*/        \"ar.switch\", \"mkt.switch\", \"ar.reset\", \"ref.fixup\", \"avg.ar.pr\", \"avg.mkt.pr\", NULL,\n    /*prio ops details*/    \"winnows\", \"reloads\", \"orphaned\", \"winnowed\", \"reloaded\", NULL\n};\n\n//! Class for logging statistics\n/** There should be only one instance of this class. \n    Results are written to a file \"statistics.txt\" in tab-separated format. 
*/\nclass statistics_logger {\npublic:\n    statistics_logger () {\n        __TBB_ASSERT( sg_end - 1 == 1 << (sizeof(StatGroupTitles)/sizeof(*StatGroupTitles) - 1), NULL );\n\n        my_file = fopen(\"statistics.txt\",\"w\");\n        if( !my_file )\n            perror(\"fopen(\\\"statistics.txt\\\"\\\")\");\n        // Initialize groups dump layout info\n        group_start_field[0] = 0;\n        for ( size_t i = 0, j = 0; i < NumGroups; ++i, ++j ) {\n            __TBB_ASSERT( StatFieldTitles[j], \"Empty group occurred\" );\n            while ( StatFieldTitles[j] )\n                ++j;\n            group_start_field[i + 1] = j - i; // -i accounts for preceding NULL separators\n        }\n        __TBB_ASSERT( group_start_field[NumGroups] == statistics_counters::size(),\n                      \"Wrong number of elements in StatFieldTitles\" );\n        dump( \"%-*s\", IDColumnWidth, \"\");\n        process_groups( &statistics_logger::print_group_title );\n        dump( \"%-*s\", IDColumnWidth, \"ID\");\n        process_groups( &statistics_logger::print_field_titles );\n    }\n\n    ~statistics_logger () { fclose(my_file); }\n\n    void record( const statistics_counters& c, size_t id ) {\n        spin_mutex::scoped_lock lock(my_mutex);\n        counters_to_dump = &c;\n#if __TBB_STATISTICS_TOTALS_ONLY\n        if ( id == arena_counters_total ) {\n            dump( \"%-*s\", IDColumnWidth, \"Tot\" );\n            process_groups( &statistics_logger::print_field_values );\n        }\n#else /* !__TBB_STATISTICS_TOTALS_ONLY */\n        const char* idString = NULL;\n        switch ( id ) {\n        case 0:\n            idString = \"M\"; break;\n        case workers_counters_total:\n            idString = \"Wtot\"; break;\n        case arena_counters_total:\n            idString = \"Tot\"; break;\n        default:\n            dump( \"W%-*u\", IDColumnWidth - 1, id );\n        }\n        if ( idString )\n            dump( \"%-*s\", IDColumnWidth, idString );\n        
process_groups( &statistics_logger::print_field_values );\n#endif /* !__TBB_STATISTICS_TOTALS_ONLY */\n    }\nprivate:\n    static const size_t IDColumnWidth = 5;\n    static const size_t StatisticsColumnWidth = 10;\n    static const size_t NumGroups = sizeof(StatGroupTitles)/sizeof(char*);\n\n    //! File into which statistics are written.\n    FILE* my_file;\n    //! Mutex that serializes accesses to my_file\n    spin_mutex my_mutex;\n    //! Indices of the each group's first field in statistics_counters struct.\n    /** An extra element is used to track the total number of statistics fields. **/\n    size_t group_start_field[NumGroups + 1];\n    //! Currently processed set of counters.\n    const statistics_counters* counters_to_dump;\n\n    static const size_t NumFields = sizeof(StatFieldTitles)/sizeof(*StatFieldTitles) - NumGroups;\n    bool averages_fields[NumFields];\n\n    void dump ( char const* fmt, ... ) {\n        va_list args;\n        if ( my_file ) {\n            va_start( args, fmt );\n            vfprintf( my_file, fmt, args );\n            va_end( args );\n        }\n#if __TBB_STATISTICS_STDOUT\n        va_start( args, fmt );\n        vprintf( fmt, args );\n        va_end( args );\n#endif\n    }\n\n    void process_groups ( void (statistics_logger::*per_group_action)(size_t group_idx) ) {\n        for ( size_t i = 0, group_flag = 1; i < NumGroups; ++i, group_flag <<= 1 ) {\n            __TBB_ASSERT( group_flag < sg_end, \"StatGroupTitles contents is incompatible with statistics_groups definition\" );\n            if ( __TBB_ActiveStatisticsGroups & group_flag )\n                (this->*per_group_action)( i );\n        }\n        dump( \"\\n\" );\n    }\n\n    void print_group_title ( size_t group_idx ) {\n        dump( \"%-*s\", (group_start_field[group_idx + 1] - group_start_field[group_idx]) * (StatisticsColumnWidth + 1),\n                        StatGroupTitles[group_idx] );\n    }\n\n    void print_field_titles ( size_t group_idx ) {\n        
// +group_idx accounts for preceding NULL separators\n        size_t i = group_start_field[group_idx] + group_idx;\n        while ( StatFieldTitles[i] ) {\n            averages_fields[i - group_idx] = strncmp(StatFieldTitles[i], \"avg.\", 4) == 0;\n            dump( \"%-*s \", StatisticsColumnWidth, StatFieldTitles[i++] );\n        }\n    }\n\n    void print_field_values ( size_t group_idx ) {\n        size_t begin = group_start_field[group_idx],\n               end = group_start_field[group_idx + 1];\n        for ( size_t i = begin; i < end; ++i ) {\n            if ( averages_fields[i] )\n                dump( \"%-*.2f \", StatisticsColumnWidth, (double)counters_to_dump->field(i)/counters_to_dump->tasks_executed );\n            else\n                dump( \"%-*ld \", StatisticsColumnWidth, counters_to_dump->field(i) );\n        }\n    }\n}; // class statistics_logger\n\nstatic statistics_logger the_statistics;\n\nvoid dump_statistics ( const statistics_counters& c, size_t id ) {\n    the_statistics.record(c, id);\n}\n\n} // namespace internal\n} // namespace tbb\n\n#endif /* __TBB_STATISTICS */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_statistics.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_tbb_statistics_H\n#define _TBB_tbb_statistics_H\n\n/**\n    This file defines parameters of the internal statistics collected by the TBB\n    library (currently by the task scheduler only).\n    \n    Statistics is accumulated separately in each thread and is dumped when \n    the scheduler instance associated with the given  thread is destroyed.\n    For apps with multiple master threads or with the same master repeatedly\n    initializing and then deinitializing task scheduler this results in TBB\n    workers statistics getting inseparably mixed.\n    \n    Therefore statistics is accumulated in arena slots, and should be dumped\n    when arena is destroyed. This separates statistics collected for each\n    scheduler activity region in each master thread.\n\n    With the current RML implementation (TBB 2.2, 3.0) to avoid complete loss of \n    statistics data during app shutdown (because of lazy workers deinitialization \n    logic) set __TBB_STATISTICS_EARLY_DUMP macro to write the statistics at the \n    moment a master thread deinitializes its scheduler. 
This may happen a little \n    earlier than the moment of arena destruction resulting in the following undesired\n    (though usually tolerable) effects:\n    - a few events related to unsuccessful stealing or thread pool activity may be lost,\n    - statistics may be substantially incomplete in case of FIFO tasks used in \n      the FAF mode.\n\n    Macro __TBB_STATISTICS_STDOUT and global variable __TBB_ActiveStatisticsGroups\n    defined below can be used to configure the statistics output.\n\n    To add new counter:\n    1) Insert it into the appropriate group range in statistics_counters;\n    2) Insert the corresponding field title into StatFieldTitles (preserving \n       relative order of the fields).\n\n    To add new counters group:\n    1) Insert new group bit flag into statistics_groups;\n    2) Insert the new group title into StatGroupTitles (preserving \n       relative order of the groups).\n    3) Add counter belonging to the new group as described above\n**/\n\n#include \"tbb/tbb_stddef.h\"\n\n#ifndef __TBB_STATISTICS\n#define __TBB_STATISTICS 0\n#endif /* __TBB_STATISTICS */\n\n#if __TBB_STATISTICS\n\n#include <string.h>  // for memset\n\n//! Dump counters into stdout as well.\n/** By default statistics counters are written to the file \"statistics.txt\" only. **/\n#define __TBB_STATISTICS_STDOUT 1\n\n//! Dump only totals for all threads in the given arena\n/** By default statistics counters for each arena slot are dumped separately, as\n    well as the subtotal for workers. **/\n#define __TBB_STATISTICS_TOTALS_ONLY 1\n\n//! Dump statistics for an arena when its master completes\n/** By default (when this macro is not set) the statistics is sent to output when\n    arena object is destroyed. But with the current lazy workers termination\n    logic default behavior may result in loosing all statistics output. **/\n#define __TBB_STATISTICS_EARLY_DUMP 1\n\n#define GATHER_STATISTIC(x) (x)\n\nnamespace tbb {\nnamespace internal {\n\n//! 
Groups of statistics counters.\n/** The order of enumerators must be the same as the order of the corresponding\n    field groups in the statistics_counters structure. **/\nenum statistics_groups {\n    sg_task_allocation = 0x01,\n    sg_task_execution = 0x02,\n    sg_stealing = 0x04,\n    sg_affinity = 0x08,\n    sg_arena = 0x10,\n    sg_market = 0x20,\n    sg_prio = 0x40,\n    sg_prio_ex = 0x80,\n    // List end marker. Insert new groups only before it.\n    sg_end\n};\n\n//! Groups of counters to output\nconst uintptr_t __TBB_ActiveStatisticsGroups = sg_task_execution | sg_stealing | sg_affinity | sg_arena | sg_market;\n\n//! A set of various statistics counters that are updated by the library on per thread basis.\n/** All the fields must be of the same type (statistics_counters::counter_type).\n    This is necessary to allow reinterpreting this structure as an array. **/\nstruct statistics_counters {\n    typedef long counter_type;\n\n    // Group: sg_task_allocation\n    // Counters in this group can have negative values as the tasks migrate across \n    // threads while the associated counters are updated in the current thread only\n    // to avoid data races\n    \n    //! Number of tasks allocated and not yet destroyed\n    counter_type active_tasks;\n    //! Number of task corpses stored for future reuse\n    counter_type free_list_length;\n    //! Number of big tasks allocated during the run\n    /** To find total number of tasks malloc'd, compute (big_tasks+my_small_task_count) */\n    counter_type big_tasks;\n    \n    // Group: sg_task_execution\n\n    //! Number of tasks executed\n    counter_type tasks_executed;\n    //! Number of elided spawns\n    counter_type spawns_bypassed;\n    \n    // Group: sg_stealing\n\n    //! Number of tasks successfully stolen\n    counter_type steals_committed;\n    //! Number of failed stealing attempts\n    counter_type steals_failed;\n    //! 
Number of failed attempts to lock victim's task pool\n    counter_type thieves_conflicts;\n    //! Number of times thief backed off because of the collision with the owner\n    counter_type thief_backoffs;\n\n    // Group: sg_affinity\n\n    //! Number of tasks received from mailbox\n    counter_type mails_received;\n    //! Number of affinitized tasks executed by the owner\n    /** Goes as \"revoked\" in statistics printout. **/\n    counter_type proxies_executed;\n    //! Number of affinitized tasks intercepted by thieves \n    counter_type proxies_stolen;\n    //! Number of proxy bypasses by thieves during stealing\n    counter_type proxies_bypassed;\n    //! Number of affinitized tasks executed by the owner via scheduler bypass mechanism\n    counter_type affinity_ignored;\n\n    // Group: sg_arena\n\n    //! Number of times the state of arena switched between \"full\" and \"empty\"\n    counter_type gate_switches;\n    //! Number of times workers left an arena and returned into the market\n    counter_type arena_roundtrips;\n    // !Average concurrency level of this arena\n    counter_type avg_arena_concurrency;\n    //! Average assigned priority\n    counter_type avg_assigned_workers;\n\n    // Group: sg_market\n\n    //! Number of times workers left the market and returned into RML\n    counter_type market_roundtrips;\n\n    // Group; sg_prio\n\n    //! Number of arena priority switches\n    counter_type arena_prio_switches;\n    //! Number of market priority switches\n    counter_type market_prio_switches;\n    //! Number of arena priority switches\n    counter_type arena_prio_resets;\n    //! Number of reference priority source fixups to avoid deadlock\n    counter_type prio_ref_fixups;\n    //! Average arena priority\n    counter_type avg_arena_prio;\n    //! Average market priority\n    counter_type avg_market_prio;\n\n    // Group; sg_prio_ex\n\n    //! Number of times local task pools were winnowed\n    counter_type prio_winnowings;\n    //! 
Number of times secondary task pools were searched for top priority tasks\n    counter_type prio_reloads;\n    //! Number of times secondary task pools were abandoned by quitting workers\n    counter_type prio_orphanings;\n    //! Number of tasks offloaded into secondary task pools\n    counter_type prio_tasks_offloaded;\n    //! Number of tasks reloaded from secondary task pools\n    counter_type prio_tasks_reloaded;\n\n    // Constructor and helpers\n\n    statistics_counters() { reset(); }\n\n    void reset () { memset( this, 0, sizeof(statistics_counters) ); }\n\n    counter_type& field ( size_t index ) { return reinterpret_cast<counter_type*>(this)[index]; }\n\n    const counter_type& field ( size_t index ) const { return reinterpret_cast<const counter_type*>(this)[index]; }\n\n    static size_t size () { return sizeof(statistics_counters) / sizeof(counter_type); }\n\n    const statistics_counters& operator += ( const statistics_counters& rhs ) {\n        for ( size_t i = 0; i < size(); ++i )\n            field(i) += rhs.field(i);\n        return *this;\n    }\n}; // statistics_counters\n\nstatic const size_t workers_counters_total = (size_t)-1;\nstatic const size_t arena_counters_total = (size_t)-2;\n\nvoid dump_statistics ( const statistics_counters& c, size_t id );\n\n} // namespace internal\n} // namespace tbb\n\n#else /* !__TBB_STATISTICS */\n\n#define GATHER_STATISTIC(x) ((void)0)\n\n#endif /* !__TBB_STATISTICS */\n\n#endif /* _TBB_tbb_statistics_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_stddef.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tbb_stddef_H\n#define __TBB_tbb_stddef_H\n\n// Marketing-driven product version\n#define TBB_VERSION_MAJOR 4\n#define TBB_VERSION_MINOR 3\n\n// Engineering-focused interface version\n#define TBB_INTERFACE_VERSION 8000\n#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000\n\n// The oldest major interface version still supported\n// To be used in SONAME, manifests, etc.\n#define TBB_COMPATIBLE_INTERFACE_VERSION 2\n\n#define __TBB_STRING_AUX(x) #x\n#define __TBB_STRING(x) __TBB_STRING_AUX(x)\n\n// We do not need defines below for resource processing on windows\n#if !defined RC_INVOKED\n\n// Define groups for Doxygen documentation\n/**\n * @defgroup algorithms         Algorithms\n * @defgroup containers         Containers\n * @defgroup memory_allocation  Memory Allocation\n * @defgroup synchronization    Synchronization\n * @defgroup timing             Timing\n * @defgroup task_scheduling    Task Scheduling\n */\n\n// Simple text that is displayed on the main page of Doxygen documentation.\n/**\n * \\mainpage Main Page\n *\n * Click the tabs above for information about the\n * - <a href=\"./modules.html\">Modules</a> (groups of functionality) implemented by the library\n * - <a href=\"./annotated.html\">Classes</a> provided by the library\n * - <a href=\"./files.html\">Files</a> constituting the library.\n * .\n * Please note that significant part of TBB functionality is implemented in the form of\n * template functions, descriptions of which are not accessible on the <a href=\"./annotated.html\">Classes</a>\n * tab. 
Use <a href=\"./modules.html\">Modules</a> or <a href=\"./namespacemembers.html\">Namespace/Namespace Members</a>\n * tabs to find them.\n *\n * Additional pieces of information can be found here\n * - \\subpage concepts\n * .\n */\n\n/** \\page concepts TBB concepts\n\n    A concept is a set of requirements to a type, which are necessary and sufficient\n    for the type to model a particular behavior or a set of behaviors. Some concepts\n    are specific to a particular algorithm (e.g. algorithm body), while other ones\n    are common to several algorithms (e.g. range concept).\n\n    All TBB algorithms make use of different classes implementing various concepts.\n    Implementation classes are supplied by the user as type arguments of template\n    parameters and/or as objects passed as function call arguments. The library\n    provides predefined  implementations of some concepts (e.g. several kinds of\n    \\ref range_req \"ranges\"), while other ones must always be implemented by the user.\n\n    TBB defines a set of minimal requirements each concept must conform to. 
Here is\n    the list of different concepts hyperlinked to the corresponding requirements specifications:\n    - \\subpage range_req\n    - \\subpage parallel_do_body_req\n    - \\subpage parallel_for_body_req\n    - \\subpage parallel_reduce_body_req\n    - \\subpage parallel_scan_body_req\n    - \\subpage parallel_sort_iter_req\n**/\n\n// tbb_config.h should be included the first since it contains macro definitions used in other headers\n#include \"tbb_config.h\"\n\n#if _MSC_VER >=1400\n    #define __TBB_EXPORTED_FUNC   __cdecl\n    #define __TBB_EXPORTED_METHOD __thiscall\n#else\n    #define __TBB_EXPORTED_FUNC\n    #define __TBB_EXPORTED_METHOD\n#endif\n\n#if __INTEL_COMPILER || _MSC_VER\n#define __TBB_NOINLINE(decl) __declspec(noinline) decl\n#elif __GNUC__\n#define __TBB_NOINLINE(decl) decl __attribute__ ((noinline))\n#else\n#define __TBB_NOINLINE(decl) decl\n#endif\n\n#if __TBB_NOEXCEPT_PRESENT\n#define __TBB_NOEXCEPT(expression) noexcept(expression)\n#else\n#define __TBB_NOEXCEPT(expression)\n#endif\n\n#include <cstddef>      /* Need size_t and ptrdiff_t */\n\n#if _MSC_VER\n    #define __TBB_tbb_windef_H\n    #include \"internal/_tbb_windef.h\"\n    #undef __TBB_tbb_windef_H\n#endif\n#if !defined(_MSC_VER) || _MSC_VER>=1600\n    #include <stdint.h>\n#endif\n\n//! Type for an assertion handler\ntypedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment );\n\n#if TBB_USE_ASSERT\n\n     #define __TBB_ASSERT_NS(predicate,message,ns) ((predicate)?((void)0) : ns::assertion_failure(__FILE__,__LINE__,#predicate,message))\n    //! Assert that x is true.\n    /** If x is false, print assertion failure message.\n        If the comment argument is not NULL, it is printed as part of the failure message.\n        The comment argument has no other effect. 
*/\n#if __TBBMALLOC_BUILD\nnamespace rml { namespace internal {\n    #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_NS(predicate,message,rml::internal)\n#else\nnamespace tbb {\n    #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_NS(predicate,message,tbb)\n#endif\n\n    #define __TBB_ASSERT_EX __TBB_ASSERT\n\n    //! Set assertion handler and return previous value of it.\n    assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler );\n\n    //! Process an assertion failure.\n    /** Normally called from __TBB_ASSERT macro.\n        If assertion handler is null, print message for assertion failure and abort.\n        Otherwise call the assertion handler. */\n    void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment );\n\n#if __TBBMALLOC_BUILD\n}}  // namespace rml::internal\n#else\n} // namespace tbb\n#endif\n#else /* !TBB_USE_ASSERT */\n\n    //! No-op version of __TBB_ASSERT.\n    #define __TBB_ASSERT(predicate,comment) ((void)0)\n    //! \"Extended\" version is useful to suppress warnings if a variable is only used with an assert\n    #define __TBB_ASSERT_EX(predicate,comment) ((void)(1 && (predicate)))\n\n#endif /* !TBB_USE_ASSERT */\n\n//! 
The namespace tbb contains all components of the library.\nnamespace tbb {\n\n#if _MSC_VER && _MSC_VER<1600\n    namespace internal {\n        typedef __int8 int8_t;\n        typedef __int16 int16_t;\n        typedef __int32 int32_t;\n        typedef __int64 int64_t;\n        typedef unsigned __int8 uint8_t;\n        typedef unsigned __int16 uint16_t;\n        typedef unsigned __int32 uint32_t;\n        typedef unsigned __int64 uint64_t;\n    } // namespace internal\n#else /* Posix */\n    namespace internal {\n        using ::int8_t;\n        using ::int16_t;\n        using ::int32_t;\n        using ::int64_t;\n        using ::uint8_t;\n        using ::uint16_t;\n        using ::uint32_t;\n        using ::uint64_t;\n    } // namespace internal\n#endif /* Posix */\n\n    using std::size_t;\n    using std::ptrdiff_t;\n\n//! The function returns the interface version of the TBB shared library being used.\n/**\n * The version it returns is determined at runtime, not at compile/link time.\n * So it can be different than the value of TBB_INTERFACE_VERSION obtained at compile time.\n */\nextern \"C\" int __TBB_EXPORTED_FUNC TBB_runtime_interface_version();\n\n//! Dummy type that distinguishes splitting constructor from copy constructor.\n/**\n * See description of parallel_for and parallel_reduce for example usages.\n * @ingroup algorithms\n */\nclass split {\n};\n\n//! 
Type enables transmission of splitting proportion from partitioners to range objects\n/**\n * In order to make use of such facility Range objects must implement\n * splitting constructor with this type passed and initialize static\n * constant boolean field 'is_divisible_in_proportion' with the value\n * of 'true'\n */\nclass proportional_split {\npublic:\n    proportional_split(size_t _left = 1, size_t _right = 1) : my_left(_left), my_right(_right) { }\n    proportional_split(split) : my_left(1), my_right(1) { }\n\n    size_t left() const { return my_left; }\n    size_t right() const { return my_right; }\n\n    void set_proportion(size_t _left, size_t _right) {\n        my_left = _left;\n        my_right = _right;\n    }\n\n    // used when range does not support proportional split\n    operator split() const { return split(); }\nprivate:\n    size_t my_left, my_right;\n};\n\n/**\n * @cond INTERNAL\n * @brief Identifiers declared inside namespace internal should never be used directly by client code.\n */\nnamespace internal {\n\n//! 
Compile-time constant that is upper bound on cache line/sector size.\n/** It should be used only in situations where having a compile-time upper\n    bound is more useful than a run-time exact answer.\n    @ingroup memory_allocation */\nconst size_t NFS_MaxLineSize = 128;\n\n/** Label for data that may be accessed from different threads, and that may eventually become wrapped\n    in a formal atomic type.\n\n    Note that no problems have yet been observed relating to the definition currently being empty,\n    even if at least \"volatile\" would seem to be in order to avoid data sometimes temporarily hiding\n    in a register (although \"volatile\" as a \"poor man's atomic\" lacks several other features of a proper\n    atomic, some of which are now provided instead through specialized functions).\n\n    Note that usage is intentionally compatible with a definition as qualifier \"volatile\",\n    both as a way to have the compiler help enforce use of the label and to quickly rule out\n    one potential issue.\n\n    Note however that, with some architecture/compiler combinations, e.g. on IA-64 architecture, \"volatile\"\n    also has non-portable memory semantics that are needlessly expensive for \"relaxed\" operations.\n\n    Note that this must only be applied to data that will not change bit patterns when cast to/from\n    an integral type of the same length; tbb::atomic must be used instead for, e.g., floating-point types.\n\n    TODO: apply wherever relevant **/\n#define __TBB_atomic // intentionally empty, see above\n\ntemplate<class T, size_t S, size_t R>\nstruct padded_base : T {\n    char pad[S - R];\n};\ntemplate<class T, size_t S> struct padded_base<T, S, 0> : T {};\n\n//! Pads type T to fill out to a multiple of cache line size.\ntemplate<class T, size_t S = NFS_MaxLineSize>\nstruct padded : padded_base<T, S, sizeof(T) % S> {};\n\n//! 
Extended variant of the standard offsetof macro\n/** The standard offsetof macro is not sufficient for TBB as it can be used for\n    POD-types only. The constant 0x1000 (not NULL) is necessary to appease GCC. **/\n#define __TBB_offsetof(class_name, member_name) \\\n    ((ptrdiff_t)&(reinterpret_cast<class_name*>(0x1000)->member_name) - 0x1000)\n\n//! Returns address of the object containing a member with the given name and address\n#define __TBB_get_object_ref(class_name, member_name, member_addr) \\\n    (*reinterpret_cast<class_name*>((char*)member_addr - __TBB_offsetof(class_name, member_name)))\n\n//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info\nvoid __TBB_EXPORTED_FUNC handle_perror( int error_code, const char* aux_info );\n\n#if TBB_USE_EXCEPTIONS\n    #define __TBB_TRY try\n    #define __TBB_CATCH(e) catch(e)\n    #define __TBB_THROW(e) throw e\n    #define __TBB_RETHROW() throw\n#else /* !TBB_USE_EXCEPTIONS */\n    inline bool __TBB_false() { return false; }\n    #define __TBB_TRY\n    #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() )\n    #define __TBB_THROW(e) ((void)0)\n    #define __TBB_RETHROW() ((void)0)\n#endif /* !TBB_USE_EXCEPTIONS */\n\n//! Report a runtime warning.\nvoid __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... );\n\n#if TBB_USE_ASSERT\nstatic void* const poisoned_ptr = reinterpret_cast<void*>(-1);\n\n//! Set p to invalid pointer value.\n//  Also works for regular (non-__TBB_atomic) pointers.\ntemplate<typename T>\ninline void poison_pointer( T* __TBB_atomic & p ) { p = reinterpret_cast<T*>(poisoned_ptr); }\n\n/** Expected to be used in assertions only, thus no empty form is defined. **/\ntemplate<typename T>\ninline bool is_poisoned( T* p ) { return p == reinterpret_cast<T*>(poisoned_ptr); }\n#else\ntemplate<typename T>\ninline void poison_pointer( T* __TBB_atomic & ) {/*do nothing*/}\n#endif /* !TBB_USE_ASSERT */\n\n//! 
Cast between unrelated pointer types.\n/** This method should be used sparingly as a last resort for dealing with\n    situations that inherently break strict ISO C++ aliasing rules. */\n// T is a pointer type because it will be explicitly provided by the programmer as a template argument;\n// U is a referent type to enable the compiler to check that \"ptr\" is a pointer, deducing U in the process.\ntemplate<typename T, typename U>\ninline T punned_cast( U* ptr ) {\n    uintptr_t x = reinterpret_cast<uintptr_t>(ptr);\n    return reinterpret_cast<T>(x);\n}\n\n//! Base class for types that should not be assigned.\nclass no_assign {\n    // Deny assignment\n    void operator=( const no_assign& );\npublic:\n#if __GNUC__\n    //! Explicitly define default construction, because otherwise gcc issues gratuitous warning.\n    no_assign() {}\n#endif /* __GNUC__ */\n};\n\n//! Base class for types that should not be copied or assigned.\nclass no_copy: no_assign {\n    //! Deny copy construction\n    no_copy( const no_copy& );\npublic:\n    //! Allow default construction\n    no_copy() {}\n};\n\n#if TBB_DEPRECATED_MUTEX_COPYING\nclass mutex_copy_deprecated_and_disabled {};\n#else\n// By default various implementations of mutexes are not copy constructible\n// and not copy assignable.\nclass mutex_copy_deprecated_and_disabled : no_copy {};\n#endif\n\n//! A function to check if passed in pointer is aligned on a specific border\ntemplate<typename T>\ninline bool is_aligned(T* pointer, uintptr_t alignment) {\n    return 0==((uintptr_t)pointer & (alignment-1));\n}\n\n//! A function to check if passed integer is a power of 2\ntemplate<typename integer_type>\ninline bool is_power_of_two(integer_type arg) {\n    return arg && (0 == (arg & (arg - 1)));\n}\n\n//! 
A function to compute arg modulo divisor where divisor is a power of 2.\ntemplate<typename argument_integer_type, typename divisor_integer_type>\ninline argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor) {\n    // Divisor is assumed to be a power of two (which is valid for current uses).\n    __TBB_ASSERT( is_power_of_two(divisor), \"Divisor should be a power of two\" );\n    return (arg & (divisor - 1));\n}\n\n\n//! A function to determine if \"arg is a multiplication of a number and a power of 2\".\n// i.e. for strictly positive i and j, with j a power of 2,\n// determines whether i==j<<k for some nonnegative k (so i==j yields true).\ntemplate<typename argument_integer_type, typename divisor_integer_type>\ninline bool is_power_of_two_factor(argument_integer_type arg, divisor_integer_type divisor) {\n    // Divisor is assumed to be a power of two (which is valid for current uses).\n    __TBB_ASSERT( is_power_of_two(divisor), \"Divisor should be a power of two\" );\n    return 0 == (arg & (arg - divisor));\n}\n\n//! Utility template function to prevent \"unused\" warnings by various compilers.\ntemplate<typename T>\nvoid suppress_unused_warning( const T& ) {}\n\n// Struct to be used as a version tag for inline functions.\n/** Version tag can be necessary to prevent loader on Linux from using the wrong\n    symbol in debug builds (when inline functions are compiled as out-of-line). **/\nstruct version_tag_v3 {};\n\ntypedef version_tag_v3 version_tag;\n\n} // internal\n} // tbb\n\n// Following is a set of classes and functions typically used in compile-time \"metaprogramming\".\n// TODO: move all that to a separate header\n\n#if __TBB_ALLOCATOR_TRAITS_PRESENT\n#include <memory> //for allocator_traits\n#endif\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT || _LIBCPP_VERSION\n#include <utility> // for std::move\n#endif\n\nnamespace tbb {\nnamespace internal {\n\n//! 
Class for determining type of std::allocator<T>::value_type.\ntemplate<typename T>\nstruct allocator_type {\n    typedef T value_type;\n};\n\n#if _MSC_VER\n//! Microsoft std::allocator has non-standard extension that strips const from a type.\ntemplate<typename T>\nstruct allocator_type<const T> {\n    typedef T value_type;\n};\n#endif\n\n// Ad-hoc implementation of true_type & false_type\n// Intended strictly for internal use! For public APIs (traits etc), use C++11 analogues.\ntemplate <bool v>\nstruct bool_constant {\n    static /*constexpr*/ const bool value = v;\n};\ntypedef bool_constant<true> true_type;\ntypedef bool_constant<false> false_type;\n\n#if __TBB_ALLOCATOR_TRAITS_PRESENT\nusing std::allocator_traits;\n#else\ntemplate<typename allocator>\nstruct allocator_traits{\n    typedef tbb::internal::false_type propagate_on_container_move_assignment;\n};\n#endif\n\n//! A template to select either 32-bit or 64-bit constant as compile time, depending on machine word size.\ntemplate <unsigned u, unsigned long long ull >\nstruct select_size_t_constant {\n    //Explicit cast is needed to avoid compiler warnings about possible truncation.\n    //The value of the right size,   which is selected by ?:, is anyway not truncated or promoted.\n    static const size_t value = (size_t)((sizeof(size_t)==sizeof(u)) ? u : ull);\n};\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\nusing std::move;\n#elif defined(_LIBCPP_NAMESPACE)\n// libc++ defines \"pre-C++11 move\" similarly to our; use it to avoid name conflicts in some cases.\nusing std::_LIBCPP_NAMESPACE::move;\n#else\ntemplate <typename T>\nT& move( T& x ) { return x; }\n#endif\n\ntemplate <bool condition>\nstruct STATIC_ASSERTION_FAILED;\n\ntemplate <>\nstruct STATIC_ASSERTION_FAILED<false> { enum {value=1};};\n\ntemplate<>\nstruct STATIC_ASSERTION_FAILED<true>; //intentionally left undefined to cause compile time error\n\n//! 
@endcond\n}} // namespace tbb::internal\n\n#if    __TBB_STATIC_ASSERT_PRESENT\n#define __TBB_STATIC_ASSERT(condition,msg) static_assert(condition,msg)\n#else\n//please note condition is intentionally inverted to get a bit more understandable error msg\n#define __TBB_STATIC_ASSERT_IMPL1(condition,msg,line)       \\\n    enum {static_assert_on_line_##line = tbb::internal::STATIC_ASSERTION_FAILED<!(condition)>::value}\n\n#define __TBB_STATIC_ASSERT_IMPL(condition,msg,line) __TBB_STATIC_ASSERT_IMPL1(condition,msg,line)\n//! Verify at compile time that passed in condition is hold\n#define __TBB_STATIC_ASSERT(condition,msg) __TBB_STATIC_ASSERT_IMPL(condition,msg,__LINE__)\n#endif\n\n#endif /* RC_INVOKED */\n#endif /* __TBB_tbb_stddef_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_thread.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#if _WIN32||_WIN64\n#include <process.h>        // _beginthreadex()\n#endif\n#include <errno.h>\n#include \"tbb_misc.h\"       // handle_win_error(), ThreadStackSize\n#include \"tbb/tbb_stddef.h\"\n#include \"tbb/tbb_thread.h\"\n#include \"tbb/tbb_allocator.h\"\n#include \"governor.h\"       // default_num_threads()\n#if __TBB_WIN8UI_SUPPORT\n#include <thread>\n#endif\n\nnamespace tbb {\nnamespace internal {\n\n//! Allocate a closure\nvoid* allocate_closure_v3( size_t size )\n{\n    return allocate_via_handler_v3( size );\n}\n\n//! 
Free a closure allocated by allocate_closure_v3\nvoid free_closure_v3( void *ptr )\n{\n    deallocate_via_handler_v3( ptr );\n}\n\nvoid tbb_thread_v3::join()\n{\n    if (!joinable())\n        handle_perror( EINVAL, \"tbb_thread::join\" ); // Invalid argument\n    if (this_tbb_thread::get_id() == get_id())\n        handle_perror( EDEADLK, \"tbb_thread::join\" ); // Resource deadlock avoided\n#if _WIN32||_WIN64\n#if __TBB_WIN8UI_SUPPORT\n    std::thread* thread_tmp=(std::thread*)my_thread_id;\n    thread_tmp->join();\n    delete thread_tmp;\n#else // __TBB_WIN8UI_SUPPORT\n    DWORD status = WaitForSingleObjectEx( my_handle, INFINITE, FALSE );\n    if ( status == WAIT_FAILED )\n        handle_win_error( GetLastError() );\n    BOOL close_stat = CloseHandle( my_handle );\n    if ( close_stat == 0 )\n        handle_win_error( GetLastError() );\n    my_thread_id = 0;\n#endif // __TBB_WIN8UI_SUPPORT\n#else\n    int status = pthread_join( my_handle, NULL );\n    if( status )\n        handle_perror( status, \"pthread_join\" );\n#endif // _WIN32||_WIN64\n    my_handle = 0;\n}\n\nvoid tbb_thread_v3::detach() {\n    if (!joinable())\n        handle_perror( EINVAL, \"tbb_thread::detach\" ); // Invalid argument\n#if _WIN32||_WIN64\n    BOOL status = CloseHandle( my_handle );\n    if ( status == 0 )\n      handle_win_error( GetLastError() );\n    my_thread_id = 0;\n#else\n    int status = pthread_detach( my_handle );\n    if( status )\n        handle_perror( status, \"pthread_detach\" );\n#endif // _WIN32||_WIN64\n    my_handle = 0;\n}\n\nvoid tbb_thread_v3::internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine),\n                                    void* closure ) {\n#if _WIN32||_WIN64\n#if __TBB_WIN8UI_SUPPORT\n    std::thread* thread_tmp=new std::thread(start_routine, closure);\n    my_handle  = thread_tmp->native_handle();\n//  TODO: to find out the way to find thread_id without GetThreadId and other\n//  desktop functions.\n//  Now tbb_thread does have its own 
thread_id that stores std::thread object\n    my_thread_id = (size_t)thread_tmp;\n#else\n    unsigned thread_id;\n    // The return type of _beginthreadex is \"uintptr_t\" on new MS compilers,\n    // and 'unsigned long' on old MS compilers.  uintptr_t works for both.\n    uintptr_t status = _beginthreadex( NULL, ThreadStackSize, start_routine,\n                                     closure, 0, &thread_id );\n    if( status==0 )\n        handle_perror(errno,\"__beginthreadex\");\n    else {\n        my_handle = (HANDLE)status;\n        my_thread_id = thread_id;\n    }\n#endif\n#else\n    pthread_t thread_handle;\n    int status;\n    pthread_attr_t stack_size;\n    status = pthread_attr_init( &stack_size );\n    if( status )\n        handle_perror( status, \"pthread_attr_init\" );\n    status = pthread_attr_setstacksize( &stack_size, ThreadStackSize );\n    if( status )\n        handle_perror( status, \"pthread_attr_setstacksize\" );\n\n    status = pthread_create( &thread_handle, &stack_size, start_routine, closure );\n    if( status )\n        handle_perror( status, \"pthread_create\" );\n    status = pthread_attr_destroy( &stack_size );\n    if( status )\n        handle_perror( status, \"pthread_attr_destroy\" );\n\n    my_handle = thread_handle;\n#endif // _WIN32||_WIN64\n}\n\nunsigned tbb_thread_v3::hardware_concurrency() __TBB_NOEXCEPT(true) {\n    return governor::default_num_threads();\n}\n\ntbb_thread_v3::id thread_get_id_v3() {\n#if _WIN32||_WIN64\n    return tbb_thread_v3::id( GetCurrentThreadId() );\n#else\n    return tbb_thread_v3::id( pthread_self() );\n#endif // _WIN32||_WIN64\n}\n    \nvoid move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 )\n{\n    if (t1.joinable())\n        t1.detach();\n    t1.my_handle = t2.my_handle;\n    t2.my_handle = 0;\n#if _WIN32||_WIN64\n    t1.my_thread_id = t2.my_thread_id;\n    t2.my_thread_id = 0;\n#endif // _WIN32||_WIN64\n}\n\nvoid thread_yield_v3()\n{\n    __TBB_Yield();\n}\n\nvoid thread_sleep_v3(const 
tick_count::interval_t &i)\n{\n#if _WIN32||_WIN64\n     tick_count t0 = tick_count::now();\n     tick_count t1 = t0;\n     for(;;) {\n         double remainder = (i-(t1-t0)).seconds()*1e3;  // milliseconds remaining to sleep\n         if( remainder<=0 ) break;\n         DWORD t = remainder>=INFINITE ? INFINITE-1 : DWORD(remainder);\n#if !__TBB_WIN8UI_SUPPORT\n         Sleep( t );\n#else\n         std::chrono::milliseconds sleep_time( t );\n         std::this_thread::sleep_for( sleep_time );\n#endif\n         t1 = tick_count::now();\n    }\n#else\n    struct timespec req;\n    double sec = i.seconds();\n\n    req.tv_sec = static_cast<long>(sec);\n    req.tv_nsec = static_cast<long>( (sec - req.tv_sec)*1e9 );\n    nanosleep(&req, NULL);\n#endif // _WIN32||_WIN64\n}\n\n} // internal\n} // tbb\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_thread.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tbb_thread_H\n#define __TBB_tbb_thread_H\n\n#include \"tbb_stddef.h\"\n#if _WIN32||_WIN64\n#include \"machine/windows_api.h\"\n#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI\n#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* )\n#if __TBB_WIN8UI_SUPPORT\ntypedef size_t thread_id_type;\n#else  // __TBB_WIN8UI_SUPPORT\ntypedef DWORD thread_id_type;\n#endif // __TBB_WIN8UI_SUPPORT\n#else\n#define __TBB_NATIVE_THREAD_ROUTINE void*\n#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* )\n#include <pthread.h>\n#endif // _WIN32||_WIN64\n\n#include \"tick_count.h\"\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    // Suppress \"C++ exception handler used, but unwind semantics are not enabled\" warning in STL headers\n    #pragma warning (push)\n    #pragma warning (disable: 4530)\n#endif\n\n#include <iosfwd>\n\n#if !TBB_USE_EXCEPTIONS && _MSC_VER\n    #pragma warning (pop)\n#endif\n\nnamespace tbb {\n\nnamespace internal {\n    class tbb_thread_v3;\n}\n\ninline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true);\n\nnamespace internal {\n\n    //! Allocate a closure\n    void* __TBB_EXPORTED_FUNC allocate_closure_v3( size_t size );\n    //! 
Free a closure allocated by allocate_closure_v3\n    void __TBB_EXPORTED_FUNC free_closure_v3( void* );\n   \n    struct thread_closure_base {\n        void* operator new( size_t size ) {return allocate_closure_v3(size);}\n        void operator delete( void* ptr ) {free_closure_v3(ptr);}\n    };\n\n    template<class F> struct thread_closure_0: thread_closure_base {\n        F function;\n\n        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {\n            thread_closure_0 *self = static_cast<thread_closure_0*>(c);\n            self->function();\n            delete self;\n            return 0;\n        }\n        thread_closure_0( const F& f ) : function(f) {}\n    };\n    //! Structure used to pass user function with 1 argument to thread.  \n    template<class F, class X> struct thread_closure_1: thread_closure_base {\n        F function;\n        X arg1;\n        //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll\n        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {\n            thread_closure_1 *self = static_cast<thread_closure_1*>(c);\n            self->function(self->arg1);\n            delete self;\n            return 0;\n        }\n        thread_closure_1( const F& f, const X& x ) : function(f), arg1(x) {}\n    };\n    template<class F, class X, class Y> struct thread_closure_2: thread_closure_base {\n        F function;\n        X arg1;\n        Y arg2;\n        //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll\n        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {\n            thread_closure_2 *self = static_cast<thread_closure_2*>(c);\n            self->function(self->arg1, self->arg2);\n            delete self;\n            return 0;\n        }\n        thread_closure_2( const F& f, const X& x, const Y& y ) : function(f), arg1(x), arg2(y) {}\n    };\n\n    //! 
Versioned thread class.\n    class tbb_thread_v3 {\n#if __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN\n        // Workaround for a compiler bug: declaring the copy constructor as public\n        // enables use of the moving constructor.\n        // The definition is not provided in order to prohibit copying.\n    public:\n#endif\n        tbb_thread_v3(const tbb_thread_v3&); // = delete;   // Deny access\n    public:\n#if _WIN32||_WIN64\n        typedef HANDLE native_handle_type; \n#else\n        typedef pthread_t native_handle_type; \n#endif // _WIN32||_WIN64\n\n        class id;\n        //! Constructs a thread object that does not represent a thread of execution. \n        tbb_thread_v3() __TBB_NOEXCEPT(true) : my_handle(0)\n#if _WIN32||_WIN64\n            , my_thread_id(0)\n#endif // _WIN32||_WIN64\n        {}\n        \n        //! Constructs an object and executes f() in a new thread\n        template <class F> explicit tbb_thread_v3(F f) {\n            typedef internal::thread_closure_0<F> closure_type;\n            internal_start(closure_type::start_routine, new closure_type(f));\n        }\n        //! Constructs an object and executes f(x) in a new thread\n        template <class F, class X> tbb_thread_v3(F f, X x) {\n            typedef internal::thread_closure_1<F,X> closure_type;\n            internal_start(closure_type::start_routine, new closure_type(f,x));\n        }\n        //! 
Constructs an object and executes f(x,y) in a new thread\n        template <class F, class X, class Y> tbb_thread_v3(F f, X x, Y y) {\n            typedef internal::thread_closure_2<F,X,Y> closure_type;\n            internal_start(closure_type::start_routine, new closure_type(f,x,y));\n        }\n\n#if __TBB_CPP11_RVALUE_REF_PRESENT\n        tbb_thread_v3(tbb_thread_v3&& x) __TBB_NOEXCEPT(true)\n            : my_handle(x.my_handle)\n#if _WIN32||_WIN64\n            , my_thread_id(x.my_thread_id)\n#endif\n        {\n            x.internal_wipe();\n        }\n        tbb_thread_v3& operator=(tbb_thread_v3&& x) __TBB_NOEXCEPT(true) {\n            internal_move(x);\n            return *this;\n        }\n    private:\n        tbb_thread_v3& operator=(const tbb_thread_v3& x); // = delete;\n    public:\n#else  // __TBB_CPP11_RVALUE_REF_PRESENT\n        tbb_thread_v3& operator=(tbb_thread_v3& x) {\n            internal_move(x);\n            return *this;\n        }\n#endif // __TBB_CPP11_RVALUE_REF_PRESENT\n\n        void swap( tbb_thread_v3& t ) __TBB_NOEXCEPT(true) {tbb::swap( *this, t );}\n        bool joinable() const __TBB_NOEXCEPT(true) {return my_handle!=0; }\n        //! The completion of the thread represented by *this happens before join() returns.\n        void __TBB_EXPORTED_METHOD join();\n        //! When detach() returns, *this no longer represents the possibly continuing thread of execution.\n        void __TBB_EXPORTED_METHOD detach();\n        ~tbb_thread_v3() {if( joinable() ) detach();}\n        inline id get_id() const __TBB_NOEXCEPT(true);\n        native_handle_type native_handle() { return my_handle; }\n    \n        //! The number of hardware thread contexts.\n        /** Before TBB 3.0 U4 this methods returned the number of logical CPU in\n            the system. 
Currently on Windows, Linux and FreeBSD it returns the\n            number of logical CPUs available to the current process in accordance\n            with its affinity mask.\n            \n            NOTE: The return value of this method never changes after its first\n            invocation. This means that changes in the process affinity mask that\n            took place after this method was first invoked will not affect the\n            number of worker threads in the TBB worker threads pool. **/\n        static unsigned __TBB_EXPORTED_FUNC hardware_concurrency() __TBB_NOEXCEPT(true);\n    private:\n        native_handle_type my_handle; \n#if _WIN32||_WIN64\n        thread_id_type my_thread_id;\n#endif // _WIN32||_WIN64\n\n        void internal_wipe() __TBB_NOEXCEPT(true) {\n            my_handle = 0;\n#if _WIN32||_WIN64\n            my_thread_id = 0;\n#endif\n        }\n        void internal_move(tbb_thread_v3& x) __TBB_NOEXCEPT(true) {\n            if (joinable()) detach();\n            my_handle = x.my_handle;\n#if _WIN32||_WIN64\n            my_thread_id = x.my_thread_id;\n#endif // _WIN32||_WIN64\n            x.internal_wipe();\n        }\n\n        /** Runs start_routine(closure) on another thread and sets my_handle to the handle of the created thread. 
*/\n        void __TBB_EXPORTED_METHOD internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), \n                             void* closure );\n        friend void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 );\n        friend void tbb::swap( tbb_thread_v3& t1, tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true);\n    };\n        \n    class tbb_thread_v3::id { \n#if _WIN32||_WIN64\n        thread_id_type my_id;\n        id( thread_id_type id_ ) : my_id(id_) {}\n#else\n        pthread_t my_id;\n        id( pthread_t id_ ) : my_id(id_) {}\n#endif // _WIN32||_WIN64\n        friend class tbb_thread_v3;\n    public:\n        id() __TBB_NOEXCEPT(true) : my_id(0) {}\n\n        friend bool operator==( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);\n        friend bool operator!=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);\n        friend bool operator<( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);\n        friend bool operator<=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);\n        friend bool operator>( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);\n        friend bool operator>=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);\n        \n        template<class charT, class traits>\n        friend std::basic_ostream<charT, traits>&\n        operator<< (std::basic_ostream<charT, traits> &out, \n                    tbb_thread_v3::id id)\n        {\n            out << id.my_id;\n            return out;\n        }\n        friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();\n    }; // tbb_thread_v3::id\n\n    tbb_thread_v3::id tbb_thread_v3::get_id() const __TBB_NOEXCEPT(true) {\n#if _WIN32||_WIN64\n        return id(my_thread_id);\n#else\n        return id(my_handle);\n#endif // _WIN32||_WIN64\n    }\n    void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 );\n    tbb_thread_v3::id __TBB_EXPORTED_FUNC 
thread_get_id_v3();\n    void __TBB_EXPORTED_FUNC thread_yield_v3();\n    void __TBB_EXPORTED_FUNC thread_sleep_v3(const tick_count::interval_t &i);\n\n    inline bool operator==(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)\n    {\n        return x.my_id == y.my_id;\n    }\n    inline bool operator!=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)\n    {\n        return x.my_id != y.my_id;\n    }\n    inline bool operator<(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)\n    {\n        return x.my_id < y.my_id;\n    }\n    inline bool operator<=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)\n    {\n        return x.my_id <= y.my_id;\n    }\n    inline bool operator>(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)\n    {\n        return x.my_id > y.my_id;\n    }\n    inline bool operator>=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)\n    {\n        return x.my_id >= y.my_id;\n    }\n\n} // namespace internal;\n\n//! Users reference thread class by name tbb_thread\ntypedef internal::tbb_thread_v3 tbb_thread;\n\nusing internal::operator==;\nusing internal::operator!=;\nusing internal::operator<;\nusing internal::operator>;\nusing internal::operator<=;\nusing internal::operator>=;\n\ninline void move( tbb_thread& t1, tbb_thread& t2 ) {\n    internal::move_v3(t1, t2);\n}\n\ninline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 )  __TBB_NOEXCEPT(true) {\n    tbb::tbb_thread::native_handle_type h = t1.my_handle;\n    t1.my_handle = t2.my_handle;\n    t2.my_handle = h;\n#if _WIN32||_WIN64\n    thread_id_type i = t1.my_thread_id;\n    t1.my_thread_id  = t2.my_thread_id;\n    t2.my_thread_id  = i;\n#endif /* _WIN32||_WIN64 */\n}\n\nnamespace this_tbb_thread {\n    inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); }\n    //! 
Offers the operating system the opportunity to schedule another thread.\n    inline void yield() { internal::thread_yield_v3(); }\n    //! The current thread blocks at least until the time specified.\n    inline void sleep(const tick_count::interval_t &i) { \n        internal::thread_sleep_v3(i);  \n    }\n}  // namespace this_tbb_thread\n\n} // namespace tbb\n\n#endif /* __TBB_tbb_thread_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbb_version.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n// Please define version number in the file:\n#include \"tbb/tbb_stddef.h\"\n\n// And don't touch anything below\n#ifndef ENDL\n#define ENDL \"\\n\"\n#endif\n#include \"version_string.ver\"\n\n#ifndef __TBB_VERSION_STRINGS\n#pragma message(\"Warning: version_string.ver isn't generated properly by version_info.sh script!\")\n// here is an example of macros value:\n#define __TBB_VERSION_STRINGS \\\n\"TBB: BUILD_HOST\\tUnknown\\n\" \\\n\"TBB: BUILD_ARCH\\tUnknown\\n\" \\\n\"TBB: BUILD_OS\\t\\tUnknown\\n\" \\\n\"TBB: BUILD_CL\\t\\tUnknown\\n\" \\\n\"TBB: BUILD_COMPILER\\tUnknown\\n\" \\\n\"TBB: BUILD_COMMAND\\tUnknown\\n\"\n#endif\n#ifndef __TBB_DATETIME\n#ifdef RC_INVOKED\n#define __TBB_DATETIME \"Unknown\"\n#else\n#define __TBB_DATETIME __DATE__ __TIME__\n#endif\n#endif\n\n#define __TBB_VERSION_NUMBER(N) #N \": VERSION\\t\\t\" __TBB_STRING(TBB_VERSION_MAJOR.TBB_VERSION_MINOR) ENDL\n#define __TBB_INTERFACE_VERSION_NUMBER(N) #N \": INTERFACE VERSION\\t\" __TBB_STRING(TBB_INTERFACE_VERSION) ENDL\n\n#define __TBB_VERSION_DATETIME(N) #N \": BUILD_DATE\\t\\t\" __TBB_DATETIME ENDL\n#ifndef TBB_USE_DEBUG\n    #define __TBB_VERSION_USE_DEBUG(N) #N \": TBB_USE_DEBUG\\tundefined\" ENDL\n#elif TBB_USE_DEBUG==0\n    #define __TBB_VERSION_USE_DEBUG(N) #N \": TBB_USE_DEBUG\\t0\" ENDL\n#elif TBB_USE_DEBUG==1\n    #define __TBB_VERSION_USE_DEBUG(N) #N \": TBB_USE_DEBUG\\t1\" ENDL\n#elif TBB_USE_DEBUG==2\n    #define __TBB_VERSION_USE_DEBUG(N) #N \": TBB_USE_DEBUG\\t2\" ENDL\n#else\n    #error Unexpected value for TBB_USE_DEBUG\n#endif\n\n/* Make __TBB_VERSION_USE_ASSERT and __TBB_VERSION_DO_NOTIFY empty for rc\n * because rc from VS2005 crashed with fatal error RC10056 for too complex\n * macros (for example, when __TBB_CPF_BUILD is enabled).\n * All information is available in BUILD_COMMAND anyway.\n */\n\n#ifdef RC_INVOKED\n    #define 
__TBB_VERSION_USE_ASSERT(N)\n#else // RC_INVOKED\n#ifndef TBB_USE_ASSERT\n    #define __TBB_VERSION_USE_ASSERT(N) #N \": TBB_USE_ASSERT\\tundefined\" ENDL\n#elif TBB_USE_ASSERT==0\n    #define __TBB_VERSION_USE_ASSERT(N) #N \": TBB_USE_ASSERT\\t0\" ENDL\n#elif TBB_USE_ASSERT==1\n    #define __TBB_VERSION_USE_ASSERT(N) #N \": TBB_USE_ASSERT\\t1\" ENDL\n#elif TBB_USE_ASSERT==2\n    #define __TBB_VERSION_USE_ASSERT(N) #N \": TBB_USE_ASSERT\\t2\" ENDL\n#else\n    #error Unexpected value for TBB_USE_ASSERT\n#endif\n#endif // RC_INVOKED\n\n#ifndef __TBB_CPF_BUILD\n    #define __TBB_VERSION_TBB_PREVIEW_BINARY(N)\n#else\n    #define __TBB_VERSION_TBB_PREVIEW_BINARY(N) #N \": TBB_PREVIEW_BINARY\\t1\" ENDL\n#endif\n\n#ifdef RC_INVOKED\n    #define __TBB_VERSION_DO_NOTIFY(N)\n#else\n#ifndef DO_ITT_NOTIFY\n    #define __TBB_VERSION_DO_NOTIFY(N) #N \": DO_ITT_NOTIFY\\tundefined\" ENDL\n#elif DO_ITT_NOTIFY==1\n    #define __TBB_VERSION_DO_NOTIFY(N) #N \": DO_ITT_NOTIFY\\t1\" ENDL\n#elif DO_ITT_NOTIFY==0\n    #define __TBB_VERSION_DO_NOTIFY(N)\n#else\n    #error Unexpected value for DO_ITT_NOTIFY\n#endif\n#endif // RC_INVOKED\n\n#define TBB_VERSION_STRINGS_P(N) __TBB_VERSION_NUMBER(N) __TBB_INTERFACE_VERSION_NUMBER(N) __TBB_VERSION_DATETIME(N) __TBB_VERSION_STRINGS(N) __TBB_VERSION_USE_DEBUG(N) __TBB_VERSION_USE_ASSERT(N) __TBB_VERSION_TBB_PREVIEW_BINARY(N) __TBB_VERSION_DO_NOTIFY(N)\n\n#define TBB_VERSION_STRINGS TBB_VERSION_STRINGS_P(TBB)\n#define TBBMALLOC_VERSION_STRINGS TBB_VERSION_STRINGS_P(TBBmalloc)\n\n// numbers\n#ifndef __TBB_VERSION_YMD\n#define __TBB_VERSION_YMD 0, 0\n#endif\n\n#define TBB_VERNUMBERS TBB_VERSION_MAJOR, TBB_VERSION_MINOR, __TBB_VERSION_YMD\n\n#define TBB_VERSION __TBB_STRING(TBB_VERNUMBERS)\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tbbmalloc_proxy.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n/*\nReplacing the standard memory allocation routines in Microsoft* C/C++ RTL \n(malloc/free, global new/delete, etc.) with the TBB memory allocator. \n\nInclude the following header to a source of any binary which is loaded during \napplication startup\n\n#include \"tbb/tbbmalloc_proxy.h\"\n\nor add following parameters to the linker options for the binary which is \nloaded during application startup. 
It can be either exe-file or dll.\n\nFor win32\ntbbmalloc_proxy.lib /INCLUDE:\"___TBB_malloc_proxy\"\nwin64\ntbbmalloc_proxy.lib /INCLUDE:\"__TBB_malloc_proxy\"\n*/\n\n#ifndef __TBB_tbbmalloc_proxy_H\n#define __TBB_tbbmalloc_proxy_H\n\n#if _MSC_VER\n\n#ifdef _DEBUG\n    #pragma comment(lib, \"tbbmalloc_proxy_debug.lib\")\n#else\n    #pragma comment(lib, \"tbbmalloc_proxy.lib\")\n#endif\n\n#if defined(_WIN64)\n    #pragma comment(linker, \"/include:__TBB_malloc_proxy\")\n#else\n    #pragma comment(linker, \"/include:___TBB_malloc_proxy\")\n#endif\n\n#else\n/* Primarily to support MinGW */\n\nextern \"C\" void __TBB_malloc_proxy();\nstruct __TBB_malloc_proxy_caller {\n    __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); }\n} volatile __TBB_malloc_proxy_helper_object;\n\n#endif // _MSC_VER\n\n#endif //__TBB_tbbmalloc_proxy_H\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tick_count.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef __TBB_tick_count_H\n#define __TBB_tick_count_H\n\n#include \"tbb_stddef.h\"\n\n#if _WIN32||_WIN64\n#include \"machine/windows_api.h\"\n#elif __linux__\n#include <ctime>\n#else /* generic Unix */\n#include <sys/time.h>\n#endif /* (choice of OS) */\n\nnamespace tbb {\n\n//! Absolute timestamp\n/** @ingroup timing */\nclass tick_count {\npublic:\n    //! Relative time interval.\n    class interval_t {\n        long long value;\n        explicit interval_t( long long value_ ) : value(value_) {}\n    public:\n        //! 
Construct a time interval representing zero time duration\n        interval_t() : value(0) {};\n\n        //! Construct a time interval representing sec seconds time  duration\n        explicit interval_t( double sec );\n\n        //! Return the length of a time interval in seconds\n        double seconds() const;\n\n        friend class tbb::tick_count;\n\n        //! Extract the intervals from the tick_counts and subtract them.\n        friend interval_t operator-( const tick_count& t1, const tick_count& t0 );\n\n        //! Add two intervals.\n        friend interval_t operator+( const interval_t& i, const interval_t& j ) {\n            return interval_t(i.value+j.value);\n        }\n\n        //! Subtract two intervals.\n        friend interval_t operator-( const interval_t& i, const interval_t& j ) {\n            return interval_t(i.value-j.value);\n        }\n\n        //! Accumulation operator\n        interval_t& operator+=( const interval_t& i ) {value += i.value; return *this;}\n\n        //! Subtraction operator\n        interval_t& operator-=( const interval_t& i ) {value -= i.value; return *this;}\n    private:\n        static long long ticks_per_second(){\n#if _WIN32||_WIN64\n            LARGE_INTEGER qpfreq;\n            int rval = QueryPerformanceFrequency(&qpfreq);\n            __TBB_ASSERT_EX(rval, \"QueryPerformanceFrequency returned zero\");\n            return static_cast<long long>(qpfreq.QuadPart);\n#elif __linux__\n            return static_cast<long long>(1E9);\n#else /* generic Unix */\n            return static_cast<long long>(1E6);\n#endif /* (choice of OS) */\n        }\n    };\n    \n    //! Construct an absolute timestamp initialized to zero.\n    tick_count() : my_count(0) {};\n\n    //! Return current time.\n    static tick_count now();\n    \n    //! Subtract two timestamps to get the time interval between\n    friend interval_t operator-( const tick_count& t1, const tick_count& t0 );\n\n    //! 
Return the resolution of the clock in seconds per tick.\n    static double resolution() { return 1.0 / interval_t::ticks_per_second(); }\n\nprivate:\n    long long my_count;\n};\n\ninline tick_count tick_count::now() {\n    tick_count result;\n#if _WIN32||_WIN64\n    LARGE_INTEGER qpcnt;\n    int rval = QueryPerformanceCounter(&qpcnt);\n    __TBB_ASSERT_EX(rval, \"QueryPerformanceCounter failed\");\n    result.my_count = qpcnt.QuadPart;\n#elif __linux__\n    struct timespec ts;\n    int status = clock_gettime( CLOCK_REALTIME, &ts );\n    __TBB_ASSERT_EX( status==0, \"CLOCK_REALTIME not supported\" );\n    result.my_count = static_cast<long long>(1000000000UL)*static_cast<long long>(ts.tv_sec) + static_cast<long long>(ts.tv_nsec);\n#else /* generic Unix */\n    struct timeval tv;\n    int status = gettimeofday(&tv, NULL);\n    __TBB_ASSERT_EX( status==0, \"gettimeofday failed\" );\n    result.my_count = static_cast<long long>(1000000)*static_cast<long long>(tv.tv_sec) + static_cast<long long>(tv.tv_usec);\n#endif /*(choice of OS) */\n    return result;\n}\n\ninline tick_count::interval_t::interval_t( double sec ) {\n    value = static_cast<long long>(sec*interval_t::ticks_per_second());\n}\n\ninline tick_count::interval_t operator-( const tick_count& t1, const tick_count& t0 ) {\n    return tick_count::interval_t( t1.my_count-t0.my_count );\n}\n\ninline double tick_count::interval_t::seconds() const {\n    return value*tick_count::resolution();\n}\n\n} // namespace tbb\n\n#endif /* __TBB_tick_count_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tls.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _TBB_tls_H\n#define _TBB_tls_H\n\n#if USE_PTHREAD\n#include <pthread.h>\n#else /* assume USE_WINTHREAD */\n#include \"tbb/machine/windows_api.h\"\n#endif\n\nnamespace tbb {\n\nnamespace internal {\n\ntypedef void (*tls_dtor_t)(void*);\n\n//! 
Basic cross-platform wrapper class for TLS operations.\ntemplate <typename T>\nclass basic_tls {\n#if USE_PTHREAD\n    typedef pthread_key_t tls_key_t;\npublic:\n    int  create( tls_dtor_t dtor = NULL ) {\n        return pthread_key_create(&my_key, dtor);\n    }\n    int  destroy()      { return pthread_key_delete(my_key); }\n    void set( T value ) { pthread_setspecific(my_key, (void*)value); }\n    T    get()          { return (T)pthread_getspecific(my_key); }\n#else /* USE_WINTHREAD */\n    typedef DWORD tls_key_t;\npublic:\n#if !__TBB_WIN8UI_SUPPORT\n    int create() {\n        tls_key_t tmp = TlsAlloc();\n        if( tmp==TLS_OUT_OF_INDEXES )\n            return TLS_OUT_OF_INDEXES;\n        my_key = tmp;\n        return 0;\n    }\n    int  destroy()      { TlsFree(my_key); my_key=0; return 0; }\n    void set( T value ) { TlsSetValue(my_key, (LPVOID)value); }\n    T    get()          { return (T)TlsGetValue(my_key); }\n#else /*!__TBB_WIN8UI_SUPPORT*/\n    int create() {\n        tls_key_t tmp = FlsAlloc(NULL);\n        if( tmp== (DWORD)0xFFFFFFFF )\n            return (DWORD)0xFFFFFFFF;\n        my_key = tmp;\n        return 0;\n    }\n    int  destroy()      { FlsFree(my_key); my_key=0; return 0; }\n    void set( T value ) { FlsSetValue(my_key, (LPVOID)value); }\n    T    get()          { return (T)FlsGetValue(my_key); }\n#endif /* !__TBB_WIN8UI_SUPPORT */\n#endif /* USE_WINTHREAD */\nprivate:\n    tls_key_t my_key;\n};\n\n//! More advanced TLS support template class.\n/** It supports RAII and to some extent mimic __declspec(thread) variables. 
*/\ntemplate <typename T>\nclass tls : public basic_tls<T> {\n    typedef basic_tls<T> base;\npublic:\n    tls()  { base::create();  }\n    ~tls() { base::destroy(); }\n    T operator=(T value) { base::set(value); return value; }\n    operator T() { return base::get(); }\n};\n\ntemplate <typename T>\nclass tls<T*> : basic_tls<T*> {\n    typedef basic_tls<T*> base;\n    static void internal_dtor(void* ptr) {\n        if (ptr) delete (T*)ptr;\n    }\n    T* internal_get() {\n        T* result = base::get();\n        if (!result) {\n            result = new T;\n            base::set(result);\n        }\n        return result;\n    }\npublic:\n    tls()  {\n#if USE_PTHREAD\n        base::create( internal_dtor );\n#else\n        base::create();\n#endif\n    }\n    ~tls() { base::destroy(); }\n    T* operator=(T* value) { base::set(value); return value; }\n    operator T*()   { return  internal_get(); }\n    T* operator->() { return  internal_get(); }\n    T& operator*()  { return *internal_get(); }\n};\n\n} // namespace internal\n\n} // namespace tbb\n\n#endif /* _TBB_tls_H */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/disable_warnings.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"ittnotify_config.h\"\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n\n#pragma warning (disable: 593)   /* parameter \"XXXX\" was set but never used                 */\n#pragma warning (disable: 344)   /* typedef name has already been declared (with same type) */\n#pragma warning (disable: 174)   /* expression has no effect                                */\n#pragma warning (disable: 4127)  /* conditional expression is constant                      */\n#pragma warning (disable: 4306)  /* conversion from '?' to '?' 
of greater size              */\n\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#if defined __INTEL_COMPILER\n\n#pragma warning (disable: 869)  /* parameter \"XXXXX\" was never referenced                  */\n#pragma warning (disable: 1418) /* external function definition with no prior declaration  */\n#pragma warning (disable: 1419) /* external declaration in primary source file             */\n\n#endif /* __INTEL_COMPILER */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/internal/ittnotify.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _INTERNAL_ITTNOTIFY_H_\n#define _INTERNAL_ITTNOTIFY_H_\n\n/**\n * @file\n * @brief Internal User API functions and types\n */\n\n/** @cond exclude_from_documentation */\n#ifndef ITT_OS_WIN\n#  define ITT_OS_WIN   1\n#endif /* ITT_OS_WIN */\n\n#ifndef ITT_OS_LINUX\n#  define ITT_OS_LINUX 2\n#endif /* ITT_OS_LINUX */\n\n#ifndef ITT_OS_MAC\n#  define ITT_OS_MAC   3\n#endif /* ITT_OS_MAC */\n\n#ifndef ITT_OS\n#  if defined WIN32 || defined _WIN32\n#    define ITT_OS ITT_OS_WIN\n#  elif defined( __APPLE__ ) && defined( __MACH__ )\n#    define ITT_OS ITT_OS_MAC\n#  else\n#    define ITT_OS ITT_OS_LINUX\n#  endif\n#endif /* ITT_OS */\n\n#ifndef ITT_PLATFORM_WIN\n#  define ITT_PLATFORM_WIN 1\n#endif /* ITT_PLATFORM_WIN */\n\n#ifndef ITT_PLATFORM_POSIX\n#  define ITT_PLATFORM_POSIX 2\n#endif /* ITT_PLATFORM_POSIX */\n\n#ifndef ITT_PLATFORM_MAC\n#  define ITT_PLATFORM_MAC 3\n#endif /* ITT_PLATFORM_MAC */\n\n#ifndef ITT_PLATFORM\n#  if ITT_OS==ITT_OS_WIN\n#    define ITT_PLATFORM ITT_PLATFORM_WIN\n#  elif ITT_OS==ITT_OS_MAC\n#    define ITT_PLATFORM ITT_PLATFORM_MAC\n#  else\n#    define ITT_PLATFORM ITT_PLATFORM_POSIX\n#  endif\n#endif /* ITT_PLATFORM */\n\n#if defined(_UNICODE) && !defined(UNICODE)\n#define UNICODE\n#endif\n\n#include <stddef.h>\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#include <tchar.h>\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#include <stdint.h>\n#if defined(UNICODE) || defined(_UNICODE)\n#include <wchar.h>\n#endif /* UNICODE || _UNICODE */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#ifndef CDECL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define CDECL __cdecl\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__ \n#      define CDECL __attribute__ ((cdecl))\n#    else  /* _M_IX86 || __i386__ */\n#      define CDECL /* actual only on x86 platform */\n#    
endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* CDECL */\n\n#ifndef STDCALL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define STDCALL __stdcall\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__\n#      define STDCALL __attribute__ ((stdcall)) \n#    else  /* _M_IX86 || __i386__ */\n#      define STDCALL /* supported only on x86 platform */\n#    endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* STDCALL */\n\n#define ITTAPI    CDECL\n#define LIBITTAPI CDECL\n\n/* TODO: Temporary for compatibility! */\n#define ITTAPI_CALL    CDECL\n#define LIBITTAPI_CALL CDECL\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n/* use __forceinline (VC++ specific) */\n#define ITT_INLINE           __forceinline\n#define ITT_INLINE_ATTRIBUTE /* nothing */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/*\n * Generally, functions are not inlined unless optimization is specified.\n * For functions declared inline, this attribute inlines the function even\n * if no optimization level was specified.\n */\n#ifdef __STRICT_ANSI__\n#define ITT_INLINE           static\n#else  /* __STRICT_ANSI__ */\n#define ITT_INLINE           static inline\n#endif /* __STRICT_ANSI__ */\n#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/** @endcond */\n\n/** @cond exclude_from_documentation */\n/* Helper macro for joining tokens */\n#define ITT_JOIN_AUX(p,n) p##n\n#define ITT_JOIN(p,n)     ITT_JOIN_AUX(p,n)\n\n#ifdef ITT_MAJOR\n#undef ITT_MAJOR\n#endif\n#ifdef ITT_MINOR\n#undef ITT_MINOR\n#endif\n#define ITT_MAJOR     3\n#define ITT_MINOR     0\n\n/* Standard versioning of a token with major and minor version numbers */\n#define ITT_VERSIONIZE(x)    \\\n    ITT_JOIN(x,              \\\n    ITT_JOIN(_,              \\\n    ITT_JOIN(ITT_MAJOR,      \\\n    ITT_JOIN(_, ITT_MINOR))))\n\n#ifndef INTEL_ITTNOTIFY_PREFIX\n#  define 
INTEL_ITTNOTIFY_PREFIX __itt_\n#endif /* INTEL_ITTNOTIFY_PREFIX */\n#ifndef INTEL_ITTNOTIFY_POSTFIX\n#  define INTEL_ITTNOTIFY_POSTFIX _ptr_\n#endif /* INTEL_ITTNOTIFY_POSTFIX */\n\n#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)\n#define ITTNOTIFY_NAME(n)     ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)))\n\n#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)\n#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)\n\n#define ITTNOTIFY_VOID_D0(n,d)       (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d)\n#define ITTNOTIFY_VOID_D1(n,d,x)     (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x)\n#define ITTNOTIFY_VOID_D2(n,d,x,y)   (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y)\n#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z)\n#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a)     (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)\n#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b)   (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)\n#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)\n#define ITTNOTIFY_DATA_D0(n,d)       (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d)\n#define ITTNOTIFY_DATA_D1(n,d,x)     (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x)\n#define ITTNOTIFY_DATA_D2(n,d,x,y)   (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y)\n#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z)\n#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a)     (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       
0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)\n#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b)   (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)\n#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)\n\n#ifdef ITT_STUB\n#undef ITT_STUB\n#endif\n#ifdef ITT_STUBV\n#undef ITT_STUBV\n#endif\n#define ITT_STUBV(api,type,name,args)                             \\\n    typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args;   \\\n    extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);\n#define ITT_STUB ITT_STUBV\n/** @endcond */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n#define INTEL_ITTNOTIFY_API_PRIVATE\n#include \"../ittnotify.h\"\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _INTERNAL_ITTNOTIFY_H_ */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/ittnotify.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _ITTNOTIFY_H_\n#define _ITTNOTIFY_H_\n\n/**\n@file\n@brief Public User API functions and types\n@mainpage\n\nThe ITT API is used to annotate a user's program with additional information\nthat can be used by correctness and performance tools. The user inserts\ncalls in their program. 
Those calls generate information that is collected\nat runtime, and used by tools such as Intel(R) Parallel Amplifier and\nIntel(R) Parallel Inspector.\n\n@section API Concepts\nThe following general concepts are used throughout the API.\n\n@subsection Unicode Support\nMany API functions take character string arguments. On Windows, there\nare two versions of each such function. The function name is suffixed\nby W if Unicode support is enabled, and by A otherwise. Any API function\nthat takes a character string argument adheres to this convention.\n\n@subsection Conditional Compilation\nMany users prefer having an option to modify ITT API code when linking it\ninside their runtimes. ITT API header file provides a mechanism to replace\nITT API function names inside your code with empty strings. To do this,\ndefine the macros INTEL_NO_ITTNOTIFY_API during compilation and remove the\nstatic library from the linker script.\n\n@subsection Domains\n[see domains]\nDomains provide a way to separate notification for different modules or\nlibraries in a program. Domains are specified by dotted character strings,\ne.g. TBB.Internal.Control.\n\nA mechanism (to be specified) is provided to enable and disable\ndomains. By default, all domains are enabled.\n@subsection Named Entities and Instances\nNamed entities (frames, regions, tasks, and markers) communicate\ninformation about the program to the analysis tools. A named entity often\nrefers to a section of program code, or to some set of logical concepts\nthat the programmer wants to group together.\n\nNamed entities relate to the programmer's static view of the program. When\nthe program actually executes, many instances of a given named entity\nmay be created.\n\nThe API annotations denote instances of named entities. The actual\nnamed entities are displayed using the analysis tools. 
In other words,\nthe named entities come into existence when instances are created.\n\nInstances of named entities may have instance identifiers (IDs). Some\nAPI calls use instance identifiers to create relationships between\ndifferent instances of named entities. Other API calls associate data\nwith instances of named entities.\n\nSome named entities must always have instance IDs. In particular, regions\nand frames always have IDs. Task and markers need IDs only if the ID is\nneeded in another API call (such as adding a relation or metadata).\n\nThe lifetime of instance IDs is distinct from the lifetime of\ninstances. This allows various relationships to be specified separate\nfrom the actual execution of instances. This flexibility comes at the\nexpense of extra API calls.\n\nThe same ID may not be reused for different instances, unless a previous\n[ref] __itt_id_destroy call for that ID has been issued.\n*/\n\n/** @cond exclude_from_documentation */\n#ifndef ITT_OS_WIN\n#  define ITT_OS_WIN   1\n#endif /* ITT_OS_WIN */\n\n#ifndef ITT_OS_LINUX\n#  define ITT_OS_LINUX 2\n#endif /* ITT_OS_LINUX */\n\n#ifndef ITT_OS_MAC\n#  define ITT_OS_MAC   3\n#endif /* ITT_OS_MAC */\n\n#ifndef ITT_OS\n#  if defined WIN32 || defined _WIN32\n#    define ITT_OS ITT_OS_WIN\n#  elif defined( __APPLE__ ) && defined( __MACH__ )\n#    define ITT_OS ITT_OS_MAC\n#  else\n#    define ITT_OS ITT_OS_LINUX\n#  endif\n#endif /* ITT_OS */\n\n#ifndef ITT_PLATFORM_WIN\n#  define ITT_PLATFORM_WIN 1\n#endif /* ITT_PLATFORM_WIN */\n\n#ifndef ITT_PLATFORM_POSIX\n#  define ITT_PLATFORM_POSIX 2\n#endif /* ITT_PLATFORM_POSIX */\n\n#ifndef ITT_PLATFORM_MAC\n#  define ITT_PLATFORM_MAC 3\n#endif /* ITT_PLATFORM_MAC */\n\n#ifndef ITT_PLATFORM\n#  if ITT_OS==ITT_OS_WIN\n#    define ITT_PLATFORM ITT_PLATFORM_WIN\n#  elif ITT_OS==ITT_OS_MAC\n#    define ITT_PLATFORM ITT_PLATFORM_MAC\n#  else\n#    define ITT_PLATFORM ITT_PLATFORM_POSIX\n#  endif\n#endif /* ITT_PLATFORM */\n\n#if defined(_UNICODE) && 
!defined(UNICODE)\n#define UNICODE\n#endif\n\n#include <stddef.h>\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#include <tchar.h>\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#include <stdint.h>\n#if defined(UNICODE) || defined(_UNICODE)\n#include <wchar.h>\n#endif /* UNICODE || _UNICODE */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#ifndef CDECL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define CDECL __cdecl\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__ \n#      define CDECL __attribute__ ((cdecl))\n#    else  /* _M_IX86 || __i386__ */\n#      define CDECL /* actual only on x86 platform */\n#    endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* CDECL */\n\n#ifndef STDCALL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define STDCALL __stdcall\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__\n#      define STDCALL __attribute__ ((stdcall)) \n#    else  /* _M_IX86 || __i386__ */\n#      define STDCALL /* supported only on x86 platform */\n#    endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* STDCALL */\n\n#define ITTAPI    CDECL\n#define LIBITTAPI CDECL\n\n/* TODO: Temporary for compatibility! 
*/\n#define ITTAPI_CALL    CDECL\n#define LIBITTAPI_CALL CDECL\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n/* use __forceinline (VC++ specific) */\n#define ITT_INLINE           __forceinline\n#define ITT_INLINE_ATTRIBUTE /* nothing */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/*\n * Generally, functions are not inlined unless optimization is specified.\n * For functions declared inline, this attribute inlines the function even\n * if no optimization level was specified.\n */\n#ifdef __STRICT_ANSI__\n#define ITT_INLINE           static inline\n#else  /* __STRICT_ANSI__ */\n#define ITT_INLINE           static inline\n#endif /* __STRICT_ANSI__ */\n#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/** @endcond */\n\n#ifdef INTEL_ITTNOTIFY_ENABLE_LEGACY\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    pragma message(\"WARNING!!! Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro\")\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n// #warning usage leads to ICC's compilation error\n// #    warning \"Deprecated API is used. 
Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro\"\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#  include \"legacy/ittnotify.h\"\n#endif /* INTEL_ITTNOTIFY_ENABLE_LEGACY */\n\n/** @cond exclude_from_documentation */\n/* Helper macro for joining tokens */\n#define ITT_JOIN_AUX(p,n) p##n\n#define ITT_JOIN(p,n)     ITT_JOIN_AUX(p,n)\n\n#ifdef ITT_MAJOR\n#undef ITT_MAJOR\n#endif\n#ifdef ITT_MINOR\n#undef ITT_MINOR\n#endif\n#define ITT_MAJOR     3\n#define ITT_MINOR     0\n\n/* Standard versioning of a token with major and minor version numbers */\n#define ITT_VERSIONIZE(x)    \\\n    ITT_JOIN(x,              \\\n    ITT_JOIN(_,              \\\n    ITT_JOIN(ITT_MAJOR,      \\\n    ITT_JOIN(_, ITT_MINOR))))\n\n#ifndef INTEL_ITTNOTIFY_PREFIX\n#  define INTEL_ITTNOTIFY_PREFIX __itt_\n#endif /* INTEL_ITTNOTIFY_PREFIX */\n#ifndef INTEL_ITTNOTIFY_POSTFIX\n#  define INTEL_ITTNOTIFY_POSTFIX _ptr_\n#endif /* INTEL_ITTNOTIFY_POSTFIX */\n\n#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)\n#define ITTNOTIFY_NAME(n)     ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)))\n\n#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)\n#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)\n\n#define ITTNOTIFY_VOID_D0(n,d)       (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d)\n#define ITTNOTIFY_VOID_D1(n,d,x)     (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x)\n#define ITTNOTIFY_VOID_D2(n,d,x,y)   (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y)\n#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z)\n#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a)     (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)\n#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b)   (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? 
(void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)\n#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)\n#define ITTNOTIFY_DATA_D0(n,d)       (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d)\n#define ITTNOTIFY_DATA_D1(n,d,x)     (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x)\n#define ITTNOTIFY_DATA_D2(n,d,x,y)   (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y)\n#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z)\n#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a)     (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)\n#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b)   (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)\n#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)\n\n#ifdef ITT_STUB\n#undef ITT_STUB\n#endif\n#ifdef ITT_STUBV\n#undef ITT_STUBV\n#endif\n#define ITT_STUBV(api,type,name,args)                             \\\n    typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args;   \\\n    extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);\n#define ITT_STUB ITT_STUBV\n/** @endcond */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/** @cond exclude_from_gpa_documentation */\n/**\n * @defgroup public Public API\n * @{\n * @}\n */\n\n/**\n * @defgroup control Collection Control\n * @ingroup public\n * General behavior: application continues to run, but no profiling information is being collected\n *\n * Pausing occurs not only for the current thread but for all process as well as spawned processes\n * - Intel(R) Parallel Inspector and Intel(R) Inspector XE:\n *   - Does not analyze or report errors that involve memory access.\n *   - Other errors are reported as usual. 
Pausing data collection in\n *     Intel(R) Parallel Inspector and Intel(R) Inspector XE\n *     only pauses tracing and analyzing memory access.\n *     It does not pause tracing or analyzing threading APIs.\n *   .\n * - Intel(R) Parallel Amplifier and Intel(R) VTune(TM) Amplifier XE:\n *   - Does continue to record when new threads are started.\n *   .\n * - Other effects:\n *   - Possible reduction of runtime overhead.\n *   .\n * @{\n */\n/** @brief Pause collection */\nvoid ITTAPI __itt_pause(void);\n/** @brief Resume collection */\nvoid ITTAPI __itt_resume(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, pause,  (void))\nITT_STUBV(ITTAPI, void, resume, (void))\n#define __itt_pause      ITTNOTIFY_VOID(pause)\n#define __itt_pause_ptr  ITTNOTIFY_NAME(pause)\n#define __itt_resume     ITTNOTIFY_VOID(resume)\n#define __itt_resume_ptr ITTNOTIFY_NAME(resume)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_pause()\n#define __itt_pause_ptr  0\n#define __itt_resume()\n#define __itt_resume_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_pause_ptr  0\n#define __itt_resume_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} control group */\n/** @endcond */\n\n/**\n * @defgroup threads Threads\n * @ingroup public\n * Give names to threads\n * @{\n */\n/**\n * @brief Sets thread name of calling thread\n * @param[in] name - name of thread\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nvoid ITTAPI __itt_thread_set_nameA(const char    *name);\nvoid ITTAPI __itt_thread_set_nameW(const wchar_t *name);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_thread_set_name     __itt_thread_set_nameW\n#  define __itt_thread_set_name_ptr __itt_thread_set_nameW_ptr\n#else /* UNICODE */\n#  define __itt_thread_set_name     __itt_thread_set_nameA\n#  define __itt_thread_set_name_ptr __itt_thread_set_nameA_ptr\n#endif /* UNICODE */\n#else  
/* ITT_PLATFORM==ITT_PLATFORM_WIN */\nvoid ITTAPI __itt_thread_set_name(const char *name);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, thread_set_nameA, (const char    *name))\nITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, thread_set_name,  (const char    *name))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_thread_set_nameA     ITTNOTIFY_VOID(thread_set_nameA)\n#define __itt_thread_set_nameA_ptr ITTNOTIFY_NAME(thread_set_nameA)\n#define __itt_thread_set_nameW     ITTNOTIFY_VOID(thread_set_nameW)\n#define __itt_thread_set_nameW_ptr ITTNOTIFY_NAME(thread_set_nameW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_thread_set_name     ITTNOTIFY_VOID(thread_set_name)\n#define __itt_thread_set_name_ptr ITTNOTIFY_NAME(thread_set_name)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_thread_set_nameA(name)\n#define __itt_thread_set_nameA_ptr 0\n#define __itt_thread_set_nameW(name)\n#define __itt_thread_set_nameW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_thread_set_name(name)\n#define __itt_thread_set_name_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_thread_set_nameA_ptr 0\n#define __itt_thread_set_nameW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_thread_set_name_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @cond exclude_from_gpa_documentation */\n\n/**\n * @brief Mark current thread as ignored from this point on, for the duration of its existence.\n 
*/\nvoid ITTAPI __itt_thread_ignore(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, thread_ignore, (void))\n#define __itt_thread_ignore     ITTNOTIFY_VOID(thread_ignore)\n#define __itt_thread_ignore_ptr ITTNOTIFY_NAME(thread_ignore)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_thread_ignore()\n#define __itt_thread_ignore_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_thread_ignore_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} threads group */\n\n/**\n * @defgroup suppress Error suppression\n * @ingroup public\n * General behavior: application continues to run, but errors are suppressed\n *\n * @{\n */\n\n/*****************************************************************//**\n * @name group of functions used for error suppression in correctness tools\n *********************************************************************/\n/** @{ */\n/**\n * @hideinitializer \n * @brief possible value for suppression mask\n */\n#define __itt_suppress_all_errors 0x7fffffff\n\n/**\n * @hideinitializer \n * @brief possible value for suppression mask (suppresses errors from threading analysis)\n */\n#define __itt_suppress_threading_errors 0x000000ff\n\n/**\n * @hideinitializer \n * @brief possible value for suppression mask (suppresses errors from memory analysis)\n */\n#define __itt_suppress_memory_errors 0x0000ff00\n\n/**\n * @brief Start suppressing errors identified in mask on this thread\n */\nvoid ITTAPI __itt_suppress_push(unsigned int mask);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, suppress_push, (unsigned int mask))\n#define __itt_suppress_push     ITTNOTIFY_VOID(suppress_push)\n#define __itt_suppress_push_ptr ITTNOTIFY_NAME(suppress_push)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_suppress_push(mask)\n#define 
__itt_suppress_push_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_suppress_push_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Undo the effects of the matching call to __itt_suppress_push  \n */\nvoid ITTAPI __itt_suppress_pop(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, suppress_pop, (void))\n#define __itt_suppress_pop     ITTNOTIFY_VOID(suppress_pop)\n#define __itt_suppress_pop_ptr ITTNOTIFY_NAME(suppress_pop)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_suppress_pop()\n#define __itt_suppress_pop_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_suppress_pop_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @enum __itt_model_disable\n * @brief Enumerator for the disable methods\n */\ntypedef enum __itt_suppress_mode {\n    __itt_unsuppress_range,\n    __itt_suppress_range\n} __itt_suppress_mode_t;\n\n/**\n * @brief Mark a range of memory for error suppression or unsuppression for error types included in mask\n */\nvoid ITTAPI __itt_suppress_mark_range(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, suppress_mark_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size))\n#define __itt_suppress_mark_range     ITTNOTIFY_VOID(suppress_mark_range)\n#define __itt_suppress_mark_range_ptr ITTNOTIFY_NAME(suppress_mark_range)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_suppress_mark_range(mask)\n#define __itt_suppress_mark_range_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_suppress_mark_range_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Undo the effect of a matching call to 
__itt_suppress_mark_range.   If not matching\n *        call is found, nothing is changed.\n */\nvoid ITTAPI __itt_suppress_clear_range(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, suppress_clear_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size))\n#define __itt_suppress_clear_range     ITTNOTIFY_VOID(suppress_clear_range)\n#define __itt_suppress_clear_range_ptr ITTNOTIFY_NAME(suppress_clear_range)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_suppress_clear_range(mask)\n#define __itt_suppress_clear_range_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_suppress_clear_range_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} */\n/** @} suppress group */\n\n/**\n * @defgroup sync Synchronization\n * @ingroup public\n * Indicate user-written synchronization code\n * @{\n */\n/**\n * @hideinitializer\n * @brief possible value of attribute argument for sync object type\n */\n#define __itt_attr_barrier 1\n\n/**\n * @hideinitializer\n * @brief possible value of attribute argument for sync object type\n */\n#define __itt_attr_mutex   2\n\n/**\n@brief Name a synchronization object\n@param[in] addr       Handle for the synchronization object. You should\nuse a real address to uniquely identify the synchronization object.\n@param[in] objtype    null-terminated object type string. If NULL is\npassed, the name will be \"User Synchronization\".\n@param[in] objname    null-terminated object name string. 
If NULL,\nno name will be assigned to the object.\n@param[in] attribute  one of [#__itt_attr_barrier, #__itt_attr_mutex]\n */\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nvoid ITTAPI __itt_sync_createA(void *addr, const char    *objtype, const char    *objname, int attribute);\nvoid ITTAPI __itt_sync_createW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_sync_create     __itt_sync_createW\n#  define __itt_sync_create_ptr __itt_sync_createW_ptr\n#else /* UNICODE */\n#  define __itt_sync_create     __itt_sync_createA\n#  define __itt_sync_create_ptr __itt_sync_createA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nvoid ITTAPI __itt_sync_create (void *addr, const char *objtype, const char *objname, int attribute);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char    *objtype, const char    *objname, int attribute))\nITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, sync_create,  (void *addr, const char*    objtype, const char*    objname, int attribute))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_createA     ITTNOTIFY_VOID(sync_createA)\n#define __itt_sync_createA_ptr ITTNOTIFY_NAME(sync_createA)\n#define __itt_sync_createW     ITTNOTIFY_VOID(sync_createW)\n#define __itt_sync_createW_ptr ITTNOTIFY_NAME(sync_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_create     ITTNOTIFY_VOID(sync_create)\n#define __itt_sync_create_ptr ITTNOTIFY_NAME(sync_create)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if 
ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_createA(addr, objtype, objname, attribute)\n#define __itt_sync_createA_ptr 0\n#define __itt_sync_createW(addr, objtype, objname, attribute)\n#define __itt_sync_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_create(addr, objtype, objname, attribute)\n#define __itt_sync_create_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_createA_ptr 0\n#define __itt_sync_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_create_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n@brief Rename a synchronization object\n\nYou can use the rename call to assign or reassign a name to a given\nsynchronization object.\n@param[in] addr  handle for the synchronization object.\n@param[in] name  null-terminated object name string.\n*/\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nvoid ITTAPI __itt_sync_renameA(void *addr, const char    *name);\nvoid ITTAPI __itt_sync_renameW(void *addr, const wchar_t *name);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_sync_rename     __itt_sync_renameW\n#  define __itt_sync_rename_ptr __itt_sync_renameW_ptr\n#else /* UNICODE */\n#  define __itt_sync_rename     __itt_sync_renameA\n#  define __itt_sync_rename_ptr __itt_sync_renameA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nvoid ITTAPI __itt_sync_rename(void *addr, const char *name);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char    *name))\nITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, 
sync_rename,  (void *addr, const char    *name))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_renameA     ITTNOTIFY_VOID(sync_renameA)\n#define __itt_sync_renameA_ptr ITTNOTIFY_NAME(sync_renameA)\n#define __itt_sync_renameW     ITTNOTIFY_VOID(sync_renameW)\n#define __itt_sync_renameW_ptr ITTNOTIFY_NAME(sync_renameW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_rename     ITTNOTIFY_VOID(sync_rename)\n#define __itt_sync_rename_ptr ITTNOTIFY_NAME(sync_rename)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_renameA(addr, name)\n#define __itt_sync_renameA_ptr 0\n#define __itt_sync_renameW(addr, name)\n#define __itt_sync_renameW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_rename(addr, name)\n#define __itt_sync_rename_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_renameA_ptr 0\n#define __itt_sync_renameW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_rename_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n @brief Destroy a synchronization object.\n @param addr Handle for the synchronization object.\n */\nvoid ITTAPI __itt_sync_destroy(void *addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, sync_destroy, (void *addr))\n#define __itt_sync_destroy     ITTNOTIFY_VOID(sync_destroy)\n#define __itt_sync_destroy_ptr ITTNOTIFY_NAME(sync_destroy)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_sync_destroy(addr)\n#define __itt_sync_destroy_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_sync_destroy_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** 
@endcond */\n\n/*****************************************************************//**\n * @name group of functions is used for performance measurement tools\n *********************************************************************/\n/** @{ */\n/**\n * @brief Enter spin loop on user-defined sync object\n */\nvoid ITTAPI __itt_sync_prepare(void* addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, sync_prepare, (void *addr))\n#define __itt_sync_prepare     ITTNOTIFY_VOID(sync_prepare)\n#define __itt_sync_prepare_ptr ITTNOTIFY_NAME(sync_prepare)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_sync_prepare(addr)\n#define __itt_sync_prepare_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_sync_prepare_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Quit spin loop without acquiring spin object\n */\nvoid ITTAPI __itt_sync_cancel(void *addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, sync_cancel, (void *addr))\n#define __itt_sync_cancel     ITTNOTIFY_VOID(sync_cancel)\n#define __itt_sync_cancel_ptr ITTNOTIFY_NAME(sync_cancel)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_sync_cancel(addr)\n#define __itt_sync_cancel_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_sync_cancel_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Successful spin loop completion (sync object acquired)\n */\nvoid ITTAPI __itt_sync_acquired(void *addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, sync_acquired, (void *addr))\n#define __itt_sync_acquired     ITTNOTIFY_VOID(sync_acquired)\n#define __itt_sync_acquired_ptr ITTNOTIFY_NAME(sync_acquired)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define 
__itt_sync_acquired(addr)\n#define __itt_sync_acquired_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_sync_acquired_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Start sync object releasing code. Is called before the lock release call.\n */\nvoid ITTAPI __itt_sync_releasing(void* addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, sync_releasing, (void *addr))\n#define __itt_sync_releasing     ITTNOTIFY_VOID(sync_releasing)\n#define __itt_sync_releasing_ptr ITTNOTIFY_NAME(sync_releasing)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_sync_releasing(addr)\n#define __itt_sync_releasing_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_sync_releasing_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} */\n\n/** @} sync group */\n\n/**************************************************************//**\n * @name group of functions is used for correctness checking tools\n ******************************************************************/\n/** @{ */\n/**\n * @ingroup legacy\n * @deprecated Legacy API\n * @brief Fast synchronization which does no require spinning.\n * - This special function is to be used by TBB and OpenMP libraries only when they know\n *   there is no spin but they need to suppress TC warnings about shared variable modifications.\n * - It only has corresponding pointers in static library and does not have corresponding function\n *   in dynamic library.\n * @see void __itt_sync_prepare(void* addr);\n */\nvoid ITTAPI __itt_fsync_prepare(void* addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, fsync_prepare, (void *addr))\n#define __itt_fsync_prepare     ITTNOTIFY_VOID(fsync_prepare)\n#define __itt_fsync_prepare_ptr ITTNOTIFY_NAME(fsync_prepare)\n#else  /* 
INTEL_NO_ITTNOTIFY_API */\n#define __itt_fsync_prepare(addr)\n#define __itt_fsync_prepare_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_fsync_prepare_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup legacy\n * @deprecated Legacy API\n * @brief Fast synchronization which does no require spinning.\n * - This special function is to be used by TBB and OpenMP libraries only when they know\n *   there is no spin but they need to suppress TC warnings about shared variable modifications.\n * - It only has corresponding pointers in static library and does not have corresponding function\n *   in dynamic library.\n * @see void __itt_sync_cancel(void *addr);\n */\nvoid ITTAPI __itt_fsync_cancel(void *addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, fsync_cancel, (void *addr))\n#define __itt_fsync_cancel     ITTNOTIFY_VOID(fsync_cancel)\n#define __itt_fsync_cancel_ptr ITTNOTIFY_NAME(fsync_cancel)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_fsync_cancel(addr)\n#define __itt_fsync_cancel_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_fsync_cancel_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup legacy\n * @deprecated Legacy API\n * @brief Fast synchronization which does no require spinning.\n * - This special function is to be used by TBB and OpenMP libraries only when they know\n *   there is no spin but they need to suppress TC warnings about shared variable modifications.\n * - It only has corresponding pointers in static library and does not have corresponding function\n *   in dynamic library.\n * @see void __itt_sync_acquired(void *addr);\n */\nvoid ITTAPI __itt_fsync_acquired(void *addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, fsync_acquired, 
(void *addr))\n#define __itt_fsync_acquired     ITTNOTIFY_VOID(fsync_acquired)\n#define __itt_fsync_acquired_ptr ITTNOTIFY_NAME(fsync_acquired)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_fsync_acquired(addr)\n#define __itt_fsync_acquired_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_fsync_acquired_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup legacy\n * @deprecated Legacy API\n * @brief Fast synchronization which does no require spinning.\n * - This special function is to be used by TBB and OpenMP libraries only when they know\n *   there is no spin but they need to suppress TC warnings about shared variable modifications.\n * - It only has corresponding pointers in static library and does not have corresponding function\n *   in dynamic library.\n * @see void __itt_sync_releasing(void* addr);\n */\nvoid ITTAPI __itt_fsync_releasing(void* addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, fsync_releasing, (void *addr))\n#define __itt_fsync_releasing     ITTNOTIFY_VOID(fsync_releasing)\n#define __itt_fsync_releasing_ptr ITTNOTIFY_NAME(fsync_releasing)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_fsync_releasing(addr)\n#define __itt_fsync_releasing_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_fsync_releasing_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} */\n\n/**\n * @defgroup model Modeling by Intel(R) Parallel Advisor\n * @ingroup public\n * This is the subset of itt used for modeling by Intel(R) Parallel Advisor.\n * This API is called ONLY using annotate.h, by \"Annotation\" macros\n * the user places in their sources during the parallelism modeling steps.\n *\n * site_begin/end and task_begin/end take the address of handle variables,\n * which are writeable by the API.  
Handles must be 0 initialized prior\n * to the first call to begin, or may cause a run-time failure.\n * The handles are initialized in a multi-thread safe way by the API if\n * the handle is 0.  The commonly expected idiom is one static handle to\n * identify a site or task.  If a site or task of the same name has already\n * been started during this collection, the same handle MAY be returned,\n * but is not required to be - it is unspecified if data merging is done\n * based on name.  These routines also take an instance variable.  Like\n * the lexical instance, these must be 0 initialized.  Unlike the lexical\n * instance, this is used to track a single dynamic instance.\n *\n * API used by the Intel(R) Parallel Advisor to describe potential concurrency\n * and related activities. User-added source annotations expand to calls\n * to these procedures to enable modeling of a hypothetical concurrent\n * execution serially.\n * @{\n */\n#if !defined(_ADVISOR_ANNOTATE_H_) || defined(ANNOTATE_EXPAND_NULL)\n\ntypedef void* __itt_model_site;             /*!< @brief handle for lexical site     */\ntypedef void* __itt_model_site_instance;    /*!< @brief handle for dynamic instance */\ntypedef void* __itt_model_task;             /*!< @brief handle for lexical site     */\ntypedef void* __itt_model_task_instance;    /*!< @brief handle for dynamic instance */\n\n/**\n * @enum __itt_model_disable\n * @brief Enumerator for the disable methods\n */\ntypedef enum {\n    __itt_model_disable_observation,\n    __itt_model_disable_collection\n} __itt_model_disable;\n\n#endif /* !_ADVISOR_ANNOTATE_H_ || ANNOTATE_EXPAND_NULL */\n\n/**\n * @brief ANNOTATE_SITE_BEGIN/ANNOTATE_SITE_END support.\n *\n * site_begin/end model a potential concurrency site.\n * site instances may be recursively nested with themselves.\n * site_end exits the most recently started but unended site for the current\n * thread.  
The handle passed to end may be used to validate structure.\n * Instances of a site encountered on different threads concurrently\n * are considered completely distinct. If the site name for two different\n * lexical sites match, it is unspecified whether they are treated as the\n * same or different for data presentation.\n */\nvoid ITTAPI __itt_model_site_begin(__itt_model_site *site, __itt_model_site_instance *instance, const char *name);\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nvoid ITTAPI __itt_model_site_beginW(const wchar_t *name);\n#endif\nvoid ITTAPI __itt_model_site_beginA(const char *name);\nvoid ITTAPI __itt_model_site_beginAL(const char *name, size_t siteNameLen);\nvoid ITTAPI __itt_model_site_end  (__itt_model_site *site, __itt_model_site_instance *instance);\nvoid ITTAPI __itt_model_site_end_2(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_site_begin,  (__itt_model_site *site, __itt_model_site_instance *instance, const char *name))\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, model_site_beginW,  (const wchar_t *name))\n#endif\nITT_STUBV(ITTAPI, void, model_site_beginA,  (const char *name))\nITT_STUBV(ITTAPI, void, model_site_beginAL,  (const char *name, size_t siteNameLen))\nITT_STUBV(ITTAPI, void, model_site_end,    (__itt_model_site *site, __itt_model_site_instance *instance))\nITT_STUBV(ITTAPI, void, model_site_end_2,  (void))\n#define __itt_model_site_begin      ITTNOTIFY_VOID(model_site_begin)\n#define __itt_model_site_begin_ptr  ITTNOTIFY_NAME(model_site_begin)\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_model_site_beginW      ITTNOTIFY_VOID(model_site_beginW)\n#define __itt_model_site_beginW_ptr  ITTNOTIFY_NAME(model_site_beginW)\n#endif\n#define __itt_model_site_beginA      ITTNOTIFY_VOID(model_site_beginA)\n#define __itt_model_site_beginA_ptr  ITTNOTIFY_NAME(model_site_beginA)\n#define __itt_model_site_beginAL      
ITTNOTIFY_VOID(model_site_beginAL)\n#define __itt_model_site_beginAL_ptr  ITTNOTIFY_NAME(model_site_beginAL)\n#define __itt_model_site_end        ITTNOTIFY_VOID(model_site_end)\n#define __itt_model_site_end_ptr    ITTNOTIFY_NAME(model_site_end)\n#define __itt_model_site_end_2        ITTNOTIFY_VOID(model_site_end_2)\n#define __itt_model_site_end_2_ptr    ITTNOTIFY_NAME(model_site_end_2)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_site_begin(site, instance, name)\n#define __itt_model_site_begin_ptr  0\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_model_site_beginW(name)\n#define __itt_model_site_beginW_ptr  0\n#endif\n#define __itt_model_site_beginA(name)\n#define __itt_model_site_beginA_ptr  0\n#define __itt_model_site_beginAL(name, siteNameLen)\n#define __itt_model_site_beginAL_ptr  0\n#define __itt_model_site_end(site, instance)\n#define __itt_model_site_end_ptr    0\n#define __itt_model_site_end_2()\n#define __itt_model_site_end_2_ptr    0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_site_begin_ptr  0\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_model_site_beginW_ptr  0\n#endif\n#define __itt_model_site_beginA_ptr  0\n#define __itt_model_site_beginAL_ptr  0\n#define __itt_model_site_end_ptr    0\n#define __itt_model_site_end_2_ptr    0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief ANNOTATE_TASK_BEGIN/ANNOTATE_TASK_END support\n *\n * task_begin/end model a potential task, which is contained within the most\n * closely enclosing dynamic site.  task_end exits the most recently started\n * but unended task.  The handle passed to end may be used to validate\n * structure.  It is unspecified if bad dynamic nesting is detected.  If it\n * is, it should be encoded in the resulting data collection.  
The collector\n * should not fail due to construct nesting issues, nor attempt to directly\n * indicate the problem.\n */\nvoid ITTAPI __itt_model_task_begin(__itt_model_task *task, __itt_model_task_instance *instance, const char *name);\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nvoid ITTAPI __itt_model_task_beginW(const wchar_t *name);\nvoid ITTAPI __itt_model_iteration_taskW(const wchar_t *name);\n#endif\nvoid ITTAPI __itt_model_task_beginA(const char *name);\nvoid ITTAPI __itt_model_task_beginAL(const char *name, size_t taskNameLen);\nvoid ITTAPI __itt_model_iteration_taskA(const char *name);\nvoid ITTAPI __itt_model_iteration_taskAL(const char *name, size_t taskNameLen);\nvoid ITTAPI __itt_model_task_end  (__itt_model_task *task, __itt_model_task_instance *instance);\nvoid ITTAPI __itt_model_task_end_2(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_task_begin,  (__itt_model_task *task, __itt_model_task_instance *instance, const char *name))\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, model_task_beginW,  (const wchar_t *name))\nITT_STUBV(ITTAPI, void, model_iteration_taskW, (const wchar_t *name))\n#endif\nITT_STUBV(ITTAPI, void, model_task_beginA,  (const char *name))\nITT_STUBV(ITTAPI, void, model_task_beginAL,  (const char *name, size_t taskNameLen))\nITT_STUBV(ITTAPI, void, model_iteration_taskA,  (const char *name))\nITT_STUBV(ITTAPI, void, model_iteration_taskAL,  (const char *name, size_t taskNameLen))\nITT_STUBV(ITTAPI, void, model_task_end,    (__itt_model_task *task, __itt_model_task_instance *instance))\nITT_STUBV(ITTAPI, void, model_task_end_2,  (void))\n#define __itt_model_task_begin      ITTNOTIFY_VOID(model_task_begin)\n#define __itt_model_task_begin_ptr  ITTNOTIFY_NAME(model_task_begin)\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_model_task_beginW     ITTNOTIFY_VOID(model_task_beginW)\n#define __itt_model_task_beginW_ptr 
ITTNOTIFY_NAME(model_task_beginW)\n#define __itt_model_iteration_taskW     ITTNOTIFY_VOID(model_iteration_taskW)\n#define __itt_model_iteration_taskW_ptr ITTNOTIFY_NAME(model_iteration_taskW)\n#endif\n#define __itt_model_task_beginA    ITTNOTIFY_VOID(model_task_beginA)\n#define __itt_model_task_beginA_ptr ITTNOTIFY_NAME(model_task_beginA)\n#define __itt_model_task_beginAL    ITTNOTIFY_VOID(model_task_beginAL)\n#define __itt_model_task_beginAL_ptr ITTNOTIFY_NAME(model_task_beginAL)\n#define __itt_model_iteration_taskA    ITTNOTIFY_VOID(model_iteration_taskA)\n#define __itt_model_iteration_taskA_ptr ITTNOTIFY_NAME(model_iteration_taskA)\n#define __itt_model_iteration_taskAL    ITTNOTIFY_VOID(model_iteration_taskAL)\n#define __itt_model_iteration_taskAL_ptr ITTNOTIFY_NAME(model_iteration_taskAL)\n#define __itt_model_task_end        ITTNOTIFY_VOID(model_task_end)\n#define __itt_model_task_end_ptr    ITTNOTIFY_NAME(model_task_end)\n#define __itt_model_task_end_2        ITTNOTIFY_VOID(model_task_end_2)\n#define __itt_model_task_end_2_ptr    ITTNOTIFY_NAME(model_task_end_2)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_task_begin(task, instance, name)\n#define __itt_model_task_begin_ptr  0\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_model_task_beginW(name)\n#define __itt_model_task_beginW_ptr  0\n#endif\n#define __itt_model_task_beginA(name)\n#define __itt_model_task_beginA_ptr  0\n#define __itt_model_task_beginAL(name, siteNameLen)\n#define __itt_model_task_beginAL_ptr  0\n#define __itt_model_iteration_taskA(name)\n#define __itt_model_iteration_taskA_ptr  0\n#define __itt_model_iteration_taskAL(name, siteNameLen)\n#define __itt_model_iteration_taskAL_ptr  0\n#define __itt_model_task_end(task, instance)\n#define __itt_model_task_end_ptr    0\n#define __itt_model_task_end_2()\n#define __itt_model_task_end_2_ptr    0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_task_begin_ptr  0\n#if 
ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_model_task_beginW_ptr 0\n#endif\n#define __itt_model_task_beginA_ptr  0\n#define __itt_model_task_beginAL_ptr  0\n#define __itt_model_iteration_taskA_ptr    0\n#define __itt_model_iteration_taskAL_ptr    0\n#define __itt_model_task_end_ptr    0\n#define __itt_model_task_end_2_ptr    0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief ANNOTATE_LOCK_ACQUIRE/ANNOTATE_LOCK_RELEASE support\n *\n * lock_acquire/release model a potential lock for both lockset and\n * performance modeling.  Each unique address is modeled as a separate\n * lock, with invalid addresses being valid lock IDs.  Specifically:\n * no storage is accessed by the API at the specified address - it is only\n * used for lock identification.  Lock acquires may be self-nested and are\n * unlocked by a corresponding number of releases.\n * (These closely correspond to __itt_sync_acquired/__itt_sync_releasing,\n * but may not have identical semantics.)\n */\nvoid ITTAPI __itt_model_lock_acquire(void *lock);\nvoid ITTAPI __itt_model_lock_acquire_2(void *lock);\nvoid ITTAPI __itt_model_lock_release(void *lock);\nvoid ITTAPI __itt_model_lock_release_2(void *lock);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_lock_acquire, (void *lock))\nITT_STUBV(ITTAPI, void, model_lock_acquire_2, (void *lock))\nITT_STUBV(ITTAPI, void, model_lock_release, (void *lock))\nITT_STUBV(ITTAPI, void, model_lock_release_2, (void *lock))\n#define __itt_model_lock_acquire     ITTNOTIFY_VOID(model_lock_acquire)\n#define __itt_model_lock_acquire_ptr ITTNOTIFY_NAME(model_lock_acquire)\n#define __itt_model_lock_acquire_2     ITTNOTIFY_VOID(model_lock_acquire_2)\n#define __itt_model_lock_acquire_2_ptr ITTNOTIFY_NAME(model_lock_acquire_2)\n#define __itt_model_lock_release     ITTNOTIFY_VOID(model_lock_release)\n#define __itt_model_lock_release_ptr 
ITTNOTIFY_NAME(model_lock_release)\n#define __itt_model_lock_release_2     ITTNOTIFY_VOID(model_lock_release_2)\n#define __itt_model_lock_release_2_ptr ITTNOTIFY_NAME(model_lock_release_2)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_lock_acquire(lock)\n#define __itt_model_lock_acquire_ptr 0\n#define __itt_model_lock_acquire_2(lock)\n#define __itt_model_lock_acquire_2_ptr 0\n#define __itt_model_lock_release(lock)\n#define __itt_model_lock_release_ptr 0\n#define __itt_model_lock_release_2(lock)\n#define __itt_model_lock_release_2_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_lock_acquire_ptr 0\n#define __itt_model_lock_acquire_2_ptr 0\n#define __itt_model_lock_release_ptr 0\n#define __itt_model_lock_release_2_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief ANNOTATE_RECORD_ALLOCATION/ANNOTATE_RECORD_DEALLOCATION support\n *\n * record_allocation/deallocation describe user-defined memory allocator\n * behavior, which may be required for correctness modeling to understand\n * when storage is not expected to be actually reused across threads.\n */\nvoid ITTAPI __itt_model_record_allocation  (void *addr, size_t size);\nvoid ITTAPI __itt_model_record_deallocation(void *addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_record_allocation,   (void *addr, size_t size))\nITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr))\n#define __itt_model_record_allocation       ITTNOTIFY_VOID(model_record_allocation)\n#define __itt_model_record_allocation_ptr   ITTNOTIFY_NAME(model_record_allocation)\n#define __itt_model_record_deallocation     ITTNOTIFY_VOID(model_record_deallocation)\n#define __itt_model_record_deallocation_ptr ITTNOTIFY_NAME(model_record_deallocation)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_record_allocation(addr, size)\n#define 
__itt_model_record_allocation_ptr   0\n#define __itt_model_record_deallocation(addr)\n#define __itt_model_record_deallocation_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_record_allocation_ptr   0\n#define __itt_model_record_deallocation_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief ANNOTATE_INDUCTION_USES support\n *\n * Note particular storage is inductive through the end of the current site\n */\nvoid ITTAPI __itt_model_induction_uses(void* addr, size_t size);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_induction_uses, (void *addr, size_t size))\n#define __itt_model_induction_uses     ITTNOTIFY_VOID(model_induction_uses)\n#define __itt_model_induction_uses_ptr ITTNOTIFY_NAME(model_induction_uses)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_induction_uses(addr, size)\n#define __itt_model_induction_uses_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_induction_uses_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief ANNOTATE_REDUCTION_USES support\n *\n * Note particular storage is used for reduction through the end\n * of the current site\n */\nvoid ITTAPI __itt_model_reduction_uses(void* addr, size_t size);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_reduction_uses, (void *addr, size_t size))\n#define __itt_model_reduction_uses     ITTNOTIFY_VOID(model_reduction_uses)\n#define __itt_model_reduction_uses_ptr ITTNOTIFY_NAME(model_reduction_uses)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_reduction_uses(addr, size)\n#define __itt_model_reduction_uses_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_reduction_uses_ptr   0\n#endif /* INTEL_NO_MACRO_BODY 
*/\n/** @endcond */\n\n/**\n * @brief ANNOTATE_OBSERVE_USES support\n *\n * Have correctness modeling record observations about uses of storage\n * through the end of the current site\n */\nvoid ITTAPI __itt_model_observe_uses(void* addr, size_t size);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_observe_uses, (void *addr, size_t size))\n#define __itt_model_observe_uses     ITTNOTIFY_VOID(model_observe_uses)\n#define __itt_model_observe_uses_ptr ITTNOTIFY_NAME(model_observe_uses)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_observe_uses(addr, size)\n#define __itt_model_observe_uses_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_observe_uses_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief ANNOTATE_CLEAR_USES support\n *\n * Clear the special handling of a piece of storage related to induction,\n * reduction or observe_uses\n */\nvoid ITTAPI __itt_model_clear_uses(void* addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_clear_uses, (void *addr))\n#define __itt_model_clear_uses     ITTNOTIFY_VOID(model_clear_uses)\n#define __itt_model_clear_uses_ptr ITTNOTIFY_NAME(model_clear_uses)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_clear_uses(addr)\n#define __itt_model_clear_uses_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_clear_uses_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief ANNOTATE_DISABLE_*_PUSH/ANNOTATE_DISABLE_*_POP support\n *\n * disable_push/disable_pop push and pop disabling based on a parameter.\n * Disabling observations stops processing of memory references during\n * correctness modeling, and all annotations that occur in the disabled\n * region.  
This allows description of code that is expected to be handled\n * specially during conversion to parallelism or that is not recognized\n * by tools (e.g. some kinds of synchronization operations.)\n * This mechanism causes all annotations in the disabled region, other\n * than disable_push and disable_pop, to be ignored.  (For example, this\n * might validly be used to disable an entire parallel site and the contained\n * tasks and locking in it for data collection purposes.)\n * The disable for collection is a more expensive operation, but reduces\n * collector overhead significantly.  This applies to BOTH correctness data\n * collection and performance data collection.  For example, a site\n * containing a task might only enable data collection for the first 10\n * iterations.  Both performance and correctness data should reflect this,\n * and the program should run as close to full speed as possible when\n * collection is disabled.\n */\nvoid ITTAPI __itt_model_disable_push(__itt_model_disable x);\nvoid ITTAPI __itt_model_disable_pop(void);\nvoid ITTAPI __itt_model_aggregate_task(size_t x);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, model_disable_push, (__itt_model_disable x))\nITT_STUBV(ITTAPI, void, model_disable_pop,  (void))\nITT_STUBV(ITTAPI, void, model_aggregate_task, (size_t x))\n#define __itt_model_disable_push     ITTNOTIFY_VOID(model_disable_push)\n#define __itt_model_disable_push_ptr ITTNOTIFY_NAME(model_disable_push)\n#define __itt_model_disable_pop      ITTNOTIFY_VOID(model_disable_pop)\n#define __itt_model_disable_pop_ptr  ITTNOTIFY_NAME(model_disable_pop)\n#define __itt_model_aggregate_task      ITTNOTIFY_VOID(model_aggregate_task)\n#define __itt_model_aggregate_task_ptr  ITTNOTIFY_NAME(model_aggregate_task)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_model_disable_push(x)\n#define __itt_model_disable_push_ptr 0\n#define 
__itt_model_disable_pop()\n#define __itt_model_disable_pop_ptr 0\n#define __itt_model_aggregate_task(x)\n#define __itt_model_aggregate_task_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_model_disable_push_ptr 0\n#define __itt_model_disable_pop_ptr 0\n#define __itt_model_aggregate_task_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} model group */\n\n/**\n * @defgroup heap Heap\n * @ingroup public\n * Heap group\n * @{\n */\n\ntypedef void* __itt_heap_function;\n\n/**\n * @brief Create an identification for heap function\n * @return non-zero identifier or NULL\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n__itt_heap_function ITTAPI __itt_heap_function_createA(const char*    name, const char*    domain);\n__itt_heap_function ITTAPI __itt_heap_function_createW(const wchar_t* name, const wchar_t* domain);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_heap_function_create     __itt_heap_function_createW\n#  define __itt_heap_function_create_ptr __itt_heap_function_createW_ptr\n#else\n#  define __itt_heap_function_create     __itt_heap_function_createA\n#  define __itt_heap_function_create_ptr __itt_heap_function_createA_ptr\n#endif /* UNICODE */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n__itt_heap_function ITTAPI __itt_heap_function_create(const char* name, const char* domain);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char*    name, const char*    domain))\nITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t* name, const wchar_t* domain))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_heap_function, heap_function_create,  (const char*    name, const char*    domain))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if 
ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_heap_function_createA     ITTNOTIFY_DATA(heap_function_createA)\n#define __itt_heap_function_createA_ptr ITTNOTIFY_NAME(heap_function_createA)\n#define __itt_heap_function_createW     ITTNOTIFY_DATA(heap_function_createW)\n#define __itt_heap_function_createW_ptr ITTNOTIFY_NAME(heap_function_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_heap_function_create      ITTNOTIFY_DATA(heap_function_create)\n#define __itt_heap_function_create_ptr  ITTNOTIFY_NAME(heap_function_create)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_heap_function_createA(name, domain) (__itt_heap_function)0\n#define __itt_heap_function_createA_ptr 0\n#define __itt_heap_function_createW(name, domain) (__itt_heap_function)0\n#define __itt_heap_function_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_heap_function_create(name, domain)  (__itt_heap_function)0\n#define __itt_heap_function_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_heap_function_createA_ptr 0\n#define __itt_heap_function_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_heap_function_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an allocation begin occurrence.\n */\nvoid ITTAPI __itt_heap_allocate_begin(__itt_heap_function h, size_t size, int initialized);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_allocate_begin, (__itt_heap_function h, size_t size, int initialized))\n#define __itt_heap_allocate_begin     ITTNOTIFY_VOID(heap_allocate_begin)\n#define __itt_heap_allocate_begin_ptr ITTNOTIFY_NAME(heap_allocate_begin)\n#else  
/* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_allocate_begin(h, size, initialized)\n#define __itt_heap_allocate_begin_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_allocate_begin_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an allocation end occurrence.\n */\nvoid ITTAPI __itt_heap_allocate_end(__itt_heap_function h, void** addr, size_t size, int initialized);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void** addr, size_t size, int initialized))\n#define __itt_heap_allocate_end     ITTNOTIFY_VOID(heap_allocate_end)\n#define __itt_heap_allocate_end_ptr ITTNOTIFY_NAME(heap_allocate_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_allocate_end(h, addr, size, initialized)\n#define __itt_heap_allocate_end_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_allocate_end_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an free begin occurrence.\n */\nvoid ITTAPI __itt_heap_free_begin(__itt_heap_function h, void* addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr))\n#define __itt_heap_free_begin     ITTNOTIFY_VOID(heap_free_begin)\n#define __itt_heap_free_begin_ptr ITTNOTIFY_NAME(heap_free_begin)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_free_begin(h, addr)\n#define __itt_heap_free_begin_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_free_begin_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an free end occurrence.\n */\nvoid ITTAPI __itt_heap_free_end(__itt_heap_function h, void* addr);\n\n/** @cond 
exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr))\n#define __itt_heap_free_end     ITTNOTIFY_VOID(heap_free_end)\n#define __itt_heap_free_end_ptr ITTNOTIFY_NAME(heap_free_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_free_end(h, addr)\n#define __itt_heap_free_end_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_free_end_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an reallocation begin occurrence.\n */\nvoid ITTAPI __itt_heap_reallocate_begin(__itt_heap_function h, void* addr, size_t new_size, int initialized);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized))\n#define __itt_heap_reallocate_begin     ITTNOTIFY_VOID(heap_reallocate_begin)\n#define __itt_heap_reallocate_begin_ptr ITTNOTIFY_NAME(heap_reallocate_begin)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_reallocate_begin(h, addr, new_size, initialized)\n#define __itt_heap_reallocate_begin_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_reallocate_begin_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an reallocation end occurrence.\n */\nvoid ITTAPI __itt_heap_reallocate_end(__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_reallocate_end, (__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized))\n#define __itt_heap_reallocate_end     ITTNOTIFY_VOID(heap_reallocate_end)\n#define __itt_heap_reallocate_end_ptr 
ITTNOTIFY_NAME(heap_reallocate_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_reallocate_end(h, addr, new_addr, new_size, initialized)\n#define __itt_heap_reallocate_end_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_reallocate_end_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @brief internal access begin */\nvoid ITTAPI __itt_heap_internal_access_begin(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_internal_access_begin,  (void))\n#define __itt_heap_internal_access_begin      ITTNOTIFY_VOID(heap_internal_access_begin)\n#define __itt_heap_internal_access_begin_ptr  ITTNOTIFY_NAME(heap_internal_access_begin)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_internal_access_begin()\n#define __itt_heap_internal_access_begin_ptr  0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_internal_access_begin_ptr  0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @brief internal access end */\nvoid ITTAPI __itt_heap_internal_access_end(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_internal_access_end, (void))\n#define __itt_heap_internal_access_end     ITTNOTIFY_VOID(heap_internal_access_end)\n#define __itt_heap_internal_access_end_ptr ITTNOTIFY_NAME(heap_internal_access_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_internal_access_end()\n#define __itt_heap_internal_access_end_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_internal_access_end_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @brief record memory growth begin */\nvoid ITTAPI __itt_heap_record_memory_growth_begin(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef 
INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_record_memory_growth_begin,  (void))\n#define __itt_heap_record_memory_growth_begin      ITTNOTIFY_VOID(heap_record_memory_growth_begin)\n#define __itt_heap_record_memory_growth_begin_ptr  ITTNOTIFY_NAME(heap_record_memory_growth_begin)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_record_memory_growth_begin()\n#define __itt_heap_record_memory_growth_begin_ptr  0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_record_memory_growth_begin_ptr  0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @brief record memory growth end */\nvoid ITTAPI __itt_heap_record_memory_growth_end(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_record_memory_growth_end, (void))\n#define __itt_heap_record_memory_growth_end     ITTNOTIFY_VOID(heap_record_memory_growth_end)\n#define __itt_heap_record_memory_growth_end_ptr ITTNOTIFY_NAME(heap_record_memory_growth_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_record_memory_growth_end()\n#define __itt_heap_record_memory_growth_end_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_record_memory_growth_end_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Specify the type of heap detection/reporting to modify.\n */\n/**\n * @hideinitializer \n * @brief Report on memory leaks.\n */\n#define __itt_heap_leaks 0x00000001\n\n/**\n * @hideinitializer \n * @brief Report on memory growth.\n */\n#define __itt_heap_growth 0x00000002\n\n\n/** @brief heap reset detection */\nvoid ITTAPI __itt_heap_reset_detection(unsigned int reset_mask);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_reset_detection,  (unsigned int reset_mask))\n#define __itt_heap_reset_detection      
ITTNOTIFY_VOID(heap_reset_detection)\n#define __itt_heap_reset_detection_ptr  ITTNOTIFY_NAME(heap_reset_detection)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_reset_detection()\n#define __itt_heap_reset_detection_ptr  0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_reset_detection_ptr  0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @brief report */\nvoid ITTAPI __itt_heap_record(unsigned int record_mask);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, heap_record, (unsigned int record_mask))\n#define __itt_heap_record     ITTNOTIFY_VOID(heap_record)\n#define __itt_heap_record_ptr ITTNOTIFY_NAME(heap_record)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_heap_record()\n#define __itt_heap_record_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_heap_record_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @} heap group */\n/** @endcond */\n/* ========================================================================== */\n\n/**\n * @defgroup domains Domains\n * @ingroup public\n * Domains group\n * @{\n */\n\n/** @cond exclude_from_documentation */\n#pragma pack(push, 8)\n\ntypedef struct ___itt_domain\n{\n    volatile int flags; /*!< Zero if disabled, non-zero if enabled. The meaning of different non-zero values is reserved to the runtime */\n    const char* nameA;  /*!< Copy of original name in ASCII. */\n#if defined(UNICODE) || defined(_UNICODE)\n    const wchar_t* nameW; /*!< Copy of original name in UNICODE. 
*/\n#else  /* UNICODE || _UNICODE */\n    void* nameW;\n#endif /* UNICODE || _UNICODE */\n    int   extra1; /*!< Reserved to the runtime */\n    void* extra2; /*!< Reserved to the runtime */\n    struct ___itt_domain* next;\n} __itt_domain;\n\n#pragma pack(pop)\n/** @endcond */\n\n/**\n * @ingroup domains\n * @brief Create a domain.\n * Create domain using some domain name: the URI naming style is recommended.\n * Because the set of domains is expected to be static over the application's \n * execution time, there is no mechanism to destroy a domain.\n * Any domain can be accessed by any thread in the process, regardless of\n * which thread created the domain. This call is thread-safe.\n * @param[in] name name of domain\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n__itt_domain* ITTAPI __itt_domain_createA(const char    *name);\n__itt_domain* ITTAPI __itt_domain_createW(const wchar_t *name);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_domain_create     __itt_domain_createW\n#  define __itt_domain_create_ptr __itt_domain_createW_ptr\n#else /* UNICODE */\n#  define __itt_domain_create     __itt_domain_createA\n#  define __itt_domain_create_ptr __itt_domain_createA_ptr\n#endif /* UNICODE */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n__itt_domain* ITTAPI __itt_domain_create(const char *name);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_domain*, domain_createA, (const char    *name))\nITT_STUB(ITTAPI, __itt_domain*, domain_createW, (const wchar_t *name))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_domain*, domain_create,  (const char    *name))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_domain_createA     ITTNOTIFY_DATA(domain_createA)\n#define __itt_domain_createA_ptr ITTNOTIFY_NAME(domain_createA)\n#define 
__itt_domain_createW     ITTNOTIFY_DATA(domain_createW)\n#define __itt_domain_createW_ptr ITTNOTIFY_NAME(domain_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_domain_create     ITTNOTIFY_DATA(domain_create)\n#define __itt_domain_create_ptr ITTNOTIFY_NAME(domain_create)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_domain_createA(name) (__itt_domain*)0\n#define __itt_domain_createA_ptr 0\n#define __itt_domain_createW(name) (__itt_domain*)0\n#define __itt_domain_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_domain_create(name)  (__itt_domain*)0\n#define __itt_domain_create_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_domain_createA_ptr 0\n#define __itt_domain_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_domain_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} domains group */\n\n/**\n * @defgroup ids IDs\n * @ingroup public\n * IDs group\n * @{\n */\n\n/** @cond exclude_from_documentation */\n#pragma pack(push, 8)\n\ntypedef struct ___itt_id\n{\n    unsigned long long d1, d2, d3;\n} __itt_id;\n\n#pragma pack(pop)\n/** @endcond */\n\nstatic const __itt_id __itt_null = { 0, 0, 0 };\n\n/**\n * @ingroup ids\n * @brief A convenience function is provided to create an ID without domain control.\n * @brief This is a convenience function to initialize an __itt_id structure. This function\n * does not affect the trace collector runtime in any way. 
After you make the ID with this\n * function, you still must create it with the __itt_id_create function before using the ID\n * to identify a named entity.\n * @param[in] addr The address of object; high QWORD of the ID value.\n * @param[in] extra The extra data to unique identify object; low QWORD of the ID value.\n */\n\nITT_INLINE __itt_id ITTAPI __itt_id_make(void* addr, unsigned long long extra) ITT_INLINE_ATTRIBUTE;\nITT_INLINE __itt_id ITTAPI __itt_id_make(void* addr, unsigned long long extra)\n{\n    __itt_id id = __itt_null;\n    id.d1 = (unsigned long long)((uintptr_t)addr);\n    id.d2 = (unsigned long long)extra;\n    id.d3 = (unsigned long long)0; /* Reserved. Must be zero */\n    return id;\n}\n\n/**\n * @ingroup ids\n * @brief Create an instance of identifier.\n * This establishes the beginning of the lifetime of an instance of\n * the given ID in the trace. Once this lifetime starts, the ID\n * can be used to tag named entity instances in calls such as\n * __itt_task_begin, and to specify relationships among\n * identified named entity instances, using the \\ref relations APIs.\n * Instance IDs are not domain specific!\n * @param[in] domain The domain controlling the execution of this call.\n * @param[in] id The ID to create.\n */\nvoid ITTAPI __itt_id_create(const __itt_domain *domain, __itt_id id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, id_create, (const __itt_domain *domain, __itt_id id))\n#define __itt_id_create(d,x) ITTNOTIFY_VOID_D1(id_create,d,x)\n#define __itt_id_create_ptr  ITTNOTIFY_NAME(id_create)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_id_create(domain,id)\n#define __itt_id_create_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_id_create_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup ids\n * @brief Destroy an instance of identifier.\n * This ends the lifetime of 
the current instance of the given ID value in the trace.\n * Any relationships that are established after this lifetime ends are invalid.\n * This call must be performed before the given ID value can be reused for a different \n * named entity instance.\n * @param[in] domain The domain controlling the execution of this call.\n * @param[in] id The ID to destroy.\n */\nvoid ITTAPI __itt_id_destroy(const __itt_domain *domain, __itt_id id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, id_destroy, (const __itt_domain *domain, __itt_id id))\n#define __itt_id_destroy(d,x) ITTNOTIFY_VOID_D1(id_destroy,d,x)\n#define __itt_id_destroy_ptr  ITTNOTIFY_NAME(id_destroy)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_id_destroy(domain,id)\n#define __itt_id_destroy_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_id_destroy_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} ids group */\n\n/**\n * @defgroup handless String Handles\n * @ingroup public\n * String Handles group\n * @{\n */\n\n/** @cond exclude_from_documentation */\n#pragma pack(push, 8)\n\ntypedef struct ___itt_string_handle\n{\n    const char* strA; /*!< Copy of original string in ASCII. */\n#if defined(UNICODE) || defined(_UNICODE)\n    const wchar_t* strW; /*!< Copy of original string in UNICODE. */\n#else  /* UNICODE || _UNICODE */\n    void* strW;\n#endif /* UNICODE || _UNICODE */\n    int   extra1; /*!< Reserved. Must be zero   */\n    void* extra2; /*!< Reserved. Must be zero   */\n    struct ___itt_string_handle* next;\n} __itt_string_handle;\n\n#pragma pack(pop)\n/** @endcond */\n\n/**\n * @ingroup handles\n * @brief Create a string handle.\n * Create and return handle value that can be associated with a string.\n * Consecutive calls to __itt_string_handle_create with the same name\n * return the same value. 
Because the set of string handles is expected to remain\n * static during the application's execution time, there is no mechanism to destroy a string handle.\n * Any string handle can be accessed by any thread in the process, regardless of which thread created\n * the string handle. This call is thread-safe.\n * @param[in] name The input string\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n__itt_string_handle* ITTAPI __itt_string_handle_createA(const char    *name);\n__itt_string_handle* ITTAPI __itt_string_handle_createW(const wchar_t *name);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_string_handle_create     __itt_string_handle_createW\n#  define __itt_string_handle_create_ptr __itt_string_handle_createW_ptr\n#else /* UNICODE */\n#  define __itt_string_handle_create     __itt_string_handle_createA\n#  define __itt_string_handle_create_ptr __itt_string_handle_createA_ptr\n#endif /* UNICODE */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n__itt_string_handle* ITTAPI __itt_string_handle_create(const char *name);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createA, (const char    *name))\nITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createW, (const wchar_t *name))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_string_handle*, string_handle_create,  (const char    *name))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_string_handle_createA     ITTNOTIFY_DATA(string_handle_createA)\n#define __itt_string_handle_createA_ptr ITTNOTIFY_NAME(string_handle_createA)\n#define __itt_string_handle_createW     ITTNOTIFY_DATA(string_handle_createW)\n#define __itt_string_handle_createW_ptr ITTNOTIFY_NAME(string_handle_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define 
__itt_string_handle_create     ITTNOTIFY_DATA(string_handle_create)\n#define __itt_string_handle_create_ptr ITTNOTIFY_NAME(string_handle_create)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_string_handle_createA(name) (__itt_string_handle*)0\n#define __itt_string_handle_createA_ptr 0\n#define __itt_string_handle_createW(name) (__itt_string_handle*)0\n#define __itt_string_handle_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_string_handle_create(name)  (__itt_string_handle*)0\n#define __itt_string_handle_create_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_string_handle_createA_ptr 0\n#define __itt_string_handle_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_string_handle_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} handles group */\n\n/** @cond exclude_from_documentation */\ntypedef unsigned long long __itt_timestamp;\n/** @endcond */\n\nstatic const __itt_timestamp __itt_timestamp_none = (__itt_timestamp)-1LL;\n\n/** @cond exclude_from_gpa_documentation */\n\n/**\n * @ingroup timestamps\n * @brief Return timestamp corresponding to the current moment.\n * This returns the timestamp in the format that is the most relevant for the current\n * host or platform (RDTSC, QPC, and others). 
You can use the \"<\" operator to\n * compare __itt_timestamp values.\n */\n__itt_timestamp ITTAPI __itt_get_timestamp(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_timestamp, get_timestamp, (void))\n#define __itt_get_timestamp      ITTNOTIFY_DATA(get_timestamp)\n#define __itt_get_timestamp_ptr  ITTNOTIFY_NAME(get_timestamp)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_get_timestamp()\n#define __itt_get_timestamp_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_get_timestamp_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} timestamps */\n/** @endcond */\n\n/** @cond exclude_from_gpa_documentation */\n\n/**\n * @defgroup regions Regions\n * @ingroup public\n * Regions group\n * @{\n */\n/**\n * @ingroup regions\n * @brief Begin of region instance.\n * Successive calls to __itt_region_begin with the same ID are ignored\n * until a call to __itt_region_end with the same ID\n * @param[in] domain The domain for this region instance\n * @param[in] id The instance ID for this region instance. Must not be __itt_null\n * @param[in] parentid The instance ID for the parent of this region instance, or __itt_null\n * @param[in] name The name of this region\n */\nvoid ITTAPI __itt_region_begin(const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name);\n\n/**\n * @ingroup regions\n * @brief End of region instance.\n * The first call to __itt_region_end with a given ID ends the\n * region. 
Successive calls with the same ID are ignored, as are\n * calls that do not have a matching __itt_region_begin call.\n * @param[in] domain The domain for this region instance\n * @param[in] id The instance ID for this region instance\n */\nvoid ITTAPI __itt_region_end(const __itt_domain *domain, __itt_id id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, region_begin, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name))\nITT_STUBV(ITTAPI, void, region_end,   (const __itt_domain *domain, __itt_id id))\n#define __itt_region_begin(d,x,y,z) ITTNOTIFY_VOID_D3(region_begin,d,x,y,z)\n#define __itt_region_begin_ptr      ITTNOTIFY_NAME(region_begin)\n#define __itt_region_end(d,x)       ITTNOTIFY_VOID_D1(region_end,d,x)\n#define __itt_region_end_ptr        ITTNOTIFY_NAME(region_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_region_begin(d,x,y,z)\n#define __itt_region_begin_ptr 0\n#define __itt_region_end(d,x)\n#define __itt_region_end_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_region_begin_ptr 0\n#define __itt_region_end_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} regions group */\n\n/**\n * @defgroup frames Frames\n * @ingroup public\n * Frames are similar to regions, but are intended to be easier to use and to implement.\n * In particular:\n * - Frames always represent periods of elapsed time\n * - By default, frames have no nesting relationships\n * @{\n */\n\n/**\n * @ingroup frames\n * @brief Begin a frame instance.\n * Successive calls to __itt_frame_begin with the\n * same ID are ignored until a call to __itt_frame_end with the same ID.\n * @param[in] domain The domain for this frame instance\n * @param[in] id The instance ID for this frame instance or NULL\n */\nvoid ITTAPI __itt_frame_begin_v3(const __itt_domain *domain, __itt_id *id);\n\n/**\n * @ingroup frames\n * 
@brief End a frame instance.\n * The first call to __itt_frame_end with a given ID\n * ends the frame. Successive calls with the same ID are ignored, as are\n * calls that do not have a matching __itt_frame_begin call.\n * @param[in] domain The domain for this frame instance\n * @param[in] id The instance ID for this frame instance or NULL for current\n */\nvoid ITTAPI __itt_frame_end_v3(const __itt_domain *domain, __itt_id *id);\n\n/**\n * @ingroup frames\n * @brief Submits a frame instance.\n * Successive calls to __itt_frame_begin or __itt_frame_submit with the\n * same ID are ignored until a call to __itt_frame_end or __itt_frame_submit\n * with the same ID.\n * Passing special __itt_timestamp_none value as \"end\" argument means\n * take the current timestamp as the end timestamp.\n * @param[in] domain The domain for this frame instance\n * @param[in] id The instance ID for this frame instance or NULL\n * @param[in] begin Timestamp of the beggining of the frame\n * @param[in] end Timestamp of the end of the frame\n */\nvoid ITTAPI __itt_frame_submit_v3(const __itt_domain *domain, __itt_id *id,\n    __itt_timestamp begin, __itt_timestamp end);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, frame_begin_v3,  (const __itt_domain *domain, __itt_id *id))\nITT_STUBV(ITTAPI, void, frame_end_v3,    (const __itt_domain *domain, __itt_id *id))\nITT_STUBV(ITTAPI, void, frame_submit_v3, (const __itt_domain *domain, __itt_id *id, __itt_timestamp begin, __itt_timestamp end))\n#define __itt_frame_begin_v3(d,x)      ITTNOTIFY_VOID_D1(frame_begin_v3,d,x)\n#define __itt_frame_begin_v3_ptr       ITTNOTIFY_NAME(frame_begin_v3)\n#define __itt_frame_end_v3(d,x)        ITTNOTIFY_VOID_D1(frame_end_v3,d,x)\n#define __itt_frame_end_v3_ptr         ITTNOTIFY_NAME(frame_end_v3)\n#define __itt_frame_submit_v3(d,x,b,e) ITTNOTIFY_VOID_D3(frame_submit_v3,d,x,b,e)\n#define __itt_frame_submit_v3_ptr      
ITTNOTIFY_NAME(frame_submit_v3)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_frame_begin_v3(domain,id)\n#define __itt_frame_begin_v3_ptr 0\n#define __itt_frame_end_v3(domain,id)\n#define __itt_frame_end_v3_ptr   0\n#define __itt_frame_submit_v3(domain,id,begin,end)\n#define __itt_frame_submit_v3_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_frame_begin_v3_ptr 0\n#define __itt_frame_end_v3_ptr   0\n#define __itt_frame_submit_v3_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} frames group */\n/** @endcond */\n\n/**\n * @defgroup taskgroup Task Group\n * @ingroup public\n * Task Group\n * @{\n */\n/**\n * @ingroup task_groups\n * @brief Denotes a task_group instance.\n * Successive calls to __itt_task_group with the same ID are ignored.\n * @param[in] domain The domain for this task_group instance\n * @param[in] id The instance ID for this task_group instance. Must not be __itt_null.\n * @param[in] parentid The instance ID for the parent of this task_group instance, or __itt_null.\n * @param[in] name The name of this task_group\n */\nvoid ITTAPI __itt_task_group(const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, task_group, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name))\n#define __itt_task_group(d,x,y,z) ITTNOTIFY_VOID_D3(task_group,d,x,y,z)\n#define __itt_task_group_ptr      ITTNOTIFY_NAME(task_group)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_task_group(d,x,y,z)\n#define __itt_task_group_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_task_group_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} taskgroup group */\n\n/**\n * @defgroup tasks Tasks\n * @ingroup public\n * A task instance represents a piece of work performed 
by a particular\n * thread for a period of time. A call to __itt_task_begin creates a\n * task instance. This becomes the current instance for that task on that\n * thread. A following call to __itt_task_end on the same thread ends the\n * instance. There may be multiple simultaneous instances of tasks with the\n * same name on different threads. If an ID is specified, the task instance\n * receives that ID. Nested tasks are allowed.\n *\n * Note: The task is defined by the bracketing of __itt_task_begin and\n * __itt_task_end on the same thread. If some scheduling mechanism causes\n * task switching (the thread executes a different user task) or task\n * switching (the user task switches to a different thread) then this breaks\n * the notion of  current instance. Additional API calls are required to\n * deal with that possibility.\n * @{\n */\n\n/**\n * @ingroup tasks\n * @brief Begin a task instance.\n * @param[in] domain The domain for this task\n * @param[in] taskid The instance ID for this task instance, or __itt_null\n * @param[in] parentid The parent instance to which this task instance belongs, or __itt_null\n * @param[in] name The name of this task\n */\nvoid ITTAPI __itt_task_begin(const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name);\n\n/**\n * @ingroup tasks\n * @brief Begin a task instance.\n * @param[in] domain The domain for this task\n * @param[in] taskid The identifier for this task instance (may be 0)\n * @param[in] parentid The parent of this task (may be 0)\n * @param[in] fn The pointer to the function you are tracing\n */\nvoid ITTAPI __itt_task_begin_fn(const __itt_domain *domain, __itt_id taskid, __itt_id parentid, void* fn);\n\n/**\n * @ingroup tasks\n * @brief End the current task instance.\n * @param[in] domain The domain for this task\n */\nvoid ITTAPI __itt_task_end(const __itt_domain *domain);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef 
INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, task_begin,    (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name))\nITT_STUBV(ITTAPI, void, task_begin_fn, (const __itt_domain *domain, __itt_id id, __itt_id parentid, void* fn))\nITT_STUBV(ITTAPI, void, task_end,      (const __itt_domain *domain))\n#define __itt_task_begin(d,x,y,z)    ITTNOTIFY_VOID_D3(task_begin,d,x,y,z)\n#define __itt_task_begin_ptr         ITTNOTIFY_NAME(task_begin)\n#define __itt_task_begin_fn(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_fn,d,x,y,z)\n#define __itt_task_begin_fn_ptr      ITTNOTIFY_NAME(task_begin_fn)\n#define __itt_task_end(d)            ITTNOTIFY_VOID_D0(task_end,d)\n#define __itt_task_end_ptr           ITTNOTIFY_NAME(task_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_task_begin(domain,id,parentid,name)\n#define __itt_task_begin_ptr    0\n#define __itt_task_begin_fn(domain,id,parentid,fn)\n#define __itt_task_begin_fn_ptr 0\n#define __itt_task_end(domain)\n#define __itt_task_end_ptr      0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_task_begin_ptr    0\n#define __itt_task_begin_fn_ptr 0\n#define __itt_task_end_ptr      0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} tasks group */\n\n/**\n * @defgroup counters Counters\n * @ingroup public\n * Counters are user-defined objects with a monotonically increasing\n * value. Counter values are 64-bit unsigned integers. Counter values\n * are tracked per-thread. Counters have names that can be displayed in\n * the tools.\n * @{\n */\n\n/**\n * @ingroup counters\n * @brief Increment a counter by one.\n * The first call with a given name creates a counter by that name and sets its\n * value to zero on every thread. Successive calls increment the counter value\n * on the thread on which the call is issued.\n * @param[in] domain The domain controlling the call. 
Counter names are not domain specific.\n *            The domain argument is used only to enable or disable the API calls.\n * @param[in] name The name of the counter\n */\nvoid ITTAPI __itt_counter_inc_v3(const __itt_domain *domain, __itt_string_handle *name);\n\n/**\n * @ingroup counters\n * @brief Increment a counter by the value specified in delta.\n * @param[in] domain The domain controlling the call. Counter names are not domain specific.\n *            The domain argument is used only to enable or disable the API calls.\n * @param[in] name The name of the counter\n * @param[in] delta The amount by which to increment the counter\n */\nvoid ITTAPI __itt_counter_inc_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, counter_inc_v3,       (const __itt_domain *domain, __itt_string_handle *name))\nITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta))\n#define __itt_counter_inc_v3(d,x)         ITTNOTIFY_VOID_D1(counter_inc_v3,d,x)\n#define __itt_counter_inc_v3_ptr          ITTNOTIFY_NAME(counter_inc_v3)\n#define __itt_counter_inc_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_inc_delta_v3,d,x,y)\n#define __itt_counter_inc_delta_v3_ptr    ITTNOTIFY_NAME(counter_inc_delta_v3)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_counter_inc_v3(domain,name)\n#define __itt_counter_inc_v3_ptr       0\n#define __itt_counter_inc_delta_v3(domain,name,delta)\n#define __itt_counter_inc_delta_v3_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_counter_inc_v3_ptr       0\n#define __itt_counter_inc_delta_v3_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} counters group */\n\n/**\n * @defgroup markers Markers\n * Markers represent a single discreet event in time. 
Markers have a scope,\n * described by an enumerated type __itt_scope. Markers are created by\n * the API call __itt_marker. A marker instance can be given an ID for use in\n * adding metadata.\n * @{\n */\n\n/**\n * @brief Describes the scope of an event object in the trace.\n */\ntypedef enum\n{\n    __itt_scope_unknown = 0,\n    __itt_scope_global,\n    __itt_scope_track_group,\n    __itt_scope_track,\n    __itt_scope_task,\n    __itt_scope_marker\n} __itt_scope;\n\n/** @cond exclude_from_documentation */\n#define __itt_marker_scope_unknown  __itt_scope_unknown\n#define __itt_marker_scope_global   __itt_scope_global\n#define __itt_marker_scope_process  __itt_scope_track_group\n#define __itt_marker_scope_thread   __itt_scope_track\n#define __itt_marker_scope_task     __itt_scope_task\n/** @endcond */\n\n/**\n * @ingroup markers\n * @brief Create a marker instance\n * @param[in] domain The domain for this marker\n * @param[in] id The instance ID for this marker or __itt_null\n * @param[in] name The name for this marker\n * @param[in] scope The scope for this marker\n */\nvoid ITTAPI __itt_marker(const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, marker, (const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope))\n#define __itt_marker(d,x,y,z) ITTNOTIFY_VOID_D3(marker,d,x,y,z)\n#define __itt_marker_ptr      ITTNOTIFY_NAME(marker)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_marker(domain,id,name,scope)\n#define __itt_marker_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_marker_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} markers group */\n\n/**\n * @defgroup metadata Metadata\n * The metadata API is used to attach extra information to named\n * entities. 
Metadata can be attached to an identified named entity by ID,\n * or to the current entity (which is always a task).\n *\n * Conceptually metadata has a type (what kind of metadata), a key (the\n * name of the metadata), and a value (the actual data). The encoding of\n * the value depends on the type of the metadata.\n *\n * The type of metadata is specified by an enumerated type __itt_metdata_type.\n * @{\n */\n\n/**\n * @ingroup parameters\n * @brief describes the type of metadata\n */\ntypedef enum {\n    __itt_metadata_unknown = 0,\n    __itt_metadata_u64,     /**< Unsigned 64-bit integer */\n    __itt_metadata_s64,     /**< Signed 64-bit integer */\n    __itt_metadata_u32,     /**< Unsigned 32-bit integer */\n    __itt_metadata_s32,     /**< Signed 32-bit integer */\n    __itt_metadata_u16,     /**< Unsigned 16-bit integer */\n    __itt_metadata_s16,     /**< Signed 16-bit integer */\n    __itt_metadata_float,   /**< Signed 32-bit floating-point */\n    __itt_metadata_double   /**< SIgned 64-bit floating-point */\n} __itt_metadata_type;\n\n/**\n * @ingroup parameters\n * @brief Add metadata to an instance of a named entity.\n * @param[in] domain The domain controlling the call\n * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task\n * @param[in] key The name of the metadata\n * @param[in] type The type of the metadata\n * @param[in] count The number of elements of the given type. 
If count == 0, no metadata will be added.\n * @param[in] data The metadata itself\n*/\nvoid ITTAPI __itt_metadata_add(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, metadata_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data))\n#define __itt_metadata_add(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(metadata_add,d,x,y,z,a,b)\n#define __itt_metadata_add_ptr          ITTNOTIFY_NAME(metadata_add)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_metadata_add(d,x,y,z,a,b)\n#define __itt_metadata_add_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_metadata_add_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup parameters\n * @brief Add string metadata to an instance of a named entity.\n * @param[in] domain The domain controlling the call\n * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task\n * @param[in] key The name of the metadata\n * @param[in] data The metadata itself\n * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated \n*/\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nvoid ITTAPI __itt_metadata_str_addA(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length);\nvoid ITTAPI __itt_metadata_str_addW(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t *data, size_t length);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_metadata_str_add     __itt_metadata_str_addW\n#  define __itt_metadata_str_add_ptr __itt_metadata_str_addW_ptr\n#else /* UNICODE */\n#  define __itt_metadata_str_add     __itt_metadata_str_addA\n#  define 
__itt_metadata_str_add_ptr __itt_metadata_str_addA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nvoid ITTAPI __itt_metadata_str_add(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length);\n#endif\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, metadata_str_addA, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length))\nITT_STUBV(ITTAPI, void, metadata_str_addW, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t *data, size_t length))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_metadata_str_addA(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_addA,d,x,y,z,a)\n#define __itt_metadata_str_addA_ptr        ITTNOTIFY_NAME(metadata_str_addA)\n#define __itt_metadata_str_addW(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_addW,d,x,y,z,a)\n#define __itt_metadata_str_addW_ptr        ITTNOTIFY_NAME(metadata_str_addW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_metadata_str_add(d,x,y,z,a)  ITTNOTIFY_VOID_D4(metadata_str_add,d,x,y,z,a)\n#define __itt_metadata_str_add_ptr         ITTNOTIFY_NAME(metadata_str_add)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_metadata_str_addA(d,x,y,z,a) \n#define __itt_metadata_str_addA_ptr 0\n#define __itt_metadata_str_addW(d,x,y,z,a) \n#define __itt_metadata_str_addW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_metadata_str_add(d,x,y,z,a)\n#define __itt_metadata_str_add_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* 
INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_metadata_str_addA_ptr 0\n#define __itt_metadata_str_addW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_metadata_str_add_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup parameters\n * @brief Add metadata to an instance of a named entity.\n * @param[in] domain The domain controlling the call\n * @param[in] scope The scope of the instance to which the metadata is to be added\n\n * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task\n \n * @param[in] key The name of the metadata\n * @param[in] type The type of the metadata\n * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added.\n * @param[in] data The metadata itself\n*/\nvoid ITTAPI __itt_metadata_add_with_scope(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, metadata_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data))\n#define __itt_metadata_add_with_scope(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(metadata_add_with_scope,d,x,y,z,a,b)\n#define __itt_metadata_add_with_scope_ptr          ITTNOTIFY_NAME(metadata_add_with_scope)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_metadata_add_with_scope(d,x,y,z,a,b)\n#define __itt_metadata_add_with_scope_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_metadata_add_with_scope_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup parameters\n * @brief Add string metadata to an instance of a named entity.\n 
* @param[in] domain The domain controlling the call\n * @param[in] scope The scope of the instance to which the metadata is to be added\n\n * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task\n\n * @param[in] key The name of the metadata\n * @param[in] data The metadata itself\n * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated \n*/\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nvoid ITTAPI __itt_metadata_str_add_with_scopeA(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length);\nvoid ITTAPI __itt_metadata_str_add_with_scopeW(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_metadata_str_add_with_scope     __itt_metadata_str_add_with_scopeW\n#  define __itt_metadata_str_add_with_scope_ptr __itt_metadata_str_add_with_scopeW_ptr\n#else /* UNICODE */\n#  define __itt_metadata_str_add_with_scope     __itt_metadata_str_add_with_scopeA\n#  define __itt_metadata_str_add_with_scope_ptr __itt_metadata_str_add_with_scopeA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nvoid ITTAPI __itt_metadata_str_add_with_scope(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length);\n#endif\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeA, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length))\nITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeW, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN 
*/\nITT_STUBV(ITTAPI, void, metadata_str_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scopeA,d,x,y,z,a)\n#define __itt_metadata_str_add_with_scopeA_ptr        ITTNOTIFY_NAME(metadata_str_add_with_scopeA)\n#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scopeW,d,x,y,z,a)\n#define __itt_metadata_str_add_with_scopeW_ptr        ITTNOTIFY_NAME(metadata_str_add_with_scopeW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_metadata_str_add_with_scope(d,x,y,z,a)  ITTNOTIFY_VOID_D4(metadata_str_add_with_scope,d,x,y,z,a)\n#define __itt_metadata_str_add_with_scope_ptr         ITTNOTIFY_NAME(metadata_str_add_with_scope)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) \n#define __itt_metadata_str_add_with_scopeA_ptr  0\n#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) \n#define __itt_metadata_str_add_with_scopeW_ptr  0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_metadata_str_add_with_scope(d,x,y,z,a)\n#define __itt_metadata_str_add_with_scope_ptr   0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_metadata_str_add_with_scopeA_ptr  0\n#define __itt_metadata_str_add_with_scopeW_ptr  0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_metadata_str_add_with_scope_ptr   0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @} metadata group */\n\n/**\n * @defgroup relations Relations\n * Instances of named entities can be explicitly associated with other\n * 
instances using instance IDs and the relationship API calls.\n *\n * @{\n */\n\n/**\n * @ingroup relations\n * @brief The kind of relation between two instances is specified by the enumerated type __itt_relation.\n * Relations between instances can be added with an API call. The relation\n * API uses instance IDs. Relations can be added before or after the actual\n * instances are created and persist independently of the instances. This\n * is the motivation for having different lifetimes for instance IDs and\n * the actual instances.\n */\ntypedef enum\n{\n    __itt_relation_is_unknown = 0,\n    __itt_relation_is_dependent_on,         /**< \"A is dependent on B\" means that A cannot start until B completes */\n    __itt_relation_is_sibling_of,           /**< \"A is sibling of B\" means that A and B were created as a group */\n    __itt_relation_is_parent_of,            /**< \"A is parent of B\" means that A created B */\n    __itt_relation_is_continuation_of,      /**< \"A is continuation of B\" means that A assumes the dependencies of B */\n    __itt_relation_is_child_of,             /**< \"A is child of B\" means that A was created by B (inverse of is_parent_of) */\n    __itt_relation_is_continued_by,         /**< \"A is continued by B\" means that B assumes the dependencies of A (inverse of is_continuation_of) */\n    __itt_relation_is_predecessor_to        /**< \"A is predecessor to B\" means that B cannot start until A completes (inverse of is_dependent_on) */\n} __itt_relation;\n\n/**\n * @ingroup relations\n * @brief Add a relation to the current task instance.\n * The current task instance is the head of the relation.\n * @param[in] domain The domain controlling this call\n * @param[in] relation The kind of relation\n * @param[in] tail The ID for the tail of the relation\n */\nvoid ITTAPI __itt_relation_add_to_current(const __itt_domain *domain, __itt_relation relation, __itt_id tail);\n\n/**\n * @ingroup relations\n * @brief Add a relation between two 
instance identifiers.\n * @param[in] domain The domain controlling this call\n * @param[in] head The ID for the head of the relation\n * @param[in] relation The kind of relation\n * @param[in] tail The ID for the tail of the relation\n */\nvoid ITTAPI __itt_relation_add(const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, relation_add_to_current, (const __itt_domain *domain, __itt_relation relation, __itt_id tail))\nITT_STUBV(ITTAPI, void, relation_add,            (const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail))\n#define __itt_relation_add_to_current(d,x,y) ITTNOTIFY_VOID_D2(relation_add_to_current,d,x,y)\n#define __itt_relation_add_to_current_ptr    ITTNOTIFY_NAME(relation_add_to_current)\n#define __itt_relation_add(d,x,y,z)          ITTNOTIFY_VOID_D3(relation_add,d,x,y,z)\n#define __itt_relation_add_ptr               ITTNOTIFY_NAME(relation_add)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_relation_add_to_current(d,x,y)\n#define __itt_relation_add_to_current_ptr 0\n#define __itt_relation_add(d,x,y,z)\n#define __itt_relation_add_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_relation_add_to_current_ptr 0\n#define __itt_relation_add_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} relations group */\n\n/** @cond exclude_from_documentation */\n#pragma pack(push, 8)\n\ntypedef struct ___itt_clock_info\n{\n    unsigned long long clock_freq; /*!< Clock domain frequency */\n    unsigned long long clock_base; /*!< Clock domain base timestamp */\n} __itt_clock_info;\n\n#pragma pack(pop)\n/** @endcond */\n\n/** @cond exclude_from_documentation */\ntypedef void (ITTAPI *__itt_get_clock_info_fn)(__itt_clock_info* clock_info, void* data);\n/** @endcond */\n\n/** @cond exclude_from_documentation */\n#pragma pack(push, 
8)\n\ntypedef struct ___itt_clock_domain\n{\n    __itt_clock_info info;      /*!< Most recent clock domain info */\n    __itt_get_clock_info_fn fn; /*!< Callback function pointer */\n    void* fn_data;              /*!< Input argument for the callback function */\n    int   extra1;               /*!< Reserved. Must be zero */\n    void* extra2;               /*!< Reserved. Must be zero */\n    struct ___itt_clock_domain* next;\n} __itt_clock_domain;\n\n#pragma pack(pop)\n/** @endcond */\n\n/**\n * @ingroup clockdomains\n * @brief Create a clock domain.\n * Certain applications require the capability to trace their application using\n * a clock domain different than the CPU, for instance the instrumentation of events\n * that occur on a GPU.\n * Because the set of domains is expected to be static over the application's execution time,\n * there is no mechanism to destroy a domain.\n * Any domain can be accessed by any thread in the process, regardless of which thread created\n * the domain. 
This call is thread-safe.\n * @param[in] fn A pointer to a callback function which retrieves alternative CPU timestamps\n * @param[in] fn_data Argument for a callback function; may be NULL\n */\n__itt_clock_domain* ITTAPI __itt_clock_domain_create(__itt_get_clock_info_fn fn, void* fn_data);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_clock_domain*, clock_domain_create, (__itt_get_clock_info_fn fn, void* fn_data))\n#define __itt_clock_domain_create     ITTNOTIFY_DATA(clock_domain_create)\n#define __itt_clock_domain_create_ptr ITTNOTIFY_NAME(clock_domain_create)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_clock_domain_create(fn,fn_data) (__itt_clock_domain*)0\n#define __itt_clock_domain_create_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_clock_domain_create_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup clockdomains\n * @brief Recalculate clock domains frequences and clock base timestamps.\n */\nvoid ITTAPI __itt_clock_domain_reset(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, clock_domain_reset, (void))\n#define __itt_clock_domain_reset     ITTNOTIFY_VOID(clock_domain_reset)\n#define __itt_clock_domain_reset_ptr ITTNOTIFY_NAME(clock_domain_reset)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_clock_domain_reset()\n#define __itt_clock_domain_reset_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_clock_domain_reset_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup clockdomain\n * @brief Create an instance of identifier. This establishes the beginning of the lifetime of\n * an instance of the given ID in the trace. 
Once this lifetime starts, the ID can be used to\n * tag named entity instances in calls such as __itt_task_begin, and to specify relationships among\n * identified named entity instances, using the \\ref relations APIs.\n * @param[in] domain The domain controlling the execution of this call.\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] id The ID to create.\n */\nvoid ITTAPI __itt_id_create_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id);\n\n/**\n * @ingroup clockdomain\n * @brief Destroy an instance of identifier. This ends the lifetime of the current instance of the\n * given ID value in the trace. Any relationships that are established after this lifetime ends are\n * invalid. This call must be performed before the given ID value can be reused for a different\n * named entity instance.\n * @param[in] domain The domain controlling the execution of this call.\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] id The ID to destroy.\n */\nvoid ITTAPI __itt_id_destroy_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, id_create_ex,  (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id))\nITT_STUBV(ITTAPI, void, id_destroy_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id))\n#define __itt_id_create_ex(d,x,y,z)  ITTNOTIFY_VOID_D3(id_create_ex,d,x,y,z)\n#define __itt_id_create_ex_ptr       ITTNOTIFY_NAME(id_create_ex)\n#define __itt_id_destroy_ex(d,x,y,z) ITTNOTIFY_VOID_D3(id_destroy_ex,d,x,y,z)\n#define 
__itt_id_destroy_ex_ptr      ITTNOTIFY_NAME(id_destroy_ex)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_id_create_ex(domain,clock_domain,timestamp,id)\n#define __itt_id_create_ex_ptr    0\n#define __itt_id_destroy_ex(domain,clock_domain,timestamp,id)\n#define __itt_id_destroy_ex_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_id_create_ex_ptr    0\n#define __itt_id_destroy_ex_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup clockdomain\n * @brief Begin a task instance.\n * @param[in] domain The domain for this task\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] taskid The instance ID for this task instance, or __itt_null\n * @param[in] parentid The parent instance to which this task instance belongs, or __itt_null\n * @param[in] name The name of this task\n */\nvoid ITTAPI __itt_task_begin_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name);\n\n/**\n * @ingroup clockdomain\n * @brief Begin a task instance.\n * @param[in] domain The domain for this task\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] taskid The identifier for this task instance, or __itt_null\n * @param[in] parentid The parent of this task, or __itt_null\n * @param[in] fn The pointer to the function you are tracing\n */\nvoid ITTAPI __itt_task_begin_fn_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, void* fn);\n\n/**\n * @ingroup clockdomain\n * @brief End the current task instance.\n * @param[in] domain The domain for this task\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] 
timestamp The user defined timestamp.\n */\nvoid ITTAPI __itt_task_end_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, task_begin_ex,        (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, __itt_string_handle *name))\nITT_STUBV(ITTAPI, void, task_begin_fn_ex,     (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, void* fn))\nITT_STUBV(ITTAPI, void, task_end_ex,          (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp))\n#define __itt_task_begin_ex(d,x,y,z,a,b)      ITTNOTIFY_VOID_D5(task_begin_ex,d,x,y,z,a,b)\n#define __itt_task_begin_ex_ptr               ITTNOTIFY_NAME(task_begin_ex)\n#define __itt_task_begin_fn_ex(d,x,y,z,a,b)   ITTNOTIFY_VOID_D5(task_begin_fn_ex,d,x,y,z,a,b)\n#define __itt_task_begin_fn_ex_ptr            ITTNOTIFY_NAME(task_begin_fn_ex)\n#define __itt_task_end_ex(d,x,y)              ITTNOTIFY_VOID_D2(task_end_ex,d,x,y)\n#define __itt_task_end_ex_ptr                 ITTNOTIFY_NAME(task_end_ex)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_task_begin_ex(domain,clock_domain,timestamp,id,parentid,name)\n#define __itt_task_begin_ex_ptr          0\n#define __itt_task_begin_fn_ex(domain,clock_domain,timestamp,id,parentid,fn)\n#define __itt_task_begin_fn_ex_ptr       0\n#define __itt_task_end_ex(domain,clock_domain,timestamp)\n#define __itt_task_end_ex_ptr            0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_task_begin_ex_ptr          0\n#define __itt_task_begin_fn_ex_ptr       0\n#define __itt_task_end_ex_ptr            0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup markers\n * @brief Create a marker 
instance.\n * @param[in] domain The domain for this marker\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] id The instance ID for this marker, or __itt_null\n * @param[in] name The name for this marker\n * @param[in] scope The scope for this marker\n */\nvoid ITTAPI __itt_marker_ex(const __itt_domain *domain,  __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, marker_ex,    (const __itt_domain *domain,  __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope))\n#define __itt_marker_ex(d,x,y,z,a,b)    ITTNOTIFY_VOID_D5(marker_ex,d,x,y,z,a,b)\n#define __itt_marker_ex_ptr             ITTNOTIFY_NAME(marker_ex)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_marker_ex(domain,clock_domain,timestamp,id,name,scope)\n#define __itt_marker_ex_ptr    0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_marker_ex_ptr    0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @ingroup clockdomain\n * @brief Add a relation to the current task instance.\n * The current task instance is the head of the relation.\n * @param[in] domain The domain controlling this call\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] relation The kind of relation\n * @param[in] tail The ID for the tail of the relation\n */\nvoid ITTAPI __itt_relation_add_to_current_ex(const __itt_domain *domain,  __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail);\n\n/**\n * @ingroup clockdomain\n * @brief Add a relation between two instance 
identifiers.\n * @param[in] domain The domain controlling this call\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] head The ID for the head of the relation\n * @param[in] relation The kind of relation\n * @param[in] tail The ID for the tail of the relation\n */\nvoid ITTAPI __itt_relation_add_ex(const __itt_domain *domain,  __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, relation_add_to_current_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail))\nITT_STUBV(ITTAPI, void, relation_add_ex,            (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail))\n#define __itt_relation_add_to_current_ex(d,x,y,z,a) ITTNOTIFY_VOID_D4(relation_add_to_current_ex,d,x,y,z,a)\n#define __itt_relation_add_to_current_ex_ptr        ITTNOTIFY_NAME(relation_add_to_current_ex)\n#define __itt_relation_add_ex(d,x,y,z,a,b)          ITTNOTIFY_VOID_D5(relation_add_ex,d,x,y,z,a,b)\n#define __itt_relation_add_ex_ptr                   ITTNOTIFY_NAME(relation_add_ex)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_relation_add_to_current_ex(domain,clock_domain,timestame,relation,tail)\n#define __itt_relation_add_to_current_ex_ptr 0\n#define __itt_relation_add_ex(domain,clock_domain,timestamp,head,relation,tail)\n#define __itt_relation_add_ex_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_relation_add_to_current_ex_ptr 0\n#define __itt_relation_add_ex_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @cond exclude_from_documentation */\ntypedef enum 
___itt_track_group_type\n{\n    __itt_track_group_type_normal = 0\n} __itt_track_group_type;\n/** @endcond */\n\n/** @cond exclude_from_documentation */\n#pragma pack(push, 8)\n\ntypedef struct ___itt_track_group\n{\n    __itt_string_handle* name;     /*!< Name of the track group */\n    struct ___itt_track* track;    /*!< List of child tracks    */\n    __itt_track_group_type tgtype; /*!< Type of the track group */\n    int   extra1;                  /*!< Reserved. Must be zero  */\n    void* extra2;                  /*!< Reserved. Must be zero  */\n    struct ___itt_track_group* next;\n} __itt_track_group;\n\n#pragma pack(pop)\n/** @endcond */\n\n/**\n * @brief Placeholder for custom track types. Currently, \"normal\" custom track\n * is the only available track type.\n */\ntypedef enum ___itt_track_type\n{\n    __itt_track_type_normal = 0\n#ifdef INTEL_ITTNOTIFY_API_PRIVATE\n    , __itt_track_type_queue\n#endif /* INTEL_ITTNOTIFY_API_PRIVATE */\n} __itt_track_type;\n\n/** @cond exclude_from_documentation */\n#pragma pack(push, 8)\n\ntypedef struct ___itt_track\n{\n    __itt_string_handle* name; /*!< Name of the track group */\n    __itt_track_group* group;  /*!< Parent group to a track */\n    __itt_track_type ttype;    /*!< Type of the track       */\n    int   extra1;              /*!< Reserved. Must be zero  */\n    void* extra2;              /*!< Reserved. 
Must be zero  */\n    struct ___itt_track* next;\n} __itt_track;\n\n#pragma pack(pop)\n/** @endcond */\n\n/**\n * @brief Create logical track group.\n */\n__itt_track_group* ITTAPI __itt_track_group_create(__itt_string_handle* name, __itt_track_group_type track_group_type);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_track_group*, track_group_create, (__itt_string_handle* name, __itt_track_group_type track_group_type))\n#define __itt_track_group_create     ITTNOTIFY_DATA(track_group_create)\n#define __itt_track_group_create_ptr ITTNOTIFY_NAME(track_group_create)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_track_group_create(name)  (__itt_track_group*)0\n#define __itt_track_group_create_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_track_group_create_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Create logical track.\n */\n__itt_track* ITTAPI __itt_track_create(__itt_track_group* track_group, __itt_string_handle* name, __itt_track_type track_type);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_track*, track_create, (__itt_track_group* track_group,__itt_string_handle* name, __itt_track_type track_type))\n#define __itt_track_create     ITTNOTIFY_DATA(track_create)\n#define __itt_track_create_ptr ITTNOTIFY_NAME(track_create)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_track_create(track_group,name,track_type)  (__itt_track*)0\n#define __itt_track_create_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_track_create_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Set the logical track.\n */\nvoid ITTAPI __itt_set_track(__itt_track* track);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef 
INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, set_track, (__itt_track *track))\n#define __itt_set_track     ITTNOTIFY_VOID(set_track)\n#define __itt_set_track_ptr ITTNOTIFY_NAME(set_track)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_set_track(track)\n#define __itt_set_track_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_set_track_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/* ========================================================================== */\n/** @cond exclude_from_gpa_documentation */\n/**\n * @defgroup events Events\n * @ingroup public\n * Events group\n * @{\n */\n/** @brief user event type */\ntypedef int __itt_event;\n\n/**\n * @brief Create an event notification\n * @note name or namelen being null/name and namelen not matching, user event feature not enabled\n * @return non-zero event identifier upon success and __itt_err otherwise\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n__itt_event LIBITTAPI __itt_event_createA(const char    *name, int namelen);\n__itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_event_create     __itt_event_createW\n#  define __itt_event_create_ptr __itt_event_createW_ptr\n#else\n#  define __itt_event_create     __itt_event_createA\n#  define __itt_event_create_ptr __itt_event_createA_ptr\n#endif /* UNICODE */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n__itt_event LIBITTAPI __itt_event_create(const char *name, int namelen);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char    *name, int namelen))\nITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(LIBITTAPI, __itt_event, event_create,  
(const char    *name, int namelen))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_event_createA     ITTNOTIFY_DATA(event_createA)\n#define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA)\n#define __itt_event_createW     ITTNOTIFY_DATA(event_createW)\n#define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_event_create      ITTNOTIFY_DATA(event_create)\n#define __itt_event_create_ptr  ITTNOTIFY_NAME(event_create)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_event_createA(name, namelen) (__itt_event)0\n#define __itt_event_createA_ptr 0\n#define __itt_event_createW(name, namelen) (__itt_event)0\n#define __itt_event_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_event_create(name, namelen)  (__itt_event)0\n#define __itt_event_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_event_createA_ptr 0\n#define __itt_event_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_event_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an event occurrence.\n * @return __itt_err upon failure (invalid event id/user event feature not enabled)\n */\nint LIBITTAPI __itt_event_start(__itt_event event);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(LIBITTAPI, int, event_start, (__itt_event event))\n#define __itt_event_start     ITTNOTIFY_DATA(event_start)\n#define __itt_event_start_ptr ITTNOTIFY_NAME(event_start)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_event_start(event) (int)0\n#define __itt_event_start_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API 
*/\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_event_start_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an event end occurrence.\n * @note It is optional if events do not have durations.\n * @return __itt_err upon failure (invalid event id/user event feature not enabled)\n */\nint LIBITTAPI __itt_event_end(__itt_event event);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(LIBITTAPI, int, event_end, (__itt_event event))\n#define __itt_event_end     ITTNOTIFY_DATA(event_end)\n#define __itt_event_end_ptr ITTNOTIFY_NAME(event_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_event_end(event) (int)0\n#define __itt_event_end_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_event_end_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} events group */\n\n\n/**\n * @defgroup arrays Arrays Visualizer\n * @ingroup public\n * Visualize arrays\n * @{\n */\n\n/**\n * @enum __itt_av_data_type\n * @brief Defines types of arrays data (for C/C++ intrinsic types) \n */\ntypedef enum \n{\n    __itt_e_first = 0,\n    __itt_e_char = 0,  /* 1-byte integer */\n    __itt_e_uchar,     /* 1-byte unsigned integer */\n    __itt_e_int16,     /* 2-byte integer */\n    __itt_e_uint16,    /* 2-byte unsigned integer  */\n    __itt_e_int32,     /* 4-byte integer */\n    __itt_e_uint32,    /* 4-byte unsigned integer */\n    __itt_e_int64,     /* 8-byte integer */\n    __itt_e_uint64,    /* 8-byte unsigned integer */\n    __itt_e_float,     /* 4-byte floating */\n    __itt_e_double,    /* 8-byte floating */\n    __itt_e_last = __itt_e_double\n} __itt_av_data_type;\n\n/**\n * @brief Save an array data to a file.\n * Output format is defined by the file extension. 
The csv and bmp formats are supported (bmp - for 2-dimensional array only).\n * @param[in] data - pointer to the array data\n * @param[in] rank - the rank of the array \n * @param[in] dimensions - pointer to an array of integers, which specifies the array dimensions. \n * The size of dimensions must be equal to the rank\n * @param[in] type - the type of the array, specified as one of the __itt_av_data_type values (for intrinsic types)\n * @param[in] filePath - the file path; the output format is defined by the file extension\n * @param[in] columnOrder - defines how the array is stored in the linear memory.\n * It should be 1 for column-major order (e.g. in FORTRAN) or 0 - for row-major order (e.g. in C).\n */\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nint ITTAPI __itt_av_saveA(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder);\nint ITTAPI __itt_av_saveW(void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_av_save     __itt_av_saveW\n#  define __itt_av_save_ptr __itt_av_saveW_ptr\n#else /* UNICODE */\n#  define __itt_av_save     __itt_av_saveA\n#  define __itt_av_save_ptr __itt_av_saveA_ptr\n#endif /* UNICODE */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nint ITTAPI __itt_av_save(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, int, av_saveA, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder))\nITT_STUB(ITTAPI, int, av_saveW, (void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, int, av_save,  (void *data, int rank, const int 
*dimensions, int type, const char *filePath, int columnOrder))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_av_saveA     ITTNOTIFY_DATA(av_saveA)\n#define __itt_av_saveA_ptr ITTNOTIFY_NAME(av_saveA)\n#define __itt_av_saveW     ITTNOTIFY_DATA(av_saveW)\n#define __itt_av_saveW_ptr ITTNOTIFY_NAME(av_saveW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_av_save     ITTNOTIFY_DATA(av_save)\n#define __itt_av_save_ptr ITTNOTIFY_NAME(av_save)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_av_saveA(name)\n#define __itt_av_saveA_ptr 0\n#define __itt_av_saveW(name)\n#define __itt_av_saveW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_av_save(name)\n#define __itt_av_save_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_av_saveA_ptr 0\n#define __itt_av_saveW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_av_save_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\nvoid ITTAPI __itt_enable_attach(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, enable_attach, (void))\n#define __itt_enable_attach     ITTNOTIFY_VOID(enable_attach)\n#define __itt_enable_attach_ptr ITTNOTIFY_NAME(enable_attach)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_enable_attach()\n#define __itt_enable_attach_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_enable_attach_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @cond exclude_from_gpa_documentation */\n\n/** @} arrays group */\n\n/** @endcond */\n\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _ITTNOTIFY_H_ */\n\n#ifdef 
INTEL_ITTNOTIFY_API_PRIVATE\n\n#ifndef _ITTNOTIFY_PRIVATE_\n#define _ITTNOTIFY_PRIVATE_\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/**\n * @ingroup tasks\n * @brief Begin an overlapped task instance.\n * @param[in] domain The domain for this task.\n * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null.\n * @param[in] parentid The parent of this task, or __itt_null.\n * @param[in] name The name of this task.\n */\nvoid ITTAPI __itt_task_begin_overlapped(const __itt_domain* domain, __itt_id taskid, __itt_id parentid, __itt_string_handle* name);\n\n/**\n * @ingroup clockdomain\n * @brief Begin an overlapped task instance.\n * @param[in] domain The domain for this task\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null.\n * @param[in] parentid The parent of this task, or __itt_null.\n * @param[in] name The name of this task.\n */\nvoid ITTAPI __itt_task_begin_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name);\n\n/**\n * @ingroup tasks\n * @brief End an overlapped task instance.\n * @param[in] domain The domain for this task\n * @param[in] taskid Explicit ID of finished task\n */\nvoid ITTAPI __itt_task_end_overlapped(const __itt_domain *domain, __itt_id taskid);\n\n/**\n * @ingroup clockdomain\n * @brief End an overlapped task instance.\n * @param[in] domain The domain for this task\n * @param[in] clock_domain The clock domain controlling the execution of this call.\n * @param[in] timestamp The user defined timestamp.\n * @param[in] taskid Explicit ID of finished task\n */\nvoid ITTAPI __itt_task_end_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid);\n\n/** 
@cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, task_begin_overlapped,          (const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name))\nITT_STUBV(ITTAPI, void, task_begin_overlapped_ex,       (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name))\nITT_STUBV(ITTAPI, void, task_end_overlapped,            (const __itt_domain *domain, __itt_id taskid))\nITT_STUBV(ITTAPI, void, task_end_overlapped_ex,         (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid))\n#define __itt_task_begin_overlapped(d,x,y,z)            ITTNOTIFY_VOID_D3(task_begin_overlapped,d,x,y,z)\n#define __itt_task_begin_overlapped_ptr                 ITTNOTIFY_NAME(task_begin_overlapped)\n#define __itt_task_begin_overlapped_ex(d,x,y,z,a,b)     ITTNOTIFY_VOID_D5(task_begin_overlapped_ex,d,x,y,z,a,b)\n#define __itt_task_begin_overlapped_ex_ptr              ITTNOTIFY_NAME(task_begin_overlapped_ex)\n#define __itt_task_end_overlapped(d,x)                  ITTNOTIFY_VOID_D1(task_end_overlapped,d,x)\n#define __itt_task_end_overlapped_ptr                   ITTNOTIFY_NAME(task_end_overlapped)\n#define __itt_task_end_overlapped_ex(d,x,y,z)           ITTNOTIFY_VOID_D3(task_end_overlapped_ex,d,x,y,z)\n#define __itt_task_end_overlapped_ex_ptr                ITTNOTIFY_NAME(task_end_overlapped_ex)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_task_begin_overlapped(domain,taskid,parentid,name)\n#define __itt_task_begin_overlapped_ptr         0\n#define __itt_task_begin_overlapped_ex(domain,clock_domain,timestamp,taskid,parentid,name)\n#define __itt_task_begin_overlapped_ex_ptr      0\n#define __itt_task_end_overlapped(domain,taskid)\n#define __itt_task_end_overlapped_ptr           0\n#define 
__itt_task_end_overlapped_ex(domain,clock_domain,timestamp,taskid)\n#define __itt_task_end_overlapped_ex_ptr        0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_task_begin_overlapped_ptr         0\n#define __itt_task_begin_overlapped_ex_ptr      0\n#define __itt_task_end_overlapped_ptr           0\n#define __itt_task_end_overlapped_ex_ptr        0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @defgroup makrs_internal Marks\n * @ingroup internal\n * Marks group\n * @warning Internal API:\n *   - It is not shipped to outside of Intel\n *   - It is delivered to internal Intel teams using e-mail or SVN access only\n * @{\n */\n/** @brief user mark type */\ntypedef int __itt_mark_type;\n\n/**\n * @brief Creates a user mark type with the specified name using char or Unicode string.\n * @param[in] name - name of mark to create\n * @return Returns a handle to the mark type\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n__itt_mark_type ITTAPI __itt_mark_createA(const char    *name);\n__itt_mark_type ITTAPI __itt_mark_createW(const wchar_t *name);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_mark_create     __itt_mark_createW\n#  define __itt_mark_create_ptr __itt_mark_createW_ptr\n#else /* UNICODE */\n#  define __itt_mark_create     __itt_mark_createA\n#  define __itt_mark_create_ptr __itt_mark_createA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n__itt_mark_type ITTAPI __itt_mark_create(const char *name);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char    *name))\nITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_mark_type, mark_create,  (const char *name))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN 
*/\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_mark_createA     ITTNOTIFY_DATA(mark_createA)\n#define __itt_mark_createA_ptr ITTNOTIFY_NAME(mark_createA)\n#define __itt_mark_createW     ITTNOTIFY_DATA(mark_createW)\n#define __itt_mark_createW_ptr ITTNOTIFY_NAME(mark_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark_create      ITTNOTIFY_DATA(mark_create)\n#define __itt_mark_create_ptr  ITTNOTIFY_NAME(mark_create)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_mark_createA(name) (__itt_mark_type)0\n#define __itt_mark_createA_ptr 0\n#define __itt_mark_createW(name) (__itt_mark_type)0\n#define __itt_mark_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark_create(name)  (__itt_mark_type)0\n#define __itt_mark_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_mark_createA_ptr 0\n#define __itt_mark_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Creates a \"discrete\" user mark type of the specified type and an optional parameter using char or Unicode string.\n *\n * - The mark of \"discrete\" type is placed to collection results in case of success. 
It appears in overtime view(s) as a special tick sign.\n * - The call is \"synchronous\" - function returns after mark is actually added to results.\n * - This function is useful, for example, to mark different phases of application\n *   (beginning of the next mark automatically meand end of current region).\n * - Can be used together with \"continuous\" marks (see below) at the same collection session\n * @param[in] mt - mark, created by __itt_mark_create(const char* name) function\n * @param[in] parameter - string parameter of mark\n * @return Returns zero value in case of success, non-zero value otherwise.\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nint ITTAPI __itt_markA(__itt_mark_type mt, const char    *parameter);\nint ITTAPI __itt_markW(__itt_mark_type mt, const wchar_t *parameter);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_mark     __itt_markW\n#  define __itt_mark_ptr __itt_markW_ptr\n#else /* UNICODE  */\n#  define __itt_mark     __itt_markA\n#  define __itt_mark_ptr __itt_markA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nint ITTAPI __itt_mark(__itt_mark_type mt, const char *parameter);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, int, markA, (__itt_mark_type mt, const char    *parameter))\nITT_STUB(ITTAPI, int, markW, (__itt_mark_type mt, const wchar_t *parameter))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, int, mark,  (__itt_mark_type mt, const char *parameter))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_markA     ITTNOTIFY_DATA(markA)\n#define __itt_markA_ptr ITTNOTIFY_NAME(markA)\n#define __itt_markW     ITTNOTIFY_DATA(markW)\n#define __itt_markW_ptr ITTNOTIFY_NAME(markW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark      ITTNOTIFY_DATA(mark)\n#define 
__itt_mark_ptr  ITTNOTIFY_NAME(mark)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_markA(mt, parameter) (int)0\n#define __itt_markA_ptr 0\n#define __itt_markW(mt, parameter) (int)0\n#define __itt_markW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark(mt, parameter)  (int)0\n#define __itt_mark_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_markA_ptr 0\n#define __itt_markW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Use this if necessary to create a \"discrete\" user event type (mark) for process\n * rather then for one thread\n * @see int __itt_mark(__itt_mark_type mt, const char* parameter);\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nint ITTAPI __itt_mark_globalA(__itt_mark_type mt, const char    *parameter);\nint ITTAPI __itt_mark_globalW(__itt_mark_type mt, const wchar_t *parameter);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_mark_global     __itt_mark_globalW\n#  define __itt_mark_global_ptr __itt_mark_globalW_ptr\n#else /* UNICODE  */\n#  define __itt_mark_global     __itt_mark_globalA\n#  define __itt_mark_global_ptr __itt_mark_globalA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nint ITTAPI __itt_mark_global(__itt_mark_type mt, const char *parameter);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, int, mark_globalA, (__itt_mark_type mt, const char    *parameter))\nITT_STUB(ITTAPI, int, mark_globalW, (__itt_mark_type mt, const wchar_t *parameter))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN 
*/\nITT_STUB(ITTAPI, int, mark_global,  (__itt_mark_type mt, const char *parameter))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_mark_globalA     ITTNOTIFY_DATA(mark_globalA)\n#define __itt_mark_globalA_ptr ITTNOTIFY_NAME(mark_globalA)\n#define __itt_mark_globalW     ITTNOTIFY_DATA(mark_globalW)\n#define __itt_mark_globalW_ptr ITTNOTIFY_NAME(mark_globalW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark_global      ITTNOTIFY_DATA(mark_global)\n#define __itt_mark_global_ptr  ITTNOTIFY_NAME(mark_global)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_mark_globalA(mt, parameter) (int)0\n#define __itt_mark_globalA_ptr 0\n#define __itt_mark_globalW(mt, parameter) (int)0\n#define __itt_mark_globalW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark_global(mt, parameter)  (int)0\n#define __itt_mark_global_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_mark_globalA_ptr 0\n#define __itt_mark_globalW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_mark_global_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Creates an \"end\" point for \"continuous\" mark with specified name.\n *\n * - Returns zero value in case of success, non-zero value otherwise.\n *   Also returns non-zero value when preceding \"begin\" point for the\n *   mark with the same name failed to be created or not created.\n * - The mark of \"continuous\" type is placed to collection results in\n *   case of success. 
It appears in overtime view(s) as a special tick\n *   sign (different from \"discrete\" mark) together with line from\n *   corresponding \"begin\" mark to \"end\" mark.\n * @note Continuous marks can overlap and be nested inside each other.\n * Discrete mark can be nested inside marked region\n * @param[in] mt - mark, created by __itt_mark_create(const char* name) function\n * @return Returns zero value in case of success, non-zero value otherwise.\n */\nint ITTAPI __itt_mark_off(__itt_mark_type mt);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, int, mark_off, (__itt_mark_type mt))\n#define __itt_mark_off     ITTNOTIFY_DATA(mark_off)\n#define __itt_mark_off_ptr ITTNOTIFY_NAME(mark_off)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_mark_off(mt) (int)0\n#define __itt_mark_off_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_mark_off_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Use this if necessary to create an \"end\" point for mark of process\n * @see int __itt_mark_off(__itt_mark_type mt);\n */\nint ITTAPI __itt_mark_global_off(__itt_mark_type mt);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt))\n#define __itt_mark_global_off     ITTNOTIFY_DATA(mark_global_off)\n#define __itt_mark_global_off_ptr ITTNOTIFY_NAME(mark_global_off)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_mark_global_off(mt) (int)0\n#define __itt_mark_global_off_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_mark_global_off_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} marks group */\n\n/**\n * @defgroup counters_internal Counters\n * @ingroup internal\n * Counters group\n * @{\n */\n/**\n * @brief opaque structure for counter identification\n */\ntypedef 
struct ___itt_counter *__itt_counter;\n\n/**\n * @brief Create a counter with given name/domain for the calling thread\n *\n * After __itt_counter_create() is called, __itt_counter_inc() / __itt_counter_inc_delta() can be used\n * to increment the counter on any thread\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n__itt_counter ITTAPI __itt_counter_createA(const char    *name, const char    *domain);\n__itt_counter ITTAPI __itt_counter_createW(const wchar_t *name, const wchar_t *domain);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_counter_create     __itt_counter_createW\n#  define __itt_counter_create_ptr __itt_counter_createW_ptr\n#else /* UNICODE */\n#  define __itt_counter_create     __itt_counter_createA\n#  define __itt_counter_create_ptr __itt_counter_createA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n__itt_counter ITTAPI __itt_counter_create(const char *name, const char *domain);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char    *name, const char    *domain))\nITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_counter, counter_create,  (const char *name, const char *domain))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_counter_createA     ITTNOTIFY_DATA(counter_createA)\n#define __itt_counter_createA_ptr ITTNOTIFY_NAME(counter_createA)\n#define __itt_counter_createW     ITTNOTIFY_DATA(counter_createW)\n#define __itt_counter_createW_ptr ITTNOTIFY_NAME(counter_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_counter_create     ITTNOTIFY_DATA(counter_create)\n#define __itt_counter_create_ptr ITTNOTIFY_NAME(counter_create)\n#endif /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_counter_createA(name, domain)\n#define __itt_counter_createA_ptr 0\n#define __itt_counter_createW(name, domain)\n#define __itt_counter_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_counter_create(name, domain)\n#define __itt_counter_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_counter_createA_ptr 0\n#define __itt_counter_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_counter_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Destroy the counter identified by the pointer previously returned by __itt_counter_create()\n */\nvoid ITTAPI __itt_counter_destroy(__itt_counter id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id))\n#define __itt_counter_destroy     ITTNOTIFY_VOID(counter_destroy)\n#define __itt_counter_destroy_ptr ITTNOTIFY_NAME(counter_destroy)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_counter_destroy(id)\n#define __itt_counter_destroy_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_counter_destroy_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Increment the counter value\n */\nvoid ITTAPI __itt_counter_inc(__itt_counter id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id))\n#define __itt_counter_inc     ITTNOTIFY_VOID(counter_inc)\n#define __itt_counter_inc_ptr ITTNOTIFY_NAME(counter_inc)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_counter_inc(id)\n#define 
__itt_counter_inc_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_counter_inc_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Increment the counter value with x\n */\nvoid ITTAPI __itt_counter_inc_delta(__itt_counter id, unsigned long long value);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value))\n#define __itt_counter_inc_delta     ITTNOTIFY_VOID(counter_inc_delta)\n#define __itt_counter_inc_delta_ptr ITTNOTIFY_NAME(counter_inc_delta)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_counter_inc_delta(id, value)\n#define __itt_counter_inc_delta_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_counter_inc_delta_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} counters group */\n\n/**\n * @defgroup stitch Stack Stitching\n * @ingroup internal\n * Stack Stitching group\n * @{\n */\n/**\n * @brief opaque structure for counter identification\n */\ntypedef struct ___itt_caller *__itt_caller;\n\n/**\n * @brief Create the stitch point e.g. 
a point in call stack where other stacks should be stitched to.\n * The function returns a unique identifier which is used to match the cut points with corresponding stitch points.\n */\n__itt_caller ITTAPI __itt_stack_caller_create(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void))\n#define __itt_stack_caller_create     ITTNOTIFY_DATA(stack_caller_create)\n#define __itt_stack_caller_create_ptr ITTNOTIFY_NAME(stack_caller_create)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_stack_caller_create() (__itt_caller)0\n#define __itt_stack_caller_create_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_stack_caller_create_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Destroy the information about stitch point identified by the pointer previously returned by __itt_stack_caller_create()\n */\nvoid ITTAPI __itt_stack_caller_destroy(__itt_caller id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id))\n#define __itt_stack_caller_destroy     ITTNOTIFY_VOID(stack_caller_destroy)\n#define __itt_stack_caller_destroy_ptr ITTNOTIFY_NAME(stack_caller_destroy)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_stack_caller_destroy(id)\n#define __itt_stack_caller_destroy_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_stack_caller_destroy_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Sets the cut point. 
Stack from each event which occurs after this call will be cut\n * at the same stack level the function was called and stitched to the corresponding stitch point.\n */\nvoid ITTAPI __itt_stack_callee_enter(__itt_caller id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, stack_callee_enter, (__itt_caller id))\n#define __itt_stack_callee_enter     ITTNOTIFY_VOID(stack_callee_enter)\n#define __itt_stack_callee_enter_ptr ITTNOTIFY_NAME(stack_callee_enter)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_stack_callee_enter(id)\n#define __itt_stack_callee_enter_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_stack_callee_enter_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief This function eliminates the cut point which was set by latest __itt_stack_callee_enter().\n */\nvoid ITTAPI __itt_stack_callee_leave(__itt_caller id);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, stack_callee_leave, (__itt_caller id))\n#define __itt_stack_callee_leave     ITTNOTIFY_VOID(stack_callee_leave)\n#define __itt_stack_callee_leave_ptr ITTNOTIFY_NAME(stack_callee_leave)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_stack_callee_leave(id)\n#define __itt_stack_callee_leave_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_stack_callee_leave_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @} stitch group */\n\n/* ***************************************************************************************************************************** */\n\n#include <stdarg.h>\n\n/** @cond exclude_from_documentation */\ntypedef enum __itt_error_code\n{\n    __itt_error_success       = 0, /*!< no error */\n    __itt_error_no_module     = 1, /*!< module can't be loaded */\n    /* %1$s -- library name; win: %2$d -- 
system error code; unx: %2$s -- system error message. */\n    __itt_error_no_symbol     = 2, /*!< symbol not found */\n    /* %1$s -- library name, %2$s -- symbol name. */\n    __itt_error_unknown_group = 3, /*!< unknown group specified */\n    /* %1$s -- env var name, %2$s -- group name. */\n    __itt_error_cant_read_env = 4, /*!< GetEnvironmentVariable() failed */\n    /* %1$s -- env var name, %2$d -- system error. */\n    __itt_error_env_too_long  = 5, /*!< variable value too long */\n    /* %1$s -- env var name, %2$d -- actual length of the var, %3$d -- max allowed length. */\n    __itt_error_system        = 6  /*!< pthread_mutexattr_init or pthread_mutex_init failed */\n    /* %1$s -- function name, %2$d -- errno. */\n} __itt_error_code;\n\ntypedef void (__itt_error_handler_t)(__itt_error_code code, va_list);\n__itt_error_handler_t* __itt_set_error_handler(__itt_error_handler_t*);\n\nconst char* ITTAPI __itt_api_version(void);\n/** @endcond */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#define __itt_error_handler ITT_JOIN(INTEL_ITTNOTIFY_PREFIX, error_handler)\nvoid __itt_error_handler(__itt_error_code code, va_list args);\nextern const int ITTNOTIFY_NAME(err);\n#define __itt_err ITTNOTIFY_NAME(err)\nITT_STUB(ITTAPI, const char*, api_version, (void))\n#define __itt_api_version     ITTNOTIFY_DATA(api_version)\n#define __itt_api_version_ptr ITTNOTIFY_NAME(api_version)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_api_version()   (const char*)0\n#define __itt_api_version_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_api_version_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _ITTNOTIFY_PRIVATE_ */\n\n#endif /* INTEL_ITTNOTIFY_API_PRIVATE */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/ittnotify_config.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _ITTNOTIFY_CONFIG_H_\n#define _ITTNOTIFY_CONFIG_H_\n\n/** @cond exclude_from_documentation */\n#ifndef ITT_OS_WIN\n#  define ITT_OS_WIN   1\n#endif /* ITT_OS_WIN */\n\n#ifndef ITT_OS_LINUX\n#  define ITT_OS_LINUX 2\n#endif /* ITT_OS_LINUX */\n\n#ifndef ITT_OS_MAC\n#  define ITT_OS_MAC   3\n#endif /* ITT_OS_MAC */\n\n#ifndef ITT_OS\n#  if defined WIN32 || defined _WIN32\n#    define ITT_OS ITT_OS_WIN\n#  elif defined( __APPLE__ ) && defined( __MACH__ )\n#    define ITT_OS ITT_OS_MAC\n#  else\n#    define ITT_OS ITT_OS_LINUX\n#  endif\n#endif /* ITT_OS */\n\n#ifndef ITT_PLATFORM_WIN\n#  define ITT_PLATFORM_WIN 1\n#endif /* ITT_PLATFORM_WIN */\n\n#ifndef ITT_PLATFORM_POSIX\n#  define ITT_PLATFORM_POSIX 2\n#endif /* ITT_PLATFORM_POSIX */\n\n#ifndef ITT_PLATFORM_MAC\n#  define ITT_PLATFORM_MAC 3\n#endif /* ITT_PLATFORM_MAC */\n\n#ifndef ITT_PLATFORM\n#  if ITT_OS==ITT_OS_WIN\n#    define ITT_PLATFORM ITT_PLATFORM_WIN\n#  elif ITT_OS==ITT_OS_MAC\n#    define ITT_PLATFORM ITT_PLATFORM_MAC\n#  else\n#    define ITT_PLATFORM ITT_PLATFORM_POSIX\n#  endif\n#endif /* ITT_PLATFORM */\n\n#if defined(_UNICODE) && !defined(UNICODE)\n#define UNICODE\n#endif\n\n#include <stddef.h>\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#include <tchar.h>\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#include <stdint.h>\n#if defined(UNICODE) || defined(_UNICODE)\n#include <wchar.h>\n#endif /* UNICODE || _UNICODE */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#ifndef CDECL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define CDECL __cdecl\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__ \n#      define CDECL __attribute__ ((cdecl))\n#    else  /* _M_IX86 || __i386__ */\n#      define CDECL /* actual only on x86 platform */\n#    endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN 
*/\n#endif /* CDECL */\n\n#ifndef STDCALL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define STDCALL __stdcall\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__\n#      define STDCALL __attribute__ ((stdcall)) \n#    else  /* _M_IX86 || __i386__ */\n#      define STDCALL /* supported only on x86 platform */\n#    endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* STDCALL */\n\n#define ITTAPI    CDECL\n#define LIBITTAPI CDECL\n\n/* TODO: Temporary for compatibility! */\n#define ITTAPI_CALL    CDECL\n#define LIBITTAPI_CALL CDECL\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n/* use __forceinline (VC++ specific) */\n#define ITT_INLINE           __forceinline\n#define ITT_INLINE_ATTRIBUTE /* nothing */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/*\n * Generally, functions are not inlined unless optimization is specified.\n * For functions declared inline, this attribute inlines the function even\n * if no optimization level was specified.\n */\n#ifdef __STRICT_ANSI__\n#define ITT_INLINE           static inline\n#else  /* __STRICT_ANSI__ */\n#define ITT_INLINE           static inline\n#endif /* __STRICT_ANSI__ */\n#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/** @endcond */\n\n#ifndef ITT_ARCH_IA32\n#  define ITT_ARCH_IA32  1\n#endif /* ITT_ARCH_IA32 */\n\n#ifndef ITT_ARCH_IA32E\n#  define ITT_ARCH_IA32E 2\n#endif /* ITT_ARCH_IA32E */\n\n#ifndef ITT_ARCH_ARM\n#  define ITT_ARCH_ARM  4\n#endif /* ITT_ARCH_ARM */\n\n#ifndef ITT_ARCH\n#  if defined _M_IX86 || defined __i386__\n#    define ITT_ARCH ITT_ARCH_IA32\n#  elif defined _M_X64 || defined _M_AMD64 || defined __x86_64__\n#    define ITT_ARCH ITT_ARCH_IA32E\n#  elif defined _M_IA64 || defined __ia64__\n#    define ITT_ARCH ITT_ARCH_IA64\n#  elif defined _M_ARM || __arm__\n#    define ITT_ARCH ITT_ARCH_ARM\n#  endif\n#endif\n\n#ifdef __cplusplus\n#  define ITT_EXTERN_C extern 
\"C\"\n#else\n#  define ITT_EXTERN_C /* nothing */\n#endif /* __cplusplus */\n\n#define ITT_TO_STR_AUX(x) #x\n#define ITT_TO_STR(x)     ITT_TO_STR_AUX(x)\n\n#define __ITT_BUILD_ASSERT(expr, suffix) do { \\\n    static char __itt_build_check_##suffix[(expr) ? 1 : -1]; \\\n    __itt_build_check_##suffix[0] = 0; \\\n} while(0)\n#define _ITT_BUILD_ASSERT(expr, suffix)  __ITT_BUILD_ASSERT((expr), suffix)\n#define ITT_BUILD_ASSERT(expr)           _ITT_BUILD_ASSERT((expr), __LINE__)\n\n#define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 }\n\n/* Replace with snapshot date YYYYMMDD for promotion build. */\n#define API_VERSION_BUILD    20111111\n\n#ifndef API_VERSION_NUM\n#define API_VERSION_NUM 0.0.0\n#endif /* API_VERSION_NUM */\n\n#define API_VERSION \"ITT-API-Version \" ITT_TO_STR(API_VERSION_NUM) \\\n                                \" (\" ITT_TO_STR(API_VERSION_BUILD) \")\"\n\n/* OS communication functions */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#include <windows.h>\ntypedef HMODULE           lib_t;\ntypedef DWORD             TIDT;\ntypedef CRITICAL_SECTION  mutex_t;\n#define MUTEX_INITIALIZER { 0 }\n#define strong_alias(name, aliasname) /* empty for Windows */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#include <dlfcn.h>\n#if defined(UNICODE) || defined(_UNICODE)\n#include <wchar.h>\n#endif /* UNICODE */\n#ifndef _GNU_SOURCE\n#define _GNU_SOURCE 1 /* need for PTHREAD_MUTEX_RECURSIVE */\n#endif /* _GNU_SOURCE */\n#ifndef __USE_UNIX98\n#define __USE_UNIX98 1 /* need for PTHREAD_MUTEX_RECURSIVE, on SLES11.1 with gcc 4.3.4 wherein pthread.h missing dependency on __USE_XOPEN2K8 */\n#endif /*__USE_UNIX98*/\n#include <pthread.h>\ntypedef void*             lib_t;\ntypedef pthread_t         TIDT;\ntypedef pthread_mutex_t   mutex_t;\n#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER\n#define _strong_alias(name, aliasname) \\\n            extern __typeof (name) aliasname __attribute__ ((alias (#name)));\n#define strong_alias(name, aliasname) 
_strong_alias(name, aliasname)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_get_proc(lib, name) GetProcAddress(lib, name)\n#define __itt_mutex_init(mutex)   InitializeCriticalSection(mutex)\n#define __itt_mutex_lock(mutex)   EnterCriticalSection(mutex)\n#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)\n#define __itt_load_lib(name)      LoadLibraryA(name)\n#define __itt_unload_lib(handle)  FreeLibrary(handle)\n#define __itt_system_error()      (int)GetLastError()\n#define __itt_fstrcmp(s1, s2)     lstrcmpA(s1, s2)\n#define __itt_fstrlen(s)          lstrlenA(s)\n#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l)\n#define __itt_fstrdup(s)          _strdup(s)\n#define __itt_thread_id()         GetCurrentThreadId()\n#define __itt_thread_yield()      SwitchToThread()\n#ifndef ITT_SIMPLE_INIT\nITT_INLINE long\n__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;\nITT_INLINE long __itt_interlocked_increment(volatile long* ptr)\n{\n    return InterlockedIncrement(ptr);\n}\n#endif /* ITT_SIMPLE_INIT */\n#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\n#define __itt_get_proc(lib, name) dlsym(lib, name)\n#define __itt_mutex_init(mutex)   {\\\n    pthread_mutexattr_t mutex_attr;                                         \\\n    int error_code = pthread_mutexattr_init(&mutex_attr);                   \\\n    if (error_code)                                                         \\\n        __itt_report_error(__itt_error_system, \"pthread_mutexattr_init\",    \\\n                           error_code);                                     \\\n    error_code = pthread_mutexattr_settype(&mutex_attr,                     \\\n                                           PTHREAD_MUTEX_RECURSIVE);        \\\n    if (error_code)                                                         \\\n        __itt_report_error(__itt_error_system, \"pthread_mutexattr_settype\", \\\n                           error_code);      
                               \\\n    error_code = pthread_mutex_init(mutex, &mutex_attr);                    \\\n    if (error_code)                                                         \\\n        __itt_report_error(__itt_error_system, \"pthread_mutex_init\",        \\\n                           error_code);                                     \\\n    error_code = pthread_mutexattr_destroy(&mutex_attr);                    \\\n    if (error_code)                                                         \\\n        __itt_report_error(__itt_error_system, \"pthread_mutexattr_destroy\", \\\n                           error_code);                                     \\\n}\n#define __itt_mutex_lock(mutex)   pthread_mutex_lock(mutex)\n#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)\n#define __itt_load_lib(name)      dlopen(name, RTLD_LAZY)\n#define __itt_unload_lib(handle)  dlclose(handle)\n#define __itt_system_error()      errno\n#define __itt_fstrcmp(s1, s2)     strcmp(s1, s2)\n#define __itt_fstrlen(s)          strlen(s)\n#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l)\n#define __itt_fstrdup(s)          strdup(s)\n#define __itt_thread_id()         pthread_self()\n#define __itt_thread_yield()      sched_yield()\n#if ITT_ARCH==ITT_ARCH_IA64\n#ifdef __INTEL_COMPILER\n#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)\n#else  /* __INTEL_COMPILER */\n/* TODO: Add Support for not Intel compilers for IA-64 architecture */\n#endif /* __INTEL_COMPILER */\n#elif ITT_ARCH==ITT_ARCH_IA32 || ITT_ARCH==ITT_ARCH_IA32E /* ITT_ARCH!=ITT_ARCH_IA64 */\nITT_INLINE long\n__TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE;\nITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)\n{\n    long result;\n    __asm__ __volatile__(\"lock\\nxadd %0,%1\"\n                          : \"=r\"(result),\"=m\"(*(int*)ptr)\n                          : \"0\"(addend), \"m\"(*(int*)ptr)\n                          : 
\"memory\");\n    return result;\n}\n#elif ITT_ARCH==ITT_ARCH_ARM\n#define __TBB_machine_fetchadd4(addr, val) __sync_fetch_and_add(addr, val)\n#endif /* ITT_ARCH==ITT_ARCH_IA64 */\n#ifndef ITT_SIMPLE_INIT\nITT_INLINE long\n__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;\nITT_INLINE long __itt_interlocked_increment(volatile long* ptr)\n{\n    return __TBB_machine_fetchadd4(ptr, 1) + 1L;\n}\n#endif /* ITT_SIMPLE_INIT */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\ntypedef enum {\n    __itt_collection_normal = 0,\n    __itt_collection_paused = 1\n} __itt_collection_state;\n\ntypedef enum {\n    __itt_thread_normal  = 0,\n    __itt_thread_ignored = 1\n} __itt_thread_state;\n\n#pragma pack(push, 8)\n\ntypedef struct ___itt_thread_info\n{\n    const char* nameA; /*!< Copy of original name in ASCII. */\n#if defined(UNICODE) || defined(_UNICODE)\n    const wchar_t* nameW; /*!< Copy of original name in UNICODE. */\n#else  /* UNICODE || _UNICODE */\n    void* nameW;\n#endif /* UNICODE || _UNICODE */\n    TIDT               tid;\n    __itt_thread_state state;   /*!< Thread state (paused or normal) */\n    int                extra1;  /*!< Reserved to the runtime */\n    void*              extra2;  /*!< Reserved to the runtime */\n    struct ___itt_thread_info* next;\n} __itt_thread_info;\n\n#include \"ittnotify_types.h\" /* For __itt_group_id definition */\n\ntypedef struct ___itt_api_info_20101001\n{\n    const char*    name;\n    void**         func_ptr;\n    void*          init_func;\n    __itt_group_id group;\n}  __itt_api_info_20101001;\n\ntypedef struct ___itt_api_info\n{\n    const char*    name;\n    void**         func_ptr;\n    void*          init_func;\n    void*          null_func;\n    __itt_group_id group;\n}  __itt_api_info;\n\nstruct ___itt_domain;\nstruct ___itt_string_handle;\n\ntypedef struct ___itt_global\n{\n    unsigned char          magic[8];\n    unsigned long          version_major;\n    unsigned long          version_minor;\n  
  unsigned long          version_build;\n    volatile long          api_initialized;\n    volatile long          mutex_initialized;\n    volatile long          atomic_counter;\n    mutex_t                mutex;\n    lib_t                  lib;\n    void*                  error_handler;\n    const char**           dll_path_ptr;\n    __itt_api_info*        api_list_ptr;\n    struct ___itt_global*  next;\n    /* Joinable structures below */\n    __itt_thread_info*     thread_list;\n    struct ___itt_domain*  domain_list;\n    struct ___itt_string_handle* string_list;\n    __itt_collection_state state;\n} __itt_global;\n\n#pragma pack(pop)\n\n#define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \\\n    h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \\\n    if (h != NULL) { \\\n        h->tid    = t; \\\n        h->nameA  = NULL; \\\n        h->nameW  = n ? _wcsdup(n) : NULL; \\\n        h->state  = s; \\\n        h->extra1 = 0;    /* reserved */ \\\n        h->extra2 = NULL; /* reserved */ \\\n        h->next   = NULL; \\\n        if (h_tail == NULL) \\\n            (gptr)->thread_list = h; \\\n        else \\\n            h_tail->next = h; \\\n    } \\\n}\n\n#define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \\\n    h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \\\n    if (h != NULL) { \\\n        h->tid    = t; \\\n        h->nameA  = n ? __itt_fstrdup(n) : NULL; \\\n        h->nameW  = NULL; \\\n        h->state  = s; \\\n        h->extra1 = 0;    /* reserved */ \\\n        h->extra2 = NULL; /* reserved */ \\\n        h->next   = NULL; \\\n        if (h_tail == NULL) \\\n            (gptr)->thread_list = h; \\\n        else \\\n            h_tail->next = h; \\\n    } \\\n}\n\n#define NEW_DOMAIN_W(gptr,h,h_tail,name) { \\\n    h = (__itt_domain*)malloc(sizeof(__itt_domain)); \\\n    if (h != NULL) { \\\n        h->flags  = 0;    /* domain is disabled by default */ \\\n        h->nameA  = NULL; \\\n        h->nameW  = name ? 
_wcsdup(name) : NULL; \\\n        h->extra1 = 0;    /* reserved */ \\\n        h->extra2 = NULL; /* reserved */ \\\n        h->next   = NULL; \\\n        if (h_tail == NULL) \\\n            (gptr)->domain_list = h; \\\n        else \\\n            h_tail->next = h; \\\n    } \\\n}\n\n#define NEW_DOMAIN_A(gptr,h,h_tail,name) { \\\n    h = (__itt_domain*)malloc(sizeof(__itt_domain)); \\\n    if (h != NULL) { \\\n        h->flags  = 0;    /* domain is disabled by default */ \\\n        h->nameA  = name ? __itt_fstrdup(name) : NULL; \\\n        h->nameW  = NULL; \\\n        h->extra1 = 0;    /* reserved */ \\\n        h->extra2 = NULL; /* reserved */ \\\n        h->next   = NULL; \\\n        if (h_tail == NULL) \\\n            (gptr)->domain_list = h; \\\n        else \\\n            h_tail->next = h; \\\n    } \\\n}\n\n#define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \\\n    h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \\\n    if (h != NULL) { \\\n        h->strA   = NULL; \\\n        h->strW   = name ? _wcsdup(name) : NULL; \\\n        h->extra1 = 0;    /* reserved */ \\\n        h->extra2 = NULL; /* reserved */ \\\n        h->next   = NULL; \\\n        if (h_tail == NULL) \\\n            (gptr)->string_list = h; \\\n        else \\\n            h_tail->next = h; \\\n    } \\\n}\n\n#define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \\\n    h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \\\n    if (h != NULL) { \\\n        h->strA   = name ? __itt_fstrdup(name) : NULL; \\\n        h->strW   = NULL; \\\n        h->extra1 = 0;    /* reserved */ \\\n        h->extra2 = NULL; /* reserved */ \\\n        h->next   = NULL; \\\n        if (h_tail == NULL) \\\n            (gptr)->string_list = h; \\\n        else \\\n            h_tail->next = h; \\\n    } \\\n}\n\n#endif /* _ITTNOTIFY_CONFIG_H_ */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/ittnotify_static.c",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"ittnotify_config.h\"\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define PATH_MAX 512\n#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\n#include <limits.h>\n#include <dlfcn.h>\n#include <errno.h>\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n#include <string.h>\n\n#define INTEL_NO_MACRO_BODY \n#define INTEL_ITTNOTIFY_API_PRIVATE\n#include \"ittnotify.h\"\n#include \"legacy/ittnotify.h\"\n\n#include \"disable_warnings.h\"\n\nstatic const char api_version[] = API_VERSION \"\\0\\n@(#) $Revision: 336044 $\\n\";\n\n#define _N_(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)\n\n#if ITT_OS==ITT_OS_WIN\nstatic const char* ittnotify_lib_name = \"libittnotify.dll\";\n#elif ITT_OS==ITT_OS_LINUX\nstatic const char* ittnotify_lib_name = \"libittnotify.so\";\n#elif ITT_OS==ITT_OS_MAC\nstatic const char* ittnotify_lib_name = \"libittnotify.dylib\";\n#else\n#error Unsupported or unknown OS.\n#endif\n\n#ifdef __ANDROID__\n#include <android/log.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <linux/limits.h>\n\n#ifdef ITT_ANDROID_LOG\n    #define ITT_ANDROID_LOG_TAG   \"INTEL_VTUNE_USERAPI\"\n    #define ITT_ANDROID_LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, ITT_ANDROID_LOG_TAG, __VA_ARGS__))\n    #define ITT_ANDROID_LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, ITT_ANDROID_LOG_TAG, __VA_ARGS__))\n    #define ITT_ANDROID_LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR,ITT_ANDROID_LOG_TAG, __VA_ARGS__))\n    #define ITT_ANDROID_LOGD(...) 
((void)__android_log_print(ANDROID_LOG_DEBUG,ITT_ANDROID_LOG_TAG, __VA_ARGS__))\n#else\n    #define ITT_ANDROID_LOGI(...)\n    #define ITT_ANDROID_LOGW(...)\n    #define ITT_ANDROID_LOGE(...)\n    #define ITT_ANDROID_LOGD(...)\n#endif\n\n/* default location of userapi collector on Android */\n#define ANDROID_ITTNOTIFY_DEFAULT_PATH  \"/data/data/com.intel.vtune/intel/libittnotify.so\"\n#endif\n\n\n#ifndef LIB_VAR_NAME\n#if ITT_ARCH==ITT_ARCH_IA32 || ITT_ARCH==ITT_ARCH_ARM\n#define LIB_VAR_NAME INTEL_LIBITTNOTIFY32\n#else\n#define LIB_VAR_NAME INTEL_LIBITTNOTIFY64\n#endif\n#endif /* LIB_VAR_NAME */\n\n#define ITT_MUTEX_INIT_AND_LOCK(p) {                                 \\\n        if (!p.mutex_initialized)                                    \\\n        {                                                            \\\n            if (__itt_interlocked_increment(&p.atomic_counter) == 1) \\\n            {                                                        \\\n                __itt_mutex_init(&p.mutex);                          \\\n                p.mutex_initialized = 1;                             \\\n            }                                                        \\\n            else                                                     \\\n                while (!p.mutex_initialized)                         \\\n                    __itt_thread_yield();                            \\\n        }                                                            \\\n        __itt_mutex_lock(&p.mutex);                                  \\\n}\n\nconst int _N_(err) = 0;\n\ntypedef int (__itt_init_ittlib_t)(const char*, __itt_group_id);\n\n/* this define used to control initialization function name. 
*/\n#ifndef __itt_init_ittlib_name\nITT_EXTERN_C int _N_(init_ittlib)(const char*, __itt_group_id);\nstatic __itt_init_ittlib_t* __itt_init_ittlib_ptr = _N_(init_ittlib);\n#define __itt_init_ittlib_name __itt_init_ittlib_ptr\n#endif /* __itt_init_ittlib_name */\n\ntypedef void (__itt_fini_ittlib_t)(void);\n\n/* this define used to control finalization function name. */\n#ifndef __itt_fini_ittlib_name\nITT_EXTERN_C void _N_(fini_ittlib)(void);\nstatic __itt_fini_ittlib_t* __itt_fini_ittlib_ptr = _N_(fini_ittlib);\n#define __itt_fini_ittlib_name __itt_fini_ittlib_ptr\n#endif /* __itt_fini_ittlib_name */\n\n/* building pointers to imported funcs */\n#undef ITT_STUBV\n#undef ITT_STUB\n#define ITT_STUB(api,type,name,args,params,ptr,group,format)   \\\nstatic type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args;\\\ntypedef type api ITT_JOIN(_N_(name),_t) args;                  \\\nITT_EXTERN_C { ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); }  \\\nstatic type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args \\\n{                                                              \\\n    __itt_init_ittlib_name(NULL, __itt_group_all);             \\\n    if (ITTNOTIFY_NAME(name) && ITTNOTIFY_NAME(name) != ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init))) \\\n        return ITTNOTIFY_NAME(name) params;                    \\\n    else                                                       \\\n        return (type)0;                                        \\\n}\n\n#define ITT_STUBV(api,type,name,args,params,ptr,group,format)  \\\nstatic type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args;\\\ntypedef type api ITT_JOIN(_N_(name),_t) args;                  \\\nITT_EXTERN_C { ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); }  \\\nstatic type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args \\\n{                                                              \\\n    __itt_init_ittlib_name(NULL, 
__itt_group_all);             \\\n    if (ITTNOTIFY_NAME(name) && ITTNOTIFY_NAME(name) != ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init))) \\\n        ITTNOTIFY_NAME(name) params;                           \\\n    else                                                       \\\n        return;                                                \\\n}\n\n#undef __ITT_INTERNAL_INIT\n#include \"ittnotify_static.h\"\n\n#undef ITT_STUB\n#undef ITT_STUBV\n#define ITT_STUB(api,type,name,args,params,ptr,group,format)   \\\nstatic type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args;\\\ntypedef type api ITT_JOIN(_N_(name),_t) args;                  \\\nITT_EXTERN_C { ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); }\n\n#define ITT_STUBV(api,type,name,args,params,ptr,group,format)  \\\nstatic type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args;\\\ntypedef type api ITT_JOIN(_N_(name),_t) args;                  \\\nITT_EXTERN_C { ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); }\n\n#define __ITT_INTERNAL_INIT\n#include \"ittnotify_static.h\"\n#undef __ITT_INTERNAL_INIT\n\nITT_GROUP_LIST(group_list);\n\n#pragma pack(push, 8)\n\ntypedef struct ___itt_group_alias\n{\n    const char*    env_var;\n    __itt_group_id groups;\n} __itt_group_alias;\n\nstatic __itt_group_alias group_alias[] = {\n    { \"KMP_FOR_TPROFILE\", (__itt_group_id)(__itt_group_control | __itt_group_thread | __itt_group_sync  | __itt_group_mark) },\n    { \"KMP_FOR_TCHECK\",   (__itt_group_id)(__itt_group_control | __itt_group_thread | __itt_group_sync  | __itt_group_fsync | __itt_group_mark | __itt_group_suppress) },\n    { NULL,               (__itt_group_none) },\n    { api_version,        (__itt_group_none) } /* !!! Just to avoid unused code elimination !!! 
*/\n};\n\n#pragma pack(pop)\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#pragma warning(push)\n#pragma warning(disable: 4054) /* warning C4054: 'type cast' : from function pointer 'XXX' to data pointer 'void *' */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\nstatic __itt_api_info api_list[] = {\n/* Define functions with static implementation */\n#undef ITT_STUB\n#undef ITT_STUBV\n#define ITT_STUB(api,type,name,args,params,nameindll,group,format) { ITT_TO_STR(ITT_JOIN(__itt_,nameindll)), (void**)(void*)&ITTNOTIFY_NAME(name), (void*)(size_t)&ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)), (void*)(size_t)&ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)), (__itt_group_id)(group)},\n#define ITT_STUBV ITT_STUB\n#define __ITT_INTERNAL_INIT\n#include \"ittnotify_static.h\"\n#undef __ITT_INTERNAL_INIT\n/* Define functions without static implementation */\n#undef ITT_STUB\n#undef ITT_STUBV\n#define ITT_STUB(api,type,name,args,params,nameindll,group,format) {ITT_TO_STR(ITT_JOIN(__itt_,nameindll)), (void**)(void*)&ITTNOTIFY_NAME(name), (void*)(size_t)&ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)), NULL, (__itt_group_id)(group)},\n#define ITT_STUBV ITT_STUB\n#include \"ittnotify_static.h\"\n    {NULL, NULL, NULL, NULL, __itt_group_none}\n};\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#pragma warning(pop)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/* private, init thread info item. 
used for internal purposes */\nstatic __itt_thread_info init_thread_info = {\n    (const char*)NULL,                        /* nameA */\n#if defined(UNICODE) || defined(_UNICODE)\n    (const wchar_t*)NULL,                     /* nameW */\n#else\n    (void*)NULL,                              /* nameW */\n#endif\n    0,                                        /* tid */\n    __itt_thread_normal,                      /* state */\n    0,                                        /* extra1 */\n    (void*)NULL,                              /* extra2 */\n    (__itt_thread_info*)NULL                  /* next */\n};\n\n/* private, NULL domain item. used for internal purposes */\nstatic __itt_domain null_domain = {\n    0,                                        /* flags:  disabled by default */\n    (const char*)NULL,                        /* nameA */\n#if defined(UNICODE) || defined(_UNICODE)\n    (const wchar_t*)NULL,                     /* nameW */\n#else\n    (void*)NULL,                              /* nameW */\n#endif\n    0,                                        /* extra1 */\n    (void*)NULL,                              /* extra2 */\n    (__itt_domain*)NULL                       /* next */\n};\n\n/* private, NULL string handle item. used for internal purposes */\nstatic __itt_string_handle null_string_handle = {\n    (const char*)NULL,                        /* strA */\n#if defined(UNICODE) || defined(_UNICODE)\n    (const wchar_t*)NULL,                     /* strW */\n#else\n    (void*)NULL,                              /* strW */\n#endif\n    0,                                        /* extra1 */\n    (void*)NULL,                              /* extra2 */\n    (__itt_string_handle*)NULL                /* next */\n};\n\nstatic const char dll_path[PATH_MAX] = { 0 };\n\n/* static part descriptor which handles. all notification api attributes. 
*/\n__itt_global _N_(_ittapi_global) = {\n    ITT_MAGIC,                                     /* identification info */\n    ITT_MAJOR, ITT_MINOR, API_VERSION_BUILD,       /* version info */\n    0,                                             /* api_initialized */\n    0,                                             /* mutex_initialized */\n    0,                                             /* atomic_counter */\n    MUTEX_INITIALIZER,                             /* mutex */\n    NULL,                                          /* dynamic library handle */\n    NULL,                                          /* error_handler */\n    (const char**)&dll_path,                       /* dll_path_ptr */\n    (__itt_api_info*)&api_list,                    /* api_list_ptr */\n    NULL,                                          /* next __itt_global */\n    (__itt_thread_info*)&init_thread_info,         /* thread_list */\n    (__itt_domain*)&null_domain,                   /* domain_list */\n    (__itt_string_handle*)&null_string_handle,     /* string_list */\n    __itt_collection_normal                        /* collection state */\n};\n\ntypedef void (__itt_api_init_t)(__itt_global*, __itt_group_id);\ntypedef void (__itt_api_fini_t)(__itt_global*);\n\n/* ========================================================================= */\n\n#ifdef ITT_NOTIFY_EXT_REPORT\nITT_EXTERN_C void _N_(error_handler)(__itt_error_code, va_list args);\n#endif /* ITT_NOTIFY_EXT_REPORT */\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#pragma warning(push)\n#pragma warning(disable: 4055) /* warning C4055: 'type cast' : from data pointer 'void *' to function pointer 'XXX' */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\nstatic void __itt_report_error(__itt_error_code code, ...)\n{\n    va_list args;\n    va_start(args, code);\n    if (_N_(_ittapi_global).error_handler != NULL)\n    {\n        __itt_error_handler_t* handler = (__itt_error_handler_t*)(size_t)_N_(_ittapi_global).error_handler;\n        
handler(code, args);\n    }\n#ifdef ITT_NOTIFY_EXT_REPORT\n    _N_(error_handler)(code, args);\n#endif /* ITT_NOTIFY_EXT_REPORT */\n    va_end(args);\n}\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#pragma warning(pop)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nstatic __itt_domain* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createW),_init))(const wchar_t* name)\n{\n    __itt_domain *h_tail, *h;\n\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, __itt_group_all);\n        if (ITTNOTIFY_NAME(domain_createW) && ITTNOTIFY_NAME(domain_createW) != ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createW),_init)))\n            return ITTNOTIFY_NAME(domain_createW)(name);\n    }\n\n    if (name == NULL)\n        return _N_(_ittapi_global).domain_list;\n\n    ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global));\n    for (h_tail = NULL, h = _N_(_ittapi_global).domain_list; h != NULL; h_tail = h, h = h->next)\n        if (h->nameW != NULL && !wcscmp(h->nameW, name))\n            break;\n    if (h == NULL) {\n        NEW_DOMAIN_W(&_N_(_ittapi_global),h,h_tail,name);\n    }\n    __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n    return h;\n}\n\nstatic __itt_domain* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createA),_init))(const char* name)\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nstatic __itt_domain* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(domain_create),_init))(const char* name)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n{\n    __itt_domain *h_tail, *h;\n\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, __itt_group_all);\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n        if (ITTNOTIFY_NAME(domain_createA) && ITTNOTIFY_NAME(domain_createA) != ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createA),_init)))\n            return ITTNOTIFY_NAME(domain_createA)(name);\n#else\n        if 
(ITTNOTIFY_NAME(domain_create) && ITTNOTIFY_NAME(domain_create) != ITT_VERSIONIZE(ITT_JOIN(_N_(domain_create),_init)))\n            return ITTNOTIFY_NAME(domain_create)(name);\n#endif\n    }\n\n    if (name == NULL)\n        return _N_(_ittapi_global).domain_list;\n\n    ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global));\n    for (h_tail = NULL, h = _N_(_ittapi_global).domain_list; h != NULL; h_tail = h, h = h->next)\n        if (h->nameA != NULL && !__itt_fstrcmp(h->nameA, name))\n            break;\n    if (h == NULL) {\n        NEW_DOMAIN_A(&_N_(_ittapi_global),h,h_tail,name);\n    }\n    __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n    return h;\n}\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nstatic __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createW),_init))(const wchar_t* name)\n{\n    __itt_string_handle *h_tail, *h;\n\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, __itt_group_all);\n        if (ITTNOTIFY_NAME(string_handle_createW) && ITTNOTIFY_NAME(string_handle_createW) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createW),_init)))\n            return ITTNOTIFY_NAME(string_handle_createW)(name);\n    }\n\n    if (name == NULL)\n        return _N_(_ittapi_global).string_list;\n\n    ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global));\n    for (h_tail = NULL, h = _N_(_ittapi_global).string_list; h != NULL; h_tail = h, h = h->next)\n        if (h->strW != NULL && !wcscmp(h->strW, name))\n            break;\n    if (h == NULL) {\n        NEW_STRING_HANDLE_W(&_N_(_ittapi_global),h,h_tail,name);\n    }\n    __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n    return h;\n}\n\nstatic __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createA),_init))(const char* name)\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nstatic __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_create),_init))(const char* name)\n#endif /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */\n{\n    __itt_string_handle *h_tail, *h;\n\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, __itt_group_all);\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n        if (ITTNOTIFY_NAME(string_handle_createA) && ITTNOTIFY_NAME(string_handle_createA) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createA),_init)))\n            return ITTNOTIFY_NAME(string_handle_createA)(name);\n#else\n        if (ITTNOTIFY_NAME(string_handle_create) && ITTNOTIFY_NAME(string_handle_create) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_create),_init)))\n            return ITTNOTIFY_NAME(string_handle_create)(name);\n#endif\n    }\n\n    if (name == NULL)\n        return _N_(_ittapi_global).string_list;\n\n    ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global));\n    for (h_tail = NULL, h = _N_(_ittapi_global).string_list; h != NULL; h_tail = h, h = h->next)\n        if (h->strA != NULL && !__itt_fstrcmp(h->strA, name))\n            break;\n    if (h == NULL) {\n        NEW_STRING_HANDLE_A(&_N_(_ittapi_global),h,h_tail,name);\n    }\n    __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n    return h;\n}\n\n/* -------------------------------------------------------------------------- */\n\nstatic void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(pause),_init))(void)\n{\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, __itt_group_all);\n        if (ITTNOTIFY_NAME(pause) && ITTNOTIFY_NAME(pause) != ITT_VERSIONIZE(ITT_JOIN(_N_(pause),_init)))\n        {\n            ITTNOTIFY_NAME(pause)();\n            return;\n        }\n    }\n    _N_(_ittapi_global).state = __itt_collection_paused;\n}\n\nstatic void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(resume),_init))(void)\n{\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, 
__itt_group_all);\n        if (ITTNOTIFY_NAME(resume) && ITTNOTIFY_NAME(resume) != ITT_VERSIONIZE(ITT_JOIN(_N_(resume),_init)))\n        {\n            ITTNOTIFY_NAME(resume)();\n            return;\n        }\n    }\n    _N_(_ittapi_global).state = __itt_collection_normal;\n}\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nstatic void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameW),_init))(const wchar_t* name)\n{\n    TIDT tid = __itt_thread_id();\n    __itt_thread_info *h_tail, *h;\n\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, __itt_group_all);\n        if (ITTNOTIFY_NAME(thread_set_nameW) && ITTNOTIFY_NAME(thread_set_nameW) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameW),_init)))\n        {\n            ITTNOTIFY_NAME(thread_set_nameW)(name);\n            return;\n        }\n    }\n\n    __itt_mutex_lock(&_N_(_ittapi_global).mutex);\n    for (h_tail = NULL, h = _N_(_ittapi_global).thread_list; h != NULL; h_tail = h, h = h->next)\n        if (h->tid == tid)\n            break;\n    if (h == NULL) {\n        NEW_THREAD_INFO_W(&_N_(_ittapi_global), h, h_tail, tid, __itt_thread_normal, name);\n    }\n    else\n    {\n        h->nameW = name ? 
_wcsdup(name) : NULL;\n    }\n    __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n}\n\nstatic int ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_name_setW),_init))(const wchar_t* name, int namelen)\n{\n    namelen = namelen;\n    ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameW),_init))(name);\n    return 0;\n}\n\nstatic void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameA),_init))(const char* name)\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nstatic void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_name),_init))(const char* name)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n{\n    TIDT tid = __itt_thread_id();\n    __itt_thread_info *h_tail, *h;\n\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, __itt_group_all);\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n        if (ITTNOTIFY_NAME(thread_set_nameA) && ITTNOTIFY_NAME(thread_set_nameA) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameA),_init)))\n        {\n            ITTNOTIFY_NAME(thread_set_nameA)(name);\n            return;\n        }\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n        if (ITTNOTIFY_NAME(thread_set_name) && ITTNOTIFY_NAME(thread_set_name) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_name),_init)))\n        {\n            ITTNOTIFY_NAME(thread_set_name)(name);\n            return;\n        }\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n    }\n\n    __itt_mutex_lock(&_N_(_ittapi_global).mutex);\n    for (h_tail = NULL, h = _N_(_ittapi_global).thread_list; h != NULL; h_tail = h, h = h->next)\n        if (h->tid == tid)\n            break;\n    if (h == NULL) {\n        NEW_THREAD_INFO_A(&_N_(_ittapi_global), h, h_tail, tid, __itt_thread_normal, name);\n    }\n    else\n    {\n        h->nameA = name ? 
__itt_fstrdup(name) : NULL;\n    }\n    __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n}\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nstatic int ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_name_setA),_init))(const char* name, int namelen)\n{\n    namelen = namelen;\n    ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameA),_init))(name);\n    return 0;\n}\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nstatic int ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_name_set),_init))(const char* name, int namelen)\n{\n    ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_name),_init))(name);\n    return 0;\n}\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\nstatic void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_ignore),_init))(void)\n{\n    TIDT tid = __itt_thread_id();\n    __itt_thread_info *h_tail, *h;\n\n    if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0)\n    {\n        __itt_init_ittlib_name(NULL, __itt_group_all);\n        if (ITTNOTIFY_NAME(thread_ignore) && ITTNOTIFY_NAME(thread_ignore) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_ignore),_init)))\n        {\n            ITTNOTIFY_NAME(thread_ignore)();\n            return;\n        }\n    }\n\n    __itt_mutex_lock(&_N_(_ittapi_global).mutex);\n    for (h_tail = NULL, h = _N_(_ittapi_global).thread_list; h != NULL; h_tail = h, h = h->next)\n        if (h->tid == tid)\n            break;\n    if (h == NULL) {\n        static const char* name = \"unknown\";\n        NEW_THREAD_INFO_A(&_N_(_ittapi_global), h, h_tail, tid, __itt_thread_ignored, name);\n    }\n    else\n    {\n        h->state = __itt_thread_ignored;\n    }\n    __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n}\n\nstatic void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_ignore),_init))(void)\n{\n    ITT_VERSIONIZE(ITT_JOIN(_N_(thread_ignore),_init))();\n}\n\nstatic void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(enable_attach),_init))(void)\n{\n#ifdef __ANDROID__\n    /*\n     * if LIB_VAR_NAME env variable were set before then stay previous value\n     * else set default 
path\n    */\n    setenv(ITT_TO_STR(LIB_VAR_NAME), ANDROID_ITTNOTIFY_DEFAULT_PATH, 0);\n#endif\n}\n\n/* -------------------------------------------------------------------------- */\n\nstatic const char* __itt_fsplit(const char* s, const char* sep, const char** out, int* len)\n{\n    int i;\n    int j;\n\n    if (!s || !sep || !out || !len)\n        return NULL;\n\n    for (i = 0; s[i]; i++)\n    {\n        int b = 0;\n        for (j = 0; sep[j]; j++)\n            if (s[i] == sep[j])\n            {\n                b = 1;\n                break;\n            }\n        if (!b)\n            break;\n    }\n\n    if (!s[i])\n        return NULL;\n\n    *len = 0;\n    *out = &s[i];\n\n    for (; s[i]; i++, (*len)++)\n    {\n        int b = 0;\n        for (j = 0; sep[j]; j++)\n            if (s[i] == sep[j])\n            {\n                b = 1;\n                break;\n            }\n        if (b)\n            break;\n    }\n\n    for (; s[i]; i++)\n    {\n        int b = 0;\n        for (j = 0; sep[j]; j++)\n            if (s[i] == sep[j])\n            {\n                b = 1;\n                break;\n            }\n        if (!b)\n            break;\n    }\n\n    return &s[i];\n}\n\n/* This function return value of env variable that placed into static buffer.\n * !!! The same static buffer is used for subsequent calls. 
!!!\n * This was done to aviod dynamic allocation for few calls.\n * Actually we need this function only four times.\n */\nstatic const char* __itt_get_env_var(const char* name)\n{\n#define MAX_ENV_VALUE_SIZE 4086\n    static char  env_buff[MAX_ENV_VALUE_SIZE];\n    static char* env_value = (char*)env_buff;\n\n    if (name != NULL)\n    {\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n        size_t max_len = MAX_ENV_VALUE_SIZE - (size_t)(env_value - env_buff);\n        DWORD rc = GetEnvironmentVariableA(name, env_value, (DWORD)max_len);\n        if (rc >= max_len)\n            __itt_report_error(__itt_error_env_too_long, name, (size_t)rc - 1, (size_t)(max_len - 1));\n        else if (rc > 0)\n        {\n            const char* ret = (const char*)env_value;\n            env_value += rc + 1;\n            return ret;\n        }\n        else\n        {\n            /* If environment variable is empty, GetEnvirornmentVariables()\n             * returns zero (number of characters (not including terminating null),\n             * and GetLastError() returns ERROR_SUCCESS. 
*/\n            DWORD err = GetLastError();\n            if (err == ERROR_SUCCESS)\n                return env_value;\n\n            if (err != ERROR_ENVVAR_NOT_FOUND)\n                __itt_report_error(__itt_error_cant_read_env, name, (int)err);\n        }\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\n        char* env = getenv(name);\n        if (env != NULL)\n        {\n            size_t len = strlen(env);\n            size_t max_len = MAX_ENV_VALUE_SIZE - (size_t)(env_value - env_buff);\n            if (len < max_len)\n            {\n                const char* ret = (const char*)env_value;\n                strncpy(env_value, env, len + 1);\n                env_value += len + 1;\n                return ret;\n            } else\n                __itt_report_error(__itt_error_env_too_long, name, (size_t)len, (size_t)(max_len - 1));\n        }\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n    }\n    return NULL;\n}\n\nstatic const char* __itt_get_lib_name(void)\n{\n    const char* lib_name = __itt_get_env_var(ITT_TO_STR(LIB_VAR_NAME));\n\n#ifdef __ANDROID__\n    if (lib_name == NULL)\n    {\n        const char* const system_wide_marker_filename = \"/data/local/tmp/com.intel.itt.collector_lib\";\n        int itt_marker_file_fd = open(system_wide_marker_filename, O_RDONLY);\n        ssize_t res = 0;\n\n        if (itt_marker_file_fd == -1)\n        {\n            const pid_t my_pid = getpid();\n            char cmdline_path[PATH_MAX] = {0};\n            char package_name[PATH_MAX] = {0};\n            char app_sandbox_file[PATH_MAX] = {0};\n            int cmdline_fd = 0;\n\n            ITT_ANDROID_LOGI(\"Unable to open system-wide marker file.\");\n            snprintf(cmdline_path, PATH_MAX - 1, \"/proc/%d/cmdline\", my_pid);\n            ITT_ANDROID_LOGI(\"CMD file: %s\\n\", cmdline_path);\n            cmdline_fd = open(cmdline_path, O_RDONLY);\n            if (cmdline_fd == -1)\n            {\n                ITT_ANDROID_LOGE(\"Unable to open %s file!\", 
cmdline_path);\n                return lib_name;\n            }\n            res = read(cmdline_fd, package_name, PATH_MAX - 1);\n            if (res == -1)\n            {\n                ITT_ANDROID_LOGE(\"Unable to read %s file!\", cmdline_path);\n                res = close(cmdline_fd);\n                if (res == -1)\n                {\n                    ITT_ANDROID_LOGE(\"Unable to close %s file!\", cmdline_path);\n                }\n                return lib_name;\n            }\n            res = close(cmdline_fd);\n            if (res == -1)\n            {\n                ITT_ANDROID_LOGE(\"Unable to close %s file!\", cmdline_path);\n                return lib_name;\n            }\n            ITT_ANDROID_LOGI(\"Package name: %s\\n\", package_name);\n            snprintf(app_sandbox_file, PATH_MAX - 1, \"/data/data/%s/com.intel.itt.collector_lib\", package_name);\n            ITT_ANDROID_LOGI(\"Lib marker file name: %s\\n\", app_sandbox_file);\n            itt_marker_file_fd = open(app_sandbox_file, O_RDONLY);\n            if (itt_marker_file_fd == -1)\n            {\n                ITT_ANDROID_LOGE(\"Unable to open app marker file!\");\n                return lib_name;\n            }\n        }\n\n        {\n            char itt_lib_name[PATH_MAX] = {0};\n\n            res = read(itt_marker_file_fd, itt_lib_name, PATH_MAX - 1);\n            if (res == -1)\n            {\n                ITT_ANDROID_LOGE(\"Unable to read %s file!\", itt_marker_file_fd);\n                res = close(itt_marker_file_fd);\n                if (res == -1)\n                {\n                    ITT_ANDROID_LOGE(\"Unable to close %s file!\", itt_marker_file_fd);\n                }\n                return lib_name;\n            }\n            ITT_ANDROID_LOGI(\"ITT Lib path: %s\", itt_lib_name);\n            res = close(itt_marker_file_fd);\n            if (res == -1)\n            {\n                ITT_ANDROID_LOGE(\"Unable to close %s file!\", itt_marker_file_fd);\n        
        return lib_name;\n            }\n            ITT_ANDROID_LOGI(\"Set env\");\n            res = setenv(ITT_TO_STR(LIB_VAR_NAME), itt_lib_name, 0);\n            if (res == -1)\n            {\n                ITT_ANDROID_LOGE(\"Unable to set env var!\");\n                return lib_name;\n            }\n            lib_name = __itt_get_env_var(ITT_TO_STR(LIB_VAR_NAME));\n            ITT_ANDROID_LOGI(\"ITT Lib path from env: %s\", itt_lib_name);\n        }\n    }\n#endif\n\n    return lib_name;\n}\n\n#ifndef min\n#define min(a,b) (a) < (b) ? (a) : (b)\n#endif /* min */\n\nstatic __itt_group_id __itt_get_groups(void)\n{\n    int i;\n    __itt_group_id res = __itt_group_none;\n    const char* var_name  = \"INTEL_ITTNOTIFY_GROUPS\";\n    const char* group_str = __itt_get_env_var(var_name);\n\n    if (group_str != NULL)\n    {\n        int len;\n        char gr[255];\n        const char* chunk;\n        while ((group_str = __itt_fsplit(group_str, \",; \", &chunk, &len)) != NULL)\n        {\n            __itt_fstrcpyn(gr, chunk, sizeof(gr) - 1);\n            gr[min(len, (int)(sizeof(gr) - 1))] = 0;\n\n            for (i = 0; group_list[i].name != NULL; i++)\n            {\n                if (!__itt_fstrcmp(gr, group_list[i].name))\n                {\n                    res = (__itt_group_id)(res | group_list[i].id);\n                    break;\n                }\n            }\n        }\n        /* TODO: !!! Workaround for bug with warning for unknown group !!!\n         * Should be fixed in new initialization scheme.\n         * Now the following groups should be set always. 
*/\n        for (i = 0; group_list[i].id != __itt_group_none; i++)\n            if (group_list[i].id != __itt_group_all &&\n                group_list[i].id > __itt_group_splitter_min &&\n                group_list[i].id < __itt_group_splitter_max)\n                res = (__itt_group_id)(res | group_list[i].id);\n        return res;\n    }\n    else\n    {\n        for (i = 0; group_alias[i].env_var != NULL; i++)\n            if (__itt_get_env_var(group_alias[i].env_var) != NULL)\n                return group_alias[i].groups;\n    }\n\n    return res;\n}\n\nstatic int __itt_lib_version(lib_t lib)\n{\n    if (lib == NULL)\n        return 0;\n    if (__itt_get_proc(lib, \"__itt_api_init\"))\n        return 2;\n    if (__itt_get_proc(lib, \"__itt_api_version\"))\n        return 1;\n    return 0;\n}\n\n/* It's not used right now! Comment it out to avoid warnings.\nstatic void __itt_reinit_all_pointers(void)\n{\n    register int i;\n    // Fill all pointers with initial stubs\n    for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)\n        *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].init_func;\n}\n*/\n\nstatic void __itt_nullify_all_pointers(void)\n{\n    int i;\n    /* Nulify all pointers except domain_create and string_handle_create */\n    for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)\n        *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].null_func;\n}\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#pragma warning(push)\n#pragma warning(disable: 4054) /* warning C4054: 'type cast' : from function pointer 'XXX' to data pointer 'void *' */\n#pragma warning(disable: 4055) /* warning C4055: 'type cast' : from data pointer 'void *' to function pointer 'XXX' */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\nITT_EXTERN_C void _N_(fini_ittlib)(void)\n{\n    __itt_api_fini_t* __itt_api_fini_ptr;\n    static volatile TIDT current_thread = 0;\n\n    if 
(_N_(_ittapi_global).api_initialized)\n    {\n        __itt_mutex_lock(&_N_(_ittapi_global).mutex);\n        if (_N_(_ittapi_global).api_initialized)\n        {\n            if (current_thread == 0)\n            {\n                current_thread = __itt_thread_id();\n                __itt_api_fini_ptr = (__itt_api_fini_t*)(size_t)__itt_get_proc(_N_(_ittapi_global).lib, \"__itt_api_fini\");\n                if (__itt_api_fini_ptr)\n                    __itt_api_fini_ptr(&_N_(_ittapi_global));\n\n                __itt_nullify_all_pointers();\n\n /* TODO: !!! not safe !!! don't support unload so far.\n  *             if (_N_(_ittapi_global).lib != NULL)\n  *                 __itt_unload_lib(_N_(_ittapi_global).lib);\n  *             _N_(_ittapi_global).lib = NULL;\n  */\n                _N_(_ittapi_global).api_initialized = 0;\n                current_thread = 0;\n            }\n        }\n        __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n    }\n}\n\nITT_EXTERN_C int _N_(init_ittlib)(const char* lib_name, __itt_group_id init_groups)\n{\n    int i;\n    __itt_group_id groups;\n#ifdef ITT_COMPLETE_GROUP\n    __itt_group_id zero_group = __itt_group_none;\n#endif /* ITT_COMPLETE_GROUP */\n    static volatile TIDT current_thread = 0;\n\n    if (!_N_(_ittapi_global).api_initialized)\n    {\n#ifndef ITT_SIMPLE_INIT\n        ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global));\n#endif /* ITT_SIMPLE_INIT */\n\n        if (!_N_(_ittapi_global).api_initialized)\n        {\n            if (current_thread == 0)\n            {\n                current_thread = __itt_thread_id();\n                _N_(_ittapi_global).thread_list->tid = current_thread;\n                if (lib_name == NULL)\n                    lib_name = __itt_get_lib_name();\n                groups = __itt_get_groups();\n                if (groups != __itt_group_none || lib_name != NULL)\n                {\n                    _N_(_ittapi_global).lib = __itt_load_lib((lib_name == NULL) ? 
ittnotify_lib_name : lib_name);\n\n                    if (_N_(_ittapi_global).lib != NULL)\n                    {\n                        __itt_api_init_t* __itt_api_init_ptr;\n                        int lib_version = __itt_lib_version(_N_(_ittapi_global).lib);\n\n                        switch (lib_version) {\n                        case 0:\n                            groups = __itt_group_legacy;\n                        case 1:\n                            /* Fill all pointers from dynamic library */\n                            for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)\n                            {\n                                if (_N_(_ittapi_global).api_list_ptr[i].group & groups & init_groups)\n                                {\n                                    *_N_(_ittapi_global).api_list_ptr[i].func_ptr = (void*)__itt_get_proc(_N_(_ittapi_global).lib, _N_(_ittapi_global).api_list_ptr[i].name);\n                                    if (*_N_(_ittapi_global).api_list_ptr[i].func_ptr == NULL)\n                                    {\n                                        /* Restore pointers for function with static implementation */\n                                        *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].null_func;\n                                        __itt_report_error(__itt_error_no_symbol, lib_name, _N_(_ittapi_global).api_list_ptr[i].name);\n#ifdef ITT_COMPLETE_GROUP\n                                        zero_group = (__itt_group_id)(zero_group | _N_(_ittapi_global).api_list_ptr[i].group);\n#endif /* ITT_COMPLETE_GROUP */\n                                    }\n                                }\n                                else\n                                    *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].null_func;\n                            }\n\n                            if (groups == __itt_group_legacy)\n       
                     {\n                                /* Compatibility with legacy tools */\n                                ITTNOTIFY_NAME(thread_ignore)  = ITTNOTIFY_NAME(thr_ignore);\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n                                ITTNOTIFY_NAME(sync_createA)   = ITTNOTIFY_NAME(sync_set_nameA);\n                                ITTNOTIFY_NAME(sync_createW)   = ITTNOTIFY_NAME(sync_set_nameW);\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\n                                ITTNOTIFY_NAME(sync_create)    = ITTNOTIFY_NAME(sync_set_name);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n                                ITTNOTIFY_NAME(sync_prepare)   = ITTNOTIFY_NAME(notify_sync_prepare);\n                                ITTNOTIFY_NAME(sync_cancel)    = ITTNOTIFY_NAME(notify_sync_cancel);\n                                ITTNOTIFY_NAME(sync_acquired)  = ITTNOTIFY_NAME(notify_sync_acquired);\n                                ITTNOTIFY_NAME(sync_releasing) = ITTNOTIFY_NAME(notify_sync_releasing);\n                            }\n\n#ifdef ITT_COMPLETE_GROUP\n                            for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)\n                                if (_N_(_ittapi_global).api_list_ptr[i].group & zero_group)\n                                    *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].null_func;\n#endif /* ITT_COMPLETE_GROUP */\n                            break;\n                        case 2:\n                            __itt_api_init_ptr = (__itt_api_init_t*)(size_t)__itt_get_proc(_N_(_ittapi_global).lib, \"__itt_api_init\");\n                            if (__itt_api_init_ptr)\n                                __itt_api_init_ptr(&_N_(_ittapi_global), init_groups);\n                            break;\n                        }\n                    }\n                    else\n                    {\n                        __itt_nullify_all_pointers();\n\n                        
__itt_report_error(__itt_error_no_module, lib_name,\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n                            __itt_system_error()\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n                            dlerror()\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n                        );\n                    }\n                }\n                else\n                {\n                    __itt_nullify_all_pointers();\n                }\n                _N_(_ittapi_global).api_initialized = 1;\n                current_thread = 0;\n                /* !!! Just to avoid unused code elimination !!! */\n                if (__itt_fini_ittlib_ptr == _N_(fini_ittlib)) current_thread = 0;\n            }\n        }\n\n#ifndef ITT_SIMPLE_INIT\n        __itt_mutex_unlock(&_N_(_ittapi_global).mutex);\n#endif /* ITT_SIMPLE_INIT */\n    }\n\n    /* Evaluating if any function ptr is non empty and it's in init_groups */\n    for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)\n        if (*_N_(_ittapi_global).api_list_ptr[i].func_ptr != _N_(_ittapi_global).api_list_ptr[i].null_func &&\n            _N_(_ittapi_global).api_list_ptr[i].group & init_groups)\n            return 1;\n    return 0;\n}\n\nITT_EXTERN_C __itt_error_handler_t* _N_(set_error_handler)(__itt_error_handler_t* handler)\n{\n    __itt_error_handler_t* prev = (__itt_error_handler_t*)(size_t)_N_(_ittapi_global).error_handler;\n    _N_(_ittapi_global).error_handler = (void*)(size_t)handler;\n    return prev;\n}\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#pragma warning(pop)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/ittnotify_static.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"ittnotify_config.h\"\n\n#ifndef ITT_FORMAT_DEFINED\n#  ifndef ITT_FORMAT\n#    define ITT_FORMAT\n#  endif /* ITT_FORMAT */\n#  ifndef ITT_NO_PARAMS\n#    define ITT_NO_PARAMS\n#  endif /* ITT_NO_PARAMS */\n#endif /* ITT_FORMAT_DEFINED */\n\n/*\n * parameters for macro expected:\n * ITT_STUB(api, type, func_name, arguments, params, func_name_in_dll, group, printf_fmt)\n */\n#ifdef __ITT_INTERNAL_INIT\n\n#ifndef __ITT_INTERNAL_BODY\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_domain*, domain_createA, (const char    *name), (ITT_FORMAT name), domain_createA, __itt_group_structure, \"\\\"%s\\\"\")\nITT_STUB(ITTAPI, __itt_domain*, domain_createW, (const wchar_t *name), (ITT_FORMAT name), domain_createW, __itt_group_structure, \"\\\"%S\\\"\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_domain*, domain_create,  (const char    *name), (ITT_FORMAT name), domain_create,  __itt_group_structure, \"\\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createA, (const char    *name), (ITT_FORMAT name), string_handle_createA, __itt_group_structure, \"\\\"%s\\\"\")\nITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createW, (const wchar_t *name), (ITT_FORMAT name), string_handle_createW, __itt_group_structure, \"\\\"%S\\\"\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_string_handle*, string_handle_create,  (const char    *name), (ITT_FORMAT name), string_handle_create,  __itt_group_structure, \"\\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\nITT_STUBV(ITTAPI, void, pause,  (void), (ITT_NO_PARAMS), pause,  __itt_group_control | __itt_group_legacy, \"no args\")\nITT_STUBV(ITTAPI, void, resume, (void), (ITT_NO_PARAMS), resume, __itt_group_control | 
__itt_group_legacy, \"no args\")\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, thread_set_nameA, (const char    *name), (ITT_FORMAT name), thread_set_nameA, __itt_group_thread, \"\\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name), (ITT_FORMAT name), thread_set_nameW, __itt_group_thread, \"\\\"%S\\\"\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, thread_set_name,  (const char    *name), (ITT_FORMAT name), thread_set_name,  __itt_group_thread, \"\\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, thread_ignore, (void), (ITT_NO_PARAMS), thread_ignore, __itt_group_thread, \"no args\")\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(LIBITTAPI, int,  thr_name_setA, (const char    *name, int namelen), (ITT_FORMAT name, namelen), thr_name_setA, __itt_group_thread | __itt_group_legacy, \"\\\"%s\\\", %d\")\nITT_STUB(LIBITTAPI, int,  thr_name_setW, (const wchar_t *name, int namelen), (ITT_FORMAT name, namelen), thr_name_setW, __itt_group_thread | __itt_group_legacy, \"\\\"%S\\\", %d\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(LIBITTAPI, int,  thr_name_set,  (const char    *name, int namelen), (ITT_FORMAT name, namelen), thr_name_set,  __itt_group_thread | __itt_group_legacy, \"\\\"%s\\\", %d\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(LIBITTAPI, void, thr_ignore,   (void),                             (ITT_NO_PARAMS),            thr_ignore,    __itt_group_thread | __itt_group_legacy, \"no args\")\n#endif /* __ITT_INTERNAL_BODY */\n\nITT_STUBV(ITTAPI, void, enable_attach, (void), (ITT_NO_PARAMS), enable_attach, __itt_group_all, \"no args\")\n\n#else  /* __ITT_INTERNAL_INIT */\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char    *objtype, const char    *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createA, __itt_group_sync | __itt_group_fsync, \"%p, \\\"%s\\\", 
\\\"%s\\\", %x\")\nITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createW, __itt_group_sync | __itt_group_fsync, \"%p, \\\"%S\\\", \\\"%S\\\", %x\")\nITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char    *name), (ITT_FORMAT addr, name), sync_renameA, __itt_group_sync | __itt_group_fsync, \"%p, \\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name), (ITT_FORMAT addr, name), sync_renameW, __itt_group_sync | __itt_group_fsync, \"%p, \\\"%S\\\"\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, sync_create,  (void *addr, const char    *objtype, const char    *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_create,  __itt_group_sync | __itt_group_fsync, \"%p, \\\"%s\\\", \\\"%s\\\", %x\")\nITT_STUBV(ITTAPI, void, sync_rename,  (void *addr, const char    *name), (ITT_FORMAT addr, name), sync_rename,  __itt_group_sync | __itt_group_fsync, \"%p, \\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, sync_destroy,    (void *addr), (ITT_FORMAT addr), sync_destroy,   __itt_group_sync | __itt_group_fsync, \"%p\")\n\nITT_STUBV(ITTAPI, void, sync_prepare,    (void* addr), (ITT_FORMAT addr), sync_prepare,   __itt_group_sync,  \"%p\")\nITT_STUBV(ITTAPI, void, sync_cancel,     (void *addr), (ITT_FORMAT addr), sync_cancel,    __itt_group_sync,  \"%p\")\nITT_STUBV(ITTAPI, void, sync_acquired,   (void *addr), (ITT_FORMAT addr), sync_acquired,  __itt_group_sync,  \"%p\")\nITT_STUBV(ITTAPI, void, sync_releasing,  (void* addr), (ITT_FORMAT addr), sync_releasing, __itt_group_sync,  \"%p\")\n\nITT_STUBV(ITTAPI, void, suppress_push,       (unsigned int mask),                             (ITT_FORMAT mask), suppress_push,  __itt_group_suppress,  \"%p\")\nITT_STUBV(ITTAPI, void, suppress_pop,        (void),                                          
(ITT_NO_PARAMS),   suppress_pop,   __itt_group_suppress,  \"no args\")\nITT_STUBV(ITTAPI, void, suppress_mark_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size),(ITT_FORMAT mode, mask, address, size), suppress_mark_range, __itt_group_suppress, \"%d, %p, %p, %d\")\nITT_STUBV(ITTAPI, void, suppress_clear_range,(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size),(ITT_FORMAT mode, mask, address, size), suppress_clear_range,__itt_group_suppress, \"%d, %p, %p, %d\")\n\nITT_STUBV(ITTAPI, void, fsync_prepare,   (void* addr), (ITT_FORMAT addr), sync_prepare,   __itt_group_fsync, \"%p\")\nITT_STUBV(ITTAPI, void, fsync_cancel,    (void *addr), (ITT_FORMAT addr), sync_cancel,    __itt_group_fsync, \"%p\")\nITT_STUBV(ITTAPI, void, fsync_acquired,  (void *addr), (ITT_FORMAT addr), sync_acquired,  __itt_group_fsync, \"%p\")\nITT_STUBV(ITTAPI, void, fsync_releasing, (void* addr), (ITT_FORMAT addr), sync_releasing, __itt_group_fsync, \"%p\")\n\nITT_STUBV(ITTAPI, void, model_site_begin,          (__itt_model_site *site, __itt_model_site_instance *instance, const char *name), (ITT_FORMAT site, instance, name), model_site_begin, __itt_group_model, \"%p, %p, \\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, model_site_end,            (__itt_model_site *site, __itt_model_site_instance *instance),                   (ITT_FORMAT site, instance),       model_site_end,   __itt_group_model, \"%p, %p\")\nITT_STUBV(ITTAPI, void, model_task_begin,          (__itt_model_task *task, __itt_model_task_instance *instance, const char *name), (ITT_FORMAT task, instance, name), model_task_begin, __itt_group_model, \"%p, %p, \\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, model_task_end,            (__itt_model_task *task, __itt_model_task_instance *instance),                   (ITT_FORMAT task, instance),       model_task_end,   __itt_group_model, \"%p, %p\")\nITT_STUBV(ITTAPI, void, model_lock_acquire,        (void *lock), (ITT_FORMAT lock), model_lock_acquire, 
__itt_group_model, \"%p\")\nITT_STUBV(ITTAPI, void, model_lock_release,        (void *lock), (ITT_FORMAT lock), model_lock_release, __itt_group_model, \"%p\")\nITT_STUBV(ITTAPI, void, model_record_allocation,   (void *addr, size_t size), (ITT_FORMAT addr, size), model_record_allocation,   __itt_group_model, \"%p, %d\")\nITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr),              (ITT_FORMAT addr),       model_record_deallocation, __itt_group_model, \"%p\")\nITT_STUBV(ITTAPI, void, model_induction_uses,      (void* addr, size_t size), (ITT_FORMAT addr, size), model_induction_uses,      __itt_group_model, \"%p, %d\")\nITT_STUBV(ITTAPI, void, model_reduction_uses,      (void* addr, size_t size), (ITT_FORMAT addr, size), model_reduction_uses,      __itt_group_model, \"%p, %d\")\nITT_STUBV(ITTAPI, void, model_observe_uses,        (void* addr, size_t size), (ITT_FORMAT addr, size), model_observe_uses,        __itt_group_model, \"%p, %d\")\nITT_STUBV(ITTAPI, void, model_clear_uses,          (void* addr),              (ITT_FORMAT addr),       model_clear_uses,          __itt_group_model, \"%p\")\n\n#ifndef __ITT_INTERNAL_BODY\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, model_site_beginW,         (const wchar_t *name),     (ITT_FORMAT name),       model_site_beginW,         __itt_group_model, \"\\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, model_task_beginW,         (const wchar_t *name),     (ITT_FORMAT name),       model_task_beginW,         __itt_group_model, \"\\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, model_iteration_taskW,     (const wchar_t *name),     (ITT_FORMAT name),       model_iteration_taskW,     __itt_group_model, \"\\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, model_site_beginA,         (const char *name),        (ITT_FORMAT name),       model_site_beginA,         __itt_group_model, \"\\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, model_site_beginAL,        (const char *name, size_t len), (ITT_FORMAT 
name, len), model_site_beginAL,    __itt_group_model, \"\\\"%s\\\", %d\")\nITT_STUBV(ITTAPI, void, model_task_beginA,         (const char *name),        (ITT_FORMAT name),       model_task_beginA,         __itt_group_model, \"\\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, model_task_beginAL,        (const char *name, size_t len), (ITT_FORMAT name, len), model_task_beginAL,    __itt_group_model, \"\\\"%s\\\", %d\")\nITT_STUBV(ITTAPI, void, model_iteration_taskA,     (const char *name),        (ITT_FORMAT name),       model_iteration_taskA,     __itt_group_model, \"\\\"%s\\\"\")\nITT_STUBV(ITTAPI, void, model_iteration_taskAL,    (const char *name, size_t len), (ITT_FORMAT name, len), model_iteration_taskAL, __itt_group_model, \"\\\"%s\\\", %d\")\nITT_STUBV(ITTAPI, void, model_site_end_2,          (void),                    (ITT_NO_PARAMS),         model_site_end_2,          __itt_group_model, \"no args\")\nITT_STUBV(ITTAPI, void, model_task_end_2,          (void),                    (ITT_NO_PARAMS),         model_task_end_2,          __itt_group_model, \"no args\")\nITT_STUBV(ITTAPI, void, model_lock_acquire_2,      (void *lock),              (ITT_FORMAT lock),       model_lock_acquire_2,      __itt_group_model, \"%p\")\nITT_STUBV(ITTAPI, void, model_lock_release_2,      (void *lock),              (ITT_FORMAT lock),       model_lock_release_2,      __itt_group_model, \"%p\")\nITT_STUBV(ITTAPI, void, model_aggregate_task,      (size_t count),            (ITT_FORMAT count),      model_aggregate_task,      __itt_group_model, \"%d\")\nITT_STUBV(ITTAPI, void, model_disable_push,        (__itt_model_disable x),   (ITT_FORMAT x),          model_disable_push,        __itt_group_model, \"%p\")\nITT_STUBV(ITTAPI, void, model_disable_pop,         (void),                    (ITT_NO_PARAMS),         model_disable_pop,         __itt_group_model, \"no args\")\n#endif /* __ITT_INTERNAL_BODY */\n\n#ifndef __ITT_INTERNAL_BODY\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, 
__itt_heap_function, heap_function_createA, (const char    *name, const char    *domain), (ITT_FORMAT name, domain), heap_function_createA, __itt_group_heap, \"\\\"%s\\\", \\\"%s\\\"\")\nITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), heap_function_createW, __itt_group_heap, \"\\\"%s\\\", \\\"%s\\\"\")\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_heap_function, heap_function_create,  (const char    *name, const char    *domain), (ITT_FORMAT name, domain), heap_function_create,  __itt_group_heap, \"\\\"%s\\\", \\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* __ITT_INTERNAL_BODY */\nITT_STUBV(ITTAPI, void, heap_allocate_begin,   (__itt_heap_function h, size_t size, int initialized),             (ITT_FORMAT h, size, initialized),       heap_allocate_begin, __itt_group_heap, \"%p, %lu, %d\")\nITT_STUBV(ITTAPI, void, heap_allocate_end,     (__itt_heap_function h, void** addr, size_t size, int initialized), (ITT_FORMAT h, addr, size, initialized), heap_allocate_end,   __itt_group_heap, \"%p, %p, %lu, %d\")\nITT_STUBV(ITTAPI, void, heap_free_begin,       (__itt_heap_function h, void*  addr), (ITT_FORMAT h, addr), heap_free_begin, __itt_group_heap, \"%p, %p\")\nITT_STUBV(ITTAPI, void, heap_free_end,         (__itt_heap_function h, void*  addr), (ITT_FORMAT h, addr), heap_free_end,   __itt_group_heap, \"%p, %p\")\nITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void*  addr, size_t new_size, int initialized),                  (ITT_FORMAT h, addr, new_size, initialized),           heap_reallocate_begin, __itt_group_heap, \"%p, %p, %lu, %d\")\nITT_STUBV(ITTAPI, void, heap_reallocate_end,   (__itt_heap_function h, void*  addr, void** new_addr, size_t new_size, int initialized), (ITT_FORMAT h, addr, new_addr, new_size, initialized), heap_reallocate_end,   __itt_group_heap, \"%p, %p, %p, %lu, %d\")\nITT_STUBV(ITTAPI, void, 
heap_internal_access_begin, (void), (ITT_NO_PARAMS), heap_internal_access_begin, __itt_group_heap, \"no args\")\nITT_STUBV(ITTAPI, void, heap_internal_access_end,   (void), (ITT_NO_PARAMS), heap_internal_access_end,   __itt_group_heap, \"no args\")\nITT_STUBV(ITTAPI, void, heap_record_memory_growth_begin, (void), (ITT_NO_PARAMS), heap_record_memory_growth_begin, __itt_group_heap, \"no args\")\nITT_STUBV(ITTAPI, void, heap_record_memory_growth_end,   (void), (ITT_NO_PARAMS), heap_record_memory_growth_end,   __itt_group_heap, \"no args\")\nITT_STUBV(ITTAPI, void, heap_reset_detection, (unsigned int reset_mask),  (ITT_FORMAT reset_mask), heap_reset_detection, __itt_group_heap, \"%u\")\nITT_STUBV(ITTAPI, void, heap_record,          (unsigned int record_mask), (ITT_FORMAT record_mask),  heap_record,        __itt_group_heap, \"%u\")\n\nITT_STUBV(ITTAPI, void, id_create,  (const __itt_domain *domain, __itt_id id), (ITT_FORMAT domain, id), id_create,  __itt_group_structure, \"%p, %lu\")\nITT_STUBV(ITTAPI, void, id_destroy, (const __itt_domain *domain, __itt_id id), (ITT_FORMAT domain, id), id_destroy, __itt_group_structure, \"%p, %lu\")\n\nITT_STUB(ITTAPI, __itt_timestamp, get_timestamp, (void), (ITT_NO_PARAMS), get_timestamp,  __itt_group_structure, \"no args\")\n\nITT_STUBV(ITTAPI, void, region_begin, (const __itt_domain *domain, __itt_id id, __itt_id parent, __itt_string_handle *name), (ITT_FORMAT domain, id, parent, name), region_begin, __itt_group_structure, \"%p, %lu, %lu, %p\")\nITT_STUBV(ITTAPI, void, region_end,   (const __itt_domain *domain, __itt_id id),                                             (ITT_FORMAT domain, id),               region_end,   __itt_group_structure, \"%p, %lu\")\n\n#ifndef __ITT_INTERNAL_BODY\nITT_STUBV(ITTAPI, void, frame_begin_v3,  (const __itt_domain *domain, __itt_id *id),                                             (ITT_FORMAT domain, id),             frame_begin_v3,  __itt_group_structure, \"%p, %p\")\nITT_STUBV(ITTAPI, void, 
frame_end_v3,    (const __itt_domain *domain, __itt_id *id),                                             (ITT_FORMAT domain, id),             frame_end_v3,    __itt_group_structure, \"%p, %p\")\nITT_STUBV(ITTAPI, void, frame_submit_v3, (const __itt_domain *domain, __itt_id *id, __itt_timestamp begin, __itt_timestamp end), (ITT_FORMAT domain, id, begin, end), frame_submit_v3, __itt_group_structure, \"%p, %p, %lu, %lu\")\n#endif /* __ITT_INTERNAL_BODY */\n\nITT_STUBV(ITTAPI, void, task_group,   (const __itt_domain *domain, __itt_id id, __itt_id parent, __itt_string_handle *name), (ITT_FORMAT domain, id, parent, name), task_group,  __itt_group_structure, \"%p, %lu, %lu, %p\")\n\nITT_STUBV(ITTAPI, void, task_begin,    (const __itt_domain *domain, __itt_id id, __itt_id parent, __itt_string_handle *name), (ITT_FORMAT domain, id, parent, name), task_begin,    __itt_group_structure, \"%p, %lu, %lu, %p\")\nITT_STUBV(ITTAPI, void, task_begin_fn, (const __itt_domain *domain, __itt_id id, __itt_id parent, void* fn),                  (ITT_FORMAT domain, id, parent, fn),   task_begin_fn, __itt_group_structure, \"%p, %lu, %lu, %p\")\nITT_STUBV(ITTAPI, void, task_end,      (const __itt_domain *domain),                                                          (ITT_FORMAT domain),                   task_end,      __itt_group_structure, \"%p\")\n\nITT_STUBV(ITTAPI, void, counter_inc_v3,       (const __itt_domain *domain, __itt_string_handle *name),                           (ITT_FORMAT domain, name),        counter_inc_v3,       __itt_group_structure, \"%p, %p\")\nITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long value), (ITT_FORMAT domain, name, value), counter_inc_delta_v3, __itt_group_structure, \"%p, %p, %lu\")\n\nITT_STUBV(ITTAPI, void, marker, (const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope), (ITT_FORMAT domain, id, name, scope), marker, __itt_group_structure, \"%p, 
%lu, %p, %d\")\n\nITT_STUBV(ITTAPI, void, metadata_add,      (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data), (ITT_FORMAT domain, id, key, type, count, data), metadata_add, __itt_group_structure, \"%p, %lu, %p, %d, %lu, %p\")\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, metadata_str_addA, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char* data, size_t length),    (ITT_FORMAT domain, id, key, data, length), metadata_str_addA, __itt_group_structure, \"%p, %lu, %p, %p, %lu\")\nITT_STUBV(ITTAPI, void, metadata_str_addW, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t* data, size_t length), (ITT_FORMAT domain, id, key, data, length), metadata_str_addW, __itt_group_structure, \"%p, %lu, %p, %p, %lu\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, metadata_str_add,  (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char* data, size_t length),    (ITT_FORMAT domain, id, key, data, length), metadata_str_add,  __itt_group_structure, \"%p, %lu, %p, %p, %lu\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\nITT_STUBV(ITTAPI, void, relation_add_to_current, (const __itt_domain *domain, __itt_relation relation, __itt_id tail),                (ITT_FORMAT domain, relation, tail),       relation_add_to_current, __itt_group_structure, \"%p, %lu, %p\")\nITT_STUBV(ITTAPI, void, relation_add,            (const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail), (ITT_FORMAT domain, head, relation, tail), relation_add,            __itt_group_structure, \"%p, %p, %lu, %p\")\n\n#ifndef __ITT_INTERNAL_BODY\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char    *name, int namelen), (ITT_FORMAT name, namelen), event_createA, __itt_group_mark | __itt_group_legacy, \"\\\"%s\\\", %d\")\nITT_STUB(LIBITTAPI, __itt_event, 
event_createW, (const wchar_t *name, int namelen), (ITT_FORMAT name, namelen), event_createW, __itt_group_mark | __itt_group_legacy, \"\\\"%S\\\", %d\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(LIBITTAPI, __itt_event, event_create,  (const char    *name, int namelen), (ITT_FORMAT name, namelen), event_create,  __itt_group_mark | __itt_group_legacy, \"\\\"%s\\\", %d\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(LIBITTAPI, int,  event_start,          (__itt_event event),                (ITT_FORMAT event),         event_start,   __itt_group_mark | __itt_group_legacy, \"%d\")\nITT_STUB(LIBITTAPI, int,  event_end,            (__itt_event event),                (ITT_FORMAT event),         event_end,     __itt_group_mark | __itt_group_legacy, \"%d\")\n#endif /* __ITT_INTERNAL_BODY */\n\n#ifndef __ITT_INTERNAL_BODY\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, sync_set_nameA, (void *addr, const char    *objtype, const char    *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_nameA, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p, \\\"%s\\\", \\\"%s\\\", %x\")\nITT_STUBV(ITTAPI, void, sync_set_nameW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_nameW, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p, \\\"%S\\\", \\\"%S\\\", %x\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, sync_set_name,  (void *addr, const char    *objtype, const char    *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_name,  __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"p, \\\"%s\\\", \\\"%s\\\", %x\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(LIBITTAPI, int, notify_sync_nameA, (void *p, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute), (ITT_FORMAT p, 
objtype, typelen, objname, namelen, attribute), notify_sync_nameA, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p, \\\"%s\\\", %d, \\\"%s\\\", %d, %x\")\nITT_STUB(LIBITTAPI, int, notify_sync_nameW, (void *p, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_nameW, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p, \\\"%S\\\", %d, \\\"%S\\\", %d, %x\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(LIBITTAPI, int, notify_sync_name,  (void *p, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_name,  __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p, \\\"%s\\\", %d, \\\"%s\\\", %d, %x\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\nITT_STUBV(LIBITTAPI, void, notify_sync_prepare,   (void *p), (ITT_FORMAT p), notify_sync_prepare,   __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p\")\nITT_STUBV(LIBITTAPI, void, notify_sync_cancel,    (void *p), (ITT_FORMAT p), notify_sync_cancel,    __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p\")\nITT_STUBV(LIBITTAPI, void, notify_sync_acquired,  (void *p), (ITT_FORMAT p), notify_sync_acquired,  __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p\")\nITT_STUBV(LIBITTAPI, void, notify_sync_releasing, (void *p), (ITT_FORMAT p), notify_sync_releasing, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, \"%p\")\n#endif /* __ITT_INTERNAL_BODY */\n\nITT_STUBV(LIBITTAPI, void, memory_read,   (void *addr, size_t size), (ITT_FORMAT addr, size), memory_read,   __itt_group_legacy, \"%p, %lu\")\nITT_STUBV(LIBITTAPI, void, memory_write,  (void *addr, size_t size), (ITT_FORMAT addr, size), memory_write,  __itt_group_legacy, \"%p, %lu\")\nITT_STUBV(LIBITTAPI, void, memory_update, (void *addr, size_t size), (ITT_FORMAT 
addr, size), memory_update, __itt_group_legacy, \"%p, %lu\")\n\nITT_STUB(LIBITTAPI, __itt_state_t,     state_get,    (void),                                    (ITT_NO_PARAMS),   state_get,    __itt_group_legacy, \"no args\")\nITT_STUB(LIBITTAPI, __itt_state_t,     state_set,    (__itt_state_t s),                         (ITT_FORMAT s),    state_set,    __itt_group_legacy, \"%d\")\nITT_STUB(LIBITTAPI, __itt_obj_state_t, obj_mode_set, (__itt_obj_prop_t p, __itt_obj_state_t s), (ITT_FORMAT p, s), obj_mode_set, __itt_group_legacy, \"%d, %d\")\nITT_STUB(LIBITTAPI, __itt_thr_state_t, thr_mode_set, (__itt_thr_prop_t p, __itt_thr_state_t s), (ITT_FORMAT p, s), thr_mode_set, __itt_group_legacy, \"%d, %d\")\n\n#ifndef __ITT_INTERNAL_BODY\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_frame, frame_createA, (const char    *domain), (ITT_FORMAT domain), frame_createA, __itt_group_frame, \"\\\"%s\\\"\")\nITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain), (ITT_FORMAT domain), frame_createW, __itt_group_frame, \"\\\"%s\\\"\")\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_frame, frame_create,  (const char    *domain), (ITT_FORMAT domain), frame_create,  __itt_group_frame, \"\\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* __ITT_INTERNAL_BODY */\nITT_STUBV(ITTAPI, void, frame_begin,         (__itt_frame frame),     (ITT_FORMAT frame),  frame_begin,   __itt_group_frame, \"%p\")\nITT_STUBV(ITTAPI, void, frame_end,           (__itt_frame frame),     (ITT_FORMAT frame),  frame_end,     __itt_group_frame, \"%p\")\n\n#ifndef __ITT_INTERNAL_BODY\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char    *name, const char    *domain), (ITT_FORMAT name, domain), counter_createA, __itt_group_counter, \"\\\"%s\\\", \\\"%s\\\"\")\nITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), counter_createW, 
__itt_group_counter, \"\\\"%s\\\", \\\"%s\\\"\")\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_counter, counter_create,  (const char    *name, const char    *domain), (ITT_FORMAT name, domain), counter_create,  __itt_group_counter, \"\\\"%s\\\", \\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* __ITT_INTERNAL_BODY */\nITT_STUBV(ITTAPI, void, counter_destroy,   (__itt_counter id),                           (ITT_FORMAT id),        counter_destroy,   __itt_group_counter, \"%p\")\nITT_STUBV(ITTAPI, void, counter_inc,       (__itt_counter id),                           (ITT_FORMAT id),        counter_inc,       __itt_group_counter, \"%p\")\nITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value), (ITT_FORMAT id, value), counter_inc_delta, __itt_group_counter, \"%p, %lu\")\n\n#ifndef __ITT_INTERNAL_BODY\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char    *name), (ITT_FORMAT name), mark_createA, __itt_group_mark, \"\\\"%s\\\"\")\nITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name), (ITT_FORMAT name), mark_createW, __itt_group_mark, \"\\\"%S\\\"\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_mark_type, mark_create,  (const char    *name), (ITT_FORMAT name), mark_create,  __itt_group_mark, \"\\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* __ITT_INTERNAL_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, int,  markA,        (__itt_mark_type mt, const char    *parameter), (ITT_FORMAT mt, parameter), markA, __itt_group_mark, \"%d, \\\"%s\\\"\")\nITT_STUB(ITTAPI, int,  markW,        (__itt_mark_type mt, const wchar_t *parameter), (ITT_FORMAT mt, parameter), markW, __itt_group_mark, \"%d, \\\"%S\\\"\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, int,  mark,         (__itt_mark_type mt, const char    *parameter), (ITT_FORMAT mt, parameter), mark,  __itt_group_mark, 
\"%d, \\\"%s\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, int,  mark_off, (__itt_mark_type mt), (ITT_FORMAT mt), mark_off, __itt_group_mark, \"%d\")\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, int,  mark_globalA, (__itt_mark_type mt, const char    *parameter), (ITT_FORMAT mt, parameter), mark_globalA, __itt_group_mark, \"%d, \\\"%s\\\"\")\nITT_STUB(ITTAPI, int,  mark_globalW, (__itt_mark_type mt, const wchar_t *parameter), (ITT_FORMAT mt, parameter), mark_globalW, __itt_group_mark, \"%d, \\\"%S\\\"\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, int,  mark_global,  (__itt_mark_type mt, const char    *parameter), (ITT_FORMAT mt, parameter), mark_global,  __itt_group_mark, \"%d, \\\"%S\\\"\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, int,  mark_global_off, (__itt_mark_type mt),                        (ITT_FORMAT mt),            mark_global_off, __itt_group_mark, \"%d\")\n\n#ifndef __ITT_INTERNAL_BODY\nITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void), (ITT_NO_PARAMS), stack_caller_create,  __itt_group_stitch, \"no args\")\n#endif /* __ITT_INTERNAL_BODY */\nITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id), (ITT_FORMAT id), stack_caller_destroy, __itt_group_stitch, \"%p\")\nITT_STUBV(ITTAPI, void, stack_callee_enter,   (__itt_caller id), (ITT_FORMAT id), stack_callee_enter,   __itt_group_stitch, \"%p\")\nITT_STUBV(ITTAPI, void, stack_callee_leave,   (__itt_caller id), (ITT_FORMAT id), stack_callee_leave,   __itt_group_stitch, \"%p\")\n\nITT_STUB(ITTAPI,  __itt_clock_domain*, clock_domain_create, (__itt_get_clock_info_fn fn, void* fn_data), (ITT_FORMAT fn, fn_data), clock_domain_create, __itt_group_structure, \"%p, %p\")\nITT_STUBV(ITTAPI, void,                clock_domain_reset,  (void),                                      (ITT_NO_PARAMS),          clock_domain_reset,  __itt_group_structure, \"no args\")\nITT_STUBV(ITTAPI, void, id_create_ex,  (const __itt_domain *domain, 
__itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id), (ITT_FORMAT domain, clock_domain, timestamp, id), id_create_ex,  __itt_group_structure, \"%p, %p, %lu, %lu\")\nITT_STUBV(ITTAPI, void, id_destroy_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id), (ITT_FORMAT domain, clock_domain, timestamp, id), id_destroy_ex, __itt_group_structure, \"%p, %p, %lu, %lu\")\nITT_STUBV(ITTAPI, void, task_begin_ex,    (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, __itt_string_handle *name), (ITT_FORMAT domain, clock_domain, timestamp, id, parentid, name), task_begin_ex, __itt_group_structure, \"%p, %p, %lu, %lu, %lu, %p\")\nITT_STUBV(ITTAPI, void, task_begin_fn_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, void* fn),                  (ITT_FORMAT domain, clock_domain, timestamp, id, parentid, fn), task_begin_fn_ex, __itt_group_structure, \"%p, %p, %lu, %lu, %lu, %p\")\nITT_STUBV(ITTAPI, void, task_end_ex,      (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp),                                                            (ITT_FORMAT domain, clock_domain, timestamp), task_end_ex, __itt_group_structure, \"%p, %p, %lu\")\nITT_STUBV(ITTAPI, void, task_begin_overlapped,       (const __itt_domain *domain, __itt_id id, __itt_id parent, __itt_string_handle *name),                                                                   (ITT_FORMAT domain, id, parent, name), task_begin_overlapped, __itt_group_structure, \"%p, %lu, %lu, %p\")\nITT_STUBV(ITTAPI, void, task_begin_overlapped_ex,    (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, __itt_string_handle *name), (ITT_FORMAT domain, clock_domain, timestamp, id, parentid, name), 
task_begin_overlapped_ex, __itt_group_structure, \"%p, %p, %lu, %lu, %lu, %p\")\nITT_STUBV(ITTAPI, void, task_end_overlapped, (const __itt_domain *domain, __itt_id id),                                                                                                                       (ITT_FORMAT domain, id), task_end_overlapped, __itt_group_structure, \"%p, %lu\")\nITT_STUBV(ITTAPI, void, task_end_overlapped_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id),                                                    (ITT_FORMAT domain, clock_domain, timestamp, id), task_end_overlapped_ex, __itt_group_structure, \"%p, %p, %lu, %lu\")\nITT_STUBV(ITTAPI, void, marker_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope), (ITT_FORMAT domain, clock_domain, timestamp, id, name, scope), marker_ex, __itt_group_structure, \"%p, %p, %lu, %lu, %p, %d\")\nITT_STUBV(ITTAPI, void, metadata_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data), (ITT_FORMAT domain, scope, key, type, count, data), metadata_add_with_scope, __itt_group_structure, \"%p, %d, %p, %d, %lu, %p\")\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeA, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length),    (ITT_FORMAT domain, scope, key, data, length), metadata_str_add_with_scopeA, __itt_group_structure, \"%p, %d, %p, %p, %lu\")\nITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeW, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length), (ITT_FORMAT domain, scope, key, data, length), metadata_str_add_with_scopeW, __itt_group_structure, \"%p, %d, %p, %p, %lu\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, 
void, metadata_str_add_with_scope,  (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length),    (ITT_FORMAT domain, scope, key, data, length), metadata_str_add_with_scope,  __itt_group_structure, \"%p, %d, %p, %p, %lu\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, relation_add_to_current_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail),                (ITT_FORMAT domain, clock_domain, timestamp, relation, tail),       relation_add_to_current_ex, __itt_group_structure, \"%p, %p, %lu, %d, %lu\")\nITT_STUBV(ITTAPI, void, relation_add_ex,            (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail), (ITT_FORMAT domain, clock_domain, timestamp, head, relation, tail), relation_add_ex,            __itt_group_structure, \"%p, %p, %lu, %lu, %d, %lu\")\nITT_STUB(ITTAPI,  __itt_track_group*, track_group_create, (__itt_string_handle* name, __itt_track_group_type track_group_type),                    (ITT_FORMAT name, track_group_type),        track_group_create, __itt_group_structure, \"%p, %d\")\nITT_STUB(ITTAPI,  __itt_track*,       track_create,       (__itt_track_group* track_group,__itt_string_handle* name, __itt_track_type track_type), (ITT_FORMAT track_group, name, track_type), track_create,       __itt_group_structure, \"%p, %p, %d\")\nITT_STUBV(ITTAPI, void,               set_track,          (__itt_track *track),                                                                    (ITT_FORMAT track),                         set_track,          __itt_group_structure, \"%p\")\n\n#ifndef __ITT_INTERNAL_BODY\nITT_STUB(ITTAPI, const char*, api_version, (void), (ITT_NO_PARAMS), api_version, __itt_group_all & ~__itt_group_legacy, \"no args\")\n#endif /* __ITT_INTERNAL_BODY */\n\n#ifndef __ITT_INTERNAL_BODY\n#if 
ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, int, av_saveA, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder), (ITT_FORMAT data, rank, dimensions, type, filePath, columnOrder), av_saveA, __itt_group_arrays, \"%p, %d, %p, %d, \\\"%s\\\", %d\")\nITT_STUB(ITTAPI, int, av_saveW, (void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder), (ITT_FORMAT data, rank, dimensions, type, filePath, columnOrder), av_saveW, __itt_group_arrays, \"%p, %d, %p, %d, \\\"%S\\\", %d\")\n#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, int, av_save,  (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder), (ITT_FORMAT data, rank, dimensions, type, filePath, columnOrder), av_save,  __itt_group_arrays, \"%p, %d, %p, %d, \\\"%s\\\", %d\")\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* __ITT_INTERNAL_BODY */\n\n#endif /* __ITT_INTERNAL_INIT */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/ittnotify_types.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _ITTNOTIFY_TYPES_H_\n#define _ITTNOTIFY_TYPES_H_\n\ntypedef enum ___itt_group_id\n{\n    __itt_group_none      = 0,\n    __itt_group_legacy    = 1<<0,\n    __itt_group_control   = 1<<1,\n    __itt_group_thread    = 1<<2,\n    __itt_group_mark      = 1<<3,\n    __itt_group_sync      = 1<<4,\n    __itt_group_fsync     = 1<<5,\n    __itt_group_jit       = 1<<6,\n    __itt_group_model     = 1<<7,\n    __itt_group_splitter_min = 1<<7,\n    __itt_group_counter   = 1<<8,\n    __itt_group_frame     = 1<<9,\n    __itt_group_stitch    = 1<<10,\n    __itt_group_heap      = 1<<11,\n    __itt_group_splitter_max = 1<<12,\n    __itt_group_structure = 1<<12,\n    __itt_group_suppress = 1<<13,\n    __itt_group_arrays    = 1<<14,\n    __itt_group_all       = -1\n} __itt_group_id;\n\n#pragma pack(push, 8)\n\ntypedef struct ___itt_group_list\n{\n    __itt_group_id id;\n    const char*    name;\n} __itt_group_list;\n\n#pragma pack(pop)\n\n#define ITT_GROUP_LIST(varname) \\\n    static __itt_group_list varname[] = {       \\\n        { __itt_group_all,       \"all\"       }, \\\n        { __itt_group_control,   \"control\"   }, \\\n        { __itt_group_thread,    \"thread\"    }, \\\n        { __itt_group_mark,      \"mark\"      }, \\\n        { __itt_group_sync,      \"sync\"      }, \\\n        { __itt_group_fsync,     \"fsync\"     }, \\\n        { __itt_group_jit,       \"jit\"       }, \\\n        { __itt_group_model,     \"model\"     }, \\\n        { __itt_group_counter,   \"counter\"   }, \\\n        { __itt_group_frame,     \"frame\"     }, \\\n        { __itt_group_stitch,    \"stitch\"    }, \\\n        { __itt_group_heap,      \"heap\"      }, \\\n        { __itt_group_structure, \"structure\" }, \\\n        { __itt_group_suppress,  \"suppress\"  }, \\\n        { __itt_group_arrays,    \"arrays\"    }, \\\n        { 
__itt_group_none,      NULL        }  \\\n    }\n\n#endif /* _ITTNOTIFY_TYPES_H_ */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/legacy/ittnotify.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _LEGACY_ITTNOTIFY_H_\n#define _LEGACY_ITTNOTIFY_H_\n\n/**\n * @file\n * @brief Legacy User API functions and types\n */\n\n/** @cond exclude_from_documentation */\n#ifndef ITT_OS_WIN\n#  define ITT_OS_WIN   1\n#endif /* ITT_OS_WIN */\n\n#ifndef ITT_OS_LINUX\n#  define ITT_OS_LINUX 2\n#endif /* ITT_OS_LINUX */\n\n#ifndef ITT_OS_MAC\n#  define ITT_OS_MAC   3\n#endif /* ITT_OS_MAC */\n\n#ifndef ITT_OS\n#  if defined WIN32 || defined _WIN32\n#    define ITT_OS ITT_OS_WIN\n#  elif defined( __APPLE__ ) && defined( __MACH__ )\n#    define ITT_OS ITT_OS_MAC\n#  else\n#    define ITT_OS ITT_OS_LINUX\n#  endif\n#endif /* ITT_OS */\n\n#ifndef ITT_PLATFORM_WIN\n#  define ITT_PLATFORM_WIN 1\n#endif /* ITT_PLATFORM_WIN */\n\n#ifndef ITT_PLATFORM_POSIX\n#  define ITT_PLATFORM_POSIX 2\n#endif /* ITT_PLATFORM_POSIX */\n\n#ifndef ITT_PLATFORM_MAC\n#  define ITT_PLATFORM_MAC 3\n#endif /* ITT_PLATFORM_MAC */\n\n#ifndef ITT_PLATFORM\n#  if ITT_OS==ITT_OS_WIN\n#    define ITT_PLATFORM ITT_PLATFORM_WIN\n#  elif ITT_OS==ITT_OS_MAC\n#    define ITT_PLATFORM ITT_PLATFORM_MAC\n#  else\n#    define ITT_PLATFORM ITT_PLATFORM_POSIX\n#  endif\n#endif /* ITT_PLATFORM */\n\n#if defined(_UNICODE) && !defined(UNICODE)\n#define UNICODE\n#endif\n\n#include <stddef.h>\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#include <tchar.h>\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#include <stdint.h>\n#if defined(UNICODE) || defined(_UNICODE)\n#include <wchar.h>\n#endif /* UNICODE || _UNICODE */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#ifndef CDECL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define CDECL __cdecl\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__ \n#      define CDECL __attribute__ ((cdecl))\n#    else  /* _M_IX86 || __i386__ */\n#      define CDECL /* actual only on x86 platform */\n#    endif /* 
_M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* CDECL */\n\n#ifndef STDCALL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define STDCALL __stdcall\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__\n#      define STDCALL __attribute__ ((stdcall)) \n#    else  /* _M_IX86 || __i386__ */\n#      define STDCALL /* supported only on x86 platform */\n#    endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* STDCALL */\n\n#define ITTAPI    CDECL\n#define LIBITTAPI CDECL\n\n/* TODO: Temporary for compatibility! */\n#define ITTAPI_CALL    CDECL\n#define LIBITTAPI_CALL CDECL\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n/* use __forceinline (VC++ specific) */\n#define ITT_INLINE           __forceinline\n#define ITT_INLINE_ATTRIBUTE /* nothing */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/*\n * Generally, functions are not inlined unless optimization is specified.\n * For functions declared inline, this attribute inlines the function even\n * if no optimization level was specified.\n */\n#ifdef __STRICT_ANSI__\n#define ITT_INLINE           static inline\n#else  /* __STRICT_ANSI__ */\n#define ITT_INLINE           static inline\n#endif /* __STRICT_ANSI__ */\n#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/** @endcond */\n\n/** @cond exclude_from_documentation */\n/* Helper macro for joining tokens */\n#define ITT_JOIN_AUX(p,n) p##n\n#define ITT_JOIN(p,n)     ITT_JOIN_AUX(p,n)\n\n#ifdef ITT_MAJOR\n#undef ITT_MAJOR\n#endif\n#ifdef ITT_MINOR\n#undef ITT_MINOR\n#endif\n#define ITT_MAJOR     3\n#define ITT_MINOR     0\n\n/* Standard versioning of a token with major and minor version numbers */\n#define ITT_VERSIONIZE(x)    \\\n    ITT_JOIN(x,              \\\n    ITT_JOIN(_,              \\\n    ITT_JOIN(ITT_MAJOR,      \\\n    ITT_JOIN(_, ITT_MINOR))))\n\n#ifndef INTEL_ITTNOTIFY_PREFIX\n#  define 
INTEL_ITTNOTIFY_PREFIX __itt_\n#endif /* INTEL_ITTNOTIFY_PREFIX */\n#ifndef INTEL_ITTNOTIFY_POSTFIX\n#  define INTEL_ITTNOTIFY_POSTFIX _ptr_\n#endif /* INTEL_ITTNOTIFY_POSTFIX */\n\n#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)\n#define ITTNOTIFY_NAME(n)     ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)))\n\n#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)\n#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)\n\n#define ITTNOTIFY_VOID_D0(n,d)       (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d)\n#define ITTNOTIFY_VOID_D1(n,d,x)     (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x)\n#define ITTNOTIFY_VOID_D2(n,d,x,y)   (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y)\n#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z)\n#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a)     (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)\n#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b)   (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)\n#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)\n#define ITTNOTIFY_DATA_D0(n,d)       (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d)\n#define ITTNOTIFY_DATA_D1(n,d,x)     (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x)\n#define ITTNOTIFY_DATA_D2(n,d,x,y)   (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y)\n#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z)\n#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a)     (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       
0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)\n#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b)   (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)\n#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)\n\n#ifdef ITT_STUB\n#undef ITT_STUB\n#endif\n#ifdef ITT_STUBV\n#undef ITT_STUBV\n#endif\n#define ITT_STUBV(api,type,name,args)                             \\\n    typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args;   \\\n    extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);\n#define ITT_STUB ITT_STUBV\n/** @endcond */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/**\n * @defgroup legacy Legacy API\n * @{\n * @}\n */\n\n/**\n * @defgroup legacy_control Collection Control\n * @ingroup legacy\n * General behavior: application continues to run, but no profiling information is being collected\n *\n * Pausing occurs not only for the current thread but for all process as well as spawned processes\n * - Intel(R) Parallel Inspector and Intel(R) Inspector XE:\n *   - Does not analyze or report errors that involve memory access.\n *   - Other errors are reported as usual. 
Pausing data collection in\n *     Intel(R) Parallel Inspector and Intel(R) Inspector XE\n *     only pauses tracing and analyzing memory access.\n *     It does not pause tracing or analyzing threading APIs.\n *   .\n * - Intel(R) Parallel Amplifier and Intel(R) VTune(TM) Amplifier XE:\n *   - Does continue to record when new threads are started.\n *   .\n * - Other effects:\n *   - Possible reduction of runtime overhead.\n *   .\n * @{\n */\n#ifndef _ITTNOTIFY_H_\n/** @brief Pause collection */\nvoid ITTAPI __itt_pause(void);\n/** @brief Resume collection */\nvoid ITTAPI __itt_resume(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, pause,   (void))\nITT_STUBV(ITTAPI, void, resume,  (void))\n#define __itt_pause      ITTNOTIFY_VOID(pause)\n#define __itt_pause_ptr  ITTNOTIFY_NAME(pause)\n#define __itt_resume     ITTNOTIFY_VOID(resume)\n#define __itt_resume_ptr ITTNOTIFY_NAME(resume)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_pause()\n#define __itt_pause_ptr  0\n#define __itt_resume()\n#define __itt_resume_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_pause_ptr  0\n#define __itt_resume_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n#endif /* _ITTNOTIFY_H_ */\n/** @} legacy_control group */\n\n/**\n * @defgroup legacy_threads Threads\n * @ingroup legacy\n * Threads group\n * @warning Legacy API\n * @{\n */\n/**\n * @deprecated Legacy API\n * @brief Set name to be associated with thread in analysis GUI.\n * @return __itt_err upon failure (name or namelen being null,name and namelen mismatched)\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nint LIBITTAPI __itt_thr_name_setA(const char    *name, int namelen);\nint LIBITTAPI __itt_thr_name_setW(const wchar_t *name, int namelen);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_thr_name_set     __itt_thr_name_setW\n#  define __itt_thr_name_set_ptr 
__itt_thr_name_setW_ptr\n#else\n#  define __itt_thr_name_set     __itt_thr_name_setA\n#  define __itt_thr_name_set_ptr __itt_thr_name_setA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nint LIBITTAPI __itt_thr_name_set(const char *name, int namelen);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(LIBITTAPI, int, thr_name_setA, (const char    *name, int namelen))\nITT_STUB(LIBITTAPI, int, thr_name_setW, (const wchar_t *name, int namelen))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(LIBITTAPI, int, thr_name_set,  (const char    *name, int namelen))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_thr_name_setA     ITTNOTIFY_DATA(thr_name_setA)\n#define __itt_thr_name_setA_ptr ITTNOTIFY_NAME(thr_name_setA)\n#define __itt_thr_name_setW     ITTNOTIFY_DATA(thr_name_setW)\n#define __itt_thr_name_setW_ptr ITTNOTIFY_NAME(thr_name_setW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_thr_name_set     ITTNOTIFY_DATA(thr_name_set)\n#define __itt_thr_name_set_ptr ITTNOTIFY_NAME(thr_name_set)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_thr_name_setA(name, namelen)\n#define __itt_thr_name_setA_ptr 0\n#define __itt_thr_name_setW(name, namelen)\n#define __itt_thr_name_setW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_thr_name_set(name, namelen)\n#define __itt_thr_name_set_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_thr_name_setA_ptr 0\n#define __itt_thr_name_setW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_thr_name_set_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* 
INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief Mark current thread as ignored from this point on, for the duration of its existence.\n */\nvoid LIBITTAPI __itt_thr_ignore(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(LIBITTAPI, void, thr_ignore, (void))\n#define __itt_thr_ignore     ITTNOTIFY_VOID(thr_ignore)\n#define __itt_thr_ignore_ptr ITTNOTIFY_NAME(thr_ignore)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_thr_ignore()\n#define __itt_thr_ignore_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_thr_ignore_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} legacy_threads group */\n\n/**\n * @defgroup legacy_sync Synchronization\n * @ingroup legacy\n * Synchronization group\n * @warning Legacy API\n * @{\n */\n/**\n * @hideinitializer\n * @brief possible value of attribute argument for sync object type\n */\n#define __itt_attr_barrier 1\n\n/**\n * @hideinitializer\n * @brief possible value of attribute argument for sync object type\n */\n#define __itt_attr_mutex   2\n\n/**\n * @deprecated Legacy API\n * @brief Assign a name to a sync object using char or Unicode string\n * @param[in] addr    - pointer to the sync object. You should use a real pointer to your object\n *                      to make sure that the values don't clash with other object addresses\n * @param[in] objtype - null-terminated object type string. If NULL is passed, the object will\n *                      be assumed to be of generic \"User Synchronization\" type\n * @param[in] objname - null-terminated object name string. 
If NULL, no name will be assigned\n *                      to the object -- you can use the __itt_sync_rename call later to assign\n *                      the name\n * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the\n *                      exact semantics of how prepare/acquired/releasing calls work.\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nvoid ITTAPI __itt_sync_set_nameA(void *addr, const char    *objtype, const char    *objname, int attribute);\nvoid ITTAPI __itt_sync_set_nameW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_sync_set_name     __itt_sync_set_nameW\n#  define __itt_sync_set_name_ptr __itt_sync_set_nameW_ptr\n#else /* UNICODE */\n#  define __itt_sync_set_name     __itt_sync_set_nameA\n#  define __itt_sync_set_name_ptr __itt_sync_set_nameA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nvoid ITTAPI __itt_sync_set_name(void *addr, const char* objtype, const char* objname, int attribute);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUBV(ITTAPI, void, sync_set_nameA, (void *addr, const char    *objtype, const char    *objname, int attribute))\nITT_STUBV(ITTAPI, void, sync_set_nameW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUBV(ITTAPI, void, sync_set_name,  (void *addr, const char    *objtype, const char    *objname, int attribute))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_set_nameA     ITTNOTIFY_VOID(sync_set_nameA)\n#define __itt_sync_set_nameA_ptr ITTNOTIFY_NAME(sync_set_nameA)\n#define __itt_sync_set_nameW     ITTNOTIFY_VOID(sync_set_nameW)\n#define __itt_sync_set_nameW_ptr 
ITTNOTIFY_NAME(sync_set_nameW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_set_name     ITTNOTIFY_VOID(sync_set_name)\n#define __itt_sync_set_name_ptr ITTNOTIFY_NAME(sync_set_name)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_set_nameA(addr, objtype, objname, attribute)\n#define __itt_sync_set_nameA_ptr 0\n#define __itt_sync_set_nameW(addr, objtype, objname, attribute)\n#define __itt_sync_set_nameW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_set_name(addr, objtype, objname, attribute)\n#define __itt_sync_set_name_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_sync_set_nameA_ptr 0\n#define __itt_sync_set_nameW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_sync_set_name_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief Assign a name and type to a sync object using char or Unicode string\n * @param[in] addr -      pointer to the sync object. You should use a real pointer to your object\n *                        to make sure that the values don't clash with other object addresses\n * @param[in] objtype -   null-terminated object type string. If NULL is passed, the object will\n *                        be assumed to be of generic \"User Synchronization\" type\n * @param[in] objname -   null-terminated object name string. 
If NULL, no name will be assigned\n *                        to the object -- you can use the __itt_sync_rename call later to assign\n *                        the name\n * @param[in] typelen, namelen -   a lenght of string for appropriate objtype and objname parameter\n * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the\n *                        exact semantics of how prepare/acquired/releasing calls work.\n * @return __itt_err upon failure (name or namelen being null,name and namelen mismatched)\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nint LIBITTAPI __itt_notify_sync_nameA(void *addr, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute);\nint LIBITTAPI __itt_notify_sync_nameW(void *addr, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_notify_sync_name __itt_notify_sync_nameW\n#else\n#  define __itt_notify_sync_name __itt_notify_sync_nameA\n#endif\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nint LIBITTAPI __itt_notify_sync_name(void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(LIBITTAPI, int, notify_sync_nameA, (void *addr, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute))\nITT_STUB(LIBITTAPI, int, notify_sync_nameW, (void *addr, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(LIBITTAPI, int, notify_sync_name,  (void *addr, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define 
__itt_notify_sync_nameA     ITTNOTIFY_DATA(notify_sync_nameA)\n#define __itt_notify_sync_nameA_ptr ITTNOTIFY_NAME(notify_sync_nameA)\n#define __itt_notify_sync_nameW     ITTNOTIFY_DATA(notify_sync_nameW)\n#define __itt_notify_sync_nameW_ptr ITTNOTIFY_NAME(notify_sync_nameW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_notify_sync_name     ITTNOTIFY_DATA(notify_sync_name)\n#define __itt_notify_sync_name_ptr ITTNOTIFY_NAME(notify_sync_name)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_notify_sync_nameA(addr, objtype, typelen, objname, namelen, attribute)\n#define __itt_notify_sync_nameA_ptr 0\n#define __itt_notify_sync_nameW(addr, objtype, typelen, objname, namelen, attribute)\n#define __itt_notify_sync_nameW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_notify_sync_name(addr, objtype, typelen, objname, namelen, attribute)\n#define __itt_notify_sync_name_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_notify_sync_nameA_ptr 0\n#define __itt_notify_sync_nameW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_notify_sync_name_ptr 0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief Enter spin loop on user-defined sync object\n */\nvoid LIBITTAPI __itt_notify_sync_prepare(void* addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(LIBITTAPI, void, notify_sync_prepare, (void *addr))\n#define __itt_notify_sync_prepare     ITTNOTIFY_VOID(notify_sync_prepare)\n#define __itt_notify_sync_prepare_ptr ITTNOTIFY_NAME(notify_sync_prepare)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_notify_sync_prepare(addr)\n#define __itt_notify_sync_prepare_ptr 0\n#endif /* 
INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_notify_sync_prepare_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief Quit spin loop without acquiring spin object\n */\nvoid LIBITTAPI __itt_notify_sync_cancel(void *addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(LIBITTAPI, void, notify_sync_cancel, (void *addr))\n#define __itt_notify_sync_cancel     ITTNOTIFY_VOID(notify_sync_cancel)\n#define __itt_notify_sync_cancel_ptr ITTNOTIFY_NAME(notify_sync_cancel)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_notify_sync_cancel(addr)\n#define __itt_notify_sync_cancel_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_notify_sync_cancel_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief Successful spin loop completion (sync object acquired)\n */\nvoid LIBITTAPI __itt_notify_sync_acquired(void *addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(LIBITTAPI, void, notify_sync_acquired, (void *addr))\n#define __itt_notify_sync_acquired     ITTNOTIFY_VOID(notify_sync_acquired)\n#define __itt_notify_sync_acquired_ptr ITTNOTIFY_NAME(notify_sync_acquired)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_notify_sync_acquired(addr)\n#define __itt_notify_sync_acquired_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_notify_sync_acquired_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief Start sync object releasing code. 
Is called before the lock release call.\n */\nvoid LIBITTAPI __itt_notify_sync_releasing(void* addr);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(LIBITTAPI, void, notify_sync_releasing, (void *addr))\n#define __itt_notify_sync_releasing     ITTNOTIFY_VOID(notify_sync_releasing)\n#define __itt_notify_sync_releasing_ptr ITTNOTIFY_NAME(notify_sync_releasing)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_notify_sync_releasing(addr)\n#define __itt_notify_sync_releasing_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_notify_sync_releasing_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} legacy_sync group */\n\n#ifndef _ITTNOTIFY_H_\n/**\n * @defgroup legacy_events Events\n * @ingroup legacy\n * Events group\n * @{\n */\n\n/** @brief user event type */\ntypedef int __itt_event;\n\n/**\n * @brief Create an event notification\n * @note name or namelen being null/name and namelen not matching, user event feature not enabled\n * @return non-zero event identifier upon success and __itt_err otherwise\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n__itt_event LIBITTAPI __itt_event_createA(const char    *name, int namelen);\n__itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_event_create     __itt_event_createW\n#  define __itt_event_create_ptr __itt_event_createW_ptr\n#else\n#  define __itt_event_create     __itt_event_createA\n#  define __itt_event_create_ptr __itt_event_createA_ptr\n#endif /* UNICODE */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n__itt_event LIBITTAPI __itt_event_create(const char *name, int namelen);\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char   
 *name, int namelen))\nITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(LIBITTAPI, __itt_event, event_create,  (const char *name, int namelen))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_event_createA     ITTNOTIFY_DATA(event_createA)\n#define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA)\n#define __itt_event_createW     ITTNOTIFY_DATA(event_createW)\n#define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_event_create      ITTNOTIFY_DATA(event_create)\n#define __itt_event_create_ptr  ITTNOTIFY_NAME(event_create)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_event_createA(name, namelen) (__itt_event)0\n#define __itt_event_createA_ptr 0\n#define __itt_event_createW(name, namelen) (__itt_event)0\n#define __itt_event_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_event_create(name, namelen)  (__itt_event)0\n#define __itt_event_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_event_createA_ptr 0\n#define __itt_event_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_event_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an event occurrence.\n * @return __itt_err upon failure (invalid event id/user event feature not enabled)\n */\nint LIBITTAPI __itt_event_start(__itt_event event);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(LIBITTAPI, int, event_start, (__itt_event event))\n#define __itt_event_start     ITTNOTIFY_DATA(event_start)\n#define 
__itt_event_start_ptr ITTNOTIFY_NAME(event_start)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_event_start(event) (int)0\n#define __itt_event_start_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_event_start_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @brief Record an event end occurrence.\n * @note It is optional if events do not have durations.\n * @return __itt_err upon failure (invalid event id/user event feature not enabled)\n */\nint LIBITTAPI __itt_event_end(__itt_event event);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(LIBITTAPI, int, event_end, (__itt_event event))\n#define __itt_event_end     ITTNOTIFY_DATA(event_end)\n#define __itt_event_end_ptr ITTNOTIFY_NAME(event_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_event_end(event) (int)0\n#define __itt_event_end_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_event_end_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} legacy_events group */\n#endif /* _ITTNOTIFY_H_ */\n\n/**\n * @defgroup legacy_memory Memory Accesses\n * @ingroup legacy\n */\n\n/**\n * @deprecated Legacy API\n * @brief Inform the tool of memory accesses on reading\n */\nvoid LIBITTAPI __itt_memory_read(void *addr, size_t size);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(LIBITTAPI, void, memory_read, (void *addr, size_t size))\n#define __itt_memory_read     ITTNOTIFY_VOID(memory_read)\n#define __itt_memory_read_ptr ITTNOTIFY_NAME(memory_read)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_memory_read(addr, size)\n#define __itt_memory_read_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_memory_read_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief Inform the 
tool of memory accesses on writing\n */\nvoid LIBITTAPI __itt_memory_write(void *addr, size_t size);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(LIBITTAPI, void, memory_write, (void *addr, size_t size))\n#define __itt_memory_write     ITTNOTIFY_VOID(memory_write)\n#define __itt_memory_write_ptr ITTNOTIFY_NAME(memory_write)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_memory_write(addr, size)\n#define __itt_memory_write_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_memory_write_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief Inform the tool of memory accesses on updating\n */\nvoid LIBITTAPI __itt_memory_update(void *address, size_t size);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(LIBITTAPI, void, memory_update, (void *addr, size_t size))\n#define __itt_memory_update     ITTNOTIFY_VOID(memory_update)\n#define __itt_memory_update_ptr ITTNOTIFY_NAME(memory_update)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_memory_update(addr, size)\n#define __itt_memory_update_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_memory_update_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} legacy_memory group */\n\n/**\n * @defgroup legacy_state Thread and Object States\n * @ingroup legacy\n */\n\n/** @brief state type */\ntypedef int __itt_state_t;\n\n/** @cond exclude_from_documentation */\ntypedef enum __itt_obj_state {\n    __itt_obj_state_err = 0,\n    __itt_obj_state_clr = 1,\n    __itt_obj_state_set = 2,\n    __itt_obj_state_use = 3\n} __itt_obj_state_t;\n\ntypedef enum __itt_thr_state {\n    __itt_thr_state_err = 0,\n    __itt_thr_state_clr = 1,\n    __itt_thr_state_set = 2\n} __itt_thr_state_t;\n\ntypedef enum __itt_obj_prop {\n    __itt_obj_prop_watch    = 1,\n    
__itt_obj_prop_ignore   = 2,\n    __itt_obj_prop_sharable = 3\n} __itt_obj_prop_t;\n\ntypedef enum __itt_thr_prop {\n    __itt_thr_prop_quiet = 1\n} __itt_thr_prop_t;\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief managing thread and object states\n */\n__itt_state_t LIBITTAPI __itt_state_get(void);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_state_t, state_get, (void))\n#define __itt_state_get     ITTNOTIFY_DATA(state_get)\n#define __itt_state_get_ptr ITTNOTIFY_NAME(state_get)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_state_get(void) (__itt_state_t)0\n#define __itt_state_get_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_state_get_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief managing thread and object states\n */\n__itt_state_t LIBITTAPI __itt_state_set(__itt_state_t s);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_state_t, state_set, (__itt_state_t s))\n#define __itt_state_set     ITTNOTIFY_DATA(state_set)\n#define __itt_state_set_ptr ITTNOTIFY_NAME(state_set)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_state_set(s) (__itt_state_t)0\n#define __itt_state_set_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_state_set_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief managing thread and object modes\n */\n__itt_thr_state_t LIBITTAPI __itt_thr_mode_set(__itt_thr_prop_t p, __itt_thr_state_t s);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_thr_state_t, thr_mode_set, (__itt_thr_prop_t p, __itt_thr_state_t s))\n#define __itt_thr_mode_set     ITTNOTIFY_DATA(thr_mode_set)\n#define 
__itt_thr_mode_set_ptr ITTNOTIFY_NAME(thr_mode_set)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_thr_mode_set(p, s) (__itt_thr_state_t)0\n#define __itt_thr_mode_set_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_thr_mode_set_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/**\n * @deprecated Legacy API\n * @brief managing thread and object modes\n */\n__itt_obj_state_t LIBITTAPI __itt_obj_mode_set(__itt_obj_prop_t p, __itt_obj_state_t s);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUB(ITTAPI, __itt_obj_state_t, obj_mode_set, (__itt_obj_prop_t p, __itt_obj_state_t s))\n#define __itt_obj_mode_set     ITTNOTIFY_DATA(obj_mode_set)\n#define __itt_obj_mode_set_ptr ITTNOTIFY_NAME(obj_mode_set)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_obj_mode_set(p, s) (__itt_obj_state_t)0\n#define __itt_obj_mode_set_ptr 0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_obj_mode_set_ptr 0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} legacy_state group */\n\n/**\n * @defgroup frames Frames\n * @ingroup legacy\n * Frames group\n * @{\n */\n/**\n * @brief opaque structure for frame identification\n */\ntypedef struct __itt_frame_t *__itt_frame;\n\n/**\n * @brief Create a global frame with given domain\n */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n__itt_frame ITTAPI __itt_frame_createA(const char    *domain);\n__itt_frame ITTAPI __itt_frame_createW(const wchar_t *domain);\n#if defined(UNICODE) || defined(_UNICODE)\n#  define __itt_frame_create     __itt_frame_createW\n#  define __itt_frame_create_ptr __itt_frame_createW_ptr\n#else /* UNICODE */\n#  define __itt_frame_create     __itt_frame_createA\n#  define __itt_frame_create_ptr __itt_frame_createA_ptr\n#endif /* UNICODE */\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n__itt_frame ITTAPI __itt_frame_create(const char *domain);\n#endif /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\nITT_STUB(ITTAPI, __itt_frame, frame_createA, (const char    *domain))\nITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain))\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\nITT_STUB(ITTAPI, __itt_frame, frame_create,  (const char *domain))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_frame_createA     ITTNOTIFY_DATA(frame_createA)\n#define __itt_frame_createA_ptr ITTNOTIFY_NAME(frame_createA)\n#define __itt_frame_createW     ITTNOTIFY_DATA(frame_createW)\n#define __itt_frame_createW_ptr ITTNOTIFY_NAME(frame_createW)\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_frame_create     ITTNOTIFY_DATA(frame_create)\n#define __itt_frame_create_ptr ITTNOTIFY_NAME(frame_create)\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_frame_createA(domain)\n#define __itt_frame_createA_ptr 0\n#define __itt_frame_createW(domain)\n#define __itt_frame_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_frame_create(domain)\n#define __itt_frame_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#define __itt_frame_createA_ptr 0\n#define __itt_frame_createW_ptr 0\n#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#define __itt_frame_create_ptr  0\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n\n/** @brief Record an frame begin occurrence. */\nvoid ITTAPI __itt_frame_begin(__itt_frame frame);\n/** @brief Record an frame end occurrence. 
*/\nvoid ITTAPI __itt_frame_end  (__itt_frame frame);\n\n/** @cond exclude_from_documentation */\n#ifndef INTEL_NO_MACRO_BODY\n#ifndef INTEL_NO_ITTNOTIFY_API\nITT_STUBV(ITTAPI, void, frame_begin, (__itt_frame frame))\nITT_STUBV(ITTAPI, void, frame_end,   (__itt_frame frame))\n#define __itt_frame_begin     ITTNOTIFY_VOID(frame_begin)\n#define __itt_frame_begin_ptr ITTNOTIFY_NAME(frame_begin)\n#define __itt_frame_end       ITTNOTIFY_VOID(frame_end)\n#define __itt_frame_end_ptr   ITTNOTIFY_NAME(frame_end)\n#else  /* INTEL_NO_ITTNOTIFY_API */\n#define __itt_frame_begin(frame)\n#define __itt_frame_begin_ptr 0\n#define __itt_frame_end(frame)\n#define __itt_frame_end_ptr   0\n#endif /* INTEL_NO_ITTNOTIFY_API */\n#else  /* INTEL_NO_MACRO_BODY */\n#define __itt_frame_begin_ptr 0\n#define __itt_frame_end_ptr   0\n#endif /* INTEL_NO_MACRO_BODY */\n/** @endcond */\n/** @} frames group */\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _LEGACY_ITTNOTIFY_H_ */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/tools_api/prototype/ittnotify.h",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#ifndef _PROTOTYPE_ITTNOTIFY_H_\n#define _PROTOTYPE_ITTNOTIFY_H_\n\n/**\n * @file\n * @brief Prototype User API functions and types\n */\n\n/** @cond exclude_from_documentation */\n#ifndef ITT_OS_WIN\n#  define ITT_OS_WIN   1\n#endif /* ITT_OS_WIN */\n\n#ifndef ITT_OS_LINUX\n#  define ITT_OS_LINUX 2\n#endif /* ITT_OS_LINUX */\n\n#ifndef ITT_OS_MAC\n#  define ITT_OS_MAC   3\n#endif /* ITT_OS_MAC */\n\n#ifndef ITT_OS\n#  if defined WIN32 || defined _WIN32\n#    define ITT_OS ITT_OS_WIN\n#  elif defined( __APPLE__ ) && defined( __MACH__ )\n#    define ITT_OS ITT_OS_MAC\n#  else\n#    define ITT_OS ITT_OS_LINUX\n#  endif\n#endif /* ITT_OS */\n\n#ifndef ITT_PLATFORM_WIN\n#  define ITT_PLATFORM_WIN 1\n#endif /* ITT_PLATFORM_WIN */\n\n#ifndef ITT_PLATFORM_POSIX\n#  define ITT_PLATFORM_POSIX 2\n#endif /* ITT_PLATFORM_POSIX */\n\n#ifndef ITT_PLATFORM_MAC\n#  define ITT_PLATFORM_MAC 3\n#endif /* ITT_PLATFORM_MAC */\n\n#ifndef ITT_PLATFORM\n#  if ITT_OS==ITT_OS_WIN\n#    define ITT_PLATFORM ITT_PLATFORM_WIN\n#  elif ITT_OS==ITT_OS_MAC\n#    define ITT_PLATFORM ITT_PLATFORM_MAC\n#  else\n#    define ITT_PLATFORM ITT_PLATFORM_POSIX\n#  endif\n#endif /* ITT_PLATFORM */\n\n#if defined(_UNICODE) && !defined(UNICODE)\n#define UNICODE\n#endif\n\n#include <stddef.h>\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n#include <tchar.h>\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#include <stdint.h>\n#if defined(UNICODE) || defined(_UNICODE)\n#include <wchar.h>\n#endif /* UNICODE || _UNICODE */\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n\n#ifndef CDECL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define CDECL __cdecl\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__ \n#      define CDECL __attribute__ ((cdecl))\n#    else  /* _M_IX86 || __i386__ */\n#      define CDECL /* actual only on x86 platform */\n#    
endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* CDECL */\n\n#ifndef STDCALL\n#  if ITT_PLATFORM==ITT_PLATFORM_WIN\n#    define STDCALL __stdcall\n#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#    if defined _M_IX86 || defined __i386__\n#      define STDCALL __attribute__ ((stdcall)) \n#    else  /* _M_IX86 || __i386__ */\n#      define STDCALL /* supported only on x86 platform */\n#    endif /* _M_IX86 || __i386__ */\n#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n#endif /* STDCALL */\n\n#define ITTAPI    CDECL\n#define LIBITTAPI CDECL\n\n/* TODO: Temporary for compatibility! */\n#define ITTAPI_CALL    CDECL\n#define LIBITTAPI_CALL CDECL\n\n#if ITT_PLATFORM==ITT_PLATFORM_WIN\n/* use __forceinline (VC++ specific) */\n#define ITT_INLINE           __forceinline\n#define ITT_INLINE_ATTRIBUTE /* nothing */\n#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/*\n * Generally, functions are not inlined unless optimization is specified.\n * For functions declared inline, this attribute inlines the function even\n * if no optimization level was specified.\n */\n#ifdef __STRICT_ANSI__\n#define ITT_INLINE           static\n#else  /* __STRICT_ANSI__ */\n#define ITT_INLINE           static inline\n#endif /* __STRICT_ANSI__ */\n#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused))\n#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */\n/** @endcond */\n\n/** @cond exclude_from_documentation */\n/* Helper macro for joining tokens */\n#define ITT_JOIN_AUX(p,n) p##n\n#define ITT_JOIN(p,n)     ITT_JOIN_AUX(p,n)\n\n#ifdef ITT_MAJOR\n#undef ITT_MAJOR\n#endif\n#ifdef ITT_MINOR\n#undef ITT_MINOR\n#endif\n#define ITT_MAJOR     3\n#define ITT_MINOR     0\n\n/* Standard versioning of a token with major and minor version numbers */\n#define ITT_VERSIONIZE(x)    \\\n    ITT_JOIN(x,              \\\n    ITT_JOIN(_,              \\\n    ITT_JOIN(ITT_MAJOR,      \\\n    ITT_JOIN(_, ITT_MINOR))))\n\n#ifndef INTEL_ITTNOTIFY_PREFIX\n#  define 
INTEL_ITTNOTIFY_PREFIX __itt_\n#endif /* INTEL_ITTNOTIFY_PREFIX */\n#ifndef INTEL_ITTNOTIFY_POSTFIX\n#  define INTEL_ITTNOTIFY_POSTFIX _ptr_\n#endif /* INTEL_ITTNOTIFY_POSTFIX */\n\n#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)\n#define ITTNOTIFY_NAME(n)     ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)))\n\n#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)\n#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)\n\n#define ITTNOTIFY_VOID_D0(n,d)       (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d)\n#define ITTNOTIFY_VOID_D1(n,d,x)     (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x)\n#define ITTNOTIFY_VOID_D2(n,d,x,y)   (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y)\n#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z)\n#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a)     (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)\n#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b)   (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)\n#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)\n#define ITTNOTIFY_DATA_D0(n,d)       (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d)\n#define ITTNOTIFY_DATA_D1(n,d,x)     (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x)\n#define ITTNOTIFY_DATA_D2(n,d,x,y)   (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y)\n#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ?       0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z)\n#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a)     (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       
0 : ITTNOTIFY_NAME(n)(d,x,y,z,a)\n#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b)   (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b)\n#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c)\n\n#ifdef ITT_STUB\n#undef ITT_STUB\n#endif\n#ifdef ITT_STUBV\n#undef ITT_STUBV\n#endif\n#define ITT_STUBV(api,type,name,args)                             \\\n    typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args;   \\\n    extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);\n#define ITT_STUB ITT_STUBV\n/** @endcond */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/**\n * @defgroup prototype Prototype API\n * @{\n * @}\n */\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _PROTOTYPE_ITTNOTIFY_H_ */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/version_string.ver",
    "content": "#define __TBB_VERSION_STRINGS(N) \"Empty\"\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/win32-tbb-export.def",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\nEXPORTS\n\n#define __TBB_SYMBOL( sym ) sym\n#if _M_ARM\n#include \"winrt-tbb-export.lst\"\n#else\n#include \"win32-tbb-export.lst\"\n#endif\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/win32-tbb-export.lst",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. 
This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n#include \"tbb/tbb_config.h\"\n\n// Assembly-language support that is called directly by clients\n// __TBB_SYMBOL( __TBB_machine_cmpswp1 )\n// __TBB_SYMBOL( __TBB_machine_cmpswp2 )\n// __TBB_SYMBOL( __TBB_machine_cmpswp4 )\n__TBB_SYMBOL( __TBB_machine_cmpswp8 )\n// __TBB_SYMBOL( __TBB_machine_fetchadd1 )\n// __TBB_SYMBOL( __TBB_machine_fetchadd2 )\n// __TBB_SYMBOL( __TBB_machine_fetchadd4 )\n__TBB_SYMBOL( __TBB_machine_fetchadd8 )\n// __TBB_SYMBOL( __TBB_machine_fetchstore1 )\n// __TBB_SYMBOL( __TBB_machine_fetchstore2 )\n// __TBB_SYMBOL( __TBB_machine_fetchstore4 )\n__TBB_SYMBOL( __TBB_machine_fetchstore8 )\n__TBB_SYMBOL( __TBB_machine_store8 )\n__TBB_SYMBOL( __TBB_machine_load8 )\n__TBB_SYMBOL( __TBB_machine_trylockbyte )\n__TBB_SYMBOL( __TBB_machine_try_lock_elided )\n__TBB_SYMBOL( __TBB_machine_unlock_elided )\n__TBB_SYMBOL( __TBB_machine_is_in_transaction )\n\n// cache_aligned_allocator.cpp\n__TBB_SYMBOL( ?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z )\n__TBB_SYMBOL( ?NFS_GetLineSize@internal@tbb@@YAIXZ )\n__TBB_SYMBOL( ?NFS_Free@internal@tbb@@YAXPAX@Z )\n__TBB_SYMBOL( ?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z )\n__TBB_SYMBOL( ?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z )\n__TBB_SYMBOL( ?is_malloc_used_v3@internal@tbb@@YA_NXZ )\n\n// task.cpp v3\n__TBB_SYMBOL( ?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBEAAVtask@3@I@Z )\n__TBB_SYMBOL( ?allocate@allocate_child_proxy@internal@tbb@@QBEAAVtask@3@I@Z )\n__TBB_SYMBOL( ?allocate@allocate_continuation_proxy@internal@tbb@@QBEAAVtask@3@I@Z )\n__TBB_SYMBOL( ?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z )\n__TBB_SYMBOL( ?destroy@task_base@internal@interface5@tbb@@SAXAAVtask@4@@Z )\n__TBB_SYMBOL( ?free@allocate_additional_child_of_proxy@internal@tbb@@QBEXAAVtask@3@@Z )\n__TBB_SYMBOL( ?free@allocate_child_proxy@internal@tbb@@QBEXAAVtask@3@@Z 
)\n__TBB_SYMBOL( ?free@allocate_continuation_proxy@internal@tbb@@QBEXAAVtask@3@@Z )\n__TBB_SYMBOL( ?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z )\n__TBB_SYMBOL( ?internal_set_ref_count@task@tbb@@AAEXH@Z )\n__TBB_SYMBOL( ?internal_decrement_ref_count@task@tbb@@AAEHXZ )\n__TBB_SYMBOL( ?is_owned_by_current_thread@task@tbb@@QBE_NXZ )\n__TBB_SYMBOL( ?note_affinity@task@tbb@@UAEXG@Z )\n__TBB_SYMBOL( ?resize@affinity_partitioner_base_v3@internal@tbb@@AAEXI@Z )\n__TBB_SYMBOL( ?self@task@tbb@@SAAAV12@XZ )\n__TBB_SYMBOL( ?spawn_and_wait_for_all@task@tbb@@QAEXAAVtask_list@2@@Z )\n__TBB_SYMBOL( ?default_num_threads@task_scheduler_init@tbb@@SAHXZ )\n__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QAEXHI@Z )\n__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QAEXH@Z )\n__TBB_SYMBOL( ?terminate@task_scheduler_init@tbb@@QAEXXZ )\n#if __TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( ?observe@task_scheduler_observer_v3@internal@tbb@@QAEX_N@Z )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( ?internal_current_slot@task_arena_base@internal@interface7@tbb@@KAHXZ )\n__TBB_SYMBOL( ?internal_initialize@task_arena_base@internal@interface7@tbb@@IAEXXZ )\n__TBB_SYMBOL( ?internal_terminate@task_arena_base@internal@interface7@tbb@@IAEXXZ )\n__TBB_SYMBOL( ?internal_enqueue@task_arena_base@internal@interface7@tbb@@IBEXAAVtask@4@H@Z )\n__TBB_SYMBOL( ?internal_execute@task_arena_base@internal@interface7@tbb@@IBEXAAVdelegate_base@234@@Z )\n__TBB_SYMBOL( ?internal_wait@task_arena_base@internal@interface7@tbb@@IBEXXZ )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n// task_v2.cpp\n__TBB_SYMBOL( ?destroy@task@tbb@@QAEXAAV12@@Z )\n#endif\n\n// exception handling support\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( ?allocate@allocate_root_with_context_proxy@internal@tbb@@QBEAAVtask@3@I@Z )\n__TBB_SYMBOL( ?free@allocate_root_with_context_proxy@internal@tbb@@QBEXAAVtask@3@@Z )\n__TBB_SYMBOL( ?change_group@task@tbb@@QAEXAAVtask_group_context@2@@Z 
)\n__TBB_SYMBOL( ?is_group_execution_cancelled@task_group_context@tbb@@QBE_NXZ )\n__TBB_SYMBOL( ?cancel_group_execution@task_group_context@tbb@@QAE_NXZ )\n__TBB_SYMBOL( ?reset@task_group_context@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?capture_fp_settings@task_group_context@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?init@task_group_context@tbb@@IAEXXZ )\n__TBB_SYMBOL( ?register_pending_exception@task_group_context@tbb@@QAEXXZ )\n__TBB_SYMBOL( ??1task_group_context@tbb@@QAE@XZ )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( ?set_priority@task_group_context@tbb@@QAEXW4priority_t@2@@Z )\n__TBB_SYMBOL( ?priority@task_group_context@tbb@@QBE?AW4priority_t@2@XZ )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( ?name@captured_exception@tbb@@UBEPBDXZ )\n__TBB_SYMBOL( ?what@captured_exception@tbb@@UBEPBDXZ )\n__TBB_SYMBOL( ??1captured_exception@tbb@@UAE@XZ )\n__TBB_SYMBOL( ?move@captured_exception@tbb@@UAEPAV12@XZ )\n__TBB_SYMBOL( ?destroy@captured_exception@tbb@@UAEXXZ )\n__TBB_SYMBOL( ?set@captured_exception@tbb@@QAEXPBD0@Z )\n__TBB_SYMBOL( ?clear@captured_exception@tbb@@QAEXXZ )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n// Symbols for exceptions thrown from TBB\n__TBB_SYMBOL( ?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ )\n__TBB_SYMBOL( ?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z )\n__TBB_SYMBOL( ?what@bad_last_alloc@tbb@@UBEPBDXZ )\n__TBB_SYMBOL( ?what@missing_wait@tbb@@UBEPBDXZ )\n__TBB_SYMBOL( ?what@invalid_multiple_scheduling@tbb@@UBEPBDXZ )\n__TBB_SYMBOL( ?what@improper_lock@tbb@@UBEPBDXZ )\n__TBB_SYMBOL( ?what@user_abort@tbb@@UBEPBDXZ )\n\n// tbb_misc.cpp\n__TBB_SYMBOL( ?assertion_failure@tbb@@YAXPBDH00@Z )\n__TBB_SYMBOL( ?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ )\n__TBB_SYMBOL( ?handle_perror@internal@tbb@@YAXHPBD@Z )\n__TBB_SYMBOL( ?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z )\n__TBB_SYMBOL( ?runtime_warning@internal@tbb@@YAXPBDZZ )\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n// tbb_main.cpp\n__TBB_SYMBOL( 
?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z )\n__TBB_SYMBOL( ?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z )\n__TBB_SYMBOL( ?call_itt_notify_v5@internal@tbb@@YAXHPAX@Z )\n__TBB_SYMBOL( ?itt_set_sync_name_v3@internal@tbb@@YAXPAXPB_W@Z )\n__TBB_SYMBOL( ?itt_load_pointer_v3@internal@tbb@@YAPAXPBX@Z )\n#if __TBB_ITT_STRUCTURE_API\n__TBB_SYMBOL( ?itt_make_task_group_v7@internal@tbb@@YAXW4itt_domain_enum@12@PAX_K12W4string_index@12@@Z )\n__TBB_SYMBOL( ?itt_metadata_str_add_v7@internal@tbb@@YAXW4itt_domain_enum@12@PAX_KW4string_index@12@PBD@Z )\n__TBB_SYMBOL( ?itt_relation_add_v7@internal@tbb@@YAXW4itt_domain_enum@12@PAX_KW4itt_relation@12@12@Z )\n__TBB_SYMBOL( ?itt_task_begin_v7@internal@tbb@@YAXW4itt_domain_enum@12@PAX_K12W4string_index@12@@Z )\n__TBB_SYMBOL( ?itt_task_end_v7@internal@tbb@@YAXW4itt_domain_enum@12@@Z )\n#endif\n\n// pipeline.cpp\n__TBB_SYMBOL( ??0pipeline@tbb@@QAE@XZ )\n__TBB_SYMBOL( ??1filter@tbb@@UAE@XZ )\n__TBB_SYMBOL( ??1pipeline@tbb@@UAE@XZ )\n__TBB_SYMBOL( ??_7pipeline@tbb@@6B@ )\n__TBB_SYMBOL( ?add_filter@pipeline@tbb@@QAEXAAVfilter@2@@Z )\n__TBB_SYMBOL( ?clear@pipeline@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?inject_token@pipeline@tbb@@AAEXAAVtask@2@@Z )\n__TBB_SYMBOL( ?run@pipeline@tbb@@QAEXI@Z )\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( ?run@pipeline@tbb@@QAEXIAAVtask_group_context@2@@Z )\n#endif\n__TBB_SYMBOL( ?process_item@thread_bound_filter@tbb@@QAE?AW4result_type@12@XZ )\n__TBB_SYMBOL( ?try_process_item@thread_bound_filter@tbb@@QAE?AW4result_type@12@XZ )\n__TBB_SYMBOL( ?set_end_of_input@filter@tbb@@IAEXXZ )\n\n// queuing_rw_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@queuing_rw_mutex@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAEXAAV23@_N@Z )\n__TBB_SYMBOL( ?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAE_NXZ )\n__TBB_SYMBOL( ?release@scoped_lock@queuing_rw_mutex@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAE_NXZ )\n__TBB_SYMBOL( 
?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAE_NAAV23@_N@Z )\n\n// reader_writer_lock.cpp\n__TBB_SYMBOL( ?try_lock_read@reader_writer_lock@interface5@tbb@@QAE_NXZ )\n__TBB_SYMBOL( ?try_lock@reader_writer_lock@interface5@tbb@@QAE_NXZ )\n__TBB_SYMBOL( ?unlock@reader_writer_lock@interface5@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?lock_read@reader_writer_lock@interface5@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?lock@reader_writer_lock@interface5@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?internal_construct@reader_writer_lock@interface5@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_destroy@reader_writer_lock@interface5@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AAEXAAV234@@Z )\n__TBB_SYMBOL( ?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAEXAAV234@@Z )\n__TBB_SYMBOL( ?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAEXXZ )\n\n#if !TBB_NO_LEGACY\n// spin_rw_mutex.cpp v2\n__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z )\n__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z )\n__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z )\n__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z )\n#endif\n\n// spin_rw_mutex v3\n__TBB_SYMBOL( ?internal_construct@spin_rw_mutex_v3@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex_v3@tbb@@AAE_NXZ )\n__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex_v3@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAEXXZ )\n__TBB_SYMBOL( 
?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAE_NXZ )\n__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex_v3@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex_v3@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAE_NXZ )\n__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAE_NXZ )\n\n// x86_rtm_rw_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@x86_rtm_rw_mutex@internal@interface8@tbb@@AAEXXZ ) \n__TBB_SYMBOL( ?internal_release@x86_rtm_rw_mutex@internal@interface8@tbb@@AAEXAAVscoped_lock@1234@@Z ) \n__TBB_SYMBOL( ?internal_acquire_writer@x86_rtm_rw_mutex@internal@interface8@tbb@@AAEXAAVscoped_lock@1234@_N@Z ) \n__TBB_SYMBOL( ?internal_acquire_reader@x86_rtm_rw_mutex@internal@interface8@tbb@@AAEXAAVscoped_lock@1234@_N@Z ) \n__TBB_SYMBOL( ?internal_upgrade@x86_rtm_rw_mutex@internal@interface8@tbb@@AAE_NAAVscoped_lock@1234@@Z ) \n__TBB_SYMBOL( ?internal_downgrade@x86_rtm_rw_mutex@internal@interface8@tbb@@AAE_NAAVscoped_lock@1234@@Z ) \n__TBB_SYMBOL( ?internal_try_acquire_writer@x86_rtm_rw_mutex@internal@interface8@tbb@@AAE_NAAVscoped_lock@1234@@Z ) \n\n// spin_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@spin_mutex@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@spin_mutex@tbb@@AAEXAAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@spin_mutex@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAE_NAAV23@@Z )\n\n// mutex.cpp\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@mutex@tbb@@AAEXAAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@mutex@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@mutex@tbb@@AAE_NAAV23@@Z )\n__TBB_SYMBOL( ?internal_construct@mutex@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_destroy@mutex@tbb@@AAEXXZ )\n\n// recursive_mutex.cpp\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAEXAAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@recursive_mutex@tbb@@AAEXXZ )\n__TBB_SYMBOL( 
?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAE_NAAV23@@Z )\n__TBB_SYMBOL( ?internal_construct@recursive_mutex@tbb@@AAEXXZ )\n__TBB_SYMBOL( ?internal_destroy@recursive_mutex@tbb@@AAEXXZ )\n\n// queuing_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@queuing_mutex@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?acquire@scoped_lock@queuing_mutex@tbb@@QAEXAAV23@@Z )\n__TBB_SYMBOL( ?release@scoped_lock@queuing_mutex@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_mutex@tbb@@QAE_NAAV23@@Z )\n\n// critical_section.cpp\n__TBB_SYMBOL( ?internal_construct@critical_section_v4@internal@tbb@@QAEXXZ )\n\n#if !TBB_NO_LEGACY\n// concurrent_hash_map.cpp\n__TBB_SYMBOL( ?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBE_NXZ )\n\n// concurrent_queue.cpp v2\n__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base@internal@tbb@@IAEXXZ )\n__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base@internal@tbb@@IAEXABV123@@Z )\n__TBB_SYMBOL( ?internal_size@concurrent_queue_base@internal@tbb@@IBEHXZ )\n__TBB_SYMBOL( ??0concurrent_queue_base@internal@tbb@@IAE@I@Z )\n__TBB_SYMBOL( ??0concurrent_queue_iterator_base@internal@tbb@@IAE@ABVconcurrent_queue_base@12@@Z )\n__TBB_SYMBOL( ??1concurrent_queue_base@internal@tbb@@MAE@XZ )\n__TBB_SYMBOL( ??1concurrent_queue_iterator_base@internal@tbb@@IAE@XZ )\n__TBB_SYMBOL( ?internal_pop@concurrent_queue_base@internal@tbb@@IAEXPAX@Z )\n__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAE_NPAX@Z )\n__TBB_SYMBOL( ?internal_push@concurrent_queue_base@internal@tbb@@IAEXPBX@Z )\n__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAE_NPBX@Z )\n__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAEXHI@Z )\n#endif\n\n// concurrent_queue v3\n__TBB_SYMBOL( ??1concurrent_queue_iterator_base_v3@internal@tbb@@IAE@XZ )\n__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IAE@ABVconcurrent_queue_base_v3@12@@Z )\n__TBB_SYMBOL( 
??0concurrent_queue_iterator_base_v3@internal@tbb@@IAE@ABVconcurrent_queue_base_v3@12@I@Z )\n__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAEXXZ )\n__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAEXABV123@@Z )\n__TBB_SYMBOL( ??0concurrent_queue_base_v3@internal@tbb@@IAE@I@Z )\n__TBB_SYMBOL( ??1concurrent_queue_base_v3@internal@tbb@@MAE@XZ )\n__TBB_SYMBOL( ?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAEXPAX@Z )\n__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAE_NPAX@Z )\n__TBB_SYMBOL( ?internal_abort@concurrent_queue_base_v3@internal@tbb@@IAEXXZ )\n__TBB_SYMBOL( ?internal_push@concurrent_queue_base_v3@internal@tbb@@IAEXPBX@Z )\n__TBB_SYMBOL( ?internal_push_move@concurrent_queue_base_v8@internal@tbb@@IAEXPBX@Z )\n__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAE_NPBX@Z )\n__TBB_SYMBOL( ?internal_push_move_if_not_full@concurrent_queue_base_v8@internal@tbb@@IAE_NPBX@Z )\n__TBB_SYMBOL( ?internal_size@concurrent_queue_base_v3@internal@tbb@@IBEHXZ )\n__TBB_SYMBOL( ?internal_empty@concurrent_queue_base_v3@internal@tbb@@IBE_NXZ )\n__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAEXHI@Z )\n__TBB_SYMBOL( ?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAEXXZ )\n__TBB_SYMBOL( ?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBEXXZ )\n__TBB_SYMBOL( ?assign@concurrent_queue_base_v3@internal@tbb@@IAEXABV123@@Z )\n__TBB_SYMBOL( ?move_content@concurrent_queue_base_v8@internal@tbb@@IAEXAAV123@@Z )\n\n#if !TBB_NO_LEGACY\n// concurrent_vector.cpp v2\n__TBB_SYMBOL( ?internal_assign@concurrent_vector_base@internal@tbb@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z )\n__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base@internal@tbb@@IBEIXZ )\n__TBB_SYMBOL( ?internal_clear@concurrent_vector_base@internal@tbb@@IAEXP6AXPAXI@Z_N@Z )\n__TBB_SYMBOL( 
?internal_copy@concurrent_vector_base@internal@tbb@@IAEXABV123@IP6AXPAXPBXI@Z@Z )\n__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base@internal@tbb@@IAEIIIP6AXPAXI@Z@Z )\n__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAEXIIP6AXPAXI@Z@Z )\n__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base@internal@tbb@@IAEPAXIAAI@Z )\n__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base@internal@tbb@@IAEXIII@Z )\n#endif\n\n// concurrent_vector v3\n__TBB_SYMBOL( ??1concurrent_vector_base_v3@internal@tbb@@IAE@XZ )\n__TBB_SYMBOL( ?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z )\n__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBEIXZ )\n__TBB_SYMBOL( ?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAEIP6AXPAXI@Z@Z )\n__TBB_SYMBOL( ?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAEXABV123@IP6AXPAXPBXI@Z@Z )\n__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAEIIIP6AXPAXPBXI@Z1@Z )\n__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAEXIIP6AXPAXPBXI@Z1@Z )\n__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAEPAXIAAI@Z )\n__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAEXIII@Z )\n__TBB_SYMBOL( ?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAEPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z )\n__TBB_SYMBOL( ?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAEXAAV123@@Z )\n__TBB_SYMBOL( ?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBEXI@Z )\n__TBB_SYMBOL( ?internal_resize@concurrent_vector_base_v3@internal@tbb@@IAEXIIIPBXP6AXPAXI@ZP6AX10I@Z@Z )\n__TBB_SYMBOL( ?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IAEIIIP6AXPAXPBXI@Z1@Z )\n\n// tbb_thread\n__TBB_SYMBOL( ?join@tbb_thread_v3@internal@tbb@@QAEXXZ )\n__TBB_SYMBOL( ?detach@tbb_thread_v3@internal@tbb@@QAEXXZ )\n__TBB_SYMBOL( 
?internal_start@tbb_thread_v3@internal@tbb@@AAEXP6GIPAX@Z0@Z )\n__TBB_SYMBOL( ?allocate_closure_v3@internal@tbb@@YAPAXI@Z )\n__TBB_SYMBOL( ?free_closure_v3@internal@tbb@@YAXPAX@Z )\n__TBB_SYMBOL( ?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ )\n__TBB_SYMBOL( ?thread_yield_v3@internal@tbb@@YAXXZ )\n__TBB_SYMBOL( ?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z )\n__TBB_SYMBOL( ?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z )\n__TBB_SYMBOL( ?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ )\n\n// condition_variable\n__TBB_SYMBOL( ?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAATcondvar_impl_t@123@PAVmutex@3@PBVinterval_t@tick_count@3@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z )\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/win64-gcc-tbb-export.def",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n\n{\nglobal:\n\n#define __TBB_SYMBOL( sym ) sym;\n#include \"win64-gcc-tbb-export.lst\"\n\nlocal:\n\n/* TBB symbols */\n*3tbb*;\n*__TBB*;\n\n/* Intel Compiler (libirc) symbols */\n__intel_*;\n_intel_*;\nget_msg_buf;\nget_text_buf;\nmessage_catalog;\nprint_buf;\nirc__get_msg;\nirc__print;\n\n};\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/win64-gcc-tbb-export.lst",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. 
This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n\n/* cache_aligned_allocator.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEyyPv ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv )\n__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv )\n__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Ey ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev )\n\n/* task.cpp v3 */\n__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt )\n__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi )\n__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv )\n__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE )\n__TBB_SYMBOL( _ZN3tbb4task4selfEv )\n__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZTIN3tbb4taskE )\n__TBB_SYMBOL( _ZTSN3tbb4taskE )\n__TBB_SYMBOL( _ZTVN3tbb4taskE )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv )\n__TBB_SYMBOL( 
_ZN3tbb19task_scheduler_init10initializeEiy )  // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi )\n__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv )\n#if __TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev )\n__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev )\n__TBB_SYMBOL( _ZTIN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTSN3tbb10empty_taskE )\n__TBB_SYMBOL( _ZTVN3tbb10empty_taskE )\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEx )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE )\n__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv )\n__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n/* task_v2.cpp */\n__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ )\n#endif /* !TBB_NO_LEGACY */\n\n/* Exception handling in task scheduler */\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv )\n__TBB_SYMBOL( 
_ZN3tbb18task_group_context4initEv )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev )\n__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE )\n__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv )\n__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv )\n__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev )\n__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE )\n__TBB_SYMBOL( _ZN3tbb13tbb_exceptionD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE )\n__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n/* Symbols for exceptions thrown from TBB */\n__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev )\n__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev )\n__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev )\n__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev )\n__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev )\n__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTSN3tbb12missing_waitE )\n__TBB_SYMBOL( _ZTVN3tbb12missing_waitE 
)\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev )\n__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev )\n__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev )\n__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev )\n__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTSN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZTVN3tbb13improper_lockE )\n__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev )\n__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev )\n__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv )\n__TBB_SYMBOL( _ZTIN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTSN3tbb10user_abortE )\n__TBB_SYMBOL( _ZTVN3tbb10user_abortE )\n\n/* tbb_misc.cpp */\n__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ )\n__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E )\n__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv )\n__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc )\n__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz )\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n/* tbb_main.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv )\n__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv )\n__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc )\n__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv )\n\n/* pipeline.cpp */\n__TBB_SYMBOL( _ZTIN3tbb6filterE )\n__TBB_SYMBOL( _ZTSN3tbb6filterE )\n__TBB_SYMBOL( _ZTVN3tbb6filterE )\n__TBB_SYMBOL( _ZN3tbb6filterD2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE )\n__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE )\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEy ) // MODIFIED LINUX ENTRY\n#if 
__TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( _ZN3tbb8pipeline3runEyRNS_18task_group_contextE ) // MODIFIED LINUX ENTRY\n#endif\n__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv )\n__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv )\n__TBB_SYMBOL( _ZTIN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTSN3tbb8pipelineE )\n__TBB_SYMBOL( _ZTVN3tbb8pipelineE )\n__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev )\n__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev )\n__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv )\n\n/* queuing_rw_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b )\n\n/* reader_writer_lock.cpp */\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv )\n__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv )\n__TBB_SYMBOL( 
_ZN3tbb10interface518reader_writer_lock9lock_readEv )\n\n#if !TBB_NO_LEGACY\n/* spin_rw_mutex.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ )\n__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ )\n#endif\n\n// x86_rtm_rw_mutex.cpp\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE )\n__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE )\n\n/* spin_rw_mutex v3 */\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv 
)\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv )\n__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv )\n\n/* spin_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv )\n\n/* mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv )\n\n/* recursive_mutex.cpp */\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv )\n__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv )\n\n/* QueuingMutex.cpp */\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv )\n__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ )\n\n/* critical_section.cpp */\n__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv )\n\n#if !TBB_NO_LEGACY\n/* concurrent_hash_map */\n__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv )\n\n/* concurrent_queue.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityExy ) // MODIFIED LINUX 
ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Ey ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev )\n__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE )\n__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev )\n__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv )\n#endif\n\n/* concurrent_queue v3 */\n/* constructors */\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Ey ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Ey ) // MODIFIED LINUX ENTRY\n/* destructors */\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev )\n/* typeinfo */\n__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E )\n__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E )\n/* vtable */\n__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E )\n/* methods */\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv 
)\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityExy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv )\n__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ )\n__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ )\n\n\n#if !TBB_NO_LEGACY\n/* concurrent_vector.cpp v2 */\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_yPFvPvPKvyE ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvyEb ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_yPFvPvyEPFvS4_PKvyESA_ ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEyyPFvPvyE ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEyyy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEyRy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEyyPFvPvyE ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv )\n#endif\n\n/* concurrent_vector v3 
*/\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_yPFvPvPKvyE ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvyE ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_yPFvPvyEPFvS4_PKvyESA_ ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEyyPFvPvPKvyES4_ ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEyyy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEyRy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEyyPFvPvPKvyES4_ ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEyPvPFvS2_yEPFvS2_PKvyE ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ )\n__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEy ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev )\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEyyyPKvPFvPvyEPFvS4_S3_yE ) // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEyyPFvPvPKvyES4_ ) // MODIFIED LINUX ENTRY\n\n/* tbb_thread */\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv )\n__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv )\n__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFjPvES2_ )  // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( 
_ZN3tbb8internal19allocate_closure_v3Ey )  // MODIFIED LINUX ENTRY\n__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev )\n__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE )\n\n/* condition_variable */\n__TBB_SYMBOL( _ZN3tbb10interface58internal32internal_condition_variable_waitERNS1_14condvar_impl_tEPNS_5mutexEPKNS_10tick_count10interval_tE )\n__TBB_SYMBOL( _ZN3tbb10interface58internal35internal_destroy_condition_variableERNS1_14condvar_impl_tE )\n__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_condition_variable_notify_allERNS1_14condvar_impl_tE )\n__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_condition_variable_notify_oneERNS1_14condvar_impl_tE )\n__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_initialize_condition_variableERNS1_14condvar_impl_tE )\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/win64-tbb-export.def",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n; This file is organized with a section for each .cpp file.\n; Each of these sections is in alphabetical order.\n\nEXPORTS\n\n#define __TBB_SYMBOL( sym ) sym\n#include \"win64-tbb-export.lst\"\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/win64-tbb-export.lst",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. 
This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n// This file is organized with a section for each .cpp file.\n// Each of these sections is in alphabetical order.\n\n#include \"tbb/tbb_config.h\"\n\n// Assembly-language support that is called directly by clients\n__TBB_SYMBOL( __TBB_machine_cmpswp1 )\n__TBB_SYMBOL( __TBB_machine_fetchadd1 )\n__TBB_SYMBOL( __TBB_machine_fetchstore1 )\n__TBB_SYMBOL( __TBB_machine_cmpswp2 )\n__TBB_SYMBOL( __TBB_machine_fetchadd2 )\n__TBB_SYMBOL( __TBB_machine_fetchstore2 )\n__TBB_SYMBOL( __TBB_machine_pause )\n__TBB_SYMBOL( __TBB_machine_try_lock_elided )\n__TBB_SYMBOL( __TBB_machine_unlock_elided )\n__TBB_SYMBOL( __TBB_machine_is_in_transaction )\n\n// cache_aligned_allocator.cpp\n__TBB_SYMBOL( ?NFS_Allocate@internal@tbb@@YAPEAX_K0PEAX@Z )\n__TBB_SYMBOL( ?NFS_GetLineSize@internal@tbb@@YA_KXZ )\n__TBB_SYMBOL( ?NFS_Free@internal@tbb@@YAXPEAX@Z )\n__TBB_SYMBOL( ?allocate_via_handler_v3@internal@tbb@@YAPEAX_K@Z )\n__TBB_SYMBOL( ?deallocate_via_handler_v3@internal@tbb@@YAXPEAX@Z )\n__TBB_SYMBOL( ?is_malloc_used_v3@internal@tbb@@YA_NXZ )\n\n\n// task.cpp v3\n__TBB_SYMBOL( ?resize@affinity_partitioner_base_v3@internal@tbb@@AEAAXI@Z )\n__TBB_SYMBOL( ?allocate@allocate_additional_child_of_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z )\n__TBB_SYMBOL( ?allocate@allocate_child_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z )\n__TBB_SYMBOL( ?allocate@allocate_continuation_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z )\n__TBB_SYMBOL( ?allocate@allocate_root_proxy@internal@tbb@@SAAEAVtask@3@_K@Z )\n__TBB_SYMBOL( ?destroy@task_base@internal@interface5@tbb@@SAXAEAVtask@4@@Z )\n__TBB_SYMBOL( ?free@allocate_additional_child_of_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z )\n__TBB_SYMBOL( ?free@allocate_child_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z )\n__TBB_SYMBOL( ?free@allocate_continuation_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z )\n__TBB_SYMBOL( 
?free@allocate_root_proxy@internal@tbb@@SAXAEAVtask@3@@Z )\n__TBB_SYMBOL( ?internal_set_ref_count@task@tbb@@AEAAXH@Z )\n__TBB_SYMBOL( ?internal_decrement_ref_count@task@tbb@@AEAA_JXZ )\n__TBB_SYMBOL( ?is_owned_by_current_thread@task@tbb@@QEBA_NXZ )\n__TBB_SYMBOL( ?note_affinity@task@tbb@@UEAAXG@Z )\n__TBB_SYMBOL( ?self@task@tbb@@SAAEAV12@XZ )\n__TBB_SYMBOL( ?spawn_and_wait_for_all@task@tbb@@QEAAXAEAVtask_list@2@@Z )\n__TBB_SYMBOL( ?default_num_threads@task_scheduler_init@tbb@@SAHXZ )\n__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QEAAXH_K@Z )\n__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QEAAXH@Z )\n__TBB_SYMBOL( ?terminate@task_scheduler_init@tbb@@QEAAXXZ )\n#if __TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( ?observe@task_scheduler_observer_v3@internal@tbb@@QEAAX_N@Z )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( ?internal_current_slot@task_arena_base@internal@interface7@tbb@@KAHXZ )\n__TBB_SYMBOL( ?internal_initialize@task_arena_base@internal@interface7@tbb@@IEAAXXZ )\n__TBB_SYMBOL( ?internal_terminate@task_arena_base@internal@interface7@tbb@@IEAAXXZ )\n__TBB_SYMBOL( ?internal_enqueue@task_arena_base@internal@interface7@tbb@@IEBAXAEAVtask@4@_J@Z )\n__TBB_SYMBOL( ?internal_execute@task_arena_base@internal@interface7@tbb@@IEBAXAEAVdelegate_base@234@@Z )\n__TBB_SYMBOL( ?internal_wait@task_arena_base@internal@interface7@tbb@@IEBAXXZ )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n// task_v2.cpp\n__TBB_SYMBOL( ?destroy@task@tbb@@QEAAXAEAV12@@Z )\n#endif\n\n// Exception handling in task scheduler\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( ?allocate@allocate_root_with_context_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z )\n__TBB_SYMBOL( ?free@allocate_root_with_context_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z )\n__TBB_SYMBOL( ?change_group@task@tbb@@QEAAXAEAVtask_group_context@2@@Z )\n__TBB_SYMBOL( ?is_group_execution_cancelled@task_group_context@tbb@@QEBA_NXZ )\n__TBB_SYMBOL( 
?cancel_group_execution@task_group_context@tbb@@QEAA_NXZ )\n__TBB_SYMBOL( ?reset@task_group_context@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?capture_fp_settings@task_group_context@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?init@task_group_context@tbb@@IEAAXXZ )\n__TBB_SYMBOL( ?register_pending_exception@task_group_context@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ??1task_group_context@tbb@@QEAA@XZ )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( ?set_priority@task_group_context@tbb@@QEAAXW4priority_t@2@@Z )\n__TBB_SYMBOL( ?priority@task_group_context@tbb@@QEBA?AW4priority_t@2@XZ )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( ?name@captured_exception@tbb@@UEBAPEBDXZ )\n__TBB_SYMBOL( ?what@captured_exception@tbb@@UEBAPEBDXZ )\n__TBB_SYMBOL( ??1captured_exception@tbb@@UEAA@XZ )\n__TBB_SYMBOL( ?move@captured_exception@tbb@@UEAAPEAV12@XZ )\n__TBB_SYMBOL( ?destroy@captured_exception@tbb@@UEAAXXZ )\n__TBB_SYMBOL( ?set@captured_exception@tbb@@QEAAXPEBD0@Z )\n__TBB_SYMBOL( ?clear@captured_exception@tbb@@QEAAXXZ )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n// Symbols for exceptions thrown from TBB\n__TBB_SYMBOL( ?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ )\n__TBB_SYMBOL( ?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z )\n__TBB_SYMBOL( ?what@bad_last_alloc@tbb@@UEBAPEBDXZ )\n__TBB_SYMBOL( ?what@missing_wait@tbb@@UEBAPEBDXZ )\n__TBB_SYMBOL( ?what@invalid_multiple_scheduling@tbb@@UEBAPEBDXZ )\n__TBB_SYMBOL( ?what@improper_lock@tbb@@UEBAPEBDXZ )\n__TBB_SYMBOL( ?what@user_abort@tbb@@UEBAPEBDXZ )\n\n// tbb_misc.cpp\n__TBB_SYMBOL( ?assertion_failure@tbb@@YAXPEBDH00@Z )\n__TBB_SYMBOL( ?get_initial_auto_partitioner_divisor@internal@tbb@@YA_KXZ )\n__TBB_SYMBOL( ?handle_perror@internal@tbb@@YAXHPEBD@Z )\n__TBB_SYMBOL( ?set_assertion_handler@tbb@@YAP6AXPEBDH00@ZP6AX0H00@Z@Z )\n__TBB_SYMBOL( ?runtime_warning@internal@tbb@@YAXPEBDZZ )\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n// tbb_main.cpp\n__TBB_SYMBOL( ?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPEAXPEBX@Z )\n__TBB_SYMBOL( 
?itt_store_pointer_with_release_v3@internal@tbb@@YAXPEAX0@Z )\n__TBB_SYMBOL( ?call_itt_notify_v5@internal@tbb@@YAXHPEAX@Z )\n__TBB_SYMBOL( ?itt_load_pointer_v3@internal@tbb@@YAPEAXPEBX@Z )\n__TBB_SYMBOL( ?itt_set_sync_name_v3@internal@tbb@@YAXPEAXPEB_W@Z )\n#if __TBB_ITT_STRUCTURE_API\n__TBB_SYMBOL( ?itt_make_task_group_v7@internal@tbb@@YAXW4itt_domain_enum@12@PEAX_K12W4string_index@12@@Z )\n__TBB_SYMBOL( ?itt_metadata_str_add_v7@internal@tbb@@YAXW4itt_domain_enum@12@PEAX_KW4string_index@12@PEBD@Z )\n__TBB_SYMBOL( ?itt_relation_add_v7@internal@tbb@@YAXW4itt_domain_enum@12@PEAX_KW4itt_relation@12@12@Z )\n__TBB_SYMBOL( ?itt_task_begin_v7@internal@tbb@@YAXW4itt_domain_enum@12@PEAX_K12W4string_index@12@@Z )\n__TBB_SYMBOL( ?itt_task_end_v7@internal@tbb@@YAXW4itt_domain_enum@12@@Z )\n#endif\n\n// pipeline.cpp\n__TBB_SYMBOL( ??_7pipeline@tbb@@6B@ )\n__TBB_SYMBOL( ??0pipeline@tbb@@QEAA@XZ )\n__TBB_SYMBOL( ??1filter@tbb@@UEAA@XZ )\n__TBB_SYMBOL( ??1pipeline@tbb@@UEAA@XZ )\n__TBB_SYMBOL( ?add_filter@pipeline@tbb@@QEAAXAEAVfilter@2@@Z )\n__TBB_SYMBOL( ?clear@pipeline@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?inject_token@pipeline@tbb@@AEAAXAEAVtask@2@@Z )\n__TBB_SYMBOL( ?run@pipeline@tbb@@QEAAX_K@Z )\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( ?run@pipeline@tbb@@QEAAX_KAEAVtask_group_context@2@@Z )\n#endif\n__TBB_SYMBOL( ?process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ )\n__TBB_SYMBOL( ?try_process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ )\n__TBB_SYMBOL( ?set_end_of_input@filter@tbb@@IEAAXXZ )\n\n// queuing_rw_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@queuing_rw_mutex@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?acquire@scoped_lock@queuing_rw_mutex@tbb@@QEAAXAEAV23@_N@Z )\n__TBB_SYMBOL( ?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NXZ )\n__TBB_SYMBOL( ?release@scoped_lock@queuing_rw_mutex@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NXZ )\n__TBB_SYMBOL( 
?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NAEAV23@_N@Z )\n\n// reader_writer_lock.cpp\n__TBB_SYMBOL( ?try_lock_read@reader_writer_lock@interface5@tbb@@QEAA_NXZ )\n__TBB_SYMBOL( ?try_lock@reader_writer_lock@interface5@tbb@@QEAA_NXZ )\n__TBB_SYMBOL( ?unlock@reader_writer_lock@interface5@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?lock_read@reader_writer_lock@interface5@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?lock@reader_writer_lock@interface5@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?internal_construct@reader_writer_lock@interface5@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_destroy@reader_writer_lock@interface5@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AEAAXAEAV234@@Z )\n__TBB_SYMBOL( ?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXAEAV234@@Z )\n__TBB_SYMBOL( ?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXXZ )\n\n#if !TBB_NO_LEGACY\n// spin_rw_mutex.cpp v2\n__TBB_SYMBOL( ?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPEAV12@@Z )\n__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z )\n__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z )\n__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex@tbb@@CAXPEAV12@@Z )\n__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex@tbb@@CA_NPEAV12@@Z )\n__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z )\n__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex@tbb@@CAXPEAV12@@Z )\n__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z )\n__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPEAV12@@Z )\n#endif\n\n// spin_rw_mutex v3\n__TBB_SYMBOL( ?internal_construct@spin_rw_mutex_v3@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex_v3@tbb@@AEAA_NXZ )\n__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex_v3@tbb@@AEAAXXZ )\n__TBB_SYMBOL( 
?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AEAA_NXZ )\n__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex_v3@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex_v3@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AEAA_NXZ )\n__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AEAA_NXZ )\n\n// x86_rtm_rw_mutex.cpp\n__TBB_SYMBOL( ?internal_acquire_writer@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAAXAEAVscoped_lock@1234@_N@Z )\n__TBB_SYMBOL( ?internal_acquire_reader@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAAXAEAVscoped_lock@1234@_N@Z )\n__TBB_SYMBOL( ?internal_upgrade@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAA_NAEAVscoped_lock@1234@@Z )\n__TBB_SYMBOL( ?internal_downgrade@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAA_NAEAVscoped_lock@1234@@Z )\n__TBB_SYMBOL( ?internal_try_acquire_writer@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAA_NAEAVscoped_lock@1234@@Z )\n__TBB_SYMBOL( ?internal_release@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAAXAEAVscoped_lock@1234@@Z )\n__TBB_SYMBOL( ?internal_construct@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAAXXZ )\n\n// spin_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@spin_mutex@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@spin_mutex@tbb@@AEAAXAEAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@spin_mutex@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AEAA_NAEAV23@@Z )\n\n// mutex.cpp\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@mutex@tbb@@AEAAXAEAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@mutex@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@mutex@tbb@@AEAA_NAEAV23@@Z )\n__TBB_SYMBOL( ?internal_construct@mutex@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_destroy@mutex@tbb@@AEAAXXZ )\n\n// recursive_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@recursive_mutex@tbb@@AEAAXXZ )\n__TBB_SYMBOL( 
?internal_destroy@recursive_mutex@tbb@@AEAAXXZ )\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@recursive_mutex@tbb@@AEAAXAEAV23@@Z )\n__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AEAA_NAEAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@recursive_mutex@tbb@@AEAAXXZ )\n\n// queuing_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@queuing_mutex@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?acquire@scoped_lock@queuing_mutex@tbb@@QEAAXAEAV23@@Z )\n__TBB_SYMBOL( ?release@scoped_lock@queuing_mutex@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_mutex@tbb@@QEAA_NAEAV23@@Z )\n\n//critical_section.cpp\n__TBB_SYMBOL( ?internal_construct@critical_section_v4@internal@tbb@@QEAAXXZ )\n\n#if !TBB_NO_LEGACY\n// concurrent_hash_map.cpp\n__TBB_SYMBOL( ?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QEBA_NXZ )\n\n// concurrent_queue.cpp v2\n__TBB_SYMBOL( ??0concurrent_queue_base@internal@tbb@@IEAA@_K@Z )\n__TBB_SYMBOL( ??0concurrent_queue_iterator_base@internal@tbb@@IEAA@AEBVconcurrent_queue_base@12@@Z )\n__TBB_SYMBOL( ??1concurrent_queue_base@internal@tbb@@MEAA@XZ )\n__TBB_SYMBOL( ??1concurrent_queue_iterator_base@internal@tbb@@IEAA@XZ )\n__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base@internal@tbb@@IEAAXXZ )\n__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base@internal@tbb@@IEAAXAEBV123@@Z )\n__TBB_SYMBOL( ?internal_pop@concurrent_queue_base@internal@tbb@@IEAAXPEAX@Z )\n__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IEAA_NPEAX@Z )\n__TBB_SYMBOL( ?internal_push@concurrent_queue_base@internal@tbb@@IEAAXPEBX@Z )\n__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IEAA_NPEBX@Z )\n__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base@internal@tbb@@IEAAX_J_K@Z )\n__TBB_SYMBOL( ?internal_size@concurrent_queue_base@internal@tbb@@IEBA_JXZ )\n#endif\n\n// concurrent_queue v3\n__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@@Z 
)\n__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@_K@Z )\n__TBB_SYMBOL( ??1concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@XZ )\n__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IEAAXAEBV123@@Z )\n__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IEAAXXZ )\n__TBB_SYMBOL( ??0concurrent_queue_base_v3@internal@tbb@@IEAA@_K@Z )\n__TBB_SYMBOL( ??1concurrent_queue_base_v3@internal@tbb@@MEAA@XZ )\n__TBB_SYMBOL( ?internal_push@concurrent_queue_base_v3@internal@tbb@@IEAAXPEBX@Z )\n__TBB_SYMBOL( ?internal_push_move@concurrent_queue_base_v8@internal@tbb@@IEAAXPEBX@Z )\n__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IEAA_NPEBX@Z )\n__TBB_SYMBOL( ?internal_push_move_if_not_full@concurrent_queue_base_v8@internal@tbb@@IEAA_NPEBX@Z )\n__TBB_SYMBOL( ?internal_pop@concurrent_queue_base_v3@internal@tbb@@IEAAXPEAX@Z )\n__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IEAA_NPEAX@Z )\n__TBB_SYMBOL( ?internal_abort@concurrent_queue_base_v3@internal@tbb@@IEAAXXZ )\n__TBB_SYMBOL( ?internal_size@concurrent_queue_base_v3@internal@tbb@@IEBA_JXZ )\n__TBB_SYMBOL( ?internal_empty@concurrent_queue_base_v3@internal@tbb@@IEBA_NXZ )\n__TBB_SYMBOL( ?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IEAAXXZ )\n__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IEAAX_J_K@Z )\n__TBB_SYMBOL( ?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IEBAXXZ )\n__TBB_SYMBOL( ?assign@concurrent_queue_base_v3@internal@tbb@@IEAAXAEBV123@@Z )\n__TBB_SYMBOL( ?move_content@concurrent_queue_base_v8@internal@tbb@@IEAAXAEAV123@@Z )\n\n#if !TBB_NO_LEGACY\n// concurrent_vector.cpp v2\n__TBB_SYMBOL( ?internal_assign@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z )\n__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base@internal@tbb@@IEBA_KXZ )\n__TBB_SYMBOL( 
?internal_clear@concurrent_vector_base@internal@tbb@@IEAAXP6AXPEAX_K@Z_N@Z )\n__TBB_SYMBOL( ?internal_copy@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z )\n__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base@internal@tbb@@IEAA_K_K0P6AXPEAX0@Z@Z )\n__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IEAAX_K0P6AXPEAX0@Z@Z )\n__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base@internal@tbb@@IEAAPEAX_KAEA_K@Z )\n__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base@internal@tbb@@IEAAX_K00@Z )\n#endif\n\n// concurrent_vector v3\n__TBB_SYMBOL( ??1concurrent_vector_base_v3@internal@tbb@@IEAA@XZ )\n__TBB_SYMBOL( ?internal_assign@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z )\n__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IEBA_KXZ )\n__TBB_SYMBOL( ?internal_clear@concurrent_vector_base_v3@internal@tbb@@IEAA_KP6AXPEAX_K@Z@Z )\n__TBB_SYMBOL( ?internal_copy@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z )\n__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z )\n__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IEAAX_K0P6AXPEAXPEBX0@Z2@Z )\n__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IEAAPEAX_KAEA_K@Z )\n__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IEAAX_K00@Z )\n__TBB_SYMBOL( ?internal_compact@concurrent_vector_base_v3@internal@tbb@@IEAAPEAX_KPEAXP6AX10@ZP6AX1PEBX0@Z@Z )\n__TBB_SYMBOL( ?internal_swap@concurrent_vector_base_v3@internal@tbb@@IEAAXAEAV123@@Z )\n__TBB_SYMBOL( ?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IEBAX_K@Z )\n__TBB_SYMBOL( ?internal_resize@concurrent_vector_base_v3@internal@tbb@@IEAAX_K00PEBXP6AXPEAX0@ZP6AX210@Z@Z )\n__TBB_SYMBOL( ?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z )\n\n// 
tbb_thread\n__TBB_SYMBOL( ?allocate_closure_v3@internal@tbb@@YAPEAX_K@Z )\n__TBB_SYMBOL( ?detach@tbb_thread_v3@internal@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?free_closure_v3@internal@tbb@@YAXPEAX@Z )\n__TBB_SYMBOL( ?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ )\n__TBB_SYMBOL( ?internal_start@tbb_thread_v3@internal@tbb@@AEAAXP6AIPEAX@Z0@Z )\n__TBB_SYMBOL( ?join@tbb_thread_v3@internal@tbb@@QEAAXXZ )\n__TBB_SYMBOL( ?move_v3@internal@tbb@@YAXAEAVtbb_thread_v3@12@0@Z )\n__TBB_SYMBOL( ?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ )\n__TBB_SYMBOL( ?thread_sleep_v3@internal@tbb@@YAXAEBVinterval_t@tick_count@2@@Z )\n__TBB_SYMBOL( ?thread_yield_v3@internal@tbb@@YAXXZ )\n\n// condition_variable\n__TBB_SYMBOL( ?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAEATcondvar_impl_t@123@PEAVmutex@3@PEBVinterval_t@tick_count@3@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z )\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/winrt-tbb-export.lst",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. 
This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\n#include \"tbb/tbb_config.h\"\n\n// cache_aligned_allocator.cpp\n__TBB_SYMBOL( ?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z )\n__TBB_SYMBOL( ?NFS_GetLineSize@internal@tbb@@YAIXZ )\n__TBB_SYMBOL( ?NFS_Free@internal@tbb@@YAXPAX@Z )\n__TBB_SYMBOL( ?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z )\n__TBB_SYMBOL( ?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z )\n__TBB_SYMBOL( ?is_malloc_used_v3@internal@tbb@@YA_NXZ )\n\n// task.cpp v3\n__TBB_SYMBOL( ?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBAAAVtask@3@I@Z )\n__TBB_SYMBOL( ?allocate@allocate_child_proxy@internal@tbb@@QBAAAVtask@3@I@Z )\n__TBB_SYMBOL( ?allocate@allocate_continuation_proxy@internal@tbb@@QBAAAVtask@3@I@Z )\n__TBB_SYMBOL( ?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z )\n__TBB_SYMBOL( ?destroy@task_base@internal@interface5@tbb@@SAXAAVtask@4@@Z )\n__TBB_SYMBOL( ?free@allocate_additional_child_of_proxy@internal@tbb@@QBAXAAVtask@3@@Z )\n__TBB_SYMBOL( ?free@allocate_child_proxy@internal@tbb@@QBAXAAVtask@3@@Z )\n__TBB_SYMBOL( ?free@allocate_continuation_proxy@internal@tbb@@QBAXAAVtask@3@@Z )\n__TBB_SYMBOL( ?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z )\n__TBB_SYMBOL( ?internal_set_ref_count@task@tbb@@AAAXH@Z )\n__TBB_SYMBOL( ?internal_decrement_ref_count@task@tbb@@AAAHXZ )\n__TBB_SYMBOL( ?is_owned_by_current_thread@task@tbb@@QBA_NXZ )\n__TBB_SYMBOL( ?note_affinity@task@tbb@@UAAXG@Z )\n__TBB_SYMBOL( ?resize@affinity_partitioner_base_v3@internal@tbb@@AAAXI@Z )\n__TBB_SYMBOL( ?self@task@tbb@@SAAAV12@XZ )\n__TBB_SYMBOL( ?spawn_and_wait_for_all@task@tbb@@QAAXAAVtask_list@2@@Z )\n__TBB_SYMBOL( ?default_num_threads@task_scheduler_init@tbb@@SAHXZ )\n__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QAAXHI@Z )\n__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QAAXH@Z )\n__TBB_SYMBOL( ?terminate@task_scheduler_init@tbb@@QAAXXZ )\n#if 
__TBB_SCHEDULER_OBSERVER\n__TBB_SYMBOL( ?observe@task_scheduler_observer_v3@internal@tbb@@QAAX_N@Z )\n#endif /* __TBB_SCHEDULER_OBSERVER */\n\n#if __TBB_TASK_ARENA\n/* arena.cpp */\n__TBB_SYMBOL( ?internal_current_slot@task_arena_base@internal@interface7@tbb@@KAHXZ )\n__TBB_SYMBOL( ?internal_initialize@task_arena_base@internal@interface7@tbb@@IAAXXZ )\n__TBB_SYMBOL( ?internal_terminate@task_arena_base@internal@interface7@tbb@@IAAXXZ )\n__TBB_SYMBOL( ?internal_enqueue@task_arena_base@internal@interface7@tbb@@IBAXAAVtask@4@H@Z )\n__TBB_SYMBOL( ?internal_execute@task_arena_base@internal@interface7@tbb@@IBAXAAVdelegate_base@234@@Z )\n__TBB_SYMBOL( ?internal_wait@task_arena_base@internal@interface7@tbb@@IBAXXZ )\n#endif /* __TBB_TASK_ARENA */\n\n#if !TBB_NO_LEGACY\n// task_v2.cpp\n__TBB_SYMBOL( ?destroy@task@tbb@@QAAXAAV12@@Z )\n#endif\n\n// exception handling support\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( ?allocate@allocate_root_with_context_proxy@internal@tbb@@QBAAAVtask@3@I@Z )\n__TBB_SYMBOL( ?free@allocate_root_with_context_proxy@internal@tbb@@QBAXAAVtask@3@@Z )\n__TBB_SYMBOL( ?change_group@task@tbb@@QAAXAAVtask_group_context@2@@Z )\n__TBB_SYMBOL( ?is_group_execution_cancelled@task_group_context@tbb@@QBA_NXZ )\n__TBB_SYMBOL( ?cancel_group_execution@task_group_context@tbb@@QAA_NXZ )\n__TBB_SYMBOL( ?reset@task_group_context@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?capture_fp_settings@task_group_context@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?init@task_group_context@tbb@@IAAXXZ )\n__TBB_SYMBOL( ?register_pending_exception@task_group_context@tbb@@QAAXXZ )\n__TBB_SYMBOL( ??1task_group_context@tbb@@QAA@XZ )\n#if __TBB_TASK_PRIORITY\n__TBB_SYMBOL( ?set_priority@task_group_context@tbb@@QAAXW4priority_t@2@@Z )\n__TBB_SYMBOL( ?priority@task_group_context@tbb@@QBA?AW4priority_t@2@XZ )\n#endif /* __TBB_TASK_PRIORITY */\n__TBB_SYMBOL( ?name@captured_exception@tbb@@UBAPBDXZ )\n__TBB_SYMBOL( ?what@captured_exception@tbb@@UBAPBDXZ )\n__TBB_SYMBOL( ??1captured_exception@tbb@@UAA@XZ 
)\n__TBB_SYMBOL( ?move@captured_exception@tbb@@UAAPAV12@XZ )\n__TBB_SYMBOL( ?destroy@captured_exception@tbb@@UAAXXZ )\n__TBB_SYMBOL( ?set@captured_exception@tbb@@QAAXPBD0@Z )\n__TBB_SYMBOL( ?clear@captured_exception@tbb@@QAAXXZ )\n#endif /* __TBB_TASK_GROUP_CONTEXT */\n\n// Symbols for exceptions thrown from TBB\n__TBB_SYMBOL( ?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ )\n__TBB_SYMBOL( ?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z )\n__TBB_SYMBOL( ?what@bad_last_alloc@tbb@@UBAPBDXZ )\n__TBB_SYMBOL( ?what@missing_wait@tbb@@UBAPBDXZ )\n__TBB_SYMBOL( ?what@invalid_multiple_scheduling@tbb@@UBAPBDXZ )\n__TBB_SYMBOL( ?what@improper_lock@tbb@@UBAPBDXZ )\n__TBB_SYMBOL( ?what@user_abort@tbb@@UBAPBDXZ )\n\n// tbb_misc.cpp\n__TBB_SYMBOL( ?assertion_failure@tbb@@YAXPBDH00@Z )\n__TBB_SYMBOL( ?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ )\n__TBB_SYMBOL( ?handle_perror@internal@tbb@@YAXHPBD@Z )\n__TBB_SYMBOL( ?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z )\n__TBB_SYMBOL( ?runtime_warning@internal@tbb@@YAXPBDZZ )\n__TBB_SYMBOL( TBB_runtime_interface_version )\n\n// tbb_main.cpp\n__TBB_SYMBOL( ?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z )\n__TBB_SYMBOL( ?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z )\n__TBB_SYMBOL( ?call_itt_notify_v5@internal@tbb@@YAXHPAX@Z )\n__TBB_SYMBOL( ?itt_set_sync_name_v3@internal@tbb@@YAXPAXPB_W@Z )\n__TBB_SYMBOL( ?itt_load_pointer_v3@internal@tbb@@YAPAXPBX@Z )\n\n// pipeline.cpp\n__TBB_SYMBOL( ??0pipeline@tbb@@QAA@XZ )\n__TBB_SYMBOL( ??1filter@tbb@@UAA@XZ )\n__TBB_SYMBOL( ??1pipeline@tbb@@UAA@XZ )\n__TBB_SYMBOL( ??_7pipeline@tbb@@6B@ )\n__TBB_SYMBOL( ?add_filter@pipeline@tbb@@QAAXAAVfilter@2@@Z )\n__TBB_SYMBOL( ?clear@pipeline@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?inject_token@pipeline@tbb@@AAAXAAVtask@2@@Z )\n__TBB_SYMBOL( ?run@pipeline@tbb@@QAAXI@Z )\n#if __TBB_TASK_GROUP_CONTEXT\n__TBB_SYMBOL( ?run@pipeline@tbb@@QAAXIAAVtask_group_context@2@@Z )\n#endif\n__TBB_SYMBOL( 
?process_item@thread_bound_filter@tbb@@QAA?AW4result_type@12@XZ )\n__TBB_SYMBOL( ?try_process_item@thread_bound_filter@tbb@@QAA?AW4result_type@12@XZ )\n__TBB_SYMBOL( ?set_end_of_input@filter@tbb@@IAAXXZ )\n\n// queuing_rw_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@queuing_rw_mutex@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAAXAAV23@_N@Z )\n__TBB_SYMBOL( ?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ )\n__TBB_SYMBOL( ?release@scoped_lock@queuing_rw_mutex@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ )\n__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAA_NAAV23@_N@Z )\n\n// reader_writer_lock.cpp\n__TBB_SYMBOL( ?try_lock_read@reader_writer_lock@interface5@tbb@@QAA_NXZ )\n__TBB_SYMBOL( ?try_lock@reader_writer_lock@interface5@tbb@@QAA_NXZ )\n__TBB_SYMBOL( ?unlock@reader_writer_lock@interface5@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?lock_read@reader_writer_lock@interface5@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?lock@reader_writer_lock@interface5@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?internal_construct@reader_writer_lock@interface5@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_destroy@reader_writer_lock@interface5@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AAAXAAV234@@Z )\n__TBB_SYMBOL( ?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAAXAAV234@@Z )\n__TBB_SYMBOL( ?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAAXXZ )\n\n#if !TBB_NO_LEGACY\n// spin_rw_mutex.cpp v2\n__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z )\n__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z 
)\n__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z )\n__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z )\n__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z )\n__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z )\n#endif\n\n// spin_rw_mutex v3\n__TBB_SYMBOL( ?internal_construct@spin_rw_mutex_v3@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex_v3@tbb@@AAA_NXZ )\n__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex_v3@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ )\n__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex_v3@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex_v3@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAA_NXZ )\n__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ )\n\n// spin_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@spin_mutex@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@spin_mutex@tbb@@AAAXAAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@spin_mutex@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAA_NAAV23@@Z )\n\n// mutex.cpp\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@mutex@tbb@@AAAXAAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@mutex@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@mutex@tbb@@AAA_NAAV23@@Z )\n__TBB_SYMBOL( ?internal_construct@mutex@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_destroy@mutex@tbb@@AAAXXZ )\n\n// recursive_mutex.cpp\n__TBB_SYMBOL( ?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAAXAAV23@@Z )\n__TBB_SYMBOL( ?internal_release@scoped_lock@recursive_mutex@tbb@@AAAXXZ )\n__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAA_NAAV23@@Z )\n__TBB_SYMBOL( ?internal_construct@recursive_mutex@tbb@@AAAXXZ )\n__TBB_SYMBOL( 
?internal_destroy@recursive_mutex@tbb@@AAAXXZ )\n\n// queuing_mutex.cpp\n__TBB_SYMBOL( ?internal_construct@queuing_mutex@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?acquire@scoped_lock@queuing_mutex@tbb@@QAAXAAV23@@Z )\n__TBB_SYMBOL( ?release@scoped_lock@queuing_mutex@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_mutex@tbb@@QAA_NAAV23@@Z )\n\n// critical_section.cpp\n__TBB_SYMBOL( ?internal_construct@critical_section_v4@internal@tbb@@QAAXXZ )\n\n#if !TBB_NO_LEGACY\n// concurrent_hash_map.cpp\n__TBB_SYMBOL( ?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBA_NXZ )\n\n// concurrent_queue.cpp v2\n__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base@internal@tbb@@IAAXXZ )\n__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base@internal@tbb@@IAAXABV123@@Z )\n__TBB_SYMBOL( ?internal_size@concurrent_queue_base@internal@tbb@@IBAHXZ )\n__TBB_SYMBOL( ??0concurrent_queue_base@internal@tbb@@IAA@I@Z )\n__TBB_SYMBOL( ??0concurrent_queue_iterator_base@internal@tbb@@IAA@ABVconcurrent_queue_base@12@@Z )\n__TBB_SYMBOL( ??1concurrent_queue_base@internal@tbb@@MAA@XZ )\n__TBB_SYMBOL( ??1concurrent_queue_iterator_base@internal@tbb@@IAA@XZ )\n__TBB_SYMBOL( ?internal_pop@concurrent_queue_base@internal@tbb@@IAAXPAX@Z )\n__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAA_NPAX@Z )\n__TBB_SYMBOL( ?internal_push@concurrent_queue_base@internal@tbb@@IAAXPBX@Z )\n__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAA_NPBX@Z )\n__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAAXHI@Z )\n#endif\n\n// concurrent_queue v3\n__TBB_SYMBOL( ??1concurrent_queue_iterator_base_v3@internal@tbb@@IAA@XZ )\n__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IAA@ABVconcurrent_queue_base_v3@12@@Z )\n__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IAA@ABVconcurrent_queue_base_v3@12@I@Z )\n__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXXZ )\n__TBB_SYMBOL( 
?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXABV123@@Z )\n__TBB_SYMBOL( ??0concurrent_queue_base_v3@internal@tbb@@IAA@I@Z )\n__TBB_SYMBOL( ??1concurrent_queue_base_v3@internal@tbb@@MAA@XZ )\n__TBB_SYMBOL( ?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAAXPAX@Z )\n__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAA_NPAX@Z )\n__TBB_SYMBOL( ?internal_abort@concurrent_queue_base_v3@internal@tbb@@IAAXXZ )\n__TBB_SYMBOL( ?internal_push@concurrent_queue_base_v3@internal@tbb@@IAAXPBX@Z )\n__TBB_SYMBOL( ?internal_push_move@concurrent_queue_base_v8@internal@tbb@@IAAXPBX@Z )\n__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAA_NPBX@Z )\n__TBB_SYMBOL( ?internal_push_move_if_not_full@concurrent_queue_base_v8@internal@tbb@@IAA_NPBX@Z )\n__TBB_SYMBOL( ?internal_size@concurrent_queue_base_v3@internal@tbb@@IBAHXZ )\n__TBB_SYMBOL( ?internal_empty@concurrent_queue_base_v3@internal@tbb@@IBA_NXZ )\n__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAAXHI@Z )\n__TBB_SYMBOL( ?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAAXXZ )\n__TBB_SYMBOL( ?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBAXXZ )\n__TBB_SYMBOL( ?assign@concurrent_queue_base_v3@internal@tbb@@IAAXABV123@@Z )\n__TBB_SYMBOL( ?move_content@concurrent_queue_base_v8@internal@tbb@@IAAXAAV123@@Z )\n\n#if !TBB_NO_LEGACY\n// concurrent_vector.cpp v2\n__TBB_SYMBOL( ?internal_assign@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z )\n__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base@internal@tbb@@IBAIXZ )\n__TBB_SYMBOL( ?internal_clear@concurrent_vector_base@internal@tbb@@IAAXP6AXPAXI@Z_N@Z )\n__TBB_SYMBOL( ?internal_copy@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z )\n__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base@internal@tbb@@IAAIIIP6AXPAXI@Z@Z )\n__TBB_SYMBOL( 
?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAAXIIP6AXPAXI@Z@Z )\n__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base@internal@tbb@@IAAPAXIAAI@Z )\n__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base@internal@tbb@@IAAXIII@Z )\n#endif\n\n// concurrent_vector v3\n__TBB_SYMBOL( ??1concurrent_vector_base_v3@internal@tbb@@IAA@XZ )\n__TBB_SYMBOL( ?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z )\n__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBAIXZ )\n__TBB_SYMBOL( ?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAAIP6AXPAXI@Z@Z )\n__TBB_SYMBOL( ?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z )\n__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAAIIIP6AXPAXPBXI@Z1@Z )\n__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAAXIIP6AXPAXPBXI@Z1@Z )\n__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAAPAXIAAI@Z )\n__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAAXIII@Z )\n__TBB_SYMBOL( ?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAAPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z )\n__TBB_SYMBOL( ?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAAXAAV123@@Z )\n__TBB_SYMBOL( ?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBAXI@Z )\n__TBB_SYMBOL( ?internal_resize@concurrent_vector_base_v3@internal@tbb@@IAAXIIIPBXP6AXPAXI@ZP6AX10I@Z@Z )\n__TBB_SYMBOL( ?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IAAIIIP6AXPAXPBXI@Z1@Z )\n\n// tbb_thread\n__TBB_SYMBOL( ?join@tbb_thread_v3@internal@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?detach@tbb_thread_v3@internal@tbb@@QAAXXZ )\n__TBB_SYMBOL( ?internal_start@tbb_thread_v3@internal@tbb@@AAAXP6AIPAX@Z0@Z )\n__TBB_SYMBOL( ?allocate_closure_v3@internal@tbb@@YAPAXI@Z )\n__TBB_SYMBOL( ?free_closure_v3@internal@tbb@@YAXPAX@Z )\n__TBB_SYMBOL( 
?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ )\n__TBB_SYMBOL( ?thread_yield_v3@internal@tbb@@YAXXZ )\n__TBB_SYMBOL( ?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z )\n__TBB_SYMBOL( ?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z )\n__TBB_SYMBOL( ?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ )\n\n// condition_variable\n__TBB_SYMBOL( ?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAATcondvar_impl_t@123@PAVmutex@3@PBVinterval_t@tick_count@3@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z )\n__TBB_SYMBOL( ?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z )\n\n#undef __TBB_SYMBOL\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/x86_rtm_rw_mutex.cpp",
    "content": "/*\n    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n\n    This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n    you can redistribute it and/or modify it under the terms of the GNU General Public License\n    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n    See  the GNU General Public License for more details.   You should have received a copy of\n    the  GNU General Public License along with Threading Building Blocks; if not, write to the\n    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n\n    As a special exception,  you may use this file  as part of a free software library without\n    restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n    functions from this file, or you compile this file and link it with other files to produce\n    an executable,  this file does not by itself cause the resulting executable to be covered\n    by the GNU General Public License. This exception does not however invalidate any other\n    reasons why the executable file might be covered by the GNU General Public License.\n*/\n\n#include \"tbb/tbb_config.h\"\n#if __TBB_TSX_AVAILABLE\n#include \"tbb/spin_rw_mutex.h\"\n#include \"tbb/tbb_machine.h\"\n#include \"itt_notify.h\"\n#include \"governor.h\"\n#include \"tbb/atomic.h\"\n\n// __TBB_RW_MUTEX_DELAY_TEST shifts the point where flags aborting speculation are\n// added to the read-set of the operation.  
If 1, will add the test just before\n// the transaction is ended.\n#ifndef __TBB_RW_MUTEX_DELAY_TEST\n    #define __TBB_RW_MUTEX_DELAY_TEST 1\n#endif\n\n#if defined(_MSC_VER) && defined(_Wp64)\n    // Workaround for overzealous compiler warnings in /Wp64 mode\n    #pragma warning (disable: 4244)\n#endif\n\nnamespace tbb {\n\nnamespace interface8 {\nnamespace internal {\n\n// abort code for mutexes that detect a conflict with another thread.\n// value is hexadecimal\nenum {\n    speculation_transaction_aborted = 0x01,\n    speculation_can_retry           = 0x02,\n    speculation_memadd_conflict     = 0x04,\n    speculation_buffer_overflow     = 0x08,\n    speculation_breakpoint_hit      = 0x10,\n    speculation_nested_abort        = 0x20,\n    speculation_xabort_mask         = 0xFF000000,\n    speculation_xabort_shift        = 24,\n    speculation_retry               = speculation_transaction_aborted\n                                      | speculation_can_retry\n                                      | speculation_memadd_conflict\n};\n\n// maximum number of times to retry\nstatic const int retry_threshold_read = 10;\nstatic const int retry_threshold_write = 10;\n\n//! 
Release speculative mutex\nvoid x86_rtm_rw_mutex::internal_release(x86_rtm_rw_mutex::scoped_lock& s) {\n    switch(s.transaction_state) {\n    case RTM_transacting_writer:\n    case RTM_transacting_reader:\n        {\n            __TBB_ASSERT(__TBB_machine_is_in_transaction(), \"transaction_state && not speculating\");\n#if __TBB_RW_MUTEX_DELAY_TEST\n            if(s.transaction_state == RTM_transacting_reader) {\n                if(this->w_flag) __TBB_machine_transaction_conflict_abort();\n            } else {\n                if(this->state) __TBB_machine_transaction_conflict_abort();\n            }\n#endif\n            __TBB_machine_end_transaction();\n            s.my_scoped_lock.internal_set_mutex(NULL);\n        }\n        break;\n    case RTM_real_reader:\n        __TBB_ASSERT(!this->w_flag, \"w_flag set but read lock acquired\");\n        s.my_scoped_lock.release();\n        break;\n    case RTM_real_writer:\n        __TBB_ASSERT(this->w_flag, \"w_flag unset but write lock acquired\");\n        this->w_flag = false;\n        s.my_scoped_lock.release();\n        break;\n    case RTM_not_in_mutex:\n        __TBB_ASSERT(false, \"RTM_not_in_mutex, but in release\");\n    default:\n        __TBB_ASSERT(false, \"invalid transaction_state\");\n    }\n    s.transaction_state = RTM_not_in_mutex;\n}\n\n//! 
Acquire write lock on the given mutex.\nvoid x86_rtm_rw_mutex::internal_acquire_writer(x86_rtm_rw_mutex::scoped_lock& s, bool only_speculate)\n{\n    __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, \"scoped_lock already in transaction\");\n    if(tbb::internal::governor::speculation_enabled()) {\n        int num_retries = 0;\n        unsigned int abort_code;\n        do {\n            tbb::internal::atomic_backoff backoff;\n            if(this->state) {\n                if(only_speculate) return;\n                do {\n                    backoff.pause();  // test the spin_rw_mutex (real readers or writers)\n                } while(this->state);\n            }\n            // _xbegin returns -1 on success or the abort code, so capture it\n            if(( abort_code = __TBB_machine_begin_transaction()) == ~(unsigned int)(0) )\n            {\n                // started speculation\n#if !__TBB_RW_MUTEX_DELAY_TEST\n                if(this->state) {  // add spin_rw_mutex to read-set.\n                    // reader or writer grabbed the lock, so abort.\n                    __TBB_machine_transaction_conflict_abort();\n                }\n#endif\n                s.transaction_state = RTM_transacting_writer;\n                s.my_scoped_lock.internal_set_mutex(this);  // need mutex for release()\n                return;  // successfully started speculation\n            }\n            ++num_retries;\n        } while( (abort_code & speculation_retry) != 0 && (num_retries < retry_threshold_write) );\n    }\n\n    if(only_speculate) return;              // should apply a real try_lock...\n    s.my_scoped_lock.acquire(*this, true);  // kill transactional writers\n    __TBB_ASSERT(!w_flag, \"After acquire for write, w_flag already true\");\n    w_flag = true;                          // kill transactional readers\n    s.transaction_state = RTM_real_writer;\n    return;\n}\n\n//! Acquire read lock on given mutex.\n//  only_speculate : true if we are doing a try_acquire.  
If true and we fail to speculate, don't\n//     really acquire the lock, return and do a try_acquire on the contained spin_rw_mutex.  If\n//     the lock is already held by a writer, just return.\nvoid x86_rtm_rw_mutex::internal_acquire_reader(x86_rtm_rw_mutex::scoped_lock& s, bool only_speculate) {\n    __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, \"scoped_lock already in transaction\");\n    if(tbb::internal::governor::speculation_enabled()) {\n        int num_retries = 0;\n        unsigned int abort_code;\n        do {\n            tbb::internal::atomic_backoff backoff;\n            // if in try_acquire, and lock is held as writer, don't attempt to speculate.\n            if(w_flag) {\n                if(only_speculate) return;\n                do {\n                    backoff.pause();  // test the spin_rw_mutex (real readers or writers)\n                } while(w_flag);\n            }\n            // _xbegin returns -1 on success or the abort code, so capture it\n            if((abort_code = __TBB_machine_begin_transaction()) == ~(unsigned int)(0) )\n            {\n                // started speculation\n#if !__TBB_RW_MUTEX_DELAY_TEST\n                if(w_flag) {  // add w_flag to read-set.\n                    __TBB_machine_transaction_conflict_abort();  // writer grabbed the lock, so abort.\n                }\n#endif\n                s.transaction_state = RTM_transacting_reader;\n                s.my_scoped_lock.internal_set_mutex(this);  // need mutex for release()\n                return;  // successfully started speculation\n            }\n            // fallback path\n            // retry only if there is any hope of getting into a transaction soon\n            // Retry in the following cases (from Section 8.3.5 of Intel(R)\n            // Architecture Instruction Set Extensions Programming Reference):\n            // 1. abort caused by XABORT instruction (bit 0 of EAX register is set)\n            // 2. 
the transaction may succeed on a retry (bit 1 of EAX register is set)\n            // 3. if another logical processor conflicted with a memory address\n            //    that was part of the transaction that aborted (bit 2 of EAX register is set)\n            // That is, retry if (abort_code & 0x7) is non-zero\n            ++num_retries;\n        } while( (abort_code & speculation_retry) != 0 && (num_retries < retry_threshold_read) );\n    }\n\n    if(only_speculate) return;\n    s.my_scoped_lock.acquire( *this, false );\n    s.transaction_state = RTM_real_reader;\n}\n\n//! Upgrade reader to become a writer.\n/** Returns whether the upgrade happened without releasing and re-acquiring the lock */\nbool x86_rtm_rw_mutex::internal_upgrade(x86_rtm_rw_mutex::scoped_lock& s)\n{\n    switch(s.transaction_state) {\n    case RTM_real_reader: {\n            s.transaction_state = RTM_real_writer;\n            bool no_release = s.my_scoped_lock.upgrade_to_writer();\n            __TBB_ASSERT(!w_flag, \"After upgrade_to_writer, w_flag already true\");\n            w_flag = true;\n            return no_release;\n        }\n    case RTM_transacting_reader:\n        s.transaction_state = RTM_transacting_writer;\n        // don't need to add w_flag to read_set even if __TBB_RW_MUTEX_DELAY_TEST\n        // because the this pointer (the spin_rw_mutex) will be sufficient on release.\n        return true;\n    default:\n        __TBB_ASSERT(false, \"Invalid state for upgrade\");\n        return false;\n    }\n}\n\n//! 
Downgrade writer to a reader.\nbool x86_rtm_rw_mutex::internal_downgrade(x86_rtm_rw_mutex::scoped_lock& s) {\n    switch(s.transaction_state) {\n    case RTM_real_writer:\n        s.transaction_state = RTM_real_reader;\n        __TBB_ASSERT(w_flag, \"Before downgrade_to_reader w_flag not true\");\n        w_flag = false;\n        return s.my_scoped_lock.downgrade_to_reader();\n    case RTM_transacting_writer:\n#if __TBB_RW_MUTEX_DELAY_TEST\n        if(this->state) {  // a reader or writer has acquired mutex for real.\n            __TBB_machine_transaction_conflict_abort();\n        }\n#endif\n        s.transaction_state = RTM_transacting_reader;\n        return true;\n    default:\n        __TBB_ASSERT(false, \"Invalid state for downgrade\");\n        return false;\n    }\n}\n\n//! Try to acquire write lock on the given mutex.\n//  There may be reader(s) which acquired the spin_rw_mutex, as well as possibly\n//  transactional reader(s).  If this is the case, the acquire will fail, and assigning\n//  w_flag will kill the transactors.  So we only assign w_flag if we have successfully\n//  acquired the lock.\nbool x86_rtm_rw_mutex::internal_try_acquire_writer(x86_rtm_rw_mutex::scoped_lock& s)\n{\n    internal_acquire_writer(s, /*only_speculate=*/true);\n    if(s.transaction_state == RTM_transacting_writer) {\n        return true;\n    }\n    __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, \"Trying to acquire writer which is already allocated\");\n    // transacting write acquire failed.  
try_acquire the real mutex\n    bool result = s.my_scoped_lock.try_acquire(*this, true);\n    if(result) {\n        // only shoot down readers if we're not transacting ourselves\n        __TBB_ASSERT(!w_flag, \"After try_acquire_writer, w_flag already true\");\n        w_flag = true;\n        s.transaction_state = RTM_real_writer;\n    }\n    return result;\n}\n\nvoid x86_rtm_rw_mutex::internal_construct() {\n    ITT_SYNC_CREATE(this, _T(\"tbb::x86_rtm_rw_mutex\"), _T(\"\"));\n}\n\n} // namespace internal\n} // namespace interface8\n} // namespace tbb\n\n#endif /* __TBB_TSX_AVAILABLE */\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbb/xbox360-tbb-export.def",
    "content": "; Copyright 2005-2014 Intel Corporation.  All Rights Reserved.\n;\n; This file is part of Threading Building Blocks. Threading Building Blocks is free software;\n; you can redistribute it and/or modify it under the terms of the GNU General Public License\n; version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is\n; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n; See  the GNU General Public License for more details.   You should have received a copy of\n; the  GNU General Public License along with Threading Building Blocks; if not, write to the\n; Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA\n;\n; As a special exception,  you may use this file  as part of a free software library without\n; restriction.  Specifically,  if other files instantiate templates  or use macros or inline\n; functions from this file, or you compile this file and link it with other files to produce\n; an executable,  this file does not by itself cause the resulting executable to be covered\n; by the GNU General Public License. 
This exception does not however invalidate any other\n; reasons why the executable file might be covered by the GNU General Public License.\n\nEXPORTS\n\n; Assembly-language support that is called directly by clients\n;__TBB_machine_cmpswp1\n;__TBB_machine_cmpswp2\n;__TBB_machine_cmpswp4\n;__TBB_machine_cmpswp8\n;__TBB_machine_fetchadd1\n;__TBB_machine_fetchadd2\n;__TBB_machine_fetchadd4\n;__TBB_machine_fetchadd8\n;__TBB_machine_fetchstore1\n;__TBB_machine_fetchstore2\n;__TBB_machine_fetchstore4\n;__TBB_machine_fetchstore8\n;__TBB_machine_store8\n;__TBB_machine_load8\n;__TBB_machine_trylockbyte\n\n; cache_aligned_allocator.cpp\n?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z @1\n?NFS_GetLineSize@internal@tbb@@YAIXZ @2\n?NFS_Free@internal@tbb@@YAXPAX@Z @3 \n?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z @4\n?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z @5\n?is_malloc_used_v3@internal@tbb@@YA_NXZ @6\n\n; task.cpp v3\n?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBAAAVtask@3@I@Z @7\n?allocate@allocate_child_proxy@internal@tbb@@QBAAAVtask@3@I@Z @8\n?allocate@allocate_continuation_proxy@internal@tbb@@QBAAAVtask@3@I@Z @9\n?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z @10\n?destroy@task@tbb@@QAAXAAV12@@Z @11\n?free@allocate_additional_child_of_proxy@internal@tbb@@QBAXAAVtask@3@@Z @12\n?free@allocate_child_proxy@internal@tbb@@QBAXAAVtask@3@@Z @13\n?free@allocate_continuation_proxy@internal@tbb@@QBAXAAVtask@3@@Z @14\n?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z @15\n?internal_set_ref_count@task@tbb@@AAAXH@Z @16\n?is_owned_by_current_thread@task@tbb@@QBA_NXZ @17\n?note_affinity@task@tbb@@UAAXG@Z @18\n?resize@affinity_partitioner_base_v3@internal@tbb@@AAAXI@Z @19\n?self@task@tbb@@SAAAV12@XZ @20\n?spawn_and_wait_for_all@task@tbb@@QAAXAAVtask_list@2@@Z @21\n?default_num_threads@task_scheduler_init@tbb@@SAHXZ @22\n?initialize@task_scheduler_init@tbb@@QAAXHI@Z @23\n?initialize@task_scheduler_init@tbb@@QAAXH@Z 
@24\n?terminate@task_scheduler_init@tbb@@QAAXXZ @25\n?observe@task_scheduler_observer_v3@internal@tbb@@QAAX_N@Z @26\n\n; exception handling support\n?allocate@allocate_root_with_context_proxy@internal@tbb@@QBAAAVtask@3@I@Z @27\n?free@allocate_root_with_context_proxy@internal@tbb@@QBAXAAVtask@3@@Z @28\n?is_group_execution_cancelled@task_group_context@tbb@@QBA_NXZ @29\n?cancel_group_execution@task_group_context@tbb@@QAA_NXZ @30\n?reset@task_group_context@tbb@@QAAXXZ @31\n?init@task_group_context@tbb@@IAAXXZ @32\n??1task_group_context@tbb@@QAA@XZ @33\n?name@captured_exception@tbb@@UBAPBDXZ @34\n?what@captured_exception@tbb@@UBAPBDXZ @35   \n??1captured_exception@tbb@@UAA@XZ @36\n\n; tbb_misc.cpp\n?assertion_failure@tbb@@YAXPBDH00@Z @37\n?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ @38\n?handle_perror@internal@tbb@@YAXHPBD@Z @39\n?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z @40\n?runtime_warning@internal@tbb@@YAXPBDZZ @41\n\n; tbb_main.cpp\n?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z @42\n?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z @43\n\n; pipeline.cpp\n??0pipeline@tbb@@QAA@XZ @44\n??1filter@tbb@@UAA@XZ @45\n??1pipeline@tbb@@UAA@XZ @46   \n??_7pipeline@tbb@@6B@ @47\n?add_filter@pipeline@tbb@@QAAXAAVfilter@2@@Z @48\n?clear@pipeline@tbb@@QAAXXZ @49\n?inject_token@pipeline@tbb@@AAAXAAVtask@2@@Z @50\n?run@pipeline@tbb@@QAAXI@Z @51\n\n; queuing_rw_mutex.cpp\n?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAAXAAV23@_N@Z @52\n?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ @53\n?release@scoped_lock@queuing_rw_mutex@tbb@@QAAXXZ @54\n?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ @55\n?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAA_NAAV23@_N@Z @56\n\n#if !TBB_NO_LEGACY\n; spin_rw_mutex.cpp v2\n?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z @57\n?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z @58\n?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z 
@59\n?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z @60\n?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z @61   \n?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z @62\n?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z @63\n?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z @64\n?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z @65\n#endif\n\n; spin_rw_mutex v3\n?internal_upgrade@spin_rw_mutex_v3@tbb@@AAA_NXZ @66\n?internal_downgrade@spin_rw_mutex_v3@tbb@@AAAXXZ @67\n?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAAXXZ @68\n?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ @69\n?internal_release_reader@spin_rw_mutex_v3@tbb@@AAAXXZ @70\n?internal_release_writer@spin_rw_mutex_v3@tbb@@AAAXXZ @71\n?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAA_NXZ @72\n?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ @73\n\n; spin_mutex.cpp\n?internal_acquire@scoped_lock@spin_mutex@tbb@@AAAXAAV23@@Z @74\n?internal_release@scoped_lock@spin_mutex@tbb@@AAAXXZ @75\n?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAA_NAAV23@@Z @76\n\n; mutex.cpp\n?internal_acquire@scoped_lock@mutex@tbb@@AAAXAAV23@@Z @77\n?internal_release@scoped_lock@mutex@tbb@@AAAXXZ @78\n?internal_try_acquire@scoped_lock@mutex@tbb@@AAA_NAAV23@@Z @79\n?internal_construct@mutex@tbb@@AAAXXZ @80\n?internal_destroy@mutex@tbb@@AAAXXZ @81\n\n; recursive_mutex.cpp\n?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAAXAAV23@@Z @82 \n?internal_release@scoped_lock@recursive_mutex@tbb@@AAAXXZ @83\n?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAA_NAAV23@@Z @84\n?internal_construct@recursive_mutex@tbb@@AAAXXZ @85\n?internal_destroy@recursive_mutex@tbb@@AAAXXZ @86\n\n; queuing_mutex.cpp\n?acquire@scoped_lock@queuing_mutex@tbb@@QAAXAAV23@@Z @87\n?release@scoped_lock@queuing_mutex@tbb@@QAAXXZ @88\n?try_acquire@scoped_lock@queuing_mutex@tbb@@QAA_NAAV23@@Z @89\n\n; concurrent_hash_map.cpp\n?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBA_NXZ 
@90\n\n#if !TBB_NO_LEGACY\n; concurrent_queue.cpp v2\n?advance@concurrent_queue_iterator_base@internal@tbb@@IAAXXZ @91\n?assign@concurrent_queue_iterator_base@internal@tbb@@IAAXABV123@@Z @92\n?internal_size@concurrent_queue_base@internal@tbb@@IBAHXZ @93\n??0concurrent_queue_base@internal@tbb@@IAA@I@Z @94\n??0concurrent_queue_iterator_base@internal@tbb@@IAA@ABVconcurrent_queue_base@12@@Z @95\n??1concurrent_queue_base@internal@tbb@@MAA@XZ @96\n??1concurrent_queue_iterator_base@internal@tbb@@IAA@XZ @97\n?internal_pop@concurrent_queue_base@internal@tbb@@IAAXPAX@Z @98\n?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAA_NPAX@Z @99\n?internal_push@concurrent_queue_base@internal@tbb@@IAAXPBX@Z @100\n?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAA_NPBX@Z @101\n?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAAXHI@Z @102\n#endif\n\n; concurrent_queue v3\n??1concurrent_queue_iterator_base_v3@internal@tbb@@IAA@XZ @103\n??0concurrent_queue_iterator_base_v3@internal@tbb@@IAA@ABVconcurrent_queue_base_v3@12@@Z @104\n?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXXZ @105\n?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXABV123@@Z @106\n??0concurrent_queue_base_v3@internal@tbb@@IAA@I@Z @107\n??1concurrent_queue_base_v3@internal@tbb@@MAA@XZ @108\n?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAAXPAX@Z @109\n?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAA_NPAX@Z @110\n?internal_push@concurrent_queue_base_v3@internal@tbb@@IAAXPBX@Z @111\n?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAA_NPBX@Z @112\n?internal_size@concurrent_queue_base_v3@internal@tbb@@IBAHXZ @113\n?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAAXHI@Z @114\n?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAAXXZ @115\n?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBAXXZ @116\n\n#if !TBB_NO_LEGACY\n; concurrent_vector.cpp 
v2\n?internal_assign@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z @117\n?internal_capacity@concurrent_vector_base@internal@tbb@@IBAIXZ @118\n?internal_clear@concurrent_vector_base@internal@tbb@@IAAXP6AXPAXI@Z_N@Z @119\n?internal_copy@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z @120\n?internal_grow_by@concurrent_vector_base@internal@tbb@@IAAIIIP6AXPAXI@Z@Z @121\n?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAAXIIP6AXPAXI@Z@Z @122\n?internal_push_back@concurrent_vector_base@internal@tbb@@IAAPAXIAAI@Z @123\n?internal_reserve@concurrent_vector_base@internal@tbb@@IAAXIII@Z @124\n#endif\n\n; concurrent_vector v3\n??1concurrent_vector_base_v3@internal@tbb@@IAA@XZ @125\n?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z @126\n?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBAIXZ @127\n?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAAIP6AXPAXI@Z@Z @128\n?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z @129\n?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAAIIIP6AXPAXPBXI@Z1@Z @130\n?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAAXIIP6AXPAXPBXI@Z1@Z @131\n?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAAPAXIAAI@Z @132\n?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAAXIII@Z @133\n?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAAPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z @134\n?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAAXAAV123@@Z @135\n?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBAXI@Z @136\n\n; tbb_thread\n?join@tbb_thread_v3@internal@tbb@@QAAXXZ @137\n?detach@tbb_thread_v3@internal@tbb@@QAAXXZ @138\n?internal_start@tbb_thread_v3@internal@tbb@@AAAXP6AIPAX@Z0@Z @139\n?allocate_closure_v3@internal@tbb@@YAPAXI@Z @140\n?free_closure_v3@internal@tbb@@YAXPAX@Z @141\n?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ 
@142\n?thread_yield_v3@internal@tbb@@YAXXZ @143\n?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z @144\n?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z @145\n?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ @146\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/tbbqueue.h",
    "content": "#pragma once\n\n#include <utility>\n\n#include \"tbb/concurrent_queue.h\"\n#include \"wrappers.h\"\n\ntemplate<typename T>\nstruct TbbQueueWrapper\n{\npublic:\n\ttypedef DummyToken producer_token_t;\n\ttypedef DummyToken consumer_token_t;\n\t\npublic:\n\ttemplate<typename U>\n\tinline bool enqueue(U&& item)\n\t{\n\t\tq.push(std::forward<U>(item));\n\t\treturn true;\t\t// assume successful allocation for the sake of the benchmarks\n\t}\n\t\n\tinline bool try_dequeue(T& item)\n\t{\n\t\treturn q.try_pop(item);\n\t}\n\t\n\t// Dummy token methods (not used)\n\tbool enqueue(producer_token_t const&, T const&) { return false; }\n\tbool try_enqueue(producer_token_t, T const&) { return false; }\n\tbool try_dequeue(consumer_token_t, T& item) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(It, size_t) { return false; }\n\ttemplate<typename It> bool enqueue_bulk(producer_token_t const&, It, size_t) { return false; }\n\ttemplate<typename It> size_t try_dequeue_bulk(It, size_t) { return 0; }\n\ttemplate<typename It> size_t try_dequeue_bulk(consumer_token_t, It, size_t) { return 0; }\n\t\nprivate:\n\ttbb::concurrent_queue<T> q;\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/benchmarks/wrappers.h",
    "content": "#pragma once\n\nstruct DummyToken\n{\n\ttemplate<typename TQueue>\n\tDummyToken(TQueue const&)\n\t{\n\t}\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/blockingconcurrentqueue.h",
    "content": "// Provides an efficient blocking version of moodycamel::ConcurrentQueue.\n// ©2015-2016 Cameron Desrochers. Distributed under the terms of the simplified\n// BSD license, available at the top of concurrentqueue.h.\n// Uses Jeff Preshing's semaphore implementation (under the terms of its\n// separate zlib license, embedded below).\n\n#pragma once\n\n#include \"concurrentqueue.h\"\n#include <type_traits>\n#include <cerrno>\n#include <memory>\n#include <chrono>\n#include <ctime>\n\n#if defined(_WIN32)\n// Avoid including windows.h in a header; we only need a handful of\n// items, so we'll redeclare them here (this is relatively safe since\n// the API generally has to remain stable between Windows versions).\n// I know this is an ugly hack but it still beats polluting the global\n// namespace with thousands of generic names or adding a .cpp for nothing.\nextern \"C\" {\n\tstruct _SECURITY_ATTRIBUTES;\n\t__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);\n\t__declspec(dllimport) int __stdcall CloseHandle(void* hObject);\n\t__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);\n\t__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);\n}\n#elif defined(__MACH__)\n#include <mach/mach.h>\n#elif defined(__unix__)\n#include <semaphore.h>\n#endif\n\nnamespace moodycamel\n{\nnamespace details\n{\n\t// Code in the mpmc_sema namespace below is an adaptation of Jeff Preshing's\n\t// portable + lightweight semaphore implementations, originally from\n\t// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h\n\t// LICENSE:\n\t// Copyright (c) 2015 Jeff Preshing\n\t//\n\t// This software is provided 'as-is', without any express or implied\n\t// warranty. 
In no event will the authors be held liable for any damages\n\t// arising from the use of this software.\n\t//\n\t// Permission is granted to anyone to use this software for any purpose,\n\t// including commercial applications, and to alter it and redistribute it\n\t// freely, subject to the following restrictions:\n\t//\n\t// 1. The origin of this software must not be misrepresented; you must not\n\t//\tclaim that you wrote the original software. If you use this software\n\t//\tin a product, an acknowledgement in the product documentation would be\n\t//\tappreciated but is not required.\n\t// 2. Altered source versions must be plainly marked as such, and must not be\n\t//\tmisrepresented as being the original software.\n\t// 3. This notice may not be removed or altered from any source distribution.\n\tnamespace mpmc_sema\n\t{\n#if defined(_WIN32)\n\t\tclass Semaphore\n\t\t{\n\t\tprivate:\n\t\t\tvoid* m_hSema;\n\t\t\t\n\t\t\tSemaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;\n\t\t\tSemaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;\n\n\t\tpublic:\n\t\t\tSemaphore(int initialCount = 0)\n\t\t\t{\n\t\t\t\tassert(initialCount >= 0);\n\t\t\t\tconst long maxLong = 0x7fffffff;\n\t\t\t\tm_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);\n\t\t\t}\n\n\t\t\t~Semaphore()\n\t\t\t{\n\t\t\t\tCloseHandle(m_hSema);\n\t\t\t}\n\n\t\t\tvoid wait()\n\t\t\t{\n\t\t\t\tconst unsigned long infinite = 0xffffffff;\n\t\t\t\tWaitForSingleObject(m_hSema, infinite);\n\t\t\t}\n\t\t\t\n\t\t\tbool try_wait()\n\t\t\t{\n\t\t\t\tconst unsigned long RC_WAIT_TIMEOUT = 0x00000102;\n\t\t\t\treturn WaitForSingleObject(m_hSema, 0) != RC_WAIT_TIMEOUT;\n\t\t\t}\n\t\t\t\n\t\t\tbool timed_wait(std::uint64_t usecs)\n\t\t\t{\n\t\t\t\tconst unsigned long RC_WAIT_TIMEOUT = 0x00000102;\n\t\t\t\treturn WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) != RC_WAIT_TIMEOUT;\n\t\t\t}\n\n\t\t\tvoid signal(int count = 
1)\n\t\t\t{\n\t\t\t\tReleaseSemaphore(m_hSema, count, nullptr);\n\t\t\t}\n\t\t};\n#elif defined(__MACH__)\n\t\t//---------------------------------------------------------\n\t\t// Semaphore (Apple iOS and OSX)\n\t\t// Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html\n\t\t//---------------------------------------------------------\n\t\tclass Semaphore\n\t\t{\n\t\tprivate:\n\t\t\tsemaphore_t m_sema;\n\n\t\t\tSemaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;\n\t\t\tSemaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;\n\n\t\tpublic:\n\t\t\tSemaphore(int initialCount = 0)\n\t\t\t{\n\t\t\t\tassert(initialCount >= 0);\n\t\t\t\tsemaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);\n\t\t\t}\n\n\t\t\t~Semaphore()\n\t\t\t{\n\t\t\t\tsemaphore_destroy(mach_task_self(), m_sema);\n\t\t\t}\n\n\t\t\tvoid wait()\n\t\t\t{\n\t\t\t\tsemaphore_wait(m_sema);\n\t\t\t}\n\t\t\t\n\t\t\tbool try_wait()\n\t\t\t{\n\t\t\t\treturn timed_wait(0);\n\t\t\t}\n\t\t\t\n\t\t\tbool timed_wait(std::uint64_t timeout_usecs)\n\t\t\t{\n\t\t\t\tmach_timespec_t ts;\n\t\t\t\tts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);\n\t\t\t\tts.tv_nsec = (timeout_usecs % 1000000) * 1000;\n\n\t\t\t\t// added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html\n\t\t\t\tkern_return_t rc = semaphore_timedwait(m_sema, ts);\n\n\t\t\t\treturn rc != KERN_OPERATION_TIMED_OUT && rc != KERN_ABORTED;\n\t\t\t}\n\n\t\t\tvoid signal()\n\t\t\t{\n\t\t\t\tsemaphore_signal(m_sema);\n\t\t\t}\n\n\t\t\tvoid signal(int count)\n\t\t\t{\n\t\t\t\twhile (count-- > 0)\n\t\t\t\t{\n\t\t\t\t\tsemaphore_signal(m_sema);\n\t\t\t\t}\n\t\t\t}\n\t\t};\n#elif defined(__unix__)\n\t\t//---------------------------------------------------------\n\t\t// Semaphore (POSIX, Linux)\n\t\t//---------------------------------------------------------\n\t\tclass 
Semaphore\n\t\t{\n\t\tprivate:\n\t\t\tsem_t m_sema;\n\n\t\t\tSemaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;\n\t\t\tSemaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;\n\n\t\tpublic:\n\t\t\tSemaphore(int initialCount = 0)\n\t\t\t{\n\t\t\t\tassert(initialCount >= 0);\n\t\t\t\tsem_init(&m_sema, 0, initialCount);\n\t\t\t}\n\n\t\t\t~Semaphore()\n\t\t\t{\n\t\t\t\tsem_destroy(&m_sema);\n\t\t\t}\n\n\t\t\tvoid wait()\n\t\t\t{\n\t\t\t\t// http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error\n\t\t\t\tint rc;\n\t\t\t\tdo {\n\t\t\t\t\trc = sem_wait(&m_sema);\n\t\t\t\t} while (rc == -1 && errno == EINTR);\n\t\t\t}\n\n\t\t\tbool try_wait()\n\t\t\t{\n\t\t\t\tint rc;\n\t\t\t\tdo {\n\t\t\t\t\trc = sem_trywait(&m_sema);\n\t\t\t\t} while (rc == -1 && errno == EINTR);\n\t\t\t\treturn !(rc == -1 && errno == EAGAIN);\n\t\t\t}\n\n\t\t\tbool timed_wait(std::uint64_t usecs)\n\t\t\t{\n\t\t\t\tstruct timespec ts;\n\t\t\t\tconst int usecs_in_1_sec = 1000000;\n\t\t\t\tconst int nsecs_in_1_sec = 1000000000;\n\t\t\t\tclock_gettime(CLOCK_REALTIME, &ts);\n\t\t\t\tts.tv_sec += usecs / usecs_in_1_sec;\n\t\t\t\tts.tv_nsec += (usecs % usecs_in_1_sec) * 1000;\n\t\t\t\t// sem_timedwait bombs if you have more than 1e9 in tv_nsec\n\t\t\t\t// so we have to clean things up before passing it in\n\t\t\t\tif (ts.tv_nsec >= nsecs_in_1_sec) {\n\t\t\t\t\tts.tv_nsec -= nsecs_in_1_sec;\n\t\t\t\t\t++ts.tv_sec;\n\t\t\t\t}\n\n\t\t\t\tint rc;\n\t\t\t\tdo {\n\t\t\t\t\trc = sem_timedwait(&m_sema, &ts);\n\t\t\t\t} while (rc == -1 && errno == EINTR);\n\t\t\t\treturn !(rc == -1 && errno == ETIMEDOUT);\n\t\t\t}\n\n\t\t\tvoid signal()\n\t\t\t{\n\t\t\t\tsem_post(&m_sema);\n\t\t\t}\n\n\t\t\tvoid signal(int count)\n\t\t\t{\n\t\t\t\twhile (count-- > 0)\n\t\t\t\t{\n\t\t\t\t\tsem_post(&m_sema);\n\t\t\t\t}\n\t\t\t}\n\t\t};\n#else\n#error Unsupported platform! 
(No semaphore wrapper available)\n#endif\n\n\t\t//---------------------------------------------------------\n\t\t// LightweightSemaphore\n\t\t//---------------------------------------------------------\n\t\tclass LightweightSemaphore\n\t\t{\n\t\tpublic:\n\t\t\ttypedef std::make_signed<std::size_t>::type ssize_t;\n\n\t\tprivate:\n\t\t\tstd::atomic<ssize_t> m_count;\n\t\t\tSemaphore m_sema;\n\n\t\t\tbool waitWithPartialSpinning(std::int64_t timeout_usecs = -1)\n\t\t\t{\n\t\t\t\tssize_t oldCount;\n\t\t\t\t// Is there a better way to set the initial spin count?\n\t\t\t\t// If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,\n\t\t\t\t// as threads start hitting the kernel semaphore.\n\t\t\t\tint spin = 10000;\n\t\t\t\twhile (--spin >= 0)\n\t\t\t\t{\n\t\t\t\t\toldCount = m_count.load(std::memory_order_relaxed);\n\t\t\t\t\tif ((oldCount > 0) && m_count.compare_exchange_strong(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed))\n\t\t\t\t\t\treturn true;\n\t\t\t\t\tstd::atomic_signal_fence(std::memory_order_acquire);\t // Prevent the compiler from collapsing the loop.\n\t\t\t\t}\n\t\t\t\toldCount = m_count.fetch_sub(1, std::memory_order_acquire);\n\t\t\t\tif (oldCount > 0)\n\t\t\t\t\treturn true;\n\t\t\t\tif (timeout_usecs < 0)\n\t\t\t\t{\n\t\t\t\t\tm_sema.wait();\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\tif (m_sema.timed_wait((std::uint64_t)timeout_usecs))\n\t\t\t\t\treturn true;\n\t\t\t\t// At this point, we've timed out waiting for the semaphore, but the\n\t\t\t\t// count is still decremented indicating we may still be waiting on\n\t\t\t\t// it. So we have to re-adjust the count, but only if the semaphore\n\t\t\t\t// wasn't signaled enough times for us too since then. 
If it was, we\n\t\t\t\t// need to release the semaphore too.\n\t\t\t\twhile (true)\n\t\t\t\t{\n\t\t\t\t\toldCount = m_count.load(std::memory_order_acquire);\n\t\t\t\t\tif (oldCount >= 0 && m_sema.try_wait())\n\t\t\t\t\t\treturn true;\n\t\t\t\t\tif (oldCount < 0 && m_count.compare_exchange_strong(oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed))\n\t\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tssize_t waitManyWithPartialSpinning(ssize_t max, std::int64_t timeout_usecs = -1)\n\t\t\t{\n\t\t\t\tassert(max > 0);\n\t\t\t\tssize_t oldCount;\n\t\t\t\tint spin = 10000;\n\t\t\t\twhile (--spin >= 0)\n\t\t\t\t{\n\t\t\t\t\toldCount = m_count.load(std::memory_order_relaxed);\n\t\t\t\t\tif (oldCount > 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tssize_t newCount = oldCount > max ? oldCount - max : 0;\n\t\t\t\t\t\tif (m_count.compare_exchange_strong(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed))\n\t\t\t\t\t\t\treturn oldCount - newCount;\n\t\t\t\t\t}\n\t\t\t\t\tstd::atomic_signal_fence(std::memory_order_acquire);\n\t\t\t\t}\n\t\t\t\toldCount = m_count.fetch_sub(1, std::memory_order_acquire);\n\t\t\t\tif (oldCount <= 0)\n\t\t\t\t{\n\t\t\t\t\tif (timeout_usecs < 0)\n\t\t\t\t\t\tm_sema.wait();\n\t\t\t\t\telse if (!m_sema.timed_wait((std::uint64_t)timeout_usecs))\n\t\t\t\t\t{\n\t\t\t\t\t\twhile (true)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\toldCount = m_count.load(std::memory_order_acquire);\n\t\t\t\t\t\t\tif (oldCount >= 0 && m_sema.try_wait())\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tif (oldCount < 0 && m_count.compare_exchange_strong(oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed))\n\t\t\t\t\t\t\t\treturn 0;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (max > 1)\n\t\t\t\t\treturn 1 + tryWaitMany(max - 1);\n\t\t\t\treturn 1;\n\t\t\t}\n\n\t\tpublic:\n\t\t\tLightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount)\n\t\t\t{\n\t\t\t\tassert(initialCount >= 0);\n\t\t\t}\n\n\t\t\tbool 
tryWait()\n\t\t\t{\n\t\t\t\tssize_t oldCount = m_count.load(std::memory_order_relaxed);\n\t\t\t\twhile (oldCount > 0)\n\t\t\t\t{\n\t\t\t\t\tif (m_count.compare_exchange_weak(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed))\n\t\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\treturn false;\n\t\t\t}\n\n\t\t\tvoid wait()\n\t\t\t{\n\t\t\t\tif (!tryWait())\n\t\t\t\t\twaitWithPartialSpinning();\n\t\t\t}\n\n\t\t\tbool wait(std::int64_t timeout_usecs)\n\t\t\t{\n\t\t\t\treturn tryWait() || waitWithPartialSpinning(timeout_usecs);\n\t\t\t}\n\n\t\t\t// Acquires between 0 and (greedily) max, inclusive\n\t\t\tssize_t tryWaitMany(ssize_t max)\n\t\t\t{\n\t\t\t\tassert(max >= 0);\n\t\t\t\tssize_t oldCount = m_count.load(std::memory_order_relaxed);\n\t\t\t\twhile (oldCount > 0)\n\t\t\t\t{\n\t\t\t\t\tssize_t newCount = oldCount > max ? oldCount - max : 0;\n\t\t\t\t\tif (m_count.compare_exchange_weak(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed))\n\t\t\t\t\t\treturn oldCount - newCount;\n\t\t\t\t}\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t\t// Acquires at least one, and (greedily) at most max\n\t\t\tssize_t waitMany(ssize_t max, std::int64_t timeout_usecs)\n\t\t\t{\n\t\t\t\tassert(max >= 0);\n\t\t\t\tssize_t result = tryWaitMany(max);\n\t\t\t\tif (result == 0 && max > 0)\n\t\t\t\t\tresult = waitManyWithPartialSpinning(max, timeout_usecs);\n\t\t\t\treturn result;\n\t\t\t}\n\t\t\t\n\t\t\tssize_t waitMany(ssize_t max)\n\t\t\t{\n\t\t\t\tssize_t result = waitMany(max, -1);\n\t\t\t\tassert(result > 0);\n\t\t\t\treturn result;\n\t\t\t}\n\n\t\t\tvoid signal(ssize_t count = 1)\n\t\t\t{\n\t\t\t\tassert(count >= 0);\n\t\t\t\tssize_t oldCount = m_count.fetch_add(count, std::memory_order_release);\n\t\t\t\tssize_t toRelease = -oldCount < count ? 
-oldCount : count;\n\t\t\t\tif (toRelease > 0)\n\t\t\t\t{\n\t\t\t\t\tm_sema.signal((int)toRelease);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tssize_t availableApprox() const\n\t\t\t{\n\t\t\t\tssize_t count = m_count.load(std::memory_order_relaxed);\n\t\t\t\treturn count > 0 ? count : 0;\n\t\t\t}\n\t\t};\n\t}\t// end namespace mpmc_sema\n}\t// end namespace details\n\n\n// This is a blocking version of the queue. It has an almost identical interface to\n// the normal non-blocking version, with the addition of various wait_dequeue() methods\n// and the removal of producer-specific dequeue methods.\ntemplate<typename T, typename Traits = ConcurrentQueueDefaultTraits>\nclass BlockingConcurrentQueue\n{\nprivate:\n\ttypedef ::moodycamel::ConcurrentQueue<T, Traits> ConcurrentQueue;\n\ttypedef details::mpmc_sema::LightweightSemaphore LightweightSemaphore;\n\npublic:\n\ttypedef typename ConcurrentQueue::producer_token_t producer_token_t;\n\ttypedef typename ConcurrentQueue::consumer_token_t consumer_token_t;\n\t\n\ttypedef typename ConcurrentQueue::index_t index_t;\n\ttypedef typename ConcurrentQueue::size_t size_t;\n\ttypedef typename std::make_signed<size_t>::type ssize_t;\n\t\n\tstatic const size_t BLOCK_SIZE = ConcurrentQueue::BLOCK_SIZE;\n\tstatic const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = ConcurrentQueue::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD;\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::EXPLICIT_INITIAL_INDEX_SIZE;\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::IMPLICIT_INITIAL_INDEX_SIZE;\n\tstatic const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = ConcurrentQueue::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE;\n\tstatic const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = ConcurrentQueue::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE;\n\tstatic const size_t MAX_SUBQUEUE_SIZE = ConcurrentQueue::MAX_SUBQUEUE_SIZE;\n\t\npublic:\n\t// Creates a queue with at least `capacity` element slots; note that 
the\n\t// actual number of elements that can be inserted without additional memory\n\t// allocation depends on the number of producers and the block size (e.g. if\n\t// the block size is equal to `capacity`, only a single block will be allocated\n\t// up-front, which means only a single producer will be able to enqueue elements\n\t// without an extra allocation -- blocks aren't shared between producers).\n\t// This method is not thread safe -- it is up to the user to ensure that the\n\t// queue is fully constructed before it starts being used by other threads (this\n\t// includes making the memory effects of construction visible, possibly with a\n\t// memory barrier).\n\texplicit BlockingConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE)\n\t\t: inner(capacity), sema(create<LightweightSemaphore>(), &BlockingConcurrentQueue::template destroy<LightweightSemaphore>)\n\t{\n\t\tassert(reinterpret_cast<ConcurrentQueue*>((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && \"BlockingConcurrentQueue must have ConcurrentQueue as its first member\");\n\t\tif (!sema) {\n\t\t\tMOODYCAMEL_THROW(std::bad_alloc());\n\t\t}\n\t}\n\t\n\tBlockingConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers)\n\t\t: inner(minCapacity, maxExplicitProducers, maxImplicitProducers), sema(create<LightweightSemaphore>(), &BlockingConcurrentQueue::template destroy<LightweightSemaphore>)\n\t{\n\t\tassert(reinterpret_cast<ConcurrentQueue*>((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && \"BlockingConcurrentQueue must have ConcurrentQueue as its first member\");\n\t\tif (!sema) {\n\t\t\tMOODYCAMEL_THROW(std::bad_alloc());\n\t\t}\n\t}\n\t\n\t// Disable copying and copy assignment\n\tBlockingConcurrentQueue(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;\n\tBlockingConcurrentQueue& operator=(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\n\t// Moving is supported, but note that it is *not* a 
thread-safe operation.\n\t// Nobody can use the queue while it's being moved, and the memory effects\n\t// of that move must be propagated to other threads before they can use it.\n\t// Note: When a queue is moved, its tokens are still valid but can only be\n\t// used with the destination queue (i.e. semantically they are moved along\n\t// with the queue itself).\n\tBlockingConcurrentQueue(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT\n\t\t: inner(std::move(other.inner)), sema(std::move(other.sema))\n\t{ }\n\t\n\tinline BlockingConcurrentQueue& operator=(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\treturn swap_internal(other);\n\t}\n\t\n\t// Swaps this queue's state with the other's. Not thread-safe.\n\t// Swapping two queues does not invalidate their tokens, however\n\t// the tokens that were created for one queue must be used with\n\t// only the swapped queue (i.e. the tokens are tied to the\n\t// queue's movable state, not the object itself).\n\tinline void swap(BlockingConcurrentQueue& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tswap_internal(other);\n\t}\n\t\nprivate:\n\tBlockingConcurrentQueue& swap_internal(BlockingConcurrentQueue& other)\n\t{\n\t\tif (this == &other) {\n\t\t\treturn *this;\n\t\t}\n\t\t\n\t\tinner.swap(other.inner);\n\t\tsema.swap(other.sema);\n\t\treturn *this;\n\t}\n\t\npublic:\n\t// Enqueues a single item (by copying it).\n\t// Allocates memory if required. Only fails if memory allocation fails (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,\n\t// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(T const& item)\n\t{\n\t\tif ((details::likely)(inner.enqueue(item))) {\n\t\t\tsema->signal();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible).\n\t// Allocates memory if required. 
Only fails if memory allocation fails (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,\n\t// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(T&& item)\n\t{\n\t\tif ((details::likely)(inner.enqueue(std::move(item)))) {\n\t\t\tsema->signal();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues a single item (by copying it) using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails (or\n\t// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(producer_token_t const& token, T const& item)\n\t{\n\t\tif ((details::likely)(inner.enqueue(token, item))) {\n\t\t\tsema->signal();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible) using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails (or\n\t// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(producer_token_t const& token, T&& item)\n\t{\n\t\tif ((details::likely)(inner.enqueue(token, std::move(item)))) {\n\t\t\tsema->signal();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues several items.\n\t// Allocates memory if required. 
Only fails if memory allocation fails (or\n\t// implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE\n\t// is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Note: Use std::make_move_iterator if the elements should be moved instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tinline bool enqueue_bulk(It itemFirst, size_t count)\n\t{\n\t\tif ((details::likely)(inner.enqueue_bulk(std::forward<It>(itemFirst), count))) {\n\t\t\tsema->signal((LightweightSemaphore::ssize_t)(ssize_t)count);\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues several items using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails\n\t// (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tinline bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)\n\t{\n\t\tif ((details::likely)(inner.enqueue_bulk(token, std::forward<It>(itemFirst), count))) {\n\t\t\tsema->signal((LightweightSemaphore::ssize_t)(ssize_t)count);\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues a single item (by copying it).\n\t// Does not allocate memory. 
Fails if not enough room to enqueue (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE\n\t// is 0).\n\t// Thread-safe.\n\tinline bool try_enqueue(T const& item)\n\t{\n\t\tif (inner.try_enqueue(item)) {\n\t\t\tsema->signal();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible).\n\t// Does not allocate memory (except for one-time implicit producer).\n\t// Fails if not enough room to enqueue (or implicit production is\n\t// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).\n\t// Thread-safe.\n\tinline bool try_enqueue(T&& item)\n\t{\n\t\tif (inner.try_enqueue(std::move(item))) {\n\t\t\tsema->signal();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues a single item (by copying it) using an explicit producer token.\n\t// Does not allocate memory. Fails if not enough room to enqueue.\n\t// Thread-safe.\n\tinline bool try_enqueue(producer_token_t const& token, T const& item)\n\t{\n\t\tif (inner.try_enqueue(token, item)) {\n\t\t\tsema->signal();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible) using an explicit producer token.\n\t// Does not allocate memory. 
Fails if not enough room to enqueue.\n\t// Thread-safe.\n\tinline bool try_enqueue(producer_token_t const& token, T&& item)\n\t{\n\t\tif (inner.try_enqueue(token, std::move(item))) {\n\t\t\tsema->signal();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues several items.\n\t// Does not allocate memory (except for one-time implicit producer).\n\t// Fails if not enough room to enqueue (or implicit production is\n\t// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tinline bool try_enqueue_bulk(It itemFirst, size_t count)\n\t{\n\t\tif (inner.try_enqueue_bulk(std::forward<It>(itemFirst), count)) {\n\t\t\tsema->signal((LightweightSemaphore::ssize_t)(ssize_t)count);\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Enqueues several items using an explicit producer token.\n\t// Does not allocate memory. Fails if not enough room to enqueue.\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tinline bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)\n\t{\n\t\tif (inner.try_enqueue_bulk(token, std::forward<It>(itemFirst), count)) {\n\t\t\tsema->signal((LightweightSemaphore::ssize_t)(ssize_t)count);\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t\n\t// Attempts to dequeue from the queue.\n\t// Returns false if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U>\n\tinline bool try_dequeue(U& item)\n\t{\n\t\tif (sema->tryWait()) {\n\t\t\twhile (!inner.try_dequeue(item)) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Attempts to dequeue from the queue using an explicit consumer token.\n\t// Returns false if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. Thread-safe.\n\ttemplate<typename U>\n\tinline bool try_dequeue(consumer_token_t& token, U& item)\n\t{\n\t\tif (sema->tryWait()) {\n\t\t\twhile (!inner.try_dequeue(token, item)) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue.\n\t// Returns the number of items actually dequeued.\n\t// Returns 0 if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It>\n\tinline size_t try_dequeue_bulk(It itemFirst, size_t max)\n\t{\n\t\tsize_t count = 0;\n\t\tmax = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max);\n\t\twhile (count != max) {\n\t\t\tcount += inner.template try_dequeue_bulk<It&>(itemFirst, max - count);\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue using an explicit consumer token.\n\t// Returns the number of items actually dequeued.\n\t// Returns 0 if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename It>\n\tinline size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max)\n\t{\n\t\tsize_t count = 0;\n\t\tmax = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max);\n\t\twhile (count != max) {\n\t\t\tcount += inner.template try_dequeue_bulk<It&>(token, itemFirst, max - count);\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t\n\t\n\t// Blocks the current thread until there's something to dequeue, then\n\t// dequeues it.\n\t// Never allocates. Thread-safe.\n\ttemplate<typename U>\n\tinline void wait_dequeue(U& item)\n\t{\n\t\tsema->wait();\n\t\twhile (!inner.try_dequeue(item)) {\n\t\t\tcontinue;\n\t\t}\n\t}\n\n\t// Blocks the current thread until either there's something to dequeue\n\t// or the timeout (specified in microseconds) expires. Returns false\n\t// without setting `item` if the timeout expires, otherwise assigns\n\t// to `item` and returns true.\n\t// Using a negative timeout indicates an indefinite timeout,\n\t// and is thus functionally equivalent to calling wait_dequeue.\n\t// Never allocates. Thread-safe.\n\ttemplate<typename U>\n\tinline bool wait_dequeue_timed(U& item, std::int64_t timeout_usecs)\n\t{\n\t\tif (!sema->wait(timeout_usecs)) {\n\t\t\treturn false;\n\t\t}\n\t\twhile (!inner.try_dequeue(item)) {\n\t\t\tcontinue;\n\t\t}\n\t\treturn true;\n\t}\n    \n    // Blocks the current thread until either there's something to dequeue\n\t// or the timeout expires. Returns false without setting `item` if the\n    // timeout expires, otherwise assigns to `item` and returns true.\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U, typename Rep, typename Period>\n\tinline bool wait_dequeue_timed(U& item, std::chrono::duration<Rep, Period> const& timeout)\n    {\n        return wait_dequeue_timed(item, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());\n    }\n\t\n\t// Blocks the current thread until there's something to dequeue, then\n\t// dequeues it using an explicit consumer token.\n\t// Never allocates. Thread-safe.\n\ttemplate<typename U>\n\tinline void wait_dequeue(consumer_token_t& token, U& item)\n\t{\n\t\tsema->wait();\n\t\twhile (!inner.try_dequeue(token, item)) {\n\t\t\tcontinue;\n\t\t}\n\t}\n\t\n\t// Blocks the current thread until either there's something to dequeue\n\t// or the timeout (specified in microseconds) expires. Returns false\n\t// without setting `item` if the timeout expires, otherwise assigns\n\t// to `item` and returns true.\n\t// Using a negative timeout indicates an indefinite timeout,\n\t// and is thus functionally equivalent to calling wait_dequeue.\n\t// Never allocates. Thread-safe.\n\ttemplate<typename U>\n\tinline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::int64_t timeout_usecs)\n\t{\n\t\tif (!sema->wait(timeout_usecs)) {\n\t\t\treturn false;\n\t\t}\n\t\twhile (!inner.try_dequeue(token, item)) {\n\t\t\tcontinue;\n\t\t}\n\t\treturn true;\n\t}\n    \n    // Blocks the current thread until either there's something to dequeue\n\t// or the timeout expires. Returns false without setting `item` if the\n    // timeout expires, otherwise assigns to `item` and returns true.\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U, typename Rep, typename Period>\n\tinline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::chrono::duration<Rep, Period> const& timeout)\n    {\n        return wait_dequeue_timed(token, item, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());\n    }\n\t\n\t// Attempts to dequeue several elements from the queue.\n\t// Returns the number of items actually dequeued, which will\n\t// always be at least one (this method blocks until the queue\n\t// is non-empty) and at most max.\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It>\n\tinline size_t wait_dequeue_bulk(It itemFirst, size_t max)\n\t{\n\t\tsize_t count = 0;\n\t\tmax = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max);\n\t\twhile (count != max) {\n\t\t\tcount += inner.template try_dequeue_bulk<It&>(itemFirst, max - count);\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue.\n\t// Returns the number of items actually dequeued, which can\n\t// be 0 if the timeout expires while waiting for elements,\n\t// and at most max.\n\t// Using a negative timeout indicates an indefinite timeout,\n\t// and is thus functionally equivalent to calling wait_dequeue_bulk.\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It>\n\tinline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::int64_t timeout_usecs)\n\t{\n\t\tsize_t count = 0;\n\t\tmax = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs);\n\t\twhile (count != max) {\n\t\t\tcount += inner.template try_dequeue_bulk<It&>(itemFirst, max - count);\n\t\t}\n\t\treturn count;\n\t}\n    \n    // Attempts to dequeue several elements from the queue.\n\t// Returns the number of items actually dequeued, which can\n\t// be 0 if the timeout expires while waiting for elements,\n\t// and at most max.\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename It, typename Rep, typename Period>\n\tinline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::chrono::duration<Rep, Period> const& timeout)\n    {\n        return wait_dequeue_bulk_timed<It&>(itemFirst, max, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());\n    }\n\t\n\t// Attempts to dequeue several elements from the queue using an explicit consumer token.\n\t// Returns the number of items actually dequeued, which will\n\t// always be at least one (this method blocks until the queue\n\t// is non-empty) and at most max.\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It>\n\tinline size_t wait_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max)\n\t{\n\t\tsize_t count = 0;\n\t\tmax = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max);\n\t\twhile (count != max) {\n\t\t\tcount += inner.template try_dequeue_bulk<It&>(token, itemFirst, max - count);\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue using an explicit consumer token.\n\t// Returns the number of items actually dequeued, which can\n\t// be 0 if the timeout expires while waiting for elements,\n\t// and at most max.\n\t// Using a negative timeout indicates an indefinite timeout,\n\t// and is thus functionally equivalent to calling wait_dequeue_bulk.\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename It>\n\tinline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::int64_t timeout_usecs)\n\t{\n\t\tsize_t count = 0;\n\t\tmax = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs);\n\t\twhile (count != max) {\n\t\t\tcount += inner.template try_dequeue_bulk<It&>(token, itemFirst, max - count);\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue using an explicit consumer token.\n\t// Returns the number of items actually dequeued, which can\n\t// be 0 if the timeout expires while waiting for elements,\n\t// and at most max.\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It, typename Rep, typename Period>\n\tinline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::chrono::duration<Rep, Period> const& timeout)\n    {\n        return wait_dequeue_bulk_timed<It&>(token, itemFirst, max, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());\n    }\n\t\n\t\n\t// Returns an estimate of the total number of elements currently in the queue. This\n\t// estimate is only accurate if the queue has completely stabilized before it is called\n\t// (i.e. all enqueue and dequeue operations have completed and their memory effects are\n\t// visible on the calling thread, and no further operations start while this method is\n\t// being called).\n\t// Thread-safe.\n\tinline size_t size_approx() const\n\t{\n\t\treturn (size_t)sema->availableApprox();\n\t}\n\t\n\t\n\t// Returns true if the underlying atomic variables used by\n\t// the queue are lock-free (they should be on most platforms).\n\t// Thread-safe.\n\tstatic bool is_lock_free()\n\t{\n\t\treturn ConcurrentQueue::is_lock_free();\n\t}\n\t\n\nprivate:\n\ttemplate<typename U>\n\tstatic inline U* create()\n\t{\n\t\tauto p = (Traits::malloc)(sizeof(U));\n\t\treturn p != nullptr ? 
new (p) U : nullptr;\n\t}\n\t\n\ttemplate<typename U, typename A1>\n\tstatic inline U* create(A1&& a1)\n\t{\n\t\tauto p = (Traits::malloc)(sizeof(U));\n\t\treturn p != nullptr ? new (p) U(std::forward<A1>(a1)) : nullptr;\n\t}\n\t\n\ttemplate<typename U>\n\tstatic inline void destroy(U* p)\n\t{\n\t\tif (p != nullptr) {\n\t\t\tp->~U();\n\t\t}\n\t\t(Traits::free)(p);\n\t}\n\t\nprivate:\n\tConcurrentQueue inner;\n\tstd::unique_ptr<LightweightSemaphore, void (*)(LightweightSemaphore*)> sema;\n};\n\n\ntemplate<typename T, typename Traits>\ninline void swap(BlockingConcurrentQueue<T, Traits>& a, BlockingConcurrentQueue<T, Traits>& b) MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\n}\t// end namespace moodycamel\n"
  },
  {
    "path": "src/third_party/concurrentqueue/concurrentqueue.h",
    "content": "// Provides a C++11 implementation of a multi-producer, multi-consumer lock-free queue.\n// An overview, including benchmark results, is provided here:\n//     http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++\n// The full design is also described in excruciating detail at:\n//    http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue\n\n// Simplified BSD license:\n// Copyright (c) 2013-2016, Cameron Desrochers.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n// - Redistributions of source code must retain the above copyright notice, this list of\n// conditions and the following disclaimer.\n// - Redistributions in binary form must reproduce the above copyright notice, this list of\n// conditions and the following disclaimer in the documentation and/or other materials\n// provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY\n// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL\n// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\n// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\n#pragma once\n\n#if defined(__GNUC__)\n// Disable -Wconversion warnings (spuriously triggered when Traits::size_t and\n// Traits::index_t are set to < 32 bits, causing integer promotion, causing warnings\n// upon assigning any computed values)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wconversion\"\n\n#ifdef MCDBGQ_USE_RELACY\n#pragma GCC diagnostic ignored \"-Wint-to-pointer-cast\"\n#endif\n#endif\n\n#if defined(__APPLE__)\n#include \"TargetConditionals.h\"\n#endif\n\n#ifdef MCDBGQ_USE_RELACY\n#include \"relacy/relacy_std.hpp\"\n#include \"relacy_shims.h\"\n// We only use malloc/free anyway, and the delete macro messes up `= delete` method declarations.\n// We'll override the default trait malloc ourselves without a macro.\n#undef new\n#undef delete\n#undef malloc\n#undef free\n#else\n#include <atomic>\t\t// Requires C++11. 
Sorry VS2010.\n#include <cassert>\n#endif\n#include <cstddef>              // for max_align_t\n#include <cstdint>\n#include <cstdlib>\n#include <type_traits>\n#include <algorithm>\n#include <utility>\n#include <limits>\n#include <climits>\t\t// for CHAR_BIT\n#include <array>\n#include <thread>\t\t// partly for __WINPTHREADS_VERSION if on MinGW-w64 w/ POSIX threading\n\n// Platform-specific definitions of a numeric thread ID type and an invalid value\nnamespace moodycamel { namespace details {\n\ttemplate<typename thread_id_t> struct thread_id_converter {\n\t\ttypedef thread_id_t thread_id_numeric_size_t;\n\t\ttypedef thread_id_t thread_id_hash_t;\n\t\tstatic thread_id_hash_t prehash(thread_id_t const& x) { return x; }\n\t};\n} }\n#if defined(MCDBGQ_USE_RELACY)\nnamespace moodycamel { namespace details {\n\ttypedef std::uint32_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id  = 0xFFFFFFFFU;\n\tstatic const thread_id_t invalid_thread_id2 = 0xFFFFFFFEU;\n\tstatic inline thread_id_t thread_id() { return rl::thread_index(); }\n} }\n#elif defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__)\n// No sense pulling in windows.h in a header, we'll manually declare the function\n// we use and rely on backwards-compatibility for this not to break\nextern \"C\" __declspec(dllimport) unsigned long __stdcall GetCurrentThreadId(void);\nnamespace moodycamel { namespace details {\n\tstatic_assert(sizeof(unsigned long) == sizeof(std::uint32_t), \"Expected size of unsigned long to be 32 bits on Windows\");\n\ttypedef std::uint32_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id  = 0;\t\t\t// See http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx\n\tstatic const thread_id_t invalid_thread_id2 = 0xFFFFFFFFU;\t// Not technically guaranteed to be invalid, but is never used in practice. 
Note that all Win32 thread IDs are presently multiples of 4.\n\tstatic inline thread_id_t thread_id() { return static_cast<thread_id_t>(::GetCurrentThreadId()); }\n} }\n#elif defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || (defined(__APPLE__) && TARGET_OS_IPHONE)\nnamespace moodycamel { namespace details {\n\tstatic_assert(sizeof(std::thread::id) == 4 || sizeof(std::thread::id) == 8, \"std::thread::id is expected to be either 4 or 8 bytes\");\n\t\n\ttypedef std::thread::id thread_id_t;\n\tstatic const thread_id_t invalid_thread_id;         // Default ctor creates invalid ID\n\n\t// Note we don't define a invalid_thread_id2 since std::thread::id doesn't have one; it's\n\t// only used if MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is defined anyway, which it won't\n\t// be.\n\tstatic inline thread_id_t thread_id() { return std::this_thread::get_id(); }\n\n\ttemplate<std::size_t> struct thread_id_size { };\n\ttemplate<> struct thread_id_size<4> { typedef std::uint32_t numeric_t; };\n\ttemplate<> struct thread_id_size<8> { typedef std::uint64_t numeric_t; };\n\n\ttemplate<> struct thread_id_converter<thread_id_t> {\n\t\ttypedef thread_id_size<sizeof(thread_id_t)>::numeric_t thread_id_numeric_size_t;\n#ifndef __APPLE__\n\t\ttypedef std::size_t thread_id_hash_t;\n#else\n\t\ttypedef thread_id_numeric_size_t thread_id_hash_t;\n#endif\n\n\t\tstatic thread_id_hash_t prehash(thread_id_t const& x)\n\t\t{\n#ifndef __APPLE__\n\t\t\treturn std::hash<std::thread::id>()(x);\n#else\n\t\t\treturn *reinterpret_cast<thread_id_hash_t const*>(&x);\n#endif\n\t\t}\n\t};\n} }\n#else\n// Use a nice trick from this answer: http://stackoverflow.com/a/8438730/21475\n// In order to get a numeric thread ID in a platform-independent way, we use a thread-local\n// static variable's address as a thread identifier :-)\n#if defined(__GNUC__) || defined(__INTEL_COMPILER)\n#define MOODYCAMEL_THREADLOCAL __thread\n#elif defined(_MSC_VER)\n#define MOODYCAMEL_THREADLOCAL 
__declspec(thread)\n#else\n// Assume C++11 compliant compiler\n#define MOODYCAMEL_THREADLOCAL thread_local\n#endif\nnamespace moodycamel { namespace details {\n\ttypedef std::uintptr_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id  = 0;\t\t// Address can't be nullptr\n\tstatic const thread_id_t invalid_thread_id2 = 1;\t\t// Member accesses off a null pointer are also generally invalid. Plus it's not aligned.\n\tstatic inline thread_id_t thread_id() { static MOODYCAMEL_THREADLOCAL int x; return reinterpret_cast<thread_id_t>(&x); }\n} }\n#endif\n\n// Exceptions\n#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED\n#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || (!defined(_MSC_VER) && !defined(__GNUC__))\n#define MOODYCAMEL_EXCEPTIONS_ENABLED\n#endif\n#endif\n#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED\n#define MOODYCAMEL_TRY try\n#define MOODYCAMEL_CATCH(...) catch(__VA_ARGS__)\n#define MOODYCAMEL_RETHROW throw\n#define MOODYCAMEL_THROW(expr) throw (expr)\n#else\n#define MOODYCAMEL_TRY if (true)\n#define MOODYCAMEL_CATCH(...) else if (false)\n#define MOODYCAMEL_RETHROW\n#define MOODYCAMEL_THROW(expr)\n#endif\n\n#ifndef MOODYCAMEL_NOEXCEPT\n#if !defined(MOODYCAMEL_EXCEPTIONS_ENABLED)\n#define MOODYCAMEL_NOEXCEPT\n#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) true\n#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) true\n#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1800\n// VS2012's std::is_nothrow_[move_]constructible is broken and returns true when it shouldn't :-(\n// We have to assume *all* non-trivial constructors may throw on VS2012!\n#define MOODYCAMEL_NOEXCEPT _NOEXCEPT\n#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference<valueType>::value && std::is_move_constructible<type>::value ? 
std::is_trivially_move_constructible<type>::value : std::is_trivially_copy_constructible<type>::value)\n#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference<valueType>::value && std::is_move_assignable<type>::value ? std::is_trivially_move_assignable<type>::value || std::is_nothrow_move_assignable<type>::value : std::is_trivially_copy_assignable<type>::value || std::is_nothrow_copy_assignable<type>::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr))\n#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1900\n#define MOODYCAMEL_NOEXCEPT _NOEXCEPT\n#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference<valueType>::value && std::is_move_constructible<type>::value ? std::is_trivially_move_constructible<type>::value || std::is_nothrow_move_constructible<type>::value : std::is_trivially_copy_constructible<type>::value || std::is_nothrow_copy_constructible<type>::value)\n#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference<valueType>::value && std::is_move_assignable<type>::value ? 
std::is_trivially_move_assignable<type>::value || std::is_nothrow_move_assignable<type>::value : std::is_trivially_copy_assignable<type>::value || std::is_nothrow_copy_assignable<type>::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr))\n#else\n#define MOODYCAMEL_NOEXCEPT noexcept\n#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) noexcept(expr)\n#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) noexcept(expr)\n#endif\n#endif\n\n#ifndef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n#ifdef MCDBGQ_USE_RELACY\n#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n#else\n// VS2013 doesn't support `thread_local`, and MinGW-w64 w/ POSIX threading has a crippling bug: http://sourceforge.net/p/mingw-w64/bugs/445\n// g++ <=4.7 doesn't support thread_local either.\n// Finally, iOS/ARM doesn't have support for it either, and g++/ARM allows it to compile but it's unconfirmed to actually work\n#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && (!defined(__MINGW32__) && !defined(__MINGW64__) || !defined(__WINPTHREADS_VERSION)) && (!defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) && (!defined(__APPLE__) || !TARGET_OS_IPHONE) && !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__)\n// Assume `thread_local` is fully supported in all other C++11 compilers/platforms\n//#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED    // always disabled for now since several users report having problems with it on\n#endif\n#endif\n#endif\n\n// VS2012 doesn't support deleted functions. \n// In this case, we declare the function normally but don't define it. 
A link error will be generated if the function is called.\n#ifndef MOODYCAMEL_DELETE_FUNCTION\n#if defined(_MSC_VER) && _MSC_VER < 1800\n#define MOODYCAMEL_DELETE_FUNCTION\n#else\n#define MOODYCAMEL_DELETE_FUNCTION = delete\n#endif\n#endif\n\n// Compiler-specific likely/unlikely hints\nnamespace moodycamel { namespace details {\n#if defined(__GNUC__)\n\tstatic inline bool (likely)(bool x) { return __builtin_expect((x), true); }\n\tstatic inline bool (unlikely)(bool x) { return __builtin_expect((x), false); }\n#else\n\tstatic inline bool (likely)(bool x) { return x; }\n\tstatic inline bool (unlikely)(bool x) { return x; }\n#endif\n} }\n\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n#include \"internal/concurrentqueue_internal_debug.h\"\n#endif\n\nnamespace moodycamel {\nnamespace details {\n\ttemplate<typename T>\n\tstruct const_numeric_max {\n\t\tstatic_assert(std::is_integral<T>::value, \"const_numeric_max can only be used with integers\");\n\t\tstatic const T value = std::numeric_limits<T>::is_signed\n\t\t\t? (static_cast<T>(1) << (sizeof(T) * CHAR_BIT - 1)) - static_cast<T>(1)\n\t\t\t: static_cast<T>(-1);\n\t};\n\n#if defined(__GLIBCXX__)\n\ttypedef ::max_align_t std_max_align_t;      // libstdc++ forgot to add it to std:: for a while\n#else\n\ttypedef std::max_align_t std_max_align_t;   // Others (e.g. MSVC) insist it can *only* be accessed via std::\n#endif\n\n\t// Some platforms have incorrectly set max_align_t to a type with <8 bytes alignment even while supporting\n\t// 8-byte aligned scalar values (*cough* 32-bit iOS). Work around this with our own union. See issue #64.\n\ttypedef union {\n\t\tstd_max_align_t x;\n\t\tlong long y;\n\t\tvoid* z;\n\t} max_align_t;\n}\n\n// Default traits for the ConcurrentQueue. 
To change some of the\n// traits without re-implementing all of them, inherit from this\n// struct and shadow the declarations you wish to be different;\n// since the traits are used as a template type parameter, the\n// shadowed declarations will be used where defined, and the defaults\n// otherwise.\nstruct ConcurrentQueueDefaultTraits\n{\n\t// General-purpose size type. std::size_t is strongly recommended.\n\ttypedef std::size_t size_t;\n\t\n\t// The type used for the enqueue and dequeue indices. Must be at least as\n\t// large as size_t. Should be significantly larger than the number of elements\n\t// you expect to hold at once, especially if you have a high turnover rate;\n\t// for example, on 32-bit x86, if you expect to have over a hundred million\n\t// elements or pump several million elements through your queue in a very\n\t// short space of time, using a 32-bit type *may* trigger a race condition.\n\t// A 64-bit int type is recommended in that case, and in practice will\n\t// prevent a race condition no matter the usage of the queue. Note that\n\t// whether the queue is lock-free with a 64-int type depends on the whether\n\t// std::atomic<std::uint64_t> is lock-free, which is platform-specific.\n\ttypedef std::size_t index_t;\n\t\n\t// Internally, all elements are enqueued and dequeued from multi-element\n\t// blocks; this is the smallest controllable unit. If you expect few elements\n\t// but many producers, a smaller block size should be favoured. For few producers\n\t// and/or many elements, a larger block size is preferred. A sane default\n\t// is provided. Must be a power of 2.\n\tstatic const size_t BLOCK_SIZE = 32;\n\t\n\t// For explicit producers (i.e. when using a producer token), the block is\n\t// checked for being empty by iterating through a list of flags, one per element.\n\t// For large block sizes, this is too inefficient, and switching to an atomic\n\t// counter-based approach is faster. 
The switch is made for block sizes strictly\n\t// larger than this threshold.\n\tstatic const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = 32;\n\t\n\t// How many full blocks can be expected for a single explicit producer? This should\n\t// reflect that number's maximum for optimal performance. Must be a power of 2.\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = 32;\n\t\n\t// How many full blocks can be expected for a single implicit producer? This should\n\t// reflect that number's maximum for optimal performance. Must be a power of 2.\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = 32;\n\t\n\t// The initial size of the hash table mapping thread IDs to implicit producers.\n\t// Note that the hash is resized every time it becomes half full.\n\t// Must be a power of two, and either 0 or at least 1. If 0, implicit production\n\t// (using the enqueue methods without an explicit producer token) is disabled.\n\tstatic const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 32;\n\t\n\t// Controls the number of items that an explicit consumer (i.e. one with a token)\n\t// must consume before it causes all consumers to rotate and move on to the next\n\t// internal queue.\n\tstatic const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 256;\n\t\n\t// The maximum number of elements (inclusive) that can be enqueued to a sub-queue.\n\t// Enqueue operations that would cause this limit to be surpassed will fail. 
Note\n\t// that this limit is enforced at the block level (for performance reasons), i.e.\n\t// it's rounded up to the nearest block size.\n\tstatic const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max<size_t>::value;\n\t\n\t\n#ifndef MCDBGQ_USE_RELACY\n\t// Memory allocation can be customized if needed.\n\t// malloc should return nullptr on failure, and handle alignment like std::malloc.\n#if defined(malloc) || defined(free)\n\t// Gah, this is 2015, stop defining macros that break standard code already!\n\t// Work around malloc/free being special macros:\n\tstatic inline void* WORKAROUND_malloc(size_t size) { return malloc(size); }\n\tstatic inline void WORKAROUND_free(void* ptr) { return free(ptr); }\n\tstatic inline void* (malloc)(size_t size) { return WORKAROUND_malloc(size); }\n\tstatic inline void (free)(void* ptr) { return WORKAROUND_free(ptr); }\n#else\n\tstatic inline void* malloc(size_t size) { return std::malloc(size); }\n\tstatic inline void free(void* ptr) { return std::free(ptr); }\n#endif\n#else\n\t// Debug versions when running under the Relacy race detector (ignore\n\t// these in user code)\n\tstatic inline void* malloc(size_t size) { return rl::rl_malloc(size, $); }\n\tstatic inline void free(void* ptr) { return rl::rl_free(ptr, $); }\n#endif\n};\n\n\n// When producing or consuming many elements, the most efficient way is to:\n//    1) Use one of the bulk-operation methods of the queue with a token\n//    2) Failing that, use the bulk-operation methods without a token\n//    3) Failing that, create a token and use that with the single-item methods\n//    4) Failing that, use the single-parameter methods of the queue\n// Having said that, don't create tokens willy-nilly -- ideally there should be\n// a maximum of one token per thread (of each kind).\nstruct ProducerToken;\nstruct ConsumerToken;\n\ntemplate<typename T, typename Traits> class ConcurrentQueue;\ntemplate<typename T, typename Traits> class BlockingConcurrentQueue;\nclass 
ConcurrentQueueTests;\n\n\nnamespace details\n{\n\tstruct ConcurrentQueueProducerTypelessBase\n\t{\n\t\tConcurrentQueueProducerTypelessBase* next;\n\t\tstd::atomic<bool> inactive;\n\t\tProducerToken* token;\n\t\t\n\t\tConcurrentQueueProducerTypelessBase()\n\t\t\t: next(nullptr), inactive(false), token(nullptr)\n\t\t{\n\t\t}\n\t};\n\t\n\ttemplate<bool use32> struct _hash_32_or_64 {\n\t\tstatic inline std::uint32_t hash(std::uint32_t h)\n\t\t{\n\t\t\t// MurmurHash3 finalizer -- see https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp\n\t\t\t// Since the thread ID is already unique, all we really want to do is propagate that\n\t\t\t// uniqueness evenly across all the bits, so that we can use a subset of the bits while\n\t\t\t// reducing collisions significantly\n\t\t\th ^= h >> 16;\n\t\t\th *= 0x85ebca6b;\n\t\t\th ^= h >> 13;\n\t\t\th *= 0xc2b2ae35;\n\t\t\treturn h ^ (h >> 16);\n\t\t}\n\t};\n\ttemplate<> struct _hash_32_or_64<1> {\n\t\tstatic inline std::uint64_t hash(std::uint64_t h)\n\t\t{\n\t\t\th ^= h >> 33;\n\t\t\th *= 0xff51afd7ed558ccd;\n\t\t\th ^= h >> 33;\n\t\t\th *= 0xc4ceb9fe1a85ec53;\n\t\t\treturn h ^ (h >> 33);\n\t\t}\n\t};\n\ttemplate<std::size_t size> struct hash_32_or_64 : public _hash_32_or_64<(size > 4)> {  };\n\t\n\tstatic inline size_t hash_thread_id(thread_id_t id)\n\t{\n\t\tstatic_assert(sizeof(thread_id_t) <= 8, \"Expected a platform where thread IDs are at most 64-bit values\");\n\t\treturn static_cast<size_t>(hash_32_or_64<sizeof(thread_id_converter<thread_id_t>::thread_id_hash_t)>::hash(\n\t\t\tthread_id_converter<thread_id_t>::prehash(id)));\n\t}\n\t\n\ttemplate<typename T>\n\tstatic inline bool circular_less_than(T a, T b)\n\t{\n#ifdef _MSC_VER\n#pragma warning(push)\n#pragma warning(disable: 4554)\n#endif\n\t\tstatic_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, \"circular_less_than is intended to be used only with unsigned integer types\");\n\t\treturn static_cast<T>(a - b) > 
static_cast<T>(static_cast<T>(1) << static_cast<T>(sizeof(T) * CHAR_BIT - 1));\n#ifdef _MSC_VER\n#pragma warning(pop)\n#endif\n\t}\n\t\n\ttemplate<typename U>\n\tstatic inline char* align_for(char* ptr)\n\t{\n\t\tconst std::size_t alignment = std::alignment_of<U>::value;\n\t\treturn ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;\n\t}\n\n\ttemplate<typename T>\n\tstatic inline T ceil_to_pow_2(T x)\n\t{\n\t\tstatic_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, \"ceil_to_pow_2 is intended to be used only with unsigned integer types\");\n\n\t\t// Adapted from http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2\n\t\t--x;\n\t\tx |= x >> 1;\n\t\tx |= x >> 2;\n\t\tx |= x >> 4;\n\t\tfor (std::size_t i = 1; i < sizeof(T); i <<= 1) {\n\t\t\tx |= x >> (i << 3);\n\t\t}\n\t\t++x;\n\t\treturn x;\n\t}\n\t\n\ttemplate<typename T>\n\tstatic inline void swap_relaxed(std::atomic<T>& left, std::atomic<T>& right)\n\t{\n\t\tT temp = std::move(left.load(std::memory_order_relaxed));\n\t\tleft.store(std::move(right.load(std::memory_order_relaxed)), std::memory_order_relaxed);\n\t\tright.store(std::move(temp), std::memory_order_relaxed);\n\t}\n\t\n\ttemplate<typename T>\n\tstatic inline T const& nomove(T const& x)\n\t{\n\t\treturn x;\n\t}\n\t\n\ttemplate<bool Enable>\n\tstruct nomove_if\n\t{\n\t\ttemplate<typename T>\n\t\tstatic inline T const& eval(T const& x)\n\t\t{\n\t\t\treturn x;\n\t\t}\n\t};\n\t\n\ttemplate<>\n\tstruct nomove_if<false>\n\t{\n\t\ttemplate<typename U>\n\t\tstatic inline auto eval(U&& x)\n\t\t\t-> decltype(std::forward<U>(x))\n\t\t{\n\t\t\treturn std::forward<U>(x);\n\t\t}\n\t};\n\t\n\ttemplate<typename It>\n\tstatic inline auto deref_noexcept(It& it) MOODYCAMEL_NOEXCEPT -> decltype(*it)\n\t{\n\t\treturn *it;\n\t}\n\t\n#if defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)\n\ttemplate<typename T> struct is_trivially_destructible : 
std::is_trivially_destructible<T> { };\n#else\n\ttemplate<typename T> struct is_trivially_destructible : std::has_trivial_destructor<T> { };\n#endif\n\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n#ifdef MCDBGQ_USE_RELACY\n\ttypedef RelacyThreadExitListener ThreadExitListener;\n\ttypedef RelacyThreadExitNotifier ThreadExitNotifier;\n#else\n\tstruct ThreadExitListener\n\t{\n\t\ttypedef void (*callback_t)(void*);\n\t\tcallback_t callback;\n\t\tvoid* userData;\n\t\t\n\t\tThreadExitListener* next;\t\t// reserved for use by the ThreadExitNotifier\n\t};\n\t\n\t\n\tclass ThreadExitNotifier\n\t{\n\tpublic:\n\t\tstatic void subscribe(ThreadExitListener* listener)\n\t\t{\n\t\t\tauto& tlsInst = instance();\n\t\t\tlistener->next = tlsInst.tail;\n\t\t\ttlsInst.tail = listener;\n\t\t}\n\t\t\n\t\tstatic void unsubscribe(ThreadExitListener* listener)\n\t\t{\n\t\t\tauto& tlsInst = instance();\n\t\t\tThreadExitListener** prev = &tlsInst.tail;\n\t\t\tfor (auto ptr = tlsInst.tail; ptr != nullptr; ptr = ptr->next) {\n\t\t\t\tif (ptr == listener) {\n\t\t\t\t\t*prev = ptr->next;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tprev = &ptr->next;\n\t\t\t}\n\t\t}\n\t\t\n\tprivate:\n\t\tThreadExitNotifier() : tail(nullptr) { }\n\t\tThreadExitNotifier(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\tThreadExitNotifier& operator=(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\t\n\t\t~ThreadExitNotifier()\n\t\t{\n\t\t\t// This thread is about to exit, let everyone know!\n\t\t\tassert(this == &instance() && \"If this assert fails, you likely have a buggy compiler! 
Change the preprocessor conditions such that MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is no longer defined.\");\n\t\t\tfor (auto ptr = tail; ptr != nullptr; ptr = ptr->next) {\n\t\t\t\tptr->callback(ptr->userData);\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Thread-local\n\t\tstatic inline ThreadExitNotifier& instance()\n\t\t{\n\t\t\tstatic thread_local ThreadExitNotifier notifier;\n\t\t\treturn notifier;\n\t\t}\n\t\t\n\tprivate:\n\t\tThreadExitListener* tail;\n\t};\n#endif\n#endif\n\t\n\ttemplate<typename T> struct static_is_lock_free_num { enum { value = 0 }; };\n\ttemplate<> struct static_is_lock_free_num<signed char> { enum { value = ATOMIC_CHAR_LOCK_FREE }; };\n\ttemplate<> struct static_is_lock_free_num<short> { enum { value = ATOMIC_SHORT_LOCK_FREE }; };\n\ttemplate<> struct static_is_lock_free_num<int> { enum { value = ATOMIC_INT_LOCK_FREE }; };\n\ttemplate<> struct static_is_lock_free_num<long> { enum { value = ATOMIC_LONG_LOCK_FREE }; };\n\ttemplate<> struct static_is_lock_free_num<long long> { enum { value = ATOMIC_LLONG_LOCK_FREE }; };\n\ttemplate<typename T> struct static_is_lock_free : static_is_lock_free_num<typename std::make_signed<T>::type> {  };\n\ttemplate<> struct static_is_lock_free<bool> { enum { value = ATOMIC_BOOL_LOCK_FREE }; };\n\ttemplate<typename U> struct static_is_lock_free<U*> { enum { value = ATOMIC_POINTER_LOCK_FREE }; };\n}\n\n\nstruct ProducerToken\n{\n\ttemplate<typename T, typename Traits>\n\texplicit ProducerToken(ConcurrentQueue<T, Traits>& queue);\n\t\n\ttemplate<typename T, typename Traits>\n\texplicit ProducerToken(BlockingConcurrentQueue<T, Traits>& queue);\n\t\n\tProducerToken(ProducerToken&& other) MOODYCAMEL_NOEXCEPT\n\t\t: producer(other.producer)\n\t{\n\t\tother.producer = nullptr;\n\t\tif (producer != nullptr) {\n\t\t\tproducer->token = this;\n\t\t}\n\t}\n\t\n\tinline ProducerToken& operator=(ProducerToken&& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tswap(other);\n\t\treturn *this;\n\t}\n\t\n\tvoid swap(ProducerToken& other) 
MOODYCAMEL_NOEXCEPT\n\t{\n\t\tstd::swap(producer, other.producer);\n\t\tif (producer != nullptr) {\n\t\t\tproducer->token = this;\n\t\t}\n\t\tif (other.producer != nullptr) {\n\t\t\tother.producer->token = &other;\n\t\t}\n\t}\n\t\n\t// A token is always valid unless:\n\t//     1) Memory allocation failed during construction\n\t//     2) It was moved via the move constructor\n\t//        (Note: assignment does a swap, leaving both potentially valid)\n\t//     3) The associated queue was destroyed\n\t// Note that if valid() returns true, that only indicates\n\t// that the token is valid for use with a specific queue,\n\t// but not which one; that's up to the user to track.\n\tinline bool valid() const { return producer != nullptr; }\n\t\n\t~ProducerToken()\n\t{\n\t\tif (producer != nullptr) {\n\t\t\tproducer->token = nullptr;\n\t\t\tproducer->inactive.store(true, std::memory_order_release);\n\t\t}\n\t}\n\t\n\t// Disable copying and assignment\n\tProducerToken(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION;\n\tProducerToken& operator=(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\nprivate:\n\ttemplate<typename T, typename Traits> friend class ConcurrentQueue;\n\tfriend class ConcurrentQueueTests;\n\t\nprotected:\n\tdetails::ConcurrentQueueProducerTypelessBase* producer;\n};\n\n\nstruct ConsumerToken\n{\n\ttemplate<typename T, typename Traits>\n\texplicit ConsumerToken(ConcurrentQueue<T, Traits>& q);\n\t\n\ttemplate<typename T, typename Traits>\n\texplicit ConsumerToken(BlockingConcurrentQueue<T, Traits>& q);\n\t\n\tConsumerToken(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT\n\t\t: initialOffset(other.initialOffset), lastKnownGlobalOffset(other.lastKnownGlobalOffset), itemsConsumedFromCurrent(other.itemsConsumedFromCurrent), currentProducer(other.currentProducer), desiredProducer(other.desiredProducer)\n\t{\n\t}\n\t\n\tinline ConsumerToken& operator=(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tswap(other);\n\t\treturn *this;\n\t}\n\t\n\tvoid 
swap(ConsumerToken& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tstd::swap(initialOffset, other.initialOffset);\n\t\tstd::swap(lastKnownGlobalOffset, other.lastKnownGlobalOffset);\n\t\tstd::swap(itemsConsumedFromCurrent, other.itemsConsumedFromCurrent);\n\t\tstd::swap(currentProducer, other.currentProducer);\n\t\tstd::swap(desiredProducer, other.desiredProducer);\n\t}\n\t\n\t// Disable copying and assignment\n\tConsumerToken(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION;\n\tConsumerToken& operator=(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION;\n\nprivate:\n\ttemplate<typename T, typename Traits> friend class ConcurrentQueue;\n\tfriend class ConcurrentQueueTests;\n\t\nprivate: // but shared with ConcurrentQueue\n\tstd::uint32_t initialOffset;\n\tstd::uint32_t lastKnownGlobalOffset;\n\tstd::uint32_t itemsConsumedFromCurrent;\n\tdetails::ConcurrentQueueProducerTypelessBase* currentProducer;\n\tdetails::ConcurrentQueueProducerTypelessBase* desiredProducer;\n};\n\n// Need to forward-declare this swap because it's in a namespace.\n// See http://stackoverflow.com/questions/4492062/why-does-a-c-friend-class-need-a-forward-declaration-only-in-other-namespaces\ntemplate<typename T, typename Traits>\ninline void swap(typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& a, typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT;\n\n\ntemplate<typename T, typename Traits = ConcurrentQueueDefaultTraits>\nclass ConcurrentQueue\n{\npublic:\n\ttypedef ::moodycamel::ProducerToken producer_token_t;\n\ttypedef ::moodycamel::ConsumerToken consumer_token_t;\n\t\n\ttypedef typename Traits::index_t index_t;\n\ttypedef typename Traits::size_t size_t;\n\t\n\tstatic const size_t BLOCK_SIZE = static_cast<size_t>(Traits::BLOCK_SIZE);\n\tstatic const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = static_cast<size_t>(Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD);\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = 
static_cast<size_t>(Traits::EXPLICIT_INITIAL_INDEX_SIZE);\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = static_cast<size_t>(Traits::IMPLICIT_INITIAL_INDEX_SIZE);\n\tstatic const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = static_cast<size_t>(Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE);\n\tstatic const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = static_cast<std::uint32_t>(Traits::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE);\n#ifdef _MSC_VER\n#pragma warning(push)\n#pragma warning(disable: 4307)\t\t// + integral constant overflow (that's what the ternary expression is for!)\n#pragma warning(disable: 4309)\t\t// static_cast: Truncation of constant value\n#endif\n\tstatic const size_t MAX_SUBQUEUE_SIZE = (details::const_numeric_max<size_t>::value - static_cast<size_t>(Traits::MAX_SUBQUEUE_SIZE) < BLOCK_SIZE) ? details::const_numeric_max<size_t>::value : ((static_cast<size_t>(Traits::MAX_SUBQUEUE_SIZE) + (BLOCK_SIZE - 1)) / BLOCK_SIZE * BLOCK_SIZE);\n#ifdef _MSC_VER\n#pragma warning(pop)\n#endif\n\n\tstatic_assert(!std::numeric_limits<size_t>::is_signed && std::is_integral<size_t>::value, \"Traits::size_t must be an unsigned integral type\");\n\tstatic_assert(!std::numeric_limits<index_t>::is_signed && std::is_integral<index_t>::value, \"Traits::index_t must be an unsigned integral type\");\n\tstatic_assert(sizeof(index_t) >= sizeof(size_t), \"Traits::index_t must be at least as wide as Traits::size_t\");\n\tstatic_assert((BLOCK_SIZE > 1) && !(BLOCK_SIZE & (BLOCK_SIZE - 1)), \"Traits::BLOCK_SIZE must be a power of 2 (and at least 2)\");\n\tstatic_assert((EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD > 1) && !(EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD & (EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD - 1)), \"Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD must be a power of 2 (and greater than 1)\");\n\tstatic_assert((EXPLICIT_INITIAL_INDEX_SIZE > 1) && !(EXPLICIT_INITIAL_INDEX_SIZE & (EXPLICIT_INITIAL_INDEX_SIZE - 1)), 
\"Traits::EXPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)\");\n\tstatic_assert((IMPLICIT_INITIAL_INDEX_SIZE > 1) && !(IMPLICIT_INITIAL_INDEX_SIZE & (IMPLICIT_INITIAL_INDEX_SIZE - 1)), \"Traits::IMPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)\");\n\tstatic_assert((INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) || !(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE & (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - 1)), \"Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be a power of 2\");\n\tstatic_assert(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0 || INITIAL_IMPLICIT_PRODUCER_HASH_SIZE >= 1, \"Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be at least 1 (or 0 to disable implicit enqueueing)\");\n\npublic:\n\t// Creates a queue with at least `capacity` element slots; note that the\n\t// actual number of elements that can be inserted without additional memory\n\t// allocation depends on the number of producers and the block size (e.g. if\n\t// the block size is equal to `capacity`, only a single block will be allocated\n\t// up-front, which means only a single producer will be able to enqueue elements\n\t// without an extra allocation -- blocks aren't shared between producers).\n\t// This method is not thread safe -- it is up to the user to ensure that the\n\t// queue is fully constructed before it starts being used by other threads (this\n\t// includes making the memory effects of construction visible, possibly with a\n\t// memory barrier).\n\texplicit ConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE)\n\t\t: producerListTail(nullptr),\n\t\tproducerCount(0),\n\t\tinitialBlockPoolIndex(0),\n\t\tnextExplicitConsumerId(0),\n\t\tglobalExplicitConsumerOffset(0)\n\t{\n\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);\n\t\tpopulate_initial_implicit_producer_hash();\n\t\tpopulate_initial_block_list(capacity / BLOCK_SIZE + ((capacity & (BLOCK_SIZE - 1)) == 0 ? 
0 : 1));\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\t// Track all the producers using a fully-resolved typed list for\n\t\t// each kind; this makes it possible to debug them starting from\n\t\t// the root queue object (otherwise wacky casts are needed that\n\t\t// don't compile in the debugger's expression evaluator).\n\t\texplicitProducers.store(nullptr, std::memory_order_relaxed);\n\t\timplicitProducers.store(nullptr, std::memory_order_relaxed);\n#endif\n\t}\n\t\n\t// Computes the correct amount of pre-allocated blocks for you based\n\t// on the minimum number of elements you want available at any given\n\t// time, and the maximum concurrent number of each type of producer.\n\tConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers)\n\t\t: producerListTail(nullptr),\n\t\tproducerCount(0),\n\t\tinitialBlockPoolIndex(0),\n\t\tnextExplicitConsumerId(0),\n\t\tglobalExplicitConsumerOffset(0)\n\t{\n\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);\n\t\tpopulate_initial_implicit_producer_hash();\n\t\tsize_t blocks = (((minCapacity + BLOCK_SIZE - 1) / BLOCK_SIZE) - 1) * (maxExplicitProducers + 1) + 2 * (maxExplicitProducers + maxImplicitProducers);\n\t\tpopulate_initial_block_list(blocks);\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\texplicitProducers.store(nullptr, std::memory_order_relaxed);\n\t\timplicitProducers.store(nullptr, std::memory_order_relaxed);\n#endif\n\t}\n\t\n\t// Note: The queue should not be accessed concurrently while it's\n\t// being deleted. 
It's up to the user to synchronize this.\n\t// This method is not thread safe.\n\t~ConcurrentQueue()\n\t{\n\t\t// Destroy producers\n\t\tauto ptr = producerListTail.load(std::memory_order_relaxed);\n\t\twhile (ptr != nullptr) {\n\t\t\tauto next = ptr->next_prod();\n\t\t\tif (ptr->token != nullptr) {\n\t\t\t\tptr->token->producer = nullptr;\n\t\t\t}\n\t\t\tdestroy(ptr);\n\t\t\tptr = next;\n\t\t}\n\t\t\n\t\t// Destroy implicit producer hash tables\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE != 0) {\n\t\t\tauto hash = implicitProducerHash.load(std::memory_order_relaxed);\n\t\t\twhile (hash != nullptr) {\n\t\t\t\tauto prev = hash->prev;\n\t\t\t\tif (prev != nullptr) {\t\t// The last hash is part of this object and was not allocated dynamically\n\t\t\t\t\tfor (size_t i = 0; i != hash->capacity; ++i) {\n\t\t\t\t\t\thash->entries[i].~ImplicitProducerKVP();\n\t\t\t\t\t}\n\t\t\t\t\thash->~ImplicitProducerHash();\n\t\t\t\t\t(Traits::free)(hash);\n\t\t\t\t}\n\t\t\t\thash = prev;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Destroy global free list\n\t\tauto block = freeList.head_unsafe();\n\t\twhile (block != nullptr) {\n\t\t\tauto next = block->freeListNext.load(std::memory_order_relaxed);\n\t\t\tif (block->dynamicallyAllocated) {\n\t\t\t\tdestroy(block);\n\t\t\t}\n\t\t\tblock = next;\n\t\t}\n\t\t\n\t\t// Destroy initial free list\n\t\tdestroy_array(initialBlockPool, initialBlockPoolSize);\n\t}\n\n\t// Disable copying and copy assignment\n\tConcurrentQueue(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;\n\tConcurrentQueue& operator=(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\n\t// Moving is supported, but note that it is *not* a thread-safe operation.\n\t// Nobody can use the queue while it's being moved, and the memory effects\n\t// of that move must be propagated to other threads before they can use it.\n\t// Note: When a queue is moved, its tokens are still valid but can only be\n\t// used with the destination queue (i.e. 
semantically they are moved along\n\t// with the queue itself).\n\tConcurrentQueue(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT\n\t\t: producerListTail(other.producerListTail.load(std::memory_order_relaxed)),\n\t\tproducerCount(other.producerCount.load(std::memory_order_relaxed)),\n\t\tinitialBlockPoolIndex(other.initialBlockPoolIndex.load(std::memory_order_relaxed)),\n\t\tinitialBlockPool(other.initialBlockPool),\n\t\tinitialBlockPoolSize(other.initialBlockPoolSize),\n\t\tfreeList(std::move(other.freeList)),\n\t\tnextExplicitConsumerId(other.nextExplicitConsumerId.load(std::memory_order_relaxed)),\n\t\tglobalExplicitConsumerOffset(other.globalExplicitConsumerOffset.load(std::memory_order_relaxed))\n\t{\n\t\t// Move the other one into this, and leave the other one as an empty queue\n\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);\n\t\tpopulate_initial_implicit_producer_hash();\n\t\tswap_implicit_producer_hashes(other);\n\t\t\n\t\tother.producerListTail.store(nullptr, std::memory_order_relaxed);\n\t\tother.producerCount.store(0, std::memory_order_relaxed);\n\t\tother.nextExplicitConsumerId.store(0, std::memory_order_relaxed);\n\t\tother.globalExplicitConsumerOffset.store(0, std::memory_order_relaxed);\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\texplicitProducers.store(other.explicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed);\n\t\tother.explicitProducers.store(nullptr, std::memory_order_relaxed);\n\t\timplicitProducers.store(other.implicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed);\n\t\tother.implicitProducers.store(nullptr, std::memory_order_relaxed);\n#endif\n\t\t\n\t\tother.initialBlockPoolIndex.store(0, std::memory_order_relaxed);\n\t\tother.initialBlockPoolSize = 0;\n\t\tother.initialBlockPool = nullptr;\n\t\t\n\t\treown_producers();\n\t}\n\t\n\tinline ConcurrentQueue& operator=(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\treturn swap_internal(other);\n\t}\n\t\n\t// 
Swaps this queue's state with the other's. Not thread-safe.\n\t// Swapping two queues does not invalidate their tokens, however\n\t// the tokens that were created for one queue must be used with\n\t// only the swapped queue (i.e. the tokens are tied to the\n\t// queue's movable state, not the object itself).\n\tinline void swap(ConcurrentQueue& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tswap_internal(other);\n\t}\n\t\nprivate:\n\tConcurrentQueue& swap_internal(ConcurrentQueue& other)\n\t{\n\t\tif (this == &other) {\n\t\t\treturn *this;\n\t\t}\n\t\t\n\t\tdetails::swap_relaxed(producerListTail, other.producerListTail);\n\t\tdetails::swap_relaxed(producerCount, other.producerCount);\n\t\tdetails::swap_relaxed(initialBlockPoolIndex, other.initialBlockPoolIndex);\n\t\tstd::swap(initialBlockPool, other.initialBlockPool);\n\t\tstd::swap(initialBlockPoolSize, other.initialBlockPoolSize);\n\t\tfreeList.swap(other.freeList);\n\t\tdetails::swap_relaxed(nextExplicitConsumerId, other.nextExplicitConsumerId);\n\t\tdetails::swap_relaxed(globalExplicitConsumerOffset, other.globalExplicitConsumerOffset);\n\t\t\n\t\tswap_implicit_producer_hashes(other);\n\t\t\n\t\treown_producers();\n\t\tother.reown_producers();\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\tdetails::swap_relaxed(explicitProducers, other.explicitProducers);\n\t\tdetails::swap_relaxed(implicitProducers, other.implicitProducers);\n#endif\n\t\t\n\t\treturn *this;\n\t}\n\t\npublic:\n\t// Enqueues a single item (by copying it).\n\t// Allocates memory if required. 
Only fails if memory allocation fails (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,\n\t// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(T const& item)\n\t{\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\treturn inner_enqueue<CanAlloc>(item);\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible).\n\t// Allocates memory if required. Only fails if memory allocation fails (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,\n\t// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(T&& item)\n\t{\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\treturn inner_enqueue<CanAlloc>(std::move(item));\n\t}\n\t\n\t// Enqueues a single item (by copying it) using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails (or\n\t// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(producer_token_t const& token, T const& item)\n\t{\n\t\treturn inner_enqueue<CanAlloc>(token, item);\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible) using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails (or\n\t// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(producer_token_t const& token, T&& item)\n\t{\n\t\treturn inner_enqueue<CanAlloc>(token, std::move(item));\n\t}\n\t\n\t// Enqueues several items.\n\t// Allocates memory if required. 
Only fails if memory allocation fails (or\n\t// implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE\n\t// is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Note: Use std::make_move_iterator if the elements should be moved instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tbool enqueue_bulk(It itemFirst, size_t count)\n\t{\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\treturn inner_enqueue_bulk<CanAlloc>(itemFirst, count);\n\t}\n\t\n\t// Enqueues several items using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails\n\t// (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tbool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)\n\t{\n\t\treturn inner_enqueue_bulk<CanAlloc>(token, itemFirst, count);\n\t}\n\t\n\t// Enqueues a single item (by copying it).\n\t// Does not allocate memory. 
Fails if not enough room to enqueue (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE\n\t// is 0).\n\t// Thread-safe.\n\tinline bool try_enqueue(T const& item)\n\t{\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\treturn inner_enqueue<CannotAlloc>(item);\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible).\n\t// Does not allocate memory (except for one-time implicit producer).\n\t// Fails if not enough room to enqueue (or implicit production is\n\t// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).\n\t// Thread-safe.\n\tinline bool try_enqueue(T&& item)\n\t{\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\treturn inner_enqueue<CannotAlloc>(std::move(item));\n\t}\n\t\n\t// Enqueues a single item (by copying it) using an explicit producer token.\n\t// Does not allocate memory. Fails if not enough room to enqueue.\n\t// Thread-safe.\n\tinline bool try_enqueue(producer_token_t const& token, T const& item)\n\t{\n\t\treturn inner_enqueue<CannotAlloc>(token, item);\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible) using an explicit producer token.\n\t// Does not allocate memory. 
Fails if not enough room to enqueue.\n\t// Thread-safe.\n\tinline bool try_enqueue(producer_token_t const& token, T&& item)\n\t{\n\t\treturn inner_enqueue<CannotAlloc>(token, std::move(item));\n\t}\n\t\n\t// Enqueues several items.\n\t// Does not allocate memory (except for one-time implicit producer).\n\t// Fails if not enough room to enqueue (or implicit production is\n\t// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tbool try_enqueue_bulk(It itemFirst, size_t count)\n\t{\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\treturn inner_enqueue_bulk<CannotAlloc>(itemFirst, count);\n\t}\n\t\n\t// Enqueues several items using an explicit producer token.\n\t// Does not allocate memory. Fails if not enough room to enqueue.\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tbool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)\n\t{\n\t\treturn inner_enqueue_bulk<CannotAlloc>(token, itemFirst, count);\n\t}\n\t\n\t\n\t\n\t// Attempts to dequeue from the queue.\n\t// Returns false if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U>\n\tbool try_dequeue(U& item)\n\t{\n\t\t// Instead of simply trying each producer in turn (which could cause needless contention on the first\n\t\t// producer), we score them heuristically.\n\t\tsize_t nonEmptyCount = 0;\n\t\tProducerBase* best = nullptr;\n\t\tsize_t bestSize = 0;\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); nonEmptyCount < 3 && ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tauto size = ptr->size_approx();\n\t\t\tif (size > 0) {\n\t\t\t\tif (size > bestSize) {\n\t\t\t\t\tbestSize = size;\n\t\t\t\t\tbest = ptr;\n\t\t\t\t}\n\t\t\t\t++nonEmptyCount;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// If there was at least one non-empty queue but it appears empty at the time\n\t\t// we try to dequeue from it, we need to make sure every queue's been tried\n\t\tif (nonEmptyCount > 0) {\n\t\t\tif ((details::likely)(best->dequeue(item))) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\t\tif (ptr != best && ptr->dequeue(item)) {\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Attempts to dequeue from the queue.\n\t// Returns false if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// This differs from the try_dequeue(item) method in that this one does\n\t// not attempt to reduce contention by interleaving the order that producer\n\t// streams are dequeued from. So, using this method can reduce overall throughput\n\t// under contention, but will give more predictable results in single-threaded\n\t// consumer scenarios. This is mostly only useful for internal unit tests.\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U>\n\tbool try_dequeue_non_interleaved(U& item)\n\t{\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tif (ptr->dequeue(item)) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Attempts to dequeue from the queue using an explicit consumer token.\n\t// Returns false if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. Thread-safe.\n\ttemplate<typename U>\n\tbool try_dequeue(consumer_token_t& token, U& item)\n\t{\n\t\t// The idea is roughly as follows:\n\t\t// Every 256 items from one producer, make everyone rotate (increase the global offset) -> this means the highest efficiency consumer dictates the rotation speed of everyone else, more or less\n\t\t// If you see that the global offset has changed, you must reset your consumption counter and move to your designated place\n\t\t// If there's no items where you're supposed to be, keep moving until you find a producer with some items\n\t\t// If the global offset has not changed but you've run out of items to consume, move over from your current position until you find an producer with something in it\n\t\t\n\t\tif (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) {\n\t\t\tif (!update_current_producer_after_rotation(token)) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// If there was at least one non-empty queue but it appears empty at the time\n\t\t// we try to dequeue from it, we need to make sure every queue's been tried\n\t\tif (static_cast<ProducerBase*>(token.currentProducer)->dequeue(item)) {\n\t\t\tif (++token.itemsConsumedFromCurrent == EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) {\n\t\t\t\tglobalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed);\n\t\t\t}\n\t\t\treturn 
true;\n\t\t}\n\t\t\n\t\tauto tail = producerListTail.load(std::memory_order_acquire);\n\t\tauto ptr = static_cast<ProducerBase*>(token.currentProducer)->next_prod();\n\t\tif (ptr == nullptr) {\n\t\t\tptr = tail;\n\t\t}\n\t\twhile (ptr != static_cast<ProducerBase*>(token.currentProducer)) {\n\t\t\tif (ptr->dequeue(item)) {\n\t\t\t\ttoken.currentProducer = ptr;\n\t\t\t\ttoken.itemsConsumedFromCurrent = 1;\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\tptr = ptr->next_prod();\n\t\t\tif (ptr == nullptr) {\n\t\t\t\tptr = tail;\n\t\t\t}\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue.\n\t// Returns the number of items actually dequeued.\n\t// Returns 0 if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It>\n\tsize_t try_dequeue_bulk(It itemFirst, size_t max)\n\t{\n\t\tsize_t count = 0;\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tcount += ptr->dequeue_bulk(itemFirst, max - count);\n\t\t\tif (count == max) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue using an explicit consumer token.\n\t// Returns the number of items actually dequeued.\n\t// Returns 0 if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename It>\n\tsize_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max)\n\t{\n\t\tif (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) {\n\t\t\tif (!update_current_producer_after_rotation(token)) {\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t}\n\t\t\n\t\tsize_t count = static_cast<ProducerBase*>(token.currentProducer)->dequeue_bulk(itemFirst, max);\n\t\tif (count == max) {\n\t\t\tif ((token.itemsConsumedFromCurrent += static_cast<std::uint32_t>(max)) >= EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) {\n\t\t\t\tglobalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed);\n\t\t\t}\n\t\t\treturn max;\n\t\t}\n\t\ttoken.itemsConsumedFromCurrent += static_cast<std::uint32_t>(count);\n\t\tmax -= count;\n\t\t\n\t\tauto tail = producerListTail.load(std::memory_order_acquire);\n\t\tauto ptr = static_cast<ProducerBase*>(token.currentProducer)->next_prod();\n\t\tif (ptr == nullptr) {\n\t\t\tptr = tail;\n\t\t}\n\t\twhile (ptr != static_cast<ProducerBase*>(token.currentProducer)) {\n\t\t\tauto dequeued = ptr->dequeue_bulk(itemFirst, max);\n\t\t\tcount += dequeued;\n\t\t\tif (dequeued != 0) {\n\t\t\t\ttoken.currentProducer = ptr;\n\t\t\t\ttoken.itemsConsumedFromCurrent = static_cast<std::uint32_t>(dequeued);\n\t\t\t}\n\t\t\tif (dequeued == max) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tmax -= dequeued;\n\t\t\tptr = ptr->next_prod();\n\t\t\tif (ptr == nullptr) {\n\t\t\t\tptr = tail;\n\t\t\t}\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t\n\t\n\t// Attempts to dequeue from a specific producer's inner queue.\n\t// If you happen to know which producer you want to dequeue from, this\n\t// is significantly faster than using the general-case try_dequeue methods.\n\t// Returns false if the producer's queue appeared empty at the time it\n\t// was checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U>\n\tinline bool try_dequeue_from_producer(producer_token_t const& producer, U& item)\n\t{\n\t\treturn static_cast<ExplicitProducer*>(producer.producer)->dequeue(item);\n\t}\n\t\n\t// Attempts to dequeue several elements from a specific producer's inner queue.\n\t// Returns the number of items actually dequeued.\n\t// If you happen to know which producer you want to dequeue from, this\n\t// is significantly faster than using the general-case try_dequeue methods.\n\t// Returns 0 if the producer's queue appeared empty at the time it\n\t// was checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It>\n\tinline size_t try_dequeue_bulk_from_producer(producer_token_t const& producer, It itemFirst, size_t max)\n\t{\n\t\treturn static_cast<ExplicitProducer*>(producer.producer)->dequeue_bulk(itemFirst, max);\n\t}\n\t\n\t\n\t// Returns an estimate of the total number of elements currently in the queue. This\n\t// estimate is only accurate if the queue has completely stabilized before it is called\n\t// (i.e. 
all enqueue and dequeue operations have completed and their memory effects are\n\t// visible on the calling thread, and no further operations start while this method is\n\t// being called).\n\t// Thread-safe.\n\tsize_t size_approx() const\n\t{\n\t\tsize_t size = 0;\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tsize += ptr->size_approx();\n\t\t}\n\t\treturn size;\n\t}\n\t\n\t\n\t// Returns true if the underlying atomic variables used by\n\t// the queue are lock-free (they should be on most platforms).\n\t// Thread-safe.\n\tstatic bool is_lock_free()\n\t{\n\t\treturn\n\t\t\tdetails::static_is_lock_free<bool>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<size_t>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<std::uint32_t>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<index_t>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<void*>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<typename details::thread_id_converter<details::thread_id_t>::thread_id_numeric_size_t>::value == 2;\n\t}\n\n\nprivate:\n\tfriend struct ProducerToken;\n\tfriend struct ConsumerToken;\n\tstruct ExplicitProducer;\n\tfriend struct ExplicitProducer;\n\tstruct ImplicitProducer;\n\tfriend struct ImplicitProducer;\n\tfriend class ConcurrentQueueTests;\n\t\t\n\tenum AllocationMode { CanAlloc, CannotAlloc };\n\t\n\t\n\t///////////////////////////////\n\t// Queue methods\n\t///////////////////////////////\n\t\n\ttemplate<AllocationMode canAlloc, typename U>\n\tinline bool inner_enqueue(producer_token_t const& token, U&& element)\n\t{\n\t\treturn static_cast<ExplicitProducer*>(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue<canAlloc>(std::forward<U>(element));\n\t}\n\t\n\ttemplate<AllocationMode canAlloc, typename U>\n\tinline bool inner_enqueue(U&& element)\n\t{\n\t\tauto producer = get_or_add_implicit_producer();\n\t\treturn producer == nullptr ? 
false : producer->ConcurrentQueue::ImplicitProducer::template enqueue<canAlloc>(std::forward<U>(element));\n\t}\n\t\n\ttemplate<AllocationMode canAlloc, typename It>\n\tinline bool inner_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)\n\t{\n\t\treturn static_cast<ExplicitProducer*>(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue_bulk<canAlloc>(itemFirst, count);\n\t}\n\t\n\ttemplate<AllocationMode canAlloc, typename It>\n\tinline bool inner_enqueue_bulk(It itemFirst, size_t count)\n\t{\n\t\tauto producer = get_or_add_implicit_producer();\n\t\treturn producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue_bulk<canAlloc>(itemFirst, count);\n\t}\n\t\n\tinline bool update_current_producer_after_rotation(consumer_token_t& token)\n\t{\n\t\t// Ah, there's been a rotation, figure out where we should be!\n\t\tauto tail = producerListTail.load(std::memory_order_acquire);\n\t\tif (token.desiredProducer == nullptr && tail == nullptr) {\n\t\t\treturn false;\n\t\t}\n\t\tauto prodCount = producerCount.load(std::memory_order_relaxed);\n\t\tauto globalOffset = globalExplicitConsumerOffset.load(std::memory_order_relaxed);\n\t\tif ((details::unlikely)(token.desiredProducer == nullptr)) {\n\t\t\t// Aha, first time we're dequeueing anything.\n\t\t\t// Figure out our local position\n\t\t\t// Note: offset is from start, not end, but we're traversing from end -- subtract from count first\n\t\t\tstd::uint32_t offset = prodCount - 1 - (token.initialOffset % prodCount);\n\t\t\ttoken.desiredProducer = tail;\n\t\t\tfor (std::uint32_t i = 0; i != offset; ++i) {\n\t\t\t\ttoken.desiredProducer = static_cast<ProducerBase*>(token.desiredProducer)->next_prod();\n\t\t\t\tif (token.desiredProducer == nullptr) {\n\t\t\t\t\ttoken.desiredProducer = tail;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tstd::uint32_t delta = globalOffset - token.lastKnownGlobalOffset;\n\t\tif (delta >= prodCount) {\n\t\t\tdelta = delta % 
prodCount;\n\t\t}\n\t\tfor (std::uint32_t i = 0; i != delta; ++i) {\n\t\t\ttoken.desiredProducer = static_cast<ProducerBase*>(token.desiredProducer)->next_prod();\n\t\t\tif (token.desiredProducer == nullptr) {\n\t\t\t\ttoken.desiredProducer = tail;\n\t\t\t}\n\t\t}\n\t\t\n\t\ttoken.lastKnownGlobalOffset = globalOffset;\n\t\ttoken.currentProducer = token.desiredProducer;\n\t\ttoken.itemsConsumedFromCurrent = 0;\n\t\treturn true;\n\t}\n\t\n\t\n\t///////////////////////////\n\t// Free list\n\t///////////////////////////\n\t\n\ttemplate <typename N>\n\tstruct FreeListNode\n\t{\n\t\tFreeListNode() : freeListRefs(0), freeListNext(nullptr) { }\n\t\t\n\t\tstd::atomic<std::uint32_t> freeListRefs;\n\t\tstd::atomic<N*> freeListNext;\n\t};\n\t\n\t// A simple CAS-based lock-free free list. Not the fastest thing in the world under heavy contention, but\n\t// simple and correct (assuming nodes are never freed until after the free list is destroyed), and fairly\n\t// speedy under low contention.\n\ttemplate<typename N>\t\t// N must inherit FreeListNode or have the same fields (and initialization of them)\n\tstruct FreeList\n\t{\n\t\tFreeList() : freeListHead(nullptr) { }\n\t\tFreeList(FreeList&& other) : freeListHead(other.freeListHead.load(std::memory_order_relaxed)) { other.freeListHead.store(nullptr, std::memory_order_relaxed); }\n\t\tvoid swap(FreeList& other) { details::swap_relaxed(freeListHead, other.freeListHead); }\n\t\t\n\t\tFreeList(FreeList const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\tFreeList& operator=(FreeList const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\t\n\t\tinline void add(N* node)\n\t\t{\n#if MCDBGQ_NOLOCKFREE_FREELIST\n\t\t\tdebug::DebugLock lock(mutex);\n#endif\t\t\n\t\t\t// We know that the should-be-on-freelist bit is 0 at this point, so it's safe to\n\t\t\t// set it using a fetch_add\n\t\t\tif (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) {\n\t\t\t\t// Oh look! 
We were the last ones referencing this node, and we know\n\t\t\t\t// we want to add it to the free list, so let's do it!\n\t\t \t\tadd_knowing_refcount_is_zero(node);\n\t\t\t}\n\t\t}\n\t\t\n\t\tinline N* try_get()\n\t\t{\n#if MCDBGQ_NOLOCKFREE_FREELIST\n\t\t\tdebug::DebugLock lock(mutex);\n#endif\t\t\n\t\t\tauto head = freeListHead.load(std::memory_order_acquire);\n\t\t\twhile (head != nullptr) {\n\t\t\t\tauto prevHead = head;\n\t\t\t\tauto refs = head->freeListRefs.load(std::memory_order_relaxed);\n\t\t\t\tif ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1, std::memory_order_acquire, std::memory_order_relaxed)) {\n\t\t\t\t\thead = freeListHead.load(std::memory_order_acquire);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Good, reference count has been incremented (it wasn't at zero), which means we can read the\n\t\t\t\t// next and not worry about it changing between now and the time we do the CAS\n\t\t\t\tauto next = head->freeListNext.load(std::memory_order_relaxed);\n\t\t\t\tif (freeListHead.compare_exchange_strong(head, next, std::memory_order_acquire, std::memory_order_relaxed)) {\n\t\t\t\t\t// Yay, got the node. 
This means it was on the list, which means shouldBeOnFreeList must be false no\n\t\t\t\t\t// matter the refcount (because nobody else knows it's been taken off yet, it can't have been put back on).\n\t\t\t\t\tassert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0);\n\t\t\t\t\t\n\t\t\t\t\t// Decrease refcount twice, once for our ref, and once for the list's ref\n\t\t\t\t\thead->freeListRefs.fetch_sub(2, std::memory_order_release);\n\t\t\t\t\treturn head;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// OK, the head must have changed on us, but we still need to decrease the refcount we increased.\n\t\t\t\t// Note that we don't need to release any memory effects, but we do need to ensure that the reference\n\t\t\t\t// count decrement happens-after the CAS on the head.\n\t\t\t\trefs = prevHead->freeListRefs.fetch_sub(1, std::memory_order_acq_rel);\n\t\t\t\tif (refs == SHOULD_BE_ON_FREELIST + 1) {\n\t\t\t\t\tadd_knowing_refcount_is_zero(prevHead);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\treturn nullptr;\n\t\t}\n\t\t\n\t\t// Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes)\n\t\tN* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); }\n\t\t\n\tprivate:\n\t\tinline void add_knowing_refcount_is_zero(N* node)\n\t\t{\n\t\t\t// Since the refcount is zero, and nobody can increase it once it's zero (except us, and we run\n\t\t\t// only one copy of this method per node at a time, i.e. 
the single thread case), then we know\n\t\t\t// we can safely change the next pointer of the node; however, once the refcount is back above\n\t\t\t// zero, then other threads could increase it (happens under heavy contention, when the refcount\n\t\t\t// goes to zero in between a load and a refcount increment of a node in try_get, then back up to\n\t\t\t// something non-zero, then the refcount increment is done by the other thread) -- so, if the CAS\n\t\t\t// to add the node to the actual list fails, decrease the refcount and leave the add operation to\n\t\t\t// the next thread who puts the refcount back at zero (which could be us, hence the loop).\n\t\t\tauto head = freeListHead.load(std::memory_order_relaxed);\n\t\t\twhile (true) {\n\t\t\t\tnode->freeListNext.store(head, std::memory_order_relaxed);\n\t\t\t\tnode->freeListRefs.store(1, std::memory_order_release);\n\t\t\t\tif (!freeListHead.compare_exchange_strong(head, node, std::memory_order_release, std::memory_order_relaxed)) {\n\t\t\t\t\t// Hmm, the add failed, but we can only try again when the refcount goes back to zero\n\t\t\t\t\tif (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) {\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t\t\n\tprivate:\n\t\t// Implemented like a stack, but where node order doesn't matter (nodes are inserted out of order under contention)\n\t\tstd::atomic<N*> freeListHead;\n\t\n\tstatic const std::uint32_t REFS_MASK = 0x7FFFFFFF;\n\tstatic const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000;\n\t\t\n#if MCDBGQ_NOLOCKFREE_FREELIST\n\t\tdebug::DebugMutex mutex;\n#endif\n\t};\n\t\n\t\n\t///////////////////////////\n\t// Block\n\t///////////////////////////\n\t\n\tenum InnerQueueContext { implicit_context = 0, explicit_context = 1 };\n\t\n\tstruct Block\n\t{\n\t\tBlock()\n\t\t\t: next(nullptr), elementsCompletelyDequeued(0), freeListRefs(0), freeListNext(nullptr), shouldBeOnFreeList(false), 
dynamicallyAllocated(true)\n\t\t{\n#if MCDBGQ_TRACKMEM\n\t\t\towner = nullptr;\n#endif\n\t\t}\n\t\t\n\t\ttemplate<InnerQueueContext context>\n\t\tinline bool is_empty() const\n\t\t{\n\t\t\tif (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Check flags\n\t\t\t\tfor (size_t i = 0; i < BLOCK_SIZE; ++i) {\n\t\t\t\t\tif (!emptyFlags[i].load(std::memory_order_relaxed)) {\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Aha, empty; make sure we have all other memory effects that happened before the empty flags were set\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Check counter\n\t\t\t\tif (elementsCompletelyDequeued.load(std::memory_order_relaxed) == BLOCK_SIZE) {\n\t\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\tassert(elementsCompletelyDequeued.load(std::memory_order_relaxed) <= BLOCK_SIZE);\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Returns true if the block is now empty (does not apply in explicit context)\n\t\ttemplate<InnerQueueContext context>\n\t\tinline bool set_empty(index_t i)\n\t\t{\n\t\t\tif (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Set flag\n\t\t\t\tassert(!emptyFlags[BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1))].load(std::memory_order_relaxed));\n\t\t\t\temptyFlags[BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1))].store(true, std::memory_order_release);\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Increment counter\n\t\t\t\tauto prevVal = elementsCompletelyDequeued.fetch_add(1, std::memory_order_release);\n\t\t\t\tassert(prevVal < BLOCK_SIZE);\n\t\t\t\treturn prevVal == BLOCK_SIZE - 1;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Sets multiple contiguous item statuses to 'empty' (assumes no wrapping and count > 0).\n\t\t// 
Returns true if the block is now empty (does not apply in explicit context).\n\t\ttemplate<InnerQueueContext context>\n\t\tinline bool set_many_empty(index_t i, size_t count)\n\t\t{\n\t\t\tif (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Set flags\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_release);\n\t\t\t\ti = BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1)) - count + 1;\n\t\t\t\tfor (size_t j = 0; j != count; ++j) {\n\t\t\t\t\tassert(!emptyFlags[i + j].load(std::memory_order_relaxed));\n\t\t\t\t\temptyFlags[i + j].store(true, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Increment counter\n\t\t\t\tauto prevVal = elementsCompletelyDequeued.fetch_add(count, std::memory_order_release);\n\t\t\t\tassert(prevVal + count <= BLOCK_SIZE);\n\t\t\t\treturn prevVal + count == BLOCK_SIZE;\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<InnerQueueContext context>\n\t\tinline void set_all_empty()\n\t\t{\n\t\t\tif (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Set all flags\n\t\t\t\tfor (size_t i = 0; i != BLOCK_SIZE; ++i) {\n\t\t\t\t\temptyFlags[i].store(true, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Reset counter\n\t\t\t\telementsCompletelyDequeued.store(BLOCK_SIZE, std::memory_order_relaxed);\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<InnerQueueContext context>\n\t\tinline void reset_empty()\n\t\t{\n\t\t\tif (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Reset flags\n\t\t\t\tfor (size_t i = 0; i != BLOCK_SIZE; ++i) {\n\t\t\t\t\temptyFlags[i].store(false, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Reset counter\n\t\t\t\telementsCompletelyDequeued.store(0, std::memory_order_relaxed);\n\t\t\t}\n\t\t}\n\t\t\n\t\tinline T* operator[](index_t idx) MOODYCAMEL_NOEXCEPT { return 
static_cast<T*>(static_cast<void*>(elements)) + static_cast<size_t>(idx & static_cast<index_t>(BLOCK_SIZE - 1)); }\n\t\tinline T const* operator[](index_t idx) const MOODYCAMEL_NOEXCEPT { return static_cast<T const*>(static_cast<void const*>(elements)) + static_cast<size_t>(idx & static_cast<index_t>(BLOCK_SIZE - 1)); }\n\t\t\n\tprivate:\n\t\t// IMPORTANT: This must be the first member in Block, so that if T depends on the alignment of\n\t\t// addresses returned by malloc, that alignment will be preserved. Apparently clang actually\n\t\t// generates code that uses this assumption for AVX instructions in some cases. Ideally, we\n\t\t// should also align Block to the alignment of T in case it's higher than malloc's 16-byte\n\t\t// alignment, but this is hard to do in a cross-platform way. Assert for this case:\n\t\tstatic_assert(std::alignment_of<T>::value <= std::alignment_of<details::max_align_t>::value, \"The queue does not support super-aligned types at this time\");\n\t\t// Additionally, we need the alignment of Block itself to be a multiple of max_align_t since\n\t\t// otherwise the appropriate padding will not be added at the end of Block in order to make\n\t\t// arrays of Blocks all be properly aligned (not just the first one). We use a union to force\n\t\t// this.\n\t\tunion {\n\t\t\tchar elements[sizeof(T) * BLOCK_SIZE];\n\t\t\tdetails::max_align_t dummy;\n\t\t};\n\tpublic:\n\t\tBlock* next;\n\t\tstd::atomic<size_t> elementsCompletelyDequeued;\n\t\tstd::atomic<bool> emptyFlags[BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD ? 
BLOCK_SIZE : 1];\n\tpublic:\n\t\tstd::atomic<std::uint32_t> freeListRefs;\n\t\tstd::atomic<Block*> freeListNext;\n\t\tstd::atomic<bool> shouldBeOnFreeList;\n\t\tbool dynamicallyAllocated;\t\t// Perhaps a better name for this would be 'isNotPartOfInitialBlockPool'\n\t\t\n#if MCDBGQ_TRACKMEM\n\t\tvoid* owner;\n#endif\n\t};\n\tstatic_assert(std::alignment_of<Block>::value >= std::alignment_of<details::max_align_t>::value, \"Internal error: Blocks must be at least as aligned as the type they are wrapping\");\n\n\n#if MCDBGQ_TRACKMEM\npublic:\n\tstruct MemStats;\nprivate:\n#endif\n\t\n\t///////////////////////////\n\t// Producer base\n\t///////////////////////////\n\t\n\tstruct ProducerBase : public details::ConcurrentQueueProducerTypelessBase\n\t{\n\t\tProducerBase(ConcurrentQueue* parent_, bool isExplicit_) :\n\t\t\ttailIndex(0),\n\t\t\theadIndex(0),\n\t\t\tdequeueOptimisticCount(0),\n\t\t\tdequeueOvercommit(0),\n\t\t\ttailBlock(nullptr),\n\t\t\tisExplicit(isExplicit_),\n\t\t\tparent(parent_)\n\t\t{\n\t\t}\n\t\t\n\t\tvirtual ~ProducerBase() { };\n\t\t\n\t\ttemplate<typename U>\n\t\tinline bool dequeue(U& element)\n\t\t{\n\t\t\tif (isExplicit) {\n\t\t\t\treturn static_cast<ExplicitProducer*>(this)->dequeue(element);\n\t\t\t}\n\t\t\telse {\n\t\t\t\treturn static_cast<ImplicitProducer*>(this)->dequeue(element);\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<typename It>\n\t\tinline size_t dequeue_bulk(It& itemFirst, size_t max)\n\t\t{\n\t\t\tif (isExplicit) {\n\t\t\t\treturn static_cast<ExplicitProducer*>(this)->dequeue_bulk(itemFirst, max);\n\t\t\t}\n\t\t\telse {\n\t\t\t\treturn static_cast<ImplicitProducer*>(this)->dequeue_bulk(itemFirst, max);\n\t\t\t}\n\t\t}\n\t\t\n\t\tinline ProducerBase* next_prod() const { return static_cast<ProducerBase*>(next); }\n\t\t\n\t\tinline size_t size_approx() const\n\t\t{\n\t\t\tauto tail = tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto head = headIndex.load(std::memory_order_relaxed);\n\t\t\treturn details::circular_less_than(head, tail) 
? static_cast<size_t>(tail - head) : 0;\n\t\t}\n\t\t\n\t\tinline index_t getTail() const { return tailIndex.load(std::memory_order_relaxed); }\n\tprotected:\n\t\tstd::atomic<index_t> tailIndex;\t\t// Where to enqueue to next\n\t\tstd::atomic<index_t> headIndex;\t\t// Where to dequeue from next\n\t\t\n\t\tstd::atomic<index_t> dequeueOptimisticCount;\n\t\tstd::atomic<index_t> dequeueOvercommit;\n\t\t\n\t\tBlock* tailBlock;\n\t\t\n\tpublic:\n\t\tbool isExplicit;\n\t\tConcurrentQueue* parent;\n\t\t\n\tprotected:\n#if MCDBGQ_TRACKMEM\n\t\tfriend struct MemStats;\n#endif\n\t};\n\t\n\t\n\t///////////////////////////\n\t// Explicit queue\n\t///////////////////////////\n\t\t\n\tstruct ExplicitProducer : public ProducerBase\n\t{\n\t\texplicit ExplicitProducer(ConcurrentQueue* parent) :\n\t\t\tProducerBase(parent, true),\n\t\t\tblockIndex(nullptr),\n\t\t\tpr_blockIndexSlotsUsed(0),\n\t\t\tpr_blockIndexSize(EXPLICIT_INITIAL_INDEX_SIZE >> 1),\n\t\t\tpr_blockIndexFront(0),\n\t\t\tpr_blockIndexEntries(nullptr),\n\t\t\tpr_blockIndexRaw(nullptr)\n\t\t{\n\t\t\tsize_t poolBasedIndexSize = details::ceil_to_pow_2(parent->initialBlockPoolSize) >> 1;\n\t\t\tif (poolBasedIndexSize > pr_blockIndexSize) {\n\t\t\t\tpr_blockIndexSize = poolBasedIndexSize;\n\t\t\t}\n\t\t\t\n\t\t\tnew_block_index(0);\t\t// This creates an index with double the number of current entries, i.e. 
EXPLICIT_INITIAL_INDEX_SIZE\n\t\t}\n\t\t\n\t\t~ExplicitProducer()\n\t\t{\n\t\t\t// Destruct any elements not yet dequeued.\n\t\t\t// Since we're in the destructor, we can assume all elements\n\t\t\t// are either completely dequeued or completely not (no halfways).\n\t\t\tif (this->tailBlock != nullptr) {\t\t// Note this means there must be a block index too\n\t\t\t\t// First find the block that's partially dequeued, if any\n\t\t\t\tBlock* halfDequeuedBlock = nullptr;\n\t\t\t\tif ((this->headIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1)) != 0) {\n\t\t\t\t\t// The head's not on a block boundary, meaning a block somewhere is partially dequeued\n\t\t\t\t\t// (or the head block is the tail block and was fully dequeued, but the head/tail are still not on a boundary)\n\t\t\t\t\tsize_t i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & (pr_blockIndexSize - 1);\n\t\t\t\t\twhile (details::circular_less_than<index_t>(pr_blockIndexEntries[i].base + BLOCK_SIZE, this->headIndex.load(std::memory_order_relaxed))) {\n\t\t\t\t\t\ti = (i + 1) & (pr_blockIndexSize - 1);\n\t\t\t\t\t}\n\t\t\t\t\tassert(details::circular_less_than<index_t>(pr_blockIndexEntries[i].base, this->headIndex.load(std::memory_order_relaxed)));\n\t\t\t\t\thalfDequeuedBlock = pr_blockIndexEntries[i].block;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Start at the head block (note the first line in the loop gives us the head from the tail on the first iteration)\n\t\t\t\tauto block = this->tailBlock;\n\t\t\t\tdo {\n\t\t\t\t\tblock = block->next;\n\t\t\t\t\tif (block->ConcurrentQueue::Block::template is_empty<explicit_context>()) {\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tsize_t i = 0;\t// Offset into block\n\t\t\t\t\tif (block == halfDequeuedBlock) {\n\t\t\t\t\t\ti = static_cast<size_t>(this->headIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1));\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Walk through all the items in the block; if this is the tail 
block, we need to stop when we reach the tail index\n\t\t\t\t\tauto lastValidIndex = (this->tailIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 ? BLOCK_SIZE : static_cast<size_t>(this->tailIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1));\n\t\t\t\t\twhile (i != BLOCK_SIZE && (block != this->tailBlock || i != lastValidIndex)) {\n\t\t\t\t\t\t(*block)[i++]->~T();\n\t\t\t\t\t}\n\t\t\t\t} while (block != this->tailBlock);\n\t\t\t}\n\t\t\t\n\t\t\t// Destroy all blocks that we own\n\t\t\tif (this->tailBlock != nullptr) {\n\t\t\t\tauto block = this->tailBlock;\n\t\t\t\tdo {\n\t\t\t\t\tauto nextBlock = block->next;\n\t\t\t\t\tif (block->dynamicallyAllocated) {\n\t\t\t\t\t\tdestroy(block);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\n\t\t\t\t\t}\n\t\t\t\t\tblock = nextBlock;\n\t\t\t\t} while (block != this->tailBlock);\n\t\t\t}\n\t\t\t\n\t\t\t// Destroy the block indices\n\t\t\tauto header = static_cast<BlockIndexHeader*>(pr_blockIndexRaw);\n\t\t\twhile (header != nullptr) {\n\t\t\t\tauto prev = static_cast<BlockIndexHeader*>(header->prev);\n\t\t\t\theader->~BlockIndexHeader();\n\t\t\t\t(Traits::free)(header);\n\t\t\t\theader = prev;\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<AllocationMode allocMode, typename U>\n\t\tinline bool enqueue(U&& element)\n\t\t{\n\t\t\tindex_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tindex_t newTailIndex = 1 + currentTailIndex;\n\t\t\tif ((currentTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {\n\t\t\t\t// We reached the end of a block, start a new one\n\t\t\t\tauto startBlock = this->tailBlock;\n\t\t\t\tauto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed;\n\t\t\t\tif (this->tailBlock != nullptr && this->tailBlock->next->ConcurrentQueue::Block::template is_empty<explicit_context>()) {\n\t\t\t\t\t// We can re-use the block ahead of us, it's empty!\t\t\t\t\t\n\t\t\t\t\tthis->tailBlock = 
this->tailBlock->next;\n\t\t\t\t\tthis->tailBlock->ConcurrentQueue::Block::template reset_empty<explicit_context>();\n\t\t\t\t\t\n\t\t\t\t\t// We'll put the block on the block index (guaranteed to be room since we're conceptually removing the\n\t\t\t\t\t// last block from it first -- except instead of removing then adding, we can just overwrite).\n\t\t\t\t\t// Note that there must be a valid block index here, since even if allocation failed in the ctor,\n\t\t\t\t\t// it would have been re-attempted when adding the first block to the queue; since there is such\n\t\t\t\t\t// a block, a block index must have been successfully allocated.\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Whatever head value we see here is >= the last value we saw here (relatively),\n\t\t\t\t\t// and <= its current value. Since we have the most recent tail, the head must be\n\t\t\t\t\t// <= to it.\n\t\t\t\t\tauto head = this->headIndex.load(std::memory_order_relaxed);\n\t\t\t\t\tassert(!details::circular_less_than<index_t>(currentTailIndex, head));\n\t\t\t\t\tif (!details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE)\n\t\t\t\t\t\t|| (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) {\n\t\t\t\t\t\t// We can't enqueue in another block because there's not enough leeway -- the\n\t\t\t\t\t\t// tail could surpass the head by the time the block fills up! (Or we'll exceed\n\t\t\t\t\t\t// the size limit, if the second part of the condition was true.)\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\t// We're going to need a new block; check that the block index has room\n\t\t\t\t\tif (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize) {\n\t\t\t\t\t\t// Hmm, the circular block index is already full -- we'll need\n\t\t\t\t\t\t// to allocate a new index. 
Note pr_blockIndexRaw can only be nullptr if\n\t\t\t\t\t\t// the initial allocation failed in the constructor.\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (allocMode == CannotAlloc || !new_block_index(pr_blockIndexSlotsUsed)) {\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Insert a new block in the circular linked list\n\t\t\t\t\tauto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();\n\t\t\t\t\tif (newBlock == nullptr) {\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n#if MCDBGQ_TRACKMEM\n\t\t\t\t\tnewBlock->owner = this;\n#endif\n\t\t\t\t\tnewBlock->ConcurrentQueue::Block::template reset_empty<explicit_context>();\n\t\t\t\t\tif (this->tailBlock == nullptr) {\n\t\t\t\t\t\tnewBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tnewBlock->next = this->tailBlock->next;\n\t\t\t\t\t\tthis->tailBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\tthis->tailBlock = newBlock;\n\t\t\t\t\t++pr_blockIndexSlotsUsed;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward<U>(element)))) {\n\t\t\t\t\t// The constructor may throw. We want the element not to appear in the queue in\n\t\t\t\t\t// that case (without corrupting the queue):\n\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));\n\t\t\t\t\t}\n\t\t\t\t\tMOODYCAMEL_CATCH (...) {\n\t\t\t\t\t\t// Revert change to the current block, but leave the new block available\n\t\t\t\t\t\t// for next time\n\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? 
this->tailBlock : startBlock;\n\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t(void)startBlock;\n\t\t\t\t\t(void)originalBlockIndexSlotsUsed;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Add block to block index\n\t\t\t\tauto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];\n\t\t\t\tentry.base = currentTailIndex;\n\t\t\t\tentry.block = this->tailBlock;\n\t\t\t\tblockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release);\n\t\t\t\tpr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);\n\t\t\t\t\n\t\t\t\tif (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward<U>(element)))) {\n\t\t\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t// Enqueue\n\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));\n\t\t\t\n\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n\t\t\n\t\ttemplate<typename U>\n\t\tbool dequeue(U& element)\n\t\t{\n\t\t\tauto tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);\n\t\t\tif (details::circular_less_than<index_t>(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) {\n\t\t\t\t// Might be something to dequeue, let's give it a try\n\t\t\t\t\n\t\t\t\t// Note that this if is purely for performance purposes in the common case when the queue is\n\t\t\t\t// empty and the values are eventually consistent -- we may enter here spuriously.\n\t\t\t\t\n\t\t\t\t// Note that whatever the values of overcommit and tail are, they are not going to change (unless we\n\t\t\t\t// change them) and must be the same value at this point (inside the if) as when the if condition was\n\t\t\t\t// evaluated.\n\n\t\t\t\t// We insert an acquire fence here to synchronize-with the release upon 
incrementing dequeueOvercommit below.\n\t\t\t\t// This ensures that whatever the value we got loaded into overcommit, the load of dequeueOptisticCount in\n\t\t\t\t// the fetch_add below will result in a value at least as recent as that (and therefore at least as large).\n\t\t\t\t// Note that I believe a compiler (signal) fence here would be sufficient due to the nature of fetch_add (all\n\t\t\t\t// read-modify-write operations are guaranteed to work on the latest value in the modification order), but\n\t\t\t\t// unfortunately that can't be shown to be correct using only the C++11 standard.\n\t\t\t\t// See http://stackoverflow.com/questions/18223161/what-are-the-c11-memory-ordering-guarantees-in-this-corner-case\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\n\t\t\t\t// Increment optimistic counter, then check if it went over the boundary\n\t\t\t\tauto myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\n\t\t\t\t// Note that since dequeueOvercommit must be <= dequeueOptimisticCount (because dequeueOvercommit is only ever\n\t\t\t\t// incremented after dequeueOptimisticCount -- this is enforced in the `else` block below), and since we now\n\t\t\t\t// have a version of dequeueOptimisticCount that is at least as recent as overcommit (due to the release upon\n\t\t\t\t// incrementing dequeueOvercommit and the acquire above that synchronizes with it), overcommit <= myDequeueCount.\n\t\t\t\t// However, we can't assert this since both dequeueOptimisticCount and dequeueOvercommit may (independently)\n\t\t\t\t// overflow; in such a case, though, the logic still holds since the difference between the two is maintained.\n\t\t\t\t\n\t\t\t\t// Note that we reload tail here in case it changed; it will be the same value as before or greater, since\n\t\t\t\t// this load is sequenced after (happens after) the earlier load above. 
This is supported by read-read\n\t\t\t\t// coherency (as defined in the standard), explained here: http://en.cppreference.com/w/cpp/atomic/memory_order\n\t\t\t\ttail = this->tailIndex.load(std::memory_order_acquire);\n\t\t\t\tif ((details::likely)(details::circular_less_than<index_t>(myDequeueCount - overcommit, tail))) {\n\t\t\t\t\t// Guaranteed to be at least one element to dequeue!\n\t\t\t\t\t\n\t\t\t\t\t// Get the index. Note that since there's guaranteed to be at least one element, this\n\t\t\t\t\t// will never exceed tail. We need to do an acquire-release fence here since it's possible\n\t\t\t\t\t// that whatever condition got us to this point was for an earlier enqueued element (that\n\t\t\t\t\t// we already see the memory effects for), but that by the time we increment somebody else\n\t\t\t\t\t// has incremented it, and we need to see the memory effects for *that* element, which is\n\t\t\t\t\t// in such a case is necessarily visible on the thread that incremented it in the first\n\t\t\t\t\t// place with the more current condition (they must have acquired a tail that is at least\n\t\t\t\t\t// as recent).\n\t\t\t\t\tauto index = this->headIndex.fetch_add(1, std::memory_order_acq_rel);\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t// Determine which block the element is in\n\t\t\t\t\t\n\t\t\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_acquire);\n\t\t\t\t\tauto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire);\n\t\t\t\t\t\n\t\t\t\t\t// We need to be careful here about subtracting and dividing because of index wrap-around.\n\t\t\t\t\t// When an index wraps, we need to preserve the sign of the offset when dividing it by the\n\t\t\t\t\t// block size (in order to get a correct signed block count offset in all cases):\n\t\t\t\t\tauto headBase = localBlockIndex->entries[localBlockIndexHead].base;\n\t\t\t\t\tauto blockBaseIndex = index & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\t\t\tauto offset = 
static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(blockBaseIndex - headBase) / BLOCK_SIZE);\n\t\t\t\t\tauto block = localBlockIndex->entries[(localBlockIndexHead + offset) & (localBlockIndex->size - 1)].block;\n\t\t\t\t\t\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\tif (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) {\n\t\t\t\t\t\t// Make sure the element is still fully dequeued and destroyed even if the assignment\n\t\t\t\t\t\t// throws\n\t\t\t\t\t\tstruct Guard {\n\t\t\t\t\t\t\tBlock* block;\n\t\t\t\t\t\t\tindex_t index;\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t~Guard()\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t(*block)[index]->~T();\n\t\t\t\t\t\t\t\tblock->ConcurrentQueue::Block::template set_empty<explicit_context>(index);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} guard = { block, index };\n\t\t\t\t\t\t\n\t\t\t\t\t\telement = std::move(el);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\telement = std::move(el);\n\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\tblock->ConcurrentQueue::Block::template set_empty<explicit_context>(index);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent\n\t\t\t\t\tthis->dequeueOvercommit.fetch_add(1, std::memory_order_release);\t\t// Release so that the fetch_add on dequeueOptimisticCount is guaranteed to happen before this write\n\t\t\t\t}\n\t\t\t}\n\t\t\n\t\t\treturn false;\n\t\t}\n\t\t\n\t\ttemplate<AllocationMode allocMode, typename It>\n\t\tbool enqueue_bulk(It itemFirst, size_t count)\n\t\t{\n\t\t\t// First, we need to make sure we have enough room to enqueue all of the elements;\n\t\t\t// this means pre-allocating blocks and putting them in the block index (but only if\n\t\t\t// all the allocations succeeded).\n\t\t\tindex_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto startBlock = this->tailBlock;\n\t\t\tauto originalBlockIndexFront = 
pr_blockIndexFront;\n\t\t\tauto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed;\n\t\t\t\n\t\t\tBlock* firstAllocatedBlock = nullptr;\n\t\t\t\n\t\t\t// Figure out how many blocks we'll need to allocate, and do so\n\t\t\tsize_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1));\n\t\t\tindex_t currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\tif (blockBaseDiff > 0) {\n\t\t\t\t// Allocate as many blocks as possible from ahead\n\t\t\t\twhile (blockBaseDiff > 0 && this->tailBlock != nullptr && this->tailBlock->next != firstAllocatedBlock && this->tailBlock->next->ConcurrentQueue::Block::template is_empty<explicit_context>()) {\n\t\t\t\t\tblockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\n\t\t\t\t\tthis->tailBlock = this->tailBlock->next;\n\t\t\t\t\tfirstAllocatedBlock = firstAllocatedBlock == nullptr ? 
this->tailBlock : firstAllocatedBlock;\n\t\t\t\t\t\n\t\t\t\t\tauto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];\n\t\t\t\t\tentry.base = currentTailIndex;\n\t\t\t\t\tentry.block = this->tailBlock;\n\t\t\t\t\tpr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Now allocate as many blocks as necessary from the block pool\n\t\t\t\twhile (blockBaseDiff > 0) {\n\t\t\t\t\tblockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\n\t\t\t\t\tauto head = this->headIndex.load(std::memory_order_relaxed);\n\t\t\t\t\tassert(!details::circular_less_than<index_t>(currentTailIndex, head));\n\t\t\t\t\tbool full = !details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head));\n\t\t\t\t\tif (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize || full) {\n\t\t\t\t\t\tif (allocMode == CannotAlloc || full || !new_block_index(originalBlockIndexSlotsUsed)) {\n\t\t\t\t\t\t\t// Failed to allocate, undo changes (but keep injected blocks)\n\t\t\t\t\t\t\tpr_blockIndexFront = originalBlockIndexFront;\n\t\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock;\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\t// pr_blockIndexFront is updated inside new_block_index, so we need to\n\t\t\t\t\t\t// update our fallback value too (since we keep the new index even if we\n\t\t\t\t\t\t// later fail)\n\t\t\t\t\t\toriginalBlockIndexFront = originalBlockIndexSlotsUsed;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Insert a new block in the circular linked list\n\t\t\t\t\tauto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();\n\t\t\t\t\tif (newBlock == nullptr) {\n\t\t\t\t\t\tpr_blockIndexFront = originalBlockIndexFront;\n\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock;\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\t\n#if MCDBGQ_TRACKMEM\n\t\t\t\t\tnewBlock->owner = this;\n#endif\n\t\t\t\t\tnewBlock->ConcurrentQueue::Block::template set_all_empty<explicit_context>();\n\t\t\t\t\tif (this->tailBlock == nullptr) {\n\t\t\t\t\t\tnewBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tnewBlock->next = this->tailBlock->next;\n\t\t\t\t\t\tthis->tailBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\tthis->tailBlock = newBlock;\n\t\t\t\t\tfirstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock;\n\t\t\t\t\t\n\t\t\t\t\t++pr_blockIndexSlotsUsed;\n\t\t\t\t\t\n\t\t\t\t\tauto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];\n\t\t\t\t\tentry.base = currentTailIndex;\n\t\t\t\t\tentry.block = this->tailBlock;\n\t\t\t\t\tpr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Excellent, all allocations succeeded. 
Reset each block's emptiness before we fill them up, and\n\t\t\t\t// publish the new block index front\n\t\t\t\tauto block = firstAllocatedBlock;\n\t\t\t\twhile (true) {\n\t\t\t\t\tblock->ConcurrentQueue::Block::template reset_empty<explicit_context>();\n\t\t\t\t\tif (block == this->tailBlock) {\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\tblock = block->next;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) {\n\t\t\t\t\tblockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t// Enqueue, one block at a time\n\t\t\tindex_t newTailIndex = startTailIndex + static_cast<index_t>(count);\n\t\t\tcurrentTailIndex = startTailIndex;\n\t\t\tauto endBlock = this->tailBlock;\n\t\t\tthis->tailBlock = startBlock;\n\t\t\tassert((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0);\n\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) {\n\t\t\t\tthis->tailBlock = firstAllocatedBlock;\n\t\t\t}\n\t\t\twhile (true) {\n\t\t\t\tauto stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\tif (details::circular_less_than<index_t>(newTailIndex, stopIndex)) {\n\t\t\t\t\tstopIndex = newTailIndex;\n\t\t\t\t}\n\t\t\t\tif (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) {\n\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\t\t// Must use copy constructor even if move constructor is available\n\t\t\t\t\t\t\t// because we may have to revert if there's an 
exception.\n\t\t\t\t\t\t\t// Sorry about the horrible templated next line, but it was the only way\n\t\t\t\t\t\t\t// to disable moving *at compile time*, which is important because a type\n\t\t\t\t\t\t\t// may only define a (noexcept) move constructor, and so calls to the\n\t\t\t\t\t\t\t// cctor will not compile, even if they are in an if branch that will never\n\t\t\t\t\t\t\t// be executed\n\t\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<(bool)!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst));\n\t\t\t\t\t\t\t++currentTailIndex;\n\t\t\t\t\t\t\t++itemFirst;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tMOODYCAMEL_CATCH (...) {\n\t\t\t\t\t\t// Oh dear, an exception's been thrown -- destroy the elements that\n\t\t\t\t\t\t// were enqueued so far and revert the entire bulk operation (we'll keep\n\t\t\t\t\t\t// any allocated blocks in our linked list for later, though).\n\t\t\t\t\t\tauto constructedStopIndex = currentTailIndex;\n\t\t\t\t\t\tauto lastBlockEnqueued = this->tailBlock;\n\t\t\t\t\t\t\n\t\t\t\t\t\tpr_blockIndexFront = originalBlockIndexFront;\n\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock;\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (!details::is_trivially_destructible<T>::value) {\n\t\t\t\t\t\t\tauto block = startBlock;\n\t\t\t\t\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {\n\t\t\t\t\t\t\t\tblock = firstAllocatedBlock;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcurrentTailIndex = startTailIndex;\n\t\t\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t\t\tstopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\t\tif (details::circular_less_than<index_t>(constructedStopIndex, stopIndex)) {\n\t\t\t\t\t\t\t\t\tstopIndex = constructedStopIndex;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\t\t\t\t(*block)[currentTailIndex++]->~T();\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif (block == lastBlockEnqueued) {\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tblock = block->next;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (this->tailBlock == endBlock) {\n\t\t\t\t\tassert(currentTailIndex == newTailIndex);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tthis->tailBlock = this->tailBlock->next;\n\t\t\t}\n\t\t\t\n\t\t\tif (!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst))) && firstAllocatedBlock != nullptr) {\n\t\t\t\tblockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);\n\t\t\t}\n\t\t\t\n\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n\t\t\n\t\ttemplate<typename It>\n\t\tsize_t dequeue_bulk(It& itemFirst, size_t max)\n\t\t{\n\t\t\tauto tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);\n\t\t\tauto desiredCount = static_cast<size_t>(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - 
overcommit));\n\t\t\tif (details::circular_less_than<size_t>(0, desiredCount)) {\n\t\t\t\tdesiredCount = desiredCount < max ? desiredCount : max;\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\n\t\t\t\tauto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed);;\n\t\t\t\t\n\t\t\t\ttail = this->tailIndex.load(std::memory_order_acquire);\n\t\t\t\tauto actualCount = static_cast<size_t>(tail - (myDequeueCount - overcommit));\n\t\t\t\tif (details::circular_less_than<size_t>(0, actualCount)) {\n\t\t\t\t\tactualCount = desiredCount < actualCount ? desiredCount : actualCount;\n\t\t\t\t\tif (actualCount < desiredCount) {\n\t\t\t\t\t\tthis->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Get the first index. Note that since there's guaranteed to be at least actualCount elements, this\n\t\t\t\t\t// will never exceed tail.\n\t\t\t\t\tauto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel);\n\t\t\t\t\t\n\t\t\t\t\t// Determine which block the first element is in\n\t\t\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_acquire);\n\t\t\t\t\tauto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire);\n\t\t\t\t\t\n\t\t\t\t\tauto headBase = localBlockIndex->entries[localBlockIndexHead].base;\n\t\t\t\t\tauto firstBlockBaseIndex = firstIndex & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\t\t\tauto offset = static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(firstBlockBaseIndex - headBase) / BLOCK_SIZE);\n\t\t\t\t\tauto indexIndex = (localBlockIndexHead + offset) & (localBlockIndex->size - 1);\n\t\t\t\t\t\n\t\t\t\t\t// Iterate the blocks and dequeue\n\t\t\t\t\tauto index = firstIndex;\n\t\t\t\t\tdo {\n\t\t\t\t\t\tauto firstIndexInBlock = index;\n\t\t\t\t\t\tauto endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + 
static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\tendIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;\n\t\t\t\t\t\tauto block = localBlockIndex->entries[indexIndex].block;\n\t\t\t\t\t\tif (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) {\n\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\t\t\t*itemFirst++ = std::move(el);\n\t\t\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\t\t\t\t*itemFirst = std::move(el);\n\t\t\t\t\t\t\t\t\t++itemFirst;\n\t\t\t\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tMOODYCAMEL_CATCH (...) {\n\t\t\t\t\t\t\t\t// It's too late to revert the dequeue, but we can make sure that all\n\t\t\t\t\t\t\t\t// the dequeued objects are properly destroyed and the block index\n\t\t\t\t\t\t\t\t// (and empty count) are properly updated before we propagate the exception\n\t\t\t\t\t\t\t\tdo {\n\t\t\t\t\t\t\t\t\tblock = localBlockIndex->entries[indexIndex].block;\n\t\t\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\t\t\t(*block)[index++]->~T();\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tblock->ConcurrentQueue::Block::template set_many_empty<explicit_context>(firstIndexInBlock, static_cast<size_t>(endIndex - firstIndexInBlock));\n\t\t\t\t\t\t\t\t\tindexIndex = (indexIndex + 1) & (localBlockIndex->size - 1);\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tfirstIndexInBlock = index;\n\t\t\t\t\t\t\t\t\tendIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\t\t\tendIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), 
endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;\n\t\t\t\t\t\t\t\t} while (index != firstIndex + actualCount);\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblock->ConcurrentQueue::Block::template set_many_empty<explicit_context>(firstIndexInBlock, static_cast<size_t>(endIndex - firstIndexInBlock));\n\t\t\t\t\t\tindexIndex = (indexIndex + 1) & (localBlockIndex->size - 1);\n\t\t\t\t\t} while (index != firstIndex + actualCount);\n\t\t\t\t\t\n\t\t\t\t\treturn actualCount;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent\n\t\t\t\t\tthis->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\treturn 0;\n\t\t}\n\t\t\n\tprivate:\n\t\tstruct BlockIndexEntry\n\t\t{\n\t\t\tindex_t base;\n\t\t\tBlock* block;\n\t\t};\n\t\t\n\t\tstruct BlockIndexHeader\n\t\t{\n\t\t\tsize_t size;\n\t\t\tstd::atomic<size_t> front;\t\t// Current slot (not next, like pr_blockIndexFront)\n\t\t\tBlockIndexEntry* entries;\n\t\t\tvoid* prev;\n\t\t};\n\t\t\n\t\t\n\t\tbool new_block_index(size_t numberOfFilledSlotsToExpose)\n\t\t{\n\t\t\tauto prevBlockSizeMask = pr_blockIndexSize - 1;\n\t\t\t\n\t\t\t// Create the new block\n\t\t\tpr_blockIndexSize <<= 1;\n\t\t\tauto newRawPtr = static_cast<char*>((Traits::malloc)(sizeof(BlockIndexHeader) + std::alignment_of<BlockIndexEntry>::value - 1 + sizeof(BlockIndexEntry) * pr_blockIndexSize));\n\t\t\tif (newRawPtr == nullptr) {\n\t\t\t\tpr_blockIndexSize >>= 1;\t\t// Reset to allow graceful retry\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\t\n\t\t\tauto newBlockIndexEntries = reinterpret_cast<BlockIndexEntry*>(details::align_for<BlockIndexEntry>(newRawPtr + sizeof(BlockIndexHeader)));\n\t\t\t\n\t\t\t// Copy in all the old indices, if any\n\t\t\tsize_t j = 0;\n\t\t\tif (pr_blockIndexSlotsUsed != 0) {\n\t\t\t\tauto i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & 
prevBlockSizeMask;\n\t\t\t\tdo {\n\t\t\t\t\tnewBlockIndexEntries[j++] = pr_blockIndexEntries[i];\n\t\t\t\t\ti = (i + 1) & prevBlockSizeMask;\n\t\t\t\t} while (i != pr_blockIndexFront);\n\t\t\t}\n\t\t\t\n\t\t\t// Update everything\n\t\t\tauto header = new (newRawPtr) BlockIndexHeader;\n\t\t\theader->size = pr_blockIndexSize;\n\t\t\theader->front.store(numberOfFilledSlotsToExpose - 1, std::memory_order_relaxed);\n\t\t\theader->entries = newBlockIndexEntries;\n\t\t\theader->prev = pr_blockIndexRaw;\t\t// we link the new block to the old one so we can free it later\n\t\t\t\n\t\t\tpr_blockIndexFront = j;\n\t\t\tpr_blockIndexEntries = newBlockIndexEntries;\n\t\t\tpr_blockIndexRaw = newRawPtr;\n\t\t\tblockIndex.store(header, std::memory_order_release);\n\t\t\t\n\t\t\treturn true;\n\t\t}\n\t\t\n\tprivate:\n\t\tstd::atomic<BlockIndexHeader*> blockIndex;\n\t\t\n\t\t// To be used by producer only -- consumer must use the ones in referenced by blockIndex\n\t\tsize_t pr_blockIndexSlotsUsed;\n\t\tsize_t pr_blockIndexSize;\n\t\tsize_t pr_blockIndexFront;\t\t// Next slot (not current)\n\t\tBlockIndexEntry* pr_blockIndexEntries;\n\t\tvoid* pr_blockIndexRaw;\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\tpublic:\n\t\tExplicitProducer* nextExplicitProducer;\n\tprivate:\n#endif\n\t\t\n#if MCDBGQ_TRACKMEM\n\t\tfriend struct MemStats;\n#endif\n\t};\n\t\n\t\n\t//////////////////////////////////\n\t// Implicit queue\n\t//////////////////////////////////\n\t\n\tstruct ImplicitProducer : public ProducerBase\n\t{\t\t\t\n\t\tImplicitProducer(ConcurrentQueue* parent) :\n\t\t\tProducerBase(parent, false),\n\t\t\tnextBlockIndexCapacity(IMPLICIT_INITIAL_INDEX_SIZE),\n\t\t\tblockIndex(nullptr)\n\t\t{\n\t\t\tnew_block_index();\n\t\t}\n\t\t\n\t\t~ImplicitProducer()\n\t\t{\n\t\t\t// Note that since we're in the destructor we can assume that all enqueue/dequeue operations\n\t\t\t// completed already; this means that all undequeued elements are placed contiguously across\n\t\t\t// contiguous blocks, 
and that only the first and last remaining blocks can be only partially\n\t\t\t// empty (all other remaining blocks must be completely full).\n\t\t\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\t\t// Unregister ourselves for thread termination notification\n\t\t\tif (!this->inactive.load(std::memory_order_relaxed)) {\n\t\t\t\tdetails::ThreadExitNotifier::unsubscribe(&threadExitListener);\n\t\t\t}\n#endif\n\t\t\t\n\t\t\t// Destroy all remaining elements!\n\t\t\tauto tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto index = this->headIndex.load(std::memory_order_relaxed);\n\t\t\tBlock* block = nullptr;\n\t\t\tassert(index == tail || details::circular_less_than(index, tail));\n\t\t\tbool forceFreeLastBlock = index != tail;\t\t// If we enter the loop, then the last (tail) block will not be freed\n\t\t\twhile (index != tail) {\n\t\t\t\tif ((index & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 || block == nullptr) {\n\t\t\t\t\tif (block != nullptr) {\n\t\t\t\t\t\t// Free the old block\n\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tblock = get_block_index_entry_for_index(index)->value.load(std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t((*block)[index])->~T();\n\t\t\t\t++index;\n\t\t\t}\n\t\t\t// Even if the queue is empty, there's still one block that's not on the free list\n\t\t\t// (unless the head index reached the end of it, in which case the tail will be poised\n\t\t\t// to create a new block).\n\t\t\tif (this->tailBlock != nullptr && (forceFreeLastBlock || (tail & static_cast<index_t>(BLOCK_SIZE - 1)) != 0)) {\n\t\t\t\tthis->parent->add_block_to_free_list(this->tailBlock);\n\t\t\t}\n\t\t\t\n\t\t\t// Destroy block index\n\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_relaxed);\n\t\t\tif (localBlockIndex != nullptr) {\n\t\t\t\tfor (size_t i = 0; i != localBlockIndex->capacity; ++i) {\n\t\t\t\t\tlocalBlockIndex->index[i]->~BlockIndexEntry();\n\t\t\t\t}\n\t\t\t\tdo 
{\n\t\t\t\t\tauto prev = localBlockIndex->prev;\n\t\t\t\t\tlocalBlockIndex->~BlockIndexHeader();\n\t\t\t\t\t(Traits::free)(localBlockIndex);\n\t\t\t\t\tlocalBlockIndex = prev;\n\t\t\t\t} while (localBlockIndex != nullptr);\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<AllocationMode allocMode, typename U>\n\t\tinline bool enqueue(U&& element)\n\t\t{\n\t\t\tindex_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tindex_t newTailIndex = 1 + currentTailIndex;\n\t\t\tif ((currentTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {\n\t\t\t\t// We reached the end of a block, start a new one\n\t\t\t\tauto head = this->headIndex.load(std::memory_order_relaxed);\n\t\t\t\tassert(!details::circular_less_than<index_t>(currentTailIndex, head));\n\t\t\t\tif (!details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\t// Find out where we'll be inserting this block in the block index\n\t\t\t\tBlockIndexEntry* idxEntry;\n\t\t\t\tif (!insert_block_index_entry<allocMode>(idxEntry, currentTailIndex)) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Get ahold of a new block\n\t\t\t\tauto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();\n\t\t\t\tif (newBlock == nullptr) {\n\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\treturn false;\n\t\t\t\t}\n#if MCDBGQ_TRACKMEM\n\t\t\t\tnewBlock->owner = this;\n#endif\n\t\t\t\tnewBlock->ConcurrentQueue::Block::template reset_empty<implicit_context>();\n\t\t\t\t\n\t\t\t\tif (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward<U>(element)))) {\n\t\t\t\t\t// May throw, try to insert now before we 
publish the fact that we have this new block\n\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\tnew ((*newBlock)[currentTailIndex]) T(std::forward<U>(element));\n\t\t\t\t\t}\n\t\t\t\t\tMOODYCAMEL_CATCH (...) {\n\t\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\tthis->parent->add_block_to_free_list(newBlock);\n\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Insert the new block into the index\n\t\t\t\tidxEntry->value.store(newBlock, std::memory_order_relaxed);\n\t\t\t\t\n\t\t\t\tthis->tailBlock = newBlock;\n\t\t\t\t\n\t\t\t\tif (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward<U>(element)))) {\n\t\t\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t// Enqueue\n\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));\n\t\t\t\n\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n\t\t\n\t\ttemplate<typename U>\n\t\tbool dequeue(U& element)\n\t\t{\n\t\t\t// See ExplicitProducer::dequeue for rationale and explanation\n\t\t\tindex_t tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tindex_t overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);\n\t\t\tif (details::circular_less_than<index_t>(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) {\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\n\t\t\t\tindex_t myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\ttail = this->tailIndex.load(std::memory_order_acquire);\n\t\t\t\tif ((details::likely)(details::circular_less_than<index_t>(myDequeueCount - overcommit, tail))) {\n\t\t\t\t\tindex_t index = this->headIndex.fetch_add(1, std::memory_order_acq_rel);\n\t\t\t\t\t\n\t\t\t\t\t// Determine which block the element is in\n\t\t\t\t\tauto entry = 
get_block_index_entry_for_index(index);\n\t\t\t\t\t\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tauto block = entry->value.load(std::memory_order_relaxed);\n\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\n\t\t\t\t\tif (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) {\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\t\t\t// Note: Acquiring the mutex with every dequeue instead of only when a block\n\t\t\t\t\t\t// is released is very sub-optimal, but it is, after all, purely debug code.\n\t\t\t\t\t\tdebug::DebugLock lock(producer->mutex);\n#endif\n\t\t\t\t\t\tstruct Guard {\n\t\t\t\t\t\t\tBlock* block;\n\t\t\t\t\t\t\tindex_t index;\n\t\t\t\t\t\t\tBlockIndexEntry* entry;\n\t\t\t\t\t\t\tConcurrentQueue* parent;\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t~Guard()\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t(*block)[index]->~T();\n\t\t\t\t\t\t\t\tif (block->ConcurrentQueue::Block::template set_empty<implicit_context>(index)) {\n\t\t\t\t\t\t\t\t\tentry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\t\t\tparent->add_block_to_free_list(block);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} guard = { block, index, entry, this->parent };\n\t\t\t\t\t\t\n\t\t\t\t\t\telement = std::move(el);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\telement = std::move(el);\n\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\n\t\t\t\t\t\tif (block->ConcurrentQueue::Block::template set_empty<implicit_context>(index)) {\n\t\t\t\t\t\t\t{\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\t\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\t\t\t\t\t// Add the block back into the global free pool (and remove from block index)\n\t\t\t\t\t\t\t\tentry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\t\t// releases the above store\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis->dequeueOvercommit.fetch_add(1, 
std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\n\t\t\treturn false;\n\t\t}\n\t\t\n\t\ttemplate<AllocationMode allocMode, typename It>\n\t\tbool enqueue_bulk(It itemFirst, size_t count)\n\t\t{\n\t\t\t// First, we need to make sure we have enough room to enqueue all of the elements;\n\t\t\t// this means pre-allocating blocks and putting them in the block index (but only if\n\t\t\t// all the allocations succeeded).\n\t\t\t\n\t\t\t// Note that the tailBlock we start off with may not be owned by us any more;\n\t\t\t// this happens if it was filled up exactly to the top (setting tailIndex to\n\t\t\t// the first index of the next block which is not yet allocated), then dequeued\n\t\t\t// completely (putting it on the free list) before we enqueue again.\n\t\t\t\n\t\t\tindex_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto startBlock = this->tailBlock;\n\t\t\tBlock* firstAllocatedBlock = nullptr;\n\t\t\tauto endBlock = this->tailBlock;\n\t\t\t\n\t\t\t// Figure out how many blocks we'll need to allocate, and do so\n\t\t\tsize_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1));\n\t\t\tindex_t currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\tif (blockBaseDiff > 0) {\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\tdo {\n\t\t\t\t\tblockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\n\t\t\t\t\t// Find out where we'll be inserting this block in the block index\n\t\t\t\t\tBlockIndexEntry* idxEntry = nullptr;  // initialization here unnecessary but compiler can't always tell\n\t\t\t\t\tBlock* newBlock;\n\t\t\t\t\tbool indexInserted = false;\n\t\t\t\t\tauto head = 
this->headIndex.load(std::memory_order_relaxed);\n\t\t\t\t\tassert(!details::circular_less_than<index_t>(currentTailIndex, head));\n\t\t\t\t\tbool full = !details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head));\n\t\t\t\t\tif (full || !(indexInserted = insert_block_index_entry<allocMode>(idxEntry, currentTailIndex)) || (newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>()) == nullptr) {\n\t\t\t\t\t\t// Index allocation or block allocation failed; revert any other allocations\n\t\t\t\t\t\t// and index insertions done so far for this operation\n\t\t\t\t\t\tif (indexInserted) {\n\t\t\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcurrentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\t\t\t\tfor (auto block = firstAllocatedBlock; block != nullptr; block = block->next) {\n\t\t\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\tidxEntry = get_block_index_entry_for_index(currentTailIndex);\n\t\t\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\t\t}\n\t\t\t\t\t\tthis->parent->add_blocks_to_free_list(firstAllocatedBlock);\n\t\t\t\t\t\tthis->tailBlock = startBlock;\n\t\t\t\t\t\t\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\t\n#if MCDBGQ_TRACKMEM\n\t\t\t\t\tnewBlock->owner = this;\n#endif\n\t\t\t\t\tnewBlock->ConcurrentQueue::Block::template reset_empty<implicit_context>();\n\t\t\t\t\tnewBlock->next = nullptr;\n\t\t\t\t\t\n\t\t\t\t\t// Insert the new block into the index\n\t\t\t\t\tidxEntry->value.store(newBlock, std::memory_order_relaxed);\n\t\t\t\t\t\n\t\t\t\t\t// Store the chain of blocks so that we can undo if later allocations fail,\n\t\t\t\t\t// and 
so that we can find the blocks when we do the actual enqueueing\n\t\t\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr) {\n\t\t\t\t\t\tassert(this->tailBlock != nullptr);\n\t\t\t\t\t\tthis->tailBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\tthis->tailBlock = newBlock;\n\t\t\t\t\tendBlock = newBlock;\n\t\t\t\t\tfirstAllocatedBlock = firstAllocatedBlock == nullptr ? newBlock : firstAllocatedBlock;\n\t\t\t\t} while (blockBaseDiff > 0);\n\t\t\t}\n\t\t\t\n\t\t\t// Enqueue, one block at a time\n\t\t\tindex_t newTailIndex = startTailIndex + static_cast<index_t>(count);\n\t\t\tcurrentTailIndex = startTailIndex;\n\t\t\tthis->tailBlock = startBlock;\n\t\t\tassert((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0);\n\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) {\n\t\t\t\tthis->tailBlock = firstAllocatedBlock;\n\t\t\t}\n\t\t\twhile (true) {\n\t\t\t\tauto stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\tif (details::circular_less_than<index_t>(newTailIndex, stopIndex)) {\n\t\t\t\t\tstopIndex = newTailIndex;\n\t\t\t\t}\n\t\t\t\tif (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) {\n\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<(bool)!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst));\n\t\t\t\t\t\t\t++currentTailIndex;\n\t\t\t\t\t\t\t++itemFirst;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tMOODYCAMEL_CATCH (...) 
{\n\t\t\t\t\t\tauto constructedStopIndex = currentTailIndex;\n\t\t\t\t\t\tauto lastBlockEnqueued = this->tailBlock;\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (!details::is_trivially_destructible<T>::value) {\n\t\t\t\t\t\t\tauto block = startBlock;\n\t\t\t\t\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {\n\t\t\t\t\t\t\t\tblock = firstAllocatedBlock;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcurrentTailIndex = startTailIndex;\n\t\t\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t\t\tstopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\t\tif (details::circular_less_than<index_t>(constructedStopIndex, stopIndex)) {\n\t\t\t\t\t\t\t\t\tstopIndex = constructedStopIndex;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\t\t\t\t(*block)[currentTailIndex++]->~T();\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif (block == lastBlockEnqueued) {\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tblock = block->next;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tcurrentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\t\t\t\tfor (auto block = firstAllocatedBlock; block != nullptr; block = block->next) {\n\t\t\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\tauto idxEntry = get_block_index_entry_for_index(currentTailIndex);\n\t\t\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\t\t}\n\t\t\t\t\t\tthis->parent->add_blocks_to_free_list(firstAllocatedBlock);\n\t\t\t\t\t\tthis->tailBlock = startBlock;\n\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (this->tailBlock == endBlock) {\n\t\t\t\t\tassert(currentTailIndex == newTailIndex);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tthis->tailBlock = this->tailBlock->next;\n\t\t\t}\n\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\treturn 
true;\n\t\t}\n\t\t\n\t\ttemplate<typename It>\n\t\tsize_t dequeue_bulk(It& itemFirst, size_t max)\n\t\t{\n\t\t\tauto tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);\n\t\t\tauto desiredCount = static_cast<size_t>(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit));\n\t\t\tif (details::circular_less_than<size_t>(0, desiredCount)) {\n\t\t\t\tdesiredCount = desiredCount < max ? desiredCount : max;\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\n\t\t\t\tauto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed);\n\t\t\t\t\n\t\t\t\ttail = this->tailIndex.load(std::memory_order_acquire);\n\t\t\t\tauto actualCount = static_cast<size_t>(tail - (myDequeueCount - overcommit));\n\t\t\t\tif (details::circular_less_than<size_t>(0, actualCount)) {\n\t\t\t\t\tactualCount = desiredCount < actualCount ? desiredCount : actualCount;\n\t\t\t\t\tif (actualCount < desiredCount) {\n\t\t\t\t\t\tthis->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Get the first index. Note that since there's guaranteed to be at least actualCount elements, this\n\t\t\t\t\t// will never exceed tail.\n\t\t\t\t\tauto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel);\n\t\t\t\t\t\n\t\t\t\t\t// Iterate the blocks and dequeue\n\t\t\t\t\tauto index = firstIndex;\n\t\t\t\t\tBlockIndexHeader* localBlockIndex;\n\t\t\t\t\tauto indexIndex = get_block_index_index_for_index(index, localBlockIndex);\n\t\t\t\t\tdo {\n\t\t\t\t\t\tauto blockStartIndex = index;\n\t\t\t\t\t\tauto endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\tendIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? 
firstIndex + static_cast<index_t>(actualCount) : endIndex;\n\t\t\t\t\t\t\n\t\t\t\t\t\tauto entry = localBlockIndex->index[indexIndex];\n\t\t\t\t\t\tauto block = entry->value.load(std::memory_order_relaxed);\n\t\t\t\t\t\tif (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) {\n\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\t\t\t*itemFirst++ = std::move(el);\n\t\t\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\t\t\t\t*itemFirst = std::move(el);\n\t\t\t\t\t\t\t\t\t++itemFirst;\n\t\t\t\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tMOODYCAMEL_CATCH (...) {\n\t\t\t\t\t\t\t\tdo {\n\t\t\t\t\t\t\t\t\tentry = localBlockIndex->index[indexIndex];\n\t\t\t\t\t\t\t\t\tblock = entry->value.load(std::memory_order_relaxed);\n\t\t\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\t\t\t(*block)[index++]->~T();\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tif (block->ConcurrentQueue::Block::template set_many_empty<implicit_context>(blockStartIndex, static_cast<size_t>(endIndex - blockStartIndex))) {\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\t\t\t\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\t\t\t\t\t\t\tentry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tindexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1);\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tblockStartIndex = index;\n\t\t\t\t\t\t\t\t\tendIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\t\t\tendIndex = details::circular_less_than<index_t>(firstIndex + 
static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;\n\t\t\t\t\t\t\t\t} while (index != firstIndex + actualCount);\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (block->ConcurrentQueue::Block::template set_many_empty<implicit_context>(blockStartIndex, static_cast<size_t>(endIndex - blockStartIndex))) {\n\t\t\t\t\t\t\t{\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\t\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\t\t\t\t\t// Note that the set_many_empty above did a release, meaning that anybody who acquires the block\n\t\t\t\t\t\t\t\t// we're about to free can use it safely since our writes (and reads!) will have happened-before then.\n\t\t\t\t\t\t\t\tentry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\t\t// releases the above store\n\t\t\t\t\t\t}\n\t\t\t\t\t\tindexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1);\n\t\t\t\t\t} while (index != firstIndex + actualCount);\n\t\t\t\t\t\n\t\t\t\t\treturn actualCount;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\treturn 0;\n\t\t}\n\t\t\n\tprivate:\n\t\t// The block size must be > 1, so any number with the low bit set is an invalid block base index\n\t\tstatic const index_t INVALID_BLOCK_BASE = 1;\n\t\t\n\t\tstruct BlockIndexEntry\n\t\t{\n\t\t\tstd::atomic<index_t> key;\n\t\t\tstd::atomic<Block*> value;\n\t\t};\n\t\t\n\t\tstruct BlockIndexHeader\n\t\t{\n\t\t\tsize_t capacity;\n\t\t\tstd::atomic<size_t> tail;\n\t\t\tBlockIndexEntry* entries;\n\t\t\tBlockIndexEntry** index;\n\t\t\tBlockIndexHeader* prev;\n\t\t};\n\t\t\n\t\ttemplate<AllocationMode allocMode>\n\t\tinline bool insert_block_index_entry(BlockIndexEntry*& idxEntry, index_t blockStartIndex)\n\t\t{\n\t\t\tauto localBlockIndex = 
blockIndex.load(std::memory_order_relaxed);\t\t// We're the only writer thread, relaxed is OK\n\t\t\tif (localBlockIndex == nullptr) {\n\t\t\t\treturn false;  // this can happen if new_block_index failed in the constructor\n\t\t\t}\n\t\t\tauto newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1);\n\t\t\tidxEntry = localBlockIndex->index[newTail];\n\t\t\tif (idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE ||\n\t\t\t\tidxEntry->value.load(std::memory_order_relaxed) == nullptr) {\n\t\t\t\t\n\t\t\t\tidxEntry->key.store(blockStartIndex, std::memory_order_relaxed);\n\t\t\t\tlocalBlockIndex->tail.store(newTail, std::memory_order_release);\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\t\n\t\t\t// No room in the old block index, try to allocate another one!\n\t\t\tif (allocMode == CannotAlloc || !new_block_index()) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\tlocalBlockIndex = blockIndex.load(std::memory_order_relaxed);\n\t\t\tnewTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1);\n\t\t\tidxEntry = localBlockIndex->index[newTail];\n\t\t\tassert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE);\n\t\t\tidxEntry->key.store(blockStartIndex, std::memory_order_relaxed);\n\t\t\tlocalBlockIndex->tail.store(newTail, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n\t\t\n\t\tinline void rewind_block_index_tail()\n\t\t{\n\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_relaxed);\n\t\t\tlocalBlockIndex->tail.store((localBlockIndex->tail.load(std::memory_order_relaxed) - 1) & (localBlockIndex->capacity - 1), std::memory_order_relaxed);\n\t\t}\n\t\t\n\t\tinline BlockIndexEntry* get_block_index_entry_for_index(index_t index) const\n\t\t{\n\t\t\tBlockIndexHeader* localBlockIndex;\n\t\t\tauto idx = get_block_index_index_for_index(index, localBlockIndex);\n\t\t\treturn localBlockIndex->index[idx];\n\t\t}\n\t\t\n\t\tinline size_t 
get_block_index_index_for_index(index_t index, BlockIndexHeader*& localBlockIndex) const\n\t\t{\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\tindex &= ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\tlocalBlockIndex = blockIndex.load(std::memory_order_acquire);\n\t\t\tauto tail = localBlockIndex->tail.load(std::memory_order_acquire);\n\t\t\tauto tailBase = localBlockIndex->index[tail]->key.load(std::memory_order_relaxed);\n\t\t\tassert(tailBase != INVALID_BLOCK_BASE);\n\t\t\t// Note: Must use division instead of shift because the index may wrap around, causing a negative\n\t\t\t// offset, whose negativity we want to preserve\n\t\t\tauto offset = static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(index - tailBase) / BLOCK_SIZE);\n\t\t\tsize_t idx = (tail + offset) & (localBlockIndex->capacity - 1);\n\t\t\tassert(localBlockIndex->index[idx]->key.load(std::memory_order_relaxed) == index && localBlockIndex->index[idx]->value.load(std::memory_order_relaxed) != nullptr);\n\t\t\treturn idx;\n\t\t}\n\t\t\n\t\tbool new_block_index()\n\t\t{\n\t\t\tauto prev = blockIndex.load(std::memory_order_relaxed);\n\t\t\tsize_t prevCapacity = prev == nullptr ? 0 : prev->capacity;\n\t\t\tauto entryCount = prev == nullptr ? 
nextBlockIndexCapacity : prevCapacity;\n\t\t\tauto raw = static_cast<char*>((Traits::malloc)(\n\t\t\t\tsizeof(BlockIndexHeader) +\n\t\t\t\tstd::alignment_of<BlockIndexEntry>::value - 1 + sizeof(BlockIndexEntry) * entryCount +\n\t\t\t\tstd::alignment_of<BlockIndexEntry*>::value - 1 + sizeof(BlockIndexEntry*) * nextBlockIndexCapacity));\n\t\t\tif (raw == nullptr) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\t\n\t\t\tauto header = new (raw) BlockIndexHeader;\n\t\t\tauto entries = reinterpret_cast<BlockIndexEntry*>(details::align_for<BlockIndexEntry>(raw + sizeof(BlockIndexHeader)));\n\t\t\tauto index = reinterpret_cast<BlockIndexEntry**>(details::align_for<BlockIndexEntry*>(reinterpret_cast<char*>(entries) + sizeof(BlockIndexEntry) * entryCount));\n\t\t\tif (prev != nullptr) {\n\t\t\t\tauto prevTail = prev->tail.load(std::memory_order_relaxed);\n\t\t\t\tauto prevPos = prevTail;\n\t\t\t\tsize_t i = 0;\n\t\t\t\tdo {\n\t\t\t\t\tprevPos = (prevPos + 1) & (prev->capacity - 1);\n\t\t\t\t\tindex[i++] = prev->index[prevPos];\n\t\t\t\t} while (prevPos != prevTail);\n\t\t\t\tassert(i == prevCapacity);\n\t\t\t}\n\t\t\tfor (size_t i = 0; i != entryCount; ++i) {\n\t\t\t\tnew (entries + i) BlockIndexEntry;\n\t\t\t\tentries[i].key.store(INVALID_BLOCK_BASE, std::memory_order_relaxed);\n\t\t\t\tindex[prevCapacity + i] = entries + i;\n\t\t\t}\n\t\t\theader->prev = prev;\n\t\t\theader->entries = entries;\n\t\t\theader->index = index;\n\t\t\theader->capacity = nextBlockIndexCapacity;\n\t\t\theader->tail.store((prevCapacity - 1) & (nextBlockIndexCapacity - 1), std::memory_order_relaxed);\n\t\t\t\n\t\t\tblockIndex.store(header, std::memory_order_release);\n\t\t\t\n\t\t\tnextBlockIndexCapacity <<= 1;\n\t\t\t\n\t\t\treturn true;\n\t\t}\n\t\t\n\tprivate:\n\t\tsize_t nextBlockIndexCapacity;\n\t\tstd::atomic<BlockIndexHeader*> blockIndex;\n\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\tpublic:\n\t\tdetails::ThreadExitListener threadExitListener;\n\tprivate:\n#endif\n\t\t\n#ifdef 
MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\tpublic:\n\t\tImplicitProducer* nextImplicitProducer;\n\tprivate:\n#endif\n\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\tmutable debug::DebugMutex mutex;\n#endif\n#if MCDBGQ_TRACKMEM\n\t\tfriend struct MemStats;\n#endif\n\t};\n\t\n\t\n\t//////////////////////////////////\n\t// Block pool manipulation\n\t//////////////////////////////////\n\t\n\tvoid populate_initial_block_list(size_t blockCount)\n\t{\n\t\tinitialBlockPoolSize = blockCount;\n\t\tif (initialBlockPoolSize == 0) {\n\t\t\tinitialBlockPool = nullptr;\n\t\t\treturn;\n\t\t}\n\t\t\n\t\tinitialBlockPool = create_array<Block>(blockCount);\n\t\tif (initialBlockPool == nullptr) {\n\t\t\tinitialBlockPoolSize = 0;\n\t\t}\n\t\tfor (size_t i = 0; i < initialBlockPoolSize; ++i) {\n\t\t\tinitialBlockPool[i].dynamicallyAllocated = false;\n\t\t}\n\t}\n\t\n\tinline Block* try_get_block_from_initial_pool()\n\t{\n\t\tif (initialBlockPoolIndex.load(std::memory_order_relaxed) >= initialBlockPoolSize) {\n\t\t\treturn nullptr;\n\t\t}\n\t\t\n\t\tauto index = initialBlockPoolIndex.fetch_add(1, std::memory_order_relaxed);\n\t\t\n\t\treturn index < initialBlockPoolSize ? 
(initialBlockPool + index) : nullptr;\n\t}\n\t\n\tinline void add_block_to_free_list(Block* block)\n\t{\n#if MCDBGQ_TRACKMEM\n\t\tblock->owner = nullptr;\n#endif\n\t\tfreeList.add(block);\n\t}\n\t\n\tinline void add_blocks_to_free_list(Block* block)\n\t{\n\t\twhile (block != nullptr) {\n\t\t\tauto next = block->next;\n\t\t\tadd_block_to_free_list(block);\n\t\t\tblock = next;\n\t\t}\n\t}\n\t\n\tinline Block* try_get_block_from_free_list()\n\t{\n\t\treturn freeList.try_get();\n\t}\n\t\n\t// Gets a free block from one of the memory pools, or allocates a new one (if applicable)\n\ttemplate<AllocationMode canAlloc>\n\tBlock* requisition_block()\n\t{\n\t\tauto block = try_get_block_from_initial_pool();\n\t\tif (block != nullptr) {\n\t\t\treturn block;\n\t\t}\n\t\t\n\t\tblock = try_get_block_from_free_list();\n\t\tif (block != nullptr) {\n\t\t\treturn block;\n\t\t}\n\t\t\n\t\tif (canAlloc == CanAlloc) {\n\t\t\treturn create<Block>();\n\t\t}\n\t\t\n\t\treturn nullptr;\n\t}\n\t\n\n#if MCDBGQ_TRACKMEM\n\tpublic:\n\t\tstruct MemStats {\n\t\t\tsize_t allocatedBlocks;\n\t\t\tsize_t usedBlocks;\n\t\t\tsize_t freeBlocks;\n\t\t\tsize_t ownedBlocksExplicit;\n\t\t\tsize_t ownedBlocksImplicit;\n\t\t\tsize_t implicitProducers;\n\t\t\tsize_t explicitProducers;\n\t\t\tsize_t elementsEnqueued;\n\t\t\tsize_t blockClassBytes;\n\t\t\tsize_t queueClassBytes;\n\t\t\tsize_t implicitBlockIndexBytes;\n\t\t\tsize_t explicitBlockIndexBytes;\n\t\t\t\n\t\t\tfriend class ConcurrentQueue;\n\t\t\t\n\t\tprivate:\n\t\t\tstatic MemStats getFor(ConcurrentQueue* q)\n\t\t\t{\n\t\t\t\tMemStats stats = { 0 };\n\t\t\t\t\n\t\t\t\tstats.elementsEnqueued = q->size_approx();\n\t\t\t\n\t\t\t\tauto block = q->freeList.head_unsafe();\n\t\t\t\twhile (block != nullptr) {\n\t\t\t\t\t++stats.allocatedBlocks;\n\t\t\t\t\t++stats.freeBlocks;\n\t\t\t\t\tblock = block->freeListNext.load(std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor (auto ptr = q->producerListTail.load(std::memory_order_acquire); ptr != nullptr; 
ptr = ptr->next_prod()) {\n\t\t\t\t\tbool implicit = dynamic_cast<ImplicitProducer*>(ptr) != nullptr;\n\t\t\t\t\tstats.implicitProducers += implicit ? 1 : 0;\n\t\t\t\t\tstats.explicitProducers += implicit ? 0 : 1;\n\t\t\t\t\t\n\t\t\t\t\tif (implicit) {\n\t\t\t\t\t\tauto prod = static_cast<ImplicitProducer*>(ptr);\n\t\t\t\t\t\tstats.queueClassBytes += sizeof(ImplicitProducer);\n\t\t\t\t\t\tauto head = prod->headIndex.load(std::memory_order_relaxed);\n\t\t\t\t\t\tauto tail = prod->tailIndex.load(std::memory_order_relaxed);\n\t\t\t\t\t\tauto hash = prod->blockIndex.load(std::memory_order_relaxed);\n\t\t\t\t\t\tif (hash != nullptr) {\n\t\t\t\t\t\t\tfor (size_t i = 0; i != hash->capacity; ++i) {\n\t\t\t\t\t\t\t\tif (hash->index[i]->key.load(std::memory_order_relaxed) != ImplicitProducer::INVALID_BLOCK_BASE && hash->index[i]->value.load(std::memory_order_relaxed) != nullptr) {\n\t\t\t\t\t\t\t\t\t++stats.allocatedBlocks;\n\t\t\t\t\t\t\t\t\t++stats.ownedBlocksImplicit;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tstats.implicitBlockIndexBytes += hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry);\n\t\t\t\t\t\t\tfor (; hash != nullptr; hash = hash->prev) {\n\t\t\t\t\t\t\t\tstats.implicitBlockIndexBytes += sizeof(typename ImplicitProducer::BlockIndexHeader) + hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry*);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor (; details::circular_less_than<index_t>(head, tail); head += BLOCK_SIZE) {\n\t\t\t\t\t\t\t//auto block = prod->get_block_index_entry_for_index(head);\n\t\t\t\t\t\t\t++stats.usedBlocks;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tauto prod = static_cast<ExplicitProducer*>(ptr);\n\t\t\t\t\t\tstats.queueClassBytes += sizeof(ExplicitProducer);\n\t\t\t\t\t\tauto tailBlock = prod->tailBlock;\n\t\t\t\t\t\tbool wasNonEmpty = false;\n\t\t\t\t\t\tif (tailBlock != nullptr) {\n\t\t\t\t\t\t\tauto block = tailBlock;\n\t\t\t\t\t\t\tdo 
{\n\t\t\t\t\t\t\t\t++stats.allocatedBlocks;\n\t\t\t\t\t\t\t\tif (!block->ConcurrentQueue::Block::template is_empty<explicit_context>() || wasNonEmpty) {\n\t\t\t\t\t\t\t\t\t++stats.usedBlocks;\n\t\t\t\t\t\t\t\t\twasNonEmpty = wasNonEmpty || block != tailBlock;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t++stats.ownedBlocksExplicit;\n\t\t\t\t\t\t\t\tblock = block->next;\n\t\t\t\t\t\t\t} while (block != tailBlock);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tauto index = prod->blockIndex.load(std::memory_order_relaxed);\n\t\t\t\t\t\twhile (index != nullptr) {\n\t\t\t\t\t\t\tstats.explicitBlockIndexBytes += sizeof(typename ExplicitProducer::BlockIndexHeader) + index->size * sizeof(typename ExplicitProducer::BlockIndexEntry);\n\t\t\t\t\t\t\tindex = static_cast<typename ExplicitProducer::BlockIndexHeader*>(index->prev);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tauto freeOnInitialPool = q->initialBlockPoolIndex.load(std::memory_order_relaxed) >= q->initialBlockPoolSize ? 0 : q->initialBlockPoolSize - q->initialBlockPoolIndex.load(std::memory_order_relaxed);\n\t\t\t\tstats.allocatedBlocks += freeOnInitialPool;\n\t\t\t\tstats.freeBlocks += freeOnInitialPool;\n\t\t\t\t\n\t\t\t\tstats.blockClassBytes = sizeof(Block) * stats.allocatedBlocks;\n\t\t\t\tstats.queueClassBytes += sizeof(ConcurrentQueue);\n\t\t\t\t\n\t\t\t\treturn stats;\n\t\t\t}\n\t\t};\n\t\t\n\t\t// For debugging only. 
Not thread-safe.\n\t\tMemStats getMemStats()\n\t\t{\n\t\t\treturn MemStats::getFor(this);\n\t\t}\n\tprivate:\n\t\tfriend struct MemStats;\n#endif\n\t\n\t\n\t//////////////////////////////////\n\t// Producer list manipulation\n\t//////////////////////////////////\t\n\t\n\tProducerBase* recycle_or_create_producer(bool isExplicit)\n\t{\n\t\tbool recycled;\n\t\treturn recycle_or_create_producer(isExplicit, recycled);\n\t}\n\t\n\tProducerBase* recycle_or_create_producer(bool isExplicit, bool& recycled)\n\t{\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH\n\t\tdebug::DebugLock lock(implicitProdMutex);\n#endif\n\t\t// Try to re-use one first\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tif (ptr->inactive.load(std::memory_order_relaxed) && ptr->isExplicit == isExplicit) {\n\t\t\t\tbool expected = true;\n\t\t\t\tif (ptr->inactive.compare_exchange_strong(expected, /* desired */ false, std::memory_order_acquire, std::memory_order_relaxed)) {\n\t\t\t\t\t// We caught one! It's been marked as activated, the caller can have it\n\t\t\t\t\trecycled = true;\n\t\t\t\t\treturn ptr;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\trecycled = false;\n\t\treturn add_producer(isExplicit ? 
static_cast<ProducerBase*>(create<ExplicitProducer>(this)) : create<ImplicitProducer>(this));\n\t}\n\t\n\tProducerBase* add_producer(ProducerBase* producer)\n\t{\n\t\t// Handle failed memory allocation\n\t\tif (producer == nullptr) {\n\t\t\treturn nullptr;\n\t\t}\n\t\t\n\t\tproducerCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\n\t\t// Add it to the lock-free list\n\t\tauto prevTail = producerListTail.load(std::memory_order_relaxed);\n\t\tdo {\n\t\t\tproducer->next = prevTail;\n\t\t} while (!producerListTail.compare_exchange_weak(prevTail, producer, std::memory_order_release, std::memory_order_relaxed));\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\tif (producer->isExplicit) {\n\t\t\tauto prevTailExplicit = explicitProducers.load(std::memory_order_relaxed);\n\t\t\tdo {\n\t\t\t\tstatic_cast<ExplicitProducer*>(producer)->nextExplicitProducer = prevTailExplicit;\n\t\t\t} while (!explicitProducers.compare_exchange_weak(prevTailExplicit, static_cast<ExplicitProducer*>(producer), std::memory_order_release, std::memory_order_relaxed));\n\t\t}\n\t\telse {\n\t\t\tauto prevTailImplicit = implicitProducers.load(std::memory_order_relaxed);\n\t\t\tdo {\n\t\t\t\tstatic_cast<ImplicitProducer*>(producer)->nextImplicitProducer = prevTailImplicit;\n\t\t\t} while (!implicitProducers.compare_exchange_weak(prevTailImplicit, static_cast<ImplicitProducer*>(producer), std::memory_order_release, std::memory_order_relaxed));\n\t\t}\n#endif\n\t\t\n\t\treturn producer;\n\t}\n\t\n\tvoid reown_producers()\n\t{\n\t\t// After another instance is moved-into/swapped-with this one, all the\n\t\t// producers we stole still think their parents are the other queue.\n\t\t// So fix them up!\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_relaxed); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tptr->parent = this;\n\t\t}\n\t}\n\t\n\t\n\t//////////////////////////////////\n\t// Implicit producer hash\n\t//////////////////////////////////\n\t\n\tstruct 
ImplicitProducerKVP\n\t{\n\t\tstd::atomic<details::thread_id_t> key;\n\t\tImplicitProducer* value;\t\t// No need for atomicity since it's only read by the thread that sets it in the first place\n\t\t\n\t\tImplicitProducerKVP() : value(nullptr) { }\n\t\t\n\t\tImplicitProducerKVP(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT\n\t\t{\n\t\t\tkey.store(other.key.load(std::memory_order_relaxed), std::memory_order_relaxed);\n\t\t\tvalue = other.value;\n\t\t}\n\t\t\n\t\tinline ImplicitProducerKVP& operator=(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT\n\t\t{\n\t\t\tswap(other);\n\t\t\treturn *this;\n\t\t}\n\t\t\n\t\tinline void swap(ImplicitProducerKVP& other) MOODYCAMEL_NOEXCEPT\n\t\t{\n\t\t\tif (this != &other) {\n\t\t\t\tdetails::swap_relaxed(key, other.key);\n\t\t\t\tstd::swap(value, other.value);\n\t\t\t}\n\t\t}\n\t};\n\t\n\ttemplate<typename XT, typename XTraits>\n\tfriend void moodycamel::swap(typename ConcurrentQueue<XT, XTraits>::ImplicitProducerKVP&, typename ConcurrentQueue<XT, XTraits>::ImplicitProducerKVP&) MOODYCAMEL_NOEXCEPT;\n\t\n\tstruct ImplicitProducerHash\n\t{\n\t\tsize_t capacity;\n\t\tImplicitProducerKVP* entries;\n\t\tImplicitProducerHash* prev;\n\t};\n\t\n\tinline void populate_initial_implicit_producer_hash()\n\t{\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return;\n\t\t\n\t\timplicitProducerHashCount.store(0, std::memory_order_relaxed);\n\t\tauto hash = &initialImplicitProducerHash;\n\t\thash->capacity = INITIAL_IMPLICIT_PRODUCER_HASH_SIZE;\n\t\thash->entries = &initialImplicitProducerHashEntries[0];\n\t\tfor (size_t i = 0; i != INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; ++i) {\n\t\t\tinitialImplicitProducerHashEntries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed);\n\t\t}\n\t\thash->prev = nullptr;\n\t\timplicitProducerHash.store(hash, std::memory_order_relaxed);\n\t}\n\t\n\tvoid swap_implicit_producer_hashes(ConcurrentQueue& other)\n\t{\n\t\tif (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return;\n\t\t\n\t\t// Swap 
(assumes our implicit producer hash is initialized)\n\t\tinitialImplicitProducerHashEntries.swap(other.initialImplicitProducerHashEntries);\n\t\tinitialImplicitProducerHash.entries = &initialImplicitProducerHashEntries[0];\n\t\tother.initialImplicitProducerHash.entries = &other.initialImplicitProducerHashEntries[0];\n\t\t\n\t\tdetails::swap_relaxed(implicitProducerHashCount, other.implicitProducerHashCount);\n\t\t\n\t\tdetails::swap_relaxed(implicitProducerHash, other.implicitProducerHash);\n\t\tif (implicitProducerHash.load(std::memory_order_relaxed) == &other.initialImplicitProducerHash) {\n\t\t\timplicitProducerHash.store(&initialImplicitProducerHash, std::memory_order_relaxed);\n\t\t}\n\t\telse {\n\t\t\tImplicitProducerHash* hash;\n\t\t\tfor (hash = implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &other.initialImplicitProducerHash; hash = hash->prev) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\thash->prev = &initialImplicitProducerHash;\n\t\t}\n\t\tif (other.implicitProducerHash.load(std::memory_order_relaxed) == &initialImplicitProducerHash) {\n\t\t\tother.implicitProducerHash.store(&other.initialImplicitProducerHash, std::memory_order_relaxed);\n\t\t}\n\t\telse {\n\t\t\tImplicitProducerHash* hash;\n\t\t\tfor (hash = other.implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &initialImplicitProducerHash; hash = hash->prev) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\thash->prev = &other.initialImplicitProducerHash;\n\t\t}\n\t}\n\t\n\t// Only fails (returns nullptr) if memory allocation fails\n\tImplicitProducer* get_or_add_implicit_producer()\n\t{\n\t\t// Note that since the data is essentially thread-local (key is thread ID),\n\t\t// there's a reduced need for fences (memory ordering is already consistent\n\t\t// for any individual thread), except for the current table itself.\n\t\t\n\t\t// Start by looking for the thread ID in the current and all previous hash tables.\n\t\t// If it's not found, it must not be in there yet, since this same 
thread would\n\t\t// have added it previously to one of the tables that we traversed.\n\t\t\n\t\t// Code and algorithm adapted from http://preshing.com/20130605/the-worlds-simplest-lock-free-hash-table\n\t\t\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH\n\t\tdebug::DebugLock lock(implicitProdMutex);\n#endif\n\t\t\n\t\tauto id = details::thread_id();\n\t\tauto hashedId = details::hash_thread_id(id);\n\t\t\n\t\tauto mainHash = implicitProducerHash.load(std::memory_order_acquire);\n\t\tfor (auto hash = mainHash; hash != nullptr; hash = hash->prev) {\n\t\t\t// Look for the id in this hash\n\t\t\tauto index = hashedId;\n\t\t\twhile (true) {\t\t// Not an infinite loop because at least one slot is free in the hash table\n\t\t\t\tindex &= hash->capacity - 1;\n\t\t\t\t\n\t\t\t\tauto probedKey = hash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\tif (probedKey == id) {\n\t\t\t\t\t// Found it! If we had to search several hashes deep, though, we should lazily add it\n\t\t\t\t\t// to the current main hash table to avoid the extended search next time.\n\t\t\t\t\t// Note there's guaranteed to be room in the current hash table since every subsequent\n\t\t\t\t\t// table implicitly reserves space for all previous tables (there's only one\n\t\t\t\t\t// implicitProducerHashCount).\n\t\t\t\t\tauto value = hash->entries[index].value;\n\t\t\t\t\tif (hash != mainHash) {\n\t\t\t\t\t\tindex = hashedId;\n\t\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t\tindex &= mainHash->capacity - 1;\n\t\t\t\t\t\t\tprobedKey = mainHash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\t\t\t\tauto empty = details::invalid_thread_id;\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\t\t\t\t\t\tauto reusable = details::invalid_thread_id2;\n\t\t\t\t\t\t\tif ((probedKey == empty    && mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_relaxed, std::memory_order_relaxed)) ||\n\t\t\t\t\t\t\t\t(probedKey == reusable && 
mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_acquire, std::memory_order_acquire))) {\n#else\n\t\t\t\t\t\t\tif ((probedKey == empty    && mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_relaxed, std::memory_order_relaxed))) {\n#endif\n\t\t\t\t\t\t\t\tmainHash->entries[index].value = value;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\treturn value;\n\t\t\t\t}\n\t\t\t\tif (probedKey == details::invalid_thread_id) {\n\t\t\t\t\tbreak;\t\t// Not in this hash table\n\t\t\t\t}\n\t\t\t\t++index;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Insert!\n\t\tauto newCount = 1 + implicitProducerHashCount.fetch_add(1, std::memory_order_relaxed);\n\t\twhile (true) {\n\t\t\tif (newCount >= (mainHash->capacity >> 1) && !implicitProducerHashResizeInProgress.test_and_set(std::memory_order_acquire)) {\n\t\t\t\t// We've acquired the resize lock, try to allocate a bigger hash table.\n\t\t\t\t// Note the acquire fence synchronizes with the release fence at the end of this block, and hence when\n\t\t\t\t// we reload implicitProducerHash it must be the most recent version (it only gets changed within this\n\t\t\t\t// locked block).\n\t\t\t\tmainHash = implicitProducerHash.load(std::memory_order_acquire);\n\t\t\t\tif (newCount >= (mainHash->capacity >> 1)) {\n\t\t\t\t\tauto newCapacity = mainHash->capacity << 1;\n\t\t\t\t\twhile (newCount >= (newCapacity >> 1)) {\n\t\t\t\t\t\tnewCapacity <<= 1;\n\t\t\t\t\t}\n\t\t\t\t\tauto raw = static_cast<char*>((Traits::malloc)(sizeof(ImplicitProducerHash) + std::alignment_of<ImplicitProducerKVP>::value - 1 + sizeof(ImplicitProducerKVP) * newCapacity));\n\t\t\t\t\tif (raw == nullptr) {\n\t\t\t\t\t\t// Allocation failed\n\t\t\t\t\t\timplicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);\n\t\t\t\t\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);\n\t\t\t\t\t\treturn 
nullptr;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tauto newHash = new (raw) ImplicitProducerHash;\n\t\t\t\t\tnewHash->capacity = newCapacity;\n\t\t\t\t\tnewHash->entries = reinterpret_cast<ImplicitProducerKVP*>(details::align_for<ImplicitProducerKVP>(raw + sizeof(ImplicitProducerHash)));\n\t\t\t\t\tfor (size_t i = 0; i != newCapacity; ++i) {\n\t\t\t\t\t\tnew (newHash->entries + i) ImplicitProducerKVP;\n\t\t\t\t\t\tnewHash->entries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed);\n\t\t\t\t\t}\n\t\t\t\t\tnewHash->prev = mainHash;\n\t\t\t\t\timplicitProducerHash.store(newHash, std::memory_order_release);\n\t\t\t\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_release);\n\t\t\t\t\tmainHash = newHash;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t// If it's < three-quarters full, add to the old one anyway so that we don't have to wait for the next table\n\t\t\t// to finish being allocated by another thread (and if we just finished allocating above, the condition will\n\t\t\t// always be true)\n\t\t\tif (newCount < (mainHash->capacity >> 1) + (mainHash->capacity >> 2)) {\n\t\t\t\tbool recycled;\n\t\t\t\tauto producer = static_cast<ImplicitProducer*>(recycle_or_create_producer(false, recycled));\n\t\t\t\tif (producer == nullptr) {\n\t\t\t\t\timplicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);\n\t\t\t\t\treturn nullptr;\n\t\t\t\t}\n\t\t\t\tif (recycled) {\n\t\t\t\t\timplicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\t\t\tproducer->threadExitListener.callback = &ConcurrentQueue::implicit_producer_thread_exited_callback;\n\t\t\t\tproducer->threadExitListener.userData = producer;\n\t\t\t\tdetails::ThreadExitNotifier::subscribe(&producer->threadExitListener);\n#endif\n\t\t\t\t\n\t\t\t\tauto index = hashedId;\n\t\t\t\twhile (true) {\n\t\t\t\t\tindex 
&= mainHash->capacity - 1;\n\t\t\t\t\tauto probedKey = mainHash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\t\t\n\t\t\t\t\tauto empty = details::invalid_thread_id;\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\t\t\t\tauto reusable = details::invalid_thread_id2;\n\t\t\t\t\tif ((probedKey == empty    && mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_relaxed, std::memory_order_relaxed)) ||\n\t\t\t\t\t\t(probedKey == reusable && mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_acquire, std::memory_order_acquire))) {\n#else\n\t\t\t\t\tif ((probedKey == empty    && mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_relaxed, std::memory_order_relaxed))) {\n#endif\n\t\t\t\t\t\tmainHash->entries[index].value = producer;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\t++index;\n\t\t\t\t}\n\t\t\t\treturn producer;\n\t\t\t}\n\t\t\t\n\t\t\t// Hmm, the old hash is quite full and somebody else is busy allocating a new one.\n\t\t\t// We need to wait for the allocating thread to finish (if it succeeds, we add, if not,\n\t\t\t// we try to allocate ourselves).\n\t\t\tmainHash = implicitProducerHash.load(std::memory_order_acquire);\n\t\t}\n\t}\n\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\tvoid implicit_producer_thread_exited(ImplicitProducer* producer)\n\t{\n\t\t// Remove from thread exit listeners\n\t\tdetails::ThreadExitNotifier::unsubscribe(&producer->threadExitListener);\n\t\t\n\t\t// Remove from hash\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH\n\t\tdebug::DebugLock lock(implicitProdMutex);\n#endif\n\t\tauto hash = implicitProducerHash.load(std::memory_order_acquire);\n\t\tassert(hash != nullptr);\t\t// The thread exit listener is only registered if we were added to a hash in the first place\n\t\tauto id = details::thread_id();\n\t\tauto hashedId = details::hash_thread_id(id);\n\t\tdetails::thread_id_t probedKey;\n\t\t\n\t\t// We need to traverse all 
the hashes just in case other threads aren't on the current one yet and are\n\t\t// trying to add an entry thinking there's a free slot (because they reused a producer)\n\t\tfor (; hash != nullptr; hash = hash->prev) {\n\t\t\tauto index = hashedId;\n\t\t\tdo {\n\t\t\t\tindex &= hash->capacity - 1;\n\t\t\t\tprobedKey = hash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\tif (probedKey == id) {\n\t\t\t\t\thash->entries[index].key.store(details::invalid_thread_id2, std::memory_order_release);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t++index;\n\t\t\t} while (probedKey != details::invalid_thread_id);\t\t// Can happen if the hash has changed but we weren't put back in it yet, or if we weren't added to this hash in the first place\n\t\t}\n\t\t\n\t\t// Mark the queue as being recyclable\n\t\tproducer->inactive.store(true, std::memory_order_release);\n\t}\n\t\n\tstatic void implicit_producer_thread_exited_callback(void* userData)\n\t{\n\t\tauto producer = static_cast<ImplicitProducer*>(userData);\n\t\tauto queue = producer->parent;\n\t\tqueue->implicit_producer_thread_exited(producer);\n\t}\n#endif\n\t\n\t//////////////////////////////////\n\t// Utility functions\n\t//////////////////////////////////\n\t\n\ttemplate<typename U>\n\tstatic inline U* create_array(size_t count)\n\t{\n\t\tassert(count > 0);\n\t\tauto p = static_cast<U*>((Traits::malloc)(sizeof(U) * count));\n\t\tif (p == nullptr) {\n\t\t\treturn nullptr;\n\t\t}\n\t\t\n\t\tfor (size_t i = 0; i != count; ++i) {\n\t\t\tnew (p + i) U();\n\t\t}\n\t\treturn p;\n\t}\n\t\n\ttemplate<typename U>\n\tstatic inline void destroy_array(U* p, size_t count)\n\t{\n\t\tif (p != nullptr) {\n\t\t\tassert(count > 0);\n\t\t\tfor (size_t i = count; i != 0; ) {\n\t\t\t\t(p + --i)->~U();\n\t\t\t}\n\t\t\t(Traits::free)(p);\n\t\t}\n\t}\n\t\n\ttemplate<typename U>\n\tstatic inline U* create()\n\t{\n\t\tauto p = (Traits::malloc)(sizeof(U));\n\t\treturn p != nullptr ? 
new (p) U : nullptr;\n\t}\n\t\n\ttemplate<typename U, typename A1>\n\tstatic inline U* create(A1&& a1)\n\t{\n\t\tauto p = (Traits::malloc)(sizeof(U));\n\t\treturn p != nullptr ? new (p) U(std::forward<A1>(a1)) : nullptr;\n\t}\n\t\n\ttemplate<typename U>\n\tstatic inline void destroy(U* p)\n\t{\n\t\tif (p != nullptr) {\n\t\t\tp->~U();\n\t\t}\n\t\t(Traits::free)(p);\n\t}\n\nprivate:\n\tstd::atomic<ProducerBase*> producerListTail;\n\tstd::atomic<std::uint32_t> producerCount;\n\t\n\tstd::atomic<size_t> initialBlockPoolIndex;\n\tBlock* initialBlockPool;\n\tsize_t initialBlockPoolSize;\n\t\n#if !MCDBGQ_USEDEBUGFREELIST\n\tFreeList<Block> freeList;\n#else\n\tdebug::DebugFreeList<Block> freeList;\n#endif\n\t\n\tstd::atomic<ImplicitProducerHash*> implicitProducerHash;\n\tstd::atomic<size_t> implicitProducerHashCount;\t\t// Number of slots logically used\n\tImplicitProducerHash initialImplicitProducerHash;\n\tstd::array<ImplicitProducerKVP, INITIAL_IMPLICIT_PRODUCER_HASH_SIZE> initialImplicitProducerHashEntries;\n\tstd::atomic_flag implicitProducerHashResizeInProgress;\n\t\n\tstd::atomic<std::uint32_t> nextExplicitConsumerId;\n\tstd::atomic<std::uint32_t> globalExplicitConsumerOffset;\n\t\n#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH\n\tdebug::DebugMutex implicitProdMutex;\n#endif\n\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\tstd::atomic<ExplicitProducer*> explicitProducers;\n\tstd::atomic<ImplicitProducer*> implicitProducers;\n#endif\n};\n\n\ntemplate<typename T, typename Traits>\nProducerToken::ProducerToken(ConcurrentQueue<T, Traits>& queue)\n\t: producer(queue.recycle_or_create_producer(true))\n{\n\tif (producer != nullptr) {\n\t\tproducer->token = this;\n\t}\n}\n\ntemplate<typename T, typename Traits>\nProducerToken::ProducerToken(BlockingConcurrentQueue<T, Traits>& queue)\n\t: producer(reinterpret_cast<ConcurrentQueue<T, Traits>*>(&queue)->recycle_or_create_producer(true))\n{\n\tif (producer != nullptr) {\n\t\tproducer->token = this;\n\t}\n}\n\ntemplate<typename T, typename 
Traits>\nConsumerToken::ConsumerToken(ConcurrentQueue<T, Traits>& queue)\n\t: itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)\n{\n\tinitialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release);\n\tlastKnownGlobalOffset = -1;\n}\n\ntemplate<typename T, typename Traits>\nConsumerToken::ConsumerToken(BlockingConcurrentQueue<T, Traits>& queue)\n\t: itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)\n{\n\tinitialOffset = reinterpret_cast<ConcurrentQueue<T, Traits>*>(&queue)->nextExplicitConsumerId.fetch_add(1, std::memory_order_release);\n\tlastKnownGlobalOffset = -1;\n}\n\ntemplate<typename T, typename Traits>\ninline void swap(ConcurrentQueue<T, Traits>& a, ConcurrentQueue<T, Traits>& b) MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\ninline void swap(ProducerToken& a, ProducerToken& b) MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\ninline void swap(ConsumerToken& a, ConsumerToken& b) MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\ntemplate<typename T, typename Traits>\ninline void swap(typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& a, typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\n}\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n"
  },
  {
    "path": "src/third_party/concurrentqueue/internal/concurrentqueue_internal_debug.h",
    "content": "#pragma once\n\n//#define MCDBGQ_TRACKMEM 1\n//#define MCDBGQ_NOLOCKFREE_FREELIST 1\n//#define MCDBGQ_USEDEBUGFREELIST 1\n//#define MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX 1\n//#define MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH 1\n\n#if defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__)\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\nnamespace moodycamel { namespace debug {\n\tstruct DebugMutex {\n\t\tDebugMutex() { InitializeCriticalSectionAndSpinCount(&cs, 0x400); }\n\t\t~DebugMutex() { DeleteCriticalSection(&cs); }\n\t\t\n\t\tvoid lock() { EnterCriticalSection(&cs); }\n\t\tvoid unlock() { LeaveCriticalSection(&cs); }\n\t\t\n\tprivate:\n\t\tCRITICAL_SECTION cs;\n\t};\n} }\n#else\n#include <mutex>\nnamespace moodycamel { namespace debug {\n\tstruct DebugMutex {\n\t\tvoid lock() { m.lock(); }\n\t\tvoid unlock() { m.unlock(); }\n\t\t\n\tprivate:\n\t\tstd::mutex m;\n\t};\n} }\n#endif\n\nnamespace moodycamel { namespace debug {\n\tstruct DebugLock {\n\t\texplicit DebugLock(DebugMutex& mutex)\n\t\t\t: mutex(mutex)\n\t\t{\n\t\t\tmutex.lock();\n\t\t}\n\t\t\n\t\t~DebugLock()\n\t\t{\n\t\t\tmutex.unlock();\n\t\t}\n\t\t\n\tprivate:\n\t\tDebugMutex& mutex;\n\t};\n\t\n\t\n\ttemplate<typename N>\n\tstruct DebugFreeList {\n\t\tDebugFreeList() : head(nullptr) { }\n\t\tDebugFreeList(DebugFreeList&& other) : head(other.head) { other.head = nullptr; }\n\t\tvoid swap(DebugFreeList& other) { std::swap(head, other.head); }\n\t\t\n\t\tinline void add(N* node)\n\t\t{\n\t\t\tDebugLock lock(mutex);\n\t\t\tnode->freeListNext = head;\n\t\t\thead = node;\n\t\t}\n\t\t\n\t\tinline N* try_get()\n\t\t{\n\t\t\tDebugLock lock(mutex);\n\t\t\tif (head == nullptr) {\n\t\t\t\treturn nullptr;\n\t\t\t}\n\t\t\t\n\t\t\tauto prevHead = head;\n\t\t\thead = head->freeListNext;\n\t\t\treturn prevHead;\n\t\t}\n\t\t\n\t\tN* head_unsafe() const { return head; }\n\t\t\n\tprivate:\n\t\tN* head;\n\t\tDebugMutex mutex;\n\t};\n} }\n"
  },
  {
    "path": "src/third_party/concurrentqueue/samples.md",
    "content": "# Samples for moodycamel::ConcurrentQueue\n\nHere are some example usage scenarios with sample code. Note that most\nuse the simplest version of each available method for demonstration purposes,\nbut they can all be adapted to use tokens and/or the corresponding bulk methods for\nextra speed.\n\n\n## Hello queue\n```C++\nConcurrentQueue<int> q;\n\nfor (int i = 0; i != 123; ++i)\n\tq.enqueue(i);\n\nint item;\nfor (int i = 0; i != 123; ++i) {\n\tq.try_dequeue(item);\n\tassert(item == i);\n}\n```\n\n## Hello concurrency\n\nBasic example of how to use the queue from multiple threads, with no\nparticular goal (i.e. it does nothing, but in an instructive way).\n```C++\nConcurrentQueue<int> q;\nint dequeued[100] = { 0 };\nstd::thread threads[20];\n\n// Producers\nfor (int i = 0; i != 10; ++i) {\n\tthreads[i] = std::thread([&](int i) {\n\t\tfor (int j = 0; j != 10; ++j) {\n\t\t\tq.enqueue(i * 10 + j);\n\t\t}\n\t}, i);\n}\n\n// Consumers\nfor (int i = 10; i != 20; ++i) {\n\tthreads[i] = std::thread([&]() {\n\t\tint item;\n\t\tfor (int j = 0; j != 20; ++j) {\n\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t++dequeued[item];\n\t\t\t}\n\t\t}\n\t});\n}\n\n// Wait for all threads\nfor (int i = 0; i != 20; ++i) {\n\tthreads[i].join();\n}\n\n// Collect any leftovers (could be some if e.g. 
consumers finish before producers)\nint item;\nwhile (q.try_dequeue(item)) {\n\t++dequeued[item];\n}\n\n// Make sure everything went in and came back out!\nfor (int i = 0; i != 100; ++i) {\n\tassert(dequeued[i] == 1);\n}\n```\n\n## Bulk up\n\nSame as previous example, but runs faster.\n```C++\nConcurrentQueue<int> q;\nint dequeued[100] = { 0 };\nstd::thread threads[20];\n\n// Producers\nfor (int i = 0; i != 10; ++i) {\n\tthreads[i] = std::thread([&](int i) {\n\t\tint items[10];\n\t\tfor (int j = 0; j != 10; ++j) {\n\t\t\titems[j] = i * 10 + j;\n\t\t}\n\t\tq.enqueue_bulk(items, 10);\n\t}, i);\n}\n\n// Consumers\nfor (int i = 10; i != 20; ++i) {\n\tthreads[i] = std::thread([&]() {\n\t\tint items[20];\n\t\tfor (std::size_t count = q.try_dequeue_bulk(items, 20); count != 0; --count) {\n\t\t\t++dequeued[items[count - 1]];\n\t\t}\n\t});\n}\n\n// Wait for all threads\nfor (int i = 0; i != 20; ++i) {\n\tthreads[i].join();\n}\n\n// Collect any leftovers (could be some if e.g. consumers finish before producers)\nint items[10];\nstd::size_t count;\nwhile ((count = q.try_dequeue_bulk(items, 10)) != 0) {\n\tfor (std::size_t i = 0; i != count; ++i) {\n\t\t++dequeued[items[i]];\n\t}\n}\n\n// Make sure everything went in and came back out!\nfor (int i = 0; i != 100; ++i) {\n\tassert(dequeued[i] == 1);\n}\n```\n\n## Producer/consumer model (simultaneous)\n\nIn this model, one set of threads is producing items,\nand the other is consuming them concurrently until all of\nthem have been consumed. 
The counters are required to\nensure that all items eventually get consumed.\n```C++\nConcurrentQueue<Item> q;\nconst int ProducerCount = 8;\nconst int ConsumerCount = 8;\nstd::thread producers[ProducerCount];\nstd::thread consumers[ConsumerCount];\nstd::atomic<int> doneProducers(0);\nstd::atomic<int> doneConsumers(0);\nfor (int i = 0; i != ProducerCount; ++i) {\n\tproducers[i] = std::thread([&]() {\n\t\twhile (produce) {\n\t\t\tq.enqueue(produceItem());\n\t\t}\n\t\tdoneProducers.fetch_add(1, std::memory_order_release);\n\t});\n}\nfor (int i = 0; i != ConsumerCount; ++i) {\n\tconsumers[i] = std::thread([&]() {\n\t\tItem item;\n\t\tbool itemsLeft;\n\t\tdo {\n\t\t\t// It's important to fence (if the producers have finished) *before* dequeueing\n\t\t\titemsLeft = doneProducers.load(std::memory_order_acquire) != ProducerCount;\n\t\t\twhile (q.try_dequeue(item)) {\n\t\t\t\titemsLeft = true;\n\t\t\t\tconsumeItem(item);\n\t\t\t}\n\t\t} while (itemsLeft || doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == ConsumerCount);\n\t\t// The condition above is a bit tricky, but it's necessary to ensure that the\n\t\t// last consumer sees the memory effects of all the other consumers before it\n\t\t// calls try_dequeue for the last time\n\t});\n}\nfor (int i = 0; i != ProducerCount; ++i) {\n\tproducers[i].join();\n}\nfor (int i = 0; i != ConsumerCount; ++i) {\n\tconsumers[i].join();\n}\n```\n## Producer/consumer model (simultaneous, blocking)\n\nThe blocking version is different, since either the number of elements being produced needs\nto be known ahead of time, or some other coordination is required to tell the consumers when\nto stop calling wait_dequeue (not shown here). 
This is necessary because otherwise a consumer\ncould end up blocking forever -- and destroying a queue while a consumer is blocking on it leads\nto undefined behaviour.\n```C++\nBlockingConcurrentQueue<Item> q;\nconst int ProducerCount = 8;\nconst int ConsumerCount = 8;\nstd::thread producers[ProducerCount];\nstd::thread consumers[ConsumerCount];\nstd::atomic<int> promisedElementsRemaining(ProducerCount * 1000);\nfor (int i = 0; i != ProducerCount; ++i) {\n\tproducers[i] = std::thread([&]() {\n\t\tfor (int j = 0; j != 1000; ++j) {\n\t\t\tq.enqueue(produceItem());\n\t\t}\n\t});\n}\nfor (int i = 0; i != ConsumerCount; ++i) {\n\tconsumers[i] = std::thread([&]() {\n\t\tItem item;\n\t\twhile (promisedElementsRemaining.fetch_sub(1, std::memory_order_relaxed)) {\n\t\t\tq.wait_dequeue(item);\n\t\t\tconsumeItem(item);\n\t\t}\n\t});\n}\nfor (int i = 0; i != ProducerCount; ++i) {\n\tproducers[i].join();\n}\nfor (int i = 0; i != ConsumerCount; ++i) {\n\tconsumers[i].join();\n}\n```\n\n## Producer/consumer model (separate stages)\n```C++\nConcurrentQueue<Item> q;\n\n// Production stage\nstd::thread threads[8];\nfor (int i = 0; i != 8; ++i) {\n\tthreads[i] = std::thread([&]() {\n\t\twhile (produce) {\n\t\t\tq.enqueue(produceItem());\n\t\t}\n\t});\n}\nfor (int i = 0; i != 8; ++i) {\n\tthreads[i].join();\n}\n\n// Consumption stage\nstd::atomic<int> doneConsumers(0);\nfor (int i = 0; i != 8; ++i) {\n\tthreads[i] = std::thread([&]() {\n\t\tItem item;\n\t\tdo {\n\t\t\twhile (q.try_dequeue(item)) {\n\t\t\t\tconsumeItem(item);\n\t\t\t}\n\t\t\t// Loop again one last time if we're the last producer (with the acquired\n\t\t\t// memory effects of the other producers):\n\t\t} while (doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == 8);\n\t});\n}\nfor (int i = 0; i != 8; ++i) {\n\tthreads[i].join();\n}\n```\nNote that there's no point trying to use the blocking queue with this model, since\nthere's no need to use the `wait` methods (all the elements are produced before any\nare 
consumed), and hence the complexity would be the same but with additional overhead.\n\n\n## Object pool\n\nIf you don't know what threads will be using the queue in advance,\nyou can't really declare any long-term tokens. The obvious solution\nis to use the implicit methods (that don't take any tokens):\n```C++\n// A pool of 'Something' objects that can be safely accessed\n// from any thread\nclass SomethingPool\n{\npublic:\n    Something getSomething()\n    {\n\tSomething obj;\n\tqueue.try_dequeue(obj);\n\n\t// If the dequeue succeeded, obj will be an object from the\n\t// thread pool, otherwise it will be the default-constructed\n\t// object as declared above\n\treturn obj;\n    }\n\n    void recycleSomething(Something&& obj)\n    {\n\tqueue.enqueue(std::move(obj));\n    }\n};\n```\n\n## Threadpool task queue\n```C++\nBlockingConcurrentQueue<Task> q;\n\n// To create a task from any thread:\nq.enqueue(...);\n\n// On threadpool threads:\nTask task;\nwhile (true) {\n\tq.wait_dequeue(task);\n\n\t// Process task...\n}\n```\n\n## Multithreaded game loop\n```C++\nBlockingConcurrentQueue<Task> q;\nstd::atomic<int> pendingTasks(0);\n\n// On threadpool threads:\nTask task;\nwhile (true) {\n\tq.wait_dequeue(task);\n\n\t// Process task...\n\n\tpendingTasks.fetch_add(-1, std::memory_order_release);\n}\n\n// Whenever a new task needs to be processed for the frame:\npendingTasks.fetch_add(1, std::memory_order_release);\nq.enqueue(...);\n\n// To wait for all the frame's tasks to complete before rendering:\nwhile (pendingTasks.load(std::memory_order_acquire) != 0)\n\tcontinue;\n\n// Alternatively you could help out the thread pool while waiting:\nwhile (pendingTasks.load(std::memory_order_acquire) != 0) {\n\tif (!q.try_dequeue(task)) {\n\t\tcontinue;\n\t}\n\n\t// Process task...\n\n\tpendingTasks.fetch_add(-1, std::memory_order_release);\n}\n```\n\n## Pump until empty\n\nThis might be useful if, for example, you want to process any remaining items\nin the queue before it's 
destroyed. Note that it is your responsibility\nto ensure that the memory effects of any enqueue operations you wish to see on\nthe dequeue thread are visible (i.e. if you're waiting for a certain set of elements,\nyou need to use memory fences to ensure that those elements are visible to the dequeue\nthread after they've been enqueued).\n```C++\nConcurrentQueue<Item> q;\n\n// Single-threaded pumping:\nItem item;\nwhile (q.try_dequeue(item)) {\n\t// Process item...\n}\n// q is guaranteed to be empty here, unless there is another thread enqueueing still or\n// there was another thread dequeueing at one point and its memory effects have not\n// yet been propagated to this thread.\n\n// Multi-threaded pumping:\nstd::thread threads[8];\nstd::atomic<int> doneConsumers(0);\nfor (int i = 0; i != 8; ++i) {\n\tthreads[i] = std::thread([&]() {\n\t\tItem item;\n\t\tdo {\n\t\t\twhile (q.try_dequeue(item)) {\n\t\t\t\t// Process item...\n\t\t\t}\n\t\t} while (doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == 8);\n\t\t// If there are still enqueue operations happening on other threads,\n\t\t// then the queue may not be empty at this point. However, if all enqueue\n\t\t// operations completed before we finished pumping (and the propagation of\n\t\t// their memory effects too), and all dequeue operations apart from those\n\t\t// our threads did above completed before we finished pumping (and the\n\t\t// propagation of their memory effects too), then the queue is guaranteed\n\t\t// to be empty at this point.\n\t});\n}\nfor (int i = 0; i != 8; ++i) {\n\tthreads[i].join();\n}\n```\n\n## Wait for a queue to become empty (without dequeueing)\n\nYou can't (robustly) :-) However, you can set up your own atomic counter and\npoll that instead (see the game loop example). If you're satisfied with merely an estimate, you can use\n`size_approx()`. 
Note that `size_approx()` may return 0 even if the queue is\nnot completely empty, unless the queue has already stabilized first (no threads\nare enqueueing or dequeueing, and all memory effects of any previous operations\nhave been propagated to the thread before it calls `size_approx()`).\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/CDSChecker/README.txt",
    "content": "These tests require CDSChecker to be checked out into a subdirectory\r\nnamed 'model-checker'.\r\n\r\nCDSChecker can be obtained from: git://demsky.eecs.uci.edu/model-checker.git\r\nThe version last used for testing was: 5c4efe5cd8bdfe1e85138396109876a121ca61d1\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/CDSChecker/corealgo.h",
    "content": "// ©2013 Cameron Desrochers\n\n// Provides the core enqueue/dequeue algorithm of moodycamel::ConcurrentQueue\n// for testing with CDSChecker (a C++11 memory model checking tool).\n// See http://demsky.eecs.uci.edu/c11modelchecker.html for more info.\n\n#pragma once\n\n#include \"model-checker/include/atomic\"\n#include \"model-checker/include/librace.h\"\n#include \"model-checker/include/model-assert.h\"\n\n#ifndef CHAR_BIT\n#define CHAR_BIT 8\n#endif\n\ntypedef unsigned int index_t;\nstatic std::atomic<index_t> headIndex;\nstatic std::atomic<index_t> tailIndex;\nstatic std::atomic<index_t> dequeueOvercommit;\nstatic std::atomic<index_t> dequeueOptimisticCount;\n\nstatic const unsigned int BLOCK_SIZE = 256;\nstatic int block[BLOCK_SIZE];\n\nstatic void init()\n{\n\theadIndex.store(0, std::memory_order_relaxed);\n\ttailIndex.store(0, std::memory_order_relaxed);\n\tdequeueOvercommit.store(0, std::memory_order_relaxed);\n\tdequeueOptimisticCount.store(0, std::memory_order_relaxed);\n}\n\ntemplate<typename T>\nstatic inline bool circular_less_than(T a, T b)\n{\n\treturn static_cast<T>(a - b) > static_cast<T>(static_cast<T>(1) << static_cast<T>(sizeof(T) * CHAR_BIT - 1));\n}\n\nstatic void enqueue(int element)\n{\n\tindex_t currentTailIndex = tailIndex.load(std::memory_order_relaxed);\n\tindex_t newTailIndex = 1 + currentTailIndex;\n\t\n\tstore_32(&block[currentTailIndex & (BLOCK_SIZE - 1)], element);\n\t\n\ttailIndex.store(newTailIndex, std::memory_order_release);\n}\n\nstatic bool try_dequeue(int& element)\n{\n\tauto tail = tailIndex.load(std::memory_order_relaxed);\n\tauto overcommit = dequeueOvercommit.load(std::memory_order_relaxed);\n\tif (circular_less_than<index_t>(dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) {\n\t\t// Might be something to dequeue, let's give it a try\n\t\t\n\t\t// Note that this if is purely for performance purposes in the common case when the queue is\n\t\t// empty and the values are eventually 
consistent -- we may enter here spuriously.\n\t\t\n\t\t// Note that whatever the values of overcommit and tail are, they are not going to change (unless we\n\t\t// change them) and must be the same value at this point (inside the if) as when the if condition was\n\t\t// evaluated.\n\n\t\t// We insert an acquire fence here to synchronize-with the release upon incrementing dequeueOvercommit below.\n\t\t// This ensures that whatever the value we got loaded into overcommit, the load of dequeueOptisticCount in\n\t\t// the fetch_add below will result in a value at least as recent as that (and therefore at least as large).\n\t\t// Note that I believe a compiler (signal) fence here would be sufficient due to the nature of fetch_add (all\n\t\t// read-modify-write operations are guaranteed to work on the latest value in the modification order), but\n\t\t// unfortunately that can't be shown to be correct using only the C++11 standard.\n\t\t// See http://stackoverflow.com/questions/18223161/what-are-the-c11-memory-ordering-guarantees-in-this-corner-case\n\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\n\t\t// Increment optimistic counter, then check if it went over the boundary\n\t\tauto myDequeueCount = dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\n\t\t// Note that since dequeueOvercommit must be <= dequeueOptimisticCount (because dequeueOvercommit is only ever\n\t\t// incremented after dequeueOptimisticCount -- this is enforced in the `else` block below), and since we now\n\t\t// have a version of dequeueOptimisticCount that is at least as recent as overcommit (due to the release upon\n\t\t// incrementing dequeueOvercommit and the acquire above that synchronizes with it), overcommit <= myDequeueCount.\n\t\tMODEL_ASSERT(overcommit <= myDequeueCount);\n\t\t\n\t\t// Note that we reload tail here in case it changed; it will be the same value as before or greater, since\n\t\t// this load is sequenced after (happens after) the earlier load 
above. This is supported by read-read\n\t\t// coherance (as defined in the standard), explained here: http://en.cppreference.com/w/cpp/atomic/memory_order\n\t\tauto newTail = tailIndex.load(std::memory_order_relaxed);\n\t\tMODEL_ASSERT(newTail >= tail);\n\t\ttail = newTail;\n\t\tif (circular_less_than<index_t>(myDequeueCount - overcommit, tail)) {\n\t\t\t// Guaranteed to be at least one element to dequeue!\n\t\t\t\n\t\t\t// Get the index. Note that since there's guaranteed to be at least one element, this\n\t\t\t// will never exceed tail.\n\t\t\tauto index = headIndex.fetch_add(1, std::memory_order_relaxed);\n\t\t\tMODEL_ASSERT(index <= tail);\n\t\t\t\n\t\t\t// Dequeue\n\t\t\telement = load_32(&block[index & (BLOCK_SIZE - 1)]);\n\t\t\t\n\t\t\treturn true;\n\t\t}\n\t\telse {\n\t\t\t// Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent\n\t\t\tdequeueOvercommit.fetch_add(1, std::memory_order_release);\t\t// Release so that the fetch_add on dequeueOptimisticCount is guaranteed to happen before this write\n\t\t}\n\t}\n\n\treturn false;\n}\n\nstatic int size_approx()\n{\n\tauto tail = tailIndex.load(std::memory_order_relaxed);\n\tauto head = headIndex.load(std::memory_order_relaxed);\n\treturn circular_less_than(head, tail) ? static_cast<int>(tail - head) : 0;\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/CDSChecker/enqueue_dequeue_many.cpp",
    "content": "// ©2013 Cameron Desrochers.\n// Distributed under the simplified BSD license (see the LICENSE file that\n// should have come with this file).\n\n#include \"model-checker/include/threads.h\"\n#include \"corealgo.h\"\n\nvoid thread_main(void* param)\n{\n\tint id = *(int*)param;\n\tint& dequeueCount = *(int*)param;\n\tdequeueCount = 0;\n\t\n\tint last[5] = { 0 };\n\t\n\tenqueue((id << 24) | 1);\n\tenqueue((id << 24) | 2);\n\t\n\tint element;\n\tbool success = try_dequeue(element);\n\tif (success) {\n\t\tMODEL_ASSERT((element & 0xFFFFFF) > last[element >> 24]);\n\t\tlast[element >> 24] = element & 0xFFFFFF;\n\t\t++dequeueCount;\n\t}\n\tsuccess = try_dequeue(element);\n\tif (success) {\n\t\tMODEL_ASSERT((element & 0xFFFFFF) > last[element >> 24]);\n\t\tlast[element >> 24] = element & 0xFFFFFF;\n\t\t++dequeueCount;\n\t}\n}\n\nint user_main(int, char**)\n{\n\tinit();\n\t\n\t// Start out as thread IDs, but are re-used by the threads\n\t// to indicate the number of elements each one dequeued\n\tint w = 1, x = 2, y = 3, z = 4;\n\t\n\tthrd_t a, b, c, d;\n\t\n\tthrd_create(&a, &thread_main, &w);\n\tthrd_create(&b, &thread_main, &x);\n\tthrd_create(&c, &thread_main, &y);\n\tthrd_create(&d, &thread_main, &z);\n\t\n\tthrd_join(a);\n\tthrd_join(b);\n\tthrd_join(c);\n\tthrd_join(d);\n\t\n\tMODEL_ASSERT(w + x + y + z + size_approx() == 8);\n\t\n\treturn 0;\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/CDSChecker/enqueue_dequeue_one.cpp",
    "content": "// ©2013 Cameron Desrochers.\n// Distributed under the simplified BSD license (see the LICENSE file that\n// should have come with this file).\n\n#include \"model-checker/include/threads.h\"\n#include \"corealgo.h\"\n\nvoid producer_thread(void*)\n{\n\tenqueue(1234);\n}\n\nvoid consumer_thread(void*)\n{\n\tint element;\n\tbool result = try_dequeue(element);\n\tMODEL_ASSERT(!result || element == 1234);\n\t\n\tif (result) {\n\t\tMODEL_ASSERT(!try_dequeue(element));\n\t}\n}\n\nint user_main(int, char**)\n{\n\tinit();\n\t\n\tthrd_t p, c;\n\t\n\tthrd_create(&p, &producer_thread, nullptr);\n\tthrd_create(&c, &consumer_thread, nullptr);\n\t\n\tthrd_join(p);\n\tthrd_join(c);\n\t\n\treturn 0;\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/common/simplethread.cpp",
    "content": "// ©2013 Cameron Desrochers\r\n\r\n#include \"simplethread.h\"\r\n\r\n#if defined(_WIN32)\r\n#define WIN32_LEAN_AND_MEAN\r\n#include <windows.h>\r\n\r\nstruct SimpleThread::ThreadRef\r\n{\r\n\tHANDLE handle;\r\n\r\n\tstatic DWORD WINAPI ThreadProc(LPVOID param)\r\n\t{\r\n\t\tauto threadRef = static_cast<ThreadRef*>(param);\r\n\t\tthreadRef->callbackFunc(threadRef->callbackObj);\r\n\t\treturn 0;\r\n\t}\r\n\t\r\n\tThreadRef(void* callbackObj, CallbackFunc callbackFunc)\r\n\t\t: callbackObj(callbackObj), callbackFunc(callbackFunc)\r\n\t{\r\n\t}\r\n\t\r\n\tvoid* callbackObj;\r\n\tCallbackFunc callbackFunc;\r\n};\r\n\r\nvoid SimpleThread::startThread(void* callbackObj, CallbackFunc callbackFunc)\r\n{\r\n\tthread = new ThreadRef(callbackObj, callbackFunc);\r\n\tthread->handle = CreateThread(NULL, StackSize, &ThreadRef::ThreadProc, thread, 0, NULL);\r\n}\r\n\r\nvoid SimpleThread::join()\r\n{\r\n\tif (thread != nullptr && thread->handle != NULL) {\r\n\t\tWaitForSingleObject(thread->handle, INFINITE);\r\n\t\tCloseHandle(thread->handle);\r\n\t\tthread->handle = NULL;\r\n\t}\r\n}\r\n#else\r\n#include <thread>\r\n\r\nstruct SimpleThread::ThreadRef\r\n{\r\n\tstd::thread thread;\r\n\r\n\tstatic void threadProc(ThreadRef* threadRef)\r\n\t{\r\n\t\tthreadRef->callbackFunc(threadRef->callbackObj);\r\n\t}\r\n\t\r\n\tThreadRef(void* callbackObj, CallbackFunc callbackFunc)\r\n\t\t: callbackObj(callbackObj), callbackFunc(callbackFunc)\r\n\t{\r\n\t}\r\n\t\r\n\tvoid* callbackObj;\r\n\tCallbackFunc callbackFunc;\r\n};\r\n\r\nvoid SimpleThread::startThread(void* callbackObj, CallbackFunc callbackFunc)\r\n{\r\n\tthread = new ThreadRef(callbackObj, callbackFunc);\r\n\tthread->thread = std::thread(&ThreadRef::threadProc, thread);\r\n}\r\n\r\nvoid SimpleThread::join()\r\n{\r\n\tif (thread != nullptr && thread->thread.joinable()) {\r\n\t\tthread->thread.join();\r\n\t}\r\n}\r\n#endif\r\n\r\nSimpleThread::~SimpleThread()\r\n{\r\n\tif (thread != nullptr) 
{\r\n\t\tjoin();\r\n\t\tdelete thread;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/common/simplethread.h",
    "content": "// ©2013 Cameron Desrochers\r\n\r\n#pragma once\r\n\r\n// Like C++11's std::thread, but with a reduced API, and works on Windows with MSVC2010+.\r\n// Wraps std::thread on other OSes. Perhaps the most significant departure between\r\n// std::thread and this mini-library is that join() is called implicitly in the destructor,\r\n// if the thread is joinable. The thread callback functions should not throw exceptions.\r\n\r\n#include <utility>\r\n#include <type_traits>\r\n\r\n\r\nnamespace details\r\n{\r\n\ttemplate<typename TArg1 = void, typename TArg2 = void, typename TArg3 = void>\r\n\tstruct ArgWrapper\r\n\t{\r\n\t\ttypename std::remove_reference<TArg1>::type arg1;\r\n\t\ttypename std::remove_reference<TArg2>::type arg2;\r\n\t\ttypename std::remove_reference<TArg3>::type arg3;\r\n\t\tArgWrapper(ArgWrapper const& o) : arg1(o.arg1), arg2(o.arg2), arg3(o.arg3) { }\r\n\t\tArgWrapper(ArgWrapper&& o) : arg1(std::move(o.arg1)), arg2(std::move(o.arg2)), arg3(std::move(o.arg3)) { }\r\n\t\ttemplate<typename T, typename U, typename V>\r\n\t\tArgWrapper(T&& a1, U&& a2, V&& a3) : arg1(std::forward<T>(a1)), arg2(std::forward<U>(a2)), arg3(std::forward<V>(a3)) { }\r\n\t\ttemplate<typename TCallback>\r\n\t\tvoid callCallback(TCallback&& callback) const { std::forward<TCallback>(callback)(std::move(arg1), std::move(arg2), std::move(arg3)); }\r\n\t};\r\n\t\r\n\ttemplate<typename TArg1, typename TArg2>\r\n\tstruct ArgWrapper<TArg1, TArg2, void>\r\n\t{\r\n\t\ttypename std::remove_reference<TArg1>::type arg1;\r\n\t\ttypename std::remove_reference<TArg2>::type arg2;\r\n\t\tArgWrapper(ArgWrapper const& o) : arg1(o.arg1), arg2(o.arg2) { }\r\n\t\tArgWrapper(ArgWrapper&& o) : arg1(std::move(o.arg1)), arg2(std::move(o.arg2)) { }\r\n\t\ttemplate<typename T, typename U>\r\n\t\tArgWrapper(T&& a1, U&& a2) : arg1(std::forward<T>(a1)), arg2(std::forward<U>(a2)) { }\r\n\t\ttemplate<typename TCallback>\r\n\t\tvoid callCallback(TCallback&& callback) const { 
std::forward<TCallback>(callback)(std::move(arg1), std::move(arg2)); }\r\n\t};\r\n\t\r\n\ttemplate<typename TArg1>\r\n\tstruct ArgWrapper<TArg1, void, void>\r\n\t{\r\n\t\ttypename std::remove_reference<TArg1>::type arg1;\r\n\t\tArgWrapper(ArgWrapper const& o) : arg1(o.arg1) { }\r\n\t\tArgWrapper(ArgWrapper&& o) : arg1(std::move(o.arg1)) { }\r\n\t\ttemplate<typename T>\r\n\t\tArgWrapper(T&& a1) : arg1(std::forward<T>(a1)) { }\r\n\t\ttemplate<typename TCallback>\r\n\t\tvoid callCallback(TCallback&& callback) const { std::forward<TCallback>(callback)(std::move(arg1)); }\r\n\t};\r\n\t\r\n\ttemplate<> struct ArgWrapper<void, void, void>\r\n\t{\r\n\t\ttemplate<typename TCallback> void callCallback(TCallback&& callback) const { std::forward<TCallback>(callback)(); }\r\n\t};\r\n}\r\n\r\n\r\nclass SimpleThread\r\n{\r\nprivate:\r\n\tstruct ThreadRef;\r\n\t\r\n\ttemplate<typename TCallback, typename TArgs>\r\n\tstruct CallbackWrapper\r\n\t{\r\n\t\ttemplate<typename U>\r\n\t\tCallbackWrapper(TCallback&& callback, U&& args)\r\n\t\t\t: callback(std::forward<TCallback>(callback)), args(std::forward<U>(args))\r\n\t\t{\r\n\t\t}\r\n\r\n\t\tstatic void callAndDelete(void* wrapper)\r\n\t\t{\r\n\t\t\tauto typedWrapper = static_cast<CallbackWrapper*>(wrapper);\r\n\t\t\ttypedWrapper->args.callCallback(std::move(typedWrapper->callback));\r\n\t\t\tdelete typedWrapper;\r\n\t\t}\r\n\r\n\t\ttypename std::decay<TCallback>::type callback;\r\n\t\tTArgs args;\r\n\t};\r\n\t\r\n\ttypedef void (*CallbackFunc)(void*);\r\n\r\n\tvoid startThread(void* callbackObj, CallbackFunc callbackFunc);\r\n\r\n\r\npublic:\r\n\tstatic const int StackSize = 4 * 1024;\t// bytes\r\n\r\n\tSimpleThread() : thread(nullptr) {  }\r\n\r\n\tSimpleThread(SimpleThread&& other)\r\n\t\t: thread(other.thread)\r\n\t{\r\n\t\tother.thread = nullptr;\r\n\t}\r\n\t\r\n\tSimpleThread& operator=(SimpleThread&& other)\r\n\t{\r\n\t\tthread = other.thread;\r\n\t\tother.thread = nullptr;\r\n\t\treturn *this;\r\n\t}\r\n\t\r\n\t// Disable 
copying and copy-assignment\r\nprivate:\r\n\tSimpleThread(SimpleThread const&);\r\n\tSimpleThread& operator=(SimpleThread const&);\r\npublic:\r\n\r\n\ttemplate<typename TCallback>\r\n\texplicit SimpleThread(TCallback&& callback)\r\n\t{\r\n\t\tauto wrapper = new CallbackWrapper<TCallback, ::details::ArgWrapper<>>(\r\n\t\t\tstd::forward<TCallback>(callback),\r\n\t\t\t::details::ArgWrapper<>()\r\n\t\t);\r\n\t\tstartThread(wrapper, &CallbackWrapper<TCallback, ::details::ArgWrapper<>>::callAndDelete);\r\n\t}\r\n\r\n\ttemplate<typename TCallback, typename TArg1>\r\n\texplicit SimpleThread(TCallback&& callback, TArg1&& arg1)\r\n\t{\r\n\t\tauto wrapper = new CallbackWrapper<TCallback, ::details::ArgWrapper<TArg1>>(\r\n\t\t\tstd::forward<TCallback>(callback),\r\n\t\t\t::details::ArgWrapper<TArg1>(std::forward<TArg1>(arg1))\r\n\t\t);\r\n\t\tstartThread(wrapper, &CallbackWrapper<TCallback, ::details::ArgWrapper<TArg1>>::callAndDelete);\r\n\t}\r\n\r\n\ttemplate<typename TCallback, typename TArg1, typename TArg2>\r\n\texplicit SimpleThread(TCallback&& callback, TArg1&& arg1, TArg2&& arg2)\r\n\t{\r\n\t\tauto wrapper = new CallbackWrapper<TCallback, ::details::ArgWrapper<TArg1, TArg2>>(\r\n\t\t\tstd::forward<TCallback>(callback),\r\n\t\t\t::details::ArgWrapper<TArg1, TArg2>(std::forward<TArg1>(arg1), std::forward<TArg2>(arg2))\r\n\t\t);\r\n\t\tstartThread(wrapper, &CallbackWrapper<TCallback, ::details::ArgWrapper<TArg1, TArg2>>::callAndDelete);\r\n\t}\r\n\r\n\ttemplate<typename TCallback, typename TArg1, typename TArg2, typename TArg3>\r\n\texplicit SimpleThread(TCallback&& callback, TArg1&& arg1, TArg2&& arg2, TArg3&& arg3)\r\n\t{\r\n\t\tauto wrapper = new CallbackWrapper<TCallback, ::details::ArgWrapper<TArg1, TArg2, TArg3>>(\r\n\t\t\tstd::forward<TCallback>(callback),\r\n\t\t\t::details::ArgWrapper<TArg1, TArg2, TArg3>(std::forward<TArg1>(arg1), std::forward<TArg2>(arg2), std::forward<TArg3>(arg3))\r\n\t\t);\r\n\t\tstartThread(wrapper, &CallbackWrapper<TCallback, 
::details::ArgWrapper<TArg1, TArg2, TArg3>>::callAndDelete);\r\n\t}\r\n\t\r\n\t~SimpleThread();\r\n\r\n\tvoid join();\r\n\r\nprivate:\r\n\tThreadRef* thread;\r\n};\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/common/systemtime.cpp",
    "content": "// ©2013-2014 Cameron Desrochers\r\n\r\n#include \"systemtime.h\"\r\n#include <climits>\r\n\r\n#if defined(_MSC_VER) && _MSC_VER < 1700\r\n#include <intrin.h>\r\n#define CompilerMemBar() _ReadWriteBarrier()\r\n#else\r\n#include <atomic>\r\n#define CompilerMemBar() std::atomic_signal_fence(std::memory_order_seq_cst)\r\n#endif\r\n\r\n#if defined(ST_WINDOWS)\r\n\r\n#include <windows.h>\r\n\r\nnamespace moodycamel\r\n{\r\n\r\nvoid sleep(int milliseconds)\r\n{\r\n\t::Sleep(milliseconds);\r\n}\r\n\r\nSystemTime getSystemTime()\r\n{\r\n\tLARGE_INTEGER t;\r\n\tCompilerMemBar();\r\n\tif (!QueryPerformanceCounter(&t)) {\r\n\t\treturn static_cast<SystemTime>(-1);\r\n\t}\r\n\tCompilerMemBar();\r\n\t\r\n\treturn static_cast<SystemTime>(t.QuadPart);\r\n}\r\n\r\ndouble getTimeDelta(SystemTime start)\r\n{\r\n\tLARGE_INTEGER t;\r\n\tCompilerMemBar();\r\n\tif (start == static_cast<SystemTime>(-1) || !QueryPerformanceCounter(&t)) {\r\n\t\treturn -1;\r\n\t}\r\n\tCompilerMemBar();\r\n\r\n\tauto now = static_cast<SystemTime>(t.QuadPart);\r\n\r\n\tLARGE_INTEGER f;\r\n\tif (!QueryPerformanceFrequency(&f)) {\r\n\t\treturn -1;\r\n\t}\r\n\r\n#if defined(__GNUC__)\r\n#pragma GCC diagnostic push\r\n#pragma GCC diagnostic ignored \"-Wconversion\"\r\n#endif\r\n\treturn static_cast<double>(static_cast<__int64>(now - start)) / f.QuadPart * 1000;\r\n#if defined(__GNUC__)\r\n#pragma GCC diagnostic pop\r\n#endif\r\n}\r\n\r\n}  // end namespace moodycamel\r\n\r\n#elif defined(ST_APPLE)\r\n\r\n#include <mach/mach.h>\r\n#include <mach/mach_time.h>\r\n#include <unistd.h>\r\n#include <time.h>\r\n\r\nnamespace moodycamel\r\n{\r\n\r\nvoid sleep(int milliseconds)\r\n{\r\n\t::usleep(milliseconds * 1000);\r\n}\r\n\r\nSystemTime getSystemTime()\r\n{\r\n\tCompilerMemBar();\r\n\tstd::uint64_t result = mach_absolute_time();\r\n\tCompilerMemBar();\r\n\t\r\n\treturn result;\r\n}\r\n\r\ndouble getTimeDelta(SystemTime start)\r\n{\r\n\tCompilerMemBar();\r\n\tstd::uint64_t end = 
mach_absolute_time();\r\n\tCompilerMemBar();\r\n\r\n\tmach_timebase_info_data_t tb = { 0 };\r\n\tmach_timebase_info(&tb);\r\n\tdouble toNano = static_cast<double>(tb.numer) / tb.denom;\r\n\t\r\n\treturn static_cast<double>(end - start) * toNano * 0.000001;\r\n}\r\n\r\n}  // end namespace moodycamel\r\n\r\n#elif defined(ST_NIX)\r\n\r\n#include <unistd.h>\r\n\r\nnamespace moodycamel\r\n{\r\n\r\nvoid sleep(int milliseconds)\r\n{\r\n\t::usleep(milliseconds * 1000);\r\n}\r\n\r\nSystemTime getSystemTime()\r\n{\r\n\ttimespec t;\r\n\tCompilerMemBar();\r\n\tif (clock_gettime(CLOCK_MONOTONIC_RAW, &t) != 0) {\r\n\t\tt.tv_sec = (time_t)-1;\r\n\t\tt.tv_nsec = -1;\r\n\t}\r\n\tCompilerMemBar();\r\n\t\r\n\treturn t;\r\n}\r\n\r\ndouble getTimeDelta(SystemTime start)\r\n{\r\n\ttimespec t;\r\n\tCompilerMemBar();\r\n\tif ((start.tv_sec == (time_t)-1 && start.tv_nsec == -1) || clock_gettime(CLOCK_MONOTONIC_RAW, &t) != 0) {\r\n\t\treturn -1;\r\n\t}\r\n\tCompilerMemBar();\r\n\r\n\treturn static_cast<double>(static_cast<long>(t.tv_sec) - static_cast<long>(start.tv_sec)) * 1000 + double(t.tv_nsec - start.tv_nsec) / 1000000;\r\n}\r\n\r\n}  // end namespace moodycamel\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/common/systemtime.h",
    "content": "// ©2013-2014 Cameron Desrochers\r\n\r\n#pragma once\r\n\r\n#if defined(_WIN32)\r\n#define ST_WINDOWS\r\n#elif defined(__APPLE__) && defined(__MACH__)\r\n#define ST_APPLE\r\n#elif defined(__linux__) || defined(__FreeBSD__) || defined(BSD)\r\n#define ST_NIX\r\n#else\r\n#error \"Unknown platform\"\r\n#endif\r\n\r\n#if defined(ST_WINDOWS)\r\nnamespace moodycamel { typedef unsigned long long SystemTime; }\r\n#elif defined(ST_APPLE)\r\n#include <cstdint>\r\nnamespace moodycamel { typedef std::uint64_t SystemTime; }\r\n#elif defined(ST_NIX)\r\n#include <time.h>\r\nnamespace moodycamel { typedef timespec SystemTime; }\r\n#endif\r\n\r\nnamespace moodycamel\r\n{\r\nvoid sleep(int milliseconds);\r\n\r\nSystemTime getSystemTime();\r\n\r\n// Returns the delta time, in milliseconds\r\ndouble getTimeDelta(SystemTime start);\r\n}\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/corealgos.h",
    "content": "// ©2014 Cameron Desrochers\n\n// moodycamel::ConcurrentQueue contains many inner data structures which\n// are difficult to test in isolation. So, this file contains copies of\n// them, extracted and isolated so as to be independently testable.\n\n#pragma once\n\n#include <atomic>\n#include <cstdint>\n#include <cstdlib>\n#include <algorithm>\n#include <utility>\n#include <limits>\n#include <cassert>\n\n\n// Define corealgos_allocator before including this header in order to override the\n// default malloc/free functions\n#ifndef corealgos_allocator\nstruct corealgos_allocator\n{\n\tstatic inline void* malloc(std::size_t size) { return std::malloc(size); }\n\tstatic inline void free(void* ptr) { std::free(ptr); }\n};\n#endif\n\n\n////////////////////////////////////////////////////////////////////////////////\n// Lock-free add-only list (e.g. used to track producers)\n////////////////////////////////////////////////////////////////////////////////\n\nnamespace moodycamel { namespace corealgos {\n\nstruct ListItem\n{\n\tListItem()\n\t\t: concurrentListPrev(nullptr)\n\t{\n\t}\n\t\npublic:\n\tstd::atomic<ListItem*> concurrentListPrev;\n};\n\ntemplate<typename T>\t\t// T should inherit ListItem or implement the same interface\nstruct ConcurrentAddOnlyList\n{\n\tConcurrentAddOnlyList()\n\t\t: tail_(nullptr)\n\t{\n\t}\n\t\n\tinline T* tail() { return tail_.load(std::memory_order_acquire); }\n\t\n\tvoid add(T* element)\n\t{\n\t\tassert(element != nullptr);\n\t\t\n\t\t// Add it to the lock-free list\n\t\tauto prevTail = tail_.load(std::memory_order_relaxed);\n\t\tdo {\n\t\t\telement->concurrentListPrev = prevTail;\n\t\t} while (!tail_.compare_exchange_weak(prevTail, element, std::memory_order_release, std::memory_order_relaxed));\n\t}\n\t\nprivate:\n\tstd::atomic<T*> tail_;\n};\n\n} }\n\n\n\n////////////////////////////////////////////////////////////////////////////////\n// Thread local hash 
map\n////////////////////////////////////////////////////////////////////////////////\n\n#if defined(__APPLE__)\n#include \"TargetConditionals.h\" // Needed for TARGET_OS_IPHONE\n#endif\n\n// Platform-specific definitions of a numeric thread ID type and an invalid value\n#if defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__)\n// No sense pulling in windows.h in a header, we'll manually declare the function\n// we use and rely on backwards-compatibility for this not to break\nextern \"C\" __declspec(dllimport) unsigned long __stdcall GetCurrentThreadId(void);\nnamespace moodycamel { namespace corealgos { namespace details {\n\tstatic_assert(sizeof(unsigned long) == sizeof(std::uint32_t), \"Expected size of unsigned long to be 32 bits on Windows\");\n\ttypedef std::uint32_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id = 0;\t\t// See http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx\n\tstatic inline thread_id_t thread_id() { return static_cast<thread_id_t>(::GetCurrentThreadId()); }\n} } }\n#elif defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || (defined(__APPLE__) && TARGET_OS_IPHONE)\nnamespace moodycamel { namespace corealgos { namespace details {\n\ttypedef std::uintptr_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id = 0;\n\tstatic inline thread_id_t thread_id() { return std::hash<std::thread::id>()(std::this_thread::get_id()); }\n} } }\n#else\n// Use a nice trick from this answer: http://stackoverflow.com/a/8438730/21475\n// In order to get a numeric thread ID in a platform-independent way, we use a thread-local\n// static variable's address as a thread identifier :-)\n#if defined(__GNUC__) || defined(__INTEL_COMPILER)\n#define MOODYCAMEL_COREALGO_THREADLOCAL __thread\n#elif defined(_MSC_VER)\n#define MOODYCAMEL_COREALGO_THREADLOCAL __declspec(thread)\n#else\n// Assume C++11 compliant compiler\n#define MOODYCAMEL_COREALGO_THREADLOCAL thread_local\n#endif\nnamespace moodycamel { namespace corealgos 
{ namespace details {\n\ttypedef std::uintptr_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id = 0;\t\t// Address can't be nullptr\n\tstatic inline thread_id_t thread_id() { static MOODYCAMEL_COREALGO_THREADLOCAL int x; return reinterpret_cast<thread_id_t>(&x); }\n} } }\n#endif\n\nnamespace moodycamel { namespace corealgos {\n\nnamespace details\n{\n\ttemplate<bool use32> struct _hash_32_or_64 {\n\t\tstatic inline std::size_t hash(std::uint32_t h)\n\t\t{\n\t\t\t// MurmurHash3 finalizer -- see https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp\n\t\t\t// Since the thread ID is already unique, all we really want to do is propagate that\n\t\t\t// uniqueness evenly across all the bits, so that we can use a subset of the bits while\n\t\t\t// reducing collisions significantly\n\t\t\th ^= h >> 16;\n\t\t\th *= 0x85ebca6b;\n\t\t\th ^= h >> 13;\n\t\t\th *= 0xc2b2ae35;\n\t\t\treturn static_cast<std::size_t>(h ^ (h >> 16));\n\t\t}\n\t};\n\ttemplate<> struct _hash_32_or_64<1> {\n\t\tstatic inline std::size_t hash(std::uint64_t h)\n\t\t{\n\t\t\th ^= h >> 33;\n\t\t\th *= 0xff51afd7ed558ccd;\n\t\t\th ^= h >> 33;\n\t\t\th *= 0xc4ceb9fe1a85ec53;\n\t\t\treturn static_cast<std::size_t>(h ^ (h >> 33));\n\t\t}\n\t};\n\ttemplate<std::size_t size> struct hash_32_or_64 : public _hash_32_or_64<(size > 4)> {  };\n\t\n\tstatic inline std::size_t hash_thread_id(thread_id_t id)\n\t{\n\t\tstatic_assert(sizeof(thread_id_t) <= 8, \"Expected a platform where thread IDs are at most 64-bit values\");\n\t\treturn hash_32_or_64<sizeof(thread_id_t)>::hash(id);\n\t}\n\t\n\ttemplate<typename U>\n\tstatic inline char* align_for(char* ptr)\n\t{\n\t\tconst std::size_t alignment = std::alignment_of<U>::value;\n\t\treturn ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;\n\t}\n}\n\n\ntemplate<typename T>\t\t// T should inherit ListItem or implement the same interface\nstruct ThreadLocal\n{\n\texplicit ThreadLocal(std::size_t 
initialHashSize)\n\t\t: initialHashEntries(initialHashSize)\n\t{\n\t\tassert(initialHashSize > 0 && (initialHashSize & (initialHashSize - 1)) == 0);\n\t\t\n\t\tresizeInProgress.clear();\n\t\tcurrentHashCount.store(0, std::memory_order_relaxed);\n\t\tauto hash = &initialHash;\n\t\thash->capacity = initialHashSize;\n\t\thash->entries = &initialHashEntries[0];\n\t\tfor (std::size_t i = 0; i != initialHashSize; ++i) {\n\t\t\tinitialHashEntries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed);\n\t\t}\n\t\thash->prev = nullptr;\n\t\tcurrentHash.store(hash, std::memory_order_relaxed);\n\t}\n\t\n\t~ThreadLocal()\n\t{\n\t\t// Destroy items\n\t\tauto ptr = items.tail();\n\t\twhile (ptr != nullptr) {\n\t\t\tauto prev = static_cast<T*>(ptr->concurrentListPrev.load(std::memory_order_relaxed));\n\t\t\tptr->~T();\n\t\t\tcorealgos_allocator::free(ptr);\n\t\t\tptr = prev;\n\t\t}\n\t\t\n\t\t// Destroy hash tables\n\t\tauto hash = currentHash.load(std::memory_order_relaxed);\n\t\twhile (hash != nullptr) {\n\t\t\tauto prev = hash->prev;\n\t\t\tif (prev != nullptr) {\t\t// The last hash is part of this object and was not allocated dynamically\n\t\t\t\tfor (std::size_t i = 0; i != hash->capacity; ++i) {\n\t                hash->entries[i].~KeyValuePair();\n\t            }\n\t            hash->~InnerHash();\n\t\t\t\tcorealgos_allocator::free(hash);\n\t\t\t}\n\t\t\thash = prev;\n\t\t}\n\t}\n\t\n\t// Only fails (returns nullptr) if memory allocation fails\n\tT* get_or_create()\n\t{\n\t\t// Note that since the data is essentially thread-local (key is thread ID),\n\t\t// there's a reduced need for fences (memory ordering is already consistent\n\t\t// for any individual thread), except for the current table itself\n\t\t\n\t\t// Start by looking for the thread ID in the current and all previous hash tables.\n\t\t// If it's not found, it must not be in there yet, since this same thread would\n\t\t// have added it previously to one of the tables that we 
traversed.\n\t\t\n\t\t// Code and algorithm adapted from http://preshing.com/20130605/the-worlds-simplest-lock-free-hash-table\n\t\t\n\t\tauto id = details::thread_id();\n\t\tauto hashedId = details::hash_thread_id(id);\n\t\t\n\t\tauto mainHash = currentHash.load(std::memory_order_acquire);\n\t\tfor (auto hash = mainHash; hash != nullptr; hash = hash->prev) {\n\t\t\t// Look for the id in this hash\n\t\t\tauto index = hashedId;\n\t\t\twhile (true) {\t\t// Not an infinite loop because at least one slot is free in the hash table\n\t\t\t\tindex &= hash->capacity - 1;\n\t\t\t\t\n\t\t\t\tauto probedKey = hash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\tif (probedKey == id) {\n\t\t\t\t\t// Found it! If we had to search several hashes deep, though, we should lazily add it\n\t\t\t\t\t// to the current main hash table to avoid the extended search next time.\n\t\t\t\t\t// Note there's guaranteed to be room in the current hash table since every subsequent\n\t\t\t\t\t// table implicitly reserves space for all previous tables (there's only one\n\t\t\t\t\t// currentHashCount).\n\t\t\t\t\tauto value = hash->entries[index].value;\n\t\t\t\t\tif (hash != mainHash) {\n\t\t\t\t\t\tindex = hashedId;\n\t\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t\tindex &= mainHash->capacity - 1;\n\t\t\t\t\t\t\tprobedKey = mainHash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\t\t\t\tauto expected = details::invalid_thread_id;\n\t\t\t\t\t\t\tif (probedKey == expected && mainHash->entries[index].key.compare_exchange_strong(expected, id, std::memory_order_relaxed)) {\n\t\t\t\t\t\t\t\tmainHash->entries[index].value = value;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\treturn value;\n\t\t\t\t}\n\t\t\t\tif (probedKey == details::invalid_thread_id) {\n\t\t\t\t\tbreak;\t\t// Not in this hash table\n\t\t\t\t}\n\t\t        ++index;\n\t\t    }\n\t\t}\n\t\t\n\t\t// Insert!\n\t\tauto newCount = 1 + 
currentHashCount.fetch_add(1, std::memory_order_relaxed);\n\t\twhile (true) {\n\t\t\tif (newCount >= (mainHash->capacity >> 1) && !resizeInProgress.test_and_set(std::memory_order_acquire)) {\n\t\t\t\t// We've acquired the resize lock, try to allocate a bigger hash table.\n\t\t\t\t// Note the acquire fence synchronizes with the release fence at the end of this block, and hence when\n\t\t\t\t// we reload currentHash it must be the most recent version (it only gets changed within this\n\t\t\t\t// locked block).\n\t\t\t\tmainHash = currentHash.load(std::memory_order_acquire);\n\t\t\t\tauto newCapacity = mainHash->capacity << 1;\n\t\t\t\twhile (newCount >= (newCapacity >> 1)) {\n\t\t\t\t\tnewCapacity <<= 1;\n\t\t\t\t}\n\t\t\t\tauto raw = static_cast<char*>(corealgos_allocator::malloc(sizeof(InnerHash) + std::alignment_of<KeyValuePair>::value - 1 + sizeof(KeyValuePair) * newCapacity));\n\t\t\t\tif (raw == nullptr) {\n\t\t\t\t\t// Allocation failed\n\t\t\t\t\tcurrentHashCount.fetch_add(-1, std::memory_order_relaxed);\n\t\t\t\t\tresizeInProgress.clear(std::memory_order_relaxed);\n\t\t\t\t\treturn nullptr;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tauto newHash = new (raw) InnerHash;\n\t\t\t\tnewHash->capacity = newCapacity;\n\t\t\t\tnewHash->entries = reinterpret_cast<KeyValuePair*>(details::align_for<KeyValuePair>(raw + sizeof(InnerHash)));\n\t\t\t\tfor (std::size_t i = 0; i != newCapacity; ++i) {\n\t\t\t\t\tnew (newHash->entries + i) KeyValuePair;\n\t\t\t\t\tnewHash->entries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\tnewHash->prev = mainHash;\n\t\t\t\tcurrentHash.store(newHash, std::memory_order_release);\n\t\t\t\tresizeInProgress.clear(std::memory_order_release);\n\t\t\t\tmainHash = newHash;\n\t\t\t}\n\t\t\t\n\t\t\t// If it's < three-quarters full, add to the old one anyway so that we don't have to wait for the next table\n\t\t\t// to finish being allocated by another thread (and if we just finished allocating above, the condition 
will\n\t\t\t// always be true)\n\t\t\tif (newCount < (mainHash->capacity >> 1) + (mainHash->capacity >> 2)) {\n\t\t\t\tauto element = (T*)corealgos_allocator::malloc(sizeof(T));\n\t\t\t\tif (element == nullptr) {\n\t\t\t\t\treturn nullptr;\n\t\t\t\t}\n\t\t\t\tnew (element) T();\n\t\t\t\titems.add(element);\t\t// Track items so they can be destructed later\n\t\t\t\t\n\t\t\t\tauto index = hashedId;\n\t\t\t\twhile (true) {\n\t\t\t\t\tindex &= mainHash->capacity - 1;\n\t\t\t\t\tauto probedKey = mainHash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\t\tauto expected = details::invalid_thread_id;\n\t\t\t\t\tif (probedKey == expected && mainHash->entries[index].key.compare_exchange_strong(expected, id, std::memory_order_relaxed)) {\n\t\t\t\t\t\tmainHash->entries[index].value = element;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\t++index;\n\t\t\t\t}\n\t\t\t\treturn element;\n\t\t\t}\n\t\t\t\n\t\t\t// Hmm, the old hash is quite full and somebody else is busy allocating a new one.\n\t\t\t// We need to wait for the allocating thread to finish (if it succeeds, we add, if not,\n\t\t\t// we try to allocate ourselves).\n\t\t\tmainHash = currentHash.load(std::memory_order_acquire);\n\t\t}\n\t}\n\t\nprivate:\n\tstruct KeyValuePair\n\t{\n\t\tstd::atomic<details::thread_id_t> key;\n\t\tT* value;\t\t// No need for atomicity since it's only read by the thread that sets it in the first place\n\n\t\tKeyValuePair()\n\t\t{ }\n\n\t\tKeyValuePair(KeyValuePair const& other)\n\t\t\t: key(other.key.load()), value(other.value)\n\t\t{ }\n\n\t\tKeyValuePair& operator=(KeyValuePair const& other)\n\t\t{\n\t\t\tkey.store(other.key.load());\n\t\t\tvalue = other.value;\n\t\t\treturn *this;\n\t\t}\n\t};\n\t\n\tstruct InnerHash\n\t{\n\t\tstd::size_t capacity;\n\t\tKeyValuePair* entries;\n\t\tInnerHash* prev;\n\t};\n\t\n\tstd::atomic_flag resizeInProgress;\n\tstd::atomic<InnerHash*> currentHash;\n\tstd::atomic<std::size_t> currentHashCount;\t\t// Number of slots logically used\n\tInnerHash 
initialHash;\n\tstd::vector<KeyValuePair> initialHashEntries;\n\tConcurrentAddOnlyList<T> items;\n};\n\n\n\n\n\n////////////////////////////////////////////////////////////////////////////////\n// Lock-free free list\n////////////////////////////////////////////////////////////////////////////////\n\ntemplate <typename N>\nstruct FreeListNode\n{\n    FreeListNode() : freeListRefs(0), freeListNext(nullptr) { }\n\n    std::atomic<std::uint32_t> freeListRefs;\n    std::atomic<N*> freeListNext;\n\n\tFreeListNode(FreeListNode const& other)\n\t\t: freeListRefs(other.freeListRefs.load()), freeListNext(other.freeListNext.load())\n\t{ }\n\n\tFreeListNode& operator=(FreeListNode const& other)\n\t{\n\t\tfreeListRefs.store(other.freeListRefs.load());\n\t\tfreeListNext.store(other.freeListNext.load());\n\t\treturn *this;\n\t}\n};\n\n// A simple CAS-based lock-free free list. Not the fastest thing in the world under heavy contention,\n// but simple and correct (assuming nodes are never freed until after the free list is destroyed),\n// and fairly speedy under low contention.\ntemplate<typename N>    // N must inherit FreeListNode or have the same fields (and initialization)\nstruct FreeList\n{\n    FreeList() : freeListHead(nullptr) { }\n\n    inline void add(N* node)\n    {\n        // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to\n        // set it using a fetch_add\n        if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) {\n            // Oh look! 
We were the last ones referencing this node, and we know\n            // we want to add it to the free list, so let's do it!\n     \t    add_knowing_refcount_is_zero(node);\n        }\n    }\n\n    inline N* try_get()\n    {\n        auto head = freeListHead.load(std::memory_order_acquire);\n        while (head != nullptr) {\n            auto prevHead = head;\n            auto refs = head->freeListRefs.load(std::memory_order_relaxed);\n            if ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1,\n                    std::memory_order_acquire, std::memory_order_relaxed)) {\n                head = freeListHead.load(std::memory_order_acquire);\n                continue;\n            }\n\n            // Good, reference count has been incremented (it wasn't at zero), which means\n            // we can read the next and not worry about it changing between now and the time\n            // we do the CAS\n            auto next = head->freeListNext.load(std::memory_order_relaxed);\n            if (freeListHead.compare_exchange_strong(head, next,\n                    std::memory_order_acquire, std::memory_order_relaxed)) {\n                // Yay, got the node. 
This means it was on the list, which means\n                // shouldBeOnFreeList must be false no matter the refcount (because\n                // nobody else knows it's been taken off yet, it can't have been put back on).\n           \t\tassert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0);\n\n                // Decrease refcount twice, once for our ref, and once for the list's ref\n                head->freeListRefs.fetch_add(-2, std::memory_order_release);\n                return head;\n            }\n\n            // OK, the head must have changed on us, but we still need to decrease the refcount we\n            // increased.\n            // Note that we don't need to release any memory effects, but we do need to ensure that the reference\n\t\t\t// count decrement happens-after the CAS on the head.\n            refs = prevHead->freeListRefs.fetch_add(-1, std::memory_order_acq_rel);\n            if (refs == SHOULD_BE_ON_FREELIST + 1) {\n                add_knowing_refcount_is_zero(prevHead);\n            }\n        }\n\n        return nullptr;\n    }\n\n    // Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes)\n    N* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); }\n\nprivate:\n    inline void add_knowing_refcount_is_zero(N* node)\n    {\n        // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we\n        // run only one copy of this method per node at a time, i.e. 
the single thread case), then we\n        // know we can safely change the next pointer of the node; however, once the refcount is back\n        // above zero, then other threads could increase it (happens under heavy contention, when the\n        // refcount goes to zero in between a load and a refcount increment of a node in try_get, then\n        // back up to something non-zero, then the refcount increment is done by the other thread) --\n        // so, if the CAS to add the node to the actual list fails, decrease the refcount and leave\n        // the add operation to the next thread who puts the refcount back at zero (which could be us,\n        // hence the loop).\n        auto head = freeListHead.load(std::memory_order_relaxed);\n        while (true) {\n            node->freeListNext.store(head, std::memory_order_relaxed);\n            node->freeListRefs.store(1, std::memory_order_release);\n            if (!freeListHead.compare_exchange_strong(head, node,\n                    std::memory_order_release, std::memory_order_relaxed)) {\n                // Hmm, the add failed, but we can only try again when the refcount goes back to zero\n                if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) {\n                    continue;\n                }\n            }\n            return;\n        }\n    }\n\nprivate:\n    static const std::uint32_t REFS_MASK = 0x7FFFFFFF;\n    static const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000;\n\t\n    // Implemented like a stack, but where node order doesn't matter (nodes are\n    // inserted out of order under contention)\n    std::atomic<N*> freeListHead;\n};\n\n\n\n////////////////////////////////////////////////////////////////////////////////\n// Lock-free (single-producer, multi-consumer) numeric-key hash map of sorts;\n// there are many conditions that must be met, i.e. 
items have to be inserted\n// in increasing order by key (wrap-around is OK), and items cannot be searched\n// for or removed unless they are known to be in the map in the first place.\n////////////////////////////////////////////////////////////////////////////////\n\ntemplate<typename TValue>\nstruct SPMCSequentialHashMap\n{\n\texplicit SPMCSequentialHashMap(std::size_t initialSize)\n\t\t: nextCapacity(initialSize), index(nullptr)\n\t{\n\t\tnew_index();\n\t}\n\t\n\t~SPMCSequentialHashMap()\n\t{\n\t\tauto ptr = index.load(std::memory_order_relaxed);\n\t\tif (ptr != nullptr) {\n\t\t\tfor (std::size_t i = 0; i != ptr->capacity; ++i) {\n\t\t\t\tptr->index[i]->~IndexEntry();\n\t\t\t}\n\t\t\tdo {\n\t\t\t\tauto prev = ptr->prev;\n\t\t\t\tptr->~IndexHeader();\n\t\t\t\tcorealgos_allocator::free(ptr);\n\t\t\t\tptr = prev;\n\t\t\t} while (ptr != nullptr);\n\t\t}\n\t}\n\t\n\t// Not thread safe. Only call from single producer thread.\n\t// Note: key must *not* be in hash already, and must be exactly\n\t// one larger than the previously inserted key value.\n\tvoid insert(std::uint64_t key, TValue* value)\n\t{\n\t\tIndexEntry* idxEntry;\n\t\tinsert_index_entry(idxEntry, key);\n\t\tidxEntry->value.store(value, std::memory_order_release);\n\t}\n\t\n\t// Thread-safe, but if somebody can remove the key while find() is\n\t// in progress, then any returned value is not guaranteed to correspond\n\t// to that key. This also applies if the key was not already present but\n\t// once was. Elements can be found in any order.\n\tTValue* find(std::uint64_t key)\n\t{\n\t\tauto idxEntry = get_entry_for_key(key);\n\t\tif (idxEntry == nullptr)\n\t\t\treturn nullptr;\n\t\treturn idxEntry->value.load(std::memory_order_acquire);\n\t}\n\t\n\t// Thread-safe, but if somebody else can remove the same key while remove()\n\t// is in progress, then any removed value is not guaranteed to correspond\n\t// to that key This also applies if the key was not already present but\n\t// once was. 
Elements can be removed in an order.\n\tTValue* remove(std::uint64_t key)\n\t{\n\t\tauto idxEntry = get_entry_for_key(key);\n\t\tif (idxEntry == nullptr)\n\t\t\treturn nullptr;\n\t\tTValue* val = nullptr;\n\t\twhile (!idxEntry->value.compare_exchange_weak(val, nullptr, std::memory_order_acquire, std::memory_order_relaxed))\n\t\t\tcontinue;\n\t\treturn val;\n\t}\n\t\nprivate:\n\tstruct IndexEntry\n\t{\n\t\tstd::atomic<std::uint64_t> key;\n\t\tstd::atomic<TValue*> value;\n\t};\n\t\n\tstruct IndexHeader\n\t{\n\t\tstd::size_t capacity;\n\t\tstd::atomic<std::size_t> tail;\n\t\tIndexEntry* entries;\n\t\tIndexEntry** index;\n\t\tIndexHeader* prev;\n\t};\n\t\n\tinline void insert_index_entry(IndexEntry*& idxEntry, std::uint64_t key)\n\t{\n\t\tauto localIndex = index.load(std::memory_order_relaxed);\t\t// We're the only writer thread, relaxed is OK\n\t\tauto newTail = (localIndex->tail.load(std::memory_order_relaxed) + 1) & (localIndex->capacity - 1);\n\t\tidxEntry = localIndex->index[newTail];\n\t\tif (idxEntry->key.load(std::memory_order_relaxed) == INVALID_KEY ||\n\t\t\tidxEntry->value.load(std::memory_order_relaxed) == nullptr) {\n\t\t\t\n\t\t\tidxEntry->key.store(key, std::memory_order_relaxed);\n\t\t\tlocalIndex->tail.store(newTail, std::memory_order_release);\n\t\t\treturn;\n\t\t}\n\t\t\n\t\t// No room in the old index, try to allocate another one!\n\t\tnew_index();\n\t\tlocalIndex = index.load(std::memory_order_relaxed);\n\t\tnewTail = (localIndex->tail.load(std::memory_order_relaxed) + 1) & (localIndex->capacity - 1);\n\t\tidxEntry = localIndex->index[newTail];\n\t\tassert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_KEY);\n\t\tidxEntry->key.store(key, std::memory_order_relaxed);\n\t\tlocalIndex->tail.store(newTail, std::memory_order_release);\n\t}\n\t\n\tinline IndexEntry* get_entry_for_key(std::uint64_t key) const\n\t{\n\t\tauto localIndex = index.load(std::memory_order_acquire);\n\t\tauto tail = 
localIndex->tail.load(std::memory_order_acquire);\n\t\tauto tailBase = localIndex->index[tail]->key.load(std::memory_order_relaxed);\n\t\tif (tailBase == INVALID_KEY) {\n\t\t\treturn nullptr;\n\t\t}\n\t\tauto offset = static_cast<std::size_t>(key - tailBase);\n\t\tstd::size_t idx = (tail + offset) & (localIndex->capacity - 1);\n\t\tauto entry = localIndex->index[idx];\n\t\treturn entry->key.load(std::memory_order_relaxed) == key ? entry : nullptr;\n\t}\n\t\n\tbool new_index()\n\t{\n\t\tauto prev = index.load(std::memory_order_relaxed);\n\t\tstd::size_t prevCapacity = prev == nullptr ? 0 : prev->capacity;\n\t\tauto entryCount = prev == nullptr ? nextCapacity : prevCapacity;\n\t\tauto raw = static_cast<char*>(corealgos_allocator::malloc(\n\t\t\tsizeof(IndexHeader) +\n\t\t\tstd::alignment_of<IndexEntry>::value - 1 + sizeof(IndexEntry) * entryCount +\n\t\t\tstd::alignment_of<IndexEntry*>::value - 1 + sizeof(IndexEntry*) * nextCapacity));\n\t\tif (raw == nullptr) {\n\t\t\treturn false;\n\t\t}\n\t\t\n\t\tauto header = new (raw) IndexHeader;\n\t\tauto entries = reinterpret_cast<IndexEntry*>(details::align_for<IndexEntry>(raw + sizeof(IndexHeader)));\n\t\tauto idx = reinterpret_cast<IndexEntry**>(details::align_for<IndexEntry*>(reinterpret_cast<char*>(entries) + sizeof(IndexEntry) * entryCount));\n\t\tif (prev != nullptr) {\n\t\t\tauto prevTail = prev->tail.load(std::memory_order_relaxed);\n\t\t\tauto prevPos = prevTail;\n\t\t\tstd::size_t i = 0;\n\t\t\tdo {\n\t\t\t\tprevPos = (prevPos + 1) & (prev->capacity - 1);\n\t\t\t\tidx[i++] = prev->index[prevPos];\n\t\t\t} while (prevPos != prevTail);\n\t\t\tassert(i == prevCapacity);\n\t\t}\n\t\tfor (std::size_t i = 0; i != entryCount; ++i) {\n\t\t\tnew (entries + i) IndexEntry;\n\t\t\tentries[i].key.store(INVALID_KEY, std::memory_order_relaxed);\n\t\t\tentries[i].value.store(nullptr, std::memory_order_relaxed);\n\t\t\tidx[prevCapacity + i] = entries + i;\n\t\t}\n\t\theader->prev = prev;\n\t\theader->entries = 
entries;\n\t\theader->index = idx;\n\t\theader->capacity = nextCapacity;\n\t\theader->tail.store((prevCapacity - 1) & (nextCapacity - 1), std::memory_order_relaxed);\n\t\t\n\t\tindex.store(header, std::memory_order_release);\n\t\t\n\t\tnextCapacity <<= 1;\n\t\t\n\t\treturn true;\n\t}\n\t\t\n\tprivate:\n\t\tstd::size_t nextCapacity;\n\t\tstd::atomic<IndexHeader*> index;\n\t\t\n\t\tstatic const std::uint64_t INVALID_KEY = ~(std::uint64_t)0;\n};\n\n} }\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/fuzztests/fuzztests.cpp",
    "content": "// ©2013-2014 Cameron Desrochers.\n// Distributed under the simplified BSD license (see the LICENSE file that\n// should have come with this file).\n\n// Fuzz (random) tests for moodycamel::ConcurrentQueue\n\n#include <cstdio>\n#include <cstdlib>\n#include <cstring>\n#include <cstdint>\n#include <ctime>\n#include <cassert>\n#include <string>\n#include <random>\n#include <atomic>\n#include <fstream>\n#include <iomanip>\n#include <vector>\n#include <csignal>\n#include <mutex>\n#include <exception>\n#include <cctype>\n\n#ifdef _WIN32\n#define WIN32_LEAN_AND_MEAN\n#ifndef NOMINMAX\n#define NOMINMAX\n#endif\n#include <windows.h>\n#endif\n\n#include \"../../concurrentqueue.h\"\n#include \"../common/simplethread.h\"\n#include \"../common/systemtime.h\"\n#include \"../corealgos.h\"\n\nvoid failHook()\n{\n\t(void)1;\t\t// Attach debuggers here\n}\n\n#define _STR(x) #x\n#define STR(x) _STR(x)\n#define ASSERT_OR_FAIL_THREAD(cond) if (!(cond)) { const char* n = nullptr; failReason.compare_exchange_strong(n, \"assertion failed on line \" STR(__LINE__) \": \" #cond, std::memory_order_relaxed, std::memory_order_relaxed); \\\n                                                   failed.store(true, std::memory_order_relaxed); failHook(); return; }\n#define FAIL_IF_THREAD_TIMEOUT() if (getTimeDelta(startTime) > 60000) { const char* n = nullptr; failReason.compare_exchange_strong(n, \"test timed out (detected on line \" STR(__LINE__) \")\", std::memory_order_relaxed, std::memory_order_relaxed); \\\n                                                                        failed.store(true, std::memory_order_relaxed); failHook(); return; }\n#define ASSERT_OR_FAIL(cond) if (!(cond)) { out_failReason = \"assertion failed on line \" STR(__LINE__) \": \" #cond; result = false; failHook(); break; }\n\n\nusing namespace moodycamel;\n\n\ntypedef std::minstd_rand RNG_t;\n\nenum test_type 
{\n\tmultithread_produce,\n\tmultithread_consume,\n\tmultithread_produce_and_consume,\n\tcompletely_random,\n\t\n\t// Core algo tests\n\tcore_add_only_list,\n\tcore_thread_local,\n\t\n\tTEST_TYPE_COUNT\n};\n\nstd::uint64_t test_count[TEST_TYPE_COUNT] = { 0 };\nstd::uint64_t fail_count[TEST_TYPE_COUNT] = { 0 };\nconst char* test_names[TEST_TYPE_COUNT] = {\n\t\"multithread_produce\",\n\t\"multithread_consume\",\n\t\"multithread_produce_and_consume\",\n\t\"completely_random\",\n\t\"core_add_only_list\",\n\t\"core_thread_local\",\n};\n\nconst int SINGLE_SEED_ITERATIONS = 100;\nconst char* LOG_FILE = \"fuzztests.log\";\n\n\nstruct FuzzTraits : public ConcurrentQueueDefaultTraits\n{\n\tstatic const size_t BLOCK_SIZE = 8;\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = 4;\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = 4;\n\tstatic const size_t INITIAL_IMPLCICIT_PRODUCER_HASH_SIZE = 1;\n\tstatic const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 24;\n};\n\nstruct TestListItem : corealgos::ListItem\n{\n\tint value;\n\t\n\tTestListItem() : value(0) { }\n\texplicit TestListItem(int value) : value(value) { }\n\t\n\tinline TestListItem* prev(std::memory_order order = std::memory_order_relaxed) const\n\t{\n\t\treturn static_cast<TestListItem*>(concurrentListPrev.load(order));\n\t}\n};\n\n\nbool run_test(uint64_t seed, int iterations, test_type& out_type, const char*& out_failReason)\n{\n\tbool result = true;\n\tRNG_t baseRng((unsigned int)seed);\n\t\n\tstd::uniform_int_distribution<int> randTest(0, TEST_TYPE_COUNT - 1);\n\tstd::uniform_int_distribution<int> randInitialSize(0, 70);\n\t\n\tauto type = static_cast<test_type>(randTest(baseRng));\n\tout_type = type;\n\tfor (int iteration = 0; iteration != iterations; ++iteration) {\n\t\tRNG_t rng(baseRng);\n\t\t\n\t\tstd::atomic<bool> failed(false);\n\t\tstd::atomic<const char*> failReason;\n\t\tfailReason = nullptr;\n\t\tSystemTime startTime = getSystemTime();\n\t\t\n\t\tswitch (type) {\n\t\tcase 
multithread_produce:\n\t\t{\n\t\t\tconst int countIncrement = std::uniform_int_distribution<int>(1, 1000)(rng);\n\t\t\tint count = std::uniform_int_distribution<int>(0, 500)(rng) * countIncrement;\n\t\t\tint prodCount = std::uniform_int_distribution<int>(0, 6)(rng);\n\t\t\tbool useConsumerToken = static_cast<bool>(std::uniform_int_distribution<int>(0, 1)(rng));\n\t\t\t\n\t\t\tConcurrentQueue<int, FuzzTraits> q(randInitialSize(rng));\n\t\t\t\n\t\t\tstd::vector<SimpleThread> producers(prodCount);\n\t\t\tstd::vector<bool> useProducerToken(prodCount);\n\t\t\tfor (int i = 0; i != prodCount; ++i) {\n\t\t\t\tuseProducerToken[i] = static_cast<bool>(std::uniform_int_distribution<int>(0, 1)(rng));\n\t\t\t\tproducers[i] = SimpleThread([&](int i) {\n\t\t\t\t\tProducerToken t(q);\n\t\t\t\t\tfor (int j = 0; j != count && !failed.load(std::memory_order_relaxed); j += countIncrement) {\n\t\t\t\t\t\tif (useProducerToken[i]) {\n\t\t\t\t\t\t\tfor (int k = 0; k != countIncrement; ++k) {\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue(t, (i << 24) | (k + j)));\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tfor (int k = 0; k != countIncrement; ++k) {\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue((i << 24) | (k + j)));\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tFAIL_IF_THREAD_TIMEOUT();\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\t\n\t\t\tSimpleThread consumer([&]() {\n\t\t\t\tint item;\n\t\t\t\tstd::vector<int> lastItems(prodCount);\n\t\t\t\tConsumerToken t(q);\n\t\t\t\t\n\t\t\t\tfor (int i = 0; i != prodCount; ++i) {\n\t\t\t\t\tlastItems[i] = -1;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor (int i = 0; i != count * prodCount && !failed.load(std::memory_order_relaxed);) {\n\t\t\t\t\tif (useConsumerToken) {\n\t\t\t\t\t\tfor (int j = 0; j != 10000; ++j) {\n\t\t\t\t\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\t\t\t\t\t++i;\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) < count);\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) == lastItems[item >> 24] + 
1);\n\t\t\t\t\t\t\t\tlastItems[item >> 24] = (item & 0xFFFFFF);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (int j = 0; j != 10000; ++j) {\n\t\t\t\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\t\t\t\t++i;\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) < count);\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) == lastItems[item >> 24] + 1);\n\t\t\t\t\t\t\t\tlastItems[item >> 24] = (item & 0xFFFFFF);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tFAIL_IF_THREAD_TIMEOUT();\n\t\t\t\t}\n\t\t\t});\n\t\t\t\n\t\t\tfor (int i = 0; i != prodCount; ++i) {\n\t\t\t\tproducers[i].join();\n\t\t\t}\n\t\t\tconsumer.join();\n\t\t\t\n\t\t\tif (failed.load(std::memory_order_relaxed)) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\t\n\t\t\tbreak;\n\t\t}\n\t\tcase multithread_consume:\n\t\t{\n\t\t\tconst int countIncrement = std::uniform_int_distribution<int>(1, 1000)(rng);\n\t\t\tint count = std::uniform_int_distribution<int>(0, 500)(rng) * countIncrement;\n\t\t\tint consCount = std::uniform_int_distribution<int>(0, 6)(rng);\n\t\t\tbool useProducerToken = static_cast<bool>(std::uniform_int_distribution<int>(0, 1)(rng));\n\t\t\tstd::atomic<bool> producerDone(false);\n\t\t\t\n\t\t\tConcurrentQueue<int, FuzzTraits> q(randInitialSize(rng));\n\t\t\t\n\t\t\tstd::vector<SimpleThread> consumers(consCount);\n\t\t\tstd::vector<bool> useConsumerToken(consCount);\n\t\t\tfor (int i = 0; i != consCount; ++i) {\n\t\t\t\tuseConsumerToken[i] = static_cast<bool>(std::uniform_int_distribution<int>(0, 1)(rng));\n\t\t\t\tconsumers[i] = SimpleThread([&](int i) {\n\t\t\t\t\tint item, lastItem = -1;\n\t\t\t\t\tConsumerToken t(q);\n\t\t\t\t\t\n\t\t\t\t\tbool doneConsuming = false;\n\t\t\t\t\twhile (!doneConsuming && !failed.load(std::memory_order_relaxed)) {\n\t\t\t\t\t\tauto producerDoneLocal = producerDone.load(std::memory_order_acquire);\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (useConsumerToken[i]) 
{\n\t\t\t\t\t\t\tfor (int j = 0; j != 10000; ++j) {\n\t\t\t\t\t\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(item >= 0 && item < count * consCount && item > lastItem);\n\t\t\t\t\t\t\t\t\tlastItem = item;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\telse if (producerDoneLocal) {\n\t\t\t\t\t\t\t\t\tdoneConsuming = true;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tfor (int j = 0; j != 10000; ++j) {\n\t\t\t\t\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(item >= 0 && item < count * consCount && item > lastItem);\n\t\t\t\t\t\t\t\t\tlastItem = item;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\telse if (producerDoneLocal)  {\n\t\t\t\t\t\t\t\t\tdoneConsuming = true;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tFAIL_IF_THREAD_TIMEOUT();\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\t\n\t\t\tSimpleThread producer([&]() {\n\t\t\t\tProducerToken t(q);\n\t\t\t\tfor (int i = 0; i != count * consCount && !failed.load(std::memory_order_relaxed); i += countIncrement) {\n\t\t\t\t\tif (useProducerToken) {\n\t\t\t\t\t\tfor (int j = 0; j != countIncrement; ++j) {\n\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue(t, i + j));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (int j = 0; j != countIncrement; ++j) {\n\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue(i + j));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tFAIL_IF_THREAD_TIMEOUT();\n\t\t\t\t}\n\t\t\t\tproducerDone.store(true, std::memory_order_release);\n\t\t\t});\n\t\t\t\n\t\t\tproducer.join();\n\t\t\tfor (int i = 0; i != consCount; ++i) {\n\t\t\t\tconsumers[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tif (failed.load(std::memory_order_relaxed)) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(consCount == 0 || !q.try_dequeue(item));\n\t\t\t\n\t\t\tbreak;\n\t\t}\n\t\tcase multithread_produce_and_consume:\n\t\t{\n\t\t\tconst int countIncrement = 
std::uniform_int_distribution<int>(1, 1000)(rng);\n\t\t\tint count = std::uniform_int_distribution<int>(0, 500)(rng) * countIncrement;\n\t\t\tint prodCount = std::uniform_int_distribution<int>(0, 6)(rng);\n\t\t\tint consCount = std::uniform_int_distribution<int>(0, 6)(rng);\n\t\t\tstd::atomic<bool> producersDone(false);\n\t\t\t\n\t\t\tConcurrentQueue<int, FuzzTraits> q(randInitialSize(rng));\n\t\t\t\n\t\t\tstd::vector<SimpleThread> producers(prodCount);\n\t\t\tstd::vector<bool> useProducerToken(prodCount);\n\t\t\tfor (int i = 0; i != prodCount; ++i) {\n\t\t\t\tuseProducerToken[i] = static_cast<bool>(std::uniform_int_distribution<int>(0, 1)(rng));\n\t\t\t\tproducers[i] = SimpleThread([&](int i) {\n\t\t\t\t\tProducerToken t(q);\n\t\t\t\t\tfor (int j = 0; j != count && !failed.load(std::memory_order_relaxed); j += countIncrement) {\n\t\t\t\t\t\tif (useProducerToken[i]) {\n\t\t\t\t\t\t\tfor (int k = 0; k != countIncrement; ++k) {\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue(t, (i << 24) | (k + j)));\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tfor (int k = 0; k != countIncrement; ++k) {\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue((i << 24) | (k + j)));\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tFAIL_IF_THREAD_TIMEOUT();\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\t\n\t\t\tstd::vector<SimpleThread> consumers(consCount);\n\t\t\tstd::vector<bool> useConsumerToken(consCount);\n\t\t\tfor (int i = 0; i != consCount; ++i) {\n\t\t\t\tuseConsumerToken[i] = static_cast<bool>(std::uniform_int_distribution<int>(0, 1)(rng));\n\t\t\t\tconsumers[i] = SimpleThread([&](int i) {\n\t\t\t\t\tint item;\n\t\t\t\t\tstd::vector<int> lastItems(prodCount);\n\t\t\t\t\tConsumerToken t(q);\n\t\t\t\t\t\n\t\t\t\t\tfor (int j = 0; j != prodCount; ++j) {\n\t\t\t\t\t\tlastItems[j] = -1;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tbool doneConsuming = false;\n\t\t\t\t\twhile (!doneConsuming && !failed.load(std::memory_order_relaxed)) {\n\t\t\t\t\t\tauto producersDoneLocal = 
producersDone.load(std::memory_order_acquire);\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (useConsumerToken[i]) {\n\t\t\t\t\t\t\tfor (int j = 0; j != 10000; ++j) {\n\t\t\t\t\t\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) < count);\n\t\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) > lastItems[item >> 24]);\n\t\t\t\t\t\t\t\t\tlastItems[item >> 24] = item & 0xFFFFFF;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\telse if (producersDoneLocal) {\n\t\t\t\t\t\t\t\t\tdoneConsuming = true;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tfor (int j = 0; j != 10000; ++j) {\n\t\t\t\t\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) < count);\n\t\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) > lastItems[item >> 24]);\n\t\t\t\t\t\t\t\t\tlastItems[item >> 24] = item & 0xFFFFFF;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\telse if (producersDoneLocal)  {\n\t\t\t\t\t\t\t\t\tdoneConsuming = true;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tFAIL_IF_THREAD_TIMEOUT();\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != prodCount; ++i) {\n\t\t\t\tproducers[i].join();\n\t\t\t}\n\t\t\tproducersDone.store(true, std::memory_order_release);\n\t\t\tfor (int i = 0; i != consCount; ++i) {\n\t\t\t\tconsumers[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tif (failed.load(std::memory_order_relaxed)) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(consCount == 0 || !q.try_dequeue(item));\n\t\t\t\n\t\t\tbreak;\n\t\t}\n\t\tcase completely_random:\n\t\t{\n\t\t\tint threadCount = std::uniform_int_distribution<int>(0, 32)(rng);\n\t\t\t\n\t\t\tConcurrentQueue<int, FuzzTraits> q(randInitialSize(rng));\n\t\t\t\n\t\t\tstd::vector<SimpleThread> threads(threadCount);\n\t\t\tstd::vector<unsigned int> seeds(threadCount);\n\t\t\tstd::vector<unsigned int> opCounts(threadCount);\n\t\t\tunsigned 
int largestOpCount = 0;\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\topCounts[i] = std::uniform_int_distribution<unsigned int>(0, 500000)(rng);\n\t\t\t\tif (opCounts[i] > largestOpCount) {\n\t\t\t\t\tlargestOpCount = opCounts[i];\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Note: If you're wondering where all the memory goes, it's mostly here!\n\t\t\tstd::vector<unsigned int> itemStates(largestOpCount * threadCount * 2);\n\t\t\tfor (std::size_t j = 0; j != itemStates.size(); ++j) {\n\t\t\t\titemStates[j] = 0;\n\t\t\t}\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\tseeds[i] = std::uniform_int_distribution<unsigned int>(0, 0xFFFFFFFF)(rng);\n\t\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\t\tRNG_t rng((unsigned int)seeds[i]);\n\t\t\t\t\tConsumerToken ct(q);\n\t\t\t\t\tProducerToken pt(q);\n\t\t\t\t\tint item;\n\t\t\t\t\tint opCount = opCounts[i];\n\t\t\t\t\tstd::vector<int> lastItems(threadCount * 2);\t\t// * 2 because there's two producer queues per thread (one implicit, one explicit)\n\t\t\t\t\tfor (int j = 0; j != threadCount * 2; ++j) {\n\t\t\t\t\t\tlastItems[j] = -1;\n\t\t\t\t\t}\n\t\t\t\t\tfor (int j = 0; j < opCount && !failed.load(std::memory_order_relaxed); ++j) {\n\t\t\t\t\t\tint op = std::uniform_int_distribution<int>(0, 7)(rng);\n\t\t\t\t\t\tunsigned int* state;\n\t\t\t\t\t\tswitch (op) {\n\t\t\t\t\t\tcase 0:\n\t\t\t\t\t\t\tstate = &itemStates[(i * 2) * largestOpCount + j];\n\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(*state == 0);\n\t\t\t\t\t\t\t*state = 1;\n\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue(pt, ((i * 2) << 24) | j));\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase 1:\n\t\t\t\t\t\t\tstate = &itemStates[(i * 2 + 1) * largestOpCount + j];\n\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(*state == 0);\n\t\t\t\t\t\t\t*state = 1;\n\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue(((i * 2 + 1) << 24) | j));\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase 2:\n\t\t\t\t\t\t\tif (q.try_dequeue(ct, item)) {\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) >= 0 && (item & 
0xFFFFFF) < (int)largestOpCount);\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) > lastItems[item >> 24]);\n\t\t\t\t\t\t\t\tlastItems[item >> 24] = item & 0xFFFFFF;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tstate = &itemStates[(item >> 24) * largestOpCount + (item & 0xFFFFFF)];\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(*state == 1);\n\t\t\t\t\t\t\t\t*state = 2;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\tcase 3:\n\t\t\t\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) >= 0 && (item & 0xFFFFFF) < (int)largestOpCount);\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) > lastItems[item >> 24]);\n\t\t\t\t\t\t\t\tlastItems[item >> 24] = item & 0xFFFFFF;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tstate = &itemStates[(item >> 24) * largestOpCount + (item & 0xFFFFFF)];\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(*state == 1);\n\t\t\t\t\t\t\t\t*state = 2;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tcase 4:\n\t\t\t\t\t\tcase 5: {\n\t\t\t\t\t\t\tstd::vector<int> bulkData(std::min(opCount - j, std::uniform_int_distribution<int>(0, 1024)(rng)));\n\t\t\t\t\t\t\tfor (std::size_t k = 0; k != bulkData.size(); ++k) {\n\t\t\t\t\t\t\t\tstate = &itemStates[(i * 2 + op - 4) * largestOpCount + j + k];\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(*state == 0);\n\t\t\t\t\t\t\t\t*state = 1;\n\t\t\t\t\t\t\t\tbulkData[k] = ((i * 2 + op - 4) << 24) | (j + (int)k);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (op == 4) {\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue_bulk(pt, bulkData.begin(), bulkData.size()));\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q.enqueue_bulk(bulkData.begin(), bulkData.size()));\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tj += (int)bulkData.size() - 1;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tcase 6:\n\t\t\t\t\t\tcase 7: {\n\t\t\t\t\t\t\tstd::vector<int> bulkData(std::min(opCount - j, std::uniform_int_distribution<int>(0, 1024)(rng)));\n\t\t\t\t\t\t\tstd::size_t count 
= 0;\n\t\t\t\t\t\t\tif (op == 6) {\n\t\t\t\t\t\t\t\tcount = q.try_dequeue_bulk(ct, bulkData.begin(), bulkData.size());\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t\tcount = q.try_dequeue_bulk(bulkData.begin(), bulkData.size());\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfor (std::size_t k = 0; k != count; ++k) {\n\t\t\t\t\t\t\t\tauto item = bulkData[k];\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) >= 0 && (item & 0xFFFFFF) < (int)largestOpCount);\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) > lastItems[item >> 24]);\n\t\t\t\t\t\t\t\tlastItems[item >> 24] = item & 0xFFFFFF;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tstate = &itemStates[(item >> 24) * largestOpCount + (item & 0xFFFFFF)];\n\t\t\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(*state == 1);\n\t\t\t\t\t\t\t\t*state = 2;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (count > 0) {\n\t\t\t\t\t\t\t\tj += (int)count - 1;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tassert(false);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tFAIL_IF_THREAD_TIMEOUT();\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n#if MCDBGQ_TRACKMEM\n\t\t\tauto stats = q.getMemStats();\t\t// Make available under debugger\n\t\t\t((void)stats);\n#endif\t\t\n\t\t\t\n\t\t\tint item;\n\t\t\twhile (q.try_dequeue(item)) {\n\t\t\t\tunsigned int* state = &itemStates[(item >> 24) * largestOpCount + (item & 0xFFFFFF)];\n\t\t\t\tASSERT_OR_FAIL(*state == 1);\n\t\t\t\t*state = 2;\n\t\t\t}\n\t\t\tfor (std::size_t j = 0; j != itemStates.size(); ++j) {\n\t\t\t\tASSERT_OR_FAIL(itemStates[j] == 0 || itemStates[j] == 2);\n\t\t\t}\n\t\t\t\n\t\t\tif (failed.load(std::memory_order_relaxed)) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tbreak;\n\t\t}\n\t\tcase core_add_only_list:\n\t\t{\n\t\t\tint threadCount = std::uniform_int_distribution<int>(0, 48)(rng);\n\t\t\tstd::vector<SimpleThread> threads(threadCount);\n\t\t\tstd::vector<int> 
opCounts(threadCount);\n\t\t\t\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\topCounts[i] = std::uniform_int_distribution<int>(0, 500000)(rng);\n\t\t\t}\n\t\t\t\n\t\t\tstd::size_t expectedMemUsage = 0;\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\texpectedMemUsage += opCounts[i] * sizeof(TestListItem);\n\t\t\t}\n\t\t\t\n\t\t\tcorealgos::ConcurrentAddOnlyList<TestListItem> list;\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\tthreads[i] = SimpleThread([&](int tid) {\n\t\t\t\t\tauto temp = expectedMemUsage;\n\t\t\t\t\t((void)temp);\n\t\t\t\t\t\n\t\t\t\t\tint opCount = opCounts[tid];\n\t\t\t\t\tfor (int j = 0; j != opCount; ++j) {\n\t\t\t\t\t\tlist.add(new TestListItem((tid << 24) | j));\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\tstd::vector<int> lastItems(threadCount);\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\tlastItems[i] = opCounts[i];\n\t\t\t}\n\t\t\tauto tail = list.tail();\n\t\t\twhile (tail != nullptr) {\n\t\t\t\tauto tid = tail->value >> 24;\n\t\t\t\tASSERT_OR_FAIL(lastItems[tid] - 1 == (tail->value & 0xFFFFFF));\n\t\t\t\t--lastItems[tid];\n\t\t\t\tauto next = tail->prev();\n\t\t\t\tdelete tail;\n\t\t\t\ttail = next;\n\t\t\t}\n\t\t\tbreak;\n\t\t}\n\t\tcase core_thread_local:\n\t\t{\n\t\t\tint threadCount = std::uniform_int_distribution<int>(32, 256)(rng);\n\t\t\tstd::vector<SimpleThread> threads(threadCount);\n\t\t\tstd::vector<int> opCounts(threadCount);\n\t\t\tstd::vector<int*> localData(threadCount);\n\t\t\t\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\topCounts[i] = std::uniform_int_distribution<int>(10000, 250000)(rng);\n\t\t\t}\n\t\t\t\n\t\t\tcorealgos::ThreadLocal<TestListItem> tls(1);\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\tthreads[i] = SimpleThread([&](int tid) {\n\t\t\t\t\tauto p = tls.get_or_create();\n\t\t\t\t\tASSERT_OR_FAIL_THREAD(p->value == 0);\n\t\t\t\t\tp->value = 
tid;\n\t\t\t\t\tlocalData[tid] = &p->value;\n\t\t\t\t\t\n\t\t\t\t\tint opCount = opCounts[tid];\n\t\t\t\t\tfor (int j = 0; j != opCount; ++j) {\n\t\t\t\t\t\tauto q = tls.get_or_create();\n\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q == p);\n\t\t\t\t\t\tASSERT_OR_FAIL_THREAD(q->value == tid);\n\t\t\t\t\t\tFAIL_IF_THREAD_TIMEOUT();\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\tfor (int i = 0; i != threadCount; ++i) {\n\t\t\t\tASSERT_OR_FAIL(localData[i] != nullptr);\n\t\t\t\tASSERT_OR_FAIL(*localData[i] == i);\n\t\t\t}\n\t\t\tbreak;\n\t\t}\n\t\tdefault:\n\t\t\tassert(false);\n\t\t}\n\t\t\n\t\t++test_count[type];\n\t\tif (failed.load(std::memory_order_relaxed)) {\n\t\t\tout_failReason = failReason.load(std::memory_order_relaxed);\n\t\t\tresult = false;\n\t\t}\n\t\tif (!result) {\n\t\t\t++fail_count[type];\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn result;\n}\n\n\nstatic const char* timestamp()\n{\n\tstatic char buf[32];\n\ttime_t time = std::time(NULL);\n\tstrcpy(buf, std::asctime(std::localtime(&time)));\n\tbuf[strlen(buf) - 1] = '\\0';\t// Remove trailing newline\n\treturn buf;\n}\n\nextern \"C\" { typedef void (*signal_handler_t)(int); }\nstatic std::atomic<std::uint64_t> g_seed(0);\nstatic std::atomic_flag reported_signal_error = ATOMIC_FLAG_INIT;\nstatic std::atomic<signal_handler_t> g_prev_sigsegv(nullptr);\nstatic std::atomic<signal_handler_t> g_prev_sigabrt(nullptr);\nstatic std::mutex g_signal_handler_mutex;\n\nvoid on_signal(int signal)\n{\n\tif (reported_signal_error.test_and_set()) {\n\t\treturn;\n\t}\n\t\n\tstd::unique_lock<std::mutex> lock(g_signal_handler_mutex);\n\tauto seed = g_seed.load(std::memory_order_acquire);\n\t\n\t// Technically undefined behaviour to use stdlib functions,\n\t// but oh well\n\tconst char* error = signal == SIGABRT ?\n\t\t\"Abort detected (assertion failed?)\" :\n\t\t\"Segmentation fault detected!\";\n\t\n\t{\n\t\tstd::ofstream fout(LOG_FILE, 
std::ios::app);\n\t\tfout << \"*** \" << error << \"\\n      Seed: \" << std::hex << seed << std::endl;\n\t}\n\tstd::printf(\"*** %s\\n      Seed: %08x%08x\\n\", error, (uint32_t)(seed >> 32), (uint32_t)(seed));\n\tstd::fflush(stdout);\n}\n\nextern \"C\" void signal_handler(int signal)\n{\n\ton_signal(signal);\n\tif (signal_handler_t handler_fn = g_prev_sigsegv.load(std::memory_order_relaxed)) {\n\t\thandler_fn(signal);\n\t}\n\telse {\n\t\tstd::exit(signal);\n\t}\n}\n\n#ifdef _WIN32\nLONG CALLBACK se_handler(PEXCEPTION_POINTERS info)\n{\n\tif (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {\n\t\ton_signal(SIGSEGV);\n\t}\n\treturn EXCEPTION_CONTINUE_SEARCH;\n}\n#endif\n\nint main(int argc, char** argv)\n{\n\tbool singleSeed = false;\n\tuint64_t seed = 0;\n\t\n\t// Disable buffering (so that when run in, e.g., Sublime Text, the output appears as it is written)\n\tstd::setvbuf(stdout, nullptr, _IONBF, 0);\n\t\n\t// Isolate the executable name\n\tstd::string progName = argv[0];\n\tauto slash = progName.find_last_of(\"/\\\\\");\n\tif (slash != std::string::npos) {\n\t\tprogName = progName.substr(slash + 1);\n\t}\n\t\n\t// Parse command line options\n\tif (argc > 1) {\n\t\tbool printHelp = false;\n\t\tbool error = false;\n\t\tfor (int i = 1; i < argc; ++i) {\n\t\t\tif (std::strcmp(argv[i], \"--help\") == 0) {\n\t\t\t\tprintHelp = true;\n\t\t\t}\n\t\t\telse if (std::strcmp(argv[i], \"--seed\") == 0) {\n\t\t\t\tif (i + 1 == argc || argv[i + 1][0] == '-') {\n\t\t\t\t\tstd::printf(\"Expected seed number argument for --seed option.\\n\");\n\t\t\t\t\terror = true;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t++i;\n\t\t\t\tseed = 0;\n\t\t\t\t// hex\n\t\t\t\tfor (int j = 0; argv[i][j] != '\\0'; ++j) {\n\t\t\t\t\tchar ch = static_cast<char>(std::tolower(argv[i][j]));\n\t\t\t\t\tif (j == 1 && seed == 0 && ch == 'x') {\n\t\t\t\t\t\tcontinue;\t// Skip 0x, if any\n\t\t\t\t\t}\n\t\t\t\t\telse if (ch >= 'a' && ch <= 'f') {\n\t\t\t\t\t\tseed = (seed << 4) | 
(10 + ch - 'a');\n\t\t\t\t\t}\n\t\t\t\t\telse if (ch >= '0' && ch <= '9') {\n\t\t\t\t\t\tseed = (seed << 4) | (ch - '0');\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tstd::printf(\"Expected hex seed argument, found '%s' instead\\n\", argv[i]);\n\t\t\t\t\t\terror = true;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tsingleSeed = true;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tstd::printf(\"Unrecognized option '%s'.\\n\\n\", argv[i]);\n\t\t\t\terror = true;\n\t\t\t}\n\t\t}\n\t\t\n\t\tif (error || printHelp) {\n\t\t\tstd::printf(\"%s\\n    Description: Runs fuzz tests (randomized stability tests) for moodycamel::ConcurrentQueue\\n\", progName.c_str());\n\t\t\tstd::printf(\"    An infinite series of random tests are run, each with a different seed.\\nIf a test fails, the seed for that test is reported.\\n\");\n\t\t\tstd::printf(\"    --help        Prints this help blurb\\n\");\n\t\t\tstd::printf(\"    --seed N      Runs one test with the given seed\\n\");\n\t\t\treturn error ? -1 : 0;\n\t\t}\n\t}\n\t\n\t\n\t{\n\t\tbool logExists = true;\n\t\t{\n\t\t\tstd::ifstream fin(LOG_FILE);\n\t\t\tif (!fin) {\n\t\t\t\tlogExists = false;\n\t\t\t}\n\t\t}\n\t\t\n\t\tstd::ofstream fout(LOG_FILE, std::ios::app);\n\t\tif (logExists) {\n\t\t\tfout << \"\\n\\n\";\n\t\t}\n\t\tif (singleSeed) {\n\t\t\tstd::printf(\"Running %d iterations of single test with seed %08x%08x.\\n\\n\", SINGLE_SEED_ITERATIONS, (uint32_t)(seed >> 32), (uint32_t)(seed));\n\t\t\t\n\t\t\tfout << \"--- New run (\" << timestamp() << \"): Executing \" << SINGLE_SEED_ITERATIONS << \" iterations of a single test with seed \" << std::hex << seed << \" ---\" << std::endl;\n\t\t}\n\t\telse {\n\t\t\tstd::printf(\"Running random fuzz tests for moodycamel::ConcurrentQueue.\\n\");\n\t\t\tstd::printf(\"Press CTRL+C to exit.\\n\");\n\t\t\tstd::printf(\"(Run %s --help for options.)\\n\\n\", progName.c_str());\n\t\t\t\n\t\t\tfout << \"--- New run (\" << timestamp() << \"): Executing random fuzz tests ---\" << std::endl;\n\t\t}\t\t\n\t}\n\t\n\tint result = 
0;\n\ttest_type test;\n\tconst char* failReason;\n\tif (singleSeed) {\n\t\tif (!run_test(seed, SINGLE_SEED_ITERATIONS, test, failReason)) {\n\t\t\tresult = 1;\n\t\t\tstd::ofstream fout(LOG_FILE, std::ios::app);\n\t\t\tfout << test_names[test] << \" failed: \" << failReason << std::endl;\n\t\t\tstd::printf(\"    %s failed: %s\\n\", test_names[test], failReason);\n\t\t}\n\t\telse {\n\t\t\tstd::ofstream fout(LOG_FILE, std::ios::app);\n\t\t\tfout << test_names[test] << \" succeeded!\" << std::endl;\n\t\t\tstd::printf(\"    %s succeeded!\\n\", test_names[test]);\n\t\t}\n\t}\n\telse {\n#ifdef _WIN32\n\t\tAddVectoredExceptionHandler(1 /* first? */, &se_handler);\n#endif\n\t\t\n\t\tuint32_t iteration = 0;\n\t\twhile (true) {\n\t\t\tseed = (static_cast<uint64_t>(std::time(NULL)) << 32) | iteration++;\n\t\t\t// MurmurHash3 64-bit finalizer\n\t\t\tseed ^= seed >> 33;\n\t\t\tseed *= 0xff51afd7ed558ccd;\n\t\t\tseed ^= seed >> 33;\n\t\t\tseed *= 0xc4ceb9fe1a85ec53;\n\t\t\t\n\t\t\tg_seed.store(seed, std::memory_order_release);\n\t\t\tstd::signal(SIGSEGV, signal_handler);\n\t\t\tstd::signal(SIGABRT, signal_handler);\n\t\t\t\n\t\t\tint result;\n\t\t\ttry {\n\t\t\t\tresult = run_test(seed, 2, test, failReason);\n\t\t\t}\n\t\t\tcatch (std::exception const& e) {\n\t\t\t\tstd::ofstream fout(LOG_FILE, std::ios::app);\n\t\t\t\tfout << \"*** Exception thrown: \" << e.what() << \"\\n      Seed: \" << std::hex << seed << \"\\n      Test: \" << test_names[test] << std::endl;\n\t\t\t\tstd::printf(\"*** Exception thrown: %s\\n      Seed: %08x%08x\\n      Test: %s\\n\\n\", e.what(), (uint32_t)(seed >> 32), (uint32_t)(seed), test_names[test]);\n\t\t\t\tstd::exit(2);\t\t// There shouldn't be any exceptions!\n\t\t\t}\n\t\t\tcatch (...) 
{\n\t\t\t\tstd::ofstream fout(LOG_FILE, std::ios::app);\n\t\t\t\tfout << \"*** Unknown exception thrown!\\n      Seed: \" << std::hex << seed << \"\\n      Test: \" << test_names[test] << std::endl;\n\t\t\t\tstd::printf(\"*** Unknown exception thrown!\\n      Seed: %08x%08x\\n      Test: %s\\n\\n\", (uint32_t)(seed >> 32), (uint32_t)(seed), test_names[test]);\n\t\t\t\tstd::exit(2);\n\t\t\t}\n\t\t\t\n\t\t\tstd::signal(SIGSEGV, SIG_DFL);\n\t\t\tstd::signal(SIGABRT, SIG_DFL);\n\t\t\t\n\t\t\tif (!result) {\n\t\t\t\tresult = 1;\n\t\t\t\tstd::ofstream fout(LOG_FILE, std::ios::app);\n\t\t\t\tfout << \"*** Failure detected!\\n      Seed: \" << std::hex << seed << \"\\n      Test: \" << test_names[test] << \"\\n      Reason: \" << failReason << std::endl;\n\t\t\t\tstd::printf(\"*** Failure detected!\\n      Seed: %08x%08x\\n      Test: %s\\n      Reason: %s\\n\", (uint32_t)(seed >> 32), (uint32_t)(seed), test_names[test], failReason);\n\t\t\t}\n\t\t\t\n\t\t\tif ((iteration & 31) == 0) {\n\t\t\t\tstd::uint64_t total = 0;\n\t\t\t\t\n\t\t\t\tchar breakdown[128 * TEST_TYPE_COUNT];\n\t\t\t\tchar* ptr = breakdown;\n\t\t\t\tfor (int i = 0; i != TEST_TYPE_COUNT; ++i) {\n\t\t\t\t\tstd::sprintf(ptr, \"    %s: %llu successful, %llu failed\\n\", test_names[i], (unsigned long long)(test_count[i] - fail_count[i]), (unsigned long long)fail_count[i]);\n\t\t\t\t\tptr += std::strlen(ptr);\n\t\t\t\t\ttotal += test_count[i];\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tstd::ofstream fout(LOG_FILE, std::ios::app);\n\t\t\t\tfout << \"Executed \" << total << \" tests so far:\\n\" << breakdown;\n\t\t\t\tstd::printf(\"Executed %llu tests so far:\\n%s\", (unsigned long long)total, breakdown);\n\t\t\t}\n\t\t}\n\t}\n\t\n\treturn result;\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/freelist.cpp",
    "content": "// ©2014 Cameron Desrochers\n\n#include \"relacy/relacy/relacy_std.hpp\"\n\n\ntemplate <typename N>\nstruct FreeListNode\n{\n    FreeListNode() : freeListRefs(0), freeListNext(nullptr) { }\n\n    std::atomic<std::uint32_t> freeListRefs;\n    std::atomic<N*> freeListNext;\n};\n\n// A simple CAS-based lock-free free list. Not the fastest thing in the world under heavy contention,\n// but simple and correct (assuming nodes are never freed until after the free list is destroyed),\n// and fairly speedy under low contention.\ntemplate<typename N>    // N must inherit FreeListNode or have the same fields (and initialization)\nstruct FreeList\n{\n    FreeList() : freeListHead(nullptr) { }\n\n    inline void add(N* node)\n    {\n    \t// We know that the should-be-on-freelist bit is 0 at this point, so it's safe to\n    \t// set it using a fetch_add\n        if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) {\n            // Oh look! We were the last ones referencing this node, and we know\n            // we want to add it to the free list, so let's do it!\n     \t   add_knowing_refcount_is_zero(node);\n        }\n    }\n\n    inline N* try_get()\n    {\n        auto head = freeListHead.load(std::memory_order_acquire);\n        while (head != nullptr) {\n            auto prevHead = head;\n            auto refs = head->freeListRefs.load(std::memory_order_relaxed);\n            if ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1,\n                    std::memory_order_acquire, std::memory_order_relaxed)) {\n                head = freeListHead.load(std::memory_order_acquire);\n                continue;\n            }\n\n            // Good, reference count has been incremented (it wasn't at zero), which means\n            // we can read the next and not worry about it changing between now and the time\n            // we do the CAS\n            auto next = 
head->freeListNext.load(std::memory_order_relaxed);\n            if (freeListHead.compare_exchange_strong(head, next,\n                    std::memory_order_acquire, std::memory_order_relaxed)) {\n                // Yay, got the node. This means it was on the list, which means\n                // shouldBeOnFreeList must be false no matter the refcount (because\n                // nobody else knows it's been taken off yet, it can't have been put back on).\n                RL_ASSERT((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0);\n\n                // Decrease refcount twice, once for our ref, and once for the list's ref\n                head->freeListRefs.fetch_add(-2, std::memory_order_release);\n\n                return head;\n            }\n\n            // OK, the head must have changed on us, but we still need to decrease the refcount we\n            // increased.\n            // Note that we don't need to release any memory effects, but we do need to ensure that the reference\n\t\t\t// count decrement happens-after the CAS on the head.\n            refs = prevHead->freeListRefs.fetch_add(-1, std::memory_order_acq_rel);\n            if (refs == SHOULD_BE_ON_FREELIST + 1) {\n                add_knowing_refcount_is_zero(prevHead);\n            }\n        }\n\n        return nullptr;\n    }\n\n    // Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes)\n    N* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); }\n\nprivate:\n    inline void add_knowing_refcount_is_zero(N* node)\n    {\n        // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we\n        // run only one copy of this method per node at a time, i.e. 
the single thread case), then we\n        // know we can safely change the next pointer of the node; however, once the refcount is back\n        // above zero, then other threads could increase it (happens under heavy contention, when the\n        // refcount goes to zero in between a load and a refcount increment of a node in try_get, then\n        // back up to something non-zero, then the refcount increment is done by the other thread) --\n        // so, if the CAS to add the node to the actual list fails, decrease the refcount and leave\n        // the add operation to the next thread who puts the refcount back at zero (which could be us,\n        // hence the loop).\n        auto head = freeListHead.load(std::memory_order_relaxed);\n        while (true) {\n            node->freeListNext.store(head, std::memory_order_relaxed);\n            node->freeListRefs.store(1, std::memory_order_release);\n            if (!freeListHead.compare_exchange_strong(head, node,\n                    std::memory_order_release, std::memory_order_relaxed)) {\n                // Hmm, the add failed, but we can only try again when the refcount goes back to zero\n                if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) {\n                    continue;\n                }\n            }\n            return;\n        }\n    }\n\nprivate:\n\tstatic const std::uint32_t REFS_MASK = 0x7FFFFFFF;\n\tstatic const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000;\n\t\n    // Implemented like a stack, but where node order doesn't matter (nodes are\n    // inserted out of order under contention)\n    std::atomic<N*> freeListHead;\n};\n\n\nstruct TestNode : FreeListNode<TestNode>\n{\n\tint value;\n\tTestNode() { }\n\texplicit TestNode(int value) : value(value) { }\n};\n\nstruct basic_test : rl::test_suite<basic_test, 2>\n{\n\tFreeList<TestNode> freeList;\n\tTestNode initialNodes[2];\n\t\n\tvoid before()\n\t{\n\t}\n\t\n\tvoid thread(unsigned int 
tid)\n\t{\n\t\tTestNode* node = &initialNodes[tid];\n\t\tnode->value = tid;\n\t\tfreeList.add(node);\n\t\t\n\t\tnode = freeList.try_get();\n\t\tif (node != nullptr) {\n\t\t\tfreeList.add(node);\n\t\t}\n\t}\n\t\n\tvoid after()\n\t{\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\nstruct full_test : rl::test_suite<full_test, 4>\n{\n    FreeList<TestNode> freeList;\n    TestNode initialNodes[6];\n    \n    void before()\n    {\n    }\n    \n    void thread(unsigned int tid)\n    {\n        TestNode* node;\n        int myNodeCount = tid >= 4 ? 2 : 1;\n        for (int i = 0; i != myNodeCount; ++i) {\n            node = &initialNodes[tid + (tid >= 5 ? 1 : 0) + i];\n            node->value = tid;\n            freeList.add(node);\n        }\n        \n        for (int i = 0; i != 3; ++i) {\n            node = freeList.try_get();\n            if (node != nullptr) {\n                freeList.add(node);\n            }\n        }\n    }\n    \n    void after()\n    {\n    }\n    \n    void invariant()\n    {\n    }\n};\n\nint main()\n{\n\trl::test_params params;\n\t//params.search_type = rl::sched_full;\n\t//params.iteration_count = 100000000;\n\tparams.search_type = rl::sched_random;\n\tparams.iteration_count = 1000000;\n    rl::simulate<basic_test>(params);\n\trl::simulate<full_test>(params);\n\t\n\treturn 0;\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/integrated.cpp",
    "content": "// ©2015 Cameron Desrochers\n\n// Tests various parts of the queue using the actual\n// full implementation itself, instead of isolated\n// components. This is much slower, but provides much\n// better coverage too.\n\n#define MCDBGQ_USE_RELACY\n#include \"../../concurrentqueue.h\"\n\n#include <string>\n\nusing namespace moodycamel;\n\nstruct SmallConstantTraits : public ConcurrentQueueDefaultTraits\n{\n\tstatic const size_t BLOCK_SIZE = 2;\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = 2;\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = 2;\n\tstatic const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 1;\n\tstatic const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 2;\n};\n\nstruct MediumConstantTraits : public ConcurrentQueueDefaultTraits\n{\n\tstatic const size_t BLOCK_SIZE = 4;\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = 2;\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = 4;\n\tstatic const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 2;\n\tstatic const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 4;\n};\n\nstruct Foo {\n\tstatic int& ctorCount() { static int c; return c; }\n\tstatic int& dtorCount() { static int c; return c; }\n\tstatic void reset() { ctorCount() = 0; dtorCount() = 0; }\n\t\n\tFoo()\n\t\t: id(-2)\n\t{\n\t\t++ctorCount();\n\t}\n\t\n\tFoo(int id)\n\t\t: id(id)\n\t{\n\t\t++ctorCount();\n\t}\n\t\n\tFoo(Foo const& o)\n\t\t: id(o.id)\n\t{\n\t\t++ctorCount();\n\t}\n\t\n\t~Foo()\n\t{\n\t\tRL_ASSERT(id != -1);\n\t\t++dtorCount();\n\t\tid = -1;\n\t}\n\t\npublic:\n\tint id;\n};\n\n\n\nstruct enqueue_explicit_one : rl::test_suite<enqueue_explicit_one, 2>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\t\n\tvoid before()\n\t{\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tProducerToken t(q);\n\t\tq.enqueue(t, tid);\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid 
after()\n\t{\n\t\tint tid0, tid1;\n\t\tRL_ASSERT(q.try_dequeue(tid0));\n\t\tRL_ASSERT(tid0 == 0 || tid0 == 1);\n\t\tRL_ASSERT(q.try_dequeue(tid1));\n\t\tRL_ASSERT(tid1 == 0 || tid1 == 1);\n\t\tRL_ASSERT(tid0 != tid1);\n\t\tRL_ASSERT(!q.try_dequeue(tid0));\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n\nstruct enqueue_explicit_many : rl::test_suite<enqueue_explicit_many, 3>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\t\n\tvoid before()\n\t{\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tProducerToken t(q);\n\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\tq.enqueue(t, tid * 10 + i);\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint item;\n\t\tfor (int i = 0; i != 15; ++i) {\n\t\t\tRL_ASSERT(q.try_dequeue(item));\n\t\t}\n\t\tRL_ASSERT(!q.try_dequeue(item));\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n\n// This one caught a bug with the memory ordering in the core dequeue algorithm\nstruct dequeue_some_explicit : rl::test_suite<dequeue_some_explicit, 3>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\t\n\tvoid before()\n\t{\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tif (tid <= 1) {\n\t\t\tint item;\n\t\t\tConsumerToken t(q);\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tq.try_dequeue(t, item);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tProducerToken t(q);\n\t\t\tfor (int i = 0; i != 3; ++i) {\n\t\t\t\tq.enqueue(t, tid * 10 + i);\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n\n// Causes blocks to be reused\nstruct recycle_blocks_explicit : rl::test_suite<recycle_blocks_explicit, 3>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(8, false);\n\t}\n\t\n\tvoid thread(unsigned int 
tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tif (tid == 0) {\n\t\t\tProducerToken t(q);\n\t\t\tfor (int i = 0; i != 8; ++i) {\n\t\t\t\tq.enqueue(t, i);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tint item;\n\t\t\tConsumerToken t(q);\n\t\t\tfor (int i = 0; i != 6; ++i) {\n\t\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\t\tRL_ASSERT(!seen[item]);\n\t\t\t\t\tseen[item] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint item;\n\t\twhile (q.try_dequeue(item)) {\n\t\t\tRL_ASSERT(!seen[item]);\n\t\t\tseen[item] = true;\n\t\t}\n\t\tfor (auto s : seen) {\n\t\t\tRL_ASSERT(s);\n\t\t}\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n// Causes the explicit producer's block index to expand\nstruct expand_block_index_explicit : rl::test_suite<expand_block_index_explicit, 4>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(12, false);\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tif (tid == 0) {\n\t\t\tProducerToken t(q);\n\t\t\tfor (int i = 0; i != 12; ++i) {\n\t\t\t\tq.enqueue(t, i);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tint item;\n\t\t\tConsumerToken t(q);\n\t\t\tfor (int i = 0; i != 3; ++i) {\n\t\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\t\tRL_ASSERT(!seen[item]);\n\t\t\t\t\tseen[item] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint item;\n\t\twhile (q.try_dequeue(item)) {\n\t\t\tRL_ASSERT(!seen[item]);\n\t\t\tseen[item] = true;\n\t\t}\n\t\tfor (auto s : seen) {\n\t\t\tRL_ASSERT(s);\n\t\t}\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n\n// Tests that implicit producers work at a very basic level\nstruct enqueue_implicit_one : rl::test_suite<enqueue_implicit_one, 2>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\t\n\tvoid 
before()\n\t{\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tq.enqueue(tid);\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint tid0, tid1;\n\t\tRL_ASSERT(q.try_dequeue(tid0));\n\t\tRL_ASSERT(tid0 == 0 || tid0 == 1);\n\t\tRL_ASSERT(q.try_dequeue(tid1));\n\t\tRL_ASSERT(tid1 == 0 || tid1 == 1);\n\t\tRL_ASSERT(tid0 != tid1);\n\t\tRL_ASSERT(!q.try_dequeue(tid0));\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n// Tests implicit producer at a simple level\nstruct implicit_simple : rl::test_suite<implicit_simple, 3>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(5, false);\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tif (tid == 0) {\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tq.enqueue(i);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 3; ++i) {\n\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\tRL_ASSERT(!seen[item]);\n\t\t\t\t\tseen[item] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint item;\n\t\twhile (q.try_dequeue(item)) {\n\t\t\tRL_ASSERT(!seen[item]);\n\t\t\tseen[item] = true;\n\t\t}\n\t\tfor (auto s : seen) {\n\t\t\tRL_ASSERT(s);\n\t\t}\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n\n// Tests multiple implicit producers being created (stresses the implicit producer hash map)\nstruct many_implicit_producers : rl::test_suite<many_implicit_producers, 6>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(18, false);\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tq.enqueue(tid * 3 + 0);\n\t\tq.enqueue(tid * 3 + 1);\n\t\tq.enqueue(tid * 3 + 
2);\n\t\t\n\t\tint item;\n\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\tRL_ASSERT(!seen[item]);\n\t\t\t\tseen[item] = true;\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint item;\n\t\twhile (q.try_dequeue(item)) {\n\t\t\tRL_ASSERT(!seen[item]);\n\t\t\tseen[item] = true;\n\t\t}\n\t\tfor (auto s : seen) {\n\t\t\tRL_ASSERT(s);\n\t\t}\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n// Tests multiple implicit producers being created (stresses the implicit producer hash map)\nstruct implicit_producer_reuse : rl::test_suite<implicit_producer_reuse, 9>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(9, false);\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tq.enqueue(tid);\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint item;\n\t\twhile (q.try_dequeue(item)) {\n\t\t\tRL_ASSERT(!seen[item]);\n\t\t\tseen[item] = true;\n\t\t}\n\t\tfor (auto s : seen) {\n\t\t\tRL_ASSERT(s);\n\t\t}\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n// Tests implicit producer block recycling\nstruct implicit_block_reuse : rl::test_suite<implicit_block_reuse, 4>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(28, false);\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tfor (int i = 0; i != 7; ++i) {\n\t\t\tq.enqueue(tid * 7 + i);\n\t\t}\n\t\t\n\t\tint item;\n\t\tConsumerToken t(q);\n\t\tfor (int i = 0; i != 7; ++i) {\n\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\tRL_ASSERT(!seen[item]);\n\t\t\t\tseen[item] = true;\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint item;\n\t\twhile 
(q.try_dequeue(item)) {\n\t\t\tRL_ASSERT(!seen[item]);\n\t\t\tseen[item] = true;\n\t\t}\n\t\tfor (auto s : seen) {\n\t\t\tRL_ASSERT(s);\n\t\t}\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n// Tests consumption from mixed producers\nstruct mixed : rl::test_suite<mixed, 4>\n{\n\tConcurrentQueue<int, SmallConstantTraits> q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(28, false);\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tif (tid <= 1) {\n\t\t\tfor (int i = 0; i != 7; ++i) {\n\t\t\t\tq.enqueue(tid * 7 + i);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tProducerToken t(q);\n\t\t\tfor (int i = 0; i != 7; ++i) {\n\t\t\t\tq.enqueue(t, tid * 7 + i);\n\t\t\t}\n\t\t}\n\t\t\n\t\tint item;\n\t\tif (tid & 1) {\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\tRL_ASSERT(!seen[item]);\n\t\t\t\t\tseen[item] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tConsumerToken t(q);\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\t\tRL_ASSERT(!seen[item]);\n\t\t\t\t\tseen[item] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint item;\n\t\twhile (q.try_dequeue(item)) {\n\t\t\tRL_ASSERT(!seen[item]);\n\t\t\tseen[item] = true;\n\t\t}\n\t\tfor (auto s : seen) {\n\t\t\tRL_ASSERT(s);\n\t\t}\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n// Test leftovers are being properly destroyed\nstruct leftovers_destroyed_explicit : rl::test_suite<leftovers_destroyed_explicit, 3>\n{\n\tConcurrentQueue<Foo, MediumConstantTraits>* q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(rl::rand(32), false);\n\t\t\n\t\tq = new ConcurrentQueue<Foo, MediumConstantTraits>();\n\t\tFoo::reset();\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tif (tid == 0) 
{\n\t\t\tProducerToken t(*q);\n\t\t\tfor (int i = 0; i != (int)seen.size(); ++i) {\n\t\t\t\tq->enqueue(t, Foo(i));\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tFoo item;\n\t\t\tConsumerToken t(*q);\n\t\t\tfor (int i = rl::rand(17); i > 0; --i) {\n\t\t\t\tif (q->try_dequeue(t, item)) {\n\t\t\t\t\tRL_ASSERT(!seen[item.id]);\n\t\t\t\t\tseen[item.id] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint seenCount = 0;\n\t\t{\n\t\t\tfor (auto s : seen) {\n\t\t\t\tif (s) {\n\t\t\t\t\t++seenCount;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tRL_ASSERT(Foo::ctorCount() == seen.size() * 2 + 2);\n\t\tRL_ASSERT(Foo::dtorCount() == seen.size() + seenCount + 2);\n\t\tdelete q;\n\t\t\n\t\tRL_ASSERT(Foo::ctorCount() == seen.size() * 2 + 2);\n\t\tRL_ASSERT(Foo::ctorCount() == Foo::dtorCount());\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n// implicit\nstruct leftovers_destroyed_implicit : rl::test_suite<leftovers_destroyed_implicit, 3>\n{\n\tConcurrentQueue<Foo, MediumConstantTraits>* q;\n\tstd::vector<bool> seen;\n\t\n\tvoid before()\n\t{\n\t\tseen.resize(rl::rand(32), false);\n\t\t\n\t\tq = new ConcurrentQueue<Foo, MediumConstantTraits>();\n\t\tFoo::reset();\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_start();\n\t\t\n\t\tif (tid == 0) {\n\t\t\tfor (int i = 0; i != (int)seen.size(); ++i) {\n\t\t\t\tq->enqueue(Foo(i));\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tFoo item;\n\t\t\tfor (int i = rl::rand(17); i > 0; --i) {\n\t\t\t\tif (q->try_dequeue(item)) {\n\t\t\t\t\tRL_ASSERT(!seen[item.id]);\n\t\t\t\t\tseen[item.id] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tRelacyThreadExitNotifier::notify_relacy_thread_exit();\n\t}\n\t\n\tvoid after()\n\t{\n\t\tint seenCount = 0;\n\t\t{\n\t\t\tfor (auto s : seen) {\n\t\t\t\tif (s) {\n\t\t\t\t\t++seenCount;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tRL_ASSERT(Foo::ctorCount() == seen.size() * 2 + 2);\n\t\tRL_ASSERT(Foo::dtorCount() 
== seen.size() + seenCount + 2);\n\t\tdelete q;\n\t\t\n\t\tRL_ASSERT(Foo::ctorCount() == seen.size() * 2 + 2);\n\t\tRL_ASSERT(Foo::ctorCount() == Foo::dtorCount());\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\n\ntemplate<typename TTest>\nvoid simulate(int iterations)\n{\n\t// Note: There's no point using the full search params\n\t// Even with the simple enqueue_explicit_one test, it\n\t// would take a few millenia to complete(!)\n\t//rl::test_params fullParams;\n\t//fullParams.search_type = rl::sched_full;\n\t\n\trl::test_params randomParams;\n\trandomParams.search_type = rl::sched_random;\n\trandomParams.iteration_count = iterations;\n\trl::simulate<TTest>(randomParams);\n}\n\nint main()\n{\n\tsimulate<enqueue_explicit_one>(1000000);\n\tsimulate<enqueue_explicit_many>(1000000);\n\tsimulate<dequeue_some_explicit>(1000000);\n\tsimulate<recycle_blocks_explicit>(1000000);\n\tsimulate<expand_block_index_explicit>(1000000);\n\tsimulate<enqueue_implicit_one>(1000000);\n\tsimulate<implicit_simple>(1000000);\n\tsimulate<many_implicit_producers>(500000);\n\tsimulate<implicit_producer_reuse>(1000000);\n\tsimulate<implicit_block_reuse>(1000000);\n\tsimulate<mixed>(1000000);\n\tsimulate<leftovers_destroyed_explicit>(1000000);\n\tsimulate<leftovers_destroyed_implicit>(1000000);\n\t\n\treturn 0;\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/CHANGES",
    "content": "Version 2.4\nFeatures:\n+ Support for futex(FUTEX_WAIT/FUTEX_WAKE) \n+ Linux/Darwin performance improved (2.5x for Linux, 7x for Darwin)\nFixes:\n+ Fixed a bunch of issues with WaitForMultipleObjects()/SignalObjectAndWait()\n+ Fixed rare spurious memory leak reports related to test progress reporting\n\nVersion 2.3\nFeatures:\n+ Support for FlushProcessWriteBuffers()\n\nVersion 2.2\nFeatures:\n+ Support for pthread_mutex_timedlock()\n+ Support for ETIMEDOUT, EINTR in pthread_cond_timedwait()/pthread_cond_wait()\n+ rl::hash_ptr(p, sz) function which provides deterministic hashing of pointers\nFixes:\n+ Win32 mutex is now recursive\n+ Compilation issue on MSVC x64 when RL_DEBUGBREAK_ON_ASSERT/RL_DEBUGBREAK_ON_FAILURE defined\n+ Fixed OOM crash when execution history is very large\n+ Fixed rare crash during iteration count estimation in context bound scheduler\n+ Fixed bug in pthread_rwlock/SRWLOCK that at most 2 readers may acquire it simultaneously\n+ Fixed bug regarding false race detection when simulation runs for very long time (int overflow)\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/LICENSE",
    "content": "Relacy Race Detector\r\nCopyright (c) 2008-2013, Dmitry S. Vyukov\r\nAll rights reserved.\r\n\nRedistribution and use in source and binary forms, with or without modification,\r\nare permitted provided that the following conditions are met:\r\n  - Redistributions of source code must retain the above copyright notice,\r\n    this list of conditions and the following disclaimer.\r\n  - Redistributions in binary form must reproduce the above copyright notice, this list of conditions\r\n    and the following disclaimer in the documentation and/or other materials provided with the distribution.\r\n  - The name of the owner may not be used to endorse or promote products derived from this software\r\n    without specific prior written permission.\r\nTHIS SOFTWARE IS PROVIDED BY THE OWNER \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\r\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\r\nIN NO EVENT SHALL THE OWNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\r\nOR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\r\nSTRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\r\nEVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/VERSION",
    "content": "974f5c228473"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/cli_ws_deque/cli_ws_deque.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../../relacy/relacy_cli.hpp\"\r\n\r\n\r\nusing rl::nvar;\r\nusing rl::nvolatile;\r\nusing rl::mutex;\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass ws_deque\r\n{\r\npublic:\r\n    ws_deque()\r\n    {\r\n        m_mask($) = initial_size - 1;\r\n        m_headIndex($) = 0;\r\n        m_tailIndex($) = 0;\r\n        m_array($) = new nvar<T> [initial_size];\r\n        m_arraySize($) = initial_size;\r\n    }\r\n\r\n    bool IsEmpty()\r\n    {\r\n        return m_headIndex($) >= m_tailIndex($);\r\n    }\r\n\r\n    size_t Count()\r\n    {\r\n        return m_tailIndex($) - m_headIndex($);\r\n    }\r\n\r\n    void push(T item)\r\n    {\r\n        size_t tail = m_tailIndex($);\r\n        if (tail <= m_headIndex($) + m_mask($))\r\n        {\r\n            m_array($)[tail & m_mask($)]($) = item;\r\n            m_tailIndex($) = tail + 1;\r\n        }\r\n        else\r\n        {\r\n            m_foreignLock.lock($);\r\n            size_t head = m_headIndex($);\r\n            size_t count = Count();\r\n            if (count >= m_mask($))\r\n            {\r\n                size_t arraySize = m_arraySize($);\r\n                size_t mask = m_mask($);\r\n                nvar<T>* newArray = new nvar<T> [arraySize * 2];\r\n                nvar<T>* arr = m_array($);\r\n                for (size_t i = 0; i != count; ++i)\r\n                    newArray[i]($) = arr[(i + head) & mask]($);\r\n                m_array($) = newArray;\r\n                m_arraySize($) = arraySize * 2;\r\n                m_headIndex($) = 0;\r\n                m_tailIndex($) = count;\r\n                tail = count;\r\n                m_mask($) = (mask * 2) | 1;\r\n            }\r\n            m_array($)[tail & m_mask($)]($) = item;\r\n            m_tailIndex($) = tail + 1;\r\n            m_foreignLock.unlock($);\r\n        }\r\n    }\r\n\r\n    bool pop(T& item)\r\n    {\r\n        size_t tail = m_tailIndex($);\r\n        if (tail == 0)\r\n            
return false;\r\n        tail -= 1;\r\n        rl::Interlocked::Exchange(m_tailIndex, tail, $);\r\n        if (m_headIndex($) <= tail)\r\n        {\r\n            item = m_array($)[tail & m_mask($)]($);\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            m_foreignLock.lock($);\r\n            if (m_headIndex($) <= tail)\r\n            {\r\n                item = m_array($)[tail & m_mask($)]($);\r\n                m_foreignLock.unlock($);\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                m_tailIndex($) = tail + 1;\r\n                m_foreignLock.unlock($);\r\n                return false;\r\n            }\r\n        }\r\n    }\r\n\r\n    bool steal(T& item)\r\n    {\r\n        if (false == m_foreignLock.try_lock($))\r\n            return false;\r\n        size_t head = m_headIndex($);\r\n        rl::Interlocked::Exchange(m_headIndex, head + 1, $);\r\n        if (head < m_tailIndex($))\r\n        {\r\n            item = m_array($)[head & m_mask($)]($);\r\n            m_foreignLock.unlock($);\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            m_headIndex($) = head;\r\n            m_foreignLock.unlock($);\r\n            return false;\r\n        }\r\n    }\r\n\r\nprivate:\r\n    static size_t const initial_size = 2;\r\n    nvar<nvar<T>*> m_array;\r\n    nvar<size_t> m_mask;\r\n    nvar<size_t> m_arraySize;\r\n    nvolatile<size_t> m_headIndex;\r\n    nvolatile<size_t> m_tailIndex;\r\n    mutex m_foreignLock;\r\n};\r\n\r\n\r\n\r\n\r\nstruct ws_deque_test : rl::test_suite<ws_deque_test, 2>\r\n{\r\n    ws_deque<int> q;\r\n    bool state [2];\r\n\r\n    void before()\r\n    {\r\n        state[0] = true;\r\n        state[1] = true;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        RL_ASSERT(state[0] == false);\r\n        RL_ASSERT(state[1] == false);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n    
        q.push(1);\r\n            q.push(2);\r\n\r\n            int item = 0;\r\n            bool res = q.pop(item);\r\n            RL_ASSERT(res && item == 2);\r\n            RL_ASSERT(state[1]);\r\n            state[1] = false;\r\n\r\n            item = 0;\r\n            res = q.pop(item);\r\n            if (res)\r\n            {\r\n                RL_ASSERT(state[0]);\r\n                state[0] = false;\r\n            }\r\n\r\n            item = 0;\r\n            res = q.pop(item);\r\n            RL_ASSERT(res == false);\r\n        }\r\n        else\r\n        {\r\n            int item = 0;\r\n            bool res = q.steal(item);\r\n            if (res)\r\n            {\r\n                RL_ASSERT(item == 1);\r\n                RL_ASSERT(state[0]);\r\n                state[0] = false;\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::simulate<ws_deque_test>();\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/cli_ws_deque/msvc8/cli_ws_deque.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"cli_ws_deque\", \"cli_ws_deque.vcproj\", \"{0B597F19-DEBB-4832-B520-9A93A286D595}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/cli_ws_deque/msvc8/cli_ws_deque.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"cli_ws_deque\"\r\n\tProjectGUID=\"{967F376B-BDBF-4AC8-9325-371CC8ABD8FD}\"\r\n\tRootNamespace=\"cli_ws_deque\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\
n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\
r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\cli_ws_deque.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/cli_ws_deque/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/cli_ws_deque/stdafx.h",
    "content": "#pragma once\r\n\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/condvar/condvar.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n// THE TEST IS EXPECTED TO FAIL WITH \"DEADLOCK\"\r\n\r\nclass CondVar\r\n{\r\npublic:\r\n        CondVar();\r\n        ~CondVar();\r\n   void Enter();\r\n   void Wait();\r\n   void Release();\r\n   void ReleaseAll();\r\n   void Leave();\r\n\r\nprivate:\r\n    std::atomic<int>        m_lMutex;\r\n    std::atomic<unsigned>   m_dwWaitingForSignal;\r\n    HANDLE                  m_xhEvtEnter;\r\n    HANDLE                  m_xhSemRelease;\r\n};\r\n\r\nCondVar::CondVar()\r\n    : m_xhEvtEnter(CreateEvent(0, 0, 0, 0))\r\n    , m_xhSemRelease(CreateSemaphore(0, 0, 0x7FFFFFFF, 0))\r\n{\r\n    m_lMutex.store(0, std::memory_order_relaxed);\r\n    m_dwWaitingForSignal.store(0, std::memory_order_relaxed);\r\n}\r\n\r\nCondVar::~CondVar()\r\n{\r\n    CloseHandle(m_xhEvtEnter);\r\n    CloseHandle(m_xhSemRelease);\r\n}\r\n\r\nvoid CondVar::Enter()\r\n{\r\n   int lMutex = m_lMutex.load(std::memory_order_seq_cst);\r\n   for (;;)\r\n   {\r\n     if( lMutex >= 0 )\r\n     {\r\n         if (m_lMutex.compare_exchange_weak(lMutex, lMutex | 0x80000000u, std::memory_order_seq_cst))\r\n            break;\r\n     }\r\n     else\r\n     {\r\n        if (false == m_lMutex.compare_exchange_weak(lMutex, lMutex + 1, std::memory_order_seq_cst))\r\n            continue;\r\n        WaitForSingleObject(m_xhEvtEnter, INFINITE);\r\n        RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);\r\n        break;\r\n     }\r\n   }\r\n}\r\n\r\nvoid CondVar::Wait()\r\n{\r\n    unsigned dwWaitingForSignal = m_dwWaitingForSignal.load(std::memory_order_seq_cst);\r\n    m_dwWaitingForSignal.store(dwWaitingForSignal + 1, std::memory_order_seq_cst);\r\n    RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);\r\n\r\n    int lMutex = m_lMutex.load(std::memory_order_seq_cst);\r\n    for (;;)\r\n    {\r\n        unsigned dwWaitingToOwn = lMutex & 0x7FFFFFFFu;\r\n        RL_ASSERT(dwWaitingToOwn >= dwWaitingForSignal);\r\n 
       if (dwWaitingToOwn == dwWaitingForSignal)\r\n        {\r\n            if (m_lMutex.compare_exchange_weak(lMutex, dwWaitingToOwn + 1, std::memory_order_seq_cst))\r\n                break;\r\n        }\r\n        else\r\n        {\r\n            SetEvent(m_xhEvtEnter);\r\n            break;\r\n       }\r\n   }\r\n\r\n   WaitForSingleObject(m_xhSemRelease, INFINITE);\r\n   WaitForSingleObject(m_xhEvtEnter, INFINITE);\r\n\r\n   RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);\r\n}\r\n\r\nvoid CondVar::Release()\r\n{\r\n   RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);\r\n    unsigned dwWaitingForSignal = m_dwWaitingForSignal.load(std::memory_order_seq_cst);\r\n    if (dwWaitingForSignal != 0)\r\n    {\r\n        m_dwWaitingForSignal.store(dwWaitingForSignal - 1, std::memory_order_seq_cst);\r\n        ReleaseSemaphore(m_xhSemRelease, 1, 0);\r\n    }\r\n}\r\n\r\nvoid CondVar::ReleaseAll()\r\n{\r\n   RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);\r\n    unsigned dwWaitingForSignal = m_dwWaitingForSignal.load(std::memory_order_seq_cst);\r\n    if (dwWaitingForSignal != 0)\r\n    {\r\n        m_dwWaitingForSignal.store(0, std::memory_order_seq_cst);\r\n        ReleaseSemaphore(m_xhSemRelease, dwWaitingForSignal, 0);\r\n   }\r\n}\r\n\r\nvoid CondVar::Leave()\r\n{\r\n    int lMutex = m_lMutex.load(std::memory_order_seq_cst);\r\n    RL_ASSERT(lMutex < 0);\r\n    for (;;)\r\n    {\r\n        unsigned dwWaitingToOwn     = lMutex & 0x7FFFFFFFu;\r\n        unsigned dwWaitingForSignal = m_dwWaitingForSignal.load(std::memory_order_seq_cst);\r\n        RL_ASSERT(dwWaitingToOwn >= dwWaitingForSignal);\r\n        if (dwWaitingToOwn == dwWaitingForSignal)\r\n        {\r\n            if (m_lMutex.compare_exchange_weak(lMutex, lMutex & 0x7FFFFFFF, std::memory_order_seq_cst))\r\n                break;\r\n        }\r\n        else\r\n        {\r\n            if (false == m_lMutex.compare_exchange_weak(lMutex, lMutex - 1, std::memory_order_seq_cst))\r\n     
           continue;\r\n            SetEvent(m_xhEvtEnter);\r\n            break;\r\n        }\r\n    } \r\n}\r\n\r\nstruct CondVarTest : rl::test_suite<CondVarTest, 3>\r\n{\r\n    VAR_T(int) stage;\r\n    CondVar cv;\r\n\r\n    void before()\r\n    {\r\n        VAR(stage) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            cv.Enter();\r\n            VAR(stage) += 1;\r\n            cv.ReleaseAll();\r\n            while (VAR(stage) != 2)\r\n                cv.Wait();\r\n            cv.Leave();\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            cv.Enter();\r\n            while (VAR(stage) != 1)\r\n                cv.Wait();\r\n            VAR(stage) += 1;\r\n            cv.ReleaseAll();\r\n            cv.Leave();\r\n        }\r\n        else if (2 == index)\r\n        {\r\n            cv.Enter();\r\n            while (VAR(stage) != 2)\r\n                cv.Wait();\r\n            cv.Leave();\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::simulate<CondVarTest>();\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/condvar/msvc8/condvar.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"condvar\", \"condvar.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/condvar/msvc8/condvar.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"condvar\"\r\n\tProjectGUID=\"{6CC59CF8-408B-441B-8F65-15651210CB82}\"\r\n\tRootNamespace=\"condvar\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tN
ame=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<
Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\condvar.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/condvar/msvc9/condvar.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"condvar\", \"condvar.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/condvar/msvc9/condvar.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"condvar\"\r\n\tProjectGUID=\"{6CC59CF8-408B-441B-8F65-15651210CB82}\"\r\n\tRootNamespace=\"condvar\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tN
ame=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<
Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\condvar.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/condvar/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/condvar/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#define RL_MSVC_OUTPUT\r\n//#define RL_DEBUGBREAK_ON_FAILURE\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/eao_blocking/eao_blocking.cpp",
    "content": "/*\r\n#define BOOST_ALL_NO_LIB\r\n#pragma warning (push, 3)\r\n#include <boost/thread/mutex.hpp>\r\n#include <boost/thread/shared_mutex.hpp>\r\n#include \"C:\\boost_1_35_0\\libs\\thread\\src\\win32\\exceptions.cpp\"\r\n#pragma warning (pop)\r\n\r\n\r\nclass business_logic\r\n{\r\npublic:\r\n    typedef unsigned account_id_t;\r\n    typedef double balance_t;\r\n\r\n    bool add_account(account_id_t acc_id, balance_t balance)\r\n    {\r\n        accounts_guard.lock();\r\n        if (accounts.find(acc_id) != accounts.end())\r\n        {\r\n            accounts_guard.unlock();\r\n            return false;\r\n        }\r\n        accounts[acc_id].balance = balance;\r\n        accounts_guard.unlock();\r\n        return true;\r\n    }\r\n\r\n    bool transfer_balance(account_id_t acc_id1, account_id_t acc_id2, balance_t amount)\r\n    {\r\n        accounts_guard.lock_shared();\r\n        if (accounts.find(acc_id1) != accounts.end()\r\n            || accounts.find(acc_id2) != accounts.end())\r\n        {\r\n            accounts_guard.unlock_shared();\r\n            return false;\r\n        }\r\n        account_info& acc1 = accounts[acc_id1];\r\n        account_info& acc2 = accounts[acc_id2];\r\n        acc1.mtx.lock();\r\n        acc2.mtx.lock();\r\n        accounts_guard.unlock_shared();\r\n\r\n        acc1.balance -= amount;\r\n        acc2.balance += amount;\r\n\r\n        acc1.mtx.unlock();\r\n        acc2.mtx.unlock();\r\n        return true;\r\n    }\r\n\r\nprivate:\r\n    struct account_info\r\n    {\r\n        balance_t balance;\r\n        boost::mutex mtx;\r\n\r\n        account_info()\r\n            : balance()\r\n        {}\r\n\r\n        account_info(account_info const& acc)\r\n            : balance(acc.balance)\r\n        {}\r\n    };\r\n\r\n    typedef std::map<account_id_t, account_info> account_map_t;\r\n    account_map_t accounts;\r\n    boost::shared_mutex accounts_guard;\r\n};\r\n\r\n*/\r\n\r\n/*\r\n#undef RL_TEST\r\n\r\n#ifndef 
RL_TEST\r\n//#   define ASSERT assert\r\ntypedef boost::mutex mutex_t;\r\n#   define $$\r\n#else\r\n//#   define ASSERT RL_ASSERT\r\ntypedef rl::recursive_mutex mutex_t;\r\n#   define $$ $\r\n#endif\r\n\r\n\r\n\r\nclass business_logic\r\n{\r\npublic:\r\n    typedef unsigned account_id_t;\r\n    typedef double balance_t;\r\n\r\n    bool add_account(account_id_t acc_id, balance_t balance)\r\n    {\r\n        accounts_guard.lock($$);\r\n        if (accounts.find(acc_id) != accounts.end())\r\n        {\r\n            accounts_guard.unlock($$);\r\n            return false;\r\n        }\r\n        accounts[acc_id].balance = balance;\r\n        accounts_guard.unlock($$);\r\n        return true;\r\n    }\r\n\r\n    bool transfer_balance(account_id_t acc_id1, account_id_t acc_id2, balance_t amount)\r\n    {\r\n        accounts_guard.lock($$);\r\n        if (accounts.find(acc_id1) == accounts.end()\r\n            || accounts.find(acc_id2) == accounts.end())\r\n        {\r\n            accounts_guard.unlock($$);\r\n            return false;\r\n        }\r\n        account_info& acc1 = accounts[acc_id1];\r\n        account_info& acc2 = accounts[acc_id2];\r\n        acc1.mtx.lock($$);\r\n        acc2.mtx.lock($$);\r\n        accounts_guard.unlock($$);\r\n\r\n        acc1.balance -= amount;\r\n        acc2.balance += amount;\r\n\r\n        acc1.mtx.unlock($$);\r\n        acc2.mtx.unlock($$);\r\n\r\n        return true;\r\n    }\r\n\r\nprivate:\r\n    struct account_info\r\n    {\r\n        balance_t balance;\r\n        mutex_t mtx;\r\n\r\n        account_info()\r\n            : balance()\r\n        {}\r\n\r\n        account_info(account_info const& acc)\r\n            : balance(acc.balance)\r\n        {}\r\n    };\r\n\r\n    typedef std::map<account_id_t, account_info> account_map_t;\r\n    account_map_t accounts;\r\n    mutex_t accounts_guard;\r\n};\r\n\r\n*/\r\n\r\n/*\r\nstruct business_logic_test : rl::test_suite<business_logic_test, 2>\r\n{\r\n    business_logic bl;\r\n\r\n  
  static size_t const account_count = 10;\r\n\r\n    void before()\r\n    {\r\n        for (size_t i = 0; i != account_count; ++i)\r\n        {\r\n            bool rv = bl.add_account(i, i * 10.0);\r\n            RL_ASSERT(rv);\r\n        }\r\n    }\r\n\r\n    void thread(unsigned)\r\n    {\r\n        business_logic::account_id_t acc1 = rl::rand(account_count);\r\n        business_logic::account_id_t acc2 = rl::rand(account_count);\r\n        bool rv = bl.transfer_balance(acc1, acc2, 1.0);\r\n        RL_ASSERT(rv);\r\n    }\r\n};\r\n*/\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/eventcount/eventcount.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n#include \"../../relacy/windows.h\"\r\n\r\n#define HANDLE rl::HANDLE\r\n\r\n#define CreateSemaphoreA rl::RL_CreateSemaphore($)\r\n#define CreateSemaphoreW rl::RL_CreateSemaphore($)\r\n#ifndef CreateSemaphore\r\n#   define CreateSemaphore CreateSemaphoreW\r\n#endif\r\n\r\n\r\n#define CloseHandle rl::RL_CloseHandle($)\r\n\r\n\r\n#include <stddef.h>\r\n\r\n\r\n#if defined(WIN32) && defined(_MSC_VER)\r\n\r\n#include <windows.h>\r\n#include <intrin.h>\r\n\r\nclass semaphore\r\n{\r\npublic:\r\n    semaphore()\r\n    {\r\n        h_ = CreateSemaphore(0, 0, LONG_MAX, 0);\r\n    }\r\n\r\n    ~semaphore()\r\n    {\r\n        CloseHandle(h_);\r\n    }\r\n\r\n    void wait()\r\n    {\r\n        WaitForSingleObject(h_, INFINITE);\r\n    }\r\n\r\n    void post()\r\n    {\r\n        ReleaseSemaphore(h_, 1, 0);\r\n    }\r\n\r\nprivate:\r\n    HANDLE h_;\r\n\r\n    semaphore(semaphore const&);\r\n    semaphore& operator = (semaphore const&);\r\n};\r\n\r\nclass mutex\r\n{\r\npublic:\r\n    mutex()\r\n    {\r\n        InitializeCriticalSection(&cs_);\r\n    }\r\n\r\n    ~mutex()\r\n    {\r\n        DeleteCriticalSection(&cs_);\r\n    }\r\n\r\n    void lock()\r\n    {\r\n        EnterCriticalSection(&cs_);\r\n    }\r\n\r\n    void unlock()\r\n    {\r\n        LeaveCriticalSection(&cs_);\r\n    }\r\n\r\nprivate:\r\n    CRITICAL_SECTION    cs_;\r\n\r\n    mutex(mutex const&);\r\n    mutex& operator = (mutex const&);\r\n};\r\n\r\nvoid full_memory_fence()\r\n{\r\n    _mm_mfence();\r\n}\r\n\r\n#define THREAD_LOCAL __declspec(thread)\r\n\r\n#elif defined(POSIX) && defined(GCC)\r\n\r\n#include <pthread.h>\r\n#include <semaphore.h>\r\n\r\nclass semaphore\r\n{\r\npublic:\r\n    semaphore()\r\n    {\r\n        sem_init(&sem_, 0, 0);\r\n    }\r\n\r\n    ~semaphore()\r\n    {\r\n        sem_destroy(&sem_);\r\n    }\r\n\r\n    void wait()\r\n    {\r\n        sem_wait(&sem_);\r\n    }\r\n\r\n    void post()\r\n    {\r\n        sem_post(&sem_);\r\n    
}\r\n\r\nprivate:\r\n    sem_t               sem_;\r\n\r\n    semaphore(semaphore const&);\r\n    semaphore& operator = (semaphore const&);\r\n};\r\n\r\nclass mutex\r\n{\r\npublic:\r\n    mutex()\r\n    {\r\n        pthread_mutex_init(&mutex_, 0);\r\n    }\r\n\r\n    ~mutex()\r\n    {\r\n        pthread_mutex_destroy(&mutex_);\r\n    }\r\n\r\n    void lock()\r\n    {\r\n        pthread_mutex_lock(&mutex_);\r\n    }\r\n\r\n    void unlock()\r\n    {\r\n        pthread_mutex_unlock(&mutex_);\r\n    }\r\n\r\nprivate:\r\n    pthread_mutex_t     mutex_;\r\n\r\n    mutex(mutex const&);\r\n    mutex& operator = (mutex const&);\r\n};\r\n\r\nvoid full_memory_fence()\r\n{\r\n    __sync_synchronize();\r\n}\r\n\r\n#define THREAD_LOCAL __thread\r\n\r\n#endif\r\n\r\n\r\n\r\nclass lock\r\n{\r\npublic:\r\n    lock(mutex& m)\r\n        : m_(m)\r\n    {\r\n        m.lock();\r\n    }\r\n\r\n    ~lock()\r\n    {\r\n        m_.unlock();\r\n    }\r\n\r\nprivate:\r\n    mutex&              m_;\r\n\r\n    lock(lock const&);\r\n    lock& operator = (lock const&);\r\n};\r\n\r\n\r\n\r\n\r\n/** simple single-threaded double-linked list\r\n *  nothing interesting\r\n */\r\nclass dlist\r\n{\r\npublic:\r\n    struct node\r\n    {\r\n        node*           prev_;\r\n        node*           next_;\r\n\r\n        node()\r\n        {\r\n            prev_ = 0;\r\n            next_ = 0;\r\n        }\r\n    };\r\n\r\n    dlist()\r\n    {\r\n        reset();\r\n    }\r\n\r\n    void push(node* n)\r\n    {\r\n        size_ += 1;\r\n        n->next_ = head_.next_;\r\n        n->prev_ = &head_;\r\n        head_.next_->prev_ = n;\r\n        head_.next_ = n;\r\n    }\r\n\r\n    node* pop()\r\n    {\r\n        if (size_ == 0)\r\n            return 0;\r\n        node* n = head_.next_;\r\n        remove(n);\r\n        return n;\r\n    }\r\n\r\n    void remove(node* n)\r\n    {\r\n        size_ -= 1;\r\n        n->prev_->next_ = n->next_;\r\n        n->next_->prev_ = n->prev_;\r\n    }\r\n\r\n    size_t size() 
const\r\n    {\r\n        return size_;\r\n    }\r\n\r\n    node* begin()\r\n    {\r\n        return head_.next_;\r\n    }\r\n\r\n    void flush_to(dlist& target)\r\n    {\r\n        if (size_)\r\n        {\r\n            target.size_ = size_;\r\n            target.head_.next_ = head_.next_;\r\n            target.head_.next_->prev_ = &target.head_;\r\n            target.tail_.prev_ = tail_.prev_;\r\n            target.tail_.prev_->next_ = &target.tail_;\r\n        }\r\n        else\r\n        {\r\n            target.reset();\r\n        }\r\n        reset();\r\n    }\r\n\r\n    static bool not_last(node* n)\r\n    {\r\n        return n->next_ != 0;\r\n    }\r\n\r\n    static node* get_next(node* n)\r\n    {\r\n        return n->next_;\r\n    }\r\n\r\nprivate:\r\n    size_t volatile     size_;\r\n    node                head_;\r\n    node                tail_;\r\n\r\n    void reset()\r\n    {\r\n        size_ = 0;\r\n        head_.next_ = &tail_;\r\n        head_.prev_ = 0;\r\n        tail_.next_ = 0;\r\n        tail_.prev_ = &head_;\r\n    }\r\n\r\n    dlist(dlist const&);\r\n    dlist& operator = (dlist const&);\r\n};\r\n\r\n\r\n\r\n/** pre-thread descriptor for eventcount\r\n */\r\nstruct ec_thread\r\n{\r\n    dlist::node         node_;\r\n    semaphore           sema_;\r\n    unsigned            epoch_;\r\n    bool volatile       in_waitset_;\r\n    bool                spurious_;\r\n    void*               ctx_;\r\n\r\n    ec_thread()\r\n    {\r\n        epoch_ = 0;\r\n        in_waitset_ = false;\r\n        spurious_ = false;\r\n        ctx_ = 0;\r\n    }\r\n\r\n    ~ec_thread()\r\n    {\r\n        if (spurious_)\r\n            sema_.wait();\r\n    }\r\n\r\n    static ec_thread* current()\r\n    {\r\n        static THREAD_LOCAL ec_thread* ec_thread_instance = 0;\r\n        ec_thread* instance = ec_thread_instance;\r\n        if (instance == 0)\r\n        {\r\n            instance = new ec_thread;\r\n            ec_thread_instance = instance;\r\n        }\r\n     
   return instance;\r\n        // instance must be destroyed in DllMain() callback\r\n        // or in pthread_key_create() callback\r\n    }\r\n\r\nprivate:\r\n    ec_thread(ec_thread const&);\r\n    ec_thread& operator = (ec_thread const&);\r\n};\r\n\r\n\r\n\r\n/** fine-grained eventcount implementation\r\n */\r\nclass eventcount\r\n{\r\npublic:\r\n    eventcount()\r\n    {\r\n        epoch_ = 0;\r\n    }\r\n\r\n    void prepare_wait(void* ctx = 0)\r\n    {\r\n        ec_thread* th = ec_thread::current();\r\n        // this is good place to pump previous spurious wakeup\r\n        if (th->spurious_)\r\n        {\r\n            th->spurious_ = false;\r\n            th->sema_.wait();\r\n        }\r\n        th->in_waitset_ = true;\r\n        th->ctx_ = ctx;\r\n        {\r\n            lock l (mtx_);\r\n            th->epoch_ = epoch_;\r\n            waitset_.push(&th->node_);\r\n        }\r\n        full_memory_fence();\r\n    }\r\n\r\n    void wait()\r\n    {\r\n        ec_thread* th = ec_thread::current();\r\n        // this check is just an optimization\r\n        if (th->epoch_ == epoch_)\r\n            th->sema_.wait();\r\n        else\r\n            retire_wait();\r\n    }\r\n\r\n    void retire_wait()\r\n    {\r\n        ec_thread* th = ec_thread::current();\r\n        // spurious wakeup will be pumped in following prepare_wait()\r\n        th->spurious_  = true;\r\n        // try to remove node from waitset\r\n        if (th->in_waitset_)\r\n        {\r\n            lock l (mtx_);\r\n            if (th->in_waitset_)\r\n            {\r\n                // successfully removed from waitset,\r\n                // so there will be no spurious wakeup\r\n                th->in_waitset_ = false;\r\n                th->spurious_ = false;\r\n                waitset_.remove(&th->node_);\r\n            }\r\n        }\r\n    }\r\n\r\n    void notify_one()\r\n    {\r\n        full_memory_fence();\r\n        notify_one_relaxed();\r\n    }\r\n\r\n    template<typename 
predicate_t>\r\n    void notify(predicate_t pred)\r\n    {\r\n        full_memory_fence();\r\n        notify_relaxed(pred);\r\n    }\r\n\r\n    void notify_all()\r\n    {\r\n        full_memory_fence();\r\n        notify_all_relaxed();\r\n    }\r\n\r\n    void notify_one_relaxed()\r\n    {\r\n        if (waitset_.size() == 0)\r\n            return;\r\n        dlist::node* n;\r\n        {\r\n            lock l (mtx_);\r\n            epoch_ += 1;\r\n            n = waitset_.pop();\r\n            if (n)\r\n                to_ec_thread(n)->in_waitset_ = false;\r\n        }\r\n        if (n)\r\n        {\r\n            to_ec_thread(n)->sema_.post();\r\n        }\r\n    }\r\n\r\n    template<typename predicate_t>\r\n    void notify_relaxed(predicate_t pred)\r\n    {\r\n        if (waitset_.size() == 0)\r\n            return;\r\n        dlist temp;\r\n        {\r\n            lock l (mtx_);\r\n            epoch_ += 1;\r\n            size_t size = waitset_.size();\r\n            size_t idx = 0;\r\n            dlist::node* n = waitset_.begin();\r\n            while (dlist::not_last(n))\r\n            {\r\n                dlist::node* next = dlist::get_next(n);\r\n                ec_thread* th = to_ec_thread(n);\r\n                if (pred(th->ctx_, size, idx))\r\n                {\r\n                    waitset_.remove(n);\r\n                    temp.push(n);\r\n                    th->in_waitset_ = false;\r\n                }\r\n                n = next;\r\n                idx += 1;\r\n            }\r\n        }\r\n        dlist::node* n = temp.begin();\r\n        while (dlist::not_last(n))\r\n        {\r\n            dlist::node* next = dlist::get_next(n);\r\n            to_ec_thread(n)->sema_.post();\r\n            n = next;\r\n        }\r\n    }\r\n\r\n    void notify_all_relaxed()\r\n    {\r\n        if (waitset_.size() == 0)\r\n            return;\r\n        dlist temp;\r\n        {\r\n            lock l (mtx_);\r\n            epoch_ += 1;\r\n            
waitset_.flush_to(temp);\r\n            dlist::node* n = temp.begin();\r\n            while (dlist::not_last(n))\r\n            {\r\n                to_ec_thread(n)->in_waitset_ = false;\r\n                n = dlist::get_next(n);\r\n            }\r\n        }\r\n        dlist::node* n = temp.begin();\r\n        while (dlist::not_last(n))\r\n        {\r\n            dlist::node* next = dlist::get_next(n);\r\n            to_ec_thread(n)->sema_.post();\r\n            n = next;\r\n        }\r\n    }\r\n\r\nprivate:\r\n    mutex               mtx_;\r\n    dlist               waitset_;\r\n    volatile unsigned   epoch_;\r\n\r\n    ec_thread* to_ec_thread(dlist::node* n)\r\n    {\r\n        return (ec_thread*)((char*)n - offsetof(ec_thread, node_));\r\n    }\r\n\r\n    eventcount(eventcount const&);\r\n    eventcount& operator = (eventcount const&);\r\n};\r\n\r\n\r\n\r\n\r\n\r\nstruct scheduler\r\n{\r\n    struct tbb_thread {};\r\n\r\n    eventcount          ec_;\r\n    tbb_thread*         threads_;\r\n    bool volatile       is_permanently_open_;\r\n\r\n    void wait_while_pool_is_empty(tbb_thread* th)\r\n    {\r\n        if (is_permanently_open_)\r\n            return;\r\n        ec_.prepare_wait(th);\r\n        if (pool_is_empty())\r\n            ec_.wait();\r\n        else\r\n            ec_.retire_wait();\r\n    }\r\n\r\n    void notify_about_new_task_available()\r\n    {\r\n        ec_.notify_one_relaxed();\r\n    }\r\n\r\n    void notify_about_new_task_available_with_preference(tbb_thread* preference)\r\n    {\r\n        struct local\r\n        {\r\n            tbb_thread*     preference_;\r\n            bool            fired_;\r\n\r\n            bool operator () (void* ctx, size_t count, size_t idx)\r\n            {\r\n                tbb_thread* th = (tbb_thread*)ctx;\r\n                if (th == preference_)\r\n                {\r\n                    fired_ = true;\r\n                    return true;\r\n                }\r\n                else if (idx == count 
- 1 && fired_ == false)\r\n                {\r\n                    return true;\r\n                }\r\n                else\r\n                {\r\n                    return false;\r\n                }\r\n            }\r\n        }\r\n        pred = {preference};\r\n        ec_.notify_relaxed(pred);\r\n    }\r\n\r\n    void notify_about_list_of_tasks_available(size_t total_count, size_t preference_count, tbb_thread** preferences)\r\n    {\r\n        struct local\r\n        {\r\n            size_t          remain_to_signal_;\r\n            size_t          preference_count_;\r\n            tbb_thread**    preferences_;\r\n\r\n            bool operator () (void* ctx, size_t count, size_t idx)\r\n            {\r\n                tbb_thread* th = (tbb_thread*)ctx;\r\n                size_t remain_in_waitset = count - idx;\r\n                if (remain_in_waitset <= remain_to_signal_)\r\n                {\r\n                    return true;\r\n                }\r\n                else\r\n                {\r\n                    for (size_t i = 0; i != preference_count_; ++i)\r\n                    {\r\n                        if (preferences_[i] == th)\r\n                        {\r\n                            remain_to_signal_ -= 1;\r\n                            return true;\r\n                        }\r\n                    }\r\n                }\r\n                return false;\r\n            }\r\n        }\r\n        pred = {total_count, preference_count, preferences};\r\n        ec_.notify_relaxed(pred);\r\n    }\r\n\r\n    bool pool_is_empty()\r\n    {\r\n        return true;\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct queue\r\n{\r\n    int                 producer_idx_;\r\n    int                 consumer_idx_;\r\n\r\n    void**              buffer_;\r\n\r\n    eventcount          ec_;\r\n\r\n    void enqueue(void* data)\r\n    {\r\n        int idx = ++producer_idx_; // atomic\r\n        buffer_[idx] = data;\r\n\r\n        struct local\r\n        {\r\n            
int         idx_;\r\n            bool operator () (void* ctx, size_t /*count*/, size_t /*idx*/)\r\n            {\r\n                return idx_ == *(int*)ctx;\r\n            }\r\n        }\r\n        pred = {idx};\r\n        ec_.notify(pred); // not relaxed!!!\r\n    }\r\n\r\n    void* dequeue()\r\n    {\r\n        int idx = ++consumer_idx_; // atomic\r\n        void* data = buffer_[idx];\r\n        if (data)\r\n            return data;\r\n        for (;;)\r\n        {\r\n            ec_.prepare_wait(&idx);\r\n            data = buffer_[idx];\r\n            if (data)\r\n            {\r\n                ec_.retire_wait();\r\n                return data;\r\n            }\r\n            ec_.wait();\r\n            data = buffer_[idx];\r\n            if (data)\r\n            {\r\n                return data;\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nclass condition_variable\r\n{\r\n    eventcount ec_;\r\n\r\npublic:\r\n    void wait(mutex& mtx)\r\n    {\r\n        ec_.prepare_wait();\r\n        mtx.unlock();\r\n        ec_.wait();\r\n        mtx.lock();\r\n    }\r\n\r\n    void signal()\r\n    {\r\n        ec_.notify_one();\r\n    }\r\n\r\n    void broadcast()\r\n    {\r\n        ec_.notify_all();\r\n    }\r\n}; \r\n\r\n\r\nstruct eventcount_test : rl::test_suite<eventcount_test, 2>\r\n{\r\n    void thread(unsigned index)\r\n    {\r\n        delete ec_thread::current();\r\n        (void)index;\r\n    }\r\n};\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params p;\r\n    //p.iteration_count = 1000000;\r\n    rl::simulate<eventcount_test>(p);\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/eventcount/msvc8/eventcount.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"eventcount\", \"eventcount.vcproj\", \"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/eventcount/msvc8/eventcount.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"eventcount\"\r\n\tProjectGUID=\"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}\"\r\n\tRootNamespace=\"eventcount\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tNam
e=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\
t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\eventcount.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/eventcount/msvc9/eventcount.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"eventcount\", \"eventcount.vcproj\", \"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/eventcount/msvc9/eventcount.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"eventcount\"\r\n\tProjectGUID=\"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}\"\r\n\tRootNamespace=\"eventcount\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tNam
e=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\
t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\eventcount.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/eventcount/stdafx.cpp",
    "content": "// stdafx.cpp : source file that includes just the standard includes\r\n// ws_deque.pch will be the pre-compiled header\r\n// stdafx.obj will contain the pre-compiled type information\r\n\r\n#include \"stdafx.h\"\r\n\r\n// TODO: reference any additional headers you need in STDAFX.H\r\n// and not in this file\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/eventcount/stdafx.h",
    "content": "#pragma once\r\n\r\n#define _WIN32_WINNT 0x0500\r\n#include <windows.h>\r\n#include <intrin.h>\r\n#include <stddef.h>\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/examples/amp_condvar.hpp",
    "content": "#pragma once\r\n\r\n\r\nstruct amp_raw_condition_variable_s\r\n{\r\n    CRITICAL_SECTION access_waiting_threads_count_critsec;\r\n    HANDLE wake_waiting_threads_mutex;\r\n    HANDLE waking_waiting_threads_count_control_sem;\r\n    HANDLE finished_waking_waiting_threads_event;\r\n    VAR_T(LONG) waiting_thread_count;\r\n    VAR_T(BOOL) broadcast_in_progress;\r\n};\r\n\r\n\r\nstruct amp_raw_mutex_s\r\n{\r\n    CRITICAL_SECTION critical_section;\r\n    BOOL is_locked;\r\n};\r\n\r\ntypedef amp_raw_condition_variable_s* amp_raw_condition_variable_t;\r\ntypedef amp_raw_mutex_s* amp_raw_mutex_t;\r\n\r\nint const AMP_SUCCESS = 0;\r\n\r\n\r\nint amp_raw_mutex_init(amp_raw_mutex_t mutex)\r\n{\r\n    InitializeCriticalSectionAndSpinCount(&mutex->critical_section, 1);\r\n    \r\n    mutex->is_locked = FALSE;\r\n    \r\n    return AMP_SUCCESS;\r\n}\r\n \r\n \r\n \r\nint amp_raw_mutex_finalize(amp_raw_mutex_t mutex)\r\n{\r\n    assert(NULL != mutex);\r\n    \r\n    int retval = AMP_SUCCESS;\r\n    \r\n    DeleteCriticalSection(&mutex->critical_section);\r\n    \r\n    return retval;\r\n}\r\n \r\n \r\n \r\nint amp_raw_mutex_lock(amp_raw_mutex_t mutex)\r\n{\r\n    assert(NULL != mutex);\r\n    \r\n    EnterCriticalSection(&mutex->critical_section);\r\n    \r\n    mutex->is_locked = TRUE;\r\n    \r\n    return AMP_SUCCESS;\r\n}\r\n \r\nint amp_raw_mutex_unlock(amp_raw_mutex_t mutex)\r\n{\r\n    assert(NULL != mutex);\r\n \r\n    mutex->is_locked = FALSE;\r\n    LeaveCriticalSection(&mutex->critical_section);\r\n    \r\n    return AMP_SUCCESS;\r\n}\r\n\r\n\r\n\r\n\r\nint amp_raw_condition_variable_init(amp_raw_condition_variable_t cond)\r\n{\r\n    InitializeCriticalSectionAndSpinCount(&cond->access_waiting_threads_count_critsec, 1);\r\n    \r\n    cond->wake_waiting_threads_mutex = CreateMutex(0, 0, 0);\r\n\r\n    cond->waking_waiting_threads_count_control_sem = CreateSemaphore(NULL, /* No inheritance to child processes */\r\n                                        
                             0, /* Initially no threads can pass */\r\n                                                                     LONG_MAX, /* Max semaphore count */\r\n                                                                     NULL); /* Only intra-process semaphore */\r\n\r\n    cond->finished_waking_waiting_threads_event = CreateEvent(NULL, /* Default security and no inheritance to child processes */\r\n                                                              FALSE, /* No manual reset */\r\n                                                              0, /* Initially not signaled */\r\n                                                              NULL /* Not inter-process available */\r\n                                                              );\r\n    \r\n    \r\n    cond->VAR(waiting_thread_count) = 0l;\r\n    cond->VAR(broadcast_in_progress) = FALSE;\r\n    \r\n    return AMP_SUCCESS;\r\n}\r\n \r\n \r\n \r\nint amp_raw_condition_variable_finalize(amp_raw_condition_variable_t cond)\r\n{\r\n    DeleteCriticalSection(&cond->access_waiting_threads_count_critsec);\r\n    \r\n    CloseHandle(cond->wake_waiting_threads_mutex);\r\n    CloseHandle(cond->waking_waiting_threads_count_control_sem);\r\n    CloseHandle(cond->finished_waking_waiting_threads_event);\r\n    \r\n    int ret_error_code = AMP_SUCCESS;\r\n    return ret_error_code;\r\n}\r\n \r\n\r\n\r\nint amp_raw_condition_variable_signal(amp_raw_condition_variable_t cond)\r\n{\r\n    WaitForSingleObject(cond->wake_waiting_threads_mutex,\r\n                                                  INFINITE);\r\n    BOOL at_least_one_waiting_thread = (0l != cond->VAR(waiting_thread_count));\r\n    \r\n    if (at_least_one_waiting_thread) {\r\n        LONG prev_sem_count = 0;\r\n        ReleaseSemaphore(cond->waking_waiting_threads_count_control_sem,\r\n                                                     1,\r\n                                                     &prev_sem_count /* No 
interest in the previous sem count. */\r\n                                                     );\r\n\r\n        WaitForSingleObject(cond->finished_waking_waiting_threads_event,\r\n                                                     INFINITE);\r\n    }\r\n    \r\n    ReleaseMutex(cond->wake_waiting_threads_mutex);\r\n    \r\n    return AMP_SUCCESS;\r\n}\r\n\r\n\r\nint amp_raw_condition_variable_broadcast(amp_raw_condition_variable_t cond)\r\n{\r\n    WaitForSingleObject(cond->wake_waiting_threads_mutex,\r\n                                                  INFINITE);\r\n    \r\n    LONG const waiting_thread_count = cond->VAR(waiting_thread_count);\r\n    \r\n    if (0 < waiting_thread_count) {\r\n        \r\n        cond->VAR(broadcast_in_progress) = TRUE;\r\n        /* Releasing the sem here and waiting on it should update the memory of\r\n* the waiting threads to see that a broadcast is in progress.\r\n*/\r\n        LONG prev_sem_count = 0;\r\n        /* Assuming that less threads exist than max possible semaphore count.\r\n* TODO: @todo Decide if to spin here if the assumption doesn't hold\r\n* true in the future?\r\n*/\r\n        ReleaseSemaphore(cond->waking_waiting_threads_count_control_sem,\r\n                                                     waiting_thread_count,\r\n                                                     &prev_sem_count /* No interest in the previous sem count. 
*/\r\n                                                     );\r\n\r\n        WaitForSingleObject(cond->finished_waking_waiting_threads_event,\r\n                                                     INFINITE);\r\n        cond->VAR(broadcast_in_progress) = FALSE;\r\n        \r\n    }\r\n    \r\n    ReleaseMutex(cond->wake_waiting_threads_mutex);\r\n    \r\n    return AMP_SUCCESS;\r\n}\r\n\r\n\r\n\r\n\r\nint amp_raw_condition_variable_wait(amp_raw_condition_variable_t cond,\r\n                                    struct amp_raw_mutex_s *mutex)\r\n{\r\n    WaitForSingleObject(cond->wake_waiting_threads_mutex,\r\n                                                  INFINITE);\r\n    {\r\n        ++(cond->VAR(waiting_thread_count));\r\n    }\r\n    \r\n    amp_raw_mutex_unlock(mutex);\r\n\r\n    SignalObjectAndWait(cond->wake_waiting_threads_mutex, cond->waking_waiting_threads_count_control_sem, INFINITE, FALSE);\r\n \r\n    BOOL broadcast_in_progress = FALSE;\r\n    LONG count = 0;\r\n    EnterCriticalSection(&cond->access_waiting_threads_count_critsec);\r\n    {\r\n        count = --(cond->VAR(waiting_thread_count));\r\n        \r\n        broadcast_in_progress = cond->VAR(broadcast_in_progress);\r\n    }\r\n    LeaveCriticalSection(&cond->access_waiting_threads_count_critsec);\r\n    \r\n    BOOL all_waiting_threads_awake = TRUE;\r\n    if (TRUE == broadcast_in_progress && count > 0) {\r\n        all_waiting_threads_awake = FALSE;\r\n    }\r\n    \r\n    if (TRUE == all_waiting_threads_awake) {\r\n        SetEvent(cond->finished_waking_waiting_threads_event);\r\n    }\r\n    \r\n    \r\n    amp_raw_mutex_lock(mutex);\r\n    \r\n    return AMP_SUCCESS;\r\n}\r\n\r\n\r\nstruct amp_condvar_test : rl::test_suite<amp_condvar_test, 2>\r\n{\r\n    VAR_T(int) data;\r\n    amp_raw_mutex_s mtx;\r\n    amp_raw_condition_variable_s cv;\r\n\r\n    void before()\r\n    {\r\n        VAR(data) = 0;\r\n        amp_raw_mutex_init(&mtx);\r\n        amp_raw_condition_variable_init(&cv);\r\n   
 }\r\n\r\n    void after()\r\n    {\r\n        amp_raw_mutex_finalize(&mtx);\r\n        amp_raw_condition_variable_finalize(&cv);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            amp_raw_mutex_lock(&mtx);\r\n            data($) += 1;\r\n            amp_raw_condition_variable_signal(&cv);\r\n            amp_raw_mutex_unlock(&mtx);\r\n        }\r\n        else\r\n        {\r\n            amp_raw_mutex_lock(&mtx);\r\n            while (0 == data($))\r\n            {\r\n                amp_raw_condition_variable_wait(&cv, &mtx);\r\n            }\r\n            amp_raw_mutex_unlock(&mtx);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct amp_condvar_test2 : rl::test_suite<amp_condvar_test2, 4>\r\n{\r\n    VAR_T(int) stage;\r\n    amp_raw_mutex_s mtx;\r\n    amp_raw_condition_variable_s cv;\r\n\r\n    void before()\r\n    {\r\n        VAR(stage) = 0;\r\n        amp_raw_mutex_init(&mtx);\r\n        amp_raw_condition_variable_init(&cv);\r\n    }\r\n\r\n    void after()\r\n    {\r\n        amp_raw_mutex_finalize(&mtx);\r\n        amp_raw_condition_variable_finalize(&cv);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            amp_raw_mutex_lock(&mtx);\r\n            stage($) += 1;\r\n            amp_raw_condition_variable_broadcast(&cv);\r\n            while (stage($) < 2)\r\n                amp_raw_condition_variable_wait(&cv, &mtx);\r\n            amp_raw_mutex_unlock(&mtx);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            amp_raw_mutex_lock(&mtx);\r\n            while (stage($) != 1)\r\n                amp_raw_condition_variable_wait(&cv, &mtx);\r\n            stage($) += 1;\r\n            amp_raw_condition_variable_broadcast(&cv);\r\n            amp_raw_mutex_unlock(&mtx);\r\n        }\r\n        else if (2 == index)\r\n        {\r\n            amp_raw_mutex_lock(&mtx);\r\n            while (stage($) != 2)\r\n                
amp_raw_condition_variable_wait(&cv, &mtx);\r\n            stage($) += 1;\r\n            //amp_raw_condition_variable_broadcast(&cv);\r\n            amp_raw_mutex_unlock(&mtx);\r\n            amp_raw_condition_variable_signal(&cv);\r\n        }\r\n        else if (3 == index)\r\n        {\r\n            amp_raw_mutex_lock(&mtx);\r\n            while (stage($) != 3)\r\n                amp_raw_condition_variable_wait(&cv, &mtx);\r\n            amp_raw_mutex_unlock(&mtx);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/examples/examples.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"spsc_overwrite_queue.hpp\"\r\n#include \"amp_condvar.hpp\"\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params p;\r\n    p.iteration_count = 10000;\r\n    //p.search_type = rl::sched_bound;\r\n    //p.context_bound = 3;\r\n\r\n    rl::execute<spsc_overwrite_queue_test, 2>(p);\r\n    rl::simulate<amp_condvar_test>(p);\r\n    rl::simulate<amp_condvar_test2>(p);\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/examples/msvc9/examples.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"examples\", \"examples.vcproj\", \"{6CC59CF8-408B-441B-8F65-15651210CB82}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/examples/msvc9/examples.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9.00\"\r\n\tName=\"examples\"\r\n\tProjectGUID=\"{1EB73A6F-7F94-4ED4-8EB3-C245E773207A}\"\r\n\tRootNamespace=\"examples\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"0\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n
\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVer
ifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\amp_condvar.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\examples.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\spsc_overwrite_queue.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/examples/spsc_overwrite_queue.hpp",
    "content": "#pragma once\r\n\r\n\r\ntemplate<typename T>\r\nclass queue\r\n{\r\npublic:\r\n    queue(size_t count)\r\n    {\r\n        assert(count >= 6);\r\n        sema = CreateSemaphore(0, 0, 1, 0);\r\n        waiting.store(false, std::memory_order_relaxed);\r\n        deq_node = 0;\r\n        block = new node [count];\r\n        block->next.store(0, std::memory_order_relaxed);\r\n        full_tail = block;\r\n        full_head.store(block, std::memory_order_relaxed);\r\n        free_head = block + 1;\r\n        free_tail.store(block + count - 1, std::memory_order_relaxed);\r\n        free_tail.load(std::memory_order_relaxed)->next.store(0, std::memory_order_relaxed);\r\n        for (size_t i = 1; i != count - 1; i += 1)\r\n            block[i].next.store(block + i + 1, std::memory_order_relaxed);\r\n    }\r\n\r\n    ~queue()\r\n    {\r\n        CloseHandle(sema);\r\n        delete [] block;\r\n    }\r\n\r\n    VAR_T(T)& enqueue_prepare()\r\n    {\r\n        return full_tail->data;\r\n    }\r\n\r\n    void enqueue_commit()\r\n    {\r\n        node* n = get_free_node();\r\n        n->next.store(0, std::memory_order_release);\r\n        full_tail->next.store(n, std::memory_order_seq_cst);\r\n        bool signal = waiting.load(std::memory_order_seq_cst);\r\n        full_tail = n;\r\n        if (signal)\r\n        {\r\n            waiting.store(false, std::memory_order_relaxed);\r\n            ReleaseSemaphore(sema, 1, 0);\r\n        }\r\n    }\r\n\r\n    VAR_T(T)& dequeue_prepare()\r\n    {\r\n        deq_node = get_full_node();\r\n        return deq_node->data;\r\n    }\r\n\r\n    void dequeue_commit()\r\n    {\r\n        deq_node->next.store(0, std::memory_order_release);\r\n        node* prev = free_tail.exchange(deq_node, std::memory_order_acq_rel);\r\n        prev->next.store(deq_node, std::memory_order_release);\r\n    }\r\n\r\nprivate:\r\n    struct node\r\n    {\r\n        std::atomic<node*>  next;\r\n        VAR_T(T)            data;\r\n    };\r\n\r\n  
  node*                   block;\r\n    node*                   full_tail;\r\n    node*                   free_head;\r\n    node*                   deq_node;\r\n    char                    pad [64];\r\n    std::atomic<node*>      full_head;\r\n    std::atomic<node*>      free_tail;\r\n    std::atomic<bool>       waiting;\r\n    HANDLE                  sema;\r\n\r\n    node* get_free_node()\r\n    {\r\n        for (;;)\r\n        {\r\n            node* n = free_head;\r\n            node* next = n->next.load(std::memory_order_acquire);\r\n            if (next)\r\n            {\r\n                free_head = next;\r\n                return n;\r\n            }\r\n\r\n            n = full_head.load(std::memory_order_acquire);\r\n            next = n->next.load(std::memory_order_acquire);\r\n            if (next)\r\n            {\r\n                if (full_head.compare_exchange_strong(n, next, std::memory_order_seq_cst))\r\n                {\r\n                    //node* n2 = free_head;\r\n                    //node* next2 = n2->next.load(std::memory_order_acquire);\r\n                    //if (next2)\r\n                    //{\r\n                    //    n->next.store(0, std::memory_order_release);\r\n                    //    node* prev = free_tail.exchange(n, std::memory_order_acq_rel);\r\n                    //    prev->next.store(n, std::memory_order_release);\r\n                    //    free_head = next2;\r\n                    //    return n2;\r\n                    //}\r\n                    //else\r\n                    {\r\n                        return n;\r\n                    }\r\n                }\r\n            }\r\n        }\r\n    }\r\n\r\n    node* get_full_node()\r\n    {\r\n        node* n = full_head.load(std::memory_order_acquire);\r\n        for (;;)\r\n        {\r\n            node* next = n->next.load(std::memory_order_acquire);\r\n            if (next == 0)\r\n            {\r\n                waiting.store(true, 
std::memory_order_seq_cst);\r\n                n = full_head.load(std::memory_order_seq_cst);\r\n                next = n->next.load(std::memory_order_acquire);\r\n                if (next)\r\n                {\r\n                    waiting.store(false, std::memory_order_relaxed);\r\n                }\r\n                else\r\n                {\r\n                    WaitForSingleObject(sema, INFINITE);\r\n                    n = full_head.load(std::memory_order_acquire);\r\n                    continue;\r\n                }\r\n            }\r\n            if (full_head.compare_exchange_strong(n, next, std::memory_order_acq_rel))\r\n                return n;\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nunsigned RL_STDCALL consumer_thread(void* ctx)\r\n{\r\n    queue<int>* q = (queue<int>*)ctx;\r\n    int prev_data = -1;\r\n    for (;;)\r\n    {\r\n        VAR_T(int)& data0 = q->dequeue_prepare();\r\n        int data = VAR(data0);\r\n        assert(data > prev_data);\r\n        prev_data = data;\r\n        q->dequeue_commit();\r\n        //printf(\"%d\\n\", prev_data);\r\n        if (prev_data == 11)\r\n            break;\r\n        //Sleep(5);\r\n    }\r\n    return 0;\r\n}\r\n\r\nunsigned RL_STDCALL producer_thread(void* ctx)\r\n{\r\n    queue<int>* q = (queue<int>*)ctx;\r\n    for (int i = 0; i != 12; i += 1)\r\n    {\r\n        VAR_T(int)& data = q->enqueue_prepare();\r\n        VAR(data) = i;\r\n        q->enqueue_commit();\r\n        //Sleep(1);\r\n    }\r\n    return 0;\r\n}\r\n\r\nvoid spsc_overwrite_queue_test()\r\n{\r\n    queue<int> q (6);\r\n    HANDLE th [2];\r\n    th[0] = (HANDLE)_beginthreadex(0, 0, consumer_thread, &q, 0, 0);\r\n    th[1] = (HANDLE)_beginthreadex(0, 0, producer_thread, &q, 0, 0);\r\n    WaitForMultipleObjects(2, th, 1, INFINITE);\r\n\r\n    for (int i = 100; i != 104; i += 1)\r\n    {\r\n        VAR_T(int)& data = q.enqueue_prepare();\r\n        VAR(data) = i;\r\n        q.enqueue_commit();\r\n    }\r\n\r\n    for (int i = 100; i != 
104; i += 1)\r\n    {\r\n        VAR_T(int)& data0 = q.dequeue_prepare();\r\n        int data = VAR(data0);\r\n        assert(data == i);\r\n        q.dequeue_commit();\r\n    }\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/examples/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/examples/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#define RL_FORCE_SEQ_CST\r\n#define RL_MSVC_OUTPUT\r\n//#define RL_DEBUGBREAK_ON_FAILURE\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n#include \"../../relacy/stdlib/windows.hpp\"\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/java_ws_deque/java_ws_deque.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../../relacy/relacy_java.hpp\"\r\n\r\n\r\nusing rl::jvar;\r\nusing rl::jvolatile;\r\nusing rl::mutex;\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass ws_deque\r\n{\r\npublic:\r\n    ws_deque()\r\n    {\r\n        m_mask($) = initial_size - 1;\r\n        m_headIndex($) = 0;\r\n        m_tailIndex($) = 0;\r\n        m_array($) = new jvar<T> [initial_size];\r\n        m_arraySize($) = initial_size;\r\n    }\r\n\r\n    bool IsEmpty()\r\n    {\r\n        return m_headIndex($) >= m_tailIndex($);\r\n    }\r\n\r\n    size_t Count()\r\n    {\r\n        return m_tailIndex($) - m_headIndex($);\r\n    }\r\n\r\n    void push(T item)\r\n    {\r\n        size_t tail = m_tailIndex($);\r\n        if (tail <= m_headIndex($) + m_mask($))\r\n        {\r\n            m_array($)[tail & m_mask($)]($) = item;\r\n            m_tailIndex($) = tail + 1;\r\n        }\r\n        else\r\n        {\r\n            m_foreignLock.lock($);\r\n            size_t head = m_headIndex($);\r\n            size_t count = Count();\r\n            if (count >= m_mask($))\r\n            {\r\n                size_t arraySize = m_arraySize($);\r\n                size_t mask = m_mask($);\r\n                jvar<T>* newArray = new jvar<T> [arraySize * 2];\r\n                jvar<T>* arr = m_array($);\r\n                for (size_t i = 0; i != count; ++i)\r\n                    newArray[i]($) = arr[(i + head) & mask]($);\r\n                m_array($) = newArray;\r\n                m_arraySize($) = arraySize * 2;\r\n                m_headIndex($) = 0;\r\n                m_tailIndex($) = count;\r\n                tail = count;\r\n                m_mask($) = (mask * 2) | 1;\r\n            }\r\n            m_array($)[tail & m_mask($)]($) = item;\r\n            m_tailIndex($) = tail + 1;\r\n            m_foreignLock.unlock($);\r\n        }\r\n    }\r\n\r\n    bool pop(T& item)\r\n    {\r\n        size_t tail = m_tailIndex($);\r\n        if (tail == 0)\r\n            
return false;\r\n        tail -= 1;\r\n        m_tailIndex($) = tail;\r\n        if (m_headIndex($) <= tail)\r\n        {\r\n            item = m_array($)[tail & m_mask($)]($);\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            m_foreignLock.lock($);\r\n            if (m_headIndex($) <= tail)\r\n            {\r\n                item = m_array($)[tail & m_mask($)]($);\r\n                m_foreignLock.unlock($);\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                m_tailIndex($) = tail + 1;\r\n                m_foreignLock.unlock($);\r\n                return false;\r\n            }\r\n        }\r\n    }\r\n\r\n    bool steal(T& item)\r\n    {\r\n        if (false == m_foreignLock.try_lock($))\r\n            return false;\r\n        size_t head = m_headIndex($);\r\n        m_headIndex($) = head + 1;\r\n        if (head < m_tailIndex($))\r\n        {\r\n            item = m_array($)[head & m_mask($)]($);\r\n            m_foreignLock.unlock($);\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            m_headIndex($) = head;\r\n            m_foreignLock.unlock($);\r\n            return false;\r\n        }\r\n    }\r\n\r\nprivate:\r\n    static size_t const initial_size = 2;\r\n    jvar<jvar<T>*> m_array;\r\n    jvar<size_t> m_mask;\r\n    jvar<size_t> m_arraySize;\r\n    jvolatile<size_t> m_headIndex;\r\n    jvolatile<size_t> m_tailIndex;\r\n    mutex m_foreignLock;\r\n};\r\n\r\n\r\n\r\n\r\nstruct ws_deque_test : rl::test_suite<ws_deque_test, 2>\r\n{\r\n    ws_deque<int> q;\r\n    bool state [2];\r\n\r\n    void before()\r\n    {\r\n        state[0] = true;\r\n        state[1] = true;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        RL_ASSERT(state[0] == false);\r\n        RL_ASSERT(state[1] == false);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            q.push(1);\r\n            
q.push(2);\r\n\r\n            int item = 0;\r\n            bool res = q.pop(item);\r\n            RL_ASSERT(res && item == 2);\r\n            RL_ASSERT(state[1]);\r\n            state[1] = false;\r\n\r\n            item = 0;\r\n            res = q.pop(item);\r\n            if (res)\r\n            {\r\n                RL_ASSERT(state[0]);\r\n                state[0] = false;\r\n            }\r\n\r\n            item = 0;\r\n            res = q.pop(item);\r\n            RL_ASSERT(res == false);\r\n        }\r\n        else\r\n        {\r\n            int item = 0;\r\n            bool res = q.steal(item);\r\n            if (res)\r\n            {\r\n                RL_ASSERT(item == 1);\r\n                RL_ASSERT(state[0]);\r\n                state[0] = false;\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::simulate<ws_deque_test>();\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/java_ws_deque/msvc8/java_ws_deque.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"java_ws_deque\", \"java_ws_deque.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/java_ws_deque/msvc8/java_ws_deque.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"java_ws_deque\"\r\n\tProjectGUID=\"{9E88433F-779E-4461-9963-35E3338873AC}\"\r\n\tRootNamespace=\"java_ws_deque\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\
r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/
>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\java_ws_deque.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/java_ws_deque/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/java_ws_deque/stdafx.h",
    "content": "#pragma once\r\n\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mpmc/mpmc.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n\r\n#ifdef RL_TEST\r\n#define ATOMIC(x) rl::atomic<x>\r\n#define VAR(x) rl::var<x>\r\n#define ATOMIC_FETCH_ADD(x, v) x($).fetch_add(v)\r\n#define ATOMIC_COMPARE_EXCHANGE(x, c, v) x($).compare_exchange(c, v)\r\n#define LOAD_ACQ(x) x($).load(rl::memory_order_acquire)\r\n#define STORE_REL(x, v) x($).store(v, rl::memory_order_release)\r\n#else\r\n#define ATOMIC(x) x volatile\r\n#define VAR(x) x\r\n#define ATOMIC_FETCH_ADD(x, v) _InterlockedExchangeAdd((long*)&x, v)\r\n#define ATOMIC_COMPARE_EXCHANGE(x, c, v) interlocked_compare_exchange(x, c, v)\r\n#define LOAD_ACQ(x) x\r\n#define STORE_REL(x, v) x = v\r\n\r\ntemplate<typename T>\r\nbool interlocked_compare_exchange(T& x, T& c, T v)\r\n{\r\n    T c0 = _InterlockedCompareExchange((long*)&x), v, c);\r\n    if (c0 == c)\r\n    {\r\n        return true;\r\n    }\r\n    else\r\n    {\r\n        c = c0;\r\n        return false;\r\n    }\r\n}\r\n#endif\r\n\r\n//#include \"pcx.h\"\r\n\r\n\r\n/*\r\ntemplate<typename T>\r\nclass mpmcq\r\n{\r\npublic:\r\n\tmpmcq()\r\n\t{\r\n        STORE_REL(head_, alloc_block());\r\n        STORE_REL(tail_, LOAD_ACQ(head_));\r\n\t}\r\n\r\n\tvoid enqueue(T v)\r\n\t{\r\n        for (;;)\r\n        {\r\n            block* b = LOAD_ACQ(head_);\r\n            unsigned raw = ATOMIC_FETCH_ADD(b->state_, state_head_inc);\r\n            unsigned idx = raw >> state_head_pos;\r\n            if (idx < item_count)\r\n            {\r\n                STORE_REL(b->data_[idx], v);\r\n                return;\r\n            }\r\n            unsigned last = raw & state_last_msk;\r\n            if (0 == last)\r\n            {\r\n                ATOMIC_COMPARE_EXCHANGE(head_, b, b+1);\r\n            }\r\n            else\r\n            {\r\n                block* b2 = LOAD_ACQ(b->next_);\r\n                if (b2)\r\n                {\r\n                    ATOMIC_COMPARE_EXCHANGE(head_, b, b2);\r\n                }\r\n                else\r\n                {\r\n         
           b2 = alloc_block();\r\n                    block* b3 = 0;\r\n                    if (ATOMIC_COMPARE_EXCHANGE(b->next_, b3, b2))\r\n                    {\r\n                        ATOMIC_COMPARE_EXCHANGE(head_, b, b2);\r\n                    }\r\n                    else\r\n                    {\r\n                        for (;;)\r\n                        {\r\n                            b = LOAD_ACQ(head_);\r\n                            while (0 == (LOAD_ACQ(b->state_) & state_last_msk))\r\n                                b = b + 1;\r\n                            while (LOAD_ACQ(b->next_))\r\n                                b = LOAD_ACQ(b->next_) + block_count - 1;\r\n                            b3 = 0;\r\n                            if (ATOMIC_COMPARE_EXCHANGE(b->next_, b3, b2))\r\n                                break;\r\n                        }\r\n                    }\r\n                }\r\n            }\r\n        }\r\n\t}\r\n\r\n\tT dequeue()\r\n\t{\r\n        for (;;)\r\n        {\r\n            block* b = LOAD_ACQ(tail_);\r\n            unsigned cmp = LOAD_ACQ(b->state_);\r\n            unsigned tail = cmp & (state_last_msk - 1);\r\n            if (tail < item_count)\r\n            {\r\n                unsigned head = cmp >> state_head_pos;\r\n                if (tail < head)\r\n                {\r\n                    unsigned xchg = cmp + state_tail_inc;\r\n                    if (ATOMIC_COMPARE_EXCHANGE(b->state_, cmp, xchg))\r\n                    {\r\n                        for (;;)\r\n                        {\r\n                            T v = LOAD_ACQ(b->data_[tail]);\r\n                            if (v != T())\r\n                                return v;\r\n                            rl::yield($);\r\n                        }\r\n                    }\r\n                }\r\n                else\r\n                {\r\n                    return T();\r\n                }\r\n            }\r\n            else\r\n            
{\r\n                unsigned last = cmp & state_last_msk;\r\n                if (0 == last)\r\n                {\r\n                    ATOMIC_COMPARE_EXCHANGE(tail_, b, b+1);\r\n                }\r\n                else\r\n                {\r\n                    block* b2 = LOAD_ACQ(b->next_);\r\n                    if (0 == b2)\r\n                        return T();\r\n                    ATOMIC_COMPARE_EXCHANGE(tail_, b, b2);\r\n                }\r\n            }\r\n        }\r\n\t}\r\n\r\nprivate:\r\n    static unsigned const state_head_pos = 7;\r\n    static unsigned const state_head_inc = 1 << state_head_pos;\r\n    static unsigned const state_last_msk = 1 << 6;\r\n    static unsigned const state_tail_inc = 1 << 0;\r\n\r\n    static unsigned const item_count = 2;\r\n    static unsigned const block_count = 16;\r\n\r\n    struct block\r\n    {\r\n        //unsigned          head_ : 24;\r\n        //unsigned          last_ : 1;\r\n        //unsigned          tail_ : 7;\r\n        ATOMIC(unsigned)    state_;\r\n        ATOMIC(block*)      next_;\r\n        ATOMIC(T)           data_ [item_count];\r\n    };\r\n\r\n    struct superblock\r\n    {\r\n        block           blocks_ [block_count];\r\n    };\r\n\r\n\tchar                pad0_ [64];\r\n    ATOMIC(block*)      head_;\r\n\tchar                pad1_ [64];\r\n    ATOMIC(block*)      tail_;\r\n\tchar                pad2_ [64];\r\n\r\n    block* alloc_block()\r\n    {\r\n        superblock* sb = RL_NEW(superblock);\r\n        for (int x = 0; x != block_count; ++x)\r\n        {\r\n            block* b = &sb->blocks_[x];\r\n            STORE_REL(b->state_, 0);\r\n            STORE_REL(b->next_, 0);\r\n            for (int y = 0; y != item_count; ++y)\r\n            {\r\n                STORE_REL(b->data_[y], 0);\r\n            }\r\n        }\r\n        STORE_REL(sb->blocks_[block_count - 1].state_, 1 * state_head_inc + 1 * state_tail_inc + state_last_msk);\r\n        return &sb->blocks_[0];\r\n    
}\r\n};\r\n\r\n\r\nstruct test_mpmc : rl::test_suite<test_mpmc, 6>\r\n{\r\n    mpmcq<int> q;\r\n\r\n    void thread(unsigned idx)\r\n    {\r\n        if (idx < thread_count / 2)\r\n        {\r\n            for (int i = 0; i != 2; ++i)\r\n                q.enqueue(1);\r\n        }\r\n        else\r\n        {\r\n            for (int i = 0; i != 2; ++i)\r\n                q.dequeue();\r\n        }\r\n    }\r\n};\r\n*/\r\n\r\n\r\n\r\nstruct thread_node\r\n{\r\n    rl::var<thread_node*>       next;\r\n    rl::var<size_t>             count;\r\n    rl::var<size_t>             unconsumed;\r\n    rl::HANDLE                  sema;\r\n    rl::CRITICAL_SECTION        mtx;\r\n};\r\n\r\n\r\nvoid on_thread_exit(thread_node*& t_thread_node)\r\n{\r\n    thread_node* head = t_thread_node;\r\n    thread_node* my = 0;\r\n    if (head)\r\n    {\r\n        rl::EnterCriticalSection(&head->mtx, $);\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        if (head->next($))\r\n        {\r\n            my = head->next($);\r\n            head->next($) = (thread_node*)my->next($);\r\n        }\r\n        else\r\n        {\r\n            my = head;\r\n        }\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        rl::LeaveCriticalSection(&head->mtx, $);\r\n\r\n        while (my->unconsumed($))\r\n        {\r\n            rl::WaitForSingleObject(my->sema, rl::RL_INFINITE, $);\r\n            my->unconsumed($) -= 1;\r\n        }\r\n\r\n        rl::DeleteCriticalSection(&my->mtx, $);\r\n        rl::CloseHandle(my->sema, $);\r\n        RL_DELETE(my);\r\n    }\r\n\r\n}\r\n\r\nstruct eventcount\r\n{\r\n    eventcount()\r\n    {\r\n        root($) = 0;\r\n        rl::InitializeCriticalSection(&mtx, $);\r\n    }\r\n\r\n    ~eventcount()\r\n    {\r\n        rl::DeleteCriticalSection(&mtx, $);\r\n    }\r\n\r\n    void prepare_wait(thread_node*& t_thread_node)\r\n    {\r\n        thread_node* my = 0;\r\n        thread_node* head = t_thread_node;\r\n        if 
(head)\r\n        {\r\n            rl::EnterCriticalSection(&head->mtx, $);\r\n            std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n            //RL_ASSERT(head->status == stat_root);\r\n            RL_ASSERT (root($) != head);\r\n            if (head->next($))\r\n            {\r\n                my = head->next($);\r\n                head->next($) = (thread_node*)my->next($);\r\n                my->next($) = 0;\r\n\r\n                //node_status st;\r\n                //if (stat_bucket != (st = (node_status)_InterlockedExchange(&my->status, stat_private)))\r\n                //    __asm int 3;\r\n                RL_ASSERT (0 == my->count($));\r\n            }\r\n            else\r\n            {\r\n                my = head;\r\n\r\n                //node_status st;\r\n                //if (stat_root != (st = (node_status)_InterlockedExchange(&my->status, stat_private)))\r\n                //    __asm int 3;\r\n                RL_ASSERT(0 == my->count($));\r\n            }\r\n            std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n            rl::LeaveCriticalSection(&head->mtx, $);\r\n        }\r\n        else\r\n        {\r\n            my = RL_NEW thread_node;\r\n            my->next($) = 0;\r\n            my->count($) = 0;\r\n            my->unconsumed($) = 0;\r\n            my->sema = rl::CreateSemaphore(0, 0, LONG_MAX, 0, $);\r\n            //my->status = stat_private;\r\n            rl::InitializeCriticalSection(&my->mtx, $);\r\n        }\r\n\r\n        while (my->unconsumed($))\r\n        {\r\n            rl::WaitForSingleObject(my->sema, rl::RL_INFINITE, $);\r\n            my->unconsumed($) -= 1;\r\n        }\r\n\r\n        RL_ASSERT(0 == my->next($));\r\n        RL_ASSERT(0 == my->count($));\r\n        //if (my->status != stat_private) __asm int 3;\r\n\r\n        rl::EnterCriticalSection(&mtx, $);\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        RL_ASSERT(root($) != my);\r\n        if 
(root($))\r\n        {\r\n            my->next($) = (thread_node*)((thread_node*)root($))->next($);\r\n            ((thread_node*)root($))->next($) = my;\r\n\r\n            //node_status st;\r\n            //if (stat_private != (st = (node_status)_InterlockedExchange(&my->status, stat_bucket)))\r\n            //    __asm int 3;\r\n\r\n            my = root($);\r\n        }\r\n        else\r\n        {\r\n            root($) = my;\r\n\r\n            //node_status st;\r\n            //if (stat_private != (st = (node_status)_InterlockedExchange(&my->status, stat_root)))\r\n            //    __asm int 3;\r\n        }\r\n        ((thread_node*)root($))->count($) += 1;\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        rl::LeaveCriticalSection(&mtx, $);\r\n        t_thread_node = my;\r\n    }\r\n\r\n    void wait(thread_node*& t_thread_node)\r\n    {\r\n        thread_node* head = t_thread_node;\r\n        if (head == root($))\r\n        {\r\n            rl::WaitForSingleObject(head->sema, rl::RL_INFINITE, $);\r\n        }\r\n        else\r\n        {\r\n            rl::EnterCriticalSection(&head->mtx, $);\r\n            std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n            head->unconsumed($) += 1;\r\n            std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n            rl::LeaveCriticalSection(&head->mtx, $);\r\n        }\r\n    }\r\n\r\n    void retire_wait(thread_node*& t_thread_node)\r\n    {\r\n        thread_node* head = t_thread_node;\r\n        if (head == root($))\r\n        {\r\n            rl::EnterCriticalSection(&mtx, $);\r\n            std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n            if (head == root($))\r\n            {\r\n                thread_node* my = 0;\r\n                head->count($) -= 1;\r\n                if (head->next($))\r\n                {\r\n                    my = head->next($);\r\n                    head->next($) = (thread_node*)my->next($);\r\n            
        my->next($) = 0;\r\n                }\r\n                else\r\n                {\r\n                    my = head;\r\n                    root($) = 0;\r\n                }\r\n                std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n                rl::LeaveCriticalSection(&mtx, $);\r\n                //my->status = stat_root;\r\n                t_thread_node = my;\r\n                return;\r\n            }\r\n            std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n            rl::LeaveCriticalSection(&mtx, $);\r\n        }\r\n        rl::EnterCriticalSection(&head->mtx, $);\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        head->unconsumed($) += 1;\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        rl::LeaveCriticalSection(&head->mtx, $);\r\n    }\r\n\r\n    void signal_all()\r\n    {\r\n        //std::\r\n        //_mm_mfence();\r\n        thread_node* head = root($);\r\n        if (0 == head)\r\n            return;\r\n        rl::EnterCriticalSection(&mtx, $);\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        if (head != root($))\r\n        {\r\n            std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n            rl::LeaveCriticalSection(&mtx, $);\r\n            return;\r\n        }\r\n        size_t count = head->count($);\r\n        head->count($) = 0;\r\n        root($) = 0;\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        rl::LeaveCriticalSection(&mtx, $);\r\n        rl::ReleaseSemaphore(head->sema, count, 0, $);\r\n    }\r\n\r\n    std::atomic<thread_node*>       root;\r\n    rl::CRITICAL_SECTION            mtx;\r\n\r\n}; \r\n\r\n\r\n\r\n\r\nstruct test_ec : rl::test_suite<test_ec, 8>\r\n{\r\n    std::atomic<int> x [2];\r\n    eventcount ec;\r\n\r\n    void before()\r\n    {\r\n        x[0]($) = 0;\r\n        x[1]($) = 0;\r\n    }\r\n\r\n    void thread(unsigned idx)\r\n    {\r\n        if 
(idx < 4)\r\n        {\r\n            for (int i = 0; i != 3; ++i)\r\n            {\r\n                x[idx % 2]($).fetch_add(1);\r\n                ec.signal_all();\r\n            }\r\n        }\r\n        else\r\n        {\r\n            thread_node* my = 0;\r\n            for (int i = 0; i != 3; ++i)\r\n            {\r\n                for (;;)\r\n                {\r\n                    int cmp = x[idx % 2]($);\r\n                    if (cmp > 0)\r\n                    {\r\n                        if (x[idx % 2]($).compare_exchange(cmp, cmp - 1))\r\n                            break;\r\n                    }\r\n                    else\r\n                    {\r\n                        for (;;)\r\n                        {\r\n                            ec.prepare_wait(my);\r\n                            cmp = x[idx % 2]($);\r\n                            if (cmp > 0)\r\n                            {\r\n                                ec.retire_wait(my);\r\n                                break;\r\n                            }\r\n                            ec.wait(my);\r\n                            cmp = x[idx % 2]($);\r\n                            if (cmp > 0)\r\n                            {\r\n                                break;\r\n                            }\r\n                        }\r\n                    }\r\n                }\r\n            }\r\n            on_thread_exit(my);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params p;\r\n    p.iteration_count = 20000000;\r\n    p.initial_state = \"10000000\";\r\n    rl::simulate<test_ec>(p);\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mpmc/msvc8/mpmc.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"mpmc\", \"mpmc.vcproj\", \"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mpmc/msvc8/mpmc.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"mpmc\"\r\n\tProjectGUID=\"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}\"\r\n\tRootNamespace=\"mpmc\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMake
Tool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\
"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\mpmc.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\pcx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mpmc/pcx.h",
    "content": "#pragma once\r\n\r\n#include <intrin.h>\r\n#pragma intrinsic (_InterlockedExchangeAdd)\r\n#pragma intrinsic (_InterlockedCompareExchange)\r\n\r\n//#define PCX_DEBUG\r\n\r\n#ifdef PCX_DEBUG\r\n#include <sstream>\r\n#include <windows.h>\r\n#endif\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\nsize_t const cacheline_size = 64;\r\n\r\nstruct pcx_node\r\n{\r\n    typedef void            (*pcx_dtor_t)(pcx_node*);\r\n    ATOMIC(pcx_node*)       pcx_next_;\r\n    ATOMIC(pcx_dtor_t)      pcx_dtor_;\r\n};\r\n\r\nnamespace pcx_int\r\n{\r\n    unsigned const word_bits = 32;\r\n    unsigned const collector_bits = 4;\r\n    unsigned const collector_count = 1 << collector_bits;\r\n    unsigned const counter_inc = 1 << (collector_bits * 2);\r\n    unsigned const is_current_inc = 1;\r\n    unsigned const back_link_inc = 2;\r\n\r\n    struct master;\r\n    struct collector;\r\n\r\n    struct local_collector\r\n    {\r\n        pcx_node*               defer_head_;\r\n        pcx_node                defer_tail_;\r\n        unsigned                defer_size_;\r\n    };\r\n\r\n    struct thread_int\r\n    {\r\n        pcx_int::master*        master_;\r\n        pcx_int::collector*     collectors_;\r\n        unsigned                recursion_count_;\r\n        unsigned                is_acquired_;\r\n        unsigned                collector_index_;\r\n        unsigned                last_seen_collector_index_;\r\n        unsigned                flush_tail_;\r\n        pcx_node*               defer_head_;\r\n        pcx_node                defer_tail_;\r\n        unsigned                defer_size_;\r\n        unsigned                promote_;\r\n        local_collector         local_collectors_ [collector_count];\r\n    };\r\n}\r\n\r\nclass pcx_thread : private pcx_int::thread_int\r\n{\r\npublic:\r\n    static pcx_thread& get();\r\n\r\n    void acquire();\r\n    void release();\r\n    void defer(pcx_node* node, pcx_node::pcx_dtor_t dtor);\r\n    void flush();\r\n    void 
promote();\r\n    void quiescent();\r\n\r\n    void init();\r\n    void deinit();\r\n\r\nprivate:\r\n    unsigned acquire_impl();\r\n    void release_impl(unsigned, unsigned);\r\n    void flush_impl();\r\n    void local_flush();\r\n    void quiescent_impl();\r\n    friend void init();\r\n    friend void deinit();\r\n    friend void thread_callback(bool);\r\n};\r\n\r\nnamespace pcx_int\r\n{\r\n    struct master\r\n    {\r\n        char                pad0_ [64];\r\n\r\n        unsigned            garbage_threshold_;\r\n\r\n        char                pad1_ [64];\r\n\r\n        struct state_part\r\n        {\r\n            unsigned        current_collector_ : collector_bits;\r\n            unsigned        collector_tail_ : collector_bits;\r\n            unsigned        outer_counter_ : word_bits - 2 * collector_bits;\r\n        };\r\n\r\n        union state\r\n        {\r\n             long           whole_;\r\n             state_part     part_;\r\n        };\r\n\r\n        state               state_;\r\n\r\n        char                pad2_ [64];\r\n\r\n        state               state_copy_;\r\n\r\n        char                pad3_ [64];\r\n    };\r\n\r\n    struct collector\r\n    {\r\n        char                pad0_ [64];\r\n\r\n        pcx_node*           defer_list_head_;\r\n        unsigned            defer_list_size_;\r\n\r\n        char                pad1_ [64];\r\n\r\n        struct state_part\r\n        {\r\n            unsigned        is_current_ : 1;\r\n            unsigned        back_link_ : 1;\r\n            unsigned        pad_ : collector_bits * 2 - 2;\r\n            unsigned        inner_counter_ : word_bits - 2 * collector_bits;\r\n        };\r\n\r\n        union state\r\n        {\r\n             long           whole_;\r\n             state_part     part_;\r\n        };\r\n\r\n        state               state_;\r\n\r\n        char                pad2_ [64];\r\n    };\r\n\r\n    __declspec(selectany)\r\n    master                  
g_master;\r\n    __declspec(selectany)\r\n    collector               g_collectors [collector_count];\r\n    __declspec(selectany, thread)\r\n    thread_int*             g_thread_instance;\r\n\r\n    typedef void (__stdcall nt_tls_cb_t)(void*, unsigned long, void*);\r\n    nt_tls_cb_t on_tls_callback;\r\n\r\n    #pragma data_seg(push, old_seg)\r\n    #pragma data_seg(\".CRT$XLB\")\r\n    __declspec(selectany, dllexport)\r\n    nt_tls_cb_t* volatile p_thread_callback = on_tls_callback;\r\n    #pragma data_seg(pop, old_seg)\r\n\r\n    inline void __stdcall on_tls_callback(void*, unsigned long reason, void*)\r\n    {\r\n        if (1 == reason)\r\n        {\r\n            init();\r\n            thread_callback(true);\r\n        }\r\n        else if (0 == reason)\r\n        {\r\n            thread_callback(false);\r\n            deinit();\r\n        }\r\n        if (2 == reason)\r\n        {\r\n            thread_callback(true);\r\n        }\r\n        else if (3 == reason)\r\n        {\r\n            thread_callback(false);\r\n        }\r\n    }\r\n}\r\n\r\ninline void init()\r\n{\r\n    using namespace pcx_int;\r\n    master& m = g_master;\r\n    m.garbage_threshold_ = 128;\r\n    m.state_.part_.current_collector_ = 0;\r\n    m.state_.part_.collector_tail_ = 0;\r\n    m.state_.part_.outer_counter_ = 0;\r\n    m.state_copy_.part_.current_collector_ = 0;\r\n    m.state_copy_.part_.collector_tail_ = 0;\r\n    m.state_copy_.part_.outer_counter_ = 0;\r\n    for (unsigned i = 0; i != collector_count; ++i)\r\n    {\r\n        collector& c = g_collectors[i];\r\n        c.defer_list_head_ = 0;\r\n        c.defer_list_size_ = 0;\r\n        c.state_.part_.is_current_ = 1;\r\n        c.state_.part_.back_link_ = 1;\r\n        c.state_.part_.inner_counter_ = 0;\r\n    }\r\n    g_collectors[0].state_.part_.back_link_ = 0;\r\n}\r\n\r\ninline void deinit()\r\n{\r\n    using namespace pcx_int;\r\n    pcx_thread::get().release_impl(g_master.state_.part_.current_collector_, 
is_current_inc);\r\n}\r\n\r\ninline void thread_callback(bool init)\r\n{\r\n    if (init)\r\n    {\r\n        g_thread_instance = RL_NEW pcx_thread ();\r\n        pcx_thread::get().init();\r\n    }\r\n    else\r\n    {\r\n        pcx_thread::get().deinit();\r\n        RL_DELETE(g_thread_instance);\r\n        g_thread_instance = 0;\r\n    }\r\n}\r\n\r\ninline pcx_thread& pcx_thread::get()\r\n{\r\n    return static_cast<pcx_thread&>(*pcx_int::g_thread_instance);\r\n}\r\n\r\ninline unsigned pcx_thread::acquire_impl()\r\n{\r\n    using namespace pcx_int;\r\n    long const prev =\r\n        _InterlockedExchangeAdd(\r\n            &master_->state_.whole_, counter_inc);\r\n    master::state_part u = {prev};\r\n\r\n#ifdef PCX_DEBUG\r\n    std::ostringstream ss;\r\n    ss << \"[PCX] thread \" << this << \" acquire \" << u.current_collector_ << \"\\n\";\r\n    OutputDebugStringA(ss.str().c_str());\r\n#endif\r\n\r\n    if (u.current_collector_ == flush_tail_\r\n        && local_collectors_[flush_tail_].defer_size_)\r\n    {\r\n        local_flush();\r\n    }\r\n\r\n    return u.current_collector_;\r\n}\r\n\r\ninline void pcx_thread::release_impl(unsigned index, unsigned count)\r\n{\r\n    using namespace pcx_int;\r\n    collector& c = collectors_[index];\r\n    unsigned const prev =\r\n        _InterlockedExchangeAdd(\r\n            &c.state_.whole_, (unsigned)-(int)count);\r\n\r\n#ifdef PCX_DEBUG\r\n    std::ostringstream ss;\r\n    ss << \"[PCX] thread \" << this << \" release \" << index << \"\\n\";\r\n    OutputDebugStringA(ss.str().c_str());\r\n#endif\r\n\r\n    if (0 == prev - count)\r\n    {\r\n        pcx_node* curr = c.defer_list_head_;\r\n        while (curr)\r\n        {\r\n            pcx_node* next = curr->pcx_next_;\r\n            curr->pcx_dtor_(curr);\r\n            curr = next;\r\n        }\r\n        c.defer_list_head_ = 0;\r\n        c.defer_list_size_ = 0;\r\n        c.state_.part_.back_link_ = 1;\r\n        c.state_.part_.is_current_ = 1;\r\n\r\n        
long u;\r\n        if (index != collector_count - 1)\r\n            u = collector_count;\r\n        else\r\n            u = -(long)(collector_count * (collector_count - 1));\r\n        _InterlockedExchangeAdd(&master_->state_.whole_, u);\r\n\r\n        release_impl((index + 1) % collector_count, back_link_inc);\r\n    }\r\n}\r\n\r\ninline void pcx_thread::flush_impl()\r\n{\r\n    using namespace pcx_int;\r\n    _mm_mfence();\r\n    master::state state = master_->state_;\r\n    last_seen_collector_index_ = state.part_.current_collector_;\r\n    collector& gc = collectors_[state.part_.current_collector_];\r\n    local_collector& lc = local_collectors_[state.part_.current_collector_];\r\n    lc.defer_head_->pcx_next_ = defer_tail_.pcx_next_;\r\n    lc.defer_head_ = defer_tail_.pcx_next_;\r\n    lc.defer_size_ += defer_size_;\r\n    defer_head_ = &defer_tail_;\r\n    defer_tail_.pcx_next_ = 0;\r\n    defer_size_ = 0;\r\n    if (master_->garbage_threshold_ < lc.defer_size_ || promote_)\r\n    {\r\n        master::state cmp;\r\n        master::state val;\r\n        do\r\n        {\r\n            cmp = master_->state_;\r\n            if (cmp.part_.current_collector_ != last_seen_collector_index_)\r\n            {\r\n                promote_ = 0;\r\n                return;\r\n            }\r\n            unsigned next_index = (last_seen_collector_index_ + 1) % collector_count;\r\n            if (cmp.part_.collector_tail_ == next_index)\r\n                return;\r\n            val = cmp;\r\n            val.part_.current_collector_ += 1;\r\n            val.part_.outer_counter_ = 0;\r\n        }\r\n        while (cmp.whole_ != _InterlockedCompareExchange(\r\n          (long*)&master_->state_.whole_, val.whole_, cmp.whole_));\r\n        last_seen_collector_index_ = val.part_.current_collector_;\r\n        promote_ = 0;\r\n        _InterlockedIncrement((long*)&master_->state_copy_.whole_);\r\n        _InterlockedExchangeAdd((long*)&gc.state_.whole_,\r\n            
cmp.part_.outer_counter_ * counter_inc - is_current_inc);\r\n    }\r\n}\r\n\r\n__declspec(noinline)\r\ninline void pcx_thread::local_flush()\r\n{\r\n    using namespace pcx_int;\r\n    if (flush_tail_ == master_->state_.part_.collector_tail_)\r\n        return;\r\n\r\n#ifdef PCX_DEBUG\r\n    std::ostringstream ss;\r\n    ss << \"[PCX] thread \" << this << \" flush   \" << flush_tail_ << \"\\n\";\r\n    OutputDebugStringA(ss.str().c_str());\r\n#endif\r\n\r\n    local_collector& lc = local_collectors_[flush_tail_];\r\n    pcx_node* curr = lc.defer_tail_.pcx_next_;\r\n    while (curr)\r\n    {\r\n#ifdef PCX_DEBUG\r\n    std::ostringstream ss;\r\n    ss << \"[PCX] thread \" << this << \" destroy \" << curr << \"\\n\";\r\n    OutputDebugStringA(ss.str().c_str());\r\n#endif\r\n\r\n        pcx_node* next = curr->pcx_next_;\r\n        curr->pcx_dtor_(curr);\r\n        curr = next;\r\n    }\r\n    lc.defer_head_ = &lc.defer_tail_;\r\n    lc.defer_tail_.pcx_next_ = 0;\r\n    lc.defer_size_ = 0;\r\n    flush_tail_ = (flush_tail_ + 1) % collector_count;\r\n}\r\n\r\n__declspec(noinline)\r\ninline void pcx_thread::quiescent_impl()\r\n{\r\n    using namespace pcx_int;\r\n    if (defer_size_)\r\n        flush_impl();\r\n    release_impl(collector_index_, counter_inc);\r\n    collector_index_ = acquire_impl();\r\n}\r\n\r\ninline void pcx_thread::acquire()\r\n{\r\n    using namespace pcx_int;\r\n    recursion_count_ += 1;\r\n    if (1 != recursion_count_)\r\n        return;\r\n    if (is_acquired_)\r\n        return;\r\n    collector_index_ = acquire_impl();\r\n    last_seen_collector_index_ = collector_index_;\r\n    is_acquired_ = 1;\r\n}\r\n\r\ninline void pcx_thread::release()\r\n{\r\n    using namespace pcx_int;\r\n    recursion_count_ -= 1;\r\n    if (0 == recursion_count_)\r\n    {\r\n        if (master_->state_copy_.part_.current_collector_ != collector_index_\r\n            || promote_)\r\n        {\r\n            if (defer_size_)\r\n                flush_impl();\r\n        
    release_impl(collector_index_, counter_inc);\r\n            is_acquired_ = 0;\r\n        }\r\n    }\r\n    if (flush_tail_ != last_seen_collector_index_)\r\n    {\r\n        local_flush();\r\n    }\r\n}\r\n\r\ninline void pcx_thread::quiescent()\r\n{\r\n    if (master_->state_copy_.part_.current_collector_ != collector_index_\r\n        || promote_)\r\n    {\r\n        quiescent_impl();\r\n    }\r\n    if (flush_tail_ != last_seen_collector_index_)\r\n    {\r\n        local_flush();\r\n    }\r\n}\r\n\r\ninline void pcx_thread::defer(pcx_node* node, pcx_node::pcx_dtor_t dtor)\r\n{\r\n    using namespace pcx_int;\r\n    node->pcx_next_ = 0;\r\n    node->pcx_dtor_ = dtor;\r\n    defer_head_->pcx_next_ = node;\r\n    defer_head_ = node;\r\n    defer_size_ += 1;\r\n}\r\n\r\ninline void pcx_thread::flush()\r\n{\r\n    using namespace pcx_int;\r\n    if (recursion_count_)\r\n        return;\r\n    if (0 == is_acquired_)\r\n        return;\r\n    if (defer_size_)\r\n        flush_impl();\r\n    release_impl(collector_index_, counter_inc);\r\n    is_acquired_ = 0;\r\n}\r\n\r\ninline void pcx_thread::promote()\r\n{\r\n    promote_ = 1;\r\n}\r\n\r\ninline void pcx_thread::init()\r\n{\r\n    using namespace pcx_int;\r\n    master_ = &g_master;\r\n    collectors_ = g_collectors;\r\n    defer_head_ = &defer_tail_;\r\n    defer_tail_.pcx_next_ = 0;\r\n    for (unsigned i = 0; i != collector_count; ++i)\r\n    {\r\n        local_collectors_[i].defer_head_ = &local_collectors_[i].defer_tail_;\r\n    }\r\n}\r\n\r\ninline void pcx_thread::deinit()\r\n{\r\n    flush();\r\n}\r\n\r\n}\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mpmc/stdafx.cpp",
    "content": "// stdafx.cpp : source file that includes just the standard includes\r\n// ws_deque.pch will be the pre-compiled header\r\n// stdafx.obj will contain the pre-compiled type information\r\n\r\n#include \"stdafx.h\"\r\n\r\n// TODO: reference any additional headers you need in STDAFX.H\r\n// and not in this file\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mpmc/stdafx.h",
    "content": "#pragma once\r\n\r\n#pragma warning (disable: 4201)\r\n\r\n//#define RL_GC\r\n#define RL_MSVC_OUTPUT\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mutex_business_logic/msvc8/mutex_business_logic.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"mutex_business_logic\", \"mutex_business_logic.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mutex_business_logic/msvc8/mutex_business_logic.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"mutex_business_logic\"\r\n\tProjectGUID=\"{B03A7216-E196-44C6-8861-C77D90055512}\"\r\n\tRootNamespace=\"mutex_business_logic\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r
\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopToo
l\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\mutex_business_logic.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mutex_business_logic/mutex_business_logic.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\nclass business_logic\r\n{\r\npublic:\r\n    typedef unsigned account_id_t;\r\n    typedef double balance_t;\r\n\r\n    business_logic()\r\n    {\r\n        pthread_rwlock_init(&accounts_guard, 0);\r\n    }\r\n\r\n    ~business_logic()\r\n    {\r\n        pthread_rwlock_destroy(&accounts_guard);\r\n    }\r\n\r\n    bool add_account(account_id_t acc_id, balance_t balance)\r\n    {\r\n        pthread_rwlock_wrlock(&accounts_guard);\r\n        if (accounts.find(acc_id) != accounts.end())\r\n        {\r\n            pthread_rwlock_unlock(&accounts_guard);\r\n            return false;\r\n        }\r\n        accounts[acc_id].balance = balance;\r\n        pthread_rwlock_unlock(&accounts_guard);\r\n        return true;\r\n    }\r\n\r\n    bool transfer_balance(account_id_t acc_id1, account_id_t acc_id2, balance_t amount)\r\n    {\r\n        if (acc_id1 == acc_id2)\r\n            return true;\r\n        pthread_rwlock_rdlock(&accounts_guard);\r\n        if (accounts.find(acc_id1) == accounts.end()\r\n            || accounts.find(acc_id2) == accounts.end())\r\n        {\r\n            pthread_rwlock_unlock(&accounts_guard);\r\n            return false;\r\n        }\r\n        account_info& acc1 = accounts[acc_id1];\r\n        account_info& acc2 = accounts[acc_id2];\r\n        if (acc_id1 > acc_id2)\r\n        {\r\n            pthread_mutex_lock(&acc1.mtx);\r\n            pthread_mutex_lock(&acc2.mtx);\r\n        }\r\n        else\r\n        {\r\n            pthread_mutex_lock(&acc2.mtx);\r\n            pthread_mutex_lock(&acc1.mtx);\r\n        }\r\n        pthread_rwlock_unlock(&accounts_guard);\r\n\r\n        acc1.balance -= amount;\r\n        acc2.balance += amount;\r\n\r\n        pthread_mutex_unlock(&acc1.mtx);\r\n        pthread_mutex_unlock(&acc2.mtx);\r\n        return true;\r\n    }\r\n\r\nprivate:\r\n    struct account_info\r\n    {\r\n        balance_t balance;\r\n      
  pthread_mutex_t mtx;\r\n\r\n        account_info()\r\n            : balance()\r\n        {\r\n            pthread_mutex_init(&mtx, 0);\r\n        }\r\n\r\n        account_info(account_info const& acc)\r\n            : balance(acc.balance)\r\n        {\r\n            pthread_mutex_init(&mtx, 0);\r\n        }\r\n\r\n        ~account_info()\r\n        {\r\n            pthread_mutex_destroy(&mtx);\r\n        }\r\n    };\r\n\r\n    typedef std::map<account_id_t, account_info> account_map_t;\r\n    account_map_t accounts;\r\n    pthread_rwlock_t accounts_guard;\r\n};\r\n\r\n\r\n\r\n\r\nstruct business_logic_test : rl::test_suite<business_logic_test, 2>\r\n{\r\n    business_logic bl;\r\n\r\n    static size_t const account_count = 4;\r\n\r\n    void before()\r\n    {\r\n        for (size_t i = 0; i != account_count; ++i)\r\n        {\r\n            bool rv = bl.add_account(i, i * 10.0);\r\n            RL_ASSERT(rv);\r\n        }\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        business_logic::account_id_t acc1 = rl::rand(account_count);\r\n        business_logic::account_id_t acc2 = rl::rand(account_count);\r\n        bool rv = bl.transfer_balance(acc1, acc2, 1.0);\r\n        RL_ASSERT(rv);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{    \r\n    rl::simulate<business_logic_test>();\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mutex_business_logic/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/mutex_business_logic/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#define RL_MSVC_OUTPUT\r\n//#define RL_DEBUGBREAK_ON_FAILURE\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/peterson/msvc8/peterson.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"peterson\", \"peterson.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/peterson/msvc8/peterson.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"peterson\"\r\n\tProjectGUID=\"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\n\tRootNamespace=\"peterson\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\
tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\
t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\peterson.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/peterson/msvc9/peterson.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"peterson\", \"peterson.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/peterson/msvc9/peterson.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"peterson\"\r\n\tProjectGUID=\"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\n\tRootNamespace=\"peterson\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"131072\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tNam
e=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\
t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\peterson.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/peterson/peterson.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n#include \"../../relacy/windows.h\"\r\n\r\n\r\nstruct peterson_mutex_test : rl::test_suite<peterson_mutex_test, 2>\r\n{\r\n    std::atomic<int> flag0;\r\n    std::atomic<int> flag1;\r\n    std::atomic<int> turn;\r\n\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        flag0($) = 0;\r\n        flag1($) = 0;\r\n        turn($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            flag0($).store(1);\r\n            turn($).store(1);\r\n\r\n            while (flag1($).load()\r\n                && 1 == turn($).load());\r\n\r\n            data($) = 1;\r\n\r\n            flag0($).store(0);\r\n        }\r\n        else\r\n        {\r\n            flag1($).store(1);\r\n            turn($).store(0);\r\n\r\n            while (flag0($).load()\r\n                && 0 == turn($).load());\r\n\r\n            data($) = 2;\r\n\r\n            flag1($).store(0);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct peterson_mutex_test2 : rl::test_suite<peterson_mutex_test2, 2>\r\n{\r\n    std::atomic<int> flag0;\r\n    std::atomic<int> flag1;\r\n    std::atomic<int> turn;\r\n\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        flag0($) = 0;\r\n        flag1($) = 0;\r\n        turn($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            flag0.store(1, rl::memory_order_relaxed);\r\n            turn.exchange(1, rl::memory_order_acq_rel);\r\n\r\n            while (flag1.load(rl::memory_order_acquire)\r\n                && 1 == turn.load(rl::memory_order_relaxed))\r\n                rl::yield(1, $);\r\n\r\n            data($) = 1;\r\n\r\n            flag0.store(0, rl::memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            flag1.store(1, rl::memory_order_relaxed);\r\n            turn.exchange(0, 
rl::memory_order_acq_rel);\r\n\r\n            while (flag0.load(rl::memory_order_acquire)\r\n                && 0 == turn.load(rl::memory_order_relaxed))\r\n                rl::yield(1, $);\r\n\r\n            data($) = 2;\r\n\r\n            flag1.store(0, rl::memory_order_release);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct peterson_mutex_test3 : rl::test_suite<peterson_mutex_test3, 2>\r\n{\r\n    std::atomic<int> flag0;\r\n    std::atomic<int> flag1;\r\n    std::atomic<int> turn;\r\n\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        flag0($) = 0;\r\n        flag1($) = 0;\r\n        turn($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            flag0.store(1, std::memory_order_relaxed);\r\n            std::atomic_thread_fence(std::memory_order_seq_cst);\r\n            turn.store(1, std::memory_order_relaxed);\r\n            std::atomic_thread_fence(std::memory_order_seq_cst);\r\n\r\n            while (flag1.load(std::memory_order_acquire)\r\n                && 1 == turn.load(std::memory_order_relaxed));\r\n\r\n            data($) = 1;\r\n\r\n            flag0.store(0, std::memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            flag1.store(1, std::memory_order_relaxed);\r\n            std::atomic_thread_fence(std::memory_order_seq_cst);\r\n            turn.store(0, std::memory_order_relaxed);\r\n            std::atomic_thread_fence(std::memory_order_seq_cst);\r\n\r\n            while (flag0.load(std::memory_order_acquire)\r\n                && 0 == turn.load(std::memory_order_relaxed));\r\n\r\n            data($) = 2;\r\n\r\n            flag1.store(0, std::memory_order_release);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n// FAILS WITH DATA RACE\r\nstruct peterson_mutex_test4 : rl::test_suite<peterson_mutex_test4, 2>\r\n{\r\n    std::atomic<int> flag0;\r\n    std::atomic<int> flag1;\r\n    std::atomic<int> turn;\r\n\r\n    rl::var<int> data;\r\n\r\n  
  void before()\r\n    {\r\n        flag0($) = 0;\r\n        flag1($) = 0;\r\n        turn($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            flag0.exchange(1, rl::memory_order_acq_rel);\r\n            turn.store(1, rl::memory_order_release);\r\n\r\n            while (flag1.load(rl::memory_order_acquire)\r\n                && 1 == turn.load(rl::memory_order_acquire))\r\n                rl::yield(1, $);\r\n\r\n            data($) = 1;\r\n\r\n            flag0.store(0, rl::memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            flag1.exchange(1, rl::memory_order_acq_rel);\r\n            turn.store(0, rl::memory_order_release);\r\n\r\n            while (flag0.load(rl::memory_order_acquire)\r\n                && 0 == turn.load(rl::memory_order_relaxed))\r\n                rl::yield(1, $);\r\n\r\n            data($) = 2;\r\n\r\n            flag1.store(0, rl::memory_order_release);\r\n        }\r\n    }\r\n};\r\n\r\n\r\nclass eventcount\r\n{\r\npublic:\r\n    typedef unsigned state_t;\r\n\r\n    eventcount()\r\n    {\r\n        state_.store(0, std::memory_order_relaxed);\r\n        sema_ = CreateSemaphore(0, 0, LONG_MAX, 0);\r\n    }\r\n\r\n    ~eventcount()\r\n    {\r\n        CloseHandle(sema_);\r\n    }\r\n\r\n    state_t prepare()\r\n    {\r\n        return state_.fetch_add(waiters_inc, std::memory_order_seq_cst);\r\n    }\r\n\r\n    void retire()\r\n    {\r\n        state_.fetch_add((state_t)-(int)waiters_inc, std::memory_order_seq_cst);\r\n    }\r\n\r\n    void wait(state_t cmp)\r\n    {\r\n        WaitForSingleObject(sema_, INFINITE);\r\n        state_t cmp0 = state_.load(std::memory_order_seq_cst);\r\n        if ((cmp & generation_mask) == (cmp0 & generation_mask))\r\n        {\r\n            state_.fetch_add((state_t)-(int)waiters_inc, std::memory_order_seq_cst);\r\n            ReleaseSemaphore(sema_, 1, 0);\r\n            SwitchToThread();\r\n        }\r\n    }\r\n\r\n  
  void signal()\r\n    {\r\n        std::atomic_thread_fence(std::memory_order_seq_cst);\r\n        signal_relaxed();\r\n    }\r\n\r\n    void signal_relaxed()\r\n    {\r\n        state_t cmp = state_.load(std::memory_order_seq_cst);\r\n        if (0 == (cmp & waiters_mask))\r\n            return;\r\n        for (;;)\r\n        {\r\n            state_t xchg = (cmp & ~waiters_mask) + generation_inc;\r\n            if (state_.compare_exchange_weak(cmp, xchg, std::memory_order_seq_cst))\r\n            {\r\n                ReleaseSemaphore(sema_, cmp & waiters_mask, 0);\r\n                return;\r\n            }\r\n            if (0 == (cmp & waiters_mask))\r\n                return;\r\n        }\r\n    }\r\n    \r\nprivate:\r\n    std::atomic<state_t> state_;\r\n    HANDLE sema_;\r\n\r\n    static state_t const waiters_inc = 1;\r\n    static state_t const waiters_mask = (1 << 20) - 1;\r\n    static state_t const generation_inc = 1 << 20;\r\n    static state_t const generation_mask = ~waiters_mask;\r\n\r\n    eventcount(eventcount const&);\r\n    eventcount& operator = (eventcount const&);\r\n};\r\n\r\n\r\n\r\n\r\nclass eventcount_blocking\r\n{\r\npublic:\r\n    eventcount_blocking(eventcount& ec)\r\n        : ec_(ec)\r\n    {\r\n        cmp_ = ec_.prepare();\r\n        wait_ = false;\r\n    }\r\n\r\n    void wait()\r\n    {\r\n        RL_ASSERT(false == wait_);\r\n        wait_ = true;\r\n        ec_.wait(cmp_);\r\n    }\r\n\r\n    ~eventcount_blocking()\r\n    {\r\n        if (false == wait_)\r\n            ec_.retire();\r\n    }\r\n\r\nprivate:\r\n    eventcount& ec_;\r\n    eventcount::state_t cmp_;\r\n    bool wait_;\r\n\r\n    eventcount_blocking(eventcount_blocking const&);\r\n    eventcount_blocking& operator = (eventcount_blocking const&);\r\n};\r\n\r\n\r\n\r\n\r\n\r\nstruct signaling_test : rl::test_suite<signaling_test, 6>\r\n{\r\n    //rl::HANDLE              var_wait_for_items;\r\n    //rl::CRITICAL_SECTION    mtx_items_avail;\r\n    
//std::atomic<unsigned>   n_waiting_consumers;\r\n    //rl::var<unsigned>       consumer_wait_generation;\r\n    //rl::var<unsigned>       n_consumers_to_wakeup;\r\n\r\n    eventcount ec_;\r\n\r\n    static int const max_queue_length = 4;\r\n    int queue [max_queue_length];\r\n    int queue_head;\r\n    int queue_tail;\r\n    int queue_head_data;\r\n    int queue_tail_data;\r\n\r\n    void before()\r\n    {\r\n        //var_wait_for_items = rl::CreateEvent(0, 1, 0, 0, $);\r\n        //rl::InitializeCriticalSection(&mtx_items_avail, $);\r\n        //n_waiting_consumers($) = 0;\r\n        //consumer_wait_generation($) = 0;\r\n        //n_consumers_to_wakeup($) = 0;\r\n        for (int i = 0; i != max_queue_length; ++i)\r\n            queue[i] = 0;\r\n        queue_head = 0;\r\n        queue_tail = 0;\r\n        queue_head_data = 0;\r\n        queue_tail_data = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        //rl::CloseHandle(var_wait_for_items, $);\r\n        //rl::DeleteCriticalSection(&mtx_items_avail, $);\r\n    }\r\n\r\n    struct enqueue_desc\r\n    {\r\n        int pos;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"enqueue \" << pos;\r\n        }\r\n    };\r\n\r\n    void enqueue()\r\n    {\r\n        queue[queue_head++] = ++queue_head_data;\r\n        RL_HIST_IMPL(rl::ctx(), $, enqueue_desc) {queue_head - 1} RL_HIST_END();\r\n        signal();\r\n    }\r\n\r\n    void dequeue()\r\n    {\r\n        int my_pos = queue_tail++;\r\n        for (;;)\r\n        {\r\n            if (queue[my_pos])\r\n            {\r\n                RL_ASSERT(queue[my_pos] == my_pos + 1);\r\n                return;\r\n            }\r\n            wait(my_pos);\r\n        }\r\n    }\r\n\r\n    void signal()\r\n    {\r\n        ec_.signal();\r\n        /*\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        if (n_waiting_consumers($).load(std::memory_order_relaxed))\r\n        {\r\n            
rl::EnterCriticalSection(&mtx_items_avail, $);\r\n            if (n_waiting_consumers($).load(std::memory_order_relaxed) > 0)\r\n            {\r\n                consumer_wait_generation($) += 1;\r\n                //RL_ASSERT(n_consumers_to_wakeup($) == 0);\r\n                n_consumers_to_wakeup($) = n_waiting_consumers($).load(std::memory_order_relaxed);\r\n                rl::SetEvent(var_wait_for_items, $);\r\n            }\r\n            rl::LeaveCriticalSection(&mtx_items_avail, $);\r\n        }\r\n        */\r\n    }\r\n\r\n    void wait(int my_pos)\r\n    {\r\n        eventcount_blocking block (ec_);\r\n        if (queue[my_pos])\r\n            return;\r\n        block.wait();\r\n\r\n        /*\r\n        rl::EnterCriticalSection(&mtx_items_avail, $);\r\n        n_waiting_consumers($).store(n_waiting_consumers($).load(std::memory_order_relaxed) + 1, std::memory_order_relaxed);\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n        while (0 == queue[my_pos])\r\n        {\r\n            unsigned my_generation = consumer_wait_generation($);\r\n            for (;;)\r\n            {\r\n                rl::LeaveCriticalSection(&mtx_items_avail, $);\r\n                rl::WaitForSingleObject(var_wait_for_items, rl::RL_INFINITE, $);\r\n                rl::EnterCriticalSection(&mtx_items_avail, $);\r\n                if (n_consumers_to_wakeup($) > 0 && consumer_wait_generation($) != my_generation)\r\n                    break;\r\n            }\r\n            if (--n_consumers_to_wakeup($) == 0)\r\n                rl::ResetEvent(var_wait_for_items, $);\r\n        }\r\n        n_waiting_consumers($).store(n_waiting_consumers($).load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);\r\n        rl::LeaveCriticalSection(&mtx_items_avail, $);\r\n        */\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index < rl::test_suite<signaling_test, 6>::params::thread_count/2+1)\r\n        {\r\n            enqueue();\r\n  
      }\r\n        else\r\n        {\r\n            dequeue();\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params p;\r\n    //p.search_type = rl::fair_context_bound_scheduler_type;\r\n    p.search_type = rl::sched_bound;\r\n    //p.context_bound = 1;\r\n    //p.execution_depth_limit = 100;\r\n    //p.iteration_count = 5000;\r\n    //p.initial_state = \"280572\";\r\n    //rl::simulate<signaling_test>(p);\r\n\r\n    rl::simulate<peterson_mutex_test>();\r\n    rl::simulate<peterson_mutex_test2>(p);\r\n    rl::simulate<peterson_mutex_test3>();\r\n    rl::simulate<peterson_mutex_test4>(p);\r\n}\r\n\r\n\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/peterson/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/peterson/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#define RL_MSVC_OUTPUT\r\n//#define RL_DEBUGBREAK_ON_FAILURE\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/proxy_collector/msvc8/proxy_collector.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"proxy_collector\", \"proxy_collector.vcproj\", \"{31994C0C-3BAD-4F25-8BC8-3206FF349B29}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/proxy_collector/msvc8/proxy_collector.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"proxy_collector\"\r\n\tProjectGUID=\"{31994C0C-3BAD-4F25-8BC8-3206FF349B29}\"\r\n\tRootNamespace=\"ref_counting\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool
\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tInlineFunctionExpansion=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t
\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Profile|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"0\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalOptions=\"/Ob0\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tWholeProgramOptimization=\"false\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r
\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\proxy_collector.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/proxy_collector/msvc9/proxy_collector.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"proxy_collector\", \"proxy_collector.vcproj\", \"{31994C0C-3BAD-4F25-8BC8-3206FF349B29}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/proxy_collector/msvc9/proxy_collector.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"proxy_collector\"\r\n\tProjectGUID=\"{31994C0C-3BAD-4F25-8BC8-3206FF349B29}\"\r\n\tRootNamespace=\"ref_counting\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool
\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tInlineFunctionExpansion=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t
\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Profile|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"0\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalOptions=\"/Ob0\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tWholeProgramOptimization=\"false\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r
\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\original.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\proxy_collector.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\rtl.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/proxy_collector/proxy_collector.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\nstruct pc_sys_anchor;\r\nstruct pc_region;\r\nstruct pc_master;\r\ntypedef pc_region pc_node;\r\ntypedef void (pc_fp_dtor) (pc_region*);\r\ntypedef unsigned __int64 uint64_t;\r\n\r\nstruct pc_sys_anchor\r\n{\r\n    int refcnt;\r\n    pc_region* region;\r\n\r\n    pc_sys_anchor()\r\n    {\r\n    }\r\n\r\n    pc_sys_anchor(int rc, pc_region* r = 0)\r\n    {\r\n        refcnt = rc;\r\n        region = r;\r\n    }\r\n\r\n    bool operator == (pc_sys_anchor const& right) const\r\n    {\r\n        return refcnt == right.refcnt\r\n            && region == right.region;\r\n    }\r\n\r\n    pc_sys_anchor operator + (pc_sys_anchor const& right) const\r\n    {\r\n        pc_sys_anchor res;\r\n        res.refcnt = refcnt + right.refcnt;\r\n        res.region = (pc_region*)((intptr_t)region + (intptr_t)right.region);\r\n        return res;\r\n    }\r\n\r\n    pc_sys_anchor operator - (pc_sys_anchor const& right) const\r\n    {\r\n        pc_sys_anchor res;\r\n        res.refcnt = refcnt - right.refcnt;\r\n        res.region = (pc_region*)((intptr_t)region - (intptr_t)right.region);\r\n        return res;\r\n    }\r\n};\r\n\r\nstd::ostream& operator << (std::ostream& s, pc_sys_anchor const& right)\r\n{\r\n    return s << \"{\" << right.refcnt << \",\" << right.region << \"}\";\r\n}\r\n\r\nstruct pc_region\r\n{\r\n    std::atomic<pc_sys_anchor> next;\r\n    std::atomic<pc_region*> defer;\r\n\r\n    pc_region()\r\n    {\r\n        next($) = pc_sys_anchor(0, 0);\r\n        defer($) = 0;\r\n    }\r\n\r\n    void link(pc_region* next)\r\n    {\r\n        defer.store(next, rl::memory_order_relaxed);\r\n    }\r\n\r\n    void defer_node(pc_region* node)\r\n    {\r\n        pc_region* region = defer.exchange(node, rl::memory_order_release);\r\n        node->defer.store(region, rl::memory_order_relaxed);\r\n    }\r\n};\r\n\r\nstruct pc_master\r\n{\r\n    std::atomic<pc_sys_anchor> head;\r\n    
pc_region stub_region;\r\n    pc_fp_dtor* fp_dtor;\r\n\r\n    pc_master(pc_fp_dtor* const dtor)\r\n    {\r\n        pc_sys_anchor src (0, &stub_region);\r\n        head.store(src, rl::memory_order_relaxed);\r\n        fp_dtor = dtor;\r\n    }\r\n\r\n    pc_region* acquire()\r\n    {\r\n        pc_sys_anchor cmp (head.load(rl::memory_order_relaxed));\r\n        pc_sys_anchor xchg;\r\n        do\r\n        {\r\n            xchg.refcnt = cmp.refcnt + 2;\r\n            xchg.region = cmp.region;\r\n        }\r\n        while (false == head.compare_exchange_weak(cmp, xchg, rl::memory_order_acquire));\r\n        return cmp.region;\r\n    }\r\n\r\n    void release(pc_region* region)\r\n    {\r\n        pc_sys_anchor prev = region->next.fetch_sub(2, rl::memory_order_acq_rel);\r\n        if (prev.refcnt == 3)\r\n            sys_dtor(region);\r\n    }\r\n\r\n    void mutate(pc_region* node)\r\n    {\r\n        pc_sys_anchor src (2, 0);\r\n        node->next.store(src, rl::memory_order_relaxed);\r\n        pc_sys_anchor xchg (0, node);\r\n        pc_sys_anchor cmp = head.load(rl::memory_order_relaxed);\r\n        while (false == head.compare_exchange_weak(cmp, xchg, std::memory_order_acq_rel));\r\n\r\n        pc_sys_anchor cmp2 = cmp.region->next.load(rl::memory_order_relaxed);\r\n        pc_sys_anchor xchg2;\r\n        do\r\n        {\r\n            xchg2 = pc_sys_anchor(cmp2.refcnt, node);\r\n        }\r\n        while (false == cmp.region->next.compare_exchange_weak(cmp2, xchg2, rl::memory_order_release));\r\n\r\n        pc_sys_anchor prev = cmp.region->next.fetch_add(cmp.refcnt + 1, rl::memory_order_acq_rel);\r\n        if (prev.refcnt == -cmp.refcnt)\r\n            sys_dtor(cmp.region);\r\n    }\r\n\r\n    void sys_dtor(pc_region* region)\r\n    {\r\n        int reset = 0;\r\n        pc_region* head = region;\r\n        pc_region* tail = region;\r\n        pc_sys_anchor nx = region->next.load(rl::memory_order_relaxed);\r\n        pc_region* next = nx.region;\r\n\r\n       
 while (next)\r\n        {\r\n            pc_sys_anchor prev = next->next.fetch_sub(2, rl::memory_order_acq_rel);\r\n            if (prev.refcnt != 3)\r\n                break;\r\n            tail = next;\r\n            nx = next->next.load(rl::memory_order_relaxed);\r\n            next = nx.region;\r\n        }\r\n\r\n        nx = tail->next.load(rl::memory_order_relaxed);\r\n        nx.region = 0;\r\n        tail->next.store(nx, rl::memory_order_relaxed);\r\n\r\n        while (head)\r\n        {\r\n            nx = head->next.load(rl::memory_order_relaxed);\r\n            pc_region* const next = nx.region;\r\n\r\n            pc_region* defer = head->defer.load(rl::memory_order_relaxed);\r\n\r\n            nx = head->next.load(rl::memory_order_relaxed);\r\n            RL_ASSERT(nx.refcnt == 1);\r\n\r\n            if (head != &stub_region)\r\n            {\r\n                head->defer.store(defer, rl::memory_order_relaxed);\r\n                defer = head;\r\n            }\r\n            else\r\n            {\r\n                reset = 1;\r\n            }\r\n\r\n            while (defer)\r\n            {\r\n                pc_region* const next = defer->defer.load(rl::memory_order_relaxed);\r\n                fp_dtor(defer);\r\n                defer = next;\r\n            }\r\n            head = next;\r\n        }\r\n\r\n        if (reset)\r\n        {\r\n            stub_region.defer.store(0, rl::memory_order_relaxed);\r\n            mutate(&stub_region);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct foo_node\r\n{\r\n    pc_node pcn;\r\n    std::atomic<foo_node*> next;\r\n    rl::var<int> data;\r\n};\r\n\r\nvoid foo_node_dtor(pc_node* pcn)\r\n{\r\n    // yes, very fragile\r\n    foo_node* const n = (foo_node*)pcn;\r\n    delete n;\r\n}\r\n\r\nstruct foo_list\r\n{\r\n    std::atomic<foo_node*> head;\r\n    pc_master pc;\r\n\r\n    foo_list()\r\n        : head(0)\r\n        , pc(foo_node_dtor)\r\n    {\r\n    }\r\n};\r\n\r\nstruct proxy_collector_test : 
rl::test_suite<proxy_collector_test, 4>\r\n{\r\n    foo_list m_list;\r\n\r\n    void before()\r\n    {\r\n        m_list.head($) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        foo_node* node = new foo_node;\r\n        m_list.pc.mutate(&node->pcn);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index < 2)\r\n        {\r\n            pc_region* pcr = m_list.pc.acquire();\r\n            for (int i = 0; i != 4; ++i)\r\n            {\r\n                foo_node* node = m_list.head.load(rl::memory_order_acquire);\r\n                while (node)\r\n                {\r\n                    foo_node* const next = node->next.load(rl::memory_order_acquire);\r\n                    intptr_t volatile data = node->data($);\r\n                    (void)data;\r\n                    node = next;\r\n                }\r\n                if (2 == i)\r\n                {\r\n                    m_list.pc.release(pcr);\r\n                    pcr = m_list.pc.acquire();\r\n                }\r\n            }\r\n            m_list.pc.release(pcr);\r\n        }\r\n        else\r\n        {\r\n            pc_region* pcr = m_list.pc.acquire();\r\n            for (int i = 0; i != 4; ++i)\r\n            {\r\n                if (0 == (i % 2))\r\n                {\r\n                    foo_node* node = new foo_node;\r\n                    node->data($) = 1;\r\n                    foo_node* cmp = m_list.head.load(rl::memory_order_relaxed);\r\n                    do\r\n                    {\r\n                        node->next.store(cmp, rl::memory_order_relaxed);\r\n                    }\r\n                    while (false == m_list.head.compare_exchange_weak(cmp, node, rl::memory_order_release));\r\n                }\r\n                else\r\n                {\r\n                    foo_node* node = m_list.head.load(rl::memory_order_acquire);\r\n                    foo_node* next;\r\n                    do\r\n                    {\r\n                        if 
(0 == node)\r\n                            break;\r\n                        next = node->next.load(rl::memory_order_relaxed);\r\n                    }\r\n                    while (false == m_list.head.compare_exchange_weak(node, next, rl::memory_order_acquire));\r\n\r\n                    if (node)\r\n                    {\r\n                        //if (1 == i)\r\n                        {\r\n                            m_list.pc.mutate(&node->pcn);\r\n                        }\r\n                        //else\r\n                        //{\r\n                        //    pcr->defer_node(&node->pcn);\r\n                        //}\r\n                    }\r\n                }\r\n                if (i % 2)\r\n                {\r\n                    m_list.pc.release(pcr);\r\n                    pcr = m_list.pc.acquire();\r\n                }\r\n            }\r\n            m_list.pc.release(pcr);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params params;\r\n    params.iteration_count = 1000;\r\n    rl::simulate<proxy_collector_test>(params);\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/proxy_collector/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/proxy_collector/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n\r\n#include <sstream>\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ref_counting/msvc8/ref_counting.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ref_counting\", \"ref_counting.vcproj\", \"{31994C0C-3BAD-4F25-8BC8-3206FF349B28}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ref_counting/msvc8/ref_counting.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"ref_counting\"\r\n\tProjectGUID=\"{31994C0C-3BAD-4F25-8BC8-3206FF349B28}\"\r\n\tRootNamespace=\"ref_counting\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\
n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tInlineFunctionExpansion=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\
t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\ref_counting.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ref_counting/msvc9/ref_counting.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ref_counting\", \"ref_counting.vcproj\", \"{31994C0C-3BAD-4F25-8BC8-3206FF349B28}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ref_counting/msvc9/ref_counting.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"ref_counting\"\r\n\tProjectGUID=\"{31994C0C-3BAD-4F25-8BC8-3206FF349B28}\"\r\n\tRootNamespace=\"ref_counting\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"131072\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\
t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tInlineFunctionExpansion=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"
\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\ref_counting.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ref_counting/ref_counting.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\nstruct rc_object\r\n{\r\n    std::atomic<int> rc;\r\n    rl::var<int> data;\r\n\r\n    void acquire()\r\n    {\r\n        rc.fetch_add(1, rl::memory_order_acquire);\r\n    }\r\n\r\n    void release()\r\n    {\r\n        if (1 == rc.fetch_sub(1, rl::memory_order_release))\r\n        {\r\n            rc.load(rl::memory_order_acquire);\r\n            data($) = 0;\r\n            delete this;\r\n        }\r\n    }\r\n\r\n    rc_object(int data)\r\n        : rc(1)\r\n        , data(data)\r\n    {\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nvoid post_to_channel(rl::atomic<rc_object*>& ch, rc_object* obj)\r\n{\r\n    obj->acquire();\r\n    rl::backoff b;\r\n    for (;;)\r\n    {\r\n        rc_object* cmp = 0;\r\n        if (ch.compare_exchange_weak(cmp, obj, rl::memory_order_release))\r\n            break;\r\n        b.yield($);\r\n    }\r\n}\r\n\r\nrc_object* get_from_channel(rl::atomic<rc_object*>& ch)\r\n{\r\n    return ch.exchange(0, rl::memory_order_acquire);\r\n}\r\n\r\n\r\n\r\n\r\nstruct ref_counting_test : rl::test_suite<ref_counting_test, 2>\r\n{\r\n    std::atomic<rc_object*> channel;\r\n\r\n    void before()\r\n    {\r\n        channel($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            rc_object* obj = new rc_object (rand());\r\n            post_to_channel(channel, obj);\r\n            int data = obj->data($);\r\n            (void)data;\r\n            obj->release();\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            rl::backoff b;\r\n            for (;;)\r\n            {\r\n                rc_object* obj = get_from_channel(channel);\r\n                if (obj)\r\n                {\r\n                    int data = obj->data($);\r\n                    (void)data;\r\n                    obj->release();\r\n                    break;\r\n                }\r\n                else\r\n          
      {\r\n                    b.yield($);\r\n                }\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct ref_counting_test2 : rl::test_suite<ref_counting_test2, 3>\r\n{\r\n    std::atomic<rc_object*> channel01;\r\n    std::atomic<rc_object*> channel02;\r\n    std::atomic<rc_object*> channel12;\r\n    std::atomic<rc_object*> channel21;\r\n\r\n    void before()\r\n    {\r\n        channel01($) = 0;\r\n        channel02($) = 0;\r\n        channel12($) = 0;\r\n        channel21($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            {\r\n                rc_object* obj1 = new rc_object (rand());\r\n                post_to_channel(channel01, obj1);\r\n                volatile int data = obj1->data($);\r\n                (void)data;\r\n                obj1->release();\r\n            }\r\n\r\n            {\r\n                rc_object* obj2 = new rc_object (rand());\r\n                post_to_channel(channel02, obj2);\r\n                volatile int data = obj2->data($);\r\n                (void)data;\r\n                obj2->release();\r\n            }\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            rl::backoff b;\r\n            bool ch0 = false;\r\n            bool ch2 = false;\r\n            while (!ch0 || !ch2)\r\n            {\r\n                {\r\n                    rc_object* obj = get_from_channel(channel01);\r\n                    if (obj)\r\n                    {\r\n                        post_to_channel(channel12, obj);\r\n                        volatile int data = obj->data($);\r\n                        (void)data;\r\n                        obj->release();\r\n                        ch0 = true;\r\n                    }\r\n                    else\r\n                    {\r\n                        b.yield($);\r\n                    }\r\n                }\r\n                {\r\n                    rc_object* obj = 
get_from_channel(channel21);\r\n                    if (obj)\r\n                    {\r\n                        volatile int data = obj->data($);\r\n                        (void)data;\r\n                        obj->release();\r\n                        ch2 = true;\r\n                    }\r\n                    else\r\n                    {\r\n                        b.yield($);\r\n                    }\r\n                }\r\n            }\r\n        }\r\n        else\r\n        {\r\n            rl::backoff b;\r\n            bool ch0 = false;\r\n            bool ch1 = false;\r\n            while (!ch0 || !ch1)\r\n            {\r\n                {\r\n                    rc_object* obj = get_from_channel(channel02);\r\n                    if (obj)\r\n                    {\r\n                        post_to_channel(channel21, obj);\r\n                        volatile int data = obj->data($);\r\n                        (void)data;\r\n                        obj->release();\r\n                        ch0 = true;\r\n                    }\r\n                    else\r\n                    {\r\n                        b.yield($);\r\n                    }\r\n                }\r\n                {\r\n                    rc_object* obj = get_from_channel(channel12);\r\n                    if (obj)\r\n                    {\r\n                        volatile int data = obj->data($);\r\n                        (void)data;\r\n                        obj->release();\r\n                        ch1 = true;\r\n                    }\r\n                    else\r\n                    {\r\n                        b.yield($);\r\n                    }\r\n                }\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct ref_counting_test3 : rl::test_suite<ref_counting_test3, 2>\r\n{\r\n    std::atomic<rc_object*> channel;\r\n\r\n    void before()\r\n    {\r\n        channel($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == 
index)\r\n        {\r\n            rc_object* obj = new rc_object (rand());\r\n            post_to_channel(channel, obj);\r\n            volatile int data = obj->data($);\r\n            (void)data;\r\n            obj->release();\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            rl::backoff b;\r\n            rc_object* obj = 0;\r\n            for (;;)\r\n            {\r\n                obj = get_from_channel(channel);\r\n                if (obj)\r\n                    break;\r\n                else\r\n                    b.yield($);\r\n            }\r\n            obj->acquire();\r\n            obj->release();\r\n            //volatile int data = obj->data($);\r\n            //(void)data;\r\n            obj->release();\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params params;\r\n    params.context_bound = 2;\r\n    params.iteration_count = 10000;\r\n    rl::simulate<ref_counting_test>(params);\r\n    std::cout << \"count: \" << params.stop_iteration << std::endl;\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ref_counting/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ref_counting/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/smr/msvc8/smr.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"smr\", \"smr.vcproj\", \"{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/smr/msvc8/smr.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"smr\"\r\n\tProjectGUID=\"{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}\"\r\n\tRootNamespace=\"smr\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"196613\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\
t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tEnableFunctionLevelLinking=\"true\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTo
ol\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\smr.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/smr/msvc9/smr.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"smr\", \"smr.vcproj\", \"{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/smr/msvc9/smr.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"smr\"\r\n\tProjectGUID=\"{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}\"\r\n\tRootNamespace=\"smr\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"196613\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\
t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tEnableFunctionLevelLinking=\"true\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTo
ol\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\smr.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/smr/smr.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\nunsigned const thread_count = 3;\r\nunsigned const node_count = 6;\r\n\r\n\r\nstruct smr_test : rl::test_suite<smr_test, thread_count>\r\n{\r\n    struct node\r\n    {\r\n        std::atomic<node*> next_;\r\n        rl::var<int> data_;\r\n    };\r\n\r\n    std::atomic<node*> head_;\r\n\r\n    std::atomic<node*> hp_ [thread_count];\r\n    rl::var<node*> defer_ [thread_count][thread_count];\r\n    rl::var<int> defer_size_ [thread_count];\r\n\r\n    void before()\r\n    {\r\n        head_.store(0, std::memory_order_relaxed);\r\n\r\n        for (size_t i = 0; i != thread_count; ++i)\r\n        {\r\n            hp_[i].store(0, std::memory_order_relaxed);\r\n            VAR(defer_size_[i]) = 0;\r\n            for (size_t j = 0; j != thread_count; ++j)\r\n                VAR(defer_[i][j]) = 0;\r\n        }\r\n    }\r\n\r\n    void push(unsigned index, int data)\r\n    {\r\n        node* n = new node ();\r\n        n->VAR(data_) = data;\r\n        node* next = head_.load(std::memory_order_relaxed);\r\n        for (;;)\r\n        {\r\n            n->next_.store(next, rl::memory_order_relaxed);\r\n            if (head_.compare_exchange_weak(next, n, rl::memory_order_release))\r\n                break;\r\n        }\r\n    }\r\n\r\n    int pop(unsigned index)\r\n    {\r\n        node* n = 0;\r\n        for (;;)\r\n        {\r\n            n = smr_acquire(index, head_);\r\n            if (0 == n)\r\n                break;\r\n            node* next = n->next_.load(rl::memory_order_relaxed);\r\n            if (head_.compare_exchange_weak(n, next, rl::memory_order_acquire))\r\n                break;\r\n            smr_release(index);\r\n        }\r\n        smr_release(index);\r\n        if (n)\r\n        {\r\n            int data = n->VAR(data_);\r\n            smr_defer(index, n);\r\n            return data;\r\n        }\r\n        else\r\n        {\r\n            return 0;\r\n       
 }\r\n    }\r\n\r\n    void smr_pump(unsigned index)\r\n    {\r\n        node* hp [thread_count] = {};\r\n        for (size_t i = 0; i != thread_count; ++i)\r\n        {\r\n            hp[i] = hp_[i].load(std::memory_order_relaxed);\r\n        }\r\n\r\n        for (size_t i = 0; i != thread_count; ++i)\r\n        {\r\n            node* nn = VAR(defer_[index][i]);\r\n            if (nn)\r\n            {\r\n                for (size_t j = 0; j != thread_count; ++j)\r\n                {\r\n                    if (nn == hp[j])\r\n                    {\r\n                        nn = 0;\r\n                        break;\r\n                    }\r\n                }\r\n                if (nn)\r\n                {\r\n                    VAR(defer_[index][i]) = 0;\r\n                    VAR(defer_size_[index]) -= 1;\r\n                    delete nn;\r\n                }\r\n            }\r\n        }\r\n    }\r\n\r\n    void smr_defer(unsigned index, node* n)\r\n    {\r\n        std::atomic_thread_fence(std::memory_order_seq_cst);\r\n\r\n        smr_pump(index);\r\n\r\n        if (VAR(defer_size_[index]) == thread_count)\r\n        {\r\n            delete n;\r\n        }\r\n        else\r\n        {\r\n            bool found = false;\r\n            for (size_t i = 0; i != thread_count; ++i)\r\n            {\r\n                if (VAR(defer_[index][i]) == 0)\r\n                {\r\n                    VAR(defer_[index][i]) = n;\r\n                    found = true;\r\n                    break;\r\n                }\r\n            }\r\n            RL_ASSERT(found);\r\n            VAR(defer_size_[index]) += 1;\r\n        }\r\n    }\r\n\r\n    node* smr_acquire(unsigned index, std::atomic<node*>& n)\r\n    {\r\n        node* v = 0;\r\n        for (;;)\r\n        {\r\n            v = n.load(std::memory_order_relaxed);\r\n            hp_[index].store(v, std::memory_order_relaxed);\r\n            std::atomic_thread_fence(std::memory_order_seq_cst);\r\n            node* v2 = 
n.load(std::memory_order_acquire);\r\n            if (v2 == v)\r\n                break;\r\n        }\r\n        return v;\r\n    }\r\n\r\n    void smr_release(unsigned index)\r\n    {\r\n        hp_[index].store(0, std::memory_order_relaxed);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        for (unsigned i = 0; i != node_count; ++i)\r\n        {\r\n            push(index, index * thread_count + i + 1);\r\n        }\r\n        for (unsigned i = 0; i != node_count; ++i)\r\n        {\r\n            int data = pop(index);\r\n            RL_ASSERT(0 != data);\r\n        }\r\n    }\r\n\r\n    void after()\r\n    {\r\n        for (unsigned i = 0; i != ::thread_count; ++i)\r\n        {\r\n            smr_pump(i);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params p;\r\n    //p.collect_history = true;\r\n    //p.output_history = true;\r\n    //p.initial_state = \"991172\";\r\n    p.iteration_count = 1000;\r\n    rl::simulate<smr_test>(p);\r\n}\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/smr/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/smr/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#define RL_MSVC_OUTPUT\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/spsc_queue/msvc8/spsc_queue.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"spsc_queue\", \"spsc_queue.vcproj\", \"{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/spsc_queue/msvc8/spsc_queue.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"spsc_queue\"\r\n\tProjectGUID=\"{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}\"\r\n\tRootNamespace=\"spsc_queue\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tAdditionalIncludeDirectories=\"../..\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\
"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalIncludeDirectories=\"../../..\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\
t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\spsc_queue.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/spsc_queue/msvc9/spsc_queue.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"spsc_queue\", \"spsc_queue.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/spsc_queue/msvc9/spsc_queue.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9.00\"\r\n\tName=\"spsc_queue\"\r\n\tProjectGUID=\"{3F32C4FA-E451-42BC-9E65-74129120B6E4}\"\r\n\tRootNamespace=\"spsc_queue\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"131072\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\
tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\
t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\spsc_queue.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/spsc_queue/spsc_queue.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\ntemplate<typename T>\r\nclass nonblocking_spsc_queue\r\n{\r\npublic:\r\n    nonblocking_spsc_queue()\r\n    {\r\n        node* n = new node ();\r\n        VAR(head) = n;\r\n        VAR(tail) = n;\r\n    }\r\n\r\n    ~nonblocking_spsc_queue()\r\n    {\r\n        RL_ASSERT(VAR(head) == VAR(tail));\r\n        delete (node*)VAR(head);\r\n    }\r\n\r\n    void enqueue(T data)\r\n    {\r\n        node* n = new node (data);\r\n        VAR(head)->next.store(n, std::memory_order_release); \r\n        VAR(head) = n;\r\n    }\r\n\r\n    bool dequeue(T& data)\r\n    {\r\n        node* t = VAR(tail);\r\n        node* n = t->next.load(std::memory_order_acquire);\r\n        if (0 == n)\r\n            return false;\r\n        data = n->VAR(data);\r\n        delete t;\r\n        VAR(tail) = n;\r\n        return true;\r\n    }\r\n\r\nprivate:\r\n    struct node\r\n    {\r\n        std::atomic<node*> next;\r\n        VAR_T(T) data;\r\n\r\n        node(T data = T())\r\n            : next(0)\r\n            , data(data)\r\n        {}\r\n    };\r\n\r\n    VAR_T(node*) head;\r\n    VAR_T(node*) tail;\r\n};\r\n\r\nstruct nonblocking_spsc_queue_test : rl::test_suite<nonblocking_spsc_queue_test, 2>\r\n{\r\n    nonblocking_spsc_queue<int> q;\r\n\r\n    void thread(unsigned thread_index)\r\n    {\r\n        if (0 == thread_index)\r\n        {\r\n            q.enqueue(11);\r\n        }\r\n        else\r\n        {\r\n            int data = 0;\r\n            while (false == q.dequeue(data))\r\n            {}\r\n            RL_ASSERT(11 == data);\r\n        }\r\n    }\r\n};\r\n\r\n\r\nclass eventcount\r\n{\r\npublic:\r\n    eventcount()\r\n        : count(0)\r\n        , waiters(0)\r\n    {}\r\n\r\n    void signal_relaxed()\r\n    {\r\n        unsigned cmp = count.load(std::memory_order_relaxed);\r\n        signal_impl(cmp);\r\n    }\r\n\r\n    void signal()\r\n    {\r\n        unsigned cmp = 
count.fetch_add(0, std::memory_order_seq_cst);\r\n        signal_impl(cmp);\r\n    }\r\n\r\n    unsigned get()\r\n    {\r\n        unsigned cmp = count.fetch_or(0x80000000, std::memory_order_acquire);\r\n        return cmp & 0x7FFFFFFF;\r\n    }\r\n\r\n    void wait(unsigned cmp)\r\n    {\r\n        unsigned ec = count.load(std::memory_order_seq_cst);\r\n        if (cmp == (ec & 0x7FFFFFFF))\r\n        {\r\n            guard.lock($);\r\n            ec = count.load(std::memory_order_seq_cst);\r\n            if (cmp == (ec & 0x7FFFFFFF))\r\n            {\r\n                waiters($) += 1;\r\n                cv.wait(guard, $);\r\n            }\r\n            guard.unlock($);\r\n        }\r\n    }\r\n\r\nprivate:\r\n    std::atomic<unsigned> count;\r\n    VAR_T(unsigned) waiters;\r\n    std::mutex guard;\r\n    std::condition_variable cv;\r\n\r\n    void signal_impl(unsigned cmp)\r\n    {\r\n        if (cmp & 0x80000000)\r\n        {\r\n            guard.lock($);\r\n            while (false == count.compare_exchange_weak(cmp,\r\n                (cmp + 1) & 0x7FFFFFFF, std::memory_order_relaxed));\r\n            unsigned w = VAR(waiters);\r\n            VAR(waiters) = 0;\r\n            guard.unlock($);\r\n            if (w)\r\n                cv.notify_all($);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass spsc_queue : nonblocking_spsc_queue<T>\r\n{\r\npublic:\r\n    typedef nonblocking_spsc_queue<T> base_t;\r\n\r\n    void enqueue(T data)\r\n    {\r\n        base_t::enqueue(data);\r\n        ec.signal/*_relaxed*/();\r\n    }\r\n\r\n    T dequeue()\r\n    {\r\n        T data;\r\n        bool res = base_t::dequeue(data);\r\n        while (false == res)\r\n        {\r\n            int cmp = ec.get();\r\n            res = base_t::dequeue(data);\r\n            if (res)\r\n                break;\r\n            ec.wait(cmp);\r\n            res = base_t::dequeue(data);\r\n            if (res)\r\n                break;\r\n        }\r\n        return 
data;\r\n    }\r\n\r\nprivate:\r\n    eventcount ec;\r\n};\r\n\r\n\r\nstruct spsc_queue_test : rl::test_suite<spsc_queue_test, 2>\r\n{\r\n    spsc_queue<int> q;\r\n\r\n    void thread(unsigned thread_index)\r\n    {\r\n        if (0 == thread_index)\r\n        {\r\n            q.enqueue(11);\r\n        }\r\n        else\r\n        {\r\n            int d = q.dequeue();\r\n            RL_ASSERT(11 == d);\r\n        }\r\n    }\r\n};\r\n\r\n\r\nint main()\r\n{\r\n    rl::simulate<nonblocking_spsc_queue_test>();\r\n    rl::simulate<spsc_queue_test>();\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/spsc_queue/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/spsc_queue/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n//#define RL_MSVC_OUTPUT\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/stack/DESCRIPTION.TXT",
    "content": "lock-free stack\r\ncode contains several bugs: access to freed memory and ABA problem\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/stack/msvc8/stack.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"stack\", \"stack.vcproj\", \"{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/stack/msvc8/stack.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"stack\"\r\n\tProjectGUID=\"{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}\"\r\n\tRootNamespace=\"stack\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=
\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool
\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stack.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/stack/msvc9/stack.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"stack\", \"stack.vcproj\", \"{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/stack/msvc9/stack.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9.00\"\r\n\tName=\"stack\"\r\n\tProjectGUID=\"{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}\"\r\n\tRootNamespace=\"stack\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"0\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"1\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t
/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"0\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierT
ool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stack.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/stack/stack.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\n// TEST FAILS WITH \"ACCESS TO FREED MEMORY\"\r\n\r\nclass stack\r\n{\r\npublic:\r\n    stack()\r\n        : head_(0)\r\n    {\r\n    }\r\n\r\n    void push(int data)\r\n    {\r\n        rl::var<node*> n = new node ();\r\n        n($)->data_($) = data;\r\n        node* next = head_.load(rl::memory_order_relaxed);\r\n        for (;;)\r\n        {\r\n            n($)->next_.store(next, rl::memory_order_relaxed);\r\n            if (head_.compare_exchange_weak(next, n($), rl::memory_order_release))\r\n                break;\r\n        }\r\n    }\r\n\r\n    int pop()\r\n    {\r\n        node* n = head_.load(rl::memory_order_relaxed);\r\n        for (;;)\r\n        {\r\n            if (0 == n)\r\n                break;\r\n            node* next = n->next_.load(rl::memory_order_relaxed);\r\n            if (head_.compare_exchange_weak(n, next, rl::memory_order_acquire))\r\n                break;\r\n        }\r\n        if (n)\r\n        {\r\n            int data = n->data_($);\r\n            delete n;\r\n            return data;\r\n        }\r\n        else\r\n        {\r\n            return 0;\r\n        }\r\n    }\r\n\r\nprivate:\r\n    struct node\r\n    {\r\n        std::atomic<node*> next_;\r\n        rl::var<int> data_;\r\n    };\r\n\r\n    std::atomic<node*> head_;\r\n\r\n    stack(stack const&);\r\n    stack& operator = (stack const&);\r\n};\r\n\r\n\r\n\r\n\r\nstruct stack_test : rl::test_suite<stack_test, 4>\r\n{\r\n    stack s_;\r\n\r\n    int produced_count_;\r\n    int consumed_count_;\r\n\r\n    void before()\r\n    {\r\n        produced_count_ = 0;\r\n        consumed_count_ = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        typedef rl::test_suite<stack_test, 4> base_t;\r\n        RL_ASSERT(base_t::params::thread_count == produced_count_);\r\n        RL_ASSERT(base_t::params::thread_count == consumed_count_);\r\n    }\r\n\r\n    void thread(unsigned 
/*index*/)\r\n    {\r\n        s_.push(rand() + 1);\r\n        produced_count_ += 1;\r\n        int data = s_.pop();\r\n        RL_ASSERT(data);\r\n        consumed_count_ += 1;\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::simulate<stack_test>();\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/stack/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/stack/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/tbb_eventcount/eventcount.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n#include \"../../relacy/windows.h\"\r\n\r\n/*\r\n#define HANDLE rl::HANDLE\r\n\r\n#define CreateSemaphoreA rl::RL_CreateSemaphore($)\r\n#define CreateSemaphoreW rl::RL_CreateSemaphore($)\r\n#ifndef CreateSemaphore\r\n#   define CreateSemaphore CreateSemaphoreW\r\n#endif\r\n\r\n//#define CRITICAL_SECTION rl::CRITICAL_SECTION\r\n//#define InitializeCriticalSection rl::InitializeCriticalSection($)\r\n\r\n#define CloseHandle rl::RL_CloseHandle($)\r\n*/\r\n\r\n#include <stddef.h>\r\n\r\n\r\n#if defined(WIN32) && defined(_MSC_VER)\r\n\r\n#include <windows.h>\r\n#include <intrin.h>\r\n\r\nclass semaphore\r\n{\r\npublic:\r\n    semaphore()\r\n    {\r\n        h_ = rl::CreateSemaphore(0, 0, LONG_MAX, 0, $);\r\n    }\r\n\r\n    ~semaphore()\r\n    {\r\n        rl::CloseHandle(h_, $);\r\n    }\r\n\r\n    void wait()\r\n    {\r\n        rl::WaitForSingleObject(h_, rl::RL_INFINITE, $);\r\n    }\r\n\r\n    void post()\r\n    {\r\n        rl::ReleaseSemaphore(h_, 1, 0, $);\r\n    }\r\n\r\nprivate:\r\n    rl::HANDLE h_;\r\n\r\n    semaphore(semaphore const&);\r\n    semaphore& operator = (semaphore const&);\r\n};\r\n\r\nclass mutex\r\n{\r\npublic:\r\n    mutex()\r\n    {\r\n        rl::InitializeCriticalSection(&cs_, $);\r\n    }\r\n\r\n    ~mutex()\r\n    {\r\n        rl::DeleteCriticalSection(&cs_, $);\r\n    }\r\n\r\n    void lock()\r\n    {\r\n        rl::EnterCriticalSection(&cs_, $);\r\n    }\r\n\r\n    void unlock()\r\n    {\r\n        rl::LeaveCriticalSection(&cs_, $);\r\n    }\r\n\r\nprivate:\r\n    rl::CRITICAL_SECTION    cs_;\r\n\r\n    mutex(mutex const&);\r\n    mutex& operator = (mutex const&);\r\n};\r\n\r\n//void full_memory_fence()\r\n//{\r\n//    _mm_mfence();\r\n//}\r\n\r\n//#define THREAD_LOCAL __declspec(thread)\r\n\r\n#elif defined(POSIX) && defined(GCC)\r\n\r\n#include <pthread.h>\r\n#include <semaphore.h>\r\n\r\nclass semaphore\r\n{\r\npublic:\r\n    semaphore()\r\n    {\r\n        sem_init(&sem_, 0, 0);\r\n    
}\r\n\r\n    ~semaphore()\r\n    {\r\n        sem_destroy(&sem_);\r\n    }\r\n\r\n    void wait()\r\n    {\r\n        sem_wait(&sem_);\r\n    }\r\n\r\n    void post()\r\n    {\r\n        sem_post(&sem_);\r\n    }\r\n\r\nprivate:\r\n    sem_t               sem_;\r\n\r\n    semaphore(semaphore const&);\r\n    semaphore& operator = (semaphore const&);\r\n};\r\n\r\nclass mutex\r\n{\r\npublic:\r\n    mutex()\r\n    {\r\n        pthread_mutex_init(&mutex_, 0);\r\n    }\r\n\r\n    ~mutex()\r\n    {\r\n        pthread_mutex_destroy(&mutex_);\r\n    }\r\n\r\n    void lock()\r\n    {\r\n        pthread_mutex_lock(&mutex_);\r\n    }\r\n\r\n    void unlock()\r\n    {\r\n        pthread_mutex_unlock(&mutex_);\r\n    }\r\n\r\nprivate:\r\n    pthread_mutex_t     mutex_;\r\n\r\n    mutex(mutex const&);\r\n    mutex& operator = (mutex const&);\r\n};\r\n\r\nvoid full_memory_fence()\r\n{\r\n    __sync_synchronize();\r\n}\r\n\r\n//#define THREAD_LOCAL __thread\r\n\r\n#endif\r\n\r\n\r\n\r\nclass lock\r\n{\r\npublic:\r\n    lock(mutex& m)\r\n        : m_(m)\r\n    {\r\n        m.lock();\r\n    }\r\n\r\n    ~lock()\r\n    {\r\n        m_.unlock();\r\n    }\r\n\r\nprivate:\r\n    mutex&              m_;\r\n\r\n    lock(lock const&);\r\n    lock& operator = (lock const&);\r\n};\r\n\r\n\r\n\r\n\r\n/** simple single-threaded double-linked list\r\n *  nothing interesting\r\n */\r\nclass dlist\r\n{\r\npublic:\r\n    struct node\r\n    {\r\n        rl::var<node*>  prev_;\r\n        rl::var<node*>  next_;\r\n\r\n        node()\r\n        {\r\n            prev_($) = 0;\r\n            next_($) = 0;\r\n        }\r\n    };\r\n\r\n    dlist()\r\n    {\r\n        reset();\r\n    }\r\n\r\n    void push(node* n)\r\n    {\r\n        size_t s = size_($).load(rl::memory_order_relaxed);\r\n        size_($).store(s + 1, rl::memory_order_relaxed);\r\n        n->next_($) = head_.next_($);\r\n        n->prev_($) = &head_;\r\n        head_.next_($)->prev_($) = n;\r\n        head_.next_($) = n;\r\n    }\r\n\r\n   
 node* pop()\r\n    {\r\n        if (size_($).load(rl::memory_order_relaxed) == 0)\r\n            return 0;\r\n        node* n = head_.next_($);\r\n        remove(n);\r\n        return n;\r\n    }\r\n\r\n    void remove(node* n)\r\n    {\r\n        size_t s = size_($).load(rl::memory_order_relaxed);\r\n        size_($).store(s - 1, rl::memory_order_relaxed);\r\n        n->prev_($)->next_($) = n->next_($);\r\n        n->next_($)->prev_($) = n->prev_($);\r\n    }\r\n\r\n    size_t size() const\r\n    {\r\n        return size_($).load(rl::memory_order_relaxed);\r\n    }\r\n\r\n    node* begin()\r\n    {\r\n        return head_.next_($);\r\n    }\r\n\r\n    void flush_to(dlist& target)\r\n    {\r\n        if (size_($).load(rl::memory_order_relaxed))\r\n        {\r\n            target.size_($).store(size_($).load(rl::memory_order_relaxed));\r\n            target.head_.next_($) = head_.next_($);\r\n            target.head_.next_($)->prev_($) = &target.head_;\r\n            target.tail_.prev_($) = tail_.prev_($);\r\n            target.tail_.prev_($)->next_($) = &target.tail_;\r\n        }\r\n        else\r\n        {\r\n            target.reset();\r\n        }\r\n        reset();\r\n    }\r\n\r\n    static bool not_last(node* n)\r\n    {\r\n        return n->next_($) != 0;\r\n    }\r\n\r\n    static node* get_next(node* n)\r\n    {\r\n        return n->next_($);\r\n    }\r\n\r\nprivate:\r\n    rl::atomic<size_t>  size_;\r\n    node                head_;\r\n    node                tail_;\r\n\r\n    void reset()\r\n    {\r\n        size_($) = 0;\r\n        head_.next_($) = &tail_;\r\n        head_.prev_($) = 0;\r\n        tail_.next_($) = 0;\r\n        tail_.prev_($) = &head_;\r\n    }\r\n\r\n    dlist(dlist const&);\r\n    dlist& operator = (dlist const&);\r\n};\r\n\r\n\r\n\r\n/** pre-thread descriptor for eventcount\r\n */\r\nstruct ec_thread\r\n{\r\n    dlist::node         node_;\r\n    semaphore           sema_;\r\n    rl::var<unsigned>   epoch_;\r\n    rl::atomic<bool> 
   in_waitset_;\r\n    rl::var<bool>       spurious_;\r\n    rl::var<void*>      ctx_;\r\n\r\n    ec_thread()\r\n    {\r\n        epoch_($) = 0;\r\n        in_waitset_($) = false;\r\n        spurious_($) = false;\r\n        ctx_($) = 0;\r\n    }\r\n\r\n    ~ec_thread()\r\n    {\r\n        if (spurious_($))\r\n            sema_.wait();\r\n    }\r\n\r\n    /*\r\n    static ec_thread* current()\r\n    {\r\n        static THREAD_LOCAL ec_thread* ec_thread_instance = 0;\r\n        ec_thread* instance = ec_thread_instance;\r\n        if (instance == 0)\r\n        {\r\n            instance = new ec_thread;\r\n            ec_thread_instance = instance;\r\n        }\r\n        return instance;\r\n        // instance must be destroyed in DllMain() callback\r\n        // or in pthread_key_create() callback\r\n    }\r\n    */\r\n\r\nprivate:\r\n    ec_thread(ec_thread const&);\r\n    ec_thread& operator = (ec_thread const&);\r\n};\r\n\r\n\r\n\r\n/** fine-grained eventcount implementation\r\n */\r\nclass eventcount\r\n{\r\npublic:\r\n    eventcount()\r\n    {\r\n        epoch_($) = 0;\r\n    }\r\n\r\n    void prepare_wait(ec_thread* th = 0, void* ctx = 0)\r\n    {\r\n        RL_ASSERT(th);\r\n        // this is good place to pump previous spurious wakeup\r\n        if (th->spurious_($))\r\n        {\r\n            th->spurious_($) = false;\r\n            th->sema_.wait();\r\n        }\r\n        th->in_waitset_($).store(true, rl::memory_order_relaxed);\r\n        th->ctx_($) = ctx;\r\n        {\r\n            lock l (mtx_);\r\n            th->epoch_($) = epoch_($).load(rl::memory_order_relaxed);\r\n            waitset_.push(&th->node_);\r\n        }\r\n        rl::atomic_thread_fence($)(rl::memory_order_seq_cst);\r\n    }\r\n\r\n    void commit_wait(ec_thread* th = 0)\r\n    {\r\n        RL_ASSERT(th);\r\n        // this check is just an optimization\r\n        //if (th->epoch_($) == epoch_($).load(rl::memory_order_relaxed))\r\n        if 
(th->in_waitset_($).load(rl::memory_order_acquire))\r\n            th->sema_.wait();\r\n        else\r\n            cancel_wait(true, th); //!!! add 'th'\r\n    }\r\n\r\n    void cancel_wait(bool /*from_commit*/, ec_thread* th = 0)\r\n    {\r\n        RL_ASSERT(th);\r\n        // spurious wakeup will be pumped in the following prepare_wait()\r\n        th->spurious_($)  = true;\r\n        // try to remove node from waitset\r\n        if (th->in_waitset_($).load(rl::memory_order_acquire))\r\n        {\r\n            lock l (mtx_);\r\n            if (th->in_waitset_($).load(rl::memory_order_relaxed))\r\n            {\r\n                // successfully removed from waitset,\r\n                // so there will be no spurious wakeup\r\n                th->in_waitset_($).store(false, rl::memory_order_relaxed);\r\n                th->spurious_($) = false;\r\n                waitset_.remove(&th->node_);\r\n            }\r\n            else\r\n            {\r\n                //if (from_commit)\r\n                    //int volatile x = 0;\r\n            }\r\n        }\r\n        else\r\n        {\r\n            //RL_ASSERT(from_commit == false);\r\n            //if (from_commit)\r\n              //  int volatile x = 0;\r\n        }\r\n    }\r\n\r\n    void notify_one()\r\n    {\r\n        rl::atomic_thread_fence($)(rl::memory_order_seq_cst);\r\n        notify_one_relaxed();\r\n    }\r\n\r\n    template<typename predicate_t>\r\n    void notify(predicate_t pred)\r\n    {\r\n        rl::atomic_thread_fence($)(rl::memory_order_seq_cst);\r\n        notify_relaxed(pred);\r\n    }\r\n\r\n    void notify_all()\r\n    {\r\n        rl::atomic_thread_fence($)(rl::memory_order_seq_cst);\r\n        notify_all_relaxed();\r\n    }\r\n\r\n    void notify_one_relaxed()\r\n    {\r\n        if (waitset_.size() == 0)\r\n            return;\r\n        dlist::node* n;\r\n        {\r\n            lock l (mtx_);\r\n            unsigned ep = epoch_($).load(rl::memory_order_relaxed);\r\n            
epoch_($).store(ep + 1, rl::memory_order_relaxed);\r\n            n = waitset_.pop();\r\n            if (n)\r\n                to_ec_thread(n)->in_waitset_($).store(false, rl::memory_order_release);\r\n        }\r\n        if (n)\r\n        {\r\n            to_ec_thread(n)->sema_.post();\r\n        }\r\n    }\r\n\r\n    template<typename predicate_t>\r\n    void notify_relaxed(predicate_t pred)\r\n    {\r\n        if (waitset_.size() == 0)\r\n            return;\r\n        dlist temp;\r\n        {\r\n            lock l (mtx_);\r\n            unsigned ep = epoch_($).load(rl::memory_order_relaxed);\r\n            epoch_($).store(ep + 1, rl::memory_order_relaxed);\r\n            size_t size = waitset_.size();\r\n            size_t idx = 0;\r\n            dlist::node* n = waitset_.begin();\r\n            while (dlist::not_last(n))\r\n            {\r\n                dlist::node* next = dlist::get_next(n);\r\n                ec_thread* th = to_ec_thread(n);\r\n                if (pred(th->ctx_($), size, idx))\r\n                {\r\n                    waitset_.remove(n);\r\n                    temp.push(n);\r\n                    th->in_waitset_($).store(false, rl::memory_order_release);\r\n                }\r\n                n = next;\r\n                idx += 1;\r\n            }\r\n        }\r\n        dlist::node* n = temp.begin();\r\n        while (dlist::not_last(n))\r\n        {\r\n            dlist::node* next = dlist::get_next(n);\r\n            to_ec_thread(n)->sema_.post();\r\n            n = next;\r\n        }\r\n    }\r\n\r\n    void notify_all_relaxed()\r\n    {\r\n        if (waitset_.size() == 0)\r\n            return;\r\n        dlist temp;\r\n        {\r\n            lock l (mtx_);\r\n            waitset_.flush_to(temp);\r\n            dlist::node* n = temp.begin();\r\n            while (dlist::not_last(n))\r\n            {\r\n                to_ec_thread(n)->in_waitset_($).store(false, rl::memory_order_release);\r\n                n = 
dlist::get_next(n);\r\n            }\r\n            unsigned ep = epoch_($).load(rl::memory_order_relaxed);\r\n            epoch_($).store(ep + 1, rl::memory_order_relaxed);\r\n        }\r\n        dlist::node* n = temp.begin();\r\n        while (dlist::not_last(n))\r\n        {\r\n            dlist::node* next = dlist::get_next(n);\r\n            to_ec_thread(n)->sema_.post();\r\n            n = next;\r\n        }\r\n    }\r\n\r\n    class wait_guard;\r\n\r\nprivate:\r\n    mutex               mtx_;\r\n    dlist               waitset_;\r\n    rl::atomic<unsigned>epoch_;\r\n\r\n    ec_thread* to_ec_thread(dlist::node* n)\r\n    {\r\n        return (ec_thread*)((char*)n - offsetof(ec_thread, node_));\r\n    }\r\n\r\n    eventcount(eventcount const&);\r\n    eventcount& operator = (eventcount const&);\r\n};\r\n\r\n\r\n\r\n\r\nclass eventcount::wait_guard\r\n{\r\npublic:\r\n    wait_guard(eventcount& ec, ec_thread* th = 0, void* ctx = 0)\r\n        : ec_(ec)\r\n        , th_(th)\r\n        , wait_(false)\r\n    {\r\n        ec_.prepare_wait(th_, ctx);\r\n    }\r\n\r\n    void commit_wait()\r\n    {\r\n        assert(false == wait_);\r\n        wait_ = true;\r\n        ec_.commit_wait(th_);\r\n    }\r\n\r\n    ~wait_guard()\r\n    {\r\n        if (false == wait_)\r\n            ec_.cancel_wait(false, th_);\r\n    }\r\n\r\nprivate:\r\n    eventcount& ec_;\r\n    ec_thread* th_;\r\n    bool wait_;\r\n\r\n    wait_guard(wait_guard const&);\r\n    wait_guard& operator = (wait_guard const&);\r\n};\r\n\r\n\r\n\r\n\r\n\r\nstruct scheduler\r\n{\r\n    struct tbb_thread\r\n    {\r\n        ec_thread       th;\r\n    };\r\n\r\n    eventcount          ec_;\r\n    tbb_thread*         threads_;\r\n    bool volatile       is_permanently_open_;\r\n\r\n    void wait_while_pool_is_empty(tbb_thread* th)\r\n    {\r\n        if (is_permanently_open_)\r\n            return;\r\n        eventcount::wait_guard wait (ec_, &th->th);\r\n        if (pool_is_empty())\r\n            
wait.commit_wait();\r\n    }\r\n\r\n    void notify_about_new_task_available()\r\n    {\r\n        ec_.notify_one_relaxed();\r\n    }\r\n\r\n    void notify_about_new_task_available_with_preference(tbb_thread* preference)\r\n    {\r\n        struct local\r\n        {\r\n            tbb_thread*     preference_;\r\n            bool            fired_;\r\n\r\n            bool operator () (void* ctx, size_t count, size_t idx)\r\n            {\r\n                tbb_thread* th = (tbb_thread*)ctx;\r\n                if (th == preference_)\r\n                {\r\n                    fired_ = true;\r\n                    return true;\r\n                }\r\n                else if (idx == count - 1 && fired_ == false)\r\n                {\r\n                    return true;\r\n                }\r\n                else\r\n                {\r\n                    return false;\r\n                }\r\n            }\r\n        }\r\n        pred = {preference};\r\n        ec_.notify_relaxed(pred);\r\n    }\r\n\r\n    void notify_about_list_of_tasks_available(size_t total_count, size_t preference_count, tbb_thread** preferences)\r\n    {\r\n        struct local\r\n        {\r\n            size_t          remain_to_signal_;\r\n            size_t          preference_count_;\r\n            tbb_thread**    preferences_;\r\n\r\n            bool operator () (void* ctx, size_t count, size_t idx)\r\n            {\r\n                tbb_thread* th = (tbb_thread*)ctx;\r\n                size_t remain_in_waitset = count - idx;\r\n                if (remain_in_waitset <= remain_to_signal_)\r\n                {\r\n                    return true;\r\n                }\r\n                else\r\n                {\r\n                    for (size_t i = 0; i != preference_count_; ++i)\r\n                    {\r\n                        if (preferences_[i] == th)\r\n                        {\r\n                            remain_to_signal_ -= 1;\r\n                            return true;\r\n      
                  }\r\n                    }\r\n                }\r\n                return false;\r\n            }\r\n        }\r\n        pred = {total_count, preference_count, preferences};\r\n        ec_.notify_relaxed(pred);\r\n    }\r\n\r\n    bool pool_is_empty()\r\n    {\r\n        return true;\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct queue\r\n{\r\n    rl::atomic<int>     producer_idx_;\r\n    rl::atomic<int>     consumer_idx_;\r\n\r\n    rl::atomic<void*>*  buffer_;\r\n\r\n    eventcount          ec_;\r\n\r\n    queue()\r\n    {\r\n        producer_idx_($) = 0;\r\n        consumer_idx_($) = 0;\r\n        buffer_ = RL_NEW_ARR(rl::atomic<void*>, 10);\r\n        for (size_t i = 0; i != 10; ++i)\r\n            buffer_[i]($) = 0;\r\n    }\r\n\r\n    ~queue()\r\n    {\r\n        RL_DELETE_ARR(buffer_);\r\n    }\r\n\r\n    void enqueue(void* data)\r\n    {\r\n        int idx = producer_idx_($).fetch_add(1) + 1; // atomic\r\n        buffer_[idx]($).store(data, rl::memory_order_relaxed);\r\n\r\n        struct local\r\n        {\r\n            int         idx_;\r\n            bool operator () (void* ctx, size_t /*count*/, size_t /*idx*/)\r\n            {\r\n                return idx_ == (*(rl::var<int>*)ctx)($);\r\n            }\r\n        }\r\n        pred = {idx};\r\n        ec_.notify(pred); // not relaxed!!!\r\n    }\r\n\r\n    void* dequeue(ec_thread* th)\r\n    {\r\n        int idx = consumer_idx_($).fetch_add(1) + 1; // atomic\r\n        void* data = buffer_[idx]($).load(rl::memory_order_relaxed);\r\n        if (data)\r\n            return data;\r\n        for (;;)\r\n        {\r\n            rl::var<int> idxv (idx);\r\n            eventcount::wait_guard wait (ec_, th, &idxv);\r\n            data = buffer_[idx]($).load(rl::memory_order_relaxed);\r\n            if (data)\r\n            {\r\n                return data;\r\n            }\r\n            wait.commit_wait();\r\n            idxv($) = 0;\r\n            data = 
buffer_[idx]($).load(rl::memory_order_relaxed);\r\n            if (data)\r\n            {\r\n                return data;\r\n            }\r\n            rl::yield($, 1);\r\n            //RL_ASSERT(false);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nclass condition_variable\r\n{\r\n    eventcount ec_;\r\n\r\npublic:\r\n    void wait(mutex& mtx, ec_thread* th)\r\n    {\r\n        eventcount::wait_guard wait (ec_, th);\r\n        mtx.unlock();\r\n        wait.commit_wait();\r\n        mtx.lock();\r\n    }\r\n\r\n    void signal()\r\n    {\r\n        ec_.notify_one();\r\n    }\r\n\r\n    void broadcast()\r\n    {\r\n        ec_.notify_all();\r\n    }\r\n}; \r\n\r\n\r\nstruct queue_test : rl::test_suite<queue_test, 4>\r\n{\r\n    ec_thread threads_ [6];\r\n    queue q_;\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index < 2)\r\n        {\r\n            q_.enqueue((void*)(index*2+1));\r\n            q_.enqueue((void*)(index*2+2));\r\n        }\r\n        else\r\n        {\r\n            int data1 = (int)q_.dequeue(&threads_[index]);\r\n            RL_ASSERT(data1 >= 1 && data1 <= 6);\r\n            int data2 = (int)q_.dequeue(&threads_[index]);\r\n            RL_ASSERT(data2 >= 1 && data2 <= 6);\r\n        }\r\n    }\r\n};\r\n\r\nstruct condvar_test : rl::test_suite<condvar_test, 3>\r\n{\r\n    rl::var<int> stage;\r\n    condition_variable cv;\r\n    mutex mtx;\r\n    ec_thread th [3];\r\n\r\n    void before()\r\n    {\r\n        stage($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            mtx.lock();\r\n            stage($) += 1;\r\n            cv.broadcast();\r\n            while (stage($) != 2)\r\n                cv.wait(mtx, &th[index]);\r\n            mtx.unlock();\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            mtx.lock();\r\n            while (stage($) != 1)\r\n                cv.wait(mtx, &th[index]);\r\n            stage($) += 1;\r\n            
cv.broadcast();\r\n            mtx.unlock();\r\n        }\r\n        else if (2 == index)\r\n        {\r\n            mtx.lock();\r\n            while (stage($) != 2)\r\n                cv.wait(mtx, &th[index]);\r\n            mtx.unlock();\r\n        }\r\n    }\r\n};\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params p;\r\n    p.iteration_count = 100000000;\r\n    //p.initial_state = \"30000000\";\r\n    //p.search_type = rl::fair_context_bound_scheduler_type;\r\n    rl::simulate<queue_test>(p);\r\n    //rl::simulate<condvar_test>(p);\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/tbb_eventcount/msvc8/eventcount.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"eventcount\", \"eventcount.vcproj\", \"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/tbb_eventcount/msvc8/eventcount.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"eventcount\"\r\n\tProjectGUID=\"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}\"\r\n\tRootNamespace=\"eventcount\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName
=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t
\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\eventcount.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/tbb_eventcount/msvc9/eventcount.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"eventcount\", \"eventcount.vcproj\", \"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/tbb_eventcount/msvc9/eventcount.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"eventcount\"\r\n\tProjectGUID=\"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E5}\"\r\n\tRootNamespace=\"eventcount\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName
=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t
\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\eventcount.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/tbb_eventcount/stdafx.cpp",
    "content": "// stdafx.cpp : source file that includes just the standard includes\r\n// ws_deque.pch will be the pre-compiled header\r\n// stdafx.obj will contain the pre-compiled type information\r\n\r\n#include \"stdafx.h\"\r\n\r\n// TODO: reference any additional headers you need in STDAFX.H\r\n// and not in this file\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/tbb_eventcount/stdafx.h",
    "content": "#pragma once\r\n\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque/msvc8/ws_deque.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ws_deque\", \"ws_deque.vcproj\", \"{0B597F19-DEBB-4832-B520-9A93A286D595}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque/msvc8/ws_deque.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"ws_deque\"\r\n\tProjectGUID=\"{0B597F19-DEBB-4832-B520-9A93A286D595}\"\r\n\tRootNamespace=\"ws_deque\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\
tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\
t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\ws_deque.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque/msvc9/ws_deque.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ws_deque\", \"ws_deque.vcproj\", \"{0B597F19-DEBB-4832-B520-9A93A286D595}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque/msvc9/ws_deque.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"ws_deque\"\r\n\tProjectGUID=\"{0B597F19-DEBB-4832-B520-9A93A286D595}\"\r\n\tRootNamespace=\"ws_deque\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\
tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\
t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t\tObjectFile=\"$(IntDir)\\$(InputName)1.obj\"\r\n\t\t\t\t\tXMLDocumentationFileName=\"$(IntDir)\\$(InputName)1.xdc\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\ws_deque.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque/stdafx.h",
    "content": "#pragma once\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#define RL_MSVC_OUTPUT\r\n//#define RL_DEBUGBREAK_ON_FAILURE\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque/ws_deque.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\nusing namespace std;\r\nusing rl::var;\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass ws_deque\r\n{\r\npublic:\r\n    ws_deque()\r\n    {\r\n        VAR(m_mask) = initial_size - 1;\r\n        m_headIndex.store(0, memory_order_relaxed);\r\n        m_tailIndex.store(0, memory_order_relaxed);\r\n        VAR(m_array) = new atomic<T> [initial_size];\r\n        VAR(m_arraySize) = initial_size;\r\n    }\r\n\r\n    ~ws_deque()\r\n    {\r\n        delete [] VAR(m_array);\r\n    }\r\n\r\n    bool IsEmpty() const\r\n    {\r\n        return m_headIndex.load(memory_order_acquire)\r\n            >= m_tailIndex.load(memory_order_acquire);\r\n    }\r\n\r\n    size_t Count() const\r\n    {\r\n        return m_tailIndex.load(memory_order_acquire)\r\n             - m_headIndex.load(memory_order_acquire);\r\n    }\r\n\r\n    void push(T item)\r\n    {\r\n        size_t tail = m_tailIndex.load(memory_order_acquire);\r\n        if (tail < m_headIndex.load(memory_order_acquire) + VAR(m_mask))\r\n        {\r\n            VAR(m_array)[tail & VAR(m_mask)].store(item, memory_order_relaxed);\r\n            m_tailIndex.store(tail + 1, memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            m_foreignLock.lock($);\r\n            size_t head = m_headIndex.load(memory_order_acquire);\r\n            size_t count = Count();\r\n            if (count >= VAR(m_mask))\r\n            {\r\n                size_t arraySize = m_arraySize($);\r\n                size_t mask = VAR(m_mask);\r\n                atomic<T>* newArray = new atomic<T> [arraySize * 2];\r\n                atomic<T>* arr = m_array($);\r\n                //!!! 
for (size_t i = 0; i != arraySize; ++i)\r\n                for (size_t i = 0; i != count; ++i)\r\n                    newArray[i].store(arr[(i + head) & mask].load(memory_order_seq_cst), memory_order_relaxed);\r\n                delete [] VAR(m_array);\r\n                VAR(m_array) = newArray;\r\n                VAR(m_arraySize) = arraySize * 2;\r\n                m_headIndex.store(0, memory_order_release);\r\n                m_tailIndex.store(count, memory_order_release);\r\n                tail = count;\r\n                VAR(m_mask) = (mask * 2) | 1;\r\n            }\r\n            VAR(m_array)[tail & VAR(m_mask)].store(item, memory_order_relaxed);\r\n            m_tailIndex.store(tail + 1, memory_order_release);\r\n            m_foreignLock.unlock($);\r\n        }\r\n    }\r\n\r\n    bool pop(T& item)\r\n    {\r\n        size_t tail = m_tailIndex.load(memory_order_acquire);\r\n        if (tail == 0)\r\n            return false;\r\n        tail -= 1;\r\n        m_tailIndex.store(tail, memory_order_release);\r\n        atomic_thread_fence(memory_order_seq_cst);\r\n        if (m_headIndex.load(memory_order_acquire) <= tail)\r\n        {\r\n            item = VAR(m_array)[tail & VAR(m_mask)].load(memory_order_relaxed);\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            m_foreignLock.lock($);\r\n            if (m_headIndex.load(memory_order_acquire) <= tail)\r\n            {\r\n                item = VAR(m_array)[tail & VAR(m_mask)].load(memory_order_relaxed);\r\n                m_foreignLock.unlock($);\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                m_tailIndex.store(tail + 1, memory_order_release);\r\n                m_foreignLock.unlock($);\r\n                return false;\r\n            }\r\n        }\r\n    }\r\n\r\n    bool steal(T& item)\r\n    {\r\n        if (false == m_foreignLock.try_lock($))\r\n            return false;\r\n        size_t head = 
m_headIndex.load(memory_order_acquire);\r\n        m_headIndex.store(head + 1, memory_order_release);\r\n        atomic_thread_fence(memory_order_seq_cst);\r\n        if (head < m_tailIndex.load(memory_order_acquire))\r\n        {\r\n            item = VAR(m_array)[head & VAR(m_mask)].load(memory_order_relaxed);\r\n            m_foreignLock.unlock($);\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            m_headIndex.store(head, memory_order_release);\r\n            m_foreignLock.unlock($);\r\n            return false;\r\n        }\r\n    }\r\n\r\nprivate:\r\n    static size_t const initial_size = 2;\r\n    var<atomic<T>*> m_array;\r\n    var<size_t> m_mask;\r\n    var<size_t> m_arraySize;\r\n    atomic<size_t> m_headIndex;\r\n    atomic<size_t> m_tailIndex;\r\n    mutex m_foreignLock;\r\n};\r\n\r\n\r\n\r\n\r\nstruct ws_deque_test0 : rl::test_suite<ws_deque_test0, 4>\r\n{\r\n    ws_deque<int> q;\r\n\r\n    void before()\r\n    {\r\n    }\r\n\r\n    void after()\r\n    {\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push(10);\r\n            }\r\n\r\n            for (size_t i = 0; i != 5; ++i)\r\n            {\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push(10);\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push(10);\r\n                q.push(10);\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n                p = 0;\r\n                res = q.pop(p);\r\n      
          RL_ASSERT(10 == p || false == res);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push(10);\r\n                q.push(10);\r\n                q.push(10);\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n\r\n            for (size_t i = 0; i != 14; ++i)\r\n            {\r\n                q.push(10);\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n        }\r\n        else\r\n        {\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                int p = 0;\r\n                bool res = q.steal(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct ws_deque_test : rl::test_suite<ws_deque_test, 2>\r\n{\r\n    ws_deque<int> q;\r\n    bool state [2];\r\n\r\n    void before()\r\n    {\r\n        state[0] = true;\r\n        state[1] = true;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        RL_ASSERT(state[0] == false);\r\n        RL_ASSERT(state[1] == false);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            q.push(1);\r\n            q.push(2);\r\n\r\n            int item = 0;\r\n            bool res = q.pop(item);\r\n            RL_ASSERT(res && item == 2);\r\n            RL_ASSERT(state[1]);\r\n            state[1] = false;\r\n\r\n            item = 0;\r\n            res = q.pop(item);\r\n            if (res)\r\n            {\r\n                RL_ASSERT(state[0]);\r\n                state[0] = false;\r\n            }\r\n\r\n            item = 0;\r\n            res = q.pop(item);\r\n            RL_ASSERT(res == false);\r\n        }\r\n        else\r\n        {\r\n            int item = 0;\r\n            bool res = q.steal(item);\r\n            if 
(res)\r\n            {\r\n                RL_ASSERT(item == 1);\r\n                RL_ASSERT(state[0]);\r\n                state[0] = false;\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::simulate<ws_deque_test0>();\r\n    rl::simulate<ws_deque_test>();\r\n}\r\n \r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque2/msvc8/ws_deque.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ws_deque\", \"ws_deque.vcproj\", \"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque2/msvc8/ws_deque.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"ws_deque\"\r\n\tProjectGUID=\"{ECB64178-A35E-4EB2-9EB0-BD72D6F7B6E4}\"\r\n\tRootNamespace=\"ws_deque\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"V
CXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t
\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\ws_deque.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque2/stdafx.cpp",
    "content": "// stdafx.cpp : source file that includes just the standard includes\r\n// ws_deque.pch will be the pre-compiled header\r\n// stdafx.obj will contain the pre-compiled type information\r\n\r\n#include \"stdafx.h\"\r\n\r\n// TODO: reference any additional headers you need in STDAFX.H\r\n// and not in this file\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque2/stdafx.h",
    "content": "#pragma once\r\n\r\n\r\n#include \"../../relacy/pch.hpp\"\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/example/ws_deque2/ws_deque.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n\r\nstruct pdr\r\n{\r\n    __declspec(thread) static pdr* instance;\r\n\r\n    static size_t const defer_limit = 1024;\r\n\r\n    typedef void(*dtor_f)(void*);\r\n    struct entry_t\r\n    {\r\n        dtor_f dtor;\r\n        void* ctx;\r\n    };\r\n    entry_t defer_list [defer_limit];\r\n    size_t pos;\r\n    size_t pos0;\r\n    size_t thread_count;\r\n\r\n    size_t th [4];\r\n\r\n    void init(size_t count)\r\n    {\r\n        //assert(0 == instance);\r\n        instance = this;\r\n\r\n        thread_count = count;\r\n        pos = 0;\r\n        pos0 = 0;\r\n        for (size_t i = 0; i != thread_count; ++i)\r\n        {\r\n            th[i] = defer_limit;\r\n        }\r\n    }\r\n\r\n    void fini()\r\n    {\r\n        for (size_t i = 0; i != thread_count; ++i)\r\n        {\r\n            assert(th[i] == defer_limit);\r\n        }\r\n\r\n        for (size_t i = pos0; i != pos; ++i)\r\n        {\r\n            assert(defer_list[i].dtor);\r\n            defer_list[i].dtor(defer_list[i].ctx);\r\n        }\r\n        assert(this == instance);\r\n        instance = 0;\r\n    }\r\n\r\n    void lock()\r\n    {\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n\r\n        assert(th[rl::ctx().threadx_->index_] == defer_limit);\r\n        th[rl::ctx().threadx_->index_] = pos;\r\n    }\r\n\r\n    void unlock()\r\n    {\r\n        assert(th[rl::ctx().threadx_->index_] != defer_limit);\r\n        th[rl::ctx().threadx_->index_] = defer_limit;\r\n        pump();\r\n    }\r\n\r\n    template<typename T>\r\n    static void dtor_impl(void* p)\r\n    {\r\n        RL_DELETE(static_cast<T*>(p));\r\n    }\r\n\r\n    template<typename T>\r\n    void defer(T* p)\r\n    {\r\n        std::atomic_thread_fence($)(std::memory_order_seq_cst);\r\n\r\n        assert(pos < defer_limit);\r\n        entry_t& e = defer_list[pos++];\r\n        e.dtor = &pdr::dtor_impl<T>;\r\n        e.ctx = p;\r\n        pump();\r\n    }\r\n\r\n    
void pump()\r\n    {\r\n        if (pos0 == pos)\r\n            return;\r\n        size_t min_pos = pos;\r\n        for (size_t i = 0; i != thread_count; ++i)\r\n        {\r\n            if (th[i] < min_pos)\r\n                min_pos = th[i];\r\n        }\r\n        for (size_t i = pos0; i != min_pos; ++i)\r\n        {\r\n            assert(defer_list[i].dtor);\r\n            defer_list[i].dtor(defer_list[i].ctx);\r\n        }\r\n        pos0 = min_pos;\r\n    }\r\n};\r\n\r\npdr* pdr::instance = 0;\r\n\r\nvoid pdr_lock()\r\n{\r\n    assert(pdr::instance);\r\n    pdr::instance->lock();\r\n}\r\n\r\nvoid pdr_unlock()\r\n{\r\n    assert(pdr::instance);\r\n    pdr::instance->unlock();\r\n}\r\n\r\ntemplate<typename T>\r\nvoid pdr_defer(T* p)\r\n{\r\n    assert(pdr::instance);\r\n    pdr::instance->defer(p);\r\n}\r\n\r\n\r\n\r\nclass ws_deque\r\n{\r\npublic:\r\n\r\n    ws_deque()\r\n    {\r\n\t\tbottom_.block_($) = 0;\r\n\t\tbottom_.real_block_id_ = 0;\r\n\t\tbottom_.real_index_ = 0;\r\n\t\tbottom_.block_id_ = 0;\r\n\t\tbottom_.index_ = 0;\r\n\t\tbottom_.block_seq_ = 0;\r\n\t\tbottom_.check_order_ = 1;\r\n\r\n        top::info t = {};\r\n\t\ttop_.block_($) = 0;\r\n        top_.info_($) = t;\r\n\r\n\t\talloc_block();\r\n\t\tbottom_.block_id_ = bottom_.block_($)->header_.id_;\r\n\t\ttop_.block_($) = bottom_.block_($);\r\n\t\tt.top_block_id_ = static_cast<unsigned short>(top_.block_($).load()->header_.id_);\r\n\t\tt.bottom_block_id_ = static_cast<unsigned short>(top_.block_($).load()->header_.id_);\r\n        top_.info_($) = t;\r\n    }\r\n\r\n\t~ws_deque()\r\n\t{\r\n\t\tfor (block* p = top_.block_($), *next; p; p = next)\r\n\t\t{\r\n\t\t\tnext = p->header_.next_($).load(std::memory_order_relaxed);\r\n            RL_DELETE(p);\r\n\t\t}\r\n\t}\r\n\r\n    void push(void* const& i)\r\n    {\r\n        pdr_lock();\r\n\r\n        push_unbalanced(i);\r\n        rebalance();\r\n\r\n        pdr_unlock();\r\n    }\r\n\r\n    void push_unbalanced(void* i)\r\n    {\r\n        
RL_ASSERT(bottom_.block_($)->header_.id_);\r\n\r\n        bottom_.block_($)->data_[bottom_.real_index_]($).store(i, std::memory_order_release);\r\n\r\n\t\tif (block::item_count - 1 != bottom_.real_index_)\r\n\t\t{\r\n\t\t\tbottom_.real_index_ += 1;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\talloc_block();\r\n\t\t}\r\n    }\r\n\r\n    void rebalance()\r\n    {\r\n\t\tif (0 == --bottom_.check_order_)\r\n\t\t{\r\n\t\t\tcheck_bottom();\r\n\t\t}\r\n    }\r\n\r\n    void* pop()\r\n    {\r\n        pdr_lock();\r\n\r\n        rebalance();\r\n        void* p = pop_unbalanced();\r\n\r\n        pdr_unlock();\r\n\r\n        return p;\r\n    }\r\n\r\n    void* pop_unbalanced()\r\n    {\r\n\t\t//!!! optimize\r\n\r\n\t\t//! fast-path for empty deque\r\n\r\n\t\t//! make comparasion faster\r\n\r\n\t\tif ((bottom_.block_id_ != bottom_.real_block_id_\r\n            || bottom_.index_ != bottom_.real_index_)\r\n\t\t\t&& bottom_.real_index_)\r\n\t\t{\r\n\t\t\tbottom_.real_index_ -= 1;\r\n            void* i = bottom_.block_($)->data_[bottom_.real_index_]($).load(std::memory_order_consume);\r\n\t\t\treturn i;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\treturn pop_unbalanced_slow();\r\n\t\t}\r\n    }\r\n\r\n    void* pop_unbalanced_slow()\r\n\t{\r\n\t\tif (0 == bottom_.real_index_)\r\n\t\t{\r\n            if (bottom_.real_block_id_ > bottom_.block_id_)\r\n            {\r\n\t\t\t    return pop_slow();\r\n            }\r\n            else\r\n            {\r\n\t\t\t    return 0;\r\n            }\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tvoid* i;\r\n\t\t\tpop_check_result const rv = pop_check(i);\r\n\t\t\tif (pop_check_cont != rv)\r\n\t\t\t\treturn pop_check_succ == rv ? 
i : 0;\r\n\t\t\treturn pop_unbalanced(); // recursion, must succeed\r\n\t\t}\r\n\t}\r\n\r\n    void* steal()\r\n    {\r\n        pdr_lock();\r\n\r\nretry:\r\n\t\tfor (;;)\r\n\t\t{\r\n            block* old_b = top_.block_($).load(std::memory_order_acquire);\r\n\t\t    block* b = old_b;\r\n\t\t\ttop::info old = top_.info_($).load(std::memory_order_consume);\r\n\r\n\t\t\tif (old.top_index_ == old.bottom_index_\r\n\t\t\t\t&& old.top_block_id_ == old.bottom_block_id_)\r\n\t\t\t{\r\n                pdr_unlock();\r\n\t\t\t\treturn 0;\r\n\t\t\t}\r\n\r\n\t\t\tif (b->header_.id_ != old.top_block_id_)\r\n\t\t\t{\r\n\t\t\t\tdo\r\n\t\t\t\t{\r\n\t\t\t\t\tb = b->header_.next_($).load(std::memory_order_relaxed);\r\n                    //RL_ASSERT(b);\r\n                    //!!! temp stub - is it right?\r\n                    // it seems that we always return 0 after we hit this goto\r\n                    if (0 == b)\r\n                        goto retry;\r\n\t\t\t\t}\r\n\t\t\t\t//!!! AV\r\n\t\t\t\t// b == 0\r\n\t\t\t\twhile (b->header_.id_ != old.top_block_id_);\r\n\r\n                if (top_.block_($).compare_swap(old_b, b, std::memory_order_seq_cst))\r\n\t\t\t\t{\r\n\t\t\t\t\tblock* cur_b = old_b;\r\n\t\t\t\t\tdo\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tpdr_defer(cur_b);\r\n\t\t\t\t\t\tcur_b = cur_b->header_.next_($).load(std::memory_order_relaxed);\r\n\t\t\t\t\t}\r\n\t\t\t\t\twhile (cur_b != b);\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\tblock* next_block = 0;\r\n\t\t\ttop::info mod = old;\r\n\r\n            void* i = b->data_[mod.top_index_]($).load(std::memory_order_consume);\r\n\r\n\t\t\tif (block::item_count - 1 == mod.top_index_)\r\n\t\t\t{\r\n\t\t\t\tnext_block = b->header_.next_($).load(std::memory_order_relaxed);\r\n\t\t\t\tmod.top_block_id_ += 1;\r\n\t\t\t\tmod.top_index_ = 0;\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tmod.top_index_ += 1;\r\n\t\t\t}\r\n\r\n\t\t\tif (top_.info_($).compare_swap(old, mod, std::memory_order_seq_cst))\r\n\t\t\t{\r\n\t\t\t\tif 
(next_block)\r\n\t\t\t\t{\r\n\t\t\t\t\tif (top_.block_($).compare_swap(b, next_block))\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tpdr_defer(b);\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\r\n                pdr_unlock();\r\n\t\t\t\treturn i;\r\n\t\t\t}\r\n\t\t}\r\n    }\r\n\r\n\tunsigned size() const\r\n\t{\r\n        top::info const top = top_.info_($).load(std::memory_order_relaxed);\r\n\t\t//unsigned volatile const top_block_id = top_.info_.part_.top_block_id_;\r\n\t\t//unsigned volatile const top_index = top_.info_.part_.top_index_;\r\n\r\n\t\tif (bottom_.real_block_id_ == top.top_block_id_)\r\n\t\t{\r\n\t\t\tunsigned const size = bottom_.real_index_ - top.top_index_;\r\n\t\t\treturn size;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tunsigned size = bottom_.real_index_;\r\n\t\t\tsize += block::item_count - top.top_index_;\r\n\t\t\tsize += (bottom_.real_block_id_ - top.top_block_id_ - 1) * block::item_count;\r\n\t\t\treturn size;\r\n\t\t}\r\n\t}\r\n\r\nprivate:\r\n\tstruct block\r\n\t{\r\n\t\tstruct header\r\n\t\t{\r\n            std::atomic<block*> next_;\r\n            std::atomic<block*> prev_;\r\n\t\t\tws_deque* deque_;\r\n\t\t\tunsigned id_;\r\n\r\n            //!!!\r\n            ~header()\r\n            {\r\n                id_ = 0;\r\n            }\r\n\t\t};\r\n\t\tstatic unsigned const item_count = 2;\r\n\r\n\t\theader header_;\r\n        std::atomic<void*> data_ [item_count];\r\n\t};\r\n\r\n\tstruct bottom\r\n\t{\r\n        rl::var<block*> block_;\r\n\t\t\r\n\t\tunsigned check_order_;\r\n\r\n\t\tunsigned real_block_id_;\r\n\t\tunsigned real_index_;\r\n\r\n\t\tunsigned block_id_;\r\n\t\tunsigned index_;\r\n\r\n\t\tunsigned block_seq_;\r\n\t};\r\n\r\n\tstruct top\r\n\t{\r\n\t\tstruct info\r\n\t\t{\r\n\t\t\tunsigned short top_index_;\r\n\t\t\tunsigned short top_block_id_;\r\n\t\t\tunsigned short bottom_index_;\r\n\t\t\tunsigned short bottom_block_id_;\r\n\r\n            bool operator == (info const& x) const\r\n            {\r\n                return top_index_ == x.top_index_\r\n      
              && top_block_id_ == x.top_block_id_\r\n                    && bottom_index_ == x.bottom_index_\r\n                    && bottom_block_id_ == x.bottom_block_id_;\r\n            }\r\n\r\n            friend std::ostream& operator << (std::ostream& ss, info const& x)\r\n            {\r\n                return ss << \"{\" << x.top_index_\r\n                    << \",\" << x.top_block_id_\r\n                    << \",\" << x.bottom_index_\r\n                    << \",\" << x.bottom_block_id_ << \"}\";\r\n            }\r\n\t\t};\r\n\r\n        std::atomic<block*> block_;\r\n        std::atomic<info> info_;\r\n\t};\r\n\r\n\tbottom bottom_;\r\n\r\n\tchar pad1 [64];\r\n\r\n\ttop top_;\r\n\r\n\tchar pad2 [64];\r\n\r\n\tvoid alloc_block()\r\n\t{\r\n\t\t//!!! check whether we already have next block in \r\n\t\t// bottom_.block_->header_.next_\r\n\t\tblock* b = bottom_.block_($) ? bottom_.block_($)->header_.next_($).load(std::memory_order_relaxed) : 0;\r\n\t\tif (0 == b)\r\n\t\t{\r\n\t\t\tb = RL_NEW block;\r\n\t\t\tb->header_.deque_ = this;\r\n\t\t\tbottom_.block_seq_ += 1;\r\n\r\n\t\t\t//!!!\r\n\t\t\tif (bottom_.block_seq_ > 0xffff) __asm int 3;\r\n\r\n\t\t\tbottom_.block_seq_ &= 0xffff;\r\n\t\t\tb->header_.id_ = bottom_.block_seq_;\r\n            b->header_.prev_($).store(bottom_.block_($), std::memory_order_relaxed);\r\n\t\t\tif (bottom_.block_($))\r\n\t\t\t\tbottom_.block_($)->header_.next_($).store(b, std::memory_order_relaxed);\r\n\t\t\tb->header_.next_($).store(0, std::memory_order_relaxed);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n            b = b;\r\n\t\t\tbottom_.block_seq_ += 1;\r\n\t\t\t//__asm int 3;\r\n\t\t}\r\n\t\tbottom_.block_($) = b;\r\n\t\tbottom_.real_block_id_ = b->header_.id_;\r\n\t\tbottom_.real_index_ = 0;\r\n\t}\r\n\r\n\tenum pop_check_result {pop_check_fail, pop_check_succ, pop_check_cont};\r\n\r\n\tpop_check_result pop_check(void*& i)\r\n\t{\r\n\t\tcheck_bottom();\r\n\r\n\t\tif (bottom_.block_id_ == bottom_.real_block_id_\r\n\t\t\t&& 
bottom_.index_ == bottom_.real_index_)\r\n\t\t{\r\n            top::info const top = top_.info_($).load(std::memory_order_seq_cst);\r\n\r\n\t\t\tif ((bottom_.block_id_ == top.top_block_id_\r\n\t\t\t\t\t&& bottom_.index_ == (unsigned)top.top_index_ + 1)\r\n\t\t\t\t|| (bottom_.block_id_ == (unsigned)top.top_block_id_ + 1\r\n\t\t\t\t\t&& block::item_count - 1 == top.top_index_\r\n\t\t\t\t\t&& 0 == bottom_.index_ ))\r\n\t\t\t{\r\n                __asm int 3;\r\n                i = steal();\r\n\t\t\t\tif (i)\r\n\t\t\t\t\treturn pop_check_succ;\r\n\t\t\t}\r\n\r\n\t\t\treturn pop_check_fail;\r\n\t\t}\r\n\r\n\t\treturn pop_check_cont;\r\n\t}\r\n\r\n\tvoid* pop_slow()\r\n\t{\r\n\t\tbottom_.block_seq_ -= 1;\r\n\t\tbottom_.block_seq_ &= 0xffff;\r\n        bottom_.block_($) = bottom_.block_($)->header_.prev_($).load(std::memory_order_relaxed);\r\n\r\n\t\t//!!! AV: when core count set to 16\r\n\t\t// bottom_.block_ = 0\r\n\t\t// bottom.real_block_id = 1\r\n\t\t// bottom.block_id = 8\r\n\r\n\t\t//!!! AV in xscale too (thread count is 4)\r\n\t\t// the same variables values\r\n\t\tbottom_.real_block_id_ = bottom_.block_($)->header_.id_;\r\n\t\tbottom_.real_index_ = block::item_count - 1;\r\n\r\n        top::info i = top_.info_($).load(std::memory_order_relaxed);\r\n\r\n        RL_ASSERT(bottom_.block_($)->header_.id_ == bottom_.block_seq_);\r\n        RL_ASSERT((bottom_.real_block_id_ == i.bottom_block_id_ && bottom_.real_index_ >= i.bottom_index_)\r\n            || (bottom_.real_block_id_ > i.bottom_block_id_));\r\n\r\n        void* v = bottom_.block_($)->data_[block::item_count - 1]($).load(std::memory_order_consume);\r\n        return v;\r\n\t}\r\n\r\n\tvoid check_bottom()\r\n\t{\r\n\t\t//!!! 
must leave at least 1 element unreserved\r\n\t\t// because owner have to steal it\r\n\r\n\t\tfor (;;)\r\n\t\t{\r\n            top::info old = top_.info_($).load(std::memory_order_relaxed);\r\n\r\n\t\t\tunsigned const top_block_id = old.top_block_id_;\r\n\t\t\tunsigned const top_index = old.top_index_;\r\n\r\n\t\t\tif (bottom_.real_block_id_ == top_block_id\r\n\t\t\t\t&& bottom_.real_index_ == top_index)\r\n\t\t\t{\r\n\t\t\t\tbottom_.check_order_ = 2;\r\n\t\t\t\treturn;\r\n\t\t\t}\r\n\t\t\tunsigned const s = size();\r\n\t\t\tunsigned const r = reserved();\r\n\t\t\tif (!(0 == r || (r > 1 && 4*r > 3*s)))\r\n\t\t\t{\r\n\t\t\t\t//bottom_.check_order_ = 2;\r\n\t\t\t\t//!!! bottom_.check_order_ = s / 8 + 2;\r\n\t\t\t\tbottom_.check_order_ = s / 2 + 2;\r\n\t\t\t\treturn;\r\n\t\t\t}\r\n\t\t\tunsigned r2 = s*3/4 + 1;\r\n\t\t\tif (r2 >= s)\r\n\t\t\t\tr2 = s - 1;\r\n\t\t\tunsigned bottom_block_id;\r\n\t\t\tunsigned bottom_index;\r\n\t\t\tif (r2 + top_index < block::item_count)\r\n\t\t\t{\r\n\t\t\t\tbottom_block_id = top_block_id;\r\n\t\t\t\tbottom_index = top_index + r2;\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tunsigned const r3 = r2 + top_index;\r\n\t\t\t\tbottom_block_id = top_block_id + r3 / block::item_count;\r\n\t\t\t\tbottom_index = r3 % block::item_count;\r\n\t\t\t}\r\n\t\t\ttop::info i;\r\n\t\t\ti.top_block_id_ = static_cast<unsigned short>(top_block_id);\r\n\t\t\ti.top_index_ = static_cast<unsigned short>(top_index);\r\n\t\t\ti.bottom_block_id_ = static_cast<unsigned short>(bottom_block_id);\r\n\t\t\ti.bottom_index_ = static_cast<unsigned short>(bottom_index);\r\n\r\n\t\t\t/*\r\n\t\t\tbottom volatile btm = bottom_;\r\n\t\t\tif (i.part_.top_block_id_ > i.part_.bottom_block_id_)\r\n\t\t\t\t__asm int 3;\r\n\t\t\tif (i.part_.top_block_id_ == i.part_.bottom_block_id_\r\n\t\t\t\t&& i.part_.top_index_ >= i.part_.bottom_index_)\r\n\t\t\t\t__asm int 3;\r\n\t\t\tif (i.part_.bottom_block_id_ > btm.real_block_id_)\r\n\t\t\t\t__asm int 3;\r\n\t\t\tif 
(i.part_.bottom_block_id_ == btm.real_block_id_\r\n\t\t\t\t&& i.part_.bottom_index_ > btm.real_index_)\r\n\t\t\t\t__asm int 3;\r\n\t\t\t*/\r\n\r\n            if (top_.info_($).compare_swap(old, i, std::memory_order_seq_cst))\r\n\t\t\t{\r\n\t\t\t\tbottom_.block_id_ = bottom_block_id;\r\n\t\t\t\tbottom_.index_ = bottom_index;\r\n\t\t\t\t//!!! bottom_.check_order_ = s / 8 + 2;\r\n\t\t\t\tbottom_.check_order_ = s / 2 + 2;\r\n\r\n\t\t\t\treturn;\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tunsigned reserved() const\r\n\t{\r\n\t\tif (bottom_.real_block_id_ == bottom_.block_id_)\r\n\t\t{\r\n\t\t\tunsigned const reserved = bottom_.real_index_ - bottom_.index_;\r\n\t\t\treturn reserved;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tunsigned reserved = bottom_.real_index_;\r\n\t\t\treserved += block::item_count - bottom_.index_;\r\n\t\t\treserved += (bottom_.real_block_id_ - bottom_.block_id_ - 1) * block::item_count;\r\n\t\t\treturn reserved;\r\n\t\t}\r\n\t}\r\n};\r\n\r\nint x = 0;\r\n\r\nstruct ws_deque_test : rl::test_suite<ws_deque_test, 4>\r\n{\r\n    ws_deque q;\r\n    pdr p;\r\n\r\n    void before()\r\n    {\r\n        p.init(4);\r\n    }\r\n\r\n    void after()\r\n    {\r\n        p.fini();\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push((void*)10);\r\n            }\r\n\r\n            for (size_t i = 0; i != 5; ++i)\r\n            {\r\n                void* p = q.pop();\r\n                RL_ASSERT((void*)10 == p || 0 == p);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push((void*)10);\r\n                void* p = q.pop();\r\n                RL_ASSERT((void*)10 == p || 0 == p);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push((void*)10);\r\n                q.push((void*)10);\r\n                void* p = q.pop();\r\n           
     RL_ASSERT((void*)10 == p || 0 == p);\r\n                p = q.pop();\r\n                RL_ASSERT((void*)10 == p || 0 == p);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push((void*)10);\r\n                q.push((void*)10);\r\n                q.push((void*)10);\r\n                void* p = q.pop();\r\n                RL_ASSERT((void*)10 == p || 0 == p);\r\n            }\r\n\r\n            for (size_t i = 0; i != 14; ++i)\r\n            {\r\n                q.push((void*)10);\r\n                void* p = q.pop();\r\n                RL_ASSERT((void*)10 == p || 0 == p);\r\n            }\r\n        }\r\n        else\r\n        {\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                void* p = q.steal();\r\n                RL_ASSERT((void*)10 == p || 0 == p);\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params p;\r\n    p.iteration_count = 1000000;\r\n    rl::simulate<ws_deque_test>(p);\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/atomic.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_ATOMIC_HPP\r\n#define RL_ATOMIC_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context.hpp\"\r\n#include \"memory_order.hpp\"\r\n#include \"signature.hpp\"\r\n#include \"atomic_events.hpp\"\r\n#include \"waitset.hpp\"\r\n#include \"rmw.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<typename T>\r\nclass atomic;\r\n\r\n\r\ntemplate<bool> struct bool_t {};\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass atomic_proxy_const\r\n{\r\npublic:\r\n    atomic_proxy_const(atomic<T> const /*volatile*/& var, debug_info_param info)\r\n        : var_(const_cast<atomic<T>&>(var))\r\n        , info_(info)\r\n    {\r\n    }\r\n\r\n    T load(memory_order mo = mo_seq_cst) const\r\n    {\r\n        return var_.load(mo, info_);\r\n    }\r\n\r\n    operator T () const\r\n    {\r\n        return load();\r\n    }\r\n\r\nprotected:\r\n    atomic<T>& var_;\r\n    debug_info info_;\r\n\r\n    atomic_proxy_const& operator = (atomic_proxy_const const&);\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass atomic_proxy : public atomic_proxy_const<T>\r\n{\r\npublic:\r\n    typedef typename atomic_add_type<T>::type add_type;\r\n\r\n    atomic_proxy(atomic<T> /*volatile*/& var, debug_info_param info)\r\n        : atomic_proxy_const<T>(var, info)\r\n    {\r\n    }\r\n\r\n    void store(T value, memory_order mo = mo_seq_cst)\r\n    {\r\n        this->var_.store(value, mo, this->info_);\r\n    }\r\n\r\n    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo = mo_seq_cst)\r\n    {\r\n        
return this->var_.compare_exchange(bool_t<true>(), cmp, xchg, mo, this->info_);\r\n    }\r\n\r\n    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, memory_order failure_mo)\r\n    {\r\n        return this->var_.compare_exchange(bool_t<true>(), cmp, xchg, mo, failure_mo, this->info_);\r\n    }\r\n\r\n    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo = mo_seq_cst)\r\n    {\r\n        return this->var_.compare_exchange(bool_t<false>(), cmp, xchg, mo, this->info_);\r\n    }\r\n\r\n    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, memory_order failure_mo)\r\n    {\r\n        return this->var_.compare_exchange(bool_t<false>(), cmp, xchg, mo, failure_mo, this->info_);\r\n    }\r\n\r\n    T exchange(T xchg, memory_order mo = mo_seq_cst)\r\n    {\r\n        return this->var_.rmw(rmw_type_t<rmw_type_swap>(), xchg, mo, this->info_);\r\n    }\r\n\r\n    T fetch_add(add_type value, memory_order mo = mo_seq_cst)\r\n    {\r\n        return this->var_.rmw(rmw_type_t<rmw_type_add>(), value, mo, this->info_);\r\n    }\r\n\r\n    T fetch_sub(add_type value, memory_order mo = mo_seq_cst)\r\n    {\r\n        return this->var_.rmw(rmw_type_t<rmw_type_sub>(), value, mo, this->info_);\r\n    }\r\n\r\n    T fetch_and(T value, memory_order mo = mo_seq_cst)\r\n    {\r\n        return this->var_.rmw(rmw_type_t<rmw_type_and>(), value, mo, this->info_);\r\n    }\r\n\r\n    T fetch_or(T value, memory_order mo = mo_seq_cst)\r\n    {\r\n        return this->var_.rmw(rmw_type_t<rmw_type_or>(), value, mo, this->info_);\r\n    }\r\n\r\n    T fetch_xor(T value, memory_order mo = mo_seq_cst)\r\n    {\r\n        return this->var_.rmw(rmw_type_t<rmw_type_xor>(), value, mo, this->info_);\r\n    }\r\n\r\n    T operator = (T value)\r\n    {\r\n        store(value);\r\n        return value;\r\n    }\r\n\r\n    T operator ++ (int)\r\n    {\r\n        return fetch_add(1);\r\n    }\r\n\r\n    T operator -- (int)\r\n    {\r\n        return fetch_sub(1);\r\n    }\r\n\r\n 
   T operator ++ ()\r\n    {\r\n        return fetch_add(1) + 1;\r\n    }\r\n\r\n    T operator -- ()\r\n    {\r\n        return fetch_sub(1) - 1;\r\n    }\r\n\r\n    T operator += (add_type value)\r\n    {\r\n        return fetch_add(value) + value;\r\n    }\r\n\r\n    T operator -= (add_type value)\r\n    {\r\n        return fetch_sub(value) + value;\r\n    }\r\n\r\n    T operator &= (T value)\r\n    {\r\n        return fetch_and(value) & value;\r\n    }\r\n\r\n    T operator |= (T value)\r\n    {\r\n        return fetch_or(value) | value;\r\n    }\r\n\r\n    T operator ^= (T value)\r\n    {\r\n        return fetch_xor(value) ^ value;\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T, bool strong_init>\r\nclass generic_atomic\r\n{\r\npublic:\r\n    generic_atomic()\r\n    {\r\n        context& c = ctx();\r\n        RL_VERIFY(false == c.invariant_executing);\r\n        impl_ = c.atomic_ctor(this);\r\n        initialized_ = false;\r\n        value_ = T();\r\n        already_failed_ = false;\r\n\r\n        if (val(strong_init))\r\n        {\r\n            unsigned const index = c.threadx_->atomic_init(impl_);\r\n            last_index_ = index;\r\n            initialized_ = true;\r\n            history_[index] = T();\r\n            value_ = T();\r\n        }\r\n    }\r\n\r\n    ~generic_atomic()\r\n    {\r\n        context& c = ctx();\r\n        RL_VERIFY(false == c.invariant_executing);\r\n        sign_.check($);\r\n        c.atomic_dtor(impl_);\r\n    }\r\n\r\n    T debug_value() const\r\n    {\r\n        sign_.check($);\r\n        return value_;\r\n    }\r\n\r\n    RL_INLINE\r\n    T load(memory_order mo, debug_info_param info) const\r\n    {\r\n        RL_VERIFY(mo_release != mo);\r\n        RL_VERIFY(mo_acq_rel != mo);\r\n\r\n        switch (mo)\r\n        {\r\n        case mo_relaxed: return load_impl<mo_relaxed, &thread_info_base::atomic_load_relaxed>(info);\r\n        case mo_consume: return load_impl<mo_consume, 
&thread_info_base::atomic_load_acquire>(info);\r\n        case mo_acquire: return load_impl<mo_acquire, &thread_info_base::atomic_load_acquire>(info);\r\n        case mo_seq_cst: return load_impl<mo_seq_cst, &thread_info_base::atomic_load_seq_cst>(info);\r\n        default: break;\r\n        }\r\n\r\n        RL_VERIFY(false);\r\n        return T();\r\n    }\r\n\r\n    RL_INLINE\r\n    void store(T v, memory_order mo, debug_info_param info)\r\n    {\r\n        RL_VERIFY(mo_acquire != mo);\r\n        RL_VERIFY(mo_acq_rel != mo);\r\n\r\n        switch (mo)\r\n        {\r\n        case mo_relaxed: return store_impl<mo_relaxed, &thread_info_base::atomic_store_relaxed>(v, info);\r\n        case mo_release: return store_impl<mo_release, &thread_info_base::atomic_store_release>(v, info);\r\n        case mo_seq_cst: return store_impl< mo_seq_cst, &thread_info_base::atomic_store_seq_cst>(v, info);\r\n        default: break;\r\n        }\r\n\r\n        RL_VERIFY(false);\r\n    }\r\n\r\n    RL_INLINE\r\n    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, debug_info_param info)\r\n    {\r\n        return compare_exchange(bool_t<true>(), cmp, xchg, mo, info);\r\n    }\r\n\r\n    RL_INLINE\r\n    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, debug_info_param info)\r\n    {\r\n        return compare_exchange(bool_t<false>(), cmp, xchg, mo, info);\r\n    }\r\n\r\n    RL_INLINE\r\n    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, debug_info_param info, memory_order failure_mo, debug_info_param)\r\n    {\r\n        return compare_exchange(bool_t<true>(), cmp, xchg, mo, failure_mo, info);\r\n    }\r\n\r\n    RL_INLINE\r\n    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, debug_info_param info, memory_order failure_mo, debug_info_param)\r\n    {\r\n        return compare_exchange(bool_t<false>(), cmp, xchg, mo, failure_mo, info);\r\n    }\r\n\r\n    template<bool spurious_failures>\r\n    RL_INLINE\r\n    bool 
compare_exchange(bool_t<spurious_failures>, T& cmp, T xchg, memory_order mo, debug_info_param info)\r\n    {\r\n        switch (mo)\r\n        {\r\n        case mo_relaxed: return compare_swap_impl<spurious_failures, mo_relaxed, &thread_info_base::atomic_rmw_relaxed, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);\r\n        case mo_consume: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n        case mo_acquire: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n        case mo_release: return compare_swap_impl<spurious_failures, mo_release, &thread_info_base::atomic_rmw_release, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);\r\n        case mo_acq_rel: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n        case mo_seq_cst: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_seq_cst, &thread_info_base::atomic_load_seq_cst_rmw>(cmp, xchg, info);\r\n        }\r\n\r\n        RL_VERIFY(false);\r\n        return false;\r\n    }\r\n\r\n    template<bool spurious_failures>\r\n    RL_INLINE\r\n    bool compare_exchange(bool_t<spurious_failures>, T& cmp, T xchg, memory_order mo, memory_order failure_mo, debug_info_param info)\r\n    {\r\n        switch (mo)\r\n        {\r\n        case mo_relaxed:\r\n            {\r\n                RL_VERIFY(mo_relaxed == failure_mo);\r\n                return compare_swap_impl<spurious_failures, mo_relaxed, &thread_info_base::atomic_rmw_relaxed, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);\r\n            }\r\n        case mo_consume:\r\n            
{\r\n                RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo);\r\n                switch (failure_mo)\r\n                {\r\n                case mo_relaxed: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);\r\n                case mo_consume: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n                default: RL_VERIFY(false); return false;\r\n                }\r\n            }\r\n        case mo_acquire:\r\n            {\r\n                RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo || mo_acquire == failure_mo);\r\n                switch (failure_mo)\r\n                {\r\n                case mo_relaxed: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);\r\n                case mo_consume: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n                case mo_acquire: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n                default: RL_VERIFY(false); return false;\r\n                }\r\n            }\r\n        case mo_release:\r\n            {\r\n                RL_VERIFY(mo_relaxed == failure_mo);\r\n                return compare_swap_impl<spurious_failures, mo_release, &thread_info_base::atomic_rmw_release, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);\r\n            }\r\n        case mo_acq_rel:\r\n            {\r\n                RL_VERIFY(mo_relaxed == failure_mo || 
mo_consume == failure_mo || mo_acquire == failure_mo);\r\n                switch (failure_mo)\r\n                {\r\n                case mo_relaxed: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);\r\n                case mo_consume: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n                case mo_acquire: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n                default: RL_VERIFY(false); return false;\r\n                }\r\n            }\r\n        case mo_seq_cst:\r\n            {\r\n                RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo || mo_acquire == failure_mo || mo_seq_cst == failure_mo);\r\n                switch (failure_mo)\r\n                {\r\n                case mo_relaxed: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);\r\n                case mo_consume: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n                case mo_acquire: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);\r\n                case mo_seq_cst: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_seq_cst, &thread_info_base::atomic_load_seq_cst_rmw>(cmp, xchg, info);\r\n                default: RL_VERIFY(false); return false;\r\n                }\r\n            }\r\n      
  }\r\n\r\n        RL_VERIFY(false);\r\n        return false;\r\n    }\r\n\r\n    T exchange(T xchg, memory_order mo, debug_info_param info)\r\n    {\r\n        return rmw(rmw_type_t<rmw_type_swap>(), xchg, mo, info);\r\n    }\r\n\r\n    T fetch_add(typename atomic_add_type<T>::type value, memory_order mo, debug_info_param info)\r\n    {\r\n        return rmw(rmw_type_t<rmw_type_add>(), value, mo, info);\r\n    }\r\n\r\n    T fetch_sub(typename atomic_add_type<T>::type value, memory_order mo, debug_info_param info)\r\n    {\r\n        return rmw(rmw_type_t<rmw_type_sub>(), value, mo, info);\r\n    }\r\n\r\n    T fetch_and(T value, memory_order mo, debug_info_param info)\r\n    {\r\n        return rmw(rmw_type_t<rmw_type_and>(), value, mo, info);\r\n    }\r\n\r\n    T fetch_or(T value, memory_order mo, debug_info_param info)\r\n    {\r\n        return rmw(rmw_type_t<rmw_type_or>(), value, mo, info);\r\n    }\r\n\r\n    T fetch_xor(T value, memory_order mo, debug_info_param info)\r\n    {\r\n        return rmw(rmw_type_t<rmw_type_xor>(), value, mo, info);\r\n    }\r\n\r\n    template<typename Y, rmw_type_e type>\r\n    RL_INLINE\r\n    T rmw(rmw_type_t<type>, Y op, memory_order mo, debug_info_param info)\r\n    {\r\n        switch (mo)\r\n        {\r\n        case mo_relaxed: return rmw_impl<Y, mo_relaxed, &thread_info_base::atomic_rmw_relaxed>(rmw_type_t<type>(), op, info);\r\n        case mo_consume: return rmw_impl<Y, mo_consume, &thread_info_base::atomic_rmw_acquire>(rmw_type_t<type>(), op, info);\r\n        case mo_acquire: return rmw_impl<Y, mo_acquire, &thread_info_base::atomic_rmw_acquire>(rmw_type_t<type>(), op, info);\r\n        case mo_release: return rmw_impl<Y, mo_release, &thread_info_base::atomic_rmw_release>(rmw_type_t<type>(), op, info);\r\n        case mo_acq_rel: return rmw_impl<Y, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel>(rmw_type_t<type>(), op, info);\r\n        case mo_seq_cst: return rmw_impl<Y, mo_seq_cst, 
&thread_info_base::atomic_rmw_seq_cst>(rmw_type_t<type>(), op, info);\r\n        }\r\n\r\n        RL_VERIFY(false);\r\n        return T();\r\n    }\r\n\r\n    unpark_reason wait(context& c, bool is_timed, bool allow_spurious_wakeup, debug_info_param info)\r\n    {\r\n        sign_.check(info);\r\n        return c.threadx_->atomic_wait(impl_, is_timed, allow_spurious_wakeup, info);\r\n    }\r\n\r\n    thread_id_t wake(context& c, thread_id_t count, debug_info_param info)\r\n    {\r\n        sign_.check(info);\r\n        return c.threadx_->atomic_wake(impl_, count, info);\r\n    }\r\n\r\nprivate:\r\n    T value_;\r\n    T history_ [atomic_history_size];\r\n    atomic_data* impl_;\r\n    unsigned last_index_;\r\n    signature<987654321> sign_;\r\n    bool initialized_;\r\n    bool already_failed_;\r\n\r\n    template<memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data)>\r\n    T load_impl(debug_info_param info) const\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        if (false == c.invariant_executing)\r\n        {\r\n            unsigned const index = (c.threadx_->*impl)(impl_);\r\n            if ((unsigned)-1 == index)\r\n            {\r\n                RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();\r\n                RL_ASSERT_IMPL(false, test_result_unitialized_access, \"\", info);\r\n            }\r\n            T const v = history_[index];\r\n\r\n            RL_HIST(atomic_load_event<T>) {this, v, mo, last_index_ != index} RL_HIST_END();\r\n\r\n            return v;\r\n        }\r\n        else\r\n        {\r\n            if (false == initialized_)\r\n            {\r\n                RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();\r\n                RL_ASSERT_IMPL(false, test_result_unitialized_access, \"\", info);\r\n            }\r\n            return value_;\r\n        }\r\n    }\r\n\r\n    template<memory_order mo, unsigned 
(thread_info_base::*impl)(atomic_data* RL_RESTRICT data)>\r\n    void store_impl(T v, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        RL_VERIFY(false == c.invariant_executing);\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        unsigned const index = (c.threadx_->*impl)(impl_);\r\n        \r\n        T const prev = value_;\r\n        last_index_ = index;\r\n        history_[index] = v;\r\n        value_ = v;\r\n        initialized_ = true;\r\n        RL_HIST(atomic_store_event<T>) {this, prev, v, mo} RL_HIST_END();\r\n    }\r\n\r\n    template<bool spurious_failures, memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data, bool&), memory_order failure_mo, unsigned (thread_info_base::*failure_impl)(atomic_data* RL_RESTRICT data)>\r\n    bool compare_swap_impl(T& cmp, T xchg, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        RL_VERIFY(false == c.invariant_executing);\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        if (false == initialized_)\r\n        {\r\n            RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_unitialized_access, \"\", info);\r\n        }\r\n\r\n        bool success = false;\r\n        bool spurious_failure = false;\r\n        bool aba = false;\r\n\r\n        T const cmpv = cmp;\r\n        T const current = value_;\r\n        if (current == cmpv)\r\n        {\r\n            if (val(spurious_failures))\r\n            {\r\n                if (c.is_random_sched())\r\n                {\r\n                    spurious_failure = (0 == c.rand(4, sched_type_cas_fail));\r\n                }\r\n                else\r\n                {\r\n                    if (false == already_failed_)\r\n                    {\r\n                        spurious_failure = 0 == c.rand(2, sched_type_cas_fail);\r\n                        if (spurious_failure)\r\n                            
already_failed_ = true;\r\n                    }\r\n                }\r\n            }\r\n\r\n            if (false == spurious_failure)\r\n            {\r\n                success = true;\r\n                unsigned const index = (c.threadx_->*impl)(impl_, aba);\r\n                value_ = xchg;\r\n                last_index_ = index;\r\n                history_[index] = xchg;\r\n            }\r\n        }\r\n\r\n        if (false == success)\r\n        {\r\n            (c.threadx_->*failure_impl)(impl_);\r\n            cmp = current;\r\n        }\r\n\r\n        RL_HIST(atomic_cas_event<T>) {RL_INFO, this, current, cmpv, xchg, mo, success, spurious_failure, aba} RL_HIST_END();\r\n\r\n        return success;\r\n    }\r\n\r\n    template<typename Y, memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data, bool&), rmw_type_e type>\r\n    T rmw_impl(rmw_type_t<type>, Y op, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        RL_VERIFY(false == c.invariant_executing);\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        if (false == initialized_)\r\n        {\r\n            RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_unitialized_access, \"\", info);\r\n        }\r\n\r\n        bool aba;\r\n        unsigned const index = (c.threadx_->*impl)(impl_, aba);\r\n\r\n        T const prev_value = value_;\r\n        T const new_value = perform_rmw(rmw_type_t<type>(), prev_value, op);\r\n        value_ = new_value;\r\n        last_index_ = index;\r\n        history_[index] = new_value;\r\n\r\n        typedef atomic_rmw_event<T, Y> atomic_rmw_event_t;\r\n        RL_HIST(atomic_rmw_event_t) {RL_INFO, this, prev_value, op, new_value, mo, type} RL_HIST_END();\r\n\r\n        return prev_value;\r\n    }\r\n\r\n    RL_NOCOPY(generic_atomic);\r\n};\r\n\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass atomic : public generic_atomic<T, 
false>\r\n{\r\npublic:\r\n    atomic()\r\n    {\r\n    }\r\n\r\n    /*explicit*/ atomic(T value)\r\n    {\r\n        this->store(value, mo_relaxed, $);\r\n    }\r\n\r\n    atomic_proxy_const<T> operator () (debug_info_param info) const /*volatile*/\r\n    {\r\n        return atomic_proxy_const<T>(*this, info);\r\n    }\r\n\r\n    atomic_proxy<T> operator () (debug_info_param info) /*volatile*/\r\n    {\r\n        return atomic_proxy<T>(*this, info);\r\n    }\r\n\r\n    bool is_lock_free() const /*volatile*/\r\n    {\r\n        return true;\r\n    }\r\n\r\n    friend class atomic_proxy<T>;\r\n    friend class atomic_proxy_const<T>;\r\n\r\n    RL_NOCOPY(atomic);\r\n};\r\n\r\n\r\n\r\n\r\ntypedef atomic<bool> atomic_bool;\r\ntypedef atomic<void*> atomic_address;\r\n\r\ntypedef atomic<char> atomic_char;\r\ntypedef atomic<signed char> atomic_schar;\r\ntypedef atomic<unsigned char> atomic_uchar;\r\ntypedef atomic<short> atomic_short;\r\ntypedef atomic<unsigned short> atomic_ushort;\r\ntypedef atomic<int> atomic_int;\r\ntypedef atomic<unsigned int> atomic_uint;\r\ntypedef atomic<long> atomic_long;\r\ntypedef atomic<unsigned long> atomic_ulong;\r\ntypedef atomic<long long> atomic_llong;\r\ntypedef atomic<unsigned long long> atomic_ullong;\r\n//typedef atomic<char16_t> atomic_char16_t;\r\n//typedef atomic<char32_t> atomic_char32_t;\r\ntypedef atomic<wchar_t> atomic_wchar_t;\r\n\r\n//typedef atomic<int_least8_t> atomic_int_least8_t;\r\n//typedef atomic<uint_least8_t> atomic_uint_least8_t;\r\n//typedef atomic<int_least16_t> atomic_int_least16_t;\r\n//typedef atomic<uint_least16_t> atomic_uint_least16_t;\r\n//typedef atomic<int_least32_t> atomic_int_least32_t;\r\n//typedef atomic<uint_least32_t> atomic_uint_least32_t;\r\n//typedef atomic<int_least64_t> atomic_int_least64_t;\r\n//typedef atomic<uint_least64_t> atomic_uint_least64_t;\r\n//typedef atomic<int_fast8_t> atomic_int_fast8_t;\r\n//typedef atomic<uint_fast8_t> atomic_uint_fast8_t;\r\n//typedef atomic<int_fast16_t> 
atomic_int_fast16_t;\r\n//typedef atomic<uint_fast16_t> atomic_uint_fast16_t;\r\n//typedef atomic<int_fast32_t> atomic_int_fast32_t;\r\n//typedef atomic<uint_fast32_t> atomic_uint_fast32_t;\r\n//typedef atomic<int_fast64_t> atomic_int_fast64_t;\r\n//typedef atomic<uint_fast64_t> atomic_uint_fast64_t;\r\ntypedef atomic<intptr_t> atomic_intptr_t;\r\ntypedef atomic<uintptr_t> atomic_uintptr_t;\r\ntypedef atomic<size_t> atomic_size_t;\r\n//typedef atomic<ssize_t> atomic_ssize_t;\r\ntypedef atomic<ptrdiff_t> atomic_ptrdiff_t;\r\n//typedef atomic<intmax_t> atomic_intmax_t;\r\n//typedef atomic<uintmax_t> atomic_uintmax_t;\r\n\r\n\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nstruct atomic_data_impl : atomic_data\r\n{\r\n    typedef thread_info<thread_count> thread_info_t;\r\n\r\n    struct history_record\r\n    {\r\n        timestamp_t acq_rel_order_ [thread_count];\r\n        timestamp_t last_seen_order_ [thread_count];\r\n\r\n        bool busy_;\r\n        bool seq_cst_;\r\n        thread_id_t thread_id_;\r\n        timestamp_t acq_rel_timestamp_;\r\n    };\r\n\r\n    static size_t const history_size = atomic_history_size;\r\n    aligned<history_record> history_ [history_size];\r\n    unsigned current_index_;\r\n    waitset<thread_count> futex_ws_;\r\n    sync_var<thread_count> futex_sync_;\r\n\r\n    atomic_data_impl()\r\n    {\r\n        current_index_ = 0;\r\n        history_record& rec = history_[0];\r\n        history_[atomic_history_size - 1].busy_ = false;\r\n\r\n        rec.busy_ = false;\r\n        rec.seq_cst_ = false;\r\n        rec.thread_id_ = (thread_id_t)-1;\r\n    }\r\n\r\n    atomic_data_impl(thread_info_t& th)\r\n    {\r\n        current_index_ = 0;\r\n        history_[atomic_history_size - 1].busy_ = false;\r\n\r\n        history_record& rec = history_[0];\r\n        rec.busy_ = true;\r\n        rec.seq_cst_ = false;\r\n        rec.thread_id_ = th.index_;\r\n\r\n        th.own_acq_rel_order_ += 1;\r\n        rec.acq_rel_timestamp_ = 
th.own_acq_rel_order_;\r\n\r\n        foreach<thread_count>(rec.acq_rel_order_, assign_zero);\r\n        foreach<thread_count>(rec.last_seen_order_, assign<(timestamp_t)-1>);\r\n        rec.last_seen_order_[th.index_] = th.own_acq_rel_order_;\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/atomic_events.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_ATOMIC_EVENTS_HPP\r\n#define RL_ATOMIC_EVENTS_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"memory_order.hpp\"\r\n#include \"rmw.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\ntemplate<typename T> class atomic;\r\ntemplate<typename T, bool strong_init> class generic_atomic;\r\n\r\ntemplate<typename T>\r\nstruct atomic_add_type\r\n{\r\n    typedef T type;\r\n    typedef T output_type;\r\n};\r\n\r\ntemplate<typename T>\r\nstruct atomic_add_type<T*>\r\n{\r\n    typedef ptrdiff_t type;\r\n    typedef void* output_type;\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nstruct atomic_cas_event\r\n{\r\n    typedef typename atomic_add_type<T>::output_type type;\r\n\r\n    debug_info var_info_;\r\n    void const* var_addr_;\r\n    type cur_value_;\r\n    type cmp_value_;\r\n    type xchg_value_;\r\n    memory_order mo_;\r\n    bool success_;\r\n    bool spurious_failure_;\r\n    bool aba_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"<\" << std::hex << var_addr_ << std::dec << \">\"\r\n            << \" CAS \"\r\n            << (success_ ? \"succ \" : \"fail \")\r\n            << (spurious_failure_ ? \"[SPURIOUSLY] \" : \"\")\r\n            << (aba_ ? 
\"[ABA] \" : \"\")\r\n            << \"orig=\" << cur_value_\r\n            << \", cmp=\" << cmp_value_\r\n            << \", xchg=\" << xchg_value_\r\n            << \", order=\" << format(mo_);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nstruct atomic_load_event\r\n{\r\n    typedef typename atomic_add_type<T>::output_type type;\r\n\r\n    void const* var_addr_;\r\n    type value_;\r\n    memory_order mo_;\r\n    bool not_current_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"<\" << std::hex << var_addr_ << std::dec << \">\"\r\n            << \" atomic load, value=\" << value_\r\n            << (not_current_ ? \" [NOT CURRENT]\" : \"\")\r\n            << \", order=\" << format(mo_);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nstruct atomic_store_event\r\n{\r\n    typedef typename atomic_add_type<T>::output_type type;\r\n\r\n    void const* var_addr_;\r\n    type prev_value_;\r\n    type value_;\r\n    memory_order mo_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"<\" << std::hex << var_addr_ << std::dec << \">\"\r\n            << \" atomic store, value=\" << value_\r\n            << \", (prev value=\" << prev_value_ << \")\"\r\n            << \", order=\" << format(mo_);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T, typename Y>\r\nstruct atomic_rmw_event\r\n{\r\n    typedef typename atomic_add_type<T>::output_type type;\r\n\r\n    debug_info var_info_;\r\n    void const* var_addr_;\r\n    type prev_value_;\r\n    Y op_value_;\r\n    type new_value_;\r\n    memory_order mo_;\r\n    rmw_type_e type_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"<\" << std::hex << var_addr_ << std::dec << \">\"\r\n            << \" \" << format(type_) << \" \"\r\n            << \", prev=\" << prev_value_\r\n            << \", arg=\" << op_value_\r\n            << \", new=\" << new_value_\r\n            << \", order=\" << format(mo_);\r\n    
}\r\n};\r\n\r\n\r\n}\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/atomic_fence.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_FENCE_HPP\r\n#define RL_FENCE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context.hpp\"\r\n#include \"memory_order.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nstruct atomic_fence_event\r\n{\r\n    memory_order mo_;\r\n    bool is_thread_fence_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << (is_thread_fence_ ? \"\" : \"compiler \")\r\n            << format(mo_) << \" fence\";\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nRL_INLINE\r\nvoid atomic_thread_fence(memory_order mo, debug_info_param info)\r\n{\r\n    context& c = ctx();\r\n    RL_VERIFY(false == c.invariant_executing);\r\n\r\n    switch (mo)\r\n    {\r\n    case mo_relaxed:\r\n        RL_VERIFY(false);\r\n        break;\r\n    case mo_consume:\r\n    case mo_acquire:\r\n        c.atomic_thread_fence_acquire();\r\n        break;\r\n    case mo_release:\r\n        c.atomic_thread_fence_release();\r\n        break;\r\n    case mo_acq_rel:\r\n        c.atomic_thread_fence_acq_rel();\r\n        break;\r\n    case mo_seq_cst:\r\n        c.atomic_thread_fence_seq_cst();\r\n        break;\r\n    }\r\n\r\n    RL_HIST(atomic_fence_event) {mo, true} RL_HIST_END();\r\n}\r\n\r\n\r\n\r\n\r\nRL_INLINE\r\nvoid atomic_signal_fence(memory_order mo, debug_info_param info)\r\n{\r\n    context& c = ctx();\r\n    RL_HIST(atomic_fence_event) {mo, false} RL_HIST_END();\r\n}\r\n\r\n\r\n}\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/backoff.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_BACKOFF_HPP\r\n#define RL_BACKOFF_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context_base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ninline void yield(unsigned count, debug_info_param info)\r\n{\r\n    ctx().yield(count, info);\r\n}\r\n\r\n\r\ntemplate<unsigned factor_t, unsigned add_t>\r\nclass backoff_t\r\n{\r\npublic:\r\n    backoff_t()\r\n        : count_(1)\r\n    {\r\n    }\r\n\r\n    void yield(debug_info_param info)\r\n    {\r\n        rl::yield(count_, info);\r\n        count_ = count_ * factor_t + add_t;\r\n    }\r\n\r\nprivate:\r\n    unsigned count_;\r\n};\r\n\r\n\r\ntypedef backoff_t<1, 0> backoff;\r\ntypedef backoff_t<1, 1> linear_backoff;\r\ntypedef backoff_t<2, 0> exp_backoff;\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/base.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_BASE_HPP\r\n#define RL_BASE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"pch.hpp\"\r\n#include \"platform.hpp\"\r\n\r\nnamespace rl\r\n{\r\nsize_t const subsequent_timed_wait_limit = 4;\r\n}\r\n\r\n#define RL_TEST\r\n\r\n#ifdef RL_JAVA_MODE\r\n#   define RL_GC\r\n#   define RL_NO_MALLOC\r\n#   define RL_JAVA_API\r\n#   define RL_JAVA_MM\r\n#endif\r\n\r\n#ifdef RL_CLI_MODE\r\n#   define RL_GC\r\n#   define RL_NO_MALLOC\r\n#   define RL_CLI_API\r\n#   define RL_CLI_MM\r\n#endif\r\n\r\n#ifdef RL_POSIX_MODE\r\n#   define RL_POSIX_API\r\n#endif\r\n\r\n#ifdef RL_WIN_MODE\r\n#   define RL_WIN_API\r\n#endif\r\n\r\n#ifdef RL_CPP_MODE\r\n#   define RL_CPP_API\r\n#   define RL_CPP_MM\r\n#endif\r\n\r\n#if defined(RL_JAVA_MM) || defined(RL_CLI_MM)\r\n#   define RL_IMPROVED_SEQ_CST_FENCE\r\n#   define RL_IMPROVED_SEQ_CST_RMW\r\n#endif\r\n\r\nnamespace rl\r\n{\r\n\r\n#define RL_NOCOPY(CLASS) \\\r\n    private: \\\r\n    CLASS(CLASS const&); \\\r\n    CLASS& operator = (CLASS const&);\r\n/**/\r\n\r\n\r\ntemplate<typename T = void>\r\nclass nocopy\r\n{\r\n    nocopy(nocopy const&);\r\n    nocopy& operator = (nocopy const&);\r\n\r\nprotected:\r\n    nocopy() {}\r\n};\r\n\r\n\r\ntemplate<size_t sz, size_t base = 4>\r\nstruct align_pad\r\n{\r\n    template<bool perfect, bool fit, int fake> struct helper\r\n    {\r\n        struct type { char pad [base - sz]; };\r\n    };\r\n\r\n    template<int fake> struct helper<true, true, fake>\r\n    {\r\n        struct type {};\r\n    };\r\n\r\n    template<bool perfect, int 
fake> struct helper<perfect, false, fake>\r\n    {\r\n        typedef typename align_pad<sz, base * 2>::type type;\r\n    };\r\n\r\n    typedef typename helper<sz == base, sz <= base, 0>::type type;\r\n};\r\n\r\n\r\ntemplate<typename T>\r\nstruct aligned : T, align_pad<sizeof(T)>::type\r\n{};\r\n\r\ntemplate<typename T>\r\nT val(T x)\r\n{\r\n    return x;\r\n}\r\n\r\n}\r\n\r\n\r\n#include \"defs.hpp\"\r\n\r\n\r\n#define RL_INFO ::rl::debug_info(__FUNCTION__, __FILE__, __LINE__)\r\n#define $ RL_INFO\r\n\r\n\r\n#ifdef RL_DO_ASSERT\r\n#   if RL_DO_ASSERT\r\n#       define RL_DO_ASSERT_IMPL\r\n#   endif\r\n#else\r\n#   ifdef _DEBUG\r\n#       define RL_DO_ASSERT_IMPL\r\n#   endif\r\n#endif\r\n\r\n#ifdef _MSC_VER\r\n#   define RL_INT3() __debugbreak(); abort()\r\n#else\r\n#   define RL_INT3() abort()\r\n#endif\r\n\r\n#ifdef RL_DO_ASSERT_IMPL\r\n#   define RL_VERIFY(x) do { if (!((void)0, (x))) { \\\r\n        ::rl::assert_failed(#x, $); RL_INT3(); } } while ((void)0, 0)\r\n#else\r\n#   define RL_VERIFY(x) (void)0\r\n#endif\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/cli.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CLI_HPP\r\n#define RL_CLI_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context_base.hpp\"\r\n#include \"atomic_fence.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nstruct Thread\r\n{\r\n    static void MemoryBarrier(debug_info_param info)\r\n    {\r\n        atomic_thread_fence(mo_seq_cst, info);\r\n    }\r\n\r\n    template<typename T>\r\n    static T VolatileRead(generic_atomic<T, true> const& v, debug_info_param info)\r\n    {\r\n        return v.load(mo_acquire, info);\r\n    }\r\n\r\n    template<typename T>\r\n    static void VolatileWrite(generic_atomic<T, true>& v, T x, debug_info_param info)\r\n    {\r\n        v.store(x, mo_release, info);\r\n    }\r\n\r\n    static void SpinWait(int iterations, debug_info_param info)\r\n    {\r\n        ctx().yield(iterations, info);\r\n    }\r\n};\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/cli_interlocked.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CLI_INTERLOCKED_HPP\r\n#define RL_CLI_INTERLOCKED_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"atomic.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n    struct Interlocked\r\n    {\r\n        template<typename T>\r\n        static T Add(generic_atomic<T, true>& v, T x, debug_info_param info)\r\n        {\r\n            T result = v.rmw(rmw_type_t<rmw_type_add>(), x, mo_seq_cst, info) + x;\r\n            return result;\r\n        }\r\n\r\n        template<typename T>\r\n        static T CompareExchange(generic_atomic<T, true>& v, T xchg, T cmp, debug_info_param info)\r\n        {\r\n            v.compare_exchange(bool_t<false>(), cmp, xchg, mo_seq_cst, mo_seq_cst, info);\r\n            return cmp;\r\n        }\r\n\r\n        template<typename T>\r\n        static T Increment(generic_atomic<T, true>& v, debug_info_param info)\r\n        {\r\n            return Add(v, (T)1, info);\r\n        }\r\n\r\n        template<typename T>\r\n        static T Decrement(generic_atomic<T, true>& v, debug_info_param info)\r\n        {\r\n            return Add(v, (T)-1, info);\r\n        }\r\n\r\n        template<typename T>\r\n        static T Exchange(generic_atomic<T, true>& v, T x, debug_info_param info)\r\n        {\r\n            T result = v.rmw(rmw_type_t<rmw_type_swap>(), x, mo_seq_cst, info);\r\n            return result;\r\n        }\r\n\r\n        template<typename T>\r\n        static T Read(generic_atomic<T, true> const& v, debug_info_param info)\r\n        {\r\n            
return v.load(mo_acquire, info);\r\n        }\r\n    };\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/cli_var.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CLI_VAR_HPP\r\n#define RL_CLI_VAR_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"atomic.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\ntemplate<typename T> class nvar;\r\n\r\n\r\ntemplate<typename T>\r\nclass nvar_proxy\r\n{\r\npublic:\r\n    typedef typename atomic_add_type<T>::type add_type;\r\n    template<typename Y> friend class nvar;\r\n\r\n    operator T () const\r\n    {\r\n        return load();\r\n    }\r\n\r\n    T operator = (T value)\r\n    {\r\n        store(value);\r\n        return value;\r\n    }\r\n\r\n    T operator = (nvar_proxy const& r)\r\n    {\r\n        T const value = r.load();\r\n        store(value);\r\n        return *this;\r\n    }\r\n\r\n    T operator ++ (int)\r\n    {\r\n        T tmp = load();\r\n        store(tmp + 1);\r\n        return tmp;\r\n    }\r\n\r\n    T operator -- (int)\r\n    {\r\n        T tmp = load();\r\n        store(tmp - 1);\r\n        return tmp;\r\n    }\r\n\r\n    T operator ++ ()\r\n    {\r\n        T tmp = load();\r\n        store(tmp + 1);\r\n        return tmp + 1;\r\n    }\r\n\r\n    T operator -- ()\r\n    {\r\n        T tmp = load();\r\n        store(tmp - 1);\r\n        return tmp - 1;\r\n    }\r\n\r\n    T operator += (add_type value)\r\n    {\r\n        T tmp = load();\r\n        store(tmp + value);\r\n        return tmp + value;\r\n    }\r\n\r\n    T operator -= (add_type value)\r\n    {\r\n        T tmp = load();\r\n        store(tmp - value);\r\n        return tmp - value;\r\n    }\r\n\r\nprivate:\r\n    
nvar<T>& var_;\r\n    debug_info info_;\r\n\r\n    nvar_proxy(nvar<T>& var, debug_info_param info)\r\n        : var_(var)\r\n        , info_(info)\r\n    {\r\n    }\r\n\r\n    T load() const\r\n    {\r\n        return var_.load(mo_relaxed, info_);\r\n    }\r\n\r\n    void store(T value)\r\n    {\r\n        var_.store(value, mo_relaxed, info_);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass nvar : public generic_atomic<T, true>\r\n{\r\npublic:\r\n    typedef nvar_proxy<T> proxy_t;\r\n    friend class nvar_proxy<T>;\r\n\r\n    nvar()\r\n    {\r\n    }\r\n\r\n    explicit nvar(T value)\r\n    {\r\n        this->store(value, mo_relaxed, $);\r\n    }\r\n\r\n    nvar(nvar const& r)\r\n    {\r\n        T const value = r.load(mo_relaxed, $);\r\n        this->store(value, mo_relaxed, $);\r\n    }\r\n\r\n    nvar(proxy_t const& r)\r\n    {\r\n        T const value = r.load();\r\n        this->store(value, mo_relaxed, r.info_);\r\n    }\r\n\r\n    proxy_t operator () (debug_info_param info)\r\n    {\r\n        return proxy_t(*this, info);\r\n    }\r\n\r\nprivate:\r\n    nvar& operator = (nvar const&);\r\n};\r\n\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/cli_volatile.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CLI_VOLATILE_HPP\r\n#define RL_CLI_VOLATILE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"atomic.hpp\"\r\n\r\n\r\n//!!! fix Java volatiles!\r\n// they must be modeled as seq_cst stores/loads\r\n\r\nnamespace rl\r\n{\r\n\r\ntemplate<typename T> class nvolatile;\r\n\r\n\r\ntemplate<typename T>\r\nclass nvolatile_proxy\r\n{\r\npublic:\r\n    typedef typename atomic_add_type<T>::type add_type;\r\n    template<typename Y> friend class nvolatile;\r\n\r\n    operator T () const\r\n    {\r\n        return load();\r\n    }\r\n\r\n    T operator = (T value)\r\n    {\r\n        store(value);\r\n        return value;\r\n    }\r\n\r\n    T operator = (nvolatile_proxy const& r)\r\n    {\r\n        T const value = r.load();\r\n        store(value);\r\n        return *this;\r\n    }\r\n\r\n    T operator ++ (int)\r\n    {\r\n        T tmp = load();\r\n        store(tmp + 1);\r\n        return tmp;\r\n    }\r\n\r\n    T operator -- (int)\r\n    {\r\n        T tmp = load();\r\n        store(tmp - 1);\r\n        return tmp;\r\n    }\r\n\r\n    T operator ++ ()\r\n    {\r\n        T tmp = load();\r\n        store(tmp + 1);\r\n        return tmp + 1;\r\n    }\r\n\r\n    T operator -- ()\r\n    {\r\n        T tmp = load();\r\n        store(tmp - 1);\r\n        return tmp - 1;\r\n    }\r\n\r\n    T operator += (add_type value)\r\n    {\r\n        T tmp = load();\r\n        store(tmp + value);\r\n        return tmp + value;\r\n    }\r\n\r\n    T operator -= (add_type value)\r\n    {\r\n        T 
tmp = load();\r\n        store(tmp - value);\r\n        return tmp - value;\r\n    }\r\n\r\nprivate:\r\n    nvolatile<T>& var_;\r\n    debug_info info_;\r\n\r\n    nvolatile_proxy(nvolatile<T>& var, debug_info_param info)\r\n        : var_(var)\r\n        , info_(info)\r\n    {\r\n    }\r\n\r\n    T load() const\r\n    {\r\n        return var_.load(mo_acquire, info_);\r\n    }\r\n\r\n    void store(T value)\r\n    {\r\n        var_.store(value, mo_release, info_);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass nvolatile : public generic_atomic<T, true>\r\n{\r\npublic:\r\n    typedef nvolatile_proxy<T> proxy_t;\r\n    friend class nvolatile_proxy<T>;\r\n\r\n    nvolatile()\r\n    {\r\n    }\r\n\r\n    explicit nvolatile(T value)\r\n    {\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_release, $);\r\n    }\r\n\r\n    nvolatile(nvolatile const& r)\r\n    {\r\n        T const value = r.load(mo_acquire, $);\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_release, $);\r\n    }\r\n\r\n    nvolatile(proxy_t const& r)\r\n    {\r\n        T const value = r.var_.load(mo_acquire, r.info_);\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_release, r.info_);\r\n    }\r\n\r\n    proxy_t operator () (debug_info_param info)\r\n    {\r\n        return proxy_t(*this, info);\r\n    }\r\n};\r\n\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/context.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CONTEXT_HPP\r\n#define RL_CONTEXT_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"thread_local_ctx.hpp\"\r\n#include \"context_base.hpp\"\r\n#include \"thread.hpp\"\r\n#include \"history.hpp\"\r\n#include \"memory.hpp\"\r\n#include \"test_result.hpp\"\r\n#include \"slab_allocator.hpp\"\r\n#include \"test_params.hpp\"\r\n#include \"random.hpp\"\r\n#include \"foreach.hpp\"\r\n\r\n#include \"random_scheduler.hpp\"\r\n#include \"full_search_scheduler.hpp\"\r\n#include \"context_bound_scheduler.hpp\"\r\n\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\ntemplate<thread_id_t thread_count> class generic_mutex_data_impl;\r\ntemplate<thread_id_t thread_count> class condvar_data_impl;\r\ntemplate<thread_id_t thread_count> class sema_data_impl;\r\ntemplate<thread_id_t thread_count> class event_data_impl;\r\n\r\n\r\nstruct park_event\r\n{\r\n    bool is_timed_;\r\n    bool allow_spurious_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"blocking current thread\" << (is_timed_ ? 
\" [timed]\" : \"\");\r\n    }\r\n};\r\n\r\nstruct unpark_event\r\n{\r\n    thread_id_t thread_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"unblocking thread \" << thread_;\r\n    }\r\n};\r\n\r\nstruct yield_event\r\n{\r\n    unsigned count_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"yield(\" << count_ << \")\";\r\n    }\r\n};\r\n\r\n\r\n/*\r\ntemplate<typename test_t, typename scheduler_t>\r\nstruct context_persistent\r\n{\r\n    static thread_id_t const        thread_count = test_t::params::thread_count;\r\n    fiber_t                         fibers_ [thread_count];\r\n    memory_mgr                      memory_;\r\n\r\n    context_persistent()\r\n    {\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            create_fiber(fibers_[i], &context_impl<test_t, scheduler_t>::fiber_proc, (void*)(intptr_t)i);\r\n        }\r\n    }\r\n\r\n    ~context_persistent()\r\n    {\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            delete_fiber(fibers_[i]);\r\n        }\r\n    }\r\n};\r\n*/\r\n\r\n\r\ntemplate<typename test_t, typename scheduler_t>\r\nclass context_impl\r\n    : thread_local_contxt_impl<context_addr_hash_impl<context, test_t::params::thread_count>, test_t::params::thread_count>\r\n{\r\nprivate:\r\n    typedef thread_local_contxt_impl\r\n        <context_addr_hash_impl<context, test_t::params::thread_count>,\r\n            test_t::params::thread_count>\r\n                base_t;\r\n    typedef typename scheduler_t::shared_context_t shared_context_t;\r\n\r\n    using base_t::params_;\r\n    using base_t::history_;\r\n    using base_t::threadx_;\r\n    using base_t::disable_preemption_;\r\n    using base_t::disable_alloc_;\r\n    using base_t::invariant_executing;\r\n\r\n    static thread_id_t const main_thread_id = -1;\r\n    static thread_id_t const static_thread_count = test_t::params::static_thread_count;\r\n    static thread_id_t const 
dynamic_thread_count = test_t::params::dynamic_thread_count;\r\n    static thread_id_t const thread_count = test_t::params::thread_count;\r\n\r\n    iteration_t                     current_iter_;\r\n    test_result_e                   test_result_;\r\n    string                          test_result_str_;\r\n    fiber_t                         main_fiber_;\r\n    bool                            special_function_executing;\r\n    memory_mgr                      memory_;\r\n    iteration_t                     start_iteration_;\r\n    size_t                          sched_count_;\r\n    scheduler_t                     sched_;\r\n    shared_context_t&               sctx_;\r\n    random_generator                rand_;\r\n    test_t*                         current_test_suite;\r\n    bool                            current_test_suite_constructed;\r\n    bool                            first_thread_;\r\n    timestamp_t                     seq_cst_fence_order_ [thread_count];\r\n\r\n    aligned<thread_info<thread_count> > threads_ [thread_count];\r\n\r\n    thread_info<thread_count>& threadi()\r\n    {\r\n        return *static_cast<thread_info<thread_count>*>(threadx_);\r\n    }\r\n\r\n    slab_allocator<atomic_data_impl<thread_count> >*        atomic_alloc_;\r\n    slab_allocator<var_data_impl<thread_count> >*           var_alloc_;\r\n    slab_allocator<generic_mutex_data_impl<thread_count> >* mutex_alloc_;\r\n    slab_allocator<condvar_data_impl<thread_count> >*       condvar_alloc_;\r\n    slab_allocator<sema_data_impl<thread_count> >*          sema_alloc_;\r\n    slab_allocator<event_data_impl<thread_count> >*         event_alloc_;\r\n\r\n    virtual atomic_data* atomic_ctor(void* ctx)\r\n    {\r\n        return new (atomic_alloc_->alloc(ctx)) atomic_data_impl<thread_count> ();\r\n    }\r\n\r\n    virtual void atomic_dtor(atomic_data* data)\r\n    {\r\n        static_cast<atomic_data_impl<thread_count>*>(data)->~atomic_data_impl<thread_count>();\r\n        
atomic_alloc_->free(static_cast<atomic_data_impl<thread_count>*>(data));\r\n    }\r\n\r\n    virtual var_data* var_ctor()\r\n    {\r\n        return new (var_alloc_->alloc()) var_data_impl<thread_count> ();\r\n    }\r\n\r\n    virtual void var_dtor(var_data* data)\r\n    {\r\n        static_cast<var_data_impl<thread_count>*>(data)->~var_data_impl<thread_count>();\r\n        var_alloc_->free(static_cast<var_data_impl<thread_count>*>(data));\r\n    }\r\n\t\r\n    virtual unpark_reason wfmo_park(void** ws,\r\n                                    win_waitable_object** wo,\r\n                                    size_t count,\r\n                                    bool wait_all,\r\n                                    bool is_timed,\r\n                                    debug_info_param info)\r\n    {\r\n\t\t\t  return waitset<thread_count>::park_current(*this,\r\n                                                         reinterpret_cast<waitset<thread_count>**>(ws),\r\n                                                         wo, count, wait_all, is_timed, true, info);\r\n    }\r\n\r\npublic:\r\n    context_impl(test_params& params, shared_context_t& sctx)\r\n        : base_t(thread_count, params)\r\n        , current_iter_(0)\r\n        , start_iteration_(1)\r\n        , sched_(params, sctx, dynamic_thread_count)\r\n        , sctx_(sctx)\r\n    {\r\n        this->context::seq_cst_fence_order_ = this->seq_cst_fence_order_;\r\n\r\n        current_test_suite = (test_t*)(::malloc)(sizeof(test_t));\r\n        current_test_suite_constructed = false;\r\n\r\n        test_result_ = test_result_success;\r\n        threadx_ = 0;\r\n        special_function_executing = false;\r\n        invariant_executing = false;\r\n\r\n        create_main_fiber(main_fiber_);\r\n        set_low_thread_prio();\r\n\r\n        if (0 == val(thread_count))\r\n        {\r\n            throw std::logic_error(\"no threads created\");\r\n        }\r\n\r\n        atomic_alloc_ = new 
slab_allocator<atomic_data_impl<thread_count> >();\r\n        var_alloc_ = new slab_allocator<var_data_impl<thread_count> >();\r\n        mutex_alloc_ = new slab_allocator<generic_mutex_data_impl<thread_count> >();\r\n        condvar_alloc_ = new slab_allocator<condvar_data_impl<thread_count> >();\r\n        sema_alloc_ = new slab_allocator<sema_data_impl<thread_count> >();\r\n        event_alloc_ = new slab_allocator<event_data_impl<thread_count> >();\r\n\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            new (&threads_[i]) thread_info<thread_count> (i);\r\n            threads_[i].ctx_ = this;\r\n        }\r\n\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            //threads_[i].fiber_ = persistent.fibers_[i];\r\n            create_fiber(threads_[i].fiber_, &context_impl::fiber_proc, (void*)(intptr_t)i);\r\n        }\r\n\r\n        disable_alloc_ = 0;\r\n    }\r\n\r\n    ~context_impl()\r\n    {\r\n        disable_alloc_ += 1;\r\n\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            delete_fiber(threads_[i].fiber_);\r\n        }\r\n\r\n        delete_main_fiber(main_fiber_);\r\n\r\n        // there can be atomic loads and stores etc\r\n        // it's not good place to calling user code\r\n        //destroy_current_test_suite();\r\n        //::free(current_test_suite);\r\n\r\n        delete atomic_alloc_;\r\n        delete var_alloc_;\r\n        delete mutex_alloc_;\r\n        delete condvar_alloc_;\r\n        delete sema_alloc_;\r\n        delete event_alloc_;\r\n    }\r\n\r\n    void construct_current_test_suite()\r\n    {\r\n        RL_VERIFY(false == current_test_suite_constructed);\r\n        new (current_test_suite) test_t ();\r\n        current_test_suite_constructed = true;\r\n    }\r\n\r\n    void destroy_current_test_suite()\r\n    {\r\n        if (current_test_suite_constructed)\r\n        {\r\n            current_test_suite->~test_t();\r\n            
current_test_suite_constructed = false;\r\n        }\r\n    }\r\n\r\n    virtual void* alloc(size_t size, bool is_array, debug_info_param info)\r\n    {\r\n        disable_alloc_ += 1;\r\n#ifndef RL_GC\r\n        void* p = memory_.alloc(size);\r\n#else\r\n        void* p = memory_.alloc(size, (void(*)(void*))0);\r\n#endif\r\n        disable_alloc_ -= 1;\r\n        RL_HIST_CTX(memory_alloc_event) {p, size, is_array} RL_HIST_END();\r\n        return p;\r\n    }\r\n\r\n#ifdef RL_GC\r\n    virtual void* alloc(size_t size, bool is_array, void(*dtor)(void*), debug_info_param info)\r\n    {\r\n        disable_alloc_ += 1;\r\n        void* p = memory_.alloc(size, dtor);\r\n        disable_alloc_ -= 1;\r\n        RL_HIST_CTX(memory_alloc_event) {p, size, is_array} RL_HIST_END();\r\n        return p;\r\n    }\r\n#endif\r\n\r\n    virtual void free(void* p, bool is_array, debug_info_param info)\r\n    {\r\n        RL_HIST_CTX(memory_free_event) {p, is_array} RL_HIST_END();\r\n#ifndef RL_GC\r\n        bool const defer = (0 == sched_.rand(this->is_random_sched() ? 
4 : 2, sched_type_mem_realloc));\r\n#else\r\n        bool const defer = false;\r\n#endif\r\n        disable_alloc_ += 1;\r\n        if (false == memory_.free(p, defer))\r\n            fail_test(\"incorrect address passed to free() function\", test_result_double_free, info);\r\n        disable_alloc_ -= 1;\r\n    }\r\n\r\n    size_t prev_alloc_size_;\r\n    debug_info last_info_;\r\n\r\n    virtual void* alloc(size_t size)\r\n    {\r\n        if (disable_alloc_)\r\n            return (::malloc)(size);\r\n\r\n        prev_alloc_size_ = size;\r\n        disable_alloc_ += 1;\r\n#ifndef RL_GC\r\n        void* p = (memory_.alloc)(size);\r\n#else\r\n        void* p = (memory_.alloc)(size, 0);\r\n#endif\r\n        disable_alloc_ -= 1;\r\n        return p;\r\n    }\r\n\r\n    virtual size_t prev_alloc_size()\r\n    {\r\n        size_t sz = prev_alloc_size_;\r\n        prev_alloc_size_ = 0;\r\n        return sz;\r\n    }\r\n\r\n    virtual void set_debug_info(debug_info_param info)\r\n    {\r\n        last_info_ = info;\r\n    }\r\n\r\n    virtual void free(void* p)\r\n    {\r\n        if (disable_alloc_)\r\n        {\r\n            (::free)(p);\r\n            return;\r\n        }\r\n        \r\n        disable_alloc_ += 1;\r\n        debug_info const& info = last_info_;\r\n        RL_HIST_CTX(memory_free_event) {p, false} RL_HIST_END();\r\n#ifndef RL_GC\r\n        bool const defer = (0 == sched_.rand(this->is_random_sched() ? 
4 : 2, sched_type_mem_realloc));\r\n#else\r\n        bool const defer = false;\r\n#endif\r\n        if (false == memory_.free(p, defer))\r\n            fail_test(\"incorrect address passed to free() function\", test_result_double_free, info);\r\n        disable_alloc_ -= 1;\r\n    }\r\n\r\n    virtual unpark_reason park_current_thread(bool is_timed,\r\n                                              bool allow_spurious_wakeup,\r\n                                              bool do_switch,\r\n                                              debug_info_param info)\r\n    {\r\n        RL_VERIFY(false == special_function_executing);\r\n        RL_VERIFY(threadx_->saved_disable_preemption_ == -1);\r\n        unsigned dp = disable_preemption_;\r\n        disable_preemption_ = 0;\r\n        RL_HIST_CTX(park_event) {is_timed, allow_spurious_wakeup} RL_HIST_END();\r\n        if (false == sched_.park_current_thread(is_timed, allow_spurious_wakeup))\r\n        {\r\n            fail_test(\"deadlock detected\", test_result_deadlock, info);\r\n        }\r\n        schedule(1);\r\n        // otherwise it's restored in switch_back()\r\n        RL_VERIFY(threadx_->saved_disable_preemption_ == -1);\r\n        if (do_switch == false || threadx_->unpark_reason_ != unpark_reason_normal)\r\n            disable_preemption_ = dp;\r\n        else\r\n            threadx_->saved_disable_preemption_ = dp;\r\n        unpark_reason reason = threadx_->unpark_reason_;\r\n        return reason;\r\n    }\r\n\r\n    virtual void unpark_thread(thread_id_t th, bool do_switch, debug_info_param info)\r\n    {\r\n        RL_VERIFY(false == special_function_executing);\r\n        RL_HIST_CTX(unpark_event) {th} RL_HIST_END();\r\n        sched_.unpark_thread(th, do_switch);\r\n        if (do_switch)\r\n        {\r\n            threads_[th].unpark_reason_ = unpark_reason_normal;\r\n            threads_[th].temp_switch_from_ = threadx_->index_;\r\n            switch_to_fiber(th);\r\n        }\r\n    }\r\n\r\n    
virtual void switch_back(debug_info_param info)\r\n    {\r\n//std::cout << \"switching back from \" << threadx_->index_ << \" to \" << threadx_->temp_switch_from_ << std::endl;\r\n        (void)info;\r\n        RL_VERIFY(threadx_->saved_disable_preemption_ != -1);\r\n        RL_VERIFY(threadx_->temp_switch_from_ != -1);\r\n        thread_id_t const tid = threadx_->temp_switch_from_;\r\n        threadx_->temp_switch_from_ = -1;\r\n        switch_to_fiber(tid);\r\n        RL_VERIFY(threadx_->saved_disable_preemption_ != -1);\r\n        disable_preemption_ = threadx_->saved_disable_preemption_;\r\n        threadx_->saved_disable_preemption_ = -1;\r\n    }\r\n\r\n    void ensure(bool cond, char const* desc, test_result_e res, debug_info_param info)\r\n    {\r\n        if (false == cond)\r\n            fail_test(desc, res, info);\r\n    }\r\n\r\n    virtual void fail_test(char const* desc, test_result_e res, debug_info_param info)\r\n    {\r\n\r\n        RL_DEBUGBREAK_ON_FAILURE_IMPL;\r\n\r\n        RL_VERIFY(test_result_success != res);\r\n\r\n        test_result_ = res;\r\n        if (test_result_user_assert_failed == res && invariant_executing)\r\n            test_result_ = test_result_user_invariant_failed;\r\n        if (0 == desc || 0 == desc[0])\r\n            test_result_str_ = test_result_str(test_result_);\r\n        else\r\n            test_result_str_ = string(test_result_str(test_result_)) + \" (\" + desc + \")\";\r\n\r\n        RL_HIST_CTX(user_event) {test_result_str_.c_str()} RL_HIST_END();\r\n\r\n        switch_to_main_fiber();\r\n    }\r\n\r\n    virtual void rl_until(char const* desc, debug_info_param info)\r\n    {\r\n        RL_HIST_CTX(user_event) {desc} RL_HIST_END();\r\n        test_result_ = test_result_until_condition_hit;\r\n        switch_to_main_fiber();\r\n    }\r\n\r\n    static void fiber_proc(void* thread_index);\r\n\r\n    virtual void fiber_proc_impl(int thread_index)\r\n    {\r\n        thread_info_base* param = 
&threads_[thread_index];\r\n        debug_info info = $;\r\n        for (;;)\r\n        {\r\n            if (first_thread_)\r\n            {\r\n                first_thread_ = false;\r\n                special_function_executing = true;\r\n                RL_HIST_CTX(user_event) {\"[CTOR BEGIN]\"} RL_HIST_END();\r\n                construct_current_test_suite();\r\n                RL_HIST_CTX(user_event) {\"[CTOR END]\"} RL_HIST_END();\r\n                RL_HIST_CTX(user_event) {\"[BEFORE BEGIN]\"} RL_HIST_END();\r\n                current_test_suite->before();\r\n                RL_HIST_CTX(user_event) {\"[BEFORE END]\"} RL_HIST_END();\r\n                rl_global_fence();\r\n                invariant_executing = true;\r\n                current_test_suite->invariant();\r\n                invariant_executing = false;\r\n                special_function_executing = false;\r\n            }\r\n\r\n//std::cout << \"thread \" << param->index_ << \" started\" << std::endl;\r\n            param->on_start();\r\n\r\n            if (param->index_ < static_thread_count)\r\n            {\r\n                current_test_suite->thread(param->index_);\r\n            }\r\n            else\r\n            {\r\n                if (param->dynamic_thread_func_)\r\n                    param->dynamic_thread_func_(param->dynamic_thread_param_);\r\n            }\r\n\r\n//std::cout << \"thread \" << param->index_ << \" finished\" << std::endl;\r\n            RL_HIST_CTX(user_event) {\"[THREAD FINISHED]\"} RL_HIST_END();\r\n            RL_VERIFY(disable_preemption_ == 0);\r\n            RL_VERIFY(threadx_->temp_switch_from_ == -1);\r\n            RL_VERIFY(threadx_->saved_disable_preemption_ == -1);\r\n\r\n            param->on_finish();\r\n\r\n            thread_finish_result res = sched_.thread_finished();\r\n//std::cout << \"thread \" << param->index_ << \" finished res=\" << res << std::endl;\r\n            if (thread_finish_result_normal == res)\r\n            {\r\n                
sched();\r\n            }\r\n            else if (thread_finish_result_last == res)\r\n            {\r\n                special_function_executing = true;\r\n                invariant_executing = true;\r\n                current_test_suite->invariant();\r\n                invariant_executing = false;\r\n                rl_global_fence();\r\n                RL_HIST_CTX(user_event) {\"[AFTER BEGIN]\"} RL_HIST_END();\r\n                current_test_suite->after();\r\n                RL_HIST_CTX(user_event) {\"[AFTER END]\"} RL_HIST_END();\r\n                RL_HIST_CTX(user_event) {\"[DTOR BEGIN]\"} RL_HIST_END();\r\n                destroy_current_test_suite();\r\n                RL_HIST_CTX(user_event) {\"[DTOR END]\"} RL_HIST_END();\r\n                special_function_executing = false;\r\n\r\n                ensure(memory_.iteration_end(), \"memory leak detected\", test_result_memory_leak, $);\r\n                ensure(atomic_alloc_->iteration_end(), \"atomic leak\", test_result_resource_leak, $);\r\n                ensure(var_alloc_->iteration_end(), \"var leak\", test_result_resource_leak, $);\r\n                ensure(mutex_alloc_->iteration_end(), \"mutex leak\", test_result_resource_leak, $);\r\n                ensure(condvar_alloc_->iteration_end(), \"condition variable leak\", test_result_resource_leak, $);\r\n                ensure(sema_alloc_->iteration_end(), \"semaphore leak\", test_result_resource_leak, $);\r\n                ensure(event_alloc_->iteration_end(), \"event leak\", test_result_resource_leak, $);\r\n\r\n                switch_to_main_fiber();\r\n            }\r\n            else if (thread_finish_result_deadlock == res)\r\n            {\r\n                fail_test(\"deadlock detected\", test_result_deadlock, info);\r\n            }\r\n            else\r\n            {\r\n                RL_VERIFY(false);\r\n            }\r\n        }\r\n    }\r\n\r\n    virtual win_waitable_object* create_thread(void*(*fn)(void*), void* ctx)\r\n    {\r\n  
      RL_VERIFY(fn);\r\n        thread_id_t id = sched_.create_thread();\r\n        threads_[id].dynamic_thread_func_ = fn;\r\n        threads_[id].dynamic_thread_param_ = ctx;\r\n        threads_[id].sync_object_.on_create();\r\n        return &threads_[id].sync_object_;\r\n    }\r\n\r\n    virtual void yield(unsigned count, debug_info_param info)\r\n    {\r\n        RL_VERIFY(count);\r\n        RL_HIST_CTX(yield_event) {count} RL_HIST_END();\r\n        if (sched_count_++ > params_.execution_depth_limit)\r\n            fail_test(\"livelock\", test_result_livelock, RL_INFO);\r\n        schedule(count);\r\n    }\r\n\r\n    virtual void sched()\r\n    {\r\n        if (sched_count_++ > params_.execution_depth_limit)\r\n            fail_test(\"livelock\", test_result_livelock, RL_INFO);\r\n        if (disable_preemption_)\r\n            return;\r\n        schedule(0);\r\n    }\r\n\r\n    void schedule(unsigned yield)\r\n    {\r\n        RL_VERIFY(threadx_->temp_switch_from_ == -1);\r\n        RL_VERIFY(disable_preemption_ == 0);\r\n        if (special_function_executing)\r\n        {\r\n            threadx_->unpark_reason_ = unpark_reason_normal;\r\n            return;\r\n        }\r\n\r\n        special_function_executing = true;\r\n        invariant_executing = true;\r\n        current_test_suite->invariant();\r\n        invariant_executing = false;\r\n        special_function_executing = false;\r\n\r\n        if (yield)\r\n            threadx_->last_yield_ = threadi().own_acq_rel_order_;\r\n\r\n        unpark_reason reason = unpark_reason_normal;\r\n        thread_id_t const th = sched_.schedule(reason, yield);\r\n        threads_[th].unpark_reason_ = reason;\r\n\r\n        switch_to_fiber(th);\r\n        RL_VERIFY(0 == disable_preemption_);\r\n    }\r\n\r\n    test_result_e simulate(std::ostream& ss, std::istream& sss, bool second)\r\n    {\r\n        if (EOF != sss.peek())\r\n        {\r\n            sss >> start_iteration_;\r\n            
sched_.set_state(sss);\r\n        }\r\n\r\n        test_result_e const res = simulate2(second);\r\n\r\n        if (test_result_success != res && false == params_.collect_history)\r\n        {\r\n            ss << params_.stop_iteration << \" \";\r\n            sched_.get_state(ss);\r\n        }\r\n\r\n        return res;\r\n    }\r\n\r\n    test_result_e simulate2(bool second)\r\n    {\r\n        debug_info info = $;\r\n\r\n        current_iter_ = start_iteration_;\r\n        for (; ; ++current_iter_)\r\n        {\r\n            rand_.seed(current_iter_);\r\n\r\n            iteration(current_iter_);\r\n\r\n            if (test_result_success != test_result_)\r\n            {\r\n                params_.test_result = test_result_;\r\n                params_.stop_iteration = current_iter_;\r\n                if (params_.collect_history)\r\n                    output_history();\r\n                return test_result_;\r\n            }\r\n\r\n            // If you hit assert here, then probably your test is non-deterministic\r\n            // Check whether you are using functions like ::rand()\r\n            // or static variables or values of object addresses (for hashing) in your test\r\n            // Replace ::rand() with rl::rand(), eliminate static variables in the test\r\n            RL_VERIFY(second == false);\r\n            (void)second;\r\n\r\n            RL_HIST_CTX(user_event) {\"ITERATION END\"} RL_HIST_END();\r\n\r\n            if (sched_.iteration_end())\r\n                break;\r\n        }\r\n\r\n        params_.test_result = test_result_success;\r\n        params_.stop_iteration = current_iter_;\r\n        return test_result_success;\r\n    }\r\n\r\n    RL_INLINE static void reset_thread(thread_info<thread_count>& ti)\r\n    {\r\n        foreach<thread_count>(\r\n            ti.acquire_fence_order_,\r\n            &assign_zero);\r\n        foreach<thread_count>(\r\n            ti.release_fence_order_,\r\n            &assign_zero);\r\n\r\n#ifdef 
RL_IMPROVED_SEQ_CST_FENCE\r\n        foreach<thread_count>(ti.imp_seq_cst_order_, &assign_zero);\r\n#endif\r\n    }\r\n\r\n    void iteration(iteration_t iter)\r\n    {\r\n        first_thread_ = true;\r\n        disable_preemption_ = 0;\r\n        sched_count_ = 0;\r\n\r\n        foreach<thread_count>(\r\n            threads_,\r\n            &context_impl::reset_thread);\r\n\r\n        foreach<thread_count>(\r\n            seq_cst_fence_order_,\r\n            &assign_zero);\r\n\r\n        base_t::iteration_begin();\r\n\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            threads_[i].iteration_begin();\r\n        }\r\n\r\n        disable_alloc_ += 1;\r\n        thread_id_t const th = sched_.iteration_begin(iter);\r\n        disable_alloc_ -= 1;\r\n        switch_to_fiber(th);\r\n\r\n        if (0 == iter % progress_probe_period)\r\n        {\r\n            output_progress(iter);\r\n        }\r\n    }\r\n\r\nprivate:\r\n    void switch_to_fiber(thread_id_t th)\r\n    {\r\n        fiber_t& prev = threadx_ ? 
threadx_->fiber_ : main_fiber_;\r\n        threadx_ = &threads_[th];\r\n        ::switch_to_fiber(threadx_->fiber_, prev);\r\n    }\r\n\r\n    void switch_to_main_fiber()\r\n    {\r\n        fiber_t& prev = threadx_->fiber_;\r\n        threadx_ = 0;\r\n        ::switch_to_fiber(main_fiber_, prev);\r\n    }\r\n\r\n    void output_progress(iteration_t iter)\r\n    {\r\n        iteration_t const total = sched_.iteration_count();\r\n\r\n        if (0 == iter % (progress_probe_period * 16))\r\n        {\r\n            disable_alloc_ += 1;\r\n            *params_.progress_stream << iter * 100 / total << \"% (\"\r\n                << iter << \"/\" << total << \")\" << std::endl;\r\n            disable_alloc_ -= 1;\r\n        }\r\n    }\r\n\r\n    virtual unsigned rand(unsigned limit, sched_type t)\r\n    {\r\n        return sched_.rand(limit, t);\r\n    }\r\n\r\n    void output_history()\r\n    {\r\n        if (false == params_.output_history)\r\n        {\r\n            *params_.output_stream << test_result_str_ << std::endl;\r\n            *params_.output_stream << \"iteration: \" << params_.stop_iteration << std::endl;\r\n            *params_.output_stream << std::endl;\r\n        }\r\n        history_.print_exec_history(params_.output_history);\r\n\r\n#ifndef RL_GC\r\n        if (test_result_memory_leak == test_result_)\r\n        {\r\n            memory_.output_allocs(*params_.output_stream);\r\n        }\r\n#endif\r\n\r\n        //!!! 
output other leaked resources\r\n        if (test_result_ == test_result_resource_leak\r\n            && atomic_alloc_->iteration_end() == false)\r\n        {\r\n            *params_.output_stream << \"leaked atomics:\" << std::endl;\r\n            atomic_alloc_->output_allocs(*params_.output_stream);\r\n        }\r\n    }\r\n\r\n    void rl_global_fence()\r\n    {\r\n        timestamp_t max_acq_rel = 0;\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            if (threads_[i].acq_rel_order_[i] > max_acq_rel)\r\n                max_acq_rel = threads_[i].acq_rel_order_[i];\r\n        }\r\n\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            for (thread_id_t j = 0; j != thread_count; ++j)\r\n            {\r\n                threads_[i].acq_rel_order_[j] = max_acq_rel;\r\n            }\r\n        }\r\n    }\r\n\r\n    virtual void atomic_thread_fence_acquire()\r\n    {\r\n        threadi().atomic_thread_fence_acquire();\r\n    }\r\n\r\n    virtual void atomic_thread_fence_release()\r\n    {\r\n        threadi().atomic_thread_fence_release();\r\n    }\r\n\r\n    virtual void atomic_thread_fence_acq_rel()\r\n    {\r\n        threadi().atomic_thread_fence_acq_rel();\r\n    }\r\n\r\n    virtual void atomic_thread_fence_seq_cst()\r\n    {\r\n        sched();\r\n        threadi().atomic_thread_fence_seq_cst(seq_cst_fence_order_);\r\n    }\r\n\r\n    virtual thread_id_t get_thread_count() const\r\n    {\r\n        return thread_count;\r\n    }\r\n\r\n    virtual generic_mutex_data* mutex_ctor(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock)\r\n    {\r\n        return new (mutex_alloc_->alloc()) generic_mutex_data_impl<thread_count>(is_rw, is_exclusive_recursive, is_shared_recursive, failing_try_lock);\r\n    }\r\n\r\n    virtual void mutex_dtor(generic_mutex_data* m)\r\n    {\r\n        generic_mutex_data_impl<thread_count>* mm = 
static_cast<generic_mutex_data_impl<thread_count>*>(m);\r\n        mm->~generic_mutex_data_impl<thread_count>();\r\n        mutex_alloc_->free(mm);\r\n    }\r\n\r\n    virtual condvar_data* condvar_ctor(bool allow_spurious_wakeups)\r\n    {\r\n        return new (condvar_alloc_->alloc()) condvar_data_impl<thread_count>(allow_spurious_wakeups);\r\n    }\r\n\r\n    virtual void condvar_dtor(condvar_data* cv)\r\n    {\r\n        condvar_data_impl<thread_count>* mm = static_cast<condvar_data_impl<thread_count>*>(cv);\r\n        mm->~condvar_data_impl<thread_count>();\r\n        condvar_alloc_->free(mm);\r\n    }\r\n\r\n    virtual sema_data* sema_ctor(bool spurious_wakeups, unsigned initial_count, unsigned max_count)\r\n    {\r\n        return new (sema_alloc_->alloc()) sema_data_impl<thread_count>(spurious_wakeups, initial_count, max_count);\r\n    }\r\n\r\n    virtual void sema_dtor(sema_data* cv)\r\n    {\r\n        sema_data_impl<thread_count>* mm = static_cast<sema_data_impl<thread_count>*>(cv);\r\n        mm->~sema_data_impl<thread_count>();\r\n        sema_alloc_->free(mm);\r\n    }\r\n\r\n    virtual event_data* event_ctor(bool manual_reset, bool initial_state)\r\n    {\r\n        return new (event_alloc_->alloc()) event_data_impl<thread_count>(manual_reset, initial_state);\r\n    }\r\n\r\n    virtual void event_dtor(event_data* cv)\r\n    {\r\n        event_data_impl<thread_count>* mm = static_cast<event_data_impl<thread_count>*>(cv);\r\n        mm->~event_data_impl<thread_count>();\r\n        event_alloc_->free(mm);\r\n    }\r\n\r\n    context_impl(context_impl const&);\r\n    context_impl& operator = (context_impl const&);\r\n};\r\n\r\n/*\r\ntemplate<typename test_t, typename sched_t>\r\nstruct thread_params_t\r\n{\r\n    typedef context_impl<test_t, sched_t> context_t;\r\n\r\n    //HANDLE                  handle;\r\n    context_t*              ctx;\r\n    ostringstream      oss;\r\n    istringstream*     iss;\r\n\r\n    
//RL_NOCOPY(thread_params_t);\r\n};\r\n\r\n\r\ntemplate<typename test_t, typename sched_t>\r\nunsigned __stdcall thread_func(void * ctx)\r\n{\r\n    typedef thread_params_t<test_t, sched_t> params_t;\r\n    params_t& p = *static_cast<params_t*>(ctx);\r\n    p.ctx->simulate(p.oss, *p.iss, false);\r\n    return 0;\r\n}\r\n*/\r\n\r\ntemplate<typename test_t, typename sched_t>\r\ntest_result_e run_test(test_params& params, std::ostream& oss, bool second)\r\n{\r\n    typedef context_impl<test_t, sched_t> context_t;\r\n    typedef typename sched_t::shared_context_t shared_context_t;\r\n    //typedef thread_params_t<test_t, sched_t> params_t;\r\n\r\n    //bool destroy_persistent = false;\r\n    //context_persistent<test_t, sched_t>* persistent = 0;\r\n    //if (persistent_ptr == 0)\r\n    //{\r\n    //    persistent = new context_persistent<test_t, sched_t>;\r\n    //    persistent_ptr = persistent;\r\n    //}\r\n    //else\r\n    //{\r\n    //    persistent = static_cast<context_persistent<test_t, sched_t>*>(persistent_ptr);\r\n    //    destroy_persistent = true;\r\n    //}\r\n\r\n    shared_context_t sctx;\r\n    test_result_e res;\r\n\r\n    //if (second == false)\r\n    {\r\n        istringstream iss (params.initial_state);\r\n        res = context_t(params, sctx).simulate(oss, iss, second);\r\n    }\r\n    //else\r\n    //{\r\n    //    size_t const thread_count = 2;\r\n    //    vector<params_t*>::type threads (thread_count);\r\n    //    for (size_t i = 0; i != thread_count; i += 1)\r\n    //    {\r\n    //        threads[i] = new params_t;\r\n    //        threads[i]->iss = new istringstream(params.initial_state);\r\n    //        threads[i]->ctx = new context_t(params, sctx);\r\n    //        threads[i]->handle = (HANDLE)(_beginthreadex)(0, 0, &thread_func<test_t, sched_t>, threads[i], 0, 0);\r\n    //    }\r\n\r\n    //    for (size_t i = 0; i != thread_count; i += 1)\r\n    //    {\r\n    //        (WaitForSingleObject)(threads[i]->handle, (INFINITE));\r\n    
//    }\r\n\r\n    //    for (size_t i = 0; i != thread_count; i += 1)\r\n    //    {\r\n    //        delete threads[i]->ctx;\r\n    //        delete threads[i]->iss;\r\n    //        delete threads[i];\r\n    //    }\r\n\r\n    //    return test_result_success;\r\n    //}\r\n\r\n    //if (destroy_persistent)\r\n    //{\r\n    //    delete persistent;\r\n    //    persistent_ptr = 0;\r\n    //}\r\n\r\n    return res;\r\n}\r\n\r\n\r\ntemplate<typename test_t>\r\nbool simulate(test_params& params)\r\n{\r\n    char const* test_name = typeid(test_t).name();\r\n\t\twhile (test_name[0] >= '0' && test_name[0] <= '9')\r\n        test_name += 1;\r\n    params.test_name = test_name;\r\n    *params.output_stream << params.test_name << std::endl;\r\n\r\n    unsigned start_time = get_tick_count();\r\n\r\n    //void* persistent = 0;\r\n\r\n    ostringstream oss;\r\n    //istringstream iss (params.initial_state);\r\n    test_result_e res = test_result_success;\r\n    if (random_scheduler_type == params.search_type)\r\n        res = run_test<test_t, random_scheduler<test_t::params::thread_count> >(params, oss, false);\r\n    else if (fair_full_search_scheduler_type == params.search_type)\r\n        res = run_test<test_t, full_search_scheduler<test_t::params::thread_count> >(params, oss, false);\r\n    else if (fair_context_bound_scheduler_type == params.search_type)\r\n        res = run_test<test_t, context_bound_scheduler<test_t::params::thread_count> >(params, oss, false);\r\n    else\r\n        RL_VERIFY(false);\r\n\r\n    if (test_result_success == res)\r\n    {\r\n        unsigned t = get_tick_count() - start_time;\r\n        if (0 == t)\r\n            t = 1;\r\n\r\n        *params.output_stream << \"iterations: \" << params.stop_iteration << std::endl;\r\n        *params.output_stream << \"total time: \" << t << std::endl;\r\n        *params.output_stream << \"throughput: \" << (uint64_t)params.stop_iteration * 1000 / t << std::endl;\r\n        *params.output_stream << 
std::endl;\r\n    }\r\n    else if (false == params.output_history && false == params.collect_history)\r\n    {\r\n        ostringstream oss2;\r\n        params.initial_state = oss.str();\r\n        //istringstream iss2 (oss.str());\r\n        params.collect_history = true;\r\n        params.final_state = oss.str();\r\n        iteration_t const stop_iter = params.stop_iteration;\r\n        test_result_e res2 = test_result_success;\r\n        if (random_scheduler_type == params.search_type)\r\n            res2 = run_test<test_t, random_scheduler<test_t::params::thread_count> >(params, oss2, true);\r\n        else if (fair_full_search_scheduler_type == params.search_type)\r\n            res2 = run_test<test_t, full_search_scheduler<test_t::params::thread_count> >(params, oss2, true);\r\n        else if (fair_context_bound_scheduler_type == params.search_type)\r\n            res2 = run_test<test_t, context_bound_scheduler<test_t::params::thread_count> >(params, oss2, true);\r\n        else\r\n            RL_VERIFY(false);\r\n\r\n        // If you hit assert here, then probably your test is non-deterministic\r\n        // Check whether you are using functions like ::rand()\r\n        // or static variables or values of object addresses (for hashing) in your test\r\n        // Replace ::rand() with rl::rand(), eliminate static variables in the test\r\n        RL_VERIFY(res == res2);\r\n\r\n        RL_VERIFY(params.stop_iteration == stop_iter);\r\n        (void)stop_iter;\r\n        (void)res2;\r\n    }\r\n    return test_t::params::expected_result == res;\r\n}\r\n\r\ntemplate<typename test_t>\r\nbool simulate()\r\n{\r\n    test_params params;\r\n    return simulate<test_t>(params);\r\n}\r\n\r\ntemplate<void(*func)(), size_t thread_count>\r\nstruct simulate_thunk : test_suite<simulate_thunk<func, thread_count>, 1>\r\n{\r\n    static size_t const dynamic_thread_count = thread_count;\r\n    void thread(unsigned)\r\n    {\r\n        func();\r\n    
}\r\n};\r\n\r\ntemplate<void(*func)(), size_t thread_count>\r\nbool execute(test_params& params)\r\n{\r\n    return simulate<simulate_thunk<func, thread_count> >(params);\r\n}\r\n\r\ntemplate<void(*func)(), size_t thread_count>\r\nbool execute()\r\n{\r\n    return simulate<simulate_thunk<func, thread_count> >();\r\n}\r\n\r\ntypedef bool (*simulate_f)(test_params&);\r\n\r\n\r\ntemplate<typename test_t, typename scheduler_t>\r\nvoid context_impl<test_t, scheduler_t>::fiber_proc(void* thread_index)\r\n{\r\n    ctx().fiber_proc_impl((int)(intptr_t)thread_index);\r\n}\r\n\r\ntemplate<typename type>\r\nvoid dtor_arr_impl(void* pp)\r\n{\r\n    type* p = (type*)((char*)pp + alignment);\r\n    size_t count = *(size_t*)pp;\r\n    for (size_t i = 0; i != count; ++i)\r\n    {\r\n       p->~type();\r\n       p += 1;\r\n    }\r\n}\r\n\r\ntemplate<typename type>\r\ntype* new_arr_impl(size_t count, rl::debug_info_param info)\r\n{\r\n    RL_VERIFY(alignment >= sizeof(size_t));\r\n    context& c = ctx();\r\n#ifndef RL_GC\r\n    void* mem = c.alloc(alignment + count * sizeof(type), true, info);\r\n#else\r\n    void* mem = c.alloc(alignment + count * sizeof(type), true, &dtor_arr_impl<type>, info);\r\n#endif\r\n    *(size_t*)mem = count;\r\n    size_t i = 0;\r\n    char* begin = (char*)mem + alignment;\r\n    char* pos = begin;\r\n    try\r\n    {\r\n        for (; i != count; ++i)\r\n        {\r\n            new (pos) type;\r\n            pos += sizeof(type);\r\n        }\r\n        return (type*)begin;\r\n    }\r\n    catch (...)\r\n    {\r\n        pos -= sizeof(type);\r\n        i -= 1;\r\n        for (; i < count; --i)\r\n        {\r\n            ((type*)pos)->~type();\r\n            pos -= sizeof(type);\r\n        }\r\n        ctx().free(mem, true, info);\r\n        throw;\r\n    }\r\n}\r\n\r\ntemplate<typename type>\r\nvoid delete_arr_impl(type* p, debug_info_param info)\r\n{\r\n    if (p == 0)\r\n        return;\r\n    context& c = ctx();\r\n    char* begin = (char*)p - 
alignment;\r\n    size_t count = *(size_t*)begin;\r\n    for (size_t i = 0; i != count; ++i)\r\n    {\r\n       p->~type();\r\n       p += 1;\r\n    }\r\n    c.free(begin, true, info);\r\n}\r\n\r\ntemplate<typename type>\r\nvoid delete_impl(type* p, debug_info_param info)\r\n{\r\n    p->~type();\r\n    ctx().free(p, false, info);\r\n}\r\n\r\ntemplate<typename type>\r\nvoid dtor_impl(void* p)\r\n{\r\n    static_cast<type*>(p)->~type();\r\n}\r\n\r\ninline unsigned rand(unsigned limit)\r\n{\r\n    return ctx().rand(limit, sched_type_user);\r\n}\r\n\r\ninline unsigned thread_index()\r\n{\r\n    return ctx().threadx_->index_;\r\n}\r\n\r\n\r\nstruct new_proxy\r\n{\r\n    debug_info info;\r\n    new_proxy(debug_info_param info)\r\n        : info(info)\r\n    {\r\n        //printf(__FUNCSIG__ \"\\n\");\r\n    }\r\n\r\n    template<typename T>\r\n    T* operator % (T* p)\r\n    {\r\n        context& c = ctx();\r\n        size_t sz = c.prev_alloc_size();\r\n        if (sz)\r\n        {\r\n            RL_HIST(memory_alloc_event) {p, sz, false} RL_HIST_END();\r\n        }\r\n        return p;\r\n    }\r\n};\r\n\r\nstruct delete_proxy\r\n{\r\n    //debug_info info_;\r\n    delete_proxy(debug_info_param info)\r\n        //: info_(info)\r\n    {\r\n        ctx().set_debug_info(info);\r\n        //printf(__FUNCSIG__ \"\\n\");\r\n    }\r\n};\r\n\r\ninline void* rl_malloc(size_t sz, debug_info_param info)\r\n{\r\n    return ctx().alloc(sz, false, info);\r\n}\r\n\r\ninline void* rl_calloc(size_t sz, size_t cnt, debug_info_param info)\r\n{\r\n    void* p = ctx().alloc(sz * cnt, false, info);\r\n    memset(p, 0, sz * cnt);\r\n    return p;\r\n}\r\n\r\ninline void* realloc(void* p, size_t sz, debug_info_param info)\r\n{\r\n    if (sz == 0)\r\n    {\r\n        ctx().free(p, false, info);\r\n        return 0;\r\n    }\r\n    else\r\n    {\r\n        void* pp = ctx().alloc(sz, false, info);\r\n        memcpy(pp, p, sz); //!!! 
how much memory to move?\r\n        ctx().free(p, false, info);\r\n        return pp;\r\n    }\r\n}\r\n\r\ninline void rl_free(void* p, debug_info_param info)\r\n{\r\n    ctx().free(p, false, info);\r\n}\r\n\r\ninline size_t hash_ptr(void const* p, size_t size)\r\n{\r\n    return ctx().get_addr_hash(p) % size;\r\n}\r\n\r\ninline void systemwide_fence(debug_info_param info)\r\n{\r\n    context& c = ctx();\r\n    RL_HIST(user_msg_event) {\"system-wide fence\"} RL_HIST_END();\r\n    c.rl_global_fence();\r\n}\r\n\r\n} // namespace rl\r\n\r\n\r\n#ifndef RL_GC\r\ninline void* operator new (size_t size, rl::debug_info_param info)\r\n{\r\n    return rl::ctx().alloc(size, false, info);\r\n}\r\n\r\ninline void* operator new [] (size_t size, rl::debug_info_param info)\r\n{\r\n    return rl::ctx().alloc(size, false, info);\r\n}\r\n\r\ninline void operator delete (void* p, rl::debug_info_param info)\r\n{\r\n    rl::ctx().free(p, false, info);\r\n}\r\n\r\ninline void operator delete [] (void* p, rl::debug_info_param info)\r\n{\r\n    rl::ctx().free(p, false, info);\r\n}\r\n#endif\r\n\r\n\r\n\r\n#ifdef RL_GC\r\ninline void* operator new (size_t size, void(*dtor)(void*), rl::debug_info_param info)\r\n{\r\n    return rl::ctx().alloc(size, false, dtor, info);\r\n}\r\n\r\ninline void operator delete (void* p, void(*dtor)(void*), rl::debug_info_param info)\r\n{\r\n    (void)p;\r\n    (void)dtor;\r\n    (void)info;\r\n}\r\n#endif\r\n\r\ninline void* operator new (size_t size) RL_THROW_SPEC(std::bad_alloc)\r\n{\r\n    if (&rl::ctx())\r\n        return rl::ctx().alloc(size);\r\n    else\r\n        return (::malloc)(size);\r\n}\r\n\r\ninline void* operator new [] (size_t size) RL_THROW_SPEC(std::bad_alloc)\r\n{\r\n    if (&rl::ctx())\r\n        return rl::ctx().alloc(size);\r\n    else\r\n        return (::malloc)(size);\r\n}\r\n\r\ninline void operator delete (void* p) throw()\r\n{\r\n    if (&rl::ctx())\r\n        rl::ctx().free(p);\r\n    else\r\n        (::free)(p);\r\n}\r\n\r\ninline 
void operator delete [] (void* p) throw()\r\n{\r\n    if (&rl::ctx())\r\n        rl::ctx().free(p);\r\n    else\r\n        (::free)(p);\r\n}\r\n\r\n#define RL_NEW_PROXY rl::new_proxy($) % new\r\n#define RL_DELETE_PROXY rl::delete_proxy($) , delete\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/context_addr_hash.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CONTEXT_ADDR_HASH_HPP\r\n#define RL_CONTEXT_ADDR_HASH_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nstruct context_addr_hash_iface\r\n{\r\n    virtual size_t      get_addr_hash               (void const* p) = 0;\r\n    virtual             ~context_addr_hash_iface    () {} // to calm down g++\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename base_t, thread_id_t thread_count>\r\nclass context_addr_hash_impl : protected base_t\r\n{\r\npublic:\r\n    context_addr_hash_impl(thread_id_t thread_count_param, test_params& params)\r\n        : base_t(thread_count_param, params)\r\n    {\r\n    }\r\n\r\n    void iteration_begin()\r\n    {\r\n        base_t::iteration_begin();\r\n        hash_map_.clear();\r\n        hash_seq_ = 0;\r\n    }\r\n\r\nprivate:\r\n    struct entry\r\n    {\r\n        uintptr_t       ptr_;\r\n        size_t          hash_;\r\n    };\r\n    typedef map<void const*, size_t>::type  hash_map_t;\r\n    hash_map_t                              hash_map_;\r\n    size_t                                  hash_seq_;\r\n\r\n    virtual size_t      get_addr_hash               (void const* p)\r\n    {\r\n        //!!! 
accept 'table size' to do 'hash % table_size'\r\n        // will give more information for state exploration\r\n\r\n        hash_map_t::iterator iter (hash_map_.find(p));\r\n        if (iter != hash_map_.end() && iter->first == p)\r\n        {\r\n            return iter->second;\r\n        }\r\n        else\r\n        {\r\n            //!!! distribute hashes more randomly, use rand()\r\n            size_t hash = hash_seq_++;\r\n            hash_map_.insert(std::make_pair(p, hash));\r\n            return hash;\r\n        }\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/context_base.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CONTEXT_BASE_HPP\r\n#define RL_CONTEXT_BASE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"history.hpp\"\r\n#include \"memory.hpp\"\r\n#include \"test_result.hpp\"\r\n#include \"slab_allocator.hpp\"\r\n#include \"test_params.hpp\"\r\n#include \"random.hpp\"\r\n#include \"foreach.hpp\"\r\n#include \"thread_base.hpp\"\r\n#include \"context_addr_hash.hpp\"\r\n\r\n\r\n#ifdef RL_DEBUGBREAK_ON_ASSERT\r\n#    ifdef _MSC_VER\r\n#        define RL_DEBUGBREAK_ON_ASSERT_IMPL {if (IsDebuggerPresent()) __debugbreak();}\r\n#    else\r\n#        define RL_DEBUGBREAK_ON_ASSERT_IMPL {__asm(\"int3\");}\r\n#    endif\r\n#else\r\n#   define RL_DEBUGBREAK_ON_ASSERT_IMPL\r\n#endif\r\n\r\n#ifdef RL_DEBUGBREAK_ON_FAILURE\r\n#    ifdef _MSC_VER\r\n#        define RL_DEBUGBREAK_ON_FAILURE_IMPL {if (IsDebuggerPresent()) __debugbreak();}\r\n#    else\r\n#        define RL_DEBUGBREAK_ON_FAILURE_IMPL {__asm(\"int3\");}\r\n#    endif\r\n#else\r\n#   define RL_DEBUGBREAK_ON_FAILURE_IMPL\r\n#endif\r\n\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\nclass thread_info_base;\r\n\r\nstruct atomic_data {};\r\nstruct var_data\r\n{\r\n    virtual void init(thread_info_base& th) = 0;\r\n    virtual bool store(thread_info_base& th) = 0;\r\n    virtual bool load(thread_info_base& th) = 0;\r\n    virtual ~var_data() {} // just to calm down gcc\r\n};\r\n\r\nstruct generic_mutex_data;\r\nstruct condvar_data;\r\nstruct sema_data;\r\nstruct event_data;\r\n\r\n\r\nstruct user_msg_event\r\n{\r\n    string msg_;\r\n\r\n    void 
output(std::ostream& s) const\r\n    {\r\n        s << msg_;\r\n    }            \r\n};\r\n\r\nclass context;\r\n\r\ntemplate<int fake = 0>\r\nstruct context_holder\r\n{\r\n    static context* instance_;\r\n\r\n    static long volatile ctx_seq;\r\n};\r\n\r\ntemplate<int fake>\r\nlong volatile context_holder<fake>::ctx_seq = 0;\r\n\r\nclass context\r\n    : public thread_local_context_iface\r\n    , public context_addr_hash_iface\r\n    , nocopy<>\r\n{\r\npublic:\r\n    static context& instance()\r\n    {\r\n        //!!! disabled for check in operator new RL_VERIFY(context_holder<>::instance_);\r\n        return *context_holder<>::instance_;\r\n    }\r\n\r\n    virtual atomic_data* atomic_ctor(void* ctx) = 0;\r\n    virtual void atomic_dtor(atomic_data* data) = 0;\r\n\r\n    virtual var_data* var_ctor() = 0;\r\n    virtual void var_dtor(var_data* data) = 0;\r\n\r\n    virtual generic_mutex_data* mutex_ctor(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock) = 0;\r\n    virtual void mutex_dtor(generic_mutex_data* m) = 0;\r\n\r\n    virtual condvar_data* condvar_ctor(bool allow_spurious_wakeups) = 0;\r\n    virtual void condvar_dtor(condvar_data* cv) = 0;\r\n\r\n    virtual sema_data* sema_ctor(bool spurious_wakeups, unsigned initial_count, unsigned max_count) = 0;\r\n    virtual void sema_dtor(sema_data* cv) = 0;\r\n\r\n    virtual event_data* event_ctor(bool manual_reset, bool initial_state) = 0;\r\n    virtual void event_dtor(event_data* cv) = 0;\r\n\r\n    virtual void rl_global_fence() = 0;\r\n    virtual void sched() = 0;\r\n    virtual void yield(unsigned count, debug_info_param info) = 0;\r\n    virtual void fail_test(char const* desc, test_result_e res, debug_info_param info) = 0;\r\n    virtual void rl_until(char const* desc, debug_info_param info) = 0;\r\n\r\n    virtual void* alloc(size_t size, bool is_array, debug_info_param info) = 0;\r\n#ifdef RL_GC\r\n    virtual void* alloc(size_t size, bool is_array, 
void(*dtor)(void*), debug_info_param info) = 0;\r\n#endif\r\n    virtual void free(void* p, bool is_array, debug_info_param info) = 0;\r\n\r\n    virtual void* alloc(size_t size) = 0;\r\n    virtual void free(void* p) = 0;\r\n    virtual size_t prev_alloc_size() = 0;\r\n    virtual void set_debug_info(debug_info_param info) = 0;\r\n\r\n    virtual void fiber_proc_impl(int thread_index) = 0;\r\n\r\n    virtual unpark_reason park_current_thread(bool is_timed,\r\n                                              bool allow_spurious_wakeup,\r\n                                              bool do_switch,\r\n                                              debug_info_param info) = 0;\r\n    virtual void unpark_thread(thread_id_t th, bool do_switch, debug_info_param info) = 0;\r\n    virtual void switch_back(debug_info_param info) = 0;\r\n\r\n    virtual void atomic_thread_fence_acquire() = 0;\r\n    virtual void atomic_thread_fence_release() = 0;\r\n    virtual void atomic_thread_fence_acq_rel() = 0;\r\n    virtual void atomic_thread_fence_seq_cst() = 0;\r\n\r\n    virtual unsigned rand(unsigned limit, sched_type t) = 0;\r\n\r\n    virtual win_waitable_object* create_thread(void*(*fn)(void*), void* ctx) = 0;\r\n\r\n    virtual unpark_reason wfmo_park(void** ws,\r\n                                    win_waitable_object** wo,\r\n                                    size_t count,\r\n                                    bool wait_all,\r\n                                    bool is_timed,\r\n                                    debug_info_param info) = 0;\r\n\t\r\n    int get_errno();\r\n    void set_errno(int value);\r\n\r\n    thread_info_base* threadx_;\r\n    timestamp_t* seq_cst_fence_order_;\r\n\r\n    bool invariant_executing;\r\n\r\n    RL_INLINE bool collecting_history() const\r\n    {\r\n        return params_.collect_history && false == invariant_executing;\r\n    }\r\n\r\n    template<typename event_t>\r\n    void exec_log(debug_info_param info, event_t const& 
ev);\r\n\r\n    void exec_log_msg(debug_info_param info, char const* msg)\r\n    {\r\n        user_msg_event ev = {msg};\r\n        exec_log(info, ev);\r\n    }\r\n\r\n    bool is_random_sched() const\r\n    {\r\n        return is_random_sched_;\r\n    }\r\n\r\n    unsigned get_ctx_seq() const\r\n    {\r\n        return ctx_seq_;\r\n    }\r\n\r\n    void disable_preemption();\r\n    void enable_preemption();\r\n\r\n    virtual thread_id_t get_thread_count() const = 0;\r\n\r\n    thread_id_t current_thread() const\r\n    {\r\n        return threadx_->index_;\r\n    }\r\n\r\n    void iteration_begin()\r\n    {\r\n    }\r\n\r\nprotected:\r\n    history_mgr history_;\r\n    test_params& params_;\r\n    unsigned disable_preemption_;\r\n    int                         disable_alloc_;\r\n\r\n    context(thread_id_t thread_count, test_params& params)\r\n        : history_(*params.output_stream, thread_count)\r\n        , params_(params)\r\n        , disable_alloc_(1)\r\n    {\r\n        RL_VERIFY(0 == context_holder<>::instance_);\r\n        context_holder<>::instance_ = this;\r\n\r\n        is_random_sched_ = params_.search_type == random_scheduler_type;\r\n\r\n#ifdef _MSC_VER\r\n        ctx_seq_ = _InterlockedExchangeAdd(&context_holder<>::ctx_seq, 1) + 1;\r\n#else\r\n        ctx_seq_ = __sync_fetch_and_add(&context_holder<>::ctx_seq, 1) + 1;\r\n#endif\r\n    }\r\n\r\n    virtual ~context()\r\n    {\r\n        RL_VERIFY(this == context_holder<>::instance_);\r\n        context_holder<>::instance_ = 0;\r\n    }\r\n    \r\nprivate:\r\n    bool is_random_sched_;\r\n    unsigned ctx_seq_;\r\n};\r\n\r\n\r\ntemplate<int fake>\r\ncontext* context_holder<fake>::instance_ = 0;\r\n\r\n\r\n\r\n\r\ninline context& ctx()\r\n{\r\n    return context::instance();\r\n}\r\n\r\n\r\ninline int get_errno()\r\n{\r\n    return ctx().get_errno();\r\n}\r\n\r\ninline void set_errno(int value)\r\n{\r\n    return ctx().set_errno(value);\r\n}\r\n\r\nclass preemption_disabler : 
nocopy<>\r\n{\r\npublic:\r\n    preemption_disabler(context& c)\r\n        : c_(c)\r\n    {\r\n        c_.disable_preemption();\r\n    }\r\n\r\n    ~preemption_disabler()\r\n    {\r\n        c_.enable_preemption();\r\n    }\r\n\r\nprivate:\r\n    context& c_;\r\n};\r\n\r\n\r\n}\r\n\r\n\r\n#define RL_HIST_IMPL(C, INFO, TYPE) \\\r\n    do { \\\r\n        if (C.collecting_history()) { \\\r\n            rl::debug_info const& rl_info_c = INFO; \\\r\n            rl::context& rl_hist_c = C; \\\r\n            TYPE ev = \\\r\n/**/\r\n\r\n#define RL_HIST_END() \\\r\n                        ; \\\r\n            rl_hist_c.exec_log(rl_info_c, ev); \\\r\n        } \\\r\n    } while ((void)0, 0) \\\r\n/**/\r\n\r\n#define RL_HIST_CTX(TYPE) RL_HIST_IMPL((*this), info, TYPE)\r\n\r\n#define RL_HIST(TYPE) RL_HIST_IMPL(c, info, TYPE)\r\n\r\n#define RL_LOG(desc) rl::ctx().exec_log_msg(RL_INFO, desc)\r\n\r\n\r\n\r\n#ifdef _MSC_VER\r\n#   define RL_ASSERT_IMPL(x, res, str, info) do {if (!((void)0, (x))) {{RL_DEBUGBREAK_ON_ASSERT_IMPL} rl::ctx().fail_test(str, res, info);}} while ((void)0, 0)\r\n#else\r\n#   define RL_ASSERT_IMPL(x, res, str, info) do {if (!((void)0, (x))) rl::ctx().fail_test(str, res, info);} while ((void)0, 0)\r\n#endif\r\n#define RL_ASSERT(x) RL_ASSERT_IMPL(x, rl::test_result_user_assert_failed, \"assertion: \" #x, RL_INFO)\r\n#define RL_UNTIL(x) do {if ((x)) rl::ctx().rl_until(#x, RL_INFO);} while ((void)0, 0)\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/context_base_impl.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CONTEXT_BASE_IMPL_HPP\r\n#define RL_CONTEXT_BASE_IMPL_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n/*\r\ninline void context::disable_history()\r\n{\r\n    RL_VERIFY(threadx_);\r\n    threadx_->disable_history_ += 1;\r\n}\r\n\r\ninline void context::enable_history()\r\n{\r\n    RL_VERIFY(threadx_);\r\n    RL_VERIFY(threadx_->disable_history_);\r\n    threadx_->disable_history_ -= 1;\r\n}\r\n*/\r\n\r\ninline void context::disable_preemption()\r\n{\r\n    disable_preemption_ += 1;\r\n}\r\n\r\ninline void context::enable_preemption()\r\n{\r\n    disable_preemption_ -= 1;\r\n}\r\n\r\ninline int context::get_errno()\r\n{\r\n    RL_VERIFY(threadx_);\r\n    return threadx_->errno_;\r\n}\r\n\r\ninline void context::set_errno(int value)\r\n{\r\n    RL_VERIFY(threadx_);\r\n    threadx_->errno_ = value;\r\n}\r\n\r\ntemplate<typename event_t>\r\nvoid context::exec_log(debug_info_param info, event_t const& ev)\r\n{\r\n    RL_VERIFY(collecting_history());\r\n    disable_alloc_ += 1;\r\n    history_.exec_log(threadx_ ? threadx_->index_ : -1, info, ev, params_.output_history);\r\n    disable_alloc_ -= 1;\r\n}\r\n\r\n\r\n\r\n}\r\n\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/context_bound_scheduler.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CONTEXT_BOUND_SCHEDULER_HPP\r\n#define RL_CONTEXT_BOUND_SCHEDULER_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"full_search_scheduler.hpp\"\r\n#include \"foreach.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nstruct context_bound_scheduler_thread_info : tree_search_scheduler_thread_info<thread_count>\r\n{\r\n    unsigned sched_count_;\r\n    unsigned forced_context_switch_count_;\r\n\r\n    void reset(test_params& params)\r\n    {\r\n        tree_search_scheduler_thread_info<thread_count>::reset(params);\r\n        sched_count_ = 0;\r\n        forced_context_switch_count_ = 0;\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass context_bound_scheduler\r\n    : public tree_search_scheduler<context_bound_scheduler<thread_count>\r\n        , context_bound_scheduler_thread_info<thread_count>, thread_count>\r\n{\r\npublic:\r\n    typedef tree_search_scheduler<context_bound_scheduler<thread_count>\r\n        , context_bound_scheduler_thread_info<thread_count>, thread_count> base_t;\r\n    typedef typename base_t::thread_info_t thread_info_t;\r\n    typedef typename base_t::shared_context_t shared_context_t;\r\n\r\n    context_bound_scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count)\r\n        : base_t(params, ctx, dynamic_thread_count)\r\n    {\r\n    }\r\n\r\n    thread_id_t iteration_begin_impl()\r\n    {\r\n        switches_remain_ = this->params_.context_bound;\r\n        
return base_t::iteration_begin_impl();\r\n    }\r\n\r\n    bool can_switch(thread_info_t& t)\r\n    {\r\n        t.sched_count_ += 1;\r\n        return switches_remain_ != 0;\r\n    }\r\n\r\n    void on_switch(thread_info_t& t)\r\n    {\r\n        if (t.state_ == thread_state_running)\r\n        {\r\n            RL_VERIFY(switches_remain_);\r\n            switches_remain_ -= 1;\r\n        }\r\n        else\r\n        {\r\n            t.forced_context_switch_count_ += 1;\r\n        }\r\n    }\r\n\r\n    double iteration_count_approx()\r\n    {\r\n        return 1.0;\r\n        /*\r\n        iteration_t const P = thread_count;\r\n        iteration_t const C0 = this->params_.context_bound;\r\n        iteration_t total = 1;//factorial(P);// * power(P, P * C0);\r\n        for (iteration_t i = 0; i != P - 1; ++i)\r\n            total *= power(i + 1, C0 + 1);\r\n        //if (C0)\r\n        //    total *= power(P - 1, P - 1);\r\n        if (val(P) > 1)\r\n        {\r\n            for (iteration_t i = 0; i != P; ++i)\r\n            {\r\n                iteration_t const N = this->threads_[i].sched_count_;\r\n                iteration_t const C = C0 + this->threads_[i].forced_context_switch_count_;\r\n                //total *= (iteration_t)pow((double)(threads_[i].sched_count_ + 2) * (thread_count - 1), (int)(params_.context_bound + threads_[i].forced_context_switch_count_));\r\n                total *= factorial(N, C) / factorial(C);\r\n                //C$ += C + 1;\r\n                //total *= (int)(params_.context_bound + threads_[i].forced_context_switch_count_));\r\n            }\r\n            //total *= factorial(C$);\r\n        }\r\n        else\r\n        {\r\n            total = 1;\r\n        }\r\n        //iteration_t total = (iteration_t)pow((double)sched_count / thread_count + 1, (int)(params_.context_bound * thread_count + forced_context_switch_mean_ + 0.5));\r\n        //total *= thread_count;\r\n        //total *= (iteration_t)pow((double)thread_count - 
1, thread_count);\r\n        for (size_t i = 0; i != this->stree_.size(); ++i)\r\n        {\r\n            if (this->stree_[i].type_ != sched_type_sched)\r\n            {\r\n                total *= this->stree_[i].count_;\r\n            }\r\n        }\r\n        return (double)total;\r\n        */\r\n    }\r\n\r\nprivate:\r\n    unsigned switches_remain_;\r\n\r\n    template<typename T>\r\n    static T factorial(T x, T i)\r\n    {\r\n        if (0 == i)\r\n            return 1;\r\n        T r = x;\r\n        for (--i; i; --i)\r\n            r *= x - i;\r\n        return r;\r\n    }\r\n\r\n    template<typename T>\r\n    static T factorial(T x)\r\n    {\r\n        if (0 == x)\r\n            return 1;\r\n        T r = x;\r\n        for (T i = x - 1; i; --i)\r\n            r *= i;\r\n        return r;\r\n    }\r\n\r\n    template<typename T>\r\n    static T power(T x, T y)\r\n    {\r\n        if (0 == y)\r\n            return 1;\r\n        T r = x;\r\n        for (T i = y - 1; i; --i)\r\n            r *= x;\r\n        return r;\r\n    }\r\n\r\n    RL_NOCOPY(context_bound_scheduler);\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/defs.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_DEFS_HPP\r\n#define RL_DEFS_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\ntypedef int thread_id_t;\r\ntypedef size_t timestamp_t;\r\ntypedef uint64_t iteration_t;\r\n\r\nsize_t const atomic_history_size = 3;\r\niteration_t const progress_probe_period = 4 * 1024;\r\n\r\nsize_t const alignment = 16;\r\n\r\nclass context;\r\nclass thread_base;\r\nstruct win_waitable_object;\r\n\r\nenum sched_type\r\n{\r\n    sched_type_sched,\r\n    sched_type_atomic_load,\r\n    sched_type_cas_fail,\r\n    sched_type_mem_realloc,\r\n    sched_type_user,\r\n};\r\n\r\nenum unpark_reason\r\n{\r\n    unpark_reason_normal,\r\n    unpark_reason_timeout,\r\n    unpark_reason_spurious,\r\n};\r\n\r\nstruct debug_info\r\n{\r\n    char const* func_;\r\n    char const* file_;\r\n    unsigned line_;\r\n\r\n    debug_info(char const* func = \"\", char const* file = \"\", unsigned line = 0)\r\n        : func_(func)\r\n        , file_(file)\r\n        , line_(line)\r\n    {\r\n    }\r\n};\r\n\r\ntypedef debug_info const& debug_info_param;\r\n\r\ninline void assert_failed(char const* cond, debug_info_param info)\r\n{\r\n    std::cout << \"RELACY INTERNAL ASSERT FAILED: '\" << cond\r\n    << \"' at \" << info.file_ << \":\" << info.line_ << \" (\" << info.func_ << \")\" << std::endl;\r\n}\r\n\r\ntemplate<typename T>\r\nstruct raw_allocator : std::allocator<T>\r\n{\r\n    template<class Y>\r\n    struct rebind\r\n    {\r\n        typedef raw_allocator<Y> other;\r\n    };\r\n\r\n    template<typename Y>\r\n    
raw_allocator(raw_allocator<Y> const&)\r\n    {\r\n    }\r\n\r\n    raw_allocator(raw_allocator const& rhs)\r\n        : std::allocator<T>(rhs)\r\n    {\r\n    }\r\n\r\n    raw_allocator()\r\n        : std::allocator<T>()\r\n    {\r\n    }\r\n\r\n    T* allocate(size_t count, void* = 0)\r\n    {\r\n        return (T*)(::malloc)(count * sizeof(T));\r\n    }\r\n\r\n    void deallocate(T* p, size_t)\r\n    {\r\n        (::free)(p);\r\n    }\r\n};\r\n\r\n\r\ntemplate<typename T>\r\nstruct vector\r\n{\r\n    typedef std::vector<T, raw_allocator<T> > type;\r\n};\r\n\r\ntemplate<typename T>\r\nstruct queue\r\n{\r\n    typedef std::queue<T, std::deque<T, raw_allocator<T> > > type;\r\n};\r\n\r\ntemplate<typename T>\r\nstruct stack\r\n{\r\n    typedef std::stack<T, std::vector<T, raw_allocator<T> > > type;\r\n};\r\n\r\ntemplate<typename T>\r\nstruct set\r\n{\r\n    typedef std::set<T, std::less<T>, raw_allocator<T> > type;\r\n};\r\n\r\ntemplate<typename T, typename Y>\r\nstruct map\r\n{\r\n    typedef std::map<T, Y, std::less<T>, raw_allocator<std::pair<T, Y> > > type;\r\n};\r\n\r\ntypedef std::basic_string<char, std::char_traits<char>, raw_allocator<char> > string;\r\ntypedef std::basic_ostringstream<char, std::char_traits<char>, raw_allocator<char> > ostringstream;\r\ntypedef std::basic_istringstream<char, std::char_traits<char>, raw_allocator<char> > istringstream;\r\n\r\n}\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/dyn_thread.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_DYN_THREAD_HPP\r\n#define RL_DYN_THREAD_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context_base.hpp\"\r\n#include \"stdlib/semaphore.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nclass dyn_thread : nocopy<>\r\n{\r\npublic:\r\n    dyn_thread()\r\n    {\r\n        handle_ = 0;\r\n    }\r\n\r\n    void start(void*(*fn)(void*), void* arg)\r\n    {\r\n        RL_VERIFY(handle_ == 0);\r\n        handle_ = ctx().create_thread(fn, arg);\r\n    }\r\n\r\n    void join()\r\n    {\r\n        RL_VERIFY(handle_);\r\n        handle_->wait(false, false, $);\r\n        handle_ = 0;\r\n    }\r\n\r\nprivate:\r\n    win_waitable_object* handle_;\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/dyn_thread_ctx.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_DYN_THREAD_CTX_HPP\r\n#define RL_DYN_THREAD_CTX_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\n#include \"base.hpp\"\r\n#include \"waitset.hpp\"\r\n#include \"sync_var.hpp\"\r\n#include \"stdlib/semaphore.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass thread_sync_object : public win_waitable_object\r\n{\r\npublic:\r\n    thread_sync_object()\r\n    {\r\n    }\r\n\r\n    void iteration_begin()\r\n    {\r\n        finished_ = false;\r\n        sync_.iteration_begin();\r\n        RL_VERIFY(!ws_);\r\n    }\r\n\r\n    void on_create()\r\n    {\r\n        sync_.release(ctx().threadx_);\r\n    }\r\n\r\n    void on_start()\r\n    {\r\n        RL_VERIFY(finished_ == false);\r\n        context& c = ctx();\r\n        sync_.acquire(c.threadx_);\r\n    }\r\n    \r\n    void on_finish()\r\n    {\r\n        RL_VERIFY(finished_ == false);\r\n        context& c = ctx();\r\n        finished_ = true;\r\n        sync_.release(c.threadx_);\r\n        ws_.unpark_all(c, $);\r\n    }\r\n\r\nprivate:\r\n    bool finished_;\r\n    waitset<thread_count> ws_;\r\n    sync_var<thread_count> sync_;\r\n\r\n    virtual void deinit(debug_info_param info)\r\n    {\r\n        (void)info;\r\n    }\r\n\r\n    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        if (finished_)\r\n        {\r\n            sync_.acquire(c.threadx_);\r\n            return sema_wakeup_reason_success;\r\n        }\r\n        else if 
(try_wait)\r\n        {\r\n            sync_.acquire(c.threadx_);\r\n            return sema_wakeup_reason_failed;\r\n        }\r\n        else\r\n        {\r\n            unpark_reason reason = ws_.park_current(c, is_timed, false, false, info);\r\n            sync_.acquire(c.threadx_);\r\n            if (reason == unpark_reason_normal)\r\n                return sema_wakeup_reason_success;\r\n            else if (reason == unpark_reason_timeout)\r\n                return sema_wakeup_reason_timeout;\r\n            RL_VERIFY(false);\r\n            return sema_wakeup_reason_failed;\r\n        }\r\n    }\r\n\r\n    virtual bool signal(debug_info_param info)\r\n    {\r\n        RL_ASSERT_IMPL(false, test_result_thread_signal, \"trying to signal a thread\", info);\r\n        return false;\r\n    }\r\n\r\n    virtual bool is_signaled(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        return finished_;\r\n    }\r\n\r\n    virtual void memory_acquire(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        sync_.acquire(ctx().threadx_);\r\n    }\r\n\r\n    virtual void* prepare_wait(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        return &ws_;\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/foreach.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_FOREACH_HPP\r\n#define RL_FOREACH_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<typename T, thread_id_t i, thread_id_t index>\r\nstruct foreach_thread_impl\r\n{\r\n    template<typename F>\r\n    RL_INLINE static void exec(\r\n        T* v1,\r\n        F func)\r\n    {\r\n        (*func)(v1[i]);\r\n        foreach_thread_impl<T, i + 1, index - 1>::exec(v1, func);\r\n    }\r\n\r\n    RL_INLINE static void exec(\r\n        T* v1, T* v2,\r\n        void (*func)(T& e1, T& e2))\r\n    {\r\n        (*func)(v1[i], v2[i]);\r\n        foreach_thread_impl<T, i + 1, index - 1>::exec(v1, v2, func);\r\n    }\r\n\r\n    RL_INLINE static void exec(\r\n        T* v1, T* v2, T* v3,\r\n        void (*func)(T& e1, T& e2, T& e3))\r\n    {\r\n        (*func)(v1[i], v2[i], v3[i]);\r\n        foreach_thread_impl<T, i + 1, index - 1>::exec(v1, v2, v3, func);\r\n    }\r\n};\r\n\r\ntemplate<typename T, thread_id_t i>\r\nstruct foreach_thread_impl<T, i, 0>\r\n{\r\n    template<typename F>\r\n    RL_INLINE static void exec(\r\n        T*,\r\n        F)\r\n    {\r\n    }\r\n\r\n    RL_INLINE static void exec(\r\n        T*, T*,\r\n        void (*)(T&, T&))\r\n    {\r\n    }\r\n\r\n    RL_INLINE static void exec(\r\n        T*, T*, T*,\r\n        void (*)(T&, T&, T&))\r\n    {\r\n    }\r\n};\r\n\r\ntemplate<thread_id_t count, typename T, typename F>\r\nRL_INLINE void foreach(\r\n    T* v1,\r\n    F func)\r\n{\r\n    foreach_thread_impl<T, 0, count>::exec(v1, 
func);\r\n}\r\n\r\ntemplate<thread_id_t count, typename T>\r\nRL_INLINE void foreach(\r\n    T* v1, T* v2,\r\n    void (*func)(T& e1, T& e2))\r\n{\r\n    foreach_thread_impl<T, 0, count>::exec(v1, v2, func);\r\n}\r\n\r\ntemplate<thread_id_t count, typename T>\r\nRL_INLINE void foreach(\r\n    T* v1, T* v2, T* v3,\r\n    void (*func)(T& e1, T& e2, T& e3))\r\n{\r\n    foreach_thread_impl<T, 0, count>::exec(v1, v2, v3, func);\r\n}\r\n\r\nRL_INLINE void assign_zero(timestamp_t& elem)\r\n{\r\n    elem = 0;\r\n}\r\n\r\nRL_INLINE void assign_zero_u(unsigned& elem)\r\n{\r\n    elem = 0;\r\n}\r\n\r\ntemplate<timestamp_t value>\r\nRL_INLINE void assign(timestamp_t& elem)\r\n{\r\n    elem = value;\r\n}\r\n\r\nRL_INLINE void assign(timestamp_t& elem1, timestamp_t& elem2)\r\n{\r\n    elem1 = elem2;\r\n}\r\n\r\nRL_INLINE void assign_max(timestamp_t& elem1, timestamp_t& elem2)\r\n{\r\n    if (elem2 > elem1)\r\n        elem1 = elem2;\r\n}\r\n\r\nRL_INLINE void plus_one(timestamp_t& elem)\r\n{\r\n    elem += 1;\r\n}\r\n\r\n}\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/full_search_scheduler.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_FULL_SEARCH_SCHEDULER_HPP\r\n#define RL_FULL_SEARCH_SCHEDULER_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"scheduler.hpp\"\r\n#include \"foreach.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nstruct tree_search_scheduler_thread_info : scheduler_thread_info\r\n{\r\n    unsigned                    yield_sched_count_ [thread_count];\r\n    unsigned                    yield_priority_ [thread_count];\r\n    unsigned                    total_yield_priority_;\r\n    //unsigned                    subsequent_timed_waits_;\r\n\r\n    void reset(test_params& params)\r\n    {\r\n        scheduler_thread_info::reset(params);\r\n        foreach<thread_count>(yield_sched_count_, &assign_zero_u);\r\n        foreach<thread_count>(yield_priority_, &assign_zero_u);\r\n        total_yield_priority_ = 0;\r\n        //subsequent_timed_waits_ = 0;\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename derived_t, typename thread_info_type, thread_id_t thread_count>\r\nclass tree_search_scheduler\r\n    : public scheduler<derived_t, thread_info_type, thread_count>\r\n{\r\npublic:\r\n    typedef scheduler<derived_t, thread_info_type, thread_count> base_t;\r\n    typedef typename base_t::thread_info_t thread_info_t;\r\n    typedef typename base_t::shared_context_t shared_context_t;\r\n\r\n    struct task_t\r\n    {\r\n    };\r\n\r\n    tree_search_scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count)\r\n        : base_t(params, 
ctx, dynamic_thread_count)\r\n        , stree_depth_()\r\n        , iteration_count_mean_()\r\n        , iteration_count_probe_count_()\r\n    {\r\n        stree_.reserve(128);\r\n    }\r\n\r\n    thread_id_t iteration_begin_impl()\r\n    {\r\n        stree_depth_ = 0;\r\n\r\n        unsigned const index = rand_impl(this->running_threads_count, sched_type_sched);\r\n        thread_id_t const th = this->running_threads[index];\r\n        return th;\r\n    }\r\n\r\n    bool iteration_end_impl()\r\n    {\r\n        RL_VERIFY(stree_depth_ == stree_.size());\r\n\r\n        for (size_t i = stree_.size(); i != 0; --i)\r\n        {\r\n            stree_node& n = stree_[i - 1];\r\n            if (n.index_ != n.count_ - 1)\r\n            {\r\n                stree_.resize(i);\r\n                n.index_ += 1;\r\n                RL_VERIFY(n.index_ < n.count_);\r\n                return false;\r\n            }\r\n        }\r\n        return true;\r\n    }\r\n\r\n    void yield_priority(unsigned yield)\r\n    {\r\n        RL_VERIFY(yield);\r\n\r\n        thread_info_t& t = *this->thread_;\r\n        thread_id_t const& running_thread_count = this->running_threads_count;\r\n\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            thread_info_t& y = this->threads_[i];\r\n            RL_VERIFY(0 == y.yield_priority_[t.index_]);\r\n\r\n            if (t.index_ != i\r\n                && y.yield_sched_count_[t.index_] < yield\r\n                && y.state_ != thread_state_finished)\r\n            {\r\n                y.yield_priority_[t.index_] = yield;\r\n                y.total_yield_priority_ += yield;\r\n                this->block_thread(t.index_, false);\r\n            }\r\n            y.yield_sched_count_[t.index_] = 0;\r\n        }\r\n\r\n        if (0 == running_thread_count)\r\n            purge_blocked_threads();\r\n    }\r\n\r\n    thread_id_t schedule_impl(unpark_reason& reason, unsigned yield)\r\n    {\r\n        thread_info_t& t = 
*this->thread_;\r\n        thread_id_t const& running_thread_count = this->running_threads_count;\r\n\r\n#ifdef _DEBUG\r\n        {\r\n            unsigned tmp = 0;\r\n            for (thread_id_t i = 0; i != thread_count; ++i)\r\n                tmp += t.yield_priority_[i];\r\n            RL_VERIFY(t.total_yield_priority_ == tmp);\r\n        }\r\n#endif\r\n\r\n        if (t.total_yield_priority_)\r\n        {\r\n            for (thread_id_t i = 0; i != thread_count; ++i)\r\n            {\r\n                unsigned& prio = t.yield_priority_[i];\r\n                if (prio)\r\n                {\r\n                    prio -= 1;\r\n                    t.total_yield_priority_ -= 1;\r\n                    if (0 == prio)\r\n                    {\r\n                        this->unblock_thread(i);\r\n                    }\r\n                }\r\n                t.yield_sched_count_[i] += 1;\r\n            }\r\n        }\r\n\r\n        if (yield)\r\n            yield_priority(yield);\r\n\r\n        reason = unpark_reason_normal;\r\n        thread_id_t thread_index = 0;\r\n\r\n        if (self().can_switch(t)\r\n            || t.state_ != thread_state_running)\r\n        {\r\n            thread_id_t timed_thread_count = this->timed_thread_count_;\r\n            if (timed_thread_count)\r\n            {\r\n                thread_id_t cnt;\r\n                if (running_thread_count)\r\n                    cnt = timed_thread_count + 1;\r\n                else\r\n                    //!!! spurious thread will be never unblocked in such case - bad\r\n                    cnt = timed_thread_count;\r\n                thread_id_t idx = this->rand(cnt, sched_type_user);\r\n                if (idx < timed_thread_count)\r\n                {\r\n                    thread_info_t* thr = this->timed_threads_[idx];\r\n                    thread_index = thr->index_;\r\n                    //??? 
suboptimal state space exploration\r\n                    // if (1 != thr->block_count_) then we are making\r\n                    // superfluous rand()\r\n                    if (1 == thr->block_count_)\r\n                    {\r\n                        this->unpark_thread(thread_index);\r\n                        RL_VERIFY(thr->state_ == thread_state_running);\r\n                        reason = unpark_reason_timeout;\r\n                    }\r\n                }\r\n            }\r\n\r\n            RL_VERIFY(running_thread_count);\r\n\r\n            if (unpark_reason_normal == reason)\r\n            {\r\n                thread_id_t spurious_thread_count = this->spurious_thread_count_;\r\n                if (spurious_thread_count)\r\n                {\r\n                    thread_id_t cnt = spurious_thread_count + 1;\r\n                    thread_id_t idx = this->rand(cnt, sched_type_user);\r\n                    if (idx < spurious_thread_count)\r\n                    {\r\n                        thread_info_t* thr = this->spurious_threads_[idx];\r\n                        thread_index = thr->index_;\r\n                        //??? 
suboptimal state space exploration\r\n                        // if (1 != thr->block_count_) then we are making\r\n                        // superfluous rand()\r\n                        if (1 == thr->block_count_)\r\n                        {\r\n                            this->unpark_thread(thread_index);\r\n                            RL_VERIFY(thr->state_ == thread_state_running);\r\n                            reason = unpark_reason_spurious;\r\n                        }\r\n                    }\r\n                }\r\n            }\r\n\r\n            if (unpark_reason_normal == reason)\r\n            {\r\n                if (1 != running_thread_count)\r\n                {\r\n                    unsigned const index = this->rand(running_thread_count, sched_type_sched);\r\n                    thread_index = this->running_threads[index];\r\n                }\r\n                else\r\n                {\r\n                    thread_index = this->running_threads[0];\r\n                }\r\n            }\r\n        }\r\n        else\r\n        {\r\n            RL_VERIFY(t.state_ == thread_state_running);\r\n            thread_index = t.index_;\r\n        }\r\n\r\n        if (t.index_ == thread_index)\r\n            return thread_index;\r\n\r\n        //t.subsequent_timed_waits_ = 0;\r\n        self().on_switch(t);\r\n\r\n        return thread_index;\r\n    }\r\n\r\n    void thread_finished_impl()\r\n    {\r\n    }\r\n\r\n    void purge_blocked_threads()\r\n    {\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            on_thread_block(i, false);\r\n        }\r\n    }\r\n\r\n    unsigned rand_impl(unsigned limit, sched_type t)\r\n    {\r\n        unsigned result = 0;\r\n        size_t const size = stree_.size();\r\n        if (stree_depth_ == size)\r\n        {\r\n            stree_node n = {limit, 0, t};\r\n            stree_.push_back(n);\r\n        }\r\n        else\r\n        {\r\n            RL_VERIFY(size);\r\n            
stree_node& n = stree_[stree_depth_];\r\n\r\n            // If you hit assert here, then probably your test is non-deterministic\r\n            // Check whether you are using functions like ::rand()\r\n            // or static variables or values of object addresses (for hashing) in your test\r\n            // Replace ::rand() with rl::rand(), eliminate static variables in the test\r\n            RL_VERIFY(n.type_ == t);\r\n\r\n            RL_VERIFY(n.count_ == limit);\r\n            RL_VERIFY(n.index_ < n.count_);\r\n            result = n.index_;\r\n        }\r\n        stree_depth_ += 1;\r\n        return result;\r\n    }\r\n\r\n    iteration_t iteration_count_impl()\r\n    {\r\n        double current = self().iteration_count_approx();\r\n        if (current <= this->iter_)\r\n            current = this->iter_ + 1.0;\r\n\r\n        iteration_count_mean_ *= iteration_count_probe_count_;\r\n        iteration_count_probe_count_ += 1;\r\n        iteration_count_mean_ /= iteration_count_probe_count_;\r\n        iteration_count_mean_ += current / iteration_count_probe_count_;\r\n\r\n        iteration_t result = (iteration_t)(iteration_count_mean_ + 0.5);\r\n        if (result <= this->iter_)\r\n            result = this->iter_ + 1;\r\n        return result;\r\n    }\r\n\r\n    void get_state_impl(std::ostream& ss)\r\n    {\r\n        ss << (unsigned)stree_.size() << \" \";\r\n        for (size_t i = 0; i != stree_.size(); ++i)\r\n        {\r\n            stree_node& n = stree_[i];\r\n            ss << n.count_ << \" \";\r\n            ss << n.index_ << \" \";\r\n            ss << static_cast<unsigned>(n.type_) << \" \";\r\n        }\r\n    }\r\n\r\n    void set_state_impl(std::istream& ss)\r\n    {\r\n        size_t size = 0;\r\n        ss >> size;\r\n        for (size_t i = 0; i != size; ++i)\r\n        {\r\n            stree_node n = {};\r\n            ss >> n.count_;\r\n            ss >> n.index_;\r\n            unsigned type = 0;\r\n            ss >> type;\r\n     
       n.type_ = static_cast<sched_type>(type);\r\n            stree_.push_back(n);\r\n        }\r\n    }\r\n\r\n    void on_thread_block(thread_id_t th, bool yield)\r\n    {\r\n        //!!! doubled in schedule_impl()\r\n        thread_info_t& t = this->threads_[th];\r\n        if (t.total_yield_priority_)\r\n        {\r\n            for (thread_id_t i = 0; i != thread_count; ++i)\r\n            {\r\n                if (t.yield_priority_[i])\r\n                {\r\n                    t.total_yield_priority_ -= t.yield_priority_[i];\r\n                    t.yield_priority_[i] = 0;\r\n                    this->unblock_thread(i);\r\n                }\r\n            }\r\n        }\r\n\r\n        (void)yield;\r\n        //if (yield)\r\n        //    yield_priority(1);\r\n    }\r\n\r\nprotected:\r\n    struct stree_node\r\n    {\r\n        unsigned    count_;\r\n        unsigned    index_;\r\n        sched_type  type_;\r\n        unsigned    pad_;\r\n    };\r\n\r\n    typedef typename vector<stree_node>::type stree_t;\r\n    stree_t         stree_;\r\n    size_t          stree_depth_;\r\n\r\nprivate:\r\n    double          iteration_count_mean_;\r\n    unsigned        iteration_count_probe_count_;\r\n\r\n    derived_t& self()\r\n    {\r\n        return *static_cast<derived_t*>(this);\r\n    }\r\n\r\n    RL_NOCOPY(tree_search_scheduler);\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass full_search_scheduler\r\n    : public tree_search_scheduler<full_search_scheduler<thread_count>\r\n        , tree_search_scheduler_thread_info<thread_count>, thread_count>\r\n{\r\npublic:\r\n    typedef tree_search_scheduler<full_search_scheduler<thread_count>\r\n        , tree_search_scheduler_thread_info<thread_count>, thread_count> base_t;\r\n    typedef typename base_t::thread_info_t thread_info_t;\r\n    typedef typename base_t::shared_context_t shared_context_t;\r\n\r\n    full_search_scheduler(test_params& params, shared_context_t& ctx, thread_id_t 
dynamic_thread_count)\r\n        : base_t(params, ctx, dynamic_thread_count)\r\n    {\r\n    }\r\n\r\n    bool can_switch(thread_info_t& /*t*/)\r\n    {\r\n        return true;\r\n    }\r\n\r\n    void on_switch(thread_info_t& /*t*/)\r\n    {\r\n    }\r\n\r\n    double iteration_count_approx()\r\n    {\r\n        double total = 1;\r\n        size_t const size = this->stree_.size();\r\n        for (size_t i = 0; i != size; ++i)\r\n        {\r\n            total *= this->stree_[i].count_;\r\n        }\r\n        return total;\r\n    }\r\n\r\n    RL_NOCOPY(full_search_scheduler);\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/history.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_HISTORY_HPP\r\n#define RL_HISTORY_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntypedef void (*event_output_f)(std::ostream& s, void const* ev);\r\ntypedef void (*event_dtor_f)(void* ev);\r\n\r\nstruct history_entry\r\n{\r\n    thread_id_t thread_index_;\r\n    debug_info info_;\r\n    void* ev_;\r\n    event_output_f output_;\r\n    event_dtor_f dtor_;\r\n\r\n    history_entry(thread_id_t thread_index, debug_info_param info, void* ev, event_output_f output, event_dtor_f dtor)\r\n        : thread_index_(thread_index)\r\n        , info_(info)\r\n        , ev_(ev)\r\n        , output_(output)\r\n        , dtor_(dtor)\r\n    {\r\n    }\r\n};\r\n\r\ntemplate<typename T>\r\nvoid event_output(std::ostream& s, void const* ev)\r\n{\r\n    static_cast<T const*>(ev)->output(s);\r\n}\r\n\r\ntemplate<typename T>\r\nvoid event_dtor(void* ev)\r\n{\r\n    delete static_cast<T*>(ev);\r\n}\r\n\r\n\r\nstruct user_event\r\n{\r\n    char const* desc_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << desc_;\r\n    }\r\n};\r\n\r\ninline string strip_path(char const* filename)\r\n{\r\n    char const* slash = strrchr(filename, '\\\\');\r\n    if (slash)\r\n        return slash + 1;\r\n    else\r\n        return filename;\r\n}\r\n\r\ninline std::ostream& operator << (std::ostream& ss, debug_info_param info)\r\n{\r\n    /*\r\n    char const* func = info;\r\n    char const* file = info + strlen(info) + 1;\r\n    char const* line = file + strlen(file) 
+ 1;\r\n    */\r\n\r\n#ifdef RL_MSVC_OUTPUT\r\n    ss << info.file_ << \"(\" << info.line_ << \") : \";\r\n#else\r\n    ss << info.func_ << \", \" << strip_path(info.file_) << \"(\" << info.line_ << \")\";\r\n#endif\r\n    return ss;\r\n}\r\n\r\n\r\n\r\nclass history_mgr : nocopy<>\r\n{\r\npublic:\r\n    history_mgr(std::ostream& stream, thread_id_t thread_count)\r\n        : thread_count_(thread_count)\r\n        , out_stream_(stream)\r\n    {\r\n    }\r\n\r\n    ~history_mgr()\r\n    {\r\n        clear();\r\n    }\r\n\r\n    template<typename event_t>\r\n    void exec_log(thread_id_t th, debug_info_param info, event_t const& ev, bool output_history)\r\n    {\r\n        exec_history_.push_back(history_entry(th, info, new event_t(ev), &event_output<event_t>, &event_dtor<event_t>));\r\n        if (output_history)\r\n        {\r\n            output(exec_history_.size() - 1);\r\n        }\r\n    }\r\n\r\n    void print_exec_history(bool output_history)\r\n    {\r\n        size_t const buf_size = 4096;\r\n        char buf [buf_size + 1];\r\n\r\n        size_t const count = exec_history_.size();\r\n        if (false == output_history)\r\n        {\r\n            sprintf(buf, \"execution history (%u):\\n\", (unsigned)count);\r\n            out_stream_ << buf;\r\n#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT)\r\n            OutputDebugStringA(buf);\r\n#endif\r\n\r\n            for (size_t i = 0; i != count; ++i)\r\n            {\r\n                output(i);\r\n            }\r\n        }\r\n        out_stream_ << \"\\n\";\r\n#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT)\r\n        OutputDebugStringA(\"\\n\");\r\n#endif\r\n\r\n        for (thread_id_t th = 0; th != thread_count_; ++th)\r\n        {\r\n            sprintf(buf, \"thread %u:\\n\", th);\r\n            out_stream_ << buf;\r\n#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT)\r\n            OutputDebugStringA(buf);\r\n#endif\r\n            for (size_t i = 0; i != count; ++i)\r\n            {\r\n               
 if (exec_history_[i].thread_index_ == th)\r\n                {\r\n                    output(i);\r\n                }\r\n            }\r\n            out_stream_ << \"\\n\";\r\n#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT)\r\n            OutputDebugStringA(\"\\n\");\r\n#endif\r\n        }\r\n    }\r\n\r\n    void clear()\r\n    {\r\n        for (size_t i = 0; i != exec_history_.size(); ++i)\r\n        {\r\n            history_entry const& ent = exec_history_[i];\r\n            ent.dtor_(ent.ev_);\r\n        }\r\n        exec_history_.clear();\r\n    }\r\n\r\nprivate:\r\n    vector<history_entry>::type exec_history_;\r\n    thread_id_t                 thread_count_;\r\n    std::ostream&               out_stream_;\r\n\r\n    void output(size_t i)\r\n    {\r\n        std::basic_ostringstream<char, std::char_traits<char>, raw_allocator<char> > stream;\r\n\r\n        history_entry const& ent = exec_history_[i];\r\n#ifdef RL_MSVC_OUTPUT\r\n        {\r\n            stream << ent.info_ << \"[\" << i << \"] \" << ent.thread_index_ << \": \";\r\n            ent.output_(stream, ent.ev_);\r\n            stream << std::endl;\r\n        }\r\n#else\r\n        stream << \"[\" << (unsigned)i << \"] \" << ent.thread_index_ << \": \";\r\n        ent.output_(stream, ent.ev_);\r\n        stream << \", in \" << ent.info_ << std::endl;\r\n#endif\r\n\r\n        out_stream_ << stream.str();\r\n#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT)\r\n        OutputDebugStringA(stream.str().c_str());\r\n#endif\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/java.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_JAVA_HPP\r\n#define RL_JAVA_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n/*\r\n\r\nHierarchy For Package java.util.concurrent.locks \r\n\r\nClass Hierarchy\r\n\r\n    * java.lang.Object\r\n          o java.util.concurrent.locks.AbstractQueuedSynchronizer (implements java.io.Serializable)\r\n          o java.util.concurrent.locks.AbstractQueuedSynchronizer.ConditionObject (implements java.util.concurrent.locks.Condition, java.io.Serializable)\r\n          o java.util.concurrent.locks.LockSupport\r\n          o java.util.concurrent.locks.ReentrantLock (implements java.util.concurrent.locks.Lock, java.io.Serializable)\r\n          o java.util.concurrent.locks.ReentrantReadWriteLock (implements java.util.concurrent.locks.ReadWriteLock, java.io.Serializable)\r\n          o java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock (implements java.util.concurrent.locks.Lock, java.io.Serializable)\r\n          o java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock (implements java.util.concurrent.locks.Lock, java.io.Serializable) \r\n\r\nInterface Hierarchy\r\n\r\n    * java.util.concurrent.locks.Condition\r\n    * java.util.concurrent.locks.Lock\r\n    * java.util.concurrent.locks.ReadWriteLock\r\n*/\r\n\r\n\r\n\r\n\r\n\r\n/*\r\n\r\njava.util.concurrent.Semaphore\r\n\r\n\r\n\r\nPublic Constructors\r\npublic Semaphore(int permits)\r\nCreates a Semaphore with the given number of permits and nonfair fairness setting.\r\nParameters\r\npermits     the 
initial number of permits available. This value may be negative, in which case releases must occur before any acquires will be granted.\r\npublic Semaphore(int permits, boolean fair)\r\nCreates a Semaphore with the given number of permits and the given fairness setting.\r\nParameters\r\npermits     the initial number of permits available. This value may be negative, in which case releases must occur before any acquires will be granted.\r\nfair     true if this semaphore will guarantee first-in first-out granting of permits under contention, else false.\r\nPublic Methods\r\npublic void acquire()\r\nAcquires a permit from this semaphore, blocking until one is available, or the thread is interrupted.\r\n\r\nAcquires a permit, if one is available and returns immediately, reducing the number of available permits by one.\r\n\r\nIf no permit is available then the current thread becomes disabled for thread scheduling purposes and lies dormant until one of two things happens:\r\n\r\n    * Some other thread invokes the release() method for this semaphore and the current thread is next to be assigned a permit; or\r\n    * Some other thread interrupts the current thread. 
\r\n\r\nIf the current thread:\r\n\r\n    * has its interrupted status set on entry to this method; or\r\n    * is interrupted while waiting for a permit, \r\n\r\nthen InterruptedException is thrown and the current thread's interrupted status is cleared.\r\nThrows\r\nInterruptedException     if the current thread is interrupted\r\nSee Also\r\n\r\n    * interrupt()\r\n\r\npublic void acquire(int permits)\r\nAcquires the given number of permits from this semaphore, blocking until all are available, or the thread is interrupted.\r\n\r\nAcquires the given number of permits, if they are available, and returns immediately, reducing the number of available permits by the given amount.\r\n\r\nIf insufficient permits are available then the current thread becomes disabled for thread scheduling purposes and lies dormant until one of two things happens:\r\n\r\n    * Some other thread invokes one of the release methods for this semaphore, the current thread is next to be assigned permits and the number of available permits satisfies this request; or\r\n    * Some other thread interrupts the current thread. \r\n\r\nIf the current thread:\r\n\r\n    * has its interrupted status set on entry to this method; or\r\n    * is interrupted while waiting for a permit, \r\n\r\nthen InterruptedException is thrown and the current thread's interrupted status is cleared. 
Any permits that were to be assigned to this thread are instead assigned to the next waiting thread(s), as if they had been made available by a call to release().\r\nParameters\r\npermits     the number of permits to acquire\r\nThrows\r\nInterruptedException     if the current thread is interrupted\r\nIllegalArgumentException     if permits less than zero.\r\nSee Also\r\n\r\n    * interrupt()\r\n\r\npublic void acquireUninterruptibly(int permits)\r\nAcquires the given number of permits from this semaphore, blocking until all are available.\r\n\r\nAcquires the given number of permits, if they are available, and returns immediately, reducing the number of available permits by the given amount.\r\n\r\nIf insufficient permits are available then the current thread becomes disabled for thread scheduling purposes and lies dormant until some other thread invokes one of the release methods for this semaphore, the current thread is next to be assigned permits and the number of available permits satisfies this request.\r\n\r\nIf the current thread is interrupted while waiting for permits then it will continue to wait and its position in the queue is not affected. 
When the thread does return from this method its interrupt status will be set.\r\nParameters\r\npermits     the number of permits to acquire\r\nThrows\r\nIllegalArgumentException     if permits less than zero.\r\npublic void acquireUninterruptibly()\r\nAcquires a permit from this semaphore, blocking until one is available.\r\n\r\nAcquires a permit, if one is available and returns immediately, reducing the number of available permits by one.\r\n\r\nIf no permit is available then the current thread becomes disabled for thread scheduling purposes and lies dormant until some other thread invokes the release() method for this semaphore and the current thread is next to be assigned a permit.\r\n\r\nIf the current thread is interrupted while waiting for a permit then it will continue to wait, but the time at which the thread is assigned a permit may change compared to the time it would have received the permit had no interruption occurred. When the thread does return from this method its interrupt status will be set.\r\npublic int availablePermits()\r\nReturns the current number of permits available in this semaphore.\r\n\r\nThis method is typically used for debugging and testing purposes.\r\nReturns\r\n\r\n    * the number of permits available in this semaphore. \r\n\r\npublic int drainPermits()\r\nAcquire and return all permits that are immediately available.\r\nReturns\r\n\r\n    * the number of permits \r\n\r\npublic final int getQueueLength()\r\nReturns an estimate of the number of threads waiting to acquire. The value is only an estimate because the number of threads may change dynamically while this method traverses internal data structures. This method is designed for use in monitoring of the system state, not for synchronization control.\r\nReturns\r\n\r\n    * the estimated number of threads waiting for this lock \r\n\r\npublic final boolean hasQueuedThreads()\r\nQueries whether any threads are waiting to acquire. 
Note that because cancellations may occur at any time, a true return does not guarantee that any other thread will ever acquire. This method is designed primarily for use in monitoring of the system state.\r\nReturns\r\n\r\n    * true if there may be other threads waiting to acquire the lock. \r\n\r\npublic boolean isFair()\r\nReturns true if this semaphore has fairness set true.\r\nReturns\r\n\r\n    * true if this semaphore has fairness set true. \r\n\r\npublic void release(int permits)\r\nReleases the given number of permits, returning them to the semaphore.\r\n\r\nReleases the given number of permits, increasing the number of available permits by that amount. If any threads are blocking trying to acquire permits, then the one that has been waiting the longest is selected and given the permits that were just released. If the number of available permits satisfies that thread's request then that thread is re-enabled for thread scheduling purposes; otherwise the thread continues to wait. If there are still permits available after the first thread's request has been satisfied, then those permits are assigned to the next waiting thread. If it is satisfied then it is re-enabled for thread scheduling purposes. This continues until there are insufficient permits to satisfy the next waiting thread, or there are no more waiting threads.\r\n\r\nThere is no requirement that a thread that releases a permit must have acquired that permit by calling acquire. Correct usage of a semaphore is established by programming convention in the application.\r\nParameters\r\npermits     the number of permits to release\r\nThrows\r\nIllegalArgumentException     if permits less than zero.\r\npublic void release()\r\nReleases a permit, returning it to the semaphore.\r\n\r\nReleases a permit, increasing the number of available permits by one. If any threads are blocking trying to acquire a permit, then one is selected and given the permit that was just released. 
That thread is re-enabled for thread scheduling purposes.\r\n\r\nThere is no requirement that a thread that releases a permit must have acquired that permit by calling acquire(). Correct usage of a semaphore is established by programming convention in the application.\r\npublic String toString()\r\nReturns a string identifying this semaphore, as well as its state. The state, in brackets, includes the String \"Permits =\" followed by the number of permits.\r\nReturns\r\n\r\n    * a string identifying this semaphore, as well as its state \r\n\r\npublic boolean tryAcquire(long timeout, TimeUnit unit)\r\nAcquires a permit from this semaphore, if one becomes available within the given waiting time and the current thread has not been interrupted.\r\n\r\nAcquires a permit, if one is available and returns immediately, with the value true, reducing the number of available permits by one.\r\n\r\nIf no permit is available then the current thread becomes disabled for thread scheduling purposes and lies dormant until one of three things happens:\r\n\r\n    * Some other thread invokes the release() method for this semaphore and the current thread is next to be assigned a permit; or\r\n    * Some other thread interrupts the current thread; or\r\n    * The specified waiting time elapses. \r\n\r\nIf a permit is acquired then the value true is returned.\r\n\r\nIf the current thread:\r\n\r\n    * has its interrupted status set on entry to this method; or\r\n    * is interrupted while waiting to acquire a permit, \r\n\r\nthen InterruptedException is thrown and the current thread's interrupted status is cleared.\r\n\r\nIf the specified waiting time elapses then the value false is returned. 
If the time is less than or equal to zero, the method will not wait at all.\r\nParameters\r\ntimeout     the maximum time to wait for a permit\r\nunit     the time unit of the timeout argument.\r\nReturns\r\n\r\n    * true if a permit was acquired and false if the waiting time elapsed before a permit was acquired.\r\n\r\nThrows\r\nInterruptedException     if the current thread is interrupted\r\nSee Also\r\n\r\n    * interrupt()\r\n\r\npublic boolean tryAcquire(int permits, long timeout, TimeUnit unit)\r\nAcquires the given number of permits from this semaphore, if all become available within the given waiting time and the current thread has not been interrupted.\r\n\r\nAcquires the given number of permits, if they are available and returns immediately, with the value true, reducing the number of available permits by the given amount.\r\n\r\nIf insufficient permits are available then the current thread becomes disabled for thread scheduling purposes and lies dormant until one of three things happens:\r\n\r\n    * Some other thread invokes one of the release methods for this semaphore, the current thread is next to be assigned permits and the number of available permits satisfies this request; or\r\n    * Some other thread interrupts the current thread; or\r\n    * The specified waiting time elapses. \r\n\r\nIf the permits are acquired then the value true is returned.\r\n\r\nIf the current thread:\r\n\r\n    * has its interrupted status set on entry to this method; or\r\n    * is interrupted while waiting to acquire the permits, \r\n\r\nthen InterruptedException is thrown and the current thread's interrupted status is cleared. Any permits that were to be assigned to this thread, are instead assigned to the next waiting thread(s), as if they had been made available by a call to release().\r\n\r\nIf the specified waiting time elapses then the value false is returned. If the time is less than or equal to zero, the method will not wait at all. 
Any permits that were to be assigned to this thread, are instead assigned to the next waiting thread(s), as if they had been made available by a call to release().\r\nParameters\r\npermits     the number of permits to acquire\r\ntimeout     the maximum time to wait for the permits\r\nunit     the time unit of the timeout argument.\r\nReturns\r\n\r\n    * true if all permits were acquired and false if the waiting time elapsed before all permits were acquired.\r\n\r\nThrows\r\nInterruptedException     if the current thread is interrupted\r\nIllegalArgumentException     if permits less than zero.\r\nSee Also\r\n\r\n    * interrupt()\r\n\r\npublic boolean tryAcquire(int permits)\r\nAcquires the given number of permits from this semaphore, only if all are available at the time of invocation.\r\n\r\nAcquires the given number of permits, if they are available, and returns immediately, with the value true, reducing the number of available permits by the given amount.\r\n\r\nIf insufficient permits are available then this method will return immediately with the value false and the number of available permits is unchanged.\r\n\r\nEven when this semaphore has been set to use a fair ordering policy, a call to tryAcquire will immediately acquire a permit if one is available, whether or not other threads are currently waiting. This \"barging\" behavior can be useful in certain circumstances, even though it breaks fairness. 
If you want to honor the fairness setting, then use tryAcquire(permits, 0, TimeUnit.SECONDS) which is almost equivalent (it also detects interruption).\r\nParameters\r\npermits     the number of permits to acquire\r\nReturns\r\n\r\n    * true if the permits were acquired and false otherwise.\r\n\r\nThrows\r\nIllegalArgumentException     if permits less than zero.\r\npublic boolean tryAcquire()\r\nAcquires a permit from this semaphore, only if one is available at the time of invocation.\r\n\r\nAcquires a permit, if one is available and returns immediately, with the value true, reducing the number of available permits by one.\r\n\r\nIf no permit is available then this method will return immediately with the value false.\r\n\r\nEven when this semaphore has been set to use a fair ordering policy, a call to tryAcquire() will immediately acquire a permit if one is available, whether or not other threads are currently waiting. This \"barging\" behavior can be useful in certain circumstances, even though it breaks fairness. If you want to honor the fairness setting, then use tryAcquire(0, TimeUnit.SECONDS) which is almost equivalent (it also detects interruption).\r\nReturns\r\n\r\n    * true if a permit was acquired and false otherwise. \r\n\r\nProtected Methods\r\nprotected Collection<Thread> getQueuedThreads()\r\nReturns a collection containing threads that may be waiting to acquire. Because the actual set of threads may change dynamically while constructing this result, the returned collection is only a best-effort estimate. The elements of the returned collection are in no particular order. This method is designed to facilitate construction of subclasses that provide more extensive monitoring facilities.\r\nReturns\r\n\r\n    * the collection of threads \r\n\r\nprotected void reducePermits(int reduction)\r\nShrinks the number of available permits by the indicated reduction. 
This method can be useful in subclasses that use semaphores to track resources that become unavailable. This method differs from acquire in that it does not block waiting for permits to become available.\r\nParameters\r\nreduction     the number of permits to remove\r\nThrows\r\nIllegalArgumentException     if reduction is negative \r\n*/\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/java_atomic.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_JAVA_ATOMIC_HPP\r\n#define RL_JAVA_ATOMIC_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"atomic.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<typename T> class jatomic;\r\n\r\n\r\ntemplate<typename T>\r\nclass jatomic_proxy\r\n{\r\npublic:\r\n    T get() const\r\n    {\r\n        return var_.load(mo_seq_cst, info_);\r\n    }\r\n\r\n    void set(T value)\r\n    {\r\n        var_.store(value, mo_seq_cst, info_);\r\n    }\r\n\r\n    T addAndGet(T delta)\r\n    {\r\n        return getAndAdd(delta) + delta;\r\n    }\r\n\r\n    bool compareAndSet(T expect, T update)\r\n    {\r\n        bool result = var_.compare_exchange(bool_t<false>(), expect, update, mo_seq_cst, info_);\r\n        return result;\r\n    }\r\n\r\n    bool weakCompareAndSet(T expect, T update)\r\n    {\r\n        bool result = var_.compare_exchange(bool_t<true>(), expect, update, mo_seq_cst, info_);\r\n        return result;\r\n    }\r\n\r\n    T decrementAndGet()\r\n    {\r\n        return getAndAdd(-1) - 1;\r\n    }\r\n\r\n    T getAndAdd(T delta)\r\n    {\r\n        T result = var_.rmw(rmw_type_t<rmw_type_add>(), delta, mo_seq_cst, info_);\r\n        return result;\r\n    }\r\n\r\n    T getAndDecrement()\r\n    {\r\n        return getAndAdd(-1);\r\n    }\r\n\r\n    T getAndIncrement()\r\n    {\r\n        return getAndAdd(+1);\r\n    }\r\n\r\n    T getAndSet(T newValue)\r\n    {\r\n        T result = var_.rmw(rmw_type_t<rmw_type_swap>(), newValue, mo_seq_cst, info_);\r\n        return result;\r\n    
}\r\n\r\n    T incrementAndGet()\r\n    {\r\n        return getAndAdd(1) + 1;\r\n    }\r\n\r\nprivate:\r\n    jatomic<T>& var_;\r\n    debug_info info_;\r\n\r\n    //typedef typename atomic_add_type<T>::type add_type;\r\n    template<typename Y> friend class jatomic;\r\n\r\n    jatomic_proxy(jatomic<T>& var, debug_info_param info)\r\n        : var_(var)\r\n        , info_(info)\r\n    {\r\n    }\r\n\r\n    jatomic_proxy& operator = (jatomic_proxy const&);\r\n};\r\n\r\n\r\ntemplate<typename T>\r\nclass jatomic : generic_atomic<T, true>\r\n{\r\npublic:\r\n    typedef jatomic_proxy<T> proxy_t;\r\n    friend class jatomic_proxy<T>;\r\n\r\n    jatomic()\r\n    {\r\n    }\r\n\r\n    jatomic(T value)\r\n    {\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_seq_cst, $);\r\n    }\r\n\r\n    jatomic(jatomic const& r)\r\n    {\r\n        T const value = r.load(mo_seq_cst, $);\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_seq_cst, $);\r\n    }\r\n\r\n    jatomic(proxy_t const& r)\r\n    {\r\n        T const value = r.var_.load(mo_seq_cst, r.info_);\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_seq_cst, r.info_);\r\n    }\r\n\r\n    proxy_t operator () (debug_info_param info)\r\n    {\r\n        return proxy_t(*this, info);\r\n    }\r\n};\r\n\r\n\r\ntypedef jatomic<int> AtomicInteger;\r\ntypedef jatomic<long> AtomicLong;\r\n\r\n\r\n\r\n\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/java_var.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_JAVA_VAR_HPP\r\n#define RL_JAVA_VAR_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"atomic.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\ntemplate<typename T> class jvar;\r\n\r\n\r\ntemplate<typename T>\r\nclass jvar_proxy\r\n{\r\npublic:\r\n    typedef typename atomic_add_type<T>::type add_type;\r\n    template<typename Y> friend class jvar;\r\n\r\n    operator T () const\r\n    {\r\n        return load();\r\n    }\r\n\r\n    T operator = (T value)\r\n    {\r\n        store(value);\r\n        return value;\r\n    }\r\n\r\n    T operator = (jvar_proxy const& r)\r\n    {\r\n        T const value = r.load();\r\n        store(value);\r\n        return *this;\r\n    }\r\n\r\n    T operator ++ (int)\r\n    {\r\n        T tmp = load();\r\n        store(tmp + 1);\r\n        return tmp;\r\n    }\r\n\r\n    T operator -- (int)\r\n    {\r\n        T tmp = load();\r\n        store(tmp - 1);\r\n        return tmp;\r\n    }\r\n\r\n    T operator ++ ()\r\n    {\r\n        T tmp = load();\r\n        store(tmp + 1);\r\n        return tmp + 1;\r\n    }\r\n\r\n    T operator -- ()\r\n    {\r\n        T tmp = load();\r\n        store(tmp - 1);\r\n        return tmp - 1;\r\n    }\r\n\r\n    T operator += (add_type value)\r\n    {\r\n        T tmp = load();\r\n        store(tmp + value);\r\n        return tmp + value;\r\n    }\r\n\r\n    T operator -= (add_type value)\r\n    {\r\n        T tmp = load();\r\n        store(tmp - value);\r\n        return tmp - value;\r\n    }\r\n\r\nprivate:\r\n    
jvar<T>& var_;\r\n    debug_info info_;\r\n\r\n    jvar_proxy(jvar<T>& var, debug_info_param info)\r\n        : var_(var)\r\n        , info_(info)\r\n    {\r\n    }\r\n\r\n    T load() const\r\n    {\r\n        return var_.load(mo_relaxed, info_);\r\n    }\r\n\r\n    void store(T value)\r\n    {\r\n        var_.store(value, mo_relaxed, info_);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass jvar : generic_atomic<T, true>\r\n{\r\npublic:\r\n    typedef jvar_proxy<T> proxy_t;\r\n    friend class jvar_proxy<T>;\r\n\r\n    jvar()\r\n    {\r\n    }\r\n\r\n    jvar(T value)\r\n    {\r\n        this->store(value, mo_relaxed, $);\r\n    }\r\n\r\n    jvar(jvar const& r)\r\n    {\r\n        T const value = r.load(mo_relaxed, $);\r\n        this->store(value, mo_relaxed, $);\r\n    }\r\n\r\n    jvar(proxy_t const& r)\r\n    {\r\n        T const value = r.load();\r\n        this->store(value, mo_relaxed, r.info_);\r\n    }\r\n\r\n    proxy_t operator () (debug_info_param info)\r\n    {\r\n        return proxy_t(*this, info);\r\n    }\r\n\r\nprivate:\r\n    jvar& operator = (jvar const&);\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/java_volatile.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_JAVA_VOLATILE_HPP\r\n#define RL_JAVA_VOLATILE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"atomic.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\ntemplate<typename T> class jvolatile;\r\n\r\n\r\ntemplate<typename T>\r\nclass jvolatile_proxy\r\n{\r\npublic:\r\n    typedef typename atomic_add_type<T>::type add_type;\r\n    template<typename Y> friend class jvolatile;\r\n\r\n    operator T () const\r\n    {\r\n        return load();\r\n    }\r\n\r\n    T operator = (T value)\r\n    {\r\n        store(value);\r\n        return value;\r\n    }\r\n\r\n    T operator = (jvolatile_proxy const& r)\r\n    {\r\n        T const value = r.load();\r\n        store(value);\r\n        return *this;\r\n    }\r\n\r\n    T operator ++ (int)\r\n    {\r\n        T tmp = load();\r\n        store(tmp + 1);\r\n        return tmp;\r\n    }\r\n\r\n    T operator -- (int)\r\n    {\r\n        T tmp = load();\r\n        store(tmp - 1);\r\n        return tmp;\r\n    }\r\n\r\n    T operator ++ ()\r\n    {\r\n        T tmp = load();\r\n        store(tmp + 1);\r\n        return tmp + 1;\r\n    }\r\n\r\n    T operator -- ()\r\n    {\r\n        T tmp = load();\r\n        store(tmp - 1);\r\n        return tmp - 1;\r\n    }\r\n\r\n    T operator += (add_type value)\r\n    {\r\n        T tmp = load();\r\n        store(tmp + value);\r\n        return tmp + value;\r\n    }\r\n\r\n    T operator -= (add_type value)\r\n    {\r\n        T tmp = load();\r\n        store(tmp - value);\r\n        return tmp - value;\r\n   
 }\r\n\r\nprivate:\r\n    jvolatile<T>& var_;\r\n    debug_info info_;\r\n\r\n    jvolatile_proxy(jvolatile<T>& var, debug_info_param info)\r\n        : var_(var)\r\n        , info_(info)\r\n    {\r\n    }\r\n\r\n    T load() const\r\n    {\r\n        return var_.load(mo_seq_cst, info_);\r\n    }\r\n\r\n    void store(T value)\r\n    {\r\n        var_.store(value, mo_seq_cst, info_);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass jvolatile : generic_atomic<T, true>\r\n{\r\npublic:\r\n    typedef jvolatile_proxy<T> proxy_t;\r\n    friend class jvolatile_proxy<T>;\r\n\r\n    jvolatile()\r\n    {\r\n    }\r\n\r\n    explicit jvolatile(T value)\r\n    {\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_seq_cst, $);\r\n    }\r\n\r\n    jvolatile(jvolatile const& r)\r\n    {\r\n        T const value = r.load(mo_seq_cst, $);\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_seq_cst, $);\r\n    }\r\n\r\n    jvolatile(proxy_t const& r)\r\n    {\r\n        T const value = r.var_.load(mo_seq_cst, r.info_);\r\n        //??? whether here must be mo_relaxed or mo_release?\r\n        this->store(value, mo_seq_cst, r.info_);\r\n    }\r\n\r\n    proxy_t operator () (debug_info_param info)\r\n    {\r\n        return proxy_t(*this, info);\r\n    }\r\n};\r\n\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/memory.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_MEMORY_HPP\r\n#define RL_MEMORY_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nclass memory_mgr : nocopy<>\r\n{\r\npublic:\r\n    memory_mgr()\r\n    {\r\n        memset(deferred_free_, 0, sizeof(deferred_free_));\r\n        memset(deferred_free_size_, 0, sizeof(deferred_free_size_));\r\n        deferred_index_ = 0;\r\n    }\r\n\r\n    ~memory_mgr()\r\n    {\r\n        /*\r\n        while (allocs_.size())\r\n        {\r\n            size_t* p = (size_t*)(allocs_.begin()->first);\r\n            free(p - 1, false);\r\n            allocs_.erase(allocs_.begin());\r\n        }\r\n        */\r\n    }\r\n\r\n#ifndef RL_GC\r\n    void* alloc(size_t size)\r\n#else\r\n    void* alloc(size_t size, void (*dtor)(void*))\r\n#endif\r\n    {\r\n        void* pp = 0;\r\n        for (size_t i = 0; i != alloc_cache_.size(); ++i)\r\n        {\r\n            if (alloc_cache_[i].first == size)\r\n            {\r\n                if (alloc_cache_[i].second.size())\r\n                {\r\n                    pp = alloc_cache_[i].second.top();\r\n                    alloc_cache_[i].second.pop();\r\n                }\r\n                break;\r\n            }\r\n        }\r\n        if (0 == pp)\r\n            pp = (::malloc)(size + alignment);\r\n\r\n        if (pp)\r\n        {\r\n            RL_VERIFY(alignment >= sizeof(void*));\r\n            *(size_t*)pp = size;\r\n            void* p = (char*)pp + alignment;\r\n#ifndef RL_GC\r\n            
allocs_.insert(std::make_pair(p, size));\r\n#else\r\n            alloc_desc_t desc = {p, size, dtor};\r\n            gc_allocs_.push_back(desc);\r\n#endif\r\n            return p;\r\n        }\r\n        else\r\n        {\r\n            throw std::bad_alloc();\r\n        }\r\n    }\r\n\r\n    bool free(void* pp, bool defer)\r\n    {\r\n        if (0 == pp)\r\n            return true;\r\n\r\n#ifndef RL_GC\r\n        map<void*, size_t>::type::iterator iter = allocs_.find(pp);\r\n        if (allocs_.end() == iter)\r\n            return false;\r\n\r\n        allocs_.erase(iter);\r\n\r\n        void* p = (char*)pp - alignment;\r\n        size_t size = *(size_t*)p;\r\n\r\n        if (defer)\r\n        {\r\n            deferred_free_[deferred_index_ % deferred_count] = p;\r\n            deferred_free_size_[deferred_index_ % deferred_count] = size;\r\n            deferred_index_ += 1;\r\n            p = deferred_free_[deferred_index_ % deferred_count];\r\n            size = deferred_free_size_[deferred_index_ % deferred_count];\r\n            if (p)\r\n                rl_free_impl(p, size);\r\n        }\r\n        else\r\n        {\r\n            rl_free_impl(p, size);\r\n        }\r\n        return true;\r\n#else\r\n        (void)defer;\r\n        for (size_t i = 0; i != gc_allocs_.size(); ++i)\r\n        {\r\n            alloc_desc_t const& desc = gc_allocs_[i];\r\n            if (desc.addr == pp)\r\n            {\r\n                void* p = (char*)desc.addr - alignment;\r\n                rl_free_impl(p, desc.size);\r\n                gc_allocs_.erase(gc_allocs_.begin() + i);\r\n                return true;\r\n            }\r\n        }\r\n        return false;\r\n#endif\r\n    }\r\n\r\n    bool iteration_end()\r\n    {\r\n#ifndef RL_GC\r\n        return allocs_.empty();\r\n#else\r\n        for (size_t i = 0; i != gc_allocs_.size(); ++i)\r\n        {\r\n            alloc_desc_t const& desc = gc_allocs_[i];\r\n            if (desc.dtor)\r\n                
desc.dtor(desc.addr);\r\n            void* p = (char*)desc.addr - alignment;\r\n            rl_free_impl(p, desc.size);\r\n        }\r\n        gc_allocs_.clear();\r\n        return true;\r\n#endif\r\n    }\r\n\r\n#ifndef RL_GC\r\n    void output_allocs(std::ostream& stream)\r\n    {\r\n        stream << \"memory allocations:\" << std::endl;\r\n        map<void*, size_t>::type::iterator iter = allocs_.begin();\r\n        map<void*, size_t>::type::iterator end = allocs_.end();\r\n        for (; iter != end; ++iter)\r\n        {\r\n            stream << iter->first << \" [\" << (unsigned)iter->second << \"]\" << std::endl;\r\n        }\r\n        stream << std::endl;\r\n    }\r\n#endif\r\n\r\nprivate:\r\n    typedef stack<void*>::type              freelist_t;\r\n    typedef std::pair<size_t, freelist_t>   alloc_entry_t;\r\n    typedef vector<alloc_entry_t>::type     alloc_t;\r\n\r\n    static size_t const deferred_count      = 64;\r\n\r\n    alloc_t alloc_cache_;\r\n    size_t deferred_index_;\r\n    void* deferred_free_ [deferred_count];\r\n    size_t deferred_free_size_ [deferred_count];\r\n\r\n#ifndef RL_GC\r\n    map<void*, size_t>::type allocs_;\r\n#else\r\n    struct alloc_desc_t\r\n    {\r\n        void*       addr;\r\n        size_t      size;\r\n        void        (*dtor)(void*);\r\n    };\r\n    vector<alloc_desc_t>::type gc_allocs_;\r\n#endif\r\n\r\n    void rl_free_impl(void* p, size_t size)\r\n    {\r\n        bool found = false;\r\n        for (size_t i = 0; i != alloc_cache_.size(); ++i)\r\n        {\r\n            if (alloc_cache_[i].first == size)\r\n            {\r\n                found = true;\r\n                alloc_cache_[i].second.push(p);\r\n                break;\r\n            }\r\n        }\r\n        if (!found)\r\n        {\r\n            alloc_cache_.push_back(std::make_pair(size, freelist_t()));\r\n            alloc_cache_.back().second.push(p);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct memory_alloc_event\r\n{\r\n    void* 
                      addr_;\r\n    size_t                      size_;\r\n    bool                        is_array_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"memory allocation: addr=\" << std::hex << (void*)((char*)addr_ + (is_array_ ? alignment : 0)) << std::dec\r\n            << \", size=\" << (unsigned)size_;\r\n    }\r\n};\r\n\r\n\r\nstruct memory_free_event\r\n{\r\n    void*                       addr_;\r\n    bool                        is_array_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s << \"memory deallocation: addr=\" << std::hex << (void*)((char*)addr_ + (is_array_ ? alignment : 0)) << std::dec;\r\n    }\r\n};\r\n\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/memory_order.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_MEMORY_ORDER_HPP\r\n#define RL_MEMORY_ORDER_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nenum memory_order\r\n{\r\n    mo_relaxed,\r\n    mo_consume,\r\n    mo_acquire,\r\n    mo_release,\r\n    mo_acq_rel,\r\n    mo_seq_cst,\r\n};\r\n\r\n\r\n\r\n\r\ninline char const* format(memory_order mo)\r\n{\r\n    switch (mo)\r\n    {\r\n    case mo_relaxed: return \"relaxed\";\r\n    case mo_consume: return \"consume\";\r\n    case mo_acquire: return \"acquire\";\r\n    case mo_release: return \"release\";\r\n    case mo_acq_rel: return \"acq_rel\";\r\n    case mo_seq_cst: return \"seq_cst\";\r\n    }\r\n    RL_VERIFY(!\"invalid value of memory order\");\r\n    throw std::logic_error(\"invalid value of memory order\");\r\n}\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/pch.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_PCH_HPP\r\n#define RL_PCH_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#ifndef _CRT_SECURE_NO_WARNINGS\r\n#   define _CRT_SECURE_NO_WARNINGS 1\r\n#endif\r\n\r\n#ifdef _FORTIFY_SOURCE\r\n#    undef _FORTIFY_SOURCE\r\n#endif\r\n\r\n#ifndef _XOPEN_SOURCE\r\n#    define _XOPEN_SOURCE\r\n#endif\r\n\r\n#include <stdlib.h>\r\n#include <stdio.h>\r\n#include <stddef.h>\r\n#include <limits.h>\r\n#include <memory.h>\r\n#include <string.h>\r\n\r\n#include <typeinfo>\r\n#include <iostream>\r\n#include <sstream>\r\n#include <algorithm>\r\n#include <stdexcept>\r\n#include <utility>\r\n#include <iterator>\r\n#include <memory>\r\n#include <vector>\r\n#include <queue>\r\n#include <string>\r\n#include <stack>\r\n#include <set>\r\n#include <map>\r\n#include <new>\r\n\r\n#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)\r\n#   define RL_WIN\r\n#endif\r\n\r\n#if defined(RL_WIN) || defined(_CYGWIN)\r\n#   ifndef _WIN32_WINNT\r\n#       define _WIN32_WINNT 0x0500\r\n#   endif\r\n#   define WIN32_LEAN_AND_MEAN\r\n#   include <windows.h>\r\n#   include <process.h>\r\n#   ifdef RL_WIN\r\n#       include <intrin.h>\r\n#   else\r\n#       include <stdint.h>\r\n#       include <sys/times.h>\r\n#   endif\r\n#else\r\n#   include <stdint.h>\r\n#   include <sys/times.h>\r\n#   include <unistd.h>\r\n#   include <ucontext.h>\r\n#   include <setjmp.h>\r\n#endif\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/platform.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_PLATFORM_HPP\r\n#define RL_PLATFORM_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"pch.hpp\"\r\n\r\n\r\n#if defined(RL_WIN) || defined(_CYGWIN)\r\n\r\ntypedef void* fiber_t;\r\n\r\ninline unsigned get_tick_count()\r\n{\r\n    return GetTickCount();\r\n}\r\n\r\ninline void set_low_thread_prio()\r\n{\r\n    SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL);\r\n}\r\n\r\ninline void create_main_fiber(fiber_t& fib)\r\n{\r\n    fib = ConvertThreadToFiber(0);\r\n    if (0 == fib)\r\n    {\r\n        unsigned long err = ::GetLastError(); (void)err;\r\n        throw std::logic_error(\"you must start simulation inside a thread (not a fiber)\");\r\n    }\r\n}\r\n\r\ninline void delete_main_fiber(fiber_t& fib)\r\n{\r\n    (void)fib;\r\n    HMODULE lib = LoadLibraryW(L\"kernel32.dll\");\r\n    if (lib)\r\n    {\r\n        void* proc = (void*)GetProcAddress(lib, \"ConvertFiberToThread\");\r\n        if (proc)\r\n        {\r\n            typedef BOOL (WINAPI * ConvertFiberToThreadT)();\r\n            ConvertFiberToThreadT ConvertFiberToThread = (ConvertFiberToThreadT)proc;\r\n            ConvertFiberToThread();\r\n        }\r\n        FreeLibrary(lib);\r\n    }\r\n}\r\n\r\ninline void create_fiber(fiber_t& fib, void(*fiber_proc)(void*), void* ctx)\r\n{\r\n    size_t const stack_size = 64*1024;\r\n    fib = CreateFiberEx(4*1024, stack_size, 0, (LPFIBER_START_ROUTINE)fiber_proc, ctx);\r\n    if (fib == 0)\r\n        throw std::runtime_error(\"error creating fiber\");\r\n}\r\n\r\ninline void 
delete_fiber(fiber_t& fib)\r\n{\r\n    DeleteFiber(fib);\r\n}\r\n\r\ninline void switch_to_fiber(fiber_t fib, fiber_t)\r\n{\r\n    SwitchToFiber(fib);\r\n}\r\n\r\n// work-around for some versions of cygwin\r\nextern \"C\" inline int __gxx_personality_v0()\r\n{\r\n    return 0;\r\n}\r\n\r\n#ifdef RL_WIN\r\n#else\r\n\r\n/*\r\ninline unsigned get_tick_count()\r\n{\r\n    return GetTickCount();\r\n}\r\n\r\ntypedef void* fiber_t;\r\n\r\nstruct ucontext_t\r\n{\r\n    struct stack_t\r\n    {\r\n        void* ss_sp;\r\n        size_t ss_size;\r\n    };\r\n    stack_t uc_stack;\r\n    void* uc_link;\r\n\r\n};\r\nvoid getcontext(void*) {}\r\nvoid makecontext(void*, void(*)(), int, void*) {}\r\nvoid swapcontext(void*, void*) {}\r\n\r\n*/\r\n\r\n#endif\r\n\r\n#else\r\n\r\ninline unsigned get_tick_count()\r\n{\r\n    struct tms tms;\r\n    return ((unsigned)(times (&tms) * (1000 / sysconf(_SC_CLK_TCK))));\r\n}\r\n\r\ninline void set_low_thread_prio()\r\n{\r\n}\r\n\r\n#if 0\r\n\r\ntypedef ucontext_t fiber_t;\r\n\r\ninline void create_main_fiber(fiber_t& fib)\r\n{\r\n    ucontext_t f = {};\r\n    fib = f;\r\n}\r\n\r\ninline void delete_main_fiber(fiber_t& fib)\r\n{\r\n    (void)fib;\r\n}\r\n\r\ninline void create_fiber(fiber_t& fib, void(*fiber_proc)(void*), void* ctx)\r\n{\r\n    size_t const stack_size = 64*1024;\r\n    getcontext(&fib);\r\n    fib.uc_stack.ss_sp = (::malloc)(stack_size);\r\n    fib.uc_stack.ss_size = stack_size;\r\n    fib.uc_link = 0;\r\n    typedef void(*fn_t)();\r\n    fn_t fn = (fn_t)fiber_proc;\r\n    makecontext(&fib, fn, 1, ctx);\r\n}\r\n\r\ninline void delete_fiber(fiber_t& fib)\r\n{\r\n    //(::free)(fib.uc_stack.ss_sp);\r\n}\r\n\r\ninline void switch_to_fiber(fiber_t& fib, fiber_t& prev)\r\n{\r\n    swapcontext(&prev, &fib);\r\n}\r\n\r\n#else\r\n\r\nstruct fiber_t\r\n{\r\n    ucontext_t  fib;\r\n    jmp_buf     jmp;\r\n};\r\n\r\nstruct fiber_ctx_t\r\n{\r\n    void(*      fnc)(void*);\r\n    void*       ctx;\r\n    jmp_buf*    cur;\r\n    ucontext_t* 
prv;\r\n};\r\n\r\nstatic void fiber_start_fnc(void* p)\r\n{\r\n    fiber_ctx_t* ctx = (fiber_ctx_t*)p;\r\n    void (*volatile ufnc)(void*) = ctx->fnc;\r\n    void* volatile uctx = ctx->ctx;\r\n    if (_setjmp(*ctx->cur) == 0)\r\n    {\r\n        ucontext_t tmp;\r\n        swapcontext(&tmp, ctx->prv);\r\n    }\r\n    ufnc(uctx);\r\n}\r\n\r\ninline void create_main_fiber(fiber_t& fib)\r\n{\r\n    memset(&fib, 0, sizeof(fib));\r\n}\r\n\r\ninline void delete_main_fiber(fiber_t& fib)\r\n{\r\n    (void)fib;\r\n}\r\n\r\ninline void create_fiber(fiber_t& fib, void(*ufnc)(void*), void* uctx)\r\n{\r\n    size_t const stack_size = 64*1024;\r\n    getcontext(&fib.fib);\r\n    fib.fib.uc_stack.ss_sp = (::malloc)(stack_size);\r\n    fib.fib.uc_stack.ss_size = stack_size;\r\n    fib.fib.uc_link = 0;\r\n    ucontext_t tmp;\r\n    fiber_ctx_t ctx = {ufnc, uctx, &fib.jmp, &tmp};\r\n    makecontext(&fib.fib, (void(*)())fiber_start_fnc, 1, &ctx);\r\n    swapcontext(&tmp, &fib.fib);\r\n}\r\n\r\ninline void delete_fiber(fiber_t& fib)\r\n{\r\n    //(::free)(fib.uc_stack.ss_sp);\r\n}\r\n\r\ninline void switch_to_fiber(fiber_t& fib, fiber_t& prv)\r\n{\r\n    if (_setjmp(prv.jmp) == 0)\r\n        _longjmp(fib.jmp, 1);\r\n}\r\n\r\n#endif\r\n\r\n#endif\r\n\r\n\r\n\r\n#ifdef _MSC_VER\r\n    typedef unsigned __int64 uint64_t;\r\n#   define RL_INLINE __forceinline\r\n#   define RL_NOINLINE __declspec(noinline)\r\n#   define RL_STRINGIZE(text) RL_STRINGIZE_A((text))\r\n#   define RL_STRINGIZE_I(text) #text\r\n#   define RL_STRINGIZE_A(arg) RL_STRINGIZE_I arg\r\n#   define RL_STDCALL __stdcall\r\n#   define RL_THROW_SPEC(ex)\r\n#else\r\n#   define RL_INLINE inline\r\n#   define RL_NOINLINE\r\n#   define RL_STRINGIZE_I(text) #text\r\n#   define RL_STRINGIZE(text) RL_STRINGIZE_I(text)\r\n#   define RL_STDCALL\r\n#   define RL_THROW_SPEC(ex) throw(ex)\r\n#endif\r\n\r\n\r\n#if defined (_MSC_VER) && (_MSC_VER >= 1400)\r\n#   define RL_RESTRICT __restrict\r\n#else\r\n#   define 
RL_RESTRICT\r\n#endif\r\n\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/pthread.h",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_PTHREAD_IFACE_HPP\r\n#define RL_PTHREAD_IFACE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\n#include \"relacy.hpp\"\r\n#include \"stdlib/pthread.hpp\"\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/random.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_RANDOM_HPP\r\n#define RL_RANDOM_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nunsigned const primes[16] = {1, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53};\r\n\r\nstruct random_generator\r\n{\r\n    unsigned k;\r\n    unsigned c;\r\n    unsigned x;\r\n\r\n    void seed(iteration_t s)\r\n    {\r\n        k = ((unsigned)(s >> 32) & 0xf) + 8;\r\n        c = primes[((unsigned)(s >> 36) & 0xf)];\r\n        x = (unsigned)((s + 1) * 0x95949347 + c);\r\n    }\r\n\r\n    unsigned rand()\r\n    {\r\n        return ((x = x + c + (x << k)) >> 16);\r\n    }\r\n\r\n    template<typename T, T max>\r\n    RL_INLINE\r\n    T get()\r\n    {\r\n        return static_cast<T>(rand() % max);\r\n    }\r\n};\r\n\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/random_scheduler.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_RANDOM_SCHEDULER_HPP\r\n#define RL_RANDOM_SCHEDULER_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"scheduler.hpp\"\r\n#include \"random.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass random_scheduler : public scheduler<random_scheduler<thread_count>, scheduler_thread_info, thread_count>\r\n{\r\npublic:\r\n    typedef scheduler<random_scheduler<thread_count>, scheduler_thread_info, thread_count> base_t;\r\n    typedef typename base_t::thread_info_t thread_info_t;\r\n    typedef typename base_t::shared_context_t shared_context_t;\r\n\r\n    struct task_t\r\n    {\r\n    };\r\n\r\n    random_scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count)\r\n        : base_t(params, ctx, dynamic_thread_count)\r\n    {\r\n    }\r\n\r\n    thread_id_t iteration_begin_impl()\r\n    {\r\n        rand_.seed(this->iter_);\r\n        unpark_reason reason;\r\n        return schedule_impl(reason, false);\r\n    }\r\n\r\n    bool iteration_end_impl()\r\n    {\r\n        return this->iter_ == this->params_.iteration_count;\r\n    }\r\n\r\n    thread_id_t schedule_impl(unpark_reason& reason, unsigned /*yield*/)\r\n    {\r\n        thread_id_t const running_thread_count = this->running_threads_count;\r\n\r\n        thread_id_t timed_thread_count = this->timed_thread_count_;\r\n        if (timed_thread_count)\r\n        {\r\n            thread_id_t cnt = running_thread_count ? 
timed_thread_count * 4 : timed_thread_count;\r\n            thread_id_t idx = rand_.rand() % cnt;\r\n            if (idx < timed_thread_count)\r\n            {\r\n                thread_info_t* thr = this->timed_threads_[idx];\r\n                thread_id_t th = thr->index_;\r\n                RL_VERIFY(1 == thr->block_count_);\r\n                this->unpark_thread(th);\r\n                RL_VERIFY(thr->state_ == thread_state_running);\r\n                reason = unpark_reason_timeout;\r\n                return th;\r\n            }\r\n        }\r\n\r\n        thread_id_t spurious_thread_count = this->spurious_thread_count_;\r\n        if (spurious_thread_count && running_thread_count)\r\n        {\r\n            thread_id_t cnt = spurious_thread_count * 8;\r\n            thread_id_t idx = rand_.rand() % cnt;\r\n            if (idx < spurious_thread_count)\r\n            {\r\n                thread_info_t* thr = this->spurious_threads_[idx];\r\n                thread_id_t th = thr->index_;\r\n                RL_VERIFY(1 == thr->block_count_);\r\n                this->unpark_thread(th);\r\n                RL_VERIFY(thr->state_ == thread_state_running);\r\n                reason = unpark_reason_spurious;\r\n                return th;\r\n            }\r\n        }\r\n\r\n        RL_VERIFY(running_thread_count);\r\n        unsigned index = rand_.rand() % running_thread_count;\r\n        thread_id_t th = this->running_threads[index];\r\n        reason = unpark_reason_normal;\r\n        return th;\r\n    }\r\n\r\n    unsigned rand_impl(unsigned limit, sched_type t)\r\n    {\r\n        (void)t;\r\n        unsigned r = rand_.rand() % limit;\r\n        ///!!!\r\n#ifdef RL_MY_TEST\r\n        if (this->iter_ == 8761115)\r\n        {\r\n            char buf [1024];\r\n            sprintf(buf, \"rand(%u, %u) = %u\\n\", t, limit, r);\r\n            OutputDebugStringA(buf);\r\n        }\r\n#endif\r\n        return r;\r\n    }\r\n\r\n    iteration_t iteration_count_impl()\r\n    
{\r\n        return this->params_.iteration_count;\r\n    }\r\n\r\n    void get_state_impl(std::ostream& /*ss*/)\r\n    {\r\n    }\r\n\r\n    void set_state_impl(std::istream& /*ss*/)\r\n    {\r\n    }\r\n\r\n    void on_thread_block(thread_id_t /*th*/, bool /*yield*/)\r\n    {\r\n    }\r\n\r\nprivate:\r\n    random_generator rand_;\r\n\r\n    RL_NOCOPY(random_scheduler);\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/relacy.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_RELACY_HPP\r\n#define RL_RELACY_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\n#include \"base.hpp\"\r\n#include \"context.hpp\"\r\n#include \"context_base_impl.hpp\"\r\n#include \"backoff.hpp\"\r\n#include \"atomic_fence.hpp\"\r\n#include \"atomic.hpp\"\r\n#include \"var.hpp\"\r\n#include \"thread_local.hpp\"\r\n#include \"test_suite.hpp\"\r\n#include \"dyn_thread.hpp\"\r\n\r\n#include \"stdlib/mutex.hpp\"\r\n#include \"stdlib/condition_variable.hpp\"\r\n#include \"stdlib/semaphore.hpp\"\r\n#include \"stdlib/event.hpp\"\r\n\r\n#include \"stdlib/windows.hpp\"\r\n#include \"stdlib/pthread.hpp\"\r\n\r\n#define VAR_T(x) rl::var<x>\r\n#define TLS_T(T) rl::thread_local_var<T>\r\n#define VAR(x) x($)\r\n\r\n#ifndef RL_FORCE_SEQ_CST\r\n#define memory_order_relaxed mo_relaxed, $\r\n#define memory_order_consume mo_consume, $\r\n#define memory_order_acquire mo_acquire, $\r\n#define memory_order_release mo_release, $\r\n#define memory_order_acq_rel mo_acq_rel, $\r\n#define memory_order_seq_cst mo_seq_cst, $\r\n#else\r\n#define memory_order_relaxed mo_seq_cst, $\r\n#define memory_order_consume mo_seq_cst, $\r\n#define memory_order_acquire mo_seq_cst, $\r\n#define memory_order_release mo_seq_cst, $\r\n#define memory_order_acq_rel mo_seq_cst, $\r\n#define memory_order_seq_cst mo_seq_cst, $\r\n#endif\r\n\r\n#define new                 RL_NEW_PROXY\r\n#define delete              RL_DELETE_PROXY\r\n#define malloc(sz)          rl::rl_malloc((sz), $)\r\n#define calloc(sz, cnt)     rl::rl_calloc((sz), (cnt), $)\r\n#define 
realloc(p, sz)      rl::rl_realloc((p), (sz), $)\r\n#define free(p)             rl::rl_free((p), $)\r\n\r\n#ifdef assert\r\n#undef assert\r\n#endif\r\n#define assert              RL_ASSERT\r\n\r\n#ifdef errno\r\n#undef errno\r\n#endif\r\n#define errno               (rl::get_errno())\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/relacy_cli.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_RELACY_CLI_HPP\r\n#define RL_RELACY_CLI_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\n#define RL_CLI_MODE\r\n\r\n#include \"relacy.hpp\"\r\n\r\n#include \"cli.hpp\"\r\n#include \"cli_interlocked.hpp\"\r\n#include \"cli_volatile.hpp\"\r\n#include \"cli_var.hpp\"\r\n\r\n\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/relacy_java.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_RELACY_JAVA_HPP\r\n#define RL_RELACY_JAVA_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\n#define RL_JAVA_MODE\r\n\r\n#include \"relacy.hpp\"\r\n\r\n#include \"java.hpp\"\r\n#include \"java_atomic.hpp\"\r\n#include \"java_volatile.hpp\"\r\n#include \"java_var.hpp\"\r\n\r\n\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/relacy_std.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_RELACY_STD_HPP\r\n#define RL_RELACY_STD_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\n#include \"relacy.hpp\"\r\n\r\n\r\nnamespace std\r\n{\r\n    using rl::memory_order;\r\n    using rl::mo_relaxed;\r\n    using rl::mo_consume;\r\n    using rl::mo_acquire;\r\n    using rl::mo_release;\r\n    using rl::mo_acq_rel;\r\n    using rl::mo_seq_cst;\r\n\r\n    using rl::atomic;\r\n    using rl::atomic_thread_fence;\r\n    using rl::atomic_signal_fence;\r\n\r\n    using rl::atomic_bool;\r\n    using rl::atomic_address;\r\n\r\n    using rl::atomic_char;\r\n    using rl::atomic_schar;\r\n    using rl::atomic_uchar;\r\n    using rl::atomic_short;\r\n    using rl::atomic_ushort;\r\n    using rl::atomic_int;\r\n    using rl::atomic_uint;\r\n    using rl::atomic_long;\r\n    using rl::atomic_ulong;\r\n    using rl::atomic_llong;\r\n    using rl::atomic_ullong;\r\n//    using rl::atomic_char16_t;\r\n//    using rl::atomic_char32_t;\r\n    using rl::atomic_wchar_t;\r\n\r\n//    using rl::atomic_int_least8_t;\r\n//    using rl::atomic_uint_least8_t;\r\n//    using rl::atomic_int_least16_t;\r\n//    using rl::atomic_uint_least16_t;\r\n//    using rl::atomic_int_least32_t;\r\n//    using rl::atomic_uint_least32_t;\r\n//    using rl::atomic_int_least64_t;\r\n//    using rl::atomic_uint_least64_t;\r\n//    using rl::atomic_int_fast8_t;\r\n//    using rl::atomic_uint_fast8_t;\r\n//    using rl::atomic_int_fast16_t;\r\n//    using rl::atomic_uint_fast16_t;\r\n//    using rl::atomic_int_fast32_t;\r\n//    using 
rl::atomic_uint_fast32_t;\r\n//    using rl::atomic_int_fast64_t;\r\n//    using rl::atomic_uint_fast64_t;\r\n    using rl::atomic_intptr_t;\r\n    using rl::atomic_uintptr_t;\r\n    using rl::atomic_size_t;\r\n//    using rl::atomic_ssize_t;\r\n    using rl::atomic_ptrdiff_t;\r\n//    using rl::atomic_intmax_t;\r\n//    using rl::atomic_uintmax_t;\r\n\r\n    using rl::mutex;\r\n    using rl::recursive_mutex;\r\n    using rl::condition_variable;\r\n    using rl::condition_variable_any;\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/rmw.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_RMW_HPP\r\n#define RL_RMW_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nenum rmw_type_e\r\n{\r\n    rmw_type_swap,\r\n    rmw_type_add,\r\n    rmw_type_sub,\r\n    rmw_type_and,\r\n    rmw_type_or,\r\n    rmw_type_xor,\r\n};\r\n\r\n\r\n\r\n\r\ninline char const* format(rmw_type_e t)\r\n{\r\n    switch (t)\r\n    {\r\n    case rmw_type_swap: return \"exchange\";\r\n    case rmw_type_add: return \"fetch_add\";\r\n    case rmw_type_sub: return \"fetch_sub\";\r\n    case rmw_type_and: return \"fetch_and\";\r\n    case rmw_type_or: return \"fetch_or\";\r\n    case rmw_type_xor: return \"fetch_xor\";\r\n    }\r\n    RL_VERIFY(!\"invalid rmw type\");\r\n    throw std::logic_error(\"invalid rmw type\");\r\n}\r\n\r\n\r\n\r\n\r\ntemplate<rmw_type_e type> struct rmw_type_t {};\r\n\r\n\r\n\r\n\r\ntemplate<typename T, typename Y>\r\nT perform_rmw(rmw_type_t<rmw_type_swap>, T v, Y op)\r\n{\r\n    (void)v;\r\n    return op;\r\n}\r\n\r\ntemplate<typename T, typename Y>\r\nT perform_rmw(rmw_type_t<rmw_type_add>, T v, Y op)\r\n{\r\n    return v + op;\r\n}\r\n\r\ntemplate<typename T, typename Y>\r\nT perform_rmw(rmw_type_t<rmw_type_sub>, T v, Y op)\r\n{\r\n    return v - op;\r\n}\r\n\r\ntemplate<typename T, typename Y>\r\nT perform_rmw(rmw_type_t<rmw_type_and>, T v, Y op)\r\n{\r\n    return v & op;\r\n}\r\n\r\ntemplate<typename T, typename Y>\r\nT perform_rmw(rmw_type_t<rmw_type_or>, T v, Y op)\r\n{\r\n    return v | op;\r\n}\r\n\r\ntemplate<typename T, typename 
Y>\r\nT perform_rmw(rmw_type_t<rmw_type_xor>, T v, Y op)\r\n{\r\n    return v ^ op;\r\n}\r\n\r\n\r\n\r\n}\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/scheduler.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_SCHEDULER_HPP\r\n#define RL_SCHEDULER_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context_base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nenum thread_state_e\r\n{\r\n    thread_state_running,\r\n    thread_state_blocked,\r\n    thread_state_finished,\r\n};\r\n\r\nenum thread_finish_result\r\n{\r\n    thread_finish_result_normal,\r\n    thread_finish_result_last,\r\n    thread_finish_result_deadlock,\r\n};\r\n\r\n\r\n\r\nstruct scheduler_thread_info\r\n{\r\n    thread_id_t             index_;\r\n    unsigned                block_count_;\r\n    thread_state_e          state_;\r\n\r\n    void reset(test_params& /*params*/)\r\n    {\r\n        block_count_ = 0;\r\n        state_ = thread_state_running;\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename derived_t, typename thread_info_type, thread_id_t thread_count>\r\nclass scheduler : nocopy<>\r\n{\r\npublic:\r\n    typedef thread_info_type                    thread_info_t;\r\n\r\n    struct shared_context_t\r\n    {\r\n        typedef typename derived_t::task_t      task_t;\r\n        //CRITICAL_SECTION                        guard_;\r\n        queue<task_t>                           queue_;\r\n    };\r\n\r\n    scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count)\r\n        : params_(params)\r\n        , ctx_(ctx)\r\n        , total_dynamic_threads_(dynamic_thread_count)\r\n        , iter_()\r\n        , thread_()\r\n    {\r\n        for (thread_id_t i = 0; i != thread_count; 
++i)\r\n        {\r\n            threads_[i].index_ = i;\r\n        }\r\n    }\r\n\r\n    thread_id_t iteration_begin(iteration_t iter)\r\n    {\r\n        iter_ = iter;\r\n        running_threads_count = thread_count;\r\n        finished_thread_count_ = 0;\r\n        timed_thread_count_ = 0;\r\n        spurious_thread_count_ = 0;\r\n        dynamic_thread_count_ = 0;\r\n\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            running_threads.push_back(i);\r\n            threads_[i].reset(params_);\r\n        }\r\n\r\n        for (thread_id_t i = thread_count - total_dynamic_threads_; i != thread_count; ++i)\r\n        {\r\n            dynamic_threads_[dynamic_thread_count_++] = &threads_[i];\r\n            block_thread(i, false);\r\n        }\r\n\r\n        thread_id_t const th = self().iteration_begin_impl();\r\n    \r\n        thread_ = &threads_[th];\r\n\r\n        return th;\r\n    }\r\n\r\n    bool iteration_end()\r\n    {\r\n        bool const finish = self().iteration_end_impl();\r\n\r\n        thread_ = 0;\r\n\r\n        return finish;\r\n    }\r\n\r\n    thread_id_t schedule(unpark_reason& reason, unsigned yield)\r\n    {\r\n        thread_id_t const th = self().schedule_impl(reason, yield);\r\n\r\n        RL_VERIFY(threads_[th].state_ == thread_state_running);\r\n        thread_ = &threads_[th];\r\n\r\n        return th;\r\n    }\r\n\r\n    RL_INLINE\r\n    unsigned rand(unsigned limit, sched_type t)\r\n    {\r\n        RL_VERIFY(limit);\r\n        return self().rand_impl(limit, t);\r\n    }\r\n\r\n    iteration_t iteration_count()\r\n    {\r\n        return self().iteration_count_impl();\r\n    }\r\n\r\n    bool park_current_thread(bool is_timed, bool allow_spurious_wakeup)\r\n    {\r\n        if (is_timed)\r\n        {\r\n            timed_threads_[timed_thread_count_++] = thread_;\r\n            RL_VERIFY(timed_thread_count_ <= thread_count);\r\n        }\r\n\r\n        if (allow_spurious_wakeup)\r\n        {\r\n         
   spurious_threads_[spurious_thread_count_++] = thread_;\r\n            RL_VERIFY(spurious_thread_count_ <= thread_count);\r\n        }\r\n\r\n        block_thread(thread_->index_, true);\r\n\r\n        return is_deadlock() ? false : true;\r\n    }\r\n\r\n    void unpark_thread(thread_id_t th, bool do_switch = false)\r\n    {\r\n        (void)do_switch;\r\n        unblock_thread(th);\r\n\r\n        thread_info_t& t = threads_[th];\r\n\r\n        //!!! store flag as to whether thread is spurious blocked in thread object\r\n        // (to eliminate iteration over all threads)\r\n        for (thread_id_t i = 0; i != spurious_thread_count_; ++i)\r\n        {\r\n            if (spurious_threads_[i] == &t)\r\n            {\r\n                for (thread_id_t j = i + 1; j != spurious_thread_count_; ++j)\r\n                    spurious_threads_[j - 1] = spurious_threads_[j];\r\n                spurious_thread_count_ -= 1;\r\n                break;\r\n            }\r\n        }\r\n\r\n        //!!! 
store flag as to whether thread is spurious blocked in thread object\r\n        for (thread_id_t i = 0; i != timed_thread_count_; ++i)\r\n        {\r\n            if (timed_threads_[i] == &t)\r\n            {\r\n                for (thread_id_t j = i + 1; j != timed_thread_count_; ++j)\r\n                    timed_threads_[j - 1] = timed_threads_[j];\r\n                timed_thread_count_ -= 1;\r\n                break;\r\n            }\r\n        }\r\n    }\r\n\r\n    thread_finish_result thread_finished()\r\n    {\r\n        RL_VERIFY(thread_->state_ == thread_state_running);\r\n        block_thread(thread_->index_, false);\r\n        thread_->state_ = thread_state_finished;\r\n        finished_thread_count_ += 1;\r\n        self().thread_finished_impl();\r\nretry:\r\n        if (finished_thread_count_ == thread_count)\r\n        {\r\n            return thread_finish_result_last;\r\n        }\r\n        else if (is_deadlock())\r\n        {\r\n            if (dynamic_thread_count_)\r\n            {\r\n                while (dynamic_thread_count_)\r\n                {\r\n                    thread_info_t* th = dynamic_threads_[--dynamic_thread_count_];\r\n                    unblock_thread(th->index_);\r\n                }\r\n                goto retry;\r\n            }\r\n            return thread_finish_result_deadlock;\r\n        }\r\n        else\r\n        {\r\n            return thread_finish_result_normal;\r\n        }\r\n    }\r\n\r\n    thread_id_t create_thread()\r\n    {\r\n        RL_VERIFY(dynamic_thread_count_);\r\n        thread_info_t* th = dynamic_threads_[--dynamic_thread_count_];\r\n        unblock_thread(th->index_);\r\n        return th->index_;\r\n    }\r\n\r\n    void get_state(std::ostream& ss)\r\n    {\r\n        self().get_state_impl(ss);\r\n    }\r\n\r\n    void set_state(std::istream& ss)\r\n    {\r\n        self().set_state_impl(ss);\r\n    }\r\n\r\nprotected:\r\n    test_params&                    params_;\r\n    shared_context_t&      
         ctx_;\r\n    thread_id_t const               total_dynamic_threads_;\r\n    iteration_t                     iter_;\r\n\r\n    aligned<thread_info_t>          threads_ [thread_count];\r\n    thread_info_t*                  thread_;\r\n\r\n    vector<thread_id_t>::type       running_threads;\r\n    thread_id_t                     running_threads_count;\r\n    thread_id_t                     finished_thread_count_;\r\n\r\n    //!!! doesn't timed/spurious waits must belong to full scheduler?\r\n    // hyphotesis: random scheduler can ignore timed/spurious waits\r\n    // (however must detect deadlock with spurious threads)\r\n    thread_info_t*                  timed_threads_ [thread_count];\r\n    thread_id_t                     timed_thread_count_;\r\n\r\n    thread_info_t*                  spurious_threads_ [thread_count];\r\n    thread_id_t                     spurious_thread_count_;\r\n\r\n    thread_info_t*                  dynamic_threads_ [thread_count];\r\n    thread_id_t                     dynamic_thread_count_;\r\n\r\n    void block_thread(thread_id_t th, bool yield)\r\n    {\r\n        RL_VERIFY(th < thread_count);\r\n        thread_info_t& t = threads_[th];\r\n        RL_VERIFY(t.state_ != thread_state_finished);\r\n        if (t.block_count_++)\r\n            return;\r\n\r\n        for (thread_id_t i = 0; i != running_threads_count; ++i)\r\n        {\r\n            if (running_threads[i] == th)\r\n            {\r\n                running_threads.erase(running_threads.begin() + i);\r\n                running_threads_count -= 1;\r\n                t.state_ = thread_state_blocked;\r\n                self().on_thread_block(th, yield);\r\n                return;\r\n            }\r\n        }\r\n        RL_VERIFY(false);\r\n    }\r\n\r\n    bool unblock_thread(thread_id_t th)\r\n    {\r\n        RL_VERIFY(th < thread_count);\r\n        thread_info_t& t = threads_[th];\r\n        RL_VERIFY(t.state_ == thread_state_blocked);\r\n        if 
(--t.block_count_)\r\n            return false;\r\n\r\n        running_threads.push_back(th);\r\n        running_threads_count += 1;\r\n        t.state_ = thread_state_running;\r\n        return true;\r\n    }\r\n\r\nprivate:\r\n    derived_t& self()\r\n    {\r\n        return *static_cast<derived_t*>(this);\r\n    }\r\n\r\n    bool is_deadlock()\r\n    {\r\n        if ((0 == running_threads_count) && (0 == timed_thread_count_))\r\n        {\r\n            self().purge_blocked_threads();\r\n            if ((0 == running_threads_count) && (0 == timed_thread_count_))\r\n                return true;\r\n        }\r\n        return false;\r\n    }\r\n\r\n    void thread_finished_impl()\r\n    {\r\n    }\r\n\r\n    void purge_blocked_threads()\r\n    {\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/signature.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_SIGNATURE_HPP\r\n#define RL_SIGNATURE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"test_result.hpp\"\r\n#include \"context_base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<unsigned magic>\r\nclass signature\r\n{\r\npublic:\r\n    signature()\r\n        : magic_(magic)\r\n    {\r\n    }\r\n\r\n    signature(signature const&)\r\n        : magic_(magic)\r\n    {\r\n    }\r\n\r\n    ~signature()\r\n    {\r\n        check(RL_INFO);\r\n        magic_ = 0;\r\n    }\r\n\r\n    void check(debug_info_param info) const\r\n    {\r\n        if (\r\n            ((uintptr_t)this <= (uintptr_t)-1 - 4096) && \r\n            ((uintptr_t)this >= 4096) &&\r\n            ((uintptr_t)this % sizeof(unsigned) == 0) && (magic == magic_))\r\n        {\r\n            return;\r\n        }\r\n        else\r\n        {\r\n            fail(info);\r\n        }\r\n    }\r\n\r\nprivate:\r\n    unsigned magic_;\r\n\r\n    struct fault_event\r\n    {\r\n        void const* addr_;\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << addr_ << std::dec << \">\"\r\n                << \" access to freed memory\";\r\n        }\r\n    };\r\n\r\n    RL_NOINLINE void fail(debug_info_param info) const\r\n    {\r\n        context& c = ctx();\r\n        RL_HIST(fault_event) {this} RL_HIST_END();\r\n        rl::ctx().fail_test(\"access to freed memory\", test_result_access_to_freed_memory, info);\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/slab_allocator.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_SLAB_ALLOCATOR_HPP\r\n#define RL_SLAB_ALLOCATOR_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<typename type>\r\nclass slab_allocator : nocopy<>\r\n{\r\npublic:\r\n    slab_allocator()\r\n        : freelist_()\r\n        , blocks_()\r\n        , alloc_count_()\r\n    {\r\n    }\r\n\r\n    ~slab_allocator()\r\n    {\r\n        char* pos = blocks_;\r\n        while (pos)\r\n        {\r\n            char* const next = *reinterpret_cast<char**>(pos);\r\n            ::free(pos);\r\n            pos = next;\r\n        }\r\n    }\r\n\r\n    type* alloc(void* ctx = 0)\r\n    {\r\n        if (freelist_)\r\n        {\r\n            type* p = freelist_;\r\n            freelist_ = *reinterpret_cast<type**>(p);\r\n            alloc_count_ += 1;\r\n            *(void**)p = ctx;\r\n            type* pp = reinterpret_cast<type*>((reinterpret_cast<void**>(p) + 1));\r\n            return pp;\r\n        }\r\n        else\r\n        {\r\n            return alloc_batch();\r\n        }\r\n    }\r\n\r\n    void free(type* p)\r\n    {\r\n        type** pos = reinterpret_cast<type**>((reinterpret_cast<void**>(p) - 1));\r\n        pos[0] = freelist_;\r\n        freelist_ = reinterpret_cast<type*>(pos);\r\n        alloc_count_ -= 1;\r\n    }\r\n\r\n    bool iteration_end()\r\n    {\r\n#ifndef RL_GC\r\n        return alloc_count_ == 0;\r\n#else\r\n        freelist_ = 0;\r\n        size_t elem_size = sizeof(void*) + sizeof(type);\r\n        elem_size = (elem_size + 
15) & ~15;\r\n        char* pos = blocks_;\r\n        while (pos)\r\n        {\r\n            char* p = pos;\r\n            p += elem_size;\r\n            for (size_t i = 0; i != batch_size; ++i)\r\n            {\r\n                *reinterpret_cast<type**>(p) = freelist_;\r\n                freelist_ = reinterpret_cast<type*>(p);\r\n                p += elem_size;\r\n            }\r\n            pos = *reinterpret_cast<char**>(pos);\r\n        }\r\n        return true;\r\n#endif\r\n    }\r\n\r\n    void output_allocs(std::ostream& stream)\r\n    {\r\n        size_t elem_size = sizeof(void*) + sizeof(type);\r\n        elem_size = (elem_size + 15) & ~15;\r\n        set<void*>::type allocs;\r\n        char* pos = blocks_;\r\n        while (pos)\r\n        {\r\n            char* p = pos;\r\n            p += elem_size;\r\n            for (size_t i = 0; i != batch_size; ++i)\r\n            {\r\n                allocs.insert(p);\r\n                p += elem_size;\r\n            }\r\n            pos = *reinterpret_cast<char**>(pos);\r\n        }\r\n        set<void*>::type avail;\r\n        type* pos2 = freelist_;\r\n        while (pos2)\r\n        {\r\n            avail.insert(pos2);\r\n            pos2 = *reinterpret_cast<type**>(pos2);\r\n        }\r\n        vector<void*>::type diff;\r\n        std::set_difference(allocs.begin(), allocs.end(), avail.begin(), avail.end(), std::back_inserter(diff));\r\n        for (size_t i = 0; i != diff.size(); ++i)\r\n        {\r\n            stream << *(void**)diff[i] << std::endl;\r\n        }\r\n    }\r\n\r\nprivate:\r\n    static size_t const batch_size = 128;\r\n    type* freelist_;\r\n    char* blocks_;\r\n    size_t alloc_count_;\r\n\r\n    RL_NOINLINE type* alloc_batch()\r\n    {\r\n        size_t elem_size = sizeof(void*) + sizeof(type);\r\n        elem_size = (elem_size + 15) & ~15;\r\n        char* const batch = (char*)(::malloc)(elem_size * (batch_size + 1));\r\n        if (0 == batch)\r\n            throw 
std::bad_alloc();\r\n        *reinterpret_cast<char**>(batch) = blocks_;\r\n        blocks_ = batch;\r\n        char* p = batch;\r\n        p += elem_size;\r\n        for (size_t i = 0; i != batch_size; ++i)\r\n        {\r\n            *reinterpret_cast<type**>(p) = freelist_;\r\n            freelist_ = reinterpret_cast<type*>(p);\r\n            p += elem_size;\r\n        }\r\n        return alloc();\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/stdlib/condition_variable.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_CONDITION_VARIABLE_HPP\r\n#define RL_CONDITION_VARIABLE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"../base.hpp\"\r\n#include \"../context_base.hpp\"\r\n#include \"../waitset.hpp\"\r\n#include \"../signature.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\nstruct mutex_wrapper\r\n{\r\n    virtual void lock(debug_info_param info) const = 0;\r\n    virtual void unlock(debug_info_param info) const = 0;\r\n    virtual ~mutex_wrapper() {}\r\n};\r\n\r\ntemplate<typename mutex_t>\r\nclass mutex_wrapper_impl : public mutex_wrapper\r\n{\r\npublic:\r\n    mutex_wrapper_impl(mutex_t& m)\r\n        : m_(m)\r\n    {\r\n    }\r\n\r\nprivate:\r\n    mutex_t& m_;\r\n\r\n    virtual void lock(debug_info_param info) const\r\n    {\r\n        m_.lock(info);\r\n    }\r\n\r\n    virtual void unlock(debug_info_param info) const\r\n    {\r\n        m_.unlock(info);\r\n    }\r\n\r\n    RL_NOCOPY(mutex_wrapper_impl);\r\n};\r\n\r\nstruct pred_wrapper\r\n{\r\n    virtual bool exec() const = 0;\r\n    virtual ~pred_wrapper() {}\r\n};\r\n\r\ntemplate<typename pred_t>\r\nclass pred_wrapper_impl : public pred_wrapper\r\n{\r\npublic:\r\n    pred_wrapper_impl(pred_t p)\r\n        : p_(p)\r\n    {\r\n    }\r\n\r\nprivate:\r\n    mutable pred_t p_;\r\n\r\n    virtual bool exec() const\r\n    {\r\n        return p_();\r\n    }\r\n\r\n    RL_NOCOPY(pred_wrapper_impl);\r\n};\r\n\r\n\r\nstruct condvar_data\r\n{\r\n    virtual void notify_one(debug_info_param info) = 0;\r\n    virtual void notify_all(debug_info_param info) = 0;\r\n    
virtual sema_wakeup_reason wait(mutex_wrapper const& lock, bool is_timed, debug_info_param info) = 0;\r\n    virtual bool wait(mutex_wrapper const& lock, pred_wrapper const& pred, bool is_timed, debug_info_param info) = 0;\r\n    virtual ~condvar_data() {} // just to calm down gcc\r\n};\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass condvar_data_impl : public condvar_data\r\n{\r\npublic:\r\n    condvar_data_impl(bool allow_spurious_wakeups)\r\n    {\r\n        spurious_wakeup_limit_ = 0;\r\n        if (allow_spurious_wakeups && ctx().is_random_sched())\r\n            spurious_wakeup_limit_ = 10;\r\n    }\r\n\r\n    ~condvar_data_impl()\r\n    {\r\n        //!!! detect destoy when there are blocked threads\r\n    }\r\n\r\nprivate:\r\n    waitset<thread_count>           ws_;\r\n    signature<0xc0ffe3ad>           sign_;\r\n    int                             spurious_wakeup_limit_;\r\n\r\n    struct event_t\r\n    {\r\n        enum type_e\r\n        {\r\n            type_notify_one,\r\n            type_notify_all,\r\n            type_wait_enter,\r\n            type_wait_exit,\r\n            type_wait_pred_enter,\r\n            type_wait_pred_exit,\r\n        };\r\n\r\n        condvar_data_impl const*    var_addr_;\r\n        type_e                      type_;\r\n        thread_id_t                 thread_count_;\r\n        unpark_reason               reason_;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << var_addr_ << std::dec << \"> cond_var: \";\r\n            switch (type_)\r\n            {\r\n            case type_notify_one:\r\n                s << \"notify one total_blocked=\" << thread_count_ << \" unblocked=\" << (thread_count_ ? 
1 : 0);\r\n                break;\r\n            case type_notify_all:\r\n                s << \"notify all unblocked=\" << thread_count_;\r\n                break;\r\n            case type_wait_enter: s << \"wait enter\"; break;\r\n            case type_wait_exit:\r\n                s << \"wait exit\";\r\n                if (unpark_reason_normal == reason_)\r\n                    s << \" due to notified\";\r\n                else if (unpark_reason_timeout == reason_)\r\n                    s << \" due to timeout\";\r\n                else if (unpark_reason_spurious == reason_)\r\n                    s << \" spuriously\";\r\n                break;\r\n            case type_wait_pred_enter: s << \"wait pred enter\"; break;\r\n            case type_wait_pred_exit: s << \"wait pred exit\"; break;\r\n            }\r\n        }\r\n    };\r\n\r\n    virtual void notify_one(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        //??? do I need this scheduler call?\r\n        c.sched();\r\n        sign_.check(info);\r\n        RL_HIST(event_t) {this, event_t::type_notify_one, ws_.size()} RL_HIST_END();\r\n        ws_.unpark_one(c, info);\r\n    }\r\n\r\n    virtual void notify_all(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        //??? do I need this scheduler call?\r\n        c.sched();\r\n        sign_.check(info);\r\n        RL_HIST(event_t) {this, event_t::type_notify_all, ws_.size()} RL_HIST_END();\r\n        ws_.unpark_all(c, info);\r\n    }\r\n\r\n    virtual sema_wakeup_reason wait(mutex_wrapper const& lock, bool is_timed, debug_info_param info)\r\n    {\r\n        //!!! 
detect whether mutex is the same\r\n        context& c = ctx();\r\n        sign_.check(info);\r\n        RL_HIST(event_t) {this, event_t::type_wait_enter} RL_HIST_END();\r\n        lock.unlock(info);\r\n        sign_.check(info);\r\n        bool allow_spurious_wakeup = (spurious_wakeup_limit_ > 0);\r\n        unpark_reason reason = ws_.park_current(c, is_timed, allow_spurious_wakeup, false, info);\r\n        if (reason == unpark_reason_spurious)\r\n            spurious_wakeup_limit_ -= 1;\r\n        RL_HIST(event_t) {this, event_t::type_wait_exit, 0, reason} RL_HIST_END();\r\n        lock.lock(info);\r\n        sign_.check(info);\r\n        if (reason == unpark_reason_normal)\r\n            return sema_wakeup_reason_success;\r\n        else if (reason == unpark_reason_spurious)\r\n            return sema_wakeup_reason_spurious;\r\n        else //if (reason == unpark_reason_timeout)\r\n            return sema_wakeup_reason_timeout;\r\n    }\r\n\r\n    virtual bool wait(mutex_wrapper const& lock, pred_wrapper const& pred, bool is_timed, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        sign_.check(info);\r\n        RL_HIST(event_t) {this, event_t::type_wait_pred_enter} RL_HIST_END();\r\n        while (!pred.exec())\r\n        {\r\n            sema_wakeup_reason reason = wait(lock, is_timed, info);\r\n            if (reason == sema_wakeup_reason_timeout)\r\n            {\r\n                RL_HIST(event_t) {this, event_t::type_wait_pred_exit} RL_HIST_END();\r\n                return pred.exec();\r\n            }\r\n        }\r\n        RL_HIST(event_t) {this, event_t::type_wait_pred_exit} RL_HIST_END();\r\n        return true;\r\n    }\r\n};\r\n\r\n\r\ntemplate<typename tag_t>\r\nclass condvar\r\n{\r\npublic:\r\n    condvar()\r\n        : impl_()\r\n    {\r\n    }\r\n\r\n    condvar(condvar const&)\r\n        : impl_()\r\n    {\r\n    }\r\n\r\n    condvar& operator = (condvar const&)\r\n    {\r\n        return *this;\r\n    }\r\n\r\n    
~condvar()\r\n    {\r\n    }\r\n\r\n    void init(bool allow_spurious_wakeups, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_condvar, \"\", info);\r\n        sign_.check(info);\r\n        impl_ = c.condvar_ctor(allow_spurious_wakeups);\r\n    }\r\n\r\n    void deinit(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        check(info);\r\n        c.condvar_dtor(impl_);\r\n        impl_ = 0;\r\n    }\r\n\r\n    void notify_one(debug_info_param info)\r\n    {\r\n        check(info);\r\n        impl_->notify_one(info);\r\n    }\r\n\r\n    void notify_all(debug_info_param info)\r\n    {\r\n        check(info);\r\n        impl_->notify_all(info);\r\n    }\r\n\r\n    template<typename lock_t>\r\n    sema_wakeup_reason wait(lock_t& lock, bool is_timed, debug_info_param info)\r\n    {\r\n        check(info);\r\n        mutex_wrapper_impl<lock_t> w (lock);\r\n        return impl_->wait(w, is_timed, info);\r\n    }\r\n\r\n    template<typename lock_t, typename pred_t>\r\n    bool wait(mutex_wrapper const& lock, pred_wrapper const& pred, bool is_timed, debug_info_param info)\r\n    {\r\n        check(info);\r\n        return impl_->wait(mutex_wrapper_impl<lock_t>(lock), pred_wrapper_impl<pred_t>(pred), is_timed, info);\r\n    }\r\n\r\nprivate:\r\n    condvar_data* impl_;\r\n    signature<0xbadc0ffe> sign_;\r\n\r\n    void check(debug_info_param info)\r\n    {\r\n        RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_condvar, \"\", info);\r\n        sign_.check(info);\r\n    }\r\n};\r\n\r\n\r\n\r\ntemplate<typename tag_t>\r\nclass condition_variable_std : condvar<tag_t>\r\n{\r\npublic:\r\n    condition_variable_std()\r\n    {\r\n        condvar<tag_t>::init(true, $);\r\n    }\r\n\r\n    ~condition_variable_std()\r\n    {\r\n        condvar<tag_t>::deinit($);\r\n    }\r\n\r\n    void notify_one(debug_info_param info)\r\n    {\r\n        
condvar<tag_t>::notify_one(info);\r\n    }\r\n\r\n    void notify_all(debug_info_param info)\r\n    {\r\n        condvar<tag_t>::notify_all(info);\r\n    }\r\n\r\n    template<typename lock_t>\r\n    void wait(lock_t& lock, debug_info_param info)\r\n    {\r\n        condvar<tag_t>::wait(lock, false, info);\r\n    }\r\n\r\n    template<typename lock_t, typename pred_t>\r\n    void wait(lock_t& lock, pred_t pred, debug_info_param info)\r\n    {\r\n        condvar<tag_t>::wait(lock, pred, false, info);\r\n    }\r\n\r\n    template<typename lock_t, typename abs_time_t>\r\n    bool wait_until(lock_t& lock, abs_time_t const&, debug_info_param info)\r\n    {\r\n        return condvar<tag_t>::wait(lock, true, info);\r\n    }\r\n\r\n    template<typename lock_t, typename abs_time_t, typename pred_t>\r\n    bool wait_until(lock_t& lock, abs_time_t const&, pred_t pred, debug_info_param info)\r\n    {\r\n        return condvar<tag_t>::wait(lock, pred, true, info);\r\n    }\r\n    \r\n    template<typename lock_t, typename rel_time_t>\r\n    bool wait_for(lock_t& lock, rel_time_t const&, debug_info_param info)\r\n    {\r\n        sema_wakeup_reason reason = condvar<tag_t>::wait(lock, true, info);\r\n        return reason == sema_wakeup_reason_success;\r\n    }\r\n\r\n    template<typename lock_t, typename rel_time_t, typename pred_t>\r\n    bool wait_for(lock_t& lock, rel_time_t const&, pred_t pred, debug_info_param info)\r\n    {\r\n        return condvar<tag_t>::wait(lock, pred, true, info);\r\n    }\r\n\r\n    RL_NOCOPY(condition_variable_std);\r\n};\r\n\r\n\r\nstruct condvar_tag_std;\r\ntypedef condition_variable_std<condvar_tag_std> condition_variable;\r\nstruct condvar_tag_std_any;\r\ntypedef condition_variable_std<condvar_tag_std_any> condition_variable_any;\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/stdlib/event.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_EVENT_HPP\r\n#define RL_EVENT_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"../base.hpp\"\r\n#include \"../context_base.hpp\"\r\n#include \"../sync_var.hpp\"\r\n#include \"../waitset.hpp\"\r\n#include \"semaphore.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nstruct event_data\r\n{\r\n    virtual void set(debug_info_param info) = 0;\r\n    virtual void reset(debug_info_param info) = 0;\r\n    virtual void pulse(debug_info_param info) = 0;\r\n    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) = 0;\r\n    virtual bool is_signaled(debug_info_param info) = 0;\r\n    virtual void memory_acquire(debug_info_param info) = 0;\r\n    virtual void* prepare_wait(debug_info_param info) = 0;\r\n    virtual ~event_data() {} // just to calm down gcc\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass event_data_impl : public event_data\r\n{\r\npublic:\r\n    event_data_impl(bool manual_reset, bool initial_state)\r\n        : manual_reset_(manual_reset)\r\n        , state_(initial_state)\r\n    {\r\n    }\r\n\r\n    ~event_data_impl()\r\n    {\r\n        //!!! 
detect destuction with waiters\r\n    }\r\n\r\nprivate:\r\n    signature<0xdada1234> sign_;\r\n    bool const manual_reset_;\r\n    bool state_;\r\n    waitset<thread_count> ws_;\r\n    sync_var<thread_count> sync_;\r\n\r\n    struct state_event\r\n    {\r\n        enum type\r\n        {\r\n            type_set,\r\n            type_reset,\r\n            type_pulse,\r\n        };\r\n\r\n        event_data_impl* addr_;\r\n        type type_;\r\n        bool initial_state_;\r\n        bool final_state_;\r\n        thread_id_t unblocked_;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << addr_ << std::dec << \"> event: \";\r\n            if (type_set == type_)\r\n                s << \"set \";\r\n            else if (type_reset == type_)\r\n                s << \"reset \";\r\n            else\r\n                s << \"pulse \";\r\n            s << \"initial_state=\" << initial_state_\r\n                << \" final_state=\" << final_state_;\r\n            if (type_reset != type_)\r\n                s << \" unblocked=\" << unblocked_;\r\n        }\r\n\r\n    };\r\n\r\n    virtual void set(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        bool initial_state = state_;\r\n        thread_id_t unblocked = 0;\r\n\r\n        if (state_)\r\n        {\r\n            //!!! 
probably can break if a thread waits in wfmo\r\n            RL_VERIFY(false == ws_);\r\n        }\r\n        else\r\n        {\r\n            sync_.release(c.threadx_);\r\n            state_ = true;\r\n\r\n            if (manual_reset_)\r\n            {\r\n                unblocked = ws_.unpark_all(c, info);\r\n            }\r\n            else\r\n            {\r\n                if (ws_.unpark_one(c, info))\r\n                    unblocked = 1;\r\n            }\r\n        }\r\n\r\n        RL_HIST(state_event) {this, state_event::type_set, initial_state, state_, unblocked} RL_HIST_END();\r\n    }\r\n\r\n    virtual void reset(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        bool initial_state = state_;\r\n\r\n        if (state_)\r\n        {\r\n            RL_VERIFY(false == ws_);\r\n            sync_.release(c.threadx_);\r\n            state_ = false;\r\n        }\r\n\r\n        RL_HIST(state_event) {this, state_event::type_reset, initial_state, state_, 0} RL_HIST_END();\r\n    }\r\n\r\n    virtual void pulse(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        //??? should I model nasty caveat described in MSDN\r\n        thread_id_t unblocked = 0;\r\n\r\n        if (state_)\r\n        {\r\n            //!!! 
probably can break if a thread waits in wfmo\r\n            RL_VERIFY(false == ws_);\r\n        }\r\n        else\r\n        {\r\n            sync_.release(c.threadx_);\r\n            state_ = true;\r\n            unblocked = ws_.unpark_all(c, info);\r\n            state_ = false;\r\n        }\r\n\r\n        RL_HIST(state_event) {this, state_event::type_pulse, state_, state_, unblocked} RL_HIST_END();\r\n    }\r\n\r\n    struct wait_event\r\n    {\r\n        event_data_impl* addr_;\r\n        bool try_wait_;\r\n        bool is_timed_;\r\n        bool initial_state_;\r\n        bool final_state_;\r\n        sema_wakeup_reason reason_;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << addr_ << std::dec << \"> event: \";\r\n            if (try_wait_)\r\n                s << \"try_wait \";\r\n            else if (is_timed_)\r\n                s << \"timed wait \";\r\n            else\r\n                s << \"wait \";\r\n\r\n            if (reason_ == sema_wakeup_reason_success)\r\n                s << \"succeeded \";\r\n            else if (reason_ == sema_wakeup_reason_failed)\r\n                s << \"failed \";\r\n            else if (reason_ == sema_wakeup_reason_timeout)\r\n                s << \"timed out \";\r\n            else if (reason_ == sema_wakeup_reason_spurious)\r\n                s << \"spuriously failed \";\r\n\r\n            s << \"initial_state=\" << initial_state_\r\n                << \" final_state=\" << final_state_;\r\n        }\r\n    };\r\n\r\n    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        bool initial_state = state_;\r\n        sema_wakeup_reason reason = sema_wakeup_reason_success;\r\n\r\n        for (;;)\r\n        {\r\n            if (state_)\r\n            {\r\n                if (manual_reset_)\r\n                {\r\n                   
 sync_.acquire(c.threadx_);\r\n                }\r\n                else\r\n                {\r\n                    state_ = false;\r\n                    sync_.acq_rel(c.threadx_);\r\n                }\r\n                reason = sema_wakeup_reason_success;\r\n                break;\r\n            }\r\n\r\n            if (try_wait)\r\n            {\r\n                sync_.acquire(c.threadx_);\r\n                reason = sema_wakeup_reason_failed;\r\n                break;\r\n            }\r\n\r\n            unpark_reason wr = ws_.park_current(c, is_timed, false, true, info);\r\n            initial_state = state_;\r\n            if (unpark_reason_timeout == wr)\r\n            {\r\n                sync_.acquire(c.threadx_);\r\n                reason = sema_wakeup_reason_timeout;\r\n                break;\r\n            }\r\n            else if (unpark_reason_normal == wr)\r\n            {\r\n                RL_VERIFY(state_ == true);\r\n                if (manual_reset_)\r\n                {\r\n                    sync_.acquire(c.threadx_);\r\n                }\r\n                else\r\n                {\r\n                    state_ = false;\r\n                    sync_.acq_rel(c.threadx_);\r\n                }\r\n                c.switch_back(info);\r\n                reason = sema_wakeup_reason_success;\r\n                break;\r\n            }\r\n            RL_VERIFY(false);\r\n        }\r\n\r\n        RL_HIST(wait_event) {this, try_wait, is_timed, initial_state, state_, reason} RL_HIST_END();\r\n        return reason;\r\n    }\r\n\r\n    virtual bool is_signaled(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        return state_;\r\n    }\r\n\r\n    virtual void memory_acquire(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        sync_.acquire(ctx().threadx_);\r\n    }\r\n\r\n    virtual void* prepare_wait(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        return &ws_;\r\n    }\r\n\r\n    
RL_NOCOPY(event_data_impl);\r\n};\r\n\r\n\r\n\r\nclass generic_event : public win_waitable_object\r\n{\r\npublic:\r\n    generic_event()\r\n        : impl_()\r\n    {\r\n    }\r\n\r\n    generic_event(generic_event const&)\r\n        : impl_()\r\n    {\r\n    }\r\n\r\n    generic_event& operator = (generic_event const&)\r\n    {\r\n        return *this;\r\n    }\r\n\r\n    void init(bool manual_reset, bool initial_state, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_event, \"\", info);\r\n        sign_.check(info);\r\n        impl_ = c.event_ctor(manual_reset, initial_state);\r\n    }\r\n\r\n    void deinit(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        check(info);\r\n        c.event_dtor(impl_);\r\n        impl_ = 0;\r\n    }\r\n\r\n    void set(debug_info_param info)\r\n    {\r\n        check(info);\r\n        impl_->set(info);\r\n    }\r\n\r\n    void reset(debug_info_param info)\r\n    {\r\n        check(info);\r\n        impl_->reset(info);\r\n    }\r\n\r\n    void pulse(debug_info_param info)\r\n    {\r\n        check(info);\r\n        impl_->pulse(info);\r\n    }\r\n\r\n    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info)\r\n    {\r\n        check(info);\r\n        return impl_->wait(try_wait, is_timed, info);\r\n    }\r\n\r\n    virtual bool signal(debug_info_param info)\r\n    {\r\n        set(info);\r\n        return true;\r\n    }\r\n\r\nprivate:\r\n    event_data* impl_;\r\n    signature<0x3390eeaa> sign_;\r\n\r\n    event_data* check(debug_info_param info)\r\n    {\r\n        RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_event, \"\", info);\r\n        sign_.check(info);\r\n        return impl_;\r\n    }\r\n\r\n    virtual bool is_signaled(debug_info_param info)\r\n    {\r\n        return check(info)->is_signaled(info);\r\n    }\r\n\r\n    virtual void memory_acquire(debug_info_param 
info)\r\n    {\r\n        check(info)->memory_acquire(info);\r\n    }\r\n\r\n    virtual void* prepare_wait(debug_info_param info)\r\n    {\r\n        return check(info)->prepare_wait(info);\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/stdlib/mutex.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_MUTEX_HPP\r\n#define RL_MUTEX_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"../base.hpp\"\r\n#include \"../context.hpp\"\r\n#include \"../thread.hpp\"\r\n#include \"../atomic.hpp\"\r\n#include \"../waitset.hpp\"\r\n#include \"../signature.hpp\"\r\n#include \"../sync_var.hpp\"\r\n#include \"../foreach.hpp\"\r\n#include \"semaphore.hpp\"\r\n\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\nstruct generic_mutex_data : nocopy<>\r\n{\r\n    virtual bool lock_exclusive(bool is_timed, debug_info_param info) = 0;\r\n    virtual bool try_lock_exclusive(debug_info_param info) = 0;\r\n    virtual void unlock_exclusive(debug_info_param info) = 0;\r\n    virtual void lock_shared(debug_info_param info) = 0;\r\n    virtual bool try_lock_shared(debug_info_param info) = 0;\r\n    virtual void unlock_shared(debug_info_param info) = 0;\r\n    virtual void unlock_exclusive_or_shared(debug_info_param info) = 0;\r\n    virtual bool is_signaled(debug_info_param info) = 0;\r\n    virtual void memory_acquire(debug_info_param info) = 0;\r\n    virtual void* prepare_wait(debug_info_param info) = 0;\r\n    virtual ~generic_mutex_data() {} // just to calm down gcc\r\n};\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass generic_mutex_data_impl : public generic_mutex_data\r\n{\r\npublic:\r\n    struct event_t\r\n    {\r\n        enum type_e\r\n        {\r\n            type_lock,\r\n            type_unlock,\r\n            type_recursive_lock,\r\n            type_recursive_unlock,\r\n            type_failed_try_lock,\r\n            
type_spuriously_failed_try_lock,\r\n            type_lock_shared,\r\n            type_unlock_shared,\r\n            type_recursive_lock_shared,\r\n            type_recursive_unlock_shared,\r\n            type_failed_try_lock_shared,\r\n            type_spuriously_failed_try_lock_shared,\r\n            type_wait,\r\n            type_destroying_owned_mutex,\r\n        };\r\n\r\n        generic_mutex_data_impl const* var_addr_;\r\n        type_e type_;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << var_addr_ << std::dec << \"> mutex: \";\r\n            switch (type_)\r\n            {\r\n            case type_lock: s << \"exclusive lock\"; break;\r\n            case type_unlock: s << \"exclusive unlock\"; break;\r\n            case type_recursive_lock: s << \"recursive exclusive lock\"; break;\r\n            case type_recursive_unlock: s << \"recursive exclusive unlock\"; break;\r\n            case type_failed_try_lock: s << \"failed exclusive try lock\"; break;\r\n            case type_spuriously_failed_try_lock: s << \"spuriously failed exclusive try lock\"; break;\r\n            case type_lock_shared: s << \"shared lock\"; break;\r\n            case type_unlock_shared: s << \"shared unlock\"; break;\r\n            case type_recursive_lock_shared: s << \"recursive shared lock\"; break;\r\n            case type_recursive_unlock_shared: s << \"recursive shared unlock\"; break;\r\n            case type_failed_try_lock_shared: s << \"failed shared try lock\"; break;\r\n            case type_spuriously_failed_try_lock_shared: s << \"spuriously failed shared try lock\"; break;\r\n            case type_wait: s << \"blocking\"; break;\r\n            case type_destroying_owned_mutex: s << \"destroying owned mutex\"; break;\r\n            }\r\n        }\r\n    };\r\n\r\n    generic_mutex_data_impl(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock)\r\n        : is_rw_(is_rw)\r\n      
  , is_exclusive_recursive_(is_exclusive_recursive)\r\n        , is_shared_recursive_(is_shared_recursive)\r\n        , failing_try_lock_(failing_try_lock)\r\n        , exclusive_owner_(state_free)\r\n        , exclusive_recursion_count_(0)\r\n        , shared_lock_count_(0)\r\n        , try_lock_failed_()\r\n    {\r\n        context& c = ctx();\r\n        (void)c;\r\n        RL_VERIFY(false == c.invariant_executing);\r\n        foreach<thread_count>(shared_owner_, &assign_zero);\r\n    }\r\n\r\n    ~generic_mutex_data_impl()\r\n    {\r\n        context& c = ctx();\r\n        RL_VERIFY(false == c.invariant_executing);\r\n        if (exclusive_owner_ != state_free\r\n            || exclusive_waitset_\r\n            || shared_waitset_)\r\n        {\r\n            debug_info info = $;\r\n            RL_HIST(event_t) {this, event_t::type_destroying_owned_mutex} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_destroying_owned_mutex, \"\", $);\r\n        }\r\n    }\r\n\r\n    virtual bool lock_exclusive(bool is_timed, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n        RL_VERIFY(false == c.invariant_executing);\r\n\r\n        thread_id_t const my_id = c.threadx_->index_;\r\n\r\n        if (exclusive_owner_ == state_shared && shared_owner_[my_id])\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_mutex_read_to_write_upgrade, \"\", info);\r\n        }\r\n\r\n        if (exclusive_owner_ == my_id)\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_recursive_lock} RL_HIST_END();\r\n            if (is_exclusive_recursive_)\r\n            {\r\n                exclusive_recursion_count_ += 1;\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, \"\", info);\r\n            
}\r\n        }\r\n\r\n        for (;;)\r\n        {\r\n            if (exclusive_owner_ == state_free)\r\n            {\r\n                RL_VERIFY(exclusive_recursion_count_ == 0);\r\n                //!!! in some implementation here must be acq_rel\r\n                sync_.acquire(c.threadx_);\r\n                exclusive_recursion_count_ = 1;\r\n                exclusive_owner_ = my_id;\r\n                RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END();\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                RL_VERIFY(my_id != exclusive_owner_);\r\n                RL_HIST(event_t) {this, event_t::type_wait} RL_HIST_END();\r\n                unpark_reason reason = exclusive_waitset_.park_current(c, is_timed, false, false, info);\r\n                RL_VERIFY(reason != unpark_reason_spurious);\r\n                if (reason == unpark_reason_timeout)\r\n                {\r\n                    sync_.acquire(c.threadx_);\r\n                    return false;\r\n                }\r\n            }\r\n\r\n            //??? 
c.sched();\r\n            //sign_.check(info);\r\n        }\r\n    }\r\n\r\n    virtual bool try_lock_exclusive(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n        RL_VERIFY(false == c.invariant_executing);\r\n\r\n        thread_id_t const my_id = c.threadx_->index_;\r\n\r\n        if (exclusive_owner_ == state_shared && shared_owner_[my_id])\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_mutex_read_to_write_upgrade, \"\", info);\r\n        }\r\n\r\n        if (exclusive_owner_ == my_id)\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_recursive_lock} RL_HIST_END();\r\n            if (is_exclusive_recursive_)\r\n            {\r\n                exclusive_recursion_count_ += 1;\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, \"\", info);\r\n            }\r\n        }\r\n\r\n        if (exclusive_owner_ == state_free)\r\n        {\r\n            RL_VERIFY(exclusive_recursion_count_ == 0);\r\n            //!!! probability rand\r\n            if (true == failing_try_lock_\r\n                && false == try_lock_failed_\r\n                && c.rand(2, sched_type_user))\r\n            {\r\n                try_lock_failed_ = true;\r\n                RL_HIST(event_t) {this, event_t::type_spuriously_failed_try_lock} RL_HIST_END();\r\n                return false;\r\n            }\r\n            else\r\n            {\r\n                sync_.acquire(c.threadx_);\r\n                exclusive_recursion_count_ = 1;\r\n                exclusive_owner_ = my_id;\r\n                RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END();\r\n                return true;\r\n            }\r\n        }\r\n        else\r\n        {\r\n            //!!! 
in some implementation here must be acquire\r\n            //sync_.acquire(c.threadx_);\r\n\r\n            RL_VERIFY(my_id != exclusive_owner_);\r\n            RL_HIST(event_t) {this, event_t::type_failed_try_lock} RL_HIST_END();\r\n            return false;\r\n        }\r\n    }\r\n\r\n    virtual void unlock_exclusive(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n        RL_VERIFY(false == c.invariant_executing);\r\n\r\n        thread_id_t const my_id = c.threadx_->index_;\r\n\r\n        if (exclusive_owner_ != my_id)\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_unlock} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_unlocking_mutex_wo_ownership, \"\", info);\r\n        }\r\n\r\n        exclusive_recursion_count_ -= 1;\r\n        if (exclusive_recursion_count_)\r\n        {\r\n            RL_VERIFY(is_exclusive_recursive_);\r\n            RL_HIST(event_t) {this, event_t::type_recursive_unlock} RL_HIST_END();\r\n            return;\r\n        }\r\n\r\n        sync_.release(c.threadx_);\r\n        exclusive_owner_ = state_free;\r\n        RL_VERIFY(exclusive_recursion_count_ == 0);\r\n\r\n        if (false == exclusive_waitset_.unpark_one(c, info))\r\n            shared_waitset_.unpark_all(c, info);\r\n\r\n        RL_HIST(event_t) {this, event_t::type_unlock} RL_HIST_END();\r\n    }\r\n\r\n    virtual void lock_shared(debug_info_param info)\r\n    {\r\n        RL_VERIFY(is_rw_);\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n        RL_VERIFY(false == c.invariant_executing);\r\n\r\n        thread_id_t const my_id = c.threadx_->index_;\r\n\r\n        if (exclusive_owner_ == my_id)\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_mutex_write_to_read_upgrade, \"\", info);\r\n        }\r\n\r\n        if (exclusive_owner_ == 
state_shared && shared_owner_[my_id])\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_recursive_lock_shared} RL_HIST_END();\r\n            if (is_shared_recursive_)\r\n            {\r\n                shared_owner_[my_id] += 1;\r\n                shared_lock_count_ += 1;\r\n                return;\r\n            }\r\n            else\r\n            {\r\n                RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, \"\", info);\r\n            }\r\n        }\r\n\r\n        for (;;)\r\n        {\r\n            if ((exclusive_owner_ == state_free)\r\n                || (exclusive_owner_ == state_shared\r\n                    && false == exclusive_waitset_))\r\n            {\r\n                sync_.acquire(c.threadx_);\r\n                shared_owner_[my_id] += 1;\r\n                shared_lock_count_ += 1;\r\n                exclusive_owner_ = state_shared;\r\n                RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END();\r\n                break;\r\n            }\r\n            else\r\n            {\r\n                RL_VERIFY(my_id != exclusive_owner_);\r\n                RL_HIST(event_t) {this, event_t::type_wait} RL_HIST_END();\r\n                shared_waitset_.park_current(c, false, false, false, info);\r\n            }\r\n\r\n            //??? 
c.sched();\r\n            //sign_.check(info);\r\n        }\r\n    }\r\n\r\n    virtual bool try_lock_shared(debug_info_param info)\r\n    {\r\n        RL_VERIFY(is_rw_);\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n        RL_VERIFY(false == c.invariant_executing);\r\n\r\n        thread_id_t const my_id = c.threadx_->index_;\r\n\r\n        if (exclusive_owner_ == my_id)\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_mutex_write_to_read_upgrade, \"\", info);\r\n        }\r\n\r\n        if (exclusive_owner_ == state_shared && shared_owner_[my_id])\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_recursive_lock_shared} RL_HIST_END();\r\n            if (is_shared_recursive_)\r\n            {\r\n                shared_owner_[my_id] += 1;\r\n                shared_lock_count_ += 1;\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, \"\", info);\r\n            }\r\n        }\r\n\r\n        if ((exclusive_owner_ == state_free)\r\n            || (exclusive_owner_ == state_shared\r\n                && false == exclusive_waitset_))\r\n        {\r\n            //!!! 
probability rand\r\n            if (true == failing_try_lock_\r\n                && false == try_lock_failed_\r\n                && c.rand(2, sched_type_user))\r\n            {\r\n                try_lock_failed_ = true;\r\n                RL_HIST(event_t) {this, event_t::type_spuriously_failed_try_lock_shared} RL_HIST_END();\r\n                return false;\r\n            }\r\n            else\r\n            {\r\n                sync_.acquire(c.threadx_);\r\n                shared_owner_[my_id] += 1;\r\n                shared_lock_count_ += 1;\r\n                exclusive_owner_ = state_shared;\r\n                RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END();\r\n                return true;\r\n            }\r\n        }\r\n        else\r\n        {\r\n            RL_VERIFY(my_id != exclusive_owner_);\r\n            RL_HIST(event_t) {this, event_t::type_failed_try_lock_shared} RL_HIST_END();\r\n            return false;\r\n        }\r\n    }\r\n\r\n    virtual void unlock_shared(debug_info_param info)\r\n    {\r\n        RL_VERIFY(is_rw_);\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n        RL_VERIFY(false == c.invariant_executing);\r\n\r\n        thread_id_t const my_id = c.threadx_->index_;\r\n\r\n        if (exclusive_owner_ != state_shared || 0 == shared_owner_[my_id])\r\n        {\r\n            RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_unlocking_mutex_wo_ownership, \"\", info);\r\n        }\r\n\r\n        RL_VERIFY(shared_lock_count_);\r\n        shared_owner_[my_id] -= 1;\r\n        shared_lock_count_ -= 1;\r\n        if (shared_lock_count_ != 0)\r\n        {\r\n            if (shared_owner_[my_id])\r\n            {\r\n                RL_VERIFY(is_shared_recursive_);\r\n                RL_HIST(event_t) {this, event_t::type_recursive_unlock_shared} RL_HIST_END();\r\n            }\r\n            else\r\n            {\r\n       
         sync_.release(c.threadx_);\r\n                RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END();\r\n            }\r\n            return;\r\n        }\r\n\r\n        sync_.release(c.threadx_);\r\n        exclusive_owner_ = state_free;\r\n\r\n        exclusive_waitset_.unpark_one(c, info);\r\n\r\n        RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END();\r\n    }\r\n\r\n    virtual void unlock_exclusive_or_shared(debug_info_param info)\r\n    {\r\n        if (exclusive_owner_ == ctx().threadx_->index_)\r\n            unlock_exclusive(info);\r\n        else\r\n            unlock_shared(info);\r\n    }\r\n\r\n    virtual bool is_signaled(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        return (exclusive_owner_ == state_free);\r\n    }\r\n\r\n    virtual void memory_acquire(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        sync_.acquire(ctx().threadx_);\r\n    }\r\n\r\n    virtual void* prepare_wait(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        return &exclusive_waitset_;\r\n    }\r\n\r\nprivate:\r\n    static thread_id_t const state_shared = (thread_id_t)-1;\r\n    static thread_id_t const state_free = (thread_id_t)-2;\r\n\r\n    signature<0xbabaf1f1> sign_;\r\n    bool is_rw_;\r\n    bool is_exclusive_recursive_;\r\n    bool is_shared_recursive_;\r\n    bool failing_try_lock_;\r\n    sync_var<thread_count> sync_;\r\n    thread_id_t exclusive_owner_;\r\n    unsigned exclusive_recursion_count_;\r\n    waitset<thread_count> exclusive_waitset_;\r\n    waitset<thread_count> shared_waitset_;\r\n    timestamp_t shared_owner_ [thread_count];\r\n    unsigned shared_lock_count_;\r\n    bool try_lock_failed_;\r\n\r\n    RL_NOCOPY(generic_mutex_data_impl);\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename type>\r\nclass generic_mutex : public win_waitable_object\r\n{\r\npublic:\r\n    generic_mutex()\r\n        : impl_()\r\n    {\r\n    }\r\n\r\n    generic_mutex(generic_mutex const&)\r\n     
   : impl_()\r\n    {\r\n    }\r\n\r\n    generic_mutex& operator = (generic_mutex const&)\r\n    {\r\n        return *this;\r\n    }\r\n\r\n    ~generic_mutex()\r\n    {\r\n    }\r\n\r\n    void init(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_mutex, \"\", info);\r\n        sign_.check(info);\r\n        impl_ = c.mutex_ctor(is_rw, is_exclusive_recursive, is_shared_recursive, failing_try_lock);\r\n    }\r\n\r\n    void deinit(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        check(info);\r\n        c.mutex_dtor(impl_);\r\n        impl_ = 0;\r\n    }\r\n\r\n    void lock(debug_info_param info)\r\n    {\r\n        lock_exclusive(info);\r\n    }\r\n\r\n    bool lock_exclusive_timed(debug_info_param info)\r\n    {\r\n        return check(info)->lock_exclusive(true, info);\r\n    }\r\n\r\n    void unlock(debug_info_param info)\r\n    {\r\n        unlock_exclusive(info);\r\n    }\r\n\r\n    void lock_exclusive(debug_info_param info)\r\n    {\r\n        check(info)->lock_exclusive(false, info);\r\n    }\r\n\r\n    bool try_lock_exclusive(debug_info_param info)\r\n    {\r\n        return check(info)->try_lock_exclusive(info);\r\n    }\r\n\r\n    void unlock_exclusive(debug_info_param info)\r\n    {\r\n        check(info)->unlock_exclusive(info);\r\n    }\r\n\r\n    void lock_shared(debug_info_param info)\r\n    {\r\n        check(info)->lock_shared(info);\r\n    }\r\n\r\n    bool try_lock_shared(debug_info_param info)\r\n    {\r\n        return check(info)->try_lock_shared(info);\r\n    }\r\n\r\n    void unlock_shared(debug_info_param info)\r\n    {\r\n        check(info)->unlock_shared(info);\r\n    }\r\n\r\n    void unlock_exclusive_or_shared(debug_info_param info)\r\n    {\r\n        check(info)->unlock_exclusive_or_shared(info);\r\n    }\r\n\r\nprivate:\r\n    
generic_mutex_data* impl_;\r\n    signature<0x6A6cB03A> sign_;\r\n\r\n    generic_mutex_data* check(debug_info_param info)\r\n    {\r\n        RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_mutex, \"\", info);\r\n        sign_.check(info);\r\n        return impl_;\r\n    }\r\n\r\n    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info)\r\n    {\r\n        if (try_wait)\r\n        {\r\n            if (check(info)->try_lock_exclusive(info))\r\n                return sema_wakeup_reason_success;\r\n            else\r\n                return sema_wakeup_reason_failed;\r\n        }\r\n        else\r\n        {\r\n            if (check(info)->lock_exclusive(is_timed, info))\r\n                return sema_wakeup_reason_success;\r\n            else\r\n                return sema_wakeup_reason_timeout;\r\n\r\n        }\r\n    }\r\n\r\n    virtual bool signal(debug_info_param info)\r\n    {\r\n        check(info)->unlock_exclusive(info);\r\n        return true;\r\n    }\r\n\r\n    virtual bool is_signaled(debug_info_param info)\r\n    {\r\n        return check(info)->is_signaled(info);\r\n    }\r\n\r\n    virtual void memory_acquire(debug_info_param info)\r\n    {\r\n        check(info)->memory_acquire(info);\r\n    }\r\n\r\n    virtual void* prepare_wait(debug_info_param info)\r\n    {\r\n        return check(info)->prepare_wait(info);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename tag, bool is_recursive>\r\nclass std_generic_mutex : generic_mutex<tag>, nocopy<>\r\n{\r\npublic:\r\n    std_generic_mutex()\r\n    {\r\n        generic_mutex<tag>::init(false, is_recursive, false, true, $);\r\n    }\r\n\r\n    ~std_generic_mutex()\r\n    {\r\n        generic_mutex<tag>::deinit($);\r\n    }\r\n\r\n    void lock(debug_info_param info)\r\n    {\r\n        generic_mutex<tag>::lock_exclusive(info);\r\n    }\r\n\r\n    bool try_lock(debug_info_param info)\r\n    {\r\n        return generic_mutex<tag>::try_lock_exclusive(info);\r\n    
}\r\n\r\n    void unlock(debug_info_param info)\r\n    {\r\n        generic_mutex<tag>::unlock_exclusive(info);\r\n    }\r\n};\r\n\r\n\r\nstruct mutex_tag_std;\r\ntypedef std_generic_mutex<mutex_tag_std, false> mutex;\r\n\r\nstruct mutex_tag_std_recursive;\r\ntypedef std_generic_mutex<mutex_tag_std_recursive, true> recursive_mutex;\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/stdlib/pthread.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_PTHREAD_HPP\r\n#define RL_PTHREAD_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"mutex.hpp\"\r\n#include \"condition_variable.hpp\"\r\n#include \"semaphore.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\nenum RL_POSIX_ERROR_CODE\r\n{\r\n    RL_SUCCESS,\r\n    RL_EINVAL,\r\n    RL_ETIMEDOUT,\r\n    RL_EBUSY,\r\n    RL_EINTR,\r\n    RL_EAGAIN,\r\n    RL_EWOULDBLOCK,\r\n};\r\n\r\n\r\ninline void rl_sched_yield(debug_info_param info)\r\n{\r\n    yield(1, info);\r\n}\r\n\r\n\r\ntypedef win_waitable_object* rl_pthread_t;\r\ntypedef void* rl_pthread_attr_t;\r\n\r\ninline int rl_pthread_create(rl_pthread_t* th, rl_pthread_attr_t* attr, void* (*func) (void*), void* arg, debug_info_param info)\r\n{\r\n    (void)attr;\r\n    (void)info;//!!!\r\n    RL_VERIFY(th && func);\r\n    th[0] = ctx().create_thread(func, arg);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_join(rl_pthread_t th, void** res, debug_info_param info)\r\n{\r\n    RL_VERIFY(th && res);\r\n    res[0] = 0; //!!!\r\n    th->wait(false, false, info);\r\n    return 0;\r\n}\r\n\r\n\r\n\r\n\r\nstruct sem_tag_pthread;\r\ntypedef semaphore<sem_tag_pthread> rl_sem_t;\r\n\r\ninline int rl_sem_init(rl_sem_t* sema, int /*pshared*/, unsigned int initial_count, debug_info_param info)\r\n{\r\n    RL_VERIFY(initial_count >= 0);\r\n    sema->init(true, initial_count, INT_MAX, info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_sem_destroy(rl_sem_t* sema, debug_info_param info)\r\n{\r\n    sema->deinit(info);\r\n    return 0;\r\n}\r\n\r\ninline int 
rl_sem_wait(rl_sem_t* sema, debug_info_param info)\r\n{\r\n    sema_wakeup_reason reason = sema->wait(false, false, info);\r\n    if (reason == sema_wakeup_reason_success)\r\n        return 0;\r\n    if (reason == sema_wakeup_reason_spurious)\r\n    {\r\n        set_errno(RL_EINTR);\r\n        return -1;\r\n    }\r\n    RL_VERIFY(false);\r\n    return -1;\r\n}\r\n\r\ninline int rl_sem_trywait(rl_sem_t* sema, debug_info_param info)\r\n{\r\n    sema_wakeup_reason reason = sema->wait(true, false, info);\r\n    if (sema_wakeup_reason_success == reason)\r\n        return 0;\r\n    if (sema_wakeup_reason_failed == reason)\r\n    {\r\n        set_errno(RL_EAGAIN);\r\n        return -1;\r\n    }\r\n    if (sema_wakeup_reason_spurious == reason)\r\n    {\r\n        set_errno(RL_EINTR);\r\n        return -1;\r\n    }\r\n    RL_VERIFY(false);\r\n    return -1;\r\n}\r\n\r\ninline int rl_sem_post(rl_sem_t* sema, debug_info_param info)\r\n{\r\n    unsigned prev_cout = 0;\r\n    bool result = sema->post(1, prev_cout, info);\r\n    RL_VERIFY(result);\r\n    (void)result;\r\n    return 0;\r\n}\r\n\r\ninline int rl_sem_getvalue(rl_sem_t* sema, int* value, debug_info_param info)\r\n{\r\n    RL_VERIFY(value);\r\n    if (value)\r\n        value[0] = sema->get_value(info);\r\n    return 0;\r\n}\r\n\r\n\r\n\r\n\r\nstruct mutex_tag_pthread_mtx;\r\ntypedef generic_mutex<mutex_tag_pthread_mtx> rl_pthread_mutex_t;\r\n\r\nstruct rl_pthread_mutexattr_t\r\n{\r\n    bool is_recursive_;\r\n};\r\n\r\nenum RL_PTHREAD_MUTEX_TYPE\r\n{\r\n    RL_PTHREAD_MUTEX_NORMAL,\r\n    RL_PTHREAD_MUTEX_ERRORCHECK,\r\n    RL_PTHREAD_MUTEX_RECURSIVE,\r\n    RL_PTHREAD_MUTEX_DEFAULT,\r\n};\r\n\r\ninline int rl_pthread_mutexattr_init(rl_pthread_mutexattr_t* attr, debug_info_param info)\r\n{\r\n    (void)info;\r\n    if (0 == attr)\r\n        return RL_EINVAL;\r\n    attr->is_recursive_ = false;\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_mutexattr_destroy(rl_pthread_mutexattr_t* attr, debug_info_param 
info)\r\n{\r\n    (void)info;\r\n    if (0 == attr)\r\n        return RL_EINVAL;\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_mutexattr_settype(rl_pthread_mutexattr_t* attr, int type, debug_info_param info)\r\n{\r\n    (void)info;\r\n    if (0 == attr)\r\n        return RL_EINVAL;\r\n    if (RL_PTHREAD_MUTEX_RECURSIVE == type)\r\n        attr->is_recursive_ = true;\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_mutex_init(rl_pthread_mutex_t* m, rl_pthread_mutexattr_t const* attr, debug_info_param info)\r\n{\r\n    bool is_recursive = attr && attr->is_recursive_;\r\n    m->init(false, is_recursive, false, false, info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_mutex_destroy(rl_pthread_mutex_t* m, debug_info_param info)\r\n{\r\n    m->deinit(info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_mutex_lock(rl_pthread_mutex_t* m, debug_info_param info)\r\n{\r\n    m->lock_exclusive(info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_mutex_timedlock(rl_pthread_mutex_t* m, const void* abs_timeout, debug_info_param info)\r\n{\r\n    (void)abs_timeout;\r\n    bool rv = m->lock_exclusive_timed(info);\r\n    return rv ? 0 : RL_ETIMEDOUT;\r\n}\r\n\r\ninline int rl_pthread_mutex_try_lock(rl_pthread_mutex_t* m, debug_info_param info)\r\n{\r\n    return m->try_lock_exclusive(info) ? 
0 : 1;\r\n}\r\n\r\ninline int rl_pthread_mutex_unlock(rl_pthread_mutex_t* m, debug_info_param info)\r\n{\r\n    m->unlock_exclusive(info);\r\n    return 0;\r\n}\r\n\r\n\r\n\r\nstruct mutex_tag_pthread_rwlock;\r\ntypedef generic_mutex<mutex_tag_pthread_rwlock> rl_pthread_rwlock_t;\r\n\r\ninline int rl_pthread_rwlock_init(rl_pthread_rwlock_t* lock, void const* /*attr*/, debug_info_param info)\r\n{\r\n    lock->init(true, false, true, false, info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_rwlock_destroy(rl_pthread_rwlock_t* lock, debug_info_param info)\r\n{\r\n    lock->deinit(info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_rwlock_rdlock(rl_pthread_rwlock_t* lock, debug_info_param info)\r\n{\r\n    lock->lock_shared(info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_rwlock_tryrdlock(rl_pthread_rwlock_t* lock, debug_info_param info)\r\n{\r\n    bool res = lock->try_lock_shared(info);\r\n    return res ? 0 : RL_EBUSY;\r\n}\r\n\r\ninline int rl_pthread_rwlock_wrlock(rl_pthread_rwlock_t* lock, debug_info_param info)\r\n{\r\n    lock->lock_exclusive(info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_rwlock_trywrlock(rl_pthread_rwlock_t* lock, debug_info_param info)\r\n{\r\n    bool res = lock->try_lock_exclusive(info);\r\n    return res ? 
0 : RL_EBUSY;\r\n}\r\n\r\ninline int rl_pthread_rwlock_unlock(rl_pthread_rwlock_t* lock, debug_info_param info)\r\n{\r\n    lock->unlock_exclusive_or_shared(info);\r\n    return 0;\r\n}\r\n\r\n\r\n\r\n\r\nstruct condvar_tag_pthread;\r\ntypedef condvar<condvar_tag_pthread> rl_pthread_cond_t;\r\ntypedef int rl_pthread_condattr_t;\r\n\r\ninline int rl_pthread_cond_init(rl_pthread_cond_t* cv, rl_pthread_condattr_t* /*condattr*/, debug_info_param info)\r\n{\r\n    cv->init(true, info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_cond_destroy(rl_pthread_cond_t* cv, debug_info_param info)\r\n{\r\n    cv->deinit(info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_cond_broadcast(rl_pthread_cond_t* cv, debug_info_param info)\r\n{\r\n    cv->notify_all(info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_cond_signal(rl_pthread_cond_t* cv, debug_info_param info)\r\n{\r\n    cv->notify_one(info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_pthread_cond_timedwait(rl_pthread_cond_t* cv, rl_pthread_mutex_t* m, void const* /*timespec*/, debug_info_param info)\r\n{\r\n    sema_wakeup_reason res = cv->wait(*m, true, info);\r\n    if (res == sema_wakeup_reason_success)\r\n        return 0;\r\n    else if (res == sema_wakeup_reason_timeout)\r\n        return RL_ETIMEDOUT;\r\n    else if (res == sema_wakeup_reason_spurious)\r\n        return RL_EINTR;\r\n    else \r\n        return RL_EINVAL;\r\n}\r\n\r\ninline int rl_pthread_cond_wait(rl_pthread_cond_t* cv, rl_pthread_mutex_t* m, debug_info_param info)\r\n{\r\n    sema_wakeup_reason res = cv->wait(*m, false, info);\r\n    if (res == sema_wakeup_reason_success)\r\n        return 0;\r\n    else if (res == sema_wakeup_reason_spurious)\r\n        return RL_EINTR;\r\n    else \r\n        return RL_EINVAL;\r\n}\r\n\r\n\t\r\n\t\r\n\t\r\nenum RL_FUTEX_OP\r\n{\r\n    RL_FUTEX_WAIT,\r\n    RL_FUTEX_WAKE,\r\n};\r\n\r\ninline int rl_int_futex_impl(context& c,\r\n                    atomic<int>* uaddr,\r\n                    int 
op,\r\n                    int val,\r\n                    struct timespec const* timeout,\r\n                    atomic<int>* uaddr2,\r\n                    int val3,\r\n                    debug_info_param info)\r\n{\r\n    (void)uaddr2;\r\n    (void)val3;\r\n    if (op == RL_FUTEX_WAIT)\r\n    {\r\n        c.sched();\r\n        c.atomic_thread_fence_seq_cst();\r\n        int v0;\r\n        {\r\n            preemption_disabler pd (c);\r\n            v0 = uaddr->load(mo_acquire, info);\r\n        }\r\n\tif (v0 != val)\r\n            return RL_EWOULDBLOCK;\r\n        unpark_reason reason = uaddr->wait(c, timeout != 0, true, info);\r\n        if (reason == unpark_reason_normal)\r\n            return 0;\r\n        else if (reason == unpark_reason_timeout)\r\n            return RL_ETIMEDOUT;\r\n        else if (reason == unpark_reason_spurious)\r\n            return RL_EINTR;\r\n        RL_VERIFY(false);\r\n        return RL_EINVAL;\r\n    }\r\n    else if (op == RL_FUTEX_WAKE)\r\n    {\r\n        if (val <= 0)\r\n            return 0;\r\n\r\n        c.sched();\r\n        c.atomic_thread_fence_seq_cst();\r\n        return uaddr->wake(c, val, info);\r\n    }\r\n    else\r\n    {\r\n        return RL_EINVAL;\r\n    }\r\n}\r\n\r\n    struct futex_event\r\n    {\r\n        void* addr_;\r\n        int   op_;\r\n        int   val_;\r\n        bool  timeout_;\r\n        int   res_;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << addr_ << std::dec << \"> futex(\"\r\n              << (op_ == RL_FUTEX_WAIT ? \"FUTEX_WAIT\" : op_ == RL_FUTEX_WAKE ? \"FUTEX_WAKE\" : \"UNSUPPORTED\") << \", \"\r\n              << val_ << \", \" << timeout_ << \") = \";\r\n            if (op_ == RL_FUTEX_WAKE)\r\n                s << res_;\r\n            else\r\n                s << (res_ == RL_EWOULDBLOCK ? \"EWOULDBLOCK\" : res_ == RL_ETIMEDOUT ? \"ETIMEDOUT\" : res_ == RL_EINTR ? 
\"EINTR\" : \"UNKNOWN\");\r\n        }\r\n    };\r\n\t\r\ninline int rl_futex(atomic<int>* uaddr,\r\n                    int op,\r\n                    int val,\r\n                    struct timespec const* timeout,\r\n                    atomic<int>* uaddr2,\r\n                    int val3,\r\n                    debug_info_param info)\r\n{\r\n    context& c = ctx();\r\n    int res = rl_int_futex_impl(c, uaddr, op, val, timeout, uaddr2, val3, info);\r\n    RL_HIST(futex_event) {uaddr, op, val, timeout != 0, res} RL_HIST_END();\r\n    return res;    \r\n}\r\n\r\n}\r\n\r\n\r\n\r\n#ifdef EINVAL\r\n#   undef EINVAL\r\n#endif\r\n#define EINVAL                  rl::RL_EINVAL\r\n\r\n#ifdef ETIMEDOUT\r\n#   undef ETIMEDOUT\r\n#endif\r\n#define ETIMEDOUT               rl::RL_ETIMEDOUT\r\n\r\n#ifdef EBUSY\r\n#   undef EBUSY\r\n#endif\r\n#define EBUSY                   rl::RL_EBUSY\r\n\r\n#ifdef EINTR\r\n#   undef EINTR\r\n#endif\r\n#define EINTR                   rl::RL_EINTR\r\n\r\n#ifdef EAGAIN\r\n#   undef EAGAIN\r\n#endif\r\n#define EAGAIN                  rl::RL_EAGAIN\r\n\r\n#ifdef EWOULDBLOCK\r\n#   undef EWOULDBLOCK\r\n#endif\r\n#define EWOULDBLOCK                  rl::RL_EWOULDBLOCK\r\n\r\n#define sched_yield() \\\r\n rl::rl_sched_yield($)\r\n\r\n#define pthread_yield() \\\r\n rl::rl_sched_yield($)\r\n\r\n\r\n\r\n#define pthread_t rl::rl_pthread_t\r\n#define pthread_attr_t rl::rl_pthread_attr_t\r\n\r\n#define pthread_create(th, attr, func, arg) \\\r\n rl::rl_pthread_create(th, attr, func, arg, $)\r\n\r\n#define pthread_join(th, res) \\\r\n rl::rl_pthread_join(th, res, $)\r\n\r\n\r\n\r\n\r\n#define sem_t rl::rl_sem_t\r\n\r\n#define sem_init(sema, pshared, initial_count)\\\r\n rl::rl_sem_init(sema, pshared, initial_count, $)\r\n\r\n#define sem_destroy(sema)\\\r\n rl::rl_sem_destroy(sema, $)\r\n\r\n#define sem_wait(sema)\\\r\n rl::rl_sem_wait(sema, $)\r\n\r\n#define sem_trywait(sema)\\\r\n rl::rl_sem_trywait(sema, $)\r\n\r\n#define 
sem_post(sema)\\\r\nrl::rl_sem_post(sema, $)\r\n\r\n#define sem_getvalue(sema, pvalue)\\\r\n rl::rl_sem_getvalue(sema, pvalue, $)\r\n\r\n\r\n\r\n\r\n\r\n#define pthread_mutex_t             rl::rl_pthread_mutex_t\r\n#define pthread_mutexattr_t         rl::rl_pthread_mutexattr_t\r\n\r\n#ifdef PTHREAD_MUTEX_NORMAL\r\n#   undef PTHREAD_MUTEX_NORMAL\r\n#   undef PTHREAD_MUTEX_ERRORCHECK\r\n#   undef PTHREAD_MUTEX_RECURSIVE\r\n#   undef PTHREAD_MUTEX_DEFAULT\r\n#endif\r\n\r\n#define PTHREAD_MUTEX_NORMAL        rl::RL_PTHREAD_MUTEX_NORMAL\r\n#define PTHREAD_MUTEX_ERRORCHECK    rl::RL_PTHREAD_MUTEX_ERRORCHECK\r\n#define PTHREAD_MUTEX_RECURSIVE     rl::RL_PTHREAD_MUTEX_RECURSIVE\r\n#define PTHREAD_MUTEX_DEFAULT       rl::RL_PTHREAD_MUTEX_DEFAULT\r\n\r\n#define pthread_mutexattr_init(attr) \\\r\n rl::rl_pthread_mutexattr_init(attr, $)\r\n\r\n#define pthread_mutexattr_destroy(attr) \\\r\n rl::rl_pthread_mutexattr_destroy(attr, $)\r\n\r\n#define pthread_mutexattr_settype(attr, type) \\\r\n rl::rl_pthread_mutexattr_settype(attr, type, $)\r\n\r\n#define pthread_mutex_init(m, attr) \\\r\n rl::rl_pthread_mutex_init(m, attr, $)\r\n\r\n#define pthread_mutex_destroy(m) \\\r\n rl::rl_pthread_mutex_destroy(m, $)\r\n\r\n#define pthread_mutex_lock(m) \\\r\n rl::rl_pthread_mutex_lock(m, $)\r\n\r\n#define pthread_mutex_timedlock(m, abs_timeout) \\\r\n rl::rl_pthread_mutex_timedlock(m, abs_timeout, $)\r\n\r\n#define pthread_mutex_try_lock(m) \\\r\n rl::rl_pthread_mutex_try_lock(m, $)\r\n\r\n#define pthread_mutex_unlock(m) \\\r\n rl::rl_pthread_mutex_unlock(m, $)\r\n\r\n#define pthread_rwlock_t rl::rl_pthread_rwlock_t\r\n\r\n#define pthread_rwlock_init(lock, attr) \\\r\n rl::rl_pthread_rwlock_init(lock, attr, $)\r\n\r\n#define pthread_rwlock_destroy(lock) \\\r\n rl::rl_pthread_rwlock_destroy(lock, $)\r\n\r\n#define pthread_rwlock_rdlock(lock) \\\r\n rl::rl_pthread_rwlock_rdlock(lock, $)\r\n\r\n#define pthread_rwlock_tryrdlock(lock) \\\r\n rl::rl_pthread_rwlock_tryrdlock(lock, 
$)\r\n\r\n#define pthread_rwlock_wrlock(lock) \\\r\n rl::rl_pthread_rwlock_wrlock(lock, $)\r\n\r\n#define pthread_rwlock_trywrlock(lock) \\\r\n rl::rl_pthread_rwlock_trywrlock(lock, $)\r\n\r\n#define pthread_rwlock_unlock(lock) \\\r\n rl::rl_pthread_rwlock_unlock(lock, $)\r\n\r\n\r\n\r\n\r\n#define pthread_cond_t rl::rl_pthread_cond_t\r\n#define pthread_condattr_t rl::rl_pthread_condattr_t\r\n\r\n#define pthread_cond_init(cv, condattr) \\\r\n rl::rl_pthread_cond_init(cv, condattr, $)\r\n\r\n#define pthread_cond_destroy(cv) \\\r\n rl::rl_pthread_cond_destroy(cv, $)\r\n\r\n#define pthread_cond_broadcast(cv) \\\r\n rl::rl_pthread_cond_broadcast(cv, $)\r\n\r\n#define pthread_cond_signal(cv) \\\r\n rl::rl_pthread_cond_signal(cv, $)\r\n\r\n#define pthread_cond_timedwait(cv, m, timespec) \\\r\n rl::rl_pthread_cond_timedwait(cv, m, timespec, $)\r\n\r\n#define pthread_cond_wait(cv, m) \\\r\n rl::rl_pthread_cond_wait(cv, m, $)\r\n\r\n\r\n\r\n#ifdef FUTEX_WAKE\r\n#   undef FUTEX_WAKE\r\n#endif\r\n#define FUTEX_WAKE                  rl::RL_FUTEX_WAKE\r\n\r\n#ifdef FUTEX_WAIT\r\n#   undef FUTEX_WAIT\r\n#endif\r\n#define FUTEX_WAIT                  rl::RL_FUTEX_WAIT\r\n\r\n#define futex(uaddr, op, val, timeout, uaddr2, val3) \\\r\n rl::rl_futex(uaddr, op, val, timeout, uaddr2, val3, $)\r\n\r\n#endif\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/stdlib/semaphore.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_SEMAPHORE_HPP\r\n#define RL_SEMAPHORE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"../base.hpp\"\r\n#include \"../context_base.hpp\"\r\n#include \"../sync_var.hpp\"\r\n#include \"../waitset.hpp\"\r\n#include \"../signature.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\nenum sema_wakeup_reason\r\n{\r\n    sema_wakeup_reason_success,\r\n    sema_wakeup_reason_failed,\r\n    sema_wakeup_reason_timeout,\r\n    sema_wakeup_reason_spurious,\r\n};\r\n\r\nstruct win_object\r\n{\r\n    virtual void deinit(debug_info_param info) = 0;\r\n    virtual ~win_object() {}\r\n};\r\n\r\nstruct win_waitable_object : win_object\r\n{\r\n    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) = 0;\r\n    virtual bool signal(debug_info_param info) = 0;\r\n\r\n    virtual bool is_signaled(debug_info_param info) = 0;\r\n    virtual void memory_acquire(debug_info_param info) = 0;\r\n    virtual void* prepare_wait(debug_info_param info) = 0;\r\n};\r\n\r\n\r\n\r\n\r\nstruct sema_data\r\n{\r\n    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) = 0;\r\n    virtual bool post(unsigned count, unsigned& prev_count, debug_info_param info) = 0;\r\n    virtual int get_value(debug_info_param info) = 0;\r\n    virtual bool is_signaled(debug_info_param info) = 0;\r\n    virtual void memory_acquire(debug_info_param info) = 0;\r\n    virtual void* prepare_wait(debug_info_param info) = 0;\r\n    virtual ~sema_data() {} // just to calm down 
gcc\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass sema_data_impl : public sema_data\r\n{\r\npublic:\r\n    sema_data_impl(bool spurious_wakeups, unsigned initial_count, unsigned max_count)\r\n        : spurious_wakeups_(spurious_wakeups)\r\n        , count_(initial_count)\r\n        , max_count_(max_count)\r\n    {\r\n        RL_VERIFY(max_count <= INT_MAX);\r\n    }\r\n\r\n    ~sema_data_impl()\r\n    {\r\n        //!!! detect destruction with waiters\r\n    }\r\n\r\n    struct wait_event\r\n    {\r\n        sema_data_impl*         addr_;\r\n        bool                    try_wait_;\r\n        bool                    is_timed_;\r\n        unsigned                count_;\r\n        sema_wakeup_reason      reason_;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << addr_ << std::dec << \"> semaphore: \";\r\n            if (try_wait_)\r\n                s << \"try_wait \";\r\n            else if (is_timed_)\r\n                s << \"timed wait \";\r\n            else\r\n                s << \"wait \";\r\n\r\n            if (reason_ == sema_wakeup_reason_success)\r\n                s << \"succeeded \";\r\n            else if (reason_ == sema_wakeup_reason_failed)\r\n                s << \"failed \";\r\n            else if (reason_ == sema_wakeup_reason_timeout)\r\n                s << \"timed out \";\r\n            else if (reason_ == sema_wakeup_reason_spurious)\r\n                s << \"spuriously failed \";\r\n\r\n            s << \"new_count=\" << count_;\r\n        }\r\n    };\r\n\r\n    struct post_event\r\n    {\r\n        sema_data_impl*         addr_;\r\n        unsigned                value_;\r\n        unsigned                count_;\r\n        bool                    result_;\r\n        thread_id_t             unblocked_;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << addr_ << std::dec << \"> semaphore: \";\r\n            if 
(result_)\r\n                s << \"post \";\r\n            else\r\n                s << \"post FAILED \";\r\n\r\n            s << \"value=\" << value_;\r\n            s << \" new_count=\" << count_;\r\n            s << \" unblocked=\" << unblocked_;\r\n        }\r\n    };\r\n\r\n    struct get_value_event\r\n    {\r\n        sema_data_impl* addr_;\r\n        unsigned count_;\r\n\r\n        void output(std::ostream& s) const\r\n        {\r\n            s << \"<\" << std::hex << addr_ << std::dec << \"> semaphore: \";\r\n            s << \"get_value count=\" << count_;\r\n        }\r\n    };\r\n\r\n    virtual sema_wakeup_reason wait(bool try_wait,\r\n                                    bool is_timed,\r\n                                    debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        sema_wakeup_reason reason = sema_wakeup_reason_success;\r\n        for (;;)\r\n        {\r\n            if (count_)\r\n            {\r\n                count_ -= 1;\r\n                sync_.acq_rel(c.threadx_);\r\n                reason = sema_wakeup_reason_success;\r\n                break;\r\n            }\r\n\r\n            if (try_wait)\r\n            {\r\n                sync_.acquire(c.threadx_);\r\n                reason = sema_wakeup_reason_failed;\r\n                break;\r\n            }\r\n\r\n            unpark_reason wr = ws_.park_current(c, is_timed, spurious_wakeups_, true, info);\r\n            if (unpark_reason_timeout == wr)\r\n            {\r\n                RL_VERIFY(is_timed);\r\n                sync_.acquire(c.threadx_);\r\n                reason = sema_wakeup_reason_timeout;\r\n                break;\r\n            }\r\n            else if (unpark_reason_spurious == wr)\r\n            {\r\n                RL_VERIFY(spurious_wakeups_);\r\n                sync_.acquire(c.threadx_);\r\n                reason = sema_wakeup_reason_spurious;\r\n                break;\r\n            
}\r\n            else if (unpark_reason_normal == wr)\r\n            {\r\n                RL_VERIFY(count_ > 0);\r\n                count_ -= 1;\r\n                sync_.acq_rel(c.threadx_);\r\n                c.switch_back(info);\r\n                reason = sema_wakeup_reason_success;\r\n                break;\r\n            }\r\n            RL_VERIFY(false);\r\n        }\r\n\r\n        RL_HIST(wait_event) {this, try_wait, is_timed, count_, reason} RL_HIST_END();\r\n        return reason;\r\n    }\r\n\r\n    virtual bool post(unsigned count, unsigned& prev_count, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n\r\n        bool result = false;\r\n        prev_count = count_;\r\n        thread_id_t unblocked = 0;\r\n        if (false == (count >= INT_MAX || count + count_ > max_count_))\r\n        {\r\n            result = true;\r\n            count_ += count;\r\n            sync_.acq_rel(c.threadx_);\r\n            for (unsigned i = 0; i != count; ++i)\r\n            {\r\n                if (false == ws_.unpark_one(c, info))\r\n                    break;\r\n                unblocked += 1;\r\n            }\r\n        }\r\n        else\r\n        {\r\n            sync_.acquire(c.threadx_);\r\n        }\r\n        RL_HIST(post_event) {this, count, count_, result, unblocked} RL_HIST_END();\r\n        return result;\r\n    }\r\n\r\n    virtual int get_value(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        c.sched();\r\n        sign_.check(info);\r\n        \r\n        RL_VERIFY(count_ <= INT_MAX);\r\n        int result = (int)count_ - ws_.size();\r\n        sync_.acquire(c.threadx_);\r\n\r\n        RL_HIST(get_value_event) {this, (unsigned)result} RL_HIST_END();\r\n        return result;\r\n    }\r\n\r\nprivate:\r\n    signature<0xaabb6634> sign_;\r\n    bool const spurious_wakeups_;\r\n    unsigned count_;\r\n    unsigned const max_count_;\r\n    waitset<thread_count> 
ws_;\r\n    sync_var<thread_count> sync_;\r\n\r\n    virtual bool is_signaled(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        return count_ > 0;\r\n    }\r\n\r\n    virtual void memory_acquire(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        sync_.acquire(ctx().threadx_);\r\n    }\r\n\r\n    virtual void* prepare_wait(debug_info_param info)\r\n    {\r\n        (void)info;\r\n        return &ws_;\r\n    }\r\n\r\n    RL_NOCOPY(sema_data_impl);\r\n};\r\n\r\n\r\n\r\ntemplate<typename tag_t>\r\nclass semaphore : public win_waitable_object\r\n{\r\npublic:\r\n    semaphore()\r\n        : impl_()\r\n    {\r\n    }\r\n\r\n    semaphore(semaphore const&)\r\n        : impl_()\r\n    {\r\n    }\r\n\r\n    semaphore& operator = (semaphore const&)\r\n    {\r\n        return *this;\r\n    }\r\n\r\n    void init(bool spurious_wakeups, unsigned initial_count, unsigned max_count, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_semaphore, \"\", info);\r\n        sign_.check(info);\r\n        impl_ = c.sema_ctor(spurious_wakeups, initial_count, max_count);\r\n    }\r\n\r\n    void deinit(debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        check(info);\r\n        c.sema_dtor(impl_);\r\n        impl_ = 0;\r\n    }\r\n\r\n    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info)\r\n    {\r\n        check(info);\r\n        return impl_->wait(try_wait, is_timed, info);\r\n    }\r\n\r\n    virtual bool signal(debug_info_param info)\r\n    {\r\n        unsigned prev_count = 0;\r\n        return post(1, prev_count, info);\r\n    }\r\n\r\n    bool post(unsigned count, unsigned& prev_count, debug_info_param info)\r\n    {\r\n        check(info);\r\n        return impl_->post(count, prev_count, info);\r\n    }\r\n\r\n    int get_value(debug_info_param info)\r\n    {\r\n        check(info);\r\n        return 
impl_->get_value(info);\r\n    }\r\n\r\nprivate:\r\n    sema_data* impl_;\r\n    signature<0x228855dd> sign_;\r\n\r\n    sema_data* check(debug_info_param info)\r\n    {\r\n        RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_semaphore, \"\", info);\r\n        sign_.check(info);\r\n        return impl_;\r\n    }\r\n\r\n    virtual bool is_signaled(debug_info_param info)\r\n    {\r\n        return check(info)->is_signaled(info);\r\n    }\r\n\r\n    virtual void memory_acquire(debug_info_param info)\r\n    {\r\n        check(info)->memory_acquire(info);\r\n    }\r\n\r\n    virtual void* prepare_wait(debug_info_param info)\r\n    {\r\n        return check(info)->prepare_wait(info);\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct wfmo_event\r\n{\r\n    unsigned long               count_;\r\n    bool                        wait_all_;\r\n    bool                        try_wait_;\r\n    bool                        is_timed_;\r\n    sema_wakeup_reason          result_;\r\n    size_t                      signaled_;\r\n\r\n    void output(std::ostream& s) const\r\n    {\r\n        s   << \"WFMO: \"\r\n            << \"count=\" << count_\r\n            << \", wait_all=\" << wait_all_\r\n            << \", try_wait=\" << try_wait_\r\n            << \", is_timed=\" << is_timed_\r\n            << \", result=\";\r\n        if (sema_wakeup_reason_success == result_)\r\n        {\r\n            s << \"success\";\r\n            if (wait_all_ == false)\r\n                s << \", object=\" << signaled_;\r\n        }\r\n        else\r\n        {\r\n            s << \"timeout\";\r\n        }\r\n    }\r\n};\r\n\r\nsize_t const wfmo_max_objects = 32;\r\n\r\ninline sema_wakeup_reason wait_for_multiple_objects(\r\n    size_t& signaled,\r\n    size_t count,\r\n    win_waitable_object** wo,\r\n    bool wait_all,\r\n    bool try_wait,\r\n    bool is_timed,\r\n    debug_info_param info)\r\n{\r\n    context& c = ctx();\r\n    c.sched();\r\n\r\n    RL_VERIFY(count <= wfmo_max_objects);\r\n   
 void* ws [wfmo_max_objects];\r\n\r\n    sema_wakeup_reason result = sema_wakeup_reason_failed;\r\n    signaled = 0;\r\n\r\n    if (wait_all)\r\n    {\r\n        for (;;)\r\n        {\r\n            unsigned long i = 0;\r\n            for (i = 0; i != count; ++i)\r\n            {\r\n                if (false == wo[i]->is_signaled(info))\r\n                    break;\r\n            }\r\n            if (i == count)\r\n            {\r\n                preemption_disabler pd (c);\r\n                for (i = 0; i != count; ++i)\r\n                {\r\n                    sema_wakeup_reason r = wo[i]->wait(true, false, info);\r\n                    RL_VERIFY(r == sema_wakeup_reason_success);\r\n                    (void)r;\r\n                }\r\n                result = sema_wakeup_reason_success;\r\n                break;\r\n            }\r\n            else if (try_wait)\r\n            {\r\n                for (i = 0; i != count; ++i)\r\n                    wo[i]->memory_acquire(info);\r\n                result = sema_wakeup_reason_timeout;\r\n                break;\r\n            }\r\n            else\r\n            {\r\n                for (i = 0; i != count; ++i)\r\n                {\r\n                    ws[i] = wo[i]->prepare_wait(info);\r\n                }\r\n                unpark_reason reason = c.wfmo_park(ws, wo, (unsigned)count, !!wait_all, is_timed, info);\r\n                RL_VERIFY(unpark_reason_spurious != reason);\r\n                if (unpark_reason_timeout == reason)\r\n                {\r\n                    for (i = 0; i != count; ++i)\r\n                        wo[i]->memory_acquire(info);\r\n                    result = sema_wakeup_reason_timeout;\r\n                    break;\r\n                }\r\n                else if (unpark_reason_normal == reason)\r\n                {\r\n                    {\r\n                        preemption_disabler pd (c);\r\n                        for (unsigned long i = 0; i != count; ++i)\r\n                
        {\r\n                            RL_VERIFY(wo[i]->is_signaled(info));\r\n                            sema_wakeup_reason r = wo[i]->wait(true, false, info);\r\n                            RL_VERIFY(r == sema_wakeup_reason_success);\r\n                            (void)r;\r\n                        }\r\n                    }\r\n                    c.switch_back(info);\r\n                    result = sema_wakeup_reason_success;\r\n                    break;\r\n                }\r\n                RL_VERIFY(false);\r\n            }\r\n        }\r\n    }\r\n    else\r\n    {\r\n        for (;;)\r\n        {\r\n            unsigned long i = 0;\r\n            for (i = 0; i != count; ++i)\r\n            {\r\n                if (true == wo[i]->is_signaled(info))\r\n                    break;\r\n            }\r\n            if (i != count)\r\n            {\r\n                preemption_disabler pd (c);\r\n                sema_wakeup_reason r = wo[i]->wait(true, false, info);\r\n                RL_VERIFY(r == sema_wakeup_reason_success);\r\n                (void)r;\r\n                signaled = i;\r\n                result = sema_wakeup_reason_success;\r\n                break;\r\n            }\r\n            else if (try_wait)\r\n            {\r\n                for (i = 0; i != count; ++i)\r\n                    wo[i]->memory_acquire(info);\r\n                result = sema_wakeup_reason_timeout;\r\n                break;\r\n            }\r\n            else\r\n            {\r\n                for (i = 0; i != count; ++i)\r\n                {\r\n                    ws[i] = wo[i]->prepare_wait(info);\r\n                }\r\n                unpark_reason reason = c.wfmo_park(ws, wo, (unsigned)count, !!wait_all, is_timed, info);\r\n                RL_VERIFY(unpark_reason_spurious != reason);\r\n                if (unpark_reason_timeout == reason)\r\n                {\r\n                    for (i = 0; i != count; ++i)\r\n                        
wo[i]->memory_acquire(info);\r\n                    result = sema_wakeup_reason_timeout;\r\n                    break;\r\n                }\r\n                else if (unpark_reason_normal == reason)\r\n                {\r\n                    unsigned long i = 0;\r\n                    for (i = 0; i != count; ++i)\r\n                    {\r\n                        if (true == wo[i]->is_signaled(info))\r\n                            break;\r\n                    }\r\n                    RL_VERIFY(i != count);\r\n                    {\r\n                        preemption_disabler pd (c);\r\n                        sema_wakeup_reason r = wo[i]->wait(true, false, info);\r\n                        RL_VERIFY(r == sema_wakeup_reason_success);\r\n                        (void)r;\r\n                    }\r\n                    c.switch_back(info);\r\n                    signaled = i;\r\n                    result = sema_wakeup_reason_success;\r\n                    break;\r\n                }\r\n                RL_VERIFY(false);\r\n            }\r\n        }\r\n    }\r\n    \r\n    RL_HIST(wfmo_event) {(unsigned)count, wait_all, try_wait, is_timed, result, signaled} RL_HIST_END();\r\n    return result;\r\n}\r\n\r\n\r\n}\r\n\r\n\r\n#endif\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/stdlib/windows.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2010, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE.TXT in this distribution.\r\n */\r\n\r\n#ifndef RL_WINDOWS_HPP\r\n#define RL_WINDOWS_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"mutex.hpp\"\r\n#include \"condition_variable.hpp\"\r\n#include \"semaphore.hpp\"\r\n#include \"event.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\ntypedef win_object* rl_HANDLE;\r\nunsigned long const rl_INFINITE = (unsigned long)-1;\r\n\r\nunsigned long const rl_WAIT_FAILED                = (unsigned long)-1;\r\nunsigned long const rl_WAIT_OBJECT_0              = 100;\r\nunsigned long const rl_WAIT_TIMEOUT               = 1;\r\nunsigned long const rl_WAIT_IO_COMPLETION         = 2;\r\nunsigned long const rl_MAXIMUM_WAIT_OBJECTS       = wfmo_max_objects;\r\n\r\n\r\ninline int rl_SwitchToThread(debug_info_param info)\r\n{\r\n    yield(1, info);\r\n    return 1;\r\n}\r\n\r\ninline void rl_Sleep(unsigned long milliseconds, debug_info_param info)\r\n{\r\n    yield(milliseconds ? milliseconds : 1, info);\r\n}\r\n\r\n\r\n\r\ninline unsigned long rl_WaitForSingleObjectEx(rl_HANDLE obj, unsigned long timeout, int alertable, debug_info_param info)\r\n{\r\n    (void)alertable; //!!! not yet supported  support it!\r\n    //!!! 
support WAIT_IO_COMPLETION\r\n    RL_VERIFY(false == alertable && \"Alertable wait is not supported in WaitForSingleObject() yet\");\r\n\r\n    bool try_wait = (timeout == 0);\r\n    bool is_timed = (timeout != rl_INFINITE);\r\n    sema_wakeup_reason reason = static_cast<win_waitable_object*>(obj)->wait(try_wait, is_timed, info);\r\n    if (reason == sema_wakeup_reason_success)\r\n        return rl_WAIT_OBJECT_0;\r\n    else if (reason == sema_wakeup_reason_timeout)\r\n        return rl_WAIT_TIMEOUT;\r\n    else if (reason == sema_wakeup_reason_failed)\r\n        return rl_WAIT_TIMEOUT;\r\n    RL_VERIFY(false);\r\n    return rl_WAIT_FAILED;\r\n}\r\n\r\ninline unsigned long rl_WaitForSingleObject(rl_HANDLE obj, unsigned long timeout, debug_info_param info)\r\n{\r\n    return rl_WaitForSingleObjectEx(obj, timeout, 0, info);\r\n}\r\n\r\ninline unsigned long rl_WaitForMultipleObjectsEx(unsigned long count, rl_HANDLE* objects, int wait_all, unsigned long timeout, int alertable, debug_info_param info)\r\n{\r\n    (void)alertable; //!!!\r\n    //!!! 
support WAIT_IO_COMPLETION\r\n    RL_VERIFY(false == alertable && \"Alertable wait is not supported in WaitForMultipleObjects() yet\");\r\n\r\n    bool try_wait = (timeout == 0);\r\n    bool is_timed = (timeout != rl_INFINITE);\r\n    win_waitable_object** obj = reinterpret_cast<win_waitable_object**>(objects);\r\n    size_t signaled = 0;\r\n    sema_wakeup_reason reason = wait_for_multiple_objects(signaled, count, obj, !!wait_all, try_wait, is_timed, info);\r\n    if (reason == sema_wakeup_reason_success)\r\n        return rl_WAIT_OBJECT_0 + (int)signaled;\r\n    else if (reason == sema_wakeup_reason_timeout)\r\n        return rl_WAIT_TIMEOUT;\r\n    RL_VERIFY(false);\r\n    return rl_WAIT_FAILED;\r\n}\r\n\r\ninline unsigned long rl_WaitForMultipleObjects(unsigned long count, rl_HANDLE* objects, int wait_all, unsigned long timeout, debug_info_param info)\r\n{\r\n    return rl_WaitForMultipleObjectsEx(count, objects, wait_all, timeout, 0, info);\r\n}\r\n\r\ninline unsigned long rl_SignalObjectAndWait(rl_HANDLE obj_to_signal,\r\n                                            rl_HANDLE obj_to_wait,\r\n                                            unsigned long timeout,\r\n                                            int alertable,\r\n                                            debug_info_param info)\r\n{\r\n    bool result = static_cast<win_waitable_object*>(obj_to_signal)->signal(info);\r\n    if (false == result)\r\n        return result ? 
1 : 0;\r\n    preemption_disabler pd (ctx());\r\n    return rl_WaitForSingleObjectEx(obj_to_wait, timeout, alertable, info);\r\n}\r\n\r\n\r\n\r\nstruct sem_tag_win;\r\n\r\ninline rl_HANDLE rl_CreateSemaphore(void* /*security*/, long initial_count, long max_count, void const* /*name*/, debug_info_param info)\r\n{\r\n    void* mem = ctx().alloc(sizeof(semaphore<sem_tag_win>), false, info);\r\n    semaphore<sem_tag_win>* sema = new (mem) semaphore<sem_tag_win>;\r\n    sema->init(false, initial_count, max_count, info);\r\n    return sema;\r\n}\r\n\r\ninline int rl_CloseHandle(rl_HANDLE h, debug_info_param info)\r\n{\r\n    h->deinit(info);\r\n    h->~win_object();\r\n    (ctx().free)(h, false, info); //!!! rename free because of the define\r\n    return 1;\r\n}\r\n\r\ninline int rl_ReleaseSemaphore(rl_HANDLE sema, long count, long* prev_count, debug_info_param info)\r\n{\r\n    unsigned prev = 0;\r\n    bool result = static_cast<semaphore<sem_tag_win>*>(sema)->post(count, prev, info);\r\n    if (prev_count)\r\n        prev_count[0] = prev;\r\n    return result ? 
1 : 0;\r\n}\r\n\r\n\r\n\r\n\r\ninline rl_HANDLE rl_CreateEvent(void* /*security*/, int manual_reset, int initial_state, void const* /*name*/, debug_info_param info)\r\n{\r\n    void* mem = ctx().alloc(sizeof(generic_event), false, info);\r\n    generic_event* ev = new (mem) generic_event;\r\n    ev->init(!!manual_reset, !!initial_state, info);\r\n    return ev;\r\n}\r\n\r\ninline int rl_SetEvent(rl_HANDLE ev, debug_info_param info)\r\n{\r\n    static_cast<generic_event*>(ev)->set(info);\r\n    return 1;\r\n}\r\n\r\ninline int rl_ResetEvent(rl_HANDLE ev, debug_info_param info)\r\n{\r\n    static_cast<generic_event*>(ev)->reset(info);\r\n    return 1;\r\n}\r\n\r\ninline int rl_PulseEvent(rl_HANDLE ev, debug_info_param info)\r\n{\r\n    static_cast<generic_event*>(ev)->pulse(info);\r\n    return 1;\r\n}\r\n\r\n\r\n\r\nstruct mutex_tag_win_cs;\r\ntypedef generic_mutex<mutex_tag_win_cs> rl_CRITICAL_SECTION;\r\n\r\ninline void rl_InitializeCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info)\r\n{\r\n    m->init(false, true, false, false, info);\r\n}\r\n\r\ninline int rl_InitializeCriticalSectionAndSpinCount(rl_CRITICAL_SECTION* m, unsigned long spin_count, debug_info_param info)\r\n{\r\n    (void)spin_count;\r\n    m->init(false, true, false, false, info);\r\n    return 1;\r\n}\r\n\r\ninline int rl_InitializeCriticalSectionEx(rl_CRITICAL_SECTION* m, unsigned long spin_count, unsigned long flags, debug_info_param info)\r\n{\r\n    (void)spin_count;\r\n    (void)flags;\r\n    m->init(false, true, false, false, info);\r\n    return 1;\r\n}\r\n\r\ninline void rl_DeleteCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info)\r\n{\r\n    m->deinit(info);\r\n}\r\n\r\ninline void rl_EnterCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info)\r\n{\r\n    m->lock_exclusive(info);\r\n}\r\n\r\ninline int rl_TryEnterCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info)\r\n{\r\n    return m->try_lock_exclusive(info) ? 
1 : 0;\r\n}\r\n\r\ninline void rl_LeaveCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info)\r\n{\r\n    m->unlock_exclusive(info);\r\n}\r\n\r\nstruct mutex_tag_win_srwl;\r\ntypedef generic_mutex<mutex_tag_win_srwl> rl_SRWLOCK;\r\n\r\ninline void rl_InitializeSRWLock(rl_SRWLOCK* lock, debug_info_param info)\r\n{\r\n    lock->init(true, false, false, false, info);\r\n}\r\n\r\ninline void rl_AcquireSRWLockExclusive(rl_SRWLOCK* lock, debug_info_param info)\r\n{\r\n    lock->lock_exclusive(info);\r\n}\r\n\r\ninline void rl_AcquireSRWLockShared(rl_SRWLOCK* lock, debug_info_param info)\r\n{\r\n    lock->lock_shared(info);\r\n}\r\n\r\ninline void rl_ReleaseSRWLockExclusive(rl_SRWLOCK* lock, debug_info_param info)\r\n{\r\n    lock->unlock_exclusive(info);\r\n}\r\n\r\ninline void rl_ReleaseSRWLockShared(rl_SRWLOCK* lock, debug_info_param info)\r\n{\r\n    lock->unlock_shared(info);\r\n}\r\n\r\n//!!!\r\ninline void rl_DeleteSRWLock(rl_SRWLOCK* lock, debug_info_param info)\r\n{\r\n    lock->deinit(info);\r\n}\r\n\r\n\r\nstruct mutex_tag_win_mutex;\r\ntypedef generic_mutex<mutex_tag_win_mutex> rl_win_mutex;\r\n\r\n\r\ninline rl_HANDLE rl_CreateMutex(void* /*security*/, int initial_owner, void const* /*name*/, debug_info_param info)\r\n{\r\n    void* mem = ctx().alloc(sizeof(rl_win_mutex), false, info);\r\n    rl_win_mutex* mtx = new (mem) rl_win_mutex ();\r\n    mtx->init(false, true, false, false, info);\r\n    if (initial_owner)\r\n        mtx->lock_exclusive(info);\r\n    return mtx;\r\n}\r\n\r\ninline int rl_ReleaseMutex(rl_HANDLE mtx, debug_info_param info)\r\n{\r\n    static_cast<rl_win_mutex*>(mtx)->unlock_exclusive(info);\r\n    return 1;\r\n\r\n}\r\n\r\n\r\n\r\nstruct condvar_tag_win;\r\ntypedef condvar<condvar_tag_win> rl_CONDITION_VARIABLE;\r\nunsigned long const rl_CONDITION_VARIABLE_LOCKMODE_SHARED = 1;\r\n\r\ninline void rl_InitializeConditionVariable(rl_CONDITION_VARIABLE* cv, debug_info_param info)\r\n{\r\n    cv->init(false, info);\r\n}\r\n\r\ninline 
int rl_SleepConditionVariableCS(rl_CONDITION_VARIABLE* cv, rl_CRITICAL_SECTION* cs, unsigned long ms, debug_info_param info)\r\n{\r\n    cv->wait(*cs, ms != rl_INFINITE, info);\r\n    return 0;\r\n}\r\n\r\ninline int rl_SleepConditionVariableSRW(rl_CONDITION_VARIABLE* cv, rl_SRWLOCK* lock, unsigned long ms, unsigned long flags, debug_info_param info)\r\n{\r\n    //!!! CONDITION_VARIABLE_LOCKMODE_SHARED\r\n    (void)flags;\r\n    cv->wait(*lock, ms != rl_INFINITE, info);\r\n    return 0;\r\n}\r\n\r\ninline void rl_WakeAllConditionVariable(rl_CONDITION_VARIABLE* cv, debug_info_param info)\r\n{\r\n    cv->notify_all(info);\r\n}\r\n\r\ninline void rl_WakeConditionVariable(rl_CONDITION_VARIABLE* cv, debug_info_param info)\r\n{\r\n    cv->notify_one(info);\r\n}\r\n\r\ninline void rl_DeleteConditionVariable(rl_CONDITION_VARIABLE* cv, debug_info_param info)\r\n{\r\n    cv->deinit(info);\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\ntypedef unsigned long (RL_STDCALL *rl_WIN_START_ROUTINE)(void* param);\r\ntypedef unsigned (RL_STDCALL *rl_MSVCR_THREAD_ROUTINE)(void* param);\r\n\r\ntemplate<typename thread_fn_t>\r\nstruct win32_thread_helper\r\n{\r\n    thread_fn_t fn;\r\n    void* param;\r\n\r\n    static void* thread(void* p)\r\n    {\r\n        win32_thread_helper* self = (win32_thread_helper*)p;\r\n        void* result = (void*)(uintptr_t)(self->fn(self->param));\r\n        delete_impl(self, $);\r\n        return result;\r\n    }\r\n};\r\n\r\ninline rl_HANDLE rl_CreateThread(void* security, unsigned stack_size, rl_WIN_START_ROUTINE fn, void* param, unsigned long creation_flags, unsigned long* thread_id, debug_info_param info)\r\n{\r\n    (void)security;\r\n    (void)stack_size;\r\n    (void)creation_flags;\r\n    (void)thread_id;\r\n\r\n    void* mem =\r\n        ctx().alloc(sizeof(win32_thread_helper<rl_WIN_START_ROUTINE>), false, info);\r\n    win32_thread_helper<rl_WIN_START_ROUTINE>* arg =\r\n        new (mem) win32_thread_helper<rl_WIN_START_ROUTINE>;\r\n    arg->fn = fn;\r\n    
arg->param = param;\r\n    win_waitable_object* handle = ctx().create_thread(&win32_thread_helper<rl_WIN_START_ROUTINE>::thread, arg);\r\n    return handle;\r\n}\r\n\r\n\r\ninline uintptr_t rl_beginthreadex(void *security, unsigned stack_size, rl_MSVCR_THREAD_ROUTINE start_address, void *arglist, unsigned initflag, unsigned* thrdaddr, debug_info_param info)\r\n{\r\n    (void)security;\r\n    (void)stack_size;\r\n    (void)initflag;\r\n    (void)thrdaddr;\r\n\r\n    void* mem = ctx().alloc(sizeof(win32_thread_helper<rl_MSVCR_THREAD_ROUTINE>), false, info);\r\n    win32_thread_helper<rl_MSVCR_THREAD_ROUTINE>* arg =\r\n        new (mem) win32_thread_helper<rl_MSVCR_THREAD_ROUTINE>;\r\n    arg->fn = start_address;\r\n    arg->param = arglist;\r\n    win_waitable_object* handle = ctx().create_thread(&win32_thread_helper<rl_MSVCR_THREAD_ROUTINE>::thread, arg);\r\n    return (uintptr_t)handle;\r\n}\r\n\r\ninline unsigned long rl_SetThreadAffinityMask(rl_HANDLE th, unsigned long affinity_mask, debug_info_param info)\r\n{\r\n    (void)(th);\r\n    (void)(affinity_mask);\r\n    (void)info;\r\n    return 0;\r\n}\r\n\r\ninline int rl_SuspendThread(rl_HANDLE th, debug_info_param info)\r\n{\r\n    (void)th;\r\n    (void)info;\r\n    return 1;\r\n}\r\n\r\ninline int rl_ResumeThread(rl_HANDLE th, debug_info_param info)\r\n{\r\n    (void)th;\r\n    (void)info;\r\n    return 1;\r\n}\r\n\r\ninline unsigned long GetLastError()\r\n{\r\n    return (unsigned long)get_errno();\r\n}\r\n\r\ninline void SetLastError(unsigned long value)\r\n{\r\n    set_errno((int)value);\r\n}\r\n\r\ninline void rl_FlushProcessWriteBuffers(debug_info_param info)\r\n{\r\n    systemwide_fence(info);\r\n}\r\n\r\n}\r\n\r\n\r\n#ifdef HANDLE\r\n#   undef HANDLE\r\n#endif\r\n#define HANDLE rl::rl_HANDLE\r\n\r\n#ifdef INFINITE\r\n#   undef INFINITE\r\n#endif\r\n#define INFINITE rl::rl_INFINITE\r\n\r\n\r\n#ifdef WAIT_FAILED\r\n#   undef WAIT_FAILED\r\n#endif\r\n#define WAIT_FAILED rl::rl_WAIT_FAILED\r\n\r\n#ifdef 
WAIT_OBJECT_0\r\n#   undef WAIT_OBJECT_0\r\n#endif\r\n#define WAIT_OBJECT_0 rl::rl_WAIT_OBJECT_0\r\n\r\n#ifdef WAIT_TIMEOUT\r\n#   undef WAIT_TIMEOUT\r\n#endif\r\n#define WAIT_TIMEOUT rl::rl_WAIT_TIMEOUT\r\n\r\n#ifdef WAIT_IO_COMPLETION\r\n#   undef WAIT_IO_COMPLETION\r\n#endif\r\n#define WAIT_IO_COMPLETION rl::rl_WAIT_IO_COMPLETION\r\n\r\n#ifdef MAXIMUM_WAIT_OBJECTS\r\n#   undef MAXIMUM_WAIT_OBJECTS\r\n#endif\r\n#define MAXIMUM_WAIT_OBJECTS rl::rl_MAXIMUM_WAIT_OBJECTS\r\n\r\n\r\n\r\n#define SwitchToThread() \\\r\n rl::rl_SwitchToThread($)\r\n\r\n#define Sleep(milliseconds) \\\r\n rl::rl_Sleep(milliseconds, $)\r\n\r\n\r\n\r\n#define CloseHandle(obj) \\\r\n rl::rl_CloseHandle(obj, $)\r\n\r\n#define WaitForSingleObject(obj, timeout) \\\r\n rl::rl_WaitForSingleObject(obj, timeout, $)\r\n\r\n#define WaitForMultipleObjects(count, objects, wait_all, timeout) \\\r\n rl::rl_WaitForMultipleObjects(count, objects, wait_all, timeout, $)\r\n\r\n#define WaitForMultipleObjectsEx(count, objects, wait_all, timeout, alertable) \\\r\n rl::rl_WaitForMultipleObjectsEx(count, objects, wait_all, timeout, alertable, $)\r\n\r\n#define SignalObjectAndWait(obj_to_signal, obj_to_wait, timeout, alertable) \\\r\n rl::rl_SignalObjectAndWait(obj_to_signal, obj_to_wait, timeout, alertable, $)\r\n\r\n#ifdef CreateSemaphore\r\n#   undef CreateSemaphore\r\n#endif\r\n\r\n#ifdef ReleaseSemaphore\r\n#   undef ReleaseSemaphore\r\n#endif\r\n\r\n#define CreateSemaphoreA rl_CreateSemaphore\r\n#define CreateSemaphoreW rl_CreateSemaphore\r\n#define CreateSemaphore rl_CreateSemaphore\r\n#define rl_CreateSemaphore(security, initial_count, max_count, name) \\\r\n    rl::rl_CreateSemaphore(security, initial_count, max_count, name, $)\\\r\n\r\n#define ReleaseSemaphore(sema, count, prev_count) \\\r\n rl::rl_ReleaseSemaphore(sema, count, prev_count, $)\r\n\r\n\r\n\r\n#ifdef CreateEvent\r\n#   undef CreateEvent\r\n#endif\r\n#define CreateEventA rl_CreateEvent\r\n#define CreateEventW rl_CreateEvent\r\n#define 
CreateEvent rl_CreateEvent\r\n#define rl_CreateEvent(security, manual_reset, initial_state, name)\\\r\n    rl::rl_CreateEvent(security, manual_reset, initial_state, name, $)\r\n\r\n#define SetEvent(ev)\\\r\n rl::rl_SetEvent(ev, $)\r\n\r\n#define ResetEvent(ev)\\\r\n rl::rl_ResetEvent(ev, $)\r\n\r\n#define PulseEvent(ev)\\\r\n rl::rl_PulseEvent(ev, $)\r\n\r\n\r\n#ifdef CreateMutex\r\n#   undef CreateMutex\r\n#endif\r\n#define CreateMutexA rl_CreateMutex\r\n#define CreateMutexW rl_CreateMutex\r\n#define CreateMutex rl_CreateMutex\r\n#define rl_CreateMutex(security, initial_owner, name)\\\r\n    rl::rl_CreateMutex(security, initial_owner, name, $)\r\n\r\n#define ReleaseMutex(mtx)\\\r\n rl::rl_ReleaseMutex(mtx, $)\r\n\r\n\r\n\r\n#define CRITICAL_SECTION rl::rl_CRITICAL_SECTION\r\n\r\n#define InitializeCriticalSection(cs) \\\r\n rl::rl_InitializeCriticalSection(cs, $)\r\n\r\n#define InitializeCriticalSectionAndSpinCount(cs, spin) \\\r\n rl::rl_InitializeCriticalSectionAndSpinCount(cs, spin, $)\r\n\r\n#define InitializeCriticalSectionEx(cs, spin, flags) \\\r\n rl::rl_InitializeCriticalSectionEx(cs, spin, flags, $)\r\n\r\n#define DeleteCriticalSection(cs) \\\r\n rl::rl_DeleteCriticalSection(cs, $)\r\n\r\n#define EnterCriticalSection(cs) \\\r\n rl::rl_EnterCriticalSection(cs, $)\r\n\r\n#define TryEnterCriticalSection(cs) \\\r\n rl::rl_TryEnterCriticalSection(cs, $)\r\n\r\n#define LeaveCriticalSection(cs) \\\r\n rl::rl_LeaveCriticalSection(cs, $)\r\n\r\n\r\n\r\n\r\n#define SRWLOCK rl::rl_SRWLOCK\r\n\r\n#define InitializeSRWLock(lock) \\\r\n rl::rl_InitializeSRWLock(lock, $)\r\n\r\n#define AcquireSRWLockExclusive(lock) \\\r\n rl::rl_AcquireSRWLockExclusive(lock, $)\r\n\r\n#define AcquireSRWLockShared(lock) \\\r\n rl::rl_AcquireSRWLockShared(lock, $)\r\n\r\n#define ReleaseSRWLockExclusive(lock) \\\r\n rl::rl_ReleaseSRWLockExclusive(lock, $)\r\n\r\n#define ReleaseSRWLockShared(lock) \\\r\n rl::rl_ReleaseSRWLockShared(lock, $)\r\n\r\n//!!! 
no such function in WIN API\r\n#define DeleteSRWLock(lock) \\\r\n rl::rl_DeleteSRWLock(lock, $)\r\n\r\n\r\n\r\n\r\n\r\n\r\n#define CONDITION_VARIABLE rl::rl_CONDITION_VARIABLE\r\n\r\n#ifdef CONDITION_VARIABLE_LOCKMODE_SHARED\r\n#   undef CONDITION_VARIABLE_LOCKMODE_SHARED\r\n#endif\r\n#define CONDITION_VARIABLE_LOCKMODE_SHARED rl::rl_CONDITION_VARIABLE_LOCKMODE_SHARED\r\n\r\n#define InitializeConditionVariable(cv) \\\r\n rl::rl_InitializeConditionVariable(cv, $)\r\n\r\n#define SleepConditionVariableCS(cv, cs, ms) \\\r\n rl::rl_SleepConditionVariableCS(cv, cs, ms, $)\r\n\r\n#define SleepConditionVariableSRW(cv, lock, ms, flags) \\\r\n rl::rl_SleepConditionVariableSRW(cv, lock, ms, flags, $)\r\n\r\n#define WakeAllConditionVariable(cv) \\\r\n rl::rl_WakeAllConditionVariable(cv, $)\r\n\r\n#define WakeConditionVariable(cv) \\\r\n rl::rl_WakeConditionVariable(cv, $)\r\n\r\n//!!! no such function in WIN API\r\n#define DeleteConditionVariable(cv) \\\r\n rl::rl_DeleteConditionVariable(cv, $)\r\n\r\n\r\n\r\n#define CreateThread(security, stack_size, fn, param, creation_flags, thread_id) \\\r\n rl::rl_CreateThread(security, stack_size, fn, param, creation_flags, thread_id, $)\r\n\r\n#define _beginthreadex(security, stack_size, start_address, arglist, initflag, thrdaddr) \\\r\n  rl::rl_beginthreadex(security, stack_size, start_address, arglist, initflag, thrdaddr, $)\r\n\r\n#define SetThreadAffinityMask(th, affinity_mask) \\\r\n rl::rl_SetThreadAffinityMask(th, affinity_mask, $)\r\n\r\n#define SuspendThread(th) \\\r\n rl::rl_SuspendThread(th, $)\r\n\r\n#define ResumeThread(th) \\\r\n rl::rl_ResumeThread(th, $)\r\n\r\n#define FlushProcessWriteBuffers() \\\r\n rl::rl_FlushProcessWriteBuffers($)\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/sync_var.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_SYNC_VAR_HPP\r\n#define RL_SYNC_VAR_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"foreach.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass sync_var : nocopy<>\r\n{\r\npublic:\r\n    sync_var()\r\n    {\r\n        iteration_begin();\r\n    }\r\n\r\n    void iteration_begin()\r\n    {\r\n        foreach<thread_count>(order_, &assign_zero);\r\n    }\r\n\r\n    void acquire(thread_info_base* th)\r\n    {\r\n        th->own_acq_rel_order_ += 1;\r\n        foreach<thread_count>(th->acq_rel_order_, order_, &assign_max);\r\n    }\r\n\r\n    void release(thread_info_base* th)\r\n    {\r\n        th->own_acq_rel_order_ += 1;\r\n        foreach<thread_count>(order_, th->acq_rel_order_, &assign_max);\r\n    }\r\n\r\n    void acq_rel(thread_info_base* th)\r\n    {\r\n        th->own_acq_rel_order_ += 1;\r\n        timestamp_t* acq_rel_order = th->acq_rel_order_;\r\n        timestamp_t* order = order_;\r\n        foreach<thread_count>(acq_rel_order, order, &assign_max);\r\n        foreach<thread_count>(order, acq_rel_order, &assign_max);\r\n    }\r\n\r\nprivate:\r\n    timestamp_t order_ [thread_count];\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/test_params.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_TEST_PARAMS_HPP\r\n#define RL_TEST_PARAMS_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"test_result.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\nenum scheduler_type_e\r\n{\r\n    sched_random,\r\n    sched_bound,\r\n    sched_full,\r\n    sched_count,\r\n\r\n    random_scheduler_type = sched_random,\r\n    fair_context_bound_scheduler_type = sched_bound,\r\n    fair_full_search_scheduler_type = sched_full,\r\n    scheduler_type_count\r\n};\r\n\r\ninline char const* format(scheduler_type_e t)\r\n{\r\n    switch (t)\r\n    {\r\n    case sched_random: return \"random scheduler\";\r\n    case sched_bound: return \"context bound scheduler\";\r\n    case sched_full: return \"full search scheduler\";\r\n    default: break;\r\n    }\r\n    RL_VERIFY(false);\r\n    throw std::logic_error(\"invalid scheduler type\");\r\n}\r\n\r\n\r\nstruct test_params\r\n{\r\n    // input params\r\n    iteration_t                 iteration_count;\r\n    std::ostream*               output_stream;\r\n    std::ostream*               progress_stream;\r\n    unsigned                    progress_output_period;\r\n    bool                        collect_history;\r\n    bool                        output_history;\r\n    scheduler_type_e            search_type;\r\n    unsigned                    context_bound;\r\n    unsigned                    execution_depth_limit;\r\n    string                      initial_state;\r\n\r\n    // output params\r\n    test_result_e               test_result;\r\n    iteration_t  
               stop_iteration;\r\n    string                      test_name;\r\n    string                      final_state;\r\n\r\n    test_params()\r\n    {\r\n        iteration_count         = 1000;\r\n        output_stream           = &std::cout;\r\n        progress_stream         = &std::cout;\r\n        progress_output_period  = 3;\r\n        collect_history         = false;\r\n        output_history          = false;\r\n        search_type             = random_scheduler_type;\r\n        context_bound           = 1;\r\n        execution_depth_limit   = 2000;\r\n\r\n        test_result             = test_result_success;\r\n        stop_iteration          = 0;\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/test_result.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_TEST_RESULT_HPP\r\n#define RL_TEST_RESULT_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nenum test_result_e\r\n{\r\n    test_result_success,\r\n    test_result_until_condition_hit,\r\n    test_result_inconsistent_test_suite,\r\n    test_result_user_assert_failed,\r\n    test_result_user_invariant_failed,\r\n    test_result_data_race,\r\n    test_result_access_to_freed_memory,\r\n    test_result_double_free,\r\n    test_result_memory_leak,\r\n    test_result_resource_leak,\r\n    test_result_unitialized_access,\r\n    test_result_deadlock,\r\n    test_result_livelock,\r\n\r\n    // mutex\r\n    test_result_recursion_on_nonrecursive_mutex,\r\n    test_result_unlocking_mutex_wo_ownership,\r\n    test_result_destroying_owned_mutex,\r\n    test_result_double_initialization_of_mutex,\r\n    test_result_usage_of_non_initialized_mutex,\r\n    test_result_mutex_write_to_read_upgrade,\r\n    test_result_mutex_read_to_write_upgrade,\r\n\r\n    //condvar\r\n    test_result_double_initialization_of_condvar,\r\n    test_result_usage_of_non_initialized_condvar,\r\n\r\n    //semaphore\r\n    test_result_double_initialization_of_semaphore,\r\n    test_result_usage_of_non_initialized_semaphore,\r\n\r\n    //event\r\n    test_result_double_initialization_of_event,\r\n    test_result_usage_of_non_initialized_event,\r\n\r\n    //dynamic thread\r\n    test_result_thread_signal,\r\n};\r\n\r\n\r\ninline char const* test_result_str(test_result_e r)\r\n{\r\n    switch 
(r)\r\n    {\r\n    case test_result_success: return \"SUCCESS\";\r\n    case test_result_until_condition_hit: return \"UNTIL CONDITION HIT\";\r\n    case test_result_inconsistent_test_suite: return \"INCONSISTENT TEST SUITE\";\r\n    case test_result_user_assert_failed: return \"USER ASSERT FAILED\";\r\n    case test_result_user_invariant_failed: return \"USER INVARIANT FAILED\";\r\n    case test_result_data_race: return \"DATA RACE\";\r\n    case test_result_access_to_freed_memory: return \"ACCESS TO FREED MEMORY\";\r\n    case test_result_double_free: return \"DOUBLE FREE\";\r\n    case test_result_memory_leak: return \"MEMORY LEAK\";\r\n    case test_result_resource_leak: return \"RESOURCE LEAK\";\r\n    case test_result_unitialized_access: return \"ACCESS TO UNITIALIZED VARIABLE\";\r\n    case test_result_deadlock: return \"DEADLOCK\";\r\n    case test_result_livelock: return \"LIVELOCK\";\r\n\r\n    // mutex\r\n    case test_result_recursion_on_nonrecursive_mutex: return \"RECURSION ON NON-RECURSIVE MUTEX\";\r\n    case test_result_unlocking_mutex_wo_ownership: return \"UNLOCKING MUTEX W/O OWNERSHIP\";\r\n    case test_result_destroying_owned_mutex: return \"DESTROYING OWNED MUTEX\";\r\n    case test_result_double_initialization_of_mutex: return \"DOUBLE INITIALIZATION OF MUTEX\";\r\n    case test_result_usage_of_non_initialized_mutex: return \"USAGE OF NON INITIALIZED MUTEX\";\r\n    case test_result_mutex_write_to_read_upgrade: return \"ATTEMPT TO UPGRADE EXCLUSIVE MUTEX OWNERSHIP TO SHARED\";\r\n    case test_result_mutex_read_to_write_upgrade: return \"ATTEMPT TO UPGRADE SHARED MUTEX OWNERSHIP TO EXCLUSIVE\";\r\n\r\n    // condvar\r\n    case test_result_double_initialization_of_condvar: return \"DOUBLE INITIALIZATION OF CONDITION VARIABLE\";\r\n    case test_result_usage_of_non_initialized_condvar: return \"USAGE OF NON INITIALIZED CONDITION VARIABLE\";\r\n\r\n    // semaphore\r\n    case test_result_double_initialization_of_semaphore: return \"DOUBLE 
INITIALIZATION OF SEMAPHORE\";\r\n    case test_result_usage_of_non_initialized_semaphore: return \"USAGE OF NON INITIALIZED SEMAPHORE\";\r\n\r\n    // event\r\n    case test_result_double_initialization_of_event: return \"DOUBLE INITIALIZATION OF EVENT\";\r\n    case test_result_usage_of_non_initialized_event: return \"USAGE OF NON INITIALIZED EVENT\";\r\n\r\n    default: RL_VERIFY(false); return \"UNKNOWN ERROR\";\r\n    }\r\n}\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/test_suite.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_TEST_SUITE_HPP\r\n#define RL_TEST_SUITE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"test_result.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<\r\n    typename derived_t,\r\n    thread_id_t static_thread_count_param,\r\n    test_result_e result = test_result_success>\r\nstruct test_suite : nocopy<>\r\n{\r\n    static thread_id_t const dynamic_thread_count = 0;\r\n\r\n    struct params\r\n    {\r\n        static thread_id_t const static_thread_count = static_thread_count_param;\r\n        static thread_id_t const dynamic_thread_count = derived_t::dynamic_thread_count;\r\n        static thread_id_t const thread_count = static_thread_count + dynamic_thread_count;\r\n        static test_result_e const expected_result = result;\r\n    };\r\n\r\n    void invariant() {}\r\n    void before() {}\r\n    void after() {}\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/thread.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_THREAD_HPP\r\n#define RL_THREAD_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context_base.hpp\"\r\n#include \"dyn_thread_ctx.hpp\"\r\n#include \"thread_base.hpp\"\r\n#include \"test_suite.hpp\"\r\n#include \"memory_order.hpp\"\r\n#include \"foreach.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\n\r\nstruct atomic_data;\r\nstruct var_data;\r\ntemplate<thread_id_t thread_count> struct atomic_data_impl;\r\ntemplate<thread_id_t thread_count> struct var_data_impl;\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nstruct thread_info : thread_info_base\r\n{\r\n    thread_info(thread_id_t index = 0)\r\n        : thread_info_base(index, acq_rel_order_)\r\n    {\r\n    }\r\n\r\n    void iteration_begin()\r\n    {\r\n        sync_object_.iteration_begin();\r\n        last_yield_ = 0;\r\n        dynamic_thread_func_ = 0;\r\n        dynamic_thread_param_ = 0;\r\n        for (thread_id_t j = 0; j != thread_count; ++j)\r\n        {\r\n            acq_rel_order_[j] = 0;\r\n        }\r\n        acq_rel_order_[index_] = 1;\r\n        temp_switch_from_ = -1;\r\n        saved_disable_preemption_ = -1;\r\n    }\r\n\r\n    thread_sync_object<thread_count> sync_object_;\r\n\r\n    timestamp_t acq_rel_order_ [thread_count];\r\n    timestamp_t acquire_fence_order_ [thread_count];\r\n    timestamp_t release_fence_order_ [thread_count];\r\n\r\n#ifdef RL_IMPROVED_SEQ_CST_FENCE\r\n    timestamp_t imp_seq_cst_order_ [thread_count];\r\n#endif\r\n\r\n    virtual void on_start()\r\n    {\r\n        
RL_VERIFY(temp_switch_from_ == -1);\r\n        RL_VERIFY(saved_disable_preemption_ == -1);\r\n        sync_object_.on_start();\r\n    }\r\n\r\n    virtual void on_finish()\r\n    {\r\n        RL_VERIFY(temp_switch_from_ == -1);\r\n        RL_VERIFY(saved_disable_preemption_ == -1);\r\n        sync_object_.on_finish();\r\n    }\r\n\r\n    void atomic_thread_fence_acquire()\r\n    {\r\n        foreach<thread_count>(\r\n            acq_rel_order_,\r\n            acquire_fence_order_,\r\n            &assign_max);\r\n    }\r\n\r\n    void atomic_thread_fence_release()\r\n    {\r\n        foreach<thread_count>(\r\n            release_fence_order_,\r\n            acq_rel_order_,\r\n            &assign);\r\n    }\r\n\r\n    void atomic_thread_fence_acq_rel()\r\n    {\r\n        atomic_thread_fence_acquire();\r\n        atomic_thread_fence_release();\r\n    }\r\n\r\n    void atomic_thread_fence_seq_cst(timestamp_t* seq_cst_fence_order)\r\n    {\r\n#ifdef RL_IMPROVED_SEQ_CST_FENCE\r\n        foreach<thread_count>(acq_rel_order_, imp_seq_cst_order_, assign_max);\r\n#endif\r\n\r\n        atomic_thread_fence_acquire();\r\n\r\n        foreach<thread_count>(\r\n            acq_rel_order_,\r\n            seq_cst_fence_order,\r\n            &assign_max);\r\n\r\n        foreach<thread_count>(\r\n            seq_cst_fence_order,\r\n            acq_rel_order_,\r\n            &assign);\r\n\r\n        atomic_thread_fence_release();\r\n    }\r\n\r\n    virtual ~thread_info() {} // just to calm down gcc\r\n\r\nprivate:\r\n    thread_info(thread_info const&);\r\n    thread_info& operator = (thread_info const&);\r\n\r\n    virtual unsigned atomic_load_relaxed(atomic_data* RL_RESTRICT data)\r\n    {\r\n        return atomic_load<mo_relaxed, false>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_load_acquire(atomic_data* RL_RESTRICT data)\r\n    {\r\n        return atomic_load<mo_acquire, false>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_load_seq_cst(atomic_data* RL_RESTRICT 
data)\r\n    {\r\n        return atomic_load<mo_seq_cst, false>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_load_relaxed_rmw(atomic_data* RL_RESTRICT data)\r\n    {\r\n        return atomic_load<mo_relaxed, true>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_load_acquire_rmw(atomic_data* RL_RESTRICT data)\r\n    {\r\n        return atomic_load<mo_acquire, true>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_load_seq_cst_rmw(atomic_data* RL_RESTRICT data)\r\n    {\r\n        return atomic_load<mo_seq_cst, true>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_store_relaxed(atomic_data* RL_RESTRICT data)\r\n    {\r\n        return atomic_store<mo_relaxed, false>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_store_release(atomic_data* RL_RESTRICT data)\r\n    {\r\n        return atomic_store<mo_release, false>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_store_seq_cst(atomic_data* RL_RESTRICT data)\r\n    {\r\n        return atomic_store<mo_seq_cst, false>(data);\r\n    }\r\n\r\n    virtual unsigned atomic_rmw_relaxed(atomic_data* RL_RESTRICT data, bool& aba)\r\n    {\r\n        return atomic_rmw<mo_relaxed>(data, aba);\r\n    }\r\n\r\n    virtual unsigned atomic_rmw_acquire(atomic_data* RL_RESTRICT data, bool& aba)\r\n    {\r\n        return atomic_rmw<mo_acquire>(data, aba);\r\n    }\r\n\r\n    virtual unsigned atomic_rmw_release(atomic_data* RL_RESTRICT data, bool& aba)\r\n    {\r\n        return atomic_rmw<mo_release>(data, aba);\r\n    }\r\n\r\n    virtual unsigned atomic_rmw_acq_rel(atomic_data* RL_RESTRICT data, bool& aba)\r\n    {\r\n        return atomic_rmw<mo_acq_rel>(data, aba);\r\n    }\r\n\r\n    virtual unsigned atomic_rmw_seq_cst(atomic_data* RL_RESTRICT data, bool& aba)\r\n    {\r\n        return atomic_rmw<mo_seq_cst>(data, aba);\r\n    }\r\n\r\n    template<memory_order mo, bool rmw>\r\n    unsigned get_load_index(atomic_data_impl<thread_count>& var)\r\n    {\r\n        typedef typename 
atomic_data_impl<thread_count>::history_record history_t;\r\n\r\n        unsigned index = var.current_index_;\r\n        context& c = ctx();\r\n\r\n        if (false == val(rmw))\r\n        {\r\n            size_t const limit = c.is_random_sched() ? atomic_history_size  - 1: 1;\r\n            for (size_t i = 0; i != limit; ++i, --index)\r\n            {\r\n                history_t const& rec = var.history_[index % atomic_history_size];\r\n                if (false == rec.busy_)\r\n                    return (unsigned)-1; // access to unitialized var\r\n\r\n                history_t const& prev = var.history_[(index - 1) % atomic_history_size];\r\n                if (prev.busy_ && prev.last_seen_order_[index_] <= last_yield_)\r\n                    break;\r\n\r\n                if (mo_seq_cst == val(mo) && rec.seq_cst_)\r\n                    break;\r\n\r\n                timestamp_t acq_rel_order =\r\n                    acq_rel_order_[rec.thread_id_];\r\n\r\n                if (acq_rel_order >= rec.acq_rel_timestamp_)\r\n                    break;\r\n\r\n                bool stop = false;\r\n                for (thread_id_t i = 0; i != thread_count; ++i)\r\n                {\r\n                    timestamp_t acq_rel_order2 = acq_rel_order_[i];\r\n                    if (acq_rel_order2 >= rec.last_seen_order_[i])\r\n                    {\r\n                        stop = true;\r\n                        break;\r\n                    }\r\n                }\r\n                if (stop)\r\n                    break;\r\n\r\n                if (0 == c.rand(2, sched_type_atomic_load))\r\n                    break;\r\n            }\r\n        }\r\n\r\n        if (false == var.history_[index % atomic_history_size].busy_)\r\n            return (unsigned)-1;\r\n\r\n        return index;\r\n    }\r\n\r\n    template<memory_order mo, bool rmw>\r\n    unsigned atomic_load(atomic_data* RL_RESTRICT data)\r\n    {\r\n        RL_VERIFY(mo_release != mo || rmw);\r\n        
RL_VERIFY(mo_acq_rel != mo || rmw);\r\n\r\n        atomic_data_impl<thread_count>& var = \r\n            *static_cast<atomic_data_impl<thread_count>*>(data);\r\n\r\n        typedef typename atomic_data_impl<thread_count>::history_record history_t;\r\n\r\n        unsigned index = get_load_index<mo, rmw>(var);\r\n        if ((unsigned)-1 == index)\r\n            return (unsigned)-1;\r\n\r\n        index %= atomic_history_size;\r\n        history_t& rec = var.history_[index];\r\n        RL_VERIFY(rec.busy_);\r\n\r\n        own_acq_rel_order_ += 1;\r\n        rec.last_seen_order_[index_] = own_acq_rel_order_;\r\n\r\n        bool const synch =\r\n            (mo_acquire == mo\r\n            || mo_acq_rel == mo\r\n            || mo_seq_cst == mo);\r\n\r\n        timestamp_t* acq_rel_order = (synch ? acq_rel_order_ : acquire_fence_order_);\r\n\r\n        foreach<thread_count>(acq_rel_order, rec.acq_rel_order_, assign_max);\r\n\r\n        return index;\r\n    }\r\n\r\n    virtual unsigned atomic_init(atomic_data* RL_RESTRICT data)\r\n    {\r\n        atomic_data_impl<thread_count>& var = \r\n            *static_cast<atomic_data_impl<thread_count>*>(data);\r\n\r\n        typedef typename atomic_data_impl<thread_count>::history_record history_t;\r\n\r\n        unsigned const idx = ++var.current_index_ % atomic_history_size;\r\n        history_t& rec = var.history_[idx];\r\n\r\n        rec.busy_ = true;\r\n        rec.thread_id_ = index_;\r\n        rec.seq_cst_ = false;\r\n        rec.acq_rel_timestamp_ = 0;\r\n\r\n        foreach<thread_count>(rec.acq_rel_order_, assign_zero);\r\n\r\n        return idx;\r\n    }\r\n\r\n    template<memory_order mo, bool rmw>\r\n    unsigned atomic_store(atomic_data* RL_RESTRICT data)\r\n    {\r\n        RL_VERIFY(mo_consume != mo || rmw);\r\n        RL_VERIFY(mo_acquire != mo || rmw);\r\n        RL_VERIFY(mo_acq_rel != mo || rmw);\r\n\r\n        atomic_data_impl<thread_count>& var = \r\n            
*static_cast<atomic_data_impl<thread_count>*>(data);\r\n\r\n        typedef typename atomic_data_impl<thread_count>::history_record history_t;\r\n\r\n        unsigned const idx = ++var.current_index_ % atomic_history_size;\r\n        history_t& rec = var.history_[idx];\r\n\r\n        rec.busy_ = true;\r\n        rec.thread_id_ = index_;\r\n        rec.seq_cst_ = (mo_seq_cst == mo);\r\n\r\n        own_acq_rel_order_ += 1;\r\n        rec.acq_rel_timestamp_ = own_acq_rel_order_;\r\n\r\n        foreach<thread_count>(rec.last_seen_order_, assign<(timestamp_t)-1>);\r\n\r\n        rec.last_seen_order_[index_] = own_acq_rel_order_;\r\n\r\n        unsigned const prev_idx = (var.current_index_ - 1) % atomic_history_size;\r\n        history_t& prev = var.history_[prev_idx];\r\n\r\n#ifdef RL_IMPROVED_SEQ_CST_FENCE\r\n        if (val(mo) == mo_release && val(rmw) == false)\r\n            foreach<thread_count>(imp_seq_cst_order_, prev.acq_rel_order_, assign_max);\r\n#endif\r\n\r\n        bool const synch = \r\n            (mo_release == mo\r\n            || mo_acq_rel == mo\r\n            || mo_seq_cst == mo);\r\n\r\n        bool const preserve = \r\n            prev.busy_ && (rmw || (index_ == prev.thread_id_));\r\n\r\n        timestamp_t* acq_rel_order = (synch ? 
acq_rel_order_ : release_fence_order_);\r\n\r\n        if (preserve)\r\n        {\r\n            foreach<thread_count>(rec.acq_rel_order_, prev.acq_rel_order_, assign);\r\n            foreach<thread_count>(rec.acq_rel_order_, acq_rel_order, assign_max);\r\n        }\r\n        else\r\n        {\r\n            foreach<thread_count>(rec.acq_rel_order_, acq_rel_order, assign);\r\n        }\r\n\r\n        return idx;\r\n    }\r\n\r\n    template<memory_order mo>\r\n    unsigned atomic_rmw(atomic_data* RL_RESTRICT data, bool& aba)\r\n    {\r\n        atomic_data_impl<thread_count>& var = \r\n            *static_cast<atomic_data_impl<thread_count>*>(data);\r\n        timestamp_t const last_seen = var.history_[var.current_index_ % atomic_history_size].last_seen_order_[index_];\r\n        aba = (last_seen > own_acq_rel_order_);\r\n        atomic_load<mo, true>(data);\r\n        unsigned result = atomic_store<mo, true>(data);\r\n\r\n#ifdef RL_IMPROVED_SEQ_CST_RMW\r\n        atomic_thread_fence_seq_cst(ctx_->seq_cst_fence_order_);\r\n#endif\r\n\r\n        return result;\r\n    }\r\n\r\n    virtual unpark_reason atomic_wait(atomic_data* RL_RESTRICT data, bool is_timed, bool allow_spurious_wakeup, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        atomic_data_impl<thread_count>& var = \r\n            *static_cast<atomic_data_impl<thread_count>*>(data);\r\n        unpark_reason const res = var.futex_ws_.park_current(c, is_timed, allow_spurious_wakeup, false, info);\r\n        if (res == unpark_reason_normal)\r\n            var.futex_sync_.acquire(this);\r\n        return res;\r\n    }\r\n\r\n    virtual thread_id_t atomic_wake(atomic_data* RL_RESTRICT data, thread_id_t count, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        atomic_data_impl<thread_count>& var = \r\n            *static_cast<atomic_data_impl<thread_count>*>(data);\r\n        thread_id_t unblocked = 0;\r\n        for (; count != 0; count -= 1, unblocked += 1)\r\n     
   {\r\n            if (var.futex_ws_.unpark_one(c, info) == false)\r\n                break;\r\n        }\r\n        if (unblocked != 0)\r\n            var.futex_sync_.release(this);\r\n        return unblocked;\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/thread_base.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_THREAD_BASE_HPP\r\n#define RL_THREAD_BASE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context_base.hpp\"\r\n//#include \"test_suite.hpp\"\r\n//#include \"memory_order.hpp\"\r\n//#include \"foreach.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\n\r\nstruct atomic_data;\r\nstruct var_data;\r\ntemplate<thread_id_t thread_count> struct atomic_data_impl;\r\ntemplate<thread_id_t thread_count> struct var_data_impl;\r\n\r\n\r\nclass thread_info_base\r\n{\r\npublic:\r\n    virtual void on_start() = 0;\r\n    virtual void on_finish() = 0;\r\n\r\n    virtual unsigned atomic_init(atomic_data* RL_RESTRICT data) = 0;\r\n\r\n    virtual unsigned atomic_load_relaxed(atomic_data* RL_RESTRICT data) = 0;\r\n    virtual unsigned atomic_load_acquire(atomic_data* RL_RESTRICT data) = 0;\r\n    virtual unsigned atomic_load_seq_cst(atomic_data* RL_RESTRICT data) = 0;\r\n    virtual unsigned atomic_load_relaxed_rmw(atomic_data* RL_RESTRICT data) = 0;\r\n    virtual unsigned atomic_load_acquire_rmw(atomic_data* RL_RESTRICT data) = 0;\r\n    virtual unsigned atomic_load_seq_cst_rmw(atomic_data* RL_RESTRICT data) = 0;\r\n\r\n    virtual unsigned atomic_store_relaxed(atomic_data* RL_RESTRICT data) = 0;\r\n    virtual unsigned atomic_store_release(atomic_data* RL_RESTRICT data) = 0;\r\n    virtual unsigned atomic_store_seq_cst(atomic_data* RL_RESTRICT data) = 0;\r\n\r\n    virtual unsigned atomic_rmw_relaxed(atomic_data* RL_RESTRICT data, bool& aba) = 0;\r\n    virtual unsigned 
atomic_rmw_acquire(atomic_data* RL_RESTRICT data, bool& aba) = 0;\r\n    virtual unsigned atomic_rmw_release(atomic_data* RL_RESTRICT data, bool& aba) = 0;\r\n    virtual unsigned atomic_rmw_acq_rel(atomic_data* RL_RESTRICT data, bool& aba) = 0;\r\n    virtual unsigned atomic_rmw_seq_cst(atomic_data* RL_RESTRICT data, bool& aba) = 0;\r\n\r\n    virtual unpark_reason atomic_wait(atomic_data* RL_RESTRICT data, bool is_timed, bool allow_spurious_wakeup, debug_info_param info) = 0;\r\n    virtual thread_id_t atomic_wake(atomic_data* RL_RESTRICT data, thread_id_t count, debug_info_param info) = 0;\r\n\r\n    virtual ~thread_info_base() {} // just to calm down gcc\r\n\r\n    fiber_t fiber_;\r\n    thread_id_t const index_;\r\n    context* ctx_;\r\n    timestamp_t* const acq_rel_order_;\r\n    timestamp_t last_yield_;\r\n    timestamp_t& own_acq_rel_order_;\r\n    unpark_reason unpark_reason_;\r\n    thread_id_t temp_switch_from_;\r\n    int saved_disable_preemption_;\r\n    int errno_;\r\n    void* (*dynamic_thread_func_)(void*);\r\n    void* dynamic_thread_param_;\r\n    //unsigned disable_history_;\r\n\r\n    thread_info_base(thread_id_t index, timestamp_t* acq_rel_order)\r\n        : index_(index)\r\n        , acq_rel_order_(acq_rel_order)\r\n        , own_acq_rel_order_(acq_rel_order[index])\r\n    {\r\n    }\r\n\r\nprivate:\r\n    thread_info_base(thread_info_base const&);\r\n    thread_info_base& operator = (thread_info_base const&);\r\n};\r\n\r\n\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/thread_local.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_THREAD_LOCAL_HPP\r\n#define RL_THREAD_LOCAL_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"signature.hpp\"\r\n#include \"context.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nclass generic_thread_local : nocopy<>\r\n{\r\npublic:\r\n    generic_thread_local()\r\n        : index_(-1)\r\n    {\r\n    }\r\n\r\n    ~generic_thread_local()\r\n    {\r\n    }\r\n\r\n    void init(void (*dtor)(intptr_t), debug_info_param info)\r\n    {\r\n        sign_.check(info);\r\n        //RL_ASSERT(index_ == -1);\r\n        index_ = ctx().thread_local_alloc(dtor);\r\n    }\r\n\r\n    void deinit(debug_info_param info)\r\n    {\r\n        sign_.check(info);\r\n        RL_ASSERT(index_ != -1);\r\n        ctx().thread_local_free(index_);\r\n        index_ = -1;\r\n    }\r\n\r\n    void set(intptr_t value, debug_info_param info)\r\n    {\r\n        sign_.check(info);\r\n        ctx().thread_local_set(index_, value);\r\n    }\r\n\r\n    intptr_t get(debug_info_param info)\r\n    {\r\n        sign_.check(info);\r\n        return ctx().thread_local_get(index_);\r\n    }\r\n\r\nprivate:\r\n    signature<0xf1724ae2> sign_;\r\n    int index_;\r\n};\r\n\r\n\r\ntemplate<typename T>\r\nclass thread_local_var;\r\n\r\n\r\ntemplate<typename T>\r\nclass thread_local_proxy\r\n{\r\npublic:\r\n    thread_local_proxy(thread_local_var<T>& var, debug_info_param info)\r\n        : var_(var)\r\n        , info_(info)\r\n    {}\r\n\r\n    operator T () const\r\n    {\r\n        return var_.get(info_);\r\n    }\r\n\r\n  
  T operator -> () const\r\n    {\r\n        return var_.get(info_);\r\n    }\r\n\r\n    thread_local_proxy operator = (T value)\r\n    {\r\n        var_.set(value, info_);\r\n        return *this;\r\n    }\r\n\r\nprivate:\r\n    thread_local_var<T>& var_;\r\n    debug_info info_;\r\n    thread_local_proxy& operator = (thread_local_proxy const&);\r\n};\r\n\r\n\r\ntemplate<typename T>\r\nclass thread_local_var : generic_thread_local\r\n{\r\npublic:\r\n    thread_local_var()\r\n        : ctx_seq_()\r\n    {\r\n    }\r\n\r\n    ~thread_local_var()\r\n    {\r\n    }\r\n\r\n    thread_local_proxy<T> operator () (debug_info_param info)\r\n    {\r\n        return thread_local_proxy<T>(*this, info);\r\n    }\r\n\r\n    void set(T value, debug_info_param info)\r\n    {\r\n        if (ctx_seq_ != ctx().get_ctx_seq())\r\n        {\r\n            ctx_seq_ = ctx().get_ctx_seq();\r\n            generic_thread_local::init(0, info);\r\n        }\r\n        generic_thread_local::set((intptr_t)value, info);\r\n    }\r\n\r\n    T get(debug_info_param info)\r\n    {\r\n        if (ctx_seq_ != ctx().get_ctx_seq())\r\n        {\r\n            ctx_seq_ = ctx().get_ctx_seq();\r\n            generic_thread_local::init(0, info);\r\n        }\r\n        return (T)generic_thread_local::get(info);\r\n    }\r\n\r\nprivate:\r\n    unsigned ctx_seq_;\r\n};\r\n\r\n\r\ninline unsigned long rl_TlsAlloc(debug_info_param info)\r\n{\r\n#ifndef RL_GC\r\n    //!!! 
may break on x64 platform\r\n    // TLS index is exactly DWORD (not DWORD_PTR), so one has to use indirection\r\n    return (unsigned long)new (info) thread_local_var<void*> ();\r\n#else\r\n    void* p = ctx().alloc(sizeof(thread_local_var<void*>), false, info);\r\n    new (p) thread_local_var<void*> ();\r\n    return (unsigned long)p;\r\n#endif\r\n}\r\n\r\ninline void rl_TlsFree(unsigned long slot, debug_info_param info)\r\n{\r\n#ifndef RL_GC\r\n    delete_impl((thread_local_var<void*>*)slot, info);\r\n#else\r\n    thread_local_var<void*>* t = (thread_local_var<void*>*)slot;\r\n    t->~thread_local_var<void*>();\r\n    ctx().free(t, false, info);\r\n#endif\r\n}\r\n\r\ninline void* rl_TlsGetValue(unsigned long slot, debug_info_param info)\r\n{\r\n    return ((thread_local_var<void*>*)slot)->get(info);\r\n}\r\n\r\ninline int rl_TlsSetValue(unsigned long slot, void* value, debug_info_param info)\r\n{\r\n    ((thread_local_var<void*>*)slot)->set(value, info);\r\n    return 1;\r\n}\r\n\r\n\r\n#define TlsAlloc() rl::rl_TlsAlloc($)\r\n#define TlsFree(slot) rl::rl_TlsFree((slot), $)\r\n#define TlsGetValue(slot) rl::rl_TlsGetValue((slot), $)\r\n#define TlsSetValue(slot, value) rl::rl_TlsSetValue((slot), (value), $)\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/thread_local_ctx.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_THREAD_LOCAL_CTX_HPP\r\n#define RL_THREAD_LOCAL_CTX_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"test_params.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\nstruct thread_local_context_iface\r\n{\r\n    virtual int         thread_local_alloc          (void (*dtor)(intptr_t)) = 0;\r\n    virtual void        thread_local_free           (int index) = 0;\r\n    virtual void        thread_local_set            (int index, intptr_t value) = 0;\r\n    virtual intptr_t    thread_local_get            (int index) = 0;\r\n    virtual             ~thread_local_context_iface () {} // to calm down g++\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename base_t, thread_id_t thread_count>\r\nclass thread_local_contxt_impl : protected base_t\r\n{\r\npublic:\r\n    thread_local_contxt_impl(thread_id_t thread_count_param, test_params& params)\r\n        : base_t(thread_count_param, params)\r\n    {\r\n    }\r\n\r\n    void iteration_begin()\r\n    {\r\n        base_t::iteration_begin();\r\n\r\n        for (size_t ent = 0; ent != entries_.size(); ent += 1)\r\n        {\r\n            for (size_t th = 0; th != thread_count; th += 1)\r\n            {\r\n                entries_[ent].value_[th] = 0;\r\n            }\r\n        }\r\n    }\r\n\r\nprivate:\r\n    struct entry\r\n    {\r\n        bool            alive_;\r\n        intptr_t        value_ [thread_count];\r\n        void            (*dtor_) (intptr_t);\r\n    };\r\n\r\n    typename vector<entry>::type            entries_;\r\n    using 
base_t::current_thread;\r\n\r\n    virtual int         thread_local_alloc          (void (*dtor)(intptr_t))\r\n    {\r\n        int index = (int)entries_.size();\r\n        entries_.resize(index + 1);\r\n        entry& ent = entries_[index];\r\n        ent.alive_ = true;\r\n        ent.dtor_ = dtor;\r\n        for (size_t i = 0; i != thread_count; ++i)\r\n        {\r\n            ent.value_[i] = 0;\r\n        }\r\n        return index;\r\n    }\r\n\r\n    virtual void        thread_local_free           (int index)\r\n    {\r\n        RL_VERIFY(index >= 0 && (size_t)index < entries_.size());\r\n        entry& ent = entries_[index];\r\n        RL_VERIFY(ent.alive_);\r\n        ent.alive_ = false;\r\n        if (ent.dtor_)\r\n        {\r\n            for (size_t i = 0; i != thread_count; ++i)\r\n            {\r\n                if (ent.value_[i])\r\n                {\r\n                    ent.dtor_(ent.value_[i]);\r\n                }\r\n            }\r\n        }\r\n    }\r\n\r\n    virtual void        thread_local_set            (int index, intptr_t value)\r\n    {\r\n        RL_VERIFY(index >= 0 && (size_t)index < entries_.size());\r\n        entry& ent = entries_[index];\r\n        RL_VERIFY(ent.alive_);\r\n        ent.value_[current_thread()] = value;\r\n    }\r\n\r\n    virtual intptr_t    thread_local_get            (int index)\r\n    {\r\n        RL_VERIFY(index >= 0 && (size_t)index < entries_.size());\r\n        entry& ent = entries_[index];\r\n        RL_VERIFY(ent.alive_);\r\n        return ent.value_[current_thread()];\r\n    }\r\n};\r\n\r\n\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/var.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_VAR_HPP\r\n#define RL_VAR_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"context.hpp\"\r\n#include \"signature.hpp\"\r\n#include \"atomic_events.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\ntemplate<typename T>\r\nclass var;\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass var_proxy_const\r\n{\r\npublic:\r\n    var_proxy_const(var<T> const& v, debug_info_param info)\r\n        : var_(const_cast<var<T>&>(v))\r\n        , info_(info)\r\n    {\r\n    }\r\n\r\n    T load() const\r\n    {\r\n        return var_.load(info_);\r\n    }\r\n\r\n    operator T () const\r\n    {\r\n        return this->load();\r\n    }\r\n\r\n    T const operator -> () const\r\n    {\r\n        return this->load();\r\n    }\r\n\r\nprotected:\r\n    var<T>& var_;\r\n    debug_info info_;\r\n\r\nprivate:\r\n    var_proxy_const& operator = (var_proxy_const const&);\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass var_proxy : public var_proxy_const<T>\r\n{\r\npublic:\r\n    typedef typename atomic_add_type<T>::type add_type;\r\n\r\n    var_proxy(var<T>& v, debug_info_param info)\r\n        : var_proxy_const<T>(v, info)\r\n    {\r\n    }\r\n\r\n    void store(T value)\r\n    {\r\n        this->var_.store(value, this->info_);\r\n    }\r\n\r\n    template<typename Y>\r\n    T operator = (var_proxy_const<Y> const& v)\r\n    {\r\n        Y y = v.load();\r\n        T t = y;\r\n        store(t);\r\n        return t;\r\n    }\r\n\r\n    T operator = (var_proxy<T> const& v)\r\n    {\r\n        T t = v.load();\r\n   
     store(t);\r\n        return t;\r\n    }\r\n\r\n    T operator = (T value)\r\n    {\r\n        store(value);\r\n        return value;\r\n    }\r\n\r\n    T operator -> ()\r\n    {\r\n        return this->load();\r\n    }\r\n\r\n    T operator ++ (int)\r\n    {\r\n        T v = this->load();\r\n        T y = ++v;\r\n        this->store(y);\r\n        return v;\r\n    }\r\n\r\n    T operator -- (int)\r\n    {\r\n        T v = this->load();\r\n        T y = --v;\r\n        this->store(y);\r\n        return v;\r\n    }\r\n\r\n    T operator ++ ()\r\n    {\r\n        T v = this->load();\r\n        this->store(++v);\r\n        return v;\r\n    }\r\n\r\n    T operator -- ()\r\n    {\r\n        T v = this->load();\r\n        this->store(--v);\r\n        return v;\r\n    }\r\n\r\n    T operator += (add_type value)\r\n    {\r\n        T v = this->load();\r\n        v += value;\r\n        this->store(v);\r\n        return v;\r\n    }\r\n\r\n    T operator -= (add_type value)\r\n    {\r\n        T v = this->load();\r\n        v -= value;\r\n        this->store(v);\r\n        return v;\r\n    }\r\n\r\n    T operator &= (T value)\r\n    {\r\n        T v = this->load();\r\n        v &= value;\r\n        this->store(v);\r\n        return v;\r\n    }\r\n\r\n    T operator |= (T value)\r\n    {\r\n        T v = this->load();\r\n        v |= value;\r\n        this->store(v);\r\n        return v;\r\n    }\r\n\r\n    T operator ^= (T value)\r\n    {\r\n        T v = this->load();\r\n        v ^= value;\r\n        this->store(v);\r\n        return v;\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nstruct var_event\r\n{\r\n    debug_info var_info_;\r\n    var<T> const* var_addr_;\r\n    T value_;\r\n    bool load_;\r\n\r\n    template<typename Y>\r\n    struct map_type\r\n    {\r\n        typedef T result;\r\n    };\r\n\r\n    template<typename Y>\r\n    struct map_type<Y*>\r\n    {\r\n        typedef void* result;\r\n    };\r\n\r\n    void output(std::ostream& s) const\r\n 
   {\r\n        s << \"<\" << std::hex << var_addr_ << std::dec << \"> \"\r\n            << (load_ ? \"load\" : \"store\") << \", value=\" << (typename map_type<T>::result)value_;\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<typename T>\r\nclass var\r\n{\r\npublic:\r\n    var()\r\n    {\r\n        value_ = 0;\r\n        initialized_ = false;\r\n        data_ = ctx().var_ctor();\r\n    }\r\n\r\n    var(T value)\r\n    {\r\n        init(value);\r\n    }\r\n\r\n    var(var const& r)\r\n    {\r\n        init(r.load($));\r\n    }\r\n\r\n    ~var()\r\n    {\r\n        sign_.check($);\r\n        ctx().var_dtor(data_);\r\n    }\r\n\r\n    var_proxy_const<T> operator () (debug_info_param info) const\r\n    {\r\n        return var_proxy_const<T>(*this, info);\r\n    }\r\n\r\n    var_proxy<T> operator () (debug_info_param info)\r\n    {\r\n        return var_proxy<T>(*this, info);\r\n    }\r\n\r\nprivate:\r\n    T value_;\r\n    bool initialized_;\r\n\r\n    var_data* data_;\r\n\r\n    signature<123456789> sign_;\r\n    friend class var_proxy<T>;\r\n    friend class var_proxy_const<T>;\r\n\r\n    void init(T value)\r\n    {\r\n        context& c = ctx();\r\n        initialized_ = true;\r\n        value_ = value;\r\n        data_ = ctx().var_ctor();\r\n        data_->init(*c.threadx_);\r\n    }\r\n\r\n    T load(debug_info_param info) const\r\n    {\r\n        context& c = ctx();\r\n        sign_.check(info);\r\n\r\n        if (false == initialized_)\r\n        {\r\n            RL_HIST(var_event<T>) {RL_INFO, this, T(), true} RL_HIST_END();\r\n            RL_ASSERT_IMPL(false, test_result_unitialized_access, \"\", info);\r\n        }\r\n\r\n        if (false == c.invariant_executing)\r\n        {\r\n            if (false == data_->load(*c.threadx_))\r\n            {\r\n                RL_HIST(var_event<T>) {RL_INFO, this, T(), true} RL_HIST_END();\r\n                RL_ASSERT_IMPL(false, test_result_data_race, \"data race detected\", info);\r\n            }\r\n\r\n            T 
const v = value_;\r\n\r\n            RL_HIST(var_event<T>) {RL_INFO, this, v, true} RL_HIST_END();\r\n\r\n            return v;\r\n        }\r\n        else\r\n        {\r\n            return value_;\r\n        }\r\n    }\r\n\r\n    void store(T v, debug_info_param info)\r\n    {\r\n        context& c = ctx();\r\n        RL_VERIFY(false == c.invariant_executing);\r\n        sign_.check(info);\r\n\r\n        if (initialized_)\r\n        {\r\n            if (false == data_->store(*c.threadx_))\r\n            {\r\n                RL_HIST(var_event<T>) {RL_INFO, this, T(), false} RL_HIST_END();\r\n                RL_ASSERT_IMPL(false, test_result_data_race, \"data race detected\", info);\r\n            }\r\n        }\r\n        else\r\n        {\r\n            initialized_ = true;\r\n            data_->init(*c.threadx_);\r\n        }\r\n\r\n        value_ = v;\r\n\r\n        RL_HIST(var_event<T>) {RL_INFO, this, v, false} RL_HIST_END();\r\n    }\r\n\r\n    var& operator = (var const& r);\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nstruct var_data_impl : var_data\r\n{\r\n    typedef thread_info<thread_count> thread_info_t;\r\n\r\n    timestamp_t load_acq_rel_timestamp_ [thread_count];\r\n    timestamp_t store_acq_rel_timestamp_ [thread_count];\r\n\r\n    var_data_impl()\r\n    {\r\n        foreach<thread_count>(load_acq_rel_timestamp_, assign_zero);\r\n        foreach<thread_count>(store_acq_rel_timestamp_, assign_zero);\r\n    }\r\n\r\n    virtual void init(thread_info_base& th)\r\n    {\r\n        th.own_acq_rel_order_ += 1;\r\n        store_acq_rel_timestamp_[th.index_] = th.own_acq_rel_order_;\r\n    }\r\n\r\n    virtual bool store(thread_info_base& th)\r\n    {\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            if (th.acq_rel_order_[i] < store_acq_rel_timestamp_[i])\r\n                return false;\r\n            if (th.acq_rel_order_[i] < load_acq_rel_timestamp_[i])\r\n                return false;\r\n        
}\r\n\r\n        th.own_acq_rel_order_ += 1;\r\n        store_acq_rel_timestamp_[th.index_] = th.own_acq_rel_order_;\r\n        return true;\r\n    }\r\n\r\n    virtual bool load(thread_info_base& th)\r\n    {\r\n        for (thread_id_t i = 0; i != thread_count; ++i)\r\n        {\r\n            if (th.acq_rel_order_[i] < store_acq_rel_timestamp_[i])\r\n                return false;\r\n        }\r\n\r\n        th.own_acq_rel_order_ += 1;\r\n        load_acq_rel_timestamp_[th.index_] = th.own_acq_rel_order_;\r\n        return true;\r\n    }\r\n\r\n    virtual ~var_data_impl() {} // just to calm down gcc\r\n};\r\n\r\n\r\n\r\n}\r\n\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/volatile.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_VOLATILE_HPP\r\n#define RL_VOLATILE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n}\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/waitset.hpp",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_WAITSET_HPP\r\n#define RL_WAITSET_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#include \"base.hpp\"\r\n#include \"thread_base.hpp\"\r\n#include \"context_base.hpp\"\r\n\r\n\r\nnamespace rl\r\n{\r\n\r\n\r\ntemplate<thread_id_t thread_count>\r\nclass waitset\r\n{\r\npublic:\r\n    waitset()\r\n    {\r\n        size_ = 0;\r\n    }\r\n\r\n    unpark_reason park_current(context& c,\r\n                               bool is_timed,\r\n                               bool allow_spurious_wakeup,\r\n                               bool do_switch,\r\n                               debug_info_param info)\r\n    {\r\n        RL_VERIFY(size_ < thread_count);\r\n        thread_info_base* th = c.threadx_;\r\n        thread_desc desc = {th, 0, 0, 0, false, do_switch};\r\n        set_[size_] = desc;\r\n        size_ += 1;\r\n        unpark_reason reason = c.park_current_thread(is_timed, allow_spurious_wakeup, do_switch, info);\r\n        if (reason == unpark_reason_normal)\r\n        {\r\n            if (do_switch)\r\n                RL_VERIFY(c.threadx_->temp_switch_from_ != -1);\r\n            else\r\n                RL_VERIFY(c.threadx_->temp_switch_from_ == -1);\r\n        }\r\n        else\r\n        {\r\n            remove(th);\r\n        }\r\n        return reason;\r\n    }\r\n\r\n    static unpark_reason park_current(context& c,\r\n                                      waitset** ws,\r\n                                      win_waitable_object** wo,\r\n                                      size_t count,\r\n            
                          bool wait_all,\r\n                                      bool is_timed,\r\n                                      bool do_switch,\r\n                                      debug_info_param info)\r\n    {\r\n        thread_info_base* th = c.threadx_;\r\n        thread_desc desc = {th, (unsigned)count, ws, wo, wait_all, do_switch};\r\n        for (unsigned wsi = 0; wsi != count; ++wsi)\r\n        {\r\n            RL_VERIFY(ws[wsi]->size_ < thread_count);\r\n            ws[wsi]->set_[ws[wsi]->size_] = desc;\r\n            ws[wsi]->size_ += 1;\r\n        }\r\n        unpark_reason reason = c.park_current_thread(is_timed, false, do_switch, info);\r\n        if (reason == unpark_reason_normal)\r\n        {\r\n            if (do_switch)\r\n                RL_VERIFY(c.threadx_->temp_switch_from_ != -1);\r\n            else\r\n                RL_VERIFY(c.threadx_->temp_switch_from_ == -1);\r\n        }\r\n        else\r\n        {\r\n            remove(th, ws, (unsigned)count);\r\n        }\r\n        return reason;\r\n    }\r\n\r\n    bool unpark_one(context& c, debug_info_param info)\r\n    {\r\n        if (0 == size_)\r\n            return false;\r\n        //!!! 
too high preassure on full sched\r\n        thread_id_t idx = c.rand(size_, sched_type_user);\r\n        if (try_remove(c, idx, info))\r\n            return true;\r\n        for (idx = 0; idx != size_; idx += 1)\r\n        {\r\n            if (try_remove(c, idx, info))\r\n                return true;\r\n        }\r\n        return false;\r\n    }\r\n\r\n    thread_id_t unpark_all(context& c, debug_info_param info)\r\n    {\r\n        thread_id_t cnt = 0;\r\n        for (thread_id_t idx = 0; idx != size_; idx += 1)\r\n        {\r\n            if (try_remove(c, idx, info))\r\n            {\r\n                cnt += 1;\r\n                idx -= 1;\r\n            }\r\n        }\r\n        return cnt;\r\n    }\r\n\r\n    thread_id_t size() const\r\n    {\r\n        return size_;\r\n    }\r\n\r\n    operator bool () const\r\n    {\r\n        return 0 != size_;\r\n    }\r\n\r\nprivate:\r\n    struct thread_desc\r\n    {\r\n        thread_info_base*       th_;\r\n        unsigned                count_;     // 0 - wfso, !0 - wfmo\r\n        waitset**               ws_;        // 0 - wfso, !0 - wfmo\r\n        win_waitable_object**   wo_;        // 0 - wfso, !0 - wfmo\r\n        bool                    wait_all_;\r\n        bool                    do_switch_;\r\n    };\r\n\r\n    thread_desc                 set_ [thread_count];\r\n    thread_id_t                 size_;\r\n\r\n    bool try_remove(context& c, thread_id_t const idx, debug_info_param info)\r\n    {\r\n        RL_VERIFY(idx < size_);\r\n        thread_desc const& d = set_[idx];\r\n        if (d.count_ != 0 && d.wait_all_ == true)\r\n        {\r\n            for (size_t i = 0; i != d.count_; i += 1)\r\n            {\r\n                if (d.wo_[i]->is_signaled(info) == false)\r\n                    return false;\r\n            }\r\n        }\r\n        size_t const tid = d.th_->index_;\r\n        bool const do_switch = d.do_switch_;\r\n        if (d.ws_)\r\n            remove(d.th_, d.ws_, d.count_);\r\n        
else\r\n            remove(d.th_);\r\n        c.unpark_thread(tid, do_switch, info);\r\n        return true;\r\n    }\r\n\r\n    void remove(thread_info_base* th)\r\n    {\r\n        thread_id_t size = size_;\r\n        thread_id_t i = 0;\r\n        for (; i != size; ++i)\r\n        {\r\n            if (set_[i].th_ == th)\r\n                break;\r\n        }\r\n        RL_VERIFY(i != size);\r\n        for (thread_id_t j = i + 1; j != size; ++j)\r\n        {\r\n            set_[j - 1] = set_[j];\r\n        }\r\n        size_ -= 1;\r\n    }\r\n\r\n    static void remove(thread_info_base* th, waitset** ws, unsigned count)\r\n    {\r\n        for (unsigned wsi = 0; wsi != count; ++wsi)\r\n        {\r\n            ws[wsi]->remove(th);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n}\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/relacy/windows.h",
    "content": "/*  Relacy Race Detector\r\n *  Copyright (c) 2008-2013, Dmitry S. Vyukov\r\n *  All rights reserved.\r\n *  This software is provided AS-IS with no warranty, either express or implied.\r\n *  This software is distributed under a license and may not be copied,\r\n *  modified or distributed except as expressly authorized under the\r\n *  terms of the license contained in the file LICENSE in this distribution.\r\n */\r\n\r\n#ifndef RL_WINDOWS_IFACE_HPP\r\n#define RL_WINDOWS_IFACE_HPP\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\n#include \"relacy.hpp\"\r\n#include \"stdlib/windows.hpp\"\r\n\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/addr_hash.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\nstruct test_addr_hash : rl::test_suite<test_addr_hash, 2>\r\n{\r\n    void* p1;\r\n    void* p2;\r\n    size_t h1, h2;\r\n    static size_t const table_size = 1000;\r\n\r\n    void before()\r\n    {\r\n        p1 = malloc(0);\r\n        h1 = rl::hash_ptr(p1, table_size);\r\n        p2 = malloc(0);\r\n        h2 = rl::hash_ptr(p2, table_size);\r\n    }\r\n\r\n    void after()\r\n    {\r\n        free(p1);\r\n        free(p2);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        assert(h1 == rl::hash_ptr(p1, table_size));\r\n        assert(h2 == rl::hash_ptr(p2, table_size));\r\n        assert(rl::hash_ptr(&index, table_size) == rl::hash_ptr(&index,table_size));\r\n        assert(rl::hash_ptr(0, table_size) == rl::hash_ptr(0, table_size));\r\n\r\n    }\r\n};\r\n\r\n\r\nstruct test_addr_hash2 : rl::test_suite<test_addr_hash2, 2, rl::test_result_until_condition_hit>\r\n{\r\n    static size_t const table_size = 4;\r\n    std::atomic<int> table [table_size];\r\n\r\n    void before()\r\n    {\r\n        for (size_t i = 0; i != table_size; i += 1)\r\n            table[i].store(0, std::memory_order_relaxed);\r\n    }\r\n\r\n    void thread(unsigned)\r\n    {\r\n        for (size_t i = 0; i != table_size + 1; i += 1)\r\n        {\r\n            void* p = malloc(0);\r\n            size_t idx = rl::hash_ptr(p, table_size);\r\n            free(p);\r\n            int v = table[idx].exchange(1, std::memory_order_relaxed);\r\n            RL_UNTIL(v);\r\n        }\r\n    }\r\n};\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/advanced.txt",
    "content": "Test parameters. You can specify various parameters for test.\r\nrl::test_params p;\r\np.search_type = rl::fair_context_bound_scheduler_type;\r\np.context_bound = 1;\r\np.execution_depth_limit = 1000;\r\nrl::simulate<test_t>(p);\r\n\r\nThe main parameter is scheduler type used for simulation. There is 3 types of scheduler:\r\nrandom_scheduler_type - random exploration of state space\r\nfair_full_search_scheduler_type - exhaustive systematic exploration of state space\r\nfair_context_bound_scheduler_type - systematic exploration of state space with limit on context switches.\r\n\r\nFor random_scheduler_type you can specify 'iteration_count' parameter - number of explored executions.\r\nFor fair_context_bound_scheduler_type you can specify 'context_bound' parameter - limit on context switches.\r\n\r\nAlso you can specify 'execution_depth_limit' parameter - used for livelock detection. All executions with trace longer than execution_depth_limit will be treated as livelocked (or non-terminating).\r\n\r\nAlso from test_params structure you can receive output parameters from simulation. Main output parameter is 'test_result' which describes cause of test failure.\r\n\r\nIf you use fair_full_search_scheduler_type or fair_context_bound_scheduler_type, in order to ensure fairness of scheduler, you must use 'yield' calls in all 'spin-loops', otherwise simulation will report non-terminating execution. 
Example:\r\n\r\nstruct race_seq_ld_ld_test : rl::test_suite<race_seq_ld_ld_test, 2>\r\n{\r\n    std::atomic<int> a;\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            x($).load();\r\n            a($).store(1, std::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            rl::backoff b;\r\n            while (0 == a($).load(rl::memory_order_relaxed))\r\n                  b.yield($);\r\n            x($).load();\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/compare_swap.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\ntemplate<int T>\r\nstruct cas_spurious_fail_test : rl::test_suite<cas_spurious_fail_test<T>, 1, rl::test_result_until_condition_hit>\r\n{\r\n    std::atomic<int> x;\r\n    std::atomic<int> y;\r\n\r\n    void before()\r\n    {\r\n        x.store(0, std::memory_order_relaxed);\r\n        y.store(0, std::memory_order_relaxed);\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        int cmp = 0;\r\n        if (x.compare_exchange_weak(cmp, 1, std::memory_order_seq_cst, std::memory_order_seq_cst))\r\n        {\r\n            cmp = 1;\r\n            if (x.compare_exchange_weak(cmp, 2, std::memory_order_seq_cst))\r\n            {\r\n                cmp = 0;\r\n                if (y.compare_exchange_weak(cmp, 1, std::memory_order_seq_cst))\r\n                {\r\n                }\r\n                else\r\n                {\r\n                    if (T == 2) RL_UNTIL(true);\r\n                }\r\n            }\r\n            else\r\n            {\r\n                if (T == 1) RL_UNTIL(true);\r\n            }\r\n        }\r\n        else\r\n        {\r\n            if (T == 0) RL_UNTIL(true);\r\n        }\r\n    }\r\n};\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/condvar.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\nstruct test_condvar : rl::test_suite<test_condvar, 2>\r\n{\r\n    std::mutex mtx;\r\n    std::condition_variable cv;\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        data($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            mtx.lock($);\r\n            data($) += 1;\r\n            mtx.unlock($);\r\n            cv.notify_one($);\r\n        }\r\n        else\r\n        {\r\n            mtx.lock($);\r\n            while (0 == data($))\r\n            {\r\n                cv.wait(mtx, $);\r\n            }\r\n            mtx.unlock($);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_condvar2 : rl::test_suite<test_condvar2, 3>\r\n{\r\n    rl::var<int> stage;\r\n    std::mutex mtx;\r\n    std::condition_variable cv;\r\n\r\n    void before()\r\n    {\r\n        stage($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            mtx.lock($);\r\n            stage($) += 1;\r\n            cv.notify_all($);\r\n            while (stage($) != 2)\r\n                cv.wait(mtx, $);\r\n            mtx.unlock($);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            mtx.lock($);\r\n            while (stage($) != 1)\r\n                cv.wait(mtx, $);\r\n            stage($) += 1;\r\n            cv.notify_all($);\r\n            mtx.unlock($);\r\n        }\r\n        else if (2 == index)\r\n        {\r\n            mtx.lock($);\r\n            while (stage($) != 2)\r\n                cv.wait(mtx, $);\r\n            mtx.unlock($);\r\n        }\r\n    }\r\n};\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/data_race.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\n\r\nstruct race_ld_ld_test : rl::test_suite<race_ld_ld_test, 2>\r\n{\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n            x($).load();\r\n        else\r\n            x($).load();\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct race_ld_st_test : rl::test_suite<race_ld_st_test, 2, rl::test_result_data_race>\r\n{\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n            x($).load();\r\n        else\r\n            x($).store(1);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct race_st_st_test : rl::test_suite<race_st_st_test, 2, rl::test_result_data_race>\r\n{\r\n    rl::var<int> x;\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n            x($).store(1);\r\n        else\r\n            x($).store(1);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct race_seq_ld_ld_test : rl::test_suite<race_seq_ld_ld_test, 2>\r\n{\r\n    std::atomic<int> a;\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            x($).load();\r\n            a.store(1, std::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            rl::backoff b;\r\n            while (0 == a.load(std::memory_order_relaxed))\r\n                b.yield($);\r\n            x($).load();\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct race_seq_ld_st_test : rl::test_suite<race_seq_ld_st_test, 2, rl::test_result_data_race>\r\n{\r\n    std::atomic<int> a;\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n      
  {\r\n            x($).load();\r\n            a.store(1, std::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            rl::backoff b;\r\n            while (0 == a.load(std::memory_order_relaxed))\r\n                b.yield($);\r\n            x($).store(1);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct race_seq_st_ld_test : rl::test_suite<race_seq_st_ld_test, 2, rl::test_result_data_race>\r\n{\r\n    std::atomic<int> a;\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            x($).store(1);\r\n            a.store(1, std::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            rl::backoff b;\r\n            while (0 == a.load(std::memory_order_relaxed))\r\n                b.yield($);\r\n            x($).load();\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct race_seq_st_st_test : rl::test_suite<race_seq_st_st_test, 2, rl::test_result_data_race>\r\n{\r\n    std::atomic<int> a;\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            x($).store(1);\r\n            a.store(1, std::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            rl::backoff b;\r\n            while (0 == a.load(std::memory_order_relaxed))\r\n                b.yield($);\r\n            VAR(x) = 1;\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct race_uninit_test : rl::test_suite<race_uninit_test, 2, rl::test_result_unitialized_access>\r\n{\r\n    std::atomic<int> a;\r\n    std::atomic<int> x;\r\n\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            x.store(1, std::memory_order_relaxed);\r\n            a.store(1, std::memory_order_relaxed);\r\n        
}\r\n        else\r\n        {\r\n            rl::backoff b;\r\n            while (0 == a.load(std::memory_order_relaxed))\r\n                b.yield($);\r\n            x.load(std::memory_order_seq_cst);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct race_indirect_test : rl::test_suite<race_indirect_test, 2, rl::test_result_data_race>\r\n{\r\n    std::atomic<int> a;\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            x($) = 1;\r\n            a.store(1, std::memory_order_release);\r\n            (void)(int)x($);\r\n        }\r\n        else\r\n        {\r\n            rl::backoff b;\r\n            while (0 == a.load(std::memory_order_acquire))\r\n                b.yield($);\r\n            (void)(int)x($);\r\n            x($) = 2;\r\n        }\r\n    }\r\n};\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/detection.txt",
    "content": " - Race condition (accoring to ISO C++0x)\r\n - Access to uninitialized variable\r\n - Access to freed memory\r\n - Double free\r\n - Memory leak\r\n - Deadlock\r\n - Livelock\r\n - User assert failed\r\n - User invariant failed\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/dyn_thread.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy.hpp\"\r\n#include \"../relacy/dyn_thread.hpp\"\r\n\r\n\r\n\r\nstruct dyn_thread_basic_test : rl::test_suite<dyn_thread_basic_test, 2>\r\n{\r\n    static unsigned const dynamic_thread_count = 4;\r\n\r\n    rl::var<int> data1;\r\n    rl::var<int> data2;\r\n    rl::atomic<int> data3;\r\n\r\n    void before()\r\n    {\r\n        data3($) = 0;\r\n    }\r\n\r\n    static void* thread1(void* p)\r\n    {\r\n        dyn_thread_basic_test& self = *(dyn_thread_basic_test*)p;\r\n        self.data1($) = 1;\r\n        return 0;\r\n    }\r\n\r\n    static void* thread2(void* p)\r\n    {\r\n        dyn_thread_basic_test& self = *(dyn_thread_basic_test*)p;\r\n        self.data2($) = 2;\r\n        return 0;\r\n    }\r\n\r\n    static void* thread3(void* p)\r\n    {\r\n        dyn_thread_basic_test& self = *(dyn_thread_basic_test*)p;\r\n        self.data3.store(3, rl::memory_order_relaxed);\r\n        return 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index == 0)\r\n        {\r\n            rl::dyn_thread t1;\r\n            t1.start(&dyn_thread_basic_test::thread1, this);\r\n            rl::dyn_thread t2;\r\n            t2.start(&dyn_thread_basic_test::thread2, this);\r\n            t1.join();\r\n            t2.join();\r\n            RL_ASSERT(data1($) == 1);\r\n            RL_ASSERT(data2($) == 2);\r\n        }\r\n        else if (index == 1)\r\n        {\r\n            rl::dyn_thread t1;\r\n            t1.start(&dyn_thread_basic_test::thread3, this);\r\n            while (data3.load(rl::memory_order_relaxed) != 3)\r\n                rl::yield(1, $);\r\n            t1.join();\r\n        }\r\n        else\r\n        {\r\n            RL_ASSERT(false);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct dyn_thread_win32_test : rl::test_suite<dyn_thread_win32_test, 2>\r\n{\r\n    static unsigned const dynamic_thread_count = 4;\r\n\r\n    rl::var<int> data1;\r\n    rl::var<int> data2;\r\n    
rl::atomic<int> data3;\r\n\r\n    void before()\r\n    {\r\n        data3($) = 0;\r\n    }\r\n\r\n    static unsigned long RL_STDCALL thread1(void* p)\r\n    {\r\n        dyn_thread_win32_test& self = *(dyn_thread_win32_test*)p;\r\n        self.data1($) = 1;\r\n        return 0;\r\n    }\r\n\r\n    static unsigned long RL_STDCALL thread2(void* p)\r\n    {\r\n        dyn_thread_win32_test& self = *(dyn_thread_win32_test*)p;\r\n        self.data2($) = 2;\r\n        return 0;\r\n    }\r\n\r\n    static unsigned long RL_STDCALL thread3(void* p)\r\n    {\r\n        dyn_thread_win32_test& self = *(dyn_thread_win32_test*)p;\r\n        self.data3.store(3, rl::memory_order_relaxed);\r\n        return 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index == 0)\r\n        {\r\n            HANDLE threads [2];\r\n            threads[0] = CreateThread(0, 0, &dyn_thread_win32_test::thread1, this, 0, 0);\r\n            threads[1] = CreateThread(0, 0, &dyn_thread_win32_test::thread2, this, 0, 0);\r\n            WaitForMultipleObjects(2, threads, 1, INFINITE);\r\n            RL_ASSERT(VAR(data1) == 1);\r\n            RL_ASSERT(VAR(data2) == 2);\r\n        }\r\n        else if (index == 1)\r\n        {\r\n            HANDLE th = CreateThread(0, 0, &dyn_thread_win32_test::thread3, this, 0, 0);\r\n            while (data3.load(rl::memory_order_relaxed) != 3)\r\n                rl::yield(1, $);\r\n            WaitForSingleObject(th, INFINITE);\r\n        }\r\n        else\r\n        {\r\n            RL_ASSERT(false);\r\n        }\r\n    }\r\n};\r\n\r\n\r\nstruct dyn_thread_visibility_test : rl::test_suite<dyn_thread_visibility_test, 1>\r\n{\r\n    static unsigned const dynamic_thread_count = 1;\r\n\r\n    rl::var<int> data;\r\n\r\n    static unsigned long RL_STDCALL thread(void* p)\r\n    {\r\n        dyn_thread_visibility_test& self = *(dyn_thread_visibility_test*)p;\r\n        RL_ASSERT(self.data($) == 1);\r\n        self.data($) = 2;\r\n        return 
0;\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        data($) = 1;\r\n        HANDLE th = CreateThread(0, 0, &dyn_thread_visibility_test::thread, this, 0, 0);\r\n        WaitForSingleObject(th, INFINITE);\r\n        RL_ASSERT(data($) == 2);\r\n    }\r\n};\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/event.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\nstruct test_event_auto : rl::test_suite<test_event_auto, 2>\r\n{\r\n    HANDLE ev;\r\n    VAR_T(int) data;\r\n\r\n    void before()\r\n    {\r\n        VAR(data) = 0;\r\n        ev = CreateEvent(0, 0, 0, 0);\r\n    }\r\n\r\n    void after()\r\n    {\r\n        CloseHandle(ev);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            VAR(data) = 1;\r\n            SetEvent(ev);\r\n        }\r\n        else\r\n        {\r\n            unsigned rv = WaitForSingleObject(ev, INFINITE);\r\n            assert(rv == WAIT_OBJECT_0);\r\n            assert(VAR(data) == 1);\r\n\t\t\t\t\t\trv = WaitForSingleObject(ev, 0);\r\n\t\t\t\t\t\tassert(rv == WAIT_TIMEOUT);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_event_atomic : rl::test_suite<test_event_atomic, 2>\r\n{\r\n\tHANDLE ev1;\r\n\tHANDLE ev2;\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tev1 = CreateEvent(0, 0, 0, 0);\r\n\t\tev2 = CreateEvent(0, 0, 0, 0);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(ev1);\r\n\t\tCloseHandle(ev2);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForSingleObject(ev1, INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t\tSetEvent(ev2);\r\n\t\t\trv = WaitForSingleObject(ev2, 0);\r\n\t\t\tassert(rv == WAIT_TIMEOUT);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tunsigned rv = SignalObjectAndWait(ev1, ev2, INFINITE, 0);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t\trv = WaitForSingleObject(ev2, 0);\r\n\t\t\tassert(rv == WAIT_TIMEOUT);\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_event_manual : rl::test_suite<test_event_manual, 2>\r\n{\r\n\tHANDLE ev;\r\n\tVAR_T(int) data;\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tVAR(data) = 0;\r\n\t\tev = CreateEvent(0, 1, 0, 0);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(ev);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned 
index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tVAR(data) = 1;\r\n\t\t\tSetEvent(ev);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForSingleObject(ev, INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t\tassert(VAR(data) == 1);\r\n\t\t\trv = WaitForSingleObject(ev, 0);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/features.txt",
    "content": " - Relaxed ISO C++0x Memory Model. Relaxed/acquire/release/acq_rel/seq_cst memory operations. The only non-supported feature is memory_order_consume, it's simulated with memory_order_acquire.\r\n - Exhaustive automatic error checking (including ABA detection).\r\n - Full-fledged atomics library (with spurious failures in compare_exchange()).\r\n - Memory fences.\r\n - Arbitrary number of threads.\r\n - Detailed execution history for failed tests.\r\n - No false positives.\r\n - Before/after/invariant functions for test suites.\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/fence.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\ntemplate<int index, int mo_index>\r\nstruct fence_synch_test : rl::test_suite<fence_synch_test<index, mo_index>, 2>\r\n{\r\n    std::atomic<int> x;\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned th)\r\n    {\r\n        if (0 == th)\r\n        {\r\n            data($) = 1;\r\n            if (0 == index || 1 == index)\r\n            {\r\n                std::atomic_thread_fence(order().first, $);\r\n                x.store(1, std::memory_order_relaxed);\r\n            }\r\n            else\r\n            {\r\n                x.store(1, order().first, $);\r\n            }\r\n        }\r\n        else\r\n        {\r\n            if (0 == index || 2 == index)\r\n            {\r\n                if (x.load(std::memory_order_relaxed))\r\n                {\r\n                    std::atomic_thread_fence(order().second, $);\r\n                    data($).load();\r\n                }\r\n            }\r\n            else\r\n            {\r\n                if (x.load(order().second, $))\r\n                {\r\n                    data($).load();\r\n                }\r\n            }\r\n        }\r\n    }\r\n\r\n    std::pair<std::memory_order, std::memory_order> order()\r\n    {\r\n        switch (mo_index)\r\n        {\r\n        default: RL_VERIFY(false);\r\n        case 0: return std::make_pair(std::mo_release, std::mo_acquire);\r\n        case 1: return std::make_pair(std::mo_seq_cst, std::mo_seq_cst);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct two_fence_synch_test : rl::test_suite<two_fence_synch_test, 3>\r\n{\r\n    std::atomic<int> x0;\r\n    std::atomic<int> x1;\r\n    rl::var<int> data0;\r\n    rl::var<int> data1;\r\n\r\n    void before()\r\n    {\r\n        x0($) = 0;\r\n        x1($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n           
 data0($) = 1;\r\n            std::atomic_thread_fence(std::memory_order_release);\r\n            x0.store(1, std::memory_order_relaxed);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            data1($) = 1;\r\n            std::atomic_thread_fence(std::memory_order_release);\r\n            x1.store(1, std::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            int y0 = x0.load(std::memory_order_relaxed);\r\n            int y1 = x1.load(std::memory_order_relaxed);\r\n            if (y0 || y1)\r\n            {\r\n                std::atomic_thread_fence(std::memory_order_acquire);\r\n                if (y0)\r\n                    data0($).load();\r\n                if (y1)\r\n                    data1($).load();\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<int index>\r\nstruct seq_cst_fence_test : rl::test_suite<seq_cst_fence_test<index>, 2,\r\n    (rl::test_result_e)((0 == index) * rl::test_result_success\r\n    + (1 == index) * rl::test_result_until_condition_hit)>\r\n{\r\n    std::atomic<int> x0;\r\n    std::atomic<int> x1;\r\n    rl::var<int> r0;\r\n    rl::var<int> r1;\r\n\r\n    void before()\r\n    {\r\n        x0($) = 0;\r\n        x1($) = 0;\r\n    }\r\n\r\n    void thread(unsigned th)\r\n    {\r\n        if (0 == th)\r\n        {\r\n            x0.store(1, std::memory_order_relaxed);\r\n            std::atomic_thread_fence(std::memory_order_seq_cst);\r\n            r0($) = x1.load(std::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            x1.store(1, std::memory_order_relaxed);\r\n            std::atomic_thread_fence(std::memory_order_seq_cst);\r\n            r1($) = x0.load(std::memory_order_relaxed);\r\n        }\r\n    }\r\n\r\n    void after()\r\n    {\r\n        if (0 == index)\r\n            RL_ASSERT(r0($) || r1($));\r\n        else if (1 == index)\r\n            RL_UNTIL(r0($) && r1($));\r\n    
}\r\n};\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/foo.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../relacy/relacy_std.hpp\"\r\n#include \"../relacy/windows.h\"\r\n#include \"../relacy/pthread.h\"\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/futex.hpp",
    "content": "#pragma once\n\n#include \"../relacy/pthread.h\"\n\n\n\nstruct test_futex : rl::test_suite<test_futex, 2>\n{\n\trl::atomic<int> state;\n\tint wakeres;\n\tint waitres;\n\t\n\tvoid before()\n\t{\n\t\tstate.store(0, rl::memory_order_relaxed);\n\t\twakeres = 0;\n\t\twaitres = 0;\n\t}\n\t\n\tvoid after()\n\t{\n\t\tassert((waitres == 0 && wakeres == 1)\n\t\t\t\t\t || (waitres == EWOULDBLOCK && wakeres == 0)\n\t\t\t\t\t || (waitres == EINTR && wakeres == 0));\n\t}\n\t\n\tvoid thread(unsigned index)\n\t{\n\t\tif (index == 0)\n\t\t{\n\t\t\tstate.store(1, std::memory_order_relaxed);\n\t\t\twakeres = futex(&state, FUTEX_WAKE, 1, 0, 0, 0);\n\t\t}\n\t\telse\n\t\t{\n\t\t\twaitres = EINTR;\n\t\t\twhile (state.load(rl::memory_order_relaxed) == 0)\n\t\t\t{\n\t\t\t\twaitres = futex(&state, FUTEX_WAIT, 0, 0, 0, 0);\n\t\t\t}\n\t\t}\n\t}\n};\n\n\n\n\nstruct test_futex_deadlock : rl::test_suite<test_futex_deadlock, 1, rl::test_result_deadlock>\n{\n\trl::atomic<int> state;\n\t\n\tvoid thread(unsigned index)\n\t{\n\t\tstate.store(0, rl::memory_order_relaxed);\n\t\tint rv = futex(&state, FUTEX_WAIT, 0, 0, 0, 0);\n\t\tassert(rv == EINTR);\n\t}\n};\n\n\n\n\nstruct test_futex_sync1 : rl::test_suite<test_futex_sync1, 2, rl::test_result_until_condition_hit>\n{\n\trl::atomic<int> state;\n\tVAR_T(int) data;\n\t\n\tvoid before()\n\t{\n\t\tstate.store(0, rl::memory_order_relaxed);\n\t\tVAR(data) = 0;\n\t}\n\t\n\tvoid thread(unsigned index)\n\t{\n\t\tif (index == 0)\n\t\t{\n\t\t\tVAR(data) = 1;\n\t\t\tstate.store(1, std::memory_order_release);\n\t\t\tfutex(&state, FUTEX_WAKE, 1, 0, 0, 0);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tint rv = futex(&state, FUTEX_WAIT, 0, 0, 0, 0);\n\t\t\tassert(rv == 0 || rv == EWOULDBLOCK || rv == EINTR);\n\t\t\tif (rv == 0)\n\t\t\t{\n\t\t\t\tassert(VAR(data) == 1);\n\t\t\t\tassert(state.load(rl::memory_order_relaxed) == 1);\n\t\t\t\tRL_UNTIL(true);\n\t\t\t}\n\t\t}\n\t}\n};\n\n\n\n\nstruct test_futex_sync2 : rl::test_suite<test_futex_sync2, 2, 
rl::test_result_until_condition_hit>\n{\n\trl::atomic<int> state;\n\tVAR_T(int) data;\n\t\n\tvoid before()\n\t{\n\t\tstate.store(0, rl::memory_order_relaxed);\n\t\tVAR(data) = 0;\n\t}\n\t\n\tvoid thread(unsigned index)\n\t{\n\t\tif (index == 0)\n\t\t{\n\t\t\tVAR(data) = 1;\n\t\t\tstate.store(1, std::memory_order_release);\n\t\t\tfutex(&state, FUTEX_WAKE, 1, 0, 0, 0);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tint rv = futex(&state, FUTEX_WAIT, 0, 0, 0, 0);\n\t\t\tassert(rv == 0 || rv == EWOULDBLOCK || rv == EINTR);\n\t\t\tif (rv == EWOULDBLOCK)\n\t\t\t{\n\t\t\t\tassert(VAR(data) == 1);\n\t\t\t\tassert(state.load(rl::memory_order_relaxed) == 1);\n\t\t\t\tRL_UNTIL(true);\n\t\t\t}\n\t\t}\n\t}\n};\n\n\n\n\nstruct test_futex_intr : rl::test_suite<test_futex_intr, 2, rl::test_result_until_condition_hit>\n{\n\trl::atomic<int> state;\n\tVAR_T(int) data;\n\t\n\tvoid before()\n\t{\n\t\tstate.store(0, rl::memory_order_relaxed);\n\t\tVAR(data) = 0;\n\t}\n\t\n\tvoid thread(unsigned index)\n\t{\n\t\tif (index == 0)\n\t\t{\n\t\t\tVAR(data) = 1;\n\t\t\tstate.store(1, std::memory_order_release);\n\t\t\tfutex(&state, FUTEX_WAKE, 1, 0, 0, 0);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tint rv = futex(&state, FUTEX_WAIT, 0, 0, 0, 0);\n\t\t\tassert(rv == 0 || rv == EWOULDBLOCK || rv == EINTR);\n\t\t\tRL_UNTIL(rv == EINTR);\n\t\t}\n\t}\n};\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/g++/build_all_cygwin_debug.bat",
    "content": "g++ ../../jtest/jtest.cpp -c -o jtest_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../ntest/ntest.cpp -c -o ntest_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/peterson/peterson.cpp -c -o peterson_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/proxy_collector/proxy_collector.cpp -c -o proxy_collector_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/ref_counting/ref_counting.cpp -c -o ref_counting_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/smr/smr.cpp -c -o smr_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/spsc_queue/spsc_queue.cpp -c -o spsc_queue_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/stack/stack.cpp -c -o stack_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/condvar/condvar.cpp -c -o condvar_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/mutex_business_logic/mutex_business_logic.cpp -c -o mutex_business_logic_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/ws_deque/ws_deque.cpp -c -o ws_deque_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/cli_ws_deque/cli_ws_deque.cpp -c -o cli_ws_deque_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../../example/java_ws_deque/java_ws_deque.cpp -c -o java_ws_deque_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\ng++ ../main.cpp -c -o test_debug.exe -D_DEBUG -Wall -DRL_CYGWIN_STUB -march=i686\r\n\r\n\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/g++/build_all_debug.bat",
    "content": "g++ ../../jtest/jtest.cpp -o jtest_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../ntest/ntest.cpp -o ntest_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/peterson/peterson.cpp -o peterson_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/proxy_collector/proxy_collector.cpp -o proxy_collector_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/ref_counting/ref_counting.cpp -o ref_counting_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/smr/smr.cpp -o smr_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/spsc_queue/spsc_queue.cpp -o spsc_queue_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/stack/stack.cpp -o stack_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/condvar/condvar.cpp -o condvar_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/mutex_business_logic/mutex_business_logic.cpp -o mutex_business_logic_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/ws_deque/ws_deque.cpp -o ws_deque_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/cli_ws_deque/cli_ws_deque.cpp -o cli_ws_deque_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../../example/java_ws_deque/java_ws_deque.cpp -o java_ws_deque_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\ng++ ../main.cpp -o test_debug.exe -D_DEBUG -Wall -Wno-deprecated -g\r\n\r\n\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/g++/build_all_release.sh",
    "content": "g++ ../../jtest/jtest.cpp -o jtest_debug.exe -Wall -D_DEBUG\r\ng++ ../../ntest/ntest.cpp -o ntest_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/peterson/peterson.cpp -o peterson_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/proxy_collector/proxy_collector.cpp -o proxy_collector_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/ref_counting/ref_counting.cpp -o ref_counting_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/smr/smr.cpp -o smr_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/spsc_queue/spsc_queue.cpp -o spsc_queue_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/stack/stack.cpp -o stack_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/condvar/condvar.cpp -o condvar_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/mutex_business_logic/mutex_business_logic.cpp -o mutex_business_logic_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/ws_deque/ws_deque.cpp -o ws_deque_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/cli_ws_deque/cli_ws_deque.cpp -o cli_ws_deque_debug.exe -Wall -D_DEBUG\r\ng++ ../../example/java_ws_deque/java_ws_deque.cpp -o java_ws_deque_debug.exe -Wall -D_DEBUG\r\ng++ ../main.cpp -o test_debug.exe -Wall -D_DEBUG\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/g++/build_cygwin_release.cmd",
    "content": "#!/bin/bash\ng++ ../main.cpp -o test_release.exe -DNDEBUG -DRL_CYGWIN_STUB -Wall -O3\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/g++/build_debug.cmd",
    "content": "#!/bin/bash\ng++ ../main.cpp -o test_debug.exe -D_DEBUG -D_XOPEN_SOURCE -Wall -Wno-deprecated -g -O0 -fno-inline\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/g++/build_release.cmd",
    "content": "#!/bin/bash\ng++ ../main.cpp -o test_release.exe -DNDEBUG -Wall -O3 -D_XOPEN_SOURCE -Wno-deprecated\n\n\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/g++/test.cpp",
    "content": "//#ifdef _FORTIFY_SOURCE\n//#undef _FORTIFY_SOURCE\n//#endif\n//#define _FORTIFY_SOURCE 0\n\n#include \"../../relacy/pthread.h\"\n\nclass queue_t\n{\npublic:\n    queue_t()\n    {\n        VAR(head) = 0;\n        VAR(tail) = 0;\n        pthread_mutex_init(&mtx, 0);\n        pthread_cond_init(&cv, 0);\n    }\n    \n    ~queue_t()\n    {\n        pthread_mutex_destroy(&mtx);\n        pthread_cond_destroy(&cv);\n    }\n    \n    void enqueue(void* data)\n    {\n        node_t* n = new node_t;\n        n->VAR(next) = 0;\n        n->VAR(data) = data;\n        bool was_empty = false;\n        \n        pthread_mutex_lock(&mtx);\n        if (VAR(head) == 0)\n        {\n            was_empty = true;\n            VAR(head) = n;\n            VAR(tail) = n;\n        }\n        else\n        {\n            VAR(tail)->VAR(next) = n;\n            VAR(tail) = n;\n        }\n        pthread_mutex_unlock(&mtx);\n        \n        if (was_empty)\n            pthread_cond_broadcast(&cv);\n    }\n    \n    void* dequeue()\n    {\n        node_t* n = 0;\n        \n        pthread_mutex_lock(&mtx);\n        while (VAR(head) == 0)\n            pthread_cond_wait(&cv, &mtx);\n        n = VAR(head);\n        if (n->VAR(next) == 0)\n            VAR(tail) = 0;\n        VAR(head) = n->VAR(next);\n        pthread_mutex_unlock(&mtx);\n        \n        void* data = n->VAR(data);\n        delete n;\n        return data;\n    }\n    \nprivate:\n    struct node_t\n    {\n        VAR_T(node_t*) next;\n        VAR_T(void*) data;\n    };\n    \n    VAR_T(node_t*) head;\n    VAR_T(node_t*) tail;\n    \n    pthread_mutex_t mtx;\n    pthread_cond_t cv;\n};\n\nvoid* enqueue_thread(void* ctx)\n{\n    queue_t* q = static_cast<queue_t*>(ctx);\n    for (size_t i = 0; i != 4; i += 1)\n        q->enqueue((void*)(i + 1));\n    return 0;\n}\n\nvoid* dequeue_thread(void* ctx)\n{\n    queue_t* q = static_cast<queue_t*>(ctx);\n    for (size_t i = 0; i != 4; i += 1)\n    {\n        void* data = 
q->dequeue();\n        assert((int)(uintptr_t)data >= 1 && (int)(uintptr_t)data <= 4);\n    }\n    return 0;\n}\n\nvoid queue_test()\n{\n    queue_t q;\n    \n    pthread_t th [4];\n    for (size_t i = 0; i != 2; i += 1)\n        pthread_create(&th[i], 0, enqueue_thread, &q);\n    for (size_t i = 2; i != 4; i += 1)\n        pthread_create(&th[i], 0, dequeue_thread, &q);\n    \n    void* res = 0;\n    for (size_t i = 0; i != 4; i += 1)\n        pthread_join(th[i], &res);\n}\n\nint main()\n{\n    rl::test_params p;\n    p.iteration_count = 100000;\n    //p.search_type = rl::sched_full;\n    //p.context_bound = 5;\n    //p.execution_depth_limit = 200;\n    rl::execute<queue_test, 4>(p);\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/jtest/jtest.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n#include \"../relacy/relacy_java.hpp\"\r\n\r\n\r\n\r\n\r\n\r\nclass stack\r\n{\r\npublic:\r\n    stack()\r\n        : head_(0)\r\n    {\r\n    }\r\n\r\n    void push(int data)\r\n    {\r\n        rl::var<node*> n = new node ();\r\n        VAR(n)->VAR(data_) = data;\r\n        node* next = head_.load(rl::memory_order_relaxed);\r\n        for (;;)\r\n        {\r\n            VAR(n)->next_.store(next, rl::memory_order_relaxed);\r\n            if (head_.compare_exchange_weak(next, VAR(n), rl::memory_order_release))\r\n                break;\r\n        }\r\n    }\r\n\r\n    int pop()\r\n    {\r\n        node* n = head_.load(rl::memory_order_acquire);\r\n        for (;;)\r\n        {\r\n            if (0 == n)\r\n                break;\r\n            node* next = n->next_.load(rl::memory_order_relaxed);\r\n            if (head_.compare_exchange_weak(n, next, rl::memory_order_acquire))\r\n                break;\r\n        }\r\n        if (n)\r\n        {\r\n            int data = n->VAR(data_);\r\n            return data;\r\n        }\r\n        else\r\n        {\r\n            return 0;\r\n        }\r\n    }\r\n\r\nprivate:\r\n    struct node\r\n    {\r\n        rl::atomic<node*> next_;\r\n        rl::var<int> data_;\r\n    };\r\n\r\n    rl::atomic<node*> head_;\r\n\r\n    stack(stack const&);\r\n    stack& operator = (stack const&);\r\n};\r\n\r\n\r\n\r\n\r\nstruct stack_test : rl::test_suite<stack_test, 4>\r\n{\r\n    stack s_;\r\n\r\n    int produced_count_;\r\n    int consumed_count_;\r\n\r\n    void before()\r\n    {\r\n        produced_count_ = 0;\r\n        consumed_count_ = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        typedef rl::test_suite<stack_test, 4> base_t;\r\n        RL_ASSERT(base_t::params::thread_count == produced_count_);\r\n        RL_ASSERT(base_t::params::thread_count == consumed_count_);\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        s_.push(rand() + 1);\r\n        
produced_count_ += 1;\r\n        int data = s_.pop();\r\n        RL_ASSERT(data);\r\n        consumed_count_ += 1;\r\n    }\r\n};\r\n\r\n\r\nstruct test_api : rl::test_suite<test_api, 1>\r\n{\r\n    void thread(unsigned)\r\n    {\r\n        rl::jvolatile<int> jv1;\r\n        rl::jvolatile<int> jv2 (2);\r\n        rl::jvolatile<int> jv3 (jv2($));\r\n        rl::jvolatile<int> jv4 (jv1);\r\n        jv1($) = jv3($);\r\n        jv1($) = 2;\r\n        (int)jv1($);\r\n        jv1($) += 1;\r\n        jv1($) -= 1;\r\n        int x = jv1($)++;\r\n        x = jv1($)--;\r\n        x = --jv1($);\r\n        x = ++jv1($);\r\n\r\n        rl::AtomicInteger ai, ai2(1), ai3(x), ai4(ai($)), ai5(ai);\r\n        x = ai($).get();\r\n        ai($).set(1);\r\n        x = ai($).addAndGet(2);\r\n        bool b = ai($).compareAndSet(1, 2);\r\n        (void)b;\r\n        x = ai($).addAndGet(2);\r\n        x = ai($).getAndSet(2);\r\n    }\r\n};\r\n\r\nstruct test_seq_cst_volatiles : rl::test_suite<test_seq_cst_volatiles, 2>\r\n{\r\n    rl::jvolatile<int> flag0;\r\n    rl::jvolatile<int> flag1;\r\n    rl::jvolatile<int> turn;\r\n\r\n    rl::var<int> data;\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            flag0($) = 1;\r\n            turn($) = 1;\r\n            while (flag1($) && 1 == turn($))\r\n                rl::yield(1, $);\r\n            data($) = 1;\r\n            flag0($) = 0;\r\n        }\r\n        else\r\n        {\r\n            flag1($) = 1;\r\n            turn($) = 0;\r\n            while (flag0($) && 0 == turn($))\r\n                rl::yield(1, $);\r\n            data($) = 2;\r\n            flag1($) = 0;\r\n        }\r\n    }\r\n};\r\n\r\nstruct test_seq_cst_volatiles2 : rl::test_suite<test_seq_cst_volatiles2, 4>\r\n{\r\n    rl::jvolatile<int> x;\r\n    rl::jvolatile<int> y;\r\n\r\n    int r1, r2, r3, r4;\r\n\r\n    void before()\r\n    {\r\n        r1 = r2 = r3 = r4 = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    
{\r\n        if (0 == index)\r\n        {\r\n            x($) = 0;\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            y($) = 0;\r\n        }\r\n        else if (2 == index)\r\n        {\r\n            r1 = x($);\r\n            r2 = y($);\r\n        }\r\n        else if (3 == index)\r\n        {\r\n            r3 = y($);\r\n            r4 = x($);\r\n        }\r\n    }\r\n\r\n    void after()\r\n    {\r\n        RL_ASSERT(false == (r1 && !r2 && r3 && !r4));\r\n    }\r\n};\r\n\r\ntemplate<int expected>\r\nstruct test_unitialized_var : rl::test_suite<test_unitialized_var<expected>, 2, rl::test_result_until_condition_hit>\r\n{\r\n    rl::jvar<rl::jvar<int>*> www;\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            www($) = new rl::jvar<int> (1);\r\n        }\r\n        else\r\n        {\r\n            while (0 == www($))\r\n                rl::yield(1, $);\r\n            int x = (*www($))($);\r\n            RL_UNTIL(x == expected);\r\n        }\r\n    }\r\n};\r\n\r\n\r\nint main()\r\n{\r\n    rl::simulate_f tests[] = \r\n    {\r\n        //!!! 
broken &rl::simulate<test_unitialized_var<0> >,\r\n        &rl::simulate<test_unitialized_var<1> >,\r\n        &rl::simulate<test_seq_cst_volatiles>,\r\n        &rl::simulate<test_seq_cst_volatiles2>,\r\n        &rl::simulate<test_api>,\r\n        &rl::simulate<stack_test>,\r\n    };\r\n\r\n    for (size_t i = 0; i != sizeof(tests)/sizeof(*tests); ++i)\r\n    {\r\n        rl::ostringstream stream;\r\n        rl::test_params params;\r\n        params.iteration_count = 10000;\r\n        params.output_stream = &stream;\r\n        params.progress_stream = &stream;\r\n        params.context_bound = 2;\r\n        params.execution_depth_limit = 500;\r\n\r\n        if (false == tests[i](params))\r\n        {\r\n            std::cout << std::endl;\r\n            std::cout << \"FAILED\" << std::endl;\r\n            std::cout << stream.str();\r\n            return 1;\r\n        }\r\n        else\r\n        {\r\n            std::cout << params.test_name << \"...OK\" << std::endl;\r\n        }\r\n    }\r\n\r\n    std::cout << std::endl << \"SUCCESS\" << std::endl;\r\n}\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/jtest/msvc8/jtest.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"jtest\", \"jtest.vcproj\", \"{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"rrd\", \"..\\..\\test\\msvc8\\rrd.vcproj\", \"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tDebug|x64 = Debug|x64\r\n\t\tDebug64|Win32 = Debug64|Win32\r\n\t\tDebug64|x64 = Debug64|x64\r\n\t\tRelease|Win32 = Release|Win32\r\n\t\tRelease|x64 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|x64.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug64|Win32.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug64|Win32.Build.0 = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug64|x64.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|x64.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|x64.ActiveCfg = Debug|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|x64.Build.0 = Debug|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|Win32.ActiveCfg = Debug64|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|Win32.Build.0 = 
Debug64|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|x64.ActiveCfg = Debug64|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|x64.Build.0 = Debug64|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|x64.ActiveCfg = Release|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|x64.Build.0 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/jtest/msvc8/jtest.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"jtest\"\r\n\tProjectGUID=\"{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}\"\r\n\tRootNamespace=\"jtest\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMak
eTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"0\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tWholeProgramOptimization=\"false\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tLinkTimeCodeGeneration=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t
\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\jtest.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/jtest/msvc9/jtest.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"jtest\", \"jtest.vcproj\", \"{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"rrd\", \"..\\..\\test\\msvc9\\rrd.vcproj\", \"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tDebug|x64 = Debug|x64\r\n\t\tDebug64|Win32 = Debug64|Win32\r\n\t\tDebug64|x64 = Debug64|x64\r\n\t\tRelease|Win32 = Release|Win32\r\n\t\tRelease|x64 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|x64.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug64|Win32.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug64|Win32.Build.0 = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug64|x64.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|x64.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|x64.ActiveCfg = Debug|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|x64.Build.0 = Debug|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|Win32.ActiveCfg = Debug64|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|Win32.Build.0 = 
Debug64|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|x64.ActiveCfg = Debug64|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|x64.Build.0 = Debug64|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|x64.ActiveCfg = Release|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|x64.Build.0 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/jtest/msvc9/jtest.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"jtest\"\r\n\tProjectGUID=\"{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}\"\r\n\tRootNamespace=\"jtest\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"131072\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VC
ManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\
n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\jtest.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/jtest/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/jtest/stdafx.h",
    "content": "#pragma once\r\n\r\n//#define RL_JAVA_MODE\r\n//#define RL_MSVC_OUTPUT\r\n\r\n#include \"../relacy/pch.hpp\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/main.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n//#define RL_MSVC_OUTPUT\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n#include \"memory_order.hpp\"\r\n#include \"fence.hpp\"\r\n#include \"data_race.hpp\"\r\n#include \"mutex.hpp\"\r\n#include \"condvar.hpp\"\r\n#include \"semaphore.hpp\"\r\n#include \"event.hpp\"\r\n#include \"scheduler.hpp\"\r\n#include \"compare_swap.hpp\"\r\n#include \"wfmo.hpp\"\r\n#include \"thread_local.hpp\"\r\n#include \"dyn_thread.hpp\"\r\n#include \"memory.hpp\"\r\n#include \"pthread.hpp\"\r\n#include \"windows.hpp\"\r\n#include \"addr_hash.hpp\"\r\n#include \"futex.hpp\"\r\n\r\n#include \"../relacy/windows.h\"\r\n#include \"../relacy/pthread.h\"\r\n\r\n#include <cstdio>\r\n#include <climits>\r\n\r\nclass queue_t\r\n{\r\npublic:\r\n    queue_t()\r\n    {\r\n        VAR(head) = 0;\r\n        VAR(tail) = 0;\r\n        pthread_mutex_init(&mtx, 0);\r\n        pthread_cond_init(&cv, 0);\r\n    }\r\n\r\n    ~queue_t()\r\n    {\r\n        pthread_mutex_destroy(&mtx);\r\n        pthread_cond_destroy(&cv);\r\n    }\r\n\r\n    void enqueue(void* data)\r\n    {\r\n        node_t* n = new node_t;\r\n        n->VAR(next) = 0;\r\n        n->VAR(data) = data;\r\n        bool was_empty = false;\r\n\r\n        pthread_mutex_lock(&mtx);\r\n        if (VAR(head) == 0)\r\n        {\r\n            was_empty = true;\r\n            VAR(head) = n;\r\n            VAR(tail) = n;\r\n        }\r\n        else\r\n        {\r\n            VAR(tail)->VAR(next) = n;\r\n            VAR(tail) = n;\r\n        }\r\n        pthread_mutex_unlock(&mtx);\r\n\r\n        if (was_empty)\r\n            pthread_cond_broadcast(&cv);\r\n    }\r\n\r\n    void* dequeue()\r\n    {\r\n        node_t* n = 0;\r\n\r\n        pthread_mutex_lock(&mtx);\r\n        while (VAR(head) == 0)\r\n            pthread_cond_wait(&cv, &mtx);\r\n        n = VAR(head);\r\n        if (n->VAR(next) == 0)\r\n            VAR(tail) = 0;\r\n        VAR(head) = n->VAR(next);\r\n        
pthread_mutex_unlock(&mtx);\r\n\r\n        void* data = n->VAR(data);\r\n        delete n;\r\n        return data;\r\n    }\r\n\r\nprivate:\r\n    struct node_t\r\n    {\r\n        VAR_T(node_t*) next;\r\n        VAR_T(void*) data;\r\n    };\r\n\r\n    VAR_T(node_t*) head;\r\n    VAR_T(node_t*) tail;\r\n\r\n    pthread_mutex_t mtx;\r\n    pthread_cond_t cv;\r\n};\r\n\r\nvoid* enqueue_thread(void* ctx)\r\n{\r\n    queue_t* q = static_cast<queue_t*>(ctx);\r\n    for (size_t i = 0; i != 4; i += 1)\r\n        q->enqueue((void*)(i + 1));\r\n    return 0;\r\n}\r\n\r\nvoid* dequeue_thread(void* ctx)\r\n{\r\n    queue_t* q = static_cast<queue_t*>(ctx);\r\n    for (size_t i = 0; i != 4; i += 1)\r\n    {\r\n        void* data = q->dequeue();\r\n        assert((int)(uintptr_t)data >= 1 && (int)(uintptr_t)data <= 4);\r\n    }\r\n    return 0;\r\n}\r\n\r\nvoid queue_test()\r\n{\r\n    queue_t q;\r\n\r\n    pthread_t th [4];\r\n    for (size_t i = 0; i != 2; i += 1)\r\n        pthread_create(&th[i], 0, enqueue_thread, &q);\r\n    for (size_t i = 2; i != 4; i += 1)\r\n        pthread_create(&th[i], 0, dequeue_thread, &q);\r\n\r\n    void* res = 0;\r\n    for (size_t i = 0; i != 4; i += 1)\r\n        pthread_join(th[i], &res);\r\n}\r\n\r\n/*\r\nclass recursive_timed_mutex\r\n{\r\npublic:\r\n    recursive_timed_mutex()\r\n    {\r\n        sema.init(false, 1, 1, $);\r\n        owner = -1;\r\n        recursion_count = 0;\r\n    }\r\n\r\n    ~recursive_timed_mutex()\r\n    {\r\n        assert(owner == -1 && recursion_count == 0);\r\n        sema.deinit($);\r\n    }\r\n    \r\n    void lock(rl::debug_info_param info)\r\n    {\r\n        rl::context& c = rl::ctx();\r\n        if (owner == c.current_thread())\r\n        {\r\n            RL_HIST(rl::user_msg_event) {\"recursive mutex lock\"} RL_HIST_END();\r\n            assert(recursion_count > 0);\r\n            recursion_count += 1;\r\n        }\r\n        else\r\n        {\r\n            sema.wait(false, false, info);\r\n            
assert(owner == -1 && recursion_count == 0);\r\n            owner = c.current_thread();\r\n            recursion_count = 1;\r\n        }\r\n    }\r\n\r\n    bool try_lock(rl::debug_info_param info)\r\n    {\r\n        rl::context& c = rl::ctx();\r\n        if (owner == c.current_thread())\r\n        {\r\n            RL_HIST(rl::user_msg_event) {\"recursive mutex try lock\"} RL_HIST_END();\r\n            assert(recursion_count > 0);\r\n            recursion_count += 1;\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            rl::sema_wakeup_reason r = sema.wait(true, false, info);\r\n            if (r == rl::sema_wakeup_reason_success)\r\n            {\r\n                assert(owner == -1 && recursion_count == 0);\r\n                owner = c.current_thread();\r\n                recursion_count = 1;\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                return false;\r\n            }\r\n        }\r\n    }\r\n\r\n    void unlock(rl::debug_info_param info)\r\n    {\r\n        rl::context& c = rl::ctx();\r\n        assert(owner == c.current_thread() && recursion_count > 0);\r\n        RL_HIST(rl::user_msg_event) {\"recursive mutex unlock\"} RL_HIST_END();\r\n        recursion_count -= 1;\r\n        if (recursion_count == 0)\r\n        {\r\n            owner = -1;\r\n            unsigned prev;\r\n            sema.post(1, prev, info);\r\n        }\r\n    }\r\n\r\n    bool timed_lock(rl::debug_info_param info, ... 
)\r\n    {\r\n        rl::context& c = rl::ctx();\r\n        if (owner == c.current_thread())\r\n        {\r\n            RL_HIST(rl::user_msg_event) {\"recursive mutex timed lock\"} RL_HIST_END();\r\n            assert(recursion_count > 0);\r\n            recursion_count += 1;\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            rl::sema_wakeup_reason r = sema.wait(false, true, info);\r\n            if (r == rl::sema_wakeup_reason_success)\r\n            {\r\n                assert(owner == -1 && recursion_count == 0);\r\n                owner = c.current_thread();\r\n                recursion_count = 1;\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                return false;\r\n            }\r\n        }\r\n    }\r\n\r\nprivate:\r\n    struct tag_t;\r\n    rl::semaphore<tag_t> sema;\r\n    rl::thread_id_t owner;\r\n    int recursion_count;\r\n\r\n    recursive_timed_mutex(recursive_timed_mutex const&);\r\n    recursive_timed_mutex& operator = (recursive_timed_mutex const&);\r\n};\r\n*/\r\n\r\nclass recursive_timed_mutex\r\n{\r\npublic:\r\n    recursive_timed_mutex()\r\n    {\r\n        mtx = CreateMutex(0, 0, 0);\r\n    }\r\n\r\n    ~recursive_timed_mutex()\r\n    {\r\n        CloseHandle(mtx);\r\n    }\r\n    \r\n    void lock(rl::debug_info_param info)\r\n    {\r\n        rl::rl_WaitForSingleObject(mtx, INFINITE, info);\r\n    }\r\n\r\n    bool try_lock(rl::debug_info_param info)\r\n    {\r\n        return WAIT_OBJECT_0 == rl::rl_WaitForSingleObject(mtx, 0, info);\r\n    }\r\n\r\n    void unlock(rl::debug_info_param info)\r\n    {\r\n        rl::rl_ReleaseMutex(mtx, info);\r\n    }\r\n\r\n    bool timed_lock(rl::debug_info_param info, ... 
/*abs_time*/)\r\n    {\r\n        return WAIT_OBJECT_0 == rl::rl_WaitForSingleObject(mtx, 1, info);\r\n    }\r\n\r\nprivate:\r\n    HANDLE mtx;\r\n\r\n    recursive_timed_mutex(recursive_timed_mutex const&);\r\n    recursive_timed_mutex& operator = (recursive_timed_mutex const&);\r\n};\r\n\r\n\r\n\r\nstruct recursive_timed_mutex_test : rl::test_suite<recursive_timed_mutex_test, 3>\r\n{\r\n    recursive_timed_mutex mtx;\r\n    VAR_T(int) data;\r\n\r\n    void thread(unsigned idx)\r\n    {\r\n        if (idx)\r\n        {\r\n            mtx.lock($);\r\n            mtx.lock($);\r\n            VAR(data) = 1;\r\n            mtx.unlock($);\r\n            mtx.unlock($);\r\n        }\r\n        else\r\n        {\r\n            if (mtx.timed_lock($))\r\n            {\r\n                VAR(data) = 2;\r\n                mtx.unlock($);\r\n            }\r\n        }\r\n    }\r\n\r\n    void after()\r\n    {\r\n        //assert(VAR(data) != 2);\r\n    }\r\n};\r\n\r\n\r\nint main()\r\n{\r\n    //rl::test_params p;\r\n    //p.search_type = rl::sched_full;\r\n    //p.context_bound = 5;\r\n    //p.execution_depth_limit = 200;\r\n    //rl::simulate<test_pthread_condvar>(p);\r\n    //if (rand() <= RAND_MAX) return 0;\r\n\r\n    //rl::execute<queue_test, 4>();\r\n    //if (rand() <= RAND_MAX) return 0;\r\n\r\n    //rl::test_params p;\r\n    //p.initial_state = \"1000000\";\r\n    //p.iteration_count = 2000000;\r\n    //p.collect_history = true;\r\n    //p.output_history = true;\r\n    //p.search_type = rl::sched_bound;\r\n    //p.search_type = rl::sched_full;\r\n    //p.execution_depth_limit = 500;\r\n    //p.context_bound = 1;\r\n    //rl::simulate<test_pthread_condvar>(p);\r\n    //std::cout << \"scheduler state = \\\"\" << p.final_state << \"\\\"\" << std::endl;\r\n    //std::cout << std::endl;\r\n    //if (rand() <= RAND_MAX) return 0;\r\n\r\n    //rl::test_params p;\r\n    //p.iteration_count = 80000000;\r\n    //p.initial_state = \"50000000\";\r\n    //p.search_type = 
rl::fair_context_bound_scheduler_type;\r\n    //p.context_bound = 1;\r\n    //p.collect_history = true;\r\n    //p.output_history = true;\r\n    //rl::simulate<test>(p);\r\n    //if (rand() <= RAND_MAX) return 0;\r\n\r\n    //rl::test_params p;\r\n    //p.context_bound = 1;\r\n    //p.iteration_count = 1000;\r\n    //p.search_type = rl::fair_full_search_scheduler_type;\r\n    //p.search_type = rl::random_scheduler_type;\r\n    //p.collect_history = true;\r\n    //p.output_history = true;\r\n    //p.execution_depth_limit = 1000;\r\n    //p.initial_state = \"550 24 3 0 0 3 0 0 3 0 0 3 0 0 2 0 4 2 0 0 2 0 4 2 1 0 2 0 4 3 1 0 3 0 0 2 0 0 1 0 4 2 0 4 3 0 0 3 0 0 2 0 4 3 1 0 3 0 0 2 1 0 2 0 4 2 1 0 2 1 0 2 1 4\";\r\n    //bool result = rl::simulate<test>(p);\r\n    //std::cout << \"result=\" << result << std::endl;\r\n    //simulate<my_test>();\r\n    //if (rand() <= RAND_MAX) return 0;\r\n\r\n    rl::simulate_f tests[] = \r\n    {\r\n#if 1\r\n        &rl::simulate<test_FlushProcessWriteBuffers>,\r\n\r\n        &rl::simulate<test_addr_hash>,\r\n        &rl::simulate<test_addr_hash2>,\r\n        //!!! 
fails &rl::simulate<sched_load_test>,\r\n        &rl::simulate<test_memory_allocation>,\r\n\r\n        // memory model\r\n        &rl::simulate<test_pthread_thread>,\r\n        &rl::simulate<test_pthread_mutex>,\r\n        &rl::simulate<test_pthread_rwlock>,\r\n        &rl::simulate<test_pthread_condvar>,\r\n        &rl::simulate<test_pthread_condvar2>,\r\n        &rl::simulate<test_pthread_sem>,\r\n        \r\n        &rl::simulate<order_relaxed_test<0> >,\r\n        &rl::simulate<order_relaxed_test<1> >,\r\n        &rl::simulate<order_relaxed_test<2> >,\r\n        &rl::simulate<order_relaxed_test<3> >,\r\n        &rl::simulate<order_relaxed_test<4> >,\r\n        &rl::simulate<reorder_single_var_test>,\r\n        &rl::simulate<acq_rel_test>,\r\n\r\n        &rl::simulate<seq_cst_test<0> >,\r\n        &rl::simulate<seq_cst_test<1> >,\r\n\r\n        &rl::simulate<reordering_test>,\r\n        &rl::simulate<reordering_test2>,\r\n\r\n        &rl::simulate<test_win_thread>,\r\n        &rl::simulate<test_win_mutex>,\r\n        &rl::simulate<test_win_cs>,\r\n        &rl::simulate<test_win_condvar>,\r\n        &rl::simulate<test_win_condvar_srw>,\r\n        &rl::simulate<test_win_sem>,\r\n        &rl::simulate<test_win_event>,\r\n\r\n        &rl::simulate<modification_order_test>,\r\n        &rl::simulate<transitive_test>,\r\n        &rl::simulate<cc_transitive_test>,\r\n        &rl::simulate<occasional_test>,\r\n\r\n        // fences\r\n        &rl::simulate<fence_synch_test<0, 0> >,\r\n        &rl::simulate<fence_synch_test<1, 0> >,\r\n        &rl::simulate<fence_synch_test<2, 0> >,\r\n        &rl::simulate<fence_synch_test<0, 1> >,\r\n        &rl::simulate<fence_synch_test<1, 1> >,\r\n        &rl::simulate<fence_synch_test<2, 1> >,\r\n  \r\n        &rl::simulate<two_fence_synch_test>,\r\n        &rl::simulate<seq_cst_fence_test<0> >,\r\n        &rl::simulate<seq_cst_fence_test<1> >,\r\n\r\n        // data races\r\n        &rl::simulate<race_ld_ld_test>,\r\n        
&rl::simulate<race_ld_st_test>,\r\n        &rl::simulate<race_st_st_test>,\r\n\r\n        &rl::simulate<race_seq_ld_ld_test>,\r\n        &rl::simulate<race_seq_ld_st_test>,\r\n        &rl::simulate<race_seq_st_ld_test>,\r\n        &rl::simulate<race_seq_st_st_test>,\r\n\r\n        &rl::simulate<race_uninit_test>,\r\n        &rl::simulate<race_indirect_test>,\r\n\r\n        // compare_exchange\r\n        &rl::simulate<cas_spurious_fail_test<0> >,\r\n        &rl::simulate<cas_spurious_fail_test<1> >,\r\n        &rl::simulate<cas_spurious_fail_test<2> >,\r\n\r\n        // mutex\r\n        &rl::simulate<test_deadlock>,\r\n        &rl::simulate<test_deadlock2>,\r\n        &rl::simulate<test_mutex_destuction>,\r\n        &rl::simulate<test_mutex_destuction2>,\r\n        &rl::simulate<test_mutex_recursion>,\r\n        &rl::simulate<test_mutex_recursion_error>,\r\n        &rl::simulate<test_mutex_unlock_error>,\r\n        &rl::simulate<test_mutex_leak>,\r\n        &rl::simulate<test_mutex>,\r\n        &rl::simulate<test_mutex_try_lock>,\r\n\t\r\n        // futex\r\n        &rl::simulate<test_futex>,\r\n        &rl::simulate<test_futex_deadlock>,\r\n        &rl::simulate<test_futex_sync1>,\r\n        &rl::simulate<test_futex_sync2>,\r\n        &rl::simulate<test_futex_intr>,\r\n\r\n        // condition variable\r\n        &rl::simulate<test_condvar>,\r\n        &rl::simulate<test_condvar2>,\r\n\r\n        // semaphore\r\n        &rl::simulate<test_semaphore>,\r\n        &rl::simulate<test_semaphore_atomic>,\r\n\r\n        // event\r\n        &rl::simulate<test_event_auto>,\r\n        &rl::simulate<test_event_manual>,\r\n        &rl::simulate<test_event_atomic>,\r\n\r\n        //wfmo\r\n        &rl::simulate<test_wfmo_all>,\r\n        &rl::simulate<test_wfmo_single>,\r\n        &rl::simulate<test_wfmo_timeout>,\r\n        &rl::simulate<test_wfmo_try>,\r\n        &rl::simulate<test_wfmo_mixed>,\r\n        &rl::simulate<test_wfmo_mixed2>,\r\n        
&rl::simulate<test_wfmo_event_all>,\r\n        &rl::simulate<test_wfmo_event_any>,\r\n        &rl::simulate<test_wfmo_atomic>,\r\n\r\n        // thread local storage\r\n        &rl::simulate<tls_basic_test>,\r\n        &rl::simulate<tls_reset_test>,\r\n        &rl::simulate<tls_global_test>,\r\n        &rl::simulate<tls_win32_test>,\r\n\r\n        // dynamic thread\r\n        &rl::simulate<dyn_thread_basic_test>,\r\n        &rl::simulate<dyn_thread_win32_test>,\r\n        &rl::simulate<dyn_thread_visibility_test>,\r\n#endif\r\n    };\r\n\r\n    for (size_t sched = 0; sched != rl::sched_count; ++sched)\r\n    {\r\n        std::cout << format((rl::scheduler_type_e)sched) << \" tests:\" << std::endl;\r\n\r\n        for (size_t i = 0; i != sizeof(tests)/sizeof(*tests); ++i)\r\n        {\r\n            //!!! make it work under sched_full\r\n            if (sched == rl::sched_full\r\n                && (tests[i] == (rl::simulate_f)&rl::simulate<test_pthread_condvar>\r\n                    || tests[i] == (rl::simulate_f)&rl::simulate<test_win_condvar>))\r\n                continue;\r\n\r\n            rl::ostringstream stream;\r\n            rl::test_params params;\r\n            params.search_type = (rl::scheduler_type_e)sched;\r\n            params.iteration_count =\r\n                (params.test_result == rl::test_result_success ? 
100000 : 500);\r\n            params.output_stream = &stream;\r\n            params.progress_stream = &stream;\r\n            params.context_bound = 2;\r\n            params.execution_depth_limit = 500;\r\n\r\n            if (false == tests[i](params))\r\n            {\r\n                std::cout << std::endl;\r\n                std::cout << \"FAILED\" << std::endl;\r\n                std::cout << stream.str();\r\n                std::cout << std::endl;\r\n                return 1;\r\n            }\r\n            else\r\n            {\r\n                std::cout << params.test_name << \"...OK\" << std::endl;\r\n            }\r\n        }\r\n        std::cout << std::endl;\r\n    }\r\n\r\n    rl::simulate_f scheduler_tests[] = \r\n    {\r\n        &rl::simulate<livelock_test>,\r\n        &rl::simulate<yield_livelock_test>,\r\n    };\r\n\r\n    std::cout << \"full search scheduler tests:\" << std::endl;\r\n    for (size_t i = 0; i != sizeof(scheduler_tests)/sizeof(*scheduler_tests); ++i)\r\n    {\r\n        rl::ostringstream stream;\r\n        rl::test_params params;\r\n        params.search_type = rl::sched_full;\r\n        params.output_stream = &stream;\r\n        params.progress_stream = &stream;\r\n        params.context_bound = 2;\r\n        params.execution_depth_limit = 500;\r\n\r\n        if (false == scheduler_tests[i](params))\r\n        {\r\n            std::cout << std::endl;\r\n            std::cout << \"FAILED\" << std::endl;\r\n            std::cout << stream.str();\r\n            return 1;\r\n        }\r\n        else\r\n        {\r\n            std::cout << params.test_name << \"...OK\" << std::endl;\r\n        }\r\n    }\r\n    std::cout << std::endl;\r\n\r\n    std::cout << \"SUCCESS\" << std::endl;\r\n}\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/memory.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\nstruct test_memory_allocation : rl::test_suite<test_memory_allocation, 2>\r\n{\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        VAR_T(int)* p1 = new VAR_T(int) (5), i1 = 5, * p11 = new VAR_T(int) (6);\r\n        VAR(p1[0]) = 1;\r\n        delete p1, delete p11;\r\n\r\n        VAR_T(int)* p2 = new VAR_T(int) [10], i2 = 6, *p22 = new VAR_T(int) [20];\r\n        VAR(p2[0]) = 1;\r\n        delete [] p2, delete [] p22;\r\n\r\n        void* p3 = malloc(10), *i3 = 0, *p33 = malloc(20);\r\n        free(p3), free(p33);\r\n\r\n        void* p4 = malloc(sizeof(int));\r\n        int* i4 = new (p4) int (11);\r\n        free(p4);\r\n\r\n        //RL_ASSERT(false);\r\n        (void)i1, (void)i2, (void)i3; (void)i4;\r\n    }\r\n};\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/memory_order.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\ntemplate<int index>\r\nstruct order_relaxed_test : rl::test_suite<order_relaxed_test<index>, 2>\r\n{\r\n    std::atomic<int> x1;\r\n    std::atomic<int> x2;\r\n\r\n    void before()\r\n    {\r\n        x1($) = 0;\r\n        x2($) = 0;\r\n    }\r\n\r\n    void thread(unsigned th)\r\n    {\r\n        if (th)\r\n        {\r\n            x1.store(1, order().first, $);\r\n            x2.store(1, order().first, $);\r\n        }\r\n        else\r\n        {\r\n            int y2 = x2.load(order().second, $);\r\n            int y1 = x1.load(order().second, $);\r\n            //RL_UNTIL(0 == y1 && 0 != y2);\r\n            (void)y2;\r\n            (void)y1;\r\n        }\r\n    }\r\n\r\n    std::pair<rl::memory_order, rl::memory_order> order()\r\n    {\r\n        switch (index)\r\n        {\r\n        default: RL_VERIFY(false);\r\n        case 0: return std::make_pair(rl::mo_relaxed, rl::mo_relaxed);\r\n        case 1: return std::make_pair(rl::mo_release, rl::mo_relaxed);\r\n        case 2: return std::make_pair(rl::mo_seq_cst, rl::mo_relaxed);\r\n        case 3: return std::make_pair(rl::mo_relaxed, rl::mo_acquire);\r\n        case 4: return std::make_pair(rl::mo_relaxed, rl::mo_seq_cst);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct reorder_single_var_test : rl::test_suite<reorder_single_var_test, 2>\r\n{\r\n    std::atomic<int> x;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            x.store(1, rl::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            int y1 = x.load(rl::memory_order_relaxed);\r\n            int y2 = x.load(rl::memory_order_relaxed);\r\n            RL_ASSERT(y1 == 0 || y2 == 1);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct acq_rel_test : rl::test_suite<acq_rel_test, 2>\r\n{\r\n    std::atomic<int> x;\r\n    rl::var<int> 
y;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            VAR(y) = 1;\r\n            x.store(1, std::memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            int f = x.load(rl::memory_order_acquire);\r\n            if (f)\r\n            {\r\n                int d = VAR(y);\r\n                RL_ASSERT(1 == d);\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\ntemplate<int index>\r\nstruct seq_cst_test : rl::test_suite<seq_cst_test<index>, 4, \r\n    (rl::test_result_e)((1 - index) * rl::test_result_until_condition_hit)>\r\n{\r\n    std::atomic<int> x1;\r\n    std::atomic<int> x2;\r\n\r\n    int res;\r\n\r\n    void before()\r\n    {\r\n        x1($) = 0;\r\n        x2($) = 0;\r\n        res = 0;\r\n    }\r\n\r\n    void thread(unsigned th)\r\n    {\r\n        if (0 == th)\r\n        {\r\n            x1.store(1, order().first, $);\r\n        }\r\n        else if (1 == th)\r\n        {\r\n            x2.store(1, order().first, $);\r\n        }\r\n        else if (2 == th)\r\n        {\r\n            int v1 = x1.load(order().second, $);\r\n            int v2 = x2.load(order().second, $);\r\n            res += (v1 == 1 && v2 == 0);\r\n        }\r\n        else if (3 == th)\r\n        {\r\n            int v2 = x2.load(order().second, $);\r\n            int v1 = x1.load(order().second, $);\r\n            res += (v2 == 1 && v1 == 0);\r\n        }\r\n    }\r\n\r\n    void after()\r\n    {\r\n        if ((void)0, 0 == index)\r\n        {\r\n            RL_UNTIL(2 == res);\r\n        }\r\n        else\r\n        {\r\n            RL_ASSERT(2 != res);\r\n        }\r\n    }\r\n\r\n    std::pair<rl::memory_order, rl::memory_order> order()\r\n    {\r\n        switch (index)\r\n        {\r\n        default: RL_VERIFY(false);\r\n        case 0: return std::make_pair(rl::mo_release, rl::mo_acquire);\r\n        case 1: return 
std::make_pair(rl::mo_seq_cst, rl::mo_seq_cst);\r\n        }\r\n    }\r\n};\r\n\r\nstruct modification_order_test : rl::test_suite<modification_order_test, 2>\r\n{\r\n    std::atomic<int> a;\r\n    rl::var<int> x;\r\n\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            x($) = 1;\r\n            a.store(1, rl::memory_order_release);\r\n            a.store(2, rl::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            if (a.load(rl::memory_order_acquire))\r\n                x($).load();\r\n        }\r\n    }\r\n};\r\n\r\nstruct reordering_test : rl::test_suite<reordering_test, 3>\r\n{\r\n    std::atomic<int> x;\r\n    std::atomic<int> y;\r\n    std::atomic<int> r;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n        y($) = 0;\r\n        r($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            x.store(1, rl::memory_order_relaxed);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            if (x.load(rl::memory_order_relaxed))\r\n                r.store(1, rl::memory_order_relaxed);\r\n            y.store(1, rl::memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            if (y.load(rl::memory_order_acquire))\r\n            {\r\n                if (r.load(rl::memory_order_relaxed))\r\n                {\r\n                    RL_ASSERT(x.load(rl::memory_order_relaxed));\r\n                }\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\nstruct reordering_test2 : rl::test_suite<reordering_test2, 3, rl::test_result_until_condition_hit>\r\n{\r\n    std::atomic<int> x1;\r\n    std::atomic<int> x2;\r\n    std::atomic<int> y;\r\n    std::atomic<int> r;\r\n\r\n    void before()\r\n    {\r\n        std::atomic<char*> x (0);\r\n        char* ch = 0;\r\n        x.compare_exchange_weak(ch, 0, 
std::memory_order_seq_cst);\r\n\r\n        x1($) = 0;\r\n        x2($) = 0;\r\n        y($) = 0;\r\n        r($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            x1.store(1, rl::memory_order_relaxed);\r\n            x2.store(1, rl::memory_order_relaxed);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            if (x2.load(rl::memory_order_relaxed))\r\n                r.store(1, rl::memory_order_relaxed);\r\n            y.store(1, rl::memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            if (y.load(rl::memory_order_acquire))\r\n            {\r\n                if (r.load(rl::memory_order_relaxed))\r\n                {\r\n                    RL_UNTIL(0 == x1.load(rl::memory_order_relaxed));\r\n                }\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\nstruct transitive_test : rl::test_suite<transitive_test, 3>\r\n{\r\n    std::atomic<int> x;\r\n    rl::var<int> y;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            VAR(y) = 1;\r\n            x.fetch_add(1, rl::memory_order_release);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            x.fetch_add(2, rl::memory_order_acquire);\r\n        }\r\n        else\r\n        {\r\n            x.load(rl::memory_order_acquire);\r\n            int w = x.load(rl::memory_order_acquire);\r\n            if (1 == w || 3 == w)\r\n            {\r\n                y($).load();\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\nstruct cc_transitive_test : rl::test_suite<cc_transitive_test, 3>\r\n{\r\n    std::atomic<int> x;\r\n    std::atomic<int> y;\r\n\r\n    void before()\r\n    {\r\n        x.store(0, std::memory_order_relaxed);\r\n        y.store(0, std::memory_order_relaxed);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n    
        x.store(1, std::memory_order_relaxed);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            if (x.load(std::memory_order_relaxed))\r\n                y.store(1, std::memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            if (y.load(std::memory_order_acquire))\r\n                assert(x.load(std::memory_order_relaxed));\r\n        }\r\n    }\r\n};\r\n\r\n\r\nstruct occasional_test : rl::test_suite<occasional_test, 3, rl::test_result_until_condition_hit>\r\n{\r\n    std::atomic<int> x, y, z;\r\n\r\n    void before()\r\n    {\r\n        x.store(0, std::memory_order_relaxed);\r\n        y.store(0, std::memory_order_relaxed);\r\n        z.store(0, std::memory_order_relaxed);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            x.store(1, rl::memory_order_relaxed);\r\n            y.store(1, rl::memory_order_release);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            if (y.load(rl::memory_order_relaxed))\r\n                z.store(1, rl::memory_order_release);\r\n        }\r\n        else\r\n        {\r\n            if (z.load(rl::memory_order_acquire))\r\n            {\r\n                RL_ASSERT(y.load(rl::memory_order_relaxed));\r\n                RL_UNTIL(0 == x.load(rl::memory_order_relaxed));\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc71/test.sln",
    "content": "Microsoft Visual Studio Solution File, Format Version 8.00\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcproj\", \"{8C8174E2-2B2E-484D-9EB4-85D29347F22F}\"\r\n\tProjectSection(ProjectDependencies) = postProject\r\n\tEndProjectSection\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfiguration) = preSolution\r\n\t\tDebug = Debug\r\n\t\tRelease = Release\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfiguration) = postSolution\r\n\t\t{8C8174E2-2B2E-484D-9EB4-85D29347F22F}.Debug.ActiveCfg = Debug|Win32\r\n\t\t{8C8174E2-2B2E-484D-9EB4-85D29347F22F}.Debug.Build.0 = Debug|Win32\r\n\t\t{8C8174E2-2B2E-484D-9EB4-85D29347F22F}.Release.ActiveCfg = Release|Win32\r\n\t\t{8C8174E2-2B2E-484D-9EB4-85D29347F22F}.Release.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ExtensibilityGlobals) = postSolution\r\n\tEndGlobalSection\r\n\tGlobalSection(ExtensibilityAddIns) = postSolution\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc71/test.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"7.10\"\r\n\tName=\"test\"\r\n\tProjectGUID=\"{8C8174E2-2B2E-484D-9EB4-85D29347F22F}\"\r\n\tKeyword=\"Win32Proj\">\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"/>\r\n\t</Platforms>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"Debug\"\r\n\t\t\tIntermediateDirectory=\"Debug\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"2\">\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"TRUE\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"5\"\r\n\t\t\t\tUsePrecompiledHeader=\"3\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"TRUE\"\r\n\t\t\t\tDebugInformationFormat=\"4\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tOutputFile=\"$(OutDir)/test.exe\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"TRUE\"\r\n\t\t\t\tProgramDatabaseFile=\"$(OutDir)/test.pdb\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedWrapperGeneratorTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAuxiliaryManagedWrapperGeneratorTool\"/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"
Release\"\r\n\t\t\tIntermediateDirectory=\"Release\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"2\">\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"4\"\r\n\t\t\t\tUsePrecompiledHeader=\"3\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"TRUE\"\r\n\t\t\t\tDebugInformationFormat=\"3\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tOutputFile=\"$(OutDir)/test.exe\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"TRUE\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedWrapperGeneratorTool\"/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAuxiliaryManagedWrapperGeneratorTool\"/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\blocking_mutex.hpp\">\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\compare_swap.hpp\">\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\condvar.hpp\">\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\data_race.hpp\">\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\fence.hpp\">\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\foo.cpp\">\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\main.cpp\">\r\n\t\t</File>\r\n\t\t<File\r
\n\t\t\tRelativePath=\"..\\memory_order.hpp\">\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\scheduler.hpp\">\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\">\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\">\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\">\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\">\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc8/rrd.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcproj\", \"{99882C71-3316-411F-A8AE-EC1E40702040}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"rrd\", \"rrd.vcproj\", \"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"peterson\", \"..\\..\\example\\peterson\\msvc8\\peterson.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"proxy_collector\", \"..\\..\\example\\proxy_collector\\msvc8\\proxy_collector.vcproj\", \"{31994C0C-3BAD-4F25-8BC8-3206FF349B29}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ref_counting\", \"..\\..\\example\\ref_counting\\msvc8\\ref_counting.vcproj\", \"{31994C0C-3BAD-4F25-8BC8-3206FF349B28}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"stack\", \"..\\..\\example\\stack\\msvc8\\stack.vcproj\", 
\"{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"spsc_queue\", \"..\\..\\example\\spsc_queue\\msvc8\\spsc_queue.vcproj\", \"{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"condvar\", \"..\\..\\example\\condvar\\msvc8\\condvar.vcproj\", \"{6CC59CF8-408B-441B-8F65-15651210CB82}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"smr\", \"..\\..\\example\\smr\\msvc8\\smr.vcproj\", \"{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"mutex_business_logic\", \"..\\..\\example\\mutex_business_logic\\msvc8\\mutex_business_logic.vcproj\", \"{B03A7216-E196-44C6-8861-C77D90055512}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ws_deque\", \"..\\..\\example\\ws_deque\\msvc8\\ws_deque.vcproj\", \"{0B597F19-DEBB-4832-B520-9A93A286D595}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = 
\"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"jtest\", \"..\\..\\jtest\\msvc8\\jtest.vcproj\", \"{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ntest\", \"..\\..\\ntest\\msvc8\\ntest.vcproj\", \"{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"cli_ws_deque\", \"..\\..\\example\\cli_ws_deque\\msvc8\\cli_ws_deque.vcproj\", \"{967F376B-BDBF-4AC8-9325-371CC8ABD8FD}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"java_ws_deque\", \"..\\..\\example\\java_ws_deque\\msvc8\\java_ws_deque.vcproj\", \"{9E88433F-779E-4461-9963-35E3338873AC}\"\r\n\tProjectSection(WebsiteProperties) = preProject\r\n\t\tDebug.AspNetCompiler.Debug = \"True\"\r\n\t\tRelease.AspNetCompiler.Debug = \"False\"\r\n\tEndProjectSection\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tProfile|Win32 = Profile|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|Win32.ActiveCfg = 
Profile|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Profile|Win32.ActiveCfg = Profile|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Profile|Win32.Build.0 = Profile|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Profile|Win32.ActiveCfg = 
Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{2F0B1A3B-27CA-47D4-A9D1-5EC66BB0A85B}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Debug|Win32.Build.0 = 
Debug|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{B03A7216-E196-44C6-8861-C77D90055512}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{B03A7216-E196-44C6-8861-C77D90055512}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{B03A7216-E196-44C6-8861-C77D90055512}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{B03A7216-E196-44C6-8861-C77D90055512}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{B03A7216-E196-44C6-8861-C77D90055512}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{B03A7216-E196-44C6-8861-C77D90055512}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Debug|Win32.ActiveCfg = 
Debug|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{967F376B-BDBF-4AC8-9325-371CC8ABD8FD}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{967F376B-BDBF-4AC8-9325-371CC8ABD8FD}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{967F376B-BDBF-4AC8-9325-371CC8ABD8FD}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{967F376B-BDBF-4AC8-9325-371CC8ABD8FD}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{967F376B-BDBF-4AC8-9325-371CC8ABD8FD}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{967F376B-BDBF-4AC8-9325-371CC8ABD8FD}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{9E88433F-779E-4461-9963-35E3338873AC}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{9E88433F-779E-4461-9963-35E3338873AC}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{9E88433F-779E-4461-9963-35E3338873AC}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{9E88433F-779E-4461-9963-35E3338873AC}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{9E88433F-779E-4461-9963-35E3338873AC}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{9E88433F-779E-4461-9963-35E3338873AC}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc8/rrd.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"rrd\"\r\n\tProjectGUID=\"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\n\tRootNamespace=\"rrd\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t\t<Platform\r\n\t\t\tName=\"x64\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_LIB\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool
\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_LIB\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholePro
gramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_LIB\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\
n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_LIB\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug64|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_LIB\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\
t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug64|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_LIB\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\
"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<Filter\r\n\t\t\tName=\"front-end\"\r\n\t\t\t>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\atomic.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\atomic_events.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\atomic_fence.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\backoff.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\cli.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\cli_interlocked.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\cli_var.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\cli_volatile.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\java.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\java_atomic.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\java_var.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\java_volatile.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\var.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\volatile.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t</Filter>\r\n\t\t<Filter\r\n\t\t\tName=\"scheduler\"\r\n\t\t\t>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\context_bound_scheduler.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\full_searc
h_scheduler.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\random_scheduler.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\scheduler.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t</Filter>\r\n\t\t<Filter\r\n\t\t\tName=\"base\"\r\n\t\t\t>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\base.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\foreach.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\pch.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\platform.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\random.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\signature.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t</Filter>\r\n\t\t<Filter\r\n\t\t\tName=\"stdlib\"\r\n\t\t\t>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\condition_variable.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\event.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\mutex.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\recursive_mutex.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\semaphore.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\shared_mutex.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t</Filter>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\context.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\context_base.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\context_base_impl.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\
t\t\tRelativePath=\"..\\..\\relacy\\history.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\memory.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\memory_order.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\pthread.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\relacy.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\relacy_cli.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\relacy_java.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\relacy_std.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\rmw.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\slab_allocator.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\sync_var.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\test_params.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\test_result.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\test_suite.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\thread.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\waitset.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\windows.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc8/test.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2005\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcproj\", \"{99882C71-3316-411F-A8AE-EC1E40702040}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"rrd\", \"rrd.vcproj\", \"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tDebug|x64 = Debug|x64\r\n\t\tDebug64|Win32 = Debug64|Win32\r\n\t\tDebug64|x64 = Debug64|x64\r\n\t\tProfile|Win32 = Profile|Win32\r\n\t\tProfile|x64 = Profile|x64\r\n\t\tRelease|Win32 = Release|Win32\r\n\t\tRelease|x64 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|x64.ActiveCfg = Debug|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|x64.Build.0 = Debug|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug64|Win32.ActiveCfg = Debug64|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug64|Win32.Build.0 = Debug64|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug64|x64.ActiveCfg = Debug64|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug64|x64.Build.0 = Debug64|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|Win32.ActiveCfg = Profile|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|Win32.Build.0 = Profile|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|x64.ActiveCfg = Profile|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|x64.Build.0 = Profile|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|Win32.Build.0 = 
Release|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|x64.ActiveCfg = Release|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|x64.Build.0 = Release|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|x64.ActiveCfg = Debug|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|x64.Build.0 = Debug|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|Win32.ActiveCfg = Debug64|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|Win32.Build.0 = Debug64|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|x64.ActiveCfg = Debug64|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug64|x64.Build.0 = Debug64|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|x64.ActiveCfg = Debug64|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|x64.Build.0 = Debug64|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|x64.ActiveCfg = Release|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|x64.Build.0 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc8/test.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"test\"\r\n\tProjectGUID=\"{99882C71-3316-411F-A8AE-EC1E40702040}\"\r\n\tRootNamespace=\"test\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t\t<Platform\r\n\t\t\tName=\"x64\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\
t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"17\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALin
kTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"0\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tInlineFunctionExpansion=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tWholeProgramOptimization=\"false\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\
t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tLinkTimeCodeGeneration=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tInlineFunctionExpansion=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePre
compiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"17\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Profile|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"0\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalOptions=\"/Ob0\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\
t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tWholeProgramOptimization=\"false\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tLinkTimeCodeGeneration=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Profile|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"0\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\
"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalOptions=\"/Ob0\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tWholeProgramOptimization=\"false\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"false\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tLinkTimeCodeGeneration=\"0\"\r\n\t\t\t\tTargetMachine=\"17\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug64|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(
ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug64|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(Configuratio
nName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"17\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<Fi
le\r\n\t\t\tRelativePath=\"..\\compare_swap.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\condvar.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\data_race.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\event.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\fence.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\foo.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\main.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\memory_order.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\mutex.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\scheduler.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\semaphore.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|x64\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|x64\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Profile|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</Fil
eConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Profile|x64\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug64|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug64|x64\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\todo.txt\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\wfmo.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc9/rrd.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcproj\", \"{99882C71-3316-411F-A8AE-EC1E40702040}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ref_counting\", \"..\\..\\example\\ref_counting\\msvc9\\ref_counting.vcproj\", \"{31994C0C-3BAD-4F25-8BC8-3206FF349B28}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"peterson\", \"..\\..\\example\\peterson\\msvc9\\peterson.vcproj\", \"{D4756EE9-3953-4E17-B1B5-E89F853303C1}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"stack\", \"..\\..\\example\\stack\\msvc9\\stack.vcproj\", \"{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"proxy_collector\", \"..\\..\\example\\proxy_collector\\msvc9\\proxy_collector.vcproj\", \"{31994C0C-3BAD-4F25-8BC8-3206FF349B29}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"rrd\", \"rrd.vcproj\", \"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ntest\", \"..\\..\\ntest\\msvc9\\ntest.vcproj\", \"{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"jtest\", \"..\\..\\jtest\\msvc9\\jtest.vcproj\", \"{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"smr\", \"..\\..\\example\\smr\\msvc9\\smr.vcproj\", \"{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"spsc_queue\", \"..\\..\\example\\spsc_queue\\msvc9\\spsc_queue.vcproj\", \"{3F32C4FA-E451-42BC-9E65-74129120B6E4}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"condvar\", \"..\\..\\example\\condvar\\msvc9\\condvar.vcproj\", 
\"{6CC59CF8-408B-441B-8F65-15651210CB82}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ws_deque\", \"..\\..\\example\\ws_deque\\msvc9\\ws_deque.vcproj\", \"{0B597F19-DEBB-4832-B520-9A93A286D595}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"examples\", \"..\\..\\example\\examples\\msvc9\\examples.vcproj\", \"{1EB73A6F-7F94-4ED4-8EB3-C245E773207A}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tProfile|Win32 = Profile|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|Win32.ActiveCfg = Profile|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B28}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Profile|Win32.Build.0 = 
Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4756EE9-3953-4E17-B1B5-E89F853303C1}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{4D6D7FC3-66D1-4F80-B434-2FDCBBFBC9F5}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Profile|Win32.ActiveCfg = Profile|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Profile|Win32.Build.0 = Profile|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{31994C0C-3BAD-4F25-8BC8-3206FF349B29}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Profile|Win32.ActiveCfg = 
Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{1889E8F4-47F7-48B6-9FC7-61FD7CD000C8}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{BC168133-5E3D-4691-BA15-8E0FD61DFDB5}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{3F32C4FA-E451-42BC-9E65-74129120B6E4}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{3F32C4FA-E451-42BC-9E65-74129120B6E4}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{3F32C4FA-E451-42BC-9E65-74129120B6E4}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{3F32C4FA-E451-42BC-9E65-74129120B6E4}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{3F32C4FA-E451-42BC-9E65-74129120B6E4}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{3F32C4FA-E451-42BC-9E65-74129120B6E4}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Debug|Win32.Build.0 = 
Debug|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{6CC59CF8-408B-441B-8F65-15651210CB82}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{0B597F19-DEBB-4832-B520-9A93A286D595}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{1EB73A6F-7F94-4ED4-8EB3-C245E773207A}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{1EB73A6F-7F94-4ED4-8EB3-C245E773207A}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{1EB73A6F-7F94-4ED4-8EB3-C245E773207A}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{1EB73A6F-7F94-4ED4-8EB3-C245E773207A}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{1EB73A6F-7F94-4ED4-8EB3-C245E773207A}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{1EB73A6F-7F94-4ED4-8EB3-C245E773207A}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc9/rrd.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9.00\"\r\n\tName=\"rrd\"\r\n\tProjectGUID=\"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\n\tRootNamespace=\"rrd\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"0\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t\t<Platform\r\n\t\t\tName=\"x64\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_LIB\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCo
pTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_LIB\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCh
aracterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_LIB\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"4\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\
"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_LIB\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"0\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDetect64BitPortabilityProblems=\"true\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLibrarianTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<Filter\r\n\t\t\tName=\"front-end\"\r\n\t\t\t>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\atomic.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\atomic_events.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\atomic_fence.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\backoff.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\cli.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\cli_interlocked.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\cli_var.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\cli_volatile.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\java.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<F
ile\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\java_atomic.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\java_var.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\java_volatile.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\var.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\volatile.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t</Filter>\r\n\t\t<Filter\r\n\t\t\tName=\"scheduler\"\r\n\t\t\t>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\context_bound_scheduler.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\full_search_scheduler.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\random_scheduler.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\scheduler.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t</Filter>\r\n\t\t<Filter\r\n\t\t\tName=\"base\"\r\n\t\t\t>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\base.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\foreach.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\pch.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\platform.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\random.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\signature.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t</Filter>\r\n\t\t<Filter\r\n\t\t\tName=\"stdlib\"\r\n\t\t\t>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\condition_variable.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\event.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t
\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\mutex.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\pthread.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\semaphore.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t\t<File\r\n\t\t\t\tRelativePath=\"..\\..\\relacy\\stdlib\\windows.hpp\"\r\n\t\t\t\t>\r\n\t\t\t</File>\r\n\t\t</Filter>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\addr_hash.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\context.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\context_addr_hash.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\context_base.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\context_base_impl.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\defs.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\dyn_thread.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\dyn_thread_ctx.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\history.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\memory.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\memory_order.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\pthread.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\relacy.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\relacy_cli.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\relacy_java.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\relacy_std.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\r
elacy\\rmw.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\slab_allocator.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\sync_var.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\test_params.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\test_result.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\test_suite.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\thread.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\thread_base.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\thread_local.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\thread_local_ctx.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\waitset.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\relacy\\windows.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc9/test.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcproj\", \"{99882C71-3316-411F-A8AE-EC1E40702040}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"rrd\", \"rrd.vcproj\", \"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tDebug|x64 = Debug|x64\r\n\t\tProfile|Win32 = Profile|Win32\r\n\t\tProfile|x64 = Profile|x64\r\n\t\tRelease|Win32 = Release|Win32\r\n\t\tRelease|x64 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|x64.ActiveCfg = Debug|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Debug|x64.Build.0 = Debug|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|Win32.ActiveCfg = Profile|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|Win32.Build.0 = Profile|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|x64.ActiveCfg = Profile|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Profile|x64.Build.0 = Profile|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|x64.ActiveCfg = Release|x64\r\n\t\t{99882C71-3316-411F-A8AE-EC1E40702040}.Release|x64.Build.0 = Release|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|x64.ActiveCfg = 
Debug|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|x64.Build.0 = Debug|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|x64.ActiveCfg = Release|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Profile|x64.Build.0 = Release|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|x64.ActiveCfg = Release|x64\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|x64.Build.0 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/msvc9/test.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9.00\"\r\n\tName=\"test\"\r\n\tProjectGUID=\"{99882C71-3316-411F-A8AE-EC1E40702040}\"\r\n\tRootNamespace=\"test\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"131072\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t\t<Platform\r\n\t\t\tName=\"x64\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t\tEnablePREfast=\"false\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetM
achine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t\tPageHeapConserveMemory=\"true\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalOptions=\"/bigobj\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t\tEnablePREfast=\"false\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\
t\t\tSubSystem=\"1\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"17\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t\tPageHeapConserveMemory=\"true\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tInlineFunctionExpansion=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tEnableEnhancedInstructionSet=\"2\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\
t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalOptions=\"/bigobj\"\r\n\t\t\t\tInlineFunctionExpansion=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityChe
ck=\"false\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"17\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Profile|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"0\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalOptions=\"/Ob0\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmit
FramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tWholeProgramOptimization=\"false\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tLinkTimeCodeGeneration=\"0\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Profile|x64\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(PlatformName)\\$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"0\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<
Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t\tTargetEnvironment=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tAdditionalOptions=\"/bigobj\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tFavorSizeOrSpeed=\"1\"\r\n\t\t\t\tOmitFramePointers=\"true\"\r\n\t\t\t\tEnableFiberSafeOptimizations=\"true\"\r\n\t\t\t\tWholeProgramOptimization=\"false\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tBufferSecurityCheck=\"false\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"4\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateManifest=\"true\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tLinkTimeCodeGeneration=\"0\"\r\n\t\t\t\tRandomizedBaseAddress=\"1\"\r\n\t\t\t\tDataExecutionPrevention=\"0\"\r\n\t\t\t\tTargetMachine=\"17\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\..\\CHANGES.TXT\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tR
elativePath=\"..\\compare_swap.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\condvar.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\data_race.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\dyn_thread.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\event.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\fence.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\main.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\memory.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\memory_order.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\mutex.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\pthread.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\scheduler.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\semaphore.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|x64\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|x64\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=
\"Profile|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Profile|x64\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\thread_local.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\todo.txt\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\wfmo.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\windows.hpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/mutex.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\nstruct test_mutex : rl::test_suite<test_mutex, 3>\r\n{\r\n    rl::mutex mtx;\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        data($) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        RL_ASSERT(data($) == 3);\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        mtx.lock($);\r\n        data($) += 1;\r\n        mtx.unlock($);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_deadlock : rl::test_suite<test_deadlock, 2, rl::test_result_deadlock>\r\n{\r\n    rl::mutex mtx1;\r\n    rl::mutex mtx2;\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            mtx1.lock($);\r\n            mtx2.lock($);\r\n            mtx1.unlock($);\r\n            mtx2.unlock($);\r\n        }\r\n        else\r\n        {\r\n            mtx2.lock($);\r\n            mtx1.lock($);\r\n            mtx1.unlock($);\r\n            mtx2.unlock($);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct test_deadlock2 : rl::test_suite<test_deadlock2, 2, rl::test_result_deadlock>\r\n{\r\n    std::mutex m;\r\n    std::atomic<int> f;\r\n\r\n    void before()\r\n    {\r\n        f($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            m.lock($);\r\n            f($) = 1;\r\n            for (int i = 0; i != 100; ++i)\r\n                rl::yield(1, $);\r\n        }\r\n        else\r\n        {\r\n            while (0 == f($))\r\n                rl::yield(1, $);\r\n            m.lock($);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct test_mutex_destuction : rl::test_suite<test_mutex_destuction, 1, rl::test_result_destroying_owned_mutex>\r\n{\r\n    void thread(unsigned)\r\n    {\r\n        std::mutex* m = new std::mutex;\r\n        m->lock($);\r\n        delete m;\r\n    }\r\n};\r\n\r\n\r\nstruct test_mutex_destuction2 : rl::test_suite<test_mutex_destuction2, 2, 
rl::test_result_destroying_owned_mutex>\r\n{\r\n    std::mutex* m;\r\n    std::atomic<int> f;\r\n\r\n    void before()\r\n    {\r\n        m = new std::mutex;\r\n        f($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            m->lock($);\r\n            f($) = 1;\r\n            while (1 == f($))\r\n                rl::yield(1, $);\r\n            m->unlock($);\r\n        }\r\n        else\r\n        {\r\n            while (0 == f($))\r\n                rl::yield(1, $);\r\n            delete m;\r\n            f($) = 2;\r\n        }\r\n    }\r\n};\r\n\r\n\r\nstruct test_mutex_recursion : rl::test_suite<test_mutex_recursion, 2>\r\n{\r\n    std::recursive_mutex mtx;\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        data($) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        RL_ASSERT(data($) == 2);\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        mtx.lock($);\r\n        mtx.lock($);\r\n        data($) += 1;\r\n        mtx.unlock($);\r\n        mtx.unlock($);\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct test_mutex_try_lock : rl::test_suite<test_mutex_try_lock, 2>\r\n{\r\n    std::recursive_mutex mtx;\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        data($) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        RL_ASSERT(data($) == 2);\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        while (false == mtx.try_lock($))\r\n            rl::yield(1, $);\r\n        RL_ASSERT(mtx.try_lock($));\r\n        data($) += 1;\r\n        mtx.unlock($);\r\n        mtx.unlock($);\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct test_mutex_recursion_error : rl::test_suite<test_mutex_recursion_error, 1, rl::test_result_recursion_on_nonrecursive_mutex>\r\n{\r\n    void thread(unsigned)\r\n    {\r\n        std::mutex m;\r\n        m.lock($);\r\n        m.lock($);\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct test_mutex_unlock_error : 
rl::test_suite<test_mutex_unlock_error, 1, rl::test_result_unlocking_mutex_wo_ownership>\r\n{\r\n    void thread(unsigned)\r\n    {\r\n        std::mutex m;\r\n        m.lock($);\r\n        m.unlock($);\r\n        m.unlock($);\r\n    }\r\n};\r\n\r\n\r\nstruct test_mutex_leak : rl::test_suite<test_mutex_leak, 1, rl::test_result_resource_leak>\r\n{\r\n    void thread(unsigned)\r\n    {\r\n        char* p = new char [sizeof(std::mutex)];\r\n        new (p) std::mutex();\r\n        delete [] p;\r\n    }\r\n};\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/ntest/msvc8/ntest.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 9.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ntest\", \"ntest.vcproj\", \"{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"rrd\", \"..\\..\\test\\msvc8\\rrd.vcproj\", \"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/ntest/msvc8/ntest.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"8,00\"\r\n\tName=\"ntest\"\r\n\tProjectGUID=\"{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}\"\r\n\tRootNamespace=\"ntest\"\r\n\tKeyword=\"Win32Proj\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=
\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tEnableFunctionLevelLinking=\"true\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<To
ol\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebDeploymentTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\ntest.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/ntest/msvc9/ntest.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 10.00\r\n# Visual Studio 2008\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"ntest\", \"ntest.vcproj\", \"{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}\"\r\nEndProject\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"rrd\", \"..\\..\\test\\msvc9\\rrd.vcproj\", \"{D4F501D0-382D-4CBC-86F4-56181F383444}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{D4F501D0-382D-4CBC-86F4-56181F383444}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/ntest/msvc9/ntest.vcproj",
    "content": "<?xml version=\"1.0\" encoding=\"windows-1251\"?>\r\n<VisualStudioProject\r\n\tProjectType=\"Visual C++\"\r\n\tVersion=\"9,00\"\r\n\tName=\"ntest\"\r\n\tProjectGUID=\"{D8A75C0E-3C9A-42E5-97EC-75AEBE64C372}\"\r\n\tRootNamespace=\"ntest\"\r\n\tKeyword=\"Win32Proj\"\r\n\tTargetFrameworkVersion=\"196613\"\r\n\t>\r\n\t<Platforms>\r\n\t\t<Platform\r\n\t\t\tName=\"Win32\"\r\n\t\t/>\r\n\t</Platforms>\r\n\t<ToolFiles>\r\n\t</ToolFiles>\r\n\t<Configurations>\r\n\t\t<Configuration\r\n\t\t\tName=\"Debug|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"0\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;_DEBUG;_CONSOLE\"\r\n\t\t\t\tMinimalRebuild=\"true\"\r\n\t\t\t\tBasicRuntimeChecks=\"3\"\r\n\t\t\t\tRuntimeLibrary=\"3\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDebugInformationFormat=\"4\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"2\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\
t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t\t<Configuration\r\n\t\t\tName=\"Release|Win32\"\r\n\t\t\tOutputDirectory=\"$(SolutionDir)$(ConfigurationName)\"\r\n\t\t\tIntermediateDirectory=\"$(ConfigurationName)\"\r\n\t\t\tConfigurationType=\"1\"\r\n\t\t\tCharacterSet=\"1\"\r\n\t\t\tWholeProgramOptimization=\"1\"\r\n\t\t\t>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreBuildEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCustomBuildTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXMLDataGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCWebServiceProxyGeneratorTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCMIDLTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\tOptimization=\"2\"\r\n\t\t\t\tEnableIntrinsicFunctions=\"true\"\r\n\t\t\t\tPreprocessorDefinitions=\"WIN32;NDEBUG;_CONSOLE\"\r\n\t\t\t\tRuntimeLibrary=\"2\"\r\n\t\t\t\tEnableFunctionLevelLinking=\"true\"\r\n\t\t\t\tUsePrecompiledHeader=\"2\"\r\n\t\t\t\tWarningLevel=\"3\"\r\n\t\t\t\tDebugInformationFormat=\"3\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManagedResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCResourceCompilerTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPreLinkEventTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCLinkerTool\"\r\n\t\t\t\tLinkIncremental=\"1\"\r\n\t\t\t\tGenerateDebugInformation=\"true\"\r\n\t\t\t\tSubSystem=\"1\"\r\n\t\t\t\tOptimizeReferences=\"2\"\r\n\t\t\t\tEnableCOMDATFolding=\"2\"\r\n\t\t\t\tTargetMachine=\"1\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCALinkTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCManifestTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCXDCMakeTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCBscMa
keTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCFxCopTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCAppVerifierTool\"\r\n\t\t\t/>\r\n\t\t\t<Tool\r\n\t\t\t\tName=\"VCPostBuildEventTool\"\r\n\t\t\t/>\r\n\t\t</Configuration>\r\n\t</Configurations>\r\n\t<References>\r\n\t</References>\r\n\t<Files>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\ntest.cpp\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.cpp\"\r\n\t\t\t>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Debug|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t\t<FileConfiguration\r\n\t\t\t\tName=\"Release|Win32\"\r\n\t\t\t\t>\r\n\t\t\t\t<Tool\r\n\t\t\t\t\tName=\"VCCLCompilerTool\"\r\n\t\t\t\t\tUsePrecompiledHeader=\"1\"\r\n\t\t\t\t/>\r\n\t\t\t</FileConfiguration>\r\n\t\t</File>\r\n\t\t<File\r\n\t\t\tRelativePath=\"..\\stdafx.h\"\r\n\t\t\t>\r\n\t\t</File>\r\n\t</Files>\r\n\t<Globals>\r\n\t</Globals>\r\n</VisualStudioProject>\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/ntest/ntest.cpp",
    "content": "#include \"stdafx.h\"\r\n#include \"../relacy/relacy_cli.hpp\"\r\n\r\nusing rl::nvar;\r\nusing rl::nvolatile;\r\nusing rl::mutex;\r\n\r\ntemplate<typename T>\r\nclass ws_deque\r\n{\r\npublic:\r\n    ws_deque()\r\n    {\r\n        m_mask($) = initial_size - 1;\r\n        m_headIndex($) = 0;\r\n        m_tailIndex($) = 0;\r\n        m_array($) = new nvar<T> [initial_size];\r\n        m_arraySize($) = initial_size;\r\n    }\r\n\r\n    bool IsEmpty()\r\n    {\r\n        return m_headIndex($) >= m_tailIndex($);\r\n    }\r\n\r\n    size_t Count()\r\n    {\r\n        return m_tailIndex($) - m_headIndex($);\r\n    }\r\n\r\n    void push(T item)\r\n    {\r\n        size_t tail = m_tailIndex($);\r\n        // original version:\r\n        //if (tail < m_headIndex($) + m_mask($))\r\n        // corrected version:\r\n        if (tail <= m_headIndex($) + m_mask($))\r\n        {\r\n            m_array($)[tail & m_mask($)]($) = item;\r\n            m_tailIndex($) = tail + 1;\r\n        }\r\n        else\r\n        {\r\n            m_foreignLock.lock($);\r\n            size_t head = m_headIndex($);\r\n            size_t count = Count();\r\n            if (count >= m_mask($))\r\n            {\r\n                size_t arraySize = m_arraySize($);\r\n                size_t mask = m_mask($);\r\n                nvar<T>* newArray = new nvar<T> [arraySize * 2];\r\n                nvar<T>* arr = m_array($);\r\n                // original version:\r\n                //for (size_t i = 0; i != arraySize; ++i)\r\n                // corrected version:\r\n                for (size_t i = 0; i != count; ++i)\r\n                    newArray[i]($) = arr[(i + head) & mask]($);\r\n                m_array($) = newArray;\r\n                m_arraySize($) = arraySize * 2;\r\n                m_headIndex($) = 0;\r\n                m_tailIndex($) = count;\r\n                tail = count;\r\n                m_mask($) = (mask * 2) | 1;\r\n            }\r\n            m_array($)[tail & 
m_mask($)]($) = item;\r\n            m_tailIndex($) = tail + 1;\r\n            m_foreignLock.unlock($);\r\n        }\r\n    }\r\n\r\n    bool pop(T& item)\r\n    {\r\n        size_t tail = m_tailIndex($);\r\n        // original version:\r\n        //if (m_headIndex($) >= tail)\r\n        //    return false;\r\n        // corrected version:\r\n        if (tail == 0)\r\n            return false;\r\n        tail -= 1;\r\n        rl::Interlocked::Exchange(m_tailIndex, tail, $);\r\n        if (m_headIndex($) <= tail)\r\n        {\r\n            item = m_array($)[tail & m_mask($)]($);\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            m_foreignLock.lock($);\r\n            if (m_headIndex($) <= tail)\r\n            {\r\n                item = m_array($)[tail & m_mask($)]($);\r\n                m_foreignLock.unlock($);\r\n                return true;\r\n            }\r\n            else\r\n            {\r\n                m_tailIndex($) = tail + 1;\r\n                m_foreignLock.unlock($);\r\n                return false;\r\n            }\r\n        }\r\n    }\r\n\r\n    bool steal(T& item)\r\n    {\r\n        if (false == m_foreignLock.try_lock($))\r\n            return false;\r\n        size_t head = m_headIndex($);\r\n        rl::Interlocked::Exchange(m_headIndex, head + 1, $);\r\n        if (head < m_tailIndex($))\r\n        {\r\n            item = m_array($)[head & m_mask($)]($);\r\n            m_foreignLock.unlock($);\r\n            return true;\r\n        }\r\n        else\r\n        {\r\n            m_headIndex($) = head;\r\n            m_foreignLock.unlock($);\r\n            return false;\r\n        }\r\n    }\r\n\r\nprivate:\r\n    static size_t const initial_size = 2;\r\n    nvar<nvar<T>*> m_array;\r\n    nvar<size_t> m_mask;\r\n    nvar<size_t> m_arraySize;\r\n    nvolatile<size_t> m_headIndex;\r\n    nvolatile<size_t> m_tailIndex;\r\n    mutex m_foreignLock;\r\n};\r\n\r\nstruct ws_deque_test : rl::test_suite<ws_deque_test, 
2>\r\n{\r\n    ws_deque<int> q;\r\n    bool state [2];\r\n\r\n    void before()\r\n    {\r\n        state[0] = true;\r\n        state[1] = true;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        RL_ASSERT(state[0] == false);\r\n        RL_ASSERT(state[1] == false);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            q.push(1);\r\n            q.push(2);\r\n\r\n            int item = 0;\r\n            bool res = q.pop(item);\r\n            RL_ASSERT(res && item == 2);\r\n            RL_ASSERT(state[1]);\r\n            state[1] = false;\r\n\r\n            item = 0;\r\n            res = q.pop(item);\r\n            if (res)\r\n            {\r\n                RL_ASSERT(state[0]);\r\n                state[0] = false;\r\n            }\r\n\r\n            item = 0;\r\n            res = q.pop(item);\r\n            RL_ASSERT(res == false);\r\n        }\r\n        else\r\n        {\r\n            int item = 0;\r\n            bool res = q.steal(item);\r\n            if (res)\r\n            {\r\n                RL_ASSERT(item == 1);\r\n                RL_ASSERT(state[0]);\r\n                state[0] = false;\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct test_api : rl::test_suite<test_api, 1>\r\n{\r\n    void thread(unsigned)\r\n    {\r\n        rl::nvar<int> cv1, cv2(3), cv3(cv1($)), cv4(cv1);\r\n        cv1($) = cv2($);\r\n        cv1($) = 1;\r\n        (int)cv1($);\r\n        cv1($) += 1;\r\n        cv1($) -= 1;\r\n        cv1($)++;\r\n        cv1($)--;\r\n        ++cv1($);\r\n        --cv1($);\r\n\r\n        int x = rl::Interlocked::Add(cv1, 3, $);\r\n        x = rl::Interlocked::CompareExchange(cv1, 3, x, $);\r\n        x = rl::Interlocked::Exchange(cv2, 6, $);\r\n        x = rl::Interlocked::Read(cv2, $);\r\n        x = rl::Interlocked::Increment(cv2, $);\r\n        x = rl::Interlocked::Decrement(cv2, $);\r\n\r\n        rl::Thread::MemoryBarrier($);\r\n        x = 
rl::Thread::VolatileRead(cv1, $);\r\n        rl::Thread::VolatileWrite(cv1, 5, $);\r\n        rl::Thread::SpinWait(1, $);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\n\r\n\r\nstruct ws_deque_test0 : rl::test_suite<ws_deque_test0, 4>\r\n{\r\n    ws_deque<int> q;\r\n\r\n    void before()\r\n    {\r\n    }\r\n\r\n    void after()\r\n    {\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push(10);\r\n            }\r\n\r\n            for (size_t i = 0; i != 5; ++i)\r\n            {\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push(10);\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push(10);\r\n                q.push(10);\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n                p = 0;\r\n                res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n\r\n            for (size_t i = 0; i != 4; ++i)\r\n            {\r\n                q.push(10);\r\n                q.push(10);\r\n                q.push(10);\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n\r\n            for (size_t i = 0; i != 14; ++i)\r\n            {\r\n                q.push(10);\r\n                int p = 0;\r\n                bool res = q.pop(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n        }\r\n        else\r\n        {\r\n            for (size_t i = 0; i 
!= 4; ++i)\r\n            {\r\n                int p = 0;\r\n                bool res = q.steal(p);\r\n                RL_ASSERT(10 == p || false == res);\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nint main()\r\n{\r\n    rl::test_params p;\r\n    p.iteration_count = 1000;\r\n    rl::simulate<ws_deque_test0>(p);\r\n    rl::simulate<ws_deque_test>(p);\r\n    rl::simulate<test_api>();\r\n}\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/ntest/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/ntest/stdafx.h",
    "content": "#ifndef STDAFX_H\r\n#define STDAFX_H\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n\r\n#include \"../relacy/pch.hpp\"\r\n\r\n\r\n#endif\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/pthread.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/pthread.h\"\r\n\r\n\r\n\r\nstruct test_pthread_thread : rl::test_suite<test_pthread_thread, 1>\r\n{\r\n    static size_t const dynamic_thread_count = 2;\r\n\r\n    VAR_T(int) data;\r\n\r\n    static void* func(void* param)\r\n    {\r\n        static_cast<test_pthread_thread*>(param)->VAR(data) += 1;\r\n        return 0;\r\n    }\r\n\r\n    void thread(unsigned)\r\n    {\r\n        VAR(data) = 0;\r\n\r\n        pthread_t th1;\r\n        pthread_create(&th1, 0, &test_pthread_thread::func, this);\r\n        void* res1 = 0;\r\n        pthread_join(th1, &res1);\r\n\r\n        RL_ASSERT(VAR(data) == 1);\r\n\r\n        pthread_t th2;\r\n        pthread_create(&th2, 0, &test_pthread_thread::func, this);\r\n        void* res2 = 0;\r\n        pthread_join(th2, &res2);\r\n\r\n        RL_ASSERT(VAR(data) == 2);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_pthread_mutex : rl::test_suite<test_pthread_mutex, 2>\r\n{\r\n    pthread_mutex_t mtx;\r\n    VAR_T(int) data;\r\n\r\n    void before()\r\n    {\r\n        pthread_mutexattr_t attr;\r\n        pthread_mutexattr_init(&attr);\r\n        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);\r\n        pthread_mutex_init(&mtx, &attr);\r\n        pthread_mutexattr_destroy(&attr);\r\n        VAR(data) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        pthread_mutex_destroy(&mtx);\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        pthread_mutex_lock(&mtx);\r\n        VAR(data) += 1;\r\n        pthread_mutex_unlock(&mtx);\r\n\r\n        if (0 == pthread_mutex_try_lock(&mtx))\r\n        {\r\n            VAR(data) += 1;\r\n            pthread_mutex_unlock(&mtx);\r\n        }\r\n\r\n        //pthread_mutex_timedlock\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_pthread_condvar : rl::test_suite<test_pthread_condvar, 3>\r\n{\r\n    pthread_cond_t cv;\r\n    pthread_mutex_t mtx;\r\n    VAR_T(int) stage;\r\n\r\n    void before()\r\n    {\r\n        
pthread_condattr_t attr;\r\n        pthread_cond_init(&cv, &attr);\r\n        pthread_mutex_init(&mtx, 0);\r\n        VAR(stage) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        pthread_cond_destroy(&cv);\r\n        pthread_mutex_destroy(&mtx);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            pthread_mutex_lock(&mtx);\r\n            VAR(stage) += 1;\r\n            pthread_cond_broadcast(&cv);\r\n            while (VAR(stage) != 2)\r\n                pthread_cond_wait(&cv, &mtx);\r\n            pthread_mutex_unlock(&mtx);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            pthread_mutex_lock(&mtx);\r\n            while (VAR(stage) != 1)\r\n            {\r\n                int ts = 1;\r\n                pthread_cond_timedwait(&cv, &mtx, &ts);\r\n            }\r\n            VAR(stage) += 1;\r\n            pthread_cond_broadcast(&cv);\r\n            pthread_mutex_unlock(&mtx);\r\n        }\r\n        else if (2 == index)\r\n        {\r\n            pthread_mutex_lock(&mtx);\r\n            while (VAR(stage) != 2)\r\n                pthread_cond_wait(&cv, &mtx);\r\n            pthread_mutex_unlock(&mtx);\r\n            pthread_cond_signal(&cv);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_pthread_condvar2 : rl::test_suite<test_pthread_condvar2, 2>\r\n{\r\n    pthread_cond_t cv1, cv2;\r\n    pthread_mutex_t mtx1, mtx2;\r\n    VAR_T(int) stage;\r\n\r\n    void before()\r\n    {\r\n        pthread_cond_init(&cv1, 0);\r\n        pthread_cond_init(&cv2, 0);\r\n        pthread_mutex_init(&mtx1, 0);\r\n        pthread_mutex_init(&mtx2, 0);\r\n        VAR(stage) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        pthread_cond_destroy(&cv1);\r\n        pthread_cond_destroy(&cv2);\r\n        pthread_mutex_destroy(&mtx1);\r\n        pthread_mutex_destroy(&mtx2);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n          
  pthread_mutex_lock(&mtx1);\r\n            int ts = 1;\r\n            pthread_cond_timedwait(&cv1, &mtx1, &ts);\r\n            pthread_mutex_unlock(&mtx1);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            pthread_mutex_lock(&mtx2);\r\n            int ts = 1;\r\n            pthread_cond_timedwait(&cv2, &mtx2, &ts);\r\n            pthread_mutex_unlock(&mtx2);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_pthread_rwlock : rl::test_suite<test_pthread_rwlock, 3>\r\n{\r\n    pthread_rwlock_t mtx;\r\n    VAR_T(int) data;\r\n\r\n    void before()\r\n    {\r\n        pthread_mutexattr_t attr;\r\n        pthread_mutexattr_init(&attr);\r\n        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);\r\n        pthread_rwlock_init(&mtx, &attr);\r\n        pthread_mutexattr_destroy(&attr);\r\n        VAR(data) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        pthread_rwlock_destroy(&mtx);\r\n    }\r\n\r\n    void thread(unsigned /*index*/)\r\n    {\r\n        pthread_rwlock_wrlock(&mtx);\r\n        VAR(data) += 1;\r\n        pthread_rwlock_unlock(&mtx);\r\n\r\n        if (0 == pthread_rwlock_trywrlock(&mtx))\r\n        {\r\n            VAR(data) += 1;\r\n            pthread_rwlock_unlock(&mtx);\r\n        }\r\n\r\n        pthread_rwlock_rdlock(&mtx);\r\n        (void)(int)VAR(data);\r\n        pthread_rwlock_unlock(&mtx);\r\n\r\n        if (0 == pthread_rwlock_tryrdlock(&mtx))\r\n        {\r\n            (void)(int)VAR(data);\r\n            pthread_rwlock_unlock(&mtx);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_pthread_sem : rl::test_suite<test_pthread_sem, 2>\r\n{\r\n    sem_t sem1, sem2;\r\n    VAR_T(int) data;\r\n\r\n    void before()\r\n    {\r\n        sem_init(&sem1, 0, 0);\r\n        sem_init(&sem2, 0, 0);\r\n        VAR(data) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        sem_destroy(&sem1);\r\n        sem_destroy(&sem2);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if 
(index)\r\n        {\r\n            VAR(data) = 1;\r\n            sem_post(&sem1);\r\n            while (sem_trywait(&sem2))\r\n            {\r\n                assert(errno == EINTR || errno == EAGAIN);\r\n                pthread_yield();\r\n            }\r\n            RL_ASSERT(VAR(data) == 2);\r\n            VAR(data) = 3;\r\n            int count = -1;\r\n            sem_getvalue(&sem2, &count);\r\n            RL_ASSERT(count == 0);\r\n            sem_post(&sem2);\r\n            sem_getvalue(&sem2, &count);\r\n            RL_ASSERT(count == 1);\r\n        }\r\n        else\r\n        {\r\n            while (sem_wait(&sem1))\r\n                assert(errno == EINTR);\r\n            RL_ASSERT(VAR(data) == 1);\r\n            VAR(data) = 2;\r\n            sem_post(&sem2);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/scheduler.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\nstruct livelock_test : rl::test_suite<livelock_test, 2, rl::test_result_livelock>\r\n{\r\n    std::atomic<int> x;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            for (;;)\r\n            {\r\n                int cmp = 1;\r\n                if (x($).compare_exchange_weak(cmp, 2))\r\n                    break;\r\n            }\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            x($).store(1);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct yield_livelock_test : rl::test_suite<yield_livelock_test, 2, rl::test_result_livelock>\r\n{\r\n    std::atomic<int> x, y;\r\n\r\n    void before()\r\n    {\r\n        x($) = 0;\r\n        y($) = 0;\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            rl::backoff b;\r\n            for (;;)\r\n            {\r\n                int cmp = 0;\r\n                if (x($).compare_exchange_weak(cmp, 1))\r\n                {\r\n                    cmp = 0;\r\n                    if (y($).compare_exchange_weak(cmp, 1))\r\n                    {\r\n                        x($).store(0);\r\n                        y($).store(0);\r\n                        break;\r\n                    }\r\n                    else\r\n                    {\r\n                        x($).store(0);\r\n                    }\r\n                }\r\n                b.yield($);\r\n            }\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            rl::backoff b;\r\n            for (;;)\r\n            {\r\n                int cmp = 0;\r\n                if (y($).compare_exchange_weak(cmp, 1))\r\n                {\r\n                    cmp = 0;\r\n                    if (x($).compare_exchange_weak(cmp, 1))\r\n                    {\r\n                        
y($).store(0);\r\n                        x($).store(0);\r\n                        break;\r\n                    }\r\n                    else\r\n                    {\r\n                        y($).store(0);\r\n                    }\r\n                }\r\n                b.yield($);\r\n            }\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct sched_load_test : rl::test_suite<sched_load_test, 2>\r\n{\r\n    std::recursive_mutex mtx1, mtx2;\r\n    std::condition_variable_any cv1, cv2;\r\n    VAR_T(int) data1, data2;\r\n\r\n    void before()\r\n    {\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index % 2)\r\n        {\r\n            mtx1.lock($);\r\n            VAR(data1) = 1;\r\n            mtx1.unlock($);\r\n\r\n            mtx2.lock($);\r\n            mtx2.lock($);\r\n            VAR(data2) = 1;\r\n            mtx2.unlock($);\r\n            mtx2.unlock($);\r\n\r\n            if (mtx1.try_lock($))\r\n            {\r\n                //mtx1.lock($);\r\n                VAR(data1) = 1;\r\n                //mtx1.unlock($);\r\n                mtx1.unlock($);\r\n            }\r\n\r\n            mtx1.lock($);\r\n            VAR(data1) = 2;\r\n            cv1.notify_all($);\r\n            mtx1.unlock($);\r\n\r\n            mtx2.lock($);\r\n            while (VAR(data2) != 2)\r\n            {\r\n                rl::yield(1, $);\r\n                cv2.wait_for(mtx2, 1, $);\r\n            }\r\n            mtx2.unlock($);\r\n        }\r\n        else\r\n        {\r\n            mtx2.lock($);\r\n            VAR(data2) = 1;\r\n            mtx2.unlock($);\r\n\r\n            mtx1.lock($);\r\n            mtx1.lock($);\r\n            VAR(data1) = 1;\r\n            mtx1.unlock($);\r\n            mtx1.unlock($);\r\n\r\n            if (mtx2.try_lock($))\r\n            {\r\n                //mtx2.lock($);\r\n                VAR(data2) = 1;\r\n                //mtx2.unlock($);\r\n                mtx2.unlock($);\r\n            }\r\n\r\n            
mtx2.lock($);\r\n            VAR(data2) = 2;\r\n            mtx2.unlock($);\r\n            cv2.notify_all($);\r\n\r\n            mtx1.lock($);\r\n            while (VAR(data1) != 2)\r\n            {\r\n                rl::yield(1, $);\r\n                cv1.wait_for(mtx1, 1, $);\r\n            }\r\n            mtx1.unlock($);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/semaphore.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\nstruct test_semaphore : rl::test_suite<test_semaphore, 2>\r\n{\r\n    HANDLE sema;\r\n    VAR_T(int) data;\r\n\r\n    void before()\r\n    {\r\n        VAR(data) = 0;\r\n        sema = CreateSemaphore(0, 0, 2, 0);\r\n    }\r\n\r\n    void after()\r\n    {\r\n        CloseHandle(sema);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            VAR(data) = 1;\r\n            ReleaseSemaphore(sema, 1, 0);\r\n        }\r\n        else\r\n        {\r\n            unsigned rv = WaitForSingleObject(sema, INFINITE);\r\n\t  \t\t\t\tassert(rv == WAIT_OBJECT_0);\r\n            assert(VAR(data) == 1);\r\n\t\t\t  \t\trv = WaitForSingleObject(sema, 0);\r\n\t\t\t\t\t  assert(rv == WAIT_TIMEOUT);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_semaphore_atomic : rl::test_suite<test_semaphore_atomic, 2>\r\n{\r\n\tHANDLE sem [2];\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tsem[0] = CreateSemaphore(0, 0, 2, 0);\r\n\t\tsem[1] = CreateSemaphore(0, 0, 2, 0);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(sem[0]);\r\n\t\tCloseHandle(sem[1]);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForSingleObject(sem[0], INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t\tReleaseSemaphore(sem[1], 1, 0);\r\n\t\t\trv = WaitForSingleObject(sem[1], 0);\r\n\t\t\tassert(rv == WAIT_TIMEOUT);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tunsigned rv = SignalObjectAndWait(sem[0], sem[1], INFINITE, 0);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t\trv = WaitForSingleObject(sem[1], 0);\r\n\t\t\tassert(rv == WAIT_TIMEOUT);\r\n\t\t\trv = WaitForSingleObject(sem[0], 0);\r\n\t\t\tassert(rv == WAIT_TIMEOUT);\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/stdafx.cpp",
    "content": "#include \"stdafx.h\"\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/stdafx.h",
    "content": "#ifndef STDAFX_H\r\n#define STDAFX_H\r\n#ifdef _MSC_VER\r\n#   pragma once\r\n#endif\r\n\r\n#ifdef _MSC_VER\r\n#    pragma warning (disable: 4127)\r\n#endif\r\n\r\n#if defined(_MSC_VER) && (_MSC_VER <= 1310)\r\n//#    pragma warning (disable: 4511)\r\n//#    pragma warning (disable: 4512)\r\n#endif\r\n\r\n\r\n#ifdef NDEBUG\r\n#   define _SECURE_SCL 0\r\n#endif\r\n\r\n#include \"../relacy/pch.hpp\"\r\n\r\n\r\n#endif\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/thread_local.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy.hpp\"\r\n\r\n\r\n\r\nstruct tls_basic_test : rl::test_suite<tls_basic_test, 3>\r\n{\r\n    rl::thread_local_var<unsigned> x;\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        RL_ASSERT(x.get($) == 0);\r\n        x.set(index + 10, $);\r\n        RL_ASSERT(x.get($) == index + 10);\r\n    }\r\n};\r\n\r\n\r\nstruct tls_basic_test2 : rl::test_suite<tls_basic_test2, 3>\r\n{\r\n    TLS_T(unsigned) x;\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        RL_ASSERT(VAR(x) == 0);\r\n        VAR(x) = index + 10;\r\n        RL_ASSERT(VAR(x) == index + 10);\r\n    }\r\n};\r\n\r\n\r\nstruct tls_reset_test : rl::test_suite<tls_reset_test, 3, rl::test_result_user_assert_failed>\r\n{\r\n    rl::thread_local_var<unsigned> x;\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        RL_ASSERT(x.get($) == 0);\r\n        x.set(index + 10, $);\r\n        RL_ASSERT(x.get($) == index + 10);\r\n        RL_ASSERT(false);\r\n    }\r\n};\r\n\r\n\r\nrl::thread_local_var<unsigned> tls_global_test_x;\r\nstruct tls_global_test : rl::test_suite<tls_global_test, 3, rl::test_result_user_assert_failed>\r\n{\r\n    void thread(unsigned index)\r\n    {\r\n        RL_ASSERT(tls_global_test_x.get($) == 0);\r\n        tls_global_test_x.set(index + 10, $);\r\n        RL_ASSERT(tls_global_test_x.get($) == index + 10);\r\n        RL_ASSERT(false);\r\n    }\r\n};\r\n\r\n\r\nstruct tls_win32_test : rl::test_suite<tls_win32_test, 3>\r\n{\r\n    unsigned long slot;\r\n\r\n    void before()\r\n    {\r\n        slot = TlsAlloc();\r\n    }\r\n\r\n    void after()\r\n    {\r\n        TlsFree(slot);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        RL_ASSERT(TlsGetValue(slot) == 0);\r\n        TlsSetValue(slot, (void*)(uintptr_t)(index + 10));\r\n        RL_ASSERT(TlsGetValue(slot) == (void*)(uintptr_t)(index + 10));\r\n    }\r\n};\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/todo.txt",
    "content": "Relacy Race Detector Todo List:\r\n\r\n- use indirection and indices for TLS, because on Windows TLS index is DWORD (not DWORD_PTR) (eliminate pointers?)\r\n+ provide rl::hash_ptr()\r\n- support for fair timed waits\r\n+ remove iteration count estimation from full sched -> causes division by 0\r\n- history: memory allocation before object ctor (new T (...))\r\n+ code in test::after() affects iteration count with full scheduler -> final and estimated iteration counts are the same\r\n\r\n- non-deterministic sub-expression calculation:\r\nfoo(bar.load(std::memory_order_acquire), baz.load(std::memory_order_acquire));\r\n\r\n- post issue:\r\ncan't simulate some modification orders in presence of data-races-type-2 for atomic vars:\r\n//thread 1\r\nx.store(1, std::memory_order_relaxed);\r\ny.store(1, std::memory_order_relaxed);\r\n//thread 2\r\nwhile (y.load(std::memory_order_relaxed) == 0\r\n{}\r\nx.store(2, std::memory_order_relaxed);\r\n-> modification order of 'x' will never be \"2, 1\"\r\n\r\n\r\n [CORE]\r\n- initially run threads one by one\r\n- initially run some iterations twice, in order to check that unit-test is deterministic\r\n? add unique identifiers to atomics, vars, mutexes etc (address can be useful too)\r\n- example catalog (description, used techniques, what error is found)\r\n- do I need sched() before atomic loads?\r\n- do I need sched() before mutex unlock?\r\n- for loads output in history value of which store is loaded\r\n- detect dead-code\r\n- output which operations cause data race\r\n? 
output happens-before matrix, synchronizes-with matrix etc\r\n- SEH handler to catch paging faults\r\n- sched before malloc/free to allow more ABA\r\n\r\n [PERF]\r\n- implement performance simulation\r\n - cacheline transfers\r\n - atomic rmw operations\r\n - fences\r\n\r\n[OTHER]\r\n- parallelize the run-time for random scheduler\r\n- parallelize the run-time for tree search scheduler\r\n- manual control over scheduler\r\n- persistent checkpointing of scheduler state (to allow \"continue\")\r\n- atomic blocks (pdr implementation -> pdr component)\r\n? state space reductions (sleep sets, dynamic persistent sets)\r\n? what can I do with serialization points -> user specifies \"visible\" results\r\n    system checks for linearizablity -> \"visible\" results equal to some sequential execution\r\n? save program state inside iteration (save point), continue other iterations from this save point \r\n? partial order reductions by memorizing happens-before graphs, not program state\r\n? estimate progress by seeing how many iterations it gets to move 0->1 on some stree level\r\n? lower bound, upper bound, mean of progress\r\n\r\nO(X) = (P^(C + 3)) * (N^(P + C + 1)) * (P + C)!\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/trash/original.hpp",
    "content": "Here is a recent version of the revised pc_sample.c which uses inline x86\r\nASM and compiles under VC++ (I am planning on coding the entire thing in\r\npure assembly language):\r\n____________________________________________________________________\r\n#if ! defined(PC_SAMPLE_INCLUDE_H)\r\n#   define PC_SAMPLE_INCLUDE_H\r\n#   pragma warning(push)\r\n#   pragma warning (disable : 4100 4505 4706)\r\n#   if defined(__cplusplus)\r\n      extern \"C\" {\r\n#   endif\r\n/*===========================================================*/\r\n\r\n/* Very Simple x86 Atomic Operations API & Implmentation\r\n_____________________________________________________________*/\r\ntypedef __int32 atomicword;\r\ntypedef atomicword volatile* const atomicword_pthis;\r\n\r\nstatic int\r\nx86_DWCASPTR(\r\n void volatile* const,\r\n void* const,\r\n void const* const\r\n);\r\n\r\nstatic atomicword\r\nx86_XADDWORD(\r\n atomicword_pthis,\r\n atomicword const\r\n);\r\n\r\nstatic atomicword\r\nx86_XCHGWORD(\r\n atomicword_pthis,\r\n atomicword const\r\n);\r\n\r\n__declspec(naked) int\r\nx86_DWCASPTR(\r\n void volatile* const _pthis,\r\n void* const pcmp,\r\n void const* const pxhcg\r\n) {\r\n  _asm {\r\n    PUSH ESI\r\n    PUSH EBX\r\n    MOV ESI, [ESP + 16]\r\n    MOV EAX, [ESI]\r\n    MOV EDX, [ESI + 4]\r\n    MOV ESI, [ESP + 20]\r\n    MOV EBX, [ESI]\r\n    MOV ECX, [ESI + 4]\r\n    MOV ESI, [ESP + 12]\r\n    LOCK CMPXCHG8B QWORD PTR [ESI]\r\n    JNE x86_DWCASPTR_failed\r\n    MOV EAX, 1\r\n    POP EBX\r\n    POP ESI\r\n    RET\r\n\r\nx86_DWCASPTR_failed:\r\n    MOV ESI, [ESP + 16]\r\n    MOV [ESI], EAX\r\n    MOV [ESI + 4], EDX\r\n    MOV EAX, 0\r\n    POP EBX\r\n    POP ESI\r\n    RET\r\n  }\r\n\r\n}\r\n\r\n__declspec(naked) atomicword\r\nx86_XADDWORD(\r\n atomicword_pthis _pthis,\r\n atomicword const value\r\n) {\r\n  _asm {\r\n    MOV EDX, [ESP + 4]\r\n    MOV EAX, [ESP + 8]\r\n    LOCK XADD [EDX], EAX\r\n    RET\r\n  }\r\n\r\n}\r\n\r\n__declspec(naked) 
atomicword\r\nx86_XCHGWORD(\r\n atomicword_pthis _pthis,\r\n atomicword const value\r\n) {\r\n  _asm {\r\n    MOV EDX, [ESP + 4]\r\n    MOV EAX, [ESP + 8]\r\n    XCHG [EDX], EAX\r\n    RET\r\n  }\r\n\r\n}\r\n\r\n#define x86_XCHGPTR(mp_pdest, mp_src) ( \\\r\n  (void*)x86_XCHGWORD( \\\r\n    ((atomicword_pthis)(mp_pdest)), \\\r\n    ((atomicword const)(mp_src)) \\\r\n  ) \\\r\n)\r\n\r\n#define XCHGWORD x86_XCHGWORD\r\n#define XCHGPTR x86_XCHGPTR\r\n#define XADDWORD x86_XADDWORD\r\n#define DWCASPTR x86_DWCASPTR\r\n\r\n/* Proxy-Collector API & Implmentation (Revisited)  ;^)\r\n   Inventor: Chris M. Thomasson\r\n_____________________________________________________________*/\r\n#include <stddef.h>\r\n#include <assert.h>\r\n#if ! defined(NDEBUG)\r\n# include <stdio.h>\r\n#endif\r\n\r\n#define CONTAINER_OF(mp_this, mp_type, mp_member) ( \\\r\n  (mp_type*)(((unsigned char*)(mp_this)) - \\\r\n  offsetof(mp_type, mp_member)) \\\r\n)\r\n\r\ntypedef struct pc_region_s pc_region, pc_node;\r\ntypedef struct pc_master_s pc_master;\r\ntypedef void (pc_fp_dtor) (pc_node*);\r\ntypedef struct pc_sys_anchor_s pc_sys_anchor;\r\n\r\nstruct pc_sys_anchor_s {\r\n  atomicword refcnt;\r\n  pc_region* region;\r\n\r\n};\r\n\r\nstruct pc_region_s {\r\n  pc_sys_anchor next;\r\n  pc_node* defer;\r\n\r\n};\r\n\r\nstruct pc_master_s {\r\n  pc_sys_anchor head;\r\n  pc_region region;\r\n  pc_fp_dtor* fp_dtor;\r\n};\r\n\r\n#define PC_MASTER_STATICINIT(mp_this, mp_fp_dtor) { \\\r\n  { 0, &(mp_this)->region }, \\\r\n  { { 0, NULL }, NULL }, (mp_fp_dtor) \\\r\n\r\n}\r\n\r\nstatic void\r\npc_sys_dtor(\r\n pc_master* const,\r\n pc_region* const\r\n);\r\n\r\nstatic void\r\npc_init(\r\n pc_master* const,\r\n pc_fp_dtor* const\r\n);\r\n\r\nstatic void\r\npc_node_init(\r\n pc_node* const\r\n);\r\n\r\nstatic void\r\npc_node_link(\r\n pc_node* const,\r\n pc_node* const\r\n);\r\n\r\nstatic pc_region*\r\npc_acquire(\r\n pc_master* const\r\n);\r\n\r\nstatic void\r\npc_release(\r\n pc_master* const,\r\n pc_region* 
const\r\n);\r\n\r\nstatic void\r\npc_defer(\r\n pc_region* const,\r\n pc_node* const\r\n);\r\n\r\nstatic void\r\npc_mutate(\r\n pc_master* const,\r\n pc_node* const\r\n);\r\n\r\nvoid\r\npc_init(\r\n pc_master* const _this,\r\n pc_fp_dtor* const fp_dtor\r\n) {\r\n  pc_master src = { { 0 } };\r\n  *_this = src;\r\n  _this->head.region = &_this->region;\r\n  _this->fp_dtor = fp_dtor;\r\n\r\n}\r\n\r\npc_region*\r\npc_acquire(\r\n pc_master* const _this\r\n) {\r\n  pc_sys_anchor cmp = _this->head, xchg;\r\n  do {\r\n    xchg.refcnt = cmp.refcnt + 2;\r\n    xchg.region = cmp.region;\r\n  } while (! DWCASPTR(&_this->head, &cmp, &xchg));\r\n  return cmp.region;\r\n\r\n}\r\n\r\nvoid\r\npc_release(\r\n pc_master* const _this,\r\n pc_region* const region\r\n) {\r\n  if (XADDWORD(&region->next.refcnt, -2) == 3) {\r\n    pc_sys_dtor(_this, region);\r\n  }\r\n\r\n}\r\n\r\nvoid\r\npc_node_init(\r\n pc_node* const _this\r\n) {\r\n  pc_node src = { { 0 } };\r\n  *_this = src;\r\n\r\n}\r\n\r\nvoid\r\npc_node_link(\r\n pc_node* const _this,\r\n pc_node* const next\r\n) {\r\n  _this->defer = next;\r\n\r\n}\r\n\r\nvoid\r\npc_defer(\r\n pc_region* const _this,\r\n pc_node* const node\r\n) {\r\n  node->defer = XCHGPTR(&_this->defer, node);\r\n\r\n}\r\n\r\nvoid\r\npc_mutate(\r\n pc_master* const _this,\r\n pc_node* const node\r\n) {\r\n  pc_sys_anchor cmp = _this->head, xchg = { 0 };\r\n  node->next.refcnt = 2;\r\n  node->next.region = NULL;\r\n  xchg.region = node;\r\n  while (! 
DWCASPTR(&_this->head, &cmp, &xchg));\r\n  cmp.region->next.region = node;\r\n  if (XADDWORD(&cmp.region->next.refcnt,\r\n               cmp.refcnt + 1) == -cmp.refcnt) {\r\n    pc_sys_dtor(_this, cmp.region);\r\n  }\r\n\r\n}\r\n\r\nvoid\r\npc_sys_dtor(\r\n pc_master* const _this,\r\n pc_region* const region\r\n) {\r\n  int dtors = 0, reset = 0;\r\n  pc_region* head = region;\r\n  pc_region* tail = region;\r\n  pc_region* next = region->next.region;\r\n\r\n  while (next) {\r\n    if (XADDWORD(&next->next.refcnt, -2) != 3) {\r\n      break;\r\n    }\r\n    tail = next;\r\n    next = next->next.region;\r\n  }\r\n\r\n  tail->next.region = NULL;\r\n  while (head) {\r\n    pc_region* const next = head->next.region;\r\n    pc_node* defer = head->defer;\r\n    assert(head->next.refcnt == 1);\r\n    if (head != &_this->region) {\r\n      head->defer = defer;\r\n      defer = head;\r\n    } else {\r\n      reset = 1;\r\n    }\r\n    while (defer) {\r\n      pc_node* const next = defer->defer;\r\n      _this->fp_dtor(defer);\r\n      ++dtors;\r\n      defer = next;\r\n    }\r\n    head = next;\r\n  }\r\n\r\n  if (reset) {\r\n    _this->region.defer = NULL;\r\n    pc_mutate(_this, &_this->region);\r\n  }\r\n\r\n#if ! 
defined(NDEBUG)\r\n  {\r\n    static atomicword g_pc_sys_epoch = 0;\r\n    atomicword const epoch = XADDWORD(&g_pc_sys_epoch, 1);\r\n    if (dtors) {\r\n      printf(\"pc_sys_dtor::epoch/dtors(%d/%d)\\n\",\r\n             epoch, dtors);\r\n    }\r\n  }\r\n#endif\r\n\r\n}\r\n\r\n/*===========================================================*/\r\n#   if defined(__cplusplus)\r\n      }\r\n#   endif\r\n#   pragma warning(pop)\r\n#endif\r\n____________________________________________________________________\r\n\r\n\r\n struct foo_node {\r\n  foo_node* next;\r\n  pc_node pcn;\r\n\r\n};\r\n\r\nstruct foo_list {\r\n  foo_node* head;\r\n  pc_master pc;\r\n\r\n};\r\n\r\nstatic foo_list g_list = {\r\n  NULL, PC_MASTER_STATICINIT()\r\n\r\n};\r\n\r\nvoid foo_node_dtor(pc_node* pcn) {\r\n  foo_node* const _this = container_of(pcn, foo_node, pcn);\r\n  free(_this);\r\n\r\n}\r\n\r\nvoid foo_reader() {\r\n  int i;\r\n  foo_node* node;\r\n  pc_region* pcr = pc_acquire(&g_list.pc);\r\n  for (i = 1 ;; ++i) {\r\n    node = LOAD_DEPENDS(&g_list.head);\r\n    while (node) {\r\n      foo_node* const next = LOAD_MBDEPEND(&node->next);\r\n      [...];\r\n      node = next;\r\n    }\r\n    if (! (i % 1000)) {\r\n      pc_release(&g_list.pc, pcr);\r\n      pcr = pc_acquire(&g_list.pc);\r\n    }\r\n  }\r\n  pc_release(&g_list.pc, pcr);\r\n\r\n}\r\n\r\nvoid foo_writer() {\r\n  int i;\r\n  foo_node* node, *cmp;\r\n  pc_region* pcr = pc_acquire(&g_list.pc);\r\n  for (i = 1 ;; ++i) {\r\n    if (i % 10) {\r\n      node = malloc(sizeof(*node));\r\n      if (node) {\r\n        foo_node* cmp;\r\n        pc_node_init(node, NULL, foo_node_dtor);\r\n        cmp = g_list.head;\r\n        do {\r\n          node->next = cmp;\r\n        } while (! CASIBM_MBREL(&g_list.head, &cmp, node));\r\n      }\r\n    } else {\r\n      node = g_list.head;\r\n      do {\r\n        if (! node) { break; }\r\n      } while (! CASIBM_MBACQ(&g_list.head, &node, node->next));\r\n      if (node) {\r\n        if (! 
(i % 20)) {\r\n          pc_mutate(&g_list.pc, &node->pcn);\r\n        } else {\r\n          pc_defer(pcr, &node->pcn);\r\n        }\r\n      }\r\n    }\r\n    if (! (i % 500)) {\r\n      pc_release(&g_list.pc, pcr);\r\n      pcr = pc_acquire(&g_list.pc);\r\n    }\r\n  }\r\n  pc_release(&g_list.pc, pcr);\r\n} \r\n\r\n\r\n\r\n\r\n\r\n1. Region 1 is current\r\n2. Thread 1 acquires region 1\r\n3. Thread 2 executes pc_mutate()\r\n4. Region 2 is current\r\n5. Thread 3 acquires region 2\r\n6. Thread 3 loads pointer to node 1\r\n7. Thread 1 removes node 1 from data structure\r\n8. Thread 1 executes pc_defer() and defers node 1 to region 1\r\n9. Thread 1 releases region 1\r\n10. Dtor executed for region 1, node 1 is deleted\r\n11. Thread 3 accesses node 1\r\n12. Bang! \r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/trash/rtl.hpp",
    "content": "#pragma once\r\n\r\n#include \"../../relacy/relacy_std.hpp\"\r\n\r\n\r\nintptr_t const lock_value\t\t        = (intptr_t)-1;\r\n\r\nstruct rdesc\r\n{\r\n    rl::var<std::atomic<intptr_t> const*> addr;\r\n    rl::var<intptr_t>           cmp;\r\n};\r\n\r\nstruct wdesc\r\n{\r\n    rl::var<std::atomic<intptr_t>*> addr;\r\n    rl::var<intptr_t>           cmp;\r\n    rl::var<intptr_t>           xchg;\r\n};\r\n\r\nstruct trx\r\n{\r\n\tstatic size_t const rset_max_size   = 64;\r\n\tstatic size_t const wset_max_size   = 32;\r\n\r\n\trl::var<size_t>             rset_idx;\r\n\trl::var<size_t>\t\t\t    wset_idx;\r\n    rdesc\t\t\trset\t            [rset_max_size];\r\n    wdesc\t\t\twset                [wset_max_size];\r\n\r\n    rl::var<rdesc*> read(std::atomic<intptr_t> const* addr, std::memory_order mo = std::memory_order_relaxed)\r\n    {\r\n        intptr_t value = (*addr)($).load(mo);\r\n\t\tif (lock_value == value)\r\n\t\t\treturn 0;\r\n        rdesc* desc = &rset[rset_idx($)];\r\n\t\t++rset_idx($);\r\n        desc->addr($) = addr;\r\n        desc->cmp($) = value;\r\n\t\treturn desc;\r\n    }\r\n\r\n    rl::var<wdesc*> write(std::atomic<intptr_t>* addr)\r\n    {\r\n        intptr_t value = (*addr)($).swap(lock_value, rl::memory_order_acq_rel);\r\n\t\tif (lock_value == value)\r\n\t\t\treturn 0;\r\n        wdesc* desc = &wset[wset_idx($)];\r\n\t\t++wset_idx($);\r\n\t\tdesc->addr($) = addr;\r\n\t\tdesc->cmp($) = value;\r\n\t\treturn desc;\r\n    }\r\n\r\n    bool begin()\r\n    {\r\n        std::atomic_signal_fence($)(std::memory_order_acquire);\r\n\t\trset_idx($) = 0;\r\n\t\twset_idx($) = 0;\r\n\t\treturn true;\r\n    }\r\n\r\n    bool commit()\r\n    {\r\n        std::atomic_signal_fence($)(std::memory_order_release);\r\n\r\n        size_t i;\r\n        for (i = 0; i != rset_idx($); ++i)\r\n        {\r\n            rdesc const* desc = &rset[i];\r\n            if ((*(desc->addr($)))($).load(std::memory_order_relaxed) != desc->cmp($))\r\n                
break;\r\n        }\r\n        if (i != rset_idx($))\r\n        {\r\n            return rollback();\r\n        }\r\n\r\n        std::atomic_thread_fence($)(std::memory_order_release);\r\n\r\n        for (i = 0; i != wset_idx($); ++i)\r\n        {\r\n            wdesc const* desc = &wset[i];\r\n            (*(desc->addr($)))($).store(desc->xchg($), std::memory_order_relaxed);\r\n        }\r\n\r\n        //std::atomic_thread_fence(std::memory_order_acq_rel);\r\n\r\n        return true;\r\n    }\r\n\r\n    bool rollback()\r\n    {\r\n        for (size_t i = 0; i != wset_idx($); ++i)\r\n        {\r\n            wdesc const* desc = &wset[i];\r\n            (*(desc->addr($)))($).store(desc->cmp($), std::memory_order_relaxed);\r\n        }\r\n\t\twset_idx($) = 0;\r\n\t\trset_idx($) = 0;\r\n\t\treturn false;\r\n    }\r\n\r\n    /*\r\n\tbool readset_validate()\r\n    {\r\n        for (size_t i = 0; i != rset_idx; ++i)\r\n        {\r\n            rdesc const* desc = &rset[i];\r\n            if (*(intptr_t const volatile*)desc->addr != desc->cmp)\r\n                return true;\r\n        }\r\n        return false;\r\n    }\r\n\r\n    bool writeset_load(intptr_t* addr, intptr_t* value)\r\n    {\r\n        for (size_t i = 0; i != wset_idx; ++i)\r\n        {\r\n            wdesc const* desc = &wset[i];\r\n            if (desc->addr == addr)\r\n            {\r\n                *value = desc->xchg;\r\n                return true;\r\n            }\r\n        }\r\n        return false;\r\n    }\r\n    */\r\n};\r\n\r\n\r\n\r\n\r\ninline void pdr_lock()\r\n{\r\n}\r\n\r\ninline void pdr_unlock()\r\n{\r\n}\r\n\r\ninline void pdr_acquire(void*)\r\n{\r\n}\r\n\r\ninline void pdr_release(void*)\r\n{\r\n}\r\n\r\ninline void pdr_dispose(void*)\r\n{\r\n}\r\n\r\n\r\n\r\n\r\nstruct dlist_trx_node\r\n{\r\n    std::atomic<intptr_t> prev; // dlist_trx_node*\r\n    std::atomic<intptr_t> next; // dlist_trx_node*\r\n    rl::var<intptr_t> key;\r\n    rl::var<intptr_t> value;\r\n\r\n    
dlist_trx_node(intptr_t key = 0, intptr_t value = 0)\r\n        : key(key)\r\n        , value(value)\r\n    {}\r\n};\r\n\r\n\r\n\r\n\r\n\r\nclass dlist_trx\r\n{\r\npublic:\r\n    dlist_trx()\r\n        : first(0, 0)\r\n        , last(0, 0)\r\n    {\r\n        first.prev($).store(0, std::memory_order_relaxed);\r\n        first.next($).store((intptr_t)&last, std::memory_order_relaxed);\r\n        last.prev($).store((intptr_t)&first, std::memory_order_relaxed);\r\n        last.next($).store(0, std::memory_order_relaxed);\r\n    }\r\n\r\n    __declspec(noinline) void remove(dlist_trx_node* node)\r\n    {\r\n        pdr_lock();\r\n\r\n        for (trx t; t.begin(); t.rollback())\r\n        {\r\n\t\t\trdesc* r1 = t.read(&node->prev)($);\r\n\t\t\tif (0 == r1)\r\n\t\t\t\tcontinue;\r\n\t\t\tdlist_trx_node* prev = (dlist_trx_node*)(intptr_t)r1->cmp($);\r\n\r\n\t\t\trdesc* r2 = t.read(&node->next)($);\r\n\t\t\tif (0 == r2)\r\n\t\t\t\tcontinue;\r\n\t\t\tdlist_trx_node* next = (dlist_trx_node*)(intptr_t)r2->cmp($);\r\n\r\n\t\t\twdesc* w1 = t.write(&prev->next)($);\r\n\t\t\tif (0 == w1)\r\n\t\t\t\tcontinue;\r\n\t\t\t//dlist_trx_node* prev_next = (dlist_trx_node*)w1->cmp;\r\n\r\n\t\t\twdesc* w2 = t.write(&next->prev)($);\r\n\t\t\tif (0 == w2)\r\n\t\t\t\tcontinue;\r\n\t\t\t//dlist_trx_node* next_prev = (dlist_trx_node*)w2->cmp;\r\n\r\n\t\t\tw1->xchg($) = (intptr_t)next;\r\n            w2->xchg($) = (intptr_t)prev;\r\n\r\n            if (t.commit())\r\n                break;\r\n        }\r\n\r\n        pdr_unlock();\r\n    }\r\n\r\n    __declspec(noinline) void insert(dlist_trx_node* node)\r\n    {\r\n        pdr_lock();\r\n\r\n\t\tfor (trx t; t.begin(); t.rollback())\r\n        {\r\n\t\t\twdesc* w1 = t.write(&first.next)($);\r\n            if (0 == w1)\r\n\t\t\t\tcontinue;\r\n\t\t\tdlist_trx_node* next = (dlist_trx_node*)(intptr_t)w1->cmp($);\r\n\r\n\t\t\twdesc* w2 = t.write(&next->prev)($);\r\n\t\t\tif (0 == w2)\r\n                continue;\r\n\t\t\tdlist_trx_node* const& prev = 
(dlist_trx_node*)(intptr_t)w2->cmp($);\r\n\r\n            if (prev != &first)\r\n                continue;\r\n\r\n            node->prev($).store((intptr_t)prev, std::memory_order_relaxed);\r\n            node->next($).store((intptr_t)next, std::memory_order_relaxed);\r\n\t\t\tw1->xchg($) = (intptr_t)node;\r\n\t\t\tw2->xchg($) = (intptr_t)node;\r\n\r\n            if (t.commit())\r\n                break;\r\n        }\r\n\r\n        pdr_unlock();\r\n    }\r\n\r\n    __declspec(noinline) void foreach(void (*f)(void*, dlist_trx_node*), void (*reset)(void*), void* ctx)\r\n    {\r\n        pdr_lock();\r\n\r\n        for (trx t; t.begin(); t.rollback())\r\n        {\r\n            reset(ctx);\r\n\r\n            rdesc* r1 = t.read(&first.next, std::memory_order_consume)($);\r\n            if (0 == r1)\r\n                continue;\r\n            dlist_trx_node* node = (dlist_trx_node*)(intptr_t)r1->cmp($);\r\n\r\n            while (node->next($).load(std::memory_order_consume))\r\n            {\r\n                rdesc* r = t.read(&node->next)($);\r\n\t\t\t\tif (0 == r)\r\n                    break;\r\n\t\t\t\tdlist_trx_node* next = (dlist_trx_node*)(intptr_t)r->cmp($);\r\n\r\n                f(ctx, node);\r\n                node = next;\r\n            }\r\n            if (node->next($).load(std::memory_order_relaxed))\r\n                continue;\r\n\r\n            if (t.commit())\r\n                break;\r\n        }\r\n\r\n        pdr_unlock();\r\n    }\r\n\r\n    dlist_trx_node first;\r\n    dlist_trx_node last;\r\n};\r\n\r\n\r\n\r\n\r\nstruct dlist_trx_test : rl::test_suite<dlist_trx_test, 4>\r\n{\r\n    dlist_trx list;\r\n\r\n    static int const count = 4;\r\n    dlist_trx_node nodes[2][count];\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index || 1 == index)\r\n        {\r\n            for (int i = 0; i != count; ++i)\r\n            {\r\n                dlist_trx_node* n = &nodes[index][i];\r\n                intptr_t value = 1 << ((index * 
count + i) * 4);\r\n                n->key($) = value;\r\n                n->value($) = value;\r\n                list.insert(n);\r\n            }\r\n            for (int i = 0; i != count; ++i)\r\n            {\r\n                dlist_trx_node* n = &nodes[index][i];\r\n                list.remove(n);\r\n            }\r\n        }\r\n        else if (2 == index || 3 == index)\r\n        {\r\n            struct local\r\n            {\r\n                static void reset(void* ctx)\r\n                {\r\n                    *(int*)ctx = 0;\r\n                }\r\n\r\n                static void apply(void* ctx, dlist_trx_node* n)\r\n                {\r\n                    *(int*)ctx += (int)n->value($);\r\n                }\r\n            };\r\n\r\n            int volatile sum = 0;\r\n            list.foreach(&local::apply, &local::reset, (void*)&sum);\r\n            int volatile x = sum;\r\n            (void)x;\r\n        }\r\n    }\r\n\r\n    void invariant()\r\n    {\r\n        int volatile sum = 0;\r\n        dlist_trx_node* n = (dlist_trx_node*)list.first.next($).load();\r\n        for (;;)\r\n        {\r\n            if (lock_value == (intptr_t)n)\r\n                break;\r\n            dlist_trx_node* next = (dlist_trx_node*)n->next($).load();\r\n            if (0 == next)\r\n                break;\r\n            sum += (int)n->value($);\r\n            n = next;\r\n        }\r\n    }\r\n};\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/tutorial.txt",
    "content": "1. Add\r\n#include <relacy/relacy_std.hpp>\r\n\r\n2. For atomic variables use type std::atomic<T>:\r\nstd::atomic<void*> head;\r\n\r\n3. For usual non-atomic variables use type rl::var<T>:\r\nrl::var<int> data;\r\nSuch vars will be checked for races and included into trace.\r\n\r\n4. All accesses to std::atomic<T> and rl::var<T> variables postfix with '($)':\r\nstd::atomic<void*> head;\r\nrl::var<int> data;\r\nhead($).store(0);\r\ndata($) = head($).load();\r\n\r\n5. Strictly thread-private variables use can leave as-is:\r\nfor (int i = 0; i != 10; ++i)\r\nSuch vars will be NOT checked for races NOR included into trace. But they will accelerate verification.\r\n\r\n6. Describe test-suite: number of threads, thread function, before/after/invariant functions. See example below.\r\n\r\n7. Place asserts:\r\nint x = g($).load();\r\nRL_ASSERT(x > 0);\r\n\r\n8. Start verification:\r\nrl::simulate<test_suite_t>();\r\n\r\nHere is complete example:\r\n\r\n#include <relacy/relacy_std.hpp>\r\n\r\n// template parameter '2' is number of threads\r\nstruct race_test : rl::test_suite<race_test, 2>\r\n{\r\n    std::atomic<int> a;\r\n    rl::var<int> x;\r\n\r\n    // executed in single thread before main thread function\r\n    void before()\r\n    {\r\n        a($) = 0;\r\n        x($) = 0;\r\n    }\r\n\r\n    // main thread function\r\n    void thread(unsigned thread_index)\r\n    {\r\n        if (0 == thread_index)\r\n        {\r\n            x($) = 1;\r\n            a($).store(1, rl::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            if (1 == a($).load(rl::memory_order_relaxed))\r\n                x($) = 2;\r\n        }\r\n    }\r\n\r\n    // executed in single thread after main thread function\r\n    void after()\r\n    {\r\n    }\r\n\r\n    // executed in single thread after every 'visible' action in main threads\r\n    // disallowed to modify any state\r\n    void invariant()\r\n    {\r\n    }\r\n};\r\n\r\nint main()\r\n{\r\n    
rl::simulate<race_test>();\r\n}\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/wfmo.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/relacy_std.hpp\"\r\n\r\n\r\n\r\nstruct test_wfmo_all : rl::test_suite<test_wfmo_all, 2>\r\n{\r\n    HANDLE sema1;\r\n    HANDLE sema2;\r\n    rl::var<int> data;\r\n\r\n    void before()\r\n    {\r\n        sema1 = CreateSemaphore(0, 0, 2, 0);\r\n        sema2 = CreateSemaphore(0, 0, 2, 0);\r\n        data($) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        CloseHandle(sema1);\r\n        CloseHandle(sema2);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            HANDLE handles [2] = {sema1, sema2};\r\n            WaitForMultipleObjects(2, handles, 1, INFINITE);\r\n            RL_ASSERT(data($) == 2);\r\n        }\r\n        else\r\n        {\r\n            data($) = 1;\r\n            ReleaseSemaphore(sema1, 1, 0);\r\n            data($) = 2;\r\n            ReleaseSemaphore(sema2, 1, 0);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_wfmo_single : rl::test_suite<test_wfmo_single, 2, rl::test_result_until_condition_hit>\r\n{\r\n    HANDLE sema1;\r\n    HANDLE sema2;\r\n    rl::atomic<int> data;\r\n\r\n    void before()\r\n    {\r\n        sema1 = CreateSemaphore(0, 0, 2, 0);\r\n        sema2 = CreateSemaphore(0, 0, 2, 0);\r\n        data($) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        CloseHandle(sema1);\r\n        CloseHandle(sema2);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            HANDLE handles [2] = {sema1, sema2};\r\n            WaitForMultipleObjects(2, handles, 0, INFINITE);\r\n            int d = data.load(rl::memory_order_relaxed);\r\n            RL_ASSERT(d == 1 || d == 2);\r\n            RL_UNTIL(d == 1);\r\n        }\r\n        else\r\n        {\r\n            data.store(1, rl::memory_order_relaxed);\r\n            ReleaseSemaphore(sema1, 1, 0);\r\n            data.store(2, rl::memory_order_relaxed);\r\n            ReleaseSemaphore(sema2, 1, 0);\r\n  
      }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_wfmo_timeout : rl::test_suite<test_wfmo_timeout, 2, rl::test_result_until_condition_hit>\r\n{\r\n    HANDLE sema1;\r\n    HANDLE sema2;\r\n    rl::atomic<int> data;\r\n\r\n    void before()\r\n    {\r\n        sema1 = CreateSemaphore(0, 0, 2, 0);\r\n        sema2 = CreateSemaphore(0, 0, 2, 0);\r\n        data($) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        CloseHandle(sema1);\r\n        CloseHandle(sema2);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            HANDLE handles [2] = {sema1, sema2};\r\n            WaitForMultipleObjects(2, handles, 0, 100);\r\n            int d = data.load(rl::memory_order_relaxed);\r\n            RL_ASSERT(d == 0 || d == 1 || d == 2);\r\n            RL_UNTIL(d == 0);\r\n        }\r\n        else\r\n        {\r\n            data.store(1, rl::memory_order_relaxed);\r\n            ReleaseSemaphore(sema1, 1, 0);\r\n            data.store(2, rl::memory_order_relaxed);\r\n            ReleaseSemaphore(sema2, 1, 0);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_wfmo_try : rl::test_suite<test_wfmo_try, 2>\r\n{\r\n    HANDLE sema1;\r\n    HANDLE sema2;\r\n    rl::atomic<int> d;\r\n    rl::atomic<int> d1;\r\n    rl::atomic<int> d2;\r\n\r\n    void before()\r\n    {\r\n        sema1 = CreateSemaphore(0, 1, 2, 0);\r\n        sema2 = CreateSemaphore(0, 1, 2, 0);\r\n        d1($) = 0;\r\n        d2($) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        CloseHandle(sema1);\r\n        CloseHandle(sema2);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            d1.store(1, rl::memory_order_relaxed);\r\n            HANDLE handles [2] = {sema1, sema2};\r\n            if (WAIT_TIMEOUT == WaitForMultipleObjects(2, handles, 1, 0))\r\n                RL_ASSERT(1 == d2.load(rl::memory_order_relaxed));\r\n        }\r\n        else if (1 == index)\r\n        {\r\n   
         d2.store(1, rl::memory_order_relaxed);\r\n            HANDLE handles [2] = {sema2, sema1};\r\n            if (WAIT_TIMEOUT == WaitForMultipleObjects(2, handles, 1, 0))\r\n                RL_ASSERT(1 == d1.load(rl::memory_order_relaxed));\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_wfmo_mixed : rl::test_suite<test_wfmo_mixed, 3>\r\n{\r\n\tHANDLE sem [2];\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tsem[0] = CreateSemaphore(0, 0, 2, 0);\r\n\t\tsem[1] = CreateSemaphore(0, 0, 2, 0);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(sem[0]);\r\n\t\tCloseHandle(sem[1]);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tReleaseSemaphore(sem[0], 1, 0);\r\n\t\t\tReleaseSemaphore(sem[0], 1, 0);\r\n\t\t\tReleaseSemaphore(sem[1], 1, 0);\r\n\t\t}\r\n\t\telse if (1 == index)\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForMultipleObjects(2, sem, 1, INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t}\r\n\t\telse if (2 == index)\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForSingleObject(sem[0], INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_wfmo_mixed2 : rl::test_suite<test_wfmo_mixed2, 4>\r\n{\r\n\tHANDLE sem [2];\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tsem[0] = CreateSemaphore(0, 0, 2, 0);\r\n\t\tsem[1] = CreateSemaphore(0, 0, 2, 0);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(sem[0]);\r\n\t\tCloseHandle(sem[1]);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tReleaseSemaphore(sem[1], 1, 0);\r\n\t\t\tReleaseSemaphore(sem[0], 1, 0);\r\n\t\t\tReleaseSemaphore(sem[0], 1, 0);\r\n\t\t}\r\n\t\telse if (1 == index)\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForSingleObject(sem[0], INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t}\r\n\t\telse if (2 == index || 3 == index)\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForMultipleObjects(2, sem, 1, 42);\r\n\t\t\tassert(rv == WAIT_OBJECT_0 || rv == 
WAIT_TIMEOUT);\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_wfmo_event_all : rl::test_suite<test_wfmo_event_all, 2>\r\n{\r\n\tHANDLE ev [2];\r\n\trl::atomic<int> state;\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tev[0] = CreateEvent(0, 0, 0, 0);\r\n\t\tev[1] = CreateEvent(0, 1, 0, 0);\r\n\t\tstate.store(0, rl::memory_order_relaxed);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(ev[0]);\r\n\t\tCloseHandle(ev[1]);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForMultipleObjects(2, ev, 1, INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0 + 0 || rv == WAIT_OBJECT_0 + 1);\r\n\t\t\tassert(state.load(rl::memory_order_relaxed) == 1);\r\n\t\t}\r\n\t\telse if (1 == index)\r\n\t\t{\r\n\t\t\tSetEvent(ev[0]);\r\n\t\t\tstate.store(1, rl::memory_order_relaxed);\r\n\t\t\tSetEvent(ev[1]);\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_wfmo_event_any : rl::test_suite<test_wfmo_event_any, 2>\r\n{\r\n\tHANDLE ev [2];\r\n\trl::atomic<int> state;\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tev[0] = CreateEvent(0, 0, 0, 0);\r\n\t\tev[1] = CreateEvent(0, 1, 0, 0);\r\n\t\tstate.store(0, rl::memory_order_relaxed);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(ev[0]);\r\n\t\tCloseHandle(ev[1]);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForMultipleObjects(2, ev, 0, INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0 + 0 || rv == WAIT_OBJECT_0 + 1);\r\n\t\t\tassert(state.load(rl::memory_order_relaxed) == 1);\r\n\t\t}\r\n\t\telse if (1 == index)\r\n\t\t{\r\n\t\t\tstate.store(1, rl::memory_order_relaxed);\r\n\t\t\tSetEvent(ev[0]);\r\n\t\t\tSetEvent(ev[1]);\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_wfmo_atomic : rl::test_suite<test_wfmo_atomic, 2, rl::test_result_until_condition_hit>\r\n{\r\n\tHANDLE ev [2];\r\n\trl::atomic<int> state;\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tev[0] = CreateEvent(0, 0, 0, 0);\r\n\t\tev[1] = CreateEvent(0, 
0, 0, 0);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(ev[0]);\r\n\t\tCloseHandle(ev[1]);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tstate.store(1, rl::memory_order_relaxed);\r\n\t\t\tWaitForMultipleObjects(2, ev, 0, 1);\r\n\t\t}\r\n\t\telse if (1 == index)\r\n\t\t{\r\n\t\t\tSetEvent(ev[0]);\r\n\t\t\tSetEvent(ev[1]);\r\n\t\t\tunsigned rv = WaitForSingleObject(ev[0], 0);\r\n\t\t\tif (rv == WAIT_TIMEOUT) {\r\n\t\t\t\tassert(state.load(rl::memory_order_relaxed) == 1);\r\n\t\t\t\tRL_UNTIL(true);\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy/test/windows.hpp",
    "content": "#pragma once\r\n\r\n#include \"../relacy/windows.h\"\r\n\r\n\r\nstruct test_win_thread : rl::test_suite<test_win_thread, 1>\r\n{\r\n    static size_t const dynamic_thread_count = 2;\r\n\r\n    VAR_T(int) data;\r\n\r\n    static unsigned long RL_STDCALL win_func(void* param)\r\n    {\r\n        static_cast<test_win_thread*>(param)->VAR(data) += 1;\r\n        return 0;\r\n    }\r\n\r\n    static unsigned RL_STDCALL msvc_func(void* param)\r\n    {\r\n        static_cast<test_win_thread*>(param)->VAR(data) += 1;\r\n        return 0;\r\n    }\r\n\r\n    void thread(unsigned)\r\n    {\r\n        VAR(data) = 0;\r\n\r\n        HANDLE th1 = CreateThread(0, 0, &test_win_thread::win_func, this, 0, 0);\r\n        WaitForSingleObject(th1, INFINITE);\r\n        RL_ASSERT(VAR(data) == 1);\r\n\r\n        HANDLE th2 = (HANDLE)_beginthreadex(0, 0, &test_win_thread::msvc_func, this, 0, 0);\r\n        WaitForSingleObject(th2, INFINITE);\r\n        RL_ASSERT(VAR(data) == 2);\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_win_mutex : rl::test_suite<test_win_mutex, 2>\r\n{\r\n    HANDLE mtx;\r\n    VAR_T(int) data;\r\n\r\n    void before()\r\n    {\r\n        mtx = CreateMutex(0, 0, 0);\r\n        VAR(data) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        CloseHandle(mtx);\r\n    }\r\n\r\n    void thread(unsigned)\r\n    {\r\n        WaitForSingleObject(mtx, INFINITE);\r\n        WaitForSingleObject(mtx, INFINITE);\r\n        VAR(data) += 1;\r\n        ReleaseMutex(mtx);\r\n        ReleaseMutex(mtx);\r\n\r\n        if (WAIT_OBJECT_0 == WaitForSingleObject(mtx, 0))\r\n        {\r\n            VAR(data) += 1;\r\n            ReleaseMutex(mtx);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_win_cs : rl::test_suite<test_win_cs, 2>\r\n{\r\n    CRITICAL_SECTION mtx;\r\n    VAR_T(int) data;\r\n\r\n    void before()\r\n    {\r\n        InitializeCriticalSection(&mtx);\r\n        VAR(data) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        
DeleteCriticalSection(&mtx);\r\n    }\r\n\r\n    void thread(unsigned)\r\n    {\r\n        EnterCriticalSection(&mtx);\r\n        VAR(data) += 1;\r\n        LeaveCriticalSection(&mtx);\r\n\r\n        if (TryEnterCriticalSection(&mtx))\r\n        {\r\n            VAR(data) += 1;\r\n            LeaveCriticalSection(&mtx);\r\n        }\r\n    }\r\n};\r\n\r\n\r\nstruct test_win_condvar : rl::test_suite<test_win_condvar, 3>\r\n{\r\n    CONDITION_VARIABLE cv;\r\n    CRITICAL_SECTION mtx;\r\n    VAR_T(int) stage;\r\n\r\n    void before()\r\n    {\r\n        InitializeConditionVariable(&cv);\r\n        InitializeCriticalSection(&mtx);\r\n        VAR(stage) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        DeleteCriticalSection(&mtx);\r\n        DeleteConditionVariable(&cv);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            EnterCriticalSection(&mtx);\r\n            VAR(stage) += 1;\r\n            WakeAllConditionVariable(&cv);\r\n            while (VAR(stage) != 2)\r\n                SleepConditionVariableCS(&cv, &mtx, INFINITE);\r\n            LeaveCriticalSection(&mtx);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            EnterCriticalSection(&mtx);\r\n            while (VAR(stage) != 1)\r\n                SleepConditionVariableCS(&cv, &mtx, 1);\r\n            VAR(stage) += 1;\r\n            WakeAllConditionVariable(&cv);\r\n            LeaveCriticalSection(&mtx);\r\n        }\r\n        else if (2 == index)\r\n        {\r\n            EnterCriticalSection(&mtx);\r\n            while (VAR(stage) != 2)\r\n                SleepConditionVariableCS(&cv, &mtx, INFINITE);\r\n            LeaveCriticalSection(&mtx);\r\n            WakeConditionVariable(&cv);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct test_win_condvar_srw : rl::test_suite<test_win_condvar_srw, 2>\r\n{\r\n    CONDITION_VARIABLE cv;\r\n    SRWLOCK mtx;\r\n    VAR_T(int) stage;\r\n\r\n    void before()\r\n    {\r\n       
 InitializeConditionVariable(&cv);\r\n        InitializeSRWLock(&mtx);\r\n        VAR(stage) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        DeleteSRWLock(&mtx);\r\n        DeleteConditionVariable(&cv);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (0 == index)\r\n        {\r\n            AcquireSRWLockExclusive(&mtx);\r\n            VAR(stage) += 1;\r\n            WakeAllConditionVariable(&cv);\r\n            while (VAR(stage) != 2)\r\n                SleepConditionVariableSRW(&cv, &mtx, INFINITE, 0);\r\n            ReleaseSRWLockExclusive(&mtx);\r\n        }\r\n        else if (1 == index)\r\n        {\r\n            AcquireSRWLockExclusive(&mtx);\r\n            while (VAR(stage) != 1)\r\n                SleepConditionVariableSRW(&cv, &mtx, 1, 0);\r\n            VAR(stage) += 1;\r\n            WakeAllConditionVariable(&cv);\r\n            ReleaseSRWLockExclusive(&mtx);\r\n        }\r\n        else if (2 == index)\r\n        {\r\n            AcquireSRWLockExclusive(&mtx);\r\n            while (VAR(stage) != 2)\r\n                SleepConditionVariableSRW(&cv, &mtx, INFINITE, 0);\r\n            ReleaseSRWLockExclusive(&mtx);\r\n            WakeConditionVariable(&cv);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\nstruct test_win_sem : rl::test_suite<test_win_sem, 2>\r\n{\r\n    HANDLE sem1, sem2;\r\n    VAR_T(int) data;\r\n\r\n    void before()\r\n    {\r\n        sem1 = CreateSemaphore(0, 0, 1, 0);\r\n        sem2 = CreateSemaphore(0, 0, 1, 0);\r\n        VAR(data) = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        CloseHandle(sem1);\r\n        CloseHandle(sem2);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            VAR(data) = 1;\r\n            long count = -1;\r\n            ReleaseSemaphore(sem1, 1, &count);\r\n            assert(count == 0);\r\n            for (;;)\r\n            {\r\n                unsigned long rv = WaitForSingleObject(sem2, 0);\r\n                if 
(rv == WAIT_OBJECT_0)\r\n                    break;\r\n                RL_ASSERT(rv == WAIT_TIMEOUT);\r\n                Sleep(0);\r\n            }\r\n            RL_ASSERT(VAR(data) == 2);\r\n            VAR(data) = 3;\r\n            ReleaseSemaphore(sem2, 1, &count);\r\n            RL_ASSERT(count == 0);\r\n            ReleaseSemaphore(sem2, 1, &count);\r\n            RL_ASSERT(count == 1);\r\n        }\r\n        else\r\n        {\r\n            unsigned long rv = WaitForSingleObject(sem1, INFINITE);\r\n            assert(rv == WAIT_OBJECT_0);\r\n            RL_ASSERT(VAR(data) == 1);\r\n            VAR(data) = 2;\r\n            ReleaseSemaphore(sem2, 1, 0);\r\n        }\r\n    }\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_win_event : rl::test_suite<test_win_event, 2>\r\n{\r\n\tHANDLE ev;\r\n\tVAR_T(int) data;\r\n\t\r\n\tvoid before()\r\n\t{\r\n\t\tVAR(data) = 0;\r\n\t\tev = CreateEvent(0, 0, 0, 0);\r\n\t}\r\n\t\r\n\tvoid after()\r\n\t{\r\n\t\tCloseHandle(ev);\r\n\t}\r\n\t\r\n\tvoid thread(unsigned index)\r\n\t{\r\n\t\tif (0 == index)\r\n\t\t{\r\n\t\t\tVAR(data) = 1;\r\n\t\t\tSetEvent(ev);\r\n\t\t\tPulseEvent(ev);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tunsigned rv = WaitForSingleObject(ev, INFINITE);\r\n\t\t\tassert(rv == WAIT_OBJECT_0);\r\n\t\t\tassert(VAR(data) == 1);\r\n\t\t\trv = WaitForSingleObject(ev, 0);\r\n\t\t\tassert(rv == WAIT_TIMEOUT);\r\n\t\t\tResetEvent(ev);\r\n\t\t}\r\n\t}\r\n};\r\n\r\n\r\n\r\n\r\nstruct test_FlushProcessWriteBuffers : rl::test_suite<test_FlushProcessWriteBuffers, 2>\r\n{\r\n    std::atomic<int> x1;\r\n    std::atomic<int> x2;\r\n    int r1;\r\n    int r2;\r\n\r\n    void before()\r\n    {\r\n        x1.store(0, std::memory_order_relaxed);\r\n        x2.store(0, std::memory_order_relaxed);\r\n        r1 = r2 = 0;\r\n    }\r\n\r\n    void after()\r\n    {\r\n        assert(r1 == 1 || r2 == 1);\r\n    }\r\n\r\n    void thread(unsigned index)\r\n    {\r\n        if (index)\r\n        {\r\n            x1.store(1, 
std::memory_order_relaxed);\r\n            r1 = x2.load(std::memory_order_relaxed);\r\n        }\r\n        else\r\n        {\r\n            x2.store(1, std::memory_order_relaxed);\r\n            FlushProcessWriteBuffers();\r\n            r2 = x1.load(std::memory_order_relaxed);\r\n        }\r\n    }\r\n};\r\n\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/relacy_shims.h",
    "content": "#pragma once\n\n// Use relacy assertions\n#undef assert\n#ifdef NDEBUG\n#define assert(x)\n#else\n#define assert(x) RL_ASSERT(x)\n#endif\n\n\nstruct RelacyThreadExitListener\n{\n\ttypedef void (*callback_t)(void*);\n\tcallback_t callback;\n\tvoid* userData;\n\t\n\tRelacyThreadExitListener* next;\n};\n\nclass RelacyThreadExitNotifier\n{\npublic:\n\tstatic void subscribe(RelacyThreadExitListener* listener)\n\t{\n\t\tauto& tlsInst = instance();\n\t\tlistener->next = tlsInst.tail;\n\t\ttlsInst.tail = listener;\n\t}\n\t\n\tstatic void unsubscribe(RelacyThreadExitListener* listener)\n\t{\n\t\tauto& tlsInst = instance();\n\t\tRelacyThreadExitListener** prev = &tlsInst.tail;\n\t\tfor (auto ptr = tlsInst.tail; ptr != nullptr; ptr = ptr->next) {\n\t\t\tif (ptr == listener) {\n\t\t\t\t*prev = ptr->next;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tprev = &ptr->next;\n\t\t}\n\t}\n\t\n\tstatic void notify_relacy_thread_start()\n\t{\n\t\tinstance().tail = nullptr;\n\t}\n\t\n\tstatic void notify_relacy_thread_exit()\n\t{\n\t\tfor (auto ptr = instance().tail; ptr != nullptr; ptr = ptr->next) {\n\t\t\tptr->callback(ptr->userData);\n\t\t}\n\t}\n\t\n\t\nprivate:\n\tRelacyThreadExitNotifier() : tail(nullptr) { }\n\t\n\tstatic RelacyThreadExitNotifier& instance()\n\t{\n\t\tstatic RelacyThreadExitNotifier instances[1024];\n\t\t\n\t\tauto tid = rl::thread_index();\n\t\tassert(tid < 1024);\n\t\treturn instances[tid];\n\t}\n\t\nprivate:\n\tRelacyThreadExitListener* tail;\n};\n\nnamespace std\n{\n\t// Relacy doesn't wrap std::atomic_flag\n\tstruct atomic_flag {\n\tprivate:\n\t\tatomic_flag(atomic_flag const&);\n\t\tatomic_flag(atomic_flag&&);\n\t\tatomic_flag& operator=(atomic_flag const&);\n\t\tatomic_flag& operator=(atomic_flag&&);\n\t\n\tpublic:\n\t\tatomic_flag() { }\n\t\tatomic_flag(bool initialValue) : val(initialValue ? 
1 : 0) { }\n\t\t\n\t\tvoid clear()\n\t\t{\n\t\t\tclear(std::memory_order_seq_cst);\n\t\t}\n\t\t\n\t\tvoid clear(rl::memory_order order, rl::debug_info_param d)\n\t\t{\n\t\t\tval.store(0, order, d);\n\t\t}\n\t\t\n\t\tbool test_and_set()\n\t\t{\n\t\t\ttest_and_set(std::memory_order_seq_cst);\n\t\t}\n\t\t\n\t\tbool test_and_set(rl::memory_order order, rl::debug_info_param d)\n\t\t{\n\t\t\treturn val.fetch_or(1, order, d) != 0;\n\t\t}\n\t\t\n\tprivate:\n\t\tstd::atomic<int> val;\n\t};\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/relacy/spmchash.cpp",
    "content": "// ©2014 Cameron Desrochers\n\n#include \"relacy/relacy/relacy_std.hpp\"\n\nnamespace details\n{\n    template<typename U>\n    static inline char* align_for(char* ptr)\n    {\n        const std::size_t alignment = std::alignment_of<U>::value;\n        return ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;\n    }\n}\n\ntemplate<typename TValue>\nstruct SPMCSequentialHashMap\n{\n    explicit SPMCSequentialHashMap(std::size_t initialSize)\n        : nextCapacity(initialSize), index(nullptr)\n    {\n        new_index();\n    }\n    \n    ~SPMCSequentialHashMap()\n    {\n        auto ptr = index.load(std::memory_order_relaxed);\n        if (ptr != nullptr) {\n            for (std::size_t i = 0; i != ptr->capacity; ++i) {\n                ptr->index[i]->~IndexEntry();\n            }\n            do {\n                auto prev = ptr->prev;\n                ptr->~IndexHeader();\n                free(ptr);\n                ptr = prev;\n            } while (ptr != nullptr);\n        }\n    }\n    \n    // Not thread safe. Only call from single producer thread.\n    // Note: key must *not* be in hash already, and must be exactly\n    // one larger than the previously inserted key value.\n    void insert(std::uint64_t key, TValue* value)\n    {\n        IndexEntry* idxEntry;\n        insert_index_entry(idxEntry, key);\n        idxEntry->value.store(value, std::memory_order_release);\n    }\n    \n    // Thread-safe, but if somebody can remove the key while find() is\n    // in progress, then any returned value is not guaranteed to correspond\n    // to that key. This also applies if the key was not already present but\n    // once was. 
Elements can be found in any order.\n    TValue* find(std::uint64_t key)\n    {\n        auto idxEntry = get_entry_for_key(key);\n        if (idxEntry == nullptr)\n            return nullptr;\n        return idxEntry->value.load(std::memory_order_acquire);\n    }\n    \n    // Thread-safe, but if somebody else can remove the same key while remove()\n    // is in progress, then any removed value is not guaranteed to correspond\n    // to that key This also applies if the key was not already present but\n    // once was. Elements can be removed in an order.\n    TValue* remove(std::uint64_t key)\n    {\n        auto idxEntry = get_entry_for_key(key);\n        if (idxEntry == nullptr)\n            return nullptr;\n        TValue* val = nullptr;\n        while (!idxEntry->value.compare_exchange_weak(val, nullptr, std::memory_order_acquire, std::memory_order_relaxed))\n            continue;\n        return val;\n    }\n    \nprivate:\n    struct IndexEntry\n    {\n        std::atomic<std::uint64_t> key;\n        std::atomic<TValue*> value;\n    };\n    \n    struct IndexHeader\n    {\n        std::size_t capacity;\n        std::atomic<std::size_t> tail;\n        IndexEntry* entries;\n        IndexEntry** index;\n        IndexHeader* prev;\n    };\n    \n    inline void insert_index_entry(IndexEntry*& idxEntry, std::uint64_t key)\n    {\n        auto localIndex = index.load(std::memory_order_relaxed);        // We're the only writer thread, relaxed is OK\n        auto newTail = (localIndex->tail.load(std::memory_order_relaxed) + 1) & (localIndex->capacity - 1);\n        idxEntry = localIndex->index[newTail];\n        if (idxEntry->key.load(std::memory_order_relaxed) == INVALID_KEY ||\n            idxEntry->value.load(std::memory_order_relaxed) == nullptr) {\n            \n            idxEntry->key.store(key, std::memory_order_relaxed);\n            localIndex->tail.store(newTail, std::memory_order_release);\n            return;\n        }\n        \n        // No room in 
the old index, try to allocate another one!\n        new_index();\n        localIndex = index.load(std::memory_order_relaxed);\n        newTail = (localIndex->tail.load(std::memory_order_relaxed) + 1) & (localIndex->capacity - 1);\n        idxEntry = localIndex->index[newTail];\n        assert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_KEY);\n        idxEntry->key.store(key, std::memory_order_relaxed);\n        localIndex->tail.store(newTail, std::memory_order_release);\n    }\n    \n    inline IndexEntry* get_entry_for_key(std::uint64_t key) const\n    {\n        auto localIndex = index.load(std::memory_order_acquire);\n        auto tail = localIndex->tail.load(std::memory_order_acquire);\n        auto tailBase = localIndex->index[tail]->key.load(std::memory_order_relaxed);\n        if (tailBase == INVALID_KEY) {\n            return nullptr;\n        }\n        auto offset = static_cast<std::size_t>(key - tailBase);\n        std::size_t idx = (tail + offset) & (localIndex->capacity - 1);\n        auto entry = localIndex->index[idx];\n        return entry->key.load(std::memory_order_relaxed) == key ? entry : nullptr;\n    }\n    \n    bool new_index()\n    {\n        auto prev = index.load(std::memory_order_relaxed);\n        std::size_t prevCapacity = prev == nullptr ? 0 : prev->capacity;\n        auto entryCount = prev == nullptr ? 
nextCapacity : prevCapacity;\n        auto raw = static_cast<char*>(malloc(\n            sizeof(IndexHeader) +\n            std::alignment_of<IndexEntry>::value - 1 + sizeof(IndexEntry) * entryCount +\n            std::alignment_of<IndexEntry*>::value - 1 + sizeof(IndexEntry*) * nextCapacity));\n        if (raw == nullptr) {\n            return false;\n        }\n        \n        auto header = new (raw) IndexHeader;\n        auto entries = reinterpret_cast<IndexEntry*>(details::align_for<IndexEntry>(raw + sizeof(IndexHeader)));\n        auto idx = reinterpret_cast<IndexEntry**>(details::align_for<IndexEntry*>(reinterpret_cast<char*>(entries) + sizeof(IndexEntry) * entryCount));\n        if (prev != nullptr) {\n            auto prevTail = prev->tail.load(std::memory_order_relaxed);\n            auto prevPos = prevTail;\n            std::size_t i = 0;\n            do {\n                prevPos = (prevPos + 1) & (prev->capacity - 1);\n                idx[i++] = prev->index[prevPos];\n            } while (prevPos != prevTail);\n            assert(i == prevCapacity);\n        }\n        for (std::size_t i = 0; i != entryCount; ++i) {\n            new (entries + i) IndexEntry;\n            entries[i].key.store(INVALID_KEY, std::memory_order_relaxed);\n            entries[i].value.store(nullptr, std::memory_order_relaxed);\n            idx[prevCapacity + i] = entries + i;\n        }\n        header->prev = prev;\n        header->entries = entries;\n        header->index = idx;\n        header->capacity = nextCapacity;\n        header->tail.store((prevCapacity - 1) & (nextCapacity - 1), std::memory_order_relaxed);\n        \n        index.store(header, std::memory_order_release);\n        \n        nextCapacity <<= 1;\n        \n        return true;\n    }\n        \n    private:\n        std::size_t nextCapacity;\n        std::atomic<IndexHeader*> index;\n        \n        static const std::uint64_t INVALID_KEY = ~(std::uint64_t)0;\n};\n\n\n\ntemplate<int ThreadCount, 
int NUM_VALUES>\nstruct test : rl::test_suite<test<ThreadCount, NUM_VALUES>, ThreadCount>\n{\n\tSPMCSequentialHashMap<int>* hash;\n    int values[NUM_VALUES];\n    std::atomic<int> useCounts[NUM_VALUES];\n\tstd::atomic<bool> removed[NUM_VALUES];\n\t\n\tvoid before()\n\t{\n        hash = new SPMCSequentialHashMap<int>(2);\n        for (int i = 0; i != NUM_VALUES; ++i) {\n            values[i] = i;\n            useCounts[i].store(0, std::memory_order_relaxed);\n            removed[i].store(false, std::memory_order_relaxed);\n        }\n\t}\n\t\n\tvoid thread(unsigned int tid)\n\t{\n        if (tid == 0) {\n            // Producer\n            for (int i = 0; i != NUM_VALUES; ++i) {\n                hash->insert(i, &values[i]);\n                useCounts[i].store(ThreadCount / 2, std::memory_order_release);\n            }\n        }\n\t\telse {\n            // Consumer\n            for (int i = 0; i != NUM_VALUES; ++i) {\n                auto useCount = useCounts[i].fetch_add(-1, std::memory_order_acquire);\n                auto val = hash->find(i);\n                bool isRemoved = removed[i].load(std::memory_order_relaxed);\n                auto current = useCounts[i].fetch_add(0, std::memory_order_release);\n                if (useCount > 0 && (current > 0 || current == 0 && useCount == 1)) {\n                    RL_ASSERT(val != nullptr && *val == i && !isRemoved);\n                }\n                if (useCount == 1) {\n                    val = hash->remove(i);\n                    RL_ASSERT(val != nullptr && *val == i && !removed[i].load(std::memory_order_relaxed));\n                    removed[i].store(true, std::memory_order_release);\n                }\n            }\n        }\n\t}\n\t\n\tvoid after()\n\t{\n        delete hash;\n\t}\n\t\n\tvoid invariant()\n\t{\n\t}\n};\n\nint main()\n{\n\trl::test_params params;\n\t//params.search_type = rl::sched_full;\n\t//params.iteration_count = 100000000;\n\tparams.search_type = 
rl::sched_random;\n\tparams.iteration_count = 1000000;\n    rl::simulate<test<2, 4>>(params);\n    rl::simulate<test<3, 4>>(params);\n    rl::simulate<test<4, 8>>(params);\n\t\n\treturn 0;\n}\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/unittests/mallocmacro.cpp",
    "content": "#define malloc(x) malloc(x)\r\n#define free(x) free(x)\r\n\r\n#include \"../../blockingconcurrentqueue.h\"\r\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/unittests/minitest.h",
    "content": "// ©2013-2014 Cameron Desrochers.\n// Distributed under the simplified BSD license (see the LICENSE file that\n// should have come with this header).\n\n// Provides an extremely basic unit testing framework.\n\n#pragma once\n\n#include <cstdio>\n#include <string>\n#include <map>\n#include <vector>\n#include <type_traits>\n#include <typeinfo>\n\n#ifdef __GNUG__\n#include <cxxabi.h>\n#include <cstdlib>\n#endif\n\n\n\n#define REGISTER_TEST(testName) registerTest(#testName, &subclass_t::testName)\n\n#define ASSERT_OR_FAIL(expr) { if (!(expr)) { notifyTestFailed(__LINE__, #expr); return false; } }\n#define SUCCEED() { return true; }\n\n\n\n// Uses CRTP\ntemplate<typename TSubclass>\nclass TestClass\n{\npublic:\n\tstatic void notifyTestFailed(int line, const char* expr)\n\t{\n\t\tstd::printf(\"    FAILED!\\n    ******* Assertion failed (line %d): %s\\n\\n\", line, expr);\n\t}\n\t\n\tbool validateTestName(std::string const& which) const\n\t{\n\t\treturn testMap.find(which) != testMap.end();\n\t}\n\t\n\tvoid getAllTestNames(std::vector<std::string>& names) const\n\t{\n\t\tfor (auto it = testMap.cbegin(); it != testMap.cend(); ++it) {\n\t\t\tnames.push_back(it->first);\n\t\t}\n\t}\n\t\n\tbool run(unsigned int iterations = 1)\n\t{\n\t\tbool success = true;\n\t\tfor (auto it = testVec.cbegin(); it != testVec.cend(); ++it) {\n\t\t\tif (!execTest(*it, iterations)) {\n\t\t\t\tsuccess = false;\n\t\t\t}\n\t\t}\n\t\treturn success;\n\t}\n\t\n\tbool run(std::vector<std::string> const& which, unsigned int iterations = 1)\n\t{\n\t\tbool success = true;\n\t\tfor (auto it = which.begin(); it != which.end(); ++it) {\n\t\t\tif (!execTest(*testMap.find(*it), iterations)) {\n\t\t\t\tsuccess = false;\n\t\t\t}\n\t\t}\n\t\treturn success;\n\t}\n\t\nprotected:\n\ttypedef TSubclass subclass_t;\n\n\tvoid registerTest(const char* name, bool (subclass_t::* method)())\n\t{\n\t\ttestVec.push_back(std::make_pair(std::string(name), method));\n\t\ttestMap[std::string(name)] = 
method;\n\t}\n\t\n\tvirtual bool preTest() { return true; }\n\tvirtual bool postTest(bool) { return true; }\n\t\n\tbool execTest(std::pair<std::string, bool (subclass_t::*)()> const& testRef, unsigned int iterations)\n\t{\n\t\tstd::printf(\"%s::%s... \\n\", demangle_type_name(typeid(subclass_t).name()).c_str(), testRef.first.c_str());\n\t\t\n\t\tbool result = true;\n\t\tfor (unsigned int i = 0; result && i != iterations; ++i) {\n\t\t\tresult = preTest();\n\t\t\ttry {\n\t\t\t\tresult = result && (static_cast<subclass_t*>(this)->*testRef.second)();\n\t\t\t}\n\t\t\tcatch (...) {\n\t\t\t\tstd::printf(\"    FAILED!\\n    ******* Unhandled exception thrown\\n\\n\");\n\t\t\t\tresult = false;\n\t\t\t}\n\t\t\tresult = postTest(result) && result;\n\t\t}\n\t\t\n\t\tif (result) {\n\t\t\tstd::printf(\"    passed\\n\\n\");\n\t\t}\n\t\treturn result;\n\t}\n\t\nprivate:\n\tstatic std::string demangle_type_name(const char* name)\n\t{\n#ifdef __GNUG__\n\t\t// Adapted from http://stackoverflow.com/a/4541470/21475\n\t\tint status = -4;\n\t\tchar* res = abi::__cxa_demangle(name, nullptr, nullptr, &status);\n\n\t\tconst char* const demangled_name = (status == 0) ? res : name;\n\t\tstd::string ret(demangled_name);\n\n\t\tstd::free(res);\n\t\treturn ret;\n#else\n\t\treturn name;\n#endif\n\t}\n\t\nprotected:\n\tstd::vector<std::pair<std::string, bool (TSubclass::*)()> > testVec;\n\tstd::map<std::string, bool (TSubclass::*)()> testMap;\n};\n"
  },
  {
    "path": "src/third_party/concurrentqueue/tests/unittests/unittests.cpp",
    "content": "// ©2013-2014 Cameron Desrochers.\n// Distributed under the simplified BSD license (see the LICENSE file that\n// should have come with this file).\n\n// Unit tests for moodycamel::ConcurrentQueue\n\n#define likely MAKE_SURE_LIKELY_MACRO_CAN_PEACEFULLY_COEXIST\n#define unlikely MAKE_SURE_UNLIKELY_MACRO_CAN_PEACEFULLY_COEXIST\n\n#include <cstdio>\n#include <cstdlib>\n#include <cstring>\n#include <string>\n#include <cstddef>\n#include <string>\n\n#ifdef _WIN32\n#ifndef NOMINMAX\n#define NOMINMAX\n#endif\n#include <windows.h>\t\t// Not because we need it, but to ensure no conflicts arise with the queue's declarations\n#endif\n\n#include \"minitest.h\"\n#include \"../common/simplethread.h\"\n#include \"../common/systemtime.h\"\n#include \"../../concurrentqueue.h\"\n#include \"../../blockingconcurrentqueue.h\"\n\nnamespace {\n\tstruct tracking_allocator\n\t{\n\t\tunion tag {\n\t\t\tstd::size_t size;\n#ifdef __GNUC__\n\t\t\tmax_align_t dummy;\t\t// GCC forgot to add it to std:: for a while\n#else\n\t\t\tstd::max_align_t dummy;\t// Others (e.g. 
MSVC) insist it can *only* be accessed via std::\n#endif\n\t\t};\n\t\t\n\t\tstatic inline void* malloc(std::size_t size)\n\t\t{\n\t\t\tauto ptr = std::malloc(size + sizeof(tag));\n\t\t\treinterpret_cast<tag*>(ptr)->size = size;\n\t\t\tusage.fetch_add(size, std::memory_order_relaxed);\n\t\t\treturn reinterpret_cast<char*>(ptr) + sizeof(tag);\n\t\t}\n\t\t\n\t\tstatic inline void free(void* ptr)\n\t\t{\n\t\t\tptr = reinterpret_cast<char*>(ptr) - sizeof(tag);\n\t\t\tauto size = reinterpret_cast<tag*>(ptr)->size;\n\t\t\tusage.fetch_add(-size, std::memory_order_relaxed);\n\t\t\tstd::free(ptr);\n\t\t}\n\t\t\n\t\tstatic inline std::size_t current_usage() { return usage.load(std::memory_order_relaxed); }\n\t\t\n\tprivate:\n\t\tstatic std::atomic<std::size_t> usage;\n\t};\n\t\n\tstd::atomic<std::size_t> tracking_allocator::usage(0);\n}\n\nstruct corealgos_allocator\n{\n\tstatic inline void* malloc(std::size_t size) { return tracking_allocator::malloc(size); }\n\tstatic inline void free(void* ptr) { tracking_allocator::free(ptr); }\n};\n\n#define corealgos_allocator corealgos_allocator\n\n#include \"../corealgos.h\"\n\nusing namespace moodycamel;\n\n\nnamespace moodycamel\n{\nstruct MallocTrackingTraits : public ConcurrentQueueDefaultTraits\n{\n\tstatic inline void* malloc(std::size_t size) { return tracking_allocator::malloc(size); }\n\tstatic inline void free(void* ptr) { tracking_allocator::free(ptr); }\n};\n\ntemplate<std::size_t BlockSize = ConcurrentQueueDefaultTraits::BLOCK_SIZE, std::size_t InitialIndexSize = ConcurrentQueueDefaultTraits::EXPLICIT_INITIAL_INDEX_SIZE>\nstruct TestTraits : public MallocTrackingTraits\n{\n\ttypedef std::size_t size_t;\n\ttypedef uint64_t index_t;\n\t\n\tstatic const size_t BLOCK_SIZE = BlockSize;\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = InitialIndexSize;\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = InitialIndexSize * 2;\n\t\n\tstatic inline void reset() { _malloc_count() = 0; _free_count() = 0; }\n\tstatic inline 
std::atomic<int>& _malloc_count() { static std::atomic<int> c; return c; }\n\tstatic inline int malloc_count() { return _malloc_count().load(std::memory_order_seq_cst); }\n\tstatic inline std::atomic<int>& _free_count() { static std::atomic<int> c; return c; }\n\tstatic inline int free_count() { return _free_count().load(std::memory_order_seq_cst); }\n\t\n\tstatic inline void* malloc(ConcurrentQueueDefaultTraits::size_t bytes) { ++_malloc_count(); return tracking_allocator::malloc(bytes); }\n\tstatic inline void free(void* obj) { ++_free_count(); return tracking_allocator::free(obj); }\n};\n\nstruct SmallIndexTraits : public MallocTrackingTraits\n{\n\ttypedef uint16_t size_t;\n\ttypedef uint16_t index_t;\n};\n\nstruct ExtraSmallIndexTraits : public MallocTrackingTraits\n{\n\ttypedef uint8_t size_t;\n\ttypedef uint8_t index_t;\n};\n\n// Note: Not thread safe!\nstruct Foo\n{\n\tstatic int& nextId() { static int i; return i; }\n\tstatic int& createCount() { static int c; return c; }\n\tstatic int& destroyCount() { static int c; return c; }\n\tstatic bool& destroyedInOrder() { static bool d = true; return d; }\n\tstatic void reset() { createCount() = 0; destroyCount() = 0; nextId() = 0; destroyedInOrder() = true; lastDestroyedId() = -1; }\n\t\n\tFoo() { id = nextId()++; ++createCount(); }\n\tFoo(Foo const&) MOODYCAMEL_DELETE_FUNCTION;\n\tFoo(Foo&& other) { id = other.id; other.id = -1; }\n\tvoid operator=(Foo&& other) { id = other.id; other.id = -1; }\n\t~Foo()\n\t{\n\t\t++destroyCount();\n\t\tif (id == -2) {\n\t\t\t// Double free!\n\t\t\tdestroyedInOrder() = false;\n\t\t}\n\t\telse if (id != -1) {\n\t\t\tif (id <= lastDestroyedId()) {\n\t\t\t\tdestroyedInOrder() = false;\n\t\t\t}\n\t\t\tlastDestroyedId() = id;\n\t\t}\n\t\tid = -2;\n\t}\n\t\nprivate:\n\tint id;\n\tstatic int& lastDestroyedId() { static int i = -1; return i; }\n};\n\nstruct Copyable {\n\tCopyable(int id) : copied(false), id(id) { }\n\tCopyable(Copyable const& o) : copied(true), id(o.id) { }\n\tvoid 
operator=(Copyable const& o) { copied = true; id = o.id; }\n\tbool copied;\n\tint id;\n};\n\nstruct Moveable {\n\tMoveable(int id) : moved(false), copied(false), id(id) { }\n\tMoveable(Moveable&& o) MOODYCAMEL_NOEXCEPT : moved(true), copied(o.copied), id(o.id) { }\n\tvoid operator=(Moveable&& o) MOODYCAMEL_NOEXCEPT { moved = true; copied = o.copied; id = o.id; }\n\tbool moved;\n\tbool copied;\n\tint id;\n\n#if defined(_MSC_VER) && _MSC_VER < 1800\n\t// VS2012's std::is_nothrow_[move_]constructible is broken, so the queue never attempts to\n\t// move objects with that compiler. In this case, we don't know whether it's really a copy\n\t// or not being done, so give the benefit of the doubt (given the tests pass on other platforms)\n\t// and assume it would have done a move if it could have (don't set copied to true).\n\tMoveable(Moveable const& o) MOODYCAMEL_NOEXCEPT : moved(o.moved), copied(o.copied), id(o.id) { }\n\tvoid operator=(Moveable const& o) MOODYCAMEL_NOEXCEPT { moved = o.moved; copied = o.copied; id = o.id; }\n#else\n\tMoveable(Moveable const& o) MOODYCAMEL_NOEXCEPT : moved(o.moved), copied(true), id(o.id) { }\n\tvoid operator=(Moveable const& o) MOODYCAMEL_NOEXCEPT { moved = o.moved; copied = true; id = o.id; }\n#endif\n};\n\nstruct ThrowingMovable {\n\tstatic std::atomic<int>& ctorCount() { static std::atomic<int> c; return c; }\n\tstatic std::atomic<int>& destroyCount() { static std::atomic<int> c; return c; }\n\tstatic void reset() { ctorCount() = 0; destroyCount() = 0; }\n\t\n\texplicit ThrowingMovable(int id, bool throwOnCctor = false, bool throwOnAssignment = false, bool throwOnSecondCctor = false)\n\t\t: id(id), moved(false), copied(false), throwOnCctor(throwOnCctor), throwOnAssignment(throwOnAssignment), throwOnSecondCctor(throwOnSecondCctor)\n\t{\n\t\tctorCount().fetch_add(1, std::memory_order_relaxed);\n\t}\n\t\n\tThrowingMovable(ThrowingMovable const& o)\n\t\t: id(o.id), moved(false), copied(true), throwOnCctor(o.throwOnCctor), 
throwOnAssignment(o.throwOnAssignment), throwOnSecondCctor(false)\n\t{\n\t\tif (throwOnCctor) {\n\t\t\tthrow this;\n\t\t}\n\t\tctorCount().fetch_add(1, std::memory_order_relaxed);\n\t\tthrowOnCctor = o.throwOnSecondCctor;\n\t}\n\t\n\tThrowingMovable(ThrowingMovable&& o)\n\t\t: id(o.id), moved(true), copied(false), throwOnCctor(o.throwOnCctor), throwOnAssignment(o.throwOnAssignment), throwOnSecondCctor(false)\n\t{\n\t\tif (throwOnCctor) {\n\t\t\tthrow this;\n\t\t}\n\t\tctorCount().fetch_add(1, std::memory_order_relaxed);\n\t\tthrowOnCctor = o.throwOnSecondCctor;\n\t}\n\t\n\t~ThrowingMovable()\n\t{\n\t\tdestroyCount().fetch_add(1, std::memory_order_relaxed);\n\t}\n\t\n\tvoid operator=(ThrowingMovable const& o)\n\t{\n\t\tid = o.id;\n\t\tmoved = false;\n\t\tcopied = true;\n\t\tthrowOnCctor = o.throwOnCctor;\n\t\tthrowOnAssignment = o.throwOnAssignment;\n\t\tthrowOnSecondCctor = o.throwOnSecondCctor;\n\t\tif (throwOnAssignment) {\n\t\t\tthrow this;\n\t\t}\n\t}\n\t\n\tvoid operator=(ThrowingMovable&& o)\n\t{\n\t\tid = o.id;\n\t\tmoved = true;\n\t\tcopied = false;\n\t\tthrowOnCctor = o.throwOnCctor;\n\t\tthrowOnAssignment = o.throwOnAssignment;\n\t\tthrowOnSecondCctor = o.throwOnSecondCctor;\n\t\tif (throwOnAssignment) {\n\t\t\tthrow this;\n\t\t}\n\t}\n\t\n\tint id;\n\tbool moved;\n\tbool copied;\n\t\npublic:\n\tbool throwOnCctor;\n\tbool throwOnAssignment;\n\tbool throwOnSecondCctor;\n};\n\n\nclass ConcurrentQueueTests : public 
TestClass<ConcurrentQueueTests>\n{\npublic:\n\tConcurrentQueueTests()\n\t{\n\t\tREGISTER_TEST(create_empty_queue);\n\t\tREGISTER_TEST(create_token);\n\t\tREGISTER_TEST(circular_less_than);\n\t\tREGISTER_TEST(enqueue_one_explicit);\n\t\tREGISTER_TEST(enqueue_and_dequeue_one_explicit);\n\t\tREGISTER_TEST(enqueue_one_implicit);\n\t\tREGISTER_TEST(enqueue_and_dequeue_one_implicit);\n\t\tREGISTER_TEST(enqueue_and_dequeue_a_few);\n\t\tREGISTER_TEST(enqueue_bulk);\n\t\tREGISTER_TEST(block_alloc);\n\t\tREGISTER_TEST(token_move);\n\t\tREGISTER_TEST(multi_producers);\n\t\tREGISTER_TEST(producer_reuse);\n\t\tREGISTER_TEST(block_reuse);\n\t\tREGISTER_TEST(block_recycling);\n\t\tREGISTER_TEST(leftovers_destroyed);\n\t\tREGISTER_TEST(block_index_resized);\n\t\tREGISTER_TEST(try_dequeue);\n\t\tREGISTER_TEST(try_dequeue_threaded);\n\t\tREGISTER_TEST(try_dequeue_bulk);\n\t\tREGISTER_TEST(try_dequeue_bulk_threaded);\n\t\tREGISTER_TEST(implicit_producer_hash);\n\t\tREGISTER_TEST(index_wrapping);\n\t\tREGISTER_TEST(subqueue_size_limit);\n\t\tREGISTER_TEST(exceptions);\n\t\tREGISTER_TEST(test_threaded);\n\t\tREGISTER_TEST(test_threaded_bulk);\n\t\tREGISTER_TEST(full_api<ConcurrentQueueDefaultTraits>);\n\t\tREGISTER_TEST(full_api<SmallIndexTraits>);\n\t\tREGISTER_TEST(blocking_wrappers);\n\t\tREGISTER_TEST(timed_blocking_wrappers);\n\t\t\n\t\t// Core algos\n\t\tREGISTER_TEST(core_add_only_list);\n\t\tREGISTER_TEST(core_thread_local);\n\t\tREGISTER_TEST(core_free_list);\n\t\tREGISTER_TEST(core_spmc_hash);\n\t\t\n\t\tREGISTER_TEST(explicit_strings_threaded);\n\t}\n\t\n\tbool postTest(bool testSucceeded) override\n\t{\n\t\tif (testSucceeded) {\n\t\t\t// If this assertion fails, there's necessarily a memory leak somewhere!\n\t\t\tASSERT_OR_FAIL(tracking_allocator::current_usage() == 0);\n\t\t}\n\t\treturn true;\n\t}\n\t\n\t\n\tbool create_empty_queue()\n\t{\n\t\tConcurrentQueue<int, MallocTrackingTraits> q;\n\t\treturn true;\n\t}\n\t\n\t\n\tbool create_token()\n\t{\n\t\tConcurrentQueue<int, 
MallocTrackingTraits> q;\n\t\tProducerToken tok(q);\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool circular_less_than()\n\t{\n\t\t{\n\t\t\tuint32_t a, b;\n\t\t\t\n\t\t\ta = 0; b = 100;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 100; b = 0;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0; b = 0;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 100; b = 100;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0; b = 1 << 31;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 1; b = 1 << 31;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0; b = (1 << 31) + 1;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 100; b = (1 << 31) + 1;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = (1 << 31) + 7; b = 5;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = (1 << 16) + 7; b = (1 << 16) + 5;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0xFFFFFFFF; b = 0;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0xFFFFFFFF; b = 0xFFFFFF;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, 
b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t}\n\t\t\n\t\t{\n\t\t\tuint16_t a, b;\n\t\t\t\n\t\t\ta = 0; b = 100;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 100; b = 0;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0; b = 0;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 100; b = 100;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0; b = 1 << 15;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 1; b = 1 << 15;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0; b = (1 << 15) + 1;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 100; b = (1 << 15) + 1;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = (1 << 15) + 7; b = 5;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = (1 << 15) + 7; b = (1 << 15) + 5;\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0xFFFF; b = 0;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t\t\n\t\t\ta = 0xFFFF; b = 0xFFF;\n\t\t\tASSERT_OR_FAIL(details::circular_less_than(a, b));\n\t\t\tASSERT_OR_FAIL(!details::circular_less_than(b, a));\n\t\t}\n\t\t\n\t\treturn 
true;\n\t}\n\t\n\t\n\tbool enqueue_one_explicit()\n\t{\n\t\tConcurrentQueue<int, MallocTrackingTraits> q;\n\t\tProducerToken tok(q);\n\t\t\n\t\tbool result = q.enqueue(tok, 17);\n\t\t\n\t\tASSERT_OR_FAIL(result);\n\t\treturn true;\n\t}\n\t\n\tbool enqueue_and_dequeue_one_explicit()\n\t{\n\t\tConcurrentQueue<int, MallocTrackingTraits> q;\n\t\tProducerToken tok(q);\n\t\t\n\t\tint item = 0;\n\t\tASSERT_OR_FAIL(q.enqueue(tok, 123));\n\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(tok, item));\n\t\tASSERT_OR_FAIL(item == 123);\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool enqueue_one_implicit()\n\t{\n\t\tConcurrentQueue<int, MallocTrackingTraits> q;\n\t\t\n\t\tbool result = q.enqueue(17);\n\t\t\n\t\tASSERT_OR_FAIL(result);\n\t\treturn true;\n\t}\n\t\n\tbool enqueue_and_dequeue_one_implicit()\n\t{\n\t\tConcurrentQueue<int, MallocTrackingTraits> q;\n\t\t\n\t\tint item = 0;\n\t\tASSERT_OR_FAIL(q.enqueue(123));\n\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\tASSERT_OR_FAIL(item == 123);\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool enqueue_and_dequeue_a_few()\n\t{\n\t\t// Fairly straightforward mass enqueue and dequeue\n\t\t{\n\t\t\tConcurrentQueue<int, TestTraits<16>> q;\n\t\t\tProducerToken tok(q);\n\t\t\t\n\t\t\tfor (int i = 0; i != 99999; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(tok, i));\n\t\t\t}\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 99999; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(tok, item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_from_producer(tok, item));\n\t\t}\n\t\t\n\t\t// Interleaved enqueue and dequeue (though still no threads involved)\n\t\t{\n\t\t\tConcurrentQueue<int, TestTraits<16>> q;\n\t\t\tProducerToken tok(q);\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 99999; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(tok, i));\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(tok, i * 2));\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(tok, item));\n\t\t\t\tASSERT_OR_FAIL(item == (i / 2) * (i % 2 == 0 ? 
1 : 2));\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 99999; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(tok, item));\n\t\t\t\tASSERT_OR_FAIL(item == ((i + 99999) / 2) * (i % 2 == 1 ? 1 : 2));\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_from_producer(tok, item));\n\t\t}\n\t\t\n\t\t// Implicit usage\n\t\t{\n\t\t\tConcurrentQueue<int, TestTraits<16>> q;\n\t\t\t\n\t\t\tfor (int i = 0; i != 99999; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t}\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 99999; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t{\n\t\t\tConcurrentQueue<int, TestTraits<16>> q;\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 99999; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(i * 2));\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == (i / 2) * (i % 2 == 0 ? 1 : 2));\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 99999; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == ((i + 99999) / 2) * (i % 2 == 1 ? 
1 : 2));\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool enqueue_bulk()\n\t{\n\t\ttypedef TestTraits<2> Traits2;\n\t\ttypedef TestTraits<4> Traits4;\n\t\t\n\t\tint arr123[] = { 1, 2, 3 };\n\t\tint arr1234[] = { 1, 2, 3, 4 };\n\t\tint arr123456[] = { 1, 2, 3, 4, 5, 6 };\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Implicit, block allocation required\n\t\t\tConcurrentQueue<int, Traits2> q(2);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tq.enqueue_bulk(arr123, 3);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 4);\t\t// One for producer, one for block index, one for block\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 3; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits4::reset();\n\t\t{\n\t\t\t// Implicit, block allocation not required (end on block boundary)\n\t\t\tConcurrentQueue<int, Traits4> q(2);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 1);\n\t\t\t\n\t\t\tq.enqueue_bulk(arr1234, 4);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Implicit, allocation fail\n\t\t\tConcurrentQueue<int, Traits2> q(2);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue_bulk(arr123, 3));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// Still has to allocate implicit producer and block index\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_enqueue_bulk(arr123, 2));\n\t\t\tfor (int i = 0; i != 2; ++i) 
{\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\t\n\t\t}\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Implicit, block allocation not required\n\t\t\tConcurrentQueue<int, Traits2> q(4);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tq.enqueue_bulk(arr1234, 4);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits4::reset();\n\t\t{\n\t\t\t// Implicit, block allocation required (end not on block boundary)\n\t\t\tConcurrentQueue<int, Traits4> q(4);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 1);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(0));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(arr1234, 4));\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 4);\t\t// One for producer, one for block index, one for block\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits4::reset();\n\t\t{\n\t\t\t// Implicit, block allocation not required (end not on block boundary)\n\t\t\tConcurrentQueue<int, Traits4> q(5);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 1);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(0));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(arr1234, 4));\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Implicit, block 
allocation fail (end not on block boundary) -- test rewind\n\t\t\tConcurrentQueue<int, Traits2> q(4);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(17));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue_bulk(arr123456, 6));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item == 17);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Implicit, enqueue nothing\n\t\t\tConcurrentQueue<int, Traits2> q(3);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_enqueue_bulk(arr123, 0));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t////////\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Explicit, block allocation required\n\t\t\tConcurrentQueue<int, Traits2> q(2);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tq.enqueue_bulk(tok, arr123, 3);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 4);\t\t// One for block\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 3; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits4::reset();\n\t\t{\n\t\t\t// Explicit, block allocation not required (end on block boundary)\n\t\t\tConcurrentQueue<int, Traits4> q(2);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\t\t// One for producer, one for block 
index\n\t\t\t\n\t\t\tq.enqueue_bulk(tok, arr1234, 4);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Explicit, allocation fail\n\t\t\tConcurrentQueue<int, Traits2> q(2);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue_bulk(tok, arr123, 3));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_enqueue_bulk(tok, arr123, 2));\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\n\t\t}\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Explicit, block allocation not required\n\t\t\tConcurrentQueue<int, Traits2> q(4);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tq.enqueue_bulk(tok, arr1234, 4);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits4::reset();\n\t\t{\n\t\t\t// Explicit, block allocation required (end not on block boundary)\n\t\t\tConcurrentQueue<int, Traits4> q(4);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken 
tok(q);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(tok, 0));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(tok, arr1234, 4));\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 4);\t\t// One for block\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits4::reset();\n\t\t{\n\t\t\t// Explicit, block allocation not required (end not on block boundary)\n\t\t\tConcurrentQueue<int, Traits4> q(5);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(tok, 0));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(tok, arr1234, 4));\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// Explicit, block allocation fail (end not on block boundary) -- test rewind\n\t\t\tConcurrentQueue<int, Traits2> q(4);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(tok, 17));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue_bulk(tok, arr123456, 6));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item == 17);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits2::reset();\n\t\t{\n\t\t\t// 
Explicit, enqueue nothing\n\t\t\tConcurrentQueue<int, Traits2> q(3);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_enqueue_bulk(tok, arr123, 0));\n\t\t\tASSERT_OR_FAIL(Traits2::malloc_count() == 3);\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(tok, 17));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item == 17);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\tTraits4::reset();\n\t\t{\n\t\t\t// Explicit, re-use empty blocks\n\t\t\tConcurrentQueue<int, Traits4> q(8);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 1);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\t\t// One for producer, one for block index\n\t\t\t\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(tok, i));\n\t\t\t}\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(tok, arr123456, 6));\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\n\t\t\t\n\t\t\tfor (int i = 0; i != 6; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(Traits4::malloc_count() == 3);\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool block_alloc()\n\t{\n\t\ttypedef TestTraits<2> Traits;\n\t\tTraits::reset();\n\t\t\n\t\t{\n\t\t\tConcurrentQueue<int, Traits> q(7);\n\t\t\tASSERT_OR_FAIL(q.initialBlockPoolSize == 4);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 1);\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 
0);\n\t\t\t\n\t\t\tProducerToken tok(q);\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 3);\t\t// one for producer, one for its block index\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t\t\n\t\t\t// Enqueue one item too many (force extra block allocation)\n\t\t\tfor (int i = 0; i != 9; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(tok, i));\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 4);\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t\t\n\t\t\t// Still room for one more...\n\t\t\tASSERT_OR_FAIL(q.enqueue(tok, 9));\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 4);\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t\t\n\t\t\t// No more room without further allocations\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue(tok, 10));\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 4);\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t\t\n\t\t\t// Check items were enqueued properly\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 10; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(tok, item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\t\n\t\t\t// Queue should be empty, but not freed\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_from_producer(tok, item));\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(Traits::malloc_count() == 4);\n\t\tASSERT_OR_FAIL(Traits::free_count() == 4);\n\t\t\n\t\t// Implicit\n\t\tTraits::reset();\n\t\t{\n\t\t\tConcurrentQueue<int, Traits> q(7);\n\t\t\tASSERT_OR_FAIL(q.initialBlockPoolSize == 4);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(39));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 3);\t\t// one for producer, one for its block index\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t\t\n\t\t\t// Enqueue one item too many (force extra block allocation)\n\t\t\tfor (int i = 0; i != 8; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 4);\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 
0);\n\t\t\t\n\t\t\t// Still room for one more...\n\t\t\tASSERT_OR_FAIL(q.enqueue(8));\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 4);\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t\t\n\t\t\t// No more room without further allocations\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue(9));\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 4);\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t\t\n\t\t\t// Check items were enqueued properly\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item == 39);\n\t\t\tfor (int i = 0; i != 9; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\t\n\t\t\t// Queue should be empty, but not freed\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(Traits::free_count() == 0);\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(Traits::malloc_count() == 4);\n\t\tASSERT_OR_FAIL(Traits::free_count() == 4);\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool token_move()\n\t{\n\t\ttypedef TestTraits<16> Traits;\n\t\tTraits::reset();\n\t\t\n\t\t{\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tProducerToken t0(q);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(t0.valid());\n\t\t\t\n\t\t\tProducerToken t1(std::move(t0));\n\t\t\tASSERT_OR_FAIL(t1.valid());\n\t\t\tASSERT_OR_FAIL(!t0.valid());\n\t\t\t\n\t\t\tt1 = std::move(t1);\n\t\t\tASSERT_OR_FAIL(t1.valid());\n\t\t\tASSERT_OR_FAIL(!t0.valid());\n\t\t\t\n\t\t\tProducerToken t2(q);\n\t\t\tt2 = std::move(t1);\n\t\t\tASSERT_OR_FAIL(t2.valid());\n\t\t\tASSERT_OR_FAIL(t1.valid());\n\t\t\tASSERT_OR_FAIL(!t0.valid());\n\t\t\t\n\t\t\tt0 = std::move(t1);\n\t\t\tASSERT_OR_FAIL(t2.valid());\n\t\t\tASSERT_OR_FAIL(!t1.valid());\n\t\t\tASSERT_OR_FAIL(t0.valid());\n\t\t}\n\n\t\tASSERT_OR_FAIL(Traits::malloc_count() == 5);\t\t// 2 for each producer + 1 for initial block pool\n\t\tASSERT_OR_FAIL(Traits::free_count() == Traits::malloc_count());\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool multi_producers()\n\t{\n\t\ttypedef TestTraits<16> 
Traits;\n\t\tTraits::reset();\n\t\t\n\t\t{\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tProducerToken t0(q);\n\t\t\tProducerToken t1(q);\n\t\t\tProducerToken t2(q);\n\t\t\tProducerToken t3(q);\n\t\t\tProducerToken t4(q);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(t0, 0));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t1, 1));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t2, 2));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t3, 3));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t4, 4));\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t0, item) && item == 0 && !q.try_dequeue_from_producer(t0, item));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t1, item) && item == 1 && !q.try_dequeue_from_producer(t1, item));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t2, item) && item == 2 && !q.try_dequeue_from_producer(t2, item));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t3, item) && item == 3 && !q.try_dequeue_from_producer(t3, item));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t4, item) && item == 4 && !q.try_dequeue_from_producer(t4, item));\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(Traits::malloc_count() == 11);\t\t// 2 for each producer + 1 for initial block pool\n\t\tASSERT_OR_FAIL(Traits::free_count() == Traits::malloc_count());\n\t\t\n\t\t// Implicit\n\t\tTraits::reset();\n\t\t{\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tstd::atomic<bool> success[5];\n\t\t\tstd::atomic<int> done(0);\n\t\t\t\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tsuccess[i].store(false, std::memory_order_relaxed);\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tSimpleThread t([&](int j) {\n\t\t\t\t\tsuccess[j].store(q.enqueue(j), std::memory_order_relaxed);\n\t\t\t\t\tdone.fetch_add(1, std::memory_order_release);\n\t\t\t\t}, i);\n\t\t\t\tt.join();\n\t\t\t}\n\t\t\twhile (done.load(std::memory_order_acquire) != 5) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i].load(std::memory_order_relaxed));\n\t\t\t}\n\t\t\t\n\t\t\t// Cannot 
rely on order that producers are added (there's a race condition), only that they are all there somewhere.\n\t\t\t// Also, all items may not be visible to this thread yet.\n\t\t\tbool itemDequeued[5] = { false, false, false, false, false };\n\t\t\tint item;\n\t\t\tfor (int i = 0; i != 5;) {\n\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\titemDequeued[item] = true;\n\t\t\t\t\t++i;\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(itemDequeued[i]);\n\t\t\t}\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(Traits::malloc_count() <= 11 && Traits::malloc_count() >= 3);\t\t// 2 for each producer (depending on thread ID re-use) + 1 for initial block pool\n\t\tASSERT_OR_FAIL(Traits::free_count() == Traits::malloc_count());\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool producer_reuse()\n\t{\n\t\ttypedef TestTraits<16> Traits;\n\t\t\n\t\tTraits::reset();\n\t\t{\n\t\t\t// Explicit\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\t\n\t\t\t{\n\t\t\t\tProducerToken t0(q);\n\t\t\t}\n\t\t\t\n\t\t\t{\n\t\t\t\tProducerToken t1(q);\n\t\t\t}\n\t\t\t\n\t\t\t{\n\t\t\t\tProducerToken t2(q);\n\t\t\t\tProducerToken t3(q);\n\t\t\t\tProducerToken t4(q);\n\t\t\t\tProducerToken t5(q);\n\t\t\t}\n\t\t\t\n\t\t\t{\n\t\t\t\tProducerToken t6(q);\n\t\t\t\tProducerToken t7(q);\n\t\t\t}\n\t\t\t\n\t\t\t{\n\t\t\t\tProducerToken t8(q);\n\t\t\t\tProducerToken t9(q);\n\t\t\t}\n\n\t\t\t\n\t\t\t{\n\t\t\t\tProducerToken t10(q);\n\t\t\t\tProducerToken t11(q);\n\t\t\t}\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(Traits::malloc_count() == 9);\t\t// 2 for max number of live producers + 1 for initial block pool\n\t\tASSERT_OR_FAIL(Traits::free_count() == Traits::malloc_count());\n\t\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\tTraits::reset();\n\t\t{\n\t\t\t// Implicit\n\t\t\tconst int MAX_THREADS = 48;\n\t\t\tConcurrentQueue<int, Traits> q(Traits::BLOCK_SIZE * (MAX_THREADS + 1));\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 1);\t\t// Initial block pool\n\t\t\t\n\t\t\tSimpleThread t0([&]() { q.enqueue(0); 
});\n\t\t\tt0.join();\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 3);\t\t// Implicit producer\n\t\t\t\n\t\t\tSimpleThread t1([&]() { q.enqueue(1); });\n\t\t\tt1.join();\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 3);\n\t\t\t\n\t\t\tSimpleThread t2([&]() { q.enqueue(2); });\n\t\t\tt2.join();\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 3);\n\t\t\t\n\t\t\tq.enqueue(3);\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 3);\n\t\t\t\n\t\t\tint item;\n\t\t\tint i = 0;\n\t\t\twhile (q.try_dequeue(item)) {\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t++i;\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(i == 4);\n\t\t\tASSERT_OR_FAIL(Traits::malloc_count() == 3);\n\t\t\t\n\t\t\tstd::vector<SimpleThread> threads(MAX_THREADS);\n\t\t\tfor (int rep = 0; rep != 2; ++rep) {\n\t\t\t\tfor (std::size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\tthreads[tid] = SimpleThread([&](std::size_t tid) {\n\t\t\t\t\t\tfor (volatile int i = 0; i != 4096; ++i) {\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tq.enqueue((int)tid);\n\t\t\t\t\t\tfor (volatile int i = 0; i != 4096; ++i) {\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\t\t\t\t\t}, tid);\n\t\t\t\t}\n\t\t\t\tfor (std::size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\tthreads[tid].join();\n\t\t\t\t}\n\t\t\t\tstd::vector<bool> seenIds(threads.size());\n\t\t\t\tfor (std::size_t i = 0; i != threads.size(); ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(!seenIds[item]);\n\t\t\t\t\tseenIds[item] = true;\n\t\t\t\t}\n\t\t\t\tfor (std::size_t i = 0; i != seenIds.size(); ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(seenIds[i]);\n\t\t\t\t}\n\t\t\t\tASSERT_OR_FAIL(Traits::malloc_count() <= 2 * MAX_THREADS + 1);\n\t\t\t}\n\t\t}\n\t\tASSERT_OR_FAIL(Traits::free_count() == Traits::malloc_count());\t\n\t\t\n\t\t\n\t\tTraits::reset();\n\t\t{\n\t\t\t// Test many threads and implicit queues being created and destroyed concurrently\n\t\t\tstd::vector<SimpleThread> threads(32);\n\t\t\tstd::vector<bool> 
success(threads.size(), true);\n\t\t\tfor (std::size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](std::size_t tid) {\n\t\t\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\t\t\tConcurrentQueue<int, MallocTrackingTraits> q(1);\n\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tConcurrentQueue<int, MallocTrackingTraits> q(15);\n\t\t\t\t\tfor (int i = 0; i != 100; ++i) {\n\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t}\n\t\t\t\t\tint item;\n\t\t\t\t\tfor (int i = 0; i != 100; ++i) {\n\t\t\t\t\t\tif (!q.try_dequeue(item) || item != i) {\n\t\t\t\t\t\t\tsuccess[tid] = false;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif (q.size_approx() != 0) {\n\t\t\t\t\t\tsuccess[tid] = false;\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (std::size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t\tASSERT_OR_FAIL(success[tid]);\n\t\t\t}\n\t\t}\n\t\tASSERT_OR_FAIL(Traits::free_count() == Traits::malloc_count());\t\n#endif\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool block_reuse()\n\t{\n\t\tint item;\n\t\t\n\t\ttypedef TestTraits<4> SmallBlocks;\n\t\tSmallBlocks::reset();\n\t\t{\n\t\t\tConcurrentQueue<int, SmallBlocks> q(8);\t\t// 2 blocks\n\t\t\tProducerToken t(q);\n\t\t\t\n\t\t\tfor (int j = 0; j != 3; ++j) {\n\t\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor (int i = 0; i != 8; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, 
item));\n\t\t\t\t\tASSERT_OR_FAIL(item == ((i + 4) & 7));\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tASSERT_OR_FAIL(!q.try_dequeue_from_producer(t, item));\n\t\t\t}\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(SmallBlocks::malloc_count() == 3);\n\t\tASSERT_OR_FAIL(SmallBlocks::free_count() == SmallBlocks::malloc_count());\n\t\t\n\t\t\n\t\ttypedef TestTraits<8192> HugeBlocks;\n\t\tHugeBlocks::reset();\n\t\t{\n\t\t\tConcurrentQueue<int, HugeBlocks> q(8192 * 2);\t\t// 2 blocks\n\t\t\tProducerToken t(q);\n\t\t\t\n\t\t\tfor (int j = 0; j != 3; ++j) {\n\t\t\t\tfor (int i = 0; i != 8192; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8192; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor (int i = 0; i != 8192 * 2; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8192; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8192; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8192 * 2; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\t\t\tASSERT_OR_FAIL(item == ((i + 8192) & (8192 * 2 - 1)));\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tASSERT_OR_FAIL(!q.try_dequeue_from_producer(t, item));\n\t\t\t}\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(HugeBlocks::malloc_count() == 3);\n\t\tASSERT_OR_FAIL(HugeBlocks::free_count() == HugeBlocks::malloc_count());\n\t\t\n\t\t\n\t\t// Implicit\n\t\tSmallBlocks::reset();\n\t\t{\n\t\t\tConcurrentQueue<int, SmallBlocks> q(8);\t\t// 2 blocks\n\t\t\t\n\t\t\tfor (int j = 0; j != 3; ++j) {\n\t\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor (int i = 0; 
i != 8; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(item == ((i + 4) & 7));\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\t}\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(SmallBlocks::malloc_count() == 3);\n\t\tASSERT_OR_FAIL(SmallBlocks::free_count() == SmallBlocks::malloc_count());\n\t\t\n\t\tHugeBlocks::reset();\n\t\t{\n\t\t\tConcurrentQueue<int, HugeBlocks> q(8192 * 2);\t\t// 2 blocks\n\t\t\t\n\t\t\tfor (int j = 0; j != 3; ++j) {\n\t\t\t\tfor (int i = 0; i != 8192; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8192; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor (int i = 0; i != 8192 * 2; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8192; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8192; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(i));\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != 8192 * 2; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(item == ((i + 8192) & (8192 * 2 - 1)));\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\t}\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(HugeBlocks::malloc_count() == 3);\n\t\tASSERT_OR_FAIL(HugeBlocks::free_count() == HugeBlocks::malloc_count());\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool block_recycling()\n\t{\n\t\ttypedef TestTraits<4> SmallBlocks;\n\t\tSmallBlocks::reset();\n\t\t\n\t\tConcurrentQueue<int, SmallBlocks> q(24);\t\t// 6 blocks\n\t\tSimpleThread 
threads[4];\n\t\tstd::atomic<bool> success(true);\n\t\t\n\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\tint item;\n\t\t\t\tint next = 0;\n\t\t\t\tint prevItems[4] = { -1, -1, -1, -1 };\n\t\t\t\tfor (int successfulEnqueues = 0; successfulEnqueues < 10000;) {\n\t\t\t\t\tfor (int j = 0; j != 12; ++j) {\n\t\t\t\t\t\tif (q.try_enqueue((i << 28) | next++)) {\n\t\t\t\t\t\t\t++successfulEnqueues;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfor (int j = 0; j != 12; ++j) {\n\t\t\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\t\t\tif ((item & 0x0FFFFFFF) <= prevItems[item >> 28]) {\n\t\t\t\t\t\t\t\tsuccess.store(false, std::memory_order_relaxed);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tprevItems[item >> 28] = item & 0x0FFFFFFF;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, i);\n\t\t}\n\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\tthreads[i].join();\n\t\t}\n\t\t\n\t\tint item;\n\t\tint prevItems[4] = { -1, -1, -1, -1 };\n\t\twhile (q.try_dequeue(item)) {\n\t\t\tASSERT_OR_FAIL((item & 0x0FFFFFFF) > prevItems[item >> 28]);\n\t\t\tprevItems[item >> 28] = item & 0x0FFFFFFF;\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(success.load(std::memory_order_relaxed));\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool leftovers_destroyed()\n\t{\n\t\ttypedef TestTraits<4> Traits;\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(4);\t\t// One block\n\t\t\tProducerToken t(q);\n\t\t\t\n\t\t\tFoo item;\n\t\t\tq.enqueue(t, Foo());\n\t\t\tq.enqueue(t, Foo());\n\t\t\tq.enqueue(t, Foo());\n\t\t\tq.try_dequeue_from_producer(t, item);\n\t\t}\n\t\tASSERT_OR_FAIL(Foo::createCount() == 4);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 7);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(4);\t\t// One block\n\t\t\tProducerToken t(q);\n\t\t\t\n\t\t\tq.enqueue(t, Foo());\n\t\t\tq.enqueue(t, Foo());\n\t\t\tq.enqueue(t, Foo());\n\t\t\tq.enqueue(t, 
Foo());\n\t\t}\n\t\tASSERT_OR_FAIL(Foo::createCount() == 4);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 8);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(8);\t\t// Two blocks\n\t\t\tProducerToken t(q);\n\t\t\t\n\t\t\tfor (int i = 0; i != 8; ++i) {\n\t\t\t\tq.enqueue(t, Foo());\n\t\t\t}\n\t\t}\n\t\tASSERT_OR_FAIL(Foo::createCount() == 8);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 16);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(12);\t\t// Three blocks\n\t\t\tProducerToken t(q);\n\t\t\t\n\t\t\t// Last block only partially full\n\t\t\tfor (int i = 0; i != 10; ++i) {\n\t\t\t\tq.enqueue(t, Foo());\n\t\t\t}\n\t\t\t\n\t\t\t// First block only partially full\n\t\t\tFoo item;\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t}\n\t\tASSERT_OR_FAIL(Foo::createCount() == 11);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 21);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\t\n\t\t// Implicit\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(4);\t\t// One block\n\t\t\t\n\t\t\tFoo item;\n\t\t\tq.enqueue(Foo());\n\t\t\tq.enqueue(Foo());\n\t\t\tq.enqueue(Foo());\n\t\t\tq.try_dequeue(item);\n\t\t}\n\t\tASSERT_OR_FAIL(Foo::createCount() == 4);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 7);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(4);\t\t// One block\n\t\t\t\n\t\t\tq.enqueue(Foo());\n\t\t\tq.enqueue(Foo());\n\t\t\tq.enqueue(Foo());\n\t\t\tq.enqueue(Foo());\n\t\t}\n\t\tASSERT_OR_FAIL(Foo::createCount() == 4);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 
8);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(8);\t\t// Two blocks\n\t\t\t\n\t\t\tfor (int i = 0; i != 8; ++i) {\n\t\t\t\tq.enqueue(Foo());\n\t\t\t}\n\t\t}\n\t\tASSERT_OR_FAIL(Foo::createCount() == 8);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 16);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(12);\t\t// Three blocks\n\t\t\t\n\t\t\t// Last block only partially full\n\t\t\tfor (int i = 0; i != 10; ++i) {\n\t\t\t\tq.enqueue(Foo());\n\t\t\t}\n\t\t\t\n\t\t\t// First block only partially full\n\t\t\tFoo item;\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t}\n\t\tASSERT_OR_FAIL(Foo::createCount() == 11);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 21);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool block_index_resized()\n\t{\n\t\ttypedef TestTraits<4, 2> Traits;\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(8);\t\t// 2 blocks, matches initial index size\n\t\t\tProducerToken t(q);\n\t\t\t\n\t\t\tfor (int i = 0; i != 1024; ++i) {\n\t\t\t\tq.enqueue(t, Foo());\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 1024; ++i) {\n\t\t\t\tFoo item;\n\t\t\t\tq.try_dequeue_from_producer(t, item);\n\t\t\t}\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(Traits::malloc_count() == 1 + 2 + 254 + 7);\n\t\tASSERT_OR_FAIL(Traits::free_count() == Traits::malloc_count());\n\t\t\n\t\tASSERT_OR_FAIL(Foo::createCount() == 2048);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 3072);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\t// Implicit\n\t\tTraits::reset();\n\t\tFoo::reset();\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q(8);\t\t// 2 blocks\n\t\t\t\n\t\t\tfor (int i = 0; i != 1024; ++i) {\n\t\t\t\tq.enqueue(Foo());\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i 
= 0; i != 1024; ++i) {\n\t\t\t\tFoo item;\n\t\t\t\tq.try_dequeue(item);\n\t\t\t}\n\t\t}\n\t\t\n\t\tASSERT_OR_FAIL(Traits::malloc_count() == 1 + 2 + 254 + 6);\n\t\tASSERT_OR_FAIL(Traits::free_count() == Traits::malloc_count());\n\t\t\n\t\tASSERT_OR_FAIL(Foo::createCount() == 2048);\n\t\tASSERT_OR_FAIL(Foo::destroyCount() == 3072);\n\t\tASSERT_OR_FAIL(Foo::destroyedInOrder());\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool try_dequeue()\n\t{\n\t\tConcurrentQueue<int, MallocTrackingTraits> q;\n\t\tint item;\n\t\t\n\t\t// Producer token\n\t\t{\n\t\t\tfor (int i = 0; i != 50; ++i) {\n\t\t\t\tProducerToken t(q);\n\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i * 100 + j));\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t\n\t\t\tfor (int i = 0; i != 50; ++i) {\n\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(item == i * 100 + j);\n\t\t\t\t}\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// Mixed producer types\n\t\t{\n\t\t\tfor (int i = 0; i != 25; ++i) {\n\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(i * 100 + j));\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 25; i != 50; ++i) {\n\t\t\t\tProducerToken t(q);\n\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i * 100 + j));\n\t\t\t\t}\n\t\t\t}\n\t\t\tbool success[5000];\n\t\t\tstd::memset(success, 0, sizeof(success));\n\t\t\tfor (int i = 0; i != 50; ++i) {\n\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tsuccess[item] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != 5000; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// Mixed producer types with consumer token\n\t\t{\n\t\t\tfor (int i = 0; i != 25; ++i) {\n\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(i * 100 + j));\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 
25; i != 50; ++i) {\n\t\t\t\tProducerToken t(q);\n\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\tASSERT_OR_FAIL(q.enqueue(t, i * 100 + j));\n\t\t\t\t}\n\t\t\t}\n\t\t\tbool success[5000];\n\t\t\tstd::memset(success, 0, sizeof(success));\n\t\t\tfor (int i = 0; i != 50; ++i) {\n\t\t\t\tConsumerToken t(q);\n\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(t, item));\n\t\t\t\t\tsuccess[item] = true;\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != 5000; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t\tConsumerToken t(q);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(t, item));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(t, item));\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool try_dequeue_threaded()\n\t{\n\t\tint item;\n\t\tConcurrentQueue<int, MallocTrackingTraits> q;\n\t\t\n\t\t// Threaded consumption with tokens\n\t\t{\n\t\t\tSimpleThread threads[20];\n\t\t\tfor (int i = 0; i != 10; ++i) {\n\t\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\t\tProducerToken t(q);\n\t\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\t\tq.enqueue(t, i * 10 + j);\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\n\t\t\tstd::atomic<int> dequeueCount(0);\n\t\t\tfor (int i = 10; i != 20; ++i) {\n\t\t\t\tthreads[i] = SimpleThread([&]() {\n\t\t\t\t\tint item;\n\t\t\t\t\tConsumerToken t(q);\n\t\t\t\t\twhile (dequeueCount.load(std::memory_order_relaxed) != 1000) {\n\t\t\t\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\t\t\t\tdequeueCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 20; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// Threaded consumption\n\t\t{\n\t\t\tSimpleThread threads[20];\n\t\t\tfor (int i = 0; i != 10; ++i) {\n\t\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\t\tfor (int j = 0; j != 100; ++j) 
{\n\t\t\t\t\t\tq.enqueue(i * 10 + j);\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\n\t\t\tstd::atomic<int> dequeueCount(0);\n\t\t\tfor (int i = 10; i != 20; ++i) {\n\t\t\t\tthreads[i] = SimpleThread([&]() {\n\t\t\t\t\tint item;\n\t\t\t\t\twhile (dequeueCount.load(std::memory_order_relaxed) != 1000) {\n\t\t\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\t\t\tdequeueCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 20; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool try_dequeue_bulk()\n\t{\n\t\ttypedef TestTraits<4> Traits;\n\t\tint items[5];\n\t\t\n\t\t// Explicit producer\n\t\t{\n\t\t\tTraits::reset();\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tProducerToken tok(q);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 0);\n\t\t\t\n\t\t\tq.enqueue(tok, 17);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 1);\n\t\t\tASSERT_OR_FAIL(items[0] == 17);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t\t\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tq.enqueue(tok, i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 4);\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tASSERT_OR_FAIL(items[i] == i + 1);\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t\t\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tq.enqueue(tok, i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 5);\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(items[i] == i + 1);\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t\t\n\t\t\tfor (int i = 0; i != 6; ++i) {\n\t\t\t\tq.enqueue(tok, i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 5);\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(items[i] == i + 
1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(items[0]));\n\t\t\tASSERT_OR_FAIL(items[0] == 6);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t\t\n\t\t\tfor (int i = 0; i != 10; ++i) {\n\t\t\t\tq.enqueue(tok, i + 1);\n\t\t\t}\n\t\t\tfor (int k = 0; k != 2; ++k) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 5);\n\t\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(items[i] == k * 5 + i + 1);\n\t\t\t\t}\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t}\n\t\t\n\t\t// Implicit producer\n\t\t{\n\t\t\tTraits::reset();\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 0);\n\t\t\t\n\t\t\tq.enqueue(17);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 1);\n\t\t\tASSERT_OR_FAIL(items[0] == 17);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t\t\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tq.enqueue(i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 4);\n\t\t\tfor (int i = 0; i != 4; ++i) {\n\t\t\t\tASSERT_OR_FAIL(items[i] == i + 1);\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t\t\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tq.enqueue(i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 5);\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(items[i] == i + 1);\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t\t\n\t\t\tfor (int i = 0; i != 6; ++i) {\n\t\t\t\tq.enqueue(i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 5);\n\t\t\tfor (int i = 0; i != 5; ++i) {\n\t\t\t\tASSERT_OR_FAIL(items[i] == i + 1);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(items[0]));\n\t\t\tASSERT_OR_FAIL(items[0] == 6);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t\t\n\t\t\tfor (int i = 0; i != 10; ++i) {\n\t\t\t\tq.enqueue(i + 1);\n\t\t\t}\n\t\t\tfor (int k = 0; k != 2; ++k) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 5) == 5);\n\t\t\t\tfor (int i 
= 0; i != 5; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(items[i] == k * 5 + i + 1);\n\t\t\t\t}\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(items[0]));\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool try_dequeue_bulk_threaded()\n\t{\n\t\ttypedef TestTraits<2> Traits;\n\t\tint dummy;\n\t\t\n\t\t// Explicit producer\n\t\t{\n\t\t\tTraits::reset();\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tSimpleThread threads[2];\n\t\t\tbool success[2] = { true, true };\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tif (i == 0) {\n\t\t\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\t\t\t// Producer\n\t\t\t\t\t\tProducerToken tok(q);\n\t\t\t\t\t\tfor (int i = 0; i != 32*1024; ++i) {\n\t\t\t\t\t\t\tq.enqueue(tok, i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\t\t\t// Consumer\n\t\t\t\t\t\tint items[5];\n\t\t\t\t\t\tint prevItem = -1;\n\t\t\t\t\t\tfor (int i = 0; i != 32*1024;) {\n\t\t\t\t\t\t\tauto dequeued = q.try_dequeue_bulk(items, 5);\n\t\t\t\t\t\t\tif (dequeued > 0) {\n\t\t\t\t\t\t\t\tif (dequeued > 5) {\n\t\t\t\t\t\t\t\t\tsuccess[i] = false;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tfor (std::size_t j = 0; j != dequeued; ++j) {\n\t\t\t\t\t\t\t\t\tif (items[j] != prevItem + 1) {\n\t\t\t\t\t\t\t\t\t\tsuccess[i] = false;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tprevItem = items[j];\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\ti += (int)dequeued;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(success[0]);\n\t\t\tASSERT_OR_FAIL(success[1]);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(dummy));\n\t\t}\n\t\t\n\t\t// Implicit producer\n\t\t{\n\t\t\tTraits::reset();\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tSimpleThread threads[2];\n\t\t\tbool success[2] = { true, true };\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tif (i == 0) {\n\t\t\t\t\tthreads[i] = SimpleThread([&](int i) 
{\n\t\t\t\t\t\t// Producer\n\t\t\t\t\t\tfor (int i = 0; i != 32*1024; ++i) {\n\t\t\t\t\t\t\tq.enqueue(i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\t\t\t// Consumer\n\t\t\t\t\t\tint items[5];\n\t\t\t\t\t\tint prevItem = -1;\n\t\t\t\t\t\tfor (int i = 0; i != 32*1024;) {\n\t\t\t\t\t\t\tauto dequeued = q.try_dequeue_bulk(items, 5);\n\t\t\t\t\t\t\tif (dequeued > 0) {\n\t\t\t\t\t\t\t\tif (dequeued > 5) {\n\t\t\t\t\t\t\t\t\tsuccess[i] = false;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tfor (std::size_t j = 0; j != dequeued; ++j) {\n\t\t\t\t\t\t\t\t\tif (items[j] != prevItem + 1) {\n\t\t\t\t\t\t\t\t\t\tsuccess[i] = false;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tprevItem = items[j];\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\ti += (int)dequeued;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(success[0]);\n\t\t\tASSERT_OR_FAIL(success[1]);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(dummy));\n\t\t}\n\t\t\n\t\t// Multithreaded consumption\n\t\t{\n\t\t\tTraits::reset();\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\t\n\t\t\tbool success[20];\n\t\t\tSimpleThread threads[20];\n\t\t\tfor (int i = 0; i != 10; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\t\tProducerToken t(q);\n\t\t\t\t\tif ((i & 1) == 1) {\n\t\t\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\t\t\tq.enqueue(t, i * 128 + j);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfor (int j = 0; j != 100; ++j) {\n\t\t\t\t\t\t\tq.enqueue(i * 128 + j);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\t\n\t\t\tstd::atomic<size_t> dequeueCount(0);\n\t\t\tfor (int i = 10; i != 20; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\tthreads[i] = SimpleThread([&](int i) {\n\t\t\t\t\tint prevItems[10];\n\t\t\t\t\tfor (int j = 0; j != 10; ++j) 
{\n\t\t\t\t\t\tprevItems[j] = -1;\n\t\t\t\t\t}\n\t\t\t\t\tint items[15];\n\t\t\t\t\tConsumerToken t(q);\n\t\t\t\t\t\n\t\t\t\t\twhile (dequeueCount.load(std::memory_order_relaxed) != 1000) {\n\t\t\t\t\t\tsize_t count;\n\t\t\t\t\t\tif ((i & 1) == 1) {\n\t\t\t\t\t\t\tcount = q.try_dequeue_bulk(items, 15);\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tcount = q.try_dequeue_bulk(t, items, 15);\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (count > 15) {\n\t\t\t\t\t\t\tsuccess[i] = false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor (size_t k = 0; k != count; ++k) {\n\t\t\t\t\t\t\tif (prevItems[items[k] / 128] >= (items[k] & 127)) {\n\t\t\t\t\t\t\t\tsuccess[i] = false;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tprevItems[items[k] / 128] = items[k] & 127;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdequeueCount.fetch_add(count, std::memory_order_relaxed);\n\t\t\t\t\t}\n\t\t\t\t}, i);\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 20; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tfor (int i = 0; i != 20; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool implicit_producer_hash()\n\t{\n\t\tfor (int j = 0; j != 5; ++j) {\n\t\t\tConcurrentQueue<int, MallocTrackingTraits> q;\n\t\t\tstd::vector<SimpleThread> threads;\n\t\t\tfor (int i = 0; i != 20; ++i) {\n\t\t\t\tthreads.push_back(SimpleThread([&]() {\n\t\t\t\t\tq.enqueue(7);\n\t\t\t\t}));\n\t\t\t}\n\t\t\t\n\t\t\tfor (auto it = threads.begin(); it != threads.end(); ++it) {\n\t\t\t\tit->join();\n\t\t\t}\n\t\t\t\n\t\t\tint item;\n\t\t\tConsumerToken t(q);\n\t\t\tfor (auto i = 0; i != 20; ++i) {\n\t\t\t\tif ((j & 1) == 0) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(t, item));\n\t\t\t\t}\n\t\t\t\tASSERT_OR_FAIL(item == 7);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool index_wrapping()\n\t{\n\t\t{\n\t\t\t// 
Implicit\n\t\t\tConcurrentQueue<int, SmallIndexTraits> q(16);\n\t\t\tint item;\n\t\t\t\n\t\t\tfor (int i = 0; i != (1 << 18); ++i) {\n\t\t\t\tif ((i & 16) == 0) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_enqueue(i));\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\t\tASSERT_OR_FAIL(item == (i - 16));\n\t\t\t\t}\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Explicit\n\t\t\tConcurrentQueue<int, SmallIndexTraits> q(16);\n\t\t\tProducerToken tok(q);\n\t\t\tint item;\n\t\t\t\n\t\t\tfor (int i = 0; i != (1 << 18); ++i) {\n\t\t\t\tif ((i & 16) == 0) {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_enqueue(tok, i));\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(tok, item));\n\t\t\t\t\tASSERT_OR_FAIL(item == (i - 16));\n\t\t\t\t}\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Implicit extra small\n\t\t\tConcurrentQueue<int, ExtraSmallIndexTraits> q(1);\n\t\t\tint item;\n\t\t\t\n\t\t\tfor (int i = 0; i != 4097; ++i) {\n\t\t\t\tq.enqueue(i);\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Explicit extra small\n\t\t\tConcurrentQueue<int, ExtraSmallIndexTraits> q(1);\n\t\t\tProducerToken tok(q);\n\t\t\tint item;\n\t\t\t\n\t\t\tfor (int i = 0; i != 4097; ++i) {\n\t\t\t\tq.enqueue(tok, i);\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tstruct SizeLimitTraits : public MallocTrackingTraits\n\t{\n\t\tstatic const size_t BLOCK_SIZE = 2;\n\t\tstatic const size_t MAX_SUBQUEUE_SIZE = 5;\t\t// Will round up to 6 because of block size\n\t};\n\t\n\tbool subqueue_size_limit()\n\t{\n\t\t{\n\t\t\t// Explicit\n\t\t\tConcurrentQueue<int, SizeLimitTraits> q;\n\t\t\tProducerToken t(q);\n\t\t\tint 
item;\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 1));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 2));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 3));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 4));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 5));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 6));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(t, 7));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(t, 8));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 1);\n\t\t\tASSERT_OR_FAIL(!q.enqueue(t, 7));\t\t// Can't reuse block until it's completely empty\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 2);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 7));\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 8));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(t, 9));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 3);\n\t\t\tASSERT_OR_FAIL(!q.enqueue(t, 9));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 4);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 9));\n\t\t\t\n\t\t\tfor (int i = 5; i <= 9; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, 10));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 10);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tfor (int i = 0; i != 6; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_enqueue(t, i));\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue(t, 7));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(t, 7));\n\t\t\t\n\t\t\t// Bulk\n\t\t\tint items[6];\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 6) == 6);\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue_bulk(t, items, 7));\n\t\t\tASSERT_OR_FAIL(!q.enqueue_bulk(t, items, 7));\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(t, items, 6));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 6) == 6);\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(t, items, 3));\n\t\t\tASSERT_OR_FAIL(!q.enqueue_bulk(t, items, 4));\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(t, items, 3));\n\t\t\tASSERT_OR_FAIL(!q.enqueue_bulk(t, items, 1));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(t, 100));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 1) == 1);\n\t\t\tASSERT_OR_FAIL(!q.enqueue(t, 
100));\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Implicit\n\t\t\tConcurrentQueue<int, SizeLimitTraits> q;\n\t\t\tint item;\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(1));\n\t\t\tASSERT_OR_FAIL(q.enqueue(2));\n\t\t\tASSERT_OR_FAIL(q.enqueue(3));\n\t\t\tASSERT_OR_FAIL(q.enqueue(4));\n\t\t\tASSERT_OR_FAIL(q.enqueue(5));\n\t\t\tASSERT_OR_FAIL(q.enqueue(6));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(7));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(8));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 1);\n\t\t\tASSERT_OR_FAIL(!q.enqueue(7));\t\t// Can't reuse block until it's completely empty\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 2);\n\t\t\tASSERT_OR_FAIL(q.enqueue(7));\n\t\t\tASSERT_OR_FAIL(q.enqueue(8));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(9));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 3);\n\t\t\tASSERT_OR_FAIL(!q.enqueue(9));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 4);\n\t\t\tASSERT_OR_FAIL(q.enqueue(9));\n\t\t\t\n\t\t\tfor (int i = 5; i <= 9; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.enqueue(10));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item) && item == 10);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tfor (int i = 0; i != 6; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_enqueue(i));\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue(7));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(7));\n\t\t\t\n\t\t\t// Bulk\n\t\t\tint items[6];\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 6) == 6);\n\t\t\tASSERT_OR_FAIL(!q.try_enqueue_bulk(items, 7));\n\t\t\tASSERT_OR_FAIL(!q.enqueue_bulk(items, 7));\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(items, 6));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 6) == 6);\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(items, 3));\n\t\t\tASSERT_OR_FAIL(!q.enqueue_bulk(items, 4));\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(items, 3));\n\t\t\tASSERT_OR_FAIL(!q.enqueue_bulk(items, 1));\n\t\t\tASSERT_OR_FAIL(!q.enqueue(100));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(items, 1) == 
1);\n\t\t\tASSERT_OR_FAIL(!q.enqueue(100));\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool exceptions()\n\t{\n\t\ttypedef TestTraits<4, 2> Traits;\n\t\t\n\t\t{\n\t\t\t// Explicit, basic\n\t\t\t// enqueue\n\t\t\tConcurrentQueue<ThrowingMovable, Traits> q;\n\t\t\tProducerToken tok(q);\n\t\t\t\n\t\t\tThrowingMovable::reset();\n\t\t\t\n\t\t\tbool threw = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue(tok, ThrowingMovable(1, true));\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 1);\n\t\t\t\tASSERT_OR_FAIL(m->moved);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(tok, ThrowingMovable(2)));\n\t\t\tThrowingMovable result(-1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 2);\n\t\t\tASSERT_OR_FAIL(result.moved);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 3);\n\t\t\t\n\t\t\t// dequeue\n\t\t\tThrowingMovable::reset();\n\t\t\tq.enqueue(tok, ThrowingMovable(10));\n\t\t\tq.enqueue(tok, ThrowingMovable(11, false, true));\n\t\t\tq.enqueue(tok, ThrowingMovable(12));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 3);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 10);\n\t\t\tthrew = false;\n\t\t\ttry {\n\t\t\t\tq.try_dequeue(result);\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tASSERT_OR_FAIL(m->id == 11);\n\t\t\t\tthrew = true;\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 12);\n\t\t\tASSERT_OR_FAIL(result.moved);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\tq.enqueue(tok, ThrowingMovable(13));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 
13);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 8);\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Explicit, on and off block boundaries\n\t\t\t// enqueue\n\t\t\tConcurrentQueue<ThrowingMovable, Traits> q;\n\t\t\tProducerToken tok(q);\n\t\t\t\n\t\t\tThrowingMovable::reset();\n\t\t\t\n\t\t\tfor (int i = 0; i != 3; ++i) {\n\t\t\t\tq.enqueue(tok, ThrowingMovable(i));\n\t\t\t}\n\t\t\tbool threw = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue(tok, ThrowingMovable(3, true));\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 3);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 3);\n\t\t\t\n\t\t\tq.enqueue(tok, ThrowingMovable(4));\n\t\t\tthrew = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue(tok, ThrowingMovable(5, true));\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 5);\n\t\t\t\tASSERT_OR_FAIL(m->moved);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 4);\n\t\t\tq.enqueue(tok, ThrowingMovable(6));\n\t\t\t\n\t\t\tThrowingMovable result(-1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 2);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 4);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 6);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 12);\n\t\t\t\n\t\t\t// dequeue\n\t\t\tThrowingMovable::reset();\n\t\t\tq.enqueue(tok, ThrowingMovable(10, false, true));\n\t\t\tq.enqueue(tok, ThrowingMovable(11));\n\t\t\tq.enqueue(tok, ThrowingMovable(12));\n\t\t\tq.enqueue(tok, ThrowingMovable(13, false, true));\n\t\t\tq.enqueue(tok, ThrowingMovable(14, 
false, true));\n\t\t\tq.enqueue(tok, ThrowingMovable(15, false, true));\n\t\t\tq.enqueue(tok, ThrowingMovable(16));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 7);\n\t\t\t\n\t\t\tfor (int i = 10; i != 17; ++i) {\n\t\t\t\tif (i == 10 || (i >= 13 && i <= 15)) {\n\t\t\t\t\tthrew = false;\n\t\t\t\t\ttry {\n\t\t\t\t\t\tq.try_dequeue(result);\n\t\t\t\t\t}\n\t\t\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\t\t\tASSERT_OR_FAIL(m->id == i);\n\t\t\t\t\t\tASSERT_OR_FAIL(m->moved);\n\t\t\t\t\t\tthrew = true;\n\t\t\t\t\t}\n\t\t\t\t\tASSERT_OR_FAIL(threw);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\t\t\tASSERT_OR_FAIL(result.id == i);\n\t\t\t\t\tASSERT_OR_FAIL(result.moved);\n\t\t\t\t}\n\t\t\t\tASSERT_OR_FAIL(q.size_approx() == (std::uint32_t)(16 - i));\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\tq.enqueue(tok, ThrowingMovable(20));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 20);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 16);\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Explicit bulk\n\t\t\t// enqueue\n\t\t\tConcurrentQueue<ThrowingMovable, Traits> q;\n\t\t\tProducerToken tok(q);\n\t\t\t\n\t\t\tThrowingMovable::reset();\n\t\t\tstd::vector<ThrowingMovable> items;\n\t\t\titems.reserve(5);\n\t\t\titems.push_back(ThrowingMovable(1));\n\t\t\titems.push_back(ThrowingMovable(2));\n\t\t\titems.push_back(ThrowingMovable(3));\n\t\t\titems.push_back(ThrowingMovable(4));\n\t\t\titems.push_back(ThrowingMovable(5));\n\t\t\titems.back().throwOnCctor = true;\n\t\t\t\n\t\t\tbool threw = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue_bulk(tok, std::make_move_iterator(items.begin()), 5);\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 5);\n\t\t\t\tASSERT_OR_FAIL(m->copied);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 
0);\n\t\t\tq.enqueue(tok, ThrowingMovable(6));\n\t\t\t\n\t\t\tthrew = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue_bulk(tok, std::make_move_iterator(items.begin()), 5);\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 5);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\t\n\t\t\tThrowingMovable result(-1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 6);\n\t\t\tASSERT_OR_FAIL(result.moved);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 15);\n\t\t\t\n\t\t\t// dequeue\n\t\t\tThrowingMovable::reset();\n\t\t\tq.enqueue(tok, ThrowingMovable(10));\n\t\t\tq.enqueue(tok, ThrowingMovable(11));\n\t\t\tq.enqueue(tok, ThrowingMovable(12));\n\t\t\tq.enqueue(tok, ThrowingMovable(13));\n\t\t\tq.enqueue(tok, ThrowingMovable(14, false, true, true));\t\t// std::back_inserter turns an assignment into a ctor call\n\t\t\tq.enqueue(tok, ThrowingMovable(15));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 6);\n\t\t\t\n\t\t\tstd::vector<ThrowingMovable> results;\n\t\t\tresults.reserve(5);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(std::back_inserter(results), 2));\n\t\t\tASSERT_OR_FAIL(results.size() == 2);\n\t\t\tASSERT_OR_FAIL(results[0].id == 10);\n\t\t\tASSERT_OR_FAIL(results[1].id == 11);\n\t\t\tASSERT_OR_FAIL(results[0].moved);\n\t\t\tASSERT_OR_FAIL(results[1].moved);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 4);\n\t\t\tthrew = false;\n\t\t\ttry {\n\t\t\t\tq.try_dequeue_bulk(std::back_inserter(results), 4);\n\t\t\t}\n\t\t\tcatch (ThrowingMovable*) {\n\t\t\t\t// Note: Can't inspect thrown value since it points to an object whose construction was attempted on the vector and\n\t\t\t\t// no longer exists\n\t\t\t\tthrew = true;\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 
0);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(std::back_inserter(results), 1) == 0);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(results.size() == 4);\n\t\t\tASSERT_OR_FAIL(results[2].id == 12);\n\t\t\tASSERT_OR_FAIL(results[3].id == 13);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 12);\n\t\t}\n\t\t\n\t\t\n\t\t{\n\t\t\t// Implicit, basic\n\t\t\t// enqueue\n\t\t\tConcurrentQueue<ThrowingMovable, Traits> q;\n\t\t\t\n\t\t\tThrowingMovable::reset();\n\t\t\t\n\t\t\tbool threw = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue(ThrowingMovable(1, true));\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 1);\n\t\t\t\tASSERT_OR_FAIL(m->moved);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(ThrowingMovable(2)));\n\t\t\tThrowingMovable result(-1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 2);\n\t\t\tASSERT_OR_FAIL(result.moved);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 3);\n\t\t\t\n\t\t\t// dequeue\n\t\t\tThrowingMovable::reset();\n\t\t\tq.enqueue(ThrowingMovable(10));\n\t\t\tq.enqueue(ThrowingMovable(11, false, true));\n\t\t\tq.enqueue(ThrowingMovable(12));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 3);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 10);\n\t\t\tthrew = false;\n\t\t\ttry {\n\t\t\t\tq.try_dequeue(result);\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tASSERT_OR_FAIL(m->id == 11);\n\t\t\t\tthrew = true;\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 
12);\n\t\t\tASSERT_OR_FAIL(result.moved);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\tq.enqueue(ThrowingMovable(13));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 13);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 8);\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Implicit, on and off block boundaries\n\t\t\t// enqueue\n\t\t\tConcurrentQueue<ThrowingMovable, Traits> q;\n\t\t\t\n\t\t\tThrowingMovable::reset();\n\t\t\t\n\t\t\tfor (int i = 0; i != 3; ++i) {\n\t\t\t\tq.enqueue(ThrowingMovable(i));\n\t\t\t}\n\t\t\tbool threw = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue(ThrowingMovable(3, true));\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 3);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 3);\n\t\t\t\n\t\t\tq.enqueue(ThrowingMovable(4));\n\t\t\tthrew = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue(ThrowingMovable(5, true));\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 5);\n\t\t\t\tASSERT_OR_FAIL(m->moved);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 4);\n\t\t\tq.enqueue(ThrowingMovable(6));\n\t\t\t\n\t\t\tThrowingMovable result(-1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 2);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 4);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 6);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 12);\n\t\t\t\n\t\t\t// 
dequeue\n\t\t\tThrowingMovable::reset();\n\t\t\tq.enqueue(ThrowingMovable(10, false, true));\n\t\t\tq.enqueue(ThrowingMovable(11));\n\t\t\tq.enqueue(ThrowingMovable(12));\n\t\t\tq.enqueue(ThrowingMovable(13, false, true));\n\t\t\tq.enqueue(ThrowingMovable(14, false, true));\n\t\t\tq.enqueue(ThrowingMovable(15, false, true));\n\t\t\tq.enqueue(ThrowingMovable(16));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 7);\n\t\t\t\n\t\t\tfor (int i = 10; i != 17; ++i) {\n\t\t\t\tif (i == 10 || (i >= 13 && i <= 15)) {\n\t\t\t\t\tthrew = false;\n\t\t\t\t\ttry {\n\t\t\t\t\t\tq.try_dequeue(result);\n\t\t\t\t\t}\n\t\t\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\t\t\tASSERT_OR_FAIL(m->id == i);\n\t\t\t\t\t\tASSERT_OR_FAIL(m->moved);\n\t\t\t\t\t\tthrew = true;\n\t\t\t\t\t}\n\t\t\t\t\tASSERT_OR_FAIL(threw);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\t\t\tASSERT_OR_FAIL(result.id == i);\n\t\t\t\t\tASSERT_OR_FAIL(result.moved);\n\t\t\t\t}\n\t\t\t\tASSERT_OR_FAIL(q.size_approx() == (std::uint32_t)(16 - i));\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\tq.enqueue(ThrowingMovable(20));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 20);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 16);\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Impplicit bulk\n\t\t\t// enqueue\n\t\t\tConcurrentQueue<ThrowingMovable, Traits> q;\n\t\t\t\n\t\t\tThrowingMovable::reset();\n\t\t\tstd::vector<ThrowingMovable> items;\n\t\t\titems.reserve(5);\n\t\t\titems.push_back(ThrowingMovable(1));\n\t\t\titems.push_back(ThrowingMovable(2));\n\t\t\titems.push_back(ThrowingMovable(3));\n\t\t\titems.push_back(ThrowingMovable(4));\n\t\t\titems.push_back(ThrowingMovable(5));\n\t\t\titems.back().throwOnCctor = true;\n\t\t\t\n\t\t\tbool threw = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue_bulk(std::make_move_iterator(items.begin()), 
5);\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 5);\n\t\t\t\tASSERT_OR_FAIL(m->copied);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t\tq.enqueue(ThrowingMovable(6));\n\t\t\t\n\t\t\tthrew = false;\n\t\t\ttry {\n\t\t\t\tq.enqueue_bulk(std::make_move_iterator(items.begin()), 5);\n\t\t\t}\n\t\t\tcatch (ThrowingMovable* m) {\n\t\t\t\tthrew = true;\n\t\t\t\tASSERT_OR_FAIL(m->id == 5);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\t\n\t\t\tThrowingMovable result(-1);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(result.id == 6);\n\t\t\tASSERT_OR_FAIL(result.moved);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 15);\n\t\t\t\n\t\t\t// dequeue\n\t\t\tThrowingMovable::reset();\n\t\t\tq.enqueue(ThrowingMovable(10));\n\t\t\tq.enqueue(ThrowingMovable(11));\n\t\t\tq.enqueue(ThrowingMovable(12));\n\t\t\tq.enqueue(ThrowingMovable(13));\n\t\t\tq.enqueue(ThrowingMovable(14, false, true, true));\t\t// std::back_inserter turns an assignment into a ctor call\n\t\t\tq.enqueue(ThrowingMovable(15));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 6);\n\t\t\t\n\t\t\tstd::vector<ThrowingMovable> results;\n\t\t\tresults.reserve(5);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(std::back_inserter(results), 2));\n\t\t\tASSERT_OR_FAIL(results.size() == 2);\n\t\t\tASSERT_OR_FAIL(results[0].id == 10);\n\t\t\tASSERT_OR_FAIL(results[1].id == 11);\n\t\t\tASSERT_OR_FAIL(results[0].moved);\n\t\t\tASSERT_OR_FAIL(results[1].moved);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 4);\n\t\t\tthrew = false;\n\t\t\ttry {\n\t\t\t\tq.try_dequeue_bulk(std::back_inserter(results), 4);\n\t\t\t}\n\t\t\tcatch (ThrowingMovable*) {\n\t\t\t\tthrew = true;\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(threw);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 
0);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(result));\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(std::back_inserter(results), 1) == 0);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(results.size() == 4);\n\t\t\tASSERT_OR_FAIL(results[2].id == 12);\n\t\t\tASSERT_OR_FAIL(results[3].id == 13);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() == 12);\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Threaded\n\t\t\tConcurrentQueue<ThrowingMovable, Traits> q;\n\t\t\tThrowingMovable::reset();\n\t\t\t\n\t\t\tstd::vector<SimpleThread> threads(6);\n\t\t\tfor (std::size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](std::size_t tid) {\n\t\t\t\t\tstd::vector<ThrowingMovable> inVec;\n\t\t\t\t\tinVec.push_back(ThrowingMovable(1));\n\t\t\t\t\tinVec.push_back(ThrowingMovable(2));\n\t\t\t\t\tinVec.push_back(ThrowingMovable(3));\n\t\t\t\t\t\n\t\t\t\t\tstd::vector<ThrowingMovable> outVec;\n\t\t\t\t\toutVec.push_back(ThrowingMovable(-1));\n\t\t\t\t\toutVec.push_back(ThrowingMovable(-1));\n\t\t\t\t\toutVec.push_back(ThrowingMovable(-1));\n\t\t\t\t\t\n\t\t\t\t\tProducerToken tok(q);\n\t\t\t\t\tThrowingMovable result(-1);\n\t\t\t\t\t\n\t\t\t\t\tfor (std::size_t i = 0; i != 8192; ++i) {\n\t\t\t\t\t\tauto magic = (tid + 1) * i + tid * 17 + i;\n\t\t\t\t\t\tauto op = magic & 7;\n\t\t\t\t\t\tauto ctorThrow = (magic & 0x10) != 0;\n\t\t\t\t\t\tauto assignThrow = (magic & 0x20) != 0;\n\t\t\t\t\t\tauto throwOnNextCctor = (magic & 0x40) != 0;\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tswitch (op) {\n\t\t\t\t\t\t\tcase 0:\n\t\t\t\t\t\t\t\tq.enqueue(tok, ThrowingMovable((int)i, ctorThrow, assignThrow, throwOnNextCctor));\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase 1:\n\t\t\t\t\t\t\t\tinVec[i & 3].throwOnCctor = ctorThrow;\n\t\t\t\t\t\t\t\tinVec[i & 3].throwOnAssignment = assignThrow;\n\t\t\t\t\t\t\t\tinVec[i & 3].throwOnSecondCctor = throwOnNextCctor;\n\t\t\t\t\t\t\t\tq.enqueue_bulk(tok, inVec.begin(), 3);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase 
2:\n\t\t\t\t\t\t\t\tq.enqueue(ThrowingMovable((int)i, ctorThrow, assignThrow, throwOnNextCctor));\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase 3:\n\t\t\t\t\t\t\t\tinVec[i & 3].throwOnCctor = ctorThrow;\n\t\t\t\t\t\t\t\tinVec[i & 3].throwOnAssignment = assignThrow;\n\t\t\t\t\t\t\t\tinVec[i & 3].throwOnSecondCctor = throwOnNextCctor;\n\t\t\t\t\t\t\t\tq.enqueue_bulk(inVec.begin(), 3);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase 4:\n\t\t\t\t\t\t\tcase 5:\n\t\t\t\t\t\t\t\tq.try_dequeue(result);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase 6:\n\t\t\t\t\t\t\tcase 7:\n\t\t\t\t\t\t\t\tq.try_dequeue_bulk(outVec.data(), 3);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcatch (ThrowingMovable*) {\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (std::size_t i = 0; i != threads.size(); ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tThrowingMovable result(-1);\n\t\t\twhile (true) {\n\t\t\t\ttry {\n\t\t\t\t\tif (!q.try_dequeue(result)) {\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcatch (ThrowingMovable*) {\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(ThrowingMovable::destroyCount() + 1 == ThrowingMovable::ctorCount());\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool test_threaded()\n\t{\n\t\ttypedef TestTraits<4> Traits;\n\t\tTraits::reset();\n\t\t\n\t\tbool inOrder = true;\n\t\t\n\t\t{\n\t\t\t// Single producer, single consumer\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tSimpleThread a([&]() {\n\t\t\t\tfor (int i = 0; i != 123456; ++i) {\n\t\t\t\t\tq.enqueue(t, i);\n\t\t\t\t}\n\t\t\t});\n\t\t\tSimpleThread b([&]() {\n\t\t\t\tint item;\n\t\t\t\tint prevItem = -1;\n\t\t\t\twhile (true) {\n\t\t\t\t\tif (q.try_dequeue_from_producer(t, item)) {\n\t\t\t\t\t\tif (item == 123455) {\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tinOrder = item == prevItem + 1 && inOrder;\n\t\t\t\t\t\tprevItem = 
item;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t\t\n\t\t\ta.join();\n\t\t\tb.join();\n\t\t}\n\t\tASSERT_OR_FAIL(inOrder);\n\t\t\n\t\t{\n\t\t\t// Single producer, multi consumer\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tSimpleThread a([&]() {\n\t\t\t\tfor (int i = 0; i != 123456; ++i) {\n\t\t\t\t\tq.enqueue(t, i);\n\t\t\t\t}\n\t\t\t});\n\t\t\tSimpleThread b([&]() {\n\t\t\t\tint item, prevItem = -1;\n\t\t\t\tfor (int i = 0; i != 123456; ++i) {\n\t\t\t\t\tif (q.try_dequeue_from_producer(t, item)) {\n\t\t\t\t\t\tinOrder = item > prevItem && inOrder;\n\t\t\t\t\t\tprevItem = item;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t\tSimpleThread c([&]() {\n\t\t\t\tint item;\n\t\t\t\tfor (int i = 0; i != 123456; ++i) q.try_dequeue_from_producer(t, item);\n\t\t\t});\n\t\t\tSimpleThread d([&]() {\n\t\t\t\tint item;\n\t\t\t\tfor (int i = 0; i != 123456; ++i) q.try_dequeue_from_producer(t, item);\n\t\t\t});\n\t\t\t\n\t\t\ta.join();\n\t\t\tb.join();\n\t\t\tc.join();\n\t\t\td.join();\n\t\t}\n\t\tASSERT_OR_FAIL(inOrder);\n\t\t\n\t\tASSERT_OR_FAIL(Traits::malloc_count() == Traits::free_count());\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool test_threaded_bulk()\n\t{\n\t\ttypedef TestTraits<2> Traits;\n\t\t\n\t\t// Enqueue bulk (implicit)\n\t\tTraits::reset();\n\t\t{\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tSimpleThread threads[2];\n\t\t\tbool success[2];\n\t\t\t\n\t\t\tint stuff[] = { 1, 2, 3, 4, 5 };\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\t\n\t\t\t\tif (i == 0) {\n\t\t\t\t\t// Enqueue bulk\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tfor (int k = 0; k != 2048; ++k) {\n\t\t\t\t\t\t\tsuccess[j] = q.enqueue_bulk(stuff, 5) && success[j];\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tint prevItem = 0;\n\t\t\t\t\t\tfor (int k = 0; k != 2048 * 5;) {\n\t\t\t\t\t\t\tif (q.try_dequeue(item)) 
{\n\t\t\t\t\t\t\t\tif (item != prevItem + 1) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItem = item;\n\t\t\t\t\t\t\t\tif (item == 5) {\n\t\t\t\t\t\t\t\t\tprevItem = 0;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t++k;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(success[0]);\n\t\t\tASSERT_OR_FAIL(success[1]);\n\t\t}\n\t\t\n\t\t// Enqueue bulk (while somebody is dequeueing (with tokens))\n\t\tTraits::reset();\n\t\t{\n\t\t\tConcurrentQueue<int, Traits> q;\n\t\t\tSimpleThread threads[2];\n\t\t\tbool success[2];\n\t\t\t\n\t\t\tint stuff[] = { 1, 2, 3, 4, 5 };\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\t\n\t\t\t\tif (i == 0) {\n\t\t\t\t\t// Enqueue bulk\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tProducerToken tok(q);\n\t\t\t\t\t\tfor (int k = 0; k != 2048; ++k) {\n\t\t\t\t\t\t\tsuccess[j] = q.enqueue_bulk(tok, stuff, 5) && success[j];\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tConsumerToken tok(q);\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tint prevItem = 0;\n\t\t\t\t\t\tfor (int k = 0; k != 2048 * 5;) {\n\t\t\t\t\t\t\tif (q.try_dequeue(tok, item)) {\n\t\t\t\t\t\t\t\tif (item != prevItem + 1) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItem = item;\n\t\t\t\t\t\t\t\tif (item == 5) {\n\t\t\t\t\t\t\t\t\tprevItem = 0;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t++k;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != 2; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(success[0]);\n\t\t\tASSERT_OR_FAIL(success[1]);\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\ttemplate<typename Traits>\n\tbool full_api()\n\t{\n\t\t// A simple test that exercises the full public API (just to make 
sure every function is implemented\n\t\t// and works on at least the most basic level)\n\t\t\n\t\t// enqueue(T const&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tCopyable original(12345);\n\t\t\tASSERT_OR_FAIL(q.enqueue(original));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// enqueue(T&&)\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tMoveable original(12345);\n\t\t\tASSERT_OR_FAIL(q.enqueue(std::move(original)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// enqueue(Token, T const&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tCopyable original(12345);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, original));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// enqueue(Token, T&&)\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tProducerToken 
t(q);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tMoveable original(12345);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, std::move(original)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_enqueue(T const&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tCopyable original(12345);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue(original));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_enqueue(T&&)\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.try_enqueue(Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tMoveable original(12345);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue(std::move(original)));\n\t\t\tMoveable 
item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.try_enqueue(Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_enqueue(Token, T const&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tCopyable original(12345);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue(t, original));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_enqueue(Token, T&&)\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue(t, Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tMoveable original(12345);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue(t, std::move(original)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue(t, Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 
12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// enqueue_bulk(It itemFirst, size_t count)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tCopyable original(12345);\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(&original, 1));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tMoveable original(12345);\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(std::make_move_iterator(&original), 1));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// enqueue_bulk(Token, It itemFirst, size_t count)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tCopyable original(12345);\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(t, &original, 1));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tMoveable original(12345);\n\t\t\tASSERT_OR_FAIL(q.enqueue_bulk(t, std::make_move_iterator(&original), 1));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_enqueue_bulk(It itemFirst, size_t count)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tCopyable original(12345);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue_bulk(&original, 1));\n\t\t\tCopyable 
item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tMoveable original(12345);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue_bulk(std::make_move_iterator(&original), 1));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_enqueue_bulk(Token, It itemFirst, size_t count)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tCopyable original(12345);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue_bulk(t, &original, 1));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tMoveable original(12345);\n\t\t\tASSERT_OR_FAIL(q.try_enqueue_bulk(t, std::make_move_iterator(&original), 1));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_dequeue(T&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item.id == 
12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_dequeue(Token, T&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tConsumerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(t, item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(t, item));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tConsumerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(t, item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(t, item));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_dequeue_from_producer(Token, T&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_from_producer(t, item));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_from_producer(t, item));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_from_producer(t, item));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// try_dequeue_bulk(T&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> 
q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(&item, 1) == 1);\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_bulk(&item, 1));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(&item, 1) == 1);\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_bulk(&item, 1));\n\t\t}\n\t\t\n\t\t// try_dequeue_bulk(Token, T&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tConsumerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(t, &item, 1));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_bulk(t, &item, 1));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_bulk(&item, 1));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tConsumerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk(t, &item, 1));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_bulk(t, &item, 1));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_bulk(&item, 1));\n\t\t}\n\t\t\n\t\t// try_dequeue_bulk_from_producer(Token, T&)\n\t\t{\n\t\t\tConcurrentQueue<Copyable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, Copyable(12345)));\n\t\t\tCopyable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk_from_producer(t, &item, 1));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_bulk_from_producer(t, &item, 
1));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t{\n\t\t\tConcurrentQueue<Moveable, Traits> q;\n\t\t\tProducerToken t(q);\n\t\t\tASSERT_OR_FAIL(q.enqueue(t, Moveable(12345)));\n\t\t\tMoveable item(0);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue_bulk_from_producer(t, &item, 1));\n\t\t\tASSERT_OR_FAIL(item.id == 12345);\n\t\t\tASSERT_OR_FAIL(item.moved);\n\t\t\tASSERT_OR_FAIL(!item.copied);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_bulk_from_producer(t, &item, 1));\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t}\n\t\t\n\t\t// size_approx()\n\t\t{\n\t\t\tConcurrentQueue<Foo, Traits> q;\n\t\t\tfor (int i = 0; i != 1234; ++i) {\n\t\t\t\tq.enqueue(Foo());\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1234);\n\t\t}\n\t\t\n\t\t// is_lock_free()\n\t\t{\n\t\t\tbool lockFree = ConcurrentQueue<Foo, Traits>::is_lock_free();\n#if defined(__amd64__) || defined(_M_X64) || defined(__x86_64__) || defined(_M_IX86) || defined(__i386__) || defined(_M_PPC) || defined(__powerpc__)\n\t\t\tASSERT_OR_FAIL(lockFree);\n#endif\n\t\t}\n\t\t\n\t\t// moving\n\t\t{\n\t\t\tConcurrentQueue<int, MallocTrackingTraits> q(4);\n\t\t\tProducerToken t(q);\n\t\t\tfor (int i = 0; i != 1233; ++i) {\n\t\t\t\tq.enqueue(i);\n\t\t\t}\n\t\t\tfor (int i = 1234; i != 5678; ++i) {\n\t\t\t\tq.enqueue(t, i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 5677);\n\t\t\t\n\t\t\tConcurrentQueue<int, MallocTrackingTraits> q2(std::move(q));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t\tASSERT_OR_FAIL(q2.size_approx() == 5677);\n\t\t\t\n\t\t\tq2.enqueue(t, 5678);\n\t\t\tq2.enqueue(1233);\n\t\t\tASSERT_OR_FAIL(q2.size_approx() == 5679);\n\t\t\t\n\t\t\tfor (int i = 1234; i != 0; --i) {\n\t\t\t\tq.enqueue(i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1234);\n\t\t\t\n\t\t\tint item;\n\t\t\tfor (int i = 0; i <= 5678; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q2.try_dequeue_non_interleaved(item));\n\t\t\t\tASSERT_OR_FAIL(item == 
i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q2.try_dequeue_non_interleaved(item));\n\t\t\tASSERT_OR_FAIL(q2.size_approx() == 0);\n\t\t\t\n\t\t\tfor (int i = 1234; i != 0; --i) {\n\t\t\t\tASSERT_OR_FAIL(q.try_dequeue_non_interleaved(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue_non_interleaved(item));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t}\n\t\t\n\t\t// swapping\n\t\t{\n\t\t\tConcurrentQueue<int, MallocTrackingTraits> q1, q2, q3;\n\t\t\tProducerToken t1(q1), t2(q2), t3(q3);\n\t\t\t\n\t\t\tfor (int i = 1234; i != 5678; ++i) {\n\t\t\t\tq1.enqueue(t1, i);\n\t\t\t}\n\t\t\tfor (int i = 21234; i != 25678; ++i) {\n\t\t\t\tq2.enqueue(t2, i);\n\t\t\t}\n\t\t\tfor (int i = 31234; i != 35678; ++i) {\n\t\t\t\tq3.enqueue(t3, i);\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 1234; ++i) {\n\t\t\t\tq1.enqueue(i);\n\t\t\t}\n\t\t\tfor (int i = 20000; i != 21234; ++i) {\n\t\t\t\tq2.enqueue(i);\n\t\t\t}\n\t\t\tfor (int i = 30000; i != 31234; ++i) {\n\t\t\t\tq3.enqueue(i);\n\t\t\t}\n\t\t\t\n\t\t\t{\n\t\t\t\tConcurrentQueue<int, MallocTrackingTraits> temp;\n\t\t\t\ttemp = std::move(q1);\n\t\t\t\tq1 = std::move(q2);\n\t\t\t\tq2 = std::move(temp);\n\t\t\t}\n\t\t\t// q1 in q2, q2 in q1\n\t\t\t\n\t\t\tswap(q2, q3);\t// q1 in q3, q3 in q2\n\t\t\tq1.swap(q2);\t// q2 in q2, q3 in q1\n\t\t\tq1.swap(q2);\t// q3 in q2, q2 in q1\n\t\t\tq1.swap(q2);\t// q2 in q2, q3 in q1\n\t\t\tq2.swap(q3);\t// q1 in q2, q2 in q3\n\t\t\t\n\t\t\t// So now q1 is in q2, q2 is in q3, and q3 is in q1\n\t\t\tint item;\n\t\t\tfor (int i = 30000; i != 35678; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q1.try_dequeue_non_interleaved(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q1.try_dequeue_non_interleaved(item));\n\t\t\tASSERT_OR_FAIL(q1.size_approx() == 0);\n\t\t\t\n\t\t\tfor (int i = 0; i != 5678; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q2.try_dequeue_non_interleaved(item));\n\t\t\t\tASSERT_OR_FAIL(item == 
i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q2.try_dequeue_non_interleaved(item));\n\t\t\tASSERT_OR_FAIL(q2.size_approx() == 0);\n\t\t\t\n\t\t\tfor (int i = 20000; i != 25678; ++i) {\n\t\t\t\tASSERT_OR_FAIL(q3.try_dequeue_non_interleaved(item));\n\t\t\t\tASSERT_OR_FAIL(item == i);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(!q3.try_dequeue_non_interleaved(item));\n\t\t\tASSERT_OR_FAIL(q3.size_approx() == 0);\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\t\n\tbool blocking_wrappers()\n\t{\n\t\ttypedef BlockingConcurrentQueue<int, MallocTrackingTraits> Q;\n\t\tASSERT_OR_FAIL((Q::is_lock_free() == ConcurrentQueue<int, MallocTrackingTraits>::is_lock_free()));\n\t\t\n\t\t// Moving\n\t\t{\n\t\t\tQ a, b, c;\n\t\t\ta = std::move(b);\n\t\t\tb = std::move(c);\n\t\t\ta = std::move(a);\n\t\t\tc = std::move(b);\n\t\t\tb = Q(std::move(b));\n\t\t\tusing std::swap;\n\t\t\tswap(a, b);\n\t\t\ta.swap(c);\n\t\t\tc.swap(c);\n\t\t}\n\t\t\n\t\t// Implicit\n\t\t{\n\t\t\tQ q;\n\t\t\tASSERT_OR_FAIL(q.enqueue(1));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(item == 1);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(2));\n\t\t\tASSERT_OR_FAIL(q.enqueue(3));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 2);\n\t\t\tq.wait_dequeue(item);\n\t\t\tASSERT_OR_FAIL(item == 2);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\tq.wait_dequeue(item);\n\t\t\tASSERT_OR_FAIL(item == 3);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(item));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t}\n\t\t\n\t\t// Implicit threaded\n\t\t{\n\t\t\tQ q;\n\t\t\tconst int THREADS = 8;\n\t\t\tSimpleThread threads[THREADS];\n\t\t\tbool success[THREADS];\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\t\n\t\t\t\tif (i % 2 == 0) {\n\t\t\t\t\t// Enqueue\n\t\t\t\t\tif (i % 4 == 0) {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tint 
stuff[5];\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048; ++k) {\n\t\t\t\t\t\t\t\tfor (int x = 0; x != 5; ++x) {\n\t\t\t\t\t\t\t\t\tstuff[x] = (j << 16) | (k * 5 + x);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue_bulk(stuff, 5) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue((j << 16) | k) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tstd::vector<int> prevItems(THREADS, -1);\n\t\t\t\t\t\tif (j % 4 == 1) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048 * 5; ++k) {\n\t\t\t\t\t\t\t\tif (q.try_dequeue(item)) {\n\t\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tint items[6];\n\t\t\t\t\t\t\tfor (int k = 0; k < 4096;  ++k) {\n\t\t\t\t\t\t\t\tif (std::size_t dequeued = q.try_dequeue_bulk(items, 6)) {\n\t\t\t\t\t\t\t\t\tfor (std::size_t x = 0; x != dequeued; ++x) {\n\t\t\t\t\t\t\t\t\t\titem = items[x];\n\t\t\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Implicit threaded, 
blocking\n\t\t{\n\t\t\tQ q;\n\t\t\tconst int THREADS = 8;\n\t\t\tSimpleThread threads[THREADS];\n\t\t\tbool success[THREADS];\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\t\n\t\t\t\tif (i % 2 == 0) {\n\t\t\t\t\t// Enqueue\n\t\t\t\t\tif (i % 4 == 0) {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tint stuff[5];\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048; ++k) {\n\t\t\t\t\t\t\t\tfor (int x = 0; x != 5; ++x) {\n\t\t\t\t\t\t\t\t\tstuff[x] = (j << 16) | (k * 5 + x);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue_bulk(stuff, 5) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue((j << 16) | k) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tstd::vector<int> prevItems(THREADS, -1);\n\t\t\t\t\t\tif (j % 4 == 1) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048 * 5; ++k) {\n\t\t\t\t\t\t\t\tq.wait_dequeue(item);\n\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tint items[6];\n\t\t\t\t\t\t\tint k;\n\t\t\t\t\t\t\tfor (k = 0; k < 4090; ) {\n\t\t\t\t\t\t\t\tif (std::size_t dequeued = q.wait_dequeue_bulk(items, 6)) {\n\t\t\t\t\t\t\t\t\tfor (std::size_t x = 0; x != dequeued; ++x) {\n\t\t\t\t\t\t\t\t\t\titem = items[x];\n\t\t\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tprevItems[thread] = 
item;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tk += (int)dequeued;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfor (; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tq.wait_dequeue(item);\n\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t}\n\t\t\n\t\t// Explicit\n\t\t{\n\t\t\tQ q;\n\t\t\tProducerToken pt(q);\n\t\t\tASSERT_OR_FAIL(q.enqueue(pt, 1));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\tint item;\n\t\t\tConsumerToken ct(q);\n\t\t\tASSERT_OR_FAIL(q.try_dequeue(ct, item));\n\t\t\tASSERT_OR_FAIL(item == 1);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(ct, item));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(q.enqueue(pt, 2));\n\t\t\tASSERT_OR_FAIL(q.enqueue(pt, 3));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 2);\n\t\t\tq.wait_dequeue(ct, item);\n\t\t\tASSERT_OR_FAIL(item == 2);\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 1);\n\t\t\tq.wait_dequeue(ct, item);\n\t\t\tASSERT_OR_FAIL(item == 3);\n\t\t\tASSERT_OR_FAIL(!q.try_dequeue(ct, item));\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t}\n\t\t\n\t\t// Explicit threaded\n\t\t{\n\t\t\tQ q;\n\t\t\tconst int THREADS = 8;\n\t\t\tSimpleThread threads[THREADS];\n\t\t\tbool success[THREADS];\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\t\n\t\t\t\tif (i % 2 == 0) {\n\t\t\t\t\t// Enqueue\n\t\t\t\t\tif (i % 4 == 0) {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tProducerToken 
t(q);\n\t\t\t\t\t\t\tint stuff[5];\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048; ++k) {\n\t\t\t\t\t\t\t\tfor (int x = 0; x != 5; ++x) {\n\t\t\t\t\t\t\t\t\tstuff[x] = (j << 16) | (k * 5 + x);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue_bulk(t, stuff, 5) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tProducerToken t(q);\n\t\t\t\t\t\t\tfor (int k = 0; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue(t, (j << 16) | k) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tConsumerToken t(q);\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tstd::vector<int> prevItems(THREADS, -1);\n\t\t\t\t\t\tif (j % 4 == 1) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048 * 5; ++k) {\n\t\t\t\t\t\t\t\tif (q.try_dequeue(t, item)) {\n\t\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tint items[6];\n\t\t\t\t\t\t\tfor (int k = 0; k < 4096;  ++k) {\n\t\t\t\t\t\t\t\tif (std::size_t dequeued = q.try_dequeue_bulk(t, items, 6)) {\n\t\t\t\t\t\t\t\t\tfor (std::size_t x = 0; x != dequeued; ++x) {\n\t\t\t\t\t\t\t\t\t\titem = items[x];\n\t\t\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != 
THREADS; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Explicit threaded, blocking\n\t\t{\n\t\t\tQ q;\n\t\t\tconst int THREADS = 8;\n\t\t\tSimpleThread threads[THREADS];\n\t\t\tbool success[THREADS];\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\t\n\t\t\t\tif (i % 2 == 0) {\n\t\t\t\t\t// Enqueue\n\t\t\t\t\tif (i % 4 == 0) {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tProducerToken t(q);\n\t\t\t\t\t\t\tint stuff[5];\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048; ++k) {\n\t\t\t\t\t\t\t\tfor (int x = 0; x != 5; ++x) {\n\t\t\t\t\t\t\t\t\tstuff[x] = (j << 16) | (k * 5 + x);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue_bulk(t, stuff, 5) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tProducerToken t(q);\n\t\t\t\t\t\t\tfor (int k = 0; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue(t, (j << 16) | k) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tConsumerToken t(q);\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tstd::vector<int> prevItems(THREADS, -1);\n\t\t\t\t\t\tif (j % 4 == 1) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048 * 5; ++k) {\n\t\t\t\t\t\t\t\tq.wait_dequeue(t, item);\n\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tint items[6];\n\t\t\t\t\t\t\tint k;\n\t\t\t\t\t\t\tfor (k = 0; k < 4090; ) {\n\t\t\t\t\t\t\t\tif (std::size_t dequeued = q.wait_dequeue_bulk(t, items, 6)) {\n\t\t\t\t\t\t\t\t\tfor (std::size_t x = 0; x != dequeued; ++x) {\n\t\t\t\t\t\t\t\t\t\titem = items[x];\n\t\t\t\t\t\t\t\t\t\tint 
thread = item >> 16;\n\t\t\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tk += (int)dequeued;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfor (; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tq.wait_dequeue(t, item);\n\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool timed_blocking_wrappers()\n\t{\n\t\ttypedef BlockingConcurrentQueue<int, MallocTrackingTraits> Q;\n\t\t\n\t\t// Implicit\n\t\t{\n\t\t\tQ q;\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(item, 0));\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(item, 1));\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(item, 100));\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(item, std::chrono::milliseconds(1)));\n\t\t\tq.enqueue(123);\n\t\t\tASSERT_OR_FAIL(q.wait_dequeue_timed(item, 0));\n\t\t\tASSERT_OR_FAIL(item == 123);\n\t\t}\n\t\t\n\t\t// Implicit, threaded\n\t\t{\n\t\t\tQ q;\n\t\t\tconst int THREADS = 8;\n\t\t\tSimpleThread threads[THREADS];\n\t\t\tbool success[THREADS];\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\t\n\t\t\t\tif (i % 2 == 0) {\n\t\t\t\t\t// Enqueue\n\t\t\t\t\tif (i % 4 == 0) {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tint stuff[5];\n\t\t\t\t\t\t\tfor 
(int k = 0; k != 2048; ++k) {\n\t\t\t\t\t\t\t\tfor (int x = 0; x != 5; ++x) {\n\t\t\t\t\t\t\t\t\tstuff[x] = (j << 16) | (k * 5 + x);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue_bulk(stuff, 5) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue((j << 16) | k) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tstd::vector<int> prevItems(THREADS, -1);\n\t\t\t\t\t\tif (j % 4 == 1) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048 * 5; ++k) {\n\t\t\t\t\t\t\t\tif (!q.wait_dequeue_timed(item, 1000)) {\n\t\t\t\t\t\t\t\t\t--k;\n\t\t\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tint items[6];\n\t\t\t\t\t\t\tint k;\n\t\t\t\t\t\t\tfor (k = 0; k < 4090; ) {\n\t\t\t\t\t\t\t\tif (std::size_t dequeued = q.wait_dequeue_bulk_timed(items, 6, 1000)) {\n\t\t\t\t\t\t\t\t\tfor (std::size_t x = 0; x != dequeued; ++x) {\n\t\t\t\t\t\t\t\t\t\titem = items[x];\n\t\t\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tk += (int)dequeued;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfor (; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tif (!q.wait_dequeue_timed(item, std::chrono::hours(1))) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = 
false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t\t\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(item, 0));\n\t\t}\n\t\t\n\t\t// Explicit\n\t\t{\n\t\t\tQ q;\n\t\t\tProducerToken ptok(q);\n\t\t\tConsumerToken ctok(q);\n\t\t\tint item;\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(ctok, item, 0));\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(ctok, item, 1));\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(ctok, item, 100));\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(ctok, item, std::chrono::milliseconds(1)));\n\t\t\tq.enqueue(ptok, 123);\n\t\t\tASSERT_OR_FAIL(q.wait_dequeue_timed(ctok, item, 0));\n\t\t\tASSERT_OR_FAIL(item == 123);\n\t\t}\n\t\t\n\t\t// Explicit, threaded\n\t\t{\n\t\t\tQ q;\n\t\t\tconst int THREADS = 8;\n\t\t\tSimpleThread threads[THREADS];\n\t\t\tbool success[THREADS];\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tsuccess[i] = true;\n\t\t\t\t\n\t\t\t\tif (i % 2 == 0) {\n\t\t\t\t\t// Enqueue\n\t\t\t\t\tif (i % 4 == 0) {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tProducerToken tok(q);\n\t\t\t\t\t\t\tint stuff[5];\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048; ++k) {\n\t\t\t\t\t\t\t\tfor (int x = 0; x != 5; ++x) {\n\t\t\t\t\t\t\t\t\tstuff[x] = (j << 16) | (k * 5 + x);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue_bulk(tok, stuff, 5) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\t\tProducerToken 
tok(q);\n\t\t\t\t\t\t\tfor (int k = 0; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tsuccess[j] = q.enqueue(tok, (j << 16) | k) && success[j];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, i);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tthreads[i] = SimpleThread([&](int j) {\n\t\t\t\t\t\tint item;\n\t\t\t\t\t\tstd::vector<int> prevItems(THREADS, -1);\n\t\t\t\t\t\tConsumerToken tok(q);\n\t\t\t\t\t\tif (j % 4 == 1) {\n\t\t\t\t\t\t\tfor (int k = 0; k != 2048 * 5; ++k) {\n\t\t\t\t\t\t\t\tif (!q.wait_dequeue_timed(tok, item, 1000)) {\n\t\t\t\t\t\t\t\t\t--k;\n\t\t\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tint items[6];\n\t\t\t\t\t\t\tint k;\n\t\t\t\t\t\t\tfor (k = 0; k < 4090; ) {\n\t\t\t\t\t\t\t\tif (std::size_t dequeued = q.wait_dequeue_bulk_timed(tok, items, 6, 1000)) {\n\t\t\t\t\t\t\t\t\tfor (std::size_t x = 0; x != dequeued; ++x) {\n\t\t\t\t\t\t\t\t\t\titem = items[x];\n\t\t\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tk += (int)dequeued;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfor (; k != 4096; ++k) {\n\t\t\t\t\t\t\t\tif (!q.wait_dequeue_timed(tok, item, std::chrono::hours(1))) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tint thread = item >> 16;\n\t\t\t\t\t\t\t\titem &= 0xffff;\n\t\t\t\t\t\t\t\tif (item <= prevItems[thread]) {\n\t\t\t\t\t\t\t\t\tsuccess[j] = false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tprevItems[thread] = item;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, 
i);\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tthreads[i].join();\n\t\t\t}\n\t\t\t\n\t\t\tfor (int i = 0; i != THREADS; ++i) {\n\t\t\t\tASSERT_OR_FAIL(success[i]);\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(q.size_approx() == 0);\n\t\t\t\n\t\t\tint item;\n\t\t\tConsumerToken tok(q);\n\t\t\tASSERT_OR_FAIL(!q.wait_dequeue_timed(tok, item, 0));\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tstruct TestListItem : corealgos::ListItem\n\t{\n\t\tint value;\n\t\t\n\t\tTestListItem()\n\t\t\t: value(0)\n\t\t{\n\t\t\tctorCount().fetch_add(1, std::memory_order_relaxed);\n\t\t}\n\t\t\n\t\texplicit TestListItem(int value)\n\t\t\t: value(value)\n\t\t{\n\t\t\tctorCount().fetch_add(1, std::memory_order_relaxed);\n\t\t}\n\t\t\n\t\t~TestListItem()\n\t\t{\n\t\t\tdtorCount().fetch_add(1, std::memory_order_relaxed);\n\t\t}\n\t\t\n\t\tinline TestListItem* prev(std::memory_order order = std::memory_order_relaxed) const\n\t\t{\n\t\t\treturn static_cast<TestListItem*>(concurrentListPrev.load(order));\n\t\t}\n\t\t\n\t\t\n\t\tinline static void reset()\n\t\t{\n\t\t\tctorCount().store(0, std::memory_order_relaxed);\n\t\t\tdtorCount().store(0, std::memory_order_relaxed);\n\t\t}\n\t\t\n\t\tinline static size_t constructed() { return ctorCount().load(std::memory_order_relaxed); }\n\t\tinline static size_t destructed() { return dtorCount().load(std::memory_order_relaxed); }\n\t\t\n\tprivate:\n\t\tinline static std::atomic<size_t>& ctorCount() { static std::atomic<size_t> count(0); return count; }\n\t\tinline static std::atomic<size_t>& dtorCount() { static std::atomic<size_t> count(0); return count; }\n\t};\n\t\n\tbool core_add_only_list()\n\t{\n\t\tauto destroyList = [](corealgos::ConcurrentAddOnlyList<TestListItem>& list) {\n\t\t\tsize_t count = 0;\n\t\t\t\n\t\t\tauto tail = list.tail();\n\t\t\twhile (tail != nullptr) {\n\t\t\t\tauto next = tail->prev();\n\t\t\t\tdelete tail;\n\t\t\t\t++count;\n\t\t\t\ttail = next;\n\t\t\t}\n\t\t\treturn 
count;\n\t\t};\n\t\t\n\t\t{\n\t\t\tcorealgos::ConcurrentAddOnlyList<TestListItem> list;\n\t\t\tASSERT_OR_FAIL(list.tail() == nullptr);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(destroyList(list) == 0);\n\t\t}\n\t\t\n\t\t{\n\t\t\tcorealgos::ConcurrentAddOnlyList<TestListItem> list;\n\t\t\tfor (int i = 0; i != 1000; ++i) {\n\t\t\t\tlist.add(new TestListItem(i));\n\t\t\t}\n\t\t\tint i = 999;\n\t\t\tfor (auto tail = list.tail(); tail != nullptr; tail = tail->prev()) {\n\t\t\t\tASSERT_OR_FAIL(i == tail->value);\n\t\t\t\t--i;\n\t\t\t}\n\t\t\tASSERT_OR_FAIL(i == -1);\n\t\t\t\n\t\t\tASSERT_OR_FAIL(destroyList(list) == 1000);\n\t\t}\n\t\t\n\t\tfor (int repeats = 0; repeats != 10; ++repeats) {\n\t\t\tcorealgos::ConcurrentAddOnlyList<TestListItem> list;\n\t\t\tstd::vector<SimpleThread> threads(8);\n\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid] = SimpleThread([&](size_t tid) {\n\t\t\t\t\tfor (int i = 0; i != 1000; ++i) {\n\t\t\t\t\t\tlist.add(new TestListItem((int)((tid << 16) | i)));\n\t\t\t\t\t}\n\t\t\t\t}, tid);\n\t\t\t}\n\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\tthreads[tid].join();\n\t\t\t}\n\t\t\t\n\t\t\tstd::vector<int> prevItems(threads.size());\n\t\t\tfor (size_t i = 0; i != prevItems.size(); ++i) {\n\t\t\t\tprevItems[i] = 1000;\n\t\t\t}\n\t\t\tfor (auto tail = list.tail(); tail != nullptr; tail = tail->prev()) {\n\t\t\t\tauto tid = tail->value >> 16;\n\t\t\t\tauto i = tail->value & ((1 << 16) - 1);\n\t\t\t\tASSERT_OR_FAIL(prevItems[tid] == i + 1);\n\t\t\t\tprevItems[tid] = i;\n\t\t\t}\n\t\t\t\n\t\t\tASSERT_OR_FAIL(destroyList(list) == 1000 * threads.size());\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool core_thread_local()\n\t{\n\t\tTestListItem::reset();\n\t\t{\n\t\t\tcorealgos::ThreadLocal<TestListItem> local(4);\n\t\t}\n\t\tASSERT_OR_FAIL(TestListItem::constructed() == 0);\n\t\tASSERT_OR_FAIL(TestListItem::destructed() == 0);\n\t\t\n\t\tTestListItem::reset();\n\t\t{\n\t\t\tcorealgos::ThreadLocal<TestListItem> 
local(4);\n\t\t\tlocal.get_or_create();\n\t\t}\n\t\tASSERT_OR_FAIL(TestListItem::constructed() == 1);\n\t\tASSERT_OR_FAIL(TestListItem::destructed() == 1);\n\t\t\n\t\tTestListItem::reset();\n\t\t{\n\t\t\tcorealgos::ThreadLocal<TestListItem> local(4);\n\t\t\tauto item = local.get_or_create();\n\t\t\titem->value = 7;\n\t\t\titem = local.get_or_create();\n\t\t\tASSERT_OR_FAIL(item->value == 7);\n\t\t}\n\t\tASSERT_OR_FAIL(TestListItem::constructed() == 1);\n\t\tASSERT_OR_FAIL(TestListItem::destructed() == 1);\n\t\t\n\t\t\n\t\tfor (size_t initialSize = 1; initialSize <= 4; initialSize <<= 1) {\n\t\t\tfor (int reps = 0; reps != 20; ++reps) {\n\t\t\t\tTestListItem::reset();\n\t\t\t\t{\n\t\t\t\t\tcorealgos::ThreadLocal<TestListItem> local(initialSize);\n\t\t\t\t\tstd::vector<SimpleThread> threads(5 * initialSize);\n\t\t\t\t\tstd::vector<bool> failed(threads.size());\n\t\t\t\t\tstd::atomic<std::size_t> done(0);\n\t\t\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\t\tthreads[tid] = SimpleThread([&](size_t tid) {\n\t\t\t\t\t\t\tfailed[tid] = false;\n\t\t\t\t\t\t\tauto item = local.get_or_create();\n\t\t\t\t\t\t\titem->value = (int)tid;\n\t\t\t\t\t\t\tfor (int i = 0; i != 1024; ++i) {\n\t\t\t\t\t\t\t\tauto item = local.get_or_create();\n\t\t\t\t\t\t\t\tif (item->value != (int)tid) {\n\t\t\t\t\t\t\t\t\tfailed[tid] = true;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tdone.fetch_add(1, std::memory_order_seq_cst);\n\t\t\t\t\t\t\twhile (done.load(std::memory_order_relaxed) != threads.size()) {\n\t\t\t\t\t\t\t\tmoodycamel::sleep(1);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, tid);\n\t\t\t\t\t}\n\t\t\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\t\tthreads[tid].join();\n\t\t\t\t\t\tASSERT_OR_FAIL(!failed[tid]);\n\t\t\t\t\t}\n\t\t\t\t\tASSERT_OR_FAIL(TestListItem::constructed() == 5 * initialSize);\n\t\t\t\t}\n\t\t\t\tASSERT_OR_FAIL(TestListItem::destructed() == 5 * initialSize);\n\t\t\t}\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tstruct TestNode 
: corealgos::FreeListNode<TestNode>\n\t{\n\t\tint value;\n\t\tTestNode() { }\n\t\texplicit TestNode(int value) : value(value) { }\n\t};\n\t\n\tbool core_free_list()\n\t{\n\t\t{\n\t\t\t// Basic\n\t\t\tcorealgos::FreeList<TestNode> freeList;\n\t\t\tASSERT_OR_FAIL(freeList.try_get() == nullptr);\n\t\t\t\n\t\t\tfreeList.add(new TestNode(7));\n\t\t\tTestNode* node = freeList.try_get();\n\t\t\tASSERT_OR_FAIL(node != nullptr);\n\t\t\tASSERT_OR_FAIL(node->value == 7);\n\t\t\tASSERT_OR_FAIL(freeList.try_get() == nullptr);\n\t\t\t\n\t\t\tfreeList.add(node);\n\t\t\tnode = freeList.try_get();\n\t\t\tASSERT_OR_FAIL(node != nullptr);\n\t\t\tASSERT_OR_FAIL(node->value == 7);\n\t\t\tASSERT_OR_FAIL(freeList.try_get() == nullptr);\n\t\t\tdelete node;\n\t\t}\n\t\t\n\t\t{\n\t\t\t// Multi-threaded. Tests ABA too.\n\t\t\tfor (int rep = 0; rep != 10; ++rep) {\n\t\t\t\tcorealgos::FreeList<TestNode> freeList;\n\t\t\t\tstd::vector<SimpleThread> threads(rep < 8 ? 4 : 16);\n\t\t\t\tstd::vector<bool> failed(threads.size());\n\t\t\t\tstd::vector<TestNode> initialNodes(threads.size());\n\t\t\t\tconst int OP_COUNT = 2048;\n\t\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\tthreads[tid] = SimpleThread([&](size_t tid) {\n\t\t\t\t\t\tstd::vector<bool> seenValues(threads.size() * OP_COUNT, false);\n\t\t\t\t\t\tfailed[tid] = false;\n\t\t\t\t\t\tTestNode* node = &initialNodes[tid];\n\t\t\t\t\t\tnode->value = ((int)tid << 20) | 1;\n\t\t\t\t\t\tfreeList.add(node);\n\t\t\t\t\t\tfor (int i = 1; i != OP_COUNT - 1; ++i) {\n\t\t\t\t\t\t\tnode = freeList.try_get();\n\t\t\t\t\t\t\tif (node != nullptr) {\n\t\t\t\t\t\t\t\tauto seen = seenValues.begin() + ((node->value >> 20) * OP_COUNT + (node->value & 0xFFFFF));\n\t\t\t\t\t\t\t\tif (*seen) {\n\t\t\t\t\t\t\t\t\tfailed[tid] = true;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t*seen = true;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tnode->value = ((int)tid << 20) | (i + 1);\n\t\t\t\t\t\t\t\tfreeList.add(node);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, 
tid);\n\t\t\t\t}\n\t\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\tthreads[tid].join();\n\t\t\t\t\tASSERT_OR_FAIL(!failed[tid]);\n\t\t\t\t}\n\t\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\tauto node = freeList.try_get();\n\t\t\t\t\tASSERT_OR_FAIL(node != nullptr);\n\t\t\t\t\tASSERT_OR_FAIL(node->value != -1);\n\t\t\t\t\tnode->value = -1;\n\t\t\t\t}\n\t\t\t\tauto node = freeList.try_get();\n\t\t\t\tASSERT_OR_FAIL(node == nullptr);\n\t\t\t}\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n\t\n\tbool core_spmc_hash()\n\t{\n\t\t{\n\t\t\tfor (int rep = 0; rep != 20; ++rep) {\n\t\t\t\tcorealgos::SPMCSequentialHashMap<int> hash(rep < 10 ? 2 : 4);\n\t\t\t\tstd::vector<SimpleThread> threads(rep < 12 ? 4 : 16);\n\t\t\t\tstd::vector<bool> failed(threads.size());\n\t\t\t\t\n\t\t\t\tconst int MAX_ENTRIES = 4096;\n\t\t\t\tstd::vector<int> values(MAX_ENTRIES);\n\t\t\t\tstd::array<std::atomic<int>, MAX_ENTRIES> useCounts;\n\t\t\t\tstd::array<std::atomic<bool>, MAX_ENTRIES> removed;\n\t\t\t\t\n\t\t\t\tfor (std::size_t i = 0; i != useCounts.size(); ++i) {\n\t\t\t\t\tuseCounts[i].store(0, std::memory_order_relaxed);\n\t\t\t\t\tremoved[i].store(false, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\tthreads[tid] = SimpleThread([&](size_t tid) {\n\t\t\t\t\t\tfailed[tid] = false;\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (tid == 0) {\n\t\t\t\t\t\t\t// Producer thread\n\t\t\t\t\t\t\tfor (int i = 0; i != MAX_ENTRIES; ++i) {\n\t\t\t\t\t\t\t\tvalues[i] = i;\n\t\t\t\t\t\t\t\thash.insert(i, &values[i]);\n\t\t\t\t\t\t\t\tuseCounts[i].store((int)threads.size() / 2, std::memory_order_release);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t// One of the consumer threads\n\t\t\t\t\t\t\tfor (int i = MAX_ENTRIES * 2; i != 0; --i) {\t// Purposefully off-by-lots\n\t\t\t\t\t\t\t\tint useCount = -1;\n\t\t\t\t\t\t\t\tif (i < MAX_ENTRIES) {\n\t\t\t\t\t\t\t\t\tuseCount = 
useCounts[i].fetch_add(-1, std::memory_order_acquire);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tint* val;\n\t\t\t\t\t\t\t\tif (useCount > 0) {\n\t\t\t\t\t\t\t\t\tval = hash.find(i);\n\t\t\t\t\t\t\t\t\tbool isRemoved = removed[i].load(std::memory_order_relaxed);\n\t\t\t\t\t\t\t\t\tassert(val == nullptr || *val == *val);\t\t// Find segfaults\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t// We read the use count again; if it's still > 0, the item must have been in\n\t\t\t\t\t\t\t\t\t// the hash during the entire call to find(), so we can check its value\n\t\t\t\t\t\t\t\t\tauto currentUseCount = useCounts[i].fetch_add(0, std::memory_order_release);\n\t\t\t\t\t\t\t\t\tif ((currentUseCount > 0 || (currentUseCount == 0 && useCount == 1)) && (val == nullptr || *val != i || isRemoved)) {\n\t\t\t\t\t\t\t\t\t\tfailed[tid] = true;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif (useCount == 1) {\n\t\t\t\t\t\t\t\t\tval = hash.remove(i);\n\t\t\t\t\t\t\t\t\tif (val == nullptr || *val != i || removed[i].load(std::memory_order_relaxed)) {\n\t\t\t\t\t\t\t\t\t\tfailed[tid] = true;\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tremoved[i].store(true, std::memory_order_release);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, tid);\n\t\t\t\t}\n\t\t\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\t\t\tthreads[tid].join();\n\t\t\t\t\tASSERT_OR_FAIL(!failed[tid]);\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != MAX_ENTRIES; ++i) {\n\t\t\t\t\tauto val = hash.find(i);\n\t\t\t\t\tif (val != nullptr) {\n\t\t\t\t\t\tASSERT_OR_FAIL(&values[i] == val && *val == i && !removed[i].load(std::memory_order_relaxed));\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tASSERT_OR_FAIL(removed[i].load(std::memory_order_relaxed));\n\t\t\t\t\t}\n\t\t\t\t\tauto removed = hash.remove(i);\n\t\t\t\t\tASSERT_OR_FAIL(removed == val);\n\t\t\t\t}\n\t\t\t\tfor (int i = 0; i != MAX_ENTRIES; ++i) {\n\t\t\t\t\tASSERT_OR_FAIL(hash.find(i) == nullptr);\n\t\t\t\t\tASSERT_OR_FAIL(hash.remove(i) 
== nullptr);\n\t\t\t\t}\n\t\t\t\tASSERT_OR_FAIL(hash.find(MAX_ENTRIES) == nullptr);\n\t\t\t\tASSERT_OR_FAIL(hash.remove(MAX_ENTRIES) == nullptr);\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n\t\n\tbool explicit_strings_threaded()\n\t{\n\t\tstd::vector<SimpleThread> threads(8);\n\t\tConcurrentQueue<std::string, MallocTrackingTraits> q(1024 * 1024);\n\t\t\n\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\tthreads[tid] = SimpleThread([&](size_t tid) {\n\t\t\t\tconst size_t ITERATIONS = 100 * 1024;\n\t\t\t\tif (tid % 2 == 0) {\n\t\t\t\t\t// Produce\n\t\t\t\t\tProducerToken t(q);\n\t\t\t\t\tfor (size_t i = 0; i != ITERATIONS; ++i) {\n\t\t\t\t\t\tq.enqueue(t, std::string(\"banana\", i % 6));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Consume\n\t\t\t\t\tstd::string item;\n\t\t\t\t\tfor (size_t i = 0; i != ITERATIONS / 2; ++i) {\n\t\t\t\t\t\tq.try_dequeue(item);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, tid);\n\t\t}\n\t\tfor (size_t tid = 0; tid != threads.size(); ++tid) {\n\t\t\tthreads[tid].join();\n\t\t}\n\t\t\n\t\treturn true;\n\t}\n};\n\n}\n\n\nvoid printTests(ConcurrentQueueTests const& tests)\n{\n\tstd::printf(\"   Supported tests are:\\n\");\n\t\n\tstd::vector<std::string> names;\n\ttests.getAllTestNames(names);\n\tfor (auto it = names.cbegin(); it != names.cend(); ++it) {\n\t\tstd::printf(\"      %s\\n\", it->c_str());\n\t}\n}\n\n\n// Basic test harness\n#if !defined(TARGET_OS_IPHONE)\nint main(int argc, char** argv)\n{\n\tbool disablePrompt = false;\n\tunsigned int iterations = 8;\n\tstd::vector<std::string> selectedTests;\n\t\n\t// Disable buffering (so that when run in, e.g., Sublime Text, the output appears as it is written)\n\tstd::setvbuf(stdout, nullptr, _IONBF, 0);\n\t\n\t// Isolate the executable name\n\tstd::string progName = argv[0];\n\tauto slash = progName.find_last_of(\"/\\\\\");\n\tif (slash != std::string::npos) {\n\t\tprogName = progName.substr(slash + 1);\n\t}\n\t\n\tConcurrentQueueTests tests;\n\t\n\t// Parse command line 
options\n\tif (argc > 1) {\n\t\tbool printHelp = false;\n\t\tbool printedTests = false;\n\t\tbool error = false;\n\t\tfor (int i = 1; i < argc; ++i) {\n\t\t\tif (std::strcmp(argv[i], \"--help\") == 0) {\n\t\t\t\tprintHelp = true;\n\t\t\t}\n\t\t\telse if (std::strcmp(argv[i], \"--disable-prompt\") == 0) {\n\t\t\t\tdisablePrompt = true;\n\t\t\t}\n\t\t\telse if (std::strcmp(argv[i], \"--run\") == 0) {\n\t\t\t\tif (i + 1 == argc || argv[i + 1][0] == '-') {\n\t\t\t\t\tstd::printf(\"Expected test name argument for --run option.\\n\");\n\t\t\t\t\tif (!printedTests) {\n\t\t\t\t\t\tprintTests(tests);\n\t\t\t\t\t\tprintedTests = true;\n\t\t\t\t\t}\n\t\t\t\t\terror = true;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (!tests.validateTestName(argv[++i])) {\n\t\t\t\t\tstd::printf(\"Unrecognized test '%s'.\\n\", argv[i]);\n\t\t\t\t\tif (!printedTests) {\n\t\t\t\t\t\tprintTests(tests);\n\t\t\t\t\t\tprintedTests = true;\n\t\t\t\t\t}\n\t\t\t\t\terror = true;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tselectedTests.push_back(argv[i]);\n\t\t\t}\n\t\t\telse if (std::strcmp(argv[i], \"--iterations\") == 0) {\n\t\t\t\tif (i + 1 == argc || argv[i + 1][0] == '-') {\n\t\t\t\t\tstd::printf(\"Expected iteration count argument for --iterations option.\\n\");\n\t\t\t\t\terror = true;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\titerations = static_cast<unsigned int>(std::atoi(argv[++i]));\n\t\t\t}\n\t\t\telse {\n\t\t\t\tstd::printf(\"Unrecognized option '%s'.\\n\", argv[i]);\n\t\t\t\terror = true;\n\t\t\t}\n\t\t}\n\t\t\n\t\tif (error || printHelp) {\n\t\t\tif (error) {\n\t\t\t\tstd::printf(\"\\n\");\n\t\t\t}\n\t\t\tstd::printf(\"%s\\n    Description: Runs unit tests for moodycamel::ConcurrentQueue\\n\", progName.c_str());\n\t\t\tstd::printf(\"    --help            Prints this help blurb\\n\");\n\t\t\tstd::printf(\"    --run test        Runs only the specified test(s)\\n\");\n\t\t\tstd::printf(\"    --iterations N    Do N iterations of each 
test\\n\");\n\t\t\tstd::printf(\"    --disable-prompt  Disables prompt before exit when the tests finish\\n\");\n\t\t\treturn error ? -1 : 0;\n\t\t}\n\t}\n\t\n\tint exitCode = 0;\n\t\n\tbool result;\n\tif (selectedTests.size() > 0) {\n\t\tstd::printf(\"Running %d iteration%s of selected unit test%s for moodycamel::ConcurrentQueue.\\n\\n\", iterations, iterations == 1 ? \"\" : \"s\", selectedTests.size() == 1 ? \"\" : \"s\");\n\t\tresult = tests.run(selectedTests, iterations);\n\t}\n\telse {\n\t\tstd::printf(\"Running %d iteration%s of all unit tests for moodycamel::ConcurrentQueue.\\n(Run %s --help for other options.)\\n\\n\", iterations, iterations == 1 ? \"\" : \"s\", progName.c_str());\n\t\tresult = tests.run(iterations);\n\t}\n\t\n\tif (result) {\n\t\tstd::printf(\"All %stests passed.\\n\", (selectedTests.size() > 0 ? \"selected \" : \"\"));\n\t}\n\telse {\n\t\tstd::printf(\"Test(s) failed!\\n\");\n\t\texitCode = 2;\n\t}\n\t\n\tif (!disablePrompt) {\n\t\tstd::printf(\"Press ENTER to exit.\\n\");\n\t\tgetchar();\n\t}\n\treturn exitCode;\n}\n#else\n// Provide entry function that can be invoked\n// by a test host (iOS app / test runner)\nbool runAllTests() {\n  unsigned int iterations = 8;\n  ConcurrentQueueTests tests;\n  return tests.run(iterations);\n}\n#endif // !defined(TARGET_OS_IPHONE)\n"
  },
  {
    "path": "src/third_party/fmt/.clang-format",
    "content": "# Run manually to reformat a file:\n# clang-format -i --style=file <file>\nLanguage: Cpp\nBasedOnStyle: Google\nIndentPPDirectives: AfterHash\nIndentCaseLabels: false\nAlwaysBreakTemplateDeclarations: false\nDerivePointerAlignment: false\n"
  },
  {
    "path": "src/third_party/fmt/.github/pull_request_template.md",
    "content": "<!-- Please read the contribution guidelines before submitting a pull request. -->\n<!-- By submitting this pull request, you agree that your contributions are licensed under the {fmt} license,\n     and agree to future changes to the licensing. -->\n<!-- If you're a first-time contributor, please acknowledge it by leaving the statement below. -->\n\nI agree that my contributions are licensed under the {fmt} license, and agree to future changes to the licensing.\n"
  },
  {
    "path": "src/third_party/fmt/.gitignore",
    "content": ".vscode/\n\n*.iml\n.idea/\n.externalNativeBuild/\n.gradle/\ngradle/\ngradlew*\nlocal.properties\nbuild/\n\nbin/\n/_CPack_Packages\n/CMakeScripts\n/doc/doxyxml\n/doc/html\nvirtualenv\n/Testing\n/install_manifest.txt\n*~\n*.a\n*.so*\n*.xcodeproj\n*.zip\ncmake_install.cmake\nCPack*.cmake\nfmt-*.cmake\nCTestTestfile.cmake\nCMakeCache.txt\nCMakeFiles\nFMT.build\nMakefile\nrun-msbuild.bat\nfmt.pc\n"
  },
  {
    "path": "src/third_party/fmt/.travis.yml",
    "content": "language: cpp\ndist: trusty\nsudo: false\n\nos: linux\n\ngit:\n  depth: 1\n\nenv:\n  global:\n    - secure: |-\n        a1eovNn4uol9won7ghr67eD3/59oeESN+G9bWE+ecI1V6yRseG9whniGhIpC/YfMW/Qz5I\n        5sxSmFjaw9bxCISNwUIrL1O5x2AmRYTnFcXk4dFsUvlZg+WeF/aKyBYCNRM8C2ndbBmtAO\n        o1F2EwFbiso0EmtzhAPs19ujiVxkLn4=\n\nmatrix:\n  include:\n      # Documentation\n    - env: BUILD=Doc\n      sudo: required\n      # g++ 6 on Linux with C++14\n    - env: COMPILER=g++-6 BUILD=Debug STANDARD=14\n      compiler: gcc\n      addons:\n        apt:\n          update: true\n          sources:\n            - ubuntu-toolchain-r-test\n          packages:\n            - g++-6\n    - env: COMPILER=g++-6 BUILD=Release STANDARD=14\n      compiler: gcc\n      addons:\n        apt:\n          update: true\n          sources:\n            - ubuntu-toolchain-r-test\n          packages:\n            - g++-6\n     # g++ 8 on Linux with C++17\n    - env: COMPILER=g++-8 BUILD=Debug STANDARD=17\n      compiler: gcc\n      addons:\n        apt:\n          update: true\n          sources:\n            - ubuntu-toolchain-r-test\n          packages:\n            - g++-8\n    - env: COMPILER=g++-8 BUILD=Release STANDARD=17\n      compiler: gcc\n      addons:\n        apt:\n          update: true\n          sources:\n            - ubuntu-toolchain-r-test\n          packages:\n            - g++-8\n\n      # Apple clang on OS X with C++14\n    - env: BUILD=Debug STANDARD=14\n      compiler: clang\n      os: osx\n    - env: BUILD=Release STANDARD=14\n      compiler: clang\n      os: osx\n      # clang 6.0 on Linux with C++14 (builds the fuzzers as well)\n    - env: COMPILER=clang++-6.0 BUILD=Debug STANDARD=14 ENABLE_FUZZING=1\n      compiler: clang\n      addons:\n        apt:\n          update: true\n          packages:\n            - clang-6.0\n          sources:\n            - ubuntu-toolchain-r-test\n            - llvm-toolchain-trusty\n            - llvm-toolchain-trusty-6.0\n      # 
clang 4.0 on Linux with C++14\n    - env: COMPILER=clang++-4.0 BUILD=Debug STANDARD=11\n      compiler: clang\n      addons:\n        apt:\n          update: true\n          packages:\n            - clang-4.0\n          sources:\n            - ubuntu-toolchain-r-test\n            - llvm-toolchain-trusty\n            - llvm-toolchain-trusty-4.0\n      # g++ 4.8 on Linux with C++11\n    - env: COMPILER=g++-4.8 BUILD=Debug STANDARD=11\n      compiler: gcc\n    - name: Android NDK (Gradle)\n      language: android\n      addons:\n        apt:\n          update: true\n          sources:\n            - ubuntu-toolchain-r-test\n          packages:\n            - ninja-build\n            - curl\n            - tree\n      android:\n        components:\n          - tools\n          - platform-tools\n          - android-25 # 7.0\n          - android-27 # 8.1\n          - android-28 # 9.0\n          - build-tools-28.0.3\n      before_install:\n        # Install Gradle from https://sdkman.io/\n        - curl -s \"https://get.sdkman.io\" | bash > /dev/null\n        - source \"$HOME/.sdkman/bin/sdkman-init.sh\"\n        - sdk version\n        - sdk install gradle\n        - sdk use gradle\n        - gradle --version\n      install:\n        # Accept SDK Licenses + Install NDK\n        - yes | sdkmanager --update > /dev/null 2>&1\n        - sdkmanager ndk-bundle > /dev/null 2>&1\n      before_script:\n        - pushd ./support\n      script:\n        - gradle clean\n        - gradle assemble\n      after_success:\n        - popd;\n        - tree ./libs\n\nbefore_script:\n  - if [[ \"${TRAVIS_OS_NAME}\" == \"linux\" ]]; then export CXX=${COMPILER}; fi\n  - if [[ \"${BUILD}\" != \"Doc\" ]]; then ${CXX} --version; fi\n\nscript:\n  - support/travis-build.py\n"
  },
  {
    "path": "src/third_party/fmt/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.1.0)\n\n# Use newer policies if available, up to most recent tested version of CMake.\nif(${CMAKE_VERSION} VERSION_LESS 3.11)\n  cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})\nelse()\n  cmake_policy(VERSION 3.11)\nendif()\n\n# Determine if fmt is built as a subproject (using add_subdirectory)\n# or if it is the master project.\nset(MASTER_PROJECT OFF)\nif (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)\n  set(MASTER_PROJECT ON)\n  message(STATUS \"CMake version: ${CMAKE_VERSION}\")\nendif ()\n\n# Joins arguments and places the results in ${result_var}.\nfunction(join result_var)\n  set(result )\n  foreach (arg ${ARGN})\n    set(result \"${result}${arg}\")\n  endforeach ()\n  set(${result_var} \"${result}\" PARENT_SCOPE)\nendfunction()\n\n# Set the default CMAKE_BUILD_TYPE to Release.\n# This should be done before the project command since the latter can set\n# CMAKE_BUILD_TYPE itself (it does so for nmake).\nif (MASTER_PROJECT AND NOT CMAKE_BUILD_TYPE)\n  join(doc \"Choose the type of build, options are: None(CMAKE_CXX_FLAGS or \"\n           \"CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.\")\n  set(CMAKE_BUILD_TYPE Release CACHE STRING ${doc})\nendif ()\n\noption(FMT_PEDANTIC \"Enable extra warnings and expensive tests.\" OFF)\noption(FMT_WERROR \"Halt the compilation with an error on compiler warnings.\"\n       OFF)\n\n# Options that control generation of various targets.\noption(FMT_DOC \"Generate the doc target.\" ${MASTER_PROJECT})\noption(FMT_INSTALL \"Generate the install target.\" ${MASTER_PROJECT})\noption(FMT_TEST \"Generate the test target.\" ${MASTER_PROJECT})\noption(FMT_FUZZ \"Generate the fuzz target.\" OFF)\n\nproject(FMT CXX)\n\n# Get version from core.h\nfile(READ include/fmt/core.h core_h)\nif (NOT core_h MATCHES \"FMT_VERSION ([0-9]+)([0-9][0-9])([0-9][0-9])\")\n  message(FATAL_ERROR \"Cannot get FMT_VERSION from core.h.\")\nendif ()\n# Use math to skip 
leading zeros if any.\nmath(EXPR CPACK_PACKAGE_VERSION_MAJOR ${CMAKE_MATCH_1})\nmath(EXPR CPACK_PACKAGE_VERSION_MINOR ${CMAKE_MATCH_2})\nmath(EXPR CPACK_PACKAGE_VERSION_PATCH ${CMAKE_MATCH_3})\njoin(FMT_VERSION ${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.\n                 ${CPACK_PACKAGE_VERSION_PATCH})\nmessage(STATUS \"Version: ${FMT_VERSION}\")\n\nmessage(STATUS \"Build type: ${CMAKE_BUILD_TYPE}\")\n\nset(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)\n\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}\n  \"${CMAKE_CURRENT_SOURCE_DIR}/support/cmake\")\n\ninclude(cxx14)\ninclude(CheckCXXCompilerFlag)\n\nset(FMT_REQUIRED_FEATURES cxx_auto_type cxx_variadic_templates)\n\nif (CMAKE_CXX_COMPILER_ID MATCHES \"GNU\")\n  set(PEDANTIC_COMPILE_FLAGS -pedantic-errors -Wall -Wextra -pedantic\n      -Wold-style-cast -Wundef\n      -Wredundant-decls -Wwrite-strings -Wpointer-arith\n      -Wcast-qual -Wformat=2 -Wmissing-include-dirs\n      -Wcast-align -Wnon-virtual-dtor\n      -Wctor-dtor-privacy -Wdisabled-optimization\n      -Winvalid-pch -Woverloaded-virtual\n      -Wconversion\n      -Wno-ctor-dtor-privacy -Wno-format-nonliteral -Wno-shadow)\n  if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.6)\n      set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} -Wnoexcept\n         -Wno-dangling-else -Wno-unused-local-typedefs)\n  endif ()\n  if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)\n      set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} -Wdouble-promotion\n          -Wtrampolines -Wzero-as-null-pointer-constant -Wuseless-cast\n          -Wvector-operation-performance -Wsized-deallocation)\n  endif ()\n  if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.0)\n      set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} -Wshift-overflow=2\n          -Wnull-dereference -Wduplicated-cond)\n  endif ()\n  set(WERROR_FLAG -Werror)\nendif ()\n\nif (CMAKE_CXX_COMPILER_ID MATCHES \"Clang\")\n  set(PEDANTIC_COMPILE_FLAGS -Wall -Wextra -pedantic 
-Wconversion\n      -Wno-sign-conversion)\n  check_cxx_compiler_flag(-Wzero-as-null-pointer-constant HAS_NULLPTR_WARNING)\n  if (HAS_NULLPTR_WARNING)\n    set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS}\n        -Wzero-as-null-pointer-constant)\n  endif ()\n  set(WERROR_FLAG -Werror)\nendif ()\n\nif (MSVC)\n  set(PEDANTIC_COMPILE_FLAGS /W3)\n  set(WERROR_FLAG /WX)\nendif ()\n\nif (MASTER_PROJECT AND CMAKE_GENERATOR MATCHES \"Visual Studio\")\n  # If Microsoft SDK is installed create script run-msbuild.bat that\n  # calls SetEnv.cmd to set up build environment and runs msbuild.\n  # It is useful when building Visual Studio projects with the SDK\n  # toolchain rather than Visual Studio.\n  include(FindSetEnv)\n  if (WINSDK_SETENV)\n    set(MSBUILD_SETUP \"call \\\"${WINSDK_SETENV}\\\"\")\n  endif ()\n  # Set FrameworkPathOverride to get rid of MSB3644 warnings.\n  set(netfxpath \"C:\\\\Program Files\\\\Reference Assemblies\\\\Microsoft\\\\Framework\\\\.NETFramework\\\\v4.0\")\n  file(WRITE run-msbuild.bat \"\n    ${MSBUILD_SETUP}\n    ${CMAKE_MAKE_PROGRAM} -p:FrameworkPathOverride=\\\"${netfxpath}\\\" %*\")\nendif ()\n\nset(strtod_l_headers stdlib.h)\nif (APPLE)\n  set(strtod_l_headers ${strtod_l_headers} xlocale.h)\nendif ()\n\ninclude(CheckSymbolExists)\nif (WIN32)\n  check_symbol_exists(open io.h HAVE_OPEN)\n  check_symbol_exists(_strtod_l \"${strtod_l_headers}\" HAVE_STRTOD_L)\nelse ()\n  check_symbol_exists(open fcntl.h HAVE_OPEN)\n  check_symbol_exists(strtod_l \"${strtod_l_headers}\" HAVE_STRTOD_L)\nendif ()\n\nfunction(add_headers VAR)\n  set(headers ${${VAR}})\n  foreach (header ${ARGN})\n    set(headers ${headers} include/fmt/${header})\n  endforeach()\n  set(${VAR} ${headers} PARENT_SCOPE)\nendfunction()\n\n# Define the fmt library, its includes and the needed defines.\nadd_headers(FMT_HEADERS chrono.h color.h core.h format.h format-inl.h locale.h\n                        ostream.h prepare.h printf.h ranges.h safe-duration-cast.h)\nset(FMT_SOURCES 
src/format.cc)\nif (HAVE_OPEN)\n  add_headers(FMT_HEADERS posix.h)\n  set(FMT_SOURCES ${FMT_SOURCES} src/posix.cc)\nendif ()\n\nadd_library(fmt ${FMT_SOURCES} ${FMT_HEADERS} README.rst ChangeLog.rst)\nadd_library(fmt::fmt ALIAS fmt)\n\nif (HAVE_STRTOD_L)\n  target_compile_definitions(fmt PUBLIC FMT_LOCALE)\nendif ()\n\nif (FMT_WERROR)\n  target_compile_options(fmt PRIVATE ${WERROR_FLAG})\nendif ()\nif (FMT_PEDANTIC)\n  target_compile_options(fmt PRIVATE ${PEDANTIC_COMPILE_FLAGS})\nendif ()\n\ntarget_compile_features(fmt INTERFACE ${FMT_REQUIRED_FEATURES})\n\ntarget_include_directories(fmt PUBLIC\n  $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>\n  $<INSTALL_INTERFACE:include>)\n\nset_target_properties(fmt PROPERTIES\n  VERSION ${FMT_VERSION} SOVERSION ${CPACK_PACKAGE_VERSION_MAJOR}\n  DEBUG_POSTFIX d)\n\nif (BUILD_SHARED_LIBS)\n  if (UNIX AND NOT APPLE)\n    # Fix rpmlint warning:\n    # unused-direct-shlib-dependency /usr/lib/libformat.so.1.1.0 /lib/libm.so.6.\n    target_link_libraries(fmt -Wl,--as-needed)\n  endif ()\n  target_compile_definitions(fmt PRIVATE FMT_EXPORT INTERFACE FMT_SHARED)\nendif ()\nif (FMT_SAFE_DURATION_CAST)\n  target_compile_definitions(fmt PUBLIC FMT_SAFE_DURATION_CAST)\nendif()\n\nadd_library(fmt-header-only INTERFACE)\nadd_library(fmt::fmt-header-only ALIAS fmt-header-only)\n\ntarget_compile_definitions(fmt-header-only INTERFACE FMT_HEADER_ONLY=1)\n\ntarget_compile_features(fmt-header-only INTERFACE ${FMT_REQUIRED_FEATURES})\n\ntarget_include_directories(fmt-header-only INTERFACE\n  $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>\n  $<INSTALL_INTERFACE:include>)\n\n# Install targets.\nif (FMT_INSTALL)\n  include(GNUInstallDirs)\n  include(CMakePackageConfigHelpers)\n  set(FMT_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/fmt CACHE STRING\n      \"Installation directory for cmake files, relative to ${CMAKE_INSTALL_PREFIX}.\")\n  set(version_config ${PROJECT_BINARY_DIR}/fmt-config-version.cmake)\n  set(project_config 
${PROJECT_BINARY_DIR}/fmt-config.cmake)\n  set(pkgconfig ${PROJECT_BINARY_DIR}/fmt.pc)\n  set(targets_export_name fmt-targets)\n\n  set (INSTALL_TARGETS fmt)\n  if (TARGET fmt-header-only)\n    set(INSTALL_TARGETS ${INSTALL_TARGETS} fmt-header-only)\n  endif ()\n\n  set(FMT_LIB_DIR ${CMAKE_INSTALL_LIBDIR} CACHE STRING\n      \"Installation directory for libraries, relative to ${CMAKE_INSTALL_PREFIX}.\")\n\n  set(FMT_INC_DIR ${CMAKE_INSTALL_INCLUDEDIR}/fmt CACHE STRING\n      \"Installation directory for include files, relative to ${CMAKE_INSTALL_PREFIX}.\")\n\n  set(FMT_PKGCONFIG_DIR ${CMAKE_INSTALL_LIBDIR}/pkgconfig CACHE PATH\n      \"Installation directory for pkgconfig (.pc) files, relative to ${CMAKE_INSTALL_PREFIX}.\")\n\n  # Generate the version, config and target files into the build directory.\n  write_basic_package_version_file(\n    ${version_config}\n    VERSION ${FMT_VERSION}\n    COMPATIBILITY AnyNewerVersion)\n  configure_file(\n    \"${PROJECT_SOURCE_DIR}/support/cmake/fmt.pc.in\"\n    \"${pkgconfig}\"\n    @ONLY)\n  configure_package_config_file(\n    ${PROJECT_SOURCE_DIR}/support/cmake/fmt-config.cmake.in\n    ${project_config}\n    INSTALL_DESTINATION ${FMT_CMAKE_DIR})\n  # Use a namespace because CMake provides better diagnostics for namespaced\n  # imported targets.\n  export(TARGETS ${INSTALL_TARGETS} NAMESPACE fmt::\n         FILE ${PROJECT_BINARY_DIR}/${targets_export_name}.cmake)\n\n  # Install version, config and target files.\n  install(\n    FILES ${project_config} ${version_config}\n    DESTINATION ${FMT_CMAKE_DIR})\n  install(EXPORT ${targets_export_name} DESTINATION ${FMT_CMAKE_DIR}\n          NAMESPACE fmt::)\n\n  # Install the library and headers.\n  install(TARGETS ${INSTALL_TARGETS} EXPORT ${targets_export_name}\n          DESTINATION ${FMT_LIB_DIR})\n\n  install(FILES $<TARGET_PDB_FILE:${INSTALL_TARGETS}>\n          DESTINATION ${FMT_LIB_DIR} OPTIONAL)\n  install(FILES ${FMT_HEADERS} DESTINATION ${FMT_INC_DIR})\n  install(FILES 
\"${pkgconfig}\" DESTINATION \"${FMT_PKGCONFIG_DIR}\")\nendif ()\n\nif (FMT_DOC)\n  add_subdirectory(doc)\nendif ()\n\nif (FMT_TEST)\n  enable_testing()\n  add_subdirectory(test)\nendif ()\n\n# Control fuzzing independent of the unit tests.\nif (FMT_FUZZ)\n  add_subdirectory(test/fuzzing)\nendif ()\n\nset(gitignore ${PROJECT_SOURCE_DIR}/.gitignore)\nif (MASTER_PROJECT AND EXISTS ${gitignore})\n  # Get the list of ignored files from .gitignore.\n  file (STRINGS ${gitignore} lines)\n  LIST(REMOVE_ITEM lines /doc/html)\n  foreach (line ${lines})\n    string(REPLACE \".\" \"[.]\" line \"${line}\")\n    string(REPLACE \"*\" \".*\" line \"${line}\")\n    set(ignored_files ${ignored_files} \"${line}$\" \"${line}/\")\n  endforeach ()\n  set(ignored_files ${ignored_files}\n    /.git /breathe /format-benchmark sphinx/ .buildinfo .doctrees)\n\n  set(CPACK_SOURCE_GENERATOR ZIP)\n  set(CPACK_SOURCE_IGNORE_FILES ${ignored_files})\n  set(CPACK_SOURCE_PACKAGE_FILE_NAME fmt-${FMT_VERSION})\n  set(CPACK_PACKAGE_NAME fmt)\n  set(CPACK_RESOURCE_FILE_README ${PROJECT_SOURCE_DIR}/README.rst)\n  include(CPack)\nendif ()\n"
  },
  {
    "path": "src/third_party/fmt/CONTRIBUTING.md",
    "content": "Contributing to {fmt}\n=====================\n\nBy submitting a pull request or a patch, you represent that you have the right\nto license your contribution to the {fmt} project owners and the community,\nagree that your contributions are licensed under the {fmt} license, and agree\nto future changes to the licensing.\n\nAll C++ code must adhere to [Google C++ Style Guide](\nhttps://google.github.io/styleguide/cppguide.html) with the following\nexceptions:\n\n* Exceptions are permitted\n* snake_case should be used instead of UpperCamelCase for function and type\n  names\n\nThanks for contributing!\n"
  },
  {
    "path": "src/third_party/fmt/ChangeLog.rst",
    "content": "6.0.0 - TBD\n-----------\n\n* Made floating-point formatting locale-independent:\n\n  .. code:: c++\n\n     #include <locale>\n     #include <fmt/core.h>\n\n     int main() {\n       std::locale::global(std::locale(\"ru_RU.UTF-8\"));\n       fmt::print(\"value = {}\", 4.2);\n     }\n\n  prints \"value = 4.2\" regardless of the locale.\n\n  For locale-specific formatting use the ``n`` specifier:\n\n  .. code:: c++\n\n     std::locale::global(std::locale(\"ru_RU.UTF-8\"));\n     fmt::print(\"value = {:n}\", 4.2);\n\n  prints \"value = 4,2\".\n\n* Stopped setting ``CMAKE_BUILD_TYPE`` if fmt is a subproject\n  (`#1081 <https://github.com/fmtlib/fmt/issues/1081>`_).\n\n5.3.0 - 2018-12-28\n------------------\n\n* Introduced experimental chrono formatting support:\n\n  .. code:: c++\n\n     #include <fmt/chrono.h>\n\n     int main() {\n       using namespace std::literals::chrono_literals;\n       fmt::print(\"Default format: {} {}\\n\", 42s, 100ms);\n       fmt::print(\"strftime-like format: {:%H:%M:%S}\\n\", 3h + 15min + 30s);\n     }\n\n  prints::\n\n     Default format: 42s 100ms\n     strftime-like format: 03:15:30\n\n* Added experimental support for emphasis (bold, italic, underline,\n  strikethrough), colored output to a file stream, and improved colored\n  formatting API\n  (`#961 <https://github.com/fmtlib/fmt/pull/961>`_,\n  `#967 <https://github.com/fmtlib/fmt/pull/967>`_,\n  `#973 <https://github.com/fmtlib/fmt/pull/973>`_):\n\n  .. code:: c++\n\n     #include <fmt/color.h>\n\n     int main() {\n       print(fg(fmt::color::crimson) | fmt::emphasis::bold,\n             \"Hello, {}!\\n\", \"world\");\n       print(fg(fmt::color::floral_white) | bg(fmt::color::slate_gray) |\n             fmt::emphasis::underline, \"Hello, {}!\\n\", \"мир\");\n       print(fg(fmt::color::steel_blue) | fmt::emphasis::italic,\n             \"Hello, {}!\\n\", \"世界\");\n     }\n\n  prints the following on modern terminals with RGB color support:\n\n  .. 
image:: https://user-images.githubusercontent.com/576385/\n             50405788-b66e7500-076e-11e9-9592-7324d1f951d8.png\n\n  Thanks `@Rakete1111 (Nicolas) <https://github.com/Rakete1111>`_.\n\n* Added support for 4-bit terminal colors\n  (`#968 <https://github.com/fmtlib/fmt/issues/968>`_,\n  `#974 <https://github.com/fmtlib/fmt/pull/974>`_)\n\n  .. code:: c++\n\n     #include <fmt/color.h>\n\n     int main() {\n       print(fg(fmt::terminal_color::red), \"stop\\n\");\n     }\n\n  Note that these colors vary by terminal:\n\n  .. image:: https://user-images.githubusercontent.com/576385/\n             50405925-dbfc7e00-0770-11e9-9b85-333fab0af9ac.png\n\n  Thanks `@Rakete1111 (Nicolas) <https://github.com/Rakete1111>`_.\n\n* Parameterized formatting functions on the type of the format string\n  (`#880 <https://github.com/fmtlib/fmt/issues/880>`_,\n  `#881 <https://github.com/fmtlib/fmt/pull/881>`_,\n  `#883 <https://github.com/fmtlib/fmt/pull/883>`_,\n  `#885 <https://github.com/fmtlib/fmt/pull/885>`_,\n  `#897 <https://github.com/fmtlib/fmt/pull/897>`_,\n  `#920 <https://github.com/fmtlib/fmt/issues/920>`_).\n  Any object of type ``S`` that has an overloaded ``to_string_view(const S&)``\n  returning ``fmt::string_view`` can be used as a format string:\n\n  .. code:: c++\n\n     namespace my_ns {\n     inline string_view to_string_view(const my_string& s) {\n       return {s.data(), s.length()};\n     }\n     }\n\n     std::string message = fmt::format(my_string(\"The answer is {}.\"), 42);\n\n  Thanks `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_.\n\n* Made ``std::string_view`` work as a format string\n  (`#898 <https://github.com/fmtlib/fmt/pull/898>`_):\n\n  .. 
code:: c++\n\n     auto message = fmt::format(std::string_view(\"The answer is {}.\"), 42);\n\n  Thanks `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_.\n\n* Added wide string support to compile-time format string checks\n  (`#924 <https://github.com/fmtlib/fmt/pull/924>`_):\n\n  .. code:: c++\n\n     print(fmt(L\"{:f}\"), 42); // compile-time error: invalid type specifier\n\n  Thanks `@XZiar <https://github.com/XZiar>`_.\n\n* Made colored print functions work with wide strings\n  (`#867 <https://github.com/fmtlib/fmt/pull/867>`_):\n\n  .. code:: c++\n\n     #include <fmt/color.h>\n\n     int main() {\n       print(fg(fmt::color::red), L\"{}\\n\", 42);\n     }\n\n  Thanks `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_.\n\n* Introduced experimental Unicode support\n  (`#628 <https://github.com/fmtlib/fmt/issues/628>`_,\n  `#891 <https://github.com/fmtlib/fmt/pull/891>`_):\n\n  .. code:: c++\n\n     using namespace fmt::literals;\n     auto s = fmt::format(\"{:*^5}\"_u, \"🤡\"_u); // s == \"**🤡**\"_u\n\n* Improved locale support:\n\n  .. 
code:: c++\n\n     #include <fmt/locale.h>\n\n     struct numpunct : std::numpunct<char> {\n      protected:\n       char do_thousands_sep() const override { return '~'; }\n     };\n\n     std::locale loc;\n     auto s = fmt::format(std::locale(loc, new numpunct()), \"{:n}\", 1234567);\n     // s == \"1~234~567\"\n\n* Constrained formatting functions on proper iterator types\n  (`#921 <https://github.com/fmtlib/fmt/pull/921>`_).\n  Thanks `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_.\n\n* Added ``make_printf_args`` and ``make_wprintf_args`` functions\n  (`#934 <https://github.com/fmtlib/fmt/pull/934>`_).\n  Thanks `@tnovotny <https://github.com/tnovotny>`_.\n\n* Deprecated ``fmt::visit``, ``parse_context``, and ``wparse_context``.\n  Use ``fmt::visit_format_arg``, ``format_parse_context``, and\n  ``wformat_parse_context`` instead.\n\n* Removed undocumented ``basic_fixed_buffer`` which has been superseded by the\n  iterator-based API\n  (`#873 <https://github.com/fmtlib/fmt/issues/873>`_,\n  `#902 <https://github.com/fmtlib/fmt/pull/902>`_).\n  Thanks `@superfunc (hollywood programmer) <https://github.com/superfunc>`_.\n\n* Disallowed repeated leading zeros in an argument ID:\n\n  .. 
code:: c++\n\n     fmt::print(\"{000}\", 42); // error\n\n* Reintroduced support for gcc 4.4.\n\n* Fixed compilation on platforms with exotic ``double``\n  (`#878 <https://github.com/fmtlib/fmt/issues/878>`_).\n\n* Improved documentation\n  (`#164 <https://github.com/fmtlib/fmt/issues/164>`_,\n  `#877 <https://github.com/fmtlib/fmt/issues/877>`_,\n  `#901 <https://github.com/fmtlib/fmt/pull/901>`_,\n  `#906 <https://github.com/fmtlib/fmt/pull/906>`_,\n  `#979 <https://github.com/fmtlib/fmt/pull/979>`_).\n  Thanks `@kookjr (Mathew Cucuzella) <https://github.com/kookjr>`_,\n  `@DarkDimius (Dmitry Petrashko) <https://github.com/DarkDimius>`_,\n  `@HecticSerenity <https://github.com/HecticSerenity>`_.\n\n* Added pkgconfig support which makes it easier to consume the library from\n  meson and other build systems\n  (`#916 <https://github.com/fmtlib/fmt/pull/916>`_).\n  Thanks `@colemickens (Cole Mickens) <https://github.com/colemickens>`_.\n\n* Various build improvements\n  (`#909 <https://github.com/fmtlib/fmt/pull/909>`_,\n  `#926 <https://github.com/fmtlib/fmt/pull/926>`_,\n  `#937 <https://github.com/fmtlib/fmt/pull/937>`_,\n  `#953 <https://github.com/fmtlib/fmt/pull/953>`_,\n  `#959 <https://github.com/fmtlib/fmt/pull/959>`_).\n  Thanks `@tchaikov (Kefu Chai) <https://github.com/tchaikov>`_,\n  `@luncliff (Park DongHa) <https://github.com/luncliff>`_,\n  `@AndreasSchoenle (Andreas Schönle) <https://github.com/AndreasSchoenle>`_,\n  `@hotwatermorning <https://github.com/hotwatermorning>`_,\n  `@Zefz (JohanJansen) <https://github.com/Zefz>`_.\n\n* Improved ``string_view`` construction performance\n  (`#914 <https://github.com/fmtlib/fmt/pull/914>`_).\n  Thanks `@gabime (Gabi Melman) <https://github.com/gabime>`_.\n\n* Fixed non-matching char types\n  (`#895 <https://github.com/fmtlib/fmt/pull/895>`_).\n  Thanks `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_.\n\n* Fixed ``format_to_n`` with ``std::back_insert_iterator``\n  (`#913 
<https://github.com/fmtlib/fmt/pull/913>`_).\n  Thanks `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_.\n\n* Fixed locale-dependent formatting\n  (`#905 <https://github.com/fmtlib/fmt/issues/905>`_).\n\n* Fixed various compiler warnings and errors\n  (`#882 <https://github.com/fmtlib/fmt/pull/882>`_,\n  `#886 <https://github.com/fmtlib/fmt/pull/886>`_,\n  `#933 <https://github.com/fmtlib/fmt/pull/933>`_,\n  `#941 <https://github.com/fmtlib/fmt/pull/941>`_,\n  `#931 <https://github.com/fmtlib/fmt/issues/931>`_,\n  `#943 <https://github.com/fmtlib/fmt/pull/943>`_,\n  `#954 <https://github.com/fmtlib/fmt/pull/954>`_,\n  `#956 <https://github.com/fmtlib/fmt/pull/956>`_,\n  `#962 <https://github.com/fmtlib/fmt/pull/962>`_,\n  `#965 <https://github.com/fmtlib/fmt/issues/965>`_,\n  `#977 <https://github.com/fmtlib/fmt/issues/977>`_,\n  `#983 <https://github.com/fmtlib/fmt/pull/983>`_,\n  `#989 <https://github.com/fmtlib/fmt/pull/989>`_).\n  Thanks `@Luthaf (Guillaume Fraux) <https://github.com/Luthaf>`_,\n  `@stevenhoving (Steven Hoving) <https://github.com/stevenhoving>`_,\n  `@christinaa (Kristina Brooks) <https://github.com/christinaa>`_,\n  `@lgritz (Larry Gritz) <https://github.com/lgritz>`_,\n  `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_,\n  `@0x8000-0000 (Sign Bit) <https://github.com/0x8000-0000>`_,\n  `@liuping1997 <https://github.com/liuping1997>`_.\n\n5.2.1 - 2018-09-21\n------------------\n\n* Fixed ``visit`` lookup issues on gcc 7 & 8\n  (`#870 <https://github.com/fmtlib/fmt/pull/870>`_).\n  Thanks `@medithe <https://github.com/medithe>`_.\n\n* Fixed linkage errors on older gcc.\n\n* Prevented ``fmt/range.h`` from specializing ``fmt::basic_string_view``\n  (`#865 <https://github.com/fmtlib/fmt/issues/865>`_,\n  `#868 <https://github.com/fmtlib/fmt/pull/868>`_).\n  Thanks `@hhggit (dual) <https://github.com/hhggit>`_.\n\n* Improved error message when formatting unknown types\n  (`#872 <https://github.com/fmtlib/fmt/pull/872>`_).\n  
Thanks `@foonathan (Jonathan Müller) <https://github.com/foonathan>`_,\n\n* Disabled templated user-defined literals when compiled under nvcc\n  (`#875 <https://github.com/fmtlib/fmt/pull/875>`_).\n  Thanks `@CandyGumdrop (Candy Gumdrop) <https://github.com/CandyGumdrop>`_,\n\n* Fixed ``format_to`` formatting to ``wmemory_buffer``\n  (`#874 <https://github.com/fmtlib/fmt/issues/874>`_).\n\n5.2.0 - 2018-09-13\n------------------\n\n* Optimized format string parsing and argument processing which resulted in up\n  to 5x speed up on long format strings and significant performance boost on\n  various benchmarks. For example, version 5.2 is 2.22x faster than 5.1 on\n  decimal integer formatting with ``format_to`` (macOS, clang-902.0.39.2):\n\n  ==================  =======  =======\n  Method              Time, s  Speedup\n  ==================  =======  =======\n  fmt::format 5.1      0.58\n  fmt::format 5.2      0.35     1.66x\n  fmt::format_to 5.1   0.51\n  fmt::format_to 5.2   0.23     2.22x\n  sprintf              0.71\n  std::to_string       1.01\n  std::stringstream    1.73\n  ==================  =======  =======\n\n* Changed the ``fmt`` macro from opt-out to opt-in to prevent name collisions.\n  To enable it define the ``FMT_STRING_ALIAS`` macro to 1 before including\n  ``fmt/format.h``:\n\n  .. code:: c++\n\n     #define FMT_STRING_ALIAS 1\n     #include <fmt/format.h>\n     std::string answer = format(fmt(\"{}\"), 42);\n\n* Added compile-time format string checks to ``format_to`` overload that takes\n  ``fmt::memory_buffer`` (`#783 <https://github.com/fmtlib/fmt/issues/783>`_):\n\n  .. code:: c++\n\n     fmt::memory_buffer buf;\n     // Compile-time error: invalid type specifier.\n     fmt::format_to(buf, fmt(\"{:d}\"), \"foo\");\n\n* Moved experimental color support to ``fmt/color.h`` and enabled the\n  new API by default. 
The old API can be enabled by defining the\n  ``FMT_DEPRECATED_COLORS`` macro.\n\n* Added formatting support for types explicitly convertible to\n  ``fmt::string_view``:\n\n  .. code:: c++\n\n     struct foo {\n       explicit operator fmt::string_view() const { return \"foo\"; }\n     };\n     auto s = format(\"{}\", foo());\n\n  In particular, this makes formatting function work with\n  ``folly::StringPiece``.\n\n* Implemented preliminary support for ``char*_t`` by replacing the ``format``\n  function overloads with a single function template parameterized on the string\n  type.\n\n* Added support for dynamic argument lists\n  (`#814 <https://github.com/fmtlib/fmt/issues/814>`_,\n  `#819 <https://github.com/fmtlib/fmt/pull/819>`_).\n  Thanks `@MikePopoloski (Michael Popoloski)\n  <https://github.com/MikePopoloski>`_.\n\n* Reduced executable size overhead for embedded targets using newlib nano by\n  making locale dependency optional\n  (`#839 <https://github.com/fmtlib/fmt/pull/839>`_).\n  Thanks `@teajay-fr (Thomas Benard) <https://github.com/teajay-fr>`_.\n\n* Keep ``noexcept`` specifier when exceptions are disabled\n  (`#801 <https://github.com/fmtlib/fmt/issues/801>`_,\n  `#810 <https://github.com/fmtlib/fmt/pull/810>`_).\n  Thanks `@qis (Alexej Harm) <https://github.com/qis>`_.\n\n* Fixed formatting of user-defined types providing ``operator<<`` with\n  ``format_to_n``\n  (`#806 <https://github.com/fmtlib/fmt/pull/806>`_).\n  Thanks `@mkurdej (Marek Kurdej) <https://github.com/mkurdej>`_.\n\n* Fixed dynamic linkage of new symbols\n  (`#808 <https://github.com/fmtlib/fmt/issues/808>`_).\n\n* Fixed global initialization issue\n  (`#807 <https://github.com/fmtlib/fmt/issues/807>`_):\n\n  .. 
code:: c++\n\n     // This works on compilers with constexpr support.\n     static const std::string answer = fmt::format(\"{}\", 42);\n\n* Fixed various compiler warnings and errors\n  (`#804 <https://github.com/fmtlib/fmt/pull/804>`_,\n  `#809 <https://github.com/fmtlib/fmt/issues/809>`_,\n  `#811 <https://github.com/fmtlib/fmt/pull/811>`_,\n  `#822 <https://github.com/fmtlib/fmt/issues/822>`_,\n  `#827 <https://github.com/fmtlib/fmt/pull/827>`_,\n  `#830 <https://github.com/fmtlib/fmt/issues/830>`_,\n  `#838 <https://github.com/fmtlib/fmt/pull/838>`_,\n  `#843 <https://github.com/fmtlib/fmt/issues/843>`_,\n  `#844 <https://github.com/fmtlib/fmt/pull/844>`_,\n  `#851 <https://github.com/fmtlib/fmt/issues/851>`_,\n  `#852 <https://github.com/fmtlib/fmt/pull/852>`_,\n  `#854 <https://github.com/fmtlib/fmt/pull/854>`_).\n  Thanks `@henryiii (Henry Schreiner) <https://github.com/henryiii>`_,\n  `@medithe <https://github.com/medithe>`_, and\n  `@eliasdaler (Elias Daler) <https://github.com/eliasdaler>`_.\n\n5.1.0 - 2018-07-05\n------------------\n\n* Added experimental support for RGB color output enabled with\n  the ``FMT_EXTENDED_COLORS`` macro:\n\n  .. 
code:: c++\n\n     #define FMT_EXTENDED_COLORS\n     #define FMT_HEADER_ONLY // or compile fmt with FMT_EXTENDED_COLORS defined\n     #include <fmt/format.h>\n\n     fmt::print(fmt::color::steel_blue, \"Some beautiful text\");\n\n  The old API (the ``print_colored`` and ``vprint_colored`` functions and the\n  ``color`` enum) is now deprecated.\n  (`#762 <https://github.com/fmtlib/fmt/issues/762>`_,\n  `#767 <https://github.com/fmtlib/fmt/pull/767>`_).\n  Thanks `@Remotion (Remo) <https://github.com/remotion>`_.\n\n* Added quotes to strings in ranges and tuples\n  (`#766 <https://github.com/fmtlib/fmt/pull/766>`_).\n  Thanks `@Remotion (Remo) <https://github.com/Remotion>`_.\n\n* Made ``format_to`` work with ``basic_memory_buffer``\n  (`#776 <https://github.com/fmtlib/fmt/issues/776>`_).\n\n* Added ``vformat_to_n`` and ``wchar_t`` overload of ``format_to_n``\n  (`#764 <https://github.com/fmtlib/fmt/issues/764>`_,\n  `#769 <https://github.com/fmtlib/fmt/issues/769>`_).\n\n* Made ``is_range`` and ``is_tuple_like`` part of public (experimental) API\n  to allow specialization for user-defined types\n  (`#751 <https://github.com/fmtlib/fmt/issues/751>`_,\n  `#759 <https://github.com/fmtlib/fmt/pull/759>`_).\n  Thanks `@drrlvn (Dror Levin) <https://github.com/drrlvn>`_.\n\n* Added more compilers to continuous integration and increased ``FMT_PEDANTIC``\n  warning levels\n  (`#736 <https://github.com/fmtlib/fmt/pull/736>`_).\n  Thanks `@eliaskosunen (Elias Kosunen) <https://github.com/eliaskosunen>`_.\n\n* Fixed compilation with MSVC 2013.\n\n* Fixed handling of user-defined types in ``format_to``\n  (`#793 <https://github.com/fmtlib/fmt/issues/793>`_).\n\n* Forced linking of inline ``vformat`` functions into the library\n  (`#795 <https://github.com/fmtlib/fmt/issues/795>`_).\n\n* Fixed incorrect call to on_align in ``'{:}='``\n  (`#750 <https://github.com/fmtlib/fmt/issues/750>`_).\n\n* Fixed floating-point formatting to a non-back_insert_iterator with sign &\n  numeric 
alignment specified\n  (`#756 <https://github.com/fmtlib/fmt/issues/756>`_).\n\n* Fixed formatting to an array with ``format_to_n``\n  (`#778 <https://github.com/fmtlib/fmt/issues/778>`_).\n\n* Fixed formatting of more than 15 named arguments\n  (`#754 <https://github.com/fmtlib/fmt/issues/754>`_).\n\n* Fixed handling of compile-time strings when including ``fmt/ostream.h``.\n  (`#768 <https://github.com/fmtlib/fmt/issues/768>`_).\n\n* Fixed various compiler warnings and errors\n  (`#742 <https://github.com/fmtlib/fmt/issues/742>`_,\n  `#748 <https://github.com/fmtlib/fmt/issues/748>`_,\n  `#752 <https://github.com/fmtlib/fmt/issues/752>`_,\n  `#770 <https://github.com/fmtlib/fmt/issues/770>`_,\n  `#775 <https://github.com/fmtlib/fmt/pull/775>`_,\n  `#779 <https://github.com/fmtlib/fmt/issues/779>`_,\n  `#780 <https://github.com/fmtlib/fmt/pull/780>`_,\n  `#790 <https://github.com/fmtlib/fmt/pull/790>`_,\n  `#792 <https://github.com/fmtlib/fmt/pull/792>`_,\n  `#800 <https://github.com/fmtlib/fmt/pull/800>`_).\n  Thanks `@Remotion (Remo) <https://github.com/Remotion>`_,\n  `@gabime (Gabi Melman) <https://github.com/gabime>`_,\n  `@foonathan (Jonathan Müller) <https://github.com/foonathan>`_,\n  `@Dark-Passenger (Dhruv Paranjape) <https://github.com/Dark-Passenger>`_, and\n  `@0x8000-0000 (Sign Bit) <https://github.com/0x8000-0000>`_.\n\n5.0.0 - 2018-05-21\n------------------\n\n* Added a requirement for partial C++11 support, most importantly variadic\n  templates and type traits, and dropped ``FMT_VARIADIC_*`` emulation macros.\n  Variadic templates are available since GCC 4.4, Clang 2.9 and MSVC 18.0 (2013).\n  For older compilers use {fmt} `version 4.x\n  <https://github.com/fmtlib/fmt/releases/tag/4.1.0>`_ which continues to be\n  maintained and works with C++98 compilers.\n\n* Renamed symbols to follow standard C++ naming conventions and proposed a subset\n  of the library for standardization in `P0645R2 Text Formatting\n  <https://wg21.link/P0645>`_.\n\n* 
Implemented ``constexpr`` parsing of format strings and `compile-time format\n  string checks\n  <https://fmt.dev/dev/api.html#compile-time-format-string-checks>`_. For\n  example\n\n  .. code:: c++\n\n     #include <fmt/format.h>\n\n     std::string s = format(fmt(\"{:d}\"), \"foo\");\n\n  gives a compile-time error because ``d`` is an invalid specifier for strings\n  (`godbolt <https://godbolt.org/g/rnCy9Q>`__)::\n\n     ...\n     <source>:4:19: note: in instantiation of function template specialization 'fmt::v5::format<S, char [4]>' requested here\n       std::string s = format(fmt(\"{:d}\"), \"foo\");\n                       ^\n     format.h:1337:13: note: non-constexpr function 'on_error' cannot be used in a constant expression\n         handler.on_error(\"invalid type specifier\");\n\n  Compile-time checks require relaxed ``constexpr`` (C++14 feature) support. If\n  the latter is not available, checks will be performed at runtime.\n\n* Separated format string parsing and formatting in the extension API to enable\n  compile-time format string processing. For example\n\n  .. 
code:: c++\n\n     struct Answer {};\n\n     namespace fmt {\n     template <>\n     struct formatter<Answer> {\n       constexpr auto parse(parse_context& ctx) {\n         auto it = ctx.begin();\n         spec = *it;\n         if (spec != 'd' && spec != 's')\n           throw format_error(\"invalid specifier\");\n         return ++it;\n       }\n\n       template <typename FormatContext>\n       auto format(Answer, FormatContext& ctx) {\n         return spec == 's' ?\n           format_to(ctx.begin(), \"{}\", \"fourty-two\") :\n           format_to(ctx.begin(), \"{}\", 42);\n       }\n\n       char spec = 0;\n     };\n     }\n\n     std::string s = format(fmt(\"{:x}\"), Answer());\n\n  gives a compile-time error due to invalid format specifier (`godbolt\n  <https://godbolt.org/g/2jQ1Dv>`__)::\n\n     ...\n     <source>:12:45: error: expression '<throw-expression>' is not a constant expression\n            throw format_error(\"invalid specifier\");\n\n* Added `iterator support\n  <https://fmt.dev/dev/api.html#output-iterator-support>`_:\n\n  .. code:: c++\n\n     #include <vector>\n     #include <fmt/format.h>\n\n     std::vector<char> out;\n     fmt::format_to(std::back_inserter(out), \"{}\", 42);\n\n* Added the `format_to_n\n  <https://fmt.dev/dev/api.html#_CPPv2N3fmt11format_to_nE8OutputItNSt6size_tE11string_viewDpRK4Args>`_\n  function that restricts the output to the specified number of characters\n  (`#298 <https://github.com/fmtlib/fmt/issues/298>`_):\n\n  .. code:: c++\n\n     char out[4];\n     fmt::format_to_n(out, sizeof(out), \"{}\", 12345);\n     // out == \"1234\" (without terminating '\\0')\n\n* Added the `formatted_size\n  <https://fmt.dev/dev/api.html#_CPPv2N3fmt14formatted_sizeE11string_viewDpRK4Args>`_\n  function for computing the output size:\n\n  .. 
code:: c++\n\n     #include <fmt/format.h>\n\n     auto size = fmt::formatted_size(\"{}\", 12345); // size == 5\n\n* Improved compile times by reducing dependencies on standard headers and\n  providing a lightweight `core API <https://fmt.dev/dev/api.html#core-api>`_:\n\n  .. code:: c++\n\n     #include <fmt/core.h>\n\n     fmt::print(\"The answer is {}.\", 42);\n\n  See `Compile time and code bloat\n  <https://github.com/fmtlib/fmt#compile-time-and-code-bloat>`_.\n\n* Added the `make_format_args\n  <https://fmt.dev/dev/api.html#_CPPv2N3fmt16make_format_argsEDpRK4Args>`_\n  function for capturing formatting arguments:\n\n  .. code:: c++\n  \n     // Prints formatted error message.\n     void vreport_error(const char *format, fmt::format_args args) {\n       fmt::print(\"Error: \");\n       fmt::vprint(format, args);\n     }\n     template <typename... Args>\n     void report_error(const char *format, const Args & ... args) {\n       vreport_error(format, fmt::make_format_args(args...));\n     }\n\n* Added the ``make_printf_args`` function for capturing ``printf`` arguments\n  (`#687 <https://github.com/fmtlib/fmt/issues/687>`_,\n  `#694 <https://github.com/fmtlib/fmt/pull/694>`_).\n  Thanks `@Kronuz (Germán Méndez Bravo) <https://github.com/Kronuz>`_.\n\n* Added prefix ``v`` to non-variadic functions taking ``format_args`` to\n  distinguish them from variadic ones:\n\n  .. code:: c++\n\n     std::string vformat(string_view format_str, format_args args);\n     \n     template <typename... Args>\n     std::string format(string_view format_str, const Args & ... args);\n\n* Added experimental support for formatting ranges, containers and tuple-like\n  types in ``fmt/ranges.h`` (`#735 <https://github.com/fmtlib/fmt/pull/735>`_):\n\n  .. 
code:: c++\n\n     #include <fmt/ranges.h>\n\n     std::vector<int> v = {1, 2, 3};\n     fmt::print(\"{}\", v); // prints {1, 2, 3}\n\n  Thanks `@Remotion (Remo) <https://github.com/Remotion>`_.\n\n* Implemented ``wchar_t`` date and time formatting\n  (`#712 <https://github.com/fmtlib/fmt/pull/712>`_):\n\n  .. code:: c++\n\n     #include <fmt/time.h>\n\n     std::time_t t = std::time(nullptr);\n     auto s = fmt::format(L\"The date is {:%Y-%m-%d}.\", *std::localtime(&t));\n\n  Thanks `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_.\n\n* Provided more wide string overloads\n  (`#724 <https://github.com/fmtlib/fmt/pull/724>`_).\n  Thanks `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_.\n\n* Switched from a custom null-terminated string view class to ``string_view``\n  in the format API and provided ``fmt::string_view`` which implements a subset\n  of ``std::string_view`` API for pre-C++17 systems.\n\n* Added support for ``std::experimental::string_view``\n  (`#607 <https://github.com/fmtlib/fmt/pull/607>`_):\n\n  .. code:: c++\n\n     #include <fmt/core.h>\n     #include <experimental/string_view>\n\n     fmt::print(\"{}\", std::experimental::string_view(\"foo\"));\n\n  Thanks `@virgiliofornazin (Virgilio Alexandre Fornazin)\n  <https://github.com/virgiliofornazin>`__.\n\n* Allowed mixing named and automatic arguments:\n\n  .. 
code:: c++\n\n     fmt::format(\"{} {two}\", 1, fmt::arg(\"two\", 2));\n\n* Removed the write API in favor of the `format API\n  <https://fmt.dev/dev/api.html#format-api>`_ with compile-time handling of\n  format strings.\n\n* Disallowed formatting of multibyte strings into a wide character target\n  (`#606 <https://github.com/fmtlib/fmt/pull/606>`_).\n\n* Improved documentation\n  (`#515 <https://github.com/fmtlib/fmt/pull/515>`_,\n  `#614 <https://github.com/fmtlib/fmt/issues/614>`_,\n  `#617 <https://github.com/fmtlib/fmt/pull/617>`_,\n  `#661 <https://github.com/fmtlib/fmt/pull/661>`_,\n  `#680 <https://github.com/fmtlib/fmt/pull/680>`_).\n  Thanks `@ibell (Ian Bell) <https://github.com/ibell>`_,\n  `@mihaitodor (Mihai Todor) <https://github.com/mihaitodor>`_, and\n  `@johnthagen <https://github.com/johnthagen>`_.\n\n* Implemented more efficient handling of large number of format arguments.\n\n* Introduced an inline namespace for symbol versioning.\n\n* Added debug postfix ``d`` to the ``fmt`` library name\n  (`#636 <https://github.com/fmtlib/fmt/issues/636>`_).\n\n* Removed unnecessary ``fmt/`` prefix in includes\n  (`#397 <https://github.com/fmtlib/fmt/pull/397>`_).\n  Thanks `@chronoxor (Ivan Shynkarenka) <https://github.com/chronoxor>`_.\n\n* Moved ``fmt/*.h`` to ``include/fmt/*.h`` to prevent irrelevant files and\n  directories appearing on the include search paths when fmt is used as a\n  subproject and moved source files to the ``src`` directory.\n\n* Added qmake project file ``support/fmt.pro``\n  (`#641 <https://github.com/fmtlib/fmt/pull/641>`_).\n  Thanks `@cowo78 (Giuseppe Corbelli) <https://github.com/cowo78>`_.\n\n* Added Gradle build file ``support/build.gradle``\n  (`#649 <https://github.com/fmtlib/fmt/pull/649>`_).\n  Thanks `@luncliff (Park DongHa) <https://github.com/luncliff>`_.\n\n* Removed ``FMT_CPPFORMAT`` CMake option.\n\n* Fixed a name conflict with the macro ``CHAR_WIDTH`` in glibc\n  (`#616 
<https://github.com/fmtlib/fmt/pull/616>`_).\n  Thanks `@aroig (Abdó Roig-Maranges) <https://github.com/aroig>`_.\n\n* Fixed handling of nested braces in ``fmt::join``\n  (`#638 <https://github.com/fmtlib/fmt/issues/638>`_).\n\n* Added ``SOURCELINK_SUFFIX`` for compatibility with Sphinx 1.5\n  (`#497 <https://github.com/fmtlib/fmt/pull/497>`_).\n  Thanks `@ginggs (Graham Inggs) <https://github.com/ginggs>`_.\n\n* Added a missing ``inline`` in the header-only mode\n  (`#626 <https://github.com/fmtlib/fmt/pull/626>`_).\n  Thanks `@aroig (Abdó Roig-Maranges) <https://github.com/aroig>`_.\n\n* Fixed various compiler warnings\n  (`#640 <https://github.com/fmtlib/fmt/pull/640>`_,\n  `#656 <https://github.com/fmtlib/fmt/pull/656>`_,\n  `#679 <https://github.com/fmtlib/fmt/pull/679>`_,\n  `#681 <https://github.com/fmtlib/fmt/pull/681>`_,\n  `#705 <https://github.com/fmtlib/fmt/pull/705>`__,\n  `#715 <https://github.com/fmtlib/fmt/issues/715>`_,\n  `#717 <https://github.com/fmtlib/fmt/pull/717>`_,\n  `#720 <https://github.com/fmtlib/fmt/pull/720>`_,\n  `#723 <https://github.com/fmtlib/fmt/pull/723>`_,\n  `#726 <https://github.com/fmtlib/fmt/pull/726>`_,\n  `#730 <https://github.com/fmtlib/fmt/pull/730>`_,\n  `#739 <https://github.com/fmtlib/fmt/pull/739>`_).\n  Thanks `@peterbell10 <https://github.com/peterbell10>`_,\n  `@LarsGullik <https://github.com/LarsGullik>`_,\n  `@foonathan (Jonathan Müller) <https://github.com/foonathan>`_,\n  `@eliaskosunen (Elias Kosunen) <https://github.com/eliaskosunen>`_,\n  `@christianparpart (Christian Parpart) <https://github.com/christianparpart>`_,\n  `@DanielaE (Daniela Engert) <https://github.com/DanielaE>`_,\n  and `@mwinterb <https://github.com/mwinterb>`_.\n\n* Worked around an MSVC bug and fixed several warnings\n  (`#653 <https://github.com/fmtlib/fmt/pull/653>`_).\n  Thanks `@alabuzhev (Alex Alabuzhev) <https://github.com/alabuzhev>`_.\n\n* Worked around GCC bug 67371\n  (`#682 <https://github.com/fmtlib/fmt/issues/682>`_).\n\n* 
Fixed compilation with ``-fno-exceptions``\n  (`#655 <https://github.com/fmtlib/fmt/pull/655>`_).\n  Thanks `@chenxiaolong (Andrew Gunnerson) <https://github.com/chenxiaolong>`_.\n\n* Made ``constexpr remove_prefix`` gcc version check tighter\n  (`#648 <https://github.com/fmtlib/fmt/issues/648>`_).\n\n* Renamed internal type enum constants to prevent collision with poorly written\n  C libraries (`#644 <https://github.com/fmtlib/fmt/issues/644>`_).\n\n* Added detection of ``wostream operator<<``\n  (`#650 <https://github.com/fmtlib/fmt/issues/650>`_).\n\n* Fixed compilation on OpenBSD\n  (`#660 <https://github.com/fmtlib/fmt/pull/660>`_).\n  Thanks `@hubslave <https://github.com/hubslave>`_.\n\n* Fixed compilation on FreeBSD 12\n  (`#732 <https://github.com/fmtlib/fmt/pull/732>`_).\n  Thanks `@dankm <https://github.com/dankm>`_.\n\n* Fixed compilation when there is a mismatch between ``-std`` options between\n  the library and user code\n  (`#664 <https://github.com/fmtlib/fmt/issues/664>`_).\n\n* Fixed compilation with GCC 7 and ``-std=c++11``\n  (`#734 <https://github.com/fmtlib/fmt/issues/734>`_).\n\n* Improved generated binary code on GCC 7 and older\n  (`#668 <https://github.com/fmtlib/fmt/issues/668>`_).\n\n* Fixed handling of numeric alignment with no width \n  (`#675 <https://github.com/fmtlib/fmt/issues/675>`_).\n\n* Fixed handling of empty strings in UTF8/16 converters\n  (`#676 <https://github.com/fmtlib/fmt/pull/676>`_).\n  Thanks `@vgalka-sl (Vasili Galka) <https://github.com/vgalka-sl>`_.\n\n* Fixed formatting of an empty ``string_view``\n  (`#689 <https://github.com/fmtlib/fmt/issues/689>`_).\n\n* Fixed detection of ``string_view`` on libc++ \n  (`#686 <https://github.com/fmtlib/fmt/issues/686>`_).\n\n* Fixed DLL issues (`#696 <https://github.com/fmtlib/fmt/pull/696>`_).\n  Thanks `@sebkoenig <https://github.com/sebkoenig>`_.\n\n* Fixed compile checks for mixing narrow and wide strings\n  (`#690 <https://github.com/fmtlib/fmt/issues/690>`_).\n\n* 
Disabled unsafe implicit conversion to ``std::string``\n  (`#729 <https://github.com/fmtlib/fmt/issues/729>`_).\n\n* Fixed handling of reused format specs (as in ``fmt::join``) for pointers\n  (`#725 <https://github.com/fmtlib/fmt/pull/725>`_).\n  Thanks `@mwinterb <https://github.com/mwinterb>`_.\n\n* Fixed installation of ``fmt/ranges.h``\n  (`#738 <https://github.com/fmtlib/fmt/pull/738>`_).\n  Thanks `@sv1990 <https://github.com/sv1990>`_.\n\n4.1.0 - 2017-12-20\n------------------\n\n* Added ``fmt::to_wstring()`` in addition to ``fmt::to_string()``\n  (`#559 <https://github.com/fmtlib/fmt/pull/559>`_).\n  Thanks `@alabuzhev (Alex Alabuzhev) <https://github.com/alabuzhev>`_.\n\n* Added support for C++17 ``std::string_view``\n  (`#571 <https://github.com/fmtlib/fmt/pull/571>`_ and\n  `#578 <https://github.com/fmtlib/fmt/pull/578>`_).\n  Thanks `@thelostt (Mário Feroldi) <https://github.com/thelostt>`_ and\n  `@mwinterb <https://github.com/mwinterb>`_.\n\n* Enabled stream exceptions to catch errors\n  (`#581 <https://github.com/fmtlib/fmt/issues/581>`_).\n  Thanks `@crusader-mike <https://github.com/crusader-mike>`_.\n\n* Allowed formatting of class hierarchies with ``fmt::format_arg()``\n  (`#547 <https://github.com/fmtlib/fmt/pull/547>`_).\n  Thanks `@rollbear (Björn Fahller) <https://github.com/rollbear>`_.\n\n* Removed limitations on character types\n  (`#563 <https://github.com/fmtlib/fmt/pull/563>`_).\n  Thanks `@Yelnats321 (Elnar Dakeshov) <https://github.com/Yelnats321>`_.\n\n* Conditionally enabled use of ``std::allocator_traits``\n  (`#583 <https://github.com/fmtlib/fmt/pull/583>`_).\n  Thanks `@mwinterb <https://github.com/mwinterb>`_.\n\n* Added support for ``const`` variadic member function emulation with\n  ``FMT_VARIADIC_CONST`` (`#591 <https://github.com/fmtlib/fmt/pull/591>`_).\n  Thanks `@ludekvodicka (Ludek Vodicka) <https://github.com/ludekvodicka>`_.\n\n* Various bugfixes: bad overflow check, unsupported implicit type conversion\n  when 
determining formatting function, test segfaults\n  (`#551 <https://github.com/fmtlib/fmt/issues/551>`_), ill-formed macros\n  (`#542 <https://github.com/fmtlib/fmt/pull/542>`_) and ambiguous overloads\n  (`#580 <https://github.com/fmtlib/fmt/issues/580>`_).\n  Thanks `@xylosper (Byoung-young Lee) <https://github.com/xylosper>`_.\n\n* Prevented warnings on MSVC (`#605 <https://github.com/fmtlib/fmt/pull/605>`_,\n  `#602 <https://github.com/fmtlib/fmt/pull/602>`_, and\n  `#545 <https://github.com/fmtlib/fmt/pull/545>`_),\n  clang (`#582 <https://github.com/fmtlib/fmt/pull/582>`_),\n  GCC (`#573 <https://github.com/fmtlib/fmt/issues/573>`_),\n  various conversion warnings (`#609 <https://github.com/fmtlib/fmt/pull/609>`_,\n  `#567 <https://github.com/fmtlib/fmt/pull/567>`_,\n  `#553 <https://github.com/fmtlib/fmt/pull/553>`_ and\n  `#553 <https://github.com/fmtlib/fmt/pull/553>`_), and added ``override`` and\n  ``[[noreturn]]`` (`#549 <https://github.com/fmtlib/fmt/pull/549>`_ and\n  `#555 <https://github.com/fmtlib/fmt/issues/555>`_).\n  Thanks `@alabuzhev (Alex Alabuzhev) <https://github.com/alabuzhev>`_,\n  `@virgiliofornazin (Virgilio Alexandre Fornazin)\n  <https://github.com/virgiliofornazin>`_,\n  `@alexanderbock (Alexander Bock) <https://github.com/alexanderbock>`_,\n  `@yumetodo <https://github.com/yumetodo>`_,\n  `@VaderY (Császár Mátyás) <https://github.com/VaderY>`_,\n  `@jpcima (JP Cimalando) <https://github.com/jpcima>`_,\n  `@thelostt (Mário Feroldi) <https://github.com/thelostt>`_, and\n  `@Manu343726 (Manu Sánchez) <https://github.com/Manu343726>`_.\n\n* Improved CMake: Used ``GNUInstallDirs`` to set installation location\n  (`#610 <https://github.com/fmtlib/fmt/pull/610>`_) and fixed warnings\n  (`#536 <https://github.com/fmtlib/fmt/pull/536>`_ and\n  `#556 <https://github.com/fmtlib/fmt/pull/556>`_).\n  Thanks `@mikecrowe (Mike Crowe) <https://github.com/mikecrowe>`_,\n  `@evgen231 <https://github.com/evgen231>`_ and\n  `@henryiii (Henry Schreiner) 
<https://github.com/henryiii>`_.\n\n4.0.0 - 2017-06-27\n------------------\n\n* Removed old compatibility headers ``cppformat/*.h`` and CMake options\n  (`#527 <https://github.com/fmtlib/fmt/pull/527>`_).\n  Thanks `@maddinat0r (Alex Martin) <https://github.com/maddinat0r>`_.\n\n* Added ``string.h`` containing ``fmt::to_string()`` as alternative to\n  ``std::to_string()`` as well as other string writer functionality\n  (`#326 <https://github.com/fmtlib/fmt/issues/326>`_ and\n  `#441 <https://github.com/fmtlib/fmt/pull/441>`_):\n\n  .. code:: c++\n\n    #include \"fmt/string.h\"\n  \n    std::string answer = fmt::to_string(42);\n\n  Thanks to `@glebov-andrey (Andrey Glebov)\n  <https://github.com/glebov-andrey>`_.\n\n* Moved ``fmt::printf()`` to new ``printf.h`` header and allowed ``%s`` as\n  generic specifier (`#453 <https://github.com/fmtlib/fmt/pull/453>`_),\n  made ``%.f`` more conformant to regular ``printf()``\n  (`#490 <https://github.com/fmtlib/fmt/pull/490>`_), added custom writer\n  support (`#476 <https://github.com/fmtlib/fmt/issues/476>`_) and implemented\n  missing custom argument formatting\n  (`#339 <https://github.com/fmtlib/fmt/pull/339>`_ and\n  `#340 <https://github.com/fmtlib/fmt/pull/340>`_):\n\n  .. 
code:: c++\n\n    #include \"fmt/printf.h\"\n \n    // %s format specifier can be used with any argument type.\n    fmt::printf(\"%s\", 42);\n\n  Thanks `@mojoBrendan <https://github.com/mojoBrendan>`_,\n  `@manylegged (Arthur Danskin) <https://github.com/manylegged>`_ and\n  `@spacemoose (Glen Stark) <https://github.com/spacemoose>`_.\n  See also `#360 <https://github.com/fmtlib/fmt/issues/360>`_,\n  `#335 <https://github.com/fmtlib/fmt/issues/335>`_ and\n  `#331 <https://github.com/fmtlib/fmt/issues/331>`_.\n\n* Added ``container.h`` containing a ``BasicContainerWriter``\n  to write to containers like ``std::vector``\n  (`#450 <https://github.com/fmtlib/fmt/pull/450>`_).\n  Thanks `@polyvertex (Jean-Charles Lefebvre) <https://github.com/polyvertex>`_.\n\n* Added ``fmt::join()`` function that takes a range and formats\n  its elements separated by a given string\n  (`#466 <https://github.com/fmtlib/fmt/pull/466>`_):\n\n  .. code:: c++\n\n    #include \"fmt/format.h\"\n \n    std::vector<double> v = {1.2, 3.4, 5.6};\n    // Prints \"(+01.20, +03.40, +05.60)\".\n    fmt::print(\"({:+06.2f})\", fmt::join(v.begin(), v.end(), \", \"));\n\n  Thanks `@olivier80 <https://github.com/olivier80>`_.\n\n* Added support for custom formatting specifications to simplify customization\n  of built-in formatting (`#444 <https://github.com/fmtlib/fmt/pull/444>`_).\n  Thanks `@polyvertex (Jean-Charles Lefebvre) <https://github.com/polyvertex>`_.\n  See also `#439 <https://github.com/fmtlib/fmt/issues/439>`_.\n\n* Added ``fmt::format_system_error()`` for error code formatting\n  (`#323 <https://github.com/fmtlib/fmt/issues/323>`_ and\n  `#526 <https://github.com/fmtlib/fmt/pull/526>`_).\n  Thanks `@maddinat0r (Alex Martin) <https://github.com/maddinat0r>`_.\n\n* Added thread-safe ``fmt::localtime()`` and ``fmt::gmtime()``\n  as replacement   for the standard version to ``time.h``\n  (`#396 <https://github.com/fmtlib/fmt/pull/396>`_).\n  Thanks `@codicodi 
<https://github.com/codicodi>`_.\n\n* Internal improvements to ``NamedArg`` and ``ArgLists``\n  (`#389 <https://github.com/fmtlib/fmt/pull/389>`_ and\n  `#390 <https://github.com/fmtlib/fmt/pull/390>`_).\n  Thanks `@chronoxor <https://github.com/chronoxor>`_.\n\n* Fixed crash due to bug in ``FormatBuf``\n  (`#493 <https://github.com/fmtlib/fmt/pull/493>`_).\n  Thanks `@effzeh <https://github.com/effzeh>`_. See also\n  `#480 <https://github.com/fmtlib/fmt/issues/480>`_ and\n  `#491 <https://github.com/fmtlib/fmt/issues/491>`_.\n\n* Fixed handling of wide strings in ``fmt::StringWriter``.\n\n* Improved compiler error messages\n  (`#357 <https://github.com/fmtlib/fmt/issues/357>`_).\n\n* Fixed various warnings and issues with various compilers\n  (`#494 <https://github.com/fmtlib/fmt/pull/494>`_,\n  `#499 <https://github.com/fmtlib/fmt/pull/499>`_,\n  `#483 <https://github.com/fmtlib/fmt/pull/483>`_,\n  `#485 <https://github.com/fmtlib/fmt/pull/485>`_,\n  `#482 <https://github.com/fmtlib/fmt/pull/482>`_,\n  `#475 <https://github.com/fmtlib/fmt/pull/475>`_,\n  `#473 <https://github.com/fmtlib/fmt/pull/473>`_ and\n  `#414 <https://github.com/fmtlib/fmt/pull/414>`_).\n  Thanks `@chronoxor <https://github.com/chronoxor>`_,\n  `@zhaohuaxishi <https://github.com/zhaohuaxishi>`_,\n  `@pkestene (Pierre Kestener) <https://github.com/pkestene>`_,\n  `@dschmidt (Dominik Schmidt) <https://github.com/dschmidt>`_ and\n  `@0x414c (Alexey Gorishny) <https://github.com/0x414c>`_ .\n\n* Improved CMake: targets are now namespaced\n  (`#511 <https://github.com/fmtlib/fmt/pull/511>`_ and\n  `#513 <https://github.com/fmtlib/fmt/pull/513>`_), supported header-only\n  ``printf.h`` (`#354 <https://github.com/fmtlib/fmt/pull/354>`_), fixed issue\n  with minimal supported library subset\n  (`#418 <https://github.com/fmtlib/fmt/issues/418>`_,\n  `#419 <https://github.com/fmtlib/fmt/pull/419>`_ and\n  `#420 <https://github.com/fmtlib/fmt/pull/420>`_).\n  Thanks `@bjoernthiel (Bjoern Thiel) 
<https://github.com/bjoernthiel>`_,\n  `@niosHD (Mario Werner) <https://github.com/niosHD>`_,\n  `@LogicalKnight (Sean LK) <https://github.com/LogicalKnight>`_ and\n  `@alabuzhev (Alex Alabuzhev) <https://github.com/alabuzhev>`_.\n\n* Improved documentation. Thanks to\n  `@pwm1234 (Phil) <https://github.com/pwm1234>`_ for\n  `#393 <https://github.com/fmtlib/fmt/pull/393>`_.\n\n3.0.2 - 2017-06-14\n------------------\n\n* Added ``FMT_VERSION`` macro\n  (`#411 <https://github.com/fmtlib/fmt/issues/411>`_).\n\n* Used ``FMT_NULL`` instead of literal ``0``\n  (`#409 <https://github.com/fmtlib/fmt/pull/409>`_).\n  Thanks `@alabuzhev (Alex Alabuzhev) <https://github.com/alabuzhev>`_.\n\n* Added extern templates for ``format_float``\n  (`#413 <https://github.com/fmtlib/fmt/issues/413>`_).\n\n* Fixed implicit conversion issue\n  (`#507 <https://github.com/fmtlib/fmt/issues/507>`_).\n\n* Fixed signbit detection (`#423 <https://github.com/fmtlib/fmt/issues/423>`_).\n\n* Fixed naming collision (`#425 <https://github.com/fmtlib/fmt/issues/425>`_).\n\n* Fixed missing intrinsic for C++/CLI\n  (`#457 <https://github.com/fmtlib/fmt/pull/457>`_).\n  Thanks `@calumr (Calum Robinson) <https://github.com/calumr>`_\n\n* Fixed Android detection (`#458 <https://github.com/fmtlib/fmt/pull/458>`_).\n  Thanks `@Gachapen (Magnus Bjerke Vik) <https://github.com/Gachapen>`_.\n\n* Use lean ``windows.h`` if not in header-only mode\n  (`#503 <https://github.com/fmtlib/fmt/pull/503>`_).\n  Thanks `@Quentin01 (Quentin Buathier) <https://github.com/Quentin01>`_.\n\n* Fixed issue with CMake exporting C++11 flag\n  (`#445 <https://github.com/fmtlib/fmt/pull/455>`_).\n  Thanks `@EricWF (Eric) <https://github.com/EricWF>`_.\n\n* Fixed issue with nvcc and MSVC compiler bug and MinGW\n  (`#505 <https://github.com/fmtlib/fmt/issues/505>`_).\n\n* Fixed DLL issues (`#469 <https://github.com/fmtlib/fmt/pull/469>`_ and\n  `#502 <https://github.com/fmtlib/fmt/pull/502>`_).\n  Thanks `@richardeakin (Richard Eakin) 
<https://github.com/richardeakin>`_ and\n  `@AndreasSchoenle (Andreas Schönle) <https://github.com/AndreasSchoenle>`_.\n\n* Fixed test compilation under FreeBSD\n  (`#433 <https://github.com/fmtlib/fmt/issues/433>`_).\n\n* Fixed various warnings (`#403 <https://github.com/fmtlib/fmt/pull/403>`_,\n  `#410 <https://github.com/fmtlib/fmt/pull/410>`_ and\n  `#510 <https://github.com/fmtlib/fmt/pull/510>`_).\n  Thanks `@Lecetem <https://github.com/Lectem>`_,\n  `@chenhayat (Chen Hayat) <https://github.com/chenhayat>`_ and\n  `@trozen <https://github.com/trozen>`_.\n\n* Worked around a broken ``__builtin_clz`` in clang with MS codegen\n  (`#519 <https://github.com/fmtlib/fmt/issues/519>`_).\n\n* Removed redundant include\n  (`#479 <https://github.com/fmtlib/fmt/issues/479>`_).\n\n* Fixed documentation issues.\n\n3.0.1 - 2016-11-01\n------------------\n* Fixed handling of thousands separator\n  (`#353 <https://github.com/fmtlib/fmt/issues/353>`_).\n\n* Fixed handling of ``unsigned char`` strings\n  (`#373 <https://github.com/fmtlib/fmt/issues/373>`_).\n\n* Corrected buffer growth when formatting time\n  (`#367 <https://github.com/fmtlib/fmt/issues/367>`_).\n\n* Removed warnings under MSVC and clang\n  (`#318 <https://github.com/fmtlib/fmt/issues/318>`_,\n  `#250 <https://github.com/fmtlib/fmt/issues/250>`_, also merged\n  `#385 <https://github.com/fmtlib/fmt/pull/385>`_ and\n  `#361 <https://github.com/fmtlib/fmt/pull/361>`_).\n  Thanks `@jcelerier (Jean-Michaël Celerier) <https://github.com/jcelerier>`_\n  and `@nmoehrle (Nils Moehrle) <https://github.com/nmoehrle>`_.\n\n* Fixed compilation issues under Android\n  (`#327 <https://github.com/fmtlib/fmt/pull/327>`_,\n  `#345 <https://github.com/fmtlib/fmt/issues/345>`_ and\n  `#381 <https://github.com/fmtlib/fmt/pull/381>`_),\n  FreeBSD (`#358 <https://github.com/fmtlib/fmt/pull/358>`_),\n  Cygwin (`#388 <https://github.com/fmtlib/fmt/issues/388>`_),\n  MinGW (`#355 <https://github.com/fmtlib/fmt/issues/355>`_) as well as 
other\n  issues (`#350 <https://github.com/fmtlib/fmt/issues/350>`_,\n  `#366 <https://github.com/fmtlib/fmt/issues/366>`_,\n  `#348 <https://github.com/fmtlib/fmt/pull/348>`_,\n  `#402 <https://github.com/fmtlib/fmt/pull/402>`_,\n  `#405 <https://github.com/fmtlib/fmt/pull/405>`_).\n  Thanks to `@dpantele (Dmitry) <https://github.com/dpantele>`_,\n  `@hghwng (Hugh Wang) <https://github.com/hghwng>`_,\n  `@arvedarved (Tilman Keskinöz) <https://github.com/arvedarved>`_,\n  `@LogicalKnight (Sean) <https://github.com/LogicalKnight>`_ and\n  `@JanHellwig (Jan Hellwig) <https://github.com/janhellwig>`_.\n\n* Fixed some documentation issues and extended specification\n  (`#320 <https://github.com/fmtlib/fmt/issues/320>`_,\n  `#333 <https://github.com/fmtlib/fmt/pull/333>`_,\n  `#347 <https://github.com/fmtlib/fmt/issues/347>`_,\n  `#362 <https://github.com/fmtlib/fmt/pull/362>`_).\n  Thanks to `@smellman (Taro Matsuzawa aka. btm)\n  <https://github.com/smellman>`_.\n\n3.0.0 - 2016-05-07\n------------------\n\n* The project has been renamed from C++ Format (cppformat) to fmt for\n  consistency with the used namespace and macro prefix\n  (`#307 <https://github.com/fmtlib/fmt/issues/307>`_).\n  Library headers are now located in the ``fmt`` directory:\n\n  .. code:: c++\n\n    #include \"fmt/format.h\"\n\n  Including ``format.h`` from the ``cppformat`` directory is deprecated\n  but works via a proxy header which will be removed in the next major version.\n  \n  The documentation is now available at https://fmt.dev.\n\n* Added support for `strftime <http://en.cppreference.com/w/cpp/chrono/c/strftime>`_-like\n  `date and time formatting <https://fmt.dev/3.0.0/api.html#date-and-time-formatting>`_\n  (`#283 <https://github.com/fmtlib/fmt/issues/283>`_):\n\n  .. 
code:: c++\n\n    #include \"fmt/time.h\"\n\n    std::time_t t = std::time(nullptr);\n    // Prints \"The date is 2016-04-29.\" (with the current date)\n    fmt::print(\"The date is {:%Y-%m-%d}.\", *std::localtime(&t));\n\n* ``std::ostream`` support including formatting of user-defined types that provide\n  overloaded ``operator<<`` has been moved to ``fmt/ostream.h``:\n\n  .. code:: c++\n\n    #include \"fmt/ostream.h\"\n\n    class Date {\n      int year_, month_, day_;\n    public:\n      Date(int year, int month, int day) : year_(year), month_(month), day_(day) {}\n\n      friend std::ostream &operator<<(std::ostream &os, const Date &d) {\n        return os << d.year_ << '-' << d.month_ << '-' << d.day_;\n      }\n    };\n\n    std::string s = fmt::format(\"The date is {}\", Date(2012, 12, 9));\n    // s == \"The date is 2012-12-9\"\n\n* Added support for `custom argument formatters\n  <https://fmt.dev/3.0.0/api.html#argument-formatters>`_\n  (`#235 <https://github.com/fmtlib/fmt/issues/235>`_).\n\n* Added support for locale-specific integer formatting with the ``n`` specifier\n  (`#305 <https://github.com/fmtlib/fmt/issues/305>`_):\n\n  .. code:: c++\n\n    std::setlocale(LC_ALL, \"en_US.utf8\");\n    fmt::print(\"cppformat: {:n}\\n\", 1234567); // prints 1,234,567\n\n* Sign is now preserved when formatting an integer with an incorrect ``printf``\n  format specifier (`#265 <https://github.com/fmtlib/fmt/issues/265>`_):\n\n  .. code:: c++\n\n    fmt::printf(\"%lld\", -42); // prints -42\n\n  Note that it would be an undefined behavior in ``std::printf``.\n\n* Length modifiers such as ``ll`` are now optional in printf formatting\n  functions and the correct type is determined automatically\n  (`#255 <https://github.com/fmtlib/fmt/issues/255>`_):\n\n  .. 
code:: c++\n\n    fmt::printf(\"%d\", std::numeric_limits<long long>::max());\n\n  Note that it would be an undefined behavior in ``std::printf``.\n\n* Added initial support for custom formatters\n  (`#231 <https://github.com/fmtlib/fmt/issues/231>`_).\n\n* Fixed detection of user-defined literal support on Intel C++ compiler\n  (`#311 <https://github.com/fmtlib/fmt/issues/311>`_,\n  `#312 <https://github.com/fmtlib/fmt/pull/312>`_).\n  Thanks to `@dean0x7d (Dean Moldovan) <https://github.com/dean0x7d>`_ and\n  `@speth (Ray Speth) <https://github.com/speth>`_.\n\n* Reduced compile time\n  (`#243 <https://github.com/fmtlib/fmt/pull/243>`_,\n  `#249 <https://github.com/fmtlib/fmt/pull/249>`_,\n  `#317 <https://github.com/fmtlib/fmt/issues/317>`_):\n\n  .. image:: https://cloud.githubusercontent.com/assets/4831417/11614060/\n             b9e826d2-9c36-11e5-8666-d4131bf503ef.png\n\n  .. image:: https://cloud.githubusercontent.com/assets/4831417/11614080/\n             6ac903cc-9c37-11e5-8165-26df6efae364.png\n\n  Thanks to `@dean0x7d (Dean Moldovan) <https://github.com/dean0x7d>`_.\n\n* Compile test fixes (`#313 <https://github.com/fmtlib/fmt/pull/313>`_).\n  Thanks to `@dean0x7d (Dean Moldovan) <https://github.com/dean0x7d>`_.\n\n* Documentation fixes (`#239 <https://github.com/fmtlib/fmt/pull/239>`_,\n  `#248 <https://github.com/fmtlib/fmt/issues/248>`_,\n  `#252 <https://github.com/fmtlib/fmt/issues/252>`_,\n  `#258 <https://github.com/fmtlib/fmt/pull/258>`_,\n  `#260 <https://github.com/fmtlib/fmt/issues/260>`_,\n  `#301 <https://github.com/fmtlib/fmt/issues/301>`_,\n  `#309 <https://github.com/fmtlib/fmt/pull/309>`_).\n  Thanks to `@ReadmeCritic <https://github.com/ReadmeCritic>`_\n  `@Gachapen (Magnus Bjerke Vik) <https://github.com/Gachapen>`_ and\n  `@jwilk (Jakub Wilk) <https://github.com/jwilk>`_.\n\n* Fixed compiler and sanitizer warnings\n  (`#244 <https://github.com/fmtlib/fmt/issues/244>`_,\n  `#256 <https://github.com/fmtlib/fmt/pull/256>`_,\n  `#259 
<https://github.com/fmtlib/fmt/pull/259>`_,\n  `#263 <https://github.com/fmtlib/fmt/issues/263>`_,\n  `#274 <https://github.com/fmtlib/fmt/issues/274>`_,\n  `#277 <https://github.com/fmtlib/fmt/pull/277>`_,\n  `#286 <https://github.com/fmtlib/fmt/pull/286>`_,\n  `#291 <https://github.com/fmtlib/fmt/issues/291>`_,\n  `#296 <https://github.com/fmtlib/fmt/issues/296>`_,\n  `#308 <https://github.com/fmtlib/fmt/issues/308>`_)\n  Thanks to `@mwinterb <https://github.com/mwinterb>`_,\n  `@pweiskircher (Patrik Weiskircher) <https://github.com/pweiskircher>`_,\n  `@Naios <https://github.com/Naios>`_.\n\n* Improved compatibility with Windows Store apps\n  (`#280 <https://github.com/fmtlib/fmt/issues/280>`_,\n  `#285 <https://github.com/fmtlib/fmt/pull/285>`_)\n  Thanks to `@mwinterb <https://github.com/mwinterb>`_.\n\n* Added tests of compatibility with older C++ standards\n  (`#273 <https://github.com/fmtlib/fmt/pull/273>`_).\n  Thanks to `@niosHD <https://github.com/niosHD>`_.\n\n* Fixed Android build (`#271 <https://github.com/fmtlib/fmt/pull/271>`_).\n  Thanks to `@newnon <https://github.com/newnon>`_.\n\n* Changed ``ArgMap`` to be backed by a vector instead of a map.\n  (`#261 <https://github.com/fmtlib/fmt/issues/261>`_,\n  `#262 <https://github.com/fmtlib/fmt/pull/262>`_).\n  Thanks to `@mwinterb <https://github.com/mwinterb>`_.\n\n* Added ``fprintf`` overload that writes to a ``std::ostream``\n  (`#251 <https://github.com/fmtlib/fmt/pull/251>`_).\n  Thanks to `nickhutchinson (Nicholas Hutchinson) <https://github.com/nickhutchinson>`_.\n\n* Export symbols when building a Windows DLL\n  (`#245 <https://github.com/fmtlib/fmt/pull/245>`_).\n  Thanks to `macdems (Maciek Dems) <https://github.com/macdems>`_.\n\n* Fixed compilation on Cygwin (`#304 <https://github.com/fmtlib/fmt/issues/304>`_).\n\n* Implemented a workaround for a bug in Apple LLVM version 4.2 of clang\n  (`#276 <https://github.com/fmtlib/fmt/issues/276>`_).\n\n* Implemented a workaround for Google Test 
bug\n  `#705 <https://github.com/google/googletest/issues/705>`_ on gcc 6\n  (`#268 <https://github.com/fmtlib/fmt/issues/268>`_).\n  Thanks to `octoploid <https://github.com/octoploid>`_.\n\n* Removed Biicode support because the latter has been discontinued.\n\n2.1.1 - 2016-04-11\n------------------\n\n* The install location for generated CMake files is now configurable via\n  the ``FMT_CMAKE_DIR`` CMake variable\n  (`#299 <https://github.com/fmtlib/fmt/pull/299>`_).\n  Thanks to `@niosHD <https://github.com/niosHD>`_.\n\n* Documentation fixes (`#252 <https://github.com/fmtlib/fmt/issues/252>`_).\n\n2.1.0 - 2016-03-21\n------------------\n\n* Project layout and build system improvements\n  (`#267 <https://github.com/fmtlib/fmt/pull/267>`_):\n\n  * The code have been moved to the ``cppformat`` directory.\n    Including ``format.h`` from the top-level directory is deprecated\n    but works via a proxy header which will be removed in the next\n    major version.\n\n  * C++ Format CMake targets now have proper interface definitions.\n\n  * Installed version of the library now supports the header-only\n    configuration.\n\n  * Targets ``doc``, ``install``, and ``test`` are now disabled if C++ Format\n    is included as a CMake subproject. 
They can be enabled by setting\n    ``FMT_DOC``, ``FMT_INSTALL``, and ``FMT_TEST`` in the parent project.\n\n  Thanks to `@niosHD <https://github.com/niosHD>`_.\n\n2.0.1 - 2016-03-13\n------------------\n\n* Improved CMake find and package support\n  (`#264 <https://github.com/fmtlib/fmt/issues/264>`_).\n  Thanks to `@niosHD <https://github.com/niosHD>`_.\n\n* Fix compile error with Android NDK and mingw32\n  (`#241 <https://github.com/fmtlib/fmt/issues/241>`_).\n  Thanks to `@Gachapen (Magnus Bjerke Vik) <https://github.com/Gachapen>`_.\n\n* Documentation fixes\n  (`#248 <https://github.com/fmtlib/fmt/issues/248>`_,\n  `#260 <https://github.com/fmtlib/fmt/issues/260>`_).\n\n2.0.0 - 2015-12-01\n------------------\n\nGeneral\n~~~~~~~\n\n* [Breaking] Named arguments\n  (`#169 <https://github.com/fmtlib/fmt/pull/169>`_,\n  `#173 <https://github.com/fmtlib/fmt/pull/173>`_,\n  `#174 <https://github.com/fmtlib/fmt/pull/174>`_):\n\n  .. code:: c++\n\n    fmt::print(\"The answer is {answer}.\", fmt::arg(\"answer\", 42));\n\n  Thanks to `@jamboree <https://github.com/jamboree>`_.\n\n* [Experimental] User-defined literals for format and named arguments\n  (`#204 <https://github.com/fmtlib/fmt/pull/204>`_,\n  `#206 <https://github.com/fmtlib/fmt/pull/206>`_,\n  `#207 <https://github.com/fmtlib/fmt/pull/207>`_):\n\n  .. code:: c++\n\n    using namespace fmt::literals;\n    fmt::print(\"The answer is {answer}.\", \"answer\"_a=42);\n\n  Thanks to `@dean0x7d (Dean Moldovan) <https://github.com/dean0x7d>`_.\n\n* [Breaking] Formatting of more than 16 arguments is now supported when using\n  variadic templates\n  (`#141 <https://github.com/fmtlib/fmt/issues/141>`_).\n  Thanks to `@Shauren <https://github.com/Shauren>`_.\n\n* Runtime width specification\n  (`#168 <https://github.com/fmtlib/fmt/pull/168>`_):\n\n  .. 
code:: c++\n\n    fmt::format(\"{0:{1}}\", 42, 5); // gives \"   42\"\n\n  Thanks to `@jamboree <https://github.com/jamboree>`_.\n\n* [Breaking] Enums are now formatted with an overloaded ``std::ostream`` insertion\n  operator (``operator<<``) if available\n  (`#232 <https://github.com/fmtlib/fmt/issues/232>`_).\n\n* [Breaking] Changed default ``bool`` format to textual, \"true\" or \"false\"\n  (`#170 <https://github.com/fmtlib/fmt/issues/170>`_):\n\n  .. code:: c++\n  \n    fmt::print(\"{}\", true); // prints \"true\"\n\n  To print ``bool`` as a number use numeric format specifier such as ``d``:\n\n  .. code:: c++\n\n    fmt::print(\"{:d}\", true); // prints \"1\"\n\n* ``fmt::printf`` and ``fmt::sprintf`` now support formatting of ``bool`` with the\n  ``%s`` specifier giving textual output, \"true\" or \"false\"\n  (`#223 <https://github.com/fmtlib/fmt/pull/223>`_):\n\n  .. code:: c++\n\n    fmt::printf(\"%s\", true); // prints \"true\"\n\n  Thanks to `@LarsGullik <https://github.com/LarsGullik>`_.\n\n* [Breaking] ``signed char`` and ``unsigned char`` are now formatted as integers by default\n  (`#217 <https://github.com/fmtlib/fmt/pull/217>`_).\n\n* [Breaking] Pointers to C strings can now be formatted with the ``p`` specifier\n  (`#223 <https://github.com/fmtlib/fmt/pull/223>`_):\n\n  .. code:: c++\n\n    fmt::print(\"{:p}\", \"test\"); // prints pointer value\n\n  Thanks to `@LarsGullik <https://github.com/LarsGullik>`_.\n\n* [Breaking] ``fmt::printf`` and ``fmt::sprintf`` now print null pointers as ``(nil)``\n  and null strings as ``(null)`` for consistency with glibc\n  (`#226 <https://github.com/fmtlib/fmt/pull/226>`_).\n  Thanks to `@LarsGullik <https://github.com/LarsGullik>`_.\n\n* [Breaking] ``fmt::(s)printf`` now supports formatting of objects of user-defined types\n  that provide an overloaded ``std::ostream`` insertion operator (``operator<<``)\n  (`#201 <https://github.com/fmtlib/fmt/issues/201>`_):\n\n  .. 
code:: c++\n\n    fmt::printf(\"The date is %s\", Date(2012, 12, 9));\n\n* [Breaking] The ``Buffer`` template is now part of the public API and can be used\n  to implement custom memory buffers\n  (`#140 <https://github.com/fmtlib/fmt/issues/140>`_).\n  Thanks to `@polyvertex (Jean-Charles Lefebvre) <https://github.com/polyvertex>`_.\n\n* [Breaking] Improved compatibility between ``BasicStringRef`` and\n  `std::experimental::basic_string_view\n  <http://en.cppreference.com/w/cpp/experimental/basic_string_view>`_\n  (`#100 <https://github.com/fmtlib/fmt/issues/100>`_,\n  `#159 <https://github.com/fmtlib/fmt/issues/159>`_,\n  `#183 <https://github.com/fmtlib/fmt/issues/183>`_):\n\n  - Comparison operators now compare string content, not pointers\n  - ``BasicStringRef::c_str`` replaced by ``BasicStringRef::data``\n  - ``BasicStringRef`` is no longer assumed to be null-terminated\n\n  References to null-terminated strings are now represented by a new class,\n  ``BasicCStringRef``.\n\n* Dependency on pthreads introduced by Google Test is now optional\n  (`#185 <https://github.com/fmtlib/fmt/issues/185>`_).\n\n* New CMake options ``FMT_DOC``, ``FMT_INSTALL`` and ``FMT_TEST`` to control\n  generation of ``doc``, ``install`` and ``test`` targets respectively, on by default\n  (`#197 <https://github.com/fmtlib/fmt/issues/197>`_,\n  `#198 <https://github.com/fmtlib/fmt/issues/198>`_,\n  `#200 <https://github.com/fmtlib/fmt/issues/200>`_).\n  Thanks to `@maddinat0r (Alex Martin) <https://github.com/maddinat0r>`_.\n\n* ``noexcept`` is now used when compiling with MSVC2015\n  (`#215 <https://github.com/fmtlib/fmt/pull/215>`_).\n  Thanks to `@dmkrepo (Dmitriy) <https://github.com/dmkrepo>`_.\n\n* Added an option to disable use of ``windows.h`` when ``FMT_USE_WINDOWS_H``\n  is defined as 0 before including ``format.h``\n  (`#171 <https://github.com/fmtlib/fmt/issues/171>`_).\n  Thanks to `@alfps (Alf P. 
Steinbach) <https://github.com/alfps>`_.\n\n* [Breaking] ``windows.h`` is now included with ``NOMINMAX`` unless\n  ``FMT_WIN_MINMAX`` is defined. This is done to prevent breaking code using\n  ``std::min`` and ``std::max`` and only affects the header-only configuration\n  (`#152 <https://github.com/fmtlib/fmt/issues/152>`_,\n  `#153 <https://github.com/fmtlib/fmt/pull/153>`_,\n  `#154 <https://github.com/fmtlib/fmt/pull/154>`_).\n  Thanks to `@DevO2012 <https://github.com/DevO2012>`_.\n\n* Improved support for custom character types\n  (`#171 <https://github.com/fmtlib/fmt/issues/171>`_).\n  Thanks to `@alfps (Alf P. Steinbach) <https://github.com/alfps>`_.\n\n* Added an option to disable use of IOStreams when ``FMT_USE_IOSTREAMS``\n  is defined as 0 before including ``format.h``\n  (`#205 <https://github.com/fmtlib/fmt/issues/205>`_,\n  `#208 <https://github.com/fmtlib/fmt/pull/208>`_).\n  Thanks to `@JodiTheTigger <https://github.com/JodiTheTigger>`_.\n\n* Improved detection of ``isnan``, ``isinf`` and ``signbit``.\n\nOptimization\n~~~~~~~~~~~~\n\n* Made formatting of user-defined types more efficient with a custom stream buffer\n  (`#92 <https://github.com/fmtlib/fmt/issues/92>`_,\n  `#230 <https://github.com/fmtlib/fmt/pull/230>`_).\n  Thanks to `@NotImplemented <https://github.com/NotImplemented>`_.\n\n* Further improved performance of ``fmt::Writer`` on integer formatting\n  and fixed a minor regression. Now it is ~7% faster than ``karma::generate``\n  on Karma's benchmark\n  (`#186 <https://github.com/fmtlib/fmt/issues/186>`_).\n\n* [Breaking] Reduced `compiled code size\n  <https://github.com/fmtlib/fmt#compile-time-and-code-bloat>`_\n  (`#143 <https://github.com/fmtlib/fmt/issues/143>`_,\n  `#149 <https://github.com/fmtlib/fmt/pull/149>`_).\n\nDistribution\n~~~~~~~~~~~~\n\n* [Breaking] Headers are now installed in\n  ``${CMAKE_INSTALL_PREFIX}/include/cppformat``\n  (`#178 <https://github.com/fmtlib/fmt/issues/178>`_).\n  Thanks to `@jackyf (Eugene V. 
Lyubimkin) <https://github.com/jackyf>`_.\n\n* [Breaking] Changed the library name from ``format`` to ``cppformat``\n  for consistency with the project name and to avoid potential conflicts\n  (`#178 <https://github.com/fmtlib/fmt/issues/178>`_).\n  Thanks to `@jackyf (Eugene V. Lyubimkin) <https://github.com/jackyf>`_.\n\n* C++ Format is now available in `Debian <https://www.debian.org/>`_ GNU/Linux\n  (`stretch <https://packages.debian.org/source/stretch/cppformat>`_,\n  `sid <https://packages.debian.org/source/sid/cppformat>`_) and \n  derived distributions such as\n  `Ubuntu <https://launchpad.net/ubuntu/+source/cppformat>`_ 15.10 and later\n  (`#155 <https://github.com/fmtlib/fmt/issues/155>`_)::\n\n    $ sudo apt-get install libcppformat1-dev\n\n  Thanks to `@jackyf (Eugene V. Lyubimkin) <https://github.com/jackyf>`_.\n\n* `Packages for Fedora and RHEL <https://admin.fedoraproject.org/pkgdb/package/cppformat/>`_\n  are now available. Thanks to Dave Johansen.\n  \n* C++ Format can now be installed via `Homebrew <http://brew.sh/>`_ on OS X\n  (`#157 <https://github.com/fmtlib/fmt/issues/157>`_)::\n\n    $ brew install cppformat\n\n  Thanks to `@ortho <https://github.com/ortho>`_, Anatoliy Bulukin.\n\nDocumentation\n~~~~~~~~~~~~~\n\n* Migrated from ReadTheDocs to GitHub Pages for better responsiveness\n  and reliability\n  (`#128 <https://github.com/fmtlib/fmt/issues/128>`_).\n  New documentation address is http://cppformat.github.io/.\n\n\n* Added `Building the documentation\n  <https://fmt.dev/2.0.0/usage.html#building-the-documentation>`_\n  section to the documentation.\n\n* Documentation build script is now compatible with Python 3 and newer pip versions.\n  (`#189 <https://github.com/fmtlib/fmt/pull/189>`_,\n  `#209 <https://github.com/fmtlib/fmt/issues/209>`_).\n  Thanks to `@JodiTheTigger <https://github.com/JodiTheTigger>`_ and\n  `@xentec <https://github.com/xentec>`_.\n  \n* Documentation fixes and improvements\n  (`#36 
<https://github.com/fmtlib/fmt/issues/36>`_,\n  `#75 <https://github.com/fmtlib/fmt/issues/75>`_,\n  `#125 <https://github.com/fmtlib/fmt/issues/125>`_,\n  `#160 <https://github.com/fmtlib/fmt/pull/160>`_,\n  `#161 <https://github.com/fmtlib/fmt/pull/161>`_,\n  `#162 <https://github.com/fmtlib/fmt/issues/162>`_,\n  `#165 <https://github.com/fmtlib/fmt/issues/165>`_,\n  `#210 <https://github.com/fmtlib/fmt/issues/210>`_).\n  Thanks to `@syohex (Syohei YOSHIDA) <https://github.com/syohex>`_ and\n  bug reporters.\n\n* Fixed out-of-tree documentation build\n  (`#177 <https://github.com/fmtlib/fmt/issues/177>`_).\n  Thanks to `@jackyf (Eugene V. Lyubimkin) <https://github.com/jackyf>`_.\n\nFixes\n~~~~~\n\n* Fixed ``initializer_list`` detection\n  (`#136 <https://github.com/fmtlib/fmt/issues/136>`_).\n  Thanks to `@Gachapen (Magnus Bjerke Vik) <https://github.com/Gachapen>`_.\n\n* [Breaking] Fixed formatting of enums with numeric format specifiers in\n  ``fmt::(s)printf`` \n  (`#131 <https://github.com/fmtlib/fmt/issues/131>`_,\n  `#139 <https://github.com/fmtlib/fmt/issues/139>`_):\n\n  .. code:: c++\n\n    enum { ANSWER = 42 };\n    fmt::printf(\"%d\", ANSWER);\n\n  Thanks to `@Naios <https://github.com/Naios>`_.\n\n* Improved compatibility with old versions of MinGW\n  (`#129 <https://github.com/fmtlib/fmt/issues/129>`_,\n  `#130 <https://github.com/fmtlib/fmt/pull/130>`_,\n  `#132 <https://github.com/fmtlib/fmt/issues/132>`_).\n  Thanks to `@cstamford (Christopher Stamford) <https://github.com/cstamford>`_.\n\n* Fixed a compile error on MSVC with disabled exceptions\n  (`#144 <https://github.com/fmtlib/fmt/issues/144>`_).\n\n* Added a workaround for broken implementation of variadic templates in MSVC2012\n  (`#148 <https://github.com/fmtlib/fmt/issues/148>`_).\n\n* Placed the anonymous namespace within ``fmt`` namespace for the header-only\n  configuration\n  (`#171 <https://github.com/fmtlib/fmt/issues/171>`_).\n  Thanks to `@alfps (Alf P. 
Steinbach) <https://github.com/alfps>`_.\n\n* Fixed issues reported by Coverity Scan\n  (`#187 <https://github.com/fmtlib/fmt/issues/187>`_,\n  `#192 <https://github.com/fmtlib/fmt/issues/192>`_).\n\n* Implemented a workaround for a name lookup bug in MSVC2010\n  (`#188 <https://github.com/fmtlib/fmt/issues/188>`_).\n\n* Fixed compiler warnings\n  (`#95 <https://github.com/fmtlib/fmt/issues/95>`_,\n  `#96 <https://github.com/fmtlib/fmt/issues/96>`_,\n  `#114 <https://github.com/fmtlib/fmt/pull/114>`_,\n  `#135 <https://github.com/fmtlib/fmt/issues/135>`_,\n  `#142 <https://github.com/fmtlib/fmt/issues/142>`_,\n  `#145 <https://github.com/fmtlib/fmt/issues/145>`_,\n  `#146 <https://github.com/fmtlib/fmt/issues/146>`_,\n  `#158 <https://github.com/fmtlib/fmt/issues/158>`_,\n  `#163 <https://github.com/fmtlib/fmt/issues/163>`_,\n  `#175 <https://github.com/fmtlib/fmt/issues/175>`_,\n  `#190 <https://github.com/fmtlib/fmt/issues/190>`_,\n  `#191 <https://github.com/fmtlib/fmt/pull/191>`_,\n  `#194 <https://github.com/fmtlib/fmt/issues/194>`_,\n  `#196 <https://github.com/fmtlib/fmt/pull/196>`_,\n  `#216 <https://github.com/fmtlib/fmt/issues/216>`_,\n  `#218 <https://github.com/fmtlib/fmt/pull/218>`_,\n  `#220 <https://github.com/fmtlib/fmt/pull/220>`_,\n  `#229 <https://github.com/fmtlib/fmt/pull/229>`_,\n  `#233 <https://github.com/fmtlib/fmt/issues/233>`_,\n  `#234 <https://github.com/fmtlib/fmt/issues/234>`_,\n  `#236 <https://github.com/fmtlib/fmt/pull/236>`_,\n  `#281 <https://github.com/fmtlib/fmt/issues/281>`_,\n  `#289 <https://github.com/fmtlib/fmt/issues/289>`_).\n  Thanks to `@seanmiddleditch (Sean Middleditch) <https://github.com/seanmiddleditch>`_,\n  `@dixlorenz (Dix Lorenz) <https://github.com/dixlorenz>`_,\n  `@CarterLi (李通洲) <https://github.com/CarterLi>`_,\n  `@Naios <https://github.com/Naios>`_,\n  `@fmatthew5876 (Matthew Fioravante) <https://github.com/fmatthew5876>`_,\n  `@LevskiWeng (Levski Weng) <https://github.com/LevskiWeng>`_,\n  `@rpopescu 
<https://github.com/rpopescu>`_,\n  `@gabime (Gabi Melman) <https://github.com/gabime>`_,\n  `@cubicool (Jeremy Moles) <https://github.com/cubicool>`_,\n  `@jkflying (Julian Kent) <https://github.com/jkflying>`_,\n  `@LogicalKnight (Sean L) <https://github.com/LogicalKnight>`_,\n  `@inguin (Ingo van Lil) <https://github.com/inguin>`_ and\n  `@Jopie64 (Johan) <https://github.com/Jopie64>`_.\n\n* Fixed portability issues (mostly causing test failures) on ARM, ppc64, ppc64le,\n  s390x and SunOS 5.11 i386\n  (`#138 <https://github.com/fmtlib/fmt/issues/138>`_,\n  `#179 <https://github.com/fmtlib/fmt/issues/179>`_,\n  `#180 <https://github.com/fmtlib/fmt/issues/180>`_,\n  `#202 <https://github.com/fmtlib/fmt/issues/202>`_,\n  `#225 <https://github.com/fmtlib/fmt/issues/225>`_,\n  `Red Hat Bugzilla Bug 1260297 <https://bugzilla.redhat.com/show_bug.cgi?id=1260297>`_).\n  Thanks to `@Naios <https://github.com/Naios>`_,\n  `@jackyf (Eugene V. Lyubimkin) <https://github.com/jackyf>`_ and Dave Johansen.\n\n* Fixed a name conflict with macro ``free`` defined in\n  ``crtdbg.h`` when ``_CRTDBG_MAP_ALLOC`` is set\n  (`#211 <https://github.com/fmtlib/fmt/issues/211>`_).\n\n* Fixed shared library build on OS X\n  (`#212 <https://github.com/fmtlib/fmt/pull/212>`_).\n  Thanks to `@dean0x7d (Dean Moldovan) <https://github.com/dean0x7d>`_.\n\n* Fixed an overload conflict on MSVC when ``/Zc:wchar_t-`` option is specified\n  (`#214 <https://github.com/fmtlib/fmt/pull/214>`_).\n  Thanks to `@slavanap (Vyacheslav Napadovsky) <https://github.com/slavanap>`_.\n\n* Improved compatibility with MSVC 2008\n  (`#236 <https://github.com/fmtlib/fmt/pull/236>`_).\n  Thanks to `@Jopie64 (Johan) <https://github.com/Jopie64>`_.\n\n* Improved compatibility with bcc32\n  (`#227 <https://github.com/fmtlib/fmt/issues/227>`_).\n\n* Fixed ``static_assert`` detection on Clang\n  (`#228 <https://github.com/fmtlib/fmt/pull/228>`_).\n  Thanks to `@dean0x7d (Dean Moldovan) 
<https://github.com/dean0x7d>`_.\n\n1.1.0 - 2015-03-06\n------------------\n\n* Added ``BasicArrayWriter``, a class template that provides operations for\n  formatting and writing data into a fixed-size array\n  (`#105 <https://github.com/fmtlib/fmt/issues/105>`_ and\n  `#122 <https://github.com/fmtlib/fmt/issues/122>`_):\n\n  .. code:: c++\n  \n    char buffer[100];\n    fmt::ArrayWriter w(buffer);\n    w.write(\"The answer is {}\", 42);\n\n* Added `0 A.D. <http://play0ad.com/>`_ and `PenUltima Online (POL)\n  <http://www.polserver.com/>`_ to the list of notable projects using C++ Format.\n\n* C++ Format now uses MSVC intrinsics for better formatting performance\n  (`#115 <https://github.com/fmtlib/fmt/pull/115>`_,\n  `#116 <https://github.com/fmtlib/fmt/pull/116>`_,\n  `#118 <https://github.com/fmtlib/fmt/pull/118>`_ and\n  `#121 <https://github.com/fmtlib/fmt/pull/121>`_).\n  Previously these optimizations where only used on GCC and Clang.\n  Thanks to `@CarterLi <https://github.com/CarterLi>`_ and\n  `@objectx <https://github.com/objectx>`_.\n\n* CMake install target (`#119 <https://github.com/fmtlib/fmt/pull/119>`_).\n  Thanks to `@TrentHouliston <https://github.com/TrentHouliston>`_.\n\n  You can now install C++ Format with ``make install`` command.\n\n* Improved `Biicode <http://www.biicode.com/>`_ support\n  (`#98 <https://github.com/fmtlib/fmt/pull/98>`_ and\n  `#104 <https://github.com/fmtlib/fmt/pull/104>`_). Thanks to\n  `@MariadeAnton <https://github.com/MariadeAnton>`_ and\n  `@franramirez688 <https://github.com/franramirez688>`_.\n\n* Improved support for building with `Android NDK\n  <https://developer.android.com/tools/sdk/ndk/index.html>`_\n  (`#107 <https://github.com/fmtlib/fmt/pull/107>`_).\n  Thanks to `@newnon <https://github.com/newnon>`_.\n  \n  The `android-ndk-example <https://github.com/fmtlib/android-ndk-example>`_\n  repository provides and example of using C++ Format with Android NDK:\n\n  .. 
image:: https://raw.githubusercontent.com/fmtlib/android-ndk-example/\n            master/screenshot.png\n\n* Improved documentation of ``SystemError`` and ``WindowsError``\n  (`#54 <https://github.com/fmtlib/fmt/issues/54>`_).\n\n* Various code improvements\n  (`#110 <https://github.com/fmtlib/fmt/pull/110>`_,\n  `#111 <https://github.com/fmtlib/fmt/pull/111>`_\n  `#112 <https://github.com/fmtlib/fmt/pull/112>`_).\n  Thanks to `@CarterLi <https://github.com/CarterLi>`_.\n\n* Improved compile-time errors when formatting wide into narrow strings\n  (`#117 <https://github.com/fmtlib/fmt/issues/117>`_).\n\n* Fixed ``BasicWriter::write`` without formatting arguments when C++11 support\n  is disabled (`#109 <https://github.com/fmtlib/fmt/issues/109>`_).\n\n* Fixed header-only build on OS X with GCC 4.9\n  (`#124 <https://github.com/fmtlib/fmt/issues/124>`_).\n\n* Fixed packaging issues (`#94 <https://github.com/fmtlib/fmt/issues/94>`_).\n\n* Added `changelog <https://github.com/fmtlib/fmt/blob/master/ChangeLog.rst>`_\n  (`#103 <https://github.com/fmtlib/fmt/issues/103>`_).\n\n1.0.0 - 2015-02-05\n------------------\n\n* Add support for a header-only configuration when ``FMT_HEADER_ONLY`` is\n  defined before including ``format.h``:\n\n  .. code:: c++\n\n    #define FMT_HEADER_ONLY\n    #include \"format.h\"\n\n* Compute string length in the constructor of ``BasicStringRef``\n  instead of the ``size`` method\n  (`#79 <https://github.com/fmtlib/fmt/issues/79>`_).\n  This eliminates size computation for string literals on reasonable optimizing\n  compilers.\n\n* Fix formatting of types with overloaded ``operator <<`` for ``std::wostream``\n  (`#86 <https://github.com/fmtlib/fmt/issues/86>`_):\n\n  .. 
code:: c++\n\n    fmt::format(L\"The date is {0}\", Date(2012, 12, 9));\n\n* Fix linkage of tests on Arch Linux\n  (`#89 <https://github.com/fmtlib/fmt/issues/89>`_).\n\n* Allow precision specifier for non-float arguments\n  (`#90 <https://github.com/fmtlib/fmt/issues/90>`_):\n\n  .. code:: c++\n\n    fmt::print(\"{:.3}\\n\", \"Carpet\"); // prints \"Car\"\n\n* Fix build on Android NDK\n  (`#93 <https://github.com/fmtlib/fmt/issues/93>`_)\n\n* Improvements to documentation build procedure.\n\n* Remove ``FMT_SHARED`` CMake variable in favor of standard `BUILD_SHARED_LIBS\n  <http://www.cmake.org/cmake/help/v3.0/variable/BUILD_SHARED_LIBS.html>`_.\n\n* Fix error handling in ``fmt::fprintf``.\n\n* Fix a number of warnings.\n\n0.12.0 - 2014-10-25\n-------------------\n\n* [Breaking] Improved separation between formatting and buffer management.\n  ``Writer`` is now a base class that cannot be instantiated directly.\n  The new ``MemoryWriter`` class implements the default buffer management\n  with small allocations done on stack. So ``fmt::Writer`` should be replaced\n  with ``fmt::MemoryWriter`` in variable declarations.\n\n  Old code:\n\n  .. code:: c++\n\n    fmt::Writer w;\n\n  New code: \n\n  .. code:: c++\n\n    fmt::MemoryWriter w;\n\n  If you pass ``fmt::Writer`` by reference, you can continue to do so:\n\n  .. code:: c++\n\n      void f(fmt::Writer &w);\n\n  This doesn't affect the formatting API.\n\n* Support for custom memory allocators\n  (`#69 <https://github.com/fmtlib/fmt/issues/69>`_)\n\n* Formatting functions now accept `signed char` and `unsigned char` strings as\n  arguments (`#73 <https://github.com/fmtlib/fmt/issues/73>`_):\n\n  .. code:: c++\n\n    auto s = format(\"GLSL version: {}\", glGetString(GL_VERSION));\n\n* Reduced code bloat. 
According to the new `benchmark results\n  <https://github.com/fmtlib/fmt#compile-time-and-code-bloat>`_,\n  cppformat is close to ``printf`` and by the order of magnitude better than\n  Boost Format in terms of compiled code size.\n\n* Improved appearance of the documentation on mobile by using the `Sphinx\n  Bootstrap theme <http://ryan-roemer.github.io/sphinx-bootstrap-theme/>`_:\n\n  .. |old| image:: https://cloud.githubusercontent.com/assets/576385/4792130/\n                   cd256436-5de3-11e4-9a62-c077d0c2b003.png\n\n  .. |new| image:: https://cloud.githubusercontent.com/assets/576385/4792131/\n                   cd29896c-5de3-11e4-8f59-cac952942bf0.png\n  \n  +-------+-------+\n  |  Old  |  New  |\n  +-------+-------+\n  | |old| | |new| |\n  +-------+-------+\n\n0.11.0 - 2014-08-21\n-------------------\n\n* Safe printf implementation with a POSIX extension for positional arguments:\n\n  .. code:: c++\n\n    fmt::printf(\"Elapsed time: %.2f seconds\", 1.23);\n    fmt::printf(\"%1$s, %3$d %2$s\", weekday, month, day);\n\n* Arguments of ``char`` type can now be formatted as integers\n  (Issue `#55 <https://github.com/fmtlib/fmt/issues/55>`_):\n\n  .. code:: c++\n\n    fmt::format(\"0x{0:02X}\", 'a');\n\n* Deprecated parts of the API removed.\n\n* The library is now built and tested on MinGW with Appveyor in addition to\n  existing test platforms Linux/GCC, OS X/Clang, Windows/MSVC.\n\n0.10.0 - 2014-07-01\n-------------------\n\n**Improved API**\n\n* All formatting methods are now implemented as variadic functions instead\n  of using ``operator<<`` for feeding arbitrary arguments into a temporary\n  formatter object. This works both with C++11 where variadic templates are\n  used and with older standards where variadic functions are emulated by\n  providing lightweight wrapper functions defined with the ``FMT_VARIADIC``\n  macro. You can use this macro for defining your own portable variadic\n  functions:\n\n  .. 
code:: c++\n\n    void report_error(const char *format, const fmt::ArgList &args) {\n      fmt::print(\"Error: {}\");\n      fmt::print(format, args);\n    }\n    FMT_VARIADIC(void, report_error, const char *)\n\n    report_error(\"file not found: {}\", path);\n\n  Apart from a more natural syntax, this also improves performance as there\n  is no need to construct temporary formatter objects and control arguments'\n  lifetimes. Because the wrapper functions are very lightweight, this doesn't\n  cause code bloat even in pre-C++11 mode.\n\n* Simplified common case of formatting an ``std::string``. Now it requires a\n  single function call:\n\n  .. code:: c++\n\n    std::string s = format(\"The answer is {}.\", 42);\n\n  Previously it required 2 function calls:\n\n  .. code:: c++\n\n    std::string s = str(Format(\"The answer is {}.\") << 42);\n\n  Instead of unsafe ``c_str`` function, ``fmt::Writer`` should be used directly\n  to bypass creation of ``std::string``:\n\n  .. code:: c++\n\n    fmt::Writer w;\n    w.write(\"The answer is {}.\", 42);\n    w.c_str();  // returns a C string\n\n  This doesn't do dynamic memory allocation for small strings and is less error\n  prone as the lifetime of the string is the same as for ``std::string::c_str``\n  which is well understood (hopefully).\n\n* Improved consistency in naming functions that are a part of the public API.\n  Now all public functions are lowercase following the standard library\n  conventions. Previously it was a combination of lowercase and\n  CapitalizedWords.\n  Issue `#50 <https://github.com/fmtlib/fmt/issues/50>`_.\n\n* Old functions are marked as deprecated and will be removed in the next\n  release.\n\n**Other Changes**\n\n* Experimental support for printf format specifications (work in progress):\n\n  .. 
code:: c++\n\n    fmt::printf(\"The answer is %d.\", 42);\n    std::string s = fmt::sprintf(\"Look, a %s!\", \"string\");\n\n* Support for hexadecimal floating point format specifiers ``a`` and ``A``:\n\n  .. code:: c++\n\n    print(\"{:a}\", -42.0); // Prints -0x1.5p+5\n    print(\"{:A}\", -42.0); // Prints -0X1.5P+5\n\n* CMake option ``FMT_SHARED`` that specifies whether to build format as a\n  shared library (off by default).\n\n0.9.0 - 2014-05-13\n------------------\n\n* More efficient implementation of variadic formatting functions.\n\n* ``Writer::Format`` now has a variadic overload:\n\n  .. code:: c++\n\n    Writer out;\n    out.Format(\"Look, I'm {}!\", \"variadic\");\n\n* For efficiency and consistency with other overloads, variadic overload of\n  the ``Format`` function now returns ``Writer`` instead of ``std::string``.\n  Use the ``str`` function to convert it to ``std::string``:\n\n  .. code:: c++\n\n    std::string s = str(Format(\"Look, I'm {}!\", \"variadic\"));\n\n* Replaced formatter actions with output sinks: ``NoAction`` -> ``NullSink``,\n  ``Write`` -> ``FileSink``, ``ColorWriter`` -> ``ANSITerminalSink``.\n  This improves naming consistency and shouldn't affect client code unless\n  these classes are used directly which should be rarely needed.\n\n* Added ``ThrowSystemError`` function that formats a message and throws\n  ``SystemError`` containing the formatted message and system-specific error\n  description. For example, the following code\n\n  .. code:: c++\n\n    FILE *f = fopen(filename, \"r\");\n    if (!f)\n      ThrowSystemError(errno, \"Failed to open file '{}'\") << filename;\n\n  will throw ``SystemError`` exception with description\n  \"Failed to open file '<filename>': No such file or directory\" if file\n  doesn't exist.\n\n* Support for AppVeyor continuous integration platform.\n\n* ``Format`` now throws ``SystemError`` in case of I/O errors.\n\n* Improve test infrastructure. 
Print functions are now tested by redirecting\n  the output to a pipe.\n\n0.8.0 - 2014-04-14\n------------------\n\n* Initial release\n"
  },
  {
    "path": "src/third_party/fmt/LICENSE.rst",
    "content": "Copyright (c) 2012 - present, Victor Zverovich\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "src/third_party/fmt/README.rst",
    "content": "{fmt}\n=====\n\n.. image:: https://travis-ci.org/fmtlib/fmt.png?branch=master\n   :target: https://travis-ci.org/fmtlib/fmt\n\n.. image:: https://ci.appveyor.com/api/projects/status/ehjkiefde6gucy1v\n   :target: https://ci.appveyor.com/project/vitaut/fmt\n\n.. image:: https://img.shields.io/badge/stackoverflow-fmt-blue.svg\n   :alt: Ask questions at StackOverflow with the tag fmt\n   :target: http://stackoverflow.com/questions/tagged/fmt\n\n**{fmt}** is an open-source formatting library for C++.\nIt can be used as a safe and fast alternative to (s)printf and iostreams.\n\n`Documentation <https://fmt.dev/latest/>`__\n\nQ&A: ask questions on `StackOverflow with the tag fmt <http://stackoverflow.com/questions/tagged/fmt>`_.\n\nFeatures\n--------\n\n* Replacement-based `format API <https://fmt.dev/dev/api.html>`_ with\n  positional arguments for localization.\n* `Format string syntax <https://fmt.dev/dev/syntax.html>`_ similar to the one\n  of `str.format <https://docs.python.org/2/library/stdtypes.html#str.format>`_\n  in Python.\n* Safe `printf implementation\n  <https://fmt.dev/latest/api.html#printf-formatting>`_ including\n  the POSIX extension for positional arguments.\n* Implementation of `C++20 std::format <https://fmt.dev/Text%20Formatting.html>`__.\n* Support for user-defined types.\n* High performance: faster than common standard library implementations of\n  `printf <http://en.cppreference.com/w/cpp/io/c/fprintf>`_ and\n  iostreams. See `Speed tests`_ and `Fast integer to string conversion in C++\n  <http://zverovich.net/2013/09/07/integer-to-string-conversion-in-cplusplus.html>`_.\n* Small code size both in terms of source code (the minimum configuration\n  consists of just three header files, ``core.h``, ``format.h`` and\n  ``format-inl.h``) and compiled code. 
See `Compile time and code bloat`_.\n* Reliability: the library has an extensive set of `unit tests\n  <https://github.com/fmtlib/fmt/tree/master/test>`_.\n* Safety: the library is fully type safe, errors in format strings can be\n  reported at compile time, automatic memory management prevents buffer overflow\n  errors.\n* Ease of use: small self-contained code base, no external dependencies,\n  permissive BSD `license\n  <https://github.com/fmtlib/fmt/blob/master/LICENSE.rst>`_\n* `Portability <https://fmt.dev/latest/index.html#portability>`_ with\n  consistent output across platforms and support for older compilers.\n* Clean warning-free codebase even on high warning levels\n  (``-Wall -Wextra -pedantic``).\n* Support for wide strings.\n* Optional header-only configuration enabled with the ``FMT_HEADER_ONLY`` macro.\n\nSee the `documentation <https://fmt.dev/latest/>`_ for more details.\n\nExamples\n--------\n\nPrint ``Hello, world!`` to ``stdout``:\n\n.. code:: c++\n\n    fmt::print(\"Hello, {}!\", \"world\");  // Python-like format string syntax\n    fmt::printf(\"Hello, %s!\", \"world\"); // printf format string syntax\n\nFormat a string and use positional arguments:\n\n.. code:: c++\n\n    std::string s = fmt::format(\"I'd rather be {1} than {0}.\", \"right\", \"happy\");\n    // s == \"I'd rather be happy than right.\"\n\nCheck a format string at compile time:\n\n.. code:: c++\n\n    // test.cc\n    #define FMT_STRING_ALIAS 1\n    #include <fmt/format.h>\n    std::string s = format(fmt(\"{2}\"), 42);\n\n.. 
code::\n\n    $ c++ -Iinclude -std=c++14 test.cc\n    ...\n    test.cc:4:17: note: in instantiation of function template specialization 'fmt::v5::format<S, int>' requested here\n    std::string s = format(fmt(\"{2}\"), 42);\n                    ^\n    include/fmt/core.h:778:19: note: non-constexpr function 'on_error' cannot be used in a constant expression\n        ErrorHandler::on_error(message);\n                      ^\n    include/fmt/format.h:2226:16: note: in call to '&checker.context_->on_error(&\"argument index out of range\"[0])'\n          context_.on_error(\"argument index out of range\");\n                   ^\n\nUse {fmt} as a safe portable replacement for ``itoa``\n(`godbolt <https://godbolt.org/g/NXmpU4>`_):\n\n.. code:: c++\n\n    fmt::memory_buffer buf;\n    format_to(buf, \"{}\", 42);    // replaces itoa(42, buffer, 10)\n    format_to(buf, \"{:x}\", 42);  // replaces itoa(42, buffer, 16)\n    // access the string with to_string(buf) or buf.data()\n\nFormat objects of user-defined types via a simple `extension API\n<https://fmt.dev/latest/api.html#formatting-user-defined-types>`_:\n\n.. code:: c++\n\n    #include \"fmt/format.h\"\n\n    struct date {\n      int year, month, day;\n    };\n\n    template <>\n    struct fmt::formatter<date> {\n      template <typename ParseContext>\n      constexpr auto parse(ParseContext &ctx) { return ctx.begin(); }\n\n      template <typename FormatContext>\n      auto format(const date &d, FormatContext &ctx) {\n        return format_to(ctx.out(), \"{}-{}-{}\", d.year, d.month, d.day);\n      }\n    };\n\n    std::string s = fmt::format(\"The date is {}\", date{2012, 12, 9});\n    // s == \"The date is 2012-12-9\"\n\nCreate your own functions similar to `format\n<https://fmt.dev/latest/api.html#format>`_ and\n`print <https://fmt.dev/latest/api.html#print>`_\nwhich take arbitrary arguments (`godbolt <https://godbolt.org/g/MHjHVf>`_):\n\n.. 
code:: c++\n\n    // Prints formatted error message.\n    void vreport_error(const char *format, fmt::format_args args) {\n      fmt::print(\"Error: \");\n      fmt::vprint(format, args);\n    }\n    template <typename... Args>\n    void report_error(const char *format, const Args & ... args) {\n      vreport_error(format, fmt::make_format_args(args...));\n    }\n\n    report_error(\"file not found: {}\", path);\n\nNote that ``vreport_error`` is not parameterized on argument types which can\nimprove compile times and reduce code size compared to a fully parameterized\nversion.\n\nBenchmarks\n----------\n\nSpeed tests\n~~~~~~~~~~~\n\n================= ============= ===========\nLibrary           Method        Run Time, s\n================= ============= ===========\nlibc              printf          1.01\nlibc++            std::ostream    3.04\n{fmt} 1632f72     fmt::print      0.86\ntinyformat 2.0.1  tfm::printf     3.23\nBoost Format 1.67 boost::format   7.98\nFolly Format      folly::format   2.23\n================= ============= ===========\n\n{fmt} is the fastest of the benchmarked methods, ~17% faster than ``printf``.\n\nThe above results were generated by building ``tinyformat_test.cpp`` on macOS\n10.14.3 with ``clang++ -O3 -DSPEED_TEST -DHAVE_FORMAT``, and taking the best of\nthree runs. In the test, the format string ``\"%0.10f:%04d:%+g:%s:%p:%c:%%\\n\"``\nor equivalent is filled 2,000,000 times with output sent to ``/dev/null``; for\nfurther details refer to the `source\n<https://github.com/fmtlib/format-benchmark/blob/master/tinyformat_test.cpp>`_.\n\n{fmt} is 10x faster than ``std::ostringstream`` and ``sprintf`` on floating-point\nformatting (`dtoa-benchmark <https://github.com/fmtlib/dtoa-benchmark>`_)\nand as fast as `double-conversion <https://github.com/google/double-conversion>`_:\n\n.. 
image:: https://user-images.githubusercontent.com/576385/54883977-9fe8c000-4e28-11e9-8bde-272d122e7c52.jpg\n   :target: https://fmt.dev/unknown_mac64_clang10.0.html\n\nCompile time and code bloat\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe script `bloat-test.py\n<https://github.com/fmtlib/format-benchmark/blob/master/bloat-test.py>`_\nfrom `format-benchmark <https://github.com/fmtlib/format-benchmark>`_\ntests compile time and code bloat for nontrivial projects.\nIt generates 100 translation units and uses ``printf()`` or its alternative\nfive times in each to simulate a medium sized project.  The resulting\nexecutable size and compile time (Apple LLVM version 8.1.0 (clang-802.0.42),\nmacOS Sierra, best of three) is shown in the following tables.\n\n**Optimized build (-O3)**\n\n============= =============== ==================== ==================\nMethod        Compile Time, s Executable size, KiB Stripped size, KiB\n============= =============== ==================== ==================\nprintf                    2.6                   29                 26\nprintf+string            16.4                   29                 26\niostreams                31.1                   59                 55\n{fmt}                    19.0                   37                 34\ntinyformat               44.0                  103                 97\nBoost Format             91.9                  226                203\nFolly Format            115.7                  101                 88\n============= =============== ==================== ==================\n\nAs you can see, {fmt} has 60% less overhead in terms of resulting binary code\nsize compared to iostreams and comes pretty close to ``printf``. 
Boost Format\nand Folly Format have the largest overheads.\n\n``printf+string`` is the same as ``printf`` but with extra ``<string>``\ninclude to measure the overhead of the latter.\n\n**Non-optimized build**\n\n============= =============== ==================== ==================\nMethod        Compile Time, s Executable size, KiB Stripped size, KiB\n============= =============== ==================== ==================\nprintf                    2.2                   33                 30\nprintf+string            16.0                   33                 30\niostreams                28.3                   56                 52\n{fmt}                    18.2                   59                 50\ntinyformat               32.6                   88                 82\nBoost Format             54.1                  365                303\nFolly Format             79.9                  445                430\n============= =============== ==================== ==================\n\n``libc``, ``lib(std)c++`` and ``libfmt`` are all linked as shared libraries to\ncompare formatting function overhead only. Boost Format and tinyformat are\nheader-only libraries so they don't provide any linkage options.\n\nRunning the tests\n~~~~~~~~~~~~~~~~~\n\nPlease refer to `Building the library`__ for the instructions on how to build\nthe library and run the unit tests.\n\n__ https://fmt.dev/latest/usage.html#building-the-library\n\nBenchmarks reside in a separate repository,\n`format-benchmarks <https://github.com/fmtlib/format-benchmark>`_,\nso to run the benchmarks you first need to clone this repository and\ngenerate Makefiles with CMake::\n\n    $ git clone --recursive https://github.com/fmtlib/format-benchmark.git\n    $ cd format-benchmark\n    $ cmake .\n\nThen you can run the speed test::\n\n    $ make speed-test\n\nor the bloat test::\n\n    $ make bloat-test\n\nProjects using this library\n---------------------------\n\n* `0 A.D. 
<http://play0ad.com/>`_: A free, open-source, cross-platform real-time\n  strategy game\n\n* `AMPL/MP <https://github.com/ampl/mp>`_:\n  An open-source library for mathematical programming\n  \n* `AvioBook <https://www.aviobook.aero/en>`_: A comprehensive aircraft\n  operations suite\n  \n* `Celestia <https://celestia.space/>`_: Real-time 3D visualization of space\n\n* `Ceph <https://ceph.com/>`_: A scalable distributed storage system\n\n* `CUAUV <http://cuauv.org/>`_: Cornell University's autonomous underwater\n  vehicle\n\n* `HarpyWar/pvpgn <https://github.com/pvpgn/pvpgn-server>`_:\n  Player vs Player Gaming Network with tweaks\n\n* `KBEngine <http://kbengine.org/>`_: An open-source MMOG server engine\n\n* `Keypirinha <http://keypirinha.com/>`_: A semantic launcher for Windows\n\n* `Kodi <https://kodi.tv/>`_ (formerly xbmc): Home theater software\n\n* `Lifeline <https://github.com/peter-clark/lifeline>`_: A 2D game\n\n* `Drake <http://drake.mit.edu/>`_: A planning, control, and analysis toolbox\n  for nonlinear dynamical systems (MIT)\n\n* `Envoy <https://lyft.github.io/envoy/>`_: C++ L7 proxy and communication bus\n  (Lyft)\n\n* `FiveM <https://fivem.net/>`_: a modification framework for GTA V\n\n* `MongoDB <https://mongodb.com/>`_: Distributed document database\n\n* `MongoDB Smasher <https://github.com/duckie/mongo_smasher>`_: A small tool to\n  generate randomized datasets\n\n* `OpenSpace <http://openspaceproject.com/>`_: An open-source astrovisualization\n  framework\n\n* `PenUltima Online (POL) <http://www.polserver.com/>`_:\n  An MMO server, compatible with most Ultima Online clients\n\n* `quasardb <https://www.quasardb.net/>`_: A distributed, high-performance,\n  associative database\n\n* `readpe <https://bitbucket.org/sys_dev/readpe>`_: Read Portable Executable\n\n* `redis-cerberus <https://github.com/HunanTV/redis-cerberus>`_: A Redis cluster\n  proxy\n\n* `rpclib <http://rpclib.net/>`_: A modern C++ msgpack-RPC server and client\n  library\n\n* `Saddy 
<https://github.com/mamontov-cpp/saddy-graphics-engine-2d>`_:\n  Small crossplatform 2D graphic engine\n\n* `Salesforce Analytics Cloud <http://www.salesforce.com/analytics-cloud/overview/>`_:\n  Business intelligence software\n\n* `Scylla <http://www.scylladb.com/>`_: A Cassandra-compatible NoSQL data store\n  that can handle 1 million transactions per second on a single server\n\n* `Seastar <http://www.seastar-project.org/>`_: An advanced, open-source C++\n  framework for high-performance server applications on modern hardware\n\n* `spdlog <https://github.com/gabime/spdlog>`_: Super fast C++ logging library\n\n* `Stellar <https://www.stellar.org/>`_: Financial platform\n\n* `Touch Surgery <https://www.touchsurgery.com/>`_: Surgery simulator\n\n* `TrinityCore <https://github.com/TrinityCore/TrinityCore>`_: Open-source\n  MMORPG framework\n\n`More... <https://github.com/search?q=cppformat&type=Code>`_\n\nIf you are aware of other projects using this library, please let me know\nby `email <mailto:victor.zverovich@gmail.com>`_ or by submitting an\n`issue <https://github.com/fmtlib/fmt/issues>`_.\n\nMotivation\n----------\n\nSo why yet another formatting library?\n\nThere are plenty of methods for doing this task, from standard ones like\nthe printf family of function and iostreams to Boost Format and FastFormat\nlibraries. The reason for creating a new library is that every existing\nsolution that I found either had serious issues or didn't provide\nall the features I needed.\n\nprintf\n~~~~~~\n\nThe good thing about ``printf`` is that it is pretty fast and readily available\nbeing a part of the C standard library. The main drawback is that it\ndoesn't support user-defined types. 
``printf`` also has safety issues although\nthey are somewhat mitigated with `__attribute__ ((format (printf, ...))\n<http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html>`_ in GCC.\nThere is a POSIX extension that adds positional arguments required for\n`i18n <https://en.wikipedia.org/wiki/Internationalization_and_localization>`_\nto ``printf`` but it is not a part of C99 and may not be available on some\nplatforms.\n\niostreams\n~~~~~~~~~\n\nThe main issue with iostreams is best illustrated with an example:\n\n.. code:: c++\n\n    std::cout << std::setprecision(2) << std::fixed << 1.23456 << \"\\n\";\n\nwhich is a lot of typing compared to printf:\n\n.. code:: c++\n\n    printf(\"%.2f\\n\", 1.23456);\n\nMatthew Wilson, the author of FastFormat, called this \"chevron hell\". iostreams\ndon't support positional arguments by design.\n\nThe good part is that iostreams support user-defined types and are safe although\nerror handling is awkward.\n\nBoost Format\n~~~~~~~~~~~~\n\nThis is a very powerful library which supports both ``printf``-like format\nstrings and positional arguments. Its main drawback is performance. According to\nvarious benchmarks it is much slower than other methods considered here. Boost\nFormat also has excessive build times and severe code bloat issues (see\n`Benchmarks`_).\n\nFastFormat\n~~~~~~~~~~\n\nThis is an interesting library which is fast, safe and has positional\narguments. However it has significant limitations, citing its author:\n\n    Three features that have no hope of being accommodated within the\n    current design are:\n\n    * Leading zeros (or any other non-space padding)\n    * Octal/hexadecimal encoding\n    * Runtime width/alignment specification\n\nIt is also quite big and has a heavy dependency, STLSoft, which might be\ntoo restrictive for using it in some projects.\n\nLoki SafeFormat\n~~~~~~~~~~~~~~~\n\nSafeFormat is a formatting library which uses ``printf``-like format strings and\nis type safe. 
It doesn't support user-defined types or positional arguments and\nmakes unconventional use of ``operator()`` for passing format arguments.\n\nTinyformat\n~~~~~~~~~~\n\nThis library supports ``printf``-like format strings and is very small .\nIt doesn't support positional arguments and wrapping it in C++98 is somewhat\ndifficult. Tinyformat relies on iostreams which limits its performance.\n\nBoost Spirit.Karma\n~~~~~~~~~~~~~~~~~~\n\nThis is not really a formatting library but I decided to include it here for\ncompleteness. As iostreams, it suffers from the problem of mixing verbatim text\nwith arguments. The library is pretty fast, but slower on integer formatting\nthan ``fmt::format_int`` on Karma's own benchmark,\nsee `Fast integer to string conversion in C++\n<http://zverovich.net/2013/09/07/integer-to-string-conversion-in-cplusplus.html>`_.\n\nFAQ\n---\n\nQ: how can I capture formatting arguments and format them later?\n\nA: use ``std::tuple``:\n\n.. code:: c++\n\n   template <typename... Args>\n   auto capture(const Args&... args) {\n     return std::make_tuple(args...);\n   }\n\n   auto print_message = [](const auto&... args) {\n     fmt::print(args...);\n   };\n\n   // Capture and store arguments:\n   auto args = capture(\"{} {}\", 42, \"foo\");\n   // Do formatting:\n   std::apply(print_message, args);\n\nLicense\n-------\n\n{fmt} is distributed under the BSD `license\n<https://github.com/fmtlib/fmt/blob/master/LICENSE.rst>`_.\n\nThe `Format String Syntax\n<https://fmt.dev/latest/syntax.html>`_\nsection in the documentation is based on the one from Python `string module\ndocumentation <https://docs.python.org/3/library/string.html#module-string>`_\nadapted for the current library. 
For this reason the documentation is\ndistributed under the Python Software Foundation license available in\n`doc/python-license.txt\n<https://raw.github.com/fmtlib/fmt/master/doc/python-license.txt>`_.\nIt only applies if you distribute the documentation of fmt.\n\nAcknowledgments\n---------------\n\nThe {fmt} library is maintained by Victor Zverovich (`vitaut\n<https://github.com/vitaut>`_) and Jonathan Müller (`foonathan\n<https://github.com/foonathan>`_) with contributions from many other people.\nSee `Contributors <https://github.com/fmtlib/fmt/graphs/contributors>`_ and\n`Releases <https://github.com/fmtlib/fmt/releases>`_ for some of the names.\nLet us know if your contribution is not listed or mentioned incorrectly and\nwe'll make it right.\n\nThe benchmark section of this readme file and the performance tests are taken\nfrom the excellent `tinyformat <https://github.com/c42f/tinyformat>`_ library\nwritten by Chris Foster.  Boost Format library is acknowledged transitively\nsince it had some influence on tinyformat.\nSome ideas used in the implementation are borrowed from `Loki\n<http://loki-lib.sourceforge.net/>`_ SafeFormat and `Diagnostic API\n<http://clang.llvm.org/doxygen/classclang_1_1Diagnostic.html>`_ in\n`Clang <http://clang.llvm.org/>`_.\nFormat string syntax and the documentation are based on Python's `str.format\n<http://docs.python.org/2/library/stdtypes.html#str.format>`_.\nThanks `Doug Turnbull <https://github.com/softwaredoug>`_ for his valuable\ncomments and contribution to the design of the type-safe API and\n`Gregory Czajkowski <https://github.com/gcflymoto>`_ for implementing binary\nformatting. 
Thanks `Ruslan Baratov <https://github.com/ruslo>`_ for comprehensive\n`comparison of integer formatting algorithms <https://github.com/ruslo/int-dec-format-tests>`_\nand useful comments regarding performance, `Boris Kaul <https://github.com/localvoid>`_ for\n`C++ counting digits benchmark <https://github.com/localvoid/cxx-benchmark-count-digits>`_.\nThanks to `CarterLi <https://github.com/CarterLi>`_ for contributing various\nimprovements to the code.\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/chrono.h",
    "content": "// Formatting library for C++ - chrono support\n//\n// Copyright (c) 2012 - present, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_CHRONO_H_\n#define FMT_CHRONO_H_\n\n#include \"format.h\"\n#include \"locale.h\"\n\n#include <chrono>\n#include <ctime>\n#include <locale>\n#include <sstream>\n\n// enable safe chrono durations, unless explicitly disabled\n#ifndef FMT_SAFE_DURATION_CAST\n#  define FMT_SAFE_DURATION_CAST 1\n#endif\n\n#if FMT_SAFE_DURATION_CAST\n#  include \"safe-duration-cast.h\"\n#endif\n\nFMT_BEGIN_NAMESPACE\n\n// Prevents expansion of a preceding token as a function-style macro.\n// Usage: f FMT_NOMACRO()\n#define FMT_NOMACRO\n\nnamespace internal {\ninline null<> localtime_r FMT_NOMACRO(...) { return null<>(); }\ninline null<> localtime_s(...) { return null<>(); }\ninline null<> gmtime_r(...) { return null<>(); }\ninline null<> gmtime_s(...) { return null<>(); }\n}  // namespace internal\n\n// Thread-safe replacement for std::localtime\ninline std::tm localtime(std::time_t time) {\n  struct dispatcher {\n    std::time_t time_;\n    std::tm tm_;\n\n    dispatcher(std::time_t t) : time_(t) {}\n\n    bool run() {\n      using namespace fmt::internal;\n      return handle(localtime_r(&time_, &tm_));\n    }\n\n    bool handle(std::tm* tm) { return tm != nullptr; }\n\n    bool handle(internal::null<>) {\n      using namespace fmt::internal;\n      return fallback(localtime_s(&tm_, &time_));\n    }\n\n    bool fallback(int res) { return res == 0; }\n\n#if !FMT_MSC_VER\n    bool fallback(internal::null<>) {\n      using namespace fmt::internal;\n      std::tm* tm = std::localtime(&time_);\n      if (tm) tm_ = *tm;\n      return tm != nullptr;\n    }\n#endif\n  };\n  dispatcher lt(time);\n  // Too big time values may be unsupported.\n  if (!lt.run()) FMT_THROW(format_error(\"time_t value out of range\"));\n  return lt.tm_;\n}\n\n// Thread-safe replacement for std::gmtime\ninline 
std::tm gmtime(std::time_t time) {\n  struct dispatcher {\n    std::time_t time_;\n    std::tm tm_;\n\n    dispatcher(std::time_t t) : time_(t) {}\n\n    bool run() {\n      using namespace fmt::internal;\n      return handle(gmtime_r(&time_, &tm_));\n    }\n\n    bool handle(std::tm* tm) { return tm != nullptr; }\n\n    bool handle(internal::null<>) {\n      using namespace fmt::internal;\n      return fallback(gmtime_s(&tm_, &time_));\n    }\n\n    bool fallback(int res) { return res == 0; }\n\n#if !FMT_MSC_VER\n    bool fallback(internal::null<>) {\n      std::tm* tm = std::gmtime(&time_);\n      if (tm) tm_ = *tm;\n      return tm != nullptr;\n    }\n#endif\n  };\n  dispatcher gt(time);\n  // Too big time values may be unsupported.\n  if (!gt.run()) FMT_THROW(format_error(\"time_t value out of range\"));\n  return gt.tm_;\n}\n\nnamespace internal {\ninline std::size_t strftime(char* str, std::size_t count, const char* format,\n                            const std::tm* time) {\n  return std::strftime(str, count, format, time);\n}\n\ninline std::size_t strftime(wchar_t* str, std::size_t count,\n                            const wchar_t* format, const std::tm* time) {\n  return std::wcsftime(str, count, format, time);\n}\n}  // namespace internal\n\ntemplate <typename Char> struct formatter<std::tm, Char> {\n  template <typename ParseContext>\n  auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {\n    auto it = ctx.begin();\n    if (it != ctx.end() && *it == ':') ++it;\n    auto end = it;\n    while (end != ctx.end() && *end != '}') ++end;\n    tm_format.reserve(internal::to_unsigned(end - it + 1));\n    tm_format.append(it, end);\n    tm_format.push_back('\\0');\n    return end;\n  }\n\n  template <typename FormatContext>\n  auto format(const std::tm& tm, FormatContext& ctx) -> decltype(ctx.out()) {\n    basic_memory_buffer<Char> buf;\n    std::size_t start = buf.size();\n    for (;;) {\n      std::size_t size = buf.capacity() - start;\n      std::size_t 
count =\n          internal::strftime(&buf[start], size, &tm_format[0], &tm);\n      if (count != 0) {\n        buf.resize(start + count);\n        break;\n      }\n      if (size >= tm_format.size() * 256) {\n        // If the buffer is 256 times larger than the format string, assume\n        // that `strftime` gives an empty result. There doesn't seem to be a\n        // better way to distinguish the two cases:\n        // https://github.com/fmtlib/fmt/issues/367\n        break;\n      }\n      const std::size_t MIN_GROWTH = 10;\n      buf.reserve(buf.capacity() + (size > MIN_GROWTH ? size : MIN_GROWTH));\n    }\n    return std::copy(buf.begin(), buf.end(), ctx.out());\n  }\n\n  basic_memory_buffer<Char> tm_format;\n};\n\nnamespace internal {\ntemplate <typename Period> FMT_CONSTEXPR const char* get_units() {\n  return nullptr;\n}\ntemplate <> FMT_CONSTEXPR const char* get_units<std::atto>() { return \"as\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::femto>() { return \"fs\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::pico>() { return \"ps\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::nano>() { return \"ns\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::micro>() { return \"µs\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::milli>() { return \"ms\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::centi>() { return \"cs\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::deci>() { return \"ds\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::ratio<1>>() { return \"s\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::deca>() { return \"das\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::hecto>() { return \"hs\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::kilo>() { return \"ks\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::mega>() { return \"Ms\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::giga>() { return \"Gs\"; }\ntemplate 
<> FMT_CONSTEXPR const char* get_units<std::tera>() { return \"Ts\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::peta>() { return \"Ps\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::exa>() { return \"Es\"; }\ntemplate <> FMT_CONSTEXPR const char* get_units<std::ratio<60>>() {\n  return \"m\";\n}\ntemplate <> FMT_CONSTEXPR const char* get_units<std::ratio<3600>>() {\n  return \"h\";\n}\n\nenum class numeric_system {\n  standard,\n  // Alternative numeric system, e.g. 十二 instead of 12 in ja_JP locale.\n  alternative\n};\n\n// Parses a put_time-like format string and invokes handler actions.\ntemplate <typename Char, typename Handler>\nFMT_CONSTEXPR const Char* parse_chrono_format(const Char* begin,\n                                              const Char* end,\n                                              Handler&& handler) {\n  auto ptr = begin;\n  while (ptr != end) {\n    auto c = *ptr;\n    if (c == '}') break;\n    if (c != '%') {\n      ++ptr;\n      continue;\n    }\n    if (begin != ptr) handler.on_text(begin, ptr);\n    ++ptr;  // consume '%'\n    if (ptr == end) FMT_THROW(format_error(\"invalid format\"));\n    c = *ptr++;\n    switch (c) {\n    case '%':\n      handler.on_text(ptr - 1, ptr);\n      break;\n    case 'n': {\n      const char newline[] = \"\\n\";\n      handler.on_text(newline, newline + 1);\n      break;\n    }\n    case 't': {\n      const char tab[] = \"\\t\";\n      handler.on_text(tab, tab + 1);\n      break;\n    }\n    // Day of the week:\n    case 'a':\n      handler.on_abbr_weekday();\n      break;\n    case 'A':\n      handler.on_full_weekday();\n      break;\n    case 'w':\n      handler.on_dec0_weekday(numeric_system::standard);\n      break;\n    case 'u':\n      handler.on_dec1_weekday(numeric_system::standard);\n      break;\n    // Month:\n    case 'b':\n      handler.on_abbr_month();\n      break;\n    case 'B':\n      handler.on_full_month();\n      break;\n    // Hour, minute, second:\n    case 
'H':\n      handler.on_24_hour(numeric_system::standard);\n      break;\n    case 'I':\n      handler.on_12_hour(numeric_system::standard);\n      break;\n    case 'M':\n      handler.on_minute(numeric_system::standard);\n      break;\n    case 'S':\n      handler.on_second(numeric_system::standard);\n      break;\n    // Other:\n    case 'c':\n      handler.on_datetime(numeric_system::standard);\n      break;\n    case 'x':\n      handler.on_loc_date(numeric_system::standard);\n      break;\n    case 'X':\n      handler.on_loc_time(numeric_system::standard);\n      break;\n    case 'D':\n      handler.on_us_date();\n      break;\n    case 'F':\n      handler.on_iso_date();\n      break;\n    case 'r':\n      handler.on_12_hour_time();\n      break;\n    case 'R':\n      handler.on_24_hour_time();\n      break;\n    case 'T':\n      handler.on_iso_time();\n      break;\n    case 'p':\n      handler.on_am_pm();\n      break;\n    case 'Q':\n      handler.on_duration_value();\n      break;\n    case 'q':\n      handler.on_duration_unit();\n      break;\n    case 'z':\n      handler.on_utc_offset();\n      break;\n    case 'Z':\n      handler.on_tz_name();\n      break;\n    // Alternative representation:\n    case 'E': {\n      if (ptr == end) FMT_THROW(format_error(\"invalid format\"));\n      c = *ptr++;\n      switch (c) {\n      case 'c':\n        handler.on_datetime(numeric_system::alternative);\n        break;\n      case 'x':\n        handler.on_loc_date(numeric_system::alternative);\n        break;\n      case 'X':\n        handler.on_loc_time(numeric_system::alternative);\n        break;\n      default:\n        FMT_THROW(format_error(\"invalid format\"));\n      }\n      break;\n    }\n    case 'O':\n      if (ptr == end) FMT_THROW(format_error(\"invalid format\"));\n      c = *ptr++;\n      switch (c) {\n      case 'w':\n        handler.on_dec0_weekday(numeric_system::alternative);\n        break;\n      case 'u':\n        
handler.on_dec1_weekday(numeric_system::alternative);\n        break;\n      case 'H':\n        handler.on_24_hour(numeric_system::alternative);\n        break;\n      case 'I':\n        handler.on_12_hour(numeric_system::alternative);\n        break;\n      case 'M':\n        handler.on_minute(numeric_system::alternative);\n        break;\n      case 'S':\n        handler.on_second(numeric_system::alternative);\n        break;\n      default:\n        FMT_THROW(format_error(\"invalid format\"));\n      }\n      break;\n    default:\n      FMT_THROW(format_error(\"invalid format\"));\n    }\n    begin = ptr;\n  }\n  if (begin != ptr) handler.on_text(begin, ptr);\n  return ptr;\n}\n\nstruct chrono_format_checker {\n  FMT_NORETURN void report_no_date() { FMT_THROW(format_error(\"no date\")); }\n\n  template <typename Char> void on_text(const Char*, const Char*) {}\n  FMT_NORETURN void on_abbr_weekday() { report_no_date(); }\n  FMT_NORETURN void on_full_weekday() { report_no_date(); }\n  FMT_NORETURN void on_dec0_weekday(numeric_system) { report_no_date(); }\n  FMT_NORETURN void on_dec1_weekday(numeric_system) { report_no_date(); }\n  FMT_NORETURN void on_abbr_month() { report_no_date(); }\n  FMT_NORETURN void on_full_month() { report_no_date(); }\n  void on_24_hour(numeric_system) {}\n  void on_12_hour(numeric_system) {}\n  void on_minute(numeric_system) {}\n  void on_second(numeric_system) {}\n  FMT_NORETURN void on_datetime(numeric_system) { report_no_date(); }\n  FMT_NORETURN void on_loc_date(numeric_system) { report_no_date(); }\n  FMT_NORETURN void on_loc_time(numeric_system) { report_no_date(); }\n  FMT_NORETURN void on_us_date() { report_no_date(); }\n  FMT_NORETURN void on_iso_date() { report_no_date(); }\n  void on_12_hour_time() {}\n  void on_24_hour_time() {}\n  void on_iso_time() {}\n  void on_am_pm() {}\n  void on_duration_value() {}\n  void on_duration_unit() {}\n  FMT_NORETURN void on_utc_offset() { report_no_date(); }\n  FMT_NORETURN void on_tz_name() 
{ report_no_date(); }\n};\n\ntemplate <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\ninline bool isnan(T) {\n  return false;\n}\ntemplate <typename T, FMT_ENABLE_IF(std::is_floating_point<T>::value)>\ninline bool isnan(T value) {\n  return std::isnan(value);\n}\n\ntemplate <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\ninline bool isfinite(T) {\n  return true;\n}\ntemplate <typename T, FMT_ENABLE_IF(std::is_floating_point<T>::value)>\ninline bool isfinite(T value) {\n  return std::isfinite(value);\n}\n\n// Convers value to int and checks that it's in the range [0, upper).\ntemplate <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\ninline int to_nonnegative_int(T value, int upper) {\n  FMT_ASSERT(value >= 0 && value <= upper, \"invalid value\");\n  (void)upper;\n  return static_cast<int>(value);\n}\ntemplate <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>\ninline int to_nonnegative_int(T value, int upper) {\n  FMT_ASSERT(\n      std::isnan(value) || (value >= 0 && value <= static_cast<T>(upper)),\n      \"invalid value\");\n  (void)upper;\n  return static_cast<int>(value);\n}\n\ntemplate <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\ninline T mod(T x, int y) {\n  return x % y;\n}\ntemplate <typename T, FMT_ENABLE_IF(std::is_floating_point<T>::value)>\ninline T mod(T x, int y) {\n  return std::fmod(x, static_cast<T>(y));\n}\n\n// If T is an integral type, maps T to its unsigned counterpart, otherwise\n// leaves it unchanged (unlike std::make_unsigned).\ntemplate <typename T, bool INTEGRAL = std::is_integral<T>::value>\nstruct make_unsigned_or_unchanged {\n  using type = T;\n};\n\ntemplate <typename T> struct make_unsigned_or_unchanged<T, true> {\n  using type = typename std::make_unsigned<T>::type;\n};\n\n#if FMT_SAFE_DURATION_CAST\n// throwing version of safe_duration_cast\ntemplate <typename To, typename FromRep, typename FromPeriod>\nTo fmt_safe_duration_cast(std::chrono::duration<FromRep, FromPeriod> from) {\n  int 
ec;\n  To to = safe_duration_cast::safe_duration_cast<To>(from, ec);\n  if (ec) FMT_THROW(format_error(\"cannot format duration\"));\n  return to;\n}\n#endif\n\ntemplate <typename Rep, typename Period,\n          FMT_ENABLE_IF(std::is_integral<Rep>::value)>\ninline std::chrono::duration<Rep, std::milli> get_milliseconds(\n    std::chrono::duration<Rep, Period> d) {\n  // this may overflow and/or the result may not fit in the\n  // target type.\n#if FMT_SAFE_DURATION_CAST\n  using CommonSecondsType =\n      typename std::common_type<decltype(d), std::chrono::seconds>::type;\n  const auto d_as_common = fmt_safe_duration_cast<CommonSecondsType>(d);\n  const auto d_as_whole_seconds =\n      fmt_safe_duration_cast<std::chrono::seconds>(d_as_common);\n  // this conversion should be nonproblematic\n  const auto diff = d_as_common - d_as_whole_seconds;\n  const auto ms =\n      fmt_safe_duration_cast<std::chrono::duration<Rep, std::milli>>(diff);\n  return ms;\n#else\n  auto s = std::chrono::duration_cast<std::chrono::seconds>(d);\n  return std::chrono::duration_cast<std::chrono::milliseconds>(d - s);\n#endif\n}\n\ntemplate <typename Rep, typename Period,\n          FMT_ENABLE_IF(std::is_floating_point<Rep>::value)>\ninline std::chrono::duration<Rep, std::milli> get_milliseconds(\n    std::chrono::duration<Rep, Period> d) {\n  using common_type = typename std::common_type<Rep, std::intmax_t>::type;\n  auto ms = mod(d.count() * static_cast<common_type>(Period::num) /\n                    static_cast<common_type>(Period::den) * 1000,\n                1000);\n  return std::chrono::duration<Rep, std::milli>(static_cast<Rep>(ms));\n}\n\ntemplate <typename Rep, typename OutputIt>\nOutputIt format_chrono_duration_value(OutputIt out, Rep val, int precision) {\n  if (precision >= 0) return format_to(out, \"{:.{}f}\", val, precision);\n  return format_to(out, std::is_floating_point<Rep>::value ? 
\"{:g}\" : \"{}\",\n                   val);\n}\n\ntemplate <typename Period, typename OutputIt>\nstatic OutputIt format_chrono_duration_unit(OutputIt out) {\n  if (const char* unit = get_units<Period>()) return format_to(out, \"{}\", unit);\n  if (Period::den == 1) return format_to(out, \"[{}]s\", Period::num);\n  return format_to(out, \"[{}/{}]s\", Period::num, Period::den);\n}\n\ntemplate <typename FormatContext, typename OutputIt, typename Rep,\n          typename Period>\nstruct chrono_formatter {\n  FormatContext& context;\n  OutputIt out;\n  int precision;\n  // rep is unsigned to avoid overflow.\n  using rep =\n      conditional_t<std::is_integral<Rep>::value && sizeof(Rep) < sizeof(int),\n                    unsigned, typename make_unsigned_or_unchanged<Rep>::type>;\n  rep val;\n  using seconds = std::chrono::duration<rep>;\n  seconds s;\n  using milliseconds = std::chrono::duration<rep, std::milli>;\n  bool negative;\n\n  using char_type = typename FormatContext::char_type;\n\n  explicit chrono_formatter(FormatContext& ctx, OutputIt o,\n                            std::chrono::duration<Rep, Period> d)\n      : context(ctx), out(o), val(d.count()), negative(false) {\n    if (d.count() < 0) {\n      val = -val;\n      negative = true;\n    }\n\n    // this may overflow and/or the result may not fit in the\n    // target type.\n#if FMT_SAFE_DURATION_CAST\n    // might need checked conversion (rep!=Rep)\n    auto tmpval = std::chrono::duration<rep, Period>(val);\n    s = fmt_safe_duration_cast<seconds>(tmpval);\n#else\n    s = std::chrono::duration_cast<seconds>(\n        std::chrono::duration<rep, Period>(val));\n#endif\n  }\n\n  // returns true if nan or inf, writes to out.\n  bool handle_nan_inf() {\n    if (isfinite(val)) {\n      return false;\n    }\n    if (isnan(val)) {\n      write_nan();\n      return true;\n    }\n    // must be +-inf\n    if (val > 0) {\n      write_pinf();\n    } else {\n      write_ninf();\n    }\n    return true;\n  }\n\n  Rep 
hour() const { return static_cast<Rep>(mod((s.count() / 3600), 24)); }\n\n  Rep hour12() const {\n    Rep hour = static_cast<Rep>(mod((s.count() / 3600), 12));\n    return hour <= 0 ? 12 : hour;\n  }\n\n  Rep minute() const { return static_cast<Rep>(mod((s.count() / 60), 60)); }\n  Rep second() const { return static_cast<Rep>(mod(s.count(), 60)); }\n\n  std::tm time() const {\n    auto time = std::tm();\n    time.tm_hour = to_nonnegative_int(hour(), 24);\n    time.tm_min = to_nonnegative_int(minute(), 60);\n    time.tm_sec = to_nonnegative_int(second(), 60);\n    return time;\n  }\n\n  void write_sign() {\n    if (negative) {\n      *out++ = '-';\n      negative = false;\n    }\n  }\n\n  void write(Rep value, int width) {\n    write_sign();\n    if (isnan(value)) return write_nan();\n    uint32_or_64_t<int> n = to_unsigned(\n        to_nonnegative_int(value, (std::numeric_limits<int>::max)()));\n    int num_digits = internal::count_digits(n);\n    if (width > num_digits) out = std::fill_n(out, width - num_digits, '0');\n    out = format_decimal<char_type>(out, n, num_digits);\n  }\n\n  void write_nan() { std::copy_n(\"nan\", 3, out); }\n  void write_pinf() { std::copy_n(\"inf\", 3, out); }\n  void write_ninf() { std::copy_n(\"-inf\", 4, out); }\n\n  void format_localized(const tm& time, const char* format) {\n    if (isnan(val)) return write_nan();\n    auto locale = context.locale().template get<std::locale>();\n    auto& facet = std::use_facet<std::time_put<char_type>>(locale);\n    std::basic_ostringstream<char_type> os;\n    os.imbue(locale);\n    facet.put(os, os, ' ', &time, format, format + std::strlen(format));\n    auto str = os.str();\n    std::copy(str.begin(), str.end(), out);\n  }\n\n  void on_text(const char_type* begin, const char_type* end) {\n    std::copy(begin, end, out);\n  }\n\n  // These are not implemented because durations don't have date information.\n  void on_abbr_weekday() {}\n  void on_full_weekday() {}\n  void 
on_dec0_weekday(numeric_system) {}\n  void on_dec1_weekday(numeric_system) {}\n  void on_abbr_month() {}\n  void on_full_month() {}\n  void on_datetime(numeric_system) {}\n  void on_loc_date(numeric_system) {}\n  void on_loc_time(numeric_system) {}\n  void on_us_date() {}\n  void on_iso_date() {}\n  void on_utc_offset() {}\n  void on_tz_name() {}\n\n  void on_24_hour(numeric_system ns) {\n    if (handle_nan_inf()) return;\n\n    if (ns == numeric_system::standard) return write(hour(), 2);\n    auto time = tm();\n    time.tm_hour = to_nonnegative_int(hour(), 24);\n    format_localized(time, \"%OH\");\n  }\n\n  void on_12_hour(numeric_system ns) {\n    if (handle_nan_inf()) return;\n\n    if (ns == numeric_system::standard) return write(hour12(), 2);\n    auto time = tm();\n    time.tm_hour = to_nonnegative_int(hour12(), 12);\n    format_localized(time, \"%OI\");\n  }\n\n  void on_minute(numeric_system ns) {\n    if (handle_nan_inf()) return;\n\n    if (ns == numeric_system::standard) return write(minute(), 2);\n    auto time = tm();\n    time.tm_min = to_nonnegative_int(minute(), 60);\n    format_localized(time, \"%OM\");\n  }\n\n  void on_second(numeric_system ns) {\n    if (handle_nan_inf()) return;\n\n    if (ns == numeric_system::standard) {\n      write(second(), 2);\n#if FMT_SAFE_DURATION_CAST\n      // convert rep->Rep\n      using duration_rep = std::chrono::duration<rep, Period>;\n      using duration_Rep = std::chrono::duration<Rep, Period>;\n      auto tmpval = fmt_safe_duration_cast<duration_Rep>(duration_rep{val});\n#else\n      auto tmpval = std::chrono::duration<Rep, Period>(val);\n#endif\n      auto ms = get_milliseconds(tmpval);\n      if (ms != std::chrono::milliseconds(0)) {\n        *out++ = '.';\n        write(ms.count(), 3);\n      }\n      return;\n    }\n    auto time = tm();\n    time.tm_sec = to_nonnegative_int(second(), 60);\n    format_localized(time, \"%OS\");\n  }\n\n  void on_12_hour_time() {\n    if (handle_nan_inf()) return;\n\n    
format_localized(time(), \"%r\");\n  }\n\n  void on_24_hour_time() {\n    if (handle_nan_inf()) {\n      *out++ = ':';\n      handle_nan_inf();\n      return;\n    }\n\n    write(hour(), 2);\n    *out++ = ':';\n    write(minute(), 2);\n  }\n\n  void on_iso_time() {\n    on_24_hour_time();\n    *out++ = ':';\n    if (handle_nan_inf()) return;\n    write(second(), 2);\n  }\n\n  void on_am_pm() {\n    if (handle_nan_inf()) return;\n    format_localized(time(), \"%p\");\n  }\n\n  void on_duration_value() {\n    if (handle_nan_inf()) return;\n    write_sign();\n    out = format_chrono_duration_value(out, val, precision);\n  }\n\n  void on_duration_unit() { out = format_chrono_duration_unit<Period>(out); }\n};\n}  // namespace internal\n\ntemplate <typename Rep, typename Period, typename Char>\nstruct formatter<std::chrono::duration<Rep, Period>, Char> {\n private:\n  basic_format_specs<Char> specs;\n  int precision;\n  using arg_ref_type = internal::arg_ref<Char>;\n  arg_ref_type width_ref;\n  arg_ref_type precision_ref;\n  mutable basic_string_view<Char> format_str;\n  using duration = std::chrono::duration<Rep, Period>;\n\n  struct spec_handler {\n    formatter& f;\n    basic_parse_context<Char>& context;\n    basic_string_view<Char> format_str;\n\n    template <typename Id> FMT_CONSTEXPR arg_ref_type make_arg_ref(Id arg_id) {\n      context.check_arg_id(arg_id);\n      return arg_ref_type(arg_id);\n    }\n\n    FMT_CONSTEXPR arg_ref_type make_arg_ref(basic_string_view<Char> arg_id) {\n      context.check_arg_id(arg_id);\n      const auto str_val = internal::string_view_metadata(format_str, arg_id);\n      return arg_ref_type(str_val);\n    }\n\n    FMT_CONSTEXPR arg_ref_type make_arg_ref(internal::auto_id) {\n      return arg_ref_type(context.next_arg_id());\n    }\n\n    void on_error(const char* msg) { FMT_THROW(format_error(msg)); }\n    void on_fill(Char fill) { f.specs.fill[0] = fill; }\n    void on_align(align_t align) { f.specs.align = align; }\n    void 
on_width(unsigned width) { f.specs.width = width; }\n    void on_precision(unsigned precision) { f.precision = precision; }\n    void end_precision() {}\n\n    template <typename Id> void on_dynamic_width(Id arg_id) {\n      f.width_ref = make_arg_ref(arg_id);\n    }\n\n    template <typename Id> void on_dynamic_precision(Id arg_id) {\n      f.precision_ref = make_arg_ref(arg_id);\n    }\n  };\n\n  using iterator = typename basic_parse_context<Char>::iterator;\n  struct parse_range {\n    iterator begin;\n    iterator end;\n  };\n\n  FMT_CONSTEXPR parse_range do_parse(basic_parse_context<Char>& ctx) {\n    auto begin = ctx.begin(), end = ctx.end();\n    if (begin == end || *begin == '}') return {begin, begin};\n    spec_handler handler{*this, ctx, format_str};\n    begin = internal::parse_align(begin, end, handler);\n    if (begin == end) return {begin, begin};\n    begin = internal::parse_width(begin, end, handler);\n    if (begin == end) return {begin, begin};\n    if (*begin == '.') {\n      if (std::is_floating_point<Rep>::value)\n        begin = internal::parse_precision(begin, end, handler);\n      else\n        handler.on_error(\"precision not allowed for this argument type\");\n    }\n    end = parse_chrono_format(begin, end, internal::chrono_format_checker());\n    return {begin, end};\n  }\n\n public:\n  formatter() : precision(-1) {}\n\n  FMT_CONSTEXPR auto parse(basic_parse_context<Char>& ctx)\n      -> decltype(ctx.begin()) {\n    auto range = do_parse(ctx);\n    format_str = basic_string_view<Char>(\n        &*range.begin, internal::to_unsigned(range.end - range.begin));\n    return range.end;\n  }\n\n  template <typename FormatContext>\n  auto format(const duration& d, FormatContext& ctx) -> decltype(ctx.out()) {\n    auto begin = format_str.begin(), end = format_str.end();\n    // As a possible future optimization, we could avoid extra copying if width\n    // is not specified.\n    basic_memory_buffer<Char> buf;\n    auto out = 
std::back_inserter(buf);\n    using range = internal::output_range<decltype(ctx.out()), Char>;\n    internal::basic_writer<range> w(range(ctx.out()));\n    internal::handle_dynamic_spec<internal::width_checker>(\n        specs.width, width_ref, ctx, format_str.begin());\n    internal::handle_dynamic_spec<internal::precision_checker>(\n        precision, precision_ref, ctx, format_str.begin());\n    if (begin == end || *begin == '}') {\n      out = internal::format_chrono_duration_value(out, d.count(), precision);\n      internal::format_chrono_duration_unit<Period>(out);\n    } else {\n      internal::chrono_formatter<FormatContext, decltype(out), Rep, Period> f(\n          ctx, out, d);\n      f.precision = precision;\n      parse_chrono_format(begin, end, f);\n    }\n    w.write(buf.data(), buf.size(), specs);\n    return w.out();\n  }\n};\n\nFMT_END_NAMESPACE\n\n#endif  // FMT_CHRONO_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/color.h",
    "content": "// Formatting library for C++ - color support\n//\n// Copyright (c) 2018 - present, Victor Zverovich and fmt contributors\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_COLOR_H_\n#define FMT_COLOR_H_\n\n#include \"format.h\"\n\nFMT_BEGIN_NAMESPACE\n\nenum class color : uint32_t {\n  alice_blue = 0xF0F8FF,               // rgb(240,248,255)\n  antique_white = 0xFAEBD7,            // rgb(250,235,215)\n  aqua = 0x00FFFF,                     // rgb(0,255,255)\n  aquamarine = 0x7FFFD4,               // rgb(127,255,212)\n  azure = 0xF0FFFF,                    // rgb(240,255,255)\n  beige = 0xF5F5DC,                    // rgb(245,245,220)\n  bisque = 0xFFE4C4,                   // rgb(255,228,196)\n  black = 0x000000,                    // rgb(0,0,0)\n  blanched_almond = 0xFFEBCD,          // rgb(255,235,205)\n  blue = 0x0000FF,                     // rgb(0,0,255)\n  blue_violet = 0x8A2BE2,              // rgb(138,43,226)\n  brown = 0xA52A2A,                    // rgb(165,42,42)\n  burly_wood = 0xDEB887,               // rgb(222,184,135)\n  cadet_blue = 0x5F9EA0,               // rgb(95,158,160)\n  chartreuse = 0x7FFF00,               // rgb(127,255,0)\n  chocolate = 0xD2691E,                // rgb(210,105,30)\n  coral = 0xFF7F50,                    // rgb(255,127,80)\n  cornflower_blue = 0x6495ED,          // rgb(100,149,237)\n  cornsilk = 0xFFF8DC,                 // rgb(255,248,220)\n  crimson = 0xDC143C,                  // rgb(220,20,60)\n  cyan = 0x00FFFF,                     // rgb(0,255,255)\n  dark_blue = 0x00008B,                // rgb(0,0,139)\n  dark_cyan = 0x008B8B,                // rgb(0,139,139)\n  dark_golden_rod = 0xB8860B,          // rgb(184,134,11)\n  dark_gray = 0xA9A9A9,                // rgb(169,169,169)\n  dark_green = 0x006400,               // rgb(0,100,0)\n  dark_khaki = 0xBDB76B,               // rgb(189,183,107)\n  dark_magenta = 0x8B008B,             // rgb(139,0,139)\n  
dark_olive_green = 0x556B2F,         // rgb(85,107,47)\n  dark_orange = 0xFF8C00,              // rgb(255,140,0)\n  dark_orchid = 0x9932CC,              // rgb(153,50,204)\n  dark_red = 0x8B0000,                 // rgb(139,0,0)\n  dark_salmon = 0xE9967A,              // rgb(233,150,122)\n  dark_sea_green = 0x8FBC8F,           // rgb(143,188,143)\n  dark_slate_blue = 0x483D8B,          // rgb(72,61,139)\n  dark_slate_gray = 0x2F4F4F,          // rgb(47,79,79)\n  dark_turquoise = 0x00CED1,           // rgb(0,206,209)\n  dark_violet = 0x9400D3,              // rgb(148,0,211)\n  deep_pink = 0xFF1493,                // rgb(255,20,147)\n  deep_sky_blue = 0x00BFFF,            // rgb(0,191,255)\n  dim_gray = 0x696969,                 // rgb(105,105,105)\n  dodger_blue = 0x1E90FF,              // rgb(30,144,255)\n  fire_brick = 0xB22222,               // rgb(178,34,34)\n  floral_white = 0xFFFAF0,             // rgb(255,250,240)\n  forest_green = 0x228B22,             // rgb(34,139,34)\n  fuchsia = 0xFF00FF,                  // rgb(255,0,255)\n  gainsboro = 0xDCDCDC,                // rgb(220,220,220)\n  ghost_white = 0xF8F8FF,              // rgb(248,248,255)\n  gold = 0xFFD700,                     // rgb(255,215,0)\n  golden_rod = 0xDAA520,               // rgb(218,165,32)\n  gray = 0x808080,                     // rgb(128,128,128)\n  green = 0x008000,                    // rgb(0,128,0)\n  green_yellow = 0xADFF2F,             // rgb(173,255,47)\n  honey_dew = 0xF0FFF0,                // rgb(240,255,240)\n  hot_pink = 0xFF69B4,                 // rgb(255,105,180)\n  indian_red = 0xCD5C5C,               // rgb(205,92,92)\n  indigo = 0x4B0082,                   // rgb(75,0,130)\n  ivory = 0xFFFFF0,                    // rgb(255,255,240)\n  khaki = 0xF0E68C,                    // rgb(240,230,140)\n  lavender = 0xE6E6FA,                 // rgb(230,230,250)\n  lavender_blush = 0xFFF0F5,           // rgb(255,240,245)\n  lawn_green = 0x7CFC00,               // rgb(124,252,0)\n  
lemon_chiffon = 0xFFFACD,            // rgb(255,250,205)\n  light_blue = 0xADD8E6,               // rgb(173,216,230)\n  light_coral = 0xF08080,              // rgb(240,128,128)\n  light_cyan = 0xE0FFFF,               // rgb(224,255,255)\n  light_golden_rod_yellow = 0xFAFAD2,  // rgb(250,250,210)\n  light_gray = 0xD3D3D3,               // rgb(211,211,211)\n  light_green = 0x90EE90,              // rgb(144,238,144)\n  light_pink = 0xFFB6C1,               // rgb(255,182,193)\n  light_salmon = 0xFFA07A,             // rgb(255,160,122)\n  light_sea_green = 0x20B2AA,          // rgb(32,178,170)\n  light_sky_blue = 0x87CEFA,           // rgb(135,206,250)\n  light_slate_gray = 0x778899,         // rgb(119,136,153)\n  light_steel_blue = 0xB0C4DE,         // rgb(176,196,222)\n  light_yellow = 0xFFFFE0,             // rgb(255,255,224)\n  lime = 0x00FF00,                     // rgb(0,255,0)\n  lime_green = 0x32CD32,               // rgb(50,205,50)\n  linen = 0xFAF0E6,                    // rgb(250,240,230)\n  magenta = 0xFF00FF,                  // rgb(255,0,255)\n  maroon = 0x800000,                   // rgb(128,0,0)\n  medium_aquamarine = 0x66CDAA,        // rgb(102,205,170)\n  medium_blue = 0x0000CD,              // rgb(0,0,205)\n  medium_orchid = 0xBA55D3,            // rgb(186,85,211)\n  medium_purple = 0x9370DB,            // rgb(147,112,219)\n  medium_sea_green = 0x3CB371,         // rgb(60,179,113)\n  medium_slate_blue = 0x7B68EE,        // rgb(123,104,238)\n  medium_spring_green = 0x00FA9A,      // rgb(0,250,154)\n  medium_turquoise = 0x48D1CC,         // rgb(72,209,204)\n  medium_violet_red = 0xC71585,        // rgb(199,21,133)\n  midnight_blue = 0x191970,            // rgb(25,25,112)\n  mint_cream = 0xF5FFFA,               // rgb(245,255,250)\n  misty_rose = 0xFFE4E1,               // rgb(255,228,225)\n  moccasin = 0xFFE4B5,                 // rgb(255,228,181)\n  navajo_white = 0xFFDEAD,             // rgb(255,222,173)\n  navy = 0x000080,                     // 
rgb(0,0,128)\n  old_lace = 0xFDF5E6,                 // rgb(253,245,230)\n  olive = 0x808000,                    // rgb(128,128,0)\n  olive_drab = 0x6B8E23,               // rgb(107,142,35)\n  orange = 0xFFA500,                   // rgb(255,165,0)\n  orange_red = 0xFF4500,               // rgb(255,69,0)\n  orchid = 0xDA70D6,                   // rgb(218,112,214)\n  pale_golden_rod = 0xEEE8AA,          // rgb(238,232,170)\n  pale_green = 0x98FB98,               // rgb(152,251,152)\n  pale_turquoise = 0xAFEEEE,           // rgb(175,238,238)\n  pale_violet_red = 0xDB7093,          // rgb(219,112,147)\n  papaya_whip = 0xFFEFD5,              // rgb(255,239,213)\n  peach_puff = 0xFFDAB9,               // rgb(255,218,185)\n  peru = 0xCD853F,                     // rgb(205,133,63)\n  pink = 0xFFC0CB,                     // rgb(255,192,203)\n  plum = 0xDDA0DD,                     // rgb(221,160,221)\n  powder_blue = 0xB0E0E6,              // rgb(176,224,230)\n  purple = 0x800080,                   // rgb(128,0,128)\n  rebecca_purple = 0x663399,           // rgb(102,51,153)\n  red = 0xFF0000,                      // rgb(255,0,0)\n  rosy_brown = 0xBC8F8F,               // rgb(188,143,143)\n  royal_blue = 0x4169E1,               // rgb(65,105,225)\n  saddle_brown = 0x8B4513,             // rgb(139,69,19)\n  salmon = 0xFA8072,                   // rgb(250,128,114)\n  sandy_brown = 0xF4A460,              // rgb(244,164,96)\n  sea_green = 0x2E8B57,                // rgb(46,139,87)\n  sea_shell = 0xFFF5EE,                // rgb(255,245,238)\n  sienna = 0xA0522D,                   // rgb(160,82,45)\n  silver = 0xC0C0C0,                   // rgb(192,192,192)\n  sky_blue = 0x87CEEB,                 // rgb(135,206,235)\n  slate_blue = 0x6A5ACD,               // rgb(106,90,205)\n  slate_gray = 0x708090,               // rgb(112,128,144)\n  snow = 0xFFFAFA,                     // rgb(255,250,250)\n  spring_green = 0x00FF7F,             // rgb(0,255,127)\n  steel_blue = 0x4682B4,         
      // rgb(70,130,180)\n  tan = 0xD2B48C,                      // rgb(210,180,140)\n  teal = 0x008080,                     // rgb(0,128,128)\n  thistle = 0xD8BFD8,                  // rgb(216,191,216)\n  tomato = 0xFF6347,                   // rgb(255,99,71)\n  turquoise = 0x40E0D0,                // rgb(64,224,208)\n  violet = 0xEE82EE,                   // rgb(238,130,238)\n  wheat = 0xF5DEB3,                    // rgb(245,222,179)\n  white = 0xFFFFFF,                    // rgb(255,255,255)\n  white_smoke = 0xF5F5F5,              // rgb(245,245,245)\n  yellow = 0xFFFF00,                   // rgb(255,255,0)\n  yellow_green = 0x9ACD32              // rgb(154,205,50)\n};                                     // enum class color\n\nenum class terminal_color : uint8_t {\n  black = 30,\n  red,\n  green,\n  yellow,\n  blue,\n  magenta,\n  cyan,\n  white,\n  bright_black = 90,\n  bright_red,\n  bright_green,\n  bright_yellow,\n  bright_blue,\n  bright_magenta,\n  bright_cyan,\n  bright_white\n};\n\nenum class emphasis : uint8_t {\n  bold = 1,\n  italic = 1 << 1,\n  underline = 1 << 2,\n  strikethrough = 1 << 3\n};\n\n// rgb is a struct for red, green and blue colors.\n// Using the name \"rgb\" makes some editors show the color in a tooltip.\nstruct rgb {\n  FMT_CONSTEXPR rgb() : r(0), g(0), b(0) {}\n  FMT_CONSTEXPR rgb(uint8_t r_, uint8_t g_, uint8_t b_) : r(r_), g(g_), b(b_) {}\n  FMT_CONSTEXPR rgb(uint32_t hex)\n      : r((hex >> 16) & 0xFF), g((hex >> 8) & 0xFF), b(hex & 0xFF) {}\n  FMT_CONSTEXPR rgb(color hex)\n      : r((uint32_t(hex) >> 16) & 0xFF),\n        g((uint32_t(hex) >> 8) & 0xFF),\n        b(uint32_t(hex) & 0xFF) {}\n  uint8_t r;\n  uint8_t g;\n  uint8_t b;\n};\n\nnamespace internal {\n\n// color is a struct of either a rgb color or a terminal color.\nstruct color_type {\n  FMT_CONSTEXPR color_type() FMT_NOEXCEPT : is_rgb(), value{} {}\n  FMT_CONSTEXPR color_type(color rgb_color) FMT_NOEXCEPT : is_rgb(true),\n                                                
           value{} {\n    value.rgb_color = static_cast<uint32_t>(rgb_color);\n  }\n  FMT_CONSTEXPR color_type(rgb rgb_color) FMT_NOEXCEPT : is_rgb(true), value{} {\n    value.rgb_color = (static_cast<uint32_t>(rgb_color.r) << 16) |\n                      (static_cast<uint32_t>(rgb_color.g) << 8) | rgb_color.b;\n  }\n  FMT_CONSTEXPR color_type(terminal_color term_color) FMT_NOEXCEPT : is_rgb(),\n                                                                     value{} {\n    value.term_color = static_cast<uint8_t>(term_color);\n  }\n  bool is_rgb;\n  union color_union {\n    uint8_t term_color;\n    uint32_t rgb_color;\n  } value;\n};\n}  // namespace internal\n\n// Experimental text formatting support.\nclass text_style {\n public:\n  FMT_CONSTEXPR text_style(emphasis em = emphasis()) FMT_NOEXCEPT\n      : set_foreground_color(),\n        set_background_color(),\n        ems(em) {}\n\n  FMT_CONSTEXPR text_style& operator|=(const text_style& rhs) {\n    if (!set_foreground_color) {\n      set_foreground_color = rhs.set_foreground_color;\n      foreground_color = rhs.foreground_color;\n    } else if (rhs.set_foreground_color) {\n      if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb)\n        FMT_THROW(format_error(\"can't OR a terminal color\"));\n      foreground_color.value.rgb_color |= rhs.foreground_color.value.rgb_color;\n    }\n\n    if (!set_background_color) {\n      set_background_color = rhs.set_background_color;\n      background_color = rhs.background_color;\n    } else if (rhs.set_background_color) {\n      if (!background_color.is_rgb || !rhs.background_color.is_rgb)\n        FMT_THROW(format_error(\"can't OR a terminal color\"));\n      background_color.value.rgb_color |= rhs.background_color.value.rgb_color;\n    }\n\n    ems = static_cast<emphasis>(static_cast<uint8_t>(ems) |\n                                static_cast<uint8_t>(rhs.ems));\n    return *this;\n  }\n\n  friend FMT_CONSTEXPR text_style operator|(text_style lhs,\n        
                                    const text_style& rhs) {\n    return lhs |= rhs;\n  }\n\n  FMT_CONSTEXPR text_style& operator&=(const text_style& rhs) {\n    if (!set_foreground_color) {\n      set_foreground_color = rhs.set_foreground_color;\n      foreground_color = rhs.foreground_color;\n    } else if (rhs.set_foreground_color) {\n      if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb)\n        FMT_THROW(format_error(\"can't AND a terminal color\"));\n      foreground_color.value.rgb_color &= rhs.foreground_color.value.rgb_color;\n    }\n\n    if (!set_background_color) {\n      set_background_color = rhs.set_background_color;\n      background_color = rhs.background_color;\n    } else if (rhs.set_background_color) {\n      if (!background_color.is_rgb || !rhs.background_color.is_rgb)\n        FMT_THROW(format_error(\"can't AND a terminal color\"));\n      background_color.value.rgb_color &= rhs.background_color.value.rgb_color;\n    }\n\n    ems = static_cast<emphasis>(static_cast<uint8_t>(ems) &\n                                static_cast<uint8_t>(rhs.ems));\n    return *this;\n  }\n\n  friend FMT_CONSTEXPR text_style operator&(text_style lhs,\n                                            const text_style& rhs) {\n    return lhs &= rhs;\n  }\n\n  FMT_CONSTEXPR bool has_foreground() const FMT_NOEXCEPT {\n    return set_foreground_color;\n  }\n  FMT_CONSTEXPR bool has_background() const FMT_NOEXCEPT {\n    return set_background_color;\n  }\n  FMT_CONSTEXPR bool has_emphasis() const FMT_NOEXCEPT {\n    return static_cast<uint8_t>(ems) != 0;\n  }\n  FMT_CONSTEXPR internal::color_type get_foreground() const FMT_NOEXCEPT {\n    assert(has_foreground() && \"no foreground specified for this style\");\n    return foreground_color;\n  }\n  FMT_CONSTEXPR internal::color_type get_background() const FMT_NOEXCEPT {\n    assert(has_background() && \"no background specified for this style\");\n    return background_color;\n  }\n  FMT_CONSTEXPR emphasis 
get_emphasis() const FMT_NOEXCEPT {\n    assert(has_emphasis() && \"no emphasis specified for this style\");\n    return ems;\n  }\n\n private:\n  FMT_CONSTEXPR text_style(bool is_foreground,\n                           internal::color_type text_color) FMT_NOEXCEPT\n      : set_foreground_color(),\n        set_background_color(),\n        ems() {\n    if (is_foreground) {\n      foreground_color = text_color;\n      set_foreground_color = true;\n    } else {\n      background_color = text_color;\n      set_background_color = true;\n    }\n  }\n\n  friend FMT_CONSTEXPR_DECL text_style fg(internal::color_type foreground)\n      FMT_NOEXCEPT;\n  friend FMT_CONSTEXPR_DECL text_style bg(internal::color_type background)\n      FMT_NOEXCEPT;\n\n  internal::color_type foreground_color;\n  internal::color_type background_color;\n  bool set_foreground_color;\n  bool set_background_color;\n  emphasis ems;\n};\n\nFMT_CONSTEXPR text_style fg(internal::color_type foreground) FMT_NOEXCEPT {\n  return text_style(/*is_foreground=*/true, foreground);\n}\n\nFMT_CONSTEXPR text_style bg(internal::color_type background) FMT_NOEXCEPT {\n  return text_style(/*is_foreground=*/false, background);\n}\n\nFMT_CONSTEXPR text_style operator|(emphasis lhs, emphasis rhs) FMT_NOEXCEPT {\n  return text_style(lhs) | rhs;\n}\n\nnamespace internal {\n\ntemplate <typename Char> struct ansi_color_escape {\n  FMT_CONSTEXPR ansi_color_escape(internal::color_type text_color,\n                                  const char* esc) FMT_NOEXCEPT {\n    // If we have a terminal color, we need to output another escape code\n    // sequence.\n    if (!text_color.is_rgb) {\n      bool is_background = esc == internal::data::background_color;\n      uint32_t value = text_color.value.term_color;\n      // Background ASCII codes are the same as the foreground ones but with\n      // 10 more.\n      if (is_background) value += 10u;\n\n      std::size_t index = 0;\n      buffer[index++] = static_cast<Char>('\\x1b');\n      
buffer[index++] = static_cast<Char>('[');\n\n      if (value >= 100u) {\n        buffer[index++] = static_cast<Char>('1');\n        value %= 100u;\n      }\n      buffer[index++] = static_cast<Char>('0' + value / 10u);\n      buffer[index++] = static_cast<Char>('0' + value % 10u);\n\n      buffer[index++] = static_cast<Char>('m');\n      buffer[index++] = static_cast<Char>('\\0');\n      return;\n    }\n\n    for (int i = 0; i < 7; i++) {\n      buffer[i] = static_cast<Char>(esc[i]);\n    }\n    rgb color(text_color.value.rgb_color);\n    to_esc(color.r, buffer + 7, ';');\n    to_esc(color.g, buffer + 11, ';');\n    to_esc(color.b, buffer + 15, 'm');\n    buffer[19] = static_cast<Char>(0);\n  }\n  FMT_CONSTEXPR ansi_color_escape(emphasis em) FMT_NOEXCEPT {\n    uint8_t em_codes[4] = {};\n    uint8_t em_bits = static_cast<uint8_t>(em);\n    if (em_bits & static_cast<uint8_t>(emphasis::bold)) em_codes[0] = 1;\n    if (em_bits & static_cast<uint8_t>(emphasis::italic)) em_codes[1] = 3;\n    if (em_bits & static_cast<uint8_t>(emphasis::underline)) em_codes[2] = 4;\n    if (em_bits & static_cast<uint8_t>(emphasis::strikethrough))\n      em_codes[3] = 9;\n\n    std::size_t index = 0;\n    for (int i = 0; i < 4; ++i) {\n      if (!em_codes[i]) continue;\n      buffer[index++] = static_cast<Char>('\\x1b');\n      buffer[index++] = static_cast<Char>('[');\n      buffer[index++] = static_cast<Char>('0' + em_codes[i]);\n      buffer[index++] = static_cast<Char>('m');\n    }\n    buffer[index++] = static_cast<Char>(0);\n  }\n  FMT_CONSTEXPR operator const Char*() const FMT_NOEXCEPT { return buffer; }\n\n  FMT_CONSTEXPR const Char* begin() const FMT_NOEXCEPT { return buffer; }\n  FMT_CONSTEXPR const Char* end() const FMT_NOEXCEPT {\n    return buffer + std::strlen(buffer);\n  }\n\n private:\n  Char buffer[7u + 3u * 4u + 1u];\n\n  static FMT_CONSTEXPR void to_esc(uint8_t c, Char* out,\n                                   char delimiter) FMT_NOEXCEPT {\n    out[0] = 
static_cast<Char>('0' + c / 100);\n    out[1] = static_cast<Char>('0' + c / 10 % 10);\n    out[2] = static_cast<Char>('0' + c % 10);\n    out[3] = static_cast<Char>(delimiter);\n  }\n};\n\ntemplate <typename Char>\nFMT_CONSTEXPR ansi_color_escape<Char> make_foreground_color(\n    internal::color_type foreground) FMT_NOEXCEPT {\n  return ansi_color_escape<Char>(foreground, internal::data::foreground_color);\n}\n\ntemplate <typename Char>\nFMT_CONSTEXPR ansi_color_escape<Char> make_background_color(\n    internal::color_type background) FMT_NOEXCEPT {\n  return ansi_color_escape<Char>(background, internal::data::background_color);\n}\n\ntemplate <typename Char>\nFMT_CONSTEXPR ansi_color_escape<Char> make_emphasis(emphasis em) FMT_NOEXCEPT {\n  return ansi_color_escape<Char>(em);\n}\n\ntemplate <typename Char>\ninline void fputs(const Char* chars, FILE* stream) FMT_NOEXCEPT {\n  std::fputs(chars, stream);\n}\n\ntemplate <>\ninline void fputs<wchar_t>(const wchar_t* chars, FILE* stream) FMT_NOEXCEPT {\n  std::fputws(chars, stream);\n}\n\ntemplate <typename Char> inline void reset_color(FILE* stream) FMT_NOEXCEPT {\n  fputs(internal::data::reset_color, stream);\n}\n\ntemplate <> inline void reset_color<wchar_t>(FILE* stream) FMT_NOEXCEPT {\n  fputs(internal::data::wreset_color, stream);\n}\n\ntemplate <typename Char>\ninline void reset_color(basic_memory_buffer<Char>& buffer) FMT_NOEXCEPT {\n  const char* begin = data::reset_color;\n  const char* end = begin + sizeof(data::reset_color) - 1;\n  buffer.append(begin, end);\n}\n\ntemplate <typename Char>\nstd::basic_string<Char> vformat(const text_style& ts,\n                                basic_string_view<Char> format_str,\n                                basic_format_args<buffer_context<Char> > args) {\n  basic_memory_buffer<Char> buffer;\n  bool has_style = false;\n  if (ts.has_emphasis()) {\n    has_style = true;\n    ansi_color_escape<Char> escape = make_emphasis<Char>(ts.get_emphasis());\n    
buffer.append(escape.begin(), escape.end());\n  }\n  if (ts.has_foreground()) {\n    has_style = true;\n    ansi_color_escape<Char> escape =\n        make_foreground_color<Char>(ts.get_foreground());\n    buffer.append(escape.begin(), escape.end());\n  }\n  if (ts.has_background()) {\n    has_style = true;\n    ansi_color_escape<Char> escape =\n        make_background_color<Char>(ts.get_background());\n    buffer.append(escape.begin(), escape.end());\n  }\n  internal::vformat_to(buffer, format_str, args);\n  if (has_style) {\n    reset_color<Char>(buffer);\n  }\n  return fmt::to_string(buffer);\n}\n}  // namespace internal\n\ntemplate <typename S, typename Char = char_t<S> >\nvoid vprint(std::FILE* f, const text_style& ts, const S& format,\n            basic_format_args<buffer_context<Char> > args) {\n  bool has_style = false;\n  if (ts.has_emphasis()) {\n    has_style = true;\n    internal::fputs<Char>(internal::make_emphasis<Char>(ts.get_emphasis()), f);\n  }\n  if (ts.has_foreground()) {\n    has_style = true;\n    internal::fputs<Char>(\n        internal::make_foreground_color<Char>(ts.get_foreground()), f);\n  }\n  if (ts.has_background()) {\n    has_style = true;\n    internal::fputs<Char>(\n        internal::make_background_color<Char>(ts.get_background()), f);\n  }\n  vprint(f, format, args);\n  if (has_style) {\n    internal::reset_color<Char>(f);\n  }\n}\n\n/**\n  Formats a string and prints it to the specified file stream using ANSI\n  escape sequences to specify text formatting.\n  Example:\n    fmt::print(fmt::emphasis::bold | fg(fmt::color::red),\n               \"Elapsed time: {0:.2f} seconds\", 1.23);\n */\ntemplate <typename S, typename... Args,\n          FMT_ENABLE_IF(internal::is_string<S>::value)>\nvoid print(std::FILE* f, const text_style& ts, const S& format_str,\n           const Args&... 
args) {\n  internal::check_format_string<Args...>(format_str);\n  using context = buffer_context<char_t<S> >;\n  format_arg_store<context, Args...> as{args...};\n  vprint(f, ts, format_str, basic_format_args<context>(as));\n}\n\n/**\n  Formats a string and prints it to stdout using ANSI escape sequences to\n  specify text formatting.\n  Example:\n    fmt::print(fmt::emphasis::bold | fg(fmt::color::red),\n               \"Elapsed time: {0:.2f} seconds\", 1.23);\n */\ntemplate <typename S, typename... Args,\n          FMT_ENABLE_IF(internal::is_string<S>::value)>\nvoid print(const text_style& ts, const S& format_str, const Args&... args) {\n  return print(stdout, ts, format_str, args...);\n}\n\ntemplate <typename S, typename Char = char_t<S> >\ninline std::basic_string<Char> vformat(\n    const text_style& ts, const S& format_str,\n    basic_format_args<buffer_context<Char> > args) {\n  return internal::vformat(ts, to_string_view(format_str), args);\n}\n\n/**\n  \\rst\n  Formats arguments and returns the result as a string using ANSI\n  escape sequences to specify text formatting.\n\n  **Example**::\n\n    #include <fmt/color.h>\n    std::string message = fmt::format(fmt::emphasis::bold | fg(fmt::color::red),\n                                      \"The answer is {}\", 42);\n  \\endrst\n*/\ntemplate <typename S, typename... Args, typename Char = char_t<S> >\ninline std::basic_string<Char> format(const text_style& ts, const S& format_str,\n                                      const Args&... args) {\n  return internal::vformat(ts, to_string_view(format_str),\n                           {internal::make_args_checked(format_str, args...)});\n}\n\nFMT_END_NAMESPACE\n\n#endif  // FMT_COLOR_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/core.h",
    "content": "// Formatting library for C++ - the core API\n//\n// Copyright (c) 2012 - present, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_CORE_H_\n#define FMT_CORE_H_\n\n#include <cassert>\n#include <cstdio>  // std::FILE\n#include <cstring>\n#include <iterator>\n#include <string>\n#include <type_traits>\n\n// The fmt library version in the form major * 10000 + minor * 100 + patch.\n#define FMT_VERSION 50301\n\n#ifdef __has_feature\n#  define FMT_HAS_FEATURE(x) __has_feature(x)\n#else\n#  define FMT_HAS_FEATURE(x) 0\n#endif\n\n#if defined(__has_include) && !defined(__INTELLISENSE__) && \\\n    !(defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1600)\n#  define FMT_HAS_INCLUDE(x) __has_include(x)\n#else\n#  define FMT_HAS_INCLUDE(x) 0\n#endif\n\n#ifdef __has_cpp_attribute\n#  define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)\n#else\n#  define FMT_HAS_CPP_ATTRIBUTE(x) 0\n#endif\n\n#if defined(__GNUC__) && !defined(__clang__)\n#  define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)\n#else\n#  define FMT_GCC_VERSION 0\n#endif\n\n#if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__)\n#  define FMT_HAS_GXX_CXX11 FMT_GCC_VERSION\n#else\n#  define FMT_HAS_GXX_CXX11 0\n#endif\n\n#ifdef _MSC_VER\n#  define FMT_MSC_VER _MSC_VER\n#else\n#  define FMT_MSC_VER 0\n#endif\n\n// Check if relaxed C++14 constexpr is supported.\n// GCC doesn't allow throw in constexpr until version 6 (bug 67371).\n#ifndef FMT_USE_CONSTEXPR\n#  define FMT_USE_CONSTEXPR                                           \\\n    (FMT_HAS_FEATURE(cxx_relaxed_constexpr) || FMT_MSC_VER >= 1910 || \\\n     (FMT_GCC_VERSION >= 600 && __cplusplus >= 201402L))\n#endif\n#if FMT_USE_CONSTEXPR\n#  define FMT_CONSTEXPR constexpr\n#  define FMT_CONSTEXPR_DECL constexpr\n#else\n#  define FMT_CONSTEXPR inline\n#  define FMT_CONSTEXPR_DECL\n#endif\n\n#ifndef FMT_OVERRIDE\n#  if FMT_HAS_FEATURE(cxx_override) || \\\n      (FMT_GCC_VERSION 
>= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900\n#    define FMT_OVERRIDE override\n#  else\n#    define FMT_OVERRIDE\n#  endif\n#endif\n\n// Check if exceptions are disabled.\n#ifndef FMT_EXCEPTIONS\n#  if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || \\\n      FMT_MSC_VER && !_HAS_EXCEPTIONS\n#    define FMT_EXCEPTIONS 0\n#  else\n#    define FMT_EXCEPTIONS 1\n#  endif\n#endif\n\n// Define FMT_USE_NOEXCEPT to make fmt use noexcept (C++11 feature).\n#ifndef FMT_USE_NOEXCEPT\n#  define FMT_USE_NOEXCEPT 0\n#endif\n\n#if FMT_USE_NOEXCEPT || FMT_HAS_FEATURE(cxx_noexcept) || \\\n    (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900\n#  define FMT_DETECTED_NOEXCEPT noexcept\n#  define FMT_HAS_CXX11_NOEXCEPT 1\n#else\n#  define FMT_DETECTED_NOEXCEPT throw()\n#  define FMT_HAS_CXX11_NOEXCEPT 0\n#endif\n\n#ifndef FMT_NOEXCEPT\n#  if FMT_EXCEPTIONS || FMT_HAS_CXX11_NOEXCEPT\n#    define FMT_NOEXCEPT FMT_DETECTED_NOEXCEPT\n#  else\n#    define FMT_NOEXCEPT\n#  endif\n#endif\n\n// [[noreturn]] is disabled on MSVC because of bogus unreachable code warnings.\n#if FMT_EXCEPTIONS && FMT_HAS_CPP_ATTRIBUTE(noreturn) && !FMT_MSC_VER\n#  define FMT_NORETURN [[noreturn]]\n#else\n#  define FMT_NORETURN\n#endif\n\n#ifndef FMT_DEPRECATED\n#  if (FMT_HAS_CPP_ATTRIBUTE(deprecated) && __cplusplus >= 201402L) || \\\n      FMT_MSC_VER >= 1900\n#    define FMT_DEPRECATED [[deprecated]]\n#  else\n#    if defined(__GNUC__) || defined(__clang__)\n#      define FMT_DEPRECATED __attribute__((deprecated))\n#    elif FMT_MSC_VER\n#      define FMT_DEPRECATED __declspec(deprecated)\n#    else\n#      define FMT_DEPRECATED /* deprecated */\n#    endif\n#  endif\n#endif\n\n#ifndef FMT_BEGIN_NAMESPACE\n#  if FMT_HAS_FEATURE(cxx_inline_namespaces) || FMT_GCC_VERSION >= 404 || \\\n      FMT_MSC_VER >= 1900\n#    define FMT_INLINE_NAMESPACE inline namespace\n#    define FMT_END_NAMESPACE \\\n      }                       \\\n      }\n#  else\n#    define FMT_INLINE_NAMESPACE 
namespace\n#    define FMT_END_NAMESPACE \\\n      }                       \\\n      using namespace v5;     \\\n      }\n#  endif\n#  define FMT_BEGIN_NAMESPACE \\\n    namespace fmt {           \\\n    FMT_INLINE_NAMESPACE v5 {\n#endif\n\n#if !defined(FMT_HEADER_ONLY) && defined(_WIN32)\n#  ifdef FMT_EXPORT\n#    define FMT_API __declspec(dllexport)\n#  elif defined(FMT_SHARED)\n#    define FMT_API __declspec(dllimport)\n#    define FMT_EXTERN_TEMPLATE_API FMT_API\n#  endif\n#endif\n#ifndef FMT_API\n#  define FMT_API\n#endif\n#ifndef FMT_EXTERN_TEMPLATE_API\n#  define FMT_EXTERN_TEMPLATE_API\n#endif\n\n#ifndef FMT_HEADER_ONLY\n#  define FMT_EXTERN extern\n#else\n#  define FMT_EXTERN\n#endif\n\n#ifndef FMT_ASSERT\n#  define FMT_ASSERT(condition, message) assert((condition) && message)\n#endif\n\n// libc++ supports string_view in pre-c++17.\n#if (FMT_HAS_INCLUDE(<string_view>) &&                       \\\n     (__cplusplus > 201402L || defined(_LIBCPP_VERSION))) || \\\n    (defined(_MSVC_LANG) && _MSVC_LANG > 201402L && _MSC_VER >= 1910)\n#  include <string_view>\n#  define FMT_USE_STRING_VIEW\n#elif FMT_HAS_INCLUDE(\"experimental/string_view\") && __cplusplus >= 201402L\n#  include <experimental/string_view>\n#  define FMT_USE_EXPERIMENTAL_STRING_VIEW\n#endif\n\nFMT_BEGIN_NAMESPACE\n\n// Implementations of enable_if_t and other types for pre-C++14 systems.\ntemplate <bool B, class T = void>\nusing enable_if_t = typename std::enable_if<B, T>::type;\ntemplate <bool B, class T, class F>\nusing conditional_t = typename std::conditional<B, T, F>::type;\ntemplate <bool B> using bool_constant = std::integral_constant<bool, B>;\ntemplate <typename T>\nusing remove_reference_t = typename std::remove_reference<T>::type;\ntemplate <typename T>\nusing remove_const_t = typename std::remove_const<T>::type;\n\nstruct monostate {};\n\n// An enable_if helper to be used in template parameters which results in much\n// shorter symbols: https://godbolt.org/z/sWw4vP. 
Extra parentheses are needed\n// to workaround a bug in MSVC 2019 (see #1140 and #1186).\n#define FMT_ENABLE_IF(...) enable_if_t<(__VA_ARGS__), int> = 0\n\nnamespace internal {\n\n// A workaround for gcc 4.8 to make void_t work in a SFINAE context.\ntemplate <typename... Ts> struct void_t_impl { using type = void; };\n\n#if defined(FMT_USE_STRING_VIEW)\ntemplate <typename Char> using std_string_view = std::basic_string_view<Char>;\n#elif defined(FMT_USE_EXPERIMENTAL_STRING_VIEW)\ntemplate <typename Char>\nusing std_string_view = std::experimental::basic_string_view<Char>;\n#else\ntemplate <typename T> struct std_string_view {};\n#endif\n\n// Casts nonnegative integer to unsigned.\ntemplate <typename Int>\nFMT_CONSTEXPR typename std::make_unsigned<Int>::type to_unsigned(Int value) {\n  FMT_ASSERT(value >= 0, \"negative value\");\n  return static_cast<typename std::make_unsigned<Int>::type>(value);\n}\n}  // namespace internal\n\ntemplate <typename... Ts>\nusing void_t = typename internal::void_t_impl<Ts...>::type;\n\n/**\n  An implementation of ``std::basic_string_view`` for pre-C++17. It provides a\n  subset of the API. ``fmt::basic_string_view`` is used for format strings even\n  if ``std::string_view`` is available to prevent issues when a library is\n  compiled with a different ``-std`` option than the client code (which is not\n  recommended).\n */\ntemplate <typename Char> class basic_string_view {\n private:\n  const Char* data_;\n  size_t size_;\n\n public:\n  using char_type = Char;\n  using iterator = const Char*;\n\n  FMT_CONSTEXPR basic_string_view() FMT_NOEXCEPT : data_(nullptr), size_(0) {}\n\n  /** Constructs a string reference object from a C string and a size. 
*/\n  FMT_CONSTEXPR basic_string_view(const Char* s, size_t count) FMT_NOEXCEPT\n      : data_(s),\n        size_(count) {}\n\n  /**\n    \\rst\n    Constructs a string reference object from a C string computing\n    the size with ``std::char_traits<Char>::length``.\n    \\endrst\n   */\n  basic_string_view(const Char* s)\n      : data_(s), size_(std::char_traits<Char>::length(s)) {}\n\n  /** Constructs a string reference from a ``std::basic_string`` object. */\n  template <typename Alloc>\n  FMT_CONSTEXPR basic_string_view(const std::basic_string<Char, Alloc>& s)\n      FMT_NOEXCEPT : data_(s.data()),\n                     size_(s.size()) {}\n\n  template <\n      typename S,\n      FMT_ENABLE_IF(std::is_same<S, internal::std_string_view<Char>>::value)>\n  FMT_CONSTEXPR basic_string_view(S s) FMT_NOEXCEPT : data_(s.data()),\n                                                      size_(s.size()) {}\n\n  /** Returns a pointer to the string data. */\n  FMT_CONSTEXPR const Char* data() const { return data_; }\n\n  /** Returns the string size. */\n  FMT_CONSTEXPR size_t size() const { return size_; }\n\n  FMT_CONSTEXPR iterator begin() const { return data_; }\n  FMT_CONSTEXPR iterator end() const { return data_ + size_; }\n\n  FMT_CONSTEXPR void remove_prefix(size_t n) {\n    data_ += n;\n    size_ -= n;\n  }\n\n  // Lexicographically compare this string reference to other.\n  int compare(basic_string_view other) const {\n    size_t str_size = size_ < other.size_ ? size_ : other.size_;\n    int result = std::char_traits<Char>::compare(data_, other.data_, str_size);\n    if (result == 0)\n      result = size_ == other.size_ ? 0 : (size_ < other.size_ ? 
-1 : 1);\n    return result;\n  }\n\n  friend bool operator==(basic_string_view lhs, basic_string_view rhs) {\n    return lhs.compare(rhs) == 0;\n  }\n  friend bool operator!=(basic_string_view lhs, basic_string_view rhs) {\n    return lhs.compare(rhs) != 0;\n  }\n  friend bool operator<(basic_string_view lhs, basic_string_view rhs) {\n    return lhs.compare(rhs) < 0;\n  }\n  friend bool operator<=(basic_string_view lhs, basic_string_view rhs) {\n    return lhs.compare(rhs) <= 0;\n  }\n  friend bool operator>(basic_string_view lhs, basic_string_view rhs) {\n    return lhs.compare(rhs) > 0;\n  }\n  friend bool operator>=(basic_string_view lhs, basic_string_view rhs) {\n    return lhs.compare(rhs) >= 0;\n  }\n};\n\nusing string_view = basic_string_view<char>;\nusing wstring_view = basic_string_view<wchar_t>;\n\n#ifndef __cpp_char8_t\n// A UTF-8 code unit type.\nenum char8_t : unsigned char {};\n#endif\n\n/** Specifies if ``T`` is a character type. Can be specialized by users. */\ntemplate <typename T> struct is_char : std::false_type {};\ntemplate <> struct is_char<char> : std::true_type {};\ntemplate <> struct is_char<wchar_t> : std::true_type {};\ntemplate <> struct is_char<char8_t> : std::true_type {};\ntemplate <> struct is_char<char16_t> : std::true_type {};\ntemplate <> struct is_char<char32_t> : std::true_type {};\n\n/**\n  \\rst\n  Returns a string view of `s`. 
In order to add custom string type support to\n  {fmt} provide an overload of `to_string_view` for it in the same namespace as\n  the type for the argument-dependent lookup to work.\n\n  **Example**::\n\n    namespace my_ns {\n    inline string_view to_string_view(const my_string& s) {\n      return {s.data(), s.length()};\n    }\n    }\n    std::string message = fmt::format(my_string(\"The answer is {}\"), 42);\n  \\endrst\n */\ntemplate <typename Char, FMT_ENABLE_IF(is_char<Char>::value)>\ninline basic_string_view<Char> to_string_view(const Char* s) {\n  return s;\n}\n\ntemplate <typename Char, typename Traits, typename Allocator>\ninline basic_string_view<Char> to_string_view(\n    const std::basic_string<Char, Traits, Allocator>& s) {\n  return {s.data(), s.size()};\n}\n\ntemplate <typename Char>\ninline basic_string_view<Char> to_string_view(basic_string_view<Char> s) {\n  return s;\n}\n\ntemplate <typename Char,\n          FMT_ENABLE_IF(!std::is_empty<internal::std_string_view<Char>>::value)>\ninline basic_string_view<Char> to_string_view(\n    internal::std_string_view<Char> s) {\n  return s;\n}\n\n// A base class for compile-time strings. It is defined in the fmt namespace to\n// make formatting functions visible via ADL, e.g. 
format(fmt(\"{}\"), 42).\nstruct compile_string {};\n\ntemplate <typename S>\nstruct is_compile_string : std::is_base_of<compile_string, S> {};\n\ntemplate <typename S, FMT_ENABLE_IF(is_compile_string<S>::value)>\nconstexpr basic_string_view<typename S::char_type> to_string_view(const S& s) {\n  return s;\n}\n\nnamespace internal {\nvoid to_string_view(...);\nusing fmt::v5::to_string_view;\n\n// Specifies whether S is a string type convertible to fmt::basic_string_view.\n// It should be a constexpr function but MSVC 2017 fails to compile it in\n// enable_if and MSVC 2015 fails to compile it as an alias template.\ntemplate <typename S>\nstruct is_string : std::is_class<decltype(to_string_view(std::declval<S>()))> {\n};\n\ntemplate <typename S, typename = void> struct char_t_impl {};\ntemplate <typename S> struct char_t_impl<S, enable_if_t<is_string<S>::value>> {\n  using result = decltype(to_string_view(std::declval<S>()));\n  using type = typename result::char_type;\n};\n\nstruct error_handler {\n  FMT_CONSTEXPR error_handler() {}\n  FMT_CONSTEXPR error_handler(const error_handler&) {}\n\n  // This function is intentionally not constexpr to give a compile-time error.\n  FMT_NORETURN FMT_API void on_error(const char* message);\n};\n}  // namespace internal\n\n/** String's character type. 
*/\ntemplate <typename S> using char_t = typename internal::char_t_impl<S>::type;\n\n// Parsing context consisting of a format string range being parsed and an\n// argument counter for automatic indexing.\ntemplate <typename Char, typename ErrorHandler = internal::error_handler>\nclass basic_parse_context : private ErrorHandler {\n private:\n  basic_string_view<Char> format_str_;\n  int next_arg_id_;\n\n public:\n  using char_type = Char;\n  using iterator = typename basic_string_view<Char>::iterator;\n\n  explicit FMT_CONSTEXPR basic_parse_context(basic_string_view<Char> format_str,\n                                             ErrorHandler eh = ErrorHandler())\n      : ErrorHandler(eh), format_str_(format_str), next_arg_id_(0) {}\n\n  // Returns an iterator to the beginning of the format string range being\n  // parsed.\n  FMT_CONSTEXPR iterator begin() const FMT_NOEXCEPT {\n    return format_str_.begin();\n  }\n\n  // Returns an iterator past the end of the format string range being parsed.\n  FMT_CONSTEXPR iterator end() const FMT_NOEXCEPT { return format_str_.end(); }\n\n  // Advances the begin iterator to ``it``.\n  FMT_CONSTEXPR void advance_to(iterator it) {\n    format_str_.remove_prefix(internal::to_unsigned(it - begin()));\n  }\n\n  // Returns the next argument index.\n  FMT_CONSTEXPR int next_arg_id() {\n    if (next_arg_id_ >= 0) return next_arg_id_++;\n    on_error(\"cannot switch from manual to automatic argument indexing\");\n    return 0;\n  }\n\n  FMT_CONSTEXPR bool check_arg_id(int) {\n    if (next_arg_id_ > 0) {\n      on_error(\"cannot switch from automatic to manual argument indexing\");\n      return false;\n    }\n    next_arg_id_ = -1;\n    return true;\n  }\n\n  FMT_CONSTEXPR void check_arg_id(basic_string_view<Char>) {}\n\n  FMT_CONSTEXPR void on_error(const char* message) {\n    ErrorHandler::on_error(message);\n  }\n\n  FMT_CONSTEXPR ErrorHandler error_handler() const { return *this; }\n};\n\nusing format_parse_context = 
basic_parse_context<char>;\nusing wformat_parse_context = basic_parse_context<wchar_t>;\n\nusing parse_context FMT_DEPRECATED = basic_parse_context<char>;\nusing wparse_context FMT_DEPRECATED = basic_parse_context<wchar_t>;\n\ntemplate <typename Context> class basic_format_arg;\ntemplate <typename Context> class basic_format_args;\n\n// A formatter for objects of type T.\ntemplate <typename T, typename Char = char, typename Enable = void>\nstruct formatter {\n  // A deleted default constructor indicates a disabled formatter.\n  formatter() = delete;\n};\n\ntemplate <typename T, typename Char, typename Enable = void>\nstruct FMT_DEPRECATED convert_to_int\n    : bool_constant<!std::is_arithmetic<T>::value &&\n                    std::is_convertible<T, int>::value> {};\n\nnamespace internal {\n\n// Specifies if T has an enabled formatter specialization. A type can be\n// formattable even if it doesn't have a formatter e.g. via a conversion.\ntemplate <typename T, typename Context>\nusing has_formatter =\n    std::is_constructible<typename Context::template formatter_type<T>>;\n\n/** A contiguous memory buffer with an optional growing ability. */\ntemplate <typename T> class buffer {\n private:\n  buffer(const buffer&) = delete;\n  void operator=(const buffer&) = delete;\n\n  T* ptr_;\n  std::size_t size_;\n  std::size_t capacity_;\n\n protected:\n  // Don't initialize ptr_ since it is not accessed to save a few cycles.\n  buffer(std::size_t sz) FMT_NOEXCEPT : size_(sz), capacity_(sz) {}\n\n  buffer(T* p = nullptr, std::size_t sz = 0, std::size_t cap = 0) FMT_NOEXCEPT\n      : ptr_(p),\n        size_(sz),\n        capacity_(cap) {}\n\n  /** Sets the buffer data and capacity. */\n  void set(T* buf_data, std::size_t buf_capacity) FMT_NOEXCEPT {\n    ptr_ = buf_data;\n    capacity_ = buf_capacity;\n  }\n\n  /** Increases the buffer capacity to hold at least *capacity* elements. 
*/\n  virtual void grow(std::size_t capacity) = 0;\n\n public:\n  using value_type = T;\n  using const_reference = const T&;\n\n  virtual ~buffer() {}\n\n  T* begin() FMT_NOEXCEPT { return ptr_; }\n  T* end() FMT_NOEXCEPT { return ptr_ + size_; }\n\n  /** Returns the size of this buffer. */\n  std::size_t size() const FMT_NOEXCEPT { return size_; }\n\n  /** Returns the capacity of this buffer. */\n  std::size_t capacity() const FMT_NOEXCEPT { return capacity_; }\n\n  /** Returns a pointer to the buffer data. */\n  T* data() FMT_NOEXCEPT { return ptr_; }\n\n  /** Returns a pointer to the buffer data. */\n  const T* data() const FMT_NOEXCEPT { return ptr_; }\n\n  /**\n    Resizes the buffer. If T is a POD type new elements may not be initialized.\n   */\n  void resize(std::size_t new_size) {\n    reserve(new_size);\n    size_ = new_size;\n  }\n\n  /** Clears this buffer. */\n  void clear() { size_ = 0; }\n\n  /** Reserves space to store at least *capacity* elements. */\n  void reserve(std::size_t new_capacity) {\n    if (new_capacity > capacity_) grow(new_capacity);\n  }\n\n  void push_back(const T& value) {\n    reserve(size_ + 1);\n    ptr_[size_++] = value;\n  }\n\n  /** Appends data to the end of the buffer. 
*/\n  template <typename U> void append(const U* begin, const U* end);\n\n  T& operator[](std::size_t index) { return ptr_[index]; }\n  const T& operator[](std::size_t index) const { return ptr_[index]; }\n};\n\n// A container-backed buffer.\ntemplate <typename Container>\nclass container_buffer : public buffer<typename Container::value_type> {\n private:\n  Container& container_;\n\n protected:\n  void grow(std::size_t capacity) FMT_OVERRIDE {\n    container_.resize(capacity);\n    this->set(&container_[0], capacity);\n  }\n\n public:\n  explicit container_buffer(Container& c)\n      : buffer<typename Container::value_type>(c.size()), container_(c) {}\n};\n\n// Extracts a reference to the container from back_insert_iterator.\ntemplate <typename Container>\ninline Container& get_container(std::back_insert_iterator<Container> it) {\n  using bi_iterator = std::back_insert_iterator<Container>;\n  struct accessor : bi_iterator {\n    accessor(bi_iterator iter) : bi_iterator(iter) {}\n    using bi_iterator::container;\n  };\n  return *accessor(it).container;\n}\n\ntemplate <typename T, typename Char = char, typename Enable = void>\nstruct fallback_formatter {\n  fallback_formatter() = delete;\n};\n\n// Specifies if T has an enabled fallback_formatter specialization.\ntemplate <typename T, typename Context>\nusing has_fallback_formatter =\n    std::is_constructible<fallback_formatter<T, typename Context::char_type>>;\n\ntemplate <typename Char> struct named_arg_base;\ntemplate <typename T, typename Char> struct named_arg;\n\nenum type {\n  none_type,\n  named_arg_type,\n  // Integer types should go first,\n  int_type,\n  uint_type,\n  long_long_type,\n  ulong_long_type,\n  bool_type,\n  char_type,\n  last_integer_type = char_type,\n  // followed by floating-point types.\n  double_type,\n  long_double_type,\n  last_numeric_type = long_double_type,\n  cstring_type,\n  string_type,\n  pointer_type,\n  custom_type\n};\n\n// Maps core type T to the corresponding type enum 
constant.\ntemplate <typename T, typename Char>\nstruct type_constant : std::integral_constant<type, custom_type> {};\n\n#define FMT_TYPE_CONSTANT(Type, constant) \\\n  template <typename Char>                \\\n  struct type_constant<Type, Char> : std::integral_constant<type, constant> {}\n\nFMT_TYPE_CONSTANT(const named_arg_base<Char>&, named_arg_type);\nFMT_TYPE_CONSTANT(int, int_type);\nFMT_TYPE_CONSTANT(unsigned, uint_type);\nFMT_TYPE_CONSTANT(long long, long_long_type);\nFMT_TYPE_CONSTANT(unsigned long long, ulong_long_type);\nFMT_TYPE_CONSTANT(bool, bool_type);\nFMT_TYPE_CONSTANT(Char, char_type);\nFMT_TYPE_CONSTANT(double, double_type);\nFMT_TYPE_CONSTANT(long double, long_double_type);\nFMT_TYPE_CONSTANT(const Char*, cstring_type);\nFMT_TYPE_CONSTANT(basic_string_view<Char>, string_type);\nFMT_TYPE_CONSTANT(const void*, pointer_type);\n\nFMT_CONSTEXPR bool is_integral(type t) {\n  FMT_ASSERT(t != named_arg_type, \"invalid argument type\");\n  return t > none_type && t <= last_integer_type;\n}\n\nFMT_CONSTEXPR bool is_arithmetic(type t) {\n  FMT_ASSERT(t != named_arg_type, \"invalid argument type\");\n  return t > none_type && t <= last_numeric_type;\n}\n\ntemplate <typename Char> struct string_value {\n  const Char* data;\n  std::size_t size;\n};\n\ntemplate <typename Context> struct custom_value {\n  using parse_context = basic_parse_context<typename Context::char_type>;\n  const void* value;\n  void (*format)(const void* arg, parse_context& parse_ctx, Context& ctx);\n};\n\n// A formatting argument value.\ntemplate <typename Context> class value {\n public:\n  using char_type = typename Context::char_type;\n\n  union {\n    int int_value;\n    unsigned uint_value;\n    long long long_long_value;\n    unsigned long long ulong_long_value;\n    bool bool_value;\n    char_type char_value;\n    double double_value;\n    long double long_double_value;\n    const void* pointer;\n    string_value<char_type> string;\n    custom_value<Context> custom;\n    const 
named_arg_base<char_type>* named_arg;\n  };\n\n  FMT_CONSTEXPR value(int val = 0) : int_value(val) {}\n  FMT_CONSTEXPR value(unsigned val) : uint_value(val) {}\n  value(long long val) : long_long_value(val) {}\n  value(unsigned long long val) : ulong_long_value(val) {}\n  value(double val) : double_value(val) {}\n  value(long double val) : long_double_value(val) {}\n  value(bool val) : bool_value(val) {}\n  value(char_type val) : char_value(val) {}\n  value(const char_type* val) { string.data = val; }\n  value(basic_string_view<char_type> val) {\n    string.data = val.data();\n    string.size = val.size();\n  }\n  value(const void* val) : pointer(val) {}\n\n  template <typename T> value(const T& val) {\n    custom.value = &val;\n    // Get the formatter type through the context to allow different contexts\n    // have different extension points, e.g. `formatter<T>` for `format` and\n    // `printf_formatter<T>` for `printf`.\n    custom.format = format_custom_arg<\n        T, conditional_t<has_formatter<T, Context>::value,\n                         typename Context::template formatter_type<T>,\n                         fallback_formatter<T, char_type>>>;\n  }\n\n  value(const named_arg_base<char_type>& val) { named_arg = &val; }\n\n private:\n  // Formats an argument of a custom type, such as a user-defined class.\n  template <typename T, typename Formatter>\n  static void format_custom_arg(const void* arg,\n                                basic_parse_context<char_type>& parse_ctx,\n                                Context& ctx) {\n    Formatter f;\n    parse_ctx.advance_to(f.parse(parse_ctx));\n    ctx.advance_to(f.format(*static_cast<const T*>(arg), ctx));\n  }\n};\n\ntemplate <typename Context, typename T>\nFMT_CONSTEXPR basic_format_arg<Context> make_arg(const T& value);\n\n// To minimize the number of types we need to deal with, long is translated\n// either to int or to long long depending on its size.\nenum { long_short = sizeof(long) == sizeof(int) };\nusing 
long_type = conditional_t<long_short, int, long long>;\nusing ulong_type = conditional_t<long_short, unsigned, unsigned long long>;\n\n// Maps formatting arguments to core types.\ntemplate <typename Context> struct arg_mapper {\n  using char_type = typename Context::char_type;\n\n  FMT_CONSTEXPR int map(signed char val) { return val; }\n  FMT_CONSTEXPR unsigned map(unsigned char val) { return val; }\n  FMT_CONSTEXPR int map(short val) { return val; }\n  FMT_CONSTEXPR unsigned map(unsigned short val) { return val; }\n  FMT_CONSTEXPR int map(int val) { return val; }\n  FMT_CONSTEXPR unsigned map(unsigned val) { return val; }\n  FMT_CONSTEXPR long_type map(long val) { return val; }\n  FMT_CONSTEXPR ulong_type map(unsigned long val) { return val; }\n  FMT_CONSTEXPR long long map(long long val) { return val; }\n  FMT_CONSTEXPR unsigned long long map(unsigned long long val) { return val; }\n  FMT_CONSTEXPR bool map(bool val) { return val; }\n\n  template <typename T, FMT_ENABLE_IF(is_char<T>::value)>\n  FMT_CONSTEXPR char_type map(T val) {\n    static_assert(\n        std::is_same<T, char>::value || std::is_same<T, char_type>::value,\n        \"mixing character types is disallowed\");\n    return val;\n  }\n\n  FMT_CONSTEXPR double map(float val) { return static_cast<double>(val); }\n  FMT_CONSTEXPR double map(double val) { return val; }\n  FMT_CONSTEXPR long double map(long double val) { return val; }\n\n  FMT_CONSTEXPR const char_type* map(char_type* val) { return val; }\n  FMT_CONSTEXPR const char_type* map(const char_type* val) { return val; }\n  template <typename T, FMT_ENABLE_IF(is_string<T>::value)>\n  FMT_CONSTEXPR basic_string_view<char_type> map(const T& val) {\n    static_assert(std::is_same<char_type, char_t<T>>::value,\n                  \"mixing character types is disallowed\");\n    return to_string_view(val);\n  }\n  template <typename T,\n            FMT_ENABLE_IF(\n                std::is_constructible<basic_string_view<char_type>, T>::value &&\n       
         !is_string<T>::value)>\n  FMT_CONSTEXPR basic_string_view<char_type> map(const T& val) {\n    return basic_string_view<char_type>(val);\n  }\n  FMT_CONSTEXPR const char* map(const signed char* val) {\n    static_assert(std::is_same<char_type, char>::value, \"invalid string type\");\n    return reinterpret_cast<const char*>(val);\n  }\n  FMT_CONSTEXPR const char* map(const unsigned char* val) {\n    static_assert(std::is_same<char_type, char>::value, \"invalid string type\");\n    return reinterpret_cast<const char*>(val);\n  }\n\n  FMT_CONSTEXPR const void* map(void* val) { return val; }\n  FMT_CONSTEXPR const void* map(const void* val) { return val; }\n  FMT_CONSTEXPR const void* map(std::nullptr_t val) { return val; }\n  template <typename T> FMT_CONSTEXPR int map(const T*) {\n    // Formatting of arbitrary pointers is disallowed. If you want to output\n    // a pointer cast it to \"void *\" or \"const void *\". In particular, this\n    // forbids formatting of \"[const] volatile char *\" which is printed as bool\n    // by iostreams.\n    static_assert(!sizeof(T), \"formatting of non-void pointers is disallowed\");\n    return 0;\n  }\n\n  template <typename T,\n            FMT_ENABLE_IF(std::is_enum<T>::value &&\n                          !has_formatter<T, Context>::value &&\n                          !has_fallback_formatter<T, Context>::value)>\n  FMT_CONSTEXPR int map(const T& val) {\n    return static_cast<int>(val);\n  }\n  template <typename T,\n            FMT_ENABLE_IF(!is_string<T>::value && !is_char<T>::value &&\n                          (has_formatter<T, Context>::value ||\n                           has_fallback_formatter<T, Context>::value))>\n  FMT_CONSTEXPR const T& map(const T& val) {\n    return val;\n  }\n\n  template <typename T>\n  FMT_CONSTEXPR const named_arg_base<char_type>& map(\n      const named_arg<T, char_type>& val) {\n    auto arg = make_arg<Context>(val.value);\n    std::memcpy(val.data, &arg, sizeof(arg));\n    return 
val;\n  }\n};\n\n// A type constant after applying arg_mapper<Context>.\ntemplate <typename T, typename Context>\nusing mapped_type_constant =\n    type_constant<decltype(arg_mapper<Context>().map(std::declval<T>())),\n                  typename Context::char_type>;\n\n// Maximum number of arguments with packed types.\nenum { max_packed_args = 15 };\nenum : unsigned long long { is_unpacked_bit = 1ull << 63 };\n\ntemplate <typename Context> class arg_map;\n}  // namespace internal\n\n// A formatting argument. It is a trivially copyable/constructible type to\n// allow storage in basic_memory_buffer.\ntemplate <typename Context> class basic_format_arg {\n private:\n  internal::value<Context> value_;\n  internal::type type_;\n\n  template <typename ContextType, typename T>\n  friend FMT_CONSTEXPR basic_format_arg<ContextType> internal::make_arg(\n      const T& value);\n\n  template <typename Visitor, typename Ctx>\n  friend FMT_CONSTEXPR auto visit_format_arg(Visitor&& vis,\n                                             const basic_format_arg<Ctx>& arg)\n      -> decltype(vis(0));\n\n  friend class basic_format_args<Context>;\n  friend class internal::arg_map<Context>;\n\n  using char_type = typename Context::char_type;\n\n public:\n  class handle {\n   public:\n    explicit handle(internal::custom_value<Context> custom) : custom_(custom) {}\n\n    void format(basic_parse_context<char_type>& parse_ctx, Context& ctx) const {\n      custom_.format(custom_.value, parse_ctx, ctx);\n    }\n\n   private:\n    internal::custom_value<Context> custom_;\n  };\n\n  FMT_CONSTEXPR basic_format_arg() : type_(internal::none_type) {}\n\n  FMT_CONSTEXPR explicit operator bool() const FMT_NOEXCEPT {\n    return type_ != internal::none_type;\n  }\n\n  internal::type type() const { return type_; }\n\n  bool is_integral() const { return internal::is_integral(type_); }\n  bool is_arithmetic() const { return internal::is_arithmetic(type_); }\n};\n\n/**\n  \\rst\n  Visits an argument 
dispatching to the appropriate visit method based on\n  the argument type. For example, if the argument type is ``double`` then\n  ``vis(value)`` will be called with the value of type ``double``.\n  \\endrst\n */\ntemplate <typename Visitor, typename Context>\nFMT_CONSTEXPR auto visit_format_arg(Visitor&& vis,\n                                    const basic_format_arg<Context>& arg)\n    -> decltype(vis(0)) {\n  using char_type = typename Context::char_type;\n  switch (arg.type_) {\n  case internal::none_type:\n    break;\n  case internal::named_arg_type:\n    FMT_ASSERT(false, \"invalid argument type\");\n    break;\n  case internal::int_type:\n    return vis(arg.value_.int_value);\n  case internal::uint_type:\n    return vis(arg.value_.uint_value);\n  case internal::long_long_type:\n    return vis(arg.value_.long_long_value);\n  case internal::ulong_long_type:\n    return vis(arg.value_.ulong_long_value);\n  case internal::bool_type:\n    return vis(arg.value_.bool_value);\n  case internal::char_type:\n    return vis(arg.value_.char_value);\n  case internal::double_type:\n    return vis(arg.value_.double_value);\n  case internal::long_double_type:\n    return vis(arg.value_.long_double_value);\n  case internal::cstring_type:\n    return vis(arg.value_.string.data);\n  case internal::string_type:\n    return vis(basic_string_view<char_type>(arg.value_.string.data,\n                                            arg.value_.string.size));\n  case internal::pointer_type:\n    return vis(arg.value_.pointer);\n  case internal::custom_type:\n    return vis(typename basic_format_arg<Context>::handle(arg.value_.custom));\n  }\n  return vis(monostate());\n}\n\nnamespace internal {\n// A map from argument names to their values for named arguments.\ntemplate <typename Context> class arg_map {\n private:\n  arg_map(const arg_map&) = delete;\n  void operator=(const arg_map&) = delete;\n\n  using char_type = typename Context::char_type;\n\n  struct entry {\n    
basic_string_view<char_type> name;\n    basic_format_arg<Context> arg;\n  };\n\n  entry* map_;\n  unsigned size_;\n\n  void push_back(value<Context> val) {\n    const auto& named = *val.named_arg;\n    map_[size_] = {named.name, named.template deserialize<Context>()};\n    ++size_;\n  }\n\n public:\n  arg_map() : map_(nullptr), size_(0) {}\n  void init(const basic_format_args<Context>& args);\n  ~arg_map() { delete[] map_; }\n\n  basic_format_arg<Context> find(basic_string_view<char_type> name) const {\n    // The list is unsorted, so just return the first matching name.\n    for (entry *it = map_, *end = map_ + size_; it != end; ++it) {\n      if (it->name == name) return it->arg;\n    }\n    return {};\n  }\n};\n\n// A type-erased reference to an std::locale to avoid heavy <locale> include.\nclass locale_ref {\n private:\n  const void* locale_;  // A type-erased pointer to std::locale.\n\n public:\n  locale_ref() : locale_(nullptr) {}\n  template <typename Locale> explicit locale_ref(const Locale& loc);\n\n  template <typename Locale> Locale get() const;\n};\n\ntemplate <typename> constexpr unsigned long long encode_types() { return 0; }\n\ntemplate <typename Context, typename Arg, typename... 
Args>\nconstexpr unsigned long long encode_types() {\n  return mapped_type_constant<Arg, Context>::value |\n         (encode_types<Context, Args...>() << 4);\n}\n\ntemplate <typename Context, typename T>\nFMT_CONSTEXPR basic_format_arg<Context> make_arg(const T& value) {\n  basic_format_arg<Context> arg;\n  arg.type_ = mapped_type_constant<T, Context>::value;\n  arg.value_ = arg_mapper<Context>().map(value);\n  return arg;\n}\n\ntemplate <bool IS_PACKED, typename Context, typename T,\n          FMT_ENABLE_IF(IS_PACKED)>\ninline value<Context> make_arg(const T& val) {\n  return arg_mapper<Context>().map(val);\n}\n\ntemplate <bool IS_PACKED, typename Context, typename T,\n          FMT_ENABLE_IF(!IS_PACKED)>\ninline basic_format_arg<Context> make_arg(const T& value) {\n  return make_arg<Context>(value);\n}\n}  // namespace internal\n\n// Formatting context.\ntemplate <typename OutputIt, typename Char> class basic_format_context {\n public:\n  /** The character type for the output. */\n  using char_type = Char;\n\n private:\n  OutputIt out_;\n  basic_format_args<basic_format_context> args_;\n  internal::arg_map<basic_format_context> map_;\n  internal::locale_ref loc_;\n\n  basic_format_context(const basic_format_context&) = delete;\n  void operator=(const basic_format_context&) = delete;\n\n public:\n  using iterator = OutputIt;\n  using format_arg = basic_format_arg<basic_format_context>;\n  template <typename T> using formatter_type = formatter<T, char_type>;\n\n  /**\n   Constructs a ``basic_format_context`` object. 
References to the arguments are\n   stored in the object so make sure they have appropriate lifetimes.\n   */\n  basic_format_context(OutputIt out,\n                       basic_format_args<basic_format_context> ctx_args,\n                       internal::locale_ref loc = internal::locale_ref())\n      : out_(out), args_(ctx_args), loc_(loc) {}\n\n  format_arg arg(int id) const { return args_.get(id); }\n\n  // Checks if manual indexing is used and returns the argument with the\n  // specified name.\n  format_arg arg(basic_string_view<char_type> name);\n\n  internal::error_handler error_handler() { return {}; }\n  void on_error(const char* message) { error_handler().on_error(message); }\n\n  // Returns an iterator to the beginning of the output range.\n  iterator out() { return out_; }\n\n  // Advances the begin iterator to ``it``.\n  void advance_to(iterator it) { out_ = it; }\n\n  internal::locale_ref locale() { return loc_; }\n};\n\ntemplate <typename Char>\nusing buffer_context =\n    basic_format_context<std::back_insert_iterator<internal::buffer<Char>>,\n                         Char>;\nusing format_context = buffer_context<char>;\nusing wformat_context = buffer_context<wchar_t>;\n\n/**\n  \\rst\n  An array of references to arguments. It can be implicitly converted into\n  `~fmt::basic_format_args` for passing into type-erased formatting functions\n  such as `~fmt::vformat`.\n  \\endrst\n */\ntemplate <typename Context, typename... Args> class format_arg_store {\n private:\n  static const size_t num_args = sizeof...(Args);\n  static const bool is_packed = num_args < internal::max_packed_args;\n\n  using value_type = conditional_t<is_packed, internal::value<Context>,\n                                   basic_format_arg<Context>>;\n\n  // If the arguments are not packed, add one more element to mark the end.\n  value_type data_[num_args + (!is_packed || num_args == 0 ? 
1 : 0)];\n\n  friend class basic_format_args<Context>;\n\n public:\n  static constexpr unsigned long long types =\n      is_packed ? internal::encode_types<Context, Args...>()\n                : internal::is_unpacked_bit | num_args;\n  FMT_DEPRECATED static constexpr unsigned long long TYPES = types;\n\n  format_arg_store(const Args&... args)\n      : data_{internal::make_arg<is_packed, Context>(args)...} {}\n};\n\n/**\n  \\rst\n  Constructs an `~fmt::format_arg_store` object that contains references to\n  arguments and can be implicitly converted to `~fmt::format_args`. `Context`\n  can be omitted in which case it defaults to `~fmt::context`.\n  See `~fmt::arg` for lifetime considerations.\n  \\endrst\n */\ntemplate <typename Context = format_context, typename... Args>\ninline format_arg_store<Context, Args...> make_format_args(\n    const Args&... args) {\n  return {args...};\n}\n\n/** Formatting arguments. */\ntemplate <typename Context> class basic_format_args {\n public:\n  using size_type = int;\n  using format_arg = basic_format_arg<Context>;\n\n private:\n  // To reduce compiled code size per formatting function call, types of first\n  // max_packed_args arguments are passed in the types_ field.\n  unsigned long long types_;\n  union {\n    // If the number of arguments is less than max_packed_args, the argument\n    // values are stored in values_, otherwise they are stored in args_.\n    // This is done to reduce compiled code size as storing larger objects\n    // may require more code (at least on x86-64) even if the same amount of\n    // data is actually copied to stack. 
It saves ~10% on the bloat test.\n    const internal::value<Context>* values_;\n    const format_arg* args_;\n  };\n\n  bool is_packed() const { return (types_ & internal::is_unpacked_bit) == 0; }\n\n  internal::type type(int index) const {\n    int shift = index * 4;\n    return static_cast<internal::type>((types_ & (0xfull << shift)) >> shift);\n  }\n\n  friend class internal::arg_map<Context>;\n\n  void set_data(const internal::value<Context>* values) { values_ = values; }\n  void set_data(const format_arg* args) { args_ = args; }\n\n  format_arg do_get(int index) const {\n    format_arg arg;\n    if (!is_packed()) {\n      auto num_args = max_size();\n      if (index < num_args) arg = args_[index];\n      return arg;\n    }\n    if (index > internal::max_packed_args) return arg;\n    arg.type_ = type(index);\n    if (arg.type_ == internal::none_type) return arg;\n    internal::value<Context>& val = arg.value_;\n    val = values_[index];\n    return arg;\n  }\n\n public:\n  basic_format_args() : types_(0) {}\n\n  /**\n   \\rst\n   Constructs a `basic_format_args` object from `~fmt::format_arg_store`.\n   \\endrst\n   */\n  template <typename... Args>\n  basic_format_args(const format_arg_store<Context, Args...>& store)\n      : types_(static_cast<unsigned long long>(store.types)) {\n    set_data(store.data_);\n  }\n\n  /**\n   \\rst\n   Constructs a `basic_format_args` object from a dynamic set of arguments.\n   \\endrst\n   */\n  basic_format_args(const format_arg* args, int count)\n      : types_(internal::is_unpacked_bit | internal::to_unsigned(count)) {\n    set_data(args);\n  }\n\n  /** Returns the argument at specified index. 
*/\n  format_arg get(int index) const {\n    format_arg arg = do_get(index);\n    if (arg.type_ == internal::named_arg_type)\n      arg = arg.value_.named_arg->template deserialize<Context>();\n    return arg;\n  }\n\n  int max_size() const {\n    unsigned long long max_packed = internal::max_packed_args;\n    return static_cast<int>(is_packed() ? max_packed\n                                        : types_ & ~internal::is_unpacked_bit);\n  }\n};\n\n/** An alias to ``basic_format_args<context>``. */\n// It is a separate type rather than an alias to make symbols readable.\nstruct format_args : basic_format_args<format_context> {\n  template <typename... Args>\n  format_args(Args&&... args)\n      : basic_format_args<format_context>(std::forward<Args>(args)...) {}\n};\nstruct wformat_args : basic_format_args<wformat_context> {\n  template <typename... Args>\n  wformat_args(Args&&... args)\n      : basic_format_args<wformat_context>(std::forward<Args>(args)...) {}\n};\n\ntemplate <typename Container> struct is_contiguous : std::false_type {};\n\ntemplate <typename Char>\nstruct is_contiguous<std::basic_string<Char>> : std::true_type {};\n\ntemplate <typename Char>\nstruct is_contiguous<internal::buffer<Char>> : std::true_type {};\n\nnamespace internal {\n\ntemplate <typename OutputIt>\nstruct is_contiguous_back_insert_iterator : std::false_type {};\ntemplate <typename Container>\nstruct is_contiguous_back_insert_iterator<std::back_insert_iterator<Container>>\n    : is_contiguous<Container> {};\n\ntemplate <typename Char> struct named_arg_base {\n  basic_string_view<Char> name;\n\n  // Serialized value<context>.\n  mutable char data[sizeof(basic_format_arg<buffer_context<Char>>)];\n\n  named_arg_base(basic_string_view<Char> nm) : name(nm) {}\n\n  template <typename Context> basic_format_arg<Context> deserialize() const {\n    basic_format_arg<Context> arg;\n    std::memcpy(&arg, data, sizeof(basic_format_arg<Context>));\n    return arg;\n  }\n};\n\ntemplate <typename 
T, typename Char> struct named_arg : named_arg_base<Char> {\n  const T& value;\n\n  named_arg(basic_string_view<Char> name, const T& val)\n      : named_arg_base<Char>(name), value(val) {}\n};\n\ntemplate <typename..., typename S, FMT_ENABLE_IF(!is_compile_string<S>::value)>\ninline void check_format_string(const S&) {\n#if defined(FMT_ENFORCE_COMPILE_STRING)\n  static_assert(is_compile_string<S>::value,\n                \"FMT_ENFORCE_COMPILE_STRING requires all format strings to \"\n                \"utilize FMT_STRING() or fmt().\");\n#endif\n}\ntemplate <typename..., typename S, FMT_ENABLE_IF(is_compile_string<S>::value)>\nvoid check_format_string(S);\n\nstruct view {};\ntemplate <bool...> struct bool_pack;\ntemplate <bool... Args>\nusing all_true =\n    std::is_same<bool_pack<Args..., true>, bool_pack<true, Args...>>;\n\ntemplate <typename... Args, typename S, typename Char = char_t<S>>\ninline format_arg_store<buffer_context<Char>, remove_reference_t<Args>...>\nmake_args_checked(const S& format_str,\n                  const remove_reference_t<Args>&... 
args) {\n  static_assert(all_true<(!std::is_base_of<view, remove_reference_t<Args>>() ||\n                          !std::is_reference<Args>())...>::value,\n                \"passing views as lvalues is disallowed\");\n  check_format_string<remove_const_t<remove_reference_t<Args>>...>(format_str);\n  return {args...};\n}\n\ntemplate <typename Char>\nstd::basic_string<Char> vformat(basic_string_view<Char> format_str,\n                                basic_format_args<buffer_context<Char>> args);\n\ntemplate <typename Char>\ntypename buffer_context<Char>::iterator vformat_to(\n    buffer<Char>& buf, basic_string_view<Char> format_str,\n    basic_format_args<buffer_context<Char>> args);\n}  // namespace internal\n\n/**\n  \\rst\n  Returns a named argument to be used in a formatting function.\n\n  The named argument holds a reference and does not extend the lifetime\n  of its arguments.\n  Consequently, a dangling reference can accidentally be created.\n  The user should take care to only pass this function temporaries when\n  the named argument is itself a temporary, as per the following example.\n\n  **Example**::\n\n    fmt::print(\"Elapsed time: {s:.2f} seconds\", fmt::arg(\"s\", 1.23));\n  \\endrst\n */\ntemplate <typename S, typename T, typename Char = char_t<S>>\ninline internal::named_arg<T, Char> arg(const S& name, const T& arg) {\n  static_assert(internal::is_string<S>::value, \"\");\n  return {name, arg};\n}\n\n// Disable nested named arguments, e.g. ``arg(\"a\", arg(\"b\", 42))``.\ntemplate <typename S, typename T, typename Char>\nvoid arg(S, internal::named_arg<T, Char>) = delete;\n\n/** Formats a string and writes the output to ``out``. */\n// GCC 8 and earlier cannot handle std::back_insert_iterator<Container> with\n// vformat_to<ArgFormatter>(...) 
overload, so SFINAE on iterator type instead.\ntemplate <typename OutputIt, typename S, typename Char = char_t<S>,\n          FMT_ENABLE_IF(\n              internal::is_contiguous_back_insert_iterator<OutputIt>::value)>\nOutputIt vformat_to(OutputIt out, const S& format_str,\n                    basic_format_args<buffer_context<Char>> args) {\n  using container = remove_reference_t<decltype(internal::get_container(out))>;\n  internal::container_buffer<container> buf((internal::get_container(out)));\n  internal::vformat_to(buf, to_string_view(format_str), args);\n  return out;\n}\n\ntemplate <typename Container, typename S, typename... Args,\n          FMT_ENABLE_IF(\n              is_contiguous<Container>::value&& internal::is_string<S>::value)>\ninline std::back_insert_iterator<Container> format_to(\n    std::back_insert_iterator<Container> out, const S& format_str,\n    Args&&... args) {\n  return vformat_to(\n      out, to_string_view(format_str),\n      {internal::make_args_checked<Args...>(format_str, args...)});\n}\n\ntemplate <typename S, typename Char = char_t<S>>\ninline std::basic_string<Char> vformat(\n    const S& format_str, basic_format_args<buffer_context<Char>> args) {\n  return internal::vformat(to_string_view(format_str), args);\n}\n\n/**\n  \\rst\n  Formats arguments and returns the result as a string.\n\n  **Example**::\n\n    #include <fmt/core.h>\n    std::string message = fmt::format(\"The answer is {}\", 42);\n  \\endrst\n*/\n// Pass char_t as a default template parameter instead of using\n// std::basic_string<char_t<S>> to reduce the symbol size.\ntemplate <typename S, typename... Args, typename Char = char_t<S>>\ninline std::basic_string<Char> format(const S& format_str, Args&&... 
args) {\n  return internal::vformat(\n      to_string_view(format_str),\n      {internal::make_args_checked<Args...>(format_str, args...)});\n}\n\nFMT_API void vprint(std::FILE* f, string_view format_str, format_args args);\nFMT_API void vprint(std::FILE* f, wstring_view format_str, wformat_args args);\n\n/**\n  \\rst\n  Prints formatted data to the file *f*. For wide format strings,\n  *f* should be in wide-oriented mode set via ``fwide(f, 1)`` or\n  ``_setmode(_fileno(f), _O_U8TEXT)`` on Windows.\n\n  **Example**::\n\n    fmt::print(stderr, \"Don't {}!\", \"panic\");\n  \\endrst\n */\ntemplate <typename S, typename... Args,\n          FMT_ENABLE_IF(internal::is_string<S>::value)>\ninline void print(std::FILE* f, const S& format_str, Args&&... args) {\n  vprint(f, to_string_view(format_str),\n         internal::make_args_checked<Args...>(format_str, args...));\n}\n\nFMT_API void vprint(string_view format_str, format_args args);\nFMT_API void vprint(wstring_view format_str, wformat_args args);\n\n/**\n  \\rst\n  Prints formatted data to ``stdout``.\n\n  **Example**::\n\n    fmt::print(\"Elapsed time: {0:.2f} seconds\", 1.23);\n  \\endrst\n */\ntemplate <typename S, typename... Args,\n          FMT_ENABLE_IF(internal::is_string<S>::value)>\ninline void print(const S& format_str, Args&&... args) {\n  vprint(to_string_view(format_str),\n         internal::make_args_checked<Args...>(format_str, args...));\n}\nFMT_END_NAMESPACE\n\n#endif  // FMT_CORE_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/format-inl.h",
    "content": "// Formatting library for C++\n//\n// Copyright (c) 2012 - 2016, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_FORMAT_INL_H_\n#define FMT_FORMAT_INL_H_\n\n#include \"format.h\"\n\n#include <string.h>\n\n#include <cctype>\n#include <cerrno>\n#include <climits>\n#include <cmath>\n#include <cstdarg>\n#include <cstddef>  // for std::ptrdiff_t\n#include <cstring>  // for std::memmove\n#if !defined(FMT_STATIC_THOUSANDS_SEPARATOR)\n#  include <locale>\n#endif\n\n#if FMT_USE_WINDOWS_H\n#  if !defined(FMT_HEADER_ONLY) && !defined(WIN32_LEAN_AND_MEAN)\n#    define WIN32_LEAN_AND_MEAN\n#  endif\n#  if defined(NOMINMAX) || defined(FMT_WIN_MINMAX)\n#    include <windows.h>\n#  else\n#    define NOMINMAX\n#    include <windows.h>\n#    undef NOMINMAX\n#  endif\n#endif\n\n#if FMT_EXCEPTIONS\n#  define FMT_TRY try\n#  define FMT_CATCH(x) catch (x)\n#else\n#  define FMT_TRY if (true)\n#  define FMT_CATCH(x) if (false)\n#endif\n\n#ifdef _MSC_VER\n#  pragma warning(push)\n#  pragma warning(disable : 4127)  // conditional expression is constant\n#  pragma warning(disable : 4702)  // unreachable code\n// Disable deprecation warning for strerror. The latter is not called but\n// MSVC fails to detect it.\n#  pragma warning(disable : 4996)\n#endif\n\n// Dummy implementations of strerror_r and strerror_s called if corresponding\n// system functions are not available.\ninline fmt::internal::null<> strerror_r(int, char*, ...) {\n  return fmt::internal::null<>();\n}\ninline fmt::internal::null<> strerror_s(char*, std::size_t, ...) {\n  return fmt::internal::null<>();\n}\n\nFMT_BEGIN_NAMESPACE\nnamespace internal {\n\n#ifndef _MSC_VER\n#  define FMT_SNPRINTF snprintf\n#else  // _MSC_VER\ninline int fmt_snprintf(char* buffer, size_t size, const char* format, ...) 
{\n  va_list args;\n  va_start(args, format);\n  int result = vsnprintf_s(buffer, size, _TRUNCATE, format, args);\n  va_end(args);\n  return result;\n}\n#  define FMT_SNPRINTF fmt_snprintf\n#endif  // _MSC_VER\n\nusing format_func = void (*)(internal::buffer<char>&, int, string_view);\n\n// Portable thread-safe version of strerror.\n// Sets buffer to point to a string describing the error code.\n// This can be either a pointer to a string stored in buffer,\n// or a pointer to some static immutable string.\n// Returns one of the following values:\n//   0      - success\n//   ERANGE - buffer is not large enough to store the error message\n//   other  - failure\n// Buffer should be at least of size 1.\nFMT_FUNC int safe_strerror(int error_code, char*& buffer,\n                           std::size_t buffer_size) FMT_NOEXCEPT {\n  FMT_ASSERT(buffer != nullptr && buffer_size != 0, \"invalid buffer\");\n\n  class dispatcher {\n   private:\n    int error_code_;\n    char*& buffer_;\n    std::size_t buffer_size_;\n\n    // A noop assignment operator to avoid bogus warnings.\n    void operator=(const dispatcher&) {}\n\n    // Handle the result of XSI-compliant version of strerror_r.\n    int handle(int result) {\n      // glibc versions before 2.13 return result in errno.\n      return result == -1 ? 
errno : result;\n    }\n\n    // Handle the result of GNU-specific version of strerror_r.\n    int handle(char* message) {\n      // If the buffer is full then the message is probably truncated.\n      if (message == buffer_ && strlen(buffer_) == buffer_size_ - 1)\n        return ERANGE;\n      buffer_ = message;\n      return 0;\n    }\n\n    // Handle the case when strerror_r is not available.\n    int handle(internal::null<>) {\n      return fallback(strerror_s(buffer_, buffer_size_, error_code_));\n    }\n\n    // Fallback to strerror_s when strerror_r is not available.\n    int fallback(int result) {\n      // If the buffer is full then the message is probably truncated.\n      return result == 0 && strlen(buffer_) == buffer_size_ - 1 ? ERANGE\n                                                                : result;\n    }\n\n#if !FMT_MSC_VER\n    // Fallback to strerror if strerror_r and strerror_s are not available.\n    int fallback(internal::null<>) {\n      errno = 0;\n      buffer_ = strerror(error_code_);\n      return errno;\n    }\n#endif\n\n   public:\n    dispatcher(int err_code, char*& buf, std::size_t buf_size)\n        : error_code_(err_code), buffer_(buf), buffer_size_(buf_size) {}\n\n    int run() { return handle(strerror_r(error_code_, buffer_, buffer_size_)); }\n  };\n  return dispatcher(error_code, buffer, buffer_size).run();\n}\n\nFMT_FUNC void format_error_code(internal::buffer<char>& out, int error_code,\n                                string_view message) FMT_NOEXCEPT {\n  // Report error code making sure that the output fits into\n  // inline_buffer_size to avoid dynamic memory allocation and potential\n  // bad_alloc.\n  out.resize(0);\n  static const char SEP[] = \": \";\n  static const char ERROR_STR[] = \"error \";\n  // Subtract 2 to account for terminating null characters in SEP and ERROR_STR.\n  std::size_t error_code_size = sizeof(SEP) + sizeof(ERROR_STR) - 2;\n  auto abs_value = static_cast<uint32_or_64_t<int>>(error_code);\n 
 if (internal::is_negative(error_code)) {\n    abs_value = 0 - abs_value;\n    ++error_code_size;\n  }\n  error_code_size += internal::to_unsigned(internal::count_digits(abs_value));\n  internal::writer w(out);\n  if (message.size() <= inline_buffer_size - error_code_size) {\n    w.write(message);\n    w.write(SEP);\n  }\n  w.write(ERROR_STR);\n  w.write(error_code);\n  assert(out.size() <= inline_buffer_size);\n}\n\n// try an fwrite, FMT_THROW on failure\nFMT_FUNC void fwrite_fully(const void* ptr, size_t size, size_t count,\n                           FILE* stream) {\n  size_t written = std::fwrite(ptr, size, count, stream);\n  if (written < count) {\n    FMT_THROW(system_error(errno, \"cannot write to file\"));\n  }\n}\n\nFMT_FUNC void report_error(format_func func, int error_code,\n                           string_view message) FMT_NOEXCEPT {\n  memory_buffer full_message;\n  func(full_message, error_code, message);\n  // Use Writer::data instead of Writer::c_str to avoid potential memory\n  // allocation.\n  fwrite_fully(full_message.data(), 1, full_message.size(), stderr);\n  std::fputc('\\n', stderr);\n}\n}  // namespace internal\n\n#if !defined(FMT_STATIC_THOUSANDS_SEPARATOR)\nnamespace internal {\n\ntemplate <typename Locale>\nlocale_ref::locale_ref(const Locale& loc) : locale_(&loc) {\n  static_assert(std::is_same<Locale, std::locale>::value, \"\");\n}\n\ntemplate <typename Locale> Locale locale_ref::get() const {\n  static_assert(std::is_same<Locale, std::locale>::value, \"\");\n  return locale_ ? 
*static_cast<const std::locale*>(locale_) : std::locale();\n}\n\ntemplate <typename Char> FMT_FUNC Char thousands_sep_impl(locale_ref loc) {\n  return std::use_facet<std::numpunct<Char>>(loc.get<std::locale>())\n      .thousands_sep();\n}\ntemplate <typename Char> FMT_FUNC Char decimal_point_impl(locale_ref loc) {\n  return std::use_facet<std::numpunct<Char>>(loc.get<std::locale>())\n      .decimal_point();\n}\n}  // namespace internal\n#else\ntemplate <typename Char>\nFMT_FUNC Char internal::thousands_sep_impl(locale_ref) {\n  return FMT_STATIC_THOUSANDS_SEPARATOR;\n}\ntemplate <typename Char>\nFMT_FUNC Char internal::decimal_point_impl(locale_ref) {\n  return '.';\n}\n#endif\n\nFMT_API FMT_FUNC format_error::~format_error() FMT_NOEXCEPT {}\nFMT_API FMT_FUNC system_error::~system_error() FMT_NOEXCEPT {}\n\nFMT_FUNC void system_error::init(int err_code, string_view format_str,\n                                 format_args args) {\n  error_code_ = err_code;\n  memory_buffer buffer;\n  format_system_error(buffer, err_code, vformat(format_str, args));\n  std::runtime_error& base = *this;\n  base = std::runtime_error(to_string(buffer));\n}\n\nnamespace internal {\n\ntemplate <> FMT_FUNC int count_digits<4>(internal::fallback_uintptr n) {\n  // Assume little endian; pointer formatting is implementation-defined anyway.\n  int i = static_cast<int>(sizeof(void*)) - 1;\n  while (i > 0 && n.value[i] == 0) --i;\n  auto char_digits = std::numeric_limits<unsigned char>::digits / 4;\n  return i >= 0 ? 
i * char_digits + count_digits<4, unsigned>(n.value[i]) : 1;\n}\n\ntemplate <typename T>\nint format_float(char* buf, std::size_t size, const char* format, int precision,\n                 T value) {\n#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION\n  if (precision > 100000)\n    throw std::runtime_error(\n        \"fuzz mode - avoid large allocation inside snprintf\");\n#endif\n  // Suppress the warning about nonliteral format string.\n  auto snprintf_ptr = FMT_SNPRINTF;\n  return precision < 0 ? snprintf_ptr(buf, size, format, value)\n                       : snprintf_ptr(buf, size, format, precision, value);\n}\n\ntemplate <typename T>\nconst char basic_data<T>::digits[] =\n    \"0001020304050607080910111213141516171819\"\n    \"2021222324252627282930313233343536373839\"\n    \"4041424344454647484950515253545556575859\"\n    \"6061626364656667686970717273747576777879\"\n    \"8081828384858687888990919293949596979899\";\n\ntemplate <typename T>\nconst char basic_data<T>::hex_digits[] = \"0123456789abcdef\";\n\n#define FMT_POWERS_OF_10(factor)                                             \\\n  factor * 10, factor * 100, factor * 1000, factor * 10000, factor * 100000, \\\n      factor * 1000000, factor * 10000000, factor * 100000000,               \\\n      factor * 1000000000\n\ntemplate <typename T>\nconst uint64_t basic_data<T>::powers_of_10_64[] = {\n    1, FMT_POWERS_OF_10(1), FMT_POWERS_OF_10(1000000000ull),\n    10000000000000000000ull};\n\ntemplate <typename T>\nconst uint32_t basic_data<T>::zero_or_powers_of_10_32[] = {0,\n                                                           FMT_POWERS_OF_10(1)};\n\ntemplate <typename T>\nconst uint64_t basic_data<T>::zero_or_powers_of_10_64[] = {\n    0, FMT_POWERS_OF_10(1), FMT_POWERS_OF_10(1000000000ull),\n    10000000000000000000ull};\n\n// Normalized 64-bit significands of pow(10, k), for k = -348, -340, ..., 340.\n// These are generated by support/compute-powers.py.\ntemplate <typename T>\nconst uint64_t 
basic_data<T>::pow10_significands[] = {\n    0xfa8fd5a0081c0288, 0xbaaee17fa23ebf76, 0x8b16fb203055ac76,\n    0xcf42894a5dce35ea, 0x9a6bb0aa55653b2d, 0xe61acf033d1a45df,\n    0xab70fe17c79ac6ca, 0xff77b1fcbebcdc4f, 0xbe5691ef416bd60c,\n    0x8dd01fad907ffc3c, 0xd3515c2831559a83, 0x9d71ac8fada6c9b5,\n    0xea9c227723ee8bcb, 0xaecc49914078536d, 0x823c12795db6ce57,\n    0xc21094364dfb5637, 0x9096ea6f3848984f, 0xd77485cb25823ac7,\n    0xa086cfcd97bf97f4, 0xef340a98172aace5, 0xb23867fb2a35b28e,\n    0x84c8d4dfd2c63f3b, 0xc5dd44271ad3cdba, 0x936b9fcebb25c996,\n    0xdbac6c247d62a584, 0xa3ab66580d5fdaf6, 0xf3e2f893dec3f126,\n    0xb5b5ada8aaff80b8, 0x87625f056c7c4a8b, 0xc9bcff6034c13053,\n    0x964e858c91ba2655, 0xdff9772470297ebd, 0xa6dfbd9fb8e5b88f,\n    0xf8a95fcf88747d94, 0xb94470938fa89bcf, 0x8a08f0f8bf0f156b,\n    0xcdb02555653131b6, 0x993fe2c6d07b7fac, 0xe45c10c42a2b3b06,\n    0xaa242499697392d3, 0xfd87b5f28300ca0e, 0xbce5086492111aeb,\n    0x8cbccc096f5088cc, 0xd1b71758e219652c, 0x9c40000000000000,\n    0xe8d4a51000000000, 0xad78ebc5ac620000, 0x813f3978f8940984,\n    0xc097ce7bc90715b3, 0x8f7e32ce7bea5c70, 0xd5d238a4abe98068,\n    0x9f4f2726179a2245, 0xed63a231d4c4fb27, 0xb0de65388cc8ada8,\n    0x83c7088e1aab65db, 0xc45d1df942711d9a, 0x924d692ca61be758,\n    0xda01ee641a708dea, 0xa26da3999aef774a, 0xf209787bb47d6b85,\n    0xb454e4a179dd1877, 0x865b86925b9bc5c2, 0xc83553c5c8965d3d,\n    0x952ab45cfa97a0b3, 0xde469fbd99a05fe3, 0xa59bc234db398c25,\n    0xf6c69a72a3989f5c, 0xb7dcbf5354e9bece, 0x88fcf317f22241e2,\n    0xcc20ce9bd35c78a5, 0x98165af37b2153df, 0xe2a0b5dc971f303a,\n    0xa8d9d1535ce3b396, 0xfb9b7cd9a4a7443c, 0xbb764c4ca7a44410,\n    0x8bab8eefb6409c1a, 0xd01fef10a657842c, 0x9b10a4e5e9913129,\n    0xe7109bfba19c0c9d, 0xac2820d9623bf429, 0x80444b5e7aa7cf85,\n    0xbf21e44003acdd2d, 0x8e679c2f5e44ff8f, 0xd433179d9c8cb841,\n    0x9e19db92b4e31ba9, 0xeb96bf6ebadf77d9, 0xaf87023b9bf0ee6b,\n};\n\n// Binary exponents of pow(10, k), for k = -348, -340, ..., 340, 
corresponding\n// to significands above.\ntemplate <typename T>\nconst int16_t basic_data<T>::pow10_exponents[] = {\n    -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, -954,\n    -927,  -901,  -874,  -847,  -821,  -794,  -768,  -741,  -715,  -688, -661,\n    -635,  -608,  -582,  -555,  -529,  -502,  -475,  -449,  -422,  -396, -369,\n    -343,  -316,  -289,  -263,  -236,  -210,  -183,  -157,  -130,  -103, -77,\n    -50,   -24,   3,     30,    56,    83,    109,   136,   162,   189,  216,\n    242,   269,   295,   322,   348,   375,   402,   428,   455,   481,  508,\n    534,   561,   588,   614,   641,   667,   694,   720,   747,   774,  800,\n    827,   853,   880,   907,   933,   960,   986,   1013,  1039,  1066};\n\ntemplate <typename T>\nconst char basic_data<T>::foreground_color[] = \"\\x1b[38;2;\";\ntemplate <typename T>\nconst char basic_data<T>::background_color[] = \"\\x1b[48;2;\";\ntemplate <typename T> const char basic_data<T>::reset_color[] = \"\\x1b[0m\";\ntemplate <typename T> const wchar_t basic_data<T>::wreset_color[] = L\"\\x1b[0m\";\n\ntemplate <typename T> struct bits {\n  static FMT_CONSTEXPR_DECL const int value =\n      static_cast<int>(sizeof(T) * std::numeric_limits<unsigned char>::digits);\n};\n\n// A handmade floating-point number f * pow(2, e).\nclass fp {\n private:\n  using significand_type = uint64_t;\n\n  // All sizes are in bits.\n  // Subtract 1 to account for an implicit most significant bit in the\n  // normalized form.\n  static FMT_CONSTEXPR_DECL const int double_significand_size =\n      std::numeric_limits<double>::digits - 1;\n  static FMT_CONSTEXPR_DECL const uint64_t implicit_bit =\n      1ull << double_significand_size;\n\n public:\n  significand_type f;\n  int e;\n\n  static FMT_CONSTEXPR_DECL const int significand_size =\n      bits<significand_type>::value;\n\n  fp() : f(0), e(0) {}\n  fp(uint64_t f_val, int e_val) : f(f_val), e(e_val) {}\n\n  // Constructs fp from an IEEE754 double. 
It is a template to prevent compile\n  // errors on platforms where double is not IEEE754.\n  template <typename Double> explicit fp(Double d) {\n    // Assume double is in the format [sign][exponent][significand].\n    using limits = std::numeric_limits<Double>;\n    const int exponent_size =\n        bits<Double>::value - double_significand_size - 1;  // -1 for sign\n    const uint64_t significand_mask = implicit_bit - 1;\n    const uint64_t exponent_mask = (~0ull >> 1) & ~significand_mask;\n    const int exponent_bias = (1 << exponent_size) - limits::max_exponent - 1;\n    auto u = bit_cast<uint64_t>(d);\n    auto biased_e = (u & exponent_mask) >> double_significand_size;\n    f = u & significand_mask;\n    if (biased_e != 0)\n      f += implicit_bit;\n    else\n      biased_e = 1;  // Subnormals use biased exponent 1 (min exponent).\n    e = static_cast<int>(biased_e - exponent_bias - double_significand_size);\n  }\n\n  // Normalizes the value converted from double and multiplied by (1 << SHIFT).\n  template <int SHIFT = 0> void normalize() {\n    // Handle subnormals.\n    auto shifted_implicit_bit = implicit_bit << SHIFT;\n    while ((f & shifted_implicit_bit) == 0) {\n      f <<= 1;\n      --e;\n    }\n    // Subtract 1 to account for hidden bit.\n    auto offset = significand_size - double_significand_size - SHIFT - 1;\n    f <<= offset;\n    e -= offset;\n  }\n\n  // Compute lower and upper boundaries (m^- and m^+ in the Grisu paper), where\n  // a boundary is a value half way between the number and its predecessor\n  // (lower) or successor (upper). The upper boundary is normalized and lower\n  // has the same exponent but may be not normalized.\n  void compute_boundaries(fp& lower, fp& upper) const {\n    lower =\n        f == implicit_bit ? 
fp((f << 2) - 1, e - 2) : fp((f << 1) - 1, e - 1);\n    upper = fp((f << 1) + 1, e - 1);\n    upper.normalize<1>();  // 1 is to account for the exponent shift above.\n    lower.f <<= lower.e - upper.e;\n    lower.e = upper.e;\n  }\n};\n\n// Returns an fp number representing x - y. Result may not be normalized.\ninline fp operator-(fp x, fp y) {\n  FMT_ASSERT(x.f >= y.f && x.e == y.e, \"invalid operands\");\n  return fp(x.f - y.f, x.e);\n}\n\n// Computes an fp number r with r.f = x.f * y.f / pow(2, 64) rounded to nearest\n// with half-up tie breaking, r.e = x.e + y.e + 64. Result may not be\n// normalized.\nFMT_FUNC fp operator*(fp x, fp y) {\n  int exp = x.e + y.e + 64;\n#if FMT_USE_INT128\n  auto product = static_cast<__uint128_t>(x.f) * y.f;\n  auto f = static_cast<uint64_t>(product >> 64);\n  if ((static_cast<uint64_t>(product) & (1ULL << 63)) != 0) ++f;\n  return fp(f, exp);\n#else\n  // Multiply 32-bit parts of significands.\n  uint64_t mask = (1ULL << 32) - 1;\n  uint64_t a = x.f >> 32, b = x.f & mask;\n  uint64_t c = y.f >> 32, d = y.f & mask;\n  uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d;\n  // Compute mid 64-bit of result and round.\n  uint64_t mid = (bd >> 32) + (ad & mask) + (bc & mask) + (1U << 31);\n  return fp(ac + (ad >> 32) + (bc >> 32) + (mid >> 32), exp);\n#endif\n}\n\n// Returns cached power (of 10) c_k = c_k.f * pow(2, c_k.e) such that its\n// (binary) exponent satisfies min_exponent <= c_k.e <= min_exponent + 28.\nFMT_FUNC fp get_cached_power(int min_exponent, int& pow10_exponent) {\n  const double one_over_log2_10 = 0.30102999566398114;  // 1 / log2(10)\n  int index = static_cast<int>(\n      std::ceil((min_exponent + fp::significand_size - 1) * one_over_log2_10));\n  // Decimal exponent of the first (smallest) cached power of 10.\n  const int first_dec_exp = -348;\n  // Difference between 2 consecutive decimal exponents in cached powers of 10.\n  const int dec_exp_step = 8;\n  index = (index - first_dec_exp - 1) / dec_exp_step + 
1;\n  pow10_exponent = first_dec_exp + index * dec_exp_step;\n  return fp(data::pow10_significands[index], data::pow10_exponents[index]);\n}\n\nenum round_direction { unknown, up, down };\n\n// Given the divisor (normally a power of 10), the remainder = v % divisor for\n// some number v and the error, returns whether v should be rounded up, down, or\n// whether the rounding direction can't be determined due to error.\n// error should be less than divisor / 2.\ninline round_direction get_round_direction(uint64_t divisor, uint64_t remainder,\n                                           uint64_t error) {\n  FMT_ASSERT(remainder < divisor, \"\");  // divisor - remainder won't overflow.\n  FMT_ASSERT(error < divisor, \"\");      // divisor - error won't overflow.\n  FMT_ASSERT(error < divisor - error, \"\");  // error * 2 won't overflow.\n  // Round down if (remainder + error) * 2 <= divisor.\n  if (remainder <= divisor - remainder && error * 2 <= divisor - remainder * 2)\n    return down;\n  // Round up if (remainder - error) * 2 >= divisor.\n  if (remainder >= error &&\n      remainder - error >= divisor - (remainder - error)) {\n    return up;\n  }\n  return unknown;\n}\n\nnamespace digits {\nenum result {\n  more,  // Generate more digits.\n  done,  // Done generating digits.\n  error  // Digit generation cancelled due to an error.\n};\n}\n\n// Generates output using the Grisu digit-gen algorithm.\n// error: the size of the region (lower, upper) outside of which numbers\n// definitely do not round to value (Delta in Grisu3).\ntemplate <typename Handler>\ndigits::result grisu_gen_digits(fp value, uint64_t error, int& exp,\n                                Handler& handler) {\n  fp one(1ull << -value.e, value.e);\n  // The integral part of scaled value (p1 in Grisu) = value / one. 
It cannot be\n  // zero because it contains a product of two 64-bit numbers with MSB set (due\n  // to normalization) - 1, shifted right by at most 60 bits.\n  uint32_t integral = static_cast<uint32_t>(value.f >> -one.e);\n  FMT_ASSERT(integral != 0, \"\");\n  FMT_ASSERT(integral == value.f >> -one.e, \"\");\n  // The fractional part of scaled value (p2 in Grisu) c = value % one.\n  uint64_t fractional = value.f & (one.f - 1);\n  exp = count_digits(integral);  // kappa in Grisu.\n  // Divide by 10 to prevent overflow.\n  auto result = handler.on_start(data::powers_of_10_64[exp - 1] << -one.e,\n                                 value.f / 10, error * 10, exp);\n  if (result != digits::more) return result;\n  // Generate digits for the integral part. This can produce up to 10 digits.\n  do {\n    uint32_t digit = 0;\n    // This optimization by miloyip reduces the number of integer divisions by\n    // one per iteration.\n    switch (exp) {\n    case 10:\n      digit = integral / 1000000000;\n      integral %= 1000000000;\n      break;\n    case 9:\n      digit = integral / 100000000;\n      integral %= 100000000;\n      break;\n    case 8:\n      digit = integral / 10000000;\n      integral %= 10000000;\n      break;\n    case 7:\n      digit = integral / 1000000;\n      integral %= 1000000;\n      break;\n    case 6:\n      digit = integral / 100000;\n      integral %= 100000;\n      break;\n    case 5:\n      digit = integral / 10000;\n      integral %= 10000;\n      break;\n    case 4:\n      digit = integral / 1000;\n      integral %= 1000;\n      break;\n    case 3:\n      digit = integral / 100;\n      integral %= 100;\n      break;\n    case 2:\n      digit = integral / 10;\n      integral %= 10;\n      break;\n    case 1:\n      digit = integral;\n      integral = 0;\n      break;\n    default:\n      FMT_ASSERT(false, \"invalid number of digits\");\n    }\n    --exp;\n    uint64_t remainder =\n        (static_cast<uint64_t>(integral) << -one.e) + 
fractional;\n    result = handler.on_digit(static_cast<char>('0' + digit),\n                              data::powers_of_10_64[exp] << -one.e, remainder,\n                              error, exp, true);\n    if (result != digits::more) return result;\n  } while (exp > 0);\n  // Generate digits for the fractional part.\n  for (;;) {\n    fractional *= 10;\n    error *= 10;\n    char digit =\n        static_cast<char>('0' + static_cast<char>(fractional >> -one.e));\n    fractional &= one.f - 1;\n    --exp;\n    result = handler.on_digit(digit, one.f, fractional, error, exp, false);\n    if (result != digits::more) return result;\n  }\n}\n\n// The fixed precision digit handler.\nstruct fixed_handler {\n  char* buf;\n  int size;\n  int precision;\n  int exp10;\n  bool fixed;\n\n  digits::result on_start(uint64_t divisor, uint64_t remainder, uint64_t error,\n                          int& exp) {\n    // Non-fixed formats require at least one digit and no precision adjustment.\n    if (!fixed) return digits::more;\n    // Adjust fixed precision by exponent because it is relative to decimal\n    // point.\n    precision += exp + exp10;\n    // Check if precision is satisfied just by leading zeros, e.g.\n    // format(\"{:.2f}\", 0.001) gives \"0.00\" without generating any digits.\n    if (precision > 0) return digits::more;\n    if (precision < 0) return digits::done;\n    auto dir = get_round_direction(divisor, remainder, error);\n    if (dir == unknown) return digits::error;\n    buf[size++] = dir == up ? 
'1' : '0';\n    return digits::done;\n  }\n\n  digits::result on_digit(char digit, uint64_t divisor, uint64_t remainder,\n                          uint64_t error, int, bool integral) {\n    FMT_ASSERT(remainder < divisor, \"\");\n    buf[size++] = digit;\n    if (size < precision) return digits::more;\n    if (!integral) {\n      // Check if error * 2 < divisor with overflow prevention.\n      // The check is not needed for the integral part because error = 1\n      // and divisor > (1 << 32) there.\n      if (error >= divisor || error >= divisor - error) return digits::error;\n    } else {\n      FMT_ASSERT(error == 1 && divisor > 2, \"\");\n    }\n    auto dir = get_round_direction(divisor, remainder, error);\n    if (dir != up) return dir == down ? digits::done : digits::error;\n    ++buf[size - 1];\n    for (int i = size - 1; i > 0 && buf[i] > '9'; --i) {\n      buf[i] = '0';\n      ++buf[i - 1];\n    }\n    if (buf[0] > '9') {\n      buf[0] = '1';\n      buf[size++] = '0';\n    }\n    return digits::done;\n  }\n};\n\n// The shortest representation digit handler.\ntemplate <int GRISU_VERSION> struct grisu_shortest_handler {\n  char* buf;\n  int size;\n  // Distance between scaled value and upper bound (wp_W in Grisu3).\n  uint64_t diff;\n\n  digits::result on_start(uint64_t, uint64_t, uint64_t, int&) {\n    return digits::more;\n  }\n\n  // Decrement the generated number approaching value from above.\n  void round(uint64_t d, uint64_t divisor, uint64_t& remainder,\n             uint64_t error) {\n    while (\n        remainder < d && error - remainder >= divisor &&\n        (remainder + divisor < d || d - remainder >= remainder + divisor - d)) {\n      --buf[size - 1];\n      remainder += divisor;\n    }\n  }\n\n  // Implements Grisu's round_weed.\n  digits::result on_digit(char digit, uint64_t divisor, uint64_t remainder,\n                          uint64_t error, int exp, bool integral) {\n    buf[size++] = digit;\n    if (remainder >= error) return 
digits::more;\n    if (GRISU_VERSION != 3) {\n      uint64_t d = integral ? diff : diff * data::powers_of_10_64[-exp];\n      round(d, divisor, remainder, error);\n      return digits::done;\n    }\n    uint64_t unit = integral ? 1 : data::powers_of_10_64[-exp];\n    uint64_t up = (diff - 1) * unit;  // wp_Wup\n    round(up, divisor, remainder, error);\n    uint64_t down = (diff + 1) * unit;  // wp_Wdown\n    if (remainder < down && error - remainder >= divisor &&\n        (remainder + divisor < down ||\n         down - remainder > remainder + divisor - down)) {\n      return digits::error;\n    }\n    return 2 * unit <= remainder && remainder <= error - 4 * unit\n               ? digits::done\n               : digits::error;\n  }\n};\n\ntemplate <typename Double,\n          enable_if_t<(sizeof(Double) == sizeof(uint64_t)), int>>\nFMT_API bool grisu_format(Double value, buffer<char>& buf, int precision,\n                          unsigned options, int& exp) {\n  FMT_ASSERT(value >= 0, \"value is negative\");\n  bool fixed = (options & grisu_options::fixed) != 0;\n  if (value <= 0) {  // <= instead of == to silence a warning.\n    if (precision <= 0 || !fixed) {\n      exp = 0;\n      buf.push_back('0');\n    } else {\n      exp = -precision;\n      buf.resize(precision);\n      std::uninitialized_fill_n(buf.data(), precision, '0');\n    }\n    return true;\n  }\n\n  fp fp_value(value);\n  const int min_exp = -60;  // alpha in Grisu.\n  int cached_exp10 = 0;     // K in Grisu.\n  if (precision != -1) {\n    if (precision > 17) return false;\n    fp_value.normalize();\n    auto cached_pow = get_cached_power(\n        min_exp - (fp_value.e + fp::significand_size), cached_exp10);\n    fp_value = fp_value * cached_pow;\n    fixed_handler handler{buf.data(), 0, precision, -cached_exp10, fixed};\n    if (grisu_gen_digits(fp_value, 1, exp, handler) == digits::error)\n      return false;\n    buf.resize(to_unsigned(handler.size));\n  } else {\n    fp lower, upper;  // w^- 
and w^+ in the Grisu paper.\n    fp_value.compute_boundaries(lower, upper);\n    // Find a cached power of 10 such that multiplying upper by it will bring\n    // the exponent in the range [min_exp, -32].\n    auto cached_pow = get_cached_power(  // \\tilde{c}_{-k} in Grisu.\n        min_exp - (upper.e + fp::significand_size), cached_exp10);\n    fp_value.normalize();\n    fp_value = fp_value * cached_pow;\n    lower = lower * cached_pow;  // \\tilde{M}^- in Grisu.\n    upper = upper * cached_pow;  // \\tilde{M}^+ in Grisu.\n    assert(min_exp <= upper.e && upper.e <= -32);\n    auto result = digits::result();\n    int size = 0;\n    if ((options & grisu_options::grisu3) != 0) {\n      --lower.f;  // \\tilde{M}^- - 1 ulp -> M^-_{\\downarrow}.\n      ++upper.f;  // \\tilde{M}^+ + 1 ulp -> M^+_{\\uparrow}.\n      // Numbers outside of (lower, upper) definitely do not round to value.\n      grisu_shortest_handler<3> handler{buf.data(), 0, (upper - fp_value).f};\n      result = grisu_gen_digits(upper, upper.f - lower.f, exp, handler);\n      size = handler.size;\n    } else {\n      ++lower.f;  // \\tilde{M}^- + 1 ulp -> M^-_{\\uparrow}.\n      --upper.f;  // \\tilde{M}^+ - 1 ulp -> M^+_{\\downarrow}.\n      grisu_shortest_handler<2> handler{buf.data(), 0, (upper - fp_value).f};\n      result = grisu_gen_digits(upper, upper.f - lower.f, exp, handler);\n      size = handler.size;\n    }\n    if (result == digits::error) return false;\n    buf.resize(to_unsigned(size));\n  }\n  exp -= cached_exp10;\n  return true;\n}\n\ntemplate <typename Double>\nchar* sprintf_format(Double value, internal::buffer<char>& buf,\n                     sprintf_specs specs) {\n  // Buffer capacity must be non-zero, otherwise MSVC's vsnprintf_s will fail.\n  FMT_ASSERT(buf.capacity() != 0, \"empty buffer\");\n\n  // Build format string.\n  enum { max_format_size = 10 };  // longest format: %#-*.*Lg\n  char format[max_format_size];\n  char* format_ptr = format;\n  *format_ptr++ = '%';\n  if 
(specs.alt || !specs.type) *format_ptr++ = '#';\n  if (specs.precision >= 0) {\n    *format_ptr++ = '.';\n    *format_ptr++ = '*';\n  }\n  if (std::is_same<Double, long double>::value) *format_ptr++ = 'L';\n\n  char type = specs.type;\n\n  if (type == '%')\n    type = 'f';\n  else if (type == 0 || type == 'n')\n    type = 'g';\n#if FMT_MSC_VER\n  if (type == 'F') {\n    // MSVC's printf doesn't support 'F'.\n    type = 'f';\n  }\n#endif\n  *format_ptr++ = type;\n  *format_ptr = '\\0';\n\n  // Format using snprintf.\n  char* start = nullptr;\n  char* decimal_point_pos = nullptr;\n  for (;;) {\n    std::size_t buffer_size = buf.capacity();\n    start = &buf[0];\n    int result =\n        format_float(start, buffer_size, format, specs.precision, value);\n    if (result >= 0) {\n      unsigned n = internal::to_unsigned(result);\n      if (n < buf.capacity()) {\n        // Find the decimal point.\n        auto p = buf.data(), end = p + n;\n        if (*p == '+' || *p == '-') ++p;\n        if (specs.type != 'a' && specs.type != 'A') {\n          while (p < end && *p >= '0' && *p <= '9') ++p;\n          if (p < end && *p != 'e' && *p != 'E') {\n            decimal_point_pos = p;\n            if (!specs.type) {\n              // Keep only one trailing zero after the decimal point.\n              ++p;\n              if (*p == '0') ++p;\n              while (p != end && *p >= '1' && *p <= '9') ++p;\n              char* where = p;\n              while (p != end && *p == '0') ++p;\n              if (p == end || *p < '0' || *p > '9') {\n                if (p != end) std::memmove(where, p, to_unsigned(end - p));\n                n -= static_cast<unsigned>(p - where);\n              }\n            }\n          }\n        }\n        buf.resize(n);\n        break;  // The buffer is large enough - continue with formatting.\n      }\n      buf.reserve(n + 1);\n    } else {\n      // If result is negative we ask to increase the capacity by at least 1,\n      // but as std::vector, the 
buffer grows exponentially.\n      buf.reserve(buf.capacity() + 1);\n    }\n  }\n  return decimal_point_pos;\n}\n}  // namespace internal\n\n#if FMT_USE_WINDOWS_H\n\nFMT_FUNC internal::utf8_to_utf16::utf8_to_utf16(string_view s) {\n  static const char ERROR_MSG[] = \"cannot convert string from UTF-8 to UTF-16\";\n  if (s.size() > INT_MAX)\n    FMT_THROW(windows_error(ERROR_INVALID_PARAMETER, ERROR_MSG));\n  int s_size = static_cast<int>(s.size());\n  if (s_size == 0) {\n    // MultiByteToWideChar does not support zero length, handle separately.\n    buffer_.resize(1);\n    buffer_[0] = 0;\n    return;\n  }\n\n  int length = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, s.data(),\n                                   s_size, nullptr, 0);\n  if (length == 0) FMT_THROW(windows_error(GetLastError(), ERROR_MSG));\n  buffer_.resize(length + 1);\n  length = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, s.data(), s_size,\n                               &buffer_[0], length);\n  if (length == 0) FMT_THROW(windows_error(GetLastError(), ERROR_MSG));\n  buffer_[length] = 0;\n}\n\nFMT_FUNC internal::utf16_to_utf8::utf16_to_utf8(wstring_view s) {\n  if (int error_code = convert(s)) {\n    FMT_THROW(windows_error(error_code,\n                            \"cannot convert string from UTF-16 to UTF-8\"));\n  }\n}\n\nFMT_FUNC int internal::utf16_to_utf8::convert(wstring_view s) {\n  if (s.size() > INT_MAX) return ERROR_INVALID_PARAMETER;\n  int s_size = static_cast<int>(s.size());\n  if (s_size == 0) {\n    // WideCharToMultiByte does not support zero length, handle separately.\n    buffer_.resize(1);\n    buffer_[0] = 0;\n    return 0;\n  }\n\n  int length = WideCharToMultiByte(CP_UTF8, 0, s.data(), s_size, nullptr, 0,\n                                   nullptr, nullptr);\n  if (length == 0) return GetLastError();\n  buffer_.resize(length + 1);\n  length = WideCharToMultiByte(CP_UTF8, 0, s.data(), s_size, &buffer_[0],\n                               length, nullptr, 
nullptr);\n  if (length == 0) return GetLastError();\n  buffer_[length] = 0;\n  return 0;\n}\n\nFMT_FUNC void windows_error::init(int err_code, string_view format_str,\n                                  format_args args) {\n  error_code_ = err_code;\n  memory_buffer buffer;\n  internal::format_windows_error(buffer, err_code, vformat(format_str, args));\n  std::runtime_error& base = *this;\n  base = std::runtime_error(to_string(buffer));\n}\n\nFMT_FUNC void internal::format_windows_error(internal::buffer<char>& out,\n                                             int error_code,\n                                             string_view message) FMT_NOEXCEPT {\n  FMT_TRY {\n    wmemory_buffer buf;\n    buf.resize(inline_buffer_size);\n    for (;;) {\n      wchar_t* system_message = &buf[0];\n      int result = FormatMessageW(\n          FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr,\n          error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), system_message,\n          static_cast<uint32_t>(buf.size()), nullptr);\n      if (result != 0) {\n        utf16_to_utf8 utf8_message;\n        if (utf8_message.convert(system_message) == ERROR_SUCCESS) {\n          internal::writer w(out);\n          w.write(message);\n          w.write(\": \");\n          w.write(utf8_message);\n          return;\n        }\n        break;\n      }\n      if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)\n        break;  // Can't get error message, report error code instead.\n      buf.resize(buf.size() * 2);\n    }\n  }\n  FMT_CATCH(...) 
{}\n  format_error_code(out, error_code, message);\n}\n\n#endif  // FMT_USE_WINDOWS_H\n\nFMT_FUNC void format_system_error(internal::buffer<char>& out, int error_code,\n                                  string_view message) FMT_NOEXCEPT {\n  FMT_TRY {\n    memory_buffer buf;\n    buf.resize(inline_buffer_size);\n    for (;;) {\n      char* system_message = &buf[0];\n      int result =\n          internal::safe_strerror(error_code, system_message, buf.size());\n      if (result == 0) {\n        internal::writer w(out);\n        w.write(message);\n        w.write(\": \");\n        w.write(system_message);\n        return;\n      }\n      if (result != ERANGE)\n        break;  // Can't get error message, report error code instead.\n      buf.resize(buf.size() * 2);\n    }\n  }\n  FMT_CATCH(...) {}\n  format_error_code(out, error_code, message);\n}\n\nFMT_FUNC void internal::error_handler::on_error(const char* message) {\n  FMT_THROW(format_error(message));\n}\n\nFMT_FUNC void report_system_error(int error_code,\n                                  fmt::string_view message) FMT_NOEXCEPT {\n  report_error(format_system_error, error_code, message);\n}\n\n#if FMT_USE_WINDOWS_H\nFMT_FUNC void report_windows_error(int error_code,\n                                   fmt::string_view message) FMT_NOEXCEPT {\n  report_error(internal::format_windows_error, error_code, message);\n}\n#endif\n\nFMT_FUNC void vprint(std::FILE* f, string_view format_str, format_args args) {\n  memory_buffer buffer;\n  internal::vformat_to(buffer, format_str,\n                       basic_format_args<buffer_context<char>>(args));\n  internal::fwrite_fully(buffer.data(), 1, buffer.size(), f);\n}\n\nFMT_FUNC void vprint(std::FILE* f, wstring_view format_str, wformat_args args) {\n  wmemory_buffer buffer;\n  internal::vformat_to(buffer, format_str, args);\n  internal::fwrite_fully(buffer.data(), sizeof(wchar_t), buffer.size(), f);\n}\n\nFMT_FUNC void vprint(string_view format_str, format_args args) {\n  
vprint(stdout, format_str, args);\n}\n\nFMT_FUNC void vprint(wstring_view format_str, wformat_args args) {\n  vprint(stdout, format_str, args);\n}\n\nFMT_END_NAMESPACE\n\n#ifdef _MSC_VER\n#  pragma warning(pop)\n#endif\n\n#endif  // FMT_FORMAT_INL_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/format.h",
    "content": "/*\n Formatting library for C++\n\n Copyright (c) 2012 - present, Victor Zverovich\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice, this\n    list of conditions and the following disclaimer.\n 2. Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef FMT_FORMAT_H_\n#define FMT_FORMAT_H_\n\n#include <algorithm>\n#include <cassert>\n#include <cmath>\n#include <cstdint>\n#include <cstring>\n#include <iterator>\n#include <limits>\n#include <memory>\n#include <stdexcept>\n\n#include \"core.h\"\n\n#ifdef __clang__\n#  define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__)\n#else\n#  define FMT_CLANG_VERSION 0\n#endif\n\n#ifdef __INTEL_COMPILER\n#  define FMT_ICC_VERSION __INTEL_COMPILER\n#elif defined(__ICL)\n#  define FMT_ICC_VERSION __ICL\n#else\n#  define FMT_ICC_VERSION 0\n#endif\n\n#ifdef __NVCC__\n#  
define FMT_CUDA_VERSION (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__)\n#else\n#  define FMT_CUDA_VERSION 0\n#endif\n\n#ifdef __has_builtin\n#  define FMT_HAS_BUILTIN(x) __has_builtin(x)\n#else\n#  define FMT_HAS_BUILTIN(x) 0\n#endif\n\n#ifndef FMT_THROW\n#  if FMT_EXCEPTIONS\n#    if FMT_MSC_VER\nFMT_BEGIN_NAMESPACE\nnamespace internal {\ntemplate <typename Exception> inline void do_throw(const Exception& x) {\n  // Silence unreachable code warnings in MSVC because these are nearly\n  // impossible to fix in a generic code.\n  volatile bool b = true;\n  if (b) throw x;\n}\n}  // namespace internal\nFMT_END_NAMESPACE\n#      define FMT_THROW(x) fmt::internal::do_throw(x)\n#    else\n#      define FMT_THROW(x) throw x\n#    endif\n#  else\n#    define FMT_THROW(x)              \\\n      do {                            \\\n        static_cast<void>(sizeof(x)); \\\n        assert(false);                \\\n      } while (false)\n#  endif\n#endif\n\n#ifndef FMT_USE_USER_DEFINED_LITERALS\n// For Intel and NVIDIA compilers both they and the system gcc/msc support UDLs.\n#  if (FMT_HAS_FEATURE(cxx_user_literals) || FMT_GCC_VERSION >= 407 ||      \\\n       FMT_MSC_VER >= 1900) &&                                              \\\n      (!(FMT_ICC_VERSION || FMT_CUDA_VERSION) || FMT_ICC_VERSION >= 1500 || \\\n       FMT_CUDA_VERSION >= 700)\n#    define FMT_USE_USER_DEFINED_LITERALS 1\n#  else\n#    define FMT_USE_USER_DEFINED_LITERALS 0\n#  endif\n#endif\n\n#ifndef FMT_USE_UDL_TEMPLATE\n// EDG front end based compilers (icc, nvcc) do not support UDL templates yet\n// and GCC 9 warns about them.\n#  if FMT_USE_USER_DEFINED_LITERALS && FMT_ICC_VERSION == 0 && \\\n      FMT_CUDA_VERSION == 0 &&                                 \\\n      ((FMT_GCC_VERSION >= 600 && FMT_GCC_VERSION <= 900 &&    \\\n        __cplusplus >= 201402L) ||                             \\\n       FMT_CLANG_VERSION >= 304)\n#    define FMT_USE_UDL_TEMPLATE 1\n#  else\n#    define FMT_USE_UDL_TEMPLATE 
0\n#  endif\n#endif\n\n#ifdef FMT_USE_INT128\n// Do nothing.\n#elif defined(__SIZEOF_INT128__)\n#  define FMT_USE_INT128 1\n#else\n#  define FMT_USE_INT128 0\n#endif\n\n// __builtin_clz is broken in clang with Microsoft CodeGen:\n// https://github.com/fmtlib/fmt/issues/519\n#if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_clz)) && !FMT_MSC_VER\n#  define FMT_BUILTIN_CLZ(n) __builtin_clz(n)\n#endif\n#if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_clzll)) && !FMT_MSC_VER\n#  define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n)\n#endif\n\n// Some compilers masquerade as both MSVC and GCC-likes or otherwise support\n// __builtin_clz and __builtin_clzll, so only define FMT_BUILTIN_CLZ using the\n// MSVC intrinsics if the clz and clzll builtins are not available.\n#if FMT_MSC_VER && !defined(FMT_BUILTIN_CLZLL) && !defined(_MANAGED)\n#  include <intrin.h>  // _BitScanReverse, _BitScanReverse64\n\nFMT_BEGIN_NAMESPACE\nnamespace internal {\n// Avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning.\n#  ifndef __clang__\n#    pragma intrinsic(_BitScanReverse)\n#  endif\ninline uint32_t clz(uint32_t x) {\n  unsigned long r = 0;\n  _BitScanReverse(&r, x);\n\n  assert(x != 0);\n  // Static analysis complains about using uninitialized data\n  // \"r\", but the only way that can happen is if \"x\" is 0,\n  // which the callers guarantee to not happen.\n#  pragma warning(suppress : 6102)\n  return 31 - r;\n}\n#  define FMT_BUILTIN_CLZ(n) fmt::internal::clz(n)\n\n#  if defined(_WIN64) && !defined(__clang__)\n#    pragma intrinsic(_BitScanReverse64)\n#  endif\n\ninline uint32_t clzll(uint64_t x) {\n  unsigned long r = 0;\n#  ifdef _WIN64\n  _BitScanReverse64(&r, x);\n#  else\n  // Scan the high 32 bits.\n  if (_BitScanReverse(&r, static_cast<uint32_t>(x >> 32))) return 63 - (r + 32);\n\n  // Scan the low 32 bits.\n  _BitScanReverse(&r, static_cast<uint32_t>(x));\n#  endif\n\n  assert(x != 0);\n  // Static analysis complains about using uninitialized data\n  // \"r\", but the 
only way that can happen is if \"x\" is 0,\n  // which the callers guarantee to not happen.\n#  pragma warning(suppress : 6102)\n  return 63 - r;\n}\n#  define FMT_BUILTIN_CLZLL(n) fmt::internal::clzll(n)\n}  // namespace internal\nFMT_END_NAMESPACE\n#endif\n\nFMT_BEGIN_NAMESPACE\nnamespace internal {\n\n// A fallback implementation of uintptr_t for systems that lack it.\nstruct fallback_uintptr {\n  unsigned char value[sizeof(void*)];\n};\n#ifdef UINTPTR_MAX\nusing uintptr_t = ::uintptr_t;\n#else\nusing uintptr_t = fallback_uintptr;\n#endif\n\n// An equivalent of `*reinterpret_cast<Dest*>(&source)` that doesn't produce\n// undefined behavior (e.g. due to type aliasing).\n// Example: uint64_t d = bit_cast<uint64_t>(2.718);\ntemplate <typename Dest, typename Source>\ninline Dest bit_cast(const Source& source) {\n  static_assert(sizeof(Dest) == sizeof(Source), \"size mismatch\");\n  Dest dest;\n  std::memcpy(&dest, &source, sizeof(dest));\n  return dest;\n}\n\n// An approximation of iterator_t for pre-C++20 systems.\ntemplate <typename T>\nusing iterator_t = decltype(std::begin(std::declval<T&>()));\n\n// Detect the iterator category of *any* given type in a SFINAE-friendly way.\n// Unfortunately, older implementations of std::iterator_traits are not safe\n// for use in a SFINAE-context.\ntemplate <typename It, typename Enable = void>\nstruct iterator_category : std::false_type {};\n\ntemplate <typename T> struct iterator_category<T*> {\n  using type = std::random_access_iterator_tag;\n};\n\ntemplate <typename It>\nstruct iterator_category<It, void_t<typename It::iterator_category>> {\n  using type = typename It::iterator_category;\n};\n\n// Detect if *any* given type models the OutputIterator concept.\ntemplate <typename It> class is_output_iterator {\n  // Check for mutability because all iterator categories derived from\n  // std::input_iterator_tag *may* also meet the requirements of an\n  // OutputIterator, thereby falling into the category of 'mutable 
iterators'\n  // [iterator.requirements.general] clause 4. The compiler reveals this\n  // property only at the point of *actually dereferencing* the iterator!\n  template <typename U>\n  static decltype(*(std::declval<U>())) test(std::input_iterator_tag);\n  template <typename U> static char& test(std::output_iterator_tag);\n  template <typename U> static const char& test(...);\n\n  using type = decltype(test<It>(typename iterator_category<It>::type{}));\n\n public:\n  static const bool value = !std::is_const<remove_reference_t<type>>::value;\n};\n\n// A workaround for std::string not having mutable data() until C++17.\ntemplate <typename Char> inline Char* get_data(std::basic_string<Char>& s) {\n  return &s[0];\n}\ntemplate <typename Container>\ninline typename Container::value_type* get_data(Container& c) {\n  return c.data();\n}\n\n#ifdef _SECURE_SCL\n// Make a checked iterator to avoid MSVC warnings.\ntemplate <typename T> using checked_ptr = stdext::checked_array_iterator<T*>;\ntemplate <typename T> checked_ptr<T> make_checked(T* p, std::size_t size) {\n  return {p, size};\n}\n#else\ntemplate <typename T> using checked_ptr = T*;\ntemplate <typename T> inline T* make_checked(T* p, std::size_t) { return p; }\n#endif\n\ntemplate <typename Container, FMT_ENABLE_IF(is_contiguous<Container>::value)>\ninline checked_ptr<typename Container::value_type> reserve(\n    std::back_insert_iterator<Container>& it, std::size_t n) {\n  Container& c = get_container(it);\n  std::size_t size = c.size();\n  c.resize(size + n);\n  return make_checked(get_data(c) + size, n);\n}\n\ntemplate <typename Iterator>\ninline Iterator& reserve(Iterator& it, std::size_t) {\n  return it;\n}\n\n// An output iterator that counts the number of objects written to it and\n// discards them.\ntemplate <typename T> class counting_iterator {\n private:\n  std::size_t count_;\n  mutable T blackhole_;\n\n public:\n  using iterator_category = std::output_iterator_tag;\n  using value_type = T;\n  using 
difference_type = std::ptrdiff_t;\n  using pointer = T*;\n  using reference = T&;\n  using _Unchecked_type = counting_iterator;  // Mark iterator as checked.\n\n  counting_iterator() : count_(0) {}\n\n  std::size_t count() const { return count_; }\n\n  counting_iterator& operator++() {\n    ++count_;\n    return *this;\n  }\n\n  counting_iterator operator++(int) {\n    auto it = *this;\n    ++*this;\n    return it;\n  }\n\n  T& operator*() const { return blackhole_; }\n};\n\ntemplate <typename OutputIt> class truncating_iterator_base {\n protected:\n  OutputIt out_;\n  std::size_t limit_;\n  std::size_t count_;\n\n  truncating_iterator_base(OutputIt out, std::size_t limit)\n      : out_(out), limit_(limit), count_(0) {}\n\n public:\n  using iterator_category = std::output_iterator_tag;\n  using difference_type = void;\n  using pointer = void;\n  using reference = void;\n  using _Unchecked_type =\n      truncating_iterator_base;  // Mark iterator as checked.\n\n  OutputIt base() const { return out_; }\n  std::size_t count() const { return count_; }\n};\n\n// An output iterator that truncates the output and counts the number of objects\n// written to it.\ntemplate <typename OutputIt,\n          typename Enable = typename std::is_void<\n              typename std::iterator_traits<OutputIt>::value_type>::type>\nclass truncating_iterator;\n\ntemplate <typename OutputIt>\nclass truncating_iterator<OutputIt, std::false_type>\n    : public truncating_iterator_base<OutputIt> {\n  using traits = std::iterator_traits<OutputIt>;\n\n  mutable typename traits::value_type blackhole_;\n\n public:\n  using value_type = typename traits::value_type;\n\n  truncating_iterator(OutputIt out, std::size_t limit)\n      : truncating_iterator_base<OutputIt>(out, limit) {}\n\n  truncating_iterator& operator++() {\n    if (this->count_++ < this->limit_) ++this->out_;\n    return *this;\n  }\n\n  truncating_iterator operator++(int) {\n    auto it = *this;\n    ++*this;\n    return it;\n  }\n\n  
value_type& operator*() const {\n    return this->count_ < this->limit_ ? *this->out_ : blackhole_;\n  }\n};\n\ntemplate <typename OutputIt>\nclass truncating_iterator<OutputIt, std::true_type>\n    : public truncating_iterator_base<OutputIt> {\n public:\n  using value_type = typename OutputIt::container_type::value_type;\n\n  truncating_iterator(OutputIt out, std::size_t limit)\n      : truncating_iterator_base<OutputIt>(out, limit) {}\n\n  truncating_iterator& operator=(value_type val) {\n    if (this->count_++ < this->limit_) this->out_ = val;\n    return *this;\n  }\n\n  truncating_iterator& operator++() { return *this; }\n  truncating_iterator& operator++(int) { return *this; }\n  truncating_iterator& operator*() { return *this; }\n};\n\n// A range with the specified output iterator and value type.\ntemplate <typename OutputIt, typename T = typename OutputIt::value_type>\nclass output_range {\n private:\n  OutputIt it_;\n\n public:\n  using value_type = T;\n  using iterator = OutputIt;\n  struct sentinel {};\n\n  explicit output_range(OutputIt it) : it_(it) {}\n  OutputIt begin() const { return it_; }\n  sentinel end() const { return {}; }  // Sentinel is not used yet.\n};\n\n// A range with an iterator appending to a buffer.\ntemplate <typename T>\nclass buffer_range\n    : public output_range<std::back_insert_iterator<buffer<T>>, T> {\n public:\n  using iterator = std::back_insert_iterator<buffer<T>>;\n  using output_range<iterator, T>::output_range;\n  buffer_range(buffer<T>& buf)\n      : output_range<iterator, T>(std::back_inserter(buf)) {}\n};\n\ntemplate <typename Char>\ninline size_t count_code_points(basic_string_view<Char> s) {\n  return s.size();\n}\n\n// Counts the number of code points in a UTF-8 string.\ninline size_t count_code_points(basic_string_view<char8_t> s) {\n  const char8_t* data = s.data();\n  size_t num_code_points = 0;\n  for (size_t i = 0, size = s.size(); i != size; ++i) {\n    if ((data[i] & 0xc0) != 0x80) ++num_code_points;\n  
}\n  return num_code_points;\n}\n\ninline char8_t to_char8_t(char c) { return static_cast<char8_t>(c); }\n\ntemplate <typename InputIt, typename OutChar>\nusing needs_conversion = bool_constant<\n    std::is_same<typename std::iterator_traits<InputIt>::value_type,\n                 char>::value &&\n    std::is_same<OutChar, char8_t>::value>;\n\ntemplate <typename OutChar, typename InputIt, typename OutputIt,\n          FMT_ENABLE_IF(!needs_conversion<InputIt, OutChar>::value)>\nOutputIt copy_str(InputIt begin, InputIt end, OutputIt it) {\n  return std::copy(begin, end, it);\n}\n\ntemplate <typename OutChar, typename InputIt, typename OutputIt,\n          FMT_ENABLE_IF(needs_conversion<InputIt, OutChar>::value)>\nOutputIt copy_str(InputIt begin, InputIt end, OutputIt it) {\n  return std::transform(begin, end, it, to_char8_t);\n}\n\n#ifndef FMT_USE_GRISU\n#  define FMT_USE_GRISU 1\n#endif\n\ntemplate <typename T> constexpr bool use_grisu() {\n  return FMT_USE_GRISU && std::numeric_limits<double>::is_iec559 &&\n         sizeof(T) <= sizeof(double);\n}\n\ntemplate <typename T>\ntemplate <typename U>\nvoid buffer<T>::append(const U* begin, const U* end) {\n  std::size_t new_size = size_ + to_unsigned(end - begin);\n  reserve(new_size);\n  std::uninitialized_copy(begin, end, make_checked(ptr_, capacity_) + size_);\n  size_ = new_size;\n}\n}  // namespace internal\n\n// A UTF-8 string view.\nclass u8string_view : public basic_string_view<char8_t> {\n public:\n  u8string_view(const char* s)\n      : basic_string_view<char8_t>(reinterpret_cast<const char8_t*>(s)) {}\n  u8string_view(const char* s, size_t count) FMT_NOEXCEPT\n      : basic_string_view<char8_t>(reinterpret_cast<const char8_t*>(s), count) {\n  }\n};\n\n#if FMT_USE_USER_DEFINED_LITERALS\ninline namespace literals {\ninline u8string_view operator\"\" _u(const char* s, std::size_t n) {\n  return {s, n};\n}\n}  // namespace literals\n#endif\n\n// The number of characters to store in the basic_memory_buffer object 
itself\n// to avoid dynamic memory allocation.\nenum { inline_buffer_size = 500 };\n\n/**\n  \\rst\n  A dynamically growing memory buffer for trivially copyable/constructible types\n  with the first ``SIZE`` elements stored in the object itself.\n\n  You can use one of the following type aliases for common character types:\n\n  +----------------+------------------------------+\n  | Type           | Definition                   |\n  +================+==============================+\n  | memory_buffer  | basic_memory_buffer<char>    |\n  +----------------+------------------------------+\n  | wmemory_buffer | basic_memory_buffer<wchar_t> |\n  +----------------+------------------------------+\n\n  **Example**::\n\n     fmt::memory_buffer out;\n     format_to(out, \"The answer is {}.\", 42);\n\n  This will append the following output to the ``out`` object:\n\n  .. code-block:: none\n\n     The answer is 42.\n\n  The output can be converted to an ``std::string`` with ``to_string(out)``.\n  \\endrst\n */\ntemplate <typename T, std::size_t SIZE = inline_buffer_size,\n          typename Allocator = std::allocator<T>>\nclass basic_memory_buffer : private Allocator, public internal::buffer<T> {\n private:\n  T store_[SIZE];\n\n  // Deallocate memory allocated by the buffer.\n  void deallocate() {\n    T* data = this->data();\n    if (data != store_) Allocator::deallocate(data, this->capacity());\n  }\n\n protected:\n  void grow(std::size_t size) FMT_OVERRIDE;\n\n public:\n  using value_type = T;\n  using const_reference = const T&;\n\n  explicit basic_memory_buffer(const Allocator& alloc = Allocator())\n      : Allocator(alloc) {\n    this->set(store_, SIZE);\n  }\n  ~basic_memory_buffer() { deallocate(); }\n\n private:\n  // Move data from other to this buffer.\n  void move(basic_memory_buffer& other) {\n    Allocator &this_alloc = *this, &other_alloc = other;\n    this_alloc = std::move(other_alloc);\n    T* data = other.data();\n    std::size_t size = other.size(), 
capacity = other.capacity();\n    if (data == other.store_) {\n      this->set(store_, capacity);\n      std::uninitialized_copy(other.store_, other.store_ + size,\n                              internal::make_checked(store_, capacity));\n    } else {\n      this->set(data, capacity);\n      // Set pointer to the inline array so that delete is not called\n      // when deallocating.\n      other.set(other.store_, 0);\n    }\n    this->resize(size);\n  }\n\n public:\n  /**\n    \\rst\n    Constructs a :class:`fmt::basic_memory_buffer` object moving the content\n    of the other object to it.\n    \\endrst\n   */\n  basic_memory_buffer(basic_memory_buffer&& other) { move(other); }\n\n  /**\n    \\rst\n    Moves the content of the other ``basic_memory_buffer`` object to this one.\n    \\endrst\n   */\n  basic_memory_buffer& operator=(basic_memory_buffer&& other) {\n    assert(this != &other);\n    deallocate();\n    move(other);\n    return *this;\n  }\n\n  // Returns a copy of the allocator associated with this buffer.\n  Allocator get_allocator() const { return *this; }\n};\n\ntemplate <typename T, std::size_t SIZE, typename Allocator>\nvoid basic_memory_buffer<T, SIZE, Allocator>::grow(std::size_t size) {\n#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION\n  if (size > 1000) throw std::runtime_error(\"fuzz mode - won't grow that much\");\n#endif\n  std::size_t old_capacity = this->capacity();\n  std::size_t new_capacity = old_capacity + old_capacity / 2;\n  if (size > new_capacity) new_capacity = size;\n  T* old_data = this->data();\n  T* new_data = std::allocator_traits<Allocator>::allocate(*this, new_capacity);\n  // The following code doesn't throw, so the raw pointer above doesn't leak.\n  std::uninitialized_copy(old_data, old_data + this->size(),\n                          internal::make_checked(new_data, new_capacity));\n  this->set(new_data, new_capacity);\n  // deallocate must not throw according to the standard, but even if it does,\n  // the buffer already 
uses the new storage and will deallocate it in\n  // destructor.\n  if (old_data != store_) Allocator::deallocate(old_data, old_capacity);\n}\n\nusing memory_buffer = basic_memory_buffer<char>;\nusing wmemory_buffer = basic_memory_buffer<wchar_t>;\n\n/** A formatting error such as invalid format string. */\nclass FMT_API format_error : public std::runtime_error {\n public:\n  explicit format_error(const char* message) : std::runtime_error(message) {}\n  explicit format_error(const std::string& message)\n      : std::runtime_error(message) {}\n  ~format_error() FMT_NOEXCEPT;\n};\n\nnamespace internal {\n\n// Returns true if value is negative, false otherwise.\n// Same as `value < 0` but doesn't produce warnings if T is an unsigned type.\ntemplate <typename T, FMT_ENABLE_IF(std::numeric_limits<T>::is_signed)>\nFMT_CONSTEXPR bool is_negative(T value) {\n  return value < 0;\n}\ntemplate <typename T, FMT_ENABLE_IF(!std::numeric_limits<T>::is_signed)>\nFMT_CONSTEXPR bool is_negative(T) {\n  return false;\n}\n\n// Smallest of uint32_t and uint64_t that is large enough to represent all\n// values of T.\ntemplate <typename T>\nusing uint32_or_64_t =\n    conditional_t<std::numeric_limits<T>::digits <= 32, uint32_t, uint64_t>;\n\n// Static data is placed in this class template for the header-only config.\ntemplate <typename T = void> struct FMT_EXTERN_TEMPLATE_API basic_data {\n  static const uint64_t powers_of_10_64[];\n  static const uint32_t zero_or_powers_of_10_32[];\n  static const uint64_t zero_or_powers_of_10_64[];\n  static const uint64_t pow10_significands[];\n  static const int16_t pow10_exponents[];\n  static const char digits[];\n  static const char hex_digits[];\n  static const char foreground_color[];\n  static const char background_color[];\n  static const char reset_color[5];\n  static const wchar_t wreset_color[5];\n};\n\nFMT_EXTERN template struct basic_data<void>;\n\n// This is a struct rather than an alias to avoid shadowing warnings in gcc.\nstruct data 
: basic_data<> {};\n\n#ifdef FMT_BUILTIN_CLZLL\n// Returns the number of decimal digits in n. Leading zeros are not counted\n// except for n == 0 in which case count_digits returns 1.\ninline int count_digits(uint64_t n) {\n  // Based on http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10\n  // and the benchmark https://github.com/localvoid/cxx-benchmark-count-digits.\n  int t = (64 - FMT_BUILTIN_CLZLL(n | 1)) * 1233 >> 12;\n  return t - (n < data::zero_or_powers_of_10_64[t]) + 1;\n}\n#else\n// Fallback version of count_digits used when __builtin_clz is not available.\ninline int count_digits(uint64_t n) {\n  int count = 1;\n  for (;;) {\n    // Integer division is slow so do it for a group of four digits instead\n    // of for every digit. The idea comes from the talk by Alexandrescu\n    // \"Three Optimization Tips for C++\". See speed-test for a comparison.\n    if (n < 10) return count;\n    if (n < 100) return count + 1;\n    if (n < 1000) return count + 2;\n    if (n < 10000) return count + 3;\n    n /= 10000u;\n    count += 4;\n  }\n}\n#endif\n\n// Counts the number of digits in n. BITS = log2(radix).\ntemplate <unsigned BITS, typename UInt> inline int count_digits(UInt n) {\n  int num_digits = 0;\n  do {\n    ++num_digits;\n  } while ((n >>= BITS) != 0);\n  return num_digits;\n}\n\ntemplate <> int count_digits<4>(internal::fallback_uintptr n);\n\n#if FMT_HAS_CPP_ATTRIBUTE(always_inline)\n#  define FMT_ALWAYS_INLINE __attribute__((always_inline))\n#else\n#  define FMT_ALWAYS_INLINE\n#endif\n\ntemplate <typename Handler>\ninline char* lg(uint32_t n, Handler h) FMT_ALWAYS_INLINE;\n\n// Computes g = floor(log10(n)) and calls h.on<g>(n);\ntemplate <typename Handler> inline char* lg(uint32_t n, Handler h) {\n  return n < 100 ? n < 10 ? h.template on<0>(n) : h.template on<1>(n)\n                 : n < 1000000\n                       ? n < 10000 ? n < 1000 ? 
h.template on<2>(n)\n                                              : h.template on<3>(n)\n                                   : n < 100000 ? h.template on<4>(n)\n                                                : h.template on<5>(n)\n                       : n < 100000000 ? n < 10000000 ? h.template on<6>(n)\n                                                      : h.template on<7>(n)\n                                       : n < 1000000000 ? h.template on<8>(n)\n                                                        : h.template on<9>(n);\n}\n\n// An lg handler that formats a decimal number.\n// Usage: lg(n, decimal_formatter(buffer));\nclass decimal_formatter {\n private:\n  char* buffer_;\n\n  void write_pair(unsigned N, uint32_t index) {\n    std::memcpy(buffer_ + N, data::digits + index * 2, 2);\n  }\n\n public:\n  explicit decimal_formatter(char* buf) : buffer_(buf) {}\n\n  template <unsigned N> char* on(uint32_t u) {\n    if (N == 0) {\n      *buffer_ = static_cast<char>(u) + '0';\n    } else if (N == 1) {\n      write_pair(0, u);\n    } else {\n      // The idea of using 4.32 fixed-point numbers is based on\n      // https://github.com/jeaiii/itoa\n      unsigned n = N - 1;\n      unsigned a = n / 5 * n * 53 / 16;\n      uint64_t t =\n          ((1ULL << (32 + a)) / data::zero_or_powers_of_10_32[n] + 1 - n / 9);\n      t = ((t * u) >> a) + n / 5 * 4;\n      write_pair(0, t >> 32);\n      for (unsigned i = 2; i < N; i += 2) {\n        t = 100ULL * static_cast<uint32_t>(t);\n        write_pair(i, t >> 32);\n      }\n      if (N % 2 == 0) {\n        buffer_[N] =\n            static_cast<char>((10ULL * static_cast<uint32_t>(t)) >> 32) + '0';\n      }\n    }\n    return buffer_ += N + 1;\n  }\n};\n\n#ifdef FMT_BUILTIN_CLZ\n// Optional version of count_digits for better performance on 32-bit platforms.\ninline int count_digits(uint32_t n) {\n  int t = (32 - FMT_BUILTIN_CLZ(n | 1)) * 1233 >> 12;\n  return t - (n < data::zero_or_powers_of_10_32[t]) + 
1;\n}\n#endif\n\ntemplate <typename Char> FMT_API Char thousands_sep_impl(locale_ref loc);\ntemplate <typename Char> inline Char thousands_sep(locale_ref loc) {\n  return Char(thousands_sep_impl<char>(loc));\n}\ntemplate <> inline wchar_t thousands_sep(locale_ref loc) {\n  return thousands_sep_impl<wchar_t>(loc);\n}\n\ntemplate <typename Char> FMT_API Char decimal_point_impl(locale_ref loc);\ntemplate <typename Char> inline Char decimal_point(locale_ref loc) {\n  return Char(decimal_point_impl<char>(loc));\n}\ntemplate <> inline wchar_t decimal_point(locale_ref loc) {\n  return decimal_point_impl<wchar_t>(loc);\n}\n\n// Formats a decimal unsigned integer value writing into buffer.\n// add_thousands_sep is called after writing each char to add a thousands\n// separator if necessary.\ntemplate <typename UInt, typename Char, typename F>\ninline Char* format_decimal(Char* buffer, UInt value, int num_digits,\n                            F add_thousands_sep) {\n  FMT_ASSERT(num_digits >= 0, \"invalid digit count\");\n  buffer += num_digits;\n  Char* end = buffer;\n  while (value >= 100) {\n    // Integer division is slow so do it for a group of two digits instead\n    // of for every digit. The idea comes from the talk by Alexandrescu\n    // \"Three Optimization Tips for C++\". 
See speed-test for a comparison.\n    unsigned index = static_cast<unsigned>((value % 100) * 2);\n    value /= 100;\n    *--buffer = static_cast<Char>(data::digits[index + 1]);\n    add_thousands_sep(buffer);\n    *--buffer = static_cast<Char>(data::digits[index]);\n    add_thousands_sep(buffer);\n  }\n  if (value < 10) {\n    *--buffer = static_cast<Char>('0' + value);\n    return end;\n  }\n  unsigned index = static_cast<unsigned>(value * 2);\n  *--buffer = static_cast<Char>(data::digits[index + 1]);\n  add_thousands_sep(buffer);\n  *--buffer = static_cast<Char>(data::digits[index]);\n  return end;\n}\n\ntemplate <typename Char, typename UInt, typename Iterator, typename F>\ninline Iterator format_decimal(Iterator out, UInt value, int num_digits,\n                               F add_thousands_sep) {\n  FMT_ASSERT(num_digits >= 0, \"invalid digit count\");\n  // Buffer should be large enough to hold all digits (<= digits10 + 1).\n  enum { max_size = std::numeric_limits<UInt>::digits10 + 1 };\n  Char buffer[max_size + max_size / 3];\n  auto end = format_decimal(buffer, value, num_digits, add_thousands_sep);\n  return internal::copy_str<Char>(buffer, end, out);\n}\n\ntemplate <typename Char, typename It, typename UInt>\ninline It format_decimal(It out, UInt value, int num_digits) {\n  return format_decimal<Char>(out, value, num_digits, [](Char*) {});\n}\n\ntemplate <unsigned BASE_BITS, typename Char, typename UInt>\ninline Char* format_uint(Char* buffer, UInt value, int num_digits,\n                         bool upper = false) {\n  buffer += num_digits;\n  Char* end = buffer;\n  do {\n    const char* digits = upper ? \"0123456789ABCDEF\" : data::hex_digits;\n    unsigned digit = (value & ((1 << BASE_BITS) - 1));\n    *--buffer = static_cast<Char>(BASE_BITS < 4 ? 
static_cast<char>('0' + digit)\n                                                : digits[digit]);\n  } while ((value >>= BASE_BITS) != 0);\n  return end;\n}\n\ntemplate <unsigned BASE_BITS, typename Char>\nChar* format_uint(Char* buffer, internal::fallback_uintptr n, int num_digits,\n                  bool = false) {\n  auto char_digits = std::numeric_limits<unsigned char>::digits / 4;\n  int start = (num_digits + char_digits - 1) / char_digits - 1;\n  if (int start_digits = num_digits % char_digits) {\n    unsigned value = n.value[start--];\n    buffer = format_uint<BASE_BITS>(buffer, value, start_digits);\n  }\n  for (; start >= 0; --start) {\n    unsigned value = n.value[start];\n    buffer += char_digits;\n    auto p = buffer;\n    for (int i = 0; i < char_digits; ++i) {\n      unsigned digit = (value & ((1 << BASE_BITS) - 1));\n      *--p = static_cast<Char>(data::hex_digits[digit]);\n      value >>= BASE_BITS;\n    }\n  }\n  return buffer;\n}\n\ntemplate <unsigned BASE_BITS, typename Char, typename It, typename UInt>\ninline It format_uint(It out, UInt value, int num_digits, bool upper = false) {\n  // Buffer should be large enough to hold all digits (digits / BASE_BITS + 1).\n  char buffer[std::numeric_limits<UInt>::digits / BASE_BITS + 1];\n  format_uint<BASE_BITS>(buffer, value, num_digits, upper);\n  return internal::copy_str<Char>(buffer, buffer + num_digits, out);\n}\n\n#ifndef _WIN32\n#  define FMT_USE_WINDOWS_H 0\n#elif !defined(FMT_USE_WINDOWS_H)\n#  define FMT_USE_WINDOWS_H 1\n#endif\n\n// Define FMT_USE_WINDOWS_H to 0 to disable use of windows.h.\n// All the functionality that relies on it will be disabled too.\n#if FMT_USE_WINDOWS_H\n// A converter from UTF-8 to UTF-16.\n// It is only provided for Windows since other systems support UTF-8 natively.\nclass utf8_to_utf16 {\n private:\n  wmemory_buffer buffer_;\n\n public:\n  FMT_API explicit utf8_to_utf16(string_view s);\n  operator wstring_view() const { return wstring_view(&buffer_[0], size()); 
}\n  size_t size() const { return buffer_.size() - 1; }\n  const wchar_t* c_str() const { return &buffer_[0]; }\n  std::wstring str() const { return std::wstring(&buffer_[0], size()); }\n};\n\n// A converter from UTF-16 to UTF-8.\n// It is only provided for Windows since other systems support UTF-8 natively.\nclass utf16_to_utf8 {\n private:\n  memory_buffer buffer_;\n\n public:\n  utf16_to_utf8() {}\n  FMT_API explicit utf16_to_utf8(wstring_view s);\n  operator string_view() const { return string_view(&buffer_[0], size()); }\n  size_t size() const { return buffer_.size() - 1; }\n  const char* c_str() const { return &buffer_[0]; }\n  std::string str() const { return std::string(&buffer_[0], size()); }\n\n  // Performs conversion returning a system error code instead of\n  // throwing exception on conversion error. This method may still throw\n  // in case of memory allocation error.\n  FMT_API int convert(wstring_view s);\n};\n\nFMT_API void format_windows_error(fmt::internal::buffer<char>& out,\n                                  int error_code,\n                                  fmt::string_view message) FMT_NOEXCEPT;\n#endif\n\ntemplate <typename T = void> struct null {};\n\n// Workaround an array initialization issue in gcc 4.8.\ntemplate <typename Char> struct fill_t {\n private:\n  Char data_[6];\n\n public:\n  FMT_CONSTEXPR Char& operator[](size_t index) { return data_[index]; }\n  FMT_CONSTEXPR const Char& operator[](size_t index) const {\n    return data_[index];\n  }\n\n  static FMT_CONSTEXPR fill_t<Char> make() {\n    auto fill = fill_t<Char>();\n    fill[0] = Char(' ');\n    return fill;\n  }\n};\n}  // namespace internal\n\n// We cannot use enum classes as bit fields because of a gcc bug\n// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414.\nnamespace align {\nenum type { none, left, right, center, numeric };\n}\nusing align_t = align::type;\n\nnamespace sign {\nenum type { none, minus, plus, space };\n}\nusing sign_t = sign::type;\n\n// Format 
specifiers for built-in and string types.\ntemplate <typename Char> struct basic_format_specs {\n  int width;\n  int precision;\n  char type;\n  align_t align : 4;\n  sign_t sign : 3;\n  bool alt : 1;  // Alternate form ('#').\n  internal::fill_t<Char> fill;\n\n  constexpr basic_format_specs()\n      : width(0),\n        precision(-1),\n        type(0),\n        align(align::none),\n        sign(sign::none),\n        alt(false),\n        fill(internal::fill_t<Char>::make()) {}\n};\n\nusing format_specs = basic_format_specs<char>;\n\nnamespace internal {\n\n// Writes the exponent exp in the form \"[+-]d{2,3}\" to buffer.\ntemplate <typename Char, typename It> It write_exponent(int exp, It it) {\n  FMT_ASSERT(-1000 < exp && exp < 1000, \"exponent out of range\");\n  if (exp < 0) {\n    *it++ = static_cast<Char>('-');\n    exp = -exp;\n  } else {\n    *it++ = static_cast<Char>('+');\n  }\n  if (exp >= 100) {\n    *it++ = static_cast<Char>(static_cast<char>('0' + exp / 100));\n    exp %= 100;\n  }\n  const char* d = data::digits + exp * 2;\n  *it++ = static_cast<Char>(d[0]);\n  *it++ = static_cast<Char>(d[1]);\n  return it;\n}\n\nstruct gen_digits_params {\n  int num_digits;\n  bool fixed;\n  bool upper;\n  bool trailing_zeros;\n};\n\n// The number is given as v = digits * pow(10, exp).\ntemplate <typename Char, typename It>\nIt grisu_prettify(const char* digits, int size, int exp, It it,\n                  gen_digits_params params, Char decimal_point) {\n  // pow(10, full_exp - 1) <= v <= pow(10, full_exp).\n  int full_exp = size + exp;\n  if (!params.fixed) {\n    // Insert a decimal point after the first digit and add an exponent.\n    *it++ = static_cast<Char>(*digits);\n    if (size > 1) *it++ = decimal_point;\n    exp += size - 1;\n    it = copy_str<Char>(digits + 1, digits + size, it);\n    if (size < params.num_digits)\n      it = std::fill_n(it, params.num_digits - size, static_cast<Char>('0'));\n    *it++ = static_cast<Char>(params.upper ? 
'E' : 'e');\n    return write_exponent<Char>(exp, it);\n  }\n  if (size <= full_exp) {\n    // 1234e7 -> 12340000000[.0+]\n    it = copy_str<Char>(digits, digits + size, it);\n    it = std::fill_n(it, full_exp - size, static_cast<Char>('0'));\n    int num_zeros = (std::max)(params.num_digits - full_exp, 1);\n    if (params.trailing_zeros) {\n      *it++ = decimal_point;\n#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION\n      if (num_zeros > 1000)\n        throw std::runtime_error(\"fuzz mode - avoiding excessive cpu use\");\n#endif\n      it = std::fill_n(it, num_zeros, static_cast<Char>('0'));\n    }\n  } else if (full_exp > 0) {\n    // 1234e-2 -> 12.34[0+]\n    it = copy_str<Char>(digits, digits + full_exp, it);\n    if (!params.trailing_zeros) {\n      // Remove trailing zeros.\n      while (size > full_exp && digits[size - 1] == '0') --size;\n      if (size != full_exp) *it++ = decimal_point;\n      return copy_str<Char>(digits + full_exp, digits + size, it);\n    }\n    *it++ = decimal_point;\n    it = copy_str<Char>(digits + full_exp, digits + size, it);\n    if (params.num_digits > size) {\n      // Add trailing zeros.\n      int num_zeros = params.num_digits - size;\n      it = std::fill_n(it, num_zeros, static_cast<Char>('0'));\n    }\n  } else {\n    // 1234e-6 -> 0.001234\n    *it++ = static_cast<Char>('0');\n    int num_zeros = -full_exp;\n    if (params.num_digits >= 0 && params.num_digits < num_zeros)\n      num_zeros = params.num_digits;\n    if (!params.trailing_zeros)\n      while (size > 0 && digits[size - 1] == '0') --size;\n    if (num_zeros != 0 || size != 0) {\n      *it++ = decimal_point;\n      it = std::fill_n(it, num_zeros, static_cast<Char>('0'));\n      it = copy_str<Char>(digits, digits + size, it);\n    }\n  }\n  return it;\n}\n\nnamespace grisu_options {\nenum { fixed = 1, grisu3 = 2 };\n}\n\n// Formats value using the Grisu algorithm:\n// https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf\ntemplate <typename 
Double, FMT_ENABLE_IF(sizeof(Double) == sizeof(uint64_t))>\nFMT_API bool grisu_format(Double, buffer<char>&, int, unsigned, int&);\ntemplate <typename Double, FMT_ENABLE_IF(sizeof(Double) != sizeof(uint64_t))>\ninline bool grisu_format(Double, buffer<char>&, int, unsigned, int&) {\n  return false;\n}\n\nstruct sprintf_specs {\n  int precision;\n  char type;\n  bool alt : 1;\n\n  template <typename Char>\n  constexpr sprintf_specs(basic_format_specs<Char> specs)\n      : precision(specs.precision), type(specs.type), alt(specs.alt) {}\n\n  constexpr bool has_precision() const { return precision >= 0; }\n};\n\ntemplate <typename Double>\nchar* sprintf_format(Double, internal::buffer<char>&, sprintf_specs);\n\ntemplate <typename Handler>\nFMT_CONSTEXPR void handle_int_type_spec(char spec, Handler&& handler) {\n  switch (spec) {\n  case 0:\n  case 'd':\n    handler.on_dec();\n    break;\n  case 'x':\n  case 'X':\n    handler.on_hex();\n    break;\n  case 'b':\n  case 'B':\n    handler.on_bin();\n    break;\n  case 'o':\n    handler.on_oct();\n    break;\n  case 'n':\n    handler.on_num();\n    break;\n  default:\n    handler.on_error();\n  }\n}\n\ntemplate <typename Handler>\nFMT_CONSTEXPR void handle_float_type_spec(char spec, Handler&& handler) {\n  switch (spec) {\n  case 0:\n  case 'g':\n  case 'G':\n    handler.on_general();\n    break;\n  case 'e':\n  case 'E':\n    handler.on_exp();\n    break;\n  case 'f':\n  case 'F':\n    handler.on_fixed();\n    break;\n  case '%':\n    handler.on_percent();\n    break;\n  case 'a':\n  case 'A':\n    handler.on_hex();\n    break;\n  case 'n':\n    handler.on_num();\n    break;\n  default:\n    handler.on_error();\n    break;\n  }\n}\n\ntemplate <typename Char, typename Handler>\nFMT_CONSTEXPR void handle_char_specs(const basic_format_specs<Char>* specs,\n                                     Handler&& handler) {\n  if (!specs) return handler.on_char();\n  if (specs->type && specs->type != 'c') return handler.on_int();\n  if 
(specs->align == align::numeric || specs->sign != sign::none || specs->alt)\n    handler.on_error(\"invalid format specifier for char\");\n  handler.on_char();\n}\n\ntemplate <typename Char, typename Handler>\nFMT_CONSTEXPR void handle_cstring_type_spec(Char spec, Handler&& handler) {\n  if (spec == 0 || spec == 's')\n    handler.on_string();\n  else if (spec == 'p')\n    handler.on_pointer();\n  else\n    handler.on_error(\"invalid type specifier\");\n}\n\ntemplate <typename Char, typename ErrorHandler>\nFMT_CONSTEXPR void check_string_type_spec(Char spec, ErrorHandler&& eh) {\n  if (spec != 0 && spec != 's') eh.on_error(\"invalid type specifier\");\n}\n\ntemplate <typename Char, typename ErrorHandler>\nFMT_CONSTEXPR void check_pointer_type_spec(Char spec, ErrorHandler&& eh) {\n  if (spec != 0 && spec != 'p') eh.on_error(\"invalid type specifier\");\n}\n\ntemplate <typename ErrorHandler> class int_type_checker : private ErrorHandler {\n public:\n  FMT_CONSTEXPR explicit int_type_checker(ErrorHandler eh) : ErrorHandler(eh) {}\n\n  FMT_CONSTEXPR void on_dec() {}\n  FMT_CONSTEXPR void on_hex() {}\n  FMT_CONSTEXPR void on_bin() {}\n  FMT_CONSTEXPR void on_oct() {}\n  FMT_CONSTEXPR void on_num() {}\n\n  FMT_CONSTEXPR void on_error() {\n    ErrorHandler::on_error(\"invalid type specifier\");\n  }\n};\n\ntemplate <typename ErrorHandler>\nclass float_type_checker : private ErrorHandler {\n public:\n  FMT_CONSTEXPR explicit float_type_checker(ErrorHandler eh)\n      : ErrorHandler(eh) {}\n\n  FMT_CONSTEXPR void on_general() {}\n  FMT_CONSTEXPR void on_exp() {}\n  FMT_CONSTEXPR void on_fixed() {}\n  FMT_CONSTEXPR void on_percent() {}\n  FMT_CONSTEXPR void on_hex() {}\n  FMT_CONSTEXPR void on_num() {}\n\n  FMT_CONSTEXPR void on_error() {\n    ErrorHandler::on_error(\"invalid type specifier\");\n  }\n};\n\ntemplate <typename ErrorHandler>\nclass char_specs_checker : public ErrorHandler {\n private:\n  char type_;\n\n public:\n  FMT_CONSTEXPR char_specs_checker(char type, 
ErrorHandler eh)\n      : ErrorHandler(eh), type_(type) {}\n\n  FMT_CONSTEXPR void on_int() {\n    handle_int_type_spec(type_, int_type_checker<ErrorHandler>(*this));\n  }\n  FMT_CONSTEXPR void on_char() {}\n};\n\ntemplate <typename ErrorHandler>\nclass cstring_type_checker : public ErrorHandler {\n public:\n  FMT_CONSTEXPR explicit cstring_type_checker(ErrorHandler eh)\n      : ErrorHandler(eh) {}\n\n  FMT_CONSTEXPR void on_string() {}\n  FMT_CONSTEXPR void on_pointer() {}\n};\n\ntemplate <typename Context>\nvoid arg_map<Context>::init(const basic_format_args<Context>& args) {\n  if (map_) return;\n  map_ = new entry[internal::to_unsigned(args.max_size())];\n  if (args.is_packed()) {\n    for (int i = 0;; ++i) {\n      internal::type arg_type = args.type(i);\n      if (arg_type == internal::none_type) return;\n      if (arg_type == internal::named_arg_type) push_back(args.values_[i]);\n    }\n  }\n  for (int i = 0;; ++i) {\n    auto type = args.args_[i].type_;\n    if (type == internal::none_type) return;\n    if (type == internal::named_arg_type) push_back(args.args_[i].value_);\n  }\n}\n\n// This template provides operations for formatting and writing data into a\n// character range.\ntemplate <typename Range> class basic_writer {\n public:\n  using char_type = typename Range::value_type;\n  using iterator = typename Range::iterator;\n  using format_specs = basic_format_specs<char_type>;\n\n private:\n  iterator out_;  // Output iterator.\n  internal::locale_ref locale_;\n\n  // Attempts to reserve space for n extra characters in the output range.\n  // Returns a pointer to the reserved range or a reference to out_.\n  auto reserve(std::size_t n) -> decltype(internal::reserve(out_, n)) {\n    return internal::reserve(out_, n);\n  }\n\n  template <typename F> struct padded_int_writer {\n    size_t size_;\n    string_view prefix;\n    char_type fill;\n    std::size_t padding;\n    F f;\n\n    size_t size() const { return size_; }\n    size_t width() const { return 
size_; }\n\n    template <typename It> void operator()(It&& it) const {\n      if (prefix.size() != 0)\n        it = internal::copy_str<char_type>(prefix.begin(), prefix.end(), it);\n      it = std::fill_n(it, padding, fill);\n      f(it);\n    }\n  };\n\n  // Writes an integer in the format\n  //   <left-padding><prefix><numeric-padding><digits><right-padding>\n  // where <digits> are written by f(it).\n  template <typename F>\n  void write_int(int num_digits, string_view prefix, format_specs specs, F f) {\n    std::size_t size = prefix.size() + internal::to_unsigned(num_digits);\n    char_type fill = specs.fill[0];\n    std::size_t padding = 0;\n    if (specs.align == align::numeric) {\n      auto unsiged_width = internal::to_unsigned(specs.width);\n      if (unsiged_width > size) {\n        padding = unsiged_width - size;\n        size = unsiged_width;\n      }\n    } else if (specs.precision > num_digits) {\n      size = prefix.size() + internal::to_unsigned(specs.precision);\n      padding = internal::to_unsigned(specs.precision - num_digits);\n      fill = static_cast<char_type>('0');\n    }\n    if (specs.align == align::none) specs.align = align::right;\n    write_padded(specs, padded_int_writer<F>{size, prefix, fill, padding, f});\n  }\n\n  // Writes a decimal integer.\n  template <typename Int> void write_decimal(Int value) {\n    auto abs_value = static_cast<uint32_or_64_t<Int>>(value);\n    bool is_negative = internal::is_negative(value);\n    if (is_negative) abs_value = 0 - abs_value;\n    int num_digits = internal::count_digits(abs_value);\n    auto&& it =\n        reserve((is_negative ? 
1 : 0) + static_cast<size_t>(num_digits));\n    if (is_negative) *it++ = static_cast<char_type>('-');\n    it = internal::format_decimal<char_type>(it, abs_value, num_digits);\n  }\n\n  // The handle_int_type_spec handler that writes an integer.\n  template <typename Int, typename Specs> struct int_writer {\n    using unsigned_type = uint32_or_64_t<Int>;\n\n    basic_writer<Range>& writer;\n    const Specs& specs;\n    unsigned_type abs_value;\n    char prefix[4];\n    unsigned prefix_size;\n\n    string_view get_prefix() const { return string_view(prefix, prefix_size); }\n\n    int_writer(basic_writer<Range>& w, Int value, const Specs& s)\n        : writer(w),\n          specs(s),\n          abs_value(static_cast<unsigned_type>(value)),\n          prefix_size(0) {\n      if (internal::is_negative(value)) {\n        prefix[0] = '-';\n        ++prefix_size;\n        abs_value = 0 - abs_value;\n      } else if (specs.sign != sign::none && specs.sign != sign::minus) {\n        prefix[0] = specs.sign == sign::plus ? 
'+' : ' ';\n        ++prefix_size;\n      }\n    }\n\n    struct dec_writer {\n      unsigned_type abs_value;\n      int num_digits;\n\n      template <typename It> void operator()(It&& it) const {\n        it = internal::format_decimal<char_type>(it, abs_value, num_digits);\n      }\n    };\n\n    void on_dec() {\n      int num_digits = internal::count_digits(abs_value);\n      writer.write_int(num_digits, get_prefix(), specs,\n                       dec_writer{abs_value, num_digits});\n    }\n\n    struct hex_writer {\n      int_writer& self;\n      int num_digits;\n\n      template <typename It> void operator()(It&& it) const {\n        it = internal::format_uint<4, char_type>(it, self.abs_value, num_digits,\n                                                 self.specs.type != 'x');\n      }\n    };\n\n    void on_hex() {\n      if (specs.alt) {\n        prefix[prefix_size++] = '0';\n        prefix[prefix_size++] = specs.type;\n      }\n      int num_digits = internal::count_digits<4>(abs_value);\n      writer.write_int(num_digits, get_prefix(), specs,\n                       hex_writer{*this, num_digits});\n    }\n\n    template <int BITS> struct bin_writer {\n      unsigned_type abs_value;\n      int num_digits;\n\n      template <typename It> void operator()(It&& it) const {\n        it = internal::format_uint<BITS, char_type>(it, abs_value, num_digits);\n      }\n    };\n\n    void on_bin() {\n      if (specs.alt) {\n        prefix[prefix_size++] = '0';\n        prefix[prefix_size++] = static_cast<char>(specs.type);\n      }\n      int num_digits = internal::count_digits<1>(abs_value);\n      writer.write_int(num_digits, get_prefix(), specs,\n                       bin_writer<1>{abs_value, num_digits});\n    }\n\n    void on_oct() {\n      int num_digits = internal::count_digits<3>(abs_value);\n      if (specs.alt && specs.precision <= num_digits) {\n        // Octal prefix '0' is counted as a digit, so only add it if precision\n        // is not greater than 
the number of digits.\n        prefix[prefix_size++] = '0';\n      }\n      writer.write_int(num_digits, get_prefix(), specs,\n                       bin_writer<3>{abs_value, num_digits});\n    }\n\n    enum { sep_size = 1 };\n\n    struct num_writer {\n      unsigned_type abs_value;\n      int size;\n      char_type sep;\n\n      template <typename It> void operator()(It&& it) const {\n        basic_string_view<char_type> s(&sep, sep_size);\n        // Index of a decimal digit with the least significant digit having\n        // index 0.\n        unsigned digit_index = 0;\n        it = internal::format_decimal<char_type>(\n            it, abs_value, size, [s, &digit_index](char_type*& buffer) {\n              if (++digit_index % 3 != 0) return;\n              buffer -= s.size();\n              std::uninitialized_copy(s.data(), s.data() + s.size(),\n                                      internal::make_checked(buffer, s.size()));\n            });\n      }\n    };\n\n    void on_num() {\n      int num_digits = internal::count_digits(abs_value);\n      char_type sep = internal::thousands_sep<char_type>(writer.locale_);\n      int size = num_digits + sep_size * ((num_digits - 1) / 3);\n      writer.write_int(size, get_prefix(), specs,\n                       num_writer{abs_value, size, sep});\n    }\n\n    FMT_NORETURN void on_error() {\n      FMT_THROW(format_error(\"invalid type specifier\"));\n    }\n  };\n\n  enum { inf_size = 3 };  // This is an enum to workaround a bug in MSVC.\n\n  struct inf_or_nan_writer {\n    char sign;\n    bool as_percentage;\n    const char* str;\n\n    size_t size() const {\n      return static_cast<std::size_t>(inf_size + (sign ? 1 : 0) +\n                                      (as_percentage ? 
1 : 0));\n    }\n    size_t width() const { return size(); }\n\n    template <typename It> void operator()(It&& it) const {\n      if (sign) *it++ = static_cast<char_type>(sign);\n      it = internal::copy_str<char_type>(\n          str, str + static_cast<std::size_t>(inf_size), it);\n      if (as_percentage) *it++ = static_cast<char_type>('%');\n    }\n  };\n\n  struct double_writer {\n    char sign;\n    internal::buffer<char>& buffer;\n    char* decimal_point_pos;\n    char_type decimal_point;\n\n    size_t size() const { return buffer.size() + (sign ? 1 : 0); }\n    size_t width() const { return size(); }\n\n    template <typename It> void operator()(It&& it) {\n      if (sign) *it++ = static_cast<char_type>(sign);\n      auto begin = buffer.begin();\n      if (decimal_point_pos) {\n        it = internal::copy_str<char_type>(begin, decimal_point_pos, it);\n        *it++ = decimal_point;\n        begin = decimal_point_pos + 1;\n      }\n      it = internal::copy_str<char_type>(begin, buffer.end(), it);\n    }\n  };\n\n  class grisu_writer {\n   private:\n    internal::buffer<char>& digits_;\n    size_t size_;\n    char sign_;\n    int exp_;\n    internal::gen_digits_params params_;\n    char_type decimal_point_;\n\n   public:\n    grisu_writer(char sign, internal::buffer<char>& digits, int exp,\n                 const internal::gen_digits_params& params,\n                 char_type decimal_point)\n        : digits_(digits),\n          sign_(sign),\n          exp_(exp),\n          params_(params),\n          decimal_point_(decimal_point) {\n      int num_digits = static_cast<int>(digits.size());\n      int full_exp = num_digits + exp - 1;\n      int precision = params.num_digits > 0 ? 
params.num_digits : 11;\n      params_.fixed |= full_exp >= -4 && full_exp < precision;\n      auto it = internal::grisu_prettify<char>(\n          digits.data(), num_digits, exp, internal::counting_iterator<char>(),\n          params_, '.');\n      size_ = it.count();\n    }\n\n    size_t size() const { return size_ + (sign_ ? 1 : 0); }\n    size_t width() const { return size(); }\n\n    template <typename It> void operator()(It&& it) {\n      if (sign_) *it++ = static_cast<char_type>(sign_);\n      int num_digits = static_cast<int>(digits_.size());\n      it = internal::grisu_prettify<char_type>(digits_.data(), num_digits, exp_,\n                                               it, params_, decimal_point_);\n    }\n  };\n\n  template <typename Char> struct str_writer {\n    const Char* s;\n    size_t size_;\n\n    size_t size() const { return size_; }\n    size_t width() const {\n      return internal::count_code_points(basic_string_view<Char>(s, size_));\n    }\n\n    template <typename It> void operator()(It&& it) const {\n      it = internal::copy_str<char_type>(s, s + size_, it);\n    }\n  };\n\n  template <typename UIntPtr> struct pointer_writer {\n    UIntPtr value;\n    int num_digits;\n\n    size_t size() const { return to_unsigned(num_digits) + 2; }\n    size_t width() const { return size(); }\n\n    template <typename It> void operator()(It&& it) const {\n      *it++ = static_cast<char_type>('0');\n      *it++ = static_cast<char_type>('x');\n      it = internal::format_uint<4, char_type>(it, value, num_digits);\n    }\n  };\n\n public:\n  /** Constructs a ``basic_writer`` object. 
*/\n  explicit basic_writer(Range out,\n                        internal::locale_ref loc = internal::locale_ref())\n      : out_(out.begin()), locale_(loc) {}\n\n  iterator out() const { return out_; }\n\n  // Writes a value in the format\n  //   <left-padding><value><right-padding>\n  // where <value> is written by f(it).\n  template <typename F> void write_padded(const format_specs& specs, F&& f) {\n    // User-perceived width (in code points).\n    unsigned width = to_unsigned(specs.width);\n    size_t size = f.size();  // The number of code units.\n    size_t num_code_points = width != 0 ? f.width() : size;\n    if (width <= num_code_points) return f(reserve(size));\n    auto&& it = reserve(width + (size - num_code_points));\n    char_type fill = specs.fill[0];\n    std::size_t padding = width - num_code_points;\n    if (specs.align == align::right) {\n      it = std::fill_n(it, padding, fill);\n      f(it);\n    } else if (specs.align == align::center) {\n      std::size_t left_padding = padding / 2;\n      it = std::fill_n(it, left_padding, fill);\n      f(it);\n      it = std::fill_n(it, padding - left_padding, fill);\n    } else {\n      f(it);\n      it = std::fill_n(it, padding, fill);\n    }\n  }\n\n  void write(int value) { write_decimal(value); }\n  void write(long value) { write_decimal(value); }\n  void write(long long value) { write_decimal(value); }\n\n  void write(unsigned value) { write_decimal(value); }\n  void write(unsigned long value) { write_decimal(value); }\n  void write(unsigned long long value) { write_decimal(value); }\n\n  // Writes a formatted integer.\n  template <typename T, typename Spec>\n  void write_int(T value, const Spec& spec) {\n    internal::handle_int_type_spec(spec.type,\n                                   int_writer<T, Spec>(*this, value, spec));\n  }\n\n  void write(double value, const format_specs& specs = format_specs()) {\n    write_double(value, specs);\n  }\n\n  /**\n    \\rst\n    Formats *value* using the general 
format for floating-point numbers\n    (``'g'``) and writes it to the buffer.\n    \\endrst\n   */\n  void write(long double value, const format_specs& specs = format_specs()) {\n    write_double(value, specs);\n  }\n\n  // Formats a floating-point number (double or long double).\n  template <typename T, bool USE_GRISU = fmt::internal::use_grisu<T>()>\n  void write_double(T value, const format_specs& specs);\n\n  /** Writes a character to the buffer. */\n  void write(char value) {\n    auto&& it = reserve(1);\n    *it++ = value;\n  }\n\n  template <typename Char, FMT_ENABLE_IF(std::is_same<Char, char_type>::value)>\n  void write(Char value) {\n    auto&& it = reserve(1);\n    *it++ = value;\n  }\n\n  /**\n    \\rst\n    Writes *value* to the buffer.\n    \\endrst\n   */\n  void write(string_view value) {\n    auto&& it = reserve(value.size());\n    it = internal::copy_str<char_type>(value.begin(), value.end(), it);\n  }\n  void write(wstring_view value) {\n    static_assert(std::is_same<char_type, wchar_t>::value, \"\");\n    auto&& it = reserve(value.size());\n    it = std::copy(value.begin(), value.end(), it);\n  }\n\n  // Writes a formatted string.\n  template <typename Char>\n  void write(const Char* s, std::size_t size, const format_specs& specs) {\n    write_padded(specs, str_writer<Char>{s, size});\n  }\n\n  template <typename Char>\n  void write(basic_string_view<Char> s,\n             const format_specs& specs = format_specs()) {\n    const Char* data = s.data();\n    std::size_t size = s.size();\n    if (specs.precision >= 0 && internal::to_unsigned(specs.precision) < size)\n      size = internal::to_unsigned(specs.precision);\n    write(data, size, specs);\n  }\n\n  template <typename UIntPtr>\n  void write_pointer(UIntPtr value, const format_specs* specs) {\n    int num_digits = internal::count_digits<4>(value);\n    auto pw = pointer_writer<UIntPtr>{value, num_digits};\n    if (!specs) return pw(reserve(to_unsigned(num_digits) + 2));\n    format_specs 
specs_copy = *specs;\n    if (specs_copy.align == align::none) specs_copy.align = align::right;\n    write_padded(specs_copy, pw);\n  }\n};\n\nusing writer = basic_writer<buffer_range<char>>;\n\ntemplate <typename Range, typename ErrorHandler = internal::error_handler>\nclass arg_formatter_base {\n public:\n  using char_type = typename Range::value_type;\n  using iterator = typename Range::iterator;\n  using format_specs = basic_format_specs<char_type>;\n\n private:\n  using writer_type = basic_writer<Range>;\n  writer_type writer_;\n  format_specs* specs_;\n\n  struct char_writer {\n    char_type value;\n\n    size_t size() const { return 1; }\n    size_t width() const { return 1; }\n\n    template <typename It> void operator()(It&& it) const { *it++ = value; }\n  };\n\n  void write_char(char_type value) {\n    if (specs_)\n      writer_.write_padded(*specs_, char_writer{value});\n    else\n      writer_.write(value);\n  }\n\n  void write_pointer(const void* p) {\n    writer_.write_pointer(internal::bit_cast<internal::uintptr_t>(p), specs_);\n  }\n\n protected:\n  writer_type& writer() { return writer_; }\n  FMT_DEPRECATED format_specs* spec() { return specs_; }\n  format_specs* specs() { return specs_; }\n  iterator out() { return writer_.out(); }\n\n  void write(bool value) {\n    string_view sv(value ? \"true\" : \"false\");\n    specs_ ? writer_.write(sv, *specs_) : writer_.write(sv);\n  }\n\n  void write(const char_type* value) {\n    if (!value) FMT_THROW(format_error(\"string pointer is null\"));\n    auto length = std::char_traits<char_type>::length(value);\n    basic_string_view<char_type> sv(value, length);\n    specs_ ? 
writer_.write(sv, *specs_) : writer_.write(sv);\n  }\n\n public:\n  arg_formatter_base(Range r, format_specs* s, locale_ref loc)\n      : writer_(r, loc), specs_(s) {}\n\n  iterator operator()(monostate) {\n    FMT_ASSERT(false, \"invalid argument type\");\n    return out();\n  }\n\n  template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\n  iterator operator()(T value) {\n    if (specs_)\n      writer_.write_int(value, *specs_);\n    else\n      writer_.write(value);\n    return out();\n  }\n\n  iterator operator()(char_type value) {\n    internal::handle_char_specs(\n        specs_, char_spec_handler(*this, static_cast<char_type>(value)));\n    return out();\n  }\n\n  iterator operator()(bool value) {\n    if (specs_ && specs_->type) return (*this)(value ? 1 : 0);\n    write(value != 0);\n    return out();\n  }\n\n  template <typename T, FMT_ENABLE_IF(std::is_floating_point<T>::value)>\n  iterator operator()(T value) {\n    writer_.write_double(value, specs_ ? *specs_ : format_specs());\n    return out();\n  }\n\n  struct char_spec_handler : ErrorHandler {\n    arg_formatter_base& formatter;\n    char_type value;\n\n    char_spec_handler(arg_formatter_base& f, char_type val)\n        : formatter(f), value(val) {}\n\n    void on_int() {\n      if (formatter.specs_)\n        formatter.writer_.write_int(value, *formatter.specs_);\n      else\n        formatter.writer_.write(value);\n    }\n    void on_char() { formatter.write_char(value); }\n  };\n\n  struct cstring_spec_handler : internal::error_handler {\n    arg_formatter_base& formatter;\n    const char_type* value;\n\n    cstring_spec_handler(arg_formatter_base& f, const char_type* val)\n        : formatter(f), value(val) {}\n\n    void on_string() { formatter.write(value); }\n    void on_pointer() { formatter.write_pointer(value); }\n  };\n\n  iterator operator()(const char_type* value) {\n    if (!specs_) return write(value), out();\n    internal::handle_cstring_type_spec(specs_->type,\n             
                          cstring_spec_handler(*this, value));\n    return out();\n  }\n\n  iterator operator()(basic_string_view<char_type> value) {\n    if (specs_) {\n      internal::check_string_type_spec(specs_->type, internal::error_handler());\n      writer_.write(value, *specs_);\n    } else {\n      writer_.write(value);\n    }\n    return out();\n  }\n\n  iterator operator()(const void* value) {\n    if (specs_)\n      check_pointer_type_spec(specs_->type, internal::error_handler());\n    write_pointer(value);\n    return out();\n  }\n};\n\ntemplate <typename Char> FMT_CONSTEXPR bool is_name_start(Char c) {\n  return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || '_' == c;\n}\n\n// Parses the range [begin, end) as an unsigned integer. This function assumes\n// that the range is non-empty and the first character is a digit.\ntemplate <typename Char, typename ErrorHandler>\nFMT_CONSTEXPR int parse_nonnegative_int(const Char*& begin, const Char* end,\n                                        ErrorHandler&& eh) {\n  assert(begin != end && '0' <= *begin && *begin <= '9');\n  if (*begin == '0') {\n    ++begin;\n    return 0;\n  }\n  unsigned value = 0;\n  // Convert to unsigned to prevent a warning.\n  unsigned max_int = (std::numeric_limits<int>::max)();\n  unsigned big = max_int / 10;\n  do {\n    // Check for overflow.\n    if (value > big) {\n      value = max_int + 1;\n      break;\n    }\n    value = value * 10 + unsigned(*begin - '0');\n    ++begin;\n  } while (begin != end && '0' <= *begin && *begin <= '9');\n  if (value > max_int) eh.on_error(\"number is too big\");\n  return static_cast<int>(value);\n}\n\ntemplate <typename Context> class custom_formatter {\n private:\n  using char_type = typename Context::char_type;\n\n  basic_parse_context<char_type>& parse_ctx_;\n  Context& ctx_;\n\n public:\n  explicit custom_formatter(basic_parse_context<char_type>& parse_ctx,\n                            Context& ctx)\n      : parse_ctx_(parse_ctx), 
ctx_(ctx) {}\n\n  bool operator()(typename basic_format_arg<Context>::handle h) const {\n    h.format(parse_ctx_, ctx_);\n    return true;\n  }\n\n  template <typename T> bool operator()(T) const { return false; }\n};\n\ntemplate <typename T>\nusing is_integer =\n    bool_constant<std::is_integral<T>::value && !std::is_same<T, bool>::value &&\n                  !std::is_same<T, char>::value &&\n                  !std::is_same<T, wchar_t>::value>;\n\ntemplate <typename ErrorHandler> class width_checker {\n public:\n  explicit FMT_CONSTEXPR width_checker(ErrorHandler& eh) : handler_(eh) {}\n\n  template <typename T, FMT_ENABLE_IF(is_integer<T>::value)>\n  FMT_CONSTEXPR unsigned long long operator()(T value) {\n    if (is_negative(value)) handler_.on_error(\"negative width\");\n    return static_cast<unsigned long long>(value);\n  }\n\n  template <typename T, FMT_ENABLE_IF(!is_integer<T>::value)>\n  FMT_CONSTEXPR unsigned long long operator()(T) {\n    handler_.on_error(\"width is not integer\");\n    return 0;\n  }\n\n private:\n  ErrorHandler& handler_;\n};\n\ntemplate <typename ErrorHandler> class precision_checker {\n public:\n  explicit FMT_CONSTEXPR precision_checker(ErrorHandler& eh) : handler_(eh) {}\n\n  template <typename T, FMT_ENABLE_IF(is_integer<T>::value)>\n  FMT_CONSTEXPR unsigned long long operator()(T value) {\n    if (is_negative(value)) handler_.on_error(\"negative precision\");\n    return static_cast<unsigned long long>(value);\n  }\n\n  template <typename T, FMT_ENABLE_IF(!is_integer<T>::value)>\n  FMT_CONSTEXPR unsigned long long operator()(T) {\n    handler_.on_error(\"precision is not integer\");\n    return 0;\n  }\n\n private:\n  ErrorHandler& handler_;\n};\n\n// A format specifier handler that sets fields in basic_format_specs.\ntemplate <typename Char> class specs_setter {\n public:\n  explicit FMT_CONSTEXPR specs_setter(basic_format_specs<Char>& specs)\n      : specs_(specs) {}\n\n  FMT_CONSTEXPR specs_setter(const specs_setter& other)\n 
     : specs_(other.specs_) {}\n\n  FMT_CONSTEXPR void on_align(align_t align) { specs_.align = align; }\n  FMT_CONSTEXPR void on_fill(Char fill) { specs_.fill[0] = fill; }\n  FMT_CONSTEXPR void on_plus() { specs_.sign = sign::plus; }\n  FMT_CONSTEXPR void on_minus() { specs_.sign = sign::minus; }\n  FMT_CONSTEXPR void on_space() { specs_.sign = sign::space; }\n  FMT_CONSTEXPR void on_hash() { specs_.alt = true; }\n\n  FMT_CONSTEXPR void on_zero() {\n    specs_.align = align::numeric;\n    specs_.fill[0] = Char('0');\n  }\n\n  FMT_CONSTEXPR void on_width(int width) { specs_.width = width; }\n  FMT_CONSTEXPR void on_precision(int precision) {\n    specs_.precision = precision;\n  }\n  FMT_CONSTEXPR void end_precision() {}\n\n  FMT_CONSTEXPR void on_type(Char type) {\n    specs_.type = static_cast<char>(type);\n  }\n\n protected:\n  basic_format_specs<Char>& specs_;\n};\n\ntemplate <typename ErrorHandler> class numeric_specs_checker {\n public:\n  FMT_CONSTEXPR numeric_specs_checker(ErrorHandler& eh, internal::type arg_type)\n      : error_handler_(eh), arg_type_(arg_type) {}\n\n  FMT_CONSTEXPR void require_numeric_argument() {\n    if (!is_arithmetic(arg_type_))\n      error_handler_.on_error(\"format specifier requires numeric argument\");\n  }\n\n  FMT_CONSTEXPR void check_sign() {\n    require_numeric_argument();\n    if (is_integral(arg_type_) && arg_type_ != int_type &&\n        arg_type_ != long_long_type && arg_type_ != internal::char_type) {\n      error_handler_.on_error(\"format specifier requires signed argument\");\n    }\n  }\n\n  FMT_CONSTEXPR void check_precision() {\n    if (is_integral(arg_type_) || arg_type_ == internal::pointer_type)\n      error_handler_.on_error(\"precision not allowed for this argument type\");\n  }\n\n private:\n  ErrorHandler& error_handler_;\n  internal::type arg_type_;\n};\n\n// A format specifier handler that checks if specifiers are consistent with the\n// argument type.\ntemplate <typename Handler> class specs_checker : 
public Handler {\n public:\n  FMT_CONSTEXPR specs_checker(const Handler& handler, internal::type arg_type)\n      : Handler(handler), checker_(*this, arg_type) {}\n\n  FMT_CONSTEXPR specs_checker(const specs_checker& other)\n      : Handler(other), checker_(*this, other.arg_type_) {}\n\n  FMT_CONSTEXPR void on_align(align_t align) {\n    if (align == align::numeric) checker_.require_numeric_argument();\n    Handler::on_align(align);\n  }\n\n  FMT_CONSTEXPR void on_plus() {\n    checker_.check_sign();\n    Handler::on_plus();\n  }\n\n  FMT_CONSTEXPR void on_minus() {\n    checker_.check_sign();\n    Handler::on_minus();\n  }\n\n  FMT_CONSTEXPR void on_space() {\n    checker_.check_sign();\n    Handler::on_space();\n  }\n\n  FMT_CONSTEXPR void on_hash() {\n    checker_.require_numeric_argument();\n    Handler::on_hash();\n  }\n\n  FMT_CONSTEXPR void on_zero() {\n    checker_.require_numeric_argument();\n    Handler::on_zero();\n  }\n\n  FMT_CONSTEXPR void end_precision() { checker_.check_precision(); }\n\n private:\n  numeric_specs_checker<Handler> checker_;\n};\n\ntemplate <template <typename> class Handler, typename T, typename FormatArg,\n          typename ErrorHandler>\nFMT_CONSTEXPR void set_dynamic_spec(T& value, FormatArg arg, ErrorHandler eh) {\n  unsigned long long big_value =\n      visit_format_arg(Handler<ErrorHandler>(eh), arg);\n  if (big_value > to_unsigned((std::numeric_limits<int>::max)()))\n    eh.on_error(\"number is too big\");\n  value = static_cast<T>(big_value);\n}\n\nstruct auto_id {};\n\ntemplate <typename Context>\nFMT_CONSTEXPR typename Context::format_arg get_arg(Context& ctx, unsigned id) {\n  auto arg = ctx.arg(id);\n  if (!arg) ctx.on_error(\"argument index out of range\");\n  return arg;\n}\n\n// The standard format specifier handler with checking.\ntemplate <typename ParseContext, typename Context>\nclass specs_handler : public specs_setter<typename Context::char_type> {\n public:\n  using char_type = typename Context::char_type;\n\n 
 FMT_CONSTEXPR specs_handler(basic_format_specs<char_type>& specs,\n                              ParseContext& parse_ctx, Context& ctx)\n      : specs_setter<char_type>(specs),\n        parse_context_(parse_ctx),\n        context_(ctx) {}\n\n  template <typename Id> FMT_CONSTEXPR void on_dynamic_width(Id arg_id) {\n    set_dynamic_spec<width_checker>(this->specs_.width, get_arg(arg_id),\n                                    context_.error_handler());\n  }\n\n  template <typename Id> FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) {\n    set_dynamic_spec<precision_checker>(this->specs_.precision, get_arg(arg_id),\n                                        context_.error_handler());\n  }\n\n  void on_error(const char* message) { context_.on_error(message); }\n\n private:\n  // This is only needed for compatibility with gcc 4.4.\n  using format_arg = typename Context::format_arg;\n\n  FMT_CONSTEXPR format_arg get_arg(auto_id) {\n    return internal::get_arg(context_, parse_context_.next_arg_id());\n  }\n\n  FMT_CONSTEXPR format_arg get_arg(unsigned arg_id) {\n    parse_context_.check_arg_id(arg_id);\n    return internal::get_arg(context_, arg_id);\n  }\n\n  FMT_CONSTEXPR format_arg get_arg(basic_string_view<char_type> arg_id) {\n    parse_context_.check_arg_id(arg_id);\n    return context_.arg(arg_id);\n  }\n\n  ParseContext& parse_context_;\n  Context& context_;\n};\n\nstruct string_view_metadata {\n  FMT_CONSTEXPR string_view_metadata() : offset_(0u), size_(0u) {}\n  template <typename Char>\n  FMT_CONSTEXPR string_view_metadata(basic_string_view<Char> primary_string,\n                                     basic_string_view<Char> view)\n      : offset_(to_unsigned(view.data() - primary_string.data())),\n        size_(view.size()) {}\n  FMT_CONSTEXPR string_view_metadata(std::size_t offset, std::size_t size)\n      : offset_(offset), size_(size) {}\n  template <typename Char>\n  FMT_CONSTEXPR basic_string_view<Char> to_view(const Char* str) const {\n    return {str + 
offset_, size_};\n  }\n\n  std::size_t offset_;\n  std::size_t size_;\n};\n\nenum class arg_id_kind { none, index, name };\n\n// An argument reference.\ntemplate <typename Char> struct arg_ref {\n  FMT_CONSTEXPR arg_ref() : kind(arg_id_kind::none), val() {}\n  FMT_CONSTEXPR explicit arg_ref(int index)\n      : kind(arg_id_kind::index), val(index) {}\n  FMT_CONSTEXPR explicit arg_ref(string_view_metadata name)\n      : kind(arg_id_kind::name), val(name) {}\n\n  FMT_CONSTEXPR arg_ref& operator=(int idx) {\n    kind = arg_id_kind::index;\n    val.index = idx;\n    return *this;\n  }\n\n  arg_id_kind kind;\n  union value {\n    FMT_CONSTEXPR value() : index(0u) {}\n    FMT_CONSTEXPR value(int id) : index(id) {}\n    FMT_CONSTEXPR value(string_view_metadata n) : name(n) {}\n\n    int index;\n    string_view_metadata name;\n  } val;\n};\n\n// Format specifiers with width and precision resolved at formatting rather\n// than parsing time to allow re-using the same parsed specifiers with\n// different sets of arguments (precompilation of format strings).\ntemplate <typename Char>\nstruct dynamic_format_specs : basic_format_specs<Char> {\n  arg_ref<Char> width_ref;\n  arg_ref<Char> precision_ref;\n};\n\n// Format spec handler that saves references to arguments representing dynamic\n// width and precision to be resolved at formatting time.\ntemplate <typename ParseContext>\nclass dynamic_specs_handler\n    : public specs_setter<typename ParseContext::char_type> {\n public:\n  using char_type = typename ParseContext::char_type;\n\n  FMT_CONSTEXPR dynamic_specs_handler(dynamic_format_specs<char_type>& specs,\n                                      ParseContext& ctx)\n      : specs_setter<char_type>(specs), specs_(specs), context_(ctx) {}\n\n  FMT_CONSTEXPR dynamic_specs_handler(const dynamic_specs_handler& other)\n      : specs_setter<char_type>(other),\n        specs_(other.specs_),\n        context_(other.context_) {}\n\n  template <typename Id> FMT_CONSTEXPR void 
on_dynamic_width(Id arg_id) {\n    specs_.width_ref = make_arg_ref(arg_id);\n  }\n\n  template <typename Id> FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) {\n    specs_.precision_ref = make_arg_ref(arg_id);\n  }\n\n  FMT_CONSTEXPR void on_error(const char* message) {\n    context_.on_error(message);\n  }\n\n private:\n  using arg_ref_type = arg_ref<char_type>;\n\n  FMT_CONSTEXPR arg_ref_type make_arg_ref(int arg_id) {\n    context_.check_arg_id(arg_id);\n    return arg_ref_type(arg_id);\n  }\n\n  FMT_CONSTEXPR arg_ref_type make_arg_ref(auto_id) {\n    return arg_ref_type(context_.next_arg_id());\n  }\n\n  FMT_CONSTEXPR arg_ref_type make_arg_ref(basic_string_view<char_type> arg_id) {\n    context_.check_arg_id(arg_id);\n    basic_string_view<char_type> format_str(\n        context_.begin(), to_unsigned(context_.end() - context_.begin()));\n    const auto id_metadata = string_view_metadata(format_str, arg_id);\n    return arg_ref_type(id_metadata);\n  }\n\n  dynamic_format_specs<char_type>& specs_;\n  ParseContext& context_;\n};\n\ntemplate <typename Char, typename IDHandler>\nFMT_CONSTEXPR const Char* parse_arg_id(const Char* begin, const Char* end,\n                                       IDHandler&& handler) {\n  assert(begin != end);\n  Char c = *begin;\n  if (c == '}' || c == ':') return handler(), begin;\n  if (c >= '0' && c <= '9') {\n    int index = parse_nonnegative_int(begin, end, handler);\n    if (begin == end || (*begin != '}' && *begin != ':'))\n      return handler.on_error(\"invalid format string\"), begin;\n    handler(index);\n    return begin;\n  }\n  if (!is_name_start(c))\n    return handler.on_error(\"invalid format string\"), begin;\n  auto it = begin;\n  do {\n    ++it;\n  } while (it != end && (is_name_start(c = *it) || ('0' <= c && c <= '9')));\n  handler(basic_string_view<Char>(begin, to_unsigned(it - begin)));\n  return it;\n}\n\n// Adapts SpecHandler to IDHandler API for dynamic width.\ntemplate <typename SpecHandler, typename Char> 
struct width_adapter {\n  explicit FMT_CONSTEXPR width_adapter(SpecHandler& h) : handler(h) {}\n\n  FMT_CONSTEXPR void operator()() { handler.on_dynamic_width(auto_id()); }\n  FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_width(id); }\n  FMT_CONSTEXPR void operator()(basic_string_view<Char> id) {\n    handler.on_dynamic_width(id);\n  }\n\n  FMT_CONSTEXPR void on_error(const char* message) {\n    handler.on_error(message);\n  }\n\n  SpecHandler& handler;\n};\n\n// Adapts SpecHandler to IDHandler API for dynamic precision.\ntemplate <typename SpecHandler, typename Char> struct precision_adapter {\n  explicit FMT_CONSTEXPR precision_adapter(SpecHandler& h) : handler(h) {}\n\n  FMT_CONSTEXPR void operator()() { handler.on_dynamic_precision(auto_id()); }\n  FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_precision(id); }\n  FMT_CONSTEXPR void operator()(basic_string_view<Char> id) {\n    handler.on_dynamic_precision(id);\n  }\n\n  FMT_CONSTEXPR void on_error(const char* message) {\n    handler.on_error(message);\n  }\n\n  SpecHandler& handler;\n};\n\n// Parses fill and alignment.\ntemplate <typename Char, typename Handler>\nFMT_CONSTEXPR const Char* parse_align(const Char* begin, const Char* end,\n                                      Handler&& handler) {\n  FMT_ASSERT(begin != end, \"\");\n  auto align = align::none;\n  int i = 0;\n  if (begin + 1 != end) ++i;\n  do {\n    switch (static_cast<char>(begin[i])) {\n    case '<':\n      align = align::left;\n      break;\n    case '>':\n      align = align::right;\n      break;\n    case '=':\n      align = align::numeric;\n      break;\n    case '^':\n      align = align::center;\n      break;\n    }\n    if (align != align::none) {\n      if (i > 0) {\n        auto c = *begin;\n        if (c == '{')\n          return handler.on_error(\"invalid fill character '{'\"), begin;\n        begin += 2;\n        handler.on_fill(c);\n      } else\n        ++begin;\n      handler.on_align(align);\n      
break;\n    }\n  } while (i-- > 0);\n  return begin;\n}\n\ntemplate <typename Char, typename Handler>\nFMT_CONSTEXPR const Char* parse_width(const Char* begin, const Char* end,\n                                      Handler&& handler) {\n  FMT_ASSERT(begin != end, \"\");\n  if ('0' <= *begin && *begin <= '9') {\n    handler.on_width(parse_nonnegative_int(begin, end, handler));\n  } else if (*begin == '{') {\n    ++begin;\n    if (begin != end)\n      begin = parse_arg_id(begin, end, width_adapter<Handler, Char>(handler));\n    if (begin == end || *begin != '}')\n      return handler.on_error(\"invalid format string\"), begin;\n    ++begin;\n  }\n  return begin;\n}\n\ntemplate <typename Char, typename Handler>\nFMT_CONSTEXPR const Char* parse_precision(const Char* begin, const Char* end,\n                                          Handler&& handler) {\n  ++begin;\n  auto c = begin != end ? *begin : Char();\n  if ('0' <= c && c <= '9') {\n    handler.on_precision(parse_nonnegative_int(begin, end, handler));\n  } else if (c == '{') {\n    ++begin;\n    if (begin != end) {\n      begin =\n          parse_arg_id(begin, end, precision_adapter<Handler, Char>(handler));\n    }\n    if (begin == end || *begin++ != '}')\n      return handler.on_error(\"invalid format string\"), begin;\n  } else {\n    return handler.on_error(\"missing precision specifier\"), begin;\n  }\n  handler.end_precision();\n  return begin;\n}\n\n// Parses standard format specifiers and sends notifications about parsed\n// components to handler.\ntemplate <typename Char, typename SpecHandler>\nFMT_CONSTEXPR const Char* parse_format_specs(const Char* begin, const Char* end,\n                                             SpecHandler&& handler) {\n  if (begin == end || *begin == '}') return begin;\n\n  begin = parse_align(begin, end, handler);\n  if (begin == end) return begin;\n\n  // Parse sign.\n  switch (static_cast<char>(*begin)) {\n  case '+':\n    handler.on_plus();\n    ++begin;\n    break;\n  case 
'-':\n    handler.on_minus();\n    ++begin;\n    break;\n  case ' ':\n    handler.on_space();\n    ++begin;\n    break;\n  }\n  if (begin == end) return begin;\n\n  if (*begin == '#') {\n    handler.on_hash();\n    if (++begin == end) return begin;\n  }\n\n  // Parse zero flag.\n  if (*begin == '0') {\n    handler.on_zero();\n    if (++begin == end) return begin;\n  }\n\n  begin = parse_width(begin, end, handler);\n  if (begin == end) return begin;\n\n  // Parse precision.\n  if (*begin == '.') {\n    begin = parse_precision(begin, end, handler);\n  }\n\n  // Parse type.\n  if (begin != end && *begin != '}') handler.on_type(*begin++);\n  return begin;\n}\n\n// Return the result via the out param to workaround gcc bug 77539.\ntemplate <bool IS_CONSTEXPR, typename T, typename Ptr = const T*>\nFMT_CONSTEXPR bool find(Ptr first, Ptr last, T value, Ptr& out) {\n  for (out = first; out != last; ++out) {\n    if (*out == value) return true;\n  }\n  return false;\n}\n\ntemplate <>\ninline bool find<false, char>(const char* first, const char* last, char value,\n                              const char*& out) {\n  out = static_cast<const char*>(\n      std::memchr(first, value, internal::to_unsigned(last - first)));\n  return out != nullptr;\n}\n\ntemplate <typename Handler, typename Char> struct id_adapter {\n  FMT_CONSTEXPR void operator()() { handler.on_arg_id(); }\n  FMT_CONSTEXPR void operator()(unsigned id) { handler.on_arg_id(id); }\n  FMT_CONSTEXPR void operator()(basic_string_view<Char> id) {\n    handler.on_arg_id(id);\n  }\n  FMT_CONSTEXPR void on_error(const char* message) {\n    handler.on_error(message);\n  }\n  Handler& handler;\n};\n\ntemplate <bool IS_CONSTEXPR, typename Char, typename Handler>\nFMT_CONSTEXPR void parse_format_string(basic_string_view<Char> format_str,\n                                       Handler&& handler) {\n  struct writer {\n    FMT_CONSTEXPR void operator()(const Char* begin, const Char* end) {\n      if (begin == end) return;\n      
for (;;) {\n        const Char* p = nullptr;\n        if (!find<IS_CONSTEXPR>(begin, end, '}', p))\n          return handler_.on_text(begin, end);\n        ++p;\n        if (p == end || *p != '}')\n          return handler_.on_error(\"unmatched '}' in format string\");\n        handler_.on_text(begin, p);\n        begin = p + 1;\n      }\n    }\n    Handler& handler_;\n  } write{handler};\n  auto begin = format_str.data();\n  auto end = begin + format_str.size();\n  while (begin != end) {\n    // Doing two passes with memchr (one for '{' and another for '}') is up to\n    // 2.5x faster than the naive one-pass implementation on big format strings.\n    const Char* p = begin;\n    if (*begin != '{' && !find<IS_CONSTEXPR>(begin, end, '{', p))\n      return write(begin, end);\n    write(begin, p);\n    ++p;\n    if (p == end) return handler.on_error(\"invalid format string\");\n    if (static_cast<char>(*p) == '}') {\n      handler.on_arg_id();\n      handler.on_replacement_field(p);\n    } else if (*p == '{') {\n      handler.on_text(p, p + 1);\n    } else {\n      p = parse_arg_id(p, end, id_adapter<Handler, Char>{handler});\n      Char c = p != end ? 
*p : Char();\n      if (c == '}') {\n        handler.on_replacement_field(p);\n      } else if (c == ':') {\n        p = handler.on_format_specs(p + 1, end);\n        if (p == end || *p != '}')\n          return handler.on_error(\"unknown format specifier\");\n      } else {\n        return handler.on_error(\"missing '}' in format string\");\n      }\n    }\n    begin = p + 1;\n  }\n}\n\ntemplate <typename T, typename ParseContext>\nFMT_CONSTEXPR const typename ParseContext::char_type* parse_format_specs(\n    ParseContext& ctx) {\n  using char_type = typename ParseContext::char_type;\n  using context = buffer_context<char_type>;\n  using mapped_type =\n      conditional_t<internal::mapped_type_constant<T, context>::value !=\n                        internal::custom_type,\n                    decltype(arg_mapper<context>().map(std::declval<T>())), T>;\n  conditional_t<has_formatter<mapped_type, context>::value,\n                formatter<mapped_type, char_type>,\n                internal::fallback_formatter<T, char_type>>\n      f;\n  return f.parse(ctx);\n}\n\ntemplate <typename Char, typename ErrorHandler, typename... 
Args>\nclass format_string_checker {\n public:\n  explicit FMT_CONSTEXPR format_string_checker(\n      basic_string_view<Char> format_str, ErrorHandler eh)\n      : arg_id_((std::numeric_limits<unsigned>::max)()),\n        context_(format_str, eh),\n        parse_funcs_{&parse_format_specs<Args, parse_context_type>...} {}\n\n  FMT_CONSTEXPR void on_text(const Char*, const Char*) {}\n\n  FMT_CONSTEXPR void on_arg_id() {\n    arg_id_ = context_.next_arg_id();\n    check_arg_id();\n  }\n  FMT_CONSTEXPR void on_arg_id(unsigned id) {\n    arg_id_ = id;\n    context_.check_arg_id(id);\n    check_arg_id();\n  }\n  FMT_CONSTEXPR void on_arg_id(basic_string_view<Char>) {\n    on_error(\"compile-time checks don't support named arguments\");\n  }\n\n  FMT_CONSTEXPR void on_replacement_field(const Char*) {}\n\n  FMT_CONSTEXPR const Char* on_format_specs(const Char* begin, const Char*) {\n    advance_to(context_, begin);\n    return arg_id_ < num_args ? parse_funcs_[arg_id_](context_) : begin;\n  }\n\n  FMT_CONSTEXPR void on_error(const char* message) {\n    context_.on_error(message);\n  }\n\n private:\n  using parse_context_type = basic_parse_context<Char, ErrorHandler>;\n  enum { num_args = sizeof...(Args) };\n\n  FMT_CONSTEXPR void check_arg_id() {\n    if (arg_id_ >= num_args) context_.on_error(\"argument index out of range\");\n  }\n\n  // Format specifier parsing function.\n  using parse_func = const Char* (*)(parse_context_type&);\n\n  unsigned arg_id_;\n  parse_context_type context_;\n  parse_func parse_funcs_[num_args > 0 ? num_args : 1];\n};\n\ntemplate <typename Char, typename ErrorHandler, typename... Args>\nFMT_CONSTEXPR bool do_check_format_string(basic_string_view<Char> s,\n                                          ErrorHandler eh = ErrorHandler()) {\n  format_string_checker<Char, ErrorHandler, Args...> checker(s, eh);\n  parse_format_string<true>(s, checker);\n  return true;\n}\n\ntemplate <typename... 
Args, typename S,\n          enable_if_t<(is_compile_string<S>::value), int>>\nvoid check_format_string(S format_str) {\n  FMT_CONSTEXPR_DECL bool invalid_format =\n      internal::do_check_format_string<typename S::char_type,\n                                       internal::error_handler, Args...>(\n          to_string_view(format_str));\n  (void)invalid_format;\n}\n\ntemplate <template <typename> class Handler, typename Spec, typename Context>\nvoid handle_dynamic_spec(Spec& value, arg_ref<typename Context::char_type> ref,\n                         Context& ctx,\n                         const typename Context::char_type* format_str) {\n  switch (ref.kind) {\n  case arg_id_kind::none:\n    break;\n  case arg_id_kind::index:\n    internal::set_dynamic_spec<Handler>(value, ctx.arg(ref.val.index),\n                                        ctx.error_handler());\n    break;\n  case arg_id_kind::name: {\n    const auto arg_id = ref.val.name.to_view(format_str);\n    internal::set_dynamic_spec<Handler>(value, ctx.arg(arg_id),\n                                        ctx.error_handler());\n    break;\n  }\n  }\n}\n}  // namespace internal\n\ntemplate <typename Range>\nusing basic_writer FMT_DEPRECATED = internal::basic_writer<Range>;\nusing writer FMT_DEPRECATED = internal::writer;\nusing wwriter FMT_DEPRECATED =\n    internal::basic_writer<internal::buffer_range<wchar_t>>;\n\n/** The default argument formatter. 
*/\ntemplate <typename Range>\nclass arg_formatter : public internal::arg_formatter_base<Range> {\n private:\n  using char_type = typename Range::value_type;\n  using base = internal::arg_formatter_base<Range>;\n  using context_type = basic_format_context<typename base::iterator, char_type>;\n\n  context_type& ctx_;\n  basic_parse_context<char_type>* parse_ctx_;\n\n public:\n  using range = Range;\n  using iterator = typename base::iterator;\n  using format_specs = typename base::format_specs;\n\n  /**\n    \\rst\n    Constructs an argument formatter object.\n    *ctx* is a reference to the formatting context,\n    *specs* contains format specifier information for standard argument types.\n    \\endrst\n   */\n  explicit arg_formatter(context_type& ctx,\n                         basic_parse_context<char_type>* parse_ctx = nullptr,\n                         format_specs* specs = nullptr)\n      : base(Range(ctx.out()), specs, ctx.locale()),\n        ctx_(ctx),\n        parse_ctx_(parse_ctx) {}\n\n  using base::operator();\n\n  /** Formats an argument of a user-defined type. */\n  iterator operator()(typename basic_format_arg<context_type>::handle handle) {\n    handle.format(*parse_ctx_, ctx_);\n    return this->out();\n  }\n};\n\n/**\n An error returned by an operating system or a language runtime,\n for example a file opening error.\n*/\nclass FMT_API system_error : public std::runtime_error {\n private:\n  void init(int err_code, string_view format_str, format_args args);\n\n protected:\n  int error_code_;\n\n  system_error() : std::runtime_error(\"\") {}\n\n public:\n  /**\n   \\rst\n   Constructs a :class:`fmt::system_error` object with a description\n   formatted with `fmt::format_system_error`. 
*message* and additional\n   arguments passed into the constructor are formatted similarly to\n   `fmt::format`.\n\n   **Example**::\n\n     // This throws a system_error with the description\n     //   cannot open file 'madeup': No such file or directory\n     // or similar (system message may vary).\n     const char *filename = \"madeup\";\n     std::FILE *file = std::fopen(filename, \"r\");\n     if (!file)\n       throw fmt::system_error(errno, \"cannot open file '{}'\", filename);\n   \\endrst\n  */\n  template <typename... Args>\n  system_error(int error_code, string_view message, const Args&... args)\n      : std::runtime_error(\"\") {\n    init(error_code, message, make_format_args(args...));\n  }\n  ~system_error() FMT_NOEXCEPT;\n\n  int error_code() const { return error_code_; }\n};\n\n/**\n  \\rst\n  Formats an error returned by an operating system or a language runtime,\n  for example a file opening error, and writes it to *out* in the following\n  form:\n\n  .. parsed-literal::\n     *<message>*: *<system-message>*\n\n  where *<message>* is the passed message and *<system-message>* is\n  the system message corresponding to the error code.\n  *error_code* is a system error code as given by ``errno``.\n  If *error_code* is not a valid error code such as -1, the system message\n  may look like \"Unknown error -1\" and is platform-dependent.\n  \\endrst\n */\nFMT_API void format_system_error(internal::buffer<char>& out, int error_code,\n                                 fmt::string_view message) FMT_NOEXCEPT;\n\nstruct float_spec_handler {\n  char type;\n  bool upper;\n  bool fixed;\n  bool as_percentage;\n  bool use_locale;\n\n  explicit float_spec_handler(char t)\n      : type(t),\n        upper(false),\n        fixed(false),\n        as_percentage(false),\n        use_locale(false) {}\n\n  void on_general() {\n    if (type == 'G') upper = true;\n  }\n\n  void on_exp() {\n    if (type == 'E') upper = true;\n  }\n\n  void on_fixed() {\n    fixed = true;\n  
  if (type == 'F') upper = true;\n  }\n\n  void on_percent() {\n    fixed = true;\n    as_percentage = true;\n  }\n\n  void on_hex() {\n    if (type == 'A') upper = true;\n  }\n\n  void on_num() { use_locale = true; }\n\n  FMT_NORETURN void on_error() {\n    FMT_THROW(format_error(\"invalid type specifier\"));\n  }\n};\n\ntemplate <typename Range>\ntemplate <typename T, bool USE_GRISU>\nvoid internal::basic_writer<Range>::write_double(T value,\n                                                 const format_specs& specs) {\n  // Check type.\n  float_spec_handler handler(static_cast<char>(specs.type));\n  internal::handle_float_type_spec(handler.type, handler);\n\n  char sign = 0;\n  // Use signbit instead of value < 0 since the latter is always false for NaN.\n  if (std::signbit(value)) {\n    sign = '-';\n    value = -value;\n  } else if (specs.sign != sign::none) {\n    if (specs.sign == sign::plus)\n      sign = '+';\n    else if (specs.sign == sign::space)\n      sign = ' ';\n  }\n\n  if (!std::isfinite(value)) {\n    // Format infinity and NaN ourselves because sprintf's output is not\n    // consistent across platforms.\n    const char* str = std::isinf(value) ? (handler.upper ? \"INF\" : \"inf\")\n                                        : (handler.upper ? \"NAN\" : \"nan\");\n    return write_padded(specs,\n                        inf_or_nan_writer{sign, handler.as_percentage, str});\n  }\n\n  if (handler.as_percentage) value *= 100;\n\n  memory_buffer buffer;\n  int exp = 0;\n  int precision = specs.precision >= 0 || !specs.type ? specs.precision : 6;\n  unsigned options = handler.fixed ? 
internal::grisu_options::fixed : 0;\n  bool use_grisu = USE_GRISU &&\n                   (specs.type != 'a' && specs.type != 'A' &&\n                    specs.type != 'e' && specs.type != 'E') &&\n                   internal::grisu_format(static_cast<double>(value), buffer,\n                                          precision, options, exp);\n  char* decimal_point_pos = nullptr;\n  if (!use_grisu)\n    decimal_point_pos = internal::sprintf_format(value, buffer, specs);\n\n  if (handler.as_percentage) {\n    buffer.push_back('%');\n    --exp;  // Adjust decimal place position.\n  }\n  format_specs as = specs;\n  if (specs.align == align::numeric) {\n    if (sign) {\n      auto&& it = reserve(1);\n      *it++ = static_cast<char_type>(sign);\n      sign = 0;\n      if (as.width) --as.width;\n    }\n    as.align = align::right;\n  } else if (specs.align == align::none) {\n    as.align = align::right;\n  }\n  char_type decimal_point = handler.use_locale\n                                ? internal::decimal_point<char_type>(locale_)\n                                : static_cast<char_type>('.');\n  if (use_grisu) {\n    auto params = internal::gen_digits_params();\n    params.fixed = handler.fixed;\n    params.num_digits = precision;\n    params.trailing_zeros =\n        (precision != 0 && (handler.fixed || !specs.type)) || specs.alt;\n    write_padded(as, grisu_writer(sign, buffer, exp, params, decimal_point));\n  } else {\n    write_padded(as,\n                 double_writer{sign, buffer, decimal_point_pos, decimal_point});\n  }\n}\n\n// Reports a system error without throwing an exception.\n// Can be used to report errors from destructors.\nFMT_API void report_system_error(int error_code,\n                                 string_view message) FMT_NOEXCEPT;\n\n#if FMT_USE_WINDOWS_H\n\n/** A Windows error. 
*/\nclass windows_error : public system_error {\n private:\n  FMT_API void init(int error_code, string_view format_str, format_args args);\n\n public:\n  /**\n   \\rst\n   Constructs a :class:`fmt::windows_error` object with the description\n   of the form\n\n   .. parsed-literal::\n     *<message>*: *<system-message>*\n\n   where *<message>* is the formatted message and *<system-message>* is the\n   system message corresponding to the error code.\n   *error_code* is a Windows error code as given by ``GetLastError``.\n   If *error_code* is not a valid error code such as -1, the system message\n   will look like \"error -1\".\n\n   **Example**::\n\n     // This throws a windows_error with the description\n     //   cannot open file 'madeup': The system cannot find the file specified.\n     // or similar (system message may vary).\n     const char *filename = \"madeup\";\n     LPOFSTRUCT of = LPOFSTRUCT();\n     HFILE file = OpenFile(filename, &of, OF_READ);\n     if (file == HFILE_ERROR) {\n       throw fmt::windows_error(GetLastError(),\n                                \"cannot open file '{}'\", filename);\n     }\n   \\endrst\n  */\n  template <typename... Args>\n  windows_error(int error_code, string_view message, const Args&... args) {\n    init(error_code, message, make_format_args(args...));\n  }\n};\n\n// Reports a Windows error without throwing an exception.\n// Can be used to report errors from destructors.\nFMT_API void report_windows_error(int error_code,\n                                  string_view message) FMT_NOEXCEPT;\n\n#endif\n\n/** Fast integer formatter. 
*/\nclass format_int {\n private:\n  // Buffer should be large enough to hold all digits (digits10 + 1),\n  // a sign and a null character.\n  enum { buffer_size = std::numeric_limits<unsigned long long>::digits10 + 3 };\n  mutable char buffer_[buffer_size];\n  char* str_;\n\n  // Formats value in reverse and returns a pointer to the beginning.\n  char* format_decimal(unsigned long long value) {\n    char* ptr = buffer_ + (buffer_size - 1);  // Parens to workaround MSVC bug.\n    while (value >= 100) {\n      // Integer division is slow so do it for a group of two digits instead\n      // of for every digit. The idea comes from the talk by Alexandrescu\n      // \"Three Optimization Tips for C++\". See speed-test for a comparison.\n      unsigned index = static_cast<unsigned>((value % 100) * 2);\n      value /= 100;\n      *--ptr = internal::data::digits[index + 1];\n      *--ptr = internal::data::digits[index];\n    }\n    if (value < 10) {\n      *--ptr = static_cast<char>('0' + value);\n      return ptr;\n    }\n    unsigned index = static_cast<unsigned>(value * 2);\n    *--ptr = internal::data::digits[index + 1];\n    *--ptr = internal::data::digits[index];\n    return ptr;\n  }\n\n  void format_signed(long long value) {\n    unsigned long long abs_value = static_cast<unsigned long long>(value);\n    bool negative = value < 0;\n    if (negative) abs_value = 0 - abs_value;\n    str_ = format_decimal(abs_value);\n    if (negative) *--str_ = '-';\n  }\n\n public:\n  explicit format_int(int value) { format_signed(value); }\n  explicit format_int(long value) { format_signed(value); }\n  explicit format_int(long long value) { format_signed(value); }\n  explicit format_int(unsigned value) : str_(format_decimal(value)) {}\n  explicit format_int(unsigned long value) : str_(format_decimal(value)) {}\n  explicit format_int(unsigned long long value) : str_(format_decimal(value)) {}\n\n  /** Returns the number of characters written to the output buffer. 
*/\n  std::size_t size() const {\n    return internal::to_unsigned(buffer_ - str_ + buffer_size - 1);\n  }\n\n  /**\n    Returns a pointer to the output buffer content. No terminating null\n    character is appended.\n   */\n  const char* data() const { return str_; }\n\n  /**\n    Returns a pointer to the output buffer content with terminating null\n    character appended.\n   */\n  const char* c_str() const {\n    buffer_[buffer_size - 1] = '\\0';\n    return str_;\n  }\n\n  /**\n    \\rst\n    Returns the content of the output buffer as an ``std::string``.\n    \\endrst\n   */\n  std::string str() const { return std::string(str_, size()); }\n};\n\n// A formatter specialization for the core types corresponding to internal::type\n// constants.\ntemplate <typename T, typename Char>\nstruct formatter<T, Char,\n                 enable_if_t<internal::type_constant<T, Char>::value !=\n                             internal::custom_type>> {\n  FMT_CONSTEXPR formatter() : format_str_(nullptr) {}\n\n  // Parses format specifiers stopping either at the end of the range or at the\n  // terminating '}'.\n  template <typename ParseContext>\n  FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {\n    format_str_ = ctx.begin();\n    using handler_type = internal::dynamic_specs_handler<ParseContext>;\n    auto type = internal::type_constant<T, Char>::value;\n    internal::specs_checker<handler_type> handler(handler_type(specs_, ctx),\n                                                  type);\n    auto it = parse_format_specs(ctx.begin(), ctx.end(), handler);\n    auto eh = ctx.error_handler();\n    switch (type) {\n    case internal::none_type:\n    case internal::named_arg_type:\n      FMT_ASSERT(false, \"invalid argument type\");\n      break;\n    case internal::int_type:\n    case internal::uint_type:\n    case internal::long_long_type:\n    case internal::ulong_long_type:\n    case internal::bool_type:\n      handle_int_type_spec(specs_.type,\n               
            internal::int_type_checker<decltype(eh)>(eh));\n      break;\n    case internal::char_type:\n      handle_char_specs(\n          &specs_, internal::char_specs_checker<decltype(eh)>(specs_.type, eh));\n      break;\n    case internal::double_type:\n    case internal::long_double_type:\n      handle_float_type_spec(specs_.type,\n                             internal::float_type_checker<decltype(eh)>(eh));\n      break;\n    case internal::cstring_type:\n      internal::handle_cstring_type_spec(\n          specs_.type, internal::cstring_type_checker<decltype(eh)>(eh));\n      break;\n    case internal::string_type:\n      internal::check_string_type_spec(specs_.type, eh);\n      break;\n    case internal::pointer_type:\n      internal::check_pointer_type_spec(specs_.type, eh);\n      break;\n    case internal::custom_type:\n      // Custom format specifiers should be checked in parse functions of\n      // formatter specializations.\n      break;\n    }\n    return it;\n  }\n\n  template <typename FormatContext>\n  auto format(const T& val, FormatContext& ctx) -> decltype(ctx.out()) {\n    internal::handle_dynamic_spec<internal::width_checker>(\n        specs_.width, specs_.width_ref, ctx, format_str_);\n    internal::handle_dynamic_spec<internal::precision_checker>(\n        specs_.precision, specs_.precision_ref, ctx, format_str_);\n    using range_type =\n        internal::output_range<typename FormatContext::iterator,\n                               typename FormatContext::char_type>;\n    return visit_format_arg(arg_formatter<range_type>(ctx, nullptr, &specs_),\n                            internal::make_arg<FormatContext>(val));\n  }\n\n private:\n  internal::dynamic_format_specs<Char> specs_;\n  const Char* format_str_;\n};\n\n#define FMT_FORMAT_AS(Type, Base)                                             \\\n  template <typename Char>                                                    \\\n  struct formatter<Type, Char> : formatter<Base, Char> {       
               \\\n    template <typename FormatContext>                                         \\\n    auto format(const Type& val, FormatContext& ctx) -> decltype(ctx.out()) { \\\n      return formatter<Base, Char>::format(val, ctx);                         \\\n    }                                                                         \\\n  }\n\nFMT_FORMAT_AS(signed char, int);\nFMT_FORMAT_AS(unsigned char, unsigned);\nFMT_FORMAT_AS(short, int);\nFMT_FORMAT_AS(unsigned short, unsigned);\nFMT_FORMAT_AS(long, long long);\nFMT_FORMAT_AS(unsigned long, unsigned long long);\nFMT_FORMAT_AS(float, double);\nFMT_FORMAT_AS(Char*, const Char*);\nFMT_FORMAT_AS(std::basic_string<Char>, basic_string_view<Char>);\nFMT_FORMAT_AS(std::nullptr_t, const void*);\nFMT_FORMAT_AS(internal::std_string_view<Char>, basic_string_view<Char>);\n\ntemplate <typename Char>\nstruct formatter<void*, Char> : formatter<const void*, Char> {\n  template <typename FormatContext>\n  auto format(void* val, FormatContext& ctx) -> decltype(ctx.out()) {\n    return formatter<const void*, Char>::format(val, ctx);\n  }\n};\n\ntemplate <typename Char, size_t N>\nstruct formatter<Char[N], Char> : formatter<basic_string_view<Char>, Char> {\n  template <typename FormatContext>\n  auto format(const Char* val, FormatContext& ctx) -> decltype(ctx.out()) {\n    return formatter<basic_string_view<Char>, Char>::format(val, ctx);\n  }\n};\n\n// A formatter for types known only at run time such as variant alternatives.\n//\n// Usage:\n//   using variant = std::variant<int, std::string>;\n//   template <>\n//   struct formatter<variant>: dynamic_formatter<> {\n//     void format(buffer &buf, const variant &v, context &ctx) {\n//       visit([&](const auto &val) { format(buf, val, ctx); }, v);\n//     }\n//   };\ntemplate <typename Char = char> class dynamic_formatter {\n private:\n  struct null_handler : internal::error_handler {\n    void on_align(align_t) {}\n    void on_plus() {}\n    void on_minus() {}\n    
void on_space() {}\n    void on_hash() {}\n  };\n\n public:\n  template <typename ParseContext>\n  auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {\n    format_str_ = ctx.begin();\n    // Checks are deferred to formatting time when the argument type is known.\n    internal::dynamic_specs_handler<ParseContext> handler(specs_, ctx);\n    return parse_format_specs(ctx.begin(), ctx.end(), handler);\n  }\n\n  template <typename T, typename FormatContext>\n  auto format(const T& val, FormatContext& ctx) -> decltype(ctx.out()) {\n    handle_specs(ctx);\n    internal::specs_checker<null_handler> checker(\n        null_handler(),\n        internal::mapped_type_constant<T, FormatContext>::value);\n    checker.on_align(specs_.align);\n    switch (specs_.sign) {\n    case sign::none:\n      break;\n    case sign::plus:\n      checker.on_plus();\n      break;\n    case sign::minus:\n      checker.on_minus();\n      break;\n    case sign::space:\n      checker.on_space();\n      break;\n    }\n    if (specs_.alt) checker.on_hash();\n    if (specs_.precision >= 0) checker.end_precision();\n    using range = internal::output_range<typename FormatContext::iterator,\n                                         typename FormatContext::char_type>;\n    visit_format_arg(arg_formatter<range>(ctx, nullptr, &specs_),\n                     internal::make_arg<FormatContext>(val));\n    return ctx.out();\n  }\n\n private:\n  template <typename Context> void handle_specs(Context& ctx) {\n    internal::handle_dynamic_spec<internal::width_checker>(\n        specs_.width, specs_.width_ref, ctx, format_str_);\n    internal::handle_dynamic_spec<internal::precision_checker>(\n        specs_.precision, specs_.precision_ref, ctx, format_str_);\n  }\n\n  internal::dynamic_format_specs<Char> specs_;\n  const Char* format_str_;\n};\n\ntemplate <typename Range, typename Char>\ntypename basic_format_context<Range, Char>::format_arg\nbasic_format_context<Range, Char>::arg(basic_string_view<char_type> 
name) {\n  map_.init(args_);\n  format_arg arg = map_.find(name);\n  if (arg.type() == internal::none_type) this->on_error(\"argument not found\");\n  return arg;\n}\n\ntemplate <typename Char, typename ErrorHandler>\nFMT_CONSTEXPR void advance_to(basic_parse_context<Char, ErrorHandler>& ctx,\n                              const Char* p) {\n  ctx.advance_to(ctx.begin() + (p - &*ctx.begin()));\n}\n\ntemplate <typename ArgFormatter, typename Char, typename Context>\nstruct format_handler : internal::error_handler {\n  using range = typename ArgFormatter::range;\n\n  format_handler(range r, basic_string_view<Char> str,\n                 basic_format_args<Context> format_args,\n                 internal::locale_ref loc)\n      : parse_context(str), context(r.begin(), format_args, loc) {}\n\n  void on_text(const Char* begin, const Char* end) {\n    auto size = internal::to_unsigned(end - begin);\n    auto out = context.out();\n    auto&& it = internal::reserve(out, size);\n    it = std::copy_n(begin, size, it);\n    context.advance_to(out);\n  }\n\n  void get_arg(unsigned id) { arg = internal::get_arg(context, id); }\n\n  void on_arg_id() { get_arg(parse_context.next_arg_id()); }\n  void on_arg_id(unsigned id) {\n    parse_context.check_arg_id(id);\n    get_arg(id);\n  }\n  void on_arg_id(basic_string_view<Char> id) { arg = context.arg(id); }\n\n  void on_replacement_field(const Char* p) {\n    advance_to(parse_context, p);\n    internal::custom_formatter<Context> f(parse_context, context);\n    if (!visit_format_arg(f, arg))\n      context.advance_to(\n          visit_format_arg(ArgFormatter(context, &parse_context), arg));\n  }\n\n  const Char* on_format_specs(const Char* begin, const Char* end) {\n    advance_to(parse_context, begin);\n    internal::custom_formatter<Context> f(parse_context, context);\n    if (visit_format_arg(f, arg)) return parse_context.begin();\n    basic_format_specs<Char> specs;\n    using internal::specs_handler;\n    using parse_context_t = 
basic_parse_context<Char>;\n    internal::specs_checker<specs_handler<parse_context_t, Context>> handler(\n        specs_handler<parse_context_t, Context>(specs, parse_context, context),\n        arg.type());\n    begin = parse_format_specs(begin, end, handler);\n    if (begin == end || *begin != '}') on_error(\"missing '}' in format string\");\n    advance_to(parse_context, begin);\n    context.advance_to(\n        visit_format_arg(ArgFormatter(context, &parse_context, &specs), arg));\n    return begin;\n  }\n\n  basic_parse_context<Char> parse_context;\n  Context context;\n  basic_format_arg<Context> arg;\n};\n\n/** Formats arguments and writes the output to the range. */\ntemplate <typename ArgFormatter, typename Char, typename Context>\ntypename Context::iterator vformat_to(\n    typename ArgFormatter::range out, basic_string_view<Char> format_str,\n    basic_format_args<Context> args,\n    internal::locale_ref loc = internal::locale_ref()) {\n  format_handler<ArgFormatter, Char, Context> h(out, format_str, args, loc);\n  internal::parse_format_string<false>(format_str, h);\n  return h.context.out();\n}\n\n// Casts ``p`` to ``const void*`` for pointer formatting.\n// Example:\n//   auto s = format(\"{}\", ptr(p));\ntemplate <typename T> inline const void* ptr(const T* p) { return p; }\ntemplate <typename T> inline const void* ptr(const std::unique_ptr<T>& p) {\n  return p.get();\n}\ntemplate <typename T> inline const void* ptr(const std::shared_ptr<T>& p) {\n  return p.get();\n}\n\ntemplate <typename It, typename Char> struct arg_join : internal::view {\n  It begin;\n  It end;\n  basic_string_view<Char> sep;\n\n  arg_join(It b, It e, basic_string_view<Char> s) : begin(b), end(e), sep(s) {}\n};\n\ntemplate <typename It, typename Char>\nstruct formatter<arg_join<It, Char>, Char>\n    : formatter<typename std::iterator_traits<It>::value_type, Char> {\n  template <typename FormatContext>\n  auto format(const arg_join<It, Char>& value, FormatContext& ctx)\n      -> 
decltype(ctx.out()) {\n    using base = formatter<typename std::iterator_traits<It>::value_type, Char>;\n    auto it = value.begin;\n    auto out = ctx.out();\n    if (it != value.end) {\n      out = base::format(*it++, ctx);\n      while (it != value.end) {\n        out = std::copy(value.sep.begin(), value.sep.end(), out);\n        ctx.advance_to(out);\n        out = base::format(*it++, ctx);\n      }\n    }\n    return out;\n  }\n};\n\n/**\n  Returns an object that formats the iterator range `[begin, end)` with elements\n  separated by `sep`.\n */\ntemplate <typename It>\narg_join<It, char> join(It begin, It end, string_view sep) {\n  return {begin, end, sep};\n}\n\ntemplate <typename It>\narg_join<It, wchar_t> join(It begin, It end, wstring_view sep) {\n  return {begin, end, sep};\n}\n\n/**\n  \\rst\n  Returns an object that formats `range` with elements separated by `sep`.\n\n  **Example**::\n\n    std::vector<int> v = {1, 2, 3};\n    fmt::print(\"{}\", fmt::join(v, \", \"));\n    // Output: \"1, 2, 3\"\n  \\endrst\n */\ntemplate <typename Range>\narg_join<internal::iterator_t<const Range>, char> join(const Range& range,\n                                                       string_view sep) {\n  return join(std::begin(range), std::end(range), sep);\n}\n\ntemplate <typename Range>\narg_join<internal::iterator_t<const Range>, wchar_t> join(const Range& range,\n                                                          wstring_view sep) {\n  return join(std::begin(range), std::end(range), sep);\n}\n\n/**\n  \\rst\n  Converts *value* to ``std::string`` using the default format for type *T*.\n  It doesn't support user-defined types with custom formatters.\n\n  **Example**::\n\n    #include <fmt/format.h>\n\n    std::string answer = fmt::to_string(42);\n  \\endrst\n */\ntemplate <typename T> inline std::string to_string(const T& value) {\n  return format(\"{}\", value);\n}\n\n/**\n  Converts *value* to ``std::wstring`` using the default format for type *T*.\n 
*/\ntemplate <typename T> inline std::wstring to_wstring(const T& value) {\n  return format(L\"{}\", value);\n}\n\ntemplate <typename Char, std::size_t SIZE>\nstd::basic_string<Char> to_string(const basic_memory_buffer<Char, SIZE>& buf) {\n  return std::basic_string<Char>(buf.data(), buf.size());\n}\n\ntemplate <typename Char>\ntypename buffer_context<Char>::iterator internal::vformat_to(\n    internal::buffer<Char>& buf, basic_string_view<Char> format_str,\n    basic_format_args<buffer_context<Char>> args) {\n  using range = buffer_range<Char>;\n  return vformat_to<arg_formatter<range>>(buf, to_string_view(format_str),\n                                          args);\n}\n\ntemplate <typename S, typename Char = char_t<S>,\n          FMT_ENABLE_IF(internal::is_string<S>::value)>\ninline typename buffer_context<Char>::iterator vformat_to(\n    internal::buffer<Char>& buf, const S& format_str,\n    basic_format_args<buffer_context<Char>> args) {\n  return internal::vformat_to(buf, to_string_view(format_str), args);\n}\n\ntemplate <typename S, typename... Args, std::size_t SIZE = inline_buffer_size,\n          typename Char = enable_if_t<internal::is_string<S>::value, char_t<S>>>\ninline typename buffer_context<Char>::iterator format_to(\n    basic_memory_buffer<Char, SIZE>& buf, const S& format_str, Args&&... args) {\n  internal::check_format_string<Args...>(format_str);\n  using context = buffer_context<Char>;\n  return internal::vformat_to(buf, to_string_view(format_str),\n                              {make_format_args<context>(args...)});\n}\n\ntemplate <typename OutputIt, typename Char = char>\nusing format_context_t = basic_format_context<OutputIt, Char>;\n\ntemplate <typename OutputIt, typename Char = char>\nusing format_args_t = basic_format_args<format_context_t<OutputIt, Char>>;\n\ntemplate <typename S, typename OutputIt, typename... 
Args,\n          FMT_ENABLE_IF(\n              internal::is_output_iterator<OutputIt>::value &&\n              !internal::is_contiguous_back_insert_iterator<OutputIt>::value)>\ninline OutputIt vformat_to(OutputIt out, const S& format_str,\n                           format_args_t<OutputIt, char_t<S>> args) {\n  using range = internal::output_range<OutputIt, char_t<S>>;\n  return vformat_to<arg_formatter<range>>(range(out),\n                                          to_string_view(format_str), args);\n}\n\n/**\n \\rst\n Formats arguments, writes the result to the output iterator ``out`` and returns\n the iterator past the end of the output range.\n\n **Example**::\n\n   std::vector<char> out;\n   fmt::format_to(std::back_inserter(out), \"{}\", 42);\n \\endrst\n */\ntemplate <typename OutputIt, typename S, typename... Args>\ninline OutputIt format_to(OutputIt out, const S& format_str, Args&&... args) {\n  static_assert(internal::is_output_iterator<OutputIt>::value &&\n                    internal::is_string<S>::value,\n                \"\");\n  internal::check_format_string<Args...>(format_str);\n  using context = format_context_t<OutputIt, char_t<S>>;\n  return vformat_to(out, to_string_view(format_str),\n                    {make_format_args<context>(args...)});\n}\n\ntemplate <typename OutputIt> struct format_to_n_result {\n  /** Iterator past the end of the output range. */\n  OutputIt out;\n  /** Total (not truncated) output size. */\n  std::size_t size;\n};\n\ntemplate <typename OutputIt, typename Char = typename OutputIt::value_type>\nusing format_to_n_context =\n    format_context_t<fmt::internal::truncating_iterator<OutputIt>, Char>;\n\ntemplate <typename OutputIt, typename Char = typename OutputIt::value_type>\nusing format_to_n_args = basic_format_args<format_to_n_context<OutputIt, Char>>;\n\ntemplate <typename OutputIt, typename Char, typename... 
Args>\ninline format_arg_store<format_to_n_context<OutputIt, Char>, Args...>\nmake_format_to_n_args(const Args&... args) {\n  return format_arg_store<format_to_n_context<OutputIt, Char>, Args...>(\n      args...);\n}\n\ntemplate <typename OutputIt, typename Char, typename... Args,\n          FMT_ENABLE_IF(internal::is_output_iterator<OutputIt>::value)>\ninline format_to_n_result<OutputIt> vformat_to_n(\n    OutputIt out, std::size_t n, basic_string_view<Char> format_str,\n    format_to_n_args<OutputIt, Char> args) {\n  auto it = vformat_to(internal::truncating_iterator<OutputIt>(out, n),\n                       format_str, args);\n  return {it.base(), it.count()};\n}\n\n/**\n \\rst\n Formats arguments, writes up to ``n`` characters of the result to the output\n iterator ``out`` and returns the total output size and the iterator past the\n end of the output range.\n \\endrst\n */\ntemplate <typename OutputIt, typename S, typename... Args,\n          FMT_ENABLE_IF(internal::is_string<S>::value&&\n                            internal::is_output_iterator<OutputIt>::value)>\ninline format_to_n_result<OutputIt> format_to_n(OutputIt out, std::size_t n,\n                                                const S& format_str,\n                                                const Args&... args) {\n  internal::check_format_string<Args...>(format_str);\n  using context = format_to_n_context<OutputIt, char_t<S>>;\n  return vformat_to_n(out, n, to_string_view(format_str),\n                      {make_format_args<context>(args...)});\n}\n\ntemplate <typename Char>\ninline std::basic_string<Char> internal::vformat(\n    basic_string_view<Char> format_str,\n    basic_format_args<buffer_context<Char>> args) {\n  basic_memory_buffer<Char> buffer;\n  internal::vformat_to(buffer, format_str, args);\n  return fmt::to_string(buffer);\n}\n\n/**\n  Returns the number of characters in the output of\n  ``format(format_str, args...)``.\n */\ntemplate <typename... 
Args>\ninline std::size_t formatted_size(string_view format_str, const Args&... args) {\n  auto it = format_to(internal::counting_iterator<char>(), format_str, args...);\n  return it.count();\n}\n\n#if FMT_USE_USER_DEFINED_LITERALS\nnamespace internal {\n\n#  if FMT_USE_UDL_TEMPLATE\ntemplate <typename Char, Char... CHARS> class udl_formatter {\n public:\n  template <typename... Args>\n  std::basic_string<Char> operator()(const Args&... args) const {\n    FMT_CONSTEXPR_DECL Char s[] = {CHARS..., '\\0'};\n    FMT_CONSTEXPR_DECL bool invalid_format =\n        do_check_format_string<Char, error_handler, Args...>(\n            basic_string_view<Char>(s, sizeof...(CHARS)));\n    (void)invalid_format;\n    return format(s, args...);\n  }\n};\n#  else\ntemplate <typename Char> struct udl_formatter {\n  const Char* str;\n\n  template <typename... Args>\n  auto operator()(Args&&... args) const\n      -> decltype(format(str, std::forward<Args>(args)...)) {\n    return format(str, std::forward<Args>(args)...);\n  }\n};\n#  endif  // FMT_USE_UDL_TEMPLATE\n\ntemplate <typename Char> struct udl_arg {\n  const Char* str;\n\n  template <typename T> named_arg<T, Char> operator=(T&& value) const {\n    return {str, std::forward<T>(value)};\n  }\n};\n\n}  // namespace internal\n\ninline namespace literals {\n#  if FMT_USE_UDL_TEMPLATE\n#    pragma GCC diagnostic push\n#    if FMT_CLANG_VERSION\n#      pragma GCC diagnostic ignored \"-Wgnu-string-literal-operator-template\"\n#    endif\ntemplate <typename Char, Char... 
CHARS>\nFMT_CONSTEXPR internal::udl_formatter<Char, CHARS...> operator\"\"_format() {\n  return {};\n}\n#    pragma GCC diagnostic pop\n#  else\n/**\n  \\rst\n  User-defined literal equivalent of :func:`fmt::format`.\n\n  **Example**::\n\n    using namespace fmt::literals;\n    std::string message = \"The answer is {}\"_format(42);\n  \\endrst\n */\ninline internal::udl_formatter<char> operator\"\" _format(const char* s,\n                                                        std::size_t) {\n  return {s};\n}\ninline internal::udl_formatter<wchar_t> operator\"\" _format(const wchar_t* s,\n                                                           std::size_t) {\n  return {s};\n}\n#  endif  // FMT_USE_UDL_TEMPLATE\n\n/**\n  \\rst\n  User-defined literal equivalent of :func:`fmt::arg`.\n\n  **Example**::\n\n    using namespace fmt::literals;\n    fmt::print(\"Elapsed time: {s:.2f} seconds\", \"s\"_a=1.23);\n  \\endrst\n */\ninline internal::udl_arg<char> operator\"\" _a(const char* s, std::size_t) {\n  return {s};\n}\ninline internal::udl_arg<wchar_t> operator\"\" _a(const wchar_t* s, std::size_t) {\n  return {s};\n}\n}  // namespace literals\n#endif  // FMT_USE_USER_DEFINED_LITERALS\nFMT_END_NAMESPACE\n\n/**\n  \\rst\n  Constructs a compile-time format string.\n\n  **Example**::\n\n    // A compile-time error because 'd' is an invalid specifier for strings.\n    std::string s = format(FMT_STRING(\"{:d}\"), \"foo\");\n  \\endrst\n */\n#define FMT_STRING(s)                                                    \\\n  [] {                                                                   \\\n    struct str : fmt::compile_string {                                   \\\n      using char_type = typename std::remove_cv<std::remove_pointer<     \\\n          typename std::decay<decltype(s)>::type>::type>::type;          \\\n      FMT_CONSTEXPR operator fmt::basic_string_view<char_type>() const { \\\n        return {s, sizeof(s) / sizeof(char_type) - 1};                   \\\n    
  }                                                                  \\\n    } result;                                                            \\\n    /* Suppress Qt Creator warning about unused operator. */             \\\n    (void)static_cast<fmt::basic_string_view<typename str::char_type>>(  \\\n        result);                                                         \\\n    return result;                                                       \\\n  }()\n\n#if defined(FMT_STRING_ALIAS) && FMT_STRING_ALIAS\n/**\n  \\rst\n  Constructs a compile-time format string. This macro is disabled by default to\n  prevent potential name collisions. To enable it define ``FMT_STRING_ALIAS`` to\n  1 before including ``fmt/format.h``.\n\n  **Example**::\n\n    #define FMT_STRING_ALIAS 1\n    #include <fmt/format.h>\n    // A compile-time error because 'd' is an invalid specifier for strings.\n    std::string s = format(fmt(\"{:d}\"), \"foo\");\n  \\endrst\n */\n#  define fmt(s) FMT_STRING(s)\n#endif\n\n#ifdef FMT_HEADER_ONLY\n#  define FMT_FUNC inline\n#  include \"format-inl.h\"\n#else\n#  define FMT_FUNC\n#endif\n\n#endif  // FMT_FORMAT_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/locale.h",
    "content": "// Formatting library for C++ - std::locale support\n//\n// Copyright (c) 2012 - present, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_LOCALE_H_\n#define FMT_LOCALE_H_\n\n#include <locale>\n#include \"format.h\"\n\nFMT_BEGIN_NAMESPACE\n\nnamespace internal {\ntemplate <typename Char>\ntypename buffer_context<Char>::iterator vformat_to(\n    const std::locale& loc, buffer<Char>& buf,\n    basic_string_view<Char> format_str,\n    basic_format_args<buffer_context<Char>> args) {\n  using range = buffer_range<Char>;\n  return vformat_to<arg_formatter<range>>(buf, to_string_view(format_str), args,\n                                          internal::locale_ref(loc));\n}\n\ntemplate <typename Char>\nstd::basic_string<Char> vformat(const std::locale& loc,\n                                basic_string_view<Char> format_str,\n                                basic_format_args<buffer_context<Char>> args) {\n  basic_memory_buffer<Char> buffer;\n  internal::vformat_to(loc, buffer, format_str, args);\n  return fmt::to_string(buffer);\n}\n}  // namespace internal\n\ntemplate <typename S, typename Char = char_t<S>>\ninline std::basic_string<Char> vformat(\n    const std::locale& loc, const S& format_str,\n    basic_format_args<buffer_context<Char>> args) {\n  return internal::vformat(loc, to_string_view(format_str), args);\n}\n\ntemplate <typename S, typename... Args, typename Char = char_t<S>>\ninline std::basic_string<Char> format(const std::locale& loc,\n                                      const S& format_str, Args&&... args) {\n  return internal::vformat(\n      loc, to_string_view(format_str),\n      {internal::make_args_checked<Args...>(format_str, args...)});\n}\n\ntemplate <typename S, typename OutputIt, typename... 
Args,\n          typename Char = enable_if_t<\n              internal::is_output_iterator<OutputIt>::value, char_t<S>>>\ninline OutputIt vformat_to(OutputIt out, const std::locale& loc,\n                           const S& format_str,\n                           format_args_t<OutputIt, Char> args) {\n  using range = internal::output_range<OutputIt, Char>;\n  return vformat_to<arg_formatter<range>>(\n      range(out), to_string_view(format_str), args, internal::locale_ref(loc));\n}\n\ntemplate <typename OutputIt, typename S, typename... Args,\n          FMT_ENABLE_IF(internal::is_output_iterator<OutputIt>::value&&\n                            internal::is_string<S>::value)>\ninline OutputIt format_to(OutputIt out, const std::locale& loc,\n                          const S& format_str, Args&&... args) {\n  internal::check_format_string<Args...>(format_str);\n  using context = format_context_t<OutputIt, char_t<S>>;\n  format_arg_store<context, Args...> as{args...};\n  return vformat_to(out, loc, to_string_view(format_str),\n                    basic_format_args<context>(as));\n}\n\nFMT_END_NAMESPACE\n\n#endif  // FMT_LOCALE_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/ostream.h",
    "content": "// Formatting library for C++ - std::ostream support\n//\n// Copyright (c) 2012 - present, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_OSTREAM_H_\n#define FMT_OSTREAM_H_\n\n#include <ostream>\n#include \"format.h\"\n\nFMT_BEGIN_NAMESPACE\nnamespace internal {\n\ntemplate <class Char> class formatbuf : public std::basic_streambuf<Char> {\n private:\n  using int_type = typename std::basic_streambuf<Char>::int_type;\n  using traits_type = typename std::basic_streambuf<Char>::traits_type;\n\n  buffer<Char>& buffer_;\n\n public:\n  formatbuf(buffer<Char>& buf) : buffer_(buf) {}\n\n protected:\n  // The put-area is actually always empty. This makes the implementation\n  // simpler and has the advantage that the streambuf and the buffer are always\n  // in sync and sputc never writes into uninitialized memory. The obvious\n  // disadvantage is that each call to sputc always results in a (virtual) call\n  // to overflow. There is no disadvantage here for sputn since this always\n  // results in a call to xsputn.\n\n  int_type overflow(int_type ch = traits_type::eof()) FMT_OVERRIDE {\n    if (!traits_type::eq_int_type(ch, traits_type::eof()))\n      buffer_.push_back(static_cast<Char>(ch));\n    return ch;\n  }\n\n  std::streamsize xsputn(const Char* s, std::streamsize count) FMT_OVERRIDE {\n    buffer_.append(s, s + count);\n    return count;\n  }\n};\n\ntemplate <typename Char> struct test_stream : std::basic_ostream<Char> {\n private:\n  struct null;\n  // Hide all operator<< from std::basic_ostream<Char>.\n  void operator<<(null);\n};\n\n// Checks if T has a user-defined operator<< (e.g. 
not a member of\n// std::ostream).\ntemplate <typename T, typename Char> class is_streamable {\n private:\n  template <typename U>\n  static decltype((void)(std::declval<test_stream<Char>&>()\n                         << std::declval<U>()),\n                  std::true_type())\n  test(int);\n\n  template <typename> static std::false_type test(...);\n\n  using result = decltype(test<T>(0));\n\n public:\n  static const bool value = result::value;\n};\n\n// Write the content of buf to os.\ntemplate <typename Char>\nvoid write(std::basic_ostream<Char>& os, buffer<Char>& buf) {\n  const Char* buf_data = buf.data();\n  using unsigned_streamsize = std::make_unsigned<std::streamsize>::type;\n  unsigned_streamsize size = buf.size();\n  unsigned_streamsize max_size =\n      to_unsigned((std::numeric_limits<std::streamsize>::max)());\n  do {\n    unsigned_streamsize n = size <= max_size ? size : max_size;\n    os.write(buf_data, static_cast<std::streamsize>(n));\n    buf_data += n;\n    size -= n;\n  } while (size != 0);\n}\n\ntemplate <typename Char, typename T>\nvoid format_value(buffer<Char>& buf, const T& value) {\n  formatbuf<Char> format_buf(buf);\n  std::basic_ostream<Char> output(&format_buf);\n  output.exceptions(std::ios_base::failbit | std::ios_base::badbit);\n  output << value;\n  buf.resize(buf.size());\n}\n\n// Formats an object of type T that has an overloaded ostream operator<<.\ntemplate <typename T, typename Char>\nstruct fallback_formatter<T, Char, enable_if_t<is_streamable<T, Char>::value>>\n    : formatter<basic_string_view<Char>, Char> {\n  template <typename Context>\n  auto format(const T& value, Context& ctx) -> decltype(ctx.out()) {\n    basic_memory_buffer<Char> buffer;\n    format_value(buffer, value);\n    basic_string_view<Char> str(buffer.data(), buffer.size());\n    return formatter<basic_string_view<Char>, Char>::format(str, ctx);\n  }\n};\n}  // namespace internal\n\ntemplate <typename Char>\nvoid vprint(std::basic_ostream<Char>& os, 
basic_string_view<Char> format_str,\n            basic_format_args<buffer_context<Char>> args) {\n  basic_memory_buffer<Char> buffer;\n  internal::vformat_to(buffer, format_str, args);\n  internal::write(os, buffer);\n}\n\n/**\n  \\rst\n  Prints formatted data to the stream *os*.\n\n  **Example**::\n\n    fmt::print(cerr, \"Don't {}!\", \"panic\");\n  \\endrst\n */\ntemplate <typename S, typename... Args,\n          typename Char = enable_if_t<internal::is_string<S>::value, char_t<S>>>\nvoid print(std::basic_ostream<Char>& os, const S& format_str, Args&&... args) {\n  vprint(os, to_string_view(format_str),\n         {internal::make_args_checked<Args...>(format_str, args...)});\n}\nFMT_END_NAMESPACE\n\n#endif  // FMT_OSTREAM_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/posix.h",
    "content": "// A C++ interface to POSIX functions.\n//\n// Copyright (c) 2012 - 2016, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_POSIX_H_\n#define FMT_POSIX_H_\n\n#if defined(__MINGW32__) || defined(__CYGWIN__)\n// Workaround MinGW bug https://sourceforge.net/p/mingw/bugs/2024/.\n#  undef __STRICT_ANSI__\n#endif\n\n#include <errno.h>\n#include <fcntl.h>   // for O_RDONLY\n#include <locale.h>  // for locale_t\n#include <stdio.h>\n#include <stdlib.h>  // for strtod_l\n\n#include <cstddef>\n\n#if defined __APPLE__ || defined(__FreeBSD__)\n#  include <xlocale.h>  // for LC_NUMERIC_MASK on OS X\n#endif\n\n#include \"format.h\"\n\n#ifndef FMT_POSIX\n#  if defined(_WIN32) && !defined(__MINGW32__)\n// Fix warnings about deprecated symbols.\n#    define FMT_POSIX(call) _##call\n#  else\n#    define FMT_POSIX(call) call\n#  endif\n#endif\n\n// Calls to system functions are wrapped in FMT_SYSTEM for testability.\n#ifdef FMT_SYSTEM\n#  define FMT_POSIX_CALL(call) FMT_SYSTEM(call)\n#else\n#  define FMT_SYSTEM(call) call\n#  ifdef _WIN32\n// Fix warnings about deprecated symbols.\n#    define FMT_POSIX_CALL(call) ::_##call\n#  else\n#    define FMT_POSIX_CALL(call) ::call\n#  endif\n#endif\n\n// Retries the expression while it evaluates to error_result and errno\n// equals to EINTR.\n#ifndef _WIN32\n#  define FMT_RETRY_VAL(result, expression, error_result) \\\n    do {                                                  \\\n      result = (expression);                              \\\n    } while (result == error_result && errno == EINTR)\n#else\n#  define FMT_RETRY_VAL(result, expression, error_result) result = (expression)\n#endif\n\n#define FMT_RETRY(result, expression) FMT_RETRY_VAL(result, expression, -1)\n\nFMT_BEGIN_NAMESPACE\n\n/**\n  \\rst\n  A reference to a null-terminated string. 
It can be constructed from a C\n  string or ``std::string``.\n\n  You can use one of the following type aliases for common character types:\n\n  +---------------+-----------------------------+\n  | Type          | Definition                  |\n  +===============+=============================+\n  | cstring_view  | basic_cstring_view<char>    |\n  +---------------+-----------------------------+\n  | wcstring_view | basic_cstring_view<wchar_t> |\n  +---------------+-----------------------------+\n\n  This class is most useful as a parameter type to allow passing\n  different types of strings to a function, for example::\n\n    template <typename... Args>\n    std::string format(cstring_view format_str, const Args & ... args);\n\n    format(\"{}\", 42);\n    format(std::string(\"{}\"), 42);\n  \\endrst\n */\ntemplate <typename Char> class basic_cstring_view {\n private:\n  const Char* data_;\n\n public:\n  /** Constructs a string reference object from a C string. */\n  basic_cstring_view(const Char* s) : data_(s) {}\n\n  /**\n    \\rst\n    Constructs a string reference from an ``std::string`` object.\n    \\endrst\n   */\n  basic_cstring_view(const std::basic_string<Char>& s) : data_(s.c_str()) {}\n\n  /** Returns the pointer to a C string. 
*/\n  const Char* c_str() const { return data_; }\n};\n\nusing cstring_view = basic_cstring_view<char>;\nusing wcstring_view = basic_cstring_view<wchar_t>;\n\n// An error code.\nclass error_code {\n private:\n  int value_;\n\n public:\n  explicit error_code(int value = 0) FMT_NOEXCEPT : value_(value) {}\n\n  int get() const FMT_NOEXCEPT { return value_; }\n};\n\n// A buffered file.\nclass buffered_file {\n private:\n  FILE* file_;\n\n  friend class file;\n\n  explicit buffered_file(FILE* f) : file_(f) {}\n\n public:\n  // Constructs a buffered_file object which doesn't represent any file.\n  buffered_file() FMT_NOEXCEPT : file_(nullptr) {}\n\n  // Destroys the object closing the file it represents if any.\n  FMT_API ~buffered_file() FMT_NOEXCEPT;\n\n private:\n  buffered_file(const buffered_file&) = delete;\n  void operator=(const buffered_file&) = delete;\n\n public:\n  buffered_file(buffered_file&& other) FMT_NOEXCEPT : file_(other.file_) {\n    other.file_ = nullptr;\n  }\n\n  buffered_file& operator=(buffered_file&& other) {\n    close();\n    file_ = other.file_;\n    other.file_ = nullptr;\n    return *this;\n  }\n\n  // Opens a file.\n  FMT_API buffered_file(cstring_view filename, cstring_view mode);\n\n  // Closes the file.\n  FMT_API void close();\n\n  // Returns the pointer to a FILE object representing this file.\n  FILE* get() const FMT_NOEXCEPT { return file_; }\n\n  // We place parentheses around fileno to workaround a bug in some versions\n  // of MinGW that define fileno as a macro.\n  FMT_API int(fileno)() const;\n\n  void vprint(string_view format_str, format_args args) {\n    fmt::vprint(file_, format_str, args);\n  }\n\n  template <typename... Args>\n  inline void print(string_view format_str, const Args&... args) {\n    vprint(format_str, make_format_args(args...));\n  }\n};\n\n// A file. 
Closed file is represented by a file object with descriptor -1.\n// Methods that are not declared with FMT_NOEXCEPT may throw\n// fmt::system_error in case of failure. Note that some errors such as\n// closing the file multiple times will cause a crash on Windows rather\n// than an exception. You can get standard behavior by overriding the\n// invalid parameter handler with _set_invalid_parameter_handler.\nclass file {\n private:\n  int fd_;  // File descriptor.\n\n  // Constructs a file object with a given descriptor.\n  explicit file(int fd) : fd_(fd) {}\n\n public:\n  // Possible values for the oflag argument to the constructor.\n  enum {\n    RDONLY = FMT_POSIX(O_RDONLY),  // Open for reading only.\n    WRONLY = FMT_POSIX(O_WRONLY),  // Open for writing only.\n    RDWR = FMT_POSIX(O_RDWR)       // Open for reading and writing.\n  };\n\n  // Constructs a file object which doesn't represent any file.\n  file() FMT_NOEXCEPT : fd_(-1) {}\n\n  // Opens a file and constructs a file object representing this file.\n  FMT_API file(cstring_view path, int oflag);\n\n private:\n  file(const file&) = delete;\n  void operator=(const file&) = delete;\n\n public:\n  file(file&& other) FMT_NOEXCEPT : fd_(other.fd_) { other.fd_ = -1; }\n\n  file& operator=(file&& other) {\n    close();\n    fd_ = other.fd_;\n    other.fd_ = -1;\n    return *this;\n  }\n\n  // Destroys the object closing the file it represents if any.\n  FMT_API ~file() FMT_NOEXCEPT;\n\n  // Returns the file descriptor.\n  int descriptor() const FMT_NOEXCEPT { return fd_; }\n\n  // Closes the file.\n  FMT_API void close();\n\n  // Returns the file size. 
The size has signed type for consistency with\n  // stat::st_size.\n  FMT_API long long size() const;\n\n  // Attempts to read count bytes from the file into the specified buffer.\n  FMT_API std::size_t read(void* buffer, std::size_t count);\n\n  // Attempts to write count bytes from the specified buffer to the file.\n  FMT_API std::size_t write(const void* buffer, std::size_t count);\n\n  // Duplicates a file descriptor with the dup function and returns\n  // the duplicate as a file object.\n  FMT_API static file dup(int fd);\n\n  // Makes fd be the copy of this file descriptor, closing fd first if\n  // necessary.\n  FMT_API void dup2(int fd);\n\n  // Makes fd be the copy of this file descriptor, closing fd first if\n  // necessary.\n  FMT_API void dup2(int fd, error_code& ec) FMT_NOEXCEPT;\n\n  // Creates a pipe setting up read_end and write_end file objects for reading\n  // and writing respectively.\n  FMT_API static void pipe(file& read_end, file& write_end);\n\n  // Creates a buffered_file object associated with this file and detaches\n  // this file object from the file.\n  FMT_API buffered_file fdopen(const char* mode);\n};\n\n// Returns the memory page size.\nlong getpagesize();\n\n#ifdef FMT_LOCALE\n// A \"C\" numeric locale.\nclass Locale {\n private:\n#  ifdef _MSC_VER\n  using locale_t = _locale_t;\n\n  enum { LC_NUMERIC_MASK = LC_NUMERIC };\n\n  static locale_t newlocale(int category_mask, const char* locale, locale_t) {\n    return _create_locale(category_mask, locale);\n  }\n\n  static void freelocale(locale_t locale) { _free_locale(locale); }\n\n  static double strtod_l(const char* nptr, char** endptr, _locale_t locale) {\n    return _strtod_l(nptr, endptr, locale);\n  }\n#  endif\n\n  locale_t locale_;\n\n  Locale(const Locale&) = delete;\n  void operator=(const Locale&) = delete;\n\n public:\n  using type = locale_t;\n\n  Locale() : locale_(newlocale(LC_NUMERIC_MASK, \"C\", nullptr)) {\n    if (!locale_) FMT_THROW(system_error(errno, \"cannot 
create locale\"));\n  }\n  ~Locale() { freelocale(locale_); }\n\n  type get() const { return locale_; }\n\n  // Converts string to floating-point number and advances str past the end\n  // of the parsed input.\n  double strtod(const char*& str) const {\n    char* end = nullptr;\n    double result = strtod_l(str, &end, locale_);\n    str = end;\n    return result;\n  }\n};\n#endif  // FMT_LOCALE\nFMT_END_NAMESPACE\n\n#endif  // FMT_POSIX_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/prepare.h",
    "content": "// Formatting library for C++ - experimental format string compilation\n//\n// Copyright (c) 2012 - present, Victor Zverovich and fmt contributors\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_PREPARE_H_\n#define FMT_PREPARE_H_\n\n#ifndef FMT_HAS_CONSTRUCTIBLE_TRAITS\n#  define FMT_HAS_CONSTRUCTIBLE_TRAITS \\\n    (FMT_GCC_VERSION >= 407 || FMT_CLANG_VERSION || FMT_MSC_VER)\n#endif\n\n#include \"format.h\"\n\n#include <vector>\n\nFMT_BEGIN_NAMESPACE\n\ntemplate <typename Char> struct format_part {\n public:\n  struct named_argument_id {\n    FMT_CONSTEXPR named_argument_id(internal::string_view_metadata id)\n        : id(id) {}\n    internal::string_view_metadata id;\n  };\n\n  struct argument_id {\n    FMT_CONSTEXPR argument_id() : argument_id(0u) {}\n\n    FMT_CONSTEXPR argument_id(unsigned id)\n        : which(which_arg_id::index), val(id) {}\n\n    FMT_CONSTEXPR argument_id(internal::string_view_metadata id)\n        : which(which_arg_id::named_index), val(id) {}\n\n    enum class which_arg_id { index, named_index };\n\n    which_arg_id which;\n\n    union value {\n      FMT_CONSTEXPR value() : index(0u) {}\n      FMT_CONSTEXPR value(unsigned id) : index(id) {}\n      FMT_CONSTEXPR value(internal::string_view_metadata id)\n          : named_index(id) {}\n\n      unsigned index;\n      internal::string_view_metadata named_index;\n    } val;\n  };\n\n  struct specification {\n    FMT_CONSTEXPR specification() : arg_id(0u) {}\n    FMT_CONSTEXPR specification(unsigned id) : arg_id(id) {}\n\n    FMT_CONSTEXPR specification(internal::string_view_metadata id)\n        : arg_id(id) {}\n\n    argument_id arg_id;\n    internal::dynamic_format_specs<Char> parsed_specs;\n  };\n\n  FMT_CONSTEXPR format_part()\n      : which(which_value::argument_id), end_of_argument_id(0u), val(0u) {}\n\n  FMT_CONSTEXPR format_part(internal::string_view_metadata text)\n      : which(which_value::text), end_of_argument_id(0u), 
val(text) {}\n\n  FMT_CONSTEXPR format_part(unsigned id)\n      : which(which_value::argument_id), end_of_argument_id(0u), val(id) {}\n\n  FMT_CONSTEXPR format_part(named_argument_id arg_id)\n      : which(which_value::named_argument_id),\n        end_of_argument_id(0u),\n        val(arg_id) {}\n\n  FMT_CONSTEXPR format_part(specification spec)\n      : which(which_value::specification), end_of_argument_id(0u), val(spec) {}\n\n  enum class which_value {\n    argument_id,\n    named_argument_id,\n    text,\n    specification\n  };\n\n  which_value which;\n  std::size_t end_of_argument_id;\n  union value {\n    FMT_CONSTEXPR value() : arg_id(0u) {}\n    FMT_CONSTEXPR value(unsigned id) : arg_id(id) {}\n    FMT_CONSTEXPR value(named_argument_id named_id)\n        : named_arg_id(named_id.id) {}\n    FMT_CONSTEXPR value(internal::string_view_metadata t) : text(t) {}\n    FMT_CONSTEXPR value(specification s) : spec(s) {}\n    unsigned arg_id;\n    internal::string_view_metadata named_arg_id;\n    internal::string_view_metadata text;\n    specification spec;\n  } val;\n};\n\nnamespace internal {\ntemplate <typename Char, typename PartsContainer>\nclass format_preparation_handler : public internal::error_handler {\n private:\n  typedef format_part<Char> part;\n\n public:\n  typedef typename basic_string_view<Char>::iterator iterator;\n\n  FMT_CONSTEXPR format_preparation_handler(basic_string_view<Char> format,\n                                           PartsContainer& parts)\n      : parts_(parts), format_(format), parse_context_(format) {}\n\n  FMT_CONSTEXPR void on_text(const Char* begin, const Char* end) {\n    if (begin == end) {\n      return;\n    }\n    const auto offset = begin - format_.data();\n    const auto size = end - begin;\n    parts_.add(part(string_view_metadata(offset, size)));\n  }\n\n  FMT_CONSTEXPR void on_arg_id() {\n    parts_.add(part(parse_context_.next_arg_id()));\n  }\n\n  FMT_CONSTEXPR void on_arg_id(unsigned id) {\n    
parse_context_.check_arg_id(id);\n    parts_.add(part(id));\n  }\n\n  FMT_CONSTEXPR void on_arg_id(basic_string_view<Char> id) {\n    const auto view = string_view_metadata(format_, id);\n    const auto arg_id = typename part::named_argument_id(view);\n    parts_.add(part(arg_id));\n  }\n\n  FMT_CONSTEXPR void on_replacement_field(const Char* ptr) {\n    auto last_part = parts_.last();\n    last_part.end_of_argument_id = ptr - format_.begin();\n    parts_.substitute_last(last_part);\n  }\n\n  FMT_CONSTEXPR const Char* on_format_specs(const Char* begin,\n                                            const Char* end) {\n    const auto specs_offset = to_unsigned(begin - format_.begin());\n\n    typedef basic_parse_context<Char> parse_context;\n    internal::dynamic_format_specs<Char> parsed_specs;\n    dynamic_specs_handler<parse_context> handler(parsed_specs, parse_context_);\n    begin = parse_format_specs(begin, end, handler);\n\n    if (*begin != '}') {\n      on_error(\"missing '}' in format string\");\n    }\n\n    const auto last_part = parts_.last();\n\n    auto specs = last_part.which == part::which_value::argument_id\n                     ? typename part::specification(last_part.val.arg_id)\n                     : typename part::specification(last_part.val.named_arg_id);\n\n    specs.parsed_specs = parsed_specs;\n\n    auto new_part = part(specs);\n    new_part.end_of_argument_id = specs_offset;\n\n    parts_.substitute_last(new_part);\n\n    return begin;\n  }\n\n private:\n  PartsContainer& parts_;\n  basic_string_view<Char> format_;\n  basic_parse_context<Char> parse_context_;\n};\n\ntemplate <typename Format, typename PreparedPartsProvider, typename... 
Args>\nclass prepared_format {\n public:\n  using char_type = char_t<Format>;\n  using format_part_t = format_part<char_type>;\n\n  prepared_format(Format f)\n      : format_(std::move(f)), parts_provider_(to_string_view(format_)) {}\n\n  prepared_format() = delete;\n\n  std::size_t formatted_size(const Args&... args) const {\n    const auto it = this->format_to(counting_iterator<char_type>(), args...);\n    return it.count();\n  }\n\n  template <typename OutputIt,\n            FMT_ENABLE_IF(internal::is_output_iterator<OutputIt>::value)>\n  inline format_to_n_result<OutputIt> format_to_n(OutputIt out, unsigned n,\n                                                  const Args&... args) const {\n    format_arg_store<typename format_to_n_context<OutputIt, char_type>::type,\n                     Args...>\n    as(args...);\n\n    typedef truncating_iterator<OutputIt> trunc_it;\n    typedef output_range<trunc_it, char_type> range;\n    range r(trunc_it(out, n));\n    auto it = this->vformat_to(\n        r, typename format_to_n_args<OutputIt, char_type>::type(as));\n    return {it.base(), it.count()};\n  }\n\n  std::basic_string<char_type> format(const Args&... args) const {\n    basic_memory_buffer<char_type> buffer;\n    using range = buffer_range<char_type>;\n    this->vformat_to(range(buffer),\n                     basic_format_args<context>{\n                         make_args_checked<Args...>(format_, args...)});\n    return to_string(buffer);\n  }\n\n  template <typename Container, FMT_ENABLE_IF(is_contiguous<Container>::value)>\n  inline std::back_insert_iterator<Container> format_to(\n      std::back_insert_iterator<Container> out, Args&&... 
args) const {\n    internal::container_buffer<Container> buffer(internal::get_container(out));\n    using range = buffer_range<char_type>;\n    this->vformat_to(range(buffer),\n                     basic_format_args<context>{\n                         make_args_checked<Args...>(format_, args...)});\n    return out;\n  }\n\n  template <typename OutputIt>\n  inline OutputIt format_to(OutputIt out, const Args&... args) const {\n    typedef format_context_t<OutputIt, char_type> context;\n    typedef output_range<OutputIt, char_type> range;\n    format_arg_store<context, Args...> as(args...);\n    return this->vformat_to(range(out), basic_format_args<context>(as));\n  }\n\n  template <std::size_t SIZE = inline_buffer_size>\n  inline typename buffer_context<char_type>::iterator format_to(\n      basic_memory_buffer<char_type, SIZE>& buf, const Args&... args) const {\n    using range = buffer_range<char_type>;\n    return this->vformat_to(range(buf),\n                            basic_format_args<context>{\n                                make_args_checked<Args...>(format_, args...)});\n  }\n\n private:\n  typedef buffer_context<char_type> context;\n\n  template <typename Range, typename Context>\n  auto vformat_to(Range out, basic_format_args<Context> args) const ->\n      typename Context::iterator {\n    const auto format_view = internal::to_string_view(format_);\n    basic_parse_context<char_type> parse_ctx(format_view);\n    Context ctx(out.begin(), args);\n\n    const auto& parts = parts_provider_.parts();\n    for (auto part_it = parts.begin(); part_it != parts.end(); ++part_it) {\n      const auto& part = *part_it;\n      const auto& value = part.val;\n\n      switch (part.which) {\n      case format_part_t::which_value::text: {\n        const auto text = value.text.to_view(format_view.data());\n        auto output = ctx.out();\n        auto&& it = internal::reserve(output, text.size());\n        it = std::copy_n(text.begin(), text.size(), it);\n        
ctx.advance_to(output);\n      } break;\n\n      case format_part_t::which_value::argument_id: {\n        advance_parse_context_to_specification(parse_ctx, part);\n        format_arg<Range>(parse_ctx, ctx, value.arg_id);\n      } break;\n\n      case format_part_t::which_value::named_argument_id: {\n        advance_parse_context_to_specification(parse_ctx, part);\n        const auto named_arg_id =\n            value.named_arg_id.to_view(format_view.data());\n        format_arg<Range>(parse_ctx, ctx, named_arg_id);\n      } break;\n      case format_part_t::which_value::specification: {\n        const auto& arg_id_value = value.spec.arg_id.val;\n        const auto arg = value.spec.arg_id.which ==\n                                 format_part_t::argument_id::which_arg_id::index\n                             ? ctx.arg(arg_id_value.index)\n                             : ctx.arg(arg_id_value.named_index.to_view(\n                                   to_string_view(format_).data()));\n\n        auto specs = value.spec.parsed_specs;\n\n        handle_dynamic_spec<internal::width_checker>(\n            specs.width, specs.width_ref, ctx, format_view.begin());\n        handle_dynamic_spec<internal::precision_checker>(\n            specs.precision, specs.precision_ref, ctx, format_view.begin());\n\n        check_prepared_specs(specs, arg.type());\n        advance_parse_context_to_specification(parse_ctx, part);\n        ctx.advance_to(\n            visit_format_arg(arg_formatter<Range>(ctx, nullptr, &specs), arg));\n      } break;\n      }\n    }\n\n    return ctx.out();\n  }\n\n  void advance_parse_context_to_specification(\n      basic_parse_context<char_type>& parse_ctx,\n      const format_part_t& part) const {\n    const auto view = to_string_view(format_);\n    const auto specification_begin = view.data() + part.end_of_argument_id;\n    advance_to(parse_ctx, specification_begin);\n  }\n\n  template <typename Range, typename Context, typename Id>\n  void 
format_arg(basic_parse_context<char_type>& parse_ctx, Context& ctx,\n                  Id arg_id) const {\n    parse_ctx.check_arg_id(arg_id);\n    const auto stopped_at =\n        visit_format_arg(arg_formatter<Range>(ctx), ctx.arg(arg_id));\n    ctx.advance_to(stopped_at);\n  }\n\n  template <typename Char>\n  void check_prepared_specs(const basic_format_specs<Char>& specs,\n                            internal::type arg_type) const {\n    internal::error_handler h;\n    numeric_specs_checker<internal::error_handler> checker(h, arg_type);\n    if (specs.align == align::numeric) checker.require_numeric_argument();\n    if (specs.sign != sign::none) checker.check_sign();\n    if (specs.alt) checker.require_numeric_argument();\n    if (specs.precision >= 0) checker.check_precision();\n  }\n\n private:\n  Format format_;\n  PreparedPartsProvider parts_provider_;\n};\n\ntemplate <typename Format> class compiletime_prepared_parts_type_provider {\n private:\n  using char_type = char_t<Format>;\n\n  class count_handler {\n   public:\n    FMT_CONSTEXPR count_handler() : counter_(0u) {}\n\n    FMT_CONSTEXPR void on_text(const char_type* begin, const char_type* end) {\n      if (begin != end) {\n        ++counter_;\n      }\n    }\n\n    FMT_CONSTEXPR void on_arg_id() { ++counter_; }\n    FMT_CONSTEXPR void on_arg_id(unsigned) { ++counter_; }\n    FMT_CONSTEXPR void on_arg_id(basic_string_view<char_type>) { ++counter_; }\n\n    FMT_CONSTEXPR void on_replacement_field(const char_type*) {}\n\n    FMT_CONSTEXPR const char_type* on_format_specs(const char_type* begin,\n                                                   const char_type* end) {\n      return find_matching_brace(begin, end);\n    }\n\n    FMT_CONSTEXPR void on_error(const char*) {}\n\n    FMT_CONSTEXPR unsigned result() const { return counter_; }\n\n   private:\n    FMT_CONSTEXPR const char_type* find_matching_brace(const char_type* begin,\n                                                       const char_type* 
end) {\n      unsigned braces_counter{0u};\n      for (; begin != end; ++begin) {\n        if (*begin == '{') {\n          ++braces_counter;\n        } else if (*begin == '}') {\n          if (braces_counter == 0u) {\n            break;\n          }\n          --braces_counter;\n        }\n      }\n\n      return begin;\n    }\n\n   private:\n    unsigned counter_;\n  };\n\n  static FMT_CONSTEXPR unsigned count_parts() {\n    FMT_CONSTEXPR_DECL const auto text = to_string_view(Format{});\n    count_handler handler;\n    internal::parse_format_string</*IS_CONSTEXPR=*/true>(text, handler);\n    return handler.result();\n  }\n\n// Workaround for old compilers. Compiletime parts preparation will not be\n// performed with them anyway.\n#if FMT_USE_CONSTEXPR\n  static FMT_CONSTEXPR_DECL const unsigned number_of_format_parts =\n      compiletime_prepared_parts_type_provider::count_parts();\n#else\n  static const unsigned number_of_format_parts = 0u;\n#endif\n\n public:\n  template <unsigned N> struct format_parts_array {\n    typedef format_part<char_type> value_type;\n\n    FMT_CONSTEXPR format_parts_array() : arr{} {}\n\n    FMT_CONSTEXPR value_type& operator[](unsigned ind) { return arr[ind]; }\n\n    FMT_CONSTEXPR const value_type* begin() const { return arr; }\n\n    FMT_CONSTEXPR const value_type* end() const { return begin() + N; }\n\n   private:\n    value_type arr[N];\n  };\n\n  struct empty {\n    // Parts preparator will search for it\n    typedef format_part<char_type> value_type;\n  };\n\n  using type = conditional_t<static_cast<bool>(number_of_format_parts),\n                             format_parts_array<number_of_format_parts>, empty>;\n};\n\ntemplate <typename Parts> class compiletime_prepared_parts_collector {\n private:\n  typedef typename Parts::value_type format_part;\n\n public:\n  FMT_CONSTEXPR explicit compiletime_prepared_parts_collector(Parts& parts)\n      : parts_{parts}, counter_{0u} {}\n\n  FMT_CONSTEXPR void add(format_part part) { 
parts_[counter_++] = part; }\n\n  FMT_CONSTEXPR void substitute_last(format_part part) {\n    parts_[counter_ - 1] = part;\n  }\n\n  FMT_CONSTEXPR format_part last() { return parts_[counter_ - 1]; }\n\n private:\n  Parts& parts_;\n  unsigned counter_;\n};\n\ntemplate <typename PartsContainer, typename Char>\nFMT_CONSTEXPR PartsContainer prepare_parts(basic_string_view<Char> format) {\n  PartsContainer parts;\n  internal::parse_format_string</*IS_CONSTEXPR=*/false>(\n      format, format_preparation_handler<Char, PartsContainer>(format, parts));\n  return parts;\n}\n\ntemplate <typename PartsContainer, typename Char>\nFMT_CONSTEXPR PartsContainer\nprepare_compiletime_parts(basic_string_view<Char> format) {\n  typedef compiletime_prepared_parts_collector<PartsContainer> collector;\n\n  PartsContainer parts;\n  collector c(parts);\n  internal::parse_format_string</*IS_CONSTEXPR=*/true>(\n      format, format_preparation_handler<Char, collector>(format, c));\n  return parts;\n}\n\ntemplate <typename PartsContainer> class runtime_parts_provider {\n public:\n  runtime_parts_provider() = delete;\n  template <typename Char>\n  runtime_parts_provider(basic_string_view<Char> format)\n      : parts_(prepare_parts<PartsContainer>(format)) {}\n\n  const PartsContainer& parts() const { return parts_; }\n\n private:\n  PartsContainer parts_;\n};\n\ntemplate <typename Format, typename PartsContainer>\nstruct compiletime_parts_provider {\n  compiletime_parts_provider() = delete;\n  template <typename Char>\n  FMT_CONSTEXPR compiletime_parts_provider(basic_string_view<Char>) {}\n\n  const PartsContainer& parts() const {\n    static FMT_CONSTEXPR_DECL const PartsContainer prepared_parts =\n        prepare_compiletime_parts<PartsContainer>(\n            internal::to_string_view(Format{}));\n\n    return prepared_parts;\n  }\n};\n\ntemplate <typename PartsContainer>\nstruct parts_container_concept_check : std::true_type {\n#if FMT_HAS_CONSTRUCTIBLE_TRAITS\n  
static_assert(std::is_copy_constructible<PartsContainer>::value,\n                \"PartsContainer is not copy constructible\");\n  static_assert(std::is_move_constructible<PartsContainer>::value,\n                \"PartsContainer is not move constructible\");\n#endif\n\n  template <typename T, typename = void>\n  struct has_format_part_type : std::false_type {};\n  template <typename T>\n  struct has_format_part_type<T, void_t<typename T::format_part_type>>\n      : std::true_type {};\n\n  static_assert(has_format_part_type<PartsContainer>::value,\n                \"PartsContainer doesn't provide format_part_type typedef\");\n\n  struct check_second {};\n  struct check_first : check_second {};\n\n  template <typename T> static std::false_type has_add_check(check_second);\n  template <typename T>\n  static decltype(\n      (void)std::declval<T>().add(std::declval<typename T::format_part_type>()),\n      std::true_type()) has_add_check(check_first);\n  typedef decltype(has_add_check<PartsContainer>(check_first())) has_add;\n  static_assert(has_add::value, \"PartsContainer doesn't provide add() method\");\n\n  template <typename T> static std::false_type has_last_check(check_second);\n  template <typename T>\n  static decltype((void)std::declval<T>().last(),\n                  std::true_type()) has_last_check(check_first);\n  typedef decltype(has_last_check<PartsContainer>(check_first())) has_last;\n  static_assert(has_last::value,\n                \"PartsContainer doesn't provide last() method\");\n\n  template <typename T>\n  static std::false_type has_substitute_last_check(check_second);\n  template <typename T>\n  static decltype((void)std::declval<T>().substitute_last(\n                      std::declval<typename T::format_part_type>()),\n                  std::true_type()) has_substitute_last_check(check_first);\n  typedef decltype(has_substitute_last_check<PartsContainer>(\n      check_first())) has_substitute_last;\n  
static_assert(has_substitute_last::value,\n                \"PartsContainer doesn't provide substitute_last() method\");\n\n  template <typename T> static std::false_type has_begin_check(check_second);\n  template <typename T>\n  static decltype((void)std::declval<T>().begin(),\n                  std::true_type()) has_begin_check(check_first);\n  typedef decltype(has_begin_check<PartsContainer>(check_first())) has_begin;\n  static_assert(has_begin::value,\n                \"PartsContainer doesn't provide begin() method\");\n\n  template <typename T> static std::false_type has_end_check(check_second);\n  template <typename T>\n  static decltype((void)std::declval<T>().end(),\n                  std::true_type()) has_end_check(check_first);\n  typedef decltype(has_end_check<PartsContainer>(check_first())) has_end;\n  static_assert(has_end::value, \"PartsContainer doesn't provide end() method\");\n};\n\ntemplate <bool IS_CONSTEXPR, typename Format, typename /*PartsContainer*/>\nstruct parts_provider_type {\n  typedef compiletime_parts_provider<\n      Format, typename compiletime_prepared_parts_type_provider<Format>::type>\n      type;\n};\n\ntemplate <typename Format, typename PartsContainer>\nstruct parts_provider_type</*IS_CONSTEXPR=*/false, Format, PartsContainer> {\n  static_assert(parts_container_concept_check<PartsContainer>::value,\n                \"Parts container doesn't meet the concept\");\n  typedef runtime_parts_provider<PartsContainer> type;\n};\n\ntemplate <typename Format, typename PreparedPartsContainer, typename... 
Args>\nstruct basic_prepared_format {\n  typedef internal::prepared_format<Format,\n                                    typename internal::parts_provider_type<\n                                        is_compile_string<Format>::value,\n                                        Format, PreparedPartsContainer>::type,\n                                    Args...>\n      type;\n};\n\ntemplate <typename Char>\nstd::basic_string<Char> to_runtime_format(basic_string_view<Char> format) {\n  return std::basic_string<Char>(format.begin(), format.size());\n}\n\ntemplate <typename Char>\nstd::basic_string<Char> to_runtime_format(const Char* format) {\n  return std::basic_string<Char>(format);\n}\n\ntemplate <typename Char, typename Container = std::vector<format_part<Char>>>\nclass parts_container {\n public:\n  typedef format_part<Char> format_part_type;\n\n  void add(format_part_type part) { parts_.push_back(std::move(part)); }\n\n  void substitute_last(format_part_type part) {\n    parts_.back() = std::move(part);\n  }\n\n  format_part_type last() { return parts_.back(); }\n\n  auto begin() -> decltype(std::declval<Container>().begin()) {\n    return parts_.begin();\n  }\n\n  auto begin() const -> decltype(std::declval<const Container>().begin()) {\n    return parts_.begin();\n  }\n\n  auto end() -> decltype(std::declval<Container>().end()) {\n    return parts_.end();\n  }\n\n  auto end() const -> decltype(std::declval<const Container>().end()) {\n    return parts_.end();\n  }\n\n private:\n  Container parts_;\n};\n\n// Delegate preparing to preparator, to take advantage of a partial\n// specialization.\ntemplate <typename Format, typename... 
Args> struct preparator {\n  typedef parts_container<char_t<Format>> container;\n  typedef typename basic_prepared_format<Format, container, Args...>::type\n      prepared_format_type;\n\n  static auto prepare(Format format) -> prepared_format_type {\n    return prepared_format_type(std::move(format));\n  }\n};\n\ntemplate <typename PassedFormat, typename PreparedFormatFormat,\n          typename PartsContainer, typename... Args>\nstruct preparator<PassedFormat, prepared_format<PreparedFormatFormat,\n                                                PartsContainer, Args...>> {\n  typedef prepared_format<PreparedFormatFormat, PartsContainer, Args...>\n      prepared_format_type;\n\n  static auto prepare(PassedFormat format) -> prepared_format_type {\n    return prepared_format_type(std::move(format));\n  }\n};\n\nstruct compiletime_format_tag {};\nstruct runtime_format_tag {};\n\ntemplate <typename Format> struct format_tag {\n  using type = conditional_t<is_compile_string<Format>::value,\n                             compiletime_format_tag, runtime_format_tag>;\n};\n\n#if FMT_USE_CONSTEXPR\ntemplate <typename Format, typename... Args>\nauto do_prepare(runtime_format_tag, Format format) {\n  return preparator<Format, Args...>::prepare(std::move(format));\n}\n\ntemplate <typename Format, typename... Args>\nFMT_CONSTEXPR auto do_prepare(compiletime_format_tag, const Format& format) {\n  return typename basic_prepared_format<Format, void, Args...>::type(format);\n}\n#else\ntemplate <typename Format, typename... Args>\nauto do_prepare(const Format& format)\n    -> decltype(preparator<Format, Args...>::prepare(format)) {\n  return preparator<Format, Args...>::prepare(format);\n}\n#endif\n}  // namespace internal\n\ntemplate <typename Char, typename Container = std::vector<format_part<Char>>>\nstruct parts_container {\n  typedef internal::parts_container<Char, Container> type;\n};\n\ntemplate <typename Format, typename PartsContainer, typename... 
Args>\nstruct basic_prepared_format {\n  typedef typename internal::basic_prepared_format<Format, PartsContainer,\n                                                   Args...>::type type;\n};\n\ntemplate <typename... Args> struct prepared_format {\n  typedef typename basic_prepared_format<\n      std::string, typename parts_container<char>::type, Args...>::type type;\n};\n\ntemplate <typename... Args> struct wprepared_format {\n  typedef\n      typename basic_prepared_format<std::wstring,\n                                     typename parts_container<wchar_t>::type,\n                                     Args...>::type type;\n};\n\ntemplate <typename Char, typename Container = std::vector<format_part<Char>>>\nusing parts_container_t = typename parts_container<Char, Container>::type;\n\ntemplate <typename Format, typename PreparedPartsContainer, typename... Args>\nusing basic_prepared_format_t =\n    typename basic_prepared_format<Format, PreparedPartsContainer,\n                                   Args...>::type;\n\ntemplate <typename... Args>\nusing prepared_format_t =\n    basic_prepared_format_t<std::string, parts_container<char>, Args...>;\n\ntemplate <typename... Args>\nusing wprepared_format_t =\n    basic_prepared_format_t<std::wstring, parts_container<wchar_t>, Args...>;\n\n#if FMT_USE_CONSTEXPR\n\ntemplate <typename... Args, typename Format>\nFMT_CONSTEXPR auto prepare(Format format) {\n  return internal::do_prepare<Format, Args...>(\n      typename internal::format_tag<Format>::type{}, std::move(format));\n}\n#else\n\ntemplate <typename... Args, typename Format>\nauto prepare(Format format) ->\n    typename internal::preparator<Format, Args...>::prepared_format_type {\n  return internal::preparator<Format, Args...>::prepare(std::move(format));\n}\n#endif\n\ntemplate <typename... 
Args, typename Char>\nauto prepare(const Char* format) ->\n    typename internal::preparator<std::basic_string<Char>,\n                                  Args...>::prepared_format_type {\n  return prepare<Args...>(internal::to_runtime_format(format));\n}\n\ntemplate <typename... Args, typename Char, unsigned N>\nauto prepare(const Char(format)[N]) ->\n    typename internal::preparator<std::basic_string<Char>,\n                                  Args...>::prepared_format_type {\n  const auto view = basic_string_view<Char>(format, N);\n  return prepare<Args...>(internal::to_runtime_format(view));\n}\n\ntemplate <typename... Args, typename Char>\nauto prepare(basic_string_view<Char> format) ->\n    typename internal::preparator<std::basic_string<Char>,\n                                  Args...>::prepared_format_type {\n  return prepare<Args...>(internal::to_runtime_format(format));\n}\n\nFMT_END_NAMESPACE\n\n#endif  // FMT_PREPARE_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/printf.h",
    "content": "// Formatting library for C++\n//\n// Copyright (c) 2012 - 2016, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#ifndef FMT_PRINTF_H_\n#define FMT_PRINTF_H_\n\n#include <algorithm>  // std::fill_n\n#include <limits>     // std::numeric_limits\n\n#include \"ostream.h\"\n\nFMT_BEGIN_NAMESPACE\nnamespace internal {\n\n// A helper function to suppress bogus \"conditional expression is constant\"\n// warnings.\ntemplate <typename T> inline T const_check(T value) { return value; }\n\n// Checks if a value fits in int - used to avoid warnings about comparing\n// signed and unsigned integers.\ntemplate <bool IsSigned> struct int_checker {\n  template <typename T> static bool fits_in_int(T value) {\n    unsigned max = std::numeric_limits<int>::max();\n    return value <= max;\n  }\n  static bool fits_in_int(bool) { return true; }\n};\n\ntemplate <> struct int_checker<true> {\n  template <typename T> static bool fits_in_int(T value) {\n    return value >= std::numeric_limits<int>::min() &&\n           value <= std::numeric_limits<int>::max();\n  }\n  static bool fits_in_int(int) { return true; }\n};\n\nclass printf_precision_handler {\n public:\n  template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\n  int operator()(T value) {\n    if (!int_checker<std::numeric_limits<T>::is_signed>::fits_in_int(value))\n      FMT_THROW(format_error(\"number is too big\"));\n    return (std::max)(static_cast<int>(value), 0);\n  }\n\n  template <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>\n  int operator()(T) {\n    FMT_THROW(format_error(\"precision is not integer\"));\n    return 0;\n  }\n};\n\n// An argument visitor that returns true iff arg is a zero integer.\nclass is_zero_int {\n public:\n  template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\n  bool operator()(T value) {\n    return value == 0;\n  }\n\n  template <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>\n  bool 
operator()(T) {\n    return false;\n  }\n};\n\ntemplate <typename T> struct make_unsigned_or_bool : std::make_unsigned<T> {};\n\ntemplate <> struct make_unsigned_or_bool<bool> { using type = bool; };\n\ntemplate <typename T, typename Context> class arg_converter {\n private:\n  using char_type = typename Context::char_type;\n\n  basic_format_arg<Context>& arg_;\n  char_type type_;\n\n public:\n  arg_converter(basic_format_arg<Context>& arg, char_type type)\n      : arg_(arg), type_(type) {}\n\n  void operator()(bool value) {\n    if (type_ != 's') operator()<bool>(value);\n  }\n\n  template <typename U, FMT_ENABLE_IF(std::is_integral<U>::value)>\n  void operator()(U value) {\n    bool is_signed = type_ == 'd' || type_ == 'i';\n    using target_type = conditional_t<std::is_same<T, void>::value, U, T>;\n    if (const_check(sizeof(target_type) <= sizeof(int))) {\n      // Extra casts are used to silence warnings.\n      if (is_signed) {\n        arg_ = internal::make_arg<Context>(\n            static_cast<int>(static_cast<target_type>(value)));\n      } else {\n        using unsigned_type = typename make_unsigned_or_bool<target_type>::type;\n        arg_ = internal::make_arg<Context>(\n            static_cast<unsigned>(static_cast<unsigned_type>(value)));\n      }\n    } else {\n      if (is_signed) {\n        // glibc's printf doesn't sign extend arguments of smaller types:\n        //   std::printf(\"%lld\", -42);  // prints \"4294967254\"\n        // but we don't have to do the same because it's a UB.\n        arg_ = internal::make_arg<Context>(static_cast<long long>(value));\n      } else {\n        arg_ = internal::make_arg<Context>(\n            static_cast<typename make_unsigned_or_bool<U>::type>(value));\n      }\n    }\n  }\n\n  template <typename U, FMT_ENABLE_IF(!std::is_integral<U>::value)>\n  void operator()(U) {}  // No conversion needed for non-integral types.\n};\n\n// Converts an integer argument to T for printf, if T is an integral type.\n// If T is 
void, the argument is converted to corresponding signed or unsigned\n// type depending on the type specifier: 'd' and 'i' - signed, other -\n// unsigned).\ntemplate <typename T, typename Context, typename Char>\nvoid convert_arg(basic_format_arg<Context>& arg, Char type) {\n  visit_format_arg(arg_converter<T, Context>(arg, type), arg);\n}\n\n// Converts an integer argument to char for printf.\ntemplate <typename Context> class char_converter {\n private:\n  basic_format_arg<Context>& arg_;\n\n public:\n  explicit char_converter(basic_format_arg<Context>& arg) : arg_(arg) {}\n\n  template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\n  void operator()(T value) {\n    arg_ = internal::make_arg<Context>(\n        static_cast<typename Context::char_type>(value));\n  }\n\n  template <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>\n  void operator()(T) {}  // No conversion needed for non-integral types.\n};\n\n// Checks if an argument is a valid printf width specifier and sets\n// left alignment if it is negative.\ntemplate <typename Char> class printf_width_handler {\n private:\n  using format_specs = basic_format_specs<Char>;\n\n  format_specs& specs_;\n\n public:\n  explicit printf_width_handler(format_specs& specs) : specs_(specs) {}\n\n  template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\n  unsigned operator()(T value) {\n    auto width = static_cast<uint32_or_64_t<T>>(value);\n    if (internal::is_negative(value)) {\n      specs_.align = align::left;\n      width = 0 - width;\n    }\n    unsigned int_max = std::numeric_limits<int>::max();\n    if (width > int_max) FMT_THROW(format_error(\"number is too big\"));\n    return static_cast<unsigned>(width);\n  }\n\n  template <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>\n  unsigned operator()(T) {\n    FMT_THROW(format_error(\"width is not integer\"));\n    return 0;\n  }\n};\n\ntemplate <typename Char, typename Context>\nvoid printf(buffer<Char>& buf, 
basic_string_view<Char> format,\n            basic_format_args<Context> args) {\n  Context(std::back_inserter(buf), format, args).format();\n}\n\ntemplate <typename OutputIt, typename Char, typename Context>\ninternal::truncating_iterator<OutputIt> printf(\n    internal::truncating_iterator<OutputIt> it, basic_string_view<Char> format,\n    basic_format_args<Context> args) {\n  return Context(it, format, args).format();\n}\n}  // namespace internal\n\nusing internal::printf;  // For printing into memory_buffer.\n\ntemplate <typename Range> class printf_arg_formatter;\n\ntemplate <typename OutputIt, typename Char> class basic_printf_context;\n\n/**\n  \\rst\n  The ``printf`` argument formatter.\n  \\endrst\n */\ntemplate <typename Range>\nclass printf_arg_formatter : public internal::arg_formatter_base<Range> {\n public:\n  using iterator = typename Range::iterator;\n\n private:\n  using char_type = typename Range::value_type;\n  using base = internal::arg_formatter_base<Range>;\n  using context_type = basic_printf_context<iterator, char_type>;\n\n  context_type& context_;\n\n  void write_null_pointer(char) {\n    this->specs()->type = 0;\n    this->write(\"(nil)\");\n  }\n\n  void write_null_pointer(wchar_t) {\n    this->specs()->type = 0;\n    this->write(L\"(nil)\");\n  }\n\n public:\n  using format_specs = typename base::format_specs;\n\n  /**\n    \\rst\n    Constructs an argument formatter object.\n    *buffer* is a reference to the output buffer and *specs* contains format\n    specifier information for standard argument types.\n    \\endrst\n   */\n  printf_arg_formatter(iterator iter, format_specs& specs, context_type& ctx)\n      : base(Range(iter), &specs, internal::locale_ref()), context_(ctx) {}\n\n  template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>\n  iterator operator()(T value) {\n    // MSVC2013 fails to compile separate overloads for bool and char_type so\n    // use std::is_same instead.\n    if (std::is_same<T, bool>::value) {\n    
  format_specs& fmt_specs = *this->specs();\n      if (fmt_specs.type != 's') return base::operator()(value ? 1 : 0);\n      fmt_specs.type = 0;\n      this->write(value != 0);\n    } else if (std::is_same<T, char_type>::value) {\n      format_specs& fmt_specs = *this->specs();\n      if (fmt_specs.type && fmt_specs.type != 'c')\n        return (*this)(static_cast<int>(value));\n      fmt_specs.sign = sign::none;\n      fmt_specs.alt = false;\n      fmt_specs.align = align::right;\n      return base::operator()(value);\n    } else {\n      return base::operator()(value);\n    }\n    return this->out();\n  }\n\n  template <typename T, FMT_ENABLE_IF(std::is_floating_point<T>::value)>\n  iterator operator()(T value) {\n    return base::operator()(value);\n  }\n\n  /** Formats a null-terminated C string. */\n  iterator operator()(const char* value) {\n    if (value)\n      base::operator()(value);\n    else if (this->specs()->type == 'p')\n      write_null_pointer(char_type());\n    else\n      this->write(\"(null)\");\n    return this->out();\n  }\n\n  /** Formats a null-terminated wide C string. */\n  iterator operator()(const wchar_t* value) {\n    if (value)\n      base::operator()(value);\n    else if (this->specs()->type == 'p')\n      write_null_pointer(char_type());\n    else\n      this->write(L\"(null)\");\n    return this->out();\n  }\n\n  iterator operator()(basic_string_view<char_type> value) {\n    return base::operator()(value);\n  }\n\n  iterator operator()(monostate value) { return base::operator()(value); }\n\n  /** Formats a pointer. */\n  iterator operator()(const void* value) {\n    if (value) return base::operator()(value);\n    this->specs()->type = 0;\n    write_null_pointer(char_type());\n    return this->out();\n  }\n\n  /** Formats an argument of a custom (user-defined) type. 
*/\n  iterator operator()(typename basic_format_arg<context_type>::handle handle) {\n    handle.format(context_.parse_context(), context_);\n    return this->out();\n  }\n};\n\ntemplate <typename T> struct printf_formatter {\n  template <typename ParseContext>\n  auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {\n    return ctx.begin();\n  }\n\n  template <typename FormatContext>\n  auto format(const T& value, FormatContext& ctx) -> decltype(ctx.out()) {\n    internal::format_value(internal::get_container(ctx.out()), value);\n    return ctx.out();\n  }\n};\n\n/** This template formats data and writes the output to a writer. */\ntemplate <typename OutputIt, typename Char> class basic_printf_context {\n public:\n  /** The character type for the output. */\n  using char_type = Char;\n  using format_arg = basic_format_arg<basic_printf_context>;\n  template <typename T> using formatter_type = printf_formatter<T>;\n\n private:\n  using format_specs = basic_format_specs<char_type>;\n\n  OutputIt out_;\n  basic_format_args<basic_printf_context> args_;\n  basic_parse_context<Char> parse_ctx_;\n\n  static void parse_flags(format_specs& specs, const Char*& it,\n                          const Char* end);\n\n  // Returns the argument with specified index or, if arg_index is equal\n  // to the maximum unsigned value, the next argument.\n  format_arg get_arg(unsigned arg_index = std::numeric_limits<unsigned>::max());\n\n  // Parses argument index, flags and width and returns the argument index.\n  unsigned parse_header(const Char*& it, const Char* end, format_specs& specs);\n\n public:\n  /**\n   \\rst\n   Constructs a ``printf_context`` object. 
References to the arguments and\n   the writer are stored in the context object so make sure they have\n   appropriate lifetimes.\n   \\endrst\n   */\n  basic_printf_context(OutputIt out, basic_string_view<char_type> format_str,\n                       basic_format_args<basic_printf_context> args)\n      : out_(out), args_(args), parse_ctx_(format_str) {}\n\n  OutputIt out() { return out_; }\n  void advance_to(OutputIt it) { out_ = it; }\n\n  format_arg arg(unsigned id) const { return args_.get(id); }\n\n  basic_parse_context<Char>& parse_context() { return parse_ctx_; }\n\n  FMT_CONSTEXPR void on_error(const char* message) {\n    parse_ctx_.on_error(message);\n  }\n\n  /** Formats stored arguments and writes the output to the range. */\n  template <typename ArgFormatter =\n                printf_arg_formatter<internal::buffer_range<Char>>>\n  OutputIt format();\n};\n\ntemplate <typename OutputIt, typename Char>\nvoid basic_printf_context<OutputIt, Char>::parse_flags(format_specs& specs,\n                                                       const Char*& it,\n                                                       const Char* end) {\n  for (; it != end; ++it) {\n    switch (*it) {\n    case '-':\n      specs.align = align::left;\n      break;\n    case '+':\n      specs.sign = sign::plus;\n      break;\n    case '0':\n      specs.fill[0] = '0';\n      break;\n    case ' ':\n      specs.sign = sign::space;\n      break;\n    case '#':\n      specs.alt = true;\n      break;\n    default:\n      return;\n    }\n  }\n}\n\ntemplate <typename OutputIt, typename Char>\ntypename basic_printf_context<OutputIt, Char>::format_arg\nbasic_printf_context<OutputIt, Char>::get_arg(unsigned arg_index) {\n  if (arg_index == std::numeric_limits<unsigned>::max())\n    arg_index = parse_ctx_.next_arg_id();\n  else\n    parse_ctx_.check_arg_id(--arg_index);\n  return internal::get_arg(*this, arg_index);\n}\n\ntemplate <typename OutputIt, typename Char>\nunsigned 
basic_printf_context<OutputIt, Char>::parse_header(\n    const Char*& it, const Char* end, format_specs& specs) {\n  unsigned arg_index = std::numeric_limits<unsigned>::max();\n  char_type c = *it;\n  if (c >= '0' && c <= '9') {\n    // Parse an argument index (if followed by '$') or a width possibly\n    // preceded with '0' flag(s).\n    internal::error_handler eh;\n    unsigned value = parse_nonnegative_int(it, end, eh);\n    if (it != end && *it == '$') {  // value is an argument index\n      ++it;\n      arg_index = value;\n    } else {\n      if (c == '0') specs.fill[0] = '0';\n      if (value != 0) {\n        // Nonzero value means that we parsed width and don't need to\n        // parse it or flags again, so return now.\n        specs.width = value;\n        return arg_index;\n      }\n    }\n  }\n  parse_flags(specs, it, end);\n  // Parse width.\n  if (it != end) {\n    if (*it >= '0' && *it <= '9') {\n      internal::error_handler eh;\n      specs.width = parse_nonnegative_int(it, end, eh);\n    } else if (*it == '*') {\n      ++it;\n      specs.width = visit_format_arg(\n          internal::printf_width_handler<char_type>(specs), get_arg());\n    }\n  }\n  return arg_index;\n}\n\ntemplate <typename OutputIt, typename Char>\ntemplate <typename ArgFormatter>\nOutputIt basic_printf_context<OutputIt, Char>::format() {\n  auto out = this->out();\n  const Char* start = parse_ctx_.begin();\n  const Char* end = parse_ctx_.end();\n  auto it = start;\n  while (it != end) {\n    char_type c = *it++;\n    if (c != '%') continue;\n    if (it != end && *it == c) {\n      out = std::copy(start, it, out);\n      start = ++it;\n      continue;\n    }\n    out = std::copy(start, it - 1, out);\n\n    format_specs specs;\n    specs.align = align::right;\n\n    // Parse argument index, flags and width.\n    unsigned arg_index = parse_header(it, end, specs);\n\n    // Parse precision.\n    if (it != end && *it == '.') {\n      ++it;\n      c = it != end ? 
*it : 0;\n      if ('0' <= c && c <= '9') {\n        internal::error_handler eh;\n        specs.precision = static_cast<int>(parse_nonnegative_int(it, end, eh));\n      } else if (c == '*') {\n        ++it;\n        specs.precision =\n            visit_format_arg(internal::printf_precision_handler(), get_arg());\n      } else {\n        specs.precision = 0;\n      }\n    }\n\n    format_arg arg = get_arg(arg_index);\n    if (specs.alt && visit_format_arg(internal::is_zero_int(), arg))\n      specs.alt = false;\n    if (specs.fill[0] == '0') {\n      if (arg.is_arithmetic())\n        specs.align = align::numeric;\n      else\n        specs.fill[0] = ' ';  // Ignore '0' flag for non-numeric types.\n    }\n\n    // Parse length and convert the argument to the required type.\n    c = it != end ? *it++ : 0;\n    char_type t = it != end ? *it : 0;\n    using internal::convert_arg;\n    switch (c) {\n    case 'h':\n      if (t == 'h') {\n        ++it;\n        t = it != end ? *it : 0;\n        convert_arg<signed char>(arg, t);\n      } else {\n        convert_arg<short>(arg, t);\n      }\n      break;\n    case 'l':\n      if (t == 'l') {\n        ++it;\n        t = it != end ? 
*it : 0;\n        convert_arg<long long>(arg, t);\n      } else {\n        convert_arg<long>(arg, t);\n      }\n      break;\n    case 'j':\n      convert_arg<intmax_t>(arg, t);\n      break;\n    case 'z':\n      convert_arg<std::size_t>(arg, t);\n      break;\n    case 't':\n      convert_arg<std::ptrdiff_t>(arg, t);\n      break;\n    case 'L':\n      // printf produces garbage when 'L' is omitted for long double, no\n      // need to do the same.\n      break;\n    default:\n      --it;\n      convert_arg<void>(arg, c);\n    }\n\n    // Parse type.\n    if (it == end) FMT_THROW(format_error(\"invalid format string\"));\n    specs.type = static_cast<char>(*it++);\n    if (arg.is_integral()) {\n      // Normalize type.\n      switch (specs.type) {\n      case 'i':\n      case 'u':\n        specs.type = 'd';\n        break;\n      case 'c':\n        visit_format_arg(internal::char_converter<basic_printf_context>(arg),\n                         arg);\n        break;\n      }\n    }\n\n    start = it;\n\n    // Format argument.\n    visit_format_arg(ArgFormatter(out, specs, *this), arg);\n  }\n  return std::copy(start, it, out);\n}\n\ntemplate <typename Char>\nusing basic_printf_context_t =\n    basic_printf_context<std::back_insert_iterator<internal::buffer<Char>>,\n                         Char>;\n\nusing printf_context = basic_printf_context_t<char>;\nusing wprintf_context = basic_printf_context_t<wchar_t>;\n\nusing printf_args = basic_format_args<printf_context>;\nusing wprintf_args = basic_format_args<wprintf_context>;\n\n/**\n  \\rst\n  Constructs an `~fmt::format_arg_store` object that contains references to\n  arguments and can be implicitly converted to `~fmt::printf_args`.\n  \\endrst\n */\ntemplate <typename... Args>\ninline format_arg_store<printf_context, Args...> make_printf_args(\n    const Args&... 
args) {\n  return {args...};\n}\n\n/**\n  \\rst\n  Constructs an `~fmt::format_arg_store` object that contains references to\n  arguments and can be implicitly converted to `~fmt::wprintf_args`.\n  \\endrst\n */\ntemplate <typename... Args>\ninline format_arg_store<wprintf_context, Args...> make_wprintf_args(\n    const Args&... args) {\n  return {args...};\n}\n\ntemplate <typename S, typename Char = char_t<S>>\ninline std::basic_string<Char> vsprintf(\n    const S& format, basic_format_args<basic_printf_context_t<Char>> args) {\n  basic_memory_buffer<Char> buffer;\n  printf(buffer, to_string_view(format), args);\n  return to_string(buffer);\n}\n\n/**\n  \\rst\n  Formats arguments and returns the result as a string.\n\n  **Example**::\n\n    std::string message = fmt::sprintf(\"The answer is %d\", 42);\n  \\endrst\n*/\ntemplate <typename S, typename... Args,\n          typename Char = enable_if_t<internal::is_string<S>::value, char_t<S>>>\ninline std::basic_string<Char> sprintf(const S& format, const Args&... args) {\n  using context = basic_printf_context_t<Char>;\n  return vsprintf(to_string_view(format), {make_format_args<context>(args...)});\n}\n\ntemplate <typename S, typename Char = char_t<S>>\ninline int vfprintf(std::FILE* f, const S& format,\n                    basic_format_args<basic_printf_context_t<Char>> args) {\n  basic_memory_buffer<Char> buffer;\n  printf(buffer, to_string_view(format), args);\n  std::size_t size = buffer.size();\n  return std::fwrite(buffer.data(), sizeof(Char), size, f) < size\n             ? -1\n             : static_cast<int>(size);\n}\n\n/**\n  \\rst\n  Prints formatted data to the file *f*.\n\n  **Example**::\n\n    fmt::fprintf(stderr, \"Don't %s!\", \"panic\");\n  \\endrst\n */\ntemplate <typename S, typename... Args,\n          typename Char = enable_if_t<internal::is_string<S>::value, char_t<S>>>\ninline int fprintf(std::FILE* f, const S& format, const Args&... 
args) {\n  using context = basic_printf_context_t<Char>;\n  return vfprintf(f, to_string_view(format),\n                  {make_format_args<context>(args...)});\n}\n\ntemplate <typename S, typename Char = char_t<S>>\ninline int vprintf(const S& format,\n                   basic_format_args<basic_printf_context_t<Char>> args) {\n  return vfprintf(stdout, to_string_view(format), args);\n}\n\n/**\n  \\rst\n  Prints formatted data to ``stdout``.\n\n  **Example**::\n\n    fmt::printf(\"Elapsed time: %.2f seconds\", 1.23);\n  \\endrst\n */\ntemplate <typename S, typename... Args,\n          FMT_ENABLE_IF(internal::is_string<S>::value)>\ninline int printf(const S& format_str, const Args&... args) {\n  using context = basic_printf_context_t<char_t<S>>;\n  return vprintf(to_string_view(format_str),\n                 {make_format_args<context>(args...)});\n}\n\ntemplate <typename S, typename Char = char_t<S>>\ninline int vfprintf(std::basic_ostream<Char>& os, const S& format,\n                    basic_format_args<basic_printf_context_t<Char>> args) {\n  basic_memory_buffer<Char> buffer;\n  printf(buffer, to_string_view(format), args);\n  internal::write(os, buffer);\n  return static_cast<int>(buffer.size());\n}\n\n/** Formats arguments and writes the output to the range. */\ntemplate <typename ArgFormatter, typename Char,\n          typename Context =\n              basic_printf_context<typename ArgFormatter::iterator, Char>>\ntypename ArgFormatter::iterator vprintf(internal::buffer<Char>& out,\n                                        basic_string_view<Char> format_str,\n                                        basic_format_args<Context> args) {\n  typename ArgFormatter::iterator iter(out);\n  Context(iter, format_str, args).template format<ArgFormatter>();\n  return iter;\n}\n\n/**\n  \\rst\n  Prints formatted data to the stream *os*.\n\n  **Example**::\n\n    fmt::fprintf(cerr, \"Don't %s!\", \"panic\");\n  \\endrst\n */\ntemplate <typename S, typename... 
Args, typename Char = char_t<S>>\ninline int fprintf(std::basic_ostream<Char>& os, const S& format_str,\n                   const Args&... args) {\n  using context = basic_printf_context_t<Char>;\n  return vfprintf(os, to_string_view(format_str),\n                  {make_format_args<context>(args...)});\n}\nFMT_END_NAMESPACE\n\n#endif  // FMT_PRINTF_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/ranges.h",
    "content": "// Formatting library for C++ - experimental range support\n//\n// Copyright (c) 2012 - present, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n//\n// Copyright (c) 2018 - present, Remotion (Igor Schulz)\n// All Rights Reserved\n// {fmt} support for ranges, containers and types tuple interface.\n\n#ifndef FMT_RANGES_H_\n#define FMT_RANGES_H_\n\n#include <type_traits>\n#include \"format.h\"\n\n// output only up to N items from the range.\n#ifndef FMT_RANGE_OUTPUT_LENGTH_LIMIT\n#  define FMT_RANGE_OUTPUT_LENGTH_LIMIT 256\n#endif\n\nFMT_BEGIN_NAMESPACE\n\ntemplate <typename Char> struct formatting_base {\n  template <typename ParseContext>\n  FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {\n    return ctx.begin();\n  }\n};\n\ntemplate <typename Char, typename Enable = void>\nstruct formatting_range : formatting_base<Char> {\n  static FMT_CONSTEXPR_DECL const std::size_t range_length_limit =\n      FMT_RANGE_OUTPUT_LENGTH_LIMIT;  // output only up to N items from the\n                                      // range.\n  Char prefix;\n  Char delimiter;\n  Char postfix;\n  formatting_range() : prefix('{'), delimiter(','), postfix('}') {}\n  static FMT_CONSTEXPR_DECL const bool add_delimiter_spaces = true;\n  static FMT_CONSTEXPR_DECL const bool add_prepostfix_space = false;\n};\n\ntemplate <typename Char, typename Enable = void>\nstruct formatting_tuple : formatting_base<Char> {\n  Char prefix;\n  Char delimiter;\n  Char postfix;\n  formatting_tuple() : prefix('('), delimiter(','), postfix(')') {}\n  static FMT_CONSTEXPR_DECL const bool add_delimiter_spaces = true;\n  static FMT_CONSTEXPR_DECL const bool add_prepostfix_space = false;\n};\n\nnamespace internal {\n\ntemplate <typename RangeT, typename OutputIterator>\nOutputIterator copy(const RangeT& range, OutputIterator out) {\n  for (auto it = range.begin(), end = range.end(); it != end; ++it)\n    *out++ = *it;\n  return 
out;\n}\n\ntemplate <typename OutputIterator>\nOutputIterator copy(const char* str, OutputIterator out) {\n  while (*str) *out++ = *str++;\n  return out;\n}\n\ntemplate <typename OutputIterator>\nOutputIterator copy(char ch, OutputIterator out) {\n  *out++ = ch;\n  return out;\n}\n\n/// Return true value if T has std::string interface, like std::string_view.\ntemplate <typename T> class is_like_std_string {\n  template <typename U>\n  static auto check(U* p)\n      -> decltype((void)p->find('a'), p->length(), (void)p->data(), int());\n  template <typename> static void check(...);\n\n public:\n  static FMT_CONSTEXPR_DECL const bool value =\n      is_string<T>::value || !std::is_void<decltype(check<T>(nullptr))>::value;\n};\n\ntemplate <typename Char>\nstruct is_like_std_string<fmt::basic_string_view<Char>> : std::true_type {};\n\ntemplate <typename... Ts> struct conditional_helper {};\n\ntemplate <typename T, typename _ = void> struct is_range_ : std::false_type {};\n\n#if !FMT_MSC_VER || FMT_MSC_VER > 1800\ntemplate <typename T>\nstruct is_range_<\n    T, conditional_t<false,\n                     conditional_helper<decltype(std::declval<T>().begin()),\n                                        decltype(std::declval<T>().end())>,\n                     void>> : std::true_type {};\n#endif\n\n/// tuple_size and tuple_element check.\ntemplate <typename T> class is_tuple_like_ {\n  template <typename U>\n  static auto check(U* p)\n      -> decltype(std::tuple_size<U>::value,\n                  (void)std::declval<typename std::tuple_element<0, U>::type>(),\n                  int());\n  template <typename> static void check(...);\n\n public:\n  static FMT_CONSTEXPR_DECL const bool value =\n      !std::is_void<decltype(check<T>(nullptr))>::value;\n};\n\n// Check for integer_sequence\n#if defined(__cpp_lib_integer_sequence) || FMT_MSC_VER >= 1900\ntemplate <typename T, T... N>\nusing integer_sequence = std::integer_sequence<T, N...>;\ntemplate <std::size_t... 
N> using index_sequence = std::index_sequence<N...>;\ntemplate <std::size_t N>\nusing make_index_sequence = std::make_index_sequence<N>;\n#else\ntemplate <typename T, T... N> struct integer_sequence {\n  using value_type = T;\n\n  static FMT_CONSTEXPR std::size_t size() { return sizeof...(N); }\n};\n\ntemplate <std::size_t... N>\nusing index_sequence = integer_sequence<std::size_t, N...>;\n\ntemplate <typename T, std::size_t N, T... Ns>\nstruct make_integer_sequence : make_integer_sequence<T, N - 1, N - 1, Ns...> {};\ntemplate <typename T, T... Ns>\nstruct make_integer_sequence<T, 0, Ns...> : integer_sequence<T, Ns...> {};\n\ntemplate <std::size_t N>\nusing make_index_sequence = make_integer_sequence<std::size_t, N>;\n#endif\n\ntemplate <class Tuple, class F, size_t... Is>\nvoid for_each(index_sequence<Is...>, Tuple&& tup, F&& f) FMT_NOEXCEPT {\n  using std::get;\n  // using free function get<I>(T) now.\n  const int _[] = {0, ((void)f(get<Is>(tup)), 0)...};\n  (void)_;  // blocks warnings\n}\n\ntemplate <class T>\nFMT_CONSTEXPR make_index_sequence<std::tuple_size<T>::value> get_indexes(\n    T const&) {\n  return {};\n}\n\ntemplate <class Tuple, class F> void for_each(Tuple&& tup, F&& f) {\n  const auto indexes = get_indexes(tup);\n  for_each(indexes, std::forward<Tuple>(tup), std::forward<F>(f));\n}\n\ntemplate <typename Arg, FMT_ENABLE_IF(!is_like_std_string<\n                                      typename std::decay<Arg>::type>::value)>\nFMT_CONSTEXPR const char* format_str_quoted(bool add_space, const Arg&) {\n  return add_space ? \" {}\" : \"{}\";\n}\n\ntemplate <typename Arg, FMT_ENABLE_IF(is_like_std_string<\n                                      typename std::decay<Arg>::type>::value)>\nFMT_CONSTEXPR const char* format_str_quoted(bool add_space, const Arg&) {\n  return add_space ? \" \\\"{}\\\"\" : \"\\\"{}\\\"\";\n}\n\nFMT_CONSTEXPR const char* format_str_quoted(bool add_space, const char*) {\n  return add_space ? 
\" \\\"{}\\\"\" : \"\\\"{}\\\"\";\n}\nFMT_CONSTEXPR const wchar_t* format_str_quoted(bool add_space, const wchar_t*) {\n  return add_space ? L\" \\\"{}\\\"\" : L\"\\\"{}\\\"\";\n}\n\nFMT_CONSTEXPR const char* format_str_quoted(bool add_space, const char) {\n  return add_space ? \" '{}'\" : \"'{}'\";\n}\nFMT_CONSTEXPR const wchar_t* format_str_quoted(bool add_space, const wchar_t) {\n  return add_space ? L\" '{}'\" : L\"'{}'\";\n}\n\n}  // namespace internal\n\ntemplate <typename T> struct is_tuple_like {\n  static FMT_CONSTEXPR_DECL const bool value =\n      internal::is_tuple_like_<T>::value && !internal::is_range_<T>::value;\n};\n\ntemplate <typename TupleT, typename Char>\nstruct formatter<TupleT, Char, enable_if_t<fmt::is_tuple_like<TupleT>::value>> {\n private:\n  // C++11 generic lambda for format()\n  template <typename FormatContext> struct format_each {\n    template <typename T> void operator()(const T& v) {\n      if (i > 0) {\n        if (formatting.add_prepostfix_space) {\n          *out++ = ' ';\n        }\n        out = internal::copy(formatting.delimiter, out);\n      }\n      out = format_to(out,\n                      internal::format_str_quoted(\n                          (formatting.add_delimiter_spaces && i > 0), v),\n                      v);\n      ++i;\n    }\n\n    formatting_tuple<Char>& formatting;\n    std::size_t& i;\n    typename std::add_lvalue_reference<decltype(\n        std::declval<FormatContext>().out())>::type out;\n  };\n\n public:\n  formatting_tuple<Char> formatting;\n\n  template <typename ParseContext>\n  FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {\n    return formatting.parse(ctx);\n  }\n\n  template <typename FormatContext = format_context>\n  auto format(const TupleT& values, FormatContext& ctx) -> decltype(ctx.out()) {\n    auto out = ctx.out();\n    std::size_t i = 0;\n    internal::copy(formatting.prefix, out);\n\n    internal::for_each(values, format_each<FormatContext>{formatting, i, 
out});\n    if (formatting.add_prepostfix_space) {\n      *out++ = ' ';\n    }\n    internal::copy(formatting.postfix, out);\n\n    return ctx.out();\n  }\n};\n\ntemplate <typename T> struct is_range {\n  static FMT_CONSTEXPR_DECL const bool value =\n      internal::is_range_<T>::value && !internal::is_like_std_string<T>::value;\n};\n\ntemplate <typename RangeT, typename Char>\nstruct formatter<RangeT, Char, enable_if_t<fmt::is_range<RangeT>::value>> {\n  formatting_range<Char> formatting;\n\n  template <typename ParseContext>\n  FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {\n    return formatting.parse(ctx);\n  }\n\n  template <typename FormatContext>\n  typename FormatContext::iterator format(const RangeT& values,\n                                          FormatContext& ctx) {\n    auto out = internal::copy(formatting.prefix, ctx.out());\n    std::size_t i = 0;\n    for (auto it = values.begin(), end = values.end(); it != end; ++it) {\n      if (i > 0) {\n        if (formatting.add_prepostfix_space) *out++ = ' ';\n        out = internal::copy(formatting.delimiter, out);\n      }\n      out = format_to(out,\n                      internal::format_str_quoted(\n                          (formatting.add_delimiter_spaces && i > 0), *it),\n                      *it);\n      if (++i > formatting.range_length_limit) {\n        out = format_to(out, \" ... <other elements>\");\n        break;\n      }\n    }\n    if (formatting.add_prepostfix_space) *out++ = ' ';\n    return internal::copy(formatting.postfix, out);\n  }\n};\n\nFMT_END_NAMESPACE\n\n#endif  // FMT_RANGES_H_\n"
  },
  {
    "path": "src/third_party/fmt/include/fmt/safe-duration-cast.h",
    "content": "/*\n * For conversion between std::chrono::durations without undefined\n * behaviour or erroneous results.\n * This is a stripped down version of duration_cast, for inclusion in fmt.\n * See https://github.com/pauldreik/safe_duration_cast\n *\n * Copyright Paul Dreik 2019\n *\n * This file is licensed under the fmt license, see format.h\n */\n\n#include <chrono>\n#include <cmath>\n#include <limits>\n#include <type_traits>\n\n#include \"format.h\"\n\nFMT_BEGIN_NAMESPACE\n\nnamespace safe_duration_cast {\n\ntemplate <typename To, typename From,\n          FMT_ENABLE_IF(!std::is_same<From, To>::value &&\n                        std::numeric_limits<From>::is_signed ==\n                            std::numeric_limits<To>::is_signed)>\nFMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) {\n  ec = 0;\n  using F = std::numeric_limits<From>;\n  using T = std::numeric_limits<To>;\n  static_assert(F::is_integer, \"From must be integral\");\n  static_assert(T::is_integer, \"To must be integral\");\n\n  // A and B are both signed, or both unsigned.\n  if (F::digits <= T::digits) {\n    // From fits in To without any problem.\n  } else {\n    // From does not always fit in To, resort to a dynamic check.\n    if (from < T::min() || from > T::max()) {\n      // outside range.\n      ec = 1;\n      return {};\n    }\n  }\n  return static_cast<To>(from);\n}\n\n/**\n * converts From to To, without loss. 
If the dynamic value of from\n * can't be converted to To without loss, ec is set.\n */\ntemplate <typename To, typename From,\n          FMT_ENABLE_IF(!std::is_same<From, To>::value &&\n                        std::numeric_limits<From>::is_signed !=\n                            std::numeric_limits<To>::is_signed)>\nFMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) {\n  ec = 0;\n  using F = std::numeric_limits<From>;\n  using T = std::numeric_limits<To>;\n  static_assert(F::is_integer, \"From must be integral\");\n  static_assert(T::is_integer, \"To must be integral\");\n\n  if (F::is_signed && !T::is_signed) {\n    // From may be negative, not allowed!\n    if (from < 0) {\n      ec = 1;\n      return {};\n    }\n\n    // From is positive. Can it always fit in To?\n    if (F::digits <= T::digits) {\n      // yes, From always fits in To.\n    } else {\n      // from may not fit in To, we have to do a dynamic check\n      if (from > T::max()) {\n        ec = 1;\n        return {};\n      }\n    }\n  }\n\n  if (!F::is_signed && T::is_signed) {\n    // can from be held in To?\n    if (F::digits < T::digits) {\n      // yes, From always fits in To.\n    } else {\n      // from may not fit in To, we have to do a dynamic check\n      if (from > T::max()) {\n        // outside range.\n        ec = 1;\n        return {};\n      }\n    }\n  }\n\n  // reaching here means all is ok for lossless conversion.\n  return static_cast<To>(from);\n\n}  // function\n\ntemplate <typename To, typename From,\n          FMT_ENABLE_IF(std::is_same<From, To>::value)>\nFMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) {\n  ec = 0;\n  return from;\n}  // function\n\n// clang-format off\n/**\n * converts From to To if possible, otherwise ec is set.\n *\n * input                            |    output\n * ---------------------------------|---------------\n * NaN                              | NaN\n * Inf                              | Inf\n * normal, 
fits in output           | converted (possibly lossy)\n * normal, does not fit in output   | ec is set\n * subnormal                        | best effort\n * -Inf                             | -Inf\n */\n// clang-format on\ntemplate <typename To, typename From,\n          FMT_ENABLE_IF(!std::is_same<From, To>::value)>\nFMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) {\n  ec = 0;\n  using T = std::numeric_limits<To>;\n  static_assert(std::is_floating_point<From>::value, \"From must be floating\");\n  static_assert(std::is_floating_point<To>::value, \"To must be floating\");\n\n  // catch the only happy case\n  if (std::isfinite(from)) {\n    if (from >= T::lowest() && from <= T::max()) {\n      return static_cast<To>(from);\n    }\n    // not within range.\n    ec = 1;\n    return {};\n  }\n\n  // nan and inf will be preserved\n  return static_cast<To>(from);\n}  // function\n\ntemplate <typename To, typename From,\n          FMT_ENABLE_IF(std::is_same<From, To>::value)>\nFMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) {\n  ec = 0;\n  static_assert(std::is_floating_point<From>::value, \"From must be floating\");\n  return from;\n}\n\n/**\n * safe duration cast between integral durations\n */\ntemplate <typename To, typename FromRep, typename FromPeriod,\n          FMT_ENABLE_IF(std::is_integral<FromRep>::value),\n          FMT_ENABLE_IF(std::is_integral<typename To::rep>::value)>\nTo safe_duration_cast(std::chrono::duration<FromRep, FromPeriod> from,\n                      int& ec) {\n  using From = std::chrono::duration<FromRep, FromPeriod>;\n  ec = 0;\n  // the basic idea is that we need to convert from count() in the from type\n  // to count() in the To type, by multiplying it with this:\n  using Factor = std::ratio_divide<typename From::period, typename To::period>;\n\n  static_assert(Factor::num > 0, \"num must be positive\");\n  static_assert(Factor::den > 0, \"den must be positive\");\n\n  // the conversion is like this: 
multiply from.count() with Factor::num\n  // /Factor::den and convert it to To::rep, all this without\n  // overflow/underflow. let's start by finding a suitable type that can hold\n  // both To, From and Factor::num\n  using IntermediateRep =\n      typename std::common_type<typename From::rep, typename To::rep,\n                                decltype(Factor::num)>::type;\n\n  // safe conversion to IntermediateRep\n  IntermediateRep count =\n      lossless_integral_conversion<IntermediateRep>(from.count(), ec);\n  if (ec) {\n    return {};\n  }\n  // multiply with Factor::num without overflow or underflow\n  if (Factor::num != 1) {\n    constexpr auto max1 =\n        std::numeric_limits<IntermediateRep>::max() / Factor::num;\n    if (count > max1) {\n      ec = 1;\n      return {};\n    }\n    constexpr auto min1 =\n        std::numeric_limits<IntermediateRep>::min() / Factor::num;\n    if (count < min1) {\n      ec = 1;\n      return {};\n    }\n    count *= Factor::num;\n  }\n\n  // this can't go wrong, right? den>0 is checked earlier.\n  if (Factor::den != 1) {\n    count /= Factor::den;\n  }\n  // convert to the to type, safely\n  using ToRep = typename To::rep;\n  const ToRep tocount = lossless_integral_conversion<ToRep>(count, ec);\n  if (ec) {\n    return {};\n  }\n  return To{tocount};\n}\n\n/**\n * safe duration_cast between floating point durations\n */\ntemplate <typename To, typename FromRep, typename FromPeriod,\n          FMT_ENABLE_IF(std::is_floating_point<FromRep>::value),\n          FMT_ENABLE_IF(std::is_floating_point<typename To::rep>::value)>\nTo safe_duration_cast(std::chrono::duration<FromRep, FromPeriod> from,\n                      int& ec) {\n  using From = std::chrono::duration<FromRep, FromPeriod>;\n  ec = 0;\n  if (std::isnan(from.count())) {\n    // nan in, gives nan out. 
easy.\n    return To{std::numeric_limits<typename To::rep>::quiet_NaN()};\n  }\n  // maybe we should also check if from is denormal, and decide what to do about\n  // it.\n\n  // +-inf should be preserved.\n  if (std::isinf(from.count())) {\n    return To{from.count()};\n  }\n\n  // the basic idea is that we need to convert from count() in the from type\n  // to count() in the To type, by multiplying it with this:\n  using Factor = std::ratio_divide<typename From::period, typename To::period>;\n\n  static_assert(Factor::num > 0, \"num must be positive\");\n  static_assert(Factor::den > 0, \"den must be positive\");\n\n  // the conversion is like this: multiply from.count() with Factor::num\n  // /Factor::den and convert it to To::rep, all this without\n  // overflow/underflow. let's start by finding a suitable type that can hold\n  // both To, From and Factor::num\n  using IntermediateRep =\n      typename std::common_type<typename From::rep, typename To::rep,\n                                decltype(Factor::num)>::type;\n\n  // force conversion of From::rep -> IntermediateRep to be safe,\n  // even if it will never happen be narrowing in this context.\n  IntermediateRep count =\n      safe_float_conversion<IntermediateRep>(from.count(), ec);\n  if (ec) {\n    return {};\n  }\n\n  // multiply with Factor::num without overflow or underflow\n  if (Factor::num != 1) {\n    constexpr auto max1 =\n        std::numeric_limits<IntermediateRep>::max() /\n        static_cast<IntermediateRep>(Factor::num);\n    if (count > max1) {\n      ec = 1;\n      return {};\n    }\n    constexpr auto min1 =\n        std::numeric_limits<IntermediateRep>::lowest() /\n        static_cast<IntermediateRep>(Factor::num);\n    if (count < min1) {\n      ec = 1;\n      return {};\n    }\n    count *= static_cast<IntermediateRep>(Factor::num);\n  }\n\n  // this can't go wrong, right? 
den>0 is checked earlier.\n  if (Factor::den != 1) {\n    using common_t = typename std::common_type<IntermediateRep, intmax_t>::type;\n    count /= static_cast<common_t>(Factor::den);\n  }\n\n  // convert to the to type, safely\n  using ToRep = typename To::rep;\n\n  const ToRep tocount = safe_float_conversion<ToRep>(count, ec);\n  if (ec) {\n    return {};\n  }\n  return To{tocount};\n}\n\n}  // namespace safe_duration_cast\n\nFMT_END_NAMESPACE\n"
  },
  {
    "path": "src/third_party/fmt/src/format.cc",
    "content": "// Formatting library for C++\n//\n// Copyright (c) 2012 - 2016, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n#include \"fmt/format-inl.h\"\n\nFMT_BEGIN_NAMESPACE\ntemplate struct FMT_API internal::basic_data<void>;\n\n// Workaround a bug in MSVC2013 that prevents instantiation of grisu_format.\nbool (*instantiate_grisu_format)(double, internal::buffer<char>&, int, unsigned,\n                                 int&) = internal::grisu_format;\n\n#ifndef FMT_STATIC_THOUSANDS_SEPARATOR\ntemplate FMT_API internal::locale_ref::locale_ref(const std::locale& loc);\ntemplate FMT_API std::locale internal::locale_ref::get<std::locale>() const;\n#endif\n\n// Explicit instantiations for char.\n\ntemplate FMT_API char internal::thousands_sep_impl(locale_ref);\ntemplate FMT_API char internal::decimal_point_impl(locale_ref);\n\ntemplate FMT_API void internal::buffer<char>::append(const char*, const char*);\n\ntemplate FMT_API void internal::arg_map<format_context>::init(\n    const basic_format_args<format_context>& args);\n\ntemplate FMT_API std::string internal::vformat<char>(\n    string_view, basic_format_args<format_context>);\n\ntemplate FMT_API format_context::iterator internal::vformat_to(\n    internal::buffer<char>&, string_view, basic_format_args<format_context>);\n\ntemplate FMT_API char* internal::sprintf_format(double, internal::buffer<char>&,\n                                                sprintf_specs);\ntemplate FMT_API char* internal::sprintf_format(long double,\n                                                internal::buffer<char>&,\n                                                sprintf_specs);\n\n// Explicit instantiations for wchar_t.\n\ntemplate FMT_API wchar_t internal::thousands_sep_impl(locale_ref);\ntemplate FMT_API wchar_t internal::decimal_point_impl(locale_ref);\n\ntemplate FMT_API void internal::buffer<wchar_t>::append(const wchar_t*,\n                                           
             const wchar_t*);\n\ntemplate FMT_API void internal::arg_map<wformat_context>::init(\n    const basic_format_args<wformat_context>&);\n\ntemplate FMT_API std::wstring internal::vformat<wchar_t>(\n    wstring_view, basic_format_args<wformat_context>);\nFMT_END_NAMESPACE\n"
  },
  {
    "path": "src/third_party/fmt/src/posix.cc",
    "content": "// A C++ interface to POSIX functions.\n//\n// Copyright (c) 2012 - 2016, Victor Zverovich\n// All rights reserved.\n//\n// For the license information refer to format.h.\n\n// Disable bogus MSVC warnings.\n#if !defined(_CRT_SECURE_NO_WARNINGS) && defined(_MSC_VER)\n#  define _CRT_SECURE_NO_WARNINGS\n#endif\n\n#include \"fmt/posix.h\"\n\n#include <limits.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n\n#ifndef _WIN32\n#  include <unistd.h>\n#else\n#  ifndef WIN32_LEAN_AND_MEAN\n#    define WIN32_LEAN_AND_MEAN\n#  endif\n#  include <io.h>\n#  include <windows.h>\n\n#  define O_CREAT _O_CREAT\n#  define O_TRUNC _O_TRUNC\n\n#  ifndef S_IRUSR\n#    define S_IRUSR _S_IREAD\n#  endif\n\n#  ifndef S_IWUSR\n#    define S_IWUSR _S_IWRITE\n#  endif\n\n#  ifdef __MINGW32__\n#    define _SH_DENYNO 0x40\n#  endif\n\n#endif  // _WIN32\n\n#ifdef fileno\n#  undef fileno\n#endif\n\nnamespace {\n#ifdef _WIN32\n// Return type of read and write functions.\ntypedef int RWResult;\n\n// On Windows the count argument to read and write is unsigned, so convert\n// it from size_t preventing integer overflow.\ninline unsigned convert_rwcount(std::size_t count) {\n  return count <= UINT_MAX ? 
static_cast<unsigned>(count) : UINT_MAX;\n}\n#else\n// Return type of read and write functions.\ntypedef ssize_t RWResult;\n\ninline std::size_t convert_rwcount(std::size_t count) { return count; }\n#endif\n}  // namespace\n\nFMT_BEGIN_NAMESPACE\n\nbuffered_file::~buffered_file() FMT_NOEXCEPT {\n  if (file_ && FMT_SYSTEM(fclose(file_)) != 0)\n    report_system_error(errno, \"cannot close file\");\n}\n\nbuffered_file::buffered_file(cstring_view filename, cstring_view mode) {\n  FMT_RETRY_VAL(file_, FMT_SYSTEM(fopen(filename.c_str(), mode.c_str())),\n                nullptr);\n  if (!file_)\n    FMT_THROW(system_error(errno, \"cannot open file {}\", filename.c_str()));\n}\n\nvoid buffered_file::close() {\n  if (!file_) return;\n  int result = FMT_SYSTEM(fclose(file_));\n  file_ = nullptr;\n  if (result != 0) FMT_THROW(system_error(errno, \"cannot close file\"));\n}\n\n// A macro used to prevent expansion of fileno on broken versions of MinGW.\n#define FMT_ARGS\n\nint buffered_file::fileno() const {\n  int fd = FMT_POSIX_CALL(fileno FMT_ARGS(file_));\n  if (fd == -1) FMT_THROW(system_error(errno, \"cannot get file descriptor\"));\n  return fd;\n}\n\nfile::file(cstring_view path, int oflag) {\n  int mode = S_IRUSR | S_IWUSR;\n#if defined(_WIN32) && !defined(__MINGW32__)\n  fd_ = -1;\n  FMT_POSIX_CALL(sopen_s(&fd_, path.c_str(), oflag, _SH_DENYNO, mode));\n#else\n  FMT_RETRY(fd_, FMT_POSIX_CALL(open(path.c_str(), oflag, mode)));\n#endif\n  if (fd_ == -1)\n    FMT_THROW(system_error(errno, \"cannot open file {}\", path.c_str()));\n}\n\nfile::~file() FMT_NOEXCEPT {\n  // Don't retry close in case of EINTR!\n  // See http://linux.derkeiler.com/Mailing-Lists/Kernel/2005-09/3000.html\n  if (fd_ != -1 && FMT_POSIX_CALL(close(fd_)) != 0)\n    report_system_error(errno, \"cannot close file\");\n}\n\nvoid file::close() {\n  if (fd_ == -1) return;\n  // Don't retry close in case of EINTR!\n  // See http://linux.derkeiler.com/Mailing-Lists/Kernel/2005-09/3000.html\n  int result = 
FMT_POSIX_CALL(close(fd_));\n  fd_ = -1;\n  if (result != 0) FMT_THROW(system_error(errno, \"cannot close file\"));\n}\n\nlong long file::size() const {\n#ifdef _WIN32\n  // Use GetFileSize instead of GetFileSizeEx for the case when _WIN32_WINNT\n  // is less than 0x0500 as is the case with some default MinGW builds.\n  // Both functions support large file sizes.\n  DWORD size_upper = 0;\n  HANDLE handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd_));\n  DWORD size_lower = FMT_SYSTEM(GetFileSize(handle, &size_upper));\n  if (size_lower == INVALID_FILE_SIZE) {\n    DWORD error = GetLastError();\n    if (error != NO_ERROR)\n      FMT_THROW(windows_error(GetLastError(), \"cannot get file size\"));\n  }\n  unsigned long long long_size = size_upper;\n  return (long_size << sizeof(DWORD) * CHAR_BIT) | size_lower;\n#else\n  typedef struct stat Stat;\n  Stat file_stat = Stat();\n  if (FMT_POSIX_CALL(fstat(fd_, &file_stat)) == -1)\n    FMT_THROW(system_error(errno, \"cannot get file attributes\"));\n  static_assert(sizeof(long long) >= sizeof(file_stat.st_size),\n                \"return type of file::size is not large enough\");\n  return file_stat.st_size;\n#endif\n}\n\nstd::size_t file::read(void* buffer, std::size_t count) {\n  RWResult result = 0;\n  FMT_RETRY(result, FMT_POSIX_CALL(read(fd_, buffer, convert_rwcount(count))));\n  if (result < 0) FMT_THROW(system_error(errno, \"cannot read from file\"));\n  return internal::to_unsigned(result);\n}\n\nstd::size_t file::write(const void* buffer, std::size_t count) {\n  RWResult result = 0;\n  FMT_RETRY(result, FMT_POSIX_CALL(write(fd_, buffer, convert_rwcount(count))));\n  if (result < 0) FMT_THROW(system_error(errno, \"cannot write to file\"));\n  return internal::to_unsigned(result);\n}\n\nfile file::dup(int fd) {\n  // Don't retry as dup doesn't return EINTR.\n  // http://pubs.opengroup.org/onlinepubs/009695399/functions/dup.html\n  int new_fd = FMT_POSIX_CALL(dup(fd));\n  if (new_fd == -1)\n    
FMT_THROW(system_error(errno, \"cannot duplicate file descriptor {}\", fd));\n  return file(new_fd);\n}\n\nvoid file::dup2(int fd) {\n  int result = 0;\n  FMT_RETRY(result, FMT_POSIX_CALL(dup2(fd_, fd)));\n  if (result == -1) {\n    FMT_THROW(system_error(errno, \"cannot duplicate file descriptor {} to {}\",\n                           fd_, fd));\n  }\n}\n\nvoid file::dup2(int fd, error_code& ec) FMT_NOEXCEPT {\n  int result = 0;\n  FMT_RETRY(result, FMT_POSIX_CALL(dup2(fd_, fd)));\n  if (result == -1) ec = error_code(errno);\n}\n\nvoid file::pipe(file& read_end, file& write_end) {\n  // Close the descriptors first to make sure that assignments don't throw\n  // and there are no leaks.\n  read_end.close();\n  write_end.close();\n  int fds[2] = {};\n#ifdef _WIN32\n  // Make the default pipe capacity same as on Linux 2.6.11+.\n  enum { DEFAULT_CAPACITY = 65536 };\n  int result = FMT_POSIX_CALL(pipe(fds, DEFAULT_CAPACITY, _O_BINARY));\n#else\n  // Don't retry as the pipe function doesn't return EINTR.\n  // http://pubs.opengroup.org/onlinepubs/009696799/functions/pipe.html\n  int result = FMT_POSIX_CALL(pipe(fds));\n#endif\n  if (result != 0) FMT_THROW(system_error(errno, \"cannot create pipe\"));\n  // The following assignments don't throw because read_fd and write_fd\n  // are closed.\n  read_end = file(fds[0]);\n  write_end = file(fds[1]);\n}\n\nbuffered_file file::fdopen(const char* mode) {\n  // Don't retry as fdopen doesn't return EINTR.\n  FILE* f = FMT_POSIX_CALL(fdopen(fd_, mode));\n  if (!f)\n    FMT_THROW(\n        system_error(errno, \"cannot associate stream with file descriptor\"));\n  buffered_file bf(f);\n  fd_ = -1;\n  return bf;\n}\n\nlong getpagesize() {\n#ifdef _WIN32\n  SYSTEM_INFO si;\n  GetSystemInfo(&si);\n  return si.dwPageSize;\n#else\n  long size = FMT_POSIX_CALL(sysconf(_SC_PAGESIZE));\n  if (size < 0) FMT_THROW(system_error(errno, \"cannot get memory page size\"));\n  return size;\n#endif\n}\nFMT_END_NAMESPACE\n"
  },
  {
    "path": "src/third_party/fmt/support/cmake/FindSetEnv.cmake",
    "content": "# A CMake script to find SetEnv.cmd.\n\nfind_program(WINSDK_SETENV NAMES SetEnv.cmd\n  PATHS \"[HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Microsoft\\\\Microsoft SDKs\\\\Windows;CurrentInstallFolder]/bin\")\nif (WINSDK_SETENV AND PRINT_PATH)\n  execute_process(COMMAND ${CMAKE_COMMAND} -E echo \"${WINSDK_SETENV}\")\nendif ()\n"
  },
  {
    "path": "src/third_party/fmt/support/cmake/cxx14.cmake",
    "content": "# C++14 feature support detection\n\ninclude(CheckCXXSourceCompiles)\ninclude(CheckCXXCompilerFlag)\n\nif (NOT CMAKE_CXX_STANDARD)\n  set(CMAKE_CXX_STANDARD 11)\nendif()\nmessage(STATUS \"CXX_STANDARD: ${CMAKE_CXX_STANDARD}\")\n\nif (CMAKE_CXX_STANDARD EQUAL 20)\n  check_cxx_compiler_flag(-std=c++20 has_std_20_flag)\n  check_cxx_compiler_flag(-std=c++2a has_std_2a_flag)\n\n  if (has_std_20_flag)\n    set(CXX_STANDARD_FLAG -std=c++20)\n  elseif (has_std_2a_flag)\n    set(CXX_STANDARD_FLAG -std=c++2a)\n  endif ()\nelseif (CMAKE_CXX_STANDARD EQUAL 17)\n  check_cxx_compiler_flag(-std=c++17 has_std_17_flag)\n  check_cxx_compiler_flag(-std=c++1z has_std_1z_flag)\n\n  if (has_std_17_flag)\n    set(CXX_STANDARD_FLAG -std=c++17)\n  elseif (has_std_1z_flag)\n    set(CXX_STANDARD_FLAG -std=c++1z)\n  endif ()\nelseif (CMAKE_CXX_STANDARD EQUAL 14)\n  check_cxx_compiler_flag(-std=c++14 has_std_14_flag)\n  check_cxx_compiler_flag(-std=c++1y has_std_1y_flag)\n\n  if (has_std_14_flag)\n    set(CXX_STANDARD_FLAG -std=c++14)\n  elseif (has_std_1y_flag)\n    set(CXX_STANDARD_FLAG -std=c++1y)\n  endif ()\nelseif (CMAKE_CXX_STANDARD EQUAL 11)\n  check_cxx_compiler_flag(-std=c++11 has_std_11_flag)\n  check_cxx_compiler_flag(-std=c++0x has_std_0x_flag)\n\n  if (has_std_11_flag)\n    set(CXX_STANDARD_FLAG -std=c++11)\n  elseif (has_std_0x_flag)\n    set(CXX_STANDARD_FLAG -std=c++0x)\n  endif ()\nendif ()\n\nset(CMAKE_REQUIRED_FLAGS ${CXX_STANDARD_FLAG})\n\n# Check if variadic templates are working and not affected by GCC bug 39653:\n# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=39653\n# Can be removed once gcc 4.4 support is dropped.\ncheck_cxx_source_compiles(\"\n  template <class T, class ...Types>\n  struct S { typedef typename S<Types...>::type type; };\n  int main() {}\" SUPPORTS_VARIADIC_TEMPLATES)\nif (NOT SUPPORTS_VARIADIC_TEMPLATES)\n  set (SUPPORTS_VARIADIC_TEMPLATES OFF)\nendif ()\n\n# Check if user-defined literals are available\ncheck_cxx_source_compiles(\"\n  
void operator\\\"\\\" _udl(long double);\n  int main() {}\"\n  SUPPORTS_USER_DEFINED_LITERALS)\nif (NOT SUPPORTS_USER_DEFINED_LITERALS)\n  set (SUPPORTS_USER_DEFINED_LITERALS OFF)\nendif ()\n\n# Check if <variant> is available\nset(CMAKE_REQUIRED_FLAGS -std=c++1z)\ncheck_cxx_source_compiles(\"\n  #include <variant>\n  int main() {}\"\n  FMT_HAS_VARIANT)\nif (NOT FMT_HAS_VARIANT)\n  set (FMT_HAS_VARIANT OFF)\nendif ()\n\nset(CMAKE_REQUIRED_FLAGS )\n"
  },
  {
    "path": "src/third_party/fmt/support/cmake/fmt-config.cmake.in",
    "content": "@PACKAGE_INIT@\n\ninclude(${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake)\ncheck_required_components(fmt)\n"
  },
  {
    "path": "src/third_party/fmt/support/cmake/fmt.pc.in",
    "content": "prefix=@CMAKE_INSTALL_PREFIX@\nexec_prefix=@CMAKE_INSTALL_PREFIX@\nlibdir=@CMAKE_INSTALL_FULL_LIBDIR@\nincludedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@\n\nName: fmt\nDescription: A modern formatting library\nVersion: @FMT_VERSION@\nLibs: -L${libdir} -lfmt\nCflags: -I${includedir}\n\n"
  },
  {
    "path": "src/third_party/pybind11/.appveyor.yml",
    "content": "version: 1.0.{build}\nimage:\n- Visual Studio 2017\n- Visual Studio 2015\ntest: off\nskip_branch_with_pr: true\nbuild:\n  parallel: true\nplatform:\n- x64\n- x86\nenvironment:\n  matrix:\n  - PYTHON: 36\n    CPP: 14\n    CONFIG: Debug\n  - PYTHON: 27\n    CPP: 14\n    CONFIG: Debug\n  - CONDA: 36\n    CPP: latest\n    CONFIG: Release\nmatrix:\n  exclude:\n    - image: Visual Studio 2015\n      platform: x86\n    - image: Visual Studio 2015\n      CPP: latest\n    - image: Visual Studio 2017\n      CPP: latest\n      platform: x86\ninstall:\n- ps: |\n    if ($env:PLATFORM -eq \"x64\") { $env:CMAKE_ARCH = \"x64\" }\n    if ($env:APPVEYOR_JOB_NAME -like \"*Visual Studio 2017*\") {\n      $env:CMAKE_GENERATOR = \"Visual Studio 15 2017\"\n      $env:CMAKE_INCLUDE_PATH = \"C:\\Libraries\\boost_1_64_0\"\n      $env:CXXFLAGS = \"-permissive-\"\n    } else {\n      $env:CMAKE_GENERATOR = \"Visual Studio 14 2015\"\n    }\n    if ($env:PYTHON) {\n      if ($env:PLATFORM -eq \"x64\") { $env:PYTHON = \"$env:PYTHON-x64\" }\n      $env:PATH = \"C:\\Python$env:PYTHON\\;C:\\Python$env:PYTHON\\Scripts\\;$env:PATH\"\n      python -W ignore -m pip install --upgrade pip wheel\n      python -W ignore -m pip install pytest numpy --no-warn-script-location\n    } elseif ($env:CONDA) {\n      if ($env:CONDA -eq \"27\") { $env:CONDA = \"\" }\n      if ($env:PLATFORM -eq \"x64\") { $env:CONDA = \"$env:CONDA-x64\" }\n      $env:PATH = \"C:\\Miniconda$env:CONDA\\;C:\\Miniconda$env:CONDA\\Scripts\\;$env:PATH\"\n      $env:PYTHONHOME = \"C:\\Miniconda$env:CONDA\"\n      conda --version\n      conda install -y -q pytest numpy scipy\n    }\n- ps: |\n    Start-FileDownload 'http://bitbucket.org/eigen/eigen/get/3.3.3.zip'\n    7z x 3.3.3.zip -y > $null\n    $env:CMAKE_INCLUDE_PATH = \"eigen-eigen-67e894c6cd8f;$env:CMAKE_INCLUDE_PATH\"\nbuild_script:\n- cmake -G \"%CMAKE_GENERATOR%\" -A \"%CMAKE_ARCH%\"\n    -DPYBIND11_CPP_STANDARD=/std:c++%CPP%\n    -DPYBIND11_WERROR=ON\n    
-DDOWNLOAD_CATCH=ON\n    -DCMAKE_SUPPRESS_REGENERATION=1\n    .\n- set MSBuildLogger=\"C:\\Program Files\\AppVeyor\\BuildAgent\\Appveyor.MSBuildLogger.dll\"\n- cmake --build . --config %CONFIG% --target pytest -- /m /v:m /logger:%MSBuildLogger%\n- cmake --build . --config %CONFIG% --target cpptest -- /m /v:m /logger:%MSBuildLogger%\n- if \"%CPP%\"==\"latest\" (cmake --build . --config %CONFIG% --target test_cmake_build -- /m /v:m /logger:%MSBuildLogger%)\non_failure: if exist \"tests\\test_cmake_build\" type tests\\test_cmake_build\\*.log*\n"
  },
  {
    "path": "src/third_party/pybind11/.readthedocs.yml",
    "content": "python:\n  version: 3\nrequirements_file: docs/requirements.txt\n"
  },
  {
    "path": "src/third_party/pybind11/.travis.yml",
    "content": "language: cpp\nmatrix:\n  include:\n  # This config does a few things:\n  # - Checks C++ and Python code styles (check-style.sh and flake8).\n  # - Makes sure sphinx can build the docs without any errors or warnings.\n  # - Tests setup.py sdist and install (all header files should be present).\n  # - Makes sure that everything still works without optional deps (numpy/scipy/eigen) and\n  #   also tests the automatic discovery functions in CMake (Python version, C++ standard).\n  - os: linux\n    dist: xenial # Necessary to run doxygen 1.8.15\n    name: Style, docs, and pip\n    cache: false\n    before_install:\n    - pyenv global $(pyenv whence 2to3)  # activate all python versions\n    - PY_CMD=python3\n    - $PY_CMD -m pip install --user --upgrade pip wheel setuptools\n    install: # Breathe does not yet support Sphinx 2\n    - $PY_CMD -m pip install --user --upgrade \"sphinx<2\" sphinx_rtd_theme breathe flake8 pep8-naming pytest\n    - curl -fsSL https://sourceforge.net/projects/doxygen/files/rel-1.8.15/doxygen-1.8.15.linux.bin.tar.gz/download | tar xz\n    - export PATH=\"$PWD/doxygen-1.8.15/bin:$PATH\"\n    script:\n    - tools/check-style.sh\n    - flake8\n    - $PY_CMD -m sphinx -W -b html docs docs/.build\n    - |\n      # Make sure setup.py distributes and installs all the headers\n      $PY_CMD setup.py sdist\n      $PY_CMD -m pip install --user -U ./dist/*\n      installed=$($PY_CMD -c \"import pybind11; print(pybind11.get_include(True) + '/pybind11')\")\n      diff -rq $installed ./include/pybind11\n    - |\n      # Barebones build\n      cmake -DCMAKE_BUILD_TYPE=Debug -DPYBIND11_WERROR=ON -DDOWNLOAD_CATCH=ON -DPYTHON_EXECUTABLE=$(which $PY_CMD) .\n      make pytest -j 2\n      make cpptest -j 2\n  # The following are regular test configurations, including optional dependencies.\n  # With regard to each other they differ in Python version, C++ standard and compiler.\n  - os: linux\n    dist: trusty\n    name: Python 2.7, c++11, gcc 4.8\n 
   env: PYTHON=2.7 CPP=11 GCC=4.8\n    addons:\n      apt:\n        packages:\n          - cmake=2.\\*\n          - cmake-data=2.\\*\n  - os: linux\n    dist: trusty\n    name: Python 3.6, c++11, gcc 4.8\n    env: PYTHON=3.6 CPP=11 GCC=4.8\n    addons:\n      apt:\n        sources:\n          - deadsnakes\n        packages:\n          - python3.6-dev\n          - python3.6-venv\n          - cmake=2.\\*\n          - cmake-data=2.\\*\n  - os: linux\n    dist: trusty\n    env: PYTHON=2.7 CPP=14 GCC=6 CMAKE=1\n    name: Python 2.7, c++14, gcc 4.8, CMake test\n    addons:\n      apt:\n        sources:\n          - ubuntu-toolchain-r-test\n        packages:\n          - g++-6\n  - os: linux\n    dist: trusty\n    name: Python 3.5, c++14, gcc 6, Debug build\n    # N.B. `ensurepip` could be installed transitively by `python3.5-venv`, but\n    # seems to have apt conflicts (at least for Trusty). Use Docker instead.\n    services: docker\n    env: DOCKER=debian:stretch PYTHON=3.5 CPP=14 GCC=6 DEBUG=1\n  - os: linux\n    dist: xenial\n    env: PYTHON=3.6 CPP=17 GCC=7\n    name: Python 3.6, c++17, gcc 7\n    addons:\n      apt:\n        sources:\n          - deadsnakes\n          - ubuntu-toolchain-r-test\n        packages:\n          - g++-7\n          - python3.6-dev\n          - python3.6-venv\n  - os: linux\n    dist: xenial\n    env: PYTHON=3.6 CPP=17 CLANG=7\n    name: Python 3.6, c++17, Clang 7\n    addons:\n      apt:\n        sources:\n          - deadsnakes\n          - llvm-toolchain-xenial-7\n        packages:\n          - python3.6-dev\n          - python3.6-venv\n          - clang-7\n          - libclang-7-dev\n          - llvm-7-dev\n          - lld-7\n          - libc++-7-dev\n          - libc++abi-7-dev  # Why is this necessary???\n  - os: osx\n    name: Python 2.7, c++14, AppleClang 7.3, CMake test\n    osx_image: xcode7.3\n    env: PYTHON=2.7 CPP=14 CLANG CMAKE=1\n  - os: osx\n    name: Python 3.7, c++14, AppleClang 9, Debug build\n    osx_image: xcode9\n    
env: PYTHON=3.7 CPP=14 CLANG DEBUG=1\n  # Test a PyPy 2.7 build\n  - os: linux\n    dist: trusty\n    env: PYPY=5.8 PYTHON=2.7 CPP=11 GCC=4.8\n    name: PyPy 5.8, Python 2.7, c++11, gcc 4.8\n    addons:\n      apt:\n        packages:\n          - libblas-dev\n          - liblapack-dev\n          - gfortran\n  # Build in 32-bit mode and tests against the CMake-installed version\n  - os: linux\n    dist: trusty\n    services: docker\n    env: DOCKER=i386/debian:stretch PYTHON=3.5 CPP=14 GCC=6 INSTALL=1\n    name: Python 3.4, c++14, gcc 6, 32-bit\n    script:\n      - |\n        # Consolidated 32-bit Docker Build + Install\n        set -ex\n        $SCRIPT_RUN_PREFIX sh -c \"\n          set -ex\n          cmake ${CMAKE_EXTRA_ARGS} -DPYBIND11_INSTALL=1 -DPYBIND11_TEST=0 .\n          make install\n          cp -a tests /pybind11-tests\n          mkdir /build-tests && cd /build-tests\n          cmake ../pybind11-tests ${CMAKE_EXTRA_ARGS} -DPYBIND11_WERROR=ON\n          make pytest -j 2\"\n        set +ex\ncache:\n  directories:\n  - $HOME/.local/bin\n  - $HOME/.local/lib\n  - $HOME/.local/include\n  - $HOME/Library/Python\nbefore_install:\n- |\n  # Configure build variables\n  set -ex\n  if [ \"$TRAVIS_OS_NAME\" = \"linux\" ]; then\n    if [ -n \"$CLANG\" ]; then\n      export CXX=clang++-$CLANG CC=clang-$CLANG\n      EXTRA_PACKAGES+=\" clang-$CLANG llvm-$CLANG-dev\"\n    else\n      if [ -z \"$GCC\" ]; then GCC=4.8\n      else EXTRA_PACKAGES+=\" g++-$GCC\"\n      fi\n      export CXX=g++-$GCC CC=gcc-$GCC\n    fi\n  elif [ \"$TRAVIS_OS_NAME\" = \"osx\" ]; then\n    export CXX=clang++ CC=clang;\n  fi\n  if [ -n \"$CPP\" ]; then CPP=-std=c++$CPP; fi\n  if [ \"${PYTHON:0:1}\" = \"3\" ]; then PY=3; fi\n  if [ -n \"$DEBUG\" ]; then CMAKE_EXTRA_ARGS+=\" -DCMAKE_BUILD_TYPE=Debug\"; fi\n  set +ex\n- |\n  # Initialize environment\n  set -ex\n  if [ -n \"$DOCKER\" ]; then\n    docker pull $DOCKER\n\n    containerid=$(docker run --detach --tty \\\n      --volume=\"$PWD\":/pybind11 
--workdir=/pybind11 \\\n      --env=\"CC=$CC\" --env=\"CXX=$CXX\" --env=\"DEBIAN_FRONTEND=$DEBIAN_FRONTEND\" \\\n      --env=GCC_COLORS=\\  \\\n      $DOCKER)\n    SCRIPT_RUN_PREFIX=\"docker exec --tty $containerid\"\n    $SCRIPT_RUN_PREFIX sh -c 'for s in 0 15; do sleep $s; apt-get update && apt-get -qy dist-upgrade && break; done'\n  else\n    if [ \"$PYPY\" = \"5.8\" ]; then\n      curl -fSL https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.8.0-linux64.tar.bz2 | tar xj\n      PY_CMD=$(echo `pwd`/pypy2-v5.8.0-linux64/bin/pypy)\n      CMAKE_EXTRA_ARGS+=\" -DPYTHON_EXECUTABLE:FILEPATH=$PY_CMD\"\n    else\n      PY_CMD=python$PYTHON\n      if [ \"$TRAVIS_OS_NAME\" = \"osx\" ]; then\n        if [ \"$PY\" = \"3\" ]; then\n          brew update && brew upgrade python\n        else\n          curl -fsSL https://bootstrap.pypa.io/get-pip.py | $PY_CMD - --user\n        fi\n      fi\n    fi\n    if [ \"$PY\" = 3 ] || [ -n \"$PYPY\" ]; then\n      $PY_CMD -m ensurepip --user\n    fi\n    $PY_CMD --version\n    $PY_CMD -m pip install --user --upgrade pip wheel\n  fi\n  set +ex\ninstall:\n- |\n  # Install dependencies\n  set -ex\n  cmake --version\n  if [ -n \"$DOCKER\" ]; then\n    if [ -n \"$DEBUG\" ]; then\n      PY_DEBUG=\"python$PYTHON-dbg python$PY-scipy-dbg\"\n      CMAKE_EXTRA_ARGS+=\" -DPYTHON_EXECUTABLE=/usr/bin/python${PYTHON}dm\"\n    fi\n    $SCRIPT_RUN_PREFIX sh -c \"for s in 0 15; do sleep \\$s; \\\n      apt-get -qy --no-install-recommends install \\\n        $PY_DEBUG python$PYTHON-dev python$PY-pytest python$PY-scipy \\\n        libeigen3-dev libboost-dev cmake make ${EXTRA_PACKAGES} && break; done\"\n  else\n\n    if [ \"$CLANG\" = \"7\" ]; then\n      export CXXFLAGS=\"-stdlib=libc++\"\n    fi\n\n    export NPY_NUM_BUILD_JOBS=2\n    echo \"Installing pytest, numpy, scipy...\"\n    local PIP_CMD=\"\"\n    if [ -n $PYPY ]; then\n      # For expediency, install only versions that are available on the extra index.\n      travis_wait 30 \\\n        $PY_CMD -m 
pip install --user --upgrade --extra-index-url https://imaginary.ca/trusty-pypi \\\n          pytest numpy==1.15.4 scipy==1.2.0\n    else\n      $PY_CMD -m pip install --user --upgrade pytest numpy scipy\n    fi\n    echo \"done.\"\n\n    mkdir eigen\n    curl -fsSL https://bitbucket.org/eigen/eigen/get/3.3.4.tar.bz2 | \\\n        tar --extract -j --directory=eigen --strip-components=1\n    export CMAKE_INCLUDE_PATH=\"${CMAKE_INCLUDE_PATH:+$CMAKE_INCLUDE_PATH:}$PWD/eigen\"\n  fi\n  set +ex\nscript:\n- |\n  # CMake Configuration\n  set -ex\n  $SCRIPT_RUN_PREFIX cmake ${CMAKE_EXTRA_ARGS} \\\n    -DPYBIND11_PYTHON_VERSION=$PYTHON \\\n    -DPYBIND11_CPP_STANDARD=$CPP \\\n    -DPYBIND11_WERROR=${WERROR:-ON} \\\n    -DDOWNLOAD_CATCH=${DOWNLOAD_CATCH:-ON} \\\n    .\n  set +ex\n- |\n  # pytest\n  set -ex\n  $SCRIPT_RUN_PREFIX make pytest -j 2 VERBOSE=1\n  set +ex\n- |\n  # cpptest\n  set -ex\n  $SCRIPT_RUN_PREFIX make cpptest -j 2\n  set +ex\n- |\n  # CMake Build Interface\n  set -ex\n  if [ -n \"$CMAKE\" ]; then $SCRIPT_RUN_PREFIX make test_cmake_build; fi\n  set +ex\nafter_failure: cat tests/test_cmake_build/*.log*\nafter_script:\n- |\n  # Cleanup (Docker)\n  set -ex\n  if [ -n \"$DOCKER\" ]; then docker stop \"$containerid\"; docker rm \"$containerid\"; fi\n  set +ex\n"
  },
  {
    "path": "src/third_party/pybind11/CMakeLists.txt",
    "content": "# CMakeLists.txt -- Build system for the pybind11 modules\n#\n# Copyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>\n#\n# All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\n\ncmake_minimum_required(VERSION 2.8.12)\n\nif (POLICY CMP0048)\n  # cmake warns if loaded from a min-3.0-required parent dir, so silence the warning:\n  cmake_policy(SET CMP0048 NEW)\nendif()\n\n# CMake versions < 3.4.0 do not support try_compile/pthread checks without C as active language.\nif(CMAKE_VERSION VERSION_LESS 3.4.0)\n  project(pybind11)\nelse()\n  project(pybind11 CXX)\nendif()\n\n# Check if pybind11 is being used directly or via add_subdirectory\nset(PYBIND11_MASTER_PROJECT OFF)\nif (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)\n  set(PYBIND11_MASTER_PROJECT ON)\nendif()\n\noption(PYBIND11_INSTALL \"Install pybind11 header files?\" ${PYBIND11_MASTER_PROJECT})\noption(PYBIND11_TEST    \"Build pybind11 test suite?\"     ${PYBIND11_MASTER_PROJECT})\n\nlist(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_LIST_DIR}/tools\")\n\ninclude(pybind11Tools)\n\n# Cache variables so pybind11_add_module can be used in parent projects\nset(PYBIND11_INCLUDE_DIR \"${CMAKE_CURRENT_LIST_DIR}/include\" CACHE INTERNAL \"\")\nset(PYTHON_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS} CACHE INTERNAL \"\")\nset(PYTHON_LIBRARIES ${PYTHON_LIBRARIES} CACHE INTERNAL \"\")\nset(PYTHON_MODULE_PREFIX ${PYTHON_MODULE_PREFIX} CACHE INTERNAL \"\")\nset(PYTHON_MODULE_EXTENSION ${PYTHON_MODULE_EXTENSION} CACHE INTERNAL \"\")\nset(PYTHON_VERSION_MAJOR ${PYTHON_VERSION_MAJOR} CACHE INTERNAL \"\")\nset(PYTHON_VERSION_MINOR ${PYTHON_VERSION_MINOR} CACHE INTERNAL \"\")\n\n# NB: when adding a header don't forget to also add it to setup.py\nset(PYBIND11_HEADERS\n  include/pybind11/detail/class.h\n  include/pybind11/detail/common.h\n  include/pybind11/detail/descr.h\n  include/pybind11/detail/init.h\n  include/pybind11/detail/internals.h\n  
include/pybind11/detail/typeid.h\n  include/pybind11/attr.h\n  include/pybind11/buffer_info.h\n  include/pybind11/cast.h\n  include/pybind11/chrono.h\n  include/pybind11/common.h\n  include/pybind11/complex.h\n  include/pybind11/options.h\n  include/pybind11/eigen.h\n  include/pybind11/embed.h\n  include/pybind11/eval.h\n  include/pybind11/functional.h\n  include/pybind11/numpy.h\n  include/pybind11/operators.h\n  include/pybind11/pybind11.h\n  include/pybind11/pytypes.h\n  include/pybind11/stl.h\n  include/pybind11/stl_bind.h\n)\nstring(REPLACE \"include/\" \"${CMAKE_CURRENT_SOURCE_DIR}/include/\"\n       PYBIND11_HEADERS \"${PYBIND11_HEADERS}\")\n\nif (PYBIND11_TEST)\n  add_subdirectory(tests)\nendif()\n\ninclude(GNUInstallDirs)\ninclude(CMakePackageConfigHelpers)\n\n# extract project version from source\nfile(STRINGS \"${PYBIND11_INCLUDE_DIR}/pybind11/detail/common.h\" pybind11_version_defines\n     REGEX \"#define PYBIND11_VERSION_(MAJOR|MINOR|PATCH) \")\nforeach(ver ${pybind11_version_defines})\n  if (ver MATCHES \"#define PYBIND11_VERSION_(MAJOR|MINOR|PATCH) +([^ ]+)$\")\n    set(PYBIND11_VERSION_${CMAKE_MATCH_1} \"${CMAKE_MATCH_2}\" CACHE INTERNAL \"\")\n  endif()\nendforeach()\nset(${PROJECT_NAME}_VERSION ${PYBIND11_VERSION_MAJOR}.${PYBIND11_VERSION_MINOR}.${PYBIND11_VERSION_PATCH})\nmessage(STATUS \"pybind11 v${${PROJECT_NAME}_VERSION}\")\n\noption (USE_PYTHON_INCLUDE_DIR \"Install pybind11 headers in Python include directory instead of default installation prefix\" OFF)\nif (USE_PYTHON_INCLUDE_DIR)\n    file(RELATIVE_PATH CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_PREFIX} ${PYTHON_INCLUDE_DIRS})\nendif()\n\nif(NOT (CMAKE_VERSION VERSION_LESS 3.0))  # CMake >= 3.0\n  # Build an interface library target:\n  add_library(pybind11 INTERFACE)\n  add_library(pybind11::pybind11 ALIAS pybind11)  # to match exported target\n  target_include_directories(pybind11 INTERFACE $<BUILD_INTERFACE:${PYBIND11_INCLUDE_DIR}>\n                                                
$<BUILD_INTERFACE:${PYTHON_INCLUDE_DIRS}>\n                                                $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)\n  target_compile_options(pybind11 INTERFACE $<BUILD_INTERFACE:${PYBIND11_CPP_STANDARD}>)\n\n  add_library(module INTERFACE)\n  add_library(pybind11::module ALIAS module)\n  if(NOT MSVC)\n    target_compile_options(module INTERFACE -fvisibility=hidden)\n  endif()\n  target_link_libraries(module INTERFACE pybind11::pybind11)\n  if(WIN32 OR CYGWIN)\n    target_link_libraries(module INTERFACE $<BUILD_INTERFACE:${PYTHON_LIBRARIES}>)\n  elseif(APPLE)\n    target_link_libraries(module INTERFACE \"-undefined dynamic_lookup\")\n  endif()\n\n  add_library(embed INTERFACE)\n  add_library(pybind11::embed ALIAS embed)\n  target_link_libraries(embed INTERFACE pybind11::pybind11 $<BUILD_INTERFACE:${PYTHON_LIBRARIES}>)\nendif()\n\nif (PYBIND11_INSTALL)\n  install(DIRECTORY ${PYBIND11_INCLUDE_DIR}/pybind11 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})\n  # GNUInstallDirs \"DATADIR\" wrong here; CMake search path wants \"share\".\n  set(PYBIND11_CMAKECONFIG_INSTALL_DIR \"share/cmake/${PROJECT_NAME}\" CACHE STRING \"install path for pybind11Config.cmake\")\n\n  configure_package_config_file(tools/${PROJECT_NAME}Config.cmake.in\n                                \"${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake\"\n                                INSTALL_DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})\n  # Remove CMAKE_SIZEOF_VOID_P from ConfigVersion.cmake since the library does\n  # not depend on architecture specific settings or libraries.\n  set(_PYBIND11_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})\n  unset(CMAKE_SIZEOF_VOID_P)\n  write_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake\n                                   VERSION ${${PROJECT_NAME}_VERSION}\n                                   COMPATIBILITY AnyNewerVersion)\n  set(CMAKE_SIZEOF_VOID_P ${_PYBIND11_CMAKE_SIZEOF_VOID_P})\n  install(FILES 
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake\n                ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake\n                tools/FindPythonLibsNew.cmake\n                tools/pybind11Tools.cmake\n          DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})\n\n  if(NOT (CMAKE_VERSION VERSION_LESS 3.0))\n    if(NOT PYBIND11_EXPORT_NAME)\n      set(PYBIND11_EXPORT_NAME \"${PROJECT_NAME}Targets\")\n    endif()\n\n    install(TARGETS pybind11 module embed\n            EXPORT \"${PYBIND11_EXPORT_NAME}\")\n    if(PYBIND11_MASTER_PROJECT)\n      install(EXPORT \"${PYBIND11_EXPORT_NAME}\"\n              NAMESPACE \"${PROJECT_NAME}::\"\n              DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})\n    endif()\n  endif()\nendif()\n"
  },
  {
    "path": "src/third_party/pybind11/CONTRIBUTING.md",
    "content": "Thank you for your interest in this project! Please refer to the following\nsections on how to contribute code and bug reports.\n\n### Reporting bugs\n\nAt the moment, this project is run in the spare time of a single person\n([Wenzel Jakob](http://rgl.epfl.ch/people/wjakob)) with very limited resources\nfor issue tracker tickets. Thus, before submitting a question or bug report,\nplease take a moment of your time and ensure that your issue isn't already\ndiscussed in the project documentation provided at\n[http://pybind11.readthedocs.org/en/latest](http://pybind11.readthedocs.org/en/latest).\n\nAssuming that you have identified a previously unknown problem or an important\nquestion, it's essential that you submit a self-contained and minimal piece of\ncode that reproduces the problem. In other words: no external dependencies,\nisolate the function(s) that cause breakage, submit matched and complete C++\nand Python snippets that can be easily compiled and run on my end.\n\n## Pull requests\nContributions are submitted, reviewed, and accepted using Github pull requests.\nPlease refer to [this\narticle](https://help.github.com/articles/using-pull-requests) for details and\nadhere to the following rules to make the process as smooth as possible:\n\n* Make a new branch for every feature you're working on.\n* Make small and clean pull requests that are easy to review but make sure they\n  do add value by themselves.\n* Add tests for any new functionality and run the test suite (``make pytest``)\n  to ensure that no existing features break.\n* Please run ``flake8`` and ``tools/check-style.sh`` to check your code matches\n  the project style. 
(Note that ``check-style.sh`` requires ``gawk``.)\n* This project has a strong focus on providing general solutions using a\n  minimal amount of code, thus small pull requests are greatly preferred.\n\n### Licensing of contributions\n\npybind11 is provided under a BSD-style license that can be found in the\n``LICENSE`` file. By using, distributing, or contributing to this project, you\nagree to the terms and conditions of this license.\n\nYou are under no obligation whatsoever to provide any bug fixes, patches, or\nupgrades to the features, functionality or performance of the source code\n(\"Enhancements\") to anyone; however, if you choose to make your Enhancements\navailable either publicly, or directly to the author of this software, without\nimposing a separate written license agreement for such Enhancements, then you\nhereby grant the following license: a non-exclusive, royalty-free perpetual\nlicense to install, use, modify, prepare derivative works, incorporate into\nother computer software, distribute, and sublicense such enhancements or\nderivative works thereof, in binary and source code form.\n"
  },
  {
    "path": "src/third_party/pybind11/ISSUE_TEMPLATE.md",
    "content": "Make sure you've completed the following steps before submitting your issue -- thank you!\n\n1. Check if your question has already been answered in the [FAQ](http://pybind11.readthedocs.io/en/latest/faq.html) section.\n2. Make sure you've read the [documentation](http://pybind11.readthedocs.io/en/latest/). Your issue may be addressed there.\n3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room](https://gitter.im/pybind/Lobby).\n4. If you have a genuine bug report or a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below.\n5. Include a self-contained and minimal piece of code that reproduces the problem. If that's not possible, try to make the description as clear as possible.\n\n*After reading, remove this checklist and the template text in parentheses below.*\n\n## Issue description\n\n(Provide a short description, state the expected behavior and what actually happens.)\n\n## Reproducible example code\n\n(The code should be minimal, have no external dependencies, isolate the function(s) that cause breakage. Submit matched and complete C++ and Python snippets that can be easily compiled and run to diagnose the issue.)\n"
  },
  {
    "path": "src/third_party/pybind11/LICENSE",
    "content": "Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n   may be used to endorse or promote products derived from this software\n   without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPlease also refer to the file CONTRIBUTING.md, which clarifies licensing of\nexternal contributions to this project including patches, pull requests, etc.\n"
  },
  {
    "path": "src/third_party/pybind11/MANIFEST.in",
    "content": "recursive-include include/pybind11 *.h\ninclude LICENSE README.md CONTRIBUTING.md\n"
  },
  {
    "path": "src/third_party/pybind11/README.md",
    "content": "![pybind11 logo](https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png)\n\n# pybind11 — Seamless operability between C++11 and Python\n\n[![Documentation Status](https://readthedocs.org/projects/pybind11/badge/?version=master)](http://pybind11.readthedocs.org/en/master/?badge=master)\n[![Documentation Status](https://readthedocs.org/projects/pybind11/badge/?version=stable)](http://pybind11.readthedocs.org/en/stable/?badge=stable)\n[![Gitter chat](https://img.shields.io/gitter/room/gitterHQ/gitter.svg)](https://gitter.im/pybind/Lobby)\n[![Build Status](https://travis-ci.org/pybind/pybind11.svg?branch=master)](https://travis-ci.org/pybind/pybind11)\n[![Build status](https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true)](https://ci.appveyor.com/project/wjakob/pybind11)\n\n**pybind11** is a lightweight header-only library that exposes C++ types in Python\nand vice versa, mainly to create Python bindings of existing C++ code. Its\ngoals and syntax are similar to the excellent\n[Boost.Python](http://www.boost.org/doc/libs/1_58_0/libs/python/doc/) library\nby David Abrahams: to minimize boilerplate code in traditional extension\nmodules by inferring type information using compile-time introspection.\n\nThe main issue with Boost.Python—and the reason for creating such a similar\nproject—is Boost. Boost is an enormously large and complex suite of utility\nlibraries that works with almost every C++ compiler in existence. This\ncompatibility has its cost: arcane template tricks and workarounds are\nnecessary to support the oldest and buggiest of compiler specimens. Now that\nC++11-compatible compilers are widely available, this heavy machinery has\nbecome an excessively large and unnecessary dependency.\n\nThink of this library as a tiny self-contained version of Boost.Python with\neverything stripped away that isn't relevant for binding generation. 
Without\ncomments, the core header files only require ~4K lines of code and depend on\nPython (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This\ncompact implementation was possible thanks to some of the new C++11 language\nfeatures (specifically: tuples, lambda functions and variadic templates). Since\nits creation, this library has grown beyond Boost.Python in many ways, leading\nto dramatically simpler binding code in many common situations.\n\nTutorial and reference documentation is provided at\n[http://pybind11.readthedocs.org/en/master](http://pybind11.readthedocs.org/en/master).\nA PDF version of the manual is available\n[here](https://media.readthedocs.org/pdf/pybind11/master/pybind11.pdf).\n\n## Core features\npybind11 can map the following core C++ features to Python\n\n- Functions accepting and returning custom data structures per value, reference, or pointer\n- Instance methods and static methods\n- Overloaded functions\n- Instance attributes and static attributes\n- Arbitrary exception types\n- Enumerations\n- Callbacks\n- Iterators and ranges\n- Custom operators\n- Single and multiple inheritance\n- STL data structures\n- Smart pointers with reference counting like ``std::shared_ptr``\n- Internal references with correct reference counting\n- C++ classes with virtual (and pure virtual) methods can be extended in Python\n\n## Goodies\nIn addition to the core functionality, pybind11 provides some extra goodies:\n\n- Python 2.7, 3.x, and PyPy (PyPy2.7 >= 5.7) are supported with an\n  implementation-agnostic interface.\n\n- It is possible to bind C++11 lambda functions with captured variables. The\n  lambda capture data is stored inside the resulting Python function object.\n\n- pybind11 uses C++11 move constructors and move assignment operators whenever\n  possible to efficiently transfer custom data types.\n\n- It's easy to expose the internal storage of custom data types through\n  Pythons' buffer protocols. This is handy e.g. 
for fast conversion between\n  C++ matrix classes like Eigen and NumPy without expensive copy operations.\n\n- pybind11 can automatically vectorize functions so that they are transparently\n  applied to all entries of one or more NumPy array arguments.\n\n- Python's slice-based access and assignment operations can be supported with\n  just a few lines of code.\n\n- Everything is contained in just a few header files; there is no need to link\n  against any additional libraries.\n\n- Binaries are generally smaller by a factor of at least 2 compared to\n  equivalent bindings generated by Boost.Python. A recent pybind11 conversion\n  of PyRosetta, an enormous Boost.Python binding project,\n  [reported](http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf) a binary\n  size reduction of **5.4x** and compile time reduction by **5.8x**.\n\n- Function signatures are precomputed at compile time (using ``constexpr``),\n  leading to smaller binaries.\n\n- With little extra effort, C++ types can be pickled and unpickled similar to\n  regular Python objects.\n\n## Supported compilers\n\n1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or newer)\n2. GCC 4.8 or newer\n3. Microsoft Visual Studio 2015 Update 3 or newer\n4. Intel C++ compiler 17 or newer (16 with pybind11 v2.0 and 15 with pybind11 v2.0 and a [workaround](https://github.com/pybind/pybind11/issues/276))\n5. Cygwin/GCC (tested on 2.5.1)\n\n## About\n\nThis project was created by [Wenzel Jakob](http://rgl.epfl.ch/people/wjakob).\nSignificant features and/or improvements to the code were contributed by\nJonas Adler,\nSylvain Corlay,\nTrent Houliston,\nAxel Huebl,\n@hulucc,\nSergey Lyskov\nJohan Mabille,\nTomasz Miąsko,\nDean Moldovan,\nBen Pritchard,\nJason Rhinelander,\nBoris Schäling,\nPim Schellart,\nIvan Smirnov, and\nPatrick Stewart.\n\n### License\n\npybind11 is provided under a BSD-style license that can be found in the\n``LICENSE`` file. 
By using, distributing, or contributing to this project,\nyou agree to the terms and conditions of this license.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/Doxyfile",
    "content": "PROJECT_NAME           = pybind11\nINPUT                  = ../include/pybind11/\nRECURSIVE              = YES\n\nGENERATE_HTML          = NO\nGENERATE_LATEX         = NO\nGENERATE_XML           = YES\nXML_OUTPUT             = .build/doxygenxml\nXML_PROGRAMLISTING     = YES\n\nMACRO_EXPANSION        = YES\nEXPAND_ONLY_PREDEF     = YES\nEXPAND_AS_DEFINED      = PYBIND11_RUNTIME_EXCEPTION\n\nALIASES                = \"rst=\\verbatim embed:rst\"\nALIASES               += \"endrst=\\endverbatim\"\n\nQUIET                  = YES\nWARNINGS               = YES\nWARN_IF_UNDOCUMENTED   = NO\n"
  },
  {
    "path": "src/third_party/pybind11/docs/_static/theme_overrides.css",
    "content": ".wy-table-responsive table td,\n.wy-table-responsive table th {\n    white-space: initial !important;\n}\n.rst-content table.docutils td {\n    vertical-align: top !important;\n}\ndiv[class^='highlight'] pre {\n    white-space: pre;\n    white-space: pre-wrap;\n}\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/cast/chrono.rst",
    "content": "Chrono\n======\n\nWhen including the additional header file :file:`pybind11/chrono.h` conversions\nfrom C++11 chrono datatypes to python datetime objects are automatically enabled.\nThis header also enables conversions of python floats (often from sources such\nas ``time.monotonic()``, ``time.perf_counter()`` and ``time.process_time()``)\ninto durations.\n\nAn overview of clocks in C++11\n------------------------------\n\nA point of confusion when using these conversions is the differences between\nclocks provided in C++11. There are three clock types defined by the C++11\nstandard and users can define their own if needed. Each of these clocks have\ndifferent properties and when converting to and from python will give different\nresults.\n\nThe first clock defined by the standard is ``std::chrono::system_clock``. This\nclock measures the current date and time. However, this clock changes with to\nupdates to the operating system time. For example, if your time is synchronised\nwith a time server this clock will change. This makes this clock a poor choice\nfor timing purposes but good for measuring the wall time.\n\nThe second clock defined in the standard is ``std::chrono::steady_clock``.\nThis clock ticks at a steady rate and is never adjusted. This makes it excellent\nfor timing purposes, however the value in this clock does not correspond to the\ncurrent date and time. Often this clock will be the amount of time your system\nhas been on, although it does not have to be. This clock will never be the same\nclock as the system clock as the system clock can change but steady clocks\ncannot.\n\nThe third clock defined in the standard is ``std::chrono::high_resolution_clock``.\nThis clock is the clock that has the highest resolution out of the clocks in the\nsystem. It is normally a typedef to either the system clock or the steady clock\nbut can be its own independent clock. 
This is important as when using these\nconversions the types you get in python for this clock might be different\ndepending on the system.\nIf it is a typedef of the system clock, python will get datetime objects, but if\nit is a different clock they will be timedelta objects.\n\nProvided conversions\n--------------------\n\n.. rubric:: C++ to Python\n\n- ``std::chrono::system_clock::time_point`` → ``datetime.datetime``\n    System clock times are converted to python datetime instances. They are\n    in the local timezone, but do not have any timezone information attached\n    to them (they are naive datetime objects).\n\n- ``std::chrono::duration`` → ``datetime.timedelta``\n    Durations are converted to timedeltas, any precision in the duration\n    greater than microseconds is lost by rounding towards zero.\n\n- ``std::chrono::[other_clocks]::time_point`` → ``datetime.timedelta``\n    Any clock time that is not the system clock is converted to a time delta.\n    This timedelta measures the time from the clocks epoch to now.\n\n.. rubric:: Python to C++\n\n- ``datetime.datetime`` → ``std::chrono::system_clock::time_point``\n    Date/time objects are converted into system clock timepoints. Any\n    timezone information is ignored and the type is treated as a naive\n    object.\n\n- ``datetime.timedelta`` → ``std::chrono::duration``\n    Time deltas are converted into durations with microsecond precision.\n\n- ``datetime.timedelta`` → ``std::chrono::[other_clocks]::time_point``\n    Time deltas that are converted into clock timepoints are treated as\n    the amount of time from the start of the clocks epoch.\n\n- ``float`` → ``std::chrono::duration``\n    Floats that are passed to C++ as durations will be interpreted as a number of\n    seconds. 
These will be converted to the duration using ``duration_cast``\n    from the float.\n\n- ``float`` → ``std::chrono::[other_clocks]::time_point``\n    Floats that are passed to C++ as time points will be interpreted as the\n    number of seconds from the start of the clocks epoch.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/cast/custom.rst",
    "content": "Custom type casters\n===================\n\nIn very rare cases, applications may require custom type casters that cannot be\nexpressed using the abstractions provided by pybind11, thus requiring raw\nPython C API calls. This is fairly advanced usage and should only be pursued by\nexperts who are familiar with the intricacies of Python reference counting.\n\nThe following snippets demonstrate how this works for a very simple ``inty``\ntype that that should be convertible from Python types that provide a\n``__int__(self)`` method.\n\n.. code-block:: cpp\n\n    struct inty { long long_value; };\n\n    void print(inty s) {\n        std::cout << s.long_value << std::endl;\n    }\n\nThe following Python snippet demonstrates the intended usage from the Python side:\n\n.. code-block:: python\n\n    class A:\n        def __int__(self):\n            return 123\n\n    from example import print\n    print(A())\n\nTo register the necessary conversion routines, it is necessary to add\na partial overload to the ``pybind11::detail::type_caster<T>`` template.\nAlthough this is an implementation detail, adding partial overloads to this\ntype is explicitly allowed.\n\n.. code-block:: cpp\n\n    namespace pybind11 { namespace detail {\n        template <> struct type_caster<inty> {\n        public:\n            /**\n             * This macro establishes the name 'inty' in\n             * function signatures and declares a local variable\n             * 'value' of type inty\n             */\n            PYBIND11_TYPE_CASTER(inty, _(\"inty\"));\n\n            /**\n             * Conversion part 1 (Python->C++): convert a PyObject into a inty\n             * instance or return false upon failure. 
The second argument\n             * indicates whether implicit conversions should be applied.\n             */\n            bool load(handle src, bool) {\n                /* Extract PyObject from handle */\n                PyObject *source = src.ptr();\n                /* Try converting into a Python integer value */\n                PyObject *tmp = PyNumber_Long(source);\n                if (!tmp)\n                    return false;\n                /* Now try to convert into a C++ int */\n                value.long_value = PyLong_AsLong(tmp);\n                Py_DECREF(tmp);\n                /* Ensure return code was OK (to avoid out-of-range errors etc) */\n                return !(value.long_value == -1 && !PyErr_Occurred());\n            }\n\n            /**\n             * Conversion part 2 (C++ -> Python): convert an inty instance into\n             * a Python object. The second and third arguments are used to\n             * indicate the return value policy and parent object (for\n             * ``return_value_policy::reference_internal``) and are generally\n             * ignored by implicit casters.\n             */\n            static handle cast(inty src, return_value_policy /* policy */, handle /* parent */) {\n                return PyLong_FromLong(src.long_value);\n            }\n        };\n    }} // namespace pybind11::detail\n\n.. note::\n\n    A ``type_caster<T>`` defined with ``PYBIND11_TYPE_CASTER(T, ...)`` requires\n    that ``T`` is default-constructible (``value`` is first default constructed\n    and then ``load()`` assigns to it).\n\n.. warning::\n\n    When using custom type casters, it's important to declare them consistently\n    in every compilation unit of the Python extension module. Otherwise,\n    undefined behavior can ensue.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/cast/eigen.rst",
    "content": "Eigen\n#####\n\n`Eigen <http://eigen.tuxfamily.org>`_ is C++ header-based library for dense and\nsparse linear algebra. Due to its popularity and widespread adoption, pybind11\nprovides transparent conversion and limited mapping support between Eigen and\nScientific Python linear algebra data types.\n\nTo enable the built-in Eigen support you must include the optional header file\n:file:`pybind11/eigen.h`.\n\nPass-by-value\n=============\n\nWhen binding a function with ordinary Eigen dense object arguments (for\nexample, ``Eigen::MatrixXd``), pybind11 will accept any input value that is\nalready (or convertible to) a ``numpy.ndarray`` with dimensions compatible with\nthe Eigen type, copy its values into a temporary Eigen variable of the\nappropriate type, then call the function with this temporary variable.\n\nSparse matrices are similarly copied to or from\n``scipy.sparse.csr_matrix``/``scipy.sparse.csc_matrix`` objects.\n\nPass-by-reference\n=================\n\nOne major limitation of the above is that every data conversion implicitly\ninvolves a copy, which can be both expensive (for large matrices) and disallows\nbinding functions that change their (Matrix) arguments.  Pybind11 allows you to\nwork around this by using Eigen's ``Eigen::Ref<MatrixType>`` class much as you\nwould when writing a function taking a generic type in Eigen itself (subject to\nsome limitations discussed below).\n\nWhen calling a bound function accepting a ``Eigen::Ref<const MatrixType>``\ntype, pybind11 will attempt to avoid copying by using an ``Eigen::Map`` object\nthat maps into the source ``numpy.ndarray`` data: this requires both that the\ndata types are the same (e.g. ``dtype='float64'`` and ``MatrixType::Scalar`` is\n``double``); and that the storage is layout compatible.  
The latter limitation\nis discussed in detail in the section below, and requires careful\nconsideration: by default, numpy matrices and eigen matrices are *not* storage\ncompatible.\n\nIf the numpy matrix cannot be used as is (either because its types differ, e.g.\npassing an array of integers to an Eigen parameter requiring doubles, or\nbecause the storage is incompatible), pybind11 makes a temporary copy and\npasses the copy instead.\n\nWhen a bound function parameter is instead ``Eigen::Ref<MatrixType>`` (note the\nlack of ``const``), pybind11 will only allow the function to be called if it\ncan be mapped *and* if the numpy array is writeable (that is\n``a.flags.writeable`` is true).  Any access (including modification) made to\nthe passed variable will be transparently carried out directly on the\n``numpy.ndarray``.\n\nThis means you can write code such as the following and have it work as\nexpected:\n\n.. code-block:: cpp\n\n    void scale_by_2(Eigen::Ref<Eigen::VectorXd> v) {\n        v *= 2;\n    }\n\nNote, however, that you will likely run into limitations due to numpy and\nEigen's different default storage order for data; see the below section on\n:ref:`storage_orders` for details on how to bind code that won't run into such\nlimitations.\n\n.. note::\n\n    Passing by reference is not supported for sparse types.\n\nReturning values to Python\n==========================\n\nWhen returning an ordinary dense Eigen matrix type to numpy (e.g.\n``Eigen::MatrixXd`` or ``Eigen::RowVectorXf``) pybind11 keeps the matrix and\nreturns a numpy array that directly references the Eigen matrix: no copy of the\ndata is performed.  
The numpy array will have ``array.flags.owndata`` set to\n``False`` to indicate that it does not own the data, and the lifetime of the\nstored Eigen matrix will be tied to the returned ``array``.\n\nIf you bind a function with a non-reference, ``const`` return type (e.g.\n``const Eigen::MatrixXd``), the same thing happens except that pybind11 also\nsets the numpy array's ``writeable`` flag to false.\n\nIf you return an lvalue reference or pointer, the usual pybind11 rules apply,\nas dictated by the binding function's return value policy (see the\ndocumentation on :ref:`return_value_policies` for full details).  That means,\nwithout an explicit return value policy, lvalue references will be copied and\npointers will be managed by pybind11.  In order to avoid copying, you should\nexplicitly specify an appropriate return value policy, as in the following\nexample:\n\n.. code-block:: cpp\n\n    class MyClass {\n        Eigen::MatrixXd big_mat = Eigen::MatrixXd::Zero(10000, 10000);\n    public:\n        Eigen::MatrixXd &getMatrix() { return big_mat; }\n        const Eigen::MatrixXd &viewMatrix() { return big_mat; }\n    };\n\n    // Later, in binding code:\n    py::class_<MyClass>(m, \"MyClass\")\n        .def(py::init<>())\n        .def(\"copy_matrix\", &MyClass::getMatrix) // Makes a copy!\n        .def(\"get_matrix\", &MyClass::getMatrix, py::return_value_policy::reference_internal)\n        .def(\"view_matrix\", &MyClass::viewMatrix, py::return_value_policy::reference_internal)\n        ;\n\n.. 
code-block:: python\n\n    a = MyClass()\n    m = a.get_matrix()   # flags.writeable = True,  flags.owndata = False\n    v = a.view_matrix()  # flags.writeable = False, flags.owndata = False\n    c = a.copy_matrix()  # flags.writeable = True,  flags.owndata = True\n    # m[5,6] and v[5,6] refer to the same element, c[5,6] does not.\n\nNote in this example that ``py::return_value_policy::reference_internal`` is\nused to tie the life of the MyClass object to the life of the returned arrays.\n\nYou may also return an ``Eigen::Ref``, ``Eigen::Map`` or other map-like Eigen\nobject (for example, the return value of ``matrix.block()`` and related\nmethods) that map into a dense Eigen type.  When doing so, the default\nbehaviour of pybind11 is to simply reference the returned data: you must take\ncare to ensure that this data remains valid!  You may ask pybind11 to\nexplicitly *copy* such a return value by using the\n``py::return_value_policy::copy`` policy when binding the function.  You may\nalso use ``py::return_value_policy::reference_internal`` or a\n``py::keep_alive`` to ensure the data stays valid as long as the returned numpy\narray does.\n\nWhen returning such a reference of map, pybind11 additionally respects the\nreadonly-status of the returned value, marking the numpy array as non-writeable\nif the reference or map was itself read-only.\n\n.. note::\n\n    Sparse types are always copied when returned.\n\n.. _storage_orders:\n\nStorage orders\n==============\n\nPassing arguments via ``Eigen::Ref`` has some limitations that you must be\naware of in order to effectively pass matrices by reference.  
First and\nforemost is that the default ``Eigen::Ref<MatrixType>`` class requires\ncontiguous storage along columns (for column-major types, the default in Eigen)\nor rows if ``MatrixType`` is specifically an ``Eigen::RowMajor`` storage type.\nThe former, Eigen's default, is incompatible with ``numpy``'s default row-major\nstorage, and so you will not be able to pass numpy arrays to Eigen by reference\nwithout making one of two changes.\n\n(Note that this does not apply to vectors (or column or row matrices): for such\ntypes the \"row-major\" and \"column-major\" distinction is meaningless).\n\nThe first approach is to change the use of ``Eigen::Ref<MatrixType>`` to the\nmore general ``Eigen::Ref<MatrixType, 0, Eigen::Stride<Eigen::Dynamic,\nEigen::Dynamic>>`` (or similar type with a fully dynamic stride type in the\nthird template argument).  Since this is a rather cumbersome type, pybind11\nprovides a ``py::EigenDRef<MatrixType>`` type alias for your convenience (along\nwith EigenDMap for the equivalent Map, and EigenDStride for just the stride\ntype).\n\nThis type allows Eigen to map into any arbitrary storage order.  This is not\nthe default in Eigen for performance reasons: contiguous storage allows\nvectorization that cannot be done when storage is not known to be contiguous at\ncompile time.  The default ``Eigen::Ref`` stride type allows non-contiguous\nstorage along the outer dimension (that is, the rows of a column-major matrix\nor columns of a row-major matrix), but not along the inner dimension.\n\nThis type, however, has the added benefit of also being able to map numpy array\nslices.  For example, the following (contrived) example uses Eigen with a numpy\nslice to multiply by 2 all coefficients that are both on even rows (0, 2, 4,\n...) and in columns 2, 5, or 8:\n\n.. code-block:: cpp\n\n    m.def(\"scale\", [](py::EigenDRef<Eigen::MatrixXd> m, double c) { m *= c; });\n\n.. 
code-block:: python\n\n    # a = np.array(...)\n    scale_by_2(myarray[0::2, 2:9:3])\n\nThe second approach to avoid copying is more intrusive: rearranging the\nunderlying data types to not run into the non-contiguous storage problem in the\nfirst place.  In particular, that means using matrices with ``Eigen::RowMajor``\nstorage, where appropriate, such as:\n\n.. code-block:: cpp\n\n    using RowMatrixXd = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;\n    // Use RowMatrixXd instead of MatrixXd\n\nNow bound functions accepting ``Eigen::Ref<RowMatrixXd>`` arguments will be\ncallable with numpy's (default) arrays without involving a copying.\n\nYou can, alternatively, change the storage order that numpy arrays use by\nadding the ``order='F'`` option when creating an array:\n\n.. code-block:: python\n\n    myarray = np.array(source, order='F')\n\nSuch an object will be passable to a bound function accepting an\n``Eigen::Ref<MatrixXd>`` (or similar column-major Eigen type).\n\nOne major caveat with this approach, however, is that it is not entirely as\neasy as simply flipping all Eigen or numpy usage from one to the other: some\noperations may alter the storage order of a numpy array.  
For example, ``a2 =\narray.transpose()`` results in ``a2`` being a view of ``array`` that references\nthe same data, but in the opposite storage order!\n\nWhile this approach allows fully optimized vectorized calculations in Eigen, it\ncannot be used with array slices, unlike the first approach.\n\nWhen *returning* a matrix to Python (either a regular matrix, a reference via\n``Eigen::Ref<>``, or a map/block into a matrix), no special storage\nconsideration is required: the created numpy array will have the required\nstride that allows numpy to properly interpret the array, whatever its storage\norder.\n\nFailing rather than copying\n===========================\n\nThe default behaviour when binding ``Eigen::Ref<const MatrixType>`` eigen\nreferences is to copy matrix values when passed a numpy array that does not\nconform to the element type of ``MatrixType`` or does not have a compatible\nstride layout.  If you want to explicitly avoid copying in such a case, you\nshould bind arguments using the ``py::arg().noconvert()`` annotation (as\ndescribed in the :ref:`nonconverting_arguments` documentation).\n\nThe following example shows an example of arguments that don't allow data\ncopying to take place:\n\n.. code-block:: cpp\n\n    // The method and function to be bound:\n    class MyClass {\n        // ...\n        double some_method(const Eigen::Ref<const MatrixXd> &matrix) { /* ... */ }\n    };\n    float some_function(const Eigen::Ref<const MatrixXf> &big,\n                        const Eigen::Ref<const MatrixXf> &small) {\n        // ...\n    }\n\n    // The associated binding code:\n    using namespace pybind11::literals; // for \"arg\"_a\n    py::class_<MyClass>(m, \"MyClass\")\n        // ... 
other class definitions\n        .def(\"some_method\", &MyClass::some_method, py::arg().noconvert());\n\n    m.def(\"some_function\", &some_function,\n        \"big\"_a.noconvert(), // <- Don't allow copying for this arg\n        \"small\"_a            // <- This one can be copied if needed\n    );\n\nWith the above binding code, attempting to call the ``some_method(m)``\nmethod on a ``MyClass`` object, or attempting to call ``some_function(m, m2)``\nwill raise a ``RuntimeError`` rather than making a temporary copy of the array.\nIt will, however, allow the ``m2`` argument to be copied into a temporary if\nnecessary.\n\nNote that explicitly specifying ``.noconvert()`` is not required for *mutable*\nEigen references (e.g. ``Eigen::Ref<MatrixXd>`` without ``const`` on the\n``MatrixXd``): mutable references will never be called with a temporary copy.\n\nVectors versus column/row matrices\n==================================\n\nEigen and numpy have fundamentally different notions of a vector.  In Eigen, a\nvector is simply a matrix with the number of columns or rows set to 1 at\ncompile time (for a column vector or row vector, respectively).  Numpy, in\ncontrast, has comparable 2-dimensional 1xN and Nx1 arrays, but *also* has\n1-dimensional arrays of size N.\n\nWhen passing a 2-dimensional 1xN or Nx1 array to Eigen, the Eigen type must\nhave matching dimensions: That is, you cannot pass a 2-dimensional Nx1 numpy\narray to an Eigen value expecting a row vector, or a 1xN numpy array as a\ncolumn vector argument.\n\nOn the other hand, pybind11 allows you to pass 1-dimensional arrays of length N\nas Eigen parameters.  If the Eigen type can hold a column vector of length N it\nwill be passed as such a column vector.  If not, but the Eigen type constraints\nwill accept a row vector, it will be passed as a row vector.  (The column\nvector takes precedence when both are supported, for example, when passing a\n1D numpy array to a MatrixXd argument).  
Note that the type need not be\nexplicitly a vector: it is permitted to pass a 1D numpy array of size 5 to an\nEigen ``Matrix<double, Dynamic, 5>``: you would end up with a 1x5 Eigen matrix.\nPassing the same to an ``Eigen::MatrixXd`` would result in a 5x1 Eigen matrix.\n\nWhen returning an eigen vector to numpy, the conversion is ambiguous: a row\nvector of length 4 could be returned as either a 1D array of length 4, or as a\n2D array of size 1x4.  When encountering such a situation, pybind11 compromises\nby considering the returned Eigen type: if it is a compile-time vector--that\nis, the type has either the number of rows or columns set to 1 at compile\ntime--pybind11 converts to a 1D numpy array when returning the value.  For\ninstances that are a vector only at run-time (e.g. ``MatrixXd``,\n``Matrix<float, Dynamic, 4>``), pybind11 returns the vector as a 2D array to\nnumpy.  If this isn't what you want, you can use ``array.reshape(...)`` to get\na view of the same data in the desired dimensions.\n\n.. seealso::\n\n    The file :file:`tests/test_eigen.cpp` contains a complete example that\n    shows how to pass Eigen sparse and dense data types in more detail.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/cast/functional.rst",
    "content": "Functional\n##########\n\nThe following features must be enabled by including :file:`pybind11/functional.h`.\n\n\nCallbacks and passing anonymous functions\n=========================================\n\nThe C++11 standard brought lambda functions and the generic polymorphic\nfunction wrapper ``std::function<>`` to the C++ programming language, which\nenable powerful new ways of working with functions. Lambda functions come in\ntwo flavors: stateless lambda function resemble classic function pointers that\nlink to an anonymous piece of code, while stateful lambda functions\nadditionally depend on captured variables that are stored in an anonymous\n*lambda closure object*.\n\nHere is a simple example of a C++ function that takes an arbitrary function\n(stateful or stateless) with signature ``int -> int`` as an argument and runs\nit with the value 10.\n\n.. code-block:: cpp\n\n    int func_arg(const std::function<int(int)> &f) {\n        return f(10);\n    }\n\nThe example below is more involved: it takes a function of signature ``int -> int``\nand returns another function of the same kind. The return value is a stateful\nlambda function, which stores the value ``f`` in the capture object and adds 1 to\nits return value upon execution.\n\n.. code-block:: cpp\n\n    std::function<int(int)> func_ret(const std::function<int(int)> &f) {\n        return [f](int i) {\n            return f(i) + 1;\n        };\n    }\n\nThis example demonstrates using python named parameters in C++ callbacks which\nrequires using ``py::cpp_function`` as a wrapper. Usage is similar to defining\nmethods of classes:\n\n.. code-block:: cpp\n\n    py::cpp_function func_cpp() {\n        return py::cpp_function([](int i) { return i+1; },\n           py::arg(\"number\"));\n    }\n\nAfter including the extra header file :file:`pybind11/functional.h`, it is almost\ntrivial to generate binding code for all of these functions.\n\n.. 
code-block:: cpp\n\n    #include <pybind11/functional.h>\n\n    PYBIND11_MODULE(example, m) {\n        m.def(\"func_arg\", &func_arg);\n        m.def(\"func_ret\", &func_ret);\n        m.def(\"func_cpp\", &func_cpp);\n    }\n\nThe following interactive session shows how to call them from Python.\n\n.. code-block:: pycon\n\n    $ python\n    >>> import example\n    >>> def square(i):\n    ...     return i * i\n    ...\n    >>> example.func_arg(square)\n    100L\n    >>> square_plus_1 = example.func_ret(square)\n    >>> square_plus_1(4)\n    17L\n    >>> plus_1 = func_cpp()\n    >>> plus_1(number=43)\n    44L\n\n.. warning::\n\n    Keep in mind that passing a function from C++ to Python (or vice versa)\n    will instantiate a piece of wrapper code that translates function\n    invocations between the two languages. Naturally, this translation\n    increases the computational cost of each function call somewhat. A\n    problematic situation can arise when a function is copied back and forth\n    between Python and C++ many times in a row, in which case the underlying\n    wrappers will accumulate correspondingly. The resulting long sequence of\n    C++ -> Python -> C++ -> ... roundtrips can significantly decrease\n    performance.\n\n    There is one exception: pybind11 detects cases where a stateless function\n    (i.e. a function pointer or a lambda function without captured variables)\n    is passed as an argument to another C++ function exposed in Python. In this\n    case, there is no overhead. Pybind11 will extract the underlying C++\n    function pointer from the wrapped function to sidestep a potential C++ ->\n    Python -> C++ roundtrip. This is demonstrated in :file:`tests/test_callbacks.cpp`.\n\n.. note::\n\n    This functionality is very useful when generating bindings for callbacks in\n    C++ libraries (e.g. 
GUI libraries, asynchronous networking libraries, etc.).\n\n    The file :file:`tests/test_callbacks.cpp` contains a complete example\n    that demonstrates how to work with callbacks and anonymous functions in\n    more detail.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/cast/index.rst",
    "content": "Type conversions\n################\n\nApart from enabling cross-language function calls, a fundamental problem\nthat a binding tool like pybind11 must address is to provide access to\nnative Python types in C++ and vice versa. There are three fundamentally\ndifferent ways to do this—which approach is preferable for a particular type\ndepends on the situation at hand.\n\n1. Use a native C++ type everywhere. In this case, the type must be wrapped\n   using pybind11-generated bindings so that Python can interact with it.\n\n2. Use a native Python type everywhere. It will need to be wrapped so that\n   C++ functions can interact with it.\n\n3. Use a native C++ type on the C++ side and a native Python type on the\n   Python side. pybind11 refers to this as a *type conversion*.\n\n   Type conversions are the most \"natural\" option in the sense that native\n   (non-wrapped) types are used everywhere. The main downside is that a copy\n   of the data must be made on every Python ↔ C++ transition: this is\n   needed since the C++ and Python versions of the same type generally won't\n   have the same memory layout.\n\n   pybind11 can perform many kinds of conversions automatically. An overview\n   is provided in the table \":ref:`conversion_table`\".\n\nThe following subsections discuss the differences between these options in more\ndetail. The main focus in this section is on type conversions, which represent\nthe last case of the above list.\n\n.. toctree::\n   :maxdepth: 1\n\n   overview\n   strings\n   stl\n   functional\n   chrono\n   eigen\n   custom\n\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/cast/overview.rst",
    "content": "Overview\n########\n\n.. rubric:: 1. Native type in C++, wrapper in Python\n\nExposing a custom C++ type using :class:`py::class_` was covered in detail\nin the :doc:`/classes` section. There, the underlying data structure is\nalways the original C++ class while the :class:`py::class_` wrapper provides\na Python interface. Internally, when an object like this is sent from C++ to\nPython, pybind11 will just add the outer wrapper layer over the native C++\nobject. Getting it back from Python is just a matter of peeling off the\nwrapper.\n\n.. rubric:: 2. Wrapper in C++, native type in Python\n\nThis is the exact opposite situation. Now, we have a type which is native to\nPython, like a ``tuple`` or a ``list``. One way to get this data into C++ is\nwith the :class:`py::object` family of wrappers. These are explained in more\ndetail in the :doc:`/advanced/pycpp/object` section. We'll just give a quick\nexample here:\n\n.. code-block:: cpp\n\n    void print_list(py::list my_list) {\n        for (auto item : my_list)\n            std::cout << item << \" \";\n    }\n\n.. code-block:: pycon\n\n    >>> print_list([1, 2, 3])\n    1 2 3\n\nThe Python ``list`` is not converted in any way -- it's just wrapped in a C++\n:class:`py::list` class. At its core it's still a Python object. Copying a\n:class:`py::list` will do the usual reference-counting like in Python.\nReturning the object to Python will just remove the thin wrapper.\n\n.. rubric:: 3. Converting between native C++ and Python types\n\nIn the previous two cases we had a native type in one language and a wrapper in\nthe other. Now, we have native types on both sides and we convert between them.\n\n.. code-block:: cpp\n\n    void print_vector(const std::vector<int> &v) {\n        for (auto item : v)\n            std::cout << item << \"\\n\";\n    }\n\n.. 
code-block:: pycon\n\n    >>> print_vector([1, 2, 3])\n    1 2 3\n\nIn this case, pybind11 will construct a new ``std::vector<int>`` and copy each\nelement from the Python ``list``. The newly constructed object will be passed\nto ``print_vector``. The same thing happens in the other direction: a new\n``list`` is made to match the value returned from C++.\n\nLots of these conversions are supported out of the box, as shown in the table\nbelow. They are very convenient, but keep in mind that these conversions are\nfundamentally based on copying data. This is perfectly fine for small immutable\ntypes but it may become quite expensive for large data structures. This can be\navoided by overriding the automatic conversion with a custom wrapper (i.e. the\nabove-mentioned approach 1). This requires some manual effort and more details\nare available in the :ref:`opaque` section.\n\n.. _conversion_table:\n\nList of all builtin conversions\n-------------------------------\n\nThe following basic data types are supported out of the box (some may require\nan additional extension header to be included). 
To pass other data structures\nas arguments and return values, refer to the section on binding :ref:`classes`.\n\n+------------------------------------+---------------------------+-------------------------------+\n|  Data type                         |  Description              | Header file                   |\n+====================================+===========================+===============================+\n| ``int8_t``, ``uint8_t``            | 8-bit integers            | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``int16_t``, ``uint16_t``          | 16-bit integers           | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``int32_t``, ``uint32_t``          | 32-bit integers           | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``int64_t``, ``uint64_t``          | 64-bit integers           | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``ssize_t``, ``size_t``            | Platform-dependent size   | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``float``, ``double``              | Floating point types      | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``bool``                           | Two-state Boolean type    | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``char``                           | Character literal         | :file:`pybind11/pybind11.h`   
|\n+------------------------------------+---------------------------+-------------------------------+\n| ``char16_t``                       | UTF-16 character literal  | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``char32_t``                       | UTF-32 character literal  | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``wchar_t``                        | Wide character literal    | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``const char *``                   | UTF-8 string literal      | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``const char16_t *``               | UTF-16 string literal     | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``const char32_t *``               | UTF-32 string literal     | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``const wchar_t *``                | Wide string literal       | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::string``                    | STL dynamic UTF-8 string  | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::u16string``                 | STL dynamic UTF-16 string | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::u32string``                 | STL dynamic UTF-32 string | :file:`pybind11/pybind11.h`   
|\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::wstring``                   | STL dynamic wide string   | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::string_view``,              | STL C++17 string views    | :file:`pybind11/pybind11.h`   |\n| ``std::u16string_view``, etc.      |                           |                               |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::pair<T1, T2>``              | Pair of two custom types  | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::tuple<...>``                | Arbitrary tuple of types  | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::reference_wrapper<...>``    | Reference type wrapper    | :file:`pybind11/pybind11.h`   |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::complex<T>``                | Complex numbers           | :file:`pybind11/complex.h`    |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::array<T, Size>``            | STL static array          | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::vector<T>``                 | STL dynamic array         | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::deque<T>``                  | STL double-ended queue    | :file:`pybind11/stl.h`        
|\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::valarray<T>``               | STL value array           | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::list<T>``                   | STL linked list           | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::map<T1, T2>``               | STL ordered map           | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::unordered_map<T1, T2>``     | STL unordered map         | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::set<T>``                    | STL ordered set           | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::unordered_set<T>``          | STL unordered set         | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::optional<T>``               | STL optional type (C++17) | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::experimental::optional<T>`` | STL optional type (exp.)  
| :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::variant<...>``              | Type-safe union (C++17)   | :file:`pybind11/stl.h`        |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::function<...>``             | STL polymorphic function  | :file:`pybind11/functional.h` |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::chrono::duration<...>``     | STL time duration         | :file:`pybind11/chrono.h`     |\n+------------------------------------+---------------------------+-------------------------------+\n| ``std::chrono::time_point<...>``   | STL date/time             | :file:`pybind11/chrono.h`     |\n+------------------------------------+---------------------------+-------------------------------+\n| ``Eigen::Matrix<...>``             | Eigen: dense matrix       | :file:`pybind11/eigen.h`      |\n+------------------------------------+---------------------------+-------------------------------+\n| ``Eigen::Map<...>``                | Eigen: mapped memory      | :file:`pybind11/eigen.h`      |\n+------------------------------------+---------------------------+-------------------------------+\n| ``Eigen::SparseMatrix<...>``       | Eigen: sparse matrix      | :file:`pybind11/eigen.h`      |\n+------------------------------------+---------------------------+-------------------------------+\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/cast/stl.rst",
    "content": "STL containers\n##############\n\nAutomatic conversion\n====================\n\nWhen including the additional header file :file:`pybind11/stl.h`, conversions\nbetween ``std::vector<>``/``std::deque<>``/``std::list<>``/``std::array<>``,\n``std::set<>``/``std::unordered_set<>``, and\n``std::map<>``/``std::unordered_map<>`` and the Python ``list``, ``set`` and\n``dict`` data structures are automatically enabled. The types ``std::pair<>``\nand ``std::tuple<>`` are already supported out of the box with just the core\n:file:`pybind11/pybind11.h` header.\n\nThe major downside of these implicit conversions is that containers must be\nconverted (i.e. copied) on every Python->C++ and C++->Python transition, which\ncan have implications on the program semantics and performance. Please read the\nnext sections for more details and alternative approaches that avoid this.\n\n.. note::\n\n    Arbitrary nesting of any of these types is possible.\n\n.. seealso::\n\n    The file :file:`tests/test_stl.cpp` contains a complete\n    example that demonstrates how to pass STL data types in more detail.\n\n.. _cpp17_container_casters:\n\nC++17 library containers\n========================\n\nThe :file:`pybind11/stl.h` header also includes support for ``std::optional<>``\nand ``std::variant<>``. These require a C++17 compiler and standard library.\nIn C++14 mode, ``std::experimental::optional<>`` is supported if available.\n\nVarious versions of these containers also exist for C++11 (e.g. in Boost).\npybind11 provides an easy way to specialize the ``type_caster`` for such\ntypes:\n\n.. 
code-block:: cpp\n\n    // `boost::optional` as an example -- can be any `std::optional`-like container\n    namespace pybind11 { namespace detail {\n        template <typename T>\n        struct type_caster<boost::optional<T>> : optional_caster<boost::optional<T>> {};\n    }}\n\nThe above should be placed in a header file and included in all translation units\nwhere automatic conversion is needed. Similarly, a specialization can be provided\nfor custom variant types:\n\n.. code-block:: cpp\n\n    // `boost::variant` as an example -- can be any `std::variant`-like container\n    namespace pybind11 { namespace detail {\n        template <typename... Ts>\n        struct type_caster<boost::variant<Ts...>> : variant_caster<boost::variant<Ts...>> {};\n\n        // Specifies the function used to visit the variant -- `apply_visitor` instead of `visit`\n        template <>\n        struct visit_helper<boost::variant> {\n            template <typename... Args>\n            static auto call(Args &&...args) -> decltype(boost::apply_visitor(args...)) {\n                return boost::apply_visitor(args...);\n            }\n        };\n    }} // namespace pybind11::detail\n\nThe ``visit_helper`` specialization is not required if your ``name::variant`` provides\na ``name::visit()`` function. For any other function name, the specialization must be\nincluded to tell pybind11 how to visit the variant.\n\n.. note::\n\n    pybind11 only supports the modern implementation of ``boost::variant``\n    which makes use of variadic templates. This requires Boost 1.56 or newer.\n    Additionally, on Windows, MSVC 2017 is required because ``boost::variant``\n    falls back to the old non-variadic implementation on MSVC 2015.\n\n.. _opaque:\n\nMaking opaque types\n===================\n\npybind11 heavily relies on a template matching mechanism to convert parameters\nand return values that are constructed from STL data types such as vectors,\nlinked lists, hash tables, etc. 
This even works in a recursive manner, for\ninstance to deal with lists of hash maps of pairs of elementary and custom\ntypes, etc.\n\nHowever, a fundamental limitation of this approach is that internal conversions\nbetween Python and C++ types involve a copy operation that prevents\npass-by-reference semantics. What does this mean?\n\nSuppose we bind the following function\n\n.. code-block:: cpp\n\n    void append_1(std::vector<int> &v) {\n       v.push_back(1);\n    }\n\nand call it from Python, the following happens:\n\n.. code-block:: pycon\n\n   >>> v = [5, 6]\n   >>> append_1(v)\n   >>> print(v)\n   [5, 6]\n\nAs you can see, when passing STL data structures by reference, modifications\nare not propagated back the Python side. A similar situation arises when\nexposing STL data structures using the ``def_readwrite`` or ``def_readonly``\nfunctions:\n\n.. code-block:: cpp\n\n    /* ... definition ... */\n\n    class MyClass {\n        std::vector<int> contents;\n    };\n\n    /* ... binding code ... */\n\n    py::class_<MyClass>(m, \"MyClass\")\n        .def(py::init<>())\n        .def_readwrite(\"contents\", &MyClass::contents);\n\nIn this case, properties can be read and written in their entirety. However, an\n``append`` operation involving such a list type has no effect:\n\n.. code-block:: pycon\n\n   >>> m = MyClass()\n   >>> m.contents = [5, 6]\n   >>> print(m.contents)\n   [5, 6]\n   >>> m.contents.append(7)\n   >>> print(m.contents)\n   [5, 6]\n\nFinally, the involved copy operations can be costly when dealing with very\nlarge lists. To deal with all of the above situations, pybind11 provides a\nmacro named ``PYBIND11_MAKE_OPAQUE(T)`` that disables the template-based\nconversion machinery of types, thus rendering them *opaque*. The contents of\nopaque objects are never inspected or extracted, hence they *can* be passed by\nreference. For instance, to turn ``std::vector<int>`` into an opaque type, add\nthe declaration\n\n.. 
code-block:: cpp\n\n    PYBIND11_MAKE_OPAQUE(std::vector<int>);\n\nbefore any binding code (e.g. invocations to ``class_::def()``, etc.). This\nmacro must be specified at the top level (and outside of any namespaces), since\nit instantiates a partial template overload. If your binding code consists of\nmultiple compilation units, it must be present in every file (typically via a\ncommon header) preceding any usage of ``std::vector<int>``. Opaque types must\nalso have a corresponding ``class_`` declaration to associate them with a name\nin Python, and to define a set of available operations, e.g.:\n\n.. code-block:: cpp\n\n    py::class_<std::vector<int>>(m, \"IntVector\")\n        .def(py::init<>())\n        .def(\"clear\", &std::vector<int>::clear)\n        .def(\"pop_back\", &std::vector<int>::pop_back)\n        .def(\"__len__\", [](const std::vector<int> &v) { return v.size(); })\n        .def(\"__iter__\", [](std::vector<int> &v) {\n           return py::make_iterator(v.begin(), v.end());\n        }, py::keep_alive<0, 1>()) /* Keep vector alive while iterator is used */\n        // ....\n\n.. seealso::\n\n    The file :file:`tests/test_opaque_types.cpp` contains a complete\n    example that demonstrates how to create and expose opaque types using\n    pybind11 in more detail.\n\n.. _stl_bind:\n\nBinding STL containers\n======================\n\nThe ability to expose STL containers as native Python objects is a fairly\ncommon request, hence pybind11 also provides an optional header file named\n:file:`pybind11/stl_bind.h` that does exactly this. The mapped containers try\nto match the behavior of their native Python counterparts as much as possible.\n\nThe following example showcases usage of :file:`pybind11/stl_bind.h`:\n\n.. 
code-block:: cpp\n\n    // Don't forget this\n    #include <pybind11/stl_bind.h>\n\n    PYBIND11_MAKE_OPAQUE(std::vector<int>);\n    PYBIND11_MAKE_OPAQUE(std::map<std::string, double>);\n\n    // ...\n\n    // later in binding code:\n    py::bind_vector<std::vector<int>>(m, \"VectorInt\");\n    py::bind_map<std::map<std::string, double>>(m, \"MapStringDouble\");\n\nWhen binding STL containers pybind11 considers the types of the container's\nelements to decide whether the container should be confined to the local module\n(via the :ref:`module_local` feature).  If the container element types are\nanything other than already-bound custom types bound without\n``py::module_local()`` the container binding will have ``py::module_local()``\napplied.  This includes converting types such as numeric types, strings, Eigen\ntypes; and types that have not yet been bound at the time of the stl container\nbinding.  This module-local binding is designed to avoid potential conflicts\nbetween module bindings (for example, from two separate modules each attempting\nto bind ``std::vector<int>`` as a python type).\n\nIt is possible to override this behavior to force a definition to be either\nmodule-local or global.  To do so, you can pass the attributes\n``py::module_local()`` (to make the binding module-local) or\n``py::module_local(false)`` (to make the binding global) into the\n``py::bind_vector`` or ``py::bind_map`` arguments:\n\n.. code-block:: cpp\n\n    py::bind_vector<std::vector<int>>(m, \"VectorInt\", py::module_local(false));\n\nNote, however, that such a global binding would make it impossible to load this\nmodule at the same time as any other pybind module that also attempts to bind\nthe same container type (``std::vector<int>`` in the above example).\n\nSee :ref:`module_local` for more details on module-local bindings.\n\n.. seealso::\n\n    The file :file:`tests/test_stl_binders.cpp` shows how to use the\n    convenience STL container wrappers.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/cast/strings.rst",
    "content": "Strings, bytes and Unicode conversions\n######################################\n\n.. note::\n\n    This section discusses string handling in terms of Python 3 strings. For\n    Python 2.7, replace all occurrences of ``str`` with ``unicode`` and\n    ``bytes`` with ``str``.  Python 2.7 users may find it best to use ``from\n    __future__ import unicode_literals`` to avoid unintentionally using ``str``\n    instead of ``unicode``.\n\nPassing Python strings to C++\n=============================\n\nWhen a Python ``str`` is passed from Python to a C++ function that accepts\n``std::string`` or ``char *`` as arguments, pybind11 will encode the Python\nstring to UTF-8. All Python ``str`` can be encoded in UTF-8, so this operation\ndoes not fail.\n\nThe C++ language is encoding agnostic. It is the responsibility of the\nprogrammer to track encodings. It's often easiest to simply `use UTF-8\neverywhere <http://utf8everywhere.org/>`_.\n\n.. code-block:: c++\n\n    m.def(\"utf8_test\",\n        [](const std::string &s) {\n            cout << \"utf-8 is icing on the cake.\\n\";\n            cout << s;\n        }\n    );\n    m.def(\"utf8_charptr\",\n        [](const char *s) {\n            cout << \"My favorite food is\\n\";\n            cout << s;\n        }\n    );\n\n.. code-block:: python\n\n    >>> utf8_test('🎂')\n    utf-8 is icing on the cake.\n    🎂\n\n    >>> utf8_charptr('🍕')\n    My favorite food is\n    🍕\n\n.. note::\n\n    Some terminal emulators do not support UTF-8 or emoji fonts and may not\n    display the example above correctly.\n\nThe results are the same whether the C++ function accepts arguments by value or\nreference, and whether or not ``const`` is used.\n\nPassing bytes to C++\n--------------------\n\nA Python ``bytes`` object will be passed to C++ functions that accept\n``std::string`` or ``char*`` *without* conversion.  
On Python 3, in order to\nmake a function *only* accept ``bytes`` (and not ``str``), declare it as taking\na ``py::bytes`` argument.\n\n\nReturning C++ strings to Python\n===============================\n\nWhen a C++ function returns a ``std::string`` or ``char*`` to a Python caller,\n**pybind11 will assume that the string is valid UTF-8** and will decode it to a\nnative Python ``str``, using the same API as Python uses to perform\n``bytes.decode('utf-8')``. If this implicit conversion fails, pybind11 will\nraise a ``UnicodeDecodeError``.\n\n.. code-block:: c++\n\n    m.def(\"std_string_return\",\n        []() {\n            return std::string(\"This string needs to be UTF-8 encoded\");\n        }\n    );\n\n.. code-block:: python\n\n    >>> isinstance(example.std_string_return(), str)\n    True\n\n\nBecause UTF-8 is inclusive of pure ASCII, there is never any issue with\nreturning a pure ASCII string to Python. If there is any possibility that the\nstring is not pure ASCII, it is necessary to ensure the encoding is valid\nUTF-8.\n\n.. warning::\n\n    Implicit conversion assumes that a returned ``char *`` is null-terminated.\n    If there is no null terminator a buffer overrun will occur.\n\nExplicit conversions\n--------------------\n\nIf some C++ code constructs a ``std::string`` that is not a UTF-8 string, one\ncan perform an explicit conversion and return a ``py::str`` object. Explicit\nconversion has the same overhead as implicit conversion.\n\n.. code-block:: c++\n\n    // This uses the Python C API to convert Latin-1 to Unicode\n    m.def(\"str_output\",\n        []() {\n            std::string s = \"Send your r\\xe9sum\\xe9 to Alice in HR\"; // Latin-1\n            py::str py_s = PyUnicode_DecodeLatin1(s.data(), s.length());\n            return py_s;\n        }\n    );\n\n.. 
code-block:: python\n\n    >>> str_output()\n    'Send your résumé to Alice in HR'\n\nThe `Python C API\n<https://docs.python.org/3/c-api/unicode.html#built-in-codecs>`_ provides\nseveral built-in codecs.\n\n\nOne could also use a third party encoding library such as libiconv to transcode\nto UTF-8.\n\nReturn C++ strings without conversion\n-------------------------------------\n\nIf the data in a C++ ``std::string`` does not represent text and should be\nreturned to Python as ``bytes``, then one can return the data as a\n``py::bytes`` object.\n\n.. code-block:: c++\n\n    m.def(\"return_bytes\",\n        []() {\n            std::string s(\"\\xba\\xd0\\xba\\xd0\");  // Not valid UTF-8\n            return py::bytes(s);  // Return the data without transcoding\n        }\n    );\n\n.. code-block:: python\n\n    >>> example.return_bytes()\n    b'\\xba\\xd0\\xba\\xd0'\n\n\nNote the asymmetry: pybind11 will convert ``bytes`` to ``std::string`` without\nencoding, but cannot convert ``std::string`` back to ``bytes`` implicitly.\n\n.. code-block:: c++\n\n    m.def(\"asymmetry\",\n        [](std::string s) {  // Accepts str or bytes from Python\n            return s;  // Looks harmless, but implicitly converts to str\n        }\n    );\n\n.. code-block:: python\n\n    >>> isinstance(example.asymmetry(b\"have some bytes\"), str)\n    True\n\n    >>> example.asymmetry(b\"\\xba\\xd0\\xba\\xd0\")  # invalid utf-8 as bytes\n    UnicodeDecodeError: 'utf-8' codec can't decode byte 0xba in position 0: invalid start byte\n\n\nWide character strings\n======================\n\nWhen a Python ``str`` is passed to a C++ function expecting ``std::wstring``,\n``wchar_t*``, ``std::u16string`` or ``std::u32string``, the ``str`` will be\nencoded to UTF-16 or UTF-32 depending on how the C++ compiler implements each\ntype, in the platform's native endianness. 
When strings of these types are\nreturned, they are assumed to contain valid UTF-16 or UTF-32, and will be\ndecoded to Python ``str``.\n\n.. code-block:: c++\n\n    #define UNICODE\n    #include <windows.h>\n\n    m.def(\"set_window_text\",\n        [](HWND hwnd, std::wstring s) {\n            // Call SetWindowText with null-terminated UTF-16 string\n            ::SetWindowText(hwnd, s.c_str());\n        }\n    );\n    m.def(\"get_window_text\",\n        [](HWND hwnd) {\n            const int buffer_size = ::GetWindowTextLength(hwnd) + 1;\n            auto buffer = std::make_unique< wchar_t[] >(buffer_size);\n\n            ::GetWindowText(hwnd, buffer.data(), buffer_size);\n\n            std::wstring text(buffer.get());\n\n            // wstring will be converted to Python str\n            return text;\n        }\n    );\n\n.. warning::\n\n    Wide character strings may not work as described on Python 2.7 or Python\n    3.3 compiled with ``--enable-unicode=ucs2``.\n\nStrings in multibyte encodings such as Shift-JIS must be transcoded to a\nUTF-8/16/32 before being returned to Python.\n\n\nCharacter literals\n==================\n\nC++ functions that accept character literals as input will receive the first\ncharacter of a Python ``str`` as their input. If the string is longer than one\nUnicode character, trailing characters will be ignored.\n\nWhen a character literal is returned from C++ (such as a ``char`` or a\n``wchar_t``), it will be converted to a ``str`` that represents the single\ncharacter.\n\n.. code-block:: c++\n\n    m.def(\"pass_char\", [](char c) { return c; });\n    m.def(\"pass_wchar\", [](wchar_t w) { return w; });\n\n.. code-block:: python\n\n    >>> example.pass_char('A')\n    'A'\n\nWhile C++ will cast integers to character types (``char c = 0x65;``), pybind11\ndoes not convert Python integers to characters implicitly. The Python function\n``chr()`` can be used to convert integers to characters.\n\n.. 
code-block:: python\n\n    >>> example.pass_char(0x65)\n    TypeError\n\n    >>> example.pass_char(chr(0x65))\n    'A'\n\nIf the desire is to work with an 8-bit integer, use ``int8_t`` or ``uint8_t``\nas the argument type.\n\nGrapheme clusters\n-----------------\n\nA single grapheme may be represented by two or more Unicode characters. For\nexample 'é' is usually represented as U+00E9 but can also be expressed as the\ncombining character sequence U+0065 U+0301 (that is, the letter 'e' followed by\na combining acute accent). The combining character will be lost if the\ntwo-character sequence is passed as an argument, even though it renders as a\nsingle grapheme.\n\n.. code-block:: python\n\n    >>> example.pass_wchar('é')\n    'é'\n\n    >>> combining_e_acute = 'e' + '\\u0301'\n\n    >>> combining_e_acute\n    'é'\n\n    >>> combining_e_acute == 'é'\n    False\n\n    >>> example.pass_wchar(combining_e_acute)\n    'e'\n\nNormalizing combining characters before passing the character literal to C++\nmay resolve *some* of these issues:\n\n.. code-block:: python\n\n    >>> example.pass_wchar(unicodedata.normalize('NFC', combining_e_acute))\n    'é'\n\nIn some languages (Thai for example), there are `graphemes that cannot be\nexpressed as a single Unicode code point\n<http://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries>`_, so there is\nno way to capture them in a C++ character type.\n\n\nC++17 string views\n==================\n\nC++17 string views are automatically supported when compiling in C++17 mode.\nThey follow the same rules for encoding and decoding as the corresponding STL\nstring type (for example, a ``std::u16string_view`` argument will be passed\nUTF-16-encoded data, and a returned ``std::string_view`` will be decoded as\nUTF-8).\n\nReferences\n==========\n\n* `The Absolute Minimum Every Software Developer Absolutely, Positively Must Know About Unicode and Character Sets (No Excuses!) 
<https://www.joelonsoftware.com/2003/10/08/the-absolute-minimum-every-software-developer-absolutely-positively-must-know-about-unicode-and-character-sets-no-excuses/>`_\n* `C++ - Using STL Strings at Win32 API Boundaries <https://msdn.microsoft.com/en-ca/magazine/mt238407.aspx>`_\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/classes.rst",
    "content": "Classes\n#######\n\nThis section presents advanced binding code for classes and it is assumed\nthat you are already familiar with the basics from :doc:`/classes`.\n\n.. _overriding_virtuals:\n\nOverriding virtual functions in Python\n======================================\n\nSuppose that a C++ class or interface has a virtual function that we'd like\nto override from within Python (we'll focus on the class ``Animal``; ``Dog`` is\ngiven as a specific example of how one would do this with traditional C++\ncode).\n\n.. code-block:: cpp\n\n    class Animal {\n    public:\n        virtual ~Animal() { }\n        virtual std::string go(int n_times) = 0;\n    };\n\n    class Dog : public Animal {\n    public:\n        std::string go(int n_times) override {\n            std::string result;\n            for (int i=0; i<n_times; ++i)\n                result += \"woof! \";\n            return result;\n        }\n    };\n\nLet's also suppose that we are given a plain function which calls the\nfunction ``go()`` on an arbitrary ``Animal`` instance.\n\n.. code-block:: cpp\n\n    std::string call_go(Animal *animal) {\n        return animal->go(3);\n    }\n\nNormally, the binding code for these classes would look as follows:\n\n.. code-block:: cpp\n\n    PYBIND11_MODULE(example, m) {\n        py::class_<Animal>(m, \"Animal\")\n            .def(\"go\", &Animal::go);\n\n        py::class_<Dog, Animal>(m, \"Dog\")\n            .def(py::init<>());\n\n        m.def(\"call_go\", &call_go);\n    }\n\nHowever, these bindings are impossible to extend: ``Animal`` is not\nconstructible, and we clearly require some kind of \"trampoline\" that\nredirects virtual calls back to Python.\n\nDefining a new type of ``Animal`` from within Python is possible but requires a\nhelper class that is defined as follows:\n\n.. 
code-block:: cpp\n\n    class PyAnimal : public Animal {\n    public:\n        /* Inherit the constructors */\n        using Animal::Animal;\n\n        /* Trampoline (need one for each virtual function) */\n        std::string go(int n_times) override {\n            PYBIND11_OVERLOAD_PURE(\n                std::string, /* Return type */\n                Animal,      /* Parent class */\n                go,          /* Name of function in C++ (must match Python name) */\n                n_times      /* Argument(s) */\n            );\n        }\n    };\n\nThe macro :func:`PYBIND11_OVERLOAD_PURE` should be used for pure virtual\nfunctions, and :func:`PYBIND11_OVERLOAD` should be used for functions which have\na default implementation.  There are also two alternate macros\n:func:`PYBIND11_OVERLOAD_PURE_NAME` and :func:`PYBIND11_OVERLOAD_NAME` which\ntake a string-valued name argument between the *Parent class* and *Name of the\nfunction* slots, which defines the name of function in Python. This is required\nwhen the C++ and Python versions of the\nfunction have different names, e.g.  ``operator()`` vs ``__call__``.\n\nThe binding code also needs a few minor adaptations (highlighted):\n\n.. code-block:: cpp\n    :emphasize-lines: 2,3\n\n    PYBIND11_MODULE(example, m) {\n        py::class_<Animal, PyAnimal /* <--- trampoline*/>(m, \"Animal\")\n            .def(py::init<>())\n            .def(\"go\", &Animal::go);\n\n        py::class_<Dog, Animal>(m, \"Dog\")\n            .def(py::init<>());\n\n        m.def(\"call_go\", &call_go);\n    }\n\nImportantly, pybind11 is made aware of the trampoline helper class by\nspecifying it as an extra template argument to :class:`class_`. (This can also\nbe combined with other template arguments such as a custom holder type; the\norder of template types does not matter).  Following this, we are able to\ndefine a constructor as usual.\n\nBindings should be made against the actual class, not the trampoline helper class.\n\n.. 
code-block:: cpp\n    :emphasize-lines: 3\n\n    py::class_<Animal, PyAnimal /* <--- trampoline*/>(m, \"Animal\");\n        .def(py::init<>())\n        .def(\"go\", &PyAnimal::go); /* <--- THIS IS WRONG, use &Animal::go */\n\nNote, however, that the above is sufficient for allowing python classes to\nextend ``Animal``, but not ``Dog``: see :ref:`virtual_and_inheritance` for the\nnecessary steps required to providing proper overload support for inherited\nclasses.\n\nThe Python session below shows how to override ``Animal::go`` and invoke it via\na virtual method call.\n\n.. code-block:: pycon\n\n    >>> from example import *\n    >>> d = Dog()\n    >>> call_go(d)\n    u'woof! woof! woof! '\n    >>> class Cat(Animal):\n    ...     def go(self, n_times):\n    ...             return \"meow! \" * n_times\n    ...\n    >>> c = Cat()\n    >>> call_go(c)\n    u'meow! meow! meow! '\n\nIf you are defining a custom constructor in a derived Python class, you *must*\nensure that you explicitly call the bound C++ constructor using ``__init__``,\n*regardless* of whether it is a default constructor or not. Otherwise, the\nmemory for the C++ portion of the instance will be left uninitialized, which\nwill generally leave the C++ instance in an invalid state and cause undefined\nbehavior if the C++ instance is subsequently used.\n\nHere is an example:\n\n.. code-block:: python\n\n    class Dachschund(Dog):\n        def __init__(self, name):\n            Dog.__init__(self) # Without this, undefined behavior may occur if the C++ portions are referenced.\n            self.name = name\n        def bark(self):\n            return \"yap!\"\n\nNote that a direct ``__init__`` constructor *should be called*, and ``super()``\nshould not be used. 
For simple cases of linear inheritance, ``super()``\nmay work, but once you begin mixing Python and C++ multiple inheritance,\nthings will fall apart due to differences between Python's MRO and C++'s\nmechanisms.\n\nPlease take a look at the :ref:`macro_notes` before using this feature.\n\n.. note::\n\n    When the overridden type returns a reference or pointer to a type that\n    pybind11 converts from Python (for example, numeric values, std::string,\n    and other built-in value-converting types), there are some limitations to\n    be aware of:\n\n    - because in these cases there is no C++ variable to reference (the value\n      is stored in the referenced Python variable), pybind11 provides one in\n      the PYBIND11_OVERLOAD macros (when needed) with static storage duration.\n      Note that this means that invoking the overloaded method on *any*\n      instance will change the referenced value stored in *all* instances of\n      that type.\n\n    - Attempts to modify a non-const reference will not have the desired\n      effect: it will change only the static cache variable, but this change\n      will not propagate to underlying Python instance, and the change will be\n      replaced the next time the overload is invoked.\n\n.. seealso::\n\n    The file :file:`tests/test_virtual_functions.cpp` contains a complete\n    example that demonstrates how to override virtual functions using pybind11\n    in more detail.\n\n.. _virtual_and_inheritance:\n\nCombining virtual functions and inheritance\n===========================================\n\nWhen combining virtual methods with inheritance, you need to be sure to provide\nan override for each method for which you want to allow overrides from derived\npython classes.  For example, suppose we extend the above ``Animal``/``Dog``\nexample as follows:\n\n.. 
code-block:: cpp\n\n    class Animal {\n    public:\n        virtual std::string go(int n_times) = 0;\n        virtual std::string name() { return \"unknown\"; }\n    };\n    class Dog : public Animal {\n    public:\n        std::string go(int n_times) override {\n            std::string result;\n            for (int i=0; i<n_times; ++i)\n                result += bark() + \" \";\n            return result;\n        }\n        virtual std::string bark() { return \"woof!\"; }\n    };\n\nthen the trampoline class for ``Animal`` must, as described in the previous\nsection, override ``go()`` and ``name()``, but in order to allow python code to\ninherit properly from ``Dog``, we also need a trampoline class for ``Dog`` that\noverrides both the added ``bark()`` method *and* the ``go()`` and ``name()``\nmethods inherited from ``Animal`` (even though ``Dog`` doesn't directly\noverride the ``name()`` method):\n\n.. code-block:: cpp\n\n    class PyAnimal : public Animal {\n    public:\n        using Animal::Animal; // Inherit constructors\n        std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, Animal, go, n_times); }\n        std::string name() override { PYBIND11_OVERLOAD(std::string, Animal, name, ); }\n    };\n    class PyDog : public Dog {\n    public:\n        using Dog::Dog; // Inherit constructors\n        std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, Dog, go, n_times); }\n        std::string name() override { PYBIND11_OVERLOAD(std::string, Dog, name, ); }\n        std::string bark() override { PYBIND11_OVERLOAD(std::string, Dog, bark, ); }\n    };\n\n.. note::\n\n    Note the trailing commas in the ``PYBIND11_OVERLOAD`` calls to ``name()``\n    and ``bark()``. These are needed to portably implement a trampoline for a\n    function that does not take any arguments. 
For functions that take\n    a nonzero number of arguments, the trailing comma must be omitted.\n\nA registered class derived from a pybind11-registered class with virtual\nmethods requires a similar trampoline class, *even if* it doesn't explicitly\ndeclare or override any virtual methods itself:\n\n.. code-block:: cpp\n\n    class Husky : public Dog {};\n    class PyHusky : public Husky {\n    public:\n        using Husky::Husky; // Inherit constructors\n        std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, Husky, go, n_times); }\n        std::string name() override { PYBIND11_OVERLOAD(std::string, Husky, name, ); }\n        std::string bark() override { PYBIND11_OVERLOAD(std::string, Husky, bark, ); }\n    };\n\nThere is, however, a technique that can be used to avoid this duplication\n(which can be especially helpful for a base class with several virtual\nmethods).  The technique involves using template trampoline classes, as\nfollows:\n\n.. code-block:: cpp\n\n    template <class AnimalBase = Animal> class PyAnimal : public AnimalBase {\n    public:\n        using AnimalBase::AnimalBase; // Inherit constructors\n        std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, AnimalBase, go, n_times); }\n        std::string name() override { PYBIND11_OVERLOAD(std::string, AnimalBase, name, ); }\n    };\n    template <class DogBase = Dog> class PyDog : public PyAnimal<DogBase> {\n    public:\n        using PyAnimal<DogBase>::PyAnimal; // Inherit constructors\n        // Override PyAnimal's pure virtual go() with a non-pure one:\n        std::string go(int n_times) override { PYBIND11_OVERLOAD(std::string, DogBase, go, n_times); }\n        std::string bark() override { PYBIND11_OVERLOAD(std::string, DogBase, bark, ); }\n    };\n\nThis technique has the advantage of requiring just one trampoline method to be\ndeclared per virtual method and pure virtual method override.  
It does,\nhowever, require the compiler to generate at least as many methods (and\npossibly more, if both pure virtual and overridden pure virtual methods are\nexposed, as above).\n\nThe classes are then registered with pybind11 using:\n\n.. code-block:: cpp\n\n    py::class_<Animal, PyAnimal<>> animal(m, \"Animal\");\n    py::class_<Dog, PyDog<>> dog(m, \"Dog\");\n    py::class_<Husky, PyDog<Husky>> husky(m, \"Husky\");\n    // ... add animal, dog, husky definitions\n\nNote that ``Husky`` did not require a dedicated trampoline template class at\nall, since it neither declares any new virtual methods nor provides any pure\nvirtual method implementations.\n\nWith either the repeated-virtuals or templated trampoline methods in place, you\ncan now create a python class that inherits from ``Dog``:\n\n.. code-block:: python\n\n    class ShihTzu(Dog):\n        def bark(self):\n            return \"yip!\"\n\n.. seealso::\n\n    See the file :file:`tests/test_virtual_functions.cpp` for complete examples\n    using both the duplication and templated trampoline approaches.\n\n.. _extended_aliases:\n\nExtended trampoline class functionality\n=======================================\n\nThe trampoline classes described in the previous sections are, by default, only\ninitialized when needed.  More specifically, they are initialized when a python\nclass actually inherits from a registered type (instead of merely creating an\ninstance of the registered type), or when a registered constructor is only\nvalid for the trampoline class but not the registered class.  
This is primarily\nfor performance reasons: when the trampoline class is not needed for anything\nexcept virtual method dispatching, not initializing the trampoline class\nimproves performance by avoiding needing to do a run-time check to see if the\ninheriting python instance has an overloaded method.\n\nSometimes, however, it is useful to always initialize a trampoline class as an\nintermediate class that does more than just handle virtual method dispatching.\nFor example, such a class might perform extra class initialization, extra\ndestruction operations, and might define new members and methods to enable a\nmore python-like interface to a class.\n\nIn order to tell pybind11 that it should *always* initialize the trampoline\nclass when creating new instances of a type, the class constructors should be\ndeclared using ``py::init_alias<Args, ...>()`` instead of the usual\n``py::init<Args, ...>()``.  This forces construction via the trampoline class,\nensuring member initialization and (eventual) destruction.\n\n.. seealso::\n\n    See the file :file:`tests/test_virtual_functions.cpp` for complete examples\n    showing both normal and forced trampoline instantiation.\n\n.. _custom_constructors:\n\nCustom constructors\n===================\n\nThe syntax for binding constructors was previously introduced, but it only\nworks when a constructor of the appropriate arguments actually exists on the\nC++ side.  To extend this to more general cases, pybind11 makes it possible\nto bind factory functions as constructors. For example, suppose you have a\nclass like this:\n\n.. 
code-block:: cpp\n\n    class Example {\n    private:\n        Example(int); // private constructor\n    public:\n        // Factory function:\n        static Example create(int a) { return Example(a); }\n    };\n\n    py::class_<Example>(m, \"Example\")\n        .def(py::init(&Example::create));\n\nWhile it is possible to create a straightforward binding of the static\n``create`` method, it may sometimes be preferable to expose it as a constructor\non the Python side. This can be accomplished by calling ``.def(py::init(...))``\nwith the function reference returning the new instance passed as an argument.\nIt is also possible to use this approach to bind a function returning a new\ninstance by raw pointer or by the holder (e.g. ``std::unique_ptr``).\n\nThe following example shows the different approaches:\n\n.. code-block:: cpp\n\n    class Example {\n    private:\n        Example(int); // private constructor\n    public:\n        // Factory function - returned by value:\n        static Example create(int a) { return Example(a); }\n\n        // These constructors are publicly callable:\n        Example(double);\n        Example(int, int);\n        Example(std::string);\n    };\n\n    py::class_<Example>(m, \"Example\")\n        // Bind the factory function as a constructor:\n        .def(py::init(&Example::create))\n        // Bind a lambda function returning a pointer wrapped in a holder:\n        .def(py::init([](std::string arg) {\n            return std::unique_ptr<Example>(new Example(arg));\n        }))\n        // Return a raw pointer:\n        .def(py::init([](int a, int b) { return new Example(a, b); }))\n        // You can mix the above with regular C++ constructor bindings as well:\n        .def(py::init<double>())\n        ;\n\nWhen the constructor is invoked from Python, pybind11 will call the factory\nfunction and store the resulting C++ instance in the Python instance.\n\nWhen combining factory functions constructors with :ref:`virtual 
function\ntrampolines <overriding_virtuals>` there are two approaches.  The first is to\nadd a constructor to the alias class that takes a base value by\nrvalue-reference.  If such a constructor is available, it will be used to\nconstruct an alias instance from the value returned by the factory function.\nThe second option is to provide two factory functions to ``py::init()``: the\nfirst will be invoked when no alias class is required (i.e. when the class is\nbeing used but not inherited from in Python), and the second will be invoked\nwhen an alias is required.\n\nYou can also specify a single factory function that always returns an alias\ninstance: this will result in behaviour similar to ``py::init_alias<...>()``,\nas described in the :ref:`extended trampoline class documentation\n<extended_aliases>`.\n\nThe following example shows the different factory approaches for a class with\nan alias:\n\n.. code-block:: cpp\n\n    #include <pybind11/factory.h>\n    class Example {\n    public:\n        // ...\n        virtual ~Example() = default;\n    };\n    class PyExample : public Example {\n    public:\n        using Example::Example;\n        PyExample(Example &&base) : Example(std::move(base)) {}\n    };\n    py::class_<Example, PyExample>(m, \"Example\")\n        // Returns an Example pointer.  If a PyExample is needed, the Example\n        // instance will be moved via the extra constructor in PyExample, above.\n        .def(py::init([]() { return new Example(); }))\n        // Two callbacks:\n        .def(py::init([]() { return new Example(); } /* no alias needed */,\n                      []() { return new PyExample(); } /* alias needed */))\n        // *Always* returns an alias instance (like py::init_alias<>())\n        .def(py::init([]() { return new PyExample(); }))\n        ;\n\nBrace initialization\n--------------------\n\n``pybind11::init<>`` internally uses C++11 brace initialization to call the\nconstructor of the target class. 
This means that it can be used to bind\n*implicit* constructors as well:\n\n.. code-block:: cpp\n\n    struct Aggregate {\n        int a;\n        std::string b;\n    };\n\n    py::class_<Aggregate>(m, \"Aggregate\")\n        .def(py::init<int, const std::string &>());\n\n.. note::\n\n    Note that brace initialization preferentially invokes constructor overloads\n    taking a ``std::initializer_list``. In the rare event that this causes an\n    issue, you can work around it by using ``py::init(...)`` with a lambda\n    function that constructs the new object as desired.\n\n.. _classes_with_non_public_destructors:\n\nNon-public destructors\n======================\n\nIf a class has a private or protected destructor (as might e.g. be the case in\na singleton pattern), a compile error will occur when creating bindings via\npybind11. The underlying issue is that the ``std::unique_ptr`` holder type that\nis responsible for managing the lifetime of instances will reference the\ndestructor even if no deallocations ever take place. In order to expose classes\nwith private or protected destructors, it is possible to override the holder\ntype via a holder type argument to ``class_``. Pybind11 provides a helper class\n``py::nodelete`` that disables any destructor invocations. In this case, it is\ncrucial that instances are deallocated on the C++ side to avoid memory leaks.\n\n.. code-block:: cpp\n\n    /* ... definition ... */\n\n    class MyClass {\n    private:\n        ~MyClass() { }\n    };\n\n    /* ... binding code ... */\n\n    py::class_<MyClass, std::unique_ptr<MyClass, py::nodelete>>(m, \"MyClass\")\n        .def(py::init<>())\n\n.. _implicit_conversions:\n\nImplicit conversions\n====================\n\nSuppose that instances of two types ``A`` and ``B`` are used in a project, and\nthat an ``A`` can easily be converted into an instance of type ``B`` (examples of this\ncould be a fixed and an arbitrary precision number type).\n\n.. 
code-block:: cpp\n\n    py::class_<A>(m, \"A\")\n        /// ... members ...\n\n    py::class_<B>(m, \"B\")\n        .def(py::init<A>())\n        /// ... members ...\n\n    m.def(\"func\",\n        [](const B &) { /* .... */ }\n    );\n\nTo invoke the function ``func`` using a variable ``a`` containing an ``A``\ninstance, we'd have to write ``func(B(a))`` in Python. On the other hand, C++\nwill automatically apply an implicit type conversion, which makes it possible\nto directly write ``func(a)``.\n\nIn this situation (i.e. where ``B`` has a constructor that converts from\n``A``), the following statement enables similar implicit conversions on the\nPython side:\n\n.. code-block:: cpp\n\n    py::implicitly_convertible<A, B>();\n\n.. note::\n\n    Implicit conversions from ``A`` to ``B`` only work when ``B`` is a custom\n    data type that is exposed to Python via pybind11.\n\n    To prevent runaway recursion, implicit conversions are non-reentrant: an\n    implicit conversion invoked as part of another implicit conversion of the\n    same type (i.e. from ``A`` to ``B``) will fail.\n\n.. _static_properties:\n\nStatic properties\n=================\n\nThe section on :ref:`properties` discussed the creation of instance properties\nthat are implemented in terms of C++ getters and setters.\n\nStatic properties can also be created in a similar way to expose getters and\nsetters of static class attributes. Note that the implicit ``self`` argument\nalso exists in this case and is used to pass the Python ``type`` subclass\ninstance. This parameter will often not be needed by the C++ side, and the\nfollowing example illustrates how to instantiate a lambda getter function\nthat ignores it:\n\n.. 
code-block:: cpp\n\n    py::class_<Foo>(m, \"Foo\")\n        .def_property_readonly_static(\"foo\", [](py::object /* self */) { return Foo(); });\n\nOperator overloading\n====================\n\nSuppose that we're given the following ``Vector2`` class with a vector addition\nand scalar multiplication operation, all implemented using overloaded operators\nin C++.\n\n.. code-block:: cpp\n\n    class Vector2 {\n    public:\n        Vector2(float x, float y) : x(x), y(y) { }\n\n        Vector2 operator+(const Vector2 &v) const { return Vector2(x + v.x, y + v.y); }\n        Vector2 operator*(float value) const { return Vector2(x * value, y * value); }\n        Vector2& operator+=(const Vector2 &v) { x += v.x; y += v.y; return *this; }\n        Vector2& operator*=(float v) { x *= v; y *= v; return *this; }\n\n        friend Vector2 operator*(float f, const Vector2 &v) {\n            return Vector2(f * v.x, f * v.y);\n        }\n\n        std::string toString() const {\n            return \"[\" + std::to_string(x) + \", \" + std::to_string(y) + \"]\";\n        }\n    private:\n        float x, y;\n    };\n\nThe following snippet shows how the above operators can be conveniently exposed\nto Python.\n\n.. code-block:: cpp\n\n    #include <pybind11/operators.h>\n\n    PYBIND11_MODULE(example, m) {\n        py::class_<Vector2>(m, \"Vector2\")\n            .def(py::init<float, float>())\n            .def(py::self + py::self)\n            .def(py::self += py::self)\n            .def(py::self *= float())\n            .def(float() * py::self)\n            .def(py::self * float())\n            .def(\"__repr__\", &Vector2::toString);\n    }\n\nNote that a line like\n\n.. code-block:: cpp\n\n            .def(py::self * float())\n\nis really just short hand notation for\n\n.. 
code-block:: cpp\n\n    .def(\"__mul__\", [](const Vector2 &a, float b) {\n        return a * b;\n    }, py::is_operator())\n\nThis can be useful for exposing additional operators that don't exist on the\nC++ side, or to perform other types of customization. The ``py::is_operator``\nflag marker is needed to inform pybind11 that this is an operator, which\nreturns ``NotImplemented`` when invoked with incompatible arguments rather than\nthrowing a type error.\n\n.. note::\n\n    To use the more convenient ``py::self`` notation, the additional\n    header file :file:`pybind11/operators.h` must be included.\n\n.. seealso::\n\n    The file :file:`tests/test_operator_overloading.cpp` contains a\n    complete example that demonstrates how to work with overloaded operators in\n    more detail.\n\n.. _pickling:\n\nPickling support\n================\n\nPython's ``pickle`` module provides a powerful facility to serialize and\nde-serialize a Python object graph into a binary data stream. To pickle and\nunpickle C++ classes using pybind11, a ``py::pickle()`` definition must be\nprovided. Suppose the class in question has the following signature:\n\n.. code-block:: cpp\n\n    class Pickleable {\n    public:\n        Pickleable(const std::string &value) : m_value(value) { }\n        const std::string &value() const { return m_value; }\n\n        void setExtra(int extra) { m_extra = extra; }\n        int extra() const { return m_extra; }\n    private:\n        std::string m_value;\n        int m_extra = 0;\n    };\n\nPickling support in Python is enabled by defining the ``__setstate__`` and\n``__getstate__`` methods [#f3]_. For pybind11 classes, use ``py::pickle()``\nto bind these two functions:\n\n.. 
code-block:: cpp\n\n    py::class_<Pickleable>(m, \"Pickleable\")\n        .def(py::init<std::string>())\n        .def(\"value\", &Pickleable::value)\n        .def(\"extra\", &Pickleable::extra)\n        .def(\"setExtra\", &Pickleable::setExtra)\n        .def(py::pickle(\n            [](const Pickleable &p) { // __getstate__\n                /* Return a tuple that fully encodes the state of the object */\n                return py::make_tuple(p.value(), p.extra());\n            },\n            [](py::tuple t) { // __setstate__\n                if (t.size() != 2)\n                    throw std::runtime_error(\"Invalid state!\");\n\n                /* Create a new C++ instance */\n                Pickleable p(t[0].cast<std::string>());\n\n                /* Assign any additional state */\n                p.setExtra(t[1].cast<int>());\n\n                return p;\n            }\n        ));\n\nThe ``__setstate__`` part of the ``py::pickle()`` definition follows the same\nrules as the single-argument version of ``py::init()``. The return type can be\na value, pointer or holder type. See :ref:`custom_constructors` for details.\n\nAn instance can now be pickled as follows:\n\n.. code-block:: python\n\n    try:\n        import cPickle as pickle  # Use cPickle on Python 2.7\n    except ImportError:\n        import pickle\n\n    p = Pickleable(\"test_value\")\n    p.setExtra(15)\n    data = pickle.dumps(p, 2)\n\nNote that only the cPickle module is supported on Python 2.7. The second\nargument to ``dumps`` is also crucial: it selects the pickle protocol version\n2, since the older version 1 is not supported. Newer versions are also fine—for\ninstance, specify ``-1`` to always use the latest available version. Beware:\nfailure to follow these instructions will cause important pybind11 memory\nallocation routines to be skipped during unpickling, which will likely lead to\nmemory corruption and/or segmentation faults.\n\n.. 
seealso::\n\n    The file :file:`tests/test_pickling.cpp` contains a complete example\n    that demonstrates how to pickle and unpickle types using pybind11 in more\n    detail.\n\n.. [#f3] http://docs.python.org/3/library/pickle.html#pickling-class-instances\n\nMultiple Inheritance\n====================\n\npybind11 can create bindings for types that derive from multiple base types\n(aka. *multiple inheritance*). To do so, specify all bases in the template\narguments of the ``class_`` declaration:\n\n.. code-block:: cpp\n\n    py::class_<MyType, BaseType1, BaseType2, BaseType3>(m, \"MyType\")\n       ...\n\nThe base types can be specified in arbitrary order, and they can even be\ninterspersed with alias types and holder types (discussed earlier in this\ndocument)---pybind11 will automatically find out which is which. The only\nrequirement is that the first template argument is the type to be declared.\n\nIt is also permitted to inherit multiply from exported C++ classes in Python,\nas well as inheriting from multiple Python and/or pybind11-exported classes.\n\nThere is one caveat regarding the implementation of this feature:\n\nWhen only one base type is specified for a C++ type that actually has multiple\nbases, pybind11 will assume that it does not participate in multiple\ninheritance, which can lead to undefined behavior. In such cases, add the tag\n``multiple_inheritance`` to the class constructor:\n\n.. code-block:: cpp\n\n    py::class_<MyType, BaseType2>(m, \"MyType\", py::multiple_inheritance());\n\nThe tag is redundant and does not need to be specified when multiple base types\nare listed.\n\n.. _module_local:\n\nModule-local class bindings\n===========================\n\nWhen creating a binding for a class, pybind11 by default makes that binding\n\"global\" across modules.  What this means is that a type defined in one module\ncan be returned from any module resulting in the same Python type.  For\nexample, this allows the following:\n\n.. 
code-block:: cpp\n\n    // In the module1.cpp binding code for module1:\n    py::class_<Pet>(m, \"Pet\")\n        .def(py::init<std::string>())\n        .def_readonly(\"name\", &Pet::name);\n\n.. code-block:: cpp\n\n    // In the module2.cpp binding code for module2:\n    m.def(\"create_pet\", [](std::string name) { return new Pet(name); });\n\n.. code-block:: pycon\n\n    >>> from module1 import Pet\n    >>> from module2 import create_pet\n    >>> pet1 = Pet(\"Kitty\")\n    >>> pet2 = create_pet(\"Doggy\")\n    >>> pet2.name()\n    'Doggy'\n\nWhen writing binding code for a library, this is usually desirable: this\nallows, for example, splitting up a complex library into multiple Python\nmodules.\n\nIn some cases, however, this can cause conflicts.  For example, suppose two\nunrelated modules make use of an external C++ library and each provide custom\nbindings for one of that library's classes.  This will result in an error when\na Python program attempts to import both modules (directly or indirectly)\nbecause of conflicting definitions on the external type:\n\n.. code-block:: cpp\n\n    // dogs.cpp\n\n    // Binding for external library class:\n    py::class_<pets::Pet>(m, \"Pet\")\n        .def(\"name\", &pets::Pet::name);\n\n    // Binding for local extension class:\n    py::class_<Dog, pets::Pet>(m, \"Dog\")\n        .def(py::init<std::string>());\n\n.. code-block:: cpp\n\n    // cats.cpp, in a completely separate project from the above dogs.cpp.\n\n    // Binding for external library class:\n    py::class_<pets::Pet>(m, \"Pet\")\n        .def(\"get_name\", &pets::Pet::name);\n\n    // Binding for local extending class:\n    py::class_<Cat, pets::Pet>(m, \"Cat\")\n        .def(py::init<std::string>());\n\n.. 
code-block:: pycon\n\n    >>> import cats\n    >>> import dogs\n    Traceback (most recent call last):\n      File \"<stdin>\", line 1, in <module>\n    ImportError: generic_type: type \"Pet\" is already registered!\n\nTo get around this, you can tell pybind11 to keep the external class binding\nlocalized to the module by passing the ``py::module_local()`` attribute into\nthe ``py::class_`` constructor:\n\n.. code-block:: cpp\n\n    // Pet binding in dogs.cpp:\n    py::class_<pets::Pet>(m, \"Pet\", py::module_local())\n        .def(\"name\", &pets::Pet::name);\n\n.. code-block:: cpp\n\n    // Pet binding in cats.cpp:\n    py::class_<pets::Pet>(m, \"Pet\", py::module_local())\n        .def(\"get_name\", &pets::Pet::name);\n\nThis makes the Python-side ``dogs.Pet`` and ``cats.Pet`` into distinct classes,\navoiding the conflict and allowing both modules to be loaded.  C++ code in the\n``dogs`` module that casts or returns a ``Pet`` instance will result in a\n``dogs.Pet`` Python instance, while C++ code in the ``cats`` module will result\nin a ``cats.Pet`` Python instance.\n\nThis does come with two caveats, however: First, external modules cannot return\nor cast a ``Pet`` instance to Python (unless they also provide their own local\nbindings).  Second, from the Python point of view they are two distinct classes.\n\nNote that the locality only applies in the C++ -> Python direction.  When\npassing such a ``py::module_local`` type into a C++ function, the module-local\nclasses are still considered.  This means that if the following function is\nadded to any module (including but not limited to the ``cats`` and ``dogs``\nmodules above) it will be callable with either a ``dogs.Pet`` or ``cats.Pet``\nargument:\n\n.. 
code-block:: cpp\n\n    m.def(\"pet_name\", [](const pets::Pet &pet) { return pet.name(); });\n\nFor example, suppose the above function is added to each of ``cats.cpp``,\n``dogs.cpp`` and ``frogs.cpp`` (where ``frogs.cpp`` is some other module that\ndoes *not* bind ``Pets`` at all).\n\n.. code-block:: pycon\n\n    >>> import cats, dogs, frogs  # No error because of the added py::module_local()\n    >>> mycat, mydog = cats.Cat(\"Fluffy\"), dogs.Dog(\"Rover\")\n    >>> (cats.pet_name(mycat), dogs.pet_name(mydog))\n    ('Fluffy', 'Rover')\n    >>> (cats.pet_name(mydog), dogs.pet_name(mycat), frogs.pet_name(mycat))\n    ('Rover', 'Fluffy', 'Fluffy')\n\nIt is possible to use ``py::module_local()`` registrations in one module even\nif another module registers the same type globally: within the module with the\nmodule-local definition, all C++ instances will be cast to the associated bound\nPython type.  In other modules any such values are converted to the global\nPython type created elsewhere.\n\n.. note::\n\n    STL bindings (as provided via the optional :file:`pybind11/stl_bind.h`\n    header) apply ``py::module_local`` by default when the bound type might\n    conflict with other modules; see :ref:`stl_bind` for details.\n\n.. note::\n\n    The localization of the bound types is actually tied to the shared object\n    or binary generated by the compiler/linker.  For typical modules created\n    with ``PYBIND11_MODULE()``, this distinction is not significant.  It is\n    possible, however, when :ref:`embedding` to embed multiple modules in the\n    same binary (see :ref:`embedding_modules`).  In such a case, the\n    localization will apply across all embedded modules within the same binary.\n\n.. 
seealso::\n\n    The file :file:`tests/test_local_bindings.cpp` contains additional examples\n    that demonstrate how ``py::module_local()`` works.\n\nBinding protected member functions\n==================================\n\nIt's normally not possible to expose ``protected`` member functions to Python:\n\n.. code-block:: cpp\n\n    class A {\n    protected:\n        int foo() const { return 42; }\n    };\n\n    py::class_<A>(m, \"A\")\n        .def(\"foo\", &A::foo); // error: 'foo' is a protected member of 'A'\n\nOn one hand, this is good because non-``public`` members aren't meant to be\naccessed from the outside. But we may want to make use of ``protected``\nfunctions in derived Python classes.\n\nThe following pattern makes this possible:\n\n.. code-block:: cpp\n\n    class A {\n    protected:\n        int foo() const { return 42; }\n    };\n\n    class Publicist : public A { // helper type for exposing protected functions\n    public:\n        using A::foo; // inherited with different access modifier\n    };\n\n    py::class_<A>(m, \"A\") // bind the primary class\n        .def(\"foo\", &Publicist::foo); // expose protected methods via the publicist\n\nThis works because ``&Publicist::foo`` is exactly the same function as\n``&A::foo`` (same signature and address), just with a different access\nmodifier. The only purpose of the ``Publicist`` helper class is to make\nthe function name ``public``.\n\nIf the intent is to expose ``protected`` ``virtual`` functions which can be\noverridden in Python, the publicist pattern can be combined with the previously\ndescribed trampoline:\n\n.. 
code-block:: cpp\n\n    class A {\n    public:\n        virtual ~A() = default;\n\n    protected:\n        virtual int foo() const { return 42; }\n    };\n\n    class Trampoline : public A {\n    public:\n        int foo() const override { PYBIND11_OVERLOAD(int, A, foo, ); }\n    };\n\n    class Publicist : public A {\n    public:\n        using A::foo;\n    };\n\n    py::class_<A, Trampoline>(m, \"A\") // <-- `Trampoline` here\n        .def(\"foo\", &Publicist::foo); // <-- `Publicist` here, not `Trampoline`!\n\n.. note::\n\n    MSVC 2015 has a compiler bug (fixed in version 2017) which\n    requires a more explicit function binding in the form of\n    ``.def(\"foo\", static_cast<int (A::*)() const>(&Publicist::foo));``\n    where ``int (A::*)() const`` is the type of ``A::foo``.\n\nCustom automatic downcasters\n============================\n\nAs explained in :ref:`inheritance`, pybind11 comes with built-in\nunderstanding of the dynamic type of polymorphic objects in C++; that\nis, returning a Pet to Python produces a Python object that knows it's\nwrapping a Dog, if Pet has virtual methods and pybind11 knows about\nDog and this Pet is in fact a Dog. Sometimes, you might want to\nprovide this automatic downcasting behavior when creating bindings for\na class hierarchy that does not use standard C++ polymorphism, such as\nLLVM [#f4]_. As long as there's some way to determine at runtime\nwhether a downcast is safe, you can proceed by specializing the\n``pybind11::polymorphic_type_hook`` template:\n\n.. 
code-block:: cpp\n\n    enum class PetKind { Cat, Dog, Zebra };\n    struct Pet {   // Not polymorphic: has no virtual methods\n        const PetKind kind;\n        int age = 0;\n      protected:\n        Pet(PetKind _kind) : kind(_kind) {}\n    };\n    struct Dog : Pet {\n        Dog() : Pet(PetKind::Dog) {}\n        std::string sound = \"woof!\";\n        std::string bark() const { return sound; }\n    };\n\n    namespace pybind11 {\n        template<> struct polymorphic_type_hook<Pet> {\n            static const void *get(const Pet *src, const std::type_info*& type) {\n                // note that src may be nullptr\n                if (src && src->kind == PetKind::Dog) {\n                    type = &typeid(Dog);\n                    return static_cast<const Dog*>(src);\n                }\n                return src;\n            }\n        };\n    } // namespace pybind11\n\nWhen pybind11 wants to convert a C++ pointer of type ``Base*`` to a\nPython object, it calls ``polymorphic_type_hook<Base>::get()`` to\ndetermine if a downcast is possible. The ``get()`` function should use\nwhatever runtime information is available to determine if its ``src``\nparameter is in fact an instance of some class ``Derived`` that\ninherits from ``Base``. If it finds such a ``Derived``, it sets ``type\n= &typeid(Derived)`` and returns a pointer to the ``Derived`` object\nthat contains ``src``. Otherwise, it just returns ``src``, leaving\n``type`` at its default value of nullptr. If you set ``type`` to a\ntype that pybind11 doesn't know about, no downcasting will occur, and\nthe original ``src`` pointer will be used with its static type\n``Base*``.\n\nIt is critical that the returned pointer and ``type`` argument of\n``get()`` agree with each other: if ``type`` is set to something\nnon-null, the returned pointer must point to the start of an object\nwhose type is ``type``. 
If the hierarchy being exposed uses only\nsingle inheritance, a simple ``return src;`` will achieve this just\nfine, but in the general case, you must cast ``src`` to the\nappropriate derived-class pointer (e.g. using\n``static_cast<const Derived*>(src)``) before allowing it to be returned as a\n``void*``.\n\n.. [#f4] https://llvm.org/docs/HowToSetUpLLVMStyleRTTI.html\n\n.. note::\n\n    pybind11's standard support for downcasting objects whose types\n    have virtual methods is implemented using\n    ``polymorphic_type_hook`` too, using the standard C++ ability to\n    determine the most-derived type of a polymorphic object using\n    ``typeid()`` and to cast a base pointer to that most-derived type\n    (even if you don't know what it is) using ``dynamic_cast<void*>``.\n\n.. seealso::\n\n    The file :file:`tests/test_tagbased_polymorphic.cpp` contains a\n    more complete example, including a demonstration of how to provide\n    automatic downcasting for an entire class hierarchy without\n    writing one get() function for each class.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/embedding.rst",
    "content": ".. _embedding:\n\nEmbedding the interpreter\n#########################\n\nWhile pybind11 is mainly focused on extending Python using C++, it's also\npossible to do the reverse: embed the Python interpreter into a C++ program.\nAll of the other documentation pages still apply here, so refer to them for\ngeneral pybind11 usage. This section will cover a few extra things required\nfor embedding.\n\nGetting started\n===============\n\nA basic executable with an embedded interpreter can be created with just a few\nlines of CMake and the ``pybind11::embed`` target, as shown below. For more\ninformation, see :doc:`/compiling`.\n\n.. code-block:: cmake\n\n    cmake_minimum_required(VERSION 3.0)\n    project(example)\n\n    find_package(pybind11 REQUIRED)  # or `add_subdirectory(pybind11)`\n\n    add_executable(example main.cpp)\n    target_link_libraries(example PRIVATE pybind11::embed)\n\nThe essential structure of the ``main.cpp`` file looks like this:\n\n.. code-block:: cpp\n\n    #include <pybind11/embed.h> // everything needed for embedding\n    namespace py = pybind11;\n\n    int main() {\n        py::scoped_interpreter guard{}; // start the interpreter and keep it alive\n\n        py::print(\"Hello, World!\"); // use the Python API\n    }\n\nThe interpreter must be initialized before using any Python API, which includes\nall the functions and classes in pybind11. The RAII guard class `scoped_interpreter`\ntakes care of the interpreter lifetime. After the guard is destroyed, the interpreter\nshuts down and clears its memory. No Python functions can be called after this.\n\nExecuting Python code\n=====================\n\nThere are a few different ways to run Python code. One option is to use `eval`,\n`exec` or `eval_file`, as explained in :ref:`eval`. Here is a quick example in\nthe context of an executable with an embedded interpreter:\n\n.. 
code-block:: cpp\n\n    #include <pybind11/embed.h>\n    namespace py = pybind11;\n\n    int main() {\n        py::scoped_interpreter guard{};\n\n        py::exec(R\"(\n            kwargs = dict(name=\"World\", number=42)\n            message = \"Hello, {name}! The answer is {number}\".format(**kwargs)\n            print(message)\n        )\");\n    }\n\nAlternatively, similar results can be achieved using pybind11's API (see\n:doc:`/advanced/pycpp/index` for more details).\n\n.. code-block:: cpp\n\n    #include <pybind11/embed.h>\n    namespace py = pybind11;\n    using namespace py::literals;\n\n    int main() {\n        py::scoped_interpreter guard{};\n\n        auto kwargs = py::dict(\"name\"_a=\"World\", \"number\"_a=42);\n        auto message = \"Hello, {name}! The answer is {number}\"_s.format(**kwargs);\n        py::print(message);\n    }\n\nThe two approaches can also be combined:\n\n.. code-block:: cpp\n\n    #include <pybind11/embed.h>\n    #include <iostream>\n\n    namespace py = pybind11;\n    using namespace py::literals;\n\n    int main() {\n        py::scoped_interpreter guard{};\n\n        auto locals = py::dict(\"name\"_a=\"World\", \"number\"_a=42);\n        py::exec(R\"(\n            message = \"Hello, {name}! The answer is {number}\".format(**locals())\n        )\", py::globals(), locals);\n\n        auto message = locals[\"message\"].cast<std::string>();\n        std::cout << message;\n    }\n\nImporting modules\n=================\n\nPython modules can be imported using `module::import()`:\n\n.. code-block:: cpp\n\n    py::module sys = py::module::import(\"sys\");\n    py::print(sys.attr(\"path\"));\n\nFor convenience, the current working directory is included in ``sys.path`` when\nembedding the interpreter. This makes it easy to import local Python files:\n\n.. code-block:: python\n\n    \"\"\"calc.py located in the working directory\"\"\"\n\n    def add(i, j):\n        return i + j\n\n\n.. 
code-block:: cpp\n\n    py::module calc = py::module::import(\"calc\");\n    py::object result = calc.attr(\"add\")(1, 2);\n    int n = result.cast<int>();\n    assert(n == 3);\n\nModules can be reloaded using `module::reload()` if the source is modified e.g.\nby an external process. This can be useful in scenarios where the application\nimports a user defined data processing script which needs to be updated after\nchanges by the user. Note that this function does not reload modules recursively.\n\n.. _embedding_modules:\n\nAdding embedded modules\n=======================\n\nEmbedded binary modules can be added using the `PYBIND11_EMBEDDED_MODULE` macro.\nNote that the definition must be placed at global scope. They can be imported\nlike any other module.\n\n.. code-block:: cpp\n\n    #include <pybind11/embed.h>\n    namespace py = pybind11;\n\n    PYBIND11_EMBEDDED_MODULE(fast_calc, m) {\n        // `m` is a `py::module` which is used to bind functions and classes\n        m.def(\"add\", [](int i, int j) {\n            return i + j;\n        });\n    }\n\n    int main() {\n        py::scoped_interpreter guard{};\n\n        auto fast_calc = py::module::import(\"fast_calc\");\n        auto result = fast_calc.attr(\"add\")(1, 2).cast<int>();\n        assert(result == 3);\n    }\n\nUnlike extension modules where only a single binary module can be created, on\nthe embedded side an unlimited number of modules can be added using multiple\n`PYBIND11_EMBEDDED_MODULE` definitions (as long as they have unique names).\n\nThese modules are added to Python's list of builtins, so they can also be\nimported in pure Python files loaded by the interpreter. Everything interacts\nnaturally:\n\n.. code-block:: python\n\n    \"\"\"py_module.py located in the working directory\"\"\"\n    import cpp_module\n\n    a = cpp_module.a\n    b = a + 1\n\n\n.. 
code-block:: cpp\n\n    #include <pybind11/embed.h>\n    namespace py = pybind11;\n\n    PYBIND11_EMBEDDED_MODULE(cpp_module, m) {\n        m.attr(\"a\") = 1;\n    }\n\n    int main() {\n        py::scoped_interpreter guard{};\n\n        auto py_module = py::module::import(\"py_module\");\n\n        auto locals = py::dict(\"fmt\"_a=\"{} + {} = {}\", **py_module.attr(\"__dict__\"));\n        assert(locals[\"a\"].cast<int>() == 1);\n        assert(locals[\"b\"].cast<int>() == 2);\n\n        py::exec(R\"(\n            c = a + b\n            message = fmt.format(a, b, c)\n        )\", py::globals(), locals);\n\n        assert(locals[\"c\"].cast<int>() == 3);\n        assert(locals[\"message\"].cast<std::string>() == \"1 + 2 = 3\");\n    }\n\n\nInterpreter lifetime\n====================\n\nThe Python interpreter shuts down when `scoped_interpreter` is destroyed. After\nthis, creating a new instance will restart the interpreter. Alternatively, the\n`initialize_interpreter` / `finalize_interpreter` pair of functions can be used\nto directly set the state at any time.\n\nModules created with pybind11 can be safely re-initialized after the interpreter\nhas been restarted. However, this may not apply to third-party extension modules.\nThe issue is that Python itself cannot completely unload extension modules and\nthere are several caveats with regard to interpreter restarting. In short, not\nall memory may be freed, either due to Python reference cycles or user-created\nglobal data. All the details can be found in the CPython documentation.\n\n.. warning::\n\n    Creating two concurrent `scoped_interpreter` guards is a fatal error. 
So is\n    calling `initialize_interpreter` for a second time after the interpreter\n    has already been initialized.\n\n    Do not use the raw CPython API functions ``Py_Initialize`` and\n    ``Py_Finalize`` as these do not properly handle the lifetime of\n    pybind11's internal data.\n\n\nSub-interpreter support\n=======================\n\nCreating multiple copies of `scoped_interpreter` is not possible because it\nrepresents the main Python interpreter. Sub-interpreters are something different\nand they do permit the existence of multiple interpreters. This is an advanced\nfeature of the CPython API and should be handled with care. pybind11 does not\ncurrently offer a C++ interface for sub-interpreters, so refer to the CPython\ndocumentation for all the details regarding this feature.\n\nWe'll just mention a couple of caveats the sub-interpreters support in pybind11:\n\n 1. Sub-interpreters will not receive independent copies of embedded modules.\n    Instead, these are shared and modifications in one interpreter may be\n    reflected in another.\n\n 2. Managing multiple threads, multiple interpreters and the GIL can be\n    challenging and there are several caveats here, even within the pure\n    CPython API (please refer to the Python docs for details). As for\n    pybind11, keep in mind that `gil_scoped_release` and `gil_scoped_acquire`\n    do not take sub-interpreters into account.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/exceptions.rst",
    "content": "Exceptions\n##########\n\nBuilt-in exception translation\n==============================\n\nWhen C++ code invoked from Python throws an ``std::exception``, it is\nautomatically converted into a Python ``Exception``. pybind11 defines multiple\nspecial exception classes that will map to different types of Python\nexceptions:\n\n.. tabularcolumns:: |p{0.5\\textwidth}|p{0.45\\textwidth}|\n\n+--------------------------------------+--------------------------------------+\n|  C++ exception type                  |  Python exception type               |\n+======================================+======================================+\n| :class:`std::exception`              | ``RuntimeError``                     |\n+--------------------------------------+--------------------------------------+\n| :class:`std::bad_alloc`              | ``MemoryError``                      |\n+--------------------------------------+--------------------------------------+\n| :class:`std::domain_error`           | ``ValueError``                       |\n+--------------------------------------+--------------------------------------+\n| :class:`std::invalid_argument`       | ``ValueError``                       |\n+--------------------------------------+--------------------------------------+\n| :class:`std::length_error`           | ``ValueError``                       |\n+--------------------------------------+--------------------------------------+\n| :class:`std::out_of_range`           | ``ValueError``                       |\n+--------------------------------------+--------------------------------------+\n| :class:`std::range_error`            | ``ValueError``                       |\n+--------------------------------------+--------------------------------------+\n| :class:`pybind11::stop_iteration`    | ``StopIteration`` (used to implement |\n|                                      | custom iterators)                    
|\n+--------------------------------------+--------------------------------------+\n| :class:`pybind11::index_error`       | ``IndexError`` (used to indicate out |\n|                                      | of bounds access in ``__getitem__``, |\n|                                      | ``__setitem__``, etc.)               |\n+--------------------------------------+--------------------------------------+\n| :class:`pybind11::value_error`       | ``ValueError`` (used to indicate     |\n|                                      | wrong value passed in                |\n|                                      | ``container.remove(...)``)           |\n+--------------------------------------+--------------------------------------+\n| :class:`pybind11::key_error`         | ``KeyError`` (used to indicate out   |\n|                                      | of bounds access in ``__getitem__``, |\n|                                      | ``__setitem__`` in dict-like         |\n|                                      | objects, etc.)                       |\n+--------------------------------------+--------------------------------------+\n| :class:`pybind11::error_already_set` | Indicates that the Python exception  |\n|                                      | flag has already been set via Python |\n|                                      | API calls from C++ code; this C++    |\n|                                      | exception is used to propagate such  |\n|                                      | a Python exception back to Python.   
|\n+--------------------------------------+--------------------------------------+\n\nWhen a Python function invoked from C++ throws an exception, it is converted\ninto a C++ exception of type :class:`error_already_set` whose string payload\ncontains a textual summary.\n\nThere is also a special exception :class:`cast_error` that is thrown by\n:func:`handle::call` when the input arguments cannot be converted to Python\nobjects.\n\nRegistering custom translators\n==============================\n\nIf the default exception conversion policy described above is insufficient,\npybind11 also provides support for registering custom exception translators.\nTo register a simple exception conversion that translates a C++ exception into\na new Python exception using the C++ exception's ``what()`` method, a helper\nfunction is available:\n\n.. code-block:: cpp\n\n    py::register_exception<CppExp>(module, \"PyExp\");\n\nThis call creates a Python exception class with the name ``PyExp`` in the given\nmodule and automatically converts any encountered exceptions of type ``CppExp``\ninto Python exceptions of type ``PyExp``.\n\nWhen more advanced exception translation is needed, the function\n``py::register_exception_translator(translator)`` can be used to register\nfunctions that can translate arbitrary exception types (and which may include\nadditional logic to do so).  The function takes a stateless callable (e.g.  a\nfunction pointer or a lambda function without captured variables) with the call\nsignature ``void(std::exception_ptr)``.\n\nWhen a C++ exception is thrown, the registered exception translators are tried\nin reverse order of registration (i.e. the last registered translator gets the\nfirst shot at handling the exception).\n\nInside the translator, ``std::rethrow_exception`` should be used within\na try block to re-throw the exception.  
One or more catch clauses to catch\nthe appropriate exceptions should then be used with each clause using\n``PyErr_SetString`` to set a Python exception or ``ex(string)`` to set\nthe python exception to a custom exception type (see below).\n\nTo declare a custom Python exception type, declare a ``py::exception`` variable\nand use this in the associated exception translator (note: it is often useful\nto make this a static declaration when using it inside a lambda expression\nwithout requiring capturing).\n\n\nThe following example demonstrates this for a hypothetical exception classes\n``MyCustomException`` and ``OtherException``: the first is translated to a\ncustom python exception ``MyCustomError``, while the second is translated to a\nstandard python RuntimeError:\n\n.. code-block:: cpp\n\n    static py::exception<MyCustomException> exc(m, \"MyCustomError\");\n    py::register_exception_translator([](std::exception_ptr p) {\n        try {\n            if (p) std::rethrow_exception(p);\n        } catch (const MyCustomException &e) {\n            exc(e.what());\n        } catch (const OtherException &e) {\n            PyErr_SetString(PyExc_RuntimeError, e.what());\n        }\n    });\n\nMultiple exceptions can be handled by a single translator, as shown in the\nexample above. If the exception is not caught by the current translator, the\npreviously registered one gets a chance.\n\nIf none of the registered exception translators is able to handle the\nexception, it is handled by the default converter as described in the previous\nsection.\n\n.. seealso::\n\n    The file :file:`tests/test_exceptions.cpp` contains examples\n    of various custom exception translators and custom exception types.\n\n.. note::\n\n    You must call either ``PyErr_SetString`` or a custom exception's call\n    operator (``exc(string)``) for every exception caught in a custom exception\n    translator.  
Failure to do so will cause Python to crash with ``SystemError:\n    error return without exception set``.\n\n    Exceptions that you do not plan to handle should simply not be caught, or\n    may be explicitly (re-)thrown to delegate it to the other,\n    previously-declared existing exception translators.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/functions.rst",
    "content": "Functions\n#########\n\nBefore proceeding with this section, make sure that you are already familiar\nwith the basics of binding functions and classes, as explained in :doc:`/basics`\nand :doc:`/classes`. The following guide is applicable to both free and member\nfunctions, i.e. *methods* in Python.\n\n.. _return_value_policies:\n\nReturn value policies\n=====================\n\nPython and C++ use fundamentally different ways of managing the memory and\nlifetime of objects managed by them. This can lead to issues when creating\nbindings for functions that return a non-trivial type. Just by looking at the\ntype information, it is not clear whether Python should take charge of the\nreturned value and eventually free its resources, or if this is handled on the\nC++ side. For this reason, pybind11 provides a several *return value policy*\nannotations that can be passed to the :func:`module::def` and\n:func:`class_::def` functions. The default policy is\n:enum:`return_value_policy::automatic`.\n\nReturn value policies are tricky, and it's very important to get them right.\nJust to illustrate what can go wrong, consider the following simple example:\n\n.. code-block:: cpp\n\n    /* Function declaration */\n    Data *get_data() { return _data; /* (pointer to a static data structure) */ }\n    ...\n\n    /* Binding code */\n    m.def(\"get_data\", &get_data); // <-- KABOOM, will cause crash when called from Python\n\nWhat's going on here? When ``get_data()`` is called from Python, the return\nvalue (a native C++ type) must be wrapped to turn it into a usable Python type.\nIn this case, the default return value policy (:enum:`return_value_policy::automatic`)\ncauses pybind11 to assume ownership of the static ``_data`` instance.\n\nWhen Python's garbage collector eventually deletes the Python\nwrapper, pybind11 will also attempt to delete the C++ instance (via ``operator\ndelete()``) due to the implied ownership. 
At this point, the entire application\nwill come crashing down, though errors could also be more subtle and involve\nsilent data corruption.\n\nIn the above example, the policy :enum:`return_value_policy::reference` should have\nbeen specified so that the global data instance is only *referenced* without any\nimplied transfer of ownership, i.e.:\n\n.. code-block:: cpp\n\n    m.def(\"get_data\", &get_data, return_value_policy::reference);\n\nOn the other hand, this is not the right policy for many other situations,\nwhere ignoring ownership could lead to resource leaks.\nAs a developer using pybind11, it's important to be familiar with the different\nreturn value policies, including which situation calls for which one of them.\nThe following table provides an overview of available policies:\n\n.. tabularcolumns:: |p{0.5\\textwidth}|p{0.45\\textwidth}|\n\n+--------------------------------------------------+----------------------------------------------------------------------------+\n| Return value policy                              | Description                                                                |\n+==================================================+============================================================================+\n| :enum:`return_value_policy::take_ownership`      | Reference an existing object (i.e. do not create a new copy) and take      |\n|                                                  | ownership. Python will call the destructor and delete operator when the    |\n|                                                  | object's reference count reaches zero. Undefined behavior ensues when the  |\n|                                                  | C++ side does the same, or when the data was not dynamically allocated.    
|\n+--------------------------------------------------+----------------------------------------------------------------------------+\n| :enum:`return_value_policy::copy`                | Create a new copy of the returned object, which will be owned by Python.   |\n|                                                  | This policy is comparably safe because the lifetimes of the two instances  |\n|                                                  | are decoupled.                                                             |\n+--------------------------------------------------+----------------------------------------------------------------------------+\n| :enum:`return_value_policy::move`                | Use ``std::move`` to move the return value contents into a new instance    |\n|                                                  | that will be owned by Python. This policy is comparably safe because the   |\n|                                                  | lifetimes of the two instances (move source and destination) are decoupled.|\n+--------------------------------------------------+----------------------------------------------------------------------------+\n| :enum:`return_value_policy::reference`           | Reference an existing object, but do not take ownership. The C++ side is   |\n|                                                  | responsible for managing the object's lifetime and deallocating it when    |\n|                                                  | it is no longer used. Warning: undefined behavior will ensue when the C++  |\n|                                                  | side deletes an object that is still referenced and used by Python.        
|\n+--------------------------------------------------+----------------------------------------------------------------------------+\n| :enum:`return_value_policy::reference_internal`  | Indicates that the lifetime of the return value is tied to the lifetime    |\n|                                                  | of a parent object, namely the implicit ``this``, or ``self`` argument of  |\n|                                                  | the called method or property. Internally, this policy works just like     |\n|                                                  | :enum:`return_value_policy::reference` but additionally applies a          |\n|                                                  | ``keep_alive<0, 1>`` *call policy* (described in the next section) that    |\n|                                                  | prevents the parent object from being garbage collected as long as the     |\n|                                                  | return value is referenced by Python. This is the default policy for       |\n|                                                  | property getters created via ``def_property``, ``def_readwrite``, etc.     |\n+--------------------------------------------------+----------------------------------------------------------------------------+\n| :enum:`return_value_policy::automatic`           | **Default policy.** This policy falls back to the policy                   |\n|                                                  | :enum:`return_value_policy::take_ownership` when the return value is a     |\n|                                                  | pointer. Otherwise, it uses :enum:`return_value_policy::move` or           |\n|                                                  | :enum:`return_value_policy::copy` for rvalue and lvalue references,        |\n|                                                  | respectively. 
See above for a description of what all of these different   |\n|                                                  | policies do.                                                               |\n+--------------------------------------------------+----------------------------------------------------------------------------+\n| :enum:`return_value_policy::automatic_reference` | As above, but use policy :enum:`return_value_policy::reference` when the   |\n|                                                  | return value is a pointer. This is the default conversion policy for       |\n|                                                  | function arguments when calling Python functions manually from C++ code    |\n|                                                  | (i.e. via handle::operator()). You probably won't need to use this.        |\n+--------------------------------------------------+----------------------------------------------------------------------------+\n\nReturn value policies can also be applied to properties:\n\n.. code-block:: cpp\n\n    class_<MyClass>(m, \"MyClass\")\n        .def_property(\"data\", &MyClass::getData, &MyClass::setData,\n                      py::return_value_policy::copy);\n\nTechnically, the code above applies the policy to both the getter and the\nsetter function, however, the setter doesn't really care about *return*\nvalue policies which makes this a convenient terse syntax. Alternatively,\ntargeted arguments can be passed through the :class:`cpp_function` constructor:\n\n.. code-block:: cpp\n\n    class_<MyClass>(m, \"MyClass\")\n        .def_property(\"data\"\n            py::cpp_function(&MyClass::getData, py::return_value_policy::copy),\n            py::cpp_function(&MyClass::setData)\n        );\n\n.. 
warning::\n\n    Code with invalid return value policies might access uninitialized memory or\n    free data structures multiple times, which can lead to hard-to-debug\n    non-determinism and segmentation faults, hence it is worth spending the\n    time to understand all the different options in the table above.\n\n.. note::\n\n    One important aspect of the above policies is that they only apply to\n    instances which pybind11 has *not* seen before, in which case the policy\n    clarifies essential questions about the return value's lifetime and\n    ownership.  When pybind11 knows the instance already (as identified by its\n    type and address in memory), it will return the existing Python object\n    wrapper rather than creating a new copy.\n\n.. note::\n\n    The next section on :ref:`call_policies` discusses *call policies* that can be\n    specified *in addition* to a return value policy from the list above. Call\n    policies indicate reference relationships that can involve both return values\n    and parameters of functions.\n\n.. note::\n\n   As an alternative to elaborate call policies and lifetime management logic,\n   consider using smart pointers (see the section on :ref:`smart_pointers` for\n   details). Smart pointers can tell whether an object is still referenced from\n   C++ or Python, which generally eliminates the kinds of inconsistencies that\n   can lead to crashes or undefined behavior. For functions returning smart\n   pointers, it is not necessary to specify a return value policy.\n\n.. _call_policies:\n\nAdditional call policies\n========================\n\nIn addition to the above return value policies, further *call policies* can be\nspecified to indicate dependencies between parameters or ensure a certain state\nfor the function call.\n\nKeep alive\n----------\n\nIn general, this policy is required when the C++ object is any kind of container\nand another object is being added to the container. 
``keep_alive<Nurse, Patient>``\nindicates that the argument with index ``Patient`` should be kept alive at least\nuntil the argument with index ``Nurse`` is freed by the garbage collector. Argument\nindices start at one, while zero refers to the return value. For methods, index\n``1`` refers to the implicit ``this`` pointer, while regular arguments begin at\nindex ``2``. Arbitrarily many call policies can be specified. When a ``Nurse``\nwith value ``None`` is detected at runtime, the call policy does nothing.\n\nWhen the nurse is not a pybind11-registered type, the implementation internally\nrelies on the ability to create a *weak reference* to the nurse object. When\nthe nurse object is not a pybind11-registered type and does not support weak\nreferences, an exception will be thrown.\n\nConsider the following example: here, the binding code for a list append\noperation ties the lifetime of the newly added element to the underlying\ncontainer:\n\n.. code-block:: cpp\n\n    py::class_<List>(m, \"List\")\n        .def(\"append\", &List::append, py::keep_alive<1, 2>());\n\nFor consistency, the argument indexing is identical for constructors. Index\n``1`` still refers to the implicit ``this`` pointer, i.e. the object which is\nbeing constructed. Index ``0`` refers to the return type which is presumed to\nbe ``void`` when a constructor is viewed like a function. The following example\nties the lifetime of the constructor element to the constructed object:\n\n.. code-block:: cpp\n\n    py::class_<Nurse>(m, \"Nurse\")\n        .def(py::init<Patient &>(), py::keep_alive<1, 2>());\n\n.. note::\n\n    ``keep_alive`` is analogous to the ``with_custodian_and_ward`` (if Nurse,\n    Patient != 0) and ``with_custodian_and_ward_postcall`` (if Nurse/Patient ==\n    0) policies from Boost.Python.\n\nCall guard\n----------\n\nThe ``call_guard<T>`` policy allows any scope guard type ``T`` to be placed\naround the function call. For example, this definition:\n\n.. 
code-block:: cpp\n\n    m.def(\"foo\", foo, py::call_guard<T>());\n\nis equivalent to the following pseudocode:\n\n.. code-block:: cpp\n\n    m.def(\"foo\", [](args...) {\n        T scope_guard;\n        return foo(args...); // forwarded arguments\n    });\n\nThe only requirement is that ``T`` is default-constructible, but otherwise any\nscope guard will work. This is very useful in combination with `gil_scoped_release`.\nSee :ref:`gil`.\n\nMultiple guards can also be specified as ``py::call_guard<T1, T2, T3...>``. The\nconstructor order is left to right and destruction happens in reverse.\n\n.. seealso::\n\n    The file :file:`tests/test_call_policies.cpp` contains a complete example\n    that demonstrates using `keep_alive` and `call_guard` in more detail.\n\n.. _python_objects_as_args:\n\nPython objects as arguments\n===========================\n\npybind11 exposes all major Python types using thin C++ wrapper classes. These\nwrapper classes can also be used as parameters of functions in bindings, which\nmakes it possible to directly work with native Python types on the C++ side.\nFor instance, the following statement iterates over a Python ``dict``:\n\n.. code-block:: cpp\n\n    void print_dict(py::dict dict) {\n        /* Easily interact with Python types */\n        for (auto item : dict)\n            std::cout << \"key=\" << std::string(py::str(item.first)) << \", \"\n                      << \"value=\" << std::string(py::str(item.second)) << std::endl;\n    }\n\nIt can be exported:\n\n.. code-block:: cpp\n\n    m.def(\"print_dict\", &print_dict);\n\nAnd used in Python as usual:\n\n.. 
code-block:: pycon\n\n    >>> print_dict({'foo': 123, 'bar': 'hello'})\n    key=foo, value=123\n    key=bar, value=hello\n\nFor more information on using Python objects in C++, see :doc:`/advanced/pycpp/index`.\n\nAccepting \\*args and \\*\\*kwargs\n===============================\n\nPython provides a useful mechanism to define functions that accept arbitrary\nnumbers of arguments and keyword arguments:\n\n.. code-block:: python\n\n   def generic(*args, **kwargs):\n       ...  # do something with args and kwargs\n\nSuch functions can also be created using pybind11:\n\n.. code-block:: cpp\n\n   void generic(py::args args, py::kwargs kwargs) {\n       /// .. do something with args\n       if (kwargs)\n           /// .. do something with kwargs\n   }\n\n   /// Binding code\n   m.def(\"generic\", &generic);\n\nThe class ``py::args`` derives from ``py::tuple`` and ``py::kwargs`` derives\nfrom ``py::dict``.\n\nYou may also use just one or the other, and may combine these with other\narguments as long as the ``py::args`` and ``py::kwargs`` arguments are the last\narguments accepted by the function.\n\nPlease refer to the other examples for details on how to iterate over these,\nand on how to cast their entries into C++ objects. A demonstration is also\navailable in ``tests/test_kwargs_and_defaults.cpp``.\n\n.. note::\n\n    When combining \\*args or \\*\\*kwargs with :ref:`keyword_args` you should\n    *not* include ``py::arg`` tags for the ``py::args`` and ``py::kwargs``\n    arguments.\n\nDefault arguments revisited\n===========================\n\nThe section on :ref:`default_args` previously discussed basic usage of default\narguments using pybind11. One noteworthy aspect of their implementation is that\ndefault arguments are converted to Python objects right at declaration time.\nConsider the following example:\n\n.. 
code-block:: cpp\n\n    py::class_<MyClass>(\"MyClass\")\n        .def(\"myFunction\", py::arg(\"arg\") = SomeType(123));\n\nIn this case, pybind11 must already be set up to deal with values of the type\n``SomeType`` (via a prior instantiation of ``py::class_<SomeType>``), or an\nexception will be thrown.\n\nAnother aspect worth highlighting is that the \"preview\" of the default argument\nin the function signature is generated using the object's ``__repr__`` method.\nIf not available, the signature may not be very helpful, e.g.:\n\n.. code-block:: pycon\n\n    FUNCTIONS\n    ...\n    |  myFunction(...)\n    |      Signature : (MyClass, arg : SomeType = <SomeType object at 0x101b7b080>) -> NoneType\n    ...\n\nThe first way of addressing this is by defining ``SomeType.__repr__``.\nAlternatively, it is possible to specify the human-readable preview of the\ndefault argument manually using the ``arg_v`` notation:\n\n.. code-block:: cpp\n\n    py::class_<MyClass>(\"MyClass\")\n        .def(\"myFunction\", py::arg_v(\"arg\", SomeType(123), \"SomeType(123)\"));\n\nSometimes it may be necessary to pass a null pointer value as a default\nargument. In this case, remember to cast it to the underlying type in question,\nlike so:\n\n.. code-block:: cpp\n\n    py::class_<MyClass>(\"MyClass\")\n        .def(\"myFunction\", py::arg(\"arg\") = (SomeType *) nullptr);\n\n.. _nonconverting_arguments:\n\nNon-converting arguments\n========================\n\nCertain argument types may support conversion from one type to another.  Some\nexamples of conversions are:\n\n* :ref:`implicit_conversions` declared using ``py::implicitly_convertible<A,B>()``\n* Calling a method accepting a double with an integer argument\n* Calling a ``std::complex<float>`` argument with a non-complex python type\n  (for example, with a float).  
(Requires the optional ``pybind11/complex.h``\n  header).\n* Calling a function taking an Eigen matrix reference with a numpy array of the\n  wrong type or of an incompatible data layout.  (Requires the optional\n  ``pybind11/eigen.h`` header).\n\nThis behaviour is sometimes undesirable: the binding code may prefer to raise\nan error rather than convert the argument.  This behaviour can be obtained\nthrough ``py::arg`` by calling the ``.noconvert()`` method of the ``py::arg``\nobject, such as:\n\n.. code-block:: cpp\n\n    m.def(\"floats_only\", [](double f) { return 0.5 * f; }, py::arg(\"f\").noconvert());\n    m.def(\"floats_preferred\", [](double f) { return 0.5 * f; }, py::arg(\"f\"));\n\nAttempting the call the second function (the one without ``.noconvert()``) with\nan integer will succeed, but attempting to call the ``.noconvert()`` version\nwill fail with a ``TypeError``:\n\n.. code-block:: pycon\n\n    >>> floats_preferred(4)\n    2.0\n    >>> floats_only(4)\n    Traceback (most recent call last):\n      File \"<stdin>\", line 1, in <module>\n    TypeError: floats_only(): incompatible function arguments. The following argument types are supported:\n        1. (f: float) -> float\n\n    Invoked with: 4\n\nYou may, of course, combine this with the :var:`_a` shorthand notation (see\n:ref:`keyword_args`) and/or :ref:`default_args`.  It is also permitted to omit\nthe argument name by using the ``py::arg()`` constructor without an argument\nname, i.e. by specifying ``py::arg().noconvert()``.\n\n.. note::\n\n    When specifying ``py::arg`` options it is necessary to provide the same\n    number of options as the bound function has arguments.  Thus if you want to\n    enable no-convert behaviour for just one of several arguments, you will\n    need to specify a ``py::arg()`` annotation for each argument with the\n    no-convert argument modified to ``py::arg().noconvert()``.\n\n.. 
_none_arguments:\n\nAllow/Prohibiting None arguments\n================================\n\nWhen a C++ type registered with :class:`py::class_` is passed as an argument to\na function taking the instance as pointer or shared holder (e.g. ``shared_ptr``\nor a custom, copyable holder as described in :ref:`smart_pointers`), pybind\nallows ``None`` to be passed from Python which results in calling the C++\nfunction with ``nullptr`` (or an empty holder) for the argument.\n\nTo explicitly enable or disable this behaviour, using the\n``.none`` method of the :class:`py::arg` object:\n\n.. code-block:: cpp\n\n    py::class_<Dog>(m, \"Dog\").def(py::init<>());\n    py::class_<Cat>(m, \"Cat\").def(py::init<>());\n    m.def(\"bark\", [](Dog *dog) -> std::string {\n        if (dog) return \"woof!\"; /* Called with a Dog instance */\n        else return \"(no dog)\"; /* Called with None, dog == nullptr */\n    }, py::arg(\"dog\").none(true));\n    m.def(\"meow\", [](Cat *cat) -> std::string {\n        // Can't be called with None argument\n        return \"meow\";\n    }, py::arg(\"cat\").none(false));\n\nWith the above, the Python call ``bark(None)`` will return the string ``\"(no\ndog)\"``, while attempting to call ``meow(None)`` will raise a ``TypeError``:\n\n.. code-block:: pycon\n\n    >>> from animals import Dog, Cat, bark, meow\n    >>> bark(Dog())\n    'woof!'\n    >>> meow(Cat())\n    'meow'\n    >>> bark(None)\n    '(no dog)'\n    >>> meow(None)\n    Traceback (most recent call last):\n      File \"<stdin>\", line 1, in <module>\n    TypeError: meow(): incompatible function arguments. The following argument types are supported:\n        1. (cat: animals.Cat) -> str\n\n    Invoked with: None\n\nThe default behaviour when the tag is unspecified is to allow ``None``.\n\nOverload resolution order\n=========================\n\nWhen a function or method with multiple overloads is called from Python,\npybind11 determines which overload to call in two passes.  
The first pass\nattempts to call each overload without allowing argument conversion (as if\nevery argument had been specified as ``py::arg().noconvert()`` as described\nabove).\n\nIf no overload succeeds in the no-conversion first pass, a second pass is\nattempted in which argument conversion is allowed (except where prohibited via\nan explicit ``py::arg().noconvert()`` attribute in the function definition).\n\nIf the second pass also fails a ``TypeError`` is raised.\n\nWithin each pass, overloads are tried in the order they were registered with\npybind11.\n\nWhat this means in practice is that pybind11 will prefer any overload that does\nnot require conversion of arguments to an overload that does, but otherwise prefers\nearlier-defined overloads to later-defined ones.\n\n.. note::\n\n    pybind11 does *not* further prioritize based on the number/pattern of\n    overloaded arguments.  That is, pybind11 does not prioritize a function\n    requiring one conversion over one requiring three, but only prioritizes\n    overloads requiring no conversion at all to overloads that require\n    conversion of at least one argument.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/misc.rst",
    "content": "Miscellaneous\n#############\n\n.. _macro_notes:\n\nGeneral notes regarding convenience macros\n==========================================\n\npybind11 provides a few convenience macros such as\n:func:`PYBIND11_DECLARE_HOLDER_TYPE` and ``PYBIND11_OVERLOAD_*``. Since these\nare \"just\" macros that are evaluated in the preprocessor (which has no concept\nof types), they *will* get confused by commas in a template argument; for\nexample, consider:\n\n.. code-block:: cpp\n\n    PYBIND11_OVERLOAD(MyReturnType<T1, T2>, Class<T3, T4>, func)\n\nThe limitation of the C preprocessor interprets this as five arguments (with new\narguments beginning after each comma) rather than three.  To get around this,\nthere are two alternatives: you can use a type alias, or you can wrap the type\nusing the ``PYBIND11_TYPE`` macro:\n\n.. code-block:: cpp\n\n    // Version 1: using a type alias\n    using ReturnType = MyReturnType<T1, T2>;\n    using ClassType = Class<T3, T4>;\n    PYBIND11_OVERLOAD(ReturnType, ClassType, func);\n\n    // Version 2: using the PYBIND11_TYPE macro:\n    PYBIND11_OVERLOAD(PYBIND11_TYPE(MyReturnType<T1, T2>),\n                      PYBIND11_TYPE(Class<T3, T4>), func)\n\nThe ``PYBIND11_MAKE_OPAQUE`` macro does *not* require the above workarounds.\n\n.. _gil:\n\nGlobal Interpreter Lock (GIL)\n=============================\n\nWhen calling a C++ function from Python, the GIL is always held.\nThe classes :class:`gil_scoped_release` and :class:`gil_scoped_acquire` can be\nused to acquire and release the global interpreter lock in the body of a C++\nfunction call. In this way, long-running C++ code can be parallelized using\nmultiple Python threads. Taking :ref:`overriding_virtuals` as an example, this\ncould be realized as follows (important changes highlighted):\n\n.. 
code-block:: cpp\n    :emphasize-lines: 8,9,31,32\n\n    class PyAnimal : public Animal {\n    public:\n        /* Inherit the constructors */\n        using Animal::Animal;\n\n        /* Trampoline (need one for each virtual function) */\n        std::string go(int n_times) {\n            /* Acquire GIL before calling Python code */\n            py::gil_scoped_acquire acquire;\n\n            PYBIND11_OVERLOAD_PURE(\n                std::string, /* Return type */\n                Animal,      /* Parent class */\n                go,          /* Name of function */\n                n_times      /* Argument(s) */\n            );\n        }\n    };\n\n    PYBIND11_MODULE(example, m) {\n        py::class_<Animal, PyAnimal> animal(m, \"Animal\");\n        animal\n            .def(py::init<>())\n            .def(\"go\", &Animal::go);\n\n        py::class_<Dog>(m, \"Dog\", animal)\n            .def(py::init<>());\n\n        m.def(\"call_go\", [](Animal *animal) -> std::string {\n            /* Release GIL before calling into (potentially long-running) C++ code */\n            py::gil_scoped_release release;\n            return call_go(animal);\n        });\n    }\n\nThe ``call_go`` wrapper can also be simplified using the `call_guard` policy\n(see :ref:`call_policies`) which yields the same result:\n\n.. code-block:: cpp\n\n    m.def(\"call_go\", &call_go, py::call_guard<py::gil_scoped_release>());\n\n\nBinding sequence data types, iterators, the slicing protocol, etc.\n==================================================================\n\nPlease refer to the supplemental example for details.\n\n.. 
seealso::\n\n    The file :file:`tests/test_sequences_and_iterators.cpp` contains a\n    complete example that shows how to bind a sequence data type, including\n    length queries (``__len__``), iterators (``__iter__``), the slicing\n    protocol and other kinds of useful operations.\n\n\nPartitioning code over multiple extension modules\n=================================================\n\nIt's straightforward to split binding code over multiple extension modules,\nwhile referencing types that are declared elsewhere. Everything \"just\" works\nwithout any special precautions. One exception to this rule occurs when\nextending a type declared in another extension module. Recall the basic example\nfrom Section :ref:`inheritance`.\n\n.. code-block:: cpp\n\n    py::class_<Pet> pet(m, \"Pet\");\n    pet.def(py::init<const std::string &>())\n       .def_readwrite(\"name\", &Pet::name);\n\n    py::class_<Dog>(m, \"Dog\", pet /* <- specify parent */)\n        .def(py::init<const std::string &>())\n        .def(\"bark\", &Dog::bark);\n\nSuppose now that ``Pet`` bindings are defined in a module named ``basic``,\nwhereas the ``Dog`` bindings are defined somewhere else. The challenge is of\ncourse that the variable ``pet`` is not available anymore though it is needed\nto indicate the inheritance relationship to the constructor of ``class_<Dog>``.\nHowever, it can be acquired as follows:\n\n.. code-block:: cpp\n\n    py::object pet = (py::object) py::module::import(\"basic\").attr(\"Pet\");\n\n    py::class_<Dog>(m, \"Dog\", pet)\n        .def(py::init<const std::string &>())\n        .def(\"bark\", &Dog::bark);\n\nAlternatively, you can specify the base class as a template parameter option to\n``class_``, which performs an automated lookup of the corresponding Python\ntype. Like the above code, however, this also requires invoking the ``import``\nfunction once to ensure that the pybind11 binding code of the module ``basic``\nhas been executed:\n\n.. 
code-block:: cpp\n\n    py::module::import(\"basic\");\n\n    py::class_<Dog, Pet>(m, \"Dog\")\n        .def(py::init<const std::string &>())\n        .def(\"bark\", &Dog::bark);\n\nNaturally, both methods will fail when there are cyclic dependencies.\n\nNote that pybind11 code compiled with hidden-by-default symbol visibility (e.g.\nvia the command line flag ``-fvisibility=hidden`` on GCC/Clang), which is\nrequired for proper pybind11 functionality, can interfere with the ability to\naccess types defined in another extension module.  Working around this requires\nmanually exporting types that are accessed by multiple extension modules;\npybind11 provides a macro to do just this:\n\n.. code-block:: cpp\n\n    class PYBIND11_EXPORT Dog : public Animal {\n        ...\n    };\n\nNote also that it is possible (although would rarely be required) to share arbitrary\nC++ objects between extension modules at runtime. Internal library data is shared\nbetween modules using capsule machinery [#f6]_ which can be also utilized for\nstoring, modifying and accessing user-defined data. Note that an extension module\nwill \"see\" other extensions' data if and only if they were built with the same\npybind11 version. Consider the following example:\n\n.. code-block:: cpp\n\n    auto data = (MyData *) py::get_shared_data(\"mydata\");\n    if (!data)\n        data = (MyData *) py::set_shared_data(\"mydata\", new MyData(42));\n\nIf the above snippet was used in several separately compiled extension modules,\nthe first one to be imported would create a ``MyData`` instance and associate\na ``\"mydata\"`` key with a pointer to it. Extensions that are imported later\nwould be then able to access the data behind the same pointer.\n\n.. [#f6] https://docs.python.org/3/extending/extending.html#using-capsules\n\nModule Destructors\n==================\n\npybind11 does not provide an explicit mechanism to invoke cleanup code at\nmodule destruction time. 
In rare cases where such functionality is required, it\nis possible to emulate it using Python capsules or weak references with a\ndestruction callback.\n\n.. code-block:: cpp\n\n    auto cleanup_callback = []() {\n        // perform cleanup here -- this function is called with the GIL held\n    };\n\n    m.add_object(\"_cleanup\", py::capsule(cleanup_callback));\n\nThis approach has the potential downside that instances of classes exposed\nwithin the module may still be alive when the cleanup callback is invoked\n(whether this is acceptable will generally depend on the application).\n\nAlternatively, the capsule may also be stashed within a type object, which\nensures that it not called before all instances of that type have been\ncollected:\n\n.. code-block:: cpp\n\n    auto cleanup_callback = []() { /* ... */ };\n    m.attr(\"BaseClass\").attr(\"_cleanup\") = py::capsule(cleanup_callback);\n\nBoth approaches also expose a potentially dangerous ``_cleanup`` attribute in\nPython, which may be undesirable from an API standpoint (a premature explicit\ncall from Python might lead to undefined behavior). Yet another approach that \navoids this issue involves weak reference with a cleanup callback:\n\n.. code-block:: cpp\n\n    // Register a callback function that is invoked when the BaseClass object is colelcted\n    py::cpp_function cleanup_callback(\n        [](py::handle weakref) {\n            // perform cleanup here -- this function is called with the GIL held\n\n            weakref.dec_ref(); // release weak reference\n        }\n    );\n\n    // Create a weak reference with a cleanup callback and initially leak it\n    (void) py::weakref(m.attr(\"BaseClass\"), cleanup_callback).release();\n\n.. note::\n\n    PyPy (at least version 5.9) does not garbage collect objects when the\n    interpreter exits. An alternative approach (which also works on CPython) is to use\n    the :py:mod:`atexit` module [#f7]_, for example:\n\n    .. 
code-block:: cpp\n\n        auto atexit = py::module::import(\"atexit\");\n        atexit.attr(\"register\")(py::cpp_function([]() {\n            // perform cleanup here -- this function is called with the GIL held\n        }));\n\n    .. [#f7] https://docs.python.org/3/library/atexit.html\n\n\nGenerating documentation using Sphinx\n=====================================\n\nSphinx [#f4]_ has the ability to inspect the signatures and documentation\nstrings in pybind11-based extension modules to automatically generate beautiful\ndocumentation in a variety formats. The python_example repository [#f5]_ contains a\nsimple example repository which uses this approach.\n\nThere are two potential gotchas when using this approach: first, make sure that\nthe resulting strings do not contain any :kbd:`TAB` characters, which break the\ndocstring parsing routines. You may want to use C++11 raw string literals,\nwhich are convenient for multi-line comments. Conveniently, any excess\nindentation will be automatically be removed by Sphinx. However, for this to\nwork, it is important that all lines are indented consistently, i.e.:\n\n.. code-block:: cpp\n\n    // ok\n    m.def(\"foo\", &foo, R\"mydelimiter(\n        The foo function\n\n        Parameters\n        ----------\n    )mydelimiter\");\n\n    // *not ok*\n    m.def(\"foo\", &foo, R\"mydelimiter(The foo function\n\n        Parameters\n        ----------\n    )mydelimiter\");\n\nBy default, pybind11 automatically generates and prepends a signature to the docstring of a function \nregistered with ``module::def()`` and ``class_::def()``. Sometimes this\nbehavior is not desirable, because you want to provide your own signature or remove \nthe docstring completely to exclude the function from the Sphinx documentation.\nThe class ``options`` allows you to selectively suppress auto-generated signatures:\n\n.. 
code-block:: cpp\n\n    PYBIND11_MODULE(example, m) {\n        py::options options;\n        options.disable_function_signatures();\n\n        m.def(\"add\", [](int a, int b) { return a + b; }, \"A function which adds two numbers\");\n    }\n\nNote that changes to the settings affect only function bindings created during the \nlifetime of the ``options`` instance. When it goes out of scope at the end of the module's init function, \nthe default settings are restored to prevent unwanted side effects.\n\n.. [#f4] http://www.sphinx-doc.org\n.. [#f5] http://github.com/pybind/python_example\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/pycpp/index.rst",
    "content": "Python C++ interface\n####################\n\npybind11 exposes Python types and functions using thin C++ wrappers, which\nmakes it possible to conveniently call Python code from C++ without resorting\nto Python's C API.\n\n.. toctree::\n   :maxdepth: 2\n\n   object\n   numpy\n   utilities\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/pycpp/numpy.rst",
    "content": ".. _numpy:\n\nNumPy\n#####\n\nBuffer protocol\n===============\n\nPython supports an extremely general and convenient approach for exchanging\ndata between plugin libraries. Types can expose a buffer view [#f2]_, which\nprovides fast direct access to the raw internal data representation. Suppose we\nwant to bind the following simplistic Matrix class:\n\n.. code-block:: cpp\n\n    class Matrix {\n    public:\n        Matrix(size_t rows, size_t cols) : m_rows(rows), m_cols(cols) {\n            m_data = new float[rows*cols];\n        }\n        float *data() { return m_data; }\n        size_t rows() const { return m_rows; }\n        size_t cols() const { return m_cols; }\n    private:\n        size_t m_rows, m_cols;\n        float *m_data;\n    };\n\nThe following binding code exposes the ``Matrix`` contents as a buffer object,\nmaking it possible to cast Matrices into NumPy arrays. It is even possible to\ncompletely avoid copy operations with Python expressions like\n``np.array(matrix_instance, copy = False)``.\n\n.. 
code-block:: cpp\n\n    py::class_<Matrix>(m, \"Matrix\", py::buffer_protocol())\n       .def_buffer([](Matrix &m) -> py::buffer_info {\n            return py::buffer_info(\n                m.data(),                               /* Pointer to buffer */\n                sizeof(float),                          /* Size of one scalar */\n                py::format_descriptor<float>::format(), /* Python struct-style format descriptor */\n                2,                                      /* Number of dimensions */\n                { m.rows(), m.cols() },                 /* Buffer dimensions */\n                { sizeof(float) * m.cols(),             /* Strides (in bytes) for each index */\n                  sizeof(float) }\n            );\n        });\n\nSupporting the buffer protocol in a new type involves specifying the special\n``py::buffer_protocol()`` tag in the ``py::class_`` constructor and calling the\n``def_buffer()`` method with a lambda function that creates a\n``py::buffer_info`` description record on demand describing a given matrix\ninstance. The contents of ``py::buffer_info`` mirror the Python buffer protocol\nspecification.\n\n.. code-block:: cpp\n\n    struct buffer_info {\n        void *ptr;\n        ssize_t itemsize;\n        std::string format;\n        ssize_t ndim;\n        std::vector<ssize_t> shape;\n        std::vector<ssize_t> strides;\n    };\n\nTo create a C++ function that can take a Python buffer object as an argument,\nsimply use the type ``py::buffer`` as one of its arguments. Buffers can exist\nin a great variety of configurations, hence some safety checks are usually\nnecessary in the function body. Below, you can see an basic example on how to\ndefine a custom constructor for the Eigen double precision matrix\n(``Eigen::MatrixXd``) type, which supports initialization from compatible\nbuffer objects (e.g. a NumPy matrix).\n\n.. 
code-block:: cpp\n\n    /* Bind MatrixXd (or some other Eigen type) to Python */\n    typedef Eigen::MatrixXd Matrix;\n\n    typedef Matrix::Scalar Scalar;\n    constexpr bool rowMajor = Matrix::Flags & Eigen::RowMajorBit;\n\n    py::class_<Matrix>(m, \"Matrix\", py::buffer_protocol())\n        .def(\"__init__\", [](Matrix &m, py::buffer b) {\n            typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> Strides;\n\n            /* Request a buffer descriptor from Python */\n            py::buffer_info info = b.request();\n\n            /* Some sanity checks ... */\n            if (info.format != py::format_descriptor<Scalar>::format())\n                throw std::runtime_error(\"Incompatible format: expected a double array!\");\n\n            if (info.ndim != 2)\n                throw std::runtime_error(\"Incompatible buffer dimension!\");\n\n            auto strides = Strides(\n                info.strides[rowMajor ? 0 : 1] / (py::ssize_t)sizeof(Scalar),\n                info.strides[rowMajor ? 1 : 0] / (py::ssize_t)sizeof(Scalar));\n\n            auto map = Eigen::Map<Matrix, 0, Strides>(\n                static_cast<Scalar *>(info.ptr), info.shape[0], info.shape[1], strides);\n\n            new (&m) Matrix(map);\n        });\n\nFor reference, the ``def_buffer()`` call for this Eigen data type should look\nas follows:\n\n.. code-block:: cpp\n\n    .def_buffer([](Matrix &m) -> py::buffer_info {\n        return py::buffer_info(\n            m.data(),                                /* Pointer to buffer */\n            sizeof(Scalar),                          /* Size of one scalar */\n            py::format_descriptor<Scalar>::format(), /* Python struct-style format descriptor */\n            2,                                       /* Number of dimensions */\n            { m.rows(), m.cols() },                  /* Buffer dimensions */\n            { sizeof(Scalar) * (rowMajor ? m.cols() : 1),\n              sizeof(Scalar) * (rowMajor ? 
1 : m.rows()) }\n                                                     /* Strides (in bytes) for each index */\n        );\n     })\n\nFor a much easier approach of binding Eigen types (although with some\nlimitations), refer to the section on :doc:`/advanced/cast/eigen`.\n\n.. seealso::\n\n    The file :file:`tests/test_buffers.cpp` contains a complete example\n    that demonstrates using the buffer protocol with pybind11 in more detail.\n\n.. [#f2] http://docs.python.org/3/c-api/buffer.html\n\nArrays\n======\n\nBy exchanging ``py::buffer`` with ``py::array`` in the above snippet, we can\nrestrict the function so that it only accepts NumPy arrays (rather than any\ntype of Python object satisfying the buffer protocol).\n\nIn many situations, we want to define a function which only accepts a NumPy\narray of a certain data type. This is possible via the ``py::array_t<T>``\ntemplate. For instance, the following function requires the argument to be a\nNumPy array containing double precision values.\n\n.. code-block:: cpp\n\n    void f(py::array_t<double> array);\n\nWhen it is invoked with a different type (e.g. an integer or a list of\nintegers), the binding code will attempt to cast the input into a NumPy array\nof the requested type. Note that this feature requires the\n:file:`pybind11/numpy.h` header to be included.\n\nData in NumPy arrays is not guaranteed to packed in a dense manner;\nfurthermore, entries can be separated by arbitrary column and row strides.\nSometimes, it can be useful to require a function to only accept dense arrays\nusing either the C (row-major) or Fortran (column-major) ordering. This can be\naccomplished via a second template argument with values ``py::array::c_style``\nor ``py::array::f_style``.\n\n.. 
code-block:: cpp\n\n    void f(py::array_t<double, py::array::c_style | py::array::forcecast> array);\n\nThe ``py::array::forcecast`` argument is the default value of the second\ntemplate parameter, and it ensures that non-conforming arguments are converted\ninto an array satisfying the specified requirements instead of trying the next\nfunction overload.\n\nStructured types\n================\n\nIn order for ``py::array_t`` to work with structured (record) types, we first\nneed to register the memory layout of the type. This can be done via\n``PYBIND11_NUMPY_DTYPE`` macro, called in the plugin definition code, which\nexpects the type followed by field names:\n\n.. code-block:: cpp\n\n    struct A {\n        int x;\n        double y;\n    };\n\n    struct B {\n        int z;\n        A a;\n    };\n\n    // ...\n    PYBIND11_MODULE(test, m) {\n        // ...\n\n        PYBIND11_NUMPY_DTYPE(A, x, y);\n        PYBIND11_NUMPY_DTYPE(B, z, a);\n        /* now both A and B can be used as template arguments to py::array_t */\n    }\n\nThe structure should consist of fundamental arithmetic types, ``std::complex``,\npreviously registered substructures, and arrays of any of the above. Both C++\narrays and ``std::array`` are supported. While there is a static assertion to\nprevent many types of unsupported structures, it is still the user's\nresponsibility to use only \"plain\" structures that can be safely manipulated as\nraw memory without violating invariants.\n\nVectorizing functions\n=====================\n\nSuppose we want to bind a function with the following signature to Python so\nthat it can process arbitrary NumPy array arguments (vectors, matrices, general\nN-D arrays) in addition to its normal arguments:\n\n.. code-block:: cpp\n\n    double my_func(int x, float y, double z);\n\nAfter including the ``pybind11/numpy.h`` header, this is extremely simple:\n\n.. 
code-block:: cpp\n\n    m.def(\"vectorized_func\", py::vectorize(my_func));\n\nInvoking the function like below causes 4 calls to be made to ``my_func`` with\neach of the array elements. The significant advantage of this compared to\nsolutions like ``numpy.vectorize()`` is that the loop over the elements runs\nentirely on the C++ side and can be crunched down into a tight, optimized loop\nby the compiler. The result is returned as a NumPy array of type\n``numpy.dtype.float64``.\n\n.. code-block:: pycon\n\n    >>> x = np.array([[1, 3],[5, 7]])\n    >>> y = np.array([[2, 4],[6, 8]])\n    >>> z = 3\n    >>> result = vectorized_func(x, y, z)\n\nThe scalar argument ``z`` is transparently replicated 4 times.  The input\narrays ``x`` and ``y`` are automatically converted into the right types (they\nare of type  ``numpy.dtype.int64`` but need to be ``numpy.dtype.int32`` and\n``numpy.dtype.float32``, respectively).\n\n.. note::\n\n    Only arithmetic, complex, and POD types passed by value or by ``const &``\n    reference are vectorized; all other arguments are passed through as-is.\n    Functions taking rvalue reference arguments cannot be vectorized.\n\nIn cases where the computation is too complicated to be reduced to\n``vectorize``, it will be necessary to create and access the buffer contents\nmanually. The following snippet contains a complete example that shows how this\nworks (the code is somewhat contrived, since it could have been done more\nsimply using ``vectorize``).\n\n.. 
code-block:: cpp\n\n    #include <pybind11/pybind11.h>\n    #include <pybind11/numpy.h>\n\n    namespace py = pybind11;\n\n    py::array_t<double> add_arrays(py::array_t<double> input1, py::array_t<double> input2) {\n        py::buffer_info buf1 = input1.request(), buf2 = input2.request();\n\n        if (buf1.ndim != 1 || buf2.ndim != 1)\n            throw std::runtime_error(\"Number of dimensions must be one\");\n\n        if (buf1.size != buf2.size)\n            throw std::runtime_error(\"Input shapes must match\");\n\n        /* No pointer is passed, so NumPy will allocate the buffer */\n        auto result = py::array_t<double>(buf1.size);\n\n        py::buffer_info buf3 = result.request();\n\n        double *ptr1 = (double *) buf1.ptr,\n               *ptr2 = (double *) buf2.ptr,\n               *ptr3 = (double *) buf3.ptr;\n\n        for (size_t idx = 0; idx < buf1.shape[0]; idx++)\n            ptr3[idx] = ptr1[idx] + ptr2[idx];\n\n        return result;\n    }\n\n    PYBIND11_MODULE(test, m) {\n        m.def(\"add_arrays\", &add_arrays, \"Add two NumPy arrays\");\n    }\n\n.. seealso::\n\n    The file :file:`tests/test_numpy_vectorize.cpp` contains a complete\n    example that demonstrates using :func:`vectorize` in more detail.\n\nDirect access\n=============\n\nFor performance reasons, particularly when dealing with very large arrays, it\nis often desirable to directly access array elements without internal checking\nof dimensions and bounds on every access when indices are known to be already\nvalid.  To avoid such checks, the ``array`` class and ``array_t<T>`` template\nclass offer an unchecked proxy object that can be used for this unchecked\naccess through the ``unchecked<N>`` and ``mutable_unchecked<N>`` methods,\nwhere ``N`` gives the required dimensionality of the array:\n\n.. 
code-block:: cpp\n\n    m.def(\"sum_3d\", [](py::array_t<double> x) {\n        auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable\n        double sum = 0;\n        for (ssize_t i = 0; i < r.shape(0); i++)\n            for (ssize_t j = 0; j < r.shape(1); j++)\n                for (ssize_t k = 0; k < r.shape(2); k++)\n                    sum += r(i, j, k);\n        return sum;\n    });\n    m.def(\"increment_3d\", [](py::array_t<double> x) {\n        auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false\n        for (ssize_t i = 0; i < r.shape(0); i++)\n            for (ssize_t j = 0; j < r.shape(1); j++)\n                for (ssize_t k = 0; k < r.shape(2); k++)\n                    r(i, j, k) += 1.0;\n    }, py::arg().noconvert());\n\nTo obtain the proxy from an ``array`` object, you must specify both the data\ntype and number of dimensions as template arguments, such as ``auto r =\nmyarray.mutable_unchecked<float, 2>()``.\n\nIf the number of dimensions is not known at compile time, you can omit the\ndimensions template parameter (i.e. calling ``arr_t.unchecked()`` or\n``arr.unchecked<T>()``.  This will give you a proxy object that works in the\nsame way, but results in less optimizable code and thus a small efficiency\nloss in tight loops.\n\nNote that the returned proxy object directly references the array's data, and\nonly reads its shape, strides, and writeable flag when constructed.  
You must\ntake care to ensure that the referenced array is not destroyed or reshaped for\nthe duration of the returned object, typically by limiting the scope of the\nreturned instance.\n\nThe returned proxy object supports some of the same methods as ``py::array`` so\nthat it can be used as a drop-in replacement for some existing, index-checked\nuses of ``py::array``:\n\n- ``r.ndim()`` returns the number of dimensions\n\n- ``r.data(1, 2, ...)`` and ``r.mutable_data(1, 2, ...)``` returns a pointer to\n  the ``const T`` or ``T`` data, respectively, at the given indices.  The\n  latter is only available to proxies obtained via ``a.mutable_unchecked()``.\n\n- ``itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``.\n\n- ``ndim()`` returns the number of dimensions.\n\n- ``shape(n)`` returns the size of dimension ``n``\n\n- ``size()`` returns the total number of elements (i.e. the product of the shapes).\n\n- ``nbytes()`` returns the number of bytes used by the referenced elements\n  (i.e. ``itemsize()`` times ``size()``).\n\n.. seealso::\n\n    The file :file:`tests/test_numpy_array.cpp` contains additional examples\n    demonstrating the use of this feature.\n\nEllipsis\n========\n\nPython 3 provides a convenient ``...`` ellipsis notation that is often used to\nslice multidimensional arrays. For instance, the following snippet extracts the\nmiddle dimensions of a tensor with the first and last index set to zero.\n\n.. code-block:: python\n\n   a = # a NumPy array\n   b = a[0, ..., 0]\n\nThe function ``py::ellipsis()`` function can be used to perform the same\noperation on the C++ side:\n\n.. code-block:: cpp\n\n   py::array a = /* A NumPy array */;\n   py::array b = a[py::make_tuple(0, py::ellipsis(), 0)];\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/pycpp/object.rst",
    "content": "Python types\n############\n\nAvailable wrappers\n==================\n\nAll major Python types are available as thin C++ wrapper classes. These\ncan also be used as function parameters -- see :ref:`python_objects_as_args`.\n\nAvailable types include :class:`handle`, :class:`object`, :class:`bool_`,\n:class:`int_`, :class:`float_`, :class:`str`, :class:`bytes`, :class:`tuple`,\n:class:`list`, :class:`dict`, :class:`slice`, :class:`none`, :class:`capsule`,\n:class:`iterable`, :class:`iterator`, :class:`function`, :class:`buffer`,\n:class:`array`, and :class:`array_t`.\n\nCasting back and forth\n======================\n\nIn this kind of mixed code, it is often necessary to convert arbitrary C++\ntypes to Python, which can be done using :func:`py::cast`:\n\n.. code-block:: cpp\n\n    MyClass *cls = ..;\n    py::object obj = py::cast(cls);\n\nThe reverse direction uses the following syntax:\n\n.. code-block:: cpp\n\n    py::object obj = ...;\n    MyClass *cls = obj.cast<MyClass *>();\n\nWhen conversion fails, both directions throw the exception :class:`cast_error`.\n\n.. _python_libs:\n\nAccessing Python libraries from C++\n===================================\n\nIt is also possible to import objects defined in the Python standard\nlibrary or available in the current Python environment (``sys.path``) and work\nwith these in C++.\n\nThis example obtains a reference to the Python ``Decimal`` class.\n\n.. code-block:: cpp\n\n    // Equivalent to \"from decimal import Decimal\"\n    py::object Decimal = py::module::import(\"decimal\").attr(\"Decimal\");\n\n.. code-block:: cpp\n\n    // Try to import scipy\n    py::object scipy = py::module::import(\"scipy\");\n    return scipy.attr(\"__version__\");\n\n.. _calling_python_functions:\n\nCalling Python functions\n========================\n\nIt is also possible to call Python classes, functions and methods \nvia ``operator()``.\n\n.. 
code-block:: cpp\n\n    // Construct a Python object of class Decimal\n    py::object pi = Decimal(\"3.14159\");\n\n.. code-block:: cpp\n\n    // Use Python to make our directories\n    py::object os = py::module::import(\"os\");\n    py::object makedirs = os.attr(\"makedirs\");\n    makedirs(\"/tmp/path/to/somewhere\");\n\nOne can convert the result obtained from Python to a pure C++ version \nif a ``py::class_`` or type conversion is defined.\n\n.. code-block:: cpp\n\n    py::function f = <...>;\n    py::object result_py = f(1234, \"hello\", some_instance);\n    MyClass &result = result_py.cast<MyClass>();\n\n.. _calling_python_methods:\n\nCalling Python methods\n========================\n\nTo call an object's method, one can again use ``.attr`` to obtain access to the\nPython method.\n\n.. code-block:: cpp\n\n    // Calculate e^π in decimal\n    py::object exp_pi = pi.attr(\"exp\")();\n    py::print(py::str(exp_pi));\n\nIn the example above ``pi.attr(\"exp\")`` is a *bound method*: it will always call\nthe method for that same instance of the class. Alternately one can create an \n*unbound method* via the Python class (instead of instance) and pass the ``self`` \nobject explicitly, followed by other arguments.\n\n.. code-block:: cpp\n\n    py::object decimal_exp = Decimal.attr(\"exp\");\n\n    // Compute the e^n for n=0..4\n    for (int n = 0; n < 5; n++) {\n        py::print(decimal_exp(Decimal(n));\n    }\n\nKeyword arguments\n=================\n\nKeyword arguments are also supported. In Python, there is the usual call syntax:\n\n.. code-block:: python\n\n    def f(number, say, to):\n        ...  # function code\n\n    f(1234, say=\"hello\", to=some_instance)  # keyword call in Python\n\nIn C++, the same call can be made using:\n\n.. 
code-block:: cpp\n\n    using namespace pybind11::literals; // to bring in the `_a` literal\n    f(1234, \"say\"_a=\"hello\", \"to\"_a=some_instance); // keyword call in C++\n\nUnpacking arguments\n===================\n\nUnpacking of ``*args`` and ``**kwargs`` is also possible and can be mixed with\nother arguments:\n\n.. code-block:: cpp\n\n    // * unpacking\n    py::tuple args = py::make_tuple(1234, \"hello\", some_instance);\n    f(*args);\n\n    // ** unpacking\n    py::dict kwargs = py::dict(\"number\"_a=1234, \"say\"_a=\"hello\", \"to\"_a=some_instance);\n    f(**kwargs);\n\n    // mixed keywords, * and ** unpacking\n    py::tuple args = py::make_tuple(1234);\n    py::dict kwargs = py::dict(\"to\"_a=some_instance);\n    f(*args, \"say\"_a=\"hello\", **kwargs);\n\nGeneralized unpacking according to PEP448_ is also supported:\n\n.. code-block:: cpp\n\n    py::dict kwargs1 = py::dict(\"number\"_a=1234);\n    py::dict kwargs2 = py::dict(\"to\"_a=some_instance);\n    f(**kwargs1, \"say\"_a=\"hello\", **kwargs2);\n\n.. seealso::\n\n    The file :file:`tests/test_pytypes.cpp` contains a complete\n    example that demonstrates passing native Python types in more detail. The\n    file :file:`tests/test_callbacks.cpp` presents a few examples of calling\n    Python functions from C++, including keywords arguments and unpacking.\n\n.. _PEP448: https://www.python.org/dev/peps/pep-0448/\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/pycpp/utilities.rst",
    "content": "Utilities\n#########\n\nUsing Python's print function in C++\n====================================\n\nThe usual way to write output in C++ is using ``std::cout`` while in Python one\nwould use ``print``. Since these methods use different buffers, mixing them can\nlead to output order issues. To resolve this, pybind11 modules can use the\n:func:`py::print` function which writes to Python's ``sys.stdout`` for consistency.\n\nPython's ``print`` function is replicated in the C++ API including optional\nkeyword arguments ``sep``, ``end``, ``file``, ``flush``. Everything works as\nexpected in Python:\n\n.. code-block:: cpp\n\n    py::print(1, 2.0, \"three\"); // 1 2.0 three\n    py::print(1, 2.0, \"three\", \"sep\"_a=\"-\"); // 1-2.0-three\n\n    auto args = py::make_tuple(\"unpacked\", true);\n    py::print(\"->\", *args, \"end\"_a=\"<-\"); // -> unpacked True <-\n\n.. _ostream_redirect:\n\nCapturing standard output from ostream\n======================================\n\nOften, a library will use the streams ``std::cout`` and ``std::cerr`` to print,\nbut this does not play well with Python's standard ``sys.stdout`` and ``sys.stderr``\nredirection. Replacing a library's printing with `py::print <print>` may not\nbe feasible. This can be fixed using a guard around the library function that\nredirects output to the corresponding Python streams:\n\n.. code-block:: cpp\n\n    #include <pybind11/iostream.h>\n\n    ...\n\n    // Add a scoped redirect for your noisy code\n    m.def(\"noisy_func\", []() {\n        py::scoped_ostream_redirect stream(\n            std::cout,                               // std::ostream&\n            py::module::import(\"sys\").attr(\"stdout\") // Python output\n        );\n        call_noisy_func();\n    });\n\nThis method respects flushes on the output streams and will flush if needed\nwhen the scoped guard is destroyed. This allows the output to be redirected in\nreal time, such as to a Jupyter notebook. 
The two arguments, the C++ stream and\nthe Python output, are optional, and default to standard output if not given. An\nextra type, `py::scoped_estream_redirect <scoped_estream_redirect>`, is identical\nexcept for defaulting to ``std::cerr`` and ``sys.stderr``; this can be useful with\n`py::call_guard`, which allows multiple items, but uses the default constructor:\n\n.. code-block:: py\n\n    // Alternative: Call single function using call guard\n    m.def(\"noisy_func\", &call_noisy_function,\n          py::call_guard<py::scoped_ostream_redirect,\n                         py::scoped_estream_redirect>());\n\nThe redirection can also be done in Python with the addition of a context\nmanager, using the `py::add_ostream_redirect() <add_ostream_redirect>` function:\n\n.. code-block:: cpp\n\n    py::add_ostream_redirect(m, \"ostream_redirect\");\n\nThe name in Python defaults to ``ostream_redirect`` if no name is passed.  This\ncreates the following context manager in Python:\n\n.. code-block:: python\n\n    with ostream_redirect(stdout=True, stderr=True):\n        noisy_function()\n\nIt defaults to redirecting both streams, though you can use the keyword\narguments to disable one of the streams if needed.\n\n.. note::\n\n    The above methods will not redirect C-level output to file descriptors, such\n    as ``fprintf``. For those cases, you'll need to redirect the file\n    descriptors either directly in C or with Python's ``os.dup2`` function\n    in an operating-system dependent way.\n\n.. _eval:\n\nEvaluating Python expressions from strings and files\n====================================================\n\npybind11 provides the `eval`, `exec` and `eval_file` functions to evaluate\nPython expressions and statements. The following example illustrates how they\ncan be used.\n\n.. 
code-block:: cpp\n\n    // At beginning of file\n    #include <pybind11/eval.h>\n\n    ...\n\n    // Evaluate in scope of main module\n    py::object scope = py::module::import(\"__main__\").attr(\"__dict__\");\n\n    // Evaluate an isolated expression\n    int result = py::eval(\"my_variable + 10\", scope).cast<int>();\n\n    // Evaluate a sequence of statements\n    py::exec(\n        \"print('Hello')\\n\"\n        \"print('world!');\",\n        scope);\n\n    // Evaluate the statements in an separate Python file on disk\n    py::eval_file(\"script.py\", scope);\n\nC++11 raw string literals are also supported and quite handy for this purpose.\nThe only requirement is that the first statement must be on a new line following\nthe raw string delimiter ``R\"(``, ensuring all lines have common leading indent:\n\n.. code-block:: cpp\n\n    py::exec(R\"(\n        x = get_answer()\n        if x == 42:\n            print('Hello World!')\n        else:\n            print('Bye!')\n        )\", scope\n    );\n\n.. note::\n\n    `eval` and `eval_file` accept a template parameter that describes how the\n    string/file should be interpreted. Possible choices include ``eval_expr``\n    (isolated expression), ``eval_single_statement`` (a single statement, return\n    value is always ``none``), and ``eval_statements`` (sequence of statements,\n    return value is always ``none``). `eval` defaults to  ``eval_expr``,\n    `eval_file` defaults to ``eval_statements`` and `exec` is just a shortcut\n    for ``eval<eval_statements>``.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/advanced/smart_ptrs.rst",
    "content": "Smart pointers\n##############\n\nstd::unique_ptr\n===============\n\nGiven a class ``Example`` with Python bindings, it's possible to return\ninstances wrapped in C++11 unique pointers, like so\n\n.. code-block:: cpp\n\n    std::unique_ptr<Example> create_example() { return std::unique_ptr<Example>(new Example()); }\n\n.. code-block:: cpp\n\n    m.def(\"create_example\", &create_example);\n\nIn other words, there is nothing special that needs to be done. While returning\nunique pointers in this way is allowed, it is *illegal* to use them as function\narguments. For instance, the following function signature cannot be processed\nby pybind11.\n\n.. code-block:: cpp\n\n    void do_something_with_example(std::unique_ptr<Example> ex) { ... }\n\nThe above signature would imply that Python needs to give up ownership of an\nobject that is passed to this function, which is generally not possible (for\ninstance, the object might be referenced elsewhere).\n\nstd::shared_ptr\n===============\n\nThe binding generator for classes, :class:`class_`, can be passed a template\ntype that denotes a special *holder* type that is used to manage references to\nthe object.  If no such holder type template argument is given, the default for\na type named ``Type`` is ``std::unique_ptr<Type>``, which means that the object\nis deallocated when Python's reference count goes to zero.\n\nIt is possible to switch to other types of reference counting wrappers or smart\npointers, which is useful in codebases that rely on them. For instance, the\nfollowing snippet causes ``std::shared_ptr`` to be used instead.\n\n.. code-block:: cpp\n\n    py::class_<Example, std::shared_ptr<Example> /* <- holder type */> obj(m, \"Example\");\n\nNote that any particular class can only be associated with a single holder type.\n\nOne potential stumbling block when using holder types is that they need to be\napplied consistently. Can you guess what's broken about the following binding\ncode?\n\n.. 
code-block:: cpp\n\n    class Child { };\n\n    class Parent {\n    public:\n       Parent() : child(std::make_shared<Child>()) { }\n       Child *get_child() { return child.get(); }  /* Hint: ** DON'T DO THIS ** */\n    private:\n        std::shared_ptr<Child> child;\n    };\n\n    PYBIND11_MODULE(example, m) {\n        py::class_<Child, std::shared_ptr<Child>>(m, \"Child\");\n\n        py::class_<Parent, std::shared_ptr<Parent>>(m, \"Parent\")\n           .def(py::init<>())\n           .def(\"get_child\", &Parent::get_child);\n    }\n\nThe following Python code will cause undefined behavior (and likely a\nsegmentation fault).\n\n.. code-block:: python\n\n   from example import Parent\n   print(Parent().get_child())\n\nThe problem is that ``Parent::get_child()`` returns a pointer to an instance of\n``Child``, but the fact that this instance is already managed by\n``std::shared_ptr<...>`` is lost when passing raw pointers. In this case,\npybind11 will create a second independent ``std::shared_ptr<...>`` that also\nclaims ownership of the pointer. In the end, the object will be freed **twice**\nsince these shared pointers have no way of knowing about each other.\n\nThere are two ways to resolve this issue:\n\n1. For types that are managed by a smart pointer class, never use raw pointers\n   in function arguments or return values. In other words: always consistently\n   wrap pointers into their designated holder types (such as\n   ``std::shared_ptr<...>``). In this case, the signature of ``get_child()``\n   should be modified as follows:\n\n.. code-block:: cpp\n\n    std::shared_ptr<Child> get_child() { return child; }\n\n2. Adjust the definition of ``Child`` by specifying\n   ``std::enable_shared_from_this<T>`` (see cppreference_ for details) as a\n   base class. This adds a small bit of information to ``Child`` that allows\n   pybind11 to realize that there is already an existing\n   ``std::shared_ptr<...>`` and communicate with it. 
In this case, the\n   declaration of ``Child`` should look as follows:\n\n.. _cppreference: http://en.cppreference.com/w/cpp/memory/enable_shared_from_this\n\n.. code-block:: cpp\n\n    class Child : public std::enable_shared_from_this<Child> { };\n\n.. _smart_pointers:\n\nCustom smart pointers\n=====================\n\npybind11 supports ``std::unique_ptr`` and ``std::shared_ptr`` right out of the\nbox. For any other custom smart pointer, transparent conversions can be enabled\nusing a macro invocation similar to the following. It must be declared at the\ntop namespace level before any binding code:\n\n.. code-block:: cpp\n\n    PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>);\n\nThe first argument of :func:`PYBIND11_DECLARE_HOLDER_TYPE` should be a\nplaceholder name that is used as a template parameter of the second argument.\nThus, feel free to use any identifier, but use it consistently on both sides;\nalso, don't use the name of a type that already exists in your codebase.\n\nThe macro also accepts a third optional boolean parameter that is set to false\nby default. Specify\n\n.. code-block:: cpp\n\n    PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>, true);\n\nif ``SmartPtr<T>`` can always be initialized from a ``T*`` pointer without the\nrisk of inconsistencies (such as multiple independent ``SmartPtr`` instances\nbelieving that they are the sole owner of the ``T*`` pointer). A common\nsituation where ``true`` should be passed is when the ``T`` instances use\n*intrusive* reference counting.\n\nPlease take a look at the :ref:`macro_notes` before using this feature.\n\nBy default, pybind11 assumes that your custom smart pointer has a standard\ninterface, i.e. provides a ``.get()`` member function to access the underlying\nraw pointer. If this is not the case, pybind11's ``holder_helper`` must be\nspecialized:\n\n.. 
code-block:: cpp\n\n    // Always needed for custom holder types\n    PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>);\n\n    // Only needed if the type's `.get()` goes by another name\n    namespace pybind11 { namespace detail {\n        template <typename T>\n        struct holder_helper<SmartPtr<T>> { // <-- specialization\n            static const T *get(const SmartPtr<T> &p) { return p.getPointer(); }\n        };\n    }}\n\nThe above specialization informs pybind11 that the custom ``SmartPtr`` class\nprovides ``.get()`` functionality via ``.getPointer()``.\n\n.. seealso::\n\n    The file :file:`tests/test_smart_ptr.cpp` contains a complete example\n    that demonstrates how to work with custom reference-counting holder types\n    in more detail.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/basics.rst",
    "content": ".. _basics:\n\nFirst steps\n###########\n\nThis section demonstrates the basic features of pybind11. Before getting\nstarted, make sure that the development environment is set up to compile the\nincluded set of test cases.\n\n\nCompiling the test cases\n========================\n\nLinux/MacOS\n-----------\n\nOn Linux you'll need to install the **python-dev** or **python3-dev** packages as\nwell as **cmake**. On Mac OS, the included python version works out of the box,\nbut **cmake** must still be installed.\n\nAfter installing the prerequisites, run\n\n.. code-block:: bash\n\n   mkdir build\n   cd build\n   cmake ..\n   make check -j 4\n\nThe last line will both compile and run the tests.\n\nWindows\n-------\n\nOn Windows, only **Visual Studio 2015** and newer are supported since pybind11 relies\non various C++11 language features that break older versions of Visual Studio.\n\nTo compile and run the tests:\n\n.. code-block:: batch\n\n   mkdir build\n   cd build\n   cmake ..\n   cmake --build . --config Release --target check\n\nThis will create a Visual Studio project, compile and run the target, all from the\ncommand line.\n\n.. Note::\n\n    If all tests fail, make sure that the Python binary and the testcases are compiled\n    for the same processor type and bitness (i.e. either **i386** or **x86_64**). You\n    can specify **x86_64** as the target architecture for the generated Visual Studio\n    project using ``cmake -A x64 ..``.\n\n.. seealso::\n\n    Advanced users who are already familiar with Boost.Python may want to skip\n    the tutorial and look at the test cases in the :file:`tests` directory,\n    which exercise all features of pybind11.\n\nHeader and namespace conventions\n================================\n\nFor brevity, all code examples assume that the following two lines are present:\n\n.. 
code-block:: cpp\n\n    #include <pybind11/pybind11.h>\n\n    namespace py = pybind11;\n\nSome features may require additional headers, but those will be specified as needed.\n\n.. _simple_example:\n\nCreating bindings for a simple function\n=======================================\n\nLet's start by creating Python bindings for an extremely simple function, which\nadds two numbers and returns their result:\n\n.. code-block:: cpp\n\n    int add(int i, int j) {\n        return i + j;\n    }\n\nFor simplicity [#f1]_, we'll put both this function and the binding code into\na file named :file:`example.cpp` with the following contents:\n\n.. code-block:: cpp\n\n    #include <pybind11/pybind11.h>\n\n    int add(int i, int j) {\n        return i + j;\n    }\n\n    PYBIND11_MODULE(example, m) {\n        m.doc() = \"pybind11 example plugin\"; // optional module docstring\n\n        m.def(\"add\", &add, \"A function which adds two numbers\");\n    }\n\n.. [#f1] In practice, implementation and binding code will generally be located\n         in separate files.\n\nThe :func:`PYBIND11_MODULE` macro creates a function that will be called when an\n``import`` statement is issued from within Python. The module name (``example``)\nis given as the first macro argument (it should not be in quotes). The second\nargument (``m``) defines a variable of type :class:`py::module <module>` which\nis the main interface for creating bindings. The method :func:`module::def`\ngenerates binding code that exposes the ``add()`` function to Python.\n\n.. note::\n\n    Notice how little code was needed to expose our function to Python: all\n    details regarding the function's parameters and return value were\n    automatically inferred using template metaprogramming. 
This overall\n    approach and the used syntax are borrowed from Boost.Python, though the\n    underlying implementation is very different.\n\npybind11 is a header-only library, hence it is not necessary to link against\nany special libraries and there are no intermediate (magic) translation steps.\nOn Linux, the above example can be compiled using the following command:\n\n.. code-block:: bash\n\n    $ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`\n\nFor more details on the required compiler flags on Linux and MacOS, see\n:ref:`building_manually`. For complete cross-platform compilation instructions,\nrefer to the :ref:`compiling` page.\n\nThe `python_example`_ and `cmake_example`_ repositories are also a good place\nto start. They are both complete project examples with cross-platform build\nsystems. The only difference between the two is that `python_example`_ uses\nPython's ``setuptools`` to build the module, while `cmake_example`_ uses CMake\n(which may be preferable for existing C++ projects).\n\n.. _python_example: https://github.com/pybind/python_example\n.. _cmake_example: https://github.com/pybind/cmake_example\n\nBuilding the above C++ code will produce a binary module file that can be\nimported to Python. Assuming that the compiled module is located in the\ncurrent directory, the following interactive Python session shows how to\nload and execute the example:\n\n.. code-block:: pycon\n\n    $ python\n    Python 2.7.10 (default, Aug 22 2015, 20:33:39)\n    [GCC 4.2.1 Compatible Apple LLVM 7.0.0 (clang-700.0.59.1)] on darwin\n    Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n    >>> import example\n    >>> example.add(1, 2)\n    3L\n    >>>\n\n.. _keyword_args:\n\nKeyword arguments\n=================\n\nWith a simple modification of the code, it is possible to inform Python about the\nnames of the arguments (\"i\" and \"j\" in this case).\n\n.. 
code-block:: cpp\n\n    m.def(\"add\", &add, \"A function which adds two numbers\",\n          py::arg(\"i\"), py::arg(\"j\"));\n\n:class:`arg` is one of several special tag classes which can be used to pass\nmetadata into :func:`module::def`. With this modified binding code, we can now\ncall the function using keyword arguments, which is a more readable alternative\nparticularly for functions taking many parameters:\n\n.. code-block:: pycon\n\n    >>> import example\n    >>> example.add(i=1, j=2)\n    3L\n\nThe keyword names also appear in the function signatures within the documentation.\n\n.. code-block:: pycon\n\n    >>> help(example)\n\n    ....\n\n    FUNCTIONS\n        add(...)\n            Signature : (i: int, j: int) -> int\n\n            A function which adds two numbers\n\nA shorter notation for named arguments is also available:\n\n.. code-block:: cpp\n\n    // regular notation\n    m.def(\"add1\", &add, py::arg(\"i\"), py::arg(\"j\"));\n    // shorthand\n    using namespace pybind11::literals;\n    m.def(\"add2\", &add, \"i\"_a, \"j\"_a);\n\nThe :var:`_a` suffix forms a C++11 literal which is equivalent to :class:`arg`.\nNote that the literal operator must first be made visible with the directive\n``using namespace pybind11::literals``. This does not bring in anything else\nfrom the ``pybind11`` namespace except for literals.\n\n.. _default_args:\n\nDefault arguments\n=================\n\nSuppose now that the function to be bound has default arguments, e.g.:\n\n.. code-block:: cpp\n\n    int add(int i = 1, int j = 2) {\n        return i + j;\n    }\n\nUnfortunately, pybind11 cannot automatically extract these parameters, since they\nare not part of the function's type information. However, they are simple to specify\nusing an extension of :class:`arg`:\n\n.. 
code-block:: cpp\n\n    m.def(\"add\", &add, \"A function which adds two numbers\",\n          py::arg(\"i\") = 1, py::arg(\"j\") = 2);\n\nThe default values also appear within the documentation.\n\n.. code-block:: pycon\n\n    >>> help(example)\n\n    ....\n\n    FUNCTIONS\n        add(...)\n            Signature : (i: int = 1, j: int = 2) -> int\n\n            A function which adds two numbers\n\nThe shorthand notation is also available for default arguments:\n\n.. code-block:: cpp\n\n    // regular notation\n    m.def(\"add1\", &add, py::arg(\"i\") = 1, py::arg(\"j\") = 2);\n    // shorthand\n    m.def(\"add2\", &add, \"i\"_a=1, \"j\"_a=2);\n\nExporting variables\n===================\n\nTo expose a value from C++, use the ``attr`` function to register it in a\nmodule as shown below. Built-in types and general objects (more on that later)\nare automatically converted when assigned as attributes, and can be explicitly\nconverted using the function ``py::cast``.\n\n.. code-block:: cpp\n\n    PYBIND11_MODULE(example, m) {\n        m.attr(\"the_answer\") = 42;\n        py::object world = py::cast(\"World\");\n        m.attr(\"what\") = world;\n    }\n\nThese are then accessible from Python:\n\n.. code-block:: pycon\n\n    >>> import example\n    >>> example.the_answer\n    42\n    >>> example.what\n    'World'\n\n.. _supported_types:\n\nSupported data types\n====================\n\nA large number of data types are supported out of the box and can be used\nseamlessly as functions arguments, return values or with ``py::cast`` in general.\nFor a full overview, see the :doc:`advanced/cast/index` section.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/benchmark.py",
    "content": "import random\nimport os\nimport time\nimport datetime as dt\n\nnfns = 4  # Functions per class\nnargs = 4  # Arguments per function\n\n\ndef generate_dummy_code_pybind11(nclasses=10):\n    decl = \"\"\n    bindings = \"\"\n\n    for cl in range(nclasses):\n        decl += \"class cl%03i;\\n\" % cl\n    decl += '\\n'\n\n    for cl in range(nclasses):\n        decl += \"class cl%03i {\\n\" % cl\n        decl += \"public:\\n\"\n        bindings += '    py::class_<cl%03i>(m, \"cl%03i\")\\n' % (cl, cl)\n        for fn in range(nfns):\n            ret = random.randint(0, nclasses - 1)\n            params  = [random.randint(0, nclasses - 1) for i in range(nargs)]\n            decl += \"    cl%03i *fn_%03i(\" % (ret, fn)\n            decl += \", \".join(\"cl%03i *\" % p for p in params)\n            decl += \");\\n\"\n            bindings += '        .def(\"fn_%03i\", &cl%03i::fn_%03i)\\n' % \\\n                (fn, cl, fn)\n        decl += \"};\\n\\n\"\n        bindings += '        ;\\n'\n\n    result = \"#include <pybind11/pybind11.h>\\n\\n\"\n    result += \"namespace py = pybind11;\\n\\n\"\n    result += decl + '\\n'\n    result += \"PYBIND11_MODULE(example, m) {\\n\"\n    result += bindings\n    result += \"}\"\n    return result\n\n\ndef generate_dummy_code_boost(nclasses=10):\n    decl = \"\"\n    bindings = \"\"\n\n    for cl in range(nclasses):\n        decl += \"class cl%03i;\\n\" % cl\n    decl += '\\n'\n\n    for cl in range(nclasses):\n        decl += \"class cl%03i {\\n\" % cl\n        decl += \"public:\\n\"\n        bindings += '    py::class_<cl%03i>(\"cl%03i\")\\n' % (cl, cl)\n        for fn in range(nfns):\n            ret = random.randint(0, nclasses - 1)\n            params  = [random.randint(0, nclasses - 1) for i in range(nargs)]\n            decl += \"    cl%03i *fn_%03i(\" % (ret, fn)\n            decl += \", \".join(\"cl%03i *\" % p for p in params)\n            decl += \");\\n\"\n            bindings += '        .def(\"fn_%03i\", 
&cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\\n' % \\\n                (fn, cl, fn)\n        decl += \"};\\n\\n\"\n        bindings += '        ;\\n'\n\n    result = \"#include <boost/python.hpp>\\n\\n\"\n    result += \"namespace py = boost::python;\\n\\n\"\n    result += decl + '\\n'\n    result += \"BOOST_PYTHON_MODULE(example) {\\n\"\n    result += bindings\n    result += \"}\"\n    return result\n\n\nfor codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:\n    print (\"{\")\n    for i in range(0, 10):\n        nclasses = 2 ** i\n        with open(\"test.cpp\", \"w\") as f:\n            f.write(codegen(nclasses))\n        n1 = dt.datetime.now()\n        os.system(\"g++ -Os -shared -rdynamic -undefined dynamic_lookup \"\n            \"-fvisibility=hidden -std=c++14 test.cpp -I include \"\n            \"-I /System/Library/Frameworks/Python.framework/Headers -o test.so\")\n        n2 = dt.datetime.now()\n        elapsed = (n2 - n1).total_seconds()\n        size = os.stat('test.so').st_size\n        print(\"   {%i, %f, %i},\" % (nclasses * nfns, elapsed, size))\n    print (\"}\")\n"
  },
  {
    "path": "src/third_party/pybind11/docs/benchmark.rst",
    "content": "Benchmark\n=========\n\nThe following is the result of a synthetic benchmark comparing both compilation\ntime and module size of pybind11 against Boost.Python. A detailed report about a\nBoost.Python to pybind11 conversion of a real project is available here: [#f1]_.\n\n.. [#f1] http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf\n\nSetup\n-----\n\nA python script (see the ``docs/benchmark.py`` file) was used to generate a set\nof files with dummy classes whose count increases for each successive benchmark\n(between 1 and 2048 classes in powers of two). Each class has four methods with\na randomly generated signature with a return value and four arguments. (There\nwas no particular reason for this setup other than the desire to generate many\nunique function signatures whose count could be controlled in a simple way.)\n\nHere is an example of the binding code for one class:\n\n.. code-block:: cpp\n\n    ...\n    class cl034 {\n    public:\n        cl279 *fn_000(cl084 *, cl057 *, cl065 *, cl042 *);\n        cl025 *fn_001(cl098 *, cl262 *, cl414 *, cl121 *);\n        cl085 *fn_002(cl445 *, cl297 *, cl145 *, cl421 *);\n        cl470 *fn_003(cl200 *, cl323 *, cl332 *, cl492 *);\n    };\n    ...\n\n    PYBIND11_MODULE(example, m) {\n        ...\n        py::class_<cl034>(m, \"cl034\")\n            .def(\"fn_000\", &cl034::fn_000)\n            .def(\"fn_001\", &cl034::fn_001)\n            .def(\"fn_002\", &cl034::fn_002)\n            .def(\"fn_003\", &cl034::fn_003)\n        ...\n    }\n\nThe Boost.Python version looks almost identical except that a return value\npolicy had to be specified as an argument to ``def()``. For both libraries,\ncompilation was done with\n\n.. code-block:: bash\n\n    Apple LLVM version 7.0.2 (clang-700.1.81)\n\nand the following compilation flags\n\n.. 
code-block:: bash\n\n    g++ -Os -shared -rdynamic -undefined dynamic_lookup -fvisibility=hidden -std=c++14\n\nCompilation time\n----------------\n\nThe following log-log plot shows how the compilation time grows for an\nincreasing number of class and function declarations. pybind11 includes many\nfewer headers, which initially leads to shorter compilation times, but the\nperformance is ultimately fairly similar (pybind11 is 19.8 seconds faster for\nthe largest file with 2048 classes and a total of 8192 methods -- a\nmodest **1.2x** speedup relative to Boost.Python, which required 116.35\nseconds).\n\n.. only:: not latex\n\n    .. image:: pybind11_vs_boost_python1.svg\n\n.. only:: latex\n\n    .. image:: pybind11_vs_boost_python1.png\n\nModule size\n-----------\n\nDifferences between the two libraries become much more pronounced when\nconsidering the file size of the generated Python plugin: for the largest file,\nthe binary generated by Boost.Python required 16.8 MiB, which was **2.17\ntimes** / **9.1 megabytes** larger than the output generated by pybind11. For\nvery small inputs, Boost.Python has an edge in the plot below -- however, note\nthat it stores many definitions in an external library, whose size was not\nincluded here, hence the comparison is slightly shifted in Boost.Python's\nfavor.\n\n.. only:: not latex\n\n    .. image:: pybind11_vs_boost_python2.svg\n\n.. only:: latex\n\n    .. image:: pybind11_vs_boost_python2.png\n\n\n"
  },
  {
    "path": "src/third_party/pybind11/docs/changelog.rst",
    "content": ".. _changelog:\n\nChangelog\n#########\n\nStarting with version 1.8.0, pybind11 releases use a `semantic versioning\n<http://semver.org>`_ policy.\n\nv2.3.0 (Not yet released)\n-----------------------------------------------------\n\n* Significantly reduced module binary size (10-20%) when compiled in C++11 mode\n  with GCC/Clang, or in any mode with MSVC. Function signatures are now always\n  precomputed at compile time (this was previously only available in C++14 mode\n  for non-MSVC compilers).\n  `#934 <https://github.com/pybind/pybind11/pull/934>`_.\n\n* Add basic support for tag-based static polymorphism, where classes\n  provide a method that returns the desired type of an instance.\n  `#1326 <https://github.com/pybind/pybind11/pull/1326>`_.\n\n* Added support for write-only properties.\n  `#1144 <https://github.com/pybind/pybind11/pull/1144>`_.\n\n* Python type wrappers (``py::handle``, ``py::object``, etc.)\n  now map Python's number protocol onto C++ arithmetic\n  operators such as ``operator+``, ``operator/=``, etc.\n  `#1511 <https://github.com/pybind/pybind11/pull/1511>`_.\n\n* A number of improvements related to enumerations:\n\n   1. The ``enum_`` implementation was rewritten from scratch to reduce\n      code bloat. Rather than instantiating a full implementation for each\n      enumeration, most code is now contained in a generic base class.\n      `#1511 <https://github.com/pybind/pybind11/pull/1511>`_.\n\n   2. The ``value()`` method of ``py::enum_`` now accepts an optional\n      docstring that will be shown in the documentation of the associated\n      enumeration. `#1160 <https://github.com/pybind/pybind11/pull/1160>`_.\n\n   3. 
Check for already existing enum value and throw an error if present.\n      `#1453 <https://github.com/pybind/pybind11/pull/1453>`_.\n\n* Added ``py::ellipsis()`` method for slicing of multidimensional NumPy arrays\n  `#1502 <https://github.com/pybind/pybind11/pull/1502>`_.\n\n* ``pybind11_add_module()``: allow including Python as a ``SYSTEM`` include path.\n  `#1416 <https://github.com/pybind/pybind11/pull/1416>`_.\n\n* ``pybind11/stl.h`` does not convert strings to ``vector<string>`` anymore.\n  `#1258 <https://github.com/pybind/pybind11/issues/1258>`_.\n\nv2.2.4 (September 11, 2018)\n-----------------------------------------------------\n\n* Use new Python 3.7 Thread Specific Storage (TSS) implementation if available.\n  `#1454 <https://github.com/pybind/pybind11/pull/1454>`_,\n  `#1517 <https://github.com/pybind/pybind11/pull/1517>`_.\n\n* Fixes for newer MSVC versions and C++17 mode.\n  `#1347 <https://github.com/pybind/pybind11/pull/1347>`_,\n  `#1462 <https://github.com/pybind/pybind11/pull/1462>`_.\n\n* Propagate return value policies to type-specific casters\n  when casting STL containers.\n  `#1455 <https://github.com/pybind/pybind11/pull/1455>`_.\n\n* Allow ostream-redirection of more than 1024 characters.\n  `#1479 <https://github.com/pybind/pybind11/pull/1479>`_.\n\n* Set ``Py_DEBUG`` define when compiling against a debug Python build.\n  `#1438 <https://github.com/pybind/pybind11/pull/1438>`_.\n\n* Untangle integer logic in number type caster to work for custom\n  types that may only be castable to a restricted set of builtin types.\n  `#1442 <https://github.com/pybind/pybind11/pull/1442>`_.\n\n* CMake build system: Remember Python version in cache file.\n  `#1434 <https://github.com/pybind/pybind11/pull/1434>`_.\n\n* Fix for custom smart pointers: use ``std::addressof`` to obtain holder\n  address instead of ``operator&``.\n  `#1435 <https://github.com/pybind/pybind11/pull/1435>`_.\n\n* Properly report exceptions thrown during module 
initialization.\n  `#1362 <https://github.com/pybind/pybind11/pull/1362>`_.\n\n* Fixed a segmentation fault when creating empty-shaped NumPy array.\n  `#1371 <https://github.com/pybind/pybind11/pull/1371>`_.\n\n* The version of Intel C++ compiler must be >= 2017, and this is now checked by\n  the header files. `#1363 <https://github.com/pybind/pybind11/pull/1363>`_.\n\n* A few minor typo fixes and improvements to the test suite, and\n  patches that silence compiler warnings.\n\nv2.2.3 (April 29, 2018)\n-----------------------------------------------------\n\n* The pybind11 header location detection was replaced by a new implementation\n  that no longer depends on ``pip`` internals (the recently released ``pip``\n  10 has restricted access to this API).\n  `#1190 <https://github.com/pybind/pybind11/pull/1190>`_.\n\n* Small adjustment to an implementation detail to work around a compiler segmentation fault in Clang 3.3/3.4.\n  `#1350 <https://github.com/pybind/pybind11/pull/1350>`_.\n\n* The minimal supported version of the Intel compiler was >= 17.0 since\n  pybind11 v2.1. 
This check is now explicit, and a compile-time error is raised\n  if the compiler does not meet the requirement.\n  `#1363 <https://github.com/pybind/pybind11/pull/1363>`_.\n\n* Fixed an endianness-related fault in the test suite.\n  `#1287 <https://github.com/pybind/pybind11/pull/1287>`_.\n\nv2.2.2 (February 7, 2018)\n-----------------------------------------------------\n\n* Fixed a segfault when combining embedded interpreter\n  shutdown/reinitialization with externally loaded pybind11 modules.\n  `#1092 <https://github.com/pybind/pybind11/pull/1092>`_.\n\n* Eigen support: fixed a bug where Nx1/1xN numpy inputs couldn't be passed as\n  arguments to Eigen vectors (which for Eigen are simply compile-time fixed\n  Nx1/1xN matrices).\n  `#1106 <https://github.com/pybind/pybind11/pull/1106>`_.\n\n* Clarified the license by moving the licensing of contributions from\n  ``LICENSE`` into ``CONTRIBUTING.md``: the licensing of contributions is not\n  actually part of the software license as distributed.  This isn't meant to be\n  a substantial change in the licensing of the project, but addresses concerns\n  that the clause made the license non-standard.\n  `#1109 <https://github.com/pybind/pybind11/issues/1109>`_.\n\n* Fixed a regression introduced in 2.1 that broke binding functions with lvalue\n  character literal arguments.\n  `#1128 <https://github.com/pybind/pybind11/pull/1128>`_.\n\n* MSVC: fix for compilation failures under /permissive-, and added the flag to\n  the appveyor test suite.\n  `#1155 <https://github.com/pybind/pybind11/pull/1155>`_.\n\n* Fixed ``__qualname__`` generation, and in turn, fixes how class names\n  (especially nested class names) are shown in generated docstrings.\n  `#1171 <https://github.com/pybind/pybind11/pull/1171>`_.\n\n* Updated the FAQ with a suggested project citation reference.\n  `#1189 <https://github.com/pybind/pybind11/pull/1189>`_.\n\n* Added fixes for deprecation warnings when compiled under C++17 with\n  ``-Wdeprecated`` turned on, and 
add ``-Wdeprecated`` to the test suite\n  compilation flags.\n  `#1191 <https://github.com/pybind/pybind11/pull/1191>`_.\n\n* Fixed outdated PyPI URLs in ``setup.py``.\n  `#1213 <https://github.com/pybind/pybind11/pull/1213>`_.\n\n* Fixed a refcount leak for arguments that end up in a ``py::args`` argument\n  for functions with both fixed positional and ``py::args`` arguments.\n  `#1216 <https://github.com/pybind/pybind11/pull/1216>`_.\n\n* Fixed a potential segfault resulting from possible premature destruction of\n  ``py::args``/``py::kwargs`` arguments with overloaded functions.\n  `#1223 <https://github.com/pybind/pybind11/pull/1223>`_.\n\n* Fixed ``del map[item]`` for a ``stl_bind.h`` bound stl map.\n  `#1229 <https://github.com/pybind/pybind11/pull/1229>`_.\n\n* Fixed a regression from v2.1.x where the aggregate initialization could\n  unintentionally end up at a constructor taking a templated\n  ``std::initializer_list<T>`` argument.\n  `#1249 <https://github.com/pybind/pybind11/pull/1249>`_.\n\n* Fixed an issue where calling a function with a keep_alive policy on the same\n  nurse/patient pair would cause the internal patient storage to needlessly\n  grow (unboundedly, if the nurse is long-lived).\n  `#1251 <https://github.com/pybind/pybind11/issues/1251>`_.\n\n* Various other minor fixes.\n\nv2.2.1 (September 14, 2017)\n-----------------------------------------------------\n\n* Added ``py::module::reload()`` member function for reloading a module.\n  `#1040 <https://github.com/pybind/pybind11/pull/1040>`_.\n\n* Fixed a reference leak in the number converter.\n  `#1078 <https://github.com/pybind/pybind11/pull/1078>`_.\n\n* Fixed compilation with Clang on host GCC < 5 (old libstdc++ which isn't fully\n  C++11 compliant). `#1062 <https://github.com/pybind/pybind11/pull/1062>`_.\n\n* Fixed a regression where the automatic ``std::vector<bool>`` caster would\n  fail to compile. 
The same fix also applies to any container which returns\n  element proxies instead of references.\n  `#1053 <https://github.com/pybind/pybind11/pull/1053>`_.\n\n* Fixed a regression where the ``py::keep_alive`` policy could not be applied\n  to constructors. `#1065 <https://github.com/pybind/pybind11/pull/1065>`_.\n\n* Fixed a nullptr dereference when loading a ``py::module_local`` type\n  that's only registered in an external module.\n  `#1058 <https://github.com/pybind/pybind11/pull/1058>`_.\n\n* Fixed implicit conversion of accessors to types derived from ``py::object``.\n  `#1076 <https://github.com/pybind/pybind11/pull/1076>`_.\n\n* The ``name`` in ``PYBIND11_MODULE(name, variable)`` can now be a macro.\n  `#1082 <https://github.com/pybind/pybind11/pull/1082>`_.\n\n* Relaxed overly strict ``py::pickle()`` check for matching get and set types.\n  `#1064 <https://github.com/pybind/pybind11/pull/1064>`_.\n\n* Conversion errors now try to be more informative when it's likely that\n  a missing header is the cause (e.g. forgetting ``<pybind11/stl.h>``).\n  `#1077 <https://github.com/pybind/pybind11/pull/1077>`_.\n\nv2.2.0 (August 31, 2017)\n-----------------------------------------------------\n\n* Support for embedding the Python interpreter. See the\n  :doc:`documentation page </advanced/embedding>` for a\n  full overview of the new features.\n  `#774 <https://github.com/pybind/pybind11/pull/774>`_,\n  `#889 <https://github.com/pybind/pybind11/pull/889>`_,\n  `#892 <https://github.com/pybind/pybind11/pull/892>`_,\n  `#920 <https://github.com/pybind/pybind11/pull/920>`_.\n\n  .. 
code-block:: cpp\n\n      #include <pybind11/embed.h>\n      namespace py = pybind11;\n\n      int main() {\n          py::scoped_interpreter guard{}; // start the interpreter and keep it alive\n\n          py::print(\"Hello, World!\"); // use the Python API\n      }\n\n* Support for inheriting from multiple C++ bases in Python.\n  `#693 <https://github.com/pybind/pybind11/pull/693>`_.\n\n  .. code-block:: python\n\n      from cpp_module import CppBase1, CppBase2\n\n      class PyDerived(CppBase1, CppBase2):\n          def __init__(self):\n              CppBase1.__init__(self)  # C++ bases must be initialized explicitly\n              CppBase2.__init__(self)\n\n* ``PYBIND11_MODULE`` is now the preferred way to create module entry points.\n  ``PYBIND11_PLUGIN`` is deprecated. See :ref:`macros` for details.\n  `#879 <https://github.com/pybind/pybind11/pull/879>`_.\n\n  .. code-block:: cpp\n\n      // new\n      PYBIND11_MODULE(example, m) {\n          m.def(\"add\", [](int a, int b) { return a + b; });\n      }\n\n      // old\n      PYBIND11_PLUGIN(example) {\n          py::module m(\"example\");\n          m.def(\"add\", [](int a, int b) { return a + b; });\n          return m.ptr();\n      }\n\n* pybind11's headers and build system now more strictly enforce hidden symbol\n  visibility for extension modules. This should be seamless for most users,\n  but see the :doc:`upgrade` if you use a custom build system.\n  `#995 <https://github.com/pybind/pybind11/pull/995>`_.\n\n* Support for ``py::module_local`` types which allow multiple modules to\n  export the same C++ types without conflicts. This is useful for opaque\n  types like ``std::vector<int>``. ``py::bind_vector`` and ``py::bind_map``\n  now default to ``py::module_local`` if their elements are builtins or\n  local types. 
See :ref:`module_local` for details.\n  `#949 <https://github.com/pybind/pybind11/pull/949>`_,\n  `#981 <https://github.com/pybind/pybind11/pull/981>`_,\n  `#995 <https://github.com/pybind/pybind11/pull/995>`_,\n  `#997 <https://github.com/pybind/pybind11/pull/997>`_.\n\n* Custom constructors can now be added very easily using lambdas or factory\n  functions which return a class instance by value, pointer or holder. This\n  supersedes the old placement-new ``__init__`` technique.\n  See :ref:`custom_constructors` for details.\n  `#805 <https://github.com/pybind/pybind11/pull/805>`_,\n  `#1014 <https://github.com/pybind/pybind11/pull/1014>`_.\n\n  .. code-block:: cpp\n\n      struct Example {\n          Example(std::string);\n      };\n\n      py::class_<Example>(m, \"Example\")\n          .def(py::init<std::string>()) // existing constructor\n          .def(py::init([](int n) { // custom constructor\n              return std::make_unique<Example>(std::to_string(n));\n          }));\n\n* Similarly to custom constructors, pickling support functions are now bound\n  using the ``py::pickle()`` adaptor which improves type safety. See the\n  :doc:`upgrade` and :ref:`pickling` for details.\n  `#1038 <https://github.com/pybind/pybind11/pull/1038>`_.\n\n* Builtin support for converting C++17 standard library types and general\n  conversion improvements:\n\n  1. C++17 ``std::variant`` is supported right out of the box. C++11/14\n     equivalents (e.g. ``boost::variant``) can also be added with a simple\n     user-defined specialization. See :ref:`cpp17_container_casters` for details.\n     `#811 <https://github.com/pybind/pybind11/pull/811>`_,\n     `#845 <https://github.com/pybind/pybind11/pull/845>`_,\n     `#989 <https://github.com/pybind/pybind11/pull/989>`_.\n\n  2. Out-of-the-box support for C++17 ``std::string_view``.\n     `#906 <https://github.com/pybind/pybind11/pull/906>`_.\n\n  3. 
Improved compatibility of the builtin ``optional`` converter.\n     `#874 <https://github.com/pybind/pybind11/pull/874>`_.\n\n  4. The ``bool`` converter now accepts ``numpy.bool_`` and types which\n     define ``__bool__`` (Python 3.x) or ``__nonzero__`` (Python 2.7).\n     `#925 <https://github.com/pybind/pybind11/pull/925>`_.\n\n  5. C++-to-Python casters are now more efficient and move elements out\n     of rvalue containers whenever possible.\n     `#851 <https://github.com/pybind/pybind11/pull/851>`_,\n     `#936 <https://github.com/pybind/pybind11/pull/936>`_,\n     `#938 <https://github.com/pybind/pybind11/pull/938>`_.\n\n  6. Fixed ``bytes`` to ``std::string/char*`` conversion on Python 3.\n     `#817 <https://github.com/pybind/pybind11/pull/817>`_.\n\n  7. Fixed lifetime of temporary C++ objects created in Python-to-C++ conversions.\n     `#924 <https://github.com/pybind/pybind11/pull/924>`_.\n\n* Scope guard call policy for RAII types, e.g. ``py::call_guard<py::gil_scoped_release>()``,\n  ``py::call_guard<py::scoped_ostream_redirect>()``. See :ref:`call_policies` for details.\n  `#740 <https://github.com/pybind/pybind11/pull/740>`_.\n\n* Utility for redirecting C++ streams to Python (e.g. ``std::cout`` ->\n  ``sys.stdout``). Scope guard ``py::scoped_ostream_redirect`` in C++ and\n  a context manager in Python. See :ref:`ostream_redirect`.\n  `#1009 <https://github.com/pybind/pybind11/pull/1009>`_.\n\n* Improved handling of types and exceptions across module boundaries.\n  `#915 <https://github.com/pybind/pybind11/pull/915>`_,\n  `#951 <https://github.com/pybind/pybind11/pull/951>`_,\n  `#995 <https://github.com/pybind/pybind11/pull/995>`_.\n\n* Fixed destruction order of ``py::keep_alive`` nurse/patient objects\n  in reference cycles.\n  `#856 <https://github.com/pybind/pybind11/pull/856>`_.\n\n* Numpy and buffer protocol related improvements:\n\n  1. Support for negative strides in Python buffer objects/numpy arrays. 
This\n     required changing integers from unsigned to signed for the related C++ APIs.\n     Note: If you have compiler warnings enabled, you may notice some new conversion\n     warnings after upgrading. These can be resolved with ``static_cast``.\n     `#782 <https://github.com/pybind/pybind11/pull/782>`_.\n\n  2. Support ``std::complex`` and arrays inside ``PYBIND11_NUMPY_DTYPE``.\n     `#831 <https://github.com/pybind/pybind11/pull/831>`_,\n     `#832 <https://github.com/pybind/pybind11/pull/832>`_.\n\n  3. Support for constructing ``py::buffer_info`` and ``py::arrays`` using\n     arbitrary containers or iterators instead of requiring a ``std::vector``.\n     `#788 <https://github.com/pybind/pybind11/pull/788>`_,\n     `#822 <https://github.com/pybind/pybind11/pull/822>`_,\n     `#860 <https://github.com/pybind/pybind11/pull/860>`_.\n\n  4. Explicitly check numpy version and require >= 1.7.0.\n     `#819 <https://github.com/pybind/pybind11/pull/819>`_.\n\n* Support for allowing/prohibiting ``None`` for specific arguments and improved\n  ``None`` overload resolution order. See :ref:`none_arguments` for details.\n  `#843 <https://github.com/pybind/pybind11/pull/843>`_.\n  `#859 <https://github.com/pybind/pybind11/pull/859>`_.\n\n* Added ``py::exec()`` as a shortcut for ``py::eval<py::eval_statements>()``\n  and support for C++11 raw string literals as input. 
See :ref:`eval`.\n  `#766 <https://github.com/pybind/pybind11/pull/766>`_,\n  `#827 <https://github.com/pybind/pybind11/pull/827>`_.\n\n* ``py::vectorize()`` ignores non-vectorizable arguments and supports\n  member functions.\n  `#762 <https://github.com/pybind/pybind11/pull/762>`_.\n\n* Support for bound methods as callbacks (``pybind11/functional.h``).\n  `#815 <https://github.com/pybind/pybind11/pull/815>`_.\n\n* Allow aliasing pybind11 methods: ``cls.attr(\"foo\") = cls.attr(\"bar\")``.\n  `#802 <https://github.com/pybind/pybind11/pull/802>`_.\n\n* Don't allow mixed static/non-static overloads.\n  `#804 <https://github.com/pybind/pybind11/pull/804>`_.\n\n* Fixed overriding static properties in derived classes.\n  `#784 <https://github.com/pybind/pybind11/pull/784>`_.\n\n* Improved deduction of member functions of a derived class when its bases\n  aren't registered with pybind11.\n  `#855 <https://github.com/pybind/pybind11/pull/855>`_.\n\n  .. code-block:: cpp\n\n      struct Base {\n          int foo() { return 42; }\n      }\n\n      struct Derived : Base {}\n\n      // Now works, but previously required also binding `Base`\n      py::class_<Derived>(m, \"Derived\")\n          .def(\"foo\", &Derived::foo); // function is actually from `Base`\n\n* The implementation of ``py::init<>`` now uses C++11 brace initialization\n  syntax to construct instances, which permits binding implicit constructors of\n  aggregate types. `#1015 <https://github.com/pybind/pybind11/pull/1015>`_.\n\n    .. 
code-block:: cpp\n\n        struct Aggregate {\n            int a;\n            std::string b;\n        };\n\n        py::class_<Aggregate>(m, \"Aggregate\")\n            .def(py::init<int, const std::string &>());\n\n* Fixed issues with multiple inheritance with offset base/derived pointers.\n  `#812 <https://github.com/pybind/pybind11/pull/812>`_,\n  `#866 <https://github.com/pybind/pybind11/pull/866>`_,\n  `#960 <https://github.com/pybind/pybind11/pull/960>`_.\n\n* Fixed reference leak of type objects.\n  `#1030 <https://github.com/pybind/pybind11/pull/1030>`_.\n\n* Improved support for the ``/std:c++14`` and ``/std:c++latest`` modes\n  on MSVC 2017.\n  `#841 <https://github.com/pybind/pybind11/pull/841>`_,\n  `#999 <https://github.com/pybind/pybind11/pull/999>`_.\n\n* Fixed detection of private operator new on MSVC.\n  `#893 <https://github.com/pybind/pybind11/pull/893>`_,\n  `#918 <https://github.com/pybind/pybind11/pull/918>`_.\n\n* Intel C++ compiler compatibility fixes.\n  `#937 <https://github.com/pybind/pybind11/pull/937>`_.\n\n* Fixed implicit conversion of `py::enum_` to integer types on Python 2.7.\n  `#821 <https://github.com/pybind/pybind11/pull/821>`_.\n\n* Added ``py::hash`` to fetch the hash value of Python objects, and\n  ``.def(hash(py::self))`` to provide the C++ ``std::hash`` as the Python\n  ``__hash__`` method.\n  `#1034 <https://github.com/pybind/pybind11/pull/1034>`_.\n\n* Fixed ``__truediv__`` on Python 2 and ``__itruediv__`` on Python 3.\n  `#867 <https://github.com/pybind/pybind11/pull/867>`_.\n\n* ``py::capsule`` objects now support the ``name`` attribute. 
This is useful\n  for interfacing with ``scipy.LowLevelCallable``.\n  `#902 <https://github.com/pybind/pybind11/pull/902>`_.\n\n* Fixed ``py::make_iterator``'s ``__next__()`` for past-the-end calls.\n  `#897 <https://github.com/pybind/pybind11/pull/897>`_.\n\n* Added ``error_already_set::matches()`` for checking Python exceptions.\n  `#772 <https://github.com/pybind/pybind11/pull/772>`_.\n\n* Deprecated ``py::error_already_set::clear()``. It's no longer needed\n  following a simplification of the ``py::error_already_set`` class.\n  `#954 <https://github.com/pybind/pybind11/pull/954>`_.\n\n* Deprecated ``py::handle::operator==()`` in favor of ``py::handle::is()``\n  `#825 <https://github.com/pybind/pybind11/pull/825>`_.\n\n* Deprecated ``py::object::borrowed``/``py::object::stolen``.\n  Use ``py::object::borrowed_t{}``/``py::object::stolen_t{}`` instead.\n  `#771 <https://github.com/pybind/pybind11/pull/771>`_.\n\n* Changed internal data structure versioning to avoid conflicts between\n  modules compiled with different revisions of pybind11.\n  `#1012 <https://github.com/pybind/pybind11/pull/1012>`_.\n\n* Additional compile-time and run-time error checking and more informative messages.\n  `#786 <https://github.com/pybind/pybind11/pull/786>`_,\n  `#794 <https://github.com/pybind/pybind11/pull/794>`_,\n  `#803 <https://github.com/pybind/pybind11/pull/803>`_.\n\n* Various minor improvements and fixes.\n  `#764 <https://github.com/pybind/pybind11/pull/764>`_,\n  `#791 <https://github.com/pybind/pybind11/pull/791>`_,\n  `#795 <https://github.com/pybind/pybind11/pull/795>`_,\n  `#840 <https://github.com/pybind/pybind11/pull/840>`_,\n  `#844 <https://github.com/pybind/pybind11/pull/844>`_,\n  `#846 <https://github.com/pybind/pybind11/pull/846>`_,\n  `#849 <https://github.com/pybind/pybind11/pull/849>`_,\n  `#858 <https://github.com/pybind/pybind11/pull/858>`_,\n  `#862 <https://github.com/pybind/pybind11/pull/862>`_,\n  `#871 
<https://github.com/pybind/pybind11/pull/871>`_,\n  `#872 <https://github.com/pybind/pybind11/pull/872>`_,\n  `#881 <https://github.com/pybind/pybind11/pull/881>`_,\n  `#888 <https://github.com/pybind/pybind11/pull/888>`_,\n  `#899 <https://github.com/pybind/pybind11/pull/899>`_,\n  `#928 <https://github.com/pybind/pybind11/pull/928>`_,\n  `#931 <https://github.com/pybind/pybind11/pull/931>`_,\n  `#944 <https://github.com/pybind/pybind11/pull/944>`_,\n  `#950 <https://github.com/pybind/pybind11/pull/950>`_,\n  `#952 <https://github.com/pybind/pybind11/pull/952>`_,\n  `#962 <https://github.com/pybind/pybind11/pull/962>`_,\n  `#965 <https://github.com/pybind/pybind11/pull/965>`_,\n  `#970 <https://github.com/pybind/pybind11/pull/970>`_,\n  `#978 <https://github.com/pybind/pybind11/pull/978>`_,\n  `#979 <https://github.com/pybind/pybind11/pull/979>`_,\n  `#986 <https://github.com/pybind/pybind11/pull/986>`_,\n  `#1020 <https://github.com/pybind/pybind11/pull/1020>`_,\n  `#1027 <https://github.com/pybind/pybind11/pull/1027>`_,\n  `#1037 <https://github.com/pybind/pybind11/pull/1037>`_.\n\n* Testing improvements.\n  `#798 <https://github.com/pybind/pybind11/pull/798>`_,\n  `#882 <https://github.com/pybind/pybind11/pull/882>`_,\n  `#898 <https://github.com/pybind/pybind11/pull/898>`_,\n  `#900 <https://github.com/pybind/pybind11/pull/900>`_,\n  `#921 <https://github.com/pybind/pybind11/pull/921>`_,\n  `#923 <https://github.com/pybind/pybind11/pull/923>`_,\n  `#963 <https://github.com/pybind/pybind11/pull/963>`_.\n\nv2.1.1 (April 7, 2017)\n-----------------------------------------------------\n\n* Fixed minimum version requirement for MSVC 2015u3\n  `#773 <https://github.com/pybind/pybind11/pull/773>`_.\n\nv2.1.0 (March 22, 2017)\n-----------------------------------------------------\n\n* pybind11 now performs function overload resolution in two phases. The first\n  phase only considers exact type matches, while the second allows for implicit\n  conversions to take place. 
A special ``noconvert()`` syntax can be used to\n  completely disable implicit conversions for specific arguments.\n  `#643 <https://github.com/pybind/pybind11/pull/643>`_,\n  `#634 <https://github.com/pybind/pybind11/pull/634>`_,\n  `#650 <https://github.com/pybind/pybind11/pull/650>`_.\n\n* Fixed a regression where static properties no longer worked with classes\n  using multiple inheritance. The ``py::metaclass`` attribute is no longer\n  necessary (and deprecated as of this release) when binding classes with\n  static properties.\n  `#679 <https://github.com/pybind/pybind11/pull/679>`_,\n\n* Classes bound using ``pybind11`` can now use custom metaclasses.\n  `#679 <https://github.com/pybind/pybind11/pull/679>`_,\n\n* ``py::args`` and ``py::kwargs`` can now be mixed with other positional\n  arguments when binding functions using pybind11.\n  `#611 <https://github.com/pybind/pybind11/pull/611>`_.\n\n* Improved support for C++11 unicode string and character types; added\n  extensive documentation regarding pybind11's string conversion behavior.\n  `#624 <https://github.com/pybind/pybind11/pull/624>`_,\n  `#636 <https://github.com/pybind/pybind11/pull/636>`_,\n  `#715 <https://github.com/pybind/pybind11/pull/715>`_.\n\n* pybind11 can now avoid expensive copies when converting Eigen arrays to NumPy\n  arrays (and vice versa). `#610 <https://github.com/pybind/pybind11/pull/610>`_.\n\n* The \"fast path\" in ``py::vectorize`` now works for any full-size group of C or\n  F-contiguous arrays. 
The non-fast path is also faster since it no longer performs\n  copies of the input arguments (except when type conversions are necessary).\n  `#610 <https://github.com/pybind/pybind11/pull/610>`_.\n\n* Added fast, unchecked access to NumPy arrays via a proxy object.\n  `#746 <https://github.com/pybind/pybind11/pull/746>`_.\n\n* Transparent support for class-specific ``operator new`` and\n  ``operator delete`` implementations.\n  `#755 <https://github.com/pybind/pybind11/pull/755>`_.\n\n* Slimmer and more efficient STL-compatible iterator interface for sequence types.\n  `#662 <https://github.com/pybind/pybind11/pull/662>`_.\n\n* Improved custom holder type support.\n  `#607 <https://github.com/pybind/pybind11/pull/607>`_.\n\n* ``nullptr`` to ``None`` conversion fixed in various builtin type casters.\n  `#732 <https://github.com/pybind/pybind11/pull/732>`_.\n\n* ``enum_`` now exposes its members via a special ``__members__`` attribute.\n  `#666 <https://github.com/pybind/pybind11/pull/666>`_.\n\n* ``std::vector`` bindings created using ``stl_bind.h`` can now optionally\n  implement the buffer protocol. `#488 <https://github.com/pybind/pybind11/pull/488>`_.\n\n* Automated C++ reference documentation using doxygen and breathe.\n  `#598 <https://github.com/pybind/pybind11/pull/598>`_.\n\n* Added minimum compiler version assertions.\n  `#727 <https://github.com/pybind/pybind11/pull/727>`_.\n\n* Improved compatibility with C++1z.\n  `#677 <https://github.com/pybind/pybind11/pull/677>`_.\n\n* Improved ``py::capsule`` API. 
Can be used to implement cleanup\n  callbacks that are invoked at module destruction time.\n  `#752 <https://github.com/pybind/pybind11/pull/752>`_.\n\n* Various minor improvements and fixes.\n  `#595 <https://github.com/pybind/pybind11/pull/595>`_,\n  `#588 <https://github.com/pybind/pybind11/pull/588>`_,\n  `#589 <https://github.com/pybind/pybind11/pull/589>`_,\n  `#603 <https://github.com/pybind/pybind11/pull/603>`_,\n  `#619 <https://github.com/pybind/pybind11/pull/619>`_,\n  `#648 <https://github.com/pybind/pybind11/pull/648>`_,\n  `#695 <https://github.com/pybind/pybind11/pull/695>`_,\n  `#720 <https://github.com/pybind/pybind11/pull/720>`_,\n  `#723 <https://github.com/pybind/pybind11/pull/723>`_,\n  `#729 <https://github.com/pybind/pybind11/pull/729>`_,\n  `#724 <https://github.com/pybind/pybind11/pull/724>`_,\n  `#742 <https://github.com/pybind/pybind11/pull/742>`_,\n  `#753 <https://github.com/pybind/pybind11/pull/753>`_.\n\nv2.0.1 (Jan 4, 2017)\n-----------------------------------------------------\n\n* Fix pointer to reference error in type_caster on MSVC\n  `#583 <https://github.com/pybind/pybind11/pull/583>`_.\n\n* Fixed a segmentation fault in the test suite due to a typo\n  `cd7eac <https://github.com/pybind/pybind11/commit/cd7eac>`_.\n\nv2.0.0 (Jan 1, 2017)\n-----------------------------------------------------\n\n* Fixed a reference counting regression affecting types with custom metaclasses\n  (introduced in v2.0.0-rc1).\n  `#571 <https://github.com/pybind/pybind11/pull/571>`_.\n\n* Quenched a CMake policy warning.\n  `#570 <https://github.com/pybind/pybind11/pull/570>`_.\n\nv2.0.0-rc1 (Dec 23, 2016)\n-----------------------------------------------------\n\nThe pybind11 developers are excited to issue a release candidate of pybind11\nwith a subsequent v2.0.0 release planned in early January next year.\n\nAn incredible amount of effort went into pybind11 over the last ~5 months,\nleading to a release that is jam-packed with exciting new features and 
numerous\nusability improvements. The following list links PRs or individual commits\nwhenever applicable.\n\nHappy Christmas!\n\n* Support for binding C++ class hierarchies that make use of multiple\n  inheritance. `#410 <https://github.com/pybind/pybind11/pull/410>`_.\n\n* PyPy support: pybind11 now supports nightly builds of PyPy and will\n  interoperate with the future 5.7 release. No code changes are necessary,\n  everything \"just\" works as usual. Note that we only target the Python 2.7\n  branch for now; support for 3.x will be added once its ``cpyext`` extension\n  support catches up. A few minor features remain unsupported for the time\n  being (notably dynamic attributes in custom types).\n  `#527 <https://github.com/pybind/pybind11/pull/527>`_.\n\n* Significant work on the documentation -- in particular, the monolithic\n  ``advanced.rst`` file was restructured into an easier to read hierarchical\n  organization. `#448 <https://github.com/pybind/pybind11/pull/448>`_.\n\n* Many NumPy-related improvements:\n\n  1. Object-oriented API to access and modify NumPy ``ndarray`` instances,\n     replicating much of the corresponding NumPy C API functionality.\n     `#402 <https://github.com/pybind/pybind11/pull/402>`_.\n\n  2. NumPy array ``dtype`` array descriptors are now first-class citizens and\n     are exposed via a new class ``py::dtype``.\n\n  3. Structured dtypes can be registered using the ``PYBIND11_NUMPY_DTYPE()``\n     macro. Special ``array`` constructors accepting dtype objects were also\n     added.\n\n     One potential caveat involving this change: format descriptor strings\n     should now be accessed via ``format_descriptor::format()`` (however, for\n     compatibility purposes, the old syntax ``format_descriptor::value`` will\n     still work for non-structured data types). `#308\n     <https://github.com/pybind/pybind11/pull/308>`_.\n\n  4. 
Further improvements to support structured dtypes throughout the system.\n     `#472 <https://github.com/pybind/pybind11/pull/472>`_,\n     `#474 <https://github.com/pybind/pybind11/pull/474>`_,\n     `#459 <https://github.com/pybind/pybind11/pull/459>`_,\n     `#453 <https://github.com/pybind/pybind11/pull/453>`_,\n     `#452 <https://github.com/pybind/pybind11/pull/452>`_, and\n     `#505 <https://github.com/pybind/pybind11/pull/505>`_.\n\n  5. Fast access operators. `#497 <https://github.com/pybind/pybind11/pull/497>`_.\n\n  6. Constructors for arrays whose storage is owned by another object.\n     `#440 <https://github.com/pybind/pybind11/pull/440>`_.\n\n  7. Added constructors for ``array`` and ``array_t`` explicitly accepting shape\n     and strides; if strides are not provided, they are deduced assuming\n     C-contiguity. Also added simplified constructors for 1-dimensional case.\n\n  8. Added buffer/NumPy support for ``char[N]`` and ``std::array<char, N>`` types.\n\n  9. Added ``memoryview`` wrapper type which is constructible from ``buffer_info``.\n\n* Eigen: many additional conversions and support for non-contiguous\n  arrays/slices.\n  `#427 <https://github.com/pybind/pybind11/pull/427>`_,\n  `#315 <https://github.com/pybind/pybind11/pull/315>`_,\n  `#316 <https://github.com/pybind/pybind11/pull/316>`_,\n  `#312 <https://github.com/pybind/pybind11/pull/312>`_, and\n  `#267 <https://github.com/pybind/pybind11/pull/267>`_\n\n* Incompatible changes in ``class_<...>::class_()``:\n\n    1. Declarations of types that provide access via the buffer protocol must\n       now include the ``py::buffer_protocol()`` annotation as an argument to\n       the ``class_`` constructor.\n\n    2. Declarations of types that require a custom metaclass (i.e. 
all classes\n       which include static properties via commands such as\n       ``def_readwrite_static()``) must now include the ``py::metaclass()``\n       annotation as an argument to the ``class_`` constructor.\n\n       These two changes were necessary to make type definitions in pybind11\n       future-proof, and to support PyPy via its cpyext mechanism. `#527\n       <https://github.com/pybind/pybind11/pull/527>`_.\n\n\n    3. This version of pybind11 uses a redesigned mechanism for instantiating\n       trampoline classes that are used to override virtual methods from within\n       Python. This led to the following user-visible syntax change: instead of\n\n       .. code-block:: cpp\n\n           py::class_<TrampolineClass>(\"MyClass\")\n             .alias<MyClass>()\n             ....\n\n       write\n\n       .. code-block:: cpp\n\n           py::class_<MyClass, TrampolineClass>(\"MyClass\")\n             ....\n\n       Importantly, both the original and the trampoline class are now\n       specified as an arguments (in arbitrary order) to the ``py::class_``\n       template, and the ``alias<..>()`` call is gone. The new scheme has zero\n       overhead in cases when Python doesn't override any functions of the\n       underlying C++ class. `rev. 86d825\n       <https://github.com/pybind/pybind11/commit/86d825>`_.\n\n* Added ``eval`` and ``eval_file`` functions for evaluating expressions and\n  statements from a string or file. `rev. 
0d3fc3\n  <https://github.com/pybind/pybind11/commit/0d3fc3>`_.\n\n* pybind11 can now create types with a modifiable dictionary.\n  `#437 <https://github.com/pybind/pybind11/pull/437>`_ and\n  `#444 <https://github.com/pybind/pybind11/pull/444>`_.\n\n* Support for translation of arbitrary C++ exceptions to Python counterparts.\n  `#296 <https://github.com/pybind/pybind11/pull/296>`_ and\n  `#273 <https://github.com/pybind/pybind11/pull/273>`_.\n\n* Report full backtraces through mixed C++/Python code, better reporting for\n  import errors, fixed GIL management in exception processing.\n  `#537 <https://github.com/pybind/pybind11/pull/537>`_,\n  `#494 <https://github.com/pybind/pybind11/pull/494>`_,\n  `rev. e72d95 <https://github.com/pybind/pybind11/commit/e72d95>`_, and\n  `rev. 099d6e <https://github.com/pybind/pybind11/commit/099d6e>`_.\n\n* Support for bit-level operations, comparisons, and serialization of C++\n  enumerations. `#503 <https://github.com/pybind/pybind11/pull/503>`_,\n  `#508 <https://github.com/pybind/pybind11/pull/508>`_,\n  `#380 <https://github.com/pybind/pybind11/pull/380>`_,\n  `#309 <https://github.com/pybind/pybind11/pull/309>`_.\n  `#311 <https://github.com/pybind/pybind11/pull/311>`_.\n\n* The ``class_`` constructor now accepts its template arguments in any order.\n  `#385 <https://github.com/pybind/pybind11/pull/385>`_.\n\n* Attribute and item accessors now have a more complete interface which makes\n  it possible to chain attributes as in\n  ``obj.attr(\"a\")[key].attr(\"b\").attr(\"method\")(1, 2, 3)``. `#425\n  <https://github.com/pybind/pybind11/pull/425>`_.\n\n* Major redesign of the default and conversion constructors in ``pytypes.h``.\n  `#464 <https://github.com/pybind/pybind11/pull/464>`_.\n\n* Added built-in support for ``std::shared_ptr`` holder type. 
It is no longer\n  necessary to include a declaration of the form\n  ``PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>)`` (though continuing to\n  do so won't cause an error).\n  `#454 <https://github.com/pybind/pybind11/pull/454>`_.\n\n* New ``py::overload_cast`` casting operator to select among multiple possible\n  overloads of a function. An example:\n\n    .. code-block:: cpp\n\n        py::class_<Pet>(m, \"Pet\")\n            .def(\"set\", py::overload_cast<int>(&Pet::set), \"Set the pet's age\")\n            .def(\"set\", py::overload_cast<const std::string &>(&Pet::set), \"Set the pet's name\");\n\n  This feature only works on C++14-capable compilers.\n  `#541 <https://github.com/pybind/pybind11/pull/541>`_.\n\n* C++ types are automatically cast to Python types, e.g. when assigning\n  them as an attribute. For instance, the following is now legal:\n\n    .. code-block:: cpp\n\n        py::module m = /* ... */\n        m.attr(\"constant\") = 123;\n\n  (Previously, a ``py::cast`` call was necessary to avoid a compilation error.)\n  `#551 <https://github.com/pybind/pybind11/pull/551>`_.\n\n* Redesigned ``pytest``-based test suite. `#321 <https://github.com/pybind/pybind11/pull/321>`_.\n\n* Instance tracking to detect reference leaks in test suite. `#324 <https://github.com/pybind/pybind11/pull/324>`_\n\n* pybind11 can now distinguish between multiple different instances that are\n  located at the same memory address, but which have different types.\n  `#329 <https://github.com/pybind/pybind11/pull/329>`_.\n\n* Improved logic in ``move`` return value policy.\n  `#510 <https://github.com/pybind/pybind11/pull/510>`_,\n  `#297 <https://github.com/pybind/pybind11/pull/297>`_.\n\n* Generalized unpacking API to permit calling Python functions from C++ using\n  notation such as ``foo(a1, a2, *args, \"ka\"_a=1, \"kb\"_a=2, **kwargs)``. 
`#372 <https://github.com/pybind/pybind11/pull/372>`_.\n\n* ``py::print()`` function whose behavior matches that of the native Python\n  ``print()`` function. `#372 <https://github.com/pybind/pybind11/pull/372>`_.\n\n* Added ``py::dict`` keyword constructor:``auto d = dict(\"number\"_a=42,\n  \"name\"_a=\"World\");``. `#372 <https://github.com/pybind/pybind11/pull/372>`_.\n\n* Added ``py::str::format()`` method and ``_s`` literal: ``py::str s = \"1 + 2\n  = {}\"_s.format(3);``. `#372 <https://github.com/pybind/pybind11/pull/372>`_.\n\n* Added ``py::repr()`` function which is equivalent to Python's builtin\n  ``repr()``. `#333 <https://github.com/pybind/pybind11/pull/333>`_.\n\n* Improved construction and destruction logic for holder types. It is now\n  possible to reference instances with smart pointer holder types without\n  constructing the holder if desired. The ``PYBIND11_DECLARE_HOLDER_TYPE``\n  macro now accepts an optional second parameter to indicate whether the holder\n  type uses intrusive reference counting.\n  `#533 <https://github.com/pybind/pybind11/pull/533>`_ and\n  `#561 <https://github.com/pybind/pybind11/pull/561>`_.\n\n* Mapping a stateless C++ function to Python and back is now \"for free\" (i.e.\n  no extra indirections or argument conversion overheads). `rev. 
954b79\n  <https://github.com/pybind/pybind11/commit/954b79>`_.\n\n* Bindings for ``std::valarray<T>``.\n  `#545 <https://github.com/pybind/pybind11/pull/545>`_.\n\n* Improved support for C++17 capable compilers.\n  `#562 <https://github.com/pybind/pybind11/pull/562>`_.\n\n* Bindings for ``std::optional<T>``.\n  `#475 <https://github.com/pybind/pybind11/pull/475>`_,\n  `#476 <https://github.com/pybind/pybind11/pull/476>`_,\n  `#479 <https://github.com/pybind/pybind11/pull/479>`_,\n  `#499 <https://github.com/pybind/pybind11/pull/499>`_, and\n  `#501 <https://github.com/pybind/pybind11/pull/501>`_.\n\n* ``stl_bind.h``: general improvements and support for ``std::map`` and\n  ``std::unordered_map``.\n  `#490 <https://github.com/pybind/pybind11/pull/490>`_,\n  `#282 <https://github.com/pybind/pybind11/pull/282>`_,\n  `#235 <https://github.com/pybind/pybind11/pull/235>`_.\n\n* The ``std::tuple``, ``std::pair``, ``std::list``, and ``std::vector`` type\n  casters now accept any Python sequence type as input. `rev. 107285\n  <https://github.com/pybind/pybind11/commit/107285>`_.\n\n* Improved CMake Python detection on multi-architecture Linux.\n  `#532 <https://github.com/pybind/pybind11/pull/532>`_.\n\n* Infrastructure to selectively disable or enable parts of the automatically\n  generated docstrings. `#486 <https://github.com/pybind/pybind11/pull/486>`_.\n\n* ``reference`` and ``reference_internal`` are now the default return value\n  properties for static and non-static properties, respectively. `#473\n  <https://github.com/pybind/pybind11/pull/473>`_. (the previous defaults\n  were ``automatic``). `#473 <https://github.com/pybind/pybind11/pull/473>`_.\n\n* Support for ``std::unique_ptr`` with non-default deleters or no deleter at\n  all (``py::nodelete``). `#384 <https://github.com/pybind/pybind11/pull/384>`_.\n\n* Deprecated ``handle::call()`` method. The new syntax to call Python\n  functions is simply ``handle()``. 
It can also be invoked explicitly via\n  ``handle::operator<X>()``, where ``X`` is an optional return value policy.\n\n* Print more informative error messages when ``make_tuple()`` or ``cast()``\n  fail. `#262 <https://github.com/pybind/pybind11/pull/262>`_.\n\n* Creation of holder types for classes deriving from\n  ``std::enable_shared_from_this<>`` now also works for ``const`` values.\n  `#260 <https://github.com/pybind/pybind11/pull/260>`_.\n\n* ``make_iterator()`` improvements for better compatibility with various\n  types (now uses prefix increment operator); it now also accepts iterators\n  with different begin/end types as long as they are equality comparable.\n  `#247 <https://github.com/pybind/pybind11/pull/247>`_.\n\n* ``arg()`` now accepts a wider range of argument types for default values.\n  `#244 <https://github.com/pybind/pybind11/pull/244>`_.\n\n* Support ``keep_alive`` where the nurse object may be ``None``. `#341\n  <https://github.com/pybind/pybind11/pull/341>`_.\n\n* Added constructors for ``str`` and ``bytes`` from zero-terminated char\n  pointers, and from char pointers and length. Added constructors for ``str``\n  from ``bytes`` and for ``bytes`` from ``str``, which will perform UTF-8\n  decoding/encoding as required.\n\n* Many other improvements of library internals without user-visible changes\n\n\n1.8.1 (July 12, 2016)\n----------------------\n* Fixed a rare but potentially very severe issue when the garbage collector ran\n  during pybind11 type creation.\n\n1.8.0 (June 14, 2016)\n----------------------\n* Redesigned CMake build system which exports a convenient\n  ``pybind11_add_module`` function to parent projects.\n* ``std::vector<>`` type bindings analogous to Boost.Python's ``indexing_suite``\n* Transparent conversion of sparse and dense Eigen matrices and vectors (``eigen.h``)\n* Added an ``ExtraFlags`` template argument to the NumPy ``array_t<>`` wrapper\n  to disable an enforced cast that may lose precision, e.g. 
to create overloads\n  for different precisions and complex vs real-valued matrices.\n* Prevent implicit conversion of floating point values to integral types in\n  function arguments\n* Fixed incorrect default return value policy for functions returning a shared\n  pointer\n* Don't allow registering a type via ``class_`` twice\n* Don't allow casting a ``None`` value into a C++ lvalue reference\n* Fixed a crash in ``enum_::operator==`` that was triggered by the ``help()`` command\n* Improved detection of whether or not custom C++ types can be copy/move-constructed\n* Extended ``str`` type to also work with ``bytes`` instances\n* Added a ``\"name\"_a`` user defined string literal that is equivalent to ``py::arg(\"name\")``.\n* When specifying function arguments via ``py::arg``, the test that verifies\n  the number of arguments now runs at compile time.\n* Added ``[[noreturn]]`` attribute to ``pybind11_fail()`` to quench some\n  compiler warnings\n* List function arguments in exception text when the dispatch code cannot find\n  a matching overload\n* Added ``PYBIND11_OVERLOAD_NAME`` and ``PYBIND11_OVERLOAD_PURE_NAME`` macros which\n  can be used to override virtual methods whose name differs in C++ and Python\n  (e.g. 
``__call__`` and ``operator()``)\n* Various minor ``iterator`` and ``make_iterator()`` improvements\n* Transparently support ``__bool__`` on Python 2.x and Python 3.x\n* Fixed issue with destructor of unpickled object not being called\n* Minor CMake build system improvements on Windows\n* New ``pybind11::args`` and ``pybind11::kwargs`` types to create functions which\n  take an arbitrary number of arguments and keyword arguments\n* New syntax to call a Python function from C++ using ``*args`` and ``*kwargs``\n* The functions ``def_property_*`` now correctly process docstring arguments (these\n  formerly caused a segmentation fault)\n* Many ``mkdoc.py`` improvements (enumerations, template arguments, ``DOC()``\n  macro accepts more arguments)\n* Cygwin support\n* Documentation improvements (pickling support, ``keep_alive``, macro usage)\n\n1.7 (April 30, 2016)\n----------------------\n* Added a new ``move`` return value policy that triggers C++11 move semantics.\n  The automatic return value policy falls back to this case whenever a rvalue\n  reference is encountered\n* Significantly more general GIL state routines that are used instead of\n  Python's troublesome ``PyGILState_Ensure`` and ``PyGILState_Release`` API\n* Redesign of opaque types that drastically simplifies their usage\n* Extended ability to pass values of type ``[const] void *``\n* ``keep_alive`` fix: don't fail when there is no patient\n* ``functional.h``: acquire the GIL before calling a Python function\n* Added Python RAII type wrappers ``none`` and ``iterable``\n* Added ``*args`` and ``*kwargs`` pass-through parameters to\n  ``pybind11.get_include()`` function\n* Iterator improvements and fixes\n* Documentation on return value policies and opaque types improved\n\n1.6 (April 30, 2016)\n----------------------\n* Skipped due to upload to PyPI gone wrong and inability to recover\n  (https://github.com/pypa/packaging-problems/issues/74)\n\n1.5 (April 21, 2016)\n----------------------\n* For polymorphic 
types, use RTTI to try to return the closest type registered with pybind11\n* Pickling support for serializing and unserializing C++ instances to a byte stream in Python\n* Added a convenience routine ``make_iterator()`` which turns a range indicated\n  by a pair of C++ iterators into a iterable Python object\n* Added ``len()`` and a variadic ``make_tuple()`` function\n* Addressed a rare issue that could confuse the current virtual function\n  dispatcher and another that could lead to crashes in multi-threaded\n  applications\n* Added a ``get_include()`` function to the Python module that returns the path\n  of the directory containing the installed pybind11 header files\n* Documentation improvements: import issues, symbol visibility, pickling, limitations\n* Added casting support for ``std::reference_wrapper<>``\n\n1.4 (April 7, 2016)\n--------------------------\n* Transparent type conversion for ``std::wstring`` and ``wchar_t``\n* Allow passing ``nullptr``-valued strings\n* Transparent passing of ``void *`` pointers using capsules\n* Transparent support for returning values wrapped in ``std::unique_ptr<>``\n* Improved docstring generation for compatibility with Sphinx\n* Nicer debug error message when default parameter construction fails\n* Support for \"opaque\" types that bypass the transparent conversion layer for STL containers\n* Redesigned type casting interface to avoid ambiguities that could occasionally cause compiler errors\n* Redesigned property implementation; fixes crashes due to an unfortunate default return value policy\n* Anaconda package generation support\n\n1.3 (March 8, 2016)\n--------------------------\n\n* Added support for the Intel C++ compiler (v15+)\n* Added support for the STL unordered set/map data structures\n* Added support for the STL linked list data structure\n* NumPy-style broadcasting support in ``pybind11::vectorize``\n* pybind11 now displays more verbose error messages when ``arg::operator=()`` fails\n* pybind11 internal data 
structures now live in a version-dependent namespace to avoid ABI issues\n* Many, many bugfixes involving corner cases and advanced usage\n\n1.2 (February 7, 2016)\n--------------------------\n\n* Optional: efficient generation of function signatures at compile time using C++14\n* Switched to a simpler and more general way of dealing with function default\n  arguments. Unused keyword arguments in function calls are now detected and\n  cause errors as expected\n* New ``keep_alive`` call policy analogous to Boost.Python's ``with_custodian_and_ward``\n* New ``pybind11::base<>`` attribute to indicate a subclass relationship\n* Improved interface for RAII type wrappers in ``pytypes.h``\n* Use RAII type wrappers consistently within pybind11 itself. This\n  fixes various potential refcount leaks when exceptions occur\n* Added new ``bytes`` RAII type wrapper (maps to ``string`` in Python 2.7)\n* Made handle and related RAII classes const correct, using them more\n  consistently everywhere now\n* Got rid of the ugly ``__pybind11__`` attributes on the Python side---they are\n  now stored in a C++ hash table that is not visible in Python\n* Fixed refcount leaks involving NumPy arrays and bound functions\n* Vastly improved handling of shared/smart pointers\n* Removed an unnecessary copy operation in ``pybind11::vectorize``\n* Fixed naming clashes when both pybind11 and NumPy headers are included\n* Added conversions for additional exception types\n* Documentation improvements (using multiple extension modules, smart pointers,\n  other minor clarifications)\n* unified infrastructure for parsing variadic arguments in ``class_`` and cpp_function\n* Fixed license text (was: ZLIB, should have been: 3-clause BSD)\n* Python 3.2 compatibility\n* Fixed remaining issues when accessing types in another plugin module\n* Added enum comparison and casting methods\n* Improved SFINAE-based detection of whether types are copy-constructible\n* Eliminated many warnings about unused variables and 
the use of ``offsetof()``\n* Support for ``std::array<>`` conversions\n\n1.1 (December 7, 2015)\n--------------------------\n\n* Documentation improvements (GIL, wrapping functions, casting, fixed many typos)\n* Generalized conversion of integer types\n* Improved support for casting function objects\n* Improved support for ``std::shared_ptr<>`` conversions\n* Initial support for ``std::set<>`` conversions\n* Fixed type resolution issue for types defined in a separate plugin module\n* Cmake build system improvements\n* Factored out generic functionality to non-templated code (smaller code size)\n* Added a code size / compile time benchmark vs Boost.Python\n* Added an appveyor CI script\n\n1.0 (October 15, 2015)\n------------------------\n* Initial release\n"
  },
  {
    "path": "src/third_party/pybind11/docs/classes.rst",
    "content": ".. _classes:\n\nObject-oriented code\n####################\n\nCreating bindings for a custom type\n===================================\n\nLet's now look at a more complex example where we'll create bindings for a\ncustom C++ data structure named ``Pet``. Its definition is given below:\n\n.. code-block:: cpp\n\n    struct Pet {\n        Pet(const std::string &name) : name(name) { }\n        void setName(const std::string &name_) { name = name_; }\n        const std::string &getName() const { return name; }\n\n        std::string name;\n    };\n\nThe binding code for ``Pet`` looks as follows:\n\n.. code-block:: cpp\n\n    #include <pybind11/pybind11.h>\n\n    namespace py = pybind11;\n\n    PYBIND11_MODULE(example, m) {\n        py::class_<Pet>(m, \"Pet\")\n            .def(py::init<const std::string &>())\n            .def(\"setName\", &Pet::setName)\n            .def(\"getName\", &Pet::getName);\n    }\n\n:class:`class_` creates bindings for a C++ *class* or *struct*-style data\nstructure. :func:`init` is a convenience function that takes the types of a\nconstructor's parameters as template arguments and wraps the corresponding\nconstructor (see the :ref:`custom_constructors` section for details). An\ninteractive Python session demonstrating this example is shown below:\n\n.. code-block:: pycon\n\n    % python\n    >>> import example\n    >>> p = example.Pet('Molly')\n    >>> print(p)\n    <example.Pet object at 0x10cd98060>\n    >>> p.getName()\n    u'Molly'\n    >>> p.setName('Charly')\n    >>> p.getName()\n    u'Charly'\n\n.. seealso::\n\n    Static member functions can be bound in the same way using\n    :func:`class_::def_static`.\n\nKeyword and default arguments\n=============================\nIt is possible to specify keyword and default arguments using the syntax\ndiscussed in the previous chapter. 
Refer to the sections :ref:`keyword_args`\nand :ref:`default_args` for details.\n\nBinding lambda functions\n========================\n\nNote how ``print(p)`` produced a rather useless summary of our data structure in the example above:\n\n.. code-block:: pycon\n\n    >>> print(p)\n    <example.Pet object at 0x10cd98060>\n\nTo address this, we could bind an utility function that returns a human-readable\nsummary to the special method slot named ``__repr__``. Unfortunately, there is no\nsuitable functionality in the ``Pet`` data structure, and it would be nice if\nwe did not have to change it. This can easily be accomplished by binding a\nLambda function instead:\n\n.. code-block:: cpp\n\n        py::class_<Pet>(m, \"Pet\")\n            .def(py::init<const std::string &>())\n            .def(\"setName\", &Pet::setName)\n            .def(\"getName\", &Pet::getName)\n            .def(\"__repr__\",\n                [](const Pet &a) {\n                    return \"<example.Pet named '\" + a.name + \"'>\";\n                }\n            );\n\nBoth stateless [#f1]_ and stateful lambda closures are supported by pybind11.\nWith the above change, the same Python code now produces the following output:\n\n.. code-block:: pycon\n\n    >>> print(p)\n    <example.Pet named 'Molly'>\n\n.. [#f1] Stateless closures are those with an empty pair of brackets ``[]`` as the capture object.\n\n.. _properties:\n\nInstance and static fields\n==========================\n\nWe can also directly expose the ``name`` field using the\n:func:`class_::def_readwrite` method. A similar :func:`class_::def_readonly`\nmethod also exists for ``const`` fields.\n\n.. code-block:: cpp\n\n        py::class_<Pet>(m, \"Pet\")\n            .def(py::init<const std::string &>())\n            .def_readwrite(\"name\", &Pet::name)\n            // ... remainder ...\n\nThis makes it possible to write\n\n.. 
code-block:: pycon\n\n    >>> p = example.Pet('Molly')\n    >>> p.name\n    u'Molly'\n    >>> p.name = 'Charly'\n    >>> p.name\n    u'Charly'\n\nNow suppose that ``Pet::name`` was a private internal variable\nthat can only be accessed via setters and getters.\n\n.. code-block:: cpp\n\n    class Pet {\n    public:\n        Pet(const std::string &name) : name(name) { }\n        void setName(const std::string &name_) { name = name_; }\n        const std::string &getName() const { return name; }\n    private:\n        std::string name;\n    };\n\nIn this case, the method :func:`class_::def_property`\n(:func:`class_::def_property_readonly` for read-only data) can be used to\nprovide a field-like interface within Python that will transparently call\nthe setter and getter functions:\n\n.. code-block:: cpp\n\n        py::class_<Pet>(m, \"Pet\")\n            .def(py::init<const std::string &>())\n            .def_property(\"name\", &Pet::getName, &Pet::setName)\n            // ... remainder ...\n\nWrite only properties can be defined by passing ``nullptr`` as the\ninput for the read function.\n\n.. seealso::\n\n    Similar functions :func:`class_::def_readwrite_static`,\n    :func:`class_::def_readonly_static` :func:`class_::def_property_static`,\n    and :func:`class_::def_property_readonly_static` are provided for binding\n    static variables and properties. Please also see the section on\n    :ref:`static_properties` in the advanced part of the documentation.\n\nDynamic attributes\n==================\n\nNative Python classes can pick up new attributes dynamically:\n\n.. code-block:: pycon\n\n    >>> class Pet:\n    ...     name = 'Molly'\n    ...\n    >>> p = Pet()\n    >>> p.name = 'Charly'  # overwrite existing\n    >>> p.age = 2  # dynamically add a new attribute\n\nBy default, classes exported from C++ do not support this and the only writable\nattributes are the ones explicitly defined using :func:`class_::def_readwrite`\nor :func:`class_::def_property`.\n\n.. 
code-block:: cpp\n\n    py::class_<Pet>(m, \"Pet\")\n        .def(py::init<>())\n        .def_readwrite(\"name\", &Pet::name);\n\nTrying to set any other attribute results in an error:\n\n.. code-block:: pycon\n\n    >>> p = example.Pet()\n    >>> p.name = 'Charly'  # OK, attribute defined in C++\n    >>> p.age = 2  # fail\n    AttributeError: 'Pet' object has no attribute 'age'\n\nTo enable dynamic attributes for C++ classes, the :class:`py::dynamic_attr` tag\nmust be added to the :class:`py::class_` constructor:\n\n.. code-block:: cpp\n\n    py::class_<Pet>(m, \"Pet\", py::dynamic_attr())\n        .def(py::init<>())\n        .def_readwrite(\"name\", &Pet::name);\n\nNow everything works as expected:\n\n.. code-block:: pycon\n\n    >>> p = example.Pet()\n    >>> p.name = 'Charly'  # OK, overwrite value in C++\n    >>> p.age = 2  # OK, dynamically add a new attribute\n    >>> p.__dict__  # just like a native Python class\n    {'age': 2}\n\nNote that there is a small runtime cost for a class with dynamic attributes.\nNot only because of the addition of a ``__dict__``, but also because of more\nexpensive garbage collection tracking which must be activated to resolve\npossible circular references. Native Python classes incur this same cost by\ndefault, so this is not anything to worry about. By default, pybind11 classes\nare more efficient than native Python classes. Enabling dynamic attributes\njust brings them on par.\n\n.. _inheritance:\n\nInheritance and automatic downcasting\n=====================================\n\nSuppose now that the example consists of two data structures with an\ninheritance relationship:\n\n.. 
code-block:: cpp\n\n    struct Pet {\n        Pet(const std::string &name) : name(name) { }\n        std::string name;\n    };\n\n    struct Dog : Pet {\n        Dog(const std::string &name) : Pet(name) { }\n        std::string bark() const { return \"woof!\"; }\n    };\n\nThere are two different ways of indicating a hierarchical relationship to\npybind11: the first specifies the C++ base class as an extra template\nparameter of the :class:`class_`:\n\n.. code-block:: cpp\n\n    py::class_<Pet>(m, \"Pet\")\n       .def(py::init<const std::string &>())\n       .def_readwrite(\"name\", &Pet::name);\n\n    // Method 1: template parameter:\n    py::class_<Dog, Pet /* <- specify C++ parent type */>(m, \"Dog\")\n        .def(py::init<const std::string &>())\n        .def(\"bark\", &Dog::bark);\n\nAlternatively, we can also assign a name to the previously bound ``Pet``\n:class:`class_` object and reference it when binding the ``Dog`` class:\n\n.. code-block:: cpp\n\n    py::class_<Pet> pet(m, \"Pet\");\n    pet.def(py::init<const std::string &>())\n       .def_readwrite(\"name\", &Pet::name);\n\n    // Method 2: pass parent class_ object:\n    py::class_<Dog>(m, \"Dog\", pet /* <- specify Python parent type */)\n        .def(py::init<const std::string &>())\n        .def(\"bark\", &Dog::bark);\n\nFunctionality-wise, both approaches are equivalent. Afterwards, instances will\nexpose fields and methods of both types:\n\n.. code-block:: pycon\n\n    >>> p = example.Dog('Molly')\n    >>> p.name\n    u'Molly'\n    >>> p.bark()\n    u'woof!'\n\nThe C++ classes defined above are regular non-polymorphic types with an\ninheritance relationship. This is reflected in Python:\n\n.. code-block:: cpp\n\n    // Return a base pointer to a derived instance\n    m.def(\"pet_store\", []() { return std::unique_ptr<Pet>(new Dog(\"Molly\")); });\n\n.. 
code-block:: pycon\n\n    >>> p = example.pet_store()\n    >>> type(p)  # `Dog` instance behind `Pet` pointer\n    Pet          # no pointer downcasting for regular non-polymorphic types\n    >>> p.bark()\n    AttributeError: 'Pet' object has no attribute 'bark'\n\nThe function returned a ``Dog`` instance, but because it's a non-polymorphic\ntype behind a base pointer, Python only sees a ``Pet``. In C++, a type is only\nconsidered polymorphic if it has at least one virtual function and pybind11\nwill automatically recognize this:\n\n.. code-block:: cpp\n\n    struct PolymorphicPet {\n        virtual ~PolymorphicPet() = default;\n    };\n\n    struct PolymorphicDog : PolymorphicPet {\n        std::string bark() const { return \"woof!\"; }\n    };\n\n    // Same binding code\n    py::class_<PolymorphicPet>(m, \"PolymorphicPet\");\n    py::class_<PolymorphicDog, PolymorphicPet>(m, \"PolymorphicDog\")\n        .def(py::init<>())\n        .def(\"bark\", &PolymorphicDog::bark);\n\n    // Again, return a base pointer to a derived instance\n    m.def(\"pet_store2\", []() { return std::unique_ptr<PolymorphicPet>(new PolymorphicDog); });\n\n.. code-block:: pycon\n\n    >>> p = example.pet_store2()\n    >>> type(p)\n    PolymorphicDog  # automatically downcast\n    >>> p.bark()\n    u'woof!'\n\nGiven a pointer to a polymorphic base, pybind11 performs automatic downcasting\nto the actual derived type. Note that this goes beyond the usual situation in\nC++: we don't just get access to the virtual functions of the base, we get the\nconcrete derived type including functions and attributes that the base type may\nnot even be aware of.\n\n.. seealso::\n\n    For more information about polymorphic behavior see :ref:`overriding_virtuals`.\n\n\nOverloaded methods\n==================\n\nSometimes there are several overloaded C++ methods with the same name taking\ndifferent kinds of input arguments:\n\n.. 
code-block:: cpp\n\n    struct Pet {\n        Pet(const std::string &name, int age) : name(name), age(age) { }\n\n        void set(int age_) { age = age_; }\n        void set(const std::string &name_) { name = name_; }\n\n        std::string name;\n        int age;\n    };\n\nAttempting to bind ``Pet::set`` will cause an error since the compiler does not\nknow which method the user intended to select. We can disambiguate by casting\nthem to function pointers. Binding multiple functions to the same Python name\nautomatically creates a chain of function overloads that will be tried in\nsequence.\n\n.. code-block:: cpp\n\n    py::class_<Pet>(m, \"Pet\")\n       .def(py::init<const std::string &, int>())\n       .def(\"set\", (void (Pet::*)(int)) &Pet::set, \"Set the pet's age\")\n       .def(\"set\", (void (Pet::*)(const std::string &)) &Pet::set, \"Set the pet's name\");\n\nThe overload signatures are also visible in the method's docstring:\n\n.. code-block:: pycon\n\n    >>> help(example.Pet)\n\n    class Pet(__builtin__.object)\n     |  Methods defined here:\n     |\n     |  __init__(...)\n     |      Signature : (Pet, str, int) -> NoneType\n     |\n     |  set(...)\n     |      1. Signature : (Pet, int) -> NoneType\n     |\n     |      Set the pet's age\n     |\n     |      2. Signature : (Pet, str) -> NoneType\n     |\n     |      Set the pet's name\n\nIf you have a C++14 compatible compiler [#cpp14]_, you can use an alternative\nsyntax to cast the overloaded function:\n\n.. code-block:: cpp\n\n    py::class_<Pet>(m, \"Pet\")\n        .def(\"set\", py::overload_cast<int>(&Pet::set), \"Set the pet's age\")\n        .def(\"set\", py::overload_cast<const std::string &>(&Pet::set), \"Set the pet's name\");\n\nHere, ``py::overload_cast`` only requires the parameter types to be specified.\nThe return type and class are deduced. This avoids the additional noise of\n``void (Pet::*)()`` as seen in the raw cast. 
If a function is overloaded based\non constness, the ``py::const_`` tag should be used:\n\n.. code-block:: cpp\n\n    struct Widget {\n        int foo(int x, float y);\n        int foo(int x, float y) const;\n    };\n\n    py::class_<Widget>(m, \"Widget\")\n       .def(\"foo_mutable\", py::overload_cast<int, float>(&Widget::foo))\n       .def(\"foo_const\",   py::overload_cast<int, float>(&Widget::foo, py::const_));\n\n\n.. [#cpp14] A compiler which supports the ``-std=c++14`` flag\n            or Visual Studio 2015 Update 2 and newer.\n\n.. note::\n\n    To define multiple overloaded constructors, simply declare one after the\n    other using the ``.def(py::init<...>())`` syntax. The existing machinery\n    for specifying keyword and default arguments also works.\n\nEnumerations and internal types\n===============================\n\nLet's now suppose that the example class contains an internal enumeration type,\ne.g.:\n\n.. code-block:: cpp\n\n    struct Pet {\n        enum Kind {\n            Dog = 0,\n            Cat\n        };\n\n        Pet(const std::string &name, Kind type) : name(name), type(type) { }\n\n        std::string name;\n        Kind type;\n    };\n\nThe binding code for this example looks as follows:\n\n.. code-block:: cpp\n\n    py::class_<Pet> pet(m, \"Pet\");\n\n    pet.def(py::init<const std::string &, Pet::Kind>())\n        .def_readwrite(\"name\", &Pet::name)\n        .def_readwrite(\"type\", &Pet::type);\n\n    py::enum_<Pet::Kind>(pet, \"Kind\")\n        .value(\"Dog\", Pet::Kind::Dog)\n        .value(\"Cat\", Pet::Kind::Cat)\n        .export_values();\n\nTo ensure that the ``Kind`` type is created within the scope of ``Pet``, the\n``pet`` :class:`class_` instance must be supplied to the :class:`enum_`.\nconstructor. The :func:`enum_::export_values` function exports the enum entries\ninto the parent scope, which should be skipped for newer C++11-style strongly\ntyped enums.\n\n.. 
code-block:: pycon\n\n    >>> p = Pet('Lucy', Pet.Cat)\n    >>> p.type\n    Kind.Cat\n    >>> int(p.type)\n    1L\n\nThe entries defined by the enumeration type are exposed in the ``__members__`` property:\n\n.. code-block:: pycon\n\n    >>> Pet.Kind.__members__\n    {'Dog': Kind.Dog, 'Cat': Kind.Cat}\n\nThe ``name`` property returns the name of the enum value as a unicode string.\n\n.. note::\n\n    It is also possible to use ``str(enum)``, however these accomplish different\n    goals. The following shows how these two approaches differ.\n\n    .. code-block:: pycon\n\n        >>> p = Pet( \"Lucy\", Pet.Cat )\n        >>> pet_type = p.type\n        >>> pet_type\n        Pet.Cat\n        >>> str(pet_type)\n        'Pet.Cat'\n        >>> pet_type.name\n        'Cat'\n\n.. note::\n\n    When the special tag ``py::arithmetic()`` is specified to the ``enum_``\n    constructor, pybind11 creates an enumeration that also supports rudimentary\n    arithmetic and bit-level operations like comparisons, and, or, xor, negation,\n    etc.\n\n    .. code-block:: cpp\n\n        py::enum_<Pet::Kind>(pet, \"Kind\", py::arithmetic())\n           ...\n\n    By default, these are omitted to conserve space.\n"
  },
  {
    "path": "src/third_party/pybind11/docs/compiling.rst",
    "content": ".. _compiling:\n\nBuild systems\n#############\n\nBuilding with setuptools\n========================\n\nFor projects on PyPI, building with setuptools is the way to go. Sylvain Corlay\nhas kindly provided an example project which shows how to set up everything,\nincluding automatic generation of documentation using Sphinx. Please refer to\nthe [python_example]_ repository.\n\n.. [python_example] https://github.com/pybind/python_example\n\nBuilding with cppimport\n========================\n\n[cppimport]_ is a small Python import hook that determines whether there is a C++\nsource file whose name matches the requested module. If there is, the file is\ncompiled as a Python extension using pybind11 and placed in the same folder as\nthe C++ source file. Python is then able to find the module and load it.\n\n.. [cppimport] https://github.com/tbenthompson/cppimport\n\n.. _cmake:\n\nBuilding with CMake\n===================\n\nFor C++ codebases that have an existing CMake-based build system, a Python\nextension module can be created with just a few lines of code:\n\n.. code-block:: cmake\n\n    cmake_minimum_required(VERSION 2.8.12)\n    project(example)\n\n    add_subdirectory(pybind11)\n    pybind11_add_module(example example.cpp)\n\nThis assumes that the pybind11 repository is located in a subdirectory named\n:file:`pybind11` and that the code is located in a file named :file:`example.cpp`.\nThe CMake command ``add_subdirectory`` will import the pybind11 project which\nprovides the ``pybind11_add_module`` function. It will take care of all the\ndetails needed to build a Python extension module on any platform.\n\nA working sample project, including a way to invoke CMake from :file:`setup.py` for\nPyPI integration, can be found in the [cmake_example]_  repository.\n\n.. 
[cmake_example] https://github.com/pybind/cmake_example\n\npybind11_add_module\n-------------------\n\nTo ease the creation of Python extension modules, pybind11 provides a CMake\nfunction with the following signature:\n\n.. code-block:: cmake\n\n    pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]\n                        [NO_EXTRAS] [SYSTEM] [THIN_LTO] source1 [source2 ...])\n\nThis function behaves very much like CMake's builtin ``add_library`` (in fact,\nit's a wrapper function around that command). It will add a library target\ncalled ``<name>`` to be built from the listed source files. In addition, it\nwill take care of all the Python-specific compiler and linker flags as well\nas the OS- and Python-version-specific file extension. The produced target\n``<name>`` can be further manipulated with regular CMake commands.\n\n``MODULE`` or ``SHARED`` may be given to specify the type of library. If no\ntype is given, ``MODULE`` is used by default which ensures the creation of a\nPython-exclusive module. Specifying ``SHARED`` will create a more traditional\ndynamic library which can also be linked from elsewhere. ``EXCLUDE_FROM_ALL``\nremoves this target from the default build (see CMake docs for details).\n\nSince pybind11 is a template library, ``pybind11_add_module`` adds compiler\nflags to ensure high quality code generation without bloat arising from long\nsymbol names and duplication of code in different translation units. It\nsets default visibility to *hidden*, which is required for some pybind11\nfeatures and functionality when attempting to load multiple pybind11 modules\ncompiled under different pybind11 versions.  It also adds additional flags\nenabling LTO (Link Time Optimization) and strip unneeded symbols. See the\n:ref:`FAQ entry <faq:symhidden>` for a more detailed explanation. These\nlatter optimizations are never applied in ``Debug`` mode.  If ``NO_EXTRAS`` is\ngiven, they will always be disabled, even in ``Release`` mode. 
However, this\nwill result in code bloat and is generally not recommended.\n\nBy default, pybind11 and Python headers will be included with ``-I``. In order\nto include pybind11 as system library, e.g. to avoid warnings in downstream\ncode with warn-levels outside of pybind11's scope, set the option ``SYSTEM``.\n\nAs stated above, LTO is enabled by default. Some newer compilers also support\ndifferent flavors of LTO such as `ThinLTO`_. Setting ``THIN_LTO`` will cause\nthe function to prefer this flavor if available. The function falls back to\nregular LTO if ``-flto=thin`` is not available.\n\n.. _ThinLTO: http://clang.llvm.org/docs/ThinLTO.html\n\nConfiguration variables\n-----------------------\n\nBy default, pybind11 will compile modules with the C++14 standard, if available\non the target compiler, falling back to C++11 if C++14 support is not\navailable.  Note, however, that this default is subject to change: future\npybind11 releases are expected to migrate to newer C++ standards as they become\navailable.  To override this, the standard flag can be given explicitly in\n``PYBIND11_CPP_STANDARD``:\n\n.. code-block:: cmake\n\n    # Use just one of these:\n    # GCC/clang:\n    set(PYBIND11_CPP_STANDARD -std=c++11)\n    set(PYBIND11_CPP_STANDARD -std=c++14)\n    set(PYBIND11_CPP_STANDARD -std=c++1z) # Experimental C++17 support\n    # MSVC:\n    set(PYBIND11_CPP_STANDARD /std:c++14)\n    set(PYBIND11_CPP_STANDARD /std:c++latest) # Enables some MSVC C++17 features\n\n    add_subdirectory(pybind11)  # or find_package(pybind11)\n\nNote that this and all other configuration variables must be set **before** the\ncall to ``add_subdirectory`` or ``find_package``. The variables can also be set\nwhen calling CMake from the command line using the ``-D<variable>=<value>`` flag.\n\nThe target Python version can be selected by setting ``PYBIND11_PYTHON_VERSION``\nor an exact Python installation can be specified with ``PYTHON_EXECUTABLE``.\nFor example:\n\n.. 
code-block:: bash\n\n    cmake -DPYBIND11_PYTHON_VERSION=3.6 ..\n    # or\n    cmake -DPYTHON_EXECUTABLE=path/to/python ..\n\nfind_package vs. add_subdirectory\n---------------------------------\n\nFor CMake-based projects that don't include the pybind11 repository internally,\nan external installation can be detected through ``find_package(pybind11)``.\nSee the `Config file`_ docstring for details of relevant CMake variables.\n\n.. code-block:: cmake\n\n    cmake_minimum_required(VERSION 2.8.12)\n    project(example)\n\n    find_package(pybind11 REQUIRED)\n    pybind11_add_module(example example.cpp)\n\nOnce detected, the aforementioned ``pybind11_add_module`` can be employed as\nbefore. The function usage and configuration variables are identical no matter\nif pybind11 is added as a subdirectory or found as an installed package. You\ncan refer to the same [cmake_example]_ repository for a full sample project\n-- just swap out ``add_subdirectory`` for ``find_package``.\n\n.. _Config file: https://github.com/pybind/pybind11/blob/master/tools/pybind11Config.cmake.in\n\nAdvanced: interface library target\n----------------------------------\n\nWhen using a version of CMake greater than 3.0, pybind11 can additionally\nbe used as a special *interface library* . The target ``pybind11::module``\nis available with pybind11 headers, Python headers and libraries as needed,\nand C++ compile definitions attached. This target is suitable for linking\nto an independently constructed (through ``add_library``, not\n``pybind11_add_module``) target in the consuming project.\n\n.. 
code-block:: cmake\n\n    cmake_minimum_required(VERSION 3.0)\n    project(example)\n\n    find_package(pybind11 REQUIRED)  # or add_subdirectory(pybind11)\n\n    add_library(example MODULE main.cpp)\n    target_link_libraries(example PRIVATE pybind11::module)\n    set_target_properties(example PROPERTIES PREFIX \"${PYTHON_MODULE_PREFIX}\"\n                                             SUFFIX \"${PYTHON_MODULE_EXTENSION}\")\n\n.. warning::\n\n    Since pybind11 is a metatemplate library, it is crucial that certain\n    compiler flags are provided to ensure high quality code generation. In\n    contrast to the ``pybind11_add_module()`` command, the CMake interface\n    library only provides the *minimal* set of parameters to ensure that the\n    code using pybind11 compiles, but it does **not** pass these extra compiler\n    flags (i.e. this is up to you).\n\n    These include Link Time Optimization (``-flto`` on GCC/Clang/ICPC, ``/GL``\n    and ``/LTCG`` on Visual Studio) and .OBJ files with many sections on Visual\n    Studio (``/bigobj``).  The :ref:`FAQ <faq:symhidden>` contains an\n    explanation on why these are needed.\n\nEmbedding the Python interpreter\n--------------------------------\n\nIn addition to extension modules, pybind11 also supports embedding Python into\na C++ executable or library. In CMake, simply link with the ``pybind11::embed``\ntarget. It provides everything needed to get the interpreter running. The Python\nheaders and libraries are attached to the target. Unlike ``pybind11::module``,\nthere is no need to manually set any additional properties here. For more\ninformation about usage in C++, see :doc:`/advanced/embedding`.\n\n.. code-block:: cmake\n\n    cmake_minimum_required(VERSION 3.0)\n    project(example)\n\n    find_package(pybind11 REQUIRED)  # or add_subdirectory(pybind11)\n\n    add_executable(example main.cpp)\n    target_link_libraries(example PRIVATE pybind11::embed)\n\n.. 
_building_manually:\n\nBuilding manually\n=================\n\npybind11 is a header-only library, hence it is not necessary to link against\nany special libraries and there are no intermediate (magic) translation steps.\n\nOn Linux, you can compile an example such as the one given in\n:ref:`simple_example` using the following command:\n\n.. code-block:: bash\n\n    $ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`\n\nThe flags given here assume that you're using Python 3. For Python 2, just\nchange the executable appropriately (to ``python`` or ``python2``).\n\nThe ``python3 -m pybind11 --includes`` command fetches the include paths for\nboth pybind11 and Python headers. This assumes that pybind11 has been installed\nusing ``pip`` or ``conda``. If it hasn't, you can also manually specify\n``-I <path-to-pybind11>/include`` together with the Python includes path\n``python3-config --includes``.\n\nNote that Python 2.7 modules don't use a special suffix, so you should simply\nuse ``example.so`` instead of ``example`python3-config --extension-suffix```.\nBesides, the ``--extension-suffix`` option may or may not be available, depending\non the distribution; in the latter case, the module extension can be manually\nset to ``.so``.\n\nOn Mac OS: the build command is almost the same but it also requires passing\nthe ``-undefined dynamic_lookup`` flag so as to ignore missing symbols when\nbuilding the module:\n\n.. code-block:: bash\n\n    $ c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`\n\nIn general, it is advisable to include several additional build parameters\nthat can considerably reduce the size of the created binary. Refer to section\n:ref:`cmake` for a detailed example of a suitable cross-platform CMake-based\nbuild system that works on all platforms including Windows.\n\n.. 
note::\n\n    On Linux and macOS, it's better to (intentionally) not link against\n    ``libpython``. The symbols will be resolved when the extension library\n    is loaded into a Python binary. This is preferable because you might\n    have several different installations of a given Python version (e.g. the\n    system-provided Python, and one that ships with a piece of commercial\n    software). In this way, the plugin will work with both versions, instead\n    of possibly importing a second Python library into a process that already\n    contains one (which will lead to a segfault).\n\nGenerating binding code automatically\n=====================================\n\nThe ``Binder`` project is a tool for automatic generation of pybind11 binding\ncode by introspecting existing C++ codebases using LLVM/Clang. See the\n[binder]_ documentation for details.\n\n.. [binder] http://cppbinder.readthedocs.io/en/latest/about.html\n"
  },
  {
    "path": "src/third_party/pybind11/docs/conf.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# pybind11 documentation build configuration file, created by\n# sphinx-quickstart on Sun Oct 11 19:23:48 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\nimport subprocess\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['breathe']\n\nbreathe_projects = {'pybind11': '.build/doxygenxml/'}\nbreathe_default_project = 'pybind11'\nbreathe_domain_by_extension = {'h': 'cpp'}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['.templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'pybind11'\ncopyright = '2017, Wenzel Jakob'\nauthor = 'Wenzel Jakob'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '2.3'\n# The full version, including alpha/beta/rc tags.\nrelease = '2.3.dev0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['.build', 'release.rst']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = 'any'\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\n#pygments_style = 'monokai'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd:  # only import and set the theme if we're building docs locally\n    import sphinx_rtd_theme\n    html_theme = 'sphinx_rtd_theme'\n    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n    html_context = {\n        'css_files': [\n            '_static/theme_overrides.css'\n        ]\n    }\nelse:\n    html_context = {\n        'css_files': [\n            '//media.readthedocs.org/css/sphinx_rtd_theme.css',            \n            '//media.readthedocs.org/css/readthedocs-doc-embed.css',    \n            '_static/theme_overrides.css'\n        ]\n    }\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents.  
If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. 
Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n#   'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'\n#   'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'pybind11doc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n'preamble': '\\DeclareUnicodeCharacter{00A0}{}',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n  (master_doc, 'pybind11.tex', 'pybind11 Documentation',\n   'Wenzel Jakob', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = 'pybind11-logo.png'\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    (master_doc, 'pybind11', 'pybind11 Documentation',\n     [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n  (master_doc, 'pybind11', 'pybind11 Documentation',\n   author, 'pybind11', 'One line description of project.',\n   'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\nprimary_domain = 'cpp'\nhighlight_language = 'cpp'\n\n\ndef generate_doxygen_xml(app):\n    build_dir = os.path.join(app.confdir, '.build')\n    if not os.path.exists(build_dir):\n        os.mkdir(build_dir)\n\n    try:\n        subprocess.call(['doxygen', '--version'])\n        retcode = subprocess.call(['doxygen'], cwd=app.confdir)\n        if retcode < 0:\n            sys.stderr.write(\"doxygen error code: {}\\n\".format(-retcode))\n    except OSError as e:\n        sys.stderr.write(\"doxygen execution failed: {}\\n\".format(e))\n\n\ndef setup(app):\n    \"\"\"Add hook for building doxygen xml when needed\"\"\"\n    app.connect(\"builder-inited\", generate_doxygen_xml)\n"
  },
  {
    "path": "src/third_party/pybind11/docs/faq.rst",
    "content": "Frequently asked questions\n##########################\n\n\"ImportError: dynamic module does not define init function\"\n===========================================================\n\n1. Make sure that the name specified in PYBIND11_MODULE is identical to the\nfilename of the extension library (without prefixes such as .so)\n\n2. If the above did not fix the issue, you are likely using an incompatible\nversion of Python (for instance, the extension library was compiled against\nPython 2, while the interpreter is running on top of some version of Python\n3, or vice versa).\n\n\"Symbol not found: ``__Py_ZeroStruct`` / ``_PyInstanceMethod_Type``\"\n========================================================================\n\nSee the first answer.\n\n\"SystemError: dynamic module not initialized properly\"\n======================================================\n\nSee the first answer.\n\nThe Python interpreter immediately crashes when importing my module\n===================================================================\n\nSee the first answer.\n\nCMake doesn't detect the right Python version\n=============================================\n\nThe CMake-based build system will try to automatically detect the installed\nversion of Python and link against that. When this fails, or when there are\nmultiple versions of Python and it finds the wrong one, delete\n``CMakeCache.txt`` and then invoke CMake as follows:\n\n.. code-block:: bash\n\n    cmake -DPYTHON_EXECUTABLE:FILEPATH=<path-to-python-executable> .\n\nLimitations involving reference arguments\n=========================================\n\nIn C++, it's fairly common to pass arguments using mutable references or\nmutable pointers, which allows both read and write access to the value\nsupplied by the caller. This is sometimes done for efficiency reasons, or to\nrealize functions that have multiple return values. Here are two very basic\nexamples:\n\n.. 
code-block:: cpp\n\n    void increment(int &i) { i++; }\n    void increment_ptr(int *i) { (*i)++; }\n\nIn Python, all arguments are passed by reference, so there is no general\nissue in binding such code from Python.\n\nHowever, certain basic Python types (like ``str``, ``int``, ``bool``,\n``float``, etc.) are **immutable**. This means that the following attempt\nto port the function to Python doesn't have the same effect on the value\nprovided by the caller -- in fact, it does nothing at all.\n\n.. code-block:: python\n\n    def increment(i):\n        i += 1 # nope..\n\npybind11 is also affected by such language-level conventions, which means that\nbinding ``increment`` or ``increment_ptr`` will also create Python functions\nthat don't modify their arguments.\n\nAlthough inconvenient, one workaround is to encapsulate the immutable types in\na custom type that does allow modifications.\n\nAn other alternative involves binding a small wrapper lambda function that\nreturns a tuple with all output arguments (see the remainder of the\ndocumentation for examples on binding lambda functions). An example:\n\n.. code-block:: cpp\n\n    int foo(int &i) { i++; return 123; }\n\nand the binding code\n\n.. code-block:: cpp\n\n   m.def(\"foo\", [](int i) { int rv = foo(i); return std::make_tuple(rv, i); });\n\n\nHow can I reduce the build time?\n================================\n\nIt's good practice to split binding code over multiple files, as in the\nfollowing example:\n\n:file:`example.cpp`:\n\n.. code-block:: cpp\n\n    void init_ex1(py::module &);\n    void init_ex2(py::module &);\n    /* ... */\n\n    PYBIND11_MODULE(example, m) {\n        init_ex1(m);\n        init_ex2(m);\n        /* ... */\n    }\n\n:file:`ex1.cpp`:\n\n.. code-block:: cpp\n\n    void init_ex1(py::module &m) {\n        m.def(\"add\", [](int a, int b) { return a + b; });\n    }\n\n:file:`ex2.cpp`:\n\n.. 
code-block:: cpp\n\n    void init_ex2(py::module &m) {\n        m.def(\"sub\", [](int a, int b) { return a - b; });\n    }\n\n:command:`python`:\n\n.. code-block:: pycon\n\n    >>> import example\n    >>> example.add(1, 2)\n    3\n    >>> example.sub(1, 1)\n    0\n\nAs shown above, the various ``init_ex`` functions should be contained in\nseparate files that can be compiled independently from one another, and then\nlinked together into the same final shared object.  Following this approach\nwill:\n\n1. reduce memory requirements per compilation unit.\n\n2. enable parallel builds (if desired).\n\n3. allow for faster incremental builds. For instance, when a single class\n   definition is changed, only a subset of the binding code will generally need\n   to be recompiled.\n\n\"recursive template instantiation exceeded maximum depth of 256\"\n================================================================\n\nIf you receive an error about excessive recursive template evaluation, try\nspecifying a larger value, e.g. ``-ftemplate-depth=1024`` on GCC/Clang. The\nculprit is generally the generation of function signatures at compile time\nusing C++14 template metaprogramming.\n\n.. _`faq:hidden_visibility`:\n\n\"‘SomeClass’ declared with greater visibility than the type of its field ‘SomeClass::member’ [-Wattributes]\"\n============================================================================================================\n\nThis error typically indicates that you are compiling without the required\n``-fvisibility`` flag.  
pybind11 code internally forces hidden visibility on\nall internal code, but if non-hidden (and thus *exported*) code attempts to\ninclude a pybind type (for example, ``py::object`` or ``py::list``) you can run\ninto this warning.\n\nTo avoid it, make sure you are specifying ``-fvisibility=hidden`` when\ncompiling pybind code.\n\nAs to why ``-fvisibility=hidden`` is necessary, because pybind modules could\nhave been compiled under different versions of pybind itself, it is also\nimportant that the symbols defined in one module do not clash with the\npotentially-incompatible symbols defined in another.  While Python extension\nmodules are usually loaded with localized symbols (under POSIX systems\ntypically using ``dlopen`` with the ``RTLD_LOCAL`` flag), this Python default\ncan be changed, but even if it isn't it is not always enough to guarantee\ncomplete independence of the symbols involved when not using\n``-fvisibility=hidden``.\n\nAdditionally, ``-fvisiblity=hidden`` can deliver considerably binary size\nsavings.  (See the following section for more details).\n\n\n.. _`faq:symhidden`:\n\nHow can I create smaller binaries?\n==================================\n\nTo do its job, pybind11 extensively relies on a programming technique known as\n*template metaprogramming*, which is a way of performing computation at compile\ntime using type information. Template metaprogamming usually instantiates code\ninvolving significant numbers of deeply nested types that are either completely\nremoved or reduced to just a few instructions during the compiler's optimization\nphase. However, due to the nested nature of these types, the resulting symbol\nnames in the compiled extension library can be extremely long. For instance,\nthe included test suite contains the following symbol:\n\n.. only:: html\n\n    .. 
code-block:: none\n\n        _​_​Z​N​8​p​y​b​i​n​d​1​1​1​2​c​p​p​_​f​u​n​c​t​i​o​n​C​1​I​v​8​E​x​a​m​p​l​e​2​J​R​N​S​t​3​_​_​1​6​v​e​c​t​o​r​I​N​S​3​_​1​2​b​a​s​i​c​_​s​t​r​i​n​g​I​w​N​S​3​_​1​1​c​h​a​r​_​t​r​a​i​t​s​I​w​E​E​N​S​3​_​9​a​l​l​o​c​a​t​o​r​I​w​E​E​E​E​N​S​8​_​I​S​A​_​E​E​E​E​E​J​N​S​_​4​n​a​m​e​E​N​S​_​7​s​i​b​l​i​n​g​E​N​S​_​9​i​s​_​m​e​t​h​o​d​E​A​2​8​_​c​E​E​E​M​T​0​_​F​T​_​D​p​T​1​_​E​D​p​R​K​T​2​_\n\n.. only:: not html\n\n    .. code-block:: cpp\n\n        __ZN8pybind1112cpp_functionC1Iv8Example2JRNSt3__16vectorINS3_12basic_stringIwNS3_11char_traitsIwEENS3_9allocatorIwEEEENS8_ISA_EEEEEJNS_4nameENS_7siblingENS_9is_methodEA28_cEEEMT0_FT_DpT1_EDpRKT2_\n\nwhich is the mangled form of the following function type:\n\n.. code-block:: cpp\n\n    pybind11::cpp_function::cpp_function<void, Example2, std::__1::vector<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> >, std::__1::allocator<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> > > >&, pybind11::name, pybind11::sibling, pybind11::is_method, char [28]>(void (Example2::*)(std::__1::vector<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> >, std::__1::allocator<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> > > >&), pybind11::name const&, pybind11::sibling const&, pybind11::is_method const&, char const (&) [28])\n\nThe memory needed to store just the mangled name of this function (196 bytes)\nis larger than the actual piece of code (111 bytes) it represents! On the other\nhand, it's silly to even give this function a name -- after all, it's just a\ntiny cog in a bigger piece of machinery that is not exposed to the outside\nworld. 
So we'll generally only want to export symbols for those functions which\nare actually called from the outside.\n\nThis can be achieved by specifying the parameter ``-fvisibility=hidden`` to GCC\nand Clang, which sets the default symbol visibility to *hidden*, which has a\ntremendous impact on the final binary size of the resulting extension library.\n(On Visual Studio, symbols are already hidden by default, so nothing needs to\nbe done there.)\n\nIn addition to decreasing binary size, ``-fvisibility=hidden`` also avoids\npotential serious issues when loading multiple modules and is required for\nproper pybind operation.  See the previous FAQ entry for more details.\n\nWorking with ancient Visual Studio 2008 builds on Windows\n=========================================================\n\nThe official Windows distributions of Python are compiled using truly\nancient versions of Visual Studio that lack good C++11 support. Some users\nimplicitly assume that it would be impossible to load a plugin built with\nVisual Studio 2015 into a Python distribution that was compiled using Visual\nStudio 2008. However, no such issue exists: it's perfectly legitimate to\ninterface DLLs that are built with different compilers and/or C libraries.\nCommon gotchas to watch out for involve not ``free()``-ing memory region\nthat that were ``malloc()``-ed in another shared library, using data\nstructures with incompatible ABIs, and so on. pybind11 is very careful not\nto make these types of mistakes.\n\nInconsistent detection of Python version in CMake and pybind11\n==============================================================\n\nThe functions ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` provided by CMake\nfor Python version detection are not used by pybind11 due to unreliability and limitations that make\nthem unsuitable for pybind11's needs. Instead pybind provides its own, more reliable Python detection\nCMake code. 
Conflicts can arise, however, when using pybind11 in a project that *also* uses the CMake\nPython detection in a system with several Python versions installed.\n\nThis difference may cause inconsistencies and errors if *both* mechanisms are used in the same project. Consider the following\nCmake code executed in a system with Python 2.7 and 3.x installed:\n\n.. code-block:: cmake\n\n    find_package(PythonInterp)\n    find_package(PythonLibs)\n    find_package(pybind11)\n\nIt will detect Python 2.7 and pybind11 will pick it as well.\n\nIn contrast this code:\n\n.. code-block:: cmake\n\n    find_package(pybind11)\n    find_package(PythonInterp)\n    find_package(PythonLibs)\n\nwill detect Python 3.x for pybind11 and may crash on ``find_package(PythonLibs)`` afterwards.\n\nIt is advised to avoid using ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` from CMake and rely\non pybind11 in detecting Python version. If this is not possible CMake machinery should be called *before* including pybind11.\n\nHow to cite this project?\n=========================\n\nWe suggest the following BibTeX template to cite pybind11 in scientific\ndiscourse:\n\n.. code-block:: bash\n\n    @misc{pybind11,\n       author = {Wenzel Jakob and Jason Rhinelander and Dean Moldovan},\n       year = {2017},\n       note = {https://github.com/pybind/pybind11},\n       title = {pybind11 -- Seamless operability between C++11 and Python}\n    }\n"
  },
  {
    "path": "src/third_party/pybind11/docs/index.rst",
    "content": ".. only: not latex\n\n    .. image:: pybind11-logo.png\n\npybind11 --- Seamless operability between C++11 and Python\n==========================================================\n\n.. only: not latex\n\n    Contents:\n\n.. toctree::\n   :maxdepth: 1\n\n   intro\n   changelog\n   upgrade\n\n.. toctree::\n   :caption: The Basics\n   :maxdepth: 2\n\n   basics\n   classes\n   compiling\n\n.. toctree::\n   :caption: Advanced Topics\n   :maxdepth: 2\n\n   advanced/functions\n   advanced/classes\n   advanced/exceptions\n   advanced/smart_ptrs\n   advanced/cast/index\n   advanced/pycpp/index\n   advanced/embedding\n   advanced/misc\n\n.. toctree::\n   :caption: Extra Information\n   :maxdepth: 1\n\n   faq\n   benchmark\n   limitations\n   reference\n"
  },
  {
    "path": "src/third_party/pybind11/docs/intro.rst",
    "content": ".. image:: pybind11-logo.png\n\nAbout this project\n==================\n**pybind11** is a lightweight header-only library that exposes C++ types in Python\nand vice versa, mainly to create Python bindings of existing C++ code. Its\ngoals and syntax are similar to the excellent `Boost.Python`_ library by David\nAbrahams: to minimize boilerplate code in traditional extension modules by\ninferring type information using compile-time introspection.\n\n.. _Boost.Python: http://www.boost.org/doc/libs/release/libs/python/doc/index.html\n\nThe main issue with Boost.Python—and the reason for creating such a similar\nproject—is Boost. Boost is an enormously large and complex suite of utility\nlibraries that works with almost every C++ compiler in existence. This\ncompatibility has its cost: arcane template tricks and workarounds are\nnecessary to support the oldest and buggiest of compiler specimens. Now that\nC++11-compatible compilers are widely available, this heavy machinery has\nbecome an excessively large and unnecessary dependency.\nThink of this library as a tiny self-contained version of Boost.Python with\neverything stripped away that isn't relevant for binding generation. Without\ncomments, the core header files only require ~4K lines of code and depend on\nPython (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This\ncompact implementation was possible thanks to some of the new C++11 language\nfeatures (specifically: tuples, lambda functions and variadic templates). 
Since\nits creation, this library has grown beyond Boost.Python in many ways, leading\nto dramatically simpler binding code in many common situations.\n\nCore features\n*************\nThe following core C++ features can be mapped to Python\n\n- Functions accepting and returning custom data structures per value, reference, or pointer\n- Instance methods and static methods\n- Overloaded functions\n- Instance attributes and static attributes\n- Arbitrary exception types\n- Enumerations\n- Callbacks\n- Iterators and ranges\n- Custom operators\n- Single and multiple inheritance\n- STL data structures\n- Smart pointers with reference counting like ``std::shared_ptr``\n- Internal references with correct reference counting\n- C++ classes with virtual (and pure virtual) methods can be extended in Python\n\nGoodies\n*******\nIn addition to the core functionality, pybind11 provides some extra goodies:\n\n- Python 2.7, 3.x, and PyPy (PyPy2.7 >= 5.7) are supported with an\n  implementation-agnostic interface.\n\n- It is possible to bind C++11 lambda functions with captured variables. The\n  lambda capture data is stored inside the resulting Python function object.\n\n- pybind11 uses C++11 move constructors and move assignment operators whenever\n  possible to efficiently transfer custom data types.\n\n- It's easy to expose the internal storage of custom data types through\n  Pythons' buffer protocols. This is handy e.g. 
for fast conversion between\n  C++ matrix classes like Eigen and NumPy without expensive copy operations.\n\n- pybind11 can automatically vectorize functions so that they are transparently\n  applied to all entries of one or more NumPy array arguments.\n\n- Python's slice-based access and assignment operations can be supported with\n  just a few lines of code.\n\n- Everything is contained in just a few header files; there is no need to link\n  against any additional libraries.\n\n- Binaries are generally smaller by a factor of at least 2 compared to\n  equivalent bindings generated by Boost.Python. A recent pybind11 conversion\n  of `PyRosetta`_, an enormous Boost.Python binding project, reported a binary\n  size reduction of **5.4x** and compile time reduction by **5.8x**.\n\n- Function signatures are precomputed at compile time (using ``constexpr``),\n  leading to smaller binaries.\n\n- With little extra effort, C++ types can be pickled and unpickled similar to\n  regular Python objects.\n\n.. _PyRosetta: http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf\n\nSupported compilers\n*******************\n\n1. Clang/LLVM (any non-ancient version with C++11 support)\n2. GCC 4.8 or newer\n3. Microsoft Visual Studio 2015 or newer\n4. Intel C++ compiler v17 or newer (v16 with pybind11 v2.0 and v15 with pybind11 v2.0 and a `workaround <https://github.com/pybind/pybind11/issues/276>`_ )\n"
  },
  {
    "path": "src/third_party/pybind11/docs/limitations.rst",
    "content": "Limitations\n###########\n\npybind11 strives to be a general solution to binding generation, but it also has\ncertain limitations:\n\n- pybind11 casts away ``const``-ness in function arguments and return values.\n  This is in line with the Python language, which has no concept of ``const``\n  values. This means that some additional care is needed to avoid bugs that\n  would be caught by the type checker in a traditional C++ program.\n\n- The NumPy interface ``pybind11::array`` greatly simplifies accessing\n  numerical data from C++ (and vice versa), but it's not a full-blown array\n  class like ``Eigen::Array`` or ``boost.multi_array``.\n\nThese features could be implemented but would lead to a significant increase in\ncomplexity. I've decided to draw the line here to keep this project simple and\ncompact. Users who absolutely require these features are encouraged to fork\npybind11.\n\n"
  },
  {
    "path": "src/third_party/pybind11/docs/reference.rst",
    "content": ".. _reference:\n\n.. warning::\n\n    Please be advised that the reference documentation discussing pybind11\n    internals is currently incomplete. Please refer to the previous sections\n    and the pybind11 header files for the nitty gritty details.\n\nReference\n#########\n\n.. _macros:\n\nMacros\n======\n\n.. doxygendefine:: PYBIND11_MODULE\n\n.. _core_types:\n\nConvenience classes for arbitrary Python types\n==============================================\n\nCommon member functions\n-----------------------\n\n.. doxygenclass:: object_api\n    :members:\n\nWithout reference counting\n--------------------------\n\n.. doxygenclass:: handle\n    :members:\n\nWith reference counting\n-----------------------\n\n.. doxygenclass:: object\n    :members:\n\n.. doxygenfunction:: reinterpret_borrow\n\n.. doxygenfunction:: reinterpret_steal\n\nConvenience classes for specific Python types\n=============================================\n\n.. doxygenclass:: module\n    :members:\n\n.. doxygengroup:: pytypes\n    :members:\n\n.. _extras:\n\nPassing extra arguments to ``def`` or ``class_``\n================================================\n\n.. doxygengroup:: annotations\n    :members:\n\nEmbedding the interpreter\n=========================\n\n.. doxygendefine:: PYBIND11_EMBEDDED_MODULE\n\n.. doxygenfunction:: initialize_interpreter\n\n.. doxygenfunction:: finalize_interpreter\n\n.. doxygenclass:: scoped_interpreter\n\nRedirecting C++ streams\n=======================\n\n.. doxygenclass:: scoped_ostream_redirect\n\n.. doxygenclass:: scoped_estream_redirect\n\n.. doxygenfunction:: add_ostream_redirect\n\nPython built-in functions\n=========================\n\n.. doxygengroup:: python_builtins\n    :members:\n\nExceptions\n==========\n\n.. doxygenclass:: error_already_set\n    :members:\n\n.. doxygenclass:: builtin_exception\n    :members:\n\n\nLiterals\n========\n\n.. doxygennamespace:: literals\n"
  },
  {
    "path": "src/third_party/pybind11/docs/release.rst",
    "content": "To release a new version of pybind11:\n\n- Update the version number and push to pypi\n    - Update ``pybind11/_version.py`` (set release version, remove 'dev').\n    - Update ``PYBIND11_VERSION_MAJOR`` etc. in ``include/pybind11/detail/common.h``.\n    - Ensure that all the information in ``setup.py`` is up-to-date.\n    - Update version in ``docs/conf.py``.\n    - Tag release date in ``docs/changelog.rst``.\n    - ``git add`` and ``git commit``.\n    - if new minor version: ``git checkout -b vX.Y``, ``git push -u origin vX.Y``\n    - ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``.\n    - ``git push``\n    - ``git push --tags``.\n    - ``python setup.py sdist upload``.\n    - ``python setup.py bdist_wheel upload``.\n- Update conda-forge (https://github.com/conda-forge/pybind11-feedstock) via PR\n    - download release package from Github: ``wget https://github.com/pybind/pybind11/archive/vX.Y.Z.tar.gz``\n    - compute checksum: ``shasum -a 256  vX.Y.Z.tar.gz``\n    - change version number and checksum in ``recipe/meta.yml``\n- Get back to work\n    - Update ``_version.py`` (add 'dev' and increment minor).\n    - Update version in ``docs/conf.py``\n    - Update version macros in ``include/pybind11/common.h``\n    - ``git add`` and ``git commit``.\n      ``git push``\n"
  },
  {
    "path": "src/third_party/pybind11/docs/requirements.txt",
    "content": "breathe == 4.5.0\n"
  },
  {
    "path": "src/third_party/pybind11/docs/upgrade.rst",
    "content": "Upgrade guide\n#############\n\nThis is a companion guide to the :doc:`changelog`. While the changelog briefly\nlists all of the new features, improvements and bug fixes, this upgrade guide\nfocuses only on the subset which directly impacts your experience when upgrading\nto a new version. But it goes into more detail. This includes things like\ndeprecated APIs and their replacements, build system changes, general code\nmodernization and other useful information.\n\n\nv2.2\n====\n\nDeprecation of the ``PYBIND11_PLUGIN`` macro\n--------------------------------------------\n\n``PYBIND11_MODULE`` is now the preferred way to create module entry points.\nThe old macro emits a compile-time deprecation warning.\n\n.. code-block:: cpp\n\n    // old\n    PYBIND11_PLUGIN(example) {\n        py::module m(\"example\", \"documentation string\");\n\n        m.def(\"add\", [](int a, int b) { return a + b; });\n\n        return m.ptr();\n    }\n\n    // new\n    PYBIND11_MODULE(example, m) {\n        m.doc() = \"documentation string\"; // optional\n\n        m.def(\"add\", [](int a, int b) { return a + b; });\n    }\n\n\nNew API for defining custom constructors and pickling functions\n---------------------------------------------------------------\n\nThe old placement-new custom constructors have been deprecated. The new approach\nuses ``py::init()`` and factory functions to greatly improve type safety.\n\nPlacement-new can be called accidentally with an incompatible type (without any\ncompiler errors or warnings), or it can initialize the same object multiple times\nif not careful with the Python-side ``__init__`` calls. The new-style custom\nconstructors prevent such mistakes. See :ref:`custom_constructors` for details.\n\n.. code-block:: cpp\n\n    // old -- deprecated (runtime warning shown only in debug mode)\n    py::class_<Foo>(m, \"Foo\")\n        .def(\"__init__\", [](Foo &self, ...) 
{\n            new (&self) Foo(...); // uses placement-new\n        });\n\n    // new\n    py::class_<Foo>(m, \"Foo\")\n        .def(py::init([](...) { // Note: no `self` argument\n            return new Foo(...); // return by raw pointer\n            // or: return std::make_unique<Foo>(...); // return by holder\n            // or: return Foo(...); // return by value (move constructor)\n        }));\n\nMirroring the custom constructor changes, ``py::pickle()`` is now the preferred\nway to get and set object state. See :ref:`pickling` for details.\n\n.. code-block:: cpp\n\n    // old -- deprecated (runtime warning shown only in debug mode)\n    py::class_<Foo>(m, \"Foo\")\n        ...\n        .def(\"__getstate__\", [](const Foo &self) {\n            return py::make_tuple(self.value1(), self.value2(), ...);\n        })\n        .def(\"__setstate__\", [](Foo &self, py::tuple t) {\n            new (&self) Foo(t[0].cast<std::string>(), ...);\n        });\n\n    // new\n    py::class_<Foo>(m, \"Foo\")\n        ...\n        .def(py::pickle(\n            [](const Foo &self) { // __getstate__\n                return py::make_tuple(self.value1(), self.value2(), ...); // unchanged\n            },\n            [](py::tuple t) { // __setstate__, note: no `self` argument\n                return new Foo(t[0].cast<std::string>(), ...);\n                // or: return std::make_unique<Foo>(...); // return by holder\n                // or: return Foo(...); // return by value (move constructor)\n            }\n        ));\n\nFor both the constructors and pickling, warnings are shown at module\ninitialization time (on import, not when the functions are called).\nThey're only visible when compiled in debug mode. Sample warning:\n\n.. code-block:: none\n\n    pybind11-bound class 'mymodule.Foo' is using an old-style placement-new '__init__'\n    which has been deprecated. 
See the upgrade guide in pybind11's docs.\n\n\nStricter enforcement of hidden symbol visibility for pybind11 modules\n---------------------------------------------------------------------\n\npybind11 now tries to actively enforce hidden symbol visibility for modules.\nIf you're using either one of pybind11's :doc:`CMake or Python build systems\n<compiling>` (the two example repositories) and you haven't been exporting any\nsymbols, there's nothing to be concerned about. All the changes have been done\ntransparently in the background. If you were building manually or relied on\nspecific default visibility, read on.\n\nSetting default symbol visibility to *hidden* has always been recommended for\npybind11 (see :ref:`faq:symhidden`). On Linux and macOS, hidden symbol\nvisibility (in conjunction with the ``strip`` utility) yields much smaller\nmodule binaries. `CPython's extension docs`_ also recommend hiding symbols\nby default, with the goal of avoiding symbol name clashes between modules.\nStarting with v2.2, pybind11 enforces this more strictly: (1) by declaring\nall symbols inside the ``pybind11`` namespace as hidden and (2) by including\nthe ``-fvisibility=hidden`` flag on Linux and macOS (only for extension\nmodules, not for embedding the interpreter).\n\n.. _CPython's extension docs: https://docs.python.org/3/extending/extending.html#providing-a-c-api-for-an-extension-module\n\nThe namespace-scope hidden visibility is done automatically in pybind11's\nheaders and it's generally transparent to users. It ensures that:\n\n* Modules compiled with different pybind11 versions don't clash with each other.\n\n* Some new features, like ``py::module_local`` bindings, can work as intended.\n\nThe ``-fvisibility=hidden`` flag applies the same visibility to user bindings\noutside of the ``pybind11`` namespace. It's now set automatically by pybind11's\nCMake and Python build systems, but this needs to be done manually by users\nof other build systems. 
Adding this flag:\n\n* Minimizes the chances of symbol conflicts between modules. E.g. if two\n  unrelated modules were statically linked to different (ABI-incompatible)\n  versions of the same third-party library, a symbol clash would be likely\n  (and would end with unpredictable results).\n\n* Produces smaller binaries on Linux and macOS, as pointed out previously.\n\nWithin pybind11's CMake build system, ``pybind11_add_module`` has always been\nsetting the ``-fvisibility=hidden`` flag in release mode. From now on, it's\nbeing applied unconditionally, even in debug mode and it can no longer be opted\nout of with the ``NO_EXTRAS`` option. The ``pybind11::module`` target now also\nadds this flag to its interface. The ``pybind11::embed`` target is unchanged.\n\nThe most significant change here is for the ``pybind11::module`` target. If you\nwere previously relying on default visibility, i.e. if your Python module was\ndoubling as a shared library with dependents, you'll need to either export\nsymbols manually (recommended for cross-platform libraries) or factor out the\nshared library (and have the Python module link to it like the other\ndependents). As a temporary workaround, you can also restore default visibility\nusing the CMake code below, but this is not recommended in the long run:\n\n.. 
code-block:: cmake\n\n    target_link_libraries(mymodule PRIVATE pybind11::module)\n\n    add_library(restore_default_visibility INTERFACE)\n    target_compile_options(restore_default_visibility INTERFACE -fvisibility=default)\n    target_link_libraries(mymodule PRIVATE restore_default_visibility)\n\n\nLocal STL container bindings\n----------------------------\n\nPrevious pybind11 versions could only bind types globally -- all pybind11\nmodules, even unrelated ones, would have access to the same exported types.\nHowever, this would also result in a conflict if two modules exported the\nsame C++ type, which is especially problematic for very common types, e.g.\n``std::vector<int>``. :ref:`module_local` were added to resolve this (see\nthat section for a complete usage guide).\n\n``py::class_`` still defaults to global bindings (because these types are\nusually unique across modules), however in order to avoid clashes of opaque\ntypes, ``py::bind_vector`` and ``py::bind_map`` will now bind STL containers\nas ``py::module_local`` if their elements are: builtins (``int``, ``float``,\netc.), not bound using ``py::class_``, or bound as ``py::module_local``. For\nexample, this change allows multiple modules to bind ``std::vector<int>``\nwithout causing conflicts. See :ref:`stl_bind` for more details.\n\nWhen upgrading to this version, if you have multiple modules which depend on\na single global binding of an STL container, note that all modules can still\naccept foreign  ``py::module_local`` types in the direction of Python-to-C++.\nThe locality only affects the C++-to-Python direction. 
If this is needed in\nmultiple modules, you'll need to either:\n\n* Add a copy of the same STL binding to all of the modules which need it.\n\n* Restore the global status of that single binding by marking it\n  ``py::module_local(false)``.\n\nThe latter is an easy workaround, but in the long run it would be best to\nlocalize all common type bindings in order to avoid conflicts with\nthird-party modules.\n\n\nNegative strides for Python buffer objects and numpy arrays\n-----------------------------------------------------------\n\nSupport for negative strides required changing the integer type from unsigned\nto signed in the interfaces of ``py::buffer_info`` and ``py::array``. If you\nhave compiler warnings enabled, you may notice some new conversion warnings\nafter upgrading. These can be resolved using ``static_cast``.\n\n\nDeprecation of some ``py::object`` APIs\n---------------------------------------\n\nTo compare ``py::object`` instances by pointer, you should now use\n``obj1.is(obj2)`` which is equivalent to ``obj1 is obj2`` in Python.\nPreviously, pybind11 used ``operator==`` for this (``obj1 == obj2``), but\nthat could be confusing and is now deprecated (so that it can eventually\nbe replaced with proper rich object comparison in a future release).\n\nFor classes which inherit from ``py::object``, ``borrowed`` and ``stolen``\nwere previously available as protected constructor tags. Now the types\nshould be used directly instead: ``borrowed_t{}`` and ``stolen_t{}``\n(`#771 <https://github.com/pybind/pybind11/pull/771>`_).\n\n\nStricter compile-time error checking\n------------------------------------\n\nSome error checks have been moved from run time to compile time. Notably,\nautomatic conversion of ``std::shared_ptr<T>`` is not possible when ``T`` is\nnot directly registered with ``py::class_<T>`` (e.g. 
``std::shared_ptr<int>``\nor ``std::shared_ptr<std::vector<T>>`` are not automatically convertible).\nAttempting to bind a function with such arguments now results in a compile-time\nerror instead of waiting to fail at run time.\n\n``py::init<...>()`` constructor definitions are also stricter and now prevent\nbindings which could cause unexpected behavior:\n\n.. code-block:: cpp\n\n    struct Example {\n        Example(int &);\n    };\n\n    py::class_<Example>(m, \"Example\")\n        .def(py::init<int &>()); // OK, exact match\n        // .def(py::init<int>()); // compile-time error, mismatch\n\nA non-``const`` lvalue reference is not allowed to bind to an rvalue. However,\nnote that a constructor taking ``const T &`` can still be registered using\n``py::init<T>()`` because a ``const`` lvalue reference can bind to an rvalue.\n\nv2.1\n====\n\nMinimum compiler versions are enforced at compile time\n------------------------------------------------------\n\nThe minimums also apply to v2.0 but the check is now explicit and a compile-time\nerror is raised if the compiler does not meet the requirements:\n\n* GCC >= 4.8\n* clang >= 3.3 (appleclang >= 5.0)\n* MSVC >= 2015u3\n* Intel C++ >= 15.0\n\n\nThe ``py::metaclass`` attribute is not required for static properties\n---------------------------------------------------------------------\n\nBinding classes with static properties is now possible by default. The\nzero-parameter version of ``py::metaclass()`` is deprecated. However, a new\none-parameter ``py::metaclass(python_type)`` version was added for rare\ncases when a custom metaclass is needed to override pybind11's default.\n\n.. 
code-block:: cpp\n\n    // old -- emits a deprecation warning\n    py::class_<Foo>(m, \"Foo\", py::metaclass())\n        .def_property_readonly_static(\"foo\", ...);\n\n    // new -- static properties work without the attribute\n    py::class_<Foo>(m, \"Foo\")\n        .def_property_readonly_static(\"foo\", ...);\n\n    // new -- advanced feature, override pybind11's default metaclass\n    py::class_<Bar>(m, \"Bar\", py::metaclass(custom_python_type))\n        ...\n\n\nv2.0\n====\n\nBreaking changes in ``py::class_``\n----------------------------------\n\nThese changes were necessary to make type definitions in pybind11\nfuture-proof, to support PyPy via its ``cpyext`` mechanism (`#527\n<https://github.com/pybind/pybind11/pull/527>`_), and to improve efficiency\n(`rev. 86d825 <https://github.com/pybind/pybind11/commit/86d825>`_).\n\n1. Declarations of types that provide access via the buffer protocol must\n   now include the ``py::buffer_protocol()`` annotation as an argument to\n   the ``py::class_`` constructor.\n\n   .. code-block:: cpp\n\n       py::class_<Matrix>(\"Matrix\", py::buffer_protocol())\n           .def(py::init<...>())\n           .def_buffer(...);\n\n2. Classes which include static properties (e.g. ``def_readwrite_static()``)\n   must now include the ``py::metaclass()`` attribute. Note: this requirement\n   has since been removed in v2.1. If you're upgrading from 1.x, it's\n   recommended to skip directly to v2.1 or newer.\n\n3. This version of pybind11 uses a redesigned mechanism for instantiating\n   trampoline classes that are used to override virtual methods from within\n   Python. This led to the following user-visible syntax change:\n\n   .. 
code-block:: cpp\n\n       // old v1.x syntax\n       py::class_<TrampolineClass>(\"MyClass\")\n           .alias<MyClass>()\n           ...\n\n       // new v2.x syntax\n       py::class_<MyClass, TrampolineClass>(\"MyClass\")\n           ...\n\n   Importantly, both the original and the trampoline class are now specified\n   as arguments to the ``py::class_`` template, and the ``alias<..>()`` call\n   is gone. The new scheme has zero overhead in cases when Python doesn't\n   override any functions of the underlying C++ class.\n   `rev. 86d825 <https://github.com/pybind/pybind11/commit/86d825>`_.\n\n   The class type must be the first template argument given to ``py::class_``\n   while the trampoline can be mixed in arbitrary order with other arguments\n   (see the following section).\n\n\nDeprecation of the ``py::base<T>()`` attribute\n----------------------------------------------\n\n``py::base<T>()`` was deprecated in favor of specifying ``T`` as a template\nargument to ``py::class_``. This new syntax also supports multiple inheritance.\nNote that, while the type being exported must be the first argument in the\n``py::class_<Class, ...>`` template, the order of the following types (bases,\nholder and/or trampoline) is not important.\n\n.. code-block:: cpp\n\n    // old v1.x\n    py::class_<Derived>(\"Derived\", py::base<Base>());\n\n    // new v2.x\n    py::class_<Derived, Base>(\"Derived\");\n\n    // new -- multiple inheritance\n    py::class_<Derived, Base1, Base2>(\"Derived\");\n\n    // new -- apart from `Derived` the argument order can be arbitrary\n    py::class_<Derived, Base1, Holder, Base2, Trampoline>(\"Derived\");\n\n\nOut-of-the-box support for ``std::shared_ptr``\n----------------------------------------------\n\nThe relevant type caster is now built in, so it's no longer necessary to\ninclude a declaration of the form:\n\n.. 
code-block:: cpp\n\n    PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>)\n\nContinuing to do so won’t cause an error or even a deprecation warning,\nbut it's completely redundant.\n\n\nDeprecation of a few ``py::object`` APIs\n----------------------------------------\n\nAll of the old-style calls emit deprecation warnings.\n\n+---------------------------------------+---------------------------------------------+\n|  Old syntax                           |  New syntax                                 |\n+=======================================+=============================================+\n| ``obj.call(args...)``                 | ``obj(args...)``                            |\n+---------------------------------------+---------------------------------------------+\n| ``obj.str()``                         | ``py::str(obj)``                            |\n+---------------------------------------+---------------------------------------------+\n| ``auto l = py::list(obj); l.check()`` | ``py::isinstance<py::list>(obj)``           |\n+---------------------------------------+---------------------------------------------+\n| ``py::object(ptr, true)``             | ``py::reinterpret_borrow<py::object>(ptr)`` |\n+---------------------------------------+---------------------------------------------+\n| ``py::object(ptr, false)``            | ``py::reinterpret_steal<py::object>(ptr)``  |\n+---------------------------------------+---------------------------------------------+\n| ``if (obj.attr(\"foo\"))``              | ``if (py::hasattr(obj, \"foo\"))``            |\n+---------------------------------------+---------------------------------------------+\n| ``if (obj[\"bar\"])``                   | ``if (obj.contains(\"bar\"))``                |\n+---------------------------------------+---------------------------------------------+\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/attr.h",
    "content": "/*\n    pybind11/attr.h: Infrastructure for processing custom\n    type and function attributes\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"cast.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\n/// \\addtogroup annotations\n/// @{\n\n/// Annotation for methods\nstruct is_method { handle class_; is_method(const handle &c) : class_(c) { } };\n\n/// Annotation for operators\nstruct is_operator { };\n\n/// Annotation for parent scope\nstruct scope { handle value; scope(const handle &s) : value(s) { } };\n\n/// Annotation for documentation\nstruct doc { const char *value; doc(const char *value) : value(value) { } };\n\n/// Annotation for function names\nstruct name { const char *value; name(const char *value) : value(value) { } };\n\n/// Annotation indicating that a function is an overload associated with a given \"sibling\"\nstruct sibling { handle value; sibling(const handle &value) : value(value.ptr()) { } };\n\n/// Annotation indicating that a class derives from another given type\ntemplate <typename T> struct base {\n    PYBIND11_DEPRECATED(\"base<T>() was deprecated in favor of specifying 'T' as a template argument to class_\")\n    base() { }\n};\n\n/// Keep patient alive while nurse lives\ntemplate <size_t Nurse, size_t Patient> struct keep_alive { };\n\n/// Annotation indicating that a class is involved in a multiple inheritance relationship\nstruct multiple_inheritance { };\n\n/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class\nstruct dynamic_attr { };\n\n/// Annotation which enables the buffer protocol for a type\nstruct buffer_protocol { };\n\n/// Annotation which requests that a special metaclass is created for a type\nstruct metaclass {\n    handle value;\n\n    PYBIND11_DEPRECATED(\"py::metaclass() is no longer required. 
It's turned on by default now.\")\n    metaclass() {}\n\n    /// Override pybind11's default metaclass\n    explicit metaclass(handle value) : value(value) { }\n};\n\n/// Annotation that marks a class as local to the module:\nstruct module_local { const bool value; constexpr module_local(bool v = true) : value(v) { } };\n\n/// Annotation to mark enums as an arithmetic type\nstruct arithmetic { };\n\n/** \\rst\n    A call policy which places one or more guard variables (``Ts...``) around the function call.\n\n    For example, this definition:\n\n    .. code-block:: cpp\n\n        m.def(\"foo\", foo, py::call_guard<T>());\n\n    is equivalent to the following pseudocode:\n\n    .. code-block:: cpp\n\n        m.def(\"foo\", [](args...) {\n            T scope_guard;\n            return foo(args...); // forwarded arguments\n        });\n \\endrst */\ntemplate <typename... Ts> struct call_guard;\n\ntemplate <> struct call_guard<> { using type = detail::void_type; };\n\ntemplate <typename T>\nstruct call_guard<T> {\n    static_assert(std::is_default_constructible<T>::value,\n                  \"The guard type must be default constructible\");\n\n    using type = T;\n};\n\ntemplate <typename T, typename... 
Ts>\nstruct call_guard<T, Ts...> {\n    struct type {\n        T guard{}; // Compose multiple guard types with left-to-right default-constructor order\n        typename call_guard<Ts...>::type next{};\n    };\n};\n\n/// @} annotations\n\nNAMESPACE_BEGIN(detail)\n/* Forward declarations */\nenum op_id : int;\nenum op_type : int;\nstruct undefined_t;\ntemplate <op_id id, op_type ot, typename L = undefined_t, typename R = undefined_t> struct op_;\ninline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret);\n\n/// Internal data structure which holds metadata about a keyword argument\nstruct argument_record {\n    const char *name;  ///< Argument name\n    const char *descr; ///< Human-readable version of the argument value\n    handle value;      ///< Associated Python object\n    bool convert : 1;  ///< True if the argument is allowed to convert when loading\n    bool none : 1;     ///< True if None is allowed when loading\n\n    argument_record(const char *name, const char *descr, handle value, bool convert, bool none)\n        : name(name), descr(descr), value(value), convert(convert), none(none) { }\n};\n\n/// Internal data structure which holds metadata about a bound function (signature, overloads, etc.)\nstruct function_record {\n    function_record()\n        : is_constructor(false), is_new_style_constructor(false), is_stateless(false),\n          is_operator(false), has_args(false), has_kwargs(false), is_method(false) { }\n\n    /// Function name\n    char *name = nullptr; /* why no C++ strings? They generate heavier code.. 
*/\n\n    // User-specified documentation string\n    char *doc = nullptr;\n\n    /// Human-readable version of the function signature\n    char *signature = nullptr;\n\n    /// List of registered keyword arguments\n    std::vector<argument_record> args;\n\n    /// Pointer to lambda function which converts arguments and performs the actual call\n    handle (*impl) (function_call &) = nullptr;\n\n    /// Storage for the wrapped function pointer and captured data, if any\n    void *data[3] = { };\n\n    /// Pointer to custom destructor for 'data' (if needed)\n    void (*free_data) (function_record *ptr) = nullptr;\n\n    /// Return value policy associated with this function\n    return_value_policy policy = return_value_policy::automatic;\n\n    /// True if name == '__init__'\n    bool is_constructor : 1;\n\n    /// True if this is a new-style `__init__` defined in `detail/init.h`\n    bool is_new_style_constructor : 1;\n\n    /// True if this is a stateless function pointer\n    bool is_stateless : 1;\n\n    /// True if this is an operator (__add__), etc.\n    bool is_operator : 1;\n\n    /// True if the function has a '*args' argument\n    bool has_args : 1;\n\n    /// True if the function has a '**kwargs' argument\n    bool has_kwargs : 1;\n\n    /// True if this is a method\n    bool is_method : 1;\n\n    /// Number of arguments (including py::args and/or py::kwargs, if present)\n    std::uint16_t nargs;\n\n    /// Python method object\n    PyMethodDef *def = nullptr;\n\n    /// Python handle to the parent scope (a class or a module)\n    handle scope;\n\n    /// Python handle to the sibling function representing an overload chain\n    handle sibling;\n\n    /// Pointer to next overload\n    function_record *next = nullptr;\n};\n\n/// Special data structure which (temporarily) holds metadata about a bound class\nstruct type_record {\n    PYBIND11_NOINLINE type_record()\n        : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false), 
module_local(false) { }\n\n    /// Handle to the parent scope\n    handle scope;\n\n    /// Name of the class\n    const char *name = nullptr;\n\n    // Pointer to RTTI type_info data structure\n    const std::type_info *type = nullptr;\n\n    /// How large is the underlying C++ type?\n    size_t type_size = 0;\n\n    /// What is the alignment of the underlying C++ type?\n    size_t type_align = 0;\n\n    /// How large is the type's holder?\n    size_t holder_size = 0;\n\n    /// The global operator new can be overridden with a class-specific variant\n    void *(*operator_new)(size_t) = nullptr;\n\n    /// Function pointer to class_<..>::init_instance\n    void (*init_instance)(instance *, const void *) = nullptr;\n\n    /// Function pointer to class_<..>::dealloc\n    void (*dealloc)(detail::value_and_holder &) = nullptr;\n\n    /// List of base classes of the newly created type\n    list bases;\n\n    /// Optional docstring\n    const char *doc = nullptr;\n\n    /// Custom metaclass (optional)\n    handle metaclass;\n\n    /// Multiple inheritance marker\n    bool multiple_inheritance : 1;\n\n    /// Does the class manage a __dict__?\n    bool dynamic_attr : 1;\n\n    /// Does the class implement the buffer protocol?\n    bool buffer_protocol : 1;\n\n    /// Is the default (unique_ptr) holder type used?\n    bool default_holder : 1;\n\n    /// Is the class definition local to the module shared object?\n    bool module_local : 1;\n\n    PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *)) {\n        auto base_info = detail::get_type_info(base, false);\n        if (!base_info) {\n            std::string tname(base.name());\n            detail::clean_type_id(tname);\n            pybind11_fail(\"generic_type: type \\\"\" + std::string(name) +\n                          \"\\\" referenced unknown base type \\\"\" + tname + \"\\\"\");\n        }\n\n        if (default_holder != base_info->default_holder) {\n            std::string 
tname(base.name());\n            detail::clean_type_id(tname);\n            pybind11_fail(\"generic_type: type \\\"\" + std::string(name) + \"\\\" \" +\n                    (default_holder ? \"does not have\" : \"has\") +\n                    \" a non-default holder type while its base \\\"\" + tname + \"\\\" \" +\n                    (base_info->default_holder ? \"does not\" : \"does\"));\n        }\n\n        bases.append((PyObject *) base_info->type);\n\n        if (base_info->type->tp_dictoffset != 0)\n            dynamic_attr = true;\n\n        if (caster)\n            base_info->implicit_casts.emplace_back(type, caster);\n    }\n};\n\ninline function_call::function_call(const function_record &f, handle p) :\n        func(f), parent(p) {\n    args.reserve(f.nargs);\n    args_convert.reserve(f.nargs);\n}\n\n/// Tag for a new-style `__init__` defined in `detail/init.h`\nstruct is_new_style_constructor { };\n\n/**\n * Partial template specializations to process custom attributes provided to\n * cpp_function_ and class_. These are either used to initialize the respective\n * fields in the type_record and function_record data structures or executed at\n * runtime to deal with custom call policies (e.g. 
keep_alive).\n */\ntemplate <typename T, typename SFINAE = void> struct process_attribute;\n\ntemplate <typename T> struct process_attribute_default {\n    /// Default implementation: do nothing\n    static void init(const T &, function_record *) { }\n    static void init(const T &, type_record *) { }\n    static void precall(function_call &) { }\n    static void postcall(function_call &, handle) { }\n};\n\n/// Process an attribute specifying the function's name\ntemplate <> struct process_attribute<name> : process_attribute_default<name> {\n    static void init(const name &n, function_record *r) { r->name = const_cast<char *>(n.value); }\n};\n\n/// Process an attribute specifying the function's docstring\ntemplate <> struct process_attribute<doc> : process_attribute_default<doc> {\n    static void init(const doc &n, function_record *r) { r->doc = const_cast<char *>(n.value); }\n};\n\n/// Process an attribute specifying the function's docstring (provided as a C-style string)\ntemplate <> struct process_attribute<const char *> : process_attribute_default<const char *> {\n    static void init(const char *d, function_record *r) { r->doc = const_cast<char *>(d); }\n    static void init(const char *d, type_record *r) { r->doc = const_cast<char *>(d); }\n};\ntemplate <> struct process_attribute<char *> : process_attribute<const char *> { };\n\n/// Process an attribute indicating the function's return value policy\ntemplate <> struct process_attribute<return_value_policy> : process_attribute_default<return_value_policy> {\n    static void init(const return_value_policy &p, function_record *r) { r->policy = p; }\n};\n\n/// Process an attribute which indicates that this is an overloaded function associated with a given sibling\ntemplate <> struct process_attribute<sibling> : process_attribute_default<sibling> {\n    static void init(const sibling &s, function_record *r) { r->sibling = s.value; }\n};\n\n/// Process an attribute which indicates that this function is a 
method\ntemplate <> struct process_attribute<is_method> : process_attribute_default<is_method> {\n    static void init(const is_method &s, function_record *r) { r->is_method = true; r->scope = s.class_; }\n};\n\n/// Process an attribute which indicates the parent scope of a method\ntemplate <> struct process_attribute<scope> : process_attribute_default<scope> {\n    static void init(const scope &s, function_record *r) { r->scope = s.value; }\n};\n\n/// Process an attribute which indicates that this function is an operator\ntemplate <> struct process_attribute<is_operator> : process_attribute_default<is_operator> {\n    static void init(const is_operator &, function_record *r) { r->is_operator = true; }\n};\n\ntemplate <> struct process_attribute<is_new_style_constructor> : process_attribute_default<is_new_style_constructor> {\n    static void init(const is_new_style_constructor &, function_record *r) { r->is_new_style_constructor = true; }\n};\n\n/// Process a keyword argument attribute (*without* a default value)\ntemplate <> struct process_attribute<arg> : process_attribute_default<arg> {\n    static void init(const arg &a, function_record *r) {\n        if (r->is_method && r->args.empty())\n            r->args.emplace_back(\"self\", nullptr, handle(), true /*convert*/, false /*none not allowed*/);\n        r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none);\n    }\n};\n\n/// Process a keyword argument attribute (*with* a default value)\ntemplate <> struct process_attribute<arg_v> : process_attribute_default<arg_v> {\n    static void init(const arg_v &a, function_record *r) {\n        if (r->is_method && r->args.empty())\n            r->args.emplace_back(\"self\", nullptr /*descr*/, handle() /*parent*/, true /*convert*/, false /*none not allowed*/);\n\n        if (!a.value) {\n#if !defined(NDEBUG)\n            std::string descr(\"'\");\n            if (a.name) descr += std::string(a.name) + \": \";\n            descr += a.type + 
\"'\";\n            if (r->is_method) {\n                if (r->name)\n                    descr += \" in method '\" + (std::string) str(r->scope) + \".\" + (std::string) r->name + \"'\";\n                else\n                    descr += \" in method of '\" + (std::string) str(r->scope) + \"'\";\n            } else if (r->name) {\n                descr += \" in function '\" + (std::string) r->name + \"'\";\n            }\n            pybind11_fail(\"arg(): could not convert default argument \"\n                          + descr + \" into a Python object (type not registered yet?)\");\n#else\n            pybind11_fail(\"arg(): could not convert default argument \"\n                          \"into a Python object (type not registered yet?). \"\n                          \"Compile in debug mode for more information.\");\n#endif\n        }\n        r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);\n    }\n};\n\n/// Process a parent class attribute.  
Single inheritance only (class_ itself already guarantees that)\ntemplate <typename T>\nstruct process_attribute<T, enable_if_t<is_pyobject<T>::value>> : process_attribute_default<handle> {\n    static void init(const handle &h, type_record *r) { r->bases.append(h); }\n};\n\n/// Process a parent class attribute (deprecated, does not support multiple inheritance)\ntemplate <typename T>\nstruct process_attribute<base<T>> : process_attribute_default<base<T>> {\n    static void init(const base<T> &, type_record *r) { r->add_base(typeid(T), nullptr); }\n};\n\n/// Process a multiple inheritance attribute\ntemplate <>\nstruct process_attribute<multiple_inheritance> : process_attribute_default<multiple_inheritance> {\n    static void init(const multiple_inheritance &, type_record *r) { r->multiple_inheritance = true; }\n};\n\ntemplate <>\nstruct process_attribute<dynamic_attr> : process_attribute_default<dynamic_attr> {\n    static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; }\n};\n\ntemplate <>\nstruct process_attribute<buffer_protocol> : process_attribute_default<buffer_protocol> {\n    static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; }\n};\n\ntemplate <>\nstruct process_attribute<metaclass> : process_attribute_default<metaclass> {\n    static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; }\n};\n\ntemplate <>\nstruct process_attribute<module_local> : process_attribute_default<module_local> {\n    static void init(const module_local &l, type_record *r) { r->module_local = l.value; }\n};\n\n/// Process an 'arithmetic' attribute for enums (does nothing here)\ntemplate <>\nstruct process_attribute<arithmetic> : process_attribute_default<arithmetic> {};\n\ntemplate <typename... 
Ts>\nstruct process_attribute<call_guard<Ts...>> : process_attribute_default<call_guard<Ts...>> { };\n\n/**\n * Process a keep_alive call policy -- invokes keep_alive_impl during the\n * pre-call handler if both Nurse, Patient != 0 and use the post-call handler\n * otherwise\n */\ntemplate <size_t Nurse, size_t Patient> struct process_attribute<keep_alive<Nurse, Patient>> : public process_attribute_default<keep_alive<Nurse, Patient>> {\n    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>\n    static void precall(function_call &call) { keep_alive_impl(Nurse, Patient, call, handle()); }\n    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>\n    static void postcall(function_call &, handle) { }\n    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>\n    static void precall(function_call &) { }\n    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>\n    static void postcall(function_call &call, handle ret) { keep_alive_impl(Nurse, Patient, call, ret); }\n};\n\n/// Recursively iterate over variadic template arguments\ntemplate <typename... Args> struct process_attributes {\n    static void init(const Args&... args, function_record *r) {\n        int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::init(args, r), 0) ... };\n        ignore_unused(unused);\n    }\n    static void init(const Args&... args, type_record *r) {\n        int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::init(args, r), 0) ... };\n        ignore_unused(unused);\n    }\n    static void precall(function_call &call) {\n        int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::precall(call), 0) ... 
};\n        ignore_unused(unused);\n    }\n    static void postcall(function_call &call, handle fn_ret) {\n        int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::postcall(call, fn_ret), 0) ... };\n        ignore_unused(unused);\n    }\n};\n\ntemplate <typename T>\nusing is_call_guard = is_instantiation<call_guard, T>;\n\n/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found)\ntemplate <typename... Extra>\nusing extract_guard_t = typename exactly_one_t<is_call_guard, call_guard<>, Extra...>::type;\n\n/// Check the number of named arguments at compile time\ntemplate <typename... Extra,\n          size_t named = constexpr_sum(std::is_base_of<arg, Extra>::value...),\n          size_t self  = constexpr_sum(std::is_same<is_method, Extra>::value...)>\nconstexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) {\n    return named == 0 || (self + named + has_args + has_kwargs) == nargs;\n}\n\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/buffer_info.h",
    "content": "/*\n    pybind11/buffer_info.h: Python buffer object interface\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"detail/common.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\n/// Information record describing a Python buffer object\nstruct buffer_info {\n    void *ptr = nullptr;          // Pointer to the underlying storage\n    ssize_t itemsize = 0;         // Size of individual items in bytes\n    ssize_t size = 0;             // Total number of entries\n    std::string format;           // For homogeneous buffers, this should be set to format_descriptor<T>::format()\n    ssize_t ndim = 0;             // Number of dimensions\n    std::vector<ssize_t> shape;   // Shape of the tensor (1 entry per dimension)\n    std::vector<ssize_t> strides; // Number of entries between adjacent entries (for each per dimension)\n\n    buffer_info() { }\n\n    buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim,\n                detail::any_container<ssize_t> shape_in, detail::any_container<ssize_t> strides_in)\n    : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim),\n      shape(std::move(shape_in)), strides(std::move(strides_in)) {\n        if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size())\n            pybind11_fail(\"buffer_info: ndim doesn't match shape and/or strides length\");\n        for (size_t i = 0; i < (size_t) ndim; ++i)\n            size *= shape[i];\n    }\n\n    template <typename T>\n    buffer_info(T *ptr, detail::any_container<ssize_t> shape_in, detail::any_container<ssize_t> strides_in)\n    : buffer_info(private_ctr_tag(), ptr, sizeof(T), format_descriptor<T>::format(), static_cast<ssize_t>(shape_in->size()), std::move(shape_in), std::move(strides_in)) { }\n\n    buffer_info(void *ptr, ssize_t itemsize, 
const std::string &format, ssize_t size)\n    : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}) { }\n\n    template <typename T>\n    buffer_info(T *ptr, ssize_t size)\n    : buffer_info(ptr, sizeof(T), format_descriptor<T>::format(), size) { }\n\n    explicit buffer_info(Py_buffer *view, bool ownview = true)\n    : buffer_info(view->buf, view->itemsize, view->format, view->ndim,\n            {view->shape, view->shape + view->ndim}, {view->strides, view->strides + view->ndim}) {\n        this->view = view;\n        this->ownview = ownview;\n    }\n\n    buffer_info(const buffer_info &) = delete;\n    buffer_info& operator=(const buffer_info &) = delete;\n\n    buffer_info(buffer_info &&other) {\n        (*this) = std::move(other);\n    }\n\n    buffer_info& operator=(buffer_info &&rhs) {\n        ptr = rhs.ptr;\n        itemsize = rhs.itemsize;\n        size = rhs.size;\n        format = std::move(rhs.format);\n        ndim = rhs.ndim;\n        shape = std::move(rhs.shape);\n        strides = std::move(rhs.strides);\n        std::swap(view, rhs.view);\n        std::swap(ownview, rhs.ownview);\n        return *this;\n    }\n\n    ~buffer_info() {\n        if (view && ownview) { PyBuffer_Release(view); delete view; }\n    }\n\nprivate:\n    struct private_ctr_tag { };\n\n    buffer_info(private_ctr_tag, void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim,\n                detail::any_container<ssize_t> &&shape_in, detail::any_container<ssize_t> &&strides_in)\n    : buffer_info(ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in)) { }\n\n    Py_buffer *view = nullptr;\n    bool ownview = false;\n};\n\nNAMESPACE_BEGIN(detail)\n\ntemplate <typename T, typename SFINAE = void> struct compare_buffer_info {\n    static bool compare(const buffer_info& b) {\n        return b.format == format_descriptor<T>::format() && b.itemsize == (ssize_t) sizeof(T);\n    }\n};\n\ntemplate <typename T> struct compare_buffer_info<T, 
detail::enable_if_t<std::is_integral<T>::value>> {\n    static bool compare(const buffer_info& b) {\n        return (size_t) b.itemsize == sizeof(T) && (b.format == format_descriptor<T>::value ||\n            ((sizeof(T) == sizeof(long)) && b.format == (std::is_unsigned<T>::value ? \"L\" : \"l\")) ||\n            ((sizeof(T) == sizeof(size_t)) && b.format == (std::is_unsigned<T>::value ? \"N\" : \"n\")));\n    }\n};\n\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/cast.h",
    "content": "/*\n    pybind11/cast.h: Partial template specializations to cast between\n    C++ and Python types\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pytypes.h\"\n#include \"detail/typeid.h\"\n#include \"detail/descr.h\"\n#include \"detail/internals.h\"\n#include <array>\n#include <limits>\n#include <tuple>\n#include <type_traits>\n\n#if defined(PYBIND11_CPP17)\n#  if defined(__has_include)\n#    if __has_include(<string_view>)\n#      define PYBIND11_HAS_STRING_VIEW\n#    endif\n#  elif defined(_MSC_VER)\n#    define PYBIND11_HAS_STRING_VIEW\n#  endif\n#endif\n#ifdef PYBIND11_HAS_STRING_VIEW\n#include <string_view>\n#endif\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\n/// A life support system for temporary objects created by `type_caster::load()`.\n/// Adding a patient will keep it alive up until the enclosing function returns.\nclass loader_life_support {\npublic:\n    /// A new patient frame is created when a function is entered\n    loader_life_support() {\n        get_internals().loader_patient_stack.push_back(nullptr);\n    }\n\n    /// ... and destroyed after it returns\n    ~loader_life_support() {\n        auto &stack = get_internals().loader_patient_stack;\n        if (stack.empty())\n            pybind11_fail(\"loader_life_support: internal error\");\n\n        auto ptr = stack.back();\n        stack.pop_back();\n        Py_CLEAR(ptr);\n\n        // A heuristic to reduce the stack's capacity (e.g. 
after long recursive calls)\n        if (stack.capacity() > 16 && stack.size() != 0 && stack.capacity() / stack.size() > 2)\n            stack.shrink_to_fit();\n    }\n\n    /// This can only be used inside a pybind11-bound function, either by `argument_loader`\n    /// at argument preparation time or by `py::cast()` at execution time.\n    PYBIND11_NOINLINE static void add_patient(handle h) {\n        auto &stack = get_internals().loader_patient_stack;\n        if (stack.empty())\n            throw cast_error(\"When called outside a bound function, py::cast() cannot \"\n                             \"do Python -> C++ conversions which require the creation \"\n                             \"of temporary values\");\n\n        auto &list_ptr = stack.back();\n        if (list_ptr == nullptr) {\n            list_ptr = PyList_New(1);\n            if (!list_ptr)\n                pybind11_fail(\"loader_life_support: error allocating list\");\n            PyList_SET_ITEM(list_ptr, 0, h.inc_ref().ptr());\n        } else {\n            auto result = PyList_Append(list_ptr, h.ptr());\n            if (result == -1)\n                pybind11_fail(\"loader_life_support: error adding patient\");\n        }\n    }\n};\n\n// Gets the cache entry for the given type, creating it if necessary.  The return value is the pair\n// returned by emplace, i.e. 
an iterator for the entry and a bool set to `true` if the entry was\n// just created.\ninline std::pair<decltype(internals::registered_types_py)::iterator, bool> all_type_info_get_cache(PyTypeObject *type);\n\n// Populates a just-created cache entry.\nPYBIND11_NOINLINE inline void all_type_info_populate(PyTypeObject *t, std::vector<type_info *> &bases) {\n    std::vector<PyTypeObject *> check;\n    for (handle parent : reinterpret_borrow<tuple>(t->tp_bases))\n        check.push_back((PyTypeObject *) parent.ptr());\n\n    auto const &type_dict = get_internals().registered_types_py;\n    for (size_t i = 0; i < check.size(); i++) {\n        auto type = check[i];\n        // Ignore Python2 old-style class super type:\n        if (!PyType_Check((PyObject *) type)) continue;\n\n        // Check `type` in the current set of registered python types:\n        auto it = type_dict.find(type);\n        if (it != type_dict.end()) {\n            // We found a cache entry for it, so it's either pybind-registered or has pre-computed\n            // pybind bases, but we have to make sure we haven't already seen the type(s) before: we\n            // want to follow Python/virtual C++ rules that there should only be one instance of a\n            // common base.\n            for (auto *tinfo : it->second) {\n                // NB: Could use a second set here, rather than doing a linear search, but since\n                // having a large number of immediate pybind11-registered types seems fairly\n                // unlikely, that probably isn't worthwhile.\n                bool found = false;\n                for (auto *known : bases) {\n                    if (known == tinfo) { found = true; break; }\n                }\n                if (!found) bases.push_back(tinfo);\n            }\n        }\n        else if (type->tp_bases) {\n            // It's some python type, so keep follow its bases classes to look for one or more\n            // registered types\n            if (i + 1 
== check.size()) {\n                // When we're at the end, we can pop off the current element to avoid growing\n                // `check` when adding just one base (which is typical--i.e. when there is no\n                // multiple inheritance)\n                check.pop_back();\n                i--;\n            }\n            for (handle parent : reinterpret_borrow<tuple>(type->tp_bases))\n                check.push_back((PyTypeObject *) parent.ptr());\n        }\n    }\n}\n\n/**\n * Extracts vector of type_info pointers of pybind-registered roots of the given Python type.  Will\n * be just 1 pybind type for the Python type of a pybind-registered class, or for any Python-side\n * derived class that uses single inheritance.  Will contain as many types as required for a Python\n * class that uses multiple inheritance to inherit (directly or indirectly) from multiple\n * pybind-registered classes.  Will be empty if neither the type nor any base classes are\n * pybind-registered.\n *\n * The value is cached for the lifetime of the Python type.\n */\ninline const std::vector<detail::type_info *> &all_type_info(PyTypeObject *type) {\n    auto ins = all_type_info_get_cache(type);\n    if (ins.second)\n        // New cache entry: populate it\n        all_type_info_populate(type, ins.first->second);\n\n    return ins.first->second;\n}\n\n/**\n * Gets a single pybind11 type info for a python type.  Returns nullptr if neither the type nor any\n * ancestors are pybind11-registered.  
Throws an exception if there are multiple bases--use\n * `all_type_info` instead if you want to support multiple bases.\n */\nPYBIND11_NOINLINE inline detail::type_info* get_type_info(PyTypeObject *type) {\n    auto &bases = all_type_info(type);\n    if (bases.size() == 0)\n        return nullptr;\n    if (bases.size() > 1)\n        pybind11_fail(\"pybind11::detail::get_type_info: type has multiple pybind11-registered bases\");\n    return bases.front();\n}\n\ninline detail::type_info *get_local_type_info(const std::type_index &tp) {\n    auto &locals = registered_local_types_cpp();\n    auto it = locals.find(tp);\n    if (it != locals.end())\n        return it->second;\n    return nullptr;\n}\n\ninline detail::type_info *get_global_type_info(const std::type_index &tp) {\n    auto &types = get_internals().registered_types_cpp;\n    auto it = types.find(tp);\n    if (it != types.end())\n        return it->second;\n    return nullptr;\n}\n\n/// Return the type info for a given C++ type; on lookup failure can either throw or return nullptr.\nPYBIND11_NOINLINE inline detail::type_info *get_type_info(const std::type_index &tp,\n                                                          bool throw_if_missing = false) {\n    if (auto ltype = get_local_type_info(tp))\n        return ltype;\n    if (auto gtype = get_global_type_info(tp))\n        return gtype;\n\n    if (throw_if_missing) {\n        std::string tname = tp.name();\n        detail::clean_type_id(tname);\n        pybind11_fail(\"pybind11::detail::get_type_info: unable to find type info for \\\"\" + tname + \"\\\"\");\n    }\n    return nullptr;\n}\n\nPYBIND11_NOINLINE inline handle get_type_handle(const std::type_info &tp, bool throw_if_missing) {\n    detail::type_info *type_info = get_type_info(tp, throw_if_missing);\n    return handle(type_info ? 
((PyObject *) type_info->type) : nullptr);\n}\n\nstruct value_and_holder {\n    instance *inst;\n    size_t index;\n    const detail::type_info *type;\n    void **vh;\n\n    // Main constructor for a found value/holder:\n    value_and_holder(instance *i, const detail::type_info *type, size_t vpos, size_t index) :\n        inst{i}, index{index}, type{type},\n        vh{inst->simple_layout ? inst->simple_value_holder : &inst->nonsimple.values_and_holders[vpos]}\n    {}\n\n    // Default constructor (used to signal a value-and-holder not found by get_value_and_holder())\n    value_and_holder() : inst{nullptr} {}\n\n    // Used for past-the-end iterator\n    value_and_holder(size_t index) : index{index} {}\n\n    template <typename V = void> V *&value_ptr() const {\n        return reinterpret_cast<V *&>(vh[0]);\n    }\n    // True if this `value_and_holder` has a non-null value pointer\n    explicit operator bool() const { return value_ptr(); }\n\n    template <typename H> H &holder() const {\n        return reinterpret_cast<H &>(vh[1]);\n    }\n    bool holder_constructed() const {\n        return inst->simple_layout\n            ? inst->simple_holder_constructed\n            : inst->nonsimple.status[index] & instance::status_holder_constructed;\n    }\n    void set_holder_constructed(bool v = true) {\n        if (inst->simple_layout)\n            inst->simple_holder_constructed = v;\n        else if (v)\n            inst->nonsimple.status[index] |= instance::status_holder_constructed;\n        else\n            inst->nonsimple.status[index] &= (uint8_t) ~instance::status_holder_constructed;\n    }\n    bool instance_registered() const {\n        return inst->simple_layout\n            ? 
inst->simple_instance_registered\n            : inst->nonsimple.status[index] & instance::status_instance_registered;\n    }\n    void set_instance_registered(bool v = true) {\n        if (inst->simple_layout)\n            inst->simple_instance_registered = v;\n        else if (v)\n            inst->nonsimple.status[index] |= instance::status_instance_registered;\n        else\n            inst->nonsimple.status[index] &= (uint8_t) ~instance::status_instance_registered;\n    }\n};\n\n// Container for accessing and iterating over an instance's values/holders\nstruct values_and_holders {\nprivate:\n    instance *inst;\n    using type_vec = std::vector<detail::type_info *>;\n    const type_vec &tinfo;\n\npublic:\n    values_and_holders(instance *inst) : inst{inst}, tinfo(all_type_info(Py_TYPE(inst))) {}\n\n    struct iterator {\n    private:\n        instance *inst;\n        const type_vec *types;\n        value_and_holder curr;\n        friend struct values_and_holders;\n        iterator(instance *inst, const type_vec *tinfo)\n            : inst{inst}, types{tinfo},\n            curr(inst /* instance */,\n                 types->empty() ? nullptr : (*types)[0] /* type info */,\n                 0, /* vpos: (non-simple types only): the first vptr comes first */\n                 0 /* index */)\n        {}\n        // Past-the-end iterator:\n        iterator(size_t end) : curr(end) {}\n    public:\n        bool operator==(const iterator &other) { return curr.index == other.curr.index; }\n        bool operator!=(const iterator &other) { return curr.index != other.curr.index; }\n        iterator &operator++() {\n            if (!inst->simple_layout)\n                curr.vh += 1 + (*types)[curr.index]->holder_size_in_ptrs;\n            ++curr.index;\n            curr.type = curr.index < types->size() ? 
(*types)[curr.index] : nullptr;\n            return *this;\n        }\n        value_and_holder &operator*() { return curr; }\n        value_and_holder *operator->() { return &curr; }\n    };\n\n    iterator begin() { return iterator(inst, &tinfo); }\n    iterator end() { return iterator(tinfo.size()); }\n\n    iterator find(const type_info *find_type) {\n        auto it = begin(), endit = end();\n        while (it != endit && it->type != find_type) ++it;\n        return it;\n    }\n\n    size_t size() { return tinfo.size(); }\n};\n\n/**\n * Extracts C++ value and holder pointer references from an instance (which may contain multiple\n * values/holders for python-side multiple inheritance) that match the given type.  Throws an error\n * if the given type (or ValueType, if omitted) is not a pybind11 base of the given instance.  If\n * `find_type` is omitted (or explicitly specified as nullptr) the first value/holder are returned,\n * regardless of type (and the resulting .type will be nullptr).\n *\n * The returned object should be short-lived: in particular, it must not outlive the called-upon\n * instance.\n */\nPYBIND11_NOINLINE inline value_and_holder instance::get_value_and_holder(const type_info *find_type /*= nullptr default in common.h*/, bool throw_if_missing /*= true in common.h*/) {\n    // Optimize common case:\n    if (!find_type || Py_TYPE(this) == find_type->type)\n        return value_and_holder(this, find_type, 0, 0);\n\n    detail::values_and_holders vhs(this);\n    auto it = vhs.find(find_type);\n    if (it != vhs.end())\n        return *it;\n\n    if (!throw_if_missing)\n        return value_and_holder();\n\n#if defined(NDEBUG)\n    pybind11_fail(\"pybind11::detail::instance::get_value_and_holder: \"\n            \"type is not a pybind11 base of the given instance \"\n            \"(compile in debug mode for type details)\");\n#else\n    pybind11_fail(\"pybind11::detail::instance::get_value_and_holder: `\" +\n            
std::string(find_type->type->tp_name) + \"' is not a pybind11 base of the given `\" +\n            std::string(Py_TYPE(this)->tp_name) + \"' instance\");\n#endif\n}\n\nPYBIND11_NOINLINE inline void instance::allocate_layout() {\n    auto &tinfo = all_type_info(Py_TYPE(this));\n\n    const size_t n_types = tinfo.size();\n\n    if (n_types == 0)\n        pybind11_fail(\"instance allocation failed: new instance has no pybind11-registered base types\");\n\n    simple_layout =\n        n_types == 1 && tinfo.front()->holder_size_in_ptrs <= instance_simple_holder_in_ptrs();\n\n    // Simple path: no python-side multiple inheritance, and a small-enough holder\n    if (simple_layout) {\n        simple_value_holder[0] = nullptr;\n        simple_holder_constructed = false;\n        simple_instance_registered = false;\n    }\n    else { // multiple base types or a too-large holder\n        // Allocate space to hold: [v1*][h1][v2*][h2]...[bb...] where [vN*] is a value pointer,\n        // [hN] is the (uninitialized) holder instance for value N, and [bb...] is a set of bool\n        // values that tracks whether each associated holder has been initialized.  Each [block] is\n        // padded, if necessary, to an integer multiple of sizeof(void *).\n        size_t space = 0;\n        for (auto t : tinfo) {\n            space += 1; // value pointer\n            space += t->holder_size_in_ptrs; // holder instance\n        }\n        size_t flags_at = space;\n        space += size_in_ptrs(n_types); // status bytes (holder_constructed and instance_registered)\n\n        // Allocate space for flags, values, and holders, and initialize it to 0 (flags and values,\n        // in particular, need to be 0).  
Use Python's memory allocation functions: in Python 3.6\n        // they default to using pymalloc, which is designed to be efficient for small allocations\n        // like the one we're doing here; in earlier versions (and for larger allocations) they are\n        // just wrappers around malloc.\n#if PY_VERSION_HEX >= 0x03050000\n        nonsimple.values_and_holders = (void **) PyMem_Calloc(space, sizeof(void *));\n        if (!nonsimple.values_and_holders) throw std::bad_alloc();\n#else\n        nonsimple.values_and_holders = (void **) PyMem_New(void *, space);\n        if (!nonsimple.values_and_holders) throw std::bad_alloc();\n        std::memset(nonsimple.values_and_holders, 0, space * sizeof(void *));\n#endif\n        nonsimple.status = reinterpret_cast<uint8_t *>(&nonsimple.values_and_holders[flags_at]);\n    }\n    owned = true;\n}\n\nPYBIND11_NOINLINE inline void instance::deallocate_layout() {\n    if (!simple_layout)\n        PyMem_Free(nonsimple.values_and_holders);\n}\n\nPYBIND11_NOINLINE inline bool isinstance_generic(handle obj, const std::type_info &tp) {\n    handle type = detail::get_type_handle(tp, false);\n    if (!type)\n        return false;\n    return isinstance(obj, type);\n}\n\nPYBIND11_NOINLINE inline std::string error_string() {\n    if (!PyErr_Occurred()) {\n        PyErr_SetString(PyExc_RuntimeError, \"Unknown internal error occurred\");\n        return \"Unknown internal error occurred\";\n    }\n\n    error_scope scope; // Preserve error state\n\n    std::string errorString;\n    if (scope.type) {\n        errorString += handle(scope.type).attr(\"__name__\").cast<std::string>();\n        errorString += \": \";\n    }\n    if (scope.value)\n        errorString += (std::string) str(scope.value);\n\n    PyErr_NormalizeException(&scope.type, &scope.value, &scope.trace);\n\n#if PY_MAJOR_VERSION >= 3\n    if (scope.trace != nullptr)\n        PyException_SetTraceback(scope.value, scope.trace);\n#endif\n\n#if !defined(PYPY_VERSION)\n    if 
(scope.trace) {\n        PyTracebackObject *trace = (PyTracebackObject *) scope.trace;\n\n        /* Get the deepest trace possible */\n        while (trace->tb_next)\n            trace = trace->tb_next;\n\n        PyFrameObject *frame = trace->tb_frame;\n        errorString += \"\\n\\nAt:\\n\";\n        while (frame) {\n            int lineno = PyFrame_GetLineNumber(frame);\n            errorString +=\n                \"  \" + handle(frame->f_code->co_filename).cast<std::string>() +\n                \"(\" + std::to_string(lineno) + \"): \" +\n                handle(frame->f_code->co_name).cast<std::string>() + \"\\n\";\n            frame = frame->f_back;\n        }\n    }\n#endif\n\n    return errorString;\n}\n\nPYBIND11_NOINLINE inline handle get_object_handle(const void *ptr, const detail::type_info *type ) {\n    auto &instances = get_internals().registered_instances;\n    auto range = instances.equal_range(ptr);\n    for (auto it = range.first; it != range.second; ++it) {\n        for (auto vh : values_and_holders(it->second)) {\n            if (vh.type == type)\n                return handle((PyObject *) it->second);\n        }\n    }\n    return handle();\n}\n\ninline PyThreadState *get_thread_state_unchecked() {\n#if defined(PYPY_VERSION)\n    return PyThreadState_GET();\n#elif PY_VERSION_HEX < 0x03000000\n    return _PyThreadState_Current;\n#elif PY_VERSION_HEX < 0x03050000\n    return (PyThreadState*) _Py_atomic_load_relaxed(&_PyThreadState_Current);\n#elif PY_VERSION_HEX < 0x03050200\n    return (PyThreadState*) _PyThreadState_Current.value;\n#else\n    return _PyThreadState_UncheckedGet();\n#endif\n}\n\n// Forward declarations\ninline void keep_alive_impl(handle nurse, handle patient);\ninline PyObject *make_new_instance(PyTypeObject *type);\n\nclass type_caster_generic {\npublic:\n    PYBIND11_NOINLINE type_caster_generic(const std::type_info &type_info)\n        : typeinfo(get_type_info(type_info)), cpptype(&type_info) { }\n\n    
type_caster_generic(const type_info *typeinfo)\n        : typeinfo(typeinfo), cpptype(typeinfo ? typeinfo->cpptype : nullptr) { }\n\n    bool load(handle src, bool convert) {\n        return load_impl<type_caster_generic>(src, convert);\n    }\n\n    PYBIND11_NOINLINE static handle cast(const void *_src, return_value_policy policy, handle parent,\n                                         const detail::type_info *tinfo,\n                                         void *(*copy_constructor)(const void *),\n                                         void *(*move_constructor)(const void *),\n                                         const void *existing_holder = nullptr) {\n        if (!tinfo) // no type info: error will be set already\n            return handle();\n\n        void *src = const_cast<void *>(_src);\n        if (src == nullptr)\n            return none().release();\n\n        auto it_instances = get_internals().registered_instances.equal_range(src);\n        for (auto it_i = it_instances.first; it_i != it_instances.second; ++it_i) {\n            for (auto instance_type : detail::all_type_info(Py_TYPE(it_i->second))) {\n                if (instance_type && same_type(*instance_type->cpptype, *tinfo->cpptype))\n                    return handle((PyObject *) it_i->second).inc_ref();\n            }\n        }\n\n        auto inst = reinterpret_steal<object>(make_new_instance(tinfo->type));\n        auto wrapper = reinterpret_cast<instance *>(inst.ptr());\n        wrapper->owned = false;\n        void *&valueptr = values_and_holders(wrapper).begin()->value_ptr();\n\n        switch (policy) {\n            case return_value_policy::automatic:\n            case return_value_policy::take_ownership:\n                valueptr = src;\n                wrapper->owned = true;\n                break;\n\n            case return_value_policy::automatic_reference:\n            case return_value_policy::reference:\n                valueptr = src;\n                wrapper->owned = 
false;\n                break;\n\n            case return_value_policy::copy:\n                if (copy_constructor)\n                    valueptr = copy_constructor(src);\n                else\n                    throw cast_error(\"return_value_policy = copy, but the \"\n                                     \"object is non-copyable!\");\n                wrapper->owned = true;\n                break;\n\n            case return_value_policy::move:\n                if (move_constructor)\n                    valueptr = move_constructor(src);\n                else if (copy_constructor)\n                    valueptr = copy_constructor(src);\n                else\n                    throw cast_error(\"return_value_policy = move, but the \"\n                                     \"object is neither movable nor copyable!\");\n                wrapper->owned = true;\n                break;\n\n            case return_value_policy::reference_internal:\n                valueptr = src;\n                wrapper->owned = false;\n                keep_alive_impl(inst, parent);\n                break;\n\n            default:\n                throw cast_error(\"unhandled return_value_policy: should not happen!\");\n        }\n\n        tinfo->init_instance(wrapper, existing_holder);\n\n        return inst.release();\n    }\n\n    // Base methods for generic caster; there are overridden in copyable_holder_caster\n    void load_value(value_and_holder &&v_h) {\n        auto *&vptr = v_h.value_ptr();\n        // Lazy allocation for unallocated values:\n        if (vptr == nullptr) {\n            auto *type = v_h.type ? 
v_h.type : typeinfo;\n            if (type->operator_new) {\n                vptr = type->operator_new(type->type_size);\n            } else {\n                #if defined(PYBIND11_CPP17)\n                    if (type->type_align > __STDCPP_DEFAULT_NEW_ALIGNMENT__)\n                        vptr = ::operator new(type->type_size,\n                                              (std::align_val_t) type->type_align);\n                    else\n                #endif\n                vptr = ::operator new(type->type_size);\n            }\n        }\n        value = vptr;\n    }\n    bool try_implicit_casts(handle src, bool convert) {\n        for (auto &cast : typeinfo->implicit_casts) {\n            type_caster_generic sub_caster(*cast.first);\n            if (sub_caster.load(src, convert)) {\n                value = cast.second(sub_caster.value);\n                return true;\n            }\n        }\n        return false;\n    }\n    bool try_direct_conversions(handle src) {\n        for (auto &converter : *typeinfo->direct_conversions) {\n            if (converter(src.ptr(), value))\n                return true;\n        }\n        return false;\n    }\n    void check_holder_compat() {}\n\n    PYBIND11_NOINLINE static void *local_load(PyObject *src, const type_info *ti) {\n        auto caster = type_caster_generic(ti);\n        if (caster.load(src, false))\n            return caster.value;\n        return nullptr;\n    }\n\n    /// Try to load with foreign typeinfo, if available. 
Used when there is no\n    /// native typeinfo, or when the native one wasn't able to produce a value.\n    PYBIND11_NOINLINE bool try_load_foreign_module_local(handle src) {\n        constexpr auto *local_key = PYBIND11_MODULE_LOCAL_ID;\n        const auto pytype = src.get_type();\n        if (!hasattr(pytype, local_key))\n            return false;\n\n        type_info *foreign_typeinfo = reinterpret_borrow<capsule>(getattr(pytype, local_key));\n        // Only consider this foreign loader if actually foreign and is a loader of the correct cpp type\n        if (foreign_typeinfo->module_local_load == &local_load\n            || (cpptype && !same_type(*cpptype, *foreign_typeinfo->cpptype)))\n            return false;\n\n        if (auto result = foreign_typeinfo->module_local_load(src.ptr(), foreign_typeinfo)) {\n            value = result;\n            return true;\n        }\n        return false;\n    }\n\n    // Implementation of `load`; this takes the type of `this` so that it can dispatch the relevant\n    // bits of code between here and copyable_holder_caster where the two classes need different\n    // logic (without having to resort to virtual inheritance).\n    template <typename ThisT>\n    PYBIND11_NOINLINE bool load_impl(handle src, bool convert) {\n        if (!src) return false;\n        if (!typeinfo) return try_load_foreign_module_local(src);\n        if (src.is_none()) {\n            // Defer accepting None to other overloads (if we aren't in convert mode):\n            if (!convert) return false;\n            value = nullptr;\n            return true;\n        }\n\n        auto &this_ = static_cast<ThisT &>(*this);\n        this_.check_holder_compat();\n\n        PyTypeObject *srctype = Py_TYPE(src.ptr());\n\n        // Case 1: If src is an exact type match for the target type then we can reinterpret_cast\n        // the instance's value pointer to the target type:\n        if (srctype == typeinfo->type) {\n            
this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());\n            return true;\n        }\n        // Case 2: We have a derived class\n        else if (PyType_IsSubtype(srctype, typeinfo->type)) {\n            auto &bases = all_type_info(srctype);\n            bool no_cpp_mi = typeinfo->simple_type;\n\n            // Case 2a: the python type is a Python-inherited derived class that inherits from just\n            // one simple (no MI) pybind11 class, or is an exact match, so the C++ instance is of\n            // the right type and we can use reinterpret_cast.\n            // (This is essentially the same as case 2b, but because not using multiple inheritance\n            // is extremely common, we handle it specially to avoid the loop iterator and type\n            // pointer lookup overhead)\n            if (bases.size() == 1 && (no_cpp_mi || bases.front()->type == typeinfo->type)) {\n                this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());\n                return true;\n            }\n            // Case 2b: the python type inherits from multiple C++ bases.  Check the bases to see if\n            // we can find an exact match (or, for a simple C++ type, an inherited match); if so, we\n            // can safely reinterpret_cast to the relevant pointer.\n            else if (bases.size() > 1) {\n                for (auto base : bases) {\n                    if (no_cpp_mi ? 
PyType_IsSubtype(base->type, typeinfo->type) : base->type == typeinfo->type) {\n                        this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder(base));\n                        return true;\n                    }\n                }\n            }\n\n            // Case 2c: C++ multiple inheritance is involved and we couldn't find an exact type match\n            // in the registered bases, above, so try implicit casting (needed for proper C++ casting\n            // when MI is involved).\n            if (this_.try_implicit_casts(src, convert))\n                return true;\n        }\n\n        // Perform an implicit conversion\n        if (convert) {\n            for (auto &converter : typeinfo->implicit_conversions) {\n                auto temp = reinterpret_steal<object>(converter(src.ptr(), typeinfo->type));\n                if (load_impl<ThisT>(temp, false)) {\n                    loader_life_support::add_patient(temp);\n                    return true;\n                }\n            }\n            if (this_.try_direct_conversions(src))\n                return true;\n        }\n\n        // Failed to match local typeinfo. Try again with global.\n        if (typeinfo->module_local) {\n            if (auto gtype = get_global_type_info(*typeinfo->cpptype)) {\n                typeinfo = gtype;\n                return load(src, false);\n            }\n        }\n\n        // Global typeinfo has precedence over foreign module_local\n        return try_load_foreign_module_local(src);\n    }\n\n\n    // Called to do type lookup and wrap the pointer and type in a pair when a dynamic_cast\n    // isn't needed or can't be used.  If the type is unknown, sets the error and returns a pair\n    // with .second = nullptr.  
(p.first = nullptr is not an error: it becomes None).\n    PYBIND11_NOINLINE static std::pair<const void *, const type_info *> src_and_type(\n            const void *src, const std::type_info &cast_type, const std::type_info *rtti_type = nullptr) {\n        if (auto *tpi = get_type_info(cast_type))\n            return {src, const_cast<const type_info *>(tpi)};\n\n        // Not found, set error:\n        std::string tname = rtti_type ? rtti_type->name() : cast_type.name();\n        detail::clean_type_id(tname);\n        std::string msg = \"Unregistered type : \" + tname;\n        PyErr_SetString(PyExc_TypeError, msg.c_str());\n        return {nullptr, nullptr};\n    }\n\n    const type_info *typeinfo = nullptr;\n    const std::type_info *cpptype = nullptr;\n    void *value = nullptr;\n};\n\n/**\n * Determine suitable casting operator for pointer-or-lvalue-casting type casters.  The type caster\n * needs to provide `operator T*()` and `operator T&()` operators.\n *\n * If the type supports moving the value away via an `operator T&&() &&` method, it should use\n * `movable_cast_op_type` instead.\n */\ntemplate <typename T>\nusing cast_op_type =\n    conditional_t<std::is_pointer<remove_reference_t<T>>::value,\n        typename std::add_pointer<intrinsic_t<T>>::type,\n        typename std::add_lvalue_reference<intrinsic_t<T>>::type>;\n\n/**\n * Determine suitable casting operator for a type caster with a movable value.  Such a type caster\n * needs to provide `operator T*()`, `operator T&()`, and `operator T&&() &&`.  
The latter will be\n * called in appropriate contexts where the value can be moved rather than copied.\n *\n * These operator are automatically provided when using the PYBIND11_TYPE_CASTER macro.\n */\ntemplate <typename T>\nusing movable_cast_op_type =\n    conditional_t<std::is_pointer<typename std::remove_reference<T>::type>::value,\n        typename std::add_pointer<intrinsic_t<T>>::type,\n    conditional_t<std::is_rvalue_reference<T>::value,\n        typename std::add_rvalue_reference<intrinsic_t<T>>::type,\n        typename std::add_lvalue_reference<intrinsic_t<T>>::type>>;\n\n// std::is_copy_constructible isn't quite enough: it lets std::vector<T> (and similar) through when\n// T is non-copyable, but code containing such a copy constructor fails to actually compile.\ntemplate <typename T, typename SFINAE = void> struct is_copy_constructible : std::is_copy_constructible<T> {};\n\n// Specialization for types that appear to be copy constructible but also look like stl containers\n// (we specifically check for: has `value_type` and `reference` with `reference = value_type&`): if\n// so, copy constructability depends on whether the value_type is copy constructible.\ntemplate <typename Container> struct is_copy_constructible<Container, enable_if_t<all_of<\n        std::is_copy_constructible<Container>,\n        std::is_same<typename Container::value_type &, typename Container::reference>\n    >::value>> : is_copy_constructible<typename Container::value_type> {};\n\n#if !defined(PYBIND11_CPP17)\n// Likewise for std::pair before C++17 (which mandates that the copy constructor not exist when the\n// two types aren't themselves copy constructible).\ntemplate <typename T1, typename T2> struct is_copy_constructible<std::pair<T1, T2>>\n    : all_of<is_copy_constructible<T1>, is_copy_constructible<T2>> {};\n#endif\n\nNAMESPACE_END(detail)\n\n// polymorphic_type_hook<itype>::get(src, tinfo) determines whether the object pointed\n// to by `src` actually is an instance of 
some class derived from `itype`.\n// If so, it sets `tinfo` to point to the std::type_info representing that derived\n// type, and returns a pointer to the start of the most-derived object of that type\n// (in which `src` is a subobject; this will be the same address as `src` in most\n// single inheritance cases). If not, or if `src` is nullptr, it simply returns `src`\n// and leaves `tinfo` at its default value of nullptr.\n//\n// The default polymorphic_type_hook just returns src. A specialization for polymorphic\n// types determines the runtime type of the passed object and adjusts the this-pointer\n// appropriately via dynamic_cast<void*>. This is what enables a C++ Animal* to appear\n// to Python as a Dog (if Dog inherits from Animal, Animal is polymorphic, Dog is\n// registered with pybind11, and this Animal is in fact a Dog).\n//\n// You may specialize polymorphic_type_hook yourself for types that want to appear\n// polymorphic to Python but do not use C++ RTTI. (This is a not uncommon pattern\n// in performance-sensitive applications, used most notably in LLVM.)\ntemplate <typename itype, typename SFINAE = void>\nstruct polymorphic_type_hook\n{\n    static const void *get(const itype *src, const std::type_info*&) { return src; }\n};\ntemplate <typename itype>\nstruct polymorphic_type_hook<itype, detail::enable_if_t<std::is_polymorphic<itype>::value>>\n{\n    static const void *get(const itype *src, const std::type_info*& type) {\n        type = src ? 
&typeid(*src) : nullptr;\n        return dynamic_cast<const void*>(src);\n    }\n};\n\nNAMESPACE_BEGIN(detail)\n\n/// Generic type caster for objects stored on the heap\ntemplate <typename type> class type_caster_base : public type_caster_generic {\n    using itype = intrinsic_t<type>;\n\npublic:\n    static constexpr auto name = _<type>();\n\n    type_caster_base() : type_caster_base(typeid(type)) { }\n    explicit type_caster_base(const std::type_info &info) : type_caster_generic(info) { }\n\n    static handle cast(const itype &src, return_value_policy policy, handle parent) {\n        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)\n            policy = return_value_policy::copy;\n        return cast(&src, policy, parent);\n    }\n\n    static handle cast(itype &&src, return_value_policy, handle parent) {\n        return cast(&src, return_value_policy::move, parent);\n    }\n\n    // Returns a (pointer, type_info) pair taking care of necessary type lookup for a\n    // polymorphic type (using RTTI by default, but can be overridden by specializing\n    // polymorphic_type_hook). If the instance isn't derived, returns the base version.\n    static std::pair<const void *, const type_info *> src_and_type(const itype *src) {\n        auto &cast_type = typeid(itype);\n        const std::type_info *instance_type = nullptr;\n        const void *vsrc = polymorphic_type_hook<itype>::get(src, instance_type);\n        if (instance_type && !same_type(cast_type, *instance_type)) {\n            // This is a base pointer to a derived type. If the derived type is registered\n            // with pybind11, we want to make the full derived object available.\n            // In the typical case where itype is polymorphic, we get the correct\n            // derived pointer (which may be != base pointer) by a dynamic_cast to\n            // most derived type. 
If itype is not polymorphic, we won't get here\n            // except via a user-provided specialization of polymorphic_type_hook,\n            // and the user has promised that no this-pointer adjustment is\n            // required in that case, so it's OK to use static_cast.\n            if (const auto *tpi = get_type_info(*instance_type))\n                return {vsrc, tpi};\n        }\n        // Otherwise we have either a nullptr, an `itype` pointer, or an unknown derived pointer, so\n        // don't do a cast\n        return type_caster_generic::src_and_type(src, cast_type, instance_type);\n    }\n\n    static handle cast(const itype *src, return_value_policy policy, handle parent) {\n        auto st = src_and_type(src);\n        return type_caster_generic::cast(\n            st.first, policy, parent, st.second,\n            make_copy_constructor(src), make_move_constructor(src));\n    }\n\n    static handle cast_holder(const itype *src, const void *holder) {\n        auto st = src_and_type(src);\n        return type_caster_generic::cast(\n            st.first, return_value_policy::take_ownership, {}, st.second,\n            nullptr, nullptr, holder);\n    }\n\n    template <typename T> using cast_op_type = detail::cast_op_type<T>;\n\n    operator itype*() { return (type *) value; }\n    operator itype&() { if (!value) throw reference_cast_error(); return *((itype *) value); }\n\nprotected:\n    using Constructor = void *(*)(const void *);\n\n    /* Only enabled when the types are {copy,move}-constructible *and* when the type\n       does not have a private operator new implementation. 
*/\n    template <typename T, typename = enable_if_t<is_copy_constructible<T>::value>>\n    static auto make_copy_constructor(const T *x) -> decltype(new T(*x), Constructor{}) {\n        return [](const void *arg) -> void * {\n            return new T(*reinterpret_cast<const T *>(arg));\n        };\n    }\n\n    template <typename T, typename = enable_if_t<std::is_move_constructible<T>::value>>\n    static auto make_move_constructor(const T *x) -> decltype(new T(std::move(*const_cast<T *>(x))), Constructor{}) {\n        return [](const void *arg) -> void * {\n            return new T(std::move(*const_cast<T *>(reinterpret_cast<const T *>(arg))));\n        };\n    }\n\n    static Constructor make_copy_constructor(...) { return nullptr; }\n    static Constructor make_move_constructor(...) { return nullptr; }\n};\n\ntemplate <typename type, typename SFINAE = void> class type_caster : public type_caster_base<type> { };\ntemplate <typename type> using make_caster = type_caster<intrinsic_t<type>>;\n\n// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T\ntemplate <typename T> typename make_caster<T>::template cast_op_type<T> cast_op(make_caster<T> &caster) {\n    return caster.operator typename make_caster<T>::template cast_op_type<T>();\n}\ntemplate <typename T> typename make_caster<T>::template cast_op_type<typename std::add_rvalue_reference<T>::type>\ncast_op(make_caster<T> &&caster) {\n    return std::move(caster).operator\n        typename make_caster<T>::template cast_op_type<typename std::add_rvalue_reference<T>::type>();\n}\n\ntemplate <typename type> class type_caster<std::reference_wrapper<type>> {\nprivate:\n    using caster_t = make_caster<type>;\n    caster_t subcaster;\n    using subcaster_cast_op_type = typename caster_t::template cast_op_type<type>;\n    static_assert(std::is_same<typename std::remove_const<type>::type &, subcaster_cast_op_type>::value,\n            \"std::reference_wrapper<T> caster requires T 
to have a caster with an `T &` operator\");\npublic:\n    bool load(handle src, bool convert) { return subcaster.load(src, convert); }\n    static constexpr auto name = caster_t::name;\n    static handle cast(const std::reference_wrapper<type> &src, return_value_policy policy, handle parent) {\n        // It is definitely wrong to take ownership of this pointer, so mask that rvp\n        if (policy == return_value_policy::take_ownership || policy == return_value_policy::automatic)\n            policy = return_value_policy::automatic_reference;\n        return caster_t::cast(&src.get(), policy, parent);\n    }\n    template <typename T> using cast_op_type = std::reference_wrapper<type>;\n    operator std::reference_wrapper<type>() { return subcaster.operator subcaster_cast_op_type&(); }\n};\n\n#define PYBIND11_TYPE_CASTER(type, py_name) \\\n    protected: \\\n        type value; \\\n    public: \\\n        static constexpr auto name = py_name; \\\n        template <typename T_, enable_if_t<std::is_same<type, remove_cv_t<T_>>::value, int> = 0> \\\n        static handle cast(T_ *src, return_value_policy policy, handle parent) { \\\n            if (!src) return none().release(); \\\n            if (policy == return_value_policy::take_ownership) { \\\n                auto h = cast(std::move(*src), policy, parent); delete src; return h; \\\n            } else { \\\n                return cast(*src, policy, parent); \\\n            } \\\n        } \\\n        operator type*() { return &value; } \\\n        operator type&() { return value; } \\\n        operator type&&() && { return std::move(value); } \\\n        template <typename T_> using cast_op_type = pybind11::detail::movable_cast_op_type<T_>\n\n\ntemplate <typename CharT> using is_std_char_type = any_of<\n    std::is_same<CharT, char>, /* std::string */\n    std::is_same<CharT, char16_t>, /* std::u16string */\n    std::is_same<CharT, char32_t>, /* std::u32string */\n    std::is_same<CharT, wchar_t> /* std::wstring 
*/\n>;\n\ntemplate <typename T>\nstruct type_caster<T, enable_if_t<std::is_arithmetic<T>::value && !is_std_char_type<T>::value>> {\n    using _py_type_0 = conditional_t<sizeof(T) <= sizeof(long), long, long long>;\n    using _py_type_1 = conditional_t<std::is_signed<T>::value, _py_type_0, typename std::make_unsigned<_py_type_0>::type>;\n    using py_type = conditional_t<std::is_floating_point<T>::value, double, _py_type_1>;\npublic:\n\n    bool load(handle src, bool convert) {\n        py_type py_value;\n\n        if (!src)\n            return false;\n\n        if (std::is_floating_point<T>::value) {\n            if (convert || PyFloat_Check(src.ptr()))\n                py_value = (py_type) PyFloat_AsDouble(src.ptr());\n            else\n                return false;\n        } else if (PyFloat_Check(src.ptr())) {\n            return false;\n        } else if (std::is_unsigned<py_type>::value) {\n            py_value = as_unsigned<py_type>(src.ptr());\n        } else { // signed integer:\n            py_value = sizeof(T) <= sizeof(long)\n                ? (py_type) PyLong_AsLong(src.ptr())\n                : (py_type) PYBIND11_LONG_AS_LONGLONG(src.ptr());\n        }\n\n        bool py_err = py_value == (py_type) -1 && PyErr_Occurred();\n        if (py_err || (std::is_integral<T>::value && sizeof(py_type) != sizeof(T) &&\n                       (py_value < (py_type) std::numeric_limits<T>::min() ||\n                        py_value > (py_type) std::numeric_limits<T>::max()))) {\n            bool type_error = py_err && PyErr_ExceptionMatches(\n#if PY_VERSION_HEX < 0x03000000 && !defined(PYPY_VERSION)\n                PyExc_SystemError\n#else\n                PyExc_TypeError\n#endif\n            );\n            PyErr_Clear();\n            if (type_error && convert && PyNumber_Check(src.ptr())) {\n                auto tmp = reinterpret_steal<object>(std::is_floating_point<T>::value\n                                                     ? 
PyNumber_Float(src.ptr())\n                                                     : PyNumber_Long(src.ptr()));\n                PyErr_Clear();\n                return load(tmp, false);\n            }\n            return false;\n        }\n\n        value = (T) py_value;\n        return true;\n    }\n\n    template<typename U = T>\n    static typename std::enable_if<std::is_floating_point<U>::value, handle>::type\n    cast(U src, return_value_policy /* policy */, handle /* parent */) {\n        return PyFloat_FromDouble((double) src);\n    }\n\n    template<typename U = T>\n    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value && (sizeof(U) <= sizeof(long)), handle>::type\n    cast(U src, return_value_policy /* policy */, handle /* parent */) {\n        return PYBIND11_LONG_FROM_SIGNED((long) src);\n    }\n\n    template<typename U = T>\n    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value && (sizeof(U) <= sizeof(unsigned long)), handle>::type\n    cast(U src, return_value_policy /* policy */, handle /* parent */) {\n        return PYBIND11_LONG_FROM_UNSIGNED((unsigned long) src);\n    }\n\n    template<typename U = T>\n    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value && (sizeof(U) > sizeof(long)), handle>::type\n    cast(U src, return_value_policy /* policy */, handle /* parent */) {\n        return PyLong_FromLongLong((long long) src);\n    }\n\n    template<typename U = T>\n    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value && (sizeof(U) > sizeof(unsigned long)), handle>::type\n    cast(U src, return_value_policy /* policy */, handle /* parent */) {\n        return PyLong_FromUnsignedLongLong((unsigned long long) src);\n    }\n\n    PYBIND11_TYPE_CASTER(T, _<std::is_integral<T>::value>(\"int\", \"float\"));\n};\n\ntemplate<typename T> struct void_caster {\npublic:\n    bool 
load(handle src, bool) {\n        if (src && src.is_none())\n            return true;\n        return false;\n    }\n    static handle cast(T, return_value_policy /* policy */, handle /* parent */) {\n        return none().inc_ref();\n    }\n    PYBIND11_TYPE_CASTER(T, _(\"None\"));\n};\n\ntemplate <> class type_caster<void_type> : public void_caster<void_type> {};\n\ntemplate <> class type_caster<void> : public type_caster<void_type> {\npublic:\n    using type_caster<void_type>::cast;\n\n    bool load(handle h, bool) {\n        if (!h) {\n            return false;\n        } else if (h.is_none()) {\n            value = nullptr;\n            return true;\n        }\n\n        /* Check if this is a capsule */\n        if (isinstance<capsule>(h)) {\n            value = reinterpret_borrow<capsule>(h);\n            return true;\n        }\n\n        /* Check if this is a C++ type */\n        auto &bases = all_type_info((PyTypeObject *) h.get_type().ptr());\n        if (bases.size() == 1) { // Only allowing loading from a single-value type\n            value = values_and_holders(reinterpret_cast<instance *>(h.ptr())).begin()->value_ptr();\n            return true;\n        }\n\n        /* Fail */\n        return false;\n    }\n\n    static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) {\n        if (ptr)\n            return capsule(ptr).release();\n        else\n            return none().inc_ref();\n    }\n\n    template <typename T> using cast_op_type = void*&;\n    operator void *&() { return value; }\n    static constexpr auto name = _(\"capsule\");\nprivate:\n    void *value = nullptr;\n};\n\ntemplate <> class type_caster<std::nullptr_t> : public void_caster<std::nullptr_t> { };\n\ntemplate <> class type_caster<bool> {\npublic:\n    bool load(handle src, bool convert) {\n        if (!src) return false;\n        else if (src.ptr() == Py_True) { value = true; return true; }\n        else if (src.ptr() == Py_False) { value = false; 
return true; }\n        else if (convert || !strcmp(\"numpy.bool_\", Py_TYPE(src.ptr())->tp_name)) {\n            // (allow non-implicit conversion for numpy booleans)\n\n            Py_ssize_t res = -1;\n            if (src.is_none()) {\n                res = 0;  // None is implicitly converted to False\n            }\n            #if defined(PYPY_VERSION)\n            // On PyPy, check that \"__bool__\" (or \"__nonzero__\" on Python 2.7) attr exists\n            else if (hasattr(src, PYBIND11_BOOL_ATTR)) {\n                res = PyObject_IsTrue(src.ptr());\n            }\n            #else\n            // Alternate approach for CPython: this does the same as the above, but optimized\n            // using the CPython API so as to avoid an unneeded attribute lookup.\n            else if (auto tp_as_number = src.ptr()->ob_type->tp_as_number) {\n                if (PYBIND11_NB_BOOL(tp_as_number)) {\n                    res = (*PYBIND11_NB_BOOL(tp_as_number))(src.ptr());\n                }\n            }\n            #endif\n            if (res == 0 || res == 1) {\n                value = (bool) res;\n                return true;\n            }\n        }\n        return false;\n    }\n    static handle cast(bool src, return_value_policy /* policy */, handle /* parent */) {\n        return handle(src ? 
Py_True : Py_False).inc_ref();\n    }\n    PYBIND11_TYPE_CASTER(bool, _(\"bool\"));\n};\n\n// Helper class for UTF-{8,16,32} C++ stl strings:\ntemplate <typename StringType, bool IsView = false> struct string_caster {\n    using CharT = typename StringType::value_type;\n\n    // Simplify life by being able to assume standard char sizes (the standard only guarantees\n    // minimums, but Python requires exact sizes)\n    static_assert(!std::is_same<CharT, char>::value || sizeof(CharT) == 1, \"Unsupported char size != 1\");\n    static_assert(!std::is_same<CharT, char16_t>::value || sizeof(CharT) == 2, \"Unsupported char16_t size != 2\");\n    static_assert(!std::is_same<CharT, char32_t>::value || sizeof(CharT) == 4, \"Unsupported char32_t size != 4\");\n    // wchar_t can be either 16 bits (Windows) or 32 (everywhere else)\n    static_assert(!std::is_same<CharT, wchar_t>::value || sizeof(CharT) == 2 || sizeof(CharT) == 4,\n            \"Unsupported wchar_t size != 2/4\");\n    static constexpr size_t UTF_N = 8 * sizeof(CharT);\n\n    bool load(handle src, bool) {\n#if PY_MAJOR_VERSION < 3\n        object temp;\n#endif\n        handle load_src = src;\n        if (!src) {\n            return false;\n        } else if (!PyUnicode_Check(load_src.ptr())) {\n#if PY_MAJOR_VERSION >= 3\n            return load_bytes(load_src);\n#else\n            if (sizeof(CharT) == 1) {\n                return load_bytes(load_src);\n            }\n\n            // The below is a guaranteed failure in Python 3 when PyUnicode_Check returns false\n            if (!PYBIND11_BYTES_CHECK(load_src.ptr()))\n                return false;\n\n            temp = reinterpret_steal<object>(PyUnicode_FromObject(load_src.ptr()));\n            if (!temp) { PyErr_Clear(); return false; }\n            load_src = temp;\n#endif\n        }\n\n        object utfNbytes = reinterpret_steal<object>(PyUnicode_AsEncodedString(\n            load_src.ptr(), UTF_N == 8 ? \"utf-8\" : UTF_N == 16 ? 
\"utf-16\" : \"utf-32\", nullptr));\n        if (!utfNbytes) { PyErr_Clear(); return false; }\n\n        const CharT *buffer = reinterpret_cast<const CharT *>(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr()));\n        size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT);\n        if (UTF_N > 8) { buffer++; length--; } // Skip BOM for UTF-16/32\n        value = StringType(buffer, length);\n\n        // If we're loading a string_view we need to keep the encoded Python object alive:\n        if (IsView)\n            loader_life_support::add_patient(utfNbytes);\n\n        return true;\n    }\n\n    static handle cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) {\n        const char *buffer = reinterpret_cast<const char *>(src.data());\n        ssize_t nbytes = ssize_t(src.size() * sizeof(CharT));\n        handle s = decode_utfN(buffer, nbytes);\n        if (!s) throw error_already_set();\n        return s;\n    }\n\n    PYBIND11_TYPE_CASTER(StringType, _(PYBIND11_STRING_NAME));\n\nprivate:\n    static handle decode_utfN(const char *buffer, ssize_t nbytes) {\n#if !defined(PYPY_VERSION)\n        return\n            UTF_N == 8  ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr) :\n            UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr) :\n                          PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr);\n#else\n        // PyPy seems to have multiple problems related to PyUnicode_UTF*: the UTF8 version\n        // sometimes segfaults for unknown reasons, while the UTF16 and 32 versions require a\n        // non-const char * arguments, which is also a nuisance, so bypass the whole thing by just\n        // passing the encoding as a string value, which works properly:\n        return PyUnicode_Decode(buffer, nbytes, UTF_N == 8 ? \"utf-8\" : UTF_N == 16 ? 
\"utf-16\" : \"utf-32\", nullptr);\n#endif\n    }\n\n    // When loading into a std::string or char*, accept a bytes object as-is (i.e.\n    // without any encoding/decoding attempt).  For other C++ char sizes this is a no-op.\n    // which supports loading a unicode from a str, doesn't take this path.\n    template <typename C = CharT>\n    bool load_bytes(enable_if_t<sizeof(C) == 1, handle> src) {\n        if (PYBIND11_BYTES_CHECK(src.ptr())) {\n            // We were passed a Python 3 raw bytes; accept it into a std::string or char*\n            // without any encoding attempt.\n            const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr());\n            if (bytes) {\n                value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr()));\n                return true;\n            }\n        }\n\n        return false;\n    }\n\n    template <typename C = CharT>\n    bool load_bytes(enable_if_t<sizeof(C) != 1, handle>) { return false; }\n};\n\ntemplate <typename CharT, class Traits, class Allocator>\nstruct type_caster<std::basic_string<CharT, Traits, Allocator>, enable_if_t<is_std_char_type<CharT>::value>>\n    : string_caster<std::basic_string<CharT, Traits, Allocator>> {};\n\n#ifdef PYBIND11_HAS_STRING_VIEW\ntemplate <typename CharT, class Traits>\nstruct type_caster<std::basic_string_view<CharT, Traits>, enable_if_t<is_std_char_type<CharT>::value>>\n    : string_caster<std::basic_string_view<CharT, Traits>, true> {};\n#endif\n\n// Type caster for C-style strings.  
We basically use a std::string type caster, but also add the\n// ability to use None as a nullptr char* (which the string caster doesn't allow).\ntemplate <typename CharT> struct type_caster<CharT, enable_if_t<is_std_char_type<CharT>::value>> {\n    using StringType = std::basic_string<CharT>;\n    using StringCaster = type_caster<StringType>;\n    StringCaster str_caster;\n    bool none = false;\n    CharT one_char = 0;\npublic:\n    bool load(handle src, bool convert) {\n        if (!src) return false;\n        if (src.is_none()) {\n            // Defer accepting None to other overloads (if we aren't in convert mode):\n            if (!convert) return false;\n            none = true;\n            return true;\n        }\n        return str_caster.load(src, convert);\n    }\n\n    static handle cast(const CharT *src, return_value_policy policy, handle parent) {\n        if (src == nullptr) return pybind11::none().inc_ref();\n        return StringCaster::cast(StringType(src), policy, parent);\n    }\n\n    static handle cast(CharT src, return_value_policy policy, handle parent) {\n        if (std::is_same<char, CharT>::value) {\n            handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr);\n            if (!s) throw error_already_set();\n            return s;\n        }\n        return StringCaster::cast(StringType(1, src), policy, parent);\n    }\n\n    operator CharT*() { return none ? 
nullptr : const_cast<CharT *>(static_cast<StringType &>(str_caster).c_str()); }\n    operator CharT&() {\n        if (none)\n            throw value_error(\"Cannot convert None to a character\");\n\n        auto &value = static_cast<StringType &>(str_caster);\n        size_t str_len = value.size();\n        if (str_len == 0)\n            throw value_error(\"Cannot convert empty string to a character\");\n\n        // If we're in UTF-8 mode, we have two possible failures: one for a unicode character that\n        // is too high, and one for multiple unicode characters (caught later), so we need to figure\n        // out how long the first encoded character is in bytes to distinguish between these two\n        // errors.  We also allow want to allow unicode characters U+0080 through U+00FF, as those\n        // can fit into a single char value.\n        if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) {\n            unsigned char v0 = static_cast<unsigned char>(value[0]);\n            size_t char0_bytes = !(v0 & 0x80) ? 1 : // low bits only: 0-127\n                (v0 & 0xE0) == 0xC0 ? 2 : // 0b110xxxxx - start of 2-byte sequence\n                (v0 & 0xF0) == 0xE0 ? 
3 : // 0b1110xxxx - start of 3-byte sequence\n                4; // 0b11110xxx - start of 4-byte sequence\n\n            if (char0_bytes == str_len) {\n                // If we have a 128-255 value, we can decode it into a single char:\n                if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0x110000xx 0x10xxxxxx\n                    one_char = static_cast<CharT>(((v0 & 3) << 6) + (static_cast<unsigned char>(value[1]) & 0x3F));\n                    return one_char;\n                }\n                // Otherwise we have a single character, but it's > U+00FF\n                throw value_error(\"Character code point not in range(0x100)\");\n            }\n        }\n\n        // UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a\n        // surrogate pair with total length 2 instantly indicates a range error (but not a \"your\n        // string was too long\" error).\n        else if (StringCaster::UTF_N == 16 && str_len == 2) {\n            one_char = static_cast<CharT>(value[0]);\n            if (one_char >= 0xD800 && one_char < 0xE000)\n                throw value_error(\"Character code point not in range(0x10000)\");\n        }\n\n        if (str_len != 1)\n            throw value_error(\"Expected a character, but multi-character string found\");\n\n        one_char = value[0];\n        return one_char;\n    }\n\n    static constexpr auto name = _(PYBIND11_STRING_NAME);\n    template <typename _T> using cast_op_type = pybind11::detail::cast_op_type<_T>;\n};\n\n// Base implementation for std::tuple and std::pair\ntemplate <template<typename...> class Tuple, typename... 
Ts> class tuple_caster {\n    using type = Tuple<Ts...>;\n    static constexpr auto size = sizeof...(Ts);\n    using indices = make_index_sequence<size>;\npublic:\n\n    bool load(handle src, bool convert) {\n        if (!isinstance<sequence>(src))\n            return false;\n        const auto seq = reinterpret_borrow<sequence>(src);\n        if (seq.size() != size)\n            return false;\n        return load_impl(seq, convert, indices{});\n    }\n\n    template <typename T>\n    static handle cast(T &&src, return_value_policy policy, handle parent) {\n        return cast_impl(std::forward<T>(src), policy, parent, indices{});\n    }\n\n    static constexpr auto name = _(\"Tuple[\") + concat(make_caster<Ts>::name...) + _(\"]\");\n\n    template <typename T> using cast_op_type = type;\n\n    operator type() & { return implicit_cast(indices{}); }\n    operator type() && { return std::move(*this).implicit_cast(indices{}); }\n\nprotected:\n    template <size_t... Is>\n    type implicit_cast(index_sequence<Is...>) & { return type(cast_op<Ts>(std::get<Is>(subcasters))...); }\n    template <size_t... Is>\n    type implicit_cast(index_sequence<Is...>) && { return type(cast_op<Ts>(std::move(std::get<Is>(subcasters)))...); }\n\n    static constexpr bool load_impl(const sequence &, bool, index_sequence<>) { return true; }\n\n    template <size_t... Is>\n    bool load_impl(const sequence &seq, bool convert, index_sequence<Is...>) {\n        for (bool r : {std::get<Is>(subcasters).load(seq[Is], convert)...})\n            if (!r)\n                return false;\n        return true;\n    }\n\n    /* Implementation: Convert a C++ tuple into a Python tuple */\n    template <typename T, size_t... 
Is>\n    static handle cast_impl(T &&src, return_value_policy policy, handle parent, index_sequence<Is...>) {\n        std::array<object, size> entries{{\n            reinterpret_steal<object>(make_caster<Ts>::cast(std::get<Is>(std::forward<T>(src)), policy, parent))...\n        }};\n        for (const auto &entry: entries)\n            if (!entry)\n                return handle();\n        tuple result(size);\n        int counter = 0;\n        for (auto & entry: entries)\n            PyTuple_SET_ITEM(result.ptr(), counter++, entry.release().ptr());\n        return result.release();\n    }\n\n    Tuple<make_caster<Ts>...> subcasters;\n};\n\ntemplate <typename T1, typename T2> class type_caster<std::pair<T1, T2>>\n    : public tuple_caster<std::pair, T1, T2> {};\n\ntemplate <typename... Ts> class type_caster<std::tuple<Ts...>>\n    : public tuple_caster<std::tuple, Ts...> {};\n\n/// Helper class which abstracts away certain actions. Users can provide specializations for\n/// custom holders, but it's only necessary if the type has a non-standard interface.\ntemplate <typename T>\nstruct holder_helper {\n    static auto get(const T &p) -> decltype(p.get()) { return p.get(); }\n};\n\n/// Type caster for holder types like std::shared_ptr, etc.\ntemplate <typename type, typename holder_type>\nstruct copyable_holder_caster : public type_caster_base<type> {\npublic:\n    using base = type_caster_base<type>;\n    static_assert(std::is_base_of<base, type_caster<type>>::value,\n            \"Holder classes are only supported for custom types\");\n    using base::base;\n    using base::cast;\n    using base::typeinfo;\n    using base::value;\n\n    bool load(handle src, bool convert) {\n        return base::template load_impl<copyable_holder_caster<type, holder_type>>(src, convert);\n    }\n\n    explicit operator type*() { return this->value; }\n    explicit operator type&() { return *(this->value); }\n    explicit operator holder_type*() { return std::addressof(holder); 
}\n\n    // Workaround for Intel compiler bug\n    // see pybind11 issue 94\n    #if defined(__ICC) || defined(__INTEL_COMPILER)\n    operator holder_type&() { return holder; }\n    #else\n    explicit operator holder_type&() { return holder; }\n    #endif\n\n    static handle cast(const holder_type &src, return_value_policy, handle) {\n        const auto *ptr = holder_helper<holder_type>::get(src);\n        return type_caster_base<type>::cast_holder(ptr, &src);\n    }\n\nprotected:\n    friend class type_caster_generic;\n    void check_holder_compat() {\n        if (typeinfo->default_holder)\n            throw cast_error(\"Unable to load a custom holder type from a default-holder instance\");\n    }\n\n    bool load_value(value_and_holder &&v_h) {\n        if (v_h.holder_constructed()) {\n            value = v_h.value_ptr();\n            holder = v_h.template holder<holder_type>();\n            return true;\n        } else {\n            throw cast_error(\"Unable to cast from non-held to held instance (T& to Holder<T>) \"\n#if defined(NDEBUG)\n                             \"(compile in debug mode for type information)\");\n#else\n                             \"of type '\" + type_id<holder_type>() + \"''\");\n#endif\n        }\n    }\n\n    template <typename T = holder_type, detail::enable_if_t<!std::is_constructible<T, const T &, type*>::value, int> = 0>\n    bool try_implicit_casts(handle, bool) { return false; }\n\n    template <typename T = holder_type, detail::enable_if_t<std::is_constructible<T, const T &, type*>::value, int> = 0>\n    bool try_implicit_casts(handle src, bool convert) {\n        for (auto &cast : typeinfo->implicit_casts) {\n            copyable_holder_caster sub_caster(*cast.first);\n            if (sub_caster.load(src, convert)) {\n                value = cast.second(sub_caster.value);\n                holder = holder_type(sub_caster.holder, (type *) value);\n                return true;\n            }\n        }\n        return false;\n   
 }\n\n    static bool try_direct_conversions(handle) { return false; }\n\n\n    holder_type holder;\n};\n\n/// Specialize for the common std::shared_ptr, so users don't need to\ntemplate <typename T>\nclass type_caster<std::shared_ptr<T>> : public copyable_holder_caster<T, std::shared_ptr<T>> { };\n\ntemplate <typename type, typename holder_type>\nstruct move_only_holder_caster {\n    static_assert(std::is_base_of<type_caster_base<type>, type_caster<type>>::value,\n            \"Holder classes are only supported for custom types\");\n\n    static handle cast(holder_type &&src, return_value_policy, handle) {\n        auto *ptr = holder_helper<holder_type>::get(src);\n        return type_caster_base<type>::cast_holder(ptr, std::addressof(src));\n    }\n    static constexpr auto name = type_caster_base<type>::name;\n};\n\ntemplate <typename type, typename deleter>\nclass type_caster<std::unique_ptr<type, deleter>>\n    : public move_only_holder_caster<type, std::unique_ptr<type, deleter>> { };\n\ntemplate <typename type, typename holder_type>\nusing type_caster_holder = conditional_t<is_copy_constructible<holder_type>::value,\n                                         copyable_holder_caster<type, holder_type>,\n                                         move_only_holder_caster<type, holder_type>>;\n\ntemplate <typename T, bool Value = false> struct always_construct_holder { static constexpr bool value = Value; };\n\n/// Create a specialization for custom holder types (silently ignores std::shared_ptr)\n#define PYBIND11_DECLARE_HOLDER_TYPE(type, holder_type, ...) 
\\\n    namespace pybind11 { namespace detail { \\\n    template <typename type> \\\n    struct always_construct_holder<holder_type> : always_construct_holder<void, ##__VA_ARGS__>  { }; \\\n    template <typename type> \\\n    class type_caster<holder_type, enable_if_t<!is_shared_ptr<holder_type>::value>> \\\n        : public type_caster_holder<type, holder_type> { }; \\\n    }}\n\n// PYBIND11_DECLARE_HOLDER_TYPE holder types:\ntemplate <typename base, typename holder> struct is_holder_type :\n    std::is_base_of<detail::type_caster_holder<base, holder>, detail::type_caster<holder>> {};\n// Specialization for always-supported unique_ptr holders:\ntemplate <typename base, typename deleter> struct is_holder_type<base, std::unique_ptr<base, deleter>> :\n    std::true_type {};\n\ntemplate <typename T> struct handle_type_name { static constexpr auto name = _<T>(); };\ntemplate <> struct handle_type_name<bytes> { static constexpr auto name = _(PYBIND11_BYTES_NAME); };\ntemplate <> struct handle_type_name<args> { static constexpr auto name = _(\"*args\"); };\ntemplate <> struct handle_type_name<kwargs> { static constexpr auto name = _(\"**kwargs\"); };\n\ntemplate <typename type>\nstruct pyobject_caster {\n    template <typename T = type, enable_if_t<std::is_same<T, handle>::value, int> = 0>\n    bool load(handle src, bool /* convert */) { value = src; return static_cast<bool>(value); }\n\n    template <typename T = type, enable_if_t<std::is_base_of<object, T>::value, int> = 0>\n    bool load(handle src, bool /* convert */) {\n        if (!isinstance<type>(src))\n            return false;\n        value = reinterpret_borrow<type>(src);\n        return true;\n    }\n\n    static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {\n        return src.inc_ref();\n    }\n    PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);\n};\n\ntemplate <typename T>\nclass type_caster<T, enable_if_t<is_pyobject<T>::value>> : public 
pyobject_caster<T> { };\n\n// Our conditions for enabling moving are quite restrictive:\n// At compile time:\n// - T needs to be a non-const, non-pointer, non-reference type\n// - type_caster<T>::operator T&() must exist\n// - the type must be move constructible (obviously)\n// At run-time:\n// - if the type is non-copy-constructible, the object must be the sole owner of the type (i.e. it\n//   must have ref_count() == 1)h\n// If any of the above are not satisfied, we fall back to copying.\ntemplate <typename T> using move_is_plain_type = satisfies_none_of<T,\n    std::is_void, std::is_pointer, std::is_reference, std::is_const\n>;\ntemplate <typename T, typename SFINAE = void> struct move_always : std::false_type {};\ntemplate <typename T> struct move_always<T, enable_if_t<all_of<\n    move_is_plain_type<T>,\n    negation<is_copy_constructible<T>>,\n    std::is_move_constructible<T>,\n    std::is_same<decltype(std::declval<make_caster<T>>().operator T&()), T&>\n>::value>> : std::true_type {};\ntemplate <typename T, typename SFINAE = void> struct move_if_unreferenced : std::false_type {};\ntemplate <typename T> struct move_if_unreferenced<T, enable_if_t<all_of<\n    move_is_plain_type<T>,\n    negation<move_always<T>>,\n    std::is_move_constructible<T>,\n    std::is_same<decltype(std::declval<make_caster<T>>().operator T&()), T&>\n>::value>> : std::true_type {};\ntemplate <typename T> using move_never = none_of<move_always<T>, move_if_unreferenced<T>>;\n\n// Detect whether returning a `type` from a cast on type's type_caster is going to result in a\n// reference or pointer to a local variable of the type_caster.  
Basically, only\n// non-reference/pointer `type`s and reference/pointers from a type_caster_generic are safe;\n// everything else returns a reference/pointer to a local variable.\ntemplate <typename type> using cast_is_temporary_value_reference = bool_constant<\n    (std::is_reference<type>::value || std::is_pointer<type>::value) &&\n    !std::is_base_of<type_caster_generic, make_caster<type>>::value &&\n    !std::is_same<intrinsic_t<type>, void>::value\n>;\n\n// When a value returned from a C++ function is being cast back to Python, we almost always want to\n// force `policy = move`, regardless of the return value policy the function/method was declared\n// with.\ntemplate <typename Return, typename SFINAE = void> struct return_value_policy_override {\n    static return_value_policy policy(return_value_policy p) { return p; }\n};\n\ntemplate <typename Return> struct return_value_policy_override<Return,\n        detail::enable_if_t<std::is_base_of<type_caster_generic, make_caster<Return>>::value, void>> {\n    static return_value_policy policy(return_value_policy p) {\n        return !std::is_lvalue_reference<Return>::value &&\n               !std::is_pointer<Return>::value\n                   ? 
return_value_policy::move : p;\n    }\n};\n\n// Basic python -> C++ casting; throws if casting fails\ntemplate <typename T, typename SFINAE> type_caster<T, SFINAE> &load_type(type_caster<T, SFINAE> &conv, const handle &handle) {\n    if (!conv.load(handle, true)) {\n#if defined(NDEBUG)\n        throw cast_error(\"Unable to cast Python instance to C++ type (compile in debug mode for details)\");\n#else\n        throw cast_error(\"Unable to cast Python instance of type \" +\n            (std::string) str(handle.get_type()) + \" to C++ type '\" + type_id<T>() + \"'\");\n#endif\n    }\n    return conv;\n}\n// Wrapper around the above that also constructs and returns a type_caster\ntemplate <typename T> make_caster<T> load_type(const handle &handle) {\n    make_caster<T> conv;\n    load_type(conv, handle);\n    return conv;\n}\n\nNAMESPACE_END(detail)\n\n// pytype -> C++ type\ntemplate <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>\nT cast(const handle &handle) {\n    using namespace detail;\n    static_assert(!cast_is_temporary_value_reference<T>::value,\n            \"Unable to cast type to reference: value is local to type caster\");\n    return cast_op<T>(load_type<T>(handle));\n}\n\n// pytype -> pytype (calls converting constructor)\ntemplate <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>\nT cast(const handle &handle) { return T(reinterpret_borrow<object>(handle)); }\n\n// C++ type -> py::object\ntemplate <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>\nobject cast(const T &value, return_value_policy policy = return_value_policy::automatic_reference,\n            handle parent = handle()) {\n    if (policy == return_value_policy::automatic)\n        policy = std::is_pointer<T>::value ? return_value_policy::take_ownership : return_value_policy::copy;\n    else if (policy == return_value_policy::automatic_reference)\n        policy = std::is_pointer<T>::value ? 
return_value_policy::reference : return_value_policy::copy;\n    return reinterpret_steal<object>(detail::make_caster<T>::cast(value, policy, parent));\n}\n\ntemplate <typename T> T handle::cast() const { return pybind11::cast<T>(*this); }\ntemplate <> inline void handle::cast() const { return; }\n\ntemplate <typename T>\ndetail::enable_if_t<!detail::move_never<T>::value, T> move(object &&obj) {\n    if (obj.ref_count() > 1)\n#if defined(NDEBUG)\n        throw cast_error(\"Unable to cast Python instance to C++ rvalue: instance has multiple references\"\n            \" (compile in debug mode for details)\");\n#else\n        throw cast_error(\"Unable to move from Python \" + (std::string) str(obj.get_type()) +\n                \" instance to C++ \" + type_id<T>() + \" instance: instance has multiple references\");\n#endif\n\n    // Move into a temporary and return that, because the reference may be a local value of `conv`\n    T ret = std::move(detail::load_type<T>(obj).operator T&());\n    return ret;\n}\n\n// Calling cast() on an rvalue calls pybind::cast with the object rvalue, which does:\n// - If we have to move (because T has no copy constructor), do it.  
This will fail if the moved\n//   object has multiple references, but trying to copy will fail to compile.\n// - If both movable and copyable, check ref count: if 1, move; otherwise copy\n// - Otherwise (not movable), copy.\ntemplate <typename T> detail::enable_if_t<detail::move_always<T>::value, T> cast(object &&object) {\n    return move<T>(std::move(object));\n}\ntemplate <typename T> detail::enable_if_t<detail::move_if_unreferenced<T>::value, T> cast(object &&object) {\n    if (object.ref_count() > 1)\n        return cast<T>(object);\n    else\n        return move<T>(std::move(object));\n}\ntemplate <typename T> detail::enable_if_t<detail::move_never<T>::value, T> cast(object &&object) {\n    return cast<T>(object);\n}\n\ntemplate <typename T> T object::cast() const & { return pybind11::cast<T>(*this); }\ntemplate <typename T> T object::cast() && { return pybind11::cast<T>(std::move(*this)); }\ntemplate <> inline void object::cast() const & { return; }\ntemplate <> inline void object::cast() && { return; }\n\nNAMESPACE_BEGIN(detail)\n\n// Declared in pytypes.h:\ntemplate <typename T, enable_if_t<!is_pyobject<T>::value, int>>\nobject object_or_cast(T &&o) { return pybind11::cast(std::forward<T>(o)); }\n\nstruct overload_unused {}; // Placeholder type for the unneeded (and dead code) static variable in the OVERLOAD_INT macro\ntemplate <typename ret_type> using overload_caster_t = conditional_t<\n    cast_is_temporary_value_reference<ret_type>::value, make_caster<ret_type>, overload_unused>;\n\n// Trampoline use: for reference/pointer types to value-converted values, we do a value cast, then\n// store the result in the given variable.  
For other types, this is a no-op.\ntemplate <typename T> enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&o, make_caster<T> &caster) {\n    return cast_op<T>(load_type(caster, o));\n}\ntemplate <typename T> enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&, overload_unused &) {\n    pybind11_fail(\"Internal error: cast_ref fallback invoked\"); }\n\n// Trampoline use: Having a pybind11::cast with an invalid reference type is going to static_assert, even\n// though if it's in dead code, so we provide a \"trampoline\" to pybind11::cast that only does anything in\n// cases where pybind11::cast is valid.\ntemplate <typename T> enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_safe(object &&o) {\n    return pybind11::cast<T>(std::move(o)); }\ntemplate <typename T> enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_safe(object &&) {\n    pybind11_fail(\"Internal error: cast_safe fallback invoked\"); }\ntemplate <> inline void cast_safe<void>(object &&) {}\n\nNAMESPACE_END(detail)\n\ntemplate <return_value_policy policy = return_value_policy::automatic_reference>\ntuple make_tuple() { return tuple(0); }\n\ntemplate <return_value_policy policy = return_value_policy::automatic_reference,\n          typename... Args> tuple make_tuple(Args&&... args_) {\n    constexpr size_t size = sizeof...(Args);\n    std::array<object, size> args {\n        { reinterpret_steal<object>(detail::make_caster<Args>::cast(\n            std::forward<Args>(args_), policy, nullptr))... 
}\n    };\n    for (size_t i = 0; i < args.size(); i++) {\n        if (!args[i]) {\n#if defined(NDEBUG)\n            throw cast_error(\"make_tuple(): unable to convert arguments to Python object (compile in debug mode for details)\");\n#else\n            std::array<std::string, size> argtypes { {type_id<Args>()...} };\n            throw cast_error(\"make_tuple(): unable to convert argument of type '\" +\n                argtypes[i] + \"' to Python object\");\n#endif\n        }\n    }\n    tuple result(size);\n    int counter = 0;\n    for (auto &arg_value : args)\n        PyTuple_SET_ITEM(result.ptr(), counter++, arg_value.release().ptr());\n    return result;\n}\n\n/// \\ingroup annotations\n/// Annotation for arguments\nstruct arg {\n    /// Constructs an argument with the name of the argument; if null or omitted, this is a positional argument.\n    constexpr explicit arg(const char *name = nullptr) : name(name), flag_noconvert(false), flag_none(true) { }\n    /// Assign a value to this argument\n    template <typename T> arg_v operator=(T &&value) const;\n    /// Indicate that the type should not be converted in the type caster\n    arg &noconvert(bool flag = true) { flag_noconvert = flag; return *this; }\n    /// Indicates that the argument should/shouldn't allow None (e.g. 
for nullable pointer args)\n    arg &none(bool flag = true) { flag_none = flag; return *this; }\n\n    const char *name; ///< If non-null, this is a named kwargs argument\n    bool flag_noconvert : 1; ///< If set, do not allow conversion (requires a supporting type caster!)\n    bool flag_none : 1; ///< If set (the default), allow None to be passed to this argument\n};\n\n/// \\ingroup annotations\n/// Annotation for arguments with values\nstruct arg_v : arg {\nprivate:\n    template <typename T>\n    arg_v(arg &&base, T &&x, const char *descr = nullptr)\n        : arg(base),\n          value(reinterpret_steal<object>(\n              detail::make_caster<T>::cast(x, return_value_policy::automatic, {})\n          )),\n          descr(descr)\n#if !defined(NDEBUG)\n        , type(type_id<T>())\n#endif\n    { }\n\npublic:\n    /// Direct construction with name, default, and description\n    template <typename T>\n    arg_v(const char *name, T &&x, const char *descr = nullptr)\n        : arg_v(arg(name), std::forward<T>(x), descr) { }\n\n    /// Called internally when invoking `py::arg(\"a\") = value`\n    template <typename T>\n    arg_v(const arg &base, T &&x, const char *descr = nullptr)\n        : arg_v(arg(base), std::forward<T>(x), descr) { }\n\n    /// Same as `arg::noconvert()`, but returns *this as arg_v&, not arg&\n    arg_v &noconvert(bool flag = true) { arg::noconvert(flag); return *this; }\n\n    /// Same as `arg::nonone()`, but returns *this as arg_v&, not arg&\n    arg_v &none(bool flag = true) { arg::none(flag); return *this; }\n\n    /// The default value\n    object value;\n    /// The (optional) description of the default value\n    const char *descr;\n#if !defined(NDEBUG)\n    /// The C++ type name of the default value (only available when compiled in debug mode)\n    std::string type;\n#endif\n};\n\ntemplate <typename T>\narg_v arg::operator=(T &&value) const { return {std::move(*this), std::forward<T>(value)}; }\n\n/// Alias for backward 
compatibility -- to be removed in version 2.0\ntemplate <typename /*unused*/> using arg_t = arg_v;\n\ninline namespace literals {\n/** \\rst\n    String literal version of `arg`\n \\endrst */\nconstexpr arg operator\"\" _a(const char *name, size_t) { return arg(name); }\n}\n\nNAMESPACE_BEGIN(detail)\n\n// forward declaration (definition in attr.h)\nstruct function_record;\n\n/// Internal data associated with a single function call\nstruct function_call {\n    function_call(const function_record &f, handle p); // Implementation in attr.h\n\n    /// The function data:\n    const function_record &func;\n\n    /// Arguments passed to the function:\n    std::vector<handle> args;\n\n    /// The `convert` value the arguments should be loaded with\n    std::vector<bool> args_convert;\n\n    /// Extra references for the optional `py::args` and/or `py::kwargs` arguments (which, if\n    /// present, are also in `args` but without a reference).\n    object args_ref, kwargs_ref;\n\n    /// The parent, if any\n    handle parent;\n\n    /// If this is a call to an initializer, this argument contains `self`\n    handle init_self;\n};\n\n\n/// Helper class which loads arguments for C++ functions called from Python\ntemplate <typename... 
Args>\nclass argument_loader {\n    using indices = make_index_sequence<sizeof...(Args)>;\n\n    template <typename Arg> using argument_is_args   = std::is_same<intrinsic_t<Arg>, args>;\n    template <typename Arg> using argument_is_kwargs = std::is_same<intrinsic_t<Arg>, kwargs>;\n    // Get args/kwargs argument positions relative to the end of the argument list:\n    static constexpr auto args_pos = constexpr_first<argument_is_args, Args...>() - (int) sizeof...(Args),\n                        kwargs_pos = constexpr_first<argument_is_kwargs, Args...>() - (int) sizeof...(Args);\n\n    static constexpr bool args_kwargs_are_last = kwargs_pos >= - 1 && args_pos >= kwargs_pos - 1;\n\n    static_assert(args_kwargs_are_last, \"py::args/py::kwargs are only permitted as the last argument(s) of a function\");\n\npublic:\n    static constexpr bool has_kwargs = kwargs_pos < 0;\n    static constexpr bool has_args = args_pos < 0;\n\n    static constexpr auto arg_names = concat(type_descr(make_caster<Args>::name)...);\n\n    bool load_args(function_call &call) {\n        return load_impl_sequence(call, indices{});\n    }\n\n    template <typename Return, typename Guard, typename Func>\n    enable_if_t<!std::is_void<Return>::value, Return> call(Func &&f) && {\n        return std::move(*this).template call_impl<Return>(std::forward<Func>(f), indices{}, Guard{});\n    }\n\n    template <typename Return, typename Guard, typename Func>\n    enable_if_t<std::is_void<Return>::value, void_type> call(Func &&f) && {\n        std::move(*this).template call_impl<Return>(std::forward<Func>(f), indices{}, Guard{});\n        return void_type();\n    }\n\nprivate:\n\n    static bool load_impl_sequence(function_call &, index_sequence<>) { return true; }\n\n    template <size_t... 
Is>\n    bool load_impl_sequence(function_call &call, index_sequence<Is...>) {\n        for (bool r : {std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])...})\n            if (!r)\n                return false;\n        return true;\n    }\n\n    template <typename Return, typename Func, size_t... Is, typename Guard>\n    Return call_impl(Func &&f, index_sequence<Is...>, Guard &&) {\n        return std::forward<Func>(f)(cast_op<Args>(std::move(std::get<Is>(argcasters)))...);\n    }\n\n    std::tuple<make_caster<Args>...> argcasters;\n};\n\n/// Helper class which collects only positional arguments for a Python function call.\n/// A fancier version below can collect any argument, but this one is optimal for simple calls.\ntemplate <return_value_policy policy>\nclass simple_collector {\npublic:\n    template <typename... Ts>\n    explicit simple_collector(Ts &&...values)\n        : m_args(pybind11::make_tuple<policy>(std::forward<Ts>(values)...)) { }\n\n    const tuple &args() const & { return m_args; }\n    dict kwargs() const { return {}; }\n\n    tuple args() && { return std::move(m_args); }\n\n    /// Call a Python function and pass the collected arguments\n    object call(PyObject *ptr) const {\n        PyObject *result = PyObject_CallObject(ptr, m_args.ptr());\n        if (!result)\n            throw error_already_set();\n        return reinterpret_steal<object>(result);\n    }\n\nprivate:\n    tuple m_args;\n};\n\n/// Helper class which collects positional, keyword, * and ** arguments for a Python function call\ntemplate <return_value_policy policy>\nclass unpacking_collector {\npublic:\n    template <typename... Ts>\n    explicit unpacking_collector(Ts &&...values) {\n        // Tuples aren't (easily) resizable so a list is needed for collection,\n        // but the actual function call strictly requires a tuple.\n        auto args_list = list();\n        int _[] = { 0, (process(args_list, std::forward<Ts>(values)), 0)... 
};\n        ignore_unused(_);\n\n        m_args = std::move(args_list);\n    }\n\n    const tuple &args() const & { return m_args; }\n    const dict &kwargs() const & { return m_kwargs; }\n\n    tuple args() && { return std::move(m_args); }\n    dict kwargs() && { return std::move(m_kwargs); }\n\n    /// Call a Python function and pass the collected arguments\n    object call(PyObject *ptr) const {\n        PyObject *result = PyObject_Call(ptr, m_args.ptr(), m_kwargs.ptr());\n        if (!result)\n            throw error_already_set();\n        return reinterpret_steal<object>(result);\n    }\n\nprivate:\n    template <typename T>\n    void process(list &args_list, T &&x) {\n        auto o = reinterpret_steal<object>(detail::make_caster<T>::cast(std::forward<T>(x), policy, {}));\n        if (!o) {\n#if defined(NDEBUG)\n            argument_cast_error();\n#else\n            argument_cast_error(std::to_string(args_list.size()), type_id<T>());\n#endif\n        }\n        args_list.append(o);\n    }\n\n    void process(list &args_list, detail::args_proxy ap) {\n        for (const auto &a : ap)\n            args_list.append(a);\n    }\n\n    void process(list &/*args_list*/, arg_v a) {\n        if (!a.name)\n#if defined(NDEBUG)\n            nameless_argument_error();\n#else\n            nameless_argument_error(a.type);\n#endif\n\n        if (m_kwargs.contains(a.name)) {\n#if defined(NDEBUG)\n            multiple_values_error();\n#else\n            multiple_values_error(a.name);\n#endif\n        }\n        if (!a.value) {\n#if defined(NDEBUG)\n            argument_cast_error();\n#else\n            argument_cast_error(a.name, a.type);\n#endif\n        }\n        m_kwargs[a.name] = a.value;\n    }\n\n    void process(list &/*args_list*/, detail::kwargs_proxy kp) {\n        if (!kp)\n            return;\n        for (const auto &k : reinterpret_borrow<dict>(kp)) {\n            if (m_kwargs.contains(k.first)) {\n#if defined(NDEBUG)\n                
multiple_values_error();\n#else\n                multiple_values_error(str(k.first));\n#endif\n            }\n            m_kwargs[k.first] = k.second;\n        }\n    }\n\n    [[noreturn]] static void nameless_argument_error() {\n        throw type_error(\"Got kwargs without a name; only named arguments \"\n                         \"may be passed via py::arg() to a python function call. \"\n                         \"(compile in debug mode for details)\");\n    }\n    [[noreturn]] static void nameless_argument_error(std::string type) {\n        throw type_error(\"Got kwargs without a name of type '\" + type + \"'; only named \"\n                         \"arguments may be passed via py::arg() to a python function call. \");\n    }\n    [[noreturn]] static void multiple_values_error() {\n        throw type_error(\"Got multiple values for keyword argument \"\n                         \"(compile in debug mode for details)\");\n    }\n\n    [[noreturn]] static void multiple_values_error(std::string name) {\n        throw type_error(\"Got multiple values for keyword argument '\" + name + \"'\");\n    }\n\n    [[noreturn]] static void argument_cast_error() {\n        throw cast_error(\"Unable to convert call argument to Python object \"\n                         \"(compile in debug mode for details)\");\n    }\n\n    [[noreturn]] static void argument_cast_error(std::string name, std::string type) {\n        throw cast_error(\"Unable to convert call argument '\" + name\n                         + \"' of type '\" + type + \"' to Python object\");\n    }\n\nprivate:\n    tuple m_args;\n    dict m_kwargs;\n};\n\n/// Collect only positional arguments for a Python function call\ntemplate <return_value_policy policy, typename... 
Args,\n          typename = enable_if_t<all_of<is_positional<Args>...>::value>>\nsimple_collector<policy> collect_arguments(Args &&...args) {\n    return simple_collector<policy>(std::forward<Args>(args)...);\n}\n\n/// Collect all arguments, including keywords and unpacking (only instantiated when needed)\ntemplate <return_value_policy policy, typename... Args,\n          typename = enable_if_t<!all_of<is_positional<Args>...>::value>>\nunpacking_collector<policy> collect_arguments(Args &&...args) {\n    // Following argument order rules for generalized unpacking according to PEP 448\n    static_assert(\n        constexpr_last<is_positional, Args...>() < constexpr_first<is_keyword_or_ds, Args...>()\n        && constexpr_last<is_s_unpacking, Args...>() < constexpr_first<is_ds_unpacking, Args...>(),\n        \"Invalid function call: positional args must precede keywords and ** unpacking; \"\n        \"* unpacking must precede ** unpacking\"\n    );\n    return unpacking_collector<policy>(std::forward<Args>(args)...);\n}\n\ntemplate <typename Derived>\ntemplate <return_value_policy policy, typename... Args>\nobject object_api<Derived>::operator()(Args &&...args) const {\n    return detail::collect_arguments<policy>(std::forward<Args>(args)...).call(derived().ptr());\n}\n\ntemplate <typename Derived>\ntemplate <return_value_policy policy, typename... Args>\nobject object_api<Derived>::call(Args &&...args) const {\n    return operator()<policy>(std::forward<Args>(args)...);\n}\n\nNAMESPACE_END(detail)\n\n#define PYBIND11_MAKE_OPAQUE(...) \\\n    namespace pybind11 { namespace detail { \\\n        template<> class type_caster<__VA_ARGS__> : public type_caster_base<__VA_ARGS__> { }; \\\n    }}\n\n/// Lets you pass a type containing a `,` through a macro parameter without needing a separate\n/// typedef, e.g.: `PYBIND11_OVERLOAD(PYBIND11_TYPE(ReturnType<A, B>), PYBIND11_TYPE(Parent<C, D>), f, arg)`\n#define PYBIND11_TYPE(...) 
__VA_ARGS__\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/chrono.h",
    "content": "/*\n    pybind11/chrono.h: Transparent conversion between std::chrono and python's datetime\n\n    Copyright (c) 2016 Trent Houliston <trent@houliston.me> and\n                       Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n#include <cmath>\n#include <ctime>\n#include <chrono>\n#include <datetime.h>\n\n// Backport the PyDateTime_DELTA functions from Python3.3 if required\n#ifndef PyDateTime_DELTA_GET_DAYS\n#define PyDateTime_DELTA_GET_DAYS(o)         (((PyDateTime_Delta*)o)->days)\n#endif\n#ifndef PyDateTime_DELTA_GET_SECONDS\n#define PyDateTime_DELTA_GET_SECONDS(o)      (((PyDateTime_Delta*)o)->seconds)\n#endif\n#ifndef PyDateTime_DELTA_GET_MICROSECONDS\n#define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds)\n#endif\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\ntemplate <typename type> class duration_caster {\npublic:\n    typedef typename type::rep rep;\n    typedef typename type::period period;\n\n    typedef std::chrono::duration<uint_fast32_t, std::ratio<86400>> days;\n\n    bool load(handle src, bool) {\n        using namespace std::chrono;\n\n        // Lazy initialise the PyDateTime import\n        if (!PyDateTimeAPI) { PyDateTime_IMPORT; }\n\n        if (!src) return false;\n        // If invoked with datetime.delta object\n        if (PyDelta_Check(src.ptr())) {\n            value = type(duration_cast<duration<rep, period>>(\n                  days(PyDateTime_DELTA_GET_DAYS(src.ptr()))\n                + seconds(PyDateTime_DELTA_GET_SECONDS(src.ptr()))\n                + microseconds(PyDateTime_DELTA_GET_MICROSECONDS(src.ptr()))));\n            return true;\n        }\n        // If invoked with a float we assume it is seconds and convert\n        else if (PyFloat_Check(src.ptr())) {\n            value = 
type(duration_cast<duration<rep, period>>(duration<double>(PyFloat_AsDouble(src.ptr()))));\n            return true;\n        }\n        else return false;\n    }\n\n    // If this is a duration just return it back\n    static const std::chrono::duration<rep, period>& get_duration(const std::chrono::duration<rep, period> &src) {\n        return src;\n    }\n\n    // If this is a time_point get the time_since_epoch\n    template <typename Clock> static std::chrono::duration<rep, period> get_duration(const std::chrono::time_point<Clock, std::chrono::duration<rep, period>> &src) {\n        return src.time_since_epoch();\n    }\n\n    static handle cast(const type &src, return_value_policy /* policy */, handle /* parent */) {\n        using namespace std::chrono;\n\n        // Use overloaded function to get our duration from our source\n        // Works out if it is a duration or time_point and get the duration\n        auto d = get_duration(src);\n\n        // Lazy initialise the PyDateTime import\n        if (!PyDateTimeAPI) { PyDateTime_IMPORT; }\n\n        // Declare these special duration types so the conversions happen with the correct primitive types (int)\n        using dd_t = duration<int, std::ratio<86400>>;\n        using ss_t = duration<int, std::ratio<1>>;\n        using us_t = duration<int, std::micro>;\n\n        auto dd = duration_cast<dd_t>(d);\n        auto subd = d - dd;\n        auto ss = duration_cast<ss_t>(subd);\n        auto us = duration_cast<us_t>(subd - ss);\n        return PyDelta_FromDSU(dd.count(), ss.count(), us.count());\n    }\n\n    PYBIND11_TYPE_CASTER(type, _(\"datetime.timedelta\"));\n};\n\n// This is for casting times on the system clock into datetime.datetime instances\ntemplate <typename Duration> class type_caster<std::chrono::time_point<std::chrono::system_clock, Duration>> {\npublic:\n    typedef std::chrono::time_point<std::chrono::system_clock, Duration> type;\n    bool load(handle src, bool) {\n        using namespace 
std::chrono;\n\n        // Lazy initialise the PyDateTime import\n        if (!PyDateTimeAPI) { PyDateTime_IMPORT; }\n\n        if (!src) return false;\n        if (PyDateTime_Check(src.ptr())) {\n            std::tm cal;\n            cal.tm_sec   = PyDateTime_DATE_GET_SECOND(src.ptr());\n            cal.tm_min   = PyDateTime_DATE_GET_MINUTE(src.ptr());\n            cal.tm_hour  = PyDateTime_DATE_GET_HOUR(src.ptr());\n            cal.tm_mday  = PyDateTime_GET_DAY(src.ptr());\n            cal.tm_mon   = PyDateTime_GET_MONTH(src.ptr()) - 1;\n            cal.tm_year  = PyDateTime_GET_YEAR(src.ptr()) - 1900;\n            cal.tm_isdst = -1;\n\n            value = system_clock::from_time_t(std::mktime(&cal)) + microseconds(PyDateTime_DATE_GET_MICROSECOND(src.ptr()));\n            return true;\n        }\n        else return false;\n    }\n\n    static handle cast(const std::chrono::time_point<std::chrono::system_clock, Duration> &src, return_value_policy /* policy */, handle /* parent */) {\n        using namespace std::chrono;\n\n        // Lazy initialise the PyDateTime import\n        if (!PyDateTimeAPI) { PyDateTime_IMPORT; }\n\n        std::time_t tt = system_clock::to_time_t(src);\n        // this function uses static memory so it's best to copy it out asap just in case\n        // otherwise other code that is using localtime may break this (not just python code)\n        std::tm localtime = *std::localtime(&tt);\n\n        // Declare these special duration types so the conversions happen with the correct primitive types (int)\n        using us_t = duration<int, std::micro>;\n\n        return PyDateTime_FromDateAndTime(localtime.tm_year + 1900,\n                                          localtime.tm_mon + 1,\n                                          localtime.tm_mday,\n                                          localtime.tm_hour,\n                                          localtime.tm_min,\n                                          localtime.tm_sec,\n               
                           (duration_cast<us_t>(src.time_since_epoch() % seconds(1))).count());\n    }\n    PYBIND11_TYPE_CASTER(type, _(\"datetime.datetime\"));\n};\n\n// Other clocks that are not the system clock are not measured as datetime.datetime objects\n// since they are not measured on calendar time. So instead we just make them timedeltas\n// Or if they have passed us a time as a float we convert that\ntemplate <typename Clock, typename Duration> class type_caster<std::chrono::time_point<Clock, Duration>>\n: public duration_caster<std::chrono::time_point<Clock, Duration>> {\n};\n\ntemplate <typename Rep, typename Period> class type_caster<std::chrono::duration<Rep, Period>>\n: public duration_caster<std::chrono::duration<Rep, Period>> {\n};\n\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/common.h",
    "content": "#include \"detail/common.h\"\n#warning \"Including 'common.h' is deprecated. It will be removed in v3.0. Use 'pybind11.h'.\"\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/complex.h",
    "content": "/*\n    pybind11/complex.h: Complex number support\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n#include <complex>\n\n/// glibc defines I as a macro which breaks things, e.g., boost template names\n#ifdef I\n#  undef I\n#endif\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\ntemplate <typename T> struct format_descriptor<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {\n    static constexpr const char c = format_descriptor<T>::c;\n    static constexpr const char value[3] = { 'Z', c, '\\0' };\n    static std::string format() { return std::string(value); }\n};\n\n#ifndef PYBIND11_CPP17\n\ntemplate <typename T> constexpr const char format_descriptor<\n    std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>>::value[3];\n\n#endif\n\nNAMESPACE_BEGIN(detail)\n\ntemplate <typename T> struct is_fmt_numeric<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {\n    static constexpr bool value = true;\n    static constexpr int index = is_fmt_numeric<T>::index + 3;\n};\n\ntemplate <typename T> class type_caster<std::complex<T>> {\npublic:\n    bool load(handle src, bool convert) {\n        if (!src)\n            return false;\n        if (!convert && !PyComplex_Check(src.ptr()))\n            return false;\n        Py_complex result = PyComplex_AsCComplex(src.ptr());\n        if (result.real == -1.0 && PyErr_Occurred()) {\n            PyErr_Clear();\n            return false;\n        }\n        value = std::complex<T>((T) result.real, (T) result.imag);\n        return true;\n    }\n\n    static handle cast(const std::complex<T> &src, return_value_policy /* policy */, handle /* parent */) {\n        return PyComplex_FromDoubles((double) src.real(), (double) src.imag());\n    }\n\n    
PYBIND11_TYPE_CASTER(std::complex<T>, _(\"complex\"));\n};\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/detail/class.h",
    "content": "/*\n    pybind11/detail/class.h: Python C API implementation details for py::class_\n\n    Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"../attr.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\n#if PY_VERSION_HEX >= 0x03030000\n#  define PYBIND11_BUILTIN_QUALNAME\n#  define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj)\n#else\n// In pre-3.3 Python, we still set __qualname__ so that we can produce reliable function type\n// signatures; in 3.3+ this macro expands to nothing:\n#  define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) setattr((PyObject *) obj, \"__qualname__\", nameobj)\n#endif\n\ninline PyTypeObject *type_incref(PyTypeObject *type) {\n    Py_INCREF(type);\n    return type;\n}\n\n#if !defined(PYPY_VERSION)\n\n/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance.\nextern \"C\" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) {\n    return PyProperty_Type.tp_descr_get(self, cls, cls);\n}\n\n/// `pybind11_static_property.__set__()`: Just like the above `__get__()`.\nextern \"C\" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) {\n    PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj);\n    return PyProperty_Type.tp_descr_set(self, cls, value);\n}\n\n/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()`\n    methods are modified to always use the object type instead of a concrete instance.\n    Return value: New reference. 
*/\ninline PyTypeObject *make_static_property_type() {\n    constexpr auto *name = \"pybind11_static_property\";\n    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));\n\n    /* Danger zone: from now (and until PyType_Ready), make sure to\n       issue no Python C API calls which could potentially invoke the\n       garbage collector (the GC will call type_traverse(), which will in\n       turn find the newly constructed type in an invalid state) */\n    auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);\n    if (!heap_type)\n        pybind11_fail(\"make_static_property_type(): error allocating type!\");\n\n    heap_type->ht_name = name_obj.inc_ref().ptr();\n#ifdef PYBIND11_BUILTIN_QUALNAME\n    heap_type->ht_qualname = name_obj.inc_ref().ptr();\n#endif\n\n    auto type = &heap_type->ht_type;\n    type->tp_name = name;\n    type->tp_base = type_incref(&PyProperty_Type);\n    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;\n    type->tp_descr_get = pybind11_static_get;\n    type->tp_descr_set = pybind11_static_set;\n\n    if (PyType_Ready(type) < 0)\n        pybind11_fail(\"make_static_property_type(): failure in PyType_Ready()!\");\n\n    setattr((PyObject *) type, \"__module__\", str(\"pybind11_builtins\"));\n    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);\n\n    return type;\n}\n\n#else // PYPY\n\n/** PyPy has some issues with the above C API, so we evaluate Python code instead.\n    This function will only be called once so performance isn't really a concern.\n    Return value: New reference. 
*/\ninline PyTypeObject *make_static_property_type() {\n    auto d = dict();\n    PyObject *result = PyRun_String(R\"(\\\n        class pybind11_static_property(property):\n            def __get__(self, obj, cls):\n                return property.__get__(self, cls, cls)\n\n            def __set__(self, obj, value):\n                cls = obj if isinstance(obj, type) else type(obj)\n                property.__set__(self, cls, value)\n        )\", Py_file_input, d.ptr(), d.ptr()\n    );\n    if (result == nullptr)\n        throw error_already_set();\n    Py_DECREF(result);\n    return (PyTypeObject *) d[\"pybind11_static_property\"].cast<object>().release().ptr();\n}\n\n#endif // PYPY\n\n/** Types with static properties need to handle `Type.static_prop = x` in a specific way.\n    By default, Python replaces the `static_property` itself, but for wrapped C++ types\n    we need to call `static_property.__set__()` in order to propagate the new value to\n    the underlying C++ data structure. */\nextern \"C\" inline int pybind11_meta_setattro(PyObject* obj, PyObject* name, PyObject* value) {\n    // Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw\n    // descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`).\n    PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);\n\n    // The following assignment combinations are possible:\n    //   1. `Type.static_prop = value`             --> descr_set: `Type.static_prop.__set__(value)`\n    //   2. `Type.static_prop = other_static_prop` --> setattro:  replace existing `static_prop`\n    //   3. 
`Type.regular_attribute = value`       --> setattro:  regular attribute assignment\n    const auto static_prop = (PyObject *) get_internals().static_property_type;\n    const auto call_descr_set = descr && PyObject_IsInstance(descr, static_prop)\n                                && !PyObject_IsInstance(value, static_prop);\n    if (call_descr_set) {\n        // Call `static_property.__set__()` instead of replacing the `static_property`.\n#if !defined(PYPY_VERSION)\n        return Py_TYPE(descr)->tp_descr_set(descr, obj, value);\n#else\n        if (PyObject *result = PyObject_CallMethod(descr, \"__set__\", \"OO\", obj, value)) {\n            Py_DECREF(result);\n            return 0;\n        } else {\n            return -1;\n        }\n#endif\n    } else {\n        // Replace existing attribute.\n        return PyType_Type.tp_setattro(obj, name, value);\n    }\n}\n\n#if PY_MAJOR_VERSION >= 3\n/**\n * Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing\n * methods via cls.attr(\"m2\") = cls.attr(\"m1\"): instead the tp_descr_get returns a plain function,\n * when called on a class, or a PyMethod, when called on an instance.  Override that behaviour here\n * to do a special case bypass for PyInstanceMethod_Types.\n */\nextern \"C\" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) {\n    PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);\n    if (descr && PyInstanceMethod_Check(descr)) {\n        Py_INCREF(descr);\n        return descr;\n    }\n    else {\n        return PyType_Type.tp_getattro(obj, name);\n    }\n}\n#endif\n\n/** This metaclass is assigned by default to all pybind11 types and is required in order\n    for static properties to function correctly. Users may override this using `py::metaclass`.\n    Return value: New reference. 
*/\ninline PyTypeObject* make_default_metaclass() {\n    constexpr auto *name = \"pybind11_type\";\n    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));\n\n    /* Danger zone: from now (and until PyType_Ready), make sure to\n       issue no Python C API calls which could potentially invoke the\n       garbage collector (the GC will call type_traverse(), which will in\n       turn find the newly constructed type in an invalid state) */\n    auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);\n    if (!heap_type)\n        pybind11_fail(\"make_default_metaclass(): error allocating metaclass!\");\n\n    heap_type->ht_name = name_obj.inc_ref().ptr();\n#ifdef PYBIND11_BUILTIN_QUALNAME\n    heap_type->ht_qualname = name_obj.inc_ref().ptr();\n#endif\n\n    auto type = &heap_type->ht_type;\n    type->tp_name = name;\n    type->tp_base = type_incref(&PyType_Type);\n    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;\n\n    type->tp_setattro = pybind11_meta_setattro;\n#if PY_MAJOR_VERSION >= 3\n    type->tp_getattro = pybind11_meta_getattro;\n#endif\n\n    if (PyType_Ready(type) < 0)\n        pybind11_fail(\"make_default_metaclass(): failure in PyType_Ready()!\");\n\n    setattr((PyObject *) type, \"__module__\", str(\"pybind11_builtins\"));\n    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);\n\n    return type;\n}\n\n/// For multiple inheritance types we need to recursively register/deregister base pointers for any\n/// base classes with pointers that are difference from the instance value pointer so that we can\n/// correctly recognize an offset base class pointer. 
This calls a function with any offset base ptrs.\ninline void traverse_offset_bases(void *valueptr, const detail::type_info *tinfo, instance *self,\n        bool (*f)(void * /*parentptr*/, instance * /*self*/)) {\n    for (handle h : reinterpret_borrow<tuple>(tinfo->type->tp_bases)) {\n        if (auto parent_tinfo = get_type_info((PyTypeObject *) h.ptr())) {\n            for (auto &c : parent_tinfo->implicit_casts) {\n                if (c.first == tinfo->cpptype) {\n                    auto *parentptr = c.second(valueptr);\n                    if (parentptr != valueptr)\n                        f(parentptr, self);\n                    traverse_offset_bases(parentptr, parent_tinfo, self, f);\n                    break;\n                }\n            }\n        }\n    }\n}\n\ninline bool register_instance_impl(void *ptr, instance *self) {\n    get_internals().registered_instances.emplace(ptr, self);\n    return true; // unused, but gives the same signature as the deregister func\n}\ninline bool deregister_instance_impl(void *ptr, instance *self) {\n    auto &registered_instances = get_internals().registered_instances;\n    auto range = registered_instances.equal_range(ptr);\n    for (auto it = range.first; it != range.second; ++it) {\n        if (Py_TYPE(self) == Py_TYPE(it->second)) {\n            registered_instances.erase(it);\n            return true;\n        }\n    }\n    return false;\n}\n\ninline void register_instance(instance *self, void *valptr, const type_info *tinfo) {\n    register_instance_impl(valptr, self);\n    if (!tinfo->simple_ancestors)\n        traverse_offset_bases(valptr, tinfo, self, register_instance_impl);\n}\n\ninline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) {\n    bool ret = deregister_instance_impl(valptr, self);\n    if (!tinfo->simple_ancestors)\n        traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl);\n    return ret;\n}\n\n/// Instance creation function for all pybind11 
types. It allocates the internal instance layout for\n/// holding C++ objects and holders.  Allocation is done lazily (the first time the instance is cast\n/// to a reference or pointer), and initialization is done by an `__init__` function.\ninline PyObject *make_new_instance(PyTypeObject *type) {\n#if defined(PYPY_VERSION)\n    // PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first inherited\n    // object is a a plain Python type (i.e. not derived from an extension type).  Fix it.\n    ssize_t instance_size = static_cast<ssize_t>(sizeof(instance));\n    if (type->tp_basicsize < instance_size) {\n        type->tp_basicsize = instance_size;\n    }\n#endif\n    PyObject *self = type->tp_alloc(type, 0);\n    auto inst = reinterpret_cast<instance *>(self);\n    // Allocate the value/holder internals:\n    inst->allocate_layout();\n\n    inst->owned = true;\n\n    return self;\n}\n\n/// Instance creation function for all pybind11 types. It only allocates space for the\n/// C++ object, but doesn't call the constructor -- an `__init__` function must do that.\nextern \"C\" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) {\n    return make_new_instance(type);\n}\n\n/// An `__init__` function constructs the C++ object. Users should provide at least one\n/// of these using `py::init` or directly with `.def(__init__, ...)`. 
Otherwise, the\n/// following default function will be used which simply throws an exception.\nextern \"C\" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) {\n    PyTypeObject *type = Py_TYPE(self);\n    std::string msg;\n#if defined(PYPY_VERSION)\n    msg += handle((PyObject *) type).attr(\"__module__\").cast<std::string>() + \".\";\n#endif\n    msg += type->tp_name;\n    msg += \": No constructor defined!\";\n    PyErr_SetString(PyExc_TypeError, msg.c_str());\n    return -1;\n}\n\ninline void add_patient(PyObject *nurse, PyObject *patient) {\n    auto &internals = get_internals();\n    auto instance = reinterpret_cast<detail::instance *>(nurse);\n    instance->has_patients = true;\n    Py_INCREF(patient);\n    internals.patients[nurse].push_back(patient);\n}\n\ninline void clear_patients(PyObject *self) {\n    auto instance = reinterpret_cast<detail::instance *>(self);\n    auto &internals = get_internals();\n    auto pos = internals.patients.find(self);\n    assert(pos != internals.patients.end());\n    // Clearing the patients can cause more Python code to run, which\n    // can invalidate the iterator. 
Extract the vector of patients\n    // from the unordered_map first.\n    auto patients = std::move(pos->second);\n    internals.patients.erase(pos);\n    instance->has_patients = false;\n    for (PyObject *&patient : patients)\n        Py_CLEAR(patient);\n}\n\n/// Clears all internal data from the instance and removes it from registered instances in\n/// preparation for deallocation.\ninline void clear_instance(PyObject *self) {\n    auto instance = reinterpret_cast<detail::instance *>(self);\n\n    // Deallocate any values/holders, if present:\n    for (auto &v_h : values_and_holders(instance)) {\n        if (v_h) {\n\n            // We have to deregister before we call dealloc because, for virtual MI types, we still\n            // need to be able to get the parent pointers.\n            if (v_h.instance_registered() && !deregister_instance(instance, v_h.value_ptr(), v_h.type))\n                pybind11_fail(\"pybind11_object_dealloc(): Tried to deallocate unregistered instance!\");\n\n            if (instance->owned || v_h.holder_constructed())\n                v_h.type->dealloc(v_h);\n        }\n    }\n    // Deallocate the value/holder layout internals:\n    instance->deallocate_layout();\n\n    if (instance->weakrefs)\n        PyObject_ClearWeakRefs(self);\n\n    PyObject **dict_ptr = _PyObject_GetDictPtr(self);\n    if (dict_ptr)\n        Py_CLEAR(*dict_ptr);\n\n    if (instance->has_patients)\n        clear_patients(self);\n}\n\n/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc`\n/// to destroy the C++ object itself, while the rest is Python bookkeeping.\nextern \"C\" inline void pybind11_object_dealloc(PyObject *self) {\n    clear_instance(self);\n\n    auto type = Py_TYPE(self);\n    type->tp_free(self);\n\n    // `type->tp_dealloc != pybind11_object_dealloc` means that we're being called\n    // as part of a derived type's dealloc, in which case we're not allowed to decref\n    // the type here. 
For cross-module compatibility, we shouldn't compare directly\n    // with `pybind11_object_dealloc`, but with the common one stashed in internals.\n    auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base;\n    if (type->tp_dealloc == pybind11_object_type->tp_dealloc)\n        Py_DECREF(type);\n}\n\n/** Create the type which can be used as a common base for all classes.  This is\n    needed in order to satisfy Python's requirements for multiple inheritance.\n    Return value: New reference. */\ninline PyObject *make_object_base_type(PyTypeObject *metaclass) {\n    constexpr auto *name = \"pybind11_object\";\n    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));\n\n    /* Danger zone: from now (and until PyType_Ready), make sure to\n       issue no Python C API calls which could potentially invoke the\n       garbage collector (the GC will call type_traverse(), which will in\n       turn find the newly constructed type in an invalid state) */\n    auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);\n    if (!heap_type)\n        pybind11_fail(\"make_object_base_type(): error allocating type!\");\n\n    heap_type->ht_name = name_obj.inc_ref().ptr();\n#ifdef PYBIND11_BUILTIN_QUALNAME\n    heap_type->ht_qualname = name_obj.inc_ref().ptr();\n#endif\n\n    auto type = &heap_type->ht_type;\n    type->tp_name = name;\n    type->tp_base = type_incref(&PyBaseObject_Type);\n    type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));\n    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;\n\n    type->tp_new = pybind11_object_new;\n    type->tp_init = pybind11_object_init;\n    type->tp_dealloc = pybind11_object_dealloc;\n\n    /* Support weak references (needed for the keep_alive feature) */\n    type->tp_weaklistoffset = offsetof(instance, weakrefs);\n\n    if (PyType_Ready(type) < 0)\n        pybind11_fail(\"PyType_Ready failed in make_object_base_type():\" + 
error_string());\n\n    setattr((PyObject *) type, \"__module__\", str(\"pybind11_builtins\"));\n    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);\n\n    assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));\n    return (PyObject *) heap_type;\n}\n\n/// dynamic_attr: Support for `d = instance.__dict__`.\nextern \"C\" inline PyObject *pybind11_get_dict(PyObject *self, void *) {\n    PyObject *&dict = *_PyObject_GetDictPtr(self);\n    if (!dict)\n        dict = PyDict_New();\n    Py_XINCREF(dict);\n    return dict;\n}\n\n/// dynamic_attr: Support for `instance.__dict__ = dict()`.\nextern \"C\" inline int pybind11_set_dict(PyObject *self, PyObject *new_dict, void *) {\n    if (!PyDict_Check(new_dict)) {\n        PyErr_Format(PyExc_TypeError, \"__dict__ must be set to a dictionary, not a '%.200s'\",\n                     Py_TYPE(new_dict)->tp_name);\n        return -1;\n    }\n    PyObject *&dict = *_PyObject_GetDictPtr(self);\n    Py_INCREF(new_dict);\n    Py_CLEAR(dict);\n    dict = new_dict;\n    return 0;\n}\n\n/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`.\nextern \"C\" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) {\n    PyObject *&dict = *_PyObject_GetDictPtr(self);\n    Py_VISIT(dict);\n    return 0;\n}\n\n/// dynamic_attr: Allow the GC to clear the dictionary.\nextern \"C\" inline int pybind11_clear(PyObject *self) {\n    PyObject *&dict = *_PyObject_GetDictPtr(self);\n    Py_CLEAR(dict);\n    return 0;\n}\n\n/// Give instances of this type a `__dict__` and opt into garbage collection.\ninline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) {\n    auto type = &heap_type->ht_type;\n#if defined(PYPY_VERSION)\n    pybind11_fail(std::string(type->tp_name) + \": dynamic attributes are \"\n                                               \"currently not supported in \"\n                                               \"conjunction with PyPy!\");\n#endif\n    type->tp_flags |= 
Py_TPFLAGS_HAVE_GC;\n    type->tp_dictoffset = type->tp_basicsize; // place dict at the end\n    type->tp_basicsize += (ssize_t)sizeof(PyObject *); // and allocate enough space for it\n    type->tp_traverse = pybind11_traverse;\n    type->tp_clear = pybind11_clear;\n\n    static PyGetSetDef getset[] = {\n        {const_cast<char*>(\"__dict__\"), pybind11_get_dict, pybind11_set_dict, nullptr, nullptr},\n        {nullptr, nullptr, nullptr, nullptr, nullptr}\n    };\n    type->tp_getset = getset;\n}\n\n/// buffer_protocol: Fill in the view as specified by flags.\nextern \"C\" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) {\n    // Look for a `get_buffer` implementation in this type's info or any bases (following MRO).\n    type_info *tinfo = nullptr;\n    for (auto type : reinterpret_borrow<tuple>(Py_TYPE(obj)->tp_mro)) {\n        tinfo = get_type_info((PyTypeObject *) type.ptr());\n        if (tinfo && tinfo->get_buffer)\n            break;\n    }\n    if (view == nullptr || obj == nullptr || !tinfo || !tinfo->get_buffer) {\n        if (view)\n            view->obj = nullptr;\n        PyErr_SetString(PyExc_BufferError, \"pybind11_getbuffer(): Internal error\");\n        return -1;\n    }\n    std::memset(view, 0, sizeof(Py_buffer));\n    buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data);\n    view->obj = obj;\n    view->ndim = 1;\n    view->internal = info;\n    view->buf = info->ptr;\n    view->itemsize = info->itemsize;\n    view->len = view->itemsize;\n    for (auto s : info->shape)\n        view->len *= s;\n    if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT)\n        view->format = const_cast<char *>(info->format.c_str());\n    if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) {\n        view->ndim = (int) info->ndim;\n        view->strides = &info->strides[0];\n        view->shape = &info->shape[0];\n    }\n    Py_INCREF(view->obj);\n    return 0;\n}\n\n/// buffer_protocol: Release the resources of the buffer.\nextern \"C\" 
inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) {\n    delete (buffer_info *) view->internal;\n}\n\n/// Give this type a buffer interface.\ninline void enable_buffer_protocol(PyHeapTypeObject *heap_type) {\n    heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer;\n#if PY_MAJOR_VERSION < 3\n    heap_type->ht_type.tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER;\n#endif\n\n    heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer;\n    heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer;\n}\n\n/** Create a brand new Python type according to the `type_record` specification.\n    Return value: New reference. */\ninline PyObject* make_new_python_type(const type_record &rec) {\n    auto name = reinterpret_steal<object>(PYBIND11_FROM_STRING(rec.name));\n\n    auto qualname = name;\n    if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, \"__qualname__\")) {\n#if PY_MAJOR_VERSION >= 3\n        qualname = reinterpret_steal<object>(\n            PyUnicode_FromFormat(\"%U.%U\", rec.scope.attr(\"__qualname__\").ptr(), name.ptr()));\n#else\n        qualname = str(rec.scope.attr(\"__qualname__\").cast<std::string>() + \".\" + rec.name);\n#endif\n    }\n\n    object module;\n    if (rec.scope) {\n        if (hasattr(rec.scope, \"__module__\"))\n            module = rec.scope.attr(\"__module__\");\n        else if (hasattr(rec.scope, \"__name__\"))\n            module = rec.scope.attr(\"__name__\");\n    }\n\n    auto full_name = c_str(\n#if !defined(PYPY_VERSION)\n        module ? 
str(module).cast<std::string>() + \".\" + rec.name :\n#endif\n        rec.name);\n\n    char *tp_doc = nullptr;\n    if (rec.doc && options::show_user_defined_docstrings()) {\n        /* Allocate memory for docstring (using PyObject_MALLOC, since\n           Python will free this later on) */\n        size_t size = strlen(rec.doc) + 1;\n        tp_doc = (char *) PyObject_MALLOC(size);\n        memcpy((void *) tp_doc, rec.doc, size);\n    }\n\n    auto &internals = get_internals();\n    auto bases = tuple(rec.bases);\n    auto base = (bases.size() == 0) ? internals.instance_base\n                                    : bases[0].ptr();\n\n    /* Danger zone: from now (and until PyType_Ready), make sure to\n       issue no Python C API calls which could potentially invoke the\n       garbage collector (the GC will call type_traverse(), which will in\n       turn find the newly constructed type in an invalid state) */\n    auto metaclass = rec.metaclass.ptr() ? (PyTypeObject *) rec.metaclass.ptr()\n                                         : internals.default_metaclass;\n\n    auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);\n    if (!heap_type)\n        pybind11_fail(std::string(rec.name) + \": Unable to create type object!\");\n\n    heap_type->ht_name = name.release().ptr();\n#ifdef PYBIND11_BUILTIN_QUALNAME\n    heap_type->ht_qualname = qualname.inc_ref().ptr();\n#endif\n\n    auto type = &heap_type->ht_type;\n    type->tp_name = full_name;\n    type->tp_doc = tp_doc;\n    type->tp_base = type_incref((PyTypeObject *)base);\n    type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));\n    if (bases.size() > 0)\n        type->tp_bases = bases.release().ptr();\n\n    /* Don't inherit base __init__ */\n    type->tp_init = pybind11_object_init;\n\n    /* Supported protocols */\n    type->tp_as_number = &heap_type->as_number;\n    type->tp_as_sequence = &heap_type->as_sequence;\n    type->tp_as_mapping = &heap_type->as_mapping;\n\n    /* Flags 
*/\n    type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;\n#if PY_MAJOR_VERSION < 3\n    type->tp_flags |= Py_TPFLAGS_CHECKTYPES;\n#endif\n\n    if (rec.dynamic_attr)\n        enable_dynamic_attributes(heap_type);\n\n    if (rec.buffer_protocol)\n        enable_buffer_protocol(heap_type);\n\n    if (PyType_Ready(type) < 0)\n        pybind11_fail(std::string(rec.name) + \": PyType_Ready failed (\" + error_string() + \")!\");\n\n    assert(rec.dynamic_attr ? PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)\n                            : !PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));\n\n    /* Register type with the parent scope */\n    if (rec.scope)\n        setattr(rec.scope, rec.name, (PyObject *) type);\n    else\n        Py_INCREF(type); // Keep it alive forever (reference leak)\n\n    if (module) // Needed by pydoc\n        setattr((PyObject *) type, \"__module__\", module);\n\n    PYBIND11_SET_OLDPY_QUALNAME(type, qualname);\n\n    return (PyObject *) type;\n}\n\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/detail/common.h",
    "content": "/*\n    pybind11/detail/common.h -- Basic macros\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#if !defined(NAMESPACE_BEGIN)\n#  define NAMESPACE_BEGIN(name) namespace name {\n#endif\n#if !defined(NAMESPACE_END)\n#  define NAMESPACE_END(name) }\n#endif\n\n// Robust support for some features and loading modules compiled against different pybind versions\n// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute on\n// the main `pybind11` namespace.\n#if !defined(PYBIND11_NAMESPACE)\n#  ifdef __GNUG__\n#    define PYBIND11_NAMESPACE pybind11 __attribute__((visibility(\"hidden\")))\n#  else\n#    define PYBIND11_NAMESPACE pybind11\n#  endif\n#endif\n\n#if !(defined(_MSC_VER) && __cplusplus == 199711L) && !defined(__INTEL_COMPILER)\n#  if __cplusplus >= 201402L\n#    define PYBIND11_CPP14\n#    if __cplusplus >= 201703L\n#      define PYBIND11_CPP17\n#    endif\n#  endif\n#elif defined(_MSC_VER) && __cplusplus == 199711L\n// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully implemented)\n// Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3 or newer\n#  if _MSVC_LANG >= 201402L\n#    define PYBIND11_CPP14\n#    if _MSVC_LANG > 201402L && _MSC_VER >= 1910\n#      define PYBIND11_CPP17\n#    endif\n#  endif\n#endif\n\n// Compiler version assertions\n#if defined(__INTEL_COMPILER)\n#  if __INTEL_COMPILER < 1700\n#    error pybind11 requires Intel C++ compiler v17 or newer\n#  endif\n#elif defined(__clang__) && !defined(__apple_build_version__)\n#  if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3)\n#    error pybind11 requires clang 3.3 or newer\n#  endif\n#elif defined(__clang__)\n// Apple changes clang version macros to its Xcode version; the first Xcode release based 
on\n// (upstream) clang 3.3 was Xcode 5:\n#  if __clang_major__ < 5\n#    error pybind11 requires Xcode/clang 5.0 or newer\n#  endif\n#elif defined(__GNUG__)\n#  if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)\n#    error pybind11 requires gcc 4.8 or newer\n#  endif\n#elif defined(_MSC_VER)\n// Pybind hits various compiler bugs in 2015u2 and earlier, and also makes use of some stl features\n// (e.g. std::negation) added in 2015u3:\n#  if _MSC_FULL_VER < 190024210\n#    error pybind11 requires MSVC 2015 update 3 or newer\n#  endif\n#endif\n\n#if !defined(PYBIND11_EXPORT)\n#  if defined(WIN32) || defined(_WIN32)\n#    define PYBIND11_EXPORT __declspec(dllexport)\n#  else\n#    define PYBIND11_EXPORT __attribute__ ((visibility(\"default\")))\n#  endif\n#endif\n\n#if defined(_MSC_VER)\n#  define PYBIND11_NOINLINE __declspec(noinline)\n#else\n#  define PYBIND11_NOINLINE __attribute__ ((noinline))\n#endif\n\n#if defined(PYBIND11_CPP14)\n#  define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]]\n#else\n#  define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason)))\n#endif\n\n#define PYBIND11_VERSION_MAJOR 2\n#define PYBIND11_VERSION_MINOR 3\n#define PYBIND11_VERSION_PATCH dev0\n\n/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode\n#if defined(_MSC_VER)\n#  if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4)\n#    define HAVE_ROUND 1\n#  endif\n#  pragma warning(push)\n#  pragma warning(disable: 4510 4610 4512 4005)\n#  if defined(_DEBUG)\n#    define PYBIND11_DEBUG_MARKER\n#    undef _DEBUG\n#  endif\n#endif\n\n#include <Python.h>\n#include <frameobject.h>\n#include <pythread.h>\n\n#if defined(_WIN32) && (defined(min) || defined(max))\n#  error Macro clash with min and max -- define NOMINMAX when compiling your program on Windows\n#endif\n\n#if defined(isalnum)\n#  undef isalnum\n#  undef isalpha\n#  undef islower\n#  undef isspace\n#  undef isupper\n#  undef tolower\n#  undef toupper\n#endif\n\n#if 
defined(_MSC_VER)\n#  if defined(PYBIND11_DEBUG_MARKER)\n#    define _DEBUG\n#    undef PYBIND11_DEBUG_MARKER\n#  endif\n#  pragma warning(pop)\n#endif\n\n#include <cstddef>\n#include <cstring>\n#include <forward_list>\n#include <vector>\n#include <string>\n#include <stdexcept>\n#include <unordered_set>\n#include <unordered_map>\n#include <memory>\n#include <typeindex>\n#include <type_traits>\n\n#if PY_MAJOR_VERSION >= 3 /// Compatibility macros for various Python versions\n#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr)\n#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check\n#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION\n#define PYBIND11_BYTES_CHECK PyBytes_Check\n#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString\n#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize\n#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize\n#define PYBIND11_BYTES_AS_STRING PyBytes_AsString\n#define PYBIND11_BYTES_SIZE PyBytes_Size\n#define PYBIND11_LONG_CHECK(o) PyLong_Check(o)\n#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)\n#define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) o)\n#define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) o)\n#define PYBIND11_BYTES_NAME \"bytes\"\n#define PYBIND11_STRING_NAME \"str\"\n#define PYBIND11_SLICE_OBJECT PyObject\n#define PYBIND11_FROM_STRING PyUnicode_FromString\n#define PYBIND11_STR_TYPE ::pybind11::str\n#define PYBIND11_BOOL_ATTR \"__bool__\"\n#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)\n#define PYBIND11_PLUGIN_IMPL(name) \\\n    extern \"C\" PYBIND11_EXPORT PyObject *PyInit_##name()\n\n#else\n#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyMethod_New(ptr, nullptr, class_)\n#define PYBIND11_INSTANCE_METHOD_CHECK PyMethod_Check\n#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyMethod_GET_FUNCTION\n#define PYBIND11_BYTES_CHECK PyString_Check\n#define PYBIND11_BYTES_FROM_STRING PyString_FromString\n#define 
PYBIND11_BYTES_FROM_STRING_AND_SIZE PyString_FromStringAndSize\n#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyString_AsStringAndSize\n#define PYBIND11_BYTES_AS_STRING PyString_AsString\n#define PYBIND11_BYTES_SIZE PyString_Size\n#define PYBIND11_LONG_CHECK(o) (PyInt_Check(o) || PyLong_Check(o))\n#define PYBIND11_LONG_AS_LONGLONG(o) (PyInt_Check(o) ? (long long) PyLong_AsLong(o) : PyLong_AsLongLong(o))\n#define PYBIND11_LONG_FROM_SIGNED(o) PyInt_FromSsize_t((ssize_t) o) // Returns long if needed.\n#define PYBIND11_LONG_FROM_UNSIGNED(o) PyInt_FromSize_t((size_t) o) // Returns long if needed.\n#define PYBIND11_BYTES_NAME \"str\"\n#define PYBIND11_STRING_NAME \"unicode\"\n#define PYBIND11_SLICE_OBJECT PySliceObject\n#define PYBIND11_FROM_STRING PyString_FromString\n#define PYBIND11_STR_TYPE ::pybind11::bytes\n#define PYBIND11_BOOL_ATTR \"__nonzero__\"\n#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_nonzero)\n#define PYBIND11_PLUGIN_IMPL(name) \\\n    static PyObject *pybind11_init_wrapper();               \\\n    extern \"C\" PYBIND11_EXPORT void init##name() {          \\\n        (void)pybind11_init_wrapper();                      \\\n    }                                                       \\\n    PyObject *pybind11_init_wrapper()\n#endif\n\n#if PY_VERSION_HEX >= 0x03050000 && PY_VERSION_HEX < 0x03050200\nextern \"C\" {\n    struct _Py_atomic_address { void *value; };\n    PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;\n}\n#endif\n\n#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code\n#define PYBIND11_STRINGIFY(x) #x\n#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)\n#define PYBIND11_CONCAT(first, second) first##second\n\n#define PYBIND11_CHECK_PYTHON_VERSION \\\n    {                                                                          \\\n        const char *compiled_ver = PYBIND11_TOSTRING(PY_MAJOR_VERSION)         \\\n            \".\" PYBIND11_TOSTRING(PY_MINOR_VERSION);                           \\\n        const char 
*runtime_ver = Py_GetVersion();                             \\\n        size_t len = std::strlen(compiled_ver);                                \\\n        if (std::strncmp(runtime_ver, compiled_ver, len) != 0                  \\\n                || (runtime_ver[len] >= '0' && runtime_ver[len] <= '9')) {     \\\n            PyErr_Format(PyExc_ImportError,                                    \\\n                \"Python version mismatch: module was compiled for Python %s, \" \\\n                \"but the interpreter version is incompatible: %s.\",            \\\n                compiled_ver, runtime_ver);                                    \\\n            return nullptr;                                                    \\\n        }                                                                      \\\n    }\n\n#define PYBIND11_CATCH_INIT_EXCEPTIONS \\\n        catch (pybind11::error_already_set &e) {                               \\\n            PyErr_SetString(PyExc_ImportError, e.what());                      \\\n            return nullptr;                                                    \\\n        } catch (const std::exception &e) {                                    \\\n            PyErr_SetString(PyExc_ImportError, e.what());                      \\\n            return nullptr;                                                    \\\n        }                                                                      \\\n\n/** \\rst\n    ***Deprecated in favor of PYBIND11_MODULE***\n\n    This macro creates the entry point that will be invoked when the Python interpreter\n    imports a plugin library. Please create a `module` in the function body and return\n    the pointer to its underlying Python object at the end.\n\n    .. 
code-block:: cpp\n\n        PYBIND11_PLUGIN(example) {\n            pybind11::module m(\"example\", \"pybind11 example plugin\");\n            /// Set up bindings here\n            return m.ptr();\n        }\n\\endrst */\n#define PYBIND11_PLUGIN(name)                                                  \\\n    PYBIND11_DEPRECATED(\"PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE\")  \\\n    static PyObject *pybind11_init();                                          \\\n    PYBIND11_PLUGIN_IMPL(name) {                                               \\\n        PYBIND11_CHECK_PYTHON_VERSION                                          \\\n        try {                                                                  \\\n            return pybind11_init();                                            \\\n        } PYBIND11_CATCH_INIT_EXCEPTIONS                                       \\\n    }                                                                          \\\n    PyObject *pybind11_init()\n\n/** \\rst\n    This macro creates the entry point that will be invoked when the Python interpreter\n    imports an extension module. The module name is given as the fist argument and it\n    should not be in quotes. The second macro argument defines a variable of type\n    `py::module` which can be used to initialize the module.\n\n    .. 
code-block:: cpp\n\n        PYBIND11_MODULE(example, m) {\n            m.doc() = \"pybind11 example module\";\n\n            // Add bindings here\n            m.def(\"foo\", []() {\n                return \"Hello, World!\";\n            });\n        }\n\\endrst */\n#define PYBIND11_MODULE(name, variable)                                        \\\n    static void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &);     \\\n    PYBIND11_PLUGIN_IMPL(name) {                                               \\\n        PYBIND11_CHECK_PYTHON_VERSION                                          \\\n        auto m = pybind11::module(PYBIND11_TOSTRING(name));                    \\\n        try {                                                                  \\\n            PYBIND11_CONCAT(pybind11_init_, name)(m);                          \\\n            return m.ptr();                                                    \\\n        } PYBIND11_CATCH_INIT_EXCEPTIONS                                       \\\n    }                                                                          \\\n    void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &variable)\n\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\nusing ssize_t = Py_ssize_t;\nusing size_t  = std::size_t;\n\n/// Approach used to cast a previously unknown C++ instance into a Python object\nenum class return_value_policy : uint8_t {\n    /** This is the default return value policy, which falls back to the policy\n        return_value_policy::take_ownership when the return value is a pointer.\n        Otherwise, it uses return_value::move or return_value::copy for rvalue\n        and lvalue references, respectively. See below for a description of what\n        all of these different policies do. */\n    automatic = 0,\n\n    /** As above, but use policy return_value_policy::reference when the return\n        value is a pointer. 
This is the default conversion policy for function\n        arguments when calling Python functions manually from C++ code (i.e. via\n        handle::operator()). You probably won't need to use this. */\n    automatic_reference,\n\n    /** Reference an existing object (i.e. do not create a new copy) and take\n        ownership. Python will call the destructor and delete operator when the\n        object’s reference count reaches zero. Undefined behavior ensues when\n        the C++ side does the same.. */\n    take_ownership,\n\n    /** Create a new copy of the returned object, which will be owned by\n        Python. This policy is comparably safe because the lifetimes of the two\n        instances are decoupled. */\n    copy,\n\n    /** Use std::move to move the return value contents into a new instance\n        that will be owned by Python. This policy is comparably safe because the\n        lifetimes of the two instances (move source and destination) are\n        decoupled. */\n    move,\n\n    /** Reference an existing object, but do not take ownership. The C++ side\n        is responsible for managing the object’s lifetime and deallocating it\n        when it is no longer used. Warning: undefined behavior will ensue when\n        the C++ side deletes an object that is still referenced and used by\n        Python. */\n    reference,\n\n    /** This policy only applies to methods and properties. It references the\n        object without taking ownership similar to the above\n        return_value_policy::reference policy. In contrast to that policy, the\n        function or property’s implicit this argument (called the parent) is\n        considered to be the the owner of the return value (the child).\n        pybind11 then couples the lifetime of the parent to the child via a\n        reference relationship that ensures that the parent cannot be garbage\n        collected while Python is still using the child. 
More advanced\n        variations of this scheme are also possible using combinations of\n        return_value_policy::reference and the keep_alive call policy */\n    reference_internal\n};\n\nNAMESPACE_BEGIN(detail)\n\ninline static constexpr int log2(size_t n, int k = 0) { return (n <= 1) ? k : log2(n >> 1, k + 1); }\n\n// Returns the size as a multiple of sizeof(void *), rounded up.\ninline static constexpr size_t size_in_ptrs(size_t s) { return 1 + ((s - 1) >> log2(sizeof(void *))); }\n\n/**\n * The space to allocate for simple layout instance holders (see below) in multiple of the size of\n * a pointer (e.g.  2 means 16 bytes on 64-bit architectures).  The default is the minimum required\n * to holder either a std::unique_ptr or std::shared_ptr (which is almost always\n * sizeof(std::shared_ptr<T>)).\n */\nconstexpr size_t instance_simple_holder_in_ptrs() {\n    static_assert(sizeof(std::shared_ptr<int>) >= sizeof(std::unique_ptr<int>),\n            \"pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs\");\n    return size_in_ptrs(sizeof(std::shared_ptr<int>));\n}\n\n// Forward declarations\nstruct type_info;\nstruct value_and_holder;\n\nstruct nonsimple_values_and_holders {\n    void **values_and_holders;\n    uint8_t *status;\n};\n\n/// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof')\nstruct instance {\n    PyObject_HEAD\n    /// Storage for pointers and holder; see simple_layout, below, for a description\n    union {\n        void *simple_value_holder[1 + instance_simple_holder_in_ptrs()];\n        nonsimple_values_and_holders nonsimple;\n    };\n    /// Weak references\n    PyObject *weakrefs;\n    /// If true, the pointer is owned which means we're free to manage it with a holder.\n    bool owned : 1;\n    /**\n     * An instance has two possible value/holder layouts.\n     *\n     * Simple layout (when this flag is true), means the `simple_value_holder` is set with a pointer\n     * and the 
holder object governing that pointer, i.e. [val1*][holder].  This layout is applied\n     * whenever there is no python-side multiple inheritance of bound C++ types *and* the type's\n     * holder will fit in the default space (which is large enough to hold either a std::unique_ptr\n     * or std::shared_ptr).\n     *\n     * Non-simple layout applies when using custom holders that require more space than `shared_ptr`\n     * (which is typically the size of two pointers), or when multiple inheritance is used on the\n     * python side.  Non-simple layout allocates the required amount of memory to have multiple\n     * bound C++ classes as parents.  Under this layout, `nonsimple.values_and_holders` is set to a\n     * pointer to allocated space of the required space to hold a sequence of value pointers and\n     * holders followed `status`, a set of bit flags (1 byte each), i.e.\n     * [val1*][holder1][val2*][holder2]...[bb...]  where each [block] is rounded up to a multiple of\n     * `sizeof(void *)`.  `nonsimple.status` is, for convenience, a pointer to the\n     * beginning of the [bb...] 
block (but not independently allocated).\n     *\n     * Status bits indicate whether the associated holder is constructed (&\n     * status_holder_constructed) and whether the value pointer is registered (&\n     * status_instance_registered) in `registered_instances`.\n     */\n    bool simple_layout : 1;\n    /// For simple layout, tracks whether the holder has been constructed\n    bool simple_holder_constructed : 1;\n    /// For simple layout, tracks whether the instance is registered in `registered_instances`\n    bool simple_instance_registered : 1;\n    /// If true, get_internals().patients has an entry for this object\n    bool has_patients : 1;\n\n    /// Initializes all of the above type/values/holders data (but not the instance values themselves)\n    void allocate_layout();\n\n    /// Destroys/deallocates all of the above\n    void deallocate_layout();\n\n    /// Returns the value_and_holder wrapper for the given type (or the first, if `find_type`\n    /// omitted).  Returns a default-constructed (with `.inst = nullptr`) object on failure if\n    /// `throw_if_missing` is false.\n    value_and_holder get_value_and_holder(const type_info *find_type = nullptr, bool throw_if_missing = true);\n\n    /// Bit values for the non-simple status flags\n    static constexpr uint8_t status_holder_constructed  = 1;\n    static constexpr uint8_t status_instance_registered = 2;\n};\n\nstatic_assert(std::is_standard_layout<instance>::value, \"Internal error: `pybind11::detail::instance` is not standard layout!\");\n\n/// from __cpp_future__ import (convenient aliases from C++14/17)\n#if defined(PYBIND11_CPP14) && (!defined(_MSC_VER) || _MSC_VER >= 1910)\nusing std::enable_if_t;\nusing std::conditional_t;\nusing std::remove_cv_t;\nusing std::remove_reference_t;\n#else\ntemplate <bool B, typename T = void> using enable_if_t = typename std::enable_if<B, T>::type;\ntemplate <bool B, typename T, typename F> using conditional_t = typename std::conditional<B, T, 
F>::type;\ntemplate <typename T> using remove_cv_t = typename std::remove_cv<T>::type;\ntemplate <typename T> using remove_reference_t = typename std::remove_reference<T>::type;\n#endif\n\n/// Index sequences\n#if defined(PYBIND11_CPP14)\nusing std::index_sequence;\nusing std::make_index_sequence;\n#else\ntemplate<size_t ...> struct index_sequence  { };\ntemplate<size_t N, size_t ...S> struct make_index_sequence_impl : make_index_sequence_impl <N - 1, N - 1, S...> { };\ntemplate<size_t ...S> struct make_index_sequence_impl <0, S...> { typedef index_sequence<S...> type; };\ntemplate<size_t N> using make_index_sequence = typename make_index_sequence_impl<N>::type;\n#endif\n\n/// Make an index sequence of the indices of true arguments\ntemplate <typename ISeq, size_t, bool...> struct select_indices_impl { using type = ISeq; };\ntemplate <size_t... IPrev, size_t I, bool B, bool... Bs> struct select_indices_impl<index_sequence<IPrev...>, I, B, Bs...>\n    : select_indices_impl<conditional_t<B, index_sequence<IPrev..., I>, index_sequence<IPrev...>>, I + 1, Bs...> {};\ntemplate <bool... Bs> using select_indices = typename select_indices_impl<index_sequence<>, 0, Bs...>::type;\n\n/// Backports of std::bool_constant and std::negation to accommodate older compilers\ntemplate <bool B> using bool_constant = std::integral_constant<bool, B>;\ntemplate <typename T> struct negation : bool_constant<!T::value> { };\n\ntemplate <typename...> struct void_t_impl { using type = void; };\ntemplate <typename... Ts> using void_t = typename void_t_impl<Ts...>::type;\n\n/// Compile-time all/any/none of that check the boolean value of all template types\n#if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916))\ntemplate <class... Ts> using all_of = bool_constant<(Ts::value && ...)>;\ntemplate <class... Ts> using any_of = bool_constant<(Ts::value || ...)>;\n#elif !defined(_MSC_VER)\ntemplate <bool...> struct bools {};\ntemplate <class... 
Ts> using all_of = std::is_same<\n    bools<Ts::value..., true>,\n    bools<true, Ts::value...>>;\ntemplate <class... Ts> using any_of = negation<all_of<negation<Ts>...>>;\n#else\n// MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit\n// at a slight loss of compilation efficiency).\ntemplate <class... Ts> using all_of = std::conjunction<Ts...>;\ntemplate <class... Ts> using any_of = std::disjunction<Ts...>;\n#endif\ntemplate <class... Ts> using none_of = negation<any_of<Ts...>>;\n\ntemplate <class T, template<class> class... Predicates> using satisfies_all_of = all_of<Predicates<T>...>;\ntemplate <class T, template<class> class... Predicates> using satisfies_any_of = any_of<Predicates<T>...>;\ntemplate <class T, template<class> class... Predicates> using satisfies_none_of = none_of<Predicates<T>...>;\n\n/// Strip the class from a method type\ntemplate <typename T> struct remove_class { };\ntemplate <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...)> { typedef R type(A...); };\ntemplate <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...) 
const> { typedef R type(A...); };\n\n/// Helper template to strip away type modifiers\ntemplate <typename T> struct intrinsic_type                       { typedef T type; };\ntemplate <typename T> struct intrinsic_type<const T>              { typedef typename intrinsic_type<T>::type type; };\ntemplate <typename T> struct intrinsic_type<T*>                   { typedef typename intrinsic_type<T>::type type; };\ntemplate <typename T> struct intrinsic_type<T&>                   { typedef typename intrinsic_type<T>::type type; };\ntemplate <typename T> struct intrinsic_type<T&&>                  { typedef typename intrinsic_type<T>::type type; };\ntemplate <typename T, size_t N> struct intrinsic_type<const T[N]> { typedef typename intrinsic_type<T>::type type; };\ntemplate <typename T, size_t N> struct intrinsic_type<T[N]>       { typedef typename intrinsic_type<T>::type type; };\ntemplate <typename T> using intrinsic_t = typename intrinsic_type<T>::type;\n\n/// Helper type to replace 'void' in some expressions\nstruct void_type { };\n\n/// Helper template which holds a list of types\ntemplate <typename...> struct type_list { };\n\n/// Compile-time integer sum\n#ifdef __cpp_fold_expressions\ntemplate <typename... Ts> constexpr size_t constexpr_sum(Ts... ns) { return (0 + ... + size_t{ns}); }\n#else\nconstexpr size_t constexpr_sum() { return 0; }\ntemplate <typename T, typename... Ts>\nconstexpr size_t constexpr_sum(T n, Ts... ns) { return size_t{n} + constexpr_sum(ns...); }\n#endif\n\nNAMESPACE_BEGIN(constexpr_impl)\n/// Implementation details for constexpr functions\nconstexpr int first(int i) { return i; }\ntemplate <typename T, typename... Ts>\nconstexpr int first(int i, T v, Ts... vs) { return v ? i : first(i + 1, vs...); }\n\nconstexpr int last(int /*i*/, int result) { return result; }\ntemplate <typename T, typename... Ts>\nconstexpr int last(int i, int result, T v, Ts... vs) { return last(i + 1, v ? 
i : result, vs...); }\nNAMESPACE_END(constexpr_impl)\n\n/// Return the index of the first type in Ts which satisfies Predicate<T>.  Returns sizeof...(Ts) if\n/// none match.\ntemplate <template<typename> class Predicate, typename... Ts>\nconstexpr int constexpr_first() { return constexpr_impl::first(0, Predicate<Ts>::value...); }\n\n/// Return the index of the last type in Ts which satisfies Predicate<T>, or -1 if none match.\ntemplate <template<typename> class Predicate, typename... Ts>\nconstexpr int constexpr_last() { return constexpr_impl::last(0, -1, Predicate<Ts>::value...); }\n\n/// Return the Nth element from the parameter pack\ntemplate <size_t N, typename T, typename... Ts>\nstruct pack_element { using type = typename pack_element<N - 1, Ts...>::type; };\ntemplate <typename T, typename... Ts>\nstruct pack_element<0, T, Ts...> { using type = T; };\n\n/// Return the one and only type which matches the predicate, or Default if none match.\n/// If more than one type matches the predicate, fail at compile-time.\ntemplate <template<typename> class Predicate, typename Default, typename... Ts>\nstruct exactly_one {\n    static constexpr auto found = constexpr_sum(Predicate<Ts>::value...);\n    static_assert(found <= 1, \"Found more than one type matching the predicate\");\n\n    static constexpr auto index = found ? constexpr_first<Predicate, Ts...>() : 0;\n    using type = conditional_t<found, typename pack_element<index, Ts...>::type, Default>;\n};\ntemplate <template<typename> class P, typename Default>\nstruct exactly_one<P, Default> { using type = Default; };\n\ntemplate <template<typename> class Predicate, typename Default, typename... Ts>\nusing exactly_one_t = typename exactly_one<Predicate, Default, Ts...>::type;\n\n/// Defer the evaluation of type T until types Us are instantiated\ntemplate <typename T, typename... /*Us*/> struct deferred_type { using type = T; };\ntemplate <typename T, typename... 
Us> using deferred_t = typename deferred_type<T, Us...>::type;\n\n/// Like is_base_of, but requires a strict base (i.e. `is_strict_base_of<T, T>::value == false`,\n/// unlike `std::is_base_of`)\ntemplate <typename Base, typename Derived> using is_strict_base_of = bool_constant<\n    std::is_base_of<Base, Derived>::value && !std::is_same<Base, Derived>::value>;\n\n/// Like is_base_of, but also requires that the base type is accessible (i.e. that a Derived pointer\n/// can be converted to a Base pointer)\ntemplate <typename Base, typename Derived> using is_accessible_base_of = bool_constant<\n    std::is_base_of<Base, Derived>::value && std::is_convertible<Derived *, Base *>::value>;\n\ntemplate <template<typename...> class Base>\nstruct is_template_base_of_impl {\n    template <typename... Us> static std::true_type check(Base<Us...> *);\n    static std::false_type check(...);\n};\n\n/// Check if a template is the base of a type. For example:\n/// `is_template_base_of<Base, T>` is true if `struct T : Base<U> {}` where U can be anything\ntemplate <template<typename...> class Base, typename T>\n#if !defined(_MSC_VER)\nusing is_template_base_of = decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T>*)nullptr));\n#else // MSVC2015 has trouble with decltype in template aliases\nstruct is_template_base_of : decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T>*)nullptr)) { };\n#endif\n\n/// Check if T is an instantiation of the template `Class`. For example:\n/// `is_instantiation<shared_ptr, T>` is true if `T == shared_ptr<U>` where U can be anything.\ntemplate <template<typename...> class Class, typename T>\nstruct is_instantiation : std::false_type { };\ntemplate <template<typename...> class Class, typename... 
Us>\nstruct is_instantiation<Class, Class<Us...>> : std::true_type { };\n\n/// Check if T is std::shared_ptr<U> where U can be anything\ntemplate <typename T> using is_shared_ptr = is_instantiation<std::shared_ptr, T>;\n\n/// Check if T looks like an input iterator\ntemplate <typename T, typename = void> struct is_input_iterator : std::false_type {};\ntemplate <typename T>\nstruct is_input_iterator<T, void_t<decltype(*std::declval<T &>()), decltype(++std::declval<T &>())>>\n    : std::true_type {};\n\ntemplate <typename T> using is_function_pointer = bool_constant<\n    std::is_pointer<T>::value && std::is_function<typename std::remove_pointer<T>::type>::value>;\n\ntemplate <typename F> struct strip_function_object {\n    using type = typename remove_class<decltype(&F::operator())>::type;\n};\n\n// Extracts the function signature from a function, function pointer or lambda.\ntemplate <typename Function, typename F = remove_reference_t<Function>>\nusing function_signature_t = conditional_t<\n    std::is_function<F>::value,\n    F,\n    typename conditional_t<\n        std::is_pointer<F>::value || std::is_member_pointer<F>::value,\n        std::remove_pointer<F>,\n        strip_function_object<F>\n    >::type\n>;\n\n/// Returns true if the type looks like a lambda: that is, isn't a function, pointer or member\n/// pointer.  
Note that this can catch all sorts of other things, too; this is intended to be used\n/// in a place where passing a lambda makes sense.\ntemplate <typename T> using is_lambda = satisfies_none_of<remove_reference_t<T>,\n        std::is_function, std::is_pointer, std::is_member_pointer>;\n\n/// Ignore that a variable is unused in compiler warnings\ninline void ignore_unused(const int *) { }\n\n/// Apply a function over each element of a parameter pack\n#ifdef __cpp_fold_expressions\n#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...)\n#else\nusing expand_side_effects = bool[];\n#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) pybind11::detail::expand_side_effects{ ((PATTERN), void(), false)..., false }\n#endif\n\nNAMESPACE_END(detail)\n\n/// C++ bindings of builtin Python exceptions\nclass builtin_exception : public std::runtime_error {\npublic:\n    using std::runtime_error::runtime_error;\n    /// Set the error using the Python C API\n    virtual void set_error() const = 0;\n};\n\n#define PYBIND11_RUNTIME_EXCEPTION(name, type) \\\n    class name : public builtin_exception { public: \\\n        using builtin_exception::builtin_exception; \\\n        name() : name(\"\") { } \\\n        void set_error() const override { PyErr_SetString(type, what()); } \\\n    };\n\nPYBIND11_RUNTIME_EXCEPTION(stop_iteration, PyExc_StopIteration)\nPYBIND11_RUNTIME_EXCEPTION(index_error, PyExc_IndexError)\nPYBIND11_RUNTIME_EXCEPTION(key_error, PyExc_KeyError)\nPYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError)\nPYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError)\nPYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or handle::call fail due to a type casting error\nPYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally\n\n[[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const char *reason) { throw std::runtime_error(reason); }\n[[noreturn]] PYBIND11_NOINLINE inline void 
pybind11_fail(const std::string &reason) { throw std::runtime_error(reason); }\n\ntemplate <typename T, typename SFINAE = void> struct format_descriptor { };\n\nNAMESPACE_BEGIN(detail)\n// Returns the index of the given type in the type char array below, and in the list in numpy.h\n// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double;\n// complex float,double,long double.  Note that the long double types only participate when long\n// double is actually longer than double (it isn't under MSVC).\n// NB: not only the string below but also complex.h and numpy.h rely on this order.\ntemplate <typename T, typename SFINAE = void> struct is_fmt_numeric { static constexpr bool value = false; };\ntemplate <typename T> struct is_fmt_numeric<T, enable_if_t<std::is_arithmetic<T>::value>> {\n    static constexpr bool value = true;\n    static constexpr int index = std::is_same<T, bool>::value ? 0 : 1 + (\n        std::is_integral<T>::value ? detail::log2(sizeof(T))*2 + std::is_unsigned<T>::value : 8 + (\n        std::is_same<T, double>::value ? 1 : std::is_same<T, long double>::value ? 
2 : 0));\n};\nNAMESPACE_END(detail)\n\ntemplate <typename T> struct format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>> {\n    static constexpr const char c = \"?bBhHiIqQfdg\"[detail::is_fmt_numeric<T>::index];\n    static constexpr const char value[2] = { c, '\\0' };\n    static std::string format() { return std::string(1, c); }\n};\n\n#if !defined(PYBIND11_CPP17)\n\ntemplate <typename T> constexpr const char format_descriptor<\n    T, detail::enable_if_t<std::is_arithmetic<T>::value>>::value[2];\n\n#endif\n\n/// RAII wrapper that temporarily clears any Python error state\nstruct error_scope {\n    PyObject *type, *value, *trace;\n    error_scope() { PyErr_Fetch(&type, &value, &trace); }\n    ~error_scope() { PyErr_Restore(type, value, trace); }\n};\n\n/// Dummy destructor wrapper that can be used to expose classes with a private destructor\nstruct nodelete { template <typename T> void operator()(T*) { } };\n\n// overload_cast requires variable templates: C++14\n#if defined(PYBIND11_CPP14)\n#define PYBIND11_OVERLOAD_CAST 1\n\nNAMESPACE_BEGIN(detail)\ntemplate <typename... Args>\nstruct overload_cast_impl {\n    constexpr overload_cast_impl() {} // MSVC 2015 needs this\n\n    template <typename Return>\n    constexpr auto operator()(Return (*pf)(Args...)) const noexcept\n                              -> decltype(pf) { return pf; }\n\n    template <typename Return, typename Class>\n    constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept\n                              -> decltype(pmf) { return pmf; }\n\n    template <typename Return, typename Class>\n    constexpr auto operator()(Return (Class::*pmf)(Args...) 
const, std::true_type) const noexcept\n                              -> decltype(pmf) { return pmf; }\n};\nNAMESPACE_END(detail)\n\n/// Syntax sugar for resolving overloaded function pointers:\n///  - regular: static_cast<Return (Class::*)(Arg0, Arg1, Arg2)>(&Class::func)\n///  - sweet:   overload_cast<Arg0, Arg1, Arg2>(&Class::func)\ntemplate <typename... Args>\nstatic constexpr detail::overload_cast_impl<Args...> overload_cast = {};\n// MSVC 2015 only accepts this particular initialization syntax for this variable template.\n\n/// Const member function selector for overload_cast\n///  - regular: static_cast<Return (Class::*)(Arg) const>(&Class::func)\n///  - sweet:   overload_cast<Arg>(&Class::func, const_)\nstatic constexpr auto const_ = std::true_type{};\n\n#else // no overload_cast: providing something that static_assert-fails:\ntemplate <typename... Args> struct overload_cast {\n    static_assert(detail::deferred_t<std::false_type, Args...>::value,\n                  \"pybind11::overload_cast<...> requires compiling in C++14 mode\");\n};\n#endif // overload_cast\n\nNAMESPACE_BEGIN(detail)\n\n// Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from\n// any standard container (or C-style array) supporting std::begin/std::end, any singleton\n// arithmetic type (if T is arithmetic), or explicitly constructible from an iterator pair.\ntemplate <typename T>\nclass any_container {\n    std::vector<T> v;\npublic:\n    any_container() = default;\n\n    // Can construct from a pair of iterators\n    template <typename It, typename = enable_if_t<is_input_iterator<It>::value>>\n    any_container(It first, It last) : v(first, last) { }\n\n    // Implicit conversion constructor from any arbitrary container type with values convertible to T\n    template <typename Container, typename = enable_if_t<std::is_convertible<decltype(*std::begin(std::declval<const Container &>())), T>::value>>\n    any_container(const Container &c) : 
any_container(std::begin(c), std::end(c)) { }\n\n    // initializer_list's aren't deducible, so don't get matched by the above template; we need this\n    // to explicitly allow implicit conversion from one:\n    template <typename TIn, typename = enable_if_t<std::is_convertible<TIn, T>::value>>\n    any_container(const std::initializer_list<TIn> &c) : any_container(c.begin(), c.end()) { }\n\n    // Avoid copying if given an rvalue vector of the correct type.\n    any_container(std::vector<T> &&v) : v(std::move(v)) { }\n\n    // Moves the vector out of an rvalue any_container\n    operator std::vector<T> &&() && { return std::move(v); }\n\n    // Dereferencing obtains a reference to the underlying vector\n    std::vector<T> &operator*() { return v; }\n    const std::vector<T> &operator*() const { return v; }\n\n    // -> lets you call methods on the underlying vector\n    std::vector<T> *operator->() { return &v; }\n    const std::vector<T> *operator->() const { return &v; }\n};\n\nNAMESPACE_END(detail)\n\n\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/detail/descr.h",
    "content": "/*\n    pybind11/detail/descr.h: Helper type for concatenating type signatures at compile time\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"common.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\n#if !defined(_MSC_VER)\n#  define PYBIND11_DESCR_CONSTEXPR static constexpr\n#else\n#  define PYBIND11_DESCR_CONSTEXPR const\n#endif\n\n/* Concatenate type signatures at compile time */\ntemplate <size_t N, typename... Ts>\nstruct descr {\n    char text[N + 1];\n\n    constexpr descr() : text{'\\0'} { }\n    constexpr descr(char const (&s)[N+1]) : descr(s, make_index_sequence<N>()) { }\n\n    template <size_t... Is>\n    constexpr descr(char const (&s)[N+1], index_sequence<Is...>) : text{s[Is]..., '\\0'} { }\n\n    template <typename... Chars>\n    constexpr descr(char c, Chars... cs) : text{c, static_cast<char>(cs)..., '\\0'} { }\n\n    static constexpr std::array<const std::type_info *, sizeof...(Ts) + 1> types() {\n        return {{&typeid(Ts)..., nullptr}};\n    }\n};\n\ntemplate <size_t N1, size_t N2, typename... Ts1, typename... Ts2, size_t... Is1, size_t... Is2>\nconstexpr descr<N1 + N2, Ts1..., Ts2...> plus_impl(const descr<N1, Ts1...> &a, const descr<N2, Ts2...> &b,\n                                                   index_sequence<Is1...>, index_sequence<Is2...>) {\n    return {a.text[Is1]..., b.text[Is2]...};\n}\n\ntemplate <size_t N1, size_t N2, typename... Ts1, typename... 
Ts2>\nconstexpr descr<N1 + N2, Ts1..., Ts2...> operator+(const descr<N1, Ts1...> &a, const descr<N2, Ts2...> &b) {\n    return plus_impl(a, b, make_index_sequence<N1>(), make_index_sequence<N2>());\n}\n\ntemplate <size_t N>\nconstexpr descr<N - 1> _(char const(&text)[N]) { return descr<N - 1>(text); }\nconstexpr descr<0> _(char const(&)[1]) { return {}; }\n\ntemplate <size_t Rem, size_t... Digits> struct int_to_str : int_to_str<Rem/10, Rem%10, Digits...> { };\ntemplate <size_t...Digits> struct int_to_str<0, Digits...> {\n    static constexpr auto digits = descr<sizeof...(Digits)>(('0' + Digits)...);\n};\n\n// Ternary description (like std::conditional)\ntemplate <bool B, size_t N1, size_t N2>\nconstexpr enable_if_t<B, descr<N1 - 1>> _(char const(&text1)[N1], char const(&)[N2]) {\n    return _(text1);\n}\ntemplate <bool B, size_t N1, size_t N2>\nconstexpr enable_if_t<!B, descr<N2 - 1>> _(char const(&)[N1], char const(&text2)[N2]) {\n    return _(text2);\n}\n\ntemplate <bool B, typename T1, typename T2>\nconstexpr enable_if_t<B, T1> _(const T1 &d, const T2 &) { return d; }\ntemplate <bool B, typename T1, typename T2>\nconstexpr enable_if_t<!B, T2> _(const T1 &, const T2 &d) { return d; }\n\ntemplate <size_t Size> auto constexpr _() -> decltype(int_to_str<Size / 10, Size % 10>::digits) {\n    return int_to_str<Size / 10, Size % 10>::digits;\n}\n\ntemplate <typename Type> constexpr descr<1, Type> _() { return {'%'}; }\n\nconstexpr descr<0> concat() { return {}; }\n\ntemplate <size_t N, typename... Ts>\nconstexpr descr<N, Ts...> concat(const descr<N, Ts...> &descr) { return descr; }\n\ntemplate <size_t N, typename... Ts, typename... Args>\nconstexpr auto concat(const descr<N, Ts...> &d, const Args &...args)\n    -> decltype(std::declval<descr<N + 2, Ts...>>() + concat(args...)) {\n    return d + _(\", \") + concat(args...);\n}\n\ntemplate <size_t N, typename... 
Ts>\nconstexpr descr<N + 2, Ts...> type_descr(const descr<N, Ts...> &descr) {\n    return _(\"{\") + descr + _(\"}\");\n}\n\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/detail/init.h",
    "content": "/*\n    pybind11/detail/init.h: init factory function implementation and support code.\n\n    Copyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"class.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\ntemplate <>\nclass type_caster<value_and_holder> {\npublic:\n    bool load(handle h, bool) {\n        value = reinterpret_cast<value_and_holder *>(h.ptr());\n        return true;\n    }\n\n    template <typename> using cast_op_type = value_and_holder &;\n    operator value_and_holder &() { return *value; }\n    static constexpr auto name = _<value_and_holder>();\n\nprivate:\n    value_and_holder *value = nullptr;\n};\n\nNAMESPACE_BEGIN(initimpl)\n\ninline void no_nullptr(void *ptr) {\n    if (!ptr) throw type_error(\"pybind11::init(): factory function returned nullptr\");\n}\n\n// Implementing functions for all forms of py::init<...> and py::init(...)\ntemplate <typename Class> using Cpp = typename Class::type;\ntemplate <typename Class> using Alias = typename Class::type_alias;\ntemplate <typename Class> using Holder = typename Class::holder_type;\n\ntemplate <typename Class> using is_alias_constructible = std::is_constructible<Alias<Class>, Cpp<Class> &&>;\n\n// Takes a Cpp pointer and returns true if it actually is a polymorphic Alias instance.\ntemplate <typename Class, enable_if_t<Class::has_alias, int> = 0>\nbool is_alias(Cpp<Class> *ptr) {\n    return dynamic_cast<Alias<Class> *>(ptr) != nullptr;\n}\n// Failing fallback version of the above for a no-alias class (always returns false)\ntemplate <typename /*Class*/>\nconstexpr bool is_alias(void *) { return false; }\n\n// Constructs and returns a new object; if the given arguments don't map to a constructor, we fall\n// back to brace aggregate initiailization so that for aggregate initialization can be used 
with\n// py::init, e.g.  `py::init<int, int>` to initialize a `struct T { int a; int b; }`.  For\n// non-aggregate types, we need to use an ordinary T(...) constructor (invoking as `T{...}` usually\n// works, but will not do the expected thing when `T` has an `initializer_list<T>` constructor).\ntemplate <typename Class, typename... Args, detail::enable_if_t<std::is_constructible<Class, Args...>::value, int> = 0>\ninline Class *construct_or_initialize(Args &&...args) { return new Class(std::forward<Args>(args)...); }\ntemplate <typename Class, typename... Args, detail::enable_if_t<!std::is_constructible<Class, Args...>::value, int> = 0>\ninline Class *construct_or_initialize(Args &&...args) { return new Class{std::forward<Args>(args)...}; }\n\n// Attempts to constructs an alias using a `Alias(Cpp &&)` constructor.  This allows types with\n// an alias to provide only a single Cpp factory function as long as the Alias can be\n// constructed from an rvalue reference of the base Cpp type.  This means that Alias classes\n// can, when appropriate, simply define a `Alias(Cpp &&)` constructor rather than needing to\n// inherit all the base class constructors.\ntemplate <typename Class>\nvoid construct_alias_from_cpp(std::true_type /*is_alias_constructible*/,\n                              value_and_holder &v_h, Cpp<Class> &&base) {\n    v_h.value_ptr() = new Alias<Class>(std::move(base));\n}\ntemplate <typename Class>\n[[noreturn]] void construct_alias_from_cpp(std::false_type /*!is_alias_constructible*/,\n                                           value_and_holder &, Cpp<Class> &&) {\n    throw type_error(\"pybind11::init(): unable to convert returned instance to required \"\n                     \"alias class: no `Alias<Class>(Class &&)` constructor available\");\n}\n\n// Error-generating fallback for factories that don't match one of the below construction\n// mechanisms.\ntemplate <typename Class>\nvoid construct(...) 
{\n    static_assert(!std::is_same<Class, Class>::value /* always false */,\n            \"pybind11::init(): init function must return a compatible pointer, \"\n            \"holder, or value\");\n}\n\n// Pointer return v1: the factory function returns a class pointer for a registered class.\n// If we don't need an alias (because this class doesn't have one, or because the final type is\n// inherited on the Python side) we can simply take over ownership.  Otherwise we need to try to\n// construct an Alias from the returned base instance.\ntemplate <typename Class>\nvoid construct(value_and_holder &v_h, Cpp<Class> *ptr, bool need_alias) {\n    no_nullptr(ptr);\n    if (Class::has_alias && need_alias && !is_alias<Class>(ptr)) {\n        // We're going to try to construct an alias by moving the cpp type.  Whether or not\n        // that succeeds, we still need to destroy the original cpp pointer (either the\n        // moved away leftover, if the alias construction works, or the value itself if we\n        // throw an error), but we can't just call `delete ptr`: it might have a special\n        // deleter, or might be shared_from_this.  
So we construct a holder around it as if\n        // it was a normal instance, then steal the holder away into a local variable; thus\n        // the holder and destruction happens when we leave the C++ scope, and the holder\n        // class gets to handle the destruction however it likes.\n        v_h.value_ptr() = ptr;\n        v_h.set_instance_registered(true); // To prevent init_instance from registering it\n        v_h.type->init_instance(v_h.inst, nullptr); // Set up the holder\n        Holder<Class> temp_holder(std::move(v_h.holder<Holder<Class>>())); // Steal the holder\n        v_h.type->dealloc(v_h); // Destroys the moved-out holder remains, resets value ptr to null\n        v_h.set_instance_registered(false);\n\n        construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(*ptr));\n    } else {\n        // Otherwise the type isn't inherited, so we don't need an Alias\n        v_h.value_ptr() = ptr;\n    }\n}\n\n// Pointer return v2: a factory that always returns an alias instance ptr.  We simply take over\n// ownership of the pointer.\ntemplate <typename Class, enable_if_t<Class::has_alias, int> = 0>\nvoid construct(value_and_holder &v_h, Alias<Class> *alias_ptr, bool) {\n    no_nullptr(alias_ptr);\n    v_h.value_ptr() = static_cast<Cpp<Class> *>(alias_ptr);\n}\n\n// Holder return: copy its pointer, and move or copy the returned holder into the new instance's\n// holder.  
This also handles types like std::shared_ptr<T> and std::unique_ptr<T> where T is a\n// derived type (through those holder's implicit conversion from derived class holder constructors).\ntemplate <typename Class>\nvoid construct(value_and_holder &v_h, Holder<Class> holder, bool need_alias) {\n    auto *ptr = holder_helper<Holder<Class>>::get(holder);\n    // If we need an alias, check that the held pointer is actually an alias instance\n    if (Class::has_alias && need_alias && !is_alias<Class>(ptr))\n        throw type_error(\"pybind11::init(): construction failed: returned holder-wrapped instance \"\n                         \"is not an alias instance\");\n\n    v_h.value_ptr() = ptr;\n    v_h.type->init_instance(v_h.inst, &holder);\n}\n\n// return-by-value version 1: returning a cpp class by value.  If the class has an alias and an\n// alias is required the alias must have an `Alias(Cpp &&)` constructor so that we can construct\n// the alias from the base when needed (i.e. because of Python-side inheritance).  When we don't\n// need it, we simply move-construct the cpp value into a new instance.\ntemplate <typename Class>\nvoid construct(value_and_holder &v_h, Cpp<Class> &&result, bool need_alias) {\n    static_assert(std::is_move_constructible<Cpp<Class>>::value,\n        \"pybind11::init() return-by-value factory function requires a movable class\");\n    if (Class::has_alias && need_alias)\n        construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(result));\n    else\n        v_h.value_ptr() = new Cpp<Class>(std::move(result));\n}\n\n// return-by-value version 2: returning a value of the alias type itself.  We move-construct an\n// Alias instance (even if no the python-side inheritance is involved).  
The is intended for\n// cases where Alias initialization is always desired.\ntemplate <typename Class>\nvoid construct(value_and_holder &v_h, Alias<Class> &&result, bool) {\n    static_assert(std::is_move_constructible<Alias<Class>>::value,\n        \"pybind11::init() return-by-alias-value factory function requires a movable alias class\");\n    v_h.value_ptr() = new Alias<Class>(std::move(result));\n}\n\n// Implementing class for py::init<...>()\ntemplate <typename... Args>\nstruct constructor {\n    template <typename Class, typename... Extra, enable_if_t<!Class::has_alias, int> = 0>\n    static void execute(Class &cl, const Extra&... extra) {\n        cl.def(\"__init__\", [](value_and_holder &v_h, Args... args) {\n            v_h.value_ptr() = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);\n        }, is_new_style_constructor(), extra...);\n    }\n\n    template <typename Class, typename... Extra,\n              enable_if_t<Class::has_alias &&\n                          std::is_constructible<Cpp<Class>, Args...>::value, int> = 0>\n    static void execute(Class &cl, const Extra&... extra) {\n        cl.def(\"__init__\", [](value_and_holder &v_h, Args... args) {\n            if (Py_TYPE(v_h.inst) == v_h.type->type)\n                v_h.value_ptr() = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);\n            else\n                v_h.value_ptr() = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);\n        }, is_new_style_constructor(), extra...);\n    }\n\n    template <typename Class, typename... Extra,\n              enable_if_t<Class::has_alias &&\n                          !std::is_constructible<Cpp<Class>, Args...>::value, int> = 0>\n    static void execute(Class &cl, const Extra&... extra) {\n        cl.def(\"__init__\", [](value_and_holder &v_h, Args... 
args) {\n            v_h.value_ptr() = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);\n        }, is_new_style_constructor(), extra...);\n    }\n};\n\n// Implementing class for py::init_alias<...>()\ntemplate <typename... Args> struct alias_constructor {\n    template <typename Class, typename... Extra,\n              enable_if_t<Class::has_alias && std::is_constructible<Alias<Class>, Args...>::value, int> = 0>\n    static void execute(Class &cl, const Extra&... extra) {\n        cl.def(\"__init__\", [](value_and_holder &v_h, Args... args) {\n            v_h.value_ptr() = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);\n        }, is_new_style_constructor(), extra...);\n    }\n};\n\n// Implementation class for py::init(Func) and py::init(Func, AliasFunc)\ntemplate <typename CFunc, typename AFunc = void_type (*)(),\n          typename = function_signature_t<CFunc>, typename = function_signature_t<AFunc>>\nstruct factory;\n\n// Specialization for py::init(Func)\ntemplate <typename Func, typename Return, typename... Args>\nstruct factory<Func, void_type (*)(), Return(Args...)> {\n    remove_reference_t<Func> class_factory;\n\n    factory(Func &&f) : class_factory(std::forward<Func>(f)) { }\n\n    // The given class either has no alias or has no separate alias factory;\n    // this always constructs the class itself.  If the class is registered with an alias\n    // type and an alias instance is needed (i.e. because the final type is a Python class\n    // inheriting from the C++ type) the returned value needs to either already be an alias\n    // instance, or the alias needs to be constructible from a `Class &&` argument.\n    template <typename Class, typename... 
Extra>\n    void execute(Class &cl, const Extra &...extra) && {\n        #if defined(PYBIND11_CPP14)\n        cl.def(\"__init__\", [func = std::move(class_factory)]\n        #else\n        auto &func = class_factory;\n        cl.def(\"__init__\", [func]\n        #endif\n        (value_and_holder &v_h, Args... args) {\n            construct<Class>(v_h, func(std::forward<Args>(args)...),\n                             Py_TYPE(v_h.inst) != v_h.type->type);\n        }, is_new_style_constructor(), extra...);\n    }\n};\n\n// Specialization for py::init(Func, AliasFunc)\ntemplate <typename CFunc, typename AFunc,\n          typename CReturn, typename... CArgs, typename AReturn, typename... AArgs>\nstruct factory<CFunc, AFunc, CReturn(CArgs...), AReturn(AArgs...)> {\n    static_assert(sizeof...(CArgs) == sizeof...(AArgs),\n                  \"pybind11::init(class_factory, alias_factory): class and alias factories \"\n                  \"must have identical argument signatures\");\n    static_assert(all_of<std::is_same<CArgs, AArgs>...>::value,\n                  \"pybind11::init(class_factory, alias_factory): class and alias factories \"\n                  \"must have identical argument signatures\");\n\n    remove_reference_t<CFunc> class_factory;\n    remove_reference_t<AFunc> alias_factory;\n\n    factory(CFunc &&c, AFunc &&a)\n        : class_factory(std::forward<CFunc>(c)), alias_factory(std::forward<AFunc>(a)) { }\n\n    // The class factory is called when the `self` type passed to `__init__` is the direct\n    // class (i.e. not inherited), the alias factory when `self` is a Python-side subtype.\n    template <typename Class, typename... Extra>\n    void execute(Class &cl, const Extra&... 
extra) && {\n        static_assert(Class::has_alias, \"The two-argument version of `py::init()` can \"\n                                        \"only be used if the class has an alias\");\n        #if defined(PYBIND11_CPP14)\n        cl.def(\"__init__\", [class_func = std::move(class_factory), alias_func = std::move(alias_factory)]\n        #else\n        auto &class_func = class_factory;\n        auto &alias_func = alias_factory;\n        cl.def(\"__init__\", [class_func, alias_func]\n        #endif\n        (value_and_holder &v_h, CArgs... args) {\n            if (Py_TYPE(v_h.inst) == v_h.type->type)\n                // If the instance type equals the registered type we don't have inheritance, so\n                // don't need the alias and can construct using the class function:\n                construct<Class>(v_h, class_func(std::forward<CArgs>(args)...), false);\n            else\n                construct<Class>(v_h, alias_func(std::forward<CArgs>(args)...), true);\n        }, is_new_style_constructor(), extra...);\n    }\n};\n\n/// Set just the C++ state. 
Same as `__init__`.\ntemplate <typename Class, typename T>\nvoid setstate(value_and_holder &v_h, T &&result, bool need_alias) {\n    construct<Class>(v_h, std::forward<T>(result), need_alias);\n}\n\n/// Set both the C++ and Python states\ntemplate <typename Class, typename T, typename O,\n          enable_if_t<std::is_convertible<O, handle>::value, int> = 0>\nvoid setstate(value_and_holder &v_h, std::pair<T, O> &&result, bool need_alias) {\n    construct<Class>(v_h, std::move(result.first), need_alias);\n    setattr((PyObject *) v_h.inst, \"__dict__\", result.second);\n}\n\n/// Implementation for py::pickle(GetState, SetState)\ntemplate <typename Get, typename Set,\n          typename = function_signature_t<Get>, typename = function_signature_t<Set>>\nstruct pickle_factory;\n\ntemplate <typename Get, typename Set,\n          typename RetState, typename Self, typename NewInstance, typename ArgState>\nstruct pickle_factory<Get, Set, RetState(Self), NewInstance(ArgState)> {\n    static_assert(std::is_same<intrinsic_t<RetState>, intrinsic_t<ArgState>>::value,\n                  \"The type returned by `__getstate__` must be the same \"\n                  \"as the argument accepted by `__setstate__`\");\n\n    remove_reference_t<Get> get;\n    remove_reference_t<Set> set;\n\n    pickle_factory(Get get, Set set)\n        : get(std::forward<Get>(get)), set(std::forward<Set>(set)) { }\n\n    template <typename Class, typename... 
Extra>\n    void execute(Class &cl, const Extra &...extra) && {\n        cl.def(\"__getstate__\", std::move(get));\n\n#if defined(PYBIND11_CPP14)\n        cl.def(\"__setstate__\", [func = std::move(set)]\n#else\n        auto &func = set;\n        cl.def(\"__setstate__\", [func]\n#endif\n        (value_and_holder &v_h, ArgState state) {\n            setstate<Class>(v_h, func(std::forward<ArgState>(state)),\n                            Py_TYPE(v_h.inst) != v_h.type->type);\n        }, is_new_style_constructor(), extra...);\n    }\n};\n\nNAMESPACE_END(initimpl)\nNAMESPACE_END(detail)\nNAMESPACE_END(pybind11)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/detail/internals.h",
    "content": "/*\n    pybind11/detail/internals.h: Internal data structure and related functions\n\n    Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"../pytypes.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n// Forward declarations\ninline PyTypeObject *make_static_property_type();\ninline PyTypeObject *make_default_metaclass();\ninline PyObject *make_object_base_type(PyTypeObject *metaclass);\n\n// The old Python Thread Local Storage (TLS) API is deprecated in Python 3.7 in favor of the new\n// Thread Specific Storage (TSS) API.\n#if PY_VERSION_HEX >= 0x03070000\n#    define PYBIND11_TLS_KEY_INIT(var) Py_tss_t *var = nullptr\n#    define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get((key))\n#    define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set((key), (tstate))\n#    define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set((key), nullptr)\n#else\n    // Usually an int but a long on Cygwin64 with Python 3.x\n#    define PYBIND11_TLS_KEY_INIT(var) decltype(PyThread_create_key()) var = 0\n#    define PYBIND11_TLS_GET_VALUE(key) PyThread_get_key_value((key))\n#    if PY_MAJOR_VERSION < 3\n#        define PYBIND11_TLS_DELETE_VALUE(key)                               \\\n             PyThread_delete_key_value(key)\n#        define PYBIND11_TLS_REPLACE_VALUE(key, value)                       \\\n             do {                                                            \\\n                 PyThread_delete_key_value((key));                           \\\n                 PyThread_set_key_value((key), (value));                     \\\n             } while (false)\n#    else\n#        define PYBIND11_TLS_DELETE_VALUE(key)                               \\\n             PyThread_set_key_value((key), nullptr)\n#        define PYBIND11_TLS_REPLACE_VALUE(key, value)          
             \\\n             PyThread_set_key_value((key), (value))\n#    endif\n#endif\n\n// Python loads modules by default with dlopen with the RTLD_LOCAL flag; under libc++ and possibly\n// other STLs, this means `typeid(A)` from one module won't equal `typeid(A)` from another module\n// even when `A` is the same, non-hidden-visibility type (e.g. from a common include).  Under\n// libstdc++, this doesn't happen: equality and the type_index hash are based on the type name,\n// which works.  If not under a known-good stl, provide our own name-based hash and equality\n// functions that use the type name.\n#if defined(__GLIBCXX__)\ninline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { return lhs == rhs; }\nusing type_hash = std::hash<std::type_index>;\nusing type_equal_to = std::equal_to<std::type_index>;\n#else\ninline bool same_type(const std::type_info &lhs, const std::type_info &rhs) {\n    return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;\n}\n\nstruct type_hash {\n    size_t operator()(const std::type_index &t) const {\n        size_t hash = 5381;\n        const char *ptr = t.name();\n        while (auto c = static_cast<unsigned char>(*ptr++))\n            hash = (hash * 33) ^ c;\n        return hash;\n    }\n};\n\nstruct type_equal_to {\n    bool operator()(const std::type_index &lhs, const std::type_index &rhs) const {\n        return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;\n    }\n};\n#endif\n\ntemplate <typename value_type>\nusing type_map = std::unordered_map<std::type_index, value_type, type_hash, type_equal_to>;\n\nstruct overload_hash {\n    inline size_t operator()(const std::pair<const PyObject *, const char *>& v) const {\n        size_t value = std::hash<const void *>()(v.first);\n        value ^= std::hash<const void *>()(v.second)  + 0x9e3779b9 + (value<<6) + (value>>2);\n        return value;\n    }\n};\n\n/// Internal data structure used to track registered 
instances and types.\n/// Whenever binary incompatible changes are made to this structure,\n/// `PYBIND11_INTERNALS_VERSION` must be incremented.\nstruct internals {\n    type_map<type_info *> registered_types_cpp; // std::type_index -> pybind11's type information\n    std::unordered_map<PyTypeObject *, std::vector<type_info *>> registered_types_py; // PyTypeObject* -> base type_info(s)\n    std::unordered_multimap<const void *, instance*> registered_instances; // void * -> instance*\n    std::unordered_set<std::pair<const PyObject *, const char *>, overload_hash> inactive_overload_cache;\n    type_map<std::vector<bool (*)(PyObject *, void *&)>> direct_conversions;\n    std::unordered_map<const PyObject *, std::vector<PyObject *>> patients;\n    std::forward_list<void (*) (std::exception_ptr)> registered_exception_translators;\n    std::unordered_map<std::string, void *> shared_data; // Custom data to be shared across extensions\n    std::vector<PyObject *> loader_patient_stack; // Used by `loader_life_support`\n    std::forward_list<std::string> static_strings; // Stores the std::strings backing detail::c_str()\n    PyTypeObject *static_property_type;\n    PyTypeObject *default_metaclass;\n    PyObject *instance_base;\n#if defined(WITH_THREAD)\n    PYBIND11_TLS_KEY_INIT(tstate);\n    PyInterpreterState *istate = nullptr;\n#endif\n};\n\n/// Additional type information which does not fit into the PyTypeObject.\n/// Changes to this struct also require bumping `PYBIND11_INTERNALS_VERSION`.\nstruct type_info {\n    PyTypeObject *type;\n    const std::type_info *cpptype;\n    size_t type_size, type_align, holder_size_in_ptrs;\n    void *(*operator_new)(size_t);\n    void (*init_instance)(instance *, const void *);\n    void (*dealloc)(value_and_holder &v_h);\n    std::vector<PyObject *(*)(PyObject *, PyTypeObject *)> implicit_conversions;\n    std::vector<std::pair<const std::type_info *, void *(*)(void *)>> implicit_casts;\n    std::vector<bool (*)(PyObject *, void 
*&)> *direct_conversions;\n    buffer_info *(*get_buffer)(PyObject *, void *) = nullptr;\n    void *get_buffer_data = nullptr;\n    void *(*module_local_load)(PyObject *, const type_info *) = nullptr;\n    /* A simple type never occurs as a (direct or indirect) parent\n     * of a class that makes use of multiple inheritance */\n    bool simple_type : 1;\n    /* True if there is no multiple inheritance in this type's inheritance tree */\n    bool simple_ancestors : 1;\n    /* for base vs derived holder_type checks */\n    bool default_holder : 1;\n    /* true if this is a type registered with py::module_local */\n    bool module_local : 1;\n};\n\n/// Tracks the `internals` and `type_info` ABI version independent of the main library version\n#define PYBIND11_INTERNALS_VERSION 3\n\n#if defined(_DEBUG)\n#   define PYBIND11_BUILD_TYPE \"_debug\"\n#else\n#   define PYBIND11_BUILD_TYPE \"\"\n#endif\n\n#if defined(WITH_THREAD)\n#  define PYBIND11_INTERNALS_KIND \"\"\n#else\n#  define PYBIND11_INTERNALS_KIND \"_without_thread\"\n#endif\n\n#define PYBIND11_INTERNALS_ID \"__pybind11_internals_v\" \\\n    PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) PYBIND11_INTERNALS_KIND PYBIND11_BUILD_TYPE \"__\"\n\n#define PYBIND11_MODULE_LOCAL_ID \"__pybind11_module_local_v\" \\\n    PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) PYBIND11_INTERNALS_KIND PYBIND11_BUILD_TYPE \"__\"\n\n/// Each module locally stores a pointer to the `internals` data. 
The data\n/// itself is shared among modules with the same `PYBIND11_INTERNALS_ID`.\ninline internals **&get_internals_pp() {\n    static internals **internals_pp = nullptr;\n    return internals_pp;\n}\n\n/// Return a reference to the current `internals` data\nPYBIND11_NOINLINE inline internals &get_internals() {\n    auto **&internals_pp = get_internals_pp();\n    if (internals_pp && *internals_pp)\n        return **internals_pp;\n\n    constexpr auto *id = PYBIND11_INTERNALS_ID;\n    auto builtins = handle(PyEval_GetBuiltins());\n    if (builtins.contains(id) && isinstance<capsule>(builtins[id])) {\n        internals_pp = static_cast<internals **>(capsule(builtins[id]));\n\n        // We loaded builtins through python's builtins, which means that our `error_already_set`\n        // and `builtin_exception` may be different local classes than the ones set up in the\n        // initial exception translator, below, so add another for our local exception classes.\n        //\n        // libstdc++ doesn't require this (types there are identified only by name)\n#if !defined(__GLIBCXX__)\n        (*internals_pp)->registered_exception_translators.push_front(\n            [](std::exception_ptr p) -> void {\n                try {\n                    if (p) std::rethrow_exception(p);\n                } catch (error_already_set &e)       { e.restore();   return;\n                } catch (const builtin_exception &e) { e.set_error(); return;\n                }\n            }\n        );\n#endif\n    } else {\n        if (!internals_pp) internals_pp = new internals*();\n        auto *&internals_ptr = *internals_pp;\n        internals_ptr = new internals();\n#if defined(WITH_THREAD)\n        PyEval_InitThreads();\n        PyThreadState *tstate = PyThreadState_Get();\n        #if PY_VERSION_HEX >= 0x03070000\n            internals_ptr->tstate = PyThread_tss_alloc();\n            if (!internals_ptr->tstate || PyThread_tss_create(internals_ptr->tstate))\n                
pybind11_fail(\"get_internals: could not successfully initialize the TSS key!\");\n            PyThread_tss_set(internals_ptr->tstate, tstate);\n        #else\n            internals_ptr->tstate = PyThread_create_key();\n            if (internals_ptr->tstate == -1)\n                pybind11_fail(\"get_internals: could not successfully initialize the TLS key!\");\n            PyThread_set_key_value(internals_ptr->tstate, tstate);\n        #endif\n        internals_ptr->istate = tstate->interp;\n#endif\n        builtins[id] = capsule(internals_pp);\n        internals_ptr->registered_exception_translators.push_front(\n            [](std::exception_ptr p) -> void {\n                try {\n                    if (p) std::rethrow_exception(p);\n                } catch (error_already_set &e)           { e.restore();                                    return;\n                } catch (const builtin_exception &e)     { e.set_error();                                  return;\n                } catch (const std::bad_alloc &e)        { PyErr_SetString(PyExc_MemoryError,   e.what()); return;\n                } catch (const std::domain_error &e)     { PyErr_SetString(PyExc_ValueError,    e.what()); return;\n                } catch (const std::invalid_argument &e) { PyErr_SetString(PyExc_ValueError,    e.what()); return;\n                } catch (const std::length_error &e)     { PyErr_SetString(PyExc_ValueError,    e.what()); return;\n                } catch (const std::out_of_range &e)     { PyErr_SetString(PyExc_IndexError,    e.what()); return;\n                } catch (const std::range_error &e)      { PyErr_SetString(PyExc_ValueError,    e.what()); return;\n                } catch (const std::exception &e)        { PyErr_SetString(PyExc_RuntimeError,  e.what()); return;\n                } catch (...) 
{\n                    PyErr_SetString(PyExc_RuntimeError, \"Caught an unknown exception!\");\n                    return;\n                }\n            }\n        );\n        internals_ptr->static_property_type = make_static_property_type();\n        internals_ptr->default_metaclass = make_default_metaclass();\n        internals_ptr->instance_base = make_object_base_type(internals_ptr->default_metaclass);\n    }\n    return **internals_pp;\n}\n\n/// Works like `internals.registered_types_cpp`, but for module-local registered types:\ninline type_map<type_info *> &registered_local_types_cpp() {\n    static type_map<type_info *> locals{};\n    return locals;\n}\n\n/// Constructs a std::string with the given arguments, stores it in `internals`, and returns its\n/// `c_str()`.  Such strings objects have a long storage duration -- the internal strings are only\n/// cleared when the program exits or after interpreter shutdown (when embedding), and so are\n/// suitable for c-style strings needed by Python internals (such as PyTypeObject's tp_name).\ntemplate <typename... Args>\nconst char *c_str(Args &&...args) {\n    auto &strings = get_internals().static_strings;\n    strings.emplace_front(std::forward<Args>(args)...);\n    return strings.front().c_str();\n}\n\nNAMESPACE_END(detail)\n\n/// Returns a named pointer that is shared among all extension modules (using the same\n/// pybind11 version) running in the current interpreter. Names starting with underscores\n/// are reserved for internal usage. Returns `nullptr` if no matching entry was found.\ninline PYBIND11_NOINLINE void *get_shared_data(const std::string &name) {\n    auto &internals = detail::get_internals();\n    auto it = internals.shared_data.find(name);\n    return it != internals.shared_data.end() ? 
it->second : nullptr;\n}\n\n/// Set the shared data that can be later recovered by `get_shared_data()`.\ninline PYBIND11_NOINLINE void *set_shared_data(const std::string &name, void *data) {\n    detail::get_internals().shared_data[name] = data;\n    return data;\n}\n\n/// Returns a typed reference to a shared data entry (by using `get_shared_data()`) if\n/// such entry exists. Otherwise, a new object of default-constructible type `T` is\n/// added to the shared data under the given name and a reference to it is returned.\ntemplate<typename T>\nT &get_or_create_shared_data(const std::string &name) {\n    auto &internals = detail::get_internals();\n    auto it = internals.shared_data.find(name);\n    T *ptr = (T *) (it != internals.shared_data.end() ? it->second : nullptr);\n    if (!ptr) {\n        ptr = new T();\n        internals.shared_data[name] = ptr;\n    }\n    return *ptr;\n}\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/detail/typeid.h",
    "content": "/*\n    pybind11/detail/typeid.h: Compiler-independent access to type identifiers\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include <cstdio>\n#include <cstdlib>\n\n#if defined(__GNUG__)\n#include <cxxabi.h>\n#endif\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n/// Erase all occurrences of a substring\ninline void erase_all(std::string &string, const std::string &search) {\n    for (size_t pos = 0;;) {\n        pos = string.find(search, pos);\n        if (pos == std::string::npos) break;\n        string.erase(pos, search.length());\n    }\n}\n\nPYBIND11_NOINLINE inline void clean_type_id(std::string &name) {\n#if defined(__GNUG__)\n    int status = 0;\n    std::unique_ptr<char, void (*)(void *)> res {\n        abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free };\n    if (status == 0)\n        name = res.get();\n#else\n    detail::erase_all(name, \"class \");\n    detail::erase_all(name, \"struct \");\n    detail::erase_all(name, \"enum \");\n#endif\n    detail::erase_all(name, \"pybind11::\");\n}\nNAMESPACE_END(detail)\n\n/// Return a string representation of a C++ type\ntemplate <typename T> static std::string type_id() {\n    std::string name(typeid(T).name());\n    detail::clean_type_id(name);\n    return name;\n}\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/eigen.h",
    "content": "/*\n    pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"numpy.h\"\n\n#if defined(__INTEL_COMPILER)\n#  pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)\n#elif defined(__GNUG__) || defined(__clang__)\n#  pragma GCC diagnostic push\n#  pragma GCC diagnostic ignored \"-Wconversion\"\n#  pragma GCC diagnostic ignored \"-Wdeprecated-declarations\"\n#  ifdef __clang__\n//   Eigen generates a bunch of implicit-copy-constructor-is-deprecated warnings with -Wdeprecated\n//   under Clang, so disable that warning here:\n#    pragma GCC diagnostic ignored \"-Wdeprecated\"\n#  endif\n#  if __GNUC__ >= 7\n#    pragma GCC diagnostic ignored \"-Wint-in-bool-context\"\n#  endif\n#endif\n\n#if defined(_MSC_VER)\n#  pragma warning(push)\n#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant\n#  pragma warning(disable: 4996) // warning C4996: std::unary_negate is deprecated in C++17\n#endif\n\n#include <Eigen/Core>\n#include <Eigen/SparseCore>\n\n// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit\n// move constructors that break things.  
We could detect this an explicitly copy, but an extra copy\n// of matrices seems highly undesirable.\nstatic_assert(EIGEN_VERSION_AT_LEAST(3,2,7), \"Eigen support in pybind11 requires Eigen >= 3.2.7\");\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\n// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:\nusing EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;\ntemplate <typename MatrixType> using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;\ntemplate <typename MatrixType> using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;\n\nNAMESPACE_BEGIN(detail)\n\n#if EIGEN_VERSION_AT_LEAST(3,3,0)\nusing EigenIndex = Eigen::Index;\n#else\nusing EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;\n#endif\n\n// Matches Eigen::Map, Eigen::Ref, blocks, etc:\ntemplate <typename T> using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>, std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;\ntemplate <typename T> using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;\ntemplate <typename T> using is_eigen_dense_plain = all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;\ntemplate <typename T> using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;\n// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above.  This\n// basically covers anything that can be assigned to a dense matrix but that don't have a typical\n// matrix data layout that can be copied from their .data().  
For example, DiagonalMatrix and\n// SelfAdjointView fall into this category.\ntemplate <typename T> using is_eigen_other = all_of<\n    is_template_base_of<Eigen::EigenBase, T>,\n    negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>\n>;\n\n// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):\ntemplate <bool EigenRowMajor> struct EigenConformable {\n    bool conformable = false;\n    EigenIndex rows = 0, cols = 0;\n    EigenDStride stride{0, 0};      // Only valid if negativestrides is false!\n    bool negativestrides = false;   // If true, do not use stride!\n\n    EigenConformable(bool fits = false) : conformable{fits} {}\n    // Matrix type:\n    EigenConformable(EigenIndex r, EigenIndex c,\n            EigenIndex rstride, EigenIndex cstride) :\n        conformable{true}, rows{r}, cols{c} {\n        // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747\n        if (rstride < 0 || cstride < 0) {\n            negativestrides = true;\n        } else {\n            stride = {EigenRowMajor ? rstride : cstride /* outer stride */,\n                      EigenRowMajor ? cstride : rstride /* inner stride */ };\n        }\n    }\n    // Vector type:\n    EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)\n        : EigenConformable(r, c, r == 1 ? c*stride : stride, c == 1 ? r : r*stride) {}\n\n    template <typename props> bool stride_compatible() const {\n        // To have compatible strides, we need (on both dimensions) one of fully dynamic strides,\n        // matching strides, or a dimension size of 1 (in which case the stride value is irrelevant)\n        return\n            !negativestrides &&\n            (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() ||\n                (EigenRowMajor ? 
cols : rows) == 1) &&\n            (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() ||\n                (EigenRowMajor ? rows : cols) == 1);\n    }\n    operator bool() const { return conformable; }\n};\n\ntemplate <typename Type> struct eigen_extract_stride { using type = Type; };\ntemplate <typename PlainObjectType, int MapOptions, typename StrideType>\nstruct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> { using type = StrideType; };\ntemplate <typename PlainObjectType, int Options, typename StrideType>\nstruct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> { using type = StrideType; };\n\n// Helper struct for extracting information from an Eigen type\ntemplate <typename Type_> struct EigenProps {\n    using Type = Type_;\n    using Scalar = typename Type::Scalar;\n    using StrideType = typename eigen_extract_stride<Type>::type;\n    static constexpr EigenIndex\n        rows = Type::RowsAtCompileTime,\n        cols = Type::ColsAtCompileTime,\n        size = Type::SizeAtCompileTime;\n    static constexpr bool\n        row_major = Type::IsRowMajor,\n        vector = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1\n        fixed_rows = rows != Eigen::Dynamic,\n        fixed_cols = cols != Eigen::Dynamic,\n        fixed = size != Eigen::Dynamic, // Fully-fixed size\n        dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size\n\n    template <EigenIndex i, EigenIndex ifzero> using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;\n    static constexpr EigenIndex inner_stride = if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,\n                                outer_stride = if_zero<StrideType::OuterStrideAtCompileTime,\n                                                       vector ? size : row_major ? 
cols : rows>::value;\n    static constexpr bool dynamic_stride = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;\n    static constexpr bool requires_row_major = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;\n    static constexpr bool requires_col_major = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;\n\n    // Takes an input array and determines whether we can make it fit into the Eigen type.  If\n    // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector\n    // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).\n    static EigenConformable<row_major> conformable(const array &a) {\n        const auto dims = a.ndim();\n        if (dims < 1 || dims > 2)\n            return false;\n\n        if (dims == 2) { // Matrix type: require exact match (or dynamic)\n\n            EigenIndex\n                np_rows = a.shape(0),\n                np_cols = a.shape(1),\n                np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),\n                np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));\n            if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols))\n                return false;\n\n            return {np_rows, np_cols, np_rstride, np_cstride};\n        }\n\n        // Otherwise we're storing an n-vector.  Only one of the strides will be used, but whichever\n        // is used, we want the (single) numpy stride value.\n        const EigenIndex n = a.shape(0),\n              stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));\n\n        if (vector) { // Eigen type is a compile-time vector\n            if (fixed && size != n)\n                return false; // Vector size mismatch\n            return {rows == 1 ? 1 : n, cols == 1 ? 
1 : n, stride};\n        }\n        else if (fixed) {\n            // The type has a fixed size, but is not a vector: abort\n            return false;\n        }\n        else if (fixed_cols) {\n            // Since this isn't a vector, cols must be != 1.  We allow this only if it exactly\n            // equals the number of elements (rows is Dynamic, and so 1 row is allowed).\n            if (cols != n) return false;\n            return {1, n, stride};\n        }\n        else {\n            // Otherwise it's either fully dynamic, or column dynamic; both become a column vector\n            if (fixed_rows && rows != n) return false;\n            return {n, 1, stride};\n        }\n    }\n\n    static constexpr bool show_writeable = is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;\n    static constexpr bool show_order = is_eigen_dense_map<Type>::value;\n    static constexpr bool show_c_contiguous = show_order && requires_row_major;\n    static constexpr bool show_f_contiguous = !show_c_contiguous && show_order && requires_col_major;\n\n    static constexpr auto descriptor =\n        _(\"numpy.ndarray[\") + npy_format_descriptor<Scalar>::name +\n        _(\"[\")  + _<fixed_rows>(_<(size_t) rows>(), _(\"m\")) +\n        _(\", \") + _<fixed_cols>(_<(size_t) cols>(), _(\"n\")) +\n        _(\"]\") +\n        // For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to be\n        // satisfied: writeable=True (for a mutable reference), and, depending on the map's stride\n        // options, possibly f_contiguous or c_contiguous.  
We include them in the descriptor output\n        // to provide some hint as to why a TypeError is occurring (otherwise it can be confusing to\n        // see that a function accepts a 'numpy.ndarray[float64[3,2]]' and an error message that you\n        // *gave* a numpy.ndarray of the right type and dimensions.\n        _<show_writeable>(\", flags.writeable\", \"\") +\n        _<show_c_contiguous>(\", flags.c_contiguous\", \"\") +\n        _<show_f_contiguous>(\", flags.f_contiguous\", \"\") +\n        _(\"]\");\n};\n\n// Casts an Eigen type to numpy array.  If given a base, the numpy array references the src data,\n// otherwise it'll make a copy.  writeable lets you turn off the writeable flag for the array.\ntemplate <typename props> handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {\n    constexpr ssize_t elem_size = sizeof(typename props::Scalar);\n    array a;\n    if (props::vector)\n        a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);\n    else\n        a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },\n                  src.data(), base);\n\n    if (!writeable)\n        array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;\n\n    return a.release();\n}\n\n// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that\n// reference the Eigen object's data with `base` as the python-registered base class (if omitted,\n// the base will be set to None, and lifetime management is up to the caller).  The numpy array is\n// non-writeable if the given type is const.\ntemplate <typename props, typename Type>\nhandle eigen_ref_array(Type &src, handle parent = none()) {\n    // none here is to get past array's should-we-copy detection, which currently always\n    // copies when there is no base.  
Setting the base to None should be harmless.\n    return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);\n}\n\n// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a numpy\n// array that references the encapsulated data with a python-side reference to the capsule to tie\n// its destruction to that of any dependent python objects.  Const-ness is determined by whether or\n// not the Type of the pointer given is const.\ntemplate <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>\nhandle eigen_encapsulate(Type *src) {\n    capsule base(src, [](void *o) { delete static_cast<Type *>(o); });\n    return eigen_ref_array<props>(*src, base);\n}\n\n// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense\n// types.\ntemplate<typename Type>\nstruct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {\n    using Scalar = typename Type::Scalar;\n    using props = EigenProps<Type>;\n\n    bool load(handle src, bool convert) {\n        // If we're in no-convert mode, only load if given an array of the correct type\n        if (!convert && !isinstance<array_t<Scalar>>(src))\n            return false;\n\n        // Coerce into an array, but don't do type conversion yet; the copy below handles it.\n        auto buf = array::ensure(src);\n\n        if (!buf)\n            return false;\n\n        auto dims = buf.ndim();\n        if (dims < 1 || dims > 2)\n            return false;\n\n        auto fits = props::conformable(buf);\n        if (!fits)\n            return false;\n\n        // Allocate the new type, then build a numpy reference into it\n        value = Type(fits.rows, fits.cols);\n        auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));\n        if (dims == 1) ref = ref.squeeze();\n        else if (ref.ndim() == 1) buf = buf.squeeze();\n\n        int result = 
detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());\n\n        if (result < 0) { // Copy failed!\n            PyErr_Clear();\n            return false;\n        }\n\n        return true;\n    }\n\nprivate:\n\n    // Cast implementation\n    template <typename CType>\n    static handle cast_impl(CType *src, return_value_policy policy, handle parent) {\n        switch (policy) {\n            case return_value_policy::take_ownership:\n            case return_value_policy::automatic:\n                return eigen_encapsulate<props>(src);\n            case return_value_policy::move:\n                return eigen_encapsulate<props>(new CType(std::move(*src)));\n            case return_value_policy::copy:\n                return eigen_array_cast<props>(*src);\n            case return_value_policy::reference:\n            case return_value_policy::automatic_reference:\n                return eigen_ref_array<props>(*src);\n            case return_value_policy::reference_internal:\n                return eigen_ref_array<props>(*src, parent);\n            default:\n                throw cast_error(\"unhandled return_value_policy: should not happen!\");\n        };\n    }\n\npublic:\n\n    // Normal returned non-reference, non-const value:\n    static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {\n        return cast_impl(&src, return_value_policy::move, parent);\n    }\n    // If you return a non-reference const, we mark the numpy array readonly:\n    static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {\n        return cast_impl(&src, return_value_policy::move, parent);\n    }\n    // lvalue reference return; default (automatic) becomes copy\n    static handle cast(Type &src, return_value_policy policy, handle parent) {\n        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)\n            policy = return_value_policy::copy;\n        return 
cast_impl(&src, policy, parent);\n    }\n    // const lvalue reference return; default (automatic) becomes copy\n    static handle cast(const Type &src, return_value_policy policy, handle parent) {\n        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)\n            policy = return_value_policy::copy;\n        return cast(&src, policy, parent);\n    }\n    // non-const pointer return\n    static handle cast(Type *src, return_value_policy policy, handle parent) {\n        return cast_impl(src, policy, parent);\n    }\n    // const pointer return\n    static handle cast(const Type *src, return_value_policy policy, handle parent) {\n        return cast_impl(src, policy, parent);\n    }\n\n    static constexpr auto name = props::descriptor;\n\n    operator Type*() { return &value; }\n    operator Type&() { return value; }\n    operator Type&&() && { return std::move(value); }\n    template <typename T> using cast_op_type = movable_cast_op_type<T>;\n\nprivate:\n    Type value;\n};\n\n// Base class for casting reference/map/block/etc. objects back to python.\ntemplate <typename MapType> struct eigen_map_caster {\nprivate:\n    using props = EigenProps<MapType>;\n\npublic:\n\n    // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has\n    // to stay around), but we'll allow it under the assumption that you know what you're doing (and\n    // have an appropriate keep_alive in place).  We return a numpy array pointing directly at the\n    // ref's data (The numpy array ends up read-only if the ref was to a const matrix type.) Note\n    // that this means you need to ensure you don't destroy the object in some other way (e.g. 
with\n    // an appropriate keep_alive, or with a reference to a statically allocated matrix).\n    static handle cast(const MapType &src, return_value_policy policy, handle parent) {\n        switch (policy) {\n            case return_value_policy::copy:\n                return eigen_array_cast<props>(src);\n            case return_value_policy::reference_internal:\n                return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);\n            case return_value_policy::reference:\n            case return_value_policy::automatic:\n            case return_value_policy::automatic_reference:\n                return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);\n            default:\n                // move, take_ownership don't make any sense for a ref/map:\n                pybind11_fail(\"Invalid return_value_policy for Eigen Map/Ref/Block type\");\n        }\n    }\n\n    static constexpr auto name = props::descriptor;\n\n    // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return\n    // types but not bound arguments).  We still provide them (with an explicitly delete) so that\n    // you end up here if you try anyway.\n    bool load(handle, bool) = delete;\n    operator MapType() = delete;\n    template <typename> using cast_op_type = MapType;\n};\n\n// We can return any map-like object (but can only load Refs, specialized next):\ntemplate <typename Type> struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>>\n    : eigen_map_caster<Type> {};\n\n// Loader for Ref<...> arguments.  
See the documentation for info on how to make this work without\n// copying (it requires some extra effort in many cases).\ntemplate <typename PlainObjectType, typename StrideType>\nstruct type_caster<\n    Eigen::Ref<PlainObjectType, 0, StrideType>,\n    enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>\n> : public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {\nprivate:\n    using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;\n    using props = EigenProps<Type>;\n    using Scalar = typename props::Scalar;\n    using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;\n    using Array = array_t<Scalar, array::forcecast |\n                ((props::row_major ? props::inner_stride : props::outer_stride) == 1 ? array::c_style :\n                 (props::row_major ? props::outer_stride : props::inner_stride) == 1 ? array::f_style : 0)>;\n    static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;\n    // Delay construction (these have no default constructor)\n    std::unique_ptr<MapType> map;\n    std::unique_ptr<Type> ref;\n    // Our array.  When possible, this is just a numpy array pointing to the source data, but\n    // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an incompatible\n    // layout, or is an array of a type that needs to be converted).  Using a numpy temporary\n    // (rather than an Eigen temporary) saves an extra copy when we need both type conversion and\n    // storage order conversion.  (Note that we refuse to use this temporary copy when loading an\n    // argument for a Ref<M> with M non-const, i.e. a read-write reference).\n    Array copy_or_ref;\npublic:\n    bool load(handle src, bool convert) {\n        // First check whether what we have is already an array of the right type.  
If not, we can't\n        // avoid a copy (because the copy is also going to do type conversion).\n        bool need_copy = !isinstance<Array>(src);\n\n        EigenConformable<props::row_major> fits;\n        if (!need_copy) {\n            // We don't need a converting copy, but we also need to check whether the strides are\n            // compatible with the Ref's stride requirements\n            Array aref = reinterpret_borrow<Array>(src);\n\n            if (aref && (!need_writeable || aref.writeable())) {\n                fits = props::conformable(aref);\n                if (!fits) return false; // Incompatible dimensions\n                if (!fits.template stride_compatible<props>())\n                    need_copy = true;\n                else\n                    copy_or_ref = std::move(aref);\n            }\n            else {\n                need_copy = true;\n            }\n        }\n\n        if (need_copy) {\n            // We need to copy: If we need a mutable reference, or we're not supposed to convert\n            // (either because we're in the no-convert overload pass, or because we're explicitly\n            // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading.\n            if (!convert || need_writeable) return false;\n\n            Array copy = Array::ensure(src);\n            if (!copy) return false;\n            fits = props::conformable(copy);\n            if (!fits || !fits.template stride_compatible<props>())\n                return false;\n            copy_or_ref = std::move(copy);\n            loader_life_support::add_patient(copy_or_ref);\n        }\n\n        ref.reset();\n        map.reset(new MapType(data(copy_or_ref), fits.rows, fits.cols, make_stride(fits.stride.outer(), fits.stride.inner())));\n        ref.reset(new Type(*map));\n\n        return true;\n    }\n\n    operator Type*() { return ref.get(); }\n    operator Type&() { return *ref; }\n    template <typename _T> using cast_op_type = 
pybind11::detail::cast_op_type<_T>;\n\nprivate:\n    template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>\n    Scalar *data(Array &a) { return a.mutable_data(); }\n\n    template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>\n    const Scalar *data(Array &a) { return a.data(); }\n\n    // Attempt to figure out a constructor of `Stride` that will work.\n    // If both strides are fixed, use a default constructor:\n    template <typename S> using stride_ctor_default = bool_constant<\n        S::InnerStrideAtCompileTime != Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&\n        std::is_default_constructible<S>::value>;\n    // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like\n    // Eigen::Stride, and use it:\n    template <typename S> using stride_ctor_dual = bool_constant<\n        !stride_ctor_default<S>::value && std::is_constructible<S, EigenIndex, EigenIndex>::value>;\n    // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use\n    // it (passing whichever stride is dynamic).\n    template <typename S> using stride_ctor_outer = bool_constant<\n        !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&\n        S::OuterStrideAtCompileTime == Eigen::Dynamic && S::InnerStrideAtCompileTime != Eigen::Dynamic &&\n        std::is_constructible<S, EigenIndex>::value>;\n    template <typename S> using stride_ctor_inner = bool_constant<\n        !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&\n        S::InnerStrideAtCompileTime == Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&\n        std::is_constructible<S, EigenIndex>::value>;\n\n    template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>\n    static S make_stride(EigenIndex, EigenIndex) { return S(); }\n    template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 
0>\n    static S make_stride(EigenIndex outer, EigenIndex inner) { return S(outer, inner); }\n    template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>\n    static S make_stride(EigenIndex outer, EigenIndex) { return S(outer); }\n    template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>\n    static S make_stride(EigenIndex, EigenIndex inner) { return S(inner); }\n\n};\n\n// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not\n// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).\n// load() is not supported, but we can cast them into the python domain by first copying to a\n// regular Eigen::Matrix, then casting that.\ntemplate <typename Type>\nstruct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {\nprotected:\n    using Matrix = Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;\n    using props = EigenProps<Matrix>;\npublic:\n    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {\n        handle h = eigen_encapsulate<props>(new Matrix(src));\n        return h;\n    }\n    static handle cast(const Type *src, return_value_policy policy, handle parent) { return cast(*src, policy, parent); }\n\n    static constexpr auto name = props::descriptor;\n\n    // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return\n    // types but not bound arguments).  
We still provide them (with an explicitly delete) so that\n    // you end up here if you try anyway.\n    bool load(handle, bool) = delete;\n    operator Type() = delete;\n    template <typename> using cast_op_type = Type;\n};\n\ntemplate<typename Type>\nstruct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {\n    typedef typename Type::Scalar Scalar;\n    typedef remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())> StorageIndex;\n    typedef typename Type::Index Index;\n    static constexpr bool rowMajor = Type::IsRowMajor;\n\n    bool load(handle src, bool) {\n        if (!src)\n            return false;\n\n        auto obj = reinterpret_borrow<object>(src);\n        object sparse_module = module::import(\"scipy.sparse\");\n        object matrix_type = sparse_module.attr(\n            rowMajor ? \"csr_matrix\" : \"csc_matrix\");\n\n        if (!obj.get_type().is(matrix_type)) {\n            try {\n                obj = matrix_type(obj);\n            } catch (const error_already_set &) {\n                return false;\n            }\n        }\n\n        auto values = array_t<Scalar>((object) obj.attr(\"data\"));\n        auto innerIndices = array_t<StorageIndex>((object) obj.attr(\"indices\"));\n        auto outerIndices = array_t<StorageIndex>((object) obj.attr(\"indptr\"));\n        auto shape = pybind11::tuple((pybind11::object) obj.attr(\"shape\"));\n        auto nnz = obj.attr(\"nnz\").cast<Index>();\n\n        if (!values || !innerIndices || !outerIndices)\n            return false;\n\n        value = Eigen::MappedSparseMatrix<Scalar, Type::Flags, StorageIndex>(\n            shape[0].cast<Index>(), shape[1].cast<Index>(), nnz,\n            outerIndices.mutable_data(), innerIndices.mutable_data(), values.mutable_data());\n\n        return true;\n    }\n\n    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {\n        const_cast<Type&>(src).makeCompressed();\n\n        object matrix_type = 
module::import(\"scipy.sparse\").attr(\n            rowMajor ? \"csr_matrix\" : \"csc_matrix\");\n\n        array data(src.nonZeros(), src.valuePtr());\n        array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());\n        array innerIndices(src.nonZeros(), src.innerIndexPtr());\n\n        return matrix_type(\n            std::make_tuple(data, innerIndices, outerIndices),\n            std::make_pair(src.rows(), src.cols())\n        ).release();\n    }\n\n    PYBIND11_TYPE_CASTER(Type, _<(Type::IsRowMajor) != 0>(\"scipy.sparse.csr_matrix[\", \"scipy.sparse.csc_matrix[\")\n            + npy_format_descriptor<Scalar>::name + _(\"]\"));\n};\n\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n\n#if defined(__GNUG__) || defined(__clang__)\n#  pragma GCC diagnostic pop\n#elif defined(_MSC_VER)\n#  pragma warning(pop)\n#endif\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/embed.h",
    "content": "/*\n    pybind11/embed.h: Support for embedding the interpreter\n\n    Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n#include \"eval.h\"\n\n#if defined(PYPY_VERSION)\n#  error Embedding the interpreter is not supported with PyPy\n#endif\n\n#if PY_MAJOR_VERSION >= 3\n#  define PYBIND11_EMBEDDED_MODULE_IMPL(name)            \\\n      extern \"C\" PyObject *pybind11_init_impl_##name() { \\\n          return pybind11_init_wrapper_##name();         \\\n      }\n#else\n#  define PYBIND11_EMBEDDED_MODULE_IMPL(name)            \\\n      extern \"C\" void pybind11_init_impl_##name() {      \\\n          pybind11_init_wrapper_##name();                \\\n      }\n#endif\n\n/** \\rst\n    Add a new module to the table of builtins for the interpreter. Must be\n    defined in global scope. The first macro parameter is the name of the\n    module (without quotes). The second parameter is the variable which will\n    be used as the interface to add functions and classes to the module.\n\n    .. code-block:: cpp\n\n        PYBIND11_EMBEDDED_MODULE(example, m) {\n            // ... 
initialize functions and classes here\n            m.def(\"foo\", []() {\n                return \"Hello, World!\";\n            });\n        }\n \\endrst */\n#define PYBIND11_EMBEDDED_MODULE(name, variable)                              \\\n    static void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &);    \\\n    static PyObject PYBIND11_CONCAT(*pybind11_init_wrapper_, name)() {        \\\n        auto m = pybind11::module(PYBIND11_TOSTRING(name));                   \\\n        try {                                                                 \\\n            PYBIND11_CONCAT(pybind11_init_, name)(m);                         \\\n            return m.ptr();                                                   \\\n        } catch (pybind11::error_already_set &e) {                            \\\n            PyErr_SetString(PyExc_ImportError, e.what());                     \\\n            return nullptr;                                                   \\\n        } catch (const std::exception &e) {                                   \\\n            PyErr_SetString(PyExc_ImportError, e.what());                     \\\n            return nullptr;                                                   \\\n        }                                                                     \\\n    }                                                                         \\\n    PYBIND11_EMBEDDED_MODULE_IMPL(name)                                       \\\n    pybind11::detail::embedded_module name(PYBIND11_TOSTRING(name),           \\\n                               PYBIND11_CONCAT(pybind11_init_impl_, name));   \\\n    void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &variable)\n\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\n/// Python 2.7/3.x compatible version of `PyImport_AppendInittab` and error checks.\nstruct embedded_module {\n#if PY_MAJOR_VERSION >= 3\n    using init_t = PyObject *(*)();\n#else\n    using init_t = void 
(*)();\n#endif\n    embedded_module(const char *name, init_t init) {\n        if (Py_IsInitialized())\n            pybind11_fail(\"Can't add new modules after the interpreter has been initialized\");\n\n        auto result = PyImport_AppendInittab(name, init);\n        if (result == -1)\n            pybind11_fail(\"Insufficient memory to add a new module\");\n    }\n};\n\nNAMESPACE_END(detail)\n\n/** \\rst\n    Initialize the Python interpreter. No other pybind11 or CPython API functions can be\n    called before this is done; with the exception of `PYBIND11_EMBEDDED_MODULE`. The\n    optional parameter can be used to skip the registration of signal handlers (see the\n    `Python documentation`_ for details). Calling this function again after the interpreter\n    has already been initialized is a fatal error.\n\n    If initializing the Python interpreter fails, then the program is terminated.  (This\n    is controlled by the CPython runtime and is an exception to pybind11's normal behavior\n    of throwing exceptions on errors.)\n\n    .. _Python documentation: https://docs.python.org/3/c-api/init.html#c.Py_InitializeEx\n \\endrst */\ninline void initialize_interpreter(bool init_signal_handlers = true) {\n    if (Py_IsInitialized())\n        pybind11_fail(\"The interpreter is already running\");\n\n    Py_InitializeEx(init_signal_handlers ? 1 : 0);\n\n    // Make .py files in the working directory available by default\n    module::import(\"sys\").attr(\"path\").cast<list>().append(\".\");\n}\n\n/** \\rst\n    Shut down the Python interpreter. No pybind11 or CPython API functions can be called\n    after this. In addition, pybind11 objects must not outlive the interpreter:\n\n    .. 
code-block:: cpp\n\n        { // BAD\n            py::initialize_interpreter();\n            auto hello = py::str(\"Hello, World!\");\n            py::finalize_interpreter();\n        } // <-- BOOM, hello's destructor is called after interpreter shutdown\n\n        { // GOOD\n            py::initialize_interpreter();\n            { // scoped\n                auto hello = py::str(\"Hello, World!\");\n            } // <-- OK, hello is cleaned up properly\n            py::finalize_interpreter();\n        }\n\n        { // BETTER\n            py::scoped_interpreter guard{};\n            auto hello = py::str(\"Hello, World!\");\n        }\n\n    .. warning::\n\n        The interpreter can be restarted by calling `initialize_interpreter` again.\n        Modules created using pybind11 can be safely re-initialized. However, Python\n        itself cannot completely unload binary extension modules and there are several\n        caveats with regard to interpreter restarting. All the details can be found\n        in the CPython documentation. In short, not all interpreter memory may be\n        freed, either due to reference cycles or user-created global data.\n\n \\endrst */\ninline void finalize_interpreter() {\n    handle builtins(PyEval_GetBuiltins());\n    const char *id = PYBIND11_INTERNALS_ID;\n\n    // Get the internals pointer (without creating it if it doesn't exist).  It's possible for the\n    // internals to be created during Py_Finalize() (e.g. 
if a py::capsule calls `get_internals()`\n    // during destruction), so we get the pointer-pointer here and check it after Py_Finalize().\n    detail::internals **internals_ptr_ptr = detail::get_internals_pp();\n    // It could also be stashed in builtins, so look there too:\n    if (builtins.contains(id) && isinstance<capsule>(builtins[id]))\n        internals_ptr_ptr = capsule(builtins[id]);\n\n    Py_Finalize();\n\n    if (internals_ptr_ptr) {\n        delete *internals_ptr_ptr;\n        *internals_ptr_ptr = nullptr;\n    }\n}\n\n/** \\rst\n    Scope guard version of `initialize_interpreter` and `finalize_interpreter`.\n    This a move-only guard and only a single instance can exist.\n\n    .. code-block:: cpp\n\n        #include <pybind11/embed.h>\n\n        int main() {\n            py::scoped_interpreter guard{};\n            py::print(Hello, World!);\n        } // <-- interpreter shutdown\n \\endrst */\nclass scoped_interpreter {\npublic:\n    scoped_interpreter(bool init_signal_handlers = true) {\n        initialize_interpreter(init_signal_handlers);\n    }\n\n    scoped_interpreter(const scoped_interpreter &) = delete;\n    scoped_interpreter(scoped_interpreter &&other) noexcept { other.is_valid = false; }\n    scoped_interpreter &operator=(const scoped_interpreter &) = delete;\n    scoped_interpreter &operator=(scoped_interpreter &&) = delete;\n\n    ~scoped_interpreter() {\n        if (is_valid)\n            finalize_interpreter();\n    }\n\nprivate:\n    bool is_valid = true;\n};\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/eval.h",
    "content": "/*\n    pybind11/exec.h: Support for evaluating Python expressions and statements\n    from strings and files\n\n    Copyright (c) 2016 Klemens Morgenstern <klemens.morgenstern@ed-chemnitz.de> and\n                       Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\nenum eval_mode {\n    /// Evaluate a string containing an isolated expression\n    eval_expr,\n\n    /// Evaluate a string containing a single statement. Returns \\c none\n    eval_single_statement,\n\n    /// Evaluate a string containing a sequence of statement. Returns \\c none\n    eval_statements\n};\n\ntemplate <eval_mode mode = eval_expr>\nobject eval(str expr, object global = globals(), object local = object()) {\n    if (!local)\n        local = global;\n\n    /* PyRun_String does not accept a PyObject / encoding specifier,\n       this seems to be the only alternative */\n    std::string buffer = \"# -*- coding: utf-8 -*-\\n\" + (std::string) expr;\n\n    int start;\n    switch (mode) {\n        case eval_expr:             start = Py_eval_input;   break;\n        case eval_single_statement: start = Py_single_input; break;\n        case eval_statements:       start = Py_file_input;   break;\n        default: pybind11_fail(\"invalid evaluation mode\");\n    }\n\n    PyObject *result = PyRun_String(buffer.c_str(), start, global.ptr(), local.ptr());\n    if (!result)\n        throw error_already_set();\n    return reinterpret_steal<object>(result);\n}\n\ntemplate <eval_mode mode = eval_expr, size_t N>\nobject eval(const char (&s)[N], object global = globals(), object local = object()) {\n    /* Support raw string literals by removing common leading whitespace */\n    auto expr = (s[0] == '\\n') ? 
str(module::import(\"textwrap\").attr(\"dedent\")(s))\n                               : str(s);\n    return eval<mode>(expr, global, local);\n}\n\ninline void exec(str expr, object global = globals(), object local = object()) {\n    eval<eval_statements>(expr, global, local);\n}\n\ntemplate <size_t N>\nvoid exec(const char (&s)[N], object global = globals(), object local = object()) {\n    eval<eval_statements>(s, global, local);\n}\n\ntemplate <eval_mode mode = eval_statements>\nobject eval_file(str fname, object global = globals(), object local = object()) {\n    if (!local)\n        local = global;\n\n    int start;\n    switch (mode) {\n        case eval_expr:             start = Py_eval_input;   break;\n        case eval_single_statement: start = Py_single_input; break;\n        case eval_statements:       start = Py_file_input;   break;\n        default: pybind11_fail(\"invalid evaluation mode\");\n    }\n\n    int closeFile = 1;\n    std::string fname_str = (std::string) fname;\n#if PY_VERSION_HEX >= 0x03040000\n    FILE *f = _Py_fopen_obj(fname.ptr(), \"r\");\n#elif PY_VERSION_HEX >= 0x03000000\n    FILE *f = _Py_fopen(fname.ptr(), \"r\");\n#else\n    /* No unicode support in open() :( */\n    auto fobj = reinterpret_steal<object>(PyFile_FromString(\n        const_cast<char *>(fname_str.c_str()),\n        const_cast<char*>(\"r\")));\n    FILE *f = nullptr;\n    if (fobj)\n        f = PyFile_AsFile(fobj.ptr());\n    closeFile = 0;\n#endif\n    if (!f) {\n        PyErr_Clear();\n        pybind11_fail(\"File \\\"\" + fname_str + \"\\\" could not be opened!\");\n    }\n\n#if PY_VERSION_HEX < 0x03000000 && defined(PYPY_VERSION)\n    PyObject *result = PyRun_File(f, fname_str.c_str(), start, global.ptr(),\n                                  local.ptr());\n    (void) closeFile;\n#else\n    PyObject *result = PyRun_FileEx(f, fname_str.c_str(), start, global.ptr(),\n                                    local.ptr(), closeFile);\n#endif\n\n    if (!result)\n        
throw error_already_set();\n    return reinterpret_steal<object>(result);\n}\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/functional.h",
    "content": "/*\n    pybind11/functional.h: std::function<> support\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n#include <functional>\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\ntemplate <typename Return, typename... Args>\nstruct type_caster<std::function<Return(Args...)>> {\n    using type = std::function<Return(Args...)>;\n    using retval_type = conditional_t<std::is_same<Return, void>::value, void_type, Return>;\n    using function_type = Return (*) (Args...);\n\npublic:\n    bool load(handle src, bool convert) {\n        if (src.is_none()) {\n            // Defer accepting None to other overloads (if we aren't in convert mode):\n            if (!convert) return false;\n            return true;\n        }\n\n        if (!isinstance<function>(src))\n            return false;\n\n        auto func = reinterpret_borrow<function>(src);\n\n        /*\n           When passing a C++ function as an argument to another C++\n           function via Python, every function call would normally involve\n           a full C++ -> Python -> C++ roundtrip, which can be prohibitive.\n           Here, we try to at least detect the case where the function is\n           stateless (i.e. 
function pointer or lambda function without\n           captured variables), in which case the roundtrip can be avoided.\n         */\n        if (auto cfunc = func.cpp_function()) {\n            auto c = reinterpret_borrow<capsule>(PyCFunction_GET_SELF(cfunc.ptr()));\n            auto rec = (function_record *) c;\n\n            if (rec && rec->is_stateless &&\n                    same_type(typeid(function_type), *reinterpret_cast<const std::type_info *>(rec->data[1]))) {\n                struct capture { function_type f; };\n                value = ((capture *) &rec->data)->f;\n                return true;\n            }\n        }\n\n        value = [func](Args... args) -> Return {\n            gil_scoped_acquire acq;\n            object retval(func(std::forward<Args>(args)...));\n            /* Visual studio 2015 parser issue: need parentheses around this expression */\n            return (retval.template cast<Return>());\n        };\n        return true;\n    }\n\n    template <typename Func>\n    static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) {\n        if (!f_)\n            return none().inc_ref();\n\n        auto result = f_.template target<function_type>();\n        if (result)\n            return cpp_function(*result, policy).release();\n        else\n            return cpp_function(std::forward<Func>(f_), policy).release();\n    }\n\n    PYBIND11_TYPE_CASTER(type, _(\"Callable[[\") + concat(make_caster<Args>::name...) + _(\"], \")\n                               + make_caster<retval_type>::name + _(\"]\"));\n};\n\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/iostream.h",
    "content": "/*\n    pybind11/iostream.h -- Tools to assist with redirecting cout and cerr to Python\n\n    Copyright (c) 2017 Henry F. Schreiner\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n\n#include <streambuf>\n#include <ostream>\n#include <string>\n#include <memory>\n#include <iostream>\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\n// Buffer that writes to Python instead of C++\nclass pythonbuf : public std::streambuf {\nprivate:\n    using traits_type = std::streambuf::traits_type;\n\n    char d_buffer[1024];\n    object pywrite;\n    object pyflush;\n\n    int overflow(int c) {\n        if (!traits_type::eq_int_type(c, traits_type::eof())) {\n            *pptr() = traits_type::to_char_type(c);\n            pbump(1);\n        }\n        return sync() == 0 ? traits_type::not_eof(c) : traits_type::eof();\n    }\n\n    int sync() {\n        if (pbase() != pptr()) {\n            // This subtraction cannot be negative, so dropping the sign\n            str line(pbase(), static_cast<size_t>(pptr() - pbase()));\n\n            pywrite(line);\n            pyflush();\n\n            setp(pbase(), epptr());\n        }\n        return 0;\n    }\n\npublic:\n    pythonbuf(object pyostream)\n        : pywrite(pyostream.attr(\"write\")),\n          pyflush(pyostream.attr(\"flush\")) {\n        setp(d_buffer, d_buffer + sizeof(d_buffer) - 1);\n    }\n\n    /// Sync before destroy\n    ~pythonbuf() {\n        sync();\n    }\n};\n\nNAMESPACE_END(detail)\n\n\n/** \\rst\n    This a move-only guard that redirects output.\n\n    .. 
code-block:: cpp\n\n        #include <pybind11/iostream.h>\n\n        ...\n\n        {\n            py::scoped_ostream_redirect output;\n            std::cout << \"Hello, World!\"; // Python stdout\n        } // <-- return std::cout to normal\n\n    You can explicitly pass the c++ stream and the python object,\n    for example to guard stderr instead.\n\n    .. code-block:: cpp\n\n        {\n            py::scoped_ostream_redirect output{std::cerr, py::module::import(\"sys\").attr(\"stderr\")};\n            std::cerr << \"Hello, World!\";\n        }\n \\endrst */\nclass scoped_ostream_redirect {\nprotected:\n    std::streambuf *old;\n    std::ostream &costream;\n    detail::pythonbuf buffer;\n\npublic:\n    scoped_ostream_redirect(\n            std::ostream &costream = std::cout,\n            object pyostream = module::import(\"sys\").attr(\"stdout\"))\n        : costream(costream), buffer(pyostream) {\n        old = costream.rdbuf(&buffer);\n    }\n\n    ~scoped_ostream_redirect() {\n        costream.rdbuf(old);\n    }\n\n    scoped_ostream_redirect(const scoped_ostream_redirect &) = delete;\n    scoped_ostream_redirect(scoped_ostream_redirect &&other) = default;\n    scoped_ostream_redirect &operator=(const scoped_ostream_redirect &) = delete;\n    scoped_ostream_redirect &operator=(scoped_ostream_redirect &&) = delete;\n};\n\n\n/** \\rst\n    Like `scoped_ostream_redirect`, but redirects cerr by default. This class\n    is provided primary to make ``py::call_guard`` easier to make.\n\n    .. 
code-block:: cpp\n\n     m.def(\"noisy_func\", &noisy_func,\n           py::call_guard<scoped_ostream_redirect,\n                          scoped_estream_redirect>());\n\n\\endrst */\nclass scoped_estream_redirect : public scoped_ostream_redirect {\npublic:\n    scoped_estream_redirect(\n            std::ostream &costream = std::cerr,\n            object pyostream = module::import(\"sys\").attr(\"stderr\"))\n        : scoped_ostream_redirect(costream,pyostream) {}\n};\n\n\nNAMESPACE_BEGIN(detail)\n\n// Class to redirect output as a context manager. C++ backend.\nclass OstreamRedirect {\n    bool do_stdout_;\n    bool do_stderr_;\n    std::unique_ptr<scoped_ostream_redirect> redirect_stdout;\n    std::unique_ptr<scoped_estream_redirect> redirect_stderr;\n\npublic:\n    OstreamRedirect(bool do_stdout = true, bool do_stderr = true)\n        : do_stdout_(do_stdout), do_stderr_(do_stderr) {}\n\n    void enter() {\n        if (do_stdout_)\n            redirect_stdout.reset(new scoped_ostream_redirect());\n        if (do_stderr_)\n            redirect_stderr.reset(new scoped_estream_redirect());\n    }\n\n    void exit() {\n        redirect_stdout.reset();\n        redirect_stderr.reset();\n    }\n};\n\nNAMESPACE_END(detail)\n\n/** \\rst\n    This is a helper function to add a C++ redirect context manager to Python\n    instead of using a C++ guard. To use it, add the following to your binding code:\n\n    .. code-block:: cpp\n\n        #include <pybind11/iostream.h>\n\n        ...\n\n        py::add_ostream_redirect(m, \"ostream_redirect\");\n\n    You now have a Python context manager that redirects your output:\n\n    .. code-block:: python\n\n        with m.ostream_redirect():\n            m.print_to_cout_function()\n\n    This manager can optionally be told which streams to operate on:\n\n    .. 
code-block:: python\n\n        with m.ostream_redirect(stdout=true, stderr=true):\n            m.noisy_function_with_error_printing()\n\n \\endrst */\ninline class_<detail::OstreamRedirect> add_ostream_redirect(module m, std::string name = \"ostream_redirect\") {\n    return class_<detail::OstreamRedirect>(m, name.c_str(), module_local())\n        .def(init<bool,bool>(), arg(\"stdout\")=true, arg(\"stderr\")=true)\n        .def(\"__enter__\", &detail::OstreamRedirect::enter)\n        .def(\"__exit__\", [](detail::OstreamRedirect &self_, args) { self_.exit(); });\n}\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/numpy.h",
    "content": "/*\n    pybind11/numpy.h: Basic NumPy support, vectorize() wrapper\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n#include \"complex.h\"\n#include <numeric>\n#include <algorithm>\n#include <array>\n#include <cstdlib>\n#include <cstring>\n#include <sstream>\n#include <string>\n#include <functional>\n#include <utility>\n#include <vector>\n#include <typeindex>\n\n#if defined(_MSC_VER)\n#  pragma warning(push)\n#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant\n#endif\n\n/* This will be true on all flat address space platforms and allows us to reduce the\n   whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size\n   and dimension types (e.g. shape, strides, indexing), instead of inflicting this\n   upon the library user. */\nstatic_assert(sizeof(ssize_t) == sizeof(Py_intptr_t), \"ssize_t != Py_intptr_t\");\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\nclass array; // Forward declaration\n\nNAMESPACE_BEGIN(detail)\ntemplate <typename type, typename SFINAE = void> struct npy_format_descriptor;\n\nstruct PyArrayDescr_Proxy {\n    PyObject_HEAD\n    PyObject *typeobj;\n    char kind;\n    char type;\n    char byteorder;\n    char flags;\n    int type_num;\n    int elsize;\n    int alignment;\n    char *subarray;\n    PyObject *fields;\n    PyObject *names;\n};\n\nstruct PyArray_Proxy {\n    PyObject_HEAD\n    char *data;\n    int nd;\n    ssize_t *dimensions;\n    ssize_t *strides;\n    PyObject *base;\n    PyObject *descr;\n    int flags;\n};\n\nstruct PyVoidScalarObject_Proxy {\n    PyObject_VAR_HEAD\n    char *obval;\n    PyArrayDescr_Proxy *descr;\n    int flags;\n    PyObject *base;\n};\n\nstruct numpy_type_info {\n    PyObject* dtype_ptr;\n    std::string format_str;\n};\n\nstruct numpy_internals {\n    
std::unordered_map<std::type_index, numpy_type_info> registered_dtypes;\n\n    numpy_type_info *get_type_info(const std::type_info& tinfo, bool throw_if_missing = true) {\n        auto it = registered_dtypes.find(std::type_index(tinfo));\n        if (it != registered_dtypes.end())\n            return &(it->second);\n        if (throw_if_missing)\n            pybind11_fail(std::string(\"NumPy type info missing for \") + tinfo.name());\n        return nullptr;\n    }\n\n    template<typename T> numpy_type_info *get_type_info(bool throw_if_missing = true) {\n        return get_type_info(typeid(typename std::remove_cv<T>::type), throw_if_missing);\n    }\n};\n\ninline PYBIND11_NOINLINE void load_numpy_internals(numpy_internals* &ptr) {\n    ptr = &get_or_create_shared_data<numpy_internals>(\"_numpy_internals\");\n}\n\ninline numpy_internals& get_numpy_internals() {\n    static numpy_internals* ptr = nullptr;\n    if (!ptr)\n        load_numpy_internals(ptr);\n    return *ptr;\n}\n\nstruct npy_api {\n    enum constants {\n        NPY_ARRAY_C_CONTIGUOUS_ = 0x0001,\n        NPY_ARRAY_F_CONTIGUOUS_ = 0x0002,\n        NPY_ARRAY_OWNDATA_ = 0x0004,\n        NPY_ARRAY_FORCECAST_ = 0x0010,\n        NPY_ARRAY_ENSUREARRAY_ = 0x0040,\n        NPY_ARRAY_ALIGNED_ = 0x0100,\n        NPY_ARRAY_WRITEABLE_ = 0x0400,\n        NPY_BOOL_ = 0,\n        NPY_BYTE_, NPY_UBYTE_,\n        NPY_SHORT_, NPY_USHORT_,\n        NPY_INT_, NPY_UINT_,\n        NPY_LONG_, NPY_ULONG_,\n        NPY_LONGLONG_, NPY_ULONGLONG_,\n        NPY_FLOAT_, NPY_DOUBLE_, NPY_LONGDOUBLE_,\n        NPY_CFLOAT_, NPY_CDOUBLE_, NPY_CLONGDOUBLE_,\n        NPY_OBJECT_ = 17,\n        NPY_STRING_, NPY_UNICODE_, NPY_VOID_\n    };\n\n    typedef struct {\n        Py_intptr_t *ptr;\n        int len;\n    } PyArray_Dims;\n\n    static npy_api& get() {\n        static npy_api api = lookup();\n        return api;\n    }\n\n    bool PyArray_Check_(PyObject *obj) const {\n        return (bool) PyObject_TypeCheck(obj, PyArray_Type_);\n   
 }\n    bool PyArrayDescr_Check_(PyObject *obj) const {\n        return (bool) PyObject_TypeCheck(obj, PyArrayDescr_Type_);\n    }\n\n    unsigned int (*PyArray_GetNDArrayCFeatureVersion_)();\n    PyObject *(*PyArray_DescrFromType_)(int);\n    PyObject *(*PyArray_NewFromDescr_)\n        (PyTypeObject *, PyObject *, int, Py_intptr_t *,\n         Py_intptr_t *, void *, int, PyObject *);\n    PyObject *(*PyArray_DescrNewFromType_)(int);\n    int (*PyArray_CopyInto_)(PyObject *, PyObject *);\n    PyObject *(*PyArray_NewCopy_)(PyObject *, int);\n    PyTypeObject *PyArray_Type_;\n    PyTypeObject *PyVoidArrType_Type_;\n    PyTypeObject *PyArrayDescr_Type_;\n    PyObject *(*PyArray_DescrFromScalar_)(PyObject *);\n    PyObject *(*PyArray_FromAny_) (PyObject *, PyObject *, int, int, int, PyObject *);\n    int (*PyArray_DescrConverter_) (PyObject *, PyObject **);\n    bool (*PyArray_EquivTypes_) (PyObject *, PyObject *);\n    int (*PyArray_GetArrayParamsFromObject_)(PyObject *, PyObject *, char, PyObject **, int *,\n                                             Py_ssize_t *, PyObject **, PyObject *);\n    PyObject *(*PyArray_Squeeze_)(PyObject *);\n    int (*PyArray_SetBaseObject_)(PyObject *, PyObject *);\n    PyObject* (*PyArray_Resize_)(PyObject*, PyArray_Dims*, int, int);\nprivate:\n    enum functions {\n        API_PyArray_GetNDArrayCFeatureVersion = 211,\n        API_PyArray_Type = 2,\n        API_PyArrayDescr_Type = 3,\n        API_PyVoidArrType_Type = 39,\n        API_PyArray_DescrFromType = 45,\n        API_PyArray_DescrFromScalar = 57,\n        API_PyArray_FromAny = 69,\n        API_PyArray_Resize = 80,\n        API_PyArray_CopyInto = 82,\n        API_PyArray_NewCopy = 85,\n        API_PyArray_NewFromDescr = 94,\n        API_PyArray_DescrNewFromType = 9,\n        API_PyArray_DescrConverter = 174,\n        API_PyArray_EquivTypes = 182,\n        API_PyArray_GetArrayParamsFromObject = 278,\n        API_PyArray_Squeeze = 136,\n        API_PyArray_SetBaseObject = 282\n   
 };\n\n    static npy_api lookup() {\n        module m = module::import(\"numpy.core.multiarray\");\n        auto c = m.attr(\"_ARRAY_API\");\n#if PY_MAJOR_VERSION >= 3\n        void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), NULL);\n#else\n        void **api_ptr = (void **) PyCObject_AsVoidPtr(c.ptr());\n#endif\n        npy_api api;\n#define DECL_NPY_API(Func) api.Func##_ = (decltype(api.Func##_)) api_ptr[API_##Func];\n        DECL_NPY_API(PyArray_GetNDArrayCFeatureVersion);\n        if (api.PyArray_GetNDArrayCFeatureVersion_() < 0x7)\n            pybind11_fail(\"pybind11 numpy support requires numpy >= 1.7.0\");\n        DECL_NPY_API(PyArray_Type);\n        DECL_NPY_API(PyVoidArrType_Type);\n        DECL_NPY_API(PyArrayDescr_Type);\n        DECL_NPY_API(PyArray_DescrFromType);\n        DECL_NPY_API(PyArray_DescrFromScalar);\n        DECL_NPY_API(PyArray_FromAny);\n        DECL_NPY_API(PyArray_Resize);\n        DECL_NPY_API(PyArray_CopyInto);\n        DECL_NPY_API(PyArray_NewCopy);\n        DECL_NPY_API(PyArray_NewFromDescr);\n        DECL_NPY_API(PyArray_DescrNewFromType);\n        DECL_NPY_API(PyArray_DescrConverter);\n        DECL_NPY_API(PyArray_EquivTypes);\n        DECL_NPY_API(PyArray_GetArrayParamsFromObject);\n        DECL_NPY_API(PyArray_Squeeze);\n        DECL_NPY_API(PyArray_SetBaseObject);\n#undef DECL_NPY_API\n        return api;\n    }\n};\n\ninline PyArray_Proxy* array_proxy(void* ptr) {\n    return reinterpret_cast<PyArray_Proxy*>(ptr);\n}\n\ninline const PyArray_Proxy* array_proxy(const void* ptr) {\n    return reinterpret_cast<const PyArray_Proxy*>(ptr);\n}\n\ninline PyArrayDescr_Proxy* array_descriptor_proxy(PyObject* ptr) {\n   return reinterpret_cast<PyArrayDescr_Proxy*>(ptr);\n}\n\ninline const PyArrayDescr_Proxy* array_descriptor_proxy(const PyObject* ptr) {\n   return reinterpret_cast<const PyArrayDescr_Proxy*>(ptr);\n}\n\ninline bool check_flags(const void* ptr, int flag) {\n    return (flag == (array_proxy(ptr)->flags & 
flag));\n}\n\ntemplate <typename T> struct is_std_array : std::false_type { };\ntemplate <typename T, size_t N> struct is_std_array<std::array<T, N>> : std::true_type { };\ntemplate <typename T> struct is_complex : std::false_type { };\ntemplate <typename T> struct is_complex<std::complex<T>> : std::true_type { };\n\ntemplate <typename T> struct array_info_scalar {\n    typedef T type;\n    static constexpr bool is_array = false;\n    static constexpr bool is_empty = false;\n    static constexpr auto extents = _(\"\");\n    static void append_extents(list& /* shape */) { }\n};\n// Computes underlying type and a comma-separated list of extents for array\n// types (any mix of std::array and built-in arrays). An array of char is\n// treated as scalar because it gets special handling.\ntemplate <typename T> struct array_info : array_info_scalar<T> { };\ntemplate <typename T, size_t N> struct array_info<std::array<T, N>> {\n    using type = typename array_info<T>::type;\n    static constexpr bool is_array = true;\n    static constexpr bool is_empty = (N == 0) || array_info<T>::is_empty;\n    static constexpr size_t extent = N;\n\n    // appends the extents to shape\n    static void append_extents(list& shape) {\n        shape.append(N);\n        array_info<T>::append_extents(shape);\n    }\n\n    static constexpr auto extents = _<array_info<T>::is_array>(\n        concat(_<N>(), array_info<T>::extents), _<N>()\n    );\n};\n// For numpy we have special handling for arrays of characters, so we don't include\n// the size in the array extents.\ntemplate <size_t N> struct array_info<char[N]> : array_info_scalar<char[N]> { };\ntemplate <size_t N> struct array_info<std::array<char, N>> : array_info_scalar<std::array<char, N>> { };\ntemplate <typename T, size_t N> struct array_info<T[N]> : array_info<std::array<T, N>> { };\ntemplate <typename T> using remove_all_extents_t = typename array_info<T>::type;\n\ntemplate <typename T> using is_pod_struct = all_of<\n    
std::is_standard_layout<T>,     // since we're accessing directly in memory we need a standard layout type\n#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(_GLIBCXX_USE_CXX11_ABI)\n    // _GLIBCXX_USE_CXX11_ABI indicates that we're using libstdc++ from GCC 5 or newer, independent\n    // of the actual compiler (Clang can also use libstdc++, but it always defines __GNUC__ == 4).\n    std::is_trivially_copyable<T>,\n#else\n    // GCC 4 doesn't implement is_trivially_copyable, so approximate it\n    std::is_trivially_destructible<T>,\n    satisfies_any_of<T, std::has_trivial_copy_constructor, std::has_trivial_copy_assign>,\n#endif\n    satisfies_none_of<T, std::is_reference, std::is_array, is_std_array, std::is_arithmetic, is_complex, std::is_enum>\n>;\n\ntemplate <ssize_t Dim = 0, typename Strides> ssize_t byte_offset_unsafe(const Strides &) { return 0; }\ntemplate <ssize_t Dim = 0, typename Strides, typename... Ix>\nssize_t byte_offset_unsafe(const Strides &strides, ssize_t i, Ix... index) {\n    return i * strides[Dim] + byte_offset_unsafe<Dim + 1>(strides, index...);\n}\n\n/**\n * Proxy class providing unsafe, unchecked const access to array data.  This is constructed through\n * the `unchecked<T, N>()` method of `array` or the `unchecked<N>()` method of `array_t<T>`.  `Dims`\n * will be -1 for dimensions determined at runtime.\n */\ntemplate <typename T, ssize_t Dims>\nclass unchecked_reference {\nprotected:\n    static constexpr bool Dynamic = Dims < 0;\n    const unsigned char *data_;\n    // Storing the shape & strides in local variables (i.e. 
these arrays) allows the compiler to\n    // make large performance gains on big, nested loops, but requires compile-time dimensions\n    conditional_t<Dynamic, const ssize_t *, std::array<ssize_t, (size_t) Dims>>\n            shape_, strides_;\n    const ssize_t dims_;\n\n    friend class pybind11::array;\n    // Constructor for compile-time dimensions:\n    template <bool Dyn = Dynamic>\n    unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t<!Dyn, ssize_t>)\n    : data_{reinterpret_cast<const unsigned char *>(data)}, dims_{Dims} {\n        for (size_t i = 0; i < (size_t) dims_; i++) {\n            shape_[i] = shape[i];\n            strides_[i] = strides[i];\n        }\n    }\n    // Constructor for runtime dimensions:\n    template <bool Dyn = Dynamic>\n    unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t<Dyn, ssize_t> dims)\n    : data_{reinterpret_cast<const unsigned char *>(data)}, shape_{shape}, strides_{strides}, dims_{dims} {}\n\npublic:\n    /**\n     * Unchecked const reference access to data at the given indices.  For a compile-time known\n     * number of dimensions, this requires the correct number of arguments; for run-time\n     * dimensionality, this is not checked (and so is up to the caller to use safely).\n     */\n    template <typename... Ix> const T &operator()(Ix... index) const {\n        static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,\n                \"Invalid number of indices for unchecked array reference\");\n        return *reinterpret_cast<const T *>(data_ + byte_offset_unsafe(strides_, ssize_t(index)...));\n    }\n    /**\n     * Unchecked const reference access to data; this operator only participates if the reference\n     * is to a 1-dimensional array.  
When present, this is exactly equivalent to `obj(index)`.\n     */\n    template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>\n    const T &operator[](ssize_t index) const { return operator()(index); }\n\n    /// Pointer access to the data at the given indices.\n    template <typename... Ix> const T *data(Ix... ix) const { return &operator()(ssize_t(ix)...); }\n\n    /// Returns the item size, i.e. sizeof(T)\n    constexpr static ssize_t itemsize() { return sizeof(T); }\n\n    /// Returns the shape (i.e. size) of dimension `dim`\n    ssize_t shape(ssize_t dim) const { return shape_[(size_t) dim]; }\n\n    /// Returns the number of dimensions of the array\n    ssize_t ndim() const { return dims_; }\n\n    /// Returns the total number of elements in the referenced array, i.e. the product of the shapes\n    template <bool Dyn = Dynamic>\n    enable_if_t<!Dyn, ssize_t> size() const {\n        return std::accumulate(shape_.begin(), shape_.end(), (ssize_t) 1, std::multiplies<ssize_t>());\n    }\n    template <bool Dyn = Dynamic>\n    enable_if_t<Dyn, ssize_t> size() const {\n        return std::accumulate(shape_, shape_ + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());\n    }\n\n    /// Returns the total number of bytes used by the referenced data.  Note that the actual span in\n    /// memory may be larger if the referenced array has non-contiguous strides (e.g. for a slice).\n    ssize_t nbytes() const {\n        return size() * itemsize();\n    }\n};\n\ntemplate <typename T, ssize_t Dims>\nclass unchecked_mutable_reference : public unchecked_reference<T, Dims> {\n    friend class pybind11::array;\n    using ConstBase = unchecked_reference<T, Dims>;\n    using ConstBase::ConstBase;\n    using ConstBase::Dynamic;\npublic:\n    /// Mutable, unchecked access to data at the given indices.\n    template <typename... Ix> T& operator()(Ix... 
index) {\n        static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,\n                \"Invalid number of indices for unchecked array reference\");\n        return const_cast<T &>(ConstBase::operator()(index...));\n    }\n    /**\n     * Mutable, unchecked access data at the given index; this operator only participates if the\n     * reference is to a 1-dimensional array (or has runtime dimensions).  When present, this is\n     * exactly equivalent to `obj(index)`.\n     */\n    template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>\n    T &operator[](ssize_t index) { return operator()(index); }\n\n    /// Mutable pointer access to the data at the given indices.\n    template <typename... Ix> T *mutable_data(Ix... ix) { return &operator()(ssize_t(ix)...); }\n};\n\ntemplate <typename T, ssize_t Dim>\nstruct type_caster<unchecked_reference<T, Dim>> {\n    static_assert(Dim == 0 && Dim > 0 /* always fail */, \"unchecked array proxy object is not castable\");\n};\ntemplate <typename T, ssize_t Dim>\nstruct type_caster<unchecked_mutable_reference<T, Dim>> : type_caster<unchecked_reference<T, Dim>> {};\n\nNAMESPACE_END(detail)\n\nclass dtype : public object {\npublic:\n    PYBIND11_OBJECT_DEFAULT(dtype, object, detail::npy_api::get().PyArrayDescr_Check_);\n\n    explicit dtype(const buffer_info &info) {\n        dtype descr(_dtype_from_pep3118()(PYBIND11_STR_TYPE(info.format)));\n        // If info.itemsize == 0, use the value calculated from the format string\n        m_ptr = descr.strip_padding(info.itemsize ? 
info.itemsize : descr.itemsize()).release().ptr();\n    }\n\n    explicit dtype(const std::string &format) {\n        m_ptr = from_args(pybind11::str(format)).release().ptr();\n    }\n\n    dtype(const char *format) : dtype(std::string(format)) { }\n\n    dtype(list names, list formats, list offsets, ssize_t itemsize) {\n        dict args;\n        args[\"names\"] = names;\n        args[\"formats\"] = formats;\n        args[\"offsets\"] = offsets;\n        args[\"itemsize\"] = pybind11::int_(itemsize);\n        m_ptr = from_args(args).release().ptr();\n    }\n\n    /// This is essentially the same as calling numpy.dtype(args) in Python.\n    static dtype from_args(object args) {\n        PyObject *ptr = nullptr;\n        if (!detail::npy_api::get().PyArray_DescrConverter_(args.ptr(), &ptr) || !ptr)\n            throw error_already_set();\n        return reinterpret_steal<dtype>(ptr);\n    }\n\n    /// Return dtype associated with a C++ type.\n    template <typename T> static dtype of() {\n        return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::dtype();\n    }\n\n    /// Size of the data type in bytes.\n    ssize_t itemsize() const {\n        return detail::array_descriptor_proxy(m_ptr)->elsize;\n    }\n\n    /// Returns true for structured data types.\n    bool has_fields() const {\n        return detail::array_descriptor_proxy(m_ptr)->names != nullptr;\n    }\n\n    /// Single-character type code.\n    char kind() const {\n        return detail::array_descriptor_proxy(m_ptr)->kind;\n    }\n\nprivate:\n    static object _dtype_from_pep3118() {\n        static PyObject *obj = module::import(\"numpy.core._internal\")\n            .attr(\"_dtype_from_pep3118\").cast<object>().release().ptr();\n        return reinterpret_borrow<object>(obj);\n    }\n\n    dtype strip_padding(ssize_t itemsize) {\n        // Recursively strip all void fields with empty names that are generated for\n        // padding fields (as of NumPy v1.11).\n        if 
(!has_fields())\n            return *this;\n\n        struct field_descr { PYBIND11_STR_TYPE name; object format; pybind11::int_ offset; };\n        std::vector<field_descr> field_descriptors;\n\n        for (auto field : attr(\"fields\").attr(\"items\")()) {\n            auto spec = field.cast<tuple>();\n            auto name = spec[0].cast<pybind11::str>();\n            auto format = spec[1].cast<tuple>()[0].cast<dtype>();\n            auto offset = spec[1].cast<tuple>()[1].cast<pybind11::int_>();\n            if (!len(name) && format.kind() == 'V')\n                continue;\n            field_descriptors.push_back({(PYBIND11_STR_TYPE) name, format.strip_padding(format.itemsize()), offset});\n        }\n\n        std::sort(field_descriptors.begin(), field_descriptors.end(),\n                  [](const field_descr& a, const field_descr& b) {\n                      return a.offset.cast<int>() < b.offset.cast<int>();\n                  });\n\n        list names, formats, offsets;\n        for (auto& descr : field_descriptors) {\n            names.append(descr.name);\n            formats.append(descr.format);\n            offsets.append(descr.offset);\n        }\n        return dtype(names, formats, offsets, itemsize);\n    }\n};\n\nclass array : public buffer {\npublic:\n    PYBIND11_OBJECT_CVT(array, buffer, detail::npy_api::get().PyArray_Check_, raw_array)\n\n    enum {\n        c_style = detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_,\n        f_style = detail::npy_api::NPY_ARRAY_F_CONTIGUOUS_,\n        forcecast = detail::npy_api::NPY_ARRAY_FORCECAST_\n    };\n\n    array() : array({{0}}, static_cast<const double *>(nullptr)) {}\n\n    using ShapeContainer = detail::any_container<ssize_t>;\n    using StridesContainer = detail::any_container<ssize_t>;\n\n    // Constructs an array taking shape/strides from arbitrary container types\n    array(const pybind11::dtype &dt, ShapeContainer shape, StridesContainer strides,\n          const void *ptr = nullptr, handle base = 
handle()) {\n\n        if (strides->empty())\n            *strides = c_strides(*shape, dt.itemsize());\n\n        auto ndim = shape->size();\n        if (ndim != strides->size())\n            pybind11_fail(\"NumPy: shape ndim doesn't match strides ndim\");\n        auto descr = dt;\n\n        int flags = 0;\n        if (base && ptr) {\n            if (isinstance<array>(base))\n                /* Copy flags from base (except ownership bit) */\n                flags = reinterpret_borrow<array>(base).flags() & ~detail::npy_api::NPY_ARRAY_OWNDATA_;\n            else\n                /* Writable by default, easy to downgrade later on if needed */\n                flags = detail::npy_api::NPY_ARRAY_WRITEABLE_;\n        }\n\n        auto &api = detail::npy_api::get();\n        auto tmp = reinterpret_steal<object>(api.PyArray_NewFromDescr_(\n            api.PyArray_Type_, descr.release().ptr(), (int) ndim, shape->data(), strides->data(),\n            const_cast<void *>(ptr), flags, nullptr));\n        if (!tmp)\n            throw error_already_set();\n        if (ptr) {\n            if (base) {\n                api.PyArray_SetBaseObject_(tmp.ptr(), base.inc_ref().ptr());\n            } else {\n                tmp = reinterpret_steal<object>(api.PyArray_NewCopy_(tmp.ptr(), -1 /* any order */));\n            }\n        }\n        m_ptr = tmp.release().ptr();\n    }\n\n    array(const pybind11::dtype &dt, ShapeContainer shape, const void *ptr = nullptr, handle base = handle())\n        : array(dt, std::move(shape), {}, ptr, base) { }\n\n    template <typename T, typename = detail::enable_if_t<std::is_integral<T>::value && !std::is_same<bool, T>::value>>\n    array(const pybind11::dtype &dt, T count, const void *ptr = nullptr, handle base = handle())\n        : array(dt, {{count}}, ptr, base) { }\n\n    template <typename T>\n    array(ShapeContainer shape, StridesContainer strides, const T *ptr, handle base = handle())\n        : array(pybind11::dtype::of<T>(), 
std::move(shape), std::move(strides), ptr, base) { }\n\n    template <typename T>\n    array(ShapeContainer shape, const T *ptr, handle base = handle())\n        : array(std::move(shape), {}, ptr, base) { }\n\n    template <typename T>\n    explicit array(ssize_t count, const T *ptr, handle base = handle()) : array({count}, {}, ptr, base) { }\n\n    explicit array(const buffer_info &info)\n    : array(pybind11::dtype(info), info.shape, info.strides, info.ptr) { }\n\n    /// Array descriptor (dtype)\n    pybind11::dtype dtype() const {\n        return reinterpret_borrow<pybind11::dtype>(detail::array_proxy(m_ptr)->descr);\n    }\n\n    /// Total number of elements\n    ssize_t size() const {\n        return std::accumulate(shape(), shape() + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());\n    }\n\n    /// Byte size of a single element\n    ssize_t itemsize() const {\n        return detail::array_descriptor_proxy(detail::array_proxy(m_ptr)->descr)->elsize;\n    }\n\n    /// Total number of bytes\n    ssize_t nbytes() const {\n        return size() * itemsize();\n    }\n\n    /// Number of dimensions\n    ssize_t ndim() const {\n        return detail::array_proxy(m_ptr)->nd;\n    }\n\n    /// Base object\n    object base() const {\n        return reinterpret_borrow<object>(detail::array_proxy(m_ptr)->base);\n    }\n\n    /// Dimensions of the array\n    const ssize_t* shape() const {\n        return detail::array_proxy(m_ptr)->dimensions;\n    }\n\n    /// Dimension along a given axis\n    ssize_t shape(ssize_t dim) const {\n        if (dim >= ndim())\n            fail_dim_check(dim, \"invalid axis\");\n        return shape()[dim];\n    }\n\n    /// Strides of the array\n    const ssize_t* strides() const {\n        return detail::array_proxy(m_ptr)->strides;\n    }\n\n    /// Stride along a given axis\n    ssize_t strides(ssize_t dim) const {\n        if (dim >= ndim())\n            fail_dim_check(dim, \"invalid axis\");\n        return strides()[dim];\n    }\n\n  
  /// Return the NumPy array flags\n    int flags() const {\n        return detail::array_proxy(m_ptr)->flags;\n    }\n\n    /// If set, the array is writeable (otherwise the buffer is read-only)\n    bool writeable() const {\n        return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_WRITEABLE_);\n    }\n\n    /// If set, the array owns the data (will be freed when the array is deleted)\n    bool owndata() const {\n        return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_OWNDATA_);\n    }\n\n    /// Pointer to the contained data. If index is not provided, points to the\n    /// beginning of the buffer. May throw if the index would lead to out of bounds access.\n    template<typename... Ix> const void* data(Ix... index) const {\n        return static_cast<const void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));\n    }\n\n    /// Mutable pointer to the contained data. If index is not provided, points to the\n    /// beginning of the buffer. May throw if the index would lead to out of bounds access.\n    /// May throw if the array is not writeable.\n    template<typename... Ix> void* mutable_data(Ix... index) {\n        check_writeable();\n        return static_cast<void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));\n    }\n\n    /// Byte offset from beginning of the array to a given index (full or partial).\n    /// May throw if the index would lead to out of bounds access.\n    template<typename... Ix> ssize_t offset_at(Ix... index) const {\n        if ((ssize_t) sizeof...(index) > ndim())\n            fail_dim_check(sizeof...(index), \"too many indices for an array\");\n        return byte_offset(ssize_t(index)...);\n    }\n\n    ssize_t offset_at() const { return 0; }\n\n    /// Item count from beginning of the array to a given index (full or partial).\n    /// May throw if the index would lead to out of bounds access.\n    template<typename... Ix> ssize_t index_at(Ix... 
index) const {\n        return offset_at(index...) / itemsize();\n    }\n\n    /**\n     * Returns a proxy object that provides access to the array's data without bounds or\n     * dimensionality checking.  Will throw if the array is missing the `writeable` flag.  Use with\n     * care: the array must not be destroyed or reshaped for the duration of the returned object,\n     * and the caller must take care not to access invalid dimensions or dimension indices.\n     */\n    template <typename T, ssize_t Dims = -1> detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {\n        if (Dims >= 0 && ndim() != Dims)\n            throw std::domain_error(\"array has incorrect number of dimensions: \" + std::to_string(ndim()) +\n                    \"; expected \" + std::to_string(Dims));\n        return detail::unchecked_mutable_reference<T, Dims>(mutable_data(), shape(), strides(), ndim());\n    }\n\n    /**\n     * Returns a proxy object that provides const access to the array's data without bounds or\n     * dimensionality checking.  Unlike `mutable_unchecked()`, this does not require that the\n     * underlying array have the `writable` flag.  
Use with care: the array must not be destroyed or\n     * reshaped for the duration of the returned object, and the caller must take care not to access\n     * invalid dimensions or dimension indices.\n     */\n    template <typename T, ssize_t Dims = -1> detail::unchecked_reference<T, Dims> unchecked() const & {\n        if (Dims >= 0 && ndim() != Dims)\n            throw std::domain_error(\"array has incorrect number of dimensions: \" + std::to_string(ndim()) +\n                    \"; expected \" + std::to_string(Dims));\n        return detail::unchecked_reference<T, Dims>(data(), shape(), strides(), ndim());\n    }\n\n    /// Return a new view with all of the dimensions of length 1 removed\n    array squeeze() {\n        auto& api = detail::npy_api::get();\n        return reinterpret_steal<array>(api.PyArray_Squeeze_(m_ptr));\n    }\n\n    /// Resize array to given shape\n    /// If refcheck is true and more that one reference exist to this array\n    /// then resize will succeed only if it makes a reshape, i.e. 
original size doesn't change\n    void resize(ShapeContainer new_shape, bool refcheck = true) {\n        detail::npy_api::PyArray_Dims d = {\n            new_shape->data(), int(new_shape->size())\n        };\n        // try to resize, set ordering param to -1 cause it's not used anyway\n        object new_array = reinterpret_steal<object>(\n            detail::npy_api::get().PyArray_Resize_(m_ptr, &d, int(refcheck), -1)\n        );\n        if (!new_array) throw error_already_set();\n        if (isinstance<array>(new_array)) { *this = std::move(new_array); }\n    }\n\n    /// Ensure that the argument is a NumPy array\n    /// In case of an error, nullptr is returned and the Python error is cleared.\n    static array ensure(handle h, int ExtraFlags = 0) {\n        auto result = reinterpret_steal<array>(raw_array(h.ptr(), ExtraFlags));\n        if (!result)\n            PyErr_Clear();\n        return result;\n    }\n\nprotected:\n    template<typename, typename> friend struct detail::npy_format_descriptor;\n\n    void fail_dim_check(ssize_t dim, const std::string& msg) const {\n        throw index_error(msg + \": \" + std::to_string(dim) +\n                          \" (ndim = \" + std::to_string(ndim()) + \")\");\n    }\n\n    template<typename... Ix> ssize_t byte_offset(Ix... 
index) const {\n        check_dimensions(index...);\n        return detail::byte_offset_unsafe(strides(), ssize_t(index)...);\n    }\n\n    void check_writeable() const {\n        if (!writeable())\n            throw std::domain_error(\"array is not writeable\");\n    }\n\n    // Default, C-style strides\n    static std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {\n        auto ndim = shape.size();\n        std::vector<ssize_t> strides(ndim, itemsize);\n        if (ndim > 0)\n            for (size_t i = ndim - 1; i > 0; --i)\n                strides[i - 1] = strides[i] * shape[i];\n        return strides;\n    }\n\n    // F-style strides; default when constructing an array_t with `ExtraFlags & f_style`\n    static std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {\n        auto ndim = shape.size();\n        std::vector<ssize_t> strides(ndim, itemsize);\n        for (size_t i = 1; i < ndim; ++i)\n            strides[i] = strides[i - 1] * shape[i - 1];\n        return strides;\n    }\n\n    template<typename... Ix> void check_dimensions(Ix... index) const {\n        check_dimensions_impl(ssize_t(0), shape(), ssize_t(index)...);\n    }\n\n    void check_dimensions_impl(ssize_t, const ssize_t*) const { }\n\n    template<typename... Ix> void check_dimensions_impl(ssize_t axis, const ssize_t* shape, ssize_t i, Ix... 
index) const {\n        if (i >= *shape) {\n            throw index_error(std::string(\"index \") + std::to_string(i) +\n                              \" is out of bounds for axis \" + std::to_string(axis) +\n                              \" with size \" + std::to_string(*shape));\n        }\n        check_dimensions_impl(axis + 1, shape + 1, index...);\n    }\n\n    /// Create array from any object -- always returns a new reference\n    static PyObject *raw_array(PyObject *ptr, int ExtraFlags = 0) {\n        if (ptr == nullptr) {\n            PyErr_SetString(PyExc_ValueError, \"cannot create a pybind11::array from a nullptr\");\n            return nullptr;\n        }\n        return detail::npy_api::get().PyArray_FromAny_(\n            ptr, nullptr, 0, 0, detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);\n    }\n};\n\ntemplate <typename T, int ExtraFlags = array::forcecast> class array_t : public array {\nprivate:\n    struct private_ctor {};\n    // Delegating constructor needed when both moving and accessing in the same constructor\n    array_t(private_ctor, ShapeContainer &&shape, StridesContainer &&strides, const T *ptr, handle base)\n        : array(std::move(shape), std::move(strides), ptr, base) {}\npublic:\n    static_assert(!detail::array_info<T>::is_array, \"Array types cannot be used with array_t\");\n\n    using value_type = T;\n\n    array_t() : array(0, static_cast<const T *>(nullptr)) {}\n    array_t(handle h, borrowed_t) : array(h, borrowed_t{}) { }\n    array_t(handle h, stolen_t) : array(h, stolen_t{}) { }\n\n    PYBIND11_DEPRECATED(\"Use array_t<T>::ensure() instead\")\n    array_t(handle h, bool is_borrowed) : array(raw_array_t(h.ptr()), stolen_t{}) {\n        if (!m_ptr) PyErr_Clear();\n        if (!is_borrowed) Py_XDECREF(h.ptr());\n    }\n\n    array_t(const object &o) : array(raw_array_t(o.ptr()), stolen_t{}) {\n        if (!m_ptr) throw error_already_set();\n    }\n\n    explicit array_t(const buffer_info& info) : array(info) 
{ }\n\n    array_t(ShapeContainer shape, StridesContainer strides, const T *ptr = nullptr, handle base = handle())\n        : array(std::move(shape), std::move(strides), ptr, base) { }\n\n    explicit array_t(ShapeContainer shape, const T *ptr = nullptr, handle base = handle())\n        : array_t(private_ctor{}, std::move(shape),\n                ExtraFlags & f_style ? f_strides(*shape, itemsize()) : c_strides(*shape, itemsize()),\n                ptr, base) { }\n\n    explicit array_t(size_t count, const T *ptr = nullptr, handle base = handle())\n        : array({count}, {}, ptr, base) { }\n\n    constexpr ssize_t itemsize() const {\n        return sizeof(T);\n    }\n\n    template<typename... Ix> ssize_t index_at(Ix... index) const {\n        return offset_at(index...) / itemsize();\n    }\n\n    template<typename... Ix> const T* data(Ix... index) const {\n        return static_cast<const T*>(array::data(index...));\n    }\n\n    template<typename... Ix> T* mutable_data(Ix... index) {\n        return static_cast<T*>(array::mutable_data(index...));\n    }\n\n    // Reference to element at a given index\n    template<typename... Ix> const T& at(Ix... index) const {\n        if ((ssize_t) sizeof...(index) != ndim())\n            fail_dim_check(sizeof...(index), \"index dimension mismatch\");\n        return *(static_cast<const T*>(array::data()) + byte_offset(ssize_t(index)...) / itemsize());\n    }\n\n    // Mutable reference to element at a given index\n    template<typename... Ix> T& mutable_at(Ix... index) {\n        if ((ssize_t) sizeof...(index) != ndim())\n            fail_dim_check(sizeof...(index), \"index dimension mismatch\");\n        return *(static_cast<T*>(array::mutable_data()) + byte_offset(ssize_t(index)...) / itemsize());\n    }\n\n    /**\n     * Returns a proxy object that provides access to the array's data without bounds or\n     * dimensionality checking.  Will throw if the array is missing the `writeable` flag.  
Use with\n     * care: the array must not be destroyed or reshaped for the duration of the returned object,\n     * and the caller must take care not to access invalid dimensions or dimension indices.\n     */\n    template <ssize_t Dims = -1> detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {\n        return array::mutable_unchecked<T, Dims>();\n    }\n\n    /**\n     * Returns a proxy object that provides const access to the array's data without bounds or\n     * dimensionality checking.  Unlike `unchecked()`, this does not require that the underlying\n     * array have the `writable` flag.  Use with care: the array must not be destroyed or reshaped\n     * for the duration of the returned object, and the caller must take care not to access invalid\n     * dimensions or dimension indices.\n     */\n    template <ssize_t Dims = -1> detail::unchecked_reference<T, Dims> unchecked() const & {\n        return array::unchecked<T, Dims>();\n    }\n\n    /// Ensure that the argument is a NumPy array of the correct dtype (and if not, try to convert\n    /// it).  
In case of an error, nullptr is returned and the Python error is cleared.\n    static array_t ensure(handle h) {\n        auto result = reinterpret_steal<array_t>(raw_array_t(h.ptr()));\n        if (!result)\n            PyErr_Clear();\n        return result;\n    }\n\n    static bool check_(handle h) {\n        const auto &api = detail::npy_api::get();\n        return api.PyArray_Check_(h.ptr())\n               && api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr, dtype::of<T>().ptr());\n    }\n\nprotected:\n    /// Create array from any object -- always returns a new reference\n    static PyObject *raw_array_t(PyObject *ptr) {\n        if (ptr == nullptr) {\n            PyErr_SetString(PyExc_ValueError, \"cannot create a pybind11::array_t from a nullptr\");\n            return nullptr;\n        }\n        return detail::npy_api::get().PyArray_FromAny_(\n            ptr, dtype::of<T>().release().ptr(), 0, 0,\n            detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);\n    }\n};\n\ntemplate <typename T>\nstruct format_descriptor<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {\n    static std::string format() {\n        return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::format();\n    }\n};\n\ntemplate <size_t N> struct format_descriptor<char[N]> {\n    static std::string format() { return std::to_string(N) + \"s\"; }\n};\ntemplate <size_t N> struct format_descriptor<std::array<char, N>> {\n    static std::string format() { return std::to_string(N) + \"s\"; }\n};\n\ntemplate <typename T>\nstruct format_descriptor<T, detail::enable_if_t<std::is_enum<T>::value>> {\n    static std::string format() {\n        return format_descriptor<\n            typename std::remove_cv<typename std::underlying_type<T>::type>::type>::format();\n    }\n};\n\ntemplate <typename T>\nstruct format_descriptor<T, detail::enable_if_t<detail::array_info<T>::is_array>> {\n    static std::string format() {\n        using namespace 
detail;\n        static constexpr auto extents = _(\"(\") + array_info<T>::extents + _(\")\");\n        return extents.text + format_descriptor<remove_all_extents_t<T>>::format();\n    }\n};\n\nNAMESPACE_BEGIN(detail)\ntemplate <typename T, int ExtraFlags>\nstruct pyobject_caster<array_t<T, ExtraFlags>> {\n    using type = array_t<T, ExtraFlags>;\n\n    bool load(handle src, bool convert) {\n        if (!convert && !type::check_(src))\n            return false;\n        value = type::ensure(src);\n        return static_cast<bool>(value);\n    }\n\n    static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {\n        return src.inc_ref();\n    }\n    PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);\n};\n\ntemplate <typename T>\nstruct compare_buffer_info<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {\n    static bool compare(const buffer_info& b) {\n        return npy_api::get().PyArray_EquivTypes_(dtype::of<T>().ptr(), dtype(b).ptr());\n    }\n};\n\ntemplate <typename T, typename = void>\nstruct npy_format_descriptor_name;\n\ntemplate <typename T>\nstruct npy_format_descriptor_name<T, enable_if_t<std::is_integral<T>::value>> {\n    static constexpr auto name = _<std::is_same<T, bool>::value>(\n        _(\"bool\"), _<std::is_signed<T>::value>(\"int\", \"uint\") + _<sizeof(T)*8>()\n    );\n};\n\ntemplate <typename T>\nstruct npy_format_descriptor_name<T, enable_if_t<std::is_floating_point<T>::value>> {\n    static constexpr auto name = _<std::is_same<T, float>::value || std::is_same<T, double>::value>(\n        _(\"float\") + _<sizeof(T)*8>(), _(\"longdouble\")\n    );\n};\n\ntemplate <typename T>\nstruct npy_format_descriptor_name<T, enable_if_t<is_complex<T>::value>> {\n    static constexpr auto name = _<std::is_same<typename T::value_type, float>::value\n                                   || std::is_same<typename T::value_type, double>::value>(\n        _(\"complex\") + _<sizeof(typename 
T::value_type)*16>(), _(\"longcomplex\")\n    );\n};\n\ntemplate <typename T>\nstruct npy_format_descriptor<T, enable_if_t<satisfies_any_of<T, std::is_arithmetic, is_complex>::value>>\n    : npy_format_descriptor_name<T> {\nprivate:\n    // NB: the order here must match the one in common.h\n    constexpr static const int values[15] = {\n        npy_api::NPY_BOOL_,\n        npy_api::NPY_BYTE_,   npy_api::NPY_UBYTE_,   npy_api::NPY_SHORT_,    npy_api::NPY_USHORT_,\n        npy_api::NPY_INT_,    npy_api::NPY_UINT_,    npy_api::NPY_LONGLONG_, npy_api::NPY_ULONGLONG_,\n        npy_api::NPY_FLOAT_,  npy_api::NPY_DOUBLE_,  npy_api::NPY_LONGDOUBLE_,\n        npy_api::NPY_CFLOAT_, npy_api::NPY_CDOUBLE_, npy_api::NPY_CLONGDOUBLE_\n    };\n\npublic:\n    static constexpr int value = values[detail::is_fmt_numeric<T>::index];\n\n    static pybind11::dtype dtype() {\n        if (auto ptr = npy_api::get().PyArray_DescrFromType_(value))\n            return reinterpret_borrow<pybind11::dtype>(ptr);\n        pybind11_fail(\"Unsupported buffer format!\");\n    }\n};\n\n#define PYBIND11_DECL_CHAR_FMT \\\n    static constexpr auto name = _(\"S\") + _<N>(); \\\n    static pybind11::dtype dtype() { return pybind11::dtype(std::string(\"S\") + std::to_string(N)); }\ntemplate <size_t N> struct npy_format_descriptor<char[N]> { PYBIND11_DECL_CHAR_FMT };\ntemplate <size_t N> struct npy_format_descriptor<std::array<char, N>> { PYBIND11_DECL_CHAR_FMT };\n#undef PYBIND11_DECL_CHAR_FMT\n\ntemplate<typename T> struct npy_format_descriptor<T, enable_if_t<array_info<T>::is_array>> {\nprivate:\n    using base_descr = npy_format_descriptor<typename array_info<T>::type>;\npublic:\n    static_assert(!array_info<T>::is_empty, \"Zero-sized arrays are not supported\");\n\n    static constexpr auto name = _(\"(\") + array_info<T>::extents + _(\")\") + base_descr::name;\n    static pybind11::dtype dtype() {\n        list shape;\n        array_info<T>::append_extents(shape);\n        return 
pybind11::dtype::from_args(pybind11::make_tuple(base_descr::dtype(), shape));\n    }\n};\n\ntemplate<typename T> struct npy_format_descriptor<T, enable_if_t<std::is_enum<T>::value>> {\nprivate:\n    using base_descr = npy_format_descriptor<typename std::underlying_type<T>::type>;\npublic:\n    static constexpr auto name = base_descr::name;\n    static pybind11::dtype dtype() { return base_descr::dtype(); }\n};\n\nstruct field_descriptor {\n    const char *name;\n    ssize_t offset;\n    ssize_t size;\n    std::string format;\n    dtype descr;\n};\n\ninline PYBIND11_NOINLINE void register_structured_dtype(\n    any_container<field_descriptor> fields,\n    const std::type_info& tinfo, ssize_t itemsize,\n    bool (*direct_converter)(PyObject *, void *&)) {\n\n    auto& numpy_internals = get_numpy_internals();\n    if (numpy_internals.get_type_info(tinfo, false))\n        pybind11_fail(\"NumPy: dtype is already registered\");\n\n    list names, formats, offsets;\n    for (auto field : *fields) {\n        if (!field.descr)\n            pybind11_fail(std::string(\"NumPy: unsupported field dtype: `\") +\n                            field.name + \"` @ \" + tinfo.name());\n        names.append(PYBIND11_STR_TYPE(field.name));\n        formats.append(field.descr);\n        offsets.append(pybind11::int_(field.offset));\n    }\n    auto dtype_ptr = pybind11::dtype(names, formats, offsets, itemsize).release().ptr();\n\n    // There is an existing bug in NumPy (as of v1.11): trailing bytes are\n    // not encoded explicitly into the format string. 
This will supposedly\n    // get fixed in v1.12; for further details, see these:\n    // - https://github.com/numpy/numpy/issues/7797\n    // - https://github.com/numpy/numpy/pull/7798\n    // Because of this, we won't use numpy's logic to generate buffer format\n    // strings and will just do it ourselves.\n    std::vector<field_descriptor> ordered_fields(std::move(fields));\n    std::sort(ordered_fields.begin(), ordered_fields.end(),\n        [](const field_descriptor &a, const field_descriptor &b) { return a.offset < b.offset; });\n    ssize_t offset = 0;\n    std::ostringstream oss;\n    // mark the structure as unaligned with '^', because numpy and C++ don't\n    // always agree about alignment (particularly for complex), and we're\n    // explicitly listing all our padding. This depends on none of the fields\n    // overriding the endianness. Putting the ^ in front of individual fields\n    // isn't guaranteed to work due to https://github.com/numpy/numpy/issues/9049\n    oss << \"^T{\";\n    for (auto& field : ordered_fields) {\n        if (field.offset > offset)\n            oss << (field.offset - offset) << 'x';\n        oss << field.format << ':' << field.name << ':';\n        offset = field.offset + field.size;\n    }\n    if (itemsize > offset)\n        oss << (itemsize - offset) << 'x';\n    oss << '}';\n    auto format_str = oss.str();\n\n    // Sanity check: verify that NumPy properly parses our buffer format string\n    auto& api = npy_api::get();\n    auto arr =  array(buffer_info(nullptr, itemsize, format_str, 1));\n    if (!api.PyArray_EquivTypes_(dtype_ptr, arr.dtype().ptr()))\n        pybind11_fail(\"NumPy: invalid buffer descriptor!\");\n\n    auto tindex = std::type_index(tinfo);\n    numpy_internals.registered_dtypes[tindex] = { dtype_ptr, format_str };\n    get_internals().direct_conversions[tindex].push_back(direct_converter);\n}\n\ntemplate <typename T, typename SFINAE> struct npy_format_descriptor {\n    
static_assert(is_pod_struct<T>::value, \"Attempt to use a non-POD or unimplemented POD type as a numpy dtype\");\n\n    static constexpr auto name = make_caster<T>::name;\n\n    static pybind11::dtype dtype() {\n        return reinterpret_borrow<pybind11::dtype>(dtype_ptr());\n    }\n\n    static std::string format() {\n        static auto format_str = get_numpy_internals().get_type_info<T>(true)->format_str;\n        return format_str;\n    }\n\n    static void register_dtype(any_container<field_descriptor> fields) {\n        register_structured_dtype(std::move(fields), typeid(typename std::remove_cv<T>::type),\n                                  sizeof(T), &direct_converter);\n    }\n\nprivate:\n    static PyObject* dtype_ptr() {\n        static PyObject* ptr = get_numpy_internals().get_type_info<T>(true)->dtype_ptr;\n        return ptr;\n    }\n\n    static bool direct_converter(PyObject *obj, void*& value) {\n        auto& api = npy_api::get();\n        if (!PyObject_TypeCheck(obj, api.PyVoidArrType_Type_))\n            return false;\n        if (auto descr = reinterpret_steal<object>(api.PyArray_DescrFromScalar_(obj))) {\n            if (api.PyArray_EquivTypes_(dtype_ptr(), descr.ptr())) {\n                value = ((PyVoidScalarObject_Proxy *) obj)->obval;\n                return true;\n            }\n        }\n        return false;\n    }\n};\n\n#ifdef __CLION_IDE__ // replace heavy macro with dummy code for the IDE (doesn't affect code)\n# define PYBIND11_NUMPY_DTYPE(Type, ...) ((void)0)\n# define PYBIND11_NUMPY_DTYPE_EX(Type, ...) 
((void)0)\n#else\n\n#define PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, Name)                                          \\\n    ::pybind11::detail::field_descriptor {                                                    \\\n        Name, offsetof(T, Field), sizeof(decltype(std::declval<T>().Field)),                  \\\n        ::pybind11::format_descriptor<decltype(std::declval<T>().Field)>::format(),           \\\n        ::pybind11::detail::npy_format_descriptor<decltype(std::declval<T>().Field)>::dtype() \\\n    }\n\n// Extract name, offset and format descriptor for a struct field\n#define PYBIND11_FIELD_DESCRIPTOR(T, Field) PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, #Field)\n\n// The main idea of this macro is borrowed from https://github.com/swansontec/map-macro\n// (C) William Swanson, Paul Fultz\n#define PYBIND11_EVAL0(...) __VA_ARGS__\n#define PYBIND11_EVAL1(...) PYBIND11_EVAL0 (PYBIND11_EVAL0 (PYBIND11_EVAL0 (__VA_ARGS__)))\n#define PYBIND11_EVAL2(...) PYBIND11_EVAL1 (PYBIND11_EVAL1 (PYBIND11_EVAL1 (__VA_ARGS__)))\n#define PYBIND11_EVAL3(...) PYBIND11_EVAL2 (PYBIND11_EVAL2 (PYBIND11_EVAL2 (__VA_ARGS__)))\n#define PYBIND11_EVAL4(...) PYBIND11_EVAL3 (PYBIND11_EVAL3 (PYBIND11_EVAL3 (__VA_ARGS__)))\n#define PYBIND11_EVAL(...)  PYBIND11_EVAL4 (PYBIND11_EVAL4 (PYBIND11_EVAL4 (__VA_ARGS__)))\n#define PYBIND11_MAP_END(...)\n#define PYBIND11_MAP_OUT\n#define PYBIND11_MAP_COMMA ,\n#define PYBIND11_MAP_GET_END() 0, PYBIND11_MAP_END\n#define PYBIND11_MAP_NEXT0(test, next, ...) 
next PYBIND11_MAP_OUT\n#define PYBIND11_MAP_NEXT1(test, next) PYBIND11_MAP_NEXT0 (test, next, 0)\n#define PYBIND11_MAP_NEXT(test, next)  PYBIND11_MAP_NEXT1 (PYBIND11_MAP_GET_END test, next)\n#ifdef _MSC_VER // MSVC is not as eager to expand macros, hence this workaround\n#define PYBIND11_MAP_LIST_NEXT1(test, next) \\\n    PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))\n#else\n#define PYBIND11_MAP_LIST_NEXT1(test, next) \\\n    PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)\n#endif\n#define PYBIND11_MAP_LIST_NEXT(test, next) \\\n    PYBIND11_MAP_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next)\n#define PYBIND11_MAP_LIST0(f, t, x, peek, ...) \\\n    f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST1) (f, t, peek, __VA_ARGS__)\n#define PYBIND11_MAP_LIST1(f, t, x, peek, ...) \\\n    f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST0) (f, t, peek, __VA_ARGS__)\n// PYBIND11_MAP_LIST(f, t, a1, a2, ...) expands to f(t, a1), f(t, a2), ...\n#define PYBIND11_MAP_LIST(f, t, ...) \\\n    PYBIND11_EVAL (PYBIND11_MAP_LIST1 (f, t, __VA_ARGS__, (), 0))\n\n#define PYBIND11_NUMPY_DTYPE(Type, ...) \\\n    ::pybind11::detail::npy_format_descriptor<Type>::register_dtype \\\n        (::std::vector<::pybind11::detail::field_descriptor> \\\n         {PYBIND11_MAP_LIST (PYBIND11_FIELD_DESCRIPTOR, Type, __VA_ARGS__)})\n\n#ifdef _MSC_VER\n#define PYBIND11_MAP2_LIST_NEXT1(test, next) \\\n    PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))\n#else\n#define PYBIND11_MAP2_LIST_NEXT1(test, next) \\\n    PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)\n#endif\n#define PYBIND11_MAP2_LIST_NEXT(test, next) \\\n    PYBIND11_MAP2_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next)\n#define PYBIND11_MAP2_LIST0(f, t, x1, x2, peek, ...) \\\n    f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST1) (f, t, peek, __VA_ARGS__)\n#define PYBIND11_MAP2_LIST1(f, t, x1, x2, peek, ...) 
\\\n    f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST0) (f, t, peek, __VA_ARGS__)\n// PYBIND11_MAP2_LIST(f, t, a1, a2, ...) expands to f(t, a1, a2), f(t, a3, a4), ...\n#define PYBIND11_MAP2_LIST(f, t, ...) \\\n    PYBIND11_EVAL (PYBIND11_MAP2_LIST1 (f, t, __VA_ARGS__, (), 0))\n\n#define PYBIND11_NUMPY_DTYPE_EX(Type, ...) \\\n    ::pybind11::detail::npy_format_descriptor<Type>::register_dtype \\\n        (::std::vector<::pybind11::detail::field_descriptor> \\\n         {PYBIND11_MAP2_LIST (PYBIND11_FIELD_DESCRIPTOR_EX, Type, __VA_ARGS__)})\n\n#endif // __CLION_IDE__\n\ntemplate  <class T>\nusing array_iterator = typename std::add_pointer<T>::type;\n\ntemplate <class T>\narray_iterator<T> array_begin(const buffer_info& buffer) {\n    return array_iterator<T>(reinterpret_cast<T*>(buffer.ptr));\n}\n\ntemplate <class T>\narray_iterator<T> array_end(const buffer_info& buffer) {\n    return array_iterator<T>(reinterpret_cast<T*>(buffer.ptr) + buffer.size);\n}\n\nclass common_iterator {\npublic:\n    using container_type = std::vector<ssize_t>;\n    using value_type = container_type::value_type;\n    using size_type = container_type::size_type;\n\n    common_iterator() : p_ptr(0), m_strides() {}\n\n    common_iterator(void* ptr, const container_type& strides, const container_type& shape)\n        : p_ptr(reinterpret_cast<char*>(ptr)), m_strides(strides.size()) {\n        m_strides.back() = static_cast<value_type>(strides.back());\n        for (size_type i = m_strides.size() - 1; i != 0; --i) {\n            size_type j = i - 1;\n            value_type s = static_cast<value_type>(shape[i]);\n            m_strides[j] = strides[j] + m_strides[i] - strides[i] * s;\n        }\n    }\n\n    void increment(size_type dim) {\n        p_ptr += m_strides[dim];\n    }\n\n    void* data() const {\n        return p_ptr;\n    }\n\nprivate:\n    char* p_ptr;\n    container_type m_strides;\n};\n\ntemplate <size_t N> class multi_array_iterator {\npublic:\n    using 
container_type = std::vector<ssize_t>;\n\n    multi_array_iterator(const std::array<buffer_info, N> &buffers,\n                         const container_type &shape)\n        : m_shape(shape.size()), m_index(shape.size(), 0),\n          m_common_iterator() {\n\n        // Manual copy to avoid conversion warning if using std::copy\n        for (size_t i = 0; i < shape.size(); ++i)\n            m_shape[i] = shape[i];\n\n        container_type strides(shape.size());\n        for (size_t i = 0; i < N; ++i)\n            init_common_iterator(buffers[i], shape, m_common_iterator[i], strides);\n    }\n\n    multi_array_iterator& operator++() {\n        for (size_t j = m_index.size(); j != 0; --j) {\n            size_t i = j - 1;\n            if (++m_index[i] != m_shape[i]) {\n                increment_common_iterator(i);\n                break;\n            } else {\n                m_index[i] = 0;\n            }\n        }\n        return *this;\n    }\n\n    template <size_t K, class T = void> T* data() const {\n        return reinterpret_cast<T*>(m_common_iterator[K].data());\n    }\n\nprivate:\n\n    using common_iter = common_iterator;\n\n    void init_common_iterator(const buffer_info &buffer,\n                              const container_type &shape,\n                              common_iter &iterator,\n                              container_type &strides) {\n        auto buffer_shape_iter = buffer.shape.rbegin();\n        auto buffer_strides_iter = buffer.strides.rbegin();\n        auto shape_iter = shape.rbegin();\n        auto strides_iter = strides.rbegin();\n\n        while (buffer_shape_iter != buffer.shape.rend()) {\n            if (*shape_iter == *buffer_shape_iter)\n                *strides_iter = *buffer_strides_iter;\n            else\n                *strides_iter = 0;\n\n            ++buffer_shape_iter;\n            ++buffer_strides_iter;\n            ++shape_iter;\n            ++strides_iter;\n        }\n\n        std::fill(strides_iter, 
strides.rend(), 0);\n        iterator = common_iter(buffer.ptr, strides, shape);\n    }\n\n    void increment_common_iterator(size_t dim) {\n        for (auto &iter : m_common_iterator)\n            iter.increment(dim);\n    }\n\n    container_type m_shape;\n    container_type m_index;\n    std::array<common_iter, N> m_common_iterator;\n};\n\nenum class broadcast_trivial { non_trivial, c_trivial, f_trivial };\n\n// Populates the shape and number of dimensions for the set of buffers.  Returns a broadcast_trivial\n// enum value indicating whether the broadcast is \"trivial\"--that is, has each buffer being either a\n// singleton or a full-size, C-contiguous (`c_trivial`) or Fortran-contiguous (`f_trivial`) storage\n// buffer; returns `non_trivial` otherwise.\ntemplate <size_t N>\nbroadcast_trivial broadcast(const std::array<buffer_info, N> &buffers, ssize_t &ndim, std::vector<ssize_t> &shape) {\n    ndim = std::accumulate(buffers.begin(), buffers.end(), ssize_t(0), [](ssize_t res, const buffer_info &buf) {\n        return std::max(res, buf.ndim);\n    });\n\n    shape.clear();\n    shape.resize((size_t) ndim, 1);\n\n    // Figure out the output size, and make sure all input arrays conform (i.e. 
are either size 1 or\n    // the full size).\n    for (size_t i = 0; i < N; ++i) {\n        auto res_iter = shape.rbegin();\n        auto end = buffers[i].shape.rend();\n        for (auto shape_iter = buffers[i].shape.rbegin(); shape_iter != end; ++shape_iter, ++res_iter) {\n            const auto &dim_size_in = *shape_iter;\n            auto &dim_size_out = *res_iter;\n\n            // Each input dimension can either be 1 or `n`, but `n` values must match across buffers\n            if (dim_size_out == 1)\n                dim_size_out = dim_size_in;\n            else if (dim_size_in != 1 && dim_size_in != dim_size_out)\n                pybind11_fail(\"pybind11::vectorize: incompatible size/dimension of inputs!\");\n        }\n    }\n\n    bool trivial_broadcast_c = true;\n    bool trivial_broadcast_f = true;\n    for (size_t i = 0; i < N && (trivial_broadcast_c || trivial_broadcast_f); ++i) {\n        if (buffers[i].size == 1)\n            continue;\n\n        // Require the same number of dimensions:\n        if (buffers[i].ndim != ndim)\n            return broadcast_trivial::non_trivial;\n\n        // Require all dimensions be full-size:\n        if (!std::equal(buffers[i].shape.cbegin(), buffers[i].shape.cend(), shape.cbegin()))\n            return broadcast_trivial::non_trivial;\n\n        // Check for C contiguity (but only if previous inputs were also C contiguous)\n        if (trivial_broadcast_c) {\n            ssize_t expect_stride = buffers[i].itemsize;\n            auto end = buffers[i].shape.crend();\n            for (auto shape_iter = buffers[i].shape.crbegin(), stride_iter = buffers[i].strides.crbegin();\n                    trivial_broadcast_c && shape_iter != end; ++shape_iter, ++stride_iter) {\n                if (expect_stride == *stride_iter)\n                    expect_stride *= *shape_iter;\n                else\n                    trivial_broadcast_c = false;\n            }\n        }\n\n        // Check for Fortran contiguity (if previous 
inputs were also F contiguous)\n        if (trivial_broadcast_f) {\n            ssize_t expect_stride = buffers[i].itemsize;\n            auto end = buffers[i].shape.cend();\n            for (auto shape_iter = buffers[i].shape.cbegin(), stride_iter = buffers[i].strides.cbegin();\n                    trivial_broadcast_f && shape_iter != end; ++shape_iter, ++stride_iter) {\n                if (expect_stride == *stride_iter)\n                    expect_stride *= *shape_iter;\n                else\n                    trivial_broadcast_f = false;\n            }\n        }\n    }\n\n    return\n        trivial_broadcast_c ? broadcast_trivial::c_trivial :\n        trivial_broadcast_f ? broadcast_trivial::f_trivial :\n        broadcast_trivial::non_trivial;\n}\n\ntemplate <typename T>\nstruct vectorize_arg {\n    static_assert(!std::is_rvalue_reference<T>::value, \"Functions with rvalue reference arguments cannot be vectorized\");\n    // The wrapped function gets called with this type:\n    using call_type = remove_reference_t<T>;\n    // Is this a vectorized argument?\n    static constexpr bool vectorize =\n        satisfies_any_of<call_type, std::is_arithmetic, is_complex, std::is_pod>::value &&\n        satisfies_none_of<call_type, std::is_pointer, std::is_array, is_std_array, std::is_enum>::value &&\n        (!std::is_reference<T>::value ||\n         (std::is_lvalue_reference<T>::value && std::is_const<call_type>::value));\n    // Accept this type: an array for vectorized types, otherwise the type as-is:\n    using type = conditional_t<vectorize, array_t<remove_cv_t<call_type>, array::forcecast>, T>;\n};\n\ntemplate <typename Func, typename Return, typename... Args>\nstruct vectorize_helper {\nprivate:\n    static constexpr size_t N = sizeof...(Args);\n    static constexpr size_t NVectorized = constexpr_sum(vectorize_arg<Args>::vectorize...);\n    static_assert(NVectorized >= 1,\n            \"pybind11::vectorize(...) 
requires a function with at least one vectorizable argument\");\n\npublic:\n    template <typename T>\n    explicit vectorize_helper(T &&f) : f(std::forward<T>(f)) { }\n\n    object operator()(typename vectorize_arg<Args>::type... args) {\n        return run(args...,\n                   make_index_sequence<N>(),\n                   select_indices<vectorize_arg<Args>::vectorize...>(),\n                   make_index_sequence<NVectorized>());\n    }\n\nprivate:\n    remove_reference_t<Func> f;\n\n    // Internal compiler error in MSVC 19.16.27025.1 (Visual Studio 2017 15.9.4), when compiling with \"/permissive-\" flag\n    // when arg_call_types is manually inlined.\n    using arg_call_types = std::tuple<typename vectorize_arg<Args>::call_type...>;\n    template <size_t Index> using param_n_t = typename std::tuple_element<Index, arg_call_types>::type;\n\n    // Runs a vectorized function given arguments tuple and three index sequences:\n    //     - Index is the full set of 0 ... (N-1) argument indices;\n    //     - VIndex is the subset of argument indices with vectorized parameters, letting us access\n    //       vectorized arguments (anything not in this sequence is passed through)\n    //     - BIndex is a incremental sequence (beginning at 0) of the same size as VIndex, so that\n    //       we can store vectorized buffer_infos in an array (argument VIndex has its buffer at\n    //       index BIndex in the array).\n    template <size_t... Index, size_t... VIndex, size_t... BIndex> object run(\n            typename vectorize_arg<Args>::type &...args,\n            index_sequence<Index...> i_seq, index_sequence<VIndex...> vi_seq, index_sequence<BIndex...> bi_seq) {\n\n        // Pointers to values the function was called with; the vectorized ones set here will start\n        // out as array_t<T> pointers, but they will be changed them to T pointers before we make\n        // call the wrapped function.  
Non-vectorized pointers are left as-is.\n        std::array<void *, N> params{{ &args... }};\n\n        // The array of `buffer_info`s of vectorized arguments:\n        std::array<buffer_info, NVectorized> buffers{{ reinterpret_cast<array *>(params[VIndex])->request()... }};\n\n        /* Determine dimensions parameters of output array */\n        ssize_t nd = 0;\n        std::vector<ssize_t> shape(0);\n        auto trivial = broadcast(buffers, nd, shape);\n        size_t ndim = (size_t) nd;\n\n        size_t size = std::accumulate(shape.begin(), shape.end(), (size_t) 1, std::multiplies<size_t>());\n\n        // If all arguments are 0-dimension arrays (i.e. single values) return a plain value (i.e.\n        // not wrapped in an array).\n        if (size == 1 && ndim == 0) {\n            PYBIND11_EXPAND_SIDE_EFFECTS(params[VIndex] = buffers[BIndex].ptr);\n            return cast(f(*reinterpret_cast<param_n_t<Index> *>(params[Index])...));\n        }\n\n        array_t<Return> result;\n        if (trivial == broadcast_trivial::f_trivial) result = array_t<Return, array::f_style>(shape);\n        else result = array_t<Return>(shape);\n\n        if (size == 0) return std::move(result);\n\n        /* Call the function */\n        if (trivial == broadcast_trivial::non_trivial)\n            apply_broadcast(buffers, params, result, i_seq, vi_seq, bi_seq);\n        else\n            apply_trivial(buffers, params, result.mutable_data(), size, i_seq, vi_seq, bi_seq);\n\n        return std::move(result);\n    }\n\n    template <size_t... Index, size_t... VIndex, size_t... 
BIndex>\n    void apply_trivial(std::array<buffer_info, NVectorized> &buffers,\n                       std::array<void *, N> &params,\n                       Return *out,\n                       size_t size,\n                       index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {\n\n        // Initialize an array of mutable byte references and sizes with references set to the\n        // appropriate pointer in `params`; as we iterate, we'll increment each pointer by its size\n        // (except for singletons, which get an increment of 0).\n        std::array<std::pair<unsigned char *&, const size_t>, NVectorized> vecparams{{\n            std::pair<unsigned char *&, const size_t>(\n                    reinterpret_cast<unsigned char *&>(params[VIndex] = buffers[BIndex].ptr),\n                    buffers[BIndex].size == 1 ? 0 : sizeof(param_n_t<VIndex>)\n            )...\n        }};\n\n        for (size_t i = 0; i < size; ++i) {\n            out[i] = f(*reinterpret_cast<param_n_t<Index> *>(params[Index])...);\n            for (auto &x : vecparams) x.first += x.second;\n        }\n    }\n\n    template <size_t... Index, size_t... VIndex, size_t... 
BIndex>\n    void apply_broadcast(std::array<buffer_info, NVectorized> &buffers,\n                         std::array<void *, N> &params,\n                         array_t<Return> &output_array,\n                         index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {\n\n        buffer_info output = output_array.request();\n        multi_array_iterator<NVectorized> input_iter(buffers, output.shape);\n\n        for (array_iterator<Return> iter = array_begin<Return>(output), end = array_end<Return>(output);\n             iter != end;\n             ++iter, ++input_iter) {\n            PYBIND11_EXPAND_SIDE_EFFECTS((\n                params[VIndex] = input_iter.template data<BIndex>()\n            ));\n            *iter = f(*reinterpret_cast<param_n_t<Index> *>(std::get<Index>(params))...);\n        }\n    }\n};\n\ntemplate <typename Func, typename Return, typename... Args>\nvectorize_helper<Func, Return, Args...>\nvectorize_extractor(const Func &f, Return (*) (Args ...)) {\n    return detail::vectorize_helper<Func, Return, Args...>(f);\n}\n\ntemplate <typename T, int Flags> struct handle_type_name<array_t<T, Flags>> {\n    static constexpr auto name = _(\"numpy.ndarray[\") + npy_format_descriptor<T>::name + _(\"]\");\n};\n\nNAMESPACE_END(detail)\n\n// Vanilla pointer vectorizer:\ntemplate <typename Return, typename... 
Args>\ndetail::vectorize_helper<Return (*)(Args...), Return, Args...>\nvectorize(Return (*f) (Args ...)) {\n    return detail::vectorize_helper<Return (*)(Args...), Return, Args...>(f);\n}\n\n// lambda vectorizer:\ntemplate <typename Func, detail::enable_if_t<detail::is_lambda<Func>::value, int> = 0>\nauto vectorize(Func &&f) -> decltype(\n        detail::vectorize_extractor(std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr)) {\n    return detail::vectorize_extractor(std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr);\n}\n\n// Vectorize a class method (non-const):\ntemplate <typename Return, typename Class, typename... Args,\n          typename Helper = detail::vectorize_helper<decltype(std::mem_fn(std::declval<Return (Class::*)(Args...)>())), Return, Class *, Args...>>\nHelper vectorize(Return (Class::*f)(Args...)) {\n    return Helper(std::mem_fn(f));\n}\n\n// Vectorize a class method (const):\ntemplate <typename Return, typename Class, typename... Args,\n          typename Helper = detail::vectorize_helper<decltype(std::mem_fn(std::declval<Return (Class::*)(Args...) const>())), Return, const Class *, Args...>>\nHelper vectorize(Return (Class::*f)(Args...) const) {\n    return Helper(std::mem_fn(f));\n}\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n\n#if defined(_MSC_VER)\n#pragma warning(pop)\n#endif\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/operators.h",
    "content": "/*\n    pybind11/operator.h: Metatemplates for operator overloading\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n\n#if defined(__clang__) && !defined(__INTEL_COMPILER)\n#  pragma clang diagnostic ignored \"-Wunsequenced\" // multiple unsequenced modifications to 'self' (when using def(py::self OP Type()))\n#elif defined(_MSC_VER)\n#  pragma warning(push)\n#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant\n#endif\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\n/// Enumeration with all supported operator types\nenum op_id : int {\n    op_add, op_sub, op_mul, op_div, op_mod, op_divmod, op_pow, op_lshift,\n    op_rshift, op_and, op_xor, op_or, op_neg, op_pos, op_abs, op_invert,\n    op_int, op_long, op_float, op_str, op_cmp, op_gt, op_ge, op_lt, op_le,\n    op_eq, op_ne, op_iadd, op_isub, op_imul, op_idiv, op_imod, op_ilshift,\n    op_irshift, op_iand, op_ixor, op_ior, op_complex, op_bool, op_nonzero,\n    op_repr, op_truediv, op_itruediv, op_hash\n};\n\nenum op_type : int {\n    op_l, /* base type on left */\n    op_r, /* base type on right */\n    op_u  /* unary operator */\n};\n\nstruct self_t { };\nstatic const self_t self = self_t();\n\n/// Type for an unused type slot\nstruct undefined_t { };\n\n/// Don't warn about an unused variable\ninline self_t __self() { return self; }\n\n/// base template of operator implementations\ntemplate <op_id, op_type, typename B, typename L, typename R> struct op_impl { };\n\n/// Operator implementation generator\ntemplate <op_id id, op_type ot, typename L, typename R> struct op_ {\n    template <typename Class, typename... Extra> void execute(Class &cl, const Extra&... 
extra) const {\n        using Base = typename Class::type;\n        using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;\n        using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;\n        using op = op_impl<id, ot, Base, L_type, R_type>;\n        cl.def(op::name(), &op::execute, is_operator(), extra...);\n        #if PY_MAJOR_VERSION < 3\n        if (id == op_truediv || id == op_itruediv)\n            cl.def(id == op_itruediv ? \"__idiv__\" : ot == op_l ? \"__div__\" : \"__rdiv__\",\n                    &op::execute, is_operator(), extra...);\n        #endif\n    }\n    template <typename Class, typename... Extra> void execute_cast(Class &cl, const Extra&... extra) const {\n        using Base = typename Class::type;\n        using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;\n        using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;\n        using op = op_impl<id, ot, Base, L_type, R_type>;\n        cl.def(op::name(), &op::execute_cast, is_operator(), extra...);\n        #if PY_MAJOR_VERSION < 3\n        if (id == op_truediv || id == op_itruediv)\n            cl.def(id == op_itruediv ? \"__idiv__\" : ot == op_l ? 
\"__div__\" : \"__rdiv__\",\n                    &op::execute, is_operator(), extra...);\n        #endif\n    }\n};\n\n#define PYBIND11_BINARY_OPERATOR(id, rid, op, expr)                                    \\\ntemplate <typename B, typename L, typename R> struct op_impl<op_##id, op_l, B, L, R> { \\\n    static char const* name() { return \"__\" #id \"__\"; }                                \\\n    static auto execute(const L &l, const R &r) -> decltype(expr) { return (expr); }   \\\n    static B execute_cast(const L &l, const R &r) { return B(expr); }                  \\\n};                                                                                     \\\ntemplate <typename B, typename L, typename R> struct op_impl<op_##id, op_r, B, L, R> { \\\n    static char const* name() { return \"__\" #rid \"__\"; }                               \\\n    static auto execute(const R &r, const L &l) -> decltype(expr) { return (expr); }   \\\n    static B execute_cast(const R &r, const L &l) { return B(expr); }                  \\\n};                                                                                     \\\ninline op_<op_##id, op_l, self_t, self_t> op(const self_t &, const self_t &) {         \\\n    return op_<op_##id, op_l, self_t, self_t>();                                       \\\n}                                                                                      \\\ntemplate <typename T> op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) {    \\\n    return op_<op_##id, op_l, self_t, T>();                                            \\\n}                                                                                      \\\ntemplate <typename T> op_<op_##id, op_r, T, self_t> op(const T &, const self_t &) {    \\\n    return op_<op_##id, op_r, T, self_t>();                                            \\\n}\n\n#define PYBIND11_INPLACE_OPERATOR(id, op, expr)                                        \\\ntemplate <typename B, typename L, typename R> 
struct op_impl<op_##id, op_l, B, L, R> { \\\n    static char const* name() { return \"__\" #id \"__\"; }                                \\\n    static auto execute(L &l, const R &r) -> decltype(expr) { return expr; }           \\\n    static B execute_cast(L &l, const R &r) { return B(expr); }                        \\\n};                                                                                     \\\ntemplate <typename T> op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) {    \\\n    return op_<op_##id, op_l, self_t, T>();                                            \\\n}\n\n#define PYBIND11_UNARY_OPERATOR(id, op, expr)                                          \\\ntemplate <typename B, typename L> struct op_impl<op_##id, op_u, B, L, undefined_t> {   \\\n    static char const* name() { return \"__\" #id \"__\"; }                                \\\n    static auto execute(const L &l) -> decltype(expr) { return expr; }                 \\\n    static B execute_cast(const L &l) { return B(expr); }                              \\\n};                                                                                     \\\ninline op_<op_##id, op_u, self_t, undefined_t> op(const self_t &) {                    \\\n    return op_<op_##id, op_u, self_t, undefined_t>();                                  \\\n}\n\nPYBIND11_BINARY_OPERATOR(sub,       rsub,         operator-,    l - r)\nPYBIND11_BINARY_OPERATOR(add,       radd,         operator+,    l + r)\nPYBIND11_BINARY_OPERATOR(mul,       rmul,         operator*,    l * r)\nPYBIND11_BINARY_OPERATOR(truediv,   rtruediv,     operator/,    l / r)\nPYBIND11_BINARY_OPERATOR(mod,       rmod,         operator%,    l % r)\nPYBIND11_BINARY_OPERATOR(lshift,    rlshift,      operator<<,   l << r)\nPYBIND11_BINARY_OPERATOR(rshift,    rrshift,      operator>>,   l >> r)\nPYBIND11_BINARY_OPERATOR(and,       rand,         operator&,    l & r)\nPYBIND11_BINARY_OPERATOR(xor,       rxor,         operator^,    l ^ 
r)\nPYBIND11_BINARY_OPERATOR(eq,        eq,           operator==,   l == r)\nPYBIND11_BINARY_OPERATOR(ne,        ne,           operator!=,   l != r)\nPYBIND11_BINARY_OPERATOR(or,        ror,          operator|,    l | r)\nPYBIND11_BINARY_OPERATOR(gt,        lt,           operator>,    l > r)\nPYBIND11_BINARY_OPERATOR(ge,        le,           operator>=,   l >= r)\nPYBIND11_BINARY_OPERATOR(lt,        gt,           operator<,    l < r)\nPYBIND11_BINARY_OPERATOR(le,        ge,           operator<=,   l <= r)\n//PYBIND11_BINARY_OPERATOR(pow,       rpow,         pow,          std::pow(l,  r))\nPYBIND11_INPLACE_OPERATOR(iadd,     operator+=,   l += r)\nPYBIND11_INPLACE_OPERATOR(isub,     operator-=,   l -= r)\nPYBIND11_INPLACE_OPERATOR(imul,     operator*=,   l *= r)\nPYBIND11_INPLACE_OPERATOR(itruediv, operator/=,   l /= r)\nPYBIND11_INPLACE_OPERATOR(imod,     operator%=,   l %= r)\nPYBIND11_INPLACE_OPERATOR(ilshift,  operator<<=,  l <<= r)\nPYBIND11_INPLACE_OPERATOR(irshift,  operator>>=,  l >>= r)\nPYBIND11_INPLACE_OPERATOR(iand,     operator&=,   l &= r)\nPYBIND11_INPLACE_OPERATOR(ixor,     operator^=,   l ^= r)\nPYBIND11_INPLACE_OPERATOR(ior,      operator|=,   l |= r)\nPYBIND11_UNARY_OPERATOR(neg,        operator-,    -l)\nPYBIND11_UNARY_OPERATOR(pos,        operator+,    +l)\nPYBIND11_UNARY_OPERATOR(abs,        abs,          std::abs(l))\nPYBIND11_UNARY_OPERATOR(hash,       hash,         std::hash<L>()(l))\nPYBIND11_UNARY_OPERATOR(invert,     operator~,    (~l))\nPYBIND11_UNARY_OPERATOR(bool,       operator!,    !!l)\nPYBIND11_UNARY_OPERATOR(int,        int_,         (int) l)\nPYBIND11_UNARY_OPERATOR(float,      float_,       (double) l)\n\n#undef PYBIND11_BINARY_OPERATOR\n#undef PYBIND11_INPLACE_OPERATOR\n#undef PYBIND11_UNARY_OPERATOR\nNAMESPACE_END(detail)\n\nusing detail::self;\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n\n#if defined(_MSC_VER)\n#  pragma warning(pop)\n#endif\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/options.h",
    "content": "/*\n    pybind11/options.h: global settings that are configurable at runtime.\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"detail/common.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\nclass options {\npublic:\n\n    // Default RAII constructor, which leaves settings as they currently are.\n    options() : previous_state(global_state()) {}\n\n    // Class is non-copyable.\n    options(const options&) = delete;\n    options& operator=(const options&) = delete;\n\n    // Destructor, which restores settings that were in effect before.\n    ~options() {\n        global_state() = previous_state;\n    }\n\n    // Setter methods (affect the global state):\n\n    options& disable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = false; return *this; }\n\n    options& enable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = true; return *this; }\n\n    options& disable_function_signatures() & { global_state().show_function_signatures = false; return *this; }\n\n    options& enable_function_signatures() & { global_state().show_function_signatures = true; return *this; }\n\n    // Getter methods (return the global state):\n\n    static bool show_user_defined_docstrings() { return global_state().show_user_defined_docstrings; }\n\n    static bool show_function_signatures() { return global_state().show_function_signatures; }\n\n    // This type is not meant to be allocated on the heap.\n    void* operator new(size_t) = delete;\n\nprivate:\n\n    struct state {\n        bool show_user_defined_docstrings = true;  //< Include user-supplied texts in docstrings.\n        bool show_function_signatures = true;      //< Include auto-generated function signatures in docstrings.\n    };\n\n    static state &global_state() {\n        static state 
instance;\n        return instance;\n    }\n\n    state previous_state;\n};\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/pybind11.h",
    "content": "/*\n    pybind11/pybind11.h: Main header file of the C++11 python\n    binding generator library\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#if defined(__INTEL_COMPILER)\n#  pragma warning push\n#  pragma warning disable 68    // integer conversion resulted in a change of sign\n#  pragma warning disable 186   // pointless comparison of unsigned integer with zero\n#  pragma warning disable 878   // incompatible exception specifications\n#  pragma warning disable 1334  // the \"template\" keyword used for syntactic disambiguation may only be used within a template\n#  pragma warning disable 1682  // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)\n#  pragma warning disable 1786  // function \"strdup\" was declared deprecated\n#  pragma warning disable 1875  // offsetof applied to non-POD (Plain Old Data) types is nonstandard\n#  pragma warning disable 2196  // warning #2196: routine is both \"inline\" and \"noinline\"\n#elif defined(_MSC_VER)\n#  pragma warning(push)\n#  pragma warning(disable: 4100) // warning C4100: Unreferenced formal parameter\n#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant\n#  pragma warning(disable: 4512) // warning C4512: Assignment operator was implicitly defined as deleted\n#  pragma warning(disable: 4800) // warning C4800: 'int': forcing value to bool 'true' or 'false' (performance warning)\n#  pragma warning(disable: 4996) // warning C4996: The POSIX name for this item is deprecated. 
Instead, use the ISO C and C++ conformant name\n#  pragma warning(disable: 4702) // warning C4702: unreachable code\n#  pragma warning(disable: 4522) // warning C4522: multiple assignment operators specified\n#elif defined(__GNUG__) && !defined(__clang__)\n#  pragma GCC diagnostic push\n#  pragma GCC diagnostic ignored \"-Wunused-but-set-parameter\"\n#  pragma GCC diagnostic ignored \"-Wunused-but-set-variable\"\n#  pragma GCC diagnostic ignored \"-Wmissing-field-initializers\"\n#  pragma GCC diagnostic ignored \"-Wstrict-aliasing\"\n#  pragma GCC diagnostic ignored \"-Wattributes\"\n#  if __GNUC__ >= 7\n#    pragma GCC diagnostic ignored \"-Wnoexcept-type\"\n#  endif\n#endif\n\n#include \"attr.h\"\n#include \"options.h\"\n#include \"detail/class.h\"\n#include \"detail/init.h\"\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\n/// Wraps an arbitrary C++ function/method/lambda function/.. into a callable Python object\nclass cpp_function : public function {\npublic:\n    cpp_function() { }\n    cpp_function(std::nullptr_t) { }\n\n    /// Construct a cpp_function from a vanilla function pointer\n    template <typename Return, typename... Args, typename... Extra>\n    cpp_function(Return (*f)(Args...), const Extra&... extra) {\n        initialize(f, f, extra...);\n    }\n\n    /// Construct a cpp_function from a lambda function (possibly with internal state)\n    template <typename Func, typename... Extra,\n              typename = detail::enable_if_t<detail::is_lambda<Func>::value>>\n    cpp_function(Func &&f, const Extra&... extra) {\n        initialize(std::forward<Func>(f),\n                   (detail::function_signature_t<Func> *) nullptr, extra...);\n    }\n\n    /// Construct a cpp_function from a class method (non-const)\n    template <typename Return, typename Class, typename... Arg, typename... Extra>\n    cpp_function(Return (Class::*f)(Arg...), const Extra&... extra) {\n        initialize([f](Class *c, Arg... 
args) -> Return { return (c->*f)(args...); },\n                   (Return (*) (Class *, Arg...)) nullptr, extra...);\n    }\n\n    /// Construct a cpp_function from a class method (const)\n    template <typename Return, typename Class, typename... Arg, typename... Extra>\n    cpp_function(Return (Class::*f)(Arg...) const, const Extra&... extra) {\n        initialize([f](const Class *c, Arg... args) -> Return { return (c->*f)(args...); },\n                   (Return (*)(const Class *, Arg ...)) nullptr, extra...);\n    }\n\n    /// Return the function name\n    object name() const { return attr(\"__name__\"); }\n\nprotected:\n    /// Space optimization: don't inline this frequently instantiated fragment\n    PYBIND11_NOINLINE detail::function_record *make_function_record() {\n        return new detail::function_record();\n    }\n\n    /// Special internal constructor for functors, lambda functions, etc.\n    template <typename Func, typename Return, typename... Args, typename... Extra>\n    void initialize(Func &&f, Return (*)(Args...), const Extra&... extra) {\n        using namespace detail;\n        struct capture { remove_reference_t<Func> f; };\n\n        /* Store the function including any extra state it might have (e.g. a lambda capture object) */\n        auto rec = make_function_record();\n\n        /* Store the capture object directly in the function record if there is enough space */\n        if (sizeof(capture) <= sizeof(rec->data)) {\n            /* Without these pragmas, GCC warns that there might not be\n               enough space to use the placement new operator. However, the\n               'if' statement above ensures that this is the case. 
*/\n#if defined(__GNUG__) && !defined(__clang__) && __GNUC__ >= 6\n#  pragma GCC diagnostic push\n#  pragma GCC diagnostic ignored \"-Wplacement-new\"\n#endif\n            new ((capture *) &rec->data) capture { std::forward<Func>(f) };\n#if defined(__GNUG__) && !defined(__clang__) && __GNUC__ >= 6\n#  pragma GCC diagnostic pop\n#endif\n            if (!std::is_trivially_destructible<Func>::value)\n                rec->free_data = [](function_record *r) { ((capture *) &r->data)->~capture(); };\n        } else {\n            rec->data[0] = new capture { std::forward<Func>(f) };\n            rec->free_data = [](function_record *r) { delete ((capture *) r->data[0]); };\n        }\n\n        /* Type casters for the function arguments and return value */\n        using cast_in = argument_loader<Args...>;\n        using cast_out = make_caster<\n            conditional_t<std::is_void<Return>::value, void_type, Return>\n        >;\n\n        static_assert(expected_num_args<Extra...>(sizeof...(Args), cast_in::has_args, cast_in::has_kwargs),\n                      \"The number of argument annotations does not match the number of function arguments\");\n\n        /* Dispatch code which converts function arguments and performs the actual function call */\n        rec->impl = [](function_call &call) -> handle {\n            cast_in args_converter;\n\n            /* Try to cast the function arguments into the C++ domain */\n            if (!args_converter.load_args(call))\n                return PYBIND11_TRY_NEXT_OVERLOAD;\n\n            /* Invoke call policy pre-call hook */\n            process_attributes<Extra...>::precall(call);\n\n            /* Get a pointer to the capture object */\n            auto data = (sizeof(capture) <= sizeof(call.func.data)\n                         ? 
&call.func.data : call.func.data[0]);\n            capture *cap = const_cast<capture *>(reinterpret_cast<const capture *>(data));\n\n            /* Override policy for rvalues -- usually to enforce rvp::move on an rvalue */\n            return_value_policy policy = return_value_policy_override<Return>::policy(call.func.policy);\n\n            /* Function scope guard -- defaults to the compile-to-nothing `void_type` */\n            using Guard = extract_guard_t<Extra...>;\n\n            /* Perform the function call */\n            handle result = cast_out::cast(\n                std::move(args_converter).template call<Return, Guard>(cap->f), policy, call.parent);\n\n            /* Invoke call policy post-call hook */\n            process_attributes<Extra...>::postcall(call, result);\n\n            return result;\n        };\n\n        /* Process any user-provided function attributes */\n        process_attributes<Extra...>::init(extra..., rec);\n\n        /* Generate a readable signature describing the function's arguments and return value types */\n        static constexpr auto signature = _(\"(\") + cast_in::arg_names + _(\") -> \") + cast_out::name;\n        PYBIND11_DESCR_CONSTEXPR auto types = decltype(signature)::types();\n\n        /* Register the function with Python from generic (non-templated) code */\n        initialize_generic(rec, signature.text, types.data(), sizeof...(Args));\n\n        if (cast_in::has_args) rec->has_args = true;\n        if (cast_in::has_kwargs) rec->has_kwargs = true;\n\n        /* Stash some additional information used by an important optimization in 'functional.h' */\n        using FunctionType = Return (*)(Args...);\n        constexpr bool is_function_ptr =\n            std::is_convertible<Func, FunctionType>::value &&\n            sizeof(capture) == sizeof(void *);\n        if (is_function_ptr) {\n            rec->is_stateless = true;\n            rec->data[1] = const_cast<void *>(reinterpret_cast<const void 
*>(&typeid(FunctionType)));\n        }\n    }\n\n    /// Register a function call with Python (generic non-templated code goes here)\n    void initialize_generic(detail::function_record *rec, const char *text,\n                            const std::type_info *const *types, size_t args) {\n\n        /* Create copies of all referenced C-style strings */\n        rec->name = strdup(rec->name ? rec->name : \"\");\n        if (rec->doc) rec->doc = strdup(rec->doc);\n        for (auto &a: rec->args) {\n            if (a.name)\n                a.name = strdup(a.name);\n            if (a.descr)\n                a.descr = strdup(a.descr);\n            else if (a.value)\n                a.descr = strdup(a.value.attr(\"__repr__\")().cast<std::string>().c_str());\n        }\n\n        rec->is_constructor = !strcmp(rec->name, \"__init__\") || !strcmp(rec->name, \"__setstate__\");\n\n#if !defined(NDEBUG) && !defined(PYBIND11_DISABLE_NEW_STYLE_INIT_WARNING)\n        if (rec->is_constructor && !rec->is_new_style_constructor) {\n            const auto class_name = std::string(((PyTypeObject *) rec->scope.ptr())->tp_name);\n            const auto func_name = std::string(rec->name);\n            PyErr_WarnEx(\n                PyExc_FutureWarning,\n                (\"pybind11-bound class '\" + class_name + \"' is using an old-style \"\n                 \"placement-new '\" + func_name + \"' which has been deprecated. See \"\n                 \"the upgrade guide in pybind11's docs. 
This message is only visible \"\n                 \"when compiled in debug mode.\").c_str(), 0\n            );\n        }\n#endif\n\n        /* Generate a proper function signature */\n        std::string signature;\n        size_t type_index = 0, arg_index = 0;\n        for (auto *pc = text; *pc != '\\0'; ++pc) {\n            const auto c = *pc;\n\n            if (c == '{') {\n                // Write arg name for everything except *args and **kwargs.\n                if (*(pc + 1) == '*')\n                    continue;\n\n                if (arg_index < rec->args.size() && rec->args[arg_index].name) {\n                    signature += rec->args[arg_index].name;\n                } else if (arg_index == 0 && rec->is_method) {\n                    signature += \"self\";\n                } else {\n                    signature += \"arg\" + std::to_string(arg_index - (rec->is_method ? 1 : 0));\n                }\n                signature += \": \";\n            } else if (c == '}') {\n                // Write default value if available.\n                if (arg_index < rec->args.size() && rec->args[arg_index].descr) {\n                    signature += \" = \";\n                    signature += rec->args[arg_index].descr;\n                }\n                arg_index++;\n            } else if (c == '%') {\n                const std::type_info *t = types[type_index++];\n                if (!t)\n                    pybind11_fail(\"Internal error while parsing type signature (1)\");\n                if (auto tinfo = detail::get_type_info(*t)) {\n                    handle th((PyObject *) tinfo->type);\n                    signature +=\n                        th.attr(\"__module__\").cast<std::string>() + \".\" +\n                        th.attr(\"__qualname__\").cast<std::string>(); // Python 3.3+, but we backport it to earlier versions\n                } else if (rec->is_new_style_constructor && arg_index == 0) {\n                    // A new-style `__init__` takes 
`self` as `value_and_holder`.\n                    // Rewrite it to the proper class type.\n                    signature +=\n                        rec->scope.attr(\"__module__\").cast<std::string>() + \".\" +\n                        rec->scope.attr(\"__qualname__\").cast<std::string>();\n                } else {\n                    std::string tname(t->name());\n                    detail::clean_type_id(tname);\n                    signature += tname;\n                }\n            } else {\n                signature += c;\n            }\n        }\n        if (arg_index != args || types[type_index] != nullptr)\n            pybind11_fail(\"Internal error while parsing type signature (2)\");\n\n#if PY_MAJOR_VERSION < 3\n        if (strcmp(rec->name, \"__next__\") == 0) {\n            std::free(rec->name);\n            rec->name = strdup(\"next\");\n        } else if (strcmp(rec->name, \"__bool__\") == 0) {\n            std::free(rec->name);\n            rec->name = strdup(\"__nonzero__\");\n        }\n#endif\n        rec->signature = strdup(signature.c_str());\n        rec->args.shrink_to_fit();\n        rec->nargs = (std::uint16_t) args;\n\n        if (rec->sibling && PYBIND11_INSTANCE_METHOD_CHECK(rec->sibling.ptr()))\n            rec->sibling = PYBIND11_INSTANCE_METHOD_GET_FUNCTION(rec->sibling.ptr());\n\n        detail::function_record *chain = nullptr, *chain_start = rec;\n        if (rec->sibling) {\n            if (PyCFunction_Check(rec->sibling.ptr())) {\n                auto rec_capsule = reinterpret_borrow<capsule>(PyCFunction_GET_SELF(rec->sibling.ptr()));\n                chain = (detail::function_record *) rec_capsule;\n                /* Never append a method to an overload chain of a parent class;\n                   instead, hide the parent's overloads in this case */\n                if (!chain->scope.is(rec->scope))\n                    chain = nullptr;\n            }\n            // Don't trigger for things like the default __init__, which 
are wrapper_descriptors that we are intentionally replacing\n            else if (!rec->sibling.is_none() && rec->name[0] != '_')\n                pybind11_fail(\"Cannot overload existing non-function object \\\"\" + std::string(rec->name) +\n                        \"\\\" with a function of the same name\");\n        }\n\n        if (!chain) {\n            /* No existing overload was found, create a new function object */\n            rec->def = new PyMethodDef();\n            std::memset(rec->def, 0, sizeof(PyMethodDef));\n            rec->def->ml_name = rec->name;\n            rec->def->ml_meth = reinterpret_cast<PyCFunction>(reinterpret_cast<void (*) (void)>(*dispatcher));\n            rec->def->ml_flags = METH_VARARGS | METH_KEYWORDS;\n\n            capsule rec_capsule(rec, [](void *ptr) {\n                destruct((detail::function_record *) ptr);\n            });\n\n            object scope_module;\n            if (rec->scope) {\n                if (hasattr(rec->scope, \"__module__\")) {\n                    scope_module = rec->scope.attr(\"__module__\");\n                } else if (hasattr(rec->scope, \"__name__\")) {\n                    scope_module = rec->scope.attr(\"__name__\");\n                }\n            }\n\n            m_ptr = PyCFunction_NewEx(rec->def, rec_capsule.ptr(), scope_module.ptr());\n            if (!m_ptr)\n                pybind11_fail(\"cpp_function::cpp_function(): Could not allocate function object\");\n        } else {\n            /* Append at the end of the overload chain */\n            m_ptr = rec->sibling.ptr();\n            inc_ref();\n            chain_start = chain;\n            if (chain->is_method != rec->is_method)\n                pybind11_fail(\"overloading a method with both static and instance methods is not supported; \"\n                    #if defined(NDEBUG)\n                        \"compile in debug mode for more details\"\n                    #else\n                        \"error while attempting to bind 
\" + std::string(rec->is_method ? \"instance\" : \"static\") + \" method \" +\n                        std::string(pybind11::str(rec->scope.attr(\"__name__\"))) + \".\" + std::string(rec->name) + signature\n                    #endif\n                );\n            while (chain->next)\n                chain = chain->next;\n            chain->next = rec;\n        }\n\n        std::string signatures;\n        int index = 0;\n        /* Create a nice pydoc rec including all signatures and\n           docstrings of the functions in the overload chain */\n        if (chain && options::show_function_signatures()) {\n            // First a generic signature\n            signatures += rec->name;\n            signatures += \"(*args, **kwargs)\\n\";\n            signatures += \"Overloaded function.\\n\\n\";\n        }\n        // Then specific overload signatures\n        bool first_user_def = true;\n        for (auto it = chain_start; it != nullptr; it = it->next) {\n            if (options::show_function_signatures()) {\n                if (index > 0) signatures += \"\\n\";\n                if (chain)\n                    signatures += std::to_string(++index) + \". 
\";\n                signatures += rec->name;\n                signatures += it->signature;\n                signatures += \"\\n\";\n            }\n            if (it->doc && strlen(it->doc) > 0 && options::show_user_defined_docstrings()) {\n                // If we're appending another docstring, and aren't printing function signatures, we\n                // need to append a newline first:\n                if (!options::show_function_signatures()) {\n                    if (first_user_def) first_user_def = false;\n                    else signatures += \"\\n\";\n                }\n                if (options::show_function_signatures()) signatures += \"\\n\";\n                signatures += it->doc;\n                if (options::show_function_signatures()) signatures += \"\\n\";\n            }\n        }\n\n        /* Install docstring */\n        PyCFunctionObject *func = (PyCFunctionObject *) m_ptr;\n        if (func->m_ml->ml_doc)\n            std::free(const_cast<char *>(func->m_ml->ml_doc));\n        func->m_ml->ml_doc = strdup(signatures.c_str());\n\n        if (rec->is_method) {\n            m_ptr = PYBIND11_INSTANCE_METHOD_NEW(m_ptr, rec->scope.ptr());\n            if (!m_ptr)\n                pybind11_fail(\"cpp_function::cpp_function(): Could not allocate instance method object\");\n            Py_DECREF(func);\n        }\n    }\n\n    /// When a cpp_function is GCed, release any memory allocated by pybind11\n    static void destruct(detail::function_record *rec) {\n        while (rec) {\n            detail::function_record *next = rec->next;\n            if (rec->free_data)\n                rec->free_data(rec);\n            std::free((char *) rec->name);\n            std::free((char *) rec->doc);\n            std::free((char *) rec->signature);\n            for (auto &arg: rec->args) {\n                std::free(const_cast<char *>(arg.name));\n                std::free(const_cast<char *>(arg.descr));\n                arg.value.dec_ref();\n            
}\n            if (rec->def) {\n                std::free(const_cast<char *>(rec->def->ml_doc));\n                delete rec->def;\n            }\n            delete rec;\n            rec = next;\n        }\n    }\n\n    /// Main dispatch logic for calls to functions bound using pybind11\n    static PyObject *dispatcher(PyObject *self, PyObject *args_in, PyObject *kwargs_in) {\n        using namespace detail;\n\n        /* Iterator over the list of potentially admissible overloads */\n        const function_record *overloads = (function_record *) PyCapsule_GetPointer(self, nullptr),\n                              *it = overloads;\n\n        /* Need to know how many arguments + keyword arguments there are to pick the right overload */\n        const size_t n_args_in = (size_t) PyTuple_GET_SIZE(args_in);\n\n        handle parent = n_args_in > 0 ? PyTuple_GET_ITEM(args_in, 0) : nullptr,\n               result = PYBIND11_TRY_NEXT_OVERLOAD;\n\n        auto self_value_and_holder = value_and_holder();\n        if (overloads->is_constructor) {\n            const auto tinfo = get_type_info((PyTypeObject *) overloads->scope.ptr());\n            const auto pi = reinterpret_cast<instance *>(parent.ptr());\n            self_value_and_holder = pi->get_value_and_holder(tinfo, false);\n\n            if (!self_value_and_holder.type || !self_value_and_holder.inst) {\n                PyErr_SetString(PyExc_TypeError, \"__init__(self, ...) 
called with invalid `self` argument\");\n                return nullptr;\n            }\n\n            // If this value is already registered it must mean __init__ is invoked multiple times;\n            // we really can't support that in C++, so just ignore the second __init__.\n            if (self_value_and_holder.instance_registered())\n                return none().release().ptr();\n        }\n\n        try {\n            // We do this in two passes: in the first pass, we load arguments with `convert=false`;\n            // in the second, we allow conversion (except for arguments with an explicit\n            // py::arg().noconvert()).  This lets us prefer calls without conversion, with\n            // conversion as a fallback.\n            std::vector<function_call> second_pass;\n\n            // However, if there are no overloads, we can just skip the no-convert pass entirely\n            const bool overloaded = it != nullptr && it->next != nullptr;\n\n            for (; it != nullptr; it = it->next) {\n\n                /* For each overload:\n                   1. Copy all positional arguments we were given, also checking to make sure that\n                      named positional arguments weren't *also* specified via kwarg.\n                   2. If we weren't given enough, try to make up the omitted ones by checking\n                      whether they were provided by a kwarg matching the `py::arg(\"name\")` name.  If\n                      so, use it (and remove it from kwargs; if not, see if the function binding\n                      provided a default that we can use.\n                   3. Ensure that either all keyword arguments were \"consumed\", or that the function\n                      takes a kwargs argument to accept unconsumed kwargs.\n                   4. Any positional arguments still left get put into a tuple (for args), and any\n                      leftover kwargs get put into a dict.\n                   5. 
Pack everything into a vector; if we have py::args or py::kwargs, they are an\n                      extra tuple or dict at the end of the positional arguments.\n                   6. Call the function call dispatcher (function_record::impl)\n\n                   If one of these fail, move on to the next overload and keep trying until we get a\n                   result other than PYBIND11_TRY_NEXT_OVERLOAD.\n                 */\n\n                const function_record &func = *it;\n                size_t pos_args = func.nargs;    // Number of positional arguments that we need\n                if (func.has_args) --pos_args;   // (but don't count py::args\n                if (func.has_kwargs) --pos_args; //  or py::kwargs)\n\n                if (!func.has_args && n_args_in > pos_args)\n                    continue; // Too many arguments for this overload\n\n                if (n_args_in < pos_args && func.args.size() < pos_args)\n                    continue; // Not enough arguments given, and not enough defaults to fill in the blanks\n\n                function_call call(func, parent);\n\n                size_t args_to_copy = std::min(pos_args, n_args_in);\n                size_t args_copied = 0;\n\n                // 0. Inject new-style `self` argument\n                if (func.is_new_style_constructor) {\n                    // The `value` may have been preallocated by an old-style `__init__`\n                    // if it was a preceding candidate for overload resolution.\n                    if (self_value_and_holder)\n                        self_value_and_holder.type->dealloc(self_value_and_holder);\n\n                    call.init_self = PyTuple_GET_ITEM(args_in, 0);\n                    call.args.push_back(reinterpret_cast<PyObject *>(&self_value_and_holder));\n                    call.args_convert.push_back(false);\n                    ++args_copied;\n                }\n\n                // 1. 
Copy any position arguments given.\n                bool bad_arg = false;\n                for (; args_copied < args_to_copy; ++args_copied) {\n                    const argument_record *arg_rec = args_copied < func.args.size() ? &func.args[args_copied] : nullptr;\n                    if (kwargs_in && arg_rec && arg_rec->name && PyDict_GetItemString(kwargs_in, arg_rec->name)) {\n                        bad_arg = true;\n                        break;\n                    }\n\n                    handle arg(PyTuple_GET_ITEM(args_in, args_copied));\n                    if (arg_rec && !arg_rec->none && arg.is_none()) {\n                        bad_arg = true;\n                        break;\n                    }\n                    call.args.push_back(arg);\n                    call.args_convert.push_back(arg_rec ? arg_rec->convert : true);\n                }\n                if (bad_arg)\n                    continue; // Maybe it was meant for another overload (issue #688)\n\n                // We'll need to copy this if we steal some kwargs for defaults\n                dict kwargs = reinterpret_borrow<dict>(kwargs_in);\n\n                // 2. 
Check kwargs and, failing that, defaults that may help complete the list\n                if (args_copied < pos_args) {\n                    bool copied_kwargs = false;\n\n                    for (; args_copied < pos_args; ++args_copied) {\n                        const auto &arg = func.args[args_copied];\n\n                        handle value;\n                        if (kwargs_in && arg.name)\n                            value = PyDict_GetItemString(kwargs.ptr(), arg.name);\n\n                        if (value) {\n                            // Consume a kwargs value\n                            if (!copied_kwargs) {\n                                kwargs = reinterpret_steal<dict>(PyDict_Copy(kwargs.ptr()));\n                                copied_kwargs = true;\n                            }\n                            PyDict_DelItemString(kwargs.ptr(), arg.name);\n                        } else if (arg.value) {\n                            value = arg.value;\n                        }\n\n                        if (value) {\n                            call.args.push_back(value);\n                            call.args_convert.push_back(arg.convert);\n                        }\n                        else\n                            break;\n                    }\n\n                    if (args_copied < pos_args)\n                        continue; // Not enough arguments, defaults, or kwargs to fill the positional arguments\n                }\n\n                // 3. Check everything was consumed (unless we have a kwargs arg)\n                if (kwargs && kwargs.size() > 0 && !func.has_kwargs)\n                    continue; // Unconsumed kwargs, but no py::kwargs argument to accept them\n\n                // 4a. 
If we have a py::args argument, create a new tuple with leftovers\n                if (func.has_args) {\n                    tuple extra_args;\n                    if (args_to_copy == 0) {\n                        // We didn't copy out any position arguments from the args_in tuple, so we\n                        // can reuse it directly without copying:\n                        extra_args = reinterpret_borrow<tuple>(args_in);\n                    } else if (args_copied >= n_args_in) {\n                        extra_args = tuple(0);\n                    } else {\n                        size_t args_size = n_args_in - args_copied;\n                        extra_args = tuple(args_size);\n                        for (size_t i = 0; i < args_size; ++i) {\n                            extra_args[i] = PyTuple_GET_ITEM(args_in, args_copied + i);\n                        }\n                    }\n                    call.args.push_back(extra_args);\n                    call.args_convert.push_back(false);\n                    call.args_ref = std::move(extra_args);\n                }\n\n                // 4b. If we have a py::kwargs, pass on any remaining kwargs\n                if (func.has_kwargs) {\n                    if (!kwargs.ptr())\n                        kwargs = dict(); // If we didn't get one, send an empty one\n                    call.args.push_back(kwargs);\n                    call.args_convert.push_back(false);\n                    call.kwargs_ref = std::move(kwargs);\n                }\n\n                // 5. Put everything in a vector.  
Not technically step 5, we've been building it\n                // in `call.args` all along.\n                #if !defined(NDEBUG)\n                if (call.args.size() != func.nargs || call.args_convert.size() != func.nargs)\n                    pybind11_fail(\"Internal error: function call dispatcher inserted wrong number of arguments!\");\n                #endif\n\n                std::vector<bool> second_pass_convert;\n                if (overloaded) {\n                    // We're in the first no-convert pass, so swap out the conversion flags for a\n                    // set of all-false flags.  If the call fails, we'll swap the flags back in for\n                    // the conversion-allowed call below.\n                    second_pass_convert.resize(func.nargs, false);\n                    call.args_convert.swap(second_pass_convert);\n                }\n\n                // 6. Call the function.\n                try {\n                    loader_life_support guard{};\n                    result = func.impl(call);\n                } catch (reference_cast_error &) {\n                    result = PYBIND11_TRY_NEXT_OVERLOAD;\n                }\n\n                if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD)\n                    break;\n\n                if (overloaded) {\n                    // The (overloaded) call failed; if the call has at least one argument that\n                    // permits conversion (i.e. it hasn't been explicitly specified `.noconvert()`)\n                    // then add this call to the list of second pass overloads to try.\n                    for (size_t i = func.is_method ? 
1 : 0; i < pos_args; i++) {\n                        if (second_pass_convert[i]) {\n                            // Found one: swap the converting flags back in and store the call for\n                            // the second pass.\n                            call.args_convert.swap(second_pass_convert);\n                            second_pass.push_back(std::move(call));\n                            break;\n                        }\n                    }\n                }\n            }\n\n            if (overloaded && !second_pass.empty() && result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) {\n                // The no-conversion pass finished without success, try again with conversion allowed\n                for (auto &call : second_pass) {\n                    try {\n                        loader_life_support guard{};\n                        result = call.func.impl(call);\n                    } catch (reference_cast_error &) {\n                        result = PYBIND11_TRY_NEXT_OVERLOAD;\n                    }\n\n                    if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) {\n                        // The error reporting logic below expects 'it' to be valid, as it would be\n                        // if we'd encountered this failure in the first-pass loop.\n                        if (!result)\n                            it = &call.func;\n                        break;\n                    }\n                }\n            }\n        } catch (error_already_set &e) {\n            e.restore();\n            return nullptr;\n        } catch (...) 
{\n            /* When an exception is caught, give each registered exception\n               translator a chance to translate it to a Python exception\n               in reverse order of registration.\n\n               A translator may choose to do one of the following:\n\n                - catch the exception and call PyErr_SetString or PyErr_SetObject\n                  to set a standard (or custom) Python exception, or\n                - do nothing and let the exception fall through to the next translator, or\n                - delegate translation to the next translator by throwing a new type of exception. */\n\n            auto last_exception = std::current_exception();\n            auto &registered_exception_translators = get_internals().registered_exception_translators;\n            for (auto& translator : registered_exception_translators) {\n                try {\n                    translator(last_exception);\n                } catch (...) {\n                    last_exception = std::current_exception();\n                    continue;\n                }\n                return nullptr;\n            }\n            PyErr_SetString(PyExc_SystemError, \"Exception escaped from default exception translator!\");\n            return nullptr;\n        }\n\n        auto append_note_if_missing_header_is_suspected = [](std::string &msg) {\n            if (msg.find(\"std::\") != std::string::npos) {\n                msg += \"\\n\\n\"\n                       \"Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\\n\"\n                       \"<pybind11/functional.h>, <pybind11/chrono.h>, etc. 
Some automatic\\n\"\n                       \"conversions are optional and require extra headers to be included\\n\"\n                       \"when compiling your pybind11 module.\";\n            }\n        };\n\n        if (result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) {\n            if (overloads->is_operator)\n                return handle(Py_NotImplemented).inc_ref().ptr();\n\n            std::string msg = std::string(overloads->name) + \"(): incompatible \" +\n                std::string(overloads->is_constructor ? \"constructor\" : \"function\") +\n                \" arguments. The following argument types are supported:\\n\";\n\n            int ctr = 0;\n            for (const function_record *it2 = overloads; it2 != nullptr; it2 = it2->next) {\n                msg += \"    \"+ std::to_string(++ctr) + \". \";\n\n                bool wrote_sig = false;\n                if (overloads->is_constructor) {\n                    // For a constructor, rewrite `(self: Object, arg0, ...) -> NoneType` as `Object(arg0, ...)`\n                    std::string sig = it2->signature;\n                    size_t start = sig.find('(') + 7; // skip \"(self: \"\n                    if (start < sig.size()) {\n                        // End at the , for the next argument\n                        size_t end = sig.find(\", \"), next = end + 2;\n                        size_t ret = sig.rfind(\" -> \");\n                        // Or the ), if there is no comma:\n                        if (end >= sig.size()) next = end = sig.find(')');\n                        if (start < end && next < sig.size()) {\n                            msg.append(sig, start, end - start);\n                            msg += '(';\n                            msg.append(sig, next, ret - next);\n                            wrote_sig = true;\n                        }\n                    }\n                }\n                if (!wrote_sig) msg += it2->signature;\n\n                msg += \"\\n\";\n            }\n 
           msg += \"\\nInvoked with: \";\n            auto args_ = reinterpret_borrow<tuple>(args_in);\n            bool some_args = false;\n            for (size_t ti = overloads->is_constructor ? 1 : 0; ti < args_.size(); ++ti) {\n                if (!some_args) some_args = true;\n                else msg += \", \";\n                msg += pybind11::repr(args_[ti]);\n            }\n            if (kwargs_in) {\n                auto kwargs = reinterpret_borrow<dict>(kwargs_in);\n                if (kwargs.size() > 0) {\n                    if (some_args) msg += \"; \";\n                    msg += \"kwargs: \";\n                    bool first = true;\n                    for (auto kwarg : kwargs) {\n                        if (first) first = false;\n                        else msg += \", \";\n                        msg += pybind11::str(\"{}={!r}\").format(kwarg.first, kwarg.second);\n                    }\n                }\n            }\n\n            append_note_if_missing_header_is_suspected(msg);\n            PyErr_SetString(PyExc_TypeError, msg.c_str());\n            return nullptr;\n        } else if (!result) {\n            std::string msg = \"Unable to convert function return value to a \"\n                              \"Python type! 
The signature was\\n\\t\";\n            msg += it->signature;\n            append_note_if_missing_header_is_suspected(msg);\n            PyErr_SetString(PyExc_TypeError, msg.c_str());\n            return nullptr;\n        } else {\n            if (overloads->is_constructor && !self_value_and_holder.holder_constructed()) {\n                auto *pi = reinterpret_cast<instance *>(parent.ptr());\n                self_value_and_holder.type->init_instance(pi, nullptr);\n            }\n            return result.ptr();\n        }\n    }\n};\n\n/// Wrapper for Python extension modules\nclass module : public object {\npublic:\n    PYBIND11_OBJECT_DEFAULT(module, object, PyModule_Check)\n\n    /// Create a new top-level Python module with the given name and docstring\n    explicit module(const char *name, const char *doc = nullptr) {\n        if (!options::show_user_defined_docstrings()) doc = nullptr;\n#if PY_MAJOR_VERSION >= 3\n        PyModuleDef *def = new PyModuleDef();\n        std::memset(def, 0, sizeof(PyModuleDef));\n        def->m_name = name;\n        def->m_doc = doc;\n        def->m_size = -1;\n        Py_INCREF(def);\n        m_ptr = PyModule_Create(def);\n#else\n        m_ptr = Py_InitModule3(name, nullptr, doc);\n#endif\n        if (m_ptr == nullptr)\n            pybind11_fail(\"Internal error in module::module()\");\n        inc_ref();\n    }\n\n    /** \\rst\n        Create Python binding for a new function within the module scope. ``Func``\n        can be a plain C++ function, a function pointer, or a lambda function. For\n        details on the ``Extra&& ... extra`` argument, see section :ref:`extras`.\n    \\endrst */\n    template <typename Func, typename... Extra>\n    module &def(const char *name_, Func &&f, const Extra& ... 
extra) {\n        cpp_function func(std::forward<Func>(f), name(name_), scope(*this),\n                          sibling(getattr(*this, name_, none())), extra...);\n        // NB: allow overwriting here because cpp_function sets up a chain with the intention of\n        // overwriting (and has already checked internally that it isn't overwriting non-functions).\n        add_object(name_, func, true /* overwrite */);\n        return *this;\n    }\n\n    /** \\rst\n        Create and return a new Python submodule with the given name and docstring.\n        This also works recursively, i.e.\n\n        .. code-block:: cpp\n\n            py::module m(\"example\", \"pybind11 example plugin\");\n            py::module m2 = m.def_submodule(\"sub\", \"A submodule of 'example'\");\n            py::module m3 = m2.def_submodule(\"subsub\", \"A submodule of 'example.sub'\");\n    \\endrst */\n    module def_submodule(const char *name, const char *doc = nullptr) {\n        std::string full_name = std::string(PyModule_GetName(m_ptr))\n            + std::string(\".\") + std::string(name);\n        auto result = reinterpret_borrow<module>(PyImport_AddModule(full_name.c_str()));\n        if (doc && options::show_user_defined_docstrings())\n            result.attr(\"__doc__\") = pybind11::str(doc);\n        attr(name) = result;\n        return result;\n    }\n\n    /// Import and return a module or throws `error_already_set`.\n    static module import(const char *name) {\n        PyObject *obj = PyImport_ImportModule(name);\n        if (!obj)\n            throw error_already_set();\n        return reinterpret_steal<module>(obj);\n    }\n\n    /// Reload the module or throws `error_already_set`.\n    void reload() {\n        PyObject *obj = PyImport_ReloadModule(ptr());\n        if (!obj)\n            throw error_already_set();\n        *this = reinterpret_steal<module>(obj);\n    }\n\n    // Adds an object to the module using the given name.  
Throws if an object with the given name\n    // already exists.\n    //\n    // overwrite should almost always be false: attempting to overwrite objects that pybind11 has\n    // established will, in most cases, break things.\n    PYBIND11_NOINLINE void add_object(const char *name, handle obj, bool overwrite = false) {\n        if (!overwrite && hasattr(*this, name))\n            pybind11_fail(\"Error during initialization: multiple incompatible definitions with name \\\"\" +\n                    std::string(name) + \"\\\"\");\n\n        PyModule_AddObject(ptr(), name, obj.inc_ref().ptr() /* steals a reference */);\n    }\n};\n\n/// \\ingroup python_builtins\n/// Return a dictionary representing the global variables in the current execution frame,\n/// or ``__main__.__dict__`` if there is no frame (usually when the interpreter is embedded).\ninline dict globals() {\n    PyObject *p = PyEval_GetGlobals();\n    return reinterpret_borrow<dict>(p ? p : module::import(\"__main__\").attr(\"__dict__\").ptr());\n}\n\nNAMESPACE_BEGIN(detail)\n/// Generic support for creating new Python heap types\nclass generic_type : public object {\n    template <typename...> friend class class_;\npublic:\n    PYBIND11_OBJECT_DEFAULT(generic_type, object, PyType_Check)\nprotected:\n    void initialize(const type_record &rec) {\n        if (rec.scope && hasattr(rec.scope, rec.name))\n            pybind11_fail(\"generic_type: cannot initialize type \\\"\" + std::string(rec.name) +\n                          \"\\\": an object with that name is already defined\");\n\n        if (rec.module_local ? 
get_local_type_info(*rec.type) : get_global_type_info(*rec.type))\n            pybind11_fail(\"generic_type: type \\\"\" + std::string(rec.name) +\n                          \"\\\" is already registered!\");\n\n        m_ptr = make_new_python_type(rec);\n\n        /* Register supplemental type information in C++ dict */\n        auto *tinfo = new detail::type_info();\n        tinfo->type = (PyTypeObject *) m_ptr;\n        tinfo->cpptype = rec.type;\n        tinfo->type_size = rec.type_size;\n        tinfo->type_align = rec.type_align;\n        tinfo->operator_new = rec.operator_new;\n        tinfo->holder_size_in_ptrs = size_in_ptrs(rec.holder_size);\n        tinfo->init_instance = rec.init_instance;\n        tinfo->dealloc = rec.dealloc;\n        tinfo->simple_type = true;\n        tinfo->simple_ancestors = true;\n        tinfo->default_holder = rec.default_holder;\n        tinfo->module_local = rec.module_local;\n\n        auto &internals = get_internals();\n        auto tindex = std::type_index(*rec.type);\n        tinfo->direct_conversions = &internals.direct_conversions[tindex];\n        if (rec.module_local)\n            registered_local_types_cpp()[tindex] = tinfo;\n        else\n            internals.registered_types_cpp[tindex] = tinfo;\n        internals.registered_types_py[(PyTypeObject *) m_ptr] = { tinfo };\n\n        if (rec.bases.size() > 1 || rec.multiple_inheritance) {\n            mark_parents_nonsimple(tinfo->type);\n            tinfo->simple_ancestors = false;\n        }\n        else if (rec.bases.size() == 1) {\n            auto parent_tinfo = get_type_info((PyTypeObject *) rec.bases[0].ptr());\n            tinfo->simple_ancestors = parent_tinfo->simple_ancestors;\n        }\n\n        if (rec.module_local) {\n            // Stash the local typeinfo and loader so that external modules can access it.\n            tinfo->module_local_load = &type_caster_generic::local_load;\n            setattr(m_ptr, PYBIND11_MODULE_LOCAL_ID, capsule(tinfo));\n 
       }\n    }\n\n    /// Helper function which tags all parents of a type using mult. inheritance\n    void mark_parents_nonsimple(PyTypeObject *value) {\n        auto t = reinterpret_borrow<tuple>(value->tp_bases);\n        for (handle h : t) {\n            auto tinfo2 = get_type_info((PyTypeObject *) h.ptr());\n            if (tinfo2)\n                tinfo2->simple_type = false;\n            mark_parents_nonsimple((PyTypeObject *) h.ptr());\n        }\n    }\n\n    void install_buffer_funcs(\n            buffer_info *(*get_buffer)(PyObject *, void *),\n            void *get_buffer_data) {\n        PyHeapTypeObject *type = (PyHeapTypeObject*) m_ptr;\n        auto tinfo = detail::get_type_info(&type->ht_type);\n\n        if (!type->ht_type.tp_as_buffer)\n            pybind11_fail(\n                \"To be able to register buffer protocol support for the type '\" +\n                std::string(tinfo->type->tp_name) +\n                \"' the associated class<>(..) invocation must \"\n                \"include the pybind11::buffer_protocol() annotation!\");\n\n        tinfo->get_buffer = get_buffer;\n        tinfo->get_buffer_data = get_buffer_data;\n    }\n\n    // rec_func must be set for either fget or fset.\n    void def_property_static_impl(const char *name,\n                                  handle fget, handle fset,\n                                  detail::function_record *rec_func) {\n        const auto is_static = rec_func && !(rec_func->is_method && rec_func->scope);\n        const auto has_doc = rec_func && rec_func->doc && pybind11::options::show_user_defined_docstrings();\n        auto property = handle((PyObject *) (is_static ? get_internals().static_property_type\n                                                       : &PyProperty_Type));\n        attr(name) = property(fget.ptr() ? fget : none(),\n                              fset.ptr() ? 
fset : none(),\n                              /*deleter*/none(),\n                              pybind11::str(has_doc ? rec_func->doc : \"\"));\n    }\n};\n\n/// Set the pointer to operator new if it exists. The cast is needed because it can be overloaded.\ntemplate <typename T, typename = void_t<decltype(static_cast<void *(*)(size_t)>(T::operator new))>>\nvoid set_operator_new(type_record *r) { r->operator_new = &T::operator new; }\n\ntemplate <typename> void set_operator_new(...) { }\n\ntemplate <typename T, typename SFINAE = void> struct has_operator_delete : std::false_type { };\ntemplate <typename T> struct has_operator_delete<T, void_t<decltype(static_cast<void (*)(void *)>(T::operator delete))>>\n    : std::true_type { };\ntemplate <typename T, typename SFINAE = void> struct has_operator_delete_size : std::false_type { };\ntemplate <typename T> struct has_operator_delete_size<T, void_t<decltype(static_cast<void (*)(void *, size_t)>(T::operator delete))>>\n    : std::true_type { };\n/// Call class-specific delete if it exists or global otherwise. 
Can also be an overload set.\ntemplate <typename T, enable_if_t<has_operator_delete<T>::value, int> = 0>\nvoid call_operator_delete(T *p, size_t, size_t) { T::operator delete(p); }\ntemplate <typename T, enable_if_t<!has_operator_delete<T>::value && has_operator_delete_size<T>::value, int> = 0>\nvoid call_operator_delete(T *p, size_t s, size_t) { T::operator delete(p, s); }\n\ninline void call_operator_delete(void *p, size_t s, size_t a) {\n    (void)s; (void)a;\n#if defined(PYBIND11_CPP17)\n    if (a > __STDCPP_DEFAULT_NEW_ALIGNMENT__)\n        ::operator delete(p, s, std::align_val_t(a));\n    else\n        ::operator delete(p, s);\n#else\n    ::operator delete(p);\n#endif\n}\n\nNAMESPACE_END(detail)\n\n/// Given a pointer to a member function, cast it to its `Derived` version.\n/// Forward everything else unchanged.\ntemplate <typename /*Derived*/, typename F>\nauto method_adaptor(F &&f) -> decltype(std::forward<F>(f)) { return std::forward<F>(f); }\n\ntemplate <typename Derived, typename Return, typename Class, typename... Args>\nauto method_adaptor(Return (Class::*pmf)(Args...)) -> Return (Derived::*)(Args...) {\n    static_assert(detail::is_accessible_base_of<Class, Derived>::value,\n        \"Cannot bind an inaccessible base class method; use a lambda definition instead\");\n    return pmf;\n}\n\ntemplate <typename Derived, typename Return, typename Class, typename... Args>\nauto method_adaptor(Return (Class::*pmf)(Args...) const) -> Return (Derived::*)(Args...) const {\n    static_assert(detail::is_accessible_base_of<Class, Derived>::value,\n        \"Cannot bind an inaccessible base class method; use a lambda definition instead\");\n    return pmf;\n}\n\ntemplate <typename type_, typename... 
options>\nclass class_ : public detail::generic_type {\n    template <typename T> using is_holder = detail::is_holder_type<type_, T>;\n    template <typename T> using is_subtype = detail::is_strict_base_of<type_, T>;\n    template <typename T> using is_base = detail::is_strict_base_of<T, type_>;\n    // struct instead of using here to help MSVC:\n    template <typename T> struct is_valid_class_option :\n        detail::any_of<is_holder<T>, is_subtype<T>, is_base<T>> {};\n\npublic:\n    using type = type_;\n    using type_alias = detail::exactly_one_t<is_subtype, void, options...>;\n    constexpr static bool has_alias = !std::is_void<type_alias>::value;\n    using holder_type = detail::exactly_one_t<is_holder, std::unique_ptr<type>, options...>;\n\n    static_assert(detail::all_of<is_valid_class_option<options>...>::value,\n            \"Unknown/invalid class_ template parameters provided\");\n\n    static_assert(!has_alias || std::is_polymorphic<type>::value,\n            \"Cannot use an alias class with a non-polymorphic type\");\n\n    PYBIND11_OBJECT(class_, generic_type, PyType_Check)\n\n    template <typename... Extra>\n    class_(handle scope, const char *name, const Extra &... extra) {\n        using namespace detail;\n\n        // MI can only be specified via class_ template options, not constructor parameters\n        static_assert(\n            none_of<is_pyobject<Extra>...>::value || // no base class arguments, or:\n            (   constexpr_sum(is_pyobject<Extra>::value...) == 1 && // Exactly one base\n                constexpr_sum(is_base<options>::value...)   
== 0 && // no template option bases\n                none_of<std::is_same<multiple_inheritance, Extra>...>::value), // no multiple_inheritance attr\n            \"Error: multiple inheritance bases must be specified via class_ template options\");\n\n        type_record record;\n        record.scope = scope;\n        record.name = name;\n        record.type = &typeid(type);\n        record.type_size = sizeof(conditional_t<has_alias, type_alias, type>);\n        record.type_align = alignof(conditional_t<has_alias, type_alias, type>&);\n        record.holder_size = sizeof(holder_type);\n        record.init_instance = init_instance;\n        record.dealloc = dealloc;\n        record.default_holder = detail::is_instantiation<std::unique_ptr, holder_type>::value;\n\n        set_operator_new<type>(&record);\n\n        /* Register base classes specified via template arguments to class_, if any */\n        PYBIND11_EXPAND_SIDE_EFFECTS(add_base<options>(record));\n\n        /* Process optional arguments, if any */\n        process_attributes<Extra...>::init(extra..., &record);\n\n        generic_type::initialize(record);\n\n        if (has_alias) {\n            auto &instances = record.module_local ? registered_local_types_cpp() : get_internals().registered_types_cpp;\n            instances[std::type_index(typeid(type_alias))] = instances[std::type_index(typeid(type))];\n        }\n    }\n\n    template <typename Base, detail::enable_if_t<is_base<Base>::value, int> = 0>\n    static void add_base(detail::type_record &rec) {\n        rec.add_base(typeid(Base), [](void *src) -> void * {\n            return static_cast<Base *>(reinterpret_cast<type *>(src));\n        });\n    }\n\n    template <typename Base, detail::enable_if_t<!is_base<Base>::value, int> = 0>\n    static void add_base(detail::type_record &) { }\n\n    template <typename Func, typename... Extra>\n    class_ &def(const char *name_, Func&& f, const Extra&... 
extra) {\n        cpp_function cf(method_adaptor<type>(std::forward<Func>(f)), name(name_), is_method(*this),\n                        sibling(getattr(*this, name_, none())), extra...);\n        attr(cf.name()) = cf;\n        return *this;\n    }\n\n    template <typename Func, typename... Extra> class_ &\n    def_static(const char *name_, Func &&f, const Extra&... extra) {\n        static_assert(!std::is_member_function_pointer<Func>::value,\n                \"def_static(...) called with a non-static member function pointer\");\n        cpp_function cf(std::forward<Func>(f), name(name_), scope(*this),\n                        sibling(getattr(*this, name_, none())), extra...);\n        attr(cf.name()) = cf;\n        return *this;\n    }\n\n    template <detail::op_id id, detail::op_type ot, typename L, typename R, typename... Extra>\n    class_ &def(const detail::op_<id, ot, L, R> &op, const Extra&... extra) {\n        op.execute(*this, extra...);\n        return *this;\n    }\n\n    template <detail::op_id id, detail::op_type ot, typename L, typename R, typename... Extra>\n    class_ & def_cast(const detail::op_<id, ot, L, R> &op, const Extra&... extra) {\n        op.execute_cast(*this, extra...);\n        return *this;\n    }\n\n    template <typename... Args, typename... Extra>\n    class_ &def(const detail::initimpl::constructor<Args...> &init, const Extra&... extra) {\n        init.execute(*this, extra...);\n        return *this;\n    }\n\n    template <typename... Args, typename... Extra>\n    class_ &def(const detail::initimpl::alias_constructor<Args...> &init, const Extra&... extra) {\n        init.execute(*this, extra...);\n        return *this;\n    }\n\n    template <typename... Args, typename... Extra>\n    class_ &def(detail::initimpl::factory<Args...> &&init, const Extra&... extra) {\n        std::move(init).execute(*this, extra...);\n        return *this;\n    }\n\n    template <typename... Args, typename... 
Extra>\n    class_ &def(detail::initimpl::pickle_factory<Args...> &&pf, const Extra &...extra) {\n        std::move(pf).execute(*this, extra...);\n        return *this;\n    }\n\n    template <typename Func> class_& def_buffer(Func &&func) {\n        struct capture { Func func; };\n        capture *ptr = new capture { std::forward<Func>(func) };\n        install_buffer_funcs([](PyObject *obj, void *ptr) -> buffer_info* {\n            detail::make_caster<type> caster;\n            if (!caster.load(obj, false))\n                return nullptr;\n            return new buffer_info(((capture *) ptr)->func(caster));\n        }, ptr);\n        return *this;\n    }\n\n    template <typename Return, typename Class, typename... Args>\n    class_ &def_buffer(Return (Class::*func)(Args...)) {\n        return def_buffer([func] (type &obj) { return (obj.*func)(); });\n    }\n\n    template <typename Return, typename Class, typename... Args>\n    class_ &def_buffer(Return (Class::*func)(Args...) const) {\n        return def_buffer([func] (const type &obj) { return (obj.*func)(); });\n    }\n\n    template <typename C, typename D, typename... Extra>\n    class_ &def_readwrite(const char *name, D C::*pm, const Extra&... extra) {\n        static_assert(std::is_base_of<C, type>::value, \"def_readwrite() requires a class member (or base class member)\");\n        cpp_function fget([pm](const type &c) -> const D &{ return c.*pm; }, is_method(*this)),\n                     fset([pm](type &c, const D &value) { c.*pm = value; }, is_method(*this));\n        def_property(name, fget, fset, return_value_policy::reference_internal, extra...);\n        return *this;\n    }\n\n    template <typename C, typename D, typename... 
Extra>\n    class_ &def_readonly(const char *name, const D C::*pm, const Extra& ...extra) {\n        static_assert(std::is_base_of<C, type>::value, \"def_readonly() requires a class member (or base class member)\");\n        cpp_function fget([pm](const type &c) -> const D &{ return c.*pm; }, is_method(*this));\n        def_property_readonly(name, fget, return_value_policy::reference_internal, extra...);\n        return *this;\n    }\n\n    template <typename D, typename... Extra>\n    class_ &def_readwrite_static(const char *name, D *pm, const Extra& ...extra) {\n        cpp_function fget([pm](object) -> const D &{ return *pm; }, scope(*this)),\n                     fset([pm](object, const D &value) { *pm = value; }, scope(*this));\n        def_property_static(name, fget, fset, return_value_policy::reference, extra...);\n        return *this;\n    }\n\n    template <typename D, typename... Extra>\n    class_ &def_readonly_static(const char *name, const D *pm, const Extra& ...extra) {\n        cpp_function fget([pm](object) -> const D &{ return *pm; }, scope(*this));\n        def_property_readonly_static(name, fget, return_value_policy::reference, extra...);\n        return *this;\n    }\n\n    /// Uses return_value_policy::reference_internal by default\n    template <typename Getter, typename... Extra>\n    class_ &def_property_readonly(const char *name, const Getter &fget, const Extra& ...extra) {\n        return def_property_readonly(name, cpp_function(method_adaptor<type>(fget)),\n                                     return_value_policy::reference_internal, extra...);\n    }\n\n    /// Uses cpp_function's return_value_policy by default\n    template <typename... Extra>\n    class_ &def_property_readonly(const char *name, const cpp_function &fget, const Extra& ...extra) {\n        return def_property(name, fget, nullptr, extra...);\n    }\n\n    /// Uses return_value_policy::reference by default\n    template <typename Getter, typename... 
Extra>\n    class_ &def_property_readonly_static(const char *name, const Getter &fget, const Extra& ...extra) {\n        return def_property_readonly_static(name, cpp_function(fget), return_value_policy::reference, extra...);\n    }\n\n    /// Uses cpp_function's return_value_policy by default\n    template <typename... Extra>\n    class_ &def_property_readonly_static(const char *name, const cpp_function &fget, const Extra& ...extra) {\n        return def_property_static(name, fget, nullptr, extra...);\n    }\n\n    /// Uses return_value_policy::reference_internal by default\n    template <typename Getter, typename Setter, typename... Extra>\n    class_ &def_property(const char *name, const Getter &fget, const Setter &fset, const Extra& ...extra) {\n        return def_property(name, fget, cpp_function(method_adaptor<type>(fset)), extra...);\n    }\n    template <typename Getter, typename... Extra>\n    class_ &def_property(const char *name, const Getter &fget, const cpp_function &fset, const Extra& ...extra) {\n        return def_property(name, cpp_function(method_adaptor<type>(fget)), fset,\n                            return_value_policy::reference_internal, extra...);\n    }\n\n    /// Uses cpp_function's return_value_policy by default\n    template <typename... Extra>\n    class_ &def_property(const char *name, const cpp_function &fget, const cpp_function &fset, const Extra& ...extra) {\n        return def_property_static(name, fget, fset, is_method(*this), extra...);\n    }\n\n    /// Uses return_value_policy::reference by default\n    template <typename Getter, typename... Extra>\n    class_ &def_property_static(const char *name, const Getter &fget, const cpp_function &fset, const Extra& ...extra) {\n        return def_property_static(name, cpp_function(fget), fset, return_value_policy::reference, extra...);\n    }\n\n    /// Uses cpp_function's return_value_policy by default\n    template <typename... 
Extra>\n    class_ &def_property_static(const char *name, const cpp_function &fget, const cpp_function &fset, const Extra& ...extra) {\n        auto rec_fget = get_function_record(fget), rec_fset = get_function_record(fset);\n        auto *rec_active = rec_fget;\n        if (rec_fget) {\n           char *doc_prev = rec_fget->doc; /* 'extra' field may include a property-specific documentation string */\n           detail::process_attributes<Extra...>::init(extra..., rec_fget);\n           if (rec_fget->doc && rec_fget->doc != doc_prev) {\n              free(doc_prev);\n              rec_fget->doc = strdup(rec_fget->doc);\n           }\n        }\n        if (rec_fset) {\n            char *doc_prev = rec_fset->doc;\n            detail::process_attributes<Extra...>::init(extra..., rec_fset);\n            if (rec_fset->doc && rec_fset->doc != doc_prev) {\n                free(doc_prev);\n                rec_fset->doc = strdup(rec_fset->doc);\n            }\n            if (! rec_active) rec_active = rec_fset;\n        }\n        def_property_static_impl(name, fget, fset, rec_active);\n        return *this;\n    }\n\nprivate:\n    /// Initialize holder object, variant 1: object derives from enable_shared_from_this\n    template <typename T>\n    static void init_holder(detail::instance *inst, detail::value_and_holder &v_h,\n            const holder_type * /* unused */, const std::enable_shared_from_this<T> * /* dummy */) {\n        try {\n            auto sh = std::dynamic_pointer_cast<typename holder_type::element_type>(\n                    v_h.value_ptr<type>()->shared_from_this());\n            if (sh) {\n                new (std::addressof(v_h.holder<holder_type>())) holder_type(std::move(sh));\n                v_h.set_holder_constructed();\n            }\n        } catch (const std::bad_weak_ptr &) {}\n\n        if (!v_h.holder_constructed() && inst->owned) {\n            new (std::addressof(v_h.holder<holder_type>())) holder_type(v_h.value_ptr<type>());\n         
   v_h.set_holder_constructed();\n        }\n    }\n\n    static void init_holder_from_existing(const detail::value_and_holder &v_h,\n            const holder_type *holder_ptr, std::true_type /*is_copy_constructible*/) {\n        new (std::addressof(v_h.holder<holder_type>())) holder_type(*reinterpret_cast<const holder_type *>(holder_ptr));\n    }\n\n    static void init_holder_from_existing(const detail::value_and_holder &v_h,\n            const holder_type *holder_ptr, std::false_type /*is_copy_constructible*/) {\n        new (std::addressof(v_h.holder<holder_type>())) holder_type(std::move(*const_cast<holder_type *>(holder_ptr)));\n    }\n\n    /// Initialize holder object, variant 2: try to construct from existing holder object, if possible\n    static void init_holder(detail::instance *inst, detail::value_and_holder &v_h,\n            const holder_type *holder_ptr, const void * /* dummy -- not enable_shared_from_this<T>) */) {\n        if (holder_ptr) {\n            init_holder_from_existing(v_h, holder_ptr, std::is_copy_constructible<holder_type>());\n            v_h.set_holder_constructed();\n        } else if (inst->owned || detail::always_construct_holder<holder_type>::value) {\n            new (std::addressof(v_h.holder<holder_type>())) holder_type(v_h.value_ptr<type>());\n            v_h.set_holder_constructed();\n        }\n    }\n\n    /// Performs instance initialization including constructing a holder and registering the known\n    /// instance.  Should be called as soon as the `type` value_ptr is set for an instance.  
Takes an\n    /// optional pointer to an existing holder to use; if not specified and the instance is\n    /// `.owned`, a new holder will be constructed to manage the value pointer.\n    static void init_instance(detail::instance *inst, const void *holder_ptr) {\n        auto v_h = inst->get_value_and_holder(detail::get_type_info(typeid(type)));\n        if (!v_h.instance_registered()) {\n            register_instance(inst, v_h.value_ptr(), v_h.type);\n            v_h.set_instance_registered();\n        }\n        init_holder(inst, v_h, (const holder_type *) holder_ptr, v_h.value_ptr<type>());\n    }\n\n    /// Deallocates an instance; via holder, if constructed; otherwise via operator delete.\n    static void dealloc(detail::value_and_holder &v_h) {\n        if (v_h.holder_constructed()) {\n            v_h.holder<holder_type>().~holder_type();\n            v_h.set_holder_constructed(false);\n        }\n        else {\n            detail::call_operator_delete(v_h.value_ptr<type>(),\n                v_h.type->type_size,\n                v_h.type->type_align\n            );\n        }\n        v_h.value_ptr() = nullptr;\n    }\n\n    static detail::function_record *get_function_record(handle h) {\n        h = detail::get_function(h);\n        return h ? (detail::function_record *) reinterpret_borrow<capsule>(PyCFunction_GET_SELF(h.ptr()))\n                 : nullptr;\n    }\n};\n\n/// Binds an existing constructor taking arguments Args...\ntemplate <typename... Args> detail::initimpl::constructor<Args...> init() { return {}; }\n/// Like `init<Args...>()`, but the instance is always constructed through the alias class (even\n/// when not inheriting on the Python side).\ntemplate <typename... 
Args> detail::initimpl::alias_constructor<Args...> init_alias() { return {}; }\n\n/// Binds a factory function as a constructor\ntemplate <typename Func, typename Ret = detail::initimpl::factory<Func>>\nRet init(Func &&f) { return {std::forward<Func>(f)}; }\n\n/// Dual-argument factory function: the first function is called when no alias is needed, the second\n/// when an alias is needed (i.e. due to python-side inheritance).  Arguments must be identical.\ntemplate <typename CFunc, typename AFunc, typename Ret = detail::initimpl::factory<CFunc, AFunc>>\nRet init(CFunc &&c, AFunc &&a) {\n    return {std::forward<CFunc>(c), std::forward<AFunc>(a)};\n}\n\n/// Binds pickling functions `__getstate__` and `__setstate__` and ensures that the type\n/// returned by `__getstate__` is the same as the argument accepted by `__setstate__`.\ntemplate <typename GetState, typename SetState>\ndetail::initimpl::pickle_factory<GetState, SetState> pickle(GetState &&g, SetState &&s) {\n    return {std::forward<GetState>(g), std::forward<SetState>(s)};\n}\n\nNAMESPACE_BEGIN(detail)\nstruct enum_base {\n    enum_base(handle base, handle parent) : m_base(base), m_parent(parent) { }\n\n    PYBIND11_NOINLINE void init(bool is_arithmetic, bool is_convertible) {\n        m_base.attr(\"__entries\") = dict();\n        auto property = handle((PyObject *) &PyProperty_Type);\n        auto static_property = handle((PyObject *) get_internals().static_property_type);\n\n        m_base.attr(\"__repr__\") = cpp_function(\n            [](handle arg) -> str {\n                handle type = arg.get_type();\n                object type_name = type.attr(\"__name__\");\n                dict entries = type.attr(\"__entries\");\n                for (const auto &kv : entries) {\n                    object other = kv.second[int_(0)];\n                    if (other.equal(arg))\n                        return pybind11::str(\"{}.{}\").format(type_name, kv.first);\n                }\n                return 
pybind11::str(\"{}.???\").format(type_name);\n            }, is_method(m_base)\n        );\n\n        m_base.attr(\"name\") = property(cpp_function(\n            [](handle arg) -> str {\n                dict entries = arg.get_type().attr(\"__entries\");\n                for (const auto &kv : entries) {\n                    if (handle(kv.second[int_(0)]).equal(arg))\n                        return pybind11::str(kv.first);\n                }\n                return \"???\";\n            }, is_method(m_base)\n        ));\n\n        m_base.attr(\"__doc__\") = static_property(cpp_function(\n            [](handle arg) -> std::string {\n                std::string docstring;\n                dict entries = arg.attr(\"__entries\");\n                if (((PyTypeObject *) arg.ptr())->tp_doc)\n                    docstring += std::string(((PyTypeObject *) arg.ptr())->tp_doc) + \"\\n\\n\";\n                docstring += \"Members:\";\n                for (const auto &kv : entries) {\n                    auto key = std::string(pybind11::str(kv.first));\n                    auto comment = kv.second[int_(1)];\n                    docstring += \"\\n\\n  \" + key;\n                    if (!comment.is_none())\n                        docstring += \" : \" + (std::string) pybind11::str(comment);\n                }\n                return docstring;\n            }\n        ), none(), none(), \"\");\n\n        m_base.attr(\"__members__\") = static_property(cpp_function(\n            [](handle arg) -> dict {\n                dict entries = arg.attr(\"__entries\"), m;\n                for (const auto &kv : entries)\n                    m[kv.first] = kv.second[int_(0)];\n                return m;\n            }), none(), none(), \"\"\n        );\n\n        #define PYBIND11_ENUM_OP_STRICT(op, expr, strict_behavior)                     \\\n            m_base.attr(op) = cpp_function(                                            \\\n                [](object a, object b) {                         
                      \\\n                    if (!a.get_type().is(b.get_type()))                                \\\n                        strict_behavior;                                               \\\n                    return expr;                                                       \\\n                },                                                                     \\\n                is_method(m_base))\n\n        #define PYBIND11_ENUM_OP_CONV(op, expr)                                        \\\n            m_base.attr(op) = cpp_function(                                            \\\n                [](object a_, object b_) {                                             \\\n                    int_ a(a_), b(b_);                                                 \\\n                    return expr;                                                       \\\n                },                                                                     \\\n                is_method(m_base))\n\n        if (is_convertible) {\n            PYBIND11_ENUM_OP_CONV(\"__eq__\", !b.is_none() &&  a.equal(b));\n            PYBIND11_ENUM_OP_CONV(\"__ne__\",  b.is_none() || !a.equal(b));\n\n            if (is_arithmetic) {\n                PYBIND11_ENUM_OP_CONV(\"__lt__\",   a <  b);\n                PYBIND11_ENUM_OP_CONV(\"__gt__\",   a >  b);\n                PYBIND11_ENUM_OP_CONV(\"__le__\",   a <= b);\n                PYBIND11_ENUM_OP_CONV(\"__ge__\",   a >= b);\n                PYBIND11_ENUM_OP_CONV(\"__and__\",  a &  b);\n                PYBIND11_ENUM_OP_CONV(\"__rand__\", a &  b);\n                PYBIND11_ENUM_OP_CONV(\"__or__\",   a |  b);\n                PYBIND11_ENUM_OP_CONV(\"__ror__\",  a |  b);\n                PYBIND11_ENUM_OP_CONV(\"__xor__\",  a ^  b);\n                PYBIND11_ENUM_OP_CONV(\"__rxor__\", a ^  b);\n            }\n        } else {\n            PYBIND11_ENUM_OP_STRICT(\"__eq__\",  int_(a).equal(int_(b)), return false);\n            
PYBIND11_ENUM_OP_STRICT(\"__ne__\", !int_(a).equal(int_(b)), return true);\n\n            if (is_arithmetic) {\n                #define PYBIND11_THROW throw type_error(\"Expected an enumeration of matching type!\");\n                PYBIND11_ENUM_OP_STRICT(\"__lt__\", int_(a) <  int_(b), PYBIND11_THROW);\n                PYBIND11_ENUM_OP_STRICT(\"__gt__\", int_(a) >  int_(b), PYBIND11_THROW);\n                PYBIND11_ENUM_OP_STRICT(\"__le__\", int_(a) <= int_(b), PYBIND11_THROW);\n                PYBIND11_ENUM_OP_STRICT(\"__ge__\", int_(a) >= int_(b), PYBIND11_THROW);\n                #undef PYBIND11_THROW\n            }\n        }\n\n        #undef PYBIND11_ENUM_OP_CONV\n        #undef PYBIND11_ENUM_OP_STRICT\n\n        object getstate = cpp_function(\n            [](object arg) { return int_(arg); }, is_method(m_base));\n\n        m_base.attr(\"__getstate__\") = getstate;\n        m_base.attr(\"__hash__\") = getstate;\n    }\n\n    PYBIND11_NOINLINE void value(char const* name_, object value, const char *doc = nullptr) {\n        dict entries = m_base.attr(\"__entries\");\n        str name(name_);\n        if (entries.contains(name)) {\n            std::string type_name = (std::string) str(m_base.attr(\"__name__\"));\n            throw value_error(type_name + \": element \\\"\" + std::string(name_) + \"\\\" already exists!\");\n        }\n\n        entries[name] = std::make_pair(value, doc);\n        m_base.attr(name) = value;\n    }\n\n    PYBIND11_NOINLINE void export_values() {\n        dict entries = m_base.attr(\"__entries\");\n        for (const auto &kv : entries)\n            m_parent.attr(kv.first) = kv.second[int_(0)];\n    }\n\n    handle m_base;\n    handle m_parent;\n};\n\nNAMESPACE_END(detail)\n\n/// Binds C++ enumerations and enumeration classes to Python\ntemplate <typename Type> class enum_ : public class_<Type> {\npublic:\n    using Base = class_<Type>;\n    using Base::def;\n    using Base::attr;\n    using Base::def_property_readonly;\n    
using Base::def_property_readonly_static;\n    using Scalar = typename std::underlying_type<Type>::type;\n\n    template <typename... Extra>\n    enum_(const handle &scope, const char *name, const Extra&... extra)\n      : class_<Type>(scope, name, extra...), m_base(*this, scope) {\n        constexpr bool is_arithmetic = detail::any_of<std::is_same<arithmetic, Extra>...>::value;\n        constexpr bool is_convertible = std::is_convertible<Type, Scalar>::value;\n        m_base.init(is_arithmetic, is_convertible);\n\n        def(init([](Scalar i) { return static_cast<Type>(i); }));\n        def(\"__int__\", [](Type value) { return (Scalar) value; });\n        #if PY_MAJOR_VERSION < 3\n            def(\"__long__\", [](Type value) { return (Scalar) value; });\n        #endif\n        cpp_function setstate(\n            [](Type &value, Scalar arg) { value = static_cast<Type>(arg); },\n            is_method(*this));\n        attr(\"__setstate__\") = setstate;\n    }\n\n    /// Export enumeration entries into the parent scope\n    enum_& export_values() {\n        m_base.export_values();\n        return *this;\n    }\n\n    /// Add an enumeration entry\n    enum_& value(char const* name, Type value, const char *doc = nullptr) {\n        m_base.value(name, pybind11::cast(value, return_value_policy::copy), doc);\n        return *this;\n    }\n\nprivate:\n    detail::enum_base m_base;\n};\n\nNAMESPACE_BEGIN(detail)\n\n\ninline void keep_alive_impl(handle nurse, handle patient) {\n    if (!nurse || !patient)\n        pybind11_fail(\"Could not activate keep_alive!\");\n\n    if (patient.is_none() || nurse.is_none())\n        return; /* Nothing to keep alive or nothing to be kept alive by */\n\n    auto tinfo = all_type_info(Py_TYPE(nurse.ptr()));\n    if (!tinfo.empty()) {\n        /* It's a pybind-registered type, so we can store the patient in the\n         * internal list. 
*/\n        add_patient(nurse.ptr(), patient.ptr());\n    }\n    else {\n        /* Fall back to clever approach based on weak references taken from\n         * Boost.Python. This is not used for pybind-registered types because\n         * the objects can be destroyed out-of-order in a GC pass. */\n        cpp_function disable_lifesupport(\n            [patient](handle weakref) { patient.dec_ref(); weakref.dec_ref(); });\n\n        weakref wr(nurse, disable_lifesupport);\n\n        patient.inc_ref(); /* reference patient and leak the weak reference */\n        (void) wr.release();\n    }\n}\n\nPYBIND11_NOINLINE inline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret) {\n    auto get_arg = [&](size_t n) {\n        if (n == 0)\n            return ret;\n        else if (n == 1 && call.init_self)\n            return call.init_self;\n        else if (n <= call.args.size())\n            return call.args[n - 1];\n        return handle();\n    };\n\n    keep_alive_impl(get_arg(Nurse), get_arg(Patient));\n}\n\ninline std::pair<decltype(internals::registered_types_py)::iterator, bool> all_type_info_get_cache(PyTypeObject *type) {\n    auto res = get_internals().registered_types_py\n#ifdef __cpp_lib_unordered_map_try_emplace\n        .try_emplace(type);\n#else\n        .emplace(type, std::vector<detail::type_info *>());\n#endif\n    if (res.second) {\n        // New cache entry created; set up a weak reference to automatically remove it if the type\n        // gets destroyed:\n        weakref((PyObject *) type, cpp_function([type](handle wr) {\n            get_internals().registered_types_py.erase(type);\n            wr.dec_ref();\n        })).release();\n    }\n\n    return res;\n}\n\ntemplate <typename Iterator, typename Sentinel, bool KeyIterator, return_value_policy Policy>\nstruct iterator_state {\n    Iterator it;\n    Sentinel end;\n    bool first_or_done;\n};\n\nNAMESPACE_END(detail)\n\n/// Makes a python iterator from a first and 
past-the-end C++ InputIterator.\ntemplate <return_value_policy Policy = return_value_policy::reference_internal,\n          typename Iterator,\n          typename Sentinel,\n          typename ValueType = decltype(*std::declval<Iterator>()),\n          typename... Extra>\niterator make_iterator(Iterator first, Sentinel last, Extra &&... extra) {\n    typedef detail::iterator_state<Iterator, Sentinel, false, Policy> state;\n\n    if (!detail::get_type_info(typeid(state), false)) {\n        class_<state>(handle(), \"iterator\", pybind11::module_local())\n            .def(\"__iter__\", [](state &s) -> state& { return s; })\n            .def(\"__next__\", [](state &s) -> ValueType {\n                if (!s.first_or_done)\n                    ++s.it;\n                else\n                    s.first_or_done = false;\n                if (s.it == s.end) {\n                    s.first_or_done = true;\n                    throw stop_iteration();\n                }\n                return *s.it;\n            }, std::forward<Extra>(extra)..., Policy);\n    }\n\n    return cast(state{first, last, true});\n}\n\n/// Makes an python iterator over the keys (`.first`) of a iterator over pairs from a\n/// first and past-the-end InputIterator.\ntemplate <return_value_policy Policy = return_value_policy::reference_internal,\n          typename Iterator,\n          typename Sentinel,\n          typename KeyType = decltype((*std::declval<Iterator>()).first),\n          typename... Extra>\niterator make_key_iterator(Iterator first, Sentinel last, Extra &&... 
extra) {\n    typedef detail::iterator_state<Iterator, Sentinel, true, Policy> state;\n\n    if (!detail::get_type_info(typeid(state), false)) {\n        class_<state>(handle(), \"iterator\", pybind11::module_local())\n            .def(\"__iter__\", [](state &s) -> state& { return s; })\n            .def(\"__next__\", [](state &s) -> KeyType {\n                if (!s.first_or_done)\n                    ++s.it;\n                else\n                    s.first_or_done = false;\n                if (s.it == s.end) {\n                    s.first_or_done = true;\n                    throw stop_iteration();\n                }\n                return (*s.it).first;\n            }, std::forward<Extra>(extra)..., Policy);\n    }\n\n    return cast(state{first, last, true});\n}\n\n/// Makes an iterator over values of an stl container or other container supporting\n/// `std::begin()`/`std::end()`\ntemplate <return_value_policy Policy = return_value_policy::reference_internal,\n          typename Type, typename... Extra> iterator make_iterator(Type &value, Extra&&... extra) {\n    return make_iterator<Policy>(std::begin(value), std::end(value), extra...);\n}\n\n/// Makes an iterator over the keys (`.first`) of a stl map-like container supporting\n/// `std::begin()`/`std::end()`\ntemplate <return_value_policy Policy = return_value_policy::reference_internal,\n          typename Type, typename... Extra> iterator make_key_iterator(Type &value, Extra&&... 
extra) {\n    return make_key_iterator<Policy>(std::begin(value), std::end(value), extra...);\n}\n\ntemplate <typename InputType, typename OutputType> void implicitly_convertible() {\n    struct set_flag {\n        bool &flag;\n        set_flag(bool &flag) : flag(flag) { flag = true; }\n        ~set_flag() { flag = false; }\n    };\n    auto implicit_caster = [](PyObject *obj, PyTypeObject *type) -> PyObject * {\n        static bool currently_used = false;\n        if (currently_used) // implicit conversions are non-reentrant\n            return nullptr;\n        set_flag flag_helper(currently_used);\n        if (!detail::make_caster<InputType>().load(obj, false))\n            return nullptr;\n        tuple args(1);\n        args[0] = obj;\n        PyObject *result = PyObject_Call((PyObject *) type, args.ptr(), nullptr);\n        if (result == nullptr)\n            PyErr_Clear();\n        return result;\n    };\n\n    if (auto tinfo = detail::get_type_info(typeid(OutputType)))\n        tinfo->implicit_conversions.push_back(implicit_caster);\n    else\n        pybind11_fail(\"implicitly_convertible: Unable to find type \" + type_id<OutputType>());\n}\n\ntemplate <typename ExceptionTranslator>\nvoid register_exception_translator(ExceptionTranslator&& translator) {\n    detail::get_internals().registered_exception_translators.push_front(\n        std::forward<ExceptionTranslator>(translator));\n}\n\n/**\n * Wrapper to generate a new Python exception type.\n *\n * This should only be used with PyErr_SetString for now.\n * It is not (yet) possible to use as a py::base.\n * Template type argument is reserved for future use.\n */\ntemplate <typename type>\nclass exception : public object {\npublic:\n    exception() = default;\n    exception(handle scope, const char *name, PyObject *base = PyExc_Exception) {\n        std::string full_name = scope.attr(\"__name__\").cast<std::string>() +\n                                std::string(\".\") + name;\n        m_ptr = 
PyErr_NewException(const_cast<char *>(full_name.c_str()), base, NULL);\n        if (hasattr(scope, name))\n            pybind11_fail(\"Error during initialization: multiple incompatible \"\n                          \"definitions with name \\\"\" + std::string(name) + \"\\\"\");\n        scope.attr(name) = *this;\n    }\n\n    // Sets the current python exception to this exception object with the given message\n    void operator()(const char *message) {\n        PyErr_SetString(m_ptr, message);\n    }\n};\n\nNAMESPACE_BEGIN(detail)\n// Returns a reference to a function-local static exception object used in the simple\n// register_exception approach below.  (It would be simpler to have the static local variable\n// directly in register_exception, but that makes clang <3.5 segfault - issue #1349).\ntemplate <typename CppException>\nexception<CppException> &get_exception_object() { static exception<CppException> ex; return ex; }\nNAMESPACE_END(detail)\n\n/**\n * Registers a Python exception in `m` of the given `name` and installs an exception translator to\n * translate the C++ exception to the created Python exception using the exceptions what() method.\n * This is intended for simple exception translations; for more complex translation, register the\n * exception object and translator directly.\n */\ntemplate <typename CppException>\nexception<CppException> &register_exception(handle scope,\n                                            const char *name,\n                                            PyObject *base = PyExc_Exception) {\n    auto &ex = detail::get_exception_object<CppException>();\n    if (!ex) ex = exception<CppException>(scope, name, base);\n\n    register_exception_translator([](std::exception_ptr p) {\n        if (!p) return;\n        try {\n            std::rethrow_exception(p);\n        } catch (const CppException &e) {\n            detail::get_exception_object<CppException>()(e.what());\n        }\n    });\n    return 
ex;\n}\n\nNAMESPACE_BEGIN(detail)\nPYBIND11_NOINLINE inline void print(tuple args, dict kwargs) {\n    auto strings = tuple(args.size());\n    for (size_t i = 0; i < args.size(); ++i) {\n        strings[i] = str(args[i]);\n    }\n    auto sep = kwargs.contains(\"sep\") ? kwargs[\"sep\"] : cast(\" \");\n    auto line = sep.attr(\"join\")(strings);\n\n    object file;\n    if (kwargs.contains(\"file\")) {\n        file = kwargs[\"file\"].cast<object>();\n    } else {\n        try {\n            file = module::import(\"sys\").attr(\"stdout\");\n        } catch (const error_already_set &) {\n            /* If print() is called from code that is executed as\n               part of garbage collection during interpreter shutdown,\n               importing 'sys' can fail. Give up rather than crashing the\n               interpreter in this case. */\n            return;\n        }\n    }\n\n    auto write = file.attr(\"write\");\n    write(line);\n    write(kwargs.contains(\"end\") ? kwargs[\"end\"] : cast(\"\\n\"));\n\n    if (kwargs.contains(\"flush\") && kwargs[\"flush\"].cast<bool>())\n        file.attr(\"flush\")();\n}\nNAMESPACE_END(detail)\n\ntemplate <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>\nvoid print(Args &&...args) {\n    auto c = detail::collect_arguments<policy>(std::forward<Args>(args)...);\n    detail::print(c.args(), c.kwargs());\n}\n\n#if defined(WITH_THREAD) && !defined(PYPY_VERSION)\n\n/* The functions below essentially reproduce the PyGILState_* API using a RAII\n * pattern, but there are a few important differences:\n *\n * 1. When acquiring the GIL from an non-main thread during the finalization\n *    phase, the GILState API blindly terminates the calling thread, which\n *    is often not what is wanted. This API does not do this.\n *\n * 2. 
The gil_scoped_release function can optionally cut the relationship\n *    of a PyThreadState and its associated thread, which allows moving it to\n *    another thread (this is a fairly rare/advanced use case).\n *\n * 3. The reference count of an acquired thread state can be controlled. This\n *    can be handy to prevent cases where callbacks issued from an external\n *    thread would otherwise constantly construct and destroy thread state data\n *    structures.\n *\n * See the Python bindings of NanoGUI (http://github.com/wjakob/nanogui) for an\n * example which uses features 2 and 3 to migrate the Python thread of\n * execution to another thread (to run the event loop on the original thread,\n * in this case).\n */\n\nclass gil_scoped_acquire {\npublic:\n    PYBIND11_NOINLINE gil_scoped_acquire() {\n        auto const &internals = detail::get_internals();\n        tstate = (PyThreadState *) PYBIND11_TLS_GET_VALUE(internals.tstate);\n\n        if (!tstate) {\n            /* Check if the GIL was acquired using the PyGILState_* API instead (e.g. if\n               calling from a Python thread). Since we use a different key, this ensures\n               we don't create a new thread state and deadlock in PyEval_AcquireThread\n               below. Note we don't save this state with internals.tstate, since we don't\n               create it we would fail to clear it (its reference count should be > 0). 
*/\n            tstate = PyGILState_GetThisThreadState();\n        }\n\n        if (!tstate) {\n            tstate = PyThreadState_New(internals.istate);\n            #if !defined(NDEBUG)\n                if (!tstate)\n                    pybind11_fail(\"scoped_acquire: could not create thread state!\");\n            #endif\n            tstate->gilstate_counter = 0;\n            PYBIND11_TLS_REPLACE_VALUE(internals.tstate, tstate);\n        } else {\n            release = detail::get_thread_state_unchecked() != tstate;\n        }\n\n        if (release) {\n            /* Work around an annoying assertion in PyThreadState_Swap */\n            #if defined(Py_DEBUG)\n                PyInterpreterState *interp = tstate->interp;\n                tstate->interp = nullptr;\n            #endif\n            PyEval_AcquireThread(tstate);\n            #if defined(Py_DEBUG)\n                tstate->interp = interp;\n            #endif\n        }\n\n        inc_ref();\n    }\n\n    void inc_ref() {\n        ++tstate->gilstate_counter;\n    }\n\n    PYBIND11_NOINLINE void dec_ref() {\n        --tstate->gilstate_counter;\n        #if !defined(NDEBUG)\n            if (detail::get_thread_state_unchecked() != tstate)\n                pybind11_fail(\"scoped_acquire::dec_ref(): thread state must be current!\");\n            if (tstate->gilstate_counter < 0)\n                pybind11_fail(\"scoped_acquire::dec_ref(): reference count underflow!\");\n        #endif\n        if (tstate->gilstate_counter == 0) {\n            #if !defined(NDEBUG)\n                if (!release)\n                    pybind11_fail(\"scoped_acquire::dec_ref(): internal error!\");\n            #endif\n            PyThreadState_Clear(tstate);\n            PyThreadState_DeleteCurrent();\n            PYBIND11_TLS_DELETE_VALUE(detail::get_internals().tstate);\n            release = false;\n        }\n    }\n\n    PYBIND11_NOINLINE ~gil_scoped_acquire() {\n        dec_ref();\n        if (release)\n           
PyEval_SaveThread();\n    }\nprivate:\n    PyThreadState *tstate = nullptr;\n    bool release = true;\n};\n\nclass gil_scoped_release {\npublic:\n    explicit gil_scoped_release(bool disassoc = false) : disassoc(disassoc) {\n        // `get_internals()` must be called here unconditionally in order to initialize\n        // `internals.tstate` for subsequent `gil_scoped_acquire` calls. Otherwise, an\n        // initialization race could occur as multiple threads try `gil_scoped_acquire`.\n        const auto &internals = detail::get_internals();\n        tstate = PyEval_SaveThread();\n        if (disassoc) {\n            auto key = internals.tstate;\n            PYBIND11_TLS_DELETE_VALUE(key);\n        }\n    }\n    ~gil_scoped_release() {\n        if (!tstate)\n            return;\n        PyEval_RestoreThread(tstate);\n        if (disassoc) {\n            auto key = detail::get_internals().tstate;\n            PYBIND11_TLS_REPLACE_VALUE(key, tstate);\n        }\n    }\nprivate:\n    PyThreadState *tstate;\n    bool disassoc;\n};\n#elif defined(PYPY_VERSION)\nclass gil_scoped_acquire {\n    PyGILState_STATE state;\npublic:\n    gil_scoped_acquire() { state = PyGILState_Ensure(); }\n    ~gil_scoped_acquire() { PyGILState_Release(state); }\n};\n\nclass gil_scoped_release {\n    PyThreadState *state;\npublic:\n    gil_scoped_release() { state = PyEval_SaveThread(); }\n    ~gil_scoped_release() { PyEval_RestoreThread(state); }\n};\n#else\nclass gil_scoped_acquire { };\nclass gil_scoped_release { };\n#endif\n\nerror_already_set::~error_already_set() {\n    if (type) {\n        error_scope scope;\n        gil_scoped_acquire gil;\n        type.release().dec_ref();\n        value.release().dec_ref();\n        trace.release().dec_ref();\n    }\n}\n\ninline function get_type_overload(const void *this_ptr, const detail::type_info *this_type, const char *name)  {\n    handle self = detail::get_object_handle(this_ptr, this_type);\n    if (!self)\n        return function();\n    
handle type = self.get_type();\n    auto key = std::make_pair(type.ptr(), name);\n\n    /* Cache functions that aren't overloaded in Python to avoid\n       many costly Python dictionary lookups below */\n    auto &cache = detail::get_internals().inactive_overload_cache;\n    if (cache.find(key) != cache.end())\n        return function();\n\n    function overload = getattr(self, name, function());\n    if (overload.is_cpp_function()) {\n        cache.insert(key);\n        return function();\n    }\n\n    /* Don't call dispatch code if invoked from overridden function.\n       Unfortunately this doesn't work on PyPy. */\n#if !defined(PYPY_VERSION)\n    PyFrameObject *frame = PyThreadState_Get()->frame;\n    if (frame && (std::string) str(frame->f_code->co_name) == name &&\n        frame->f_code->co_argcount > 0) {\n        PyFrame_FastToLocals(frame);\n        PyObject *self_caller = PyDict_GetItem(\n            frame->f_locals, PyTuple_GET_ITEM(frame->f_code->co_varnames, 0));\n        if (self_caller == self.ptr())\n            return function();\n    }\n#else\n    /* PyPy currently doesn't provide a detailed cpyext emulation of\n       frame objects, so we have to emulate this using Python. 
This\n       is going to be slow..*/\n    dict d; d[\"self\"] = self; d[\"name\"] = pybind11::str(name);\n    PyObject *result = PyRun_String(\n        \"import inspect\\n\"\n        \"frame = inspect.currentframe()\\n\"\n        \"if frame is not None:\\n\"\n        \"    frame = frame.f_back\\n\"\n        \"    if frame is not None and str(frame.f_code.co_name) == name and \"\n        \"frame.f_code.co_argcount > 0:\\n\"\n        \"        self_caller = frame.f_locals[frame.f_code.co_varnames[0]]\\n\"\n        \"        if self_caller == self:\\n\"\n        \"            self = None\\n\",\n        Py_file_input, d.ptr(), d.ptr());\n    if (result == nullptr)\n        throw error_already_set();\n    if (d[\"self\"].is_none())\n        return function();\n    Py_DECREF(result);\n#endif\n\n    return overload;\n}\n\ntemplate <class T> function get_overload(const T *this_ptr, const char *name) {\n    auto tinfo = detail::get_type_info(typeid(T));\n    return tinfo ? get_type_overload(this_ptr, tinfo, name) : function();\n}\n\n#define PYBIND11_OVERLOAD_INT(ret_type, cname, name, ...) { \\\n        pybind11::gil_scoped_acquire gil; \\\n        pybind11::function overload = pybind11::get_overload(static_cast<const cname *>(this), name); \\\n        if (overload) { \\\n            auto o = overload(__VA_ARGS__); \\\n            if (pybind11::detail::cast_is_temporary_value_reference<ret_type>::value) { \\\n                static pybind11::detail::overload_caster_t<ret_type> caster; \\\n                return pybind11::detail::cast_ref<ret_type>(std::move(o), caster); \\\n            } \\\n            else return pybind11::detail::cast_safe<ret_type>(std::move(o)); \\\n        } \\\n    }\n\n#define PYBIND11_OVERLOAD_NAME(ret_type, cname, name, fn, ...) \\\n    PYBIND11_OVERLOAD_INT(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__) \\\n    return cname::fn(__VA_ARGS__)\n\n#define PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, name, fn, ...) 
\\\n    PYBIND11_OVERLOAD_INT(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__) \\\n    pybind11::pybind11_fail(\"Tried to call pure virtual function \\\"\" PYBIND11_STRINGIFY(cname) \"::\" name \"\\\"\");\n\n#define PYBIND11_OVERLOAD(ret_type, cname, fn, ...) \\\n    PYBIND11_OVERLOAD_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)\n\n#define PYBIND11_OVERLOAD_PURE(ret_type, cname, fn, ...) \\\n    PYBIND11_OVERLOAD_PURE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\n#  pragma warning(pop)\n#elif defined(__GNUG__) && !defined(__clang__)\n#  pragma GCC diagnostic pop\n#endif\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/pytypes.h",
    "content": "/*\n    pybind11/pytypes.h: Convenience wrapper classes for basic Python types\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"detail/common.h\"\n#include \"buffer_info.h\"\n#include <utility>\n#include <type_traits>\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\n\n/* A few forward declarations */\nclass handle; class object;\nclass str; class iterator;\nstruct arg; struct arg_v;\n\nNAMESPACE_BEGIN(detail)\nclass args_proxy;\ninline bool isinstance_generic(handle obj, const std::type_info &tp);\n\n// Accessor forward declarations\ntemplate <typename Policy> class accessor;\nnamespace accessor_policies {\n    struct obj_attr;\n    struct str_attr;\n    struct generic_item;\n    struct sequence_item;\n    struct list_item;\n    struct tuple_item;\n}\nusing obj_attr_accessor = accessor<accessor_policies::obj_attr>;\nusing str_attr_accessor = accessor<accessor_policies::str_attr>;\nusing item_accessor = accessor<accessor_policies::generic_item>;\nusing sequence_accessor = accessor<accessor_policies::sequence_item>;\nusing list_accessor = accessor<accessor_policies::list_item>;\nusing tuple_accessor = accessor<accessor_policies::tuple_item>;\n\n/// Tag and check to identify a class which implements the Python object API\nclass pyobject_tag { };\ntemplate <typename T> using is_pyobject = std::is_base_of<pyobject_tag, remove_reference_t<T>>;\n\n/** \\rst\n    A mixin class which adds common functions to `handle`, `object` and various accessors.\n    The only requirement for `Derived` is to implement ``PyObject *Derived::ptr() const``.\n\\endrst */\ntemplate <typename Derived>\nclass object_api : public pyobject_tag {\n    const Derived &derived() const { return static_cast<const Derived &>(*this); }\n\npublic:\n    /** \\rst\n        Return an iterator equivalent to calling ``iter()`` in 
Python. The object\n        must be a collection which supports the iteration protocol.\n    \\endrst */\n    iterator begin() const;\n    /// Return a sentinel which ends iteration.\n    iterator end() const;\n\n    /** \\rst\n        Return an internal functor to invoke the object's sequence protocol. Casting\n        the returned ``detail::item_accessor`` instance to a `handle` or `object`\n        subclass causes a corresponding call to ``__getitem__``. Assigning a `handle`\n        or `object` subclass causes a call to ``__setitem__``.\n    \\endrst */\n    item_accessor operator[](handle key) const;\n    /// See above (the only difference is that they key is provided as a string literal)\n    item_accessor operator[](const char *key) const;\n\n    /** \\rst\n        Return an internal functor to access the object's attributes. Casting the\n        returned ``detail::obj_attr_accessor`` instance to a `handle` or `object`\n        subclass causes a corresponding call to ``getattr``. Assigning a `handle`\n        or `object` subclass causes a call to ``setattr``.\n    \\endrst */\n    obj_attr_accessor attr(handle key) const;\n    /// See above (the only difference is that they key is provided as a string literal)\n    str_attr_accessor attr(const char *key) const;\n\n    /** \\rst\n        Matches * unpacking in Python, e.g. to unpack arguments out of a ``tuple``\n        or ``list`` for a function call. Applying another * to the result yields\n        ** unpacking, e.g. to unpack a dict as function keyword arguments.\n        See :ref:`calling_python_functions`.\n    \\endrst */\n    args_proxy operator*() const;\n\n    /// Check if the given item is contained within this object, i.e. 
``item in obj``.\n    template <typename T> bool contains(T &&item) const;\n\n    /** \\rst\n        Assuming the Python object is a function or implements the ``__call__``\n        protocol, ``operator()`` invokes the underlying function, passing an\n        arbitrary set of parameters. The result is returned as a `object` and\n        may need to be converted back into a Python object using `handle::cast()`.\n\n        When some of the arguments cannot be converted to Python objects, the\n        function will throw a `cast_error` exception. When the Python function\n        call fails, a `error_already_set` exception is thrown.\n    \\endrst */\n    template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>\n    object operator()(Args &&...args) const;\n    template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>\n    PYBIND11_DEPRECATED(\"call(...) was deprecated in favor of operator()(...)\")\n        object call(Args&&... 
args) const;\n\n    /// Equivalent to ``obj is other`` in Python.\n    bool is(object_api const& other) const { return derived().ptr() == other.derived().ptr(); }\n    /// Equivalent to ``obj is None`` in Python.\n    bool is_none() const { return derived().ptr() == Py_None; }\n    /// Equivalent to obj == other in Python\n    bool equal(object_api const &other) const      { return rich_compare(other, Py_EQ); }\n    bool not_equal(object_api const &other) const  { return rich_compare(other, Py_NE); }\n    bool operator<(object_api const &other) const  { return rich_compare(other, Py_LT); }\n    bool operator<=(object_api const &other) const { return rich_compare(other, Py_LE); }\n    bool operator>(object_api const &other) const  { return rich_compare(other, Py_GT); }\n    bool operator>=(object_api const &other) const { return rich_compare(other, Py_GE); }\n\n    object operator-() const;\n    object operator~() const;\n    object operator+(object_api const &other) const;\n    object operator+=(object_api const &other) const;\n    object operator-(object_api const &other) const;\n    object operator-=(object_api const &other) const;\n    object operator*(object_api const &other) const;\n    object operator*=(object_api const &other) const;\n    object operator/(object_api const &other) const;\n    object operator/=(object_api const &other) const;\n    object operator|(object_api const &other) const;\n    object operator|=(object_api const &other) const;\n    object operator&(object_api const &other) const;\n    object operator&=(object_api const &other) const;\n    object operator^(object_api const &other) const;\n    object operator^=(object_api const &other) const;\n    object operator<<(object_api const &other) const;\n    object operator<<=(object_api const &other) const;\n    object operator>>(object_api const &other) const;\n    object operator>>=(object_api const &other) const;\n\n    PYBIND11_DEPRECATED(\"Use py::str(obj) instead\")\n    pybind11::str 
str() const;\n\n    /// Get or set the object's docstring, i.e. ``obj.__doc__``.\n    str_attr_accessor doc() const;\n\n    /// Return the object's current reference count\n    int ref_count() const { return static_cast<int>(Py_REFCNT(derived().ptr())); }\n    /// Return a handle to the Python type object underlying the instance\n    handle get_type() const;\n\nprivate:\n    bool rich_compare(object_api const &other, int value) const;\n};\n\nNAMESPACE_END(detail)\n\n/** \\rst\n    Holds a reference to a Python object (no reference counting)\n\n    The `handle` class is a thin wrapper around an arbitrary Python object (i.e. a\n    ``PyObject *`` in Python's C API). It does not perform any automatic reference\n    counting and merely provides a basic C++ interface to various Python API functions.\n\n    .. seealso::\n        The `object` class inherits from `handle` and adds automatic reference\n        counting features.\n\\endrst */\nclass handle : public detail::object_api<handle> {\npublic:\n    /// The default constructor creates a handle with a ``nullptr``-valued pointer\n    handle() = default;\n    /// Creates a ``handle`` from the given raw Python object pointer\n    handle(PyObject *ptr) : m_ptr(ptr) { } // Allow implicit conversion from PyObject*\n\n    /// Return the underlying ``PyObject *`` pointer\n    PyObject *ptr() const { return m_ptr; }\n    PyObject *&ptr() { return m_ptr; }\n\n    /** \\rst\n        Manually increase the reference count of the Python object. Usually, it is\n        preferable to use the `object` class which derives from `handle` and calls\n        this function automatically. Returns a reference to itself.\n    \\endrst */\n    const handle& inc_ref() const & { Py_XINCREF(m_ptr); return *this; }\n\n    /** \\rst\n        Manually decrease the reference count of the Python object. Usually, it is\n        preferable to use the `object` class which derives from `handle` and calls\n        this function automatically. 
Returns a reference to itself.\n    \\endrst */\n    const handle& dec_ref() const & { Py_XDECREF(m_ptr); return *this; }\n\n    /** \\rst\n        Attempt to cast the Python object into the given C++ type. A `cast_error`\n        will be throw upon failure.\n    \\endrst */\n    template <typename T> T cast() const;\n    /// Return ``true`` when the `handle` wraps a valid Python object\n    explicit operator bool() const { return m_ptr != nullptr; }\n    /** \\rst\n        Deprecated: Check that the underlying pointers are the same.\n        Equivalent to ``obj1 is obj2`` in Python.\n    \\endrst */\n    PYBIND11_DEPRECATED(\"Use obj1.is(obj2) instead\")\n    bool operator==(const handle &h) const { return m_ptr == h.m_ptr; }\n    PYBIND11_DEPRECATED(\"Use !obj1.is(obj2) instead\")\n    bool operator!=(const handle &h) const { return m_ptr != h.m_ptr; }\n    PYBIND11_DEPRECATED(\"Use handle::operator bool() instead\")\n    bool check() const { return m_ptr != nullptr; }\nprotected:\n    PyObject *m_ptr = nullptr;\n};\n\n/** \\rst\n    Holds a reference to a Python object (with reference counting)\n\n    Like `handle`, the `object` class is a thin wrapper around an arbitrary Python\n    object (i.e. a ``PyObject *`` in Python's C API). In contrast to `handle`, it\n    optionally increases the object's reference count upon construction, and it\n    *always* decreases the reference count when the `object` instance goes out of\n    scope and is destructed. 
When using `object` instances consistently, it is much\n    easier to get reference counting right at the first attempt.\n\\endrst */\nclass object : public handle {\npublic:\n    object() = default;\n    PYBIND11_DEPRECATED(\"Use reinterpret_borrow<object>() or reinterpret_steal<object>()\")\n    object(handle h, bool is_borrowed) : handle(h) { if (is_borrowed) inc_ref(); }\n    /// Copy constructor; always increases the reference count\n    object(const object &o) : handle(o) { inc_ref(); }\n    /// Move constructor; steals the object from ``other`` and preserves its reference count\n    object(object &&other) noexcept { m_ptr = other.m_ptr; other.m_ptr = nullptr; }\n    /// Destructor; automatically calls `handle::dec_ref()`\n    ~object() { dec_ref(); }\n\n    /** \\rst\n        Resets the internal pointer to ``nullptr`` without without decreasing the\n        object's reference count. The function returns a raw handle to the original\n        Python object.\n    \\endrst */\n    handle release() {\n      PyObject *tmp = m_ptr;\n      m_ptr = nullptr;\n      return handle(tmp);\n    }\n\n    object& operator=(const object &other) {\n        other.inc_ref();\n        dec_ref();\n        m_ptr = other.m_ptr;\n        return *this;\n    }\n\n    object& operator=(object &&other) noexcept {\n        if (this != &other) {\n            handle temp(m_ptr);\n            m_ptr = other.m_ptr;\n            other.m_ptr = nullptr;\n            temp.dec_ref();\n        }\n        return *this;\n    }\n\n    // Calling cast() on an object lvalue just copies (via handle::cast)\n    template <typename T> T cast() const &;\n    // Calling on an object rvalue does a move, if needed and/or possible\n    template <typename T> T cast() &&;\n\nprotected:\n    // Tags for choosing constructors from raw PyObject *\n    struct borrowed_t { };\n    struct stolen_t { };\n\n    template <typename T> friend T reinterpret_borrow(handle);\n    template <typename T> friend T 
reinterpret_steal(handle);\n\npublic:\n    // Only accessible from derived classes and the reinterpret_* functions\n    object(handle h, borrowed_t) : handle(h) { inc_ref(); }\n    object(handle h, stolen_t) : handle(h) { }\n};\n\n/** \\rst\n    Declare that a `handle` or ``PyObject *`` is a certain type and borrow the reference.\n    The target type ``T`` must be `object` or one of its derived classes. The function\n    doesn't do any conversions or checks. It's up to the user to make sure that the\n    target type is correct.\n\n    .. code-block:: cpp\n\n        PyObject *p = PyList_GetItem(obj, index);\n        py::object o = reinterpret_borrow<py::object>(p);\n        // or\n        py::tuple t = reinterpret_borrow<py::tuple>(p); // <-- `p` must be already be a `tuple`\n\\endrst */\ntemplate <typename T> T reinterpret_borrow(handle h) { return {h, object::borrowed_t{}}; }\n\n/** \\rst\n    Like `reinterpret_borrow`, but steals the reference.\n\n     .. code-block:: cpp\n\n        PyObject *p = PyObject_Str(obj);\n        py::str s = reinterpret_steal<py::str>(p); // <-- `p` must be already be a `str`\n\\endrst */\ntemplate <typename T> T reinterpret_steal(handle h) { return {h, object::stolen_t{}}; }\n\nNAMESPACE_BEGIN(detail)\ninline std::string error_string();\nNAMESPACE_END(detail)\n\n/// Fetch and hold an error which was already set in Python.  An instance of this is typically\n/// thrown to propagate python-side errors back through C++ which can either be caught manually or\n/// else falls back to the function dispatcher (which then raises the captured error back to\n/// python).\nclass error_already_set : public std::runtime_error {\npublic:\n    /// Constructs a new exception from the current Python error indicator, if any.  
The current\n    /// Python error indicator will be cleared.\n    error_already_set() : std::runtime_error(detail::error_string()) {\n        PyErr_Fetch(&type.ptr(), &value.ptr(), &trace.ptr());\n    }\n\n    error_already_set(const error_already_set &) = default;\n    error_already_set(error_already_set &&) = default;\n\n    inline ~error_already_set();\n\n    /// Give the currently-held error back to Python, if any.  If there is currently a Python error\n    /// already set it is cleared first.  After this call, the current object no longer stores the\n    /// error variables (but the `.what()` string is still available).\n    void restore() { PyErr_Restore(type.release().ptr(), value.release().ptr(), trace.release().ptr()); }\n\n    // Does nothing; provided for backwards compatibility.\n    PYBIND11_DEPRECATED(\"Use of error_already_set.clear() is deprecated\")\n    void clear() {}\n\n    /// Check if the currently trapped error type matches the given Python exception class (or a\n    /// subclass thereof).  May also be passed a tuple to search for any exception class matches in\n    /// the given tuple.\n    bool matches(handle ex) const { return PyErr_GivenExceptionMatches(ex.ptr(), type.ptr()); }\n\nprivate:\n    object type, value, trace;\n};\n\n/** \\defgroup python_builtins _\n    Unless stated otherwise, the following C++ functions behave the same\n    as their Python counterparts.\n */\n\n/** \\ingroup python_builtins\n    \\rst\n    Return true if ``obj`` is an instance of ``T``. 
Type ``T`` must be a subclass of\n    `object` or a class which was exposed to Python as ``py::class_<T>``.\n\\endrst */\ntemplate <typename T, detail::enable_if_t<std::is_base_of<object, T>::value, int> = 0>\nbool isinstance(handle obj) { return T::check_(obj); }\n\ntemplate <typename T, detail::enable_if_t<!std::is_base_of<object, T>::value, int> = 0>\nbool isinstance(handle obj) { return detail::isinstance_generic(obj, typeid(T)); }\n\ntemplate <> inline bool isinstance<handle>(handle obj) = delete;\ntemplate <> inline bool isinstance<object>(handle obj) { return obj.ptr() != nullptr; }\n\n/// \\ingroup python_builtins\n/// Return true if ``obj`` is an instance of the ``type``.\ninline bool isinstance(handle obj, handle type) {\n    const auto result = PyObject_IsInstance(obj.ptr(), type.ptr());\n    if (result == -1)\n        throw error_already_set();\n    return result != 0;\n}\n\n/// \\addtogroup python_builtins\n/// @{\ninline bool hasattr(handle obj, handle name) {\n    return PyObject_HasAttr(obj.ptr(), name.ptr()) == 1;\n}\n\ninline bool hasattr(handle obj, const char *name) {\n    return PyObject_HasAttrString(obj.ptr(), name) == 1;\n}\n\ninline void delattr(handle obj, handle name) {\n    if (PyObject_DelAttr(obj.ptr(), name.ptr()) != 0) { throw error_already_set(); }\n}\n\ninline void delattr(handle obj, const char *name) {\n    if (PyObject_DelAttrString(obj.ptr(), name) != 0) { throw error_already_set(); }\n}\n\ninline object getattr(handle obj, handle name) {\n    PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr());\n    if (!result) { throw error_already_set(); }\n    return reinterpret_steal<object>(result);\n}\n\ninline object getattr(handle obj, const char *name) {\n    PyObject *result = PyObject_GetAttrString(obj.ptr(), name);\n    if (!result) { throw error_already_set(); }\n    return reinterpret_steal<object>(result);\n}\n\ninline object getattr(handle obj, handle name, handle default_) {\n    if (PyObject *result = 
PyObject_GetAttr(obj.ptr(), name.ptr())) {\n        return reinterpret_steal<object>(result);\n    } else {\n        PyErr_Clear();\n        return reinterpret_borrow<object>(default_);\n    }\n}\n\ninline object getattr(handle obj, const char *name, handle default_) {\n    if (PyObject *result = PyObject_GetAttrString(obj.ptr(), name)) {\n        return reinterpret_steal<object>(result);\n    } else {\n        PyErr_Clear();\n        return reinterpret_borrow<object>(default_);\n    }\n}\n\ninline void setattr(handle obj, handle name, handle value) {\n    if (PyObject_SetAttr(obj.ptr(), name.ptr(), value.ptr()) != 0) { throw error_already_set(); }\n}\n\ninline void setattr(handle obj, const char *name, handle value) {\n    if (PyObject_SetAttrString(obj.ptr(), name, value.ptr()) != 0) { throw error_already_set(); }\n}\n\ninline ssize_t hash(handle obj) {\n    auto h = PyObject_Hash(obj.ptr());\n    if (h == -1) { throw error_already_set(); }\n    return h;\n}\n\n/// @} python_builtins\n\nNAMESPACE_BEGIN(detail)\ninline handle get_function(handle value) {\n    if (value) {\n#if PY_MAJOR_VERSION >= 3\n        if (PyInstanceMethod_Check(value.ptr()))\n            value = PyInstanceMethod_GET_FUNCTION(value.ptr());\n        else\n#endif\n        if (PyMethod_Check(value.ptr()))\n            value = PyMethod_GET_FUNCTION(value.ptr());\n    }\n    return value;\n}\n\n// Helper aliases/functions to support implicit casting of values given to python accessors/methods.\n// When given a pyobject, this simply returns the pyobject as-is; for other C++ type, the value goes\n// through pybind11::cast(obj) to convert it to an `object`.\ntemplate <typename T, enable_if_t<is_pyobject<T>::value, int> = 0>\nauto object_or_cast(T &&o) -> decltype(std::forward<T>(o)) { return std::forward<T>(o); }\n// The following casting version is implemented in cast.h:\ntemplate <typename T, enable_if_t<!is_pyobject<T>::value, int> = 0>\nobject object_or_cast(T &&o);\n// Match a PyObject*, which 
we want to convert directly to handle via its converting constructor\ninline handle object_or_cast(PyObject *ptr) { return ptr; }\n\ntemplate <typename Policy>\nclass accessor : public object_api<accessor<Policy>> {\n    using key_type = typename Policy::key_type;\n\npublic:\n    accessor(handle obj, key_type key) : obj(obj), key(std::move(key)) { }\n    accessor(const accessor &) = default;\n    accessor(accessor &&) = default;\n\n    // accessor overload required to override default assignment operator (templates are not allowed\n    // to replace default compiler-generated assignments).\n    void operator=(const accessor &a) && { std::move(*this).operator=(handle(a)); }\n    void operator=(const accessor &a) & { operator=(handle(a)); }\n\n    template <typename T> void operator=(T &&value) && {\n        Policy::set(obj, key, object_or_cast(std::forward<T>(value)));\n    }\n    template <typename T> void operator=(T &&value) & {\n        get_cache() = reinterpret_borrow<object>(object_or_cast(std::forward<T>(value)));\n    }\n\n    template <typename T = Policy>\n    PYBIND11_DEPRECATED(\"Use of obj.attr(...) 
as bool is deprecated in favor of pybind11::hasattr(obj, ...)\")\n    explicit operator enable_if_t<std::is_same<T, accessor_policies::str_attr>::value ||\n            std::is_same<T, accessor_policies::obj_attr>::value, bool>() const {\n        return hasattr(obj, key);\n    }\n    template <typename T = Policy>\n    PYBIND11_DEPRECATED(\"Use of obj[key] as bool is deprecated in favor of obj.contains(key)\")\n    explicit operator enable_if_t<std::is_same<T, accessor_policies::generic_item>::value, bool>() const {\n        return obj.contains(key);\n    }\n\n    operator object() const { return get_cache(); }\n    PyObject *ptr() const { return get_cache().ptr(); }\n    template <typename T> T cast() const { return get_cache().template cast<T>(); }\n\nprivate:\n    object &get_cache() const {\n        if (!cache) { cache = Policy::get(obj, key); }\n        return cache;\n    }\n\nprivate:\n    handle obj;\n    key_type key;\n    mutable object cache;\n};\n\nNAMESPACE_BEGIN(accessor_policies)\nstruct obj_attr {\n    using key_type = object;\n    static object get(handle obj, handle key) { return getattr(obj, key); }\n    static void set(handle obj, handle key, handle val) { setattr(obj, key, val); }\n};\n\nstruct str_attr {\n    using key_type = const char *;\n    static object get(handle obj, const char *key) { return getattr(obj, key); }\n    static void set(handle obj, const char *key, handle val) { setattr(obj, key, val); }\n};\n\nstruct generic_item {\n    using key_type = object;\n\n    static object get(handle obj, handle key) {\n        PyObject *result = PyObject_GetItem(obj.ptr(), key.ptr());\n        if (!result) { throw error_already_set(); }\n        return reinterpret_steal<object>(result);\n    }\n\n    static void set(handle obj, handle key, handle val) {\n        if (PyObject_SetItem(obj.ptr(), key.ptr(), val.ptr()) != 0) { throw error_already_set(); }\n    }\n};\n\nstruct sequence_item {\n    using key_type = size_t;\n\n    static object 
get(handle obj, size_t index) {\n        PyObject *result = PySequence_GetItem(obj.ptr(), static_cast<ssize_t>(index));\n        if (!result) { throw error_already_set(); }\n        return reinterpret_steal<object>(result);\n    }\n\n    static void set(handle obj, size_t index, handle val) {\n        // PySequence_SetItem does not steal a reference to 'val'\n        if (PySequence_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.ptr()) != 0) {\n            throw error_already_set();\n        }\n    }\n};\n\nstruct list_item {\n    using key_type = size_t;\n\n    static object get(handle obj, size_t index) {\n        PyObject *result = PyList_GetItem(obj.ptr(), static_cast<ssize_t>(index));\n        if (!result) { throw error_already_set(); }\n        return reinterpret_borrow<object>(result);\n    }\n\n    static void set(handle obj, size_t index, handle val) {\n        // PyList_SetItem steals a reference to 'val'\n        if (PyList_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.inc_ref().ptr()) != 0) {\n            throw error_already_set();\n        }\n    }\n};\n\nstruct tuple_item {\n    using key_type = size_t;\n\n    static object get(handle obj, size_t index) {\n        PyObject *result = PyTuple_GetItem(obj.ptr(), static_cast<ssize_t>(index));\n        if (!result) { throw error_already_set(); }\n        return reinterpret_borrow<object>(result);\n    }\n\n    static void set(handle obj, size_t index, handle val) {\n        // PyTuple_SetItem steals a reference to 'val'\n        if (PyTuple_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.inc_ref().ptr()) != 0) {\n            throw error_already_set();\n        }\n    }\n};\nNAMESPACE_END(accessor_policies)\n\n/// STL iterator template used for tuple, list, sequence and dict\ntemplate <typename Policy>\nclass generic_iterator : public Policy {\n    using It = generic_iterator;\n\npublic:\n    using difference_type = ssize_t;\n    using iterator_category = typename Policy::iterator_category;\n  
  using value_type = typename Policy::value_type;\n    using reference = typename Policy::reference;\n    using pointer = typename Policy::pointer;\n\n    generic_iterator() = default;\n    generic_iterator(handle seq, ssize_t index) : Policy(seq, index) { }\n\n    reference operator*() const { return Policy::dereference(); }\n    reference operator[](difference_type n) const { return *(*this + n); }\n    pointer operator->() const { return **this; }\n\n    It &operator++() { Policy::increment(); return *this; }\n    It operator++(int) { auto copy = *this; Policy::increment(); return copy; }\n    It &operator--() { Policy::decrement(); return *this; }\n    It operator--(int) { auto copy = *this; Policy::decrement(); return copy; }\n    It &operator+=(difference_type n) { Policy::advance(n); return *this; }\n    It &operator-=(difference_type n) { Policy::advance(-n); return *this; }\n\n    friend It operator+(const It &a, difference_type n) { auto copy = a; return copy += n; }\n    friend It operator+(difference_type n, const It &b) { return b + n; }\n    friend It operator-(const It &a, difference_type n) { auto copy = a; return copy -= n; }\n    friend difference_type operator-(const It &a, const It &b) { return a.distance_to(b); }\n\n    friend bool operator==(const It &a, const It &b) { return a.equal(b); }\n    friend bool operator!=(const It &a, const It &b) { return !(a == b); }\n    friend bool operator< (const It &a, const It &b) { return b - a > 0; }\n    friend bool operator> (const It &a, const It &b) { return b < a; }\n    friend bool operator>=(const It &a, const It &b) { return !(a < b); }\n    friend bool operator<=(const It &a, const It &b) { return !(a > b); }\n};\n\nNAMESPACE_BEGIN(iterator_policies)\n/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers\ntemplate <typename T>\nstruct arrow_proxy {\n    T value;\n\n    arrow_proxy(T &&value) : value(std::move(value)) { }\n    T *operator->() const { 
return &value; }\n};\n\n/// Lightweight iterator policy using just a simple pointer: see ``PySequence_Fast_ITEMS``\nclass sequence_fast_readonly {\nprotected:\n    using iterator_category = std::random_access_iterator_tag;\n    using value_type = handle;\n    using reference = const handle;\n    using pointer = arrow_proxy<const handle>;\n\n    sequence_fast_readonly(handle obj, ssize_t n) : ptr(PySequence_Fast_ITEMS(obj.ptr()) + n) { }\n\n    reference dereference() const { return *ptr; }\n    void increment() { ++ptr; }\n    void decrement() { --ptr; }\n    void advance(ssize_t n) { ptr += n; }\n    bool equal(const sequence_fast_readonly &b) const { return ptr == b.ptr; }\n    ssize_t distance_to(const sequence_fast_readonly &b) const { return ptr - b.ptr; }\n\nprivate:\n    PyObject **ptr;\n};\n\n/// Full read and write access using the sequence protocol: see ``detail::sequence_accessor``\nclass sequence_slow_readwrite {\nprotected:\n    using iterator_category = std::random_access_iterator_tag;\n    using value_type = object;\n    using reference = sequence_accessor;\n    using pointer = arrow_proxy<const sequence_accessor>;\n\n    sequence_slow_readwrite(handle obj, ssize_t index) : obj(obj), index(index) { }\n\n    reference dereference() const { return {obj, static_cast<size_t>(index)}; }\n    void increment() { ++index; }\n    void decrement() { --index; }\n    void advance(ssize_t n) { index += n; }\n    bool equal(const sequence_slow_readwrite &b) const { return index == b.index; }\n    ssize_t distance_to(const sequence_slow_readwrite &b) const { return index - b.index; }\n\nprivate:\n    handle obj;\n    ssize_t index;\n};\n\n/// Python's dictionary protocol permits this to be a forward iterator\nclass dict_readonly {\nprotected:\n    using iterator_category = std::forward_iterator_tag;\n    using value_type = std::pair<handle, handle>;\n    using reference = const value_type;\n    using pointer = arrow_proxy<const value_type>;\n\n    dict_readonly() = 
default;\n    dict_readonly(handle obj, ssize_t pos) : obj(obj), pos(pos) { increment(); }\n\n    reference dereference() const { return {key, value}; }\n    void increment() { if (!PyDict_Next(obj.ptr(), &pos, &key, &value)) { pos = -1; } }\n    bool equal(const dict_readonly &b) const { return pos == b.pos; }\n\nprivate:\n    handle obj;\n    PyObject *key, *value;\n    ssize_t pos = -1;\n};\nNAMESPACE_END(iterator_policies)\n\n#if !defined(PYPY_VERSION)\nusing tuple_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;\nusing list_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;\n#else\nusing tuple_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;\nusing list_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;\n#endif\n\nusing sequence_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;\nusing dict_iterator = generic_iterator<iterator_policies::dict_readonly>;\n\ninline bool PyIterable_Check(PyObject *obj) {\n    PyObject *iter = PyObject_GetIter(obj);\n    if (iter) {\n        Py_DECREF(iter);\n        return true;\n    } else {\n        PyErr_Clear();\n        return false;\n    }\n}\n\ninline bool PyNone_Check(PyObject *o) { return o == Py_None; }\n#if PY_MAJOR_VERSION >= 3\ninline bool PyEllipsis_Check(PyObject *o) { return o == Py_Ellipsis; }\n#endif\n\ninline bool PyUnicode_Check_Permissive(PyObject *o) { return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o); }\n\nclass kwargs_proxy : public handle {\npublic:\n    explicit kwargs_proxy(handle h) : handle(h) { }\n};\n\nclass args_proxy : public handle {\npublic:\n    explicit args_proxy(handle h) : handle(h) { }\n    kwargs_proxy operator*() const { return kwargs_proxy(*this); }\n};\n\n/// Python argument categories (using PEP 448 terms)\ntemplate <typename T> using is_keyword = std::is_base_of<arg, T>;\ntemplate <typename T> using is_s_unpacking = std::is_same<args_proxy, T>; // * unpacking\ntemplate 
<typename T> using is_ds_unpacking = std::is_same<kwargs_proxy, T>; // ** unpacking\ntemplate <typename T> using is_positional = satisfies_none_of<T,\n    is_keyword, is_s_unpacking, is_ds_unpacking\n>;\ntemplate <typename T> using is_keyword_or_ds = satisfies_any_of<T, is_keyword, is_ds_unpacking>;\n\n// Call argument collector forward declarations\ntemplate <return_value_policy policy = return_value_policy::automatic_reference>\nclass simple_collector;\ntemplate <return_value_policy policy = return_value_policy::automatic_reference>\nclass unpacking_collector;\n\nNAMESPACE_END(detail)\n\n// TODO: After the deprecated constructors are removed, this macro can be simplified by\n//       inheriting ctors: `using Parent::Parent`. It's not an option right now because\n//       the `using` statement triggers the parent deprecation warning even if the ctor\n//       isn't even used.\n#define PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \\\n    public: \\\n        PYBIND11_DEPRECATED(\"Use reinterpret_borrow<\"#Name\">() or reinterpret_steal<\"#Name\">()\") \\\n        Name(handle h, bool is_borrowed) : Parent(is_borrowed ? Parent(h, borrowed_t{}) : Parent(h, stolen_t{})) { } \\\n        Name(handle h, borrowed_t) : Parent(h, borrowed_t{}) { } \\\n        Name(handle h, stolen_t) : Parent(h, stolen_t{}) { } \\\n        PYBIND11_DEPRECATED(\"Use py::isinstance<py::python_type>(obj) instead\") \\\n        bool check() const { return m_ptr != nullptr && (bool) CheckFun(m_ptr); } \\\n        static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); }\n\n#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \\\n    PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \\\n    /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \\\n    Name(const object &o) \\\n    : Parent(check_(o) ? 
o.inc_ref().ptr() : ConvertFun(o.ptr()), stolen_t{}) \\\n    { if (!m_ptr) throw error_already_set(); } \\\n    Name(object &&o) \\\n    : Parent(check_(o) ? o.release().ptr() : ConvertFun(o.ptr()), stolen_t{}) \\\n    { if (!m_ptr) throw error_already_set(); } \\\n    template <typename Policy_> \\\n    Name(const ::pybind11::detail::accessor<Policy_> &a) : Name(object(a)) { }\n\n#define PYBIND11_OBJECT(Name, Parent, CheckFun) \\\n    PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \\\n    /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \\\n    Name(const object &o) : Parent(o) { } \\\n    Name(object &&o) : Parent(std::move(o)) { }\n\n#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun) \\\n    PYBIND11_OBJECT(Name, Parent, CheckFun) \\\n    Name() : Parent() { }\n\n/// \\addtogroup pytypes\n/// @{\n\n/** \\rst\n    Wraps a Python iterator so that it can also be used as a C++ input iterator\n\n    Caveat: copying an iterator does not (and cannot) clone the internal\n    state of the Python iterable. This also applies to the post-increment\n    operator. 
This iterator should only be used to retrieve the current\n    value using ``operator*()``.\n\\endrst */\nclass iterator : public object {\npublic:\n    using iterator_category = std::input_iterator_tag;\n    using difference_type = ssize_t;\n    using value_type = handle;\n    using reference = const handle;\n    using pointer = const handle *;\n\n    PYBIND11_OBJECT_DEFAULT(iterator, object, PyIter_Check)\n\n    iterator& operator++() {\n        advance();\n        return *this;\n    }\n\n    iterator operator++(int) {\n        auto rv = *this;\n        advance();\n        return rv;\n    }\n\n    reference operator*() const {\n        if (m_ptr && !value.ptr()) {\n            auto& self = const_cast<iterator &>(*this);\n            self.advance();\n        }\n        return value;\n    }\n\n    pointer operator->() const { operator*(); return &value; }\n\n    /** \\rst\n         The value which marks the end of the iteration. ``it == iterator::sentinel()``\n         is equivalent to catching ``StopIteration`` in Python.\n\n         .. 
code-block:: cpp\n\n             void foo(py::iterator it) {\n                 while (it != py::iterator::sentinel()) {\n                    // use `*it`\n                    ++it;\n                 }\n             }\n    \\endrst */\n    static iterator sentinel() { return {}; }\n\n    friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); }\n    friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); }\n\nprivate:\n    void advance() {\n        value = reinterpret_steal<object>(PyIter_Next(m_ptr));\n        if (PyErr_Occurred()) { throw error_already_set(); }\n    }\n\nprivate:\n    object value = {};\n};\n\nclass iterable : public object {\npublic:\n    PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check)\n};\n\nclass bytes;\n\nclass str : public object {\npublic:\n    PYBIND11_OBJECT_CVT(str, object, detail::PyUnicode_Check_Permissive, raw_str)\n\n    str(const char *c, size_t n)\n        : object(PyUnicode_FromStringAndSize(c, (ssize_t) n), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate string object!\");\n    }\n\n    // 'explicit' is explicitly omitted from the following constructors to allow implicit conversion to py::str from C++ string-like objects\n    str(const char *c = \"\")\n        : object(PyUnicode_FromString(c), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate string object!\");\n    }\n\n    str(const std::string &s) : str(s.data(), s.size()) { }\n\n    explicit str(const bytes &b);\n\n    /** \\rst\n        Return a string representation of the object. 
This is analogous to\n        the ``str()`` function in Python.\n    \\endrst */\n    explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) { }\n\n    operator std::string() const {\n        object temp = *this;\n        if (PyUnicode_Check(m_ptr)) {\n            temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(m_ptr));\n            if (!temp)\n                pybind11_fail(\"Unable to extract string contents! (encoding issue)\");\n        }\n        char *buffer;\n        ssize_t length;\n        if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length))\n            pybind11_fail(\"Unable to extract string contents! (invalid type)\");\n        return std::string(buffer, (size_t) length);\n    }\n\n    template <typename... Args>\n    str format(Args &&...args) const {\n        return attr(\"format\")(std::forward<Args>(args)...);\n    }\n\nprivate:\n    /// Return string representation -- always returns a new reference, even if already a str\n    static PyObject *raw_str(PyObject *op) {\n        PyObject *str_value = PyObject_Str(op);\n#if PY_MAJOR_VERSION < 3\n        if (!str_value) throw error_already_set();\n        PyObject *unicode = PyUnicode_FromEncodedObject(str_value, \"utf-8\", nullptr);\n        Py_XDECREF(str_value); str_value = unicode;\n#endif\n        return str_value;\n    }\n};\n/// @} pytypes\n\ninline namespace literals {\n/** \\rst\n    String literal version of `str`\n \\endrst */\ninline str operator\"\" _s(const char *s, size_t size) { return {s, size}; }\n}\n\n/// \\addtogroup pytypes\n/// @{\nclass bytes : public object {\npublic:\n    PYBIND11_OBJECT(bytes, object, PYBIND11_BYTES_CHECK)\n\n    // Allow implicit conversion:\n    bytes(const char *c = \"\")\n        : object(PYBIND11_BYTES_FROM_STRING(c), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate bytes object!\");\n    }\n\n    bytes(const char *c, size_t n)\n        : object(PYBIND11_BYTES_FROM_STRING_AND_SIZE(c, (ssize_t) n), 
stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate bytes object!\");\n    }\n\n    // Allow implicit conversion:\n    bytes(const std::string &s) : bytes(s.data(), s.size()) { }\n\n    explicit bytes(const pybind11::str &s);\n\n    operator std::string() const {\n        char *buffer;\n        ssize_t length;\n        if (PYBIND11_BYTES_AS_STRING_AND_SIZE(m_ptr, &buffer, &length))\n            pybind11_fail(\"Unable to extract bytes contents!\");\n        return std::string(buffer, (size_t) length);\n    }\n};\n\ninline bytes::bytes(const pybind11::str &s) {\n    object temp = s;\n    if (PyUnicode_Check(s.ptr())) {\n        temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(s.ptr()));\n        if (!temp)\n            pybind11_fail(\"Unable to extract string contents! (encoding issue)\");\n    }\n    char *buffer;\n    ssize_t length;\n    if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length))\n        pybind11_fail(\"Unable to extract string contents! 
(invalid type)\");\n    auto obj = reinterpret_steal<object>(PYBIND11_BYTES_FROM_STRING_AND_SIZE(buffer, length));\n    if (!obj)\n        pybind11_fail(\"Could not allocate bytes object!\");\n    m_ptr = obj.release().ptr();\n}\n\ninline str::str(const bytes& b) {\n    char *buffer;\n    ssize_t length;\n    if (PYBIND11_BYTES_AS_STRING_AND_SIZE(b.ptr(), &buffer, &length))\n        pybind11_fail(\"Unable to extract bytes contents!\");\n    auto obj = reinterpret_steal<object>(PyUnicode_FromStringAndSize(buffer, (ssize_t) length));\n    if (!obj)\n        pybind11_fail(\"Could not allocate string object!\");\n    m_ptr = obj.release().ptr();\n}\n\nclass none : public object {\npublic:\n    PYBIND11_OBJECT(none, object, detail::PyNone_Check)\n    none() : object(Py_None, borrowed_t{}) { }\n};\n\n#if PY_MAJOR_VERSION >= 3\nclass ellipsis : public object {\npublic:\n    PYBIND11_OBJECT(ellipsis, object, detail::PyEllipsis_Check)\n    ellipsis() : object(Py_Ellipsis, borrowed_t{}) { }\n};\n#endif\n\nclass bool_ : public object {\npublic:\n    PYBIND11_OBJECT_CVT(bool_, object, PyBool_Check, raw_bool)\n    bool_() : object(Py_False, borrowed_t{}) { }\n    // Allow implicit conversion from and to `bool`:\n    bool_(bool value) : object(value ? Py_True : Py_False, borrowed_t{}) { }\n    operator bool() const { return m_ptr && PyLong_AsLong(m_ptr) != 0; }\n\nprivate:\n    /// Return the truth value of an object -- always returns a new reference\n    static PyObject *raw_bool(PyObject *op) {\n        const auto value = PyObject_IsTrue(op);\n        if (value == -1) return nullptr;\n        return handle(value ? Py_True : Py_False).inc_ref().ptr();\n    }\n};\n\nNAMESPACE_BEGIN(detail)\n// Converts a value to the given unsigned type.  
If an error occurs, you get back (Unsigned) -1;\n// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned).\n// (The distinction is critically important when casting a returned -1 error value to some other\n// unsigned type: (A)-1 != (B)-1 when A and B are unsigned types of different sizes).\ntemplate <typename Unsigned>\nUnsigned as_unsigned(PyObject *o) {\n    if (sizeof(Unsigned) <= sizeof(unsigned long)\n#if PY_VERSION_HEX < 0x03000000\n            || PyInt_Check(o)\n#endif\n    ) {\n        unsigned long v = PyLong_AsUnsignedLong(o);\n        return v == (unsigned long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;\n    }\n    else {\n        unsigned long long v = PyLong_AsUnsignedLongLong(o);\n        return v == (unsigned long long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;\n    }\n}\nNAMESPACE_END(detail)\n\nclass int_ : public object {\npublic:\n    PYBIND11_OBJECT_CVT(int_, object, PYBIND11_LONG_CHECK, PyNumber_Long)\n    int_() : object(PyLong_FromLong(0), stolen_t{}) { }\n    // Allow implicit conversion from C++ integral types:\n    template <typename T,\n              detail::enable_if_t<std::is_integral<T>::value, int> = 0>\n    int_(T value) {\n        if (sizeof(T) <= sizeof(long)) {\n            if (std::is_signed<T>::value)\n                m_ptr = PyLong_FromLong((long) value);\n            else\n                m_ptr = PyLong_FromUnsignedLong((unsigned long) value);\n        } else {\n            if (std::is_signed<T>::value)\n                m_ptr = PyLong_FromLongLong((long long) value);\n            else\n                m_ptr = PyLong_FromUnsignedLongLong((unsigned long long) value);\n        }\n        if (!m_ptr) pybind11_fail(\"Could not allocate int object!\");\n    }\n\n    template <typename T,\n              detail::enable_if_t<std::is_integral<T>::value, int> = 0>\n    operator T() const {\n        return std::is_unsigned<T>::value\n            ? 
detail::as_unsigned<T>(m_ptr)\n            : sizeof(T) <= sizeof(long)\n              ? (T) PyLong_AsLong(m_ptr)\n              : (T) PYBIND11_LONG_AS_LONGLONG(m_ptr);\n    }\n};\n\nclass float_ : public object {\npublic:\n    PYBIND11_OBJECT_CVT(float_, object, PyFloat_Check, PyNumber_Float)\n    // Allow implicit conversion from float/double:\n    float_(float value) : object(PyFloat_FromDouble((double) value), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate float object!\");\n    }\n    float_(double value = .0) : object(PyFloat_FromDouble((double) value), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate float object!\");\n    }\n    operator float() const { return (float) PyFloat_AsDouble(m_ptr); }\n    operator double() const { return (double) PyFloat_AsDouble(m_ptr); }\n};\n\nclass weakref : public object {\npublic:\n    PYBIND11_OBJECT_DEFAULT(weakref, object, PyWeakref_Check)\n    explicit weakref(handle obj, handle callback = {})\n        : object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate weak reference!\");\n    }\n};\n\nclass slice : public object {\npublic:\n    PYBIND11_OBJECT_DEFAULT(slice, object, PySlice_Check)\n    slice(ssize_t start_, ssize_t stop_, ssize_t step_) {\n        int_ start(start_), stop(stop_), step(step_);\n        m_ptr = PySlice_New(start.ptr(), stop.ptr(), step.ptr());\n        if (!m_ptr) pybind11_fail(\"Could not allocate slice object!\");\n    }\n    bool compute(size_t length, size_t *start, size_t *stop, size_t *step,\n                 size_t *slicelength) const {\n        return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr,\n                                    (ssize_t) length, (ssize_t *) start,\n                                    (ssize_t *) stop, (ssize_t *) step,\n                                    (ssize_t *) slicelength) == 0;\n    }\n};\n\nclass capsule : public object {\npublic:\n    
PYBIND11_OBJECT_DEFAULT(capsule, object, PyCapsule_CheckExact)\n    PYBIND11_DEPRECATED(\"Use reinterpret_borrow<capsule>() or reinterpret_steal<capsule>()\")\n    capsule(PyObject *ptr, bool is_borrowed) : object(is_borrowed ? object(ptr, borrowed_t{}) : object(ptr, stolen_t{})) { }\n\n    explicit capsule(const void *value, const char *name = nullptr, void (*destructor)(PyObject *) = nullptr)\n        : object(PyCapsule_New(const_cast<void *>(value), name, destructor), stolen_t{}) {\n        if (!m_ptr)\n            pybind11_fail(\"Could not allocate capsule object!\");\n    }\n\n    PYBIND11_DEPRECATED(\"Please pass a destructor that takes a void pointer as input\")\n    capsule(const void *value, void (*destruct)(PyObject *))\n        : object(PyCapsule_New(const_cast<void*>(value), nullptr, destruct), stolen_t{}) {\n        if (!m_ptr)\n            pybind11_fail(\"Could not allocate capsule object!\");\n    }\n\n    capsule(const void *value, void (*destructor)(void *)) {\n        m_ptr = PyCapsule_New(const_cast<void *>(value), nullptr, [](PyObject *o) {\n            auto destructor = reinterpret_cast<void (*)(void *)>(PyCapsule_GetContext(o));\n            void *ptr = PyCapsule_GetPointer(o, nullptr);\n            destructor(ptr);\n        });\n\n        if (!m_ptr)\n            pybind11_fail(\"Could not allocate capsule object!\");\n\n        if (PyCapsule_SetContext(m_ptr, (void *) destructor) != 0)\n            pybind11_fail(\"Could not set capsule context!\");\n    }\n\n    capsule(void (*destructor)()) {\n        m_ptr = PyCapsule_New(reinterpret_cast<void *>(destructor), nullptr, [](PyObject *o) {\n            auto destructor = reinterpret_cast<void (*)()>(PyCapsule_GetPointer(o, nullptr));\n            destructor();\n        });\n\n        if (!m_ptr)\n            pybind11_fail(\"Could not allocate capsule object!\");\n    }\n\n    template <typename T> operator T *() const {\n        auto name = this->name();\n        T * result = static_cast<T 
*>(PyCapsule_GetPointer(m_ptr, name));\n        if (!result) pybind11_fail(\"Unable to extract capsule contents!\");\n        return result;\n    }\n\n    const char *name() const { return PyCapsule_GetName(m_ptr); }\n};\n\nclass tuple : public object {\npublic:\n    PYBIND11_OBJECT_CVT(tuple, object, PyTuple_Check, PySequence_Tuple)\n    explicit tuple(size_t size = 0) : object(PyTuple_New((ssize_t) size), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate tuple object!\");\n    }\n    size_t size() const { return (size_t) PyTuple_Size(m_ptr); }\n    detail::tuple_accessor operator[](size_t index) const { return {*this, index}; }\n    detail::item_accessor operator[](handle h) const { return object::operator[](h); }\n    detail::tuple_iterator begin() const { return {*this, 0}; }\n    detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; }\n};\n\nclass dict : public object {\npublic:\n    PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict)\n    dict() : object(PyDict_New(), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate dict object!\");\n    }\n    template <typename... 
Args,\n              typename = detail::enable_if_t<detail::all_of<detail::is_keyword_or_ds<Args>...>::value>,\n              // MSVC workaround: it can't compile an out-of-line definition, so defer the collector\n              typename collector = detail::deferred_t<detail::unpacking_collector<>, Args...>>\n    explicit dict(Args &&...args) : dict(collector(std::forward<Args>(args)...).kwargs()) { }\n\n    size_t size() const { return (size_t) PyDict_Size(m_ptr); }\n    detail::dict_iterator begin() const { return {*this, 0}; }\n    detail::dict_iterator end() const { return {}; }\n    void clear() const { PyDict_Clear(ptr()); }\n    bool contains(handle key) const { return PyDict_Contains(ptr(), key.ptr()) == 1; }\n    bool contains(const char *key) const { return PyDict_Contains(ptr(), pybind11::str(key).ptr()) == 1; }\n\nprivate:\n    /// Call the `dict` Python type -- always returns a new reference\n    static PyObject *raw_dict(PyObject *op) {\n        if (PyDict_Check(op))\n            return handle(op).inc_ref().ptr();\n        return PyObject_CallFunctionObjArgs((PyObject *) &PyDict_Type, op, nullptr);\n    }\n};\n\nclass sequence : public object {\npublic:\n    PYBIND11_OBJECT_DEFAULT(sequence, object, PySequence_Check)\n    size_t size() const { return (size_t) PySequence_Size(m_ptr); }\n    detail::sequence_accessor operator[](size_t index) const { return {*this, index}; }\n    detail::item_accessor operator[](handle h) const { return object::operator[](h); }\n    detail::sequence_iterator begin() const { return {*this, 0}; }\n    detail::sequence_iterator end() const { return {*this, PySequence_Size(m_ptr)}; }\n};\n\nclass list : public object {\npublic:\n    PYBIND11_OBJECT_CVT(list, object, PyList_Check, PySequence_List)\n    explicit list(size_t size = 0) : object(PyList_New((ssize_t) size), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate list object!\");\n    }\n    size_t size() const { return (size_t) PyList_Size(m_ptr); }\n 
   detail::list_accessor operator[](size_t index) const { return {*this, index}; }\n    detail::item_accessor operator[](handle h) const { return object::operator[](h); }\n    detail::list_iterator begin() const { return {*this, 0}; }\n    detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; }\n    template <typename T> void append(T &&val) const {\n        PyList_Append(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr());\n    }\n};\n\nclass args : public tuple { PYBIND11_OBJECT_DEFAULT(args, tuple, PyTuple_Check) };\nclass kwargs : public dict { PYBIND11_OBJECT_DEFAULT(kwargs, dict, PyDict_Check)  };\n\nclass set : public object {\npublic:\n    PYBIND11_OBJECT_CVT(set, object, PySet_Check, PySet_New)\n    set() : object(PySet_New(nullptr), stolen_t{}) {\n        if (!m_ptr) pybind11_fail(\"Could not allocate set object!\");\n    }\n    size_t size() const { return (size_t) PySet_Size(m_ptr); }\n    template <typename T> bool add(T &&val) const {\n        return PySet_Add(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) == 0;\n    }\n    void clear() const { PySet_Clear(m_ptr); }\n};\n\nclass function : public object {\npublic:\n    PYBIND11_OBJECT_DEFAULT(function, object, PyCallable_Check)\n    handle cpp_function() const {\n        handle fun = detail::get_function(m_ptr);\n        if (fun && PyCFunction_Check(fun.ptr()))\n            return fun;\n        return handle();\n    }\n    bool is_cpp_function() const { return (bool) cpp_function(); }\n};\n\nclass buffer : public object {\npublic:\n    PYBIND11_OBJECT_DEFAULT(buffer, object, PyObject_CheckBuffer)\n\n    buffer_info request(bool writable = false) {\n        int flags = PyBUF_STRIDES | PyBUF_FORMAT;\n        if (writable) flags |= PyBUF_WRITABLE;\n        Py_buffer *view = new Py_buffer();\n        if (PyObject_GetBuffer(m_ptr, view, flags) != 0) {\n            delete view;\n            throw error_already_set();\n        }\n        return buffer_info(view);\n    
}\n};\n\nclass memoryview : public object {\npublic:\n    explicit memoryview(const buffer_info& info) {\n        static Py_buffer buf { };\n        // Py_buffer uses signed sizes, strides and shape!..\n        static std::vector<Py_ssize_t> py_strides { };\n        static std::vector<Py_ssize_t> py_shape { };\n        buf.buf = info.ptr;\n        buf.itemsize = info.itemsize;\n        buf.format = const_cast<char *>(info.format.c_str());\n        buf.ndim = (int) info.ndim;\n        buf.len = info.size;\n        py_strides.clear();\n        py_shape.clear();\n        for (size_t i = 0; i < (size_t) info.ndim; ++i) {\n            py_strides.push_back(info.strides[i]);\n            py_shape.push_back(info.shape[i]);\n        }\n        buf.strides = py_strides.data();\n        buf.shape = py_shape.data();\n        buf.suboffsets = nullptr;\n        buf.readonly = false;\n        buf.internal = nullptr;\n\n        m_ptr = PyMemoryView_FromBuffer(&buf);\n        if (!m_ptr)\n            pybind11_fail(\"Unable to create memoryview from buffer descriptor\");\n    }\n\n    PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject)\n};\n/// @} pytypes\n\n/// \\addtogroup python_builtins\n/// @{\ninline size_t len(handle h) {\n    ssize_t result = PyObject_Length(h.ptr());\n    if (result < 0)\n        pybind11_fail(\"Unable to compute length of object\");\n    return (size_t) result;\n}\n\ninline str repr(handle h) {\n    PyObject *str_value = PyObject_Repr(h.ptr());\n    if (!str_value) throw error_already_set();\n#if PY_MAJOR_VERSION < 3\n    PyObject *unicode = PyUnicode_FromEncodedObject(str_value, \"utf-8\", nullptr);\n    Py_XDECREF(str_value); str_value = unicode;\n    if (!str_value) throw error_already_set();\n#endif\n    return reinterpret_steal<str>(str_value);\n}\n\ninline iterator iter(handle obj) {\n    PyObject *result = PyObject_GetIter(obj.ptr());\n    if (!result) { throw error_already_set(); }\n    return 
reinterpret_steal<iterator>(result);\n}\n/// @} python_builtins\n\nNAMESPACE_BEGIN(detail)\ntemplate <typename D> iterator object_api<D>::begin() const { return iter(derived()); }\ntemplate <typename D> iterator object_api<D>::end() const { return iterator::sentinel(); }\ntemplate <typename D> item_accessor object_api<D>::operator[](handle key) const {\n    return {derived(), reinterpret_borrow<object>(key)};\n}\ntemplate <typename D> item_accessor object_api<D>::operator[](const char *key) const {\n    return {derived(), pybind11::str(key)};\n}\ntemplate <typename D> obj_attr_accessor object_api<D>::attr(handle key) const {\n    return {derived(), reinterpret_borrow<object>(key)};\n}\ntemplate <typename D> str_attr_accessor object_api<D>::attr(const char *key) const {\n    return {derived(), key};\n}\ntemplate <typename D> args_proxy object_api<D>::operator*() const {\n    return args_proxy(derived().ptr());\n}\ntemplate <typename D> template <typename T> bool object_api<D>::contains(T &&item) const {\n    return attr(\"__contains__\")(std::forward<T>(item)).template cast<bool>();\n}\n\ntemplate <typename D>\npybind11::str object_api<D>::str() const { return pybind11::str(derived()); }\n\ntemplate <typename D>\nstr_attr_accessor object_api<D>::doc() const { return attr(\"__doc__\"); }\n\ntemplate <typename D>\nhandle object_api<D>::get_type() const { return (PyObject *) Py_TYPE(derived().ptr()); }\n\ntemplate <typename D>\nbool object_api<D>::rich_compare(object_api const &other, int value) const {\n    int rv = PyObject_RichCompareBool(derived().ptr(), other.derived().ptr(), value);\n    if (rv == -1)\n        throw error_already_set();\n    return rv == 1;\n}\n\n#define PYBIND11_MATH_OPERATOR_UNARY(op, fn)                                   \\\n    template <typename D> object object_api<D>::op() const {                   \\\n        object result = reinterpret_steal<object>(fn(derived().ptr()));        \\\n        if (!result.ptr())                               
                      \\\n            throw error_already_set();                                         \\\n        return result;                                                         \\\n    }\n\n#define PYBIND11_MATH_OPERATOR_BINARY(op, fn)                                  \\\n    template <typename D>                                                      \\\n    object object_api<D>::op(object_api const &other) const {                  \\\n        object result = reinterpret_steal<object>(                             \\\n            fn(derived().ptr(), other.derived().ptr()));                       \\\n        if (!result.ptr())                                                     \\\n            throw error_already_set();                                         \\\n        return result;                                                         \\\n    }\n\nPYBIND11_MATH_OPERATOR_UNARY (operator~,   PyNumber_Invert)\nPYBIND11_MATH_OPERATOR_UNARY (operator-,   PyNumber_Negative)\nPYBIND11_MATH_OPERATOR_BINARY(operator+,   PyNumber_Add)\nPYBIND11_MATH_OPERATOR_BINARY(operator+=,  PyNumber_InPlaceAdd)\nPYBIND11_MATH_OPERATOR_BINARY(operator-,   PyNumber_Subtract)\nPYBIND11_MATH_OPERATOR_BINARY(operator-=,  PyNumber_InPlaceSubtract)\nPYBIND11_MATH_OPERATOR_BINARY(operator*,   PyNumber_Multiply)\nPYBIND11_MATH_OPERATOR_BINARY(operator*=,  PyNumber_InPlaceMultiply)\nPYBIND11_MATH_OPERATOR_BINARY(operator/,   PyNumber_TrueDivide)\nPYBIND11_MATH_OPERATOR_BINARY(operator/=,  PyNumber_InPlaceTrueDivide)\nPYBIND11_MATH_OPERATOR_BINARY(operator|,   PyNumber_Or)\nPYBIND11_MATH_OPERATOR_BINARY(operator|=,  PyNumber_InPlaceOr)\nPYBIND11_MATH_OPERATOR_BINARY(operator&,   PyNumber_And)\nPYBIND11_MATH_OPERATOR_BINARY(operator&=,  PyNumber_InPlaceAnd)\nPYBIND11_MATH_OPERATOR_BINARY(operator^,   PyNumber_Xor)\nPYBIND11_MATH_OPERATOR_BINARY(operator^=,  PyNumber_InPlaceXor)\nPYBIND11_MATH_OPERATOR_BINARY(operator<<,  PyNumber_Lshift)\nPYBIND11_MATH_OPERATOR_BINARY(operator<<=, 
PyNumber_InPlaceLshift)\nPYBIND11_MATH_OPERATOR_BINARY(operator>>,  PyNumber_Rshift)\nPYBIND11_MATH_OPERATOR_BINARY(operator>>=, PyNumber_InPlaceRshift)\n\n#undef PYBIND11_MATH_OPERATOR_UNARY\n#undef PYBIND11_MATH_OPERATOR_BINARY\n\nNAMESPACE_END(detail)\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/stl.h",
    "content": "/*\n    pybind11/stl.h: Transparent conversion for STL data types\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"pybind11.h\"\n#include <set>\n#include <unordered_set>\n#include <map>\n#include <unordered_map>\n#include <iostream>\n#include <list>\n#include <deque>\n#include <valarray>\n\n#if defined(_MSC_VER)\n#pragma warning(push)\n#pragma warning(disable: 4127) // warning C4127: Conditional expression is constant\n#endif\n\n#ifdef __has_include\n// std::optional (but including it in c++14 mode isn't allowed)\n#  if defined(PYBIND11_CPP17) && __has_include(<optional>)\n#    include <optional>\n#    define PYBIND11_HAS_OPTIONAL 1\n#  endif\n// std::experimental::optional (but not allowed in c++11 mode)\n#  if defined(PYBIND11_CPP14) && (__has_include(<experimental/optional>) && \\\n                                 !__has_include(<optional>))\n#    include <experimental/optional>\n#    define PYBIND11_HAS_EXP_OPTIONAL 1\n#  endif\n// std::variant\n#  if defined(PYBIND11_CPP17) && __has_include(<variant>)\n#    include <variant>\n#    define PYBIND11_HAS_VARIANT 1\n#  endif\n#elif defined(_MSC_VER) && defined(PYBIND11_CPP17)\n#  include <optional>\n#  include <variant>\n#  define PYBIND11_HAS_OPTIONAL 1\n#  define PYBIND11_HAS_VARIANT 1\n#endif\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\n/// Extracts an const lvalue reference or rvalue reference for U based on the type of T (e.g. for\n/// forwarding a container element).  
Typically used indirect via forwarded_type(), below.\ntemplate <typename T, typename U>\nusing forwarded_type = conditional_t<\n    std::is_lvalue_reference<T>::value, remove_reference_t<U> &, remove_reference_t<U> &&>;\n\n/// Forwards a value U as rvalue or lvalue according to whether T is rvalue or lvalue; typically\n/// used for forwarding a container's elements.\ntemplate <typename T, typename U>\nforwarded_type<T, U> forward_like(U &&u) {\n    return std::forward<detail::forwarded_type<T, U>>(std::forward<U>(u));\n}\n\ntemplate <typename Type, typename Key> struct set_caster {\n    using type = Type;\n    using key_conv = make_caster<Key>;\n\n    bool load(handle src, bool convert) {\n        if (!isinstance<pybind11::set>(src))\n            return false;\n        auto s = reinterpret_borrow<pybind11::set>(src);\n        value.clear();\n        for (auto entry : s) {\n            key_conv conv;\n            if (!conv.load(entry, convert))\n                return false;\n            value.insert(cast_op<Key &&>(std::move(conv)));\n        }\n        return true;\n    }\n\n    template <typename T>\n    static handle cast(T &&src, return_value_policy policy, handle parent) {\n        if (!std::is_lvalue_reference<T>::value)\n            policy = return_value_policy_override<Key>::policy(policy);\n        pybind11::set s;\n        for (auto &&value : src) {\n            auto value_ = reinterpret_steal<object>(key_conv::cast(forward_like<T>(value), policy, parent));\n            if (!value_ || !s.add(value_))\n                return handle();\n        }\n        return s.release();\n    }\n\n    PYBIND11_TYPE_CASTER(type, _(\"Set[\") + key_conv::name + _(\"]\"));\n};\n\ntemplate <typename Type, typename Key, typename Value> struct map_caster {\n    using key_conv   = make_caster<Key>;\n    using value_conv = make_caster<Value>;\n\n    bool load(handle src, bool convert) {\n        if (!isinstance<dict>(src))\n            return false;\n        auto d = 
reinterpret_borrow<dict>(src);\n        value.clear();\n        for (auto it : d) {\n            key_conv kconv;\n            value_conv vconv;\n            if (!kconv.load(it.first.ptr(), convert) ||\n                !vconv.load(it.second.ptr(), convert))\n                return false;\n            value.emplace(cast_op<Key &&>(std::move(kconv)), cast_op<Value &&>(std::move(vconv)));\n        }\n        return true;\n    }\n\n    template <typename T>\n    static handle cast(T &&src, return_value_policy policy, handle parent) {\n        dict d;\n        return_value_policy policy_key = policy;\n        return_value_policy policy_value = policy;\n        if (!std::is_lvalue_reference<T>::value) {\n            policy_key = return_value_policy_override<Key>::policy(policy_key);\n            policy_value = return_value_policy_override<Value>::policy(policy_value);\n        }\n        for (auto &&kv : src) {\n            auto key = reinterpret_steal<object>(key_conv::cast(forward_like<T>(kv.first), policy_key, parent));\n            auto value = reinterpret_steal<object>(value_conv::cast(forward_like<T>(kv.second), policy_value, parent));\n            if (!key || !value)\n                return handle();\n            d[key] = value;\n        }\n        return d.release();\n    }\n\n    PYBIND11_TYPE_CASTER(Type, _(\"Dict[\") + key_conv::name + _(\", \") + value_conv::name + _(\"]\"));\n};\n\ntemplate <typename Type, typename Value> struct list_caster {\n    using value_conv = make_caster<Value>;\n\n    bool load(handle src, bool convert) {\n        if (!isinstance<sequence>(src) || isinstance<str>(src))\n            return false;\n        auto s = reinterpret_borrow<sequence>(src);\n        value.clear();\n        reserve_maybe(s, &value);\n        for (auto it : s) {\n            value_conv conv;\n            if (!conv.load(it, convert))\n                return false;\n            value.push_back(cast_op<Value &&>(std::move(conv)));\n        }\n        return true;\n  
  }\n\nprivate:\n    template <typename T = Type,\n              enable_if_t<std::is_same<decltype(std::declval<T>().reserve(0)), void>::value, int> = 0>\n    void reserve_maybe(sequence s, Type *) { value.reserve(s.size()); }\n    void reserve_maybe(sequence, void *) { }\n\npublic:\n    template <typename T>\n    static handle cast(T &&src, return_value_policy policy, handle parent) {\n        if (!std::is_lvalue_reference<T>::value)\n            policy = return_value_policy_override<Value>::policy(policy);\n        list l(src.size());\n        size_t index = 0;\n        for (auto &&value : src) {\n            auto value_ = reinterpret_steal<object>(value_conv::cast(forward_like<T>(value), policy, parent));\n            if (!value_)\n                return handle();\n            PyList_SET_ITEM(l.ptr(), (ssize_t) index++, value_.release().ptr()); // steals a reference\n        }\n        return l.release();\n    }\n\n    PYBIND11_TYPE_CASTER(Type, _(\"List[\") + value_conv::name + _(\"]\"));\n};\n\ntemplate <typename Type, typename Alloc> struct type_caster<std::vector<Type, Alloc>>\n : list_caster<std::vector<Type, Alloc>, Type> { };\n\ntemplate <typename Type, typename Alloc> struct type_caster<std::deque<Type, Alloc>>\n : list_caster<std::deque<Type, Alloc>, Type> { };\n\ntemplate <typename Type, typename Alloc> struct type_caster<std::list<Type, Alloc>>\n : list_caster<std::list<Type, Alloc>, Type> { };\n\ntemplate <typename ArrayType, typename Value, bool Resizable, size_t Size = 0> struct array_caster {\n    using value_conv = make_caster<Value>;\n\nprivate:\n    template <bool R = Resizable>\n    bool require_size(enable_if_t<R, size_t> size) {\n        if (value.size() != size)\n            value.resize(size);\n        return true;\n    }\n    template <bool R = Resizable>\n    bool require_size(enable_if_t<!R, size_t> size) {\n        return size == Size;\n    }\n\npublic:\n    bool load(handle src, bool convert) {\n        if 
(!isinstance<sequence>(src))\n            return false;\n        auto l = reinterpret_borrow<sequence>(src);\n        if (!require_size(l.size()))\n            return false;\n        size_t ctr = 0;\n        for (auto it : l) {\n            value_conv conv;\n            if (!conv.load(it, convert))\n                return false;\n            value[ctr++] = cast_op<Value &&>(std::move(conv));\n        }\n        return true;\n    }\n\n    template <typename T>\n    static handle cast(T &&src, return_value_policy policy, handle parent) {\n        list l(src.size());\n        size_t index = 0;\n        for (auto &&value : src) {\n            auto value_ = reinterpret_steal<object>(value_conv::cast(forward_like<T>(value), policy, parent));\n            if (!value_)\n                return handle();\n            PyList_SET_ITEM(l.ptr(), (ssize_t) index++, value_.release().ptr()); // steals a reference\n        }\n        return l.release();\n    }\n\n    PYBIND11_TYPE_CASTER(ArrayType, _(\"List[\") + value_conv::name + _<Resizable>(_(\"\"), _(\"[\") + _<Size>() + _(\"]\")) + _(\"]\"));\n};\n\ntemplate <typename Type, size_t Size> struct type_caster<std::array<Type, Size>>\n : array_caster<std::array<Type, Size>, Type, false, Size> { };\n\ntemplate <typename Type> struct type_caster<std::valarray<Type>>\n : array_caster<std::valarray<Type>, Type, true> { };\n\ntemplate <typename Key, typename Compare, typename Alloc> struct type_caster<std::set<Key, Compare, Alloc>>\n  : set_caster<std::set<Key, Compare, Alloc>, Key> { };\n\ntemplate <typename Key, typename Hash, typename Equal, typename Alloc> struct type_caster<std::unordered_set<Key, Hash, Equal, Alloc>>\n  : set_caster<std::unordered_set<Key, Hash, Equal, Alloc>, Key> { };\n\ntemplate <typename Key, typename Value, typename Compare, typename Alloc> struct type_caster<std::map<Key, Value, Compare, Alloc>>\n  : map_caster<std::map<Key, Value, Compare, Alloc>, Key, Value> { };\n\ntemplate <typename Key, typename Value, 
typename Hash, typename Equal, typename Alloc> struct type_caster<std::unordered_map<Key, Value, Hash, Equal, Alloc>>\n  : map_caster<std::unordered_map<Key, Value, Hash, Equal, Alloc>, Key, Value> { };\n\n// This type caster is intended to be used for std::optional and std::experimental::optional\ntemplate<typename T> struct optional_caster {\n    using value_conv = make_caster<typename T::value_type>;\n\n    template <typename T_>\n    static handle cast(T_ &&src, return_value_policy policy, handle parent) {\n        if (!src)\n            return none().inc_ref();\n        policy = return_value_policy_override<typename T::value_type>::policy(policy);\n        return value_conv::cast(*std::forward<T_>(src), policy, parent);\n    }\n\n    bool load(handle src, bool convert) {\n        if (!src) {\n            return false;\n        } else if (src.is_none()) {\n            return true;  // default-constructed value is already empty\n        }\n        value_conv inner_caster;\n        if (!inner_caster.load(src, convert))\n            return false;\n\n        value.emplace(cast_op<typename T::value_type &&>(std::move(inner_caster)));\n        return true;\n    }\n\n    PYBIND11_TYPE_CASTER(T, _(\"Optional[\") + value_conv::name + _(\"]\"));\n};\n\n#if PYBIND11_HAS_OPTIONAL\ntemplate<typename T> struct type_caster<std::optional<T>>\n    : public optional_caster<std::optional<T>> {};\n\ntemplate<> struct type_caster<std::nullopt_t>\n    : public void_caster<std::nullopt_t> {};\n#endif\n\n#if PYBIND11_HAS_EXP_OPTIONAL\ntemplate<typename T> struct type_caster<std::experimental::optional<T>>\n    : public optional_caster<std::experimental::optional<T>> {};\n\ntemplate<> struct type_caster<std::experimental::nullopt_t>\n    : public void_caster<std::experimental::nullopt_t> {};\n#endif\n\n/// Visit a variant and cast any found type to Python\nstruct variant_caster_visitor {\n    return_value_policy policy;\n    handle parent;\n\n    using result_type = handle; // required 
by boost::variant in C++11\n\n    template <typename T>\n    result_type operator()(T &&src) const {\n        return make_caster<T>::cast(std::forward<T>(src), policy, parent);\n    }\n};\n\n/// Helper class which abstracts away variant's `visit` function. `std::variant` and similar\n/// `namespace::variant` types which provide a `namespace::visit()` function are handled here\n/// automatically using argument-dependent lookup. Users can provide specializations for other\n/// variant-like classes, e.g. `boost::variant` and `boost::apply_visitor`.\ntemplate <template<typename...> class Variant>\nstruct visit_helper {\n    template <typename... Args>\n    static auto call(Args &&...args) -> decltype(visit(std::forward<Args>(args)...)) {\n        return visit(std::forward<Args>(args)...);\n    }\n};\n\n/// Generic variant caster\ntemplate <typename Variant> struct variant_caster;\n\ntemplate <template<typename...> class V, typename... Ts>\nstruct variant_caster<V<Ts...>> {\n    static_assert(sizeof...(Ts) > 0, \"Variant must consist of at least one alternative.\");\n\n    template <typename U, typename... Us>\n    bool load_alternative(handle src, bool convert, type_list<U, Us...>) {\n        auto caster = make_caster<U>();\n        if (caster.load(src, convert)) {\n            value = cast_op<U>(caster);\n            return true;\n        }\n        return load_alternative(src, convert, type_list<Us...>{});\n    }\n\n    bool load_alternative(handle, bool, type_list<>) { return false; }\n\n    bool load(handle src, bool convert) {\n        // Do a first pass without conversions to improve constructor resolution.\n        // E.g. `py::int_(1).cast<variant<double, int>>()` needs to fill the `int`\n        // slot of the variant. 
Without two-pass loading `double` would be filled\n        // because it appears first and a conversion is possible.\n        if (convert && load_alternative(src, false, type_list<Ts...>{}))\n            return true;\n        return load_alternative(src, convert, type_list<Ts...>{});\n    }\n\n    template <typename Variant>\n    static handle cast(Variant &&src, return_value_policy policy, handle parent) {\n        return visit_helper<V>::call(variant_caster_visitor{policy, parent},\n                                     std::forward<Variant>(src));\n    }\n\n    using Type = V<Ts...>;\n    PYBIND11_TYPE_CASTER(Type, _(\"Union[\") + detail::concat(make_caster<Ts>::name...) + _(\"]\"));\n};\n\n#if PYBIND11_HAS_VARIANT\ntemplate <typename... Ts>\nstruct type_caster<std::variant<Ts...>> : variant_caster<std::variant<Ts...>> { };\n#endif\n\nNAMESPACE_END(detail)\n\ninline std::ostream &operator<<(std::ostream &os, const handle &obj) {\n    os << (std::string) str(obj);\n    return os;\n}\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n\n#if defined(_MSC_VER)\n#pragma warning(pop)\n#endif\n"
  },
  {
    "path": "src/third_party/pybind11/include/pybind11/stl_bind.h",
    "content": "/*\n    pybind11/std_bind.h: Binding generators for STL data types\n\n    Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#pragma once\n\n#include \"detail/common.h\"\n#include \"operators.h\"\n\n#include <algorithm>\n#include <sstream>\n\nNAMESPACE_BEGIN(PYBIND11_NAMESPACE)\nNAMESPACE_BEGIN(detail)\n\n/* SFINAE helper class used by 'is_comparable */\ntemplate <typename T>  struct container_traits {\n    template <typename T2> static std::true_type test_comparable(decltype(std::declval<const T2 &>() == std::declval<const T2 &>())*);\n    template <typename T2> static std::false_type test_comparable(...);\n    template <typename T2> static std::true_type test_value(typename T2::value_type *);\n    template <typename T2> static std::false_type test_value(...);\n    template <typename T2> static std::true_type test_pair(typename T2::first_type *, typename T2::second_type *);\n    template <typename T2> static std::false_type test_pair(...);\n\n    static constexpr const bool is_comparable = std::is_same<std::true_type, decltype(test_comparable<T>(nullptr))>::value;\n    static constexpr const bool is_pair = std::is_same<std::true_type, decltype(test_pair<T>(nullptr, nullptr))>::value;\n    static constexpr const bool is_vector = std::is_same<std::true_type, decltype(test_value<T>(nullptr))>::value;\n    static constexpr const bool is_element = !is_pair && !is_vector;\n};\n\n/* Default: is_comparable -> std::false_type */\ntemplate <typename T, typename SFINAE = void>\nstruct is_comparable : std::false_type { };\n\n/* For non-map data structures, check whether operator== can be instantiated */\ntemplate <typename T>\nstruct is_comparable<\n    T, enable_if_t<container_traits<T>::is_element &&\n                   container_traits<T>::is_comparable>>\n    : std::true_type { };\n\n/* For a vector/map data structure, 
recursively check the value type (which is std::pair for maps) */\ntemplate <typename T>\nstruct is_comparable<T, enable_if_t<container_traits<T>::is_vector>> {\n    static constexpr const bool value =\n        is_comparable<typename T::value_type>::value;\n};\n\n/* For pairs, recursively check the two data types */\ntemplate <typename T>\nstruct is_comparable<T, enable_if_t<container_traits<T>::is_pair>> {\n    static constexpr const bool value =\n        is_comparable<typename T::first_type>::value &&\n        is_comparable<typename T::second_type>::value;\n};\n\n/* Fallback functions */\ntemplate <typename, typename, typename... Args> void vector_if_copy_constructible(const Args &...) { }\ntemplate <typename, typename, typename... Args> void vector_if_equal_operator(const Args &...) { }\ntemplate <typename, typename, typename... Args> void vector_if_insertion_operator(const Args &...) { }\ntemplate <typename, typename, typename... Args> void vector_modifiers(const Args &...) { }\n\ntemplate<typename Vector, typename Class_>\nvoid vector_if_copy_constructible(enable_if_t<is_copy_constructible<Vector>::value, Class_> &cl) {\n    cl.def(init<const Vector &>(), \"Copy constructor\");\n}\n\ntemplate<typename Vector, typename Class_>\nvoid vector_if_equal_operator(enable_if_t<is_comparable<Vector>::value, Class_> &cl) {\n    using T = typename Vector::value_type;\n\n    cl.def(self == self);\n    cl.def(self != self);\n\n    cl.def(\"count\",\n        [](const Vector &v, const T &x) {\n            return std::count(v.begin(), v.end(), x);\n        },\n        arg(\"x\"),\n        \"Return the number of times ``x`` appears in the list\"\n    );\n\n    cl.def(\"remove\", [](Vector &v, const T &x) {\n            auto p = std::find(v.begin(), v.end(), x);\n            if (p != v.end())\n                v.erase(p);\n            else\n                throw value_error();\n        },\n        arg(\"x\"),\n        \"Remove the first item from the list whose value is x. 
\"\n        \"It is an error if there is no such item.\"\n    );\n\n    cl.def(\"__contains__\",\n        [](const Vector &v, const T &x) {\n            return std::find(v.begin(), v.end(), x) != v.end();\n        },\n        arg(\"x\"),\n        \"Return true the container contains ``x``\"\n    );\n}\n\n// Vector modifiers -- requires a copyable vector_type:\n// (Technically, some of these (pop and __delitem__) don't actually require copyability, but it seems\n// silly to allow deletion but not insertion, so include them here too.)\ntemplate <typename Vector, typename Class_>\nvoid vector_modifiers(enable_if_t<is_copy_constructible<typename Vector::value_type>::value, Class_> &cl) {\n    using T = typename Vector::value_type;\n    using SizeType = typename Vector::size_type;\n    using DiffType = typename Vector::difference_type;\n\n    cl.def(\"append\",\n           [](Vector &v, const T &value) { v.push_back(value); },\n           arg(\"x\"),\n           \"Add an item to the end of the list\");\n\n    cl.def(init([](iterable it) {\n        auto v = std::unique_ptr<Vector>(new Vector());\n        v->reserve(len(it));\n        for (handle h : it)\n           v->push_back(h.cast<T>());\n        return v.release();\n    }));\n\n    cl.def(\"extend\",\n       [](Vector &v, const Vector &src) {\n           v.insert(v.end(), src.begin(), src.end());\n       },\n       arg(\"L\"),\n       \"Extend the list by appending all the items in the given list\"\n    );\n\n    cl.def(\"insert\",\n        [](Vector &v, SizeType i, const T &x) {\n            if (i > v.size())\n                throw index_error();\n            v.insert(v.begin() + (DiffType) i, x);\n        },\n        arg(\"i\") , arg(\"x\"),\n        \"Insert an item at a given position.\"\n    );\n\n    cl.def(\"pop\",\n        [](Vector &v) {\n            if (v.empty())\n                throw index_error();\n            T t = v.back();\n            v.pop_back();\n            return t;\n        },\n        
\"Remove and return the last item\"\n    );\n\n    cl.def(\"pop\",\n        [](Vector &v, SizeType i) {\n            if (i >= v.size())\n                throw index_error();\n            T t = v[i];\n            v.erase(v.begin() + (DiffType) i);\n            return t;\n        },\n        arg(\"i\"),\n        \"Remove and return the item at index ``i``\"\n    );\n\n    cl.def(\"__setitem__\",\n        [](Vector &v, SizeType i, const T &t) {\n            if (i >= v.size())\n                throw index_error();\n            v[i] = t;\n        }\n    );\n\n    /// Slicing protocol\n    cl.def(\"__getitem__\",\n        [](const Vector &v, slice slice) -> Vector * {\n            size_t start, stop, step, slicelength;\n\n            if (!slice.compute(v.size(), &start, &stop, &step, &slicelength))\n                throw error_already_set();\n\n            Vector *seq = new Vector();\n            seq->reserve((size_t) slicelength);\n\n            for (size_t i=0; i<slicelength; ++i) {\n                seq->push_back(v[start]);\n                start += step;\n            }\n            return seq;\n        },\n        arg(\"s\"),\n        \"Retrieve list elements using a slice object\"\n    );\n\n    cl.def(\"__setitem__\",\n        [](Vector &v, slice slice,  const Vector &value) {\n            size_t start, stop, step, slicelength;\n            if (!slice.compute(v.size(), &start, &stop, &step, &slicelength))\n                throw error_already_set();\n\n            if (slicelength != value.size())\n                throw std::runtime_error(\"Left and right hand size of slice assignment have different sizes!\");\n\n            for (size_t i=0; i<slicelength; ++i) {\n                v[start] = value[i];\n                start += step;\n            }\n        },\n        \"Assign list elements using a slice object\"\n    );\n\n    cl.def(\"__delitem__\",\n        [](Vector &v, SizeType i) {\n            if (i >= v.size())\n                throw index_error();\n           
 v.erase(v.begin() + DiffType(i));\n        },\n        \"Delete the list elements at index ``i``\"\n    );\n\n    cl.def(\"__delitem__\",\n        [](Vector &v, slice slice) {\n            size_t start, stop, step, slicelength;\n\n            if (!slice.compute(v.size(), &start, &stop, &step, &slicelength))\n                throw error_already_set();\n\n            if (step == 1 && false) {\n                v.erase(v.begin() + (DiffType) start, v.begin() + DiffType(start + slicelength));\n            } else {\n                for (size_t i = 0; i < slicelength; ++i) {\n                    v.erase(v.begin() + DiffType(start));\n                    start += step - 1;\n                }\n            }\n        },\n        \"Delete list elements using a slice object\"\n    );\n\n}\n\n// If the type has an operator[] that doesn't return a reference (most notably std::vector<bool>),\n// we have to access by copying; otherwise we return by reference.\ntemplate <typename Vector> using vector_needs_copy = negation<\n    std::is_same<decltype(std::declval<Vector>()[typename Vector::size_type()]), typename Vector::value_type &>>;\n\n// The usual case: access and iterate by reference\ntemplate <typename Vector, typename Class_>\nvoid vector_accessor(enable_if_t<!vector_needs_copy<Vector>::value, Class_> &cl) {\n    using T = typename Vector::value_type;\n    using SizeType = typename Vector::size_type;\n    using ItType   = typename Vector::iterator;\n\n    cl.def(\"__getitem__\",\n        [](Vector &v, SizeType i) -> T & {\n            if (i >= v.size())\n                throw index_error();\n            return v[i];\n        },\n        return_value_policy::reference_internal // ref + keepalive\n    );\n\n    cl.def(\"__iter__\",\n           [](Vector &v) {\n               return make_iterator<\n                   return_value_policy::reference_internal, ItType, ItType, T&>(\n                   v.begin(), v.end());\n           },\n           keep_alive<0, 1>() /* Essential: 
keep list alive while iterator exists */\n    );\n}\n\n// The case for special objects, like std::vector<bool>, that have to be returned-by-copy:\ntemplate <typename Vector, typename Class_>\nvoid vector_accessor(enable_if_t<vector_needs_copy<Vector>::value, Class_> &cl) {\n    using T = typename Vector::value_type;\n    using SizeType = typename Vector::size_type;\n    using ItType   = typename Vector::iterator;\n    cl.def(\"__getitem__\",\n        [](const Vector &v, SizeType i) -> T {\n            if (i >= v.size())\n                throw index_error();\n            return v[i];\n        }\n    );\n\n    cl.def(\"__iter__\",\n           [](Vector &v) {\n               return make_iterator<\n                   return_value_policy::copy, ItType, ItType, T>(\n                   v.begin(), v.end());\n           },\n           keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */\n    );\n}\n\ntemplate <typename Vector, typename Class_> auto vector_if_insertion_operator(Class_ &cl, std::string const &name)\n    -> decltype(std::declval<std::ostream&>() << std::declval<typename Vector::value_type>(), void()) {\n    using size_type = typename Vector::size_type;\n\n    cl.def(\"__repr__\",\n           [name](Vector &v) {\n            std::ostringstream s;\n            s << name << '[';\n            for (size_type i=0; i < v.size(); ++i) {\n                s << v[i];\n                if (i != v.size() - 1)\n                    s << \", \";\n            }\n            s << ']';\n            return s.str();\n        },\n        \"Return the canonical string representation of this list.\"\n    );\n}\n\n// Provide the buffer interface for vectors if we have data() and we have a format for it\n// GCC seems to have \"void std::vector<bool>::data()\" - doing SFINAE on the existence of data() is insufficient, we need to check it returns an appropriate pointer\ntemplate <typename Vector, typename = void>\nstruct vector_has_data_and_format : std::false_type 
{};\ntemplate <typename Vector>\nstruct vector_has_data_and_format<Vector, enable_if_t<std::is_same<decltype(format_descriptor<typename Vector::value_type>::format(), std::declval<Vector>().data()), typename Vector::value_type*>::value>> : std::true_type {};\n\n// Add the buffer interface to a vector\ntemplate <typename Vector, typename Class_, typename... Args>\nenable_if_t<detail::any_of<std::is_same<Args, buffer_protocol>...>::value>\nvector_buffer(Class_& cl) {\n    using T = typename Vector::value_type;\n\n    static_assert(vector_has_data_and_format<Vector>::value, \"There is not an appropriate format descriptor for this vector\");\n\n    // numpy.h declares this for arbitrary types, but it may raise an exception and crash hard at runtime if PYBIND11_NUMPY_DTYPE hasn't been called, so check here\n    format_descriptor<T>::format();\n\n    cl.def_buffer([](Vector& v) -> buffer_info {\n        return buffer_info(v.data(), static_cast<ssize_t>(sizeof(T)), format_descriptor<T>::format(), 1, {v.size()}, {sizeof(T)});\n    });\n\n    cl.def(init([](buffer buf) {\n        auto info = buf.request();\n        if (info.ndim != 1 || info.strides[0] % static_cast<ssize_t>(sizeof(T)))\n            throw type_error(\"Only valid 1D buffers can be copied to a vector\");\n        if (!detail::compare_buffer_info<T>::compare(info) || (ssize_t) sizeof(T) != info.itemsize)\n            throw type_error(\"Format mismatch (Python: \" + info.format + \" C++: \" + format_descriptor<T>::format() + \")\");\n\n        auto vec = std::unique_ptr<Vector>(new Vector());\n        vec->reserve((size_t) info.shape[0]);\n        T *p = static_cast<T*>(info.ptr);\n        ssize_t step = info.strides[0] / static_cast<ssize_t>(sizeof(T));\n        T *end = p + info.shape[0] * step;\n        for (; p != end; p += step)\n            vec->push_back(*p);\n        return vec.release();\n    }));\n\n    return;\n}\n\ntemplate <typename Vector, typename Class_, typename... 
Args>\nenable_if_t<!detail::any_of<std::is_same<Args, buffer_protocol>...>::value> vector_buffer(Class_&) {}\n\nNAMESPACE_END(detail)\n\n//\n// std::vector\n//\ntemplate <typename Vector, typename holder_type = std::unique_ptr<Vector>, typename... Args>\nclass_<Vector, holder_type> bind_vector(handle scope, std::string const &name, Args&&... args) {\n    using Class_ = class_<Vector, holder_type>;\n\n    // If the value_type is unregistered (e.g. a converting type) or is itself registered\n    // module-local then make the vector binding module-local as well:\n    using vtype = typename Vector::value_type;\n    auto vtype_info = detail::get_type_info(typeid(vtype));\n    bool local = !vtype_info || vtype_info->module_local;\n\n    Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward<Args>(args)...);\n\n    // Declare the buffer interface if a buffer_protocol() is passed in\n    detail::vector_buffer<Vector, Class_, Args...>(cl);\n\n    cl.def(init<>());\n\n    // Register copy constructor (if possible)\n    detail::vector_if_copy_constructible<Vector, Class_>(cl);\n\n    // Register comparison-related operators and functions (if possible)\n    detail::vector_if_equal_operator<Vector, Class_>(cl);\n\n    // Register stream insertion operator (if possible)\n    detail::vector_if_insertion_operator<Vector, Class_>(cl, name);\n\n    // Modifiers require copyable vector value type\n    detail::vector_modifiers<Vector, Class_>(cl);\n\n    // Accessor and iterator; return by value if copyable, otherwise we return by ref + keep-alive\n    detail::vector_accessor<Vector, Class_>(cl);\n\n    cl.def(\"__bool__\",\n        [](const Vector &v) -> bool {\n            return !v.empty();\n        },\n        \"Check whether the list is nonempty\"\n    );\n\n    cl.def(\"__len__\", &Vector::size);\n\n\n\n\n#if 0\n    // C++ style functions deprecated, leaving it here as an example\n    cl.def(init<size_type>());\n\n    cl.def(\"resize\",\n         (void 
(Vector::*) (size_type count)) & Vector::resize,\n         \"changes the number of elements stored\");\n\n    cl.def(\"erase\",\n        [](Vector &v, SizeType i) {\n        if (i >= v.size())\n            throw index_error();\n        v.erase(v.begin() + i);\n    }, \"erases element at index ``i``\");\n\n    cl.def(\"empty\",         &Vector::empty,         \"checks whether the container is empty\");\n    cl.def(\"size\",          &Vector::size,          \"returns the number of elements\");\n    cl.def(\"push_back\", (void (Vector::*)(const T&)) &Vector::push_back, \"adds an element to the end\");\n    cl.def(\"pop_back\",                               &Vector::pop_back, \"removes the last element\");\n\n    cl.def(\"max_size\",      &Vector::max_size,      \"returns the maximum possible number of elements\");\n    cl.def(\"reserve\",       &Vector::reserve,       \"reserves storage\");\n    cl.def(\"capacity\",      &Vector::capacity,      \"returns the number of elements that can be held in currently allocated storage\");\n    cl.def(\"shrink_to_fit\", &Vector::shrink_to_fit, \"reduces memory usage by freeing unused memory\");\n\n    cl.def(\"clear\", &Vector::clear, \"clears the contents\");\n    cl.def(\"swap\",   &Vector::swap, \"swaps the contents\");\n\n    cl.def(\"front\", [](Vector &v) {\n        if (v.size()) return v.front();\n        else throw index_error();\n    }, \"access the first element\");\n\n    cl.def(\"back\", [](Vector &v) {\n        if (v.size()) return v.back();\n        else throw index_error();\n    }, \"access the last element \");\n\n#endif\n\n    return cl;\n}\n\n\n\n//\n// std::map, std::unordered_map\n//\n\nNAMESPACE_BEGIN(detail)\n\n/* Fallback functions */\ntemplate <typename, typename, typename... Args> void map_if_insertion_operator(const Args &...) { }\ntemplate <typename, typename, typename... Args> void map_assignment(const Args &...) 
{ }\n\n// Map assignment when copy-assignable: just copy the value\ntemplate <typename Map, typename Class_>\nvoid map_assignment(enable_if_t<std::is_copy_assignable<typename Map::mapped_type>::value, Class_> &cl) {\n    using KeyType = typename Map::key_type;\n    using MappedType = typename Map::mapped_type;\n\n    cl.def(\"__setitem__\",\n           [](Map &m, const KeyType &k, const MappedType &v) {\n               auto it = m.find(k);\n               if (it != m.end()) it->second = v;\n               else m.emplace(k, v);\n           }\n    );\n}\n\n// Not copy-assignable, but still copy-constructible: we can update the value by erasing and reinserting\ntemplate<typename Map, typename Class_>\nvoid map_assignment(enable_if_t<\n        !std::is_copy_assignable<typename Map::mapped_type>::value &&\n        is_copy_constructible<typename Map::mapped_type>::value,\n        Class_> &cl) {\n    using KeyType = typename Map::key_type;\n    using MappedType = typename Map::mapped_type;\n\n    cl.def(\"__setitem__\",\n           [](Map &m, const KeyType &k, const MappedType &v) {\n               // We can't use m[k] = v; because value type might not be default constructable\n               auto r = m.emplace(k, v);\n               if (!r.second) {\n                   // value type is not copy assignable so the only way to insert it is to erase it first...\n                   m.erase(r.first);\n                   m.emplace(k, v);\n               }\n           }\n    );\n}\n\n\ntemplate <typename Map, typename Class_> auto map_if_insertion_operator(Class_ &cl, std::string const &name)\n-> decltype(std::declval<std::ostream&>() << std::declval<typename Map::key_type>() << std::declval<typename Map::mapped_type>(), void()) {\n\n    cl.def(\"__repr__\",\n           [name](Map &m) {\n            std::ostringstream s;\n            s << name << '{';\n            bool f = false;\n            for (auto const &kv : m) {\n                if (f)\n                    s << \", \";\n  
              s << kv.first << \": \" << kv.second;\n                f = true;\n            }\n            s << '}';\n            return s.str();\n        },\n        \"Return the canonical string representation of this map.\"\n    );\n}\n\n\nNAMESPACE_END(detail)\n\ntemplate <typename Map, typename holder_type = std::unique_ptr<Map>, typename... Args>\nclass_<Map, holder_type> bind_map(handle scope, const std::string &name, Args&&... args) {\n    using KeyType = typename Map::key_type;\n    using MappedType = typename Map::mapped_type;\n    using Class_ = class_<Map, holder_type>;\n\n    // If either type is a non-module-local bound type then make the map binding non-local as well;\n    // otherwise (e.g. both types are either module-local or converting) the map will be\n    // module-local.\n    auto tinfo = detail::get_type_info(typeid(MappedType));\n    bool local = !tinfo || tinfo->module_local;\n    if (local) {\n        tinfo = detail::get_type_info(typeid(KeyType));\n        local = !tinfo || tinfo->module_local;\n    }\n\n    Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward<Args>(args)...);\n\n    cl.def(init<>());\n\n    // Register stream insertion operator (if possible)\n    detail::map_if_insertion_operator<Map, Class_>(cl, name);\n\n    cl.def(\"__bool__\",\n        [](const Map &m) -> bool { return !m.empty(); },\n        \"Check whether the map is nonempty\"\n    );\n\n    cl.def(\"__iter__\",\n           [](Map &m) { return make_key_iterator(m.begin(), m.end()); },\n           keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */\n    );\n\n    cl.def(\"items\",\n           [](Map &m) { return make_iterator(m.begin(), m.end()); },\n           keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */\n    );\n\n    cl.def(\"__getitem__\",\n        [](Map &m, const KeyType &k) -> MappedType & {\n            auto it = m.find(k);\n            if (it == m.end())\n              throw 
key_error();\n           return it->second;\n        },\n        return_value_policy::reference_internal // ref + keepalive\n    );\n\n    // Assignment provided only if the type is copyable\n    detail::map_assignment<Map, Class_>(cl);\n\n    cl.def(\"__delitem__\",\n           [](Map &m, const KeyType &k) {\n               auto it = m.find(k);\n               if (it == m.end())\n                   throw key_error();\n               m.erase(it);\n           }\n    );\n\n    cl.def(\"__len__\", &Map::size);\n\n    return cl;\n}\n\nNAMESPACE_END(PYBIND11_NAMESPACE)\n"
  },
  {
    "path": "src/third_party/pybind11/pybind11/__init__.py",
    "content": "from ._version import version_info, __version__  # noqa: F401 imported but unused\n\n\ndef get_include(user=False):\n    from distutils.dist import Distribution\n    import os\n    import sys\n\n    # Are we running in a virtual environment?\n    virtualenv = hasattr(sys, 'real_prefix') or \\\n        sys.prefix != getattr(sys, \"base_prefix\", sys.prefix)\n\n    if virtualenv:\n        return os.path.join(sys.prefix, 'include', 'site',\n                            'python' + sys.version[:3])\n    else:\n        dist = Distribution({'name': 'pybind11'})\n        dist.parse_config_files()\n\n        dist_cobj = dist.get_command_obj('install', create=True)\n\n        # Search for packages in user's home directory?\n        if user:\n            dist_cobj.user = user\n            dist_cobj.prefix = \"\"\n        dist_cobj.finalize_options()\n\n        return os.path.dirname(dist_cobj.install_headers)\n"
  },
  {
    "path": "src/third_party/pybind11/pybind11/__main__.py",
    "content": "from __future__ import print_function\n\nimport argparse\nimport sys\nimport sysconfig\n\nfrom . import get_include\n\n\ndef print_includes():\n    dirs = [sysconfig.get_path('include'),\n            sysconfig.get_path('platinclude'),\n            get_include(),\n            get_include(True)]\n\n    # Make unique but preserve order\n    unique_dirs = []\n    for d in dirs:\n        if d not in unique_dirs:\n            unique_dirs.append(d)\n\n    print(' '.join('-I' + d for d in unique_dirs))\n\n\ndef main():\n    parser = argparse.ArgumentParser(prog='python -m pybind11')\n    parser.add_argument('--includes', action='store_true',\n                        help='Include flags for both pybind11 and Python headers.')\n    args = parser.parse_args()\n    if not sys.argv[1:]:\n        parser.print_help()\n    if args.includes:\n        print_includes()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "src/third_party/pybind11/pybind11/_version.py",
    "content": "version_info = (2, 3, 'dev0')\n__version__ = '.'.join(map(str, version_info))\n"
  },
  {
    "path": "src/third_party/pybind11/setup.cfg",
    "content": "[bdist_wheel]\nuniversal=1\n\n[flake8]\nmax-line-length = 99\nshow_source = True\nexclude = .git, __pycache__, build, dist, docs, tools, venv\nignore =\n    # required for pretty matrix formatting: multiple spaces after `,` and `[`\n    E201, E241, W504,\n    # camelcase 'cPickle' imported as lowercase 'pickle'\n    N813\n"
  },
  {
    "path": "src/third_party/pybind11/setup.py",
    "content": "#!/usr/bin/env python\n\n# Setup script for PyPI; use CMakeFile.txt to build extension modules\n\nfrom setuptools import setup\nfrom distutils.command.install_headers import install_headers\nfrom pybind11 import __version__\nimport os\n\n# Prevent installation of pybind11 headers by setting\n# PYBIND11_USE_CMAKE.\nif os.environ.get('PYBIND11_USE_CMAKE'):\n    headers = []\nelse:\n    headers = [\n        'include/pybind11/detail/class.h',\n        'include/pybind11/detail/common.h',\n        'include/pybind11/detail/descr.h',\n        'include/pybind11/detail/init.h',\n        'include/pybind11/detail/internals.h',\n        'include/pybind11/detail/typeid.h',\n        'include/pybind11/attr.h',\n        'include/pybind11/buffer_info.h',\n        'include/pybind11/cast.h',\n        'include/pybind11/chrono.h',\n        'include/pybind11/common.h',\n        'include/pybind11/complex.h',\n        'include/pybind11/eigen.h',\n        'include/pybind11/embed.h',\n        'include/pybind11/eval.h',\n        'include/pybind11/functional.h',\n        'include/pybind11/iostream.h',\n        'include/pybind11/numpy.h',\n        'include/pybind11/operators.h',\n        'include/pybind11/options.h',\n        'include/pybind11/pybind11.h',\n        'include/pybind11/pytypes.h',\n        'include/pybind11/stl.h',\n        'include/pybind11/stl_bind.h',\n    ]\n\n\nclass InstallHeaders(install_headers):\n    \"\"\"Use custom header installer because the default one flattens subdirectories\"\"\"\n    def run(self):\n        if not self.distribution.headers:\n            return\n\n        for header in self.distribution.headers:\n            subdir = os.path.dirname(os.path.relpath(header, 'include/pybind11'))\n            install_dir = os.path.join(self.install_dir, subdir)\n            self.mkpath(install_dir)\n\n            (out, _) = self.copy_file(header, install_dir)\n            self.outfiles.append(out)\n\n\nsetup(\n    name='pybind11',\n    
version=__version__,\n    description='Seamless operability between C++11 and Python',\n    author='Wenzel Jakob',\n    author_email='wenzel.jakob@epfl.ch',\n    url='https://github.com/pybind/pybind11',\n    download_url='https://github.com/pybind/pybind11/tarball/v' + __version__,\n    packages=['pybind11'],\n    license='BSD',\n    headers=headers,\n    cmdclass=dict(install_headers=InstallHeaders),\n    classifiers=[\n        'Development Status :: 5 - Production/Stable',\n        'Intended Audience :: Developers',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n        'Topic :: Utilities',\n        'Programming Language :: C++',\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.2',\n        'Programming Language :: Python :: 3.3',\n        'Programming Language :: Python :: 3.4',\n        'Programming Language :: Python :: 3.5',\n        'Programming Language :: Python :: 3.6',\n        'License :: OSI Approved :: BSD License'\n    ],\n    keywords='C++11, Python bindings',\n    long_description=\"\"\"pybind11 is a lightweight header-only library that\nexposes C++ types in Python and vice versa, mainly to create Python bindings of\nexisting C++ code. Its goals and syntax are similar to the excellent\nBoost.Python by David Abrahams: to minimize boilerplate code in traditional\nextension modules by inferring type information using compile-time\nintrospection.\n\nThe main issue with Boost.Python-and the reason for creating such a similar\nproject-is Boost. Boost is an enormously large and complex suite of utility\nlibraries that works with almost every C++ compiler in existence. This\ncompatibility has its cost: arcane template tricks and workarounds are\nnecessary to support the oldest and buggiest of compiler specimens. 
Now that\nC++11-compatible compilers are widely available, this heavy machinery has\nbecome an excessively large and unnecessary dependency.\n\nThink of this library as a tiny self-contained version of Boost.Python with\neverything stripped away that isn't relevant for binding generation. Without\ncomments, the core header files only require ~4K lines of code and depend on\nPython (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This\ncompact implementation was possible thanks to some of the new C++11 language\nfeatures (specifically: tuples, lambda functions and variadic templates). Since\nits creation, this library has grown beyond Boost.Python in many ways, leading\nto dramatically simpler binding code in many common situations.\"\"\")\n"
  },
  {
    "path": "src/third_party/pybind11/tests/CMakeLists.txt",
    "content": "# CMakeLists.txt -- Build system for the pybind11 test suite\n#\n# Copyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>\n#\n# All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\n\ncmake_minimum_required(VERSION 2.8.12)\n\noption(PYBIND11_WERROR  \"Report all warnings as errors\"  OFF)\n\nif (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)\n    # We're being loaded directly, i.e. not via add_subdirectory, so make this\n    # work as its own project and load the pybind11Config to get the tools we need\n    project(pybind11_tests CXX)\n\n    find_package(pybind11 REQUIRED CONFIG)\nendif()\n\nif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)\n  message(STATUS \"Setting tests build type to MinSizeRel as none was specified\")\n  set(CMAKE_BUILD_TYPE MinSizeRel CACHE STRING \"Choose the type of build.\" FORCE)\n  set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS \"Debug\" \"Release\"\n    \"MinSizeRel\" \"RelWithDebInfo\")\nendif()\n\n# Full set of test files (you can override these; see below)\nset(PYBIND11_TEST_FILES\n  test_buffers.cpp\n  test_builtin_casters.cpp\n  test_call_policies.cpp\n  test_callbacks.cpp\n  test_chrono.cpp\n  test_class.cpp\n  test_constants_and_functions.cpp\n  test_copy_move.cpp\n  test_docstring_options.cpp\n  test_eigen.cpp\n  test_enum.cpp\n  test_eval.cpp\n  test_exceptions.cpp\n  test_factory_constructors.cpp\n  test_gil_scoped.cpp\n  test_iostream.cpp\n  test_kwargs_and_defaults.cpp\n  test_local_bindings.cpp\n  test_methods_and_attributes.cpp\n  test_modules.cpp\n  test_multiple_inheritance.cpp\n  test_numpy_array.cpp\n  test_numpy_dtypes.cpp\n  test_numpy_vectorize.cpp\n  test_opaque_types.cpp\n  test_operator_overloading.cpp\n  test_pickling.cpp\n  test_pytypes.cpp\n  test_sequences_and_iterators.cpp\n  test_smart_ptr.cpp\n  test_stl.cpp\n  test_stl_binders.cpp\n  test_tagbased_polymorphic.cpp\n  test_virtual_functions.cpp\n)\n\n# 
Invoking cmake with something like:\n#     cmake -DPYBIND11_TEST_OVERRIDE=\"test_callbacks.cpp;test_picking.cpp\" ..\n# lets you override the tests that get compiled and run.  You can restore to all tests with:\n#     cmake -DPYBIND11_TEST_OVERRIDE= ..\nif (PYBIND11_TEST_OVERRIDE)\n  set(PYBIND11_TEST_FILES ${PYBIND11_TEST_OVERRIDE})\nendif()\n\nstring(REPLACE \".cpp\" \".py\" PYBIND11_PYTEST_FILES \"${PYBIND11_TEST_FILES}\")\n\n# Contains the set of test files that require pybind11_cross_module_tests to be\n# built; if none of these are built (i.e. because TEST_OVERRIDE is used and\n# doesn't include them) the second module doesn't get built.\nset(PYBIND11_CROSS_MODULE_TESTS\n  test_exceptions.py\n  test_local_bindings.py\n  test_stl.py\n  test_stl_binders.py\n)\n\n# Check if Eigen is available; if not, remove from PYBIND11_TEST_FILES (but\n# keep it in PYBIND11_PYTEST_FILES, so that we get the \"eigen is not installed\"\n# skip message).\nlist(FIND PYBIND11_TEST_FILES test_eigen.cpp PYBIND11_TEST_FILES_EIGEN_I)\nif(PYBIND11_TEST_FILES_EIGEN_I GREATER -1)\n  # Try loading via newer Eigen's Eigen3Config first (bypassing tools/FindEigen3.cmake).\n  # Eigen 3.3.1+ exports a cmake 3.0+ target for handling dependency requirements, but also\n  # produces a fatal error if loaded from a pre-3.0 cmake.\n  if (NOT CMAKE_VERSION VERSION_LESS 3.0)\n    find_package(Eigen3 3.2.7 QUIET CONFIG)\n    if (EIGEN3_FOUND)\n      if (EIGEN3_VERSION_STRING AND NOT EIGEN3_VERSION_STRING VERSION_LESS 3.3.1)\n        set(PYBIND11_EIGEN_VIA_TARGET 1)\n      endif()\n    endif()\n  endif()\n  if (NOT EIGEN3_FOUND)\n    # Couldn't load via target, so fall back to allowing module mode finding, which will pick up\n    # tools/FindEigen3.cmake\n    find_package(Eigen3 3.2.7 QUIET)\n  endif()\n\n  if(EIGEN3_FOUND)\n    # Eigen 3.3.1+ cmake sets EIGEN3_VERSION_STRING (and hard codes the version when installed\n    # rather than looking it up in the cmake script); older versions, and the\n    # 
tools/FindEigen3.cmake, set EIGEN3_VERSION instead.\n    if(NOT EIGEN3_VERSION AND EIGEN3_VERSION_STRING)\n      set(EIGEN3_VERSION ${EIGEN3_VERSION_STRING})\n    endif()\n    message(STATUS \"Building tests with Eigen v${EIGEN3_VERSION}\")\n  else()\n    list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I})\n    message(STATUS \"Building tests WITHOUT Eigen\")\n  endif()\nendif()\n\n# Optional dependency for some tests (boost::variant is only supported with version >= 1.56)\nfind_package(Boost 1.56)\n\n# Compile with compiler warnings turned on\nfunction(pybind11_enable_warnings target_name)\n  if(MSVC)\n    target_compile_options(${target_name} PRIVATE /W4)\n  else()\n      target_compile_options(${target_name} PRIVATE -Wall -Wextra -Wconversion -Wcast-qual -Wdeprecated)\n  endif()\n\n  if(PYBIND11_WERROR)\n    if(MSVC)\n      target_compile_options(${target_name} PRIVATE /WX)\n    else()\n      target_compile_options(${target_name} PRIVATE -Werror)\n    endif()\n  endif()\nendfunction()\n\nset(test_targets pybind11_tests)\n\n# Build pybind11_cross_module_tests if any test_whatever.py are being built that require it\nforeach(t ${PYBIND11_CROSS_MODULE_TESTS})\n  list(FIND PYBIND11_PYTEST_FILES ${t} i)\n  if (i GREATER -1)\n    list(APPEND test_targets pybind11_cross_module_tests)\n    break()\n  endif()\nendforeach()\n\nset(testdir ${CMAKE_CURRENT_SOURCE_DIR})\nforeach(target ${test_targets})\n  set(test_files ${PYBIND11_TEST_FILES})\n  if(NOT target STREQUAL \"pybind11_tests\")\n    set(test_files \"\")\n  endif()\n\n  # Create the binding library\n  pybind11_add_module(${target} THIN_LTO ${target}.cpp ${test_files} ${PYBIND11_HEADERS})\n  pybind11_enable_warnings(${target})\n\n  if(MSVC)\n    target_compile_options(${target} PRIVATE /utf-8)\n  endif()\n\n  if(EIGEN3_FOUND)\n    if (PYBIND11_EIGEN_VIA_TARGET)\n      target_link_libraries(${target} PRIVATE Eigen3::Eigen)\n    else()\n      target_include_directories(${target} PRIVATE 
${EIGEN3_INCLUDE_DIR})\n    endif()\n    target_compile_definitions(${target} PRIVATE -DPYBIND11_TEST_EIGEN)\n  endif()\n\n  if(Boost_FOUND)\n    target_include_directories(${target} PRIVATE ${Boost_INCLUDE_DIRS})\n    target_compile_definitions(${target} PRIVATE -DPYBIND11_TEST_BOOST)\n  endif()\n\n  # Always write the output file directly into the 'tests' directory (even on MSVC)\n  if(NOT CMAKE_LIBRARY_OUTPUT_DIRECTORY)\n    set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${testdir})\n    foreach(config ${CMAKE_CONFIGURATION_TYPES})\n      string(TOUPPER ${config} config)\n      set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_DIRECTORY_${config} ${testdir})\n    endforeach()\n  endif()\nendforeach()\n\n# Make sure pytest is found or produce a fatal error\nif(NOT PYBIND11_PYTEST_FOUND)\n  execute_process(COMMAND ${PYTHON_EXECUTABLE} -c \"import pytest; print(pytest.__version__)\"\n                  RESULT_VARIABLE pytest_not_found OUTPUT_VARIABLE pytest_version ERROR_QUIET)\n  if(pytest_not_found)\n    message(FATAL_ERROR \"Running the tests requires pytest. Please install it manually\"\n                        \" (try: ${PYTHON_EXECUTABLE} -m pip install pytest)\")\n  elseif(pytest_version VERSION_LESS 3.0)\n    message(FATAL_ERROR \"Running the tests requires pytest >= 3.0. 
Found: ${pytest_version}\"\n                        \"Please update it (try: ${PYTHON_EXECUTABLE} -m pip install -U pytest)\")\n  endif()\n  set(PYBIND11_PYTEST_FOUND TRUE CACHE INTERNAL \"\")\nendif()\n\nif(CMAKE_VERSION VERSION_LESS 3.2)\n  set(PYBIND11_USES_TERMINAL \"\")\nelse()\n  set(PYBIND11_USES_TERMINAL \"USES_TERMINAL\")\nendif()\n\n# A single command to compile and run the tests\nadd_custom_target(pytest COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYBIND11_PYTEST_FILES}\n                  DEPENDS ${test_targets} WORKING_DIRECTORY ${testdir} ${PYBIND11_USES_TERMINAL})\n\nif(PYBIND11_TEST_OVERRIDE)\n  add_custom_command(TARGET pytest POST_BUILD\n    COMMAND ${CMAKE_COMMAND} -E echo \"Note: not all tests run: -DPYBIND11_TEST_OVERRIDE is in effect\")\nendif()\n\n# Add a check target to run all the tests, starting with pytest (we add dependencies to this below)\nadd_custom_target(check DEPENDS pytest)\n\n# The remaining tests only apply when being built as part of the pybind11 project, but not if the\n# tests are being built independently.\nif (NOT PROJECT_NAME STREQUAL \"pybind11\")\n  return()\nendif()\n\n# Add a post-build comment to show the primary test suite .so size and, if a previous size, compare it:\nadd_custom_command(TARGET pybind11_tests POST_BUILD\n  COMMAND ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/tools/libsize.py\n  $<TARGET_FILE:pybind11_tests> ${CMAKE_CURRENT_BINARY_DIR}/sosize-$<TARGET_FILE_NAME:pybind11_tests>.txt)\n\n# Test embedding the interpreter. Provides the `cpptest` target.\nadd_subdirectory(test_embed)\n\n# Test CMake build using functions and targets from subdirectory or installed location\nadd_subdirectory(test_cmake_build)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/conftest.py",
    "content": "\"\"\"pytest configuration\n\nExtends output capture as needed by pybind11: ignore constructors, optional unordered lines.\nAdds docstring and exceptions message sanitizers: ignore Python 2 vs 3 differences.\n\"\"\"\n\nimport pytest\nimport textwrap\nimport difflib\nimport re\nimport sys\nimport contextlib\nimport platform\nimport gc\n\n_unicode_marker = re.compile(r'u(\\'[^\\']*\\')')\n_long_marker = re.compile(r'([0-9])L')\n_hexadecimal = re.compile(r'0x[0-9a-fA-F]+')\n\n\ndef _strip_and_dedent(s):\n    \"\"\"For triple-quote strings\"\"\"\n    return textwrap.dedent(s.lstrip('\\n').rstrip())\n\n\ndef _split_and_sort(s):\n    \"\"\"For output which does not require specific line order\"\"\"\n    return sorted(_strip_and_dedent(s).splitlines())\n\n\ndef _make_explanation(a, b):\n    \"\"\"Explanation for a failed assert -- the a and b arguments are List[str]\"\"\"\n    return [\"--- actual / +++ expected\"] + [line.strip('\\n') for line in difflib.ndiff(a, b)]\n\n\nclass Output(object):\n    \"\"\"Basic output post-processing and comparison\"\"\"\n    def __init__(self, string):\n        self.string = string\n        self.explanation = []\n\n    def __str__(self):\n        return self.string\n\n    def __eq__(self, other):\n        # Ignore constructor/destructor output which is prefixed with \"###\"\n        a = [line for line in self.string.strip().splitlines() if not line.startswith(\"###\")]\n        b = _strip_and_dedent(other).splitlines()\n        if a == b:\n            return True\n        else:\n            self.explanation = _make_explanation(a, b)\n            return False\n\n\nclass Unordered(Output):\n    \"\"\"Custom comparison for output without strict line ordering\"\"\"\n    def __eq__(self, other):\n        a = _split_and_sort(self.string)\n        b = _split_and_sort(other)\n        if a == b:\n            return True\n        else:\n            self.explanation = _make_explanation(a, b)\n            return False\n\n\nclass 
Capture(object):\n    def __init__(self, capfd):\n        self.capfd = capfd\n        self.out = \"\"\n        self.err = \"\"\n\n    def __enter__(self):\n        self.capfd.readouterr()\n        return self\n\n    def __exit__(self, *args):\n        self.out, self.err = self.capfd.readouterr()\n\n    def __eq__(self, other):\n        a = Output(self.out)\n        b = other\n        if a == b:\n            return True\n        else:\n            self.explanation = a.explanation\n            return False\n\n    def __str__(self):\n        return self.out\n\n    def __contains__(self, item):\n        return item in self.out\n\n    @property\n    def unordered(self):\n        return Unordered(self.out)\n\n    @property\n    def stderr(self):\n        return Output(self.err)\n\n\n@pytest.fixture\ndef capture(capsys):\n    \"\"\"Extended `capsys` with context manager and custom equality operators\"\"\"\n    return Capture(capsys)\n\n\nclass SanitizedString(object):\n    def __init__(self, sanitizer):\n        self.sanitizer = sanitizer\n        self.string = \"\"\n        self.explanation = []\n\n    def __call__(self, thing):\n        self.string = self.sanitizer(thing)\n        return self\n\n    def __eq__(self, other):\n        a = self.string\n        b = _strip_and_dedent(other)\n        if a == b:\n            return True\n        else:\n            self.explanation = _make_explanation(a.splitlines(), b.splitlines())\n            return False\n\n\ndef _sanitize_general(s):\n    s = s.strip()\n    s = s.replace(\"pybind11_tests.\", \"m.\")\n    s = s.replace(\"unicode\", \"str\")\n    s = _long_marker.sub(r\"\\1\", s)\n    s = _unicode_marker.sub(r\"\\1\", s)\n    return s\n\n\ndef _sanitize_docstring(thing):\n    s = thing.__doc__\n    s = _sanitize_general(s)\n    return s\n\n\n@pytest.fixture\ndef doc():\n    \"\"\"Sanitize docstrings and add custom failure explanation\"\"\"\n    return SanitizedString(_sanitize_docstring)\n\n\ndef _sanitize_message(thing):\n  
  s = str(thing)\n    s = _sanitize_general(s)\n    s = _hexadecimal.sub(\"0\", s)\n    return s\n\n\n@pytest.fixture\ndef msg():\n    \"\"\"Sanitize messages and add custom failure explanation\"\"\"\n    return SanitizedString(_sanitize_message)\n\n\n# noinspection PyUnusedLocal\ndef pytest_assertrepr_compare(op, left, right):\n    \"\"\"Hook to insert custom failure explanation\"\"\"\n    if hasattr(left, 'explanation'):\n        return left.explanation\n\n\n@contextlib.contextmanager\ndef suppress(exception):\n    \"\"\"Suppress the desired exception\"\"\"\n    try:\n        yield\n    except exception:\n        pass\n\n\ndef gc_collect():\n    ''' Run the garbage collector twice (needed when running\n    reference counting tests with PyPy) '''\n    gc.collect()\n    gc.collect()\n\n\ndef pytest_configure():\n    \"\"\"Add import suppression and test requirements to `pytest` namespace\"\"\"\n    try:\n        import numpy as np\n    except ImportError:\n        np = None\n    try:\n        import scipy\n    except ImportError:\n        scipy = None\n    try:\n        from pybind11_tests.eigen import have_eigen\n    except ImportError:\n        have_eigen = False\n    pypy = platform.python_implementation() == \"PyPy\"\n\n    skipif = pytest.mark.skipif\n    pytest.suppress = suppress\n    pytest.requires_numpy = skipif(not np, reason=\"numpy is not installed\")\n    pytest.requires_scipy = skipif(not np, reason=\"scipy is not installed\")\n    pytest.requires_eigen_and_numpy = skipif(not have_eigen or not np,\n                                             reason=\"eigen and/or numpy are not installed\")\n    pytest.requires_eigen_and_scipy = skipif(\n        not have_eigen or not scipy, reason=\"eigen and/or scipy are not installed\")\n    pytest.unsupported_on_pypy = skipif(pypy, reason=\"unsupported on PyPy\")\n    pytest.unsupported_on_py2 = skipif(sys.version_info.major < 3,\n                                       reason=\"unsupported on Python 2.x\")\n    
pytest.gc_collect = gc_collect\n\n\ndef _test_import_pybind11():\n    \"\"\"Early diagnostic for test module initialization errors\n\n    When there is an error during initialization, the first import will report the\n    real error while all subsequent imports will report nonsense. This import test\n    is done early (in the pytest configuration file, before any tests) in order to\n    avoid the noise of having all tests fail with identical error messages.\n\n    Any possible exception is caught here and reported manually *without* the stack\n    trace. This further reduces noise since the trace would only show pytest internals\n    which are not useful for debugging pybind11 module issues.\n    \"\"\"\n    # noinspection PyBroadException\n    try:\n        import pybind11_tests  # noqa: F401 imported but unused\n    except Exception as e:\n        print(\"Failed to import pybind11_tests from pytest:\")\n        print(\"  {}: {}\".format(type(e).__name__, e))\n        sys.exit(1)\n\n\n_test_import_pybind11()\n"
  },
  {
    "path": "src/third_party/pybind11/tests/constructor_stats.h",
    "content": "#pragma once\n/*\n    tests/constructor_stats.h -- framework for printing and tracking object\n    instance lifetimes in example/test code.\n\n    Copyright (c) 2016 Jason Rhinelander <jason@imaginary.ca>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n\nThis header provides a few useful tools for writing examples or tests that want to check and/or\ndisplay object instance lifetimes.  It requires that you include this header and add the following\nfunction calls to constructors:\n\n    class MyClass {\n        MyClass() { ...; print_default_created(this); }\n        ~MyClass() { ...; print_destroyed(this); }\n        MyClass(const MyClass &c) { ...; print_copy_created(this); }\n        MyClass(MyClass &&c) { ...; print_move_created(this); }\n        MyClass(int a, int b) { ...; print_created(this, a, b); }\n        MyClass &operator=(const MyClass &c) { ...; print_copy_assigned(this); }\n        MyClass &operator=(MyClass &&c) { ...; print_move_assigned(this); }\n\n        ...\n    }\n\nYou can find various examples of these in several of the existing testing .cpp files.  
(Of course\nyou don't need to add any of the above constructors/operators that you don't actually have, except\nfor the destructor).\n\nEach of these will print an appropriate message such as:\n\n    ### MyClass @ 0x2801910 created via default constructor\n    ### MyClass @ 0x27fa780 created 100 200\n    ### MyClass @ 0x2801910 destroyed\n    ### MyClass @ 0x27fa780 destroyed\n\nYou can also include extra arguments (such as the 100, 200 in the output above, coming from the\nvalue constructor) for all of the above methods which will be included in the output.\n\nFor testing, each of these also keeps track the created instances and allows you to check how many\nof the various constructors have been invoked from the Python side via code such as:\n\n    from pybind11_tests import ConstructorStats\n    cstats = ConstructorStats.get(MyClass)\n    print(cstats.alive())\n    print(cstats.default_constructions)\n\nNote that `.alive()` should usually be the first thing you call as it invokes Python's garbage\ncollector to actually destroy objects that aren't yet referenced.\n\nFor everything except copy and move constructors and destructors, any extra values given to the\nprint_...() function is stored in a class-specific values list which you can retrieve and inspect\nfrom the ConstructorStats instance `.values()` method.\n\nIn some cases, when you need to track instances of a C++ class not registered with pybind11, you\nneed to add a function returning the ConstructorStats for the C++ class; this can be done with:\n\n    m.def(\"get_special_cstats\", &ConstructorStats::get<SpecialClass>, py::return_value_policy::reference)\n\nFinally, you can suppress the output messages, but keep the constructor tracking (for\ninspection/testing in python) by using the functions with `print_` replaced with `track_` (e.g.\n`track_copy_created(this)`).\n\n*/\n\n#include \"pybind11_tests.h\"\n#include <unordered_map>\n#include <list>\n#include <typeindex>\n#include <sstream>\n\nclass 
ConstructorStats {\nprotected:\n    std::unordered_map<void*, int> _instances; // Need a map rather than set because members can shared address with parents\n    std::list<std::string> _values; // Used to track values (e.g. of value constructors)\npublic:\n    int default_constructions = 0;\n    int copy_constructions = 0;\n    int move_constructions = 0;\n    int copy_assignments = 0;\n    int move_assignments = 0;\n\n    void copy_created(void *inst) {\n        created(inst);\n        copy_constructions++;\n    }\n\n    void move_created(void *inst) {\n        created(inst);\n        move_constructions++;\n    }\n\n    void default_created(void *inst) {\n        created(inst);\n        default_constructions++;\n    }\n\n    void created(void *inst) {\n        ++_instances[inst];\n    }\n\n    void destroyed(void *inst) {\n        if (--_instances[inst] < 0)\n            throw std::runtime_error(\"cstats.destroyed() called with unknown \"\n                                     \"instance; potential double-destruction \"\n                                     \"or a missing cstats.created()\");\n    }\n\n    static void gc() {\n        // Force garbage collection to ensure any pending destructors are invoked:\n#if defined(PYPY_VERSION)\n        PyObject *globals = PyEval_GetGlobals();\n        PyObject *result = PyRun_String(\n            \"import gc\\n\"\n            \"for i in range(2):\"\n            \"    gc.collect()\\n\",\n            Py_file_input, globals, globals);\n        if (result == nullptr)\n            throw py::error_already_set();\n        Py_DECREF(result);\n#else\n        py::module::import(\"gc\").attr(\"collect\")();\n#endif\n    }\n\n    int alive() {\n        gc();\n        int total = 0;\n        for (const auto &p : _instances)\n            if (p.second > 0)\n                total += p.second;\n        return total;\n    }\n\n    void value() {} // Recursion terminator\n    // Takes one or more values, converts them to strings, then stores 
them.\n    template <typename T, typename... Tmore> void value(const T &v, Tmore &&...args) {\n        std::ostringstream oss;\n        oss << v;\n        _values.push_back(oss.str());\n        value(std::forward<Tmore>(args)...);\n    }\n\n    // Move out stored values\n    py::list values() {\n        py::list l;\n        for (const auto &v : _values) l.append(py::cast(v));\n        _values.clear();\n        return l;\n    }\n\n    // Gets constructor stats from a C++ type index\n    static ConstructorStats& get(std::type_index type) {\n        static std::unordered_map<std::type_index, ConstructorStats> all_cstats;\n        return all_cstats[type];\n    }\n\n    // Gets constructor stats from a C++ type\n    template <typename T> static ConstructorStats& get() {\n#if defined(PYPY_VERSION)\n        gc();\n#endif\n        return get(typeid(T));\n    }\n\n    // Gets constructor stats from a Python class\n    static ConstructorStats& get(py::object class_) {\n        auto &internals = py::detail::get_internals();\n        const std::type_index *t1 = nullptr, *t2 = nullptr;\n        try {\n            auto *type_info = internals.registered_types_py.at((PyTypeObject *) class_.ptr()).at(0);\n            for (auto &p : internals.registered_types_cpp) {\n                if (p.second == type_info) {\n                    if (t1) {\n                        t2 = &p.first;\n                        break;\n                    }\n                    t1 = &p.first;\n                }\n            }\n        }\n        catch (const std::out_of_range &) {}\n        if (!t1) throw std::runtime_error(\"Unknown class passed to ConstructorStats::get()\");\n        auto &cs1 = get(*t1);\n        // If we have both a t1 and t2 match, one is probably the trampoline class; return whichever\n        // has more constructions (typically one or the other will be 0)\n        if (t2) {\n            auto &cs2 = get(*t2);\n            int cs1_total = cs1.default_constructions + 
cs1.copy_constructions + cs1.move_constructions + (int) cs1._values.size();\n            int cs2_total = cs2.default_constructions + cs2.copy_constructions + cs2.move_constructions + (int) cs2._values.size();\n            if (cs2_total > cs1_total) return cs2;\n        }\n        return cs1;\n    }\n};\n\n// To track construction/destruction, you need to call these methods from the various\n// constructors/operators.  The ones that take extra values record the given values in the\n// constructor stats values for later inspection.\ntemplate <class T> void track_copy_created(T *inst) { ConstructorStats::get<T>().copy_created(inst); }\ntemplate <class T> void track_move_created(T *inst) { ConstructorStats::get<T>().move_created(inst); }\ntemplate <class T, typename... Values> void track_copy_assigned(T *, Values &&...values) {\n    auto &cst = ConstructorStats::get<T>();\n    cst.copy_assignments++;\n    cst.value(std::forward<Values>(values)...);\n}\ntemplate <class T, typename... Values> void track_move_assigned(T *, Values &&...values) {\n    auto &cst = ConstructorStats::get<T>();\n    cst.move_assignments++;\n    cst.value(std::forward<Values>(values)...);\n}\ntemplate <class T, typename... Values> void track_default_created(T *inst, Values &&...values) {\n    auto &cst = ConstructorStats::get<T>();\n    cst.default_created(inst);\n    cst.value(std::forward<Values>(values)...);\n}\ntemplate <class T, typename... Values> void track_created(T *inst, Values &&...values) {\n    auto &cst = ConstructorStats::get<T>();\n    cst.created(inst);\n    cst.value(std::forward<Values>(values)...);\n}\ntemplate <class T, typename... Values> void track_destroyed(T *inst) {\n    ConstructorStats::get<T>().destroyed(inst);\n}\ntemplate <class T, typename... 
Values> void track_values(T *, Values &&...values) {\n    ConstructorStats::get<T>().value(std::forward<Values>(values)...);\n}\n\n/// Don't cast pointers to Python, print them as strings\ninline const char *format_ptrs(const char *p) { return p; }\ntemplate <typename T>\npy::str format_ptrs(T *p) { return \"{:#x}\"_s.format(reinterpret_cast<std::uintptr_t>(p)); }\ntemplate <typename T>\nauto format_ptrs(T &&x) -> decltype(std::forward<T>(x)) { return std::forward<T>(x); }\n\ntemplate <class T, typename... Output>\nvoid print_constr_details(T *inst, const std::string &action, Output &&...output) {\n    py::print(\"###\", py::type_id<T>(), \"@\", format_ptrs(inst), action,\n              format_ptrs(std::forward<Output>(output))...);\n}\n\n// Verbose versions of the above:\ntemplate <class T, typename... Values> void print_copy_created(T *inst, Values &&...values) { // NB: this prints, but doesn't store, given values\n    print_constr_details(inst, \"created via copy constructor\", values...);\n    track_copy_created(inst);\n}\ntemplate <class T, typename... Values> void print_move_created(T *inst, Values &&...values) { // NB: this prints, but doesn't store, given values\n    print_constr_details(inst, \"created via move constructor\", values...);\n    track_move_created(inst);\n}\ntemplate <class T, typename... Values> void print_copy_assigned(T *inst, Values &&...values) {\n    print_constr_details(inst, \"assigned via copy assignment\", values...);\n    track_copy_assigned(inst, values...);\n}\ntemplate <class T, typename... Values> void print_move_assigned(T *inst, Values &&...values) {\n    print_constr_details(inst, \"assigned via move assignment\", values...);\n    track_move_assigned(inst, values...);\n}\ntemplate <class T, typename... Values> void print_default_created(T *inst, Values &&...values) {\n    print_constr_details(inst, \"created via default constructor\", values...);\n    track_default_created(inst, values...);\n}\ntemplate <class T, typename... 
Values> void print_created(T *inst, Values &&...values) {\n    print_constr_details(inst, \"created\", values...);\n    track_created(inst, values...);\n}\ntemplate <class T, typename... Values> void print_destroyed(T *inst, Values &&...values) { // Prints but doesn't store given values\n    print_constr_details(inst, \"destroyed\", values...);\n    track_destroyed(inst);\n}\ntemplate <class T, typename... Values> void print_values(T *inst, Values &&...values) {\n    print_constr_details(inst, \":\", values...);\n    track_values(inst, values...);\n}\n\n"
  },
  {
    "path": "src/third_party/pybind11/tests/local_bindings.h",
    "content": "#pragma once\n#include \"pybind11_tests.h\"\n\n/// Simple class used to test py::local:\ntemplate <int> class LocalBase {\npublic:\n    LocalBase(int i) : i(i) { }\n    int i = -1;\n};\n\n/// Registered with py::module_local in both main and secondary modules:\nusing LocalType = LocalBase<0>;\n/// Registered without py::module_local in both modules:\nusing NonLocalType = LocalBase<1>;\n/// A second non-local type (for stl_bind tests):\nusing NonLocal2 = LocalBase<2>;\n/// Tests within-module, different-compilation-unit local definition conflict:\nusing LocalExternal = LocalBase<3>;\n/// Mixed: registered local first, then global\nusing MixedLocalGlobal = LocalBase<4>;\n/// Mixed: global first, then local\nusing MixedGlobalLocal = LocalBase<5>;\n\n/// Registered with py::module_local only in the secondary module:\nusing ExternalType1 = LocalBase<6>;\nusing ExternalType2 = LocalBase<7>;\n\nusing LocalVec = std::vector<LocalType>;\nusing LocalVec2 = std::vector<NonLocal2>;\nusing LocalMap = std::unordered_map<std::string, LocalType>;\nusing NonLocalVec = std::vector<NonLocalType>;\nusing NonLocalVec2 = std::vector<NonLocal2>;\nusing NonLocalMap = std::unordered_map<std::string, NonLocalType>;\nusing NonLocalMap2 = std::unordered_map<std::string, uint8_t>;\n\nPYBIND11_MAKE_OPAQUE(LocalVec);\nPYBIND11_MAKE_OPAQUE(LocalVec2);\nPYBIND11_MAKE_OPAQUE(LocalMap);\nPYBIND11_MAKE_OPAQUE(NonLocalVec);\n//PYBIND11_MAKE_OPAQUE(NonLocalVec2); // same type as LocalVec2\nPYBIND11_MAKE_OPAQUE(NonLocalMap);\nPYBIND11_MAKE_OPAQUE(NonLocalMap2);\n\n\n// Simple bindings (used with the above):\ntemplate <typename T, int Adjust = 0, typename... 
Args>\npy::class_<T> bind_local(Args && ...args) {\n    return py::class_<T>(std::forward<Args>(args)...)\n        .def(py::init<int>())\n        .def(\"get\", [](T &i) { return i.i + Adjust; });\n};\n\n// Simulate a foreign library base class (to match the example in the docs):\nnamespace pets {\nclass Pet {\npublic:\n    Pet(std::string name) : name_(name) {}\n    std::string name_;\n    const std::string &name() { return name_; }\n};\n}\n\nstruct MixGL { int i; MixGL(int i) : i{i} {} };\nstruct MixGL2 { int i; MixGL2(int i) : i{i} {} };\n"
  },
  {
    "path": "src/third_party/pybind11/tests/object.h",
    "content": "#if !defined(__OBJECT_H)\n#define __OBJECT_H\n\n#include <atomic>\n#include \"constructor_stats.h\"\n\n/// Reference counted object base class\nclass Object {\npublic:\n    /// Default constructor\n    Object() { print_default_created(this); }\n\n    /// Copy constructor\n    Object(const Object &) : m_refCount(0) { print_copy_created(this); }\n\n    /// Return the current reference count\n    int getRefCount() const { return m_refCount; };\n\n    /// Increase the object's reference count by one\n    void incRef() const { ++m_refCount; }\n\n    /** \\brief Decrease the reference count of\n     * the object and possibly deallocate it.\n     *\n     * The object will automatically be deallocated once\n     * the reference count reaches zero.\n     */\n    void decRef(bool dealloc = true) const {\n        --m_refCount;\n        if (m_refCount == 0 && dealloc)\n            delete this;\n        else if (m_refCount < 0)\n            throw std::runtime_error(\"Internal error: reference count < 0!\");\n    }\n\n    virtual std::string toString() const = 0;\nprotected:\n    /** \\brief Virtual protected deconstructor.\n     * (Will only be called by \\ref ref)\n     */\n    virtual ~Object() { print_destroyed(this); }\nprivate:\n    mutable std::atomic<int> m_refCount { 0 };\n};\n\n// Tag class used to track constructions of ref objects.  When we track constructors, below, we\n// track and print out the actual class (e.g. ref<MyObject>), and *also* add a fake tracker for\n// ref_tag.  This lets us check that the total number of ref<Anything> constructors/destructors is\n// correct without having to check each individual ref<Whatever> type individually.\nclass ref_tag {};\n\n/**\n * \\brief Reference counting helper\n *\n * The \\a ref refeference template is a simple wrapper to store a\n * pointer to an object. It takes care of increasing and decreasing\n * the reference count of the object. 
When the last reference goes\n * out of scope, the associated object will be deallocated.\n *\n * \\ingroup libcore\n */\ntemplate <typename T> class ref {\npublic:\n    /// Create a nullptr reference\n    ref() : m_ptr(nullptr) { print_default_created(this); track_default_created((ref_tag*) this); }\n\n    /// Construct a reference from a pointer\n    ref(T *ptr) : m_ptr(ptr) {\n        if (m_ptr) ((Object *) m_ptr)->incRef();\n\n        print_created(this, \"from pointer\", m_ptr); track_created((ref_tag*) this, \"from pointer\");\n\n    }\n\n    /// Copy constructor\n    ref(const ref &r) : m_ptr(r.m_ptr) {\n        if (m_ptr)\n            ((Object *) m_ptr)->incRef();\n\n        print_copy_created(this, \"with pointer\", m_ptr); track_copy_created((ref_tag*) this);\n    }\n\n    /// Move constructor\n    ref(ref &&r) : m_ptr(r.m_ptr) {\n        r.m_ptr = nullptr;\n\n        print_move_created(this, \"with pointer\", m_ptr); track_move_created((ref_tag*) this);\n    }\n\n    /// Destroy this reference\n    ~ref() {\n        if (m_ptr)\n            ((Object *) m_ptr)->decRef();\n\n        print_destroyed(this); track_destroyed((ref_tag*) this);\n    }\n\n    /// Move another reference into the current one\n    ref& operator=(ref&& r) {\n        print_move_assigned(this, \"pointer\", r.m_ptr); track_move_assigned((ref_tag*) this);\n\n        if (*this == r)\n            return *this;\n        if (m_ptr)\n            ((Object *) m_ptr)->decRef();\n        m_ptr = r.m_ptr;\n        r.m_ptr = nullptr;\n        return *this;\n    }\n\n    /// Overwrite this reference with another reference\n    ref& operator=(const ref& r) {\n        print_copy_assigned(this, \"pointer\", r.m_ptr); track_copy_assigned((ref_tag*) this);\n\n        if (m_ptr == r.m_ptr)\n            return *this;\n        if (m_ptr)\n            ((Object *) m_ptr)->decRef();\n        m_ptr = r.m_ptr;\n        if (m_ptr)\n            ((Object *) m_ptr)->incRef();\n        return *this;\n    }\n\n    /// 
Overwrite this reference with a pointer to another object\n    ref& operator=(T *ptr) {\n        print_values(this, \"assigned pointer\"); track_values((ref_tag*) this, \"assigned pointer\");\n\n        if (m_ptr == ptr)\n            return *this;\n        if (m_ptr)\n            ((Object *) m_ptr)->decRef();\n        m_ptr = ptr;\n        if (m_ptr)\n            ((Object *) m_ptr)->incRef();\n        return *this;\n    }\n\n    /// Compare this reference with another reference\n    bool operator==(const ref &r) const { return m_ptr == r.m_ptr; }\n\n    /// Compare this reference with another reference\n    bool operator!=(const ref &r) const { return m_ptr != r.m_ptr; }\n\n    /// Compare this reference with a pointer\n    bool operator==(const T* ptr) const { return m_ptr == ptr; }\n\n    /// Compare this reference with a pointer\n    bool operator!=(const T* ptr) const { return m_ptr != ptr; }\n\n    /// Access the object referenced by this reference\n    T* operator->() { return m_ptr; }\n\n    /// Access the object referenced by this reference\n    const T* operator->() const { return m_ptr; }\n\n    /// Return a C++ reference to the referenced object\n    T& operator*() { return *m_ptr; }\n\n    /// Return a const C++ reference to the referenced object\n    const T& operator*() const { return *m_ptr; }\n\n    /// Return a pointer to the referenced object\n    operator T* () { return m_ptr; }\n\n    /// Return a const pointer to the referenced object\n    T* get_ptr() { return m_ptr; }\n\n    /// Return a pointer to the referenced object\n    const T* get_ptr() const { return m_ptr; }\nprivate:\n    T *m_ptr;\n};\n\n#endif /* __OBJECT_H */\n"
  },
  {
    "path": "src/third_party/pybind11/tests/pybind11_cross_module_tests.cpp",
    "content": "/*\n    tests/pybind11_cross_module_tests.cpp -- contains tests that require multiple modules\n\n    Copyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"local_bindings.h\"\n#include <pybind11/stl_bind.h>\n#include <numeric>\n\nPYBIND11_MODULE(pybind11_cross_module_tests, m) {\n    m.doc() = \"pybind11 cross-module test module\";\n\n    // test_local_bindings.py tests:\n    //\n    // Definitions here are tested by importing both this module and the\n    // relevant pybind11_tests submodule from a test_whatever.py\n\n    // test_load_external\n    bind_local<ExternalType1>(m, \"ExternalType1\", py::module_local());\n    bind_local<ExternalType2>(m, \"ExternalType2\", py::module_local());\n\n    // test_exceptions.py\n    m.def(\"raise_runtime_error\", []() { PyErr_SetString(PyExc_RuntimeError, \"My runtime error\"); throw py::error_already_set(); });\n    m.def(\"raise_value_error\", []() { PyErr_SetString(PyExc_ValueError, \"My value error\"); throw py::error_already_set(); });\n    m.def(\"throw_pybind_value_error\", []() { throw py::value_error(\"pybind11 value error\"); });\n    m.def(\"throw_pybind_type_error\", []() { throw py::type_error(\"pybind11 type error\"); });\n    m.def(\"throw_stop_iteration\", []() { throw py::stop_iteration(); });\n\n    // test_local_bindings.py\n    // Local to both:\n    bind_local<LocalType, 1>(m, \"LocalType\", py::module_local())\n        .def(\"get2\", [](LocalType &t) { return t.i + 2; })\n        ;\n\n    // Can only be called with our python type:\n    m.def(\"local_value\", [](LocalType &l) { return l.i; });\n\n    // test_nonlocal_failure\n    // This registration will fail (global registration when LocalFail is already registered\n    // globally in the main test module):\n    m.def(\"register_nonlocal\", [m]() {\n       
 bind_local<NonLocalType, 0>(m, \"NonLocalType\");\n    });\n\n    // test_stl_bind_local\n    // stl_bind.h binders defaults to py::module_local if the types are local or converting:\n    py::bind_vector<LocalVec>(m, \"LocalVec\");\n    py::bind_map<LocalMap>(m, \"LocalMap\");\n\n    // test_stl_bind_global\n    // and global if the type (or one of the types, for the map) is global (so these will fail,\n    // assuming pybind11_tests is already loaded):\n    m.def(\"register_nonlocal_vec\", [m]() {\n        py::bind_vector<NonLocalVec>(m, \"NonLocalVec\");\n    });\n    m.def(\"register_nonlocal_map\", [m]() {\n        py::bind_map<NonLocalMap>(m, \"NonLocalMap\");\n    });\n    // The default can, however, be overridden to global using `py::module_local()` or\n    // `py::module_local(false)`.\n    // Explicitly made local:\n    py::bind_vector<NonLocalVec2>(m, \"NonLocalVec2\", py::module_local());\n    // Explicitly made global (and so will fail to bind):\n    m.def(\"register_nonlocal_map2\", [m]() {\n        py::bind_map<NonLocalMap2>(m, \"NonLocalMap2\", py::module_local(false));\n    });\n\n    // test_mixed_local_global\n    // We try this both with the global type registered first and vice versa (the order shouldn't\n    // matter).\n    m.def(\"register_mixed_global_local\", [m]() {\n        bind_local<MixedGlobalLocal, 200>(m, \"MixedGlobalLocal\", py::module_local());\n    });\n    m.def(\"register_mixed_local_global\", [m]() {\n        bind_local<MixedLocalGlobal, 2000>(m, \"MixedLocalGlobal\", py::module_local(false));\n    });\n    m.def(\"get_mixed_gl\", [](int i) { return MixedGlobalLocal(i); });\n    m.def(\"get_mixed_lg\", [](int i) { return MixedLocalGlobal(i); });\n\n    // test_internal_locals_differ\n    m.def(\"local_cpp_types_addr\", []() { return (uintptr_t) &py::detail::registered_local_types_cpp(); });\n\n    // test_stl_caster_vs_stl_bind\n    py::bind_vector<std::vector<int>>(m, \"VectorInt\");\n\n    
m.def(\"load_vector_via_binding\", [](std::vector<int> &v) {\n        return std::accumulate(v.begin(), v.end(), 0);\n    });\n\n    // test_cross_module_calls\n    m.def(\"return_self\", [](LocalVec *v) { return v; });\n    m.def(\"return_copy\", [](const LocalVec &v) { return LocalVec(v); });\n\n    class Dog : public pets::Pet { public: Dog(std::string name) : Pet(name) {}; };\n    py::class_<pets::Pet>(m, \"Pet\", py::module_local())\n        .def(\"name\", &pets::Pet::name);\n    // Binding for local extending class:\n    py::class_<Dog, pets::Pet>(m, \"Dog\")\n        .def(py::init<std::string>());\n    m.def(\"pet_name\", [](pets::Pet &p) { return p.name(); });\n\n    py::class_<MixGL>(m, \"MixGL\", py::module_local()).def(py::init<int>());\n    m.def(\"get_gl_value\", [](MixGL &o) { return o.i + 100; });\n\n    py::class_<MixGL2>(m, \"MixGL2\", py::module_local()).def(py::init<int>());\n\n    // test_vector_bool\n    // We can't test both stl.h and stl_bind.h conversions of `std::vector<bool>` within\n    // the same module (it would be an ODR violation). Therefore `bind_vector` of `bool`\n    // is defined here and tested in `test_stl_binders.py`.\n    py::bind_vector<std::vector<bool>>(m, \"VectorBool\");\n\n    // test_missing_header_message\n    // The main module already includes stl.h, but we need to test the error message\n    // which appears when this header is missing.\n    m.def(\"missing_header_arg\", [](std::vector<float>) { });\n    m.def(\"missing_header_return\", []() { return std::vector<float>(); });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/pybind11_tests.cpp",
    "content": "/*\n    tests/pybind11_tests.cpp -- pybind example plugin\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n\n#include <functional>\n#include <list>\n\n/*\nFor testing purposes, we define a static global variable here in a function that each individual\ntest .cpp calls with its initialization lambda.  It's convenient here because we can just not\ncompile some test files to disable/ignore some of the test code.\n\nIt is NOT recommended as a way to use pybind11 in practice, however: the initialization order will\nbe essentially random, which is okay for our test scripts (there are no dependencies between the\nindividual pybind11 test .cpp files), but most likely not what you want when using pybind11\nproductively.\n\nInstead, see the \"How can I reduce the build time?\" question in the \"Frequently asked questions\"\nsection of the documentation for good practice on splitting binding code over multiple files.\n*/\nstd::list<std::function<void(py::module &)>> &initializers() {\n    static std::list<std::function<void(py::module &)>> inits;\n    return inits;\n}\n\ntest_initializer::test_initializer(Initializer init) {\n    initializers().push_back(init);\n}\n\ntest_initializer::test_initializer(const char *submodule_name, Initializer init) {\n    initializers().push_back([=](py::module &parent) {\n        auto m = parent.def_submodule(submodule_name);\n        init(m);\n    });\n}\n\nvoid bind_ConstructorStats(py::module &m) {\n    py::class_<ConstructorStats>(m, \"ConstructorStats\")\n        .def(\"alive\", &ConstructorStats::alive)\n        .def(\"values\", &ConstructorStats::values)\n        .def_readwrite(\"default_constructions\", &ConstructorStats::default_constructions)\n        .def_readwrite(\"copy_assignments\", 
&ConstructorStats::copy_assignments)\n        .def_readwrite(\"move_assignments\", &ConstructorStats::move_assignments)\n        .def_readwrite(\"copy_constructions\", &ConstructorStats::copy_constructions)\n        .def_readwrite(\"move_constructions\", &ConstructorStats::move_constructions)\n        .def_static(\"get\", (ConstructorStats &(*)(py::object)) &ConstructorStats::get, py::return_value_policy::reference_internal)\n\n        // Not exactly ConstructorStats, but related: expose the internal pybind number of registered instances\n        // to allow instance cleanup checks (invokes a GC first)\n        .def_static(\"detail_reg_inst\", []() {\n            ConstructorStats::gc();\n            return py::detail::get_internals().registered_instances.size();\n        })\n        ;\n}\n\nPYBIND11_MODULE(pybind11_tests, m) {\n    m.doc() = \"pybind11 test module\";\n\n    bind_ConstructorStats(m);\n\n#if !defined(NDEBUG)\n    m.attr(\"debug_enabled\") = true;\n#else\n    m.attr(\"debug_enabled\") = false;\n#endif\n\n    py::class_<UserType>(m, \"UserType\", \"A `py::class_` type for testing\")\n        .def(py::init<>())\n        .def(py::init<int>())\n        .def(\"get_value\", &UserType::value, \"Get value using a method\")\n        .def(\"set_value\", &UserType::set, \"Set value using a method\")\n        .def_property(\"value\", &UserType::value, &UserType::set, \"Get/set value using a property\")\n        .def(\"__repr__\", [](const UserType& u) { return \"UserType({})\"_s.format(u.value()); });\n\n    py::class_<IncType, UserType>(m, \"IncType\")\n        .def(py::init<>())\n        .def(py::init<int>())\n        .def(\"__repr__\", [](const IncType& u) { return \"IncType({})\"_s.format(u.value()); });\n\n    for (const auto &initializer : initializers())\n        initializer(m);\n\n    if (!py::hasattr(m, \"have_eigen\")) m.attr(\"have_eigen\") = false;\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/pybind11_tests.h",
    "content": "#pragma once\n#include <pybind11/pybind11.h>\n\n#if defined(_MSC_VER) && _MSC_VER < 1910\n// We get some really long type names here which causes MSVC 2015 to emit warnings\n#  pragma warning(disable: 4503) // warning C4503: decorated name length exceeded, name was truncated\n#endif\n\nnamespace py = pybind11;\nusing namespace pybind11::literals;\n\nclass test_initializer {\n    using Initializer = void (*)(py::module &);\n\npublic:\n    test_initializer(Initializer init);\n    test_initializer(const char *submodule_name, Initializer init);\n};\n\n#define TEST_SUBMODULE(name, variable)                   \\\n    void test_submodule_##name(py::module &);            \\\n    test_initializer name(#name, test_submodule_##name); \\\n    void test_submodule_##name(py::module &variable)\n\n\n/// Dummy type which is not exported anywhere -- something to trigger a conversion error\nstruct UnregisteredType { };\n\n/// A user-defined type which is exported and can be used by any test\nclass UserType {\npublic:\n    UserType() = default;\n    UserType(int i) : i(i) { }\n\n    int value() const { return i; }\n    void set(int set) { i = set; }\n\nprivate:\n    int i = -1;\n};\n\n/// Like UserType, but increments `value` on copy for quick reference vs. copy tests\nclass IncType : public UserType {\npublic:\n    using UserType::UserType;\n    IncType() = default;\n    IncType(const IncType &other) : IncType(other.value() + 1) { }\n    IncType(IncType &&) = delete;\n    IncType &operator=(const IncType &) = delete;\n    IncType &operator=(IncType &&) = delete;\n};\n\n/// Custom cast-only type that casts to a string \"rvalue\" or \"lvalue\" depending on the cast context.\n/// Used to test recursive casters (e.g. 
std::tuple, stl containers).\nstruct RValueCaster {};\nNAMESPACE_BEGIN(pybind11)\nNAMESPACE_BEGIN(detail)\ntemplate<> class type_caster<RValueCaster> {\npublic:\n    PYBIND11_TYPE_CASTER(RValueCaster, _(\"RValueCaster\"));\n    static handle cast(RValueCaster &&, return_value_policy, handle) { return py::str(\"rvalue\").release(); }\n    static handle cast(const RValueCaster &, return_value_policy, handle) { return py::str(\"lvalue\").release(); }\n};\nNAMESPACE_END(detail)\nNAMESPACE_END(pybind11)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/pytest.ini",
    "content": "[pytest]\nminversion = 3.0\nnorecursedirs = test_cmake_build test_embed\naddopts =\n    # show summary of skipped tests\n    -rs\n    # capture only Python print and C++ py::print, but not C output (low-level Python errors)\n    --capture=sys\nfilterwarnings =\n    # make warnings into errors but ignore certain third-party extension issues\n    error\n    # importing scipy submodules on some version of Python\n    ignore::ImportWarning\n    # bogus numpy ABI warning (see numpy/#432)\n    ignore:.*numpy.dtype size changed.*:RuntimeWarning\n    ignore:.*numpy.ufunc size changed.*:RuntimeWarning\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_buffers.cpp",
    "content": "/*\n    tests/test_buffers.cpp -- supporting Pythons' buffer protocol\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n\nTEST_SUBMODULE(buffers, m) {\n    // test_from_python / test_to_python:\n    class Matrix {\n    public:\n        Matrix(ssize_t rows, ssize_t cols) : m_rows(rows), m_cols(cols) {\n            print_created(this, std::to_string(m_rows) + \"x\" + std::to_string(m_cols) + \" matrix\");\n            m_data = new float[(size_t) (rows*cols)];\n            memset(m_data, 0, sizeof(float) * (size_t) (rows * cols));\n        }\n\n        Matrix(const Matrix &s) : m_rows(s.m_rows), m_cols(s.m_cols) {\n            print_copy_created(this, std::to_string(m_rows) + \"x\" + std::to_string(m_cols) + \" matrix\");\n            m_data = new float[(size_t) (m_rows * m_cols)];\n            memcpy(m_data, s.m_data, sizeof(float) * (size_t) (m_rows * m_cols));\n        }\n\n        Matrix(Matrix &&s) : m_rows(s.m_rows), m_cols(s.m_cols), m_data(s.m_data) {\n            print_move_created(this);\n            s.m_rows = 0;\n            s.m_cols = 0;\n            s.m_data = nullptr;\n        }\n\n        ~Matrix() {\n            print_destroyed(this, std::to_string(m_rows) + \"x\" + std::to_string(m_cols) + \" matrix\");\n            delete[] m_data;\n        }\n\n        Matrix &operator=(const Matrix &s) {\n            print_copy_assigned(this, std::to_string(m_rows) + \"x\" + std::to_string(m_cols) + \" matrix\");\n            delete[] m_data;\n            m_rows = s.m_rows;\n            m_cols = s.m_cols;\n            m_data = new float[(size_t) (m_rows * m_cols)];\n            memcpy(m_data, s.m_data, sizeof(float) * (size_t) (m_rows * m_cols));\n            return *this;\n        }\n\n        Matrix &operator=(Matrix &&s) {\n           
 print_move_assigned(this, std::to_string(m_rows) + \"x\" + std::to_string(m_cols) + \" matrix\");\n            if (&s != this) {\n                delete[] m_data;\n                m_rows = s.m_rows; m_cols = s.m_cols; m_data = s.m_data;\n                s.m_rows = 0; s.m_cols = 0; s.m_data = nullptr;\n            }\n            return *this;\n        }\n\n        float operator()(ssize_t i, ssize_t j) const {\n            return m_data[(size_t) (i*m_cols + j)];\n        }\n\n        float &operator()(ssize_t i, ssize_t j) {\n            return m_data[(size_t) (i*m_cols + j)];\n        }\n\n        float *data() { return m_data; }\n\n        ssize_t rows() const { return m_rows; }\n        ssize_t cols() const { return m_cols; }\n    private:\n        ssize_t m_rows;\n        ssize_t m_cols;\n        float *m_data;\n    };\n    py::class_<Matrix>(m, \"Matrix\", py::buffer_protocol())\n        .def(py::init<ssize_t, ssize_t>())\n        /// Construct from a buffer\n        .def(py::init([](py::buffer b) {\n            py::buffer_info info = b.request();\n            if (info.format != py::format_descriptor<float>::format() || info.ndim != 2)\n                throw std::runtime_error(\"Incompatible buffer format!\");\n\n            auto v = new Matrix(info.shape[0], info.shape[1]);\n            memcpy(v->data(), info.ptr, sizeof(float) * (size_t) (v->rows() * v->cols()));\n            return v;\n        }))\n\n       .def(\"rows\", &Matrix::rows)\n       .def(\"cols\", &Matrix::cols)\n\n        /// Bare bones interface\n       .def(\"__getitem__\", [](const Matrix &m, std::pair<ssize_t, ssize_t> i) {\n            if (i.first >= m.rows() || i.second >= m.cols())\n                throw py::index_error();\n            return m(i.first, i.second);\n        })\n       .def(\"__setitem__\", [](Matrix &m, std::pair<ssize_t, ssize_t> i, float v) {\n            if (i.first >= m.rows() || i.second >= m.cols())\n                throw py::index_error();\n            m(i.first, 
i.second) = v;\n        })\n       /// Provide buffer access\n       .def_buffer([](Matrix &m) -> py::buffer_info {\n            return py::buffer_info(\n                m.data(),                               /* Pointer to buffer */\n                { m.rows(), m.cols() },                 /* Buffer dimensions */\n                { sizeof(float) * size_t(m.cols()),     /* Strides (in bytes) for each index */\n                  sizeof(float) }\n            );\n        })\n        ;\n\n\n    // test_inherited_protocol\n    class SquareMatrix : public Matrix {\n    public:\n        SquareMatrix(ssize_t n) : Matrix(n, n) { }\n    };\n    // Derived classes inherit the buffer protocol and the buffer access function\n    py::class_<SquareMatrix, Matrix>(m, \"SquareMatrix\")\n        .def(py::init<ssize_t>());\n\n\n    // test_pointer_to_member_fn\n    // Tests that passing a pointer to member to the base class works in\n    // the derived class.\n    struct Buffer {\n        int32_t value = 0;\n\n        py::buffer_info get_buffer_info() {\n            return py::buffer_info(&value, sizeof(value),\n                                   py::format_descriptor<int32_t>::format(), 1);\n        }\n    };\n    py::class_<Buffer>(m, \"Buffer\", py::buffer_protocol())\n        .def(py::init<>())\n        .def_readwrite(\"value\", &Buffer::value)\n        .def_buffer(&Buffer::get_buffer_info);\n\n\n    class ConstBuffer {\n        std::unique_ptr<int32_t> value;\n\n    public:\n        int32_t get_value() const { return *value; }\n        void set_value(int32_t v) { *value = v; }\n\n        py::buffer_info get_buffer_info() const {\n            return py::buffer_info(value.get(), sizeof(*value),\n                                   py::format_descriptor<int32_t>::format(), 1);\n        }\n\n        ConstBuffer() : value(new int32_t{0}) { };\n    };\n    py::class_<ConstBuffer>(m, \"ConstBuffer\", py::buffer_protocol())\n        .def(py::init<>())\n        .def_property(\"value\", 
&ConstBuffer::get_value, &ConstBuffer::set_value)\n        .def_buffer(&ConstBuffer::get_buffer_info);\n\n    struct DerivedBuffer : public Buffer { };\n    py::class_<DerivedBuffer>(m, \"DerivedBuffer\", py::buffer_protocol())\n        .def(py::init<>())\n        .def_readwrite(\"value\", (int32_t DerivedBuffer::*) &DerivedBuffer::value)\n        .def_buffer(&DerivedBuffer::get_buffer_info);\n\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_buffers.py",
    "content": "import struct\nimport pytest\nfrom pybind11_tests import buffers as m\nfrom pybind11_tests import ConstructorStats\n\npytestmark = pytest.requires_numpy\n\nwith pytest.suppress(ImportError):\n    import numpy as np\n\n\ndef test_from_python():\n    with pytest.raises(RuntimeError) as excinfo:\n        m.Matrix(np.array([1, 2, 3]))  # trying to assign a 1D array\n    assert str(excinfo.value) == \"Incompatible buffer format!\"\n\n    m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\n    m4 = m.Matrix(m3)\n\n    for i in range(m4.rows()):\n        for j in range(m4.cols()):\n            assert m3[i, j] == m4[i, j]\n\n    cstats = ConstructorStats.get(m.Matrix)\n    assert cstats.alive() == 1\n    del m3, m4\n    assert cstats.alive() == 0\n    assert cstats.values() == [\"2x3 matrix\"]\n    assert cstats.copy_constructions == 0\n    # assert cstats.move_constructions >= 0  # Don't invoke any\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n\n# PyPy: Memory leak in the \"np.array(m, copy=False)\" call\n# https://bitbucket.org/pypy/pypy/issues/2444\n@pytest.unsupported_on_pypy\ndef test_to_python():\n    mat = m.Matrix(5, 4)\n    assert memoryview(mat).shape == (5, 4)\n\n    assert mat[2, 3] == 0\n    mat[2, 3] = 4.0\n    mat[3, 2] = 7.0\n    assert mat[2, 3] == 4\n    assert mat[3, 2] == 7\n    assert struct.unpack_from('f', mat, (3 * 4 + 2) * 4) == (7, )\n    assert struct.unpack_from('f', mat, (2 * 4 + 3) * 4) == (4, )\n\n    mat2 = np.array(mat, copy=False)\n    assert mat2.shape == (5, 4)\n    assert abs(mat2).sum() == 11\n    assert mat2[2, 3] == 4 and mat2[3, 2] == 7\n    mat2[2, 3] = 5\n    assert mat2[2, 3] == 5\n\n    cstats = ConstructorStats.get(m.Matrix)\n    assert cstats.alive() == 1\n    del mat\n    pytest.gc_collect()\n    assert cstats.alive() == 1\n    del mat2  # holds a mat reference\n    pytest.gc_collect()\n    assert cstats.alive() == 0\n    assert cstats.values() == [\"5x4 matrix\"]\n 
   assert cstats.copy_constructions == 0\n    # assert cstats.move_constructions >= 0  # Don't invoke any\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n\n@pytest.unsupported_on_pypy\ndef test_inherited_protocol():\n    \"\"\"SquareMatrix is derived from Matrix and inherits the buffer protocol\"\"\"\n\n    matrix = m.SquareMatrix(5)\n    assert memoryview(matrix).shape == (5, 5)\n    assert np.asarray(matrix).shape == (5, 5)\n\n\n@pytest.unsupported_on_pypy\ndef test_pointer_to_member_fn():\n    for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]:\n        buf = cls()\n        buf.value = 0x12345678\n        value = struct.unpack('i', bytearray(buf))[0]\n        assert value == 0x12345678\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_builtin_casters.cpp",
    "content": "/*\n    tests/test_builtin_casters.cpp -- Casters available without any additional headers\n\n    Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include <pybind11/complex.h>\n\n#if defined(_MSC_VER)\n#  pragma warning(push)\n#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant\n#endif\n\nTEST_SUBMODULE(builtin_casters, m) {\n    // test_simple_string\n    m.def(\"string_roundtrip\", [](const char *s) { return s; });\n\n    // test_unicode_conversion\n    // Some test characters in utf16 and utf32 encodings.  The last one (the 𝐀) contains a null byte\n    char32_t a32 = 0x61 /*a*/, z32 = 0x7a /*z*/, ib32 = 0x203d /*‽*/, cake32 = 0x1f382 /*🎂*/,              mathbfA32 = 0x1d400 /*𝐀*/;\n    char16_t b16 = 0x62 /*b*/, z16 = 0x7a,       ib16 = 0x203d,       cake16_1 = 0xd83c, cake16_2 = 0xdf82, mathbfA16_1 = 0xd835, mathbfA16_2 = 0xdc00;\n    std::wstring wstr;\n    wstr.push_back(0x61); // a\n    wstr.push_back(0x2e18); // ⸘\n    if (sizeof(wchar_t) == 2) { wstr.push_back(mathbfA16_1); wstr.push_back(mathbfA16_2); } // 𝐀, utf16\n    else { wstr.push_back((wchar_t) mathbfA32); } // 𝐀, utf32\n    wstr.push_back(0x7a); // z\n\n    m.def(\"good_utf8_string\", []() { return std::string(u8\"Say utf8\\u203d \\U0001f382 \\U0001d400\"); }); // Say utf8‽ 🎂 𝐀\n    m.def(\"good_utf16_string\", [=]() { return std::u16string({ b16, ib16, cake16_1, cake16_2, mathbfA16_1, mathbfA16_2, z16 }); }); // b‽🎂𝐀z\n    m.def(\"good_utf32_string\", [=]() { return std::u32string({ a32, mathbfA32, cake32, ib32, z32 }); }); // a𝐀🎂‽z\n    m.def(\"good_wchar_string\", [=]() { return wstr; }); // a‽𝐀z\n    m.def(\"bad_utf8_string\", []()  { return std::string(\"abc\\xd0\" \"def\"); });\n    m.def(\"bad_utf16_string\", [=]() { return std::u16string({ b16, char16_t(0xd800), z16 
}); });\n    // Under Python 2.7, invalid unicode UTF-32 characters don't appear to trigger UnicodeDecodeError\n    if (PY_MAJOR_VERSION >= 3)\n        m.def(\"bad_utf32_string\", [=]() { return std::u32string({ a32, char32_t(0xd800), z32 }); });\n    if (PY_MAJOR_VERSION >= 3 || sizeof(wchar_t) == 2)\n        m.def(\"bad_wchar_string\", [=]() { return std::wstring({ wchar_t(0x61), wchar_t(0xd800) }); });\n    m.def(\"u8_Z\", []() -> char { return 'Z'; });\n    m.def(\"u8_eacute\", []() -> char { return '\\xe9'; });\n    m.def(\"u16_ibang\", [=]() -> char16_t { return ib16; });\n    m.def(\"u32_mathbfA\", [=]() -> char32_t { return mathbfA32; });\n    m.def(\"wchar_heart\", []() -> wchar_t { return 0x2665; });\n\n    // test_single_char_arguments\n    m.attr(\"wchar_size\") = py::cast(sizeof(wchar_t));\n    m.def(\"ord_char\", [](char c) -> int { return static_cast<unsigned char>(c); });\n    m.def(\"ord_char_lv\", [](char &c) -> int { return static_cast<unsigned char>(c); });\n    m.def(\"ord_char16\", [](char16_t c) -> uint16_t { return c; });\n    m.def(\"ord_char16_lv\", [](char16_t &c) -> uint16_t { return c; });\n    m.def(\"ord_char32\", [](char32_t c) -> uint32_t { return c; });\n    m.def(\"ord_wchar\", [](wchar_t c) -> int { return c; });\n\n    // test_bytes_to_string\n    m.def(\"strlen\", [](char *s) { return strlen(s); });\n    m.def(\"string_length\", [](std::string s) { return s.length(); });\n\n    // test_string_view\n#ifdef PYBIND11_HAS_STRING_VIEW\n    m.attr(\"has_string_view\") = true;\n    m.def(\"string_view_print\",   [](std::string_view s)    { py::print(s, s.size()); });\n    m.def(\"string_view16_print\", [](std::u16string_view s) { py::print(s, s.size()); });\n    m.def(\"string_view32_print\", [](std::u32string_view s) { py::print(s, s.size()); });\n    m.def(\"string_view_chars\",   [](std::string_view s)    { py::list l; for (auto c : s) l.append((std::uint8_t) c); return l; });\n    m.def(\"string_view16_chars\", 
[](std::u16string_view s) { py::list l; for (auto c : s) l.append((int) c); return l; });\n    m.def(\"string_view32_chars\", [](std::u32string_view s) { py::list l; for (auto c : s) l.append((int) c); return l; });\n    m.def(\"string_view_return\",   []() { return std::string_view(u8\"utf8 secret \\U0001f382\"); });\n    m.def(\"string_view16_return\", []() { return std::u16string_view(u\"utf16 secret \\U0001f382\"); });\n    m.def(\"string_view32_return\", []() { return std::u32string_view(U\"utf32 secret \\U0001f382\"); });\n#endif\n\n    // test_integer_casting\n    m.def(\"i32_str\", [](std::int32_t v) { return std::to_string(v); });\n    m.def(\"u32_str\", [](std::uint32_t v) { return std::to_string(v); });\n    m.def(\"i64_str\", [](std::int64_t v) { return std::to_string(v); });\n    m.def(\"u64_str\", [](std::uint64_t v) { return std::to_string(v); });\n\n    // test_tuple\n    m.def(\"pair_passthrough\", [](std::pair<bool, std::string> input) {\n        return std::make_pair(input.second, input.first);\n    }, \"Return a pair in reversed order\");\n    m.def(\"tuple_passthrough\", [](std::tuple<bool, std::string, int> input) {\n        return std::make_tuple(std::get<2>(input), std::get<1>(input), std::get<0>(input));\n    }, \"Return a triple in reversed order\");\n    m.def(\"empty_tuple\", []() { return std::tuple<>(); });\n    static std::pair<RValueCaster, RValueCaster> lvpair;\n    static std::tuple<RValueCaster, RValueCaster, RValueCaster> lvtuple;\n    static std::pair<RValueCaster, std::tuple<RValueCaster, std::pair<RValueCaster, RValueCaster>>> lvnested;\n    m.def(\"rvalue_pair\", []() { return std::make_pair(RValueCaster{}, RValueCaster{}); });\n    m.def(\"lvalue_pair\", []() -> const decltype(lvpair) & { return lvpair; });\n    m.def(\"rvalue_tuple\", []() { return std::make_tuple(RValueCaster{}, RValueCaster{}, RValueCaster{}); });\n    m.def(\"lvalue_tuple\", []() -> const decltype(lvtuple) & { return lvtuple; });\n    
m.def(\"rvalue_nested\", []() {\n        return std::make_pair(RValueCaster{}, std::make_tuple(RValueCaster{}, std::make_pair(RValueCaster{}, RValueCaster{}))); });\n    m.def(\"lvalue_nested\", []() -> const decltype(lvnested) & { return lvnested; });\n\n    // test_builtins_cast_return_none\n    m.def(\"return_none_string\", []() -> std::string * { return nullptr; });\n    m.def(\"return_none_char\",   []() -> const char *  { return nullptr; });\n    m.def(\"return_none_bool\",   []() -> bool *        { return nullptr; });\n    m.def(\"return_none_int\",    []() -> int *         { return nullptr; });\n    m.def(\"return_none_float\",  []() -> float *       { return nullptr; });\n\n    // test_none_deferred\n    m.def(\"defer_none_cstring\", [](char *) { return false; });\n    m.def(\"defer_none_cstring\", [](py::none) { return true; });\n    m.def(\"defer_none_custom\", [](UserType *) { return false; });\n    m.def(\"defer_none_custom\", [](py::none) { return true; });\n    m.def(\"nodefer_none_void\", [](void *) { return true; });\n    m.def(\"nodefer_none_void\", [](py::none) { return false; });\n\n    // test_void_caster\n    m.def(\"load_nullptr_t\", [](std::nullptr_t) {}); // not useful, but it should still compile\n    m.def(\"cast_nullptr_t\", []() { return std::nullptr_t{}; });\n\n    // test_bool_caster\n    m.def(\"bool_passthrough\", [](bool arg) { return arg; });\n    m.def(\"bool_passthrough_noconvert\", [](bool arg) { return arg; }, py::arg().noconvert());\n\n    // test_reference_wrapper\n    m.def(\"refwrap_builtin\", [](std::reference_wrapper<int> p) { return 10 * p.get(); });\n    m.def(\"refwrap_usertype\", [](std::reference_wrapper<UserType> p) { return p.get().value(); });\n    // Not currently supported (std::pair caster has return-by-value cast operator);\n    // triggers static_assert failure.\n    //m.def(\"refwrap_pair\", [](std::reference_wrapper<std::pair<int, int>>) { });\n\n    m.def(\"refwrap_list\", [](bool copy) {\n        static 
IncType x1(1), x2(2);\n        py::list l;\n        for (auto &f : {std::ref(x1), std::ref(x2)}) {\n            l.append(py::cast(f, copy ? py::return_value_policy::copy\n                                      : py::return_value_policy::reference));\n        }\n        return l;\n    }, \"copy\"_a);\n\n    m.def(\"refwrap_iiw\", [](const IncType &w) { return w.value(); });\n    m.def(\"refwrap_call_iiw\", [](IncType &w, py::function f) {\n        py::list l;\n        l.append(f(std::ref(w)));\n        l.append(f(std::cref(w)));\n        IncType x(w.value());\n        l.append(f(std::ref(x)));\n        IncType y(w.value());\n        auto r3 = std::ref(y);\n        l.append(f(r3));\n        return l;\n    });\n\n    // test_complex\n    m.def(\"complex_cast\", [](float x) { return \"{}\"_s.format(x); });\n    m.def(\"complex_cast\", [](std::complex<float> x) { return \"({}, {})\"_s.format(x.real(), x.imag()); });\n\n    // test int vs. long (Python 2)\n    m.def(\"int_cast\", []() {return (int) 42;});\n    m.def(\"long_cast\", []() {return (long) 42;});\n    m.def(\"longlong_cast\", []() {return  ULLONG_MAX;});\n\n    /// test void* cast operator\n    m.def(\"test_void_caster\", []() -> bool {\n        void *v = (void *) 0xabcd;\n        py::object o = py::cast(v);\n        return py::cast<void *>(o) == v;\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_builtin_casters.py",
    "content": "# Python < 3 needs this: coding=utf-8\nimport pytest\n\nfrom pybind11_tests import builtin_casters as m\nfrom pybind11_tests import UserType, IncType\n\n\ndef test_simple_string():\n    assert m.string_roundtrip(\"const char *\") == \"const char *\"\n\n\ndef test_unicode_conversion():\n    \"\"\"Tests unicode conversion and error reporting.\"\"\"\n    assert m.good_utf8_string() == u\"Say utf8‽ 🎂 𝐀\"\n    assert m.good_utf16_string() == u\"b‽🎂𝐀z\"\n    assert m.good_utf32_string() == u\"a𝐀🎂‽z\"\n    assert m.good_wchar_string() == u\"a⸘𝐀z\"\n\n    with pytest.raises(UnicodeDecodeError):\n        m.bad_utf8_string()\n\n    with pytest.raises(UnicodeDecodeError):\n        m.bad_utf16_string()\n\n    # These are provided only if they actually fail (they don't when 32-bit and under Python 2.7)\n    if hasattr(m, \"bad_utf32_string\"):\n        with pytest.raises(UnicodeDecodeError):\n            m.bad_utf32_string()\n    if hasattr(m, \"bad_wchar_string\"):\n        with pytest.raises(UnicodeDecodeError):\n            m.bad_wchar_string()\n\n    assert m.u8_Z() == 'Z'\n    assert m.u8_eacute() == u'é'\n    assert m.u16_ibang() == u'‽'\n    assert m.u32_mathbfA() == u'𝐀'\n    assert m.wchar_heart() == u'♥'\n\n\ndef test_single_char_arguments():\n    \"\"\"Tests failures for passing invalid inputs to char-accepting functions\"\"\"\n    def toobig_message(r):\n        return \"Character code point not in range({0:#x})\".format(r)\n    toolong_message = \"Expected a character, but multi-character string found\"\n\n    assert m.ord_char(u'a') == 0x61  # simple ASCII\n    assert m.ord_char_lv(u'b') == 0x62\n    assert m.ord_char(u'é') == 0xE9  # requires 2 bytes in utf-8, but can be stuffed in a char\n    with pytest.raises(ValueError) as excinfo:\n        assert m.ord_char(u'Ā') == 0x100  # requires 2 bytes, doesn't fit in a char\n    assert str(excinfo.value) == toobig_message(0x100)\n    with pytest.raises(ValueError) as excinfo:\n        assert 
m.ord_char(u'ab')\n    assert str(excinfo.value) == toolong_message\n\n    assert m.ord_char16(u'a') == 0x61\n    assert m.ord_char16(u'é') == 0xE9\n    assert m.ord_char16_lv(u'ê') == 0xEA\n    assert m.ord_char16(u'Ā') == 0x100\n    assert m.ord_char16(u'‽') == 0x203d\n    assert m.ord_char16(u'♥') == 0x2665\n    assert m.ord_char16_lv(u'♡') == 0x2661\n    with pytest.raises(ValueError) as excinfo:\n        assert m.ord_char16(u'🎂') == 0x1F382  # requires surrogate pair\n    assert str(excinfo.value) == toobig_message(0x10000)\n    with pytest.raises(ValueError) as excinfo:\n        assert m.ord_char16(u'aa')\n    assert str(excinfo.value) == toolong_message\n\n    assert m.ord_char32(u'a') == 0x61\n    assert m.ord_char32(u'é') == 0xE9\n    assert m.ord_char32(u'Ā') == 0x100\n    assert m.ord_char32(u'‽') == 0x203d\n    assert m.ord_char32(u'♥') == 0x2665\n    assert m.ord_char32(u'🎂') == 0x1F382\n    with pytest.raises(ValueError) as excinfo:\n        assert m.ord_char32(u'aa')\n    assert str(excinfo.value) == toolong_message\n\n    assert m.ord_wchar(u'a') == 0x61\n    assert m.ord_wchar(u'é') == 0xE9\n    assert m.ord_wchar(u'Ā') == 0x100\n    assert m.ord_wchar(u'‽') == 0x203d\n    assert m.ord_wchar(u'♥') == 0x2665\n    if m.wchar_size == 2:\n        with pytest.raises(ValueError) as excinfo:\n            assert m.ord_wchar(u'🎂') == 0x1F382  # requires surrogate pair\n        assert str(excinfo.value) == toobig_message(0x10000)\n    else:\n        assert m.ord_wchar(u'🎂') == 0x1F382\n    with pytest.raises(ValueError) as excinfo:\n        assert m.ord_wchar(u'aa')\n    assert str(excinfo.value) == toolong_message\n\n\ndef test_bytes_to_string():\n    \"\"\"Tests the ability to pass bytes to C++ string-accepting functions.  
Note that this is\n    one-way: the only way to return bytes to Python is via the pybind11::bytes class.\"\"\"\n    # Issue #816\n    import sys\n    byte = bytes if sys.version_info[0] < 3 else str\n\n    assert m.strlen(byte(\"hi\")) == 2\n    assert m.string_length(byte(\"world\")) == 5\n    assert m.string_length(byte(\"a\\x00b\")) == 3\n    assert m.strlen(byte(\"a\\x00b\")) == 1  # C-string limitation\n\n    # passing in a utf8 encoded string should work\n    assert m.string_length(u'💩'.encode(\"utf8\")) == 4\n\n\n@pytest.mark.skipif(not hasattr(m, \"has_string_view\"), reason=\"no <string_view>\")\ndef test_string_view(capture):\n    \"\"\"Tests support for C++17 string_view arguments and return values\"\"\"\n    assert m.string_view_chars(\"Hi\") == [72, 105]\n    assert m.string_view_chars(\"Hi 🎂\") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82]\n    assert m.string_view16_chars(\"Hi 🎂\") == [72, 105, 32, 0xd83c, 0xdf82]\n    assert m.string_view32_chars(\"Hi 🎂\") == [72, 105, 32, 127874]\n\n    assert m.string_view_return() == \"utf8 secret 🎂\"\n    assert m.string_view16_return() == \"utf16 secret 🎂\"\n    assert m.string_view32_return() == \"utf32 secret 🎂\"\n\n    with capture:\n        m.string_view_print(\"Hi\")\n        m.string_view_print(\"utf8 🎂\")\n        m.string_view16_print(\"utf16 🎂\")\n        m.string_view32_print(\"utf32 🎂\")\n    assert capture == \"\"\"\n        Hi 2\n        utf8 🎂 9\n        utf16 🎂 8\n        utf32 🎂 7\n    \"\"\"\n\n    with capture:\n        m.string_view_print(\"Hi, ascii\")\n        m.string_view_print(\"Hi, utf8 🎂\")\n        m.string_view16_print(\"Hi, utf16 🎂\")\n        m.string_view32_print(\"Hi, utf32 🎂\")\n    assert capture == \"\"\"\n        Hi, ascii 9\n        Hi, utf8 🎂 13\n        Hi, utf16 🎂 12\n        Hi, utf32 🎂 11\n    \"\"\"\n\n\ndef test_integer_casting():\n    \"\"\"Issue #929 - out-of-range integer values shouldn't be accepted\"\"\"\n    import sys\n    assert m.i32_str(-1) == \"-1\"\n    assert 
m.i64_str(-1) == \"-1\"\n    assert m.i32_str(2000000000) == \"2000000000\"\n    assert m.u32_str(2000000000) == \"2000000000\"\n    if sys.version_info < (3,):\n        assert m.i32_str(long(-1)) == \"-1\"  # noqa: F821 undefined name 'long'\n        assert m.i64_str(long(-1)) == \"-1\"  # noqa: F821 undefined name 'long'\n        assert m.i64_str(long(-999999999999)) == \"-999999999999\"  # noqa: F821 undefined name\n        assert m.u64_str(long(999999999999)) == \"999999999999\"  # noqa: F821 undefined name 'long'\n    else:\n        assert m.i64_str(-999999999999) == \"-999999999999\"\n        assert m.u64_str(999999999999) == \"999999999999\"\n\n    with pytest.raises(TypeError) as excinfo:\n        m.u32_str(-1)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n    with pytest.raises(TypeError) as excinfo:\n        m.u64_str(-1)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n    with pytest.raises(TypeError) as excinfo:\n        m.i32_str(-3000000000)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n    with pytest.raises(TypeError) as excinfo:\n        m.i32_str(3000000000)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n\n    if sys.version_info < (3,):\n        with pytest.raises(TypeError) as excinfo:\n            m.u32_str(long(-1))  # noqa: F821 undefined name 'long'\n        assert \"incompatible function arguments\" in str(excinfo.value)\n        with pytest.raises(TypeError) as excinfo:\n            m.u64_str(long(-1))  # noqa: F821 undefined name 'long'\n        assert \"incompatible function arguments\" in str(excinfo.value)\n\n\ndef test_tuple(doc):\n    \"\"\"std::pair <-> tuple & std::tuple <-> tuple\"\"\"\n    assert m.pair_passthrough((True, \"test\")) == (\"test\", True)\n    assert m.tuple_passthrough((True, \"test\", 5)) == (5, \"test\", True)\n    # Any sequence can be cast to a std::pair or std::tuple\n    assert m.pair_passthrough([True, \"test\"]) 
== (\"test\", True)\n    assert m.tuple_passthrough([True, \"test\", 5]) == (5, \"test\", True)\n    assert m.empty_tuple() == ()\n\n    assert doc(m.pair_passthrough) == \"\"\"\n        pair_passthrough(arg0: Tuple[bool, str]) -> Tuple[str, bool]\n\n        Return a pair in reversed order\n    \"\"\"\n    assert doc(m.tuple_passthrough) == \"\"\"\n        tuple_passthrough(arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool]\n\n        Return a triple in reversed order\n    \"\"\"\n\n    assert m.rvalue_pair() == (\"rvalue\", \"rvalue\")\n    assert m.lvalue_pair() == (\"lvalue\", \"lvalue\")\n    assert m.rvalue_tuple() == (\"rvalue\", \"rvalue\", \"rvalue\")\n    assert m.lvalue_tuple() == (\"lvalue\", \"lvalue\", \"lvalue\")\n    assert m.rvalue_nested() == (\"rvalue\", (\"rvalue\", (\"rvalue\", \"rvalue\")))\n    assert m.lvalue_nested() == (\"lvalue\", (\"lvalue\", (\"lvalue\", \"lvalue\")))\n\n\ndef test_builtins_cast_return_none():\n    \"\"\"Casters produced with PYBIND11_TYPE_CASTER() should convert nullptr to None\"\"\"\n    assert m.return_none_string() is None\n    assert m.return_none_char() is None\n    assert m.return_none_bool() is None\n    assert m.return_none_int() is None\n    assert m.return_none_float() is None\n\n\ndef test_none_deferred():\n    \"\"\"None passed as various argument types should defer to other overloads\"\"\"\n    assert not m.defer_none_cstring(\"abc\")\n    assert m.defer_none_cstring(None)\n    assert not m.defer_none_custom(UserType())\n    assert m.defer_none_custom(None)\n    assert m.nodefer_none_void(None)\n\n\ndef test_void_caster():\n    assert m.load_nullptr_t(None) is None\n    assert m.cast_nullptr_t() is None\n\n\ndef test_reference_wrapper():\n    \"\"\"std::reference_wrapper for builtin and user types\"\"\"\n    assert m.refwrap_builtin(42) == 420\n    assert m.refwrap_usertype(UserType(42)) == 42\n\n    with pytest.raises(TypeError) as excinfo:\n        m.refwrap_builtin(None)\n    assert \"incompatible 
function arguments\" in str(excinfo.value)\n\n    with pytest.raises(TypeError) as excinfo:\n        m.refwrap_usertype(None)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n\n    a1 = m.refwrap_list(copy=True)\n    a2 = m.refwrap_list(copy=True)\n    assert [x.value for x in a1] == [2, 3]\n    assert [x.value for x in a2] == [2, 3]\n    assert not a1[0] is a2[0] and not a1[1] is a2[1]\n\n    b1 = m.refwrap_list(copy=False)\n    b2 = m.refwrap_list(copy=False)\n    assert [x.value for x in b1] == [1, 2]\n    assert [x.value for x in b2] == [1, 2]\n    assert b1[0] is b2[0] and b1[1] is b2[1]\n\n    assert m.refwrap_iiw(IncType(5)) == 5\n    assert m.refwrap_call_iiw(IncType(10), m.refwrap_iiw) == [10, 10, 10, 10]\n\n\ndef test_complex_cast():\n    \"\"\"std::complex casts\"\"\"\n    assert m.complex_cast(1) == \"1.0\"\n    assert m.complex_cast(2j) == \"(0.0, 2.0)\"\n\n\ndef test_bool_caster():\n    \"\"\"Test bool caster implicit conversions.\"\"\"\n    convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert\n\n    def require_implicit(v):\n        pytest.raises(TypeError, noconvert, v)\n\n    def cant_convert(v):\n        pytest.raises(TypeError, convert, v)\n\n    # straight up bool\n    assert convert(True) is True\n    assert convert(False) is False\n    assert noconvert(True) is True\n    assert noconvert(False) is False\n\n    # None requires implicit conversion\n    require_implicit(None)\n    assert convert(None) is False\n\n    class A(object):\n        def __init__(self, x):\n            self.x = x\n\n        def __nonzero__(self):\n            return self.x\n\n        def __bool__(self):\n            return self.x\n\n    class B(object):\n        pass\n\n    # Arbitrary objects are not accepted\n    cant_convert(object())\n    cant_convert(B())\n\n    # Objects with __nonzero__ / __bool__ defined can be converted\n    require_implicit(A(True))\n    assert convert(A(True)) is True\n    assert convert(A(False)) is 
False\n\n\n@pytest.requires_numpy\ndef test_numpy_bool():\n    import numpy as np\n    convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert\n\n    # np.bool_ is not considered implicit\n    assert convert(np.bool_(True)) is True\n    assert convert(np.bool_(False)) is False\n    assert noconvert(np.bool_(True)) is True\n    assert noconvert(np.bool_(False)) is False\n\n\ndef test_int_long():\n    \"\"\"In Python 2, a C++ int should return a Python int rather than long\n    if possible: longs are not always accepted where ints are used (such\n    as the argument to sys.exit()). A C++ long long is always a Python\n    long.\"\"\"\n\n    import sys\n    must_be_long = type(getattr(sys, 'maxint', 1) + 1)\n    assert isinstance(m.int_cast(), int)\n    assert isinstance(m.long_cast(), int)\n    assert isinstance(m.longlong_cast(), must_be_long)\n\n\ndef test_void_caster_2():\n    assert m.test_void_caster()\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_call_policies.cpp",
    "content": "/*\n    tests/test_call_policies.cpp -- keep_alive and call_guard\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\nstruct CustomGuard {\n    static bool enabled;\n\n    CustomGuard() { enabled = true; }\n    ~CustomGuard() { enabled = false; }\n\n    static const char *report_status() { return enabled ? \"guarded\" : \"unguarded\"; }\n};\nbool CustomGuard::enabled = false;\n\nstruct DependentGuard {\n    static bool enabled;\n\n    DependentGuard() { enabled = CustomGuard::enabled; }\n    ~DependentGuard() { enabled = false; }\n\n    static const char *report_status() { return enabled ? \"guarded\" : \"unguarded\"; }\n};\nbool DependentGuard::enabled = false;\n\nTEST_SUBMODULE(call_policies, m) {\n    // Parent/Child are used in:\n    // test_keep_alive_argument, test_keep_alive_return_value, test_alive_gc_derived,\n    // test_alive_gc_multi_derived, test_return_none, test_keep_alive_constructor\n    class Child {\n    public:\n        Child() { py::print(\"Allocating child.\"); }\n        Child(const Child &) = default;\n        Child(Child &&) = default;\n        ~Child() { py::print(\"Releasing child.\"); }\n    };\n    py::class_<Child>(m, \"Child\")\n        .def(py::init<>());\n\n    class Parent {\n    public:\n        Parent() { py::print(\"Allocating parent.\"); }\n        ~Parent() { py::print(\"Releasing parent.\"); }\n        void addChild(Child *) { }\n        Child *returnChild() { return new Child(); }\n        Child *returnNullChild() { return nullptr; }\n    };\n    py::class_<Parent>(m, \"Parent\")\n        .def(py::init<>())\n        .def(py::init([](Child *) { return new Parent(); }), py::keep_alive<1, 2>())\n        .def(\"addChild\", &Parent::addChild)\n        .def(\"addChildKeepAlive\", &Parent::addChild, py::keep_alive<1, 2>())\n        
.def(\"returnChild\", &Parent::returnChild)\n        .def(\"returnChildKeepAlive\", &Parent::returnChild, py::keep_alive<1, 0>())\n        .def(\"returnNullChildKeepAliveChild\", &Parent::returnNullChild, py::keep_alive<1, 0>())\n        .def(\"returnNullChildKeepAliveParent\", &Parent::returnNullChild, py::keep_alive<0, 1>());\n\n#if !defined(PYPY_VERSION)\n    // test_alive_gc\n    class ParentGC : public Parent {\n    public:\n        using Parent::Parent;\n    };\n    py::class_<ParentGC, Parent>(m, \"ParentGC\", py::dynamic_attr())\n        .def(py::init<>());\n#endif\n\n    // test_call_guard\n    m.def(\"unguarded_call\", &CustomGuard::report_status);\n    m.def(\"guarded_call\", &CustomGuard::report_status, py::call_guard<CustomGuard>());\n\n    m.def(\"multiple_guards_correct_order\", []() {\n        return CustomGuard::report_status() + std::string(\" & \") + DependentGuard::report_status();\n    }, py::call_guard<CustomGuard, DependentGuard>());\n\n    m.def(\"multiple_guards_wrong_order\", []() {\n        return DependentGuard::report_status() + std::string(\" & \") + CustomGuard::report_status();\n    }, py::call_guard<DependentGuard, CustomGuard>());\n\n#if defined(WITH_THREAD) && !defined(PYPY_VERSION)\n    // `py::call_guard<py::gil_scoped_release>()` should work in PyPy as well,\n    // but it's unclear how to test it without `PyGILState_GetThisThreadState`.\n    auto report_gil_status = []() {\n        auto is_gil_held = false;\n        if (auto tstate = py::detail::get_thread_state_unchecked())\n            is_gil_held = (tstate == PyGILState_GetThisThreadState());\n\n        return is_gil_held ? \"GIL held\" : \"GIL released\";\n    };\n\n    m.def(\"with_gil\", report_gil_status);\n    m.def(\"without_gil\", report_gil_status, py::call_guard<py::gil_scoped_release>());\n#endif\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_call_policies.py",
    "content": "import pytest\nfrom pybind11_tests import call_policies as m\nfrom pybind11_tests import ConstructorStats\n\n\ndef test_keep_alive_argument(capture):\n    n_inst = ConstructorStats.detail_reg_inst()\n    with capture:\n        p = m.Parent()\n    assert capture == \"Allocating parent.\"\n    with capture:\n        p.addChild(m.Child())\n        assert ConstructorStats.detail_reg_inst() == n_inst + 1\n    assert capture == \"\"\"\n        Allocating child.\n        Releasing child.\n    \"\"\"\n    with capture:\n        del p\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"Releasing parent.\"\n\n    with capture:\n        p = m.Parent()\n    assert capture == \"Allocating parent.\"\n    with capture:\n        p.addChildKeepAlive(m.Child())\n        assert ConstructorStats.detail_reg_inst() == n_inst + 2\n    assert capture == \"Allocating child.\"\n    with capture:\n        del p\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"\"\"\n        Releasing parent.\n        Releasing child.\n    \"\"\"\n\n\ndef test_keep_alive_return_value(capture):\n    n_inst = ConstructorStats.detail_reg_inst()\n    with capture:\n        p = m.Parent()\n    assert capture == \"Allocating parent.\"\n    with capture:\n        p.returnChild()\n        assert ConstructorStats.detail_reg_inst() == n_inst + 1\n    assert capture == \"\"\"\n        Allocating child.\n        Releasing child.\n    \"\"\"\n    with capture:\n        del p\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"Releasing parent.\"\n\n    with capture:\n        p = m.Parent()\n    assert capture == \"Allocating parent.\"\n    with capture:\n        p.returnChildKeepAlive()\n        assert ConstructorStats.detail_reg_inst() == n_inst + 2\n    assert capture == \"Allocating child.\"\n    with capture:\n        del p\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert 
capture == \"\"\"\n        Releasing parent.\n        Releasing child.\n    \"\"\"\n\n\n# https://bitbucket.org/pypy/pypy/issues/2447\n@pytest.unsupported_on_pypy\ndef test_alive_gc(capture):\n    n_inst = ConstructorStats.detail_reg_inst()\n    p = m.ParentGC()\n    p.addChildKeepAlive(m.Child())\n    assert ConstructorStats.detail_reg_inst() == n_inst + 2\n    lst = [p]\n    lst.append(lst)   # creates a circular reference\n    with capture:\n        del p, lst\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"\"\"\n        Releasing parent.\n        Releasing child.\n    \"\"\"\n\n\ndef test_alive_gc_derived(capture):\n    class Derived(m.Parent):\n        pass\n\n    n_inst = ConstructorStats.detail_reg_inst()\n    p = Derived()\n    p.addChildKeepAlive(m.Child())\n    assert ConstructorStats.detail_reg_inst() == n_inst + 2\n    lst = [p]\n    lst.append(lst)   # creates a circular reference\n    with capture:\n        del p, lst\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"\"\"\n        Releasing parent.\n        Releasing child.\n    \"\"\"\n\n\ndef test_alive_gc_multi_derived(capture):\n    class Derived(m.Parent, m.Child):\n        def __init__(self):\n            m.Parent.__init__(self)\n            m.Child.__init__(self)\n\n    n_inst = ConstructorStats.detail_reg_inst()\n    p = Derived()\n    p.addChildKeepAlive(m.Child())\n    # +3 rather than +2 because Derived corresponds to two registered instances\n    assert ConstructorStats.detail_reg_inst() == n_inst + 3\n    lst = [p]\n    lst.append(lst)   # creates a circular reference\n    with capture:\n        del p, lst\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"\"\"\n        Releasing parent.\n        Releasing child.\n        Releasing child.\n    \"\"\"\n\n\ndef test_return_none(capture):\n    n_inst = ConstructorStats.detail_reg_inst()\n    with capture:\n        p = m.Parent()\n    
assert capture == \"Allocating parent.\"\n    with capture:\n        p.returnNullChildKeepAliveChild()\n        assert ConstructorStats.detail_reg_inst() == n_inst + 1\n    assert capture == \"\"\n    with capture:\n        del p\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"Releasing parent.\"\n\n    with capture:\n        p = m.Parent()\n    assert capture == \"Allocating parent.\"\n    with capture:\n        p.returnNullChildKeepAliveParent()\n        assert ConstructorStats.detail_reg_inst() == n_inst + 1\n    assert capture == \"\"\n    with capture:\n        del p\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"Releasing parent.\"\n\n\ndef test_keep_alive_constructor(capture):\n    n_inst = ConstructorStats.detail_reg_inst()\n\n    with capture:\n        p = m.Parent(m.Child())\n        assert ConstructorStats.detail_reg_inst() == n_inst + 2\n    assert capture == \"\"\"\n        Allocating child.\n        Allocating parent.\n    \"\"\"\n    with capture:\n        del p\n        assert ConstructorStats.detail_reg_inst() == n_inst\n    assert capture == \"\"\"\n        Releasing parent.\n        Releasing child.\n    \"\"\"\n\n\ndef test_call_guard():\n    assert m.unguarded_call() == \"unguarded\"\n    assert m.guarded_call() == \"guarded\"\n\n    assert m.multiple_guards_correct_order() == \"guarded & guarded\"\n    assert m.multiple_guards_wrong_order() == \"unguarded & guarded\"\n\n    if hasattr(m, \"with_gil\"):\n        assert m.with_gil() == \"GIL held\"\n        assert m.without_gil() == \"GIL released\"\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_callbacks.cpp",
    "content": "/*\n    tests/test_callbacks.cpp -- callbacks\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <pybind11/functional.h>\n\n\nint dummy_function(int i) { return i + 1; }\n\nTEST_SUBMODULE(callbacks, m) {\n    // test_callbacks, test_function_signatures\n    m.def(\"test_callback1\", [](py::object func) { return func(); });\n    m.def(\"test_callback2\", [](py::object func) { return func(\"Hello\", 'x', true, 5); });\n    m.def(\"test_callback3\", [](const std::function<int(int)> &func) {\n        return \"func(43) = \" + std::to_string(func(43)); });\n    m.def(\"test_callback4\", []() -> std::function<int(int)> { return [](int i) { return i+1; }; });\n    m.def(\"test_callback5\", []() {\n        return py::cpp_function([](int i) { return i+1; }, py::arg(\"number\"));\n    });\n\n    // test_keyword_args_and_generalized_unpacking\n    m.def(\"test_tuple_unpacking\", [](py::function f) {\n        auto t1 = py::make_tuple(2, 3);\n        auto t2 = py::make_tuple(5, 6);\n        return f(\"positional\", 1, *t1, 4, *t2);\n    });\n\n    m.def(\"test_dict_unpacking\", [](py::function f) {\n        auto d1 = py::dict(\"key\"_a=\"value\", \"a\"_a=1);\n        auto d2 = py::dict();\n        auto d3 = py::dict(\"b\"_a=2);\n        return f(\"positional\", 1, **d1, **d2, **d3);\n    });\n\n    m.def(\"test_keyword_args\", [](py::function f) {\n        return f(\"x\"_a=10, \"y\"_a=20);\n    });\n\n    m.def(\"test_unpacking_and_keywords1\", [](py::function f) {\n        auto args = py::make_tuple(2);\n        auto kwargs = py::dict(\"d\"_a=4);\n        return f(1, *args, \"c\"_a=3, **kwargs);\n    });\n\n    m.def(\"test_unpacking_and_keywords2\", [](py::function f) {\n        auto kwargs1 = py::dict(\"a\"_a=1);\n        auto kwargs2 = 
py::dict(\"c\"_a=3, \"d\"_a=4);\n        return f(\"positional\", *py::make_tuple(1), 2, *py::make_tuple(3, 4), 5,\n                 \"key\"_a=\"value\", **kwargs1, \"b\"_a=2, **kwargs2, \"e\"_a=5);\n    });\n\n    m.def(\"test_unpacking_error1\", [](py::function f) {\n        auto kwargs = py::dict(\"x\"_a=3);\n        return f(\"x\"_a=1, \"y\"_a=2, **kwargs); // duplicate ** after keyword\n    });\n\n    m.def(\"test_unpacking_error2\", [](py::function f) {\n        auto kwargs = py::dict(\"x\"_a=3);\n        return f(**kwargs, \"x\"_a=1); // duplicate keyword after **\n    });\n\n    m.def(\"test_arg_conversion_error1\", [](py::function f) {\n        f(234, UnregisteredType(), \"kw\"_a=567);\n    });\n\n    m.def(\"test_arg_conversion_error2\", [](py::function f) {\n        f(234, \"expected_name\"_a=UnregisteredType(), \"kw\"_a=567);\n    });\n\n    // test_lambda_closure_cleanup\n    struct Payload {\n        Payload() { print_default_created(this); }\n        ~Payload() { print_destroyed(this); }\n        Payload(const Payload &) { print_copy_created(this); }\n        Payload(Payload &&) { print_move_created(this); }\n    };\n    // Export the payload constructor statistics for testing purposes:\n    m.def(\"payload_cstats\", &ConstructorStats::get<Payload>);\n    /* Test cleanup of lambda closure */\n    m.def(\"test_cleanup\", []() -> std::function<void(void)> {\n        Payload p;\n\n        return [p]() {\n            /* p should be cleaned up when the returned function is garbage collected */\n            (void) p;\n        };\n    });\n\n    // test_cpp_function_roundtrip\n    /* Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer */\n    m.def(\"dummy_function\", &dummy_function);\n    m.def(\"dummy_function2\", [](int i, int j) { return i + j; });\n    m.def(\"roundtrip\", [](std::function<int(int)> f, bool expect_none = false) {\n        if (expect_none && f)\n            throw std::runtime_error(\"Expected None 
to be converted to empty std::function\");\n        return f;\n    }, py::arg(\"f\"), py::arg(\"expect_none\")=false);\n    m.def(\"test_dummy_function\", [](const std::function<int(int)> &f) -> std::string {\n        using fn_type = int (*)(int);\n        auto result = f.target<fn_type>();\n        if (!result) {\n            auto r = f(1);\n            return \"can't convert to function pointer: eval(1) = \" + std::to_string(r);\n        } else if (*result == dummy_function) {\n            auto r = (*result)(1);\n            return \"matches dummy_function: eval(1) = \" + std::to_string(r);\n        } else {\n            return \"argument does NOT match dummy_function. This should never happen!\";\n        }\n    });\n\n    class AbstractBase { public: virtual unsigned int func() = 0; };\n    m.def(\"func_accepting_func_accepting_base\", [](std::function<double(AbstractBase&)>) { });\n\n    struct MovableObject {\n        bool valid = true;\n\n        MovableObject() = default;\n        MovableObject(const MovableObject &) = default;\n        MovableObject &operator=(const MovableObject &) = default;\n        MovableObject(MovableObject &&o) : valid(o.valid) { o.valid = false; }\n        MovableObject &operator=(MovableObject &&o) {\n            valid = o.valid;\n            o.valid = false;\n            return *this;\n        }\n    };\n    py::class_<MovableObject>(m, \"MovableObject\");\n\n    // test_movable_object\n    m.def(\"callback_with_movable\", [](std::function<void(MovableObject &)> f) {\n        auto x = MovableObject();\n        f(x); // lvalue reference shouldn't move out object\n        return x.valid; // must still return `true`\n    });\n\n    // test_bound_method_callback\n    struct CppBoundMethodTest {};\n    py::class_<CppBoundMethodTest>(m, \"CppBoundMethodTest\")\n        .def(py::init<>())\n        .def(\"triple\", [](CppBoundMethodTest &, int val) { return 3 * val; });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_callbacks.py",
    "content": "import pytest\nfrom pybind11_tests import callbacks as m\n\n\ndef test_callbacks():\n    from functools import partial\n\n    def func1():\n        return \"func1\"\n\n    def func2(a, b, c, d):\n        return \"func2\", a, b, c, d\n\n    def func3(a):\n        return \"func3({})\".format(a)\n\n    assert m.test_callback1(func1) == \"func1\"\n    assert m.test_callback2(func2) == (\"func2\", \"Hello\", \"x\", True, 5)\n    assert m.test_callback1(partial(func2, 1, 2, 3, 4)) == (\"func2\", 1, 2, 3, 4)\n    assert m.test_callback1(partial(func3, \"partial\")) == \"func3(partial)\"\n    assert m.test_callback3(lambda i: i + 1) == \"func(43) = 44\"\n\n    f = m.test_callback4()\n    assert f(43) == 44\n    f = m.test_callback5()\n    assert f(number=43) == 44\n\n\ndef test_bound_method_callback():\n    # Bound Python method:\n    class MyClass:\n        def double(self, val):\n            return 2 * val\n\n    z = MyClass()\n    assert m.test_callback3(z.double) == \"func(43) = 86\"\n\n    z = m.CppBoundMethodTest()\n    assert m.test_callback3(z.triple) == \"func(43) = 129\"\n\n\ndef test_keyword_args_and_generalized_unpacking():\n\n    def f(*args, **kwargs):\n        return args, kwargs\n\n    assert m.test_tuple_unpacking(f) == ((\"positional\", 1, 2, 3, 4, 5, 6), {})\n    assert m.test_dict_unpacking(f) == ((\"positional\", 1), {\"key\": \"value\", \"a\": 1, \"b\": 2})\n    assert m.test_keyword_args(f) == ((), {\"x\": 10, \"y\": 20})\n    assert m.test_unpacking_and_keywords1(f) == ((1, 2), {\"c\": 3, \"d\": 4})\n    assert m.test_unpacking_and_keywords2(f) == (\n        (\"positional\", 1, 2, 3, 4, 5),\n        {\"key\": \"value\", \"a\": 1, \"b\": 2, \"c\": 3, \"d\": 4, \"e\": 5}\n    )\n\n    with pytest.raises(TypeError) as excinfo:\n        m.test_unpacking_error1(f)\n    assert \"Got multiple values for keyword argument\" in str(excinfo.value)\n\n    with pytest.raises(TypeError) as excinfo:\n        m.test_unpacking_error2(f)\n    assert 
\"Got multiple values for keyword argument\" in str(excinfo.value)\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.test_arg_conversion_error1(f)\n    assert \"Unable to convert call argument\" in str(excinfo.value)\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.test_arg_conversion_error2(f)\n    assert \"Unable to convert call argument\" in str(excinfo.value)\n\n\ndef test_lambda_closure_cleanup():\n    m.test_cleanup()\n    cstats = m.payload_cstats()\n    assert cstats.alive() == 0\n    assert cstats.copy_constructions == 1\n    assert cstats.move_constructions >= 1\n\n\ndef test_cpp_function_roundtrip():\n    \"\"\"Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer\"\"\"\n\n    assert m.test_dummy_function(m.dummy_function) == \"matches dummy_function: eval(1) = 2\"\n    assert (m.test_dummy_function(m.roundtrip(m.dummy_function)) ==\n            \"matches dummy_function: eval(1) = 2\")\n    assert m.roundtrip(None, expect_none=True) is None\n    assert (m.test_dummy_function(lambda x: x + 2) ==\n            \"can't convert to function pointer: eval(1) = 3\")\n\n    with pytest.raises(TypeError) as excinfo:\n        m.test_dummy_function(m.dummy_function2)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n\n    with pytest.raises(TypeError) as excinfo:\n        m.test_dummy_function(lambda x, y: x + y)\n    assert any(s in str(excinfo.value) for s in (\"missing 1 required positional argument\",\n                                                 \"takes exactly 2 arguments\"))\n\n\ndef test_function_signatures(doc):\n    assert doc(m.test_callback3) == \"test_callback3(arg0: Callable[[int], int]) -> str\"\n    assert doc(m.test_callback4) == \"test_callback4() -> Callable[[int], int]\"\n\n\ndef test_movable_object():\n    assert m.callback_with_movable(lambda _: None) is True\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_chrono.cpp",
    "content": "/*\n    tests/test_chrono.cpp -- test conversions to/from std::chrono types\n\n    Copyright (c) 2016 Trent Houliston <trent@houliston.me> and\n                       Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include <pybind11/chrono.h>\n\nTEST_SUBMODULE(chrono, m) {\n    using system_time = std::chrono::system_clock::time_point;\n    using steady_time = std::chrono::steady_clock::time_point;\n    // test_chrono_system_clock\n    // Return the current time off the wall clock\n    m.def(\"test_chrono1\", []() { return std::chrono::system_clock::now(); });\n\n    // test_chrono_system_clock_roundtrip\n    // Round trip the passed in system clock time\n    m.def(\"test_chrono2\", [](system_time t) { return t; });\n\n    // test_chrono_duration_roundtrip\n    // Round trip the passed in duration\n    m.def(\"test_chrono3\", [](std::chrono::system_clock::duration d) { return d; });\n\n    // test_chrono_duration_subtraction_equivalence\n    // Difference between two passed in time_points\n    m.def(\"test_chrono4\", [](system_time a, system_time b) { return a - b; });\n\n    // test_chrono_steady_clock\n    // Return the current time off the steady_clock\n    m.def(\"test_chrono5\", []() { return std::chrono::steady_clock::now(); });\n\n    // test_chrono_steady_clock_roundtrip\n    // Round trip a steady clock timepoint\n    m.def(\"test_chrono6\", [](steady_time t) { return t; });\n\n    // test_floating_point_duration\n    // Roundtrip a duration in microseconds from a float argument\n    m.def(\"test_chrono7\", [](std::chrono::microseconds t) { return t; });\n    // Float durations (issue #719)\n    m.def(\"test_chrono_float_diff\", [](std::chrono::duration<float> a, std::chrono::duration<float> b) {\n        return a - b; });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_chrono.py",
    "content": "from pybind11_tests import chrono as m\nimport datetime\n\n\ndef test_chrono_system_clock():\n\n    # Get the time from both c++ and datetime\n    date1 = m.test_chrono1()\n    date2 = datetime.datetime.today()\n\n    # The returned value should be a datetime\n    assert isinstance(date1, datetime.datetime)\n\n    # The numbers should vary by a very small amount (time it took to execute)\n    diff = abs(date1 - date2)\n\n    # There should never be a days/seconds difference\n    assert diff.days == 0\n    assert diff.seconds == 0\n\n    # We test that no more than about 0.5 seconds passes here\n    # This makes sure that the dates created are very close to the same\n    # but if the testing system is incredibly overloaded this should still pass\n    assert diff.microseconds < 500000\n\n\ndef test_chrono_system_clock_roundtrip():\n    date1 = datetime.datetime.today()\n\n    # Roundtrip the time\n    date2 = m.test_chrono2(date1)\n\n    # The returned value should be a datetime\n    assert isinstance(date2, datetime.datetime)\n\n    # They should be identical (no information lost on roundtrip)\n    diff = abs(date1 - date2)\n    assert diff.days == 0\n    assert diff.seconds == 0\n    assert diff.microseconds == 0\n\n\ndef test_chrono_duration_roundtrip():\n\n    # Get the difference between two times (a timedelta)\n    date1 = datetime.datetime.today()\n    date2 = datetime.datetime.today()\n    diff = date2 - date1\n\n    # Make sure this is a timedelta\n    assert isinstance(diff, datetime.timedelta)\n\n    cpp_diff = m.test_chrono3(diff)\n\n    assert cpp_diff.days == diff.days\n    assert cpp_diff.seconds == diff.seconds\n    assert cpp_diff.microseconds == diff.microseconds\n\n\ndef test_chrono_duration_subtraction_equivalence():\n\n    date1 = datetime.datetime.today()\n    date2 = datetime.datetime.today()\n\n    diff = date2 - date1\n    cpp_diff = m.test_chrono4(date2, date1)\n\n    assert cpp_diff.days == diff.days\n    assert 
cpp_diff.seconds == diff.seconds\n    assert cpp_diff.microseconds == diff.microseconds\n\n\ndef test_chrono_steady_clock():\n    time1 = m.test_chrono5()\n    assert isinstance(time1, datetime.timedelta)\n\n\ndef test_chrono_steady_clock_roundtrip():\n    time1 = datetime.timedelta(days=10, seconds=10, microseconds=100)\n    time2 = m.test_chrono6(time1)\n\n    assert isinstance(time2, datetime.timedelta)\n\n    # They should be identical (no information lost on roundtrip)\n    assert time1.days == time2.days\n    assert time1.seconds == time2.seconds\n    assert time1.microseconds == time2.microseconds\n\n\ndef test_floating_point_duration():\n    # Test using a floating point number in seconds\n    time = m.test_chrono7(35.525123)\n\n    assert isinstance(time, datetime.timedelta)\n\n    assert time.seconds == 35\n    assert 525122 <= time.microseconds <= 525123\n\n    diff = m.test_chrono_float_diff(43.789012, 1.123456)\n    assert diff.seconds == 42\n    assert 665556 <= diff.microseconds <= 665557\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_class.cpp",
    "content": "/*\n    tests/test_class.cpp -- test py::class_ definitions and basic functionality\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include \"local_bindings.h\"\n#include <pybind11/stl.h>\n\n#if defined(_MSC_VER)\n#  pragma warning(disable: 4324) // warning C4324: structure was padded due to alignment specifier\n#endif\n\n// test_brace_initialization\nstruct NoBraceInitialization {\n    NoBraceInitialization(std::vector<int> v) : vec{std::move(v)} {}\n    template <typename T>\n    NoBraceInitialization(std::initializer_list<T> l) : vec(l) {}\n\n    std::vector<int> vec;\n};\n\nTEST_SUBMODULE(class_, m) {\n    // test_instance\n    struct NoConstructor {\n        NoConstructor() = default;\n        NoConstructor(const NoConstructor &) = default;\n        NoConstructor(NoConstructor &&) = default;\n        static NoConstructor *new_instance() {\n            auto *ptr = new NoConstructor();\n            print_created(ptr, \"via new_instance\");\n            return ptr;\n        }\n        ~NoConstructor() { print_destroyed(this); }\n    };\n\n    py::class_<NoConstructor>(m, \"NoConstructor\")\n        .def_static(\"new_instance\", &NoConstructor::new_instance, \"Return an instance\");\n\n    // test_inheritance\n    class Pet {\n    public:\n        Pet(const std::string &name, const std::string &species)\n            : m_name(name), m_species(species) {}\n        std::string name() const { return m_name; }\n        std::string species() const { return m_species; }\n    private:\n        std::string m_name;\n        std::string m_species;\n    };\n\n    class Dog : public Pet {\n    public:\n        Dog(const std::string &name) : Pet(name, \"dog\") {}\n        std::string bark() const { return \"Woof!\"; }\n    };\n\n    class Rabbit : 
public Pet {\n    public:\n        Rabbit(const std::string &name) : Pet(name, \"parrot\") {}\n    };\n\n    class Hamster : public Pet {\n    public:\n        Hamster(const std::string &name) : Pet(name, \"rodent\") {}\n    };\n\n    class Chimera : public Pet {\n        Chimera() : Pet(\"Kimmy\", \"chimera\") {}\n    };\n\n    py::class_<Pet> pet_class(m, \"Pet\");\n    pet_class\n        .def(py::init<std::string, std::string>())\n        .def(\"name\", &Pet::name)\n        .def(\"species\", &Pet::species);\n\n    /* One way of declaring a subclass relationship: reference parent's class_ object */\n    py::class_<Dog>(m, \"Dog\", pet_class)\n        .def(py::init<std::string>());\n\n    /* Another way of declaring a subclass relationship: reference parent's C++ type */\n    py::class_<Rabbit, Pet>(m, \"Rabbit\")\n        .def(py::init<std::string>());\n\n    /* And another: list parent in class template arguments */\n    py::class_<Hamster, Pet>(m, \"Hamster\")\n        .def(py::init<std::string>());\n\n    /* Constructors are not inherited by default */\n    py::class_<Chimera, Pet>(m, \"Chimera\");\n\n    m.def(\"pet_name_species\", [](const Pet &pet) { return pet.name() + \" is a \" + pet.species(); });\n    m.def(\"dog_bark\", [](const Dog &dog) { return dog.bark(); });\n\n    // test_automatic_upcasting\n    struct BaseClass {\n        BaseClass() = default;\n        BaseClass(const BaseClass &) = default;\n        BaseClass(BaseClass &&) = default;\n        virtual ~BaseClass() {}\n    };\n    struct DerivedClass1 : BaseClass { };\n    struct DerivedClass2 : BaseClass { };\n\n    py::class_<BaseClass>(m, \"BaseClass\").def(py::init<>());\n    py::class_<DerivedClass1>(m, \"DerivedClass1\").def(py::init<>());\n    py::class_<DerivedClass2>(m, \"DerivedClass2\").def(py::init<>());\n\n    m.def(\"return_class_1\", []() -> BaseClass* { return new DerivedClass1(); });\n    m.def(\"return_class_2\", []() -> BaseClass* { return new DerivedClass2(); });\n    
m.def(\"return_class_n\", [](int n) -> BaseClass* {\n        if (n == 1) return new DerivedClass1();\n        if (n == 2) return new DerivedClass2();\n        return new BaseClass();\n    });\n    m.def(\"return_none\", []() -> BaseClass* { return nullptr; });\n\n    // test_isinstance\n    m.def(\"check_instances\", [](py::list l) {\n        return py::make_tuple(\n            py::isinstance<py::tuple>(l[0]),\n            py::isinstance<py::dict>(l[1]),\n            py::isinstance<Pet>(l[2]),\n            py::isinstance<Pet>(l[3]),\n            py::isinstance<Dog>(l[4]),\n            py::isinstance<Rabbit>(l[5]),\n            py::isinstance<UnregisteredType>(l[6])\n        );\n    });\n\n    // test_mismatched_holder\n    struct MismatchBase1 { };\n    struct MismatchDerived1 : MismatchBase1 { };\n\n    struct MismatchBase2 { };\n    struct MismatchDerived2 : MismatchBase2 { };\n\n    m.def(\"mismatched_holder_1\", []() {\n        auto mod = py::module::import(\"__main__\");\n        py::class_<MismatchBase1, std::shared_ptr<MismatchBase1>>(mod, \"MismatchBase1\");\n        py::class_<MismatchDerived1, MismatchBase1>(mod, \"MismatchDerived1\");\n    });\n    m.def(\"mismatched_holder_2\", []() {\n        auto mod = py::module::import(\"__main__\");\n        py::class_<MismatchBase2>(mod, \"MismatchBase2\");\n        py::class_<MismatchDerived2, std::shared_ptr<MismatchDerived2>,\n                   MismatchBase2>(mod, \"MismatchDerived2\");\n    });\n\n    // test_override_static\n    // #511: problem with inheritance + overwritten def_static\n    struct MyBase {\n        static std::unique_ptr<MyBase> make() {\n            return std::unique_ptr<MyBase>(new MyBase());\n        }\n    };\n\n    struct MyDerived : MyBase {\n        static std::unique_ptr<MyDerived> make() {\n            return std::unique_ptr<MyDerived>(new MyDerived());\n        }\n    };\n\n    py::class_<MyBase>(m, \"MyBase\")\n        .def_static(\"make\", &MyBase::make);\n\n    
py::class_<MyDerived, MyBase>(m, \"MyDerived\")\n        .def_static(\"make\", &MyDerived::make)\n        .def_static(\"make2\", &MyDerived::make);\n\n    // test_implicit_conversion_life_support\n    struct ConvertibleFromUserType {\n        int i;\n\n        ConvertibleFromUserType(UserType u) : i(u.value()) { }\n    };\n\n    py::class_<ConvertibleFromUserType>(m, \"AcceptsUserType\")\n        .def(py::init<UserType>());\n    py::implicitly_convertible<UserType, ConvertibleFromUserType>();\n\n    m.def(\"implicitly_convert_argument\", [](const ConvertibleFromUserType &r) { return r.i; });\n    m.def(\"implicitly_convert_variable\", [](py::object o) {\n        // `o` is `UserType` and `r` is a reference to a temporary created by implicit\n        // conversion. This is valid when called inside a bound function because the temp\n        // object is attached to the same life support system as the arguments.\n        const auto &r = o.cast<const ConvertibleFromUserType &>();\n        return r.i;\n    });\n    m.add_object(\"implicitly_convert_variable_fail\", [&] {\n        auto f = [](PyObject *, PyObject *args) -> PyObject * {\n            auto o = py::reinterpret_borrow<py::tuple>(args)[0];\n            try { // It should fail here because there is no life support.\n                o.cast<const ConvertibleFromUserType &>();\n            } catch (const py::cast_error &e) {\n                return py::str(e.what()).release().ptr();\n            }\n            return py::str().release().ptr();\n        };\n\n        auto def = new PyMethodDef{\"f\", f, METH_VARARGS, nullptr};\n        return py::reinterpret_steal<py::object>(PyCFunction_NewEx(def, nullptr, m.ptr()));\n    }());\n\n    // test_operator_new_delete\n    struct HasOpNewDel {\n        std::uint64_t i;\n        static void *operator new(size_t s) { py::print(\"A new\", s); return ::operator new(s); }\n        static void *operator new(size_t s, void *ptr) { py::print(\"A placement-new\", s); return ptr; 
}\n        static void operator delete(void *p) { py::print(\"A delete\"); return ::operator delete(p); }\n    };\n    struct HasOpNewDelSize {\n        std::uint32_t i;\n        static void *operator new(size_t s) { py::print(\"B new\", s); return ::operator new(s); }\n        static void *operator new(size_t s, void *ptr) { py::print(\"B placement-new\", s); return ptr; }\n        static void operator delete(void *p, size_t s) { py::print(\"B delete\", s); return ::operator delete(p); }\n    };\n    struct AliasedHasOpNewDelSize {\n        std::uint64_t i;\n        static void *operator new(size_t s) { py::print(\"C new\", s); return ::operator new(s); }\n        static void *operator new(size_t s, void *ptr) { py::print(\"C placement-new\", s); return ptr; }\n        static void operator delete(void *p, size_t s) { py::print(\"C delete\", s); return ::operator delete(p); }\n        virtual ~AliasedHasOpNewDelSize() = default;\n    };\n    struct PyAliasedHasOpNewDelSize : AliasedHasOpNewDelSize {\n        PyAliasedHasOpNewDelSize() = default;\n        PyAliasedHasOpNewDelSize(int) { }\n        std::uint64_t j;\n    };\n    struct HasOpNewDelBoth {\n        std::uint32_t i[8];\n        static void *operator new(size_t s) { py::print(\"D new\", s); return ::operator new(s); }\n        static void *operator new(size_t s, void *ptr) { py::print(\"D placement-new\", s); return ptr; }\n        static void operator delete(void *p) { py::print(\"D delete\"); return ::operator delete(p); }\n        static void operator delete(void *p, size_t s) { py::print(\"D wrong delete\", s); return ::operator delete(p); }\n    };\n    py::class_<HasOpNewDel>(m, \"HasOpNewDel\").def(py::init<>());\n    py::class_<HasOpNewDelSize>(m, \"HasOpNewDelSize\").def(py::init<>());\n    py::class_<HasOpNewDelBoth>(m, \"HasOpNewDelBoth\").def(py::init<>());\n    py::class_<AliasedHasOpNewDelSize, PyAliasedHasOpNewDelSize> aliased(m, \"AliasedHasOpNewDelSize\");\n    aliased.def(py::init<>());\n 
   aliased.attr(\"size_noalias\") = py::int_(sizeof(AliasedHasOpNewDelSize));\n    aliased.attr(\"size_alias\") = py::int_(sizeof(PyAliasedHasOpNewDelSize));\n\n    // This test is actually part of test_local_bindings (test_duplicate_local), but we need a\n    // definition in a different compilation unit within the same module:\n    bind_local<LocalExternal, 17>(m, \"LocalExternal\", py::module_local());\n\n    // test_bind_protected_functions\n    class ProtectedA {\n    protected:\n        int foo() const { return value; }\n\n    private:\n        int value = 42;\n    };\n\n    class PublicistA : public ProtectedA {\n    public:\n        using ProtectedA::foo;\n    };\n\n    py::class_<ProtectedA>(m, \"ProtectedA\")\n        .def(py::init<>())\n#if !defined(_MSC_VER) || _MSC_VER >= 1910\n        .def(\"foo\", &PublicistA::foo);\n#else\n        .def(\"foo\", static_cast<int (ProtectedA::*)() const>(&PublicistA::foo));\n#endif\n\n    class ProtectedB {\n    public:\n        virtual ~ProtectedB() = default;\n\n    protected:\n        virtual int foo() const { return value; }\n\n    private:\n        int value = 42;\n    };\n\n    class TrampolineB : public ProtectedB {\n    public:\n        int foo() const override { PYBIND11_OVERLOAD(int, ProtectedB, foo, ); }\n    };\n\n    class PublicistB : public ProtectedB {\n    public:\n        using ProtectedB::foo;\n    };\n\n    py::class_<ProtectedB, TrampolineB>(m, \"ProtectedB\")\n        .def(py::init<>())\n#if !defined(_MSC_VER) || _MSC_VER >= 1910\n        .def(\"foo\", &PublicistB::foo);\n#else\n        .def(\"foo\", static_cast<int (ProtectedB::*)() const>(&PublicistB::foo));\n#endif\n\n    // test_brace_initialization\n    struct BraceInitialization {\n        int field1;\n        std::string field2;\n    };\n\n    py::class_<BraceInitialization>(m, \"BraceInitialization\")\n        .def(py::init<int, const std::string &>())\n        .def_readwrite(\"field1\", &BraceInitialization::field1)\n        
.def_readwrite(\"field2\", &BraceInitialization::field2);\n    // We *don't* want to construct using braces when the given constructor argument maps to a\n    // constructor, because brace initialization could go to the wrong place (in particular when\n    // there is also an `initializer_list<T>`-accept constructor):\n    py::class_<NoBraceInitialization>(m, \"NoBraceInitialization\")\n        .def(py::init<std::vector<int>>())\n        .def_readonly(\"vec\", &NoBraceInitialization::vec);\n\n    // test_reentrant_implicit_conversion_failure\n    // #1035: issue with runaway reentrant implicit conversion\n    struct BogusImplicitConversion {\n        BogusImplicitConversion(const BogusImplicitConversion &) { }\n    };\n\n    py::class_<BogusImplicitConversion>(m, \"BogusImplicitConversion\")\n        .def(py::init<const BogusImplicitConversion &>());\n\n    py::implicitly_convertible<int, BogusImplicitConversion>();\n\n    // test_qualname\n    // #1166: nested class docstring doesn't show nested name\n    // Also related: tests that __qualname__ is set properly\n    struct NestBase {};\n    struct Nested {};\n    py::class_<NestBase> base(m, \"NestBase\");\n    base.def(py::init<>());\n    py::class_<Nested>(base, \"Nested\")\n        .def(py::init<>())\n        .def(\"fn\", [](Nested &, int, NestBase &, Nested &) {})\n        .def(\"fa\", [](Nested &, int, NestBase &, Nested &) {},\n                \"a\"_a, \"b\"_a, \"c\"_a);\n    base.def(\"g\", [](NestBase &, Nested &) {});\n    base.def(\"h\", []() { return NestBase(); });\n\n    // test_error_after_conversion\n    // The second-pass path through dispatcher() previously didn't\n    // remember which overload was used, and would crash trying to\n    // generate a useful error message\n\n    struct NotRegistered {};\n    struct StringWrapper { std::string str; };\n    m.def(\"test_error_after_conversions\", [](int) {});\n    m.def(\"test_error_after_conversions\",\n          [](StringWrapper) -> NotRegistered { 
return {}; });\n    py::class_<StringWrapper>(m, \"StringWrapper\").def(py::init<std::string>());\n    py::implicitly_convertible<std::string, StringWrapper>();\n\n    #if defined(PYBIND11_CPP17)\n        struct alignas(1024) Aligned {\n            std::uintptr_t ptr() const { return (uintptr_t) this; }\n        };\n        py::class_<Aligned>(m, \"Aligned\")\n            .def(py::init<>())\n            .def(\"ptr\", &Aligned::ptr);\n    #endif\n}\n\ntemplate <int N> class BreaksBase { public: virtual ~BreaksBase() = default; };\ntemplate <int N> class BreaksTramp : public BreaksBase<N> {};\n// These should all compile just fine:\ntypedef py::class_<BreaksBase<1>, std::unique_ptr<BreaksBase<1>>, BreaksTramp<1>> DoesntBreak1;\ntypedef py::class_<BreaksBase<2>, BreaksTramp<2>, std::unique_ptr<BreaksBase<2>>> DoesntBreak2;\ntypedef py::class_<BreaksBase<3>, std::unique_ptr<BreaksBase<3>>> DoesntBreak3;\ntypedef py::class_<BreaksBase<4>, BreaksTramp<4>> DoesntBreak4;\ntypedef py::class_<BreaksBase<5>> DoesntBreak5;\ntypedef py::class_<BreaksBase<6>, std::shared_ptr<BreaksBase<6>>, BreaksTramp<6>> DoesntBreak6;\ntypedef py::class_<BreaksBase<7>, BreaksTramp<7>, std::shared_ptr<BreaksBase<7>>> DoesntBreak7;\ntypedef py::class_<BreaksBase<8>, std::shared_ptr<BreaksBase<8>>> DoesntBreak8;\n#define CHECK_BASE(N) static_assert(std::is_same<typename DoesntBreak##N::type, BreaksBase<N>>::value, \\\n        \"DoesntBreak\" #N \" has wrong type!\")\nCHECK_BASE(1); CHECK_BASE(2); CHECK_BASE(3); CHECK_BASE(4); CHECK_BASE(5); CHECK_BASE(6); CHECK_BASE(7); CHECK_BASE(8);\n#define CHECK_ALIAS(N) static_assert(DoesntBreak##N::has_alias && std::is_same<typename DoesntBreak##N::type_alias, BreaksTramp<N>>::value, \\\n        \"DoesntBreak\" #N \" has wrong type_alias!\")\n#define CHECK_NOALIAS(N) static_assert(!DoesntBreak##N::has_alias && std::is_void<typename DoesntBreak##N::type_alias>::value, \\\n        \"DoesntBreak\" #N \" has type alias, but shouldn't!\")\nCHECK_ALIAS(1); 
CHECK_ALIAS(2); CHECK_NOALIAS(3); CHECK_ALIAS(4); CHECK_NOALIAS(5); CHECK_ALIAS(6); CHECK_ALIAS(7); CHECK_NOALIAS(8);\n#define CHECK_HOLDER(N, TYPE) static_assert(std::is_same<typename DoesntBreak##N::holder_type, std::TYPE##_ptr<BreaksBase<N>>>::value, \\\n        \"DoesntBreak\" #N \" has wrong holder_type!\")\nCHECK_HOLDER(1, unique); CHECK_HOLDER(2, unique); CHECK_HOLDER(3, unique); CHECK_HOLDER(4, unique); CHECK_HOLDER(5, unique);\nCHECK_HOLDER(6, shared); CHECK_HOLDER(7, shared); CHECK_HOLDER(8, shared);\n\n// There's no nice way to test that these fail because they fail to compile; leave them here,\n// though, so that they can be manually tested by uncommenting them (and seeing that compilation\n// failures occurs).\n\n// We have to actually look into the type: the typedef alone isn't enough to instantiate the type:\n#define CHECK_BROKEN(N) static_assert(std::is_same<typename Breaks##N::type, BreaksBase<-N>>::value, \\\n        \"Breaks1 has wrong type!\");\n\n//// Two holder classes:\n//typedef py::class_<BreaksBase<-1>, std::unique_ptr<BreaksBase<-1>>, std::unique_ptr<BreaksBase<-1>>> Breaks1;\n//CHECK_BROKEN(1);\n//// Two aliases:\n//typedef py::class_<BreaksBase<-2>, BreaksTramp<-2>, BreaksTramp<-2>> Breaks2;\n//CHECK_BROKEN(2);\n//// Holder + 2 aliases\n//typedef py::class_<BreaksBase<-3>, std::unique_ptr<BreaksBase<-3>>, BreaksTramp<-3>, BreaksTramp<-3>> Breaks3;\n//CHECK_BROKEN(3);\n//// Alias + 2 holders\n//typedef py::class_<BreaksBase<-4>, std::unique_ptr<BreaksBase<-4>>, BreaksTramp<-4>, std::shared_ptr<BreaksBase<-4>>> Breaks4;\n//CHECK_BROKEN(4);\n//// Invalid option (not a subclass or holder)\n//typedef py::class_<BreaksBase<-5>, BreaksTramp<-4>> Breaks5;\n//CHECK_BROKEN(5);\n//// Invalid option: multiple inheritance not supported:\n//template <> struct BreaksBase<-8> : BreaksBase<-6>, BreaksBase<-7> {};\n//typedef py::class_<BreaksBase<-8>, BreaksBase<-6>, BreaksBase<-7>> Breaks8;\n//CHECK_BROKEN(8);\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_class.py",
    "content": "import pytest\n\nfrom pybind11_tests import class_ as m\nfrom pybind11_tests import UserType, ConstructorStats\n\n\ndef test_repr():\n    # In Python 3.3+, repr() accesses __qualname__\n    assert \"pybind11_type\" in repr(type(UserType))\n    assert \"UserType\" in repr(UserType)\n\n\ndef test_instance(msg):\n    with pytest.raises(TypeError) as excinfo:\n        m.NoConstructor()\n    assert msg(excinfo.value) == \"m.class_.NoConstructor: No constructor defined!\"\n\n    instance = m.NoConstructor.new_instance()\n\n    cstats = ConstructorStats.get(m.NoConstructor)\n    assert cstats.alive() == 1\n    del instance\n    assert cstats.alive() == 0\n\n\ndef test_docstrings(doc):\n    assert doc(UserType) == \"A `py::class_` type for testing\"\n    assert UserType.__name__ == \"UserType\"\n    assert UserType.__module__ == \"pybind11_tests\"\n    assert UserType.get_value.__name__ == \"get_value\"\n    assert UserType.get_value.__module__ == \"pybind11_tests\"\n\n    assert doc(UserType.get_value) == \"\"\"\n        get_value(self: m.UserType) -> int\n\n        Get value using a method\n    \"\"\"\n    assert doc(UserType.value) == \"Get/set value using a property\"\n\n    assert doc(m.NoConstructor.new_instance) == \"\"\"\n        new_instance() -> m.class_.NoConstructor\n\n        Return an instance\n    \"\"\"\n\n\ndef test_qualname(doc):\n    \"\"\"Tests that a properly qualified name is set in __qualname__ (even in pre-3.3, where we\n    backport the attribute) and that generated docstrings properly use it and the module name\"\"\"\n    assert m.NestBase.__qualname__ == \"NestBase\"\n    assert m.NestBase.Nested.__qualname__ == \"NestBase.Nested\"\n\n    assert doc(m.NestBase.__init__) == \"\"\"\n        __init__(self: m.class_.NestBase) -> None\n    \"\"\"\n    assert doc(m.NestBase.g) == \"\"\"\n        g(self: m.class_.NestBase, arg0: m.class_.NestBase.Nested) -> None\n    \"\"\"\n    assert doc(m.NestBase.Nested.__init__) == \"\"\"\n        
__init__(self: m.class_.NestBase.Nested) -> None\n    \"\"\"\n    assert doc(m.NestBase.Nested.fn) == \"\"\"\n        fn(self: m.class_.NestBase.Nested, arg0: int, arg1: m.class_.NestBase, arg2: m.class_.NestBase.Nested) -> None\n    \"\"\"  # noqa: E501 line too long\n    assert doc(m.NestBase.Nested.fa) == \"\"\"\n        fa(self: m.class_.NestBase.Nested, a: int, b: m.class_.NestBase, c: m.class_.NestBase.Nested) -> None\n    \"\"\"  # noqa: E501 line too long\n    assert m.NestBase.__module__ == \"pybind11_tests.class_\"\n    assert m.NestBase.Nested.__module__ == \"pybind11_tests.class_\"\n\n\ndef test_inheritance(msg):\n    roger = m.Rabbit('Rabbit')\n    assert roger.name() + \" is a \" + roger.species() == \"Rabbit is a parrot\"\n    assert m.pet_name_species(roger) == \"Rabbit is a parrot\"\n\n    polly = m.Pet('Polly', 'parrot')\n    assert polly.name() + \" is a \" + polly.species() == \"Polly is a parrot\"\n    assert m.pet_name_species(polly) == \"Polly is a parrot\"\n\n    molly = m.Dog('Molly')\n    assert molly.name() + \" is a \" + molly.species() == \"Molly is a dog\"\n    assert m.pet_name_species(molly) == \"Molly is a dog\"\n\n    fred = m.Hamster('Fred')\n    assert fred.name() + \" is a \" + fred.species() == \"Fred is a rodent\"\n\n    assert m.dog_bark(molly) == \"Woof!\"\n\n    with pytest.raises(TypeError) as excinfo:\n        m.dog_bark(polly)\n    assert msg(excinfo.value) == \"\"\"\n        dog_bark(): incompatible function arguments. The following argument types are supported:\n            1. 
(arg0: m.class_.Dog) -> str\n\n        Invoked with: <m.class_.Pet object at 0>\n    \"\"\"\n\n    with pytest.raises(TypeError) as excinfo:\n        m.Chimera(\"lion\", \"goat\")\n    assert \"No constructor defined!\" in str(excinfo.value)\n\n\ndef test_automatic_upcasting():\n    assert type(m.return_class_1()).__name__ == \"DerivedClass1\"\n    assert type(m.return_class_2()).__name__ == \"DerivedClass2\"\n    assert type(m.return_none()).__name__ == \"NoneType\"\n    # Repeat these a few times in a random order to ensure no invalid caching is applied\n    assert type(m.return_class_n(1)).__name__ == \"DerivedClass1\"\n    assert type(m.return_class_n(2)).__name__ == \"DerivedClass2\"\n    assert type(m.return_class_n(0)).__name__ == \"BaseClass\"\n    assert type(m.return_class_n(2)).__name__ == \"DerivedClass2\"\n    assert type(m.return_class_n(2)).__name__ == \"DerivedClass2\"\n    assert type(m.return_class_n(0)).__name__ == \"BaseClass\"\n    assert type(m.return_class_n(1)).__name__ == \"DerivedClass1\"\n\n\ndef test_isinstance():\n    objects = [tuple(), dict(), m.Pet(\"Polly\", \"parrot\")] + [m.Dog(\"Molly\")] * 4\n    expected = (True, True, True, True, True, False, False)\n    assert m.check_instances(objects) == expected\n\n\ndef test_mismatched_holder():\n    import re\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.mismatched_holder_1()\n    assert re.match('generic_type: type \".*MismatchDerived1\" does not have a non-default '\n                    'holder type while its base \".*MismatchBase1\" does', str(excinfo.value))\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.mismatched_holder_2()\n    assert re.match('generic_type: type \".*MismatchDerived2\" has a non-default holder type '\n                    'while its base \".*MismatchBase2\" does not', str(excinfo.value))\n\n\ndef test_override_static():\n    \"\"\"#511: problem with inheritance + overwritten def_static\"\"\"\n    b = m.MyBase.make()\n    d1 = 
m.MyDerived.make2()\n    d2 = m.MyDerived.make()\n\n    assert isinstance(b, m.MyBase)\n    assert isinstance(d1, m.MyDerived)\n    assert isinstance(d2, m.MyDerived)\n\n\ndef test_implicit_conversion_life_support():\n    \"\"\"Ensure the lifetime of temporary objects created for implicit conversions\"\"\"\n    assert m.implicitly_convert_argument(UserType(5)) == 5\n    assert m.implicitly_convert_variable(UserType(5)) == 5\n\n    assert \"outside a bound function\" in m.implicitly_convert_variable_fail(UserType(5))\n\n\ndef test_operator_new_delete(capture):\n    \"\"\"Tests that class-specific operator new/delete functions are invoked\"\"\"\n\n    class SubAliased(m.AliasedHasOpNewDelSize):\n        pass\n\n    with capture:\n        a = m.HasOpNewDel()\n        b = m.HasOpNewDelSize()\n        d = m.HasOpNewDelBoth()\n    assert capture == \"\"\"\n        A new 8\n        B new 4\n        D new 32\n    \"\"\"\n    sz_alias = str(m.AliasedHasOpNewDelSize.size_alias)\n    sz_noalias = str(m.AliasedHasOpNewDelSize.size_noalias)\n    with capture:\n        c = m.AliasedHasOpNewDelSize()\n        c2 = SubAliased()\n    assert capture == (\n        \"C new \" + sz_noalias + \"\\n\" +\n        \"C new \" + sz_alias + \"\\n\"\n    )\n\n    with capture:\n        del a\n        pytest.gc_collect()\n        del b\n        pytest.gc_collect()\n        del d\n        pytest.gc_collect()\n    assert capture == \"\"\"\n        A delete\n        B delete 4\n        D delete\n    \"\"\"\n\n    with capture:\n        del c\n        pytest.gc_collect()\n        del c2\n        pytest.gc_collect()\n    assert capture == (\n        \"C delete \" + sz_noalias + \"\\n\" +\n        \"C delete \" + sz_alias + \"\\n\"\n    )\n\n\ndef test_bind_protected_functions():\n    \"\"\"Expose protected member functions to Python using a helper class\"\"\"\n    a = m.ProtectedA()\n    assert a.foo() == 42\n\n    b = m.ProtectedB()\n    assert b.foo() == 42\n\n    class C(m.ProtectedB):\n        
def __init__(self):\n            m.ProtectedB.__init__(self)\n\n        def foo(self):\n            return 0\n\n    c = C()\n    assert c.foo() == 0\n\n\ndef test_brace_initialization():\n    \"\"\" Tests that simple POD classes can be constructed using C++11 brace initialization \"\"\"\n    a = m.BraceInitialization(123, \"test\")\n    assert a.field1 == 123\n    assert a.field2 == \"test\"\n\n    # Tests that a non-simple class doesn't get brace initialization (if the\n    # class defines an initializer_list constructor, in particular, it would\n    # win over the expected constructor).\n    b = m.NoBraceInitialization([123, 456])\n    assert b.vec == [123, 456]\n\n\n@pytest.unsupported_on_pypy\ndef test_class_refcount():\n    \"\"\"Instances must correctly increase/decrease the reference count of their types (#1029)\"\"\"\n    from sys import getrefcount\n\n    class PyDog(m.Dog):\n        pass\n\n    for cls in m.Dog, PyDog:\n        refcount_1 = getrefcount(cls)\n        molly = [cls(\"Molly\") for _ in range(10)]\n        refcount_2 = getrefcount(cls)\n\n        del molly\n        pytest.gc_collect()\n        refcount_3 = getrefcount(cls)\n\n        assert refcount_1 == refcount_3\n        assert refcount_2 > refcount_1\n\n\ndef test_reentrant_implicit_conversion_failure(msg):\n    # ensure that there is no runaway reentrant implicit conversion (#1035)\n    with pytest.raises(TypeError) as excinfo:\n        m.BogusImplicitConversion(0)\n    assert msg(excinfo.value) == '''\n        __init__(): incompatible constructor arguments. The following argument types are supported:\n            1. 
m.class_.BogusImplicitConversion(arg0: m.class_.BogusImplicitConversion)\n\n        Invoked with: 0\n    '''\n\n\ndef test_error_after_conversions():\n    with pytest.raises(TypeError) as exc_info:\n        m.test_error_after_conversions(\"hello\")\n    assert str(exc_info.value).startswith(\n        \"Unable to convert function return value to a Python type!\")\n\n\ndef test_aligned():\n    if hasattr(m, \"Aligned\"):\n        p = m.Aligned().ptr()\n        assert p % 1024 == 0\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/CMakeLists.txt",
    "content": "add_custom_target(test_cmake_build)\n\nif(CMAKE_VERSION VERSION_LESS 3.1)\n  # 3.0 needed for interface library for subdirectory_target/installed_target\n  # 3.1 needed for cmake -E env for testing\n  return()\nendif()\n\ninclude(CMakeParseArguments)\nfunction(pybind11_add_build_test name)\n  cmake_parse_arguments(ARG \"INSTALL\" \"\" \"\" ${ARGN})\n\n  set(build_options \"-DCMAKE_PREFIX_PATH=${PROJECT_BINARY_DIR}/mock_install\"\n                    \"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}\"\n                    \"-DPYTHON_EXECUTABLE:FILEPATH=${PYTHON_EXECUTABLE}\"\n                    \"-DPYBIND11_CPP_STANDARD=${PYBIND11_CPP_STANDARD}\")\n  if(NOT ARG_INSTALL)\n    list(APPEND build_options \"-DPYBIND11_PROJECT_DIR=${PROJECT_SOURCE_DIR}\")\n  endif()\n\n  add_custom_target(test_${name} ${CMAKE_CTEST_COMMAND}\n    --quiet --output-log ${name}.log\n    --build-and-test \"${CMAKE_CURRENT_SOURCE_DIR}/${name}\"\n                     \"${CMAKE_CURRENT_BINARY_DIR}/${name}\"\n    --build-config Release\n    --build-noclean\n    --build-generator ${CMAKE_GENERATOR}\n    $<$<BOOL:${CMAKE_GENERATOR_PLATFORM}>:--build-generator-platform> ${CMAKE_GENERATOR_PLATFORM}\n    --build-makeprogram ${CMAKE_MAKE_PROGRAM}\n    --build-target check\n    --build-options ${build_options}\n  )\n  if(ARG_INSTALL)\n    add_dependencies(test_${name} mock_install)\n  endif()\n  add_dependencies(test_cmake_build test_${name})\nendfunction()\n\npybind11_add_build_test(subdirectory_function)\npybind11_add_build_test(subdirectory_target)\nif(NOT ${PYTHON_MODULE_EXTENSION} MATCHES \"pypy\")\n  pybind11_add_build_test(subdirectory_embed)\nendif()\n\nif(PYBIND11_INSTALL)\n  add_custom_target(mock_install ${CMAKE_COMMAND}\n    \"-DCMAKE_INSTALL_PREFIX=${PROJECT_BINARY_DIR}/mock_install\"\n    -P \"${PROJECT_BINARY_DIR}/cmake_install.cmake\"\n  )\n\n  pybind11_add_build_test(installed_function INSTALL)\n  pybind11_add_build_test(installed_target INSTALL)\n  if(NOT 
${PYTHON_MODULE_EXTENSION} MATCHES \"pypy\")\n    pybind11_add_build_test(installed_embed INSTALL)\n  endif()\nendif()\n\nadd_dependencies(check test_cmake_build)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/embed.cpp",
    "content": "#include <pybind11/embed.h>\nnamespace py = pybind11;\n\nPYBIND11_EMBEDDED_MODULE(test_cmake_build, m) {\n    m.def(\"add\", [](int i, int j) { return i + j; });\n}\n\nint main(int argc, char *argv[]) {\n    if (argc != 2)\n        throw std::runtime_error(\"Expected test.py file as the first argument\");\n    auto test_py_file = argv[1];\n\n    py::scoped_interpreter guard{};\n\n    auto m = py::module::import(\"test_cmake_build\");\n    if (m.attr(\"add\")(1, 2).cast<int>() != 3)\n        throw std::runtime_error(\"embed.cpp failed\");\n\n    py::module::import(\"sys\").attr(\"argv\") = py::make_tuple(\"test.py\", \"embed.cpp\");\n    py::eval_file(test_py_file, py::globals());\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/installed_embed/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.0)\nproject(test_installed_embed CXX)\n\nset(CMAKE_MODULE_PATH \"\")\nfind_package(pybind11 CONFIG REQUIRED)\nmessage(STATUS \"Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}\")\n\nadd_executable(test_cmake_build ../embed.cpp)\ntarget_link_libraries(test_cmake_build PRIVATE pybind11::embed)\n\n# Do not treat includes from IMPORTED target as SYSTEM (Python headers in pybind11::embed).\n# This may be needed to resolve header conflicts, e.g. between Python release and debug headers.\nset_target_properties(test_cmake_build PROPERTIES NO_SYSTEM_FROM_IMPORTED ON)\n\nadd_custom_target(check $<TARGET_FILE:test_cmake_build> ${PROJECT_SOURCE_DIR}/../test.py)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/installed_function/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.8.12)\nproject(test_installed_module CXX)\n\nset(CMAKE_MODULE_PATH \"\")\n\nfind_package(pybind11 CONFIG REQUIRED)\nmessage(STATUS \"Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}\")\n\npybind11_add_module(test_cmake_build SHARED NO_EXTRAS ../main.cpp)\n\nadd_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build>\n                  ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME})\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/installed_target/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.0)\nproject(test_installed_target CXX)\n\nset(CMAKE_MODULE_PATH \"\")\n\nfind_package(pybind11 CONFIG REQUIRED)\nmessage(STATUS \"Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}\")\n\nadd_library(test_cmake_build MODULE ../main.cpp)\n\ntarget_link_libraries(test_cmake_build PRIVATE pybind11::module)\n\n# make sure result is, for example, test_installed_target.so, not libtest_installed_target.dylib\nset_target_properties(test_cmake_build PROPERTIES PREFIX \"${PYTHON_MODULE_PREFIX}\"\n                                                  SUFFIX \"${PYTHON_MODULE_EXTENSION}\")\n\n# Do not treat includes from IMPORTED target as SYSTEM (Python headers in pybind11::module).\n# This may be needed to resolve header conflicts, e.g. between Python release and debug headers.\nset_target_properties(test_cmake_build PROPERTIES NO_SYSTEM_FROM_IMPORTED ON)\n\nadd_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build>\n                  ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME})\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/main.cpp",
    "content": "#include <pybind11/pybind11.h>\nnamespace py = pybind11;\n\nPYBIND11_MODULE(test_cmake_build, m) {\n    m.def(\"add\", [](int i, int j) { return i + j; });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/subdirectory_embed/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.0)\nproject(test_subdirectory_embed CXX)\n\nset(PYBIND11_INSTALL ON CACHE BOOL \"\")\nset(PYBIND11_EXPORT_NAME test_export)\n\nadd_subdirectory(${PYBIND11_PROJECT_DIR} pybind11)\n\n# Test basic target functionality\nadd_executable(test_cmake_build ../embed.cpp)\ntarget_link_libraries(test_cmake_build PRIVATE pybind11::embed)\n\nadd_custom_target(check $<TARGET_FILE:test_cmake_build> ${PROJECT_SOURCE_DIR}/../test.py)\n\n# Test custom export group -- PYBIND11_EXPORT_NAME\nadd_library(test_embed_lib ../embed.cpp)\ntarget_link_libraries(test_embed_lib PRIVATE pybind11::embed)\n\ninstall(TARGETS test_embed_lib\n        EXPORT  test_export\n        ARCHIVE DESTINATION bin\n        LIBRARY DESTINATION lib\n        RUNTIME DESTINATION lib)\ninstall(EXPORT      test_export\n        DESTINATION lib/cmake/test_export/test_export-Targets.cmake)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/subdirectory_function/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.8.12)\nproject(test_subdirectory_module CXX)\n\nadd_subdirectory(${PYBIND11_PROJECT_DIR} pybind11)\npybind11_add_module(test_cmake_build THIN_LTO ../main.cpp)\n\nadd_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build>\n                  ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME})\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/subdirectory_target/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.0)\nproject(test_subdirectory_target CXX)\n\nadd_subdirectory(${PYBIND11_PROJECT_DIR} pybind11)\n\nadd_library(test_cmake_build MODULE ../main.cpp)\n\ntarget_link_libraries(test_cmake_build PRIVATE pybind11::module)\n\n# make sure result is, for example, test_installed_target.so, not libtest_installed_target.dylib\nset_target_properties(test_cmake_build PROPERTIES PREFIX \"${PYTHON_MODULE_PREFIX}\"\n                                                  SUFFIX \"${PYTHON_MODULE_EXTENSION}\")\n\nadd_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build>\n                  ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME})\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_cmake_build/test.py",
    "content": "import sys\nimport test_cmake_build\n\nassert test_cmake_build.add(1, 2) == 3\nprint(\"{} imports, runs, and adds: 1 + 2 = 3\".format(sys.argv[1]))\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_constants_and_functions.cpp",
    "content": "/*\n    tests/test_constants_and_functions.cpp -- global constants and functions, enumerations, raw byte strings\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\nenum MyEnum { EFirstEntry = 1, ESecondEntry };\n\nstd::string test_function1() {\n    return \"test_function()\";\n}\n\nstd::string test_function2(MyEnum k) {\n    return \"test_function(enum=\" + std::to_string(k) + \")\";\n}\n\nstd::string test_function3(int i) {\n    return \"test_function(\" + std::to_string(i) + \")\";\n}\n\npy::str test_function4()           { return \"test_function()\"; }\npy::str test_function4(char *)     { return \"test_function(char *)\"; }\npy::str test_function4(int, float) { return \"test_function(int, float)\"; }\npy::str test_function4(float, int) { return \"test_function(float, int)\"; }\n\npy::bytes return_bytes() {\n    const char *data = \"\\x01\\x00\\x02\\x00\";\n    return std::string(data, 4);\n}\n\nstd::string print_bytes(py::bytes bytes) {\n    std::string ret = \"bytes[\";\n    const auto value = static_cast<std::string>(bytes);\n    for (size_t i = 0; i < value.length(); ++i) {\n        ret += std::to_string(static_cast<int>(value[i])) + \" \";\n    }\n    ret.back() = ']';\n    return ret;\n}\n\n// Test that we properly handle C++17 exception specifiers (which are part of the function signature\n// in C++17).  
These should all still work before C++17, but don't affect the function signature.\nnamespace test_exc_sp {\nint f1(int x) noexcept { return x+1; }\nint f2(int x) noexcept(true) { return x+2; }\nint f3(int x) noexcept(false) { return x+3; }\n#if defined(__GNUG__)\n#  pragma GCC diagnostic push\n#  pragma GCC diagnostic ignored \"-Wdeprecated\"\n#endif\nint f4(int x) throw() { return x+4; } // Deprecated equivalent to noexcept(true)\n#if defined(__GNUG__)\n#  pragma GCC diagnostic pop\n#endif\nstruct C {\n    int m1(int x) noexcept { return x-1; }\n    int m2(int x) const noexcept { return x-2; }\n    int m3(int x) noexcept(true) { return x-3; }\n    int m4(int x) const noexcept(true) { return x-4; }\n    int m5(int x) noexcept(false) { return x-5; }\n    int m6(int x) const noexcept(false) { return x-6; }\n#if defined(__GNUG__)\n#  pragma GCC diagnostic push\n#  pragma GCC diagnostic ignored \"-Wdeprecated\"\n#endif\n    int m7(int x) throw() { return x-7; }\n    int m8(int x) const throw() { return x-8; }\n#if defined(__GNUG__)\n#  pragma GCC diagnostic pop\n#endif\n};\n}\n\n\nTEST_SUBMODULE(constants_and_functions, m) {\n    // test_constants\n    m.attr(\"some_constant\") = py::int_(14);\n\n    // test_function_overloading\n    m.def(\"test_function\", &test_function1);\n    m.def(\"test_function\", &test_function2);\n    m.def(\"test_function\", &test_function3);\n\n#if defined(PYBIND11_OVERLOAD_CAST)\n    m.def(\"test_function\", py::overload_cast<>(&test_function4));\n    m.def(\"test_function\", py::overload_cast<char *>(&test_function4));\n    m.def(\"test_function\", py::overload_cast<int, float>(&test_function4));\n    m.def(\"test_function\", py::overload_cast<float, int>(&test_function4));\n#else\n    m.def(\"test_function\", static_cast<py::str (*)()>(&test_function4));\n    m.def(\"test_function\", static_cast<py::str (*)(char *)>(&test_function4));\n    m.def(\"test_function\", static_cast<py::str (*)(int, float)>(&test_function4));\n    
m.def(\"test_function\", static_cast<py::str (*)(float, int)>(&test_function4));\n#endif\n\n    py::enum_<MyEnum>(m, \"MyEnum\")\n        .value(\"EFirstEntry\", EFirstEntry)\n        .value(\"ESecondEntry\", ESecondEntry)\n        .export_values();\n\n    // test_bytes\n    m.def(\"return_bytes\", &return_bytes);\n    m.def(\"print_bytes\", &print_bytes);\n\n    // test_exception_specifiers\n    using namespace test_exc_sp;\n    py::class_<C>(m, \"C\")\n        .def(py::init<>())\n        .def(\"m1\", &C::m1)\n        .def(\"m2\", &C::m2)\n        .def(\"m3\", &C::m3)\n        .def(\"m4\", &C::m4)\n        .def(\"m5\", &C::m5)\n        .def(\"m6\", &C::m6)\n        .def(\"m7\", &C::m7)\n        .def(\"m8\", &C::m8)\n        ;\n    m.def(\"f1\", f1);\n    m.def(\"f2\", f2);\n    m.def(\"f3\", f3);\n    m.def(\"f4\", f4);\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_constants_and_functions.py",
    "content": "from pybind11_tests import constants_and_functions as m\n\n\ndef test_constants():\n    assert m.some_constant == 14\n\n\ndef test_function_overloading():\n    assert m.test_function() == \"test_function()\"\n    assert m.test_function(7) == \"test_function(7)\"\n    assert m.test_function(m.MyEnum.EFirstEntry) == \"test_function(enum=1)\"\n    assert m.test_function(m.MyEnum.ESecondEntry) == \"test_function(enum=2)\"\n\n    assert m.test_function() == \"test_function()\"\n    assert m.test_function(\"abcd\") == \"test_function(char *)\"\n    assert m.test_function(1, 1.0) == \"test_function(int, float)\"\n    assert m.test_function(1, 1.0) == \"test_function(int, float)\"\n    assert m.test_function(2.0, 2) == \"test_function(float, int)\"\n\n\ndef test_bytes():\n    assert m.print_bytes(m.return_bytes()) == \"bytes[1 0 2 0]\"\n\n\ndef test_exception_specifiers():\n    c = m.C()\n    assert c.m1(2) == 1\n    assert c.m2(3) == 1\n    assert c.m3(5) == 2\n    assert c.m4(7) == 3\n    assert c.m5(10) == 5\n    assert c.m6(14) == 8\n    assert c.m7(20) == 13\n    assert c.m8(29) == 21\n\n    assert m.f1(33) == 34\n    assert m.f2(53) == 55\n    assert m.f3(86) == 89\n    assert m.f4(140) == 144\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_copy_move.cpp",
    "content": "/*\n    tests/test_copy_move_policies.cpp -- 'copy' and 'move' return value policies\n                                         and related tests\n\n    Copyright (c) 2016 Ben North <ben@redfrontdoor.org>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <pybind11/stl.h>\n\ntemplate <typename derived>\nstruct empty {\n    static const derived& get_one() { return instance_; }\n    static derived instance_;\n};\n\nstruct lacking_copy_ctor : public empty<lacking_copy_ctor> {\n    lacking_copy_ctor() {}\n    lacking_copy_ctor(const lacking_copy_ctor& other) = delete;\n};\n\ntemplate <> lacking_copy_ctor empty<lacking_copy_ctor>::instance_ = {};\n\nstruct lacking_move_ctor : public empty<lacking_move_ctor> {\n    lacking_move_ctor() {}\n    lacking_move_ctor(const lacking_move_ctor& other) = delete;\n    lacking_move_ctor(lacking_move_ctor&& other) = delete;\n};\n\ntemplate <> lacking_move_ctor empty<lacking_move_ctor>::instance_ = {};\n\n/* Custom type caster move/copy test classes */\nclass MoveOnlyInt {\npublic:\n    MoveOnlyInt() { print_default_created(this); }\n    MoveOnlyInt(int v) : value{std::move(v)} { print_created(this, value); }\n    MoveOnlyInt(MoveOnlyInt &&m) { print_move_created(this, m.value); std::swap(value, m.value); }\n    MoveOnlyInt &operator=(MoveOnlyInt &&m) { print_move_assigned(this, m.value); std::swap(value, m.value); return *this; }\n    MoveOnlyInt(const MoveOnlyInt &) = delete;\n    MoveOnlyInt &operator=(const MoveOnlyInt &) = delete;\n    ~MoveOnlyInt() { print_destroyed(this); }\n\n    int value;\n};\nclass MoveOrCopyInt {\npublic:\n    MoveOrCopyInt() { print_default_created(this); }\n    MoveOrCopyInt(int v) : value{std::move(v)} { print_created(this, value); }\n    MoveOrCopyInt(MoveOrCopyInt &&m) { print_move_created(this, m.value); std::swap(value, 
m.value); }\n    MoveOrCopyInt &operator=(MoveOrCopyInt &&m) { print_move_assigned(this, m.value); std::swap(value, m.value); return *this; }\n    MoveOrCopyInt(const MoveOrCopyInt &c) { print_copy_created(this, c.value); value = c.value; }\n    MoveOrCopyInt &operator=(const MoveOrCopyInt &c) { print_copy_assigned(this, c.value); value = c.value; return *this; }\n    ~MoveOrCopyInt() { print_destroyed(this); }\n\n    int value;\n};\nclass CopyOnlyInt {\npublic:\n    CopyOnlyInt() { print_default_created(this); }\n    CopyOnlyInt(int v) : value{std::move(v)} { print_created(this, value); }\n    CopyOnlyInt(const CopyOnlyInt &c) { print_copy_created(this, c.value); value = c.value; }\n    CopyOnlyInt &operator=(const CopyOnlyInt &c) { print_copy_assigned(this, c.value); value = c.value; return *this; }\n    ~CopyOnlyInt() { print_destroyed(this); }\n\n    int value;\n};\nNAMESPACE_BEGIN(pybind11)\nNAMESPACE_BEGIN(detail)\ntemplate <> struct type_caster<MoveOnlyInt> {\n    PYBIND11_TYPE_CASTER(MoveOnlyInt, _(\"MoveOnlyInt\"));\n    bool load(handle src, bool) { value = MoveOnlyInt(src.cast<int>()); return true; }\n    static handle cast(const MoveOnlyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); }\n};\n\ntemplate <> struct type_caster<MoveOrCopyInt> {\n    PYBIND11_TYPE_CASTER(MoveOrCopyInt, _(\"MoveOrCopyInt\"));\n    bool load(handle src, bool) { value = MoveOrCopyInt(src.cast<int>()); return true; }\n    static handle cast(const MoveOrCopyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); }\n};\n\ntemplate <> struct type_caster<CopyOnlyInt> {\nprotected:\n    CopyOnlyInt value;\npublic:\n    static constexpr auto name = _(\"CopyOnlyInt\");\n    bool load(handle src, bool) { value = CopyOnlyInt(src.cast<int>()); return true; }\n    static handle cast(const CopyOnlyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); }\n    static handle cast(const CopyOnlyInt *src, 
return_value_policy policy, handle parent) {\n        if (!src) return none().release();\n        return cast(*src, policy, parent);\n    }\n    operator CopyOnlyInt*() { return &value; }\n    operator CopyOnlyInt&() { return value; }\n    template <typename T> using cast_op_type = pybind11::detail::cast_op_type<T>;\n};\nNAMESPACE_END(detail)\nNAMESPACE_END(pybind11)\n\nTEST_SUBMODULE(copy_move_policies, m) {\n    // test_lacking_copy_ctor\n    py::class_<lacking_copy_ctor>(m, \"lacking_copy_ctor\")\n        .def_static(\"get_one\", &lacking_copy_ctor::get_one,\n                    py::return_value_policy::copy);\n    // test_lacking_move_ctor\n    py::class_<lacking_move_ctor>(m, \"lacking_move_ctor\")\n        .def_static(\"get_one\", &lacking_move_ctor::get_one,\n                    py::return_value_policy::move);\n\n    // test_move_and_copy_casts\n    m.def(\"move_and_copy_casts\", [](py::object o) {\n        int r = 0;\n        r += py::cast<MoveOrCopyInt>(o).value; /* moves */\n        r += py::cast<MoveOnlyInt>(o).value; /* moves */\n        r += py::cast<CopyOnlyInt>(o).value; /* copies */\n        MoveOrCopyInt m1(py::cast<MoveOrCopyInt>(o)); /* moves */\n        MoveOnlyInt m2(py::cast<MoveOnlyInt>(o)); /* moves */\n        CopyOnlyInt m3(py::cast<CopyOnlyInt>(o)); /* copies */\n        r += m1.value + m2.value + m3.value;\n\n        return r;\n    });\n\n    // test_move_and_copy_loads\n    m.def(\"move_only\", [](MoveOnlyInt m) { return m.value; });\n    m.def(\"move_or_copy\", [](MoveOrCopyInt m) { return m.value; });\n    m.def(\"copy_only\", [](CopyOnlyInt m) { return m.value; });\n    m.def(\"move_pair\", [](std::pair<MoveOnlyInt, MoveOrCopyInt> p) {\n        return p.first.value + p.second.value;\n    });\n    m.def(\"move_tuple\", [](std::tuple<MoveOnlyInt, MoveOrCopyInt, MoveOnlyInt> t) {\n        return std::get<0>(t).value + std::get<1>(t).value + std::get<2>(t).value;\n    });\n    m.def(\"copy_tuple\", [](std::tuple<CopyOnlyInt, CopyOnlyInt> 
t) {\n        return std::get<0>(t).value + std::get<1>(t).value;\n    });\n    m.def(\"move_copy_nested\", [](std::pair<MoveOnlyInt, std::pair<std::tuple<MoveOrCopyInt, CopyOnlyInt, std::tuple<MoveOnlyInt>>, MoveOrCopyInt>> x) {\n        return x.first.value + std::get<0>(x.second.first).value + std::get<1>(x.second.first).value +\n            std::get<0>(std::get<2>(x.second.first)).value + x.second.second.value;\n    });\n    m.def(\"move_and_copy_cstats\", []() {\n        ConstructorStats::gc();\n        // Reset counts to 0 so that previous tests don't affect later ones:\n        auto &mc = ConstructorStats::get<MoveOrCopyInt>();\n        mc.move_assignments = mc.move_constructions = mc.copy_assignments = mc.copy_constructions = 0;\n        auto &mo = ConstructorStats::get<MoveOnlyInt>();\n        mo.move_assignments = mo.move_constructions = mo.copy_assignments = mo.copy_constructions = 0;\n        auto &co = ConstructorStats::get<CopyOnlyInt>();\n        co.move_assignments = co.move_constructions = co.copy_assignments = co.copy_constructions = 0;\n        py::dict d;\n        d[\"MoveOrCopyInt\"] = py::cast(mc, py::return_value_policy::reference);\n        d[\"MoveOnlyInt\"] = py::cast(mo, py::return_value_policy::reference);\n        d[\"CopyOnlyInt\"] = py::cast(co, py::return_value_policy::reference);\n        return d;\n    });\n#ifdef PYBIND11_HAS_OPTIONAL\n    // test_move_and_copy_load_optional\n    m.attr(\"has_optional\") = true;\n    m.def(\"move_optional\", [](std::optional<MoveOnlyInt> o) {\n        return o->value;\n    });\n    m.def(\"move_or_copy_optional\", [](std::optional<MoveOrCopyInt> o) {\n        return o->value;\n    });\n    m.def(\"copy_optional\", [](std::optional<CopyOnlyInt> o) {\n        return o->value;\n    });\n    m.def(\"move_optional_tuple\", [](std::optional<std::tuple<MoveOrCopyInt, MoveOnlyInt, CopyOnlyInt>> x) {\n        return std::get<0>(*x).value + std::get<1>(*x).value + std::get<2>(*x).value;\n    });\n#else\n    
m.attr(\"has_optional\") = false;\n#endif\n\n    // #70 compilation issue if operator new is not public\n    struct PrivateOpNew {\n        int value = 1;\n    private:\n#if defined(_MSC_VER)\n#  pragma warning(disable: 4822) // warning C4822: local class member function does not have a body\n#endif\n        void *operator new(size_t bytes);\n    };\n    py::class_<PrivateOpNew>(m, \"PrivateOpNew\").def_readonly(\"value\", &PrivateOpNew::value);\n    m.def(\"private_op_new_value\", []() { return PrivateOpNew(); });\n    m.def(\"private_op_new_reference\", []() -> const PrivateOpNew & {\n        static PrivateOpNew x{};\n        return x;\n    }, py::return_value_policy::reference);\n\n    // test_move_fallback\n    // #389: rvp::move should fall-through to copy on non-movable objects\n    struct MoveIssue1 {\n        int v;\n        MoveIssue1(int v) : v{v} {}\n        MoveIssue1(const MoveIssue1 &c) = default;\n        MoveIssue1(MoveIssue1 &&) = delete;\n    };\n    py::class_<MoveIssue1>(m, \"MoveIssue1\").def(py::init<int>()).def_readwrite(\"value\", &MoveIssue1::v);\n\n    struct MoveIssue2 {\n        int v;\n        MoveIssue2(int v) : v{v} {}\n        MoveIssue2(MoveIssue2 &&) = default;\n    };\n    py::class_<MoveIssue2>(m, \"MoveIssue2\").def(py::init<int>()).def_readwrite(\"value\", &MoveIssue2::v);\n\n    m.def(\"get_moveissue1\", [](int i) { return new MoveIssue1(i); }, py::return_value_policy::move);\n    m.def(\"get_moveissue2\", [](int i) { return MoveIssue2(i); }, py::return_value_policy::move);\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_copy_move.py",
    "content": "import pytest\nfrom pybind11_tests import copy_move_policies as m\n\n\ndef test_lacking_copy_ctor():\n    with pytest.raises(RuntimeError) as excinfo:\n        m.lacking_copy_ctor.get_one()\n    assert \"the object is non-copyable!\" in str(excinfo.value)\n\n\ndef test_lacking_move_ctor():\n    with pytest.raises(RuntimeError) as excinfo:\n        m.lacking_move_ctor.get_one()\n    assert \"the object is neither movable nor copyable!\" in str(excinfo.value)\n\n\ndef test_move_and_copy_casts():\n    \"\"\"Cast some values in C++ via custom type casters and count the number of moves/copies.\"\"\"\n\n    cstats = m.move_and_copy_cstats()\n    c_m, c_mc, c_c = cstats[\"MoveOnlyInt\"], cstats[\"MoveOrCopyInt\"], cstats[\"CopyOnlyInt\"]\n\n    # The type move constructions/assignments below each get incremented: the move assignment comes\n    # from the type_caster load; the move construction happens when extracting that via a cast or\n    # loading into an argument.\n    assert m.move_and_copy_casts(3) == 18\n    assert c_m.copy_assignments + c_m.copy_constructions == 0\n    assert c_m.move_assignments == 2\n    assert c_m.move_constructions >= 2\n    assert c_mc.alive() == 0\n    assert c_mc.copy_assignments + c_mc.copy_constructions == 0\n    assert c_mc.move_assignments == 2\n    assert c_mc.move_constructions >= 2\n    assert c_c.alive() == 0\n    assert c_c.copy_assignments == 2\n    assert c_c.copy_constructions >= 2\n    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0\n\n\ndef test_move_and_copy_loads():\n    \"\"\"Call some functions that load arguments via custom type casters and count the number of\n    moves/copies.\"\"\"\n\n    cstats = m.move_and_copy_cstats()\n    c_m, c_mc, c_c = cstats[\"MoveOnlyInt\"], cstats[\"MoveOrCopyInt\"], cstats[\"CopyOnlyInt\"]\n\n    assert m.move_only(10) == 10  # 1 move, c_m\n    assert m.move_or_copy(11) == 11  # 1 move, c_mc\n    assert m.copy_only(12) == 12  # 1 copy, c_c\n    assert m.move_pair((13, 
14)) == 27  # 1 c_m move, 1 c_mc move\n    assert m.move_tuple((15, 16, 17)) == 48  # 2 c_m moves, 1 c_mc move\n    assert m.copy_tuple((18, 19)) == 37  # 2 c_c copies\n    # Direct constructions: 2 c_m moves, 2 c_mc moves, 1 c_c copy\n    # Extra moves/copies when moving pairs/tuples: 3 c_m, 3 c_mc, 2 c_c\n    assert m.move_copy_nested((1, ((2, 3, (4,)), 5))) == 15\n\n    assert c_m.copy_assignments + c_m.copy_constructions == 0\n    assert c_m.move_assignments == 6\n    assert c_m.move_constructions == 9\n    assert c_mc.copy_assignments + c_mc.copy_constructions == 0\n    assert c_mc.move_assignments == 5\n    assert c_mc.move_constructions == 8\n    assert c_c.copy_assignments == 4\n    assert c_c.copy_constructions == 6\n    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0\n\n\n@pytest.mark.skipif(not m.has_optional, reason='no <optional>')\ndef test_move_and_copy_load_optional():\n    \"\"\"Tests move/copy loads of std::optional arguments\"\"\"\n\n    cstats = m.move_and_copy_cstats()\n    c_m, c_mc, c_c = cstats[\"MoveOnlyInt\"], cstats[\"MoveOrCopyInt\"], cstats[\"CopyOnlyInt\"]\n\n    # The extra move/copy constructions below come from the std::optional move (which has to move\n    # its arguments):\n    assert m.move_optional(10) == 10  # c_m: 1 move assign, 2 move construct\n    assert m.move_or_copy_optional(11) == 11  # c_mc: 1 move assign, 2 move construct\n    assert m.copy_optional(12) == 12  # c_c: 1 copy assign, 2 copy construct\n    # 1 move assign + move construct moves each of c_m, c_mc, 1 c_c copy\n    # +1 move/copy construct each from moving the tuple\n    # +1 move/copy construct each from moving the optional (which moves the tuple again)\n    assert m.move_optional_tuple((3, 4, 5)) == 12\n\n    assert c_m.copy_assignments + c_m.copy_constructions == 0\n    assert c_m.move_assignments == 2\n    assert c_m.move_constructions == 5\n    assert c_mc.copy_assignments + c_mc.copy_constructions == 0\n    assert c_mc.move_assignments == 2\n    
assert c_mc.move_constructions == 5\n    assert c_c.copy_assignments == 2\n    assert c_c.copy_constructions == 5\n    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0\n\n\ndef test_private_op_new():\n    \"\"\"An object with a private `operator new` cannot be returned by value\"\"\"\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.private_op_new_value()\n    assert \"the object is neither movable nor copyable\" in str(excinfo.value)\n\n    assert m.private_op_new_reference().value == 1\n\n\ndef test_move_fallback():\n    \"\"\"#389: rvp::move should fall-through to copy on non-movable objects\"\"\"\n\n    m2 = m.get_moveissue2(2)\n    assert m2.value == 2\n    m1 = m.get_moveissue1(1)\n    assert m1.value == 1\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_docstring_options.cpp",
    "content": "/*\n    tests/test_docstring_options.cpp -- generation of docstrings and signatures\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\nTEST_SUBMODULE(docstring_options, m) {\n    // test_docstring_options\n    {\n        py::options options;\n        options.disable_function_signatures();\n\n        m.def(\"test_function1\", [](int, int) {}, py::arg(\"a\"), py::arg(\"b\"));\n        m.def(\"test_function2\", [](int, int) {}, py::arg(\"a\"), py::arg(\"b\"), \"A custom docstring\");\n\n        m.def(\"test_overloaded1\", [](int) {}, py::arg(\"i\"), \"Overload docstring\");\n        m.def(\"test_overloaded1\", [](double) {}, py::arg(\"d\"));\n\n        m.def(\"test_overloaded2\", [](int) {}, py::arg(\"i\"), \"overload docstring 1\");\n        m.def(\"test_overloaded2\", [](double) {}, py::arg(\"d\"), \"overload docstring 2\");\n\n        m.def(\"test_overloaded3\", [](int) {}, py::arg(\"i\"));\n        m.def(\"test_overloaded3\", [](double) {}, py::arg(\"d\"), \"Overload docstr\");\n\n        options.enable_function_signatures();\n\n        m.def(\"test_function3\", [](int, int) {}, py::arg(\"a\"), py::arg(\"b\"));\n        m.def(\"test_function4\", [](int, int) {}, py::arg(\"a\"), py::arg(\"b\"), \"A custom docstring\");\n\n        options.disable_function_signatures().disable_user_defined_docstrings();\n\n        m.def(\"test_function5\", [](int, int) {}, py::arg(\"a\"), py::arg(\"b\"), \"A custom docstring\");\n\n        {\n            py::options nested_options;\n            nested_options.enable_user_defined_docstrings();\n            m.def(\"test_function6\", [](int, int) {}, py::arg(\"a\"), py::arg(\"b\"), \"A custom docstring\");\n        }\n    }\n\n    m.def(\"test_function7\", [](int, int) {}, py::arg(\"a\"), py::arg(\"b\"), \"A custom docstring\");\n\n    {\n    
    py::options options;\n        options.disable_user_defined_docstrings();\n\n        struct DocstringTestFoo {\n            int value;\n            void setValue(int v) { value = v; }\n            int getValue() const { return value; }\n        };\n        py::class_<DocstringTestFoo>(m, \"DocstringTestFoo\", \"This is a class docstring\")\n            .def_property(\"value_prop\", &DocstringTestFoo::getValue, &DocstringTestFoo::setValue, \"This is a property docstring\")\n        ;\n    }\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_docstring_options.py",
    "content": "from pybind11_tests import docstring_options as m\n\n\ndef test_docstring_options():\n    # options.disable_function_signatures()\n    assert not m.test_function1.__doc__\n\n    assert m.test_function2.__doc__ == \"A custom docstring\"\n\n    # docstring specified on just the first overload definition:\n    assert m.test_overloaded1.__doc__ == \"Overload docstring\"\n\n    # docstring on both overloads:\n    assert m.test_overloaded2.__doc__ == \"overload docstring 1\\noverload docstring 2\"\n\n    # docstring on only second overload:\n    assert m.test_overloaded3.__doc__ == \"Overload docstr\"\n\n    # options.enable_function_signatures()\n    assert m.test_function3.__doc__ .startswith(\"test_function3(a: int, b: int) -> None\")\n\n    assert m.test_function4.__doc__ .startswith(\"test_function4(a: int, b: int) -> None\")\n    assert m.test_function4.__doc__ .endswith(\"A custom docstring\\n\")\n\n    # options.disable_function_signatures()\n    # options.disable_user_defined_docstrings()\n    assert not m.test_function5.__doc__\n\n    # nested options.enable_user_defined_docstrings()\n    assert m.test_function6.__doc__ == \"A custom docstring\"\n\n    # RAII destructor\n    assert m.test_function7.__doc__ .startswith(\"test_function7(a: int, b: int) -> None\")\n    assert m.test_function7.__doc__ .endswith(\"A custom docstring\\n\")\n\n    # Suppression of user-defined docstrings for non-function objects\n    assert not m.DocstringTestFoo.__doc__\n    assert not m.DocstringTestFoo.value_prop.__doc__\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_eigen.cpp",
    "content": "/*\n    tests/eigen.cpp -- automatic conversion of Eigen types\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <pybind11/eigen.h>\n#include <pybind11/stl.h>\n\n#if defined(_MSC_VER)\n#  pragma warning(disable: 4996) // C4996: std::unary_negation is deprecated\n#endif\n\n#include <Eigen/Cholesky>\n\nusing MatrixXdR = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;\n\n\n\n// Sets/resets a testing reference matrix to have values of 10*r + c, where r and c are the\n// (1-based) row/column number.\ntemplate <typename M> void reset_ref(M &x) {\n    for (int i = 0; i < x.rows(); i++) for (int j = 0; j < x.cols(); j++)\n        x(i, j) = 11 + 10*i + j;\n}\n\n// Returns a static, column-major matrix\nEigen::MatrixXd &get_cm() {\n    static Eigen::MatrixXd *x;\n    if (!x) {\n        x = new Eigen::MatrixXd(3, 3);\n        reset_ref(*x);\n    }\n    return *x;\n}\n// Likewise, but row-major\nMatrixXdR &get_rm() {\n    static MatrixXdR *x;\n    if (!x) {\n        x = new MatrixXdR(3, 3);\n        reset_ref(*x);\n    }\n    return *x;\n}\n// Resets the values of the static matrices returned by get_cm()/get_rm()\nvoid reset_refs() {\n    reset_ref(get_cm());\n    reset_ref(get_rm());\n}\n\n// Returns element 2,1 from a matrix (used to test copy/nocopy)\ndouble get_elem(Eigen::Ref<const Eigen::MatrixXd> m) { return m(2, 1); };\n\n\n// Returns a matrix with 10*r + 100*c added to each matrix element (to help test that the matrix\n// reference is referencing rows/columns correctly).\ntemplate <typename MatrixArgType> Eigen::MatrixXd adjust_matrix(MatrixArgType m) {\n    Eigen::MatrixXd ret(m);\n    for (int c = 0; c < m.cols(); c++) for (int r = 0; r < m.rows(); r++)\n        ret(r, c) += 10*r + 100*c;\n    return 
ret;\n}\n\nstruct CustomOperatorNew {\n    CustomOperatorNew() = default;\n\n    Eigen::Matrix4d a = Eigen::Matrix4d::Zero();\n    Eigen::Matrix4d b = Eigen::Matrix4d::Identity();\n\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;\n};\n\nTEST_SUBMODULE(eigen, m) {\n    using FixedMatrixR = Eigen::Matrix<float, 5, 6, Eigen::RowMajor>;\n    using FixedMatrixC = Eigen::Matrix<float, 5, 6>;\n    using DenseMatrixR = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;\n    using DenseMatrixC = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic>;\n    using FourRowMatrixC = Eigen::Matrix<float, 4, Eigen::Dynamic>;\n    using FourColMatrixC = Eigen::Matrix<float, Eigen::Dynamic, 4>;\n    using FourRowMatrixR = Eigen::Matrix<float, 4, Eigen::Dynamic>;\n    using FourColMatrixR = Eigen::Matrix<float, Eigen::Dynamic, 4>;\n    using SparseMatrixR = Eigen::SparseMatrix<float, Eigen::RowMajor>;\n    using SparseMatrixC = Eigen::SparseMatrix<float>;\n\n    m.attr(\"have_eigen\") = true;\n\n    // various tests\n    m.def(\"double_col\", [](const Eigen::VectorXf &x) -> Eigen::VectorXf { return 2.0f * x; });\n    m.def(\"double_row\", [](const Eigen::RowVectorXf &x) -> Eigen::RowVectorXf { return 2.0f * x; });\n    m.def(\"double_complex\", [](const Eigen::VectorXcf &x) -> Eigen::VectorXcf { return 2.0f * x; });\n    m.def(\"double_threec\", [](py::EigenDRef<Eigen::Vector3f> x) { x *= 2; });\n    m.def(\"double_threer\", [](py::EigenDRef<Eigen::RowVector3f> x) { x *= 2; });\n    m.def(\"double_mat_cm\", [](Eigen::MatrixXf x) -> Eigen::MatrixXf { return 2.0f * x; });\n    m.def(\"double_mat_rm\", [](DenseMatrixR x) -> DenseMatrixR { return 2.0f * x; });\n\n    // test_eigen_ref_to_python\n    // Different ways of passing via Eigen::Ref; the first and second are the Eigen-recommended\n    m.def(\"cholesky1\", [](Eigen::Ref<MatrixXdR> x) -> Eigen::MatrixXd { return x.llt().matrixL(); });\n    m.def(\"cholesky2\", [](const Eigen::Ref<const MatrixXdR> &x) -> Eigen::MatrixXd { 
return x.llt().matrixL(); });\n    m.def(\"cholesky3\", [](const Eigen::Ref<MatrixXdR> &x) -> Eigen::MatrixXd { return x.llt().matrixL(); });\n    m.def(\"cholesky4\", [](Eigen::Ref<const MatrixXdR> x) -> Eigen::MatrixXd { return x.llt().matrixL(); });\n\n    // test_eigen_ref_mutators\n    // Mutators: these add some value to the given element using Eigen, but Eigen should be mapping into\n    // the numpy array data and so the result should show up there.  There are three versions: one that\n    // works on a contiguous-row matrix (numpy's default), one for a contiguous-column matrix, and one\n    // for any matrix.\n    auto add_rm = [](Eigen::Ref<MatrixXdR> x, int r, int c, double v) { x(r,c) += v; };\n    auto add_cm = [](Eigen::Ref<Eigen::MatrixXd> x, int r, int c, double v) { x(r,c) += v; };\n\n    // Mutators (Eigen maps into numpy variables):\n    m.def(\"add_rm\", add_rm); // Only takes row-contiguous\n    m.def(\"add_cm\", add_cm); // Only takes column-contiguous\n    // Overloaded versions that will accept either row or column contiguous:\n    m.def(\"add1\", add_rm);\n    m.def(\"add1\", add_cm);\n    m.def(\"add2\", add_cm);\n    m.def(\"add2\", add_rm);\n    // This one accepts a matrix of any stride:\n    m.def(\"add_any\", [](py::EigenDRef<Eigen::MatrixXd> x, int r, int c, double v) { x(r,c) += v; });\n\n    // Return mutable references (numpy maps into eigen variables)\n    m.def(\"get_cm_ref\", []() { return Eigen::Ref<Eigen::MatrixXd>(get_cm()); });\n    m.def(\"get_rm_ref\", []() { return Eigen::Ref<MatrixXdR>(get_rm()); });\n    // The same references, but non-mutable (numpy maps into eigen variables, but is !writeable)\n    m.def(\"get_cm_const_ref\", []() { return Eigen::Ref<const Eigen::MatrixXd>(get_cm()); });\n    m.def(\"get_rm_const_ref\", []() { return Eigen::Ref<const MatrixXdR>(get_rm()); });\n\n    m.def(\"reset_refs\", reset_refs); // Restores get_{cm,rm}_ref to original values\n\n    // Increments and returns ref to (same) 
matrix\n    m.def(\"incr_matrix\", [](Eigen::Ref<Eigen::MatrixXd> m, double v) {\n        m += Eigen::MatrixXd::Constant(m.rows(), m.cols(), v);\n        return m;\n    }, py::return_value_policy::reference);\n\n    // Same, but accepts a matrix of any strides\n    m.def(\"incr_matrix_any\", [](py::EigenDRef<Eigen::MatrixXd> m, double v) {\n        m += Eigen::MatrixXd::Constant(m.rows(), m.cols(), v);\n        return m;\n    }, py::return_value_policy::reference);\n\n    // Returns an eigen slice of even rows\n    m.def(\"even_rows\", [](py::EigenDRef<Eigen::MatrixXd> m) {\n        return py::EigenDMap<Eigen::MatrixXd>(\n                m.data(), (m.rows() + 1) / 2, m.cols(),\n                py::EigenDStride(m.outerStride(), 2 * m.innerStride()));\n    }, py::return_value_policy::reference);\n\n    // Returns an eigen slice of even columns\n    m.def(\"even_cols\", [](py::EigenDRef<Eigen::MatrixXd> m) {\n        return py::EigenDMap<Eigen::MatrixXd>(\n                m.data(), m.rows(), (m.cols() + 1) / 2,\n                py::EigenDStride(2 * m.outerStride(), m.innerStride()));\n    }, py::return_value_policy::reference);\n\n    // Returns diagonals: a vector-like object with an inner stride != 1\n    m.def(\"diagonal\", [](const Eigen::Ref<const Eigen::MatrixXd> &x) { return x.diagonal(); });\n    m.def(\"diagonal_1\", [](const Eigen::Ref<const Eigen::MatrixXd> &x) { return x.diagonal<1>(); });\n    m.def(\"diagonal_n\", [](const Eigen::Ref<const Eigen::MatrixXd> &x, int index) { return x.diagonal(index); });\n\n    // Return a block of a matrix (gives non-standard strides)\n    m.def(\"block\", [](const Eigen::Ref<const Eigen::MatrixXd> &x, int start_row, int start_col, int block_rows, int block_cols) {\n        return x.block(start_row, start_col, block_rows, block_cols);\n    });\n\n    // test_eigen_return_references, test_eigen_keepalive\n    // return value referencing/copying tests:\n    class ReturnTester {\n        Eigen::MatrixXd mat = create();\n    
public:\n        ReturnTester() { print_created(this); }\n        ~ReturnTester() { print_destroyed(this); }\n        static Eigen::MatrixXd create() { return Eigen::MatrixXd::Ones(10, 10); }\n        static const Eigen::MatrixXd createConst() { return Eigen::MatrixXd::Ones(10, 10); }\n        Eigen::MatrixXd &get() { return mat; }\n        Eigen::MatrixXd *getPtr() { return &mat; }\n        const Eigen::MatrixXd &view() { return mat; }\n        const Eigen::MatrixXd *viewPtr() { return &mat; }\n        Eigen::Ref<Eigen::MatrixXd> ref() { return mat; }\n        Eigen::Ref<const Eigen::MatrixXd> refConst() { return mat; }\n        Eigen::Block<Eigen::MatrixXd> block(int r, int c, int nrow, int ncol) { return mat.block(r, c, nrow, ncol); }\n        Eigen::Block<const Eigen::MatrixXd> blockConst(int r, int c, int nrow, int ncol) const { return mat.block(r, c, nrow, ncol); }\n        py::EigenDMap<Eigen::Matrix2d> corners() { return py::EigenDMap<Eigen::Matrix2d>(mat.data(),\n                    py::EigenDStride(mat.outerStride() * (mat.outerSize()-1), mat.innerStride() * (mat.innerSize()-1))); }\n        py::EigenDMap<const Eigen::Matrix2d> cornersConst() const { return py::EigenDMap<const Eigen::Matrix2d>(mat.data(),\n                    py::EigenDStride(mat.outerStride() * (mat.outerSize()-1), mat.innerStride() * (mat.innerSize()-1))); }\n    };\n    using rvp = py::return_value_policy;\n    py::class_<ReturnTester>(m, \"ReturnTester\")\n        .def(py::init<>())\n        .def_static(\"create\", &ReturnTester::create)\n        .def_static(\"create_const\", &ReturnTester::createConst)\n        .def(\"get\", &ReturnTester::get, rvp::reference_internal)\n        .def(\"get_ptr\", &ReturnTester::getPtr, rvp::reference_internal)\n        .def(\"view\", &ReturnTester::view, rvp::reference_internal)\n        .def(\"view_ptr\", &ReturnTester::view, rvp::reference_internal)\n        .def(\"copy_get\", &ReturnTester::get)   // Default rvp: copy\n        .def(\"copy_view\", 
&ReturnTester::view) //         \"\n        .def(\"ref\", &ReturnTester::ref) // Default for Ref is to reference\n        .def(\"ref_const\", &ReturnTester::refConst) // Likewise, but const\n        .def(\"ref_safe\", &ReturnTester::ref, rvp::reference_internal)\n        .def(\"ref_const_safe\", &ReturnTester::refConst, rvp::reference_internal)\n        .def(\"copy_ref\", &ReturnTester::ref, rvp::copy)\n        .def(\"copy_ref_const\", &ReturnTester::refConst, rvp::copy)\n        .def(\"block\", &ReturnTester::block)\n        .def(\"block_safe\", &ReturnTester::block, rvp::reference_internal)\n        .def(\"block_const\", &ReturnTester::blockConst, rvp::reference_internal)\n        .def(\"copy_block\", &ReturnTester::block, rvp::copy)\n        .def(\"corners\", &ReturnTester::corners, rvp::reference_internal)\n        .def(\"corners_const\", &ReturnTester::cornersConst, rvp::reference_internal)\n        ;\n\n    // test_special_matrix_objects\n    // Returns a DiagonalMatrix with diagonal (1,2,3,...)\n    m.def(\"incr_diag\", [](int k) {\n        Eigen::DiagonalMatrix<int, Eigen::Dynamic> m(k);\n        for (int i = 0; i < k; i++) m.diagonal()[i] = i+1;\n        return m;\n    });\n\n    // Returns a SelfAdjointView referencing the lower triangle of m\n    m.def(\"symmetric_lower\", [](const Eigen::MatrixXi &m) {\n            return m.selfadjointView<Eigen::Lower>();\n    });\n    // Returns a SelfAdjointView referencing the lower triangle of m\n    m.def(\"symmetric_upper\", [](const Eigen::MatrixXi &m) {\n            return m.selfadjointView<Eigen::Upper>();\n    });\n\n    // Test matrix for various functions below.\n    Eigen::MatrixXf mat(5, 6);\n    mat << 0,  3,  0,  0,  0, 11,\n           22, 0,  0,  0, 17, 11,\n           7,  5,  0,  1,  0, 11,\n           0,  0,  0,  0,  0, 11,\n           0,  0, 14,  0,  8, 11;\n\n    // test_fixed, and various other tests\n    m.def(\"fixed_r\", [mat]() -> FixedMatrixR { return FixedMatrixR(mat); });\n    
m.def(\"fixed_r_const\", [mat]() -> const FixedMatrixR { return FixedMatrixR(mat); });\n    m.def(\"fixed_c\", [mat]() -> FixedMatrixC { return FixedMatrixC(mat); });\n    m.def(\"fixed_copy_r\", [](const FixedMatrixR &m) -> FixedMatrixR { return m; });\n    m.def(\"fixed_copy_c\", [](const FixedMatrixC &m) -> FixedMatrixC { return m; });\n    // test_mutator_descriptors\n    m.def(\"fixed_mutator_r\", [](Eigen::Ref<FixedMatrixR>) {});\n    m.def(\"fixed_mutator_c\", [](Eigen::Ref<FixedMatrixC>) {});\n    m.def(\"fixed_mutator_a\", [](py::EigenDRef<FixedMatrixC>) {});\n    // test_dense\n    m.def(\"dense_r\", [mat]() -> DenseMatrixR { return DenseMatrixR(mat); });\n    m.def(\"dense_c\", [mat]() -> DenseMatrixC { return DenseMatrixC(mat); });\n    m.def(\"dense_copy_r\", [](const DenseMatrixR &m) -> DenseMatrixR { return m; });\n    m.def(\"dense_copy_c\", [](const DenseMatrixC &m) -> DenseMatrixC { return m; });\n    // test_sparse, test_sparse_signature\n    m.def(\"sparse_r\", [mat]() -> SparseMatrixR { return Eigen::SparseView<Eigen::MatrixXf>(mat); });\n    m.def(\"sparse_c\", [mat]() -> SparseMatrixC { return Eigen::SparseView<Eigen::MatrixXf>(mat); });\n    m.def(\"sparse_copy_r\", [](const SparseMatrixR &m) -> SparseMatrixR { return m; });\n    m.def(\"sparse_copy_c\", [](const SparseMatrixC &m) -> SparseMatrixC { return m; });\n    // test_partially_fixed\n    m.def(\"partial_copy_four_rm_r\", [](const FourRowMatrixR &m) -> FourRowMatrixR { return m; });\n    m.def(\"partial_copy_four_rm_c\", [](const FourColMatrixR &m) -> FourColMatrixR { return m; });\n    m.def(\"partial_copy_four_cm_r\", [](const FourRowMatrixC &m) -> FourRowMatrixC { return m; });\n    m.def(\"partial_copy_four_cm_c\", [](const FourColMatrixC &m) -> FourColMatrixC { return m; });\n\n    // test_cpp_casting\n    // Test that we can cast a numpy object to a Eigen::MatrixXd explicitly\n    m.def(\"cpp_copy\", [](py::handle m) { return m.cast<Eigen::MatrixXd>()(1, 0); });\n    
m.def(\"cpp_ref_c\", [](py::handle m) { return m.cast<Eigen::Ref<Eigen::MatrixXd>>()(1, 0); });\n    m.def(\"cpp_ref_r\", [](py::handle m) { return m.cast<Eigen::Ref<MatrixXdR>>()(1, 0); });\n    m.def(\"cpp_ref_any\", [](py::handle m) { return m.cast<py::EigenDRef<Eigen::MatrixXd>>()(1, 0); });\n\n\n    // test_nocopy_wrapper\n    // Test that we can prevent copying into an argument that would normally copy: First a version\n    // that would allow copying (if types or strides don't match) for comparison:\n    m.def(\"get_elem\", &get_elem);\n    // Now this alternative that calls the tells pybind to fail rather than copy:\n    m.def(\"get_elem_nocopy\", [](Eigen::Ref<const Eigen::MatrixXd> m) -> double { return get_elem(m); },\n            py::arg().noconvert());\n    // Also test a row-major-only no-copy const ref:\n    m.def(\"get_elem_rm_nocopy\", [](Eigen::Ref<const Eigen::Matrix<long, -1, -1, Eigen::RowMajor>> &m) -> long { return m(2, 1); },\n            py::arg().noconvert());\n\n    // test_issue738\n    // Issue #738: 1xN or Nx1 2D matrices were neither accepted nor properly copied with an\n    // incompatible stride value on the length-1 dimension--but that should be allowed (without\n    // requiring a copy!) 
because the stride value can be safely ignored on a size-1 dimension.\n    m.def(\"iss738_f1\", &adjust_matrix<const Eigen::Ref<const Eigen::MatrixXd> &>, py::arg().noconvert());\n    m.def(\"iss738_f2\", &adjust_matrix<const Eigen::Ref<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>> &>, py::arg().noconvert());\n\n    // test_issue1105\n    // Issue #1105: when converting from a numpy two-dimensional (Nx1) or (1xN) value into a dense\n    // eigen Vector or RowVector, the argument would fail to load because the numpy copy would fail:\n    // numpy won't broadcast a Nx1 into a 1-dimensional vector.\n    m.def(\"iss1105_col\", [](Eigen::VectorXd) { return true; });\n    m.def(\"iss1105_row\", [](Eigen::RowVectorXd) { return true; });\n\n    // test_named_arguments\n    // Make sure named arguments are working properly:\n    m.def(\"matrix_multiply\", [](const py::EigenDRef<const Eigen::MatrixXd> A, const py::EigenDRef<const Eigen::MatrixXd> B)\n            -> Eigen::MatrixXd {\n        if (A.cols() != B.rows()) throw std::domain_error(\"Nonconformable matrices!\");\n        return A * B;\n    }, py::arg(\"A\"), py::arg(\"B\"));\n\n    // test_custom_operator_new\n    py::class_<CustomOperatorNew>(m, \"CustomOperatorNew\")\n        .def(py::init<>())\n        .def_readonly(\"a\", &CustomOperatorNew::a)\n        .def_readonly(\"b\", &CustomOperatorNew::b);\n\n    // test_eigen_ref_life_support\n    // In case of a failure (the caster's temp array does not live long enough), creating\n    // a new array (np.ones(10)) increases the chances that the temp array will be garbage\n    // collected and/or that its memory will be overridden with different values.\n    m.def(\"get_elem_direct\", [](Eigen::Ref<const Eigen::VectorXd> v) {\n        py::module::import(\"numpy\").attr(\"ones\")(10);\n        return v(5);\n    });\n    m.def(\"get_elem_indirect\", [](std::vector<Eigen::Ref<const Eigen::VectorXd>> v) {\n        py::module::import(\"numpy\").attr(\"ones\")(10);\n  
      return v[0](5);\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_eigen.py",
    "content": "import pytest\nfrom pybind11_tests import ConstructorStats\n\npytestmark = pytest.requires_eigen_and_numpy\n\nwith pytest.suppress(ImportError):\n    from pybind11_tests import eigen as m\n    import numpy as np\n\n    ref = np.array([[ 0.,  3,  0,  0,  0, 11],\n                    [22,  0,  0,  0, 17, 11],\n                    [ 7,  5,  0,  1,  0, 11],\n                    [ 0,  0,  0,  0,  0, 11],\n                    [ 0,  0, 14,  0,  8, 11]])\n\n\ndef assert_equal_ref(mat):\n    np.testing.assert_array_equal(mat, ref)\n\n\ndef assert_sparse_equal_ref(sparse_mat):\n    assert_equal_ref(sparse_mat.toarray())\n\n\ndef test_fixed():\n    assert_equal_ref(m.fixed_c())\n    assert_equal_ref(m.fixed_r())\n    assert_equal_ref(m.fixed_copy_r(m.fixed_r()))\n    assert_equal_ref(m.fixed_copy_c(m.fixed_c()))\n    assert_equal_ref(m.fixed_copy_r(m.fixed_c()))\n    assert_equal_ref(m.fixed_copy_c(m.fixed_r()))\n\n\ndef test_dense():\n    assert_equal_ref(m.dense_r())\n    assert_equal_ref(m.dense_c())\n    assert_equal_ref(m.dense_copy_r(m.dense_r()))\n    assert_equal_ref(m.dense_copy_c(m.dense_c()))\n    assert_equal_ref(m.dense_copy_r(m.dense_c()))\n    assert_equal_ref(m.dense_copy_c(m.dense_r()))\n\n\ndef test_partially_fixed():\n    ref2 = np.array([[0., 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])\n    np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2), ref2)\n    np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2), ref2)\n    np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, 1]), ref2[:, [1]])\n    np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2[0, :]), ref2[[0], :])\n    np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)])\n    np.testing.assert_array_equal(\n        m.partial_copy_four_rm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :])\n\n    np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2), ref2)\n    
np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2), ref2)\n    np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, 1]), ref2[:, [1]])\n    np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2[0, :]), ref2[[0], :])\n    np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)])\n    np.testing.assert_array_equal(\n        m.partial_copy_four_cm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :])\n\n    # TypeError should be raise for a shape mismatch\n    functions = [m.partial_copy_four_rm_r, m.partial_copy_four_rm_c,\n                 m.partial_copy_four_cm_r, m.partial_copy_four_cm_c]\n    matrix_with_wrong_shape = [[1, 2],\n                               [3, 4]]\n    for f in functions:\n        with pytest.raises(TypeError) as excinfo:\n            f(matrix_with_wrong_shape)\n        assert \"incompatible function arguments\" in str(excinfo.value)\n\n\ndef test_mutator_descriptors():\n    zr = np.arange(30, dtype='float32').reshape(5, 6)  # row-major\n    zc = zr.reshape(6, 5).transpose()  # column-major\n\n    m.fixed_mutator_r(zr)\n    m.fixed_mutator_c(zc)\n    m.fixed_mutator_a(zr)\n    m.fixed_mutator_a(zc)\n    with pytest.raises(TypeError) as excinfo:\n        m.fixed_mutator_r(zc)\n    assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable, flags.c_contiguous]) -> None'\n            in str(excinfo.value))\n    with pytest.raises(TypeError) as excinfo:\n        m.fixed_mutator_c(zr)\n    assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable, flags.f_contiguous]) -> None'\n            in str(excinfo.value))\n    with pytest.raises(TypeError) as excinfo:\n        m.fixed_mutator_a(np.array([[1, 2], [3, 4]], dtype='float32'))\n    assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable]) -> None'\n            in str(excinfo.value))\n    zr.flags.writeable = False\n    with pytest.raises(TypeError):\n        m.fixed_mutator_r(zr)\n    with pytest.raises(TypeError):\n        
m.fixed_mutator_a(zr)\n\n\ndef test_cpp_casting():\n    assert m.cpp_copy(m.fixed_r()) == 22.\n    assert m.cpp_copy(m.fixed_c()) == 22.\n    z = np.array([[5., 6], [7, 8]])\n    assert m.cpp_copy(z) == 7.\n    assert m.cpp_copy(m.get_cm_ref()) == 21.\n    assert m.cpp_copy(m.get_rm_ref()) == 21.\n    assert m.cpp_ref_c(m.get_cm_ref()) == 21.\n    assert m.cpp_ref_r(m.get_rm_ref()) == 21.\n    with pytest.raises(RuntimeError) as excinfo:\n        # Can't reference m.fixed_c: it contains floats, m.cpp_ref_any wants doubles\n        m.cpp_ref_any(m.fixed_c())\n    assert 'Unable to cast Python instance' in str(excinfo.value)\n    with pytest.raises(RuntimeError) as excinfo:\n        # Can't reference m.fixed_r: it contains floats, m.cpp_ref_any wants doubles\n        m.cpp_ref_any(m.fixed_r())\n    assert 'Unable to cast Python instance' in str(excinfo.value)\n    assert m.cpp_ref_any(m.ReturnTester.create()) == 1.\n\n    assert m.cpp_ref_any(m.get_cm_ref()) == 21.\n    assert m.cpp_ref_any(m.get_cm_ref()) == 21.\n\n\ndef test_pass_readonly_array():\n    z = np.full((5, 6), 42.0)\n    z.flags.writeable = False\n    np.testing.assert_array_equal(z, m.fixed_copy_r(z))\n    np.testing.assert_array_equal(m.fixed_r_const(), m.fixed_r())\n    assert not m.fixed_r_const().flags.writeable\n    np.testing.assert_array_equal(m.fixed_copy_r(m.fixed_r_const()), m.fixed_r_const())\n\n\ndef test_nonunit_stride_from_python():\n    counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3))\n    second_row = counting_mat[1, :]\n    second_col = counting_mat[:, 1]\n    np.testing.assert_array_equal(m.double_row(second_row), 2.0 * second_row)\n    np.testing.assert_array_equal(m.double_col(second_row), 2.0 * second_row)\n    np.testing.assert_array_equal(m.double_complex(second_row), 2.0 * second_row)\n    np.testing.assert_array_equal(m.double_row(second_col), 2.0 * second_col)\n    np.testing.assert_array_equal(m.double_col(second_col), 2.0 * second_col)\n    
np.testing.assert_array_equal(m.double_complex(second_col), 2.0 * second_col)\n\n    counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))\n    slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]\n    for slice_idx, ref_mat in enumerate(slices):\n        np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat)\n        np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat)\n\n    # Mutator:\n    m.double_threer(second_row)\n    m.double_threec(second_col)\n    np.testing.assert_array_equal(counting_mat, [[0., 2, 2], [6, 16, 10], [6, 14, 8]])\n\n\ndef test_negative_stride_from_python(msg):\n    \"\"\"Eigen doesn't support (as of yet) negative strides. When a function takes an Eigen matrix by\n    copy or const reference, we can pass a numpy array that has negative strides.  Otherwise, an\n    exception will be thrown as Eigen will not be able to map the numpy array.\"\"\"\n\n    counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3))\n    counting_mat = counting_mat[::-1, ::-1]\n    second_row = counting_mat[1, :]\n    second_col = counting_mat[:, 1]\n    np.testing.assert_array_equal(m.double_row(second_row), 2.0 * second_row)\n    np.testing.assert_array_equal(m.double_col(second_row), 2.0 * second_row)\n    np.testing.assert_array_equal(m.double_complex(second_row), 2.0 * second_row)\n    np.testing.assert_array_equal(m.double_row(second_col), 2.0 * second_col)\n    np.testing.assert_array_equal(m.double_col(second_col), 2.0 * second_col)\n    np.testing.assert_array_equal(m.double_complex(second_col), 2.0 * second_col)\n\n    counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))\n    counting_3d = counting_3d[::-1, ::-1, ::-1]\n    slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]\n    for slice_idx, ref_mat in enumerate(slices):\n        np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat)\n        
np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat)\n\n    # Mutator:\n    with pytest.raises(TypeError) as excinfo:\n        m.double_threer(second_row)\n    assert msg(excinfo.value) == \"\"\"\n        double_threer(): incompatible function arguments. The following argument types are supported:\n            1. (arg0: numpy.ndarray[float32[1, 3], flags.writeable]) -> None\n\n        Invoked with: \"\"\" + repr(np.array([ 5.,  4.,  3.], dtype='float32'))  # noqa: E501 line too long\n\n    with pytest.raises(TypeError) as excinfo:\n        m.double_threec(second_col)\n    assert msg(excinfo.value) == \"\"\"\n        double_threec(): incompatible function arguments. The following argument types are supported:\n            1. (arg0: numpy.ndarray[float32[3, 1], flags.writeable]) -> None\n\n        Invoked with: \"\"\" + repr(np.array([ 7.,  4.,  1.], dtype='float32'))  # noqa: E501 line too long\n\n\ndef test_nonunit_stride_to_python():\n    assert np.all(m.diagonal(ref) == ref.diagonal())\n    assert np.all(m.diagonal_1(ref) == ref.diagonal(1))\n    for i in range(-5, 7):\n        assert np.all(m.diagonal_n(ref, i) == ref.diagonal(i)), \"m.diagonal_n({})\".format(i)\n\n    assert np.all(m.block(ref, 2, 1, 3, 3) == ref[2:5, 1:4])\n    assert np.all(m.block(ref, 1, 4, 4, 2) == ref[1:, 4:])\n    assert np.all(m.block(ref, 1, 4, 3, 2) == ref[1:4, 4:])\n\n\ndef test_eigen_ref_to_python():\n    chols = [m.cholesky1, m.cholesky2, m.cholesky3, m.cholesky4]\n    for i, chol in enumerate(chols, start=1):\n        mymat = chol(np.array([[1., 2, 4], [2, 13, 23], [4, 23, 77]]))\n        assert np.all(mymat == np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]])), \"cholesky{}\".format(i)\n\n\ndef assign_both(a1, a2, r, c, v):\n    a1[r, c] = v\n    a2[r, c] = v\n\n\ndef array_copy_but_one(a, r, c, v):\n    z = np.array(a, copy=True)\n    z[r, c] = v\n    return z\n\n\ndef test_eigen_return_references():\n    \"\"\"Tests various ways of returning references and 
non-referencing copies\"\"\"\n\n    master = np.ones((10, 10))\n    a = m.ReturnTester()\n    a_get1 = a.get()\n    assert not a_get1.flags.owndata and a_get1.flags.writeable\n    assign_both(a_get1, master, 3, 3, 5)\n    a_get2 = a.get_ptr()\n    assert not a_get2.flags.owndata and a_get2.flags.writeable\n    assign_both(a_get1, master, 2, 3, 6)\n\n    a_view1 = a.view()\n    assert not a_view1.flags.owndata and not a_view1.flags.writeable\n    with pytest.raises(ValueError):\n        a_view1[2, 3] = 4\n    a_view2 = a.view_ptr()\n    assert not a_view2.flags.owndata and not a_view2.flags.writeable\n    with pytest.raises(ValueError):\n        a_view2[2, 3] = 4\n\n    a_copy1 = a.copy_get()\n    assert a_copy1.flags.owndata and a_copy1.flags.writeable\n    np.testing.assert_array_equal(a_copy1, master)\n    a_copy1[7, 7] = -44  # Shouldn't affect anything else\n    c1want = array_copy_but_one(master, 7, 7, -44)\n    a_copy2 = a.copy_view()\n    assert a_copy2.flags.owndata and a_copy2.flags.writeable\n    np.testing.assert_array_equal(a_copy2, master)\n    a_copy2[4, 4] = -22  # Shouldn't affect anything else\n    c2want = array_copy_but_one(master, 4, 4, -22)\n\n    a_ref1 = a.ref()\n    assert not a_ref1.flags.owndata and a_ref1.flags.writeable\n    assign_both(a_ref1, master, 1, 1, 15)\n    a_ref2 = a.ref_const()\n    assert not a_ref2.flags.owndata and not a_ref2.flags.writeable\n    with pytest.raises(ValueError):\n        a_ref2[5, 5] = 33\n    a_ref3 = a.ref_safe()\n    assert not a_ref3.flags.owndata and a_ref3.flags.writeable\n    assign_both(a_ref3, master, 0, 7, 99)\n    a_ref4 = a.ref_const_safe()\n    assert not a_ref4.flags.owndata and not a_ref4.flags.writeable\n    with pytest.raises(ValueError):\n        a_ref4[7, 0] = 987654321\n\n    a_copy3 = a.copy_ref()\n    assert a_copy3.flags.owndata and a_copy3.flags.writeable\n    np.testing.assert_array_equal(a_copy3, master)\n    a_copy3[8, 1] = 11\n    c3want = array_copy_but_one(master, 8, 1, 11)\n   
 a_copy4 = a.copy_ref_const()\n    assert a_copy4.flags.owndata and a_copy4.flags.writeable\n    np.testing.assert_array_equal(a_copy4, master)\n    a_copy4[8, 4] = 88\n    c4want = array_copy_but_one(master, 8, 4, 88)\n\n    a_block1 = a.block(3, 3, 2, 2)\n    assert not a_block1.flags.owndata and a_block1.flags.writeable\n    a_block1[0, 0] = 55\n    master[3, 3] = 55\n    a_block2 = a.block_safe(2, 2, 3, 2)\n    assert not a_block2.flags.owndata and a_block2.flags.writeable\n    a_block2[2, 1] = -123\n    master[4, 3] = -123\n    a_block3 = a.block_const(6, 7, 4, 3)\n    assert not a_block3.flags.owndata and not a_block3.flags.writeable\n    with pytest.raises(ValueError):\n        a_block3[2, 2] = -44444\n\n    a_copy5 = a.copy_block(2, 2, 2, 3)\n    assert a_copy5.flags.owndata and a_copy5.flags.writeable\n    np.testing.assert_array_equal(a_copy5, master[2:4, 2:5])\n    a_copy5[1, 1] = 777\n    c5want = array_copy_but_one(master[2:4, 2:5], 1, 1, 777)\n\n    a_corn1 = a.corners()\n    assert not a_corn1.flags.owndata and a_corn1.flags.writeable\n    a_corn1 *= 50\n    a_corn1[1, 1] = 999\n    master[0, 0] = 50\n    master[0, 9] = 50\n    master[9, 0] = 50\n    master[9, 9] = 999\n    a_corn2 = a.corners_const()\n    assert not a_corn2.flags.owndata and not a_corn2.flags.writeable\n    with pytest.raises(ValueError):\n        a_corn2[1, 0] = 51\n\n    # All of the changes made all the way along should be visible everywhere\n    # now (except for the copies, of course)\n    np.testing.assert_array_equal(a_get1, master)\n    np.testing.assert_array_equal(a_get2, master)\n    np.testing.assert_array_equal(a_view1, master)\n    np.testing.assert_array_equal(a_view2, master)\n    np.testing.assert_array_equal(a_ref1, master)\n    np.testing.assert_array_equal(a_ref2, master)\n    np.testing.assert_array_equal(a_ref3, master)\n    np.testing.assert_array_equal(a_ref4, master)\n    np.testing.assert_array_equal(a_block1, master[3:5, 3:5])\n    
np.testing.assert_array_equal(a_block2, master[2:5, 2:4])\n    np.testing.assert_array_equal(a_block3, master[6:10, 7:10])\n    np.testing.assert_array_equal(a_corn1, master[0::master.shape[0] - 1, 0::master.shape[1] - 1])\n    np.testing.assert_array_equal(a_corn2, master[0::master.shape[0] - 1, 0::master.shape[1] - 1])\n\n    np.testing.assert_array_equal(a_copy1, c1want)\n    np.testing.assert_array_equal(a_copy2, c2want)\n    np.testing.assert_array_equal(a_copy3, c3want)\n    np.testing.assert_array_equal(a_copy4, c4want)\n    np.testing.assert_array_equal(a_copy5, c5want)\n\n\ndef assert_keeps_alive(cl, method, *args):\n    cstats = ConstructorStats.get(cl)\n    start_with = cstats.alive()\n    a = cl()\n    assert cstats.alive() == start_with + 1\n    z = method(a, *args)\n    assert cstats.alive() == start_with + 1\n    del a\n    # Here's the keep alive in action:\n    assert cstats.alive() == start_with + 1\n    del z\n    # Keep alive should have expired:\n    assert cstats.alive() == start_with\n\n\ndef test_eigen_keepalive():\n    a = m.ReturnTester()\n    cstats = ConstructorStats.get(m.ReturnTester)\n    assert cstats.alive() == 1\n    unsafe = [a.ref(), a.ref_const(), a.block(1, 2, 3, 4)]\n    copies = [a.copy_get(), a.copy_view(), a.copy_ref(), a.copy_ref_const(),\n              a.copy_block(4, 3, 2, 1)]\n    del a\n    assert cstats.alive() == 0\n    del unsafe\n    del copies\n\n    for meth in [m.ReturnTester.get, m.ReturnTester.get_ptr, m.ReturnTester.view,\n                 m.ReturnTester.view_ptr, m.ReturnTester.ref_safe, m.ReturnTester.ref_const_safe,\n                 m.ReturnTester.corners, m.ReturnTester.corners_const]:\n        assert_keeps_alive(m.ReturnTester, meth)\n\n    for meth in [m.ReturnTester.block_safe, m.ReturnTester.block_const]:\n        assert_keeps_alive(m.ReturnTester, meth, 4, 3, 2, 1)\n\n\ndef test_eigen_ref_mutators():\n    \"\"\"Tests Eigen's ability to mutate numpy values\"\"\"\n\n    orig = np.array([[1., 2, 3], 
[4, 5, 6], [7, 8, 9]])\n    zr = np.array(orig)\n    zc = np.array(orig, order='F')\n    m.add_rm(zr, 1, 0, 100)\n    assert np.all(zr == np.array([[1., 2, 3], [104, 5, 6], [7, 8, 9]]))\n    m.add_cm(zc, 1, 0, 200)\n    assert np.all(zc == np.array([[1., 2, 3], [204, 5, 6], [7, 8, 9]]))\n\n    m.add_any(zr, 1, 0, 20)\n    assert np.all(zr == np.array([[1., 2, 3], [124, 5, 6], [7, 8, 9]]))\n    m.add_any(zc, 1, 0, 10)\n    assert np.all(zc == np.array([[1., 2, 3], [214, 5, 6], [7, 8, 9]]))\n\n    # Can't reference a col-major array with a row-major Ref, and vice versa:\n    with pytest.raises(TypeError):\n        m.add_rm(zc, 1, 0, 1)\n    with pytest.raises(TypeError):\n        m.add_cm(zr, 1, 0, 1)\n\n    # Overloads:\n    m.add1(zr, 1, 0, -100)\n    m.add2(zr, 1, 0, -20)\n    assert np.all(zr == orig)\n    m.add1(zc, 1, 0, -200)\n    m.add2(zc, 1, 0, -10)\n    assert np.all(zc == orig)\n\n    # a non-contiguous slice (this won't work on either the row- or\n    # column-contiguous refs, but should work for the any)\n    cornersr = zr[0::2, 0::2]\n    cornersc = zc[0::2, 0::2]\n\n    assert np.all(cornersr == np.array([[1., 3], [7, 9]]))\n    assert np.all(cornersc == np.array([[1., 3], [7, 9]]))\n\n    with pytest.raises(TypeError):\n        m.add_rm(cornersr, 0, 1, 25)\n    with pytest.raises(TypeError):\n        m.add_cm(cornersr, 0, 1, 25)\n    with pytest.raises(TypeError):\n        m.add_rm(cornersc, 0, 1, 25)\n    with pytest.raises(TypeError):\n        m.add_cm(cornersc, 0, 1, 25)\n    m.add_any(cornersr, 0, 1, 25)\n    m.add_any(cornersc, 0, 1, 44)\n    assert np.all(zr == np.array([[1., 2, 28], [4, 5, 6], [7, 8, 9]]))\n    assert np.all(zc == np.array([[1., 2, 47], [4, 5, 6], [7, 8, 9]]))\n\n    # You shouldn't be allowed to pass a non-writeable array to a mutating Eigen method:\n    zro = zr[0:4, 0:4]\n    zro.flags.writeable = False\n    with pytest.raises(TypeError):\n        m.add_rm(zro, 0, 0, 0)\n    with pytest.raises(TypeError):\n        
m.add_any(zro, 0, 0, 0)\n    with pytest.raises(TypeError):\n        m.add1(zro, 0, 0, 0)\n    with pytest.raises(TypeError):\n        m.add2(zro, 0, 0, 0)\n\n    # integer array shouldn't be passable to a double-matrix-accepting mutating func:\n    zi = np.array([[1, 2], [3, 4]])\n    with pytest.raises(TypeError):\n        m.add_rm(zi)\n\n\ndef test_numpy_ref_mutators():\n    \"\"\"Tests numpy mutating Eigen matrices (for returned Eigen::Ref<...>s)\"\"\"\n\n    m.reset_refs()  # In case another test already changed it\n\n    zc = m.get_cm_ref()\n    zcro = m.get_cm_const_ref()\n    zr = m.get_rm_ref()\n    zrro = m.get_rm_const_ref()\n\n    assert [zc[1, 2], zcro[1, 2], zr[1, 2], zrro[1, 2]] == [23] * 4\n\n    assert not zc.flags.owndata and zc.flags.writeable\n    assert not zr.flags.owndata and zr.flags.writeable\n    assert not zcro.flags.owndata and not zcro.flags.writeable\n    assert not zrro.flags.owndata and not zrro.flags.writeable\n\n    zc[1, 2] = 99\n    expect = np.array([[11., 12, 13], [21, 22, 99], [31, 32, 33]])\n    # We should have just changed zc, of course, but also zcro and the original eigen matrix\n    assert np.all(zc == expect)\n    assert np.all(zcro == expect)\n    assert np.all(m.get_cm_ref() == expect)\n\n    zr[1, 2] = 99\n    assert np.all(zr == expect)\n    assert np.all(zrro == expect)\n    assert np.all(m.get_rm_ref() == expect)\n\n    # Make sure the readonly ones are numpy-readonly:\n    with pytest.raises(ValueError):\n        zcro[1, 2] = 6\n    with pytest.raises(ValueError):\n        zrro[1, 2] = 6\n\n    # We should be able to explicitly copy like this (and since we're copying,\n    # the const should drop away)\n    y1 = np.array(m.get_cm_const_ref())\n\n    assert y1.flags.owndata and y1.flags.writeable\n    # We should get copies of the eigen data, which was modified above:\n    assert y1[1, 2] == 99\n    y1[1, 2] += 12\n    assert y1[1, 2] == 111\n    assert zc[1, 2] == 99  # Make sure we aren't referencing the 
original\n\n\ndef test_both_ref_mutators():\n    \"\"\"Tests a complex chain of nested eigen/numpy references\"\"\"\n\n    m.reset_refs()  # In case another test already changed it\n\n    z = m.get_cm_ref()  # numpy -> eigen\n    z[0, 2] -= 3\n    z2 = m.incr_matrix(z, 1)  # numpy -> eigen -> numpy -> eigen\n    z2[1, 1] += 6\n    z3 = m.incr_matrix(z, 2)  # (numpy -> eigen)^3\n    z3[2, 2] += -5\n    z4 = m.incr_matrix(z, 3)  # (numpy -> eigen)^4\n    z4[1, 1] -= 1\n    z5 = m.incr_matrix(z, 4)  # (numpy -> eigen)^5\n    z5[0, 0] = 0\n    assert np.all(z == z2)\n    assert np.all(z == z3)\n    assert np.all(z == z4)\n    assert np.all(z == z5)\n    expect = np.array([[0., 22, 20], [31, 37, 33], [41, 42, 38]])\n    assert np.all(z == expect)\n\n    y = np.array(range(100), dtype='float64').reshape(10, 10)\n    y2 = m.incr_matrix_any(y, 10)  # np -> eigen -> np\n    y3 = m.incr_matrix_any(y2[0::2, 0::2], -33)  # np -> eigen -> np slice -> np -> eigen -> np\n    y4 = m.even_rows(y3)  # numpy -> eigen slice -> (... y3)\n    y5 = m.even_cols(y4)  # numpy -> eigen slice -> (... y4)\n    y6 = m.incr_matrix_any(y5, 1000)  # numpy -> eigen -> (... 
y5)\n\n    # Apply same mutations using just numpy:\n    yexpect = np.array(range(100), dtype='float64').reshape(10, 10)\n    yexpect += 10\n    yexpect[0::2, 0::2] -= 33\n    yexpect[0::4, 0::4] += 1000\n    assert np.all(y6 == yexpect[0::4, 0::4])\n    assert np.all(y5 == yexpect[0::4, 0::4])\n    assert np.all(y4 == yexpect[0::4, 0::2])\n    assert np.all(y3 == yexpect[0::2, 0::2])\n    assert np.all(y2 == yexpect)\n    assert np.all(y == yexpect)\n\n\ndef test_nocopy_wrapper():\n    # get_elem requires a column-contiguous matrix reference, but should be\n    # callable with other types of matrix (via copying):\n    int_matrix_colmajor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], order='F')\n    dbl_matrix_colmajor = np.array(int_matrix_colmajor, dtype='double', order='F', copy=True)\n    int_matrix_rowmajor = np.array(int_matrix_colmajor, order='C', copy=True)\n    dbl_matrix_rowmajor = np.array(int_matrix_rowmajor, dtype='double', order='C', copy=True)\n\n    # All should be callable via get_elem:\n    assert m.get_elem(int_matrix_colmajor) == 8\n    assert m.get_elem(dbl_matrix_colmajor) == 8\n    assert m.get_elem(int_matrix_rowmajor) == 8\n    assert m.get_elem(dbl_matrix_rowmajor) == 8\n\n    # All but the second should fail with m.get_elem_nocopy:\n    with pytest.raises(TypeError) as excinfo:\n        m.get_elem_nocopy(int_matrix_colmajor)\n    assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and\n            ', flags.f_contiguous' in str(excinfo.value))\n    assert m.get_elem_nocopy(dbl_matrix_colmajor) == 8\n    with pytest.raises(TypeError) as excinfo:\n        m.get_elem_nocopy(int_matrix_rowmajor)\n    assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and\n            ', flags.f_contiguous' in str(excinfo.value))\n    with pytest.raises(TypeError) as excinfo:\n        m.get_elem_nocopy(dbl_matrix_rowmajor)\n    assert ('get_elem_nocopy(): incompatible function arguments.' 
in str(excinfo.value) and\n            ', flags.f_contiguous' in str(excinfo.value))\n\n    # For the row-major test, we take a long matrix in row-major, so only the third is allowed:\n    with pytest.raises(TypeError) as excinfo:\n        m.get_elem_rm_nocopy(int_matrix_colmajor)\n    assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and\n            ', flags.c_contiguous' in str(excinfo.value))\n    with pytest.raises(TypeError) as excinfo:\n        m.get_elem_rm_nocopy(dbl_matrix_colmajor)\n    assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and\n            ', flags.c_contiguous' in str(excinfo.value))\n    assert m.get_elem_rm_nocopy(int_matrix_rowmajor) == 8\n    with pytest.raises(TypeError) as excinfo:\n        m.get_elem_rm_nocopy(dbl_matrix_rowmajor)\n    assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and\n            ', flags.c_contiguous' in str(excinfo.value))\n\n\ndef test_eigen_ref_life_support():\n    \"\"\"Ensure the lifetime of temporary arrays created by the `Ref` caster\n\n    The `Ref` caster sometimes creates a copy which needs to stay alive. This needs to\n    happen both for directs casts (just the array) or indirectly (e.g. 
list of arrays).\n    \"\"\"\n\n    a = np.full(shape=10, fill_value=8, dtype=np.int8)\n    assert m.get_elem_direct(a) == 8\n\n    list_of_a = [a]\n    assert m.get_elem_indirect(list_of_a) == 8\n\n\ndef test_special_matrix_objects():\n    assert np.all(m.incr_diag(7) == np.diag([1., 2, 3, 4, 5, 6, 7]))\n\n    asymm = np.array([[ 1.,  2,  3,  4],\n                      [ 5,  6,  7,  8],\n                      [ 9, 10, 11, 12],\n                      [13, 14, 15, 16]])\n    symm_lower = np.array(asymm)\n    symm_upper = np.array(asymm)\n    for i in range(4):\n        for j in range(i + 1, 4):\n            symm_lower[i, j] = symm_lower[j, i]\n            symm_upper[j, i] = symm_upper[i, j]\n\n    assert np.all(m.symmetric_lower(asymm) == symm_lower)\n    assert np.all(m.symmetric_upper(asymm) == symm_upper)\n\n\ndef test_dense_signature(doc):\n    assert doc(m.double_col) == \"\"\"\n        double_col(arg0: numpy.ndarray[float32[m, 1]]) -> numpy.ndarray[float32[m, 1]]\n    \"\"\"\n    assert doc(m.double_row) == \"\"\"\n        double_row(arg0: numpy.ndarray[float32[1, n]]) -> numpy.ndarray[float32[1, n]]\n    \"\"\"\n    assert doc(m.double_complex) == \"\"\"\n        double_complex(arg0: numpy.ndarray[complex64[m, 1]]) -> numpy.ndarray[complex64[m, 1]]\n    \"\"\"\n    assert doc(m.double_mat_rm) == \"\"\"\n        double_mat_rm(arg0: numpy.ndarray[float32[m, n]]) -> numpy.ndarray[float32[m, n]]\n    \"\"\"\n\n\ndef test_named_arguments():\n    a = np.array([[1.0, 2], [3, 4], [5, 6]])\n    b = np.ones((2, 1))\n\n    assert np.all(m.matrix_multiply(a, b) == np.array([[3.], [7], [11]]))\n    assert np.all(m.matrix_multiply(A=a, B=b) == np.array([[3.], [7], [11]]))\n    assert np.all(m.matrix_multiply(B=b, A=a) == np.array([[3.], [7], [11]]))\n\n    with pytest.raises(ValueError) as excinfo:\n        m.matrix_multiply(b, a)\n    assert str(excinfo.value) == 'Nonconformable matrices!'\n\n    with pytest.raises(ValueError) as excinfo:\n        m.matrix_multiply(A=b, 
B=a)\n    assert str(excinfo.value) == 'Nonconformable matrices!'\n\n    with pytest.raises(ValueError) as excinfo:\n        m.matrix_multiply(B=a, A=b)\n    assert str(excinfo.value) == 'Nonconformable matrices!'\n\n\n@pytest.requires_eigen_and_scipy\ndef test_sparse():\n    assert_sparse_equal_ref(m.sparse_r())\n    assert_sparse_equal_ref(m.sparse_c())\n    assert_sparse_equal_ref(m.sparse_copy_r(m.sparse_r()))\n    assert_sparse_equal_ref(m.sparse_copy_c(m.sparse_c()))\n    assert_sparse_equal_ref(m.sparse_copy_r(m.sparse_c()))\n    assert_sparse_equal_ref(m.sparse_copy_c(m.sparse_r()))\n\n\n@pytest.requires_eigen_and_scipy\ndef test_sparse_signature(doc):\n    assert doc(m.sparse_copy_r) == \"\"\"\n        sparse_copy_r(arg0: scipy.sparse.csr_matrix[float32]) -> scipy.sparse.csr_matrix[float32]\n    \"\"\"  # noqa: E501 line too long\n    assert doc(m.sparse_copy_c) == \"\"\"\n        sparse_copy_c(arg0: scipy.sparse.csc_matrix[float32]) -> scipy.sparse.csc_matrix[float32]\n    \"\"\"  # noqa: E501 line too long\n\n\ndef test_issue738():\n    \"\"\"Ignore strides on a length-1 dimension (even if they would be incompatible length > 1)\"\"\"\n    assert np.all(m.iss738_f1(np.array([[1., 2, 3]])) == np.array([[1., 102, 203]]))\n    assert np.all(m.iss738_f1(np.array([[1.], [2], [3]])) == np.array([[1.], [12], [23]]))\n\n    assert np.all(m.iss738_f2(np.array([[1., 2, 3]])) == np.array([[1., 102, 203]]))\n    assert np.all(m.iss738_f2(np.array([[1.], [2], [3]])) == np.array([[1.], [12], [23]]))\n\n\ndef test_issue1105():\n    \"\"\"Issue 1105: 1xN or Nx1 input arrays weren't accepted for eigen\n    compile-time row vectors or column vector\"\"\"\n    assert m.iss1105_row(np.ones((1, 7)))\n    assert m.iss1105_col(np.ones((7, 1)))\n\n    # These should still fail (incompatible dimensions):\n    with pytest.raises(TypeError) as excinfo:\n        m.iss1105_row(np.ones((7, 1)))\n    assert \"incompatible function arguments\" in str(excinfo)\n    with 
pytest.raises(TypeError) as excinfo:\n        m.iss1105_col(np.ones((1, 7)))\n    assert \"incompatible function arguments\" in str(excinfo)\n\n\ndef test_custom_operator_new():\n    \"\"\"Using Eigen types as member variables requires a class-specific\n    operator new with proper alignment\"\"\"\n\n    o = m.CustomOperatorNew()\n    np.testing.assert_allclose(o.a, 0.0)\n    np.testing.assert_allclose(o.b.diagonal(), 1.0)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_embed/CMakeLists.txt",
    "content": "if(${PYTHON_MODULE_EXTENSION} MATCHES \"pypy\")\n  add_custom_target(cpptest)  # Dummy target on PyPy. Embedding is not supported.\n  set(_suppress_unused_variable_warning \"${DOWNLOAD_CATCH}\")\n  return()\nendif()\n\nfind_package(Catch 1.9.3)\nif(CATCH_FOUND)\n  message(STATUS \"Building interpreter tests using Catch v${CATCH_VERSION}\")\nelse()\n  message(STATUS \"Catch not detected. Interpreter tests will be skipped. Install Catch headers\"\n                 \" manually or use `cmake -DDOWNLOAD_CATCH=1` to fetch them automatically.\")\n  return()\nendif()\n\nadd_executable(test_embed\n  catch.cpp\n  test_interpreter.cpp\n)\ntarget_include_directories(test_embed PRIVATE ${CATCH_INCLUDE_DIR})\npybind11_enable_warnings(test_embed)\n\nif(NOT CMAKE_VERSION VERSION_LESS 3.0)\n  target_link_libraries(test_embed PRIVATE pybind11::embed)\nelse()\n  target_include_directories(test_embed PRIVATE ${PYBIND11_INCLUDE_DIR} ${PYTHON_INCLUDE_DIRS})\n  target_compile_options(test_embed PRIVATE ${PYBIND11_CPP_STANDARD})\n  target_link_libraries(test_embed PRIVATE ${PYTHON_LIBRARIES})\nendif()\n\nfind_package(Threads REQUIRED)\ntarget_link_libraries(test_embed PUBLIC ${CMAKE_THREAD_LIBS_INIT})\n\nadd_custom_target(cpptest COMMAND $<TARGET_FILE:test_embed>\n                  WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})\n\npybind11_add_module(external_module THIN_LTO external_module.cpp)\nset_target_properties(external_module PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})\nadd_dependencies(cpptest external_module)\n\nadd_dependencies(check cpptest)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_embed/catch.cpp",
    "content": "// The Catch implementation is compiled here. This is a standalone\n// translation unit to avoid recompiling it for every test change.\n\n#include <pybind11/embed.h>\n\n#ifdef _MSC_VER\n// Silence MSVC C++17 deprecation warning from Catch regarding std::uncaught_exceptions (up to catch\n// 2.0.1; this should be fixed in the next catch release after 2.0.1).\n#  pragma warning(disable: 4996)\n#endif\n\n#define CATCH_CONFIG_RUNNER\n#include <catch.hpp>\n\nnamespace py = pybind11;\n\nint main(int argc, char *argv[]) {\n    py::scoped_interpreter guard{};\n    auto result = Catch::Session().run(argc, argv);\n\n    return result < 0xff ? result : 0xff;\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_embed/external_module.cpp",
    "content": "#include <pybind11/pybind11.h>\n\nnamespace py = pybind11;\n\n/* Simple test module/test class to check that the referenced internals data of external pybind11\n * modules aren't preserved over a finalize/initialize.\n */\n\nPYBIND11_MODULE(external_module, m) {\n    class A {\n    public:\n        A(int value) : v{value} {};\n        int v;\n    };\n\n    py::class_<A>(m, \"A\")\n        .def(py::init<int>())\n        .def_readwrite(\"value\", &A::v);\n\n    m.def(\"internals_at\", []() {\n        return reinterpret_cast<uintptr_t>(&py::detail::get_internals());\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_embed/test_interpreter.cpp",
    "content": "#include <pybind11/embed.h>\n\n#ifdef _MSC_VER\n// Silence MSVC C++17 deprecation warning from Catch regarding std::uncaught_exceptions (up to catch\n// 2.0.1; this should be fixed in the next catch release after 2.0.1).\n#  pragma warning(disable: 4996)\n#endif\n\n#include <catch.hpp>\n\n#include <thread>\n#include <fstream>\n#include <functional>\n\nnamespace py = pybind11;\nusing namespace py::literals;\n\nclass Widget {\npublic:\n    Widget(std::string message) : message(message) { }\n    virtual ~Widget() = default;\n\n    std::string the_message() const { return message; }\n    virtual int the_answer() const = 0;\n\nprivate:\n    std::string message;\n};\n\nclass PyWidget final : public Widget {\n    using Widget::Widget;\n\n    int the_answer() const override { PYBIND11_OVERLOAD_PURE(int, Widget, the_answer); }\n};\n\nPYBIND11_EMBEDDED_MODULE(widget_module, m) {\n    py::class_<Widget, PyWidget>(m, \"Widget\")\n        .def(py::init<std::string>())\n        .def_property_readonly(\"the_message\", &Widget::the_message);\n\n    m.def(\"add\", [](int i, int j) { return i + j; });\n}\n\nPYBIND11_EMBEDDED_MODULE(throw_exception, ) {\n    throw std::runtime_error(\"C++ Error\");\n}\n\nPYBIND11_EMBEDDED_MODULE(throw_error_already_set, ) {\n    auto d = py::dict();\n    d[\"missing\"].cast<py::object>();\n}\n\nTEST_CASE(\"Pass classes and data between modules defined in C++ and Python\") {\n    auto module = py::module::import(\"test_interpreter\");\n    REQUIRE(py::hasattr(module, \"DerivedWidget\"));\n\n    auto locals = py::dict(\"hello\"_a=\"Hello, World!\", \"x\"_a=5, **module.attr(\"__dict__\"));\n    py::exec(R\"(\n        widget = DerivedWidget(\"{} - {}\".format(hello, x))\n        message = widget.the_message\n    )\", py::globals(), locals);\n    REQUIRE(locals[\"message\"].cast<std::string>() == \"Hello, World! 
- 5\");\n\n    auto py_widget = module.attr(\"DerivedWidget\")(\"The question\");\n    auto message = py_widget.attr(\"the_message\");\n    REQUIRE(message.cast<std::string>() == \"The question\");\n\n    const auto &cpp_widget = py_widget.cast<const Widget &>();\n    REQUIRE(cpp_widget.the_answer() == 42);\n}\n\nTEST_CASE(\"Import error handling\") {\n    REQUIRE_NOTHROW(py::module::import(\"widget_module\"));\n    REQUIRE_THROWS_WITH(py::module::import(\"throw_exception\"),\n                        \"ImportError: C++ Error\");\n    REQUIRE_THROWS_WITH(py::module::import(\"throw_error_already_set\"),\n                        Catch::Contains(\"ImportError: KeyError\"));\n}\n\nTEST_CASE(\"There can be only one interpreter\") {\n    static_assert(std::is_move_constructible<py::scoped_interpreter>::value, \"\");\n    static_assert(!std::is_move_assignable<py::scoped_interpreter>::value, \"\");\n    static_assert(!std::is_copy_constructible<py::scoped_interpreter>::value, \"\");\n    static_assert(!std::is_copy_assignable<py::scoped_interpreter>::value, \"\");\n\n    REQUIRE_THROWS_WITH(py::initialize_interpreter(), \"The interpreter is already running\");\n    REQUIRE_THROWS_WITH(py::scoped_interpreter(), \"The interpreter is already running\");\n\n    py::finalize_interpreter();\n    REQUIRE_NOTHROW(py::scoped_interpreter());\n    {\n        auto pyi1 = py::scoped_interpreter();\n        auto pyi2 = std::move(pyi1);\n    }\n    py::initialize_interpreter();\n}\n\nbool has_pybind11_internals_builtin() {\n    auto builtins = py::handle(PyEval_GetBuiltins());\n    return builtins.contains(PYBIND11_INTERNALS_ID);\n};\n\nbool has_pybind11_internals_static() {\n    auto **&ipp = py::detail::get_internals_pp();\n    return ipp && *ipp;\n}\n\nTEST_CASE(\"Restart the interpreter\") {\n    // Verify pre-restart state.\n    REQUIRE(py::module::import(\"widget_module\").attr(\"add\")(1, 2).cast<int>() == 3);\n    REQUIRE(has_pybind11_internals_builtin());\n    
REQUIRE(has_pybind11_internals_static());\n    REQUIRE(py::module::import(\"external_module\").attr(\"A\")(123).attr(\"value\").cast<int>() == 123);\n\n    // local and foreign module internals should point to the same internals:\n    REQUIRE(reinterpret_cast<uintptr_t>(*py::detail::get_internals_pp()) ==\n            py::module::import(\"external_module\").attr(\"internals_at\")().cast<uintptr_t>());\n\n    // Restart the interpreter.\n    py::finalize_interpreter();\n    REQUIRE(Py_IsInitialized() == 0);\n\n    py::initialize_interpreter();\n    REQUIRE(Py_IsInitialized() == 1);\n\n    // Internals are deleted after a restart.\n    REQUIRE_FALSE(has_pybind11_internals_builtin());\n    REQUIRE_FALSE(has_pybind11_internals_static());\n    pybind11::detail::get_internals();\n    REQUIRE(has_pybind11_internals_builtin());\n    REQUIRE(has_pybind11_internals_static());\n    REQUIRE(reinterpret_cast<uintptr_t>(*py::detail::get_internals_pp()) ==\n            py::module::import(\"external_module\").attr(\"internals_at\")().cast<uintptr_t>());\n\n    // Make sure that an interpreter with no get_internals() created until finalize still gets the\n    // internals destroyed\n    py::finalize_interpreter();\n    py::initialize_interpreter();\n    bool ran = false;\n    py::module::import(\"__main__\").attr(\"internals_destroy_test\") =\n        py::capsule(&ran, [](void *ran) { py::detail::get_internals(); *static_cast<bool *>(ran) = true; });\n    REQUIRE_FALSE(has_pybind11_internals_builtin());\n    REQUIRE_FALSE(has_pybind11_internals_static());\n    REQUIRE_FALSE(ran);\n    py::finalize_interpreter();\n    REQUIRE(ran);\n    py::initialize_interpreter();\n    REQUIRE_FALSE(has_pybind11_internals_builtin());\n    REQUIRE_FALSE(has_pybind11_internals_static());\n\n    // C++ modules can be reloaded.\n    auto cpp_module = py::module::import(\"widget_module\");\n    REQUIRE(cpp_module.attr(\"add\")(1, 2).cast<int>() == 3);\n\n    // C++ type information is reloaded and can 
be used in python modules.\n    auto py_module = py::module::import(\"test_interpreter\");\n    auto py_widget = py_module.attr(\"DerivedWidget\")(\"Hello after restart\");\n    REQUIRE(py_widget.attr(\"the_message\").cast<std::string>() == \"Hello after restart\");\n}\n\nTEST_CASE(\"Subinterpreter\") {\n    // Add tags to the modules in the main interpreter and test the basics.\n    py::module::import(\"__main__\").attr(\"main_tag\") = \"main interpreter\";\n    {\n        auto m = py::module::import(\"widget_module\");\n        m.attr(\"extension_module_tag\") = \"added to module in main interpreter\";\n\n        REQUIRE(m.attr(\"add\")(1, 2).cast<int>() == 3);\n    }\n    REQUIRE(has_pybind11_internals_builtin());\n    REQUIRE(has_pybind11_internals_static());\n\n    /// Create and switch to a subinterpreter.\n    auto main_tstate = PyThreadState_Get();\n    auto sub_tstate = Py_NewInterpreter();\n\n    // Subinterpreters get their own copy of builtins. detail::get_internals() still\n    // works by returning from the static variable, i.e. 
all interpreters share a single\n    // global pybind11::internals;\n    REQUIRE_FALSE(has_pybind11_internals_builtin());\n    REQUIRE(has_pybind11_internals_static());\n\n    // Modules tags should be gone.\n    REQUIRE_FALSE(py::hasattr(py::module::import(\"__main__\"), \"tag\"));\n    {\n        auto m = py::module::import(\"widget_module\");\n        REQUIRE_FALSE(py::hasattr(m, \"extension_module_tag\"));\n\n        // Function bindings should still work.\n        REQUIRE(m.attr(\"add\")(1, 2).cast<int>() == 3);\n    }\n\n    // Restore main interpreter.\n    Py_EndInterpreter(sub_tstate);\n    PyThreadState_Swap(main_tstate);\n\n    REQUIRE(py::hasattr(py::module::import(\"__main__\"), \"main_tag\"));\n    REQUIRE(py::hasattr(py::module::import(\"widget_module\"), \"extension_module_tag\"));\n}\n\nTEST_CASE(\"Execution frame\") {\n    // When the interpreter is embedded, there is no execution frame, but `py::exec`\n    // should still function by using reasonable globals: `__main__.__dict__`.\n    py::exec(\"var = dict(number=42)\");\n    REQUIRE(py::globals()[\"var\"][\"number\"].cast<int>() == 42);\n}\n\nTEST_CASE(\"Threads\") {\n    // Restart interpreter to ensure threads are not initialized\n    py::finalize_interpreter();\n    py::initialize_interpreter();\n    REQUIRE_FALSE(has_pybind11_internals_static());\n\n    constexpr auto num_threads = 10;\n    auto locals = py::dict(\"count\"_a=0);\n\n    {\n        py::gil_scoped_release gil_release{};\n        REQUIRE(has_pybind11_internals_static());\n\n        auto threads = std::vector<std::thread>();\n        for (auto i = 0; i < num_threads; ++i) {\n            threads.emplace_back([&]() {\n                py::gil_scoped_acquire gil{};\n                locals[\"count\"] = locals[\"count\"].cast<int>() + 1;\n            });\n        }\n\n        for (auto &thread : threads) {\n            thread.join();\n        }\n    }\n\n    REQUIRE(locals[\"count\"].cast<int>() == num_threads);\n}\n\n// Scope exit 
utility https://stackoverflow.com/a/36644501/7255855\nstruct scope_exit {\n    std::function<void()> f_;\n    explicit scope_exit(std::function<void()> f) noexcept : f_(std::move(f)) {}\n    ~scope_exit() { if (f_) f_(); }\n};\n\nTEST_CASE(\"Reload module from file\") {\n    // Disable generation of cached bytecode (.pyc files) for this test, otherwise\n    // Python might pick up an old version from the cache instead of the new versions\n    // of the .py files generated below\n    auto sys = py::module::import(\"sys\");\n    bool dont_write_bytecode = sys.attr(\"dont_write_bytecode\").cast<bool>();\n    sys.attr(\"dont_write_bytecode\") = true;\n    // Reset the value at scope exit\n    scope_exit reset_dont_write_bytecode([&]() {\n        sys.attr(\"dont_write_bytecode\") = dont_write_bytecode;\n    });\n\n    std::string module_name = \"test_module_reload\";\n    std::string module_file = module_name + \".py\";\n\n    // Create the module .py file\n    std::ofstream test_module(module_file);\n    test_module << \"def test():\\n\";\n    test_module << \"    return 1\\n\";\n    test_module.close();\n    // Delete the file at scope exit\n    scope_exit delete_module_file([&]() {\n        std::remove(module_file.c_str());\n    });\n\n    // Import the module from file\n    auto module = py::module::import(module_name.c_str());\n    int result = module.attr(\"test\")().cast<int>();\n    REQUIRE(result == 1);\n\n    // Update the module .py file with a small change\n    test_module.open(module_file);\n    test_module << \"def test():\\n\";\n    test_module << \"    return 2\\n\";\n    test_module.close();\n\n    // Reload the module\n    module.reload();\n    result = module.attr(\"test\")().cast<int>();\n    REQUIRE(result == 2);\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_embed/test_interpreter.py",
    "content": "from widget_module import Widget\n\n\nclass DerivedWidget(Widget):\n    def __init__(self, message):\n        super(DerivedWidget, self).__init__(message)\n\n    def the_answer(self):\n        return 42\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_enum.cpp",
    "content": "/*\n    tests/test_enums.cpp -- enumerations\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\nTEST_SUBMODULE(enums, m) {\n    // test_unscoped_enum\n    enum UnscopedEnum {\n        EOne = 1,\n        ETwo\n    };\n    py::enum_<UnscopedEnum>(m, \"UnscopedEnum\", py::arithmetic(), \"An unscoped enumeration\")\n        .value(\"EOne\", EOne, \"Docstring for EOne\")\n        .value(\"ETwo\", ETwo, \"Docstring for ETwo\")\n        .export_values();\n\n    // test_scoped_enum\n    enum class ScopedEnum {\n        Two = 2,\n        Three\n    };\n    py::enum_<ScopedEnum>(m, \"ScopedEnum\", py::arithmetic())\n        .value(\"Two\", ScopedEnum::Two)\n        .value(\"Three\", ScopedEnum::Three);\n\n    m.def(\"test_scoped_enum\", [](ScopedEnum z) {\n        return \"ScopedEnum::\" + std::string(z == ScopedEnum::Two ? 
\"Two\" : \"Three\");\n    });\n\n    // test_binary_operators\n    enum Flags {\n        Read = 4,\n        Write = 2,\n        Execute = 1\n    };\n    py::enum_<Flags>(m, \"Flags\", py::arithmetic())\n        .value(\"Read\", Flags::Read)\n        .value(\"Write\", Flags::Write)\n        .value(\"Execute\", Flags::Execute)\n        .export_values();\n\n    // test_implicit_conversion\n    class ClassWithUnscopedEnum {\n    public:\n        enum EMode {\n            EFirstMode = 1,\n            ESecondMode\n        };\n\n        static EMode test_function(EMode mode) {\n            return mode;\n        }\n    };\n    py::class_<ClassWithUnscopedEnum> exenum_class(m, \"ClassWithUnscopedEnum\");\n    exenum_class.def_static(\"test_function\", &ClassWithUnscopedEnum::test_function);\n    py::enum_<ClassWithUnscopedEnum::EMode>(exenum_class, \"EMode\")\n        .value(\"EFirstMode\", ClassWithUnscopedEnum::EFirstMode)\n        .value(\"ESecondMode\", ClassWithUnscopedEnum::ESecondMode)\n        .export_values();\n\n    // test_enum_to_int\n    m.def(\"test_enum_to_int\", [](int) { });\n    m.def(\"test_enum_to_uint\", [](uint32_t) { });\n    m.def(\"test_enum_to_long_long\", [](long long) { });\n\n    // test_duplicate_enum_name\n    enum SimpleEnum\n    {\n        ONE, TWO, THREE\n    };\n\n    m.def(\"register_bad_enum\", [m]() {\n        py::enum_<SimpleEnum>(m, \"SimpleEnum\")\n            .value(\"ONE\", SimpleEnum::ONE)          //NOTE: all value function calls are called with the same first parameter value\n            .value(\"ONE\", SimpleEnum::TWO)\n            .value(\"ONE\", SimpleEnum::THREE)\n            .export_values();\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_enum.py",
    "content": "import pytest\nfrom pybind11_tests import enums as m\n\n\ndef test_unscoped_enum():\n    assert str(m.UnscopedEnum.EOne) == \"UnscopedEnum.EOne\"\n    assert str(m.UnscopedEnum.ETwo) == \"UnscopedEnum.ETwo\"\n    assert str(m.EOne) == \"UnscopedEnum.EOne\"\n\n    # name property\n    assert m.UnscopedEnum.EOne.name == \"EOne\"\n    assert m.UnscopedEnum.ETwo.name == \"ETwo\"\n    assert m.EOne.name == \"EOne\"\n    # name readonly\n    with pytest.raises(AttributeError):\n        m.UnscopedEnum.EOne.name = \"\"\n    # name returns a copy\n    foo = m.UnscopedEnum.EOne.name\n    foo = \"bar\"\n    assert m.UnscopedEnum.EOne.name == \"EOne\"\n\n    # __members__ property\n    assert m.UnscopedEnum.__members__ == \\\n        {\"EOne\": m.UnscopedEnum.EOne, \"ETwo\": m.UnscopedEnum.ETwo}\n    # __members__ readonly\n    with pytest.raises(AttributeError):\n        m.UnscopedEnum.__members__ = {}\n    # __members__ returns a copy\n    foo = m.UnscopedEnum.__members__\n    foo[\"bar\"] = \"baz\"\n    assert m.UnscopedEnum.__members__ == \\\n        {\"EOne\": m.UnscopedEnum.EOne, \"ETwo\": m.UnscopedEnum.ETwo}\n\n    assert m.UnscopedEnum.__doc__ == \\\n        '''An unscoped enumeration\n\nMembers:\n\n  EOne : Docstring for EOne\n\n  ETwo : Docstring for ETwo''' or m.UnscopedEnum.__doc__ == \\\n        '''An unscoped enumeration\n\nMembers:\n\n  ETwo : Docstring for ETwo\n\n  EOne : Docstring for EOne'''\n\n    # Unscoped enums will accept ==/!= int comparisons\n    y = m.UnscopedEnum.ETwo\n    assert y == 2\n    assert 2 == y\n    assert y != 3\n    assert 3 != y\n\n    assert int(m.UnscopedEnum.ETwo) == 2\n    assert str(m.UnscopedEnum(2)) == \"UnscopedEnum.ETwo\"\n\n    # order\n    assert m.UnscopedEnum.EOne < m.UnscopedEnum.ETwo\n    assert m.UnscopedEnum.EOne < 2\n    assert m.UnscopedEnum.ETwo > m.UnscopedEnum.EOne\n    assert m.UnscopedEnum.ETwo > 1\n    assert m.UnscopedEnum.ETwo <= 2\n    assert m.UnscopedEnum.ETwo >= 2\n    assert 
m.UnscopedEnum.EOne <= m.UnscopedEnum.ETwo\n    assert m.UnscopedEnum.EOne <= 2\n    assert m.UnscopedEnum.ETwo >= m.UnscopedEnum.EOne\n    assert m.UnscopedEnum.ETwo >= 1\n    assert not (m.UnscopedEnum.ETwo < m.UnscopedEnum.EOne)\n    assert not (2 < m.UnscopedEnum.EOne)\n\n\ndef test_scoped_enum():\n    assert m.test_scoped_enum(m.ScopedEnum.Three) == \"ScopedEnum::Three\"\n    z = m.ScopedEnum.Two\n    assert m.test_scoped_enum(z) == \"ScopedEnum::Two\"\n\n    # Scoped enums will *NOT* accept ==/!= int comparisons (Will always return False)\n    assert not z == 3\n    assert not 3 == z\n    assert z != 3\n    assert 3 != z\n    # Scoped enums will *NOT* accept >, <, >= and <= int comparisons (Will throw exceptions)\n    with pytest.raises(TypeError):\n        z > 3\n    with pytest.raises(TypeError):\n        z < 3\n    with pytest.raises(TypeError):\n        z >= 3\n    with pytest.raises(TypeError):\n        z <= 3\n\n    # order\n    assert m.ScopedEnum.Two < m.ScopedEnum.Three\n    assert m.ScopedEnum.Three > m.ScopedEnum.Two\n    assert m.ScopedEnum.Two <= m.ScopedEnum.Three\n    assert m.ScopedEnum.Two <= m.ScopedEnum.Two\n    assert m.ScopedEnum.Two >= m.ScopedEnum.Two\n    assert m.ScopedEnum.Three >= m.ScopedEnum.Two\n\n\ndef test_implicit_conversion():\n    assert str(m.ClassWithUnscopedEnum.EMode.EFirstMode) == \"EMode.EFirstMode\"\n    assert str(m.ClassWithUnscopedEnum.EFirstMode) == \"EMode.EFirstMode\"\n\n    f = m.ClassWithUnscopedEnum.test_function\n    first = m.ClassWithUnscopedEnum.EFirstMode\n    second = m.ClassWithUnscopedEnum.ESecondMode\n\n    assert f(first) == 1\n\n    assert f(first) == f(first)\n    assert not f(first) != f(first)\n\n    assert f(first) != f(second)\n    assert not f(first) == f(second)\n\n    assert f(first) == int(f(first))\n    assert not f(first) != int(f(first))\n\n    assert f(first) != int(f(second))\n    assert not f(first) == int(f(second))\n\n    # noinspection PyDictCreation\n    x = {f(first): 1, 
f(second): 2}\n    x[f(first)] = 3\n    x[f(second)] = 4\n    # Hashing test\n    assert str(x) == \"{EMode.EFirstMode: 3, EMode.ESecondMode: 4}\"\n\n\ndef test_binary_operators():\n    assert int(m.Flags.Read) == 4\n    assert int(m.Flags.Write) == 2\n    assert int(m.Flags.Execute) == 1\n    assert int(m.Flags.Read | m.Flags.Write | m.Flags.Execute) == 7\n    assert int(m.Flags.Read | m.Flags.Write) == 6\n    assert int(m.Flags.Read | m.Flags.Execute) == 5\n    assert int(m.Flags.Write | m.Flags.Execute) == 3\n    assert int(m.Flags.Write | 1) == 3\n\n    state = m.Flags.Read | m.Flags.Write\n    assert (state & m.Flags.Read) != 0\n    assert (state & m.Flags.Write) != 0\n    assert (state & m.Flags.Execute) == 0\n    assert (state & 1) == 0\n\n    state2 = ~state\n    assert state2 == -7\n    assert int(state ^ state2) == -1\n\n\ndef test_enum_to_int():\n    m.test_enum_to_int(m.Flags.Read)\n    m.test_enum_to_int(m.ClassWithUnscopedEnum.EMode.EFirstMode)\n    m.test_enum_to_uint(m.Flags.Read)\n    m.test_enum_to_uint(m.ClassWithUnscopedEnum.EMode.EFirstMode)\n    m.test_enum_to_long_long(m.Flags.Read)\n    m.test_enum_to_long_long(m.ClassWithUnscopedEnum.EMode.EFirstMode)\n\n\ndef test_duplicate_enum_name():\n    with pytest.raises(ValueError) as excinfo:\n        m.register_bad_enum()\n    assert str(excinfo.value) == 'SimpleEnum: element \"ONE\" already exists!'\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_eval.cpp",
    "content": "/*\n    tests/test_eval.cpp -- Usage of eval() and eval_file()\n\n    Copyright (c) 2016 Klemens D. Morgenstern\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n\n#include <pybind11/eval.h>\n#include \"pybind11_tests.h\"\n\nTEST_SUBMODULE(eval_, m) {\n    // test_evals\n\n    auto global = py::dict(py::module::import(\"__main__\").attr(\"__dict__\"));\n\n    m.def(\"test_eval_statements\", [global]() {\n        auto local = py::dict();\n        local[\"call_test\"] = py::cpp_function([&]() -> int {\n            return 42;\n        });\n\n        // Regular string literal\n        py::exec(\n            \"message = 'Hello World!'\\n\"\n            \"x = call_test()\",\n            global, local\n        );\n\n        // Multi-line raw string literal\n        py::exec(R\"(\n            if x == 42:\n                print(message)\n            else:\n                raise RuntimeError\n            )\", global, local\n        );\n        auto x = local[\"x\"].cast<int>();\n\n        return x == 42;\n    });\n\n    m.def(\"test_eval\", [global]() {\n        auto local = py::dict();\n        local[\"x\"] = py::int_(42);\n        auto x = py::eval(\"x\", global, local);\n        return x.cast<int>() == 42;\n    });\n\n    m.def(\"test_eval_single_statement\", []() {\n        auto local = py::dict();\n        local[\"call_test\"] = py::cpp_function([&]() -> int {\n            return 42;\n        });\n\n        auto result = py::eval<py::eval_single_statement>(\"x = call_test()\", py::dict(), local);\n        auto x = local[\"x\"].cast<int>();\n        return result.is_none() && x == 42;\n    });\n\n    m.def(\"test_eval_file\", [global](py::str filename) {\n        auto local = py::dict();\n        local[\"y\"] = py::int_(43);\n\n        int val_out;\n        local[\"call_test2\"] = py::cpp_function([&](int value) { val_out = value; });\n\n        auto result = 
py::eval_file(filename, global, local);\n        return val_out == 43 && result.is_none();\n    });\n\n    m.def(\"test_eval_failure\", []() {\n        try {\n            py::eval(\"nonsense code ...\");\n        } catch (py::error_already_set &) {\n            return true;\n        }\n        return false;\n    });\n\n    m.def(\"test_eval_file_failure\", []() {\n        try {\n            py::eval_file(\"non-existing file\");\n        } catch (std::exception &) {\n            return true;\n        }\n        return false;\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_eval.py",
    "content": "import os\nfrom pybind11_tests import eval_ as m\n\n\ndef test_evals(capture):\n    with capture:\n        assert m.test_eval_statements()\n    assert capture == \"Hello World!\"\n\n    assert m.test_eval()\n    assert m.test_eval_single_statement()\n\n    filename = os.path.join(os.path.dirname(__file__), \"test_eval_call.py\")\n    assert m.test_eval_file(filename)\n\n    assert m.test_eval_failure()\n    assert m.test_eval_file_failure()\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_eval_call.py",
    "content": "# This file is called from 'test_eval.py'\n\nif 'call_test2' in locals():\n    call_test2(y)  # noqa: F821 undefined name\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_exceptions.cpp",
    "content": "/*\n    tests/test_custom-exceptions.cpp -- exception translation\n\n    Copyright (c) 2016 Pim Schellart <P.Schellart@princeton.edu>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\n// A type that should be raised as an exception in Python\nclass MyException : public std::exception {\npublic:\n    explicit MyException(const char * m) : message{m} {}\n    virtual const char * what() const noexcept override {return message.c_str();}\nprivate:\n    std::string message = \"\";\n};\n\n// A type that should be translated to a standard Python exception\nclass MyException2 : public std::exception {\npublic:\n    explicit MyException2(const char * m) : message{m} {}\n    virtual const char * what() const noexcept override {return message.c_str();}\nprivate:\n    std::string message = \"\";\n};\n\n// A type that is not derived from std::exception (and is thus unknown)\nclass MyException3 {\npublic:\n    explicit MyException3(const char * m) : message{m} {}\n    virtual const char * what() const noexcept {return message.c_str();}\nprivate:\n    std::string message = \"\";\n};\n\n// A type that should be translated to MyException\n// and delegated to its exception translator\nclass MyException4 : public std::exception {\npublic:\n    explicit MyException4(const char * m) : message{m} {}\n    virtual const char * what() const noexcept override {return message.c_str();}\nprivate:\n    std::string message = \"\";\n};\n\n\n// Like the above, but declared via the helper function\nclass MyException5 : public std::logic_error {\npublic:\n    explicit MyException5(const std::string &what) : std::logic_error(what) {}\n};\n\n// Inherits from MyException5\nclass MyException5_1 : public MyException5 {\n    using MyException5::MyException5;\n};\n\nstruct PythonCallInDestructor {\n    PythonCallInDestructor(const py::dict &d) : d(d) {}\n    
~PythonCallInDestructor() { d[\"good\"] = true; }\n\n    py::dict d;\n};\n\nTEST_SUBMODULE(exceptions, m) {\n    m.def(\"throw_std_exception\", []() {\n        throw std::runtime_error(\"This exception was intentionally thrown.\");\n    });\n\n    // make a new custom exception and use it as a translation target\n    static py::exception<MyException> ex(m, \"MyException\");\n    py::register_exception_translator([](std::exception_ptr p) {\n        try {\n            if (p) std::rethrow_exception(p);\n        } catch (const MyException &e) {\n            // Set MyException as the active python error\n            ex(e.what());\n        }\n    });\n\n    // register new translator for MyException2\n    // no need to store anything here because this type will\n    // never by visible from Python\n    py::register_exception_translator([](std::exception_ptr p) {\n        try {\n            if (p) std::rethrow_exception(p);\n        } catch (const MyException2 &e) {\n            // Translate this exception to a standard RuntimeError\n            PyErr_SetString(PyExc_RuntimeError, e.what());\n        }\n    });\n\n    // register new translator for MyException4\n    // which will catch it and delegate to the previously registered\n    // translator for MyException by throwing a new exception\n    py::register_exception_translator([](std::exception_ptr p) {\n        try {\n            if (p) std::rethrow_exception(p);\n        } catch (const MyException4 &e) {\n            throw MyException(e.what());\n        }\n    });\n\n    // A simple exception translation:\n    auto ex5 = py::register_exception<MyException5>(m, \"MyException5\");\n    // A slightly more complicated one that declares MyException5_1 as a subclass of MyException5\n    py::register_exception<MyException5_1>(m, \"MyException5_1\", ex5.ptr());\n\n    m.def(\"throws1\", []() { throw MyException(\"this error should go to a custom type\"); });\n    m.def(\"throws2\", []() { throw MyException2(\"this error 
should go to a standard Python exception\"); });\n    m.def(\"throws3\", []() { throw MyException3(\"this error cannot be translated\"); });\n    m.def(\"throws4\", []() { throw MyException4(\"this error is rethrown\"); });\n    m.def(\"throws5\", []() { throw MyException5(\"this is a helper-defined translated exception\"); });\n    m.def(\"throws5_1\", []() { throw MyException5_1(\"MyException5 subclass\"); });\n    m.def(\"throws_logic_error\", []() { throw std::logic_error(\"this error should fall through to the standard handler\"); });\n    m.def(\"exception_matches\", []() {\n        py::dict foo;\n        try { foo[\"bar\"]; }\n        catch (py::error_already_set& ex) {\n            if (!ex.matches(PyExc_KeyError)) throw;\n        }\n    });\n\n    m.def(\"throw_already_set\", [](bool err) {\n        if (err)\n            PyErr_SetString(PyExc_ValueError, \"foo\");\n        try {\n            throw py::error_already_set();\n        } catch (const std::runtime_error& e) {\n            if ((err && e.what() != std::string(\"ValueError: foo\")) ||\n                (!err && e.what() != std::string(\"Unknown internal error occurred\")))\n            {\n                PyErr_Clear();\n                throw std::runtime_error(\"error message mismatch\");\n            }\n        }\n        PyErr_Clear();\n        if (err)\n            PyErr_SetString(PyExc_ValueError, \"foo\");\n        throw py::error_already_set();\n    });\n\n    m.def(\"python_call_in_destructor\", [](py::dict d) {\n        try {\n            PythonCallInDestructor set_dict_in_destructor(d);\n            PyErr_SetString(PyExc_ValueError, \"foo\");\n            throw py::error_already_set();\n        } catch (const py::error_already_set&) {\n            return true;\n        }\n        return false;\n    });\n\n    // test_nested_throws\n    m.def(\"try_catch\", [m](py::object exc_type, py::function f, py::args args) {\n        try { f(*args); }\n        catch (py::error_already_set &ex) {\n       
     if (ex.matches(exc_type))\n                py::print(ex.what());\n            else\n                throw;\n        }\n    });\n\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_exceptions.py",
    "content": "import pytest\n\nfrom pybind11_tests import exceptions as m\nimport pybind11_cross_module_tests as cm\n\n\ndef test_std_exception(msg):\n    with pytest.raises(RuntimeError) as excinfo:\n        m.throw_std_exception()\n    assert msg(excinfo.value) == \"This exception was intentionally thrown.\"\n\n\ndef test_error_already_set(msg):\n    with pytest.raises(RuntimeError) as excinfo:\n        m.throw_already_set(False)\n    assert msg(excinfo.value) == \"Unknown internal error occurred\"\n\n    with pytest.raises(ValueError) as excinfo:\n        m.throw_already_set(True)\n    assert msg(excinfo.value) == \"foo\"\n\n\ndef test_cross_module_exceptions():\n    with pytest.raises(RuntimeError) as excinfo:\n        cm.raise_runtime_error()\n    assert str(excinfo.value) == \"My runtime error\"\n\n    with pytest.raises(ValueError) as excinfo:\n        cm.raise_value_error()\n    assert str(excinfo.value) == \"My value error\"\n\n    with pytest.raises(ValueError) as excinfo:\n        cm.throw_pybind_value_error()\n    assert str(excinfo.value) == \"pybind11 value error\"\n\n    with pytest.raises(TypeError) as excinfo:\n        cm.throw_pybind_type_error()\n    assert str(excinfo.value) == \"pybind11 type error\"\n\n    with pytest.raises(StopIteration) as excinfo:\n        cm.throw_stop_iteration()\n\n\ndef test_python_call_in_catch():\n    d = {}\n    assert m.python_call_in_destructor(d) is True\n    assert d[\"good\"] is True\n\n\ndef test_exception_matches():\n    m.exception_matches()\n\n\ndef test_custom(msg):\n    # Can we catch a MyException?\n    with pytest.raises(m.MyException) as excinfo:\n        m.throws1()\n    assert msg(excinfo.value) == \"this error should go to a custom type\"\n\n    # Can we translate to standard Python exceptions?\n    with pytest.raises(RuntimeError) as excinfo:\n        m.throws2()\n    assert msg(excinfo.value) == \"this error should go to a standard Python exception\"\n\n    # Can we handle unknown exceptions?\n  
  with pytest.raises(RuntimeError) as excinfo:\n        m.throws3()\n    assert msg(excinfo.value) == \"Caught an unknown exception!\"\n\n    # Can we delegate to another handler by rethrowing?\n    with pytest.raises(m.MyException) as excinfo:\n        m.throws4()\n    assert msg(excinfo.value) == \"this error is rethrown\"\n\n    # Can we fall-through to the default handler?\n    with pytest.raises(RuntimeError) as excinfo:\n        m.throws_logic_error()\n    assert msg(excinfo.value) == \"this error should fall through to the standard handler\"\n\n    # Can we handle a helper-declared exception?\n    with pytest.raises(m.MyException5) as excinfo:\n        m.throws5()\n    assert msg(excinfo.value) == \"this is a helper-defined translated exception\"\n\n    # Exception subclassing:\n    with pytest.raises(m.MyException5) as excinfo:\n        m.throws5_1()\n    assert msg(excinfo.value) == \"MyException5 subclass\"\n    assert isinstance(excinfo.value, m.MyException5_1)\n\n    with pytest.raises(m.MyException5_1) as excinfo:\n        m.throws5_1()\n    assert msg(excinfo.value) == \"MyException5 subclass\"\n\n    with pytest.raises(m.MyException5) as excinfo:\n        try:\n            m.throws5()\n        except m.MyException5_1:\n            raise RuntimeError(\"Exception error: caught child from parent\")\n    assert msg(excinfo.value) == \"this is a helper-defined translated exception\"\n\n\ndef test_nested_throws(capture):\n    \"\"\"Tests nested (e.g. 
C++ -> Python -> C++) exception handling\"\"\"\n\n    def throw_myex():\n        raise m.MyException(\"nested error\")\n\n    def throw_myex5():\n        raise m.MyException5(\"nested error 5\")\n\n    # In the comments below, the exception is caught in the first step, thrown in the last step\n\n    # C++ -> Python\n    with capture:\n        m.try_catch(m.MyException5, throw_myex5)\n    assert str(capture).startswith(\"MyException5: nested error 5\")\n\n    # Python -> C++ -> Python\n    with pytest.raises(m.MyException) as excinfo:\n        m.try_catch(m.MyException5, throw_myex)\n    assert str(excinfo.value) == \"nested error\"\n\n    def pycatch(exctype, f, *args):\n        try:\n            f(*args)\n        except m.MyException as e:\n            print(e)\n\n    # C++ -> Python -> C++ -> Python\n    with capture:\n        m.try_catch(\n            m.MyException5, pycatch, m.MyException, m.try_catch, m.MyException, throw_myex5)\n    assert str(capture).startswith(\"MyException5: nested error 5\")\n\n    # C++ -> Python -> C++\n    with capture:\n        m.try_catch(m.MyException, pycatch, m.MyException5, m.throws4)\n    assert capture == \"this error is rethrown\"\n\n    # Python -> C++ -> Python -> C++\n    with pytest.raises(m.MyException5) as excinfo:\n        m.try_catch(m.MyException, pycatch, m.MyException, m.throws5)\n    assert str(excinfo.value) == \"this is a helper-defined translated exception\"\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_factory_constructors.cpp",
    "content": "/*\n    tests/test_factory_constructors.cpp -- tests construction from a factory function\n                                           via py::init_factory()\n\n    Copyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <cmath>\n\n// Classes for testing python construction via C++ factory function:\n// Not publicly constructible, copyable, or movable:\nclass TestFactory1 {\n    friend class TestFactoryHelper;\n    TestFactory1() : value(\"(empty)\") { print_default_created(this); }\n    TestFactory1(int v) : value(std::to_string(v)) { print_created(this, value); }\n    TestFactory1(std::string v) : value(std::move(v)) { print_created(this, value); }\n    TestFactory1(TestFactory1 &&) = delete;\n    TestFactory1(const TestFactory1 &) = delete;\n    TestFactory1 &operator=(TestFactory1 &&) = delete;\n    TestFactory1 &operator=(const TestFactory1 &) = delete;\npublic:\n    std::string value;\n    ~TestFactory1() { print_destroyed(this); }\n};\n// Non-public construction, but moveable:\nclass TestFactory2 {\n    friend class TestFactoryHelper;\n    TestFactory2() : value(\"(empty2)\") { print_default_created(this); }\n    TestFactory2(int v) : value(std::to_string(v)) { print_created(this, value); }\n    TestFactory2(std::string v) : value(std::move(v)) { print_created(this, value); }\npublic:\n    TestFactory2(TestFactory2 &&m) { value = std::move(m.value); print_move_created(this); }\n    TestFactory2 &operator=(TestFactory2 &&m) { value = std::move(m.value); print_move_assigned(this); return *this; }\n    std::string value;\n    ~TestFactory2() { print_destroyed(this); }\n};\n// Mixed direct/factory construction:\nclass TestFactory3 {\nprotected:\n    friend class TestFactoryHelper;\n    TestFactory3() : value(\"(empty3)\") { 
print_default_created(this); }\n    TestFactory3(int v) : value(std::to_string(v)) { print_created(this, value); }\npublic:\n    TestFactory3(std::string v) : value(std::move(v)) { print_created(this, value); }\n    TestFactory3(TestFactory3 &&m) { value = std::move(m.value); print_move_created(this); }\n    TestFactory3 &operator=(TestFactory3 &&m) { value = std::move(m.value); print_move_assigned(this); return *this; }\n    std::string value;\n    virtual ~TestFactory3() { print_destroyed(this); }\n};\n// Inheritance test\nclass TestFactory4 : public TestFactory3 {\npublic:\n    TestFactory4() : TestFactory3() { print_default_created(this); }\n    TestFactory4(int v) : TestFactory3(v) { print_created(this, v); }\n    virtual ~TestFactory4() { print_destroyed(this); }\n};\n// Another class for an invalid downcast test\nclass TestFactory5 : public TestFactory3 {\npublic:\n    TestFactory5(int i) : TestFactory3(i) { print_created(this, i); }\n    virtual ~TestFactory5() { print_destroyed(this); }\n};\n\nclass TestFactory6 {\nprotected:\n    int value;\n    bool alias = false;\npublic:\n    TestFactory6(int i) : value{i} { print_created(this, i); }\n    TestFactory6(TestFactory6 &&f) { print_move_created(this); value = f.value; alias = f.alias; }\n    TestFactory6(const TestFactory6 &f) { print_copy_created(this); value = f.value; alias = f.alias; }\n    virtual ~TestFactory6() { print_destroyed(this); }\n    virtual int get() { return value; }\n    bool has_alias() { return alias; }\n};\nclass PyTF6 : public TestFactory6 {\npublic:\n    // Special constructor that allows the factory to construct a PyTF6 from a TestFactory6 only\n    // when an alias is needed:\n    PyTF6(TestFactory6 &&base) : TestFactory6(std::move(base)) { alias = true; print_created(this, \"move\", value); }\n    PyTF6(int i) : TestFactory6(i) { alias = true; print_created(this, i); }\n    PyTF6(PyTF6 &&f) : TestFactory6(std::move(f)) { print_move_created(this); }\n    PyTF6(const PyTF6 &f) : 
TestFactory6(f) { print_copy_created(this); }\n    PyTF6(std::string s) : TestFactory6((int) s.size()) { alias = true; print_created(this, s); }\n    virtual ~PyTF6() { print_destroyed(this); }\n    int get() override { PYBIND11_OVERLOAD(int, TestFactory6, get, /*no args*/); }\n};\n\nclass TestFactory7 {\nprotected:\n    int value;\n    bool alias = false;\npublic:\n    TestFactory7(int i) : value{i} { print_created(this, i); }\n    TestFactory7(TestFactory7 &&f) { print_move_created(this); value = f.value; alias = f.alias; }\n    TestFactory7(const TestFactory7 &f) { print_copy_created(this); value = f.value; alias = f.alias; }\n    virtual ~TestFactory7() { print_destroyed(this); }\n    virtual int get() { return value; }\n    bool has_alias() { return alias; }\n};\nclass PyTF7 : public TestFactory7 {\npublic:\n    PyTF7(int i) : TestFactory7(i) { alias = true; print_created(this, i); }\n    PyTF7(PyTF7 &&f) : TestFactory7(std::move(f)) { print_move_created(this); }\n    PyTF7(const PyTF7 &f) : TestFactory7(f) { print_copy_created(this); }\n    virtual ~PyTF7() { print_destroyed(this); }\n    int get() override { PYBIND11_OVERLOAD(int, TestFactory7, get, /*no args*/); }\n};\n\n\nclass TestFactoryHelper {\npublic:\n    // Non-movable, non-copyable type:\n    // Return via pointer:\n    static TestFactory1 *construct1() { return new TestFactory1(); }\n    // Holder:\n    static std::unique_ptr<TestFactory1> construct1(int a) { return std::unique_ptr<TestFactory1>(new TestFactory1(a)); }\n    // pointer again\n    static TestFactory1 *construct1_string(std::string a) { return new TestFactory1(a); }\n\n    // Moveable type:\n    // pointer:\n    static TestFactory2 *construct2() { return new TestFactory2(); }\n    // holder:\n    static std::unique_ptr<TestFactory2> construct2(int a) { return std::unique_ptr<TestFactory2>(new TestFactory2(a)); }\n    // by value moving:\n    static TestFactory2 construct2(std::string a) { return TestFactory2(a); }\n\n    // 
shared_ptr holder type:\n    // pointer:\n    static TestFactory3 *construct3() { return new TestFactory3(); }\n    // holder:\n    static std::shared_ptr<TestFactory3> construct3(int a) { return std::shared_ptr<TestFactory3>(new TestFactory3(a)); }\n};\n\nTEST_SUBMODULE(factory_constructors, m) {\n\n    // Define various trivial types to allow simpler overload resolution:\n    py::module m_tag = m.def_submodule(\"tag\");\n#define MAKE_TAG_TYPE(Name) \\\n    struct Name##_tag {}; \\\n    py::class_<Name##_tag>(m_tag, #Name \"_tag\").def(py::init<>()); \\\n    m_tag.attr(#Name) = py::cast(Name##_tag{})\n    MAKE_TAG_TYPE(pointer);\n    MAKE_TAG_TYPE(unique_ptr);\n    MAKE_TAG_TYPE(move);\n    MAKE_TAG_TYPE(shared_ptr);\n    MAKE_TAG_TYPE(derived);\n    MAKE_TAG_TYPE(TF4);\n    MAKE_TAG_TYPE(TF5);\n    MAKE_TAG_TYPE(null_ptr);\n    MAKE_TAG_TYPE(base);\n    MAKE_TAG_TYPE(invalid_base);\n    MAKE_TAG_TYPE(alias);\n    MAKE_TAG_TYPE(unaliasable);\n    MAKE_TAG_TYPE(mixed);\n\n    // test_init_factory_basic, test_bad_type\n    py::class_<TestFactory1>(m, \"TestFactory1\")\n        .def(py::init([](unique_ptr_tag, int v) { return TestFactoryHelper::construct1(v); }))\n        .def(py::init(&TestFactoryHelper::construct1_string)) // raw function pointer\n        .def(py::init([](pointer_tag) { return TestFactoryHelper::construct1(); }))\n        .def(py::init([](py::handle, int v, py::handle) { return TestFactoryHelper::construct1(v); }))\n        .def_readwrite(\"value\", &TestFactory1::value)\n        ;\n    py::class_<TestFactory2>(m, \"TestFactory2\")\n        .def(py::init([](pointer_tag, int v) { return TestFactoryHelper::construct2(v); }))\n        .def(py::init([](unique_ptr_tag, std::string v) { return TestFactoryHelper::construct2(v); }))\n        .def(py::init([](move_tag) { return TestFactoryHelper::construct2(); }))\n        .def_readwrite(\"value\", &TestFactory2::value)\n        ;\n\n    // Stateful & reused:\n    int c = 1;\n    auto c4a = [c](pointer_tag, 
TF4_tag, int a) { (void) c; return new TestFactory4(a);};\n\n    // test_init_factory_basic, test_init_factory_casting\n    py::class_<TestFactory3, std::shared_ptr<TestFactory3>>(m, \"TestFactory3\")\n        .def(py::init([](pointer_tag, int v) { return TestFactoryHelper::construct3(v); }))\n        .def(py::init([](shared_ptr_tag) { return TestFactoryHelper::construct3(); }))\n        .def(\"__init__\", [](TestFactory3 &self, std::string v) { new (&self) TestFactory3(v); }) // placement-new ctor\n\n        // factories returning a derived type:\n        .def(py::init(c4a)) // derived ptr\n        .def(py::init([](pointer_tag, TF5_tag, int a) { return new TestFactory5(a); }))\n        // derived shared ptr:\n        .def(py::init([](shared_ptr_tag, TF4_tag, int a) { return std::make_shared<TestFactory4>(a); }))\n        .def(py::init([](shared_ptr_tag, TF5_tag, int a) { return std::make_shared<TestFactory5>(a); }))\n\n        // Returns nullptr:\n        .def(py::init([](null_ptr_tag) { return (TestFactory3 *) nullptr; }))\n\n        .def_readwrite(\"value\", &TestFactory3::value)\n        ;\n\n    // test_init_factory_casting\n    py::class_<TestFactory4, TestFactory3, std::shared_ptr<TestFactory4>>(m, \"TestFactory4\")\n        .def(py::init(c4a)) // pointer\n        ;\n\n    // Doesn't need to be registered, but registering makes getting ConstructorStats easier:\n    py::class_<TestFactory5, TestFactory3, std::shared_ptr<TestFactory5>>(m, \"TestFactory5\");\n\n    // test_init_factory_alias\n    // Alias testing\n    py::class_<TestFactory6, PyTF6>(m, \"TestFactory6\")\n        .def(py::init([](base_tag, int i) { return TestFactory6(i); }))\n        .def(py::init([](alias_tag, int i) { return PyTF6(i); }))\n        .def(py::init([](alias_tag, std::string s) { return PyTF6(s); }))\n        .def(py::init([](alias_tag, pointer_tag, int i) { return new PyTF6(i); }))\n        .def(py::init([](base_tag, pointer_tag, int i) { return new TestFactory6(i); }))\n        
.def(py::init([](base_tag, alias_tag, pointer_tag, int i) { return (TestFactory6 *) new PyTF6(i); }))\n\n        .def(\"get\", &TestFactory6::get)\n        .def(\"has_alias\", &TestFactory6::has_alias)\n\n        .def_static(\"get_cstats\", &ConstructorStats::get<TestFactory6>, py::return_value_policy::reference)\n        .def_static(\"get_alias_cstats\", &ConstructorStats::get<PyTF6>, py::return_value_policy::reference)\n        ;\n\n    // test_init_factory_dual\n    // Separate alias constructor testing\n    py::class_<TestFactory7, PyTF7, std::shared_ptr<TestFactory7>>(m, \"TestFactory7\")\n        .def(py::init(\n            [](int i) { return TestFactory7(i); },\n            [](int i) { return PyTF7(i); }))\n        .def(py::init(\n            [](pointer_tag, int i) { return new TestFactory7(i); },\n            [](pointer_tag, int i) { return new PyTF7(i); }))\n        .def(py::init(\n            [](mixed_tag, int i) { return new TestFactory7(i); },\n            [](mixed_tag, int i) { return PyTF7(i); }))\n        .def(py::init(\n            [](mixed_tag, std::string s) { return TestFactory7((int) s.size()); },\n            [](mixed_tag, std::string s) { return new PyTF7((int) s.size()); }))\n        .def(py::init(\n            [](base_tag, pointer_tag, int i) { return new TestFactory7(i); },\n            [](base_tag, pointer_tag, int i) { return (TestFactory7 *) new PyTF7(i); }))\n        .def(py::init(\n            [](alias_tag, pointer_tag, int i) { return new PyTF7(i); },\n            [](alias_tag, pointer_tag, int i) { return new PyTF7(10*i); }))\n        .def(py::init(\n            [](shared_ptr_tag, base_tag, int i) { return std::make_shared<TestFactory7>(i); },\n            [](shared_ptr_tag, base_tag, int i) { auto *p = new PyTF7(i); return std::shared_ptr<TestFactory7>(p); }))\n        .def(py::init(\n            [](shared_ptr_tag, invalid_base_tag, int i) { return std::make_shared<TestFactory7>(i); },\n            [](shared_ptr_tag, 
invalid_base_tag, int i) { return std::make_shared<TestFactory7>(i); })) // <-- invalid alias factory\n\n        .def(\"get\", &TestFactory7::get)\n        .def(\"has_alias\", &TestFactory7::has_alias)\n\n        .def_static(\"get_cstats\", &ConstructorStats::get<TestFactory7>, py::return_value_policy::reference)\n        .def_static(\"get_alias_cstats\", &ConstructorStats::get<PyTF7>, py::return_value_policy::reference)\n        ;\n\n    // test_placement_new_alternative\n    // Class with a custom new operator but *without* a placement new operator (issue #948)\n    class NoPlacementNew {\n    public:\n        NoPlacementNew(int i) : i(i) { }\n        static void *operator new(std::size_t s) {\n            auto *p = ::operator new(s);\n            py::print(\"operator new called, returning\", reinterpret_cast<uintptr_t>(p));\n            return p;\n        }\n        static void operator delete(void *p) {\n            py::print(\"operator delete called on\", reinterpret_cast<uintptr_t>(p));\n            ::operator delete(p);\n        }\n        int i;\n    };\n    // As of 2.2, `py::init<args>` no longer requires placement new\n    py::class_<NoPlacementNew>(m, \"NoPlacementNew\")\n        .def(py::init<int>())\n        .def(py::init([]() { return new NoPlacementNew(100); }))\n        .def_readwrite(\"i\", &NoPlacementNew::i)\n        ;\n\n\n    // test_reallocations\n    // Class that has verbose operator_new/operator_delete calls\n    struct NoisyAlloc {\n        NoisyAlloc(const NoisyAlloc &) = default;\n        NoisyAlloc(int i) { py::print(py::str(\"NoisyAlloc(int {})\").format(i)); }\n        NoisyAlloc(double d) { py::print(py::str(\"NoisyAlloc(double {})\").format(d)); }\n        ~NoisyAlloc() { py::print(\"~NoisyAlloc()\"); }\n\n        static void *operator new(size_t s) { py::print(\"noisy new\"); return ::operator new(s); }\n        static void *operator new(size_t, void *p) { py::print(\"noisy placement new\"); return p; }\n        static void 
operator delete(void *p, size_t) { py::print(\"noisy delete\"); ::operator delete(p); }\n        static void operator delete(void *, void *) { py::print(\"noisy placement delete\"); }\n#if defined(_MSC_VER) && _MSC_VER < 1910\n        // MSVC 2015 bug: the above \"noisy delete\" isn't invoked (fixed in MSVC 2017)\n        static void operator delete(void *p) { py::print(\"noisy delete\"); ::operator delete(p); }\n#endif\n    };\n    py::class_<NoisyAlloc>(m, \"NoisyAlloc\")\n        // Since these overloads have the same number of arguments, the dispatcher will try each of\n        // them until the arguments convert.  Thus we can get a pre-allocation here when passing a\n        // single non-integer:\n        .def(\"__init__\", [](NoisyAlloc *a, int i) { new (a) NoisyAlloc(i); }) // Regular constructor, runs first, requires preallocation\n        .def(py::init([](double d) { return new NoisyAlloc(d); }))\n\n        // The two-argument version: first the factory pointer overload.\n        .def(py::init([](int i, int) { return new NoisyAlloc(i); }))\n        // Return-by-value:\n        .def(py::init([](double d, int) { return NoisyAlloc(d); }))\n        // Old-style placement new init; requires preallocation\n        .def(\"__init__\", [](NoisyAlloc &a, double d, double) { new (&a) NoisyAlloc(d); })\n        // Requires deallocation of previous overload preallocated value:\n        .def(py::init([](int i, double) { return new NoisyAlloc(i); }))\n        // Regular again: requires yet another preallocation\n        .def(\"__init__\", [](NoisyAlloc &a, int i, std::string) { new (&a) NoisyAlloc(i); })\n        ;\n\n\n\n\n    // static_assert testing (the following def's should all fail with appropriate compilation errors):\n#if 0\n    struct BadF1Base {};\n    struct BadF1 : BadF1Base {};\n    struct PyBadF1 : BadF1 {};\n    py::class_<BadF1, PyBadF1, std::shared_ptr<BadF1>> bf1(m, \"BadF1\");\n    // wrapped factory function must return a compatible pointer, holder, 
or value\n    bf1.def(py::init([]() { return 3; }));\n    // incompatible factory function pointer return type\n    bf1.def(py::init([]() { static int three = 3; return &three; }));\n    // incompatible factory function std::shared_ptr<T> return type: cannot convert shared_ptr<T> to holder\n    // (non-polymorphic base)\n    bf1.def(py::init([]() { return std::shared_ptr<BadF1Base>(new BadF1()); }));\n#endif\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_factory_constructors.py",
    "content": "import pytest\nimport re\n\nfrom pybind11_tests import factory_constructors as m\nfrom pybind11_tests.factory_constructors import tag\nfrom pybind11_tests import ConstructorStats\n\n\ndef test_init_factory_basic():\n    \"\"\"Tests py::init_factory() wrapper around various ways of returning the object\"\"\"\n\n    cstats = [ConstructorStats.get(c) for c in [m.TestFactory1, m.TestFactory2, m.TestFactory3]]\n    cstats[0].alive()  # force gc\n    n_inst = ConstructorStats.detail_reg_inst()\n\n    x1 = m.TestFactory1(tag.unique_ptr, 3)\n    assert x1.value == \"3\"\n    y1 = m.TestFactory1(tag.pointer)\n    assert y1.value == \"(empty)\"\n    z1 = m.TestFactory1(\"hi!\")\n    assert z1.value == \"hi!\"\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 3\n\n    x2 = m.TestFactory2(tag.move)\n    assert x2.value == \"(empty2)\"\n    y2 = m.TestFactory2(tag.pointer, 7)\n    assert y2.value == \"7\"\n    z2 = m.TestFactory2(tag.unique_ptr, \"hi again\")\n    assert z2.value == \"hi again\"\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 6\n\n    x3 = m.TestFactory3(tag.shared_ptr)\n    assert x3.value == \"(empty3)\"\n    y3 = m.TestFactory3(tag.pointer, 42)\n    assert y3.value == \"42\"\n    z3 = m.TestFactory3(\"bye\")\n    assert z3.value == \"bye\"\n\n    with pytest.raises(TypeError) as excinfo:\n        m.TestFactory3(tag.null_ptr)\n    assert str(excinfo.value) == \"pybind11::init(): factory function returned nullptr\"\n\n    assert [i.alive() for i in cstats] == [3, 3, 3]\n    assert ConstructorStats.detail_reg_inst() == n_inst + 9\n\n    del x1, y2, y3, z3\n    assert [i.alive() for i in cstats] == [2, 2, 1]\n    assert ConstructorStats.detail_reg_inst() == n_inst + 5\n    del x2, x3, y1, z1, z2\n    assert [i.alive() for i in cstats] == [0, 0, 0]\n    assert ConstructorStats.detail_reg_inst() == n_inst\n\n    assert [i.values() for i in cstats] == [\n        [\"3\", \"hi!\"],\n        [\"7\", \"hi again\"],\n        
[\"42\", \"bye\"]\n    ]\n    assert [i.default_constructions for i in cstats] == [1, 1, 1]\n\n\ndef test_init_factory_signature(msg):\n    with pytest.raises(TypeError) as excinfo:\n        m.TestFactory1(\"invalid\", \"constructor\", \"arguments\")\n    assert msg(excinfo.value) == \"\"\"\n        __init__(): incompatible constructor arguments. The following argument types are supported:\n            1. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int)\n            2. m.factory_constructors.TestFactory1(arg0: str)\n            3. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.pointer_tag)\n            4. m.factory_constructors.TestFactory1(arg0: handle, arg1: int, arg2: handle)\n\n        Invoked with: 'invalid', 'constructor', 'arguments'\n    \"\"\"  # noqa: E501 line too long\n\n    assert msg(m.TestFactory1.__init__.__doc__) == \"\"\"\n        __init__(*args, **kwargs)\n        Overloaded function.\n\n        1. __init__(self: m.factory_constructors.TestFactory1, arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int) -> None\n\n        2. __init__(self: m.factory_constructors.TestFactory1, arg0: str) -> None\n\n        3. __init__(self: m.factory_constructors.TestFactory1, arg0: m.factory_constructors.tag.pointer_tag) -> None\n\n        4. 
__init__(self: m.factory_constructors.TestFactory1, arg0: handle, arg1: int, arg2: handle) -> None\n    \"\"\"  # noqa: E501 line too long\n\n\ndef test_init_factory_casting():\n    \"\"\"Tests py::init_factory() wrapper with various upcasting and downcasting returns\"\"\"\n\n    cstats = [ConstructorStats.get(c) for c in [m.TestFactory3, m.TestFactory4, m.TestFactory5]]\n    cstats[0].alive()  # force gc\n    n_inst = ConstructorStats.detail_reg_inst()\n\n    # Construction from derived references:\n    a = m.TestFactory3(tag.pointer, tag.TF4, 4)\n    assert a.value == \"4\"\n    b = m.TestFactory3(tag.shared_ptr, tag.TF4, 5)\n    assert b.value == \"5\"\n    c = m.TestFactory3(tag.pointer, tag.TF5, 6)\n    assert c.value == \"6\"\n    d = m.TestFactory3(tag.shared_ptr, tag.TF5, 7)\n    assert d.value == \"7\"\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 4\n\n    # Shared a lambda with TF3:\n    e = m.TestFactory4(tag.pointer, tag.TF4, 8)\n    assert e.value == \"8\"\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 5\n    assert [i.alive() for i in cstats] == [5, 3, 2]\n\n    del a\n    assert [i.alive() for i in cstats] == [4, 2, 2]\n    assert ConstructorStats.detail_reg_inst() == n_inst + 4\n\n    del b, c, e\n    assert [i.alive() for i in cstats] == [1, 0, 1]\n    assert ConstructorStats.detail_reg_inst() == n_inst + 1\n\n    del d\n    assert [i.alive() for i in cstats] == [0, 0, 0]\n    assert ConstructorStats.detail_reg_inst() == n_inst\n\n    assert [i.values() for i in cstats] == [\n        [\"4\", \"5\", \"6\", \"7\", \"8\"],\n        [\"4\", \"5\", \"8\"],\n        [\"6\", \"7\"]\n    ]\n\n\ndef test_init_factory_alias():\n    \"\"\"Tests py::init_factory() wrapper with value conversions and alias types\"\"\"\n\n    cstats = [m.TestFactory6.get_cstats(), m.TestFactory6.get_alias_cstats()]\n    cstats[0].alive()  # force gc\n    n_inst = ConstructorStats.detail_reg_inst()\n\n    a = m.TestFactory6(tag.base, 1)\n    assert 
a.get() == 1\n    assert not a.has_alias()\n    b = m.TestFactory6(tag.alias, \"hi there\")\n    assert b.get() == 8\n    assert b.has_alias()\n    c = m.TestFactory6(tag.alias, 3)\n    assert c.get() == 3\n    assert c.has_alias()\n    d = m.TestFactory6(tag.alias, tag.pointer, 4)\n    assert d.get() == 4\n    assert d.has_alias()\n    e = m.TestFactory6(tag.base, tag.pointer, 5)\n    assert e.get() == 5\n    assert not e.has_alias()\n    f = m.TestFactory6(tag.base, tag.alias, tag.pointer, 6)\n    assert f.get() == 6\n    assert f.has_alias()\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 6\n    assert [i.alive() for i in cstats] == [6, 4]\n\n    del a, b, e\n    assert [i.alive() for i in cstats] == [3, 3]\n    assert ConstructorStats.detail_reg_inst() == n_inst + 3\n    del f, c, d\n    assert [i.alive() for i in cstats] == [0, 0]\n    assert ConstructorStats.detail_reg_inst() == n_inst\n\n    class MyTest(m.TestFactory6):\n        def __init__(self, *args):\n            m.TestFactory6.__init__(self, *args)\n\n        def get(self):\n            return -5 + m.TestFactory6.get(self)\n\n    # Return Class by value, moved into new alias:\n    z = MyTest(tag.base, 123)\n    assert z.get() == 118\n    assert z.has_alias()\n\n    # Return alias by value, moved into new alias:\n    y = MyTest(tag.alias, \"why hello!\")\n    assert y.get() == 5\n    assert y.has_alias()\n\n    # Return Class by pointer, moved into new alias then original destroyed:\n    x = MyTest(tag.base, tag.pointer, 47)\n    assert x.get() == 42\n    assert x.has_alias()\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 3\n    assert [i.alive() for i in cstats] == [3, 3]\n    del x, y, z\n    assert [i.alive() for i in cstats] == [0, 0]\n    assert ConstructorStats.detail_reg_inst() == n_inst\n\n    assert [i.values() for i in cstats] == [\n        [\"1\", \"8\", \"3\", \"4\", \"5\", \"6\", \"123\", \"10\", \"47\"],\n        [\"hi there\", \"3\", \"4\", \"6\", \"move\", 
\"123\", \"why hello!\", \"move\", \"47\"]\n    ]\n\n\ndef test_init_factory_dual():\n    \"\"\"Tests init factory functions with dual main/alias factory functions\"\"\"\n    from pybind11_tests.factory_constructors import TestFactory7\n\n    cstats = [TestFactory7.get_cstats(), TestFactory7.get_alias_cstats()]\n    cstats[0].alive()  # force gc\n    n_inst = ConstructorStats.detail_reg_inst()\n\n    class PythFactory7(TestFactory7):\n        def get(self):\n            return 100 + TestFactory7.get(self)\n\n    a1 = TestFactory7(1)\n    a2 = PythFactory7(2)\n    assert a1.get() == 1\n    assert a2.get() == 102\n    assert not a1.has_alias()\n    assert a2.has_alias()\n\n    b1 = TestFactory7(tag.pointer, 3)\n    b2 = PythFactory7(tag.pointer, 4)\n    assert b1.get() == 3\n    assert b2.get() == 104\n    assert not b1.has_alias()\n    assert b2.has_alias()\n\n    c1 = TestFactory7(tag.mixed, 5)\n    c2 = PythFactory7(tag.mixed, 6)\n    assert c1.get() == 5\n    assert c2.get() == 106\n    assert not c1.has_alias()\n    assert c2.has_alias()\n\n    d1 = TestFactory7(tag.base, tag.pointer, 7)\n    d2 = PythFactory7(tag.base, tag.pointer, 8)\n    assert d1.get() == 7\n    assert d2.get() == 108\n    assert not d1.has_alias()\n    assert d2.has_alias()\n\n    # Both return an alias; the second multiplies the value by 10:\n    e1 = TestFactory7(tag.alias, tag.pointer, 9)\n    e2 = PythFactory7(tag.alias, tag.pointer, 10)\n    assert e1.get() == 9\n    assert e2.get() == 200\n    assert e1.has_alias()\n    assert e2.has_alias()\n\n    f1 = TestFactory7(tag.shared_ptr, tag.base, 11)\n    f2 = PythFactory7(tag.shared_ptr, tag.base, 12)\n    assert f1.get() == 11\n    assert f2.get() == 112\n    assert not f1.has_alias()\n    assert f2.has_alias()\n\n    g1 = TestFactory7(tag.shared_ptr, tag.invalid_base, 13)\n    assert g1.get() == 13\n    assert not g1.has_alias()\n    with pytest.raises(TypeError) as excinfo:\n        PythFactory7(tag.shared_ptr, tag.invalid_base, 14)\n  
  assert (str(excinfo.value) ==\n            \"pybind11::init(): construction failed: returned holder-wrapped instance is not an \"\n            \"alias instance\")\n\n    assert [i.alive() for i in cstats] == [13, 7]\n    assert ConstructorStats.detail_reg_inst() == n_inst + 13\n\n    del a1, a2, b1, d1, e1, e2\n    assert [i.alive() for i in cstats] == [7, 4]\n    assert ConstructorStats.detail_reg_inst() == n_inst + 7\n    del b2, c1, c2, d2, f1, f2, g1\n    assert [i.alive() for i in cstats] == [0, 0]\n    assert ConstructorStats.detail_reg_inst() == n_inst\n\n    assert [i.values() for i in cstats] == [\n        [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"100\", \"11\", \"12\", \"13\", \"14\"],\n        [\"2\", \"4\", \"6\", \"8\", \"9\", \"100\", \"12\"]\n    ]\n\n\ndef test_no_placement_new(capture):\n    \"\"\"Prior to 2.2, `py::init<...>` relied on the type supporting placement\n    new; this tests a class without placement new support.\"\"\"\n    with capture:\n        a = m.NoPlacementNew(123)\n\n    found = re.search(r'^operator new called, returning (\\d+)\\n$', str(capture))\n    assert found\n    assert a.i == 123\n    with capture:\n        del a\n        pytest.gc_collect()\n    assert capture == \"operator delete called on \" + found.group(1)\n\n    with capture:\n        b = m.NoPlacementNew()\n\n    found = re.search(r'^operator new called, returning (\\d+)\\n$', str(capture))\n    assert found\n    assert b.i == 100\n    with capture:\n        del b\n        pytest.gc_collect()\n    assert capture == \"operator delete called on \" + found.group(1)\n\n\ndef test_multiple_inheritance():\n    class MITest(m.TestFactory1, m.TestFactory2):\n        def __init__(self):\n            m.TestFactory1.__init__(self, tag.unique_ptr, 33)\n            m.TestFactory2.__init__(self, tag.move)\n\n    a = MITest()\n    assert m.TestFactory1.value.fget(a) == \"33\"\n    assert m.TestFactory2.value.fget(a) == \"(empty2)\"\n\n\ndef 
create_and_destroy(*args):\n    a = m.NoisyAlloc(*args)\n    print(\"---\")\n    del a\n    pytest.gc_collect()\n\n\ndef strip_comments(s):\n    return re.sub(r'\\s+#.*', '', s)\n\n\ndef test_reallocations(capture, msg):\n    \"\"\"When the constructor is overloaded, previous overloads can require a preallocated value.\n    This test makes sure that such preallocated values only happen when they might be necessary,\n    and that they are deallocated properly\"\"\"\n\n    pytest.gc_collect()\n\n    with capture:\n        create_and_destroy(1)\n    assert msg(capture) == \"\"\"\n        noisy new\n        noisy placement new\n        NoisyAlloc(int 1)\n        ---\n        ~NoisyAlloc()\n        noisy delete\n    \"\"\"\n    with capture:\n        create_and_destroy(1.5)\n    assert msg(capture) == strip_comments(\"\"\"\n        noisy new               # allocation required to attempt first overload\n        noisy delete            # have to dealloc before considering factory init overload\n        noisy new               # pointer factory calling \"new\", part 1: allocation\n        NoisyAlloc(double 1.5)  # ... 
part two, invoking constructor\n        ---\n        ~NoisyAlloc()  # Destructor\n        noisy delete   # operator delete\n    \"\"\")\n\n    with capture:\n        create_and_destroy(2, 3)\n    assert msg(capture) == strip_comments(\"\"\"\n        noisy new          # pointer factory calling \"new\", allocation\n        NoisyAlloc(int 2)  # constructor\n        ---\n        ~NoisyAlloc()  # Destructor\n        noisy delete   # operator delete\n    \"\"\")\n\n    with capture:\n        create_and_destroy(2.5, 3)\n    assert msg(capture) == strip_comments(\"\"\"\n        NoisyAlloc(double 2.5)  # construction (local func variable: operator_new not called)\n        noisy new               # return-by-value \"new\" part 1: allocation\n        ~NoisyAlloc()           # moved-away local func variable destruction\n        ---\n        ~NoisyAlloc()  # Destructor\n        noisy delete   # operator delete\n    \"\"\")\n\n    with capture:\n        create_and_destroy(3.5, 4.5)\n    assert msg(capture) == strip_comments(\"\"\"\n        noisy new               # preallocation needed before invoking placement-new overload\n        noisy placement new     # Placement new\n        NoisyAlloc(double 3.5)  # construction\n        ---\n        ~NoisyAlloc()  # Destructor\n        noisy delete   # operator delete\n    \"\"\")\n\n    with capture:\n        create_and_destroy(4, 0.5)\n    assert msg(capture) == strip_comments(\"\"\"\n        noisy new          # preallocation needed before invoking placement-new overload\n        noisy delete       # deallocation of preallocated storage\n        noisy new          # Factory pointer allocation\n        NoisyAlloc(int 4)  # factory pointer construction\n        ---\n        ~NoisyAlloc()  # Destructor\n        noisy delete   # operator delete\n    \"\"\")\n\n    with capture:\n        create_and_destroy(5, \"hi\")\n    assert msg(capture) == strip_comments(\"\"\"\n        noisy new            # preallocation needed before invoking 
first placement new\n        noisy delete         # delete before considering new-style constructor\n        noisy new            # preallocation for second placement new\n        noisy placement new  # Placement new in the second placement new overload\n        NoisyAlloc(int 5)    # construction\n        ---\n        ~NoisyAlloc()  # Destructor\n        noisy delete   # operator delete\n    \"\"\")\n\n\n@pytest.unsupported_on_py2\ndef test_invalid_self():\n    \"\"\"Tests invocation of the pybind-registered base class with an invalid `self` argument.  You\n    can only actually do this on Python 3: Python 2 raises an exception itself if you try.\"\"\"\n    class NotPybindDerived(object):\n        pass\n\n    # Attempts to initialize with an invalid type passed as `self`:\n    class BrokenTF1(m.TestFactory1):\n        def __init__(self, bad):\n            if bad == 1:\n                a = m.TestFactory2(tag.pointer, 1)\n                m.TestFactory1.__init__(a, tag.pointer)\n            elif bad == 2:\n                a = NotPybindDerived()\n                m.TestFactory1.__init__(a, tag.pointer)\n\n    # Same as above, but for a class with an alias:\n    class BrokenTF6(m.TestFactory6):\n        def __init__(self, bad):\n            if bad == 1:\n                a = m.TestFactory2(tag.pointer, 1)\n                m.TestFactory6.__init__(a, tag.base, 1)\n            elif bad == 2:\n                a = m.TestFactory2(tag.pointer, 1)\n                m.TestFactory6.__init__(a, tag.alias, 1)\n            elif bad == 3:\n                m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.base, 1)\n            elif bad == 4:\n                m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.alias, 1)\n\n    for arg in (1, 2):\n        with pytest.raises(TypeError) as excinfo:\n            BrokenTF1(arg)\n        assert str(excinfo.value) == \"__init__(self, ...) 
called with invalid `self` argument\"\n\n    for arg in (1, 2, 3, 4):\n        with pytest.raises(TypeError) as excinfo:\n            BrokenTF6(arg)\n        assert str(excinfo.value) == \"__init__(self, ...) called with invalid `self` argument\"\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_gil_scoped.cpp",
    "content": "/*\n    tests/test_gil_scoped.cpp -- acquire and release gil\n\n    Copyright (c) 2017 Borja Zarco (Google LLC) <bzarco@google.com>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include <pybind11/functional.h>\n\n\nclass VirtClass  {\npublic:\n    virtual ~VirtClass() {}\n    virtual void virtual_func() {}\n    virtual void pure_virtual_func() = 0;\n};\n\nclass PyVirtClass : public VirtClass {\n    void virtual_func() override {\n        PYBIND11_OVERLOAD(void, VirtClass, virtual_func,);\n    }\n    void pure_virtual_func() override {\n        PYBIND11_OVERLOAD_PURE(void, VirtClass, pure_virtual_func,);\n    }\n};\n\nTEST_SUBMODULE(gil_scoped, m) {\n  py::class_<VirtClass, PyVirtClass>(m, \"VirtClass\")\n      .def(py::init<>())\n      .def(\"virtual_func\", &VirtClass::virtual_func)\n      .def(\"pure_virtual_func\", &VirtClass::pure_virtual_func);\n\n    m.def(\"test_callback_py_obj\",\n          [](py::object func) { func(); });\n    m.def(\"test_callback_std_func\",\n          [](const std::function<void()> &func) { func(); });\n    m.def(\"test_callback_virtual_func\",\n          [](VirtClass &virt) { virt.virtual_func(); });\n    m.def(\"test_callback_pure_virtual_func\",\n          [](VirtClass &virt) { virt.pure_virtual_func(); });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_gil_scoped.py",
    "content": "import multiprocessing\nimport threading\nfrom pybind11_tests import gil_scoped as m\n\n\ndef _run_in_process(target, *args, **kwargs):\n    \"\"\"Runs target in process and returns its exitcode after 10s (None if still alive).\"\"\"\n    process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)\n    process.daemon = True\n    try:\n        process.start()\n        # Do not need to wait much, 10s should be more than enough.\n        process.join(timeout=10)\n        return process.exitcode\n    finally:\n        if process.is_alive():\n            process.terminate()\n\n\ndef _python_to_cpp_to_python():\n    \"\"\"Calls different C++ functions that come back to Python.\"\"\"\n    class ExtendedVirtClass(m.VirtClass):\n        def virtual_func(self):\n            pass\n\n        def pure_virtual_func(self):\n            pass\n\n    extended = ExtendedVirtClass()\n    m.test_callback_py_obj(lambda: None)\n    m.test_callback_std_func(lambda: None)\n    m.test_callback_virtual_func(extended)\n    m.test_callback_pure_virtual_func(extended)\n\n\ndef _python_to_cpp_to_python_from_threads(num_threads, parallel=False):\n    \"\"\"Calls different C++ functions that come back to Python, from Python threads.\"\"\"\n    threads = []\n    for _ in range(num_threads):\n        thread = threading.Thread(target=_python_to_cpp_to_python)\n        thread.daemon = True\n        thread.start()\n        if parallel:\n            threads.append(thread)\n        else:\n            thread.join()\n    for thread in threads:\n        thread.join()\n\n\ndef test_python_to_cpp_to_python_from_thread():\n    \"\"\"Makes sure there is no GIL deadlock when running in a thread.\n\n    It runs in a separate process to be able to stop and assert if it deadlocks.\n    \"\"\"\n    assert _run_in_process(_python_to_cpp_to_python_from_threads, 1) == 0\n\n\ndef test_python_to_cpp_to_python_from_thread_multiple_parallel():\n    \"\"\"Makes sure there is no GIL deadlock 
when running in a thread multiple times in parallel.\n\n    It runs in a separate process to be able to stop and assert if it deadlocks.\n    \"\"\"\n    assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=True) == 0\n\n\ndef test_python_to_cpp_to_python_from_thread_multiple_sequential():\n    \"\"\"Makes sure there is no GIL deadlock when running in a thread multiple times sequentially.\n\n    It runs in a separate process to be able to stop and assert if it deadlocks.\n    \"\"\"\n    assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=False) == 0\n\n\ndef test_python_to_cpp_to_python_from_process():\n    \"\"\"Makes sure there is no GIL deadlock when using processes.\n\n    This test is for completion, but it was never an issue.\n    \"\"\"\n    assert _run_in_process(_python_to_cpp_to_python) == 0\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_iostream.cpp",
    "content": "/*\n    tests/test_iostream.cpp -- Usage of scoped_output_redirect\n\n    Copyright (c) 2017 Henry F. Schreiner\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n\n#include <pybind11/iostream.h>\n#include \"pybind11_tests.h\"\n#include <iostream>\n\n\nvoid noisy_function(std::string msg, bool flush) {\n\n    std::cout << msg;\n    if (flush)\n        std::cout << std::flush;\n}\n\nvoid noisy_funct_dual(std::string msg, std::string emsg) {\n    std::cout << msg;\n    std::cerr << emsg;\n}\n\nTEST_SUBMODULE(iostream, m) {\n\n    add_ostream_redirect(m);\n\n    // test_evals\n\n    m.def(\"captured_output_default\", [](std::string msg) {\n        py::scoped_ostream_redirect redir;\n        std::cout << msg << std::flush;\n    });\n\n    m.def(\"captured_output\", [](std::string msg) {\n        py::scoped_ostream_redirect redir(std::cout, py::module::import(\"sys\").attr(\"stdout\"));\n        std::cout << msg << std::flush;\n    });\n\n    m.def(\"guard_output\", &noisy_function,\n            py::call_guard<py::scoped_ostream_redirect>(),\n            py::arg(\"msg\"), py::arg(\"flush\")=true);\n\n    m.def(\"captured_err\", [](std::string msg) {\n        py::scoped_ostream_redirect redir(std::cerr, py::module::import(\"sys\").attr(\"stderr\"));\n        std::cerr << msg << std::flush;\n    });\n\n    m.def(\"noisy_function\", &noisy_function, py::arg(\"msg\"), py::arg(\"flush\") = true);\n\n    m.def(\"dual_guard\", &noisy_funct_dual,\n            py::call_guard<py::scoped_ostream_redirect, py::scoped_estream_redirect>(),\n            py::arg(\"msg\"), py::arg(\"emsg\"));\n\n    m.def(\"raw_output\", [](std::string msg) {\n        std::cout << msg << std::flush;\n    });\n\n    m.def(\"raw_err\", [](std::string msg) {\n        std::cerr << msg << std::flush;\n    });\n\n    m.def(\"captured_dual\", [](std::string msg, std::string emsg) {\n        
py::scoped_ostream_redirect redirout(std::cout, py::module::import(\"sys\").attr(\"stdout\"));\n        py::scoped_ostream_redirect redirerr(std::cerr, py::module::import(\"sys\").attr(\"stderr\"));\n        std::cout << msg << std::flush;\n        std::cerr << emsg << std::flush;\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_iostream.py",
    "content": "from pybind11_tests import iostream as m\nimport sys\n\nfrom contextlib import contextmanager\n\ntry:\n    # Python 3\n    from io import StringIO\nexcept ImportError:\n    # Python 2\n    try:\n        from cStringIO import StringIO\n    except ImportError:\n        from StringIO import StringIO\n\ntry:\n    # Python 3.4\n    from contextlib import redirect_stdout\nexcept ImportError:\n    @contextmanager\n    def redirect_stdout(target):\n        original = sys.stdout\n        sys.stdout = target\n        yield\n        sys.stdout = original\n\ntry:\n    # Python 3.5\n    from contextlib import redirect_stderr\nexcept ImportError:\n    @contextmanager\n    def redirect_stderr(target):\n        original = sys.stderr\n        sys.stderr = target\n        yield\n        sys.stderr = original\n\n\ndef test_captured(capsys):\n    msg = \"I've been redirected to Python, I hope!\"\n    m.captured_output(msg)\n    stdout, stderr = capsys.readouterr()\n    assert stdout == msg\n    assert stderr == ''\n\n    m.captured_output_default(msg)\n    stdout, stderr = capsys.readouterr()\n    assert stdout == msg\n    assert stderr == ''\n\n    m.captured_err(msg)\n    stdout, stderr = capsys.readouterr()\n    assert stdout == ''\n    assert stderr == msg\n\n\ndef test_captured_large_string(capsys):\n    # Make this bigger than the buffer used on the C++ side: 1024 chars\n    msg = \"I've been redirected to Python, I hope!\"\n    msg = msg * (1024 // len(msg) + 1)\n\n    m.captured_output_default(msg)\n    stdout, stderr = capsys.readouterr()\n    assert stdout == msg\n    assert stderr == ''\n\n\ndef test_guard_capture(capsys):\n    msg = \"I've been redirected to Python, I hope!\"\n    m.guard_output(msg)\n    stdout, stderr = capsys.readouterr()\n    assert stdout == msg\n    assert stderr == ''\n\n\ndef test_series_captured(capture):\n    with capture:\n        m.captured_output(\"a\")\n        m.captured_output(\"b\")\n    assert capture == \"ab\"\n\n\ndef 
test_flush(capfd):\n    msg = \"(not flushed)\"\n    msg2 = \"(flushed)\"\n\n    with m.ostream_redirect():\n        m.noisy_function(msg, flush=False)\n        stdout, stderr = capfd.readouterr()\n        assert stdout == ''\n\n        m.noisy_function(msg2, flush=True)\n        stdout, stderr = capfd.readouterr()\n        assert stdout == msg + msg2\n\n        m.noisy_function(msg, flush=False)\n\n    stdout, stderr = capfd.readouterr()\n    assert stdout == msg\n\n\ndef test_not_captured(capfd):\n    msg = \"Something that should not show up in log\"\n    stream = StringIO()\n    with redirect_stdout(stream):\n        m.raw_output(msg)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == msg\n    assert stderr == ''\n    assert stream.getvalue() == ''\n\n    stream = StringIO()\n    with redirect_stdout(stream):\n        m.captured_output(msg)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == ''\n    assert stderr == ''\n    assert stream.getvalue() == msg\n\n\ndef test_err(capfd):\n    msg = \"Something that should not show up in log\"\n    stream = StringIO()\n    with redirect_stderr(stream):\n        m.raw_err(msg)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == ''\n    assert stderr == msg\n    assert stream.getvalue() == ''\n\n    stream = StringIO()\n    with redirect_stderr(stream):\n        m.captured_err(msg)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == ''\n    assert stderr == ''\n    assert stream.getvalue() == msg\n\n\ndef test_multi_captured(capfd):\n    stream = StringIO()\n    with redirect_stdout(stream):\n        m.captured_output(\"a\")\n        m.raw_output(\"b\")\n        m.captured_output(\"c\")\n        m.raw_output(\"d\")\n    stdout, stderr = capfd.readouterr()\n    assert stdout == 'bd'\n    assert stream.getvalue() == 'ac'\n\n\ndef test_dual(capsys):\n    m.captured_dual(\"a\", \"b\")\n    stdout, stderr = capsys.readouterr()\n    assert stdout == \"a\"\n    assert stderr == 
\"b\"\n\n\ndef test_redirect(capfd):\n    msg = \"Should not be in log!\"\n    stream = StringIO()\n    with redirect_stdout(stream):\n        m.raw_output(msg)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == msg\n    assert stream.getvalue() == ''\n\n    stream = StringIO()\n    with redirect_stdout(stream):\n        with m.ostream_redirect():\n            m.raw_output(msg)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == ''\n    assert stream.getvalue() == msg\n\n    stream = StringIO()\n    with redirect_stdout(stream):\n        m.raw_output(msg)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == msg\n    assert stream.getvalue() == ''\n\n\ndef test_redirect_err(capfd):\n    msg = \"StdOut\"\n    msg2 = \"StdErr\"\n\n    stream = StringIO()\n    with redirect_stderr(stream):\n        with m.ostream_redirect(stdout=False):\n            m.raw_output(msg)\n            m.raw_err(msg2)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == msg\n    assert stderr == ''\n    assert stream.getvalue() == msg2\n\n\ndef test_redirect_both(capfd):\n    msg = \"StdOut\"\n    msg2 = \"StdErr\"\n\n    stream = StringIO()\n    stream2 = StringIO()\n    with redirect_stdout(stream):\n        with redirect_stderr(stream2):\n            with m.ostream_redirect():\n                m.raw_output(msg)\n                m.raw_err(msg2)\n    stdout, stderr = capfd.readouterr()\n    assert stdout == ''\n    assert stderr == ''\n    assert stream.getvalue() == msg\n    assert stream2.getvalue() == msg2\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_kwargs_and_defaults.cpp",
    "content": "/*\n    tests/test_kwargs_and_defaults.cpp -- keyword arguments and default values\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <pybind11/stl.h>\n\nTEST_SUBMODULE(kwargs_and_defaults, m) {\n    auto kw_func = [](int x, int y) { return \"x=\" + std::to_string(x) + \", y=\" + std::to_string(y); };\n\n    // test_named_arguments\n    m.def(\"kw_func0\", kw_func);\n    m.def(\"kw_func1\", kw_func, py::arg(\"x\"), py::arg(\"y\"));\n    m.def(\"kw_func2\", kw_func, py::arg(\"x\") = 100, py::arg(\"y\") = 200);\n    m.def(\"kw_func3\", [](const char *) { }, py::arg(\"data\") = std::string(\"Hello world!\"));\n\n    /* A fancier default argument */\n    std::vector<int> list{{13, 17}};\n    m.def(\"kw_func4\", [](const std::vector<int> &entries) {\n        std::string ret = \"{\";\n        for (int i : entries)\n            ret += std::to_string(i) + \" \";\n        ret.back() = '}';\n        return ret;\n    }, py::arg(\"myList\") = list);\n\n    m.def(\"kw_func_udl\", kw_func, \"x\"_a, \"y\"_a=300);\n    m.def(\"kw_func_udl_z\", kw_func, \"x\"_a, \"y\"_a=0);\n\n    // test_args_and_kwargs\n    m.def(\"args_function\", [](py::args args) -> py::tuple {\n        return std::move(args);\n    });\n    m.def(\"args_kwargs_function\", [](py::args args, py::kwargs kwargs) {\n        return py::make_tuple(args, kwargs);\n    });\n\n    // test_mixed_args_and_kwargs\n    m.def(\"mixed_plus_args\", [](int i, double j, py::args args) {\n        return py::make_tuple(i, j, args);\n    });\n    m.def(\"mixed_plus_kwargs\", [](int i, double j, py::kwargs kwargs) {\n        return py::make_tuple(i, j, kwargs);\n    });\n    auto mixed_plus_both = [](int i, double j, py::args args, py::kwargs kwargs) {\n        return py::make_tuple(i, j, args, 
kwargs);\n    };\n    m.def(\"mixed_plus_args_kwargs\", mixed_plus_both);\n\n    m.def(\"mixed_plus_args_kwargs_defaults\", mixed_plus_both,\n            py::arg(\"i\") = 1, py::arg(\"j\") = 3.14159);\n\n    // test_args_refcount\n    // PyPy needs a garbage collection to get the reference count values to match CPython's behaviour\n    #ifdef PYPY_VERSION\n    #define GC_IF_NEEDED ConstructorStats::gc()\n    #else\n    #define GC_IF_NEEDED\n    #endif\n    m.def(\"arg_refcount_h\", [](py::handle h) { GC_IF_NEEDED; return h.ref_count(); });\n    m.def(\"arg_refcount_h\", [](py::handle h, py::handle, py::handle) { GC_IF_NEEDED; return h.ref_count(); });\n    m.def(\"arg_refcount_o\", [](py::object o) { GC_IF_NEEDED; return o.ref_count(); });\n    m.def(\"args_refcount\", [](py::args a) {\n        GC_IF_NEEDED;\n        py::tuple t(a.size());\n        for (size_t i = 0; i < a.size(); i++)\n            // Use raw Python API here to avoid an extra, intermediate incref on the tuple item:\n            t[i] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast<ssize_t>(i)));\n        return t;\n    });\n    m.def(\"mixed_args_refcount\", [](py::object o, py::args a) {\n        GC_IF_NEEDED;\n        py::tuple t(a.size() + 1);\n        t[0] = o.ref_count();\n        for (size_t i = 0; i < a.size(); i++)\n            // Use raw Python API here to avoid an extra, intermediate incref on the tuple item:\n            t[i + 1] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast<ssize_t>(i)));\n        return t;\n    });\n\n    // pybind11 won't allow these to be bound: args and kwargs, if present, must be at the end.\n    // Uncomment these to test that the static_assert is indeed working:\n//    m.def(\"bad_args1\", [](py::args, int) {});\n//    m.def(\"bad_args2\", [](py::kwargs, int) {});\n//    m.def(\"bad_args3\", [](py::kwargs, py::args) {});\n//    m.def(\"bad_args4\", [](py::args, int, py::kwargs) {});\n//    m.def(\"bad_args5\", [](py::args, py::kwargs, int) 
{});\n//    m.def(\"bad_args6\", [](py::args, py::args) {});\n//    m.def(\"bad_args7\", [](py::kwargs, py::kwargs) {});\n\n    // test_function_signatures (along with most of the above)\n    struct KWClass { void foo(int, float) {} };\n    py::class_<KWClass>(m, \"KWClass\")\n        .def(\"foo0\", &KWClass::foo)\n        .def(\"foo1\", &KWClass::foo, \"x\"_a, \"y\"_a);\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_kwargs_and_defaults.py",
    "content": "import pytest\nfrom pybind11_tests import kwargs_and_defaults as m\n\n\ndef test_function_signatures(doc):\n    assert doc(m.kw_func0) == \"kw_func0(arg0: int, arg1: int) -> str\"\n    assert doc(m.kw_func1) == \"kw_func1(x: int, y: int) -> str\"\n    assert doc(m.kw_func2) == \"kw_func2(x: int = 100, y: int = 200) -> str\"\n    assert doc(m.kw_func3) == \"kw_func3(data: str = 'Hello world!') -> None\"\n    assert doc(m.kw_func4) == \"kw_func4(myList: List[int] = [13, 17]) -> str\"\n    assert doc(m.kw_func_udl) == \"kw_func_udl(x: int, y: int = 300) -> str\"\n    assert doc(m.kw_func_udl_z) == \"kw_func_udl_z(x: int, y: int = 0) -> str\"\n    assert doc(m.args_function) == \"args_function(*args) -> tuple\"\n    assert doc(m.args_kwargs_function) == \"args_kwargs_function(*args, **kwargs) -> tuple\"\n    assert doc(m.KWClass.foo0) == \\\n        \"foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None\"\n    assert doc(m.KWClass.foo1) == \\\n        \"foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None\"\n\n\ndef test_named_arguments(msg):\n    assert m.kw_func0(5, 10) == \"x=5, y=10\"\n\n    assert m.kw_func1(5, 10) == \"x=5, y=10\"\n    assert m.kw_func1(5, y=10) == \"x=5, y=10\"\n    assert m.kw_func1(y=10, x=5) == \"x=5, y=10\"\n\n    assert m.kw_func2() == \"x=100, y=200\"\n    assert m.kw_func2(5) == \"x=5, y=200\"\n    assert m.kw_func2(x=5) == \"x=5, y=200\"\n    assert m.kw_func2(y=10) == \"x=100, y=10\"\n    assert m.kw_func2(5, 10) == \"x=5, y=10\"\n    assert m.kw_func2(x=5, y=10) == \"x=5, y=10\"\n\n    with pytest.raises(TypeError) as excinfo:\n        # noinspection PyArgumentList\n        m.kw_func2(x=5, y=10, z=12)\n    assert excinfo.match(\n        r'(?s)^kw_func2\\(\\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))' + '{3}$')\n\n    assert m.kw_func4() == \"{13 17}\"\n    assert m.kw_func4(myList=[1, 2, 3]) == \"{1 2 3}\"\n\n    assert m.kw_func_udl(x=5, y=10) == \"x=5, y=10\"\n    
assert m.kw_func_udl_z(x=5) == \"x=5, y=0\"\n\n\ndef test_arg_and_kwargs():\n    args = 'arg1_value', 'arg2_value', 3\n    assert m.args_function(*args) == args\n\n    args = 'a1', 'a2'\n    kwargs = dict(arg3='a3', arg4=4)\n    assert m.args_kwargs_function(*args, **kwargs) == (args, kwargs)\n\n\ndef test_mixed_args_and_kwargs(msg):\n    mpa = m.mixed_plus_args\n    mpk = m.mixed_plus_kwargs\n    mpak = m.mixed_plus_args_kwargs\n    mpakd = m.mixed_plus_args_kwargs_defaults\n\n    assert mpa(1, 2.5, 4, 99.5, None) == (1, 2.5, (4, 99.5, None))\n    assert mpa(1, 2.5) == (1, 2.5, ())\n    with pytest.raises(TypeError) as excinfo:\n        assert mpa(1)\n    assert msg(excinfo.value) == \"\"\"\n        mixed_plus_args(): incompatible function arguments. The following argument types are supported:\n            1. (arg0: int, arg1: float, *args) -> tuple\n\n        Invoked with: 1\n    \"\"\"  # noqa: E501 line too long\n    with pytest.raises(TypeError) as excinfo:\n        assert mpa()\n    assert msg(excinfo.value) == \"\"\"\n        mixed_plus_args(): incompatible function arguments. The following argument types are supported:\n            1. 
(arg0: int, arg1: float, *args) -> tuple\n\n        Invoked with:\n    \"\"\"  # noqa: E501 line too long\n\n    assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (-2, 3.5, {'e': 2.71828, 'pi': 3.14159})\n    assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == (\n        7, 7.7, (7.77, 7.777, 7.7777), {'minusseven': -7})\n    assert mpakd() == (1, 3.14159, (), {})\n    assert mpakd(3) == (3, 3.14159, (), {})\n    assert mpakd(j=2.71828) == (1, 2.71828, (), {})\n    assert mpakd(k=42) == (1, 3.14159, (), {'k': 42})\n    assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == (\n        1, 1, (2, 3, 5, 8), {'then': 13, 'followedby': 21})\n    # Arguments specified both positionally and via kwargs should fail:\n    with pytest.raises(TypeError) as excinfo:\n        assert mpakd(1, i=1)\n    assert msg(excinfo.value) == \"\"\"\n        mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:\n            1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple\n\n        Invoked with: 1; kwargs: i=1\n    \"\"\"  # noqa: E501 line too long\n    with pytest.raises(TypeError) as excinfo:\n        assert mpakd(1, 2, j=1)\n    assert msg(excinfo.value) == \"\"\"\n        mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:\n            1. 
(i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple\n\n        Invoked with: 1, 2; kwargs: j=1\n    \"\"\"  # noqa: E501 line too long\n\n\ndef test_args_refcount():\n    \"\"\"Issue/PR #1216 - py::args elements get double-inc_ref()ed when combined with regular\n    arguments\"\"\"\n    refcount = m.arg_refcount_h\n\n    myval = 54321\n    expected = refcount(myval)\n    assert m.arg_refcount_h(myval) == expected\n    assert m.arg_refcount_o(myval) == expected + 1\n    assert m.arg_refcount_h(myval) == expected\n    assert refcount(myval) == expected\n\n    assert m.mixed_plus_args(1, 2.0, \"a\", myval) == (1, 2.0, (\"a\", myval))\n    assert refcount(myval) == expected\n\n    assert m.mixed_plus_kwargs(3, 4.0, a=1, b=myval) == (3, 4.0, {\"a\": 1, \"b\": myval})\n    assert refcount(myval) == expected\n\n    assert m.args_function(-1, myval) == (-1, myval)\n    assert refcount(myval) == expected\n\n    assert m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (5, 6.0, (myval,), {\"a\": myval})\n    assert refcount(myval) == expected\n\n    assert m.args_kwargs_function(7, 8, myval, a=1, b=myval) == \\\n        ((7, 8, myval), {\"a\": 1, \"b\": myval})\n    assert refcount(myval) == expected\n\n    exp3 = refcount(myval, myval, myval)\n    assert m.args_refcount(myval, myval, myval) == (exp3, exp3, exp3)\n    assert refcount(myval) == expected\n\n    # This function takes the first arg as a `py::object` and the rest as a `py::args`.  Unlike the\n    # previous case, when we have both positional and `py::args` we need to construct a new tuple\n    # for the `py::args`; in the previous case, we could simply inc_ref and pass on Python's input\n    # tuple without having to inc_ref the individual elements, but here we can't, hence the extra\n    # refs.\n    assert m.mixed_args_refcount(myval, myval, myval) == (exp3 + 3, exp3 + 3, exp3 + 3)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_local_bindings.cpp",
    "content": "/*\n    tests/test_local_bindings.cpp -- tests the py::module_local class feature which makes a class\n                                     binding local to the module in which it is defined.\n\n    Copyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"local_bindings.h\"\n#include <pybind11/stl.h>\n#include <pybind11/stl_bind.h>\n#include <numeric>\n\nTEST_SUBMODULE(local_bindings, m) {\n    // test_load_external\n    m.def(\"load_external1\", [](ExternalType1 &e) { return e.i; });\n    m.def(\"load_external2\", [](ExternalType2 &e) { return e.i; });\n\n    // test_local_bindings\n    // Register a class with py::module_local:\n    bind_local<LocalType, -1>(m, \"LocalType\", py::module_local())\n        .def(\"get3\", [](LocalType &t) { return t.i + 3; })\n        ;\n\n    m.def(\"local_value\", [](LocalType &l) { return l.i; });\n\n    // test_nonlocal_failure\n    // The main pybind11 test module is loaded first, so this registration will succeed (the second\n    // one, in pybind11_cross_module_tests.cpp, is designed to fail):\n    bind_local<NonLocalType, 0>(m, \"NonLocalType\")\n        .def(py::init<int>())\n        .def(\"get\", [](LocalType &i) { return i.i; })\n        ;\n\n    // test_duplicate_local\n    // py::module_local declarations should be visible across compilation units that get linked together;\n    // this tries to register a duplicate local.  It depends on a definition in test_class.cpp and\n    // should raise a runtime error from the duplicate definition attempt.  
If test_class isn't\n    // available it *also* throws a runtime error (with \"test_class not enabled\" as value).\n    m.def(\"register_local_external\", [m]() {\n        auto main = py::module::import(\"pybind11_tests\");\n        if (py::hasattr(main, \"class_\")) {\n            bind_local<LocalExternal, 7>(m, \"LocalExternal\", py::module_local());\n        }\n        else throw std::runtime_error(\"test_class not enabled\");\n    });\n\n    // test_stl_bind_local\n    // stl_bind.h binders defaults to py::module_local if the types are local or converting:\n    py::bind_vector<LocalVec>(m, \"LocalVec\");\n    py::bind_map<LocalMap>(m, \"LocalMap\");\n    // and global if the type (or one of the types, for the map) is global:\n    py::bind_vector<NonLocalVec>(m, \"NonLocalVec\");\n    py::bind_map<NonLocalMap>(m, \"NonLocalMap\");\n\n    // test_stl_bind_global\n    // They can, however, be overridden to global using `py::module_local(false)`:\n    bind_local<NonLocal2, 10>(m, \"NonLocal2\");\n    py::bind_vector<LocalVec2>(m, \"LocalVec2\", py::module_local());\n    py::bind_map<NonLocalMap2>(m, \"NonLocalMap2\", py::module_local(false));\n\n    // test_mixed_local_global\n    // We try this both with the global type registered first and vice versa (the order shouldn't\n    // matter).\n    m.def(\"register_mixed_global\", [m]() {\n        bind_local<MixedGlobalLocal, 100>(m, \"MixedGlobalLocal\", py::module_local(false));\n    });\n    m.def(\"register_mixed_local\", [m]() {\n        bind_local<MixedLocalGlobal, 1000>(m, \"MixedLocalGlobal\", py::module_local());\n    });\n    m.def(\"get_mixed_gl\", [](int i) { return MixedGlobalLocal(i); });\n    m.def(\"get_mixed_lg\", [](int i) { return MixedLocalGlobal(i); });\n\n    // test_internal_locals_differ\n    m.def(\"local_cpp_types_addr\", []() { return (uintptr_t) &py::detail::registered_local_types_cpp(); });\n\n    // test_stl_caster_vs_stl_bind\n    m.def(\"load_vector_via_caster\", [](std::vector<int> v) 
{\n        return std::accumulate(v.begin(), v.end(), 0);\n    });\n\n    // test_cross_module_calls\n    m.def(\"return_self\", [](LocalVec *v) { return v; });\n    m.def(\"return_copy\", [](const LocalVec &v) { return LocalVec(v); });\n\n    class Cat : public pets::Pet { public: Cat(std::string name) : Pet(name) {}; };\n    py::class_<pets::Pet>(m, \"Pet\", py::module_local())\n        .def(\"get_name\", &pets::Pet::name);\n    // Binding for local extending class:\n    py::class_<Cat, pets::Pet>(m, \"Cat\")\n        .def(py::init<std::string>());\n    m.def(\"pet_name\", [](pets::Pet &p) { return p.name(); });\n\n    py::class_<MixGL>(m, \"MixGL\").def(py::init<int>());\n    m.def(\"get_gl_value\", [](MixGL &o) { return o.i + 10; });\n\n    py::class_<MixGL2>(m, \"MixGL2\").def(py::init<int>());\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_local_bindings.py",
    "content": "import pytest\n\nfrom pybind11_tests import local_bindings as m\n\n\ndef test_load_external():\n    \"\"\"Load a `py::module_local` type that's only registered in an external module\"\"\"\n    import pybind11_cross_module_tests as cm\n\n    assert m.load_external1(cm.ExternalType1(11)) == 11\n    assert m.load_external2(cm.ExternalType2(22)) == 22\n\n    with pytest.raises(TypeError) as excinfo:\n        assert m.load_external2(cm.ExternalType1(21)) == 21\n    assert \"incompatible function arguments\" in str(excinfo.value)\n\n    with pytest.raises(TypeError) as excinfo:\n        assert m.load_external1(cm.ExternalType2(12)) == 12\n    assert \"incompatible function arguments\" in str(excinfo.value)\n\n\ndef test_local_bindings():\n    \"\"\"Tests that duplicate `py::module_local` class bindings work across modules\"\"\"\n\n    # Make sure we can load the second module with the conflicting (but local) definition:\n    import pybind11_cross_module_tests as cm\n\n    i1 = m.LocalType(5)\n    assert i1.get() == 4\n    assert i1.get3() == 8\n\n    i2 = cm.LocalType(10)\n    assert i2.get() == 11\n    assert i2.get2() == 12\n\n    assert not hasattr(i1, 'get2')\n    assert not hasattr(i2, 'get3')\n\n    # Loading within the local module\n    assert m.local_value(i1) == 5\n    assert cm.local_value(i2) == 10\n\n    # Cross-module loading works as well (on failure, the type loader looks for\n    # external module-local converters):\n    assert m.local_value(i2) == 10\n    assert cm.local_value(i1) == 5\n\n\ndef test_nonlocal_failure():\n    \"\"\"Tests that attempting to register a non-local type in multiple modules fails\"\"\"\n    import pybind11_cross_module_tests as cm\n\n    with pytest.raises(RuntimeError) as excinfo:\n        cm.register_nonlocal()\n    assert str(excinfo.value) == 'generic_type: type \"NonLocalType\" is already registered!'\n\n\ndef test_duplicate_local():\n    \"\"\"Tests expected failure when registering a class twice with 
py::local in the same module\"\"\"\n    with pytest.raises(RuntimeError) as excinfo:\n        m.register_local_external()\n    import pybind11_tests\n    assert str(excinfo.value) == (\n        'generic_type: type \"LocalExternal\" is already registered!'\n        if hasattr(pybind11_tests, 'class_') else 'test_class not enabled')\n\n\ndef test_stl_bind_local():\n    import pybind11_cross_module_tests as cm\n\n    v1, v2 = m.LocalVec(), cm.LocalVec()\n    v1.append(m.LocalType(1))\n    v1.append(m.LocalType(2))\n    v2.append(cm.LocalType(1))\n    v2.append(cm.LocalType(2))\n\n    # Cross module value loading:\n    v1.append(cm.LocalType(3))\n    v2.append(m.LocalType(3))\n\n    assert [i.get() for i in v1] == [0, 1, 2]\n    assert [i.get() for i in v2] == [2, 3, 4]\n\n    v3, v4 = m.NonLocalVec(), cm.NonLocalVec2()\n    v3.append(m.NonLocalType(1))\n    v3.append(m.NonLocalType(2))\n    v4.append(m.NonLocal2(3))\n    v4.append(m.NonLocal2(4))\n\n    assert [i.get() for i in v3] == [1, 2]\n    assert [i.get() for i in v4] == [13, 14]\n\n    d1, d2 = m.LocalMap(), cm.LocalMap()\n    d1[\"a\"] = v1[0]\n    d1[\"b\"] = v1[1]\n    d2[\"c\"] = v2[0]\n    d2[\"d\"] = v2[1]\n    assert {i: d1[i].get() for i in d1} == {'a': 0, 'b': 1}\n    assert {i: d2[i].get() for i in d2} == {'c': 2, 'd': 3}\n\n\ndef test_stl_bind_global():\n    import pybind11_cross_module_tests as cm\n\n    with pytest.raises(RuntimeError) as excinfo:\n        cm.register_nonlocal_map()\n    assert str(excinfo.value) == 'generic_type: type \"NonLocalMap\" is already registered!'\n\n    with pytest.raises(RuntimeError) as excinfo:\n        cm.register_nonlocal_vec()\n    assert str(excinfo.value) == 'generic_type: type \"NonLocalVec\" is already registered!'\n\n    with pytest.raises(RuntimeError) as excinfo:\n        cm.register_nonlocal_map2()\n    assert str(excinfo.value) == 'generic_type: type \"NonLocalMap2\" is already registered!'\n\n\ndef test_mixed_local_global():\n    \"\"\"Local types take 
precedence over globally registered types: a module with a `module_local`\n    type can be registered even if the type is already registered globally.  With the module,\n    casting will go to the local type; outside the module casting goes to the global type.\"\"\"\n    import pybind11_cross_module_tests as cm\n    m.register_mixed_global()\n    m.register_mixed_local()\n\n    a = []\n    a.append(m.MixedGlobalLocal(1))\n    a.append(m.MixedLocalGlobal(2))\n    a.append(m.get_mixed_gl(3))\n    a.append(m.get_mixed_lg(4))\n\n    assert [x.get() for x in a] == [101, 1002, 103, 1004]\n\n    cm.register_mixed_global_local()\n    cm.register_mixed_local_global()\n    a.append(m.MixedGlobalLocal(5))\n    a.append(m.MixedLocalGlobal(6))\n    a.append(cm.MixedGlobalLocal(7))\n    a.append(cm.MixedLocalGlobal(8))\n    a.append(m.get_mixed_gl(9))\n    a.append(m.get_mixed_lg(10))\n    a.append(cm.get_mixed_gl(11))\n    a.append(cm.get_mixed_lg(12))\n\n    assert [x.get() for x in a] == \\\n        [101, 1002, 103, 1004, 105, 1006, 207, 2008, 109, 1010, 211, 2012]\n\n\ndef test_internal_locals_differ():\n    \"\"\"Makes sure the internal local type map differs across the two modules\"\"\"\n    import pybind11_cross_module_tests as cm\n    assert m.local_cpp_types_addr() != cm.local_cpp_types_addr()\n\n\ndef test_stl_caster_vs_stl_bind(msg):\n    \"\"\"One module uses a generic vector caster from `<pybind11/stl.h>` while the other\n    exports `std::vector<int>` via `py:bind_vector` and `py::module_local`\"\"\"\n    import pybind11_cross_module_tests as cm\n\n    v1 = cm.VectorInt([1, 2, 3])\n    assert m.load_vector_via_caster(v1) == 6\n    assert cm.load_vector_via_binding(v1) == 6\n\n    v2 = [1, 2, 3]\n    assert m.load_vector_via_caster(v2) == 6\n    with pytest.raises(TypeError) as excinfo:\n        cm.load_vector_via_binding(v2) == 6\n    assert msg(excinfo.value) == \"\"\"\n    load_vector_via_binding(): incompatible function arguments. 
The following argument types are supported:\n        1. (arg0: pybind11_cross_module_tests.VectorInt) -> int\n\n    Invoked with: [1, 2, 3]\n    \"\"\"  # noqa: E501 line too long\n\n\ndef test_cross_module_calls():\n    import pybind11_cross_module_tests as cm\n\n    v1 = m.LocalVec()\n    v1.append(m.LocalType(1))\n    v2 = cm.LocalVec()\n    v2.append(cm.LocalType(2))\n\n    # Returning the self pointer should get picked up as returning an existing\n    # instance (even when that instance is of a foreign, non-local type).\n    assert m.return_self(v1) is v1\n    assert cm.return_self(v2) is v2\n    assert m.return_self(v2) is v2\n    assert cm.return_self(v1) is v1\n\n    assert m.LocalVec is not cm.LocalVec\n    # Returning a copy, on the other hand, always goes to the local type,\n    # regardless of where the source type came from.\n    assert type(m.return_copy(v1)) is m.LocalVec\n    assert type(m.return_copy(v2)) is m.LocalVec\n    assert type(cm.return_copy(v1)) is cm.LocalVec\n    assert type(cm.return_copy(v2)) is cm.LocalVec\n\n    # Test the example given in the documentation (which also tests inheritance casting):\n    mycat = m.Cat(\"Fluffy\")\n    mydog = cm.Dog(\"Rover\")\n    assert mycat.get_name() == \"Fluffy\"\n    assert mydog.name() == \"Rover\"\n    assert m.Cat.__base__.__name__ == \"Pet\"\n    assert cm.Dog.__base__.__name__ == \"Pet\"\n    assert m.Cat.__base__ is not cm.Dog.__base__\n    assert m.pet_name(mycat) == \"Fluffy\"\n    assert m.pet_name(mydog) == \"Rover\"\n    assert cm.pet_name(mycat) == \"Fluffy\"\n    assert cm.pet_name(mydog) == \"Rover\"\n\n    assert m.MixGL is not cm.MixGL\n    a = m.MixGL(1)\n    b = cm.MixGL(2)\n    assert m.get_gl_value(a) == 11\n    assert m.get_gl_value(b) == 12\n    assert cm.get_gl_value(a) == 101\n    assert cm.get_gl_value(b) == 102\n\n    c, d = m.MixGL2(3), cm.MixGL2(4)\n    with pytest.raises(TypeError) as excinfo:\n        m.get_gl_value(c)\n    assert \"incompatible function arguments\" 
in str(excinfo)\n    with pytest.raises(TypeError) as excinfo:\n        m.get_gl_value(d)\n    assert \"incompatible function arguments\" in str(excinfo)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_methods_and_attributes.cpp",
    "content": "/*\n    tests/test_methods_and_attributes.cpp -- constructors, deconstructors, attribute access,\n    __str__, argument and return value conventions\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n\nclass ExampleMandA {\npublic:\n    ExampleMandA() { print_default_created(this); }\n    ExampleMandA(int value) : value(value) { print_created(this, value); }\n    ExampleMandA(const ExampleMandA &e) : value(e.value) { print_copy_created(this); }\n    ExampleMandA(ExampleMandA &&e) : value(e.value) { print_move_created(this); }\n    ~ExampleMandA() { print_destroyed(this); }\n\n    std::string toString() {\n        return \"ExampleMandA[value=\" + std::to_string(value) + \"]\";\n    }\n\n    void operator=(const ExampleMandA &e) { print_copy_assigned(this); value = e.value; }\n    void operator=(ExampleMandA &&e) { print_move_assigned(this); value = e.value; }\n\n    void add1(ExampleMandA other) { value += other.value; }         // passing by value\n    void add2(ExampleMandA &other) { value += other.value; }        // passing by reference\n    void add3(const ExampleMandA &other) { value += other.value; }  // passing by const reference\n    void add4(ExampleMandA *other) { value += other->value; }       // passing by pointer\n    void add5(const ExampleMandA *other) { value += other->value; } // passing by const pointer\n\n    void add6(int other) { value += other; }                        // passing by value\n    void add7(int &other) { value += other; }                       // passing by reference\n    void add8(const int &other) { value += other; }                 // passing by const reference\n    void add9(int *other) { value += *other; }                      // passing by pointer\n    void add10(const int *other) { value += *other; 
}               // passing by const pointer\n\n    ExampleMandA self1() { return *this; }                          // return by value\n    ExampleMandA &self2() { return *this; }                         // return by reference\n    const ExampleMandA &self3() { return *this; }                   // return by const reference\n    ExampleMandA *self4() { return this; }                          // return by pointer\n    const ExampleMandA *self5() { return this; }                    // return by const pointer\n\n    int internal1() { return value; }                               // return by value\n    int &internal2() { return value; }                              // return by reference\n    const int &internal3() { return value; }                        // return by const reference\n    int *internal4() { return &value; }                             // return by pointer\n    const int *internal5() { return &value; }                       // return by const pointer\n\n    py::str overloaded()             { return \"()\"; }\n    py::str overloaded(int)          { return \"(int)\"; }\n    py::str overloaded(int, float)   { return \"(int, float)\"; }\n    py::str overloaded(float, int)   { return \"(float, int)\"; }\n    py::str overloaded(int, int)     { return \"(int, int)\"; }\n    py::str overloaded(float, float) { return \"(float, float)\"; }\n    py::str overloaded(int)          const { return \"(int) const\"; }\n    py::str overloaded(int, float)   const { return \"(int, float) const\"; }\n    py::str overloaded(float, int)   const { return \"(float, int) const\"; }\n    py::str overloaded(int, int)     const { return \"(int, int) const\"; }\n    py::str overloaded(float, float) const { return \"(float, float) const\"; }\n\n    static py::str overloaded(float) { return \"static float\"; }\n\n    int value = 0;\n};\n\nstruct TestProperties {\n    int value = 1;\n    static int static_value;\n\n    int get() const { return value; }\n    void set(int v) { value = v; 
}\n\n    static int static_get() { return static_value; }\n    static void static_set(int v) { static_value = v; }\n};\nint TestProperties::static_value = 1;\n\nstruct TestPropertiesOverride : TestProperties {\n    int value = 99;\n    static int static_value;\n};\nint TestPropertiesOverride::static_value = 99;\n\nstruct TestPropRVP {\n    UserType v1{1};\n    UserType v2{1};\n    static UserType sv1;\n    static UserType sv2;\n\n    const UserType &get1() const { return v1; }\n    const UserType &get2() const { return v2; }\n    UserType get_rvalue() const { return v2; }\n    void set1(int v) { v1.set(v); }\n    void set2(int v) { v2.set(v); }\n};\nUserType TestPropRVP::sv1(1);\nUserType TestPropRVP::sv2(1);\n\n// py::arg/py::arg_v testing: these arguments just record their argument when invoked\nclass ArgInspector1 { public: std::string arg = \"(default arg inspector 1)\"; };\nclass ArgInspector2 { public: std::string arg = \"(default arg inspector 2)\"; };\nclass ArgAlwaysConverts { };\nnamespace pybind11 { namespace detail {\ntemplate <> struct type_caster<ArgInspector1> {\npublic:\n    PYBIND11_TYPE_CASTER(ArgInspector1, _(\"ArgInspector1\"));\n\n    bool load(handle src, bool convert) {\n        value.arg = \"loading ArgInspector1 argument \" +\n            std::string(convert ? \"WITH\" : \"WITHOUT\") + \" conversion allowed.  \"\n            \"Argument value = \" + (std::string) str(src);\n        return true;\n    }\n\n    static handle cast(const ArgInspector1 &src, return_value_policy, handle) {\n        return str(src.arg).release();\n    }\n};\ntemplate <> struct type_caster<ArgInspector2> {\npublic:\n    PYBIND11_TYPE_CASTER(ArgInspector2, _(\"ArgInspector2\"));\n\n    bool load(handle src, bool convert) {\n        value.arg = \"loading ArgInspector2 argument \" +\n            std::string(convert ? \"WITH\" : \"WITHOUT\") + \" conversion allowed.  
\"\n            \"Argument value = \" + (std::string) str(src);\n        return true;\n    }\n\n    static handle cast(const ArgInspector2 &src, return_value_policy, handle) {\n        return str(src.arg).release();\n    }\n};\ntemplate <> struct type_caster<ArgAlwaysConverts> {\npublic:\n    PYBIND11_TYPE_CASTER(ArgAlwaysConverts, _(\"ArgAlwaysConverts\"));\n\n    bool load(handle, bool convert) {\n        return convert;\n    }\n\n    static handle cast(const ArgAlwaysConverts &, return_value_policy, handle) {\n        return py::none().release();\n    }\n};\n}}\n\n// test_custom_caster_destruction\nclass DestructionTester {\npublic:\n    DestructionTester() { print_default_created(this); }\n    ~DestructionTester() { print_destroyed(this); }\n    DestructionTester(const DestructionTester &) { print_copy_created(this); }\n    DestructionTester(DestructionTester &&) { print_move_created(this); }\n    DestructionTester &operator=(const DestructionTester &) { print_copy_assigned(this); return *this; }\n    DestructionTester &operator=(DestructionTester &&) { print_move_assigned(this); return *this; }\n};\nnamespace pybind11 { namespace detail {\ntemplate <> struct type_caster<DestructionTester> {\n    PYBIND11_TYPE_CASTER(DestructionTester, _(\"DestructionTester\"));\n    bool load(handle, bool) { return true; }\n\n    static handle cast(const DestructionTester &, return_value_policy, handle) {\n        return py::bool_(true).release();\n    }\n};\n}}\n\n// Test None-allowed py::arg argument policy\nclass NoneTester { public: int answer = 42; };\nint none1(const NoneTester &obj) { return obj.answer; }\nint none2(NoneTester *obj) { return obj ? obj->answer : -1; }\nint none3(std::shared_ptr<NoneTester> &obj) { return obj ? obj->answer : -1; }\nint none4(std::shared_ptr<NoneTester> *obj) { return obj && *obj ? (*obj)->answer : -1; }\nint none5(std::shared_ptr<NoneTester> obj) { return obj ? 
obj->answer : -1; }\n\nstruct StrIssue {\n    int val = -1;\n\n    StrIssue() = default;\n    StrIssue(int i) : val{i} {}\n};\n\n// Issues #854, #910: incompatible function args when member function/pointer is in unregistered base class\nclass UnregisteredBase {\npublic:\n    void do_nothing() const {}\n    void increase_value() { rw_value++; ro_value += 0.25; }\n    void set_int(int v) { rw_value = v; }\n    int get_int() const { return rw_value; }\n    double get_double() const { return ro_value; }\n    int rw_value = 42;\n    double ro_value = 1.25;\n};\nclass RegisteredDerived : public UnregisteredBase {\npublic:\n    using UnregisteredBase::UnregisteredBase;\n    double sum() const { return rw_value + ro_value; }\n};\n\nTEST_SUBMODULE(methods_and_attributes, m) {\n    // test_methods_and_attributes\n    py::class_<ExampleMandA> emna(m, \"ExampleMandA\");\n    emna.def(py::init<>())\n        .def(py::init<int>())\n        .def(py::init<const ExampleMandA&>())\n        .def(\"add1\", &ExampleMandA::add1)\n        .def(\"add2\", &ExampleMandA::add2)\n        .def(\"add3\", &ExampleMandA::add3)\n        .def(\"add4\", &ExampleMandA::add4)\n        .def(\"add5\", &ExampleMandA::add5)\n        .def(\"add6\", &ExampleMandA::add6)\n        .def(\"add7\", &ExampleMandA::add7)\n        .def(\"add8\", &ExampleMandA::add8)\n        .def(\"add9\", &ExampleMandA::add9)\n        .def(\"add10\", &ExampleMandA::add10)\n        .def(\"self1\", &ExampleMandA::self1)\n        .def(\"self2\", &ExampleMandA::self2)\n        .def(\"self3\", &ExampleMandA::self3)\n        .def(\"self4\", &ExampleMandA::self4)\n        .def(\"self5\", &ExampleMandA::self5)\n        .def(\"internal1\", &ExampleMandA::internal1)\n        .def(\"internal2\", &ExampleMandA::internal2)\n        .def(\"internal3\", &ExampleMandA::internal3)\n        .def(\"internal4\", &ExampleMandA::internal4)\n        .def(\"internal5\", &ExampleMandA::internal5)\n#if defined(PYBIND11_OVERLOAD_CAST)\n        
.def(\"overloaded\", py::overload_cast<>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", py::overload_cast<int>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", py::overload_cast<int,   float>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", py::overload_cast<float,   int>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", py::overload_cast<int,     int>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", py::overload_cast<float, float>(&ExampleMandA::overloaded))\n        .def(\"overloaded_float\", py::overload_cast<float, float>(&ExampleMandA::overloaded))\n        .def(\"overloaded_const\", py::overload_cast<int         >(&ExampleMandA::overloaded, py::const_))\n        .def(\"overloaded_const\", py::overload_cast<int,   float>(&ExampleMandA::overloaded, py::const_))\n        .def(\"overloaded_const\", py::overload_cast<float,   int>(&ExampleMandA::overloaded, py::const_))\n        .def(\"overloaded_const\", py::overload_cast<int,     int>(&ExampleMandA::overloaded, py::const_))\n        .def(\"overloaded_const\", py::overload_cast<float, float>(&ExampleMandA::overloaded, py::const_))\n#else\n        .def(\"overloaded\", static_cast<py::str (ExampleMandA::*)()>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", static_cast<py::str (ExampleMandA::*)(int)>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", static_cast<py::str (ExampleMandA::*)(int,   float)>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", static_cast<py::str (ExampleMandA::*)(float,   int)>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", static_cast<py::str (ExampleMandA::*)(int,     int)>(&ExampleMandA::overloaded))\n        .def(\"overloaded\", static_cast<py::str (ExampleMandA::*)(float, float)>(&ExampleMandA::overloaded))\n        .def(\"overloaded_float\", static_cast<py::str (ExampleMandA::*)(float, float)>(&ExampleMandA::overloaded))\n        .def(\"overloaded_const\", static_cast<py::str (ExampleMandA::*)(int        
 ) const>(&ExampleMandA::overloaded))\n        .def(\"overloaded_const\", static_cast<py::str (ExampleMandA::*)(int,   float) const>(&ExampleMandA::overloaded))\n        .def(\"overloaded_const\", static_cast<py::str (ExampleMandA::*)(float,   int) const>(&ExampleMandA::overloaded))\n        .def(\"overloaded_const\", static_cast<py::str (ExampleMandA::*)(int,     int) const>(&ExampleMandA::overloaded))\n        .def(\"overloaded_const\", static_cast<py::str (ExampleMandA::*)(float, float) const>(&ExampleMandA::overloaded))\n#endif\n        // test_no_mixed_overloads\n        // Raise error if trying to mix static/non-static overloads on the same name:\n        .def_static(\"add_mixed_overloads1\", []() {\n            auto emna = py::reinterpret_borrow<py::class_<ExampleMandA>>(py::module::import(\"pybind11_tests.methods_and_attributes\").attr(\"ExampleMandA\"));\n            emna.def       (\"overload_mixed1\", static_cast<py::str (ExampleMandA::*)(int, int)>(&ExampleMandA::overloaded))\n                .def_static(\"overload_mixed1\", static_cast<py::str (              *)(float   )>(&ExampleMandA::overloaded));\n        })\n        .def_static(\"add_mixed_overloads2\", []() {\n            auto emna = py::reinterpret_borrow<py::class_<ExampleMandA>>(py::module::import(\"pybind11_tests.methods_and_attributes\").attr(\"ExampleMandA\"));\n            emna.def_static(\"overload_mixed2\", static_cast<py::str (              *)(float   )>(&ExampleMandA::overloaded))\n                .def       (\"overload_mixed2\", static_cast<py::str (ExampleMandA::*)(int, int)>(&ExampleMandA::overloaded));\n        })\n        .def(\"__str__\", &ExampleMandA::toString)\n        .def_readwrite(\"value\", &ExampleMandA::value);\n\n    // test_copy_method\n    // Issue #443: can't call copied methods in Python 3\n    emna.attr(\"add2b\") = emna.attr(\"add2\");\n\n    // test_properties, test_static_properties, test_static_cls\n    py::class_<TestProperties>(m, \"TestProperties\")\n        
.def(py::init<>())\n        .def_readonly(\"def_readonly\", &TestProperties::value)\n        .def_readwrite(\"def_readwrite\", &TestProperties::value)\n        .def_property(\"def_writeonly\", nullptr,\n                      [](TestProperties& s,int v) { s.value = v; } )\n        .def_property(\"def_property_writeonly\", nullptr, &TestProperties::set)\n        .def_property_readonly(\"def_property_readonly\", &TestProperties::get)\n        .def_property(\"def_property\", &TestProperties::get, &TestProperties::set)\n        .def_property(\"def_property_impossible\", nullptr, nullptr)\n        .def_readonly_static(\"def_readonly_static\", &TestProperties::static_value)\n        .def_readwrite_static(\"def_readwrite_static\", &TestProperties::static_value)\n        .def_property_static(\"def_writeonly_static\", nullptr,\n                             [](py::object, int v) { TestProperties::static_value = v; })\n        .def_property_readonly_static(\"def_property_readonly_static\",\n                                      [](py::object) { return TestProperties::static_get(); })\n        .def_property_static(\"def_property_writeonly_static\", nullptr,\n                             [](py::object, int v) { return TestProperties::static_set(v); })\n        .def_property_static(\"def_property_static\",\n                             [](py::object) { return TestProperties::static_get(); },\n                             [](py::object, int v) { TestProperties::static_set(v); })\n        .def_property_static(\"static_cls\",\n                             [](py::object cls) { return cls; },\n                             [](py::object cls, py::function f) { f(cls); });\n\n    py::class_<TestPropertiesOverride, TestProperties>(m, \"TestPropertiesOverride\")\n        .def(py::init<>())\n        .def_readonly(\"def_readonly\", &TestPropertiesOverride::value)\n        .def_readonly_static(\"def_readonly_static\", &TestPropertiesOverride::static_value);\n\n    auto static_get1 = 
[](py::object) -> const UserType & { return TestPropRVP::sv1; };\n    auto static_get2 = [](py::object) -> const UserType & { return TestPropRVP::sv2; };\n    auto static_set1 = [](py::object, int v) { TestPropRVP::sv1.set(v); };\n    auto static_set2 = [](py::object, int v) { TestPropRVP::sv2.set(v); };\n    auto rvp_copy = py::return_value_policy::copy;\n\n    // test_property_return_value_policies\n    py::class_<TestPropRVP>(m, \"TestPropRVP\")\n        .def(py::init<>())\n        .def_property_readonly(\"ro_ref\", &TestPropRVP::get1)\n        .def_property_readonly(\"ro_copy\", &TestPropRVP::get2, rvp_copy)\n        .def_property_readonly(\"ro_func\", py::cpp_function(&TestPropRVP::get2, rvp_copy))\n        .def_property(\"rw_ref\", &TestPropRVP::get1, &TestPropRVP::set1)\n        .def_property(\"rw_copy\", &TestPropRVP::get2, &TestPropRVP::set2, rvp_copy)\n        .def_property(\"rw_func\", py::cpp_function(&TestPropRVP::get2, rvp_copy), &TestPropRVP::set2)\n        .def_property_readonly_static(\"static_ro_ref\", static_get1)\n        .def_property_readonly_static(\"static_ro_copy\", static_get2, rvp_copy)\n        .def_property_readonly_static(\"static_ro_func\", py::cpp_function(static_get2, rvp_copy))\n        .def_property_static(\"static_rw_ref\", static_get1, static_set1)\n        .def_property_static(\"static_rw_copy\", static_get2, static_set2, rvp_copy)\n        .def_property_static(\"static_rw_func\", py::cpp_function(static_get2, rvp_copy), static_set2)\n        // test_property_rvalue_policy\n        .def_property_readonly(\"rvalue\", &TestPropRVP::get_rvalue)\n        .def_property_readonly_static(\"static_rvalue\", [](py::object) { return UserType(1); });\n\n    // test_metaclass_override\n    struct MetaclassOverride { };\n    py::class_<MetaclassOverride>(m, \"MetaclassOverride\", py::metaclass((PyObject *) &PyType_Type))\n        .def_property_readonly_static(\"readonly\", [](py::object) { return 1; });\n\n#if !defined(PYPY_VERSION)\n    // 
test_dynamic_attributes\n    class DynamicClass {\n    public:\n        DynamicClass() { print_default_created(this); }\n        ~DynamicClass() { print_destroyed(this); }\n    };\n    py::class_<DynamicClass>(m, \"DynamicClass\", py::dynamic_attr())\n        .def(py::init());\n\n    class CppDerivedDynamicClass : public DynamicClass { };\n    py::class_<CppDerivedDynamicClass, DynamicClass>(m, \"CppDerivedDynamicClass\")\n        .def(py::init());\n#endif\n\n    // test_noconvert_args\n    //\n    // Test converting.  The ArgAlwaysConverts is just there to make the first no-conversion pass\n    // fail so that our call always ends up happening via the second dispatch (the one that allows\n    // some conversion).\n    class ArgInspector {\n    public:\n        ArgInspector1 f(ArgInspector1 a, ArgAlwaysConverts) { return a; }\n        std::string g(ArgInspector1 a, const ArgInspector1 &b, int c, ArgInspector2 *d, ArgAlwaysConverts) {\n            return a.arg + \"\\n\" + b.arg + \"\\n\" + std::to_string(c) + \"\\n\" + d->arg;\n        }\n        static ArgInspector2 h(ArgInspector2 a, ArgAlwaysConverts) { return a; }\n    };\n    py::class_<ArgInspector>(m, \"ArgInspector\")\n        .def(py::init<>())\n        .def(\"f\", &ArgInspector::f, py::arg(), py::arg() = ArgAlwaysConverts())\n        .def(\"g\", &ArgInspector::g, \"a\"_a.noconvert(), \"b\"_a, \"c\"_a.noconvert()=13, \"d\"_a=ArgInspector2(), py::arg() = ArgAlwaysConverts())\n        .def_static(\"h\", &ArgInspector::h, py::arg().noconvert(), py::arg() = ArgAlwaysConverts())\n        ;\n    m.def(\"arg_inspect_func\", [](ArgInspector2 a, ArgInspector1 b, ArgAlwaysConverts) { return a.arg + \"\\n\" + b.arg; },\n            py::arg().noconvert(false), py::arg_v(nullptr, ArgInspector1()).noconvert(true), py::arg() = ArgAlwaysConverts());\n\n    m.def(\"floats_preferred\", [](double f) { return 0.5 * f; }, py::arg(\"f\"));\n    m.def(\"floats_only\", [](double f) { return 0.5 * f; }, 
py::arg(\"f\").noconvert());\n    m.def(\"ints_preferred\", [](int i) { return i / 2; }, py::arg(\"i\"));\n    m.def(\"ints_only\", [](int i) { return i / 2; }, py::arg(\"i\").noconvert());\n\n    // test_bad_arg_default\n    // Issue/PR #648: bad arg default debugging output\n#if !defined(NDEBUG)\n    m.attr(\"debug_enabled\") = true;\n#else\n    m.attr(\"debug_enabled\") = false;\n#endif\n    m.def(\"bad_arg_def_named\", []{\n        auto m = py::module::import(\"pybind11_tests\");\n        m.def(\"should_fail\", [](int, UnregisteredType) {}, py::arg(), py::arg(\"a\") = UnregisteredType());\n    });\n    m.def(\"bad_arg_def_unnamed\", []{\n        auto m = py::module::import(\"pybind11_tests\");\n        m.def(\"should_fail\", [](int, UnregisteredType) {}, py::arg(), py::arg() = UnregisteredType());\n    });\n\n    // test_accepts_none\n    py::class_<NoneTester, std::shared_ptr<NoneTester>>(m, \"NoneTester\")\n        .def(py::init<>());\n    m.def(\"no_none1\", &none1, py::arg().none(false));\n    m.def(\"no_none2\", &none2, py::arg().none(false));\n    m.def(\"no_none3\", &none3, py::arg().none(false));\n    m.def(\"no_none4\", &none4, py::arg().none(false));\n    m.def(\"no_none5\", &none5, py::arg().none(false));\n    m.def(\"ok_none1\", &none1);\n    m.def(\"ok_none2\", &none2, py::arg().none(true));\n    m.def(\"ok_none3\", &none3);\n    m.def(\"ok_none4\", &none4, py::arg().none(true));\n    m.def(\"ok_none5\", &none5);\n\n    // test_str_issue\n    // Issue #283: __str__ called on uninitialized instance when constructor arguments invalid\n    py::class_<StrIssue>(m, \"StrIssue\")\n        .def(py::init<int>())\n        .def(py::init<>())\n        .def(\"__str__\", [](const StrIssue &si) {\n            return \"StrIssue[\" + std::to_string(si.val) + \"]\"; }\n        );\n\n    // test_unregistered_base_implementations\n    //\n    // Issues #854/910: incompatible function args when member function/pointer is in unregistered\n    // base class The methods 
and member pointers below actually resolve to members/pointers in\n    // UnregisteredBase; before this test/fix they would be registered via lambda with a first\n    // argument of an unregistered type, and thus uncallable.\n    py::class_<RegisteredDerived>(m, \"RegisteredDerived\")\n        .def(py::init<>())\n        .def(\"do_nothing\", &RegisteredDerived::do_nothing)\n        .def(\"increase_value\", &RegisteredDerived::increase_value)\n        .def_readwrite(\"rw_value\", &RegisteredDerived::rw_value)\n        .def_readonly(\"ro_value\", &RegisteredDerived::ro_value)\n        // These should trigger a static_assert if uncommented\n        //.def_readwrite(\"fails\", &UserType::value) // should trigger a static_assert if uncommented\n        //.def_readonly(\"fails\", &UserType::value) // should trigger a static_assert if uncommented\n        .def_property(\"rw_value_prop\", &RegisteredDerived::get_int, &RegisteredDerived::set_int)\n        .def_property_readonly(\"ro_value_prop\", &RegisteredDerived::get_double)\n        // This one is in the registered class:\n        .def(\"sum\", &RegisteredDerived::sum)\n        ;\n\n    using Adapted = decltype(py::method_adaptor<RegisteredDerived>(&RegisteredDerived::do_nothing));\n    static_assert(std::is_same<Adapted, void (RegisteredDerived::*)() const>::value, \"\");\n\n    // test_custom_caster_destruction\n    // Test that `take_ownership` works on types with a custom type caster when given a pointer\n\n    // default policy: don't take ownership:\n    m.def(\"custom_caster_no_destroy\", []() { static auto *dt = new DestructionTester(); return dt; });\n\n    m.def(\"custom_caster_destroy\", []() { return new DestructionTester(); },\n            py::return_value_policy::take_ownership); // Takes ownership: destroy when finished\n    m.def(\"custom_caster_destroy_const\", []() -> const DestructionTester * { return new DestructionTester(); },\n            py::return_value_policy::take_ownership); // Likewise (const 
doesn't inhibit destruction)\n    m.def(\"destruction_tester_cstats\", &ConstructorStats::get<DestructionTester>, py::return_value_policy::reference);\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_methods_and_attributes.py",
    "content": "import pytest\nfrom pybind11_tests import methods_and_attributes as m\nfrom pybind11_tests import ConstructorStats\n\n\ndef test_methods_and_attributes():\n    instance1 = m.ExampleMandA()\n    instance2 = m.ExampleMandA(32)\n\n    instance1.add1(instance2)\n    instance1.add2(instance2)\n    instance1.add3(instance2)\n    instance1.add4(instance2)\n    instance1.add5(instance2)\n    instance1.add6(32)\n    instance1.add7(32)\n    instance1.add8(32)\n    instance1.add9(32)\n    instance1.add10(32)\n\n    assert str(instance1) == \"ExampleMandA[value=320]\"\n    assert str(instance2) == \"ExampleMandA[value=32]\"\n    assert str(instance1.self1()) == \"ExampleMandA[value=320]\"\n    assert str(instance1.self2()) == \"ExampleMandA[value=320]\"\n    assert str(instance1.self3()) == \"ExampleMandA[value=320]\"\n    assert str(instance1.self4()) == \"ExampleMandA[value=320]\"\n    assert str(instance1.self5()) == \"ExampleMandA[value=320]\"\n\n    assert instance1.internal1() == 320\n    assert instance1.internal2() == 320\n    assert instance1.internal3() == 320\n    assert instance1.internal4() == 320\n    assert instance1.internal5() == 320\n\n    assert instance1.overloaded() == \"()\"\n    assert instance1.overloaded(0) == \"(int)\"\n    assert instance1.overloaded(1, 1.0) == \"(int, float)\"\n    assert instance1.overloaded(2.0, 2) == \"(float, int)\"\n    assert instance1.overloaded(3,   3) == \"(int, int)\"\n    assert instance1.overloaded(4., 4.) == \"(float, float)\"\n    assert instance1.overloaded_const(-3) == \"(int) const\"\n    assert instance1.overloaded_const(5, 5.0) == \"(int, float) const\"\n    assert instance1.overloaded_const(6.0, 6) == \"(float, int) const\"\n    assert instance1.overloaded_const(7,   7) == \"(int, int) const\"\n    assert instance1.overloaded_const(8., 8.) == \"(float, float) const\"\n    assert instance1.overloaded_float(1, 1) == \"(float, float)\"\n    assert instance1.overloaded_float(1, 1.) 
== \"(float, float)\"\n    assert instance1.overloaded_float(1., 1) == \"(float, float)\"\n    assert instance1.overloaded_float(1., 1.) == \"(float, float)\"\n\n    assert instance1.value == 320\n    instance1.value = 100\n    assert str(instance1) == \"ExampleMandA[value=100]\"\n\n    cstats = ConstructorStats.get(m.ExampleMandA)\n    assert cstats.alive() == 2\n    del instance1, instance2\n    assert cstats.alive() == 0\n    assert cstats.values() == [\"32\"]\n    assert cstats.default_constructions == 1\n    assert cstats.copy_constructions == 3\n    assert cstats.move_constructions >= 1\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n\ndef test_copy_method():\n    \"\"\"Issue #443: calling copied methods fails in Python 3\"\"\"\n\n    m.ExampleMandA.add2c = m.ExampleMandA.add2\n    m.ExampleMandA.add2d = m.ExampleMandA.add2b\n    a = m.ExampleMandA(123)\n    assert a.value == 123\n    a.add2(m.ExampleMandA(-100))\n    assert a.value == 23\n    a.add2b(m.ExampleMandA(20))\n    assert a.value == 43\n    a.add2c(m.ExampleMandA(6))\n    assert a.value == 49\n    a.add2d(m.ExampleMandA(-7))\n    assert a.value == 42\n\n\ndef test_properties():\n    instance = m.TestProperties()\n\n    assert instance.def_readonly == 1\n    with pytest.raises(AttributeError):\n        instance.def_readonly = 2\n\n    instance.def_readwrite = 2\n    assert instance.def_readwrite == 2\n\n    assert instance.def_property_readonly == 2\n    with pytest.raises(AttributeError):\n        instance.def_property_readonly = 3\n\n    instance.def_property = 3\n    assert instance.def_property == 3\n\n    with pytest.raises(AttributeError) as excinfo:\n        dummy = instance.def_property_writeonly  # noqa: F841 unused var\n    assert \"unreadable attribute\" in str(excinfo)\n\n    instance.def_property_writeonly = 4\n    assert instance.def_property_readonly == 4\n\n    with pytest.raises(AttributeError) as excinfo:\n        dummy = 
instance.def_property_impossible  # noqa: F841 unused var\n    assert \"unreadable attribute\" in str(excinfo)\n\n    with pytest.raises(AttributeError) as excinfo:\n        instance.def_property_impossible = 5\n    assert \"can't set attribute\" in str(excinfo)\n\n\ndef test_static_properties():\n    assert m.TestProperties.def_readonly_static == 1\n    with pytest.raises(AttributeError) as excinfo:\n        m.TestProperties.def_readonly_static = 2\n    assert \"can't set attribute\" in str(excinfo)\n\n    m.TestProperties.def_readwrite_static = 2\n    assert m.TestProperties.def_readwrite_static == 2\n\n    with pytest.raises(AttributeError) as excinfo:\n        dummy = m.TestProperties.def_writeonly_static  # noqa: F841 unused var\n    assert \"unreadable attribute\" in str(excinfo)\n\n    m.TestProperties.def_writeonly_static = 3\n    assert m.TestProperties.def_readonly_static == 3\n\n    assert m.TestProperties.def_property_readonly_static == 3\n    with pytest.raises(AttributeError) as excinfo:\n        m.TestProperties.def_property_readonly_static = 99\n    assert \"can't set attribute\" in str(excinfo)\n\n    m.TestProperties.def_property_static = 4\n    assert m.TestProperties.def_property_static == 4\n\n    with pytest.raises(AttributeError) as excinfo:\n        dummy = m.TestProperties.def_property_writeonly_static\n    assert \"unreadable attribute\" in str(excinfo)\n\n    m.TestProperties.def_property_writeonly_static = 5\n    assert m.TestProperties.def_property_static == 5\n\n    # Static property read and write via instance\n    instance = m.TestProperties()\n\n    m.TestProperties.def_readwrite_static = 0\n    assert m.TestProperties.def_readwrite_static == 0\n    assert instance.def_readwrite_static == 0\n\n    instance.def_readwrite_static = 2\n    assert m.TestProperties.def_readwrite_static == 2\n    assert instance.def_readwrite_static == 2\n\n    with pytest.raises(AttributeError) as excinfo:\n        dummy = 
instance.def_property_writeonly_static  # noqa: F841 unused var\n    assert \"unreadable attribute\" in str(excinfo)\n\n    instance.def_property_writeonly_static = 4\n    assert instance.def_property_static == 4\n\n    # It should be possible to override properties in derived classes\n    assert m.TestPropertiesOverride().def_readonly == 99\n    assert m.TestPropertiesOverride.def_readonly_static == 99\n\n\ndef test_static_cls():\n    \"\"\"Static property getter and setters expect the type object as the their only argument\"\"\"\n\n    instance = m.TestProperties()\n    assert m.TestProperties.static_cls is m.TestProperties\n    assert instance.static_cls is m.TestProperties\n\n    def check_self(self):\n        assert self is m.TestProperties\n\n    m.TestProperties.static_cls = check_self\n    instance.static_cls = check_self\n\n\ndef test_metaclass_override():\n    \"\"\"Overriding pybind11's default metaclass changes the behavior of `static_property`\"\"\"\n\n    assert type(m.ExampleMandA).__name__ == \"pybind11_type\"\n    assert type(m.MetaclassOverride).__name__ == \"type\"\n\n    assert m.MetaclassOverride.readonly == 1\n    assert type(m.MetaclassOverride.__dict__[\"readonly\"]).__name__ == \"pybind11_static_property\"\n\n    # Regular `type` replaces the property instead of calling `__set__()`\n    m.MetaclassOverride.readonly = 2\n    assert m.MetaclassOverride.readonly == 2\n    assert isinstance(m.MetaclassOverride.__dict__[\"readonly\"], int)\n\n\ndef test_no_mixed_overloads():\n    from pybind11_tests import debug_enabled\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.ExampleMandA.add_mixed_overloads1()\n    assert (str(excinfo.value) ==\n            \"overloading a method with both static and instance methods is not supported; \" +\n            (\"compile in debug mode for more details\" if not debug_enabled else\n             \"error while attempting to bind static method ExampleMandA.overload_mixed1\"\n             \"(arg0: 
float) -> str\")\n            )\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.ExampleMandA.add_mixed_overloads2()\n    assert (str(excinfo.value) ==\n            \"overloading a method with both static and instance methods is not supported; \" +\n            (\"compile in debug mode for more details\" if not debug_enabled else\n             \"error while attempting to bind instance method ExampleMandA.overload_mixed2\"\n             \"(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)\"\n             \" -> str\")\n            )\n\n\n@pytest.mark.parametrize(\"access\", [\"ro\", \"rw\", \"static_ro\", \"static_rw\"])\ndef test_property_return_value_policies(access):\n    if not access.startswith(\"static\"):\n        obj = m.TestPropRVP()\n    else:\n        obj = m.TestPropRVP\n\n    ref = getattr(obj, access + \"_ref\")\n    assert ref.value == 1\n    ref.value = 2\n    assert getattr(obj, access + \"_ref\").value == 2\n    ref.value = 1  # restore original value for static properties\n\n    copy = getattr(obj, access + \"_copy\")\n    assert copy.value == 1\n    copy.value = 2\n    assert getattr(obj, access + \"_copy\").value == 1\n\n    copy = getattr(obj, access + \"_func\")\n    assert copy.value == 1\n    copy.value = 2\n    assert getattr(obj, access + \"_func\").value == 1\n\n\ndef test_property_rvalue_policy():\n    \"\"\"When returning an rvalue, the return value policy is automatically changed from\n    `reference(_internal)` to `move`. 
The following would not work otherwise.\"\"\"\n\n    instance = m.TestPropRVP()\n    o = instance.rvalue\n    assert o.value == 1\n\n    os = m.TestPropRVP.static_rvalue\n    assert os.value == 1\n\n\n# https://bitbucket.org/pypy/pypy/issues/2447\n@pytest.unsupported_on_pypy\ndef test_dynamic_attributes():\n    instance = m.DynamicClass()\n    assert not hasattr(instance, \"foo\")\n    assert \"foo\" not in dir(instance)\n\n    # Dynamically add attribute\n    instance.foo = 42\n    assert hasattr(instance, \"foo\")\n    assert instance.foo == 42\n    assert \"foo\" in dir(instance)\n\n    # __dict__ should be accessible and replaceable\n    assert \"foo\" in instance.__dict__\n    instance.__dict__ = {\"bar\": True}\n    assert not hasattr(instance, \"foo\")\n    assert hasattr(instance, \"bar\")\n\n    with pytest.raises(TypeError) as excinfo:\n        instance.__dict__ = []\n    assert str(excinfo.value) == \"__dict__ must be set to a dictionary, not a 'list'\"\n\n    cstats = ConstructorStats.get(m.DynamicClass)\n    assert cstats.alive() == 1\n    del instance\n    assert cstats.alive() == 0\n\n    # Derived classes should work as well\n    class PythonDerivedDynamicClass(m.DynamicClass):\n        pass\n\n    for cls in m.CppDerivedDynamicClass, PythonDerivedDynamicClass:\n        derived = cls()\n        derived.foobar = 100\n        assert derived.foobar == 100\n\n        assert cstats.alive() == 1\n        del derived\n        assert cstats.alive() == 0\n\n\n# https://bitbucket.org/pypy/pypy/issues/2447\n@pytest.unsupported_on_pypy\ndef test_cyclic_gc():\n    # One object references itself\n    instance = m.DynamicClass()\n    instance.circular_reference = instance\n\n    cstats = ConstructorStats.get(m.DynamicClass)\n    assert cstats.alive() == 1\n    del instance\n    assert cstats.alive() == 0\n\n    # Two object reference each other\n    i1 = m.DynamicClass()\n    i2 = m.DynamicClass()\n    i1.cycle = i2\n    i2.cycle = i1\n\n    assert cstats.alive() 
== 2\n    del i1, i2\n    assert cstats.alive() == 0\n\n\ndef test_noconvert_args(msg):\n    a = m.ArgInspector()\n    assert msg(a.f(\"hi\")) == \"\"\"\n        loading ArgInspector1 argument WITH conversion allowed.  Argument value = hi\n    \"\"\"\n    assert msg(a.g(\"this is a\", \"this is b\")) == \"\"\"\n        loading ArgInspector1 argument WITHOUT conversion allowed.  Argument value = this is a\n        loading ArgInspector1 argument WITH conversion allowed.  Argument value = this is b\n        13\n        loading ArgInspector2 argument WITH conversion allowed.  Argument value = (default arg inspector 2)\n    \"\"\"  # noqa: E501 line too long\n    assert msg(a.g(\"this is a\", \"this is b\", 42)) == \"\"\"\n        loading ArgInspector1 argument WITHOUT conversion allowed.  Argument value = this is a\n        loading ArgInspector1 argument WITH conversion allowed.  Argument value = this is b\n        42\n        loading ArgInspector2 argument WITH conversion allowed.  Argument value = (default arg inspector 2)\n    \"\"\"  # noqa: E501 line too long\n    assert msg(a.g(\"this is a\", \"this is b\", 42, \"this is d\")) == \"\"\"\n        loading ArgInspector1 argument WITHOUT conversion allowed.  Argument value = this is a\n        loading ArgInspector1 argument WITH conversion allowed.  Argument value = this is b\n        42\n        loading ArgInspector2 argument WITH conversion allowed.  Argument value = this is d\n    \"\"\"\n    assert (a.h(\"arg 1\") ==\n            \"loading ArgInspector2 argument WITHOUT conversion allowed.  Argument value = arg 1\")\n    assert msg(m.arg_inspect_func(\"A1\", \"A2\")) == \"\"\"\n        loading ArgInspector2 argument WITH conversion allowed.  Argument value = A1\n        loading ArgInspector1 argument WITHOUT conversion allowed.  
Argument value = A2\n    \"\"\"\n\n    assert m.floats_preferred(4) == 2.0\n    assert m.floats_only(4.0) == 2.0\n    with pytest.raises(TypeError) as excinfo:\n        m.floats_only(4)\n    assert msg(excinfo.value) == \"\"\"\n        floats_only(): incompatible function arguments. The following argument types are supported:\n            1. (f: float) -> float\n\n        Invoked with: 4\n    \"\"\"\n\n    assert m.ints_preferred(4) == 2\n    assert m.ints_preferred(True) == 0\n    with pytest.raises(TypeError) as excinfo:\n        m.ints_preferred(4.0)\n    assert msg(excinfo.value) == \"\"\"\n        ints_preferred(): incompatible function arguments. The following argument types are supported:\n            1. (i: int) -> int\n\n        Invoked with: 4.0\n    \"\"\"  # noqa: E501 line too long\n\n    assert m.ints_only(4) == 2\n    with pytest.raises(TypeError) as excinfo:\n        m.ints_only(4.0)\n    assert msg(excinfo.value) == \"\"\"\n        ints_only(): incompatible function arguments. The following argument types are supported:\n            1. (i: int) -> int\n\n        Invoked with: 4.0\n    \"\"\"\n\n\ndef test_bad_arg_default(msg):\n    from pybind11_tests import debug_enabled\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.bad_arg_def_named()\n    assert msg(excinfo.value) == (\n        \"arg(): could not convert default argument 'a: UnregisteredType' in function \"\n        \"'should_fail' into a Python object (type not registered yet?)\"\n        if debug_enabled else\n        \"arg(): could not convert default argument into a Python object (type not registered \"\n        \"yet?). 
Compile in debug mode for more information.\"\n    )\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.bad_arg_def_unnamed()\n    assert msg(excinfo.value) == (\n        \"arg(): could not convert default argument 'UnregisteredType' in function \"\n        \"'should_fail' into a Python object (type not registered yet?)\"\n        if debug_enabled else\n        \"arg(): could not convert default argument into a Python object (type not registered \"\n        \"yet?). Compile in debug mode for more information.\"\n    )\n\n\ndef test_accepts_none(msg):\n    a = m.NoneTester()\n    assert m.no_none1(a) == 42\n    assert m.no_none2(a) == 42\n    assert m.no_none3(a) == 42\n    assert m.no_none4(a) == 42\n    assert m.no_none5(a) == 42\n    assert m.ok_none1(a) == 42\n    assert m.ok_none2(a) == 42\n    assert m.ok_none3(a) == 42\n    assert m.ok_none4(a) == 42\n    assert m.ok_none5(a) == 42\n\n    with pytest.raises(TypeError) as excinfo:\n        m.no_none1(None)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n    with pytest.raises(TypeError) as excinfo:\n        m.no_none2(None)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n    with pytest.raises(TypeError) as excinfo:\n        m.no_none3(None)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n    with pytest.raises(TypeError) as excinfo:\n        m.no_none4(None)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n    with pytest.raises(TypeError) as excinfo:\n        m.no_none5(None)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n\n    # The first one still raises because you can't pass None as a lvalue reference arg:\n    with pytest.raises(TypeError) as excinfo:\n        assert m.ok_none1(None) == -1\n    assert msg(excinfo.value) == \"\"\"\n        ok_none1(): incompatible function arguments. The following argument types are supported:\n            1. 
(arg0: m.methods_and_attributes.NoneTester) -> int\n\n        Invoked with: None\n    \"\"\"\n\n    # The rest take the argument as pointer or holder, and accept None:\n    assert m.ok_none2(None) == -1\n    assert m.ok_none3(None) == -1\n    assert m.ok_none4(None) == -1\n    assert m.ok_none5(None) == -1\n\n\ndef test_str_issue(msg):\n    \"\"\"#283: __str__ called on uninitialized instance when constructor arguments invalid\"\"\"\n\n    assert str(m.StrIssue(3)) == \"StrIssue[3]\"\n\n    with pytest.raises(TypeError) as excinfo:\n        str(m.StrIssue(\"no\", \"such\", \"constructor\"))\n    assert msg(excinfo.value) == \"\"\"\n        __init__(): incompatible constructor arguments. The following argument types are supported:\n            1. m.methods_and_attributes.StrIssue(arg0: int)\n            2. m.methods_and_attributes.StrIssue()\n\n        Invoked with: 'no', 'such', 'constructor'\n    \"\"\"\n\n\ndef test_unregistered_base_implementations():\n    a = m.RegisteredDerived()\n    a.do_nothing()\n    assert a.rw_value == 42\n    assert a.ro_value == 1.25\n    a.rw_value += 5\n    assert a.sum() == 48.25\n    a.increase_value()\n    assert a.rw_value == 48\n    assert a.ro_value == 1.5\n    assert a.sum() == 49.5\n    assert a.rw_value_prop == 48\n    a.rw_value_prop += 1\n    assert a.rw_value_prop == 49\n    a.increase_value()\n    assert a.ro_value_prop == 1.75\n\n\ndef test_custom_caster_destruction():\n    \"\"\"Tests that returning a pointer to a type that gets converted with a custom type caster gets\n    destroyed when the function has py::return_value_policy::take_ownership policy applied.\"\"\"\n\n    cstats = m.destruction_tester_cstats()\n    # This one *doesn't* have take_ownership: the pointer should be used but not destroyed:\n    z = m.custom_caster_no_destroy()\n    assert cstats.alive() == 1 and cstats.default_constructions == 1\n    assert z\n\n    # take_ownership applied: this constructs a new object, casts it, then destroys it:\n    z 
= m.custom_caster_destroy()\n    assert z\n    assert cstats.default_constructions == 2\n\n    # Same, but with a const pointer return (which should *not* inhibit destruction):\n    z = m.custom_caster_destroy_const()\n    assert z\n    assert cstats.default_constructions == 3\n\n    # Make sure we still only have the original object (from ..._no_destroy()) alive:\n    assert cstats.alive() == 1\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_modules.cpp",
    "content": "/*\n    tests/test_modules.cpp -- nested modules, importing modules, and\n                            internal references\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n\nTEST_SUBMODULE(modules, m) {\n    // test_nested_modules\n    py::module m_sub = m.def_submodule(\"subsubmodule\");\n    m_sub.def(\"submodule_func\", []() { return \"submodule_func()\"; });\n\n    // test_reference_internal\n    class A {\n    public:\n        A(int v) : v(v) { print_created(this, v); }\n        ~A() { print_destroyed(this); }\n        A(const A&) { print_copy_created(this); }\n        A& operator=(const A &copy) { print_copy_assigned(this); v = copy.v; return *this; }\n        std::string toString() { return \"A[\" + std::to_string(v) + \"]\"; }\n    private:\n        int v;\n    };\n    py::class_<A>(m_sub, \"A\")\n        .def(py::init<int>())\n        .def(\"__repr__\", &A::toString);\n\n    class B {\n    public:\n        B() { print_default_created(this); }\n        ~B() { print_destroyed(this); }\n        B(const B&) { print_copy_created(this); }\n        B& operator=(const B &copy) { print_copy_assigned(this); a1 = copy.a1; a2 = copy.a2; return *this; }\n        A &get_a1() { return a1; }\n        A &get_a2() { return a2; }\n\n        A a1{1};\n        A a2{2};\n    };\n    py::class_<B>(m_sub, \"B\")\n        .def(py::init<>())\n        .def(\"get_a1\", &B::get_a1, \"Return the internal A 1\", py::return_value_policy::reference_internal)\n        .def(\"get_a2\", &B::get_a2, \"Return the internal A 2\", py::return_value_policy::reference_internal)\n        .def_readwrite(\"a1\", &B::a1)  // def_readonly uses an internal reference return policy by default\n        .def_readwrite(\"a2\", &B::a2);\n\n    m.attr(\"OD\") = 
py::module::import(\"collections\").attr(\"OrderedDict\");\n\n    // test_duplicate_registration\n    // Registering two things with the same name\n    m.def(\"duplicate_registration\", []() {\n        class Dupe1 { };\n        class Dupe2 { };\n        class Dupe3 { };\n        class DupeException { };\n\n        auto dm = py::module(\"dummy\");\n        auto failures = py::list();\n\n        py::class_<Dupe1>(dm, \"Dupe1\");\n        py::class_<Dupe2>(dm, \"Dupe2\");\n        dm.def(\"dupe1_factory\", []() { return Dupe1(); });\n        py::exception<DupeException>(dm, \"DupeException\");\n\n        try {\n            py::class_<Dupe1>(dm, \"Dupe1\");\n            failures.append(\"Dupe1 class\");\n        } catch (std::runtime_error &) {}\n        try {\n            dm.def(\"Dupe1\", []() { return Dupe1(); });\n            failures.append(\"Dupe1 function\");\n        } catch (std::runtime_error &) {}\n        try {\n            py::class_<Dupe3>(dm, \"dupe1_factory\");\n            failures.append(\"dupe1_factory\");\n        } catch (std::runtime_error &) {}\n        try {\n            py::exception<Dupe3>(dm, \"Dupe2\");\n            failures.append(\"Dupe2\");\n        } catch (std::runtime_error &) {}\n        try {\n            dm.def(\"DupeException\", []() { return 30; });\n            failures.append(\"DupeException1\");\n        } catch (std::runtime_error &) {}\n        try {\n            py::class_<DupeException>(dm, \"DupeException\");\n            failures.append(\"DupeException2\");\n        } catch (std::runtime_error &) {}\n\n        return failures;\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_modules.py",
    "content": "from pybind11_tests import modules as m\nfrom pybind11_tests.modules import subsubmodule as ms\nfrom pybind11_tests import ConstructorStats\n\n\ndef test_nested_modules():\n    import pybind11_tests\n    assert pybind11_tests.__name__ == \"pybind11_tests\"\n    assert pybind11_tests.modules.__name__ == \"pybind11_tests.modules\"\n    assert pybind11_tests.modules.subsubmodule.__name__ == \"pybind11_tests.modules.subsubmodule\"\n    assert m.__name__ == \"pybind11_tests.modules\"\n    assert ms.__name__ == \"pybind11_tests.modules.subsubmodule\"\n\n    assert ms.submodule_func() == \"submodule_func()\"\n\n\ndef test_reference_internal():\n    b = ms.B()\n    assert str(b.get_a1()) == \"A[1]\"\n    assert str(b.a1) == \"A[1]\"\n    assert str(b.get_a2()) == \"A[2]\"\n    assert str(b.a2) == \"A[2]\"\n\n    b.a1 = ms.A(42)\n    b.a2 = ms.A(43)\n    assert str(b.get_a1()) == \"A[42]\"\n    assert str(b.a1) == \"A[42]\"\n    assert str(b.get_a2()) == \"A[43]\"\n    assert str(b.a2) == \"A[43]\"\n\n    astats, bstats = ConstructorStats.get(ms.A), ConstructorStats.get(ms.B)\n    assert astats.alive() == 2\n    assert bstats.alive() == 1\n    del b\n    assert astats.alive() == 0\n    assert bstats.alive() == 0\n    assert astats.values() == ['1', '2', '42', '43']\n    assert bstats.values() == []\n    assert astats.default_constructions == 0\n    assert bstats.default_constructions == 1\n    assert astats.copy_constructions == 0\n    assert bstats.copy_constructions == 0\n    # assert astats.move_constructions >= 0  # Don't invoke any\n    # assert bstats.move_constructions >= 0  # Don't invoke any\n    assert astats.copy_assignments == 2\n    assert bstats.copy_assignments == 0\n    assert astats.move_assignments == 0\n    assert bstats.move_assignments == 0\n\n\ndef test_importing():\n    from pybind11_tests.modules import OD\n    from collections import OrderedDict\n\n    assert OD is OrderedDict\n    assert str(OD([(1, 'a'), (2, 'b')])) == 
\"OrderedDict([(1, 'a'), (2, 'b')])\"\n\n\ndef test_pydoc():\n    \"\"\"Pydoc needs to be able to provide help() for everything inside a pybind11 module\"\"\"\n    import pybind11_tests\n    import pydoc\n\n    assert pybind11_tests.__name__ == \"pybind11_tests\"\n    assert pybind11_tests.__doc__ == \"pybind11 test module\"\n    assert pydoc.text.docmodule(pybind11_tests)\n\n\ndef test_duplicate_registration():\n    \"\"\"Registering two things with the same name\"\"\"\n\n    assert m.duplicate_registration() == []\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_multiple_inheritance.cpp",
    "content": "/*\n    tests/test_multiple_inheritance.cpp -- multiple inheritance,\n    implicit MI casts\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n\n// Many bases for testing that multiple inheritance from many classes (i.e. requiring extra\n// space for holder constructed flags) works.\ntemplate <int N> struct BaseN {\n    BaseN(int i) : i(i) { }\n    int i;\n};\n\n// test_mi_static_properties\nstruct Vanilla {\n    std::string vanilla() { return \"Vanilla\"; };\n};\nstruct WithStatic1 {\n    static std::string static_func1() { return \"WithStatic1\"; };\n    static int static_value1;\n};\nstruct WithStatic2 {\n    static std::string static_func2() { return \"WithStatic2\"; };\n    static int static_value2;\n};\nstruct VanillaStaticMix1 : Vanilla, WithStatic1, WithStatic2 {\n    static std::string static_func() { return \"VanillaStaticMix1\"; }\n    static int static_value;\n};\nstruct VanillaStaticMix2 : WithStatic1, Vanilla, WithStatic2 {\n    static std::string static_func() { return \"VanillaStaticMix2\"; }\n    static int static_value;\n};\nint WithStatic1::static_value1 = 1;\nint WithStatic2::static_value2 = 2;\nint VanillaStaticMix1::static_value = 12;\nint VanillaStaticMix2::static_value = 12;\n\nTEST_SUBMODULE(multiple_inheritance, m) {\n\n    // test_multiple_inheritance_mix1\n    // test_multiple_inheritance_mix2\n    struct Base1 {\n        Base1(int i) : i(i) { }\n        int foo() { return i; }\n        int i;\n    };\n    py::class_<Base1> b1(m, \"Base1\");\n    b1.def(py::init<int>())\n      .def(\"foo\", &Base1::foo);\n\n    struct Base2 {\n        Base2(int i) : i(i) { }\n        int bar() { return i; }\n        int i;\n    };\n    py::class_<Base2> b2(m, \"Base2\");\n    b2.def(py::init<int>())\n      .def(\"bar\", 
&Base2::bar);\n\n\n    // test_multiple_inheritance_cpp\n    struct Base12 : Base1, Base2 {\n        Base12(int i, int j) : Base1(i), Base2(j) { }\n    };\n    struct MIType : Base12 {\n        MIType(int i, int j) : Base12(i, j) { }\n    };\n    py::class_<Base12, Base1, Base2>(m, \"Base12\");\n    py::class_<MIType, Base12>(m, \"MIType\")\n        .def(py::init<int, int>());\n\n\n    // test_multiple_inheritance_python_many_bases\n    #define PYBIND11_BASEN(N) py::class_<BaseN<N>>(m, \"BaseN\" #N).def(py::init<int>()).def(\"f\" #N, [](BaseN<N> &b) { return b.i + N; })\n    PYBIND11_BASEN( 1); PYBIND11_BASEN( 2); PYBIND11_BASEN( 3); PYBIND11_BASEN( 4);\n    PYBIND11_BASEN( 5); PYBIND11_BASEN( 6); PYBIND11_BASEN( 7); PYBIND11_BASEN( 8);\n    PYBIND11_BASEN( 9); PYBIND11_BASEN(10); PYBIND11_BASEN(11); PYBIND11_BASEN(12);\n    PYBIND11_BASEN(13); PYBIND11_BASEN(14); PYBIND11_BASEN(15); PYBIND11_BASEN(16);\n    PYBIND11_BASEN(17);\n\n    // Uncommenting this should result in a compile time failure (MI can only be specified via\n    // template parameters because pybind has to know the types involved; see discussion in #742 for\n    // details).\n//    struct Base12v2 : Base1, Base2 {\n//        Base12v2(int i, int j) : Base1(i), Base2(j) { }\n//    };\n//    py::class_<Base12v2>(m, \"Base12v2\", b1, b2)\n//        .def(py::init<int, int>());\n\n\n    // test_multiple_inheritance_virtbase\n    // Test the case where not all base classes are specified, and where pybind11 requires the\n    // py::multiple_inheritance flag to perform proper casting between types.\n    struct Base1a {\n        Base1a(int i) : i(i) { }\n        int foo() { return i; }\n        int i;\n    };\n    py::class_<Base1a, std::shared_ptr<Base1a>>(m, \"Base1a\")\n        .def(py::init<int>())\n        .def(\"foo\", &Base1a::foo);\n\n    struct Base2a {\n        Base2a(int i) : i(i) { }\n        int bar() { return i; }\n        int i;\n    };\n    py::class_<Base2a, std::shared_ptr<Base2a>>(m, 
\"Base2a\")\n        .def(py::init<int>())\n        .def(\"bar\", &Base2a::bar);\n\n    struct Base12a : Base1a, Base2a {\n        Base12a(int i, int j) : Base1a(i), Base2a(j) { }\n    };\n    py::class_<Base12a, /* Base1 missing */ Base2a,\n               std::shared_ptr<Base12a>>(m, \"Base12a\", py::multiple_inheritance())\n        .def(py::init<int, int>());\n\n    m.def(\"bar_base2a\", [](Base2a *b) { return b->bar(); });\n    m.def(\"bar_base2a_sharedptr\", [](std::shared_ptr<Base2a> b) { return b->bar(); });\n\n    // test_mi_unaligned_base\n    // test_mi_base_return\n    // Issue #801: invalid casting to derived type with MI bases\n    struct I801B1 { int a = 1; I801B1() = default; I801B1(const I801B1 &) = default; virtual ~I801B1() = default; };\n    struct I801B2 { int b = 2; I801B2() = default; I801B2(const I801B2 &) = default; virtual ~I801B2() = default; };\n    struct I801C : I801B1, I801B2 {};\n    struct I801D : I801C {}; // Indirect MI\n    // Unregistered classes:\n    struct I801B3 { int c = 3; virtual ~I801B3() = default; };\n    struct I801E : I801B3, I801D {};\n\n    py::class_<I801B1, std::shared_ptr<I801B1>>(m, \"I801B1\").def(py::init<>()).def_readonly(\"a\", &I801B1::a);\n    py::class_<I801B2, std::shared_ptr<I801B2>>(m, \"I801B2\").def(py::init<>()).def_readonly(\"b\", &I801B2::b);\n    py::class_<I801C, I801B1, I801B2, std::shared_ptr<I801C>>(m, \"I801C\").def(py::init<>());\n    py::class_<I801D, I801C, std::shared_ptr<I801D>>(m, \"I801D\").def(py::init<>());\n\n    // Two separate issues here: first, we want to recognize a pointer to a base type as being a\n    // known instance even when the pointer value is unequal (i.e. 
due to a non-first\n    // multiple-inheritance base class):\n    m.def(\"i801b1_c\", [](I801C *c) { return static_cast<I801B1 *>(c); });\n    m.def(\"i801b2_c\", [](I801C *c) { return static_cast<I801B2 *>(c); });\n    m.def(\"i801b1_d\", [](I801D *d) { return static_cast<I801B1 *>(d); });\n    m.def(\"i801b2_d\", [](I801D *d) { return static_cast<I801B2 *>(d); });\n\n    // Second, when returned a base class pointer to a derived instance, we cannot assume that the\n    // pointer is `reinterpret_cast`able to the derived pointer because, like above, the base class\n    // pointer could be offset.\n    m.def(\"i801c_b1\", []() -> I801B1 * { return new I801C(); });\n    m.def(\"i801c_b2\", []() -> I801B2 * { return new I801C(); });\n    m.def(\"i801d_b1\", []() -> I801B1 * { return new I801D(); });\n    m.def(\"i801d_b2\", []() -> I801B2 * { return new I801D(); });\n\n    // Return a base class pointer to a pybind-registered type when the actual derived type\n    // isn't pybind-registered (and uses multiple-inheritance to offset the pybind base)\n    m.def(\"i801e_c\", []() -> I801C * { return new I801E(); });\n    m.def(\"i801e_b2\", []() -> I801B2 * { return new I801E(); });\n\n\n    // test_mi_static_properties\n    py::class_<Vanilla>(m, \"Vanilla\")\n        .def(py::init<>())\n        .def(\"vanilla\", &Vanilla::vanilla);\n\n    py::class_<WithStatic1>(m, \"WithStatic1\")\n        .def(py::init<>())\n        .def_static(\"static_func1\", &WithStatic1::static_func1)\n        .def_readwrite_static(\"static_value1\", &WithStatic1::static_value1);\n\n    py::class_<WithStatic2>(m, \"WithStatic2\")\n        .def(py::init<>())\n        .def_static(\"static_func2\", &WithStatic2::static_func2)\n        .def_readwrite_static(\"static_value2\", &WithStatic2::static_value2);\n\n    py::class_<VanillaStaticMix1, Vanilla, WithStatic1, WithStatic2>(\n        m, \"VanillaStaticMix1\")\n        .def(py::init<>())\n        .def_static(\"static_func\", 
&VanillaStaticMix1::static_func)\n        .def_readwrite_static(\"static_value\", &VanillaStaticMix1::static_value);\n\n    py::class_<VanillaStaticMix2, WithStatic1, Vanilla, WithStatic2>(\n        m, \"VanillaStaticMix2\")\n        .def(py::init<>())\n        .def_static(\"static_func\", &VanillaStaticMix2::static_func)\n        .def_readwrite_static(\"static_value\", &VanillaStaticMix2::static_value);\n\n\n#if !defined(PYPY_VERSION)\n    struct WithDict { };\n    struct VanillaDictMix1 : Vanilla, WithDict { };\n    struct VanillaDictMix2 : WithDict, Vanilla { };\n    py::class_<WithDict>(m, \"WithDict\", py::dynamic_attr()).def(py::init<>());\n    py::class_<VanillaDictMix1, Vanilla, WithDict>(m, \"VanillaDictMix1\").def(py::init<>());\n    py::class_<VanillaDictMix2, WithDict, Vanilla>(m, \"VanillaDictMix2\").def(py::init<>());\n#endif\n\n    // test_diamond_inheritance\n    // Issue #959: segfault when constructing diamond inheritance instance\n    // All of these have int members so that there will be various unequal pointers involved.\n    struct B { int b; B() = default; B(const B&) = default; virtual ~B() = default; };\n    struct C0 : public virtual B { int c0; };\n    struct C1 : public virtual B { int c1; };\n    struct D : public C0, public C1 { int d; };\n    py::class_<B>(m, \"B\")\n        .def(\"b\", [](B *self) { return self; });\n    py::class_<C0, B>(m, \"C0\")\n        .def(\"c0\", [](C0 *self) { return self; });\n    py::class_<C1, B>(m, \"C1\")\n        .def(\"c1\", [](C1 *self) { return self; });\n    py::class_<D, C0, C1>(m, \"D\")\n        .def(py::init<>());\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_multiple_inheritance.py",
    "content": "import pytest\nfrom pybind11_tests import ConstructorStats\nfrom pybind11_tests import multiple_inheritance as m\n\n\ndef test_multiple_inheritance_cpp():\n    mt = m.MIType(3, 4)\n\n    assert mt.foo() == 3\n    assert mt.bar() == 4\n\n\ndef test_multiple_inheritance_mix1():\n    class Base1:\n        def __init__(self, i):\n            self.i = i\n\n        def foo(self):\n            return self.i\n\n    class MITypePy(Base1, m.Base2):\n        def __init__(self, i, j):\n            Base1.__init__(self, i)\n            m.Base2.__init__(self, j)\n\n    mt = MITypePy(3, 4)\n\n    assert mt.foo() == 3\n    assert mt.bar() == 4\n\n\ndef test_multiple_inheritance_mix2():\n\n    class Base2:\n        def __init__(self, i):\n            self.i = i\n\n        def bar(self):\n            return self.i\n\n    class MITypePy(m.Base1, Base2):\n        def __init__(self, i, j):\n            m.Base1.__init__(self, i)\n            Base2.__init__(self, j)\n\n    mt = MITypePy(3, 4)\n\n    assert mt.foo() == 3\n    assert mt.bar() == 4\n\n\ndef test_multiple_inheritance_python():\n\n    class MI1(m.Base1, m.Base2):\n        def __init__(self, i, j):\n            m.Base1.__init__(self, i)\n            m.Base2.__init__(self, j)\n\n    class B1(object):\n        def v(self):\n            return 1\n\n    class MI2(B1, m.Base1, m.Base2):\n        def __init__(self, i, j):\n            B1.__init__(self)\n            m.Base1.__init__(self, i)\n            m.Base2.__init__(self, j)\n\n    class MI3(MI2):\n        def __init__(self, i, j):\n            MI2.__init__(self, i, j)\n\n    class MI4(MI3, m.Base2):\n        def __init__(self, i, j):\n            MI3.__init__(self, i, j)\n            # This should be ignored (Base2 is already initialized via MI2):\n            m.Base2.__init__(self, i + 100)\n\n    class MI5(m.Base2, B1, m.Base1):\n        def __init__(self, i, j):\n            B1.__init__(self)\n            m.Base1.__init__(self, i)\n            
m.Base2.__init__(self, j)\n\n    class MI6(m.Base2, B1):\n        def __init__(self, i):\n            m.Base2.__init__(self, i)\n            B1.__init__(self)\n\n    class B2(B1):\n        def v(self):\n            return 2\n\n    class B3(object):\n        def v(self):\n            return 3\n\n    class B4(B3, B2):\n        def v(self):\n            return 4\n\n    class MI7(B4, MI6):\n        def __init__(self, i):\n            B4.__init__(self)\n            MI6.__init__(self, i)\n\n    class MI8(MI6, B3):\n        def __init__(self, i):\n            MI6.__init__(self, i)\n            B3.__init__(self)\n\n    class MI8b(B3, MI6):\n        def __init__(self, i):\n            B3.__init__(self)\n            MI6.__init__(self, i)\n\n    mi1 = MI1(1, 2)\n    assert mi1.foo() == 1\n    assert mi1.bar() == 2\n\n    mi2 = MI2(3, 4)\n    assert mi2.v() == 1\n    assert mi2.foo() == 3\n    assert mi2.bar() == 4\n\n    mi3 = MI3(5, 6)\n    assert mi3.v() == 1\n    assert mi3.foo() == 5\n    assert mi3.bar() == 6\n\n    mi4 = MI4(7, 8)\n    assert mi4.v() == 1\n    assert mi4.foo() == 7\n    assert mi4.bar() == 8\n\n    mi5 = MI5(10, 11)\n    assert mi5.v() == 1\n    assert mi5.foo() == 10\n    assert mi5.bar() == 11\n\n    mi6 = MI6(12)\n    assert mi6.v() == 1\n    assert mi6.bar() == 12\n\n    mi7 = MI7(13)\n    assert mi7.v() == 4\n    assert mi7.bar() == 13\n\n    mi8 = MI8(14)\n    assert mi8.v() == 1\n    assert mi8.bar() == 14\n\n    mi8b = MI8b(15)\n    assert mi8b.v() == 3\n    assert mi8b.bar() == 15\n\n\ndef test_multiple_inheritance_python_many_bases():\n\n    class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4):\n        def __init__(self):\n            m.BaseN1.__init__(self, 1)\n            m.BaseN2.__init__(self, 2)\n            m.BaseN3.__init__(self, 3)\n            m.BaseN4.__init__(self, 4)\n\n    class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8):\n        def __init__(self):\n            m.BaseN5.__init__(self, 5)\n            
m.BaseN6.__init__(self, 6)\n            m.BaseN7.__init__(self, 7)\n            m.BaseN8.__init__(self, 8)\n\n    class MIMany916(m.BaseN9, m.BaseN10, m.BaseN11, m.BaseN12, m.BaseN13, m.BaseN14, m.BaseN15,\n                    m.BaseN16):\n        def __init__(self):\n            m.BaseN9.__init__(self, 9)\n            m.BaseN10.__init__(self, 10)\n            m.BaseN11.__init__(self, 11)\n            m.BaseN12.__init__(self, 12)\n            m.BaseN13.__init__(self, 13)\n            m.BaseN14.__init__(self, 14)\n            m.BaseN15.__init__(self, 15)\n            m.BaseN16.__init__(self, 16)\n\n    class MIMany19(MIMany14, MIMany58, m.BaseN9):\n        def __init__(self):\n            MIMany14.__init__(self)\n            MIMany58.__init__(self)\n            m.BaseN9.__init__(self, 9)\n\n    class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17):\n        def __init__(self):\n            MIMany14.__init__(self)\n            MIMany58.__init__(self)\n            MIMany916.__init__(self)\n            m.BaseN17.__init__(self, 17)\n\n    # Inherits from 4 registered C++ classes: can fit in one pointer on any modern arch:\n    a = MIMany14()\n    for i in range(1, 4):\n        assert getattr(a, \"f\" + str(i))() == 2 * i\n\n    # Inherits from 8: requires 1/2 pointers worth of holder flags on 32/64-bit arch:\n    b = MIMany916()\n    for i in range(9, 16):\n        assert getattr(b, \"f\" + str(i))() == 2 * i\n\n    # Inherits from 9: requires >= 2 pointers worth of holder flags\n    c = MIMany19()\n    for i in range(1, 9):\n        assert getattr(c, \"f\" + str(i))() == 2 * i\n\n    # Inherits from 17: requires >= 3 pointers worth of holder flags\n    d = MIMany117()\n    for i in range(1, 17):\n        assert getattr(d, \"f\" + str(i))() == 2 * i\n\n\ndef test_multiple_inheritance_virtbase():\n\n    class MITypePy(m.Base12a):\n        def __init__(self, i, j):\n            m.Base12a.__init__(self, i, j)\n\n    mt = MITypePy(3, 4)\n    assert mt.bar() == 4\n    
assert m.bar_base2a(mt) == 4\n    assert m.bar_base2a_sharedptr(mt) == 4\n\n\ndef test_mi_static_properties():\n    \"\"\"Mixing bases with and without static properties should be possible\n     and the result should be independent of base definition order\"\"\"\n\n    for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()):\n        assert d.vanilla() == \"Vanilla\"\n        assert d.static_func1() == \"WithStatic1\"\n        assert d.static_func2() == \"WithStatic2\"\n        assert d.static_func() == d.__class__.__name__\n\n        m.WithStatic1.static_value1 = 1\n        m.WithStatic2.static_value2 = 2\n        assert d.static_value1 == 1\n        assert d.static_value2 == 2\n        assert d.static_value == 12\n\n        d.static_value1 = 0\n        assert d.static_value1 == 0\n        d.static_value2 = 0\n        assert d.static_value2 == 0\n        d.static_value = 0\n        assert d.static_value == 0\n\n\n@pytest.unsupported_on_pypy\ndef test_mi_dynamic_attributes():\n    \"\"\"Mixing bases with and without dynamic attribute support\"\"\"\n\n    for d in (m.VanillaDictMix1(), m.VanillaDictMix2()):\n        d.dynamic = 1\n        assert d.dynamic == 1\n\n\ndef test_mi_unaligned_base():\n    \"\"\"Returning an offset (non-first MI) base class pointer should recognize the instance\"\"\"\n\n    n_inst = ConstructorStats.detail_reg_inst()\n\n    c = m.I801C()\n    d = m.I801D()\n    # + 4 below because we have the two instances, and each instance has offset base I801B2\n    assert ConstructorStats.detail_reg_inst() == n_inst + 4\n    b1c = m.i801b1_c(c)\n    assert b1c is c\n    b2c = m.i801b2_c(c)\n    assert b2c is c\n    b1d = m.i801b1_d(d)\n    assert b1d is d\n    b2d = m.i801b2_d(d)\n    assert b2d is d\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 4  # no extra instances\n    del c, b1c, b2c\n    assert ConstructorStats.detail_reg_inst() == n_inst + 2\n    del d, b1d, b2d\n    assert ConstructorStats.detail_reg_inst() == n_inst\n\n\ndef 
test_mi_base_return():\n    \"\"\"Tests returning an offset (non-first MI) base class pointer to a derived instance\"\"\"\n\n    n_inst = ConstructorStats.detail_reg_inst()\n\n    c1 = m.i801c_b1()\n    assert type(c1) is m.I801C\n    assert c1.a == 1\n    assert c1.b == 2\n\n    d1 = m.i801d_b1()\n    assert type(d1) is m.I801D\n    assert d1.a == 1\n    assert d1.b == 2\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 4\n\n    c2 = m.i801c_b2()\n    assert type(c2) is m.I801C\n    assert c2.a == 1\n    assert c2.b == 2\n\n    d2 = m.i801d_b2()\n    assert type(d2) is m.I801D\n    assert d2.a == 1\n    assert d2.b == 2\n\n    assert ConstructorStats.detail_reg_inst() == n_inst + 8\n\n    del c2\n    assert ConstructorStats.detail_reg_inst() == n_inst + 6\n    del c1, d1, d2\n    assert ConstructorStats.detail_reg_inst() == n_inst\n\n    # Returning an unregistered derived type with a registered base; we won't\n    # pick up the derived type, obviously, but should still work (as an object\n    # of whatever type was returned).\n    e1 = m.i801e_c()\n    assert type(e1) is m.I801C\n    assert e1.a == 1\n    assert e1.b == 2\n\n    e2 = m.i801e_b2()\n    assert type(e2) is m.I801B2\n    assert e2.b == 2\n\n\ndef test_diamond_inheritance():\n    \"\"\"Tests that diamond inheritance works as expected (issue #959)\"\"\"\n\n    # Issue #959: this shouldn't segfault:\n    d = m.D()\n\n    # Make sure all the various distinct pointers are all recognized as registered instances:\n    assert d is d.c0()\n    assert d is d.c1()\n    assert d is d.b()\n    assert d is d.c0().b()\n    assert d is d.c1().b()\n    assert d is d.c0().c1().b().c0().b()\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_numpy_array.cpp",
    "content": "/*\n    tests/test_numpy_array.cpp -- test core array functionality\n\n    Copyright (c) 2016 Ivan Smirnov <i.s.smirnov@gmail.com>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\n#include <pybind11/numpy.h>\n#include <pybind11/stl.h>\n\n#include <cstdint>\n\nusing arr = py::array;\nusing arr_t = py::array_t<uint16_t, 0>;\nstatic_assert(std::is_same<arr_t::value_type, uint16_t>::value, \"\");\n\ntemplate<typename... Ix> arr data(const arr& a, Ix... index) {\n    return arr(a.nbytes() - a.offset_at(index...), (const uint8_t *) a.data(index...));\n}\n\ntemplate<typename... Ix> arr data_t(const arr_t& a, Ix... index) {\n    return arr(a.size() - a.index_at(index...), a.data(index...));\n}\n\ntemplate<typename... Ix> arr& mutate_data(arr& a, Ix... index) {\n    auto ptr = (uint8_t *) a.mutable_data(index...);\n    for (ssize_t i = 0; i < a.nbytes() - a.offset_at(index...); i++)\n        ptr[i] = (uint8_t) (ptr[i] * 2);\n    return a;\n}\n\ntemplate<typename... Ix> arr_t& mutate_data_t(arr_t& a, Ix... index) {\n    auto ptr = a.mutable_data(index...);\n    for (ssize_t i = 0; i < a.size() - a.index_at(index...); i++)\n        ptr[i]++;\n    return a;\n}\n\ntemplate<typename... Ix> ssize_t index_at(const arr& a, Ix... idx) { return a.index_at(idx...); }\ntemplate<typename... Ix> ssize_t index_at_t(const arr_t& a, Ix... idx) { return a.index_at(idx...); }\ntemplate<typename... Ix> ssize_t offset_at(const arr& a, Ix... idx) { return a.offset_at(idx...); }\ntemplate<typename... Ix> ssize_t offset_at_t(const arr_t& a, Ix... idx) { return a.offset_at(idx...); }\ntemplate<typename... Ix> ssize_t at_t(const arr_t& a, Ix... idx) { return a.at(idx...); }\ntemplate<typename... Ix> arr_t& mutate_at_t(arr_t& a, Ix... 
idx) { a.mutable_at(idx...)++; return a; }\n\n#define def_index_fn(name, type) \\\n    sm.def(#name, [](type a) { return name(a); }); \\\n    sm.def(#name, [](type a, int i) { return name(a, i); }); \\\n    sm.def(#name, [](type a, int i, int j) { return name(a, i, j); }); \\\n    sm.def(#name, [](type a, int i, int j, int k) { return name(a, i, j, k); });\n\ntemplate <typename T, typename T2> py::handle auxiliaries(T &&r, T2 &&r2) {\n    if (r.ndim() != 2) throw std::domain_error(\"error: ndim != 2\");\n    py::list l;\n    l.append(*r.data(0, 0));\n    l.append(*r2.mutable_data(0, 0));\n    l.append(r.data(0, 1) == r2.mutable_data(0, 1));\n    l.append(r.ndim());\n    l.append(r.itemsize());\n    l.append(r.shape(0));\n    l.append(r.shape(1));\n    l.append(r.size());\n    l.append(r.nbytes());\n    return l.release();\n}\n\nTEST_SUBMODULE(numpy_array, sm) {\n    try { py::module::import(\"numpy\"); }\n    catch (...) { return; }\n\n    // test_array_attributes\n    sm.def(\"ndim\", [](const arr& a) { return a.ndim(); });\n    sm.def(\"shape\", [](const arr& a) { return arr(a.ndim(), a.shape()); });\n    sm.def(\"shape\", [](const arr& a, ssize_t dim) { return a.shape(dim); });\n    sm.def(\"strides\", [](const arr& a) { return arr(a.ndim(), a.strides()); });\n    sm.def(\"strides\", [](const arr& a, ssize_t dim) { return a.strides(dim); });\n    sm.def(\"writeable\", [](const arr& a) { return a.writeable(); });\n    sm.def(\"size\", [](const arr& a) { return a.size(); });\n    sm.def(\"itemsize\", [](const arr& a) { return a.itemsize(); });\n    sm.def(\"nbytes\", [](const arr& a) { return a.nbytes(); });\n    sm.def(\"owndata\", [](const arr& a) { return a.owndata(); });\n\n    // test_index_offset\n    def_index_fn(index_at, const arr&);\n    def_index_fn(index_at_t, const arr_t&);\n    def_index_fn(offset_at, const arr&);\n    def_index_fn(offset_at_t, const arr_t&);\n    // test_data\n    def_index_fn(data, const arr&);\n    def_index_fn(data_t, const 
arr_t&);\n    // test_mutate_data, test_mutate_readonly\n    def_index_fn(mutate_data, arr&);\n    def_index_fn(mutate_data_t, arr_t&);\n    def_index_fn(at_t, const arr_t&);\n    def_index_fn(mutate_at_t, arr_t&);\n\n    // test_make_c_f_array\n    sm.def(\"make_f_array\", [] { return py::array_t<float>({ 2, 2 }, { 4, 8 }); });\n    sm.def(\"make_c_array\", [] { return py::array_t<float>({ 2, 2 }, { 8, 4 }); });\n\n    // test_empty_shaped_array\n    sm.def(\"make_empty_shaped_array\", [] { return py::array(py::dtype(\"f\"), {}, {}); });\n\n    // test_wrap\n    sm.def(\"wrap\", [](py::array a) {\n        return py::array(\n            a.dtype(),\n            {a.shape(), a.shape() + a.ndim()},\n            {a.strides(), a.strides() + a.ndim()},\n            a.data(),\n            a\n        );\n    });\n\n    // test_numpy_view\n    struct ArrayClass {\n        int data[2] = { 1, 2 };\n        ArrayClass() { py::print(\"ArrayClass()\"); }\n        ~ArrayClass() { py::print(\"~ArrayClass()\"); }\n    };\n    py::class_<ArrayClass>(sm, \"ArrayClass\")\n        .def(py::init<>())\n        .def(\"numpy_view\", [](py::object &obj) {\n            py::print(\"ArrayClass::numpy_view()\");\n            ArrayClass &a = obj.cast<ArrayClass&>();\n            return py::array_t<int>({2}, {4}, a.data, obj);\n        }\n    );\n\n    // test_cast_numpy_int64_to_uint64\n    sm.def(\"function_taking_uint64\", [](uint64_t) { });\n\n    // test_isinstance\n    sm.def(\"isinstance_untyped\", [](py::object yes, py::object no) {\n        return py::isinstance<py::array>(yes) && !py::isinstance<py::array>(no);\n    });\n    sm.def(\"isinstance_typed\", [](py::object o) {\n        return py::isinstance<py::array_t<double>>(o) && !py::isinstance<py::array_t<int>>(o);\n    });\n\n    // test_constructors\n    sm.def(\"default_constructors\", []() {\n        return py::dict(\n            \"array\"_a=py::array(),\n            \"array_t<int32>\"_a=py::array_t<std::int32_t>(),\n            
\"array_t<double>\"_a=py::array_t<double>()\n        );\n    });\n    sm.def(\"converting_constructors\", [](py::object o) {\n        return py::dict(\n            \"array\"_a=py::array(o),\n            \"array_t<int32>\"_a=py::array_t<std::int32_t>(o),\n            \"array_t<double>\"_a=py::array_t<double>(o)\n        );\n    });\n\n    // test_overload_resolution\n    sm.def(\"overloaded\", [](py::array_t<double>) { return \"double\"; });\n    sm.def(\"overloaded\", [](py::array_t<float>) { return \"float\"; });\n    sm.def(\"overloaded\", [](py::array_t<int>) { return \"int\"; });\n    sm.def(\"overloaded\", [](py::array_t<unsigned short>) { return \"unsigned short\"; });\n    sm.def(\"overloaded\", [](py::array_t<long long>) { return \"long long\"; });\n    sm.def(\"overloaded\", [](py::array_t<std::complex<double>>) { return \"double complex\"; });\n    sm.def(\"overloaded\", [](py::array_t<std::complex<float>>) { return \"float complex\"; });\n\n    sm.def(\"overloaded2\", [](py::array_t<std::complex<double>>) { return \"double complex\"; });\n    sm.def(\"overloaded2\", [](py::array_t<double>) { return \"double\"; });\n    sm.def(\"overloaded2\", [](py::array_t<std::complex<float>>) { return \"float complex\"; });\n    sm.def(\"overloaded2\", [](py::array_t<float>) { return \"float\"; });\n\n    // Only accept the exact types:\n    sm.def(\"overloaded3\", [](py::array_t<int>) { return \"int\"; }, py::arg().noconvert());\n    sm.def(\"overloaded3\", [](py::array_t<double>) { return \"double\"; }, py::arg().noconvert());\n\n    // Make sure we don't do unsafe coercion (e.g. 
float to int) when not using forcecast, but\n    // rather that float gets converted via the safe (conversion to double) overload:\n    sm.def(\"overloaded4\", [](py::array_t<long long, 0>) { return \"long long\"; });\n    sm.def(\"overloaded4\", [](py::array_t<double, 0>) { return \"double\"; });\n\n    // But we do allow conversion to int if forcecast is enabled (but only if no overload matches\n    // without conversion)\n    sm.def(\"overloaded5\", [](py::array_t<unsigned int>) { return \"unsigned int\"; });\n    sm.def(\"overloaded5\", [](py::array_t<double>) { return \"double\"; });\n\n    // test_greedy_string_overload\n    // Issue 685: ndarray shouldn't go to std::string overload\n    sm.def(\"issue685\", [](std::string) { return \"string\"; });\n    sm.def(\"issue685\", [](py::array) { return \"array\"; });\n    sm.def(\"issue685\", [](py::object) { return \"other\"; });\n\n    // test_array_unchecked_fixed_dims\n    sm.def(\"proxy_add2\", [](py::array_t<double> a, double v) {\n        auto r = a.mutable_unchecked<2>();\n        for (ssize_t i = 0; i < r.shape(0); i++)\n            for (ssize_t j = 0; j < r.shape(1); j++)\n                r(i, j) += v;\n    }, py::arg().noconvert(), py::arg());\n\n    sm.def(\"proxy_init3\", [](double start) {\n        py::array_t<double, py::array::c_style> a({ 3, 3, 3 });\n        auto r = a.mutable_unchecked<3>();\n        for (ssize_t i = 0; i < r.shape(0); i++)\n        for (ssize_t j = 0; j < r.shape(1); j++)\n        for (ssize_t k = 0; k < r.shape(2); k++)\n            r(i, j, k) = start++;\n        return a;\n    });\n    sm.def(\"proxy_init3F\", [](double start) {\n        py::array_t<double, py::array::f_style> a({ 3, 3, 3 });\n        auto r = a.mutable_unchecked<3>();\n        for (ssize_t k = 0; k < r.shape(2); k++)\n        for (ssize_t j = 0; j < r.shape(1); j++)\n        for (ssize_t i = 0; i < r.shape(0); i++)\n            r(i, j, k) = start++;\n        return a;\n    });\n    
sm.def(\"proxy_squared_L2_norm\", [](py::array_t<double> a) {\n        auto r = a.unchecked<1>();\n        double sumsq = 0;\n        for (ssize_t i = 0; i < r.shape(0); i++)\n            sumsq += r[i] * r(i); // Either notation works for a 1D array\n        return sumsq;\n    });\n\n    sm.def(\"proxy_auxiliaries2\", [](py::array_t<double> a) {\n        auto r = a.unchecked<2>();\n        auto r2 = a.mutable_unchecked<2>();\n        return auxiliaries(r, r2);\n    });\n\n    // test_array_unchecked_dyn_dims\n    // Same as the above, but without a compile-time dimensions specification:\n    sm.def(\"proxy_add2_dyn\", [](py::array_t<double> a, double v) {\n        auto r = a.mutable_unchecked();\n        if (r.ndim() != 2) throw std::domain_error(\"error: ndim != 2\");\n        for (ssize_t i = 0; i < r.shape(0); i++)\n            for (ssize_t j = 0; j < r.shape(1); j++)\n                r(i, j) += v;\n    }, py::arg().noconvert(), py::arg());\n    sm.def(\"proxy_init3_dyn\", [](double start) {\n        py::array_t<double, py::array::c_style> a({ 3, 3, 3 });\n        auto r = a.mutable_unchecked();\n        if (r.ndim() != 3) throw std::domain_error(\"error: ndim != 3\");\n        for (ssize_t i = 0; i < r.shape(0); i++)\n        for (ssize_t j = 0; j < r.shape(1); j++)\n        for (ssize_t k = 0; k < r.shape(2); k++)\n            r(i, j, k) = start++;\n        return a;\n    });\n    sm.def(\"proxy_auxiliaries2_dyn\", [](py::array_t<double> a) {\n        return auxiliaries(a.unchecked(), a.mutable_unchecked());\n    });\n\n    sm.def(\"array_auxiliaries2\", [](py::array_t<double> a) {\n        return auxiliaries(a, a);\n    });\n\n    // test_array_failures\n    // Issue #785: Uninformative \"Unknown internal error\" exception when constructing array from empty object:\n    sm.def(\"array_fail_test\", []() { return py::array(py::object()); });\n    sm.def(\"array_t_fail_test\", []() { return py::array_t<double>(py::object()); });\n    // Make sure the error from 
numpy is being passed through:\n    sm.def(\"array_fail_test_negative_size\", []() { int c = 0; return py::array(-1, &c); });\n\n    // test_initializer_list\n    // Issue (unnumbered; reported in #788): regression: initializer lists can be ambiguous\n    sm.def(\"array_initializer_list1\", []() { return py::array_t<float>(1); }); // { 1 } also works, but clang warns about it\n    sm.def(\"array_initializer_list2\", []() { return py::array_t<float>({ 1, 2 }); });\n    sm.def(\"array_initializer_list3\", []() { return py::array_t<float>({ 1, 2, 3 }); });\n    sm.def(\"array_initializer_list4\", []() { return py::array_t<float>({ 1, 2, 3, 4 }); });\n\n    // test_array_resize\n    // reshape array to 2D without changing size\n    sm.def(\"array_reshape2\", [](py::array_t<double> a) {\n        const ssize_t dim_sz = (ssize_t)std::sqrt(a.size());\n        if (dim_sz * dim_sz != a.size())\n            throw std::domain_error(\"array_reshape2: input array total size is not a squared integer\");\n        a.resize({dim_sz, dim_sz});\n    });\n\n    // resize to 3D array with each dimension = N\n    sm.def(\"array_resize3\", [](py::array_t<double> a, size_t N, bool refcheck) {\n        a.resize({N, N, N}, refcheck);\n    });\n\n    // test_array_create_and_resize\n    // return 2D array with Nrows = Ncols = N\n    sm.def(\"create_and_resize\", [](size_t N) {\n        py::array_t<double> a;\n        a.resize({N, N});\n        std::fill(a.mutable_data(), a.mutable_data() + a.size(), 42.);\n        return a;\n    });\n\n#if PY_MAJOR_VERSION >= 3\n        sm.def(\"index_using_ellipsis\", [](py::array a) {\n            return a[py::make_tuple(0, py::ellipsis(), 0)];\n        });\n#endif\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_numpy_array.py",
    "content": "import pytest\nfrom pybind11_tests import numpy_array as m\n\npytestmark = pytest.requires_numpy\n\nwith pytest.suppress(ImportError):\n    import numpy as np\n\n\n@pytest.fixture(scope='function')\ndef arr():\n    return np.array([[1, 2, 3], [4, 5, 6]], '=u2')\n\n\ndef test_array_attributes():\n    a = np.array(0, 'f8')\n    assert m.ndim(a) == 0\n    assert all(m.shape(a) == [])\n    assert all(m.strides(a) == [])\n    with pytest.raises(IndexError) as excinfo:\n        m.shape(a, 0)\n    assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'\n    with pytest.raises(IndexError) as excinfo:\n        m.strides(a, 0)\n    assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'\n    assert m.writeable(a)\n    assert m.size(a) == 1\n    assert m.itemsize(a) == 8\n    assert m.nbytes(a) == 8\n    assert m.owndata(a)\n\n    a = np.array([[1, 2, 3], [4, 5, 6]], 'u2').view()\n    a.flags.writeable = False\n    assert m.ndim(a) == 2\n    assert all(m.shape(a) == [2, 3])\n    assert m.shape(a, 0) == 2\n    assert m.shape(a, 1) == 3\n    assert all(m.strides(a) == [6, 2])\n    assert m.strides(a, 0) == 6\n    assert m.strides(a, 1) == 2\n    with pytest.raises(IndexError) as excinfo:\n        m.shape(a, 2)\n    assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'\n    with pytest.raises(IndexError) as excinfo:\n        m.strides(a, 2)\n    assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'\n    assert not m.writeable(a)\n    assert m.size(a) == 6\n    assert m.itemsize(a) == 2\n    assert m.nbytes(a) == 12\n    assert not m.owndata(a)\n\n\n@pytest.mark.parametrize('args, ret', [([], 0), ([0], 0), ([1], 3), ([0, 1], 1), ([1, 2], 5)])\ndef test_index_offset(arr, args, ret):\n    assert m.index_at(arr, *args) == ret\n    assert m.index_at_t(arr, *args) == ret\n    assert m.offset_at(arr, *args) == ret * arr.dtype.itemsize\n    assert m.offset_at_t(arr, *args) == ret * arr.dtype.itemsize\n\n\ndef test_dim_check_fail(arr):\n    for func in 
(m.index_at, m.index_at_t, m.offset_at, m.offset_at_t, m.data, m.data_t,\n                 m.mutate_data, m.mutate_data_t):\n        with pytest.raises(IndexError) as excinfo:\n            func(arr, 1, 2, 3)\n        assert str(excinfo.value) == 'too many indices for an array: 3 (ndim = 2)'\n\n\n@pytest.mark.parametrize('args, ret',\n                         [([], [1, 2, 3, 4, 5, 6]),\n                          ([1], [4, 5, 6]),\n                          ([0, 1], [2, 3, 4, 5, 6]),\n                          ([1, 2], [6])])\ndef test_data(arr, args, ret):\n    from sys import byteorder\n    assert all(m.data_t(arr, *args) == ret)\n    assert all(m.data(arr, *args)[(0 if byteorder == 'little' else 1)::2] == ret)\n    assert all(m.data(arr, *args)[(1 if byteorder == 'little' else 0)::2] == 0)\n\n\n@pytest.mark.parametrize('dim', [0, 1, 3])\ndef test_at_fail(arr, dim):\n    for func in m.at_t, m.mutate_at_t:\n        with pytest.raises(IndexError) as excinfo:\n            func(arr, *([0] * dim))\n        assert str(excinfo.value) == 'index dimension mismatch: {} (ndim = 2)'.format(dim)\n\n\ndef test_at(arr):\n    assert m.at_t(arr, 0, 2) == 3\n    assert m.at_t(arr, 1, 0) == 4\n\n    assert all(m.mutate_at_t(arr, 0, 2).ravel() == [1, 2, 4, 4, 5, 6])\n    assert all(m.mutate_at_t(arr, 1, 0).ravel() == [1, 2, 4, 5, 5, 6])\n\n\ndef test_mutate_readonly(arr):\n    arr.flags.writeable = False\n    for func, args in (m.mutate_data, ()), (m.mutate_data_t, ()), (m.mutate_at_t, (0, 0)):\n        with pytest.raises(ValueError) as excinfo:\n            func(arr, *args)\n        assert str(excinfo.value) == 'array is not writeable'\n\n\ndef test_mutate_data(arr):\n    assert all(m.mutate_data(arr).ravel() == [2, 4, 6, 8, 10, 12])\n    assert all(m.mutate_data(arr).ravel() == [4, 8, 12, 16, 20, 24])\n    assert all(m.mutate_data(arr, 1).ravel() == [4, 8, 12, 32, 40, 48])\n    assert all(m.mutate_data(arr, 0, 1).ravel() == [4, 16, 24, 64, 80, 96])\n    assert all(m.mutate_data(arr, 
1, 2).ravel() == [4, 16, 24, 64, 80, 192])\n\n    assert all(m.mutate_data_t(arr).ravel() == [5, 17, 25, 65, 81, 193])\n    assert all(m.mutate_data_t(arr).ravel() == [6, 18, 26, 66, 82, 194])\n    assert all(m.mutate_data_t(arr, 1).ravel() == [6, 18, 26, 67, 83, 195])\n    assert all(m.mutate_data_t(arr, 0, 1).ravel() == [6, 19, 27, 68, 84, 196])\n    assert all(m.mutate_data_t(arr, 1, 2).ravel() == [6, 19, 27, 68, 84, 197])\n\n\ndef test_bounds_check(arr):\n    for func in (m.index_at, m.index_at_t, m.data, m.data_t,\n                 m.mutate_data, m.mutate_data_t, m.at_t, m.mutate_at_t):\n        with pytest.raises(IndexError) as excinfo:\n            func(arr, 2, 0)\n        assert str(excinfo.value) == 'index 2 is out of bounds for axis 0 with size 2'\n        with pytest.raises(IndexError) as excinfo:\n            func(arr, 0, 4)\n        assert str(excinfo.value) == 'index 4 is out of bounds for axis 1 with size 3'\n\n\ndef test_make_c_f_array():\n    assert m.make_c_array().flags.c_contiguous\n    assert not m.make_c_array().flags.f_contiguous\n    assert m.make_f_array().flags.f_contiguous\n    assert not m.make_f_array().flags.c_contiguous\n\n\ndef test_make_empty_shaped_array():\n    m.make_empty_shaped_array()\n\n\ndef test_wrap():\n    def assert_references(a, b, base=None):\n        from distutils.version import LooseVersion\n        if base is None:\n            base = a\n        assert a is not b\n        assert a.__array_interface__['data'][0] == b.__array_interface__['data'][0]\n        assert a.shape == b.shape\n        assert a.strides == b.strides\n        assert a.flags.c_contiguous == b.flags.c_contiguous\n        assert a.flags.f_contiguous == b.flags.f_contiguous\n        assert a.flags.writeable == b.flags.writeable\n        assert a.flags.aligned == b.flags.aligned\n        if LooseVersion(np.__version__) >= LooseVersion(\"1.14.0\"):\n            assert a.flags.writebackifcopy == b.flags.writebackifcopy\n        else:\n            assert 
a.flags.updateifcopy == b.flags.updateifcopy\n        assert np.all(a == b)\n        assert not b.flags.owndata\n        assert b.base is base\n        if a.flags.writeable and a.ndim == 2:\n            a[0, 0] = 1234\n            assert b[0, 0] == 1234\n\n    a1 = np.array([1, 2], dtype=np.int16)\n    assert a1.flags.owndata and a1.base is None\n    a2 = m.wrap(a1)\n    assert_references(a1, a2)\n\n    a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='F')\n    assert a1.flags.owndata and a1.base is None\n    a2 = m.wrap(a1)\n    assert_references(a1, a2)\n\n    a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='C')\n    a1.flags.writeable = False\n    a2 = m.wrap(a1)\n    assert_references(a1, a2)\n\n    a1 = np.random.random((4, 4, 4))\n    a2 = m.wrap(a1)\n    assert_references(a1, a2)\n\n    a1t = a1.transpose()\n    a2 = m.wrap(a1t)\n    assert_references(a1t, a2, a1)\n\n    a1d = a1.diagonal()\n    a2 = m.wrap(a1d)\n    assert_references(a1d, a2, a1)\n\n    a1m = a1[::-1, ::-1, ::-1]\n    a2 = m.wrap(a1m)\n    assert_references(a1m, a2, a1)\n\n\ndef test_numpy_view(capture):\n    with capture:\n        ac = m.ArrayClass()\n        ac_view_1 = ac.numpy_view()\n        ac_view_2 = ac.numpy_view()\n        assert np.all(ac_view_1 == np.array([1, 2], dtype=np.int32))\n        del ac\n        pytest.gc_collect()\n    assert capture == \"\"\"\n        ArrayClass()\n        ArrayClass::numpy_view()\n        ArrayClass::numpy_view()\n    \"\"\"\n    ac_view_1[0] = 4\n    ac_view_1[1] = 3\n    assert ac_view_2[0] == 4\n    assert ac_view_2[1] == 3\n    with capture:\n        del ac_view_1\n        del ac_view_2\n        pytest.gc_collect()\n        pytest.gc_collect()\n    assert capture == \"\"\"\n        ~ArrayClass()\n    \"\"\"\n\n\n@pytest.unsupported_on_pypy\ndef test_cast_numpy_int64_to_uint64():\n    m.function_taking_uint64(123)\n    m.function_taking_uint64(np.uint64(123))\n\n\ndef test_isinstance():\n    assert m.isinstance_untyped(np.array([1, 
2, 3]), \"not an array\")\n    assert m.isinstance_typed(np.array([1.0, 2.0, 3.0]))\n\n\ndef test_constructors():\n    defaults = m.default_constructors()\n    for a in defaults.values():\n        assert a.size == 0\n    assert defaults[\"array\"].dtype == np.array([]).dtype\n    assert defaults[\"array_t<int32>\"].dtype == np.int32\n    assert defaults[\"array_t<double>\"].dtype == np.float64\n\n    results = m.converting_constructors([1, 2, 3])\n    for a in results.values():\n        np.testing.assert_array_equal(a, [1, 2, 3])\n    assert results[\"array\"].dtype == np.int_\n    assert results[\"array_t<int32>\"].dtype == np.int32\n    assert results[\"array_t<double>\"].dtype == np.float64\n\n\ndef test_overload_resolution(msg):\n    # Exact overload matches:\n    assert m.overloaded(np.array([1], dtype='float64')) == 'double'\n    assert m.overloaded(np.array([1], dtype='float32')) == 'float'\n    assert m.overloaded(np.array([1], dtype='ushort')) == 'unsigned short'\n    assert m.overloaded(np.array([1], dtype='intc')) == 'int'\n    assert m.overloaded(np.array([1], dtype='longlong')) == 'long long'\n    assert m.overloaded(np.array([1], dtype='complex')) == 'double complex'\n    assert m.overloaded(np.array([1], dtype='csingle')) == 'float complex'\n\n    # No exact match, should call first convertible version:\n    assert m.overloaded(np.array([1], dtype='uint8')) == 'double'\n\n    with pytest.raises(TypeError) as excinfo:\n        m.overloaded(\"not an array\")\n    assert msg(excinfo.value) == \"\"\"\n        overloaded(): incompatible function arguments. The following argument types are supported:\n            1. (arg0: numpy.ndarray[float64]) -> str\n            2. (arg0: numpy.ndarray[float32]) -> str\n            3. (arg0: numpy.ndarray[int32]) -> str\n            4. (arg0: numpy.ndarray[uint16]) -> str\n            5. (arg0: numpy.ndarray[int64]) -> str\n            6. (arg0: numpy.ndarray[complex128]) -> str\n            7. 
(arg0: numpy.ndarray[complex64]) -> str\n\n        Invoked with: 'not an array'\n    \"\"\"\n\n    assert m.overloaded2(np.array([1], dtype='float64')) == 'double'\n    assert m.overloaded2(np.array([1], dtype='float32')) == 'float'\n    assert m.overloaded2(np.array([1], dtype='complex64')) == 'float complex'\n    assert m.overloaded2(np.array([1], dtype='complex128')) == 'double complex'\n    assert m.overloaded2(np.array([1], dtype='float32')) == 'float'\n\n    assert m.overloaded3(np.array([1], dtype='float64')) == 'double'\n    assert m.overloaded3(np.array([1], dtype='intc')) == 'int'\n    expected_exc = \"\"\"\n        overloaded3(): incompatible function arguments. The following argument types are supported:\n            1. (arg0: numpy.ndarray[int32]) -> str\n            2. (arg0: numpy.ndarray[float64]) -> str\n\n        Invoked with: \"\"\"\n\n    with pytest.raises(TypeError) as excinfo:\n        m.overloaded3(np.array([1], dtype='uintc'))\n    assert msg(excinfo.value) == expected_exc + repr(np.array([1], dtype='uint32'))\n    with pytest.raises(TypeError) as excinfo:\n        m.overloaded3(np.array([1], dtype='float32'))\n    assert msg(excinfo.value) == expected_exc + repr(np.array([1.], dtype='float32'))\n    with pytest.raises(TypeError) as excinfo:\n        m.overloaded3(np.array([1], dtype='complex'))\n    assert msg(excinfo.value) == expected_exc + repr(np.array([1. + 0.j]))\n\n    # Exact matches:\n    assert m.overloaded4(np.array([1], dtype='double')) == 'double'\n    assert m.overloaded4(np.array([1], dtype='longlong')) == 'long long'\n    # Non-exact matches requiring conversion.  
Since float to integer isn't a\n    # safe conversion, it should go to the double overload, but short can go to\n    # either (and so should end up on the first-registered, the long long).\n    assert m.overloaded4(np.array([1], dtype='float32')) == 'double'\n    assert m.overloaded4(np.array([1], dtype='short')) == 'long long'\n\n    assert m.overloaded5(np.array([1], dtype='double')) == 'double'\n    assert m.overloaded5(np.array([1], dtype='uintc')) == 'unsigned int'\n    assert m.overloaded5(np.array([1], dtype='float32')) == 'unsigned int'\n\n\ndef test_greedy_string_overload():\n    \"\"\"Tests fix for #685 - ndarray shouldn't go to std::string overload\"\"\"\n\n    assert m.issue685(\"abc\") == \"string\"\n    assert m.issue685(np.array([97, 98, 99], dtype='b')) == \"array\"\n    assert m.issue685(123) == \"other\"\n\n\ndef test_array_unchecked_fixed_dims(msg):\n    z1 = np.array([[1, 2], [3, 4]], dtype='float64')\n    m.proxy_add2(z1, 10)\n    assert np.all(z1 == [[11, 12], [13, 14]])\n\n    with pytest.raises(ValueError) as excinfo:\n        m.proxy_add2(np.array([1., 2, 3]), 5.0)\n    assert msg(excinfo.value) == \"array has incorrect number of dimensions: 1; expected 2\"\n\n    expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')\n    assert np.all(m.proxy_init3(3.0) == expect_c)\n    expect_f = np.transpose(expect_c)\n    assert np.all(m.proxy_init3F(3.0) == expect_f)\n\n    assert m.proxy_squared_L2_norm(np.array(range(6))) == 55\n    assert m.proxy_squared_L2_norm(np.array(range(6), dtype=\"float64\")) == 55\n\n    assert m.proxy_auxiliaries2(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]\n    assert m.proxy_auxiliaries2(z1) == m.array_auxiliaries2(z1)\n\n\ndef test_array_unchecked_dyn_dims(msg):\n    z1 = np.array([[1, 2], [3, 4]], dtype='float64')\n    m.proxy_add2_dyn(z1, 10)\n    assert np.all(z1 == [[11, 12], [13, 14]])\n\n    expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')\n    assert 
np.all(m.proxy_init3_dyn(3.0) == expect_c)\n\n    assert m.proxy_auxiliaries2_dyn(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]\n    assert m.proxy_auxiliaries2_dyn(z1) == m.array_auxiliaries2(z1)\n\n\ndef test_array_failure():\n    with pytest.raises(ValueError) as excinfo:\n        m.array_fail_test()\n    assert str(excinfo.value) == 'cannot create a pybind11::array from a nullptr'\n\n    with pytest.raises(ValueError) as excinfo:\n        m.array_t_fail_test()\n    assert str(excinfo.value) == 'cannot create a pybind11::array_t from a nullptr'\n\n    with pytest.raises(ValueError) as excinfo:\n        m.array_fail_test_negative_size()\n    assert str(excinfo.value) == 'negative dimensions are not allowed'\n\n\ndef test_initializer_list():\n    assert m.array_initializer_list1().shape == (1,)\n    assert m.array_initializer_list2().shape == (1, 2)\n    assert m.array_initializer_list3().shape == (1, 2, 3)\n    assert m.array_initializer_list4().shape == (1, 2, 3, 4)\n\n\ndef test_array_resize(msg):\n    a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float64')\n    m.array_reshape2(a)\n    assert(a.size == 9)\n    assert(np.all(a == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]))\n\n    # total size change should succeed with refcheck off\n    m.array_resize3(a, 4, False)\n    assert(a.size == 64)\n    # ... and fail with refcheck on\n    try:\n        m.array_resize3(a, 3, True)\n    except ValueError as e:\n        assert(str(e).startswith(\"cannot resize an array\"))\n    # transposed array doesn't own data\n    b = a.transpose()\n    try:\n        m.array_resize3(b, 3, False)\n    except ValueError as e:\n        assert(str(e).startswith(\"cannot resize this array: it does not own its data\"))\n    # ... 
but reshape should be fine\n    m.array_reshape2(b)\n    assert(b.shape == (8, 8))\n\n\n@pytest.unsupported_on_pypy\ndef test_array_create_and_resize(msg):\n    a = m.create_and_resize(2)\n    assert(a.size == 4)\n    assert(np.all(a == 42.))\n\n\n@pytest.unsupported_on_py2\ndef test_index_using_ellipsis():\n    a = m.index_using_ellipsis(np.zeros((5, 6, 7)))\n    assert a.shape == (6,)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_numpy_dtypes.cpp",
    "content": "/*\n  tests/test_numpy_dtypes.cpp -- Structured and compound NumPy dtypes\n\n  Copyright (c) 2016 Ivan Smirnov\n\n  All rights reserved. Use of this source code is governed by a\n  BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include <pybind11/numpy.h>\n\n#ifdef __GNUC__\n#define PYBIND11_PACKED(cls) cls __attribute__((__packed__))\n#else\n#define PYBIND11_PACKED(cls) __pragma(pack(push, 1)) cls __pragma(pack(pop))\n#endif\n\nnamespace py = pybind11;\n\nstruct SimpleStruct {\n    bool bool_;\n    uint32_t uint_;\n    float float_;\n    long double ldbl_;\n};\n\nstd::ostream& operator<<(std::ostream& os, const SimpleStruct& v) {\n    return os << \"s:\" << v.bool_ << \",\" << v.uint_ << \",\" << v.float_ << \",\" << v.ldbl_;\n}\n\nPYBIND11_PACKED(struct PackedStruct {\n    bool bool_;\n    uint32_t uint_;\n    float float_;\n    long double ldbl_;\n});\n\nstd::ostream& operator<<(std::ostream& os, const PackedStruct& v) {\n    return os << \"p:\" << v.bool_ << \",\" << v.uint_ << \",\" << v.float_ << \",\" << v.ldbl_;\n}\n\nPYBIND11_PACKED(struct NestedStruct {\n    SimpleStruct a;\n    PackedStruct b;\n});\n\nstd::ostream& operator<<(std::ostream& os, const NestedStruct& v) {\n    return os << \"n:a=\" << v.a << \";b=\" << v.b;\n}\n\nstruct PartialStruct {\n    bool bool_;\n    uint32_t uint_;\n    float float_;\n    uint64_t dummy2;\n    long double ldbl_;\n};\n\nstruct PartialNestedStruct {\n    uint64_t dummy1;\n    PartialStruct a;\n    uint64_t dummy2;\n};\n\nstruct UnboundStruct { };\n\nstruct StringStruct {\n    char a[3];\n    std::array<char, 3> b;\n};\n\nstruct ComplexStruct {\n    std::complex<float> cflt;\n    std::complex<double> cdbl;\n};\n\nstd::ostream& operator<<(std::ostream& os, const ComplexStruct& v) {\n    return os << \"c:\" << v.cflt << \",\" << v.cdbl;\n}\n\nstruct ArrayStruct {\n    char a[3][4];\n    int32_t b[2];\n    std::array<uint8_t, 3> c;\n    std::array<float, 2> 
d[4];\n};\n\nPYBIND11_PACKED(struct StructWithUglyNames {\n    int8_t __x__;\n    uint64_t __y__;\n});\n\nenum class E1 : int64_t { A = -1, B = 1 };\nenum E2 : uint8_t { X = 1, Y = 2 };\n\nPYBIND11_PACKED(struct EnumStruct {\n    E1 e1;\n    E2 e2;\n});\n\nstd::ostream& operator<<(std::ostream& os, const StringStruct& v) {\n    os << \"a='\";\n    for (size_t i = 0; i < 3 && v.a[i]; i++) os << v.a[i];\n    os << \"',b='\";\n    for (size_t i = 0; i < 3 && v.b[i]; i++) os << v.b[i];\n    return os << \"'\";\n}\n\nstd::ostream& operator<<(std::ostream& os, const ArrayStruct& v) {\n    os << \"a={\";\n    for (int i = 0; i < 3; i++) {\n        if (i > 0)\n            os << ',';\n        os << '{';\n        for (int j = 0; j < 3; j++)\n            os << v.a[i][j] << ',';\n        os << v.a[i][3] << '}';\n    }\n    os << \"},b={\" << v.b[0] << ',' << v.b[1];\n    os << \"},c={\" << int(v.c[0]) << ',' << int(v.c[1]) << ',' << int(v.c[2]);\n    os << \"},d={\";\n    for (int i = 0; i < 4; i++) {\n        if (i > 0)\n            os << ',';\n        os << '{' << v.d[i][0] << ',' << v.d[i][1] << '}';\n    }\n    return os << '}';\n}\n\nstd::ostream& operator<<(std::ostream& os, const EnumStruct& v) {\n    return os << \"e1=\" << (v.e1 == E1::A ? \"A\" : \"B\") << \",e2=\" << (v.e2 == E2::X ? 
\"X\" : \"Y\");\n}\n\ntemplate <typename T>\npy::array mkarray_via_buffer(size_t n) {\n    return py::array(py::buffer_info(nullptr, sizeof(T),\n                                     py::format_descriptor<T>::format(),\n                                     1, { n }, { sizeof(T) }));\n}\n\n#define SET_TEST_VALS(s, i) do { \\\n    s.bool_ = (i) % 2 != 0; \\\n    s.uint_ = (uint32_t) (i); \\\n    s.float_ = (float) (i) * 1.5f; \\\n    s.ldbl_ = (long double) (i) * -2.5L; } while (0)\n\ntemplate <typename S>\npy::array_t<S, 0> create_recarray(size_t n) {\n    auto arr = mkarray_via_buffer<S>(n);\n    auto req = arr.request();\n    auto ptr = static_cast<S*>(req.ptr);\n    for (size_t i = 0; i < n; i++) {\n        SET_TEST_VALS(ptr[i], i);\n    }\n    return arr;\n}\n\ntemplate <typename S>\npy::list print_recarray(py::array_t<S, 0> arr) {\n    const auto req = arr.request();\n    const auto ptr = static_cast<S*>(req.ptr);\n    auto l = py::list();\n    for (ssize_t i = 0; i < req.size; i++) {\n        std::stringstream ss;\n        ss << ptr[i];\n        l.append(py::str(ss.str()));\n    }\n    return l;\n}\n\npy::array_t<int32_t, 0> test_array_ctors(int i) {\n    using arr_t = py::array_t<int32_t, 0>;\n\n    std::vector<int32_t> data { 1, 2, 3, 4, 5, 6 };\n    std::vector<ssize_t> shape { 3, 2 };\n    std::vector<ssize_t> strides { 8, 4 };\n\n    auto ptr = data.data();\n    auto vptr = (void *) ptr;\n    auto dtype = py::dtype(\"int32\");\n\n    py::buffer_info buf_ndim1(vptr, 4, \"i\", 6);\n    py::buffer_info buf_ndim1_null(nullptr, 4, \"i\", 6);\n    py::buffer_info buf_ndim2(vptr, 4, \"i\", 2, shape, strides);\n    py::buffer_info buf_ndim2_null(nullptr, 4, \"i\", 2, shape, strides);\n\n    auto fill = [](py::array arr) {\n        auto req = arr.request();\n        for (int i = 0; i < 6; i++) ((int32_t *) req.ptr)[i] = i + 1;\n        return arr;\n    };\n\n    switch (i) {\n    // shape: (3, 2)\n    case 10: return arr_t(shape, strides, ptr);\n    case 11: return 
py::array(shape, strides, ptr);\n    case 12: return py::array(dtype, shape, strides, vptr);\n    case 13: return arr_t(shape, ptr);\n    case 14: return py::array(shape, ptr);\n    case 15: return py::array(dtype, shape, vptr);\n    case 16: return arr_t(buf_ndim2);\n    case 17: return py::array(buf_ndim2);\n    // shape: (3, 2) - post-fill\n    case 20: return fill(arr_t(shape, strides));\n    case 21: return py::array(shape, strides, ptr); // can't have nullptr due to templated ctor\n    case 22: return fill(py::array(dtype, shape, strides));\n    case 23: return fill(arr_t(shape));\n    case 24: return py::array(shape, ptr); // can't have nullptr due to templated ctor\n    case 25: return fill(py::array(dtype, shape));\n    case 26: return fill(arr_t(buf_ndim2_null));\n    case 27: return fill(py::array(buf_ndim2_null));\n    // shape: (6, )\n    case 30: return arr_t(6, ptr);\n    case 31: return py::array(6, ptr);\n    case 32: return py::array(dtype, 6, vptr);\n    case 33: return arr_t(buf_ndim1);\n    case 34: return py::array(buf_ndim1);\n    // shape: (6, )\n    case 40: return fill(arr_t(6));\n    case 41: return py::array(6, ptr);  // can't have nullptr due to templated ctor\n    case 42: return fill(py::array(dtype, 6));\n    case 43: return fill(arr_t(buf_ndim1_null));\n    case 44: return fill(py::array(buf_ndim1_null));\n    }\n    return arr_t();\n}\n\npy::list test_dtype_ctors() {\n    py::list list;\n    list.append(py::dtype(\"int32\"));\n    list.append(py::dtype(std::string(\"float64\")));\n    list.append(py::dtype::from_args(py::str(\"bool\")));\n    py::list names, offsets, formats;\n    py::dict dict;\n    names.append(py::str(\"a\")); names.append(py::str(\"b\")); dict[\"names\"] = names;\n    offsets.append(py::int_(1)); offsets.append(py::int_(10)); dict[\"offsets\"] = offsets;\n    formats.append(py::dtype(\"int32\")); formats.append(py::dtype(\"float64\")); dict[\"formats\"] = formats;\n    dict[\"itemsize\"] = py::int_(20);\n    
list.append(py::dtype::from_args(dict));\n    list.append(py::dtype(names, formats, offsets, 20));\n    list.append(py::dtype(py::buffer_info((void *) 0, sizeof(unsigned int), \"I\", 1)));\n    list.append(py::dtype(py::buffer_info((void *) 0, 0, \"T{i:a:f:b:}\", 1)));\n    return list;\n}\n\nstruct A {};\nstruct B {};\n\nTEST_SUBMODULE(numpy_dtypes, m) {\n    try { py::module::import(\"numpy\"); }\n    catch (...) { return; }\n\n    // typeinfo may be registered before the dtype descriptor for scalar casts to work...\n    py::class_<SimpleStruct>(m, \"SimpleStruct\");\n\n    PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_);\n    PYBIND11_NUMPY_DTYPE(PackedStruct, bool_, uint_, float_, ldbl_);\n    PYBIND11_NUMPY_DTYPE(NestedStruct, a, b);\n    PYBIND11_NUMPY_DTYPE(PartialStruct, bool_, uint_, float_, ldbl_);\n    PYBIND11_NUMPY_DTYPE(PartialNestedStruct, a);\n    PYBIND11_NUMPY_DTYPE(StringStruct, a, b);\n    PYBIND11_NUMPY_DTYPE(ArrayStruct, a, b, c, d);\n    PYBIND11_NUMPY_DTYPE(EnumStruct, e1, e2);\n    PYBIND11_NUMPY_DTYPE(ComplexStruct, cflt, cdbl);\n\n    // ... 
or after\n    py::class_<PackedStruct>(m, \"PackedStruct\");\n\n    PYBIND11_NUMPY_DTYPE_EX(StructWithUglyNames, __x__, \"x\", __y__, \"y\");\n\n    // If uncommented, this should produce a static_assert failure telling the user that the struct\n    // is not a POD type\n//    struct NotPOD { std::string v; NotPOD() : v(\"hi\") {}; };\n//    PYBIND11_NUMPY_DTYPE(NotPOD, v);\n\n    // Check that dtypes can be registered programmatically, both from\n    // initializer lists of field descriptors and from other containers.\n    py::detail::npy_format_descriptor<A>::register_dtype(\n        {}\n    );\n    py::detail::npy_format_descriptor<B>::register_dtype(\n        std::vector<py::detail::field_descriptor>{}\n    );\n\n    // test_recarray, test_scalar_conversion\n    m.def(\"create_rec_simple\", &create_recarray<SimpleStruct>);\n    m.def(\"create_rec_packed\", &create_recarray<PackedStruct>);\n    m.def(\"create_rec_nested\", [](size_t n) { // test_signature\n        py::array_t<NestedStruct, 0> arr = mkarray_via_buffer<NestedStruct>(n);\n        auto req = arr.request();\n        auto ptr = static_cast<NestedStruct*>(req.ptr);\n        for (size_t i = 0; i < n; i++) {\n            SET_TEST_VALS(ptr[i].a, i);\n            SET_TEST_VALS(ptr[i].b, i + 1);\n        }\n        return arr;\n    });\n    m.def(\"create_rec_partial\", &create_recarray<PartialStruct>);\n    m.def(\"create_rec_partial_nested\", [](size_t n) {\n        py::array_t<PartialNestedStruct, 0> arr = mkarray_via_buffer<PartialNestedStruct>(n);\n        auto req = arr.request();\n        auto ptr = static_cast<PartialNestedStruct*>(req.ptr);\n        for (size_t i = 0; i < n; i++) {\n            SET_TEST_VALS(ptr[i].a, i);\n        }\n        return arr;\n    });\n    m.def(\"print_rec_simple\", &print_recarray<SimpleStruct>);\n    m.def(\"print_rec_packed\", &print_recarray<PackedStruct>);\n    m.def(\"print_rec_nested\", &print_recarray<NestedStruct>);\n\n    // test_format_descriptors\n    
m.def(\"get_format_unbound\", []() { return py::format_descriptor<UnboundStruct>::format(); });\n    m.def(\"print_format_descriptors\", []() {\n        py::list l;\n        for (const auto &fmt : {\n            py::format_descriptor<SimpleStruct>::format(),\n            py::format_descriptor<PackedStruct>::format(),\n            py::format_descriptor<NestedStruct>::format(),\n            py::format_descriptor<PartialStruct>::format(),\n            py::format_descriptor<PartialNestedStruct>::format(),\n            py::format_descriptor<StringStruct>::format(),\n            py::format_descriptor<ArrayStruct>::format(),\n            py::format_descriptor<EnumStruct>::format(),\n            py::format_descriptor<ComplexStruct>::format()\n        }) {\n            l.append(py::cast(fmt));\n        }\n        return l;\n    });\n\n    // test_dtype\n    m.def(\"print_dtypes\", []() {\n        py::list l;\n        for (const py::handle &d : {\n            py::dtype::of<SimpleStruct>(),\n            py::dtype::of<PackedStruct>(),\n            py::dtype::of<NestedStruct>(),\n            py::dtype::of<PartialStruct>(),\n            py::dtype::of<PartialNestedStruct>(),\n            py::dtype::of<StringStruct>(),\n            py::dtype::of<ArrayStruct>(),\n            py::dtype::of<EnumStruct>(),\n            py::dtype::of<StructWithUglyNames>(),\n            py::dtype::of<ComplexStruct>()\n        })\n            l.append(py::str(d));\n        return l;\n    });\n    m.def(\"test_dtype_ctors\", &test_dtype_ctors);\n    m.def(\"test_dtype_methods\", []() {\n        py::list list;\n        auto dt1 = py::dtype::of<int32_t>();\n        auto dt2 = py::dtype::of<SimpleStruct>();\n        list.append(dt1); list.append(dt2);\n        list.append(py::bool_(dt1.has_fields())); list.append(py::bool_(dt2.has_fields()));\n        list.append(py::int_(dt1.itemsize())); list.append(py::int_(dt2.itemsize()));\n        return list;\n    });\n    struct TrailingPaddingStruct {\n        
int32_t a;\n        char b;\n    };\n    PYBIND11_NUMPY_DTYPE(TrailingPaddingStruct, a, b);\n    m.def(\"trailing_padding_dtype\", []() { return py::dtype::of<TrailingPaddingStruct>(); });\n\n    // test_string_array\n    m.def(\"create_string_array\", [](bool non_empty) {\n        py::array_t<StringStruct, 0> arr = mkarray_via_buffer<StringStruct>(non_empty ? 4 : 0);\n        if (non_empty) {\n            auto req = arr.request();\n            auto ptr = static_cast<StringStruct*>(req.ptr);\n            for (ssize_t i = 0; i < req.size * req.itemsize; i++)\n                static_cast<char*>(req.ptr)[i] = 0;\n            ptr[1].a[0] = 'a'; ptr[1].b[0] = 'a';\n            ptr[2].a[0] = 'a'; ptr[2].b[0] = 'a';\n            ptr[3].a[0] = 'a'; ptr[3].b[0] = 'a';\n\n            ptr[2].a[1] = 'b'; ptr[2].b[1] = 'b';\n            ptr[3].a[1] = 'b'; ptr[3].b[1] = 'b';\n\n            ptr[3].a[2] = 'c'; ptr[3].b[2] = 'c';\n        }\n        return arr;\n    });\n    m.def(\"print_string_array\", &print_recarray<StringStruct>);\n\n    // test_array_array\n    m.def(\"create_array_array\", [](size_t n) {\n        py::array_t<ArrayStruct, 0> arr = mkarray_via_buffer<ArrayStruct>(n);\n        auto ptr = (ArrayStruct *) arr.mutable_data();\n        for (size_t i = 0; i < n; i++) {\n            for (size_t j = 0; j < 3; j++)\n                for (size_t k = 0; k < 4; k++)\n                    ptr[i].a[j][k] = char('A' + (i * 100 + j * 10 + k) % 26);\n            for (size_t j = 0; j < 2; j++)\n                ptr[i].b[j] = int32_t(i * 1000 + j);\n            for (size_t j = 0; j < 3; j++)\n                ptr[i].c[j] = uint8_t(i * 10 + j);\n            for (size_t j = 0; j < 4; j++)\n                for (size_t k = 0; k < 2; k++)\n                    ptr[i].d[j][k] = float(i) * 100.0f + float(j) * 10.0f + float(k);\n        }\n        return arr;\n    });\n    m.def(\"print_array_array\", &print_recarray<ArrayStruct>);\n\n    // test_enum_array\n    m.def(\"create_enum_array\", 
[](size_t n) {\n        py::array_t<EnumStruct, 0> arr = mkarray_via_buffer<EnumStruct>(n);\n        auto ptr = (EnumStruct *) arr.mutable_data();\n        for (size_t i = 0; i < n; i++) {\n            ptr[i].e1 = static_cast<E1>(-1 + ((int) i % 2) * 2);\n            ptr[i].e2 = static_cast<E2>(1 + (i % 2));\n        }\n        return arr;\n    });\n    m.def(\"print_enum_array\", &print_recarray<EnumStruct>);\n\n    // test_complex_array\n    m.def(\"create_complex_array\", [](size_t n) {\n        py::array_t<ComplexStruct, 0> arr = mkarray_via_buffer<ComplexStruct>(n);\n        auto ptr = (ComplexStruct *) arr.mutable_data();\n        for (size_t i = 0; i < n; i++) {\n            ptr[i].cflt.real(float(i));\n            ptr[i].cflt.imag(float(i) + 0.25f);\n            ptr[i].cdbl.real(double(i) + 0.5);\n            ptr[i].cdbl.imag(double(i) + 0.75);\n        }\n        return arr;\n    });\n    m.def(\"print_complex_array\", &print_recarray<ComplexStruct>);\n\n    // test_array_constructors\n    m.def(\"test_array_ctors\", &test_array_ctors);\n\n    // test_compare_buffer_info\n    struct CompareStruct {\n        bool x;\n        uint32_t y;\n        float z;\n    };\n    PYBIND11_NUMPY_DTYPE(CompareStruct, x, y, z);\n    m.def(\"compare_buffer_info\", []() {\n        py::list list;\n        list.append(py::bool_(py::detail::compare_buffer_info<float>::compare(py::buffer_info(nullptr, sizeof(float), \"f\", 1))));\n        list.append(py::bool_(py::detail::compare_buffer_info<unsigned>::compare(py::buffer_info(nullptr, sizeof(int), \"I\", 1))));\n        list.append(py::bool_(py::detail::compare_buffer_info<long>::compare(py::buffer_info(nullptr, sizeof(long), \"l\", 1))));\n        list.append(py::bool_(py::detail::compare_buffer_info<long>::compare(py::buffer_info(nullptr, sizeof(long), sizeof(long) == sizeof(int) ? 
\"i\" : \"q\", 1))));\n        list.append(py::bool_(py::detail::compare_buffer_info<CompareStruct>::compare(py::buffer_info(nullptr, sizeof(CompareStruct), \"T{?:x:3xI:y:f:z:}\", 1))));\n        return list;\n    });\n    m.def(\"buffer_to_dtype\", [](py::buffer& buf) { return py::dtype(buf.request()); });\n\n    // test_scalar_conversion\n    m.def(\"f_simple\", [](SimpleStruct s) { return s.uint_ * 10; });\n    m.def(\"f_packed\", [](PackedStruct s) { return s.uint_ * 10; });\n    m.def(\"f_nested\", [](NestedStruct s) { return s.a.uint_ * 10; });\n\n    // test_register_dtype\n    m.def(\"register_dtype\", []() { PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_); });\n\n    // test_str_leak\n    m.def(\"dtype_wrapper\", [](py::object d) { return py::dtype::from_args(std::move(d)); });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_numpy_dtypes.py",
    "content": "import re\nimport pytest\nfrom pybind11_tests import numpy_dtypes as m\n\npytestmark = pytest.requires_numpy\n\nwith pytest.suppress(ImportError):\n    import numpy as np\n\n\n@pytest.fixture(scope='module')\ndef simple_dtype():\n    ld = np.dtype('longdouble')\n    return np.dtype({'names': ['bool_', 'uint_', 'float_', 'ldbl_'],\n                     'formats': ['?', 'u4', 'f4', 'f{}'.format(ld.itemsize)],\n                     'offsets': [0, 4, 8, (16 if ld.alignment > 4 else 12)]})\n\n\n@pytest.fixture(scope='module')\ndef packed_dtype():\n    return np.dtype([('bool_', '?'), ('uint_', 'u4'), ('float_', 'f4'), ('ldbl_', 'g')])\n\n\ndef dt_fmt():\n    from sys import byteorder\n    e = '<' if byteorder == 'little' else '>'\n    return (\"{{'names':['bool_','uint_','float_','ldbl_'],\"\n            \" 'formats':['?','\" + e + \"u4','\" + e + \"f4','\" + e + \"f{}'],\"\n            \" 'offsets':[0,4,8,{}], 'itemsize':{}}}\")\n\n\ndef simple_dtype_fmt():\n    ld = np.dtype('longdouble')\n    simple_ld_off = 12 + 4 * (ld.alignment > 4)\n    return dt_fmt().format(ld.itemsize, simple_ld_off, simple_ld_off + ld.itemsize)\n\n\ndef packed_dtype_fmt():\n    from sys import byteorder\n    return \"[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]\".format(\n        np.dtype('longdouble').itemsize, e='<' if byteorder == 'little' else '>')\n\n\ndef partial_ld_offset():\n    return 12 + 4 * (np.dtype('uint64').alignment > 4) + 8 + 8 * (\n        np.dtype('longdouble').alignment > 8)\n\n\ndef partial_dtype_fmt():\n    ld = np.dtype('longdouble')\n    partial_ld_off = partial_ld_offset()\n    return dt_fmt().format(ld.itemsize, partial_ld_off, partial_ld_off + ld.itemsize)\n\n\ndef partial_nested_fmt():\n    ld = np.dtype('longdouble')\n    partial_nested_off = 8 + 8 * (ld.alignment > 8)\n    partial_ld_off = partial_ld_offset()\n    partial_nested_size = partial_nested_off * 2 + partial_ld_off + ld.itemsize\n    return 
\"{{'names':['a'], 'formats':[{}], 'offsets':[{}], 'itemsize':{}}}\".format(\n        partial_dtype_fmt(), partial_nested_off, partial_nested_size)\n\n\ndef assert_equal(actual, expected_data, expected_dtype):\n    np.testing.assert_equal(actual, np.array(expected_data, dtype=expected_dtype))\n\n\ndef test_format_descriptors():\n    with pytest.raises(RuntimeError) as excinfo:\n        m.get_format_unbound()\n    assert re.match('^NumPy type info missing for .*UnboundStruct.*$', str(excinfo.value))\n\n    ld = np.dtype('longdouble')\n    ldbl_fmt = ('4x' if ld.alignment > 4 else '') + ld.char\n    ss_fmt = \"^T{?:bool_:3xI:uint_:f:float_:\" + ldbl_fmt + \":ldbl_:}\"\n    dbl = np.dtype('double')\n    partial_fmt = (\"^T{?:bool_:3xI:uint_:f:float_:\" +\n                   str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8)) +\n                   \"xg:ldbl_:}\")\n    nested_extra = str(max(8, ld.alignment))\n    assert m.print_format_descriptors() == [\n        ss_fmt,\n        \"^T{?:bool_:I:uint_:f:float_:g:ldbl_:}\",\n        \"^T{\" + ss_fmt + \":a:^T{?:bool_:I:uint_:f:float_:g:ldbl_:}:b:}\",\n        partial_fmt,\n        \"^T{\" + nested_extra + \"x\" + partial_fmt + \":a:\" + nested_extra + \"x}\",\n        \"^T{3s:a:3s:b:}\",\n        \"^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}\",\n        '^T{q:e1:B:e2:}',\n        '^T{Zf:cflt:Zd:cdbl:}'\n    ]\n\n\ndef test_dtype(simple_dtype):\n    from sys import byteorder\n    e = '<' if byteorder == 'little' else '>'\n\n    assert m.print_dtypes() == [\n        simple_dtype_fmt(),\n        packed_dtype_fmt(),\n        \"[('a', {}), ('b', {})]\".format(simple_dtype_fmt(), packed_dtype_fmt()),\n        partial_dtype_fmt(),\n        partial_nested_fmt(),\n        \"[('a', 'S3'), ('b', 'S3')]\",\n        (\"{{'names':['a','b','c','d'], \" +\n         \"'formats':[('S4', (3,)),('\" + e + \"i4', (2,)),('u1', (3,)),('\" + e + \"f4', (4, 2))], \" +\n         \"'offsets':[0,12,20,24], 
'itemsize':56}}\").format(e=e),\n        \"[('e1', '\" + e + \"i8'), ('e2', 'u1')]\",\n        \"[('x', 'i1'), ('y', '\" + e + \"u8')]\",\n        \"[('cflt', '\" + e + \"c8'), ('cdbl', '\" + e + \"c16')]\"\n    ]\n\n    d1 = np.dtype({'names': ['a', 'b'], 'formats': ['int32', 'float64'],\n                   'offsets': [1, 10], 'itemsize': 20})\n    d2 = np.dtype([('a', 'i4'), ('b', 'f4')])\n    assert m.test_dtype_ctors() == [np.dtype('int32'), np.dtype('float64'),\n                                    np.dtype('bool'), d1, d1, np.dtype('uint32'), d2]\n\n    assert m.test_dtype_methods() == [np.dtype('int32'), simple_dtype, False, True,\n                                      np.dtype('int32').itemsize, simple_dtype.itemsize]\n\n    assert m.trailing_padding_dtype() == m.buffer_to_dtype(np.zeros(1, m.trailing_padding_dtype()))\n\n\ndef test_recarray(simple_dtype, packed_dtype):\n    elements = [(False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)]\n\n    for func, dtype in [(m.create_rec_simple, simple_dtype), (m.create_rec_packed, packed_dtype)]:\n        arr = func(0)\n        assert arr.dtype == dtype\n        assert_equal(arr, [], simple_dtype)\n        assert_equal(arr, [], packed_dtype)\n\n        arr = func(3)\n        assert arr.dtype == dtype\n        assert_equal(arr, elements, simple_dtype)\n        assert_equal(arr, elements, packed_dtype)\n\n        if dtype == simple_dtype:\n            assert m.print_rec_simple(arr) == [\n                \"s:0,0,0,-0\",\n                \"s:1,1,1.5,-2.5\",\n                \"s:0,2,3,-5\"\n            ]\n        else:\n            assert m.print_rec_packed(arr) == [\n                \"p:0,0,0,-0\",\n                \"p:1,1,1.5,-2.5\",\n                \"p:0,2,3,-5\"\n            ]\n\n    nested_dtype = np.dtype([('a', simple_dtype), ('b', packed_dtype)])\n\n    arr = m.create_rec_nested(0)\n    assert arr.dtype == nested_dtype\n    assert_equal(arr, [], nested_dtype)\n\n    arr = m.create_rec_nested(3)\n  
  assert arr.dtype == nested_dtype\n    assert_equal(arr, [((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)),\n                       ((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)),\n                       ((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5))], nested_dtype)\n    assert m.print_rec_nested(arr) == [\n        \"n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5\",\n        \"n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5\",\n        \"n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5\"\n    ]\n\n    arr = m.create_rec_partial(3)\n    assert str(arr.dtype) == partial_dtype_fmt()\n    partial_dtype = arr.dtype\n    assert '' not in arr.dtype.fields\n    assert partial_dtype.itemsize > simple_dtype.itemsize\n    assert_equal(arr, elements, simple_dtype)\n    assert_equal(arr, elements, packed_dtype)\n\n    arr = m.create_rec_partial_nested(3)\n    assert str(arr.dtype) == partial_nested_fmt()\n    assert '' not in arr.dtype.fields\n    assert '' not in arr.dtype.fields['a'][0].fields\n    assert arr.dtype.itemsize > partial_dtype.itemsize\n    np.testing.assert_equal(arr['a'], m.create_rec_partial(3))\n\n\ndef test_array_constructors():\n    data = np.arange(1, 7, dtype='int32')\n    for i in range(8):\n        np.testing.assert_array_equal(m.test_array_ctors(10 + i), data.reshape((3, 2)))\n        np.testing.assert_array_equal(m.test_array_ctors(20 + i), data.reshape((3, 2)))\n    for i in range(5):\n        np.testing.assert_array_equal(m.test_array_ctors(30 + i), data)\n        np.testing.assert_array_equal(m.test_array_ctors(40 + i), data)\n\n\ndef test_string_array():\n    arr = m.create_string_array(True)\n    assert str(arr.dtype) == \"[('a', 'S3'), ('b', 'S3')]\"\n    assert m.print_string_array(arr) == [\n        \"a='',b=''\",\n        \"a='a',b='a'\",\n        \"a='ab',b='ab'\",\n        \"a='abc',b='abc'\"\n    ]\n    dtype = arr.dtype\n    assert arr['a'].tolist() == [b'', b'a', b'ab', b'abc']\n    assert arr['b'].tolist() == [b'', b'a', b'ab', b'abc']\n    arr = m.create_string_array(False)\n    assert 
dtype == arr.dtype\n\n\ndef test_array_array():\n    from sys import byteorder\n    e = '<' if byteorder == 'little' else '>'\n\n    arr = m.create_array_array(3)\n    assert str(arr.dtype) == (\n        \"{{'names':['a','b','c','d'], \" +\n        \"'formats':[('S4', (3,)),('\" + e + \"i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], \" +\n        \"'offsets':[0,12,20,24], 'itemsize':56}}\").format(e=e)\n    assert m.print_array_array(arr) == [\n        \"a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1},\" +\n        \"c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}\",\n        \"a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001},\" +\n        \"c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}\",\n        \"a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001},\" +\n        \"c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}\",\n    ]\n    assert arr['a'].tolist() == [[b'ABCD', b'KLMN', b'UVWX'],\n                                 [b'WXYZ', b'GHIJ', b'QRST'],\n                                 [b'STUV', b'CDEF', b'MNOP']]\n    assert arr['b'].tolist() == [[0, 1], [1000, 1001], [2000, 2001]]\n    assert m.create_array_array(0).dtype == arr.dtype\n\n\ndef test_enum_array():\n    from sys import byteorder\n    e = '<' if byteorder == 'little' else '>'\n\n    arr = m.create_enum_array(3)\n    dtype = arr.dtype\n    assert dtype == np.dtype([('e1', e + 'i8'), ('e2', 'u1')])\n    assert m.print_enum_array(arr) == [\n        \"e1=A,e2=X\",\n        \"e1=B,e2=Y\",\n        \"e1=A,e2=X\"\n    ]\n    assert arr['e1'].tolist() == [-1, 1, -1]\n    assert arr['e2'].tolist() == [1, 2, 1]\n    assert m.create_enum_array(0).dtype == dtype\n\n\ndef test_complex_array():\n    from sys import byteorder\n    e = '<' if byteorder == 'little' else '>'\n\n    arr = m.create_complex_array(3)\n    dtype = arr.dtype\n    assert dtype == np.dtype([('cflt', e + 'c8'), ('cdbl', e + 'c16')])\n    assert m.print_complex_array(arr) == [\n        \"c:(0,0.25),(0.5,0.75)\",\n        
\"c:(1,1.25),(1.5,1.75)\",\n        \"c:(2,2.25),(2.5,2.75)\"\n    ]\n    assert arr['cflt'].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j]\n    assert arr['cdbl'].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j]\n    assert m.create_complex_array(0).dtype == dtype\n\n\ndef test_signature(doc):\n    assert doc(m.create_rec_nested) == \\\n        \"create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]\"\n\n\ndef test_scalar_conversion():\n    n = 3\n    arrays = [m.create_rec_simple(n), m.create_rec_packed(n),\n              m.create_rec_nested(n), m.create_enum_array(n)]\n    funcs = [m.f_simple, m.f_packed, m.f_nested]\n\n    for i, func in enumerate(funcs):\n        for j, arr in enumerate(arrays):\n            if i == j and i < 2:\n                assert [func(arr[k]) for k in range(n)] == [k * 10 for k in range(n)]\n            else:\n                with pytest.raises(TypeError) as excinfo:\n                    func(arr[0])\n                assert 'incompatible function arguments' in str(excinfo.value)\n\n\ndef test_register_dtype():\n    with pytest.raises(RuntimeError) as excinfo:\n        m.register_dtype()\n    assert 'dtype is already registered' in str(excinfo.value)\n\n\n@pytest.unsupported_on_pypy\ndef test_str_leak():\n    from sys import getrefcount\n    fmt = \"f4\"\n    pytest.gc_collect()\n    start = getrefcount(fmt)\n    d = m.dtype_wrapper(fmt)\n    assert d is np.dtype(\"f4\")\n    del d\n    pytest.gc_collect()\n    assert getrefcount(fmt) == start\n\n\ndef test_compare_buffer_info():\n    assert all(m.compare_buffer_info())\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_numpy_vectorize.cpp",
    "content": "/*\n    tests/test_numpy_vectorize.cpp -- auto-vectorize functions over NumPy array\n    arguments\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include <pybind11/numpy.h>\n\ndouble my_func(int x, float y, double z) {\n    py::print(\"my_func(x:int={}, y:float={:.0f}, z:float={:.0f})\"_s.format(x, y, z));\n    return (float) x*y*z;\n}\n\nTEST_SUBMODULE(numpy_vectorize, m) {\n    try { py::module::import(\"numpy\"); }\n    catch (...) { return; }\n\n    // test_vectorize, test_docs, test_array_collapse\n    // Vectorize all arguments of a function (though non-vector arguments are also allowed)\n    m.def(\"vectorized_func\", py::vectorize(my_func));\n\n    // Vectorize a lambda function with a capture object (e.g. to exclude some arguments from the vectorization)\n    m.def(\"vectorized_func2\",\n        [](py::array_t<int> x, py::array_t<float> y, float z) {\n            return py::vectorize([z](int x, float y) { return my_func(x, y, z); })(x, y);\n        }\n    );\n\n    // Vectorize a complex-valued function\n    m.def(\"vectorized_func3\", py::vectorize(\n        [](std::complex<double> c) { return c * std::complex<double>(2.f); }\n    ));\n\n    // test_type_selection\n    // Numpy function which only accepts specific data types\n    m.def(\"selective_func\", [](py::array_t<int, py::array::c_style>) { return \"Int branch taken.\"; });\n    m.def(\"selective_func\", [](py::array_t<float, py::array::c_style>) { return \"Float branch taken.\"; });\n    m.def(\"selective_func\", [](py::array_t<std::complex<float>, py::array::c_style>) { return \"Complex float branch taken.\"; });\n\n\n    // test_passthrough_arguments\n    // Passthrough test: references and non-pod types should be automatically passed through (in the\n    // function definition below, only `b`, 
`d`, and `g` are vectorized):\n    struct NonPODClass {\n        NonPODClass(int v) : value{v} {}\n        int value;\n    };\n    py::class_<NonPODClass>(m, \"NonPODClass\").def(py::init<int>());\n    m.def(\"vec_passthrough\", py::vectorize(\n        [](double *a, double b, py::array_t<double> c, const int &d, int &e, NonPODClass f, const double g) {\n            return *a + b + c.at(0) + d + e + f.value + g;\n        }\n    ));\n\n    // test_method_vectorization\n    struct VectorizeTestClass {\n        VectorizeTestClass(int v) : value{v} {};\n        float method(int x, float y) { return y + (float) (x + value); }\n        int value = 0;\n    };\n    py::class_<VectorizeTestClass> vtc(m, \"VectorizeTestClass\");\n    vtc .def(py::init<int>())\n        .def_readwrite(\"value\", &VectorizeTestClass::value);\n\n    // Automatic vectorizing of methods\n    vtc.def(\"method\", py::vectorize(&VectorizeTestClass::method));\n\n    // test_trivial_broadcasting\n    // Internal optimization test for whether the input is trivially broadcastable:\n    py::enum_<py::detail::broadcast_trivial>(m, \"trivial\")\n        .value(\"f_trivial\", py::detail::broadcast_trivial::f_trivial)\n        .value(\"c_trivial\", py::detail::broadcast_trivial::c_trivial)\n        .value(\"non_trivial\", py::detail::broadcast_trivial::non_trivial);\n    m.def(\"vectorized_is_trivial\", [](\n                py::array_t<int, py::array::forcecast> arg1,\n                py::array_t<float, py::array::forcecast> arg2,\n                py::array_t<double, py::array::forcecast> arg3\n                ) {\n        ssize_t ndim;\n        std::vector<ssize_t> shape;\n        std::array<py::buffer_info, 3> buffers {{ arg1.request(), arg2.request(), arg3.request() }};\n        return py::detail::broadcast(buffers, ndim, shape);\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_numpy_vectorize.py",
    "content": "import pytest\nfrom pybind11_tests import numpy_vectorize as m\n\npytestmark = pytest.requires_numpy\n\nwith pytest.suppress(ImportError):\n    import numpy as np\n\n\ndef test_vectorize(capture):\n    assert np.isclose(m.vectorized_func3(np.array(3 + 7j)), [6 + 14j])\n\n    for f in [m.vectorized_func, m.vectorized_func2]:\n        with capture:\n            assert np.isclose(f(1, 2, 3), 6)\n        assert capture == \"my_func(x:int=1, y:float=2, z:float=3)\"\n        with capture:\n            assert np.isclose(f(np.array(1), np.array(2), 3), 6)\n        assert capture == \"my_func(x:int=1, y:float=2, z:float=3)\"\n        with capture:\n            assert np.allclose(f(np.array([1, 3]), np.array([2, 4]), 3), [6, 36])\n        assert capture == \"\"\"\n            my_func(x:int=1, y:float=2, z:float=3)\n            my_func(x:int=3, y:float=4, z:float=3)\n        \"\"\"\n        with capture:\n            a = np.array([[1, 2], [3, 4]], order='F')\n            b = np.array([[10, 20], [30, 40]], order='F')\n            c = 3\n            result = f(a, b, c)\n            assert np.allclose(result, a * b * c)\n            assert result.flags.f_contiguous\n        # All inputs are F order and full or singletons, so we the result is in col-major order:\n        assert capture == \"\"\"\n            my_func(x:int=1, y:float=10, z:float=3)\n            my_func(x:int=3, y:float=30, z:float=3)\n            my_func(x:int=2, y:float=20, z:float=3)\n            my_func(x:int=4, y:float=40, z:float=3)\n        \"\"\"\n        with capture:\n            a, b, c = np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3\n            assert np.allclose(f(a, b, c), a * b * c)\n        assert capture == \"\"\"\n            my_func(x:int=1, y:float=2, z:float=3)\n            my_func(x:int=3, y:float=4, z:float=3)\n            my_func(x:int=5, y:float=6, z:float=3)\n            my_func(x:int=7, y:float=8, z:float=3)\n            my_func(x:int=9, 
y:float=10, z:float=3)\n            my_func(x:int=11, y:float=12, z:float=3)\n        \"\"\"\n        with capture:\n            a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2\n            assert np.allclose(f(a, b, c), a * b * c)\n        assert capture == \"\"\"\n            my_func(x:int=1, y:float=2, z:float=2)\n            my_func(x:int=2, y:float=3, z:float=2)\n            my_func(x:int=3, y:float=4, z:float=2)\n            my_func(x:int=4, y:float=2, z:float=2)\n            my_func(x:int=5, y:float=3, z:float=2)\n            my_func(x:int=6, y:float=4, z:float=2)\n        \"\"\"\n        with capture:\n            a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2\n            assert np.allclose(f(a, b, c), a * b * c)\n        assert capture == \"\"\"\n            my_func(x:int=1, y:float=2, z:float=2)\n            my_func(x:int=2, y:float=2, z:float=2)\n            my_func(x:int=3, y:float=2, z:float=2)\n            my_func(x:int=4, y:float=3, z:float=2)\n            my_func(x:int=5, y:float=3, z:float=2)\n            my_func(x:int=6, y:float=3, z:float=2)\n        \"\"\"\n        with capture:\n            a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F'), np.array([[2], [3]]), 2\n            assert np.allclose(f(a, b, c), a * b * c)\n        assert capture == \"\"\"\n            my_func(x:int=1, y:float=2, z:float=2)\n            my_func(x:int=2, y:float=2, z:float=2)\n            my_func(x:int=3, y:float=2, z:float=2)\n            my_func(x:int=4, y:float=3, z:float=2)\n            my_func(x:int=5, y:float=3, z:float=2)\n            my_func(x:int=6, y:float=3, z:float=2)\n        \"\"\"\n        with capture:\n            a, b, c = np.array([[1, 2, 3], [4, 5, 6]])[::, ::2], np.array([[2], [3]]), 2\n            assert np.allclose(f(a, b, c), a * b * c)\n        assert capture == \"\"\"\n            my_func(x:int=1, y:float=2, z:float=2)\n            my_func(x:int=3, y:float=2, z:float=2)\n            
my_func(x:int=4, y:float=3, z:float=2)\n            my_func(x:int=6, y:float=3, z:float=2)\n        \"\"\"\n        with capture:\n            a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F')[::, ::2], np.array([[2], [3]]), 2\n            assert np.allclose(f(a, b, c), a * b * c)\n        assert capture == \"\"\"\n            my_func(x:int=1, y:float=2, z:float=2)\n            my_func(x:int=3, y:float=2, z:float=2)\n            my_func(x:int=4, y:float=3, z:float=2)\n            my_func(x:int=6, y:float=3, z:float=2)\n        \"\"\"\n\n\ndef test_type_selection():\n    assert m.selective_func(np.array([1], dtype=np.int32)) == \"Int branch taken.\"\n    assert m.selective_func(np.array([1.0], dtype=np.float32)) == \"Float branch taken.\"\n    assert m.selective_func(np.array([1.0j], dtype=np.complex64)) == \"Complex float branch taken.\"\n\n\ndef test_docs(doc):\n    assert doc(m.vectorized_func) == \"\"\"\n        vectorized_func(arg0: numpy.ndarray[int32], arg1: numpy.ndarray[float32], arg2: numpy.ndarray[float64]) -> object\n    \"\"\"  # noqa: E501 line too long\n\n\ndef test_trivial_broadcasting():\n    trivial, vectorized_is_trivial = m.trivial, m.vectorized_is_trivial\n\n    assert vectorized_is_trivial(1, 2, 3) == trivial.c_trivial\n    assert vectorized_is_trivial(np.array(1), np.array(2), 3) == trivial.c_trivial\n    assert vectorized_is_trivial(np.array([1, 3]), np.array([2, 4]), 3) == trivial.c_trivial\n    assert trivial.c_trivial == vectorized_is_trivial(\n        np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3)\n    assert vectorized_is_trivial(\n        np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2) == trivial.non_trivial\n    assert vectorized_is_trivial(\n        np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2) == trivial.non_trivial\n    z1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype='int32')\n    z2 = np.array(z1, dtype='float32')\n    z3 = np.array(z1, dtype='float64')\n    assert 
vectorized_is_trivial(z1, z2, z3) == trivial.c_trivial\n    assert vectorized_is_trivial(1, z2, z3) == trivial.c_trivial\n    assert vectorized_is_trivial(z1, 1, z3) == trivial.c_trivial\n    assert vectorized_is_trivial(z1, z2, 1) == trivial.c_trivial\n    assert vectorized_is_trivial(z1[::2, ::2], 1, 1) == trivial.non_trivial\n    assert vectorized_is_trivial(1, 1, z1[::2, ::2]) == trivial.c_trivial\n    assert vectorized_is_trivial(1, 1, z3[::2, ::2]) == trivial.non_trivial\n    assert vectorized_is_trivial(z1, 1, z3[1::4, 1::4]) == trivial.c_trivial\n\n    y1 = np.array(z1, order='F')\n    y2 = np.array(y1)\n    y3 = np.array(y1)\n    assert vectorized_is_trivial(y1, y2, y3) == trivial.f_trivial\n    assert vectorized_is_trivial(y1, 1, 1) == trivial.f_trivial\n    assert vectorized_is_trivial(1, y2, 1) == trivial.f_trivial\n    assert vectorized_is_trivial(1, 1, y3) == trivial.f_trivial\n    assert vectorized_is_trivial(y1, z2, 1) == trivial.non_trivial\n    assert vectorized_is_trivial(z1[1::4, 1::4], y2, 1) == trivial.f_trivial\n    assert vectorized_is_trivial(y1[1::4, 1::4], z2, 1) == trivial.c_trivial\n\n    assert m.vectorized_func(z1, z2, z3).flags.c_contiguous\n    assert m.vectorized_func(y1, y2, y3).flags.f_contiguous\n    assert m.vectorized_func(z1, 1, 1).flags.c_contiguous\n    assert m.vectorized_func(1, y2, 1).flags.f_contiguous\n    assert m.vectorized_func(z1[1::4, 1::4], y2, 1).flags.f_contiguous\n    assert m.vectorized_func(y1[1::4, 1::4], z2, 1).flags.c_contiguous\n\n\ndef test_passthrough_arguments(doc):\n    assert doc(m.vec_passthrough) == (\n        \"vec_passthrough(\" + \", \".join([\n            \"arg0: float\",\n            \"arg1: numpy.ndarray[float64]\",\n            \"arg2: numpy.ndarray[float64]\",\n            \"arg3: numpy.ndarray[int32]\",\n            \"arg4: int\",\n            \"arg5: m.numpy_vectorize.NonPODClass\",\n            \"arg6: numpy.ndarray[float64]\"]) + \") -> object\")\n\n    b = np.array([[10, 20, 30]], 
dtype='float64')\n    c = np.array([100, 200])  # NOT a vectorized argument\n    d = np.array([[1000], [2000], [3000]], dtype='int')\n    g = np.array([[1000000, 2000000, 3000000]], dtype='int')  # requires casting\n    assert np.all(\n        m.vec_passthrough(1, b, c, d, 10000, m.NonPODClass(100000), g) ==\n        np.array([[1111111, 2111121, 3111131],\n                  [1112111, 2112121, 3112131],\n                  [1113111, 2113121, 3113131]]))\n\n\ndef test_method_vectorization():\n    o = m.VectorizeTestClass(3)\n    x = np.array([1, 2], dtype='int')\n    y = np.array([[10], [20]], dtype='float32')\n    assert np.all(o.method(x, y) == [[14, 15], [24, 25]])\n\n\ndef test_array_collapse():\n    assert not isinstance(m.vectorized_func(1, 2, 3), np.ndarray)\n    assert not isinstance(m.vectorized_func(np.array(1), 2, 3), np.ndarray)\n    z = m.vectorized_func([1], 2, 3)\n    assert isinstance(z, np.ndarray)\n    assert z.shape == (1, )\n    z = m.vectorized_func(1, [[[2]]], 3)\n    assert isinstance(z, np.ndarray)\n    assert z.shape == (1, 1, 1)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_opaque_types.cpp",
    "content": "/*\n    tests/test_opaque_types.cpp -- opaque types, passing void pointers\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include <pybind11/stl.h>\n#include <vector>\n\n// IMPORTANT: Disable internal pybind11 translation mechanisms for STL data structures\n//\n// This also deliberately doesn't use the below StringList type alias to test\n// that MAKE_OPAQUE can handle a type containing a `,`.  (The `std::allocator`\n// bit is just the default `std::vector` allocator).\nPYBIND11_MAKE_OPAQUE(std::vector<std::string, std::allocator<std::string>>);\n\nusing StringList = std::vector<std::string, std::allocator<std::string>>;\n\nTEST_SUBMODULE(opaque_types, m) {\n    // test_string_list\n    py::class_<StringList>(m, \"StringList\")\n        .def(py::init<>())\n        .def(\"pop_back\", &StringList::pop_back)\n        /* There are multiple versions of push_back(), etc. Select the right ones. 
*/\n        .def(\"push_back\", (void (StringList::*)(const std::string &)) &StringList::push_back)\n        .def(\"back\", (std::string &(StringList::*)()) &StringList::back)\n        .def(\"__len__\", [](const StringList &v) { return v.size(); })\n        .def(\"__iter__\", [](StringList &v) {\n           return py::make_iterator(v.begin(), v.end());\n        }, py::keep_alive<0, 1>());\n\n    class ClassWithSTLVecProperty {\n    public:\n        StringList stringList;\n    };\n    py::class_<ClassWithSTLVecProperty>(m, \"ClassWithSTLVecProperty\")\n        .def(py::init<>())\n        .def_readwrite(\"stringList\", &ClassWithSTLVecProperty::stringList);\n\n    m.def(\"print_opaque_list\", [](const StringList &l) {\n        std::string ret = \"Opaque list: [\";\n        bool first = true;\n        for (auto entry : l) {\n            if (!first)\n                ret += \", \";\n            ret += entry;\n            first = false;\n        }\n        return ret + \"]\";\n    });\n\n    // test_pointers\n    m.def(\"return_void_ptr\", []() { return (void *) 0x1234; });\n    m.def(\"get_void_ptr_value\", [](void *ptr) { return reinterpret_cast<std::intptr_t>(ptr); });\n    m.def(\"return_null_str\", []() { return (char *) nullptr; });\n    m.def(\"get_null_str_value\", [](char *ptr) { return reinterpret_cast<std::intptr_t>(ptr); });\n\n    m.def(\"return_unique_ptr\", []() -> std::unique_ptr<StringList> {\n        StringList *result = new StringList();\n        result->push_back(\"some value\");\n        return std::unique_ptr<StringList>(result);\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_opaque_types.py",
    "content": "import pytest\nfrom pybind11_tests import opaque_types as m\nfrom pybind11_tests import ConstructorStats, UserType\n\n\ndef test_string_list():\n    lst = m.StringList()\n    lst.push_back(\"Element 1\")\n    lst.push_back(\"Element 2\")\n    assert m.print_opaque_list(lst) == \"Opaque list: [Element 1, Element 2]\"\n    assert lst.back() == \"Element 2\"\n\n    for i, k in enumerate(lst, start=1):\n        assert k == \"Element {}\".format(i)\n    lst.pop_back()\n    assert m.print_opaque_list(lst) == \"Opaque list: [Element 1]\"\n\n    cvp = m.ClassWithSTLVecProperty()\n    assert m.print_opaque_list(cvp.stringList) == \"Opaque list: []\"\n\n    cvp.stringList = lst\n    cvp.stringList.push_back(\"Element 3\")\n    assert m.print_opaque_list(cvp.stringList) == \"Opaque list: [Element 1, Element 3]\"\n\n\ndef test_pointers(msg):\n    living_before = ConstructorStats.get(UserType).alive()\n    assert m.get_void_ptr_value(m.return_void_ptr()) == 0x1234\n    assert m.get_void_ptr_value(UserType())  # Should also work for other C++ types\n    assert ConstructorStats.get(UserType).alive() == living_before\n\n    with pytest.raises(TypeError) as excinfo:\n        m.get_void_ptr_value([1, 2, 3])  # This should not work\n    assert msg(excinfo.value) == \"\"\"\n        get_void_ptr_value(): incompatible function arguments. The following argument types are supported:\n            1. (arg0: capsule) -> int\n\n        Invoked with: [1, 2, 3]\n    \"\"\"  # noqa: E501 line too long\n\n    assert m.return_null_str() is None\n    assert m.get_null_str_value(m.return_null_str()) is not None\n\n    ptr = m.return_unique_ptr()\n    assert \"StringList\" in repr(ptr)\n    assert m.print_opaque_list(ptr) == \"Opaque list: [some value]\"\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_operator_overloading.cpp",
    "content": "/*\n    tests/test_operator_overloading.cpp -- operator overloading\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <pybind11/operators.h>\n#include <functional>\n\nclass Vector2 {\npublic:\n    Vector2(float x, float y) : x(x), y(y) { print_created(this, toString()); }\n    Vector2(const Vector2 &v) : x(v.x), y(v.y) { print_copy_created(this); }\n    Vector2(Vector2 &&v) : x(v.x), y(v.y) { print_move_created(this); v.x = v.y = 0; }\n    Vector2 &operator=(const Vector2 &v) { x = v.x; y = v.y; print_copy_assigned(this); return *this; }\n    Vector2 &operator=(Vector2 &&v) { x = v.x; y = v.y; v.x = v.y = 0; print_move_assigned(this); return *this; }\n    ~Vector2() { print_destroyed(this); }\n\n    std::string toString() const { return \"[\" + std::to_string(x) + \", \" + std::to_string(y) + \"]\"; }\n\n    Vector2 operator+(const Vector2 &v) const { return Vector2(x + v.x, y + v.y); }\n    Vector2 operator-(const Vector2 &v) const { return Vector2(x - v.x, y - v.y); }\n    Vector2 operator-(float value) const { return Vector2(x - value, y - value); }\n    Vector2 operator+(float value) const { return Vector2(x + value, y + value); }\n    Vector2 operator*(float value) const { return Vector2(x * value, y * value); }\n    Vector2 operator/(float value) const { return Vector2(x / value, y / value); }\n    Vector2 operator*(const Vector2 &v) const { return Vector2(x * v.x, y * v.y); }\n    Vector2 operator/(const Vector2 &v) const { return Vector2(x / v.x, y / v.y); }\n    Vector2& operator+=(const Vector2 &v) { x += v.x; y += v.y; return *this; }\n    Vector2& operator-=(const Vector2 &v) { x -= v.x; y -= v.y; return *this; }\n    Vector2& operator*=(float v) { x *= v; y *= v; return *this; }\n    Vector2& operator/=(float v) { x 
/= v; y /= v; return *this; }\n    Vector2& operator*=(const Vector2 &v) { x *= v.x; y *= v.y; return *this; }\n    Vector2& operator/=(const Vector2 &v) { x /= v.x; y /= v.y; return *this; }\n\n    friend Vector2 operator+(float f, const Vector2 &v) { return Vector2(f + v.x, f + v.y); }\n    friend Vector2 operator-(float f, const Vector2 &v) { return Vector2(f - v.x, f - v.y); }\n    friend Vector2 operator*(float f, const Vector2 &v) { return Vector2(f * v.x, f * v.y); }\n    friend Vector2 operator/(float f, const Vector2 &v) { return Vector2(f / v.x, f / v.y); }\nprivate:\n    float x, y;\n};\n\nclass C1 { };\nclass C2 { };\n\nint operator+(const C1 &, const C1 &) { return 11; }\nint operator+(const C2 &, const C2 &) { return 22; }\nint operator+(const C2 &, const C1 &) { return 21; }\nint operator+(const C1 &, const C2 &) { return 12; }\n\nnamespace std {\n    template<>\n    struct hash<Vector2> {\n        // Not a good hash function, but easy to test\n        size_t operator()(const Vector2 &) { return 4; }\n    };\n}\n\n// MSVC warns about unknown pragmas, and warnings are errors.\n#ifndef _MSC_VER\n  #pragma GCC diagnostic push\n  // clang 7.0.0 and Apple LLVM 10.0.1 introduce `-Wself-assign-overloaded` to\n  // `-Wall`, which is used here for overloading (e.g. `py::self += py::self `).\n  // Here, we suppress the warning using `#pragma diagnostic`.\n  // Taken from: https://github.com/RobotLocomotion/drake/commit/aaf84b46\n  // TODO(eric): This could be resolved using a function / functor (e.g. 
`py::self()`).\n  #if (__APPLE__) && (__clang__)\n    #if (__clang_major__ >= 10) && (__clang_minor__ >= 0) && (__clang_patchlevel__ >= 1)\n      #pragma GCC diagnostic ignored \"-Wself-assign-overloaded\"\n    #endif\n  #elif (__clang__)\n    #if (__clang_major__ >= 7)\n      #pragma GCC diagnostic ignored \"-Wself-assign-overloaded\"\n    #endif\n  #endif\n#endif\n\nTEST_SUBMODULE(operators, m) {\n\n    // test_operator_overloading\n    py::class_<Vector2>(m, \"Vector2\")\n        .def(py::init<float, float>())\n        .def(py::self + py::self)\n        .def(py::self + float())\n        .def(py::self - py::self)\n        .def(py::self - float())\n        .def(py::self * float())\n        .def(py::self / float())\n        .def(py::self * py::self)\n        .def(py::self / py::self)\n        .def(py::self += py::self)\n        .def(py::self -= py::self)\n        .def(py::self *= float())\n        .def(py::self /= float())\n        .def(py::self *= py::self)\n        .def(py::self /= py::self)\n        .def(float() + py::self)\n        .def(float() - py::self)\n        .def(float() * py::self)\n        .def(float() / py::self)\n        .def(\"__str__\", &Vector2::toString)\n        .def(hash(py::self))\n        ;\n\n    m.attr(\"Vector\") = m.attr(\"Vector2\");\n\n    // test_operators_notimplemented\n    // #393: need to return NotSupported to ensure correct arithmetic operator behavior\n    py::class_<C1>(m, \"C1\")\n        .def(py::init<>())\n        .def(py::self + py::self);\n\n    py::class_<C2>(m, \"C2\")\n        .def(py::init<>())\n        .def(py::self + py::self)\n        .def(\"__add__\", [](const C2& c2, const C1& c1) { return c2 + c1; })\n        .def(\"__radd__\", [](const C2& c2, const C1& c1) { return c1 + c2; });\n\n    // test_nested\n    // #328: first member in a class can't be used in operators\n    struct NestABase { int value = -2; };\n    py::class_<NestABase>(m, \"NestABase\")\n        .def(py::init<>())\n        .def_readwrite(\"value\", 
&NestABase::value);\n\n    struct NestA : NestABase {\n        int value = 3;\n        NestA& operator+=(int i) { value += i; return *this; }\n    };\n    py::class_<NestA>(m, \"NestA\")\n        .def(py::init<>())\n        .def(py::self += int())\n        .def(\"as_base\", [](NestA &a) -> NestABase& {\n            return (NestABase&) a;\n        }, py::return_value_policy::reference_internal);\n    m.def(\"get_NestA\", [](const NestA &a) { return a.value; });\n\n    struct NestB {\n        NestA a;\n        int value = 4;\n        NestB& operator-=(int i) { value -= i; return *this; }\n    };\n    py::class_<NestB>(m, \"NestB\")\n        .def(py::init<>())\n        .def(py::self -= int())\n        .def_readwrite(\"a\", &NestB::a);\n    m.def(\"get_NestB\", [](const NestB &b) { return b.value; });\n\n    struct NestC {\n        NestB b;\n        int value = 5;\n        NestC& operator*=(int i) { value *= i; return *this; }\n    };\n    py::class_<NestC>(m, \"NestC\")\n        .def(py::init<>())\n        .def(py::self *= int())\n        .def_readwrite(\"b\", &NestC::b);\n    m.def(\"get_NestC\", [](const NestC &c) { return c.value; });\n}\n\n#ifndef _MSC_VER\n  #pragma GCC diagnostic pop\n#endif\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_operator_overloading.py",
    "content": "import pytest\nfrom pybind11_tests import operators as m\nfrom pybind11_tests import ConstructorStats\n\n\ndef test_operator_overloading():\n    v1 = m.Vector2(1, 2)\n    v2 = m.Vector(3, -1)\n    assert str(v1) == \"[1.000000, 2.000000]\"\n    assert str(v2) == \"[3.000000, -1.000000]\"\n\n    assert str(v1 + v2) == \"[4.000000, 1.000000]\"\n    assert str(v1 - v2) == \"[-2.000000, 3.000000]\"\n    assert str(v1 - 8) == \"[-7.000000, -6.000000]\"\n    assert str(v1 + 8) == \"[9.000000, 10.000000]\"\n    assert str(v1 * 8) == \"[8.000000, 16.000000]\"\n    assert str(v1 / 8) == \"[0.125000, 0.250000]\"\n    assert str(8 - v1) == \"[7.000000, 6.000000]\"\n    assert str(8 + v1) == \"[9.000000, 10.000000]\"\n    assert str(8 * v1) == \"[8.000000, 16.000000]\"\n    assert str(8 / v1) == \"[8.000000, 4.000000]\"\n    assert str(v1 * v2) == \"[3.000000, -2.000000]\"\n    assert str(v2 / v1) == \"[3.000000, -0.500000]\"\n\n    v1 += 2 * v2\n    assert str(v1) == \"[7.000000, 0.000000]\"\n    v1 -= v2\n    assert str(v1) == \"[4.000000, 1.000000]\"\n    v1 *= 2\n    assert str(v1) == \"[8.000000, 2.000000]\"\n    v1 /= 16\n    assert str(v1) == \"[0.500000, 0.125000]\"\n    v1 *= v2\n    assert str(v1) == \"[1.500000, -0.125000]\"\n    v2 /= v1\n    assert str(v2) == \"[2.000000, 8.000000]\"\n\n    assert hash(v1) == 4\n\n    cstats = ConstructorStats.get(m.Vector2)\n    assert cstats.alive() == 2\n    del v1\n    assert cstats.alive() == 1\n    del v2\n    assert cstats.alive() == 0\n    assert cstats.values() == ['[1.000000, 2.000000]', '[3.000000, -1.000000]',\n                               '[4.000000, 1.000000]', '[-2.000000, 3.000000]',\n                               '[-7.000000, -6.000000]', '[9.000000, 10.000000]',\n                               '[8.000000, 16.000000]', '[0.125000, 0.250000]',\n                               '[7.000000, 6.000000]', '[9.000000, 10.000000]',\n                               '[8.000000, 16.000000]', '[8.000000, 
4.000000]',\n                               '[3.000000, -2.000000]', '[3.000000, -0.500000]',\n                               '[6.000000, -2.000000]']\n    assert cstats.default_constructions == 0\n    assert cstats.copy_constructions == 0\n    assert cstats.move_constructions >= 10\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n\ndef test_operators_notimplemented():\n    \"\"\"#393: need to return NotSupported to ensure correct arithmetic operator behavior\"\"\"\n\n    c1, c2 = m.C1(), m.C2()\n    assert c1 + c1 == 11\n    assert c2 + c2 == 22\n    assert c2 + c1 == 21\n    assert c1 + c2 == 12\n\n\ndef test_nested():\n    \"\"\"#328: first member in a class can't be used in operators\"\"\"\n\n    a = m.NestA()\n    b = m.NestB()\n    c = m.NestC()\n\n    a += 10\n    assert m.get_NestA(a) == 13\n    b.a += 100\n    assert m.get_NestA(b.a) == 103\n    c.b.a += 1000\n    assert m.get_NestA(c.b.a) == 1003\n    b -= 1\n    assert m.get_NestB(b) == 3\n    c.b -= 3\n    assert m.get_NestB(c.b) == 1\n    c *= 7\n    assert m.get_NestC(c) == 35\n\n    abase = a.as_base()\n    assert abase.value == -2\n    a.as_base().value += 44\n    assert abase.value == 42\n    assert c.b.a.as_base().value == -2\n    c.b.a.as_base().value += 44\n    assert c.b.a.as_base().value == 42\n\n    del c\n    pytest.gc_collect()\n    del a  # Shouldn't delete while abase is still alive\n    pytest.gc_collect()\n\n    assert abase.value == 42\n    del abase, b\n    pytest.gc_collect()\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_pickling.cpp",
    "content": "/*\n    tests/test_pickling.cpp -- pickle support\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\nTEST_SUBMODULE(pickling, m) {\n    // test_roundtrip\n    class Pickleable {\n    public:\n        Pickleable(const std::string &value) : m_value(value) { }\n        const std::string &value() const { return m_value; }\n\n        void setExtra1(int extra1) { m_extra1 = extra1; }\n        void setExtra2(int extra2) { m_extra2 = extra2; }\n        int extra1() const { return m_extra1; }\n        int extra2() const { return m_extra2; }\n    private:\n        std::string m_value;\n        int m_extra1 = 0;\n        int m_extra2 = 0;\n    };\n\n    class PickleableNew : public Pickleable {\n    public:\n        using Pickleable::Pickleable;\n    };\n\n    py::class_<Pickleable>(m, \"Pickleable\")\n        .def(py::init<std::string>())\n        .def(\"value\", &Pickleable::value)\n        .def(\"extra1\", &Pickleable::extra1)\n        .def(\"extra2\", &Pickleable::extra2)\n        .def(\"setExtra1\", &Pickleable::setExtra1)\n        .def(\"setExtra2\", &Pickleable::setExtra2)\n        // For details on the methods below, refer to\n        // http://docs.python.org/3/library/pickle.html#pickling-class-instances\n        .def(\"__getstate__\", [](const Pickleable &p) {\n            /* Return a tuple that fully encodes the state of the object */\n            return py::make_tuple(p.value(), p.extra1(), p.extra2());\n        })\n        .def(\"__setstate__\", [](Pickleable &p, py::tuple t) {\n            if (t.size() != 3)\n                throw std::runtime_error(\"Invalid state!\");\n            /* Invoke the constructor (need to use in-place version) */\n            new (&p) Pickleable(t[0].cast<std::string>());\n\n            /* Assign any additional state */\n            
p.setExtra1(t[1].cast<int>());\n            p.setExtra2(t[2].cast<int>());\n        });\n\n    py::class_<PickleableNew, Pickleable>(m, \"PickleableNew\")\n        .def(py::init<std::string>())\n        .def(py::pickle(\n            [](const PickleableNew &p) {\n                return py::make_tuple(p.value(), p.extra1(), p.extra2());\n            },\n            [](py::tuple t) {\n                if (t.size() != 3)\n                    throw std::runtime_error(\"Invalid state!\");\n                auto p = PickleableNew(t[0].cast<std::string>());\n\n                p.setExtra1(t[1].cast<int>());\n                p.setExtra2(t[2].cast<int>());\n                return p;\n            }\n        ));\n\n#if !defined(PYPY_VERSION)\n    // test_roundtrip_with_dict\n    class PickleableWithDict {\n    public:\n        PickleableWithDict(const std::string &value) : value(value) { }\n\n        std::string value;\n        int extra;\n    };\n\n    class PickleableWithDictNew : public PickleableWithDict {\n    public:\n        using PickleableWithDict::PickleableWithDict;\n    };\n\n    py::class_<PickleableWithDict>(m, \"PickleableWithDict\", py::dynamic_attr())\n        .def(py::init<std::string>())\n        .def_readwrite(\"value\", &PickleableWithDict::value)\n        .def_readwrite(\"extra\", &PickleableWithDict::extra)\n        .def(\"__getstate__\", [](py::object self) {\n            /* Also include __dict__ in state */\n            return py::make_tuple(self.attr(\"value\"), self.attr(\"extra\"), self.attr(\"__dict__\"));\n        })\n        .def(\"__setstate__\", [](py::object self, py::tuple t) {\n            if (t.size() != 3)\n                throw std::runtime_error(\"Invalid state!\");\n            /* Cast and construct */\n            auto& p = self.cast<PickleableWithDict&>();\n            new (&p) PickleableWithDict(t[0].cast<std::string>());\n\n            /* Assign C++ state */\n            p.extra = t[1].cast<int>();\n\n            /* Assign Python state 
*/\n            self.attr(\"__dict__\") = t[2];\n        });\n\n    py::class_<PickleableWithDictNew, PickleableWithDict>(m, \"PickleableWithDictNew\")\n        .def(py::init<std::string>())\n        .def(py::pickle(\n            [](py::object self) {\n                return py::make_tuple(self.attr(\"value\"), self.attr(\"extra\"), self.attr(\"__dict__\"));\n            },\n            [](const py::tuple &t) {\n                if (t.size() != 3)\n                    throw std::runtime_error(\"Invalid state!\");\n\n                auto cpp_state = PickleableWithDictNew(t[0].cast<std::string>());\n                cpp_state.extra = t[1].cast<int>();\n\n                auto py_state = t[2].cast<py::dict>();\n                return std::make_pair(cpp_state, py_state);\n            }\n        ));\n#endif\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_pickling.py",
    "content": "import pytest\nfrom pybind11_tests import pickling as m\n\ntry:\n    import cPickle as pickle  # Use cPickle on Python 2.7\nexcept ImportError:\n    import pickle\n\n\n@pytest.mark.parametrize(\"cls_name\", [\"Pickleable\", \"PickleableNew\"])\ndef test_roundtrip(cls_name):\n    cls = getattr(m, cls_name)\n    p = cls(\"test_value\")\n    p.setExtra1(15)\n    p.setExtra2(48)\n\n    data = pickle.dumps(p, 2)  # Must use pickle protocol >= 2\n    p2 = pickle.loads(data)\n    assert p2.value() == p.value()\n    assert p2.extra1() == p.extra1()\n    assert p2.extra2() == p.extra2()\n\n\n@pytest.unsupported_on_pypy\n@pytest.mark.parametrize(\"cls_name\", [\"PickleableWithDict\", \"PickleableWithDictNew\"])\ndef test_roundtrip_with_dict(cls_name):\n    cls = getattr(m, cls_name)\n    p = cls(\"test_value\")\n    p.extra = 15\n    p.dynamic = \"Attribute\"\n\n    data = pickle.dumps(p, pickle.HIGHEST_PROTOCOL)\n    p2 = pickle.loads(data)\n    assert p2.value == p.value\n    assert p2.extra == p.extra\n    assert p2.dynamic == p.dynamic\n\n\ndef test_enum_pickle():\n    from pybind11_tests import enums as e\n    data = pickle.dumps(e.EOne, 2)\n    assert e.EOne == pickle.loads(data)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_pytypes.cpp",
    "content": "/*\n    tests/test_pytypes.cpp -- Python type casters\n\n    Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\n\nTEST_SUBMODULE(pytypes, m) {\n    // test_list\n    m.def(\"get_list\", []() {\n        py::list list;\n        list.append(\"value\");\n        py::print(\"Entry at position 0:\", list[0]);\n        list[0] = py::str(\"overwritten\");\n        return list;\n    });\n    m.def(\"print_list\", [](py::list list) {\n        int index = 0;\n        for (auto item : list)\n            py::print(\"list item {}: {}\"_s.format(index++, item));\n    });\n\n    // test_set\n    m.def(\"get_set\", []() {\n        py::set set;\n        set.add(py::str(\"key1\"));\n        set.add(\"key2\");\n        set.add(std::string(\"key3\"));\n        return set;\n    });\n    m.def(\"print_set\", [](py::set set) {\n        for (auto item : set)\n            py::print(\"key:\", item);\n    });\n\n    // test_dict\n    m.def(\"get_dict\", []() { return py::dict(\"key\"_a=\"value\"); });\n    m.def(\"print_dict\", [](py::dict dict) {\n        for (auto item : dict)\n            py::print(\"key: {}, value={}\"_s.format(item.first, item.second));\n    });\n    m.def(\"dict_keyword_constructor\", []() {\n        auto d1 = py::dict(\"x\"_a=1, \"y\"_a=2);\n        auto d2 = py::dict(\"z\"_a=3, **d1);\n        return d2;\n    });\n\n    // test_str\n    m.def(\"str_from_string\", []() { return py::str(std::string(\"baz\")); });\n    m.def(\"str_from_bytes\", []() { return py::str(py::bytes(\"boo\", 3)); });\n    m.def(\"str_from_object\", [](const py::object& obj) { return py::str(obj); });\n    m.def(\"repr_from_object\", [](const py::object& obj) { return py::repr(obj); });\n\n    m.def(\"str_format\", []() {\n        auto s1 = \"{} + {} = {}\"_s.format(1, 2, 3);\n        auto s2 = \"{a} + {b} = 
{c}\"_s.format(\"a\"_a=1, \"b\"_a=2, \"c\"_a=3);\n        return py::make_tuple(s1, s2);\n    });\n\n    // test_bytes\n    m.def(\"bytes_from_string\", []() { return py::bytes(std::string(\"foo\")); });\n    m.def(\"bytes_from_str\", []() { return py::bytes(py::str(\"bar\", 3)); });\n\n    // test_capsule\n    m.def(\"return_capsule_with_destructor\", []() {\n        py::print(\"creating capsule\");\n        return py::capsule([]() {\n            py::print(\"destructing capsule\");\n        });\n    });\n\n    m.def(\"return_capsule_with_destructor_2\", []() {\n        py::print(\"creating capsule\");\n        return py::capsule((void *) 1234, [](void *ptr) {\n            py::print(\"destructing capsule: {}\"_s.format((size_t) ptr));\n        });\n    });\n\n    m.def(\"return_capsule_with_name_and_destructor\", []() {\n        auto capsule = py::capsule((void *) 1234, \"pointer type description\", [](PyObject *ptr) {\n            if (ptr) {\n                auto name = PyCapsule_GetName(ptr);\n                py::print(\"destructing capsule ({}, '{}')\"_s.format(\n                    (size_t) PyCapsule_GetPointer(ptr, name), name\n                ));\n            }\n        });\n        void *contents = capsule;\n        py::print(\"created capsule ({}, '{}')\"_s.format((size_t) contents, capsule.name()));\n        return capsule;\n    });\n\n    // test_accessors\n    m.def(\"accessor_api\", [](py::object o) {\n        auto d = py::dict();\n\n        d[\"basic_attr\"] = o.attr(\"basic_attr\");\n\n        auto l = py::list();\n        for (const auto &item : o.attr(\"begin_end\")) {\n            l.append(item);\n        }\n        d[\"begin_end\"] = l;\n\n        d[\"operator[object]\"] = o.attr(\"d\")[\"operator[object]\"_s];\n        d[\"operator[char *]\"] = o.attr(\"d\")[\"operator[char *]\"];\n\n        d[\"attr(object)\"] = o.attr(\"sub\").attr(\"attr_obj\");\n        d[\"attr(char *)\"] = o.attr(\"sub\").attr(\"attr_char\");\n        try {\n            
o.attr(\"sub\").attr(\"missing\").ptr();\n        } catch (const py::error_already_set &) {\n            d[\"missing_attr_ptr\"] = \"raised\"_s;\n        }\n        try {\n            o.attr(\"missing\").attr(\"doesn't matter\");\n        } catch (const py::error_already_set &) {\n            d[\"missing_attr_chain\"] = \"raised\"_s;\n        }\n\n        d[\"is_none\"] = o.attr(\"basic_attr\").is_none();\n\n        d[\"operator()\"] = o.attr(\"func\")(1);\n        d[\"operator*\"] = o.attr(\"func\")(*o.attr(\"begin_end\"));\n\n        // Test implicit conversion\n        py::list implicit_list = o.attr(\"begin_end\");\n        d[\"implicit_list\"] = implicit_list;\n        py::dict implicit_dict = o.attr(\"__dict__\");\n        d[\"implicit_dict\"] = implicit_dict;\n\n        return d;\n    });\n\n    m.def(\"tuple_accessor\", [](py::tuple existing_t) {\n        try {\n            existing_t[0] = 1;\n        } catch (const py::error_already_set &) {\n            // --> Python system error\n            // Only new tuples (refcount == 1) are mutable\n            auto new_t = py::tuple(3);\n            for (size_t i = 0; i < new_t.size(); ++i) {\n                new_t[i] = i;\n            }\n            return new_t;\n        }\n        return py::tuple();\n    });\n\n    m.def(\"accessor_assignment\", []() {\n        auto l = py::list(1);\n        l[0] = 0;\n\n        auto d = py::dict();\n        d[\"get\"] = l[0];\n        auto var = l[0];\n        d[\"deferred_get\"] = var;\n        l[0] = 1;\n        d[\"set\"] = l[0];\n        var = 99; // this assignment should not overwrite l[0]\n        d[\"deferred_set\"] = l[0];\n        d[\"var\"] = var;\n\n        return d;\n    });\n\n    // test_constructors\n    m.def(\"default_constructors\", []() {\n        return py::dict(\n            \"str\"_a=py::str(),\n            \"bool\"_a=py::bool_(),\n            \"int\"_a=py::int_(),\n            \"float\"_a=py::float_(),\n            \"tuple\"_a=py::tuple(),\n            
\"list\"_a=py::list(),\n            \"dict\"_a=py::dict(),\n            \"set\"_a=py::set()\n        );\n    });\n\n    m.def(\"converting_constructors\", [](py::dict d) {\n        return py::dict(\n            \"str\"_a=py::str(d[\"str\"]),\n            \"bool\"_a=py::bool_(d[\"bool\"]),\n            \"int\"_a=py::int_(d[\"int\"]),\n            \"float\"_a=py::float_(d[\"float\"]),\n            \"tuple\"_a=py::tuple(d[\"tuple\"]),\n            \"list\"_a=py::list(d[\"list\"]),\n            \"dict\"_a=py::dict(d[\"dict\"]),\n            \"set\"_a=py::set(d[\"set\"]),\n            \"memoryview\"_a=py::memoryview(d[\"memoryview\"])\n        );\n    });\n\n    m.def(\"cast_functions\", [](py::dict d) {\n        // When converting between Python types, obj.cast<T>() should be the same as T(obj)\n        return py::dict(\n            \"str\"_a=d[\"str\"].cast<py::str>(),\n            \"bool\"_a=d[\"bool\"].cast<py::bool_>(),\n            \"int\"_a=d[\"int\"].cast<py::int_>(),\n            \"float\"_a=d[\"float\"].cast<py::float_>(),\n            \"tuple\"_a=d[\"tuple\"].cast<py::tuple>(),\n            \"list\"_a=d[\"list\"].cast<py::list>(),\n            \"dict\"_a=d[\"dict\"].cast<py::dict>(),\n            \"set\"_a=d[\"set\"].cast<py::set>(),\n            \"memoryview\"_a=d[\"memoryview\"].cast<py::memoryview>()\n        );\n    });\n\n    m.def(\"get_implicit_casting\", []() {\n        py::dict d;\n        d[\"char*_i1\"] = \"abc\";\n        const char *c2 = \"abc\";\n        d[\"char*_i2\"] = c2;\n        d[\"char*_e\"] = py::cast(c2);\n        d[\"char*_p\"] = py::str(c2);\n\n        d[\"int_i1\"] = 42;\n        int i = 42;\n        d[\"int_i2\"] = i;\n        i++;\n        d[\"int_e\"] = py::cast(i);\n        i++;\n        d[\"int_p\"] = py::int_(i);\n\n        d[\"str_i1\"] = std::string(\"str\");\n        std::string s2(\"str1\");\n        d[\"str_i2\"] = s2;\n        s2[3] = '2';\n        d[\"str_e\"] = py::cast(s2);\n        s2[3] = '3';\n        d[\"str_p\"] 
= py::str(s2);\n\n        py::list l(2);\n        l[0] = 3;\n        l[1] = py::cast(6);\n        l.append(9);\n        l.append(py::cast(12));\n        l.append(py::int_(15));\n\n        return py::dict(\n            \"d\"_a=d,\n            \"l\"_a=l\n        );\n    });\n\n    // test_print\n    m.def(\"print_function\", []() {\n        py::print(\"Hello, World!\");\n        py::print(1, 2.0, \"three\", true, std::string(\"-- multiple args\"));\n        auto args = py::make_tuple(\"and\", \"a\", \"custom\", \"separator\");\n        py::print(\"*args\", *args, \"sep\"_a=\"-\");\n        py::print(\"no new line here\", \"end\"_a=\" -- \");\n        py::print(\"next print\");\n\n        auto py_stderr = py::module::import(\"sys\").attr(\"stderr\");\n        py::print(\"this goes to stderr\", \"file\"_a=py_stderr);\n\n        py::print(\"flush\", \"flush\"_a=true);\n\n        py::print(\"{a} + {b} = {c}\"_s.format(\"a\"_a=\"py::print\", \"b\"_a=\"str.format\", \"c\"_a=\"this\"));\n    });\n\n    m.def(\"print_failure\", []() { py::print(42, UnregisteredType()); });\n\n    m.def(\"hash_function\", [](py::object obj) { return py::hash(obj); });\n\n    m.def(\"test_number_protocol\", [](py::object a, py::object b) {\n        py::list l;\n        l.append(a.equal(b));\n        l.append(a.not_equal(b));\n        l.append(a < b);\n        l.append(a <= b);\n        l.append(a > b);\n        l.append(a >= b);\n        l.append(a + b);\n        l.append(a - b);\n        l.append(a * b);\n        l.append(a / b);\n        l.append(a | b);\n        l.append(a & b);\n        l.append(a ^ b);\n        l.append(a >> b);\n        l.append(a << b);\n        return l;\n    });\n\n    m.def(\"test_list_slicing\", [](py::list a) {\n        return a[py::slice(0, -1, 2)];\n    });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_pytypes.py",
    "content": "from __future__ import division\nimport pytest\nimport sys\n\nfrom pybind11_tests import pytypes as m\nfrom pybind11_tests import debug_enabled\n\n\ndef test_list(capture, doc):\n    with capture:\n        lst = m.get_list()\n        assert lst == [\"overwritten\"]\n\n        lst.append(\"value2\")\n        m.print_list(lst)\n    assert capture.unordered == \"\"\"\n        Entry at position 0: value\n        list item 0: overwritten\n        list item 1: value2\n    \"\"\"\n\n    assert doc(m.get_list) == \"get_list() -> list\"\n    assert doc(m.print_list) == \"print_list(arg0: list) -> None\"\n\n\ndef test_set(capture, doc):\n    s = m.get_set()\n    assert s == {\"key1\", \"key2\", \"key3\"}\n\n    with capture:\n        s.add(\"key4\")\n        m.print_set(s)\n    assert capture.unordered == \"\"\"\n        key: key1\n        key: key2\n        key: key3\n        key: key4\n    \"\"\"\n\n    assert doc(m.get_list) == \"get_list() -> list\"\n    assert doc(m.print_list) == \"print_list(arg0: list) -> None\"\n\n\ndef test_dict(capture, doc):\n    d = m.get_dict()\n    assert d == {\"key\": \"value\"}\n\n    with capture:\n        d[\"key2\"] = \"value2\"\n        m.print_dict(d)\n    assert capture.unordered == \"\"\"\n        key: key, value=value\n        key: key2, value=value2\n    \"\"\"\n\n    assert doc(m.get_dict) == \"get_dict() -> dict\"\n    assert doc(m.print_dict) == \"print_dict(arg0: dict) -> None\"\n\n    assert m.dict_keyword_constructor() == {\"x\": 1, \"y\": 2, \"z\": 3}\n\n\ndef test_str(doc):\n    assert m.str_from_string().encode().decode() == \"baz\"\n    assert m.str_from_bytes().encode().decode() == \"boo\"\n\n    assert doc(m.str_from_bytes) == \"str_from_bytes() -> str\"\n\n    class A(object):\n        def __str__(self):\n            return \"this is a str\"\n\n        def __repr__(self):\n            return \"this is a repr\"\n\n    assert m.str_from_object(A()) == \"this is a str\"\n    assert m.repr_from_object(A()) 
== \"this is a repr\"\n\n    s1, s2 = m.str_format()\n    assert s1 == \"1 + 2 = 3\"\n    assert s1 == s2\n\n\ndef test_bytes(doc):\n    assert m.bytes_from_string().decode() == \"foo\"\n    assert m.bytes_from_str().decode() == \"bar\"\n\n    assert doc(m.bytes_from_str) == \"bytes_from_str() -> {}\".format(\n        \"bytes\" if sys.version_info[0] == 3 else \"str\"\n    )\n\n\ndef test_capsule(capture):\n    pytest.gc_collect()\n    with capture:\n        a = m.return_capsule_with_destructor()\n        del a\n        pytest.gc_collect()\n    assert capture.unordered == \"\"\"\n        creating capsule\n        destructing capsule\n    \"\"\"\n\n    with capture:\n        a = m.return_capsule_with_destructor_2()\n        del a\n        pytest.gc_collect()\n    assert capture.unordered == \"\"\"\n        creating capsule\n        destructing capsule: 1234\n    \"\"\"\n\n    with capture:\n        a = m.return_capsule_with_name_and_destructor()\n        del a\n        pytest.gc_collect()\n    assert capture.unordered == \"\"\"\n        created capsule (1234, 'pointer type description')\n        destructing capsule (1234, 'pointer type description')\n    \"\"\"\n\n\ndef test_accessors():\n    class SubTestObject:\n        attr_obj = 1\n        attr_char = 2\n\n    class TestObject:\n        basic_attr = 1\n        begin_end = [1, 2, 3]\n        d = {\"operator[object]\": 1, \"operator[char *]\": 2}\n        sub = SubTestObject()\n\n        def func(self, x, *args):\n            return self.basic_attr + x + sum(args)\n\n    d = m.accessor_api(TestObject())\n    assert d[\"basic_attr\"] == 1\n    assert d[\"begin_end\"] == [1, 2, 3]\n    assert d[\"operator[object]\"] == 1\n    assert d[\"operator[char *]\"] == 2\n    assert d[\"attr(object)\"] == 1\n    assert d[\"attr(char *)\"] == 2\n    assert d[\"missing_attr_ptr\"] == \"raised\"\n    assert d[\"missing_attr_chain\"] == \"raised\"\n    assert d[\"is_none\"] is False\n    assert d[\"operator()\"] == 2\n    assert 
d[\"operator*\"] == 7\n    assert d[\"implicit_list\"] == [1, 2, 3]\n    assert all(x in TestObject.__dict__ for x in d[\"implicit_dict\"])\n\n    assert m.tuple_accessor(tuple()) == (0, 1, 2)\n\n    d = m.accessor_assignment()\n    assert d[\"get\"] == 0\n    assert d[\"deferred_get\"] == 0\n    assert d[\"set\"] == 1\n    assert d[\"deferred_set\"] == 1\n    assert d[\"var\"] == 99\n\n\ndef test_constructors():\n    \"\"\"C++ default and converting constructors are equivalent to type calls in Python\"\"\"\n    types = [str, bool, int, float, tuple, list, dict, set]\n    expected = {t.__name__: t() for t in types}\n    assert m.default_constructors() == expected\n\n    data = {\n        str: 42,\n        bool: \"Not empty\",\n        int: \"42\",\n        float: \"+1e3\",\n        tuple: range(3),\n        list: range(3),\n        dict: [(\"two\", 2), (\"one\", 1), (\"three\", 3)],\n        set: [4, 4, 5, 6, 6, 6],\n        memoryview: b'abc'\n    }\n    inputs = {k.__name__: v for k, v in data.items()}\n    expected = {k.__name__: k(v) for k, v in data.items()}\n\n    assert m.converting_constructors(inputs) == expected\n    assert m.cast_functions(inputs) == expected\n\n    # Converting constructors and cast functions should just reference rather\n    # than copy when no conversion is needed:\n    noconv1 = m.converting_constructors(expected)\n    for k in noconv1:\n        assert noconv1[k] is expected[k]\n\n    noconv2 = m.cast_functions(expected)\n    for k in noconv2:\n        assert noconv2[k] is expected[k]\n\n\ndef test_implicit_casting():\n    \"\"\"Tests implicit casting when assigning or appending to dicts and lists.\"\"\"\n    z = m.get_implicit_casting()\n    assert z['d'] == {\n        'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc',\n        'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3',\n        'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44\n    }\n    assert z['l'] == [3, 6, 9, 12, 
15]\n\n\ndef test_print(capture):\n    with capture:\n        m.print_function()\n    assert capture == \"\"\"\n        Hello, World!\n        1 2.0 three True -- multiple args\n        *args-and-a-custom-separator\n        no new line here -- next print\n        flush\n        py::print + str.format = this\n    \"\"\"\n    assert capture.stderr == \"this goes to stderr\"\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.print_failure()\n    assert str(excinfo.value) == \"make_tuple(): unable to convert \" + (\n        \"argument of type 'UnregisteredType' to Python object\"\n        if debug_enabled else\n        \"arguments to Python object (compile in debug mode for details)\"\n    )\n\n\ndef test_hash():\n    class Hashable(object):\n        def __init__(self, value):\n            self.value = value\n\n        def __hash__(self):\n            return self.value\n\n    class Unhashable(object):\n        __hash__ = None\n\n    assert m.hash_function(Hashable(42)) == 42\n    with pytest.raises(TypeError):\n        m.hash_function(Unhashable())\n\n\ndef test_number_protocol():\n    for a, b in [(1, 1), (3, 5)]:\n        li = [a == b, a != b, a < b, a <= b, a > b, a >= b, a + b,\n              a - b, a * b, a / b, a | b, a & b, a ^ b, a >> b, a << b]\n        assert m.test_number_protocol(a, b) == li\n\n\ndef test_list_slicing():\n    li = list(range(100))\n    assert li[::2] == m.test_list_slicing(li)\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_sequences_and_iterators.cpp",
    "content": "/*\n    tests/test_sequences_and_iterators.cpp -- supporting Pythons' sequence protocol, iterators,\n    etc.\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <pybind11/operators.h>\n#include <pybind11/stl.h>\n\ntemplate<typename T>\nclass NonZeroIterator {\n    const T* ptr_;\npublic:\n    NonZeroIterator(const T* ptr) : ptr_(ptr) {}\n    const T& operator*() const { return *ptr_; }\n    NonZeroIterator& operator++() { ++ptr_; return *this; }\n};\n\nclass NonZeroSentinel {};\n\ntemplate<typename A, typename B>\nbool operator==(const NonZeroIterator<std::pair<A, B>>& it, const NonZeroSentinel&) {\n    return !(*it).first || !(*it).second;\n}\n\ntemplate <typename PythonType>\npy::list test_random_access_iterator(PythonType x) {\n    if (x.size() < 5)\n        throw py::value_error(\"Please provide at least 5 elements for testing.\");\n\n    auto checks = py::list();\n    auto assert_equal = [&checks](py::handle a, py::handle b) {\n        auto result = PyObject_RichCompareBool(a.ptr(), b.ptr(), Py_EQ);\n        if (result == -1) { throw py::error_already_set(); }\n        checks.append(result != 0);\n    };\n\n    auto it = x.begin();\n    assert_equal(x[0], *it);\n    assert_equal(x[0], it[0]);\n    assert_equal(x[1], it[1]);\n\n    assert_equal(x[1], *(++it));\n    assert_equal(x[1], *(it++));\n    assert_equal(x[2], *it);\n    assert_equal(x[3], *(it += 1));\n    assert_equal(x[2], *(--it));\n    assert_equal(x[2], *(it--));\n    assert_equal(x[1], *it);\n    assert_equal(x[0], *(it -= 1));\n\n    assert_equal(it->attr(\"real\"), x[0].attr(\"real\"));\n    assert_equal((it + 1)->attr(\"real\"), x[1].attr(\"real\"));\n\n    assert_equal(x[1], *(it + 1));\n    assert_equal(x[1], *(1 + it));\n    it += 3;\n    assert_equal(x[1], 
*(it - 2));\n\n    checks.append(static_cast<std::size_t>(x.end() - x.begin()) == x.size());\n    checks.append((x.begin() + static_cast<std::ptrdiff_t>(x.size())) == x.end());\n    checks.append(x.begin() < x.end());\n\n    return checks;\n}\n\nTEST_SUBMODULE(sequences_and_iterators, m) {\n\n    // test_sequence\n    class Sequence {\n    public:\n        Sequence(size_t size) : m_size(size) {\n            print_created(this, \"of size\", m_size);\n            m_data = new float[size];\n            memset(m_data, 0, sizeof(float) * size);\n        }\n        Sequence(const std::vector<float> &value) : m_size(value.size()) {\n            print_created(this, \"of size\", m_size, \"from std::vector\");\n            m_data = new float[m_size];\n            memcpy(m_data, &value[0], sizeof(float) * m_size);\n        }\n        Sequence(const Sequence &s) : m_size(s.m_size) {\n            print_copy_created(this);\n            m_data = new float[m_size];\n            memcpy(m_data, s.m_data, sizeof(float)*m_size);\n        }\n        Sequence(Sequence &&s) : m_size(s.m_size), m_data(s.m_data) {\n            print_move_created(this);\n            s.m_size = 0;\n            s.m_data = nullptr;\n        }\n\n        ~Sequence() { print_destroyed(this); delete[] m_data; }\n\n        Sequence &operator=(const Sequence &s) {\n            if (&s != this) {\n                delete[] m_data;\n                m_size = s.m_size;\n                m_data = new float[m_size];\n                memcpy(m_data, s.m_data, sizeof(float)*m_size);\n            }\n            print_copy_assigned(this);\n            return *this;\n        }\n\n        Sequence &operator=(Sequence &&s) {\n            if (&s != this) {\n                delete[] m_data;\n                m_size = s.m_size;\n                m_data = s.m_data;\n                s.m_size = 0;\n                s.m_data = nullptr;\n            }\n            print_move_assigned(this);\n            return *this;\n        }\n\n        
bool operator==(const Sequence &s) const {\n            if (m_size != s.size()) return false;\n            for (size_t i = 0; i < m_size; ++i)\n                if (m_data[i] != s[i])\n                    return false;\n            return true;\n        }\n        bool operator!=(const Sequence &s) const { return !operator==(s); }\n\n        float operator[](size_t index) const { return m_data[index]; }\n        float &operator[](size_t index) { return m_data[index]; }\n\n        bool contains(float v) const {\n            for (size_t i = 0; i < m_size; ++i)\n                if (v == m_data[i])\n                    return true;\n            return false;\n        }\n\n        Sequence reversed() const {\n            Sequence result(m_size);\n            for (size_t i = 0; i < m_size; ++i)\n                result[m_size - i - 1] = m_data[i];\n            return result;\n        }\n\n        size_t size() const { return m_size; }\n\n        const float *begin() const { return m_data; }\n        const float *end() const { return m_data+m_size; }\n\n    private:\n        size_t m_size;\n        float *m_data;\n    };\n    py::class_<Sequence>(m, \"Sequence\")\n        .def(py::init<size_t>())\n        .def(py::init<const std::vector<float>&>())\n        /// Bare bones interface\n        .def(\"__getitem__\", [](const Sequence &s, size_t i) {\n            if (i >= s.size()) throw py::index_error();\n            return s[i];\n        })\n        .def(\"__setitem__\", [](Sequence &s, size_t i, float v) {\n            if (i >= s.size()) throw py::index_error();\n            s[i] = v;\n        })\n        .def(\"__len__\", &Sequence::size)\n        /// Optional sequence protocol operations\n        .def(\"__iter__\", [](const Sequence &s) { return py::make_iterator(s.begin(), s.end()); },\n                         py::keep_alive<0, 1>() /* Essential: keep object alive while iterator exists */)\n        .def(\"__contains__\", [](const Sequence &s, float v) { return 
s.contains(v); })\n        .def(\"__reversed__\", [](const Sequence &s) -> Sequence { return s.reversed(); })\n        /// Slicing protocol (optional)\n        .def(\"__getitem__\", [](const Sequence &s, py::slice slice) -> Sequence* {\n            size_t start, stop, step, slicelength;\n            if (!slice.compute(s.size(), &start, &stop, &step, &slicelength))\n                throw py::error_already_set();\n            Sequence *seq = new Sequence(slicelength);\n            for (size_t i = 0; i < slicelength; ++i) {\n                (*seq)[i] = s[start]; start += step;\n            }\n            return seq;\n        })\n        .def(\"__setitem__\", [](Sequence &s, py::slice slice, const Sequence &value) {\n            size_t start, stop, step, slicelength;\n            if (!slice.compute(s.size(), &start, &stop, &step, &slicelength))\n                throw py::error_already_set();\n            if (slicelength != value.size())\n                throw std::runtime_error(\"Left and right hand size of slice assignment have different sizes!\");\n            for (size_t i = 0; i < slicelength; ++i) {\n                s[start] = value[i]; start += step;\n            }\n        })\n        /// Comparisons\n        .def(py::self == py::self)\n        .def(py::self != py::self)\n        // Could also define py::self + py::self for concatenation, etc.\n        ;\n\n    // test_map_iterator\n    // Interface of a map-like object that isn't (directly) an unordered_map, but provides some basic\n    // map-like functionality.\n    class StringMap {\n    public:\n        StringMap() = default;\n        StringMap(std::unordered_map<std::string, std::string> init)\n            : map(std::move(init)) {}\n\n        void set(std::string key, std::string val) { map[key] = val; }\n        std::string get(std::string key) const { return map.at(key); }\n        size_t size() const { return map.size(); }\n    private:\n        std::unordered_map<std::string, std::string> map;\n    
public:\n        decltype(map.cbegin()) begin() const { return map.cbegin(); }\n        decltype(map.cend()) end() const { return map.cend(); }\n    };\n    py::class_<StringMap>(m, \"StringMap\")\n        .def(py::init<>())\n        .def(py::init<std::unordered_map<std::string, std::string>>())\n        .def(\"__getitem__\", [](const StringMap &map, std::string key) {\n                try { return map.get(key); }\n                catch (const std::out_of_range&) {\n                    throw py::key_error(\"key '\" + key + \"' does not exist\");\n                }\n        })\n        .def(\"__setitem__\", &StringMap::set)\n        .def(\"__len__\", &StringMap::size)\n        .def(\"__iter__\", [](const StringMap &map) { return py::make_key_iterator(map.begin(), map.end()); },\n                py::keep_alive<0, 1>())\n        .def(\"items\", [](const StringMap &map) { return py::make_iterator(map.begin(), map.end()); },\n                py::keep_alive<0, 1>())\n        ;\n\n    // test_generalized_iterators\n    class IntPairs {\n    public:\n        IntPairs(std::vector<std::pair<int, int>> data) : data_(std::move(data)) {}\n        const std::pair<int, int>* begin() const { return data_.data(); }\n    private:\n        std::vector<std::pair<int, int>> data_;\n    };\n    py::class_<IntPairs>(m, \"IntPairs\")\n        .def(py::init<std::vector<std::pair<int, int>>>())\n        .def(\"nonzero\", [](const IntPairs& s) {\n                return py::make_iterator(NonZeroIterator<std::pair<int, int>>(s.begin()), NonZeroSentinel());\n        }, py::keep_alive<0, 1>())\n        .def(\"nonzero_keys\", [](const IntPairs& s) {\n            return py::make_key_iterator(NonZeroIterator<std::pair<int, int>>(s.begin()), NonZeroSentinel());\n        }, py::keep_alive<0, 1>())\n        ;\n\n\n#if 0\n    // Obsolete: special data structure for exposing custom iterator types to python\n    // kept here for illustrative purposes because there might be some use cases which\n    // 
are not covered by the much simpler py::make_iterator\n\n    struct PySequenceIterator {\n        PySequenceIterator(const Sequence &seq, py::object ref) : seq(seq), ref(ref) { }\n\n        float next() {\n            if (index == seq.size())\n                throw py::stop_iteration();\n            return seq[index++];\n        }\n\n        const Sequence &seq;\n        py::object ref; // keep a reference\n        size_t index = 0;\n    };\n\n    py::class_<PySequenceIterator>(seq, \"Iterator\")\n        .def(\"__iter__\", [](PySequenceIterator &it) -> PySequenceIterator& { return it; })\n        .def(\"__next__\", &PySequenceIterator::next);\n\n    On the actual Sequence object, the iterator would be constructed as follows:\n    .def(\"__iter__\", [](py::object s) { return PySequenceIterator(s.cast<const Sequence &>(), s); })\n#endif\n\n    // test_python_iterator_in_cpp\n    m.def(\"object_to_list\", [](py::object o) {\n        auto l = py::list();\n        for (auto item : o) {\n            l.append(item);\n        }\n        return l;\n    });\n\n    m.def(\"iterator_to_list\", [](py::iterator it) {\n        auto l = py::list();\n        while (it != py::iterator::sentinel()) {\n            l.append(*it);\n            ++it;\n        }\n        return l;\n    });\n\n    // Make sure that py::iterator works with std algorithms\n    m.def(\"count_none\", [](py::object o) {\n        return std::count_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); });\n    });\n\n    m.def(\"find_none\", [](py::object o) {\n        auto it = std::find_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); });\n        return it->is_none();\n    });\n\n    m.def(\"count_nonzeros\", [](py::dict d) {\n       return std::count_if(d.begin(), d.end(), [](std::pair<py::handle, py::handle> p) {\n           return p.second.cast<int>() != 0;\n       });\n    });\n\n    m.def(\"tuple_iterator\", &test_random_access_iterator<py::tuple>);\n    m.def(\"list_iterator\", 
&test_random_access_iterator<py::list>);\n    m.def(\"sequence_iterator\", &test_random_access_iterator<py::sequence>);\n\n    // test_iterator_passthrough\n    // #181: iterator passthrough did not compile\n    m.def(\"iterator_passthrough\", [](py::iterator s) -> py::iterator {\n        return py::make_iterator(std::begin(s), std::end(s));\n    });\n\n    // test_iterator_rvp\n    // #388: Can't make iterators via make_iterator() with different r/v policies\n    static std::vector<int> list = { 1, 2, 3 };\n    m.def(\"make_iterator_1\", []() { return py::make_iterator<py::return_value_policy::copy>(list); });\n    m.def(\"make_iterator_2\", []() { return py::make_iterator<py::return_value_policy::automatic>(list); });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_sequences_and_iterators.py",
    "content": "import pytest\nfrom pybind11_tests import sequences_and_iterators as m\nfrom pybind11_tests import ConstructorStats\n\n\ndef isclose(a, b, rel_tol=1e-05, abs_tol=0.0):\n    \"\"\"Like math.isclose() from Python 3.5\"\"\"\n    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n\ndef allclose(a_list, b_list, rel_tol=1e-05, abs_tol=0.0):\n    return all(isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol) for a, b in zip(a_list, b_list))\n\n\ndef test_generalized_iterators():\n    assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero()) == [(1, 2), (3, 4)]\n    assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero()) == [(1, 2)]\n    assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero()) == []\n\n    assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero_keys()) == [1, 3]\n    assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero_keys()) == [1]\n    assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero_keys()) == []\n\n    # __next__ must continue to raise StopIteration\n    it = m.IntPairs([(0, 0)]).nonzero()\n    for _ in range(3):\n        with pytest.raises(StopIteration):\n            next(it)\n\n    it = m.IntPairs([(0, 0)]).nonzero_keys()\n    for _ in range(3):\n        with pytest.raises(StopIteration):\n            next(it)\n\n\ndef test_sequence():\n    cstats = ConstructorStats.get(m.Sequence)\n\n    s = m.Sequence(5)\n    assert cstats.values() == ['of size', '5']\n\n    assert \"Sequence\" in repr(s)\n    assert len(s) == 5\n    assert s[0] == 0 and s[3] == 0\n    assert 12.34 not in s\n    s[0], s[3] = 12.34, 56.78\n    assert 12.34 in s\n    assert isclose(s[0], 12.34) and isclose(s[3], 56.78)\n\n    rev = reversed(s)\n    assert cstats.values() == ['of size', '5']\n\n    rev2 = s[::-1]\n    assert cstats.values() == ['of size', '5']\n\n    it = iter(m.Sequence(0))\n    for _ in range(3):  # __next__ must continue to raise StopIteration\n        with pytest.raises(StopIteration):\n          
  next(it)\n    assert cstats.values() == ['of size', '0']\n\n    expected = [0, 56.78, 0, 0, 12.34]\n    assert allclose(rev, expected)\n    assert allclose(rev2, expected)\n    assert rev == rev2\n\n    rev[0::2] = m.Sequence([2.0, 2.0, 2.0])\n    assert cstats.values() == ['of size', '3', 'from std::vector']\n\n    assert allclose(rev, [2, 56.78, 2, 0, 2])\n\n    assert cstats.alive() == 4\n    del it\n    assert cstats.alive() == 3\n    del s\n    assert cstats.alive() == 2\n    del rev\n    assert cstats.alive() == 1\n    del rev2\n    assert cstats.alive() == 0\n\n    assert cstats.values() == []\n    assert cstats.default_constructions == 0\n    assert cstats.copy_constructions == 0\n    assert cstats.move_constructions >= 1\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n\ndef test_map_iterator():\n    sm = m.StringMap({'hi': 'bye', 'black': 'white'})\n    assert sm['hi'] == 'bye'\n    assert len(sm) == 2\n    assert sm['black'] == 'white'\n\n    with pytest.raises(KeyError):\n        assert sm['orange']\n    sm['orange'] = 'banana'\n    assert sm['orange'] == 'banana'\n\n    expected = {'hi': 'bye', 'black': 'white', 'orange': 'banana'}\n    for k in sm:\n        assert sm[k] == expected[k]\n    for k, v in sm.items():\n        assert v == expected[k]\n\n    it = iter(m.StringMap({}))\n    for _ in range(3):  # __next__ must continue to raise StopIteration\n        with pytest.raises(StopIteration):\n            next(it)\n\n\ndef test_python_iterator_in_cpp():\n    t = (1, 2, 3)\n    assert m.object_to_list(t) == [1, 2, 3]\n    assert m.object_to_list(iter(t)) == [1, 2, 3]\n    assert m.iterator_to_list(iter(t)) == [1, 2, 3]\n\n    with pytest.raises(TypeError) as excinfo:\n        m.object_to_list(1)\n    assert \"object is not iterable\" in str(excinfo.value)\n\n    with pytest.raises(TypeError) as excinfo:\n        m.iterator_to_list(1)\n    assert \"incompatible function arguments\" in str(excinfo.value)\n\n    def 
bad_next_call():\n        raise RuntimeError(\"py::iterator::advance() should propagate errors\")\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.iterator_to_list(iter(bad_next_call, None))\n    assert str(excinfo.value) == \"py::iterator::advance() should propagate errors\"\n\n    lst = [1, None, 0, None]\n    assert m.count_none(lst) == 2\n    assert m.find_none(lst) is True\n    assert m.count_nonzeros({\"a\": 0, \"b\": 1, \"c\": 2}) == 2\n\n    r = range(5)\n    assert all(m.tuple_iterator(tuple(r)))\n    assert all(m.list_iterator(list(r)))\n    assert all(m.sequence_iterator(r))\n\n\ndef test_iterator_passthrough():\n    \"\"\"#181: iterator passthrough did not compile\"\"\"\n    from pybind11_tests.sequences_and_iterators import iterator_passthrough\n\n    assert list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))) == [3, 5, 7, 9, 11, 13, 15]\n\n\ndef test_iterator_rvp():\n    \"\"\"#388: Can't make iterators via make_iterator() with different r/v policies \"\"\"\n    import pybind11_tests.sequences_and_iterators as m\n\n    assert list(m.make_iterator_1()) == [1, 2, 3]\n    assert list(m.make_iterator_2()) == [1, 2, 3]\n    assert not isinstance(m.make_iterator_1(), type(m.make_iterator_2()))\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_smart_ptr.cpp",
    "content": "/*\n    tests/test_smart_ptr.cpp -- binding classes with custom reference counting,\n    implicit conversions between types\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#if defined(_MSC_VER) && _MSC_VER < 1910\n#  pragma warning(disable: 4702) // unreachable code in system header\n#endif\n\n#include \"pybind11_tests.h\"\n#include \"object.h\"\n\n// Make pybind aware of the ref-counted wrapper type (s):\n\n// ref<T> is a wrapper for 'Object' which uses intrusive reference counting\n// It is always possible to construct a ref<T> from an Object* pointer without\n// possible inconsistencies, hence the 'true' argument at the end.\nPYBIND11_DECLARE_HOLDER_TYPE(T, ref<T>, true);\n// Make pybind11 aware of the non-standard getter member function\nnamespace pybind11 { namespace detail {\n    template <typename T>\n    struct holder_helper<ref<T>> {\n        static const T *get(const ref<T> &p) { return p.get_ptr(); }\n    };\n}}\n\n// The following is not required anymore for std::shared_ptr, but it should compile without error:\nPYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>);\n\n// This is just a wrapper around unique_ptr, but with extra fields to deliberately bloat up the\n// holder size to trigger the non-simple-layout internal instance layout for single inheritance with\n// large holder type:\ntemplate <typename T> class huge_unique_ptr {\n    std::unique_ptr<T> ptr;\n    uint64_t padding[10];\npublic:\n    huge_unique_ptr(T *p) : ptr(p) {};\n    T *get() { return ptr.get(); }\n};\nPYBIND11_DECLARE_HOLDER_TYPE(T, huge_unique_ptr<T>);\n\n// Simple custom holder that works like unique_ptr\ntemplate <typename T>\nclass custom_unique_ptr {\n    std::unique_ptr<T> impl;\npublic:\n    custom_unique_ptr(T* p) : impl(p) { }\n    T* get() const { return impl.get(); }\n    T* release_ptr() { return 
impl.release(); }\n};\nPYBIND11_DECLARE_HOLDER_TYPE(T, custom_unique_ptr<T>);\n\n// Simple custom holder that works like shared_ptr and has operator& overload\n// To obtain address of an instance of this holder pybind should use std::addressof\n// Attempt to get address via operator& may leads to segmentation fault\ntemplate <typename T>\nclass shared_ptr_with_addressof_operator {\n    std::shared_ptr<T> impl;\npublic:\n    shared_ptr_with_addressof_operator( ) = default;\n    shared_ptr_with_addressof_operator(T* p) : impl(p) { }\n    T* get() const { return impl.get(); }\n    T** operator&() { throw std::logic_error(\"Call of overloaded operator& is not expected\"); }\n};\nPYBIND11_DECLARE_HOLDER_TYPE(T, shared_ptr_with_addressof_operator<T>);\n\n// Simple custom holder that works like unique_ptr and has operator& overload\n// To obtain address of an instance of this holder pybind should use std::addressof\n// Attempt to get address via operator& may leads to segmentation fault\ntemplate <typename T>\nclass unique_ptr_with_addressof_operator {\n    std::unique_ptr<T> impl;\npublic:\n    unique_ptr_with_addressof_operator() = default;\n    unique_ptr_with_addressof_operator(T* p) : impl(p) { }\n    T* get() const { return impl.get(); }\n    T* release_ptr() { return impl.release(); }\n    T** operator&() { throw std::logic_error(\"Call of overloaded operator& is not expected\"); }\n};\nPYBIND11_DECLARE_HOLDER_TYPE(T, unique_ptr_with_addressof_operator<T>);\n\n\nTEST_SUBMODULE(smart_ptr, m) {\n\n    // test_smart_ptr\n\n    // Object implementation in `object.h`\n    py::class_<Object, ref<Object>> obj(m, \"Object\");\n    obj.def(\"getRefCount\", &Object::getRefCount);\n\n    // Custom object with builtin reference counting (see 'object.h' for the implementation)\n    class MyObject1 : public Object {\n    public:\n        MyObject1(int value) : value(value) { print_created(this, toString()); }\n        std::string toString() const { return \"MyObject1[\" + 
std::to_string(value) + \"]\"; }\n    protected:\n        virtual ~MyObject1() { print_destroyed(this); }\n    private:\n        int value;\n    };\n    py::class_<MyObject1, ref<MyObject1>>(m, \"MyObject1\", obj)\n        .def(py::init<int>());\n    py::implicitly_convertible<py::int_, MyObject1>();\n\n    m.def(\"make_object_1\", []() -> Object * { return new MyObject1(1); });\n    m.def(\"make_object_2\", []() -> ref<Object> { return new MyObject1(2); });\n    m.def(\"make_myobject1_1\", []() -> MyObject1 * { return new MyObject1(4); });\n    m.def(\"make_myobject1_2\", []() -> ref<MyObject1> { return new MyObject1(5); });\n    m.def(\"print_object_1\", [](const Object *obj) { py::print(obj->toString()); });\n    m.def(\"print_object_2\", [](ref<Object> obj) { py::print(obj->toString()); });\n    m.def(\"print_object_3\", [](const ref<Object> &obj) { py::print(obj->toString()); });\n    m.def(\"print_object_4\", [](const ref<Object> *obj) { py::print((*obj)->toString()); });\n    m.def(\"print_myobject1_1\", [](const MyObject1 *obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject1_2\", [](ref<MyObject1> obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject1_3\", [](const ref<MyObject1> &obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject1_4\", [](const ref<MyObject1> *obj) { py::print((*obj)->toString()); });\n\n    // Expose constructor stats for the ref type\n    m.def(\"cstats_ref\", &ConstructorStats::get<ref_tag>);\n\n\n    // Object managed by a std::shared_ptr<>\n    class MyObject2 {\n    public:\n        MyObject2(const MyObject2 &) = default;\n        MyObject2(int value) : value(value) { print_created(this, toString()); }\n        std::string toString() const { return \"MyObject2[\" + std::to_string(value) + \"]\"; }\n        virtual ~MyObject2() { print_destroyed(this); }\n    private:\n        int value;\n    };\n    py::class_<MyObject2, std::shared_ptr<MyObject2>>(m, \"MyObject2\")\n        
.def(py::init<int>());\n    m.def(\"make_myobject2_1\", []() { return new MyObject2(6); });\n    m.def(\"make_myobject2_2\", []() { return std::make_shared<MyObject2>(7); });\n    m.def(\"print_myobject2_1\", [](const MyObject2 *obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject2_2\", [](std::shared_ptr<MyObject2> obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject2_3\", [](const std::shared_ptr<MyObject2> &obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject2_4\", [](const std::shared_ptr<MyObject2> *obj) { py::print((*obj)->toString()); });\n\n    // Object managed by a std::shared_ptr<>, additionally derives from std::enable_shared_from_this<>\n    class MyObject3 : public std::enable_shared_from_this<MyObject3> {\n    public:\n        MyObject3(const MyObject3 &) = default;\n        MyObject3(int value) : value(value) { print_created(this, toString()); }\n        std::string toString() const { return \"MyObject3[\" + std::to_string(value) + \"]\"; }\n        virtual ~MyObject3() { print_destroyed(this); }\n    private:\n        int value;\n    };\n    py::class_<MyObject3, std::shared_ptr<MyObject3>>(m, \"MyObject3\")\n        .def(py::init<int>());\n    m.def(\"make_myobject3_1\", []() { return new MyObject3(8); });\n    m.def(\"make_myobject3_2\", []() { return std::make_shared<MyObject3>(9); });\n    m.def(\"print_myobject3_1\", [](const MyObject3 *obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject3_2\", [](std::shared_ptr<MyObject3> obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject3_3\", [](const std::shared_ptr<MyObject3> &obj) { py::print(obj->toString()); });\n    m.def(\"print_myobject3_4\", [](const std::shared_ptr<MyObject3> *obj) { py::print((*obj)->toString()); });\n\n    // test_smart_ptr_refcounting\n    m.def(\"test_object1_refcounting\", []() {\n        ref<MyObject1> o = new MyObject1(0);\n        bool good = o->getRefCount() == 1;\n        py::object o2 = py::cast(o, 
py::return_value_policy::reference);\n        // always request (partial) ownership for objects with intrusive\n        // reference counting even when using the 'reference' RVP\n        good &= o->getRefCount() == 2;\n        return good;\n    });\n\n    // test_unique_nodelete\n    // Object with a private destructor\n    class MyObject4 {\n    public:\n        MyObject4(int value) : value{value} { print_created(this); }\n        int value;\n    private:\n        ~MyObject4() { print_destroyed(this); }\n    };\n    py::class_<MyObject4, std::unique_ptr<MyObject4, py::nodelete>>(m, \"MyObject4\")\n        .def(py::init<int>())\n        .def_readwrite(\"value\", &MyObject4::value);\n\n    // test_unique_deleter\n    // Object with std::unique_ptr<T, D> where D is not matching the base class\n    // Object with a protected destructor\n    class MyObject4a {\n    public:\n        MyObject4a(int i) {\n            value = i;\n            print_created(this);\n        };\n        int value;\n    protected:\n        virtual ~MyObject4a() { print_destroyed(this); }\n    };\n    py::class_<MyObject4a, std::unique_ptr<MyObject4a, py::nodelete>>(m, \"MyObject4a\")\n        .def(py::init<int>())\n        .def_readwrite(\"value\", &MyObject4a::value);\n\n    // Object derived but with public destructor and no Deleter in default holder\n    class MyObject4b : public MyObject4a {\n    public:\n        MyObject4b(int i) : MyObject4a(i) { print_created(this); }\n        ~MyObject4b() { print_destroyed(this); }\n    };\n    py::class_<MyObject4b, MyObject4a>(m, \"MyObject4b\")\n        .def(py::init<int>());\n\n    // test_large_holder\n    class MyObject5 { // managed by huge_unique_ptr\n    public:\n        MyObject5(int value) : value{value} { print_created(this); }\n        ~MyObject5() { print_destroyed(this); }\n        int value;\n    };\n    py::class_<MyObject5, huge_unique_ptr<MyObject5>>(m, \"MyObject5\")\n        .def(py::init<int>())\n        .def_readwrite(\"value\", 
&MyObject5::value);\n\n    // test_shared_ptr_and_references\n    struct SharedPtrRef {\n        struct A {\n            A() { print_created(this); }\n            A(const A &) { print_copy_created(this); }\n            A(A &&) { print_move_created(this); }\n            ~A() { print_destroyed(this); }\n        };\n\n        A value = {};\n        std::shared_ptr<A> shared = std::make_shared<A>();\n    };\n    using A = SharedPtrRef::A;\n    py::class_<A, std::shared_ptr<A>>(m, \"A\");\n    py::class_<SharedPtrRef>(m, \"SharedPtrRef\")\n        .def(py::init<>())\n        .def_readonly(\"ref\", &SharedPtrRef::value)\n        .def_property_readonly(\"copy\", [](const SharedPtrRef &s) { return s.value; },\n                               py::return_value_policy::copy)\n        .def_readonly(\"holder_ref\", &SharedPtrRef::shared)\n        .def_property_readonly(\"holder_copy\", [](const SharedPtrRef &s) { return s.shared; },\n                               py::return_value_policy::copy)\n        .def(\"set_ref\", [](SharedPtrRef &, const A &) { return true; })\n        .def(\"set_holder\", [](SharedPtrRef &, std::shared_ptr<A>) { return true; });\n\n    // test_shared_ptr_from_this_and_references\n    struct SharedFromThisRef {\n        struct B : std::enable_shared_from_this<B> {\n            B() { print_created(this); }\n            B(const B &) : std::enable_shared_from_this<B>() { print_copy_created(this); }\n            B(B &&) : std::enable_shared_from_this<B>() { print_move_created(this); }\n            ~B() { print_destroyed(this); }\n        };\n\n        B value = {};\n        std::shared_ptr<B> shared = std::make_shared<B>();\n    };\n    using B = SharedFromThisRef::B;\n    py::class_<B, std::shared_ptr<B>>(m, \"B\");\n    py::class_<SharedFromThisRef>(m, \"SharedFromThisRef\")\n        .def(py::init<>())\n        .def_readonly(\"bad_wp\", &SharedFromThisRef::value)\n        .def_property_readonly(\"ref\", [](const SharedFromThisRef &s) -> const B & { return 
*s.shared; })\n        .def_property_readonly(\"copy\", [](const SharedFromThisRef &s) { return s.value; },\n                               py::return_value_policy::copy)\n        .def_readonly(\"holder_ref\", &SharedFromThisRef::shared)\n        .def_property_readonly(\"holder_copy\", [](const SharedFromThisRef &s) { return s.shared; },\n                               py::return_value_policy::copy)\n        .def(\"set_ref\", [](SharedFromThisRef &, const B &) { return true; })\n        .def(\"set_holder\", [](SharedFromThisRef &, std::shared_ptr<B>) { return true; });\n\n    // Issue #865: shared_from_this doesn't work with virtual inheritance\n    struct SharedFromThisVBase : std::enable_shared_from_this<SharedFromThisVBase> {\n        SharedFromThisVBase() = default;\n        SharedFromThisVBase(const SharedFromThisVBase &) = default;\n        virtual ~SharedFromThisVBase() = default;\n    };\n    struct SharedFromThisVirt : virtual SharedFromThisVBase {};\n    static std::shared_ptr<SharedFromThisVirt> sft(new SharedFromThisVirt());\n    py::class_<SharedFromThisVirt, std::shared_ptr<SharedFromThisVirt>>(m, \"SharedFromThisVirt\")\n        .def_static(\"get\", []() { return sft.get(); });\n\n    // test_move_only_holder\n    struct C {\n        C() { print_created(this); }\n        ~C() { print_destroyed(this); }\n    };\n    py::class_<C, custom_unique_ptr<C>>(m, \"TypeWithMoveOnlyHolder\")\n        .def_static(\"make\", []() { return custom_unique_ptr<C>(new C); });\n\n    // test_holder_with_addressof_operator\n    struct TypeForHolderWithAddressOf {\n        TypeForHolderWithAddressOf() { print_created(this); }\n        TypeForHolderWithAddressOf(const TypeForHolderWithAddressOf &) { print_copy_created(this); }\n        TypeForHolderWithAddressOf(TypeForHolderWithAddressOf &&) { print_move_created(this); }\n        ~TypeForHolderWithAddressOf() { print_destroyed(this); }\n        std::string toString() const {\n            return 
\"TypeForHolderWithAddressOf[\" + std::to_string(value) + \"]\";\n        }\n        int value = 42;\n    };\n    using HolderWithAddressOf = shared_ptr_with_addressof_operator<TypeForHolderWithAddressOf>;\n    py::class_<TypeForHolderWithAddressOf, HolderWithAddressOf>(m, \"TypeForHolderWithAddressOf\")\n        .def_static(\"make\", []() { return HolderWithAddressOf(new TypeForHolderWithAddressOf); })\n        .def(\"get\", [](const HolderWithAddressOf &self) { return self.get(); })\n        .def(\"print_object_1\", [](const TypeForHolderWithAddressOf *obj) { py::print(obj->toString()); })\n        .def(\"print_object_2\", [](HolderWithAddressOf obj) { py::print(obj.get()->toString()); })\n        .def(\"print_object_3\", [](const HolderWithAddressOf &obj) { py::print(obj.get()->toString()); })\n        .def(\"print_object_4\", [](const HolderWithAddressOf *obj) { py::print((*obj).get()->toString()); });\n\n    // test_move_only_holder_with_addressof_operator\n    struct TypeForMoveOnlyHolderWithAddressOf {\n        TypeForMoveOnlyHolderWithAddressOf(int value) : value{value} { print_created(this); }\n        ~TypeForMoveOnlyHolderWithAddressOf() { print_destroyed(this); }\n        std::string toString() const {\n            return \"MoveOnlyHolderWithAddressOf[\" + std::to_string(value) + \"]\";\n        }\n        int value;\n    };\n    using MoveOnlyHolderWithAddressOf = unique_ptr_with_addressof_operator<TypeForMoveOnlyHolderWithAddressOf>;\n    py::class_<TypeForMoveOnlyHolderWithAddressOf, MoveOnlyHolderWithAddressOf>(m, \"TypeForMoveOnlyHolderWithAddressOf\")\n        .def_static(\"make\", []() { return MoveOnlyHolderWithAddressOf(new TypeForMoveOnlyHolderWithAddressOf(0)); })\n        .def_readwrite(\"value\", &TypeForMoveOnlyHolderWithAddressOf::value)\n        .def(\"print_object\", [](const TypeForMoveOnlyHolderWithAddressOf *obj) { py::print(obj->toString()); });\n\n    // test_smart_ptr_from_default\n    struct HeldByDefaultHolder { };\n    
py::class_<HeldByDefaultHolder>(m, \"HeldByDefaultHolder\")\n        .def(py::init<>())\n        .def_static(\"load_shared_ptr\", [](std::shared_ptr<HeldByDefaultHolder>) {});\n\n    // test_shared_ptr_gc\n    // #187: issue involving std::shared_ptr<> return value policy & garbage collection\n    struct ElementBase {\n        virtual ~ElementBase() { } /* Force creation of virtual table */\n    };\n    py::class_<ElementBase, std::shared_ptr<ElementBase>>(m, \"ElementBase\");\n\n    struct ElementA : ElementBase {\n        ElementA(int v) : v(v) { }\n        int value() { return v; }\n        int v;\n    };\n    py::class_<ElementA, ElementBase, std::shared_ptr<ElementA>>(m, \"ElementA\")\n        .def(py::init<int>())\n        .def(\"value\", &ElementA::value);\n\n    struct ElementList {\n        void add(std::shared_ptr<ElementBase> e) { l.push_back(e); }\n        std::vector<std::shared_ptr<ElementBase>> l;\n    };\n    py::class_<ElementList, std::shared_ptr<ElementList>>(m, \"ElementList\")\n        .def(py::init<>())\n        .def(\"add\", &ElementList::add)\n        .def(\"get\", [](ElementList &el) {\n            py::list list;\n            for (auto &e : el.l)\n                list.append(py::cast(e));\n            return list;\n        });\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_smart_ptr.py",
    "content": "import pytest\nfrom pybind11_tests import smart_ptr as m\nfrom pybind11_tests import ConstructorStats\n\n\ndef test_smart_ptr(capture):\n    # Object1\n    for i, o in enumerate([m.make_object_1(), m.make_object_2(), m.MyObject1(3)], start=1):\n        assert o.getRefCount() == 1\n        with capture:\n            m.print_object_1(o)\n            m.print_object_2(o)\n            m.print_object_3(o)\n            m.print_object_4(o)\n        assert capture == \"MyObject1[{i}]\\n\".format(i=i) * 4\n\n    for i, o in enumerate([m.make_myobject1_1(), m.make_myobject1_2(), m.MyObject1(6), 7],\n                          start=4):\n        print(o)\n        with capture:\n            if not isinstance(o, int):\n                m.print_object_1(o)\n                m.print_object_2(o)\n                m.print_object_3(o)\n                m.print_object_4(o)\n            m.print_myobject1_1(o)\n            m.print_myobject1_2(o)\n            m.print_myobject1_3(o)\n            m.print_myobject1_4(o)\n        assert capture == \"MyObject1[{i}]\\n\".format(i=i) * (4 if isinstance(o, int) else 8)\n\n    cstats = ConstructorStats.get(m.MyObject1)\n    assert cstats.alive() == 0\n    expected_values = ['MyObject1[{}]'.format(i) for i in range(1, 7)] + ['MyObject1[7]'] * 4\n    assert cstats.values() == expected_values\n    assert cstats.default_constructions == 0\n    assert cstats.copy_constructions == 0\n    # assert cstats.move_constructions >= 0 # Doesn't invoke any\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n    # Object2\n    for i, o in zip([8, 6, 7], [m.MyObject2(8), m.make_myobject2_1(), m.make_myobject2_2()]):\n        print(o)\n        with capture:\n            m.print_myobject2_1(o)\n            m.print_myobject2_2(o)\n            m.print_myobject2_3(o)\n            m.print_myobject2_4(o)\n        assert capture == \"MyObject2[{i}]\\n\".format(i=i) * 4\n\n    cstats = ConstructorStats.get(m.MyObject2)\n    
assert cstats.alive() == 1\n    o = None\n    assert cstats.alive() == 0\n    assert cstats.values() == ['MyObject2[8]', 'MyObject2[6]', 'MyObject2[7]']\n    assert cstats.default_constructions == 0\n    assert cstats.copy_constructions == 0\n    # assert cstats.move_constructions >= 0 # Doesn't invoke any\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n    # Object3\n    for i, o in zip([9, 8, 9], [m.MyObject3(9), m.make_myobject3_1(), m.make_myobject3_2()]):\n        print(o)\n        with capture:\n            m.print_myobject3_1(o)\n            m.print_myobject3_2(o)\n            m.print_myobject3_3(o)\n            m.print_myobject3_4(o)\n        assert capture == \"MyObject3[{i}]\\n\".format(i=i) * 4\n\n    cstats = ConstructorStats.get(m.MyObject3)\n    assert cstats.alive() == 1\n    o = None\n    assert cstats.alive() == 0\n    assert cstats.values() == ['MyObject3[9]', 'MyObject3[8]', 'MyObject3[9]']\n    assert cstats.default_constructions == 0\n    assert cstats.copy_constructions == 0\n    # assert cstats.move_constructions >= 0 # Doesn't invoke any\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n    # Object\n    cstats = ConstructorStats.get(m.Object)\n    assert cstats.alive() == 0\n    assert cstats.values() == []\n    assert cstats.default_constructions == 10\n    assert cstats.copy_constructions == 0\n    # assert cstats.move_constructions >= 0 # Doesn't invoke any\n    assert cstats.copy_assignments == 0\n    assert cstats.move_assignments == 0\n\n    # ref<>\n    cstats = m.cstats_ref()\n    assert cstats.alive() == 0\n    assert cstats.values() == ['from pointer'] * 10\n    assert cstats.default_constructions == 30\n    assert cstats.copy_constructions == 12\n    # assert cstats.move_constructions >= 0 # Doesn't invoke any\n    assert cstats.copy_assignments == 30\n    assert cstats.move_assignments == 0\n\n\ndef test_smart_ptr_refcounting():\n    assert 
m.test_object1_refcounting()\n\n\ndef test_unique_nodelete():\n    o = m.MyObject4(23)\n    assert o.value == 23\n    cstats = ConstructorStats.get(m.MyObject4)\n    assert cstats.alive() == 1\n    del o\n    assert cstats.alive() == 1  # Leak, but that's intentional\n\n\ndef test_unique_nodelete4a():\n    o = m.MyObject4a(23)\n    assert o.value == 23\n    cstats = ConstructorStats.get(m.MyObject4a)\n    assert cstats.alive() == 1\n    del o\n    assert cstats.alive() == 1  # Leak, but that's intentional\n\n\ndef test_unique_deleter():\n    o = m.MyObject4b(23)\n    assert o.value == 23\n    cstats4a = ConstructorStats.get(m.MyObject4a)\n    assert cstats4a.alive() == 2  # Two becaue of previous test\n    cstats4b = ConstructorStats.get(m.MyObject4b)\n    assert cstats4b.alive() == 1\n    del o\n    assert cstats4a.alive() == 1  # Should now only be one leftover from previous test\n    assert cstats4b.alive() == 0  # Should be deleted\n\n\ndef test_large_holder():\n    o = m.MyObject5(5)\n    assert o.value == 5\n    cstats = ConstructorStats.get(m.MyObject5)\n    assert cstats.alive() == 1\n    del o\n    assert cstats.alive() == 0\n\n\ndef test_shared_ptr_and_references():\n    s = m.SharedPtrRef()\n    stats = ConstructorStats.get(m.A)\n    assert stats.alive() == 2\n\n    ref = s.ref  # init_holder_helper(holder_ptr=false, owned=false)\n    assert stats.alive() == 2\n    assert s.set_ref(ref)\n    with pytest.raises(RuntimeError) as excinfo:\n        assert s.set_holder(ref)\n    assert \"Unable to cast from non-held to held instance\" in str(excinfo.value)\n\n    copy = s.copy  # init_holder_helper(holder_ptr=false, owned=true)\n    assert stats.alive() == 3\n    assert s.set_ref(copy)\n    assert s.set_holder(copy)\n\n    holder_ref = s.holder_ref  # init_holder_helper(holder_ptr=true, owned=false)\n    assert stats.alive() == 3\n    assert s.set_ref(holder_ref)\n    assert s.set_holder(holder_ref)\n\n    holder_copy = s.holder_copy  # 
init_holder_helper(holder_ptr=true, owned=true)\n    assert stats.alive() == 3\n    assert s.set_ref(holder_copy)\n    assert s.set_holder(holder_copy)\n\n    del ref, copy, holder_ref, holder_copy, s\n    assert stats.alive() == 0\n\n\ndef test_shared_ptr_from_this_and_references():\n    s = m.SharedFromThisRef()\n    stats = ConstructorStats.get(m.B)\n    assert stats.alive() == 2\n\n    ref = s.ref  # init_holder_helper(holder_ptr=false, owned=false, bad_wp=false)\n    assert stats.alive() == 2\n    assert s.set_ref(ref)\n    assert s.set_holder(ref)  # std::enable_shared_from_this can create a holder from a reference\n\n    bad_wp = s.bad_wp  # init_holder_helper(holder_ptr=false, owned=false, bad_wp=true)\n    assert stats.alive() == 2\n    assert s.set_ref(bad_wp)\n    with pytest.raises(RuntimeError) as excinfo:\n        assert s.set_holder(bad_wp)\n    assert \"Unable to cast from non-held to held instance\" in str(excinfo.value)\n\n    copy = s.copy  # init_holder_helper(holder_ptr=false, owned=true, bad_wp=false)\n    assert stats.alive() == 3\n    assert s.set_ref(copy)\n    assert s.set_holder(copy)\n\n    holder_ref = s.holder_ref  # init_holder_helper(holder_ptr=true, owned=false, bad_wp=false)\n    assert stats.alive() == 3\n    assert s.set_ref(holder_ref)\n    assert s.set_holder(holder_ref)\n\n    holder_copy = s.holder_copy  # init_holder_helper(holder_ptr=true, owned=true, bad_wp=false)\n    assert stats.alive() == 3\n    assert s.set_ref(holder_copy)\n    assert s.set_holder(holder_copy)\n\n    del ref, bad_wp, copy, holder_ref, holder_copy, s\n    assert stats.alive() == 0\n\n    z = m.SharedFromThisVirt.get()\n    y = m.SharedFromThisVirt.get()\n    assert y is z\n\n\ndef test_move_only_holder():\n    a = m.TypeWithMoveOnlyHolder.make()\n    stats = ConstructorStats.get(m.TypeWithMoveOnlyHolder)\n    assert stats.alive() == 1\n    del a\n    assert stats.alive() == 0\n\n\ndef test_holder_with_addressof_operator():\n    # this test must not 
throw exception from c++\n    a = m.TypeForHolderWithAddressOf.make()\n    a.print_object_1()\n    a.print_object_2()\n    a.print_object_3()\n    a.print_object_4()\n\n    stats = ConstructorStats.get(m.TypeForHolderWithAddressOf)\n    assert stats.alive() == 1\n\n    np = m.TypeForHolderWithAddressOf.make()\n    assert stats.alive() == 2\n    del a\n    assert stats.alive() == 1\n    del np\n    assert stats.alive() == 0\n\n    b = m.TypeForHolderWithAddressOf.make()\n    c = b\n    assert b.get() is c.get()\n    assert stats.alive() == 1\n\n    del b\n    assert stats.alive() == 1\n\n    del c\n    assert stats.alive() == 0\n\n\ndef test_move_only_holder_with_addressof_operator():\n    a = m.TypeForMoveOnlyHolderWithAddressOf.make()\n    a.print_object()\n\n    stats = ConstructorStats.get(m.TypeForMoveOnlyHolderWithAddressOf)\n    assert stats.alive() == 1\n\n    a.value = 42\n    assert a.value == 42\n\n    del a\n    assert stats.alive() == 0\n\n\ndef test_smart_ptr_from_default():\n    instance = m.HeldByDefaultHolder()\n    with pytest.raises(RuntimeError) as excinfo:\n        m.HeldByDefaultHolder.load_shared_ptr(instance)\n    assert \"Unable to load a custom holder type from a default-holder instance\" in str(excinfo)\n\n\ndef test_shared_ptr_gc():\n    \"\"\"#187: issue involving std::shared_ptr<> return value policy & garbage collection\"\"\"\n    el = m.ElementList()\n    for i in range(10):\n        el.add(m.ElementA(i))\n    pytest.gc_collect()\n    for i, v in enumerate(el.get()):\n        assert i == v.value()\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_stl.cpp",
    "content": "/*\n    tests/test_stl.cpp -- STL type casters\n\n    Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <pybind11/stl.h>\n\n#include <vector>\n#include <string>\n\n// Test with `std::variant` in C++17 mode, or with `boost::variant` in C++11/14\n#if PYBIND11_HAS_VARIANT\nusing std::variant;\n#elif defined(PYBIND11_TEST_BOOST) && (!defined(_MSC_VER) || _MSC_VER >= 1910)\n#  include <boost/variant.hpp>\n#  define PYBIND11_HAS_VARIANT 1\nusing boost::variant;\n\nnamespace pybind11 { namespace detail {\ntemplate <typename... Ts>\nstruct type_caster<boost::variant<Ts...>> : variant_caster<boost::variant<Ts...>> {};\n\ntemplate <>\nstruct visit_helper<boost::variant> {\n    template <typename... Args>\n    static auto call(Args &&...args) -> decltype(boost::apply_visitor(args...)) {\n        return boost::apply_visitor(args...);\n    }\n};\n}} // namespace pybind11::detail\n#endif\n\nPYBIND11_MAKE_OPAQUE(std::vector<std::string, std::allocator<std::string>>);\n\n/// Issue #528: templated constructor\nstruct TplCtorClass {\n    template <typename T> TplCtorClass(const T &) { }\n    bool operator==(const TplCtorClass &) const { return true; }\n};\n\nnamespace std {\n    template <>\n    struct hash<TplCtorClass> { size_t operator()(const TplCtorClass &) const { return 0; } };\n}\n\n\nTEST_SUBMODULE(stl, m) {\n    // test_vector\n    m.def(\"cast_vector\", []() { return std::vector<int>{1}; });\n    m.def(\"load_vector\", [](const std::vector<int> &v) { return v.at(0) == 1 && v.at(1) == 2; });\n    // `std::vector<bool>` is special because it returns proxy objects instead of references\n    m.def(\"cast_bool_vector\", []() { return std::vector<bool>{true, false}; });\n    m.def(\"load_bool_vector\", [](const std::vector<bool> &v) {\n        
return v.at(0) == true && v.at(1) == false;\n    });\n    // Unnumbered regression (caused by #936): pointers to stl containers aren't castable\n    static std::vector<RValueCaster> lvv{2};\n    m.def(\"cast_ptr_vector\", []() { return &lvv; });\n\n    // test_deque\n    m.def(\"cast_deque\", []() { return std::deque<int>{1}; });\n    m.def(\"load_deque\", [](const std::deque<int> &v) { return v.at(0) == 1 && v.at(1) == 2; });\n\n    // test_array\n    m.def(\"cast_array\", []() { return std::array<int, 2> {{1 , 2}}; });\n    m.def(\"load_array\", [](const std::array<int, 2> &a) { return a[0] == 1 && a[1] == 2; });\n\n    // test_valarray\n    m.def(\"cast_valarray\", []() { return std::valarray<int>{1, 4, 9}; });\n    m.def(\"load_valarray\", [](const std::valarray<int>& v) {\n        return v.size() == 3 && v[0] == 1 && v[1] == 4 && v[2] == 9;\n    });\n\n    // test_map\n    m.def(\"cast_map\", []() { return std::map<std::string, std::string>{{\"key\", \"value\"}}; });\n    m.def(\"load_map\", [](const std::map<std::string, std::string> &map) {\n        return map.at(\"key\") == \"value\" && map.at(\"key2\") == \"value2\";\n    });\n\n    // test_set\n    m.def(\"cast_set\", []() { return std::set<std::string>{\"key1\", \"key2\"}; });\n    m.def(\"load_set\", [](const std::set<std::string> &set) {\n        return set.count(\"key1\") && set.count(\"key2\") && set.count(\"key3\");\n    });\n\n    // test_recursive_casting\n    m.def(\"cast_rv_vector\", []() { return std::vector<RValueCaster>{2}; });\n    m.def(\"cast_rv_array\", []() { return std::array<RValueCaster, 3>(); });\n    // NB: map and set keys are `const`, so while we technically do move them (as `const Type &&`),\n    // casters don't typically do anything with that, which means they fall to the `const Type &`\n    // caster.\n    m.def(\"cast_rv_map\", []() { return std::unordered_map<std::string, RValueCaster>{{\"a\", RValueCaster{}}}; });\n    m.def(\"cast_rv_nested\", []() {\n        
std::vector<std::array<std::list<std::unordered_map<std::string, RValueCaster>>, 2>> v;\n        v.emplace_back(); // add an array\n        v.back()[0].emplace_back(); // add a map to the array\n        v.back()[0].back().emplace(\"b\", RValueCaster{});\n        v.back()[0].back().emplace(\"c\", RValueCaster{});\n        v.back()[1].emplace_back(); // add a map to the array\n        v.back()[1].back().emplace(\"a\", RValueCaster{});\n        return v;\n    });\n    static std::array<RValueCaster, 2> lva;\n    static std::unordered_map<std::string, RValueCaster> lvm{{\"a\", RValueCaster{}}, {\"b\", RValueCaster{}}};\n    static std::unordered_map<std::string, std::vector<std::list<std::array<RValueCaster, 2>>>> lvn;\n    lvn[\"a\"].emplace_back(); // add a list\n    lvn[\"a\"].back().emplace_back(); // add an array\n    lvn[\"a\"].emplace_back(); // another list\n    lvn[\"a\"].back().emplace_back(); // add an array\n    lvn[\"b\"].emplace_back(); // add a list\n    lvn[\"b\"].back().emplace_back(); // add an array\n    lvn[\"b\"].back().emplace_back(); // add another array\n    m.def(\"cast_lv_vector\", []() -> const decltype(lvv) & { return lvv; });\n    m.def(\"cast_lv_array\", []() -> const decltype(lva) & { return lva; });\n    m.def(\"cast_lv_map\", []() -> const decltype(lvm) & { return lvm; });\n    m.def(\"cast_lv_nested\", []() -> const decltype(lvn) & { return lvn; });\n    // #853:\n    m.def(\"cast_unique_ptr_vector\", []() {\n        std::vector<std::unique_ptr<UserType>> v;\n        v.emplace_back(new UserType{7});\n        v.emplace_back(new UserType{42});\n        return v;\n    });\n\n    // test_move_out_container\n    struct MoveOutContainer {\n        struct Value { int value; };\n        std::list<Value> move_list() const { return {{0}, {1}, {2}}; }\n    };\n    py::class_<MoveOutContainer::Value>(m, \"MoveOutContainerValue\")\n        .def_readonly(\"value\", &MoveOutContainer::Value::value);\n    py::class_<MoveOutContainer>(m, 
\"MoveOutContainer\")\n        .def(py::init<>())\n        .def_property_readonly(\"move_list\", &MoveOutContainer::move_list);\n\n    // Class that can be move- and copy-constructed, but not assigned\n    struct NoAssign {\n        int value;\n\n        explicit NoAssign(int value = 0) : value(value) { }\n        NoAssign(const NoAssign &) = default;\n        NoAssign(NoAssign &&) = default;\n\n        NoAssign &operator=(const NoAssign &) = delete;\n        NoAssign &operator=(NoAssign &&) = delete;\n    };\n    py::class_<NoAssign>(m, \"NoAssign\", \"Class with no C++ assignment operators\")\n        .def(py::init<>())\n        .def(py::init<int>());\n\n#ifdef PYBIND11_HAS_OPTIONAL\n    // test_optional\n    m.attr(\"has_optional\") = true;\n\n    using opt_int = std::optional<int>;\n    using opt_no_assign = std::optional<NoAssign>;\n    m.def(\"double_or_zero\", [](const opt_int& x) -> int {\n        return x.value_or(0) * 2;\n    });\n    m.def(\"half_or_none\", [](int x) -> opt_int {\n        return x ? opt_int(x / 2) : opt_int();\n    });\n    m.def(\"test_nullopt\", [](opt_int x) {\n        return x.value_or(42);\n    }, py::arg_v(\"x\", std::nullopt, \"None\"));\n    m.def(\"test_no_assign\", [](const opt_no_assign &x) {\n        return x ? x->value : 42;\n    }, py::arg_v(\"x\", std::nullopt, \"None\"));\n\n    m.def(\"nodefer_none_optional\", [](std::optional<int>) { return true; });\n    m.def(\"nodefer_none_optional\", [](py::none) { return false; });\n#endif\n\n#ifdef PYBIND11_HAS_EXP_OPTIONAL\n    // test_exp_optional\n    m.attr(\"has_exp_optional\") = true;\n\n    using exp_opt_int = std::experimental::optional<int>;\n    using exp_opt_no_assign = std::experimental::optional<NoAssign>;\n    m.def(\"double_or_zero_exp\", [](const exp_opt_int& x) -> int {\n        return x.value_or(0) * 2;\n    });\n    m.def(\"half_or_none_exp\", [](int x) -> exp_opt_int {\n        return x ? 
exp_opt_int(x / 2) : exp_opt_int();\n    });\n    m.def(\"test_nullopt_exp\", [](exp_opt_int x) {\n        return x.value_or(42);\n    }, py::arg_v(\"x\", std::experimental::nullopt, \"None\"));\n    m.def(\"test_no_assign_exp\", [](const exp_opt_no_assign &x) {\n        return x ? x->value : 42;\n    }, py::arg_v(\"x\", std::experimental::nullopt, \"None\"));\n#endif\n\n#ifdef PYBIND11_HAS_VARIANT\n    static_assert(std::is_same<py::detail::variant_caster_visitor::result_type, py::handle>::value,\n                  \"visitor::result_type is required by boost::variant in C++11 mode\");\n\n    struct visitor {\n        using result_type = const char *;\n\n        result_type operator()(int) { return \"int\"; }\n        result_type operator()(std::string) { return \"std::string\"; }\n        result_type operator()(double) { return \"double\"; }\n        result_type operator()(std::nullptr_t) { return \"std::nullptr_t\"; }\n    };\n\n    // test_variant\n    m.def(\"load_variant\", [](variant<int, std::string, double, std::nullptr_t> v) {\n        return py::detail::visit_helper<variant>::call(visitor(), v);\n    });\n    m.def(\"load_variant_2pass\", [](variant<double, int> v) {\n        return py::detail::visit_helper<variant>::call(visitor(), v);\n    });\n    m.def(\"cast_variant\", []() {\n        using V = variant<int, std::string>;\n        return py::make_tuple(V(5), V(\"Hello\"));\n    });\n#endif\n\n    // #528: templated constructor\n    // (no python tests: the test here is that this compiles)\n    m.def(\"tpl_ctor_vector\", [](std::vector<TplCtorClass> &) {});\n    m.def(\"tpl_ctor_map\", [](std::unordered_map<TplCtorClass, TplCtorClass> &) {});\n    m.def(\"tpl_ctor_set\", [](std::unordered_set<TplCtorClass> &) {});\n#if defined(PYBIND11_HAS_OPTIONAL)\n    m.def(\"tpl_constr_optional\", [](std::optional<TplCtorClass> &) {});\n#elif defined(PYBIND11_HAS_EXP_OPTIONAL)\n    m.def(\"tpl_constr_optional\", [](std::experimental::optional<TplCtorClass> &) 
{});\n#endif\n\n    // test_vec_of_reference_wrapper\n    // #171: Can't return STL structures containing reference wrapper\n    m.def(\"return_vec_of_reference_wrapper\", [](std::reference_wrapper<UserType> p4) {\n        static UserType p1{1}, p2{2}, p3{3};\n        return std::vector<std::reference_wrapper<UserType>> {\n            std::ref(p1), std::ref(p2), std::ref(p3), p4\n        };\n    });\n\n    // test_stl_pass_by_pointer\n    m.def(\"stl_pass_by_pointer\", [](std::vector<int>* v) { return *v; }, \"v\"_a=nullptr);\n\n    // #1258: pybind11/stl.h converts string to vector<string>\n    m.def(\"func_with_string_or_vector_string_arg_overload\", [](std::vector<std::string>) { return 1; });\n    m.def(\"func_with_string_or_vector_string_arg_overload\", [](std::list<std::string>) { return 2; });\n    m.def(\"func_with_string_or_vector_string_arg_overload\", [](std::string) { return 3; });\n\n    class Placeholder {\n    public:\n        Placeholder() { print_created(this); }\n        Placeholder(const Placeholder &) = delete;\n        ~Placeholder() { print_destroyed(this); }\n    };\n    py::class_<Placeholder>(m, \"Placeholder\");\n\n    /// test_stl_vector_ownership\n    m.def(\"test_stl_ownership\",\n          []() {\n              std::vector<Placeholder *> result;\n              result.push_back(new Placeholder());\n              return result;\n          },\n          py::return_value_policy::take_ownership);\n\n    m.def(\"array_cast_sequence\", [](std::array<int, 3> x) { return x; });\n\n    /// test_issue_1561\n    struct Issue1561Inner { std::string data; };\n    struct Issue1561Outer { std::vector<Issue1561Inner> list; };\n\n    py::class_<Issue1561Inner>(m, \"Issue1561Inner\")\n        .def(py::init<std::string>())\n        .def_readwrite(\"data\", &Issue1561Inner::data);\n\n    py::class_<Issue1561Outer>(m, \"Issue1561Outer\")\n        .def(py::init<>())\n        .def_readwrite(\"list\", &Issue1561Outer::list);\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_stl.py",
    "content": "import pytest\n\nfrom pybind11_tests import stl as m\nfrom pybind11_tests import UserType\nfrom pybind11_tests import ConstructorStats\n\n\ndef test_vector(doc):\n    \"\"\"std::vector <-> list\"\"\"\n    lst = m.cast_vector()\n    assert lst == [1]\n    lst.append(2)\n    assert m.load_vector(lst)\n    assert m.load_vector(tuple(lst))\n\n    assert m.cast_bool_vector() == [True, False]\n    assert m.load_bool_vector([True, False])\n\n    assert doc(m.cast_vector) == \"cast_vector() -> List[int]\"\n    assert doc(m.load_vector) == \"load_vector(arg0: List[int]) -> bool\"\n\n    # Test regression caused by 936: pointers to stl containers weren't castable\n    assert m.cast_ptr_vector() == [\"lvalue\", \"lvalue\"]\n\n\ndef test_deque(doc):\n    \"\"\"std::deque <-> list\"\"\"\n    lst = m.cast_deque()\n    assert lst == [1]\n    lst.append(2)\n    assert m.load_deque(lst)\n    assert m.load_deque(tuple(lst))\n\n\ndef test_array(doc):\n    \"\"\"std::array <-> list\"\"\"\n    lst = m.cast_array()\n    assert lst == [1, 2]\n    assert m.load_array(lst)\n\n    assert doc(m.cast_array) == \"cast_array() -> List[int[2]]\"\n    assert doc(m.load_array) == \"load_array(arg0: List[int[2]]) -> bool\"\n\n\ndef test_valarray(doc):\n    \"\"\"std::valarray <-> list\"\"\"\n    lst = m.cast_valarray()\n    assert lst == [1, 4, 9]\n    assert m.load_valarray(lst)\n\n    assert doc(m.cast_valarray) == \"cast_valarray() -> List[int]\"\n    assert doc(m.load_valarray) == \"load_valarray(arg0: List[int]) -> bool\"\n\n\ndef test_map(doc):\n    \"\"\"std::map <-> dict\"\"\"\n    d = m.cast_map()\n    assert d == {\"key\": \"value\"}\n    d[\"key2\"] = \"value2\"\n    assert m.load_map(d)\n\n    assert doc(m.cast_map) == \"cast_map() -> Dict[str, str]\"\n    assert doc(m.load_map) == \"load_map(arg0: Dict[str, str]) -> bool\"\n\n\ndef test_set(doc):\n    \"\"\"std::set <-> set\"\"\"\n    s = m.cast_set()\n    assert s == {\"key1\", \"key2\"}\n    s.add(\"key3\")\n    
assert m.load_set(s)\n\n    assert doc(m.cast_set) == \"cast_set() -> Set[str]\"\n    assert doc(m.load_set) == \"load_set(arg0: Set[str]) -> bool\"\n\n\ndef test_recursive_casting():\n    \"\"\"Tests that stl casters preserve lvalue/rvalue context for container values\"\"\"\n    assert m.cast_rv_vector() == [\"rvalue\", \"rvalue\"]\n    assert m.cast_lv_vector() == [\"lvalue\", \"lvalue\"]\n    assert m.cast_rv_array() == [\"rvalue\", \"rvalue\", \"rvalue\"]\n    assert m.cast_lv_array() == [\"lvalue\", \"lvalue\"]\n    assert m.cast_rv_map() == {\"a\": \"rvalue\"}\n    assert m.cast_lv_map() == {\"a\": \"lvalue\", \"b\": \"lvalue\"}\n    assert m.cast_rv_nested() == [[[{\"b\": \"rvalue\", \"c\": \"rvalue\"}], [{\"a\": \"rvalue\"}]]]\n    assert m.cast_lv_nested() == {\n        \"a\": [[[\"lvalue\", \"lvalue\"]], [[\"lvalue\", \"lvalue\"]]],\n        \"b\": [[[\"lvalue\", \"lvalue\"], [\"lvalue\", \"lvalue\"]]]\n    }\n\n    # Issue #853 test case:\n    z = m.cast_unique_ptr_vector()\n    assert z[0].value == 7 and z[1].value == 42\n\n\ndef test_move_out_container():\n    \"\"\"Properties use the `reference_internal` policy by default. If the underlying function\n    returns an rvalue, the policy is automatically changed to `move` to avoid referencing\n    a temporary. 
In case the return value is a container of user-defined types, the policy\n    also needs to be applied to the elements, not just the container.\"\"\"\n    c = m.MoveOutContainer()\n    moved_out_list = c.move_list\n    assert [x.value for x in moved_out_list] == [0, 1, 2]\n\n\n@pytest.mark.skipif(not hasattr(m, \"has_optional\"), reason='no <optional>')\ndef test_optional():\n    assert m.double_or_zero(None) == 0\n    assert m.double_or_zero(42) == 84\n    pytest.raises(TypeError, m.double_or_zero, 'foo')\n\n    assert m.half_or_none(0) is None\n    assert m.half_or_none(42) == 21\n    pytest.raises(TypeError, m.half_or_none, 'foo')\n\n    assert m.test_nullopt() == 42\n    assert m.test_nullopt(None) == 42\n    assert m.test_nullopt(42) == 42\n    assert m.test_nullopt(43) == 43\n\n    assert m.test_no_assign() == 42\n    assert m.test_no_assign(None) == 42\n    assert m.test_no_assign(m.NoAssign(43)) == 43\n    pytest.raises(TypeError, m.test_no_assign, 43)\n\n    assert m.nodefer_none_optional(None)\n\n\n@pytest.mark.skipif(not hasattr(m, \"has_exp_optional\"), reason='no <experimental/optional>')\ndef test_exp_optional():\n    assert m.double_or_zero_exp(None) == 0\n    assert m.double_or_zero_exp(42) == 84\n    pytest.raises(TypeError, m.double_or_zero_exp, 'foo')\n\n    assert m.half_or_none_exp(0) is None\n    assert m.half_or_none_exp(42) == 21\n    pytest.raises(TypeError, m.half_or_none_exp, 'foo')\n\n    assert m.test_nullopt_exp() == 42\n    assert m.test_nullopt_exp(None) == 42\n    assert m.test_nullopt_exp(42) == 42\n    assert m.test_nullopt_exp(43) == 43\n\n    assert m.test_no_assign_exp() == 42\n    assert m.test_no_assign_exp(None) == 42\n    assert m.test_no_assign_exp(m.NoAssign(43)) == 43\n    pytest.raises(TypeError, m.test_no_assign_exp, 43)\n\n\n@pytest.mark.skipif(not hasattr(m, \"load_variant\"), reason='no <variant>')\ndef test_variant(doc):\n    assert m.load_variant(1) == \"int\"\n    assert m.load_variant(\"1\") == 
\"std::string\"\n    assert m.load_variant(1.0) == \"double\"\n    assert m.load_variant(None) == \"std::nullptr_t\"\n\n    assert m.load_variant_2pass(1) == \"int\"\n    assert m.load_variant_2pass(1.0) == \"double\"\n\n    assert m.cast_variant() == (5, \"Hello\")\n\n    assert doc(m.load_variant) == \"load_variant(arg0: Union[int, str, float, None]) -> str\"\n\n\ndef test_vec_of_reference_wrapper():\n    \"\"\"#171: Can't return reference wrappers (or STL structures containing them)\"\"\"\n    assert str(m.return_vec_of_reference_wrapper(UserType(4))) == \\\n        \"[UserType(1), UserType(2), UserType(3), UserType(4)]\"\n\n\ndef test_stl_pass_by_pointer(msg):\n    \"\"\"Passing nullptr or None to an STL container pointer is not expected to work\"\"\"\n    with pytest.raises(TypeError) as excinfo:\n        m.stl_pass_by_pointer()  # default value is `nullptr`\n    assert msg(excinfo.value) == \"\"\"\n        stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:\n            1. (v: List[int] = None) -> List[int]\n\n        Invoked with:\n    \"\"\"  # noqa: E501 line too long\n\n    with pytest.raises(TypeError) as excinfo:\n        m.stl_pass_by_pointer(None)\n    assert msg(excinfo.value) == \"\"\"\n        stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:\n            1. (v: List[int] = None) -> List[int]\n\n        Invoked with: None\n    \"\"\"  # noqa: E501 line too long\n\n    assert m.stl_pass_by_pointer([1, 2, 3]) == [1, 2, 3]\n\n\ndef test_missing_header_message():\n    \"\"\"Trying convert `list` to a `std::vector`, or vice versa, without including\n    <pybind11/stl.h> should result in a helpful suggestion in the error message\"\"\"\n    import pybind11_cross_module_tests as cm\n\n    expected_message = (\"Did you forget to `#include <pybind11/stl.h>`? 
Or <pybind11/complex.h>,\\n\"\n                        \"<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\\n\"\n                        \"conversions are optional and require extra headers to be included\\n\"\n                        \"when compiling your pybind11 module.\")\n\n    with pytest.raises(TypeError) as excinfo:\n        cm.missing_header_arg([1.0, 2.0, 3.0])\n    assert expected_message in str(excinfo.value)\n\n    with pytest.raises(TypeError) as excinfo:\n        cm.missing_header_return()\n    assert expected_message in str(excinfo.value)\n\n\ndef test_function_with_string_and_vector_string_arg():\n    \"\"\"Check if a string is NOT implicitly converted to a list, which was the\n    behavior before fix of issue #1258\"\"\"\n    assert m.func_with_string_or_vector_string_arg_overload(('A', 'B', )) == 2\n    assert m.func_with_string_or_vector_string_arg_overload(['A', 'B']) == 2\n    assert m.func_with_string_or_vector_string_arg_overload('A') == 3\n\n\ndef test_stl_ownership():\n    cstats = ConstructorStats.get(m.Placeholder)\n    assert cstats.alive() == 0\n    r = m.test_stl_ownership()\n    assert len(r) == 1\n    del r\n    assert cstats.alive() == 0\n\n\ndef test_array_cast_sequence():\n    assert m.array_cast_sequence((1, 2, 3)) == [1, 2, 3]\n\n\ndef test_issue_1561():\n    \"\"\" check fix for issue #1561 \"\"\"\n    bar = m.Issue1561Outer()\n    bar.list = [m.Issue1561Inner('bar')]\n    bar.list\n    assert bar.list[0].data == 'bar'\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_stl_binders.cpp",
    "content": "/*\n    tests/test_stl_binders.cpp -- Usage of stl_binders functions\n\n    Copyright (c) 2016 Sergey Lyskov\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n\n#include <pybind11/stl_bind.h>\n#include <pybind11/numpy.h>\n#include <map>\n#include <deque>\n#include <unordered_map>\n\nclass El {\npublic:\n    El() = delete;\n    El(int v) : a(v) { }\n\n    int a;\n};\n\nstd::ostream & operator<<(std::ostream &s, El const&v) {\n    s << \"El{\" << v.a << '}';\n    return s;\n}\n\n/// Issue #487: binding std::vector<E> with E non-copyable\nclass E_nc {\npublic:\n    explicit E_nc(int i) : value{i} {}\n    E_nc(const E_nc &) = delete;\n    E_nc &operator=(const E_nc &) = delete;\n    E_nc(E_nc &&) = default;\n    E_nc &operator=(E_nc &&) = default;\n\n    int value;\n};\n\ntemplate <class Container> Container *one_to_n(int n) {\n    auto v = new Container();\n    for (int i = 1; i <= n; i++)\n        v->emplace_back(i);\n    return v;\n}\n\ntemplate <class Map> Map *times_ten(int n) {\n    auto m = new Map();\n    for (int i = 1; i <= n; i++)\n        m->emplace(int(i), E_nc(10*i));\n    return m;\n}\n\nTEST_SUBMODULE(stl_binders, m) {\n    // test_vector_int\n    py::bind_vector<std::vector<unsigned int>>(m, \"VectorInt\", py::buffer_protocol());\n\n    // test_vector_custom\n    py::class_<El>(m, \"El\")\n        .def(py::init<int>());\n    py::bind_vector<std::vector<El>>(m, \"VectorEl\");\n    py::bind_vector<std::vector<std::vector<El>>>(m, \"VectorVectorEl\");\n\n    // test_map_string_double\n    py::bind_map<std::map<std::string, double>>(m, \"MapStringDouble\");\n    py::bind_map<std::unordered_map<std::string, double>>(m, \"UnorderedMapStringDouble\");\n\n    // test_map_string_double_const\n    py::bind_map<std::map<std::string, double const>>(m, \"MapStringDoubleConst\");\n    py::bind_map<std::unordered_map<std::string, 
double const>>(m, \"UnorderedMapStringDoubleConst\");\n\n    py::class_<E_nc>(m, \"ENC\")\n        .def(py::init<int>())\n        .def_readwrite(\"value\", &E_nc::value);\n\n    // test_noncopyable_containers\n    py::bind_vector<std::vector<E_nc>>(m, \"VectorENC\");\n    m.def(\"get_vnc\", &one_to_n<std::vector<E_nc>>, py::return_value_policy::reference);\n    py::bind_vector<std::deque<E_nc>>(m, \"DequeENC\");\n    m.def(\"get_dnc\", &one_to_n<std::deque<E_nc>>, py::return_value_policy::reference);\n    py::bind_map<std::map<int, E_nc>>(m, \"MapENC\");\n    m.def(\"get_mnc\", &times_ten<std::map<int, E_nc>>, py::return_value_policy::reference);\n    py::bind_map<std::unordered_map<int, E_nc>>(m, \"UmapENC\");\n    m.def(\"get_umnc\", &times_ten<std::unordered_map<int, E_nc>>, py::return_value_policy::reference);\n\n    // test_vector_buffer\n    py::bind_vector<std::vector<unsigned char>>(m, \"VectorUChar\", py::buffer_protocol());\n    // no dtype declared for this version:\n    struct VUndeclStruct { bool w; uint32_t x; double y; bool z; };\n    m.def(\"create_undeclstruct\", [m] () mutable {\n        py::bind_vector<std::vector<VUndeclStruct>>(m, \"VectorUndeclStruct\", py::buffer_protocol());\n    });\n\n    // The rest depends on numpy:\n    try { py::module::import(\"numpy\"); }\n    catch (...) { return; }\n\n    // test_vector_buffer_numpy\n    struct VStruct { bool w; uint32_t x; double y; bool z; };\n    PYBIND11_NUMPY_DTYPE(VStruct, w, x, y, z);\n    py::class_<VStruct>(m, \"VStruct\").def_readwrite(\"x\", &VStruct::x);\n    py::bind_vector<std::vector<VStruct>>(m, \"VectorStruct\", py::buffer_protocol());\n    m.def(\"get_vectorstruct\", [] {return std::vector<VStruct> {{0, 5, 3.0, 1}, {1, 30, -1e4, 0}};});\n}\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_stl_binders.py",
    "content": "import pytest\nimport sys\nfrom pybind11_tests import stl_binders as m\n\nwith pytest.suppress(ImportError):\n    import numpy as np\n\n\ndef test_vector_int():\n    v_int = m.VectorInt([0, 0])\n    assert len(v_int) == 2\n    assert bool(v_int) is True\n\n    v_int2 = m.VectorInt([0, 0])\n    assert v_int == v_int2\n    v_int2[1] = 1\n    assert v_int != v_int2\n\n    v_int2.append(2)\n    v_int2.insert(0, 1)\n    v_int2.insert(0, 2)\n    v_int2.insert(0, 3)\n    v_int2.insert(6, 3)\n    assert str(v_int2) == \"VectorInt[3, 2, 1, 0, 1, 2, 3]\"\n    with pytest.raises(IndexError):\n        v_int2.insert(8, 4)\n\n    v_int.append(99)\n    v_int2[2:-2] = v_int\n    assert v_int2 == m.VectorInt([3, 2, 0, 0, 99, 2, 3])\n    del v_int2[1:3]\n    assert v_int2 == m.VectorInt([3, 0, 99, 2, 3])\n    del v_int2[0]\n    assert v_int2 == m.VectorInt([0, 99, 2, 3])\n\n\n# related to the PyPy's buffer protocol.\n@pytest.unsupported_on_pypy\ndef test_vector_buffer():\n    b = bytearray([1, 2, 3, 4])\n    v = m.VectorUChar(b)\n    assert v[1] == 2\n    v[2] = 5\n    mv = memoryview(v)  # We expose the buffer interface\n    if sys.version_info.major > 2:\n        assert mv[2] == 5\n        mv[2] = 6\n    else:\n        assert mv[2] == '\\x05'\n        mv[2] = '\\x06'\n    assert v[2] == 6\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.create_undeclstruct()  # Undeclared struct contents, no buffer interface\n    assert \"NumPy type info missing for \" in str(excinfo.value)\n\n\n@pytest.unsupported_on_pypy\n@pytest.requires_numpy\ndef test_vector_buffer_numpy():\n    a = np.array([1, 2, 3, 4], dtype=np.int32)\n    with pytest.raises(TypeError):\n        m.VectorInt(a)\n\n    a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=np.uintc)\n    v = m.VectorInt(a[0, :])\n    assert len(v) == 4\n    assert v[2] == 3\n    ma = np.asarray(v)\n    ma[2] = 5\n    assert v[2] == 5\n\n    v = m.VectorInt(a[:, 1])\n    assert len(v) == 3\n    assert 
v[2] == 10\n\n    v = m.get_vectorstruct()\n    assert v[0].x == 5\n    ma = np.asarray(v)\n    ma[1]['x'] = 99\n    assert v[1].x == 99\n\n    v = m.VectorStruct(np.zeros(3, dtype=np.dtype([('w', 'bool'), ('x', 'I'),\n                                                   ('y', 'float64'), ('z', 'bool')], align=True)))\n    assert len(v) == 3\n\n\ndef test_vector_bool():\n    import pybind11_cross_module_tests as cm\n\n    vv_c = cm.VectorBool()\n    for i in range(10):\n        vv_c.append(i % 2 == 0)\n    for i in range(10):\n        assert vv_c[i] == (i % 2 == 0)\n    assert str(vv_c) == \"VectorBool[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]\"\n\n\ndef test_vector_custom():\n    v_a = m.VectorEl()\n    v_a.append(m.El(1))\n    v_a.append(m.El(2))\n    assert str(v_a) == \"VectorEl[El{1}, El{2}]\"\n\n    vv_a = m.VectorVectorEl()\n    vv_a.append(v_a)\n    vv_b = vv_a[0]\n    assert str(vv_b) == \"VectorEl[El{1}, El{2}]\"\n\n\ndef test_map_string_double():\n    mm = m.MapStringDouble()\n    mm['a'] = 1\n    mm['b'] = 2.5\n\n    assert list(mm) == ['a', 'b']\n    assert list(mm.items()) == [('a', 1), ('b', 2.5)]\n    assert str(mm) == \"MapStringDouble{a: 1, b: 2.5}\"\n\n    um = m.UnorderedMapStringDouble()\n    um['ua'] = 1.1\n    um['ub'] = 2.6\n\n    assert sorted(list(um)) == ['ua', 'ub']\n    assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]\n    assert \"UnorderedMapStringDouble\" in str(um)\n\n\ndef test_map_string_double_const():\n    mc = m.MapStringDoubleConst()\n    mc['a'] = 10\n    mc['b'] = 20.5\n    assert str(mc) == \"MapStringDoubleConst{a: 10, b: 20.5}\"\n\n    umc = m.UnorderedMapStringDoubleConst()\n    umc['a'] = 11\n    umc['b'] = 21.5\n\n    str(umc)\n\n\ndef test_noncopyable_containers():\n    # std::vector\n    vnc = m.get_vnc(5)\n    for i in range(0, 5):\n        assert vnc[i].value == i + 1\n\n    for i, j in enumerate(vnc, start=1):\n        assert j.value == i\n\n    # std::deque\n    dnc = m.get_dnc(5)\n    for i in range(0, 5):\n      
  assert dnc[i].value == i + 1\n\n    i = 1\n    for j in dnc:\n        assert(j.value == i)\n        i += 1\n\n    # std::map\n    mnc = m.get_mnc(5)\n    for i in range(1, 6):\n        assert mnc[i].value == 10 * i\n\n    vsum = 0\n    for k, v in mnc.items():\n        assert v.value == 10 * k\n        vsum += v.value\n\n    assert vsum == 150\n\n    # std::unordered_map\n    mnc = m.get_umnc(5)\n    for i in range(1, 6):\n        assert mnc[i].value == 10 * i\n\n    vsum = 0\n    for k, v in mnc.items():\n        assert v.value == 10 * k\n        vsum += v.value\n\n    assert vsum == 150\n\n\ndef test_map_delitem():\n    mm = m.MapStringDouble()\n    mm['a'] = 1\n    mm['b'] = 2.5\n\n    assert list(mm) == ['a', 'b']\n    assert list(mm.items()) == [('a', 1), ('b', 2.5)]\n    del mm['a']\n    assert list(mm) == ['b']\n    assert list(mm.items()) == [('b', 2.5)]\n\n    um = m.UnorderedMapStringDouble()\n    um['ua'] = 1.1\n    um['ub'] = 2.6\n\n    assert sorted(list(um)) == ['ua', 'ub']\n    assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]\n    del um['ua']\n    assert sorted(list(um)) == ['ub']\n    assert sorted(list(um.items())) == [('ub', 2.6)]\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_tagbased_polymorphic.cpp",
    "content": "/*\n    tests/test_tagbased_polymorphic.cpp -- test of polymorphic_type_hook\n\n    Copyright (c) 2018 Hudson River Trading LLC <opensource@hudson-trading.com>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include <pybind11/stl.h>\n\nstruct Animal\n{\n    enum class Kind {\n        Unknown = 0,\n        Dog = 100, Labrador, Chihuahua, LastDog = 199,\n        Cat = 200, Panther, LastCat = 299\n    };\n    static const std::type_info* type_of_kind(Kind kind);\n    static std::string name_of_kind(Kind kind);\n\n    const Kind kind;\n    const std::string name;\n\n  protected:\n    Animal(const std::string& _name, Kind _kind)\n        : kind(_kind), name(_name)\n    {}\n};\n\nstruct Dog : Animal\n{\n    Dog(const std::string& _name, Kind _kind = Kind::Dog) : Animal(_name, _kind) {}\n    std::string bark() const { return name_of_kind(kind) + \" \" + name + \" goes \" + sound; }\n    std::string sound = \"WOOF!\";\n};\n\nstruct Labrador : Dog\n{\n    Labrador(const std::string& _name, int _excitement = 9001)\n        : Dog(_name, Kind::Labrador), excitement(_excitement) {}\n    int excitement;\n};\n\nstruct Chihuahua : Dog\n{\n    Chihuahua(const std::string& _name) : Dog(_name, Kind::Chihuahua) { sound = \"iyiyiyiyiyi\"; }\n    std::string bark() const { return Dog::bark() + \" and runs in circles\"; }\n};\n\nstruct Cat : Animal\n{\n    Cat(const std::string& _name, Kind _kind = Kind::Cat) : Animal(_name, _kind) {}\n    std::string purr() const { return \"mrowr\"; }\n};\n\nstruct Panther : Cat\n{\n    Panther(const std::string& _name) : Cat(_name, Kind::Panther) {}\n    std::string purr() const { return \"mrrrRRRRRR\"; }\n};\n\nstd::vector<std::unique_ptr<Animal>> create_zoo()\n{\n    std::vector<std::unique_ptr<Animal>> ret;\n    ret.emplace_back(new Labrador(\"Fido\", 15000));\n\n    // simulate some new type of Dog that the 
Python bindings\n    // haven't been updated for; it should still be considered\n    // a Dog, not just an Animal.\n    ret.emplace_back(new Dog(\"Ginger\", Dog::Kind(150)));\n\n    ret.emplace_back(new Chihuahua(\"Hertzl\"));\n    ret.emplace_back(new Cat(\"Tiger\", Cat::Kind::Cat));\n    ret.emplace_back(new Panther(\"Leo\"));\n    return ret;\n}\n\nconst std::type_info* Animal::type_of_kind(Kind kind)\n{\n    switch (kind) {\n        case Kind::Unknown: break;\n\n        case Kind::Dog: break;\n        case Kind::Labrador: return &typeid(Labrador);\n        case Kind::Chihuahua: return &typeid(Chihuahua);\n        case Kind::LastDog: break;\n\n        case Kind::Cat: break;\n        case Kind::Panther: return &typeid(Panther);\n        case Kind::LastCat: break;\n    }\n\n    if (kind >= Kind::Dog && kind <= Kind::LastDog) return &typeid(Dog);\n    if (kind >= Kind::Cat && kind <= Kind::LastCat) return &typeid(Cat);\n    return nullptr;\n}\n\nstd::string Animal::name_of_kind(Kind kind)\n{\n    std::string raw_name = type_of_kind(kind)->name();\n    py::detail::clean_type_id(raw_name);\n    return raw_name;\n}\n\nnamespace pybind11 {\n    template <typename itype>\n    struct polymorphic_type_hook<itype, detail::enable_if_t<std::is_base_of<Animal, itype>::value>>\n    {\n        static const void *get(const itype *src, const std::type_info*& type)\n        { type = src ? 
Animal::type_of_kind(src->kind) : nullptr; return src; }\n    };\n}\n\nTEST_SUBMODULE(tagbased_polymorphic, m) {\n    py::class_<Animal>(m, \"Animal\")\n        .def_readonly(\"name\", &Animal::name);\n    py::class_<Dog, Animal>(m, \"Dog\")\n        .def(py::init<std::string>())\n        .def_readwrite(\"sound\", &Dog::sound)\n        .def(\"bark\", &Dog::bark);\n    py::class_<Labrador, Dog>(m, \"Labrador\")\n        .def(py::init<std::string, int>(), \"name\"_a, \"excitement\"_a = 9001)\n        .def_readwrite(\"excitement\", &Labrador::excitement);\n    py::class_<Chihuahua, Dog>(m, \"Chihuahua\")\n        .def(py::init<std::string>())\n        .def(\"bark\", &Chihuahua::bark);\n    py::class_<Cat, Animal>(m, \"Cat\")\n        .def(py::init<std::string>())\n        .def(\"purr\", &Cat::purr);\n    py::class_<Panther, Cat>(m, \"Panther\")\n        .def(py::init<std::string>())\n        .def(\"purr\", &Panther::purr);\n    m.def(\"create_zoo\", &create_zoo);\n};\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_tagbased_polymorphic.py",
    "content": "from pybind11_tests import tagbased_polymorphic as m\n\n\ndef test_downcast():\n    zoo = m.create_zoo()\n    assert [type(animal) for animal in zoo] == [\n        m.Labrador, m.Dog, m.Chihuahua, m.Cat, m.Panther\n    ]\n    assert [animal.name for animal in zoo] == [\n        \"Fido\", \"Ginger\", \"Hertzl\", \"Tiger\", \"Leo\"\n    ]\n    zoo[1].sound = \"woooooo\"\n    assert [dog.bark() for dog in zoo[:3]] == [\n        \"Labrador Fido goes WOOF!\",\n        \"Dog Ginger goes woooooo\",\n        \"Chihuahua Hertzl goes iyiyiyiyiyi and runs in circles\"\n    ]\n    assert [cat.purr() for cat in zoo[3:]] == [\"mrowr\", \"mrrrRRRRRR\"]\n    zoo[0].excitement -= 1000\n    assert zoo[0].excitement == 14000\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_virtual_functions.cpp",
    "content": "/*\n    tests/test_virtual_functions.cpp -- overriding virtual functions from Python\n\n    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\n\n    All rights reserved. Use of this source code is governed by a\n    BSD-style license that can be found in the LICENSE file.\n*/\n\n#include \"pybind11_tests.h\"\n#include \"constructor_stats.h\"\n#include <pybind11/functional.h>\n#include <thread>\n\n/* This is an example class that we'll want to be able to extend from Python */\nclass ExampleVirt  {\npublic:\n    ExampleVirt(int state) : state(state) { print_created(this, state); }\n    ExampleVirt(const ExampleVirt &e) : state(e.state) { print_copy_created(this); }\n    ExampleVirt(ExampleVirt &&e) : state(e.state) { print_move_created(this); e.state = 0; }\n    virtual ~ExampleVirt() { print_destroyed(this); }\n\n    virtual int run(int value) {\n        py::print(\"Original implementation of \"\n                  \"ExampleVirt::run(state={}, value={}, str1={}, str2={})\"_s.format(state, value, get_string1(), *get_string2()));\n        return state + value;\n    }\n\n    virtual bool run_bool() = 0;\n    virtual void pure_virtual() = 0;\n\n    // Returning a reference/pointer to a type converted from python (numbers, strings, etc.) 
is a\n    // bit trickier, because the actual int& or std::string& or whatever only exists temporarily, so\n    // we have to handle it specially in the trampoline class (see below).\n    virtual const std::string &get_string1() { return str1; }\n    virtual const std::string *get_string2() { return &str2; }\n\nprivate:\n    int state;\n    const std::string str1{\"default1\"}, str2{\"default2\"};\n};\n\n/* This is a wrapper class that must be generated */\nclass PyExampleVirt : public ExampleVirt {\npublic:\n    using ExampleVirt::ExampleVirt; /* Inherit constructors */\n\n    int run(int value) override {\n        /* Generate wrapping code that enables native function overloading */\n        PYBIND11_OVERLOAD(\n            int,         /* Return type */\n            ExampleVirt, /* Parent class */\n            run,         /* Name of function */\n            value        /* Argument(s) */\n        );\n    }\n\n    bool run_bool() override {\n        PYBIND11_OVERLOAD_PURE(\n            bool,         /* Return type */\n            ExampleVirt,  /* Parent class */\n            run_bool,     /* Name of function */\n                          /* This function has no arguments. The trailing comma\n                             in the previous line is needed for some compilers */\n        );\n    }\n\n    void pure_virtual() override {\n        PYBIND11_OVERLOAD_PURE(\n            void,         /* Return type */\n            ExampleVirt,  /* Parent class */\n            pure_virtual, /* Name of function */\n                          /* This function has no arguments. 
The trailing comma\n                             in the previous line is needed for some compilers */\n        );\n    }\n\n    // We can return reference types for compatibility with C++ virtual interfaces that do so, but\n    // note they have some significant limitations (see the documentation).\n    const std::string &get_string1() override {\n        PYBIND11_OVERLOAD(\n            const std::string &, /* Return type */\n            ExampleVirt,         /* Parent class */\n            get_string1,         /* Name of function */\n                                 /* (no arguments) */\n        );\n    }\n\n    const std::string *get_string2() override {\n        PYBIND11_OVERLOAD(\n            const std::string *, /* Return type */\n            ExampleVirt,         /* Parent class */\n            get_string2,         /* Name of function */\n                                 /* (no arguments) */\n        );\n    }\n\n};\n\nclass NonCopyable {\npublic:\n    NonCopyable(int a, int b) : value{new int(a*b)} { print_created(this, a, b); }\n    NonCopyable(NonCopyable &&o) { value = std::move(o.value); print_move_created(this); }\n    NonCopyable(const NonCopyable &) = delete;\n    NonCopyable() = delete;\n    void operator=(const NonCopyable &) = delete;\n    void operator=(NonCopyable &&) = delete;\n    std::string get_value() const {\n        if (value) return std::to_string(*value); else return \"(null)\";\n    }\n    ~NonCopyable() { print_destroyed(this); }\n\nprivate:\n    std::unique_ptr<int> value;\n};\n\n// This is like the above, but is both copy and movable.  
In effect this means it should get moved\n// when it is not referenced elsewhere, but copied if it is still referenced.\nclass Movable {\npublic:\n    Movable(int a, int b) : value{a+b} { print_created(this, a, b); }\n    Movable(const Movable &m) { value = m.value; print_copy_created(this); }\n    Movable(Movable &&m) { value = std::move(m.value); print_move_created(this); }\n    std::string get_value() const { return std::to_string(value); }\n    ~Movable() { print_destroyed(this); }\nprivate:\n    int value;\n};\n\nclass NCVirt {\npublic:\n    virtual ~NCVirt() { }\n    virtual NonCopyable get_noncopyable(int a, int b) { return NonCopyable(a, b); }\n    virtual Movable get_movable(int a, int b) = 0;\n\n    std::string print_nc(int a, int b) { return get_noncopyable(a, b).get_value(); }\n    std::string print_movable(int a, int b) { return get_movable(a, b).get_value(); }\n};\nclass NCVirtTrampoline : public NCVirt {\n#if !defined(__INTEL_COMPILER)\n    NonCopyable get_noncopyable(int a, int b) override {\n        PYBIND11_OVERLOAD(NonCopyable, NCVirt, get_noncopyable, a, b);\n    }\n#endif\n    Movable get_movable(int a, int b) override {\n        PYBIND11_OVERLOAD_PURE(Movable, NCVirt, get_movable, a, b);\n    }\n};\n\nstruct Base {\n    /* for some reason MSVC2015 can't compile this if the function is pure virtual */\n    virtual std::string dispatch() const { return {}; };\n    virtual ~Base() = default;\n};\n\nstruct DispatchIssue : Base {\n    virtual std::string dispatch() const {\n        PYBIND11_OVERLOAD_PURE(std::string, Base, dispatch, /* no arguments */);\n    }\n};\n\nstatic void test_gil() {\n    {\n        py::gil_scoped_acquire lock;\n        py::print(\"1st lock acquired\");\n\n    }\n\n    {\n        py::gil_scoped_acquire lock;\n        py::print(\"2nd lock acquired\");\n    }\n\n}\n\nstatic void test_gil_from_thread() {\n    py::gil_scoped_release release;\n\n    std::thread t(test_gil);\n    t.join();\n}\n\n\n// Forward declaration (so that 
we can put the main tests here; the inherited virtual approaches are\n// rather long).\nvoid initialize_inherited_virtuals(py::module &m);\n\nTEST_SUBMODULE(virtual_functions, m) {\n    // test_override\n    py::class_<ExampleVirt, PyExampleVirt>(m, \"ExampleVirt\")\n        .def(py::init<int>())\n        /* Reference original class in function definitions */\n        .def(\"run\", &ExampleVirt::run)\n        .def(\"run_bool\", &ExampleVirt::run_bool)\n        .def(\"pure_virtual\", &ExampleVirt::pure_virtual);\n\n    py::class_<NonCopyable>(m, \"NonCopyable\")\n        .def(py::init<int, int>());\n\n    py::class_<Movable>(m, \"Movable\")\n        .def(py::init<int, int>());\n\n    // test_move_support\n#if !defined(__INTEL_COMPILER)\n    py::class_<NCVirt, NCVirtTrampoline>(m, \"NCVirt\")\n        .def(py::init<>())\n        .def(\"get_noncopyable\", &NCVirt::get_noncopyable)\n        .def(\"get_movable\", &NCVirt::get_movable)\n        .def(\"print_nc\", &NCVirt::print_nc)\n        .def(\"print_movable\", &NCVirt::print_movable);\n#endif\n\n    m.def(\"runExampleVirt\", [](ExampleVirt *ex, int value) { return ex->run(value); });\n    m.def(\"runExampleVirtBool\", [](ExampleVirt* ex) { return ex->run_bool(); });\n    m.def(\"runExampleVirtVirtual\", [](ExampleVirt *ex) { ex->pure_virtual(); });\n\n    m.def(\"cstats_debug\", &ConstructorStats::get<ExampleVirt>);\n    initialize_inherited_virtuals(m);\n\n    // test_alias_delay_initialization1\n    // don't invoke Python dispatch classes by default when instantiating C++ classes\n    // that were not extended on the Python side\n    struct A {\n        virtual ~A() {}\n        virtual void f() { py::print(\"A.f()\"); }\n    };\n\n    struct PyA : A {\n        PyA() { py::print(\"PyA.PyA()\"); }\n        ~PyA() { py::print(\"PyA.~PyA()\"); }\n\n        void f() override {\n            py::print(\"PyA.f()\");\n            // This convolution just gives a `void`, but tests that PYBIND11_TYPE() works to protect\n      
      // a type containing a ,\n            PYBIND11_OVERLOAD(PYBIND11_TYPE(typename std::enable_if<true, void>::type), A, f);\n        }\n    };\n\n    py::class_<A, PyA>(m, \"A\")\n        .def(py::init<>())\n        .def(\"f\", &A::f);\n\n    m.def(\"call_f\", [](A *a) { a->f(); });\n\n    // test_alias_delay_initialization2\n    // ... unless we explicitly request it, as in this example:\n    struct A2 {\n        virtual ~A2() {}\n        virtual void f() { py::print(\"A2.f()\"); }\n    };\n\n    struct PyA2 : A2 {\n        PyA2() { py::print(\"PyA2.PyA2()\"); }\n        ~PyA2() { py::print(\"PyA2.~PyA2()\"); }\n        void f() override {\n            py::print(\"PyA2.f()\");\n            PYBIND11_OVERLOAD(void, A2, f);\n        }\n    };\n\n    py::class_<A2, PyA2>(m, \"A2\")\n        .def(py::init_alias<>())\n        .def(py::init([](int) { return new PyA2(); }))\n        .def(\"f\", &A2::f);\n\n    m.def(\"call_f\", [](A2 *a2) { a2->f(); });\n\n    // test_dispatch_issue\n    // #159: virtual function dispatch has problems with similar-named functions\n    py::class_<Base, DispatchIssue>(m, \"DispatchIssue\")\n        .def(py::init<>())\n        .def(\"dispatch\", &Base::dispatch);\n\n    m.def(\"dispatch_issue_go\", [](const Base * b) { return b->dispatch(); });\n\n    // test_override_ref\n    // #392/397: overriding reference-returning functions\n    class OverrideTest {\n    public:\n        struct A { std::string value = \"hi\"; };\n        std::string v;\n        A a;\n        explicit OverrideTest(const std::string &v) : v{v} {}\n        virtual std::string str_value() { return v; }\n        virtual std::string &str_ref() { return v; }\n        virtual A A_value() { return a; }\n        virtual A &A_ref() { return a; }\n        virtual ~OverrideTest() = default;\n    };\n\n    class PyOverrideTest : public OverrideTest {\n    public:\n        using OverrideTest::OverrideTest;\n        std::string str_value() override { PYBIND11_OVERLOAD(std::string, 
OverrideTest, str_value); }\n        // Not allowed (uncommenting should hit a static_assert failure): we can't get a reference\n        // to a python numeric value, since we only copy values in the numeric type caster:\n//      std::string &str_ref() override { PYBIND11_OVERLOAD(std::string &, OverrideTest, str_ref); }\n        // But we can work around it like this:\n    private:\n        std::string _tmp;\n        std::string str_ref_helper() { PYBIND11_OVERLOAD(std::string, OverrideTest, str_ref); }\n    public:\n        std::string &str_ref() override { return _tmp = str_ref_helper(); }\n\n        A A_value() override { PYBIND11_OVERLOAD(A, OverrideTest, A_value); }\n        A &A_ref() override { PYBIND11_OVERLOAD(A &, OverrideTest, A_ref); }\n    };\n\n    py::class_<OverrideTest::A>(m, \"OverrideTest_A\")\n        .def_readwrite(\"value\", &OverrideTest::A::value);\n    py::class_<OverrideTest, PyOverrideTest>(m, \"OverrideTest\")\n        .def(py::init<const std::string &>())\n        .def(\"str_value\", &OverrideTest::str_value)\n//      .def(\"str_ref\", &OverrideTest::str_ref)\n        .def(\"A_value\", &OverrideTest::A_value)\n        .def(\"A_ref\", &OverrideTest::A_ref);\n}\n\n\n// Inheriting virtual methods.  
We do two versions here: the repeat-everything version and the\n// templated trampoline versions mentioned in docs/advanced.rst.\n//\n// These base classes are exactly the same, but we technically need distinct\n// classes for this example code because we need to be able to bind them\n// properly (pybind11, sensibly, doesn't allow us to bind the same C++ class to\n// multiple python classes).\nclass A_Repeat {\n#define A_METHODS \\\npublic: \\\n    virtual int unlucky_number() = 0; \\\n    virtual std::string say_something(unsigned times) { \\\n        std::string s = \"\"; \\\n        for (unsigned i = 0; i < times; ++i) \\\n            s += \"hi\"; \\\n        return s; \\\n    } \\\n    std::string say_everything() { \\\n        return say_something(1) + \" \" + std::to_string(unlucky_number()); \\\n    }\nA_METHODS\n    virtual ~A_Repeat() = default;\n};\nclass B_Repeat : public A_Repeat {\n#define B_METHODS \\\npublic: \\\n    int unlucky_number() override { return 13; } \\\n    std::string say_something(unsigned times) override { \\\n        return \"B says hi \" + std::to_string(times) + \" times\"; \\\n    } \\\n    virtual double lucky_number() { return 7.0; }\nB_METHODS\n};\nclass C_Repeat : public B_Repeat {\n#define C_METHODS \\\npublic: \\\n    int unlucky_number() override { return 4444; } \\\n    double lucky_number() override { return 888; }\nC_METHODS\n};\nclass D_Repeat : public C_Repeat {\n#define D_METHODS // Nothing overridden.\nD_METHODS\n};\n\n// Base classes for templated inheritance trampolines.  
Identical to the repeat-everything version:\nclass A_Tpl { A_METHODS; virtual ~A_Tpl() = default; };\nclass B_Tpl : public A_Tpl { B_METHODS };\nclass C_Tpl : public B_Tpl { C_METHODS };\nclass D_Tpl : public C_Tpl { D_METHODS };\n\n\n// Inheritance approach 1: each trampoline gets every virtual method (11 in total)\nclass PyA_Repeat : public A_Repeat {\npublic:\n    using A_Repeat::A_Repeat;\n    int unlucky_number() override { PYBIND11_OVERLOAD_PURE(int, A_Repeat, unlucky_number, ); }\n    std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, A_Repeat, say_something, times); }\n};\nclass PyB_Repeat : public B_Repeat {\npublic:\n    using B_Repeat::B_Repeat;\n    int unlucky_number() override { PYBIND11_OVERLOAD(int, B_Repeat, unlucky_number, ); }\n    std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, B_Repeat, say_something, times); }\n    double lucky_number() override { PYBIND11_OVERLOAD(double, B_Repeat, lucky_number, ); }\n};\nclass PyC_Repeat : public C_Repeat {\npublic:\n    using C_Repeat::C_Repeat;\n    int unlucky_number() override { PYBIND11_OVERLOAD(int, C_Repeat, unlucky_number, ); }\n    std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, C_Repeat, say_something, times); }\n    double lucky_number() override { PYBIND11_OVERLOAD(double, C_Repeat, lucky_number, ); }\n};\nclass PyD_Repeat : public D_Repeat {\npublic:\n    using D_Repeat::D_Repeat;\n    int unlucky_number() override { PYBIND11_OVERLOAD(int, D_Repeat, unlucky_number, ); }\n    std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, D_Repeat, say_something, times); }\n    double lucky_number() override { PYBIND11_OVERLOAD(double, D_Repeat, lucky_number, ); }\n};\n\n// Inheritance approach 2: templated trampoline classes.\n//\n// Advantages:\n// - we have only 2 (template) class and 4 method declarations (one per virtual method, plus one for\n//   any override of a 
pure virtual method), versus 4 classes and 6 methods (MI) or 4 classes and 11\n//   methods (repeat).\n// - Compared to MI, we also don't have to change the non-trampoline inheritance to virtual, and can\n//   properly inherit constructors.\n//\n// Disadvantage:\n// - the compiler must still generate and compile 14 different methods (more, even, than the 11\n//   required for the repeat approach) instead of the 6 required for MI.  (If there was no pure\n//   method (or no pure method override), the number would drop down to the same 11 as the repeat\n//   approach).\ntemplate <class Base = A_Tpl>\nclass PyA_Tpl : public Base {\npublic:\n    using Base::Base; // Inherit constructors\n    int unlucky_number() override { PYBIND11_OVERLOAD_PURE(int, Base, unlucky_number, ); }\n    std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, Base, say_something, times); }\n};\ntemplate <class Base = B_Tpl>\nclass PyB_Tpl : public PyA_Tpl<Base> {\npublic:\n    using PyA_Tpl<Base>::PyA_Tpl; // Inherit constructors (via PyA_Tpl's inherited constructors)\n    int unlucky_number() override { PYBIND11_OVERLOAD(int, Base, unlucky_number, ); }\n    double lucky_number() override { PYBIND11_OVERLOAD(double, Base, lucky_number, ); }\n};\n// Since C_Tpl and D_Tpl don't declare any new virtual methods, we don't actually need these (we can\n// use PyB_Tpl<C_Tpl> and PyB_Tpl<D_Tpl> for the trampoline classes instead):\n/*\ntemplate <class Base = C_Tpl> class PyC_Tpl : public PyB_Tpl<Base> {\npublic:\n    using PyB_Tpl<Base>::PyB_Tpl;\n};\ntemplate <class Base = D_Tpl> class PyD_Tpl : public PyC_Tpl<Base> {\npublic:\n    using PyC_Tpl<Base>::PyC_Tpl;\n};\n*/\n\nvoid initialize_inherited_virtuals(py::module &m) {\n    // test_inherited_virtuals\n\n    // Method 1: repeat\n    py::class_<A_Repeat, PyA_Repeat>(m, \"A_Repeat\")\n        .def(py::init<>())\n        .def(\"unlucky_number\", &A_Repeat::unlucky_number)\n        .def(\"say_something\", 
&A_Repeat::say_something)\n        .def(\"say_everything\", &A_Repeat::say_everything);\n    py::class_<B_Repeat, A_Repeat, PyB_Repeat>(m, \"B_Repeat\")\n        .def(py::init<>())\n        .def(\"lucky_number\", &B_Repeat::lucky_number);\n    py::class_<C_Repeat, B_Repeat, PyC_Repeat>(m, \"C_Repeat\")\n        .def(py::init<>());\n    py::class_<D_Repeat, C_Repeat, PyD_Repeat>(m, \"D_Repeat\")\n        .def(py::init<>());\n\n    // test_\n    // Method 2: Templated trampolines\n    py::class_<A_Tpl, PyA_Tpl<>>(m, \"A_Tpl\")\n        .def(py::init<>())\n        .def(\"unlucky_number\", &A_Tpl::unlucky_number)\n        .def(\"say_something\", &A_Tpl::say_something)\n        .def(\"say_everything\", &A_Tpl::say_everything);\n    py::class_<B_Tpl, A_Tpl, PyB_Tpl<>>(m, \"B_Tpl\")\n        .def(py::init<>())\n        .def(\"lucky_number\", &B_Tpl::lucky_number);\n    py::class_<C_Tpl, B_Tpl, PyB_Tpl<C_Tpl>>(m, \"C_Tpl\")\n        .def(py::init<>());\n    py::class_<D_Tpl, C_Tpl, PyB_Tpl<D_Tpl>>(m, \"D_Tpl\")\n        .def(py::init<>());\n\n\n    // Fix issue #1454 (crash when acquiring/releasing GIL on another thread in Python 2.7)\n    m.def(\"test_gil\", &test_gil);\n    m.def(\"test_gil_from_thread\", &test_gil_from_thread);\n};\n"
  },
  {
    "path": "src/third_party/pybind11/tests/test_virtual_functions.py",
    "content": "import pytest\n\nfrom pybind11_tests import virtual_functions as m\nfrom pybind11_tests import ConstructorStats\n\n\ndef test_override(capture, msg):\n    class ExtendedExampleVirt(m.ExampleVirt):\n        def __init__(self, state):\n            super(ExtendedExampleVirt, self).__init__(state + 1)\n            self.data = \"Hello world\"\n\n        def run(self, value):\n            print('ExtendedExampleVirt::run(%i), calling parent..' % value)\n            return super(ExtendedExampleVirt, self).run(value + 1)\n\n        def run_bool(self):\n            print('ExtendedExampleVirt::run_bool()')\n            return False\n\n        def get_string1(self):\n            return \"override1\"\n\n        def pure_virtual(self):\n            print('ExtendedExampleVirt::pure_virtual(): %s' % self.data)\n\n    class ExtendedExampleVirt2(ExtendedExampleVirt):\n        def __init__(self, state):\n            super(ExtendedExampleVirt2, self).__init__(state + 1)\n\n        def get_string2(self):\n            return \"override2\"\n\n    ex12 = m.ExampleVirt(10)\n    with capture:\n        assert m.runExampleVirt(ex12, 20) == 30\n    assert capture == \"\"\"\n        Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)\n    \"\"\"  # noqa: E501 line too long\n\n    with pytest.raises(RuntimeError) as excinfo:\n        m.runExampleVirtVirtual(ex12)\n    assert msg(excinfo.value) == 'Tried to call pure virtual function \"ExampleVirt::pure_virtual\"'\n\n    ex12p = ExtendedExampleVirt(10)\n    with capture:\n        assert m.runExampleVirt(ex12p, 20) == 32\n    assert capture == \"\"\"\n        ExtendedExampleVirt::run(20), calling parent..\n        Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)\n    \"\"\"  # noqa: E501 line too long\n    with capture:\n        assert m.runExampleVirtBool(ex12p) is False\n    assert capture == \"ExtendedExampleVirt::run_bool()\"\n    with 
capture:\n        m.runExampleVirtVirtual(ex12p)\n    assert capture == \"ExtendedExampleVirt::pure_virtual(): Hello world\"\n\n    ex12p2 = ExtendedExampleVirt2(15)\n    with capture:\n        assert m.runExampleVirt(ex12p2, 50) == 68\n    assert capture == \"\"\"\n        ExtendedExampleVirt::run(50), calling parent..\n        Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)\n    \"\"\"  # noqa: E501 line too long\n\n    cstats = ConstructorStats.get(m.ExampleVirt)\n    assert cstats.alive() == 3\n    del ex12, ex12p, ex12p2\n    assert cstats.alive() == 0\n    assert cstats.values() == ['10', '11', '17']\n    assert cstats.copy_constructions == 0\n    assert cstats.move_constructions >= 0\n\n\ndef test_alias_delay_initialization1(capture):\n    \"\"\"`A` only initializes its trampoline class when we inherit from it\n\n    If we just create and use an A instance directly, the trampoline initialization is\n    bypassed and we only initialize an A() instead (for performance reasons).\n    \"\"\"\n    class B(m.A):\n        def __init__(self):\n            super(B, self).__init__()\n\n        def f(self):\n            print(\"In python f()\")\n\n    # C++ version\n    with capture:\n        a = m.A()\n        m.call_f(a)\n        del a\n        pytest.gc_collect()\n    assert capture == \"A.f()\"\n\n    # Python version\n    with capture:\n        b = B()\n        m.call_f(b)\n        del b\n        pytest.gc_collect()\n    assert capture == \"\"\"\n        PyA.PyA()\n        PyA.f()\n        In python f()\n        PyA.~PyA()\n    \"\"\"\n\n\ndef test_alias_delay_initialization2(capture):\n    \"\"\"`A2`, unlike the above, is configured to always initialize the alias\n\n    While the extra initialization and extra class layer has small virtual dispatch\n    performance penalty, it also allows us to do more things with the trampoline\n    class such as defining local variables and performing construction/destruction.\n   
 \"\"\"\n    class B2(m.A2):\n        def __init__(self):\n            super(B2, self).__init__()\n\n        def f(self):\n            print(\"In python B2.f()\")\n\n    # No python subclass version\n    with capture:\n        a2 = m.A2()\n        m.call_f(a2)\n        del a2\n        pytest.gc_collect()\n        a3 = m.A2(1)\n        m.call_f(a3)\n        del a3\n        pytest.gc_collect()\n    assert capture == \"\"\"\n        PyA2.PyA2()\n        PyA2.f()\n        A2.f()\n        PyA2.~PyA2()\n        PyA2.PyA2()\n        PyA2.f()\n        A2.f()\n        PyA2.~PyA2()\n    \"\"\"\n\n    # Python subclass version\n    with capture:\n        b2 = B2()\n        m.call_f(b2)\n        del b2\n        pytest.gc_collect()\n    assert capture == \"\"\"\n        PyA2.PyA2()\n        PyA2.f()\n        In python B2.f()\n        PyA2.~PyA2()\n    \"\"\"\n\n\n# PyPy: Reference count > 1 causes call with noncopyable instance\n# to fail in ncv1.print_nc()\n@pytest.unsupported_on_pypy\n@pytest.mark.skipif(not hasattr(m, \"NCVirt\"), reason=\"NCVirt test broken on ICPC\")\ndef test_move_support():\n    class NCVirtExt(m.NCVirt):\n        def get_noncopyable(self, a, b):\n            # Constructs and returns a new instance:\n            nc = m.NonCopyable(a * a, b * b)\n            return nc\n\n        def get_movable(self, a, b):\n            # Return a referenced copy\n            self.movable = m.Movable(a, b)\n            return self.movable\n\n    class NCVirtExt2(m.NCVirt):\n        def get_noncopyable(self, a, b):\n            # Keep a reference: this is going to throw an exception\n            self.nc = m.NonCopyable(a, b)\n            return self.nc\n\n        def get_movable(self, a, b):\n            # Return a new instance without storing it\n            return m.Movable(a, b)\n\n    ncv1 = NCVirtExt()\n    assert ncv1.print_nc(2, 3) == \"36\"\n    assert ncv1.print_movable(4, 5) == \"9\"\n    ncv2 = NCVirtExt2()\n    assert ncv2.print_movable(7, 7) == \"14\"\n    # 
Don't check the exception message here because it differs under debug/non-debug mode\n    with pytest.raises(RuntimeError):\n        ncv2.print_nc(9, 9)\n\n    nc_stats = ConstructorStats.get(m.NonCopyable)\n    mv_stats = ConstructorStats.get(m.Movable)\n    assert nc_stats.alive() == 1\n    assert mv_stats.alive() == 1\n    del ncv1, ncv2\n    assert nc_stats.alive() == 0\n    assert mv_stats.alive() == 0\n    assert nc_stats.values() == ['4', '9', '9', '9']\n    assert mv_stats.values() == ['4', '5', '7', '7']\n    assert nc_stats.copy_constructions == 0\n    assert mv_stats.copy_constructions == 1\n    assert nc_stats.move_constructions >= 0\n    assert mv_stats.move_constructions >= 0\n\n\ndef test_dispatch_issue(msg):\n    \"\"\"#159: virtual function dispatch has problems with similar-named functions\"\"\"\n    class PyClass1(m.DispatchIssue):\n        def dispatch(self):\n            return \"Yay..\"\n\n    class PyClass2(m.DispatchIssue):\n        def dispatch(self):\n            with pytest.raises(RuntimeError) as excinfo:\n                super(PyClass2, self).dispatch()\n            assert msg(excinfo.value) == 'Tried to call pure virtual function \"Base::dispatch\"'\n\n            p = PyClass1()\n            return m.dispatch_issue_go(p)\n\n    b = PyClass2()\n    assert m.dispatch_issue_go(b) == \"Yay..\"\n\n\ndef test_override_ref():\n    \"\"\"#392/397: overriding reference-returning functions\"\"\"\n    o = m.OverrideTest(\"asdf\")\n\n    # Not allowed (see associated .cpp comment)\n    # i = o.str_ref()\n    # assert o.str_ref() == \"asdf\"\n    assert o.str_value() == \"asdf\"\n\n    assert o.A_value().value == \"hi\"\n    a = o.A_ref()\n    assert a.value == \"hi\"\n    a.value = \"bye\"\n    assert a.value == \"bye\"\n\n\ndef test_inherited_virtuals():\n    class AR(m.A_Repeat):\n        def unlucky_number(self):\n            return 99\n\n    class AT(m.A_Tpl):\n        def unlucky_number(self):\n            return 999\n\n    obj = AR()\n    
assert obj.say_something(3) == \"hihihi\"\n    assert obj.unlucky_number() == 99\n    assert obj.say_everything() == \"hi 99\"\n\n    obj = AT()\n    assert obj.say_something(3) == \"hihihi\"\n    assert obj.unlucky_number() == 999\n    assert obj.say_everything() == \"hi 999\"\n\n    for obj in [m.B_Repeat(), m.B_Tpl()]:\n        assert obj.say_something(3) == \"B says hi 3 times\"\n        assert obj.unlucky_number() == 13\n        assert obj.lucky_number() == 7.0\n        assert obj.say_everything() == \"B says hi 1 times 13\"\n\n    for obj in [m.C_Repeat(), m.C_Tpl()]:\n        assert obj.say_something(3) == \"B says hi 3 times\"\n        assert obj.unlucky_number() == 4444\n        assert obj.lucky_number() == 888.0\n        assert obj.say_everything() == \"B says hi 1 times 4444\"\n\n    class CR(m.C_Repeat):\n        def lucky_number(self):\n            return m.C_Repeat.lucky_number(self) + 1.25\n\n    obj = CR()\n    assert obj.say_something(3) == \"B says hi 3 times\"\n    assert obj.unlucky_number() == 4444\n    assert obj.lucky_number() == 889.25\n    assert obj.say_everything() == \"B says hi 1 times 4444\"\n\n    class CT(m.C_Tpl):\n        pass\n\n    obj = CT()\n    assert obj.say_something(3) == \"B says hi 3 times\"\n    assert obj.unlucky_number() == 4444\n    assert obj.lucky_number() == 888.0\n    assert obj.say_everything() == \"B says hi 1 times 4444\"\n\n    class CCR(CR):\n        def lucky_number(self):\n            return CR.lucky_number(self) * 10\n\n    obj = CCR()\n    assert obj.say_something(3) == \"B says hi 3 times\"\n    assert obj.unlucky_number() == 4444\n    assert obj.lucky_number() == 8892.5\n    assert obj.say_everything() == \"B says hi 1 times 4444\"\n\n    class CCT(CT):\n        def lucky_number(self):\n            return CT.lucky_number(self) * 1000\n\n    obj = CCT()\n    assert obj.say_something(3) == \"B says hi 3 times\"\n    assert obj.unlucky_number() == 4444\n    assert obj.lucky_number() == 888000.0\n    assert 
obj.say_everything() == \"B says hi 1 times 4444\"\n\n    class DR(m.D_Repeat):\n        def unlucky_number(self):\n            return 123\n\n        def lucky_number(self):\n            return 42.0\n\n    for obj in [m.D_Repeat(), m.D_Tpl()]:\n        assert obj.say_something(3) == \"B says hi 3 times\"\n        assert obj.unlucky_number() == 4444\n        assert obj.lucky_number() == 888.0\n        assert obj.say_everything() == \"B says hi 1 times 4444\"\n\n    obj = DR()\n    assert obj.say_something(3) == \"B says hi 3 times\"\n    assert obj.unlucky_number() == 123\n    assert obj.lucky_number() == 42.0\n    assert obj.say_everything() == \"B says hi 1 times 123\"\n\n    class DT(m.D_Tpl):\n        def say_something(self, times):\n            return \"DT says:\" + (' quack' * times)\n\n        def unlucky_number(self):\n            return 1234\n\n        def lucky_number(self):\n            return -4.25\n\n    obj = DT()\n    assert obj.say_something(3) == \"DT says: quack quack quack\"\n    assert obj.unlucky_number() == 1234\n    assert obj.lucky_number() == -4.25\n    assert obj.say_everything() == \"DT says: quack 1234\"\n\n    class DT2(DT):\n        def say_something(self, times):\n            return \"DT2: \" + ('QUACK' * times)\n\n        def unlucky_number(self):\n            return -3\n\n    class BT(m.B_Tpl):\n        def say_something(self, times):\n            return \"BT\" * times\n\n        def unlucky_number(self):\n            return -7\n\n        def lucky_number(self):\n            return -1.375\n\n    obj = BT()\n    assert obj.say_something(3) == \"BTBTBT\"\n    assert obj.unlucky_number() == -7\n    assert obj.lucky_number() == -1.375\n    assert obj.say_everything() == \"BT -7\"\n\n\ndef test_issue_1454():\n    # Fix issue #1454 (crash when acquiring/releasing GIL on another thread in Python 2.7)\n    m.test_gil()\n    m.test_gil_from_thread()\n"
  },
  {
    "path": "src/third_party/pybind11/tools/FindCatch.cmake",
    "content": "# - Find the Catch test framework or download it (single header)\n#\n# This is a quick module for internal use. It assumes that Catch is\n# REQUIRED and that a minimum version is provided (not EXACT). If\n# a suitable version isn't found locally, the single header file\n# will be downloaded and placed in the build dir: PROJECT_BINARY_DIR.\n#\n# This code sets the following variables:\n#  CATCH_INCLUDE_DIR      - path to catch.hpp\n#  CATCH_VERSION          - version number\n\nif(NOT Catch_FIND_VERSION)\n  message(FATAL_ERROR \"A version number must be specified.\")\nelseif(Catch_FIND_REQUIRED)\n  message(FATAL_ERROR \"This module assumes Catch is not required.\")\nelseif(Catch_FIND_VERSION_EXACT)\n  message(FATAL_ERROR \"Exact version numbers are not supported, only minimum.\")\nendif()\n\n# Extract the version number from catch.hpp\nfunction(_get_catch_version)\n  file(STRINGS \"${CATCH_INCLUDE_DIR}/catch.hpp\" version_line REGEX \"Catch v.*\" LIMIT_COUNT 1)\n  if(version_line MATCHES \"Catch v([0-9]+)\\\\.([0-9]+)\\\\.([0-9]+)\")\n    set(CATCH_VERSION \"${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}\" PARENT_SCOPE)\n  endif()\nendfunction()\n\n# Download the single-header version of Catch\nfunction(_download_catch version destination_dir)\n  message(STATUS \"Downloading catch v${version}...\")\n  set(url https://github.com/philsquared/Catch/releases/download/v${version}/catch.hpp)\n  file(DOWNLOAD ${url} \"${destination_dir}/catch.hpp\" STATUS status)\n  list(GET status 0 error)\n  if(error)\n    message(FATAL_ERROR \"Could not download ${url}\")\n  endif()\n  set(CATCH_INCLUDE_DIR \"${destination_dir}\" CACHE INTERNAL \"\")\nendfunction()\n\n# Look for catch locally\nfind_path(CATCH_INCLUDE_DIR NAMES catch.hpp PATH_SUFFIXES catch)\nif(CATCH_INCLUDE_DIR)\n  _get_catch_version()\nendif()\n\n# Download the header if it wasn't found or if it's outdated\nif(NOT CATCH_VERSION OR CATCH_VERSION VERSION_LESS ${Catch_FIND_VERSION})\n  
if(DOWNLOAD_CATCH)\n    _download_catch(${Catch_FIND_VERSION} \"${PROJECT_BINARY_DIR}/catch/\")\n    _get_catch_version()\n  else()\n    set(CATCH_FOUND FALSE)\n    return()\n  endif()\nendif()\n\nset(CATCH_FOUND TRUE)\n"
  },
  {
    "path": "src/third_party/pybind11/tools/FindEigen3.cmake",
    "content": "# - Try to find Eigen3 lib\n#\n# This module supports requiring a minimum version, e.g. you can do\n#   find_package(Eigen3 3.1.2)\n# to require version 3.1.2 or newer of Eigen3.\n#\n# Once done this will define\n#\n#  EIGEN3_FOUND - system has eigen lib with correct version\n#  EIGEN3_INCLUDE_DIR - the eigen include directory\n#  EIGEN3_VERSION - eigen version\n\n# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>\n# Copyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>\n# Copyright (c) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n# Redistribution and use is allowed according to the terms of the 2-clause BSD license.\n\nif(NOT Eigen3_FIND_VERSION)\n  if(NOT Eigen3_FIND_VERSION_MAJOR)\n    set(Eigen3_FIND_VERSION_MAJOR 2)\n  endif(NOT Eigen3_FIND_VERSION_MAJOR)\n  if(NOT Eigen3_FIND_VERSION_MINOR)\n    set(Eigen3_FIND_VERSION_MINOR 91)\n  endif(NOT Eigen3_FIND_VERSION_MINOR)\n  if(NOT Eigen3_FIND_VERSION_PATCH)\n    set(Eigen3_FIND_VERSION_PATCH 0)\n  endif(NOT Eigen3_FIND_VERSION_PATCH)\n\n  set(Eigen3_FIND_VERSION \"${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}\")\nendif(NOT Eigen3_FIND_VERSION)\n\nmacro(_eigen3_check_version)\n  file(READ \"${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h\" _eigen3_version_header)\n\n  string(REGEX MATCH \"define[ \\t]+EIGEN_WORLD_VERSION[ \\t]+([0-9]+)\" _eigen3_world_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_WORLD_VERSION \"${CMAKE_MATCH_1}\")\n  string(REGEX MATCH \"define[ \\t]+EIGEN_MAJOR_VERSION[ \\t]+([0-9]+)\" _eigen3_major_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_MAJOR_VERSION \"${CMAKE_MATCH_1}\")\n  string(REGEX MATCH \"define[ \\t]+EIGEN_MINOR_VERSION[ \\t]+([0-9]+)\" _eigen3_minor_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_MINOR_VERSION \"${CMAKE_MATCH_1}\")\n\n  set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION})\n  if(${EIGEN3_VERSION} VERSION_LESS 
${Eigen3_FIND_VERSION})\n    set(EIGEN3_VERSION_OK FALSE)\n  else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})\n    set(EIGEN3_VERSION_OK TRUE)\n  endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})\n\n  if(NOT EIGEN3_VERSION_OK)\n\n    message(STATUS \"Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, \"\n                   \"but at least version ${Eigen3_FIND_VERSION} is required\")\n  endif(NOT EIGEN3_VERSION_OK)\nendmacro(_eigen3_check_version)\n\nif (EIGEN3_INCLUDE_DIR)\n\n  # in cache already\n  _eigen3_check_version()\n  set(EIGEN3_FOUND ${EIGEN3_VERSION_OK})\n\nelse (EIGEN3_INCLUDE_DIR)\n\n  find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library\n      PATHS\n      ${CMAKE_INSTALL_PREFIX}/include\n      ${KDE4_INCLUDE_DIR}\n      PATH_SUFFIXES eigen3 eigen\n    )\n\n  if(EIGEN3_INCLUDE_DIR)\n    _eigen3_check_version()\n  endif(EIGEN3_INCLUDE_DIR)\n\n  include(FindPackageHandleStandardArgs)\n  find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK)\n\n  mark_as_advanced(EIGEN3_INCLUDE_DIR)\n\nendif(EIGEN3_INCLUDE_DIR)\n\n"
  },
  {
    "path": "src/third_party/pybind11/tools/FindPythonLibsNew.cmake",
    "content": "# - Find python libraries\n# This module finds the libraries corresponding to the Python interpreter\n# FindPythonInterp provides.\n# This code sets the following variables:\n#\n#  PYTHONLIBS_FOUND           - have the Python libs been found\n#  PYTHON_PREFIX              - path to the Python installation\n#  PYTHON_LIBRARIES           - path to the python library\n#  PYTHON_INCLUDE_DIRS        - path to where Python.h is found\n#  PYTHON_MODULE_EXTENSION    - lib extension, e.g. '.so' or '.pyd'\n#  PYTHON_MODULE_PREFIX       - lib name prefix: usually an empty string\n#  PYTHON_SITE_PACKAGES       - path to installation site-packages\n#  PYTHON_IS_DEBUG            - whether the Python interpreter is a debug build\n#\n# Thanks to talljimbo for the patch adding the 'LDVERSION' config\n# variable usage.\n\n#=============================================================================\n# Copyright 2001-2009 Kitware, Inc.\n# Copyright 2012 Continuum Analytics, Inc.\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# * Neither the names of Kitware, Inc., the Insight Software Consortium,\n# nor the names of their contributors may be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# # A PARTICULAR 
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#=============================================================================\n\n# Checking for the extension makes sure that `LibsNew` was found and not just `Libs`.\nif(PYTHONLIBS_FOUND AND PYTHON_MODULE_EXTENSION)\n    return()\nendif()\n\n# Use the Python interpreter to find the libs.\nif(PythonLibsNew_FIND_REQUIRED)\n    find_package(PythonInterp ${PythonLibsNew_FIND_VERSION} REQUIRED)\nelse()\n    find_package(PythonInterp ${PythonLibsNew_FIND_VERSION})\nendif()\n\nif(NOT PYTHONINTERP_FOUND)\n    set(PYTHONLIBS_FOUND FALSE)\n    return()\nendif()\n\n# According to http://stackoverflow.com/questions/646518/python-how-to-detect-debug-interpreter\n# testing whether sys has the gettotalrefcount function is a reliable, cross-platform\n# way to detect a CPython debug interpreter.\n#\n# The library suffix is from the config var LDVERSION sometimes, otherwise\n# VERSION. 
VERSION will typically be like \"2.7\" on unix, and \"27\" on windows.\nexecute_process(COMMAND \"${PYTHON_EXECUTABLE}\" \"-c\"\n    \"from distutils import sysconfig as s;import sys;import struct;\nprint('.'.join(str(v) for v in sys.version_info));\nprint(sys.prefix);\nprint(s.get_python_inc(plat_specific=True));\nprint(s.get_python_lib(plat_specific=True));\nprint(s.get_config_var('SO'));\nprint(hasattr(sys, 'gettotalrefcount')+0);\nprint(struct.calcsize('@P'));\nprint(s.get_config_var('LDVERSION') or s.get_config_var('VERSION'));\nprint(s.get_config_var('LIBDIR') or '');\nprint(s.get_config_var('MULTIARCH') or '');\n\"\n    RESULT_VARIABLE _PYTHON_SUCCESS\n    OUTPUT_VARIABLE _PYTHON_VALUES\n    ERROR_VARIABLE _PYTHON_ERROR_VALUE)\n\nif(NOT _PYTHON_SUCCESS MATCHES 0)\n    if(PythonLibsNew_FIND_REQUIRED)\n        message(FATAL_ERROR\n            \"Python config failure:\\n${_PYTHON_ERROR_VALUE}\")\n    endif()\n    set(PYTHONLIBS_FOUND FALSE)\n    return()\nendif()\n\n# Convert the process output into a list\nif(WIN32)\n    string(REGEX REPLACE \"\\\\\\\\\" \"/\" _PYTHON_VALUES ${_PYTHON_VALUES})\nendif()\nstring(REGEX REPLACE \";\" \"\\\\\\\\;\" _PYTHON_VALUES ${_PYTHON_VALUES})\nstring(REGEX REPLACE \"\\n\" \";\" _PYTHON_VALUES ${_PYTHON_VALUES})\nlist(GET _PYTHON_VALUES 0 _PYTHON_VERSION_LIST)\nlist(GET _PYTHON_VALUES 1 PYTHON_PREFIX)\nlist(GET _PYTHON_VALUES 2 PYTHON_INCLUDE_DIR)\nlist(GET _PYTHON_VALUES 3 PYTHON_SITE_PACKAGES)\nlist(GET _PYTHON_VALUES 4 PYTHON_MODULE_EXTENSION)\nlist(GET _PYTHON_VALUES 5 PYTHON_IS_DEBUG)\nlist(GET _PYTHON_VALUES 6 PYTHON_SIZEOF_VOID_P)\nlist(GET _PYTHON_VALUES 7 PYTHON_LIBRARY_SUFFIX)\nlist(GET _PYTHON_VALUES 8 PYTHON_LIBDIR)\nlist(GET _PYTHON_VALUES 9 PYTHON_MULTIARCH)\n\n# Make sure the Python has the same pointer-size as the chosen compiler\n# Skip if CMAKE_SIZEOF_VOID_P is not defined\nif(CMAKE_SIZEOF_VOID_P AND (NOT \"${PYTHON_SIZEOF_VOID_P}\" STREQUAL \"${CMAKE_SIZEOF_VOID_P}\"))\n    if(PythonLibsNew_FIND_REQUIRED)\n  
      math(EXPR _PYTHON_BITS \"${PYTHON_SIZEOF_VOID_P} * 8\")\n        math(EXPR _CMAKE_BITS \"${CMAKE_SIZEOF_VOID_P} * 8\")\n        message(FATAL_ERROR\n            \"Python config failure: Python is ${_PYTHON_BITS}-bit, \"\n            \"chosen compiler is  ${_CMAKE_BITS}-bit\")\n    endif()\n    set(PYTHONLIBS_FOUND FALSE)\n    return()\nendif()\n\n# The built-in FindPython didn't always give the version numbers\nstring(REGEX REPLACE \"\\\\.\" \";\" _PYTHON_VERSION_LIST ${_PYTHON_VERSION_LIST})\nlist(GET _PYTHON_VERSION_LIST 0 PYTHON_VERSION_MAJOR)\nlist(GET _PYTHON_VERSION_LIST 1 PYTHON_VERSION_MINOR)\nlist(GET _PYTHON_VERSION_LIST 2 PYTHON_VERSION_PATCH)\n\n# Make sure all directory separators are '/'\nstring(REGEX REPLACE \"\\\\\\\\\" \"/\" PYTHON_PREFIX ${PYTHON_PREFIX})\nstring(REGEX REPLACE \"\\\\\\\\\" \"/\" PYTHON_INCLUDE_DIR ${PYTHON_INCLUDE_DIR})\nstring(REGEX REPLACE \"\\\\\\\\\" \"/\" PYTHON_SITE_PACKAGES ${PYTHON_SITE_PACKAGES})\n\nif(CMAKE_HOST_WIN32)\n    set(PYTHON_LIBRARY\n        \"${PYTHON_PREFIX}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib\")\n\n    # when run in a venv, PYTHON_PREFIX points to it. But the libraries remain in the\n    # original python installation. 
They may be found relative to PYTHON_INCLUDE_DIR.\n    if(NOT EXISTS \"${PYTHON_LIBRARY}\")\n        get_filename_component(_PYTHON_ROOT ${PYTHON_INCLUDE_DIR} DIRECTORY)\n        set(PYTHON_LIBRARY\n            \"${_PYTHON_ROOT}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib\")\n    endif()\n\n    # raise an error if the python libs are still not found.\n    if(NOT EXISTS \"${PYTHON_LIBRARY}\")\n        message(FATAL_ERROR \"Python libraries not found\")\n    endif()\n\nelse()\n    if(PYTHON_MULTIARCH)\n        set(_PYTHON_LIBS_SEARCH \"${PYTHON_LIBDIR}/${PYTHON_MULTIARCH}\" \"${PYTHON_LIBDIR}\")\n    else()\n        set(_PYTHON_LIBS_SEARCH \"${PYTHON_LIBDIR}\")\n    endif()\n    #message(STATUS \"Searching for Python libs in ${_PYTHON_LIBS_SEARCH}\")\n    # Probably this needs to be more involved. It would be nice if the config\n    # information the python interpreter itself gave us were more complete.\n    find_library(PYTHON_LIBRARY\n        NAMES \"python${PYTHON_LIBRARY_SUFFIX}\"\n        PATHS ${_PYTHON_LIBS_SEARCH}\n        NO_DEFAULT_PATH)\n\n    # If all else fails, just set the name/version and let the linker figure out the path.\n    if(NOT PYTHON_LIBRARY)\n        set(PYTHON_LIBRARY python${PYTHON_LIBRARY_SUFFIX})\n    endif()\nendif()\n\nMARK_AS_ADVANCED(\n  PYTHON_LIBRARY\n  PYTHON_INCLUDE_DIR\n)\n\n# We use PYTHON_INCLUDE_DIR, PYTHON_LIBRARY and PYTHON_DEBUG_LIBRARY for the\n# cache entries because they are meant to specify the location of a single\n# library. We now set the variables listed by the documentation for this\n# module.\nSET(PYTHON_INCLUDE_DIRS \"${PYTHON_INCLUDE_DIR}\")\nSET(PYTHON_LIBRARIES \"${PYTHON_LIBRARY}\")\nSET(PYTHON_DEBUG_LIBRARIES \"${PYTHON_DEBUG_LIBRARY}\")\n\nfind_package_message(PYTHON\n    \"Found PythonLibs: ${PYTHON_LIBRARY}\"\n    \"${PYTHON_EXECUTABLE}${PYTHON_VERSION}\")\n\nset(PYTHONLIBS_FOUND TRUE)\n"
  },
  {
    "path": "src/third_party/pybind11/tools/check-style.sh",
    "content": "#!/bin/bash\n#\n# Script to check include/test code for common pybind11 code style errors.\n#\n# This script currently checks for\n#\n# 1. use of tabs instead of spaces\n# 2. MSDOS-style CRLF endings\n# 3. trailing spaces\n# 4. missing space between keyword and parenthesis, e.g.: for(, if(, while(\n# 5. Missing space between right parenthesis and brace, e.g. 'for (...){'\n# 6. opening brace on its own line. It should always be on the same line as the\n#    if/while/for/do statement.\n#\n# Invoke as: tools/check-style.sh\n#\n\ncheck_style_errors=0\nIFS=$'\\n'\n\nfound=\"$( GREP_COLORS='mt=41' GREP_COLOR='41' grep $'\\t' include tests/*.{cpp,py,h} docs/*.rst -rn --color=always )\"\nif [ -n \"$found\" ]; then\n    # The mt=41 sets a red background for matched tabs:\n    echo -e '\\033[31;01mError: found tab characters in the following files:\\033[0m'\n    check_style_errors=1\n    echo \"$found\" | sed -e 's/^/    /'\nfi\n\n\nfound=\"$( grep -IUlr $'\\r' include tests/*.{cpp,py,h} docs/*.rst --color=always )\"\nif [ -n \"$found\" ]; then\n    echo -e '\\033[31;01mError: found CRLF characters in the following files:\\033[0m'\n    check_style_errors=1\n    echo \"$found\" | sed -e 's/^/    /'\nfi\n\nfound=\"$(GREP_COLORS='mt=41' GREP_COLOR='41' grep '[[:blank:]]\\+$' include tests/*.{cpp,py,h} docs/*.rst -rn --color=always )\"\nif [ -n \"$found\" ]; then\n    # The mt=41 sets a red background for matched trailing spaces\n    echo -e '\\033[31;01mError: found trailing spaces in the following files:\\033[0m'\n    check_style_errors=1\n    echo \"$found\" | sed -e 's/^/    /'\nfi\n\nfound=\"$(grep '\\<\\(if\\|for\\|while\\|catch\\)(\\|){' include tests/*.{cpp,h} -rn --color=always)\"\nif [ -n \"$found\" ]; then\n    echo -e '\\033[31;01mError: found the following coding style problems:\\033[0m'\n    check_style_errors=1\n    echo \"$found\" | sed -e 's/^/    /'\nfi\n\nfound=\"$(awk '\nfunction prefix(filename, lineno) {\n    return \"    \\033[35m\" 
filename \"\\033[36m:\\033[32m\" lineno \"\\033[36m:\\033[0m\"\n}\nfunction mark(pattern, string) { sub(pattern, \"\\033[01;31m&\\033[0m\", string); return string }\nlast && /^\\s*{/ {\n    print prefix(FILENAME, FNR-1) mark(\"\\\\)\\\\s*$\", last)\n    print prefix(FILENAME, FNR)   mark(\"^\\\\s*{\", $0)\n    last=\"\"\n}\n{ last = /(if|for|while|catch|switch)\\s*\\(.*\\)\\s*$/ ? $0 : \"\" }\n' $(find include -type f) tests/*.{cpp,h} docs/*.rst)\"\nif [ -n \"$found\" ]; then\n    check_style_errors=1\n    echo -e '\\033[31;01mError: braces should occur on the same line as the if/while/.. statement. Found issues in the following files:\\033[0m'\n    echo \"$found\"\nfi\n\nexit $check_style_errors\n"
  },
  {
    "path": "src/third_party/pybind11/tools/libsize.py",
    "content": "from __future__ import print_function, division\nimport os\nimport sys\n\n# Internal build script for generating debugging test .so size.\n# Usage:\n#     python libsize.py file.so save.txt -- displays the size of file.so and, if save.txt exists, compares it to the\n#                                           size in it, then overwrites save.txt with the new size for future runs.\n\nif len(sys.argv) != 3:\n    sys.exit(\"Invalid arguments: usage: python libsize.py file.so save.txt\")\n\nlib = sys.argv[1]\nsave = sys.argv[2]\n\nif not os.path.exists(lib):\n    sys.exit(\"Error: requested file ({}) does not exist\".format(lib))\n\nlibsize = os.path.getsize(lib)\n\nprint(\"------\", os.path.basename(lib), \"file size:\", libsize, end='')\n\nif os.path.exists(save):\n    with open(save) as sf:\n        oldsize = int(sf.readline())\n\n    if oldsize > 0:\n        change = libsize - oldsize\n        if change == 0:\n            print(\" (no change)\")\n        else:\n            print(\" (change of {:+} bytes = {:+.2%})\".format(change, change / oldsize))\nelse:\n    print()\n\nwith open(save, 'w') as sf:\n    sf.write(str(libsize))\n\n"
  },
  {
    "path": "src/third_party/pybind11/tools/mkdoc.py",
    "content": "#!/usr/bin/env python3\n#\n#  Syntax: mkdoc.py [-I<path> ..] [.. a list of header files ..]\n#\n#  Extract documentation from C++ header files to use it in Python bindings\n#\n\nimport os\nimport sys\nimport platform\nimport re\nimport textwrap\n\nfrom clang import cindex\nfrom clang.cindex import CursorKind\nfrom collections import OrderedDict\nfrom threading import Thread, Semaphore\nfrom multiprocessing import cpu_count\n\nRECURSE_LIST = [\n    CursorKind.TRANSLATION_UNIT,\n    CursorKind.NAMESPACE,\n    CursorKind.CLASS_DECL,\n    CursorKind.STRUCT_DECL,\n    CursorKind.ENUM_DECL,\n    CursorKind.CLASS_TEMPLATE\n]\n\nPRINT_LIST = [\n    CursorKind.CLASS_DECL,\n    CursorKind.STRUCT_DECL,\n    CursorKind.ENUM_DECL,\n    CursorKind.ENUM_CONSTANT_DECL,\n    CursorKind.CLASS_TEMPLATE,\n    CursorKind.FUNCTION_DECL,\n    CursorKind.FUNCTION_TEMPLATE,\n    CursorKind.CONVERSION_FUNCTION,\n    CursorKind.CXX_METHOD,\n    CursorKind.CONSTRUCTOR,\n    CursorKind.FIELD_DECL\n]\n\nCPP_OPERATORS = {\n    '<=': 'le', '>=': 'ge', '==': 'eq', '!=': 'ne', '[]': 'array',\n    '+=': 'iadd', '-=': 'isub', '*=': 'imul', '/=': 'idiv', '%=':\n    'imod', '&=': 'iand', '|=': 'ior', '^=': 'ixor', '<<=': 'ilshift',\n    '>>=': 'irshift', '++': 'inc', '--': 'dec', '<<': 'lshift', '>>':\n    'rshift', '&&': 'land', '||': 'lor', '!': 'lnot', '~': 'bnot',\n    '&': 'band', '|': 'bor', '+': 'add', '-': 'sub', '*': 'mul', '/':\n    'div', '%': 'mod', '<': 'lt', '>': 'gt', '=': 'assign', '()': 'call'\n}\n\nCPP_OPERATORS = OrderedDict(\n    sorted(CPP_OPERATORS.items(), key=lambda t: -len(t[0])))\n\njob_count = cpu_count()\njob_semaphore = Semaphore(job_count)\n\noutput = []\n\ndef d(s):\n    return s.decode('utf8')\n\n\ndef sanitize_name(name):\n    name = re.sub(r'type-parameter-0-([0-9]+)', r'T\\1', name)\n    for k, v in CPP_OPERATORS.items():\n        name = name.replace('operator%s' % k, 'operator_%s' % v)\n    name = re.sub('<.*>', '', name)\n    name = ''.join([ch if 
ch.isalnum() else '_' for ch in name])\n    name = re.sub('_$', '', re.sub('_+', '_', name))\n    return '__doc_' + name\n\n\ndef process_comment(comment):\n    result = ''\n\n    # Remove C++ comment syntax\n    leading_spaces = float('inf')\n    for s in comment.expandtabs(tabsize=4).splitlines():\n        s = s.strip()\n        if s.startswith('/*'):\n            s = s[2:].lstrip('*')\n        elif s.endswith('*/'):\n            s = s[:-2].rstrip('*')\n        elif s.startswith('///'):\n            s = s[3:]\n        if s.startswith('*'):\n            s = s[1:]\n        if len(s) > 0:\n            leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))\n        result += s + '\\n'\n\n    if leading_spaces != float('inf'):\n        result2 = \"\"\n        for s in result.splitlines():\n            result2 += s[leading_spaces:] + '\\n'\n        result = result2\n\n    # Doxygen tags\n    cpp_group = '([\\w:]+)'\n    param_group = '([\\[\\w:\\]]+)'\n\n    s = result\n    s = re.sub(r'\\\\c\\s+%s' % cpp_group, r'``\\1``', s)\n    s = re.sub(r'\\\\a\\s+%s' % cpp_group, r'*\\1*', s)\n    s = re.sub(r'\\\\e\\s+%s' % cpp_group, r'*\\1*', s)\n    s = re.sub(r'\\\\em\\s+%s' % cpp_group, r'*\\1*', s)\n    s = re.sub(r'\\\\b\\s+%s' % cpp_group, r'**\\1**', s)\n    s = re.sub(r'\\\\ingroup\\s+%s' % cpp_group, r'', s)\n    s = re.sub(r'\\\\param%s?\\s+%s' % (param_group, cpp_group),\n               r'\\n\\n$Parameter ``\\2``:\\n\\n', s)\n    s = re.sub(r'\\\\tparam%s?\\s+%s' % (param_group, cpp_group),\n               r'\\n\\n$Template parameter ``\\2``:\\n\\n', s)\n\n    for in_, out_ in {\n        'return': 'Returns',\n        'author': 'Author',\n        'authors': 'Authors',\n        'copyright': 'Copyright',\n        'date': 'Date',\n        'remark': 'Remark',\n        'sa': 'See also',\n        'see': 'See also',\n        'extends': 'Extends',\n        'throw': 'Throws',\n        'throws': 'Throws'\n    }.items():\n        s = re.sub(r'\\\\%s\\s*' % in_, 
r'\\n\\n$%s:\\n\\n' % out_, s)\n\n    s = re.sub(r'\\\\details\\s*', r'\\n\\n', s)\n    s = re.sub(r'\\\\brief\\s*', r'', s)\n    s = re.sub(r'\\\\short\\s*', r'', s)\n    s = re.sub(r'\\\\ref\\s*', r'', s)\n\n    s = re.sub(r'\\\\code\\s?(.*?)\\s?\\\\endcode',\n               r\"```\\n\\1\\n```\\n\", s, flags=re.DOTALL)\n\n    # HTML/TeX tags\n    s = re.sub(r'<tt>(.*?)</tt>', r'``\\1``', s, flags=re.DOTALL)\n    s = re.sub(r'<pre>(.*?)</pre>', r\"```\\n\\1\\n```\\n\", s, flags=re.DOTALL)\n    s = re.sub(r'<em>(.*?)</em>', r'*\\1*', s, flags=re.DOTALL)\n    s = re.sub(r'<b>(.*?)</b>', r'**\\1**', s, flags=re.DOTALL)\n    s = re.sub(r'\\\\f\\$(.*?)\\\\f\\$', r'$\\1$', s, flags=re.DOTALL)\n    s = re.sub(r'<li>', r'\\n\\n* ', s)\n    s = re.sub(r'</?ul>', r'', s)\n    s = re.sub(r'</li>', r'\\n\\n', s)\n\n    s = s.replace('``true``', '``True``')\n    s = s.replace('``false``', '``False``')\n\n    # Re-flow text\n    wrapper = textwrap.TextWrapper()\n    wrapper.expand_tabs = True\n    wrapper.replace_whitespace = True\n    wrapper.drop_whitespace = True\n    wrapper.width = 70\n    wrapper.initial_indent = wrapper.subsequent_indent = ''\n\n    result = ''\n    in_code_segment = False\n    for x in re.split(r'(```)', s):\n        if x == '```':\n            if not in_code_segment:\n                result += '```\\n'\n            else:\n                result += '\\n```\\n\\n'\n            in_code_segment = not in_code_segment\n        elif in_code_segment:\n            result += x.strip()\n        else:\n            for y in re.split(r'(?: *\\n *){2,}', x):\n                wrapped = wrapper.fill(re.sub(r'\\s+', ' ', y).strip())\n                if len(wrapped) > 0 and wrapped[0] == '$':\n                    result += wrapped[1:] + '\\n'\n                    wrapper.initial_indent = \\\n                        wrapper.subsequent_indent = ' ' * 4\n                else:\n                    if len(wrapped) > 0:\n                        result += wrapped + '\\n\\n'\n   
                 wrapper.initial_indent = wrapper.subsequent_indent = ''\n    return result.rstrip().lstrip('\\n')\n\n\ndef extract(filename, node, prefix):\n    if not (node.location.file is None or\n            os.path.samefile(d(node.location.file.name), filename)):\n        return 0\n    if node.kind in RECURSE_LIST:\n        sub_prefix = prefix\n        if node.kind != CursorKind.TRANSLATION_UNIT:\n            if len(sub_prefix) > 0:\n                sub_prefix += '_'\n            sub_prefix += d(node.spelling)\n        for i in node.get_children():\n            extract(filename, i, sub_prefix)\n    if node.kind in PRINT_LIST:\n        comment = d(node.raw_comment) if node.raw_comment is not None else ''\n        comment = process_comment(comment)\n        sub_prefix = prefix\n        if len(sub_prefix) > 0:\n            sub_prefix += '_'\n        if len(node.spelling) > 0:\n            name = sanitize_name(sub_prefix + d(node.spelling))\n            global output\n            output.append((name, filename, comment))\n\n\nclass ExtractionThread(Thread):\n    def __init__(self, filename, parameters):\n        Thread.__init__(self)\n        self.filename = filename\n        self.parameters = parameters\n        job_semaphore.acquire()\n\n    def run(self):\n        print('Processing \"%s\" ..' 
% self.filename, file=sys.stderr)\n        try:\n            index = cindex.Index(\n                cindex.conf.lib.clang_createIndex(False, True))\n            tu = index.parse(self.filename, self.parameters)\n            extract(self.filename, tu.cursor, '')\n        finally:\n            job_semaphore.release()\n\nif __name__ == '__main__':\n    parameters = ['-x', 'c++', '-std=c++11']\n    filenames = []\n\n    if platform.system() == 'Darwin':\n        dev_path = '/Applications/Xcode.app/Contents/Developer/'\n        lib_dir = dev_path + 'Toolchains/XcodeDefault.xctoolchain/usr/lib/'\n        sdk_dir = dev_path + 'Platforms/MacOSX.platform/Developer/SDKs'\n        libclang = lib_dir + 'libclang.dylib'\n\n        if os.path.exists(libclang):\n            cindex.Config.set_library_path(os.path.dirname(libclang))\n\n        if os.path.exists(sdk_dir):\n            sysroot_dir = os.path.join(sdk_dir, next(os.walk(sdk_dir))[1][0])\n            parameters.append('-isysroot')\n            parameters.append(sysroot_dir)\n\n    for item in sys.argv[1:]:\n        if item.startswith('-'):\n            parameters.append(item)\n        else:\n            filenames.append(item)\n\n    if len(filenames) == 0:\n        print('Syntax: %s [.. a list of header files ..]' % sys.argv[0])\n        exit(-1)\n\n    print('''/*\n  This file contains docstrings for the Python bindings.\n  Do not edit! These were automatically extracted by mkdoc.py\n */\n\n#define __EXPAND(x)                                      x\n#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...)  COUNT\n#define __VA_SIZE(...)                                   
__EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))\n#define __CAT1(a, b)                                     a ## b\n#define __CAT2(a, b)                                     __CAT1(a, b)\n#define __DOC1(n1)                                       __doc_##n1\n#define __DOC2(n1, n2)                                   __doc_##n1##_##n2\n#define __DOC3(n1, n2, n3)                               __doc_##n1##_##n2##_##n3\n#define __DOC4(n1, n2, n3, n4)                           __doc_##n1##_##n2##_##n3##_##n4\n#define __DOC5(n1, n2, n3, n4, n5)                       __doc_##n1##_##n2##_##n3##_##n4##_##n5\n#define __DOC6(n1, n2, n3, n4, n5, n6)                   __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6\n#define __DOC7(n1, n2, n3, n4, n5, n6, n7)               __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7\n#define DOC(...)                                         __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))\n\n#if defined(__GNUG__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-variable\"\n#endif\n''')\n\n    output.clear()\n    for filename in filenames:\n        thr = ExtractionThread(filename, parameters)\n        thr.start()\n\n    print('Waiting for jobs to finish ..', file=sys.stderr)\n    for i in range(job_count):\n        job_semaphore.acquire()\n\n    name_ctr = 1\n    name_prev = None\n    for name, _, comment in list(sorted(output, key=lambda x: (x[0], x[1]))):\n        if name == name_prev:\n            name_ctr += 1\n            name = name + \"_%i\" % name_ctr\n        else:\n            name_prev = name\n            name_ctr = 1\n        print('\\nstatic const char *%s =%sR\"doc(%s)doc\";' %\n              (name, '\\n' if '\\n' in comment else ' ', comment))\n\n    print('''\n#if defined(__GNUG__)\n#pragma GCC diagnostic pop\n#endif\n''')\n"
  },
  {
    "path": "src/third_party/pybind11/tools/pybind11Config.cmake.in",
    "content": "# pybind11Config.cmake\n# --------------------\n#\n# PYBIND11 cmake module.\n# This module sets the following variables in your project::\n#\n#   pybind11_FOUND - true if pybind11 and all required components found on the system\n#   pybind11_VERSION - pybind11 version in format Major.Minor.Release\n#   pybind11_INCLUDE_DIRS - Directories where pybind11 and python headers are located.\n#   pybind11_INCLUDE_DIR - Directory where pybind11 headers are located.\n#   pybind11_DEFINITIONS - Definitions necessary to use pybind11, namely USING_pybind11.\n#   pybind11_LIBRARIES - compile flags and python libraries (as needed) to link against.\n#   pybind11_LIBRARY - empty.\n#   CMAKE_MODULE_PATH - appends location of accompanying FindPythonLibsNew.cmake and\n#                       pybind11Tools.cmake modules.\n#\n#\n# Available components: None\n#\n#\n# Exported targets::\n#\n# If pybind11 is found, this module defines the following :prop_tgt:`IMPORTED`\n# interface library targets::\n#\n#   pybind11::module - for extension modules\n#   pybind11::embed - for embedding the Python interpreter\n#\n# Python headers, libraries (as needed by platform), and the C++ standard\n# are attached to the target. Set PythonLibsNew variables to influence\n# python detection and PYBIND11_CPP_STANDARD (-std=c++11 or -std=c++14) to\n# influence standard setting. ::\n#\n#   find_package(pybind11 CONFIG REQUIRED)\n#   message(STATUS \"Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}\")\n#\n#   # Create an extension module\n#   add_library(mylib MODULE main.cpp)\n#   target_link_libraries(mylib pybind11::module)\n#\n#   # Or embed the Python interpreter into an executable\n#   add_executable(myexe main.cpp)\n#   target_link_libraries(myexe pybind11::embed)\n#\n# Suggested usage::\n#\n# find_package with version info is not recommended except for release versions. 
::\n#\n#   find_package(pybind11 CONFIG)\n#   find_package(pybind11 2.0 EXACT CONFIG REQUIRED)\n#\n#\n# The following variables can be set to guide the search for this package::\n#\n#   pybind11_DIR - CMake variable, set to directory containing this Config file\n#   CMAKE_PREFIX_PATH - CMake variable, set to root directory of this package\n#   PATH - environment variable, set to bin directory of this package\n#   CMAKE_DISABLE_FIND_PACKAGE_pybind11 - CMake variable, disables\n#     find_package(pybind11) when not REQUIRED, perhaps to force internal build\n\n@PACKAGE_INIT@\n\nset(PN pybind11)\n\n# location of pybind11/pybind11.h\nset(${PN}_INCLUDE_DIR \"${PACKAGE_PREFIX_DIR}/@CMAKE_INSTALL_INCLUDEDIR@\")\n\nset(${PN}_LIBRARY \"\")\nset(${PN}_DEFINITIONS USING_${PN})\n\ncheck_required_components(${PN})\n\n# make detectable the FindPythonLibsNew.cmake module\nlist(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR})\n\ninclude(pybind11Tools)\n\nif(NOT (CMAKE_VERSION VERSION_LESS 3.0))\n#-----------------------------------------------------------------------------\n# Don't include targets if this file is being picked up by another\n# project which has already built this as a subproject\n#-----------------------------------------------------------------------------\nif(NOT TARGET ${PN}::pybind11)\n    include(\"${CMAKE_CURRENT_LIST_DIR}/${PN}Targets.cmake\")\n\n    find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} MODULE REQUIRED)\n    set_property(TARGET ${PN}::pybind11 APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${PYTHON_INCLUDE_DIRS})\n    set_property(TARGET ${PN}::embed APPEND PROPERTY INTERFACE_LINK_LIBRARIES ${PYTHON_LIBRARIES})\n    if(WIN32 OR CYGWIN)\n      set_property(TARGET ${PN}::module APPEND PROPERTY INTERFACE_LINK_LIBRARIES ${PYTHON_LIBRARIES})\n    endif()\n\n    set_property(TARGET ${PN}::pybind11 APPEND PROPERTY INTERFACE_COMPILE_OPTIONS \"${PYBIND11_CPP_STANDARD}\")\n\n    get_property(_iid TARGET ${PN}::pybind11 PROPERTY 
INTERFACE_INCLUDE_DIRECTORIES)\n    get_property(_ill TARGET ${PN}::module PROPERTY INTERFACE_LINK_LIBRARIES)\n    set(${PN}_INCLUDE_DIRS ${_iid})\n    set(${PN}_LIBRARIES ${_ico} ${_ill})\nendif()\nendif()\n"
  },
  {
    "path": "src/third_party/pybind11/tools/pybind11Tools.cmake",
    "content": "# tools/pybind11Tools.cmake -- Build system for the pybind11 modules\n#\n# Copyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>\n#\n# All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\n\ncmake_minimum_required(VERSION 2.8.12)\n\n# Add a CMake parameter for choosing a desired Python version\nif(NOT PYBIND11_PYTHON_VERSION)\n  set(PYBIND11_PYTHON_VERSION \"\" CACHE STRING \"Python version to use for compiling modules\")\nendif()\n\nset(Python_ADDITIONAL_VERSIONS 3.7 3.6 3.5 3.4)\nfind_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} REQUIRED)\n\ninclude(CheckCXXCompilerFlag)\ninclude(CMakeParseArguments)\n\nif(NOT PYBIND11_CPP_STANDARD AND NOT CMAKE_CXX_STANDARD)\n  if(NOT MSVC)\n    check_cxx_compiler_flag(\"-std=c++14\" HAS_CPP14_FLAG)\n\n    if (HAS_CPP14_FLAG)\n      set(PYBIND11_CPP_STANDARD -std=c++14)\n    else()\n      check_cxx_compiler_flag(\"-std=c++11\" HAS_CPP11_FLAG)\n      if (HAS_CPP11_FLAG)\n        set(PYBIND11_CPP_STANDARD -std=c++11)\n      else()\n        message(FATAL_ERROR \"Unsupported compiler -- pybind11 requires C++11 support!\")\n      endif()\n    endif()\n  elseif(MSVC)\n    set(PYBIND11_CPP_STANDARD /std:c++14)\n  endif()\n\n  set(PYBIND11_CPP_STANDARD ${PYBIND11_CPP_STANDARD} CACHE STRING\n      \"C++ standard flag, e.g. -std=c++11, -std=c++14, /std:c++14.  Defaults to C++14 mode.\" FORCE)\nendif()\n\n# Checks whether the given CXX/linker flags can compile and link a cxx file.  cxxflags and\n# linkerflags are lists of flags to use.  The result variable is a unique variable name for each set\n# of flags: the compilation result will be cached base on the result variable.  
If the flags work,\n# sets them in cxxflags_out/linkerflags_out internal cache variables (in addition to ${result}).\nfunction(_pybind11_return_if_cxx_and_linker_flags_work result cxxflags linkerflags cxxflags_out linkerflags_out)\n  set(CMAKE_REQUIRED_LIBRARIES ${linkerflags})\n  check_cxx_compiler_flag(\"${cxxflags}\" ${result})\n  if (${result})\n    set(${cxxflags_out} \"${cxxflags}\" CACHE INTERNAL \"\" FORCE)\n    set(${linkerflags_out} \"${linkerflags}\" CACHE INTERNAL \"\" FORCE)\n  endif()\nendfunction()\n\n# Internal: find the appropriate link time optimization flags for this compiler\nfunction(_pybind11_add_lto_flags target_name prefer_thin_lto)\n  if (NOT DEFINED PYBIND11_LTO_CXX_FLAGS)\n    set(PYBIND11_LTO_CXX_FLAGS \"\" CACHE INTERNAL \"\")\n    set(PYBIND11_LTO_LINKER_FLAGS \"\" CACHE INTERNAL \"\")\n\n    if(CMAKE_CXX_COMPILER_ID MATCHES \"GNU|Clang\")\n      set(cxx_append \"\")\n      set(linker_append \"\")\n      if (CMAKE_CXX_COMPILER_ID MATCHES \"Clang\" AND NOT APPLE)\n        # Clang Gold plugin does not support -Os; append -O3 to MinSizeRel builds to override it\n        set(linker_append \";$<$<CONFIG:MinSizeRel>:-O3>\")\n      elseif(CMAKE_CXX_COMPILER_ID MATCHES \"GNU\")\n        set(cxx_append \";-fno-fat-lto-objects\")\n      endif()\n\n      if (CMAKE_CXX_COMPILER_ID MATCHES \"Clang\" AND prefer_thin_lto)\n        _pybind11_return_if_cxx_and_linker_flags_work(HAS_FLTO_THIN\n          \"-flto=thin${cxx_append}\" \"-flto=thin${linker_append}\"\n          PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)\n      endif()\n\n      if (NOT HAS_FLTO_THIN)\n        _pybind11_return_if_cxx_and_linker_flags_work(HAS_FLTO\n          \"-flto${cxx_append}\" \"-flto${linker_append}\"\n          PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)\n      endif()\n    elseif (CMAKE_CXX_COMPILER_ID MATCHES \"Intel\")\n      # Intel equivalent to LTO is called IPO\n      _pybind11_return_if_cxx_and_linker_flags_work(HAS_INTEL_IPO\n      \"-ipo\" \"-ipo\" 
PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)\n    elseif(MSVC)\n      # cmake only interprets libraries as linker flags when they start with a - (otherwise it\n      # converts /LTCG to \\LTCG as if it was a Windows path).  Luckily MSVC supports passing flags\n      # with - instead of /, even if it is a bit non-standard:\n      _pybind11_return_if_cxx_and_linker_flags_work(HAS_MSVC_GL_LTCG\n        \"/GL\" \"-LTCG\" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)\n    endif()\n\n    if (PYBIND11_LTO_CXX_FLAGS)\n      message(STATUS \"LTO enabled\")\n    else()\n      message(STATUS \"LTO disabled (not supported by the compiler and/or linker)\")\n    endif()\n  endif()\n\n  # Enable LTO flags if found, except for Debug builds\n  if (PYBIND11_LTO_CXX_FLAGS)\n    target_compile_options(${target_name} PRIVATE \"$<$<NOT:$<CONFIG:Debug>>:${PYBIND11_LTO_CXX_FLAGS}>\")\n  endif()\n  if (PYBIND11_LTO_LINKER_FLAGS)\n    target_link_libraries(${target_name} PRIVATE \"$<$<NOT:$<CONFIG:Debug>>:${PYBIND11_LTO_LINKER_FLAGS}>\")\n  endif()\nendfunction()\n\n# Build a Python extension module:\n# pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]\n#                     [NO_EXTRAS] [SYSTEM] [THIN_LTO] source1 [source2 ...])\n#\nfunction(pybind11_add_module target_name)\n  set(options MODULE SHARED EXCLUDE_FROM_ALL NO_EXTRAS SYSTEM THIN_LTO)\n  cmake_parse_arguments(ARG \"${options}\" \"\" \"\" ${ARGN})\n\n  if(ARG_MODULE AND ARG_SHARED)\n    message(FATAL_ERROR \"Can't be both MODULE and SHARED\")\n  elseif(ARG_SHARED)\n    set(lib_type SHARED)\n  else()\n    set(lib_type MODULE)\n  endif()\n\n  if(ARG_EXCLUDE_FROM_ALL)\n    set(exclude_from_all EXCLUDE_FROM_ALL)\n  endif()\n\n  add_library(${target_name} ${lib_type} ${exclude_from_all} ${ARG_UNPARSED_ARGUMENTS})\n\n  if(ARG_SYSTEM)\n    set(inc_isystem SYSTEM)\n  endif()\n\n  target_include_directories(${target_name} ${inc_isystem}\n    PRIVATE ${PYBIND11_INCLUDE_DIR}  # from project CMakeLists.txt\n    PRIVATE 
${pybind11_INCLUDE_DIR}  # from pybind11Config\n    PRIVATE ${PYTHON_INCLUDE_DIRS})\n\n  # Python debug libraries expose slightly different objects\n  # https://docs.python.org/3.6/c-api/intro.html#debugging-builds\n  # https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib\n  if(PYTHON_IS_DEBUG)\n    target_compile_definitions(${target_name} PRIVATE Py_DEBUG)\n  endif()\n\n  # The prefix and extension are provided by FindPythonLibsNew.cmake\n  set_target_properties(${target_name} PROPERTIES PREFIX \"${PYTHON_MODULE_PREFIX}\")\n  set_target_properties(${target_name} PROPERTIES SUFFIX \"${PYTHON_MODULE_EXTENSION}\")\n\n  # -fvisibility=hidden is required to allow multiple modules compiled against\n  # different pybind versions to work properly, and for some features (e.g.\n  # py::module_local).  We force it on everything inside the `pybind11`\n  # namespace; also turning it on for a pybind module compilation here avoids\n  # potential warnings or issues from having mixed hidden/non-hidden types.\n  set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET \"hidden\")\n  set_target_properties(${target_name} PROPERTIES CUDA_VISIBILITY_PRESET \"hidden\")\n\n  if(WIN32 OR CYGWIN)\n    # Link against the Python shared library on Windows\n    target_link_libraries(${target_name} PRIVATE ${PYTHON_LIBRARIES})\n  elseif(APPLE)\n    # It's quite common to have multiple copies of the same Python version\n    # installed on one's system. 
E.g.: one copy from the OS and another copy\n    # that's statically linked into an application like Blender or Maya.\n    # If we link our plugin library against the OS Python here and import it\n    # into Blender or Maya later on, this will cause segfaults when multiple\n    # conflicting Python instances are active at the same time (even when they\n    # are of the same version).\n\n    # Windows is not affected by this issue since it handles DLL imports\n    # differently. The solution for Linux and Mac OS is simple: we just don't\n    # link against the Python library. The resulting shared library will have\n    # missing symbols, but that's perfectly fine -- they will be resolved at\n    # import time.\n\n    target_link_libraries(${target_name} PRIVATE \"-undefined dynamic_lookup\")\n\n    if(ARG_SHARED)\n      # Suppress CMake >= 3.0 warning for shared libraries\n      set_target_properties(${target_name} PROPERTIES MACOSX_RPATH ON)\n    endif()\n  endif()\n\n  # Make sure C++11/14 are enabled\n  target_compile_options(${target_name} PUBLIC ${PYBIND11_CPP_STANDARD})\n\n  if(ARG_NO_EXTRAS)\n    return()\n  endif()\n\n  _pybind11_add_lto_flags(${target_name} ${ARG_THIN_LTO})\n\n  if (NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug)\n    # Strip unnecessary sections of the binary on Linux/Mac OS\n    if(CMAKE_STRIP)\n      if(APPLE)\n        add_custom_command(TARGET ${target_name} POST_BUILD\n                           COMMAND ${CMAKE_STRIP} -x $<TARGET_FILE:${target_name}>)\n      else()\n        add_custom_command(TARGET ${target_name} POST_BUILD\n                           COMMAND ${CMAKE_STRIP} $<TARGET_FILE:${target_name}>)\n      endif()\n    endif()\n  endif()\n\n  if(MSVC)\n    # /MP enables multithreaded builds (relevant when there are many files), /bigobj is\n    # needed for bigger binding projects due to the limit to 64k addressable sections\n    target_compile_options(${target_name} PRIVATE /bigobj)\n    if(CMAKE_VERSION VERSION_LESS 3.11)\n  
    target_compile_options(${target_name} PRIVATE $<$<NOT:$<CONFIG:Debug>>:/MP>)\n    else()\n      # Only set these options for C++ files.  This is important so that, for\n      # instance, projects that include other types of source files like CUDA\n      # .cu files don't get these options propagated to nvcc since that would\n      # cause the build to fail.\n      target_compile_options(${target_name} PRIVATE $<$<NOT:$<CONFIG:Debug>>:$<$<COMPILE_LANGUAGE:CXX>:/MP>>)\n    endif()\n  endif()\nendfunction()\n"
  },
  {
    "path": "src/third_party/zstd/lib/README.md",
    "content": "Zstandard library files\n================================\n\nThe __lib__ directory is split into several sub-directories,\nin order to make it easier to select or exclude features.\n\n\n#### Building\n\n`Makefile` script is provided, supporting [Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html#Makefile-Conventions),\nincluding commands variables, staged install, directory variables and standard targets.\n- `make` : generates both static and dynamic libraries\n- `make install` : install libraries and headers in target system directories\n\n`libzstd` default scope is pretty large, including compression, decompression, dictionary builder,\nand support for decoding legacy formats >= v0.5.0.\nThe scope can be reduced on demand (see paragraph _modular build_).\n\n\n#### Multithreading support\n\nMultithreading is disabled by default when building with `make`.\nEnabling multithreading requires 2 conditions :\n- set build macro `ZSTD_MULTITHREAD` (`-DZSTD_MULTITHREAD` for `gcc`)\n- for POSIX systems : compile with pthread (`-pthread` compilation flag for `gcc`)\n\nBoth conditions are automatically applied when invoking `make lib-mt` target.\n\nWhen linking a POSIX program with a multithreaded version of `libzstd`,\nnote that it's necessary to invoke the `-pthread` flag during link stage.\n\nMultithreading capabilities are exposed\nvia the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.4.3/lib/zstd.h#L351).\n\n\n#### API\n\nZstandard's stable API is exposed within [lib/zstd.h](zstd.h).\n\n\n#### Advanced API\n\nOptional advanced features are exposed via :\n\n- `lib/common/zstd_errors.h` : translates `size_t` function results\n                               into a `ZSTD_ErrorCode`, for accurate error handling.\n\n- `ZSTD_STATIC_LINKING_ONLY` : if this macro is defined _before_ including `zstd.h`,\n                          it unlocks access to the experimental API,\n                  
        exposed in the second part of `zstd.h`.\n                          All definitions in the experimental APIs are unstable,\n                          they may still change in the future, or even be removed.\n                          As a consequence, experimental definitions shall ___never be used with dynamic library___ !\n                          Only static linking is allowed.\n\n\n#### Modular build\n\nIt's possible to compile only a limited set of features within `libzstd`.\nThe file structure is designed to make this selection manually achievable for any build system :\n\n- Directory `lib/common` is always required, for all variants.\n\n- Compression source code lies in `lib/compress`\n\n- Decompression source code lies in `lib/decompress`\n\n- It's possible to include only `compress` or only `decompress`, they don't depend on each other.\n\n- `lib/dictBuilder` : makes it possible to generate dictionaries from a set of samples.\n        The API is exposed in `lib/dictBuilder/zdict.h`.\n        This module depends on both `lib/common` and `lib/compress` .\n\n- `lib/legacy` : makes it possible to decompress legacy zstd formats, starting from `v0.1.0`.\n        This module depends on `lib/common` and `lib/decompress`.\n        To enable this feature, define `ZSTD_LEGACY_SUPPORT` during compilation.\n        Specifying a number limits versions supported to that version onward.\n        For example, `ZSTD_LEGACY_SUPPORT=2` means : \"support legacy formats >= v0.2.0\".\n        Conversely, `ZSTD_LEGACY_SUPPORT=0` means \"do __not__ support legacy formats\".\n        By default, this build macro is set as `ZSTD_LEGACY_SUPPORT=5`.\n        Decoding supported legacy format is a transparent capability triggered within decompression functions.\n        It's also allowed to invoke legacy API directly, exposed in `lib/legacy/zstd_legacy.h`.\n        Each version does also provide its own set of advanced API.\n        For example, advanced API for version `v0.4` 
is exposed in `lib/legacy/zstd_v04.h` .\n\n- While invoking `make libzstd`, it's possible to define build macros\n        `ZSTD_LIB_COMPRESSION, ZSTD_LIB_DECOMPRESSION`, `ZSTD_LIB_DICTBUILDER`,\n        and `ZSTD_LIB_DEPRECATED` as `0` to forgo compilation of the corresponding features.\n        This will also disable compilation of all dependencies\n        (eg. `ZSTD_LIB_COMPRESSION=0` will also disable dictBuilder).\n\n- There are some additional build macros that can be used to minify the decoder.\n\n  Zstandard often has more than one implementation of a piece of functionality,\n  where each implementation optimizes for different scenarios. For example, the\n  Huffman decoder has complementary implementations that decode the stream one\n  symbol at a time or two symbols at a time. Zstd normally includes both (and\n  dispatches between them at runtime), but by defining `HUF_FORCE_DECOMPRESS_X1`\n  or `HUF_FORCE_DECOMPRESS_X2`, you can force the use of one or the other, avoiding\n  compilation of the other. Similarly, `ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT`\n  and `ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG` force the compilation and use of\n  only one or the other of two decompression implementations. 
The smallest\n  binary is achieved by using `HUF_FORCE_DECOMPRESS_X1` and\n  `ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT`.\n\n  For squeezing the last ounce of size out, you can also define\n  `ZSTD_NO_INLINE`, which disables inlining, and `ZSTD_STRIP_ERROR_STRINGS`,\n  which removes the error messages that are otherwise returned by\n  `ZSTD_getErrorName`.\n\n- While invoking `make libzstd`, the build macro `ZSTD_LEGACY_MULTITHREADED_API=1`\n  will expose the deprecated `ZSTDMT` API exposed by `zstdmt_compress.h` in\n  the shared library, which is now hidden by default.\n\n- The build macro `DYNAMIC_BMI2` can be set to 1 or 0 in order to generate binaries\n  which can detect at runtime the presence of BMI2 instructions, and use them only if present.\n  These instructions contribute to better performance, notably on the decoder side.\n  By default, this feature is automatically enabled on detecting\n  the right instruction set (x64) and compiler (clang or gcc >= 5).\n  It's obviously disabled for different cpus,\n  or when BMI2 instruction set is _required_ by the compiler command line\n  (in this case, only the BMI2 code path is generated).\n  Setting this macro will either force to generate the BMI2 dispatcher (1)\n  or prevent it (0). It overrides automatic detection.\n\n\n#### Windows : using MinGW+MSYS to create DLL\n\nDLL can be created using MinGW+MSYS with the `make libzstd` command.\nThis command creates `dll\\libzstd.dll` and the import library `dll\\libzstd.lib`.\nThe import library is only required with Visual C++.\nThe header file `zstd.h` and the dynamic library `dll\\libzstd.dll` are required to\ncompile a project using gcc/MinGW.\nThe dynamic library has to be added to linking options.\nIt means that if a project that uses ZSTD consists of a single `test-dll.c`\nfile it should be linked with `dll\\libzstd.dll`. 
For example:\n```\n    gcc $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\\libzstd.dll\n```\nThe compiled executable will require ZSTD DLL which is available at `dll\\libzstd.dll`.\n\n\n#### Deprecated API\n\nObsolete APIs on their way out are stored in directory `lib/deprecated`.\nAt this stage, it contains older streaming prototypes, in `lib/deprecated/zbuff.h`.\nThese prototypes will be removed in some future version.\nConsider migrating code towards supported streaming API exposed in `zstd.h`.\n\n\n#### Miscellaneous\n\nThe other files are not source code. They are :\n\n - `BUCK` : support for `buck` build system (https://buckbuild.com/)\n - `Makefile` : `make` script to build and install zstd library (static and dynamic)\n - `README.md` : this file\n - `dll/` : resources directory for Windows compilation\n - `libzstd.pc.in` : script for `pkg-config` (used in `make install`)\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/bitstream.h",
    "content": "/* ******************************************************************\n   bitstream\n   Part of FSE library\n   Copyright (C) 2013-present, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef BITSTREAM_H_MODULE\n#define BITSTREAM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*\n*  This API consists of small unitary functions, which must be inlined for best performance.\n*  Since link-time-optimization is not available for all compilers,\n*  these functions are defined into a .h to be included.\n*/\n\n/*-****************************************\n*  Dependencies\n******************************************/\n#include \"mem.h\"            /* unaligned access routines */\n#include \"compiler.h\"       /* UNLIKELY() */\n#include \"debug.h\"          /* assert(), DEBUGLOG(), RAWLOG() */\n#include \"error_private.h\"  /* error codes and messages */\n\n\n/*=========================================\n*  Target specific\n=========================================*/\n#if defined(__BMI__) && defined(__GNUC__)\n#  include <immintrin.h>   /* support for bextr (experimental) */\n#elif defined(__ICCARM__)\n#  include <intrinsics.h>\n#endif\n\n#define STREAM_ACCUMULATOR_MIN_32  25\n#define STREAM_ACCUMULATOR_MIN_64  57\n#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? 
STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))\n\n\n/*-******************************************\n*  bitStream encoding API (write forward)\n********************************************/\n/* bitStream can mix input from multiple sources.\n * A critical property of these streams is that they encode and decode in **reverse** direction.\n * So the first bit sequence you add will be the last to be read, like a LIFO stack.\n */\ntypedef struct {\n    size_t bitContainer;\n    unsigned bitPos;\n    char*  startPtr;\n    char*  ptr;\n    char*  endPtr;\n} BIT_CStream_t;\n\nMEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);\nMEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);\nMEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);\nMEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);\n\n/* Start with initCStream, providing the size of buffer to write into.\n*  bitStream will never write outside of this buffer.\n*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.\n*\n*  bits are first added to a local register.\n*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.\n*  Writing data into memory is an explicit operation, performed by the flushBits function.\n*  Hence keep track how many bits are potentially stored into local register to avoid register overflow.\n*  After a flushBits, a maximum of 7 bits might still be stored into local register.\n*\n*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.\n*\n*  Last operation is to close the bitStream.\n*  The function returns the final size of CStream in bytes.\n*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)\n*/\n\n\n/*-********************************************\n*  bitStream decoding API (read backward)\n**********************************************/\ntypedef struct {\n    
size_t   bitContainer;\n    unsigned bitsConsumed;\n    const char* ptr;\n    const char* start;\n    const char* limitPtr;\n} BIT_DStream_t;\n\ntypedef enum { BIT_DStream_unfinished = 0,\n               BIT_DStream_endOfBuffer = 1,\n               BIT_DStream_completed = 2,\n               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */\n               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */\n\nMEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);\nMEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);\nMEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);\nMEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);\n\n\n/* Start by invoking BIT_initDStream().\n*  A chunk of the bitStream is then stored into a local register.\n*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).\n*  You can then retrieve bitFields stored into the local register, **in reverse order**.\n*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.\n*  A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.\n*  Otherwise, it can be less than that, so proceed accordingly.\n*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().\n*/\n\n\n/*-****************************************\n*  unsafe API\n******************************************/\nMEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);\n/* faster, but works only if value is \"clean\", meaning all high bits above nbBits are 0 */\n\nMEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);\n/* unsafe version; does not check buffer overflow */\n\nMEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);\n/* faster, but works only if nbBits >= 1 
*/\n\n\n\n/*-**************************************************************\n*  Internal functions\n****************************************************************/\nMEM_STATIC unsigned BIT_highbit32 (U32 val)\n{\n    assert(val != 0);\n    {\n#   if defined(_MSC_VER)   /* Visual */\n        unsigned long r=0;\n        _BitScanReverse ( &r, val );\n        return (unsigned) r;\n#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */\n        return __builtin_clz (val) ^ 31;\n#   elif defined(__ICCARM__)    /* IAR Intrinsic */\n        return 31 - __CLZ(val);\n#   else   /* Software version */\n        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,\n                                                 11, 14, 16, 18, 22, 25,  3, 30,\n                                                  8, 12, 20, 28, 15, 17, 24,  7,\n                                                 19, 27, 23,  6, 26,  5,  4, 31 };\n        U32 v = val;\n        v |= v >> 1;\n        v |= v >> 2;\n        v |= v >> 4;\n        v |= v >> 8;\n        v |= v >> 16;\n        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];\n#   endif\n    }\n}\n\n/*=====    Local Constants   =====*/\nstatic const unsigned BIT_mask[] = {\n    0,          1,         3,         7,         0xF,       0x1F,\n    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,\n    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,\n    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,\n    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,\n    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */\n#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))\n\n/*-**************************************************************\n*  bitStream encoding\n****************************************************************/\n/*! 
BIT_initCStream() :\n *  `dstCapacity` must be > sizeof(size_t)\n *  @return : 0 if success,\n *            otherwise an error code (can be tested using ERR_isError()) */\nMEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,\n                                  void* startPtr, size_t dstCapacity)\n{\n    bitC->bitContainer = 0;\n    bitC->bitPos = 0;\n    bitC->startPtr = (char*)startPtr;\n    bitC->ptr = bitC->startPtr;\n    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);\n    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);\n    return 0;\n}\n\n/*! BIT_addBits() :\n *  can add up to 31 bits into `bitC`.\n *  Note : does not check for register overflow ! */\nMEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,\n                            size_t value, unsigned nbBits)\n{\n    MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);\n    assert(nbBits < BIT_MASK_SIZE);\n    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);\n    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;\n    bitC->bitPos += nbBits;\n}\n\n/*! BIT_addBitsFast() :\n *  works only if `value` is _clean_,\n *  meaning all high bits above nbBits are 0 */\nMEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,\n                                size_t value, unsigned nbBits)\n{\n    assert((value>>nbBits) == 0);\n    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);\n    bitC->bitContainer |= value << bitC->bitPos;\n    bitC->bitPos += nbBits;\n}\n\n/*! 
BIT_flushBitsFast() :\n *  assumption : bitContainer has not overflowed\n *  unsafe version; does not check buffer overflow */\nMEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)\n{\n    size_t const nbBytes = bitC->bitPos >> 3;\n    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);\n    assert(bitC->ptr <= bitC->endPtr);\n    MEM_writeLEST(bitC->ptr, bitC->bitContainer);\n    bitC->ptr += nbBytes;\n    bitC->bitPos &= 7;\n    bitC->bitContainer >>= nbBytes*8;\n}\n\n/*! BIT_flushBits() :\n *  assumption : bitContainer has not overflowed\n *  safe version; check for buffer overflow, and prevents it.\n *  note : does not signal buffer overflow.\n *  overflow will be revealed later on using BIT_closeCStream() */\nMEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)\n{\n    size_t const nbBytes = bitC->bitPos >> 3;\n    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);\n    assert(bitC->ptr <= bitC->endPtr);\n    MEM_writeLEST(bitC->ptr, bitC->bitContainer);\n    bitC->ptr += nbBytes;\n    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;\n    bitC->bitPos &= 7;\n    bitC->bitContainer >>= nbBytes*8;\n}\n\n/*! BIT_closeCStream() :\n *  @return : size of CStream, in bytes,\n *            or 0 if it could not fit into dstBuffer */\nMEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)\n{\n    BIT_addBitsFast(bitC, 1, 1);   /* endMark */\n    BIT_flushBits(bitC);\n    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */\n    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);\n}\n\n\n/*-********************************************************\n*  bitStream decoding\n**********************************************************/\n/*! 
BIT_initDStream() :\n *  Initialize a BIT_DStream_t.\n * `bitD` : a pointer to an already allocated BIT_DStream_t structure.\n * `srcSize` must be the *exact* size of the bitStream, in bytes.\n * @return : size of stream (== srcSize), or an errorCode if a problem is detected\n */\nMEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)\n{\n    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }\n\n    bitD->start = (const char*)srcBuffer;\n    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);\n\n    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */\n        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];\n          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */\n          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }\n    } else {\n        bitD->ptr   = bitD->start;\n        bitD->bitContainer = *(const BYTE*)(bitD->start);\n        switch(srcSize)\n        {\n        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);\n                /* fall-through */\n\n        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);\n                /* fall-through */\n\n        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);\n                /* fall-through */\n\n        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;\n                /* fall-through */\n\n        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;\n                /* fall-through */\n\n        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) 
<<  8;\n                /* fall-through */\n\n        default: break;\n        }\n        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];\n            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;\n            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */\n        }\n        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;\n    }\n\n    return srcSize;\n}\n\nMEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)\n{\n    return bitContainer >> start;\n}\n\nMEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)\n{\n    U32 const regMask = sizeof(bitContainer)*8 - 1;\n    /* if start > regMask, bitstream is corrupted, and result is undefined */\n    assert(nbBits < BIT_MASK_SIZE);\n    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];\n}\n\nMEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)\n{\n    assert(nbBits < BIT_MASK_SIZE);\n    return bitContainer & BIT_mask[nbBits];\n}\n\n/*! BIT_lookBits() :\n *  Provides next n bits from local register.\n *  local register is not modified.\n *  On 32-bits, maxNbBits==24.\n *  On 64-bits, maxNbBits==56.\n * @return : value extracted */\nMEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)\n{\n    /* arbitrate between double-shift and shift+mask */\n#if 1\n    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,\n     * bitstream is likely corrupted, and result is undefined */\n    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);\n#else\n    /* this code path is slower on my os-x laptop */\n    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;\n    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);\n#endif\n}\n\n/*! 
BIT_lookBitsFast() :\n *  unsafe version; only works if nbBits >= 1 */\nMEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)\n{\n    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;\n    assert(nbBits >= 1);\n    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);\n}\n\nMEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)\n{\n    bitD->bitsConsumed += nbBits;\n}\n\n/*! BIT_readBits() :\n *  Read (consume) next n bits from local register and update.\n *  Pay attention to not read more than nbBits contained into local register.\n * @return : extracted value. */\nMEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)\n{\n    size_t const value = BIT_lookBits(bitD, nbBits);\n    BIT_skipBits(bitD, nbBits);\n    return value;\n}\n\n/*! BIT_readBitsFast() :\n *  unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)\n{\n    size_t const value = BIT_lookBitsFast(bitD, nbBits);\n    assert(nbBits >= 1);\n    BIT_skipBits(bitD, nbBits);\n    return value;\n}\n\n/*! BIT_reloadDStreamFast() :\n *  Similar to BIT_reloadDStream(), but with two differences:\n *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!\n *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this\n *     point you must use BIT_reloadDStream() to reload.\n */\nMEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)\n{\n    if (UNLIKELY(bitD->ptr < bitD->limitPtr))\n        return BIT_DStream_overflow;\n    assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);\n    bitD->ptr -= bitD->bitsConsumed >> 3;\n    bitD->bitsConsumed &= 7;\n    bitD->bitContainer = MEM_readLEST(bitD->ptr);\n    return BIT_DStream_unfinished;\n}\n\n/*! 
BIT_reloadDStream() :\n *  Refill `bitD` from buffer previously set in BIT_initDStream() .\n *  This function is safe, it guarantees it will not read beyond src buffer.\n * @return : status of `BIT_DStream_t` internal register.\n *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */\nMEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)\n{\n    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */\n        return BIT_DStream_overflow;\n\n    if (bitD->ptr >= bitD->limitPtr) {\n        return BIT_reloadDStreamFast(bitD);\n    }\n    if (bitD->ptr == bitD->start) {\n        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;\n        return BIT_DStream_completed;\n    }\n    /* start < ptr < limitPtr */\n    {   U32 nbBytes = bitD->bitsConsumed >> 3;\n        BIT_DStream_status result = BIT_DStream_unfinished;\n        if (bitD->ptr - nbBytes < bitD->start) {\n            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */\n            result = BIT_DStream_endOfBuffer;\n        }\n        bitD->ptr -= nbBytes;\n        bitD->bitsConsumed -= nbBytes*8;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */\n        return result;\n    }\n}\n\n/*! BIT_endOfDStream() :\n * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).\n */\nMEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)\n{\n    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* BITSTREAM_H_MODULE */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/compiler.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_COMPILER_H\n#define ZSTD_COMPILER_H\n\n/*-*******************************************************\n*  Compiler specifics\n*********************************************************/\n/* force inlining */\n\n#if !defined(ZSTD_NO_INLINE)\n#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#  define INLINE_KEYWORD inline\n#else\n#  define INLINE_KEYWORD\n#endif\n\n#if defined(__GNUC__) || defined(__ICCARM__)\n#  define FORCE_INLINE_ATTR __attribute__((always_inline))\n#elif defined(_MSC_VER)\n#  define FORCE_INLINE_ATTR __forceinline\n#else\n#  define FORCE_INLINE_ATTR\n#endif\n\n#else\n\n#define INLINE_KEYWORD\n#define FORCE_INLINE_ATTR\n\n#endif\n\n/**\n * FORCE_INLINE_TEMPLATE is used to define C \"templates\", which take constant\n * parameters. They must be inlined for the compiler to eliminate the constant\n * branches.\n */\n#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR\n/**\n * HINT_INLINE is used to help the compiler generate better code. 
It is *not*\n * used for \"templates\", so it can be tweaked based on the compilers\n * performance.\n *\n * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the\n * always_inline attribute.\n *\n * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline\n * attribute.\n */\n#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5\n#  define HINT_INLINE static INLINE_KEYWORD\n#else\n#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR\n#endif\n\n/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */\n#if defined(__GNUC__)\n#  define UNUSED_ATTR __attribute__((unused))\n#else\n#  define UNUSED_ATTR\n#endif\n\n/* force no inlining */\n#ifdef _MSC_VER\n#  define FORCE_NOINLINE static __declspec(noinline)\n#else\n#  if defined(__GNUC__) || defined(__ICCARM__)\n#    define FORCE_NOINLINE static __attribute__((__noinline__))\n#  else\n#    define FORCE_NOINLINE static\n#  endif\n#endif\n\n/* target attribute */\n#ifndef __has_attribute\n  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. 
*/\n#endif\n#if defined(__GNUC__) || defined(__ICCARM__)\n#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))\n#else\n#  define TARGET_ATTRIBUTE(target)\n#endif\n\n/* Enable runtime BMI2 dispatch based on the CPU.\n * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.\n */\n#ifndef DYNAMIC_BMI2\n  #if ((defined(__clang__) && __has_attribute(__target__)) \\\n      || (defined(__GNUC__) \\\n          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \\\n      && (defined(__x86_64__) || defined(_M_X86)) \\\n      && !defined(__BMI2__)\n  #  define DYNAMIC_BMI2 1\n  #else\n  #  define DYNAMIC_BMI2 0\n  #endif\n#endif\n\n/* prefetch\n * can be disabled, by declaring NO_PREFETCH build macro */\n#if defined(NO_PREFETCH)\n#  define PREFETCH_L1(ptr)  (void)(ptr)  /* disabled */\n#  define PREFETCH_L2(ptr)  (void)(ptr)  /* disabled */\n#else\n#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */\n#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */\n#    define PREFETCH_L1(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)\n#    define PREFETCH_L2(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T1)\n#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )\n#    define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)\n#    define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)\n#  else\n#    define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */\n#    define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */\n#  endif\n#endif  /* NO_PREFETCH */\n\n#define CACHELINE_SIZE 64\n\n#define PREFETCH_AREA(p, s)  {            \\\n    const char* const _ptr = (const char*)(p);  \\\n    size_t const _size = (size_t)(s);     \\\n    size_t _pos;                          \\\n    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {  \\\n  
      PREFETCH_L2(_ptr + _pos);         \\\n    }                                     \\\n}\n\n/* vectorization\n * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */\n#if !defined(__clang__) && defined(__GNUC__)\n#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)\n#    define DONT_VECTORIZE __attribute__((optimize(\"no-tree-vectorize\")))\n#  else\n#    define DONT_VECTORIZE _Pragma(\"GCC optimize(\\\"no-tree-vectorize\\\")\")\n#  endif\n#else\n#  define DONT_VECTORIZE\n#endif\n\n/* Tell the compiler that a branch is likely or unlikely.\n * Only use these macros if it causes the compiler to generate better code.\n * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc\n * and clang, please do.\n */\n#if defined(__GNUC__)\n#define LIKELY(x) (__builtin_expect((x), 1))\n#define UNLIKELY(x) (__builtin_expect((x), 0))\n#else\n#define LIKELY(x) (x)\n#define UNLIKELY(x) (x)\n#endif\n\n/* disable warnings */\n#ifdef _MSC_VER    /* Visual Studio */\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */\n#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */\n#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */\n#endif\n\n#endif /* ZSTD_COMPILER_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/cpu.h",
    "content": "/*\n * Copyright (c) 2018-present, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_COMMON_CPU_H\n#define ZSTD_COMMON_CPU_H\n\n/**\n * Implementation taken from folly/CpuId.h\n * https://github.com/facebook/folly/blob/master/folly/CpuId.h\n */\n\n#include <string.h>\n\n#include \"mem.h\"\n\n#ifdef _MSC_VER\n#include <intrin.h>\n#endif\n\ntypedef struct {\n    U32 f1c;\n    U32 f1d;\n    U32 f7b;\n    U32 f7c;\n} ZSTD_cpuid_t;\n\nMEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {\n    U32 f1c = 0;\n    U32 f1d = 0;\n    U32 f7b = 0;\n    U32 f7c = 0;\n#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))\n    int reg[4];\n    __cpuid((int*)reg, 0);\n    {\n        int const n = reg[0];\n        if (n >= 1) {\n            __cpuid((int*)reg, 1);\n            f1c = (U32)reg[2];\n            f1d = (U32)reg[3];\n        }\n        if (n >= 7) {\n            __cpuidex((int*)reg, 7, 0);\n            f7b = (U32)reg[1];\n            f7c = (U32)reg[2];\n        }\n    }\n#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)\n    /* The following block like the normal cpuid branch below, but gcc\n     * reserves ebx for use of its pic register so we must specially\n     * handle the save and restore to avoid clobbering the register\n     */\n    U32 n;\n    __asm__(\n        \"pushl %%ebx\\n\\t\"\n        \"cpuid\\n\\t\"\n        \"popl %%ebx\\n\\t\"\n        : \"=a\"(n)\n        : \"a\"(0)\n        : \"ecx\", \"edx\");\n    if (n >= 1) {\n      U32 f1a;\n      __asm__(\n          \"pushl %%ebx\\n\\t\"\n          \"cpuid\\n\\t\"\n          \"popl %%ebx\\n\\t\"\n          : \"=a\"(f1a), \"=c\"(f1c), \"=d\"(f1d)\n       
   : \"a\"(1));\n    }\n    if (n >= 7) {\n      __asm__(\n          \"pushl %%ebx\\n\\t\"\n          \"cpuid\\n\\t\"\n          \"movl %%ebx, %%eax\\n\\t\"\n          \"popl %%ebx\"\n          : \"=a\"(f7b), \"=c\"(f7c)\n          : \"a\"(7), \"c\"(0)\n          : \"edx\");\n    }\n#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)\n    U32 n;\n    __asm__(\"cpuid\" : \"=a\"(n) : \"a\"(0) : \"ebx\", \"ecx\", \"edx\");\n    if (n >= 1) {\n      U32 f1a;\n      __asm__(\"cpuid\" : \"=a\"(f1a), \"=c\"(f1c), \"=d\"(f1d) : \"a\"(1) : \"ebx\");\n    }\n    if (n >= 7) {\n      U32 f7a;\n      __asm__(\"cpuid\"\n              : \"=a\"(f7a), \"=b\"(f7b), \"=c\"(f7c)\n              : \"a\"(7), \"c\"(0)\n              : \"edx\");\n    }\n#endif\n    {\n        ZSTD_cpuid_t cpuid;\n        cpuid.f1c = f1c;\n        cpuid.f1d = f1d;\n        cpuid.f7b = f7b;\n        cpuid.f7c = f7c;\n        return cpuid;\n    }\n}\n\n#define X(name, r, bit)                                                        \\\n  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \\\n    return ((cpuid.r) & (1U << bit)) != 0;                                     \\\n  }\n\n/* cpuid(1): Processor Info and Feature Bits. 
*/\n#define C(name, bit) X(name, f1c, bit)\n  C(sse3, 0)\n  C(pclmuldq, 1)\n  C(dtes64, 2)\n  C(monitor, 3)\n  C(dscpl, 4)\n  C(vmx, 5)\n  C(smx, 6)\n  C(eist, 7)\n  C(tm2, 8)\n  C(ssse3, 9)\n  C(cnxtid, 10)\n  C(fma, 12)\n  C(cx16, 13)\n  C(xtpr, 14)\n  C(pdcm, 15)\n  C(pcid, 17)\n  C(dca, 18)\n  C(sse41, 19)\n  C(sse42, 20)\n  C(x2apic, 21)\n  C(movbe, 22)\n  C(popcnt, 23)\n  C(tscdeadline, 24)\n  C(aes, 25)\n  C(xsave, 26)\n  C(osxsave, 27)\n  C(avx, 28)\n  C(f16c, 29)\n  C(rdrand, 30)\n#undef C\n#define D(name, bit) X(name, f1d, bit)\n  D(fpu, 0)\n  D(vme, 1)\n  D(de, 2)\n  D(pse, 3)\n  D(tsc, 4)\n  D(msr, 5)\n  D(pae, 6)\n  D(mce, 7)\n  D(cx8, 8)\n  D(apic, 9)\n  D(sep, 11)\n  D(mtrr, 12)\n  D(pge, 13)\n  D(mca, 14)\n  D(cmov, 15)\n  D(pat, 16)\n  D(pse36, 17)\n  D(psn, 18)\n  D(clfsh, 19)\n  D(ds, 21)\n  D(acpi, 22)\n  D(mmx, 23)\n  D(fxsr, 24)\n  D(sse, 25)\n  D(sse2, 26)\n  D(ss, 27)\n  D(htt, 28)\n  D(tm, 29)\n  D(pbe, 31)\n#undef D\n\n/* cpuid(7): Extended Features. */\n#define B(name, bit) X(name, f7b, bit)\n  B(bmi1, 3)\n  B(hle, 4)\n  B(avx2, 5)\n  B(smep, 7)\n  B(bmi2, 8)\n  B(erms, 9)\n  B(invpcid, 10)\n  B(rtm, 11)\n  B(mpx, 14)\n  B(avx512f, 16)\n  B(avx512dq, 17)\n  B(rdseed, 18)\n  B(adx, 19)\n  B(smap, 20)\n  B(avx512ifma, 21)\n  B(pcommit, 22)\n  B(clflushopt, 23)\n  B(clwb, 24)\n  B(avx512pf, 26)\n  B(avx512er, 27)\n  B(avx512cd, 28)\n  B(sha, 29)\n  B(avx512bw, 30)\n  B(avx512vl, 31)\n#undef B\n#define C(name, bit) X(name, f7c, bit)\n  C(prefetchwt1, 0)\n  C(avx512vbmi, 1)\n#undef C\n\n#undef X\n\n#endif /* ZSTD_COMMON_CPU_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/debug.c",
    "content": "/* ******************************************************************\n   debug\n   Part of FSE library\n   Copyright (C) 2013-present, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n\n\n/*\n * This module only hosts one global variable\n * which can be used to dynamically influence the verbosity of traces,\n * such as DEBUGLOG and RAWLOG\n */\n\n#include \"debug.h\"\n\nint g_debuglevel = DEBUGLEVEL;\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/debug.h",
    "content": "/* ******************************************************************\n   debug\n   Part of FSE library\n   Copyright (C) 2013-present, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n\n\n/*\n * The purpose of this header is to enable debug functions.\n * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,\n * and DEBUG_STATIC_ASSERT() for compile-time.\n *\n * By default, DEBUGLEVEL==0, which means run-time debug is disabled.\n *\n * Level 1 enables assert() only.\n * Starting level 2, traces can be generated and pushed to stderr.\n * The higher the level, the more verbose the traces.\n *\n * It's possible to dynamically adjust level using variable g_debug_level,\n * which is only declared if DEBUGLEVEL>=2,\n * and is a global variable, not multi-thread protected (use with care)\n */\n\n#ifndef DEBUG_H_12987983217\n#define DEBUG_H_12987983217\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* static assert is triggered at compile time, leaving no runtime artefact.\n * static assert only works with compile-time constants.\n * Also, this variant can only be used inside a function. */\n#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])\n\n\n/* DEBUGLEVEL is expected to be defined externally,\n * typically through compiler command line.\n * Value must be a number. 
*/\n#ifndef DEBUGLEVEL\n#  define DEBUGLEVEL 0\n#endif\n\n\n/* DEBUGFILE can be defined externally,\n * typically through compiler command line.\n * note : currently useless.\n * Value must be stderr or stdout */\n#ifndef DEBUGFILE\n#  define DEBUGFILE stderr\n#endif\n\n\n/* recommended values for DEBUGLEVEL :\n * 0 : release mode, no debug, all run-time checks disabled\n * 1 : enables assert() only, no display\n * 2 : reserved, for currently active debug path\n * 3 : events once per object lifetime (CCtx, CDict, etc.)\n * 4 : events once per frame\n * 5 : events once per block\n * 6 : events once per sequence (verbose)\n * 7+: events at every position (*very* verbose)\n *\n * It's generally inconvenient to output traces > 5.\n * In which case, it's possible to selectively trigger high verbosity levels\n * by modifying g_debug_level.\n */\n\n#if (DEBUGLEVEL>=1)\n#  include <assert.h>\n#else\n#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */\n#    define assert(condition) ((void)0)   /* disable assert (default) */\n#  endif\n#endif\n\n#if (DEBUGLEVEL>=2)\n#  include <stdio.h>\nextern int g_debuglevel; /* the variable is only declared,\n                            it actually lives in debug.c,\n                            and is shared by the whole process.\n                            It's not thread-safe.\n                            It's useful when enabling very verbose levels\n                            on selective conditions (such as position in src) */\n\n#  define RAWLOG(l, ...) {                                      \\\n                if (l<=g_debuglevel) {                          \\\n                    fprintf(stderr, __VA_ARGS__);               \\\n            }   }\n#  define DEBUGLOG(l, ...) 
{                                    \\\n                if (l<=g_debuglevel) {                          \\\n                    fprintf(stderr, __FILE__ \": \" __VA_ARGS__); \\\n                    fprintf(stderr, \" \\n\");                     \\\n            }   }\n#else\n#  define RAWLOG(l, ...)      {}    /* disabled */\n#  define DEBUGLOG(l, ...)    {}    /* disabled */\n#endif\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* DEBUG_H_12987983217 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/entropy_common.c",
    "content": "/*\n   Common functions of New Generation Entropy library\n   Copyright (C) 2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n*************************************************************************** */\n\n/* *************************************\n*  Dependencies\n***************************************/\n#include \"mem.h\"\n#include \"error_private.h\"       /* ERR_*, ERROR */\n#define FSE_STATIC_LINKING_ONLY  /* 
FSE_MIN_TABLELOG */\n#include \"fse.h\"\n#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */\n#include \"huf.h\"\n\n\n/*===   Version   ===*/\nunsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }\n\n\n/*===   Error Management   ===*/\nunsigned FSE_isError(size_t code) { return ERR_isError(code); }\nconst char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\nunsigned HUF_isError(size_t code) { return ERR_isError(code); }\nconst char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\n\n/*-**************************************************************\n*  FSE NCount encoding-decoding\n****************************************************************/\nsize_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,\n                 const void* headerBuffer, size_t hbSize)\n{\n    const BYTE* const istart = (const BYTE*) headerBuffer;\n    const BYTE* const iend = istart + hbSize;\n    const BYTE* ip = istart;\n    int nbBits;\n    int remaining;\n    int threshold;\n    U32 bitStream;\n    int bitCount;\n    unsigned charnum = 0;\n    int previous0 = 0;\n\n    if (hbSize < 4) {\n        /* This function only works when hbSize >= 4 */\n        char buffer[4];\n        memset(buffer, 0, sizeof(buffer));\n        memcpy(buffer, headerBuffer, hbSize);\n        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,\n                                                    buffer, sizeof(buffer));\n            if (FSE_isError(countSize)) return countSize;\n            if (countSize > hbSize) return ERROR(corruption_detected);\n            return countSize;\n    }   }\n    assert(hbSize >= 4);\n\n    /* init */\n    memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */\n    bitStream = MEM_readLE32(ip);\n    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog 
*/\n    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);\n    bitStream >>= 4;\n    bitCount = 4;\n    *tableLogPtr = nbBits;\n    remaining = (1<<nbBits)+1;\n    threshold = 1<<nbBits;\n    nbBits++;\n\n    while ((remaining>1) & (charnum<=*maxSVPtr)) {\n        if (previous0) {\n            unsigned n0 = charnum;\n            while ((bitStream & 0xFFFF) == 0xFFFF) {\n                n0 += 24;\n                if (ip < iend-5) {\n                    ip += 2;\n                    bitStream = MEM_readLE32(ip) >> bitCount;\n                } else {\n                    bitStream >>= 16;\n                    bitCount   += 16;\n            }   }\n            while ((bitStream & 3) == 3) {\n                n0 += 3;\n                bitStream >>= 2;\n                bitCount += 2;\n            }\n            n0 += bitStream & 3;\n            bitCount += 2;\n            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);\n            while (charnum < n0) normalizedCounter[charnum++] = 0;\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {\n                assert((bitCount >> 3) <= 3); /* For first condition to work */\n                ip += bitCount>>3;\n                bitCount &= 7;\n                bitStream = MEM_readLE32(ip) >> bitCount;\n            } else {\n                bitStream >>= 2;\n        }   }\n        {   int const max = (2*threshold-1) - remaining;\n            int count;\n\n            if ((bitStream & (threshold-1)) < (U32)max) {\n                count = bitStream & (threshold-1);\n                bitCount += nbBits-1;\n            } else {\n                count = bitStream & (2*threshold-1);\n                if (count >= threshold) count -= max;\n                bitCount += nbBits;\n            }\n\n            count--;   /* extra accuracy */\n            remaining -= count < 0 ? 
-count : count;   /* -1 means +1 */\n            normalizedCounter[charnum++] = (short)count;\n            previous0 = !count;\n            while (remaining < threshold) {\n                nbBits--;\n                threshold >>= 1;\n            }\n\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {\n                ip += bitCount>>3;\n                bitCount &= 7;\n            } else {\n                bitCount -= (int)(8 * (iend - 4 - ip));\n                ip = iend - 4;\n            }\n            bitStream = MEM_readLE32(ip) >> (bitCount & 31);\n    }   }   /* while ((remaining>1) & (charnum<=*maxSVPtr)) */\n    if (remaining != 1) return ERROR(corruption_detected);\n    if (bitCount > 32) return ERROR(corruption_detected);\n    *maxSVPtr = charnum-1;\n\n    ip += (bitCount+7)>>3;\n    return ip-istart;\n}\n\n\n/*! HUF_readStats() :\n    Read compact Huffman tree, saved by HUF_writeCTable().\n    `huffWeight` is destination buffer.\n    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.\n    @return : size read from `src` , or an error Code .\n    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .\n*/\nsize_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,\n                     U32* nbSymbolsPtr, U32* tableLogPtr,\n                     const void* src, size_t srcSize)\n{\n    U32 weightTotal;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize;\n    size_t oSize;\n\n    if (!srcSize) return ERROR(srcSize_wrong);\n    iSize = ip[0];\n    /* memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzer complain ... 
*/\n\n    if (iSize >= 128) {  /* special header */\n        oSize = iSize - 127;\n        iSize = ((oSize+1)/2);\n        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n        if (oSize >= hwSize) return ERROR(corruption_detected);\n        ip += 1;\n        {   U32 n;\n            for (n=0; n<oSize; n+=2) {\n                huffWeight[n]   = ip[n/2] >> 4;\n                huffWeight[n+1] = ip[n/2] & 15;\n    }   }   }\n    else  {   /* header compressed with FSE (normal case) */\n        FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)];  /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */\n        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n        oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6);   /* max (hwSize-1) values decoded, as last one is implied */\n        if (FSE_isError(oSize)) return oSize;\n    }\n\n    /* collect weight stats */\n    memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));\n    weightTotal = 0;\n    {   U32 n; for (n=0; n<oSize; n++) {\n            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);\n            rankStats[huffWeight[n]]++;\n            weightTotal += (1 << huffWeight[n]) >> 1;\n    }   }\n    if (weightTotal == 0) return ERROR(corruption_detected);\n\n    /* get last non-null symbol weight (implied, total must be 2^n) */\n    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;\n        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);\n        *tableLogPtr = tableLog;\n        /* determine last weight */\n        {   U32 const total = 1 << tableLog;\n            U32 const rest = total - weightTotal;\n            U32 const verif = 1 << BIT_highbit32(rest);\n            U32 const lastWeight = BIT_highbit32(rest) + 1;\n            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */\n            huffWeight[oSize] = (BYTE)lastWeight;\n            
rankStats[lastWeight]++;\n    }   }\n\n    /* check tree construction validity */\n    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */\n\n    /* results */\n    *nbSymbolsPtr = (U32)(oSize+1);\n    return iSize+1;\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/error_private.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/* The purpose of this file is to have a single list of error strings embedded in binary */\n\n#include \"error_private.h\"\n\nconst char* ERR_getErrorString(ERR_enum code)\n{\n#ifdef ZSTD_STRIP_ERROR_STRINGS\n    (void)code;\n    return \"Error strings stripped\";\n#else\n    static const char* const notErrorCode = \"Unspecified error code\";\n    switch( code )\n    {\n    case PREFIX(no_error): return \"No error detected\";\n    case PREFIX(GENERIC):  return \"Error (generic)\";\n    case PREFIX(prefix_unknown): return \"Unknown frame descriptor\";\n    case PREFIX(version_unsupported): return \"Version not supported\";\n    case PREFIX(frameParameter_unsupported): return \"Unsupported frame parameter\";\n    case PREFIX(frameParameter_windowTooLarge): return \"Frame requires too much memory for decoding\";\n    case PREFIX(corruption_detected): return \"Corrupted block detected\";\n    case PREFIX(checksum_wrong): return \"Restored data doesn't match checksum\";\n    case PREFIX(parameter_unsupported): return \"Unsupported parameter\";\n    case PREFIX(parameter_outOfBound): return \"Parameter is out of bound\";\n    case PREFIX(init_missing): return \"Context should be init first\";\n    case PREFIX(memory_allocation): return \"Allocation error : not enough memory\";\n    case PREFIX(workSpace_tooSmall): return \"workSpace buffer is not large enough\";\n    case PREFIX(stage_wrong): return \"Operation not authorized at current processing stage\";\n    case PREFIX(tableLog_tooLarge): return \"tableLog requires too much memory : unsupported\";\n    case 
PREFIX(maxSymbolValue_tooLarge): return \"Unsupported max Symbol Value : too large\";\n    case PREFIX(maxSymbolValue_tooSmall): return \"Specified maxSymbolValue is too small\";\n    case PREFIX(dictionary_corrupted): return \"Dictionary is corrupted\";\n    case PREFIX(dictionary_wrong): return \"Dictionary mismatch\";\n    case PREFIX(dictionaryCreation_failed): return \"Cannot create Dictionary from provided samples\";\n    case PREFIX(dstSize_tooSmall): return \"Destination buffer is too small\";\n    case PREFIX(srcSize_wrong): return \"Src size is incorrect\";\n    case PREFIX(dstBuffer_null): return \"Operation on NULL destination buffer\";\n        /* following error codes are not stable and may be removed or changed in a future version */\n    case PREFIX(frameIndex_tooLarge): return \"Frame index is too large\";\n    case PREFIX(seekableIO): return \"An I/O error occurred when reading/seeking\";\n    case PREFIX(maxCode):\n    default: return notErrorCode;\n    }\n#endif\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/error_private.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/* Note : this module is expected to remain private, do not expose it */\n\n#ifndef ERROR_H_MODULE\n#define ERROR_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* ****************************************\n*  Dependencies\n******************************************/\n#include <stddef.h>        /* size_t */\n#include \"zstd_errors.h\"  /* enum list */\n\n\n/* ****************************************\n*  Compiler-specific\n******************************************/\n#if defined(__GNUC__)\n#  define ERR_STATIC static __attribute__((unused))\n#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define ERR_STATIC static inline\n#elif defined(_MSC_VER)\n#  define ERR_STATIC static __inline\n#else\n#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/*-****************************************\n*  Customization (error_public.h)\n******************************************/\ntypedef ZSTD_ErrorCode ERR_enum;\n#define PREFIX(name) ZSTD_error_##name\n\n\n/*-****************************************\n*  Error codes handling\n******************************************/\n#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */\n#define ERROR(name) ZSTD_ERROR(name)\n#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))\n\nERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }\n\nERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; 
return (ERR_enum) (0-code); }\n\n\n/*-****************************************\n*  Error Strings\n******************************************/\n\nconst char* ERR_getErrorString(ERR_enum code);   /* error_private.c */\n\nERR_STATIC const char* ERR_getErrorName(size_t code)\n{\n    return ERR_getErrorString(ERR_getErrorCode(code));\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ERROR_H_MODULE */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/fse.h",
    "content": "/* ******************************************************************\n   FSE : Finite State Entropy codec\n   Public Prototypes declaration\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#ifndef FSE_H\n#define FSE_H\n\n\n/*-*****************************************\n*  Dependencies\n******************************************/\n#include <stddef.h>    /* size_t, ptrdiff_t */\n\n\n/*-*****************************************\n*  FSE_PUBLIC_API : control library symbols visibility\n******************************************/\n#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)\n#  define FSE_PUBLIC_API __attribute__ ((visibility (\"default\")))\n#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */\n#  define FSE_PUBLIC_API __declspec(dllexport)\n#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)\n#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/\n#else\n#  define FSE_PUBLIC_API\n#endif\n\n/*------   Version   ------*/\n#define FSE_VERSION_MAJOR    0\n#define FSE_VERSION_MINOR    9\n#define FSE_VERSION_RELEASE  0\n\n#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE\n#define FSE_QUOTE(str) #str\n#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)\n#define FSE_VERSION_STRING 
FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)\n\n#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)\nFSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */\n\n\n/*-****************************************\n*  FSE simple functions\n******************************************/\n/*! FSE_compress() :\n    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.\n    'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize).\n    @return : size of compressed data (<= dstCapacity).\n    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!\n                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.\n                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())\n*/\nFSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,\n                             const void* src, size_t srcSize);\n\n/*! FSE_decompress():\n    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated destination buffer 'dst', of size 'dstCapacity'.\n    @return : size of regenerated data (<= maxDstSize),\n              or an error code, which can be tested using FSE_isError() .\n\n    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!\n    Why ? 
: making this distinction requires a header.\n    Header management is intentionally delegated to the user layer, which can better manage special cases.\n*/\nFSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,\n                               const void* cSrc, size_t cSrcSize);\n\n\n/*-*****************************************\n*  Tool functions\n******************************************/\nFSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */\n\n/* Error Management */\nFSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */\nFSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */\n\n\n/*-*****************************************\n*  FSE advanced functions\n******************************************/\n/*! FSE_compress2() :\n    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'\n    Both parameters can be defined as '0' to mean : use default value\n    @return : size of compressed data\n    Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!\n                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.\n                     if FSE_isError(return), it's an error code.\n*/\nFSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);\n\n\n/*-*****************************************\n*  FSE detailed API\n******************************************/\n/*!\nFSE_compress() does the following:\n1. count symbol occurrence from source[] into table count[] (see hist.h)\n2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)\n3. save normalized counters to memory buffer using writeNCount()\n4. build encoding table 'CTable' from normalized counters\n5. 
encode the data stream using encoding table 'CTable'\n\nFSE_decompress() does the following:\n1. read normalized counters with readNCount()\n2. build decoding table 'DTable' from normalized counters\n3. decode the data stream using decoding table 'DTable'\n\nThe following API allows targeting specific sub-functions for advanced tasks.\nFor example, it's possible to compress several blocks using the same 'CTable',\nor to save and provide normalized distribution using external method.\n*/\n\n/* *** COMPRESSION *** */\n\n/*! FSE_optimalTableLog():\n    dynamically downsize 'tableLog' when conditions are met.\n    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.\n    @return : recommended tableLog (necessarily <= 'maxTableLog') */\nFSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);\n\n/*! FSE_normalizeCount():\n    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)\n    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).\n    @return : tableLog,\n              or an errorCode, which can be tested using FSE_isError() */\nFSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,\n                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue);\n\n/*! FSE_NCountWriteBound():\n    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.\n    Typically useful for allocation purpose. */\nFSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);\n\n/*! FSE_writeNCount():\n    Compactly save 'normalizedCounter' into 'buffer'.\n    @return : size of the compressed table,\n              or an errorCode, which can be tested using FSE_isError(). 
*/\nFSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,\n                                 const short* normalizedCounter,\n                                 unsigned maxSymbolValue, unsigned tableLog);\n\n/*! Constructor and Destructor of FSE_CTable.\n    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */\ntypedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */\nFSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);\nFSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);\n\n/*! FSE_buildCTable():\n    Builds `ct`, which must be already allocated, using FSE_createCTable().\n    @return : 0, or an errorCode, which can be tested using FSE_isError() */\nFSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);\n\n/*! FSE_compress_usingCTable():\n    Compress `src` using `ct` into `dst` which must be already allocated.\n    @return : size of compressed data (<= `dstCapacity`),\n              or 0 if compressed data could not fit into `dst`,\n              or an errorCode, which can be tested using FSE_isError() */\nFSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);\n\n/*!\nTutorial :\n----------\nThe first step is to count all symbols. FSE_count() does this job very fast.\nResult will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.\n'src' is a table of bytes of size 'srcSize'. 
All values within 'src' MUST be <= maxSymbolValuePtr[0]\nmaxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)\nFSE_count() will return the number of occurrence of the most frequent symbol.\nThis can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.\nIf there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).\n\nThe next step is to normalize the frequencies.\nFSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.\nIt also guarantees a minimum of 1 to any Symbol with frequency >= 1.\nYou can use 'tableLog'==0 to mean \"use default tableLog value\".\nIf you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),\nwhich will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means \"default\").\n\nThe result of FSE_normalizeCount() will be saved into a table,\ncalled 'normalizedCounter', which is a table of signed short.\n'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.\nThe return value is tableLog if everything proceeded as expected.\nIt is 0 if there is a single symbol within distribution.\nIf there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).\n\n'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().\n'buffer' must be already allocated.\nFor guaranteed success, buffer size must be at least FSE_headerBound().\nThe result of the function is the number of bytes written into 'buffer'.\nIf there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).\n\n'normalizedCounter' can then be used to create the compression table 'CTable'.\nThe space required by 'CTable' must be already allocated, using FSE_createCTable().\nYou can then 
use FSE_buildCTable() to fill 'CTable'.\nIf there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).\n\n'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().\nSimilar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'\nThe function returns the size of compressed data (without header), necessarily <= `dstCapacity`.\nIf it returns '0', compressed data could not fit into 'dst'.\nIf there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).\n*/\n\n\n/* *** DECOMPRESSION *** */\n\n/*! FSE_readNCount():\n    Read compactly saved 'normalizedCounter' from 'rBuffer'.\n    @return : size read from 'rBuffer',\n              or an errorCode, which can be tested using FSE_isError().\n              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */\nFSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,\n                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,\n                           const void* rBuffer, size_t rBuffSize);\n\n/*! Constructor and Destructor of FSE_DTable.\n    Note that its size depends on 'tableLog' */\ntypedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */\nFSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);\nFSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);\n\n/*! FSE_buildDTable():\n    Builds 'dt', which must be already allocated, using FSE_createDTable().\n    return : 0, or an errorCode, which can be tested using FSE_isError() */\nFSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);\n\n/*! 
FSE_decompress_usingDTable():\n    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`\n    into `dst` which must be already allocated.\n    @return : size of regenerated data (necessarily <= `dstCapacity`),\n              or an errorCode, which can be tested using FSE_isError() */\nFSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);\n\n/*!\nTutorial :\n----------\n(Note : these functions only decompress FSE-compressed blocks.\n If block is uncompressed, use memcpy() instead\n If block is a single repeated byte, use memset() instead )\n\nThe first step is to obtain the normalized frequencies of symbols.\nThis can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().\n'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.\nIn practice, that means it's necessary to know 'maxSymbolValue' beforehand,\nor size the table to handle worst case situations (typically 256).\nFSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.\nThe result of FSE_readNCount() is the number of bytes read from 'rBuffer'.\nNote that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.\nIf there is an error, the function will return an error code, which can be tested using FSE_isError().\n\nThe next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.\nThis is performed by the function FSE_buildDTable().\nThe space required by 'FSE_DTable' must be already allocated using FSE_createDTable().\nIf there is an error, the function will return an error code, which can be tested using FSE_isError().\n\n`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().\n`cSrcSize` must be strictly correct, otherwise decompression will fail.\nFSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).\nIf 
there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)\n*/\n\n#endif  /* FSE_H */\n\n#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)\n#define FSE_H_FSE_STATIC_LINKING_ONLY\n\n/* *** Dependency *** */\n#include \"bitstream.h\"\n\n\n/* *****************************************\n*  Static allocation\n*******************************************/\n/* FSE buffer bounds */\n#define FSE_NCOUNTBOUND 512\n#define FSE_BLOCKBOUND(size) (size + (size>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)\n#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */\n#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))\n#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))\n\n/* or use the size to malloc() space directly. 
Pay attention to alignment restrictions though */\n#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))\n#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))\n\n\n/* *****************************************\n *  FSE advanced API\n ***************************************** */\n\nunsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);\n/**< same as FSE_optimalTableLog(), which used `minus==2` */\n\n/* FSE_compress_wksp() :\n * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).\n * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.\n */\n#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )\nsize_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);\n\nsize_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);\n/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */\n\nsize_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);\n/**< build a fake FSE_CTable, designed to compress always the same symbolValue */\n\n/* FSE_buildCTable_wksp() :\n * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).\n * `wkspSize` must be >= `(1<<tableLog)`.\n */\nsize_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);\n\nsize_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);\n/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */\n\nsize_t FSE_buildDTable_rle 
(FSE_DTable* dt, unsigned char symbolValue);\n/**< build a fake FSE_DTable, designed to always generate the same symbolValue */\n\nsize_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);\n/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */\n\ntypedef enum {\n   FSE_repeat_none,  /**< Cannot use the previous table */\n   FSE_repeat_check, /**< Can use the previous table but it must be checked */\n   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */\n } FSE_repeat;\n\n/* *****************************************\n*  FSE symbol compression API\n*******************************************/\n/*!\n   This API consists of small unitary functions, which highly benefit from being inlined.\n   Hence their body are included in next section.\n*/\ntypedef struct {\n    ptrdiff_t   value;\n    const void* stateTable;\n    const void* symbolTT;\n    unsigned    stateLog;\n} FSE_CState_t;\n\nstatic void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);\n\nstatic void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);\n\nstatic void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);\n\n/**<\nThese functions are inner components of FSE_compress_usingCTable().\nThey allow the creation of custom streams, mixing multiple tables and bit sources.\n\nA key property to keep in mind is that encoding and decoding are done **in reverse direction**.\nSo the first symbol you will encode is the last you will decode, like a LIFO stack.\n\nYou will need a few variables to track your CStream. 
They are :\n\nFSE_CTable    ct;         // Provided by FSE_buildCTable()\nBIT_CStream_t bitStream;  // bitStream tracking structure\nFSE_CState_t  state;      // State tracking structure (can have several)\n\n\nThe first thing to do is to init bitStream and state.\n    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);\n    FSE_initCState(&state, ct);\n\nNote that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();\nYou can then encode your input data, byte after byte.\nFSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.\nRemember decoding will be done in reverse direction.\n    FSE_encodeByte(&bitStream, &state, symbol);\n\nAt any time, you can also add any bit sequence.\nNote : maximum allowed nbBits is 25, for compatibility with 32-bits decoders\n    BIT_addBits(&bitStream, bitField, nbBits);\n\nThe above methods don't commit data to memory, they just store it into local register, for speed.\nLocal register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).\nWriting data to memory is a manual operation, performed by the flushBits function.\n    BIT_flushBits(&bitStream);\n\nYour last FSE encoding operation shall be to flush your last state value(s).\n    FSE_flushState(&bitStream, &state);\n\nFinally, you must close the bitStream.\nThe function returns the size of CStream in bytes.\nIf data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)\nIf there is an error, it returns an errorCode (which can be tested using FSE_isError()).\n    size_t size = BIT_closeCStream(&bitStream);\n*/\n\n\n/* *****************************************\n*  FSE symbol decompression API\n*******************************************/\ntypedef struct {\n    size_t      state;\n    const void* table;   /* precise table may vary, depending on U16 */\n} FSE_DState_t;\n\n\nstatic void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* 
dt);\n\nstatic unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);\n\nstatic unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);\n\n/**<\nLet's now decompose FSE_decompress_usingDTable() into its unitary components.\nYou will decode FSE-encoded symbols from the bitStream,\nand also any other bitFields you put in, **in reverse order**.\n\nYou will need a few variables to track your bitStream. They are :\n\nBIT_DStream_t DStream;    // Stream context\nFSE_DState_t  DState;     // State context. Multiple ones are possible\nFSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()\n\nThe first thing to do is to init the bitStream.\n    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);\n\nYou should then retrieve your initial state(s)\n(in reverse flushing order if you have several ones) :\n    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);\n\nYou can then decode your data, symbol after symbol.\nFor information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.\nKeep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).\n    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);\n\nYou can retrieve any bitfield you eventually stored into the bitStream (in reverse order)\nNote : maximum allowed nbBits is 25, for 32-bits compatibility\n    size_t bitField = BIT_readBits(&DStream, nbBits);\n\nAll above operations only read from local register (which size depends on size_t).\nRefueling the register from memory is manually performed by the reload method.\n    endSignal = FSE_reloadDStream(&DStream);\n\nBIT_reloadDStream() result tells if there is still some more data to read from DStream.\nBIT_DStream_unfinished : there is still some data left into the DStream.\nBIT_DStream_endOfBuffer : Dstream reached end of buffer. 
Its container may no longer be completely filled.\nBIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.\nBIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.\n\nWhen reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,\nto properly detect the exact end of stream.\nAfter each decoded symbol, check if DStream is fully consumed using this simple test :\n    BIT_reloadDStream(&DStream) >= BIT_DStream_completed\n\nWhen it's done, verify decompression is fully completed, by checking both DStream and the relevant states.\nChecking if DStream has reached its end is performed by :\n    BIT_endOfDStream(&DStream);\nCheck also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.\n    FSE_endOfDState(&DState);\n*/\n\n\n/* *****************************************\n*  FSE unsafe API\n*******************************************/\nstatic unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);\n/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */\n\n\n/* *****************************************\n*  Implementation of inlined functions\n*******************************************/\ntypedef struct {\n    int deltaFindState;\n    U32 deltaNbBits;\n} FSE_symbolCompressionTransform; /* total 8 bytes */\n\nMEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)\n{\n    const void* ptr = ct;\n    const U16* u16ptr = (const U16*) ptr;\n    const U32 tableLog = MEM_read16(ptr);\n    statePtr->value = (ptrdiff_t)1<<tableLog;\n    statePtr->stateTable = u16ptr+2;\n    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);\n    statePtr->stateLog = tableLog;\n}\n\n\n/*! 
FSE_initCState2() :\n*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)\n*   uses the smallest state value possible, saving the cost of this symbol */\nMEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)\n{\n    FSE_initCState(statePtr, ct);\n    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];\n        const U16* stateTable = (const U16*)(statePtr->stateTable);\n        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);\n        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;\n        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];\n    }\n}\n\nMEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)\n{\n    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];\n    const U16* const stateTable = (const U16*)(statePtr->stateTable);\n    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);\n    BIT_addBits(bitC, statePtr->value, nbBitsOut);\n    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];\n}\n\nMEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)\n{\n    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);\n    BIT_flushBits(bitC);\n}\n\n\n/* FSE_getMaxNbBits() :\n * Approximate maximum cost of a symbol, in bits.\n * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)\n * note 1 : assume symbolValue is valid (<= maxSymbolValue)\n * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */\nMEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)\n{\n    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) 
symbolTTPtr;\n    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;\n}\n\n/* FSE_bitCost() :\n * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)\n * note 1 : assume symbolValue is valid (<= maxSymbolValue)\n * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */\nMEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)\n{\n    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;\n    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;\n    U32 const threshold = (minNbBits+1) << 16;\n    assert(tableLog < 16);\n    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */\n    {   U32 const tableSize = 1 << tableLog;\n        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);\n        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */\n        U32 const bitMultiplier = 1 << accuracyLog;\n        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);\n        assert(normalizedDeltaFromThreshold <= bitMultiplier);\n        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;\n    }\n}\n\n\n/* ======    Decompression    ====== */\n\ntypedef struct {\n    U16 tableLog;\n    U16 fastMode;\n} FSE_DTableHeader;   /* sizeof U32 */\n\ntypedef struct\n{\n    unsigned short newState;\n    unsigned char  symbol;\n    unsigned char  nbBits;\n} FSE_decode_t;   /* size == U32 */\n\nMEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)\n{\n    const void* ptr = dt;\n    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;\n    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);\n    BIT_reloadDStream(bitD);\n    DStatePtr->table = dt + 
1;\n}\n\nMEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)\n{\n    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    return DInfo.symbol;\n}\n\nMEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    size_t const lowBits = BIT_readBits(bitD, nbBits);\n    DStatePtr->state = DInfo.newState + lowBits;\n}\n\nMEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    BYTE const symbol = DInfo.symbol;\n    size_t const lowBits = BIT_readBits(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\n/*! FSE_decodeSymbolFast() :\n    unsafe, only works if no symbol has a probability > 50% */\nMEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    BYTE const symbol = DInfo.symbol;\n    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\nMEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)\n{\n    return DStatePtr->state == 0;\n}\n\n\n\n#ifndef FSE_COMMONDEFS_ONLY\n\n/* **************************************************************\n*  Tuning parameters\n****************************************************************/\n/*!MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*  Recommended max value is 14, for 16KB, which nicely 
fits into Intel x86 L1 cache */\n#ifndef FSE_MAX_MEMORY_USAGE\n#  define FSE_MAX_MEMORY_USAGE 14\n#endif\n#ifndef FSE_DEFAULT_MEMORY_USAGE\n#  define FSE_DEFAULT_MEMORY_USAGE 13\n#endif\n\n/*!FSE_MAX_SYMBOL_VALUE :\n*  Maximum symbol value authorized.\n*  Required for proper stack allocation */\n#ifndef FSE_MAX_SYMBOL_VALUE\n#  define FSE_MAX_SYMBOL_VALUE 255\n#endif\n\n/* **************************************************************\n*  template functions type & suffix\n****************************************************************/\n#define FSE_FUNCTION_TYPE BYTE\n#define FSE_FUNCTION_EXTENSION\n#define FSE_DECODE_TYPE FSE_decode_t\n\n\n#endif   /* !FSE_COMMONDEFS_ONLY */\n\n\n/* ***************************************************************\n*  Constants\n*****************************************************************/\n#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)\n#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)\n#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)\n#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)\n#define FSE_MIN_TABLELOG 5\n\n#define FSE_TABLELOG_ABSOLUTE_MAX 15\n#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX\n#  error \"FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported\"\n#endif\n\n#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)\n\n\n#endif /* FSE_STATIC_LINKING_ONLY */\n\n\n#if defined (__cplusplus)\n}\n#endif\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/fse_decompress.c",
    "content": "/* ******************************************************************\n   FSE : Finite State Entropy decoder\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n\n/* **************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include \"bitstream.h\"\n#include \"compiler.h\"\n#define FSE_STATIC_LINKING_ONLY\n#include \"fse.h\"\n#include \"error_private.h\"\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\n#define FSE_isError ERR_isError\n#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */\n\n/* check and forward error code */\n#ifndef CHECK_F\n#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }\n#endif\n\n\n/* **************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef FSE_FUNCTION_EXTENSION\n#  error 
\"FSE_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSE_FUNCTION_TYPE\n#  error \"FSE_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSE_CAT(X,Y) X##Y\n#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)\n#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)\n\n\n/* Function templates */\nFSE_DTable* FSE_createDTable (unsigned tableLog)\n{\n    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;\n    return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );\n}\n\nvoid FSE_freeDTable (FSE_DTable* dt)\n{\n    free(dt);\n}\n\nsize_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */\n    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);\n    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];\n\n    U32 const maxSV1 = maxSymbolValue + 1;\n    U32 const tableSize = 1 << tableLog;\n    U32 highThreshold = tableSize-1;\n\n    /* Sanity Checks */\n    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);\n    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);\n\n    /* Init, lay down lowprob symbols */\n    {   FSE_DTableHeader DTableH;\n        DTableH.tableLog = (U16)tableLog;\n        DTableH.fastMode = 1;\n        {   S16 const largeLimit= (S16)(1 << (tableLog-1));\n            U32 s;\n            for (s=0; s<maxSV1; s++) {\n                if (normalizedCounter[s]==-1) {\n                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;\n                    symbolNext[s] = 1;\n                } else {\n                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;\n                    symbolNext[s] = normalizedCounter[s];\n        }   }   }\n        memcpy(dt, &DTableH, sizeof(DTableH));\n    }\n\n    /* Spread symbols */\n    {   U32 const tableMask = tableSize-1;\n        U32 const 
step = FSE_TABLESTEP(tableSize);\n        U32 s, position = 0;\n        for (s=0; s<maxSV1; s++) {\n            int i;\n            for (i=0; i<normalizedCounter[s]; i++) {\n                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;\n                position = (position + step) & tableMask;\n                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n        }   }\n        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n    }\n\n    /* Build Decoding table */\n    {   U32 u;\n        for (u=0; u<tableSize; u++) {\n            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);\n            U32 const nextState = symbolNext[symbol]++;\n            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );\n            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);\n    }   }\n\n    return 0;\n}\n\n\n#ifndef FSE_COMMONDEFS_ONLY\n\n/*-*******************************************************\n*  Decompression (Byte symbols)\n*********************************************************/\nsize_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->newState = 0;\n    cell->symbol = symbolValue;\n    cell->nbBits = 0;\n\n    return 0;\n}\n\n\nsize_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSV1 = tableMask+1;\n    unsigned s;\n\n    /* Sanity 
checks */\n    if (nbBits < 1) return ERROR(GENERIC);         /* min size */\n\n    /* Build Decoding Table */\n    DTableH->tableLog = (U16)nbBits;\n    DTableH->fastMode = 1;\n    for (s=0; s<maxSV1; s++) {\n        dinfo[s].newState = 0;\n        dinfo[s].symbol = (BYTE)s;\n        dinfo[s].nbBits = (BYTE)nbBits;\n    }\n\n    return 0;\n}\n\nFORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const FSE_DTable* dt, const unsigned fast)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const omax = op + maxDstSize;\n    BYTE* const olimit = omax-3;\n\n    BIT_DStream_t bitD;\n    FSE_DState_t state1;\n    FSE_DState_t state2;\n\n    /* Init */\n    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));\n\n    FSE_initDState(&state1, &bitD, dt);\n    FSE_initDState(&state2, &bitD, dt);\n\n#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)\n\n    /* 4 symbols per loop */\n    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {\n        op[0] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BIT_reloadDStream(&bitD);\n\n        op[1] = FSE_GETSYMBOL(&state2);\n\n        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }\n\n        op[2] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BIT_reloadDStream(&bitD);\n\n        op[3] = FSE_GETSYMBOL(&state2);\n    }\n\n    /* tail */\n    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */\n    while (1) {\n        if (op>(omax-2)) return 
ERROR(dstSize_tooSmall);\n        *op++ = FSE_GETSYMBOL(&state1);\n        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {\n            *op++ = FSE_GETSYMBOL(&state2);\n            break;\n        }\n\n        if (op>(omax-2)) return ERROR(dstSize_tooSmall);\n        *op++ = FSE_GETSYMBOL(&state2);\n        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {\n            *op++ = FSE_GETSYMBOL(&state1);\n            break;\n    }   }\n\n    return op-ostart;\n}\n\n\nsize_t FSE_decompress_usingDTable(void* dst, size_t originalSize,\n                            const void* cSrc, size_t cSrcSize,\n                            const FSE_DTable* dt)\n{\n    const void* ptr = dt;\n    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;\n    const U32 fastMode = DTableH->fastMode;\n\n    /* select fast mode (static) */\n    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);\n    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);\n}\n\n\nsize_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)\n{\n    const BYTE* const istart = (const BYTE*)cSrc;\n    const BYTE* ip = istart;\n    short counting[FSE_MAX_SYMBOL_VALUE+1];\n    unsigned tableLog;\n    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;\n\n    /* normal FSE decoding mode */\n    size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);\n    if (FSE_isError(NCountLength)) return NCountLength;\n    //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */\n    if (tableLog > maxLog) return ERROR(tableLog_tooLarge);\n    ip += NCountLength;\n    cSrcSize -= NCountLength;\n\n    CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );\n\n    return 
FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace);   /* always return, even if it is an error code */\n}\n\n\ntypedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];\n\nsize_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)\n{\n    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */\n    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);\n}\n\n\n\n#endif   /* FSE_COMMONDEFS_ONLY */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/huf.h",
    "content": "/* ******************************************************************\n   huff0 huffman codec,\n   part of Finite State Entropy library\n   Copyright (C) 2013-present, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#ifndef HUF_H_298734234\n#define HUF_H_298734234\n\n/* *** Dependencies *** */\n#include <stddef.h>    /* size_t */\n\n\n/* *** library symbols visibility *** */\n/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,\n *        HUF symbols remain \"private\" (internal symbols for library only).\n *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */\n#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)\n#  define HUF_PUBLIC_API __attribute__ ((visibility (\"default\")))\n#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */\n#  define HUF_PUBLIC_API __declspec(dllexport)\n#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)\n#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */\n#else\n#  define HUF_PUBLIC_API\n#endif\n\n\n/* ========================== */\n/* ***  simple functions  *** */\n/* ========================== */\n\n/** HUF_compress() :\n *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.\n * 'dst' buffer must be already allocated.\n *  
Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).\n * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.\n * @return : size of compressed data (<= `dstCapacity`).\n *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!\n *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())\n */\nHUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,\n                             const void* src, size_t srcSize);\n\n/** HUF_decompress() :\n *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',\n *  into already allocated buffer 'dst', of minimum size 'dstSize'.\n * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.\n *  Note : in contrast with FSE, HUF_decompress can regenerate\n *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,\n *         because it knows size to regenerate (originalSize).\n * @return : size of regenerated data (== originalSize),\n *           or an error code, which can be tested using HUF_isError()\n */\nHUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,\n                               const void* cSrc, size_t cSrcSize);\n\n\n/* ***   Tool functions *** */\n#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */\nHUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */\n\n/* Error Management */\nHUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */\nHUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */\n\n\n/* ***   Advanced function   *** */\n\n/** HUF_compress2() :\n *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.\n * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .\n * `tableLog` must 
be `<= HUF_TABLELOG_MAX` . */\nHUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,\n                               const void* src, size_t srcSize,\n                               unsigned maxSymbolValue, unsigned tableLog);\n\n/** HUF_compress4X_wksp() :\n *  Same as HUF_compress2(), but uses externally allocated `workSpace`.\n * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */\n#define HUF_WORKSPACE_SIZE (6 << 10)\n#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))\nHUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,\n                                     const void* src, size_t srcSize,\n                                     unsigned maxSymbolValue, unsigned tableLog,\n                                     void* workSpace, size_t wkspSize);\n\n#endif   /* HUF_H_298734234 */\n\n/* ******************************************************************\n *  WARNING !!\n *  The following section contains advanced and experimental definitions\n *  which shall never be used in the context of a dynamic library,\n *  because they are not guaranteed to remain stable in the future.\n *  Only consider them in association with static linking.\n * *****************************************************************/\n#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)\n#define HUF_H_HUF_STATIC_LINKING_ONLY\n\n/* *** Dependencies *** */\n#include \"mem.h\"   /* U32 */\n\n\n/* *** Constants *** */\n#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */\n#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */\n#define HUF_SYMBOLVALUE_MAX  255\n\n#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_MAX_TABLELOG. 
Beyond that value, code does not work */\n#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)\n#  error \"HUF_TABLELOG_MAX is too large !\"\n#endif\n\n\n/* ****************************************\n*  Static allocation\n******************************************/\n/* HUF buffer bounds */\n#define HUF_CTABLEBOUND 129\n#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */\n#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* static allocation of HUF's Compression Table */\n#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */\n#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))\n#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \\\n    U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \\\n    void* name##hv = &(name##hb); \\\n    HUF_CElt* name = (HUF_CElt*)(name##hv)   /* no final ; */\n\n/* static allocation of HUF's DTable */\ntypedef U32 HUF_DTable;\n#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))\n#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \\\n        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }\n#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \\\n        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }\n\n\n/* ****************************************\n*  Advanced decompression functions\n******************************************/\nsize_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */\n#ifndef HUF_FORCE_DECOMPRESS_X1\nsize_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */\n#endif\n\nsize_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t 
dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */\nsize_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */\nsize_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */\nsize_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */\nsize_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */\n#ifndef HUF_FORCE_DECOMPRESS_X1\nsize_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */\nsize_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */\n#endif\n\n\n/* ****************************************\n *  HUF detailed API\n * ****************************************/\n\n/*! HUF_compress() does the following:\n *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within \"fse.h\")\n *  2. (optional) refine tableLog using HUF_optimalTableLog()\n *  3. build Huffman table from count using HUF_buildCTable()\n *  4. save Huffman table to memory buffer using HUF_writeCTable()\n *  5. 
encode the data stream using HUF_compress4X_usingCTable()\n *\n *  The following API allows targeting specific sub-functions for advanced tasks.\n *  For example, it's possible to compress several blocks using the same 'CTable',\n *  or to save and regenerate 'CTable' using external methods.\n */\nunsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);\ntypedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */\nsize_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */\nsize_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);\nsize_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);\nsize_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);\n\ntypedef enum {\n   HUF_repeat_none,  /**< Cannot use the previous table */\n   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */\n   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */\n } HUF_repeat;\n/** HUF_compress4X_repeat() :\n *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.\n *  If it uses hufTable it does not modify hufTable or repeat.\n *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.\n *  If preferRepeat then the old table will always be used if valid. 
*/\nsize_t HUF_compress4X_repeat(void* dst, size_t dstSize,\n                       const void* src, size_t srcSize,\n                       unsigned maxSymbolValue, unsigned tableLog,\n                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */\n                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);\n\n/** HUF_buildCTable_wksp() :\n *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.\n * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.\n */\n#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)\n#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))\nsize_t HUF_buildCTable_wksp (HUF_CElt* tree,\n                       const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,\n                             void* workSpace, size_t wkspSize);\n\n/*! HUF_readStats() :\n *  Read compact Huffman tree, saved by HUF_writeCTable().\n * `huffWeight` is destination buffer.\n * @return : size read from `src` , or an error Code .\n *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . 
*/\nsize_t HUF_readStats(BYTE* huffWeight, size_t hwSize,\n                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,\n                     const void* src, size_t srcSize);\n\n/** HUF_readCTable() :\n *  Loading a CTable saved with HUF_writeCTable() */\nsize_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);\n\n/** HUF_getNbBits() :\n *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX\n *  Note 1 : is not inlined, as HUF_CElt definition is private\n *  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */\nU32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);\n\n/*\n * HUF_decompress() does the following:\n * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics\n * 2. build Huffman table from save, using HUF_readDTableX?()\n * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()\n */\n\n/** HUF_selectDecoder() :\n *  Tells which decoder is likely to decode faster,\n *  based on a set of pre-computed metrics.\n * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .\n *  Assumption : 0 < dstSize <= 128 KB */\nU32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);\n\n/**\n *  The minimum workspace size for the `workSpace` used in\n *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().\n *\n *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when\n *  HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15.\n *  Buffer overflow errors may potentially occur if code modifications result in\n *  a required workspace size greater than that specified in the following\n *  macro.\n */\n#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)\n#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))\n\n#ifndef HUF_FORCE_DECOMPRESS_X2\nsize_t HUF_readDTableX1 (HUF_DTable* 
DTable, const void* src, size_t srcSize);\nsize_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);\n#endif\n#ifndef HUF_FORCE_DECOMPRESS_X1\nsize_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);\nsize_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);\n#endif\n\nsize_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);\n#ifndef HUF_FORCE_DECOMPRESS_X2\nsize_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);\n#endif\n#ifndef HUF_FORCE_DECOMPRESS_X1\nsize_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);\n#endif\n\n\n/* ====================== */\n/* single stream variants */\n/* ====================== */\n\nsize_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);\nsize_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */\nsize_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);\n/** HUF_compress1X_repeat() :\n *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.\n *  If it uses hufTable it does not modify hufTable or repeat.\n *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.\n *  If preferRepeat then the old table will always be used if valid. 
*/\nsize_t HUF_compress1X_repeat(void* dst, size_t dstSize,\n                       const void* src, size_t srcSize,\n                       unsigned maxSymbolValue, unsigned tableLog,\n                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */\n                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);\n\nsize_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\n#ifndef HUF_FORCE_DECOMPRESS_X1\nsize_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */\n#endif\n\nsize_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);\nsize_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);\n#ifndef HUF_FORCE_DECOMPRESS_X2\nsize_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */\nsize_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */\n#endif\n#ifndef HUF_FORCE_DECOMPRESS_X1\nsize_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */\nsize_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */\n#endif\n\nsize_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of sing or double symbol decoder, based on DTable */\n#ifndef HUF_FORCE_DECOMPRESS_X2\nsize_t 
HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);\n#endif\n#ifndef HUF_FORCE_DECOMPRESS_X1\nsize_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);\n#endif\n\n/* BMI2 variants.\n * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.\n */\nsize_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);\n#ifndef HUF_FORCE_DECOMPRESS_X2\nsize_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);\n#endif\nsize_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);\nsize_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);\n\n#endif /* HUF_STATIC_LINKING_ONLY */\n\n#if defined (__cplusplus)\n}\n#endif\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/mem.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef MEM_H_MODULE\n#define MEM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*-****************************************\n*  Dependencies\n******************************************/\n#include <stddef.h>     /* size_t, ptrdiff_t */\n#include <string.h>     /* memcpy */\n\n\n/*-****************************************\n*  Compiler specifics\n******************************************/\n#if defined(_MSC_VER)   /* Visual Studio */\n#   include <stdlib.h>  /* _byteswap_ulong */\n#   include <intrin.h>  /* _byteswap_* */\n#endif\n#if defined(__GNUC__)\n#  define MEM_STATIC static __inline __attribute__((unused))\n#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define MEM_STATIC static inline\n#elif defined(_MSC_VER)\n#  define MEM_STATIC static __inline\n#else\n#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n#ifndef __has_builtin\n#  define __has_builtin(x) 0  /* compat. 
with non-clang compilers */\n#endif\n\n/* code only tested on 32 and 64 bits systems */\n#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }\nMEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }\n\n/* detects whether we are being compiled under msan */\n#if defined (__has_feature)\n#  if __has_feature(memory_sanitizer)\n#    define MEMORY_SANITIZER 1\n#  endif\n#endif\n\n#if defined (MEMORY_SANITIZER)\n/* Not all platforms that support msan provide sanitizers/msan_interface.h.\n * We therefore declare the functions we need ourselves, rather than trying to\n * include the header file... */\n\n#include <stdint.h> /* intptr_t */\n\n/* Make memory region fully initialized (without changing its contents). */\nvoid __msan_unpoison(const volatile void *a, size_t size);\n\n/* Make memory region fully uninitialized (without changing its contents).\n   This is a legacy interface that does not update origin information. Use\n   __msan_allocated_memory() instead. */\nvoid __msan_poison(const volatile void *a, size_t size);\n\n/* Returns the offset of the first (at least partially) poisoned byte in the\n   memory range, or -1 if the whole range is good. */\nintptr_t __msan_test_shadow(const volatile void *x, size_t size);\n#endif\n\n/* detects whether we are being compiled under asan */\n#if defined (__has_feature)\n#  if __has_feature(address_sanitizer)\n#    define ADDRESS_SANITIZER 1\n#  endif\n#elif defined(__SANITIZE_ADDRESS__)\n#  define ADDRESS_SANITIZER 1\n#endif\n\n#if defined (ADDRESS_SANITIZER)\n/* Not all platforms that support asan provide sanitizers/asan_interface.h.\n * We therefore declare the functions we need ourselves, rather than trying to\n * include the header file... */\n\n/**\n * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.\n *\n * This memory must be previously allocated by your program. 
Instrumented\n * code is forbidden from accessing addresses in this region until it is\n * unpoisoned. This function is not guaranteed to poison the entire region -\n * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan\n * alignment restrictions.\n *\n * \\note This function is not thread-safe because no two threads can poison or\n * unpoison memory in the same memory region simultaneously.\n *\n * \\param addr Start of memory region.\n * \\param size Size of memory region. */\nvoid __asan_poison_memory_region(void const volatile *addr, size_t size);\n\n/**\n * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.\n *\n * This memory must be previously allocated by your program. Accessing\n * addresses in this region is allowed until this region is poisoned again.\n * This function could unpoison a super-region of <c>[addr, addr+size)</c> due\n * to ASan alignment restrictions.\n *\n * \\note This function is not thread-safe because no two threads can\n * poison or unpoison memory in the same memory region simultaneously.\n *\n * \\param addr Start of memory region.\n * \\param size Size of memory region. 
*/\nvoid __asan_unpoison_memory_region(void const volatile *addr, size_t size);\n#endif\n\n\n/*-**************************************************************\n*  Basic Types\n*****************************************************************/\n#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )\n# include <stdint.h>\n  typedef   uint8_t BYTE;\n  typedef  uint16_t U16;\n  typedef   int16_t S16;\n  typedef  uint32_t U32;\n  typedef   int32_t S32;\n  typedef  uint64_t U64;\n  typedef   int64_t S64;\n#else\n# include <limits.h>\n#if CHAR_BIT != 8\n#  error \"this implementation requires char to be exactly 8-bit type\"\n#endif\n  typedef unsigned char      BYTE;\n#if USHRT_MAX != 65535\n#  error \"this implementation requires short to be exactly 16-bit type\"\n#endif\n  typedef unsigned short      U16;\n  typedef   signed short      S16;\n#if UINT_MAX != 4294967295\n#  error \"this implementation requires int to be exactly 32-bit type\"\n#endif\n  typedef unsigned int        U32;\n  typedef   signed int        S32;\n/* note : there are no limits defined for long long type in C90.\n * limits exist in C99, however, in such case, <stdint.h> is preferred */\n  typedef unsigned long long  U64;\n  typedef   signed long long  S64;\n#endif\n\n\n/*-**************************************************************\n*  Memory I/O\n*****************************************************************/\n/* MEM_FORCE_MEMORY_ACCESS :\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. 
It depends on compiler extension (i.e., not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. This method is portable but violate C standard.\n *            It can generate buggy code on targets depending on alignment.\n *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)\n * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define MEM_FORCE_MEMORY_ACCESS 2\n#  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)\n#    define MEM_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\nMEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }\nMEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }\n\nMEM_STATIC unsigned MEM_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)\n\n/* violates C standard, by lying on structure alignment.\nOnly use if no other choice to achieve best performance on target platform */\nMEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }\nMEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }\nMEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }\nMEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; 
}\nMEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }\nMEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }\n\n#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\n#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))\n    __pragma( pack(push, 1) )\n    typedef struct { U16 v; } unalign16;\n    typedef struct { U32 v; } unalign32;\n    typedef struct { U64 v; } unalign64;\n    typedef struct { size_t v; } unalignArch;\n    __pragma( pack(pop) )\n#else\n    typedef struct { U16 v; } __attribute__((packed)) unalign16;\n    typedef struct { U32 v; } __attribute__((packed)) unalign32;\n    typedef struct { U64 v; } __attribute__((packed)) unalign64;\n    typedef struct { size_t v; } __attribute__((packed)) unalignArch;\n#endif\n\nMEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }\nMEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }\nMEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }\nMEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }\nMEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }\nMEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }\n\n#else\n\n/* default method, safe and standard.\n   can sometimes prove slower */\n\nMEM_STATIC U16 MEM_read16(const void* memPtr)\n{\n    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U32 MEM_read32(const void* memPtr)\n{\n    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U64 MEM_read64(const void* memPtr)\n{\n    U64 val; memcpy(&val, memPtr, 
sizeof(val)); return val;\n}\n\nMEM_STATIC size_t MEM_readST(const void* memPtr)\n{\n    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\nMEM_STATIC void MEM_write32(void* memPtr, U32 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\nMEM_STATIC void MEM_write64(void* memPtr, U64 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif /* MEM_FORCE_MEMORY_ACCESS */\n\nMEM_STATIC U32 MEM_swap32(U32 in)\n{\n#if defined(_MSC_VER)     /* Visual Studio */\n    return _byteswap_ulong(in);\n#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \\\n  || (defined(__clang__) && __has_builtin(__builtin_bswap32))\n    return __builtin_bswap32(in);\n#else\n    return  ((in << 24) & 0xff000000 ) |\n            ((in <<  8) & 0x00ff0000 ) |\n            ((in >>  8) & 0x0000ff00 ) |\n            ((in >> 24) & 0x000000ff );\n#endif\n}\n\nMEM_STATIC U64 MEM_swap64(U64 in)\n{\n#if defined(_MSC_VER)     /* Visual Studio */\n    return _byteswap_uint64(in);\n#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \\\n  || (defined(__clang__) && __has_builtin(__builtin_bswap64))\n    return __builtin_bswap64(in);\n#else\n    return  ((in << 56) & 0xff00000000000000ULL) |\n            ((in << 40) & 0x00ff000000000000ULL) |\n            ((in << 24) & 0x0000ff0000000000ULL) |\n            ((in << 8)  & 0x000000ff00000000ULL) |\n            ((in >> 8)  & 0x00000000ff000000ULL) |\n            ((in >> 24) & 0x0000000000ff0000ULL) |\n            ((in >> 40) & 0x000000000000ff00ULL) |\n            ((in >> 56) & 0x00000000000000ffULL);\n#endif\n}\n\nMEM_STATIC size_t MEM_swapST(size_t in)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_swap32((U32)in);\n    else\n        return (size_t)MEM_swap64((U64)in);\n}\n\n/*=== Little endian r/w ===*/\n\nMEM_STATIC U16 MEM_readLE16(const void* memPtr)\n{\n    if 
(MEM_isLittleEndian())\n        return MEM_read16(memPtr);\n    else {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)(p[0] + (p[1]<<8));\n    }\n}\n\nMEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)\n{\n    if (MEM_isLittleEndian()) {\n        MEM_write16(memPtr, val);\n    } else {\n        BYTE* p = (BYTE*)memPtr;\n        p[0] = (BYTE)val;\n        p[1] = (BYTE)(val>>8);\n    }\n}\n\nMEM_STATIC U32 MEM_readLE24(const void* memPtr)\n{\n    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);\n}\n\nMEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)\n{\n    MEM_writeLE16(memPtr, (U16)val);\n    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);\n}\n\nMEM_STATIC U32 MEM_readLE32(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read32(memPtr);\n    else\n        return MEM_swap32(MEM_read32(memPtr));\n}\n\nMEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)\n{\n    if (MEM_isLittleEndian())\n        MEM_write32(memPtr, val32);\n    else\n        MEM_write32(memPtr, MEM_swap32(val32));\n}\n\nMEM_STATIC U64 MEM_readLE64(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read64(memPtr);\n    else\n        return MEM_swap64(MEM_read64(memPtr));\n}\n\nMEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)\n{\n    if (MEM_isLittleEndian())\n        MEM_write64(memPtr, val64);\n    else\n        MEM_write64(memPtr, MEM_swap64(val64));\n}\n\nMEM_STATIC size_t MEM_readLEST(const void* memPtr)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_readLE32(memPtr);\n    else\n        return (size_t)MEM_readLE64(memPtr);\n}\n\nMEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)\n{\n    if (MEM_32bits())\n        MEM_writeLE32(memPtr, (U32)val);\n    else\n        MEM_writeLE64(memPtr, (U64)val);\n}\n\n/*=== Big endian r/w ===*/\n\nMEM_STATIC U32 MEM_readBE32(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_swap32(MEM_read32(memPtr));\n    else\n        return 
MEM_read32(memPtr);\n}\n\nMEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)\n{\n    if (MEM_isLittleEndian())\n        MEM_write32(memPtr, MEM_swap32(val32));\n    else\n        MEM_write32(memPtr, val32);\n}\n\nMEM_STATIC U64 MEM_readBE64(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_swap64(MEM_read64(memPtr));\n    else\n        return MEM_read64(memPtr);\n}\n\nMEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)\n{\n    if (MEM_isLittleEndian())\n        MEM_write64(memPtr, MEM_swap64(val64));\n    else\n        MEM_write64(memPtr, val64);\n}\n\nMEM_STATIC size_t MEM_readBEST(const void* memPtr)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_readBE32(memPtr);\n    else\n        return (size_t)MEM_readBE64(memPtr);\n}\n\nMEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)\n{\n    if (MEM_32bits())\n        MEM_writeBE32(memPtr, (U32)val);\n    else\n        MEM_writeBE64(memPtr, (U64)val);\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* MEM_H_MODULE */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/pool.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/* ======   Dependencies   ======= */\n#include <stddef.h>    /* size_t */\n#include \"debug.h\"     /* assert */\n#include \"zstd_internal.h\"  /* ZSTD_malloc, ZSTD_free */\n#include \"pool.h\"\n\n/* ======   Compiler specifics   ====== */\n#if defined(_MSC_VER)\n#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */\n#endif\n\n\n#ifdef ZSTD_MULTITHREAD\n\n#include \"threading.h\"   /* pthread adaptation */\n\n/* A job is a function and an opaque argument */\ntypedef struct POOL_job_s {\n    POOL_function function;\n    void *opaque;\n} POOL_job;\n\nstruct POOL_ctx_s {\n    ZSTD_customMem customMem;\n    /* Keep track of the threads */\n    ZSTD_pthread_t* threads;\n    size_t threadCapacity;\n    size_t threadLimit;\n\n    /* The queue is a circular buffer */\n    POOL_job *queue;\n    size_t queueHead;\n    size_t queueTail;\n    size_t queueSize;\n\n    /* The number of threads working on jobs */\n    size_t numThreadsBusy;\n    /* Indicates if the queue is empty */\n    int queueEmpty;\n\n    /* The mutex protects the queue */\n    ZSTD_pthread_mutex_t queueMutex;\n    /* Condition variable for pushers to wait on when the queue is full */\n    ZSTD_pthread_cond_t queuePushCond;\n    /* Condition variables for poppers to wait on when the queue is empty */\n    ZSTD_pthread_cond_t queuePopCond;\n    /* Indicates if the queue is shutting down */\n    int shutdown;\n};\n\n/* POOL_thread() :\n * Work thread for the thread pool.\n * Waits for jobs and executes them.\n * @returns : NULL on failure else non-null.\n 
*/\nstatic void* POOL_thread(void* opaque) {\n    POOL_ctx* const ctx = (POOL_ctx*)opaque;\n    if (!ctx) { return NULL; }\n    for (;;) {\n        /* Lock the mutex and wait for a non-empty queue or until shutdown */\n        ZSTD_pthread_mutex_lock(&ctx->queueMutex);\n\n        while ( ctx->queueEmpty\n            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {\n            if (ctx->shutdown) {\n                /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit),\n                 * a few threads will be shutdown while !queueEmpty,\n                 * but enough threads will remain active to finish the queue */\n                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);\n                return opaque;\n            }\n            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);\n        }\n        /* Pop a job off the queue */\n        {   POOL_job const job = ctx->queue[ctx->queueHead];\n            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;\n            ctx->numThreadsBusy++;\n            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;\n            /* Unlock the mutex, signal a pusher, and run the job */\n            ZSTD_pthread_cond_signal(&ctx->queuePushCond);\n            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);\n\n            job.function(job.opaque);\n\n            /* If the intended queue size was 0, signal after finishing job */\n            ZSTD_pthread_mutex_lock(&ctx->queueMutex);\n            ctx->numThreadsBusy--;\n            if (ctx->queueSize == 1) {\n                ZSTD_pthread_cond_signal(&ctx->queuePushCond);\n            }\n            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);\n        }\n    }  /* for (;;) */\n    assert(0);  /* Unreachable */\n}\n\nPOOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {\n    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);\n}\n\nPOOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,\n                              
 ZSTD_customMem customMem) {\n    POOL_ctx* ctx;\n    /* Check parameters */\n    if (!numThreads) { return NULL; }\n    /* Allocate the context and zero initialize */\n    ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);\n    if (!ctx) { return NULL; }\n    /* Initialize the job queue.\n     * It needs one extra space since one space is wasted to differentiate\n     * empty and full queues.\n     */\n    ctx->queueSize = queueSize + 1;\n    ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);\n    ctx->queueHead = 0;\n    ctx->queueTail = 0;\n    ctx->numThreadsBusy = 0;\n    ctx->queueEmpty = 1;\n    {\n        int error = 0;\n        error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);\n        error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);\n        error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);\n        if (error) { POOL_free(ctx); return NULL; }\n    }\n    ctx->shutdown = 0;\n    /* Allocate space for the thread handles */\n    ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);\n    ctx->threadCapacity = 0;\n    ctx->customMem = customMem;\n    /* Check for errors */\n    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }\n    /* Initialize the threads */\n    {   size_t i;\n        for (i = 0; i < numThreads; ++i) {\n            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {\n                ctx->threadCapacity = i;\n                POOL_free(ctx);\n                return NULL;\n        }   }\n        ctx->threadCapacity = numThreads;\n        ctx->threadLimit = numThreads;\n    }\n    return ctx;\n}\n\n/*! 
POOL_join() :\n    Shutdown the queue, wake any sleeping threads, and join all of the threads.\n*/\nstatic void POOL_join(POOL_ctx* ctx) {\n    /* Shut down the queue */\n    ZSTD_pthread_mutex_lock(&ctx->queueMutex);\n    ctx->shutdown = 1;\n    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);\n    /* Wake up sleeping threads */\n    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);\n    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);\n    /* Join all of the threads */\n    {   size_t i;\n        for (i = 0; i < ctx->threadCapacity; ++i) {\n            ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */\n    }   }\n}\n\nvoid POOL_free(POOL_ctx *ctx) {\n    if (!ctx) { return; }\n    POOL_join(ctx);\n    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);\n    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);\n    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);\n    ZSTD_free(ctx->queue, ctx->customMem);\n    ZSTD_free(ctx->threads, ctx->customMem);\n    ZSTD_free(ctx, ctx->customMem);\n}\n\n\n\nsize_t POOL_sizeof(POOL_ctx *ctx) {\n    if (ctx==NULL) return 0;  /* supports sizeof NULL */\n    return sizeof(*ctx)\n        + ctx->queueSize * sizeof(POOL_job)\n        + ctx->threadCapacity * sizeof(ZSTD_pthread_t);\n}\n\n\n/* @return : 0 on success, 1 on error */\nstatic int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)\n{\n    if (numThreads <= ctx->threadCapacity) {\n        if (!numThreads) return 1;\n        ctx->threadLimit = numThreads;\n        return 0;\n    }\n    /* numThreads > threadCapacity */\n    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);\n        if (!threadPool) return 1;\n        /* replace existing thread pool */\n        memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));\n        ZSTD_free(ctx->threads, ctx->customMem);\n        ctx->threads = threadPool;\n        /* Initialize additional threads */\n        {   size_t threadId;\n    
        for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {\n                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {\n                    ctx->threadCapacity = threadId;\n                    return 1;\n            }   }\n    }   }\n    /* successfully expanded */\n    ctx->threadCapacity = numThreads;\n    ctx->threadLimit = numThreads;\n    return 0;\n}\n\n/* @return : 0 on success, 1 on error */\nint POOL_resize(POOL_ctx* ctx, size_t numThreads)\n{\n    int result;\n    if (ctx==NULL) return 1;\n    ZSTD_pthread_mutex_lock(&ctx->queueMutex);\n    result = POOL_resize_internal(ctx, numThreads);\n    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);\n    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);\n    return result;\n}\n\n/**\n * Returns 1 if the queue is full and 0 otherwise.\n *\n * When queueSize is 1 (pool was created with an intended queueSize of 0),\n * then a queue is empty if there is a thread free _and_ no job is waiting.\n */\nstatic int isQueueFull(POOL_ctx const* ctx) {\n    if (ctx->queueSize > 1) {\n        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);\n    } else {\n        return (ctx->numThreadsBusy == ctx->threadLimit) ||\n               !ctx->queueEmpty;\n    }\n}\n\n\nstatic void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)\n{\n    POOL_job const job = {function, opaque};\n    assert(ctx != NULL);\n    if (ctx->shutdown) return;\n\n    ctx->queueEmpty = 0;\n    ctx->queue[ctx->queueTail] = job;\n    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;\n    ZSTD_pthread_cond_signal(&ctx->queuePopCond);\n}\n\nvoid POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)\n{\n    assert(ctx != NULL);\n    ZSTD_pthread_mutex_lock(&ctx->queueMutex);\n    /* Wait until there is space in the queue for the new job */\n    while (isQueueFull(ctx) && (!ctx->shutdown)) {\n        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);\n    
}\n    POOL_add_internal(ctx, function, opaque);\n    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);\n}\n\n\nint POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)\n{\n    assert(ctx != NULL);\n    ZSTD_pthread_mutex_lock(&ctx->queueMutex);\n    if (isQueueFull(ctx)) {\n        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);\n        return 0;\n    }\n    POOL_add_internal(ctx, function, opaque);\n    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);\n    return 1;\n}\n\n\n#else  /* ZSTD_MULTITHREAD  not defined */\n\n/* ========================== */\n/* No multi-threading support */\n/* ========================== */\n\n\n/* We don't need any data, but if it is empty, malloc() might return NULL. */\nstruct POOL_ctx_s {\n    int dummy;\n};\nstatic POOL_ctx g_ctx;\n\nPOOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {\n    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);\n}\n\nPOOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {\n    (void)numThreads;\n    (void)queueSize;\n    (void)customMem;\n    return &g_ctx;\n}\n\nvoid POOL_free(POOL_ctx* ctx) {\n    assert(!ctx || ctx == &g_ctx);\n    (void)ctx;\n}\n\nint POOL_resize(POOL_ctx* ctx, size_t numThreads) {\n    (void)ctx; (void)numThreads;\n    return 0;\n}\n\nvoid POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {\n    (void)ctx;\n    function(opaque);\n}\n\nint POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {\n    (void)ctx;\n    function(opaque);\n    return 1;\n}\n\nsize_t POOL_sizeof(POOL_ctx* ctx) {\n    if (ctx==NULL) return 0;  /* supports sizeof NULL */\n    assert(ctx == &g_ctx);\n    return sizeof(*ctx);\n}\n\n#endif  /* ZSTD_MULTITHREAD */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/pool.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef POOL_H\n#define POOL_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n#include <stddef.h>   /* size_t */\n#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */\n#include \"zstd.h\"\n\ntypedef struct POOL_ctx_s POOL_ctx;\n\n/*! POOL_create() :\n *  Create a thread pool with at most `numThreads` threads.\n * `numThreads` must be at least 1.\n *  The maximum number of queued jobs before blocking is `queueSize`.\n * @return : POOL_ctx pointer on success, else NULL.\n*/\nPOOL_ctx* POOL_create(size_t numThreads, size_t queueSize);\n\nPOOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,\n                               ZSTD_customMem customMem);\n\n/*! POOL_free() :\n *  Free a thread pool returned by POOL_create().\n */\nvoid POOL_free(POOL_ctx* ctx);\n\n/*! POOL_resize() :\n *  Expands or shrinks pool's number of threads.\n *  This is more efficient than releasing + creating a new context,\n *  since it tries to preserve and re-use existing threads.\n * `numThreads` must be at least 1.\n * @return : 0 when resize was successful,\n *           !0 (typically 1) if there is an error.\n *    note : only numThreads can be resized, queueSize remains unchanged.\n */\nint POOL_resize(POOL_ctx* ctx, size_t numThreads);\n\n/*! POOL_sizeof() :\n * @return threadpool memory usage\n *  note : compatible with NULL (returns 0 in this case)\n */\nsize_t POOL_sizeof(POOL_ctx* ctx);\n\n/*! POOL_function :\n *  The function type that can be added to a thread pool.\n */\ntypedef void (*POOL_function)(void*);\n\n/*! 
POOL_add() :\n *  Add the job `function(opaque)` to the thread pool. `ctx` must be valid.\n *  Possibly blocks until there is room in the queue.\n *  Note : The function may be executed asynchronously,\n *         therefore, `opaque` must live until function has been completed.\n */\nvoid POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);\n\n\n/*! POOL_tryAdd() :\n *  Add the job `function(opaque)` to thread pool _if_ a worker is available.\n *  Returns immediately even if not (does not block).\n * @return : 1 if successful, 0 if not.\n */\nint POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/threading.c",
    "content": "/**\n * Copyright (c) 2016 Tino Reichardt\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n *\n * You can contact the author at:\n * - zstdmt source repository: https://github.com/mcmilk/zstdmt\n */\n\n/**\n * This file will hold wrapper for systems, which do not support pthreads\n */\n\n#include \"threading.h\"\n\n/* create fake symbol to avoid empty translation unit warning */\nint g_ZSTD_threading_useless_symbol;\n\n#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)\n\n/**\n * Windows minimalist Pthread Wrapper, based on :\n * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html\n */\n\n\n/* ===  Dependencies  === */\n#include <process.h>\n#include <errno.h>\n\n\n/* ===  Implementation  === */\n\nstatic unsigned __stdcall worker(void *arg)\n{\n    ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;\n    thread->arg = thread->start_routine(thread->arg);\n    return 0;\n}\n\nint ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,\n            void* (*start_routine) (void*), void* arg)\n{\n    (void)unused;\n    thread->arg = arg;\n    thread->start_routine = start_routine;\n    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);\n\n    if (!thread->handle)\n        return errno;\n    else\n        return 0;\n}\n\nint ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)\n{\n    DWORD result;\n\n    if (!thread.handle) return 0;\n\n    result = WaitForSingleObject(thread.handle, INFINITE);\n    switch (result) {\n    case WAIT_OBJECT_0:\n        if (value_ptr) *value_ptr = thread.arg;\n        return 0;\n    case WAIT_ABANDONED:\n        return EINVAL;\n    default:\n        return GetLastError();\n    }\n}\n\n#endif   /* ZSTD_MULTITHREAD */\n\n#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && 
!defined(_WIN32)\n\n#include <stdlib.h>\n\nint ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)\n{\n    *mutex = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));\n    if (!*mutex)\n        return 1;\n    return pthread_mutex_init(*mutex, attr);\n}\n\nint ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)\n{\n    if (!*mutex)\n        return 0;\n    {\n        int const ret = pthread_mutex_destroy(*mutex);\n        free(*mutex);\n        return ret;\n    }\n}\n\nint ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)\n{\n    *cond = (pthread_cond_t*)malloc(sizeof(pthread_cond_t));\n    if (!*cond)\n        return 1;\n    return pthread_cond_init(*cond, attr);\n}\n\nint ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)\n{\n    if (!*cond)\n        return 0;\n    {\n        int const ret = pthread_cond_destroy(*cond);\n        free(*cond);\n        return ret;\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/threading.h",
    "content": "/**\n * Copyright (c) 2016 Tino Reichardt\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n *\n * You can contact the author at:\n * - zstdmt source repository: https://github.com/mcmilk/zstdmt\n */\n\n#ifndef THREADING_H_938743\n#define THREADING_H_938743\n\n#include \"debug.h\"\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)\n\n/**\n * Windows minimalist Pthread Wrapper, based on :\n * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html\n */\n#ifdef WINVER\n#  undef WINVER\n#endif\n#define WINVER       0x0600\n\n#ifdef _WIN32_WINNT\n#  undef _WIN32_WINNT\n#endif\n#define _WIN32_WINNT 0x0600\n\n#ifndef WIN32_LEAN_AND_MEAN\n#  define WIN32_LEAN_AND_MEAN\n#endif\n\n#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */\n#include <windows.h>\n#undef ERROR\n#define ERROR(name) ZSTD_ERROR(name)\n\n\n/* mutex */\n#define ZSTD_pthread_mutex_t           CRITICAL_SECTION\n#define ZSTD_pthread_mutex_init(a, b)  ((void)(b), InitializeCriticalSection((a)), 0)\n#define ZSTD_pthread_mutex_destroy(a)  DeleteCriticalSection((a))\n#define ZSTD_pthread_mutex_lock(a)     EnterCriticalSection((a))\n#define ZSTD_pthread_mutex_unlock(a)   LeaveCriticalSection((a))\n\n/* condition variable */\n#define ZSTD_pthread_cond_t             CONDITION_VARIABLE\n#define ZSTD_pthread_cond_init(a, b)    ((void)(b), InitializeConditionVariable((a)), 0)\n#define ZSTD_pthread_cond_destroy(a)    ((void)(a))\n#define ZSTD_pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)\n#define ZSTD_pthread_cond_signal(a)     WakeConditionVariable((a))\n#define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))\n\n/* ZSTD_pthread_create() and ZSTD_pthread_join() */\ntypedef struct {\n    
HANDLE handle;\n    void* (*start_routine)(void*);\n    void* arg;\n} ZSTD_pthread_t;\n\nint ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,\n                   void* (*start_routine) (void*), void* arg);\n\nint ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);\n\n/**\n * add here more wrappers as required\n */\n\n\n#elif defined(ZSTD_MULTITHREAD)    /* posix assumed ; need a better detection method */\n/* ===   POSIX Systems   === */\n#  include <pthread.h>\n\n#if DEBUGLEVEL < 1\n\n#define ZSTD_pthread_mutex_t            pthread_mutex_t\n#define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))\n#define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))\n#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock((a))\n#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock((a))\n\n#define ZSTD_pthread_cond_t             pthread_cond_t\n#define ZSTD_pthread_cond_init(a, b)    pthread_cond_init((a), (b))\n#define ZSTD_pthread_cond_destroy(a)    pthread_cond_destroy((a))\n#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait((a), (b))\n#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal((a))\n#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast((a))\n\n#define ZSTD_pthread_t                  pthread_t\n#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))\n#define ZSTD_pthread_join(a, b)         pthread_join((a),(b))\n\n#else /* DEBUGLEVEL >= 1 */\n\n/* Debug implementation of threading.\n * In this implementation we use pointers for mutexes and condition variables.\n * This way, if we forget to init/destroy them the program will crash or ASAN\n * will report leaks.\n */\n\n#define ZSTD_pthread_mutex_t            pthread_mutex_t*\nint ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);\nint ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);\n#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock(*(a))\n#define 
ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock(*(a))\n\n#define ZSTD_pthread_cond_t             pthread_cond_t*\nint ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);\nint ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);\n#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait(*(a), *(b))\n#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal(*(a))\n#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast(*(a))\n\n#define ZSTD_pthread_t                  pthread_t\n#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))\n#define ZSTD_pthread_join(a, b)         pthread_join((a),(b))\n\n#endif\n\n#else  /* ZSTD_MULTITHREAD not defined */\n/* No multithreading support */\n\ntypedef int ZSTD_pthread_mutex_t;\n#define ZSTD_pthread_mutex_init(a, b)   ((void)(a), (void)(b), 0)\n#define ZSTD_pthread_mutex_destroy(a)   ((void)(a))\n#define ZSTD_pthread_mutex_lock(a)      ((void)(a))\n#define ZSTD_pthread_mutex_unlock(a)    ((void)(a))\n\ntypedef int ZSTD_pthread_cond_t;\n#define ZSTD_pthread_cond_init(a, b)    ((void)(a), (void)(b), 0)\n#define ZSTD_pthread_cond_destroy(a)    ((void)(a))\n#define ZSTD_pthread_cond_wait(a, b)    ((void)(a), (void)(b))\n#define ZSTD_pthread_cond_signal(a)     ((void)(a))\n#define ZSTD_pthread_cond_broadcast(a)  ((void)(a))\n\n/* do not use ZSTD_pthread_t */\n\n#endif /* ZSTD_MULTITHREAD */\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* THREADING_H_938743 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/xxhash.c",
    "content": "/*\n*  xxHash - Fast Hash algorithm\n*  Copyright (C) 2012-2016, Yann Collet\n*\n*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n*\n*  Redistribution and use in source and binary forms, with or without\n*  modification, are permitted provided that the following conditions are\n*  met:\n*\n*  * Redistributions of source code must retain the above copyright\n*  notice, this list of conditions and the following disclaimer.\n*  * Redistributions in binary form must reproduce the above\n*  copyright notice, this list of conditions and the following disclaimer\n*  in the documentation and/or other materials provided with the\n*  distribution.\n*\n*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n*  \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*\n*  You can contact the author at :\n*  - xxHash homepage: http://www.xxhash.com\n*  - xxHash source repository : https://github.com/Cyan4973/xxHash\n*/\n\n\n/* *************************************\n*  Tuning parameters\n***************************************/\n/*!XXH_FORCE_MEMORY_ACCESS :\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The 
below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. This method doesn't depend on compiler but violate C standard.\n *            It can generate buggy code on targets which do not support unaligned memory accesses.\n *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://stackoverflow.com/a/32095106/646947 for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define XXH_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \\\n  defined(__ICCARM__)\n#    define XXH_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\n/*!XXH_ACCEPT_NULL_INPUT_POINTER :\n * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.\n * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.\n * By default, this option is disabled. 
To enable it, uncomment below define :\n */\n/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */\n\n/*!XXH_FORCE_NATIVE_FORMAT :\n * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.\n * Results are therefore identical for little-endian and big-endian CPU.\n * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.\n * Should endian-independence be of no importance for your application, you may set the #define below to 1,\n * to improve speed for Big-endian CPU.\n * This option has no impact on Little_Endian CPU.\n */\n#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */\n#  define XXH_FORCE_NATIVE_FORMAT 0\n#endif\n\n/*!XXH_FORCE_ALIGN_CHECK :\n * This is a minor performance trick, only useful with lots of very small keys.\n * It means : check for aligned/unaligned input.\n * The check costs one initial branch per hash; set to 0 when the input data\n * is guaranteed to be aligned.\n */\n#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */\n#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)\n#    define XXH_FORCE_ALIGN_CHECK 0\n#  else\n#    define XXH_FORCE_ALIGN_CHECK 1\n#  endif\n#endif\n\n\n/* *************************************\n*  Includes & Memory related functions\n***************************************/\n/* Modify the local functions below should you wish to use some other memory routines */\n/* for malloc(), free() */\n#include <stdlib.h>\n#include <stddef.h>     /* size_t */\nstatic void* XXH_malloc(size_t s) { return malloc(s); }\nstatic void  XXH_free  (void* p)  { free(p); }\n/* for memcpy() */\n#include <string.h>\nstatic void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }\n\n#ifndef XXH_STATIC_LINKING_ONLY\n#  define XXH_STATIC_LINKING_ONLY\n#endif\n#include \"xxhash.h\"\n\n\n/* *************************************\n*  Compiler Specific 
Options\n***************************************/\n#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#  define INLINE_KEYWORD inline\n#else\n#  define INLINE_KEYWORD\n#endif\n\n#if defined(__GNUC__) || defined(__ICCARM__)\n#  define FORCE_INLINE_ATTR __attribute__((always_inline))\n#elif defined(_MSC_VER)\n#  define FORCE_INLINE_ATTR __forceinline\n#else\n#  define FORCE_INLINE_ATTR\n#endif\n\n#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR\n\n\n#ifdef _MSC_VER\n#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */\n#endif\n\n\n/* *************************************\n*  Basic Types\n***************************************/\n#ifndef MEM_MODULE\n# define MEM_MODULE\n# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )\n#   include <stdint.h>\n    typedef uint8_t  BYTE;\n    typedef uint16_t U16;\n    typedef uint32_t U32;\n    typedef  int32_t S32;\n    typedef uint64_t U64;\n#  else\n    typedef unsigned char      BYTE;\n    typedef unsigned short     U16;\n    typedef unsigned int       U32;\n    typedef   signed int       S32;\n    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */\n#  endif\n#endif\n\n\n#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))\n\n/* Force direct memory access. 
Only works on CPU which support unaligned memory access in hardware */\nstatic U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }\nstatic U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\n#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;\n\nstatic U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\nstatic U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }\n\n#else\n\n/* portable and safe solution. Generally efficient.\n * see : http://stackoverflow.com/a/32095106/646947\n */\n\nstatic U32 XXH_read32(const void* memPtr)\n{\n    U32 val;\n    memcpy(&val, memPtr, sizeof(val));\n    return val;\n}\n\nstatic U64 XXH_read64(const void* memPtr)\n{\n    U64 val;\n    memcpy(&val, memPtr, sizeof(val));\n    return val;\n}\n\n#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */\n\n\n/* ****************************************\n*  Compiler-specific Functions and Macros\n******************************************/\n#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)\n\n/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */\n#if defined(_MSC_VER)\n#  define XXH_rotl32(x,r) _rotl(x,r)\n#  define XXH_rotl64(x,r) _rotl64(x,r)\n#else\n#if defined(__ICCARM__)\n#  include <intrinsics.h>\n#  define XXH_rotl32(x,r) __ROR(x,(32 - r))\n#else\n#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))\n#endif\n#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))\n#endif\n\n#if defined(_MSC_VER)     /* Visual Studio */\n#  define XXH_swap32 _byteswap_ulong\n#  define XXH_swap64 _byteswap_uint64\n#elif GCC_VERSION >= 403\n#  define XXH_swap32 __builtin_bswap32\n#  define XXH_swap64 __builtin_bswap64\n#else\nstatic U32 XXH_swap32 (U32 
x)\n{\n    return  ((x << 24) & 0xff000000 ) |\n            ((x <<  8) & 0x00ff0000 ) |\n            ((x >>  8) & 0x0000ff00 ) |\n            ((x >> 24) & 0x000000ff );\n}\nstatic U64 XXH_swap64 (U64 x)\n{\n    return  ((x << 56) & 0xff00000000000000ULL) |\n            ((x << 40) & 0x00ff000000000000ULL) |\n            ((x << 24) & 0x0000ff0000000000ULL) |\n            ((x << 8)  & 0x000000ff00000000ULL) |\n            ((x >> 8)  & 0x00000000ff000000ULL) |\n            ((x >> 24) & 0x0000000000ff0000ULL) |\n            ((x >> 40) & 0x000000000000ff00ULL) |\n            ((x >> 56) & 0x00000000000000ffULL);\n}\n#endif\n\n\n/* *************************************\n*  Architecture Macros\n***************************************/\ntypedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;\n\n/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */\n#ifndef XXH_CPU_LITTLE_ENDIAN\n    static const int g_one = 1;\n#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))\n#endif\n\n\n/* ***************************\n*  Memory reads\n*****************************/\ntypedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;\n\nFORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)\n{\n    if (align==XXH_unaligned)\n        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));\n    else\n        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);\n}\n\nFORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)\n{\n    return XXH_readLE32_align(ptr, endian, XXH_unaligned);\n}\n\nstatic U32 XXH_readBE32(const void* ptr)\n{\n    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);\n}\n\nFORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)\n{\n    if (align==XXH_unaligned)\n        return endian==XXH_littleEndian ? 
XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));\n    else\n        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);\n}\n\nFORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)\n{\n    return XXH_readLE64_align(ptr, endian, XXH_unaligned);\n}\n\nstatic U64 XXH_readBE64(const void* ptr)\n{\n    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);\n}\n\n\n/* *************************************\n*  Macros\n***************************************/\n#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */\n\n\n/* *************************************\n*  Constants\n***************************************/\nstatic const U32 PRIME32_1 = 2654435761U;\nstatic const U32 PRIME32_2 = 2246822519U;\nstatic const U32 PRIME32_3 = 3266489917U;\nstatic const U32 PRIME32_4 =  668265263U;\nstatic const U32 PRIME32_5 =  374761393U;\n\nstatic const U64 PRIME64_1 = 11400714785074694791ULL;\nstatic const U64 PRIME64_2 = 14029467366897019727ULL;\nstatic const U64 PRIME64_3 =  1609587929392839161ULL;\nstatic const U64 PRIME64_4 =  9650029242287828579ULL;\nstatic const U64 PRIME64_5 =  2870177450012600261ULL;\n\nXXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }\n\n\n/* **************************\n*  Utils\n****************************/\nXXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)\n{\n    memcpy(dstState, srcState, sizeof(*dstState));\n}\n\nXXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)\n{\n    memcpy(dstState, srcState, sizeof(*dstState));\n}\n\n\n/* ***************************\n*  Simple Hash Functions\n*****************************/\n\nstatic U32 XXH32_round(U32 seed, U32 input)\n{\n    seed += input * PRIME32_2;\n    seed  = XXH_rotl32(seed, 13);\n    seed *= PRIME32_1;\n    return 
seed;\n}\n\nFORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)\n{\n    const BYTE* p = (const BYTE*)input;\n    const BYTE* bEnd = p + len;\n    U32 h32;\n#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)\n\n#ifdef XXH_ACCEPT_NULL_INPUT_POINTER\n    if (p==NULL) {\n        len=0;\n        bEnd=p=(const BYTE*)(size_t)16;\n    }\n#endif\n\n    if (len>=16) {\n        const BYTE* const limit = bEnd - 16;\n        U32 v1 = seed + PRIME32_1 + PRIME32_2;\n        U32 v2 = seed + PRIME32_2;\n        U32 v3 = seed + 0;\n        U32 v4 = seed - PRIME32_1;\n\n        do {\n            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;\n            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;\n            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;\n            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;\n        } while (p<=limit);\n\n        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);\n    } else {\n        h32  = seed + PRIME32_5;\n    }\n\n    h32 += (U32) len;\n\n    while (p+4<=bEnd) {\n        h32 += XXH_get32bits(p) * PRIME32_3;\n        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;\n        p+=4;\n    }\n\n    while (p<bEnd) {\n        h32 += (*p) * PRIME32_5;\n        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;\n        p++;\n    }\n\n    h32 ^= h32 >> 15;\n    h32 *= PRIME32_2;\n    h32 ^= h32 >> 13;\n    h32 *= PRIME32_3;\n    h32 ^= h32 >> 16;\n\n    return h32;\n}\n\n\nXXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)\n{\n#if 0\n    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */\n    XXH32_CREATESTATE_STATIC(state);\n    XXH32_reset(state, seed);\n    XXH32_update(state, input, len);\n    return XXH32_digest(state);\n#else\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if (XXH_FORCE_ALIGN_CHECK) {\n        if ((((size_t)input) & 
3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */\n            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);\n            else\n                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);\n    }   }\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);\n    else\n        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);\n#endif\n}\n\n\nstatic U64 XXH64_round(U64 acc, U64 input)\n{\n    acc += input * PRIME64_2;\n    acc  = XXH_rotl64(acc, 31);\n    acc *= PRIME64_1;\n    return acc;\n}\n\nstatic U64 XXH64_mergeRound(U64 acc, U64 val)\n{\n    val  = XXH64_round(0, val);\n    acc ^= val;\n    acc  = acc * PRIME64_1 + PRIME64_4;\n    return acc;\n}\n\nFORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)\n{\n    const BYTE* p = (const BYTE*)input;\n    const BYTE* const bEnd = p + len;\n    U64 h64;\n#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)\n\n#ifdef XXH_ACCEPT_NULL_INPUT_POINTER\n    if (p==NULL) {\n        len=0;\n        bEnd=p=(const BYTE*)(size_t)32;\n    }\n#endif\n\n    if (len>=32) {\n        const BYTE* const limit = bEnd - 32;\n        U64 v1 = seed + PRIME64_1 + PRIME64_2;\n        U64 v2 = seed + PRIME64_2;\n        U64 v3 = seed + 0;\n        U64 v4 = seed - PRIME64_1;\n\n        do {\n            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;\n            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;\n            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;\n            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;\n        } while (p<=limit);\n\n        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);\n        h64 = 
XXH64_mergeRound(h64, v1);\n        h64 = XXH64_mergeRound(h64, v2);\n        h64 = XXH64_mergeRound(h64, v3);\n        h64 = XXH64_mergeRound(h64, v4);\n\n    } else {\n        h64  = seed + PRIME64_5;\n    }\n\n    h64 += (U64) len;\n\n    while (p+8<=bEnd) {\n        U64 const k1 = XXH64_round(0, XXH_get64bits(p));\n        h64 ^= k1;\n        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;\n        p+=8;\n    }\n\n    if (p+4<=bEnd) {\n        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;\n        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;\n        p+=4;\n    }\n\n    while (p<bEnd) {\n        h64 ^= (*p) * PRIME64_5;\n        h64 = XXH_rotl64(h64, 11) * PRIME64_1;\n        p++;\n    }\n\n    h64 ^= h64 >> 33;\n    h64 *= PRIME64_2;\n    h64 ^= h64 >> 29;\n    h64 *= PRIME64_3;\n    h64 ^= h64 >> 32;\n\n    return h64;\n}\n\n\nXXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)\n{\n#if 0\n    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */\n    XXH64_CREATESTATE_STATIC(state);\n    XXH64_reset(state, seed);\n    XXH64_update(state, input, len);\n    return XXH64_digest(state);\n#else\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if (XXH_FORCE_ALIGN_CHECK) {\n        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */\n            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);\n            else\n                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);\n    }   }\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);\n    else\n        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);\n#endif\n}\n\n\n/* 
**************************************************\n*  Advanced Hash Functions\n****************************************************/\n\nXXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)\n{\n    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));\n}\nXXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)\n{\n    XXH_free(statePtr);\n    return XXH_OK;\n}\n\nXXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)\n{\n    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));\n}\nXXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)\n{\n    XXH_free(statePtr);\n    return XXH_OK;\n}\n\n\n/*** Hash feed ***/\n\nXXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)\n{\n    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */\n    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */\n    state.v1 = seed + PRIME32_1 + PRIME32_2;\n    state.v2 = seed + PRIME32_2;\n    state.v3 = seed + 0;\n    state.v4 = seed - PRIME32_1;\n    memcpy(statePtr, &state, sizeof(state));\n    return XXH_OK;\n}\n\n\nXXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)\n{\n    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */\n    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */\n    state.v1 = seed + PRIME64_1 + PRIME64_2;\n    state.v2 = seed + PRIME64_2;\n    state.v3 = seed + 0;\n    state.v4 = seed - PRIME64_1;\n    memcpy(statePtr, &state, sizeof(state));\n    return XXH_OK;\n}\n\n\nFORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)\n{\n    const BYTE* p = (const BYTE*)input;\n    const BYTE* const bEnd = p + len;\n\n#ifdef XXH_ACCEPT_NULL_INPUT_POINTER\n    if (input==NULL) return XXH_ERROR;\n#endif\n\n    
state->total_len_32 += (unsigned)len;\n    state->large_len |= (len>=16) | (state->total_len_32>=16);\n\n    if (state->memsize + len < 16)  {   /* fill in tmp buffer */\n        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);\n        state->memsize += (unsigned)len;\n        return XXH_OK;\n    }\n\n    if (state->memsize) {   /* some data left from previous update */\n        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);\n        {   const U32* p32 = state->mem32;\n            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;\n            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;\n            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;\n            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;\n        }\n        p += 16-state->memsize;\n        state->memsize = 0;\n    }\n\n    if (p <= bEnd-16) {\n        const BYTE* const limit = bEnd - 16;\n        U32 v1 = state->v1;\n        U32 v2 = state->v2;\n        U32 v3 = state->v3;\n        U32 v4 = state->v4;\n\n        do {\n            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;\n            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;\n            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;\n            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;\n        } while (p<=limit);\n\n        state->v1 = v1;\n        state->v2 = v2;\n        state->v3 = v3;\n        state->v4 = v4;\n    }\n\n    if (p < bEnd) {\n        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));\n        state->memsize = (unsigned)(bEnd-p);\n    }\n\n    return XXH_OK;\n}\n\nXXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)\n{\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH32_update_endian(state_in, 
input, len, XXH_littleEndian);\n    else\n        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);\n}\n\n\n\nFORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)\n{\n    const BYTE * p = (const BYTE*)state->mem32;\n    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;\n    U32 h32;\n\n    if (state->large_len) {\n        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);\n    } else {\n        h32 = state->v3 /* == seed */ + PRIME32_5;\n    }\n\n    h32 += state->total_len_32;\n\n    while (p+4<=bEnd) {\n        h32 += XXH_readLE32(p, endian) * PRIME32_3;\n        h32  = XXH_rotl32(h32, 17) * PRIME32_4;\n        p+=4;\n    }\n\n    while (p<bEnd) {\n        h32 += (*p) * PRIME32_5;\n        h32  = XXH_rotl32(h32, 11) * PRIME32_1;\n        p++;\n    }\n\n    h32 ^= h32 >> 15;\n    h32 *= PRIME32_2;\n    h32 ^= h32 >> 13;\n    h32 *= PRIME32_3;\n    h32 ^= h32 >> 16;\n\n    return h32;\n}\n\n\nXXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)\n{\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH32_digest_endian(state_in, XXH_littleEndian);\n    else\n        return XXH32_digest_endian(state_in, XXH_bigEndian);\n}\n\n\n\n/* **** XXH64 **** */\n\nFORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)\n{\n    const BYTE* p = (const BYTE*)input;\n    const BYTE* const bEnd = p + len;\n\n#ifdef XXH_ACCEPT_NULL_INPUT_POINTER\n    if (input==NULL) return XXH_ERROR;\n#endif\n\n    state->total_len += len;\n\n    if (state->memsize + len < 32) {  /* fill in tmp buffer */\n        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);\n        state->memsize += (U32)len;\n        return XXH_OK;\n    }\n\n    if 
(state->memsize) {   /* tmp buffer is full */\n        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);\n        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));\n        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));\n        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));\n        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));\n        p += 32-state->memsize;\n        state->memsize = 0;\n    }\n\n    if (p+32 <= bEnd) {\n        const BYTE* const limit = bEnd - 32;\n        U64 v1 = state->v1;\n        U64 v2 = state->v2;\n        U64 v3 = state->v3;\n        U64 v4 = state->v4;\n\n        do {\n            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;\n            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;\n            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;\n            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;\n        } while (p<=limit);\n\n        state->v1 = v1;\n        state->v2 = v2;\n        state->v3 = v3;\n        state->v4 = v4;\n    }\n\n    if (p < bEnd) {\n        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));\n        state->memsize = (unsigned)(bEnd-p);\n    }\n\n    return XXH_OK;\n}\n\nXXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)\n{\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);\n    else\n        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);\n}\n\n\n\nFORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)\n{\n    const BYTE * p = (const BYTE*)state->mem64;\n    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;\n    U64 h64;\n\n    if (state->total_len >= 32) {\n 
       U64 const v1 = state->v1;\n        U64 const v2 = state->v2;\n        U64 const v3 = state->v3;\n        U64 const v4 = state->v4;\n\n        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);\n        h64 = XXH64_mergeRound(h64, v1);\n        h64 = XXH64_mergeRound(h64, v2);\n        h64 = XXH64_mergeRound(h64, v3);\n        h64 = XXH64_mergeRound(h64, v4);\n    } else {\n        h64  = state->v3 + PRIME64_5;\n    }\n\n    h64 += (U64) state->total_len;\n\n    while (p+8<=bEnd) {\n        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));\n        h64 ^= k1;\n        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;\n        p+=8;\n    }\n\n    if (p+4<=bEnd) {\n        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;\n        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;\n        p+=4;\n    }\n\n    while (p<bEnd) {\n        h64 ^= (*p) * PRIME64_5;\n        h64  = XXH_rotl64(h64, 11) * PRIME64_1;\n        p++;\n    }\n\n    h64 ^= h64 >> 33;\n    h64 *= PRIME64_2;\n    h64 ^= h64 >> 29;\n    h64 *= PRIME64_3;\n    h64 ^= h64 >> 32;\n\n    return h64;\n}\n\n\nXXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)\n{\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH64_digest_endian(state_in, XXH_littleEndian);\n    else\n        return XXH64_digest_endian(state_in, XXH_bigEndian);\n}\n\n\n/* **************************\n*  Canonical representation\n****************************/\n\n/*! 
Default XXH result types are basic unsigned 32 and 64 bits.\n*   The canonical representation follows human-readable write convention, aka big-endian (large digits first).\n*   These functions allow transformation of hash result into and from its canonical format.\n*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.\n*/\n\nXXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)\n{\n    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));\n    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);\n    memcpy(dst, &hash, sizeof(*dst));\n}\n\nXXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)\n{\n    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));\n    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);\n    memcpy(dst, &hash, sizeof(*dst));\n}\n\nXXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)\n{\n    return XXH_readBE32(src);\n}\n\nXXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)\n{\n    return XXH_readBE64(src);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/xxhash.h",
    "content": "/*\n   xxHash - Extremely Fast Hash algorithm\n   Header File\n   Copyright (C) 2012-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - xxHash source repository : https://github.com/Cyan4973/xxHash\n*/\n\n/* Notice extracted from xxHash homepage :\n\nxxHash is an extremely fast Hash algorithm, running at RAM speed limits.\nIt also successfully passes all tests from the SMHasher suite.\n\nComparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)\n\nName            Speed       Q.Score   Author\nxxHash          5.4 GB/s     
10\nCrapWow         3.2 GB/s      2       Andrew\nMumurHash 3a    2.7 GB/s     10       Austin Appleby\nSpookyHash      2.0 GB/s     10       Bob Jenkins\nSBox            1.4 GB/s      9       Bret Mulvey\nLookup3         1.2 GB/s      9       Bob Jenkins\nSuperFastHash   1.2 GB/s      1       Paul Hsieh\nCityHash64      1.05 GB/s    10       Pike & Alakuijala\nFNV             0.55 GB/s     5       Fowler, Noll, Vo\nCRC32           0.43 GB/s     9\nMD5-32          0.33 GB/s    10       Ronald L. Rivest\nSHA1-32         0.28 GB/s    10\n\nQ.Score is a measure of quality of the hash function.\nIt depends on successfully passing SMHasher test set.\n10 is a perfect score.\n\nA 64-bits version, named XXH64, is available since r35.\nIt offers much better speed, but for 64-bits applications only.\nName     Speed on 64 bits    Speed on 32 bits\nXXH64       13.8 GB/s            1.9 GB/s\nXXH32        6.8 GB/s            6.0 GB/s\n*/\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#ifndef XXHASH_H_5627135585666179\n#define XXHASH_H_5627135585666179 1\n\n\n/* ****************************\n*  Definitions\n******************************/\n#include <stddef.h>   /* size_t */\ntypedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;\n\n\n/* ****************************\n*  API modifier\n******************************/\n/** XXH_PRIVATE_API\n*   This is useful if you want to include xxhash functions in `static` mode\n*   in order to inline them, and remove their symbol from the public list.\n*   Methodology :\n*     #define XXH_PRIVATE_API\n*     #include \"xxhash.h\"\n*   `xxhash.c` is automatically included.\n*   It's not useful to compile and link it as a separate module anymore.\n*/\n#ifdef XXH_PRIVATE_API\n#  ifndef XXH_STATIC_LINKING_ONLY\n#    define XXH_STATIC_LINKING_ONLY\n#  endif\n#  if defined(__GNUC__)\n#    define XXH_PUBLIC_API static __inline __attribute__((unused))\n#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* 
C99 */)\n#    define XXH_PUBLIC_API static inline\n#  elif defined(_MSC_VER)\n#    define XXH_PUBLIC_API static __inline\n#  else\n#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */\n#  endif\n#else\n#  define XXH_PUBLIC_API   /* do nothing */\n#endif /* XXH_PRIVATE_API */\n\n/*!XXH_NAMESPACE, aka Namespace Emulation :\n\nIf you want to include _and expose_ xxHash functions from within your own library,\nbut also want to avoid symbol collisions with another library which also includes xxHash,\n\nyou can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library\nwith the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values).\n\nNote that no change is required within the calling program as long as it includes `xxhash.h` :\nregular symbol name will be automatically translated by this header.\n*/\n#ifdef XXH_NAMESPACE\n#  define XXH_CAT(A,B) A##B\n#  define XXH_NAME2(A,B) XXH_CAT(A,B)\n#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)\n#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)\n#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)\n#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)\n#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)\n#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)\n#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)\n#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)\n#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)\n#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)\n#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)\n#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)\n#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)\n#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)\n#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)\n#  define 
XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)\n#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)\n#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)\n#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)\n#endif\n\n\n/* *************************************\n*  Version\n***************************************/\n#define XXH_VERSION_MAJOR    0\n#define XXH_VERSION_MINOR    6\n#define XXH_VERSION_RELEASE  2\n#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)\nXXH_PUBLIC_API unsigned XXH_versionNumber (void);\n\n\n/* ****************************\n*  Simple Hash Functions\n******************************/\ntypedef unsigned int       XXH32_hash_t;\ntypedef unsigned long long XXH64_hash_t;\n\nXXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);\nXXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);\n\n/*!\nXXH32() :\n    Calculate the 32-bits hash of sequence \"length\" bytes stored at memory address \"input\".\n    The memory between input & input+length must be valid (allocated and read-accessible).\n    \"seed\" can be used to alter the result predictably.\n    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s\nXXH64() :\n    Calculate the 64-bits hash of sequence of length \"len\" stored at memory address \"input\".\n    \"seed\" can be used to alter the result predictably.\n    This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark).\n*/\n\n\n/* ****************************\n*  Streaming Hash Functions\n******************************/\ntypedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */\ntypedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */\n\n/*! 
State allocation, compatible with dynamic libraries */\n\nXXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);\nXXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);\n\nXXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);\nXXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);\n\n\n/* hash streaming */\n\nXXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);\nXXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);\nXXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);\n\nXXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);\nXXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);\nXXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);\n\n/*\nThese functions generate the xxHash of an input provided in multiple segments.\nNote that, for small input, they are slower than single-call functions, due to state management.\nFor small input, prefer `XXH32()` and `XXH64()` .\n\nXXH state must first be allocated, using XXH*_createState() .\n\nStart a new hash by initializing state with a seed, using XXH*_reset().\n\nThen, feed the hash state by calling XXH*_update() as many times as necessary.\nObviously, input must be allocated and read accessible.\nThe function returns an error code, with 0 meaning OK, and any other value meaning there is an error.\n\nFinally, a hash value can be produced anytime, by using XXH*_digest().\nThis function returns the nn-bits hash as an int or long long.\n\nIt's still possible to continue inserting input into the hash state after a digest,\nand generate some new hashes later on, by calling again XXH*_digest().\n\nWhen done, free XXH state space if it was allocated dynamically.\n*/\n\n\n/* **************************\n*  Utils\n****************************/\n#if !(defined(__STDC_VERSION__) && 
(__STDC_VERSION__ >= 199901L))   /* ! C99 */\n#  define restrict   /* disable restrict */\n#endif\n\nXXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);\nXXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);\n\n\n/* **************************\n*  Canonical representation\n****************************/\n/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.\n*  The canonical representation uses human-readable write convention, aka big-endian (large digits first).\n*  These functions allow transformation of hash result into and from its canonical format.\n*  This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.\n*/\ntypedef struct { unsigned char digest[4]; } XXH32_canonical_t;\ntypedef struct { unsigned char digest[8]; } XXH64_canonical_t;\n\nXXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);\nXXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);\n\nXXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);\nXXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);\n\n#endif /* XXHASH_H_5627135585666179 */\n\n\n\n/* ================================================================================================\n   This section contains definitions which are not guaranteed to remain stable.\n   They may change in future versions, becoming incompatible with a different version of the library.\n   They shall only be used with static linking.\n   Never use these definitions in association with dynamic linking !\n=================================================================================================== */\n#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)\n#define XXH_STATIC_H_3543687687345\n\n/* These definitions 
are only meant to allow allocation of XXH state\n   statically, on stack, or in a struct for example.\n   Do not use members directly. */\n\n   struct XXH32_state_s {\n       unsigned total_len_32;\n       unsigned large_len;\n       unsigned v1;\n       unsigned v2;\n       unsigned v3;\n       unsigned v4;\n       unsigned mem32[4];   /* buffer defined as U32 for alignment */\n       unsigned memsize;\n       unsigned reserved;   /* never read nor write, will be removed in a future version */\n   };   /* typedef'd to XXH32_state_t */\n\n   struct XXH64_state_s {\n       unsigned long long total_len;\n       unsigned long long v1;\n       unsigned long long v2;\n       unsigned long long v3;\n       unsigned long long v4;\n       unsigned long long mem64[4];   /* buffer defined as U64 for alignment */\n       unsigned memsize;\n       unsigned reserved[2];          /* never read nor write, will be removed in a future version */\n   };   /* typedef'd to XXH64_state_t */\n\n\n#  ifdef XXH_PRIVATE_API\n#    include \"xxhash.c\"   /* include xxhash functions as `static`, for inlining */\n#  endif\n\n#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */\n\n\n#if defined (__cplusplus)\n}\n#endif\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/zstd_common.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include <stdlib.h>      /* malloc, calloc, free */\n#include <string.h>      /* memset */\n#include \"error_private.h\"\n#include \"zstd_internal.h\"\n\n\n/*-****************************************\n*  Version\n******************************************/\nunsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }\n\nconst char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }\n\n\n/*-****************************************\n*  ZSTD Error Management\n******************************************/\n#undef ZSTD_isError   /* defined within zstd_internal.h */\n/*! ZSTD_isError() :\n *  tells if a return value is an error code\n *  symbol is required for external callers */\nunsigned ZSTD_isError(size_t code) { return ERR_isError(code); }\n\n/*! ZSTD_getErrorName() :\n *  provides error code string from function result (useful for debugging) */\nconst char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\n/*! ZSTD_getError() :\n *  convert a `size_t` function result into a proper ZSTD_errorCode enum */\nZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }\n\n/*! 
ZSTD_getErrorString() :\n *  provides error code string from enum */\nconst char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }\n\n\n\n/*=**************************************************************\n*  Custom allocator\n****************************************************************/\nvoid* ZSTD_malloc(size_t size, ZSTD_customMem customMem)\n{\n    if (customMem.customAlloc)\n        return customMem.customAlloc(customMem.opaque, size);\n    return malloc(size);\n}\n\nvoid* ZSTD_calloc(size_t size, ZSTD_customMem customMem)\n{\n    if (customMem.customAlloc) {\n        /* calloc implemented as malloc+memset;\n         * not as efficient as calloc, but next best guess for custom malloc */\n        void* const ptr = customMem.customAlloc(customMem.opaque, size);\n        memset(ptr, 0, size);\n        return ptr;\n    }\n    return calloc(1, size);\n}\n\nvoid ZSTD_free(void* ptr, ZSTD_customMem customMem)\n{\n    if (ptr!=NULL) {\n        if (customMem.customFree)\n            customMem.customFree(customMem.opaque, ptr);\n        else\n            free(ptr);\n    }\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/zstd_errors.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_ERRORS_H_398273423\n#define ZSTD_ERRORS_H_398273423\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*===== dependency =====*/\n#include <stddef.h>   /* size_t */\n\n\n/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */\n#ifndef ZSTDERRORLIB_VISIBILITY\n#  if defined(__GNUC__) && (__GNUC__ >= 4)\n#    define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility (\"default\")))\n#  else\n#    define ZSTDERRORLIB_VISIBILITY\n#  endif\n#endif\n#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)\n#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY\n#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)\n#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/\n#else\n#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY\n#endif\n\n/*-*********************************************\n *  Error codes list\n *-*********************************************\n *  Error codes _values_ are pinned down since v1.3.1 only.\n *  Therefore, don't rely on values if you may link to any version < v1.3.1.\n *\n *  Only values < 100 are considered stable.\n *\n *  note 1 : this API shall be used with static linking only.\n *           dynamic linking is not yet officially supported.\n *  note 2 : Prefer relying on the enum than on its value whenever possible\n *           This is the only supported way to use the error list < v1.3.1\n *  note 3 : ZSTD_isError() is always correct, 
whatever the library version.\n **********************************************/\ntypedef enum {\n  ZSTD_error_no_error = 0,\n  ZSTD_error_GENERIC  = 1,\n  ZSTD_error_prefix_unknown                = 10,\n  ZSTD_error_version_unsupported           = 12,\n  ZSTD_error_frameParameter_unsupported    = 14,\n  ZSTD_error_frameParameter_windowTooLarge = 16,\n  ZSTD_error_corruption_detected = 20,\n  ZSTD_error_checksum_wrong      = 22,\n  ZSTD_error_dictionary_corrupted      = 30,\n  ZSTD_error_dictionary_wrong          = 32,\n  ZSTD_error_dictionaryCreation_failed = 34,\n  ZSTD_error_parameter_unsupported   = 40,\n  ZSTD_error_parameter_outOfBound    = 42,\n  ZSTD_error_tableLog_tooLarge       = 44,\n  ZSTD_error_maxSymbolValue_tooLarge = 46,\n  ZSTD_error_maxSymbolValue_tooSmall = 48,\n  ZSTD_error_stage_wrong       = 60,\n  ZSTD_error_init_missing      = 62,\n  ZSTD_error_memory_allocation = 64,\n  ZSTD_error_workSpace_tooSmall= 66,\n  ZSTD_error_dstSize_tooSmall = 70,\n  ZSTD_error_srcSize_wrong    = 72,\n  ZSTD_error_dstBuffer_null   = 74,\n  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */\n  ZSTD_error_frameIndex_tooLarge = 100,\n  ZSTD_error_seekableIO          = 102,\n  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */\n} ZSTD_ErrorCode;\n\n/*! ZSTD_getErrorCode() :\n    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,\n    which can be used to compare with enum list published above */\nZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);\nZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_ERRORS_H_398273423 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/common/zstd_internal.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_CCOMMON_H_MODULE\n#define ZSTD_CCOMMON_H_MODULE\n\n/* this module contains definitions which must be identical\n * across compression, decompression and dictBuilder.\n * It also contains a few functions useful to at least 2 of them\n * and which benefit from being inlined */\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include \"compiler.h\"\n#include \"mem.h\"\n#include \"debug.h\"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */\n#include \"error_private.h\"\n#define ZSTD_STATIC_LINKING_ONLY\n#include \"zstd.h\"\n#define FSE_STATIC_LINKING_ONLY\n#include \"fse.h\"\n#define HUF_STATIC_LINKING_ONLY\n#include \"huf.h\"\n#ifndef XXH_STATIC_LINKING_ONLY\n#  define XXH_STATIC_LINKING_ONLY  /* XXH64_state_t */\n#endif\n#include \"xxhash.h\"                /* XXH_reset, update, digest */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* ---- static assert (debug) --- */\n#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)\n#define ZSTD_isError ERR_isError   /* for inlining */\n#define FSE_isError  ERR_isError\n#define HUF_isError  ERR_isError\n\n\n/*-*************************************\n*  shared macros\n***************************************/\n#undef MIN\n#undef MAX\n#define MIN(a,b) ((a)<(b) ? (a) : (b))\n#define MAX(a,b) ((a)>(b) ? 
(a) : (b))\n\n/**\n * Return the specified error if the condition evaluates to true.\n *\n * In debug modes, prints additional information.\n * In order to do that (particularly, printing the conditional that failed),\n * this can't just wrap RETURN_ERROR().\n */\n#define RETURN_ERROR_IF(cond, err, ...) \\\n  if (cond) { \\\n    RAWLOG(3, \"%s:%d: ERROR!: check %s failed, returning %s\", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \\\n    RAWLOG(3, \": \" __VA_ARGS__); \\\n    RAWLOG(3, \"\\n\"); \\\n    return ERROR(err); \\\n  }\n\n/**\n * Unconditionally return the specified error.\n *\n * In debug modes, prints additional information.\n */\n#define RETURN_ERROR(err, ...) \\\n  do { \\\n    RAWLOG(3, \"%s:%d: ERROR!: unconditional check failed, returning %s\", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \\\n    RAWLOG(3, \": \" __VA_ARGS__); \\\n    RAWLOG(3, \"\\n\"); \\\n    return ERROR(err); \\\n  } while(0);\n\n/**\n * If the provided expression evaluates to an error code, returns that error code.\n *\n * In debug modes, prints additional information.\n */\n#define FORWARD_IF_ERROR(err, ...) 
\\\n  do { \\\n    size_t const err_code = (err); \\\n    if (ERR_isError(err_code)) { \\\n      RAWLOG(3, \"%s:%d: ERROR!: forwarding error in %s: %s\", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \\\n      RAWLOG(3, \": \" __VA_ARGS__); \\\n      RAWLOG(3, \"\\n\"); \\\n      return err_code; \\\n    } \\\n  } while(0);\n\n\n/*-*************************************\n*  Common constants\n***************************************/\n#define ZSTD_OPT_NUM    (1<<12)\n\n#define ZSTD_REP_NUM      3                 /* number of repcodes */\n#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)\nstatic const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define BIT7 128\n#define BIT6  64\n#define BIT5  32\n#define BIT4  16\n#define BIT1   2\n#define BIT0   1\n\n#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10\nstatic const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };\nstatic const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };\n\n#define ZSTD_FRAMEIDSIZE 4   /* magic number size */\n\n#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */\nstatic const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;\ntypedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;\n\n#define ZSTD_FRAMECHECKSUMSIZE 4\n\n#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */\n#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */\n\n#define HufLog 12\ntypedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;\n\n#define LONGNBSEQ 0x7F00\n\n#define MINMATCH 3\n\n#define Litbits  8\n#define MaxLit ((1<<Litbits) - 1)\n#define MaxML   52\n#define MaxLL   35\n#define DefaultMaxOff 28\n#define MaxOff  31\n#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */\n#define MLFSELog    9\n#define LLFSELog    9\n#define OffFSELog   
8\n#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)\n\nstatic const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0,\n                                      0, 0, 0, 0, 0, 0, 0, 0,\n                                      1, 1, 1, 1, 2, 2, 3, 3,\n                                      4, 6, 7, 8, 9,10,11,12,\n                                     13,14,15,16 };\nstatic const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2,\n                                             2, 2, 2, 2, 2, 1, 1, 1,\n                                             2, 2, 2, 2, 2, 2, 2, 2,\n                                             2, 3, 2, 1, 1, 1, 1, 1,\n                                            -1,-1,-1,-1 };\n#define LL_DEFAULTNORMLOG 6  /* for static allocation */\nstatic const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;\n\nstatic const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0,\n                                      0, 0, 0, 0, 0, 0, 0, 0,\n                                      0, 0, 0, 0, 0, 0, 0, 0,\n                                      0, 0, 0, 0, 0, 0, 0, 0,\n                                      1, 1, 1, 1, 2, 2, 3, 3,\n                                      4, 4, 5, 7, 8, 9,10,11,\n                                     12,13,14,15,16 };\nstatic const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2,\n                                             2, 1, 1, 1, 1, 1, 1, 1,\n                                             1, 1, 1, 1, 1, 1, 1, 1,\n                                             1, 1, 1, 1, 1, 1, 1, 1,\n                                             1, 1, 1, 1, 1, 1, 1, 1,\n                                             1, 1, 1, 1, 1, 1,-1,-1,\n                                            -1,-1,-1,-1,-1 };\n#define ML_DEFAULTNORMLOG 6  /* for static allocation */\nstatic const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;\n\nstatic const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2,\n                                                     2, 1, 
1, 1, 1, 1, 1, 1,\n                                                     1, 1, 1, 1, 1, 1, 1, 1,\n                                                    -1,-1,-1,-1,-1 };\n#define OF_DEFAULTNORMLOG 5  /* for static allocation */\nstatic const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;\n\n\n/*-*******************************************\n*  Shared functions to include for inlining\n*********************************************/\nstatic void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }\n\n#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }\nstatic void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); }\n#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }\n\n#define WILDCOPY_OVERLENGTH 32\n#define WILDCOPY_VECLEN 16\n\ntypedef enum {\n    ZSTD_no_overlap,\n    ZSTD_overlap_src_before_dst\n    /*  ZSTD_overlap_dst_before_src, */\n} ZSTD_overlap_e;\n\n/*! ZSTD_wildcopy() :\n *  Custom version of memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)\n *  @param ovtype controls the overlap detection\n *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.\n *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.\n *           The src buffer must be before the dst buffer.\n */\nMEM_STATIC FORCE_INLINE_ATTR \nvoid ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)\n{\n    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + length;\n\n    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));\n\n    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {\n        /* Handle short offset copies. 
*/\n        do {\n            COPY8(op, ip)\n        } while (op < oend);\n    } else {\n        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);\n        /* Separate out the first COPY16() call because the copy length is\n         * almost certain to be short, so the branches have different\n         * probabilities. Since it is almost certain to be short, only do\n\t * one COPY16() in the first call. Then, do two calls per loop since\n\t * at that point it is more likely to have a high trip count.\n         */\n        COPY16(op, ip);\n        if (op >= oend) return;\n        do {\n            COPY16(op, ip);\n            COPY16(op, ip);\n        }\n        while (op < oend);\n    }\n}\n\n\n/*-*******************************************\n*  Private declarations\n*********************************************/\ntypedef struct seqDef_s {\n    U32 offset;\n    U16 litLength;\n    U16 matchLength;\n} seqDef;\n\ntypedef struct {\n    seqDef* sequencesStart;\n    seqDef* sequences;\n    BYTE* litStart;\n    BYTE* lit;\n    BYTE* llCode;\n    BYTE* mlCode;\n    BYTE* ofCode;\n    size_t maxNbSeq;\n    size_t maxNbLit;\n    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */\n    U32   longLengthPos;\n} seqStore_t;\n\n/**\n * Contains the compressed frame size and an upper-bound for the decompressed frame size.\n * Note: before using `compressedSize`, check for errors using ZSTD_isError().\n *       similarly, before using `decompressedBound`, check for errors using:\n *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`\n */\ntypedef struct {\n    size_t compressedSize;\n    unsigned long long decompressedBound;\n} ZSTD_frameSizeInfo;   /* decompress & legacy */\n\nconst seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */\nvoid ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */\n\n/* custom memory 
allocation functions */\nvoid* ZSTD_malloc(size_t size, ZSTD_customMem customMem);\nvoid* ZSTD_calloc(size_t size, ZSTD_customMem customMem);\nvoid ZSTD_free(void* ptr, ZSTD_customMem customMem);\n\n\nMEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */\n{\n    assert(val != 0);\n    {\n#   if defined(_MSC_VER)   /* Visual */\n        unsigned long r=0;\n        _BitScanReverse(&r, val);\n        return (unsigned)r;\n#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */\n        return __builtin_clz (val) ^ 31;\n#   elif defined(__ICCARM__)    /* IAR Intrinsic */\n        return 31 - __CLZ(val);\n#   else   /* Software version */\n        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };\n        U32 v = val;\n        v |= v >> 1;\n        v |= v >> 2;\n        v |= v >> 4;\n        v |= v >> 8;\n        v |= v >> 16;\n        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];\n#   endif\n    }\n}\n\n\n/* ZSTD_invalidateRepCodes() :\n * ensures next compression will not use repcodes from previous block.\n * Note : only works with regular variant;\n *        do not use with extDict variant ! */\nvoid ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */\n\n\ntypedef struct {\n    blockType_e blockType;\n    U32 lastBlock;\n    U32 origSize;\n} blockProperties_t;   /* declared here for decompress and fullbench */\n\n/*! ZSTD_getcBlockSize() :\n *  Provides the size of compressed block from block header `src` */\n/* Used by: decompress, fullbench (does not get its definition from here) */\nsize_t ZSTD_getcBlockSize(const void* src, size_t srcSize,\n                          blockProperties_t* bpPtr);\n\n/*! 
ZSTD_decodeSeqHeaders() :\n *  decode sequence header from src */\n/* Used by: decompress, fullbench (does not get its definition from here) */\nsize_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,\n                       const void* src, size_t srcSize);\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif   /* ZSTD_CCOMMON_H_MODULE */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/fse_compress.c",
    "content": "/* ******************************************************************\n   FSE : Finite State Entropy encoder\n   Copyright (C) 2013-present, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/* **************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include \"compiler.h\"\n#include \"mem.h\"        /* U32, U16, etc. 
*/\n#include \"debug.h\"      /* assert, DEBUGLOG */\n#include \"hist.h\"       /* HIST_count_wksp */\n#include \"bitstream.h\"\n#define FSE_STATIC_LINKING_ONLY\n#include \"fse.h\"\n#include \"error_private.h\"\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\n#define FSE_isError ERR_isError\n\n\n/* **************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef FSE_FUNCTION_EXTENSION\n#  error \"FSE_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSE_FUNCTION_TYPE\n#  error \"FSE_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSE_CAT(X,Y) X##Y\n#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)\n#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)\n\n\n/* Function templates */\n\n/* FSE_buildCTable_wksp() :\n * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).\n * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`\n * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements\n */\nsize_t FSE_buildCTable_wksp(FSE_CTable* ct,\n                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,\n                            void* workSpace, size_t wkspSize)\n{\n    U32 const tableSize = 1 << tableLog;\n    U32 const tableMask = tableSize - 1;\n    void* const ptr = ct;\n    U16* const tableU16 = ( (U16*) ptr) + 2;\n    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? 
tableSize>>1 : 1) ;\n    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);\n    U32 const step = FSE_TABLESTEP(tableSize);\n    U32 cumul[FSE_MAX_SYMBOL_VALUE+2];\n\n    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;\n    U32 highThreshold = tableSize-1;\n\n    /* CTable header */\n    if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);\n    tableU16[-2] = (U16) tableLog;\n    tableU16[-1] = (U16) maxSymbolValue;\n    assert(tableLog < 16);   /* required for threshold strategy to work */\n\n    /* For explanations on how to distribute symbol values over the table :\n     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */\n\n     #ifdef __clang_analyzer__\n     memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */\n     #endif\n\n    /* symbol start positions */\n    {   U32 u;\n        cumul[0] = 0;\n        for (u=1; u <= maxSymbolValue+1; u++) {\n            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */\n                cumul[u] = cumul[u-1] + 1;\n                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);\n            } else {\n                cumul[u] = cumul[u-1] + normalizedCounter[u-1];\n        }   }\n        cumul[maxSymbolValue+1] = tableSize+1;\n    }\n\n    /* Spread symbols */\n    {   U32 position = 0;\n        U32 symbol;\n        for (symbol=0; symbol<=maxSymbolValue; symbol++) {\n            int nbOccurrences;\n            int const freq = normalizedCounter[symbol];\n            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {\n                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;\n                position = (position + step) & tableMask;\n                while (position > highThreshold)\n                    position = (position + step) & tableMask;   /* Low proba area */\n        }   }\n\n  
      assert(position==0);  /* Must have initialized all positions */\n    }\n\n    /* Build table */\n    {   U32 u; for (u=0; u<tableSize; u++) {\n        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */\n        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */\n    }   }\n\n    /* Build Symbol Transformation Table */\n    {   unsigned total = 0;\n        unsigned s;\n        for (s=0; s<=maxSymbolValue; s++) {\n            switch (normalizedCounter[s])\n            {\n            case  0:\n                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */\n                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);\n                break;\n\n            case -1:\n            case  1:\n                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);\n                symbolTT[s].deltaFindState = total - 1;\n                total ++;\n                break;\n            default :\n                {\n                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);\n                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;\n                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;\n                    symbolTT[s].deltaFindState = total - normalizedCounter[s];\n                    total +=  normalizedCounter[s];\n    }   }   }   }\n\n#if 0  /* debug : symbol costs */\n    DEBUGLOG(5, \"\\n --- table statistics : \");\n    {   U32 symbol;\n        for (symbol=0; symbol<=maxSymbolValue; symbol++) {\n            DEBUGLOG(5, \"%3u: w=%3i,   maxBits=%u, fracBits=%.2f\",\n                symbol, normalizedCounter[symbol],\n                FSE_getMaxNbBits(symbolTT, symbol),\n                (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);\n        }\n    }\n#endif\n\n    return 0;\n}\n\n\nsize_t 
FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if static analyzer complain about it */\n    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));\n}\n\n\n\n#ifndef FSE_COMMONDEFS_ONLY\n\n\n/*-**************************************************************\n*  FSE NCount encoding\n****************************************************************/\nsize_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)\n{\n    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;\n    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */\n}\n\nstatic size_t\nFSE_writeNCount_generic (void* header, size_t headerBufferSize,\n                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,\n                         unsigned writeIsSafe)\n{\n    BYTE* const ostart = (BYTE*) header;\n    BYTE* out = ostart;\n    BYTE* const oend = ostart + headerBufferSize;\n    int nbBits;\n    const int tableSize = 1 << tableLog;\n    int remaining;\n    int threshold;\n    U32 bitStream = 0;\n    int bitCount = 0;\n    unsigned symbol = 0;\n    unsigned const alphabetSize = maxSymbolValue + 1;\n    int previousIs0 = 0;\n\n    /* Table Size */\n    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;\n    bitCount  += 4;\n\n    /* Init */\n    remaining = tableSize+1;   /* +1 for extra accuracy */\n    threshold = tableSize;\n    nbBits = tableLog+1;\n\n    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */\n        if (previousIs0) {\n            unsigned start = symbol;\n            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;\n            if (symbol == alphabetSize) break;   /* incorrect distribution */\n            while (symbol >= start+24) 
{\n                start+=24;\n                bitStream += 0xFFFFU << bitCount;\n                if ((!writeIsSafe) && (out > oend-2))\n                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */\n                out[0] = (BYTE) bitStream;\n                out[1] = (BYTE)(bitStream>>8);\n                out+=2;\n                bitStream>>=16;\n            }\n            while (symbol >= start+3) {\n                start+=3;\n                bitStream += 3 << bitCount;\n                bitCount += 2;\n            }\n            bitStream += (symbol-start) << bitCount;\n            bitCount += 2;\n            if (bitCount>16) {\n                if ((!writeIsSafe) && (out > oend - 2))\n                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */\n                out[0] = (BYTE)bitStream;\n                out[1] = (BYTE)(bitStream>>8);\n                out += 2;\n                bitStream >>= 16;\n                bitCount -= 16;\n        }   }\n        {   int count = normalizedCounter[symbol++];\n            int const max = (2*threshold-1) - remaining;\n            remaining -= count < 0 ? -count : count;\n            count++;   /* +1 for extra accuracy */\n            if (count>=threshold)\n                count += max;   /* [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ */\n            bitStream += count << bitCount;\n            bitCount  += nbBits;\n            bitCount  -= (count<max);\n            previousIs0  = (count==1);\n            if (remaining<1) return ERROR(GENERIC);\n            while (remaining<threshold) { nbBits--; threshold>>=1; }\n        }\n        if (bitCount>16) {\n            if ((!writeIsSafe) && (out > oend - 2))\n                return ERROR(dstSize_tooSmall);   /* Buffer overflow */\n            out[0] = (BYTE)bitStream;\n            out[1] = (BYTE)(bitStream>>8);\n            out += 2;\n            bitStream >>= 16;\n            bitCount -= 16;\n    }   }\n\n    if (remaining != 1)\n        return ERROR(GENERIC);  /* incorrect normalized distribution */\n    assert(symbol <= alphabetSize);\n\n    /* flush remaining bitStream */\n    if ((!writeIsSafe) && (out > oend - 2))\n        return ERROR(dstSize_tooSmall);   /* Buffer overflow */\n    out[0] = (BYTE)bitStream;\n    out[1] = (BYTE)(bitStream>>8);\n    out+= (bitCount+7) /8;\n\n    return (out-ostart);\n}\n\n\nsize_t FSE_writeNCount (void* buffer, size_t bufferSize,\n                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */\n    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */\n\n    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))\n        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);\n\n    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);\n}\n\n\n/*-**************************************************************\n*  FSE Compression Code\n****************************************************************/\n\nFSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)\n{\n    size_t size;\n    if (tableLog > 
FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;\n    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);\n    return (FSE_CTable*)malloc(size);\n}\n\nvoid FSE_freeCTable (FSE_CTable* ct) { free(ct); }\n\n/* provides the minimum logSize to safely represent a distribution */\nstatic unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)\n{\n    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;\n    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;\n    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;\n    assert(srcSize > 1); /* Not supported, RLE should be used instead */\n    return minBits;\n}\n\nunsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)\n{\n    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;\n    U32 tableLog = maxTableLog;\n    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);\n    assert(srcSize > 1); /* Not supported, RLE should be used instead */\n    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;\n    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */\n    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */\n    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;\n    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;\n    return tableLog;\n}\n\nunsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)\n{\n    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);\n}\n\n\n/* Secondary normalization method.\n   To be used when primary method fails. 
*/\n\nstatic size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)\n{\n    short const NOT_YET_ASSIGNED = -2;\n    U32 s;\n    U32 distributed = 0;\n    U32 ToDistribute;\n\n    /* Init */\n    U32 const lowThreshold = (U32)(total >> tableLog);\n    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));\n\n    for (s=0; s<=maxSymbolValue; s++) {\n        if (count[s] == 0) {\n            norm[s]=0;\n            continue;\n        }\n        if (count[s] <= lowThreshold) {\n            norm[s] = -1;\n            distributed++;\n            total -= count[s];\n            continue;\n        }\n        if (count[s] <= lowOne) {\n            norm[s] = 1;\n            distributed++;\n            total -= count[s];\n            continue;\n        }\n\n        norm[s]=NOT_YET_ASSIGNED;\n    }\n    ToDistribute = (1 << tableLog) - distributed;\n\n    if (ToDistribute == 0)\n        return 0;\n\n    if ((total / ToDistribute) > lowOne) {\n        /* risk of rounding to zero */\n        lowOne = (U32)((total * 3) / (ToDistribute * 2));\n        for (s=0; s<=maxSymbolValue; s++) {\n            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {\n                norm[s] = 1;\n                distributed++;\n                total -= count[s];\n                continue;\n        }   }\n        ToDistribute = (1 << tableLog) - distributed;\n    }\n\n    if (distributed == maxSymbolValue+1) {\n        /* all values are pretty poor;\n           probably incompressible data (should have already been detected);\n           find max, then give all remaining points to max */\n        U32 maxV = 0, maxC = 0;\n        for (s=0; s<=maxSymbolValue; s++)\n            if (count[s] > maxC) { maxV=s; maxC=count[s]; }\n        norm[maxV] += (short)ToDistribute;\n        return 0;\n    }\n\n    if (total == 0) {\n        /* all of the symbols were low enough for the lowOne or lowThreshold */\n        for (s=0; ToDistribute > 0; s = 
(s+1)%(maxSymbolValue+1))\n            if (norm[s] > 0) { ToDistribute--; norm[s]++; }\n        return 0;\n    }\n\n    {   U64 const vStepLog = 62 - tableLog;\n        U64 const mid = (1ULL << (vStepLog-1)) - 1;\n        U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total;   /* scale on remaining */\n        U64 tmpTotal = mid;\n        for (s=0; s<=maxSymbolValue; s++) {\n            if (norm[s]==NOT_YET_ASSIGNED) {\n                U64 const end = tmpTotal + (count[s] * rStep);\n                U32 const sStart = (U32)(tmpTotal >> vStepLog);\n                U32 const sEnd = (U32)(end >> vStepLog);\n                U32 const weight = sEnd - sStart;\n                if (weight < 1)\n                    return ERROR(GENERIC);\n                norm[s] = (short)weight;\n                tmpTotal = end;\n    }   }   }\n\n    return 0;\n}\n\n\nsize_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,\n                           const unsigned* count, size_t total,\n                           unsigned maxSymbolValue)\n{\n    /* Sanity checks */\n    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;\n    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */\n    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */\n    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */\n\n    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };\n        U64 const scale = 62 - tableLog;\n        U64 const step = ((U64)1<<62) / total;   /* <== here, one division ! 
*/\n        U64 const vStep = 1ULL<<(scale-20);\n        int stillToDistribute = 1<<tableLog;\n        unsigned s;\n        unsigned largest=0;\n        short largestP=0;\n        U32 lowThreshold = (U32)(total >> tableLog);\n\n        for (s=0; s<=maxSymbolValue; s++) {\n            if (count[s] == total) return 0;   /* rle special case */\n            if (count[s] == 0) { normalizedCounter[s]=0; continue; }\n            if (count[s] <= lowThreshold) {\n                normalizedCounter[s] = -1;\n                stillToDistribute--;\n            } else {\n                short proba = (short)((count[s]*step) >> scale);\n                if (proba<8) {\n                    U64 restToBeat = vStep * rtbTable[proba];\n                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;\n                }\n                if (proba > largestP) { largestP=proba; largest=s; }\n                normalizedCounter[s] = proba;\n                stillToDistribute -= proba;\n        }   }\n        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {\n            /* corner case, need another normalization method */\n            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);\n            if (FSE_isError(errorCode)) return errorCode;\n        }\n        else normalizedCounter[largest] += (short)stillToDistribute;\n    }\n\n#if 0\n    {   /* Print Table (debug) */\n        U32 s;\n        U32 nTotal = 0;\n        for (s=0; s<=maxSymbolValue; s++)\n            RAWLOG(2, \"%3i: %4i \\n\", s, normalizedCounter[s]);\n        for (s=0; s<=maxSymbolValue; s++)\n            nTotal += abs(normalizedCounter[s]);\n        if (nTotal != (1U<<tableLog))\n            RAWLOG(2, \"Warning !!! 
Total == %u != %u !!!\", nTotal, 1U<<tableLog);\n        getchar();\n    }\n#endif\n\n    return tableLog;\n}\n\n\n/* fake FSE_CTable, for raw (uncompressed) input */\nsize_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)\n{\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSymbolValue = tableMask;\n    void* const ptr = ct;\n    U16* const tableU16 = ( (U16*) ptr) + 2;\n    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */\n    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);\n    unsigned s;\n\n    /* Sanity checks */\n    if (nbBits < 1) return ERROR(GENERIC);             /* min size */\n\n    /* header */\n    tableU16[-2] = (U16) nbBits;\n    tableU16[-1] = (U16) maxSymbolValue;\n\n    /* Build table */\n    for (s=0; s<tableSize; s++)\n        tableU16[s] = (U16)(tableSize + s);\n\n    /* Build Symbol Transformation Table */\n    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);\n        for (s=0; s<=maxSymbolValue; s++) {\n            symbolTT[s].deltaNbBits = deltaNbBits;\n            symbolTT[s].deltaFindState = s-1;\n    }   }\n\n    return 0;\n}\n\n/* fake FSE_CTable, for rle input (always same symbol) */\nsize_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)\n{\n    void* ptr = ct;\n    U16* tableU16 = ( (U16*) ptr) + 2;\n    void* FSCTptr = (U32*)ptr + 2;\n    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;\n\n    /* header */\n    tableU16[-2] = (U16) 0;\n    tableU16[-1] = (U16) symbolValue;\n\n    /* Build table */\n    tableU16[0] = 0;\n    tableU16[1] = 0;   /* just in case */\n\n    /* Build Symbol Transformation Table */\n    symbolTT[symbolValue].deltaNbBits = 0;\n    symbolTT[symbolValue].deltaFindState = 0;\n\n    return 0;\n}\n\n\nstatic size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,\n                    
       const void* src, size_t srcSize,\n                           const FSE_CTable* ct, const unsigned fast)\n{\n    const BYTE* const istart = (const BYTE*) src;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* ip=iend;\n\n    BIT_CStream_t bitC;\n    FSE_CState_t CState1, CState2;\n\n    /* init */\n    if (srcSize <= 2) return 0;\n    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);\n      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }\n\n#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))\n\n    if (srcSize & 1) {\n        FSE_initCState2(&CState1, ct, *--ip);\n        FSE_initCState2(&CState2, ct, *--ip);\n        FSE_encodeSymbol(&bitC, &CState1, *--ip);\n        FSE_FLUSHBITS(&bitC);\n    } else {\n        FSE_initCState2(&CState2, ct, *--ip);\n        FSE_initCState2(&CState1, ct, *--ip);\n    }\n\n    /* join to mod 4 */\n    srcSize -= 2;\n    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */\n        FSE_encodeSymbol(&bitC, &CState2, *--ip);\n        FSE_encodeSymbol(&bitC, &CState1, *--ip);\n        FSE_FLUSHBITS(&bitC);\n    }\n\n    /* 2 or 4 encoding per loop */\n    while ( ip>istart ) {\n\n        FSE_encodeSymbol(&bitC, &CState2, *--ip);\n\n        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */\n            FSE_FLUSHBITS(&bitC);\n\n        FSE_encodeSymbol(&bitC, &CState1, *--ip);\n\n        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */\n            FSE_encodeSymbol(&bitC, &CState2, *--ip);\n            FSE_encodeSymbol(&bitC, &CState1, *--ip);\n        }\n\n        FSE_FLUSHBITS(&bitC);\n    }\n\n    FSE_flushCState(&bitC, &CState2);\n    FSE_flushCState(&bitC, &CState1);\n    return BIT_closeCStream(&bitC);\n}\n\nsize_t FSE_compress_usingCTable (void* dst, size_t dstSize,\n                           const void* 
src, size_t srcSize,\n                           const FSE_CTable* ct)\n{\n    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));\n\n    if (fast)\n        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);\n    else\n        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);\n}\n\n\nsize_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }\n\n#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e\n#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }\n\n/* FSE_compress_wksp() :\n * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).\n * `wkspSize` size must be `(1<<tableLog)`.\n */\nsize_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + dstSize;\n\n    unsigned count[FSE_MAX_SYMBOL_VALUE+1];\n    S16   norm[FSE_MAX_SYMBOL_VALUE+1];\n    FSE_CTable* CTable = (FSE_CTable*)workSpace;\n    size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);\n    void* scratchBuffer = (void*)(CTable + CTableSize);\n    size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));\n\n    /* init conditions */\n    if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);\n    if (srcSize <= 1) return 0;  /* Not compressible */\n    if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;\n    if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;\n\n    /* Scan input and build symbol stats */\n    {   CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );\n        if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */\n        if (maxCount == 1) return 0;         /* each symbol present maximum once => not 
compressible */\n        if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */\n    }\n\n    tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);\n    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );\n\n    /* Write table description header */\n    {   CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );\n        op += nc_err;\n    }\n\n    /* Compress */\n    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );\n    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );\n        if (cSize == 0) return 0;   /* not enough space for compressed data */\n        op += cSize;\n    }\n\n    /* check compressibility */\n    if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;\n\n    return op-ostart;\n}\n\ntypedef struct {\n    FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];\n    BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];\n} fseWkspMax_t;\n\nsize_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)\n{\n    fseWkspMax_t scratchBuffer;\n    DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* compilation failures here means scratchBuffer is not large enough */\n    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);\n    return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));\n}\n\nsize_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);\n}\n\n\n#endif   /* FSE_COMMONDEFS_ONLY */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/hist.c",
    "content": "/* ******************************************************************\n   hist : Histogram functions\n   part of Finite State Entropy project\n   Copyright (C) 2013-present, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/* --- dependencies --- */\n#include \"mem.h\"             /* U32, BYTE, etc. 
*/\n#include \"debug.h\"           /* assert, DEBUGLOG */\n#include \"error_private.h\"   /* ERROR */\n#include \"hist.h\"\n\n\n/* --- Error management --- */\nunsigned HIST_isError(size_t code) { return ERR_isError(code); }\n\n/*-**************************************************************\n *  Histogram functions\n ****************************************************************/\nunsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,\n                           const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* const end = ip + srcSize;\n    unsigned maxSymbolValue = *maxSymbolValuePtr;\n    unsigned largestCount=0;\n\n    memset(count, 0, (maxSymbolValue+1) * sizeof(*count));\n    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }\n\n    while (ip<end) {\n        assert(*ip <= maxSymbolValue);\n        count[*ip++]++;\n    }\n\n    while (!count[maxSymbolValue]) maxSymbolValue--;\n    *maxSymbolValuePtr = maxSymbolValue;\n\n    {   U32 s;\n        for (s=0; s<=maxSymbolValue; s++)\n            if (count[s] > largestCount) largestCount = count[s];\n    }\n\n    return largestCount;\n}\n\ntypedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;\n\n/* HIST_count_parallel_wksp() :\n * store histogram into 4 intermediate tables, recombined at the end.\n * this design makes better use of OoO cpus,\n * and is noticeably faster when some values are heavily repeated.\n * But it needs some additional workspace for intermediate tables.\n * `workSpace` size must be a table of size >= HIST_WKSP_SIZE_U32.\n * @return : largest histogram frequency,\n *           or an error code (notably when histogram would be larger than *maxSymbolValuePtr). 
*/\nstatic size_t HIST_count_parallel_wksp(\n                                unsigned* count, unsigned* maxSymbolValuePtr,\n                                const void* source, size_t sourceSize,\n                                HIST_checkInput_e check,\n                                U32* const workSpace)\n{\n    const BYTE* ip = (const BYTE*)source;\n    const BYTE* const iend = ip+sourceSize;\n    unsigned maxSymbolValue = *maxSymbolValuePtr;\n    unsigned max=0;\n    U32* const Counting1 = workSpace;\n    U32* const Counting2 = Counting1 + 256;\n    U32* const Counting3 = Counting2 + 256;\n    U32* const Counting4 = Counting3 + 256;\n\n    memset(workSpace, 0, 4*256*sizeof(unsigned));\n\n    /* safety checks */\n    if (!sourceSize) {\n        memset(count, 0, maxSymbolValue + 1);\n        *maxSymbolValuePtr = 0;\n        return 0;\n    }\n    if (!maxSymbolValue) maxSymbolValue = 255;            /* 0 == default */\n\n    /* by stripes of 16 bytes */\n    {   U32 cached = MEM_read32(ip); ip += 4;\n        while (ip < iend-15) {\n            U32 c = cached; cached = MEM_read32(ip); ip += 4;\n            Counting1[(BYTE) c     ]++;\n            Counting2[(BYTE)(c>>8) ]++;\n            Counting3[(BYTE)(c>>16)]++;\n            Counting4[       c>>24 ]++;\n            c = cached; cached = MEM_read32(ip); ip += 4;\n            Counting1[(BYTE) c     ]++;\n            Counting2[(BYTE)(c>>8) ]++;\n            Counting3[(BYTE)(c>>16)]++;\n            Counting4[       c>>24 ]++;\n            c = cached; cached = MEM_read32(ip); ip += 4;\n            Counting1[(BYTE) c     ]++;\n            Counting2[(BYTE)(c>>8) ]++;\n            Counting3[(BYTE)(c>>16)]++;\n            Counting4[       c>>24 ]++;\n            c = cached; cached = MEM_read32(ip); ip += 4;\n            Counting1[(BYTE) c     ]++;\n            Counting2[(BYTE)(c>>8) ]++;\n            Counting3[(BYTE)(c>>16)]++;\n            Counting4[       c>>24 ]++;\n        }\n        ip-=4;\n    }\n\n    /* finish last 
symbols */\n    while (ip<iend) Counting1[*ip++]++;\n\n    if (check) {   /* verify stats will fit into destination table */\n        U32 s; for (s=255; s>maxSymbolValue; s--) {\n            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];\n            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);\n    }   }\n\n    {   U32 s;\n        if (maxSymbolValue > 255) maxSymbolValue = 255;\n        for (s=0; s<=maxSymbolValue; s++) {\n            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];\n            if (count[s] > max) max = count[s];\n    }   }\n\n    while (!count[maxSymbolValue]) maxSymbolValue--;\n    *maxSymbolValuePtr = maxSymbolValue;\n    return (size_t)max;\n}\n\n/* HIST_countFast_wksp() :\n * Same as HIST_countFast(), but using an externally provided scratch buffer.\n * `workSpace` is a writable buffer which must be 4-bytes aligned,\n * `workSpaceSize` must be >= HIST_WKSP_SIZE\n */\nsize_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,\n                          const void* source, size_t sourceSize,\n                          void* workSpace, size_t workSpaceSize)\n{\n    if (sourceSize < 1500) /* heuristic threshold */\n        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);\n    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */\n    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);\n    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);\n}\n\n/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */\nsize_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,\n                     const void* source, size_t sourceSize)\n{\n    unsigned tmpCounters[HIST_WKSP_SIZE_U32];\n    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));\n}\n\n/* HIST_count_wksp() :\n * 
Same as HIST_count(), but using an externally provided scratch buffer.\n * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */\nsize_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,\n                       const void* source, size_t sourceSize,\n                       void* workSpace, size_t workSpaceSize)\n{\n    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */\n    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);\n    if (*maxSymbolValuePtr < 255)\n        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);\n    *maxSymbolValuePtr = 255;\n    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);\n}\n\nsize_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,\n                 const void* src, size_t srcSize)\n{\n    unsigned tmpCounters[HIST_WKSP_SIZE_U32];\n    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/hist.h",
    "content": "/* ******************************************************************\n   hist : Histogram functions\n   part of Finite State Entropy project\n   Copyright (C) 2013-present, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/* --- dependencies --- */\n#include <stddef.h>   /* size_t */\n\n\n/* --- simple histogram functions --- */\n\n/*! 
HIST_count():\n *  Provides the precise count of each byte within a table 'count'.\n * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).\n *  Updates *maxSymbolValuePtr with actual largest symbol value detected.\n * @return : count of the most frequent symbol (which isn't identified).\n *           or an error code, which can be tested using HIST_isError().\n *           note : if return == srcSize, there is only one symbol.\n */\nsize_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,\n                  const void* src, size_t srcSize);\n\nunsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */\n\n\n/* --- advanced histogram functions --- */\n\n#define HIST_WKSP_SIZE_U32 1024\n#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))\n/** HIST_count_wksp() :\n *  Same as HIST_count(), but using an externally provided scratch buffer.\n *  Benefit is this function will use very little stack space.\n * `workSpace` is a writable buffer which must be 4-bytes aligned,\n * `workSpaceSize` must be >= HIST_WKSP_SIZE\n */\nsize_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,\n                       const void* src, size_t srcSize,\n                       void* workSpace, size_t workSpaceSize);\n\n/** HIST_countFast() :\n *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.\n *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`\n */\nsize_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,\n                      const void* src, size_t srcSize);\n\n/** HIST_countFast_wksp() :\n *  Same as HIST_countFast(), but using an externally provided scratch buffer.\n * `workSpace` is a writable buffer which must be 4-bytes aligned,\n * `workSpaceSize` must be >= HIST_WKSP_SIZE\n */\nsize_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,\n                           const void* 
src, size_t srcSize,\n                           void* workSpace, size_t workSpaceSize);\n\n/*! HIST_count_simple() :\n *  Same as HIST_countFast(), this function is unsafe,\n *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.\n *  It is also a bit slower for large inputs.\n *  However, it does not need any additional memory (not even on stack).\n * @return : count of the most frequent symbol.\n *  Note this function doesn't produce any error (i.e. it must succeed).\n */\nunsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,\n                           const void* src, size_t srcSize);\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/huf_compress.c",
    "content": "/* ******************************************************************\n   Huffman encoder, part of New Generation Entropy library\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/* **************************************************************\n*  Compiler specifics\n****************************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#endif\n\n\n/* **************************************************************\n*  Includes\n****************************************************************/\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n#include \"compiler.h\"\n#include \"bitstream.h\"\n#include \"hist.h\"\n#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */\n#include \"fse.h\"        /* header compression */\n#define HUF_STATIC_LINKING_ONLY\n#include \"huf.h\"\n#include \"error_private.h\"\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\n#define HUF_isError ERR_isError\n#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */\n#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return 
e\n#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }\n\n\n/* **************************************************************\n*  Utils\n****************************************************************/\nunsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)\n{\n    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);\n}\n\n\n/* *******************************************************\n*  HUF : Huffman block compression\n*********************************************************/\n/* HUF_compressWeights() :\n * Same as FSE_compress(), but dedicated to huff0's weights compression.\n * The use case needs much less stack memory.\n * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.\n */\n#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6\nstatic size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + dstSize;\n\n    unsigned maxSymbolValue = HUF_TABLELOG_MAX;\n    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;\n\n    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];\n    BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];\n\n    unsigned count[HUF_TABLELOG_MAX+1];\n    S16 norm[HUF_TABLELOG_MAX+1];\n\n    /* init conditions */\n    if (wtSize <= 1) return 0;  /* Not compressible */\n\n    /* Scan input and build symbol stats */\n    {   unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize);   /* never fails */\n        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */\n        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */\n    }\n\n    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);\n    CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );\n\n    /* Write 
table description header */\n    {   CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );\n        op += hSize;\n    }\n\n    /* Compress */\n    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );\n    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );\n        if (cSize == 0) return 0;   /* not enough space for compressed data */\n        op += cSize;\n    }\n\n    return op-ostart;\n}\n\n\nstruct HUF_CElt_s {\n  U16  val;\n  BYTE nbBits;\n};   /* typedef'd to HUF_CElt within \"huf.h\" */\n\n/*! HUF_writeCTable() :\n    `CTable` : Huffman tree to save, using huf representation.\n    @return : size of saved CTable */\nsize_t HUF_writeCTable (void* dst, size_t maxDstSize,\n                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)\n{\n    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */\n    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];\n    BYTE* op = (BYTE*)dst;\n    U32 n;\n\n     /* check conditions */\n    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);\n\n    /* convert to weight */\n    bitsToWeight[0] = 0;\n    for (n=1; n<huffLog+1; n++)\n        bitsToWeight[n] = (BYTE)(huffLog + 1 - n);\n    for (n=0; n<maxSymbolValue; n++)\n        huffWeight[n] = bitsToWeight[CTable[n].nbBits];\n\n    /* attempt weights compression by FSE */\n    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );\n        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */\n            op[0] = (BYTE)hSize;\n            return hSize+1;\n    }   }\n\n    /* write raw values as 4-bits (max : 15) */\n    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */\n    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough 
space within dst buffer */\n    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));\n    huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */\n    for (n=0; n<maxSymbolValue; n+=2)\n        op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);\n    return ((maxSymbolValue+1)/2) + 1;\n}\n\n\nsize_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)\n{\n    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */\n    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */\n    U32 tableLog = 0;\n    U32 nbSymbols = 0;\n\n    /* get symbol weights */\n    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));\n\n    /* check result */\n    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);\n    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);\n\n    /* Prepare base value per rank */\n    {   U32 n, nextRankStart = 0;\n        for (n=1; n<=tableLog; n++) {\n            U32 current = nextRankStart;\n            nextRankStart += (rankVal[n] << (n-1));\n            rankVal[n] = current;\n    }   }\n\n    /* fill nbBits */\n    *hasZeroWeights = 0;\n    {   U32 n; for (n=0; n<nbSymbols; n++) {\n            const U32 w = huffWeight[n];\n            *hasZeroWeights |= (w == 0);\n            CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);\n    }   }\n\n    /* fill val */\n    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */\n        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};\n        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }\n        /* determine stating value per rank */\n        valPerRank[tableLog+1] = 0;   /* for w==0 */\n        {   U16 min = 0;\n            U32 n; for (n=tableLog; 
n>0; n--) {  /* start at n=tablelog <-> w=1 */\n                valPerRank[n] = min;     /* get starting value within each rank */\n                min += nbPerRank[n];\n                min >>= 1;\n        }   }\n        /* assign value within rank, symbol order */\n        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }\n    }\n\n    *maxSymbolValuePtr = nbSymbols - 1;\n    return readSize;\n}\n\nU32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)\n{\n    const HUF_CElt* table = (const HUF_CElt*)symbolTable;\n    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);\n    return table[symbolValue].nbBits;\n}\n\n\ntypedef struct nodeElt_s {\n    U32 count;\n    U16 parent;\n    BYTE byte;\n    BYTE nbBits;\n} nodeElt;\n\nstatic U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)\n{\n    const U32 largestBits = huffNode[lastNonNull].nbBits;\n    if (largestBits <= maxNbBits) return largestBits;   /* early exit : no elt > maxNbBits */\n\n    /* there are several too large elements (at least >= 2) */\n    {   int totalCost = 0;\n        const U32 baseCost = 1 << (largestBits - maxNbBits);\n        U32 n = lastNonNull;\n\n        while (huffNode[n].nbBits > maxNbBits) {\n            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));\n            huffNode[n].nbBits = (BYTE)maxNbBits;\n            n --;\n        }  /* n stops at huffNode[n].nbBits <= maxNbBits */\n        while (huffNode[n].nbBits == maxNbBits) n--;   /* n end at index of smallest symbol using < maxNbBits */\n\n        /* renorm totalCost */\n        totalCost >>= (largestBits - maxNbBits);  /* note : totalCost is necessarily a multiple of baseCost */\n\n        /* repay normalized cost */\n        {   U32 const noSymbol = 0xF0F0F0F0;\n            U32 rankLast[HUF_TABLELOG_MAX+2];\n            int pos;\n\n            /* Get pos of last (smallest) symbol per rank */\n            memset(rankLast, 0xF0, sizeof(rankLast));\n            { 
  U32 currentNbBits = maxNbBits;\n                for (pos=n ; pos >= 0; pos--) {\n                    if (huffNode[pos].nbBits >= currentNbBits) continue;\n                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */\n                    rankLast[maxNbBits-currentNbBits] = pos;\n            }   }\n\n            while (totalCost > 0) {\n                U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;\n                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {\n                    U32 highPos = rankLast[nBitsToDecrease];\n                    U32 lowPos = rankLast[nBitsToDecrease-1];\n                    if (highPos == noSymbol) continue;\n                    if (lowPos == noSymbol) break;\n                    {   U32 const highTotal = huffNode[highPos].count;\n                        U32 const lowTotal = 2 * huffNode[lowPos].count;\n                        if (highTotal <= lowTotal) break;\n                }   }\n                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
*/\n                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */\n                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))\n                    nBitsToDecrease ++;\n                totalCost -= 1 << (nBitsToDecrease-1);\n                if (rankLast[nBitsToDecrease-1] == noSymbol)\n                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];   /* this rank is no longer empty */\n                huffNode[rankLast[nBitsToDecrease]].nbBits ++;\n                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */\n                    rankLast[nBitsToDecrease] = noSymbol;\n                else {\n                    rankLast[nBitsToDecrease]--;\n                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)\n                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */\n            }   }   /* while (totalCost > 0) */\n\n            while (totalCost < 0) {  /* Sometimes, cost correction overshoot */\n                if (rankLast[1] == noSymbol) {  /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */\n                    while (huffNode[n].nbBits == maxNbBits) n--;\n                    huffNode[n+1].nbBits--;\n                    rankLast[1] = n+1;\n                    totalCost++;\n                    continue;\n                }\n                huffNode[ rankLast[1] + 1 ].nbBits--;\n                rankLast[1]++;\n                totalCost ++;\n    }   }   }   /* there are several too large elements (at least >= 2) */\n\n    return maxNbBits;\n}\n\n\ntypedef struct {\n    U32 base;\n    U32 current;\n} rankPos;\n\nstatic void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)\n{\n    rankPos rank[32];\n    U32 n;\n\n    memset(rank, 0, sizeof(rank));\n    for (n=0; n<=maxSymbolValue; n++) {\n        
U32 r = BIT_highbit32(count[n] + 1);\n        rank[r].base ++;\n    }\n    for (n=30; n>0; n--) rank[n-1].base += rank[n].base;\n    for (n=0; n<32; n++) rank[n].current = rank[n].base;\n    for (n=0; n<=maxSymbolValue; n++) {\n        U32 const c = count[n];\n        U32 const r = BIT_highbit32(c+1) + 1;\n        U32 pos = rank[r].current++;\n        while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) {\n            huffNode[pos] = huffNode[pos-1];\n            pos--;\n        }\n        huffNode[pos].count = c;\n        huffNode[pos].byte  = (BYTE)n;\n    }\n}\n\n\n/** HUF_buildCTable_wksp() :\n *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.\n *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned.\n */\n#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)\ntypedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];\nsize_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)\n{\n    nodeElt* const huffNode0 = (nodeElt*)workSpace;\n    nodeElt* const huffNode = huffNode0+1;\n    U32 n, nonNullRank;\n    int lowS, lowN;\n    U16 nodeNb = STARTNODE;\n    U32 nodeRoot;\n\n    /* safety checks */\n    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */\n    if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall);\n    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;\n    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);\n    memset(huffNode0, 0, sizeof(huffNodeTable));\n\n    /* sort, decreasing order */\n    HUF_sort(huffNode, count, maxSymbolValue);\n\n    /* init for parents */\n    nonNullRank = maxSymbolValue;\n    while(huffNode[nonNullRank].count == 0) nonNullRank--;\n    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;\n    huffNode[nodeNb].count = 
huffNode[lowS].count + huffNode[lowS-1].count;\n    huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;\n    nodeNb++; lowS-=2;\n    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);\n    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */\n\n    /* create parents */\n    while (nodeNb <= nodeRoot) {\n        U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;\n        U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;\n        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;\n        huffNode[n1].parent = huffNode[n2].parent = nodeNb;\n        nodeNb++;\n    }\n\n    /* distribute weights (unlimited tree height) */\n    huffNode[nodeRoot].nbBits = 0;\n    for (n=nodeRoot-1; n>=STARTNODE; n--)\n        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;\n    for (n=0; n<=nonNullRank; n++)\n        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;\n\n    /* enforce maxTableLog */\n    maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);\n\n    /* fill result into tree (val, nbBits) */\n    {   U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};\n        U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};\n        if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */\n        for (n=0; n<=nonNullRank; n++)\n            nbPerRank[huffNode[n].nbBits]++;\n        /* determine stating value per rank */\n        {   U16 min = 0;\n            for (n=maxNbBits; n>0; n--) {\n                valPerRank[n] = min;      /* get starting value within each rank */\n                min += nbPerRank[n];\n                min >>= 1;\n        }   }\n        for (n=0; n<=maxSymbolValue; n++)\n            tree[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */\n        for (n=0; n<=maxSymbolValue; n++)\n            tree[n].val = valPerRank[tree[n].nbBits]++;   /* assign value within rank, symbol 
order */\n    }\n\n    return maxNbBits;\n}\n\n/** HUF_buildCTable() :\n * @return : maxNbBits\n *  Note : count is used before tree is written, so they can safely overlap\n */\nsize_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)\n{\n    huffNodeTable nodeTable;\n    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));\n}\n\nsize_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)\n{\n    size_t nbBits = 0;\n    int s;\n    for (s = 0; s <= (int)maxSymbolValue; ++s) {\n        nbBits += CTable[s].nbBits * count[s];\n    }\n    return nbBits >> 3;\n}\n\nstatic int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {\n  int bad = 0;\n  int s;\n  for (s = 0; s <= (int)maxSymbolValue; ++s) {\n    bad |= (count[s] != 0) & (CTable[s].nbBits == 0);\n  }\n  return !bad;\n}\n\nsize_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }\n\nFORCE_INLINE_TEMPLATE void\nHUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)\n{\n    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);\n}\n\n#define HUF_FLUSHBITS(s)  BIT_flushBits(s)\n\n#define HUF_FLUSHBITS_1(stream) \\\n    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)\n\n#define HUF_FLUSHBITS_2(stream) \\\n    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)\n\nFORCE_INLINE_TEMPLATE size_t\nHUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,\n                                   const void* src, size_t srcSize,\n                                   const HUF_CElt* CTable)\n{\n    const BYTE* ip = (const BYTE*) src;\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* const oend = ostart + dstSize;\n    BYTE* op = ostart;\n    size_t n;\n    BIT_CStream_t bitC;\n\n    /* init */\n    if (dstSize < 8) return 0;   
/* not enough space to compress */\n    { size_t const initErr = BIT_initCStream(&bitC, op, oend-op);\n      if (HUF_isError(initErr)) return 0; }\n\n    n = srcSize & ~3;  /* join to mod 4 */\n    switch (srcSize & 3)\n    {\n        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);\n                 HUF_FLUSHBITS_2(&bitC);\n\t\t /* fall-through */\n        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);\n                 HUF_FLUSHBITS_1(&bitC);\n\t\t /* fall-through */\n        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);\n                 HUF_FLUSHBITS(&bitC);\n\t\t /* fall-through */\n        case 0 : /* fall-through */\n        default: break;\n    }\n\n    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */\n        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);\n        HUF_FLUSHBITS_1(&bitC);\n        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);\n        HUF_FLUSHBITS_2(&bitC);\n        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);\n        HUF_FLUSHBITS_1(&bitC);\n        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);\n        HUF_FLUSHBITS(&bitC);\n    }\n\n    return BIT_closeCStream(&bitC);\n}\n\n#if DYNAMIC_BMI2\n\nstatic TARGET_ATTRIBUTE(\"bmi2\") size_t\nHUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,\n                                   const void* src, size_t srcSize,\n                                   const HUF_CElt* CTable)\n{\n    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);\n}\n\nstatic size_t\nHUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,\n                                      const void* src, size_t srcSize,\n                                      const HUF_CElt* CTable)\n{\n    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);\n}\n\nstatic size_t\nHUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,\n                              const void* src, size_t srcSize,\n                              const HUF_CElt* 
CTable, const int bmi2)\n{\n    if (bmi2) {\n        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);\n    }\n    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);\n}\n\n#else\n\nstatic size_t\nHUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,\n                              const void* src, size_t srcSize,\n                              const HUF_CElt* CTable, const int bmi2)\n{\n    (void)bmi2;\n    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);\n}\n\n#endif\n\nsize_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)\n{\n    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);\n}\n\n\nstatic size_t\nHUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,\n                              const void* src, size_t srcSize,\n                              const HUF_CElt* CTable, int bmi2)\n{\n    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */\n    const BYTE* ip = (const BYTE*) src;\n    const BYTE* const iend = ip + srcSize;\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* const oend = ostart + dstSize;\n    BYTE* op = ostart;\n\n    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */\n    if (srcSize < 12) return 0;   /* no saving possible : too small input */\n    op += 6;   /* jumpTable */\n\n    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );\n        if (cSize==0) return 0;\n        assert(cSize <= 65535);\n        MEM_writeLE16(ostart, (U16)cSize);\n        op += cSize;\n    }\n\n    ip += segmentSize;\n    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );\n        if (cSize==0) return 0;\n        assert(cSize <= 65535);\n        MEM_writeLE16(ostart+2, (U16)cSize);\n   
     op += cSize;\n    }\n\n    ip += segmentSize;\n    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );\n        if (cSize==0) return 0;\n        assert(cSize <= 65535);\n        MEM_writeLE16(ostart+4, (U16)cSize);\n        op += cSize;\n    }\n\n    ip += segmentSize;\n    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) );\n        if (cSize==0) return 0;\n        op += cSize;\n    }\n\n    return op-ostart;\n}\n\nsize_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)\n{\n    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);\n}\n\ntypedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;\n\nstatic size_t HUF_compressCTable_internal(\n                BYTE* const ostart, BYTE* op, BYTE* const oend,\n                const void* src, size_t srcSize,\n                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)\n{\n    size_t const cSize = (nbStreams==HUF_singleStream) ?\n                         HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :\n                         HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);\n    if (HUF_isError(cSize)) { return cSize; }\n    if (cSize==0) { return 0; }   /* uncompressible */\n    op += cSize;\n    /* check compressibility */\n    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }\n    return op-ostart;\n}\n\ntypedef struct {\n    unsigned count[HUF_SYMBOLVALUE_MAX + 1];\n    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];\n    huffNodeTable nodeTable;\n} HUF_compress_tables_t;\n\n/* HUF_compress_internal() :\n * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */\nstatic size_t\nHUF_compress_internal (void* dst, size_t dstSize,\n                 const void* src, size_t srcSize,\n                       
unsigned maxSymbolValue, unsigned huffLog,\n                       HUF_nbStreams_e nbStreams,\n                       void* workSpace, size_t wkspSize,\n                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,\n                 const int bmi2)\n{\n    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* const oend = ostart + dstSize;\n    BYTE* op = ostart;\n\n    /* checks & inits */\n    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */\n    if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);\n    if (!srcSize) return 0;  /* Uncompressed */\n    if (!dstSize) return 0;  /* cannot fit anything within dst budget */\n    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */\n    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);\n    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);\n    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;\n    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;\n\n    /* Heuristic : If old table is valid, use it for small inputs */\n    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {\n        return HUF_compressCTable_internal(ostart, op, oend,\n                                           src, srcSize,\n                                           nbStreams, oldHufTable, bmi2);\n    }\n\n    /* Scan input and build symbol stats */\n    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );\n        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */\n        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */\n    }\n\n    /* Check validity of previous table */\n    if ( repeat\n      && *repeat == HUF_repeat_check\n      
&& !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {\n        *repeat = HUF_repeat_none;\n    }\n    /* Heuristic : use existing table for small inputs */\n    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {\n        return HUF_compressCTable_internal(ostart, op, oend,\n                                           src, srcSize,\n                                           nbStreams, oldHufTable, bmi2);\n    }\n\n    /* Build Huffman Tree */\n    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);\n    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,\n                                            maxSymbolValue, huffLog,\n                                            table->nodeTable, sizeof(table->nodeTable));\n        CHECK_F(maxBits);\n        huffLog = (U32)maxBits;\n        /* Zero unused symbols in CTable, so we can check it for validity */\n        memset(table->CTable + (maxSymbolValue + 1), 0,\n               sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));\n    }\n\n    /* Write table description header */\n    {   CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );\n        /* Check if using previous huffman table is beneficial */\n        if (repeat && *repeat != HUF_repeat_none) {\n            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);\n            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);\n            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {\n                return HUF_compressCTable_internal(ostart, op, oend,\n                                                   src, srcSize,\n                                                   nbStreams, oldHufTable, bmi2);\n        }   }\n\n        /* Use the new huffman table */\n        if (hSize + 12ul >= srcSize) { return 0; }\n        op += hSize;\n        if (repeat) { 
*repeat = HUF_repeat_none; }\n        if (oldHufTable)\n            memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */\n    }\n    return HUF_compressCTable_internal(ostart, op, oend,\n                                       src, srcSize,\n                                       nbStreams, table->CTable, bmi2);\n}\n\n\nsize_t HUF_compress1X_wksp (void* dst, size_t dstSize,\n                      const void* src, size_t srcSize,\n                      unsigned maxSymbolValue, unsigned huffLog,\n                      void* workSpace, size_t wkspSize)\n{\n    return HUF_compress_internal(dst, dstSize, src, srcSize,\n                                 maxSymbolValue, huffLog, HUF_singleStream,\n                                 workSpace, wkspSize,\n                                 NULL, NULL, 0, 0 /*bmi2*/);\n}\n\nsize_t HUF_compress1X_repeat (void* dst, size_t dstSize,\n                      const void* src, size_t srcSize,\n                      unsigned maxSymbolValue, unsigned huffLog,\n                      void* workSpace, size_t wkspSize,\n                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)\n{\n    return HUF_compress_internal(dst, dstSize, src, srcSize,\n                                 maxSymbolValue, huffLog, HUF_singleStream,\n                                 workSpace, wkspSize, hufTable,\n                                 repeat, preferRepeat, bmi2);\n}\n\nsize_t HUF_compress1X (void* dst, size_t dstSize,\n                 const void* src, size_t srcSize,\n                 unsigned maxSymbolValue, unsigned huffLog)\n{\n    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];\n    return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));\n}\n\n/* HUF_compress4X_repeat():\n * compress input using 4 streams.\n * provide workspace to generate compression tables */\nsize_t HUF_compress4X_wksp (void* dst, size_t dstSize,\n                      const void* 
src, size_t srcSize,\n                      unsigned maxSymbolValue, unsigned huffLog,\n                      void* workSpace, size_t wkspSize)\n{\n    return HUF_compress_internal(dst, dstSize, src, srcSize,\n                                 maxSymbolValue, huffLog, HUF_fourStreams,\n                                 workSpace, wkspSize,\n                                 NULL, NULL, 0, 0 /*bmi2*/);\n}\n\n/* HUF_compress4X_repeat():\n * compress input using 4 streams.\n * re-use an existing huffman compression table */\nsize_t HUF_compress4X_repeat (void* dst, size_t dstSize,\n                      const void* src, size_t srcSize,\n                      unsigned maxSymbolValue, unsigned huffLog,\n                      void* workSpace, size_t wkspSize,\n                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)\n{\n    return HUF_compress_internal(dst, dstSize, src, srcSize,\n                                 maxSymbolValue, huffLog, HUF_fourStreams,\n                                 workSpace, wkspSize,\n                                 hufTable, repeat, preferRepeat, bmi2);\n}\n\nsize_t HUF_compress2 (void* dst, size_t dstSize,\n                const void* src, size_t srcSize,\n                unsigned maxSymbolValue, unsigned huffLog)\n{\n    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];\n    return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));\n}\n\nsize_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_compress.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include <limits.h>         /* INT_MAX */\n#include <string.h>         /* memset */\n#include \"cpu.h\"\n#include \"mem.h\"\n#include \"hist.h\"           /* HIST_countFast_wksp */\n#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */\n#include \"fse.h\"\n#define HUF_STATIC_LINKING_ONLY\n#include \"huf.h\"\n#include \"zstd_compress_internal.h\"\n#include \"zstd_compress_sequences.h\"\n#include \"zstd_compress_literals.h\"\n#include \"zstd_fast.h\"\n#include \"zstd_double_fast.h\"\n#include \"zstd_lazy.h\"\n#include \"zstd_opt.h\"\n#include \"zstd_ldm.h\"\n#include \"zstd_compress_superblock.h\"\n\n\n/*-*************************************\n*  Helper functions\n***************************************/\n/* ZSTD_compressBound()\n * Note that the result from this function is only compatible with the \"normal\"\n * full-block strategy.\n * When there are a lot of small blocks due to frequent flush in streaming mode\n * the overhead of headers can make the compressed data to be larger than the\n * return value of ZSTD_compressBound().\n */\nsize_t ZSTD_compressBound(size_t srcSize) {\n    return ZSTD_COMPRESSBOUND(srcSize);\n}\n\n\n/*-*************************************\n*  Context memory management\n***************************************/\nstruct ZSTD_CDict_s {\n    const void* dictContent;\n    size_t dictContentSize;\n    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */\n    ZSTD_cwksp workspace;\n    ZSTD_matchState_t 
matchState;\n    ZSTD_compressedBlockState_t cBlockState;\n    ZSTD_customMem customMem;\n    U32 dictID;\n    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */\n};  /* typedef'd to ZSTD_CDict within \"zstd.h\" */\n\nZSTD_CCtx* ZSTD_createCCtx(void)\n{\n    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);\n}\n\nstatic void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)\n{\n    assert(cctx != NULL);\n    memset(cctx, 0, sizeof(*cctx));\n    cctx->customMem = memManager;\n    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());\n    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);\n        assert(!ZSTD_isError(err));\n        (void)err;\n    }\n}\n\nZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)\n{\n    ZSTD_STATIC_ASSERT(zcss_init==0);\n    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));\n    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;\n    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);\n        if (!cctx) return NULL;\n        ZSTD_initCCtx(cctx, customMem);\n        return cctx;\n    }\n}\n\nZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)\n{\n    ZSTD_cwksp ws;\n    ZSTD_CCtx* cctx;\n    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */\n    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */\n    ZSTD_cwksp_init(&ws, workspace, workspaceSize);\n\n    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));\n    if (cctx == NULL) {\n        return NULL;\n    }\n    memset(cctx, 0, sizeof(ZSTD_CCtx));\n    ZSTD_cwksp_move(&cctx->workspace, &ws);\n    cctx->staticSize = workspaceSize;\n\n    /* statically sized space. 
entropyWorkspace never moves (but prev/next block swap places) */\n    if (!ZSTD_cwksp_check_available(&cctx->workspace, HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;\n    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));\n    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));\n    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(\n        &cctx->workspace, HUF_WORKSPACE_SIZE);\n    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());\n    return cctx;\n}\n\n/**\n * Clears and frees all of the dictionaries in the CCtx.\n */\nstatic void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)\n{\n    ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);\n    ZSTD_freeCDict(cctx->localDict.cdict);\n    memset(&cctx->localDict, 0, sizeof(cctx->localDict));\n    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));\n    cctx->cdict = NULL;\n}\n\nstatic size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)\n{\n    size_t const bufferSize = dict.dictBuffer != NULL ? 
dict.dictSize : 0;\n    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);\n    return bufferSize + cdictSize;\n}\n\nstatic void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)\n{\n    assert(cctx != NULL);\n    assert(cctx->staticSize == 0);\n    ZSTD_clearAllDicts(cctx);\n#ifdef ZSTD_MULTITHREAD\n    ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;\n#endif\n    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);\n}\n\nsize_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)\n{\n    if (cctx==NULL) return 0;   /* support free on NULL */\n    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,\n                    \"not compatible with static CCtx\");\n    {\n        int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);\n        ZSTD_freeCCtxContent(cctx);\n        if (!cctxInWorkspace) {\n            ZSTD_free(cctx, cctx->customMem);\n        }\n    }\n    return 0;\n}\n\n\nstatic size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)\n{\n#ifdef ZSTD_MULTITHREAD\n    return ZSTDMT_sizeof_CCtx(cctx->mtctx);\n#else\n    (void)cctx;\n    return 0;\n#endif\n}\n\n\nsize_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)\n{\n    if (cctx==NULL) return 0;   /* support sizeof on NULL */\n    /* cctx may be in the workspace */\n    return (cctx->workspace.workspace == cctx ? 
0 : sizeof(*cctx))\n           + ZSTD_cwksp_sizeof(&cctx->workspace)\n           + ZSTD_sizeof_localDict(cctx->localDict)\n           + ZSTD_sizeof_mtctx(cctx);\n}\n\nsize_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)\n{\n    return ZSTD_sizeof_CCtx(zcs);  /* same object */\n}\n\n/* private API call, for dictBuilder only */\nconst seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }\n\nstatic ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(\n        ZSTD_compressionParameters cParams)\n{\n    ZSTD_CCtx_params cctxParams;\n    memset(&cctxParams, 0, sizeof(cctxParams));\n    cctxParams.cParams = cParams;\n    cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */\n    assert(!ZSTD_checkCParams(cParams));\n    cctxParams.fParams.contentSizeFlag = 1;\n    return cctxParams;\n}\n\nstatic ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(\n        ZSTD_customMem customMem)\n{\n    ZSTD_CCtx_params* params;\n    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;\n    params = (ZSTD_CCtx_params*)ZSTD_calloc(\n            sizeof(ZSTD_CCtx_params), customMem);\n    if (!params) { return NULL; }\n    params->customMem = customMem;\n    params->compressionLevel = ZSTD_CLEVEL_DEFAULT;\n    params->fParams.contentSizeFlag = 1;\n    return params;\n}\n\nZSTD_CCtx_params* ZSTD_createCCtxParams(void)\n{\n    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);\n}\n\nsize_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)\n{\n    if (params == NULL) { return 0; }\n    ZSTD_free(params, params->customMem);\n    return 0;\n}\n\nsize_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)\n{\n    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);\n}\n\nsize_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {\n    RETURN_ERROR_IF(!cctxParams, GENERIC);\n    memset(cctxParams, 0, sizeof(*cctxParams));\n    cctxParams->compressionLevel = 
compressionLevel;\n    cctxParams->fParams.contentSizeFlag = 1;\n    return 0;\n}\n\nsize_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)\n{\n    RETURN_ERROR_IF(!cctxParams, GENERIC);\n    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );\n    memset(cctxParams, 0, sizeof(*cctxParams));\n    assert(!ZSTD_checkCParams(params.cParams));\n    cctxParams->cParams = params.cParams;\n    cctxParams->fParams = params.fParams;\n    cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */\n    return 0;\n}\n\n/* ZSTD_assignParamsToCCtxParams() :\n * params is presumed valid at this stage */\nstatic ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(\n        const ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)\n{\n    ZSTD_CCtx_params ret = *cctxParams;\n    assert(!ZSTD_checkCParams(params->cParams));\n    ret.cParams = params->cParams;\n    ret.fParams = params->fParams;\n    ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */\n    return ret;\n}\n\nZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)\n{\n    ZSTD_bounds bounds = { 0, 0, 0 };\n\n    switch(param)\n    {\n    case ZSTD_c_compressionLevel:\n        bounds.lowerBound = ZSTD_minCLevel();\n        bounds.upperBound = ZSTD_maxCLevel();\n        return bounds;\n\n    case ZSTD_c_windowLog:\n        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;\n        bounds.upperBound = ZSTD_WINDOWLOG_MAX;\n        return bounds;\n\n    case ZSTD_c_hashLog:\n        bounds.lowerBound = ZSTD_HASHLOG_MIN;\n        bounds.upperBound = ZSTD_HASHLOG_MAX;\n        return bounds;\n\n    case ZSTD_c_chainLog:\n        bounds.lowerBound = ZSTD_CHAINLOG_MIN;\n        bounds.upperBound = ZSTD_CHAINLOG_MAX;\n        return bounds;\n\n    case ZSTD_c_searchLog:\n        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;\n        bounds.upperBound = ZSTD_SEARCHLOG_MAX;\n  
      return bounds;\n\n    case ZSTD_c_minMatch:\n        bounds.lowerBound = ZSTD_MINMATCH_MIN;\n        bounds.upperBound = ZSTD_MINMATCH_MAX;\n        return bounds;\n\n    case ZSTD_c_targetLength:\n        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;\n        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;\n        return bounds;\n\n    case ZSTD_c_strategy:\n        bounds.lowerBound = ZSTD_STRATEGY_MIN;\n        bounds.upperBound = ZSTD_STRATEGY_MAX;\n        return bounds;\n\n    case ZSTD_c_contentSizeFlag:\n        bounds.lowerBound = 0;\n        bounds.upperBound = 1;\n        return bounds;\n\n    case ZSTD_c_checksumFlag:\n        bounds.lowerBound = 0;\n        bounds.upperBound = 1;\n        return bounds;\n\n    case ZSTD_c_dictIDFlag:\n        bounds.lowerBound = 0;\n        bounds.upperBound = 1;\n        return bounds;\n\n    case ZSTD_c_nbWorkers:\n        bounds.lowerBound = 0;\n#ifdef ZSTD_MULTITHREAD\n        bounds.upperBound = ZSTDMT_NBWORKERS_MAX;\n#else\n        bounds.upperBound = 0;\n#endif\n        return bounds;\n\n    case ZSTD_c_jobSize:\n        bounds.lowerBound = 0;\n#ifdef ZSTD_MULTITHREAD\n        bounds.upperBound = ZSTDMT_JOBSIZE_MAX;\n#else\n        bounds.upperBound = 0;\n#endif\n        return bounds;\n\n    case ZSTD_c_overlapLog:\n#ifdef ZSTD_MULTITHREAD\n        bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;\n        bounds.upperBound = ZSTD_OVERLAPLOG_MAX;\n#else\n        bounds.lowerBound = 0;\n        bounds.upperBound = 0;\n#endif\n        return bounds;\n\n    case ZSTD_c_enableLongDistanceMatching:\n        bounds.lowerBound = 0;\n        bounds.upperBound = 1;\n        return bounds;\n\n    case ZSTD_c_ldmHashLog:\n        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;\n        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;\n        return bounds;\n\n    case ZSTD_c_ldmMinMatch:\n        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;\n        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;\n        return bounds;\n\n    case 
ZSTD_c_ldmBucketSizeLog:\n        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;\n        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;\n        return bounds;\n\n    case ZSTD_c_ldmHashRateLog:\n        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;\n        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;\n        return bounds;\n\n    /* experimental parameters */\n    case ZSTD_c_rsyncable:\n        bounds.lowerBound = 0;\n        bounds.upperBound = 1;\n        return bounds;\n\n    case ZSTD_c_forceMaxWindow :\n        bounds.lowerBound = 0;\n        bounds.upperBound = 1;\n        return bounds;\n\n    case ZSTD_c_format:\n        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);\n        bounds.lowerBound = ZSTD_f_zstd1;\n        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */\n        return bounds;\n\n    case ZSTD_c_forceAttachDict:\n        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);\n        bounds.lowerBound = ZSTD_dictDefaultAttach;\n        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? 
*/\n        return bounds;\n\n    case ZSTD_c_literalCompressionMode:\n        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);\n        bounds.lowerBound = ZSTD_lcm_auto;\n        bounds.upperBound = ZSTD_lcm_uncompressed;\n        return bounds;\n\n    case ZSTD_c_targetCBlockSize:\n        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;\n        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;\n        return bounds;\n\n    case ZSTD_c_srcSizeHint:\n        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;\n        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;\n        return bounds;\n\n    default:\n        {   ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };\n            return boundError;\n        }\n    }\n}\n\n/* ZSTD_cParam_clampBounds:\n * Clamps the value into the bounded range.\n */\nstatic size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)\n{\n    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);\n    if (ZSTD_isError(bounds.error)) return bounds.error;\n    if (*value < bounds.lowerBound) *value = bounds.lowerBound;\n    if (*value > bounds.upperBound) *value = bounds.upperBound;\n    return 0;\n}\n\n#define BOUNDCHECK(cParam, val) { \\\n    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \\\n                    parameter_outOfBound); \\\n}\n\n\nstatic int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)\n{\n    switch(param)\n    {\n    case ZSTD_c_compressionLevel:\n    case ZSTD_c_hashLog:\n    case ZSTD_c_chainLog:\n    case ZSTD_c_searchLog:\n    case ZSTD_c_minMatch:\n    case ZSTD_c_targetLength:\n    case ZSTD_c_strategy:\n        return 1;\n\n    case ZSTD_c_format:\n    case ZSTD_c_windowLog:\n    case ZSTD_c_contentSizeFlag:\n    case ZSTD_c_checksumFlag:\n    case ZSTD_c_dictIDFlag:\n    case ZSTD_c_forceMaxWindow :\n    case ZSTD_c_nbWorkers:\n    case ZSTD_c_jobSize:\n    case ZSTD_c_overlapLog:\n    case ZSTD_c_rsyncable:\n    case 
ZSTD_c_enableLongDistanceMatching:\n    case ZSTD_c_ldmHashLog:\n    case ZSTD_c_ldmMinMatch:\n    case ZSTD_c_ldmBucketSizeLog:\n    case ZSTD_c_ldmHashRateLog:\n    case ZSTD_c_forceAttachDict:\n    case ZSTD_c_literalCompressionMode:\n    case ZSTD_c_targetCBlockSize:\n    case ZSTD_c_srcSizeHint:\n    default:\n        return 0;\n    }\n}\n\nsize_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)\n{\n    DEBUGLOG(4, \"ZSTD_CCtx_setParameter (%i, %i)\", (int)param, value);\n    if (cctx->streamStage != zcss_init) {\n        if (ZSTD_isUpdateAuthorized(param)) {\n            cctx->cParamsChanged = 1;\n        } else {\n            RETURN_ERROR(stage_wrong);\n    }   }\n\n    switch(param)\n    {\n    case ZSTD_c_nbWorkers:\n        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,\n                        \"MT not compatible with static alloc\");\n        break;\n\n    case ZSTD_c_compressionLevel:\n    case ZSTD_c_windowLog:\n    case ZSTD_c_hashLog:\n    case ZSTD_c_chainLog:\n    case ZSTD_c_searchLog:\n    case ZSTD_c_minMatch:\n    case ZSTD_c_targetLength:\n    case ZSTD_c_strategy:\n    case ZSTD_c_ldmHashRateLog:\n    case ZSTD_c_format:\n    case ZSTD_c_contentSizeFlag:\n    case ZSTD_c_checksumFlag:\n    case ZSTD_c_dictIDFlag:\n    case ZSTD_c_forceMaxWindow:\n    case ZSTD_c_forceAttachDict:\n    case ZSTD_c_literalCompressionMode:\n    case ZSTD_c_jobSize:\n    case ZSTD_c_overlapLog:\n    case ZSTD_c_rsyncable:\n    case ZSTD_c_enableLongDistanceMatching:\n    case ZSTD_c_ldmHashLog:\n    case ZSTD_c_ldmMinMatch:\n    case ZSTD_c_ldmBucketSizeLog:\n    case ZSTD_c_targetCBlockSize:\n    case ZSTD_c_srcSizeHint:\n        break;\n\n    default: RETURN_ERROR(parameter_unsupported);\n    }\n    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);\n}\n\nsize_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,\n                                    ZSTD_cParameter param, int 
value)\n{\n    DEBUGLOG(4, \"ZSTD_CCtxParams_setParameter (%i, %i)\", (int)param, value);\n    switch(param)\n    {\n    case ZSTD_c_format :\n        BOUNDCHECK(ZSTD_c_format, value);\n        CCtxParams->format = (ZSTD_format_e)value;\n        return (size_t)CCtxParams->format;\n\n    case ZSTD_c_compressionLevel : {\n        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));\n        if (value) {  /* 0 : does not change current level */\n            CCtxParams->compressionLevel = value;\n        }\n        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;\n        return 0;  /* return type (size_t) cannot represent negative values */\n    }\n\n    case ZSTD_c_windowLog :\n        if (value!=0)   /* 0 => use default */\n            BOUNDCHECK(ZSTD_c_windowLog, value);\n        CCtxParams->cParams.windowLog = (U32)value;\n        return CCtxParams->cParams.windowLog;\n\n    case ZSTD_c_hashLog :\n        if (value!=0)   /* 0 => use default */\n            BOUNDCHECK(ZSTD_c_hashLog, value);\n        CCtxParams->cParams.hashLog = (U32)value;\n        return CCtxParams->cParams.hashLog;\n\n    case ZSTD_c_chainLog :\n        if (value!=0)   /* 0 => use default */\n            BOUNDCHECK(ZSTD_c_chainLog, value);\n        CCtxParams->cParams.chainLog = (U32)value;\n        return CCtxParams->cParams.chainLog;\n\n    case ZSTD_c_searchLog :\n        if (value!=0)   /* 0 => use default */\n            BOUNDCHECK(ZSTD_c_searchLog, value);\n        CCtxParams->cParams.searchLog = (U32)value;\n        return (size_t)value;\n\n    case ZSTD_c_minMatch :\n        if (value!=0)   /* 0 => use default */\n            BOUNDCHECK(ZSTD_c_minMatch, value);\n        CCtxParams->cParams.minMatch = value;\n        return CCtxParams->cParams.minMatch;\n\n    case ZSTD_c_targetLength :\n        BOUNDCHECK(ZSTD_c_targetLength, value);\n        CCtxParams->cParams.targetLength = value;\n        return CCtxParams->cParams.targetLength;\n\n    case 
ZSTD_c_strategy :\n        if (value!=0)   /* 0 => use default */\n            BOUNDCHECK(ZSTD_c_strategy, value);\n        CCtxParams->cParams.strategy = (ZSTD_strategy)value;\n        return (size_t)CCtxParams->cParams.strategy;\n\n    case ZSTD_c_contentSizeFlag :\n        /* Content size written in frame header _when known_ (default:1) */\n        DEBUGLOG(4, \"set content size flag = %u\", (value!=0));\n        CCtxParams->fParams.contentSizeFlag = value != 0;\n        return CCtxParams->fParams.contentSizeFlag;\n\n    case ZSTD_c_checksumFlag :\n        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */\n        CCtxParams->fParams.checksumFlag = value != 0;\n        return CCtxParams->fParams.checksumFlag;\n\n    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */\n        DEBUGLOG(4, \"set dictIDFlag = %u\", (value!=0));\n        CCtxParams->fParams.noDictIDFlag = !value;\n        return !CCtxParams->fParams.noDictIDFlag;\n\n    case ZSTD_c_forceMaxWindow :\n        CCtxParams->forceWindow = (value != 0);\n        return CCtxParams->forceWindow;\n\n    case ZSTD_c_forceAttachDict : {\n        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;\n        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);\n        CCtxParams->attachDictPref = pref;\n        return CCtxParams->attachDictPref;\n    }\n\n    case ZSTD_c_literalCompressionMode : {\n        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;\n        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);\n        CCtxParams->literalCompressionMode = lcm;\n        return CCtxParams->literalCompressionMode;\n    }\n\n    case ZSTD_c_nbWorkers :\n#ifndef ZSTD_MULTITHREAD\n        RETURN_ERROR_IF(value!=0, parameter_unsupported, \"not compiled with multithreading\");\n        return 0;\n#else\n        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));\n        
CCtxParams->nbWorkers = value;\n        return CCtxParams->nbWorkers;\n#endif\n\n    case ZSTD_c_jobSize :\n#ifndef ZSTD_MULTITHREAD\n        RETURN_ERROR_IF(value!=0, parameter_unsupported, \"not compiled with multithreading\");\n        return 0;\n#else\n        /* Adjust to the minimum non-default value. */\n        if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)\n            value = ZSTDMT_JOBSIZE_MIN;\n        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));\n        assert(value >= 0);\n        CCtxParams->jobSize = value;\n        return CCtxParams->jobSize;\n#endif\n\n    case ZSTD_c_overlapLog :\n#ifndef ZSTD_MULTITHREAD\n        RETURN_ERROR_IF(value!=0, parameter_unsupported, \"not compiled with multithreading\");\n        return 0;\n#else\n        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));\n        CCtxParams->overlapLog = value;\n        return CCtxParams->overlapLog;\n#endif\n\n    case ZSTD_c_rsyncable :\n#ifndef ZSTD_MULTITHREAD\n        RETURN_ERROR_IF(value!=0, parameter_unsupported, \"not compiled with multithreading\");\n        return 0;\n#else\n        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));\n        CCtxParams->rsyncable = value;\n        return CCtxParams->rsyncable;\n#endif\n\n    case ZSTD_c_enableLongDistanceMatching :\n        CCtxParams->ldmParams.enableLdm = (value!=0);\n        return CCtxParams->ldmParams.enableLdm;\n\n    case ZSTD_c_ldmHashLog :\n        if (value!=0)   /* 0 ==> auto */\n            BOUNDCHECK(ZSTD_c_ldmHashLog, value);\n        CCtxParams->ldmParams.hashLog = value;\n        return CCtxParams->ldmParams.hashLog;\n\n    case ZSTD_c_ldmMinMatch :\n        if (value!=0)   /* 0 ==> default */\n            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);\n        CCtxParams->ldmParams.minMatchLength = value;\n        return CCtxParams->ldmParams.minMatchLength;\n\n    case ZSTD_c_ldmBucketSizeLog :\n        if (value!=0)   /* 0 ==> default */\n            
BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);\n        CCtxParams->ldmParams.bucketSizeLog = value;\n        return CCtxParams->ldmParams.bucketSizeLog;\n\n    case ZSTD_c_ldmHashRateLog :\n        RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,\n                        parameter_outOfBound);\n        CCtxParams->ldmParams.hashRateLog = value;\n        return CCtxParams->ldmParams.hashRateLog;\n\n    case ZSTD_c_targetCBlockSize :\n        if (value!=0)   /* 0 ==> default */\n            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);\n        CCtxParams->targetCBlockSize = value;\n        return CCtxParams->targetCBlockSize;\n\n    case ZSTD_c_srcSizeHint :\n        if (value!=0)    /* 0 ==> default */\n            BOUNDCHECK(ZSTD_c_srcSizeHint, value);\n        CCtxParams->srcSizeHint = value;\n        return CCtxParams->srcSizeHint;\n\n    default: RETURN_ERROR(parameter_unsupported, \"unknown parameter\");\n    }\n}\n\nsize_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)\n{\n    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);\n}\n\nsize_t ZSTD_CCtxParams_getParameter(\n        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)\n{\n    switch(param)\n    {\n    case ZSTD_c_format :\n        *value = CCtxParams->format;\n        break;\n    case ZSTD_c_compressionLevel :\n        *value = CCtxParams->compressionLevel;\n        break;\n    case ZSTD_c_windowLog :\n        *value = (int)CCtxParams->cParams.windowLog;\n        break;\n    case ZSTD_c_hashLog :\n        *value = (int)CCtxParams->cParams.hashLog;\n        break;\n    case ZSTD_c_chainLog :\n        *value = (int)CCtxParams->cParams.chainLog;\n        break;\n    case ZSTD_c_searchLog :\n        *value = CCtxParams->cParams.searchLog;\n        break;\n    case ZSTD_c_minMatch :\n        *value = CCtxParams->cParams.minMatch;\n        break;\n    case ZSTD_c_targetLength :\n        *value = CCtxParams->cParams.targetLength;\n 
       break;\n    case ZSTD_c_strategy :\n        *value = (unsigned)CCtxParams->cParams.strategy;\n        break;\n    case ZSTD_c_contentSizeFlag :\n        *value = CCtxParams->fParams.contentSizeFlag;\n        break;\n    case ZSTD_c_checksumFlag :\n        *value = CCtxParams->fParams.checksumFlag;\n        break;\n    case ZSTD_c_dictIDFlag :\n        *value = !CCtxParams->fParams.noDictIDFlag;\n        break;\n    case ZSTD_c_forceMaxWindow :\n        *value = CCtxParams->forceWindow;\n        break;\n    case ZSTD_c_forceAttachDict :\n        *value = CCtxParams->attachDictPref;\n        break;\n    case ZSTD_c_literalCompressionMode :\n        *value = CCtxParams->literalCompressionMode;\n        break;\n    case ZSTD_c_nbWorkers :\n#ifndef ZSTD_MULTITHREAD\n        assert(CCtxParams->nbWorkers == 0);\n#endif\n        *value = CCtxParams->nbWorkers;\n        break;\n    case ZSTD_c_jobSize :\n#ifndef ZSTD_MULTITHREAD\n        RETURN_ERROR(parameter_unsupported, \"not compiled with multithreading\");\n#else\n        assert(CCtxParams->jobSize <= INT_MAX);\n        *value = (int)CCtxParams->jobSize;\n        break;\n#endif\n    case ZSTD_c_overlapLog :\n#ifndef ZSTD_MULTITHREAD\n        RETURN_ERROR(parameter_unsupported, \"not compiled with multithreading\");\n#else\n        *value = CCtxParams->overlapLog;\n        break;\n#endif\n    case ZSTD_c_rsyncable :\n#ifndef ZSTD_MULTITHREAD\n        RETURN_ERROR(parameter_unsupported, \"not compiled with multithreading\");\n#else\n        *value = CCtxParams->rsyncable;\n        break;\n#endif\n    case ZSTD_c_enableLongDistanceMatching :\n        *value = CCtxParams->ldmParams.enableLdm;\n        break;\n    case ZSTD_c_ldmHashLog :\n        *value = CCtxParams->ldmParams.hashLog;\n        break;\n    case ZSTD_c_ldmMinMatch :\n        *value = CCtxParams->ldmParams.minMatchLength;\n        break;\n    case ZSTD_c_ldmBucketSizeLog :\n        *value = CCtxParams->ldmParams.bucketSizeLog;\n        break;\n    
case ZSTD_c_ldmHashRateLog :\n        *value = CCtxParams->ldmParams.hashRateLog;\n        break;\n    case ZSTD_c_targetCBlockSize :\n        *value = (int)CCtxParams->targetCBlockSize;\n        break;\n    case ZSTD_c_srcSizeHint :\n        *value = (int)CCtxParams->srcSizeHint;\n        break;\n    default: RETURN_ERROR(parameter_unsupported, \"unknown parameter\");\n    }\n    return 0;\n}\n\n/** ZSTD_CCtx_setParametersUsingCCtxParams() :\n *  just applies `params` into `cctx`\n *  no action is performed, parameters are merely stored.\n *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.\n *    This is possible even if a compression is ongoing.\n *    In which case, new parameters will be applied on the fly, starting with next compression job.\n */\nsize_t ZSTD_CCtx_setParametersUsingCCtxParams(\n        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)\n{\n    DEBUGLOG(4, \"ZSTD_CCtx_setParametersUsingCCtxParams\");\n    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);\n    RETURN_ERROR_IF(cctx->cdict, stage_wrong);\n\n    cctx->requestedParams = *params;\n    return 0;\n}\n\nZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)\n{\n    DEBUGLOG(4, \"ZSTD_CCtx_setPledgedSrcSize to %u bytes\", (U32)pledgedSrcSize);\n    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);\n    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;\n    return 0;\n}\n\n/**\n * Initializes the local dict using the requested parameters.\n * NOTE: This does not use the pledged src size, because it may be used for more\n * than one compression.\n */\nstatic size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)\n{\n    ZSTD_localDict* const dl = &cctx->localDict;\n    ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(\n            &cctx->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN, dl->dictSize);\n    if (dl->dict == NULL) {\n        /* No local dictionary. 
*/\n        assert(dl->dictBuffer == NULL);\n        assert(dl->cdict == NULL);\n        assert(dl->dictSize == 0);\n        return 0;\n    }\n    if (dl->cdict != NULL) {\n        assert(cctx->cdict == dl->cdict);\n        /* Local dictionary already initialized. */\n        return 0;\n    }\n    assert(dl->dictSize > 0);\n    assert(cctx->cdict == NULL);\n    assert(cctx->prefixDict.dict == NULL);\n\n    dl->cdict = ZSTD_createCDict_advanced(\n            dl->dict,\n            dl->dictSize,\n            ZSTD_dlm_byRef,\n            dl->dictContentType,\n            cParams,\n            cctx->customMem);\n    RETURN_ERROR_IF(!dl->cdict, memory_allocation);\n    cctx->cdict = dl->cdict;\n    return 0;\n}\n\nsize_t ZSTD_CCtx_loadDictionary_advanced(\n        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,\n        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)\n{\n    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);\n    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,\n                    \"no malloc for static CCtx\");\n    DEBUGLOG(4, \"ZSTD_CCtx_loadDictionary_advanced (size: %u)\", (U32)dictSize);\n    ZSTD_clearAllDicts(cctx);  /* in case one already exists */\n    if (dict == NULL || dictSize == 0)  /* no dictionary mode */\n        return 0;\n    if (dictLoadMethod == ZSTD_dlm_byRef) {\n        cctx->localDict.dict = dict;\n    } else {\n        void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);\n        RETURN_ERROR_IF(!dictBuffer, memory_allocation);\n        memcpy(dictBuffer, dict, dictSize);\n        cctx->localDict.dictBuffer = dictBuffer;\n        cctx->localDict.dict = dictBuffer;\n    }\n    cctx->localDict.dictSize = dictSize;\n    cctx->localDict.dictContentType = dictContentType;\n    return 0;\n}\n\nZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(\n      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)\n{\n    return ZSTD_CCtx_loadDictionary_advanced(\n            cctx, 
dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);\n}\n\nZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)\n{\n    return ZSTD_CCtx_loadDictionary_advanced(\n            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);\n}\n\n\nsize_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)\n{\n    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);\n    /* Free the existing local cdict (if any) to save memory. */\n    ZSTD_clearAllDicts(cctx);\n    cctx->cdict = cdict;\n    return 0;\n}\n\nsize_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)\n{\n    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);\n}\n\nsize_t ZSTD_CCtx_refPrefix_advanced(\n        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)\n{\n    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);\n    ZSTD_clearAllDicts(cctx);\n    cctx->prefixDict.dict = prefix;\n    cctx->prefixDict.dictSize = prefixSize;\n    cctx->prefixDict.dictContentType = dictContentType;\n    return 0;\n}\n\n/*! 
ZSTD_CCtx_reset() :\n *  Also dumps dictionary */\nsize_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)\n{\n    if ( (reset == ZSTD_reset_session_only)\n      || (reset == ZSTD_reset_session_and_parameters) ) {\n        cctx->streamStage = zcss_init;\n        cctx->pledgedSrcSizePlusOne = 0;\n    }\n    if ( (reset == ZSTD_reset_parameters)\n      || (reset == ZSTD_reset_session_and_parameters) ) {\n        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);\n        ZSTD_clearAllDicts(cctx);\n        return ZSTD_CCtxParams_reset(&cctx->requestedParams);\n    }\n    return 0;\n}\n\n\n/** ZSTD_checkCParams() :\n    control CParam values remain within authorized range.\n    @return : 0, or an error code if one value is beyond authorized range */\nsize_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)\n{\n    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);\n    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);\n    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);\n    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);\n    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);\n    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);\n    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);\n    return 0;\n}\n\n/** ZSTD_clampCParams() :\n *  make CParam values within valid range.\n *  @return : valid CParams */\nstatic ZSTD_compressionParameters\nZSTD_clampCParams(ZSTD_compressionParameters cParams)\n{\n#   define CLAMP_TYPE(cParam, val, type) {                                \\\n        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \\\n        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \\\n        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \\\n    }\n#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)\n    CLAMP(ZSTD_c_windowLog, cParams.windowLog);\n    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);\n    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);\n    
CLAMP(ZSTD_c_searchLog, cParams.searchLog);\n    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);\n    CLAMP(ZSTD_c_targetLength,cParams.targetLength);\n    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);\n    return cParams;\n}\n\n/** ZSTD_cycleLog() :\n *  condition for correct operation : hashLog > 1 */\nstatic U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)\n{\n    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);\n    return hashLog - btScale;\n}\n\n/** ZSTD_adjustCParams_internal() :\n *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).\n *  mostly downsize to reduce memory consumption and initialization latency.\n * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.\n *  note : `srcSize==0` means 0!\n *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */\nstatic ZSTD_compressionParameters\nZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,\n                            unsigned long long srcSize,\n                            size_t dictSize)\n{\n    static const U64 minSrcSize = 513; /* (1<<9) + 1 */\n    static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);\n    assert(ZSTD_checkCParams(cPar)==0);\n\n    if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)\n        srcSize = minSrcSize;\n\n    /* resize windowLog if input is small enough, to use less memory */\n    if ( (srcSize < maxWindowResize)\n      && (dictSize < maxWindowResize) )  {\n        U32 const tSize = (U32)(srcSize + dictSize);\n        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;\n        U32 const srcLog = (tSize < hashSizeMin) ? 
ZSTD_HASHLOG_MIN :\n                            ZSTD_highbit32(tSize-1) + 1;\n        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;\n    }\n    if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;\n    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);\n        if (cycleLog > cPar.windowLog)\n            cPar.chainLog -= (cycleLog - cPar.windowLog);\n    }\n\n    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)\n        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */\n\n    return cPar;\n}\n\nZSTD_compressionParameters\nZSTD_adjustCParams(ZSTD_compressionParameters cPar,\n                   unsigned long long srcSize,\n                   size_t dictSize)\n{\n    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */\n    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;\n    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);\n}\n\nstatic ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize);\nstatic ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize);\n\nZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(\n        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)\n{\n    ZSTD_compressionParameters cParams;\n    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {\n      srcSizeHint = CCtxParams->srcSizeHint;\n    }\n    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize);\n    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;\n    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;\n    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;\n    if (CCtxParams->cParams.chainLog) cParams.chainLog = 
CCtxParams->cParams.chainLog;\n    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;\n    if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;\n    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;\n    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;\n    assert(!ZSTD_checkCParams(cParams));\n    /* srcSizeHint == 0 means 0 */\n    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);\n}\n\nstatic size_t\nZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,\n                       const U32 forCCtx)\n{\n    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);\n    size_t const hSize = ((size_t)1) << cParams->hashLog;\n    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;\n    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;\n    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't\n     * surrounded by redzones in ASAN. */\n    size_t const tableSpace = chainSize * sizeof(U32)\n                            + hSize * sizeof(U32)\n                            + h3Size * sizeof(U32);\n    size_t const optPotentialSpace =\n        ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))\n      + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))\n      + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))\n      + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))\n      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))\n      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));\n    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))\n                                ? 
optPotentialSpace\n                                : 0;\n    DEBUGLOG(4, \"chainSize: %u - hSize: %u - h3Size: %u\",\n                (U32)chainSize, (U32)hSize, (U32)h3Size);\n    return tableSpace + optSpace;\n}\n\nsize_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)\n{\n    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, \"Estimate CCtx size is supported for single-threaded compression only.\");\n    {   ZSTD_compressionParameters const cParams =\n                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0);\n        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);\n        U32    const divider = (cParams.minMatch==3) ? 3 : 4;\n        size_t const maxNbSeq = blockSize / divider;\n        size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)\n                                + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))\n                                + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));\n        size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE);\n        size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));\n        size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);\n\n        size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);\n        size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq));\n\n        size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +\n                                   matchStateSize + ldmSpace + ldmSeqSpace;\n        size_t const cctxSpace = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx));\n\n        DEBUGLOG(5, \"sizeof(ZSTD_CCtx) : %u\", (U32)cctxSpace);\n        DEBUGLOG(5, \"estimate workspace : %u\", (U32)neededSpace);\n        return cctxSpace + neededSpace;\n    }\n}\n\nsize_t 
ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)\n{\n    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);\n    return ZSTD_estimateCCtxSize_usingCCtxParams(&params);\n}\n\nstatic size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)\n{\n    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);\n    return ZSTD_estimateCCtxSize_usingCParams(cParams);\n}\n\nsize_t ZSTD_estimateCCtxSize(int compressionLevel)\n{\n    int level;\n    size_t memBudget = 0;\n    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {\n        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);\n        if (newMB > memBudget) memBudget = newMB;\n    }\n    return memBudget;\n}\n\nsize_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)\n{\n    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, \"Estimate CCtx size is supported for single-threaded compression only.\");\n    {   ZSTD_compressionParameters const cParams =\n                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0);\n        size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);\n        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);\n        size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;\n        size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;\n        size_t const streamingSize = ZSTD_cwksp_alloc_size(inBuffSize)\n                                   + ZSTD_cwksp_alloc_size(outBuffSize);\n\n        return CCtxSize + streamingSize;\n    }\n}\n\nsize_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)\n{\n    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);\n    return ZSTD_estimateCStreamSize_usingCCtxParams(&params);\n}\n\nstatic size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)\n{\n    
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);\n    return ZSTD_estimateCStreamSize_usingCParams(cParams);\n}\n\nsize_t ZSTD_estimateCStreamSize(int compressionLevel)\n{\n    int level;\n    size_t memBudget = 0;\n    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {\n        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);\n        if (newMB > memBudget) memBudget = newMB;\n    }\n    return memBudget;\n}\n\n/* ZSTD_getFrameProgression():\n * tells how much data has been consumed (input) and produced (output) for current frame.\n * able to count progression inside worker threads (non-blocking mode).\n */\nZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)\n{\n#ifdef ZSTD_MULTITHREAD\n    if (cctx->appliedParams.nbWorkers > 0) {\n        return ZSTDMT_getFrameProgression(cctx->mtctx);\n    }\n#endif\n    {   ZSTD_frameProgression fp;\n        size_t const buffered = (cctx->inBuff == NULL) ? 0 :\n                                cctx->inBuffPos - cctx->inToCompress;\n        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);\n        assert(buffered <= ZSTD_BLOCKSIZE_MAX);\n        fp.ingested = cctx->consumedSrcSize + buffered;\n        fp.consumed = cctx->consumedSrcSize;\n        fp.produced = cctx->producedCSize;\n        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */\n        fp.currentJobID = 0;\n        fp.nbActiveWorkers = 0;\n        return fp;\n}   }\n\n/*! 
ZSTD_toFlushNow()\n *  Only useful for multithreading scenarios currently (nbWorkers >= 1).\n */\nsize_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)\n{\n#ifdef ZSTD_MULTITHREAD\n    if (cctx->appliedParams.nbWorkers > 0) {\n        return ZSTDMT_toFlushNow(cctx->mtctx);\n    }\n#endif\n    (void)cctx;\n    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */\n}\n\nstatic void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,\n                                    ZSTD_compressionParameters cParams2)\n{\n    (void)cParams1;\n    (void)cParams2;\n    assert(cParams1.windowLog    == cParams2.windowLog);\n    assert(cParams1.chainLog     == cParams2.chainLog);\n    assert(cParams1.hashLog      == cParams2.hashLog);\n    assert(cParams1.searchLog    == cParams2.searchLog);\n    assert(cParams1.minMatch     == cParams2.minMatch);\n    assert(cParams1.targetLength == cParams2.targetLength);\n    assert(cParams1.strategy     == cParams2.strategy);\n}\n\nvoid ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)\n{\n    int i;\n    for (i = 0; i < ZSTD_REP_NUM; ++i)\n        bs->rep[i] = repStartValue[i];\n    bs->entropy.huf.repeatMode = HUF_repeat_none;\n    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;\n    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;\n    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;\n}\n\n/*! 
ZSTD_invalidateMatchState()\n *  Invalidate all the matches in the match finder tables.\n *  Requires nextSrc and base to be set (can be NULL).\n */\nstatic void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)\n{\n    ZSTD_window_clear(&ms->window);\n\n    ms->nextToUpdate = ms->window.dictLimit;\n    ms->loadedDictEnd = 0;\n    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */\n    ms->dictMatchState = NULL;\n}\n\n/**\n * Indicates whether this compression proceeds directly from user-provided\n * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or\n * whether the context needs to buffer the input/output (ZSTDb_buffered).\n */\ntypedef enum {\n    ZSTDb_not_buffered,\n    ZSTDb_buffered\n} ZSTD_buffered_policy_e;\n\n/**\n * Controls, for this matchState reset, whether the tables need to be cleared /\n * prepared for the coming compression (ZSTDcrp_makeClean), or whether the\n * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a\n * subsequent operation will overwrite the table space anyways (e.g., copying\n * the matchState contents in from a CDict).\n */\ntypedef enum {\n    ZSTDcrp_makeClean,\n    ZSTDcrp_leaveDirty\n} ZSTD_compResetPolicy_e;\n\n/**\n * Controls, for this matchState reset, whether indexing can continue where it\n * left off (ZSTDirp_continue), or whether it needs to be restarted from zero\n * (ZSTDirp_reset).\n */\ntypedef enum {\n    ZSTDirp_continue,\n    ZSTDirp_reset\n} ZSTD_indexResetPolicy_e;\n\ntypedef enum {\n    ZSTD_resetTarget_CDict,\n    ZSTD_resetTarget_CCtx\n} ZSTD_resetTarget_e;\n\nstatic size_t\nZSTD_reset_matchState(ZSTD_matchState_t* ms,\n                      ZSTD_cwksp* ws,\n                const ZSTD_compressionParameters* cParams,\n                const ZSTD_compResetPolicy_e crp,\n                const ZSTD_indexResetPolicy_e forceResetIndex,\n                const ZSTD_resetTarget_e forWho)\n{\n    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 
0 : ((size_t)1 << cParams->chainLog);\n    size_t const hSize = ((size_t)1) << cParams->hashLog;\n    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;\n    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;\n\n    DEBUGLOG(4, \"reset indices : %u\", forceResetIndex == ZSTDirp_reset);\n    if (forceResetIndex == ZSTDirp_reset) {\n        ZSTD_window_init(&ms->window);\n        ZSTD_cwksp_mark_tables_dirty(ws);\n    }\n\n    ms->hashLog3 = hashLog3;\n\n    ZSTD_invalidateMatchState(ms);\n\n    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */\n\n    ZSTD_cwksp_clear_tables(ws);\n\n    DEBUGLOG(5, \"reserving table space\");\n    /* table Space */\n    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));\n    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));\n    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));\n    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,\n                    \"failed a workspace allocation in ZSTD_reset_matchState\");\n\n    DEBUGLOG(4, \"reset table : %u\", crp!=ZSTDcrp_leaveDirty);\n    if (crp!=ZSTDcrp_leaveDirty) {\n        /* reset tables only */\n        ZSTD_cwksp_clean_tables(ws);\n    }\n\n    /* opt parser space */\n    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {\n        DEBUGLOG(4, \"reserving optimal parser space\");\n        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));\n        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));\n        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));\n        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));\n        ms->opt.matchTable = 
(ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));\n        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));\n    }\n\n    ms->cParams = *cParams;\n\n    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,\n                    \"failed a workspace allocation in ZSTD_reset_matchState\");\n\n    return 0;\n}\n\n/* ZSTD_indexTooCloseToMax() :\n * minor optimization : prefer memset() rather than reduceIndex()\n * which is measurably slow in some circumstances (reported for Visual Studio).\n * Works when re-using a context for a lot of smallish inputs :\n * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,\n * memset() will be triggered before reduceIndex().\n */\n#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)\nstatic int ZSTD_indexTooCloseToMax(ZSTD_window_t w)\n{\n    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);\n}\n\n/*! ZSTD_resetCCtx_internal() :\n    note : `params` are assumed fully validated at this stage */\nstatic size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,\n                                      ZSTD_CCtx_params params,\n                                      U64 const pledgedSrcSize,\n                                      ZSTD_compResetPolicy_e const crp,\n                                      ZSTD_buffered_policy_e const zbuff)\n{\n    ZSTD_cwksp* const ws = &zc->workspace;\n    DEBUGLOG(4, \"ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u\",\n                (U32)pledgedSrcSize, params.cParams.windowLog);\n    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));\n\n    zc->isFirstBlock = 1;\n\n    if (params.ldmParams.enableLdm) {\n        /* Adjust long distance matching parameters */\n        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);\n        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);\n        assert(params.ldmParams.hashRateLog < 32);\n        
zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);\n    }\n\n    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));\n        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);\n        U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;\n        size_t const maxNbSeq = blockSize / divider;\n        size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)\n                                + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))\n                                + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));\n        size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;\n        size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;\n        size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);\n        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);\n\n        ZSTD_indexResetPolicy_e needsIndexReset = zc->initialized ? ZSTDirp_continue : ZSTDirp_reset;\n\n        if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {\n            needsIndexReset = ZSTDirp_reset;\n        }\n\n        ZSTD_cwksp_bump_oversized_duration(ws, 0);\n\n        /* Check if workspace is large enough, alloc a new one if needed */\n        {   size_t const cctxSpace = zc->staticSize ? 
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;\n            size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE);\n            size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));\n            size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize);\n            size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);\n            size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq));\n\n            size_t const neededSpace =\n                cctxSpace +\n                entropySpace +\n                blockStateSpace +\n                ldmSpace +\n                ldmSeqSpace +\n                matchStateSize +\n                tokenSpace +\n                bufferSpace;\n\n            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;\n            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);\n\n            DEBUGLOG(4, \"Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers\",\n                        neededSpace>>10, matchStateSize>>10, bufferSpace>>10);\n            DEBUGLOG(4, \"windowSize: %zu - blockSize: %zu\", windowSize, blockSize);\n\n            if (workspaceTooSmall || workspaceWasteful) {\n                DEBUGLOG(4, \"Resize workspaceSize from %zuKB to %zuKB\",\n                            ZSTD_cwksp_sizeof(ws) >> 10,\n                            neededSpace >> 10);\n\n                RETURN_ERROR_IF(zc->staticSize, memory_allocation, \"static cctx : no resize\");\n\n                needsIndexReset = ZSTDirp_reset;\n\n                ZSTD_cwksp_free(ws, zc->customMem);\n                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem));\n\n                DEBUGLOG(5, \"reserving object space\");\n                /* Statically sized space.\n                 * entropyWorkspace never moves,\n                 * though prev/next block swap 
places */\n                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));\n                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));\n                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, \"couldn't allocate prevCBlock\");\n                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));\n                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, \"couldn't allocate nextCBlock\");\n                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, HUF_WORKSPACE_SIZE);\n                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, \"couldn't allocate entropyWorkspace\");\n        }   }\n\n        ZSTD_cwksp_clear(ws);\n\n        /* init params */\n        zc->appliedParams = params;\n        zc->blockState.matchState.cParams = params.cParams;\n        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;\n        zc->consumedSrcSize = 0;\n        zc->producedCSize = 0;\n        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)\n            zc->appliedParams.fParams.contentSizeFlag = 0;\n        DEBUGLOG(4, \"pledged content size : %u ; flag : %u\",\n            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);\n        zc->blockSize = blockSize;\n\n        XXH64_reset(&zc->xxhState, 0);\n        zc->stage = ZSTDcs_init;\n        zc->dictID = 0;\n\n        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);\n\n        /* ZSTD_wildcopy() is used to copy into the literals buffer,\n         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.\n         */\n        zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);\n        zc->seqStore.maxNbLit = blockSize;\n\n        /* buffers */\n        zc->inBuffSize = 
buffInSize;\n        zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);\n        zc->outBuffSize = buffOutSize;\n        zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);\n\n        /* ldm bucketOffsets table */\n        if (params.ldmParams.enableLdm) {\n            /* TODO: avoid memset? */\n            size_t const ldmBucketSize =\n                  ((size_t)1) << (params.ldmParams.hashLog -\n                                  params.ldmParams.bucketSizeLog);\n            zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize);\n            memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize);\n        }\n\n        /* sequences storage */\n        ZSTD_referenceExternalSequences(zc, NULL, 0);\n        zc->seqStore.maxNbSeq = maxNbSeq;\n        zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));\n        zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));\n        zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));\n        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));\n\n        FORWARD_IF_ERROR(ZSTD_reset_matchState(\n            &zc->blockState.matchState,\n            ws,\n            &params.cParams,\n            crp,\n            needsIndexReset,\n            ZSTD_resetTarget_CCtx));\n\n        /* ldm hash table */\n        if (params.ldmParams.enableLdm) {\n            /* TODO: avoid memset? 
*/\n            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;\n            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));\n            memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));\n            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));\n            zc->maxNbLdmSequences = maxNbLdmSeq;\n\n            ZSTD_window_init(&zc->ldmState.window);\n            ZSTD_window_clear(&zc->ldmState.window);\n        }\n\n        DEBUGLOG(3, \"wksp: finished allocating, %zd bytes remain available\", ZSTD_cwksp_available_space(ws));\n        zc->initialized = 1;\n\n        return 0;\n    }\n}\n\n/* ZSTD_invalidateRepCodes() :\n * ensures next compression will not use repcodes from previous block.\n * Note : only works with regular variant;\n *        do not use with extDict variant ! */\nvoid ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {\n    int i;\n    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;\n    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));\n}\n\n/* These are the approximate sizes for each strategy past which copying the\n * dictionary tables into the working context is faster than using them\n * in-place.\n */\nstatic const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {\n    8 KB,  /* unused */\n    8 KB,  /* ZSTD_fast */\n    16 KB, /* ZSTD_dfast */\n    32 KB, /* ZSTD_greedy */\n    32 KB, /* ZSTD_lazy */\n    32 KB, /* ZSTD_lazy2 */\n    32 KB, /* ZSTD_btlazy2 */\n    32 KB, /* ZSTD_btopt */\n    8 KB,  /* ZSTD_btultra */\n    8 KB   /* ZSTD_btultra2 */\n};\n\nstatic int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,\n                                 const ZSTD_CCtx_params* params,\n                                 U64 pledgedSrcSize)\n{\n    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];\n    return ( pledgedSrcSize <= cutoff\n          || 
pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN\n          || params->attachDictPref == ZSTD_dictForceAttach )\n        && params->attachDictPref != ZSTD_dictForceCopy\n        && !params->forceWindow; /* dictMatchState isn't correctly\n                                 * handled in _enforceMaxDist */\n}\n\nstatic size_t\nZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,\n                        const ZSTD_CDict* cdict,\n                        ZSTD_CCtx_params params,\n                        U64 pledgedSrcSize,\n                        ZSTD_buffered_policy_e zbuff)\n{\n    {   const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams;\n        unsigned const windowLog = params.cParams.windowLog;\n        assert(windowLog != 0);\n        /* Resize working context table params for input only, since the dict\n         * has its own tables. */\n        /* pledgeSrcSize == 0 means 0! */\n        params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);\n        params.cParams.windowLog = windowLog;\n        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,\n                                                 ZSTDcrp_makeClean, zbuff));\n        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);\n    }\n\n    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc\n                                  - cdict->matchState.window.base);\n        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;\n        if (cdictLen == 0) {\n            /* don't even attach dictionaries with no contents */\n            DEBUGLOG(4, \"skipping attaching empty dictionary\");\n        } else {\n            DEBUGLOG(4, \"attaching dictionary into context\");\n            cctx->blockState.matchState.dictMatchState = &cdict->matchState;\n\n            /* prep working match state so dict matches never have negative indices\n             * when they are translated to the working context's 
index space. */\n            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {\n                cctx->blockState.matchState.window.nextSrc =\n                    cctx->blockState.matchState.window.base + cdictEnd;\n                ZSTD_window_clear(&cctx->blockState.matchState.window);\n            }\n            /* loadedDictEnd is expressed within the referential of the active context */\n            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;\n    }   }\n\n    cctx->dictID = cdict->dictID;\n\n    /* copy block state */\n    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));\n\n    return 0;\n}\n\nstatic size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,\n                            const ZSTD_CDict* cdict,\n                            ZSTD_CCtx_params params,\n                            U64 pledgedSrcSize,\n                            ZSTD_buffered_policy_e zbuff)\n{\n    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;\n\n    DEBUGLOG(4, \"copying dictionary into context\");\n\n    {   unsigned const windowLog = params.cParams.windowLog;\n        assert(windowLog != 0);\n        /* Copy only compression parameters related to tables. */\n        params.cParams = *cdict_cParams;\n        params.cParams.windowLog = windowLog;\n        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,\n                                                 ZSTDcrp_leaveDirty, zbuff));\n        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);\n        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);\n        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);\n    }\n\n    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);\n\n    /* copy tables */\n    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 
0 : ((size_t)1 << cdict_cParams->chainLog);\n        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;\n\n        memcpy(cctx->blockState.matchState.hashTable,\n               cdict->matchState.hashTable,\n               hSize * sizeof(U32));\n        memcpy(cctx->blockState.matchState.chainTable,\n               cdict->matchState.chainTable,\n               chainSize * sizeof(U32));\n    }\n\n    /* Zero the hashTable3, since the cdict never fills it */\n    {   int const h3log = cctx->blockState.matchState.hashLog3;\n        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;\n        assert(cdict->matchState.hashLog3 == 0);\n        memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));\n    }\n\n    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);\n\n    /* copy dictionary offsets */\n    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;\n        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;\n        dstMatchState->window       = srcMatchState->window;\n        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;\n        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;\n    }\n\n    cctx->dictID = cdict->dictID;\n\n    /* copy block state */\n    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));\n\n    return 0;\n}\n\n/* We have a choice between copying the dictionary context into the working\n * context, or referencing the dictionary context from the working context\n * in-place. We decide here which strategy to use. 
*/\nstatic size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,\n                            const ZSTD_CDict* cdict,\n                            const ZSTD_CCtx_params* params,\n                            U64 pledgedSrcSize,\n                            ZSTD_buffered_policy_e zbuff)\n{\n\n    DEBUGLOG(4, \"ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)\",\n                (unsigned)pledgedSrcSize);\n\n    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {\n        return ZSTD_resetCCtx_byAttachingCDict(\n            cctx, cdict, *params, pledgedSrcSize, zbuff);\n    } else {\n        return ZSTD_resetCCtx_byCopyingCDict(\n            cctx, cdict, *params, pledgedSrcSize, zbuff);\n    }\n}\n\n/*! ZSTD_copyCCtx_internal() :\n *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.\n *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).\n *  The \"context\", in this case, refers to the hash and chain tables,\n *  entropy tables, and dictionary references.\n * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.\n * @return : 0, or an error code */\nstatic size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,\n                            const ZSTD_CCtx* srcCCtx,\n                            ZSTD_frameParameters fParams,\n                            U64 pledgedSrcSize,\n                            ZSTD_buffered_policy_e zbuff)\n{\n    DEBUGLOG(5, \"ZSTD_copyCCtx_internal\");\n    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);\n\n    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));\n    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;\n        /* Copy only compression parameters related to tables. 
*/\n        params.cParams = srcCCtx->appliedParams.cParams;\n        params.fParams = fParams;\n        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,\n                                ZSTDcrp_leaveDirty, zbuff);\n        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);\n        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);\n        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);\n        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);\n        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);\n    }\n\n    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);\n\n    /* copy tables */\n    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);\n        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;\n        int const h3log = srcCCtx->blockState.matchState.hashLog3;\n        size_t const h3Size = h3log ? 
((size_t)1 << h3log) : 0;\n\n        memcpy(dstCCtx->blockState.matchState.hashTable,\n               srcCCtx->blockState.matchState.hashTable,\n               hSize * sizeof(U32));\n        memcpy(dstCCtx->blockState.matchState.chainTable,\n               srcCCtx->blockState.matchState.chainTable,\n               chainSize * sizeof(U32));\n        memcpy(dstCCtx->blockState.matchState.hashTable3,\n               srcCCtx->blockState.matchState.hashTable3,\n               h3Size * sizeof(U32));\n    }\n\n    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);\n\n    /* copy dictionary offsets */\n    {\n        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;\n        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;\n        dstMatchState->window       = srcMatchState->window;\n        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;\n        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;\n    }\n    dstCCtx->dictID = srcCCtx->dictID;\n\n    /* copy block state */\n    memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));\n\n    return 0;\n}\n\n/*! ZSTD_copyCCtx() :\n *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.\n *  Only works during stage ZSTDcs_init (i.e. 
after creation, but before first call to ZSTD_compressContinue()).\n *  pledgedSrcSize==0 means \"unknown\".\n*   @return : 0, or an error code */\nsize_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)\n{\n    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };\n    ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);\n    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);\n    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;\n    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);\n\n    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,\n                                fParams, pledgedSrcSize,\n                                zbuff);\n}\n\n\n#define ZSTD_ROWSIZE 16\n/*! ZSTD_reduceTable() :\n *  reduce table indexes by `reducerValue`, or squash to zero.\n *  PreserveMark preserves \"unsorted mark\" for btlazy2 strategy.\n *  It must be set to a clear 0/1 value, to remove branch during inlining.\n *  Presume table size is a multiple of ZSTD_ROWSIZE\n *  to help auto-vectorization */\nFORCE_INLINE_TEMPLATE void\nZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)\n{\n    int const nbRows = (int)size / ZSTD_ROWSIZE;\n    int cellNb = 0;\n    int rowNb;\n    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */\n    assert(size < (1U<<31));   /* can be casted to int */\n\n#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)\n    /* To validate that the table re-use logic is sound, and that we don't\n     * access table space that we haven't cleaned, we re-\"poison\" the table\n     * space every time we mark it dirty.\n     *\n     * This function however is intended to operate on those dirty tables and\n     * re-clean them. So when this function is used correctly, we can unpoison\n     * the memory it operated on. 
This introduces a blind spot though, since\n     * if we now try to operate on __actually__ poisoned memory, we will not\n     * detect that. */\n    __msan_unpoison(table, size * sizeof(U32));\n#endif\n\n    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {\n        int column;\n        for (column=0; column<ZSTD_ROWSIZE; column++) {\n            if (preserveMark) {\n                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;\n                table[cellNb] += adder;\n            }\n            if (table[cellNb] < reducerValue) table[cellNb] = 0;\n            else table[cellNb] -= reducerValue;\n            cellNb++;\n    }   }\n}\n\nstatic void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)\n{\n    ZSTD_reduceTable_internal(table, size, reducerValue, 0);\n}\n\nstatic void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)\n{\n    ZSTD_reduceTable_internal(table, size, reducerValue, 1);\n}\n\n/*! ZSTD_reduceIndex() :\n*   rescale all indexes to avoid future overflow (indexes are U32) */\nstatic void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)\n{\n    {   U32 const hSize = (U32)1 << params->cParams.hashLog;\n        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);\n    }\n\n    if (params->cParams.strategy != ZSTD_fast) {\n        U32 const chainSize = (U32)1 << params->cParams.chainLog;\n        if (params->cParams.strategy == ZSTD_btlazy2)\n            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);\n        else\n            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);\n    }\n\n    if (ms->hashLog3) {\n        U32 const h3Size = (U32)1 << ms->hashLog3;\n        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);\n    }\n}\n\n\n/*-*******************************************************\n*  Block entropic compression\n*********************************************************/\n\n/* 
See doc/zstd_compression_format.md for detailed format description */\n\nvoid ZSTD_seqToCodes(const seqStore_t* seqStorePtr)\n{\n    const seqDef* const sequences = seqStorePtr->sequencesStart;\n    BYTE* const llCodeTable = seqStorePtr->llCode;\n    BYTE* const ofCodeTable = seqStorePtr->ofCode;\n    BYTE* const mlCodeTable = seqStorePtr->mlCode;\n    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);\n    U32 u;\n    assert(nbSeq <= seqStorePtr->maxNbSeq);\n    for (u=0; u<nbSeq; u++) {\n        U32 const llv = sequences[u].litLength;\n        U32 const mlv = sequences[u].matchLength;\n        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);\n        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);\n        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);\n    }\n    if (seqStorePtr->longLengthID==1)\n        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;\n    if (seqStorePtr->longLengthID==2)\n        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;\n}\n\nstatic int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)\n{\n    switch (cctxParams->literalCompressionMode) {\n    case ZSTD_lcm_huffman:\n        return 0;\n    case ZSTD_lcm_uncompressed:\n        return 1;\n    default:\n        assert(0 /* impossible: pre-validated */);\n        /* fall-through */\n    case ZSTD_lcm_auto:\n        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);\n    }\n}\n\n/* ZSTD_useTargetCBlockSize():\n * Returns if target compressed block size param is being used.\n * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.\n * Returns 1 if true, 0 otherwise. 
*/\nstatic int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)\n{\n    DEBUGLOG(5, \"ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)\", cctxParams->targetCBlockSize);\n    return (cctxParams->targetCBlockSize != 0);\n}\n\n/* ZSTD_compressSequences_internal():\n * actually compresses both literals and sequences */\nMEM_STATIC size_t\nZSTD_compressSequences_internal(seqStore_t* seqStorePtr,\n                          const ZSTD_entropyCTables_t* prevEntropy,\n                                ZSTD_entropyCTables_t* nextEntropy,\n                          const ZSTD_CCtx_params* cctxParams,\n                                void* dst, size_t dstCapacity,\n                                void* entropyWorkspace, size_t entropyWkspSize,\n                          const int bmi2)\n{\n    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;\n    ZSTD_strategy const strategy = cctxParams->cParams.strategy;\n    unsigned count[MaxSeq+1];\n    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;\n    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;\n    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;\n    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */\n    const seqDef* const sequences = seqStorePtr->sequencesStart;\n    const BYTE* const ofCodeTable = seqStorePtr->ofCode;\n    const BYTE* const llCodeTable = seqStorePtr->llCode;\n    const BYTE* const mlCodeTable = seqStorePtr->mlCode;\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* const oend = ostart + dstCapacity;\n    BYTE* op = ostart;\n    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);\n    BYTE* seqHead;\n    BYTE* lastNCount = NULL;\n\n    DEBUGLOG(5, \"ZSTD_compressSequences_internal (nbSeq=%zu)\", nbSeq);\n    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));\n\n    /* Compress literals */\n    {   const BYTE* const literals = seqStorePtr->litStart;\n        
size_t const litSize = (size_t)(seqStorePtr->lit - literals);\n        size_t const cSize = ZSTD_compressLiterals(\n                                    &prevEntropy->huf, &nextEntropy->huf,\n                                    cctxParams->cParams.strategy,\n                                    ZSTD_disableLiteralsCompression(cctxParams),\n                                    op, dstCapacity,\n                                    literals, litSize,\n                                    entropyWorkspace, entropyWkspSize,\n                                    bmi2);\n        FORWARD_IF_ERROR(cSize);\n        assert(cSize <= dstCapacity);\n        op += cSize;\n    }\n\n    /* Sequences Header */\n    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,\n                    dstSize_tooSmall);\n    if (nbSeq < 128) {\n        *op++ = (BYTE)nbSeq;\n    } else if (nbSeq < LONGNBSEQ) {\n        op[0] = (BYTE)((nbSeq>>8) + 0x80);\n        op[1] = (BYTE)nbSeq;\n        op+=2;\n    } else {\n        op[0]=0xFF;\n        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));\n        op+=3;\n    }\n    assert(op <= oend);\n    if (nbSeq==0) {\n        /* Copy the old tables over as if we repeated them */\n        memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));\n        return (size_t)(op - ostart);\n    }\n\n    /* seqHead : flags for FSE encoding type */\n    seqHead = op++;\n    assert(op <= oend);\n\n    /* convert length/distances into codes */\n    ZSTD_seqToCodes(seqStorePtr);\n    /* build CTable for Literal Lengths */\n    {   unsigned max = MaxLL;\n        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */\n        DEBUGLOG(5, \"Building LL table\");\n        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;\n        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,\n                                        
count, max, mostFrequent, nbSeq,\n                                        LLFSELog, prevEntropy->fse.litlengthCTable,\n                                        LL_defaultNorm, LL_defaultNormLog,\n                                        ZSTD_defaultAllowed, strategy);\n        assert(set_basic < set_compressed && set_rle < set_compressed);\n        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */\n        {   size_t const countSize = ZSTD_buildCTable(\n                op, (size_t)(oend - op),\n                CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,\n                count, max, llCodeTable, nbSeq,\n                LL_defaultNorm, LL_defaultNormLog, MaxLL,\n                prevEntropy->fse.litlengthCTable,\n                sizeof(prevEntropy->fse.litlengthCTable),\n                entropyWorkspace, entropyWkspSize);\n            FORWARD_IF_ERROR(countSize);\n            if (LLtype == set_compressed)\n                lastNCount = op;\n            op += countSize;\n            assert(op <= oend);\n    }   }\n    /* build CTable for Offsets */\n    {   unsigned max = MaxOff;\n        size_t const mostFrequent = HIST_countFast_wksp(\n            count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */\n        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */\n        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed;\n        DEBUGLOG(5, \"Building OF table\");\n        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;\n        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,\n                                        count, max, mostFrequent, nbSeq,\n                                        OffFSELog, prevEntropy->fse.offcodeCTable,\n                                        OF_defaultNorm, OF_defaultNormLog,\n                                        defaultPolicy, strategy);\n        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */\n        {   size_t const countSize = ZSTD_buildCTable(\n                op, (size_t)(oend - op),\n                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,\n                count, max, ofCodeTable, nbSeq,\n                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,\n                prevEntropy->fse.offcodeCTable,\n                sizeof(prevEntropy->fse.offcodeCTable),\n                entropyWorkspace, entropyWkspSize);\n            FORWARD_IF_ERROR(countSize);\n            if (Offtype == set_compressed)\n                lastNCount = op;\n            op += countSize;\n            assert(op <= oend);\n    }   }\n    /* build CTable for MatchLengths */\n    {   unsigned max = MaxML;\n        size_t const mostFrequent = HIST_countFast_wksp(\n            count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */\n        DEBUGLOG(5, \"Building ML table (remaining space : %i)\", (int)(oend-op));\n        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;\n        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,\n                                        count, max, mostFrequent, nbSeq,\n                                        MLFSELog, prevEntropy->fse.matchlengthCTable,\n              
                          ML_defaultNorm, ML_defaultNormLog,\n                                        ZSTD_defaultAllowed, strategy);\n        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */\n        {   size_t const countSize = ZSTD_buildCTable(\n                op, (size_t)(oend - op),\n                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,\n                count, max, mlCodeTable, nbSeq,\n                ML_defaultNorm, ML_defaultNormLog, MaxML,\n                prevEntropy->fse.matchlengthCTable,\n                sizeof(prevEntropy->fse.matchlengthCTable),\n                entropyWorkspace, entropyWkspSize);\n            FORWARD_IF_ERROR(countSize);\n            if (MLtype == set_compressed)\n                lastNCount = op;\n            op += countSize;\n            assert(op <= oend);\n    }   }\n\n    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));\n\n    {   size_t const bitstreamSize = ZSTD_encodeSequences(\n                                        op, (size_t)(oend - op),\n                                        CTable_MatchLength, mlCodeTable,\n                                        CTable_OffsetBits, ofCodeTable,\n                                        CTable_LitLength, llCodeTable,\n                                        sequences, nbSeq,\n                                        longOffsets, bmi2);\n        FORWARD_IF_ERROR(bitstreamSize);\n        op += bitstreamSize;\n        assert(op <= oend);\n        /* zstd versions <= 1.3.4 mistakenly report corruption when\n         * FSE_readNCount() receives a buffer < 4 bytes.\n         * Fixed by https://github.com/facebook/zstd/pull/1146.\n         * This can happen when the last set_compressed table present is 2\n         * bytes and the bitstream is only one byte.\n         * In this exceedingly rare case, we will simply emit an uncompressed\n         * block, since it isn't worth 
optimizing.\n         */\n        if (lastNCount && (op - lastNCount) < 4) {\n            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */\n            assert(op - lastNCount == 3);\n            DEBUGLOG(5, \"Avoiding bug in zstd decoder in versions <= 1.3.4 by \"\n                        \"emitting an uncompressed block.\");\n            return 0;\n        }\n    }\n\n    DEBUGLOG(5, \"compressed block size : %u\", (unsigned)(op - ostart));\n    return (size_t)(op - ostart);\n}\n\nMEM_STATIC size_t\nZSTD_compressSequences(seqStore_t* seqStorePtr,\n                       const ZSTD_entropyCTables_t* prevEntropy,\n                             ZSTD_entropyCTables_t* nextEntropy,\n                       const ZSTD_CCtx_params* cctxParams,\n                             void* dst, size_t dstCapacity,\n                             size_t srcSize,\n                             void* entropyWorkspace, size_t entropyWkspSize,\n                             int bmi2)\n{\n    size_t const cSize = ZSTD_compressSequences_internal(\n                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,\n                            dst, dstCapacity,\n                            entropyWorkspace, entropyWkspSize, bmi2);\n    if (cSize == 0) return 0;\n    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.\n     * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.\n     */\n    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))\n        return 0;  /* block not compressed */\n    FORWARD_IF_ERROR(cSize);\n\n    /* Check compressibility */\n    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);\n        if (cSize >= maxCSize) return 0;  /* block not compressed */\n    }\n\n    return cSize;\n}\n\n/* ZSTD_selectBlockCompressor() :\n * Not static, but internal use only (used by long distance matcher)\n * assumption : strat 
is a valid strategy */\nZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)\n{\n    static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {\n        { ZSTD_compressBlock_fast  /* default for 0 */,\n          ZSTD_compressBlock_fast,\n          ZSTD_compressBlock_doubleFast,\n          ZSTD_compressBlock_greedy,\n          ZSTD_compressBlock_lazy,\n          ZSTD_compressBlock_lazy2,\n          ZSTD_compressBlock_btlazy2,\n          ZSTD_compressBlock_btopt,\n          ZSTD_compressBlock_btultra,\n          ZSTD_compressBlock_btultra2 },\n        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,\n          ZSTD_compressBlock_fast_extDict,\n          ZSTD_compressBlock_doubleFast_extDict,\n          ZSTD_compressBlock_greedy_extDict,\n          ZSTD_compressBlock_lazy_extDict,\n          ZSTD_compressBlock_lazy2_extDict,\n          ZSTD_compressBlock_btlazy2_extDict,\n          ZSTD_compressBlock_btopt_extDict,\n          ZSTD_compressBlock_btultra_extDict,\n          ZSTD_compressBlock_btultra_extDict },\n        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,\n          ZSTD_compressBlock_fast_dictMatchState,\n          ZSTD_compressBlock_doubleFast_dictMatchState,\n          ZSTD_compressBlock_greedy_dictMatchState,\n          ZSTD_compressBlock_lazy_dictMatchState,\n          ZSTD_compressBlock_lazy2_dictMatchState,\n          ZSTD_compressBlock_btlazy2_dictMatchState,\n          ZSTD_compressBlock_btopt_dictMatchState,\n          ZSTD_compressBlock_btultra_dictMatchState,\n          ZSTD_compressBlock_btultra_dictMatchState }\n    };\n    ZSTD_blockCompressor selectedCompressor;\n    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);\n\n    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));\n    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];\n    assert(selectedCompressor != NULL);\n    return selectedCompressor;\n}\n\nstatic void 
ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,\n                                   const BYTE* anchor, size_t lastLLSize)\n{\n    memcpy(seqStorePtr->lit, anchor, lastLLSize);\n    seqStorePtr->lit += lastLLSize;\n}\n\nvoid ZSTD_resetSeqStore(seqStore_t* ssPtr)\n{\n    ssPtr->lit = ssPtr->litStart;\n    ssPtr->sequences = ssPtr->sequencesStart;\n    ssPtr->longLengthID = 0;\n}\n\ntypedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;\n\nstatic size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)\n{\n    ZSTD_matchState_t* const ms = &zc->blockState.matchState;\n    DEBUGLOG(5, \"ZSTD_buildSeqStore (srcSize=%zu)\", srcSize);\n    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);\n    /* Assert that we have correctly flushed the ctx params into the ms's copy */\n    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);\n    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {\n        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);\n        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */\n    }\n    ZSTD_resetSeqStore(&(zc->seqStore));\n    /* required for optimal parser to read stats from dictionary */\n    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;\n    /* tell the optimal parser how we expect to compress literals */\n    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;\n    /* a gap between an attached dict and the current window is not safe,\n     * they must remain adjacent,\n     * and when that stops being the case, the dict must be unset */\n    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);\n\n    /* limited update after a very long match */\n    {   const BYTE* const base = ms->window.base;\n        const BYTE* const istart = (const BYTE*)src;\n        const U32 current = (U32)(istart-base);\n        if (sizeof(ptrdiff_t)==8) assert(istart - base < 
(ptrdiff_t)(U32)(-1));   /* ensure no overflow */\n        if (current > ms->nextToUpdate + 384)\n            ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));\n    }\n\n    /* select and store sequences */\n    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);\n        size_t lastLLSize;\n        {   int i;\n            for (i = 0; i < ZSTD_REP_NUM; ++i)\n                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];\n        }\n        if (zc->externSeqStore.pos < zc->externSeqStore.size) {\n            assert(!zc->appliedParams.ldmParams.enableLdm);\n            /* Updates ldmSeqStore.pos */\n            lastLLSize =\n                ZSTD_ldm_blockCompress(&zc->externSeqStore,\n                                       ms, &zc->seqStore,\n                                       zc->blockState.nextCBlock->rep,\n                                       src, srcSize);\n            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);\n        } else if (zc->appliedParams.ldmParams.enableLdm) {\n            rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};\n\n            ldmSeqStore.seq = zc->ldmSequences;\n            ldmSeqStore.capacity = zc->maxNbLdmSequences;\n            /* Updates ldmSeqStore.size */\n            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,\n                                               &zc->appliedParams.ldmParams,\n                                               src, srcSize));\n            /* Updates ldmSeqStore.pos */\n            lastLLSize =\n                ZSTD_ldm_blockCompress(&ldmSeqStore,\n                                       ms, &zc->seqStore,\n                                       zc->blockState.nextCBlock->rep,\n                                       src, srcSize);\n            assert(ldmSeqStore.pos == ldmSeqStore.size);\n        } else {   /* not long range mode */\n            ZSTD_blockCompressor const blockCompressor = 
ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);\n            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);\n        }\n        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;\n            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);\n    }   }\n    return ZSTDbss_compress;\n}\n\nstatic void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)\n{\n    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);\n    const seqDef* seqs = seqStore->sequencesStart;\n    size_t seqsSize = seqStore->sequences - seqs;\n\n    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];\n    size_t i; size_t position; int repIdx;\n\n    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);\n    for (i = 0, position = 0; i < seqsSize; ++i) {\n        outSeqs[i].offset = seqs[i].offset;\n        outSeqs[i].litLength = seqs[i].litLength;\n        outSeqs[i].matchLength = seqs[i].matchLength + MINMATCH;\n\n        if (i == seqStore->longLengthPos) {\n            if (seqStore->longLengthID == 1) {\n                outSeqs[i].litLength += 0x10000;\n            } else if (seqStore->longLengthID == 2) {\n                outSeqs[i].matchLength += 0x10000;\n            }\n        }\n\n        if (outSeqs[i].offset <= ZSTD_REP_NUM) {\n            outSeqs[i].rep = outSeqs[i].offset;\n            repIdx = (unsigned int)i - outSeqs[i].offset;\n\n            if (outSeqs[i].litLength == 0) {\n                if (outSeqs[i].offset < 3) {\n                    --repIdx;\n                } else {\n                    repIdx = (unsigned int)i - 1;\n                }\n                ++outSeqs[i].rep;\n            }\n            assert(repIdx >= -3);\n            outSeqs[i].offset = repIdx >= 0 ? 
outSeqs[repIdx].offset : repStartValue[-repIdx - 1];\n            if (outSeqs[i].rep == 4) {\n                --outSeqs[i].offset;\n            }\n        } else {\n            outSeqs[i].offset -= ZSTD_REP_NUM;\n        }\n\n        position += outSeqs[i].litLength;\n        outSeqs[i].matchPos = (unsigned int)position;\n        position += outSeqs[i].matchLength;\n    }\n    zc->seqCollector.seqIndex += seqsSize;\n}\n\nsize_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,\n    size_t outSeqsSize, const void* src, size_t srcSize)\n{\n    const size_t dstCapacity = ZSTD_compressBound(srcSize);\n    void* dst = ZSTD_malloc(dstCapacity, ZSTD_defaultCMem);\n    SeqCollector seqCollector;\n\n    RETURN_ERROR_IF(dst == NULL, memory_allocation);\n\n    seqCollector.collectSequences = 1;\n    seqCollector.seqStart = outSeqs;\n    seqCollector.seqIndex = 0;\n    seqCollector.maxSequences = outSeqsSize;\n    zc->seqCollector = seqCollector;\n\n    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);\n    ZSTD_free(dst, ZSTD_defaultCMem);\n    return zc->seqCollector.seqIndex;\n}\n\n/* Returns true if the given block is a RLE block */\nstatic int ZSTD_isRLE(const BYTE *ip, size_t length) {\n    size_t i;\n    if (length < 2) return 1;\n    for (i = 1; i < length; ++i) {\n        if (ip[0] != ip[i]) return 0;\n    }\n    return 1;\n}\n\nstatic void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)\n{\n    ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;\n    zc->blockState.prevCBlock = zc->blockState.nextCBlock;\n    zc->blockState.nextCBlock = tmp;\n}\n\nstatic size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,\n                                        void* dst, size_t dstCapacity,\n                                        const void* src, size_t srcSize, U32 frame)\n{\n    /* This the upper bound for the length of an rle block.\n     * This isn't the actual upper bound. 
Finding the real threshold\n     * needs further investigation.\n     */\n    const U32 rleMaxLength = 25;\n    size_t cSize;\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* op = (BYTE*)dst;\n    DEBUGLOG(5, \"ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)\",\n                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,\n                (unsigned)zc->blockState.matchState.nextToUpdate);\n\n    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);\n        FORWARD_IF_ERROR(bss);\n        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }\n    }\n\n    if (zc->seqCollector.collectSequences) {\n        ZSTD_copyBlockSequences(zc);\n        return 0;\n    }\n\n    /* encode sequences and literals */\n    cSize = ZSTD_compressSequences(&zc->seqStore,\n            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,\n            &zc->appliedParams,\n            dst, dstCapacity,\n            srcSize,\n            zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,\n            zc->bmi2);\n\n    if (frame &&\n        /* We don't want to emit our first block as a RLE even if it qualifies because\n         * doing so will cause the decoder (cli only) to throw a \"should consume all input error.\"\n         * This is only an issue for zstd <= v1.4.3\n         */\n        !zc->isFirstBlock &&\n        cSize < rleMaxLength &&\n        ZSTD_isRLE(ip, srcSize))\n    {\n        cSize = 1;\n        op[0] = ip[0];\n    }\n\nout:\n    if (!ZSTD_isError(cSize) && cSize > 1) {\n        ZSTD_confirmRepcodesAndEntropyTables(zc);\n    }\n    /* We check that dictionaries have offset codes available for the first\n     * block. 
After the first block, the offcode table might not have large\n     * enough codes to represent the offsets in the data.\n     */\n    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)\n        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;\n\n    return cSize;\n}\n\nstatic size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,\n                               void* dst, size_t dstCapacity,\n                               const void* src, size_t srcSize,\n                               const size_t bss, U32 lastBlock)\n{\n    DEBUGLOG(6, \"Attempting ZSTD_compressSuperBlock()\");\n    if (bss == ZSTDbss_compress) {\n        /* Attempt superblock compression.\n         *\n         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the\n         * standard ZSTD_compressBound(). This is a problem, because even if we have\n         * space now, taking an extra byte now could cause us to run out of space later\n         * and violate ZSTD_compressBound().\n         *\n         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.\n         *\n         * In order to respect ZSTD_compressBound() we must attempt to emit a raw\n         * uncompressed block in these cases:\n         *   * cSize == 0: Return code for an uncompressed block.\n         *   * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).\n         *     ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of\n         *     output space.\n         *   * cSize >= blockBound(srcSize): We have expanded the block too much so\n         *     emit an uncompressed block.\n         */\n        size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, lastBlock);\n        if (cSize != ERROR(dstSize_tooSmall)) {\n            FORWARD_IF_ERROR(cSize);\n            if (cSize != 0 && cSize < srcSize + ZSTD_blockHeaderSize) {\n                
ZSTD_confirmRepcodesAndEntropyTables(zc);\n                return cSize;\n            }\n        }\n    }\n\n    DEBUGLOG(6, \"Resorting to ZSTD_noCompressBlock()\");\n    /* Superblock compression failed, attempt to emit a single no compress block.\n     * The decoder will be able to stream this block since it is uncompressed.\n     */\n    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);\n}\n\nstatic size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,\n                               void* dst, size_t dstCapacity,\n                               const void* src, size_t srcSize,\n                               U32 lastBlock)\n{\n    size_t cSize = 0;\n    const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);\n    DEBUGLOG(5, \"ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)\",\n                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);\n    FORWARD_IF_ERROR(bss);\n\n    cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);\n    FORWARD_IF_ERROR(cSize);\n\n    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)\n        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;\n\n    return cSize;\n}\n\nstatic void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,\n                                         ZSTD_cwksp* ws,\n                                         ZSTD_CCtx_params const* params,\n                                         void const* ip,\n                                         void const* iend)\n{\n    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {\n        U32 const maxDist = (U32)1 << params->cParams.windowLog;\n        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);\n        U32 const correction = 
ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);\n        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);\n        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);\n        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);\n        ZSTD_cwksp_mark_tables_dirty(ws);\n        ZSTD_reduceIndex(ms, params, correction);\n        ZSTD_cwksp_mark_tables_clean(ws);\n        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;\n        else ms->nextToUpdate -= correction;\n        /* invalidate dictionaries on overflow correction */\n        ms->loadedDictEnd = 0;\n        ms->dictMatchState = NULL;\n    }\n}\n\n/*! ZSTD_compress_frameChunk() :\n*   Compress a chunk of data into one or multiple blocks.\n*   All blocks will be terminated, all input will be consumed.\n*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.\n*   Frame is supposed already started (header already produced)\n*   @return : compressed size, or an error code\n*/\nstatic size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,\n                                     void* dst, size_t dstCapacity,\n                               const void* src, size_t srcSize,\n                                     U32 lastFrameChunk)\n{\n    size_t blockSize = cctx->blockSize;\n    size_t remaining = srcSize;\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* op = ostart;\n    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;\n\n    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);\n\n    DEBUGLOG(5, \"ZSTD_compress_frameChunk (blockSize=%u)\", (unsigned)blockSize);\n    if (cctx->appliedParams.fParams.checksumFlag && srcSize)\n        XXH64_update(&cctx->xxhState, src, srcSize);\n\n    while (remaining) {\n        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;\n        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);\n\n        RETURN_ERROR_IF(dstCapacity < 
ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,\n                        dstSize_tooSmall,\n                        \"not enough space to store compressed block\");\n        if (remaining < blockSize) blockSize = remaining;\n\n        ZSTD_overflowCorrectIfNeeded(\n            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);\n        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);\n\n        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */\n        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;\n\n        {   size_t cSize;\n            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {\n                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);\n                FORWARD_IF_ERROR(cSize);\n                assert(cSize > 0);\n                assert(cSize <= blockSize + ZSTD_blockHeaderSize);\n            } else {\n                cSize = ZSTD_compressBlock_internal(cctx,\n                                        op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,\n                                        ip, blockSize, 1 /* frame */);\n                FORWARD_IF_ERROR(cSize);\n\n                if (cSize == 0) {  /* block is not compressible */\n                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);\n                    FORWARD_IF_ERROR(cSize);\n                } else {\n                    U32 const cBlockHeader = cSize == 1 ?\n                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :\n                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);\n                    MEM_writeLE24(op, cBlockHeader);\n                    cSize += ZSTD_blockHeaderSize;\n                }\n            }\n\n\n            ip += blockSize;\n            assert(remaining >= blockSize);\n            remaining -= blockSize;\n           
 op += cSize;\n            assert(dstCapacity >= cSize);\n            dstCapacity -= cSize;\n            cctx->isFirstBlock = 0;\n            DEBUGLOG(5, \"ZSTD_compress_frameChunk: adding a block of size %u\",\n                        (unsigned)cSize);\n    }   }\n\n    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;\n    return (size_t)(op-ostart);\n}\n\n\nstatic size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,\n                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)\n{   BYTE* const op = (BYTE*)dst;\n    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */\n    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */\n    U32   const checksumFlag = params->fParams.checksumFlag>0;\n    U32   const windowSize = (U32)1 << params->cParams.windowLog;\n    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);\n    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);\n    U32   const fcsCode = params->fParams.contentSizeFlag ?\n                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */\n    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );\n    size_t pos=0;\n\n    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));\n    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);\n    DEBUGLOG(4, \"ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u\",\n                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);\n\n    if (params->format == ZSTD_f_zstd1) {\n        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);\n        pos = 4;\n    }\n    op[pos++] = frameHeaderDescriptionByte;\n    if (!singleSegment) 
op[pos++] = windowLogByte;\n    switch(dictIDSizeCode)\n    {\n        default:  assert(0); /* impossible */\n        case 0 : break;\n        case 1 : op[pos] = (BYTE)(dictID); pos++; break;\n        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;\n        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;\n    }\n    switch(fcsCode)\n    {\n        default:  assert(0); /* impossible */\n        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;\n        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;\n        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;\n        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;\n    }\n    return pos;\n}\n\n/* ZSTD_writeLastEmptyBlock() :\n * output an empty Block with end-of-frame mark to complete a frame\n * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))\n *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)\n */\nsize_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)\n{\n    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);\n    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */\n        MEM_writeLE24(dst, cBlockHeader24);\n        return ZSTD_blockHeaderSize;\n    }\n}\n\nsize_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)\n{\n    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);\n    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,\n                    parameter_unsupported);\n    cctx->externSeqStore.seq = seq;\n    cctx->externSeqStore.size = nbSeq;\n    cctx->externSeqStore.capacity = nbSeq;\n    cctx->externSeqStore.pos = 0;\n    return 0;\n}\n\n\nstatic size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,\n                              void* dst, size_t dstCapacity,\n                        const void* src, size_t srcSize,\n    
                           U32 frame, U32 lastFrameChunk)\n{\n    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;\n    size_t fhSize = 0;\n\n    DEBUGLOG(5, \"ZSTD_compressContinue_internal, stage: %u, srcSize: %u\",\n                cctx->stage, (unsigned)srcSize);\n    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,\n                    \"missing init (ZSTD_compressBegin)\");\n\n    if (frame && (cctx->stage==ZSTDcs_init)) {\n        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,\n                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);\n        FORWARD_IF_ERROR(fhSize);\n        assert(fhSize <= dstCapacity);\n        dstCapacity -= fhSize;\n        dst = (char*)dst + fhSize;\n        cctx->stage = ZSTDcs_ongoing;\n    }\n\n    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */\n\n    if (!ZSTD_window_update(&ms->window, src, srcSize)) {\n        ms->nextToUpdate = ms->window.dictLimit;\n    }\n    if (cctx->appliedParams.ldmParams.enableLdm) {\n        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);\n    }\n\n    if (!frame) {\n        /* overflow check and correction for block mode */\n        ZSTD_overflowCorrectIfNeeded(\n            ms, &cctx->workspace, &cctx->appliedParams,\n            src, (BYTE const*)src + srcSize);\n    }\n\n    DEBUGLOG(5, \"ZSTD_compressContinue_internal (blockSize=%u)\", (unsigned)cctx->blockSize);\n    {   size_t const cSize = frame ?\n                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :\n                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);\n        FORWARD_IF_ERROR(cSize);\n        cctx->consumedSrcSize += srcSize;\n        cctx->producedCSize += (cSize + fhSize);\n        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));\n        if (cctx->pledgedSrcSizePlusOne 
!= 0) {  /* control src size */\n            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);\n            RETURN_ERROR_IF(\n                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,\n                srcSize_wrong,\n                \"error : pledgedSrcSize = %u, while realSrcSize >= %u\",\n                (unsigned)cctx->pledgedSrcSizePlusOne-1,\n                (unsigned)cctx->consumedSrcSize);\n        }\n        return cSize + fhSize;\n    }\n}\n\nsize_t ZSTD_compressContinue (ZSTD_CCtx* cctx,\n                              void* dst, size_t dstCapacity,\n                        const void* src, size_t srcSize)\n{\n    DEBUGLOG(5, \"ZSTD_compressContinue (srcSize=%u)\", (unsigned)srcSize);\n    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);\n}\n\n\nsize_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)\n{\n    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;\n    assert(!ZSTD_checkCParams(cParams));\n    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);\n}\n\nsize_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    DEBUGLOG(5, \"ZSTD_compressBlock: srcSize = %u\", (unsigned)srcSize);\n    { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);\n      RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong); }\n\n    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);\n}\n\n/*! 
ZSTD_loadDictionaryContent() :\n *  @return : 0, or an error code\n */\nstatic size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,\n                                         ZSTD_cwksp* ws,\n                                         ZSTD_CCtx_params const* params,\n                                         const void* src, size_t srcSize,\n                                         ZSTD_dictTableLoadMethod_e dtlm)\n{\n    const BYTE* ip = (const BYTE*) src;\n    const BYTE* const iend = ip + srcSize;\n\n    ZSTD_window_update(&ms->window, src, srcSize);\n    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);\n\n    /* Assert that we the ms params match the params we're being given */\n    ZSTD_assertEqualCParams(params->cParams, ms->cParams);\n\n    if (srcSize <= HASH_READ_SIZE) return 0;\n\n    while (iend - ip > HASH_READ_SIZE) {\n        size_t const remaining = (size_t)(iend - ip);\n        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);\n        const BYTE* const ichunk = ip + chunk;\n\n        ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);\n\n        switch(params->cParams.strategy)\n        {\n        case ZSTD_fast:\n            ZSTD_fillHashTable(ms, ichunk, dtlm);\n            break;\n        case ZSTD_dfast:\n            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);\n            break;\n\n        case ZSTD_greedy:\n        case ZSTD_lazy:\n        case ZSTD_lazy2:\n            if (chunk >= HASH_READ_SIZE)\n                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);\n            break;\n\n        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */\n        case ZSTD_btopt:\n        case ZSTD_btultra:\n        case ZSTD_btultra2:\n            if (chunk >= HASH_READ_SIZE)\n                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);\n            break;\n\n        default:\n            assert(0);  /* not possible : not a valid strategy id */\n        }\n\n        ip = ichunk;\n 
   }\n\n    ms->nextToUpdate = (U32)(iend - ms->window.base);\n    return 0;\n}\n\n\n/* Dictionaries that assign zero probability to symbols that show up causes problems\n   when FSE encoding.  Refuse dictionaries that assign zero probability to symbols\n   that we may encounter during compression.\n   NOTE: This behavior is not standard and could be improved in the future. */\nstatic size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {\n    U32 s;\n    RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);\n    for (s = 0; s <= maxSymbolValue; ++s) {\n        RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);\n    }\n    return 0;\n}\n\nsize_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,\n                         short* offcodeNCount, unsigned* offcodeMaxValue,\n                         const void* const dict, size_t dictSize)\n{\n    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */\n    const BYTE* const dictEnd = dictPtr + dictSize;\n    dictPtr += 8;\n    bs->entropy.huf.repeatMode = HUF_repeat_check;\n\n    {   unsigned maxSymbolValue = 255;\n        unsigned hasZeroWeights = 1;\n        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,\n            dictEnd-dictPtr, &hasZeroWeights);\n\n        /* We only set the loaded table as valid if it contains all non-zero\n         * weights. 
Otherwise, we set it to check */\n        if (!hasZeroWeights)\n            bs->entropy.huf.repeatMode = HUF_repeat_valid;\n\n        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);\n        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);\n        dictPtr += hufHeaderSize;\n    }\n\n    {   unsigned offcodeLog;\n        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);\n        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);\n        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);\n        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */\n        /* fill all offset symbols to avoid garbage at end of table */\n        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(\n                bs->entropy.fse.offcodeCTable,\n                offcodeNCount, MaxOff, offcodeLog,\n                workspace, HUF_WORKSPACE_SIZE)),\n            dictionary_corrupted);\n        dictPtr += offcodeHeaderSize;\n    }\n\n    {   short matchlengthNCount[MaxML+1];\n        unsigned matchlengthMaxValue = MaxML, matchlengthLog;\n        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);\n        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);\n        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);\n        /* Every match length code must have non-zero probability */\n        FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));\n        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(\n                bs->entropy.fse.matchlengthCTable,\n                matchlengthNCount, matchlengthMaxValue, matchlengthLog,\n                workspace, HUF_WORKSPACE_SIZE)),\n            dictionary_corrupted);\n        dictPtr += matchlengthHeaderSize;\n    }\n\n   
 {   short litlengthNCount[MaxLL+1];\n        unsigned litlengthMaxValue = MaxLL, litlengthLog;\n        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);\n        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);\n        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);\n        /* Every literal length code must have non-zero probability */\n        FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));\n        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(\n                bs->entropy.fse.litlengthCTable,\n                litlengthNCount, litlengthMaxValue, litlengthLog,\n                workspace, HUF_WORKSPACE_SIZE)),\n            dictionary_corrupted);\n        dictPtr += litlengthHeaderSize;\n    }\n\n    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);\n    bs->rep[0] = MEM_readLE32(dictPtr+0);\n    bs->rep[1] = MEM_readLE32(dictPtr+4);\n    bs->rep[2] = MEM_readLE32(dictPtr+8);\n    dictPtr += 12;\n\n    return dictPtr - (const BYTE*)dict;\n}\n\n/* Dictionary format :\n * See :\n * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format\n */\n/*! 
ZSTD_loadZstdDictionary() :\n * @return : dictID, or an error code\n *  assumptions : magic number supposed already checked\n *                dictSize supposed >= 8\n */\nstatic size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,\n                                      ZSTD_matchState_t* ms,\n                                      ZSTD_cwksp* ws,\n                                      ZSTD_CCtx_params const* params,\n                                      const void* dict, size_t dictSize,\n                                      ZSTD_dictTableLoadMethod_e dtlm,\n                                      void* workspace)\n{\n    const BYTE* dictPtr = (const BYTE*)dict;\n    const BYTE* const dictEnd = dictPtr + dictSize;\n    short offcodeNCount[MaxOff+1];\n    unsigned offcodeMaxValue = MaxOff;\n    size_t dictID;\n    size_t eSize;\n\n    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));\n    assert(dictSize >= 8);\n    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);\n\n    dictID = params->fParams.noDictIDFlag ? 
0 :  MEM_readLE32(dictPtr + 4 /* skip magic number */ );\n    eSize = ZSTD_loadCEntropy(bs, workspace, offcodeNCount, &offcodeMaxValue, dict, dictSize);\n    FORWARD_IF_ERROR(eSize);\n    dictPtr += eSize;\n\n    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);\n        U32 offcodeMax = MaxOff;\n        if (dictContentSize <= ((U32)-1) - 128 KB) {\n            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */\n            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */\n        }\n        /* All offset values <= dictContentSize + 128 KB must be representable */\n        FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));\n        /* All repCodes must be <= dictContentSize and != 0*/\n        {   U32 u;\n            for (u=0; u<3; u++) {\n                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);\n                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);\n        }   }\n\n        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;\n        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;\n        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;\n        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(\n            ms, ws, params, dictPtr, dictContentSize, dtlm));\n        return dictID;\n    }\n}\n\n/** ZSTD_compress_insertDictionary() :\n*   @return : dictID, or an error code */\nstatic size_t\nZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,\n                               ZSTD_matchState_t* ms,\n                               ZSTD_cwksp* ws,\n                         const ZSTD_CCtx_params* params,\n                         const void* dict, size_t dictSize,\n                               ZSTD_dictContentType_e dictContentType,\n                               ZSTD_dictTableLoadMethod_e dtlm,\n                               
void* workspace)\n{\n    DEBUGLOG(4, \"ZSTD_compress_insertDictionary (dictSize=%u)\", (U32)dictSize);\n    if ((dict==NULL) || (dictSize<8)) {\n        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);\n        return 0;\n    }\n\n    ZSTD_reset_compressedBlockState(bs);\n\n    /* dict restricted modes */\n    if (dictContentType == ZSTD_dct_rawContent)\n        return ZSTD_loadDictionaryContent(ms, ws, params, dict, dictSize, dtlm);\n\n    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {\n        if (dictContentType == ZSTD_dct_auto) {\n            DEBUGLOG(4, \"raw content dictionary detected\");\n            return ZSTD_loadDictionaryContent(\n                ms, ws, params, dict, dictSize, dtlm);\n        }\n        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);\n        assert(0);   /* impossible */\n    }\n\n    /* dict as full zstd dictionary */\n    return ZSTD_loadZstdDictionary(\n        bs, ms, ws, params, dict, dictSize, dtlm, workspace);\n}\n\n#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)\n#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6)\n\n/*! 
ZSTD_compressBegin_internal() :\n * @return : 0, or an error code */\nstatic size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,\n                                    const void* dict, size_t dictSize,\n                                    ZSTD_dictContentType_e dictContentType,\n                                    ZSTD_dictTableLoadMethod_e dtlm,\n                                    const ZSTD_CDict* cdict,\n                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,\n                                    ZSTD_buffered_policy_e zbuff)\n{\n    DEBUGLOG(4, \"ZSTD_compressBegin_internal: wlog=%u\", params->cParams.windowLog);\n    /* params are supposed to be fully validated at this point */\n    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));\n    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */\n    if ( (cdict)\n      && (cdict->dictContentSize > 0)\n      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF\n        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER\n        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN\n        || cdict->compressionLevel == 0)\n      && (params->attachDictPref != ZSTD_dictForceLoad) ) {\n        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);\n    }\n\n    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,\n                                     ZSTDcrp_makeClean, zbuff) );\n    {   size_t const dictID = cdict ?\n                ZSTD_compress_insertDictionary(\n                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,\n                        &cctx->workspace, &cctx->appliedParams, cdict->dictContent,\n                        cdict->dictContentSize, dictContentType, dtlm,\n                        cctx->entropyWorkspace)\n              : ZSTD_compress_insertDictionary(\n                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,\n          
              &cctx->workspace, &cctx->appliedParams, dict, dictSize,\n                        dictContentType, dtlm, cctx->entropyWorkspace);\n        FORWARD_IF_ERROR(dictID);\n        assert(dictID <= UINT_MAX);\n        cctx->dictID = (U32)dictID;\n    }\n    return 0;\n}\n\nsize_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,\n                                    const void* dict, size_t dictSize,\n                                    ZSTD_dictContentType_e dictContentType,\n                                    ZSTD_dictTableLoadMethod_e dtlm,\n                                    const ZSTD_CDict* cdict,\n                                    const ZSTD_CCtx_params* params,\n                                    unsigned long long pledgedSrcSize)\n{\n    DEBUGLOG(4, \"ZSTD_compressBegin_advanced_internal: wlog=%u\", params->cParams.windowLog);\n    /* compression parameters verification and optimization */\n    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) );\n    return ZSTD_compressBegin_internal(cctx,\n                                       dict, dictSize, dictContentType, dtlm,\n                                       cdict,\n                                       params, pledgedSrcSize,\n                                       ZSTDb_not_buffered);\n}\n\n/*! 
ZSTD_compressBegin_advanced() :\n*   @return : 0, or an error code */\nsize_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,\n                             const void* dict, size_t dictSize,\n                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)\n{\n    ZSTD_CCtx_params const cctxParams =\n            ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);\n    return ZSTD_compressBegin_advanced_internal(cctx,\n                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,\n                                            NULL /*cdict*/,\n                                            &cctxParams, pledgedSrcSize);\n}\n\nsize_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)\n{\n    ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);\n    ZSTD_CCtx_params const cctxParams =\n            ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);\n    DEBUGLOG(4, \"ZSTD_compressBegin_usingDict (dictSize=%u)\", (unsigned)dictSize);\n    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,\n                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);\n}\n\nsize_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)\n{\n    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);\n}\n\n\n/*! 
ZSTD_writeEpilogue() :\n*   Ends a frame.\n*   @return : nb of bytes written into dst (or an error code) */\nstatic size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)\n{\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* op = ostart;\n    size_t fhSize = 0;\n\n    DEBUGLOG(4, \"ZSTD_writeEpilogue\");\n    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, \"init missing\");\n\n    /* special case : empty frame */\n    if (cctx->stage == ZSTDcs_init) {\n        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);\n        FORWARD_IF_ERROR(fhSize);\n        dstCapacity -= fhSize;\n        op += fhSize;\n        cctx->stage = ZSTDcs_ongoing;\n    }\n\n    if (cctx->stage != ZSTDcs_ending) {\n        /* write one last empty block, make it the \"last\" block */\n        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;\n        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);\n        MEM_writeLE32(op, cBlockHeader24);\n        op += ZSTD_blockHeaderSize;\n        dstCapacity -= ZSTD_blockHeaderSize;\n    }\n\n    if (cctx->appliedParams.fParams.checksumFlag) {\n        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);\n        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);\n        DEBUGLOG(4, \"ZSTD_writeEpilogue: write checksum : %08X\", (unsigned)checksum);\n        MEM_writeLE32(op, checksum);\n        op += 4;\n    }\n\n    cctx->stage = ZSTDcs_created;  /* return to \"created but no init\" status */\n    return op-ostart;\n}\n\nsize_t ZSTD_compressEnd (ZSTD_CCtx* cctx,\n                         void* dst, size_t dstCapacity,\n                   const void* src, size_t srcSize)\n{\n    size_t endResult;\n    size_t const cSize = ZSTD_compressContinue_internal(cctx,\n                                dst, dstCapacity, src, srcSize,\n                                1 /* frame mode */, 1 /* last chunk */);\n    FORWARD_IF_ERROR(cSize);\n    endResult = ZSTD_writeEpilogue(cctx, 
(char*)dst + cSize, dstCapacity-cSize);\n    FORWARD_IF_ERROR(endResult);\n    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));\n    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */\n        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);\n        DEBUGLOG(4, \"end of frame : controlling src size\");\n        RETURN_ERROR_IF(\n            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,\n            srcSize_wrong,\n             \"error : pledgedSrcSize = %u, while realSrcSize = %u\",\n            (unsigned)cctx->pledgedSrcSizePlusOne-1,\n            (unsigned)cctx->consumedSrcSize);\n    }\n    return cSize + endResult;\n}\n\n\nstatic size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,\n                                      void* dst, size_t dstCapacity,\n                                const void* src, size_t srcSize,\n                                const void* dict,size_t dictSize,\n                                const ZSTD_parameters* params)\n{\n    ZSTD_CCtx_params const cctxParams =\n            ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);\n    DEBUGLOG(4, \"ZSTD_compress_internal\");\n    return ZSTD_compress_advanced_internal(cctx,\n                                           dst, dstCapacity,\n                                           src, srcSize,\n                                           dict, dictSize,\n                                           &cctxParams);\n}\n\nsize_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,\n                               void* dst, size_t dstCapacity,\n                         const void* src, size_t srcSize,\n                         const void* dict,size_t dictSize,\n                               ZSTD_parameters params)\n{\n    DEBUGLOG(4, \"ZSTD_compress_advanced\");\n    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));\n    return ZSTD_compress_internal(cctx,\n                                  dst, dstCapacity,\n     
                             src, srcSize,\n                                  dict, dictSize,\n                                  &params);\n}\n\n/* Internal */\nsize_t ZSTD_compress_advanced_internal(\n        ZSTD_CCtx* cctx,\n        void* dst, size_t dstCapacity,\n        const void* src, size_t srcSize,\n        const void* dict,size_t dictSize,\n        const ZSTD_CCtx_params* params)\n{\n    DEBUGLOG(4, \"ZSTD_compress_advanced_internal (srcSize:%u)\", (unsigned)srcSize);\n    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,\n                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,\n                         params, srcSize, ZSTDb_not_buffered) );\n    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);\n}\n\nsize_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,\n                               void* dst, size_t dstCapacity,\n                         const void* src, size_t srcSize,\n                         const void* dict, size_t dictSize,\n                               int compressionLevel)\n{\n    ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? 
dictSize : 0);\n    ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);\n    assert(params.fParams.contentSizeFlag == 1);\n    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);\n}\n\nsize_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,\n                         void* dst, size_t dstCapacity,\n                   const void* src, size_t srcSize,\n                         int compressionLevel)\n{\n    DEBUGLOG(4, \"ZSTD_compressCCtx (srcSize=%u)\", (unsigned)srcSize);\n    assert(cctx != NULL);\n    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);\n}\n\nsize_t ZSTD_compress(void* dst, size_t dstCapacity,\n               const void* src, size_t srcSize,\n                     int compressionLevel)\n{\n    size_t result;\n    ZSTD_CCtx ctxBody;\n    ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);\n    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);\n    ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */\n    return result;\n}\n\n\n/* =====  Dictionary API  ===== */\n\n/*! ZSTD_estimateCDictSize_advanced() :\n *  Estimate amount of memory that will be needed to create a dictionary with following arguments */\nsize_t ZSTD_estimateCDictSize_advanced(\n        size_t dictSize, ZSTD_compressionParameters cParams,\n        ZSTD_dictLoadMethod_e dictLoadMethod)\n{\n    DEBUGLOG(5, \"sizeof(ZSTD_CDict) : %u\", (unsigned)sizeof(ZSTD_CDict));\n    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))\n         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)\n         + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)\n         + (dictLoadMethod == ZSTD_dlm_byRef ? 
0\n            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));\n}\n\nsize_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)\n{\n    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);\n    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);\n}\n\nsize_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)\n{\n    if (cdict==NULL) return 0;   /* support sizeof on NULL */\n    DEBUGLOG(5, \"sizeof(*cdict) : %u\", (unsigned)sizeof(*cdict));\n    /* cdict may be in the workspace */\n    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))\n        + ZSTD_cwksp_sizeof(&cdict->workspace);\n}\n\nstatic size_t ZSTD_initCDict_internal(\n                    ZSTD_CDict* cdict,\n              const void* dictBuffer, size_t dictSize,\n                    ZSTD_dictLoadMethod_e dictLoadMethod,\n                    ZSTD_dictContentType_e dictContentType,\n                    ZSTD_compressionParameters cParams)\n{\n    DEBUGLOG(3, \"ZSTD_initCDict_internal (dictContentType:%u)\", (unsigned)dictContentType);\n    assert(!ZSTD_checkCParams(cParams));\n    cdict->matchState.cParams = cParams;\n    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {\n        cdict->dictContent = dictBuffer;\n    } else {\n         void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));\n        RETURN_ERROR_IF(!internalBuffer, memory_allocation);\n        cdict->dictContent = internalBuffer;\n        memcpy(internalBuffer, dictBuffer, dictSize);\n    }\n    cdict->dictContentSize = dictSize;\n\n    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);\n\n\n    /* Reset the state to no dictionary */\n    ZSTD_reset_compressedBlockState(&cdict->cBlockState);\n    FORWARD_IF_ERROR(ZSTD_reset_matchState(\n        &cdict->matchState,\n        
&cdict->workspace,\n        &cParams,\n        ZSTDcrp_makeClean,\n        ZSTDirp_reset,\n        ZSTD_resetTarget_CDict));\n    /* (Maybe) load the dictionary\n     * Skips loading the dictionary if it is < 8 bytes.\n     */\n    {   ZSTD_CCtx_params params;\n        memset(&params, 0, sizeof(params));\n        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;\n        params.fParams.contentSizeFlag = 1;\n        params.cParams = cParams;\n        {   size_t const dictID = ZSTD_compress_insertDictionary(\n                    &cdict->cBlockState, &cdict->matchState, &cdict->workspace,\n                    &params, cdict->dictContent, cdict->dictContentSize,\n                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);\n            FORWARD_IF_ERROR(dictID);\n            assert(dictID <= (size_t)(U32)-1);\n            cdict->dictID = (U32)dictID;\n        }\n    }\n\n    return 0;\n}\n\nZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,\n                                      ZSTD_dictLoadMethod_e dictLoadMethod,\n                                      ZSTD_dictContentType_e dictContentType,\n                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)\n{\n    DEBUGLOG(3, \"ZSTD_createCDict_advanced, mode %u\", (unsigned)dictContentType);\n    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;\n\n    {   size_t const workspaceSize =\n            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +\n            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +\n            ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +\n            (dictLoadMethod == ZSTD_dlm_byRef ? 
0\n             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));\n        void* const workspace = ZSTD_malloc(workspaceSize, customMem);\n        ZSTD_cwksp ws;\n        ZSTD_CDict* cdict;\n\n        if (!workspace) {\n            ZSTD_free(workspace, customMem);\n            return NULL;\n        }\n\n        ZSTD_cwksp_init(&ws, workspace, workspaceSize);\n\n        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));\n        assert(cdict != NULL);\n        ZSTD_cwksp_move(&cdict->workspace, &ws);\n        cdict->customMem = customMem;\n        cdict->compressionLevel = 0; /* signals advanced API usage */\n\n        if (ZSTD_isError( ZSTD_initCDict_internal(cdict,\n                                        dictBuffer, dictSize,\n                                        dictLoadMethod, dictContentType,\n                                        cParams) )) {\n            ZSTD_freeCDict(cdict);\n            return NULL;\n        }\n\n        return cdict;\n    }\n}\n\nZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)\n{\n    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);\n    ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dict, dictSize,\n                                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,\n                                                  cParams, ZSTD_defaultCMem);\n    if (cdict)\n        cdict->compressionLevel = compressionLevel == 0 ? 
ZSTD_CLEVEL_DEFAULT : compressionLevel;\n    return cdict;\n}\n\nZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)\n{\n    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);\n    return ZSTD_createCDict_advanced(dict, dictSize,\n                                     ZSTD_dlm_byRef, ZSTD_dct_auto,\n                                     cParams, ZSTD_defaultCMem);\n}\n\nsize_t ZSTD_freeCDict(ZSTD_CDict* cdict)\n{\n    if (cdict==NULL) return 0;   /* support free on NULL */\n    {   ZSTD_customMem const cMem = cdict->customMem;\n        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);\n        ZSTD_cwksp_free(&cdict->workspace, cMem);\n        if (!cdictInWorkspace) {\n            ZSTD_free(cdict, cMem);\n        }\n        return 0;\n    }\n}\n\n/*! ZSTD_initStaticCDict_advanced() :\n *  Generate a digested dictionary in provided memory area.\n *  workspace: The memory area to emplace the dictionary into.\n *             Provided pointer must 8-bytes aligned.\n *             It must outlive dictionary usage.\n *  workspaceSize: Use ZSTD_estimateCDictSize()\n *                 to determine how large workspace must be.\n *  cParams : use ZSTD_getCParams() to transform a compression level\n *            into its relevants cParams.\n * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)\n *  Note : there is no corresponding \"free\" function.\n *         Since workspace was allocated externally, it must be freed externally.\n */\nconst ZSTD_CDict* ZSTD_initStaticCDict(\n                                 void* workspace, size_t workspaceSize,\n                           const void* dict, size_t dictSize,\n                                 ZSTD_dictLoadMethod_e dictLoadMethod,\n                                 ZSTD_dictContentType_e dictContentType,\n                                 ZSTD_compressionParameters cParams)\n{\n    
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);\n    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))\n                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0\n                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))\n                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)\n                            + matchStateSize;\n    ZSTD_CDict* cdict;\n\n    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */\n\n    {\n        ZSTD_cwksp ws;\n        ZSTD_cwksp_init(&ws, workspace, workspaceSize);\n        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));\n        if (cdict == NULL) return NULL;\n        ZSTD_cwksp_move(&cdict->workspace, &ws);\n    }\n\n    DEBUGLOG(4, \"(workspaceSize < neededSize) : (%u < %u) => %u\",\n        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));\n    if (workspaceSize < neededSize) return NULL;\n\n    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,\n                                              dict, dictSize,\n                                              dictLoadMethod, dictContentType,\n                                              cParams) ))\n        return NULL;\n\n    return cdict;\n}\n\nZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)\n{\n    assert(cdict != NULL);\n    return cdict->matchState.cParams;\n}\n\n/* ZSTD_compressBegin_usingCDict_advanced() :\n * cdict must be != NULL */\nsize_t ZSTD_compressBegin_usingCDict_advanced(\n    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,\n    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)\n{\n    DEBUGLOG(4, \"ZSTD_compressBegin_usingCDict_advanced\");\n    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);\n    {   ZSTD_CCtx_params params = cctx->requestedParams;\n        params.cParams = ( pledgedSrcSize < 
ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF\n                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER\n                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN\n                        || cdict->compressionLevel == 0 )\n                      && (params.attachDictPref != ZSTD_dictForceLoad) ?\n                ZSTD_getCParamsFromCDict(cdict)\n              : ZSTD_getCParams(cdict->compressionLevel,\n                                pledgedSrcSize,\n                                cdict->dictContentSize);\n        /* Increase window log to fit the entire dictionary and source if the\n         * source size is known. Limit the increase to 19, which is the\n         * window log for compression level 1 with the largest source size.\n         */\n        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {\n            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);\n            U32 const limitedSrcLog = limitedSrcSize > 1 ? 
ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;\n            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);\n        }\n        params.fParams = fParams;\n        return ZSTD_compressBegin_internal(cctx,\n                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,\n                                           cdict,\n                                           &params, pledgedSrcSize,\n                                           ZSTDb_not_buffered);\n    }\n}\n\n/* ZSTD_compressBegin_usingCDict() :\n * pledgedSrcSize=0 means \"unknown\"\n * if pledgedSrcSize>0, it will enable contentSizeFlag */\nsize_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)\n{\n    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };\n    DEBUGLOG(4, \"ZSTD_compressBegin_usingCDict : dictIDFlag == %u\", !fParams.noDictIDFlag);\n    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);\n}\n\nsize_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,\n                                void* dst, size_t dstCapacity,\n                                const void* src, size_t srcSize,\n                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)\n{\n    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */\n    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);\n}\n\n/*! 
ZSTD_compress_usingCDict() :\n *  Compression using a digested Dictionary.\n *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.\n *  Note that compression parameters are decided at CDict creation time\n *  while frame parameters are hardcoded */\nsize_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,\n                                void* dst, size_t dstCapacity,\n                                const void* src, size_t srcSize,\n                                const ZSTD_CDict* cdict)\n{\n    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };\n    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);\n}\n\n\n\n/* ******************************************************************\n*  Streaming\n********************************************************************/\n\nZSTD_CStream* ZSTD_createCStream(void)\n{\n    DEBUGLOG(3, \"ZSTD_createCStream\");\n    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);\n}\n\nZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)\n{\n    return ZSTD_initStaticCCtx(workspace, workspaceSize);\n}\n\nZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)\n{   /* CStream and CCtx are now same object */\n    return ZSTD_createCCtx_advanced(customMem);\n}\n\nsize_t ZSTD_freeCStream(ZSTD_CStream* zcs)\n{\n    return ZSTD_freeCCtx(zcs);   /* same object */\n}\n\n\n\n/*======   Initialization   ======*/\n\nsize_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }\n\nsize_t ZSTD_CStreamOutSize(void)\n{\n    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;\n}\n\nstatic size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,\n                    const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,\n                    const ZSTD_CDict* const cdict,\n                    ZSTD_CCtx_params params, 
unsigned long long const pledgedSrcSize)\n{\n    DEBUGLOG(4, \"ZSTD_resetCStream_internal\");\n    /* Finalize the compression parameters */\n    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);\n    /* params are supposed to be fully validated at this point */\n    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));\n    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */\n\n    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,\n                                         dict, dictSize, dictContentType, ZSTD_dtlm_fast,\n                                         cdict,\n                                         &params, pledgedSrcSize,\n                                         ZSTDb_buffered) );\n\n    cctx->inToCompress = 0;\n    cctx->inBuffPos = 0;\n    cctx->inBuffTarget = cctx->blockSize\n                      + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */\n    cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;\n    cctx->streamStage = zcss_load;\n    cctx->frameEnded = 0;\n    return 0;   /* ready to go */\n}\n\n/* ZSTD_resetCStream():\n * pledgedSrcSize == 0 means \"unknown\" */\nsize_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)\n{\n    /* temporary : 0 interpreted as \"unknown\" during transition period.\n     * Users willing to specify \"unknown\" **must** use ZSTD_CONTENTSIZE_UNKNOWN.\n     * 0 will be interpreted as \"empty\" in the future.\n     */\n    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;\n    DEBUGLOG(4, \"ZSTD_resetCStream: pledgedSrcSize = %u\", (unsigned)pledgedSrcSize);\n    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );\n    return 0;\n}\n\n/*! ZSTD_initCStream_internal() :\n *  Note : for lib/compress only. 
Used by zstdmt_compress.c.\n *  Assumption 1 : params are valid\n *  Assumption 2 : either dict, or cdict, is defined, not both */\nsize_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,\n                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,\n                    const ZSTD_CCtx_params* params,\n                    unsigned long long pledgedSrcSize)\n{\n    DEBUGLOG(4, \"ZSTD_initCStream_internal\");\n    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );\n    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));\n    zcs->requestedParams = *params;\n    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */\n    if (dict) {\n        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );\n    } else {\n        /* Dictionary is cleared if !cdict */\n        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );\n    }\n    return 0;\n}\n\n/* ZSTD_initCStream_usingCDict_advanced() :\n * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */\nsize_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,\n                                            const ZSTD_CDict* cdict,\n                                            ZSTD_frameParameters fParams,\n                                            unsigned long long pledgedSrcSize)\n{\n    DEBUGLOG(4, \"ZSTD_initCStream_usingCDict_advanced\");\n    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );\n    zcs->requestedParams.fParams = fParams;\n    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );\n    return 0;\n}\n\n/* note : cdict must outlive compression session */\nsize_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)\n{\n    DEBUGLOG(4, \"ZSTD_initCStream_usingCDict\");\n    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );\n    
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );\n    return 0;\n}\n\n\n/* ZSTD_initCStream_advanced() :\n * pledgedSrcSize must be exact.\n * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.\n * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */\nsize_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,\n                                 const void* dict, size_t dictSize,\n                                 ZSTD_parameters params, unsigned long long pss)\n{\n    /* for compatibility with older programs relying on this behavior.\n     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.\n     * This line will be removed in the future.\n     */\n    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;\n    DEBUGLOG(4, \"ZSTD_initCStream_advanced\");\n    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );\n    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );\n    zcs->requestedParams = ZSTD_assignParamsToCCtxParams(&zcs->requestedParams, &params);\n    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );\n    return 0;\n}\n\nsize_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)\n{\n    DEBUGLOG(4, \"ZSTD_initCStream_usingDict\");\n    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );\n    return 0;\n}\n\nsize_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)\n{\n    /* temporary : 0 interpreted as \"unknown\" during transition period.\n     * Users willing to specify \"unknown\" **must** use ZSTD_CONTENTSIZE_UNKNOWN.\n     * 0 will be interpreted as \"empty\" in the future.\n     
*/\n    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;\n    DEBUGLOG(4, \"ZSTD_initCStream_srcSize\");\n    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );\n    return 0;\n}\n\nsize_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)\n{\n    DEBUGLOG(4, \"ZSTD_initCStream\");\n    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );\n    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );\n    return 0;\n}\n\n/*======   Compression   ======*/\n\nstatic size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)\n{\n    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;\n    if (hintInSize==0) hintInSize = cctx->blockSize;\n    return hintInSize;\n}\n\nstatic size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,\n                       const void* src, size_t srcSize)\n{\n    size_t const length = MIN(dstCapacity, srcSize);\n    if (length) memcpy(dst, src, length);\n    return length;\n}\n\n/** ZSTD_compressStream_generic():\n *  internal function for all *compressStream*() variants\n *  non-static, because can be called from zstdmt_compress.c\n * @return : hint size for next input */\nstatic size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,\n                                          ZSTD_outBuffer* output,\n                                          ZSTD_inBuffer* input,\n                                          ZSTD_EndDirective const flushMode)\n{\n    const char* const istart = (const char*)input->src;\n    const char* const iend = input->size != 0 ? istart + input->size : istart;\n    const char* ip = input->pos != 0 ? 
istart + input->pos : istart;\n    char* const ostart = (char*)output->dst;\n    char* const oend = output->size != 0 ? ostart + output->size : ostart;\n    char* op = output->pos != 0 ? ostart + output->pos : ostart;\n    U32 someMoreWork = 1;\n\n    /* check expectations */\n    DEBUGLOG(5, \"ZSTD_compressStream_generic, flush=%u\", (unsigned)flushMode);\n    assert(zcs->inBuff != NULL);\n    assert(zcs->inBuffSize > 0);\n    assert(zcs->outBuff !=  NULL);\n    assert(zcs->outBuffSize > 0);\n    assert(output->pos <= output->size);\n    assert(input->pos <= input->size);\n\n    while (someMoreWork) {\n        switch(zcs->streamStage)\n        {\n        case zcss_init:\n            RETURN_ERROR(init_missing, \"call ZSTD_initCStream() first!\");\n\n        case zcss_load:\n            if ( (flushMode == ZSTD_e_end)\n              && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */\n              && (zcs->inBuffPos == 0) ) {\n                /* shortcut to compression pass directly into output buffer */\n                size_t const cSize = ZSTD_compressEnd(zcs,\n                                                op, oend-op, ip, iend-ip);\n                DEBUGLOG(4, \"ZSTD_compressEnd : cSize=%u\", (unsigned)cSize);\n                FORWARD_IF_ERROR(cSize);\n                ip = iend;\n                op += cSize;\n                zcs->frameEnded = 1;\n                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n                someMoreWork = 0; break;\n            }\n            /* complete loading into inBuffer */\n            {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;\n                size_t const loaded = ZSTD_limitCopy(\n                                        zcs->inBuff + zcs->inBuffPos, toLoad,\n                                        ip, iend-ip);\n                zcs->inBuffPos += loaded;\n                if (loaded != 0)\n                    ip += loaded;\n                if ( (flushMode == 
ZSTD_e_continue)\n                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {\n                    /* not enough input to fill full block : stop here */\n                    someMoreWork = 0; break;\n                }\n                if ( (flushMode == ZSTD_e_flush)\n                  && (zcs->inBuffPos == zcs->inToCompress) ) {\n                    /* empty */\n                    someMoreWork = 0; break;\n                }\n            }\n            /* compress current block (note : this stage cannot be stopped in the middle) */\n            DEBUGLOG(5, \"stream compression stage (flushMode==%u)\", flushMode);\n            {   void* cDst;\n                size_t cSize;\n                size_t const iSize = zcs->inBuffPos - zcs->inToCompress;\n                size_t oSize = oend-op;\n                unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);\n                if (oSize >= ZSTD_compressBound(iSize))\n                    cDst = op;   /* compress into output buffer, to skip flush stage */\n                else\n                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;\n                cSize = lastBlock ?\n                        ZSTD_compressEnd(zcs, cDst, oSize,\n                                    zcs->inBuff + zcs->inToCompress, iSize) :\n                        ZSTD_compressContinue(zcs, cDst, oSize,\n                                    zcs->inBuff + zcs->inToCompress, iSize);\n                FORWARD_IF_ERROR(cSize);\n                zcs->frameEnded = lastBlock;\n                /* prepare next block */\n                zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;\n                if (zcs->inBuffTarget > zcs->inBuffSize)\n                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;\n                DEBUGLOG(5, \"inBuffTarget:%u / inBuffSize:%u\",\n                         (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);\n                if (!lastBlock)\n                    
assert(zcs->inBuffTarget <= zcs->inBuffSize);\n                zcs->inToCompress = zcs->inBuffPos;\n                if (cDst == op) {  /* no need to flush */\n                    op += cSize;\n                    if (zcs->frameEnded) {\n                        DEBUGLOG(5, \"Frame completed directly in outBuffer\");\n                        someMoreWork = 0;\n                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n                    }\n                    break;\n                }\n                zcs->outBuffContentSize = cSize;\n                zcs->outBuffFlushedSize = 0;\n                zcs->streamStage = zcss_flush; /* pass-through to flush stage */\n            }\n\t    /* fall-through */\n        case zcss_flush:\n            DEBUGLOG(5, \"flush stage\");\n            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;\n                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),\n                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);\n                DEBUGLOG(5, \"toFlush: %u into %u ==> flushed: %u\",\n                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);\n                if (flushed)\n                    op += flushed;\n                zcs->outBuffFlushedSize += flushed;\n                if (toFlush!=flushed) {\n                    /* flush not fully completed, presumably because dst is too small */\n                    assert(op==oend);\n                    someMoreWork = 0;\n                    break;\n                }\n                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;\n                if (zcs->frameEnded) {\n                    DEBUGLOG(5, \"Frame completed on flush\");\n                    someMoreWork = 0;\n                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n                    break;\n                }\n                zcs->streamStage = zcss_load;\n                break;\n            }\n\n        
default: /* impossible */\n            assert(0);\n        }\n    }\n\n    input->pos = ip - istart;\n    output->pos = op - ostart;\n    if (zcs->frameEnded) return 0;\n    return ZSTD_nextInputSizeHint(zcs);\n}\n\nstatic size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)\n{\n#ifdef ZSTD_MULTITHREAD\n    if (cctx->appliedParams.nbWorkers >= 1) {\n        assert(cctx->mtctx != NULL);\n        return ZSTDMT_nextInputSizeHint(cctx->mtctx);\n    }\n#endif\n    return ZSTD_nextInputSizeHint(cctx);\n\n}\n\nsize_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)\n{\n    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );\n    return ZSTD_nextInputSizeHint_MTorST(zcs);\n}\n\n\nsize_t ZSTD_compressStream2( ZSTD_CCtx* cctx,\n                             ZSTD_outBuffer* output,\n                             ZSTD_inBuffer* input,\n                             ZSTD_EndDirective endOp)\n{\n    DEBUGLOG(5, \"ZSTD_compressStream2, endOp=%u \", (unsigned)endOp);\n    /* check conditions */\n    RETURN_ERROR_IF(output->pos > output->size, GENERIC);\n    RETURN_ERROR_IF(input->pos  > input->size, GENERIC);\n    assert(cctx!=NULL);\n\n    /* transparent initialization stage */\n    if (cctx->streamStage == zcss_init) {\n        ZSTD_CCtx_params params = cctx->requestedParams;\n        ZSTD_prefixDict const prefixDict = cctx->prefixDict;\n        FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. 
*/\n        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */\n        assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */\n        DEBUGLOG(4, \"ZSTD_compressStream2 : transparent init stage\");\n        if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */\n        params.cParams = ZSTD_getCParamsFromCCtxParams(\n                &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);\n\n\n#ifdef ZSTD_MULTITHREAD\n        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {\n            params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */\n        }\n        if (params.nbWorkers > 0) {\n            /* mt context creation */\n            if (cctx->mtctx == NULL) {\n                DEBUGLOG(4, \"ZSTD_compressStream2: creating new mtctx for nbWorkers=%u\",\n                            params.nbWorkers);\n                cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem);\n                RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);\n            }\n            /* mt compression */\n            DEBUGLOG(4, \"call ZSTDMT_initCStream_internal as nbWorkers=%u\", params.nbWorkers);\n            FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(\n                        cctx->mtctx,\n                        prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,\n                        cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );\n            cctx->streamStage = zcss_load;\n            cctx->appliedParams.nbWorkers = params.nbWorkers;\n        } else\n#endif\n        {   FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,\n                            prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,\n                            cctx->cdict,\n                            params, cctx->pledgedSrcSizePlusOne-1) );\n            assert(cctx->streamStage == 
zcss_load);\n            assert(cctx->appliedParams.nbWorkers == 0);\n    }   }\n    /* end of transparent initialization stage */\n\n    /* compression stage */\n#ifdef ZSTD_MULTITHREAD\n    if (cctx->appliedParams.nbWorkers > 0) {\n        int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);\n        size_t flushMin;\n        assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);\n        if (cctx->cParamsChanged) {\n            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);\n            cctx->cParamsChanged = 0;\n        }\n        do {\n            flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);\n            if ( ZSTD_isError(flushMin)\n              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */\n                ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);\n            }\n            FORWARD_IF_ERROR(flushMin);\n        } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);\n        DEBUGLOG(5, \"completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic\");\n        /* Either we don't require maximum forward progress, we've finished the\n         * flush, or we are out of output space.\n         */\n        assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);\n        return flushMin;\n    }\n#endif\n    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );\n    DEBUGLOG(5, \"completed ZSTD_compressStream2\");\n    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */\n}\n\nsize_t ZSTD_compressStream2_simpleArgs (\n                            ZSTD_CCtx* cctx,\n                            void* dst, size_t dstCapacity, size_t* dstPos,\n                      const void* src, size_t srcSize, size_t* srcPos,\n                            ZSTD_EndDirective endOp)\n{\n    ZSTD_outBuffer output = { dst, 
dstCapacity, *dstPos };\n    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };\n    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */\n    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);\n    *dstPos = output.pos;\n    *srcPos = input.pos;\n    return cErr;\n}\n\nsize_t ZSTD_compress2(ZSTD_CCtx* cctx,\n                      void* dst, size_t dstCapacity,\n                      const void* src, size_t srcSize)\n{\n    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);\n    {   size_t oPos = 0;\n        size_t iPos = 0;\n        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,\n                                        dst, dstCapacity, &oPos,\n                                        src, srcSize, &iPos,\n                                        ZSTD_e_end);\n        FORWARD_IF_ERROR(result);\n        if (result != 0) {  /* compression not completed, due to lack of output space */\n            assert(oPos == dstCapacity);\n            RETURN_ERROR(dstSize_tooSmall);\n        }\n        assert(iPos == srcSize);   /* all input is expected consumed */\n        return oPos;\n    }\n}\n\n/*======   Finalize   ======*/\n\n/*! ZSTD_flushStream() :\n * @return : amount of data remaining to flush */\nsize_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)\n{\n    ZSTD_inBuffer input = { NULL, 0, 0 };\n    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);\n}\n\n\nsize_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)\n{\n    ZSTD_inBuffer input = { NULL, 0, 0 };\n    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);\n    FORWARD_IF_ERROR( remainingToFlush );\n    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */\n    /* single thread mode : attempt to calculate remaining to flush more precisely */\n    {   size_t const lastBlockSize = zcs->frameEnded ? 
0 : ZSTD_BLOCKHEADERSIZE;\n        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);\n        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;\n        DEBUGLOG(4, \"ZSTD_endStream : remaining to flush : %u\", (unsigned)toFlush);\n        return toFlush;\n    }\n}\n\n\n/*-=====  Pre-defined compression levels  =====-*/\n\n#define ZSTD_MAX_CLEVEL     22\nint ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }\nint ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }\n\nstatic const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {\n{   /* \"default\" - for any srcSize > 256 KB */\n    /* W,  C,  H,  S,  L, TL, strat */\n    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */\n    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */\n    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */\n    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */\n    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */\n    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */\n    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */\n    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */\n    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */\n    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */\n    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */\n    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */\n    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */\n    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */\n    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */\n    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */\n    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */\n    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */\n    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */\n    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* 
level 19 */\n    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */\n    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */\n    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */\n},\n{   /* for srcSize <= 256 KB */\n    /* W,  C,  H,  S,  L,  T, strat */\n    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */\n    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */\n    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */\n    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */\n    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/\n    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/\n    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/\n    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */\n    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */\n    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */\n    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */\n    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/\n    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/\n    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */\n    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/\n    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/\n    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/\n    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/\n    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/\n    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/\n    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/\n    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/\n    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/\n},\n{   /* for srcSize <= 128 KB */\n    /* W,  C,  H,  S,  L,  T, strat */\n    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */\n    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */\n    { 17, 13, 15,  1,  5,  0, 
ZSTD_fast    },  /* level  2 */\n    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */\n    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */\n    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */\n    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */\n    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */\n    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */\n    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */\n    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */\n    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */\n    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */\n    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/\n    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/\n    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/\n    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/\n    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/\n    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/\n    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/\n    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/\n    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/\n    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/\n},\n{   /* for srcSize <= 16 KB */\n    /* W,  C,  H,  S,  L,  T, strat */\n    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */\n    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */\n    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */\n    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */\n    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */\n    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/\n    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */\n    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */\n    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/\n    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  
/* level  9.*/\n    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/\n    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/\n    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/\n    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/\n    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/\n    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/\n    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/\n    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/\n    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/\n    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/\n    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/\n    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/\n    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/\n},\n};\n\n/*! ZSTD_getCParams_internal() :\n * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.\n *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.\n *        Use dictSize == 0 for unknown or unused. */\nstatic ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)\n{\n    int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;\n    size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;\n    U64 const rSize = unknown && dictSize == 0 ? 
ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;\n    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);\n    int row = compressionLevel;\n    DEBUGLOG(5, \"ZSTD_getCParams_internal (cLevel=%i)\", compressionLevel);\n    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */\n    if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */\n    if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;\n    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];\n        if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */\n        /* refine parameters based on srcSize & dictSize */\n        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);\n    }\n}\n\n/*! ZSTD_getCParams() :\n * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.\n *  Size values are optional, provide 0 if not known or unused */\nZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)\n{\n    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;\n    return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize);\n}\n\n/*! ZSTD_getParams() :\n *  same idea as ZSTD_getCParams()\n * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).\n *  Fields of `ZSTD_frameParameters` are set to default values */\nstatic ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {\n    ZSTD_parameters params;\n    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize);\n    DEBUGLOG(5, \"ZSTD_getParams (cLevel=%i)\", compressionLevel);\n    memset(&params, 0, sizeof(params));\n    params.cParams = cParams;\n    params.fParams.contentSizeFlag = 1;\n    return params;\n}\n\n/*! 
ZSTD_getParams() :\n *  same idea as ZSTD_getCParams()\n * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).\n *  Fields of `ZSTD_frameParameters` are set to default values */\nZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {\n    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;\n    return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_compress_internal.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/* This header contains definitions\n * that shall **only** be used by modules within lib/compress.\n */\n\n#ifndef ZSTD_COMPRESS_H\n#define ZSTD_COMPRESS_H\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include \"zstd_internal.h\"\n#include \"zstd_cwksp.h\"\n#ifdef ZSTD_MULTITHREAD\n#  include \"zstdmt_compress.h\"\n#endif\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/*-*************************************\n*  Constants\n***************************************/\n#define kSearchStrength      8\n#define HASH_READ_SIZE       8\n#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means \"unsorted\".\n                                       It could be confused for a real successor at index \"1\", if sorted as larger than its predecessor.\n                                       It's not a big deal though : candidate will just be sorted again.\n                                       Additionally, candidate position 1 will be lost.\n                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.\n                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.\n                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */\n\n\n/*-*************************************\n*  Context memory management\n***************************************/\ntypedef 
enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;\ntypedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;\n\ntypedef struct ZSTD_prefixDict_s {\n    const void* dict;\n    size_t dictSize;\n    ZSTD_dictContentType_e dictContentType;\n} ZSTD_prefixDict;\n\ntypedef struct {\n    void* dictBuffer;\n    void const* dict;\n    size_t dictSize;\n    ZSTD_dictContentType_e dictContentType;\n    ZSTD_CDict* cdict;\n} ZSTD_localDict;\n\ntypedef struct {\n    U32 CTable[HUF_CTABLE_SIZE_U32(255)];\n    HUF_repeat repeatMode;\n} ZSTD_hufCTables_t;\n\ntypedef struct {\n    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];\n    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];\n    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];\n    FSE_repeat offcode_repeatMode;\n    FSE_repeat matchlength_repeatMode;\n    FSE_repeat litlength_repeatMode;\n} ZSTD_fseCTables_t;\n\ntypedef struct {\n    ZSTD_hufCTables_t huf;\n    ZSTD_fseCTables_t fse;\n} ZSTD_entropyCTables_t;\n\ntypedef struct {\n    U32 off;\n    U32 len;\n} ZSTD_match_t;\n\ntypedef struct {\n    int price;\n    U32 off;\n    U32 mlen;\n    U32 litlen;\n    U32 rep[ZSTD_REP_NUM];\n} ZSTD_optimal_t;\n\ntypedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;\n\ntypedef struct {\n    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */\n    unsigned* litFreq;           /* table of literals statistics, of size 256 */\n    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */\n    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */\n    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */\n    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */\n    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */\n\n    U32  
litSum;                 /* nb of literals */\n    U32  litLengthSum;           /* nb of litLength codes */\n    U32  matchLengthSum;         /* nb of matchLength codes */\n    U32  offCodeSum;             /* nb of offset codes */\n    U32  litSumBasePrice;        /* to compare to log2(litfreq) */\n    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */\n    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */\n    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */\n    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */\n    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */\n    ZSTD_literalCompressionMode_e literalCompressionMode;\n} optState_t;\n\ntypedef struct {\n  ZSTD_entropyCTables_t entropy;\n  U32 rep[ZSTD_REP_NUM];\n} ZSTD_compressedBlockState_t;\n\ntypedef struct {\n    BYTE const* nextSrc;    /* next block here to continue on current prefix */\n    BYTE const* base;       /* All regular indexes relative to this position */\n    BYTE const* dictBase;   /* extDict indexes relative to this position */\n    U32 dictLimit;          /* below that point, need extDict */\n    U32 lowLimit;           /* below that point, no more valid data */\n} ZSTD_window_t;\n\ntypedef struct ZSTD_matchState_t ZSTD_matchState_t;\nstruct ZSTD_matchState_t {\n    ZSTD_window_t window;   /* State for window round buffer management */\n    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential.\n                             * When loadedDictEnd != 0, a dictionary is in use, and still valid.\n                             * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.\n                             * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().\n                             * When dict referential is copied into active context 
(i.e. not attached),\n                             * loadedDictEnd == dictSize, since referential starts from zero.\n                             */\n    U32 nextToUpdate;       /* index from which to continue table update */\n    U32 hashLog3;           /* dispatch table for matches of len==3 : larger == faster, more memory */\n    U32* hashTable;\n    U32* hashTable3;\n    U32* chainTable;\n    optState_t opt;         /* optimal parser state */\n    const ZSTD_matchState_t* dictMatchState;\n    ZSTD_compressionParameters cParams;\n};\n\ntypedef struct {\n    ZSTD_compressedBlockState_t* prevCBlock;\n    ZSTD_compressedBlockState_t* nextCBlock;\n    ZSTD_matchState_t matchState;\n} ZSTD_blockState_t;\n\ntypedef struct {\n    U32 offset;\n    U32 checksum;\n} ldmEntry_t;\n\ntypedef struct {\n    ZSTD_window_t window;   /* State for the window round buffer management */\n    ldmEntry_t* hashTable;\n    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */\n    U64 hashPower;          /* Used to compute the rolling hash.\n                             * Depends on ldmParams.minMatchLength */\n} ldmState_t;\n\ntypedef struct {\n    U32 enableLdm;          /* 1 if enable long distance matching */\n    U32 hashLog;            /* Log size of hashTable */\n    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */\n    U32 minMatchLength;     /* Minimum match length */\n    U32 hashRateLog;       /* Log number of entries to skip */\n    U32 windowLog;          /* Window log for the LDM */\n} ldmParams_t;\n\ntypedef struct {\n    U32 offset;\n    U32 litLength;\n    U32 matchLength;\n} rawSeq;\n\ntypedef struct {\n  rawSeq* seq;     /* The start of the sequences */\n  size_t pos;      /* The position where reading stopped. <= size. */\n  size_t size;     /* The number of sequences. <= capacity. 
*/\n  size_t capacity; /* The capacity starting from `seq` pointer */\n} rawSeqStore_t;\n\ntypedef struct {\n    int collectSequences;\n    ZSTD_Sequence* seqStart;\n    size_t seqIndex;\n    size_t maxSequences;\n} SeqCollector;\n\nstruct ZSTD_CCtx_params_s {\n    ZSTD_format_e format;\n    ZSTD_compressionParameters cParams;\n    ZSTD_frameParameters fParams;\n\n    int compressionLevel;\n    int forceWindow;           /* force back-references to respect limit of\n                                * 1<<wLog, even for dictionary */\n    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.\n                                * No target when targetCBlockSize == 0.\n                                * There is no guarantee on compressed block size */\n    int srcSizeHint;           /* User's best guess of source size.\n                                * Hint is not valid when srcSizeHint == 0.\n                                * There is no guarantee that hint is close to actual source size */\n\n    ZSTD_dictAttachPref_e attachDictPref;\n    ZSTD_literalCompressionMode_e literalCompressionMode;\n\n    /* Multithreading: used to pass parameters to mtctx */\n    int nbWorkers;\n    size_t jobSize;\n    int overlapLog;\n    int rsyncable;\n\n    /* Long distance matching parameters */\n    ldmParams_t ldmParams;\n\n    /* Internal use, for createCCtxParams() and freeCCtxParams() only */\n    ZSTD_customMem customMem;\n};  /* typedef'd to ZSTD_CCtx_params within \"zstd.h\" */\n\nstruct ZSTD_CCtx_s {\n    ZSTD_compressionStage_e stage;\n    int cParamsChanged;                  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */\n    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. 
*/\n    ZSTD_CCtx_params requestedParams;\n    ZSTD_CCtx_params appliedParams;\n    U32   dictID;\n\n    ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */\n    size_t blockSize;\n    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */\n    unsigned long long consumedSrcSize;\n    unsigned long long producedCSize;\n    XXH64_state_t xxhState;\n    ZSTD_customMem customMem;\n    size_t staticSize;\n    SeqCollector seqCollector;\n    int isFirstBlock;\n    int initialized;\n\n    seqStore_t seqStore;      /* sequences storage ptrs */\n    ldmState_t ldmState;      /* long distance matching state */\n    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */\n    size_t maxNbLdmSequences;\n    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */\n    ZSTD_blockState_t blockState;\n    U32* entropyWorkspace;  /* entropy workspace of HUF_WORKSPACE_SIZE bytes */\n\n    /* streaming */\n    char*  inBuff;\n    size_t inBuffSize;\n    size_t inToCompress;\n    size_t inBuffPos;\n    size_t inBuffTarget;\n    char*  outBuff;\n    size_t outBuffSize;\n    size_t outBuffContentSize;\n    size_t outBuffFlushedSize;\n    ZSTD_cStreamStage streamStage;\n    U32    frameEnded;\n\n    /* Dictionary */\n    ZSTD_localDict localDict;\n    const ZSTD_CDict* cdict;\n    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */\n\n    /* Multi-threading */\n#ifdef ZSTD_MULTITHREAD\n    ZSTDMT_CCtx* mtctx;\n#endif\n};\n\ntypedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;\n\ntypedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;\n\n\ntypedef size_t (*ZSTD_blockCompressor) (\n        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);\n\n\nMEM_STATIC U32 ZSTD_LLcode(U32 litLength)\n{\n    
static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,\n                                       8,  9, 10, 11, 12, 13, 14, 15,\n                                      16, 16, 17, 17, 18, 18, 19, 19,\n                                      20, 20, 20, 20, 21, 21, 21, 21,\n                                      22, 22, 22, 22, 22, 22, 22, 22,\n                                      23, 23, 23, 23, 23, 23, 23, 23,\n                                      24, 24, 24, 24, 24, 24, 24, 24,\n                                      24, 24, 24, 24, 24, 24, 24, 24 };\n    static const U32 LL_deltaCode = 19;\n    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];\n}\n\n/* ZSTD_MLcode() :\n * note : mlBase = matchLength - MINMATCH;\n *        because it's the format it's stored in seqStore->sequences */\nMEM_STATIC U32 ZSTD_MLcode(U32 mlBase)\n{\n    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,\n                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,\n                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,\n                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,\n                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,\n                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,\n                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,\n                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };\n    static const U32 ML_deltaCode = 36;\n    return (mlBase > 127) ? 
ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];\n}\n\n/* ZSTD_cParam_withinBounds:\n * @return 1 if value is within cParam bounds,\n * 0 otherwise */\nMEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)\n{\n    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);\n    if (ZSTD_isError(bounds.error)) return 0;\n    if (value < bounds.lowerBound) return 0;\n    if (value > bounds.upperBound) return 0;\n    return 1;\n}\n\n/* ZSTD_noCompressBlock() :\n * Writes uncompressed block to dst buffer from given src.\n * Returns the size of the block */\nMEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)\n{\n    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);\n    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,\n                    dstSize_tooSmall);\n    MEM_writeLE24(dst, cBlockHeader24);\n    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);\n    return ZSTD_blockHeaderSize + srcSize;\n}\n\n\n/* ZSTD_minGain() :\n * minimum compression required\n * to generate a compress block or a compressed literals section.\n * note : use same formula for both situations */\nMEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)\n{\n    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;\n    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);\n    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));\n    return (srcSize >> minlog) + 2;\n}\n\n/*! 
ZSTD_safecopyLiterals() :\n *  memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w.\n *  Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single\n *  large copies.\n */\nstatic void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {\n    assert(iend > ilimit_w);\n    if (ip <= ilimit_w) {\n        ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);\n        op += ilimit_w - ip;\n        ip = ilimit_w;\n    }\n    while (ip < iend) *op++ = *ip++;\n}\n\n/*! ZSTD_storeSeq() :\n *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.\n *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).\n *  `mlBase` : matchLength - MINMATCH\n *  Allowed to overread literals up to litLimit.\n*/\nHINT_INLINE UNUSED_ATTR\nvoid ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)\n{\n    BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;\n    BYTE const* const litEnd = literals + litLength;\n#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)\n    static const BYTE* g_start = NULL;\n    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */\n    {   U32 const pos = (U32)((const BYTE*)literals - g_start);\n        DEBUGLOG(6, \"Cpos%7u :%3u literals, match%4u bytes at offCode%7u\",\n               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);\n    }\n#endif\n    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);\n    /* copy Literals */\n    assert(seqStorePtr->maxNbLit <= 128 KB);\n    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);\n    assert(literals + litLength <= litLimit);\n    if (litEnd <= litLimit_w) {\n        /* Common case we can use wildcopy.\n\t * First copy 
16 bytes, because literals are likely short.\n\t */\n        assert(WILDCOPY_OVERLENGTH >= 16);\n        ZSTD_copy16(seqStorePtr->lit, literals);\n        if (litLength > 16) {\n            ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);\n        }\n    } else {\n        ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);\n    }\n    seqStorePtr->lit += litLength;\n\n    /* literal Length */\n    if (litLength>0xFFFF) {\n        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */\n        seqStorePtr->longLengthID = 1;\n        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);\n    }\n    seqStorePtr->sequences[0].litLength = (U16)litLength;\n\n    /* match offset */\n    seqStorePtr->sequences[0].offset = offCode + 1;\n\n    /* match Length */\n    if (mlBase>0xFFFF) {\n        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */\n        seqStorePtr->longLengthID = 2;\n        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);\n    }\n    seqStorePtr->sequences[0].matchLength = (U16)mlBase;\n\n    seqStorePtr->sequences++;\n}\n\n\n/*-*************************************\n*  Match length counter\n***************************************/\nstatic unsigned ZSTD_NbCommonBytes (size_t val)\n{\n    if (MEM_isLittleEndian()) {\n        if (MEM_64bits()) {\n#       if defined(_MSC_VER) && defined(_WIN64)\n            unsigned long r = 0;\n            _BitScanForward64( &r, (U64)val );\n            return (unsigned)(r>>3);\n#       elif defined(__GNUC__) && (__GNUC__ >= 4)\n            return (__builtin_ctzll((U64)val) >> 3);\n#       else\n            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,\n                                                     0, 3, 1, 3, 1, 4, 2, 7,\n                                                     0, 2, 3, 6, 1, 5, 3, 
5,\n                                                     1, 3, 4, 4, 2, 5, 6, 7,\n                                                     7, 0, 1, 2, 3, 3, 4, 6,\n                                                     2, 6, 5, 5, 3, 4, 5, 6,\n                                                     7, 1, 2, 4, 6, 4, 4, 5,\n                                                     7, 2, 6, 5, 7, 6, 7, 7 };\n            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];\n#       endif\n        } else { /* 32 bits */\n#       if defined(_MSC_VER)\n            unsigned long r=0;\n            _BitScanForward( &r, (U32)val );\n            return (unsigned)(r>>3);\n#       elif defined(__GNUC__) && (__GNUC__ >= 3)\n            return (__builtin_ctz((U32)val) >> 3);\n#       else\n            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,\n                                                     3, 2, 2, 1, 3, 2, 0, 1,\n                                                     3, 3, 1, 2, 2, 2, 2, 0,\n                                                     3, 1, 2, 0, 1, 0, 1, 1 };\n            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];\n#       endif\n        }\n    } else {  /* Big Endian CPU */\n        if (MEM_64bits()) {\n#       if defined(_MSC_VER) && defined(_WIN64)\n            unsigned long r = 0;\n            _BitScanReverse64( &r, val );\n            return (unsigned)(r>>3);\n#       elif defined(__GNUC__) && (__GNUC__ >= 4)\n            return (__builtin_clzll(val) >> 3);\n#       else\n            unsigned r;\n            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */\n            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }\n            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\n            r += (!val);\n            return r;\n#       endif\n        } else { /* 32 bits */\n#       if defined(_MSC_VER)\n            
unsigned long r = 0;\n            _BitScanReverse( &r, (unsigned long)val );\n            return (unsigned)(r>>3);\n#       elif defined(__GNUC__) && (__GNUC__ >= 3)\n            return (__builtin_clz((U32)val) >> 3);\n#       else\n            unsigned r;\n            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }\n            r += (!val);\n            return r;\n#       endif\n    }   }\n}\n\n\nMEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)\n{\n    const BYTE* const pStart = pIn;\n    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);\n\n    if (pIn < pInLoopLimit) {\n        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);\n          if (diff) return ZSTD_NbCommonBytes(diff); }\n        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);\n        while (pIn < pInLoopLimit) {\n            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);\n            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }\n            pIn += ZSTD_NbCommonBytes(diff);\n            return (size_t)(pIn - pStart);\n    }   }\n    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }\n    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }\n    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;\n    return (size_t)(pIn - pStart);\n}\n\n/** ZSTD_count_2segments() :\n *  can count match length with `ip` & `match` in 2 different segments.\n *  convention : on reaching mEnd, match count continue starting from iStart\n */\nMEM_STATIC size_t\nZSTD_count_2segments(const BYTE* ip, const BYTE* match,\n                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)\n{\n    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);\n    size_t const matchLength = ZSTD_count(ip, match, vEnd);\n    if (match + matchLength != mEnd) return matchLength;\n    DEBUGLOG(7, \"ZSTD_count_2segments: found a 
2-parts match (current length==%zu)\", matchLength);\n    DEBUGLOG(7, \"distance from match beginning to end dictionary = %zi\", mEnd - match);\n    DEBUGLOG(7, \"distance from current pos to end buffer = %zi\", iEnd - ip);\n    DEBUGLOG(7, \"next byte : ip==%02X, istart==%02X\", ip[matchLength], *iStart);\n    DEBUGLOG(7, \"final match length = %zu\", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));\n    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);\n}\n\n\n/*-*************************************\n *  Hashes\n ***************************************/\nstatic const U32 prime3bytes = 506832829U;\nstatic U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }\nMEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */\n\nstatic const U32 prime4bytes = 2654435761U;\nstatic U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }\nstatic size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }\n\nstatic const U64 prime5bytes = 889523592379ULL;\nstatic size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }\nstatic size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }\n\nstatic const U64 prime6bytes = 227718039650203ULL;\nstatic size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }\nstatic size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }\n\nstatic const U64 prime7bytes = 58295818150454627ULL;\nstatic size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }\nstatic size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }\n\nstatic const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;\nstatic size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }\nstatic size_t 
ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }\n\nMEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)\n{\n    switch(mls)\n    {\n    default:\n    case 4: return ZSTD_hash4Ptr(p, hBits);\n    case 5: return ZSTD_hash5Ptr(p, hBits);\n    case 6: return ZSTD_hash6Ptr(p, hBits);\n    case 7: return ZSTD_hash7Ptr(p, hBits);\n    case 8: return ZSTD_hash8Ptr(p, hBits);\n    }\n}\n\n/** ZSTD_ipow() :\n * Return base^exponent.\n */\nstatic U64 ZSTD_ipow(U64 base, U64 exponent)\n{\n    U64 power = 1;\n    while (exponent) {\n      if (exponent & 1) power *= base;\n      exponent >>= 1;\n      base *= base;\n    }\n    return power;\n}\n\n#define ZSTD_ROLL_HASH_CHAR_OFFSET 10\n\n/** ZSTD_rollingHash_append() :\n * Add the buffer to the hash value.\n */\nstatic U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)\n{\n    BYTE const* istart = (BYTE const*)buf;\n    size_t pos;\n    for (pos = 0; pos < size; ++pos) {\n        hash *= prime8bytes;\n        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;\n    }\n    return hash;\n}\n\n/** ZSTD_rollingHash_compute() :\n * Compute the rolling hash value of the buffer.\n */\nMEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)\n{\n    return ZSTD_rollingHash_append(0, buf, size);\n}\n\n/** ZSTD_rollingHash_primePower() :\n * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash\n * over a window of length bytes.\n */\nMEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)\n{\n    return ZSTD_ipow(prime8bytes, length - 1);\n}\n\n/** ZSTD_rollingHash_rotate() :\n * Rotate the rolling hash by one byte.\n */\nMEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)\n{\n    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;\n    hash *= prime8bytes;\n    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;\n    return hash;\n}\n\n/*-*************************************\n*  Round buffer 
management\n***************************************/\n#if (ZSTD_WINDOWLOG_MAX_64 > 31)\n# error \"ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX\"\n#endif\n/* Max current allowed */\n#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))\n/* Maximum chunk size before overflow correction needs to be called again */\n#define ZSTD_CHUNKSIZE_MAX                                                     \\\n    ( ((U32)-1)                  /* Maximum ending current index */            \\\n    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */\n\n/**\n * ZSTD_window_clear():\n * Clears the window containing the history by simply setting it to empty.\n */\nMEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)\n{\n    size_t const endT = (size_t)(window->nextSrc - window->base);\n    U32 const end = (U32)endT;\n\n    window->lowLimit = end;\n    window->dictLimit = end;\n}\n\n/**\n * ZSTD_window_hasExtDict():\n * Returns non-zero if the window has a non-empty extDict.\n */\nMEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)\n{\n    return window.lowLimit < window.dictLimit;\n}\n\n/**\n * ZSTD_matchState_dictMode():\n * Inspects the provided matchState and figures out what dictMode should be\n * passed to the compressor.\n */\nMEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)\n{\n    return ZSTD_window_hasExtDict(ms->window) ?\n        ZSTD_extDict :\n        ms->dictMatchState != NULL ?\n            ZSTD_dictMatchState :\n            ZSTD_noDict;\n}\n\n/**\n * ZSTD_window_needOverflowCorrection():\n * Returns non-zero if the indices are getting too large and need overflow\n * protection.\n */\nMEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,\n                                                  void const* srcEnd)\n{\n    U32 const current = (U32)((BYTE const*)srcEnd - window.base);\n    return current > ZSTD_CURRENT_MAX;\n}\n\n/**\n * 
ZSTD_window_correctOverflow():\n * Reduces the indices to protect from index overflow.\n * Returns the correction made to the indices, which must be applied to every\n * stored index.\n *\n * The least significant cycleLog bits of the indices must remain the same,\n * which may be 0. Every index up to maxDist in the past must be valid.\n * NOTE: (maxDist & cycleMask) must be zero.\n */\nMEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,\n                                           U32 maxDist, void const* src)\n{\n    /* preemptive overflow correction:\n     * 1. correction is large enough:\n     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog\n     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog\n     *\n     *    current - newCurrent\n     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)\n     *    > (3<<29) - (1<<chainLog)\n     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)\n     *    > 1<<29\n     *\n     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:\n     *    After correction, current is less than (1<<chainLog + 1<<windowLog).\n     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.\n     *    In 32-bit mode we are safe, because (chainLog <= 29), so\n     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.\n     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:\n     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.\n     */\n    U32 const cycleMask = (1U << cycleLog) - 1;\n    U32 const current = (U32)((BYTE const*)src - window->base);\n    U32 const currentCycle0 = current & cycleMask;\n    /* Exclude zero so that newCurrent - maxDist >= 1. */\n    U32 const currentCycle1 = currentCycle0 == 0 ? 
(1U << cycleLog) : currentCycle0;\n    U32 const newCurrent = currentCycle1 + maxDist;\n    U32 const correction = current - newCurrent;\n    assert((maxDist & cycleMask) == 0);\n    assert(current > newCurrent);\n    /* Loose bound, should be around 1<<29 (see above) */\n    assert(correction > 1<<28);\n\n    window->base += correction;\n    window->dictBase += correction;\n    if (window->lowLimit <= correction) window->lowLimit = 1;\n    else window->lowLimit -= correction;\n    if (window->dictLimit <= correction) window->dictLimit = 1;\n    else window->dictLimit -= correction;\n\n    /* Ensure we can still reference the full window. */\n    assert(newCurrent >= maxDist);\n    assert(newCurrent - maxDist >= 1);\n    /* Ensure that lowLimit and dictLimit didn't underflow. */\n    assert(window->lowLimit <= newCurrent);\n    assert(window->dictLimit <= newCurrent);\n\n    DEBUGLOG(4, \"Correction of 0x%x bytes to lowLimit=0x%x\", correction,\n             window->lowLimit);\n    return correction;\n}\n\n/**\n * ZSTD_window_enforceMaxDist():\n * Updates lowLimit so that:\n *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd\n *\n * It ensures index is valid as long as index >= lowLimit.\n * This must be called before a block compression call.\n *\n * loadedDictEnd is only defined if a dictionary is in use for current compression.\n * As the name implies, loadedDictEnd represents the index at end of dictionary.\n * The value lies within context's referential, it can be directly compared to blockEndIdx.\n *\n * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.\n * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.\n * This is because dictionaries are allowed to be referenced fully\n * as long as the last byte of the dictionary is in the window.\n * Once input has progressed beyond window size, dictionary cannot be referenced anymore.\n *\n * In normal dict mode, the dictionary lies between lowLimit 
and dictLimit.\n * In dictMatchState mode, lowLimit and dictLimit are the same,\n * and the dictionary is below them.\n * forceWindow and dictMatchState are therefore incompatible.\n */\nMEM_STATIC void\nZSTD_window_enforceMaxDist(ZSTD_window_t* window,\n                     const void* blockEnd,\n                           U32   maxDist,\n                           U32*  loadedDictEndPtr,\n                     const ZSTD_matchState_t** dictMatchStatePtr)\n{\n    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);\n    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;\n    DEBUGLOG(5, \"ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u\",\n                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);\n\n    /* - When there is no dictionary : loadedDictEnd == 0.\n         In which case, the test (blockEndIdx > maxDist) is merely to avoid\n         overflowing next operation `newLowLimit = blockEndIdx - maxDist`.\n       - When there is a standard dictionary :\n         Index referential is copied from the dictionary,\n         which means it starts from 0.\n         In which case, loadedDictEnd == dictSize,\n         and it makes sense to compare `blockEndIdx > maxDist + dictSize`\n         since `blockEndIdx` also starts from zero.\n       - When there is an attached dictionary :\n         loadedDictEnd is expressed within the referential of the context,\n         so it can be directly compared against blockEndIdx.\n    */\n    if (blockEndIdx > maxDist + loadedDictEnd) {\n        U32 const newLowLimit = blockEndIdx - maxDist;\n        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;\n        if (window->dictLimit < window->lowLimit) {\n            DEBUGLOG(5, \"Update dictLimit to match lowLimit, from %u to %u\",\n                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);\n            window->dictLimit = window->lowLimit;\n        }\n 
       /* On reaching window size, dictionaries are invalidated */\n        if (loadedDictEndPtr) *loadedDictEndPtr = 0;\n        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;\n    }\n}\n\n/* Similar to ZSTD_window_enforceMaxDist(),\n * but only invalidates dictionary\n * when input progresses beyond window size.\n * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)\n *              loadedDictEnd uses same referential as window->base\n *              maxDist is the window size */\nMEM_STATIC void\nZSTD_checkDictValidity(const ZSTD_window_t* window,\n                       const void* blockEnd,\n                             U32   maxDist,\n                             U32*  loadedDictEndPtr,\n                       const ZSTD_matchState_t** dictMatchStatePtr)\n{\n    assert(loadedDictEndPtr != NULL);\n    assert(dictMatchStatePtr != NULL);\n    {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);\n        U32 const loadedDictEnd = *loadedDictEndPtr;\n        DEBUGLOG(5, \"ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u\",\n                    (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);\n        assert(blockEndIdx >= loadedDictEnd);\n\n        if (blockEndIdx > loadedDictEnd + maxDist) {\n            /* On reaching window size, dictionaries are invalidated.\n             * For simplification, if window size is reached anywhere within next block,\n             * the dictionary is invalidated for the full block.\n             */\n            DEBUGLOG(6, \"invalidating dictionary for current block (distance > windowSize)\");\n            *loadedDictEndPtr = 0;\n            *dictMatchStatePtr = NULL;\n        } else {\n            if (*loadedDictEndPtr != 0) {\n                DEBUGLOG(6, \"dictionary considered valid for current block\");\n    }   }   }\n}\n\nMEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {\n    memset(window, 0, sizeof(*window));\n    window->base = 
(BYTE const*)\"\";\n    window->dictBase = (BYTE const*)\"\";\n    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */\n    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */\n    window->nextSrc = window->base + 1;   /* see issue #1241 */\n}\n\n/**\n * ZSTD_window_update():\n * Updates the window by appending [src, src + srcSize) to the window.\n * If it is not contiguous, the current prefix becomes the extDict, and we\n * forget about the extDict. Handles overlap of the prefix and extDict.\n * Returns non-zero if the segment is contiguous.\n */\nMEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,\n                                  void const* src, size_t srcSize)\n{\n    BYTE const* const ip = (BYTE const*)src;\n    U32 contiguous = 1;\n    DEBUGLOG(5, \"ZSTD_window_update\");\n    if (srcSize == 0)\n        return contiguous;\n    assert(window->base != NULL);\n    assert(window->dictBase != NULL);\n    /* Check if blocks follow each other */\n    if (src != window->nextSrc) {\n        /* not contiguous */\n        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);\n        DEBUGLOG(5, \"Non contiguous blocks, new segment starts at %u\", window->dictLimit);\n        window->lowLimit = window->dictLimit;\n        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */\n        window->dictLimit = (U32)distanceFromBase;\n        window->dictBase = window->base;\n        window->base = ip - distanceFromBase;\n        // ms->nextToUpdate = window->dictLimit;\n        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */\n        contiguous = 0;\n    }\n    window->nextSrc = ip + srcSize;\n    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */\n    if ( (ip+srcSize > window->dictBase + window->lowLimit)\n       & (ip < window->dictBase + 
window->dictLimit)) {\n        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;\n        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;\n        window->lowLimit = lowLimitMax;\n        DEBUGLOG(5, \"Overlapping extDict and input : new lowLimit = %u\", window->lowLimit);\n    }\n    return contiguous;\n}\n\nMEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)\n{\n    U32    const maxDistance = 1U << windowLog;\n    U32    const lowestValid = ms->window.lowLimit;\n    U32    const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;\n    U32    const isDictionary = (ms->loadedDictEnd != 0);\n    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;\n    return matchLowest;\n}\n\n\n\n/* debug functions */\n#if (DEBUGLEVEL>=2)\n\nMEM_STATIC double ZSTD_fWeight(U32 rawStat)\n{\n    U32 const fp_accuracy = 8;\n    U32 const fp_multiplier = (1 << fp_accuracy);\n    U32 const newStat = rawStat + 1;\n    U32 const hb = ZSTD_highbit32(newStat);\n    U32 const BWeight = hb * fp_multiplier;\n    U32 const FWeight = (newStat << fp_accuracy) >> hb;\n    U32 const weight = BWeight + FWeight;\n    assert(hb + fp_accuracy < 31);\n    return (double)weight / fp_multiplier;\n}\n\n/* display a table content,\n * listing each element, its frequency, and its predicted bit cost */\nMEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)\n{\n    unsigned u, sum;\n    for (u=0, sum=0; u<=max; u++) sum += table[u];\n    DEBUGLOG(2, \"total nb elts: %u\", sum);\n    for (u=0; u<=max; u++) {\n        DEBUGLOG(2, \"%2u: %5u  (%.2f)\",\n                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );\n    }\n}\n\n#endif\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n/* ===============================================================\n * Shared internal declarations\n * These prototypes may be called 
from sources not in lib/compress\n * =============================================================== */\n\n/* ZSTD_loadCEntropy() :\n * dict : must point at beginning of a valid zstd dictionary.\n * return : size of dictionary header (size of magic number + dict ID + entropy tables)\n * assumptions : magic number supposed already checked\n *               and dictSize >= 8 */\nsize_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,\n                         short* offcodeNCount, unsigned* offcodeMaxValue,\n                         const void* const dict, size_t dictSize);\n\nvoid ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);\n\n/* ==============================================================\n * Private declarations\n * These prototypes shall only be called from within lib/compress\n * ============================================================== */\n\n/* ZSTD_getCParamsFromCCtxParams() :\n * cParams are built depending on compressionLevel, src size hints,\n * LDM and manually set compression parameters.\n * Note: srcSizeHint == 0 means 0!\n */\nZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(\n        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);\n\n/*! ZSTD_initCStream_internal() :\n *  Private use only. Init streaming operation.\n *  expects params to be valid.\n *  must receive dict, or cdict, or none, but not both.\n *  @return : 0, or an error code */\nsize_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,\n                     const void* dict, size_t dictSize,\n                     const ZSTD_CDict* cdict,\n                     const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);\n\nvoid ZSTD_resetSeqStore(seqStore_t* ssPtr);\n\n/*! ZSTD_getCParamsFromCDict() :\n *  as the name implies */\nZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);\n\n/* ZSTD_compressBegin_advanced_internal() :\n * Private use only. To be called from zstdmt_compress.c. 
*/\nsize_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,\n                                    const void* dict, size_t dictSize,\n                                    ZSTD_dictContentType_e dictContentType,\n                                    ZSTD_dictTableLoadMethod_e dtlm,\n                                    const ZSTD_CDict* cdict,\n                                    const ZSTD_CCtx_params* params,\n                                    unsigned long long pledgedSrcSize);\n\n/* ZSTD_compress_advanced_internal() :\n * Private use only. To be called from zstdmt_compress.c. */\nsize_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,\n                                       void* dst, size_t dstCapacity,\n                                 const void* src, size_t srcSize,\n                                 const void* dict,size_t dictSize,\n                                 const ZSTD_CCtx_params* params);\n\n\n/* ZSTD_writeLastEmptyBlock() :\n * output an empty Block with end-of-frame mark to complete a frame\n * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))\n *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)\n */\nsize_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);\n\n\n/* ZSTD_referenceExternalSequences() :\n * Must be called before starting a compression operation.\n * seqs must parse a prefix of the source.\n * This cannot be used when long range matching is enabled.\n * Zstd will use these sequences, and pass the literals to a secondary block\n * compressor.\n * @return : An error code on failure.\n * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory\n * access and data corruption.\n */\nsize_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);\n\n\n#endif /* ZSTD_COMPRESS_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_compress_literals.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n /*-*************************************\n *  Dependencies\n ***************************************/\n#include \"zstd_compress_literals.h\"\n\nsize_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    BYTE* const ostart = (BYTE* const)dst;\n    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);\n\n    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);\n\n    switch(flSize)\n    {\n        case 1: /* 2 - 1 - 5 */\n            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));\n            break;\n        case 2: /* 2 - 2 - 12 */\n            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));\n            break;\n        case 3: /* 2 - 2 - 20 */\n            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));\n            break;\n        default:   /* not necessary : flSize is {1,2,3} */\n            assert(0);\n    }\n\n    memcpy(ostart + flSize, src, srcSize);\n    return srcSize + flSize;\n}\n\nsize_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    BYTE* const ostart = (BYTE* const)dst;\n    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);\n\n    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */\n\n    switch(flSize)\n    {\n        case 1: /* 2 - 1 - 5 */\n            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));\n            break;\n        case 2: /* 2 - 2 - 12 */\n            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));\n     
       break;\n        case 3: /* 2 - 2 - 20 */\n            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));\n            break;\n        default:   /* not necessary : flSize is {1,2,3} */\n            assert(0);\n    }\n\n    ostart[flSize] = *(const BYTE*)src;\n    return flSize+1;\n}\n\nsize_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,\n                              ZSTD_hufCTables_t* nextHuf,\n                              ZSTD_strategy strategy, int disableLiteralCompression,\n                              void* dst, size_t dstCapacity,\n                        const void* src, size_t srcSize,\n                              void* entropyWorkspace, size_t entropyWorkspaceSize,\n                        const int bmi2)\n{\n    size_t const minGain = ZSTD_minGain(srcSize, strategy);\n    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);\n    BYTE*  const ostart = (BYTE*)dst;\n    U32 singleStream = srcSize < 256;\n    symbolEncodingType_e hType = set_compressed;\n    size_t cLitSize;\n\n    DEBUGLOG(5,\"ZSTD_compressLiterals (disableLiteralCompression=%i)\",\n                disableLiteralCompression);\n\n    /* Prepare nextEntropy assuming reusing the existing table */\n    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));\n\n    if (disableLiteralCompression)\n        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);\n\n    /* small ? don't even attempt compression (speed opt) */\n#   define COMPRESS_LITERALS_SIZE_MIN 63\n    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;\n        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);\n    }\n\n    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, \"not enough space for compression\");\n    {   HUF_repeat repeat = prevHuf->repeatMode;\n        int const preferRepeat = strategy < ZSTD_lazy ? 
srcSize <= 1024 : 0;\n        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;\n        cLitSize = singleStream ?\n            HUF_compress1X_repeat(\n                ostart+lhSize, dstCapacity-lhSize, src, srcSize,\n                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,\n                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :\n            HUF_compress4X_repeat(\n                ostart+lhSize, dstCapacity-lhSize, src, srcSize,\n                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,\n                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);\n        if (repeat != HUF_repeat_none) {\n            /* reused the existing table */\n            hType = set_repeat;\n        }\n    }\n\n    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {\n        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));\n        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);\n    }\n    if (cLitSize==1) {\n        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));\n        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);\n    }\n\n    if (hType == set_compressed) {\n        /* using a newly constructed table */\n        nextHuf->repeatMode = HUF_repeat_check;\n    }\n\n    /* Build header */\n    switch(lhSize)\n    {\n    case 3: /* 2 - 2 - 10 - 10 */\n        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);\n            MEM_writeLE24(ostart, lhc);\n            break;\n        }\n    case 4: /* 2 - 2 - 14 - 14 */\n        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);\n            MEM_writeLE32(ostart, lhc);\n            break;\n        }\n    case 5: /* 2 - 2 - 18 - 18 */\n        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);\n            MEM_writeLE32(ostart, lhc);\n            ostart[4] = 
(BYTE)(cLitSize >> 10);\n            break;\n        }\n    default:  /* not possible : lhSize is {3,4,5} */\n        assert(0);\n    }\n    return lhSize+cLitSize;\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_compress_literals.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_COMPRESS_LITERALS_H\n#define ZSTD_COMPRESS_LITERALS_H\n\n#include \"zstd_compress_internal.h\" /* ZSTD_hufCTables_t, ZSTD_minGain() */\n\n\nsize_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\nsize_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\nsize_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,\n                              ZSTD_hufCTables_t* nextHuf,\n                              ZSTD_strategy strategy, int disableLiteralCompression,\n                              void* dst, size_t dstCapacity,\n                        const void* src, size_t srcSize,\n                              void* entropyWorkspace, size_t entropyWorkspaceSize,\n                        const int bmi2);\n\n#endif /* ZSTD_COMPRESS_LITERALS_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_compress_sequences.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n /*-*************************************\n *  Dependencies\n ***************************************/\n#include \"zstd_compress_sequences.h\"\n\n/**\n * -log2(x / 256) lookup table for x in [0, 256).\n * If x == 0: Return 0\n * Else: Return floor(-log2(x / 256) * 256)\n */\nstatic unsigned const kInverseProbabilityLog256[256] = {\n    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,\n    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,\n    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,\n    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,\n    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,\n    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,\n    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,\n    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,\n    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,\n    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,\n    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,\n    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,\n    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,\n    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,\n    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,\n    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,\n    106,  104,  102,  100,  98,   
96,   94,   93,   91,   89,   87,   85,\n    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,\n    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,\n    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,\n    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,\n    5,    4,    2,    1,\n};\n\nstatic unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {\n  void const* ptr = ctable;\n  U16 const* u16ptr = (U16 const*)ptr;\n  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);\n  return maxSymbolValue;\n}\n\n/**\n * Returns the cost in bytes of encoding the normalized count header.\n * Returns an error if any of the helper functions return an error.\n */\nstatic size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,\n                              size_t const nbSeq, unsigned const FSELog)\n{\n    BYTE wksp[FSE_NCOUNTBOUND];\n    S16 norm[MaxSeq + 1];\n    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);\n    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));\n    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);\n}\n\n/**\n * Returns the cost in bits of encoding the distribution described by count\n * using the entropy bound.\n */\nstatic size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)\n{\n    unsigned cost = 0;\n    unsigned s;\n    for (s = 0; s <= max; ++s) {\n        unsigned norm = (unsigned)((256 * count[s]) / total);\n        if (count[s] != 0 && norm == 0)\n            norm = 1;\n        assert(count[s] < total);\n        cost += count[s] * kInverseProbabilityLog256[norm];\n    }\n    return cost >> 8;\n}\n\n/**\n * Returns the cost in bits of encoding the distribution in count using ctable.\n * Returns an error if ctable cannot represent all the symbols in count.\n */\nsize_t ZSTD_fseBitCost(\n    FSE_CTable const* ctable,\n    unsigned const* count,\n    unsigned const 
max)\n{\n    unsigned const kAccuracyLog = 8;\n    size_t cost = 0;\n    unsigned s;\n    FSE_CState_t cstate;\n    FSE_initCState(&cstate, ctable);\n    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {\n        DEBUGLOG(5, \"Repeat FSE_CTable has maxSymbolValue %u < %u\",\n                    ZSTD_getFSEMaxSymbolValue(ctable), max);\n        return ERROR(GENERIC);\n    }\n    for (s = 0; s <= max; ++s) {\n        unsigned const tableLog = cstate.stateLog;\n        unsigned const badCost = (tableLog + 1) << kAccuracyLog;\n        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);\n        if (count[s] == 0)\n            continue;\n        if (bitCost >= badCost) {\n            DEBUGLOG(5, \"Repeat FSE_CTable has Prob[%u] == 0\", s);\n            return ERROR(GENERIC);\n        }\n        cost += count[s] * bitCost;\n    }\n    return cost >> kAccuracyLog;\n}\n\n/**\n * Returns the cost in bits of encoding the distribution in count using the\n * table described by norm. The max symbol support by norm is assumed >= max.\n * norm must be valid for every symbol with non-zero probability in count.\n */\nsize_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,\n                             unsigned const* count, unsigned const max)\n{\n    unsigned const shift = 8 - accuracyLog;\n    size_t cost = 0;\n    unsigned s;\n    assert(accuracyLog <= 8);\n    for (s = 0; s <= max; ++s) {\n        unsigned const normAcc = norm[s] != -1 ? 
norm[s] : 1;\n        unsigned const norm256 = normAcc << shift;\n        assert(norm256 > 0);\n        assert(norm256 < 256);\n        cost += count[s] * kInverseProbabilityLog256[norm256];\n    }\n    return cost >> 8;\n}\n\nsymbolEncodingType_e\nZSTD_selectEncodingType(\n        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,\n        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,\n        FSE_CTable const* prevCTable,\n        short const* defaultNorm, U32 defaultNormLog,\n        ZSTD_defaultPolicy_e const isDefaultAllowed,\n        ZSTD_strategy const strategy)\n{\n    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);\n    if (mostFrequent == nbSeq) {\n        *repeatMode = FSE_repeat_none;\n        if (isDefaultAllowed && nbSeq <= 2) {\n            /* Prefer set_basic over set_rle when there are 2 or less symbols,\n             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.\n             * If basic encoding isn't possible, always choose RLE.\n             */\n            DEBUGLOG(5, \"Selected set_basic\");\n            return set_basic;\n        }\n        DEBUGLOG(5, \"Selected set_rle\");\n        return set_rle;\n    }\n    if (strategy < ZSTD_lazy) {\n        if (isDefaultAllowed) {\n            size_t const staticFse_nbSeq_max = 1000;\n            size_t const mult = 10 - strategy;\n            size_t const baseLog = 3;\n            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */\n            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */\n            assert(mult <= 9 && mult >= 7);\n            if ( (*repeatMode == FSE_repeat_valid)\n              && (nbSeq < staticFse_nbSeq_max) ) {\n                DEBUGLOG(5, \"Selected set_repeat\");\n                return set_repeat;\n            }\n            if ( (nbSeq < dynamicFse_nbSeq_min)\n              || 
(mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {\n                DEBUGLOG(5, \"Selected set_basic\");\n                /* The format allows default tables to be repeated, but it isn't useful.\n                 * When using simple heuristics to select encoding type, we don't want\n                 * to confuse these tables with dictionaries. When running more careful\n                 * analysis, we don't need to waste time checking both repeating tables\n                 * and default tables.\n                 */\n                *repeatMode = FSE_repeat_none;\n                return set_basic;\n            }\n        }\n    } else {\n        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);\n        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);\n        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);\n        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);\n\n        if (isDefaultAllowed) {\n            assert(!ZSTD_isError(basicCost));\n            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));\n        }\n        assert(!ZSTD_isError(NCountCost));\n        assert(compressedCost < ERROR(maxCode));\n        DEBUGLOG(5, \"Estimated bit costs: basic=%u\\trepeat=%u\\tcompressed=%u\",\n                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);\n        if (basicCost <= repeatCost && basicCost <= compressedCost) {\n            DEBUGLOG(5, \"Selected set_basic\");\n            assert(isDefaultAllowed);\n            *repeatMode = FSE_repeat_none;\n            return set_basic;\n        }\n        if (repeatCost <= compressedCost) {\n            DEBUGLOG(5, \"Selected set_repeat\");\n            assert(!ZSTD_isError(repeatCost));\n            return set_repeat;\n        }\n        assert(compressedCost < 
basicCost && compressedCost < repeatCost);\n    }\n    DEBUGLOG(5, \"Selected set_compressed\");\n    *repeatMode = FSE_repeat_check;\n    return set_compressed;\n}\n\nsize_t\nZSTD_buildCTable(void* dst, size_t dstCapacity,\n                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,\n                unsigned* count, U32 max,\n                const BYTE* codeTable, size_t nbSeq,\n                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,\n                const FSE_CTable* prevCTable, size_t prevCTableSize,\n                void* entropyWorkspace, size_t entropyWorkspaceSize)\n{\n    BYTE* op = (BYTE*)dst;\n    const BYTE* const oend = op + dstCapacity;\n    DEBUGLOG(6, \"ZSTD_buildCTable (dstCapacity=%u)\", (unsigned)dstCapacity);\n\n    switch (type) {\n    case set_rle:\n        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));\n        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);\n        *op = codeTable[0];\n        return 1;\n    case set_repeat:\n        memcpy(nextCTable, prevCTable, prevCTableSize);\n        return 0;\n    case set_basic:\n        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize));  /* note : could be pre-calculated */\n        return 0;\n    case set_compressed: {\n        S16 norm[MaxSeq + 1];\n        size_t nbSeq_1 = nbSeq;\n        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);\n        if (count[codeTable[nbSeq-1]] > 1) {\n            count[codeTable[nbSeq-1]]--;\n            nbSeq_1--;\n        }\n        assert(nbSeq_1 > 1);\n        FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));\n        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */\n            FORWARD_IF_ERROR(NCountSize);\n            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, 
entropyWorkspaceSize));\n            return NCountSize;\n        }\n    }\n    default: assert(0); RETURN_ERROR(GENERIC);\n    }\n}\n\nFORCE_INLINE_TEMPLATE size_t\nZSTD_encodeSequences_body(\n            void* dst, size_t dstCapacity,\n            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,\n            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,\n            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,\n            seqDef const* sequences, size_t nbSeq, int longOffsets)\n{\n    BIT_CStream_t blockStream;\n    FSE_CState_t  stateMatchLength;\n    FSE_CState_t  stateOffsetBits;\n    FSE_CState_t  stateLitLength;\n\n    RETURN_ERROR_IF(\n        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),\n        dstSize_tooSmall, \"not enough space remaining\");\n    DEBUGLOG(6, \"available space for bitstream : %i  (dstCapacity=%u)\",\n                (int)(blockStream.endPtr - blockStream.startPtr),\n                (unsigned)dstCapacity);\n\n    /* first symbols */\n    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);\n    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);\n    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);\n    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);\n    if (MEM_32bits()) BIT_flushBits(&blockStream);\n    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);\n    if (MEM_32bits()) BIT_flushBits(&blockStream);\n    if (longOffsets) {\n        U32 const ofBits = ofCodeTable[nbSeq-1];\n        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);\n        if (extraBits) {\n            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);\n            BIT_flushBits(&blockStream);\n        }\n        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,\n                    ofBits - 
extraBits);\n    } else {\n        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);\n    }\n    BIT_flushBits(&blockStream);\n\n    {   size_t n;\n        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */\n            BYTE const llCode = llCodeTable[n];\n            BYTE const ofCode = ofCodeTable[n];\n            BYTE const mlCode = mlCodeTable[n];\n            U32  const llBits = LL_bits[llCode];\n            U32  const ofBits = ofCode;\n            U32  const mlBits = ML_bits[mlCode];\n            DEBUGLOG(6, \"encoding: litlen:%2u - matchlen:%2u - offCode:%7u\",\n                        (unsigned)sequences[n].litLength,\n                        (unsigned)sequences[n].matchLength + MINMATCH,\n                        (unsigned)sequences[n].offset);\n                                                                            /* 32b*/  /* 64b*/\n                                                                            /* (7)*/  /* (7)*/\n            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */\n            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */\n            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/\n            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */\n            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))\n                BIT_flushBits(&blockStream);                                /* (7)*/\n            BIT_addBits(&blockStream, sequences[n].litLength, llBits);\n            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);\n            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);\n            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);\n            if (longOffsets) {\n                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);\n       
         if (extraBits) {\n                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);\n                    BIT_flushBits(&blockStream);                            /* (7)*/\n                }\n                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,\n                            ofBits - extraBits);                            /* 31 */\n            } else {\n                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */\n            }\n            BIT_flushBits(&blockStream);                                    /* (7)*/\n            DEBUGLOG(7, \"remaining space : %i\", (int)(blockStream.endPtr - blockStream.ptr));\n    }   }\n\n    DEBUGLOG(6, \"ZSTD_encodeSequences: flushing ML state with %u bits\", stateMatchLength.stateLog);\n    FSE_flushCState(&blockStream, &stateMatchLength);\n    DEBUGLOG(6, \"ZSTD_encodeSequences: flushing Off state with %u bits\", stateOffsetBits.stateLog);\n    FSE_flushCState(&blockStream, &stateOffsetBits);\n    DEBUGLOG(6, \"ZSTD_encodeSequences: flushing LL state with %u bits\", stateLitLength.stateLog);\n    FSE_flushCState(&blockStream, &stateLitLength);\n\n    {   size_t const streamSize = BIT_closeCStream(&blockStream);\n        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, \"not enough space\");\n        return streamSize;\n    }\n}\n\nstatic size_t\nZSTD_encodeSequences_default(\n            void* dst, size_t dstCapacity,\n            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,\n            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,\n            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,\n            seqDef const* sequences, size_t nbSeq, int longOffsets)\n{\n    return ZSTD_encodeSequences_body(dst, dstCapacity,\n                                    CTable_MatchLength, mlCodeTable,\n                                    CTable_OffsetBits, ofCodeTable,\n                                    CTable_LitLength, 
llCodeTable,\n                                    sequences, nbSeq, longOffsets);\n}\n\n\n#if DYNAMIC_BMI2\n\nstatic TARGET_ATTRIBUTE(\"bmi2\") size_t\nZSTD_encodeSequences_bmi2(\n            void* dst, size_t dstCapacity,\n            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,\n            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,\n            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,\n            seqDef const* sequences, size_t nbSeq, int longOffsets)\n{\n    return ZSTD_encodeSequences_body(dst, dstCapacity,\n                                    CTable_MatchLength, mlCodeTable,\n                                    CTable_OffsetBits, ofCodeTable,\n                                    CTable_LitLength, llCodeTable,\n                                    sequences, nbSeq, longOffsets);\n}\n\n#endif\n\nsize_t ZSTD_encodeSequences(\n            void* dst, size_t dstCapacity,\n            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,\n            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,\n            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,\n            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)\n{\n    DEBUGLOG(5, \"ZSTD_encodeSequences: dstCapacity = %u\", (unsigned)dstCapacity);\n#if DYNAMIC_BMI2\n    if (bmi2) {\n        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,\n                                         CTable_MatchLength, mlCodeTable,\n                                         CTable_OffsetBits, ofCodeTable,\n                                         CTable_LitLength, llCodeTable,\n                                         sequences, nbSeq, longOffsets);\n    }\n#endif\n    (void)bmi2;\n    return ZSTD_encodeSequences_default(dst, dstCapacity,\n                                        CTable_MatchLength, mlCodeTable,\n                                        CTable_OffsetBits, ofCodeTable,\n                                     
   CTable_LitLength, llCodeTable,\n                                        sequences, nbSeq, longOffsets);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_compress_sequences.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_COMPRESS_SEQUENCES_H\n#define ZSTD_COMPRESS_SEQUENCES_H\n\n#include \"fse.h\" /* FSE_repeat, FSE_CTable */\n#include \"zstd_internal.h\" /* symbolEncodingType_e, ZSTD_strategy */\n\ntypedef enum {\n    ZSTD_defaultDisallowed = 0,\n    ZSTD_defaultAllowed = 1\n} ZSTD_defaultPolicy_e;\n\nsymbolEncodingType_e\nZSTD_selectEncodingType(\n        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,\n        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,\n        FSE_CTable const* prevCTable,\n        short const* defaultNorm, U32 defaultNormLog,\n        ZSTD_defaultPolicy_e const isDefaultAllowed,\n        ZSTD_strategy const strategy);\n\nsize_t\nZSTD_buildCTable(void* dst, size_t dstCapacity,\n                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,\n                unsigned* count, U32 max,\n                const BYTE* codeTable, size_t nbSeq,\n                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,\n                const FSE_CTable* prevCTable, size_t prevCTableSize,\n                void* entropyWorkspace, size_t entropyWorkspaceSize);\n\nsize_t ZSTD_encodeSequences(\n            void* dst, size_t dstCapacity,\n            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,\n            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,\n            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,\n            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);\n\nsize_t ZSTD_fseBitCost(\n    FSE_CTable const* ctable,\n    
unsigned const* count,\n    unsigned const max);\n\nsize_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,\n                             unsigned const* count, unsigned const max);\n#endif /* ZSTD_COMPRESS_SEQUENCES_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_compress_superblock.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n /*-*************************************\n *  Dependencies\n ***************************************/\n#include \"hist.h\"           /* HIST_countFast_wksp */\n#include \"zstd_compress_internal.h\"\n#include \"zstd_compress_sequences.h\"\n#include \"zstd_compress_literals.h\"\n#include \"zstd_compress_superblock.h\"\n\n/*-*************************************\n*  Superblock entropy buffer structs\n***************************************/\n/** ZSTD_hufCTablesMetadata_t :\n *  Stores Literals Block Type for a super-block in hType, and\n *  huffman tree description in hufDesBuffer.\n *  hufDesSize refers to the size of huffman tree description in bytes.\n *  This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */\ntypedef struct {\n    symbolEncodingType_e hType;\n    BYTE hufDesBuffer[500]; // TODO give name to this value\n    size_t hufDesSize;\n} ZSTD_hufCTablesMetadata_t;\n\n/** ZSTD_fseCTablesMetadata_t :\n *  Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and\n *  fse tables in fseTablesBuffer.\n *  fseTablesSize refers to the size of fse tables in bytes.\n *  This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */\ntypedef struct {\n    symbolEncodingType_e llType;\n    symbolEncodingType_e ofType;\n    symbolEncodingType_e mlType;\n    BYTE fseTablesBuffer[500]; // TODO give name to this value\n    size_t fseTablesSize;\n    size_t lastCountSize; // This is to account for bug in 1.3.4. 
More detail in ZSTD_compressSubBlock_sequences()\n} ZSTD_fseCTablesMetadata_t;\n\ntypedef struct {\n    ZSTD_hufCTablesMetadata_t hufMetadata;\n    ZSTD_fseCTablesMetadata_t fseMetadata;\n} ZSTD_entropyCTablesMetadata_t;\n\n\n/** ZSTD_buildSuperBlockEntropy_literal() :\n *  Builds entropy for the super-block literals.\n *  Stores literals block type (raw, rle, compressed) and\n *  huffman description table to hufMetadata.\n *  Currently, this does not consider the option of reusing huffman table from\n *  previous super-block. I think it would be a good improvement to add that option.\n *  @return : size of huffman description table or error code */\nstatic size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,\n                                            const ZSTD_hufCTables_t* prevHuf,\n                                                  ZSTD_hufCTables_t* nextHuf,\n                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,\n                                                  void* workspace, size_t wkspSize)\n{\n    BYTE* const wkspStart = (BYTE*)workspace;\n    BYTE* const wkspEnd = wkspStart + wkspSize;\n    BYTE* const countWkspStart = wkspStart;\n    unsigned* const countWksp = (unsigned*)workspace;\n    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);\n    BYTE* const nodeWksp = countWkspStart + countWkspSize;\n    const size_t nodeWkspSize = wkspEnd-nodeWksp;\n    unsigned maxSymbolValue = 255;\n    unsigned huffLog = 11;\n\n    DEBUGLOG(5, \"ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)\", srcSize);\n\n    /* Prepare nextEntropy assuming reusing the existing table */\n    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));\n\n    /* small ? 
don't even attempt compression (speed opt) */\n#   define COMPRESS_LITERALS_SIZE_MIN 63\n    {   size_t const minLitSize = COMPRESS_LITERALS_SIZE_MIN;\n        if (srcSize <= minLitSize) { hufMetadata->hType = set_basic; return 0; }\n    }\n\n    /* Scan input and build symbol stats */\n    {   size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);\n        FORWARD_IF_ERROR(largest);\n        if (largest == srcSize) { hufMetadata->hType = set_rle; return 0; }\n        if (largest <= (srcSize >> 7)+4) { hufMetadata->hType = set_basic; return 0; }\n    }\n\n\n    /* Build Huffman Tree */\n    memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));\n    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);\n    {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,\n                                                    maxSymbolValue, huffLog,\n                                                    nodeWksp, nodeWkspSize);\n        FORWARD_IF_ERROR(maxBits);\n        huffLog = (U32)maxBits;\n        {   size_t cSize = HUF_estimateCompressedSize(\n                              (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);\n            size_t hSize = HUF_writeCTable(\n                              hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),\n                              (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog);\n            if (cSize + hSize >= srcSize) { hufMetadata->hType = set_basic; return 0; }\n            hufMetadata->hType = set_compressed;\n            return hSize;\n        }\n    }\n}\n\n/** ZSTD_buildSuperBlockEntropy_sequences() :\n *  Builds entropy for the super-block sequences.\n *  Stores symbol compression modes and fse table to fseMetadata.\n *  @return : size of fse tables or error code */\nstatic size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,\n                                              
const ZSTD_fseCTables_t* prevEntropy,\n                                                    ZSTD_fseCTables_t* nextEntropy,\n                                              const ZSTD_CCtx_params* cctxParams,\n                                                    ZSTD_fseCTablesMetadata_t* fseMetadata,\n                                                    void* workspace, size_t wkspSize)\n{\n    BYTE* const wkspStart = (BYTE*)workspace;\n    BYTE* const wkspEnd = wkspStart + wkspSize;\n    BYTE* const countWkspStart = wkspStart;\n    unsigned* const countWksp = (unsigned*)workspace;\n    const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);\n    BYTE* const cTableWksp = countWkspStart + countWkspSize;\n    const size_t cTableWkspSize = wkspEnd-cTableWksp;\n    ZSTD_strategy const strategy = cctxParams->cParams.strategy;\n    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;\n    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;\n    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;\n    const BYTE* const ofCodeTable = seqStorePtr->ofCode;\n    const BYTE* const llCodeTable = seqStorePtr->llCode;\n    const BYTE* const mlCodeTable = seqStorePtr->mlCode;\n    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;\n    BYTE* const ostart = fseMetadata->fseTablesBuffer;\n    BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);\n    BYTE* op = ostart;\n\n    assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));\n    DEBUGLOG(5, \"ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)\", nbSeq);\n    memset(workspace, 0, wkspSize);\n\n    fseMetadata->lastCountSize = 0;\n    /* convert length/distances into codes */\n    ZSTD_seqToCodes(seqStorePtr);\n    /* build CTable for Literal Lengths */\n    {   U32 LLtype;\n        unsigned max = MaxLL;\n        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */\n        
DEBUGLOG(5, \"Building LL table\");\n        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;\n        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,\n                                        countWksp, max, mostFrequent, nbSeq,\n                                        LLFSELog, prevEntropy->litlengthCTable,\n                                        LL_defaultNorm, LL_defaultNormLog,\n                                        ZSTD_defaultAllowed, strategy);\n        assert(set_basic < set_compressed && set_rle < set_compressed);\n        assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */\n        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,\n                                                    countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,\n                                                    prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),\n                                                    cTableWksp, cTableWkspSize);\n            FORWARD_IF_ERROR(countSize);\n            if (LLtype == set_compressed)\n                fseMetadata->lastCountSize = countSize;\n            op += countSize;\n            fseMetadata->llType = (symbolEncodingType_e) LLtype;\n    }   }\n    /* build CTable for Offsets */\n    {   U32 Offtype;\n        unsigned max = MaxOff;\n        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */\n        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */\n        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed;\n        DEBUGLOG(5, \"Building OF table\");\n        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;\n        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,\n                                        countWksp, max, mostFrequent, nbSeq,\n                                        OffFSELog, prevEntropy->offcodeCTable,\n                                        OF_defaultNorm, OF_defaultNormLog,\n                                        defaultPolicy, strategy);\n        assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */\n        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,\n                                                    countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,\n                                                    prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),\n                                                    cTableWksp, cTableWkspSize);\n            FORWARD_IF_ERROR(countSize);\n            if (Offtype == set_compressed)\n                fseMetadata->lastCountSize = countSize;\n            op += countSize;\n            fseMetadata->ofType = (symbolEncodingType_e) Offtype;\n    }   }\n    /* build CTable for MatchLengths */\n    {   U32 MLtype;\n        unsigned max = MaxML;\n        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */\n        DEBUGLOG(5, \"Building ML table (remaining space : %i)\", (int)(oend-op));\n        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;\n        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,\n                                        countWksp, max, mostFrequent, nbSeq,\n                                        MLFSELog, 
prevEntropy->matchlengthCTable,\n                                        ML_defaultNorm, ML_defaultNormLog,\n                                        ZSTD_defaultAllowed, strategy);\n        assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */\n        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,\n                                                    countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,\n                                                    prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),\n                                                    cTableWksp, cTableWkspSize);\n            FORWARD_IF_ERROR(countSize);\n            if (MLtype == set_compressed)\n                fseMetadata->lastCountSize = countSize;\n            op += countSize;\n            fseMetadata->mlType = (symbolEncodingType_e) MLtype;\n    }   }\n    assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));\n    return op-ostart;\n}\n\n\n/** ZSTD_buildSuperBlockEntropy() :\n *  Builds entropy for the super-block.\n *  @return : 0 on success or error code */\nstatic size_t\nZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,\n                      const ZSTD_entropyCTables_t* prevEntropy,\n                            ZSTD_entropyCTables_t* nextEntropy,\n                      const ZSTD_CCtx_params* cctxParams,\n                            ZSTD_entropyCTablesMetadata_t* entropyMetadata,\n                            void* workspace, size_t wkspSize)\n{\n    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;\n    DEBUGLOG(5, \"ZSTD_buildSuperBlockEntropy\");\n    entropyMetadata->hufMetadata.hufDesSize =\n        ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,\n                                            &prevEntropy->huf, &nextEntropy->huf,\n              
                              &entropyMetadata->hufMetadata,\n                                            workspace, wkspSize);\n    FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize);\n    entropyMetadata->fseMetadata.fseTablesSize =\n        ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,\n                                              &prevEntropy->fse, &nextEntropy->fse,\n                                              cctxParams,\n                                              &entropyMetadata->fseMetadata,\n                                              workspace, wkspSize);\n    FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize);\n    return 0;\n}\n\n/** ZSTD_compressSubBlock_literal() :\n *  Compresses literals section for a sub-block.\n *  Compressed literal size needs to be less than uncompressed literal size.\n *      ZSTD spec doesn't have this constaint. I will explain why I have this constraint here.\n *      Literals section header size ranges from 1 to 5 bytes,\n *      which is dictated by regenerated size and compressed size.\n *      In order to figure out the memory address to start writing compressed literal,\n *      it is necessary to figure out the literals section header size.\n *      The challenge is that compressed size is only known after compression.\n *      This is a chicken and egg problem.\n *      I am simplifying the problem by assuming that\n *      compressed size will always be less than or equal to regenerated size,\n *      and using regenerated size to calculate literals section header size.\n *  hufMetadata->hType has literals block type info.\n *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.\n *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.\n *      If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block\n *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.\n 
*  @return : compressed size of literals section of a sub-block\n *            Or 0 if it unable to compress.\n *            Or error code */\nstatic size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,\n                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,\n                                    const BYTE* literals, size_t litSize,\n                                    void* dst, size_t dstSize,\n                                    const int bmi2, int writeEntropy)\n{\n    size_t const lhSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* const oend = ostart + dstSize;\n    BYTE* op = ostart + lhSize;\n    U32 singleStream = litSize < 256;\n    symbolEncodingType_e hType = writeEntropy ? set_compressed : set_repeat;\n    size_t cLitSize = 0;\n\n    (void)bmi2; // TODO bmi2...\n\n    DEBUGLOG(5, \"ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)\", litSize, lhSize, writeEntropy);\n\n    if (writeEntropy && litSize == 0) {\n      /* Literals section cannot be compressed mode when litSize == 0.\n       * (This seems to be decoder constraint.)\n       * Entropy cannot be written if literals section is not compressed mode.\n       */\n      return 0;\n    }\n\n    if (litSize == 0 || hufMetadata->hType == set_basic) {\n      DEBUGLOG(5, \"ZSTD_compressSubBlock_literal using raw literal\");\n      return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);\n    } else if (hufMetadata->hType == set_rle) {\n      DEBUGLOG(5, \"ZSTD_compressSubBlock_literal using rle literal\");\n      return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);\n    }\n\n    if (lhSize == 3) singleStream = 1;\n    if (writeEntropy) {\n        memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);\n        op += hufMetadata->hufDesSize;\n        cLitSize += hufMetadata->hufDesSize;\n        DEBUGLOG(5, \"ZSTD_compressSubBlock_literal (hSize=%zu)\", 
hufMetadata->hufDesSize);\n    }\n\n    // TODO bmi2\n    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)\n                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);\n        op += cSize;\n        cLitSize += cSize;\n        if (cSize == 0 || ERR_isError(cSize)) {\n          return 0;\n        }\n        if (cLitSize > litSize) {\n            if (writeEntropy) return 0;\n            else return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);\n        }\n        DEBUGLOG(5, \"ZSTD_compressSubBlock_literal (cSize=%zu)\", cSize);\n    }\n\n    /* Build header */\n    switch(lhSize)\n    {\n    case 3: /* 2 - 2 - 10 - 10 */\n        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);\n            MEM_writeLE24(ostart, lhc);\n            break;\n        }\n    case 4: /* 2 - 2 - 14 - 14 */\n        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);\n            MEM_writeLE32(ostart, lhc);\n            break;\n        }\n    case 5: /* 2 - 2 - 18 - 18 */\n        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);\n            MEM_writeLE32(ostart, lhc);\n            ostart[4] = (BYTE)(cLitSize >> 10);\n            break;\n        }\n    default:  /* not possible : lhSize is {3,4,5} */\n        assert(0);\n    }\n    return op-ostart;\n}\n\nstatic size_t ZSTD_seqDecompressedSize(const seqDef* sequences, size_t nbSeq, size_t litSize) {\n    const seqDef* const sstart = sequences;\n    const seqDef* const send = sequences + nbSeq;\n    const seqDef* sp = sstart;\n    size_t matchLengthSum = 0;\n    while (send-sp > 0) {\n      matchLengthSum += sp->matchLength + MINMATCH;\n      sp++;\n    }\n    return matchLengthSum + litSize;\n}\n\n/** ZSTD_compressSubBlock_sequences() :\n *  Compresses sequences section for a sub-block.\n *  fseMetadata->llType, 
fseMetadata->ofType, and fseMetadata->mlType have\n *  symbol compression modes for the super-block.\n *  First sub-block will have these in its header. The following sub-blocks\n *  will always have repeat mode.\n *  @return : compressed size of sequences section of a sub-block\n *            Or 0 if it is unable to compress\n *            Or error code. */\nstatic size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,\n                                              const ZSTD_fseCTablesMetadata_t* fseMetadata,\n                                              const seqDef* sequences, size_t nbSeq,\n                                              const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,\n                                              const ZSTD_CCtx_params* cctxParams,\n                                              void* dst, size_t dstCapacity,\n                                              const int bmi2, int writeEntropy)\n{\n    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* const oend = ostart + dstCapacity;\n    BYTE* op = ostart;\n    BYTE* seqHead;\n\n    DEBUGLOG(5, \"ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)\", nbSeq, writeEntropy, longOffsets);\n\n    /* Sequences Header */\n    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,\n                    dstSize_tooSmall);\n    if (nbSeq < 0x7F)\n        *op++ = (BYTE)nbSeq;\n    else if (nbSeq < LONGNBSEQ)\n        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;\n    else\n        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;\n    if (writeEntropy && nbSeq == 0) {\n        return 0;\n    }\n    if (nbSeq==0) {\n        return op - ostart;\n    }\n\n    /* seqHead : flags for FSE encoding type */\n    seqHead = op++;\n\n    DEBUGLOG(5, \"ZSTD_compressSubBlock_sequences (seqHeadSize=%u)\", 
(unsigned)(op-ostart));\n\n    if (writeEntropy) {\n        const U32 LLtype = fseMetadata->llType;\n        const U32 Offtype = fseMetadata->ofType;\n        const U32 MLtype = fseMetadata->mlType;\n        DEBUGLOG(5, \"ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)\", fseMetadata->fseTablesSize);\n        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));\n        memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);\n        op += fseMetadata->fseTablesSize;\n    } else {\n        const U32 repeat = set_repeat;\n        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));\n    }\n\n    {   size_t const bitstreamSize = ZSTD_encodeSequences(\n                                        op, oend - op,\n                                        fseTables->matchlengthCTable, mlCode,\n                                        fseTables->offcodeCTable, ofCode,\n                                        fseTables->litlengthCTable, llCode,\n                                        sequences, nbSeq,\n                                        longOffsets, bmi2);\n        FORWARD_IF_ERROR(bitstreamSize);\n        op += bitstreamSize;\n        /* zstd versions <= 1.3.4 mistakenly report corruption when\n         * FSE_readNCount() receives a buffer < 4 bytes.\n         * Fixed by https://github.com/facebook/zstd/pull/1146.\n         * This can happen when the last set_compressed table present is 2\n         * bytes and the bitstream is only one byte.\n         * In this exceedingly rare case, we will simply emit an uncompressed\n         * block, since it isn't worth optimizing.\n         */\n        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {\n            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */\n            assert(fseMetadata->lastCountSize + bitstreamSize == 3);\n            DEBUGLOG(5, \"Avoiding bug in zstd decoder in versions <= 1.3.4 by \"\n                       
 \"emitting an uncompressed block.\");\n            return 0;\n        }\n        DEBUGLOG(5, \"ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)\", bitstreamSize);\n    }\n\n    /* zstd versions <= 1.4.0 mistakenly report error when\n     * sequences section body size is less than 3 bytes.\n     * Fixed by https://github.com/facebook/zstd/pull/1664.\n     * This can happen when the previous sequences section block is compressed\n     * with rle mode and the current block's sequences section is compressed\n     * with repeat mode where sequences section body size can be 1 byte.\n     */\n    if (op-seqHead < 4) {\n        return 0;\n    }\n\n    return op - ostart;\n}\n\n/** ZSTD_compressSubBlock() :\n *  Compresses a single sub-block.\n *  @return : compressed size of the sub-block\n *            Or 0 if it failed to compress. */\nstatic size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,\n                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,\n                                    const seqDef* sequences, size_t nbSeq,\n                                    const BYTE* literals, size_t litSize,\n                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,\n                                    const ZSTD_CCtx_params* cctxParams,\n                                    void* dst, size_t dstCapacity,\n                                    const int bmi2, int writeEntropy, U32 lastBlock)\n{\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* const oend = ostart + dstCapacity;\n    BYTE* op = ostart + ZSTD_blockHeaderSize;\n    DEBUGLOG(5, \"ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeEntropy=%d, lastBlock=%d)\",\n                litSize, nbSeq, writeEntropy, lastBlock);\n    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,\n                                                        &entropyMetadata->hufMetadata, literals, litSize,\n                  
                                      op, oend-op, bmi2, writeEntropy);\n        FORWARD_IF_ERROR(cLitSize);\n        if (cLitSize == 0) return 0;\n        op += cLitSize;\n    }\n    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,\n                                                  &entropyMetadata->fseMetadata,\n                                                  sequences, nbSeq,\n                                                  llCode, mlCode, ofCode,\n                                                  cctxParams,\n                                                  op, oend-op,\n                                                  bmi2, writeEntropy);\n        FORWARD_IF_ERROR(cSeqSize);\n        if (cSeqSize == 0) return 0;\n        op += cSeqSize;\n    }\n    /* Write block header */\n    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;\n        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);\n        MEM_writeLE24(ostart, cBlockHeader24);\n    }\n    return op-ostart;\n}\n\nstatic size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,\n                                                const ZSTD_hufCTables_t* huf,\n                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,\n                                                void* workspace, size_t wkspSize,\n                                                int writeEntropy)\n{\n    unsigned* const countWksp = (unsigned*)workspace;\n    unsigned maxSymbolValue = 255;\n    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */\n\n    if (hufMetadata->hType == set_basic) return litSize;\n    else if (hufMetadata->hType == set_rle) return 1;\n    else if (hufMetadata->hType == set_compressed) {\n        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);\n        if (ZSTD_isError(largest)) return litSize;\n        {   
size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);\n            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;\n            return cLitSizeEstimate + literalSectionHeaderSize;\n    }   }\n    assert(0); /* impossible */\n    return 0;\n}\n\nstatic size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,\n                        const BYTE* codeTable, unsigned maxCode,\n                        size_t nbSeq, const FSE_CTable* fseCTable,\n                        const U32* additionalBits,\n                        short const* defaultNorm, U32 defaultNormLog,\n                        void* workspace, size_t wkspSize)\n{\n    unsigned* const countWksp = (unsigned*)workspace;\n    const BYTE* ctp = codeTable;\n    const BYTE* const ctStart = ctp;\n    const BYTE* const ctEnd = ctStart + nbSeq;\n    size_t cSymbolTypeSizeEstimateInBits = 0;\n    unsigned max = maxCode;\n\n    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */\n    if (type == set_basic) {\n        cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);\n    } else if (type == set_rle) {\n        cSymbolTypeSizeEstimateInBits = 0;\n    } else if (type == set_compressed || type == set_repeat) {\n        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);\n    }\n    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;\n    while (ctp < ctEnd) {\n        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];\n        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */\n        ctp++;\n    }\n    return cSymbolTypeSizeEstimateInBits / 8;\n}\n\nstatic size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,\n                                                  const BYTE* llCodeTable,\n                          
                        const BYTE* mlCodeTable,\n                                                  size_t nbSeq,\n                                                  const ZSTD_fseCTables_t* fseTables,\n                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,\n                                                  void* workspace, size_t wkspSize,\n                                                  int writeEntropy)\n{\n    size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */\n    size_t cSeqSizeEstimate = 0;\n    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,\n                                         nbSeq, fseTables->offcodeCTable, NULL,\n                                         OF_defaultNorm, OF_defaultNormLog,\n                                         workspace, wkspSize);\n    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,\n                                         nbSeq, fseTables->litlengthCTable, LL_bits,\n                                         LL_defaultNorm, LL_defaultNormLog,\n                                         workspace, wkspSize);\n    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,\n                                         nbSeq, fseTables->matchlengthCTable, ML_bits,\n                                         ML_defaultNorm, ML_defaultNormLog,\n                                         workspace, wkspSize);\n    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;\n    return cSeqSizeEstimate + sequencesSectionHeaderSize;\n}\n\nstatic size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,\n                                        const BYTE* ofCodeTable,\n                                        const BYTE* llCodeTable,\n                                        const BYTE* mlCodeTable,\n                             
           size_t nbSeq,\n                                        const ZSTD_entropyCTables_t* entropy,\n                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,\n                                        void* workspace, size_t wkspSize,\n                                        int writeEntropy) {\n    size_t cSizeEstimate = 0;\n    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,\n                                                         &entropy->huf, &entropyMetadata->hufMetadata,\n                                                         workspace, wkspSize, writeEntropy);\n    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,\n                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,\n                                                         workspace, wkspSize, writeEntropy);\n    return cSizeEstimate + ZSTD_blockHeaderSize;\n}\n\n/** ZSTD_compressSubBlock_multi() :\n *  Breaks super-block into multiple sub-blocks and compresses them.\n *  Entropy will be written to the first block.\n *  The following blocks will use repeat mode to compress.\n *  All sub-blocks are compressed blocks (no raw or rle blocks).\n *  @return : compressed size of the super block (which is multiple ZSTD blocks)\n *            Or 0 if it failed to compress. 
*/\nstatic size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,\n                            const ZSTD_entropyCTables_t* entropy,\n                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,\n                            const ZSTD_CCtx_params* cctxParams,\n                                  void* dst, size_t dstCapacity,\n                            const int bmi2, U32 lastBlock,\n                            void* workspace, size_t wkspSize)\n{\n    const seqDef* const sstart = seqStorePtr->sequencesStart;\n    const seqDef* const send = seqStorePtr->sequences;\n    const seqDef* sp = sstart;\n    const BYTE* const lstart = seqStorePtr->litStart;\n    const BYTE* const lend = seqStorePtr->lit;\n    const BYTE* lp = lstart;\n    BYTE* const ostart = (BYTE*)dst;\n    BYTE* const oend = ostart + dstCapacity;\n    BYTE* op = ostart;\n    const BYTE* llCodePtr = seqStorePtr->llCode;\n    const BYTE* mlCodePtr = seqStorePtr->mlCode;\n    const BYTE* ofCodePtr = seqStorePtr->ofCode;\n    size_t targetCBlockSize = cctxParams->targetCBlockSize;\n    size_t litSize, seqCount;\n    int writeEntropy = 1;\n    size_t remaining = ZSTD_seqDecompressedSize(sstart, send-sstart, lend-lstart);\n    size_t cBlockSizeEstimate = 0;\n\n    DEBUGLOG(5, \"ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)\",\n                (unsigned)(lend-lp), (unsigned)(send-sstart));\n\n    litSize = 0;\n    seqCount = 0;\n    while (sp + seqCount < send) {\n        const seqDef* const sequence = sp + seqCount;\n        const U32 lastSequence = sequence+1 == send;\n        litSize = (sequence == send) ? 
(size_t)(lend-lp) : litSize + sequence->litLength;\n        seqCount++;\n        /* I think there is an optimization opportunity here.\n         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful\n         * since it recalculates estimate from scratch.\n         * For example, it would recount literal distribution and symbol codes everytime.\n         */\n        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,\n                                                       entropy, entropyMetadata,\n                                                       workspace, wkspSize, writeEntropy);\n        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {\n            const size_t decompressedSize = ZSTD_seqDecompressedSize(sp, seqCount, litSize);\n            const size_t cSize = ZSTD_compressSubBlock(entropy, entropyMetadata,\n                                                       sp, seqCount,\n                                                       lp, litSize,\n                                                       llCodePtr, mlCodePtr, ofCodePtr,\n                                                       cctxParams,\n                                                       op, oend-op,\n                                                       bmi2, writeEntropy, lastBlock && lastSequence);\n            FORWARD_IF_ERROR(cSize);\n            if (cSize > 0 && cSize < decompressedSize) {\n                assert(remaining >= decompressedSize);\n                remaining -= decompressedSize;\n                sp += seqCount;\n                lp += litSize;\n                op += cSize;\n                llCodePtr += seqCount;\n                mlCodePtr += seqCount;\n                ofCodePtr += seqCount;\n                litSize = 0;\n                seqCount = 0;\n                writeEntropy = 0; // Entropy only needs to be written once\n            }\n        }\n    }\n    if (remaining) {\n        
DEBUGLOG(5, \"ZSTD_compressSubBlock_multi failed to compress\");\n        return 0;\n    }\n    DEBUGLOG(5, \"ZSTD_compressSubBlock_multi compressed\");\n    return op-ostart;\n}\n\nsize_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,\n                               void* dst, size_t dstCapacity,\n                               unsigned lastBlock) {\n    ZSTD_entropyCTablesMetadata_t entropyMetadata;\n\n    FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,\n          &zc->blockState.prevCBlock->entropy,\n          &zc->blockState.nextCBlock->entropy,\n          &zc->appliedParams,\n          &entropyMetadata,\n          zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */));\n\n    return ZSTD_compressSubBlock_multi(&zc->seqStore,\n            &zc->blockState.nextCBlock->entropy,\n            &entropyMetadata,\n            &zc->appliedParams,\n            dst, dstCapacity,\n            zc->bmi2, lastBlock,\n            zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_compress_superblock.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_COMPRESS_ADVANCED_H\n#define ZSTD_COMPRESS_ADVANCED_H\n\n/*-*************************************\n*  Dependencies\n***************************************/\n\n#include \"zstd.h\" /* ZSTD_CCtx */\n\n/*-*************************************\n*  Target Compressed Block Size\n***************************************/\n\n/* ZSTD_compressSuperBlock() :\n * Used to compress a super block when targetCBlockSize is being used.\n * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */\nsize_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,\n                               void* dst, size_t dstCapacity,\n                               unsigned lastBlock);\n\n#endif /* ZSTD_COMPRESS_ADVANCED_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_cwksp.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_CWKSP_H\n#define ZSTD_CWKSP_H\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include \"zstd_internal.h\"\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*-*************************************\n*  Constants\n***************************************/\n\n/* define \"workspace is too large\" as this number of times larger than needed */\n#define ZSTD_WORKSPACETOOLARGE_FACTOR 3\n\n/* when workspace is continuously too large\n * during at least this number of times,\n * context's memory usage is considered wasteful,\n * because it's sized to handle a worst case scenario which rarely happens.\n * In which case, resize it down to free some memory */\n#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128\n\n/* Since the workspace is effectively its own little malloc implementation /\n * arena, when we run under ASAN, we should similarly insert redzones between\n * each internal element of the workspace, so ASAN will catch overruns that\n * reach outside an object but that stay inside the workspace.\n *\n * This defines the size of that redzone.\n */\n#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE\n#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128\n#endif\n\n/*-*************************************\n*  Structures\n***************************************/\ntypedef enum {\n    ZSTD_cwksp_alloc_objects,\n    ZSTD_cwksp_alloc_buffers,\n    ZSTD_cwksp_alloc_aligned\n} ZSTD_cwksp_alloc_phase_e;\n\n/**\n * Zstd fits all its internal datastructures into a single continuous buffer,\n * so that it only needs to perform a 
single OS allocation (or so that a buffer\n * can be provided to it and it can perform no allocations at all). This buffer\n * is called the workspace.\n *\n * Several optimizations complicate that process of allocating memory ranges\n * from this workspace for each internal datastructure:\n *\n * - These different internal datastructures have different setup requirements:\n *\n *   - The static objects need to be cleared once and can then be trivially\n *     reused for each compression.\n *\n *   - Various buffers don't need to be initialized at all--they are always\n *     written into before they're read.\n *\n *   - The matchstate tables have a unique requirement that they don't need\n *     their memory to be totally cleared, but they do need the memory to have\n *     some bound, i.e., a guarantee that all values in the memory they've been\n *     allocated is less than some maximum value (which is the starting value\n *     for the indices that they will then use for compression). When this\n *     guarantee is provided to them, they can use the memory without any setup\n *     work. When it can't, they have to clear the area.\n *\n * - These buffers also have different alignment requirements.\n *\n * - We would like to reuse the objects in the workspace for multiple\n *   compressions without having to perform any expensive reallocation or\n *   reinitialization work.\n *\n * - We would like to be able to efficiently reuse the workspace across\n *   multiple compressions **even when the compression parameters change** and\n *   we need to resize some of the objects (where possible).\n *\n * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp\n * abstraction was created. It works as follows:\n *\n * Workspace Layout:\n *\n * [                        ... workspace ...                         ]\n * [objects][tables ... ->] free space [<- ... aligned][<- ... 
buffers]\n *\n * The various objects that live in the workspace are divided into the\n * following categories, and are allocated separately:\n *\n * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,\n *   so that literally everything fits in a single buffer. Note: if present,\n *   this must be the first object in the workspace, since ZSTD_free{CCtx,\n *   CDict}() rely on a pointer comparison to see whether one or two frees are\n *   required.\n *\n * - Fixed size objects: these are fixed-size, fixed-count objects that are\n *   nonetheless \"dynamically\" allocated in the workspace so that we can\n *   control how they're initialized separately from the broader ZSTD_CCtx.\n *   Examples:\n *   - Entropy Workspace\n *   - 2 x ZSTD_compressedBlockState_t\n *   - CDict dictionary contents\n *\n * - Tables: these are any of several different datastructures (hash tables,\n *   chain tables, binary trees) that all respect a common format: they are\n *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).\n *   Their sizes depend on the cparams.\n *\n * - Aligned: these buffers are used for various purposes that require 4 byte\n *   alignment, but don't require any initialization before they're used.\n *\n * - Buffers: these buffers are used for various purposes that don't require\n *   any alignment or initialization before they're used. This means they can\n *   be moved around at no cost for a new compression.\n *\n * Allocating Memory:\n *\n * The various types of objects must be allocated in order, so they can be\n * correctly packed into the workspace buffer. That order is:\n *\n * 1. Objects\n * 2. Buffers\n * 3. Aligned\n * 4. 
Tables\n *\n * Attempts to reserve objects of different types out of order will fail.\n */\ntypedef struct {\n    void* workspace;\n    void* workspaceEnd;\n\n    void* objectEnd;\n    void* tableEnd;\n    void* tableValidEnd;\n    void* allocStart;\n\n    int allocFailed;\n    int workspaceOversizedDuration;\n    ZSTD_cwksp_alloc_phase_e phase;\n} ZSTD_cwksp;\n\n/*-*************************************\n*  Functions\n***************************************/\n\nMEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);\n\nMEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {\n    (void)ws;\n    assert(ws->workspace <= ws->objectEnd);\n    assert(ws->objectEnd <= ws->tableEnd);\n    assert(ws->objectEnd <= ws->tableValidEnd);\n    assert(ws->tableEnd <= ws->allocStart);\n    assert(ws->tableValidEnd <= ws->allocStart);\n    assert(ws->allocStart <= ws->workspaceEnd);\n}\n\n/**\n * Align must be a power of 2.\n */\nMEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {\n    size_t const mask = align - 1;\n    assert((align & mask) == 0);\n    return (size + mask) & ~mask;\n}\n\n/**\n * Use this to determine how much space in the workspace we will consume to\n * allocate this object. (Normally it should be exactly the size of the object,\n * but under special conditions, like ASAN, where we pad each object, it might\n * be larger.)\n *\n * Since tables aren't currently redzoned, you don't need to call through this\n * to figure out how much space you need for the matchState tables. 
Everything\n * else is though.\n */\nMEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {\n#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)\n    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;\n#else\n    return size;\n#endif\n}\n\nMEM_STATIC void ZSTD_cwksp_internal_advance_phase(\n        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {\n    assert(phase >= ws->phase);\n    if (phase > ws->phase) {\n        if (ws->phase < ZSTD_cwksp_alloc_buffers &&\n                phase >= ZSTD_cwksp_alloc_buffers) {\n            ws->tableValidEnd = ws->objectEnd;\n        }\n        if (ws->phase < ZSTD_cwksp_alloc_aligned &&\n                phase >= ZSTD_cwksp_alloc_aligned) {\n            /* If unaligned allocations down from a too-large top have left us\n             * unaligned, we need to realign our alloc ptr. Technically, this\n             * can consume space that is unaccounted for in the neededSpace\n             * calculation. However, I believe this can only happen when the\n             * workspace is too large, and specifically when it is too large\n             * by a larger margin than the space that will be consumed. */\n            /* TODO: cleaner, compiler warning friendly way to do this??? */\n            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));\n            if (ws->allocStart < ws->tableValidEnd) {\n                ws->tableValidEnd = ws->allocStart;\n            }\n        }\n        ws->phase = phase;\n    }\n}\n\n/**\n * Returns whether this object/buffer/etc was allocated in this workspace.\n */\nMEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {\n    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);\n}\n\n/**\n * Internal function. 
Do not use directly.\n */\nMEM_STATIC void* ZSTD_cwksp_reserve_internal(\n        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {\n    void* alloc;\n    void* bottom = ws->tableEnd;\n    ZSTD_cwksp_internal_advance_phase(ws, phase);\n    alloc = (BYTE *)ws->allocStart - bytes;\n\n#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)\n    /* over-reserve space */\n    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;\n#endif\n\n    DEBUGLOG(5, \"cwksp: reserving %p %zd bytes, %zd bytes remaining\",\n        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);\n    ZSTD_cwksp_assert_internal_consistency(ws);\n    assert(alloc >= bottom);\n    if (alloc < bottom) {\n        DEBUGLOG(4, \"cwksp: alloc failed!\");\n        ws->allocFailed = 1;\n        return NULL;\n    }\n    if (alloc < ws->tableValidEnd) {\n        ws->tableValidEnd = alloc;\n    }\n    ws->allocStart = alloc;\n\n#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)\n    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on\n     * either size. */\n    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;\n    __asan_unpoison_memory_region(alloc, bytes);\n#endif\n\n    return alloc;\n}\n\n/**\n * Reserves and returns unaligned memory.\n */\nMEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {\n    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);\n}\n\n/**\n * Reserves and returns memory sized on and aligned on sizeof(unsigned).\n */\nMEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {\n    assert((bytes & (sizeof(U32)-1)) == 0);\n    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);\n}\n\n/**\n * Aligned on sizeof(unsigned). 
These buffers have the special property that\n * their values remain constrained, allowing us to re-use them without\n * memset()-ing them.\n */\nMEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {\n    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;\n    void* alloc = ws->tableEnd;\n    void* end = (BYTE *)alloc + bytes;\n    void* top = ws->allocStart;\n\n    DEBUGLOG(5, \"cwksp: reserving %p table %zd bytes, %zd bytes remaining\",\n        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);\n    assert((bytes & (sizeof(U32)-1)) == 0);\n    ZSTD_cwksp_internal_advance_phase(ws, phase);\n    ZSTD_cwksp_assert_internal_consistency(ws);\n    assert(end <= top);\n    if (end > top) {\n        DEBUGLOG(4, \"cwksp: table alloc failed!\");\n        ws->allocFailed = 1;\n        return NULL;\n    }\n    ws->tableEnd = end;\n\n#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)\n    __asan_unpoison_memory_region(alloc, bytes);\n#endif\n\n    return alloc;\n}\n\n/**\n * Aligned on sizeof(void*).\n */\nMEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {\n    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));\n    void* alloc = ws->objectEnd;\n    void* end = (BYTE*)alloc + roundedBytes;\n\n#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)\n    /* over-reserve space */\n    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;\n#endif\n\n    DEBUGLOG(5,\n        \"cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining\",\n        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);\n    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);\n    assert((bytes & (sizeof(void*)-1)) == 0);\n    ZSTD_cwksp_assert_internal_consistency(ws);\n    /* we must be in the first phase, no advance is possible */\n    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {\n        DEBUGLOG(4, \"cwksp: object 
alloc failed!\");\n        ws->allocFailed = 1;\n        return NULL;\n    }\n    ws->objectEnd = end;\n    ws->tableEnd = end;\n    ws->tableValidEnd = end;\n\n#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)\n    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on\n     * either size. */\n    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;\n    __asan_unpoison_memory_region(alloc, bytes);\n#endif\n\n    return alloc;\n}\n\nMEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {\n    DEBUGLOG(4, \"cwksp: ZSTD_cwksp_mark_tables_dirty\");\n\n#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)\n    /* To validate that the table re-use logic is sound, and that we don't\n     * access table space that we haven't cleaned, we re-\"poison\" the table\n     * space every time we mark it dirty. */\n    {\n        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;\n        assert(__msan_test_shadow(ws->objectEnd, size) == -1);\n        __msan_poison(ws->objectEnd, size);\n    }\n#endif\n\n    assert(ws->tableValidEnd >= ws->objectEnd);\n    assert(ws->tableValidEnd <= ws->allocStart);\n    ws->tableValidEnd = ws->objectEnd;\n    ZSTD_cwksp_assert_internal_consistency(ws);\n}\n\nMEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {\n    DEBUGLOG(4, \"cwksp: ZSTD_cwksp_mark_tables_clean\");\n    assert(ws->tableValidEnd >= ws->objectEnd);\n    assert(ws->tableValidEnd <= ws->allocStart);\n    if (ws->tableValidEnd < ws->tableEnd) {\n        ws->tableValidEnd = ws->tableEnd;\n    }\n    ZSTD_cwksp_assert_internal_consistency(ws);\n}\n\n/**\n * Zero the part of the allocated tables not already marked clean.\n */\nMEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {\n    DEBUGLOG(4, \"cwksp: ZSTD_cwksp_clean_tables\");\n    assert(ws->tableValidEnd >= ws->objectEnd);\n    assert(ws->tableValidEnd <= ws->allocStart);\n    if (ws->tableValidEnd < ws->tableEnd) {\n        
memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);\n    }\n    ZSTD_cwksp_mark_tables_clean(ws);\n}\n\n/**\n * Invalidates table allocations.\n * All other allocations remain valid.\n */\nMEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {\n    DEBUGLOG(4, \"cwksp: clearing tables!\");\n\n#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)\n    {\n        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;\n        __asan_poison_memory_region(ws->objectEnd, size);\n    }\n#endif\n\n    ws->tableEnd = ws->objectEnd;\n    ZSTD_cwksp_assert_internal_consistency(ws);\n}\n\n/**\n * Invalidates all buffer, aligned, and table allocations.\n * Object allocations remain valid.\n */\nMEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {\n    DEBUGLOG(4, \"cwksp: clearing!\");\n\n#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)\n    /* To validate that the context re-use logic is sound, and that we don't\n     * access stuff that this compression hasn't initialized, we re-\"poison\"\n     * the workspace (or at least the non-static, non-table parts of it)\n     * every time we start a new compression. 
*/\n    {\n        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;\n        __msan_poison(ws->tableValidEnd, size);\n    }\n#endif\n\n#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)\n    {\n        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;\n        __asan_poison_memory_region(ws->objectEnd, size);\n    }\n#endif\n\n    ws->tableEnd = ws->objectEnd;\n    ws->allocStart = ws->workspaceEnd;\n    ws->allocFailed = 0;\n    if (ws->phase > ZSTD_cwksp_alloc_buffers) {\n        ws->phase = ZSTD_cwksp_alloc_buffers;\n    }\n    ZSTD_cwksp_assert_internal_consistency(ws);\n}\n\n/**\n * The provided workspace takes ownership of the buffer [start, start+size).\n * Any existing values in the workspace are ignored (the previously managed\n * buffer, if present, must be separately freed).\n */\nMEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {\n    DEBUGLOG(4, \"cwksp: init'ing workspace with %zd bytes\", size);\n    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */\n    ws->workspace = start;\n    ws->workspaceEnd = (BYTE*)start + size;\n    ws->objectEnd = ws->workspace;\n    ws->tableValidEnd = ws->objectEnd;\n    ws->phase = ZSTD_cwksp_alloc_objects;\n    ZSTD_cwksp_clear(ws);\n    ws->workspaceOversizedDuration = 0;\n    ZSTD_cwksp_assert_internal_consistency(ws);\n}\n\nMEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {\n    void* workspace = ZSTD_malloc(size, customMem);\n    DEBUGLOG(4, \"cwksp: creating new workspace with %zd bytes\", size);\n    RETURN_ERROR_IF(workspace == NULL, memory_allocation);\n    ZSTD_cwksp_init(ws, workspace, size);\n    return 0;\n}\n\nMEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {\n    void *ptr = ws->workspace;\n    DEBUGLOG(4, \"cwksp: freeing workspace\");\n    memset(ws, 0, sizeof(ZSTD_cwksp));\n    ZSTD_free(ptr, customMem);\n}\n\n/**\n * Moves 
the management of a workspace from one cwksp to another. The src cwksp\n * is left in an invalid state (src must be re-init()'ed before its used again).\n */\nMEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {\n    *dst = *src;\n    memset(src, 0, sizeof(ZSTD_cwksp));\n}\n\nMEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {\n    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);\n}\n\nMEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {\n    return ws->allocFailed;\n}\n\n/*-*************************************\n*  Functions Checking Free Space\n***************************************/\n\nMEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {\n    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);\n}\n\nMEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {\n    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;\n}\n\nMEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {\n    return ZSTD_cwksp_check_available(\n        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);\n}\n\nMEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {\n    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)\n        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;\n}\n\nMEM_STATIC void ZSTD_cwksp_bump_oversized_duration(\n        ZSTD_cwksp* ws, size_t additionalNeededSpace) {\n    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {\n        ws->workspaceOversizedDuration++;\n    } else {\n        ws->workspaceOversizedDuration = 0;\n    }\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_CWKSP_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_double_fast.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#include \"zstd_compress_internal.h\"\n#include \"zstd_double_fast.h\"\n\n\nvoid ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,\n                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32* const hashLarge = ms->hashTable;\n    U32  const hBitsL = cParams->hashLog;\n    U32  const mls = cParams->minMatch;\n    U32* const hashSmall = ms->chainTable;\n    U32  const hBitsS = cParams->chainLog;\n    const BYTE* const base = ms->window.base;\n    const BYTE* ip = base + ms->nextToUpdate;\n    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;\n    const U32 fastHashFillStep = 3;\n\n    /* Always insert every fastHashFillStep position into the hash tables.\n     * Insert the other positions into the large hash table if their entry\n     * is empty.\n     */\n    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {\n        U32 const current = (U32)(ip - base);\n        U32 i;\n        for (i = 0; i < fastHashFillStep; ++i) {\n            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);\n            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);\n            if (i == 0)\n                hashSmall[smHash] = current + i;\n            if (i == 0 || hashLarge[lgHash] == 0)\n                hashLarge[lgHash] = current + i;\n            /* Only load extra positions for ZSTD_dtlm_full */\n            if (dtlm == ZSTD_dtlm_fast)\n                break;\n    }   }\n}\n\n\nFORCE_INLINE_TEMPLATE\nsize_t 
ZSTD_compressBlock_doubleFast_generic(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize,\n        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)\n{\n    ZSTD_compressionParameters const* cParams = &ms->cParams;\n    U32* const hashLong = ms->hashTable;\n    const U32 hBitsL = cParams->hashLog;\n    U32* const hashSmall = ms->chainTable;\n    const U32 hBitsS = cParams->chainLog;\n    const BYTE* const base = ms->window.base;\n    const BYTE* const istart = (const BYTE*)src;\n    const BYTE* ip = istart;\n    const BYTE* anchor = istart;\n    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);\n    const U32 lowestValid = ms->window.dictLimit;\n    const U32 maxDistance = 1U << cParams->windowLog;\n    /* presumes that, if there is a dictionary, it must be using Attach mode */\n    const U32 prefixLowestIndex = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;\n    const BYTE* const prefixLowest = base + prefixLowestIndex;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* const ilimit = iend - HASH_READ_SIZE;\n    U32 offset_1=rep[0], offset_2=rep[1];\n    U32 offsetSaved = 0;\n\n    const ZSTD_matchState_t* const dms = ms->dictMatchState;\n    const ZSTD_compressionParameters* const dictCParams =\n                                     dictMode == ZSTD_dictMatchState ?\n                                     &dms->cParams : NULL;\n    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?\n                                     dms->hashTable : NULL;\n    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?\n                                     dms->chainTable : NULL;\n    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?\n                                     dms->window.dictLimit : 0;\n    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?\n                                    
 dms->window.base : NULL;\n    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?\n                                     dictBase + dictStartIndex : NULL;\n    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?\n                                     dms->window.nextSrc : NULL;\n    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?\n                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :\n                                     0;\n    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?\n                                     dictCParams->hashLog : hBitsL;\n    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?\n                                     dictCParams->chainLog : hBitsS;\n    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));\n\n    DEBUGLOG(5, \"ZSTD_compressBlock_doubleFast_generic\");\n\n    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);\n\n    /* if a dictionary is attached, it must be within window range */\n    if (dictMode == ZSTD_dictMatchState) {\n        assert(lowestValid + maxDistance >= endIndex);\n    }\n\n    /* init */\n    ip += (dictAndPrefixLength == 0);\n    if (dictMode == ZSTD_noDict) {\n        U32 const maxRep = (U32)(ip - prefixLowest);\n        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;\n        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;\n    }\n    if (dictMode == ZSTD_dictMatchState) {\n        /* dictMatchState repCode checks don't currently handle repCode == 0\n         * disabling. 
*/\n        assert(offset_1 <= dictAndPrefixLength);\n        assert(offset_2 <= dictAndPrefixLength);\n    }\n\n    /* Main Search Loop */\n    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */\n        size_t mLength;\n        U32 offset;\n        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);\n        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);\n        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);\n        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);\n        U32 const current = (U32)(ip-base);\n        U32 const matchIndexL = hashLong[h2];\n        U32 matchIndexS = hashSmall[h];\n        const BYTE* matchLong = base + matchIndexL;\n        const BYTE* match = base + matchIndexS;\n        const U32 repIndex = current + 1 - offset_1;\n        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState\n                            && repIndex < prefixLowestIndex) ?\n                               dictBase + (repIndex - dictIndexDelta) :\n                               base + repIndex;\n        hashLong[h2] = hashSmall[h] = current;   /* update hash tables */\n\n        /* check dictMatchState repcode */\n        if (dictMode == ZSTD_dictMatchState\n            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)\n            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {\n            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend;\n            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;\n            ip++;\n            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);\n            goto _match_stored;\n        }\n\n        /* check noDict repcode */\n        if ( dictMode == ZSTD_noDict\n          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {\n            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;\n            ip++;\n            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);\n            goto _match_stored;\n        }\n\n        if (matchIndexL > prefixLowestIndex) {\n            /* check prefix long match */\n            if (MEM_read64(matchLong) == MEM_read64(ip)) {\n                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;\n                offset = (U32)(ip-matchLong);\n                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */\n                goto _match_found;\n            }\n        } else if (dictMode == ZSTD_dictMatchState) {\n            /* check dictMatchState long match */\n            U32 const dictMatchIndexL = dictHashLong[dictHL];\n            const BYTE* dictMatchL = dictBase + dictMatchIndexL;\n            assert(dictMatchL < dictEnd);\n\n            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {\n                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;\n                offset = (U32)(current - dictMatchIndexL - dictIndexDelta);\n                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */\n                goto _match_found;\n        }   }\n\n        if (matchIndexS > prefixLowestIndex) {\n            /* check prefix short match */\n            if (MEM_read32(match) == 
MEM_read32(ip)) {\n                goto _search_next_long;\n            }\n        } else if (dictMode == ZSTD_dictMatchState) {\n            /* check dictMatchState short match */\n            U32 const dictMatchIndexS = dictHashSmall[dictHS];\n            match = dictBase + dictMatchIndexS;\n            matchIndexS = dictMatchIndexS + dictIndexDelta;\n\n            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {\n                goto _search_next_long;\n        }   }\n\n        ip += ((ip-anchor) >> kSearchStrength) + 1;\n        continue;\n\n_search_next_long:\n\n        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);\n            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);\n            U32 const matchIndexL3 = hashLong[hl3];\n            const BYTE* matchL3 = base + matchIndexL3;\n            hashLong[hl3] = current + 1;\n\n            /* check prefix long +1 match */\n            if (matchIndexL3 > prefixLowestIndex) {\n                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {\n                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;\n                    ip++;\n                    offset = (U32)(ip-matchL3);\n                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */\n                    goto _match_found;\n                }\n            } else if (dictMode == ZSTD_dictMatchState) {\n                /* check dict long +1 match */\n                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];\n                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;\n                assert(dictMatchL3 < dictEnd);\n                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {\n                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;\n                    ip++;\n                    offset = (U32)(current + 1 - dictMatchIndexL3 - 
dictIndexDelta);\n                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */\n                    goto _match_found;\n        }   }   }\n\n        /* if no long +1 match, explore the short match we found */\n        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {\n            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;\n            offset = (U32)(current - matchIndexS);\n            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */\n        } else {\n            mLength = ZSTD_count(ip+4, match+4, iend) + 4;\n            offset = (U32)(ip - match);\n            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */\n        }\n\n        /* fall-through */\n\n_match_found:\n        offset_2 = offset_1;\n        offset_1 = offset;\n\n        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);\n\n_match_stored:\n        /* match found */\n        ip += mLength;\n        anchor = ip;\n\n        if (ip <= ilimit) {\n            /* Complementary insertion */\n            /* done after iLimit test, as candidates could be > iend-8 */\n            {   U32 const indexToInsert = current+2;\n                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;\n                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);\n                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;\n                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);\n            }\n\n            /* check immediate repcode */\n            if (dictMode == ZSTD_dictMatchState) {\n                while (ip <= ilimit) {\n                    U32 const current2 = (U32)(ip-base);\n                    U32 const 
repIndex2 = current2 - offset_2;\n                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState\n                        && repIndex2 < prefixLowestIndex ?\n                            dictBase + repIndex2 - dictIndexDelta :\n                            base + repIndex2;\n                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)\n                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {\n                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;\n                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;\n                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */\n                        ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);\n                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;\n                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;\n                        ip += repLength2;\n                        anchor = ip;\n                        continue;\n                    }\n                    break;\n            }   }\n\n            if (dictMode == ZSTD_noDict) {\n                while ( (ip <= ilimit)\n                     && ( (offset_2>0)\n                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {\n                    /* store sequence */\n                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;\n                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */\n                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);\n                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);\n                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);\n                    ip += rLength;\n               
     anchor = ip;\n                    continue;   /* faster when present ... (?) */\n        }   }   }\n    }   /* while (ip < ilimit) */\n\n    /* save reps for next block */\n    rep[0] = offset_1 ? offset_1 : offsetSaved;\n    rep[1] = offset_2 ? offset_2 : offsetSaved;\n\n    /* Return the last literals size */\n    return (size_t)(iend - anchor);\n}\n\n\nsize_t ZSTD_compressBlock_doubleFast(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    const U32 mls = ms->cParams.minMatch;\n    switch(mls)\n    {\n    default: /* includes case 3 */\n    case 4 :\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);\n    case 5 :\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);\n    case 6 :\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);\n    case 7 :\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);\n    }\n}\n\n\nsize_t ZSTD_compressBlock_doubleFast_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    const U32 mls = ms->cParams.minMatch;\n    switch(mls)\n    {\n    default: /* includes case 3 */\n    case 4 :\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);\n    case 5 :\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);\n    case 6 :\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);\n    case 7 :\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);\n    }\n}\n\n\nstatic size_t ZSTD_compressBlock_doubleFast_extDict_generic(\n        ZSTD_matchState_t* 
ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize,\n        U32 const mls /* template */)\n{\n    ZSTD_compressionParameters const* cParams = &ms->cParams;\n    U32* const hashLong = ms->hashTable;\n    U32  const hBitsL = cParams->hashLog;\n    U32* const hashSmall = ms->chainTable;\n    U32  const hBitsS = cParams->chainLog;\n    const BYTE* const istart = (const BYTE*)src;\n    const BYTE* ip = istart;\n    const BYTE* anchor = istart;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* const ilimit = iend - 8;\n    const BYTE* const base = ms->window.base;\n    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);\n    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);\n    const U32   dictStartIndex = lowLimit;\n    const U32   dictLimit = ms->window.dictLimit;\n    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;\n    const BYTE* const prefixStart = base + prefixStartIndex;\n    const BYTE* const dictBase = ms->window.dictBase;\n    const BYTE* const dictStart = dictBase + dictStartIndex;\n    const BYTE* const dictEnd = dictBase + prefixStartIndex;\n    U32 offset_1=rep[0], offset_2=rep[1];\n\n    DEBUGLOG(5, \"ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)\", srcSize);\n\n    /* if extDict is invalidated due to maxDistance, switch to \"regular\" variant */\n    if (prefixStartIndex == dictStartIndex)\n        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);\n\n    /* Search Loop */\n    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */\n        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);\n        const U32 matchIndex = hashSmall[hSmall];\n        const BYTE* const matchBase = matchIndex < prefixStartIndex ? 
dictBase : base;\n        const BYTE* match = matchBase + matchIndex;\n\n        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);\n        const U32 matchLongIndex = hashLong[hLong];\n        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;\n        const BYTE* matchLong = matchLongBase + matchLongIndex;\n\n        const U32 current = (U32)(ip-base);\n        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */\n        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;\n        const BYTE* const repMatch = repBase + repIndex;\n        size_t mLength;\n        hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */\n\n        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */\n            & (repIndex > dictStartIndex))\n          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {\n            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;\n            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;\n            ip++;\n            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);\n        } else {\n            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {\n                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;\n                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? 
dictStart : prefixStart;\n                U32 offset;\n                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;\n                offset = current - matchLongIndex;\n                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */\n                offset_2 = offset_1;\n                offset_1 = offset;\n                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);\n\n            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {\n                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);\n                U32 const matchIndex3 = hashLong[h3];\n                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;\n                const BYTE* match3 = match3Base + matchIndex3;\n                U32 offset;\n                hashLong[h3] = current + 1;\n                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {\n                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;\n                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;\n                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;\n                    ip++;\n                    offset = current+1 - matchIndex3;\n                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */\n                } else {\n                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;\n                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? 
dictStart : prefixStart;\n                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;\n                    offset = current - matchIndex;\n                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */\n                }\n                offset_2 = offset_1;\n                offset_1 = offset;\n                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);\n\n            } else {\n                ip += ((ip-anchor) >> kSearchStrength) + 1;\n                continue;\n        }   }\n\n        /* move to next sequence start */\n        ip += mLength;\n        anchor = ip;\n\n        if (ip <= ilimit) {\n            /* Complementary insertion */\n            /* done after iLimit test, as candidates could be > iend-8 */\n            {   U32 const indexToInsert = current+2;\n                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;\n                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);\n                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;\n                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);\n            }\n\n            /* check immediate repcode */\n            while (ip <= ilimit) {\n                U32 const current2 = (U32)(ip-base);\n                U32 const repIndex2 = current2 - offset_2;\n                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;\n                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */\n                    & (repIndex2 > dictStartIndex))\n                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {\n                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend;\n                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;\n                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */\n                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);\n                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;\n                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;\n                    ip += repLength2;\n                    anchor = ip;\n                    continue;\n                }\n                break;\n    }   }   }\n\n    /* save reps for next block */\n    rep[0] = offset_1;\n    rep[1] = offset_2;\n\n    /* Return the last literals size */\n    return (size_t)(iend - anchor);\n}\n\n\nsize_t ZSTD_compressBlock_doubleFast_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    U32 const mls = ms->cParams.minMatch;\n    switch(mls)\n    {\n    default: /* includes case 3 */\n    case 4 :\n        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);\n    case 5 :\n        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);\n    case 6 :\n        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);\n    case 7 :\n        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);\n    }\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_double_fast.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_DOUBLE_FAST_H\n#define ZSTD_DOUBLE_FAST_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#include \"mem.h\"      /* U32 */\n#include \"zstd_compress_internal.h\"     /* ZSTD_CCtx, size_t */\n\nvoid ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,\n                              void const* end, ZSTD_dictTableLoadMethod_e dtlm);\nsize_t ZSTD_compressBlock_doubleFast(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_doubleFast_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_doubleFast_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_DOUBLE_FAST_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_fast.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#include \"zstd_compress_internal.h\"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */\n#include \"zstd_fast.h\"\n\n\nvoid ZSTD_fillHashTable(ZSTD_matchState_t* ms,\n                        const void* const end,\n                        ZSTD_dictTableLoadMethod_e dtlm)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32* const hashTable = ms->hashTable;\n    U32  const hBits = cParams->hashLog;\n    U32  const mls = cParams->minMatch;\n    const BYTE* const base = ms->window.base;\n    const BYTE* ip = base + ms->nextToUpdate;\n    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;\n    const U32 fastHashFillStep = 3;\n\n    /* Always insert every fastHashFillStep position into the hash table.\n     * Insert the other positions if their hash entry is empty.\n     */\n    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {\n        U32 const current = (U32)(ip - base);\n        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);\n        hashTable[hash0] = current;\n        if (dtlm == ZSTD_dtlm_fast) continue;\n        /* Only load extra positions for ZSTD_dtlm_full */\n        {   U32 p;\n            for (p = 1; p < fastHashFillStep; ++p) {\n                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);\n                if (hashTable[hash] == 0) {  /* not yet filled */\n                    hashTable[hash] = current + p;\n    }   }   }   }\n}\n\n\nFORCE_INLINE_TEMPLATE size_t\nZSTD_compressBlock_fast_generic(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void 
const* src, size_t srcSize,\n        U32 const mls)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32* const hashTable = ms->hashTable;\n    U32 const hlog = cParams->hashLog;\n    /* support stepSize of 0 */\n    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;\n    const BYTE* const base = ms->window.base;\n    const BYTE* const istart = (const BYTE*)src;\n    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */\n    const BYTE* ip0 = istart;\n    const BYTE* ip1;\n    const BYTE* anchor = istart;\n    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);\n    const U32   maxDistance = 1U << cParams->windowLog;\n    const U32   validStartIndex = ms->window.dictLimit;\n    const U32   prefixStartIndex = (endIndex - validStartIndex > maxDistance) ? endIndex - maxDistance : validStartIndex;\n    const BYTE* const prefixStart = base + prefixStartIndex;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* const ilimit = iend - HASH_READ_SIZE;\n    U32 offset_1=rep[0], offset_2=rep[1];\n    U32 offsetSaved = 0;\n\n    /* init */\n    DEBUGLOG(5, \"ZSTD_compressBlock_fast_generic\");\n    ip0 += (ip0 == prefixStart);\n    ip1 = ip0 + 1;\n    {   U32 const maxRep = (U32)(ip0 - prefixStart);\n        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;\n        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;\n    }\n\n    /* Main Search Loop */\n    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */\n        size_t mLength;\n        BYTE const* ip2 = ip0 + 2;\n        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);\n        U32 const val0 = MEM_read32(ip0);\n        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);\n        U32 const val1 = MEM_read32(ip1);\n        U32 const current0 = (U32)(ip0-base);\n        U32 const current1 = (U32)(ip1-base);\n        U32 const matchIndex0 = hashTable[h0];\n        U32 const matchIndex1 = hashTable[h1];\n      
  BYTE const* repMatch = ip2-offset_1;\n        const BYTE* match0 = base + matchIndex0;\n        const BYTE* match1 = base + matchIndex1;\n        U32 offcode;\n        hashTable[h0] = current0;   /* update hash table */\n        hashTable[h1] = current1;   /* update hash table */\n\n        assert(ip0 + 1 == ip1);\n\n        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {\n            mLength = ip2[-1] == repMatch[-1] ? 1 : 0;\n            ip0 = ip2 - mLength;\n            match0 = repMatch - mLength;\n            offcode = 0;\n            goto _match;\n        }\n        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {\n            /* found a regular match */\n            goto _offset;\n        }\n        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {\n            /* found a regular match after one literal */\n            ip0 = ip1;\n            match0 = match1;\n            goto _offset;\n        }\n        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;\n            assert(step >= 2);\n            ip0 += step;\n            ip1 += step;\n            continue;\n        }\n_offset: /* Requires: ip0, match0 */\n        /* Compute the offset code */\n        offset_2 = offset_1;\n        offset_1 = (U32)(ip0-match0);\n        offcode = offset_1 + ZSTD_REP_MOVE;\n        mLength = 0;\n        /* Count the backwards match length */\n        while (((ip0>anchor) & (match0>prefixStart))\n             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */\n\n_match: /* Requires: ip0, match0, offcode */\n        /* Count the forward length */\n        mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;\n        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);\n        /* match found */\n        ip0 += mLength;\n        anchor = ip0;\n        ip1 = ip0 + 1;\n\n        if (ip0 <= ilimit) {\n            /* 
Fill Table */\n            assert(base+current0+2 > istart);  /* check base overflow */\n            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */\n            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);\n\n            while ( ((ip0 <= ilimit) & (offset_2>0))  /* offset_2==0 means offset_2 is invalidated */\n                 && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {\n                /* store sequence */\n                size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;\n                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */\n                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);\n                ip0 += rLength;\n                ip1 = ip0 + 1;\n                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);\n                anchor = ip0;\n                continue;   /* faster when present (confirmed on gcc-8) ... (?) */\n            }\n        }\n    }\n\n    /* save reps for next block */\n    rep[0] = offset_1 ? offset_1 : offsetSaved;\n    rep[1] = offset_2 ? 
offset_2 : offsetSaved;\n\n    /* Return the last literals size */\n    return (size_t)(iend - anchor);\n}\n\n\nsize_t ZSTD_compressBlock_fast(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    U32 const mls = ms->cParams.minMatch;\n    assert(ms->dictMatchState == NULL);\n    switch(mls)\n    {\n    default: /* includes case 3 */\n    case 4 :\n        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);\n    case 5 :\n        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);\n    case 6 :\n        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);\n    case 7 :\n        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);\n    }\n}\n\nFORCE_INLINE_TEMPLATE\nsize_t ZSTD_compressBlock_fast_dictMatchState_generic(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize, U32 const mls)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32* const hashTable = ms->hashTable;\n    U32 const hlog = cParams->hashLog;\n    /* support stepSize of 0 */\n    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);\n    const BYTE* const base = ms->window.base;\n    const BYTE* const istart = (const BYTE*)src;\n    const BYTE* ip = istart;\n    const BYTE* anchor = istart;\n    const U32   prefixStartIndex = ms->window.dictLimit;\n    const BYTE* const prefixStart = base + prefixStartIndex;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* const ilimit = iend - HASH_READ_SIZE;\n    U32 offset_1=rep[0], offset_2=rep[1];\n    U32 offsetSaved = 0;\n\n    const ZSTD_matchState_t* const dms = ms->dictMatchState;\n    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;\n    const U32* const dictHashTable = dms->hashTable;\n    const U32 dictStartIndex       = 
dms->window.dictLimit;\n    const BYTE* const dictBase     = dms->window.base;\n    const BYTE* const dictStart    = dictBase + dictStartIndex;\n    const BYTE* const dictEnd      = dms->window.nextSrc;\n    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);\n    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);\n    const U32 dictHLog             = dictCParams->hashLog;\n\n    /* if a dictionary is still attached, it necessarily means that\n     * it is within window size. So we just check it. */\n    const U32 maxDistance = 1U << cParams->windowLog;\n    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);\n    assert(endIndex - prefixStartIndex <= maxDistance);\n    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */\n\n    /* ensure there will be no no underflow\n     * when translating a dict index into a local index */\n    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));\n\n    /* init */\n    DEBUGLOG(5, \"ZSTD_compressBlock_fast_dictMatchState_generic\");\n    ip += (dictAndPrefixLength == 0);\n    /* dictMatchState repCode checks don't currently handle repCode == 0\n     * disabling. 
*/\n    assert(offset_1 <= dictAndPrefixLength);\n    assert(offset_2 <= dictAndPrefixLength);\n\n    /* Main Search Loop */\n    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */\n        size_t mLength;\n        size_t const h = ZSTD_hashPtr(ip, hlog, mls);\n        U32 const current = (U32)(ip-base);\n        U32 const matchIndex = hashTable[h];\n        const BYTE* match = base + matchIndex;\n        const U32 repIndex = current + 1 - offset_1;\n        const BYTE* repMatch = (repIndex < prefixStartIndex) ?\n                               dictBase + (repIndex - dictIndexDelta) :\n                               base + repIndex;\n        hashTable[h] = current;   /* update hash table */\n\n        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */\n          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {\n            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend;\n            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;\n            ip++;\n            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);\n        } else if ( (matchIndex <= prefixStartIndex) ) {\n            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);\n            U32 const dictMatchIndex = dictHashTable[dictHash];\n            const BYTE* dictMatch = dictBase + dictMatchIndex;\n            if (dictMatchIndex <= dictStartIndex ||\n                MEM_read32(dictMatch) != MEM_read32(ip)) {\n                assert(stepSize >= 1);\n                ip += ((ip-anchor) >> kSearchStrength) + stepSize;\n                continue;\n            } else {\n                /* found a dict match */\n                U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);\n                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;\n                while (((ip>anchor) & (dictMatch>dictStart))\n                     && (ip[-1] == dictMatch[-1])) {\n                    ip--; dictMatch--; mLength++;\n                } /* catch up */\n                offset_2 = offset_1;\n                offset_1 = offset;\n                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);\n            }\n        } else if (MEM_read32(match) != MEM_read32(ip)) {\n            /* it's not a match, and we're not going to check the dictionary */\n            assert(stepSize >= 1);\n            ip += ((ip-anchor) >> kSearchStrength) + stepSize;\n            continue;\n        } else {\n            /* found a regular match */\n            U32 const offset = (U32)(ip-match);\n            mLength = ZSTD_count(ip+4, match+4, iend) + 4;\n            while (((ip>anchor) & (match>prefixStart))\n                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */\n            offset_2 = 
offset_1;\n            offset_1 = offset;\n            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);\n        }\n\n        /* match found */\n        ip += mLength;\n        anchor = ip;\n\n        if (ip <= ilimit) {\n            /* Fill Table */\n            assert(base+current+2 > istart);  /* check base overflow */\n            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */\n            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);\n\n            /* check immediate repcode */\n            while (ip <= ilimit) {\n                U32 const current2 = (U32)(ip-base);\n                U32 const repIndex2 = current2 - offset_2;\n                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?\n                        dictBase - dictIndexDelta + repIndex2 :\n                        base + repIndex2;\n                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)\n                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {\n                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;\n                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;\n                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */\n                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);\n                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;\n                    ip += repLength2;\n                    anchor = ip;\n                    continue;\n                }\n                break;\n            }\n        }\n    }\n\n    /* save reps for next block */\n    rep[0] = offset_1 ? offset_1 : offsetSaved;\n    rep[1] = offset_2 ? 
offset_2 : offsetSaved;\n\n    /* Return the last literals size */\n    return (size_t)(iend - anchor);\n}\n\nsize_t ZSTD_compressBlock_fast_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    U32 const mls = ms->cParams.minMatch;\n    assert(ms->dictMatchState != NULL);\n    switch(mls)\n    {\n    default: /* includes case 3 */\n    case 4 :\n        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);\n    case 5 :\n        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);\n    case 6 :\n        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);\n    case 7 :\n        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);\n    }\n}\n\n\nstatic size_t ZSTD_compressBlock_fast_extDict_generic(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize, U32 const mls)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32* const hashTable = ms->hashTable;\n    U32 const hlog = cParams->hashLog;\n    /* support stepSize of 0 */\n    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);\n    const BYTE* const base = ms->window.base;\n    const BYTE* const dictBase = ms->window.dictBase;\n    const BYTE* const istart = (const BYTE*)src;\n    const BYTE* ip = istart;\n    const BYTE* anchor = istart;\n    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);\n    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);\n    const U32   dictStartIndex = lowLimit;\n    const BYTE* const dictStart = dictBase + dictStartIndex;\n    const U32   dictLimit = ms->window.dictLimit;\n    const U32   prefixStartIndex = dictLimit < lowLimit ? 
lowLimit : dictLimit;\n    const BYTE* const prefixStart = base + prefixStartIndex;\n    const BYTE* const dictEnd = dictBase + prefixStartIndex;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* const ilimit = iend - 8;\n    U32 offset_1=rep[0], offset_2=rep[1];\n\n    DEBUGLOG(5, \"ZSTD_compressBlock_fast_extDict_generic\");\n\n    /* switch to \"regular\" variant if extDict is invalidated due to maxDistance */\n    if (prefixStartIndex == dictStartIndex)\n        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);\n\n    /* Search Loop */\n    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */\n        const size_t h = ZSTD_hashPtr(ip, hlog, mls);\n        const U32    matchIndex = hashTable[h];\n        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;\n        const BYTE*  match = matchBase + matchIndex;\n        const U32    current = (U32)(ip-base);\n        const U32    repIndex = current + 1 - offset_1;\n        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;\n        const BYTE* const repMatch = repBase + repIndex;\n        hashTable[h] = current;   /* update hash table */\n        assert(offset_1 <= current +1);   /* check repIndex */\n\n        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))\n           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {\n            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend;\n            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;\n            ip++;\n            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);\n            ip += rLength;\n            anchor = ip;\n        } else {\n            if ( (matchIndex < dictStartIndex) ||\n                 (MEM_read32(match) != MEM_read32(ip)) ) {\n                assert(stepSize >= 1);\n                ip += ((ip-anchor) >> kSearchStrength) + stepSize;\n                continue;\n            }\n            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;\n                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;\n                U32 const offset = current - matchIndex;\n                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;\n                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */\n                offset_2 = offset_1; offset_1 = offset;  /* update offset history */\n                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);\n                ip += mLength;\n                anchor = ip;\n        }   }\n\n        if (ip <= ilimit) {\n            /* Fill Table */\n            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;\n            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);\n            /* check immediate repcode */\n            while (ip <= ilimit) {\n                U32 const current2 = (U32)(ip-base);\n                U32 const repIndex2 = current2 - offset_2;\n                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? 
dictBase + repIndex2 : base + repIndex2;\n                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */\n                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {\n                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;\n                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;\n                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */\n                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);\n                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;\n                    ip += repLength2;\n                    anchor = ip;\n                    continue;\n                }\n                break;\n    }   }   }\n\n    /* save reps for next block */\n    rep[0] = offset_1;\n    rep[1] = offset_2;\n\n    /* Return the last literals size */\n    return (size_t)(iend - anchor);\n}\n\n\nsize_t ZSTD_compressBlock_fast_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    U32 const mls = ms->cParams.minMatch;\n    switch(mls)\n    {\n    default: /* includes case 3 */\n    case 4 :\n        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);\n    case 5 :\n        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);\n    case 6 :\n        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);\n    case 7 :\n        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);\n    }\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_fast.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_FAST_H\n#define ZSTD_FAST_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#include \"mem.h\"      /* U32 */\n#include \"zstd_compress_internal.h\"\n\nvoid ZSTD_fillHashTable(ZSTD_matchState_t* ms,\n                        void const* end, ZSTD_dictTableLoadMethod_e dtlm);\nsize_t ZSTD_compressBlock_fast(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_fast_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_fast_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_FAST_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_lazy.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#include \"zstd_compress_internal.h\"\n#include \"zstd_lazy.h\"\n\n\n/*-*************************************\n*  Binary Tree search\n***************************************/\n\nstatic void\nZSTD_updateDUBT(ZSTD_matchState_t* ms,\n                const BYTE* ip, const BYTE* iend,\n                U32 mls)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32* const hashTable = ms->hashTable;\n    U32  const hashLog = cParams->hashLog;\n\n    U32* const bt = ms->chainTable;\n    U32  const btLog  = cParams->chainLog - 1;\n    U32  const btMask = (1 << btLog) - 1;\n\n    const BYTE* const base = ms->window.base;\n    U32 const target = (U32)(ip - base);\n    U32 idx = ms->nextToUpdate;\n\n    if (idx != target)\n        DEBUGLOG(7, \"ZSTD_updateDUBT, from %u to %u (dictLimit:%u)\",\n                    idx, target, ms->window.dictLimit);\n    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */\n    (void)iend;\n\n    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */\n    for ( ; idx < target ; idx++) {\n        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */\n        U32    const matchIndex = hashTable[h];\n\n        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);\n        U32*   const sortMarkPtr  = nextCandidatePtr + 1;\n\n        DEBUGLOG(8, \"ZSTD_updateDUBT: insert %u\", idx);\n        hashTable[h] = idx;   /* Update Hash Table */\n        *nextCandidatePtr = matchIndex;   /* update BT like a chain */\n        *sortMarkPtr = 
ZSTD_DUBT_UNSORTED_MARK;\n    }\n    ms->nextToUpdate = target;\n}\n\n\n/** ZSTD_insertDUBT1() :\n *  sort one already inserted but unsorted position\n *  assumption : current >= btlow == (current - btmask)\n *  doesn't fail */\nstatic void\nZSTD_insertDUBT1(ZSTD_matchState_t* ms,\n                 U32 current, const BYTE* inputEnd,\n                 U32 nbCompares, U32 btLow,\n                 const ZSTD_dictMode_e dictMode)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32* const bt = ms->chainTable;\n    U32  const btLog  = cParams->chainLog - 1;\n    U32  const btMask = (1 << btLog) - 1;\n    size_t commonLengthSmaller=0, commonLengthLarger=0;\n    const BYTE* const base = ms->window.base;\n    const BYTE* const dictBase = ms->window.dictBase;\n    const U32 dictLimit = ms->window.dictLimit;\n    const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;\n    const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;\n    const BYTE* const dictEnd = dictBase + dictLimit;\n    const BYTE* const prefixStart = base + dictLimit;\n    const BYTE* match;\n    U32* smallerPtr = bt + 2*(current&btMask);\n    U32* largerPtr  = smallerPtr + 1;\n    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */\n    U32 dummy32;   /* to be nullified at the end */\n    U32 const windowValid = ms->window.lowLimit;\n    U32 const maxDistance = 1U << cParams->windowLog;\n    U32 const windowLow = (current - windowValid > maxDistance) ? 
current - maxDistance : windowValid;\n\n\n    DEBUGLOG(8, \"ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)\",\n                current, dictLimit, windowLow);\n    assert(current >= btLow);\n    assert(ip < iend);   /* condition for ZSTD_count */\n\n    while (nbCompares-- && (matchIndex > windowLow)) {\n        U32* const nextPtr = bt + 2*(matchIndex & btMask);\n        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */\n        assert(matchIndex < current);\n        /* note : all candidates are now supposed sorted,\n         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK\n         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */\n\n        if ( (dictMode != ZSTD_extDict)\n          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/\n          || (current < dictLimit) /* both in extDict */) {\n            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)\n                                     || (matchIndex+matchLength >= dictLimit)) ?\n                                        base : dictBase;\n            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */\n                 || (current < dictLimit) );\n            match = mBase + matchIndex;\n            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);\n        } else {\n            match = dictBase + matchIndex;\n            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);\n            if (matchIndex+matchLength >= dictLimit)\n                match = base + matchIndex;   /* preparation for next read of match[matchLength] */\n        }\n\n        DEBUGLOG(8, \"ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes \",\n                    current, matchIndex, (U32)matchLength);\n\n        if (ip+matchLength == iend) {   /* equal : no way to know if inf or 
sup */\n            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */\n        }\n\n        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */\n            /* match is smaller than current */\n            *smallerPtr = matchIndex;             /* update smaller idx */\n            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */\n            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */\n            DEBUGLOG(8, \"ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u\",\n                        matchIndex, btLow, nextPtr[1]);\n            smallerPtr = nextPtr+1;               /* new \"candidate\" => larger than match, which was smaller than target */\n            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */\n        } else {\n            /* match is larger than current */\n            *largerPtr = matchIndex;\n            commonLengthLarger = matchLength;\n            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */\n            DEBUGLOG(8, \"ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u\",\n                        matchIndex, btLow, nextPtr[0]);\n            largerPtr = nextPtr;\n            matchIndex = nextPtr[0];\n    }   }\n\n    *smallerPtr = *largerPtr = 0;\n}\n\n\nstatic size_t\nZSTD_DUBT_findBetterDictMatch (\n        ZSTD_matchState_t* ms,\n        const BYTE* const ip, const BYTE* const iend,\n        size_t* offsetPtr,\n        size_t bestLength,\n        U32 nbCompares,\n        U32 const mls,\n        const ZSTD_dictMode_e dictMode)\n{\n    const ZSTD_matchState_t * const dms = ms->dictMatchState;\n    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;\n    const U32 * const dictHashTable = dms->hashTable;\n    U32         const 
hashLog = dmsCParams->hashLog;\n    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);\n    U32               dictMatchIndex = dictHashTable[h];\n\n    const BYTE* const base = ms->window.base;\n    const BYTE* const prefixStart = base + ms->window.dictLimit;\n    U32         const current = (U32)(ip-base);\n    const BYTE* const dictBase = dms->window.base;\n    const BYTE* const dictEnd = dms->window.nextSrc;\n    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);\n    U32         const dictLowLimit = dms->window.lowLimit;\n    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;\n\n    U32*        const dictBt = dms->chainTable;\n    U32         const btLog  = dmsCParams->chainLog - 1;\n    U32         const btMask = (1 << btLog) - 1;\n    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;\n\n    size_t commonLengthSmaller=0, commonLengthLarger=0;\n\n    (void)dictMode;\n    assert(dictMode == ZSTD_dictMatchState);\n\n    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {\n        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);\n        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */\n        const BYTE* match = dictBase + dictMatchIndex;\n        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);\n        if (dictMatchIndex+matchLength >= dictHighLimit)\n            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */\n\n        if (matchLength > bestLength) {\n            U32 matchIndex = dictMatchIndex + dictIndexDelta;\n            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {\n                DEBUGLOG(9, \"ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode 
%u -> %u (dictMatchIndex %u, matchIndex %u)\",\n                    current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);\n                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;\n            }\n            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */\n                break;   /* drop, to guarantee consistency (miss a little bit of compression) */\n            }\n        }\n\n        if (match[matchLength] < ip[matchLength]) {\n            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */\n            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */\n            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */\n        } else {\n            /* match is larger than current */\n            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */\n            commonLengthLarger = matchLength;\n            dictMatchIndex = nextPtr[0];\n        }\n    }\n\n    if (bestLength >= MINMATCH) {\n        U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;\n        DEBUGLOG(8, \"ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)\",\n                    current, (U32)bestLength, (U32)*offsetPtr, mIndex);\n    }\n    return bestLength;\n\n}\n\n\nstatic size_t\nZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,\n                        const BYTE* const ip, const BYTE* const iend,\n                        size_t* offsetPtr,\n                        U32 const mls,\n                        const ZSTD_dictMode_e dictMode)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32*   const hashTable = ms->hashTable;\n    U32    
const hashLog = cParams->hashLog;\n    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);\n    U32          matchIndex  = hashTable[h];\n\n    const BYTE* const base = ms->window.base;\n    U32    const current = (U32)(ip-base);\n    U32    const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);\n\n    U32*   const bt = ms->chainTable;\n    U32    const btLog  = cParams->chainLog - 1;\n    U32    const btMask = (1 << btLog) - 1;\n    U32    const btLow = (btMask >= current) ? 0 : current - btMask;\n    U32    const unsortLimit = MAX(btLow, windowLow);\n\n    U32*         nextCandidate = bt + 2*(matchIndex&btMask);\n    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;\n    U32          nbCompares = 1U << cParams->searchLog;\n    U32          nbCandidates = nbCompares;\n    U32          previousCandidate = 0;\n\n    DEBUGLOG(7, \"ZSTD_DUBT_findBestMatch (%u) \", current);\n    assert(ip <= iend-8);   /* required for h calculation */\n\n    /* reach end of unsorted candidates list */\n    while ( (matchIndex > unsortLimit)\n         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)\n         && (nbCandidates > 1) ) {\n        DEBUGLOG(8, \"ZSTD_DUBT_findBestMatch: candidate %u is unsorted\",\n                    matchIndex);\n        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move up back to original position */\n        previousCandidate = matchIndex;\n        matchIndex = *nextCandidate;\n        nextCandidate = bt + 2*(matchIndex&btMask);\n        unsortedMark = bt + 2*(matchIndex&btMask) + 1;\n        nbCandidates --;\n    }\n\n    /* nullify last candidate if it's still unsorted\n     * simplification, detrimental to compression ratio, beneficial for speed */\n    if ( (matchIndex > unsortLimit)\n      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {\n        DEBUGLOG(7, \"ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u\",\n                    matchIndex);\n        *nextCandidate = 
*unsortedMark = 0;\n    }\n\n    /* batch sort stacked candidates */\n    matchIndex = previousCandidate;\n    while (matchIndex) {  /* will end on matchIndex == 0 */\n        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;\n        U32 const nextCandidateIdx = *nextCandidateIdxPtr;\n        ZSTD_insertDUBT1(ms, matchIndex, iend,\n                         nbCandidates, unsortLimit, dictMode);\n        matchIndex = nextCandidateIdx;\n        nbCandidates++;\n    }\n\n    /* find longest match */\n    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;\n        const BYTE* const dictBase = ms->window.dictBase;\n        const U32 dictLimit = ms->window.dictLimit;\n        const BYTE* const dictEnd = dictBase + dictLimit;\n        const BYTE* const prefixStart = base + dictLimit;\n        U32* smallerPtr = bt + 2*(current&btMask);\n        U32* largerPtr  = bt + 2*(current&btMask) + 1;\n        U32 matchEndIdx = current + 8 + 1;\n        U32 dummy32;   /* to be nullified at the end */\n        size_t bestLength = 0;\n\n        matchIndex  = hashTable[h];\n        hashTable[h] = current;   /* Update Hash Table */\n\n        while (nbCompares-- && (matchIndex > windowLow)) {\n            U32* const nextPtr = bt + 2*(matchIndex & btMask);\n            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */\n            const BYTE* match;\n\n            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {\n                match = base + matchIndex;\n                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);\n            } else {\n                match = dictBase + matchIndex;\n                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);\n                if (matchIndex+matchLength >= dictLimit)\n                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] 
*/\n            }\n\n            if (matchLength > bestLength) {\n                if (matchLength > matchEndIdx - matchIndex)\n                    matchEndIdx = matchIndex + (U32)matchLength;\n                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )\n                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;\n                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */\n                    if (dictMode == ZSTD_dictMatchState) {\n                        nbCompares = 0; /* in addition to avoiding checking any\n                                         * further in this loop, make sure we\n                                         * skip checking in the dictionary. */\n                    }\n                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */\n                }\n            }\n\n            if (match[matchLength] < ip[matchLength]) {\n                /* match is smaller than current */\n                *smallerPtr = matchIndex;             /* update smaller idx */\n                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */\n                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */\n                smallerPtr = nextPtr+1;               /* new \"smaller\" => larger of match */\n                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */\n            } else {\n                /* match is larger than current */\n                *largerPtr = matchIndex;\n                commonLengthLarger = matchLength;\n                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */\n                largerPtr = nextPtr;\n                matchIndex = nextPtr[0];\n       
 }   }\n\n        *smallerPtr = *largerPtr = 0;\n\n        if (dictMode == ZSTD_dictMatchState && nbCompares) {\n            bestLength = ZSTD_DUBT_findBetterDictMatch(\n                    ms, ip, iend,\n                    offsetPtr, bestLength, nbCompares,\n                    mls, dictMode);\n        }\n\n        assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */\n        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */\n        if (bestLength >= MINMATCH) {\n            U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;\n            DEBUGLOG(8, \"ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)\",\n                        current, (U32)bestLength, (U32)*offsetPtr, mIndex);\n        }\n        return bestLength;\n    }\n}\n\n\n/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */\nFORCE_INLINE_TEMPLATE size_t\nZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,\n                const BYTE* const ip, const BYTE* const iLimit,\n                      size_t* offsetPtr,\n                const U32 mls /* template */,\n                const ZSTD_dictMode_e dictMode)\n{\n    DEBUGLOG(7, \"ZSTD_BtFindBestMatch\");\n    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */\n    ZSTD_updateDUBT(ms, ip, iLimit, mls);\n    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);\n}\n\n\nstatic size_t\nZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,\n                            const BYTE* ip, const BYTE* const iLimit,\n                                  size_t* offsetPtr)\n{\n    switch(ms->cParams.minMatch)\n    {\n    default : /* includes case 3 */\n    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);\n    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);\n    case 7 :\n    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);\n   
 }\n}\n\n\nstatic size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (\n                        ZSTD_matchState_t* ms,\n                        const BYTE* ip, const BYTE* const iLimit,\n                        size_t* offsetPtr)\n{\n    switch(ms->cParams.minMatch)\n    {\n    default : /* includes case 3 */\n    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);\n    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);\n    case 7 :\n    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);\n    }\n}\n\n\nstatic size_t ZSTD_BtFindBestMatch_extDict_selectMLS (\n                        ZSTD_matchState_t* ms,\n                        const BYTE* ip, const BYTE* const iLimit,\n                        size_t* offsetPtr)\n{\n    switch(ms->cParams.minMatch)\n    {\n    default : /* includes case 3 */\n    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);\n    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);\n    case 7 :\n    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);\n    }\n}\n\n\n\n/* *********************************\n*  Hash Chain\n***********************************/\n#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]\n\n/* Update chains up to ip (excluded)\n   Assumption : always within prefix (i.e. 
not within extDict) */\nstatic U32 ZSTD_insertAndFindFirstIndex_internal(\n                        ZSTD_matchState_t* ms,\n                        const ZSTD_compressionParameters* const cParams,\n                        const BYTE* ip, U32 const mls)\n{\n    U32* const hashTable  = ms->hashTable;\n    const U32 hashLog = cParams->hashLog;\n    U32* const chainTable = ms->chainTable;\n    const U32 chainMask = (1 << cParams->chainLog) - 1;\n    const BYTE* const base = ms->window.base;\n    const U32 target = (U32)(ip - base);\n    U32 idx = ms->nextToUpdate;\n\n    while(idx < target) { /* catch up */\n        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);\n        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];\n        hashTable[h] = idx;\n        idx++;\n    }\n\n    ms->nextToUpdate = target;\n    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];\n}\n\nU32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);\n}\n\n\n/* inlining is important to hardwire a hot branch (template emulation) */\nFORCE_INLINE_TEMPLATE\nsize_t ZSTD_HcFindBestMatch_generic (\n                        ZSTD_matchState_t* ms,\n                        const BYTE* const ip, const BYTE* const iLimit,\n                        size_t* offsetPtr,\n                        const U32 mls, const ZSTD_dictMode_e dictMode)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32* const chainTable = ms->chainTable;\n    const U32 chainSize = (1 << cParams->chainLog);\n    const U32 chainMask = chainSize-1;\n    const BYTE* const base = ms->window.base;\n    const BYTE* const dictBase = ms->window.dictBase;\n    const U32 dictLimit = ms->window.dictLimit;\n    const BYTE* const prefixStart = base + dictLimit;\n    const BYTE* const dictEnd = dictBase + dictLimit;\n    const U32 current = 
(U32)(ip-base);\n    const U32 maxDistance = 1U << cParams->windowLog;\n    const U32 lowestValid = ms->window.lowLimit;\n    const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;\n    const U32 isDictionary = (ms->loadedDictEnd != 0);\n    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;\n    const U32 minChain = current > chainSize ? current - chainSize : 0;\n    U32 nbAttempts = 1U << cParams->searchLog;\n    size_t ml=4-1;\n\n    /* HC4 match finder */\n    U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);\n\n    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {\n        size_t currentMl=0;\n        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {\n            const BYTE* const match = base + matchIndex;\n            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */\n            if (match[ml] == ip[ml])   /* potentially better */\n                currentMl = ZSTD_count(ip, match, iLimit);\n        } else {\n            const BYTE* const match = dictBase + matchIndex;\n            assert(match+4 <= dictEnd);\n            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */\n                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;\n        }\n\n        /* save best solution */\n        if (currentMl > ml) {\n            ml = currentMl;\n            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;\n            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */\n        }\n\n        if (matchIndex <= minChain) break;\n        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);\n    }\n\n    if (dictMode == ZSTD_dictMatchState) {\n        const ZSTD_matchState_t* const dms = ms->dictMatchState;\n        const U32* const dmsChainTable = dms->chainTable;\n       
 const U32 dmsChainSize         = (1 << dms->cParams.chainLog);\n        const U32 dmsChainMask         = dmsChainSize - 1;\n        const U32 dmsLowestIndex       = dms->window.dictLimit;\n        const BYTE* const dmsBase      = dms->window.base;\n        const BYTE* const dmsEnd       = dms->window.nextSrc;\n        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);\n        const U32 dmsIndexDelta        = dictLimit - dmsSize;\n        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;\n\n        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];\n\n        for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {\n            size_t currentMl=0;\n            const BYTE* const match = dmsBase + matchIndex;\n            assert(match+4 <= dmsEnd);\n            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */\n                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;\n\n            /* save best solution */\n            if (currentMl > ml) {\n                ml = currentMl;\n                *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;\n                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */\n            }\n\n            if (matchIndex <= dmsMinChain) break;\n            matchIndex = dmsChainTable[matchIndex & dmsChainMask];\n        }\n    }\n\n    return ml;\n}\n\n\nFORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (\n                        ZSTD_matchState_t* ms,\n                        const BYTE* ip, const BYTE* const iLimit,\n                        size_t* offsetPtr)\n{\n    switch(ms->cParams.minMatch)\n    {\n    default : /* includes case 3 */\n    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);\n    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, 
offsetPtr, 5, ZSTD_noDict);\n    case 7 :\n    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);\n    }\n}\n\n\nstatic size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (\n                        ZSTD_matchState_t* ms,\n                        const BYTE* ip, const BYTE* const iLimit,\n                        size_t* offsetPtr)\n{\n    switch(ms->cParams.minMatch)\n    {\n    default : /* includes case 3 */\n    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);\n    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);\n    case 7 :\n    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);\n    }\n}\n\n\nFORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (\n                        ZSTD_matchState_t* ms,\n                        const BYTE* ip, const BYTE* const iLimit,\n                        size_t* offsetPtr)\n{\n    switch(ms->cParams.minMatch)\n    {\n    default : /* includes case 3 */\n    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);\n    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);\n    case 7 :\n    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);\n    }\n}\n\n\n/* *******************************\n*  Common parser - lazy strategy\n*********************************/\ntypedef enum { search_hashChain, search_binaryTree } searchMethod_e;\n\nFORCE_INLINE_TEMPLATE size_t\nZSTD_compressBlock_lazy_generic(\n                        ZSTD_matchState_t* ms, seqStore_t* seqStore,\n                        U32 rep[ZSTD_REP_NUM],\n                        const void* src, size_t srcSize,\n                        const searchMethod_e searchMethod, const U32 depth,\n                        ZSTD_dictMode_e const dictMode)\n{\n    const BYTE* const istart = 
(const BYTE*)src;\n    const BYTE* ip = istart;\n    const BYTE* anchor = istart;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* const ilimit = iend - 8;\n    const BYTE* const base = ms->window.base;\n    const U32 prefixLowestIndex = ms->window.dictLimit;\n    const BYTE* const prefixLowest = base + prefixLowestIndex;\n\n    typedef size_t (*searchMax_f)(\n                        ZSTD_matchState_t* ms,\n                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);\n    searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?\n        (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS\n                                         : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :\n        (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS\n                                         : ZSTD_HcFindBestMatch_selectMLS);\n    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;\n\n    const ZSTD_matchState_t* const dms = ms->dictMatchState;\n    const U32 dictLowestIndex      = dictMode == ZSTD_dictMatchState ?\n                                     dms->window.dictLimit : 0;\n    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?\n                                     dms->window.base : NULL;\n    const BYTE* const dictLowest   = dictMode == ZSTD_dictMatchState ?\n                                     dictBase + dictLowestIndex : NULL;\n    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?\n                                     dms->window.nextSrc : NULL;\n    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?\n                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :\n                                     0;\n    const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));\n\n    /* init */\n    ip += (dictAndPrefixLength == 0);\n    if (dictMode == ZSTD_noDict) {\n        U32 const maxRep 
= (U32)(ip - prefixLowest);\n        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;\n        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;\n    }\n    if (dictMode == ZSTD_dictMatchState) {\n        /* dictMatchState repCode checks don't currently handle repCode == 0\n         * disabling. */\n        assert(offset_1 <= dictAndPrefixLength);\n        assert(offset_2 <= dictAndPrefixLength);\n    }\n\n    /* Match Loop */\n    while (ip < ilimit) {\n        size_t matchLength=0;\n        size_t offset=0;\n        const BYTE* start=ip+1;\n\n        /* check repCode */\n        if (dictMode == ZSTD_dictMatchState) {\n            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;\n            const BYTE* repMatch = (dictMode == ZSTD_dictMatchState\n                                && repIndex < prefixLowestIndex) ?\n                                   dictBase + (repIndex - dictIndexDelta) :\n                                   base + repIndex;\n            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)\n                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {\n                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend;\n                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;\n                if (depth==0) goto _storeSequence;\n            }\n        }\n        if ( dictMode == ZSTD_noDict\n          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {\n            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;\n            if (depth==0) goto _storeSequence;\n        }\n\n        /* first search (depth 0) */\n        {   size_t offsetFound = 999999999;\n            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);\n            if (ml2 > matchLength)\n                matchLength = ml2, start = ip, offset=offsetFound;\n        }\n\n        if (matchLength < 4) {\n            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */\n            continue;\n        }\n\n        /* let's try to find a better solution */\n        if (depth>=1)\n        while (ip<ilimit) {\n            ip ++;\n            if ( (dictMode == ZSTD_noDict)\n              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {\n                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;\n                int const gain2 = (int)(mlRep * 3);\n                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);\n                if ((mlRep >= 4) && (gain2 > gain1))\n                    matchLength = mlRep, offset = 0, start = ip;\n            }\n            if (dictMode == ZSTD_dictMatchState) {\n                const U32 repIndex = (U32)(ip - base) - offset_1;\n                const BYTE* repMatch = repIndex < prefixLowestIndex ?\n                               dictBase + (repIndex - dictIndexDelta) :\n                               base + repIndex;\n                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)\n                    && (MEM_read32(repMatch) == 
MEM_read32(ip)) ) {\n                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;\n                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;\n                    int const gain2 = (int)(mlRep * 3);\n                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);\n                    if ((mlRep >= 4) && (gain2 > gain1))\n                        matchLength = mlRep, offset = 0, start = ip;\n                }\n            }\n            {   size_t offset2=999999999;\n                size_t const ml2 = searchMax(ms, ip, iend, &offset2);\n                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */\n                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);\n                if ((ml2 >= 4) && (gain2 > gain1)) {\n                    matchLength = ml2, offset = offset2, start = ip;\n                    continue;   /* search a better one */\n            }   }\n\n            /* let's find an even better one */\n            if ((depth==2) && (ip<ilimit)) {\n                ip ++;\n                if ( (dictMode == ZSTD_noDict)\n                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {\n                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;\n                    int const gain2 = (int)(mlRep * 4);\n                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);\n                    if ((mlRep >= 4) && (gain2 > gain1))\n                        matchLength = mlRep, offset = 0, start = ip;\n                }\n                if (dictMode == ZSTD_dictMatchState) {\n                    const U32 repIndex = (U32)(ip - base) - offset_1;\n                    const BYTE* repMatch = repIndex < prefixLowestIndex ?\n                                   dictBase + (repIndex - dictIndexDelta) :\n      
                             base + repIndex;\n                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)\n                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {\n                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;\n                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;\n                        int const gain2 = (int)(mlRep * 4);\n                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);\n                        if ((mlRep >= 4) && (gain2 > gain1))\n                            matchLength = mlRep, offset = 0, start = ip;\n                    }\n                }\n                {   size_t offset2=999999999;\n                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);\n                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */\n                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);\n                    if ((ml2 >= 4) && (gain2 > gain1)) {\n                        matchLength = ml2, offset = offset2, start = ip;\n                        continue;\n            }   }   }\n            break;  /* nothing found : store previous solution */\n        }\n\n        /* NOTE:\n         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.\n         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which\n         * overflows the pointer, which is undefined behavior.\n         */\n        /* catch up */\n        if (offset) {\n            if (dictMode == ZSTD_noDict) {\n                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))\n                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */\n                    { start--; matchLength++; }\n            }\n            
if (dictMode == ZSTD_dictMatchState) {\n                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));\n                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;\n                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;\n                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */\n            }\n            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);\n        }\n        /* store sequence */\n_storeSequence:\n        {   size_t const litLength = start - anchor;\n            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);\n            anchor = ip = start + matchLength;\n        }\n\n        /* check immediate repcode */\n        if (dictMode == ZSTD_dictMatchState) {\n            while (ip <= ilimit) {\n                U32 const current2 = (U32)(ip-base);\n                U32 const repIndex = current2 - offset_2;\n                const BYTE* repMatch = dictMode == ZSTD_dictMatchState\n                    && repIndex < prefixLowestIndex ?\n                        dictBase - dictIndexDelta + repIndex :\n                        base + repIndex;\n                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)\n                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {\n                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? 
dictEnd : iend;\n                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;\n                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */\n                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);\n                    ip += matchLength;\n                    anchor = ip;\n                    continue;\n                }\n                break;\n            }\n        }\n\n        if (dictMode == ZSTD_noDict) {\n            while ( ((ip <= ilimit) & (offset_2>0))\n                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {\n                /* store sequence */\n                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;\n                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */\n                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);\n                ip += matchLength;\n                anchor = ip;\n                continue;   /* faster when present ... (?) */\n    }   }   }\n\n    /* Save reps for next block */\n    rep[0] = offset_1 ? offset_1 : savedOffset;\n    rep[1] = offset_2 ? 
offset_2 : savedOffset;\n\n    /* Return the last literals size */\n    return (size_t)(iend - anchor);\n}\n\n\nsize_t ZSTD_compressBlock_btlazy2(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);\n}\n\nsize_t ZSTD_compressBlock_lazy2(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);\n}\n\nsize_t ZSTD_compressBlock_lazy(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);\n}\n\nsize_t ZSTD_compressBlock_greedy(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);\n}\n\nsize_t ZSTD_compressBlock_btlazy2_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);\n}\n\nsize_t ZSTD_compressBlock_lazy2_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);\n}\n\nsize_t ZSTD_compressBlock_lazy_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return 
ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);\n}\n\nsize_t ZSTD_compressBlock_greedy_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);\n}\n\n\nFORCE_INLINE_TEMPLATE\nsize_t ZSTD_compressBlock_lazy_extDict_generic(\n                        ZSTD_matchState_t* ms, seqStore_t* seqStore,\n                        U32 rep[ZSTD_REP_NUM],\n                        const void* src, size_t srcSize,\n                        const searchMethod_e searchMethod, const U32 depth)\n{\n    const BYTE* const istart = (const BYTE*)src;\n    const BYTE* ip = istart;\n    const BYTE* anchor = istart;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* const ilimit = iend - 8;\n    const BYTE* const base = ms->window.base;\n    const U32 dictLimit = ms->window.dictLimit;\n    const U32 lowestIndex = ms->window.lowLimit;\n    const BYTE* const prefixStart = base + dictLimit;\n    const BYTE* const dictBase = ms->window.dictBase;\n    const BYTE* const dictEnd  = dictBase + dictLimit;\n    const BYTE* const dictStart  = dictBase + lowestIndex;\n\n    typedef size_t (*searchMax_f)(\n                        ZSTD_matchState_t* ms,\n                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);\n    searchMax_f searchMax = searchMethod==search_binaryTree ? 
ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;\n\n    U32 offset_1 = rep[0], offset_2 = rep[1];\n\n    /* init */\n    ip += (ip == prefixStart);\n\n    /* Match Loop */\n    while (ip < ilimit) {\n        size_t matchLength=0;\n        size_t offset=0;\n        const BYTE* start=ip+1;\n        U32 current = (U32)(ip-base);\n\n        /* check repCode */\n        {   const U32 repIndex = (U32)(current+1 - offset_1);\n            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;\n            const BYTE* const repMatch = repBase + repIndex;\n            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */\n            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {\n                /* repcode detected we should take it */\n                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;\n                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;\n                if (depth==0) goto _storeSequence;\n        }   }\n\n        /* first search (depth 0) */\n        {   size_t offsetFound = 999999999;\n            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);\n            if (ml2 > matchLength)\n                matchLength = ml2, start = ip, offset=offsetFound;\n        }\n\n         if (matchLength < 4) {\n            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */\n            continue;\n        }\n\n        /* let's try to find a better solution */\n        if (depth>=1)\n        while (ip<ilimit) {\n            ip ++;\n            current++;\n            /* check repCode */\n            if (offset) {\n                const U32 repIndex = (U32)(current - offset_1);\n                const BYTE* const repBase = repIndex < dictLimit ? 
dictBase : base;\n                const BYTE* const repMatch = repBase + repIndex;\n                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */\n                if (MEM_read32(ip) == MEM_read32(repMatch)) {\n                    /* repcode detected */\n                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;\n                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;\n                    int const gain2 = (int)(repLength * 3);\n                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);\n                    if ((repLength >= 4) && (gain2 > gain1))\n                        matchLength = repLength, offset = 0, start = ip;\n            }   }\n\n            /* search match, depth 1 */\n            {   size_t offset2=999999999;\n                size_t const ml2 = searchMax(ms, ip, iend, &offset2);\n                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */\n                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);\n                if ((ml2 >= 4) && (gain2 > gain1)) {\n                    matchLength = ml2, offset = offset2, start = ip;\n                    continue;   /* search a better one */\n            }   }\n\n            /* let's find an even better one */\n            if ((depth==2) && (ip<ilimit)) {\n                ip ++;\n                current++;\n                /* check repCode */\n                if (offset) {\n                    const U32 repIndex = (U32)(current - offset_1);\n                    const BYTE* const repBase = repIndex < dictLimit ? 
dictBase : base;\n                    const BYTE* const repMatch = repBase + repIndex;\n                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */\n                    if (MEM_read32(ip) == MEM_read32(repMatch)) {\n                        /* repcode detected */\n                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;\n                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;\n                        int const gain2 = (int)(repLength * 4);\n                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);\n                        if ((repLength >= 4) && (gain2 > gain1))\n                            matchLength = repLength, offset = 0, start = ip;\n                }   }\n\n                /* search match, depth 2 */\n                {   size_t offset2=999999999;\n                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);\n                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */\n                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);\n                    if ((ml2 >= 4) && (gain2 > gain1)) {\n                        matchLength = ml2, offset = offset2, start = ip;\n                        continue;\n            }   }   }\n            break;  /* nothing found : store previous solution */\n        }\n\n        /* catch up */\n        if (offset) {\n            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));\n            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;\n            const BYTE* const mStart = (matchIndex < dictLimit) ? 
dictStart : prefixStart;\n            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */\n            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);\n        }\n\n        /* store sequence */\n_storeSequence:\n        {   size_t const litLength = start - anchor;\n            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);\n            anchor = ip = start + matchLength;\n        }\n\n        /* check immediate repcode */\n        while (ip <= ilimit) {\n            const U32 repIndex = (U32)((ip-base) - offset_2);\n            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;\n            const BYTE* const repMatch = repBase + repIndex;\n            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */\n            if (MEM_read32(ip) == MEM_read32(repMatch)) {\n                /* repcode detected we should take it */\n                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;\n                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;\n                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */\n                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);\n                ip += matchLength;\n                anchor = ip;\n                continue;   /* faster when present ... (?) 
*/\n            }\n            break;\n    }   }\n\n    /* Save reps for next block */\n    rep[0] = offset_1;\n    rep[1] = offset_2;\n\n    /* Return the last literals size */\n    return (size_t)(iend - anchor);\n}\n\n\nsize_t ZSTD_compressBlock_greedy_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);\n}\n\nsize_t ZSTD_compressBlock_lazy_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n\n{\n    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);\n}\n\nsize_t ZSTD_compressBlock_lazy2_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n\n{\n    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);\n}\n\nsize_t ZSTD_compressBlock_btlazy2_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize)\n\n{\n    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_lazy.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_LAZY_H\n#define ZSTD_LAZY_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#include \"zstd_compress_internal.h\"\n\nU32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);\n\nvoid ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */\n\nsize_t ZSTD_compressBlock_btlazy2(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_lazy2(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_lazy(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_greedy(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\n\nsize_t ZSTD_compressBlock_btlazy2_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_lazy2_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_lazy_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_greedy_dictMatchState(\n        
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\n\nsize_t ZSTD_compressBlock_greedy_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_lazy_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_lazy2_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_btlazy2_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_LAZY_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_ldm.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n */\n\n#include \"zstd_ldm.h\"\n\n#include \"debug.h\"\n#include \"zstd_fast.h\"          /* ZSTD_fillHashTable() */\n#include \"zstd_double_fast.h\"   /* ZSTD_fillDoubleHashTable() */\n\n#define LDM_BUCKET_SIZE_LOG 3\n#define LDM_MIN_MATCH_LENGTH 64\n#define LDM_HASH_RLOG 7\n#define LDM_HASH_CHAR_OFFSET 10\n\nvoid ZSTD_ldm_adjustParameters(ldmParams_t* params,\n                               ZSTD_compressionParameters const* cParams)\n{\n    params->windowLog = cParams->windowLog;\n    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);\n    DEBUGLOG(4, \"ZSTD_ldm_adjustParameters\");\n    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;\n    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;\n    if (cParams->strategy >= ZSTD_btopt) {\n      /* Get out of the way of the optimal parser */\n      U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength);\n      assert(minMatch >= ZSTD_LDM_MINMATCH_MIN);\n      assert(minMatch <= ZSTD_LDM_MINMATCH_MAX);\n      params->minMatchLength = minMatch;\n    }\n    if (params->hashLog == 0) {\n        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);\n        assert(params->hashLog <= ZSTD_HASHLOG_MAX);\n    }\n    if (params->hashRateLog == 0) {\n        params->hashRateLog = params->windowLog < params->hashLog\n                                   ? 
0\n                                   : params->windowLog - params->hashLog;\n    }\n    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);\n}\n\nsize_t ZSTD_ldm_getTableSize(ldmParams_t params)\n{\n    size_t const ldmHSize = ((size_t)1) << params.hashLog;\n    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);\n    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);\n    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)\n                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));\n    return params.enableLdm ? totalSize : 0;\n}\n\nsize_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)\n{\n    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;\n}\n\n/** ZSTD_ldm_getSmallHash() :\n *  numBits should be <= 32\n *  If numBits==0, returns 0.\n *  @return : the most significant numBits of value. */\nstatic U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)\n{\n    assert(numBits <= 32);\n    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));\n}\n\n/** ZSTD_ldm_getChecksum() :\n *  numBitsToDiscard should be <= 32\n *  @return : the next most significant 32 bits after numBitsToDiscard */\nstatic U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)\n{\n    assert(numBitsToDiscard <= 32);\n    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;\n}\n\n/** ZSTD_ldm_getTag() ;\n *  Given the hash, returns the most significant numTagBits bits\n *  after (32 + hbits) bits.\n *\n *  If there are not enough bits remaining, return the last\n *  numTagBits bits. 
*/\nstatic U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)\n{\n    assert(numTagBits < 32 && hbits <= 32);\n    if (32 - hbits < numTagBits) {\n        return hash & (((U32)1 << numTagBits) - 1);\n    } else {\n        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);\n    }\n}\n\n/** ZSTD_ldm_getBucket() :\n *  Returns a pointer to the start of the bucket associated with hash. */\nstatic ldmEntry_t* ZSTD_ldm_getBucket(\n        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)\n{\n    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);\n}\n\n/** ZSTD_ldm_insertEntry() :\n *  Insert the entry with corresponding hash into the hash table */\nstatic void ZSTD_ldm_insertEntry(ldmState_t* ldmState,\n                                 size_t const hash, const ldmEntry_t entry,\n                                 ldmParams_t const ldmParams)\n{\n    BYTE* const bucketOffsets = ldmState->bucketOffsets;\n    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;\n    bucketOffsets[hash]++;\n    bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;\n}\n\n/** ZSTD_ldm_makeEntryAndInsertByTag() :\n *\n *  Gets the small hash, checksum, and tag from the rollingHash.\n *\n *  If the tag matches (1 << ldmParams.hashRateLog)-1, then\n *  creates an ldmEntry from the offset, and inserts it into the hash table.\n *\n *  hBits is the length of the small hash, which is the most significant hBits\n *  of rollingHash. The checksum is the next 32 most significant bits, followed\n *  by ldmParams.hashRateLog bits that make up the tag. 
*/\nstatic void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,\n                                             U64 const rollingHash,\n                                             U32 const hBits,\n                                             U32 const offset,\n                                             ldmParams_t const ldmParams)\n{\n    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);\n    U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;\n    if (tag == tagMask) {\n        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);\n        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);\n        ldmEntry_t entry;\n        entry.offset = offset;\n        entry.checksum = checksum;\n        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);\n    }\n}\n\n/** ZSTD_ldm_countBackwardsMatch() :\n *  Returns the number of bytes that match backwards before pIn and pMatch.\n *\n *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */\nstatic size_t ZSTD_ldm_countBackwardsMatch(\n            const BYTE* pIn, const BYTE* pAnchor,\n            const BYTE* pMatch, const BYTE* pBase)\n{\n    size_t matchLength = 0;\n    while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {\n        pIn--;\n        pMatch--;\n        matchLength++;\n    }\n    return matchLength;\n}\n\n/** ZSTD_ldm_fillFastTables() :\n *\n *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.\n *  This is similar to ZSTD_loadDictionaryContent.\n *\n *  The tables for the other strategies are filled within their\n *  block compressors. 
*/\nstatic size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,\n                                      void const* end)\n{\n    const BYTE* const iend = (const BYTE*)end;\n\n    switch(ms->cParams.strategy)\n    {\n    case ZSTD_fast:\n        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);\n        break;\n\n    case ZSTD_dfast:\n        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);\n        break;\n\n    case ZSTD_greedy:\n    case ZSTD_lazy:\n    case ZSTD_lazy2:\n    case ZSTD_btlazy2:\n    case ZSTD_btopt:\n    case ZSTD_btultra:\n    case ZSTD_btultra2:\n        break;\n    default:\n        assert(0);  /* not possible : not a valid strategy id */\n    }\n\n    return 0;\n}\n\n/** ZSTD_ldm_fillLdmHashTable() :\n *\n *  Fills hashTable from (lastHashed + 1) to iend (non-inclusive).\n *  lastHash is the rolling hash that corresponds to lastHashed.\n *\n *  Returns the rolling hash corresponding to position iend-1. */\nstatic U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,\n                                     U64 lastHash, const BYTE* lastHashed,\n                                     const BYTE* iend, const BYTE* base,\n                                     U32 hBits, ldmParams_t const ldmParams)\n{\n    U64 rollingHash = lastHash;\n    const BYTE* cur = lastHashed + 1;\n\n    while (cur < iend) {\n        rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],\n                                              cur[ldmParams.minMatchLength-1],\n                                              state->hashPower);\n        ZSTD_ldm_makeEntryAndInsertByTag(state,\n                                         rollingHash, hBits,\n                                         (U32)(cur - base), ldmParams);\n        ++cur;\n    }\n    return rollingHash;\n}\n\n\n/** ZSTD_ldm_limitTableUpdate() :\n *\n *  Sets cctx->nextToUpdate to a position corresponding closer to anchor\n *  if it is far way\n *  (after a long match, only update tables a limited amount). 
*/\nstatic void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)\n{\n    U32 const current = (U32)(anchor - ms->window.base);\n    if (current > ms->nextToUpdate + 1024) {\n        ms->nextToUpdate =\n            current - MIN(512, current - ms->nextToUpdate - 1024);\n    }\n}\n\nstatic size_t ZSTD_ldm_generateSequences_internal(\n        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,\n        ldmParams_t const* params, void const* src, size_t srcSize)\n{\n    /* LDM parameters */\n    int const extDict = ZSTD_window_hasExtDict(ldmState->window);\n    U32 const minMatchLength = params->minMatchLength;\n    U64 const hashPower = ldmState->hashPower;\n    U32 const hBits = params->hashLog - params->bucketSizeLog;\n    U32 const ldmBucketSize = 1U << params->bucketSizeLog;\n    U32 const hashRateLog = params->hashRateLog;\n    U32 const ldmTagMask = (1U << params->hashRateLog) - 1;\n    /* Prefix and extDict parameters */\n    U32 const dictLimit = ldmState->window.dictLimit;\n    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;\n    BYTE const* const base = ldmState->window.base;\n    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;\n    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;\n    BYTE const* const dictEnd = extDict ? 
dictBase + dictLimit : NULL;\n    BYTE const* const lowPrefixPtr = base + dictLimit;\n    /* Input bounds */\n    BYTE const* const istart = (BYTE const*)src;\n    BYTE const* const iend = istart + srcSize;\n    BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE);\n    /* Input positions */\n    BYTE const* anchor = istart;\n    BYTE const* ip = istart;\n    /* Rolling hash */\n    BYTE const* lastHashed = NULL;\n    U64 rollingHash = 0;\n\n    while (ip <= ilimit) {\n        size_t mLength;\n        U32 const current = (U32)(ip - base);\n        size_t forwardMatchLength = 0, backwardMatchLength = 0;\n        ldmEntry_t* bestEntry = NULL;\n        if (ip != istart) {\n            rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],\n                                                  lastHashed[minMatchLength],\n                                                  hashPower);\n        } else {\n            rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);\n        }\n        lastHashed = ip;\n\n        /* Do not insert and do not look for a match */\n        if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {\n           ip++;\n           continue;\n        }\n\n        /* Get the best entry and compute the match lengths */\n        {\n            ldmEntry_t* const bucket =\n                ZSTD_ldm_getBucket(ldmState,\n                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),\n                                   *params);\n            ldmEntry_t* cur;\n            size_t bestMatchLength = 0;\n            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);\n\n            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {\n                size_t curForwardMatchLength, curBackwardMatchLength,\n                       curTotalMatchLength;\n                if (cur->checksum != checksum || cur->offset <= lowestIndex) {\n                    continue;\n                }\n            
    if (extDict) {\n                    BYTE const* const curMatchBase =\n                        cur->offset < dictLimit ? dictBase : base;\n                    BYTE const* const pMatch = curMatchBase + cur->offset;\n                    BYTE const* const matchEnd =\n                        cur->offset < dictLimit ? dictEnd : iend;\n                    BYTE const* const lowMatchPtr =\n                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;\n\n                    curForwardMatchLength = ZSTD_count_2segments(\n                                                ip, pMatch, iend,\n                                                matchEnd, lowPrefixPtr);\n                    if (curForwardMatchLength < minMatchLength) {\n                        continue;\n                    }\n                    curBackwardMatchLength =\n                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,\n                                                     lowMatchPtr);\n                    curTotalMatchLength = curForwardMatchLength +\n                                          curBackwardMatchLength;\n                } else { /* !extDict */\n                    BYTE const* const pMatch = base + cur->offset;\n                    curForwardMatchLength = ZSTD_count(ip, pMatch, iend);\n                    if (curForwardMatchLength < minMatchLength) {\n                        continue;\n                    }\n                    curBackwardMatchLength =\n                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,\n                                                     lowPrefixPtr);\n                    curTotalMatchLength = curForwardMatchLength +\n                                          curBackwardMatchLength;\n                }\n\n                if (curTotalMatchLength > bestMatchLength) {\n                    bestMatchLength = curTotalMatchLength;\n                    forwardMatchLength = curForwardMatchLength;\n                    
backwardMatchLength = curBackwardMatchLength;\n                    bestEntry = cur;\n                }\n            }\n        }\n\n        /* No match found -- continue searching */\n        if (bestEntry == NULL) {\n            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,\n                                             hBits, current,\n                                             *params);\n            ip++;\n            continue;\n        }\n\n        /* Match found */\n        mLength = forwardMatchLength + backwardMatchLength;\n        ip -= backwardMatchLength;\n\n        {\n            /* Store the sequence:\n             * ip = current - backwardMatchLength\n             * The match is at (bestEntry->offset - backwardMatchLength)\n             */\n            U32 const matchIndex = bestEntry->offset;\n            U32 const offset = current - matchIndex;\n            rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;\n\n            /* Out of sequence storage */\n            if (rawSeqStore->size == rawSeqStore->capacity)\n                return ERROR(dstSize_tooSmall);\n            seq->litLength = (U32)(ip - anchor);\n            seq->matchLength = (U32)mLength;\n            seq->offset = offset;\n            rawSeqStore->size++;\n        }\n\n        /* Insert the current entry into the hash table */\n        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,\n                                         (U32)(lastHashed - base),\n                                         *params);\n\n        assert(ip + backwardMatchLength == lastHashed);\n\n        /* Fill the hash table from lastHashed+1 to ip+mLength*/\n        /* Heuristic: don't need to fill the entire table at end of block */\n        if (ip + mLength <= ilimit) {\n            rollingHash = ZSTD_ldm_fillLdmHashTable(\n                              ldmState, rollingHash, lastHashed,\n                              ip + mLength, base, hBits, *params);\n            lastHashed = ip 
+ mLength - 1;\n        }\n        ip += mLength;\n        anchor = ip;\n    }\n    return iend - anchor;\n}\n\n/*! ZSTD_ldm_reduceTable() :\n *  reduce table indexes by `reducerValue` */\nstatic void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,\n                                 U32 const reducerValue)\n{\n    U32 u;\n    for (u = 0; u < size; u++) {\n        if (table[u].offset < reducerValue) table[u].offset = 0;\n        else table[u].offset -= reducerValue;\n    }\n}\n\nsize_t ZSTD_ldm_generateSequences(\n        ldmState_t* ldmState, rawSeqStore_t* sequences,\n        ldmParams_t const* params, void const* src, size_t srcSize)\n{\n    U32 const maxDist = 1U << params->windowLog;\n    BYTE const* const istart = (BYTE const*)src;\n    BYTE const* const iend = istart + srcSize;\n    size_t const kMaxChunkSize = 1 << 20;\n    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);\n    size_t chunk;\n    size_t leftoverSize = 0;\n\n    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);\n    /* Check that ZSTD_window_update() has been called for this chunk prior\n     * to passing it to this function.\n     */\n    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);\n    /* The input could be very large (in zstdmt), so it must be broken up into\n     * chunks to enforce the maximum distance and handle overflow correction.\n     */\n    assert(sequences->pos <= sequences->size);\n    assert(sequences->size <= sequences->capacity);\n    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {\n        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;\n        size_t const remaining = (size_t)(iend - chunkStart);\n        BYTE const *const chunkEnd =\n            (remaining < kMaxChunkSize) ? 
iend : chunkStart + kMaxChunkSize;\n        size_t const chunkSize = chunkEnd - chunkStart;\n        size_t newLeftoverSize;\n        size_t const prevSize = sequences->size;\n\n        assert(chunkStart < iend);\n        /* 1. Perform overflow correction if necessary. */\n        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {\n            U32 const ldmHSize = 1U << params->hashLog;\n            U32 const correction = ZSTD_window_correctOverflow(\n                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);\n            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);\n        }\n        /* 2. We enforce the maximum offset allowed.\n         *\n         * kMaxChunkSize should be small enough that we don't lose too much of\n         * the window through early invalidation.\n         * TODO: * Test the chunk size.\n         *       * Try invalidation after the sequence generation and test the\n         *         the offset against maxDist directly.\n         */\n        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL, NULL);\n        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */\n        newLeftoverSize = ZSTD_ldm_generateSequences_internal(\n            ldmState, sequences, params, chunkStart, chunkSize);\n        if (ZSTD_isError(newLeftoverSize))\n            return newLeftoverSize;\n        /* 4. 
We add the leftover literals from previous iterations to the first\n         *    newly generated sequence, or add the `newLeftoverSize` if none are\n         *    generated.\n         */\n        /* Prepend the leftover literals from the last call */\n        if (prevSize < sequences->size) {\n            sequences->seq[prevSize].litLength += (U32)leftoverSize;\n            leftoverSize = newLeftoverSize;\n        } else {\n            assert(newLeftoverSize == chunkSize);\n            leftoverSize += chunkSize;\n        }\n    }\n    return 0;\n}\n\nvoid ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {\n    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {\n        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;\n        if (srcSize <= seq->litLength) {\n            /* Skip past srcSize literals */\n            seq->litLength -= (U32)srcSize;\n            return;\n        }\n        srcSize -= seq->litLength;\n        seq->litLength = 0;\n        if (srcSize < seq->matchLength) {\n            /* Skip past the first srcSize of the match */\n            seq->matchLength -= (U32)srcSize;\n            if (seq->matchLength < minMatch) {\n                /* The match is too short, omit it */\n                if (rawSeqStore->pos + 1 < rawSeqStore->size) {\n                    seq[1].litLength += seq[0].matchLength;\n                }\n                rawSeqStore->pos++;\n            }\n            return;\n        }\n        srcSize -= seq->matchLength;\n        seq->matchLength = 0;\n        rawSeqStore->pos++;\n    }\n}\n\n/**\n * If the sequence length is longer than remaining then the sequence is split\n * between this block and the next.\n *\n * Returns the current sequence to handle, or if the rest of the block should\n * be literals, it returns a sequence with offset == 0.\n */\nstatic rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,\n                                 U32 const remaining, U32 const 
minMatch)\n{\n    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];\n    assert(sequence.offset > 0);\n    /* Likely: No partial sequence */\n    if (remaining >= sequence.litLength + sequence.matchLength) {\n        rawSeqStore->pos++;\n        return sequence;\n    }\n    /* Cut the sequence short (offset == 0 ==> rest is literals). */\n    if (remaining <= sequence.litLength) {\n        sequence.offset = 0;\n    } else if (remaining < sequence.litLength + sequence.matchLength) {\n        sequence.matchLength = remaining - sequence.litLength;\n        if (sequence.matchLength < minMatch) {\n            sequence.offset = 0;\n        }\n    }\n    /* Skip past `remaining` bytes for the future sequences. */\n    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);\n    return sequence;\n}\n\nsize_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,\n    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n    void const* src, size_t srcSize)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    unsigned const minMatch = cParams->minMatch;\n    ZSTD_blockCompressor const blockCompressor =\n        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));\n    /* Input bounds */\n    BYTE const* const istart = (BYTE const*)src;\n    BYTE const* const iend = istart + srcSize;\n    /* Input positions */\n    BYTE const* ip = istart;\n\n    DEBUGLOG(5, \"ZSTD_ldm_blockCompress: srcSize=%zu\", srcSize);\n    assert(rawSeqStore->pos <= rawSeqStore->size);\n    assert(rawSeqStore->size <= rawSeqStore->capacity);\n    /* Loop through each sequence and apply the block compressor to the lits */\n    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {\n        /* maybeSplitSequence updates rawSeqStore->pos */\n        rawSeq const sequence = maybeSplitSequence(rawSeqStore,\n                                                   (U32)(iend - ip), minMatch);\n        int i;\n        /* End signal */\n        
if (sequence.offset == 0)\n            break;\n\n        assert(sequence.offset <= (1U << cParams->windowLog));\n        assert(ip + sequence.litLength + sequence.matchLength <= iend);\n\n        /* Fill tables for block compressor */\n        ZSTD_ldm_limitTableUpdate(ms, ip);\n        ZSTD_ldm_fillFastTables(ms, ip);\n        /* Run the block compressor */\n        DEBUGLOG(5, \"calling block compressor on segment of size %u\", sequence.litLength);\n        {\n            size_t const newLitLength =\n                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);\n            ip += sequence.litLength;\n            /* Update the repcodes */\n            for (i = ZSTD_REP_NUM - 1; i > 0; i--)\n                rep[i] = rep[i-1];\n            rep[0] = sequence.offset;\n            /* Store the sequence */\n            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,\n                          sequence.offset + ZSTD_REP_MOVE,\n                          sequence.matchLength - MINMATCH);\n            ip += sequence.matchLength;\n        }\n    }\n    /* Fill the tables for the block compressor */\n    ZSTD_ldm_limitTableUpdate(ms, ip);\n    ZSTD_ldm_fillFastTables(ms, ip);\n    /* Compress the last literals */\n    return blockCompressor(ms, seqStore, rep, ip, iend - ip);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_ldm.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n */\n\n#ifndef ZSTD_LDM_H\n#define ZSTD_LDM_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#include \"zstd_compress_internal.h\"   /* ldmParams_t, U32 */\n#include \"zstd.h\"   /* ZSTD_CCtx, size_t */\n\n/*-*************************************\n*  Long distance matching\n***************************************/\n\n#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT\n\n/**\n * ZSTD_ldm_generateSequences():\n *\n * Generates the sequences using the long distance match finder.\n * Generates long range matching sequences in `sequences`, which parse a prefix\n * of the source. `sequences` must be large enough to store every sequence,\n * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.\n * @returns 0 or an error code.\n *\n * NOTE: The user must have called ZSTD_window_update() for all of the input\n * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.\n * NOTE: This function returns an error if it runs out of space to store\n *       sequences.\n */\nsize_t ZSTD_ldm_generateSequences(\n            ldmState_t* ldms, rawSeqStore_t* sequences,\n            ldmParams_t const* params, void const* src, size_t srcSize);\n\n/**\n * ZSTD_ldm_blockCompress():\n *\n * Compresses a block using the predefined sequences, along with a secondary\n * block compressor. The literals section of every sequence is passed to the\n * secondary block compressor, and those sequences are interspersed with the\n * predefined sequences. 
Returns the length of the last literals.\n * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.\n * `rawSeqStore.seq` may also be updated to split the last sequence between two\n * blocks.\n * @return The length of the last literals.\n *\n * NOTE: The source must be at most the maximum block size, but the predefined\n * sequences can be any size, and may be longer than the block. In the case that\n * they are longer than the block, the last sequences may need to be split into\n * two. We handle that case correctly, and update `rawSeqStore` appropriately.\n * NOTE: This function does not return any errors.\n */\nsize_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,\n            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n            void const* src, size_t srcSize);\n\n/**\n * ZSTD_ldm_skipSequences():\n *\n * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.\n * Avoids emitting matches less than `minMatch` bytes.\n * Must be called for data with is not passed to ZSTD_ldm_blockCompress().\n */\nvoid ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,\n    U32 const minMatch);\n\n\n/** ZSTD_ldm_getTableSize() :\n *  Estimate the space needed for long distance matching tables or 0 if LDM is\n *  disabled.\n */\nsize_t ZSTD_ldm_getTableSize(ldmParams_t params);\n\n/** ZSTD_ldm_getSeqSpace() :\n *  Return an upper bound on the number of sequences that can be produced by\n *  the long distance matcher, or 0 if LDM is disabled.\n */\nsize_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);\n\n/** ZSTD_ldm_adjustParameters() :\n *  If the params->hashRateLog is not set, set it to its default value based on\n *  windowLog and params->hashLog.\n *\n *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to\n *  params->hashLog if it is not).\n *\n *  Ensures that the minMatchLength >= targetLength during optimal parsing.\n */\nvoid 
ZSTD_ldm_adjustParameters(ldmParams_t* params,\n                               ZSTD_compressionParameters const* cParams);\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_LDM_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_opt.c",
    "content": "/*\n * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#include \"zstd_compress_internal.h\"\n#include \"hist.h\"\n#include \"zstd_opt.h\"\n\n\n#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */\n#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */\n#define ZSTD_MAX_PRICE     (1<<30)\n\n#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */\n\n\n/*-*************************************\n*  Price functions for optimal parser\n***************************************/\n\n#if 0    /* approximation at bit level */\n#  define BITCOST_ACCURACY 0\n#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)\n#  define WEIGHT(stat)  ((void)opt, ZSTD_bitWeight(stat))\n#elif 0  /* fractional bit accuracy */\n#  define BITCOST_ACCURACY 8\n#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)\n#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))\n#else    /* opt==approx, ultra==accurate */\n#  define BITCOST_ACCURACY 8\n#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)\n#  define WEIGHT(stat,opt) (opt ? 
ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))\n#endif\n\nMEM_STATIC U32 ZSTD_bitWeight(U32 stat)\n{\n    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);\n}\n\nMEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)\n{\n    U32 const stat = rawStat + 1;\n    U32 const hb = ZSTD_highbit32(stat);\n    U32 const BWeight = hb * BITCOST_MULTIPLIER;\n    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;\n    U32 const weight = BWeight + FWeight;\n    assert(hb + BITCOST_ACCURACY < 31);\n    return weight;\n}\n\n#if (DEBUGLEVEL>=2)\n/* debugging function,\n * @return price in bytes as fractional value\n * for debug messages only */\nMEM_STATIC double ZSTD_fCost(U32 price)\n{\n    return (double)price / (BITCOST_MULTIPLIER*8);\n}\n#endif\n\nstatic int ZSTD_compressedLiterals(optState_t const* const optPtr)\n{\n    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;\n}\n\nstatic void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)\n{\n    if (ZSTD_compressedLiterals(optPtr))\n        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);\n    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);\n    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);\n    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);\n}\n\n\n/* ZSTD_downscaleStat() :\n * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)\n * return the resulting sum of elements */\nstatic U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)\n{\n    U32 s, sum=0;\n    DEBUGLOG(5, \"ZSTD_downscaleStat (nbElts=%u)\", (unsigned)lastEltIndex+1);\n    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);\n    for (s=0; s<lastEltIndex+1; s++) {\n        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));\n        sum += table[s];\n    }\n    return sum;\n}\n\n/* ZSTD_rescaleFreqs() :\n * if first block (detected by optPtr->litLengthSum == 0) : init statistics\n *    take hints from dictionary if there is one\n * 
   or init from zero, using src for literals stats, or flat 1 for match symbols\n * otherwise downscale existing stats, to be used as seed for next block.\n */\nstatic void\nZSTD_rescaleFreqs(optState_t* const optPtr,\n            const BYTE* const src, size_t const srcSize,\n                  int const optLevel)\n{\n    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);\n    DEBUGLOG(5, \"ZSTD_rescaleFreqs (srcSize=%u)\", (unsigned)srcSize);\n    optPtr->priceType = zop_dynamic;\n\n    if (optPtr->litLengthSum == 0) {  /* first block : init */\n        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */\n            DEBUGLOG(5, \"(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef\");\n            optPtr->priceType = zop_predef;\n        }\n\n        assert(optPtr->symbolCosts != NULL);\n        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {\n            /* huffman table presumed generated by dictionary */\n            optPtr->priceType = zop_dynamic;\n\n            if (compressedLiterals) {\n                unsigned lit;\n                assert(optPtr->litFreq != NULL);\n                optPtr->litSum = 0;\n                for (lit=0; lit<=MaxLit; lit++) {\n                    U32 const scaleLog = 11;   /* scale to 2K */\n                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);\n                    assert(bitCost <= scaleLog);\n                    optPtr->litFreq[lit] = bitCost ? 
1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;\n                    optPtr->litSum += optPtr->litFreq[lit];\n            }   }\n\n            {   unsigned ll;\n                FSE_CState_t llstate;\n                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);\n                optPtr->litLengthSum = 0;\n                for (ll=0; ll<=MaxLL; ll++) {\n                    U32 const scaleLog = 10;   /* scale to 1K */\n                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);\n                    assert(bitCost < scaleLog);\n                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;\n                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];\n            }   }\n\n            {   unsigned ml;\n                FSE_CState_t mlstate;\n                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);\n                optPtr->matchLengthSum = 0;\n                for (ml=0; ml<=MaxML; ml++) {\n                    U32 const scaleLog = 10;\n                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);\n                    assert(bitCost < scaleLog);\n                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;\n                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];\n            }   }\n\n            {   unsigned of;\n                FSE_CState_t ofstate;\n                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);\n                optPtr->offCodeSum = 0;\n                for (of=0; of<=MaxOff; of++) {\n                    U32 const scaleLog = 10;\n                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);\n                    assert(bitCost < scaleLog);\n                    optPtr->offCodeFreq[of] = bitCost ? 
1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;\n                    optPtr->offCodeSum += optPtr->offCodeFreq[of];\n            }   }\n\n        } else {  /* not a dictionary */\n\n            assert(optPtr->litFreq != NULL);\n            if (compressedLiterals) {\n                unsigned lit = MaxLit;\n                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */\n                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);\n            }\n\n            {   unsigned ll;\n                for (ll=0; ll<=MaxLL; ll++)\n                    optPtr->litLengthFreq[ll] = 1;\n            }\n            optPtr->litLengthSum = MaxLL+1;\n\n            {   unsigned ml;\n                for (ml=0; ml<=MaxML; ml++)\n                    optPtr->matchLengthFreq[ml] = 1;\n            }\n            optPtr->matchLengthSum = MaxML+1;\n\n            {   unsigned of;\n                for (of=0; of<=MaxOff; of++)\n                    optPtr->offCodeFreq[of] = 1;\n            }\n            optPtr->offCodeSum = MaxOff+1;\n\n        }\n\n    } else {   /* new block : re-use previous statistics, scaled down */\n\n        if (compressedLiterals)\n            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);\n        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);\n        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);\n        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);\n    }\n\n    ZSTD_setBasePrices(optPtr, optLevel);\n}\n\n/* ZSTD_rawLiteralsCost() :\n * price of literals (only) in specified segment (which length can be 0).\n * does not include price of literalLength symbol */\nstatic U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,\n                                const optState_t* const optPtr,\n                                int optLevel)\n{\n    if (litLength == 0) 
return 0;\n\n    if (!ZSTD_compressedLiterals(optPtr))\n        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bytes per literal. */\n\n    if (optPtr->priceType == zop_predef)\n        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bit per literal - no statistic used */\n\n    /* dynamic statistics */\n    {   U32 price = litLength * optPtr->litSumBasePrice;\n        U32 u;\n        for (u=0; u < litLength; u++) {\n            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */\n            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);\n        }\n        return price;\n    }\n}\n\n/* ZSTD_litLengthPrice() :\n * cost of literalLength symbol */\nstatic U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)\n{\n    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);\n\n    /* dynamic statistics */\n    {   U32 const llCode = ZSTD_LLcode(litLength);\n        return (LL_bits[llCode] * BITCOST_MULTIPLIER)\n             + optPtr->litLengthSumBasePrice\n             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);\n    }\n}\n\n/* ZSTD_litLengthContribution() :\n * @return ( cost(litlength) - cost(0) )\n * this value can then be added to rawLiteralsCost()\n * to provide a cost which is directly comparable to a match ending at same position */\nstatic int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)\n{\n    if (optPtr->priceType >= zop_predef) return (int)WEIGHT(litLength, optLevel);\n\n    /* dynamic statistics */\n    {   U32 const llCode = ZSTD_LLcode(litLength);\n        int const contribution = (int)(LL_bits[llCode] * BITCOST_MULTIPLIER)\n                               + (int)WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancel out */\n                               - (int)WEIGHT(optPtr->litLengthFreq[llCode], optLevel);\n#if 1\n 
       return contribution;\n#else\n        return MAX(0, contribution); /* sometimes better, sometimes not ... */\n#endif\n    }\n}\n\n/* ZSTD_literalsContribution() :\n * creates a fake cost for the literals part of a sequence\n * which can be compared to the ending cost of a match\n * should a new match start at this position */\nstatic int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,\n                                     const optState_t* const optPtr,\n                                     int optLevel)\n{\n    int const contribution = (int)ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)\n                           + ZSTD_litLengthContribution(litLength, optPtr, optLevel);\n    return contribution;\n}\n\n/* ZSTD_getMatchPrice() :\n * Provides the cost of the match part (offset + matchLength) of a sequence\n * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.\n * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */\nFORCE_INLINE_TEMPLATE U32\nZSTD_getMatchPrice(U32 const offset,\n                   U32 const matchLength,\n             const optState_t* const optPtr,\n                   int const optLevel)\n{\n    U32 price;\n    U32 const offCode = ZSTD_highbit32(offset+1);\n    U32 const mlBase = matchLength - MINMATCH;\n    assert(matchLength >= MINMATCH);\n\n    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */\n        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);\n\n    /* dynamic statistics */\n    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));\n    if ((optLevel<2) /*static*/ && offCode >= 20)\n        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */\n\n    /* match Length */\n    {   U32 const mlCode = ZSTD_MLcode(mlBase);\n        price += 
(ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));\n    }\n\n    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */\n\n    DEBUGLOG(8, \"ZSTD_getMatchPrice(ml:%u) = %u\", matchLength, price);\n    return price;\n}\n\n/* ZSTD_updateStats() :\n * assumption : literals + litLengtn <= iend */\nstatic void ZSTD_updateStats(optState_t* const optPtr,\n                             U32 litLength, const BYTE* literals,\n                             U32 offsetCode, U32 matchLength)\n{\n    /* literals */\n    if (ZSTD_compressedLiterals(optPtr)) {\n        U32 u;\n        for (u=0; u < litLength; u++)\n            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;\n        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;\n    }\n\n    /* literal Length */\n    {   U32 const llCode = ZSTD_LLcode(litLength);\n        optPtr->litLengthFreq[llCode]++;\n        optPtr->litLengthSum++;\n    }\n\n    /* match offset code (0-2=>repCode; 3+=>offset+2) */\n    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);\n        assert(offCode <= MaxOff);\n        optPtr->offCodeFreq[offCode]++;\n        optPtr->offCodeSum++;\n    }\n\n    /* match Length */\n    {   U32 const mlBase = matchLength - MINMATCH;\n        U32 const mlCode = ZSTD_MLcode(mlBase);\n        optPtr->matchLengthFreq[mlCode]++;\n        optPtr->matchLengthSum++;\n    }\n}\n\n\n/* ZSTD_readMINMATCH() :\n * function safe only for comparisons\n * assumption : memPtr must be at least 4 bytes before end of buffer */\nMEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)\n{\n    switch (length)\n    {\n    default :\n    case 4 : return MEM_read32(memPtr);\n    case 3 : if (MEM_isLittleEndian())\n                return MEM_read32(memPtr)<<8;\n             else\n                return MEM_read32(memPtr)>>8;\n    }\n}\n\n\n/* Update hashTable3 up to ip 
(excluded)\n   Assumption : always within prefix (i.e. not within extDict) */\nstatic U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,\n                                              U32* nextToUpdate3,\n                                              const BYTE* const ip)\n{\n    U32* const hashTable3 = ms->hashTable3;\n    U32 const hashLog3 = ms->hashLog3;\n    const BYTE* const base = ms->window.base;\n    U32 idx = *nextToUpdate3;\n    U32 const target = (U32)(ip - base);\n    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);\n    assert(hashLog3 > 0);\n\n    while(idx < target) {\n        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;\n        idx++;\n    }\n\n    *nextToUpdate3 = target;\n    return hashTable3[hash3];\n}\n\n\n/*-*************************************\n*  Binary Tree search\n***************************************/\n/** ZSTD_insertBt1() : add one or multiple positions to tree.\n *  ip : assumed <= iend-8 .\n * @return : nb of positions added */\nstatic U32 ZSTD_insertBt1(\n                ZSTD_matchState_t* ms,\n                const BYTE* const ip, const BYTE* const iend,\n                U32 const mls, const int extDict)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32*   const hashTable = ms->hashTable;\n    U32    const hashLog = cParams->hashLog;\n    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);\n    U32*   const bt = ms->chainTable;\n    U32    const btLog  = cParams->chainLog - 1;\n    U32    const btMask = (1 << btLog) - 1;\n    U32 matchIndex = hashTable[h];\n    size_t commonLengthSmaller=0, commonLengthLarger=0;\n    const BYTE* const base = ms->window.base;\n    const BYTE* const dictBase = ms->window.dictBase;\n    const U32 dictLimit = ms->window.dictLimit;\n    const BYTE* const dictEnd = dictBase + dictLimit;\n    const BYTE* const prefixStart = base + dictLimit;\n    const BYTE* match;\n    const U32 current = (U32)(ip-base);\n    const U32 btLow = btMask >= current ? 
0 : current - btMask;\n    U32* smallerPtr = bt + 2*(current&btMask);\n    U32* largerPtr  = smallerPtr + 1;\n    U32 dummy32;   /* to be nullified at the end */\n    U32 const windowLow = ms->window.lowLimit;\n    U32 matchEndIdx = current+8+1;\n    size_t bestLength = 8;\n    U32 nbCompares = 1U << cParams->searchLog;\n#ifdef ZSTD_C_PREDICT\n    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);\n    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);\n    predictedSmall += (predictedSmall>0);\n    predictedLarge += (predictedLarge>0);\n#endif /* ZSTD_C_PREDICT */\n\n    DEBUGLOG(8, \"ZSTD_insertBt1 (%u)\", current);\n\n    assert(ip <= iend-8);   /* required for h calculation */\n    hashTable[h] = current;   /* Update Hash Table */\n\n    assert(windowLow > 0);\n    while (nbCompares-- && (matchIndex >= windowLow)) {\n        U32* const nextPtr = bt + 2*(matchIndex & btMask);\n        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */\n        assert(matchIndex < current);\n\n#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */\n        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */\n        if (matchIndex == predictedSmall) {\n            /* no need to check length, result known */\n            *smallerPtr = matchIndex;\n            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */\n            smallerPtr = nextPtr+1;               /* new \"smaller\" => larger of match */\n            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */\n            predictedSmall = predictPtr[1] + (predictPtr[1]>0);\n            continue;\n        }\n        if (matchIndex == predictedLarge) {\n            *largerPtr = matchIndex;\n            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop 
the search */\n            largerPtr = nextPtr;\n            matchIndex = nextPtr[0];\n            predictedLarge = predictPtr[0] + (predictPtr[0]>0);\n            continue;\n        }\n#endif\n\n        if (!extDict || (matchIndex+matchLength >= dictLimit)) {\n            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */\n            match = base + matchIndex;\n            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);\n        } else {\n            match = dictBase + matchIndex;\n            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);\n            if (matchIndex+matchLength >= dictLimit)\n                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */\n        }\n\n        if (matchLength > bestLength) {\n            bestLength = matchLength;\n            if (matchLength > matchEndIdx - matchIndex)\n                matchEndIdx = matchIndex + (U32)matchLength;\n        }\n\n        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */\n            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */\n        }\n\n        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */\n            /* match is smaller than current */\n            *smallerPtr = matchIndex;             /* update smaller idx */\n            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */\n            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */\n            smallerPtr = nextPtr+1;               /* new \"candidate\" => larger than match, which was smaller than target */\n            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */\n        } else {\n            /* match is larger 
than current */\n            *largerPtr = matchIndex;\n            commonLengthLarger = matchLength;\n            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */\n            largerPtr = nextPtr;\n            matchIndex = nextPtr[0];\n    }   }\n\n    *smallerPtr = *largerPtr = 0;\n    {   U32 positions = 0;\n        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */\n        assert(matchEndIdx > current + 8);\n        return MAX(positions, matchEndIdx - (current + 8));\n    }\n}\n\nFORCE_INLINE_TEMPLATE\nvoid ZSTD_updateTree_internal(\n                ZSTD_matchState_t* ms,\n                const BYTE* const ip, const BYTE* const iend,\n                const U32 mls, const ZSTD_dictMode_e dictMode)\n{\n    const BYTE* const base = ms->window.base;\n    U32 const target = (U32)(ip - base);\n    U32 idx = ms->nextToUpdate;\n    DEBUGLOG(6, \"ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)\",\n                idx, target, dictMode);\n\n    while(idx < target) {\n        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);\n        assert(idx < (U32)(idx + forward));\n        idx += forward;\n    }\n    assert((size_t)(ip - base) <= (size_t)(U32)(-1));\n    assert((size_t)(iend - base) <= (size_t)(U32)(-1));\n    ms->nextToUpdate = target;\n}\n\nvoid ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {\n    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);\n}\n\nFORCE_INLINE_TEMPLATE\nU32 ZSTD_insertBtAndGetAllMatches (\n                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */\n                    ZSTD_matchState_t* ms,\n                    U32* nextToUpdate3,\n                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,\n                    const U32 rep[ZSTD_REP_NUM],\n                    
U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */\n                    const U32 lengthToBeat,\n                    U32 const mls /* template */)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);\n    const BYTE* const base = ms->window.base;\n    U32 const current = (U32)(ip-base);\n    U32 const hashLog = cParams->hashLog;\n    U32 const minMatch = (mls==3) ? 3 : 4;\n    U32* const hashTable = ms->hashTable;\n    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);\n    U32 matchIndex  = hashTable[h];\n    U32* const bt   = ms->chainTable;\n    U32 const btLog = cParams->chainLog - 1;\n    U32 const btMask= (1U << btLog) - 1;\n    size_t commonLengthSmaller=0, commonLengthLarger=0;\n    const BYTE* const dictBase = ms->window.dictBase;\n    U32 const dictLimit = ms->window.dictLimit;\n    const BYTE* const dictEnd = dictBase + dictLimit;\n    const BYTE* const prefixStart = base + dictLimit;\n    U32 const btLow = (btMask >= current) ? 0 : current - btMask;\n    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);\n    U32 const matchLow = windowLow ? windowLow : 1;\n    U32* smallerPtr = bt + 2*(current&btMask);\n    U32* largerPtr  = bt + 2*(current&btMask) + 1;\n    U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */\n    U32 dummy32;   /* to be nullified at the end */\n    U32 mnum = 0;\n    U32 nbCompares = 1U << cParams->searchLog;\n\n    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;\n    const ZSTD_compressionParameters* const dmsCParams =\n                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;\n    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? 
dms->window.base : NULL;\n    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;\n    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;\n    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;\n    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;\n    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;\n    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;\n    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;\n    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;\n\n    size_t bestLength = lengthToBeat-1;\n    DEBUGLOG(8, \"ZSTD_insertBtAndGetAllMatches: current=%u\", current);\n\n    /* check repCode */\n    assert(ll0 <= 1);   /* necessarily 1 or 0 */\n    {   U32 const lastR = ZSTD_REP_NUM + ll0;\n        U32 repCode;\n        for (repCode = ll0; repCode < lastR; repCode++) {\n            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? 
(rep[0] - 1) : rep[repCode];\n            U32 const repIndex = current - repOffset;\n            U32 repLen = 0;\n            assert(current >= dictLimit);\n            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) {  /* equivalent to `current > repIndex >= dictLimit` */\n                if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) {\n                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;\n                }\n            } else {  /* repIndex < dictLimit || repIndex >= current */\n                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?\n                                             dmsBase + repIndex - dmsIndexDelta :\n                                             dictBase + repIndex;\n                assert(current >= windowLow);\n                if ( dictMode == ZSTD_extDict\n                  && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow)  /* equivalent to `current > repIndex >= windowLow` */\n                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)\n                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {\n                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;\n                }\n                if (dictMode == ZSTD_dictMatchState\n                  && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `current > repIndex >= dmsLowLimit` */\n                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */\n                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {\n                    repLen = 
(U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;\n            }   }\n            /* save longer solution */\n            if (repLen > bestLength) {\n                DEBUGLOG(8, \"found repCode %u (ll0:%u, offset:%u) of length %u\",\n                            repCode, ll0, repOffset, repLen);\n                bestLength = repLen;\n                matches[mnum].off = repCode - ll0;\n                matches[mnum].len = (U32)repLen;\n                mnum++;\n                if ( (repLen > sufficient_len)\n                   | (ip+repLen == iLimit) ) {  /* best possible */\n                    return mnum;\n    }   }   }   }\n\n    /* HC3 match finder */\n    if ((mls == 3) /*static*/ && (bestLength < mls)) {\n        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);\n        if ((matchIndex3 >= matchLow)\n          & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {\n            size_t mlen;\n            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {\n                const BYTE* const match = base + matchIndex3;\n                mlen = ZSTD_count(ip, match, iLimit);\n            } else {\n                const BYTE* const match = dictBase + matchIndex3;\n                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);\n            }\n\n            /* save best solution */\n            if (mlen >= mls /* == 3 > bestLength */) {\n                DEBUGLOG(8, \"found small match with hlog3, of length %u\",\n                            (U32)mlen);\n                bestLength = mlen;\n                assert(current > matchIndex3);\n                assert(mnum==0);  /* no prior solution */\n                matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE;\n                matches[0].len = (U32)mlen;\n                mnum = 1;\n                if ( 
(mlen > sufficient_len) |\n                     (ip+mlen == iLimit) ) {  /* best possible length */\n                    ms->nextToUpdate = current+1;  /* skip insertion */\n                    return 1;\n        }   }   }\n        /* no dictMatchState lookup: dicts don't have a populated HC3 table */\n    }\n\n    hashTable[h] = current;   /* Update Hash Table */\n\n    while (nbCompares-- && (matchIndex >= matchLow)) {\n        U32* const nextPtr = bt + 2*(matchIndex & btMask);\n        const BYTE* match;\n        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */\n        assert(current > matchIndex);\n\n        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {\n            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */\n            match = base + matchIndex;\n            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */\n            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);\n        } else {\n            match = dictBase + matchIndex;\n            assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */\n            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);\n            if (matchIndex+matchLength >= dictLimit)\n                match = base + matchIndex;   /* prepare for match[matchLength] read */\n        }\n\n        if (matchLength > bestLength) {\n            DEBUGLOG(8, \"found match of length %u at distance %u (offCode=%u)\",\n                    (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);\n            assert(matchEndIdx > matchIndex);\n            if (matchLength > matchEndIdx - matchIndex)\n                matchEndIdx = matchIndex + 
(U32)matchLength;\n            bestLength = matchLength;\n            matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;\n            matches[mnum].len = (U32)matchLength;\n            mnum++;\n            if ( (matchLength > ZSTD_OPT_NUM)\n               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {\n                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */\n                break; /* drop, to preserve bt consistency (miss a little bit of compression) */\n            }\n        }\n\n        if (match[matchLength] < ip[matchLength]) {\n            /* match smaller than current */\n            *smallerPtr = matchIndex;             /* update smaller idx */\n            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */\n            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */\n            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */\n            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */\n        } else {\n            *largerPtr = matchIndex;\n            commonLengthLarger = matchLength;\n            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */\n            largerPtr = nextPtr;\n            matchIndex = nextPtr[0];\n    }   }\n\n    *smallerPtr = *largerPtr = 0;\n\n    if (dictMode == ZSTD_dictMatchState && nbCompares) {\n        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);\n        U32 dictMatchIndex = dms->hashTable[dmsH];\n        const U32* const dmsBt = dms->chainTable;\n        commonLengthSmaller = commonLengthLarger = 0;\n        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {\n            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);\n            
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */\n            const BYTE* match = dmsBase + dictMatchIndex;\n            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);\n            if (dictMatchIndex+matchLength >= dmsHighLimit)\n                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */\n\n            if (matchLength > bestLength) {\n                matchIndex = dictMatchIndex + dmsIndexDelta;\n                DEBUGLOG(8, \"found dms match of length %u at distance %u (offCode=%u)\",\n                        (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);\n                if (matchLength > matchEndIdx - matchIndex)\n                    matchEndIdx = matchIndex + (U32)matchLength;\n                bestLength = matchLength;\n                matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;\n                matches[mnum].len = (U32)matchLength;\n                mnum++;\n                if ( (matchLength > ZSTD_OPT_NUM)\n                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {\n                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */\n                }\n            }\n\n            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */\n            if (match[matchLength] < ip[matchLength]) {\n                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */\n                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */\n            } else {\n                /* match is larger than current */\n                commonLengthLarger = matchLength;\n                dictMatchIndex = nextPtr[0];\n            }\n        }\n    }\n\n    
assert(matchEndIdx > current+8);\n    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */\n    return mnum;\n}\n\n\nFORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (\n                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */\n                        ZSTD_matchState_t* ms,\n                        U32* nextToUpdate3,\n                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,\n                        const U32 rep[ZSTD_REP_NUM],\n                        U32 const ll0,\n                        U32 const lengthToBeat)\n{\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n    U32 const matchLengthSearch = cParams->minMatch;\n    DEBUGLOG(8, \"ZSTD_BtGetAllMatches\");\n    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */\n    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);\n    switch(matchLengthSearch)\n    {\n    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);\n    default :\n    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);\n    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);\n    case 7 :\n    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);\n    }\n}\n\n\n/*-*******************************\n*  Optimal parser\n*********************************/\ntypedef struct repcodes_s {\n    U32 rep[3];\n} repcodes_t;\n\nstatic repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)\n{\n    repcodes_t newReps;\n    if (offset >= ZSTD_REP_NUM) {  /* full offset */\n        newReps.rep[2] = rep[1];\n        newReps.rep[1] = rep[0];\n        newReps.rep[0] = offset 
- ZSTD_REP_MOVE;\n    } else {   /* repcode */\n        U32 const repCode = offset + ll0;\n        if (repCode > 0) {  /* note : if repCode==0, no change */\n            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];\n            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];\n            newReps.rep[1] = rep[0];\n            newReps.rep[0] = currentOffset;\n        } else {   /* repCode == 0 */\n            memcpy(&newReps, rep, sizeof(newReps));\n        }\n    }\n    return newReps;\n}\n\n\nstatic U32 ZSTD_totalLen(ZSTD_optimal_t sol)\n{\n    return sol.litlen + sol.mlen;\n}\n\n#if 0 /* debug */\n\nstatic void\nlistStats(const U32* table, int lastEltID)\n{\n    int const nbElts = lastEltID + 1;\n    int enb;\n    for (enb=0; enb < nbElts; enb++) {\n        (void)table;\n        //RAWLOG(2, \"%3i:%3i,  \", enb, table[enb]);\n        RAWLOG(2, \"%4i,\", table[enb]);\n    }\n    RAWLOG(2, \" \\n\");\n}\n\n#endif\n\nFORCE_INLINE_TEMPLATE size_t\nZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,\n                               seqStore_t* seqStore,\n                               U32 rep[ZSTD_REP_NUM],\n                         const void* src, size_t srcSize,\n                         const int optLevel,\n                         const ZSTD_dictMode_e dictMode)\n{\n    optState_t* const optStatePtr = &ms->opt;\n    const BYTE* const istart = (const BYTE*)src;\n    const BYTE* ip = istart;\n    const BYTE* anchor = istart;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* const ilimit = iend - 8;\n    const BYTE* const base = ms->window.base;\n    const BYTE* const prefixStart = base + ms->window.dictLimit;\n    const ZSTD_compressionParameters* const cParams = &ms->cParams;\n\n    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);\n    U32 const minMatch = (cParams->minMatch == 3) ? 
3 : 4;\n    U32 nextToUpdate3 = ms->nextToUpdate;\n\n    ZSTD_optimal_t* const opt = optStatePtr->priceTable;\n    ZSTD_match_t* const matches = optStatePtr->matchTable;\n    ZSTD_optimal_t lastSequence;\n\n    /* init */\n    DEBUGLOG(5, \"ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u\",\n                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);\n    assert(optLevel <= 2);\n    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);\n    ip += (ip==prefixStart);\n\n    /* Match Loop */\n    while (ip < ilimit) {\n        U32 cur, last_pos = 0;\n\n        /* find first match */\n        {   U32 const litlen = (U32)(ip - anchor);\n            U32 const ll0 = !litlen;\n            U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);\n            if (!nbMatches) { ip++; continue; }\n\n            /* initialize opt[0] */\n            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }\n            opt[0].mlen = 0;  /* means is_a_literal */\n            opt[0].litlen = litlen;\n            opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);\n\n            /* large match -> immediate encoding */\n            {   U32 const maxML = matches[nbMatches-1].len;\n                U32 const maxOffset = matches[nbMatches-1].off;\n                DEBUGLOG(6, \"found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series\",\n                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));\n\n                if (maxML > sufficient_len) {\n                    lastSequence.litlen = litlen;\n                    lastSequence.mlen = maxML;\n                    lastSequence.off = maxOffset;\n                    DEBUGLOG(6, \"large match (%u>%u), immediate encoding\",\n                                maxML, sufficient_len);\n                    cur = 0;\n                    last_pos = 
ZSTD_totalLen(lastSequence);\n                    goto _shortestPath;\n            }   }\n\n            /* set prices for first matches starting position == 0 */\n            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);\n                U32 pos;\n                U32 matchNb;\n                for (pos = 1; pos < minMatch; pos++) {\n                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */\n                }\n                for (matchNb = 0; matchNb < nbMatches; matchNb++) {\n                    U32 const offset = matches[matchNb].off;\n                    U32 const end = matches[matchNb].len;\n                    repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);\n                    for ( ; pos <= end ; pos++ ) {\n                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);\n                        U32 const sequencePrice = literalsPrice + matchPrice;\n                        DEBUGLOG(7, \"rPos:%u => set initial price : %.2f\",\n                                    pos, ZSTD_fCost(sequencePrice));\n                        opt[pos].mlen = pos;\n                        opt[pos].off = offset;\n                        opt[pos].litlen = litlen;\n                        opt[pos].price = sequencePrice;\n                        ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));\n                        memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));\n                }   }\n                last_pos = pos-1;\n            }\n        }\n\n        /* check further positions */\n        for (cur = 1; cur <= last_pos; cur++) {\n            const BYTE* const inr = ip + cur;\n            assert(cur < ZSTD_OPT_NUM);\n            DEBUGLOG(7, \"cPos:%zi==rPos:%u\", inr-istart, cur)\n\n            /* Fix current position with one literal if cheaper */\n            {   U32 const litlen = (opt[cur-1].mlen 
== 0) ? opt[cur-1].litlen + 1 : 1;\n                int const price = opt[cur-1].price\n                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)\n                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)\n                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);\n                assert(price < 1000000000); /* overflow check */\n                if (price <= opt[cur].price) {\n                    DEBUGLOG(7, \"cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)\",\n                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,\n                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);\n                    opt[cur].mlen = 0;\n                    opt[cur].off = 0;\n                    opt[cur].litlen = litlen;\n                    opt[cur].price = price;\n                    memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));\n                } else {\n                    DEBUGLOG(7, \"cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)\",\n                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),\n                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);\n                }\n            }\n\n            /* last match must start at a minimum distance of 8 from oend */\n            if (inr > ilimit) continue;\n\n            if (cur == last_pos) break;\n\n            if ( (optLevel==0) /*static_test*/\n              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {\n                DEBUGLOG(7, \"move to next rPos:%u : price is <=\", cur+1);\n                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */\n            }\n\n            {   U32 const ll0 = (opt[cur].mlen != 0);\n                U32 const litlen = (opt[cur].mlen == 0) ? 
opt[cur].litlen : 0;\n                U32 const previousPrice = opt[cur].price;\n                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);\n                U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);\n                U32 matchNb;\n                if (!nbMatches) {\n                    DEBUGLOG(7, \"rPos:%u : no match found\", cur);\n                    continue;\n                }\n\n                {   U32 const maxML = matches[nbMatches-1].len;\n                    DEBUGLOG(7, \"cPos:%zi==rPos:%u, found %u matches, of maxLength=%u\",\n                                inr-istart, cur, nbMatches, maxML);\n\n                    if ( (maxML > sufficient_len)\n                      || (cur + maxML >= ZSTD_OPT_NUM) ) {\n                        lastSequence.mlen = maxML;\n                        lastSequence.off = matches[nbMatches-1].off;\n                        lastSequence.litlen = litlen;\n                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */\n                        last_pos = cur + ZSTD_totalLen(lastSequence);\n                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */\n                        goto _shortestPath;\n                }   }\n\n                /* set prices using matches found at position == cur */\n                for (matchNb = 0; matchNb < nbMatches; matchNb++) {\n                    U32 const offset = matches[matchNb].off;\n                    repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0);\n                    U32 const lastML = matches[matchNb].len;\n                    U32 const startML = (matchNb>0) ? 
matches[matchNb-1].len+1 : minMatch;\n                    U32 mlen;\n\n                    DEBUGLOG(7, \"testing match %u => offCode=%4u, mlen=%2u, llen=%2u\",\n                                matchNb, matches[matchNb].off, lastML, litlen);\n\n                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */\n                        U32 const pos = cur + mlen;\n                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);\n\n                        if ((pos > last_pos) || (price < opt[pos].price)) {\n                            DEBUGLOG(7, \"rPos:%u (ml=%2u) => new better price (%.2f<%.2f)\",\n                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));\n                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */\n                            opt[pos].mlen = mlen;\n                            opt[pos].off = offset;\n                            opt[pos].litlen = litlen;\n                            opt[pos].price = price;\n                            ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));\n                            memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));\n                        } else {\n                            DEBUGLOG(7, \"rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)\",\n                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));\n                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */\n                        }\n            }   }   }\n        }  /* for (cur = 1; cur <= last_pos; cur++) */\n\n        lastSequence = opt[last_pos];\n        cur = last_pos > ZSTD_totalLen(lastSequence) ? 
last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */\n        assert(cur < ZSTD_OPT_NUM);  /* control overflow*/\n\n_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */\n        assert(opt[0].mlen == 0);\n\n        {   U32 const storeEnd = cur + 1;\n            U32 storeStart = storeEnd;\n            U32 seqPos = cur;\n\n            DEBUGLOG(6, \"start reverse traversal (last_pos:%u, cur:%u)\",\n                        last_pos, cur); (void)last_pos;\n            assert(storeEnd < ZSTD_OPT_NUM);\n            DEBUGLOG(6, \"last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)\",\n                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);\n            opt[storeEnd] = lastSequence;\n            while (seqPos > 0) {\n                U32 const backDist = ZSTD_totalLen(opt[seqPos]);\n                storeStart--;\n                DEBUGLOG(6, \"sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)\",\n                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);\n                opt[storeStart] = opt[seqPos];\n                seqPos = (seqPos > backDist) ? 
seqPos - backDist : 0;\n            }\n\n            /* save sequences */\n            DEBUGLOG(6, \"sending selected sequences into seqStore\")\n            {   U32 storePos;\n                for (storePos=storeStart; storePos <= storeEnd; storePos++) {\n                    U32 const llen = opt[storePos].litlen;\n                    U32 const mlen = opt[storePos].mlen;\n                    U32 const offCode = opt[storePos].off;\n                    U32 const advance = llen + mlen;\n                    DEBUGLOG(6, \"considering seq starting at %zi, llen=%u, mlen=%u\",\n                                anchor - istart, (unsigned)llen, (unsigned)mlen);\n\n                    if (mlen==0) {  /* only literals => must be last \"sequence\", actually starting a new stream of sequences */\n                        assert(storePos == storeEnd);   /* must be last sequence */\n                        ip = anchor + llen;     /* last \"sequence\" is a bunch of literals => don't progress anchor */\n                        continue;   /* will finish */\n                    }\n\n                    /* repcodes update : like ZSTD_updateRep(), but update in place */\n                    if (offCode >= ZSTD_REP_NUM) {  /* full offset */\n                        rep[2] = rep[1];\n                        rep[1] = rep[0];\n                        rep[0] = offCode - ZSTD_REP_MOVE;\n                    } else {   /* repcode */\n                        U32 const repCode = offCode + (llen==0);\n                        if (repCode) {  /* note : if repCode==0, no change */\n                            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? 
(rep[0] - 1) : rep[repCode];\n                            if (repCode >= 2) rep[2] = rep[1];\n                            rep[1] = rep[0];\n                            rep[0] = currentOffset;\n                    }   }\n\n                    assert(anchor + llen <= iend);\n                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);\n                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);\n                    anchor += advance;\n                    ip = anchor;\n            }   }\n            ZSTD_setBasePrices(optStatePtr, optLevel);\n        }\n\n    }   /* while (ip < ilimit) */\n\n    /* Return the last literals size */\n    return (size_t)(iend - anchor);\n}\n\n\nsize_t ZSTD_compressBlock_btopt(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        const void* src, size_t srcSize)\n{\n    DEBUGLOG(5, \"ZSTD_compressBlock_btopt\");\n    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);\n}\n\n\n/* used in 2-pass strategy */\nstatic U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)\n{\n    U32 s, sum=0;\n    assert(ZSTD_FREQ_DIV+bonus >= 0);\n    for (s=0; s<lastEltIndex+1; s++) {\n        table[s] <<= ZSTD_FREQ_DIV+bonus;\n        table[s]--;\n        sum += table[s];\n    }\n    return sum;\n}\n\n/* used in 2-pass strategy */\nMEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)\n{\n    if (ZSTD_compressedLiterals(optPtr))\n        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);\n    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);\n    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);\n    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);\n}\n\n/* ZSTD_initStats_ultra():\n * make a first compression pass, just to seed stats with more accurate starting values.\n * only works on first block, with no dictionary and no ldm.\n 
* this function cannot error, hence its contract must be respected.\n */\nstatic void\nZSTD_initStats_ultra(ZSTD_matchState_t* ms,\n                     seqStore_t* seqStore,\n                     U32 rep[ZSTD_REP_NUM],\n               const void* src, size_t srcSize)\n{\n    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */\n    memcpy(tmpRep, rep, sizeof(tmpRep));\n\n    DEBUGLOG(4, \"ZSTD_initStats_ultra (srcSize=%zu)\", srcSize);\n    assert(ms->opt.litLengthSum == 0);    /* first block */\n    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */\n    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */\n    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as 2-complement) */\n\n    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/\n\n    /* invalidate first scan from history */\n    ZSTD_resetSeqStore(seqStore);\n    ms->window.base -= srcSize;\n    ms->window.dictLimit += (U32)srcSize;\n    ms->window.lowLimit = ms->window.dictLimit;\n    ms->nextToUpdate = ms->window.dictLimit;\n\n    /* re-inforce weight of collected statistics */\n    ZSTD_upscaleStats(&ms->opt);\n}\n\nsize_t ZSTD_compressBlock_btultra(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        const void* src, size_t srcSize)\n{\n    DEBUGLOG(5, \"ZSTD_compressBlock_btultra (srcSize=%zu)\", srcSize);\n    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);\n}\n\nsize_t ZSTD_compressBlock_btultra2(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        const void* src, size_t srcSize)\n{\n    U32 const current = (U32)((const BYTE*)src - ms->window.base);\n    DEBUGLOG(5, \"ZSTD_compressBlock_btultra2 (srcSize=%zu)\", srcSize);\n\n    /* 2-pass strategy:\n     * this strategy makes a first pass 
over first block to collect statistics\n     * and seed next round's statistics with it.\n     * After 1st pass, function forgets everything, and starts a new block.\n     * Consequently, this can only work if no data has been previously loaded in tables,\n     * aka, no dictionary, no prefix, no ldm preprocessing.\n     * The compression ratio gain is generally small (~0.5% on first block),\n     * the cost is 2x cpu time on first block. */\n    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);\n    if ( (ms->opt.litLengthSum==0)   /* first block */\n      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */\n      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */\n      && (current == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */\n      && (srcSize > ZSTD_PREDEF_THRESHOLD)\n      ) {\n        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);\n    }\n\n    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);\n}\n\nsize_t ZSTD_compressBlock_btopt_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        const void* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);\n}\n\nsize_t ZSTD_compressBlock_btultra_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        const void* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);\n}\n\nsize_t ZSTD_compressBlock_btopt_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        const void* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);\n}\n\nsize_t ZSTD_compressBlock_btultra_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 
rep[ZSTD_REP_NUM],\n        const void* src, size_t srcSize)\n{\n    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);\n}\n\n/* note : no btultra2 variant for extDict nor dictMatchState,\n * because btultra2 is not meant to work with dictionaries\n * and is only specific for the first block (no prefix) */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstd_opt.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_OPT_H\n#define ZSTD_OPT_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#include \"zstd_compress_internal.h\"\n\n/* used in ZSTD_loadDictionaryContent() */\nvoid ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);\n\nsize_t ZSTD_compressBlock_btopt(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_btultra(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_btultra2(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\n\n\nsize_t ZSTD_compressBlock_btopt_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_btultra_dictMatchState(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\n\nsize_t ZSTD_compressBlock_btopt_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\nsize_t ZSTD_compressBlock_btultra_extDict(\n        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],\n        void const* src, size_t srcSize);\n\n        /* note : no btultra2 variant for extDict nor dictMatchState,\n         * because btultra2 is not meant to work with dictionaries\n         * and is only specific for the 
first block (no prefix) */\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_OPT_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstdmt_compress.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/* ======   Compiler specifics   ====== */\n#if defined(_MSC_VER)\n#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */\n#endif\n\n\n/* ======   Constants   ====== */\n#define ZSTDMT_OVERLAPLOG_DEFAULT 0\n\n\n/* ======   Dependencies   ====== */\n#include <string.h>      /* memcpy, memset */\n#include <limits.h>      /* INT_MAX, UINT_MAX */\n#include \"mem.h\"         /* MEM_STATIC */\n#include \"pool.h\"        /* threadpool */\n#include \"threading.h\"   /* mutex */\n#include \"zstd_compress_internal.h\"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */\n#include \"zstd_ldm.h\"\n#include \"zstdmt_compress.h\"\n\n/* Guards code to support resizing the SeqPool.\n * We will want to resize the SeqPool to save memory in the future.\n * Until then, comment the code out since it is unused.\n */\n#define ZSTD_RESIZE_SEQPOOL 0\n\n/* ======   Debug   ====== */\n#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \\\n    && !defined(_MSC_VER) \\\n    && !defined(__MINGW32__)\n\n#  include <stdio.h>\n#  include <unistd.h>\n#  include <sys/times.h>\n\n#  define DEBUG_PRINTHEX(l,p,n) {            \\\n    unsigned debug_u;                        \\\n    for (debug_u=0; debug_u<(n); debug_u++)  \\\n        RAWLOG(l, \"%02X \", ((const unsigned char*)(p))[debug_u]); \\\n    RAWLOG(l, \" \\n\");                        \\\n}\n\nstatic unsigned long long GetCurrentClockTimeMicroseconds(void)\n{\n   static clock_t _ticksPerSecond = 0;\n   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);\n\n   {   struct tms junk; clock_t newTicks = 
(clock_t) times(&junk);\n       return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);\n}  }\n\n#define MUTEX_WAIT_TIME_DLEVEL 6\n#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {          \\\n    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {   \\\n        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \\\n        ZSTD_pthread_mutex_lock(mutex);           \\\n        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \\\n            unsigned long long const elapsedTime = (afterTime-beforeTime); \\\n            if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \\\n                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, \"Thread took %llu microseconds to acquire mutex %s \\n\", \\\n                   elapsedTime, #mutex);          \\\n        }   }                                     \\\n    } else {                                      \\\n        ZSTD_pthread_mutex_lock(mutex);           \\\n    }                                             \\\n}\n\n#else\n\n#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)\n#  define DEBUG_PRINTHEX(l,p,n) {}\n\n#endif\n\n\n/* =====   Buffer Pool   ===== */\n/* a single Buffer Pool can be invoked from multiple threads in parallel */\n\ntypedef struct buffer_s {\n    void* start;\n    size_t capacity;\n} buffer_t;\n\nstatic const buffer_t g_nullBuffer = { NULL, 0 };\n\ntypedef struct ZSTDMT_bufferPool_s {\n    ZSTD_pthread_mutex_t poolMutex;\n    size_t bufferSize;\n    unsigned totalBuffers;\n    unsigned nbBuffers;\n    ZSTD_customMem cMem;\n    buffer_t bTable[1];   /* variable size */\n} ZSTDMT_bufferPool;\n\nstatic ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)\n{\n    unsigned const maxNbBuffers = 2*nbWorkers + 3;\n    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(\n        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);\n    if 
(bufPool==NULL) return NULL;\n    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {\n        ZSTD_free(bufPool, cMem);\n        return NULL;\n    }\n    bufPool->bufferSize = 64 KB;\n    bufPool->totalBuffers = maxNbBuffers;\n    bufPool->nbBuffers = 0;\n    bufPool->cMem = cMem;\n    return bufPool;\n}\n\nstatic void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)\n{\n    unsigned u;\n    DEBUGLOG(3, \"ZSTDMT_freeBufferPool (address:%08X)\", (U32)(size_t)bufPool);\n    if (!bufPool) return;   /* compatibility with free on NULL */\n    for (u=0; u<bufPool->totalBuffers; u++) {\n        DEBUGLOG(4, \"free buffer %2u (address:%08X)\", u, (U32)(size_t)bufPool->bTable[u].start);\n        ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);\n    }\n    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);\n    ZSTD_free(bufPool, bufPool->cMem);\n}\n\n/* only works at initialization, not during compression */\nstatic size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)\n{\n    size_t const poolSize = sizeof(*bufPool)\n                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);\n    unsigned u;\n    size_t totalBufferSize = 0;\n    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);\n    for (u=0; u<bufPool->totalBuffers; u++)\n        totalBufferSize += bufPool->bTable[u].capacity;\n    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);\n\n    return poolSize + totalBufferSize;\n}\n\n/* ZSTDMT_setBufferSize() :\n * all future buffers provided by this buffer pool will have _at least_ this size\n * note : it's better for all buffers to have same size,\n * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */\nstatic void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)\n{\n    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);\n    DEBUGLOG(4, \"ZSTDMT_setBufferSize: bSize = %u\", (U32)bSize);\n    bufPool->bufferSize = bSize;\n    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);\n}\n\n\nstatic 
ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)\n{\n    unsigned const maxNbBuffers = 2*nbWorkers + 3;\n    if (srcBufPool==NULL) return NULL;\n    if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */\n        return srcBufPool;\n    /* need a larger buffer pool */\n    {   ZSTD_customMem const cMem = srcBufPool->cMem;\n        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */\n        ZSTDMT_bufferPool* newBufPool;\n        ZSTDMT_freeBufferPool(srcBufPool);\n        newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);\n        if (newBufPool==NULL) return newBufPool;\n        ZSTDMT_setBufferSize(newBufPool, bSize);\n        return newBufPool;\n    }\n}\n\n/** ZSTDMT_getBuffer() :\n *  assumption : bufPool must be valid\n * @return : a buffer, with start pointer and size\n *  note: allocation may fail, in this case, start==NULL and size==0 */\nstatic buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)\n{\n    size_t const bSize = bufPool->bufferSize;\n    DEBUGLOG(5, \"ZSTDMT_getBuffer: bSize = %u\", (U32)bufPool->bufferSize);\n    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);\n    if (bufPool->nbBuffers) {   /* try to use an existing buffer */\n        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];\n        size_t const availBufferSize = buf.capacity;\n        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;\n        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {\n            /* large enough, but not too much */\n            DEBUGLOG(5, \"ZSTDMT_getBuffer: provide buffer %u of size %u\",\n                        bufPool->nbBuffers, (U32)buf.capacity);\n            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);\n            return buf;\n        }\n        /* size conditions not respected : scratch this buffer, create new one */\n        DEBUGLOG(5, \"ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing\");\n        
ZSTD_free(buf.start, bufPool->cMem);\n    }\n    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);\n    /* create new buffer */\n    DEBUGLOG(5, \"ZSTDMT_getBuffer: create a new buffer\");\n    {   buffer_t buffer;\n        void* const start = ZSTD_malloc(bSize, bufPool->cMem);\n        buffer.start = start;   /* note : start can be NULL if malloc fails ! */\n        buffer.capacity = (start==NULL) ? 0 : bSize;\n        if (start==NULL) {\n            DEBUGLOG(5, \"ZSTDMT_getBuffer: buffer allocation failure !!\");\n        } else {\n            DEBUGLOG(5, \"ZSTDMT_getBuffer: created buffer of size %u\", (U32)bSize);\n        }\n        return buffer;\n    }\n}\n\n#if ZSTD_RESIZE_SEQPOOL\n/** ZSTDMT_resizeBuffer() :\n * assumption : bufPool must be valid\n * @return : a buffer that is at least the buffer pool buffer size.\n *           If a reallocation happens, the data in the input buffer is copied.\n */\nstatic buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)\n{\n    size_t const bSize = bufPool->bufferSize;\n    if (buffer.capacity < bSize) {\n        void* const start = ZSTD_malloc(bSize, bufPool->cMem);\n        buffer_t newBuffer;\n        newBuffer.start = start;\n        newBuffer.capacity = start == NULL ? 
0 : bSize;\n        if (start != NULL) {\n            assert(newBuffer.capacity >= buffer.capacity);\n            memcpy(newBuffer.start, buffer.start, buffer.capacity);\n            DEBUGLOG(5, \"ZSTDMT_resizeBuffer: created buffer of size %u\", (U32)bSize);\n            return newBuffer;\n        }\n        DEBUGLOG(5, \"ZSTDMT_resizeBuffer: buffer allocation failure !!\");\n    }\n    return buffer;\n}\n#endif\n\n/* store buffer for later re-use, up to pool capacity */\nstatic void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)\n{\n    DEBUGLOG(5, \"ZSTDMT_releaseBuffer\");\n    if (buf.start == NULL) return;   /* compatible with release on NULL */\n    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);\n    if (bufPool->nbBuffers < bufPool->totalBuffers) {\n        bufPool->bTable[bufPool->nbBuffers++] = buf;  /* stored for later use */\n        DEBUGLOG(5, \"ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u\",\n                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));\n        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);\n        return;\n    }\n    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);\n    /* Reached bufferPool capacity (should not happen) */\n    DEBUGLOG(5, \"ZSTDMT_releaseBuffer: pool capacity reached => freeing \");\n    ZSTD_free(buf.start, bufPool->cMem);\n}\n\n\n/* =====   Seq Pool Wrapper   ====== */\n\nstatic rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};\n\ntypedef ZSTDMT_bufferPool ZSTDMT_seqPool;\n\nstatic size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)\n{\n    return ZSTDMT_sizeof_bufferPool(seqPool);\n}\n\nstatic rawSeqStore_t bufferToSeq(buffer_t buffer)\n{\n    rawSeqStore_t seq = {NULL, 0, 0, 0};\n    seq.seq = (rawSeq*)buffer.start;\n    seq.capacity = buffer.capacity / sizeof(rawSeq);\n    return seq;\n}\n\nstatic buffer_t seqToBuffer(rawSeqStore_t seq)\n{\n    buffer_t buffer;\n    buffer.start = seq.seq;\n    buffer.capacity = seq.capacity * sizeof(rawSeq);\n    return 
buffer;\n}\n\nstatic rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)\n{\n    if (seqPool->bufferSize == 0) {\n        return kNullRawSeqStore;\n    }\n    return bufferToSeq(ZSTDMT_getBuffer(seqPool));\n}\n\n#if ZSTD_RESIZE_SEQPOOL\nstatic rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)\n{\n  return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));\n}\n#endif\n\nstatic void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)\n{\n  ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));\n}\n\nstatic void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)\n{\n  ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));\n}\n\nstatic ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)\n{\n    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);\n    if (seqPool == NULL) return NULL;\n    ZSTDMT_setNbSeq(seqPool, 0);\n    return seqPool;\n}\n\nstatic void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)\n{\n    ZSTDMT_freeBufferPool(seqPool);\n}\n\nstatic ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)\n{\n    return ZSTDMT_expandBufferPool(pool, nbWorkers);\n}\n\n\n/* =====   CCtx Pool   ===== */\n/* a single CCtx Pool can be invoked from multiple threads in parallel */\n\ntypedef struct {\n    ZSTD_pthread_mutex_t poolMutex;\n    int totalCCtx;\n    int availCCtx;\n    ZSTD_customMem cMem;\n    ZSTD_CCtx* cctx[1];   /* variable size */\n} ZSTDMT_CCtxPool;\n\n/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */\nstatic void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)\n{\n    int cid;\n    for (cid=0; cid<pool->totalCCtx; cid++)\n        ZSTD_freeCCtx(pool->cctx[cid]);  /* note : compatible with free on NULL */\n    ZSTD_pthread_mutex_destroy(&pool->poolMutex);\n    ZSTD_free(pool, pool->cMem);\n}\n\n/* ZSTDMT_createCCtxPool() :\n * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() 
*/\nstatic ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,\n                                              ZSTD_customMem cMem)\n{\n    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(\n        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);\n    assert(nbWorkers > 0);\n    if (!cctxPool) return NULL;\n    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {\n        ZSTD_free(cctxPool, cMem);\n        return NULL;\n    }\n    cctxPool->cMem = cMem;\n    cctxPool->totalCCtx = nbWorkers;\n    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */\n    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);\n    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }\n    DEBUGLOG(3, \"cctxPool created, with %u workers\", nbWorkers);\n    return cctxPool;\n}\n\nstatic ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,\n                                              int nbWorkers)\n{\n    if (srcPool==NULL) return NULL;\n    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */\n    /* need a larger cctx pool */\n    {   ZSTD_customMem const cMem = srcPool->cMem;\n        ZSTDMT_freeCCtxPool(srcPool);\n        return ZSTDMT_createCCtxPool(nbWorkers, cMem);\n    }\n}\n\n/* only works during initialization phase, not during compression */\nstatic size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)\n{\n    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);\n    {   unsigned const nbWorkers = cctxPool->totalCCtx;\n        size_t const poolSize = sizeof(*cctxPool)\n                                + (nbWorkers-1) * sizeof(ZSTD_CCtx*);\n        unsigned u;\n        size_t totalCCtxSize = 0;\n        for (u=0; u<nbWorkers; u++) {\n            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);\n        }\n        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);\n        assert(nbWorkers > 0);\n        return poolSize + totalCCtxSize;\n    }\n}\n\nstatic ZSTD_CCtx* 
ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)\n{\n    DEBUGLOG(5, \"ZSTDMT_getCCtx\");\n    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);\n    if (cctxPool->availCCtx) {\n        cctxPool->availCCtx--;\n        {   ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];\n            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);\n            return cctx;\n    }   }\n    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);\n    DEBUGLOG(5, \"create one more CCtx\");\n    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */\n}\n\nstatic void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)\n{\n    if (cctx==NULL) return;   /* compatibility with release on NULL */\n    ZSTD_pthread_mutex_lock(&pool->poolMutex);\n    if (pool->availCCtx < pool->totalCCtx)\n        pool->cctx[pool->availCCtx++] = cctx;\n    else {\n        /* pool overflow : should not happen, since totalCCtx==nbWorkers */\n        DEBUGLOG(4, \"CCtx pool overflow : free cctx\");\n        ZSTD_freeCCtx(cctx);\n    }\n    ZSTD_pthread_mutex_unlock(&pool->poolMutex);\n}\n\n/* ====   Serial State   ==== */\n\ntypedef struct {\n    void const* start;\n    size_t size;\n} range_t;\n\ntypedef struct {\n    /* All variables in the struct are protected by mutex. 
*/\n    ZSTD_pthread_mutex_t mutex;\n    ZSTD_pthread_cond_t cond;\n    ZSTD_CCtx_params params;\n    ldmState_t ldmState;\n    XXH64_state_t xxhState;\n    unsigned nextJobID;\n    /* Protects ldmWindow.\n     * Must be acquired after the main mutex when acquiring both.\n     */\n    ZSTD_pthread_mutex_t ldmWindowMutex;\n    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is updated */\n    ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */\n} serialState_t;\n\nstatic int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize)\n{\n    /* Adjust parameters */\n    if (params.ldmParams.enableLdm) {\n        DEBUGLOG(4, \"LDM window size = %u KB\", (1U << params.cParams.windowLog) >> 10);\n        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);\n        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);\n        assert(params.ldmParams.hashRateLog < 32);\n        serialState->ldmState.hashPower =\n                ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);\n    } else {\n        memset(&params.ldmParams, 0, sizeof(params.ldmParams));\n    }\n    serialState->nextJobID = 0;\n    if (params.fParams.checksumFlag)\n        XXH64_reset(&serialState->xxhState, 0);\n    if (params.ldmParams.enableLdm) {\n        ZSTD_customMem cMem = params.customMem;\n        unsigned const hashLog = params.ldmParams.hashLog;\n        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);\n        unsigned const bucketLog =\n            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;\n        size_t const bucketSize = (size_t)1 << bucketLog;\n        unsigned const prevBucketLog =\n            serialState->params.ldmParams.hashLog -\n            serialState->params.ldmParams.bucketSizeLog;\n        /* Size the seq pool tables */\n        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));\n        
/* Reset the window */\n        ZSTD_window_init(&serialState->ldmState.window);\n        serialState->ldmWindow = serialState->ldmState.window;\n        /* Resize tables and output space if necessary. */\n        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {\n            ZSTD_free(serialState->ldmState.hashTable, cMem);\n            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem);\n        }\n        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {\n            ZSTD_free(serialState->ldmState.bucketOffsets, cMem);\n            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem);\n        }\n        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)\n            return 1;\n        /* Zero the tables */\n        memset(serialState->ldmState.hashTable, 0, hashSize);\n        memset(serialState->ldmState.bucketOffsets, 0, bucketSize);\n    }\n    serialState->params = params;\n    serialState->params.jobSize = (U32)jobSize;\n    return 0;\n}\n\nstatic int ZSTDMT_serialState_init(serialState_t* serialState)\n{\n    int initError = 0;\n    memset(serialState, 0, sizeof(*serialState));\n    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);\n    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);\n    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);\n    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);\n    return initError;\n}\n\nstatic void ZSTDMT_serialState_free(serialState_t* serialState)\n{\n    ZSTD_customMem cMem = serialState->params.customMem;\n    ZSTD_pthread_mutex_destroy(&serialState->mutex);\n    ZSTD_pthread_cond_destroy(&serialState->cond);\n    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);\n    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);\n    ZSTD_free(serialState->ldmState.hashTable, cMem);\n    
ZSTD_free(serialState->ldmState.bucketOffsets, cMem);\n}\n\nstatic void ZSTDMT_serialState_update(serialState_t* serialState,\n                                      ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,\n                                      range_t src, unsigned jobID)\n{\n    /* Wait for our turn */\n    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);\n    while (serialState->nextJobID < jobID) {\n        DEBUGLOG(5, \"wait for serialState->cond\");\n        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);\n    }\n    /* A future job may error and skip our job */\n    if (serialState->nextJobID == jobID) {\n        /* It is now our turn, do any processing necessary */\n        if (serialState->params.ldmParams.enableLdm) {\n            size_t error;\n            assert(seqStore.seq != NULL && seqStore.pos == 0 &&\n                   seqStore.size == 0 && seqStore.capacity > 0);\n            assert(src.size <= serialState->params.jobSize);\n            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);\n            error = ZSTD_ldm_generateSequences(\n                &serialState->ldmState, &seqStore,\n                &serialState->params.ldmParams, src.start, src.size);\n            /* We provide a large enough buffer to never fail. 
*/\n            assert(!ZSTD_isError(error)); (void)error;\n            /* Update ldmWindow to match the ldmState.window and signal the main\n             * thread if it is waiting for a buffer.\n             */\n            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);\n            serialState->ldmWindow = serialState->ldmState.window;\n            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);\n            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);\n        }\n        if (serialState->params.fParams.checksumFlag && src.size > 0)\n            XXH64_update(&serialState->xxhState, src.start, src.size);\n    }\n    /* Now it is the next jobs turn */\n    serialState->nextJobID++;\n    ZSTD_pthread_cond_broadcast(&serialState->cond);\n    ZSTD_pthread_mutex_unlock(&serialState->mutex);\n\n    if (seqStore.size > 0) {\n        size_t const err = ZSTD_referenceExternalSequences(\n            jobCCtx, seqStore.seq, seqStore.size);\n        assert(serialState->params.ldmParams.enableLdm);\n        assert(!ZSTD_isError(err));\n        (void)err;\n    }\n}\n\nstatic void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,\n                                              unsigned jobID, size_t cSize)\n{\n    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);\n    if (serialState->nextJobID <= jobID) {\n        assert(ZSTD_isError(cSize)); (void)cSize;\n        DEBUGLOG(5, \"Skipping past job %u because of error\", jobID);\n        serialState->nextJobID = jobID + 1;\n        ZSTD_pthread_cond_broadcast(&serialState->cond);\n\n        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);\n        ZSTD_window_clear(&serialState->ldmWindow);\n        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);\n        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);\n    }\n    ZSTD_pthread_mutex_unlock(&serialState->mutex);\n\n}\n\n\n/* ------------------------------------------ */\n/* =====          Worker thread         ===== */\n/* 
------------------------------------------ */\n\nstatic const range_t kNullRange = { NULL, 0 };\n\ntypedef struct {\n    size_t   consumed;                   /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */\n    size_t   cSize;                      /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */\n    ZSTD_pthread_mutex_t job_mutex;      /* Thread-safe - used by mtctx and worker */\n    ZSTD_pthread_cond_t job_cond;        /* Thread-safe - used by mtctx and worker */\n    ZSTDMT_CCtxPool* cctxPool;           /* Thread-safe - used by mtctx and (all) workers */\n    ZSTDMT_bufferPool* bufPool;          /* Thread-safe - used by mtctx and (all) workers */\n    ZSTDMT_seqPool* seqPool;             /* Thread-safe - used by mtctx and (all) workers */\n    serialState_t* serial;               /* Thread-safe - used by mtctx and (all) workers */\n    buffer_t dstBuff;                    /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */\n    range_t prefix;                      /* set by mtctx, then read by worker & mtctx => no barrier */\n    range_t src;                         /* set by mtctx, then read by worker & mtctx => no barrier */\n    unsigned jobID;                      /* set by mtctx, then read by worker => no barrier */\n    unsigned firstJob;                   /* set by mtctx, then read by worker => no barrier */\n    unsigned lastJob;                    /* set by mtctx, then read by worker => no barrier */\n    ZSTD_CCtx_params params;             /* set by mtctx, then read by worker => no barrier */\n    const ZSTD_CDict* cdict;             /* set by mtctx, then read by worker => no barrier */\n    unsigned long long fullFrameSize;    /* set by mtctx, then read by worker => no barrier */\n    size_t   dstFlushed;                 /* used only by mtctx */\n    unsigned frameChecksumNeeded;        /* used only by mtctx */\n} 
ZSTDMT_jobDescription;\n\n#define JOB_ERROR(e) {                          \\\n    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \\\n    job->cSize = e;                             \\\n    ZSTD_pthread_mutex_unlock(&job->job_mutex); \\\n    goto _endJob;                               \\\n}\n\n/* ZSTDMT_compressionJob() is a POOL_function type */\nstatic void ZSTDMT_compressionJob(void* jobDescription)\n{\n    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;\n    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */\n    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);\n    rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);\n    buffer_t dstBuff = job->dstBuff;\n    size_t lastCBlockSize = 0;\n\n    /* resources */\n    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));\n    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */\n        dstBuff = ZSTDMT_getBuffer(job->bufPool);\n        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));\n        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */\n    }\n    if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)\n        JOB_ERROR(ERROR(memory_allocation));\n\n    /* Don't compute the checksum for chunks, since we compute it externally,\n     * but write it in the header.\n     */\n    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;\n    /* Don't run LDM for the chunks, since we handle it externally */\n    jobParams.ldmParams.enableLdm = 0;\n\n\n    /* init */\n    if (job->cdict) {\n        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);\n        assert(job->firstJob);  /* only allowed for first job */\n        if (ZSTD_isError(initError)) JOB_ERROR(initError);\n    } else {  /* srcStart points at reloaded section */\n        
U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;\n        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);\n            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);\n        }\n        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,\n                                        job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in \"content-only\" mode (no header analysis) */\n                                        ZSTD_dtlm_fast,\n                                        NULL, /*cdict*/\n                                        &jobParams, pledgedSrcSize);\n            if (ZSTD_isError(initError)) JOB_ERROR(initError);\n    }   }\n\n    /* Perform serial step as early as possible, but after CCtx initialization */\n    ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);\n\n    if (!job->firstJob) {  /* flush and overwrite frame header when it's not first job */\n        size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);\n        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);\n        DEBUGLOG(5, \"ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)\", (U32)hSize);\n        ZSTD_invalidateRepCodes(cctx);\n    }\n\n    /* compress */\n    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;\n        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);\n        const BYTE* ip = (const BYTE*) job->src.start;\n        BYTE* const ostart = (BYTE*)dstBuff.start;\n        BYTE* op = ostart;\n        BYTE* oend = op + dstBuff.capacity;\n        int chunkNb;\n        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */\n        DEBUGLOG(5, \"ZSTDMT_compressionJob: compress %u bytes in %i blocks\", (U32)job->src.size, nbChunks);\n      
  assert(job->cSize == 0);\n        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {\n            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);\n            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);\n            ip += chunkSize;\n            op += cSize; assert(op < oend);\n            /* stats */\n            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);\n            job->cSize += cSize;\n            job->consumed = chunkSize * chunkNb;\n            DEBUGLOG(5, \"ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)\",\n                        (U32)cSize, (U32)job->cSize);\n            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */\n            ZSTD_pthread_mutex_unlock(&job->job_mutex);\n        }\n        /* last block */\n        assert(chunkSize > 0);\n        assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */\n        if ((nbChunks > 0) | job->lastJob /*must output a \"last block\" flag*/ ) {\n            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);\n            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? 
chunkSize : lastBlockSize1;\n            size_t const cSize = (job->lastJob) ?\n                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :\n                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);\n            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);\n            lastCBlockSize = cSize;\n    }   }\n\n_endJob:\n    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);\n    if (job->prefix.size > 0)\n        DEBUGLOG(5, \"Finished with prefix: %zx\", (size_t)job->prefix.start);\n    DEBUGLOG(5, \"Finished with source: %zx\", (size_t)job->src.start);\n    /* release resources */\n    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);\n    ZSTDMT_releaseCCtx(job->cctxPool, cctx);\n    /* report */\n    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);\n    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);\n    job->cSize += lastCBlockSize;\n    job->consumed = job->src.size;  /* when job->consumed == job->src.size , compression job is presumed completed */\n    ZSTD_pthread_cond_signal(&job->job_cond);\n    ZSTD_pthread_mutex_unlock(&job->job_mutex);\n}\n\n\n/* ------------------------------------------ */\n/* =====   Multi-threaded compression   ===== */\n/* ------------------------------------------ */\n\ntypedef struct {\n    range_t prefix;         /* read-only non-owned prefix buffer */\n    buffer_t buffer;\n    size_t filled;\n} inBuff_t;\n\ntypedef struct {\n  BYTE* buffer;     /* The round input buffer. All jobs get references\n                     * to pieces of the buffer. ZSTDMT_tryGetInputRange()\n                     * handles handing out job input buffers, and makes\n                     * sure it doesn't overlap with any pieces still in use.\n                     */\n  size_t capacity;  /* The capacity of buffer. */\n  size_t pos;       /* The position of the current inBuff in the round\n                     * buffer. 
Updated past the end if the inBuff once\n                     * the inBuff is sent to the worker thread.\n                     * pos <= capacity.\n                     */\n} roundBuff_t;\n\nstatic const roundBuff_t kNullRoundBuff = {NULL, 0, 0};\n\n#define RSYNC_LENGTH 32\n\ntypedef struct {\n  U64 hash;\n  U64 hitMask;\n  U64 primePower;\n} rsyncState_t;\n\nstruct ZSTDMT_CCtx_s {\n    POOL_ctx* factory;\n    ZSTDMT_jobDescription* jobs;\n    ZSTDMT_bufferPool* bufPool;\n    ZSTDMT_CCtxPool* cctxPool;\n    ZSTDMT_seqPool* seqPool;\n    ZSTD_CCtx_params params;\n    size_t targetSectionSize;\n    size_t targetPrefixSize;\n    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */\n    inBuff_t inBuff;\n    roundBuff_t roundBuff;\n    serialState_t serial;\n    rsyncState_t rsync;\n    unsigned singleBlockingThread;\n    unsigned jobIDMask;\n    unsigned doneJobID;\n    unsigned nextJobID;\n    unsigned frameEnded;\n    unsigned allJobsCompleted;\n    unsigned long long frameContentSize;\n    unsigned long long consumed;\n    unsigned long long produced;\n    ZSTD_customMem cMem;\n    ZSTD_CDict* cdictLocal;\n    const ZSTD_CDict* cdict;\n};\n\nstatic void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)\n{\n    U32 jobNb;\n    if (jobTable == NULL) return;\n    for (jobNb=0; jobNb<nbJobs; jobNb++) {\n        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);\n        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);\n    }\n    ZSTD_free(jobTable, cMem);\n}\n\n/* ZSTDMT_allocJobsTable()\n * allocate and init a job table.\n * update *nbJobsPtr to next power of 2 value, as size of table */\nstatic ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)\n{\n    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;\n    U32 const nbJobs = 1 << nbJobsLog2;\n    U32 jobNb;\n    ZSTDMT_jobDescription* const jobTable = 
(ZSTDMT_jobDescription*)\n                ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);\n    int initError = 0;\n    if (jobTable==NULL) return NULL;\n    *nbJobsPtr = nbJobs;\n    for (jobNb=0; jobNb<nbJobs; jobNb++) {\n        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);\n        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);\n    }\n    if (initError != 0) {\n        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);\n        return NULL;\n    }\n    return jobTable;\n}\n\nstatic size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {\n    U32 nbJobs = nbWorkers + 2;\n    if (nbJobs > mtctx->jobIDMask+1) {  /* need more job capacity */\n        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);\n        mtctx->jobIDMask = 0;\n        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);\n        if (mtctx->jobs==NULL) return ERROR(memory_allocation);\n        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));  /* ensure nbJobs is a power of 2 */\n        mtctx->jobIDMask = nbJobs - 1;\n    }\n    return 0;\n}\n\n\n/* ZSTDMT_CCtxParam_setNbWorkers():\n * Internal use only */\nsize_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)\n{\n    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);\n}\n\nMEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)\n{\n    ZSTDMT_CCtx* mtctx;\n    U32 nbJobs = nbWorkers + 2;\n    int initError;\n    DEBUGLOG(3, \"ZSTDMT_createCCtx_advanced (nbWorkers = %u)\", nbWorkers);\n\n    if (nbWorkers < 1) return NULL;\n    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);\n    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))\n        /* invalid custom allocator */\n        return NULL;\n\n    mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);\n    if (!mtctx) return NULL;\n    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, 
nbWorkers);\n    mtctx->cMem = cMem;\n    mtctx->allJobsCompleted = 1;\n    mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);\n    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);\n    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);  /* ensure nbJobs is a power of 2 */\n    mtctx->jobIDMask = nbJobs - 1;\n    mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);\n    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);\n    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);\n    initError = ZSTDMT_serialState_init(&mtctx->serial);\n    mtctx->roundBuff = kNullRoundBuff;\n    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {\n        ZSTDMT_freeCCtx(mtctx);\n        return NULL;\n    }\n    DEBUGLOG(3, \"mt_cctx created, for %u threads\", nbWorkers);\n    return mtctx;\n}\n\nZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)\n{\n#ifdef ZSTD_MULTITHREAD\n    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);\n#else\n    (void)nbWorkers;\n    (void)cMem;\n    return NULL;\n#endif\n}\n\nZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)\n{\n    return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);\n}\n\n\n/* ZSTDMT_releaseAllJobResources() :\n * note : ensure all workers are killed first ! 
*/\nstatic void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)\n{\n    unsigned jobID;\n    DEBUGLOG(3, \"ZSTDMT_releaseAllJobResources\");\n    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {\n        /* Copy the mutex/cond out */\n        ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;\n        ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;\n\n        DEBUGLOG(4, \"job%02u: release dst address %08X\", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);\n        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);\n\n        /* Clear the job description, but keep the mutex/cond */\n        memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));\n        mtctx->jobs[jobID].job_mutex = mutex;\n        mtctx->jobs[jobID].job_cond = cond;\n    }\n    mtctx->inBuff.buffer = g_nullBuffer;\n    mtctx->inBuff.filled = 0;\n    mtctx->allJobsCompleted = 1;\n}\n\nstatic void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)\n{\n    DEBUGLOG(4, \"ZSTDMT_waitForAllJobsCompleted\");\n    while (mtctx->doneJobID < mtctx->nextJobID) {\n        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;\n        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);\n        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {\n            DEBUGLOG(4, \"waiting for jobCompleted signal from job %u\", mtctx->doneJobID);   /* we want to block when waiting for data to flush */\n            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);\n        }\n        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);\n        mtctx->doneJobID++;\n    }\n}\n\nsize_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)\n{\n    if (mtctx==NULL) return 0;   /* compatible with free on NULL */\n    POOL_free(mtctx->factory);   /* stop and free worker threads */\n    ZSTDMT_releaseAllJobResources(mtctx);  /* release job resources into pools first */\n    ZSTDMT_freeJobsTable(mtctx->jobs, 
mtctx->jobIDMask+1, mtctx->cMem);\n    ZSTDMT_freeBufferPool(mtctx->bufPool);\n    ZSTDMT_freeCCtxPool(mtctx->cctxPool);\n    ZSTDMT_freeSeqPool(mtctx->seqPool);\n    ZSTDMT_serialState_free(&mtctx->serial);\n    ZSTD_freeCDict(mtctx->cdictLocal);\n    if (mtctx->roundBuff.buffer)\n        ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);\n    ZSTD_free(mtctx, mtctx->cMem);\n    return 0;\n}\n\nsize_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)\n{\n    if (mtctx == NULL) return 0;   /* supports sizeof NULL */\n    return sizeof(*mtctx)\n            + POOL_sizeof(mtctx->factory)\n            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)\n            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)\n            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)\n            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)\n            + ZSTD_sizeof_CDict(mtctx->cdictLocal)\n            + mtctx->roundBuff.capacity;\n}\n\n/* Internal only */\nsize_t\nZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,\n                                   ZSTDMT_parameter parameter,\n                                   int value)\n{\n    DEBUGLOG(4, \"ZSTDMT_CCtxParam_setMTCtxParameter\");\n    switch(parameter)\n    {\n    case ZSTDMT_p_jobSize :\n        DEBUGLOG(4, \"ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i\", value);\n        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);\n    case ZSTDMT_p_overlapLog :\n        DEBUGLOG(4, \"ZSTDMT_p_overlapLog : %i\", value);\n        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);\n    case ZSTDMT_p_rsyncable :\n        DEBUGLOG(4, \"ZSTD_p_rsyncable : %i\", value);\n        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);\n    default :\n        return ERROR(parameter_unsupported);\n    }\n}\n\nsize_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)\n{\n    DEBUGLOG(4, \"ZSTDMT_setMTCtxParameter\");\n    return 
ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);\n}\n\nsize_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)\n{\n    switch (parameter) {\n    case ZSTDMT_p_jobSize:\n        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);\n    case ZSTDMT_p_overlapLog:\n        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);\n    case ZSTDMT_p_rsyncable:\n        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);\n    default:\n        return ERROR(parameter_unsupported);\n    }\n}\n\n/* Sets parameters relevant to the compression job,\n * initializing others to default values. */\nstatic ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params)\n{\n    ZSTD_CCtx_params jobParams = *params;\n    /* Clear parameters related to multithreading */\n    jobParams.forceWindow = 0;\n    jobParams.nbWorkers = 0;\n    jobParams.jobSize = 0;\n    jobParams.overlapLog = 0;\n    jobParams.rsyncable = 0;\n    memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));\n    memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));\n    return jobParams;\n}\n\n\n/* ZSTDMT_resize() :\n * @return : error code if fails, 0 on success */\nstatic size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)\n{\n    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);\n    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );\n    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);\n    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);\n    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);\n    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);\n    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);\n    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);\n    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);\n    return 
0;\n}\n\n\n/*! ZSTDMT_updateCParams_whileCompressing() :\n *  Updates a selected set of compression parameters, remaining compatible with currently active frame.\n *  New parameters will be applied to next compression job. */\nvoid ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)\n{\n    U32 const saved_wlog = mtctx->params.cParams.windowLog;   /* Do not modify windowLog while compressing */\n    int const compressionLevel = cctxParams->compressionLevel;\n    DEBUGLOG(5, \"ZSTDMT_updateCParams_whileCompressing (level:%i)\",\n                compressionLevel);\n    mtctx->params.compressionLevel = compressionLevel;\n    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0);\n        cParams.windowLog = saved_wlog;\n        mtctx->params.cParams = cParams;\n    }\n}\n\n/* ZSTDMT_getFrameProgression():\n * tells how much data has been consumed (input) and produced (output) for current frame.\n * able to count progression inside worker threads.\n * Note : mutex will be acquired during statistics collection inside workers. 
*/\nZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)\n{\n    ZSTD_frameProgression fps;\n    DEBUGLOG(5, \"ZSTDMT_getFrameProgression\");\n    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;\n    fps.consumed = mtctx->consumed;\n    fps.produced = fps.flushed = mtctx->produced;\n    fps.currentJobID = mtctx->nextJobID;\n    fps.nbActiveWorkers = 0;\n    {   unsigned jobNb;\n        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);\n        DEBUGLOG(6, \"ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)\",\n                    mtctx->doneJobID, lastJobNb, mtctx->jobReady)\n        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {\n            unsigned const wJobID = jobNb & mtctx->jobIDMask;\n            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];\n            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);\n            {   size_t const cResult = jobPtr->cSize;\n                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;\n                size_t const flushed = ZSTD_isError(cResult) ? 
0 : jobPtr->dstFlushed;\n                assert(flushed <= produced);\n                fps.ingested += jobPtr->src.size;\n                fps.consumed += jobPtr->consumed;\n                fps.produced += produced;\n                fps.flushed  += flushed;\n                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);\n            }\n            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);\n        }\n    }\n    return fps;\n}\n\n\nsize_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)\n{\n    size_t toFlush;\n    unsigned const jobID = mtctx->doneJobID;\n    assert(jobID <= mtctx->nextJobID);\n    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */\n\n    /* look into oldest non-fully-flushed job */\n    {   unsigned const wJobID = jobID & mtctx->jobIDMask;\n        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];\n        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);\n        {   size_t const cResult = jobPtr->cSize;\n            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;\n            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;\n            assert(flushed <= produced);\n            assert(jobPtr->consumed <= jobPtr->src.size);\n            toFlush = produced - flushed;\n            /* if toFlush==0, nothing is available to flush.\n             * However, jobID is expected to still be active:\n             * if jobID was already completed and fully flushed,\n             * ZSTDMT_flushProduced() should have already moved onto next job.\n             * Therefore, some input has not yet been consumed. 
*/\n            if (toFlush==0) {\n                assert(jobPtr->consumed < jobPtr->src.size);\n            }\n        }\n        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);\n    }\n\n    return toFlush;\n}\n\n\n/* ------------------------------------------ */\n/* =====   Multi-threaded compression   ===== */\n/* ------------------------------------------ */\n\nstatic unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)\n{\n    unsigned jobLog;\n    if (params->ldmParams.enableLdm) {\n        /* In Long Range Mode, the windowLog is typically oversized.\n         * In which case, it's preferable to determine the jobSize\n         * based on chainLog instead. */\n        jobLog = MAX(21, params->cParams.chainLog + 4);\n    } else {\n        jobLog = MAX(20, params->cParams.windowLog + 2);\n    }\n    return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);\n}\n\nstatic int ZSTDMT_overlapLog_default(ZSTD_strategy strat)\n{\n    switch(strat)\n    {\n        case ZSTD_btultra2:\n            return 9;\n        case ZSTD_btultra:\n        case ZSTD_btopt:\n            return 8;\n        case ZSTD_btlazy2:\n        case ZSTD_lazy2:\n            return 7;\n        case ZSTD_lazy:\n        case ZSTD_greedy:\n        case ZSTD_dfast:\n        case ZSTD_fast:\n        default:;\n    }\n    return 6;\n}\n\nstatic int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)\n{\n    assert(0 <= ovlog && ovlog <= 9);\n    if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);\n    return ovlog;\n}\n\nstatic size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)\n{\n    int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);\n    int ovLog = (overlapRLog >= 8) ? 
0 : (params->cParams.windowLog - overlapRLog);\n    assert(0 <= overlapRLog && overlapRLog <= 8);\n    if (params->ldmParams.enableLdm) {\n        /* In Long Range Mode, the windowLog is typically oversized.\n         * In which case, it's preferable to determine the jobSize\n         * based on chainLog instead.\n         * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */\n        ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)\n                - overlapRLog;\n    }\n    assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);\n    DEBUGLOG(4, \"overlapLog : %i\", params->overlapLog);\n    DEBUGLOG(4, \"overlap size : %i\", 1 << ovLog);\n    return (ovLog==0) ? 0 : (size_t)1 << ovLog;\n}\n\nstatic unsigned\nZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers)\n{\n    assert(nbWorkers>0);\n    {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);\n        size_t const jobMaxSize = jobSizeTarget << 2;\n        size_t const passSizeMax = jobMaxSize * nbWorkers;\n        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;\n        unsigned const nbJobsLarge = multiplier * nbWorkers;\n        unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;\n        unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);\n        return (multiplier>1) ? 
nbJobsLarge : nbJobsSmall;\n}   }\n\n/* ZSTDMT_compress_advanced_internal() :\n * This is a blocking function : it will only give back control to caller after finishing its compression job.\n */\nstatic size_t ZSTDMT_compress_advanced_internal(\n                ZSTDMT_CCtx* mtctx,\n                void* dst, size_t dstCapacity,\n          const void* src, size_t srcSize,\n          const ZSTD_CDict* cdict,\n                ZSTD_CCtx_params params)\n{\n    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(&params);\n    size_t const overlapSize = ZSTDMT_computeOverlapSize(&params);\n    unsigned const nbJobs = ZSTDMT_computeNbJobs(&params, srcSize, params.nbWorkers);\n    size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;\n    size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block */\n    const char* const srcStart = (const char*)src;\n    size_t remainingSrcSize = srcSize;\n    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? 
nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize));  /* presumes avgJobSize >= 256 KB, which should be the case */\n    size_t frameStartPos = 0, dstBufferPos = 0;\n    assert(jobParams.nbWorkers == 0);\n    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);\n\n    params.jobSize = (U32)avgJobSize;\n    DEBUGLOG(4, \"ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) \",\n                nbJobs, (U32)proposedJobSize, (U32)avgJobSize);\n\n    if ((nbJobs==1) | (params.nbWorkers<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */\n        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];\n        DEBUGLOG(4, \"ZSTDMT_compress_advanced_internal: fallback to single-thread mode\");\n        if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);\n        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams);\n    }\n\n    assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */\n    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );\n    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))\n        return ERROR(memory_allocation);\n\n    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */\n\n    {   unsigned u;\n        for (u=0; u<nbJobs; u++) {\n            size_t const jobSize = MIN(remainingSrcSize, avgJobSize);\n            size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);\n            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };\n            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;\n            size_t dictSize = u ? 
overlapSize : 0;\n\n            mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;\n            mtctx->jobs[u].prefix.size = dictSize;\n            mtctx->jobs[u].src.start = srcStart + frameStartPos;\n            mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0);  /* avoid job.src.size == 0 */\n            mtctx->jobs[u].consumed = 0;\n            mtctx->jobs[u].cSize = 0;\n            mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;\n            mtctx->jobs[u].fullFrameSize = srcSize;\n            mtctx->jobs[u].params = jobParams;\n            /* do not calculate checksum within sections, but write it in header for first section */\n            mtctx->jobs[u].dstBuff = dstBuffer;\n            mtctx->jobs[u].cctxPool = mtctx->cctxPool;\n            mtctx->jobs[u].bufPool = mtctx->bufPool;\n            mtctx->jobs[u].seqPool = mtctx->seqPool;\n            mtctx->jobs[u].serial = &mtctx->serial;\n            mtctx->jobs[u].jobID = u;\n            mtctx->jobs[u].firstJob = (u==0);\n            mtctx->jobs[u].lastJob = (u==nbJobs-1);\n\n            DEBUGLOG(5, \"ZSTDMT_compress_advanced_internal: posting job %u  (%u bytes)\", u, (U32)jobSize);\n            DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);\n            POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);\n\n            frameStartPos += jobSize;\n            dstBufferPos += dstBufferCapacity;\n            remainingSrcSize -= jobSize;\n    }   }\n\n    /* collect result */\n    {   size_t error = 0, dstPos = 0;\n        unsigned jobID;\n        for (jobID=0; jobID<nbJobs; jobID++) {\n            DEBUGLOG(5, \"waiting for job %u \", jobID);\n            ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);\n            while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {\n                DEBUGLOG(5, \"waiting for jobCompleted signal from job %u\", jobID);\n                ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, 
&mtctx->jobs[jobID].job_mutex);\n            }\n            ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);\n            DEBUGLOG(5, \"ready to write job %u \", jobID);\n\n            {   size_t const cSize = mtctx->jobs[jobID].cSize;\n                if (ZSTD_isError(cSize)) error = cSize;\n                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);\n                if (jobID) {   /* note : job 0 is written directly at dst, which is correct position */\n                    if (!error)\n                        memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize);  /* may overlap when job compressed within dst */\n                    if (jobID >= compressWithinDst) {  /* job compressed into its own buffer, which must be released */\n                        DEBUGLOG(5, \"releasing buffer %u>=%u\", jobID, compressWithinDst);\n                        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);\n                }   }\n                mtctx->jobs[jobID].dstBuff = g_nullBuffer;\n                mtctx->jobs[jobID].cSize = 0;\n                dstPos += cSize ;\n            }\n        }  /* for (jobID=0; jobID<nbJobs; jobID++) */\n\n        DEBUGLOG(4, \"checksumFlag : %u \", params.fParams.checksumFlag);\n        if (params.fParams.checksumFlag) {\n            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);\n            if (dstPos + 4 > dstCapacity) {\n                error = ERROR(dstSize_tooSmall);\n            } else {\n                DEBUGLOG(4, \"writing checksum : %08X \\n\", checksum);\n                MEM_writeLE32((char*)dst + dstPos, checksum);\n                dstPos += 4;\n        }   }\n\n        if (!error) DEBUGLOG(4, \"compressed size : %u  \", (U32)dstPos);\n        return error ? 
error : dstPos;\n    }\n}\n\nsize_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,\n                                void* dst, size_t dstCapacity,\n                          const void* src, size_t srcSize,\n                          const ZSTD_CDict* cdict,\n                                ZSTD_parameters params,\n                                int overlapLog)\n{\n    ZSTD_CCtx_params cctxParams = mtctx->params;\n    cctxParams.cParams = params.cParams;\n    cctxParams.fParams = params.fParams;\n    assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);\n    cctxParams.overlapLog = overlapLog;\n    return ZSTDMT_compress_advanced_internal(mtctx,\n                                             dst, dstCapacity,\n                                             src, srcSize,\n                                             cdict, cctxParams);\n}\n\n\nsize_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,\n                           void* dst, size_t dstCapacity,\n                     const void* src, size_t srcSize,\n                           int compressionLevel)\n{\n    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);\n    int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);\n    params.fParams.contentSizeFlag = 1;\n    return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);\n}\n\n\n/* ====================================== */\n/* =======      Streaming API     ======= */\n/* ====================================== */\n\nsize_t ZSTDMT_initCStream_internal(\n        ZSTDMT_CCtx* mtctx,\n        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,\n        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,\n        unsigned long long pledgedSrcSize)\n{\n    DEBUGLOG(4, \"ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)\",\n                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);\n\n    /* params 
supposed partially fully validated at this point */\n    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));\n    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */\n\n    /* init */\n    if (params.nbWorkers != mtctx->params.nbWorkers)\n        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );\n\n    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;\n    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;\n\n    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */\n    if (mtctx->singleBlockingThread) {\n        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(&params);\n        DEBUGLOG(5, \"ZSTDMT_initCStream_internal: switch to single blocking thread mode\");\n        assert(singleThreadParams.nbWorkers == 0);\n        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],\n                                         dict, dictSize, cdict,\n                                         &singleThreadParams, pledgedSrcSize);\n    }\n\n    DEBUGLOG(4, \"ZSTDMT_initCStream_internal: %u workers\", params.nbWorkers);\n\n    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */\n        ZSTDMT_waitForAllJobsCompleted(mtctx);\n        ZSTDMT_releaseAllJobResources(mtctx);\n        mtctx->allJobsCompleted = 1;\n    }\n\n    mtctx->params = params;\n    mtctx->frameContentSize = pledgedSrcSize;\n    if (dict) {\n        ZSTD_freeCDict(mtctx->cdictLocal);\n        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,\n                                                    ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */\n                                                    params.cParams, mtctx->cMem);\n        mtctx->cdict = mtctx->cdictLocal;\n        if (mtctx->cdictLocal == 
NULL) return ERROR(memory_allocation);\n    } else {\n        ZSTD_freeCDict(mtctx->cdictLocal);\n        mtctx->cdictLocal = NULL;\n        mtctx->cdict = cdict;\n    }\n\n    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);\n    DEBUGLOG(4, \"overlapLog=%i => %u KB\", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));\n    mtctx->targetSectionSize = params.jobSize;\n    if (mtctx->targetSectionSize == 0) {\n        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);\n    }\n    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);\n\n    if (params.rsyncable) {\n        /* Aim for the targetsectionSize as the average job size. */\n        U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);\n        U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;\n        assert(jobSizeMB >= 1);\n        DEBUGLOG(4, \"rsyncLog = %u\", rsyncBits);\n        mtctx->rsync.hash = 0;\n        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;\n        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);\n    }\n    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */\n    DEBUGLOG(4, \"Job Size : %u KB (note : set to %u)\", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);\n    DEBUGLOG(4, \"inBuff Size : %u KB\", (U32)(mtctx->targetSectionSize>>10));\n    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));\n    {\n        /* If ldm is enabled we need windowSize space. */\n        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;\n        /* Two buffers of slack, plus extra space for the overlap\n         * This is the minimum slack that LDM works with. One extra because\n         * flush might waste up to targetSectionSize-1 bytes. 
Another extra\n         * for the overlap (if > 0), then one to fill which doesn't overlap\n         * with the LDM window.\n         */\n        size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);\n        size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;\n        /* Compute the total size, and always have enough slack */\n        size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);\n        size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;\n        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;\n        if (mtctx->roundBuff.capacity < capacity) {\n            if (mtctx->roundBuff.buffer)\n                ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);\n            mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem);\n            if (mtctx->roundBuff.buffer == NULL) {\n                mtctx->roundBuff.capacity = 0;\n                return ERROR(memory_allocation);\n            }\n            mtctx->roundBuff.capacity = capacity;\n        }\n    }\n    DEBUGLOG(4, \"roundBuff capacity : %u KB\", (U32)(mtctx->roundBuff.capacity>>10));\n    mtctx->roundBuff.pos = 0;\n    mtctx->inBuff.buffer = g_nullBuffer;\n    mtctx->inBuff.filled = 0;\n    mtctx->inBuff.prefix = kNullRange;\n    mtctx->doneJobID = 0;\n    mtctx->nextJobID = 0;\n    mtctx->frameEnded = 0;\n    mtctx->allJobsCompleted = 0;\n    mtctx->consumed = 0;\n    mtctx->produced = 0;\n    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize))\n        return ERROR(memory_allocation);\n    return 0;\n}\n\nsize_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,\n                             const void* dict, size_t dictSize,\n                                   ZSTD_parameters params,\n                                   unsigned long long pledgedSrcSize)\n{\n    ZSTD_CCtx_params cctxParams = mtctx->params;  /* retrieve sticky params */\n    DEBUGLOG(4, \"ZSTDMT_initCStream_advanced 
(pledgedSrcSize=%u)\", (U32)pledgedSrcSize);\n    cctxParams.cParams = params.cParams;\n    cctxParams.fParams = params.fParams;\n    return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,\n                                       cctxParams, pledgedSrcSize);\n}\n\nsize_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,\n                               const ZSTD_CDict* cdict,\n                                     ZSTD_frameParameters fParams,\n                                     unsigned long long pledgedSrcSize)\n{\n    ZSTD_CCtx_params cctxParams = mtctx->params;\n    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */\n    cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);\n    cctxParams.fParams = fParams;\n    return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,\n                                       cctxParams, pledgedSrcSize);\n}\n\n\n/* ZSTDMT_resetCStream() :\n * pledgedSrcSize can be zero == unknown (for the time being)\n * prefer using ZSTD_CONTENTSIZE_UNKNOWN,\n * as `0` might mean \"empty\" in the future */\nsize_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)\n{\n    if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;\n    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params,\n                                       pledgedSrcSize);\n}\n\nsize_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {\n    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);\n    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */\n    DEBUGLOG(4, \"ZSTDMT_initCStream (cLevel=%i)\", compressionLevel);\n    cctxParams.cParams = params.cParams;\n    cctxParams.fParams = params.fParams;\n    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);\n}\n\n\n/* 
ZSTDMT_writeLastEmptyBlock()\n * Write a single empty block with an end-of-frame to finish a frame.\n * Job must be created from streaming variant.\n * This function is always successful if expected conditions are fulfilled.\n */\nstatic void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)\n{\n    assert(job->lastJob == 1);\n    assert(job->src.size == 0);   /* last job is empty -> will be simplified into a last empty block */\n    assert(job->firstJob == 0);   /* cannot be first job, as it also needs to create frame header */\n    assert(job->dstBuff.start == NULL);   /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */\n    job->dstBuff = ZSTDMT_getBuffer(job->bufPool);\n    if (job->dstBuff.start == NULL) {\n      job->cSize = ERROR(memory_allocation);\n      return;\n    }\n    assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize);   /* no buffer should ever be that small */\n    job->src = kNullRange;\n    job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);\n    assert(!ZSTD_isError(job->cSize));\n    assert(job->consumed == 0);\n}\n\nstatic size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)\n{\n    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;\n    int const endFrame = (endOp == ZSTD_e_end);\n\n    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {\n        DEBUGLOG(5, \"ZSTDMT_createCompressionJob: will not create new job : table is full\");\n        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));\n        return 0;\n    }\n\n    if (!mtctx->jobReady) {\n        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;\n        DEBUGLOG(5, \"ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload \",\n                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);\n        mtctx->jobs[jobID].src.start = src;\n        
mtctx->jobs[jobID].src.size = srcSize;\n        assert(mtctx->inBuff.filled >= srcSize);\n        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;\n        mtctx->jobs[jobID].consumed = 0;\n        mtctx->jobs[jobID].cSize = 0;\n        mtctx->jobs[jobID].params = mtctx->params;\n        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;\n        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;\n        mtctx->jobs[jobID].dstBuff = g_nullBuffer;\n        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;\n        mtctx->jobs[jobID].bufPool = mtctx->bufPool;\n        mtctx->jobs[jobID].seqPool = mtctx->seqPool;\n        mtctx->jobs[jobID].serial = &mtctx->serial;\n        mtctx->jobs[jobID].jobID = mtctx->nextJobID;\n        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);\n        mtctx->jobs[jobID].lastJob = endFrame;\n        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);\n        mtctx->jobs[jobID].dstFlushed = 0;\n\n        /* Update the round buffer pos and clear the input buffer to be reset */\n        mtctx->roundBuff.pos += srcSize;\n        mtctx->inBuff.buffer = g_nullBuffer;\n        mtctx->inBuff.filled = 0;\n        /* Set the prefix */\n        if (!endFrame) {\n            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);\n            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;\n            mtctx->inBuff.prefix.size = newPrefixSize;\n        } else {   /* endFrame==1 => no need for another input buffer */\n            mtctx->inBuff.prefix = kNullRange;\n            mtctx->frameEnded = endFrame;\n            if (mtctx->nextJobID == 0) {\n                /* single job exception : checksum is already calculated directly within worker thread */\n                mtctx->params.fParams.checksumFlag = 0;\n        }   }\n\n        if ( (srcSize == 0)\n          && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) 
{\n            DEBUGLOG(5, \"ZSTDMT_createCompressionJob: creating a last empty block to end frame\");\n            assert(endOp == ZSTD_e_end);  /* only possible case : need to end the frame with an empty last block */\n            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);\n            mtctx->nextJobID++;\n            return 0;\n        }\n    }\n\n    DEBUGLOG(5, \"ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))\",\n                mtctx->nextJobID,\n                (U32)mtctx->jobs[jobID].src.size,\n                mtctx->jobs[jobID].lastJob,\n                mtctx->nextJobID,\n                jobID);\n    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {\n        mtctx->nextJobID++;\n        mtctx->jobReady = 0;\n    } else {\n        DEBUGLOG(5, \"ZSTDMT_createCompressionJob: no worker available for job %u\", mtctx->nextJobID);\n        mtctx->jobReady = 1;\n    }\n    return 0;\n}\n\n\n/*! ZSTDMT_flushProduced() :\n *  flush whatever data has been produced but not yet flushed in current job.\n *  move to next job if current one is fully flushed.\n * `output` : `pos` will be updated with amount of data flushed .\n * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush .\n * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */\nstatic size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)\n{\n    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;\n    DEBUGLOG(5, \"ZSTDMT_flushProduced (blocking:%u , job %u <= %u)\",\n                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);\n    assert(output->size >= output->pos);\n\n    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);\n    if (  blockToFlush\n      && (mtctx->doneJobID < mtctx->nextJobID) ) {\n        assert(mtctx->jobs[wJobID].dstFlushed <= 
mtctx->jobs[wJobID].cSize);\n        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {  /* nothing to flush */\n            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {\n                DEBUGLOG(5, \"job %u is completely consumed (%u == %u) => don't wait for cond, there will be none\",\n                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);\n                break;\n            }\n            DEBUGLOG(5, \"waiting for something to flush from job %u (currently flushed: %u bytes)\",\n                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);\n            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);  /* block when nothing to flush but some to come */\n    }   }\n\n    /* try to flush something */\n    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */\n        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */\n        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */\n        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);\n        if (ZSTD_isError(cSize)) {\n            DEBUGLOG(5, \"ZSTDMT_flushProduced: job %u : compression error detected : %s\",\n                        mtctx->doneJobID, ZSTD_getErrorName(cSize));\n            ZSTDMT_waitForAllJobsCompleted(mtctx);\n            ZSTDMT_releaseAllJobResources(mtctx);\n            return cSize;\n        }\n        /* add frame checksum if necessary (can only happen once) */\n        assert(srcConsumed <= srcSize);\n        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */\n          && mtctx->jobs[wJobID].frameChecksumNeeded ) {\n            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);\n            DEBUGLOG(4, \"ZSTDMT_flushProduced: writing checksum : %08X 
\\n\", checksum);\n            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);\n            cSize += 4;\n            mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */\n            mtctx->jobs[wJobID].frameChecksumNeeded = 0;\n        }\n\n        if (cSize > 0) {   /* compression is ongoing or completed */\n            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);\n            DEBUGLOG(5, \"ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)\",\n                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);\n            assert(mtctx->doneJobID < mtctx->nextJobID);\n            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);\n            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);\n            if (toFlush > 0) {\n                memcpy((char*)output->dst + output->pos,\n                    (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,\n                    toFlush);\n            }\n            output->pos += toFlush;\n            mtctx->jobs[wJobID].dstFlushed += toFlush;  /* can write : this value is only used by mtctx */\n\n            if ( (srcConsumed == srcSize)    /* job is completed */\n              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */\n                DEBUGLOG(5, \"Job %u completed (%u bytes), moving to next one\",\n                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);\n                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);\n                DEBUGLOG(5, \"dstBuffer released\");\n                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;\n                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered \"not started\" in future check */\n                
mtctx->consumed += srcSize;\n                mtctx->produced += cSize;\n                mtctx->doneJobID++;\n        }   }\n\n        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */\n        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);\n        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */\n    }\n    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */\n    if (mtctx->jobReady) return 1;      /* one job is ready to push, just not yet in the list */\n    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */\n    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */\n    if (end == ZSTD_e_end) return !mtctx->frameEnded;  /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? 
*/\n    return 0;   /* internal buffers fully flushed */\n}\n\n/**\n * Returns the range of data used by the earliest job that is not yet complete.\n * If the data of the first job is broken up into two segments, we cover both\n * sections.\n */\nstatic range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)\n{\n    unsigned const firstJobID = mtctx->doneJobID;\n    unsigned const lastJobID = mtctx->nextJobID;\n    unsigned jobID;\n\n    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {\n        unsigned const wJobID = jobID & mtctx->jobIDMask;\n        size_t consumed;\n\n        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);\n        consumed = mtctx->jobs[wJobID].consumed;\n        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);\n\n        if (consumed < mtctx->jobs[wJobID].src.size) {\n            range_t range = mtctx->jobs[wJobID].prefix;\n            if (range.size == 0) {\n                /* Empty prefix */\n                range = mtctx->jobs[wJobID].src;\n            }\n            /* Job source in multiple segments not supported yet */\n            assert(range.start <= mtctx->jobs[wJobID].src.start);\n            return range;\n        }\n    }\n    return kNullRange;\n}\n\n/**\n * Returns non-zero iff buffer and range overlap.\n */\nstatic int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)\n{\n    BYTE const* const bufferStart = (BYTE const*)buffer.start;\n    BYTE const* const bufferEnd = bufferStart + buffer.capacity;\n    BYTE const* const rangeStart = (BYTE const*)range.start;\n    BYTE const* const rangeEnd = range.size != 0 ? 
rangeStart + range.size : rangeStart;\n\n    if (rangeStart == NULL || bufferStart == NULL)\n        return 0;\n    /* Empty ranges cannot overlap */\n    if (bufferStart == bufferEnd || rangeStart == rangeEnd)\n        return 0;\n\n    return bufferStart < rangeEnd && rangeStart < bufferEnd;\n}\n\nstatic int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)\n{\n    range_t extDict;\n    range_t prefix;\n\n    DEBUGLOG(5, \"ZSTDMT_doesOverlapWindow\");\n    extDict.start = window.dictBase + window.lowLimit;\n    extDict.size = window.dictLimit - window.lowLimit;\n\n    prefix.start = window.base + window.dictLimit;\n    prefix.size = window.nextSrc - (window.base + window.dictLimit);\n    DEBUGLOG(5, \"extDict [0x%zx, 0x%zx)\",\n                (size_t)extDict.start,\n                (size_t)extDict.start + extDict.size);\n    DEBUGLOG(5, \"prefix  [0x%zx, 0x%zx)\",\n                (size_t)prefix.start,\n                (size_t)prefix.start + prefix.size);\n\n    return ZSTDMT_isOverlapped(buffer, extDict)\n        || ZSTDMT_isOverlapped(buffer, prefix);\n}\n\nstatic void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)\n{\n    if (mtctx->params.ldmParams.enableLdm) {\n        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;\n        DEBUGLOG(5, \"ZSTDMT_waitForLdmComplete\");\n        DEBUGLOG(5, \"source  [0x%zx, 0x%zx)\",\n                    (size_t)buffer.start,\n                    (size_t)buffer.start + buffer.capacity);\n        ZSTD_PTHREAD_MUTEX_LOCK(mutex);\n        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {\n            DEBUGLOG(5, \"Waiting for LDM to finish...\");\n            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);\n        }\n        DEBUGLOG(6, \"Done waiting for LDM to finish\");\n        ZSTD_pthread_mutex_unlock(mutex);\n    }\n}\n\n/**\n * Attempts to set the inBuff to the next section to fill.\n * If any part of the new section is still in use we give up.\n 
* Returns non-zero if the buffer is filled.\n */\nstatic int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)\n{\n    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);\n    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;\n    size_t const target = mtctx->targetSectionSize;\n    buffer_t buffer;\n\n    DEBUGLOG(5, \"ZSTDMT_tryGetInputRange\");\n    assert(mtctx->inBuff.buffer.start == NULL);\n    assert(mtctx->roundBuff.capacity >= target);\n\n    if (spaceLeft < target) {\n        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.\n         * Simply copy the prefix to the beginning in that case.\n         */\n        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;\n        size_t const prefixSize = mtctx->inBuff.prefix.size;\n\n        buffer.start = start;\n        buffer.capacity = prefixSize;\n        if (ZSTDMT_isOverlapped(buffer, inUse)) {\n            DEBUGLOG(5, \"Waiting for buffer...\");\n            return 0;\n        }\n        ZSTDMT_waitForLdmComplete(mtctx, buffer);\n        memmove(start, mtctx->inBuff.prefix.start, prefixSize);\n        mtctx->inBuff.prefix.start = start;\n        mtctx->roundBuff.pos = prefixSize;\n    }\n    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;\n    buffer.capacity = target;\n\n    if (ZSTDMT_isOverlapped(buffer, inUse)) {\n        DEBUGLOG(5, \"Waiting for buffer...\");\n        return 0;\n    }\n    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));\n\n    ZSTDMT_waitForLdmComplete(mtctx, buffer);\n\n    DEBUGLOG(5, \"Using prefix range [%zx, %zx)\",\n                (size_t)mtctx->inBuff.prefix.start,\n                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);\n    DEBUGLOG(5, \"Using source range [%zx, %zx)\",\n                (size_t)buffer.start,\n                (size_t)buffer.start + buffer.capacity);\n\n\n    mtctx->inBuff.buffer = buffer;\n    mtctx->inBuff.filled = 0;\n    assert(mtctx->roundBuff.pos + buffer.capacity 
<= mtctx->roundBuff.capacity);\n    return 1;\n}\n\ntypedef struct {\n  size_t toLoad;  /* The number of bytes to load from the input. */\n  int flush;      /* Boolean declaring if we must flush because we found a synchronization point. */\n} syncPoint_t;\n\n/**\n * Searches through the input for a synchronization point. If one is found, we\n * will instruct the caller to flush, and return the number of bytes to load.\n * Otherwise, we will load as many bytes as possible and instruct the caller\n * to continue as normal.\n */\nstatic syncPoint_t\nfindSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)\n{\n    BYTE const* const istart = (BYTE const*)input.src + input.pos;\n    U64 const primePower = mtctx->rsync.primePower;\n    U64 const hitMask = mtctx->rsync.hitMask;\n\n    syncPoint_t syncPoint;\n    U64 hash;\n    BYTE const* prev;\n    size_t pos;\n\n    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);\n    syncPoint.flush = 0;\n    if (!mtctx->params.rsyncable)\n        /* Rsync is disabled. */\n        return syncPoint;\n    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)\n        /* Not enough to compute the hash.\n         * We will miss any synchronization points in this RSYNC_LENGTH byte\n         * window. However, since it depends only in the internal buffers, if the\n         * state is already synchronized, we will remain synchronized.\n         * Additionally, the probability that we miss a synchronization point is\n         * low: RSYNC_LENGTH / targetSectionSize.\n         */\n        return syncPoint;\n    /* Initialize the loop variables. 
*/\n    if (mtctx->inBuff.filled >= RSYNC_LENGTH) {\n        /* We have enough bytes buffered to initialize the hash.\n         * Start scanning at the beginning of the input.\n         */\n        pos = 0;\n        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;\n        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);\n    } else {\n        /* We don't have enough bytes buffered to initialize the hash, but\n         * we know we have at least RSYNC_LENGTH bytes total.\n         * Start scanning after the first RSYNC_LENGTH bytes less the bytes\n         * already buffered.\n         */\n        pos = RSYNC_LENGTH - mtctx->inBuff.filled;\n        prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;\n        hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);\n        hash = ZSTD_rollingHash_append(hash, istart, pos);\n    }\n    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll\n     * through the input. If we hit a synchronization point, then cut the\n     * job off, and tell the compressor to flush the job. Otherwise, load\n     * all the bytes and continue as normal.\n     * If we go too long without a synchronization point (targetSectionSize)\n     * then a block will be emitted anyways, but this is okay, since if we\n     * are already synchronized we will remain synchronized.\n     */\n    for (; pos < syncPoint.toLoad; ++pos) {\n        BYTE const toRemove = pos < RSYNC_LENGTH ? 
prev[pos] : istart[pos - RSYNC_LENGTH];\n        /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */\n        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);\n        if ((hash & hitMask) == hitMask) {\n            syncPoint.toLoad = pos + 1;\n            syncPoint.flush = 1;\n            break;\n        }\n    }\n    return syncPoint;\n}\n\nsize_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)\n{\n    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;\n    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;\n    return hintInSize;\n}\n\n/** ZSTDMT_compressStream_generic() :\n *  internal use only - exposed to be invoked from zstd_compress.c\n *  assumption : output and input are valid (pos <= size)\n * @return : minimum amount of data remaining to flush, 0 if none */\nsize_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,\n                                     ZSTD_outBuffer* output,\n                                     ZSTD_inBuffer* input,\n                                     ZSTD_EndDirective endOp)\n{\n    unsigned forwardInputProgress = 0;\n    DEBUGLOG(5, \"ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)\",\n                (U32)endOp, (U32)(input->size - input->pos));\n    assert(output->pos <= output->size);\n    assert(input->pos  <= input->size);\n\n    if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */\n        return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);\n    }\n\n    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {\n        /* current frame being ended. 
Only flush/end are allowed */\n        return ERROR(stage_wrong);\n    }\n\n    /* single-pass shortcut (note : synchronous-mode) */\n    if ( (!mtctx->params.rsyncable)   /* rsyncable mode is disabled */\n      && (mtctx->nextJobID == 0)      /* just started */\n      && (mtctx->inBuff.filled == 0)  /* nothing buffered */\n      && (!mtctx->jobReady)           /* no job already created */\n      && (endOp == ZSTD_e_end)        /* end order */\n      && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */\n        size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,\n                (char*)output->dst + output->pos, output->size - output->pos,\n                (const char*)input->src + input->pos, input->size - input->pos,\n                mtctx->cdict, mtctx->params);\n        if (ZSTD_isError(cSize)) return cSize;\n        input->pos = input->size;\n        output->pos += cSize;\n        mtctx->allJobsCompleted = 1;\n        mtctx->frameEnded = 1;\n        return 0;\n    }\n\n    /* fill input buffer */\n    if ( (!mtctx->jobReady)\n      && (input->size > input->pos) ) {   /* support NULL input */\n        if (mtctx->inBuff.buffer.start == NULL) {\n            assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */\n            if (!ZSTDMT_tryGetInputRange(mtctx)) {\n                /* It is only possible for this operation to fail if there are\n                 * still compression jobs ongoing.\n                 */\n                DEBUGLOG(5, \"ZSTDMT_tryGetInputRange failed\");\n                assert(mtctx->doneJobID != mtctx->nextJobID);\n            } else\n                DEBUGLOG(5, \"ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p\", mtctx->inBuff.buffer.start);\n        }\n        if (mtctx->inBuff.buffer.start != NULL) {\n            syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);\n            if (syncPoint.flush && endOp 
== ZSTD_e_continue) {\n                endOp = ZSTD_e_flush;\n            }\n            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);\n            DEBUGLOG(5, \"ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u\",\n                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);\n            memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);\n            input->pos += syncPoint.toLoad;\n            mtctx->inBuff.filled += syncPoint.toLoad;\n            forwardInputProgress = syncPoint.toLoad>0;\n        }\n        if ((input->pos < input->size) && (endOp == ZSTD_e_end))\n            endOp = ZSTD_e_flush;   /* can't end now : not all input consumed */\n    }\n\n    if ( (mtctx->jobReady)\n      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)  /* filled enough : let's compress */\n      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))  /* something to flush : let's go */\n      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */\n        size_t const jobSize = mtctx->inBuff.filled;\n        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);\n        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );\n    }\n\n    /* check for potential compressed data ready to be flushed */\n    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */\n        if (input->pos < input->size) return MAX(remainingToFlush, 1);  /* input not consumed : do not end flush yet */\n        DEBUGLOG(5, \"end of ZSTDMT_compressStream_generic: remainingToFlush = %u\", (U32)remainingToFlush);\n        return remainingToFlush;\n    }\n}\n\n\nsize_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)\n{\n   
 FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );\n\n    /* recommended next input size : fill current input buffer */\n    return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero when input buffer is fully filled and no more availability to create new job */\n}\n\n\nstatic size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)\n{\n    size_t const srcSize = mtctx->inBuff.filled;\n    DEBUGLOG(5, \"ZSTDMT_flushStream_internal\");\n\n    if ( mtctx->jobReady     /* one job ready for a worker to pick up */\n      || (srcSize > 0)       /* still some data within input buffer */\n      || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */\n           DEBUGLOG(5, \"ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)\",\n                        (U32)srcSize, (U32)endFrame);\n        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );\n    }\n\n    /* check if there is any data available to flush */\n    return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);\n}\n\n\nsize_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)\n{\n    DEBUGLOG(5, \"ZSTDMT_flushStream\");\n    if (mtctx->singleBlockingThread)\n        return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);\n    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);\n}\n\nsize_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)\n{\n    DEBUGLOG(4, \"ZSTDMT_endStream\");\n    if (mtctx->singleBlockingThread)\n        return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);\n    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/compress/zstdmt_compress.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n #ifndef ZSTDMT_COMPRESS_H\n #define ZSTDMT_COMPRESS_H\n\n #if defined (__cplusplus)\n extern \"C\" {\n #endif\n\n\n/* Note : This is an internal API.\n *        These APIs used to be exposed with ZSTDLIB_API,\n *        because it used to be the only way to invoke MT compression.\n *        Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2()\n *        instead.\n *\n *        If you depend on these APIs and can't switch, then define\n *        ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.\n *        However, we may completely remove these functions in a future\n *        release, so please switch soon.\n *\n *        This API requires ZSTD_MULTITHREAD to be defined during compilation,\n *        otherwise ZSTDMT_createCCtx*() will fail.\n */\n\n#ifdef ZSTD_LEGACY_MULTITHREADED_API\n#  define ZSTDMT_API ZSTDLIB_API\n#else\n#  define ZSTDMT_API\n#endif\n\n/* ===   Dependencies   === */\n#include <stddef.h>                /* size_t */\n#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */\n#include \"zstd.h\"            /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */\n\n\n/* ===   Constants   === */\n#ifndef ZSTDMT_NBWORKERS_MAX\n#  define ZSTDMT_NBWORKERS_MAX 200\n#endif\n#ifndef ZSTDMT_JOBSIZE_MIN\n#  define ZSTDMT_JOBSIZE_MIN (1 MB)\n#endif\n#define ZSTDMT_JOBLOG_MAX   (MEM_32bits() ? 29 : 30)\n#define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? 
(512 MB) : (1024 MB))\n\n\n/* ===   Memory management   === */\ntypedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;\n/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */\nZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);\n/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */\nZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,\n                                                    ZSTD_customMem cMem);\nZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);\n\nZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);\n\n\n/* ===   Simple one-pass compression function   === */\n\nZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,\n                                       void* dst, size_t dstCapacity,\n                                 const void* src, size_t srcSize,\n                                       int compressionLevel);\n\n\n\n/* ===   Streaming functions   === */\n\nZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);\nZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. 
Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean \"empty\" */\n\nZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);\nZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);\n\nZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */\nZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */\n\n\n/* ===   Advanced functions and parameters  === */\n\nZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,\n                                          void* dst, size_t dstCapacity,\n                                    const void* src, size_t srcSize,\n                                    const ZSTD_CDict* cdict,\n                                          ZSTD_parameters params,\n                                          int overlapLog);\n\nZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,\n                                        const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */\n                                        ZSTD_parameters params,\n                                        unsigned long long pledgedSrcSize);  /* pledgedSrcSize is optional and can be zero == unknown */\n\nZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,\n                                        const ZSTD_CDict* cdict,\n                                        ZSTD_frameParameters fparams,\n                                        unsigned long long pledgedSrcSize);  /* note : zero means empty */\n\n/* ZSTDMT_parameter :\n * List of parameters that can be set using 
ZSTDMT_setMTCtxParameter() */\ntypedef enum {\n    ZSTDMT_p_jobSize,     /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */\n    ZSTDMT_p_overlapLog,  /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a \"sticky\" parameter : its value will be re-used on next compression job */\n    ZSTDMT_p_rsyncable    /* Enables rsyncable mode. */\n} ZSTDMT_parameter;\n\n/* ZSTDMT_setMTCtxParameter() :\n * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.\n * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__\n * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.\n * @return : 0, or an error code (which can be tested using ZSTD_isError()) */\nZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);\n\n/* ZSTDMT_getMTCtxParameter() :\n * Query the ZSTDMT_CCtx for a parameter value.\n * @return : 0, or an error code (which can be tested using ZSTD_isError()) */\nZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);\n\n\n/*! 
ZSTDMT_compressStream_generic() :\n *  Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()\n *  depending on flush directive.\n * @return : minimum amount of data still to be flushed\n *           0 if fully flushed\n *           or an error code\n *  note : needs to be init using any ZSTD_initCStream*() variant */\nZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,\n                                                ZSTD_outBuffer* output,\n                                                ZSTD_inBuffer* input,\n                                                ZSTD_EndDirective endOp);\n\n\n/* ========================================================\n * ===  Private interface, for use by ZSTD_compress.c   ===\n * ===  Not exposed in libzstd. Never invoke directly   ===\n * ======================================================== */\n\n /*! ZSTDMT_toFlushNow()\n  *  Tell how many bytes are ready to be flushed immediately.\n  *  Probe the oldest active job (not yet entirely flushed) and check its output buffer.\n  *  If return 0, it means there is no active job,\n  *  or, it means oldest job is still active, but everything produced has been flushed so far,\n  *  therefore flushing is limited by speed of oldest job. */\nsize_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);\n\n/*! ZSTDMT_CCtxParam_setMTCtxParameter()\n *  like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */\nsize_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value);\n\n/*! ZSTDMT_CCtxParam_setNbWorkers()\n *  Set nbWorkers, and clamp it.\n *  Also reset jobSize and overlapLog */\nsize_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);\n\n/*! ZSTDMT_updateCParams_whileCompressing() :\n *  Updates only a selected set of compression parameters, to remain compatible with current frame.\n *  New parameters will be applied to next compression job. 
*/\nvoid ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);\n\n/*! ZSTDMT_getFrameProgression():\n *  tells how much data has been consumed (input) and produced (output) for current frame.\n *  able to count progression inside worker threads.\n */\nZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);\n\n\n/*! ZSTDMT_initCStream_internal() :\n *  Private use only. Init streaming operation.\n *  expects params to be valid.\n *  must receive dict, or cdict, or none, but not both.\n *  @return : 0, or an error code */\nsize_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,\n                    const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,\n                    const ZSTD_CDict* cdict,\n                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif   /* ZSTDMT_COMPRESS_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/decompress/huf_decompress.c",
    "content": "/* ******************************************************************\n   huff0 huffman decoder,\n   part of Finite State Entropy library\n   Copyright (C) 2013-present, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n\n/* **************************************************************\n*  Dependencies\n****************************************************************/\n#include <string.h>     /* memcpy, memset */\n#include \"compiler.h\"\n#include \"bitstream.h\"  /* BIT_* */\n#include \"fse.h\"        /* to compress headers */\n#define HUF_STATIC_LINKING_ONLY\n#include \"huf.h\"\n#include \"error_private.h\"\n\n/* **************************************************************\n*  Macros\n****************************************************************/\n\n/* These two optional macros force the use one way or another of the two\n * Huffman decompression implementations. 
You can't force in both directions\n * at the same time.\n */\n#if defined(HUF_FORCE_DECOMPRESS_X1) && \\\n    defined(HUF_FORCE_DECOMPRESS_X2)\n#error \"Cannot force the use of the X1 and X2 decoders at the same time!\"\n#endif\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\n#define HUF_isError ERR_isError\n#ifndef CHECK_F\n#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }\n#endif\n\n\n/* **************************************************************\n*  Byte alignment for workSpace management\n****************************************************************/\n#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)\n#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))\n\n\n/* **************************************************************\n*  BMI2 Variant Wrappers\n****************************************************************/\n#if DYNAMIC_BMI2\n\n#define HUF_DGEN(fn)                                                        \\\n                                                                            \\\n    static size_t fn##_default(                                             \\\n                  void* dst,  size_t dstSize,                               \\\n            const void* cSrc, size_t cSrcSize,                              \\\n            const HUF_DTable* DTable)                                       \\\n    {                                                                       \\\n        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \\\n    }                                                                       \\\n                                                                            \\\n    static TARGET_ATTRIBUTE(\"bmi2\") size_t fn##_bmi2(                       \\\n                  void* dst,  size_t dstSize,                               \\\n            const 
void* cSrc, size_t cSrcSize,                              \\\n            const HUF_DTable* DTable)                                       \\\n    {                                                                       \\\n        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \\\n    }                                                                       \\\n                                                                            \\\n    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \\\n                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \\\n    {                                                                       \\\n        if (bmi2) {                                                         \\\n            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \\\n        }                                                                   \\\n        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \\\n    }\n\n#else\n\n#define HUF_DGEN(fn)                                                        \\\n    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \\\n                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \\\n    {                                                                       \\\n        (void)bmi2;                                                         \\\n        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \\\n    }\n\n#endif\n\n\n/*-***************************/\n/*  generic DTableDesc       */\n/*-***************************/\ntypedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;\n\nstatic DTableDesc HUF_getDTableDesc(const HUF_DTable* table)\n{\n    DTableDesc dtd;\n    memcpy(&dtd, table, sizeof(dtd));\n    return dtd;\n}\n\n\n#ifndef HUF_FORCE_DECOMPRESS_X2\n\n/*-***************************/\n/*  single-symbol decoding   
*/\n/*-***************************/\ntypedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1;   /* single-symbol decoding */\n\nsize_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)\n{\n    U32 tableLog = 0;\n    U32 nbSymbols = 0;\n    size_t iSize;\n    void* const dtPtr = DTable + 1;\n    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;\n\n    U32* rankVal;\n    BYTE* huffWeight;\n    size_t spaceUsed32 = 0;\n\n    rankVal = (U32 *)workSpace + spaceUsed32;\n    spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;\n    huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);\n    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;\n\n    if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);\n\n    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));\n    /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */\n\n    iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* Table header */\n    {   DTableDesc dtd = HUF_getDTableDesc(DTable);\n        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */\n        dtd.tableType = 0;\n        dtd.tableLog = (BYTE)tableLog;\n        memcpy(DTable, &dtd, sizeof(dtd));\n    }\n\n    /* Calculate starting value for each rank */\n    {   U32 n, nextRankStart = 0;\n        for (n=1; n<tableLog+1; n++) {\n            U32 const current = nextRankStart;\n            nextRankStart += (rankVal[n] << (n-1));\n            rankVal[n] = current;\n    }   }\n\n    /* fill DTable */\n    {   U32 n;\n        size_t const nEnd = nbSymbols;\n        for (n=0; n<nEnd; n++) {\n            size_t const w = huffWeight[n];\n            size_t const length = (1 << w) >> 1;\n            size_t const uStart = rankVal[w];\n            
size_t const uEnd = uStart + length;\n            size_t u;\n            HUF_DEltX1 D;\n            D.byte = (BYTE)n;\n            D.nbBits = (BYTE)(tableLog + 1 - w);\n            rankVal[w] = (U32)uEnd;\n            if (length < 4) {\n                /* Use length in the loop bound so the compiler knows it is short. */\n                for (u = 0; u < length; ++u)\n                    dt[uStart + u] = D;\n            } else {\n                /* Unroll the loop 4 times, we know it is a power of 2. */\n                for (u = uStart; u < uEnd; u += 4) {\n                    dt[u + 0] = D;\n                    dt[u + 1] = D;\n                    dt[u + 2] = D;\n                    dt[u + 3] = D;\n    }   }   }   }\n    return iSize;\n}\n\nsize_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)\n{\n    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];\n    return HUF_readDTableX1_wksp(DTable, src, srcSize,\n                                 workSpace, sizeof(workSpace));\n}\n\nFORCE_INLINE_TEMPLATE BYTE\nHUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)\n{\n    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */\n    BYTE const c = dt[val].byte;\n    BIT_skipBits(Dstream, dt[val].nbBits);\n    return c;\n}\n\n#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \\\n    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \\\n    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \\\n        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)\n\n#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)\n\nHINT_INLINE size_t\nHUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 4 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {\n      
  HUF_DECODE_SYMBOLX1_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX1_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);\n    }\n\n    /* [0-3] symbols remaining */\n    if (MEM_32bits())\n        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))\n            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);\n\n    /* no more data to retrieve from bitstream, no need to reload */\n    while (p < pEnd)\n        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);\n\n    return pEnd-pStart;\n}\n\nFORCE_INLINE_TEMPLATE size_t\nHUF_decompress1X1_usingDTable_internal_body(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUF_DTable* DTable)\n{\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + dstSize;\n    const void* dtPtr = DTable + 1;\n    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;\n    BIT_DStream_t bitD;\n    DTableDesc const dtd = HUF_getDTableDesc(DTable);\n    U32 const dtLog = dtd.tableLog;\n\n    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );\n\n    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);\n\n    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);\n\n    return dstSize;\n}\n\nFORCE_INLINE_TEMPLATE size_t\nHUF_decompress4X1_usingDTable_internal_body(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUF_DTable* DTable)\n{\n    /* Check */\n    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */\n\n    {   const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n        BYTE* const olimit = oend - 3;\n        const void* const dtPtr = DTable + 1;\n        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;\n\n        /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        size_t 
const length1 = MEM_readLE16(istart);\n        size_t const length2 = MEM_readLE16(istart+2);\n        size_t const length3 = MEM_readLE16(istart+4);\n        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        DTableDesc const dtd = HUF_getDTableDesc(DTable);\n        U32 const dtLog = dtd.tableLog;\n        U32 endSignal = 1;\n\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );\n        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );\n        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );\n        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );\n\n        /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */\n        for ( ; (endSignal) & (op4 < olimit) ; ) {\n            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX1_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX1_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX1_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX1_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);\n        
    HUF_DECODE_SYMBOLX1_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX1_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX1_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX1_0(op4, &bitD4);\n            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;\n            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;\n            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;\n            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;\n        }\n\n        /* check corruption */\n        /* note : should not be necessary : op# advance in lock step, and we control op4.\n         *        but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);\n        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);\n        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);\n        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n          if (!endCheck) return ERROR(corruption_detected); }\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\ntypedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,\n                                               const void *cSrc,\n                                               size_t cSrcSize,\n                                               const HUF_DTable 
*DTable);\n\nHUF_DGEN(HUF_decompress1X1_usingDTable_internal)\nHUF_DGEN(HUF_decompress4X1_usingDTable_internal)\n\n\n\nsize_t HUF_decompress1X1_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUF_DTable* DTable)\n{\n    DTableDesc dtd = HUF_getDTableDesc(DTable);\n    if (dtd.tableType != 0) return ERROR(GENERIC);\n    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n}\n\nsize_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,\n                                   const void* cSrc, size_t cSrcSize,\n                                   void* workSpace, size_t wkspSize)\n{\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize; cSrcSize -= hSize;\n\n    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);\n}\n\n\nsize_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,\n                              const void* cSrc, size_t cSrcSize)\n{\n    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];\n    return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,\n                                       workSpace, sizeof(workSpace));\n}\n\nsize_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);\n    return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);\n}\n\nsize_t HUF_decompress4X1_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUF_DTable* DTable)\n{\n    DTableDesc dtd = HUF_getDTableDesc(DTable);\n    if (dtd.tableType != 0) return ERROR(GENERIC);\n    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, 
cSrcSize, DTable, /* bmi2 */ 0);\n}\n\nstatic size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,\n                                   const void* cSrc, size_t cSrcSize,\n                                   void* workSpace, size_t wkspSize, int bmi2)\n{\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize,\n                                                workSpace, wkspSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize; cSrcSize -= hSize;\n\n    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);\n}\n\nsize_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,\n                                   const void* cSrc, size_t cSrcSize,\n                                   void* workSpace, size_t wkspSize)\n{\n    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);\n}\n\n\nsize_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];\n    return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,\n                                       workSpace, sizeof(workSpace));\n}\nsize_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);\n    return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);\n}\n\n#endif /* HUF_FORCE_DECOMPRESS_X2 */\n\n\n#ifndef HUF_FORCE_DECOMPRESS_X1\n\n/* *************************/\n/* double-symbols decoding */\n/* *************************/\n\ntypedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */\ntypedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;\ntypedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];\ntypedef 
rankValCol_t rankVal_t[HUF_TABLELOG_MAX];\n\n\n/* HUF_fillDTableX2Level2() :\n * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */\nstatic void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,\n                           const U32* rankValOrigin, const int minWeight,\n                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,\n                           U32 nbBitsBaseline, U16 baseSeq)\n{\n    HUF_DEltX2 DElt;\n    U32 rankVal[HUF_TABLELOG_MAX + 1];\n\n    /* get pre-calculated rankVal */\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill skipped values */\n    if (minWeight>1) {\n        U32 i, skipSize = rankVal[minWeight];\n        MEM_writeLE16(&(DElt.sequence), baseSeq);\n        DElt.nbBits   = (BYTE)(consumed);\n        DElt.length   = 1;\n        for (i = 0; i < skipSize; i++)\n            DTable[i] = DElt;\n    }\n\n    /* fill DTable */\n    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */\n            const U32 symbol = sortedSymbols[s].symbol;\n            const U32 weight = sortedSymbols[s].weight;\n            const U32 nbBits = nbBitsBaseline - weight;\n            const U32 length = 1 << (sizeLog-nbBits);\n            const U32 start = rankVal[weight];\n            U32 i = start;\n            const U32 end = start + length;\n\n            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));\n            DElt.nbBits = (BYTE)(nbBits + consumed);\n            DElt.length = 2;\n            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */\n\n            rankVal[weight] += length;\n    }   }\n}\n\n\nstatic void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,\n                           const sortedSymbol_t* sortedList, const U32 sortedListSize,\n                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,\n                           const U32 
nbBitsBaseline)\n{\n    U32 rankVal[HUF_TABLELOG_MAX + 1];\n    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */\n    const U32 minBits  = nbBitsBaseline - maxWeight;\n    U32 s;\n\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++) {\n        const U16 symbol = sortedList[s].symbol;\n        const U32 weight = sortedList[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 start = rankVal[weight];\n        const U32 length = 1 << (targetLog-nbBits);\n\n        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */\n            U32 sortedRank;\n            int minWeight = nbBits + scaleLog;\n            if (minWeight < 1) minWeight = 1;\n            sortedRank = rankStart[minWeight];\n            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,\n                           rankValOrigin[nbBits], minWeight,\n                           sortedList+sortedRank, sortedListSize-sortedRank,\n                           nbBitsBaseline, symbol);\n        } else {\n            HUF_DEltX2 DElt;\n            MEM_writeLE16(&(DElt.sequence), symbol);\n            DElt.nbBits = (BYTE)(nbBits);\n            DElt.length = 1;\n            {   U32 const end = start + length;\n                U32 u;\n                for (u = start; u < end; u++) DTable[u] = DElt;\n        }   }\n        rankVal[weight] += length;\n    }\n}\n\nsize_t HUF_readDTableX2_wksp(HUF_DTable* DTable,\n                       const void* src, size_t srcSize,\n                             void* workSpace, size_t wkspSize)\n{\n    U32 tableLog, maxW, sizeOfSort, nbSymbols;\n    DTableDesc dtd = HUF_getDTableDesc(DTable);\n    U32 const maxTableLog = dtd.maxTableLog;\n    size_t iSize;\n    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */\n    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;\n    U32 *rankStart;\n\n    
rankValCol_t* rankVal;\n    U32* rankStats;\n    U32* rankStart0;\n    sortedSymbol_t* sortedSymbol;\n    BYTE* weightList;\n    size_t spaceUsed32 = 0;\n\n    rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);\n    spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;\n    rankStats = (U32 *)workSpace + spaceUsed32;\n    spaceUsed32 += HUF_TABLELOG_MAX + 1;\n    rankStart0 = (U32 *)workSpace + spaceUsed32;\n    spaceUsed32 += HUF_TABLELOG_MAX + 2;\n    sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);\n    spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;\n    weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);\n    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;\n\n    if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);\n\n    rankStart = rankStart0 + 1;\n    memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));\n\n    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */\n    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);\n    /* memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */\n\n    /* find maxWeight */\n    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */\n\n    /* Get start index of each weight */\n    {   U32 w, nextRankStart = 0;\n        for (w=1; w<maxW+1; w++) {\n            U32 current = nextRankStart;\n            nextRankStart += rankStats[w];\n            rankStart[w] = current;\n        }\n        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/\n        sizeOfSort = nextRankStart;\n    }\n\n    /* sort symbols by weight */\n    {   U32 s;\n        for (s=0; s<nbSymbols; s++) {\n            U32 const w = weightList[s];\n            U32 const r = rankStart[w]++;\n            sortedSymbol[r].symbol = (BYTE)s;\n            sortedSymbol[r].weight = (BYTE)w;\n        }\n        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */\n    }\n\n    /* Build rankVal */\n    {   U32* const rankVal0 = rankVal[0];\n        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */\n            U32 nextRankVal = 0;\n            U32 w;\n            for (w=1; w<maxW+1; w++) {\n                U32 current = nextRankVal;\n                nextRankVal += rankStats[w] << (w+rescale);\n                rankVal0[w] = current;\n        }   }\n        {   U32 const minBits = tableLog+1 - maxW;\n            U32 consumed;\n            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {\n                U32* const rankValPtr = rankVal[consumed];\n                U32 w;\n                for (w = 1; w < maxW+1; w++) {\n                    rankValPtr[w] = rankVal0[w] >> consumed;\n    }   }   }   }\n\n    
HUF_fillDTableX2(dt, maxTableLog,\n                   sortedSymbol, sizeOfSort,\n                   rankStart0, rankVal, maxW,\n                   tableLog+1);\n\n    dtd.tableLog = (BYTE)maxTableLog;\n    dtd.tableType = 1;\n    memcpy(DTable, &dtd, sizeof(dtd));\n    return iSize;\n}\n\nsize_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)\n{\n  U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];\n  return HUF_readDTableX2_wksp(DTable, src, srcSize,\n                               workSpace, sizeof(workSpace));\n}\n\n\nFORCE_INLINE_TEMPLATE U32\nHUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)\n{\n    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 2);\n    BIT_skipBits(DStream, dt[val].nbBits);\n    return dt[val].length;\n}\n\nFORCE_INLINE_TEMPLATE U32\nHUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)\n{\n    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 1);\n    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);\n    else {\n        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {\n            BIT_skipBits(DStream, dt[val].nbBits);\n            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))\n                /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */\n                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);\n    }   }\n    return 1;\n}\n\n#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \\\n    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \\\n        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)\n\nHINT_INLINE size_t\nHUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,\n                const HUF_DEltX2* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 8 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {\n        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n    }\n\n    /* closer to end : up to 2 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    while (p <= pEnd-2)\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */\n\n    if (p < pEnd)\n        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);\n\n    return p-pStart;\n}\n\nFORCE_INLINE_TEMPLATE size_t\nHUF_decompress1X2_usingDTable_internal_body(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUF_DTable* DTable)\n{\n    BIT_DStream_t bitD;\n\n    /* Init */\n    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );\n\n    /* decode */\n    {   BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n        const void* const dtPtr = DTable+1;   /* force compiler to 
not use strict-aliasing */\n        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;\n        DTableDesc const dtd = HUF_getDTableDesc(DTable);\n        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);\n    }\n\n    /* check */\n    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);\n\n    /* decoded size */\n    return dstSize;\n}\n\nFORCE_INLINE_TEMPLATE size_t\nHUF_decompress4X2_usingDTable_internal_body(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUF_DTable* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {   const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n        BYTE* const olimit = oend - (sizeof(size_t)-1);\n        const void* const dtPtr = DTable+1;\n        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;\n\n        /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        size_t const length1 = MEM_readLE16(istart);\n        size_t const length2 = MEM_readLE16(istart+2);\n        size_t const length3 = MEM_readLE16(istart+4);\n        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        size_t const segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal = 1;\n        DTableDesc const 
dtd = HUF_getDTableDesc(DTable);\n        U32 const dtLog = dtd.tableLog;\n\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );\n        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );\n        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );\n        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        for ( ; (endSignal) & (op4 < olimit); ) {\n#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);\n            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;\n            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);\n            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;\n            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;\n#else\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);\n            
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);\n            endSignal = LIKELY(\n                        (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)\n                      & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)\n                      & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)\n                      & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));\n#endif\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);\n        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);\n        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);\n        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n          if (!endCheck) return ERROR(corruption_detected); }\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\nHUF_DGEN(HUF_decompress1X2_usingDTable_internal)\nHUF_DGEN(HUF_decompress4X2_usingDTable_internal)\n\nsize_t HUF_decompress1X2_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t 
cSrcSize,\n    const HUF_DTable* DTable)\n{\n    DTableDesc dtd = HUF_getDTableDesc(DTable);\n    if (dtd.tableType != 1) return ERROR(GENERIC);\n    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n}\n\nsize_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,\n                                   const void* cSrc, size_t cSrcSize,\n                                   void* workSpace, size_t wkspSize)\n{\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,\n                                               workSpace, wkspSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize; cSrcSize -= hSize;\n\n    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);\n}\n\n\nsize_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,\n                              const void* cSrc, size_t cSrcSize)\n{\n    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];\n    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,\n                                       workSpace, sizeof(workSpace));\n}\n\nsize_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);\n    return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);\n}\n\nsize_t HUF_decompress4X2_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUF_DTable* DTable)\n{\n    DTableDesc dtd = HUF_getDTableDesc(DTable);\n    if (dtd.tableType != 1) return ERROR(GENERIC);\n    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n}\n\nstatic size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,\n                                   const void* cSrc, 
size_t cSrcSize,\n                                   void* workSpace, size_t wkspSize, int bmi2)\n{\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,\n                                         workSpace, wkspSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize; cSrcSize -= hSize;\n\n    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);\n}\n\nsize_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,\n                                   const void* cSrc, size_t cSrcSize,\n                                   void* workSpace, size_t wkspSize)\n{\n    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);\n}\n\n\nsize_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,\n                              const void* cSrc, size_t cSrcSize)\n{\n    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];\n    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,\n                                       workSpace, sizeof(workSpace));\n}\n\nsize_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);\n    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);\n}\n\n#endif /* HUF_FORCE_DECOMPRESS_X1 */\n\n\n/* ***********************************/\n/* Universal decompression selectors */\n/* ***********************************/\n\nsize_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,\n                                    const void* cSrc, size_t cSrcSize,\n                                    const HUF_DTable* DTable)\n{\n    DTableDesc const dtd = HUF_getDTableDesc(DTable);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n    (void)dtd;\n    assert(dtd.tableType == 0);\n    return 
HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n    (void)dtd;\n    assert(dtd.tableType == 1);\n    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n#else\n    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :\n                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n#endif\n}\n\nsize_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,\n                                    const void* cSrc, size_t cSrcSize,\n                                    const HUF_DTable* DTable)\n{\n    DTableDesc const dtd = HUF_getDTableDesc(DTable);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n    (void)dtd;\n    assert(dtd.tableType == 0);\n    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n    (void)dtd;\n    assert(dtd.tableType == 1);\n    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n#else\n    return dtd.tableType ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :\n                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);\n#endif\n}\n\n\n#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)\ntypedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;\nstatic const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =\n{\n    /* single, double, quad */\n    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */\n    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */\n    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */\n    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */\n    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */\n    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */\n    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */\n    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */\n    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */\n    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */\n    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */\n    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */\n    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */\n    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */\n    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */\n    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */\n};\n#endif\n\n/** HUF_selectDecoder() :\n *  Tells which decoder is likely to decode faster,\n *  based on a set of pre-computed metrics.\n * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .\n *  Assumption : 0 < dstSize <= 128 KB */\nU32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)\n{\n    assert(dstSize > 0);\n    assert(dstSize <= 128*1024);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n    (void)dstSize;\n    (void)cSrcSize;\n    return 
0;\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n    (void)dstSize;\n    (void)cSrcSize;\n    return 1;\n#else\n    /* decoder timing evaluation */\n    {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */\n        U32 const D256 = (U32)(dstSize >> 8);\n        U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);\n        U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);\n        DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */\n        return DTime1 < DTime0;\n    }\n#endif\n}\n\n\ntypedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);\n\nsize_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)\n    static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };\n#endif\n\n    /* validation checks */\n    if (dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */\n    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */\n    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */\n\n    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n        (void)algoNb;\n        assert(algoNb == 0);\n        return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n        (void)algoNb;\n        assert(algoNb == 1);\n        return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);\n#else\n        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);\n#endif\n    }\n}\n\nsize_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    /* validation checks */\n    if 
(dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */\n    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */\n    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */\n\n    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n        (void)algoNb;\n        assert(algoNb == 0);\n        return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n        (void)algoNb;\n        assert(algoNb == 1);\n        return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);\n#else\n        return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :\n                        HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;\n#endif\n    }\n}\n\nsize_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];\n    return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,\n                                         workSpace, sizeof(workSpace));\n}\n\n\nsize_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,\n                                     size_t dstSize, const void* cSrc,\n                                     size_t cSrcSize, void* workSpace,\n                                     size_t wkspSize)\n{\n    /* validation checks */\n    if (dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize == 0) return ERROR(corruption_detected);\n\n    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n        (void)algoNb;\n        assert(algoNb == 0);\n        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n        (void)algoNb;\n     
   assert(algoNb == 1);\n        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);\n#else\n        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,\n                            cSrcSize, workSpace, wkspSize):\n                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);\n#endif\n    }\n}\n\nsize_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,\n                                  const void* cSrc, size_t cSrcSize,\n                                  void* workSpace, size_t wkspSize)\n{\n    /* validation checks */\n    if (dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */\n    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */\n    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */\n\n    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n        (void)algoNb;\n        assert(algoNb == 0);\n        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,\n                                cSrcSize, workSpace, wkspSize);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n        (void)algoNb;\n        assert(algoNb == 1);\n        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,\n                                cSrcSize, workSpace, wkspSize);\n#else\n        return algoNb ? 
HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,\n                                cSrcSize, workSpace, wkspSize):\n                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,\n                                cSrcSize, workSpace, wkspSize);\n#endif\n    }\n}\n\nsize_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,\n                             const void* cSrc, size_t cSrcSize)\n{\n    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];\n    return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,\n                                      workSpace, sizeof(workSpace));\n}\n\n\nsize_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)\n{\n    DTableDesc const dtd = HUF_getDTableDesc(DTable);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n    (void)dtd;\n    assert(dtd.tableType == 0);\n    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n    (void)dtd;\n    assert(dtd.tableType == 1);\n    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);\n#else\n    return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :\n                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);\n#endif\n}\n\n#ifndef HUF_FORCE_DECOMPRESS_X2\nsize_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)\n{\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize; cSrcSize -= hSize;\n\n    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);\n}\n#endif\n\nsize_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)\n{\n    DTableDesc const dtd = HUF_getDTableDesc(DTable);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n    (void)dtd;\n    assert(dtd.tableType == 0);\n    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n    (void)dtd;\n    assert(dtd.tableType == 1);\n    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);\n#else\n    return dtd.tableType ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :\n                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);\n#endif\n}\n\nsize_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)\n{\n    /* validation checks */\n    if (dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize == 0) return ERROR(corruption_detected);\n\n    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);\n#if defined(HUF_FORCE_DECOMPRESS_X1)\n        (void)algoNb;\n        assert(algoNb == 0);\n        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);\n#elif defined(HUF_FORCE_DECOMPRESS_X2)\n        (void)algoNb;\n        assert(algoNb == 1);\n        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);\n#else\n        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :\n                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);\n#endif\n    }\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/decompress/zstd_ddict.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/* zstd_ddict.c :\n * concentrates all logic that needs to know the internals of ZSTD_DDict object */\n\n/*-*******************************************************\n*  Dependencies\n*********************************************************/\n#include <string.h>      /* memcpy, memmove, memset */\n#include \"cpu.h\"         /* bmi2 */\n#include \"mem.h\"         /* low level memory routines */\n#define FSE_STATIC_LINKING_ONLY\n#include \"fse.h\"\n#define HUF_STATIC_LINKING_ONLY\n#include \"huf.h\"\n#include \"zstd_decompress_internal.h\"\n#include \"zstd_ddict.h\"\n\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)\n#  include \"zstd_legacy.h\"\n#endif\n\n\n\n/*-*******************************************************\n*  Types\n*********************************************************/\nstruct ZSTD_DDict_s {\n    void* dictBuffer;\n    const void* dictContent;\n    size_t dictSize;\n    ZSTD_entropyDTables_t entropy;\n    U32 dictID;\n    U32 entropyPresent;\n    ZSTD_customMem cMem;\n};  /* typedef'd to ZSTD_DDict within \"zstd.h\" */\n\nconst void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)\n{\n    assert(ddict != NULL);\n    return ddict->dictContent;\n}\n\nsize_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)\n{\n    assert(ddict != NULL);\n    return ddict->dictSize;\n}\n\nvoid ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)\n{\n    DEBUGLOG(4, \"ZSTD_copyDDictParameters\");\n    assert(dctx != NULL);\n    assert(ddict != NULL);\n    dctx->dictID = ddict->dictID;\n    dctx->prefixStart = ddict->dictContent;\n    
dctx->virtualStart = ddict->dictContent;\n    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;\n    dctx->previousDstEnd = dctx->dictEnd;\n    if (ddict->entropyPresent) {\n        dctx->litEntropy = 1;\n        dctx->fseEntropy = 1;\n        dctx->LLTptr = ddict->entropy.LLTable;\n        dctx->MLTptr = ddict->entropy.MLTable;\n        dctx->OFTptr = ddict->entropy.OFTable;\n        dctx->HUFptr = ddict->entropy.hufTable;\n        dctx->entropy.rep[0] = ddict->entropy.rep[0];\n        dctx->entropy.rep[1] = ddict->entropy.rep[1];\n        dctx->entropy.rep[2] = ddict->entropy.rep[2];\n    } else {\n        dctx->litEntropy = 0;\n        dctx->fseEntropy = 0;\n    }\n}\n\n\nstatic size_t\nZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,\n                           ZSTD_dictContentType_e dictContentType)\n{\n    ddict->dictID = 0;\n    ddict->entropyPresent = 0;\n    if (dictContentType == ZSTD_dct_rawContent) return 0;\n\n    if (ddict->dictSize < 8) {\n        if (dictContentType == ZSTD_dct_fullDict)\n            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */\n        return 0;   /* pure content mode */\n    }\n    {   U32 const magic = MEM_readLE32(ddict->dictContent);\n        if (magic != ZSTD_MAGIC_DICTIONARY) {\n            if (dictContentType == ZSTD_dct_fullDict)\n                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */\n            return 0;   /* pure content mode */\n        }\n    }\n    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);\n\n    /* load entropy tables */\n    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(\n            &ddict->entropy, ddict->dictContent, ddict->dictSize)),\n        dictionary_corrupted);\n    ddict->entropyPresent = 1;\n    return 0;\n}\n\n\nstatic size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,\n                                      const void* dict, size_t dictSize,\n                                    
  ZSTD_dictLoadMethod_e dictLoadMethod,\n                                      ZSTD_dictContentType_e dictContentType)\n{\n    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {\n        ddict->dictBuffer = NULL;\n        ddict->dictContent = dict;\n        if (!dict) dictSize = 0;\n    } else {\n        void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);\n        ddict->dictBuffer = internalBuffer;\n        ddict->dictContent = internalBuffer;\n        if (!internalBuffer) return ERROR(memory_allocation);\n        memcpy(internalBuffer, dict, dictSize);\n    }\n    ddict->dictSize = dictSize;\n    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */\n\n    /* parse dictionary content */\n    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );\n\n    return 0;\n}\n\nZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,\n                                      ZSTD_dictLoadMethod_e dictLoadMethod,\n                                      ZSTD_dictContentType_e dictContentType,\n                                      ZSTD_customMem customMem)\n{\n    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;\n\n    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);\n        if (ddict == NULL) return NULL;\n        ddict->cMem = customMem;\n        {   size_t const initResult = ZSTD_initDDict_internal(ddict,\n                                            dict, dictSize,\n                                            dictLoadMethod, dictContentType);\n            if (ZSTD_isError(initResult)) {\n                ZSTD_freeDDict(ddict);\n                return NULL;\n        }   }\n        return ddict;\n    }\n}\n\n/*! 
ZSTD_createDDict() :\n*   Create a digested dictionary, to start decompression without startup delay.\n*   `dict` content is copied inside DDict.\n*   Consequently, `dict` can be released after `ZSTD_DDict` creation */\nZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)\n{\n    ZSTD_customMem const allocator = { NULL, NULL, NULL };\n    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);\n}\n\n/*! ZSTD_createDDict_byReference() :\n *  Create a digested dictionary, to start decompression without startup delay.\n *  Dictionary content is simply referenced, it will be accessed during decompression.\n *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */\nZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)\n{\n    ZSTD_customMem const allocator = { NULL, NULL, NULL };\n    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);\n}\n\n\nconst ZSTD_DDict* ZSTD_initStaticDDict(\n                                void* sBuffer, size_t sBufferSize,\n                                const void* dict, size_t dictSize,\n                                ZSTD_dictLoadMethod_e dictLoadMethod,\n                                ZSTD_dictContentType_e dictContentType)\n{\n    size_t const neededSpace = sizeof(ZSTD_DDict)\n                             + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 : dictSize);\n    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;\n    assert(sBuffer != NULL);\n    assert(dict != NULL);\n    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */\n    if (sBufferSize < neededSpace) return NULL;\n    if (dictLoadMethod == ZSTD_dlm_byCopy) {\n        memcpy(ddict+1, dict, dictSize);  /* local copy */\n        dict = ddict+1;\n    }\n    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,\n                                              dict, dictSize,\n                                              ZSTD_dlm_byRef, dictContentType) ))\n        return NULL;\n    return ddict;\n}\n\n\nsize_t ZSTD_freeDDict(ZSTD_DDict* ddict)\n{\n    if (ddict==NULL) return 0;   /* support free on NULL */\n    {   ZSTD_customMem const cMem = ddict->cMem;\n        ZSTD_free(ddict->dictBuffer, cMem);\n        ZSTD_free(ddict, cMem);\n        return 0;\n    }\n}\n\n/*! ZSTD_estimateDDictSize() :\n *  Estimate amount of memory that will be needed to create a dictionary for decompression.\n *  Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */\nsize_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)\n{\n    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);\n}\n\nsize_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)\n{\n    if (ddict==NULL) return 0;   /* support sizeof on NULL */\n    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;\n}\n\n/*! ZSTD_getDictID_fromDDict() :\n *  Provides the dictID of the dictionary loaded into `ddict`.\n *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\n *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */\nunsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)\n{\n    if (ddict==NULL) return 0;\n    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/decompress/zstd_ddict.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n#ifndef ZSTD_DDICT_H\n#define ZSTD_DDICT_H\n\n/*-*******************************************************\n *  Dependencies\n *********************************************************/\n#include <stddef.h>   /* size_t */\n#include \"zstd.h\"     /* ZSTD_DDict, and several public functions */\n\n\n/*-*******************************************************\n *  Interface\n *********************************************************/\n\n/* note: several prototypes are already published in `zstd.h` :\n * ZSTD_createDDict()\n * ZSTD_createDDict_byReference()\n * ZSTD_createDDict_advanced()\n * ZSTD_freeDDict()\n * ZSTD_initStaticDDict()\n * ZSTD_sizeof_DDict()\n * ZSTD_estimateDDictSize()\n * ZSTD_getDictID_fromDict()\n */\n\nconst void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);\nsize_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);\n\nvoid ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);\n\n\n\n#endif /* ZSTD_DDICT_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/decompress/zstd_decompress.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/* ***************************************************************\n*  Tuning parameters\n*****************************************************************/\n/*!\n * HEAPMODE :\n * Select how default decompression function ZSTD_decompress() allocates its context,\n * on stack (0), or into heap (1, default; requires malloc()).\n * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.\n */\n#ifndef ZSTD_HEAPMODE\n#  define ZSTD_HEAPMODE 1\n#endif\n\n/*!\n*  LEGACY_SUPPORT :\n*  if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)\n*/\n#ifndef ZSTD_LEGACY_SUPPORT\n#  define ZSTD_LEGACY_SUPPORT 0\n#endif\n\n/*!\n *  MAXWINDOWSIZE_DEFAULT :\n *  maximum window size accepted by DStream __by default__.\n *  Frames requiring more memory will be rejected.\n *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().\n */\n#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT\n#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)\n#endif\n\n/*!\n *  NO_FORWARD_PROGRESS_MAX :\n *  maximum allowed nb of calls to ZSTD_decompressStream()\n *  without any forward progress\n *  (defined as: no byte read from input, and no byte flushed to output)\n *  before triggering an error.\n */\n#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX\n#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16\n#endif\n\n\n/*-*******************************************************\n*  Dependencies\n*********************************************************/\n#include <string.h>      /* memcpy, memmove, memset */\n#include \"cpu.h\"         /* bmi2 
*/\n#include \"mem.h\"         /* low level memory routines */\n#define FSE_STATIC_LINKING_ONLY\n#include \"fse.h\"\n#define HUF_STATIC_LINKING_ONLY\n#include \"huf.h\"\n#include \"zstd_internal.h\"  /* blockProperties_t */\n#include \"zstd_decompress_internal.h\"   /* ZSTD_DCtx */\n#include \"zstd_ddict.h\"  /* ZSTD_DDictDictContent */\n#include \"zstd_decompress_block.h\"   /* ZSTD_decompressBlock_internal */\n\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)\n#  include \"zstd_legacy.h\"\n#endif\n\n\n/*-*************************************************************\n*   Context management\n***************************************************************/\nsize_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)\n{\n    if (dctx==NULL) return 0;   /* support sizeof NULL */\n    return sizeof(*dctx)\n           + ZSTD_sizeof_DDict(dctx->ddictLocal)\n           + dctx->inBuffSize + dctx->outBuffSize;\n}\n\nsize_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }\n\n\nstatic size_t ZSTD_startingInputLength(ZSTD_format_e format)\n{\n    size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);\n    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */\n    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );\n    return startingInputLength;\n}\n\nstatic void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)\n{\n    dctx->format = ZSTD_f_zstd1;  /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */\n    dctx->staticSize  = 0;\n    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;\n    dctx->ddict       = NULL;\n    dctx->ddictLocal  = NULL;\n    dctx->dictEnd     = NULL;\n    dctx->ddictIsCold = 0;\n    dctx->dictUses = ZSTD_dont_use;\n    dctx->inBuff      = NULL;\n    dctx->inBuffSize  = 0;\n    dctx->outBuffSize = 0;\n    dctx->streamStage = zdss_init;\n    dctx->legacyContext = NULL;\n    dctx->previousLegacyVersion = 0;\n    dctx->noForwardProgress = 0;\n    dctx->bmi2 = 
ZSTD_cpuid_bmi2(ZSTD_cpuid());\n}\n\nZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)\n{\n    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;\n\n    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */\n    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */\n\n    ZSTD_initDCtx_internal(dctx);\n    dctx->staticSize = workspaceSize;\n    dctx->inBuff = (char*)(dctx+1);\n    return dctx;\n}\n\nZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)\n{\n    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;\n\n    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);\n        if (!dctx) return NULL;\n        dctx->customMem = customMem;\n        ZSTD_initDCtx_internal(dctx);\n        return dctx;\n    }\n}\n\nZSTD_DCtx* ZSTD_createDCtx(void)\n{\n    DEBUGLOG(3, \"ZSTD_createDCtx\");\n    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);\n}\n\nstatic void ZSTD_clearDict(ZSTD_DCtx* dctx)\n{\n    ZSTD_freeDDict(dctx->ddictLocal);\n    dctx->ddictLocal = NULL;\n    dctx->ddict = NULL;\n    dctx->dictUses = ZSTD_dont_use;\n}\n\nsize_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)\n{\n    if (dctx==NULL) return 0;   /* support free on NULL */\n    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, \"not compatible with static DCtx\");\n    {   ZSTD_customMem const cMem = dctx->customMem;\n        ZSTD_clearDict(dctx);\n        ZSTD_free(dctx->inBuff, cMem);\n        dctx->inBuff = NULL;\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)\n        if (dctx->legacyContext)\n            ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);\n#endif\n        ZSTD_free(dctx, cMem);\n        return 0;\n    }\n}\n\n/* no longer useful */\nvoid ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)\n{\n    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);\n    memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace 
*/\n}\n\n\n/*-*************************************************************\n *   Frame header decoding\n ***************************************************************/\n\n/*! ZSTD_isFrame() :\n *  Tells if the content of `buffer` starts with a valid Frame Identifier.\n *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.\n *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.\n *  Note 3 : Skippable Frame Identifiers are considered valid. */\nunsigned ZSTD_isFrame(const void* buffer, size_t size)\n{\n    if (size < ZSTD_FRAMEIDSIZE) return 0;\n    {   U32 const magic = MEM_readLE32(buffer);\n        if (magic == ZSTD_MAGICNUMBER) return 1;\n        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;\n    }\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)\n    if (ZSTD_isLegacy(buffer, size)) return 1;\n#endif\n    return 0;\n}\n\n/** ZSTD_frameHeaderSize_internal() :\n *  srcSize must be large enough to reach header size fields.\n *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.\n * @return : size of the Frame Header\n *           or an error code, which can be tested with ZSTD_isError() */\nstatic size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)\n{\n    size_t const minInputSize = ZSTD_startingInputLength(format);\n    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);\n\n    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];\n        U32 const dictID= fhd & 3;\n        U32 const singleSegment = (fhd >> 5) & 1;\n        U32 const fcsId = fhd >> 6;\n        return minInputSize + !singleSegment\n             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]\n             + (singleSegment && !fcsId);\n    }\n}\n\n/** ZSTD_frameHeaderSize() :\n *  srcSize must be >= ZSTD_frameHeaderSize_prefix.\n * @return : size of the Frame Header,\n *           or an error code (if 
srcSize is too small) */\nsize_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)\n{\n    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);\n}\n\n\n/** ZSTD_getFrameHeader_advanced() :\n *  decode Frame Header, or require larger `srcSize`.\n *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless\n * @return : 0, `zfhPtr` is correctly filled,\n *          >0, `srcSize` is too small, value is wanted `srcSize` amount,\n *           or an error code, which can be tested using ZSTD_isError() */\nsize_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)\n{\n    const BYTE* ip = (const BYTE*)src;\n    size_t const minInputSize = ZSTD_startingInputLength(format);\n\n    memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */\n    if (srcSize < minInputSize) return minInputSize;\n    RETURN_ERROR_IF(src==NULL, GENERIC, \"invalid parameter\");\n\n    if ( (format != ZSTD_f_zstd1_magicless)\n      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {\n        if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {\n            /* skippable frame */\n            if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)\n                return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */\n            memset(zfhPtr, 0, sizeof(*zfhPtr));\n            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);\n            zfhPtr->frameType = ZSTD_skippableFrame;\n            return 0;\n        }\n        RETURN_ERROR(prefix_unknown);\n    }\n\n    /* ensure there is enough `srcSize` to fully read/decode frame header */\n    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);\n        if (srcSize < fhsize) return fhsize;\n        zfhPtr->headerSize = (U32)fhsize;\n    
}\n\n    {   BYTE const fhdByte = ip[minInputSize-1];\n        size_t pos = minInputSize;\n        U32 const dictIDSizeCode = fhdByte&3;\n        U32 const checksumFlag = (fhdByte>>2)&1;\n        U32 const singleSegment = (fhdByte>>5)&1;\n        U32 const fcsID = fhdByte>>6;\n        U64 windowSize = 0;\n        U32 dictID = 0;\n        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;\n        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,\n                        \"reserved bits, must be zero\");\n\n        if (!singleSegment) {\n            BYTE const wlByte = ip[pos++];\n            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;\n            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);\n            windowSize = (1ULL << windowLog);\n            windowSize += (windowSize >> 3) * (wlByte&7);\n        }\n        switch(dictIDSizeCode)\n        {\n            default: assert(0);  /* impossible */\n            case 0 : break;\n            case 1 : dictID = ip[pos]; pos++; break;\n            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;\n            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;\n        }\n        switch(fcsID)\n        {\n            default: assert(0);  /* impossible */\n            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;\n            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;\n            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;\n            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;\n        }\n        if (singleSegment) windowSize = frameContentSize;\n\n        zfhPtr->frameType = ZSTD_frame;\n        zfhPtr->frameContentSize = frameContentSize;\n        zfhPtr->windowSize = windowSize;\n        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);\n        zfhPtr->dictID = dictID;\n        zfhPtr->checksumFlag = checksumFlag;\n    }\n    return 0;\n}\n\n/** 
ZSTD_getFrameHeader() :\n *  decode Frame Header, or require larger `srcSize`.\n *  note : this function does not consume input, it only reads it.\n * @return : 0, `zfhPtr` is correctly filled,\n *          >0, `srcSize` is too small, value is wanted `srcSize` amount,\n *           or an error code, which can be tested using ZSTD_isError() */\nsize_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)\n{\n    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);\n}\n\n\n/** ZSTD_getFrameContentSize() :\n *  compatible with legacy mode\n * @return : decompressed size of the single frame pointed to be `src` if known, otherwise\n *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined\n *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */\nunsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)\n{\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)\n    if (ZSTD_isLegacy(src, srcSize)) {\n        unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);\n        return ret == 0 ? 
ZSTD_CONTENTSIZE_UNKNOWN : ret;\n    }\n#endif\n    {   ZSTD_frameHeader zfh;\n        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)\n            return ZSTD_CONTENTSIZE_ERROR;\n        if (zfh.frameType == ZSTD_skippableFrame) {\n            return 0;\n        } else {\n            return zfh.frameContentSize;\n    }   }\n}\n\nstatic size_t readSkippableFrameSize(void const* src, size_t srcSize)\n{\n    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;\n    U32 sizeU32;\n\n    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);\n\n    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);\n    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,\n                    frameParameter_unsupported);\n    {\n        size_t const skippableSize = skippableHeaderSize + sizeU32;\n        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong);\n        return skippableSize;\n    }\n}\n\n/** ZSTD_findDecompressedSize() :\n *  compatible with legacy mode\n *  `srcSize` must be the exact length of some number of ZSTD compressed and/or\n *      skippable frames\n *  @return : decompressed size of the frames contained */\nunsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)\n{\n    unsigned long long totalDstSize = 0;\n\n    while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {\n        U32 const magicNumber = MEM_readLE32(src);\n\n        if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {\n            size_t const skippableSize = readSkippableFrameSize(src, srcSize);\n            if (ZSTD_isError(skippableSize)) {\n                return ZSTD_CONTENTSIZE_ERROR;\n            }\n            assert(skippableSize <= srcSize);\n\n            src = (const BYTE *)src + skippableSize;\n            srcSize -= skippableSize;\n            continue;\n        }\n\n        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);\n            if (ret >= 
ZSTD_CONTENTSIZE_ERROR) return ret;\n\n            /* check for overflow */\n            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;\n            totalDstSize += ret;\n        }\n        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);\n            if (ZSTD_isError(frameSrcSize)) {\n                return ZSTD_CONTENTSIZE_ERROR;\n            }\n\n            src = (const BYTE *)src + frameSrcSize;\n            srcSize -= frameSrcSize;\n        }\n    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */\n\n    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;\n\n    return totalDstSize;\n}\n\n/** ZSTD_getDecompressedSize() :\n *  compatible with legacy mode\n * @return : decompressed size if known, 0 otherwise\n             note : 0 can mean any of the following :\n                   - frame content is empty\n                   - decompressed size field is not present in frame header\n                   - frame header unknown / not supported\n                   - frame header not complete (`srcSize` too small) */\nunsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)\n{\n    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);\n    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);\n    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 
0 : ret;\n}\n\n\n/** ZSTD_decodeFrameHeader() :\n * `headerSize` must be the size provided by ZSTD_frameHeaderSize().\n * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */\nstatic size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)\n{\n    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);\n    if (ZSTD_isError(result)) return result;    /* invalid header */\n    RETURN_ERROR_IF(result>0, srcSize_wrong, \"headerSize too small\");\n#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION\n    /* Skip the dictID check in fuzzing mode, because it makes the search\n     * harder.\n     */\n    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),\n                    dictionary_wrong);\n#endif\n    if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);\n    return 0;\n}\n\nstatic ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)\n{\n    ZSTD_frameSizeInfo frameSizeInfo;\n    frameSizeInfo.compressedSize = ret;\n    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;\n    return frameSizeInfo;\n}\n\nstatic ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)\n{\n    ZSTD_frameSizeInfo frameSizeInfo;\n    memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));\n\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)\n    if (ZSTD_isLegacy(src, srcSize))\n        return ZSTD_findFrameSizeInfoLegacy(src, srcSize);\n#endif\n\n    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)\n        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {\n        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);\n        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||\n               frameSizeInfo.compressedSize <= srcSize);\n        return frameSizeInfo;\n    } else {\n        const BYTE* ip = (const BYTE*)src;\n        const BYTE* const ipstart = ip;\n       
 size_t remainingSize = srcSize;\n        size_t nbBlocks = 0;\n        ZSTD_frameHeader zfh;\n\n        /* Extract Frame Header */\n        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);\n            if (ZSTD_isError(ret))\n                return ZSTD_errorFrameSizeInfo(ret);\n            if (ret > 0)\n                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));\n        }\n\n        ip += zfh.headerSize;\n        remainingSize -= zfh.headerSize;\n\n        /* Iterate over each block */\n        while (1) {\n            blockProperties_t blockProperties;\n            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);\n            if (ZSTD_isError(cBlockSize))\n                return ZSTD_errorFrameSizeInfo(cBlockSize);\n\n            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)\n                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));\n\n            ip += ZSTD_blockHeaderSize + cBlockSize;\n            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;\n            nbBlocks++;\n\n            if (blockProperties.lastBlock) break;\n        }\n\n        /* Final frame content checksum */\n        if (zfh.checksumFlag) {\n            if (remainingSize < 4)\n                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));\n            ip += 4;\n        }\n\n        frameSizeInfo.compressedSize = ip - ipstart;\n        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)\n                                        ? 
zfh.frameContentSize\n                                        : nbBlocks * zfh.blockSizeMax;\n        return frameSizeInfo;\n    }\n}\n\n/** ZSTD_findFrameCompressedSize() :\n *  compatible with legacy mode\n *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame\n *  `srcSize` must be at least as large as the frame contained\n *  @return : the compressed size of the frame starting at `src` */\nsize_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)\n{\n    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);\n    return frameSizeInfo.compressedSize;\n}\n\n/** ZSTD_decompressBound() :\n *  compatible with legacy mode\n *  `src` must point to the start of a ZSTD frame or a skippeable frame\n *  `srcSize` must be at least as large as the frame contained\n *  @return : the maximum decompressed size of the compressed source\n */\nunsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)\n{\n    unsigned long long bound = 0;\n    /* Iterate over each frame */\n    while (srcSize > 0) {\n        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);\n        size_t const compressedSize = frameSizeInfo.compressedSize;\n        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;\n        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)\n            return ZSTD_CONTENTSIZE_ERROR;\n        assert(srcSize >= compressedSize);\n        src = (const BYTE*)src + compressedSize;\n        srcSize -= compressedSize;\n        bound += decompressedBound;\n    }\n    return bound;\n}\n\n\n/*-*************************************************************\n *   Frame decoding\n ***************************************************************/\n\n/** ZSTD_insertBlock() :\n *  insert `src` block into `dctx` history. Useful to track uncompressed blocks. 
*/\nsize_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)\n{\n    DEBUGLOG(5, \"ZSTD_insertBlock: %u bytes\", (unsigned)blockSize);\n    ZSTD_checkContinuity(dctx, blockStart);\n    dctx->previousDstEnd = (const char*)blockStart + blockSize;\n    return blockSize;\n}\n\n\nstatic size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,\n                          const void* src, size_t srcSize)\n{\n    DEBUGLOG(5, \"ZSTD_copyRawBlock\");\n    if (dst == NULL) {\n        if (srcSize == 0) return 0;\n        RETURN_ERROR(dstBuffer_null);\n    }\n    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);\n    memcpy(dst, src, srcSize);\n    return srcSize;\n}\n\nstatic size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,\n                               BYTE b,\n                               size_t regenSize)\n{\n    if (dst == NULL) {\n        if (regenSize == 0) return 0;\n        RETURN_ERROR(dstBuffer_null);\n    }\n    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);\n    memset(dst, b, regenSize);\n    return regenSize;\n}\n\n\n/*! ZSTD_decompressFrame() :\n * @dctx must be properly initialized\n *  will update *srcPtr and *srcSizePtr,\n *  to make *srcPtr progress by one frame. */\nstatic size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,\n                                   void* dst, size_t dstCapacity,\n                             const void** srcPtr, size_t *srcSizePtr)\n{\n    const BYTE* ip = (const BYTE*)(*srcPtr);\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* const oend = dstCapacity != 0 ? 
ostart + dstCapacity : ostart;\n    BYTE* op = ostart;\n    size_t remainingSrcSize = *srcSizePtr;\n\n    DEBUGLOG(4, \"ZSTD_decompressFrame (srcSize:%i)\", (int)*srcSizePtr);\n\n    /* check */\n    RETURN_ERROR_IF(\n        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,\n        srcSize_wrong);\n\n    /* Frame Header */\n    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(\n                ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);\n        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;\n        RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,\n                        srcSize_wrong);\n        FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );\n        ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;\n    }\n\n    /* Loop on each block */\n    while (1) {\n        size_t decodedSize;\n        blockProperties_t blockProperties;\n        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);\n        if (ZSTD_isError(cBlockSize)) return cBlockSize;\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSrcSize -= ZSTD_blockHeaderSize;\n        RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);\n\n        switch(blockProperties.blockType)\n        {\n        case bt_compressed:\n            decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);\n            break;\n        case bt_raw :\n            decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);\n            break;\n        case bt_rle :\n            decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize);\n            break;\n        case bt_reserved :\n        default:\n            RETURN_ERROR(corruption_detected);\n        }\n\n        if (ZSTD_isError(decodedSize)) return decodedSize;\n        if (dctx->fParams.checksumFlag)\n            
XXH64_update(&dctx->xxhState, op, decodedSize);\n        if (decodedSize != 0)\n            op += decodedSize;\n        assert(ip != NULL);\n        ip += cBlockSize;\n        remainingSrcSize -= cBlockSize;\n        if (blockProperties.lastBlock) break;\n    }\n\n    if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {\n        RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,\n                        corruption_detected);\n    }\n    if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */\n        U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);\n        U32 checkRead;\n        RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);\n        checkRead = MEM_readLE32(ip);\n        RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);\n        ip += 4;\n        remainingSrcSize -= 4;\n    }\n\n    /* Allow caller to get size read */\n    *srcPtr = ip;\n    *srcSizePtr = remainingSrcSize;\n    return op-ostart;\n}\n\nstatic size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,\n                                        void* dst, size_t dstCapacity,\n                                  const void* src, size_t srcSize,\n                                  const void* dict, size_t dictSize,\n                                  const ZSTD_DDict* ddict)\n{\n    void* const dststart = dst;\n    int moreThan1Frame = 0;\n\n    DEBUGLOG(5, \"ZSTD_decompressMultiFrame\");\n    assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */\n\n    if (ddict) {\n        dict = ZSTD_DDict_dictContent(ddict);\n        dictSize = ZSTD_DDict_dictSize(ddict);\n    }\n\n    while (srcSize >= ZSTD_startingInputLength(dctx->format)) {\n\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)\n        if (ZSTD_isLegacy(src, srcSize)) {\n            size_t decodedSize;\n            size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);\n            if (ZSTD_isError(frameSize)) return frameSize;\n    
        RETURN_ERROR_IF(dctx->staticSize, memory_allocation,\n                \"legacy support is not compatible with static dctx\");\n\n            decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);\n            if (ZSTD_isError(decodedSize)) return decodedSize;\n\n            assert(decodedSize <= dstCapacity);\n            dst = (BYTE*)dst + decodedSize;\n            dstCapacity -= decodedSize;\n\n            src = (const BYTE*)src + frameSize;\n            srcSize -= frameSize;\n\n            continue;\n        }\n#endif\n\n        {   U32 const magicNumber = MEM_readLE32(src);\n            DEBUGLOG(4, \"reading magic number %08X (expecting %08X)\",\n                        (unsigned)magicNumber, ZSTD_MAGICNUMBER);\n            if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {\n                size_t const skippableSize = readSkippableFrameSize(src, srcSize);\n                FORWARD_IF_ERROR(skippableSize);\n                assert(skippableSize <= srcSize);\n\n                src = (const BYTE *)src + skippableSize;\n                srcSize -= skippableSize;\n                continue;\n        }   }\n\n        if (ddict) {\n            /* we were called from ZSTD_decompress_usingDDict */\n            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));\n        } else {\n            /* this will initialize correctly with no dict if dict == NULL, so\n             * use this in all cases but ddict */\n            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));\n        }\n        ZSTD_checkContinuity(dctx, dst);\n\n        {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,\n                                                    &src, &srcSize);\n            RETURN_ERROR_IF(\n                (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)\n             && (moreThan1Frame==1),\n                srcSize_wrong,\n                \"at least one frame 
successfully completed, but following \"\n                \"bytes are garbage: it's more likely to be a srcSize error, \"\n                \"specifying more bytes than compressed size of frame(s). This \"\n                \"error message replaces ERROR(prefix_unknown), which would be \"\n                \"confusing, as the first header is actually correct. Note that \"\n                \"one could be unlucky, it might be a corruption error instead, \"\n                \"happening right at the place where we expect zstd magic \"\n                \"bytes. But this is _much_ less likely than a srcSize field \"\n                \"error.\");\n            if (ZSTD_isError(res)) return res;\n            assert(res <= dstCapacity);\n            if (res != 0)\n                dst = (BYTE*)dst + res;\n            dstCapacity -= res;\n        }\n        moreThan1Frame = 1;\n    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */\n\n    RETURN_ERROR_IF(srcSize, srcSize_wrong, \"input not entirely consumed\");\n\n    return (BYTE*)dst - (BYTE*)dststart;\n}\n\nsize_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,\n                                 void* dst, size_t dstCapacity,\n                           const void* src, size_t srcSize,\n                           const void* dict, size_t dictSize)\n{\n    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);\n}\n\n\nstatic ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)\n{\n    switch (dctx->dictUses) {\n    default:\n        assert(0 /* Impossible */);\n        /* fall-through */\n    case ZSTD_dont_use:\n        ZSTD_clearDict(dctx);\n        return NULL;\n    case ZSTD_use_indefinitely:\n        return dctx->ddict;\n    case ZSTD_use_once:\n        dctx->dictUses = ZSTD_dont_use;\n        return dctx->ddict;\n    }\n}\n\nsize_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    return ZSTD_decompress_usingDDict(dctx, dst, 
dstCapacity, src, srcSize, ZSTD_getDDict(dctx));\n}\n\n\nsize_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)\n    size_t regenSize;\n    ZSTD_DCtx* const dctx = ZSTD_createDCtx();\n    RETURN_ERROR_IF(dctx==NULL, memory_allocation);\n    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);\n    ZSTD_freeDCtx(dctx);\n    return regenSize;\n#else   /* stack mode */\n    ZSTD_DCtx dctx;\n    ZSTD_initDCtx_internal(&dctx);\n    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);\n#endif\n}\n\n\n/*-**************************************\n*   Advanced Streaming Decompression API\n*   Bufferless and synchronous\n****************************************/\nsize_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }\n\n/**\n * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,\n * we allow taking a partial block as the input. 
Currently only raw uncompressed blocks can\n * be streamed.\n *\n * For blocks that can be streamed, this allows us to reduce the latency until we produce\n * output, and avoid copying the input.\n *\n * @param inputSize - The total amount of input that the caller currently has.\n */\nstatic size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {\n    if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))\n        return dctx->expected;\n    if (dctx->bType != bt_raw)\n        return dctx->expected;\n    return MIN(MAX(inputSize, 1), dctx->expected);\n}\n\nZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {\n    switch(dctx->stage)\n    {\n    default:   /* should not happen */\n        assert(0);\n    case ZSTDds_getFrameHeaderSize:\n    case ZSTDds_decodeFrameHeader:\n        return ZSTDnit_frameHeader;\n    case ZSTDds_decodeBlockHeader:\n        return ZSTDnit_blockHeader;\n    case ZSTDds_decompressBlock:\n        return ZSTDnit_block;\n    case ZSTDds_decompressLastBlock:\n        return ZSTDnit_lastBlock;\n    case ZSTDds_checkChecksum:\n        return ZSTDnit_checksum;\n    case ZSTDds_decodeSkippableHeader:\n    case ZSTDds_skipFrame:\n        return ZSTDnit_skippableFrame;\n    }\n}\n\nstatic int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }\n\n/** ZSTD_decompressContinue() :\n *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())\n *  @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity)\n *            or an error code, which can be tested using ZSTD_isError() */\nsize_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    DEBUGLOG(5, \"ZSTD_decompressContinue (srcSize:%u)\", (unsigned)srcSize);\n    /* Sanity check */\n    RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, \"not 
allowed\");\n    if (dstCapacity) ZSTD_checkContinuity(dctx, dst);\n\n    switch (dctx->stage)\n    {\n    case ZSTDds_getFrameHeaderSize :\n        assert(src != NULL);\n        if (dctx->format == ZSTD_f_zstd1) {  /* allows header */\n            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */\n            if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */\n                memcpy(dctx->headerBuffer, src, srcSize);\n                dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize;  /* remaining to load to get full skippable frame header */\n                dctx->stage = ZSTDds_decodeSkippableHeader;\n                return 0;\n        }   }\n        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);\n        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;\n        memcpy(dctx->headerBuffer, src, srcSize);\n        dctx->expected = dctx->headerSize - srcSize;\n        dctx->stage = ZSTDds_decodeFrameHeader;\n        return 0;\n\n    case ZSTDds_decodeFrameHeader:\n        assert(src != NULL);\n        memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);\n        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));\n        dctx->expected = ZSTD_blockHeaderSize;\n        dctx->stage = ZSTDds_decodeBlockHeader;\n        return 0;\n\n    case ZSTDds_decodeBlockHeader:\n        {   blockProperties_t bp;\n            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);\n            if (ZSTD_isError(cBlockSize)) return cBlockSize;\n            RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, \"Block Size Exceeds Maximum\");\n            dctx->expected = cBlockSize;\n            dctx->bType = bp.blockType;\n            dctx->rleSize = bp.origSize;\n            if (cBlockSize) {\n                dctx->stage = bp.lastBlock ? 
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;\n                return 0;\n            }\n            /* empty block */\n            if (bp.lastBlock) {\n                if (dctx->fParams.checksumFlag) {\n                    dctx->expected = 4;\n                    dctx->stage = ZSTDds_checkChecksum;\n                } else {\n                    dctx->expected = 0; /* end of frame */\n                    dctx->stage = ZSTDds_getFrameHeaderSize;\n                }\n            } else {\n                dctx->expected = ZSTD_blockHeaderSize;  /* jump to next header */\n                dctx->stage = ZSTDds_decodeBlockHeader;\n            }\n            return 0;\n        }\n\n    case ZSTDds_decompressLastBlock:\n    case ZSTDds_decompressBlock:\n        DEBUGLOG(5, \"ZSTD_decompressContinue: case ZSTDds_decompressBlock\");\n        {   size_t rSize;\n            switch(dctx->bType)\n            {\n            case bt_compressed:\n                DEBUGLOG(5, \"ZSTD_decompressContinue: case bt_compressed\");\n                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);\n                dctx->expected = 0;  /* Streaming not supported */\n                break;\n            case bt_raw :\n                assert(srcSize <= dctx->expected);\n                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);\n                FORWARD_IF_ERROR(rSize);\n                assert(rSize == srcSize);\n                dctx->expected -= rSize;\n                break;\n            case bt_rle :\n                rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);\n                dctx->expected = 0;  /* Streaming not supported */\n                break;\n            case bt_reserved :   /* should never happen */\n            default:\n                RETURN_ERROR(corruption_detected);\n            }\n            FORWARD_IF_ERROR(rSize);\n            RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, 
corruption_detected, \"Decompressed Block Size Exceeds Maximum\");\n            DEBUGLOG(5, \"ZSTD_decompressContinue: decoded size from block : %u\", (unsigned)rSize);\n            dctx->decodedSize += rSize;\n            if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);\n            dctx->previousDstEnd = (char*)dst + rSize;\n\n            /* Stay on the same stage until we are finished streaming the block. */\n            if (dctx->expected > 0) {\n                return rSize;\n            }\n\n            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */\n                DEBUGLOG(4, \"ZSTD_decompressContinue: decoded size from frame : %u\", (unsigned)dctx->decodedSize);\n                RETURN_ERROR_IF(\n                    dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN\n                 && dctx->decodedSize != dctx->fParams.frameContentSize,\n                    corruption_detected);\n                if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */\n                    dctx->expected = 4;\n                    dctx->stage = ZSTDds_checkChecksum;\n                } else {\n                    dctx->expected = 0;   /* ends here */\n                    dctx->stage = ZSTDds_getFrameHeaderSize;\n                }\n            } else {\n                dctx->stage = ZSTDds_decodeBlockHeader;\n                dctx->expected = ZSTD_blockHeaderSize;\n            }\n            return rSize;\n        }\n\n    case ZSTDds_checkChecksum:\n        assert(srcSize == 4);  /* guaranteed by dctx->expected */\n        {   U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);\n            U32 const check32 = MEM_readLE32(src);\n            DEBUGLOG(4, \"ZSTD_decompressContinue: checksum : calculated %08X :: %08X read\", (unsigned)h32, (unsigned)check32);\n            RETURN_ERROR_IF(check32 != h32, checksum_wrong);\n            dctx->expected = 0;\n            dctx->stage = 
ZSTDds_getFrameHeaderSize;\n            return 0;\n        }\n\n    case ZSTDds_decodeSkippableHeader:\n        assert(src != NULL);\n        assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);\n        memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize);   /* complete skippable header */\n        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);   /* note : dctx->expected can grow seriously large, beyond local buffer size */\n        dctx->stage = ZSTDds_skipFrame;\n        return 0;\n\n    case ZSTDds_skipFrame:\n        dctx->expected = 0;\n        dctx->stage = ZSTDds_getFrameHeaderSize;\n        return 0;\n\n    default:\n        assert(0);   /* impossible */\n        RETURN_ERROR(GENERIC);   /* some compiler require default to do something */\n    }\n}\n\n\nstatic size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    dctx->dictEnd = dctx->previousDstEnd;\n    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));\n    dctx->prefixStart = dict;\n    dctx->previousDstEnd = (const char*)dict + dictSize;\n    return 0;\n}\n\n/*! 
ZSTD_loadDEntropy() :\n *  dict : must point at beginning of a valid zstd dictionary.\n * @return : size of entropy tables read */\nsize_t\nZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,\n                  const void* const dict, size_t const dictSize)\n{\n    const BYTE* dictPtr = (const BYTE*)dict;\n    const BYTE* const dictEnd = dictPtr + dictSize;\n\n    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);\n    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */\n    dictPtr += 8;   /* skip header = magic + dictID */\n\n    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));\n    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));\n    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);\n    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */\n        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);\n#ifdef HUF_FORCE_DECOMPRESS_X1\n        /* in minimal huffman, we always use X1 variants */\n        size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,\n                                                dictPtr, dictEnd - dictPtr,\n                                                workspace, workspaceSize);\n#else\n        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,\n                                                dictPtr, dictEnd - dictPtr,\n                                                workspace, workspaceSize);\n#endif\n        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);\n        dictPtr += hSize;\n    }\n\n    {   short offcodeNCount[MaxOff+1];\n        unsigned offcodeMaxValue = MaxOff, offcodeLog;\n        
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);\n        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);\n        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);\n        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);\n        ZSTD_buildFSETable( entropy->OFTable,\n                            offcodeNCount, offcodeMaxValue,\n                            OF_base, OF_bits,\n                            offcodeLog);\n        dictPtr += offcodeHeaderSize;\n    }\n\n    {   short matchlengthNCount[MaxML+1];\n        unsigned matchlengthMaxValue = MaxML, matchlengthLog;\n        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);\n        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);\n        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);\n        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);\n        ZSTD_buildFSETable( entropy->MLTable,\n                            matchlengthNCount, matchlengthMaxValue,\n                            ML_base, ML_bits,\n                            matchlengthLog);\n        dictPtr += matchlengthHeaderSize;\n    }\n\n    {   short litlengthNCount[MaxLL+1];\n        unsigned litlengthMaxValue = MaxLL, litlengthLog;\n        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);\n        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);\n        RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);\n        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);\n        ZSTD_buildFSETable( entropy->LLTable,\n                            litlengthNCount, litlengthMaxValue,\n                            LL_base, LL_bits,\n                            
litlengthLog);\n        dictPtr += litlengthHeaderSize;\n    }\n\n    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);\n    {   int i;\n        size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));\n        for (i=0; i<3; i++) {\n            U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;\n            RETURN_ERROR_IF(rep==0 || rep > dictContentSize,\n                            dictionary_corrupted);\n            entropy->rep[i] = rep;\n    }   }\n\n    return dictPtr - (const BYTE*)dict;\n}\n\nstatic size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);\n    {   U32 const magic = MEM_readLE32(dict);\n        if (magic != ZSTD_MAGIC_DICTIONARY) {\n            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */\n    }   }\n    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);\n\n    /* load entropy tables */\n    {   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);\n        RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);\n        dict = (const char*)dict + eSize;\n        dictSize -= eSize;\n    }\n    dctx->litEntropy = dctx->fseEntropy = 1;\n\n    /* reference dictionary content */\n    return ZSTD_refDictContent(dctx, dict, dictSize);\n}\n\nsize_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)\n{\n    assert(dctx != NULL);\n    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */\n    dctx->stage = ZSTDds_getFrameHeaderSize;\n    dctx->decodedSize = 0;\n    dctx->previousDstEnd = NULL;\n    dctx->prefixStart = NULL;\n    dctx->virtualStart = NULL;\n    dctx->dictEnd = NULL;\n    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */\n    dctx->litEntropy = dctx->fseEntropy = 0;\n    dctx->dictID = 0;\n    dctx->bType = bt_reserved;\n    
ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));\n    memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */\n    dctx->LLTptr = dctx->entropy.LLTable;\n    dctx->MLTptr = dctx->entropy.MLTable;\n    dctx->OFTptr = dctx->entropy.OFTable;\n    dctx->HUFptr = dctx->entropy.hufTable;\n    return 0;\n}\n\nsize_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );\n    if (dict && dictSize)\n        RETURN_ERROR_IF(\n            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),\n            dictionary_corrupted);\n    return 0;\n}\n\n\n/* ======   ZSTD_DDict   ====== */\n\nsize_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)\n{\n    DEBUGLOG(4, \"ZSTD_decompressBegin_usingDDict\");\n    assert(dctx != NULL);\n    if (ddict) {\n        const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);\n        size_t const dictSize = ZSTD_DDict_dictSize(ddict);\n        const void* const dictEnd = dictStart + dictSize;\n        dctx->ddictIsCold = (dctx->dictEnd != dictEnd);\n        DEBUGLOG(4, \"DDict is %s\",\n                    dctx->ddictIsCold ? \"~cold~\" : \"hot!\");\n    }\n    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );\n    if (ddict) {   /* NULL ddict is equivalent to no dictionary */\n        ZSTD_copyDDictParameters(dctx, ddict);\n    }\n    return 0;\n}\n\n/*! ZSTD_getDictID_fromDict() :\n *  Provides the dictID stored within dictionary.\n *  if @return == 0, the dictionary is not conformant with Zstandard specification.\n *  It can still be loaded, but as a content-only dictionary. */\nunsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)\n{\n    if (dictSize < 8) return 0;\n    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;\n    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);\n}\n\n/*! 
ZSTD_getDictID_fromFrame() :\n *  Provides the dictID required to decompress frame stored within `src`.\n *  If @return == 0, the dictID could not be decoded.\n *  This could for one of the following reasons :\n *  - The frame does not require a dictionary (most common case).\n *  - The frame was built with dictID intentionally removed.\n *    Needed dictionary is a hidden information.\n *    Note : this use case also happens when using a non-conformant dictionary.\n *  - `srcSize` is too small, and as a result, frame header could not be decoded.\n *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.\n *  - This is not a Zstandard frame.\n *  When identifying the exact failure cause, it's possible to use\n *  ZSTD_getFrameHeader(), which will provide a more precise error code. */\nunsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)\n{\n    ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };\n    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);\n    if (ZSTD_isError(hError)) return 0;\n    return zfp.dictID;\n}\n\n\n/*! ZSTD_decompress_usingDDict() :\n*   Decompression using a pre-digested Dictionary\n*   Use dictionary without significant overhead. 
*/\nsize_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,\n                                  void* dst, size_t dstCapacity,\n                            const void* src, size_t srcSize,\n                            const ZSTD_DDict* ddict)\n{\n    /* pass content and size in case legacy frames are encountered */\n    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,\n                                     NULL, 0,\n                                     ddict);\n}\n\n\n/*=====================================\n*   Streaming decompression\n*====================================*/\n\nZSTD_DStream* ZSTD_createDStream(void)\n{\n    DEBUGLOG(3, \"ZSTD_createDStream\");\n    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);\n}\n\nZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)\n{\n    return ZSTD_initStaticDCtx(workspace, workspaceSize);\n}\n\nZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)\n{\n    return ZSTD_createDCtx_advanced(customMem);\n}\n\nsize_t ZSTD_freeDStream(ZSTD_DStream* zds)\n{\n    return ZSTD_freeDCtx(zds);\n}\n\n\n/* ***  Initialization  *** */\n\nsize_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }\nsize_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }\n\nsize_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,\n                                   const void* dict, size_t dictSize,\n                                         ZSTD_dictLoadMethod_e dictLoadMethod,\n                                         ZSTD_dictContentType_e dictContentType)\n{\n    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);\n    ZSTD_clearDict(dctx);\n    if (dict && dictSize != 0) {\n        dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);\n        RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);\n        dctx->ddict = dctx->ddictLocal;\n        dctx->dictUses = 
ZSTD_use_indefinitely;\n    }\n    return 0;\n}\n\nsize_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);\n}\n\nsize_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);\n}\n\nsize_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)\n{\n    FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType));\n    dctx->dictUses = ZSTD_use_once;\n    return 0;\n}\n\nsize_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)\n{\n    return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);\n}\n\n\n/* ZSTD_initDStream_usingDict() :\n * return : expected size, aka ZSTD_startingInputLength().\n * this function cannot fail */\nsize_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)\n{\n    DEBUGLOG(4, \"ZSTD_initDStream_usingDict\");\n    FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );\n    return ZSTD_startingInputLength(zds->format);\n}\n\n/* note : this variant can't fail */\nsize_t ZSTD_initDStream(ZSTD_DStream* zds)\n{\n    DEBUGLOG(4, \"ZSTD_initDStream\");\n    return ZSTD_initDStream_usingDDict(zds, NULL);\n}\n\n/* ZSTD_initDStream_usingDDict() :\n * ddict will just be referenced, and must outlive decompression session\n * this function cannot fail */\nsize_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)\n{\n    FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );\n    FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );\n    return 
ZSTD_startingInputLength(dctx->format);\n}\n\n/* ZSTD_resetDStream() :\n * return : expected size, aka ZSTD_startingInputLength().\n * this function cannot fail */\nsize_t ZSTD_resetDStream(ZSTD_DStream* dctx)\n{\n    FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));\n    return ZSTD_startingInputLength(dctx->format);\n}\n\n\nsize_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)\n{\n    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);\n    ZSTD_clearDict(dctx);\n    if (ddict) {\n        dctx->ddict = ddict;\n        dctx->dictUses = ZSTD_use_indefinitely;\n    }\n    return 0;\n}\n\n/* ZSTD_DCtx_setMaxWindowSize() :\n * note : no direct equivalence in ZSTD_DCtx_setParameter,\n * since this version sets windowSize, and the other sets windowLog */\nsize_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)\n{\n    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);\n    size_t const min = (size_t)1 << bounds.lowerBound;\n    size_t const max = (size_t)1 << bounds.upperBound;\n    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);\n    RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);\n    RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);\n    dctx->maxWindowSize = maxWindowSize;\n    return 0;\n}\n\nsize_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)\n{\n    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format);\n}\n\nZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)\n{\n    ZSTD_bounds bounds = { 0, 0, 0 };\n    switch(dParam) {\n        case ZSTD_d_windowLogMax:\n            bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;\n            bounds.upperBound = ZSTD_WINDOWLOG_MAX;\n            return bounds;\n        case ZSTD_d_format:\n            bounds.lowerBound = (int)ZSTD_f_zstd1;\n            bounds.upperBound = (int)ZSTD_f_zstd1_magicless;\n            ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);\n            return 
bounds;\n        default:;\n    }\n    bounds.error = ERROR(parameter_unsupported);\n    return bounds;\n}\n\n/* ZSTD_dParam_withinBounds:\n * @return 1 if value is within dParam bounds,\n * 0 otherwise */\nstatic int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)\n{\n    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);\n    if (ZSTD_isError(bounds.error)) return 0;\n    if (value < bounds.lowerBound) return 0;\n    if (value > bounds.upperBound) return 0;\n    return 1;\n}\n\n#define CHECK_DBOUNDS(p,v) {                \\\n    RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \\\n}\n\nsize_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)\n{\n    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);\n    switch(dParam) {\n        case ZSTD_d_windowLogMax:\n            if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;\n            CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);\n            dctx->maxWindowSize = ((size_t)1) << value;\n            return 0;\n        case ZSTD_d_format:\n            CHECK_DBOUNDS(ZSTD_d_format, value);\n            dctx->format = (ZSTD_format_e)value;\n            return 0;\n        default:;\n    }\n    RETURN_ERROR(parameter_unsupported);\n}\n\nsize_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)\n{\n    if ( (reset == ZSTD_reset_session_only)\n      || (reset == ZSTD_reset_session_and_parameters) ) {\n        dctx->streamStage = zdss_init;\n        dctx->noForwardProgress = 0;\n    }\n    if ( (reset == ZSTD_reset_parameters)\n      || (reset == ZSTD_reset_session_and_parameters) ) {\n        RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);\n        ZSTD_clearDict(dctx);\n        dctx->format = ZSTD_f_zstd1;\n        dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;\n    }\n    return 0;\n}\n\n\nsize_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)\n{\n    return ZSTD_sizeof_DCtx(dctx);\n}\n\nsize_t 
ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)\n{\n    size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);\n    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);\n    unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);\n    size_t const minRBSize = (size_t) neededSize;\n    RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,\n                    frameParameter_windowTooLarge);\n    return minRBSize;\n}\n\nsize_t ZSTD_estimateDStreamSize(size_t windowSize)\n{\n    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);\n    size_t const inBuffSize = blockSize;  /* no block can be larger */\n    size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);\n    return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;\n}\n\nsize_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)\n{\n    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable, but requires an additional parameter (or a dctx) */\n    ZSTD_frameHeader zfh;\n    size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);\n    if (ZSTD_isError(err)) return err;\n    RETURN_ERROR_IF(err>0, srcSize_wrong);\n    RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,\n                    frameParameter_windowTooLarge);\n    return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);\n}\n\n\n/* *****   Decompression   ***** */\n\nMEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    size_t const length = MIN(dstCapacity, srcSize);\n    if (length > 0) {\n        memcpy(dst, src, length);\n    }\n    return length;\n}\n\n\nsize_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)\n{\n    const char* const src = (const char*)input->src;\n    const char* const istart = input->pos != 0 ? 
src + input->pos : src;\n    const char* const iend = input->size != 0 ? src + input->size : src;\n    const char* ip = istart;\n    char* const dst = (char*)output->dst;\n    char* const ostart = output->pos != 0 ? dst + output->pos : dst;\n    char* const oend = output->size != 0 ? dst + output->size : dst;\n    char* op = ostart;\n    U32 someMoreWork = 1;\n\n    DEBUGLOG(5, \"ZSTD_decompressStream\");\n    RETURN_ERROR_IF(\n        input->pos > input->size,\n        srcSize_wrong,\n        \"forbidden. in: pos: %u   vs size: %u\",\n        (U32)input->pos, (U32)input->size);\n    RETURN_ERROR_IF(\n        output->pos > output->size,\n        dstSize_tooSmall,\n        \"forbidden. out: pos: %u   vs size: %u\",\n        (U32)output->pos, (U32)output->size);\n    DEBUGLOG(5, \"input size : %u\", (U32)(input->size - input->pos));\n\n    while (someMoreWork) {\n        switch(zds->streamStage)\n        {\n        case zdss_init :\n            DEBUGLOG(5, \"stage zdss_init => transparent reset \");\n            zds->streamStage = zdss_loadHeader;\n            zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;\n            zds->legacyVersion = 0;\n            zds->hostageByte = 0;\n            /* fall-through */\n\n        case zdss_loadHeader :\n            DEBUGLOG(5, \"stage zdss_loadHeader (srcSize : %u)\", (U32)(iend - ip));\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)\n            if (zds->legacyVersion) {\n                RETURN_ERROR_IF(zds->staticSize, memory_allocation,\n                    \"legacy support is incompatible with static dctx\");\n                {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);\n                    if (hint==0) zds->streamStage = zdss_init;\n                    return hint;\n            }   }\n#endif\n            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);\n                
DEBUGLOG(5, \"header size : %u\", (U32)hSize);\n                if (ZSTD_isError(hSize)) {\n#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)\n                    U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);\n                    if (legacyVersion) {\n                        ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);\n                        const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;\n                        size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;\n                        DEBUGLOG(5, \"ZSTD_decompressStream: detected legacy version v0.%u\", legacyVersion);\n                        RETURN_ERROR_IF(zds->staticSize, memory_allocation,\n                            \"legacy support is incompatible with static dctx\");\n                        FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,\n                                    zds->previousLegacyVersion, legacyVersion,\n                                    dict, dictSize));\n                        zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;\n                        {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);\n                            if (hint==0) zds->streamStage = zdss_init;   /* or stay in stage zdss_loadHeader */\n                            return hint;\n                    }   }\n#endif\n                    return hSize;   /* error */\n                }\n                if (hSize != 0) {   /* need more input */\n                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */\n                    size_t const remainingInput = (size_t)(iend-ip);\n                    assert(iend >= ip);\n                    if (toLoad > remainingInput) {   /* not enough input to load full header */\n                        if (remainingInput > 0) {\n                            memcpy(zds->headerBuffer + zds->lhSize, 
ip, remainingInput);\n                            zds->lhSize += remainingInput;\n                        }\n                        input->pos = input->size;\n                        return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */\n                    }\n                    assert(ip != NULL);\n                    memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;\n                    break;\n            }   }\n\n            /* check for single-pass mode opportunity */\n            if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */\n                && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {\n                size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);\n                if (cSize <= (size_t)(iend-istart)) {\n                    /* shortcut : using single-pass mode */\n                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds));\n                    if (ZSTD_isError(decompressedSize)) return decompressedSize;\n                    DEBUGLOG(4, \"shortcut to single-pass ZSTD_decompress_usingDDict()\")\n                    ip = istart + cSize;\n                    op += decompressedSize;\n                    zds->expected = 0;\n                    zds->streamStage = zdss_init;\n                    someMoreWork = 0;\n                    break;\n            }   }\n\n            /* Consume header (see ZSTDds_decodeFrameHeader) */\n            DEBUGLOG(4, \"Consume header\");\n            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)));\n\n            if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */\n                zds->expected = MEM_readLE32(zds->headerBuffer + 
ZSTD_FRAMEIDSIZE);\n                zds->stage = ZSTDds_skipFrame;\n            } else {\n                FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));\n                zds->expected = ZSTD_blockHeaderSize;\n                zds->stage = ZSTDds_decodeBlockHeader;\n            }\n\n            /* control buffer memory usage */\n            DEBUGLOG(4, \"Control max memory usage (%u KB <= max %u KB)\",\n                        (U32)(zds->fParams.windowSize >>10),\n                        (U32)(zds->maxWindowSize >> 10) );\n            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);\n            RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,\n                            frameParameter_windowTooLarge);\n\n            /* Adapt buffer sizes to frame header instructions */\n            {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);\n                size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);\n                if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {\n                    size_t const bufferSize = neededInBuffSize + neededOutBuffSize;\n                    DEBUGLOG(4, \"inBuff  : from %u to %u\",\n                                (U32)zds->inBuffSize, (U32)neededInBuffSize);\n                    DEBUGLOG(4, \"outBuff : from %u to %u\",\n                                (U32)zds->outBuffSize, (U32)neededOutBuffSize);\n                    if (zds->staticSize) {  /* static DCtx */\n                        DEBUGLOG(4, \"staticSize : %u\", (U32)zds->staticSize);\n                        assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */\n                        RETURN_ERROR_IF(\n                            bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),\n                            memory_allocation);\n          
          } else {\n                        ZSTD_free(zds->inBuff, zds->customMem);\n                        zds->inBuffSize = 0;\n                        zds->outBuffSize = 0;\n                        zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);\n                        RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);\n                    }\n                    zds->inBuffSize = neededInBuffSize;\n                    zds->outBuff = zds->inBuff + zds->inBuffSize;\n                    zds->outBuffSize = neededOutBuffSize;\n            }   }\n            zds->streamStage = zdss_read;\n            /* fall-through */\n\n        case zdss_read:\n            DEBUGLOG(5, \"stage zdss_read\");\n            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip);\n                DEBUGLOG(5, \"neededInSize = %u\", (U32)neededInSize);\n                if (neededInSize==0) {  /* end of frame */\n                    zds->streamStage = zdss_init;\n                    someMoreWork = 0;\n                    break;\n                }\n                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */\n                    int const isSkipFrame = ZSTD_isSkipFrame(zds);\n                    size_t const decodedSize = ZSTD_decompressContinue(zds,\n                        zds->outBuff + zds->outStart, (isSkipFrame ? 
0 : zds->outBuffSize - zds->outStart),\n                        ip, neededInSize);\n                    if (ZSTD_isError(decodedSize)) return decodedSize;\n                    ip += neededInSize;\n                    if (!decodedSize && !isSkipFrame) break;   /* this was just a header */\n                    zds->outEnd = zds->outStart + decodedSize;\n                    zds->streamStage = zdss_flush;\n                    break;\n            }   }\n            if (ip==iend) { someMoreWork = 0; break; }   /* no more input */\n            zds->streamStage = zdss_load;\n            /* fall-through */\n\n        case zdss_load:\n            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);\n                size_t const toLoad = neededInSize - zds->inPos;\n                int const isSkipFrame = ZSTD_isSkipFrame(zds);\n                size_t loadedSize;\n                /* At this point we shouldn't be decompressing a block that we can stream. */\n                assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));\n                if (isSkipFrame) {\n                    loadedSize = MIN(toLoad, (size_t)(iend-ip));\n                } else {\n                    RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,\n                                    corruption_detected,\n                                    \"should never happen\");\n                    loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);\n                }\n                ip += loadedSize;\n                zds->inPos += loadedSize;\n                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */\n\n                /* decode loaded input */\n                {   size_t const decodedSize = ZSTD_decompressContinue(zds,\n                        zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,\n                        zds->inBuff, neededInSize);\n                    if 
(ZSTD_isError(decodedSize)) return decodedSize;\n                    zds->inPos = 0;   /* input is consumed */\n                    if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; }   /* this was just a header */\n                    zds->outEnd = zds->outStart +  decodedSize;\n            }   }\n            zds->streamStage = zdss_flush;\n            /* fall-through */\n\n        case zdss_flush:\n            {   size_t const toFlushSize = zds->outEnd - zds->outStart;\n                size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);\n                op += flushedSize;\n                zds->outStart += flushedSize;\n                if (flushedSize == toFlushSize) {  /* flush completed */\n                    zds->streamStage = zdss_read;\n                    if ( (zds->outBuffSize < zds->fParams.frameContentSize)\n                      && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {\n                        DEBUGLOG(5, \"restart filling outBuff from beginning (left:%i, needed:%u)\",\n                                (int)(zds->outBuffSize - zds->outStart),\n                                (U32)zds->fParams.blockSizeMax);\n                        zds->outStart = zds->outEnd = 0;\n                    }\n                    break;\n            }   }\n            /* cannot complete flush */\n            someMoreWork = 0;\n            break;\n\n        default:\n            assert(0);    /* impossible */\n            RETURN_ERROR(GENERIC);   /* some compiler require default to do something */\n    }   }\n\n    /* result */\n    input->pos = (size_t)(ip - (const char*)(input->src));\n    output->pos = (size_t)(op - (char*)(output->dst));\n    if ((ip==istart) && (op==ostart)) {  /* no forward progress */\n        zds->noForwardProgress ++;\n        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {\n            RETURN_ERROR_IF(op==oend, dstSize_tooSmall);\n          
  RETURN_ERROR_IF(ip==iend, srcSize_wrong);\n            assert(0);\n        }\n    } else {\n        zds->noForwardProgress = 0;\n    }\n    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);\n        if (!nextSrcSizeHint) {   /* frame fully decoded */\n            if (zds->outEnd == zds->outStart) {  /* output fully flushed */\n                if (zds->hostageByte) {\n                    if (input->pos >= input->size) {\n                        /* can't release hostage (not present) */\n                        zds->streamStage = zdss_read;\n                        return 1;\n                    }\n                    input->pos++;  /* release hostage */\n                }   /* zds->hostageByte */\n                return 0;\n            }  /* zds->outEnd == zds->outStart */\n            if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */\n                input->pos--;   /* note : pos > 0, otherwise, impossible to finish reading last block */\n                zds->hostageByte=1;\n            }\n            return 1;\n        }  /* nextSrcSizeHint==0 */\n        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block);   /* preload header of next block */\n        assert(zds->inPos <= nextSrcSizeHint);\n        nextSrcSizeHint -= zds->inPos;   /* part already loaded*/\n        return nextSrcSizeHint;\n    }\n}\n\nsize_t ZSTD_decompressStream_simpleArgs (\n                            ZSTD_DCtx* dctx,\n                            void* dst, size_t dstCapacity, size_t* dstPos,\n                      const void* src, size_t srcSize, size_t* srcPos)\n{\n    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };\n    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };\n    /* ZSTD_compress_generic() will check validity of dstPos and srcPos */\n    size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);\n    *dstPos = output.pos;\n    *srcPos = 
input.pos;\n    return cErr;\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/decompress/zstd_decompress_block.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/* zstd_decompress_block :\n * this module takes care of decompressing _compressed_ block */\n\n/*-*******************************************************\n*  Dependencies\n*********************************************************/\n#include <string.h>      /* memcpy, memmove, memset */\n#include \"compiler.h\"    /* prefetch */\n#include \"cpu.h\"         /* bmi2 */\n#include \"mem.h\"         /* low level memory routines */\n#define FSE_STATIC_LINKING_ONLY\n#include \"fse.h\"\n#define HUF_STATIC_LINKING_ONLY\n#include \"huf.h\"\n#include \"zstd_internal.h\"\n#include \"zstd_decompress_internal.h\"   /* ZSTD_DCtx */\n#include \"zstd_ddict.h\"  /* ZSTD_DDictDictContent */\n#include \"zstd_decompress_block.h\"\n\n/*_*******************************************************\n*  Macros\n**********************************************************/\n\n/* These two optional macros force the use one way or another of the two\n * ZSTD_decompressSequences implementations. 
You can't force in both directions\n * at the same time.\n */\n#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \\\n    defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)\n#error \"Cannot force the use of the short and the long ZSTD_decompressSequences variants!\"\n#endif\n\n\n/*_*******************************************************\n*  Memory operations\n**********************************************************/\nstatic void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }\n\n\n/*-*************************************************************\n *   Block decoding\n ***************************************************************/\n\n/*! ZSTD_getcBlockSize() :\n *  Provides the size of compressed block from block header `src` */\nsize_t ZSTD_getcBlockSize(const void* src, size_t srcSize,\n                          blockProperties_t* bpPtr)\n{\n    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);\n\n    {   U32 const cBlockHeader = MEM_readLE24(src);\n        U32 const cSize = cBlockHeader >> 3;\n        bpPtr->lastBlock = cBlockHeader & 1;\n        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);\n        bpPtr->origSize = cSize;   /* only useful for RLE */\n        if (bpPtr->blockType == bt_rle) return 1;\n        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);\n        return cSize;\n    }\n}\n\n\n/* Hidden declaration for fullbench */\nsize_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,\n                          const void* src, size_t srcSize);\n/*! 
ZSTD_decodeLiteralsBlock() :\n * @return : nb of bytes read from src (< srcSize )\n *  note : symbol not declared but exposed for fullbench */\nsize_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,\n                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */\n{\n    DEBUGLOG(5, \"ZSTD_decodeLiteralsBlock\");\n    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);\n\n    {   const BYTE* const istart = (const BYTE*) src;\n        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);\n\n        switch(litEncType)\n        {\n        case set_repeat:\n            DEBUGLOG(5, \"set_repeat flag : re-using stats from previous compressed literals block\");\n            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);\n            /* fall-through */\n\n        case set_compressed:\n            RETURN_ERROR_IF(srcSize < 5, corruption_detected, \"srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3\");\n            {   size_t lhSize, litSize, litCSize;\n                U32 singleStream=0;\n                U32 const lhlCode = (istart[0] >> 2) & 3;\n                U32 const lhc = MEM_readLE32(istart);\n                size_t hufSuccess;\n                switch(lhlCode)\n                {\n                case 0: case 1: default:   /* note : default is impossible, since lhlCode into [0..3] */\n                    /* 2 - 2 - 10 - 10 */\n                    singleStream = !lhlCode;\n                    lhSize = 3;\n                    litSize  = (lhc >> 4) & 0x3FF;\n                    litCSize = (lhc >> 14) & 0x3FF;\n                    break;\n                case 2:\n                    /* 2 - 2 - 14 - 14 */\n                    lhSize = 4;\n                    litSize  = (lhc >> 4) & 0x3FFF;\n                    litCSize = lhc >> 18;\n                    break;\n                case 3:\n                    /* 2 - 2 - 18 - 18 */\n                    lhSize = 5;\n                    
litSize  = (lhc >> 4) & 0x3FFFF;\n                    litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);\n                    break;\n                }\n                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);\n                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);\n\n                /* prefetch huffman table if cold */\n                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {\n                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));\n                }\n\n                if (litEncType==set_repeat) {\n                    if (singleStream) {\n                        hufSuccess = HUF_decompress1X_usingDTable_bmi2(\n                            dctx->litBuffer, litSize, istart+lhSize, litCSize,\n                            dctx->HUFptr, dctx->bmi2);\n                    } else {\n                        hufSuccess = HUF_decompress4X_usingDTable_bmi2(\n                            dctx->litBuffer, litSize, istart+lhSize, litCSize,\n                            dctx->HUFptr, dctx->bmi2);\n                    }\n                } else {\n                    if (singleStream) {\n#if defined(HUF_FORCE_DECOMPRESS_X2)\n                        hufSuccess = HUF_decompress1X_DCtx_wksp(\n                            dctx->entropy.hufTable, dctx->litBuffer, litSize,\n                            istart+lhSize, litCSize, dctx->workspace,\n                            sizeof(dctx->workspace));\n#else\n                        hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(\n                            dctx->entropy.hufTable, dctx->litBuffer, litSize,\n                            istart+lhSize, litCSize, dctx->workspace,\n                            sizeof(dctx->workspace), dctx->bmi2);\n#endif\n                    } else {\n                        hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(\n                            dctx->entropy.hufTable, dctx->litBuffer, litSize,\n                   
         istart+lhSize, litCSize, dctx->workspace,\n                            sizeof(dctx->workspace), dctx->bmi2);\n                    }\n                }\n\n                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);\n\n                dctx->litPtr = dctx->litBuffer;\n                dctx->litSize = litSize;\n                dctx->litEntropy = 1;\n                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;\n                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n                return litCSize + lhSize;\n            }\n\n        case set_basic:\n            {   size_t litSize, lhSize;\n                U32 const lhlCode = ((istart[0]) >> 2) & 3;\n                switch(lhlCode)\n                {\n                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */\n                    lhSize = 1;\n                    litSize = istart[0] >> 3;\n                    break;\n                case 1:\n                    lhSize = 2;\n                    litSize = MEM_readLE16(istart) >> 4;\n                    break;\n                case 3:\n                    lhSize = 3;\n                    litSize = MEM_readLE24(istart) >> 4;\n                    break;\n                }\n\n                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */\n                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);\n                    memcpy(dctx->litBuffer, istart+lhSize, litSize);\n                    dctx->litPtr = dctx->litBuffer;\n                    dctx->litSize = litSize;\n                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n                    return lhSize+litSize;\n                }\n                /* direct reference into compressed stream */\n                dctx->litPtr = istart+lhSize;\n                dctx->litSize = litSize;\n                
return lhSize+litSize;\n            }\n\n        case set_rle:\n            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;\n                size_t litSize, lhSize;\n                switch(lhlCode)\n                {\n                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */\n                    lhSize = 1;\n                    litSize = istart[0] >> 3;\n                    break;\n                case 1:\n                    lhSize = 2;\n                    litSize = MEM_readLE16(istart) >> 4;\n                    break;\n                case 3:\n                    lhSize = 3;\n                    litSize = MEM_readLE24(istart) >> 4;\n                    RETURN_ERROR_IF(srcSize<4, corruption_detected, \"srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4\");\n                    break;\n                }\n                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);\n                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);\n                dctx->litPtr = dctx->litBuffer;\n                dctx->litSize = litSize;\n                return lhSize+1;\n            }\n        default:\n            RETURN_ERROR(corruption_detected, \"impossible\");\n        }\n    }\n}\n\n/* Default FSE distribution tables.\n * These are pre-calculated FSE decoding tables using default distributions as defined in specification :\n * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions\n * They were generated programmatically with following method :\n * - start from default distributions, present in /lib/common/zstd_internal.h\n * - generate tables normally, using ZSTD_buildFSETable()\n * - printout the content of tables\n * - pretify output, report below, test with fuzzer to ensure it's correct */\n\n/* Default FSE distribution table for Literal Lengths */\nstatic const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {\n     { 
 1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */\n     /* nextState, nbAddBits, nbBits, baseVal */\n     {  0,  0,  4,    0},  { 16,  0,  4,    0},\n     { 32,  0,  5,    1},  {  0,  0,  5,    3},\n     {  0,  0,  5,    4},  {  0,  0,  5,    6},\n     {  0,  0,  5,    7},  {  0,  0,  5,    9},\n     {  0,  0,  5,   10},  {  0,  0,  5,   12},\n     {  0,  0,  6,   14},  {  0,  1,  5,   16},\n     {  0,  1,  5,   20},  {  0,  1,  5,   22},\n     {  0,  2,  5,   28},  {  0,  3,  5,   32},\n     {  0,  4,  5,   48},  { 32,  6,  5,   64},\n     {  0,  7,  5,  128},  {  0,  8,  6,  256},\n     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},\n     { 32,  0,  4,    0},  {  0,  0,  4,    1},\n     {  0,  0,  5,    2},  { 32,  0,  5,    4},\n     {  0,  0,  5,    5},  { 32,  0,  5,    7},\n     {  0,  0,  5,    8},  { 32,  0,  5,   10},\n     {  0,  0,  5,   11},  {  0,  0,  6,   13},\n     { 32,  1,  5,   16},  {  0,  1,  5,   18},\n     { 32,  1,  5,   22},  {  0,  2,  5,   24},\n     { 32,  3,  5,   32},  {  0,  3,  5,   40},\n     {  0,  6,  4,   64},  { 16,  6,  4,   64},\n     { 32,  7,  5,  128},  {  0,  9,  6,  512},\n     {  0, 11,  6, 2048},  { 48,  0,  4,    0},\n     { 16,  0,  4,    1},  { 32,  0,  5,    2},\n     { 32,  0,  5,    3},  { 32,  0,  5,    5},\n     { 32,  0,  5,    6},  { 32,  0,  5,    8},\n     { 32,  0,  5,    9},  { 32,  0,  5,   11},\n     { 32,  0,  5,   12},  {  0,  0,  6,   15},\n     { 32,  1,  5,   18},  { 32,  1,  5,   20},\n     { 32,  2,  5,   24},  { 32,  2,  5,   28},\n     { 32,  3,  5,   40},  { 32,  4,  5,   48},\n     {  0, 16,  6,65536},  {  0, 15,  6,32768},\n     {  0, 14,  6,16384},  {  0, 13,  6, 8192},\n};   /* LL_defaultDTable */\n\n/* Default FSE distribution table for Offset Codes */\nstatic const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {\n    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */\n    /* nextState, nbAddBits, nbBits, baseVal */\n    {  0,  0,  5,    0},     
{  0,  6,  4,   61},\n    {  0,  9,  5,  509},     {  0, 15,  5,32765},\n    {  0, 21,  5,2097149},   {  0,  3,  5,    5},\n    {  0,  7,  4,  125},     {  0, 12,  5, 4093},\n    {  0, 18,  5,262141},    {  0, 23,  5,8388605},\n    {  0,  5,  5,   29},     {  0,  8,  4,  253},\n    {  0, 14,  5,16381},     {  0, 20,  5,1048573},\n    {  0,  2,  5,    1},     { 16,  7,  4,  125},\n    {  0, 11,  5, 2045},     {  0, 17,  5,131069},\n    {  0, 22,  5,4194301},   {  0,  4,  5,   13},\n    { 16,  8,  4,  253},     {  0, 13,  5, 8189},\n    {  0, 19,  5,524285},    {  0,  1,  5,    1},\n    { 16,  6,  4,   61},     {  0, 10,  5, 1021},\n    {  0, 16,  5,65533},     {  0, 28,  5,268435453},\n    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},\n    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},\n};   /* OF_defaultDTable */\n\n\n/* Default FSE distribution table for Match Lengths */\nstatic const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {\n    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */\n    /* nextState, nbAddBits, nbBits, baseVal */\n    {  0,  0,  6,    3},  {  0,  0,  4,    4},\n    { 32,  0,  5,    5},  {  0,  0,  5,    6},\n    {  0,  0,  5,    8},  {  0,  0,  5,    9},\n    {  0,  0,  5,   11},  {  0,  0,  6,   13},\n    {  0,  0,  6,   16},  {  0,  0,  6,   19},\n    {  0,  0,  6,   22},  {  0,  0,  6,   25},\n    {  0,  0,  6,   28},  {  0,  0,  6,   31},\n    {  0,  0,  6,   34},  {  0,  1,  6,   37},\n    {  0,  1,  6,   41},  {  0,  2,  6,   47},\n    {  0,  3,  6,   59},  {  0,  4,  6,   83},\n    {  0,  7,  6,  131},  {  0,  9,  6,  515},\n    { 16,  0,  4,    4},  {  0,  0,  4,    5},\n    { 32,  0,  5,    6},  {  0,  0,  5,    7},\n    { 32,  0,  5,    9},  {  0,  0,  5,   10},\n    {  0,  0,  6,   12},  {  0,  0,  6,   15},\n    {  0,  0,  6,   18},  {  0,  0,  6,   21},\n    {  0,  0,  6,   24},  {  0,  0,  6,   27},\n    {  0,  0,  6,   30},  {  0,  0,  6,   33},\n    {  0,  1,  6,   35},  {  0,  1,  6,   
39},\n    {  0,  2,  6,   43},  {  0,  3,  6,   51},\n    {  0,  4,  6,   67},  {  0,  5,  6,   99},\n    {  0,  8,  6,  259},  { 32,  0,  4,    4},\n    { 48,  0,  4,    4},  { 16,  0,  4,    5},\n    { 32,  0,  5,    7},  { 32,  0,  5,    8},\n    { 32,  0,  5,   10},  { 32,  0,  5,   11},\n    {  0,  0,  6,   14},  {  0,  0,  6,   17},\n    {  0,  0,  6,   20},  {  0,  0,  6,   23},\n    {  0,  0,  6,   26},  {  0,  0,  6,   29},\n    {  0,  0,  6,   32},  {  0, 16,  6,65539},\n    {  0, 15,  6,32771},  {  0, 14,  6,16387},\n    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},\n    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},\n};   /* ML_defaultDTable */\n\n\nstatic void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)\n{\n    void* ptr = dt;\n    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;\n    ZSTD_seqSymbol* const cell = dt + 1;\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->nbBits = 0;\n    cell->nextState = 0;\n    assert(nbAddBits < 255);\n    cell->nbAdditionalBits = (BYTE)nbAddBits;\n    cell->baseValue = baseValue;\n}\n\n\n/* ZSTD_buildFSETable() :\n * generate FSE decoding table for one symbol (ll, ml or off)\n * cannot fail if input is valid =>\n * all inputs are presumed validated at this stage */\nvoid\nZSTD_buildFSETable(ZSTD_seqSymbol* dt,\n            const short* normalizedCounter, unsigned maxSymbolValue,\n            const U32* baseValue, const U32* nbAdditionalBits,\n            unsigned tableLog)\n{\n    ZSTD_seqSymbol* const tableDecode = dt+1;\n    U16 symbolNext[MaxSeq+1];\n\n    U32 const maxSV1 = maxSymbolValue + 1;\n    U32 const tableSize = 1 << tableLog;\n    U32 highThreshold = tableSize-1;\n\n    /* Sanity Checks */\n    assert(maxSymbolValue <= MaxSeq);\n    assert(tableLog <= MaxFSELog);\n\n    /* Init, lay down lowprob symbols */\n    {   ZSTD_seqSymbol_header DTableH;\n        DTableH.tableLog = tableLog;\n        DTableH.fastMode = 1;\n        {   S16 const 
largeLimit= (S16)(1 << (tableLog-1));\n            U32 s;\n            for (s=0; s<maxSV1; s++) {\n                if (normalizedCounter[s]==-1) {\n                    tableDecode[highThreshold--].baseValue = s;\n                    symbolNext[s] = 1;\n                } else {\n                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;\n                    assert(normalizedCounter[s]>=0);\n                    symbolNext[s] = (U16)normalizedCounter[s];\n        }   }   }\n        memcpy(dt, &DTableH, sizeof(DTableH));\n    }\n\n    /* Spread symbols */\n    {   U32 const tableMask = tableSize-1;\n        U32 const step = FSE_TABLESTEP(tableSize);\n        U32 s, position = 0;\n        for (s=0; s<maxSV1; s++) {\n            int i;\n            for (i=0; i<normalizedCounter[s]; i++) {\n                tableDecode[position].baseValue = s;\n                position = (position + step) & tableMask;\n                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n        }   }\n        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n    }\n\n    /* Build Decoding table */\n    {   U32 u;\n        for (u=0; u<tableSize; u++) {\n            U32 const symbol = tableDecode[u].baseValue;\n            U32 const nextState = symbolNext[symbol]++;\n            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );\n            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);\n            assert(nbAdditionalBits[symbol] < 255);\n            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];\n            tableDecode[u].baseValue = baseValue[symbol];\n    }   }\n}\n\n\n/*! 
ZSTD_buildSeqTable() :\n * @return : nb bytes read from src,\n *           or an error code if it fails */\nstatic size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,\n                                 symbolEncodingType_e type, unsigned max, U32 maxLog,\n                                 const void* src, size_t srcSize,\n                                 const U32* baseValue, const U32* nbAdditionalBits,\n                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,\n                                 int ddictIsCold, int nbSeq)\n{\n    switch(type)\n    {\n    case set_rle :\n        RETURN_ERROR_IF(!srcSize, srcSize_wrong);\n        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);\n        {   U32 const symbol = *(const BYTE*)src;\n            U32 const baseline = baseValue[symbol];\n            U32 const nbBits = nbAdditionalBits[symbol];\n            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);\n        }\n        *DTablePtr = DTableSpace;\n        return 1;\n    case set_basic :\n        *DTablePtr = defaultTable;\n        return 0;\n    case set_repeat:\n        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);\n        /* prefetch FSE table if used */\n        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {\n            const void* const pStart = *DTablePtr;\n            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));\n            PREFETCH_AREA(pStart, pSize);\n        }\n        return 0;\n    case set_compressed :\n        {   unsigned tableLog;\n            S16 norm[MaxSeq+1];\n            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);\n            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);\n            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);\n            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);\n            *DTablePtr = 
DTableSpace;\n            return headerSize;\n        }\n    default :\n        assert(0);\n        RETURN_ERROR(GENERIC, \"impossible\");\n    }\n}\n\nsize_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,\n                             const void* src, size_t srcSize)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* ip = istart;\n    int nbSeq;\n    DEBUGLOG(5, \"ZSTD_decodeSeqHeaders\");\n\n    /* check */\n    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);\n\n    /* SeqHead */\n    nbSeq = *ip++;\n    if (!nbSeq) {\n        *nbSeqPtr=0;\n        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);\n        return 1;\n    }\n    if (nbSeq > 0x7F) {\n        if (nbSeq == 0xFF) {\n            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);\n            nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;\n        } else {\n            RETURN_ERROR_IF(ip >= iend, srcSize_wrong);\n            nbSeq = ((nbSeq-0x80)<<8) + *ip++;\n        }\n    }\n    *nbSeqPtr = nbSeq;\n\n    /* FSE table descriptors */\n    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong); /* minimum possible size: 1 byte for symbol encoding types */\n    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);\n        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);\n        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);\n        ip++;\n\n        /* Build DTables */\n        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,\n                                                      LLtype, MaxLL, LLFSELog,\n                                                      ip, iend-ip,\n                                                      LL_base, LL_bits,\n                                                      LL_defaultDTable, dctx->fseEntropy,\n                                                      dctx->ddictIsCold, nbSeq);\n           
 RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);\n            ip += llhSize;\n        }\n\n        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,\n                                                      OFtype, MaxOff, OffFSELog,\n                                                      ip, iend-ip,\n                                                      OF_base, OF_bits,\n                                                      OF_defaultDTable, dctx->fseEntropy,\n                                                      dctx->ddictIsCold, nbSeq);\n            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);\n            ip += ofhSize;\n        }\n\n        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,\n                                                      MLtype, MaxML, MLFSELog,\n                                                      ip, iend-ip,\n                                                      ML_base, ML_bits,\n                                                      ML_defaultDTable, dctx->fseEntropy,\n                                                      dctx->ddictIsCold, nbSeq);\n            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);\n            ip += mlhSize;\n        }\n    }\n\n    return ip-istart;\n}\n\n\ntypedef struct {\n    size_t litLength;\n    size_t matchLength;\n    size_t offset;\n    const BYTE* match;\n} seq_t;\n\ntypedef struct {\n    size_t state;\n    const ZSTD_seqSymbol* table;\n} ZSTD_fseState;\n\ntypedef struct {\n    BIT_DStream_t DStream;\n    ZSTD_fseState stateLL;\n    ZSTD_fseState stateOffb;\n    ZSTD_fseState stateML;\n    size_t prevOffset[ZSTD_REP_NUM];\n    const BYTE* prefixStart;\n    const BYTE* dictEnd;\n    size_t pos;\n} seqState_t;\n\n/*! 
ZSTD_overlapCopy8() :\n *  Copies 8 bytes from ip to op and updates op and ip where ip <= op.\n *  If the offset is < 8 then the offset is spread to at least 8 bytes.\n *\n *  Precondition: *ip <= *op\n *  Postcondition: *op - *op >= 8\n */\nHINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {\n    assert(*ip <= *op);\n    if (offset < 8) {\n        /* close range match, overlap */\n        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */\n        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */\n        int const sub2 = dec64table[offset];\n        (*op)[0] = (*ip)[0];\n        (*op)[1] = (*ip)[1];\n        (*op)[2] = (*ip)[2];\n        (*op)[3] = (*ip)[3];\n        *ip += dec32table[offset];\n        ZSTD_copy4(*op+4, *ip);\n        *ip -= sub2;\n    } else {\n        ZSTD_copy8(*op, *ip);\n    }\n    *ip += 8;\n    *op += 8;\n    assert(*op - *ip >= 8);\n}\n\n/*! ZSTD_safecopy() :\n *  Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer\n *  and write up to 16 bytes past oend_w (op >= oend_w is allowed).\n *  This function is only called in the uncommon case where the sequence is near the end of the block. 
It\n *  should be fast for a single long sequence, but can be slow for several short sequences.\n *\n *  @param ovtype controls the overlap detection\n *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.\n *         - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.\n *           The src buffer must be before the dst buffer.\n */\nstatic void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {\n    ptrdiff_t const diff = op - ip;\n    BYTE* const oend = op + length;\n\n    assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||\n           (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));\n\n    if (length < 8) {\n        /* Handle short lengths. */\n        while (op < oend) *op++ = *ip++;\n        return;\n    }\n    if (ovtype == ZSTD_overlap_src_before_dst) {\n        /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */\n        assert(length >= 8);\n        ZSTD_overlapCopy8(&op, &ip, diff);\n        assert(op - ip >= 8);\n        assert(op <= oend);\n    }\n\n    if (oend <= oend_w) {\n        /* No risk of overwrite. */\n        ZSTD_wildcopy(op, ip, length, ovtype);\n        return;\n    }\n    if (op <= oend_w) {\n        /* Wildcopy until we get close to the end. */\n        assert(oend > oend_w);\n        ZSTD_wildcopy(op, ip, oend_w - op, ovtype);\n        ip += oend_w - op;\n        op = oend_w;\n    }\n    /* Handle the leftovers. */\n    while (op < oend) *op++ = *ip++;\n}\n\n/* ZSTD_execSequenceEnd():\n * This version handles cases that are near the end of the output buffer. It requires\n * more careful checks to make sure there is no overflow. 
By separating out these hard\n * and unlikely cases, we can speed up the common cases.\n *\n * NOTE: This function needs to be fast for a single long sequence, but doesn't need\n * to be optimized for many small sequences, since those fall into ZSTD_execSequence().\n */\nFORCE_NOINLINE\nsize_t ZSTD_execSequenceEnd(BYTE* op,\n                            BYTE* const oend, seq_t sequence,\n                            const BYTE** litPtr, const BYTE* const litLimit,\n                            const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)\n{\n    BYTE* const oLitEnd = op + sequence.litLength;\n    size_t const sequenceLength = sequence.litLength + sequence.matchLength;\n    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */\n    const BYTE* const iLitEnd = *litPtr + sequence.litLength;\n    const BYTE* match = oLitEnd - sequence.offset;\n    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;\n\n    /* bounds checks */\n    assert(oLitEnd < oMatchEnd);\n    RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, \"last match must fit within dstBuffer\");\n    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, \"try to read beyond literal buffer\");\n\n    /* copy literals */\n    ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);\n    op = oLitEnd;\n    *litPtr = iLitEnd;\n\n    /* copy Match */\n    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {\n        /* offset beyond prefix */\n        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);\n        match = dictEnd - (prefixStart-match);\n        if (match + sequence.matchLength <= dictEnd) {\n            memmove(oLitEnd, match, sequence.matchLength);\n            return sequenceLength;\n        }\n        /* span extDict & currentPrefixSegment */\n        {   size_t const length1 = dictEnd - match;\n            memmove(oLitEnd, match, length1);\n            op = 
oLitEnd + length1;\n            sequence.matchLength -= length1;\n            match = prefixStart;\n    }   }\n    ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);\n    return sequenceLength;\n}\n\nHINT_INLINE\nsize_t ZSTD_execSequence(BYTE* op,\n                         BYTE* const oend, seq_t sequence,\n                         const BYTE** litPtr, const BYTE* const litLimit,\n                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)\n{\n    BYTE* const oLitEnd = op + sequence.litLength;\n    size_t const sequenceLength = sequence.litLength + sequence.matchLength;\n    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */\n    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;\n    const BYTE* const iLitEnd = *litPtr + sequence.litLength;\n    const BYTE* match = oLitEnd - sequence.offset;\n\n    /* Errors and uncommon cases handled here. */\n    assert(oLitEnd < oMatchEnd);\n    if (UNLIKELY(iLitEnd > litLimit || oMatchEnd > oend_w))\n        return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);\n\n    /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */\n    assert(iLitEnd <= litLimit /* Literal length is in bounds */);\n    assert(oLitEnd <= oend_w /* Can wildcopy literals */);\n    assert(oMatchEnd <= oend_w /* Can wildcopy matches */);\n\n    /* Copy Literals:\n     * Split out litLength <= 16 since it is nearly always true. 
+1.6% on gcc-9.\n     * We likely don't need the full 32-byte wildcopy.\n     */\n    assert(WILDCOPY_OVERLENGTH >= 16);\n    ZSTD_copy16(op, (*litPtr));\n    if (UNLIKELY(sequence.litLength > 16)) {\n        ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);\n    }\n    op = oLitEnd;\n    *litPtr = iLitEnd;   /* update for next sequence */\n\n    /* Copy Match */\n    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {\n        /* offset beyond prefix -> go into extDict */\n        RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected);\n        match = dictEnd + (match - prefixStart);\n        if (match + sequence.matchLength <= dictEnd) {\n            memmove(oLitEnd, match, sequence.matchLength);\n            return sequenceLength;\n        }\n        /* span extDict & currentPrefixSegment */\n        {   size_t const length1 = dictEnd - match;\n            memmove(oLitEnd, match, length1);\n            op = oLitEnd + length1;\n            sequence.matchLength -= length1;\n            match = prefixStart;\n    }   }\n    /* Match within prefix of 1 or more bytes */\n    assert(op <= oMatchEnd);\n    assert(oMatchEnd <= oend_w);\n    assert(match >= prefixStart);\n    assert(sequence.matchLength >= 1);\n\n    /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy\n     * without overlap checking.\n     */\n    if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {\n        /* We bet on a full wildcopy for matches, since we expect matches to be\n         * longer than literals (in general). In silesia, ~10% of matches are longer\n         * than 16 bytes.\n         */\n        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);\n        return sequenceLength;\n    }\n    assert(sequence.offset < WILDCOPY_VECLEN);\n\n    /* Copy 8 bytes and spread the offset to be >= 8. 
*/\n    ZSTD_overlapCopy8(&op, &match, sequence.offset);\n\n    /* If the match length is > 8 bytes, then continue with the wildcopy. */\n    if (sequence.matchLength > 8) {\n        assert(op < oMatchEnd);\n        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);\n    }\n    return sequenceLength;\n}\n\nstatic void\nZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)\n{\n    const void* ptr = dt;\n    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;\n    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);\n    DEBUGLOG(6, \"ZSTD_initFseState : val=%u using %u bits\",\n                (U32)DStatePtr->state, DTableH->tableLog);\n    BIT_reloadDStream(bitD);\n    DStatePtr->table = dt + 1;\n}\n\nFORCE_INLINE_TEMPLATE void\nZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)\n{\n    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    size_t const lowBits = BIT_readBits(bitD, nbBits);\n    DStatePtr->state = DInfo.nextState + lowBits;\n}\n\nFORCE_INLINE_TEMPLATE void\nZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)\n{\n    U32 const nbBits = DInfo.nbBits;\n    size_t const lowBits = BIT_readBits(bitD, nbBits);\n    DStatePtr->state = DInfo.nextState + lowBits;\n}\n\n/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum\n * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)\n * bits before reloading. This value is the maximum number of bytes we read\n * after reloading when we are decoding long offsets.\n */\n#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \\\n    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \\\n        ? 
ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \\\n        : 0)\n\ntypedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;\ntypedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;\n\nFORCE_INLINE_TEMPLATE seq_t\nZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)\n{\n    seq_t seq;\n    ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];\n    ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];\n    ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];\n    U32 const llBase = llDInfo.baseValue;\n    U32 const mlBase = mlDInfo.baseValue;\n    U32 const ofBase = ofDInfo.baseValue;\n    BYTE const llBits = llDInfo.nbAdditionalBits;\n    BYTE const mlBits = mlDInfo.nbAdditionalBits;\n    BYTE const ofBits = ofDInfo.nbAdditionalBits;\n    BYTE const totalBits = llBits+mlBits+ofBits;\n\n    /* sequence */\n    {   size_t offset;\n        if (ofBits > 1) {\n            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);\n            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);\n            assert(ofBits <= MaxOff);\n            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {\n                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);\n                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);\n                BIT_reloadDStream(&seqState->DStream);\n                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);\n                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */\n            } else {\n                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */\n                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);\n     
       }\n            seqState->prevOffset[2] = seqState->prevOffset[1];\n            seqState->prevOffset[1] = seqState->prevOffset[0];\n            seqState->prevOffset[0] = offset;\n        } else {\n            U32 const ll0 = (llBase == 0);\n            if (LIKELY((ofBits == 0))) {\n                if (LIKELY(!ll0))\n                    offset = seqState->prevOffset[0];\n                else {\n                    offset = seqState->prevOffset[1];\n                    seqState->prevOffset[1] = seqState->prevOffset[0];\n                    seqState->prevOffset[0] = offset;\n                }\n            } else {\n                offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);\n                {   size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];\n                    temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */\n                    if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];\n                    seqState->prevOffset[1] = seqState->prevOffset[0];\n                    seqState->prevOffset[0] = offset = temp;\n        }   }   }\n        seq.offset = offset;\n    }\n\n    seq.matchLength = mlBase;\n    if (mlBits > 0)\n        seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);\n\n    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))\n        BIT_reloadDStream(&seqState->DStream);\n    if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))\n        BIT_reloadDStream(&seqState->DStream);\n    /* Ensure there are enough bits to read the rest of data in 64-bit mode. 
*/\n    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);\n\n    seq.litLength = llBase;\n    if (llBits > 0)\n        seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);\n\n    if (MEM_32bits())\n        BIT_reloadDStream(&seqState->DStream);\n\n    DEBUGLOG(6, \"seq: litL=%u, matchL=%u, offset=%u\",\n                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);\n\n    if (prefetch == ZSTD_p_prefetch) {\n        size_t const pos = seqState->pos + seq.litLength;\n        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;\n        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.\n                                                    * No consequence though : no memory access will occur, offset is only used for prefetching */\n        seqState->pos = pos + seq.matchLength;\n    }\n\n    /* ANS state update\n     * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().\n     * clang-9.2.0 does 7% worse with ZSTD_updateFseState().\n     * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the\n     * better option, so it is the default for other compilers. 
But, if you\n     * measure that it is worse, please put up a pull request.\n     */\n    {\n#if defined(__GNUC__) && !defined(__clang__)\n        const int kUseUpdateFseState = 1;\n#else\n        const int kUseUpdateFseState = 0;\n#endif\n        if (kUseUpdateFseState) {\n            ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */\n            ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */\n            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */\n            ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */\n        } else {\n            ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo);    /* <=  9 bits */\n            ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo);    /* <=  9 bits */\n            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */\n            ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo);  /* <=  8 bits */\n        }\n    }\n\n    return seq;\n}\n\n#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG\nFORCE_INLINE_TEMPLATE size_t\nDONT_VECTORIZE\nZSTD_decompressSequences_body( ZSTD_DCtx* dctx,\n                               void* dst, size_t maxDstSize,\n                         const void* seqStart, size_t seqSize, int nbSeq,\n                         const ZSTD_longOffset_e isLongOffset)\n{\n    const BYTE* ip = (const BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* const oend = ostart + maxDstSize;\n    BYTE* op = ostart;\n    const BYTE* litPtr = dctx->litPtr;\n    const BYTE* const litEnd = litPtr + dctx->litSize;\n    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);\n    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);\n    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);\n    DEBUGLOG(5, 
\"ZSTD_decompressSequences_body\");\n\n    /* Regen sequences */\n    if (nbSeq) {\n        seqState_t seqState;\n        size_t error = 0;\n        dctx->fseEntropy = 1;\n        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }\n        RETURN_ERROR_IF(\n            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),\n            corruption_detected);\n        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);\n        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);\n        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);\n\n        ZSTD_STATIC_ASSERT(\n                BIT_DStream_unfinished < BIT_DStream_completed &&\n                BIT_DStream_endOfBuffer < BIT_DStream_completed &&\n                BIT_DStream_completed < BIT_DStream_overflow);\n\n        for ( ; ; ) {\n            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);\n            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);\n            DEBUGLOG(6, \"regenerated sequence size : %u\", (U32)oneSeqSize);\n            BIT_reloadDStream(&(seqState.DStream));\n            /* gcc and clang both don't like early returns in this loop.\n             * gcc doesn't like early breaks either.\n             * Instead save an error and report it at the end.\n             * When there is an error, don't increment op, so we don't\n             * overwrite.\n             */\n            if (UNLIKELY(ZSTD_isError(oneSeqSize))) error = oneSeqSize;\n            else op += oneSeqSize;\n            if (UNLIKELY(!--nbSeq)) break;\n        }\n\n        /* check if reached exact end */\n        DEBUGLOG(5, \"ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i\", nbSeq);\n        if (ZSTD_isError(error)) return error;\n        RETURN_ERROR_IF(nbSeq, corruption_detected);\n        
RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected);\n        /* save reps for next block */\n        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }\n    }\n\n    /* last literal segment */\n    {   size_t const lastLLSize = litEnd - litPtr;\n        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);\n        memcpy(op, litPtr, lastLLSize);\n        op += lastLLSize;\n    }\n\n    return op-ostart;\n}\n\nstatic size_t\nZSTD_decompressSequences_default(ZSTD_DCtx* dctx,\n                                 void* dst, size_t maxDstSize,\n                           const void* seqStart, size_t seqSize, int nbSeq,\n                           const ZSTD_longOffset_e isLongOffset)\n{\n    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);\n}\n#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */\n\n#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT\nFORCE_INLINE_TEMPLATE size_t\nZSTD_decompressSequencesLong_body(\n                               ZSTD_DCtx* dctx,\n                               void* dst, size_t maxDstSize,\n                         const void* seqStart, size_t seqSize, int nbSeq,\n                         const ZSTD_longOffset_e isLongOffset)\n{\n    const BYTE* ip = (const BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* const oend = ostart + maxDstSize;\n    BYTE* op = ostart;\n    const BYTE* litPtr = dctx->litPtr;\n    const BYTE* const litEnd = litPtr + dctx->litSize;\n    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);\n    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);\n    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);\n\n    /* Regen sequences */\n    if (nbSeq) {\n#define STORED_SEQS 4\n#define STORED_SEQS_MASK (STORED_SEQS-1)\n#define ADVANCED_SEQS 4\n        seq_t 
sequences[STORED_SEQS];\n        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);\n        seqState_t seqState;\n        int seqNb;\n        dctx->fseEntropy = 1;\n        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }\n        seqState.prefixStart = prefixStart;\n        seqState.pos = (size_t)(op-prefixStart);\n        seqState.dictEnd = dictEnd;\n        assert(iend >= ip);\n        RETURN_ERROR_IF(\n            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),\n            corruption_detected);\n        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);\n        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);\n        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);\n\n        /* prepare in advance */\n        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {\n            sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);\n            PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */\n        }\n        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);\n\n        /* decode and decompress */\n        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {\n            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);\n            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);\n            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;\n            PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including 
invalid ones */\n            sequences[seqNb & STORED_SEQS_MASK] = sequence;\n            op += oneSeqSize;\n        }\n        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);\n\n        /* finish queue */\n        seqNb -= seqAdvance;\n        for ( ; seqNb<nbSeq ; seqNb++) {\n            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);\n            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;\n            op += oneSeqSize;\n        }\n\n        /* save reps for next block */\n        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }\n    }\n\n    /* last literal segment */\n    {   size_t const lastLLSize = litEnd - litPtr;\n        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);\n        memcpy(op, litPtr, lastLLSize);\n        op += lastLLSize;\n    }\n\n    return op-ostart;\n}\n\nstatic size_t\nZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,\n                                 void* dst, size_t maxDstSize,\n                           const void* seqStart, size_t seqSize, int nbSeq,\n                           const ZSTD_longOffset_e isLongOffset)\n{\n    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);\n}\n#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */\n\n\n\n#if DYNAMIC_BMI2\n\n#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG\nstatic TARGET_ATTRIBUTE(\"bmi2\") size_t\nDONT_VECTORIZE\nZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,\n                                 void* dst, size_t maxDstSize,\n                           const void* seqStart, size_t seqSize, int nbSeq,\n                           const ZSTD_longOffset_e isLongOffset)\n{\n    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);\n}\n#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */\n\n#ifndef 
ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT\nstatic TARGET_ATTRIBUTE(\"bmi2\") size_t\nZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,\n                                 void* dst, size_t maxDstSize,\n                           const void* seqStart, size_t seqSize, int nbSeq,\n                           const ZSTD_longOffset_e isLongOffset)\n{\n    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);\n}\n#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */\n\n#endif /* DYNAMIC_BMI2 */\n\ntypedef size_t (*ZSTD_decompressSequences_t)(\n                            ZSTD_DCtx* dctx,\n                            void* dst, size_t maxDstSize,\n                            const void* seqStart, size_t seqSize, int nbSeq,\n                            const ZSTD_longOffset_e isLongOffset);\n\n#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG\nstatic size_t\nZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,\n                   const void* seqStart, size_t seqSize, int nbSeq,\n                   const ZSTD_longOffset_e isLongOffset)\n{\n    DEBUGLOG(5, \"ZSTD_decompressSequences\");\n#if DYNAMIC_BMI2\n    if (dctx->bmi2) {\n        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);\n    }\n#endif\n  return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);\n}\n#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */\n\n\n#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT\n/* ZSTD_decompressSequencesLong() :\n * decompression function triggered when a minimum share of offsets is considered \"long\",\n * aka out of cache.\n * note : \"long\" definition seems overloaded here, sometimes meaning \"wider than bitstream register\", and sometimes meaning \"farther than memory cache distance\".\n * This function will try to mitigate main memory latency through the use of prefetching */\nstatic size_t\nZSTD_decompressSequencesLong(ZSTD_DCtx* 
dctx,\n                             void* dst, size_t maxDstSize,\n                             const void* seqStart, size_t seqSize, int nbSeq,\n                             const ZSTD_longOffset_e isLongOffset)\n{\n    DEBUGLOG(5, \"ZSTD_decompressSequencesLong\");\n#if DYNAMIC_BMI2\n    if (dctx->bmi2) {\n        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);\n    }\n#endif\n  return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);\n}\n#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */\n\n\n\n#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \\\n    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)\n/* ZSTD_getLongOffsetsShare() :\n * condition : offTable must be valid\n * @return : \"share\" of long offsets (arbitrarily defined as > (1<<23))\n *           compared to maximum possible of (1<<OffFSELog) */\nstatic unsigned\nZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)\n{\n    const void* ptr = offTable;\n    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;\n    const ZSTD_seqSymbol* table = offTable + 1;\n    U32 const max = 1 << tableLog;\n    U32 u, total = 0;\n    DEBUGLOG(5, \"ZSTD_getLongOffsetsShare: (tableLog=%u)\", tableLog);\n\n    assert(max <= (1 << OffFSELog));  /* max not too large */\n    for (u=0; u<max; u++) {\n        if (table[u].nbAdditionalBits > 22) total += 1;\n    }\n\n    assert(tableLog <= OffFSELog);\n    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */\n\n    return total;\n}\n#endif\n\n\nsize_t\nZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,\n                              void* dst, size_t dstCapacity,\n                        const void* src, size_t srcSize, const int frame)\n{   /* blockType == blockCompressed */\n    const BYTE* ip = (const BYTE*)src;\n    /* isLongOffset must be true if there are long offsets.\n     * Offsets are long if they are larger than 
2^STREAM_ACCUMULATOR_MIN.\n     * We don't expect that to be the case in 64-bit mode.\n     * In block mode, window size is not known, so we have to be conservative.\n     * (note: but it could be evaluated from current-lowLimit)\n     */\n    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));\n    DEBUGLOG(5, \"ZSTD_decompressBlock_internal (size : %u)\", (U32)srcSize);\n\n    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);\n\n    /* Decode literals section */\n    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);\n        DEBUGLOG(5, \"ZSTD_decodeLiteralsBlock : %u\", (U32)litCSize);\n        if (ZSTD_isError(litCSize)) return litCSize;\n        ip += litCSize;\n        srcSize -= litCSize;\n    }\n\n    /* Build Decoding Tables */\n    {\n        /* These macros control at build-time which decompressor implementation\n         * we use. If neither is defined, we do some inspection and dispatch at\n         * runtime.\n         */\n#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \\\n    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)\n        int usePrefetchDecoder = dctx->ddictIsCold;\n#endif\n        int nbSeq;\n        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);\n        if (ZSTD_isError(seqHSize)) return seqHSize;\n        ip += seqHSize;\n        srcSize -= seqHSize;\n\n#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \\\n    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)\n        if ( !usePrefetchDecoder\n          && (!frame || (dctx->fParams.windowSize > (1<<24)))\n          && (nbSeq>ADVANCED_SEQS) ) {  /* could probably use a larger nbSeq limit */\n            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);\n            U32 const minShare = MEM_64bits() ? 
7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */\n            usePrefetchDecoder = (shareLongOffsets >= minShare);\n        }\n#endif\n\n        dctx->ddictIsCold = 0;\n\n#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \\\n    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)\n        if (usePrefetchDecoder)\n#endif\n#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT\n            return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);\n#endif\n\n#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG\n        /* else */\n        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);\n#endif\n    }\n}\n\n\nvoid ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)\n{\n    if (dst != dctx->previousDstEnd) {   /* not contiguous */\n        dctx->dictEnd = dctx->previousDstEnd;\n        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));\n        dctx->prefixStart = dst;\n        dctx->previousDstEnd = dst;\n    }\n}\n\n\nsize_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,\n                            void* dst, size_t dstCapacity,\n                      const void* src, size_t srcSize)\n{\n    size_t dSize;\n    ZSTD_checkContinuity(dctx, dst);\n    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);\n    dctx->previousDstEnd = (char*)dst + dSize;\n    return dSize;\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/decompress/zstd_decompress_block.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n#ifndef ZSTD_DEC_BLOCK_H\n#define ZSTD_DEC_BLOCK_H\n\n/*-*******************************************************\n *  Dependencies\n *********************************************************/\n#include <stddef.h>   /* size_t */\n#include \"zstd.h\"    /* DCtx, and some public functions */\n#include \"zstd_internal.h\"  /* blockProperties_t, and some public functions */\n#include \"zstd_decompress_internal.h\"  /* ZSTD_seqSymbol */\n\n\n/* ===   Prototypes   === */\n\n/* note: prototypes already published within `zstd.h` :\n * ZSTD_decompressBlock()\n */\n\n/* note: prototypes already published within `zstd_internal.h` :\n * ZSTD_getcBlockSize()\n * ZSTD_decodeSeqHeaders()\n */\n\n\n/* ZSTD_decompressBlock_internal() :\n * decompress block, starting at `src`,\n * into destination buffer `dst`.\n * @return : decompressed block size,\n *           or an error code (which can be tested using ZSTD_isError())\n */\nsize_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,\n                               void* dst, size_t dstCapacity,\n                         const void* src, size_t srcSize, const int frame);\n\n/* ZSTD_buildFSETable() :\n * generate FSE decoding table for one symbol (ll, ml or off)\n * this function must be called with valid parameters only\n * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)\n * in which case it cannot fail.\n * Internal use only.\n */\nvoid ZSTD_buildFSETable(ZSTD_seqSymbol* dt,\n             const short* normalizedCounter, unsigned maxSymbolValue,\n             const U32* 
baseValue, const U32* nbAdditionalBits,\n                   unsigned tableLog);\n\n\n#endif /* ZSTD_DEC_BLOCK_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/decompress/zstd_decompress_internal.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/* zstd_decompress_internal:\n * objects and definitions shared within lib/decompress modules */\n\n #ifndef ZSTD_DECOMPRESS_INTERNAL_H\n #define ZSTD_DECOMPRESS_INTERNAL_H\n\n\n/*-*******************************************************\n *  Dependencies\n *********************************************************/\n#include \"mem.h\"             /* BYTE, U16, U32 */\n#include \"zstd_internal.h\"   /* ZSTD_seqSymbol */\n\n\n\n/*-*******************************************************\n *  Constants\n *********************************************************/\nstatic const U32 LL_base[MaxLL+1] = {\n                 0,    1,    2,     3,     4,     5,     6,      7,\n                 8,    9,   10,    11,    12,    13,    14,     15,\n                16,   18,   20,    22,    24,    28,    32,     40,\n                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,\n                0x2000, 0x4000, 0x8000, 0x10000 };\n\nstatic const U32 OF_base[MaxOff+1] = {\n                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,\n                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,\n                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,\n                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };\n\nstatic const U32 OF_bits[MaxOff+1] = {\n                     0,  1,  2,  3,  4,  5,  6,  7,\n                     8,  9, 10, 11, 12, 13, 14, 15,\n                    16, 17, 18, 19, 20, 21, 22, 23,\n                    24, 25, 
26, 27, 28, 29, 30, 31 };\n\nstatic const U32 ML_base[MaxML+1] = {\n                     3,  4,  5,    6,     7,     8,     9,    10,\n                    11, 12, 13,   14,    15,    16,    17,    18,\n                    19, 20, 21,   22,    23,    24,    25,    26,\n                    27, 28, 29,   30,    31,    32,    33,    34,\n                    35, 37, 39,   41,    43,    47,    51,    59,\n                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,\n                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };\n\n\n/*-*******************************************************\n *  Decompression types\n *********************************************************/\n typedef struct {\n     U32 fastMode;\n     U32 tableLog;\n } ZSTD_seqSymbol_header;\n\n typedef struct {\n     U16  nextState;\n     BYTE nbAdditionalBits;\n     BYTE nbBits;\n     U32  baseValue;\n } ZSTD_seqSymbol;\n\n #define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))\n\ntypedef struct {\n    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */\n    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */\n    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */\n    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */\n    U32 rep[ZSTD_REP_NUM];\n} ZSTD_entropyDTables_t;\n\ntypedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,\n               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,\n               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,\n               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;\n\ntypedef enum { zdss_init=0, zdss_loadHeader,\n               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;\n\ntypedef enum {\n    ZSTD_use_indefinitely = -1,  /* Use the dictionary 
indefinitely */\n    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists free it) */\n    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */\n} ZSTD_dictUses_e;\n\nstruct ZSTD_DCtx_s\n{\n    const ZSTD_seqSymbol* LLTptr;\n    const ZSTD_seqSymbol* MLTptr;\n    const ZSTD_seqSymbol* OFTptr;\n    const HUF_DTable* HUFptr;\n    ZSTD_entropyDTables_t entropy;\n    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */\n    const void* previousDstEnd;   /* detect continuity */\n    const void* prefixStart;      /* start of current segment */\n    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */\n    const void* dictEnd;          /* end of previous segment */\n    size_t expected;\n    ZSTD_frameHeader fParams;\n    U64 decodedSize;\n    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */\n    ZSTD_dStage stage;\n    U32 litEntropy;\n    U32 fseEntropy;\n    XXH64_state_t xxhState;\n    size_t headerSize;\n    ZSTD_format_e format;\n    const BYTE* litPtr;\n    ZSTD_customMem customMem;\n    size_t litSize;\n    size_t rleSize;\n    size_t staticSize;\n    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. 
*/\n\n    /* dictionary */\n    ZSTD_DDict* ddictLocal;\n    const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */\n    U32 dictID;\n    int ddictIsCold;             /* if == 1 : dictionary is \"new\" for working context, and presumed \"cold\" (not in cpu cache) */\n    ZSTD_dictUses_e dictUses;\n\n    /* streaming */\n    ZSTD_dStreamStage streamStage;\n    char*  inBuff;\n    size_t inBuffSize;\n    size_t inPos;\n    size_t maxWindowSize;\n    char*  outBuff;\n    size_t outBuffSize;\n    size_t outStart;\n    size_t outEnd;\n    size_t lhSize;\n    void* legacyContext;\n    U32 previousLegacyVersion;\n    U32 legacyVersion;\n    U32 hostageByte;\n    int noForwardProgress;\n\n    /* workspace */\n    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];\n    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];\n};  /* typedef'd to ZSTD_DCtx within \"zstd.h\" */\n\n\n/*-*******************************************************\n *  Shared internal functions\n *********************************************************/\n\n/*! ZSTD_loadDEntropy() :\n *  dict : must point at beginning of a valid zstd dictionary.\n * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */\nsize_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,\n                   const void* const dict, size_t const dictSize);\n\n/*! ZSTD_checkContinuity() :\n *  check if next `dst` follows previous position, where decompression ended.\n *  If yes, do nothing (continue on current segment).\n *  If not, classify previous segment as \"external dictionary\", and start a new segment.\n *  This function cannot fail. */\nvoid ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst);\n\n\n#endif /* ZSTD_DECOMPRESS_INTERNAL_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/deprecated/zbuff.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/* ***************************************************************\n*  NOTES/WARNINGS\n******************************************************************/\n/* The streaming API defined here is deprecated.\n * Consider migrating towards ZSTD_compressStream() API in `zstd.h`\n * See 'lib/README.md'.\n *****************************************************************/\n\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#ifndef ZSTD_BUFFERED_H_23987\n#define ZSTD_BUFFERED_H_23987\n\n/* *************************************\n*  Dependencies\n***************************************/\n#include <stddef.h>      /* size_t */\n#include \"zstd.h\"        /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */\n\n\n/* ***************************************************************\n*  Compiler specifics\n*****************************************************************/\n/* Deprecation warnings */\n/* Should these warnings be a problem,\n * it is generally possible to disable them,\n * typically with -Wno-deprecated-declarations for gcc\n * or _CRT_SECURE_NO_WARNINGS in Visual.\n * Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS\n */\n#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS\n#  define ZBUFF_DEPRECATED(message) ZSTDLIB_API  /* disable deprecation warnings */\n#else\n#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */\n#    define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API\n#  elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__)\n#    define ZBUFF_DEPRECATED(message) 
ZSTDLIB_API __attribute__((deprecated(message)))\n#  elif defined(__GNUC__) && (__GNUC__ >= 3)\n#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))\n#  elif defined(_MSC_VER)\n#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message))\n#  else\n#    pragma message(\"WARNING: You need to implement ZBUFF_DEPRECATED for this compiler\")\n#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API\n#  endif\n#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */\n\n\n/* *************************************\n*  Streaming functions\n***************************************/\n/* This is the easier \"buffered\" streaming API,\n*  using an internal buffer to lift all restrictions on user-provided buffers\n*  which can be any size, any place, for both input and output.\n*  ZBUFF and ZSTD are 100% interoperable,\n*  frames created by one can be decoded by the other one */\n\ntypedef ZSTD_CStream ZBUFF_CCtx;\nZBUFF_DEPRECATED(\"use ZSTD_createCStream\") ZBUFF_CCtx* ZBUFF_createCCtx(void);\nZBUFF_DEPRECATED(\"use ZSTD_freeCStream\")   size_t      ZBUFF_freeCCtx(ZBUFF_CCtx* cctx);\n\nZBUFF_DEPRECATED(\"use ZSTD_initCStream\")           size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel);\nZBUFF_DEPRECATED(\"use ZSTD_initCStream_usingDict\") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);\n\nZBUFF_DEPRECATED(\"use ZSTD_compressStream\") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr);\nZBUFF_DEPRECATED(\"use ZSTD_flushStream\")    size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);\nZBUFF_DEPRECATED(\"use ZSTD_endStream\")      size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);\n\n/*-*************************************************\n*  Streaming compression - howto\n*\n*  A ZBUFF_CCtx object is required to track streaming operation.\n*  Use 
ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.\n*  ZBUFF_CCtx objects can be reused multiple times.\n*\n*  Start by initializing ZBUF_CCtx.\n*  Use ZBUFF_compressInit() to start a new compression operation.\n*  Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary.\n*\n*  Use ZBUFF_compressContinue() repetitively to consume input stream.\n*  *srcSizePtr and *dstCapacityPtr can be any size.\n*  The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to present again remaining data.\n*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters or change @dst .\n*  @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency)\n*            or an error code, which can be tested using ZBUFF_isError().\n*\n*  At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush().\n*  The nb of bytes written into `dst` will be reported into *dstCapacityPtr.\n*  Note that the function cannot output more than *dstCapacityPtr,\n*  therefore, some content might still be left into internal buffer if *dstCapacityPtr is too small.\n*  @return : nb of bytes still present into internal buffer (0 if it's empty)\n*            or an error code, which can be tested using ZBUFF_isError().\n*\n*  ZBUFF_compressEnd() instructs to finish a frame.\n*  It will perform a flush and write frame epilogue.\n*  The epilogue is required for decoders to consider a frame completed.\n*  Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.\n*  In which case, call again ZBUFF_compressFlush() to complete the flush.\n*  @return : nb of bytes still present into internal buffer (0 if it's empty)\n*            
or an error code, which can be tested using ZBUFF_isError().\n*\n*  Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize()\n*  input : ZBUFF_recommendedCInSize==128 KB block size is the internal unit, use this value to reduce intermediate stages (better latency)\n*  output : ZBUFF_recommendedCOutSize==ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skip some buffering.\n*  By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering.\n* **************************************************/\n\n\ntypedef ZSTD_DStream ZBUFF_DCtx;\nZBUFF_DEPRECATED(\"use ZSTD_createDStream\") ZBUFF_DCtx* ZBUFF_createDCtx(void);\nZBUFF_DEPRECATED(\"use ZSTD_freeDStream\")   size_t      ZBUFF_freeDCtx(ZBUFF_DCtx* dctx);\n\nZBUFF_DEPRECATED(\"use ZSTD_initDStream\")           size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx);\nZBUFF_DEPRECATED(\"use ZSTD_initDStream_usingDict\") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize);\n\nZBUFF_DEPRECATED(\"use ZSTD_decompressStream\") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx,\n                                            void* dst, size_t* dstCapacityPtr,\n                                      const void* src, size_t* srcSizePtr);\n\n/*-***************************************************************************\n*  Streaming decompression howto\n*\n*  A ZBUFF_DCtx object is required to track streaming operations.\n*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.\n*  Use ZBUFF_decompressInit() to start a new decompression operation,\n*   or ZBUFF_decompressInitDictionary() if decompression requires a dictionary.\n*  Note that ZBUFF_DCtx objects can be re-init multiple times.\n*\n*  Use ZBUFF_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *dstCapacityPtr can be any 
size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.\n*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.\n*  @return : 0 when a frame is completely decoded and fully flushed,\n*            1 when there is still some data left within internal buffer to flush,\n*            >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency),\n*            or an error code, which can be tested using ZBUFF_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize()\n*  output : ZBUFF_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.\n*  input  : ZBUFF_recommendedDInSize == 128KB + 3;\n*           just follow indications from ZBUFF_decompressContinue() to minimize latency. 
It should always be <= 128 KB + 3 .\n* *******************************************************************************/\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nZBUFF_DEPRECATED(\"use ZSTD_isError\")      unsigned ZBUFF_isError(size_t errorCode);\nZBUFF_DEPRECATED(\"use ZSTD_getErrorName\") const char* ZBUFF_getErrorName(size_t errorCode);\n\n/** Functions below provide recommended buffer sizes for Compression or Decompression operations.\n*   These sizes are just hints, they tend to offer better latency */\nZBUFF_DEPRECATED(\"use ZSTD_CStreamInSize\")  size_t ZBUFF_recommendedCInSize(void);\nZBUFF_DEPRECATED(\"use ZSTD_CStreamOutSize\") size_t ZBUFF_recommendedCOutSize(void);\nZBUFF_DEPRECATED(\"use ZSTD_DStreamInSize\")  size_t ZBUFF_recommendedDInSize(void);\nZBUFF_DEPRECATED(\"use ZSTD_DStreamOutSize\") size_t ZBUFF_recommendedDOutSize(void);\n\n#endif  /* ZSTD_BUFFERED_H_23987 */\n\n\n#ifdef ZBUFF_STATIC_LINKING_ONLY\n#ifndef ZBUFF_STATIC_H_30298098432\n#define ZBUFF_STATIC_H_30298098432\n\n/* ====================================================================================\n * The definitions in this section are considered experimental.\n * They should never be used in association with a dynamic library, as they may change in the future.\n * They are provided for advanced usages.\n * Use them only in association with static linking.\n * ==================================================================================== */\n\n/*--- Dependency ---*/\n#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters, ZSTD_customMem */\n#include \"zstd.h\"\n\n\n/*--- Custom memory allocator ---*/\n/*! ZBUFF_createCCtx_advanced() :\n *  Create a ZBUFF compression context using external alloc and free functions */\nZBUFF_DEPRECATED(\"use ZSTD_createCStream_advanced\") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem);\n\n/*! 
ZBUFF_createDCtx_advanced() :\n *  Create a ZBUFF decompression context using external alloc and free functions */\nZBUFF_DEPRECATED(\"use ZSTD_createDStream_advanced\") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem);\n\n\n/*--- Advanced Streaming Initialization ---*/\nZBUFF_DEPRECATED(\"use ZSTD_initDStream_usingDict\") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,\n                                               const void* dict, size_t dictSize,\n                                               ZSTD_parameters params, unsigned long long pledgedSrcSize);\n\n\n#endif    /* ZBUFF_STATIC_H_30298098432 */\n#endif    /* ZBUFF_STATIC_LINKING_ONLY */\n\n\n#if defined (__cplusplus)\n}\n#endif\n"
  },
  {
    "path": "src/third_party/zstd/lib/deprecated/zbuff_common.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include \"error_private.h\"\n#include \"zbuff.h\"\n\n/*-****************************************\n*  ZBUFF Error Management  (deprecated)\n******************************************/\n\n/*! ZBUFF_isError() :\n*   tells if a return value is an error code */\nunsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }\n/*! ZBUFF_getErrorName() :\n*   provides error code string from function result (useful for debugging) */\nconst char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }\n"
  },
  {
    "path": "src/third_party/zstd/lib/deprecated/zbuff_compress.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n\n/* *************************************\n*  Dependencies\n***************************************/\n#define ZBUFF_STATIC_LINKING_ONLY\n#include \"zbuff.h\"\n\n\n/*-***********************************************************\n*  Streaming compression\n*\n*  A ZBUFF_CCtx object is required to track streaming operation.\n*  Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.\n*  Use ZBUFF_compressInit() to start a new compression operation.\n*  ZBUFF_CCtx objects can be reused multiple times.\n*\n*  Use ZBUFF_compressContinue() repetitively to consume your input.\n*  *srcSizePtr and *dstCapacityPtr can be any size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input.\n*  The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change dst .\n*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)\n*            or an error code, which can be tested using ZBUFF_isError().\n*\n*  ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer.\n*  Note that it will not output more than *dstCapacityPtr.\n*  Therefore, some content might still be left into its internal buffer if dst buffer is too small.\n*  @return : nb of bytes still present into internal buffer (0 if it's 
empty)\n*            or an error code, which can be tested using ZBUFF_isError().\n*\n*  ZBUFF_compressEnd() instructs to finish a frame.\n*  It will perform a flush and write frame epilogue.\n*  Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.\n*  @return : nb of bytes still present into internal buffer (0 if it's empty)\n*            or an error code, which can be tested using ZBUFF_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory)\n*  input : ZSTD_BLOCKSIZE_MAX (128 KB), internal unit size, it improves latency to use this value.\n*  output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed.\n* ***********************************************************/\n\nZBUFF_CCtx* ZBUFF_createCCtx(void)\n{\n    return ZSTD_createCStream();\n}\n\nZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem)\n{\n    return ZSTD_createCStream_advanced(customMem);\n}\n\nsize_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc)\n{\n    return ZSTD_freeCStream(zbc);\n}\n\n\n/* ======   Initialization   ====== */\n\nsize_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,\n                                   const void* dict, size_t dictSize,\n                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)\n{\n    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* preserve \"0 == unknown\" behavior */\n    return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize);\n}\n\n\nsize_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel)\n{\n    return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel);\n}\n\nsize_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel)\n{\n    return ZSTD_initCStream(zbc, compressionLevel);\n}\n\n/* ======   Compression   ====== 
*/\n\n\nsize_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc,\n                              void* dst, size_t* dstCapacityPtr,\n                        const void* src, size_t* srcSizePtr)\n{\n    size_t result;\n    ZSTD_outBuffer outBuff;\n    ZSTD_inBuffer inBuff;\n    outBuff.dst = dst;\n    outBuff.pos = 0;\n    outBuff.size = *dstCapacityPtr;\n    inBuff.src = src;\n    inBuff.pos = 0;\n    inBuff.size = *srcSizePtr;\n    result = ZSTD_compressStream(zbc, &outBuff, &inBuff);\n    *dstCapacityPtr = outBuff.pos;\n    *srcSizePtr = inBuff.pos;\n    return result;\n}\n\n\n\n/* ======   Finalize   ====== */\n\nsize_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)\n{\n    size_t result;\n    ZSTD_outBuffer outBuff;\n    outBuff.dst = dst;\n    outBuff.pos = 0;\n    outBuff.size = *dstCapacityPtr;\n    result = ZSTD_flushStream(zbc, &outBuff);\n    *dstCapacityPtr = outBuff.pos;\n    return result;\n}\n\n\nsize_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)\n{\n    size_t result;\n    ZSTD_outBuffer outBuff;\n    outBuff.dst = dst;\n    outBuff.pos = 0;\n    outBuff.size = *dstCapacityPtr;\n    result = ZSTD_endStream(zbc, &outBuff);\n    *dstCapacityPtr = outBuff.pos;\n    return result;\n}\n\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nsize_t ZBUFF_recommendedCInSize(void)  { return ZSTD_CStreamInSize(); }\nsize_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); }\n"
  },
  {
    "path": "src/third_party/zstd/lib/deprecated/zbuff_decompress.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n\n/* *************************************\n*  Dependencies\n***************************************/\n#define ZBUFF_STATIC_LINKING_ONLY\n#include \"zbuff.h\"\n\n\nZBUFF_DCtx* ZBUFF_createDCtx(void)\n{\n    return ZSTD_createDStream();\n}\n\nZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem)\n{\n    return ZSTD_createDStream_advanced(customMem);\n}\n\nsize_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd)\n{\n    return ZSTD_freeDStream(zbd);\n}\n\n\n/* *** Initialization *** */\n\nsize_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize)\n{\n    return ZSTD_initDStream_usingDict(zbd, dict, dictSize);\n}\n\nsize_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd)\n{\n    return ZSTD_initDStream(zbd);\n}\n\n\n/* *** Decompression *** */\n\nsize_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd,\n                                void* dst, size_t* dstCapacityPtr,\n                          const void* src, size_t* srcSizePtr)\n{\n    ZSTD_outBuffer outBuff;\n    ZSTD_inBuffer inBuff;\n    size_t result;\n    outBuff.dst  = dst;\n    outBuff.pos  = 0;\n    outBuff.size = *dstCapacityPtr;\n    inBuff.src  = src;\n    inBuff.pos  = 0;\n    inBuff.size = *srcSizePtr;\n    result = ZSTD_decompressStream(zbd, &outBuff, &inBuff);\n    *dstCapacityPtr = outBuff.pos;\n    *srcSizePtr = inBuff.pos;\n    return result;\n}\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nsize_t ZBUFF_recommendedDInSize(void)  { return ZSTD_DStreamInSize(); }\nsize_t ZBUFF_recommendedDOutSize(void) { return 
ZSTD_DStreamOutSize(); }\n"
  },
  {
    "path": "src/third_party/zstd/lib/dictBuilder/cover.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n/* *****************************************************************************\n * Constructs a dictionary using a heuristic based on the following paper:\n *\n * Liao, Petri, Moffat, Wirth\n * Effective Construction of Relative Lempel-Ziv Dictionaries\n * Published in WWW 2016.\n *\n * Adapted from code originally written by @ot (Giuseppe Ottaviano).\n ******************************************************************************/\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include <stdio.h>  /* fprintf */\n#include <stdlib.h> /* malloc, free, qsort */\n#include <string.h> /* memset */\n#include <time.h>   /* clock */\n\n#include \"mem.h\" /* read */\n#include \"pool.h\"\n#include \"threading.h\"\n#include \"cover.h\"\n#include \"zstd_internal.h\" /* includes zstd.h */\n#ifndef ZDICT_STATIC_LINKING_ONLY\n#define ZDICT_STATIC_LINKING_ONLY\n#endif\n#include \"zdict.h\"\n\n/*-*************************************\n*  Constants\n***************************************/\n#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))\n#define DEFAULT_SPLITPOINT 1.0\n\n/*-*************************************\n*  Console display\n***************************************/\nstatic int g_displayLevel = 2;\n#define DISPLAY(...)                                                           
\\\n  {                                                                            \\\n    fprintf(stderr, __VA_ARGS__);                                              \\\n    fflush(stderr);                                                            \\\n  }\n#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \\\n  if (displayLevel >= l) {                                                     \\\n    DISPLAY(__VA_ARGS__);                                                      \\\n  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */\n#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)\n\n#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \\\n  if (displayLevel >= l) {                                                     \\\n    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \\\n      g_time = clock();                                                        \\\n      DISPLAY(__VA_ARGS__);                                                    \\\n    }                                                                          \\\n  }\n#define DISPLAYUPDATE(l, ...) 
LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)\nstatic const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;\nstatic clock_t g_time = 0;\n\n/*-*************************************\n* Hash table\n***************************************\n* A small specialized hash map for storing activeDmers.\n* The map does not resize, so if it becomes full it will loop forever.\n* Thus, the map must be large enough to store every value.\n* The map implements linear probing and keeps its load less than 0.5.\n*/\n\n#define MAP_EMPTY_VALUE ((U32)-1)\ntypedef struct COVER_map_pair_t_s {\n  U32 key;\n  U32 value;\n} COVER_map_pair_t;\n\ntypedef struct COVER_map_s {\n  COVER_map_pair_t *data;\n  U32 sizeLog;\n  U32 size;\n  U32 sizeMask;\n} COVER_map_t;\n\n/**\n * Clear the map.\n */\nstatic void COVER_map_clear(COVER_map_t *map) {\n  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));\n}\n\n/**\n * Initializes a map of the given size.\n * Returns 1 on success and 0 on failure.\n * The map must be destroyed with COVER_map_destroy().\n * The map is only guaranteed to be large enough to hold size elements.\n */\nstatic int COVER_map_init(COVER_map_t *map, U32 size) {\n  map->sizeLog = ZSTD_highbit32(size) + 2;\n  map->size = (U32)1 << map->sizeLog;\n  map->sizeMask = map->size - 1;\n  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));\n  if (!map->data) {\n    map->sizeLog = 0;\n    map->size = 0;\n    return 0;\n  }\n  COVER_map_clear(map);\n  return 1;\n}\n\n/**\n * Internal hash function\n */\nstatic const U32 prime4bytes = 2654435761U;\nstatic U32 COVER_map_hash(COVER_map_t *map, U32 key) {\n  return (key * prime4bytes) >> (32 - map->sizeLog);\n}\n\n/**\n * Helper function that returns the index that a key should be placed into.\n */\nstatic U32 COVER_map_index(COVER_map_t *map, U32 key) {\n  const U32 hash = COVER_map_hash(map, key);\n  U32 i;\n  for (i = hash;; i = (i + 1) & map->sizeMask) {\n    COVER_map_pair_t *pos = 
&map->data[i];\n    if (pos->value == MAP_EMPTY_VALUE) {\n      return i;\n    }\n    if (pos->key == key) {\n      return i;\n    }\n  }\n}\n\n/**\n * Returns the pointer to the value for key.\n * If key is not in the map, it is inserted and the value is set to 0.\n * The map must not be full.\n */\nstatic U32 *COVER_map_at(COVER_map_t *map, U32 key) {\n  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];\n  if (pos->value == MAP_EMPTY_VALUE) {\n    pos->key = key;\n    pos->value = 0;\n  }\n  return &pos->value;\n}\n\n/**\n * Deletes key from the map if present.\n */\nstatic void COVER_map_remove(COVER_map_t *map, U32 key) {\n  U32 i = COVER_map_index(map, key);\n  COVER_map_pair_t *del = &map->data[i];\n  U32 shift = 1;\n  if (del->value == MAP_EMPTY_VALUE) {\n    return;\n  }\n  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {\n    COVER_map_pair_t *const pos = &map->data[i];\n    /* If the position is empty we are done */\n    if (pos->value == MAP_EMPTY_VALUE) {\n      del->value = MAP_EMPTY_VALUE;\n      return;\n    }\n    /* If pos can be moved to del do so */\n    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {\n      del->key = pos->key;\n      del->value = pos->value;\n      del = pos;\n      shift = 1;\n    } else {\n      ++shift;\n    }\n  }\n}\n\n/**\n * Destroys a map that is inited with COVER_map_init().\n */\nstatic void COVER_map_destroy(COVER_map_t *map) {\n  if (map->data) {\n    free(map->data);\n  }\n  map->data = NULL;\n  map->size = 0;\n}\n\n/*-*************************************\n* Context\n***************************************/\n\ntypedef struct {\n  const BYTE *samples;\n  size_t *offsets;\n  const size_t *samplesSizes;\n  size_t nbSamples;\n  size_t nbTrainSamples;\n  size_t nbTestSamples;\n  U32 *suffix;\n  size_t suffixSize;\n  U32 *freqs;\n  U32 *dmerAt;\n  unsigned d;\n} COVER_ctx_t;\n\n/* We need a global context for qsort... 
*/\nstatic COVER_ctx_t *g_ctx = NULL;\n\n/*-*************************************\n*  Helper functions\n***************************************/\n\n/**\n * Returns the sum of the sample sizes.\n */\nsize_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {\n  size_t sum = 0;\n  unsigned i;\n  for (i = 0; i < nbSamples; ++i) {\n    sum += samplesSizes[i];\n  }\n  return sum;\n}\n\n/**\n * Returns -1 if the dmer at lp is less than the dmer at rp.\n * Return 0 if the dmers at lp and rp are equal.\n * Returns 1 if the dmer at lp is greater than the dmer at rp.\n */\nstatic int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {\n  U32 const lhs = *(U32 const *)lp;\n  U32 const rhs = *(U32 const *)rp;\n  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);\n}\n/**\n * Faster version for d <= 8.\n */\nstatic int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {\n  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);\n  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;\n  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;\n  if (lhs < rhs) {\n    return -1;\n  }\n  return (lhs > rhs);\n}\n\n/**\n * Same as COVER_cmp() except ties are broken by pointer value\n * NOTE: g_ctx must be set to call this function.  A global is required because\n * qsort doesn't take an opaque pointer.\n */\nstatic int COVER_strict_cmp(const void *lp, const void *rp) {\n  int result = COVER_cmp(g_ctx, lp, rp);\n  if (result == 0) {\n    result = lp < rp ? -1 : 1;\n  }\n  return result;\n}\n/**\n * Faster version for d <= 8.\n */\nstatic int COVER_strict_cmp8(const void *lp, const void *rp) {\n  int result = COVER_cmp8(g_ctx, lp, rp);\n  if (result == 0) {\n    result = lp < rp ? -1 : 1;\n  }\n  return result;\n}\n\n/**\n * Returns the first pointer in [first, last) whose element does not compare\n * less than value.  
If no such element exists it returns last.\n */\nstatic const size_t *COVER_lower_bound(const size_t *first, const size_t *last,\n                                       size_t value) {\n  size_t count = last - first;\n  while (count != 0) {\n    size_t step = count / 2;\n    const size_t *ptr = first;\n    ptr += step;\n    if (*ptr < value) {\n      first = ++ptr;\n      count -= step + 1;\n    } else {\n      count = step;\n    }\n  }\n  return first;\n}\n\n/**\n * Generic groupBy function.\n * Groups an array sorted by cmp into groups with equivalent values.\n * Calls grp for each group.\n */\nstatic void\nCOVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,\n              int (*cmp)(COVER_ctx_t *, const void *, const void *),\n              void (*grp)(COVER_ctx_t *, const void *, const void *)) {\n  const BYTE *ptr = (const BYTE *)data;\n  size_t num = 0;\n  while (num < count) {\n    const BYTE *grpEnd = ptr + size;\n    ++num;\n    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {\n      grpEnd += size;\n      ++num;\n    }\n    grp(ctx, ptr, grpEnd);\n    ptr = grpEnd;\n  }\n}\n\n/*-*************************************\n*  Cover functions\n***************************************/\n\n/**\n * Called on each group of positions with the same dmer.\n * Counts the frequency of each dmer and saves it in the suffix array.\n * Fills `ctx->dmerAt`.\n */\nstatic void COVER_group(COVER_ctx_t *ctx, const void *group,\n                        const void *groupEnd) {\n  /* The group consists of all the positions with the same first d bytes. 
*/\n  const U32 *grpPtr = (const U32 *)group;\n  const U32 *grpEnd = (const U32 *)groupEnd;\n  /* The dmerId is how we will reference this dmer.\n   * This allows us to map the whole dmer space to a much smaller space, the\n   * size of the suffix array.\n   */\n  const U32 dmerId = (U32)(grpPtr - ctx->suffix);\n  /* Count the number of samples this dmer shows up in */\n  U32 freq = 0;\n  /* Details */\n  const size_t *curOffsetPtr = ctx->offsets;\n  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;\n  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a\n   * different sample than the last.\n   */\n  size_t curSampleEnd = ctx->offsets[0];\n  for (; grpPtr != grpEnd; ++grpPtr) {\n    /* Save the dmerId for this position so we can get back to it. */\n    ctx->dmerAt[*grpPtr] = dmerId;\n    /* Dictionaries only help for the first reference to the dmer.\n     * After that zstd can reference the match from the previous reference.\n     * So only count each dmer once for each sample it is in.\n     */\n    if (*grpPtr < curSampleEnd) {\n      continue;\n    }\n    freq += 1;\n    /* Binary search to find the end of the sample *grpPtr is in.\n     * In the common case that grpPtr + 1 == grpEnd we can skip the binary\n     * search because the loop is over.\n     */\n    if (grpPtr + 1 != grpEnd) {\n      const size_t *sampleEndPtr =\n          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);\n      curSampleEnd = *sampleEndPtr;\n      curOffsetPtr = sampleEndPtr + 1;\n    }\n  }\n  /* At this point we are never going to look at this segment of the suffix\n   * array again.  
We take advantage of this fact to save memory.\n   * We store the frequency of the dmer in the first position of the group,\n   * which is dmerId.\n   */\n  ctx->suffix[dmerId] = freq;\n}\n\n\n/**\n * Selects the best segment in an epoch.\n * Segments of are scored according to the function:\n *\n * Let F(d) be the frequency of dmer d.\n * Let S_i be the dmer at position i of segment S which has length k.\n *\n *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})\n *\n * Once the dmer d is in the dictionary we set F(d) = 0.\n */\nstatic COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,\n                                           COVER_map_t *activeDmers, U32 begin,\n                                           U32 end,\n                                           ZDICT_cover_params_t parameters) {\n  /* Constants */\n  const U32 k = parameters.k;\n  const U32 d = parameters.d;\n  const U32 dmersInK = k - d + 1;\n  /* Try each segment (activeSegment) and save the best (bestSegment) */\n  COVER_segment_t bestSegment = {0, 0, 0};\n  COVER_segment_t activeSegment;\n  /* Reset the activeDmers in the segment */\n  COVER_map_clear(activeDmers);\n  /* The activeSegment starts at the beginning of the epoch. */\n  activeSegment.begin = begin;\n  activeSegment.end = begin;\n  activeSegment.score = 0;\n  /* Slide the activeSegment through the whole epoch.\n   * Save the best segment in bestSegment.\n   */\n  while (activeSegment.end < end) {\n    /* The dmerId for the dmer at the next position */\n    U32 newDmer = ctx->dmerAt[activeSegment.end];\n    /* The entry in activeDmers for this dmerId */\n    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);\n    /* If the dmer isn't already present in the segment add its score. 
*/\n    if (*newDmerOcc == 0) {\n      /* The paper suggest using the L-0.5 norm, but experiments show that it\n       * doesn't help.\n       */\n      activeSegment.score += freqs[newDmer];\n    }\n    /* Add the dmer to the segment */\n    activeSegment.end += 1;\n    *newDmerOcc += 1;\n\n    /* If the window is now too large, drop the first position */\n    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {\n      U32 delDmer = ctx->dmerAt[activeSegment.begin];\n      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);\n      activeSegment.begin += 1;\n      *delDmerOcc -= 1;\n      /* If this is the last occurrence of the dmer, subtract its score */\n      if (*delDmerOcc == 0) {\n        COVER_map_remove(activeDmers, delDmer);\n        activeSegment.score -= freqs[delDmer];\n      }\n    }\n\n    /* If this segment is the best so far save it */\n    if (activeSegment.score > bestSegment.score) {\n      bestSegment = activeSegment;\n    }\n  }\n  {\n    /* Trim off the zero frequency head and tail from the segment. */\n    U32 newBegin = bestSegment.end;\n    U32 newEnd = bestSegment.begin;\n    U32 pos;\n    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {\n      U32 freq = freqs[ctx->dmerAt[pos]];\n      if (freq != 0) {\n        newBegin = MIN(newBegin, pos);\n        newEnd = pos + 1;\n      }\n    }\n    bestSegment.begin = newBegin;\n    bestSegment.end = newEnd;\n  }\n  {\n    /* Zero out the frequency of each dmer covered by the chosen segment. 
*/\n    U32 pos;\n    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {\n      freqs[ctx->dmerAt[pos]] = 0;\n    }\n  }\n  return bestSegment;\n}\n\n/**\n * Check the validity of the parameters.\n * Returns non-zero if the parameters are valid and 0 otherwise.\n */\nstatic int COVER_checkParameters(ZDICT_cover_params_t parameters,\n                                 size_t maxDictSize) {\n  /* k and d are required parameters */\n  if (parameters.d == 0 || parameters.k == 0) {\n    return 0;\n  }\n  /* k <= maxDictSize */\n  if (parameters.k > maxDictSize) {\n    return 0;\n  }\n  /* d <= k */\n  if (parameters.d > parameters.k) {\n    return 0;\n  }\n  /* 0 < splitPoint <= 1 */\n  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){\n    return 0;\n  }\n  return 1;\n}\n\n/**\n * Clean up a context initialized with `COVER_ctx_init()`.\n */\nstatic void COVER_ctx_destroy(COVER_ctx_t *ctx) {\n  if (!ctx) {\n    return;\n  }\n  if (ctx->suffix) {\n    free(ctx->suffix);\n    ctx->suffix = NULL;\n  }\n  if (ctx->freqs) {\n    free(ctx->freqs);\n    ctx->freqs = NULL;\n  }\n  if (ctx->dmerAt) {\n    free(ctx->dmerAt);\n    ctx->dmerAt = NULL;\n  }\n  if (ctx->offsets) {\n    free(ctx->offsets);\n    ctx->offsets = NULL;\n  }\n}\n\n/**\n * Prepare a context for dictionary building.\n * The context is only dependent on the parameter `d` and can used multiple\n * times.\n * Returns 0 on success or error code on error.\n * The context must be destroyed with `COVER_ctx_destroy()`.\n */\nstatic size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,\n                          const size_t *samplesSizes, unsigned nbSamples,\n                          unsigned d, double splitPoint) {\n  const BYTE *const samples = (const BYTE *)samplesBuffer;\n  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);\n  /* Split samples into testing and training sets */\n  const unsigned nbTrainSamples = splitPoint < 1.0 ? 
(unsigned)((double)nbSamples * splitPoint) : nbSamples;\n  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;\n  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;\n  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;\n  /* Checks */\n  if (totalSamplesSize < MAX(d, sizeof(U64)) ||\n      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {\n    DISPLAYLEVEL(1, \"Total samples size is too large (%u MB), maximum size is %u MB\\n\",\n                 (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));\n    return ERROR(srcSize_wrong);\n  }\n  /* Check if there are at least 5 training samples */\n  if (nbTrainSamples < 5) {\n    DISPLAYLEVEL(1, \"Total number of training samples is %u and is invalid.\", nbTrainSamples);\n    return ERROR(srcSize_wrong);\n  }\n  /* Check if there's testing sample */\n  if (nbTestSamples < 1) {\n    DISPLAYLEVEL(1, \"Total number of testing samples is %u and is invalid.\", nbTestSamples);\n    return ERROR(srcSize_wrong);\n  }\n  /* Zero the context */\n  memset(ctx, 0, sizeof(*ctx));\n  DISPLAYLEVEL(2, \"Training on %u samples of total size %u\\n\", nbTrainSamples,\n               (unsigned)trainingSamplesSize);\n  DISPLAYLEVEL(2, \"Testing on %u samples of total size %u\\n\", nbTestSamples,\n               (unsigned)testSamplesSize);\n  ctx->samples = samples;\n  ctx->samplesSizes = samplesSizes;\n  ctx->nbSamples = nbSamples;\n  ctx->nbTrainSamples = nbTrainSamples;\n  ctx->nbTestSamples = nbTestSamples;\n  /* Partial suffix array */\n  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;\n  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));\n  /* Maps index to the dmerID */\n  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));\n  /* The offsets of each file */\n  ctx->offsets = (size_t *)malloc((nbSamples + 1) * 
sizeof(size_t));\n  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {\n    DISPLAYLEVEL(1, \"Failed to allocate scratch buffers\\n\");\n    COVER_ctx_destroy(ctx);\n    return ERROR(memory_allocation);\n  }\n  ctx->freqs = NULL;\n  ctx->d = d;\n\n  /* Fill offsets from the samplesSizes */\n  {\n    U32 i;\n    ctx->offsets[0] = 0;\n    for (i = 1; i <= nbSamples; ++i) {\n      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];\n    }\n  }\n  DISPLAYLEVEL(2, \"Constructing partial suffix array\\n\");\n  {\n    /* suffix is a partial suffix array.\n     * It only sorts suffixes by their first parameters.d bytes.\n     * The sort is stable, so each dmer group is sorted by position in input.\n     */\n    U32 i;\n    for (i = 0; i < ctx->suffixSize; ++i) {\n      ctx->suffix[i] = i;\n    }\n    /* qsort doesn't take an opaque pointer, so pass as a global.\n     * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.\n     */\n    g_ctx = ctx;\n#if defined(__OpenBSD__)\n    mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),\n          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));\n#else\n    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),\n          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));\n#endif\n  }\n  DISPLAYLEVEL(2, \"Computing frequencies\\n\");\n  /* For each dmer group (group of positions with the same first d bytes):\n   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is\n   *    (groupBeginPtr - suffix).  This allows us to go from position to\n   *    dmerID so we can look up values in freq.\n   * 2. We calculate how many samples the dmer occurs in and save it in\n   *    freqs[dmerId].\n   */\n  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,\n                (ctx->d <= 8 ? 
&COVER_cmp8 : &COVER_cmp), &COVER_group);\n  ctx->freqs = ctx->suffix;\n  ctx->suffix = NULL;\n  return 0;\n}\n\nvoid COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)\n{\n  const double ratio = (double)nbDmers / maxDictSize;\n  if (ratio >= 10) {\n      return;\n  }\n  LOCALDISPLAYLEVEL(displayLevel, 1,\n                    \"WARNING: The maximum dictionary size %u is too large \"\n                    \"compared to the source size %u! \"\n                    \"size(source)/size(dictionary) = %f, but it should be >= \"\n                    \"10! This may lead to a subpar dictionary! We recommend \"\n                    \"training on sources at least 10x, and preferably 100x \"\n                    \"the size of the dictionary! \\n\", (U32)maxDictSize,\n                    (U32)nbDmers, ratio);\n}\n\nCOVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,\n                                       U32 nbDmers, U32 k, U32 passes)\n{\n  const U32 minEpochSize = k * 10;\n  COVER_epoch_info_t epochs;\n  epochs.num = MAX(1, maxDictSize / k / passes);\n  epochs.size = nbDmers / epochs.num;\n  if (epochs.size >= minEpochSize) {\n      assert(epochs.size * epochs.num <= nbDmers);\n      return epochs;\n  }\n  epochs.size = MIN(minEpochSize, nbDmers);\n  epochs.num = nbDmers / epochs.size;\n  assert(epochs.size * epochs.num <= nbDmers);\n  return epochs;\n}\n\n/**\n * Given the prepared context build the dictionary.\n */\nstatic size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,\n                                    COVER_map_t *activeDmers, void *dictBuffer,\n                                    size_t dictBufferCapacity,\n                                    ZDICT_cover_params_t parameters) {\n  BYTE *const dict = (BYTE *)dictBuffer;\n  size_t tail = dictBufferCapacity;\n  /* Divide the data into epochs. We will select one segment from each epoch. 
*/\n  const COVER_epoch_info_t epochs = COVER_computeEpochs(\n      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);\n  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));\n  size_t zeroScoreRun = 0;\n  size_t epoch;\n  DISPLAYLEVEL(2, \"Breaking content into %u epochs of size %u\\n\",\n                (U32)epochs.num, (U32)epochs.size);\n  /* Loop through the epochs until there are no more segments or the dictionary\n   * is full.\n   */\n  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {\n    const U32 epochBegin = (U32)(epoch * epochs.size);\n    const U32 epochEnd = epochBegin + epochs.size;\n    size_t segmentSize;\n    /* Select a segment */\n    COVER_segment_t segment = COVER_selectSegment(\n        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);\n    /* If the segment covers no dmers, then we are out of content.\n     * There may be new content in other epochs, for continue for some time.\n     */\n    if (segment.score == 0) {\n      if (++zeroScoreRun >= maxZeroScoreRun) {\n          break;\n      }\n      continue;\n    }\n    zeroScoreRun = 0;\n    /* Trim the segment if necessary and if it is too small then we are done */\n    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);\n    if (segmentSize < parameters.d) {\n      break;\n    }\n    /* We fill the dictionary from the back to allow the best segments to be\n     * referenced with the smallest offsets.\n     */\n    tail -= segmentSize;\n    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);\n    DISPLAYUPDATE(\n        2, \"\\r%u%%       \",\n        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));\n  }\n  DISPLAYLEVEL(2, \"\\r%79s\\r\", \"\");\n  return tail;\n}\n\nZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(\n    void *dictBuffer, size_t dictBufferCapacity,\n    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,\n    ZDICT_cover_params_t parameters)\n{\n  
BYTE* const dict = (BYTE*)dictBuffer;\n  COVER_ctx_t ctx;\n  COVER_map_t activeDmers;\n  parameters.splitPoint = 1.0;\n  /* Initialize global data */\n  g_displayLevel = parameters.zParams.notificationLevel;\n  /* Checks */\n  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {\n    DISPLAYLEVEL(1, \"Cover parameters incorrect\\n\");\n    return ERROR(parameter_outOfBound);\n  }\n  if (nbSamples == 0) {\n    DISPLAYLEVEL(1, \"Cover must have at least one input file\\n\");\n    return ERROR(srcSize_wrong);\n  }\n  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {\n    DISPLAYLEVEL(1, \"dictBufferCapacity must be at least %u\\n\",\n                 ZDICT_DICTSIZE_MIN);\n    return ERROR(dstSize_tooSmall);\n  }\n  /* Initialize context and activeDmers */\n  {\n    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,\n                      parameters.d, parameters.splitPoint);\n    if (ZSTD_isError(initVal)) {\n      return initVal;\n    }\n  }\n  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);\n  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {\n    DISPLAYLEVEL(1, \"Failed to allocate dmer map: out of memory\\n\");\n    COVER_ctx_destroy(&ctx);\n    return ERROR(memory_allocation);\n  }\n\n  DISPLAYLEVEL(2, \"Building dictionary\\n\");\n  {\n    const size_t tail =\n        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,\n                              dictBufferCapacity, parameters);\n    const size_t dictionarySize = ZDICT_finalizeDictionary(\n        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,\n        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);\n    if (!ZSTD_isError(dictionarySize)) {\n      DISPLAYLEVEL(2, \"Constructed dictionary of size %u\\n\",\n                   (unsigned)dictionarySize);\n    }\n    COVER_ctx_destroy(&ctx);\n    COVER_map_destroy(&activeDmers);\n    return dictionarySize;\n  }\n}\n\n\n\nsize_t 
COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,\n                                    const size_t *samplesSizes, const BYTE *samples,\n                                    size_t *offsets,\n                                    size_t nbTrainSamples, size_t nbSamples,\n                                    BYTE *const dict, size_t dictBufferCapacity) {\n  size_t totalCompressedSize = ERROR(GENERIC);\n  /* Pointers */\n  ZSTD_CCtx *cctx;\n  ZSTD_CDict *cdict;\n  void *dst;\n  /* Local variables */\n  size_t dstCapacity;\n  size_t i;\n  /* Allocate dst with enough space to compress the maximum sized sample */\n  {\n    size_t maxSampleSize = 0;\n    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;\n    for (; i < nbSamples; ++i) {\n      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);\n    }\n    dstCapacity = ZSTD_compressBound(maxSampleSize);\n    dst = malloc(dstCapacity);\n  }\n  /* Create the cctx and cdict */\n  cctx = ZSTD_createCCtx();\n  cdict = ZSTD_createCDict(dict, dictBufferCapacity,\n                           parameters.zParams.compressionLevel);\n  if (!dst || !cctx || !cdict) {\n    goto _compressCleanup;\n  }\n  /* Compress each sample and sum their sizes (or error) */\n  totalCompressedSize = dictBufferCapacity;\n  i = parameters.splitPoint < 1.0 ? 
nbTrainSamples : 0;\n  for (; i < nbSamples; ++i) {\n    const size_t size = ZSTD_compress_usingCDict(\n        cctx, dst, dstCapacity, samples + offsets[i],\n        samplesSizes[i], cdict);\n    if (ZSTD_isError(size)) {\n      totalCompressedSize = size;\n      goto _compressCleanup;\n    }\n    totalCompressedSize += size;\n  }\n_compressCleanup:\n  ZSTD_freeCCtx(cctx);\n  ZSTD_freeCDict(cdict);\n  if (dst) {\n    free(dst);\n  }\n  return totalCompressedSize;\n}\n\n\n/**\n * Initialize the `COVER_best_t`.\n */\nvoid COVER_best_init(COVER_best_t *best) {\n  if (best==NULL) return; /* compatible with init on NULL */\n  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);\n  (void)ZSTD_pthread_cond_init(&best->cond, NULL);\n  best->liveJobs = 0;\n  best->dict = NULL;\n  best->dictSize = 0;\n  best->compressedSize = (size_t)-1;\n  memset(&best->parameters, 0, sizeof(best->parameters));\n}\n\n/**\n * Wait until liveJobs == 0.\n */\nvoid COVER_best_wait(COVER_best_t *best) {\n  if (!best) {\n    return;\n  }\n  ZSTD_pthread_mutex_lock(&best->mutex);\n  while (best->liveJobs != 0) {\n    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);\n  }\n  ZSTD_pthread_mutex_unlock(&best->mutex);\n}\n\n/**\n * Call COVER_best_wait() and then destroy the COVER_best_t.\n */\nvoid COVER_best_destroy(COVER_best_t *best) {\n  if (!best) {\n    return;\n  }\n  COVER_best_wait(best);\n  if (best->dict) {\n    free(best->dict);\n  }\n  ZSTD_pthread_mutex_destroy(&best->mutex);\n  ZSTD_pthread_cond_destroy(&best->cond);\n}\n\n/**\n * Called when a thread is about to be launched.\n * Increments liveJobs.\n */\nvoid COVER_best_start(COVER_best_t *best) {\n  if (!best) {\n    return;\n  }\n  ZSTD_pthread_mutex_lock(&best->mutex);\n  ++best->liveJobs;\n  ZSTD_pthread_mutex_unlock(&best->mutex);\n}\n\n/**\n * Called when a thread finishes executing, both on error or success.\n * Decrements liveJobs and signals any waiting threads if liveJobs == 0.\n * If this dictionary is the best so far save 
it and its parameters.\n */\nvoid COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,\n                              COVER_dictSelection_t selection) {\n  void* dict = selection.dictContent;\n  size_t compressedSize = selection.totalCompressedSize;\n  size_t dictSize = selection.dictSize;\n  if (!best) {\n    return;\n  }\n  {\n    size_t liveJobs;\n    ZSTD_pthread_mutex_lock(&best->mutex);\n    --best->liveJobs;\n    liveJobs = best->liveJobs;\n    /* If the new dictionary is better */\n    if (compressedSize < best->compressedSize) {\n      /* Allocate space if necessary */\n      if (!best->dict || best->dictSize < dictSize) {\n        if (best->dict) {\n          free(best->dict);\n        }\n        best->dict = malloc(dictSize);\n        if (!best->dict) {\n          best->compressedSize = ERROR(GENERIC);\n          best->dictSize = 0;\n          ZSTD_pthread_cond_signal(&best->cond);\n          ZSTD_pthread_mutex_unlock(&best->mutex);\n          return;\n        }\n      }\n      /* Save the dictionary, parameters, and size */\n      if (dict) {\n        memcpy(best->dict, dict, dictSize);\n        best->dictSize = dictSize;\n        best->parameters = parameters;\n        best->compressedSize = compressedSize;\n      }\n    }\n    if (liveJobs == 0) {\n      ZSTD_pthread_cond_broadcast(&best->cond);\n    }\n    ZSTD_pthread_mutex_unlock(&best->mutex);\n  }\n}\n\nCOVER_dictSelection_t COVER_dictSelectionError(size_t error) {\n    COVER_dictSelection_t selection = { NULL, 0, error };\n    return selection;\n}\n\nunsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {\n  return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);\n}\n\nvoid COVER_dictSelectionFree(COVER_dictSelection_t selection){\n  free(selection.dictContent);\n}\n\nCOVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,\n        size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned 
nbFinalizeSamples,\n        size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {\n\n  size_t largestDict = 0;\n  size_t largestCompressed = 0;\n  BYTE* customDictContentEnd = customDictContent + dictContentSize;\n\n  BYTE * largestDictbuffer = (BYTE *)malloc(dictContentSize);\n  BYTE * candidateDictBuffer = (BYTE *)malloc(dictContentSize);\n  double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;\n\n  if (!largestDictbuffer || !candidateDictBuffer) {\n    free(largestDictbuffer);\n    free(candidateDictBuffer);\n    return COVER_dictSelectionError(dictContentSize);\n  }\n\n  /* Initial dictionary size and compressed size */\n  memcpy(largestDictbuffer, customDictContent, dictContentSize);\n  dictContentSize = ZDICT_finalizeDictionary(\n    largestDictbuffer, dictContentSize, customDictContent, dictContentSize,\n    samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);\n\n  if (ZDICT_isError(dictContentSize)) {\n    free(largestDictbuffer);\n    free(candidateDictBuffer);\n    return COVER_dictSelectionError(dictContentSize);\n  }\n\n  totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,\n                                                       samplesBuffer, offsets,\n                                                       nbCheckSamples, nbSamples,\n                                                       largestDictbuffer, dictContentSize);\n\n  if (ZSTD_isError(totalCompressedSize)) {\n    free(largestDictbuffer);\n    free(candidateDictBuffer);\n    return COVER_dictSelectionError(totalCompressedSize);\n  }\n\n  if (params.shrinkDict == 0) {\n    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };\n    free(candidateDictBuffer);\n    return selection;\n  }\n\n  largestDict = dictContentSize;\n  largestCompressed = totalCompressedSize;\n  dictContentSize = ZDICT_DICTSIZE_MIN;\n\n  /* Largest dict is 
initially at least ZDICT_DICTSIZE_MIN */\n  while (dictContentSize < largestDict) {\n    memcpy(candidateDictBuffer, largestDictbuffer, largestDict);\n    dictContentSize = ZDICT_finalizeDictionary(\n      candidateDictBuffer, dictContentSize, customDictContentEnd - dictContentSize, dictContentSize,\n      samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);\n\n    if (ZDICT_isError(dictContentSize)) {\n      free(largestDictbuffer);\n      free(candidateDictBuffer);\n      return COVER_dictSelectionError(dictContentSize);\n\n    }\n\n    totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,\n                                                         samplesBuffer, offsets,\n                                                         nbCheckSamples, nbSamples,\n                                                         candidateDictBuffer, dictContentSize);\n\n    if (ZSTD_isError(totalCompressedSize)) {\n      free(largestDictbuffer);\n      free(candidateDictBuffer);\n      return COVER_dictSelectionError(totalCompressedSize);\n    }\n\n    if (totalCompressedSize <= largestCompressed * regressionTolerance) {\n      COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize };\n      free(largestDictbuffer);\n      return selection;\n    }\n    dictContentSize *= 2;\n  }\n  dictContentSize = largestDict;\n  totalCompressedSize = largestCompressed;\n  {\n    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };\n    free(candidateDictBuffer);\n    return selection;\n  }\n}\n\n/**\n * Parameters for COVER_tryParameters().\n */\ntypedef struct COVER_tryParameters_data_s {\n  const COVER_ctx_t *ctx;\n  COVER_best_t *best;\n  size_t dictBufferCapacity;\n  ZDICT_cover_params_t parameters;\n} COVER_tryParameters_data_t;\n\n/**\n * Tries a set of parameters and updates the COVER_best_t with the results.\n * This function is thread safe if zstd is compiled with 
multithreaded support.\n * It takes its parameters as an *OWNING* opaque pointer to support threading.\n */\nstatic void COVER_tryParameters(void *opaque) {\n  /* Save parameters as local variables */\n  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;\n  const COVER_ctx_t *const ctx = data->ctx;\n  const ZDICT_cover_params_t parameters = data->parameters;\n  size_t dictBufferCapacity = data->dictBufferCapacity;\n  size_t totalCompressedSize = ERROR(GENERIC);\n  /* Allocate space for hash table, dict, and freqs */\n  COVER_map_t activeDmers;\n  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);\n  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));\n  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));\n  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {\n    DISPLAYLEVEL(1, \"Failed to allocate dmer map: out of memory\\n\");\n    goto _cleanup;\n  }\n  if (!dict || !freqs) {\n    DISPLAYLEVEL(1, \"Failed to allocate buffers: out of memory\\n\");\n    goto _cleanup;\n  }\n  /* Copy the frequencies because we need to modify them */\n  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));\n  /* Build the dictionary */\n  {\n    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,\n                                              dictBufferCapacity, parameters);\n    selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,\n        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,\n        totalCompressedSize);\n\n    if (COVER_dictSelectionIsError(selection)) {\n      DISPLAYLEVEL(1, \"Failed to select dictionary\\n\");\n      goto _cleanup;\n    }\n  }\n_cleanup:\n  free(dict);\n  COVER_best_finish(data->best, parameters, selection);\n  free(data);\n  COVER_map_destroy(&activeDmers);\n  COVER_dictSelectionFree(selection);\n  if (freqs) {\n    free(freqs);\n  
}\n}\n\nZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(\n    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,\n    const size_t *samplesSizes, unsigned nbSamples,\n    ZDICT_cover_params_t *parameters) {\n  /* constants */\n  const unsigned nbThreads = parameters->nbThreads;\n  const double splitPoint =\n      parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;\n  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;\n  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;\n  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;\n  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;\n  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;\n  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);\n  const unsigned kIterations =\n      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);\n  const unsigned shrinkDict = 0;\n  /* Local variables */\n  const int displayLevel = parameters->zParams.notificationLevel;\n  unsigned iteration = 1;\n  unsigned d;\n  unsigned k;\n  COVER_best_t best;\n  POOL_ctx *pool = NULL;\n  int warned = 0;\n\n  /* Checks */\n  if (splitPoint <= 0 || splitPoint > 1) {\n    LOCALDISPLAYLEVEL(displayLevel, 1, \"Incorrect parameters\\n\");\n    return ERROR(parameter_outOfBound);\n  }\n  if (kMinK < kMaxD || kMaxK < kMinK) {\n    LOCALDISPLAYLEVEL(displayLevel, 1, \"Incorrect parameters\\n\");\n    return ERROR(parameter_outOfBound);\n  }\n  if (nbSamples == 0) {\n    DISPLAYLEVEL(1, \"Cover must have at least one input file\\n\");\n    return ERROR(srcSize_wrong);\n  }\n  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {\n    DISPLAYLEVEL(1, \"dictBufferCapacity must be at least %u\\n\",\n                 ZDICT_DICTSIZE_MIN);\n    return ERROR(dstSize_tooSmall);\n  }\n  if (nbThreads > 1) {\n    pool = POOL_create(nbThreads, 1);\n    if (!pool) {\n      return ERROR(memory_allocation);\n    }\n  }\n  /* 
Initialization */\n  COVER_best_init(&best);\n  /* Turn down global display level to clean up display at level 2 and below */\n  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;\n  /* Loop through d first because each new value needs a new context */\n  LOCALDISPLAYLEVEL(displayLevel, 2, \"Trying %u different sets of parameters\\n\",\n                    kIterations);\n  for (d = kMinD; d <= kMaxD; d += 2) {\n    /* Initialize the context for this value of d */\n    COVER_ctx_t ctx;\n    LOCALDISPLAYLEVEL(displayLevel, 3, \"d=%u\\n\", d);\n    {\n      const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);\n      if (ZSTD_isError(initVal)) {\n        LOCALDISPLAYLEVEL(displayLevel, 1, \"Failed to initialize context\\n\");\n        COVER_best_destroy(&best);\n        POOL_free(pool);\n        return initVal;\n      }\n    }\n    if (!warned) {\n      COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);\n      warned = 1;\n    }\n    /* Loop through k reusing the same context */\n    for (k = kMinK; k <= kMaxK; k += kStepSize) {\n      /* Prepare the arguments */\n      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(\n          sizeof(COVER_tryParameters_data_t));\n      LOCALDISPLAYLEVEL(displayLevel, 3, \"k=%u\\n\", k);\n      if (!data) {\n        LOCALDISPLAYLEVEL(displayLevel, 1, \"Failed to allocate parameters\\n\");\n        COVER_best_destroy(&best);\n        COVER_ctx_destroy(&ctx);\n        POOL_free(pool);\n        return ERROR(memory_allocation);\n      }\n      data->ctx = &ctx;\n      data->best = &best;\n      data->dictBufferCapacity = dictBufferCapacity;\n      data->parameters = *parameters;\n      data->parameters.k = k;\n      data->parameters.d = d;\n      data->parameters.splitPoint = splitPoint;\n      data->parameters.steps = kSteps;\n      data->parameters.shrinkDict = shrinkDict;\n      data->parameters.zParams.notificationLevel = 
g_displayLevel;\n      /* Check the parameters */\n      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {\n        DISPLAYLEVEL(1, \"Cover parameters incorrect\\n\");\n        free(data);\n        continue;\n      }\n      /* Call the function and pass ownership of data to it */\n      COVER_best_start(&best);\n      if (pool) {\n        POOL_add(pool, &COVER_tryParameters, data);\n      } else {\n        COVER_tryParameters(data);\n      }\n      /* Print status */\n      LOCALDISPLAYUPDATE(displayLevel, 2, \"\\r%u%%       \",\n                         (unsigned)((iteration * 100) / kIterations));\n      ++iteration;\n    }\n    COVER_best_wait(&best);\n    COVER_ctx_destroy(&ctx);\n  }\n  LOCALDISPLAYLEVEL(displayLevel, 2, \"\\r%79s\\r\", \"\");\n  /* Fill the output buffer and parameters with output of the best parameters */\n  {\n    const size_t dictSize = best.dictSize;\n    if (ZSTD_isError(best.compressedSize)) {\n      const size_t compressedSize = best.compressedSize;\n      COVER_best_destroy(&best);\n      POOL_free(pool);\n      return compressedSize;\n    }\n    *parameters = best.parameters;\n    memcpy(dictBuffer, best.dict, dictSize);\n    COVER_best_destroy(&best);\n    POOL_free(pool);\n    return dictSize;\n  }\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/dictBuilder/cover.h",
    "content": "#include <stdio.h>  /* fprintf */\n#include <stdlib.h> /* malloc, free, qsort */\n#include <string.h> /* memset */\n#include <time.h>   /* clock */\n#include \"mem.h\" /* read */\n#include \"pool.h\"\n#include \"threading.h\"\n#include \"zstd_internal.h\" /* includes zstd.h */\n#ifndef ZDICT_STATIC_LINKING_ONLY\n#define ZDICT_STATIC_LINKING_ONLY\n#endif\n#include \"zdict.h\"\n\n/**\n * COVER_best_t is used for two purposes:\n * 1. Synchronizing threads.\n * 2. Saving the best parameters and dictionary.\n *\n * All of the methods except COVER_best_init() are thread safe if zstd is\n * compiled with multithreaded support.\n */\ntypedef struct COVER_best_s {\n  ZSTD_pthread_mutex_t mutex;\n  ZSTD_pthread_cond_t cond;\n  size_t liveJobs;\n  void *dict;\n  size_t dictSize;\n  ZDICT_cover_params_t parameters;\n  size_t compressedSize;\n} COVER_best_t;\n\n/**\n * A segment is a range in the source as well as the score of the segment.\n */\ntypedef struct {\n  U32 begin;\n  U32 end;\n  U32 score;\n} COVER_segment_t;\n\n/**\n *Number of epochs and size of each epoch.\n */\ntypedef struct {\n  U32 num;\n  U32 size;\n} COVER_epoch_info_t;\n\n/**\n * Struct used for the dictionary selection function.\n */\ntypedef struct COVER_dictSelection {\n  BYTE* dictContent;\n  size_t dictSize;\n  size_t totalCompressedSize;\n} COVER_dictSelection_t;\n\n/**\n * Computes the number of epochs and the size of each epoch.\n * We will make sure that each epoch gets at least 10 * k bytes.\n *\n * The COVER algorithms divide the data up into epochs of equal size and\n * select one segment from each epoch.\n *\n * @param maxDictSize The maximum allowed dictionary size.\n * @param nbDmers     The number of dmers we are training on.\n * @param k           The parameter k (segment size).\n * @param passes      The target number of passes over the dmer corpus.\n *                    More passes means a better dictionary.\n */\nCOVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, 
U32 nbDmers,\n                                       U32 k, U32 passes);\n\n/**\n * Warns the user when their corpus is too small.\n */\nvoid COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel);\n\n/**\n *  Checks total compressed size of a dictionary\n */\nsize_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,\n                                      const size_t *samplesSizes, const BYTE *samples,\n                                      size_t *offsets,\n                                      size_t nbTrainSamples, size_t nbSamples,\n                                      BYTE *const dict, size_t dictBufferCapacity);\n\n/**\n * Returns the sum of the sample sizes.\n */\nsize_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) ;\n\n/**\n * Initialize the `COVER_best_t`.\n */\nvoid COVER_best_init(COVER_best_t *best);\n\n/**\n * Wait until liveJobs == 0.\n */\nvoid COVER_best_wait(COVER_best_t *best);\n\n/**\n * Call COVER_best_wait() and then destroy the COVER_best_t.\n */\nvoid COVER_best_destroy(COVER_best_t *best);\n\n/**\n * Called when a thread is about to be launched.\n * Increments liveJobs.\n */\nvoid COVER_best_start(COVER_best_t *best);\n\n/**\n * Called when a thread finishes executing, both on error or success.\n * Decrements liveJobs and signals any waiting threads if liveJobs == 0.\n * If this dictionary is the best so far save it and its parameters.\n */\nvoid COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,\n                       COVER_dictSelection_t selection);\n/**\n * Error function for COVER_selectDict function. Checks if the return\n * value is an error.\n */\nunsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection);\n\n /**\n  * Error function for COVER_selectDict function. 
Returns a struct where\n  * return.totalCompressedSize is a ZSTD error.\n  */\nCOVER_dictSelection_t COVER_dictSelectionError(size_t error);\n\n/**\n * Always call after selectDict is called to free up used memory from\n * newly created dictionary.\n */\nvoid COVER_dictSelectionFree(COVER_dictSelection_t selection);\n\n/**\n * Called to finalize the dictionary and select one based on whether or not\n * the shrink-dict flag was enabled. If enabled the dictionary used is the\n * smallest dictionary within a specified regression of the compressed size\n * from the largest dictionary.\n */\n COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,\n                       size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,\n                       size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize);\n"
  },
  {
    "path": "src/third_party/zstd/lib/dictBuilder/divsufsort.c",
    "content": "/*\n * divsufsort.c for libdivsufsort-lite\n * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n */\n\n/*- Compiler specifics -*/\n#ifdef __clang__\n#pragma clang diagnostic ignored \"-Wshorten-64-to-32\"\n#endif\n\n#if defined(_MSC_VER)\n#  pragma warning(disable : 4244)\n#  pragma warning(disable : 4127)    /* C4127 : Condition expression is constant */\n#endif\n\n\n/*- Dependencies -*/\n#include <assert.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"divsufsort.h\"\n\n/*- Constants -*/\n#if defined(INLINE)\n# undef INLINE\n#endif\n#if !defined(INLINE)\n# define INLINE __inline\n#endif\n#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)\n# undef ALPHABET_SIZE\n#endif\n#if !defined(ALPHABET_SIZE)\n# define ALPHABET_SIZE (256)\n#endif\n#define BUCKET_A_SIZE (ALPHABET_SIZE)\n#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)\n#if 
defined(SS_INSERTIONSORT_THRESHOLD)\n# if SS_INSERTIONSORT_THRESHOLD < 1\n#  undef SS_INSERTIONSORT_THRESHOLD\n#  define SS_INSERTIONSORT_THRESHOLD (1)\n# endif\n#else\n# define SS_INSERTIONSORT_THRESHOLD (8)\n#endif\n#if defined(SS_BLOCKSIZE)\n# if SS_BLOCKSIZE < 0\n#  undef SS_BLOCKSIZE\n#  define SS_BLOCKSIZE (0)\n# elif 32768 <= SS_BLOCKSIZE\n#  undef SS_BLOCKSIZE\n#  define SS_BLOCKSIZE (32767)\n# endif\n#else\n# define SS_BLOCKSIZE (1024)\n#endif\n/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */\n#if SS_BLOCKSIZE == 0\n# define SS_MISORT_STACKSIZE (96)\n#elif SS_BLOCKSIZE <= 4096\n# define SS_MISORT_STACKSIZE (16)\n#else\n# define SS_MISORT_STACKSIZE (24)\n#endif\n#define SS_SMERGE_STACKSIZE (32)\n#define TR_INSERTIONSORT_THRESHOLD (8)\n#define TR_STACKSIZE (64)\n\n\n/*- Macros -*/\n#ifndef SWAP\n# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)\n#endif /* SWAP */\n#ifndef MIN\n# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))\n#endif /* MIN */\n#ifndef MAX\n# define MAX(_a, _b) (((_a) > (_b)) ? 
(_a) : (_b))\n#endif /* MAX */\n#define STACK_PUSH(_a, _b, _c, _d)\\\n  do {\\\n    assert(ssize < STACK_SIZE);\\\n    stack[ssize].a = (_a), stack[ssize].b = (_b),\\\n    stack[ssize].c = (_c), stack[ssize++].d = (_d);\\\n  } while(0)\n#define STACK_PUSH5(_a, _b, _c, _d, _e)\\\n  do {\\\n    assert(ssize < STACK_SIZE);\\\n    stack[ssize].a = (_a), stack[ssize].b = (_b),\\\n    stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\\\n  } while(0)\n#define STACK_POP(_a, _b, _c, _d)\\\n  do {\\\n    assert(0 <= ssize);\\\n    if(ssize == 0) { return; }\\\n    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\\\n    (_c) = stack[ssize].c, (_d) = stack[ssize].d;\\\n  } while(0)\n#define STACK_POP5(_a, _b, _c, _d, _e)\\\n  do {\\\n    assert(0 <= ssize);\\\n    if(ssize == 0) { return; }\\\n    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\\\n    (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\\\n  } while(0)\n#define BUCKET_A(_c0) bucket_A[(_c0)]\n#if ALPHABET_SIZE == 256\n#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])\n#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])\n#else\n#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])\n#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])\n#endif\n\n\n/*- Private Functions -*/\n\nstatic const int lg_table[256]= {\n -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,\n  5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,\n  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,\n  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,\n  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,\n  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,\n  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,\n  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7\n};\n\n#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < 
SS_BLOCKSIZE)\n\nstatic INLINE\nint\nss_ilg(int n) {\n#if SS_BLOCKSIZE == 0\n  return (n & 0xffff0000) ?\n          ((n & 0xff000000) ?\n            24 + lg_table[(n >> 24) & 0xff] :\n            16 + lg_table[(n >> 16) & 0xff]) :\n          ((n & 0x0000ff00) ?\n             8 + lg_table[(n >>  8) & 0xff] :\n             0 + lg_table[(n >>  0) & 0xff]);\n#elif SS_BLOCKSIZE < 256\n  return lg_table[n];\n#else\n  return (n & 0xff00) ?\n          8 + lg_table[(n >> 8) & 0xff] :\n          0 + lg_table[(n >> 0) & 0xff];\n#endif\n}\n\n#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */\n\n#if SS_BLOCKSIZE != 0\n\nstatic const int sqq_table[256] = {\n  0,  16,  22,  27,  32,  35,  39,  42,  45,  48,  50,  53,  55,  57,  59,  61,\n 64,  65,  67,  69,  71,  73,  75,  76,  78,  80,  81,  83,  84,  86,  87,  89,\n 90,  91,  93,  94,  96,  97,  98,  99, 101, 102, 103, 104, 106, 107, 108, 109,\n110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,\n128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,\n143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,\n156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,\n169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,\n181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,\n192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,\n202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,\n212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,\n221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,\n230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,\n239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,\n247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255\n};\n\nstatic 
INLINE\nint\nss_isqrt(int x) {\n  int y, e;\n\n  if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }\n  e = (x & 0xffff0000) ?\n        ((x & 0xff000000) ?\n          24 + lg_table[(x >> 24) & 0xff] :\n          16 + lg_table[(x >> 16) & 0xff]) :\n        ((x & 0x0000ff00) ?\n           8 + lg_table[(x >>  8) & 0xff] :\n           0 + lg_table[(x >>  0) & 0xff]);\n\n  if(e >= 16) {\n    y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);\n    if(e >= 24) { y = (y + 1 + x / y) >> 1; }\n    y = (y + 1 + x / y) >> 1;\n  } else if(e >= 8) {\n    y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;\n  } else {\n    return sqq_table[x] >> 4;\n  }\n\n  return (x < (y * y)) ? y - 1 : y;\n}\n\n#endif /* SS_BLOCKSIZE != 0 */\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Compares two suffixes. */\nstatic INLINE\nint\nss_compare(const unsigned char *T,\n           const int *p1, const int *p2,\n           int depth) {\n  const unsigned char *U1, *U2, *U1n, *U2n;\n\n  for(U1 = T + depth + *p1,\n      U2 = T + depth + *p2,\n      U1n = T + *(p1 + 1) + 2,\n      U2n = T + *(p2 + 1) + 2;\n      (U1 < U1n) && (U2 < U2n) && (*U1 == *U2);\n      ++U1, ++U2) {\n  }\n\n  return U1 < U1n ?\n        (U2 < U2n ? *U1 - *U2 : 1) :\n        (U2 < U2n ? 
-1 : 0);\n}\n\n\n/*---------------------------------------------------------------------------*/\n\n#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)\n\n/* Insertionsort for small size groups */\nstatic\nvoid\nss_insertionsort(const unsigned char *T, const int *PA,\n                 int *first, int *last, int depth) {\n  int *i, *j;\n  int t;\n  int r;\n\n  for(i = last - 2; first <= i; --i) {\n    for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {\n      do { *(j - 1) = *j; } while((++j < last) && (*j < 0));\n      if(last <= j) { break; }\n    }\n    if(r == 0) { *j = ~*j; }\n    *(j - 1) = t;\n  }\n}\n\n#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */\n\n\n/*---------------------------------------------------------------------------*/\n\n#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)\n\nstatic INLINE\nvoid\nss_fixdown(const unsigned char *Td, const int *PA,\n           int *SA, int i, int size) {\n  int j, k;\n  int v;\n  int c, d, e;\n\n  for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {\n    d = Td[PA[SA[k = j++]]];\n    if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; }\n    if(d <= c) { break; }\n  }\n  SA[i] = v;\n}\n\n/* Simple top-down heapsort. */\nstatic\nvoid\nss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {\n  int i, m;\n  int t;\n\n  m = size;\n  if((size % 2) == 0) {\n    m--;\n    if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }\n  }\n\n  for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); }\n  if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); }\n  for(i = m - 1; 0 < i; --i) {\n    t = SA[0], SA[0] = SA[i];\n    ss_fixdown(Td, PA, SA, 0, i);\n    SA[i] = t;\n  }\n}\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Returns the median of three elements. 
*/\nstatic INLINE\nint *\nss_median3(const unsigned char *Td, const int *PA,\n           int *v1, int *v2, int *v3) {\n  int *t;\n  if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); }\n  if(Td[PA[*v2]] > Td[PA[*v3]]) {\n    if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; }\n    else { return v3; }\n  }\n  return v2;\n}\n\n/* Returns the median of five elements. */\nstatic INLINE\nint *\nss_median5(const unsigned char *Td, const int *PA,\n           int *v1, int *v2, int *v3, int *v4, int *v5) {\n  int *t;\n  if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }\n  if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }\n  if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); }\n  if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }\n  if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); }\n  if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }\n  return v3;\n}\n\n/* Returns the pivot element. */\nstatic INLINE\nint *\nss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {\n  int *middle;\n  int t;\n\n  t = last - first;\n  middle = first + t / 2;\n\n  if(t <= 512) {\n    if(t <= 32) {\n      return ss_median3(Td, PA, first, middle, last - 1);\n    } else {\n      t >>= 2;\n      return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);\n    }\n  }\n  t >>= 3;\n  first  = ss_median3(Td, PA, first, first + t, first + (t << 1));\n  middle = ss_median3(Td, PA, middle - t, middle, middle + t);\n  last   = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);\n  return ss_median3(Td, PA, first, middle, last);\n}\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Binary partition for substrings. 
*/\nstatic INLINE\nint *\nss_partition(const int *PA,\n                    int *first, int *last, int depth) {\n  int *a, *b;\n  int t;\n  for(a = first - 1, b = last;;) {\n    for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }\n    for(; (a < --b) && ((PA[*b] + depth) <  (PA[*b + 1] + 1));) { }\n    if(b <= a) { break; }\n    t = ~*b;\n    *b = *a;\n    *a = t;\n  }\n  if(first < a) { *first = ~*first; }\n  return a;\n}\n\n/* Multikey introsort for medium size groups. */\nstatic\nvoid\nss_mintrosort(const unsigned char *T, const int *PA,\n              int *first, int *last,\n              int depth) {\n#define STACK_SIZE SS_MISORT_STACKSIZE\n  struct { int *a, *b, c; int d; } stack[STACK_SIZE];\n  const unsigned char *Td;\n  int *a, *b, *c, *d, *e, *f;\n  int s, t;\n  int ssize;\n  int limit;\n  int v, x = 0;\n\n  for(ssize = 0, limit = ss_ilg(last - first);;) {\n\n    if((last - first) <= SS_INSERTIONSORT_THRESHOLD) {\n#if 1 < SS_INSERTIONSORT_THRESHOLD\n      if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }\n#endif\n      STACK_POP(first, last, depth, limit);\n      continue;\n    }\n\n    Td = T + depth;\n    if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }\n    if(limit < 0) {\n      for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) {\n        if((x = Td[PA[*a]]) != v) {\n          if(1 < (a - first)) { break; }\n          v = x;\n          first = a;\n        }\n      }\n      if(Td[PA[*first] - 1] < v) {\n        first = ss_partition(PA, first, a, depth);\n      }\n      if((a - first) <= (last - a)) {\n        if(1 < (a - first)) {\n          STACK_PUSH(a, last, depth, -1);\n          last = a, depth += 1, limit = ss_ilg(a - first);\n        } else {\n          first = a, limit = -1;\n        }\n      } else {\n        if(1 < (last - a)) {\n          STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));\n          first = a, limit = -1;\n        } else {\n          last = a, depth += 1, limit = 
ss_ilg(a - first);\n        }\n      }\n      continue;\n    }\n\n    /* choose pivot */\n    a = ss_pivot(Td, PA, first, last);\n    v = Td[PA[*a]];\n    SWAP(*first, *a);\n\n    /* partition */\n    for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { }\n    if(((a = b) < last) && (x < v)) {\n      for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) {\n        if(x == v) { SWAP(*b, *a); ++a; }\n      }\n    }\n    for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { }\n    if((b < (d = c)) && (x > v)) {\n      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {\n        if(x == v) { SWAP(*c, *d); --d; }\n      }\n    }\n    for(; b < c;) {\n      SWAP(*b, *c);\n      for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) {\n        if(x == v) { SWAP(*b, *a); ++a; }\n      }\n      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {\n        if(x == v) { SWAP(*c, *d); --d; }\n      }\n    }\n\n    if(a <= d) {\n      c = b - 1;\n\n      if((s = a - first) > (t = b - a)) { s = t; }\n      for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }\n      if((s = d - c) > (t = last - d - 1)) { s = t; }\n      for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }\n\n      a = first + (b - a), c = last - (d - c);\n      b = (v <= Td[PA[*a] - 1]) ? 
a : ss_partition(PA, a, c, depth);\n\n      if((a - first) <= (last - c)) {\n        if((last - c) <= (c - b)) {\n          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));\n          STACK_PUSH(c, last, depth, limit);\n          last = a;\n        } else if((a - first) <= (c - b)) {\n          STACK_PUSH(c, last, depth, limit);\n          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));\n          last = a;\n        } else {\n          STACK_PUSH(c, last, depth, limit);\n          STACK_PUSH(first, a, depth, limit);\n          first = b, last = c, depth += 1, limit = ss_ilg(c - b);\n        }\n      } else {\n        if((a - first) <= (c - b)) {\n          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));\n          STACK_PUSH(first, a, depth, limit);\n          first = c;\n        } else if((last - c) <= (c - b)) {\n          STACK_PUSH(first, a, depth, limit);\n          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));\n          first = c;\n        } else {\n          STACK_PUSH(first, a, depth, limit);\n          STACK_PUSH(c, last, depth, limit);\n          first = b, last = c, depth += 1, limit = ss_ilg(c - b);\n        }\n      }\n    } else {\n      limit += 1;\n      if(Td[PA[*first] - 1] < v) {\n        first = ss_partition(PA, first, last, depth);\n        limit = ss_ilg(last - first);\n      }\n      depth += 1;\n    }\n  }\n#undef STACK_SIZE\n}\n\n#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */\n\n\n/*---------------------------------------------------------------------------*/\n\n#if SS_BLOCKSIZE != 0\n\nstatic INLINE\nvoid\nss_blockswap(int *a, int *b, int n) {\n  int t;\n  for(; 0 < n; --n, ++a, ++b) {\n    t = *a, *a = *b, *b = t;\n  }\n}\n\nstatic INLINE\nvoid\nss_rotate(int *first, int *middle, int *last) {\n  int *a, *b, t;\n  int l, r;\n  l = middle - first, r = last - middle;\n  for(; (0 < l) && (0 < r);) {\n    if(l == r) { ss_blockswap(first, middle, l); break; }\n    if(l < r) {\n      a = last - 1, b = middle - 1;\n      t = 
*a;\n      do {\n        *a-- = *b, *b-- = *a;\n        if(b < first) {\n          *a = t;\n          last = a;\n          if((r -= l + 1) <= l) { break; }\n          a -= 1, b = middle - 1;\n          t = *a;\n        }\n      } while(1);\n    } else {\n      a = first, b = middle;\n      t = *a;\n      do {\n        *a++ = *b, *b++ = *a;\n        if(last <= b) {\n          *a = t;\n          first = a + 1;\n          if((l -= r + 1) <= r) { break; }\n          a += 1, b = middle;\n          t = *a;\n        }\n      } while(1);\n    }\n  }\n}\n\n\n/*---------------------------------------------------------------------------*/\n\nstatic\nvoid\nss_inplacemerge(const unsigned char *T, const int *PA,\n                int *first, int *middle, int *last,\n                int depth) {\n  const int *p;\n  int *a, *b;\n  int len, half;\n  int q, r;\n  int x;\n\n  for(;;) {\n    if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); }\n    else                { x = 0; p = PA +  *(last - 1); }\n    for(a = first, len = middle - first, half = len >> 1, r = -1;\n        0 < len;\n        len = half, half >>= 1) {\n      b = a + half;\n      q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);\n      if(q < 0) {\n        a = b + 1;\n        half -= (len & 1) ^ 1;\n      } else {\n        r = q;\n      }\n    }\n    if(a < middle) {\n      if(r == 0) { *a = ~*a; }\n      ss_rotate(a, middle, last);\n      last -= middle - a;\n      middle = a;\n      if(first == middle) { break; }\n    }\n    --last;\n    if(x != 0) { while(*--last < 0) { } }\n    if(middle == last) { break; }\n  }\n}\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Merge-forward with internal buffer. 
*/\nstatic\nvoid\nss_mergeforward(const unsigned char *T, const int *PA,\n                int *first, int *middle, int *last,\n                int *buf, int depth) {\n  int *a, *b, *c, *bufend;\n  int t;\n  int r;\n\n  bufend = buf + (middle - first) - 1;\n  ss_blockswap(buf, first, middle - first);\n\n  for(t = *(a = first), b = buf, c = middle;;) {\n    r = ss_compare(T, PA + *b, PA + *c, depth);\n    if(r < 0) {\n      do {\n        *a++ = *b;\n        if(bufend <= b) { *bufend = t; return; }\n        *b++ = *a;\n      } while(*b < 0);\n    } else if(r > 0) {\n      do {\n        *a++ = *c, *c++ = *a;\n        if(last <= c) {\n          while(b < bufend) { *a++ = *b, *b++ = *a; }\n          *a = *b, *b = t;\n          return;\n        }\n      } while(*c < 0);\n    } else {\n      *c = ~*c;\n      do {\n        *a++ = *b;\n        if(bufend <= b) { *bufend = t; return; }\n        *b++ = *a;\n      } while(*b < 0);\n\n      do {\n        *a++ = *c, *c++ = *a;\n        if(last <= c) {\n          while(b < bufend) { *a++ = *b, *b++ = *a; }\n          *a = *b, *b = t;\n          return;\n        }\n      } while(*c < 0);\n    }\n  }\n}\n\n/* Merge-backward with internal buffer. 
*/\nstatic\nvoid\nss_mergebackward(const unsigned char *T, const int *PA,\n                 int *first, int *middle, int *last,\n                 int *buf, int depth) {\n  const int *p1, *p2;\n  int *a, *b, *c, *bufend;\n  int t;\n  int r;\n  int x;\n\n  bufend = buf + (last - middle) - 1;\n  ss_blockswap(buf, middle, last - middle);\n\n  x = 0;\n  if(*bufend < 0)       { p1 = PA + ~*bufend; x |= 1; }\n  else                  { p1 = PA +  *bufend; }\n  if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; }\n  else                  { p2 = PA +  *(middle - 1); }\n  for(t = *(a = last - 1), b = bufend, c = middle - 1;;) {\n    r = ss_compare(T, p1, p2, depth);\n    if(0 < r) {\n      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }\n      *a-- = *b;\n      if(b <= buf) { *buf = t; break; }\n      *b-- = *a;\n      if(*b < 0) { p1 = PA + ~*b; x |= 1; }\n      else       { p1 = PA +  *b; }\n    } else if(r < 0) {\n      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }\n      *a-- = *c, *c-- = *a;\n      if(c < first) {\n        while(buf < b) { *a-- = *b, *b-- = *a; }\n        *a = *b, *b = t;\n        break;\n      }\n      if(*c < 0) { p2 = PA + ~*c; x |= 2; }\n      else       { p2 = PA +  *c; }\n    } else {\n      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }\n      *a-- = ~*b;\n      if(b <= buf) { *buf = t; break; }\n      *b-- = *a;\n      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }\n      *a-- = *c, *c-- = *a;\n      if(c < first) {\n        while(buf < b) { *a-- = *b, *b-- = *a; }\n        *a = *b, *b = t;\n        break;\n      }\n      if(*b < 0) { p1 = PA + ~*b; x |= 1; }\n      else       { p1 = PA +  *b; }\n      if(*c < 0) { p2 = PA + ~*c; x |= 2; }\n      else       { p2 = PA +  *c; }\n    }\n  }\n}\n\n/* D&C based merge. 
*/\nstatic\nvoid\nss_swapmerge(const unsigned char *T, const int *PA,\n             int *first, int *middle, int *last,\n             int *buf, int bufsize, int depth) {\n#define STACK_SIZE SS_SMERGE_STACKSIZE\n#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))\n#define MERGE_CHECK(a, b, c)\\\n  do {\\\n    if(((c) & 1) ||\\\n       (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\\\n      *(a) = ~*(a);\\\n    }\\\n    if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\\\n      *(b) = ~*(b);\\\n    }\\\n  } while(0)\n  struct { int *a, *b, *c; int d; } stack[STACK_SIZE];\n  int *l, *r, *lm, *rm;\n  int m, len, half;\n  int ssize;\n  int check, next;\n\n  for(check = 0, ssize = 0;;) {\n    if((last - middle) <= bufsize) {\n      if((first < middle) && (middle < last)) {\n        ss_mergebackward(T, PA, first, middle, last, buf, depth);\n      }\n      MERGE_CHECK(first, last, check);\n      STACK_POP(first, middle, last, check);\n      continue;\n    }\n\n    if((middle - first) <= bufsize) {\n      if(first < middle) {\n        ss_mergeforward(T, PA, first, middle, last, buf, depth);\n      }\n      MERGE_CHECK(first, last, check);\n      STACK_POP(first, middle, last, check);\n      continue;\n    }\n\n    for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1;\n        0 < len;\n        len = half, half >>= 1) {\n      if(ss_compare(T, PA + GETIDX(*(middle + m + half)),\n                       PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {\n        m += half + 1;\n        half -= (len & 1) ^ 1;\n      }\n    }\n\n    if(0 < m) {\n      lm = middle - m, rm = middle + m;\n      ss_blockswap(lm, middle, m);\n      l = r = middle, next = 0;\n      if(rm < last) {\n        if(*rm < 0) {\n          *rm = ~*rm;\n          if(first < lm) { for(; *--l < 0;) { } next |= 4; }\n          next |= 1;\n        } else if(first < lm) {\n          for(; *r < 0; ++r) { }\n          next |= 2;\n        
}\n      }\n\n      if((l - first) <= (last - r)) {\n        STACK_PUSH(r, rm, last, (next & 3) | (check & 4));\n        middle = lm, last = l, check = (check & 3) | (next & 4);\n      } else {\n        if((next & 2) && (r == middle)) { next ^= 6; }\n        STACK_PUSH(first, lm, l, (check & 3) | (next & 4));\n        first = r, middle = rm, check = (next & 3) | (check & 4);\n      }\n    } else {\n      if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {\n        *middle = ~*middle;\n      }\n      MERGE_CHECK(first, last, check);\n      STACK_POP(first, middle, last, check);\n    }\n  }\n#undef STACK_SIZE\n}\n\n#endif /* SS_BLOCKSIZE != 0 */\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Substring sort */\nstatic\nvoid\nsssort(const unsigned char *T, const int *PA,\n       int *first, int *last,\n       int *buf, int bufsize,\n       int depth, int n, int lastsuffix) {\n  int *a;\n#if SS_BLOCKSIZE != 0\n  int *b, *middle, *curbuf;\n  int j, k, curbufsize, limit;\n#endif\n  int i;\n\n  if(lastsuffix != 0) { ++first; }\n\n#if SS_BLOCKSIZE == 0\n  ss_mintrosort(T, PA, first, last, depth);\n#else\n  if((bufsize < SS_BLOCKSIZE) &&\n      (bufsize < (last - first)) &&\n      (bufsize < (limit = ss_isqrt(last - first)))) {\n    if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }\n    buf = middle = last - limit, bufsize = limit;\n  } else {\n    middle = last, limit = 0;\n  }\n  for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {\n#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE\n    ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);\n#elif 1 < SS_BLOCKSIZE\n    ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);\n#endif\n    curbufsize = last - (a + SS_BLOCKSIZE);\n    curbuf = a + SS_BLOCKSIZE;\n    if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }\n    for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {\n      ss_swapmerge(T, PA, b - k, b, b + k, 
curbuf, curbufsize, depth);\n    }\n  }\n#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE\n  ss_mintrosort(T, PA, a, middle, depth);\n#elif 1 < SS_BLOCKSIZE\n  ss_insertionsort(T, PA, a, middle, depth);\n#endif\n  for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {\n    if(i & 1) {\n      ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);\n      a -= k;\n    }\n  }\n  if(limit != 0) {\n#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE\n    ss_mintrosort(T, PA, middle, last, depth);\n#elif 1 < SS_BLOCKSIZE\n    ss_insertionsort(T, PA, middle, last, depth);\n#endif\n    ss_inplacemerge(T, PA, first, middle, last, depth);\n  }\n#endif\n\n  if(lastsuffix != 0) {\n    /* Insert last type B* suffix. */\n    int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;\n    for(a = first, i = *(first - 1);\n        (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));\n        ++a) {\n      *(a - 1) = *a;\n    }\n    *(a - 1) = i;\n  }\n}\n\n\n/*---------------------------------------------------------------------------*/\n\nstatic INLINE\nint\ntr_ilg(int n) {\n  return (n & 0xffff0000) ?\n          ((n & 0xff000000) ?\n            24 + lg_table[(n >> 24) & 0xff] :\n            16 + lg_table[(n >> 16) & 0xff]) :\n          ((n & 0x0000ff00) ?\n             8 + lg_table[(n >>  8) & 0xff] :\n             0 + lg_table[(n >>  0) & 0xff]);\n}\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Simple insertionsort for small size groups. 
*/\nstatic\nvoid\ntr_insertionsort(const int *ISAd, int *first, int *last) {\n  int *a, *b;\n  int t, r;\n\n  for(a = first + 1; a < last; ++a) {\n    for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) {\n      do { *(b + 1) = *b; } while((first <= --b) && (*b < 0));\n      if(b < first) { break; }\n    }\n    if(r == 0) { *b = ~*b; }\n    *(b + 1) = t;\n  }\n}\n\n\n/*---------------------------------------------------------------------------*/\n\nstatic INLINE\nvoid\ntr_fixdown(const int *ISAd, int *SA, int i, int size) {\n  int j, k;\n  int v;\n  int c, d, e;\n\n  for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {\n    d = ISAd[SA[k = j++]];\n    if(d < (e = ISAd[SA[j]])) { k = j; d = e; }\n    if(d <= c) { break; }\n  }\n  SA[i] = v;\n}\n\n/* Simple top-down heapsort. */\nstatic\nvoid\ntr_heapsort(const int *ISAd, int *SA, int size) {\n  int i, m;\n  int t;\n\n  m = size;\n  if((size % 2) == 0) {\n    m--;\n    if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }\n  }\n\n  for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }\n  if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }\n  for(i = m - 1; 0 < i; --i) {\n    t = SA[0], SA[0] = SA[i];\n    tr_fixdown(ISAd, SA, 0, i);\n    SA[i] = t;\n  }\n}\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Returns the median of three elements. */\nstatic INLINE\nint *\ntr_median3(const int *ISAd, int *v1, int *v2, int *v3) {\n  int *t;\n  if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); }\n  if(ISAd[*v2] > ISAd[*v3]) {\n    if(ISAd[*v1] > ISAd[*v3]) { return v1; }\n    else { return v3; }\n  }\n  return v2;\n}\n\n/* Returns the median of five elements. 
*/\nstatic INLINE\nint *\ntr_median5(const int *ISAd,\n           int *v1, int *v2, int *v3, int *v4, int *v5) {\n  int *t;\n  if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }\n  if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }\n  if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); }\n  if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }\n  if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); }\n  if(ISAd[*v3] > ISAd[*v4]) { return v4; }\n  return v3;\n}\n\n/* Returns the pivot element. */\nstatic INLINE\nint *\ntr_pivot(const int *ISAd, int *first, int *last) {\n  int *middle;\n  int t;\n\n  t = last - first;\n  middle = first + t / 2;\n\n  if(t <= 512) {\n    if(t <= 32) {\n      return tr_median3(ISAd, first, middle, last - 1);\n    } else {\n      t >>= 2;\n      return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);\n    }\n  }\n  t >>= 3;\n  first  = tr_median3(ISAd, first, first + t, first + (t << 1));\n  middle = tr_median3(ISAd, middle - t, middle, middle + t);\n  last   = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);\n  return tr_median3(ISAd, first, middle, last);\n}\n\n\n/*---------------------------------------------------------------------------*/\n\ntypedef struct _trbudget_t trbudget_t;\nstruct _trbudget_t {\n  int chance;\n  int remain;\n  int incval;\n  int count;\n};\n\nstatic INLINE\nvoid\ntrbudget_init(trbudget_t *budget, int chance, int incval) {\n  budget->chance = chance;\n  budget->remain = budget->incval = incval;\n}\n\nstatic INLINE\nint\ntrbudget_check(trbudget_t *budget, int size) {\n  if(size <= budget->remain) { budget->remain -= size; return 1; }\n  if(budget->chance == 0) { budget->count += size; return 0; }\n  budget->remain += budget->incval - size;\n  budget->chance -= 1;\n  return 1;\n}\n\n\n/*---------------------------------------------------------------------------*/\n\nstatic INLINE\nvoid\ntr_partition(const int *ISAd,\n             int *first, int *middle, int *last,\n             int **pa, int 
**pb, int v) {\n  int *a, *b, *c, *d, *e, *f;\n  int t, s;\n  int x = 0;\n\n  for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }\n  if(((a = b) < last) && (x < v)) {\n    for(; (++b < last) && ((x = ISAd[*b]) <= v);) {\n      if(x == v) { SWAP(*b, *a); ++a; }\n    }\n  }\n  for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }\n  if((b < (d = c)) && (x > v)) {\n    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {\n      if(x == v) { SWAP(*c, *d); --d; }\n    }\n  }\n  for(; b < c;) {\n    SWAP(*b, *c);\n    for(; (++b < c) && ((x = ISAd[*b]) <= v);) {\n      if(x == v) { SWAP(*b, *a); ++a; }\n    }\n    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {\n      if(x == v) { SWAP(*c, *d); --d; }\n    }\n  }\n\n  if(a <= d) {\n    c = b - 1;\n    if((s = a - first) > (t = b - a)) { s = t; }\n    for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }\n    if((s = d - c) > (t = last - d - 1)) { s = t; }\n    for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }\n    first += (b - a), last -= (d - c);\n  }\n  *pa = first, *pb = last;\n}\n\nstatic\nvoid\ntr_copy(int *ISA, const int *SA,\n        int *first, int *a, int *b, int *last,\n        int depth) {\n  /* sort suffixes of middle partition\n     by using sorted order of suffixes of left and right partition. 
*/\n  int *c, *d, *e;\n  int s, v;\n\n  v = b - SA - 1;\n  for(c = first, d = a - 1; c <= d; ++c) {\n    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {\n      *++d = s;\n      ISA[s] = d - SA;\n    }\n  }\n  for(c = last - 1, e = d + 1, d = b; e < d; --c) {\n    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {\n      *--d = s;\n      ISA[s] = d - SA;\n    }\n  }\n}\n\nstatic\nvoid\ntr_partialcopy(int *ISA, const int *SA,\n               int *first, int *a, int *b, int *last,\n               int depth) {\n  int *c, *d, *e;\n  int s, v;\n  int rank, lastrank, newrank = -1;\n\n  v = b - SA - 1;\n  lastrank = -1;\n  for(c = first, d = a - 1; c <= d; ++c) {\n    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {\n      *++d = s;\n      rank = ISA[s + depth];\n      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }\n      ISA[s] = newrank;\n    }\n  }\n\n  lastrank = -1;\n  for(e = d; first <= e; --e) {\n    rank = ISA[*e];\n    if(lastrank != rank) { lastrank = rank; newrank = e - SA; }\n    if(newrank != rank) { ISA[*e] = newrank; }\n  }\n\n  lastrank = -1;\n  for(c = last - 1, e = d + 1, d = b; e < d; --c) {\n    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {\n      *--d = s;\n      rank = ISA[s + depth];\n      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }\n      ISA[s] = newrank;\n    }\n  }\n}\n\nstatic\nvoid\ntr_introsort(int *ISA, const int *ISAd,\n             int *SA, int *first, int *last,\n             trbudget_t *budget) {\n#define STACK_SIZE TR_STACKSIZE\n  struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE];\n  int *a, *b, *c;\n  int t;\n  int v, x = 0;\n  int incr = ISAd - ISA;\n  int limit, next;\n  int ssize, trlink = -1;\n\n  for(ssize = 0, limit = tr_ilg(last - first);;) {\n\n    if(limit < 0) {\n      if(limit == -1) {\n        /* tandem repeat partition */\n        tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);\n\n        /* update ranks */\n        if(a < last) {\n          for(c = first, v = a - 
SA - 1; c < a; ++c) { ISA[*c] = v; }\n        }\n        if(b < last) {\n          for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }\n        }\n\n        /* push */\n        if(1 < (b - a)) {\n          STACK_PUSH5(NULL, a, b, 0, 0);\n          STACK_PUSH5(ISAd - incr, first, last, -2, trlink);\n          trlink = ssize - 2;\n        }\n        if((a - first) <= (last - b)) {\n          if(1 < (a - first)) {\n            STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);\n            last = a, limit = tr_ilg(a - first);\n          } else if(1 < (last - b)) {\n            first = b, limit = tr_ilg(last - b);\n          } else {\n            STACK_POP5(ISAd, first, last, limit, trlink);\n          }\n        } else {\n          if(1 < (last - b)) {\n            STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);\n            first = b, limit = tr_ilg(last - b);\n          } else if(1 < (a - first)) {\n            last = a, limit = tr_ilg(a - first);\n          } else {\n            STACK_POP5(ISAd, first, last, limit, trlink);\n          }\n        }\n      } else if(limit == -2) {\n        /* tandem repeat copy */\n        a = stack[--ssize].b, b = stack[ssize].c;\n        if(stack[ssize].d == 0) {\n          tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);\n        } else {\n          if(0 <= trlink) { stack[trlink].d = -1; }\n          tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);\n        }\n        STACK_POP5(ISAd, first, last, limit, trlink);\n      } else {\n        /* sorted partition */\n        if(0 <= *first) {\n          a = first;\n          do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));\n          first = a;\n        }\n        if(first < last) {\n          a = first; do { *a = ~*a; } while(*++a < 0);\n          next = (ISA[*a] != ISAd[*a]) ? 
tr_ilg(a - first + 1) : -1;\n          if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }\n\n          /* push */\n          if(trbudget_check(budget, a - first)) {\n            if((a - first) <= (last - a)) {\n              STACK_PUSH5(ISAd, a, last, -3, trlink);\n              ISAd += incr, last = a, limit = next;\n            } else {\n              if(1 < (last - a)) {\n                STACK_PUSH5(ISAd + incr, first, a, next, trlink);\n                first = a, limit = -3;\n              } else {\n                ISAd += incr, last = a, limit = next;\n              }\n            }\n          } else {\n            if(0 <= trlink) { stack[trlink].d = -1; }\n            if(1 < (last - a)) {\n              first = a, limit = -3;\n            } else {\n              STACK_POP5(ISAd, first, last, limit, trlink);\n            }\n          }\n        } else {\n          STACK_POP5(ISAd, first, last, limit, trlink);\n        }\n      }\n      continue;\n    }\n\n    if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {\n      tr_insertionsort(ISAd, first, last);\n      limit = -3;\n      continue;\n    }\n\n    if(limit-- == 0) {\n      tr_heapsort(ISAd, first, last - first);\n      for(a = last - 1; first < a; a = b) {\n        for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }\n      }\n      limit = -3;\n      continue;\n    }\n\n    /* choose pivot */\n    a = tr_pivot(ISAd, first, last);\n    SWAP(*first, *a);\n    v = ISAd[*first];\n\n    /* partition */\n    tr_partition(ISAd, first, first + 1, last, &a, &b, v);\n    if((last - first) != (b - a)) {\n      next = (ISA[*a] != v) ? 
tr_ilg(b - a) : -1;\n\n      /* update ranks */\n      for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }\n      if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }\n\n      /* push */\n      if((1 < (b - a)) && (trbudget_check(budget, b - a))) {\n        if((a - first) <= (last - b)) {\n          if((last - b) <= (b - a)) {\n            if(1 < (a - first)) {\n              STACK_PUSH5(ISAd + incr, a, b, next, trlink);\n              STACK_PUSH5(ISAd, b, last, limit, trlink);\n              last = a;\n            } else if(1 < (last - b)) {\n              STACK_PUSH5(ISAd + incr, a, b, next, trlink);\n              first = b;\n            } else {\n              ISAd += incr, first = a, last = b, limit = next;\n            }\n          } else if((a - first) <= (b - a)) {\n            if(1 < (a - first)) {\n              STACK_PUSH5(ISAd, b, last, limit, trlink);\n              STACK_PUSH5(ISAd + incr, a, b, next, trlink);\n              last = a;\n            } else {\n              STACK_PUSH5(ISAd, b, last, limit, trlink);\n              ISAd += incr, first = a, last = b, limit = next;\n            }\n          } else {\n            STACK_PUSH5(ISAd, b, last, limit, trlink);\n            STACK_PUSH5(ISAd, first, a, limit, trlink);\n            ISAd += incr, first = a, last = b, limit = next;\n          }\n        } else {\n          if((a - first) <= (b - a)) {\n            if(1 < (last - b)) {\n              STACK_PUSH5(ISAd + incr, a, b, next, trlink);\n              STACK_PUSH5(ISAd, first, a, limit, trlink);\n              first = b;\n            } else if(1 < (a - first)) {\n              STACK_PUSH5(ISAd + incr, a, b, next, trlink);\n              last = a;\n            } else {\n              ISAd += incr, first = a, last = b, limit = next;\n            }\n          } else if((last - b) <= (b - a)) {\n            if(1 < (last - b)) {\n              STACK_PUSH5(ISAd, first, a, limit, trlink);\n              STACK_PUSH5(ISAd + 
incr, a, b, next, trlink);\n              first = b;\n            } else {\n              STACK_PUSH5(ISAd, first, a, limit, trlink);\n              ISAd += incr, first = a, last = b, limit = next;\n            }\n          } else {\n            STACK_PUSH5(ISAd, first, a, limit, trlink);\n            STACK_PUSH5(ISAd, b, last, limit, trlink);\n            ISAd += incr, first = a, last = b, limit = next;\n          }\n        }\n      } else {\n        if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }\n        if((a - first) <= (last - b)) {\n          if(1 < (a - first)) {\n            STACK_PUSH5(ISAd, b, last, limit, trlink);\n            last = a;\n          } else if(1 < (last - b)) {\n            first = b;\n          } else {\n            STACK_POP5(ISAd, first, last, limit, trlink);\n          }\n        } else {\n          if(1 < (last - b)) {\n            STACK_PUSH5(ISAd, first, a, limit, trlink);\n            first = b;\n          } else if(1 < (a - first)) {\n            last = a;\n          } else {\n            STACK_POP5(ISAd, first, last, limit, trlink);\n          }\n        }\n      }\n    } else {\n      if(trbudget_check(budget, last - first)) {\n        limit = tr_ilg(last - first), ISAd += incr;\n      } else {\n        if(0 <= trlink) { stack[trlink].d = -1; }\n        STACK_POP5(ISAd, first, last, limit, trlink);\n      }\n    }\n  }\n#undef STACK_SIZE\n}\n\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Tandem repeat sort */\nstatic\nvoid\ntrsort(int *ISA, int *SA, int n, int depth) {\n  int *ISAd;\n  int *first, *last;\n  trbudget_t budget;\n  int t, skip, unsorted;\n\n  trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);\n/*  trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */\n  for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {\n    first = SA;\n    skip = 0;\n    unsorted = 0;\n    do {\n      if((t = *first) < 0) { first -= t; skip += t; }\n      else {\n        if(skip != 0) { 
*(first + skip) = skip; skip = 0; }\n        last = SA + ISA[t] + 1;\n        if(1 < (last - first)) {\n          budget.count = 0;\n          tr_introsort(ISA, ISAd, SA, first, last, &budget);\n          if(budget.count != 0) { unsorted += budget.count; }\n          else { skip = first - last; }\n        } else if((last - first) == 1) {\n          skip = -1;\n        }\n        first = last;\n      }\n    } while(first < (SA + n));\n    if(skip != 0) { *(first + skip) = skip; }\n    if(unsorted == 0) { break; }\n  }\n}\n\n\n/*---------------------------------------------------------------------------*/\n\n/* Sorts suffixes of type B*. */\nstatic\nint\nsort_typeBstar(const unsigned char *T, int *SA,\n               int *bucket_A, int *bucket_B,\n               int n, int openMP) {\n  int *PAb, *ISAb, *buf;\n#ifdef LIBBSC_OPENMP\n  int *curbuf;\n  int l;\n#endif\n  int i, j, k, t, m, bufsize;\n  int c0, c1;\n#ifdef LIBBSC_OPENMP\n  int d0, d1;\n#endif\n  (void)openMP;\n\n  /* Initialize bucket arrays. */\n  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }\n  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }\n\n  /* Count the number of occurrences of the first one or two characters of each\n     type A, B and B* suffix. Moreover, store the beginning position of all\n     type B* suffixes into the array SA. */\n  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {\n    /* type A suffix. */\n    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));\n    if(0 <= i) {\n      /* type B* suffix. */\n      ++BUCKET_BSTAR(c0, c1);\n      SA[--m] = i;\n      /* type B suffix. */\n      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {\n        ++BUCKET_B(c0, c1);\n      }\n    }\n  }\n  m = n - m;\n/*\nnote:\n  A type B* suffix is lexicographically smaller than a type B suffix that\n  begins with the same first two characters.\n*/\n\n  /* Calculate the index of start/end point of each bucket. 
*/\n  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {\n    t = i + BUCKET_A(c0);\n    BUCKET_A(c0) = i + j; /* start point */\n    i = t + BUCKET_B(c0, c0);\n    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {\n      j += BUCKET_BSTAR(c0, c1);\n      BUCKET_BSTAR(c0, c1) = j; /* end point */\n      i += BUCKET_B(c0, c1);\n    }\n  }\n\n  if(0 < m) {\n    /* Sort the type B* suffixes by their first two characters. */\n    PAb = SA + n - m; ISAb = SA + m;\n    for(i = m - 2; 0 <= i; --i) {\n      t = PAb[i], c0 = T[t], c1 = T[t + 1];\n      SA[--BUCKET_BSTAR(c0, c1)] = i;\n    }\n    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];\n    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;\n\n    /* Sort the type B* substrings using sssort. */\n#ifdef LIBBSC_OPENMP\n    if (openMP)\n    {\n        buf = SA + m;\n        c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;\n#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1)\n        {\n          bufsize = (n - (2 * m)) / omp_get_num_threads();\n          curbuf = buf + omp_get_thread_num() * bufsize;\n          k = 0;\n          for(;;) {\n            #pragma omp critical(sssort_lock)\n            {\n              if(0 < (l = j)) {\n                d0 = c0, d1 = c1;\n                do {\n                  k = BUCKET_BSTAR(d0, d1);\n                  if(--d1 <= d0) {\n                    d1 = ALPHABET_SIZE - 1;\n                    if(--d0 < 0) { break; }\n                  }\n                } while(((l - k) <= 1) && (0 < (l = k)));\n                c0 = d0, c1 = d1, j = k;\n              }\n            }\n            if(l == 0) { break; }\n            sssort(T, PAb, SA + k, SA + l,\n                   curbuf, bufsize, 2, n, *(SA + k) == (m - 1));\n          }\n        }\n    }\n    else\n    {\n        buf = SA + m, bufsize = n - (2 * m);\n        for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {\n          for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {\n            i = BUCKET_BSTAR(c0, c1);\n            
if(1 < (j - i)) {\n              sssort(T, PAb, SA + i, SA + j,\n                     buf, bufsize, 2, n, *(SA + i) == (m - 1));\n            }\n          }\n        }\n    }\n#else\n    buf = SA + m, bufsize = n - (2 * m);\n    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {\n      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {\n        i = BUCKET_BSTAR(c0, c1);\n        if(1 < (j - i)) {\n          sssort(T, PAb, SA + i, SA + j,\n                 buf, bufsize, 2, n, *(SA + i) == (m - 1));\n        }\n      }\n    }\n#endif\n\n    /* Compute ranks of type B* substrings. */\n    for(i = m - 1; 0 <= i; --i) {\n      if(0 <= SA[i]) {\n        j = i;\n        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));\n        SA[i + 1] = i - j;\n        if(i <= 0) { break; }\n      }\n      j = i;\n      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);\n      ISAb[SA[i]] = j;\n    }\n\n    /* Construct the inverse suffix array of type B* suffixes using trsort. */\n    trsort(ISAb, SA, m, 1);\n\n    /* Set the sorted order of tyoe B* suffixes. */\n    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {\n      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }\n      if(0 <= i) {\n        t = i;\n        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }\n        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;\n      }\n    }\n\n    /* Calculate the index of start/end point of each bucket. */\n    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */\n    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {\n      i = BUCKET_A(c0 + 1) - 1;\n      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {\n        t = i - BUCKET_B(c0, c1);\n        BUCKET_B(c0, c1) = i; /* end point */\n\n        /* Move all type B* suffixes to the correct position. 
*/\n        for(i = t, j = BUCKET_BSTAR(c0, c1);\n            j <= k;\n            --i, --k) { SA[i] = SA[k]; }\n      }\n      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */\n      BUCKET_B(c0, c0) = i; /* end point */\n    }\n  }\n\n  return m;\n}\n\n/* Constructs the suffix array by using the sorted order of type B* suffixes. */\nstatic\nvoid\nconstruct_SA(const unsigned char *T, int *SA,\n             int *bucket_A, int *bucket_B,\n             int n, int m) {\n  int *i, *j, *k;\n  int s;\n  int c0, c1, c2;\n\n  if(0 < m) {\n    /* Construct the sorted order of type B suffixes by using\n       the sorted order of type B* suffixes. */\n    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {\n      /* Scan the suffix array from right to left. */\n      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),\n          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;\n          i <= j;\n          --j) {\n        if(0 < (s = *j)) {\n          assert(T[s] == c1);\n          assert(((s + 1) < n) && (T[s] <= T[s + 1]));\n          assert(T[s - 1] <= T[s]);\n          *j = ~s;\n          c0 = T[--s];\n          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }\n          if(c0 != c2) {\n            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }\n            k = SA + BUCKET_B(c2 = c0, c1);\n          }\n          assert(k < j); assert(k != NULL);\n          *k-- = s;\n        } else {\n          assert(((s == 0) && (T[s] == c1)) || (s < 0));\n          *j = ~s;\n        }\n      }\n    }\n  }\n\n  /* Construct the suffix array by using\n     the sorted order of type B suffixes. */\n  k = SA + BUCKET_A(c2 = T[n - 1]);\n  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);\n  /* Scan the suffix array from left to right. 
*/\n  for(i = SA, j = SA + n; i < j; ++i) {\n    if(0 < (s = *i)) {\n      assert(T[s - 1] >= T[s]);\n      c0 = T[--s];\n      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }\n      if(c0 != c2) {\n        BUCKET_A(c2) = k - SA;\n        k = SA + BUCKET_A(c2 = c0);\n      }\n      assert(i < k);\n      *k++ = s;\n    } else {\n      assert(s < 0);\n      *i = ~s;\n    }\n  }\n}\n\n/* Constructs the burrows-wheeler transformed string directly\n   by using the sorted order of type B* suffixes. */\nstatic\nint\nconstruct_BWT(const unsigned char *T, int *SA,\n              int *bucket_A, int *bucket_B,\n              int n, int m) {\n  int *i, *j, *k, *orig;\n  int s;\n  int c0, c1, c2;\n\n  if(0 < m) {\n    /* Construct the sorted order of type B suffixes by using\n       the sorted order of type B* suffixes. */\n    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {\n      /* Scan the suffix array from right to left. */\n      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),\n          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;\n          i <= j;\n          --j) {\n        if(0 < (s = *j)) {\n          assert(T[s] == c1);\n          assert(((s + 1) < n) && (T[s] <= T[s + 1]));\n          assert(T[s - 1] <= T[s]);\n          c0 = T[--s];\n          *j = ~((int)c0);\n          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }\n          if(c0 != c2) {\n            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }\n            k = SA + BUCKET_B(c2 = c0, c1);\n          }\n          assert(k < j); assert(k != NULL);\n          *k-- = s;\n        } else if(s != 0) {\n          *j = ~s;\n#ifndef NDEBUG\n        } else {\n          assert(T[s] == c1);\n#endif\n        }\n      }\n    }\n  }\n\n  /* Construct the BWTed string by using\n     the sorted order of type B suffixes. */\n  k = SA + BUCKET_A(c2 = T[n - 1]);\n  *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1);\n  /* Scan the suffix array from left to right. 
*/\n  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {\n    if(0 < (s = *i)) {\n      assert(T[s - 1] >= T[s]);\n      c0 = T[--s];\n      *i = c0;\n      if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); }\n      if(c0 != c2) {\n        BUCKET_A(c2) = k - SA;\n        k = SA + BUCKET_A(c2 = c0);\n      }\n      assert(i < k);\n      *k++ = s;\n    } else if(s != 0) {\n      *i = ~s;\n    } else {\n      orig = i;\n    }\n  }\n\n  return orig - SA;\n}\n\n/* Constructs the burrows-wheeler transformed string directly\n   by using the sorted order of type B* suffixes. */\nstatic\nint\nconstruct_BWT_indexes(const unsigned char *T, int *SA,\n                      int *bucket_A, int *bucket_B,\n                      int n, int m,\n                      unsigned char * num_indexes, int * indexes) {\n  int *i, *j, *k, *orig;\n  int s;\n  int c0, c1, c2;\n\n  int mod = n / 8;\n  {\n      mod |= mod >> 1;  mod |= mod >> 2;\n      mod |= mod >> 4;  mod |= mod >> 8;\n      mod |= mod >> 16; mod >>= 1;\n\n      *num_indexes = (unsigned char)((n - 1) / (mod + 1));\n  }\n\n  if(0 < m) {\n    /* Construct the sorted order of type B suffixes by using\n       the sorted order of type B* suffixes. */\n    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {\n      /* Scan the suffix array from right to left. 
*/\n      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),\n          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;\n          i <= j;\n          --j) {\n        if(0 < (s = *j)) {\n          assert(T[s] == c1);\n          assert(((s + 1) < n) && (T[s] <= T[s + 1]));\n          assert(T[s - 1] <= T[s]);\n\n          if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA;\n\n          c0 = T[--s];\n          *j = ~((int)c0);\n          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }\n          if(c0 != c2) {\n            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }\n            k = SA + BUCKET_B(c2 = c0, c1);\n          }\n          assert(k < j); assert(k != NULL);\n          *k-- = s;\n        } else if(s != 0) {\n          *j = ~s;\n#ifndef NDEBUG\n        } else {\n          assert(T[s] == c1);\n#endif\n        }\n      }\n    }\n  }\n\n  /* Construct the BWTed string by using\n     the sorted order of type B suffixes. */\n  k = SA + BUCKET_A(c2 = T[n - 1]);\n  if (T[n - 2] < c2) {\n    if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA;\n    *k++ = ~((int)T[n - 2]);\n  }\n  else {\n    *k++ = n - 1;\n  }\n\n  /* Scan the suffix array from left to right. 
*/\n  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {\n    if(0 < (s = *i)) {\n      assert(T[s - 1] >= T[s]);\n\n      if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA;\n\n      c0 = T[--s];\n      *i = c0;\n      if(c0 != c2) {\n        BUCKET_A(c2) = k - SA;\n        k = SA + BUCKET_A(c2 = c0);\n      }\n      assert(i < k);\n      if((0 < s) && (T[s - 1] < c0)) {\n          if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA;\n          *k++ = ~((int)T[s - 1]);\n      } else\n        *k++ = s;\n    } else if(s != 0) {\n      *i = ~s;\n    } else {\n      orig = i;\n    }\n  }\n\n  return orig - SA;\n}\n\n\n/*---------------------------------------------------------------------------*/\n\n/*- Function -*/\n\nint\ndivsufsort(const unsigned char *T, int *SA, int n, int openMP) {\n  int *bucket_A, *bucket_B;\n  int m;\n  int err = 0;\n\n  /* Check arguments. */\n  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }\n  else if(n == 0) { return 0; }\n  else if(n == 1) { SA[0] = 0; return 0; }\n  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }\n\n  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));\n  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));\n\n  /* Suffixsort. */\n  if((bucket_A != NULL) && (bucket_B != NULL)) {\n    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP);\n    construct_SA(T, SA, bucket_A, bucket_B, n, m);\n  } else {\n    err = -2;\n  }\n\n  free(bucket_B);\n  free(bucket_A);\n\n  return err;\n}\n\nint\ndivbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) {\n  int *B;\n  int *bucket_A, *bucket_B;\n  int m, pidx, i;\n\n  /* Check arguments. 
*/\n  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }\n  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }\n\n  if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); }\n  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));\n  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));\n\n  /* Burrows-Wheeler Transform. */\n  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {\n    m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP);\n\n    if (num_indexes == NULL || indexes == NULL) {\n        pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);\n    } else {\n        pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes);\n    }\n\n    /* Copy to output string. */\n    U[0] = T[n - 1];\n    for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; }\n    for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; }\n    pidx += 1;\n  } else {\n    pidx = -2;\n  }\n\n  free(bucket_B);\n  free(bucket_A);\n  if(A == NULL) { free(B); }\n\n  return pidx;\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/dictBuilder/divsufsort.h",
    "content": "/*\n * divsufsort.h for libdivsufsort-lite\n * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n */\n\n#ifndef _DIVSUFSORT_H\n#define _DIVSUFSORT_H 1\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n\n/*- Prototypes -*/\n\n/**\n * Constructs the suffix array of a given string.\n * @param T [0..n-1] The input string.\n * @param SA [0..n-1] The output array of suffixes.\n * @param n The length of the given string.\n * @param openMP enables OpenMP optimization.\n * @return 0 if no error occurred, -1 or -2 otherwise.\n */\nint\ndivsufsort(const unsigned char *T, int *SA, int n, int openMP);\n\n/**\n * Constructs the burrows-wheeler transformed string of a given string.\n * @param T [0..n-1] The input string.\n * @param U [0..n-1] The output string. (can be T)\n * @param A [0..n-1] The temporary array. 
(can be NULL)\n * @param n The length of the given string.\n * @param num_indexes The length of secondary indexes array. (can be NULL)\n * @param indexes The secondary indexes array. (can be NULL)\n * @param openMP enables OpenMP optimization.\n * @return The primary index if no error occurred, -1 or -2 otherwise.\n */\nint\ndivbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);\n\n\n#ifdef __cplusplus\n} /* extern \"C\" */\n#endif /* __cplusplus */\n\n#endif /* _DIVSUFSORT_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/dictBuilder/fastcover.c",
    "content": "/*-*************************************\n*  Dependencies\n***************************************/\n#include <stdio.h>  /* fprintf */\n#include <stdlib.h> /* malloc, free, qsort */\n#include <string.h> /* memset */\n#include <time.h>   /* clock */\n\n#include \"mem.h\" /* read */\n#include \"pool.h\"\n#include \"threading.h\"\n#include \"cover.h\"\n#include \"zstd_internal.h\" /* includes zstd.h */\n#ifndef ZDICT_STATIC_LINKING_ONLY\n#define ZDICT_STATIC_LINKING_ONLY\n#endif\n#include \"zdict.h\"\n\n\n/*-*************************************\n*  Constants\n***************************************/\n#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))\n#define FASTCOVER_MAX_F 31\n#define FASTCOVER_MAX_ACCEL 10\n#define DEFAULT_SPLITPOINT 0.75\n#define DEFAULT_F 20\n#define DEFAULT_ACCEL 1\n\n\n/*-*************************************\n*  Console display\n***************************************/\nstatic int g_displayLevel = 2;\n#define DISPLAY(...)                                                           \\\n  {                                                                            \\\n    fprintf(stderr, __VA_ARGS__);                                              \\\n    fflush(stderr);                                                            \\\n  }\n#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \\\n  if (displayLevel >= l) {                                                     \\\n    DISPLAY(__VA_ARGS__);                                                      \\\n  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */\n#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)\n\n#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               
\\\n  if (displayLevel >= l) {                                                     \\\n    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \\\n      g_time = clock();                                                        \\\n      DISPLAY(__VA_ARGS__);                                                    \\\n    }                                                                          \\\n  }\n#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)\nstatic const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;\nstatic clock_t g_time = 0;\n\n\n/*-*************************************\n* Hash Functions\n***************************************/\nstatic const U64 prime6bytes = 227718039650203ULL;\nstatic size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }\nstatic size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }\n\nstatic const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;\nstatic size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }\nstatic size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }\n\n\n/**\n * Hash the d-byte value pointed to by p and mod 2^f\n */\nstatic size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) {\n  if (d == 6) {\n    return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1);\n  }\n  return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1);\n}\n\n\n/*-*************************************\n* Acceleration\n***************************************/\ntypedef struct {\n  unsigned finalize;    /* Percentage of training samples used for ZDICT_finalizeDictionary */\n  unsigned skip;        /* Number of dmer skipped between each dmer counted in computeFrequency */\n} FASTCOVER_accel_t;\n\n\nstatic const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = {\n  { 100, 0 },   /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */\n  { 
100, 0 },   /* accel = 1 */\n  { 50, 1 },   /* accel = 2 */\n  { 34, 2 },   /* accel = 3 */\n  { 25, 3 },   /* accel = 4 */\n  { 20, 4 },   /* accel = 5 */\n  { 17, 5 },   /* accel = 6 */\n  { 14, 6 },   /* accel = 7 */\n  { 13, 7 },   /* accel = 8 */\n  { 11, 8 },   /* accel = 9 */\n  { 10, 9 },   /* accel = 10 */\n};\n\n\n/*-*************************************\n* Context\n***************************************/\ntypedef struct {\n  const BYTE *samples;\n  size_t *offsets;\n  const size_t *samplesSizes;\n  size_t nbSamples;\n  size_t nbTrainSamples;\n  size_t nbTestSamples;\n  size_t nbDmers;\n  U32 *freqs;\n  unsigned d;\n  unsigned f;\n  FASTCOVER_accel_t accelParams;\n} FASTCOVER_ctx_t;\n\n\n/*-*************************************\n*  Helper functions\n***************************************/\n/**\n * Selects the best segment in an epoch.\n * Segments of are scored according to the function:\n *\n * Let F(d) be the frequency of all dmers with hash value d.\n * Let S_i be hash value of the dmer at position i of segment S which has length k.\n *\n *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})\n *\n * Once the dmer with hash value d is in the dictionary we set F(d) = 0.\n */\nstatic COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,\n                                              U32 *freqs, U32 begin, U32 end,\n                                              ZDICT_cover_params_t parameters,\n                                              U16* segmentFreqs) {\n  /* Constants */\n  const U32 k = parameters.k;\n  const U32 d = parameters.d;\n  const U32 f = ctx->f;\n  const U32 dmersInK = k - d + 1;\n\n  /* Try each segment (activeSegment) and save the best (bestSegment) */\n  COVER_segment_t bestSegment = {0, 0, 0};\n  COVER_segment_t activeSegment;\n\n  /* Reset the activeDmers in the segment */\n  /* The activeSegment starts at the beginning of the epoch. 
*/\n  activeSegment.begin = begin;\n  activeSegment.end = begin;\n  activeSegment.score = 0;\n\n  /* Slide the activeSegment through the whole epoch.\n   * Save the best segment in bestSegment.\n   */\n  while (activeSegment.end < end) {\n    /* Get hash value of current dmer */\n    const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);\n\n    /* Add frequency of this index to score if this is the first occurrence of index in active segment */\n    if (segmentFreqs[idx] == 0) {\n      activeSegment.score += freqs[idx];\n    }\n    /* Increment end of segment and segmentFreqs*/\n    activeSegment.end += 1;\n    segmentFreqs[idx] += 1;\n    /* If the window is now too large, drop the first position */\n    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {\n      /* Get hash value of the dmer to be eliminated from active segment */\n      const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);\n      segmentFreqs[delIndex] -= 1;\n      /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */\n      if (segmentFreqs[delIndex] == 0) {\n        activeSegment.score -= freqs[delIndex];\n      }\n      /* Increment start of segment */\n      activeSegment.begin += 1;\n    }\n\n    /* If this segment is the best so far save it */\n    if (activeSegment.score > bestSegment.score) {\n      bestSegment = activeSegment;\n    }\n  }\n\n  /* Zero out rest of segmentFreqs array */\n  while (activeSegment.begin < end) {\n    const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);\n    segmentFreqs[delIndex] -= 1;\n    activeSegment.begin += 1;\n  }\n\n  {\n    /*  Zero the frequency of hash value of each dmer covered by the chosen segment. 
*/\n    U32 pos;\n    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {\n      const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);\n      freqs[i] = 0;\n    }\n  }\n\n  return bestSegment;\n}\n\n\nstatic int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters,\n                                     size_t maxDictSize, unsigned f,\n                                     unsigned accel) {\n  /* k, d, and f are required parameters */\n  if (parameters.d == 0 || parameters.k == 0) {\n    return 0;\n  }\n  /* d has to be 6 or 8 */\n  if (parameters.d != 6 && parameters.d != 8) {\n    return 0;\n  }\n  /* k <= maxDictSize */\n  if (parameters.k > maxDictSize) {\n    return 0;\n  }\n  /* d <= k */\n  if (parameters.d > parameters.k) {\n    return 0;\n  }\n  /* 0 < f <= FASTCOVER_MAX_F*/\n  if (f > FASTCOVER_MAX_F || f == 0) {\n    return 0;\n  }\n  /* 0 < splitPoint <= 1 */\n  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {\n    return 0;\n  }\n  /* 0 < accel <= 10 */\n  if (accel > 10 || accel == 0) {\n    return 0;\n  }\n  return 1;\n}\n\n\n/**\n * Clean up a context initialized with `FASTCOVER_ctx_init()`.\n */\nstatic void\nFASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)\n{\n    if (!ctx) return;\n\n    free(ctx->freqs);\n    ctx->freqs = NULL;\n\n    free(ctx->offsets);\n    ctx->offsets = NULL;\n}\n\n\n/**\n * Calculate for frequency of hash value of each dmer in ctx->samples\n */\nstatic void\nFASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx)\n{\n    const unsigned f = ctx->f;\n    const unsigned d = ctx->d;\n    const unsigned skip = ctx->accelParams.skip;\n    const unsigned readLength = MAX(d, 8);\n    size_t i;\n    assert(ctx->nbTrainSamples >= 5);\n    assert(ctx->nbTrainSamples <= ctx->nbSamples);\n    for (i = 0; i < ctx->nbTrainSamples; i++) {\n        size_t start = ctx->offsets[i];  /* start of current dmer */\n        size_t const currSampleEnd = ctx->offsets[i+1];\n        while (start + 
readLength <= currSampleEnd) {\n            const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);\n            freqs[dmerIndex]++;\n            start = start + skip + 1;\n        }\n    }\n}\n\n\n/**\n * Prepare a context for dictionary building.\n * The context is only dependent on the parameter `d` and can used multiple\n * times.\n * Returns 0 on success or error code on error.\n * The context must be destroyed with `FASTCOVER_ctx_destroy()`.\n */\nstatic size_t\nFASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,\n                   const void* samplesBuffer,\n                   const size_t* samplesSizes, unsigned nbSamples,\n                   unsigned d, double splitPoint, unsigned f,\n                   FASTCOVER_accel_t accelParams)\n{\n    const BYTE* const samples = (const BYTE*)samplesBuffer;\n    const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);\n    /* Split samples into testing and training sets */\n    const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;\n    const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;\n    const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;\n    const size_t testSamplesSize = splitPoint < 1.0 ? 
COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;\n\n    /* Checks */\n    if (totalSamplesSize < MAX(d, sizeof(U64)) ||\n        totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {\n        DISPLAYLEVEL(1, \"Total samples size is too large (%u MB), maximum size is %u MB\\n\",\n                    (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));\n        return ERROR(srcSize_wrong);\n    }\n\n    /* Check if there are at least 5 training samples */\n    if (nbTrainSamples < 5) {\n        DISPLAYLEVEL(1, \"Total number of training samples is %u and is invalid\\n\", nbTrainSamples);\n        return ERROR(srcSize_wrong);\n    }\n\n    /* Check if there's testing sample */\n    if (nbTestSamples < 1) {\n        DISPLAYLEVEL(1, \"Total number of testing samples is %u and is invalid.\\n\", nbTestSamples);\n        return ERROR(srcSize_wrong);\n    }\n\n    /* Zero the context */\n    memset(ctx, 0, sizeof(*ctx));\n    DISPLAYLEVEL(2, \"Training on %u samples of total size %u\\n\", nbTrainSamples,\n                    (unsigned)trainingSamplesSize);\n    DISPLAYLEVEL(2, \"Testing on %u samples of total size %u\\n\", nbTestSamples,\n                    (unsigned)testSamplesSize);\n\n    ctx->samples = samples;\n    ctx->samplesSizes = samplesSizes;\n    ctx->nbSamples = nbSamples;\n    ctx->nbTrainSamples = nbTrainSamples;\n    ctx->nbTestSamples = nbTestSamples;\n    ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;\n    ctx->d = d;\n    ctx->f = f;\n    ctx->accelParams = accelParams;\n\n    /* The offsets of each file */\n    ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));\n    if (ctx->offsets == NULL) {\n        DISPLAYLEVEL(1, \"Failed to allocate scratch buffers \\n\");\n        FASTCOVER_ctx_destroy(ctx);\n        return ERROR(memory_allocation);\n    }\n\n    /* Fill offsets from the samplesSizes */\n    {   U32 i;\n        ctx->offsets[0] = 0;\n        assert(nbSamples >= 5);\n   
     for (i = 1; i <= nbSamples; ++i) {\n            ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];\n        }\n    }\n\n    /* Initialize frequency array of size 2^f */\n    ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));\n    if (ctx->freqs == NULL) {\n        DISPLAYLEVEL(1, \"Failed to allocate frequency table \\n\");\n        FASTCOVER_ctx_destroy(ctx);\n        return ERROR(memory_allocation);\n    }\n\n    DISPLAYLEVEL(2, \"Computing frequencies\\n\");\n    FASTCOVER_computeFrequency(ctx->freqs, ctx);\n\n    return 0;\n}\n\n\n/**\n * Given the prepared context build the dictionary.\n */\nstatic size_t\nFASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,\n                          U32* freqs,\n                          void* dictBuffer, size_t dictBufferCapacity,\n                          ZDICT_cover_params_t parameters,\n                          U16* segmentFreqs)\n{\n  BYTE *const dict = (BYTE *)dictBuffer;\n  size_t tail = dictBufferCapacity;\n  /* Divide the data into epochs. We will select one segment from each epoch. 
*/\n  const COVER_epoch_info_t epochs = COVER_computeEpochs(\n      (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1);\n  const size_t maxZeroScoreRun = 10;\n  size_t zeroScoreRun = 0;\n  size_t epoch;\n  DISPLAYLEVEL(2, \"Breaking content into %u epochs of size %u\\n\",\n                (U32)epochs.num, (U32)epochs.size);\n  /* Loop through the epochs until there are no more segments or the dictionary\n   * is full.\n   */\n  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {\n    const U32 epochBegin = (U32)(epoch * epochs.size);\n    const U32 epochEnd = epochBegin + epochs.size;\n    size_t segmentSize;\n    /* Select a segment */\n    COVER_segment_t segment = FASTCOVER_selectSegment(\n        ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);\n\n    /* If the segment covers no dmers, then we are out of content.\n     * There may be new content in other epochs, for continue for some time.\n     */\n    if (segment.score == 0) {\n      if (++zeroScoreRun >= maxZeroScoreRun) {\n          break;\n      }\n      continue;\n    }\n    zeroScoreRun = 0;\n\n    /* Trim the segment if necessary and if it is too small then we are done */\n    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);\n    if (segmentSize < parameters.d) {\n      break;\n    }\n\n    /* We fill the dictionary from the back to allow the best segments to be\n     * referenced with the smallest offsets.\n     */\n    tail -= segmentSize;\n    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);\n    DISPLAYUPDATE(\n        2, \"\\r%u%%       \",\n        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));\n  }\n  DISPLAYLEVEL(2, \"\\r%79s\\r\", \"\");\n  return tail;\n}\n\n/**\n * Parameters for FASTCOVER_tryParameters().\n */\ntypedef struct FASTCOVER_tryParameters_data_s {\n    const FASTCOVER_ctx_t* ctx;\n    COVER_best_t* best;\n    size_t dictBufferCapacity;\n    ZDICT_cover_params_t parameters;\n} 
FASTCOVER_tryParameters_data_t;\n\n\n/**\n * Tries a set of parameters and updates the COVER_best_t with the results.\n * This function is thread safe if zstd is compiled with multithreaded support.\n * It takes its parameters as an *OWNING* opaque pointer to support threading.\n */\nstatic void FASTCOVER_tryParameters(void *opaque)\n{\n  /* Save parameters as local variables */\n  FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque;\n  const FASTCOVER_ctx_t *const ctx = data->ctx;\n  const ZDICT_cover_params_t parameters = data->parameters;\n  size_t dictBufferCapacity = data->dictBufferCapacity;\n  size_t totalCompressedSize = ERROR(GENERIC);\n  /* Initialize array to keep track of frequency of dmer within activeSegment */\n  U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));\n  /* Allocate space for hash table, dict, and freqs */\n  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);\n  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));\n  U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));\n  if (!segmentFreqs || !dict || !freqs) {\n    DISPLAYLEVEL(1, \"Failed to allocate buffers: out of memory\\n\");\n    goto _cleanup;\n  }\n  /* Copy the frequencies because we need to modify them */\n  memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));\n  /* Build the dictionary */\n  { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,\n                                                    parameters, segmentFreqs);\n\n    const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);\n    selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,\n         ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,\n         totalCompressedSize);\n\n    if (COVER_dictSelectionIsError(selection)) {\n      DISPLAYLEVEL(1, \"Failed to 
select dictionary\\n\");\n      goto _cleanup;\n    }\n  }\n_cleanup:\n  free(dict);\n  COVER_best_finish(data->best, parameters, selection);\n  free(data);\n  free(segmentFreqs);\n  COVER_dictSelectionFree(selection);\n  free(freqs);\n}\n\n\nstatic void\nFASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams,\n                               ZDICT_cover_params_t* coverParams)\n{\n    coverParams->k = fastCoverParams.k;\n    coverParams->d = fastCoverParams.d;\n    coverParams->steps = fastCoverParams.steps;\n    coverParams->nbThreads = fastCoverParams.nbThreads;\n    coverParams->splitPoint = fastCoverParams.splitPoint;\n    coverParams->zParams = fastCoverParams.zParams;\n    coverParams->shrinkDict = fastCoverParams.shrinkDict;\n}\n\n\nstatic void\nFASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams,\n                                   ZDICT_fastCover_params_t* fastCoverParams,\n                                   unsigned f, unsigned accel)\n{\n    fastCoverParams->k = coverParams.k;\n    fastCoverParams->d = coverParams.d;\n    fastCoverParams->steps = coverParams.steps;\n    fastCoverParams->nbThreads = coverParams.nbThreads;\n    fastCoverParams->splitPoint = coverParams.splitPoint;\n    fastCoverParams->f = f;\n    fastCoverParams->accel = accel;\n    fastCoverParams->zParams = coverParams.zParams;\n    fastCoverParams->shrinkDict = coverParams.shrinkDict;\n}\n\n\nZDICTLIB_API size_t\nZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,\n                                const void* samplesBuffer,\n                                const size_t* samplesSizes, unsigned nbSamples,\n                                ZDICT_fastCover_params_t parameters)\n{\n    BYTE* const dict = (BYTE*)dictBuffer;\n    FASTCOVER_ctx_t ctx;\n    ZDICT_cover_params_t coverParams;\n    FASTCOVER_accel_t accelParams;\n    /* Initialize global data */\n    g_displayLevel = parameters.zParams.notificationLevel;\n    /* Assign 
splitPoint and f if not provided */\n    parameters.splitPoint = 1.0;\n    parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;\n    parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel;\n    /* Convert to cover parameter */\n    memset(&coverParams, 0 , sizeof(coverParams));\n    FASTCOVER_convertToCoverParams(parameters, &coverParams);\n    /* Checks */\n    if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,\n                                   parameters.accel)) {\n      DISPLAYLEVEL(1, \"FASTCOVER parameters incorrect\\n\");\n      return ERROR(parameter_outOfBound);\n    }\n    if (nbSamples == 0) {\n      DISPLAYLEVEL(1, \"FASTCOVER must have at least one input file\\n\");\n      return ERROR(srcSize_wrong);\n    }\n    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {\n      DISPLAYLEVEL(1, \"dictBufferCapacity must be at least %u\\n\",\n                   ZDICT_DICTSIZE_MIN);\n      return ERROR(dstSize_tooSmall);\n    }\n    /* Assign corresponding FASTCOVER_accel_t to accelParams*/\n    accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];\n    /* Initialize context */\n    {\n      size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,\n                            coverParams.d, parameters.splitPoint, parameters.f,\n                            accelParams);\n      if (ZSTD_isError(initVal)) {\n        DISPLAYLEVEL(1, \"Failed to initialize context\\n\");\n        return initVal;\n      }\n    }\n    COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);\n    /* Build the dictionary */\n    DISPLAYLEVEL(2, \"Building dictionary\\n\");\n    {\n      /* Initialize array to keep track of frequency of dmer within activeSegment */\n      U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16));\n      const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer,\n                                                
dictBufferCapacity, coverParams, segmentFreqs);\n      const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);\n      const size_t dictionarySize = ZDICT_finalizeDictionary(\n          dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,\n          samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);\n      if (!ZSTD_isError(dictionarySize)) {\n          DISPLAYLEVEL(2, \"Constructed dictionary of size %u\\n\",\n                      (unsigned)dictionarySize);\n      }\n      FASTCOVER_ctx_destroy(&ctx);\n      free(segmentFreqs);\n      return dictionarySize;\n    }\n}\n\n\nZDICTLIB_API size_t\nZDICT_optimizeTrainFromBuffer_fastCover(\n                    void* dictBuffer, size_t dictBufferCapacity,\n                    const void* samplesBuffer,\n                    const size_t* samplesSizes, unsigned nbSamples,\n                    ZDICT_fastCover_params_t* parameters)\n{\n    ZDICT_cover_params_t coverParams;\n    FASTCOVER_accel_t accelParams;\n    /* constants */\n    const unsigned nbThreads = parameters->nbThreads;\n    const double splitPoint =\n        parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;\n    const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;\n    const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;\n    const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;\n    const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;\n    const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;\n    const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);\n    const unsigned kIterations =\n        (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);\n    const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;\n    const unsigned accel = parameters->accel == 0 ? 
DEFAULT_ACCEL : parameters->accel;\n    const unsigned shrinkDict = 0;\n    /* Local variables */\n    const int displayLevel = parameters->zParams.notificationLevel;\n    unsigned iteration = 1;\n    unsigned d;\n    unsigned k;\n    COVER_best_t best;\n    POOL_ctx *pool = NULL;\n    int warned = 0;\n    /* Checks */\n    if (splitPoint <= 0 || splitPoint > 1) {\n      LOCALDISPLAYLEVEL(displayLevel, 1, \"Incorrect splitPoint\\n\");\n      return ERROR(parameter_outOfBound);\n    }\n    if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {\n      LOCALDISPLAYLEVEL(displayLevel, 1, \"Incorrect accel\\n\");\n      return ERROR(parameter_outOfBound);\n    }\n    if (kMinK < kMaxD || kMaxK < kMinK) {\n      LOCALDISPLAYLEVEL(displayLevel, 1, \"Incorrect k\\n\");\n      return ERROR(parameter_outOfBound);\n    }\n    if (nbSamples == 0) {\n      LOCALDISPLAYLEVEL(displayLevel, 1, \"FASTCOVER must have at least one input file\\n\");\n      return ERROR(srcSize_wrong);\n    }\n    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {\n      LOCALDISPLAYLEVEL(displayLevel, 1, \"dictBufferCapacity must be at least %u\\n\",\n                   ZDICT_DICTSIZE_MIN);\n      return ERROR(dstSize_tooSmall);\n    }\n    if (nbThreads > 1) {\n      pool = POOL_create(nbThreads, 1);\n      if (!pool) {\n        return ERROR(memory_allocation);\n      }\n    }\n    /* Initialization */\n    COVER_best_init(&best);\n    memset(&coverParams, 0 , sizeof(coverParams));\n    FASTCOVER_convertToCoverParams(*parameters, &coverParams);\n    accelParams = FASTCOVER_defaultAccelParameters[accel];\n    /* Turn down global display level to clean up display at level 2 and below */\n    g_displayLevel = displayLevel == 0 ? 
0 : displayLevel - 1;\n    /* Loop through d first because each new value needs a new context */\n    LOCALDISPLAYLEVEL(displayLevel, 2, \"Trying %u different sets of parameters\\n\",\n                      kIterations);\n    for (d = kMinD; d <= kMaxD; d += 2) {\n      /* Initialize the context for this value of d */\n      FASTCOVER_ctx_t ctx;\n      LOCALDISPLAYLEVEL(displayLevel, 3, \"d=%u\\n\", d);\n      {\n        size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams);\n        if (ZSTD_isError(initVal)) {\n          LOCALDISPLAYLEVEL(displayLevel, 1, \"Failed to initialize context\\n\");\n          COVER_best_destroy(&best);\n          POOL_free(pool);\n          return initVal;\n        }\n      }\n      if (!warned) {\n        COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);\n        warned = 1;\n      }\n      /* Loop through k reusing the same context */\n      for (k = kMinK; k <= kMaxK; k += kStepSize) {\n        /* Prepare the arguments */\n        FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc(\n            sizeof(FASTCOVER_tryParameters_data_t));\n        LOCALDISPLAYLEVEL(displayLevel, 3, \"k=%u\\n\", k);\n        if (!data) {\n          LOCALDISPLAYLEVEL(displayLevel, 1, \"Failed to allocate parameters\\n\");\n          COVER_best_destroy(&best);\n          FASTCOVER_ctx_destroy(&ctx);\n          POOL_free(pool);\n          return ERROR(memory_allocation);\n        }\n        data->ctx = &ctx;\n        data->best = &best;\n        data->dictBufferCapacity = dictBufferCapacity;\n        data->parameters = coverParams;\n        data->parameters.k = k;\n        data->parameters.d = d;\n        data->parameters.splitPoint = splitPoint;\n        data->parameters.steps = kSteps;\n        data->parameters.shrinkDict = shrinkDict;\n        data->parameters.zParams.notificationLevel = g_displayLevel;\n        /* Check the parameters */\n 
       if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,\n                                       data->ctx->f, accel)) {\n          DISPLAYLEVEL(1, \"FASTCOVER parameters incorrect\\n\");\n          free(data);\n          continue;\n        }\n        /* Call the function and pass ownership of data to it */\n        COVER_best_start(&best);\n        if (pool) {\n          POOL_add(pool, &FASTCOVER_tryParameters, data);\n        } else {\n          FASTCOVER_tryParameters(data);\n        }\n        /* Print status */\n        LOCALDISPLAYUPDATE(displayLevel, 2, \"\\r%u%%       \",\n                           (unsigned)((iteration * 100) / kIterations));\n        ++iteration;\n      }\n      COVER_best_wait(&best);\n      FASTCOVER_ctx_destroy(&ctx);\n    }\n    LOCALDISPLAYLEVEL(displayLevel, 2, \"\\r%79s\\r\", \"\");\n    /* Fill the output buffer and parameters with output of the best parameters */\n    {\n      const size_t dictSize = best.dictSize;\n      if (ZSTD_isError(best.compressedSize)) {\n        const size_t compressedSize = best.compressedSize;\n        COVER_best_destroy(&best);\n        POOL_free(pool);\n        return compressedSize;\n      }\n      FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);\n      memcpy(dictBuffer, best.dict, dictSize);\n      COVER_best_destroy(&best);\n      POOL_free(pool);\n      return dictSize;\n    }\n\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/dictBuilder/zdict.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/*-**************************************\n*  Tuning parameters\n****************************************/\n#define MINRATIO 4   /* minimum nb of apparition to be selected in dictionary */\n#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)\n#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)\n\n\n/*-**************************************\n*  Compiler Options\n****************************************/\n/* Unix Large Files support (>4GB) */\n#define _FILE_OFFSET_BITS 64\n#if (defined(__sun__) && (!defined(__LP64__)))   /* Sun Solaris 32-bits requires specific definitions */\n#  define _LARGEFILE_SOURCE\n#elif ! 
defined(__LP64__)                        /* No point defining Large file for 64 bit */\n#  define _LARGEFILE64_SOURCE\n#endif\n\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include <stdlib.h>        /* malloc, free */\n#include <string.h>        /* memset */\n#include <stdio.h>         /* fprintf, fopen, ftello64 */\n#include <time.h>          /* clock */\n\n#include \"mem.h\"           /* read */\n#include \"fse.h\"           /* FSE_normalizeCount, FSE_writeNCount */\n#define HUF_STATIC_LINKING_ONLY\n#include \"huf.h\"           /* HUF_buildCTable, HUF_writeCTable */\n#include \"zstd_internal.h\" /* includes zstd.h */\n#include \"xxhash.h\"        /* XXH64 */\n#include \"divsufsort.h\"\n#ifndef ZDICT_STATIC_LINKING_ONLY\n#  define ZDICT_STATIC_LINKING_ONLY\n#endif\n#include \"zdict.h\"\n#include \"compress/zstd_compress_internal.h\" /* ZSTD_loadCEntropy() */\n\n\n/*-*************************************\n*  Constants\n***************************************/\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define DICTLISTSIZE_DEFAULT 10000\n\n#define NOISELENGTH 32\n\nstatic const int g_compressionLevel_default = 3;\nstatic const U32 g_selectivity_default = 9;\n\n\n/*-*************************************\n*  Console display\n***************************************/\n#define DISPLAY(...)         { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }\n#define DISPLAYLEVEL(l, ...) 
if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); }    /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */\n\nstatic clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }\n\nstatic void ZDICT_printHex(const void* ptr, size_t length)\n{\n    const BYTE* const b = (const BYTE*)ptr;\n    size_t u;\n    for (u=0; u<length; u++) {\n        BYTE c = b[u];\n        if (c<32 || c>126) c = '.';   /* non-printable char */\n        DISPLAY(\"%c\", c);\n    }\n}\n\n\n/*-********************************************************\n*  Helper functions\n**********************************************************/\nunsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }\n\nconst char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }\n\nunsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)\n{\n    if (dictSize < 8) return 0;\n    if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;\n    return MEM_readLE32((const char*)dictBuffer + 4);\n}\n\nsize_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize)\n{\n    size_t headerSize;\n    if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted);\n\n    {   unsigned offcodeMaxValue = MaxOff;\n        ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t));\n        U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE);\n        short* offcodeNCount = (short*)malloc((MaxOff+1)*sizeof(short));\n        if (!bs || !wksp || !offcodeNCount) {\n            headerSize = ERROR(memory_allocation);\n        } else {\n            ZSTD_reset_compressedBlockState(bs);\n            headerSize = ZSTD_loadCEntropy(bs, wksp, offcodeNCount, &offcodeMaxValue, dictBuffer, dictSize);\n        }\n\n        free(bs);\n        free(wksp);\n        free(offcodeNCount);\n    }\n\n    return 
headerSize;\n}\n\n/*-********************************************************\n*  Dictionary training functions\n**********************************************************/\nstatic unsigned ZDICT_NbCommonBytes (size_t val)\n{\n    if (MEM_isLittleEndian()) {\n        if (MEM_64bits()) {\n#       if defined(_MSC_VER) && defined(_WIN64)\n            unsigned long r = 0;\n            _BitScanForward64( &r, (U64)val );\n            return (unsigned)(r>>3);\n#       elif defined(__GNUC__) && (__GNUC__ >= 3)\n            return (__builtin_ctzll((U64)val) >> 3);\n#       else\n            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };\n            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];\n#       endif\n        } else { /* 32 bits */\n#       if defined(_MSC_VER)\n            unsigned long r=0;\n            _BitScanForward( &r, (U32)val );\n            return (unsigned)(r>>3);\n#       elif defined(__GNUC__) && (__GNUC__ >= 3)\n            return (__builtin_ctz((U32)val) >> 3);\n#       else\n            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };\n            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];\n#       endif\n        }\n    } else {  /* Big Endian CPU */\n        if (MEM_64bits()) {\n#       if defined(_MSC_VER) && defined(_WIN64)\n            unsigned long r = 0;\n            _BitScanReverse64( &r, val );\n            return (unsigned)(r>>3);\n#       elif defined(__GNUC__) && (__GNUC__ >= 3)\n            return (__builtin_clzll(val) >> 3);\n#       else\n            unsigned r;\n            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */\n          
  if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }\n            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\n            r += (!val);\n            return r;\n#       endif\n        } else { /* 32 bits */\n#       if defined(_MSC_VER)\n            unsigned long r = 0;\n            _BitScanReverse( &r, (unsigned long)val );\n            return (unsigned)(r>>3);\n#       elif defined(__GNUC__) && (__GNUC__ >= 3)\n            return (__builtin_clz((U32)val) >> 3);\n#       else\n            unsigned r;\n            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }\n            r += (!val);\n            return r;\n#       endif\n    }   }\n}\n\n\n/*! ZDICT_count() :\n    Count the nb of common bytes between 2 pointers.\n    Note : this function presumes end of buffer followed by noisy guard band.\n*/\nstatic size_t ZDICT_count(const void* pIn, const void* pMatch)\n{\n    const char* const pStart = (const char*)pIn;\n    for (;;) {\n        size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);\n        if (!diff) {\n            pIn = (const char*)pIn+sizeof(size_t);\n            pMatch = (const char*)pMatch+sizeof(size_t);\n            continue;\n        }\n        pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff);\n        return (size_t)((const char*)pIn - pStart);\n    }\n}\n\n\ntypedef struct {\n    U32 pos;\n    U32 length;\n    U32 savings;\n} dictItem;\n\nstatic void ZDICT_initDictItem(dictItem* d)\n{\n    d->pos = 1;\n    d->length = 0;\n    d->savings = (U32)(-1);\n}\n\n\n#define LLIMIT 64          /* heuristic determined experimentally */\n#define MINMATCHLENGTH 7   /* heuristic determined experimentally */\nstatic dictItem ZDICT_analyzePos(\n                       BYTE* doneMarks,\n                       const int* suffix, U32 start,\n                       const void* buffer, U32 minRatio, U32 notificationLevel)\n{\n    U32 lengthList[LLIMIT] = {0};\n    U32 cumulLength[LLIMIT] = {0};\n    U32 savings[LLIMIT] = {0};\n    const BYTE* b = 
(const BYTE*)buffer;\n    size_t maxLength = LLIMIT;\n    size_t pos = suffix[start];\n    U32 end = start;\n    dictItem solution;\n\n    /* init */\n    memset(&solution, 0, sizeof(solution));\n    doneMarks[pos] = 1;\n\n    /* trivial repetition cases */\n    if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))\n       ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))\n       ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {\n        /* skip and mark segment */\n        U16 const pattern16 = MEM_read16(b+pos+4);\n        U32 u, patternEnd = 6;\n        while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;\n        if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;\n        for (u=1; u<patternEnd; u++)\n            doneMarks[pos+u] = 1;\n        return solution;\n    }\n\n    /* look forward */\n    {   size_t length;\n        do {\n            end++;\n            length = ZDICT_count(b + pos, b + suffix[end]);\n        } while (length >= MINMATCHLENGTH);\n    }\n\n    /* look backward */\n    {   size_t length;\n        do {\n            length = ZDICT_count(b + pos, b + *(suffix+start-1));\n            if (length >=MINMATCHLENGTH) start--;\n        } while(length >= MINMATCHLENGTH);\n    }\n\n    /* exit if not found a minimum nb of repetitions */\n    if (end-start < minRatio) {\n        U32 idx;\n        for(idx=start; idx<end; idx++)\n            doneMarks[suffix[idx]] = 1;\n        return solution;\n    }\n\n    {   int i;\n        U32 mml;\n        U32 refinedStart = start;\n        U32 refinedEnd = end;\n\n        DISPLAYLEVEL(4, \"\\n\");\n        DISPLAYLEVEL(4, \"found %3u matches of length >= %i at pos %7u  \", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);\n        DISPLAYLEVEL(4, \"\\n\");\n\n        for (mml = MINMATCHLENGTH ; ; mml++) {\n            BYTE currentChar = 0;\n            U32 currentCount = 0;\n            U32 currentID = refinedStart;\n            U32 id;\n            U32 selectedCount = 0;\n            U32 
selectedID = currentID;\n            for (id =refinedStart; id < refinedEnd; id++) {\n                if (b[suffix[id] + mml] != currentChar) {\n                    if (currentCount > selectedCount) {\n                        selectedCount = currentCount;\n                        selectedID = currentID;\n                    }\n                    currentID = id;\n                    currentChar = b[ suffix[id] + mml];\n                    currentCount = 0;\n                }\n                currentCount ++;\n            }\n            if (currentCount > selectedCount) {  /* for last */\n                selectedCount = currentCount;\n                selectedID = currentID;\n            }\n\n            if (selectedCount < minRatio)\n                break;\n            refinedStart = selectedID;\n            refinedEnd = refinedStart + selectedCount;\n        }\n\n        /* evaluate gain based on new dict */\n        start = refinedStart;\n        pos = suffix[refinedStart];\n        end = start;\n        memset(lengthList, 0, sizeof(lengthList));\n\n        /* look forward */\n        {   size_t length;\n            do {\n                end++;\n                length = ZDICT_count(b + pos, b + suffix[end]);\n                if (length >= LLIMIT) length = LLIMIT-1;\n                lengthList[length]++;\n            } while (length >=MINMATCHLENGTH);\n        }\n\n        /* look backward */\n        {   size_t length = MINMATCHLENGTH;\n            while ((length >= MINMATCHLENGTH) & (start > 0)) {\n                length = ZDICT_count(b + pos, b + suffix[start - 1]);\n                if (length >= LLIMIT) length = LLIMIT - 1;\n                lengthList[length]++;\n                if (length >= MINMATCHLENGTH) start--;\n            }\n        }\n\n        /* largest useful length */\n        memset(cumulLength, 0, sizeof(cumulLength));\n        cumulLength[maxLength-1] = lengthList[maxLength-1];\n        for (i=(int)(maxLength-2); i>=0; i--)\n            
cumulLength[i] = cumulLength[i+1] + lengthList[i];\n\n        for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;\n        maxLength = i;\n\n        /* reduce maxLength in case of final into repetitive data */\n        {   U32 l = (U32)maxLength;\n            BYTE const c = b[pos + maxLength-1];\n            while (b[pos+l-2]==c) l--;\n            maxLength = l;\n        }\n        if (maxLength < MINMATCHLENGTH) return solution;   /* skip : no long-enough solution */\n\n        /* calculate savings */\n        savings[5] = 0;\n        for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)\n            savings[i] = savings[i-1] + (lengthList[i] * (i-3));\n\n        DISPLAYLEVEL(4, \"Selected dict at position %u, of length %u : saves %u (ratio: %.2f)  \\n\",\n                     (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);\n\n        solution.pos = (U32)pos;\n        solution.length = (U32)maxLength;\n        solution.savings = savings[maxLength];\n\n        /* mark positions done */\n        {   U32 id;\n            for (id=start; id<end; id++) {\n                U32 p, pEnd, length;\n                U32 const testedPos = suffix[id];\n                if (testedPos == pos)\n                    length = solution.length;\n                else {\n                    length = (U32)ZDICT_count(b+pos, b+testedPos);\n                    if (length > solution.length) length = solution.length;\n                }\n                pEnd = (U32)(testedPos + length);\n                for (p=testedPos; p<pEnd; p++)\n                    doneMarks[p] = 1;\n    }   }   }\n\n    return solution;\n}\n\n\nstatic int isIncluded(const void* in, const void* container, size_t length)\n{\n    const char* const ip = (const char*) in;\n    const char* const into = (const char*) container;\n    size_t u;\n\n    for (u=0; u<length; u++) {  /* works because end of buffer is a noisy guard band */\n        if (ip[u] != 
into[u]) break;\n    }\n\n    return u==length;\n}\n\n/*! ZDICT_tryMerge() :\n    check if dictItem can be merged, do it if possible\n    @return : id of destination elt, 0 if not merged\n*/\nstatic U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)\n{\n    const U32 tableSize = table->pos;\n    const U32 eltEnd = elt.pos + elt.length;\n    const char* const buf = (const char*) buffer;\n\n    /* tail overlap */\n    U32 u; for (u=1; u<tableSize; u++) {\n        if (u==eltNbToSkip) continue;\n        if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) {  /* overlap, existing > new */\n            /* append */\n            U32 const addedLength = table[u].pos - elt.pos;\n            table[u].length += addedLength;\n            table[u].pos = elt.pos;\n            table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */\n            table[u].savings += elt.length / 8;    /* rough approx bonus */\n            elt = table[u];\n            /* sort : improve rank */\n            while ((u>1) && (table[u-1].savings < elt.savings))\n            table[u] = table[u-1], u--;\n            table[u] = elt;\n            return u;\n    }   }\n\n    /* front overlap */\n    for (u=1; u<tableSize; u++) {\n        if (u==eltNbToSkip) continue;\n\n        if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) {  /* overlap, existing < new */\n            /* append */\n            int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);\n            table[u].savings += elt.length / 8;    /* rough approx bonus */\n            if (addedLength > 0) {   /* otherwise, elt fully included into existing */\n                table[u].length += addedLength;\n                table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */\n            }\n            /* sort : improve rank */\n            elt = table[u];\n            while ((u>1) && (table[u-1].savings < 
elt.savings))\n                table[u] = table[u-1], u--;\n            table[u] = elt;\n            return u;\n        }\n\n        if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {\n            if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {\n                size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );\n                table[u].pos = elt.pos;\n                table[u].savings += (U32)(elt.savings * addedLength / elt.length);\n                table[u].length = MIN(elt.length, table[u].length + 1);\n                return u;\n            }\n        }\n    }\n\n    return 0;\n}\n\n\nstatic void ZDICT_removeDictItem(dictItem* table, U32 id)\n{\n    /* convention : table[0].pos stores nb of elts */\n    U32 const max = table[0].pos;\n    U32 u;\n    if (!id) return;   /* protection, should never happen */\n    for (u=id; u<max-1; u++)\n        table[u] = table[u+1];\n    table->pos--;\n}\n\n\nstatic void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)\n{\n    /* merge if possible */\n    U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);\n    if (mergeId) {\n        U32 newMerge = 1;\n        while (newMerge) {\n            newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);\n            if (newMerge) ZDICT_removeDictItem(table, mergeId);\n            mergeId = newMerge;\n        }\n        return;\n    }\n\n    /* insert */\n    {   U32 current;\n        U32 nextElt = table->pos;\n        if (nextElt >= maxSize) nextElt = maxSize-1;\n        current = nextElt-1;\n        while (table[current].savings < elt.savings) {\n            table[current+1] = table[current];\n            current--;\n        }\n        table[current+1] = elt;\n        table->pos = nextElt+1;\n    }\n}\n\n\nstatic U32 ZDICT_dictSize(const dictItem* dictList)\n{\n    U32 u, dictSize = 0;\n    for (u=1; u<dictList[0].pos; u++)\n        dictSize += 
dictList[u].length;\n    return dictSize;\n}\n\n\nstatic size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,\n                            const void* const buffer, size_t bufferSize,   /* buffer must end with noisy guard band */\n                            const size_t* fileSizes, unsigned nbFiles,\n                            unsigned minRatio, U32 notificationLevel)\n{\n    int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));\n    int* const suffix = suffix0+1;\n    U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));\n    BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks));   /* +16 for overflow security */\n    U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));\n    size_t result = 0;\n    clock_t displayClock = 0;\n    clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;\n\n#   define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \\\n            if (ZDICT_clockSpan(displayClock) > refreshRate)  \\\n            { displayClock = clock(); DISPLAY(__VA_ARGS__); \\\n            if (notificationLevel>=4) fflush(stderr); } }\n\n    /* init */\n    DISPLAYLEVEL(2, \"\\r%70s\\r\", \"\");   /* clean display line */\n    if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {\n        result = ERROR(memory_allocation);\n        goto _cleanup;\n    }\n    if (minRatio < MINRATIO) minRatio = MINRATIO;\n    memset(doneMarks, 0, bufferSize+16);\n\n    /* limit sample set size (divsufsort limitation)*/\n    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, \"sample set too large : reduced to %u MB ...\\n\", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));\n    while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];\n\n    /* sort */\n    DISPLAYLEVEL(2, \"sorting %u files of total size %u MB ...\\n\", nbFiles, (unsigned)(bufferSize>>20));\n    {   int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);\n        if 
(divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }\n    }\n    suffix[bufferSize] = (int)bufferSize;   /* leads into noise */\n    suffix0[0] = (int)bufferSize;           /* leads into noise */\n    /* build reverse suffix sort */\n    {   size_t pos;\n        for (pos=0; pos < bufferSize; pos++)\n            reverseSuffix[suffix[pos]] = (U32)pos;\n        /* note filePos tracks borders between samples.\n           It's not used at this stage, but planned to become useful in a later update */\n        filePos[0] = 0;\n        for (pos=1; pos<nbFiles; pos++)\n            filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);\n    }\n\n    DISPLAYLEVEL(2, \"finding patterns ... \\n\");\n    DISPLAYLEVEL(3, \"minimum ratio : %u \\n\", minRatio);\n\n    {   U32 cursor; for (cursor=0; cursor < bufferSize; ) {\n            dictItem solution;\n            if (doneMarks[cursor]) { cursor++; continue; }\n            solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);\n            if (solution.length==0) { cursor++; continue; }\n            ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);\n            cursor += solution.length;\n            DISPLAYUPDATE(2, \"\\r%4.2f %% \\r\", (double)cursor / bufferSize * 100);\n    }   }\n\n_cleanup:\n    free(suffix0);\n    free(reverseSuffix);\n    free(doneMarks);\n    free(filePos);\n    return result;\n}\n\n\nstatic void ZDICT_fillNoise(void* buffer, size_t length)\n{\n    unsigned const prime1 = 2654435761U;\n    unsigned const prime2 = 2246822519U;\n    unsigned acc = prime1;\n    size_t p=0;\n    for (p=0; p<length; p++) {\n        acc *= prime2;\n        ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);\n    }\n}\n\n\ntypedef struct\n{\n    ZSTD_CDict* dict;    /* dictionary */\n    ZSTD_CCtx* zc;     /* working context */\n    void* workPlace;   /* must be ZSTD_BLOCKSIZE_MAX allocated */\n} EStats_ress_t;\n\n#define MAXREPOFFSET 
1024\n\nstatic void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,\n                              unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,\n                              const void* src, size_t srcSize,\n                              U32 notificationLevel)\n{\n    size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog);\n    size_t cSize;\n\n    if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection vs large samples */\n    {   size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);\n        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, \"warning : ZSTD_compressBegin_usingCDict failed \\n\"); return; }\n\n    }\n    cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);\n    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, \"warning : could not compress sample size %u \\n\", (unsigned)srcSize); return; }\n\n    if (cSize) {  /* if == 0; block is not compressible */\n        const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);\n\n        /* literals stats */\n        {   const BYTE* bytePtr;\n            for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)\n                countLit[*bytePtr]++;\n        }\n\n        /* seqStats */\n        {   U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);\n            ZSTD_seqToCodes(seqStorePtr);\n\n            {   const BYTE* codePtr = seqStorePtr->ofCode;\n                U32 u;\n                for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;\n            }\n\n            {   const BYTE* codePtr = seqStorePtr->mlCode;\n                U32 u;\n                for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;\n            }\n\n            {   const BYTE* codePtr = seqStorePtr->llCode;\n                U32 u;\n                for (u=0; u<nbSeq; u++) 
litlengthCount[codePtr[u]]++;\n            }\n\n            if (nbSeq >= 2) { /* rep offsets */\n                const seqDef* const seq = seqStorePtr->sequencesStart;\n                U32 offset1 = seq[0].offset - 3;\n                U32 offset2 = seq[1].offset - 3;\n                if (offset1 >= MAXREPOFFSET) offset1 = 0;\n                if (offset2 >= MAXREPOFFSET) offset2 = 0;\n                repOffsets[offset1] += 3;\n                repOffsets[offset2] += 1;\n    }   }   }\n}\n\nstatic size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)\n{\n    size_t total=0;\n    unsigned u;\n    for (u=0; u<nbFiles; u++) total += fileSizes[u];\n    return total;\n}\n\ntypedef struct { U32 offset; U32 count; } offsetCount_t;\n\nstatic void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)\n{\n    U32 u;\n    table[ZSTD_REP_NUM].offset = val;\n    table[ZSTD_REP_NUM].count = count;\n    for (u=ZSTD_REP_NUM; u>0; u--) {\n        offsetCount_t tmp;\n        if (table[u-1].count >= table[u].count) break;\n        tmp = table[u-1];\n        table[u-1] = table[u];\n        table[u] = tmp;\n    }\n}\n\n/* ZDICT_flatLit() :\n * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.\n * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.\n */\nstatic void ZDICT_flatLit(unsigned* countLit)\n{\n    int u;\n    for (u=1; u<256; u++) countLit[u] = 2;\n    countLit[0]   = 4;\n    countLit[253] = 1;\n    countLit[254] = 1;\n}\n\n#define OFFCODE_MAX 30  /* only applicable to first block */\nstatic size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,\n                                   unsigned compressionLevel,\n                             const void*  srcBuffer, const size_t* fileSizes, unsigned nbFiles,\n                             const void* dictBuffer, size_t  dictBufferSize,\n                                   unsigned 
notificationLevel)\n{\n    unsigned countLit[256];\n    HUF_CREATE_STATIC_CTABLE(hufTable, 255);\n    unsigned offcodeCount[OFFCODE_MAX+1];\n    short offcodeNCount[OFFCODE_MAX+1];\n    U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));\n    unsigned matchLengthCount[MaxML+1];\n    short matchLengthNCount[MaxML+1];\n    unsigned litLengthCount[MaxLL+1];\n    short litLengthNCount[MaxLL+1];\n    U32 repOffset[MAXREPOFFSET];\n    offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];\n    EStats_ress_t esr = { NULL, NULL, NULL };\n    ZSTD_parameters params;\n    U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;\n    size_t pos = 0, errorCode;\n    size_t eSize = 0;\n    size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);\n    size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);\n    BYTE* dstPtr = (BYTE*)dstBuffer;\n\n    /* init */\n    DEBUGLOG(4, \"ZDICT_analyzeEntropy\");\n    if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; }   /* too large dictionary */\n    for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */\n    for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;\n    for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;\n    for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;\n    memset(repOffset, 0, sizeof(repOffset));\n    repOffset[1] = repOffset[4] = repOffset[8] = 1;\n    memset(bestRepOffset, 0, sizeof(bestRepOffset));\n    if (compressionLevel==0) compressionLevel = g_compressionLevel_default;\n    params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);\n\n    esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);\n    esr.zc = ZSTD_createCCtx();\n    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);\n    if (!esr.dict || !esr.zc || !esr.workPlace) {\n        eSize = ERROR(memory_allocation);\n        DISPLAYLEVEL(1, \"Not 
enough memory \\n\");\n        goto _cleanup;\n    }\n\n    /* collect stats on all samples */\n    for (u=0; u<nbFiles; u++) {\n        ZDICT_countEStats(esr, &params,\n                          countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,\n                         (const char*)srcBuffer + pos, fileSizes[u],\n                          notificationLevel);\n        pos += fileSizes[u];\n    }\n\n    /* analyze, build stats, starting with literals */\n    {   size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);\n        if (HUF_isError(maxNbBits)) {\n            eSize = maxNbBits;\n            DISPLAYLEVEL(1, \" HUF_buildCTable error \\n\");\n            goto _cleanup;\n        }\n        if (maxNbBits==8) {  /* not compressible : will fail on HUF_writeCTable() */\n            DISPLAYLEVEL(2, \"warning : pathological dataset : literals are not compressible : samples are noisy or too regular \\n\");\n            ZDICT_flatLit(countLit);  /* replace distribution by a fake \"mostly flat but still compressible\" distribution, that HUF_writeCTable() can encode */\n            maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);\n            assert(maxNbBits==9);\n        }\n        huffLog = (U32)maxNbBits;\n    }\n\n    /* looking for most common first offsets */\n    {   U32 offset;\n        for (offset=1; offset<MAXREPOFFSET; offset++)\n            ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);\n    }\n    /* note : the result of this phase should be used to better appreciate the impact on statistics */\n\n    total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];\n    errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);\n    if (FSE_isError(errorCode)) {\n        eSize = errorCode;\n        DISPLAYLEVEL(1, \"FSE_normalizeCount error with offcodeCount \\n\");\n        goto _cleanup;\n    }\n    Offlog = (U32)errorCode;\n\n    total=0; for (u=0; u<=MaxML; 
u++) total+=matchLengthCount[u];\n    errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);\n    if (FSE_isError(errorCode)) {\n        eSize = errorCode;\n        DISPLAYLEVEL(1, \"FSE_normalizeCount error with matchLengthCount \\n\");\n        goto _cleanup;\n    }\n    mlLog = (U32)errorCode;\n\n    total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];\n    errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);\n    if (FSE_isError(errorCode)) {\n        eSize = errorCode;\n        DISPLAYLEVEL(1, \"FSE_normalizeCount error with litLengthCount \\n\");\n        goto _cleanup;\n    }\n    llLog = (U32)errorCode;\n\n    /* write result to buffer */\n    {   size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);\n        if (HUF_isError(hhSize)) {\n            eSize = hhSize;\n            DISPLAYLEVEL(1, \"HUF_writeCTable error \\n\");\n            goto _cleanup;\n        }\n        dstPtr += hhSize;\n        maxDstSize -= hhSize;\n        eSize += hhSize;\n    }\n\n    {   size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);\n        if (FSE_isError(ohSize)) {\n            eSize = ohSize;\n            DISPLAYLEVEL(1, \"FSE_writeNCount error with offcodeNCount \\n\");\n            goto _cleanup;\n        }\n        dstPtr += ohSize;\n        maxDstSize -= ohSize;\n        eSize += ohSize;\n    }\n\n    {   size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);\n        if (FSE_isError(mhSize)) {\n            eSize = mhSize;\n            DISPLAYLEVEL(1, \"FSE_writeNCount error with matchLengthNCount \\n\");\n            goto _cleanup;\n        }\n        dstPtr += mhSize;\n        maxDstSize -= mhSize;\n        eSize += mhSize;\n    }\n\n    {   size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);\n        if (FSE_isError(lhSize)) {\n            eSize = 
lhSize;\n            DISPLAYLEVEL(1, \"FSE_writeNCount error with litlengthNCount \\n\");\n            goto _cleanup;\n        }\n        dstPtr += lhSize;\n        maxDstSize -= lhSize;\n        eSize += lhSize;\n    }\n\n    if (maxDstSize<12) {\n        eSize = ERROR(dstSize_tooSmall);\n        DISPLAYLEVEL(1, \"not enough space to write RepOffsets \\n\");\n        goto _cleanup;\n    }\n# if 0\n    MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);\n    MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);\n    MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);\n#else\n    /* at this stage, we don't use the result of \"most common first offset\",\n       as the impact of statistics is not properly evaluated */\n    MEM_writeLE32(dstPtr+0, repStartValue[0]);\n    MEM_writeLE32(dstPtr+4, repStartValue[1]);\n    MEM_writeLE32(dstPtr+8, repStartValue[2]);\n#endif\n    eSize += 12;\n\n_cleanup:\n    ZSTD_freeCDict(esr.dict);\n    ZSTD_freeCCtx(esr.zc);\n    free(esr.workPlace);\n\n    return eSize;\n}\n\n\n\nsize_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,\n                          const void* customDictContent, size_t dictContentSize,\n                          const void* samplesBuffer, const size_t* samplesSizes,\n                          unsigned nbSamples, ZDICT_params_t params)\n{\n    size_t hSize;\n#define HBUFFSIZE 256   /* should prove large enough for all entropy headers */\n    BYTE header[HBUFFSIZE];\n    int const compressionLevel = (params.compressionLevel == 0) ? 
g_compressionLevel_default : params.compressionLevel;\n    U32 const notificationLevel = params.notificationLevel;\n\n    /* check conditions */\n    DEBUGLOG(4, \"ZDICT_finalizeDictionary\");\n    if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);\n    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);\n    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);\n\n    /* dictionary header */\n    MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);\n    {   U64 const randomID = XXH64(customDictContent, dictContentSize, 0);\n        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;\n        U32 const dictID = params.dictID ? params.dictID : compliantID;\n        MEM_writeLE32(header+4, dictID);\n    }\n    hSize = 8;\n\n    /* entropy tables */\n    DISPLAYLEVEL(2, \"\\r%70s\\r\", \"\");   /* clean display line */\n    DISPLAYLEVEL(2, \"statistics ... \\n\");\n    {   size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,\n                                  compressionLevel,\n                                  samplesBuffer, samplesSizes, nbSamples,\n                                  customDictContent, dictContentSize,\n                                  notificationLevel);\n        if (ZDICT_isError(eSize)) return eSize;\n        hSize += eSize;\n    }\n\n    /* copy elements in final buffer ; note : src and dst buffer can overlap */\n    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;\n    {   size_t const dictSize = hSize + dictContentSize;\n        char* dictEnd = (char*)dictBuffer + dictSize;\n        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);\n        memcpy(dictBuffer, header, hSize);\n        return dictSize;\n    }\n}\n\n\nstatic size_t ZDICT_addEntropyTablesFromBuffer_advanced(\n        void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,\n        const void* samplesBuffer, 
const size_t* samplesSizes, unsigned nbSamples,\n        ZDICT_params_t params)\n{\n    int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;\n    U32 const notificationLevel = params.notificationLevel;\n    size_t hSize = 8;\n\n    /* calculate entropy tables */\n    DISPLAYLEVEL(2, \"\\r%70s\\r\", \"\");   /* clean display line */\n    DISPLAYLEVEL(2, \"statistics ... \\n\");\n    {   size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,\n                                  compressionLevel,\n                                  samplesBuffer, samplesSizes, nbSamples,\n                                  (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize,\n                                  notificationLevel);\n        if (ZDICT_isError(eSize)) return eSize;\n        hSize += eSize;\n    }\n\n    /* add dictionary header (after entropy tables) */\n    MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);\n    {   U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);\n        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;\n        U32 const dictID = params.dictID ? params.dictID : compliantID;\n        MEM_writeLE32((char*)dictBuffer+4, dictID);\n    }\n\n    if (hSize + dictContentSize < dictBufferCapacity)\n        memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);\n    return MIN(dictBufferCapacity, hSize+dictContentSize);\n}\n\n/* Hidden declaration for dbio.c */\nsize_t ZDICT_trainFromBuffer_unsafe_legacy(\n                            void* dictBuffer, size_t maxDictSize,\n                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,\n                            ZDICT_legacy_params_t params);\n/*! 
ZDICT_trainFromBuffer_unsafe_legacy() :\n*   Warning : `samplesBuffer` must be followed by noisy guard band.\n*   @return : size of dictionary, or an error code which can be tested with ZDICT_isError()\n*/\nsize_t ZDICT_trainFromBuffer_unsafe_legacy(\n                            void* dictBuffer, size_t maxDictSize,\n                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,\n                            ZDICT_legacy_params_t params)\n{\n    U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));\n    dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));\n    unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel;\n    unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity;\n    size_t const targetDictSize = maxDictSize;\n    size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);\n    size_t dictSize = 0;\n    U32 const notificationLevel = params.zParams.notificationLevel;\n\n    /* checks */\n    if (!dictList) return ERROR(memory_allocation);\n    if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); }   /* requested dictionary size is too small */\n    if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* not enough source to create dictionary */\n\n    /* init */\n    ZDICT_initDictItem(dictList);\n\n    /* build dictionary */\n    ZDICT_trainBuffer_legacy(dictList, dictListSize,\n                       samplesBuffer, samplesBuffSize,\n                       samplesSizes, nbSamples,\n                       minRep, notificationLevel);\n\n    /* display best matches */\n    if (params.zParams.notificationLevel>= 3) {\n        unsigned const nb = MIN(25, dictList[0].pos);\n        unsigned const dictContentSize = ZDICT_dictSize(dictList);\n        unsigned u;\n        
DISPLAYLEVEL(3, \"\\n %u segments found, of total size %u \\n\", (unsigned)dictList[0].pos-1, dictContentSize);\n        DISPLAYLEVEL(3, \"list %u best segments \\n\", nb-1);\n        for (u=1; u<nb; u++) {\n            unsigned const pos = dictList[u].pos;\n            unsigned const length = dictList[u].length;\n            U32 const printedLength = MIN(40, length);\n            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {\n                free(dictList);\n                return ERROR(GENERIC);   /* should never happen */\n            }\n            DISPLAYLEVEL(3, \"%3u:%3u bytes at pos %8u, savings %7u bytes |\",\n                         u, length, pos, (unsigned)dictList[u].savings);\n            ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);\n            DISPLAYLEVEL(3, \"| \\n\");\n    }   }\n\n\n    /* create dictionary */\n    {   unsigned dictContentSize = ZDICT_dictSize(dictList);\n        if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* dictionary content too small */\n        if (dictContentSize < targetDictSize/4) {\n            DISPLAYLEVEL(2, \"!  warning : selected content significantly smaller than requested (%u < %u) \\n\", dictContentSize, (unsigned)maxDictSize);\n            if (samplesBuffSize < 10 * targetDictSize)\n                DISPLAYLEVEL(2, \"!  consider increasing the number of samples (total size : %u MB)\\n\", (unsigned)(samplesBuffSize>>20));\n            if (minRep > MINRATIO) {\n                DISPLAYLEVEL(2, \"!  consider increasing selectivity to produce larger dictionary (-s%u) \\n\", selectivity+1);\n                DISPLAYLEVEL(2, \"!  
note : larger dictionaries are not necessarily better, test its efficiency on samples \\n\");\n            }\n        }\n\n        if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {\n            unsigned proposedSelectivity = selectivity-1;\n            while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }\n            DISPLAYLEVEL(2, \"!  note : calculated dictionary significantly larger than requested (%u > %u) \\n\", dictContentSize, (unsigned)maxDictSize);\n            DISPLAYLEVEL(2, \"!  consider increasing dictionary size, or produce denser dictionary (-s%u) \\n\", proposedSelectivity);\n            DISPLAYLEVEL(2, \"!  always test dictionary efficiency on real samples \\n\");\n        }\n\n        /* limit dictionary size */\n        {   U32 const max = dictList->pos;   /* convention : nb of useful elts within dictList */\n            U32 currentSize = 0;\n            U32 n; for (n=1; n<max; n++) {\n                currentSize += dictList[n].length;\n                if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }\n            }\n            dictList->pos = n;\n            dictContentSize = currentSize;\n        }\n\n        /* build dict content */\n        {   U32 u;\n            BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;\n            for (u=1; u<dictList->pos; u++) {\n                U32 l = dictList[u].length;\n                ptr -= l;\n                if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); }   /* should not happen */\n                memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);\n        }   }\n\n        dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,\n                                                             samplesBuffer, samplesSizes, nbSamples,\n                                                             params.zParams);\n    }\n\n    /* clean up */\n    
free(dictList);\n    return dictSize;\n}\n\n\n/* ZDICT_trainFromBuffer_legacy() :\n * issue : samplesBuffer need to be followed by a noisy guard band.\n * work around : duplicate the buffer, and add the noise */\nsize_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,\n                              const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,\n                              ZDICT_legacy_params_t params)\n{\n    size_t result;\n    void* newBuff;\n    size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);\n    if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0;   /* not enough content => no dictionary */\n\n    newBuff = malloc(sBuffSize + NOISELENGTH);\n    if (!newBuff) return ERROR(memory_allocation);\n\n    memcpy(newBuff, samplesBuffer, sBuffSize);\n    ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH);   /* guard band, for end of buffer condition */\n\n    result =\n        ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,\n                                            samplesSizes, nbSamples, params);\n    free(newBuff);\n    return result;\n}\n\n\nsize_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,\n                             const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)\n{\n    ZDICT_fastCover_params_t params;\n    DEBUGLOG(3, \"ZDICT_trainFromBuffer\");\n    memset(&params, 0, sizeof(params));\n    params.d = 8;\n    params.steps = 4;\n    /* Default to level 6 since no compression level information is available */\n    params.zParams.compressionLevel = 3;\n#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)\n    params.zParams.notificationLevel = DEBUGLEVEL;\n#endif\n    return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,\n                                               samplesBuffer, samplesSizes, nbSamples,\n                                               &params);\n}\n\nsize_t 
ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,\n                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)\n{\n    ZDICT_params_t params;\n    memset(&params, 0, sizeof(params));\n    return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,\n                                                     samplesBuffer, samplesSizes, nbSamples,\n                                                     params);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/dictBuilder/zdict.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef DICTBUILDER_H_001\n#define DICTBUILDER_H_001\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/*======  Dependencies  ======*/\n#include <stddef.h>  /* size_t */\n\n\n/* =====   ZDICTLIB_API : control library symbols visibility   ===== */\n#ifndef ZDICTLIB_VISIBILITY\n#  if defined(__GNUC__) && (__GNUC__ >= 4)\n#    define ZDICTLIB_VISIBILITY __attribute__ ((visibility (\"default\")))\n#  else\n#    define ZDICTLIB_VISIBILITY\n#  endif\n#endif\n#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)\n#  define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY\n#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)\n#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/\n#else\n#  define ZDICTLIB_API ZDICTLIB_VISIBILITY\n#endif\n\n\n/*! 
ZDICT_trainFromBuffer():\n *  Train a dictionary from an array of samples.\n *  Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,\n *  f=20, and accel=1.\n *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n *  The resulting dictionary will be saved into `dictBuffer`.\n * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n *          or an error code, which can be tested with ZDICT_isError().\n *  Note:  Dictionary training will fail if there are not enough samples to construct a\n *         dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).\n *         If dictionary training fails, you should use zstd without a dictionary, as the dictionary\n *         would've been ineffective anyways. If you believe your samples would benefit from a dictionary\n *         please open an issue with details, and we can look into it.\n *  Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.\n *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\n */\nZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,\n                                    const void* samplesBuffer,\n                                    const size_t* samplesSizes, unsigned nbSamples);\n\n\n/*======   Helper functions   ======*/\nZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize);  /**< extracts dictID; @return zero if error (not a valid dictionary) */\nZDICTLIB_API size_t 
ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize);  /* returns dict header size; returns a ZSTD error code on failure */\nZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);\nZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);\n\n\n\n#ifdef ZDICT_STATIC_LINKING_ONLY\n\n/* ====================================================================================\n * The definitions in this section are considered experimental.\n * They should never be used with a dynamic library, as they may change in the future.\n * They are provided for advanced usages.\n * Use them only in association with static linking.\n * ==================================================================================== */\n\ntypedef struct {\n    int      compressionLevel;   /* optimize for a specific zstd compression level; 0 means default */\n    unsigned notificationLevel;  /* Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */\n    unsigned dictID;             /* force dictID value; 0 means auto mode (32-bits random value) */\n} ZDICT_params_t;\n\n/*! 
ZDICT_cover_params_t:\n *  k and d are the only required parameters.\n *  For others, value 0 means default.\n */\ntypedef struct {\n    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */\n    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */\n    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */\n    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */\n    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */\n    unsigned shrinkDict;         /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking  */\n    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. 
*/\n    ZDICT_params_t zParams;\n} ZDICT_cover_params_t;\n\ntypedef struct {\n    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */\n    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */\n    unsigned f;                  /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/\n    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */\n    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */\n    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */\n    unsigned accel;              /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */\n    unsigned shrinkDict;         /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking  */\n    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */\n\n    ZDICT_params_t zParams;\n} ZDICT_fastCover_params_t;\n\n/*! 
ZDICT_trainFromBuffer_cover():\n *  Train a dictionary from an array of samples using the COVER algorithm.\n *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n *  The resulting dictionary will be saved into `dictBuffer`.\n * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n *          or an error code, which can be tested with ZDICT_isError().\n *          See ZDICT_trainFromBuffer() for details on failure modes.\n *  Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.\n *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\n */\nZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(\n          void *dictBuffer, size_t dictBufferCapacity,\n    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,\n          ZDICT_cover_params_t parameters);\n\n/*! 
ZDICT_optimizeTrainFromBuffer_cover():\n * The same requirements as above hold for all the parameters except `parameters`.\n * This function tries many parameter combinations and picks the best parameters.\n * `*parameters` is filled with the best parameters found,\n * dictionary constructed with those parameters is stored in `dictBuffer`.\n *\n * All of the parameters d, k, steps are optional.\n * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\n * if steps is zero it defaults to its default value.\n * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\n *\n * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n *          or an error code, which can be tested with ZDICT_isError().\n *          On success `*parameters` contains the parameters selected.\n *          See ZDICT_trainFromBuffer() for details on failure modes.\n * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread.\n */\nZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(\n          void* dictBuffer, size_t dictBufferCapacity,\n    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,\n          ZDICT_cover_params_t* parameters);\n\n/*! 
ZDICT_trainFromBuffer_fastCover():\n *  Train a dictionary from an array of samples using a modified version of COVER algorithm.\n *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n *  d and k are required.\n *  All other parameters are optional, will use default values if not provided\n *  The resulting dictionary will be saved into `dictBuffer`.\n * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n *          or an error code, which can be tested with ZDICT_isError().\n *          See ZDICT_trainFromBuffer() for details on failure modes.\n *  Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.\n *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\n */\nZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,\n                    size_t dictBufferCapacity, const void *samplesBuffer,\n                    const size_t *samplesSizes, unsigned nbSamples,\n                    ZDICT_fastCover_params_t parameters);\n\n/*! ZDICT_optimizeTrainFromBuffer_fastCover():\n * The same requirements as above hold for all the parameters except `parameters`.\n * This function tries many parameter combinations (specifically, k and d combinations)\n * and picks the best parameters. 
`*parameters` is filled with the best parameters found,\n * dictionary constructed with those parameters is stored in `dictBuffer`.\n * All of the parameters d, k, steps, f, and accel are optional.\n * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\n * if steps is zero it defaults to its default value.\n * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\n * If f is zero, default value of 20 is used.\n * If accel is zero, default value of 1 is used.\n *\n * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n *          or an error code, which can be tested with ZDICT_isError().\n *          On success `*parameters` contains the parameters selected.\n *          See ZDICT_trainFromBuffer() for details on failure modes.\n * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.\n */\nZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,\n                    size_t dictBufferCapacity, const void* samplesBuffer,\n                    const size_t* samplesSizes, unsigned nbSamples,\n                    ZDICT_fastCover_params_t* parameters);\n\n/*! 
ZDICT_finalizeDictionary():\n * Given a custom content as a basis for dictionary, and a set of samples,\n * finalize dictionary by adding headers and statistics.\n *\n * Samples must be stored concatenated in a flat buffer `samplesBuffer`,\n * supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.\n *\n * dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.\n * maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.\n *\n * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),\n *          or an error code, which can be tested by ZDICT_isError().\n * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.\n * Note 2: dictBuffer and dictContent can overlap\n */\n#define ZDICT_CONTENTSIZE_MIN 128\n#define ZDICT_DICTSIZE_MIN    256\nZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,\n                                const void* dictContent, size_t dictContentSize,\n                                const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,\n                                ZDICT_params_t parameters);\n\ntypedef struct {\n    unsigned selectivityLevel;   /* 0 means default; larger => select more => larger dictionary */\n    ZDICT_params_t zParams;\n} ZDICT_legacy_params_t;\n\n/*! 
ZDICT_trainFromBuffer_legacy():\n *  Train a dictionary from an array of samples.\n *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n *  The resulting dictionary will be saved into `dictBuffer`.\n * `parameters` is optional and can be provided with values set to 0 to mean \"default\".\n * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n *          or an error code, which can be tested with ZDICT_isError().\n *          See ZDICT_trainFromBuffer() for details on failure modes.\n *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\n *  Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.\n */\nZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(\n    void *dictBuffer, size_t dictBufferCapacity,\n    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,\n    ZDICT_legacy_params_t parameters);\n\n/* Deprecation warnings */\n/* It is generally possible to disable deprecation warnings from compiler,\n   for example with -Wno-deprecated-declarations for gcc\n   or _CRT_SECURE_NO_WARNINGS in Visual.\n   Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */\n#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS\n#  define ZDICT_DEPRECATED(message) ZDICTLIB_API   /* disable deprecation warnings */\n#else\n#  define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)\n#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */\n#    define ZDICT_DEPRECATED(message) 
[[deprecated(message)]] ZDICTLIB_API\n#  elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)\n#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))\n#  elif (ZDICT_GCC_VERSION >= 301)\n#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))\n#  elif defined(_MSC_VER)\n#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))\n#  else\n#    pragma message(\"WARNING: You need to implement ZDICT_DEPRECATED for this compiler\")\n#    define ZDICT_DEPRECATED(message) ZDICTLIB_API\n#  endif\n#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */\n\nZDICT_DEPRECATED(\"use ZDICT_finalizeDictionary() instead\")\nsize_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,\n                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);\n\n\n#endif   /* ZDICT_STATIC_LINKING_ONLY */\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif   /* DICTBUILDER_H_001 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/dll/example/README.md",
    "content": "ZSTD Windows binary package\n====================================\n\n#### The package contents\n\n- `zstd.exe`                  : Command Line Utility, supporting gzip-like arguments\n- `dll\\libzstd.dll`           : The ZSTD dynamic library (DLL)\n- `dll\\libzstd.lib`           : The import library of the ZSTD dynamic library (DLL) for Visual C++\n- `example\\`                  : The example of usage of the ZSTD library\n- `include\\`                  : Header files required by the ZSTD library\n- `static\\libzstd_static.lib` : The static ZSTD library (LIB)\n\n\n#### Usage of Command Line Interface\n\nCommand Line Interface (CLI) supports gzip-like arguments.\nBy default CLI takes an input file and compresses it to an output file:\n```\n    Usage: zstd [arg] [input] [output]\n```\nThe full list of commands for CLI can be obtained with `-h` or `-H`. The ratio can\nbe improved with commands from `-3` to `-16` but higher levels also have slower\ncompression. CLI includes in-memory compression benchmark module with compression\nlevels starting from `-b` and ending with `-e` with iteration time of `-i` seconds.\nCLI supports aggregation of parameters i.e. `-b1`, `-e18`, and `-i1` can be joined\ninto `-b1e18i1`.\n\n\n#### The example of usage of static and dynamic ZSTD libraries with gcc/MinGW\n\nUse `cd example` and `make` to build `fullbench-dll` and `fullbench-lib`.\n`fullbench-dll` uses a dynamic ZSTD library from the `dll` directory.\n`fullbench-lib` uses a static ZSTD library from the `lib` directory.\n\n\n#### Using ZSTD DLL with gcc/MinGW\n\nThe header files from `include\\` and the dynamic library `dll\\libzstd.dll`\nare required to compile a project using gcc/MinGW.\nThe dynamic library has to be added to linking options.\nIt means that if a project that uses ZSTD consists of a single `test-dll.c`\nfile it should be linked with `dll\\libzstd.dll`. 
For example:\n```\n    gcc $(CFLAGS) -Iinclude\\ test-dll.c -o test-dll dll\\libzstd.dll\n```\nThe compiled executable will require ZSTD DLL which is available at `dll\\libzstd.dll`.\n\n\n#### The example of usage of static and dynamic ZSTD libraries with Visual C++\n\nOpen `example\\fullbench-dll.sln` to compile `fullbench-dll` that uses a\ndynamic ZSTD library from the `dll` directory. The solution works with Visual C++\n2010 or newer. When one will open the solution with Visual C++ newer than 2010\nthen the solution will upgraded to the current version.\n\n\n#### Using ZSTD DLL with Visual C++\n\nThe header files from `include\\` and the import library `dll\\libzstd.lib`\nare required to compile a project using Visual C++.\n\n1. The path to header files should be added to `Additional Include Directories` that can\n   be found in project properties `C/C++` then `General`.\n2. The import library has to be added to `Additional Dependencies` that can\n   be found in project properties `Linker` then `Input`.\n   If one will provide only the name `libzstd.lib` without a full path to the library\n   the directory has to be added to `Linker\\General\\Additional Library Directories`.\n\nThe compiled executable will require ZSTD DLL which is available at `dll\\libzstd.dll`.\n"
  },
  {
    "path": "src/third_party/zstd/lib/dll/example/build_package.bat",
    "content": "@ECHO OFF\r\nMKDIR bin\\dll bin\\static bin\\example bin\\include\r\nCOPY tests\\fullbench.c bin\\example\\\r\nCOPY programs\\datagen.c bin\\example\\\r\nCOPY programs\\datagen.h bin\\example\\\r\nCOPY programs\\util.h bin\\example\\\r\nCOPY programs\\platform.h bin\\example\\\r\nCOPY lib\\common\\mem.h bin\\example\\\r\nCOPY lib\\common\\zstd_internal.h bin\\example\\\r\nCOPY lib\\common\\error_private.h bin\\example\\\r\nCOPY lib\\common\\xxhash.h bin\\example\\\r\nCOPY lib\\libzstd.a bin\\static\\libzstd_static.lib\r\nCOPY lib\\dll\\libzstd.* bin\\dll\\\r\nCOPY lib\\dll\\example\\Makefile bin\\example\\\r\nCOPY lib\\dll\\example\\fullbench-dll.* bin\\example\\\r\nCOPY lib\\dll\\example\\README.md bin\\\r\nCOPY lib\\zstd.h bin\\include\\\r\nCOPY lib\\common\\zstd_errors.h bin\\include\\\r\nCOPY lib\\dictBuilder\\zdict.h bin\\include\\\r\nCOPY programs\\zstd.exe bin\\zstd.exe\r\n"
  },
  {
    "path": "src/third_party/zstd/lib/dll/example/fullbench-dll.sln",
    "content": "Microsoft Visual Studio Solution File, Format Version 12.00\r\n# Visual Studio Express 2012 for Windows Desktop\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"fullbench-dll\", \"fullbench-dll.vcxproj\", \"{13992FD2-077E-4954-B065-A428198201A9}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tDebug|Win32 = Debug|Win32\r\n\t\tDebug|x64 = Debug|x64\r\n\t\tRelease|Win32 = Release|Win32\r\n\t\tRelease|x64 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.ActiveCfg = Debug|Win32\r\n\t\t{13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.Build.0 = Debug|Win32\r\n\t\t{13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.ActiveCfg = Debug|x64\r\n\t\t{13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.Build.0 = Debug|x64\r\n\t\t{13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.Build.0 = Release|Win32\r\n\t\t{13992FD2-077E-4954-B065-A428198201A9}.Release|x64.ActiveCfg = Release|x64\r\n\t\t{13992FD2-077E-4954-B065-A428198201A9}.Release|x64.Build.0 = Release|x64\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "src/third_party/zstd/lib/dll/example/fullbench-dll.vcxproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\r\n<Project DefaultTargets=\"Build\" ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\r\n  <ItemGroup Label=\"ProjectConfigurations\">\r\n    <ProjectConfiguration Include=\"Debug|Win32\">\r\n      <Configuration>Debug</Configuration>\r\n      <Platform>Win32</Platform>\r\n    </ProjectConfiguration>\r\n    <ProjectConfiguration Include=\"Debug|x64\">\r\n      <Configuration>Debug</Configuration>\r\n      <Platform>x64</Platform>\r\n    </ProjectConfiguration>\r\n    <ProjectConfiguration Include=\"Release|Win32\">\r\n      <Configuration>Release</Configuration>\r\n      <Platform>Win32</Platform>\r\n    </ProjectConfiguration>\r\n    <ProjectConfiguration Include=\"Release|x64\">\r\n      <Configuration>Release</Configuration>\r\n      <Platform>x64</Platform>\r\n    </ProjectConfiguration>\r\n  </ItemGroup>\r\n  <PropertyGroup Label=\"Globals\">\r\n    <ProjectGuid>{00000000-1CC8-4FD7-9281-6B8DBB9D3DF8}</ProjectGuid>\r\n    <Keyword>Win32Proj</Keyword>\r\n    <RootNamespace>fullbench-dll</RootNamespace>\r\n    <OutDir>$(SolutionDir)bin\\$(Platform)_$(Configuration)\\</OutDir>\r\n    <IntDir>$(SolutionDir)bin\\obj\\$(RootNamespace)_$(Platform)_$(Configuration)\\</IntDir>\r\n  </PropertyGroup>\r\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.Default.props\" />\r\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"Configuration\">\r\n    <ConfigurationType>Application</ConfigurationType>\r\n    <UseDebugLibraries>true</UseDebugLibraries>\r\n    <CharacterSet>MultiByte</CharacterSet>\r\n  </PropertyGroup>\r\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|x64'\" Label=\"Configuration\">\r\n    <ConfigurationType>Application</ConfigurationType>\r\n    <UseDebugLibraries>true</UseDebugLibraries>\r\n    <CharacterSet>MultiByte</CharacterSet>\r\n  </PropertyGroup>\r\n  <PropertyGroup 
Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"Configuration\">\r\n    <ConfigurationType>Application</ConfigurationType>\r\n    <UseDebugLibraries>false</UseDebugLibraries>\r\n    <WholeProgramOptimization>true</WholeProgramOptimization>\r\n    <CharacterSet>MultiByte</CharacterSet>\r\n  </PropertyGroup>\r\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|x64'\" Label=\"Configuration\">\r\n    <ConfigurationType>Application</ConfigurationType>\r\n    <UseDebugLibraries>false</UseDebugLibraries>\r\n    <WholeProgramOptimization>true</WholeProgramOptimization>\r\n    <CharacterSet>MultiByte</CharacterSet>\r\n  </PropertyGroup>\r\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.props\" />\r\n  <ImportGroup Label=\"ExtensionSettings\">\r\n  </ImportGroup>\r\n  <ImportGroup Label=\"PropertySheets\" Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">\r\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\r\n  </ImportGroup>\r\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|x64'\" Label=\"PropertySheets\">\r\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\r\n  </ImportGroup>\r\n  <ImportGroup Label=\"PropertySheets\" Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">\r\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\r\n  </ImportGroup>\r\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|x64'\" Label=\"PropertySheets\">\r\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" 
Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\r\n  </ImportGroup>\r\n  <PropertyGroup Label=\"UserMacros\" />\r\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">\r\n    <LinkIncremental>true</LinkIncremental>\r\n    <IncludePath>$(IncludePath);$(SolutionDir)..\\..\\lib;$(SolutionDir)..\\..\\programs;$(SolutionDir)..\\..\\lib\\legacy;$(SolutionDir)..\\..\\lib\\common;$(UniversalCRT_IncludePath);</IncludePath>\r\n    <RunCodeAnalysis>false</RunCodeAnalysis>\r\n  </PropertyGroup>\r\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|x64'\">\r\n    <LinkIncremental>true</LinkIncremental>\r\n    <IncludePath>$(IncludePath);$(SolutionDir)..\\..\\lib;$(SolutionDir)..\\..\\programs;$(SolutionDir)..\\..\\lib\\legacy;$(SolutionDir)..\\..\\lib\\common;$(UniversalCRT_IncludePath);</IncludePath>\r\n    <RunCodeAnalysis>false</RunCodeAnalysis>\r\n  </PropertyGroup>\r\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">\r\n    <LinkIncremental>false</LinkIncremental>\r\n    <IncludePath>$(IncludePath);$(SolutionDir)..\\..\\lib;$(SolutionDir)..\\..\\programs;$(SolutionDir)..\\..\\lib\\legacy;$(SolutionDir)..\\..\\lib\\common;$(UniversalCRT_IncludePath);</IncludePath>\r\n    <RunCodeAnalysis>false</RunCodeAnalysis>\r\n  </PropertyGroup>\r\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|x64'\">\r\n    <LinkIncremental>false</LinkIncremental>\r\n    <IncludePath>$(IncludePath);$(SolutionDir)..\\..\\lib;$(SolutionDir)..\\..\\programs;$(SolutionDir)..\\..\\lib\\legacy;$(SolutionDir)..\\..\\lib\\common;$(UniversalCRT_IncludePath);</IncludePath>\r\n    <RunCodeAnalysis>false</RunCodeAnalysis>\r\n  </PropertyGroup>\r\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">\r\n    <ClCompile>\r\n      <PrecompiledHeader>\r\n      </PrecompiledHeader>\r\n      <WarningLevel>Level4</WarningLevel>\r\n      
<Optimization>Disabled</Optimization>\r\n      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r\n      <TreatWarningAsError>true</TreatWarningAsError>\r\n      <EnablePREfast>false</EnablePREfast>\r\n      <AdditionalIncludeDirectories>..\\include</AdditionalIncludeDirectories>\r\n    </ClCompile>\r\n    <Link>\r\n      <SubSystem>Console</SubSystem>\r\n      <GenerateDebugInformation>true</GenerateDebugInformation>\r\n      <AdditionalLibraryDirectories>$(SolutionDir)..\\dll;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r\n      <AdditionalDependencies>libzstd.lib;%(AdditionalDependencies)</AdditionalDependencies>\r\n      <ImageHasSafeExceptionHandlers>false</ImageHasSafeExceptionHandlers>\r\n    </Link>\r\n  </ItemDefinitionGroup>\r\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|x64'\">\r\n    <ClCompile>\r\n      <PrecompiledHeader>\r\n      </PrecompiledHeader>\r\n      <WarningLevel>Level4</WarningLevel>\r\n      <Optimization>Disabled</Optimization>\r\n      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r\n      <TreatWarningAsError>true</TreatWarningAsError>\r\n      <EnablePREfast>false</EnablePREfast>\r\n      <AdditionalIncludeDirectories>..\\include</AdditionalIncludeDirectories>\r\n    </ClCompile>\r\n    <Link>\r\n      <SubSystem>Console</SubSystem>\r\n      <GenerateDebugInformation>true</GenerateDebugInformation>\r\n      <AdditionalLibraryDirectories>$(SolutionDir)..\\dll;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r\n      <AdditionalDependencies>libzstd.lib;%(AdditionalDependencies)</AdditionalDependencies>\r\n    </Link>\r\n  </ItemDefinitionGroup>\r\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">\r\n    <ClCompile>\r\n      <WarningLevel>Level4</WarningLevel>\r\n      <PrecompiledHeader>\r\n      
</PrecompiledHeader>\r\n      <Optimization>MaxSpeed</Optimization>\r\n      <FunctionLevelLinking>true</FunctionLevelLinking>\r\n      <IntrinsicFunctions>true</IntrinsicFunctions>\r\n      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r\n      <EnablePREfast>false</EnablePREfast>\r\n      <AdditionalIncludeDirectories>..\\include</AdditionalIncludeDirectories>\r\n      <TreatWarningAsError>false</TreatWarningAsError>\r\n      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r\n    </ClCompile>\r\n    <Link>\r\n      <SubSystem>Console</SubSystem>\r\n      <GenerateDebugInformation>true</GenerateDebugInformation>\r\n      <EnableCOMDATFolding>true</EnableCOMDATFolding>\r\n      <OptimizeReferences>true</OptimizeReferences>\r\n      <AdditionalLibraryDirectories>$(SolutionDir)..\\dll;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r\n      <AdditionalDependencies>libzstd.lib;%(AdditionalDependencies)</AdditionalDependencies>\r\n      <ImageHasSafeExceptionHandlers>false</ImageHasSafeExceptionHandlers>\r\n    </Link>\r\n  </ItemDefinitionGroup>\r\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|x64'\">\r\n    <ClCompile>\r\n      <WarningLevel>Level4</WarningLevel>\r\n      <PrecompiledHeader>\r\n      </PrecompiledHeader>\r\n      <Optimization>MaxSpeed</Optimization>\r\n      <FunctionLevelLinking>true</FunctionLevelLinking>\r\n      <IntrinsicFunctions>true</IntrinsicFunctions>\r\n      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r\n      <TreatWarningAsError>false</TreatWarningAsError>\r\n      <EnablePREfast>false</EnablePREfast>\r\n      <AdditionalIncludeDirectories>..\\include</AdditionalIncludeDirectories>\r\n      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r\n    </ClCompile>\r\n    <Link>\r\n      <SubSystem>Console</SubSystem>\r\n      
<GenerateDebugInformation>true</GenerateDebugInformation>\r\n      <EnableCOMDATFolding>true</EnableCOMDATFolding>\r\n      <OptimizeReferences>true</OptimizeReferences>\r\n      <AdditionalLibraryDirectories>$(SolutionDir)..\\dll;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r\n      <AdditionalDependencies>libzstd.lib;%(AdditionalDependencies)</AdditionalDependencies>\r\n    </Link>\r\n  </ItemDefinitionGroup>\r\n  <ItemGroup>\r\n    <ClCompile Include=\"datagen.c\" />\r\n    <ClCompile Include=\"fullbench.c\" />\r\n  </ItemGroup>\r\n  <ItemGroup>\r\n    <ClInclude Include=\"..\\include\\zstd.h\" />\r\n  </ItemGroup>\r\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.targets\" />\r\n  <ImportGroup Label=\"ExtensionTargets\">\r\n  </ImportGroup>\r\n</Project>"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_legacy.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_LEGACY_H\n#define ZSTD_LEGACY_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Includes\n***************************************/\n#include \"mem.h\"            /* MEM_STATIC */\n#include \"error_private.h\"  /* ERROR */\n#include \"zstd_internal.h\"  /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */\n\n#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)\n#  undef ZSTD_LEGACY_SUPPORT\n#  define ZSTD_LEGACY_SUPPORT 8\n#endif\n\n#if (ZSTD_LEGACY_SUPPORT <= 1)\n#  include \"zstd_v01.h\"\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 2)\n#  include \"zstd_v02.h\"\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 3)\n#  include \"zstd_v03.h\"\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 4)\n#  include \"zstd_v04.h\"\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 5)\n#  include \"zstd_v05.h\"\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 6)\n#  include \"zstd_v06.h\"\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 7)\n#  include \"zstd_v07.h\"\n#endif\n\n/** ZSTD_isLegacy() :\n    @return : > 0 if supported by legacy decoder. 
0 otherwise.\n              return value is the version.\n*/\nMEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)\n{\n    U32 magicNumberLE;\n    if (srcSize<4) return 0;\n    magicNumberLE = MEM_readLE32(src);\n    switch(magicNumberLE)\n    {\n#if (ZSTD_LEGACY_SUPPORT <= 1)\n        case ZSTDv01_magicNumberLE:return 1;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 2)\n        case ZSTDv02_magicNumber : return 2;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 3)\n        case ZSTDv03_magicNumber : return 3;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 4)\n        case ZSTDv04_magicNumber : return 4;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 5)\n        case ZSTDv05_MAGICNUMBER : return 5;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 6)\n        case ZSTDv06_MAGICNUMBER : return 6;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 7)\n        case ZSTDv07_MAGICNUMBER : return 7;\n#endif\n        default : return 0;\n    }\n}\n\n\nMEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize)\n{\n    U32 const version = ZSTD_isLegacy(src, srcSize);\n    if (version < 5) return 0;  /* no decompressed size in frame header, or not a legacy format */\n#if (ZSTD_LEGACY_SUPPORT <= 5)\n    if (version==5) {\n        ZSTDv05_parameters fParams;\n        size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);\n        if (frResult != 0) return 0;\n        return fParams.srcSize;\n    }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 6)\n    if (version==6) {\n        ZSTDv06_frameParams fParams;\n        size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);\n        if (frResult != 0) return 0;\n        return fParams.frameContentSize;\n    }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 7)\n    if (version==7) {\n        ZSTDv07_frameParams fParams;\n        size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);\n        if (frResult != 0) return 0;\n        return fParams.frameContentSize;\n    }\n#endif\n    return 0;   /* should not be possible 
*/\n}\n\n\nMEM_STATIC size_t ZSTD_decompressLegacy(\n                     void* dst, size_t dstCapacity,\n               const void* src, size_t compressedSize,\n               const void* dict,size_t dictSize)\n{\n    U32 const version = ZSTD_isLegacy(src, compressedSize);\n    (void)dst; (void)dstCapacity; (void)dict; (void)dictSize;  /* unused when ZSTD_LEGACY_SUPPORT >= 8 */\n    switch(version)\n    {\n#if (ZSTD_LEGACY_SUPPORT <= 1)\n        case 1 :\n            return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 2)\n        case 2 :\n            return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 3)\n        case 3 :\n            return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 4)\n        case 4 :\n            return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 5)\n        case 5 :\n            {   size_t result;\n                ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();\n                if (zd==NULL) return ERROR(memory_allocation);\n                result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);\n                ZSTDv05_freeDCtx(zd);\n                return result;\n            }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 6)\n        case 6 :\n            {   size_t result;\n                ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();\n                if (zd==NULL) return ERROR(memory_allocation);\n                result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);\n                ZSTDv06_freeDCtx(zd);\n                return result;\n            }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 7)\n        case 7 :\n            {   size_t result;\n                ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();\n                if (zd==NULL) return 
ERROR(memory_allocation);\n                result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);\n                ZSTDv07_freeDCtx(zd);\n                return result;\n            }\n#endif\n        default :\n            return ERROR(prefix_unknown);\n    }\n}\n\nMEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize)\n{\n    ZSTD_frameSizeInfo frameSizeInfo;\n    U32 const version = ZSTD_isLegacy(src, srcSize);\n    switch(version)\n    {\n#if (ZSTD_LEGACY_SUPPORT <= 1)\n        case 1 :\n            ZSTDv01_findFrameSizeInfoLegacy(src, srcSize,\n                &frameSizeInfo.compressedSize,\n                &frameSizeInfo.decompressedBound);\n            break;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 2)\n        case 2 :\n            ZSTDv02_findFrameSizeInfoLegacy(src, srcSize,\n                &frameSizeInfo.compressedSize,\n                &frameSizeInfo.decompressedBound);\n            break;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 3)\n        case 3 :\n            ZSTDv03_findFrameSizeInfoLegacy(src, srcSize,\n                &frameSizeInfo.compressedSize,\n                &frameSizeInfo.decompressedBound);\n            break;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 4)\n        case 4 :\n            ZSTDv04_findFrameSizeInfoLegacy(src, srcSize,\n                &frameSizeInfo.compressedSize,\n                &frameSizeInfo.decompressedBound);\n            break;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 5)\n        case 5 :\n            ZSTDv05_findFrameSizeInfoLegacy(src, srcSize,\n                &frameSizeInfo.compressedSize,\n                &frameSizeInfo.decompressedBound);\n            break;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 6)\n        case 6 :\n            ZSTDv06_findFrameSizeInfoLegacy(src, srcSize,\n                &frameSizeInfo.compressedSize,\n                &frameSizeInfo.decompressedBound);\n            break;\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 7)\n        case 7 :\n 
           ZSTDv07_findFrameSizeInfoLegacy(src, srcSize,\n                &frameSizeInfo.compressedSize,\n                &frameSizeInfo.decompressedBound);\n            break;\n#endif\n        default :\n            frameSizeInfo.compressedSize = ERROR(prefix_unknown);\n            frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;\n            break;\n    }\n    if (!ZSTD_isError(frameSizeInfo.compressedSize) && frameSizeInfo.compressedSize > srcSize) {\n        frameSizeInfo.compressedSize = ERROR(srcSize_wrong);\n        frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;\n    }\n    return frameSizeInfo;\n}\n\nMEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize)\n{\n    ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfoLegacy(src, srcSize);\n    return frameSizeInfo.compressedSize;\n}\n\nMEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)\n{\n    switch(version)\n    {\n        default :\n        case 1 :\n        case 2 :\n        case 3 :\n            (void)legacyContext;\n            return ERROR(version_unsupported);\n#if (ZSTD_LEGACY_SUPPORT <= 4)\n        case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 5)\n        case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 6)\n        case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 7)\n        case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);\n#endif\n    }\n}\n\n\nMEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,\n                                        const void* dict, size_t dictSize)\n{\n    DEBUGLOG(5, \"ZSTD_initLegacyStream for v0.%u\", newVersion);\n    if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);\n    switch(newVersion)\n    {\n        default :\n        case 
1 :\n        case 2 :\n        case 3 :\n            (void)dict; (void)dictSize;\n            return 0;\n#if (ZSTD_LEGACY_SUPPORT <= 4)\n        case 4 :\n        {\n            ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;\n            if (dctx==NULL) return ERROR(memory_allocation);\n            ZBUFFv04_decompressInit(dctx);\n            ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize);\n            *legacyContext = dctx;\n            return 0;\n        }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 5)\n        case 5 :\n        {\n            ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;\n            if (dctx==NULL) return ERROR(memory_allocation);\n            ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize);\n            *legacyContext = dctx;\n            return 0;\n        }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 6)\n        case 6 :\n        {\n            ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;\n            if (dctx==NULL) return ERROR(memory_allocation);\n            ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize);\n            *legacyContext = dctx;\n            return 0;\n        }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 7)\n        case 7 :\n        {\n            ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? 
ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;\n            if (dctx==NULL) return ERROR(memory_allocation);\n            ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize);\n            *legacyContext = dctx;\n            return 0;\n        }\n#endif\n    }\n}\n\n\n\nMEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,\n                                              ZSTD_outBuffer* output, ZSTD_inBuffer* input)\n{\n    DEBUGLOG(5, \"ZSTD_decompressLegacyStream for v0.%u\", version);\n    switch(version)\n    {\n        default :\n        case 1 :\n        case 2 :\n        case 3 :\n            (void)legacyContext; (void)output; (void)input;\n            return ERROR(version_unsupported);\n#if (ZSTD_LEGACY_SUPPORT <= 4)\n        case 4 :\n            {\n                ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;\n                const void* src = (const char*)input->src + input->pos;\n                size_t readSize = input->size - input->pos;\n                void* dst = (char*)output->dst + output->pos;\n                size_t decodedSize = output->size - output->pos;\n                size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize);\n                output->pos += decodedSize;\n                input->pos += readSize;\n                return hintSize;\n            }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 5)\n        case 5 :\n            {\n                ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;\n                const void* src = (const char*)input->src + input->pos;\n                size_t readSize = input->size - input->pos;\n                void* dst = (char*)output->dst + output->pos;\n                size_t decodedSize = output->size - output->pos;\n                size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize);\n                output->pos += decodedSize;\n                input->pos += readSize;\n           
     return hintSize;\n            }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 6)\n        case 6 :\n            {\n                ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;\n                const void* src = (const char*)input->src + input->pos;\n                size_t readSize = input->size - input->pos;\n                void* dst = (char*)output->dst + output->pos;\n                size_t decodedSize = output->size - output->pos;\n                size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize);\n                output->pos += decodedSize;\n                input->pos += readSize;\n                return hintSize;\n            }\n#endif\n#if (ZSTD_LEGACY_SUPPORT <= 7)\n        case 7 :\n            {\n                ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;\n                const void* src = (const char*)input->src + input->pos;\n                size_t readSize = input->size - input->pos;\n                void* dst = (char*)output->dst + output->pos;\n                size_t decodedSize = output->size - output->pos;\n                size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize);\n                output->pos += decodedSize;\n                input->pos += readSize;\n                return hintSize;\n            }\n#endif\n    }\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif   /* ZSTD_LEGACY_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v01.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/******************************************\n*  Includes\n******************************************/\n#include <stddef.h>    /* size_t, ptrdiff_t */\n#include \"zstd_v01.h\"\n#include \"error_private.h\"\n\n\n/******************************************\n*  Static allocation\n******************************************/\n/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */\n#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))\n\n/* You can statically allocate Huff0 DTable as a table of unsigned short using below macro */\n#define HUF_DTABLE_SIZE_U16(maxTableLog)   (1 + (1<<maxTableLog))\n#define HUF_CREATE_STATIC_DTABLE(DTable, maxTableLog) \\\n        unsigned short DTable[HUF_DTABLE_SIZE_U16(maxTableLog)] = { maxTableLog }\n\n\n/******************************************\n*  Error Management\n******************************************/\n#define FSE_LIST_ERRORS(ITEM) \\\n        ITEM(FSE_OK_NoError) ITEM(FSE_ERROR_GENERIC) \\\n        ITEM(FSE_ERROR_tableLog_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooSmall) \\\n        ITEM(FSE_ERROR_dstSize_tooSmall) ITEM(FSE_ERROR_srcSize_wrong)\\\n        ITEM(FSE_ERROR_corruptionDetected) \\\n        ITEM(FSE_ERROR_maxCode)\n\n#define FSE_GENERATE_ENUM(ENUM) ENUM,\ntypedef enum { FSE_LIST_ERRORS(FSE_GENERATE_ENUM) } FSE_errorCodes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */\n\n\n/******************************************\n*  FSE symbol compression 
API\n******************************************/\n/*\n   This API consists of small unitary functions, which highly benefit from being inlined.\n   You will want to enable link-time-optimization to ensure these functions are properly inlined in your binary.\n   Visual seems to do it automatically.\n   For gcc or clang, you'll need to add -flto flag at compilation and linking stages.\n   If none of these solutions is applicable, include \"fse.c\" directly.\n*/\n\ntypedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */\ntypedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */\n\ntypedef struct\n{\n    size_t bitContainer;\n    int    bitPos;\n    char*  startPtr;\n    char*  ptr;\n    char*  endPtr;\n} FSE_CStream_t;\n\ntypedef struct\n{\n    ptrdiff_t   value;\n    const void* stateTable;\n    const void* symbolTT;\n    unsigned    stateLog;\n} FSE_CState_t;\n\ntypedef struct\n{\n    size_t   bitContainer;\n    unsigned bitsConsumed;\n    const char* ptr;\n    const char* start;\n} FSE_DStream_t;\n\ntypedef struct\n{\n    size_t      state;\n    const void* table;   /* precise table may vary, depending on U16 */\n} FSE_DState_t;\n\ntypedef enum { FSE_DStream_unfinished = 0,\n               FSE_DStream_endOfBuffer = 1,\n               FSE_DStream_completed = 2,\n               FSE_DStream_tooFar = 3 } FSE_DStream_status;  /* result of FSE_reloadDStream() */\n               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... ?! 
*/\n\n\n/****************************************************************\n*  Tuning parameters\n****************************************************************/\n/* MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */\n#define FSE_MAX_MEMORY_USAGE 14\n#define FSE_DEFAULT_MEMORY_USAGE 13\n\n/* FSE_MAX_SYMBOL_VALUE :\n*  Maximum symbol value authorized.\n*  Required for proper stack allocation */\n#define FSE_MAX_SYMBOL_VALUE 255\n\n\n/****************************************************************\n*  template functions type & suffix\n****************************************************************/\n#define FSE_FUNCTION_TYPE BYTE\n#define FSE_FUNCTION_EXTENSION\n\n\n/****************************************************************\n*  Byte symbol type\n****************************************************************/\ntypedef struct\n{\n    unsigned short newState;\n    unsigned char  symbol;\n    unsigned char  nbBits;\n} FSE_decode_t;   /* size == U32 */\n\n\n\n/****************************************************************\n*  Compiler specifics\n****************************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  define FORCE_INLINE static __forceinline\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */\n#else\n#  define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)\n#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#    ifdef __GNUC__\n#      define FORCE_INLINE static inline 
__attribute__((always_inline))\n#    else\n#      define FORCE_INLINE static inline\n#    endif\n#  else\n#    define FORCE_INLINE static\n#  endif /* __STDC_VERSION__ */\n#endif\n\n\n/****************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n\n#ifndef MEM_ACCESS_MODULE\n#define MEM_ACCESS_MODULE\n/****************************************************************\n*  Basic Types\n*****************************************************************/\n#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n# include <stdint.h>\ntypedef  uint8_t BYTE;\ntypedef uint16_t U16;\ntypedef  int16_t S16;\ntypedef uint32_t U32;\ntypedef  int32_t S32;\ntypedef uint64_t U64;\ntypedef  int64_t S64;\n#else\ntypedef unsigned char       BYTE;\ntypedef unsigned short      U16;\ntypedef   signed short      S16;\ntypedef unsigned int        U32;\ntypedef   signed int        S32;\ntypedef unsigned long long  U64;\ntypedef   signed long long  S64;\n#endif\n\n#endif   /* MEM_ACCESS_MODULE */\n\n/****************************************************************\n*  Memory I/O\n*****************************************************************/\n/* FSE_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. 
This method is portable but violate C standard.\n *            It can generate buggy code on targets generating assembly depending on alignment.\n *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef FSE_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define FSE_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))\n#    define FSE_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\n\nstatic unsigned FSE_32bits(void)\n{\n    return sizeof(void*)==4;\n}\n\nstatic unsigned FSE_isLittleEndian(void)\n{\n    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2)\n\nstatic U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; }\nstatic U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; }\nstatic U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\n#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;\n\nstatic U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }\nstatic U32 
FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\nstatic U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }\n\n#else\n\nstatic U16 FSE_read16(const void* memPtr)\n{\n    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic U32 FSE_read32(const void* memPtr)\n{\n    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic U64 FSE_read64(const void* memPtr)\n{\n    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\n#endif // FSE_FORCE_MEMORY_ACCESS\n\nstatic U16 FSE_readLE16(const void* memPtr)\n{\n    if (FSE_isLittleEndian())\n        return FSE_read16(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)(p[0] + (p[1]<<8));\n    }\n}\n\nstatic U32 FSE_readLE32(const void* memPtr)\n{\n    if (FSE_isLittleEndian())\n        return FSE_read32(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));\n    }\n}\n\n\nstatic U64 FSE_readLE64(const void* memPtr)\n{\n    if (FSE_isLittleEndian())\n        return FSE_read64(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)\n                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));\n    }\n}\n\nstatic size_t FSE_readLEST(const void* memPtr)\n{\n    if (FSE_32bits())\n        return (size_t)FSE_readLE32(memPtr);\n    else\n        return (size_t)FSE_readLE64(memPtr);\n}\n\n\n\n/****************************************************************\n*  Constants\n*****************************************************************/\n#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)\n#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)\n#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)\n#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)\n#define FSE_MIN_TABLELOG 5\n\n#define 
FSE_TABLELOG_ABSOLUTE_MAX 15\n#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX\n#error \"FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported\"\n#endif\n\n\n/****************************************************************\n*  Error Management\n****************************************************************/\n#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/****************************************************************\n*  Complex types\n****************************************************************/\ntypedef struct\n{\n    int deltaFindState;\n    U32 deltaNbBits;\n} FSE_symbolCompressionTransform; /* total 8 bytes */\n\ntypedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];\n\n/****************************************************************\n*  Internal functions\n****************************************************************/\nFORCE_INLINE unsigned FSE_highbit32 (U32 val)\n{\n#   if defined(_MSC_VER)   /* Visual */\n    unsigned long r;\n    _BitScanReverse ( &r, val );\n    return (unsigned) r;\n#   elif defined(__GNUC__) && (GCC_VERSION >= 304)   /* GCC Intrinsic */\n    return __builtin_clz (val) ^ 31;\n#   else   /* Software version */\n    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };\n    U32 v = val;\n    unsigned r;\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];\n    return r;\n#   endif\n}\n\n\n/****************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef 
FSE_FUNCTION_EXTENSION\n#  error \"FSE_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSE_FUNCTION_TYPE\n#  error \"FSE_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSE_CAT(X,Y) X##Y\n#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)\n#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)\n\n\n\nstatic U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }\n\n#define FSE_DECODE_TYPE FSE_decode_t\n\n\ntypedef struct {\n    U16 tableLog;\n    U16 fastMode;\n} FSE_DTableHeader;   /* sizeof U32 */\n\nstatic size_t FSE_buildDTable\n(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1;   /* because dt is unsigned, 32-bits aligned on 32-bits */\n    const U32 tableSize = 1 << tableLog;\n    const U32 tableMask = tableSize-1;\n    const U32 step = FSE_tableStep(tableSize);\n    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];\n    U32 position = 0;\n    U32 highThreshold = tableSize-1;\n    const S16 largeLimit= (S16)(1 << (tableLog-1));\n    U32 noLarge = 1;\n    U32 s;\n\n    /* Sanity Checks */\n    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge;\n    if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge;\n\n    /* Init, lay down lowprob symbols */\n    DTableH[0].tableLog = (U16)tableLog;\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        if (normalizedCounter[s]==-1)\n        {\n            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;\n            symbolNext[s] = 1;\n        }\n        else\n        {\n            if (normalizedCounter[s] >= largeLimit) noLarge=0;\n            symbolNext[s] = normalizedCounter[s];\n        }\n    }\n\n    /* Spread symbols */\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        int i;\n        for (i=0; 
i<normalizedCounter[s]; i++)\n        {\n            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;\n            position = (position + step) & tableMask;\n            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n        }\n    }\n\n    if (position!=0) return (size_t)-FSE_ERROR_GENERIC;   /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n\n    /* Build Decoding table */\n    {\n        U32 i;\n        for (i=0; i<tableSize; i++)\n        {\n            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);\n            U16 nextState = symbolNext[symbol]++;\n            tableDecode[i].nbBits = (BYTE) (tableLog - FSE_highbit32 ((U32)nextState) );\n            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);\n        }\n    }\n\n    DTableH->fastMode = (U16)noLarge;\n    return 0;\n}\n\n\n/******************************************\n*  FSE byte symbol\n******************************************/\n#ifndef FSE_COMMONDEFS_ONLY\n\nstatic unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); }\n\nstatic short FSE_abs(short a)\n{\n    return a<0? 
-a : a;\n}\n\n\n/****************************************************************\n*  Header bitstream management\n****************************************************************/\nstatic size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,\n                 const void* headerBuffer, size_t hbSize)\n{\n    const BYTE* const istart = (const BYTE*) headerBuffer;\n    const BYTE* const iend = istart + hbSize;\n    const BYTE* ip = istart;\n    int nbBits;\n    int remaining;\n    int threshold;\n    U32 bitStream;\n    int bitCount;\n    unsigned charnum = 0;\n    int previous0 = 0;\n\n    if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong;\n    bitStream = FSE_readLE32(ip);\n    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */\n    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge;\n    bitStream >>= 4;\n    bitCount = 4;\n    *tableLogPtr = nbBits;\n    remaining = (1<<nbBits)+1;\n    threshold = 1<<nbBits;\n    nbBits++;\n\n    while ((remaining>1) && (charnum<=*maxSVPtr))\n    {\n        if (previous0)\n        {\n            unsigned n0 = charnum;\n            while ((bitStream & 0xFFFF) == 0xFFFF)\n            {\n                n0+=24;\n                if (ip < iend-5)\n                {\n                    ip+=2;\n                    bitStream = FSE_readLE32(ip) >> bitCount;\n                }\n                else\n                {\n                    bitStream >>= 16;\n                    bitCount+=16;\n                }\n            }\n            while ((bitStream & 3) == 3)\n            {\n                n0+=3;\n                bitStream>>=2;\n                bitCount+=2;\n            }\n            n0 += bitStream & 3;\n            bitCount += 2;\n            if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall;\n            while (charnum < n0) normalizedCounter[charnum++] = 0;\n            if ((ip <= iend-7) || (ip + (bitCount>>3) 
<= iend-4))\n            {\n                ip += bitCount>>3;\n                bitCount &= 7;\n                bitStream = FSE_readLE32(ip) >> bitCount;\n            }\n            else\n                bitStream >>= 2;\n        }\n        {\n            const short max = (short)((2*threshold-1)-remaining);\n            short count;\n\n            if ((bitStream & (threshold-1)) < (U32)max)\n            {\n                count = (short)(bitStream & (threshold-1));\n                bitCount   += nbBits-1;\n            }\n            else\n            {\n                count = (short)(bitStream & (2*threshold-1));\n                if (count >= threshold) count -= max;\n                bitCount   += nbBits;\n            }\n\n            count--;   /* extra accuracy */\n            remaining -= FSE_abs(count);\n            normalizedCounter[charnum++] = count;\n            previous0 = !count;\n            while (remaining < threshold)\n            {\n                nbBits--;\n                threshold >>= 1;\n            }\n\n            {\n                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))\n                {\n                    ip += bitCount>>3;\n                    bitCount &= 7;\n                }\n                else\n                {\n                    bitCount -= (int)(8 * (iend - 4 - ip));\n                    ip = iend - 4;\n                }\n                bitStream = FSE_readLE32(ip) >> (bitCount & 31);\n            }\n        }\n    }\n    if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC;\n    *maxSVPtr = charnum-1;\n\n    ip += (bitCount+7)>>3;\n    if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong;\n    return ip-istart;\n}\n\n\n/*********************************************************\n*  Decompression (Byte symbols)\n*********************************************************/\nstatic size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* 
const DTableH = (FSE_DTableHeader*)ptr;\n    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->newState = 0;\n    cell->symbol = symbolValue;\n    cell->nbBits = 0;\n\n    return 0;\n}\n\n\nstatic size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSymbolValue = tableMask;\n    unsigned s;\n\n    /* Sanity checks */\n    if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC;             /* min size */\n\n    /* Build Decoding Table */\n    DTableH->tableLog = (U16)nbBits;\n    DTableH->fastMode = 1;\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        dinfo[s].newState = 0;\n        dinfo[s].symbol = (BYTE)s;\n        dinfo[s].nbBits = (BYTE)nbBits;\n    }\n\n    return 0;\n}\n\n\n/* FSE_initDStream\n * Initialize a FSE_DStream_t.\n * srcBuffer must point at the beginning of an FSE block.\n * The function result is the size of the FSE_block (== srcSize).\n * If srcSize is too small, the function will return an errorCode;\n */\nstatic size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize)\n{\n    if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong;\n\n    if (srcSize >=  sizeof(size_t))\n    {\n        U32 contain32;\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);\n        bitD->bitContainer = FSE_readLEST(bitD->ptr);\n        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];\n        if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC;   /* stop bit not present */\n        bitD->bitsConsumed = 8 - FSE_highbit32(contain32);\n    }\n    else\n    {\n        U32 
contain32;\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = bitD->start;\n        bitD->bitContainer = *(const BYTE*)(bitD->start);\n        switch(srcSize)\n        {\n            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);\n                    /* fallthrough */\n            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);\n                    /* fallthrough */\n            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);\n                    /* fallthrough */\n            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;\n                    /* fallthrough */\n            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;\n                    /* fallthrough */\n            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;\n                    /* fallthrough */\n            default:;\n        }\n        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];\n        if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC;   /* stop bit not present */\n        bitD->bitsConsumed = 8 - FSE_highbit32(contain32);\n        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;\n    }\n\n    return srcSize;\n}\n\n\n/*!FSE_lookBits\n * Provides next n bits from the bitContainer.\n * bitContainer is not modified (bits are still present for next read/look)\n * On 32-bits, maxNbBits==25\n * On 64-bits, maxNbBits==57\n * return : value extracted.\n */\nstatic size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits)\n{\n    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);\n}\n\nstatic size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits)   /* only if nbBits >= 1 !! 
*/\n{\n    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);\n}\n\nstatic void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits)\n{\n    bitD->bitsConsumed += nbBits;\n}\n\n\n/*!FSE_readBits\n * Read next n bits from the bitContainer.\n * On 32-bits, don't read more than maxNbBits==25\n * On 64-bits, don't read more than maxNbBits==57\n * Use the fast variant *only* if n >= 1.\n * return : value extracted.\n */\nstatic size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits)\n{\n    size_t value = FSE_lookBits(bitD, nbBits);\n    FSE_skipBits(bitD, nbBits);\n    return value;\n}\n\nstatic size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits)   /* only if nbBits >= 1 !! */\n{\n    size_t value = FSE_lookBitsFast(bitD, nbBits);\n    FSE_skipBits(bitD, nbBits);\n    return value;\n}\n\nstatic unsigned FSE_reloadDStream(FSE_DStream_t* bitD)\n{\n    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */\n        return FSE_DStream_tooFar;\n\n    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))\n    {\n        bitD->ptr -= bitD->bitsConsumed >> 3;\n        bitD->bitsConsumed &= 7;\n        bitD->bitContainer = FSE_readLEST(bitD->ptr);\n        return FSE_DStream_unfinished;\n    }\n    if (bitD->ptr == bitD->start)\n    {\n        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer;\n        return FSE_DStream_completed;\n    }\n    {\n        U32 nbBytes = bitD->bitsConsumed >> 3;\n        U32 result = FSE_DStream_unfinished;\n        if (bitD->ptr - nbBytes < bitD->start)\n        {\n            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */\n            result = FSE_DStream_endOfBuffer;\n        }\n        bitD->ptr -= nbBytes;\n        bitD->bitsConsumed -= nbBytes*8;\n        bitD->bitContainer = FSE_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */\n        return 
result;\n    }\n}\n\n\nstatic void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt)\n{\n    const void* ptr = dt;\n    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;\n    DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog);\n    FSE_reloadDStream(bitD);\n    DStatePtr->table = dt + 1;\n}\n\nstatic BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)\n{\n    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    const U32  nbBits = DInfo.nbBits;\n    BYTE symbol = DInfo.symbol;\n    size_t lowBits = FSE_readBits(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\nstatic BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)\n{\n    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    const U32 nbBits = DInfo.nbBits;\n    BYTE symbol = DInfo.symbol;\n    size_t lowBits = FSE_readBitsFast(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\n/* FSE_endOfDStream\n   Tells if bitD has reached end of bitStream or not */\n\nstatic unsigned FSE_endOfDStream(const FSE_DStream_t* bitD)\n{\n    return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8));\n}\n\nstatic unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)\n{\n    return DStatePtr->state == 0;\n}\n\n\nFORCE_INLINE size_t FSE_decompress_usingDTable_generic(\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const FSE_DTable* dt, const unsigned fast)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const omax = op + maxDstSize;\n    BYTE* const olimit = omax-3;\n\n    FSE_DStream_t bitD;\n    FSE_DState_t state1;\n    FSE_DState_t state2;\n    size_t errorCode;\n\n    /* Init */\n    errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by 
maxCompressed Size */\n    if (FSE_isError(errorCode)) return errorCode;\n\n    FSE_initDState(&state1, &bitD, dt);\n    FSE_initDState(&state2, &bitD, dt);\n\n#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)\n\n    /* 4 symbols per loop */\n    for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op<olimit) ; op+=4)\n    {\n        op[0] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            FSE_reloadDStream(&bitD);\n\n        op[1] = FSE_GETSYMBOL(&state2);\n\n        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } }\n\n        op[2] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            FSE_reloadDStream(&bitD);\n\n        op[3] = FSE_GETSYMBOL(&state2);\n    }\n\n    /* tail */\n    /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */\n    while (1)\n    {\n        if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )\n            break;\n\n        *op++ = FSE_GETSYMBOL(&state1);\n\n        if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )\n            break;\n\n        *op++ = FSE_GETSYMBOL(&state2);\n    }\n\n    /* end ? 
*/\n    if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))\n        return op-ostart;\n\n    if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */\n\n    return (size_t)-FSE_ERROR_corruptionDetected;\n}\n\n\nstatic size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,\n                            const void* cSrc, size_t cSrcSize,\n                            const FSE_DTable* dt)\n{\n    FSE_DTableHeader DTableH;\n    memcpy(&DTableH, dt, sizeof(DTableH));   /* memcpy() into local variable, to avoid strict aliasing warning */\n\n    /* select fast mode (static) */\n    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);\n    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);\n}\n\n\nstatic size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)\n{\n    const BYTE* const istart = (const BYTE*)cSrc;\n    const BYTE* ip = istart;\n    short counting[FSE_MAX_SYMBOL_VALUE+1];\n    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */\n    unsigned tableLog;\n    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;\n    size_t errorCode;\n\n    if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong;   /* too small input size */\n\n    /* normal FSE decoding mode */\n    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);\n    if (FSE_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;   /* too small input size */\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);\n    if (FSE_isError(errorCode)) return errorCode;\n\n    /* always return, even if it is an error code */\n    return FSE_decompress_usingDTable (dst, maxDstSize, ip, 
cSrcSize, dt);\n}\n\n\n\n/* *******************************************************\n*  Huff0 : Huffman block compression\n*********************************************************/\n#define HUF_MAX_SYMBOL_VALUE 255\n#define HUF_DEFAULT_TABLELOG  12       /* used by default, when not specified */\n#define HUF_MAX_TABLELOG  12           /* max possible tableLog; for allocation purpose; can be modified */\n#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */\n#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)\n#  error \"HUF_MAX_TABLELOG is too large !\"\n#endif\n\ntypedef struct HUF_CElt_s {\n  U16  val;\n  BYTE nbBits;\n} HUF_CElt ;\n\ntypedef struct nodeElt_s {\n    U32 count;\n    U16 parent;\n    BYTE byte;\n    BYTE nbBits;\n} nodeElt;\n\n\n/* *******************************************************\n*  Huff0 : Huffman block decompression\n*********************************************************/\ntypedef struct {\n    BYTE byte;\n    BYTE nbBits;\n} HUF_DElt;\n\nstatic size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize)\n{\n    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];  /* large enough for values from 0 to 16 */\n    U32 weightTotal;\n    U32 maxBits;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize;\n    size_t oSize;\n    U32 n;\n    U32 nextRankStart;\n    void* ptr = DTable+1;\n    HUF_DElt* const dt = (HUF_DElt*)ptr;\n\n    if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;\n    iSize = ip[0];\n\n    FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16));   /* if compilation fails here, assertion is false */\n    //memset(huffWeight, 0, sizeof(huffWeight));   /* should not be necessary, but some analyzer complain ... 
*/\n    if (iSize >= 128)  /* special header */\n    {\n        if (iSize >= (242))   /* RLE */\n        {\n            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };\n            oSize = l[iSize-242];\n            memset(huffWeight, 1, sizeof(huffWeight));\n            iSize = 0;\n        }\n        else   /* Incompressible */\n        {\n            oSize = iSize - 127;\n            iSize = ((oSize+1)/2);\n            if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;\n            ip += 1;\n            for (n=0; n<oSize; n+=2)\n            {\n                huffWeight[n]   = ip[n/2] >> 4;\n                huffWeight[n+1] = ip[n/2] & 15;\n            }\n        }\n    }\n    else  /* header compressed with FSE (normal case) */\n    {\n        if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;\n        oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize);   /* max 255 values decoded, last one is implied */\n        if (FSE_isError(oSize)) return oSize;\n    }\n\n    /* collect weight stats */\n    memset(rankVal, 0, sizeof(rankVal));\n    weightTotal = 0;\n    for (n=0; n<oSize; n++)\n    {\n        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected;\n        rankVal[huffWeight[n]]++;\n        weightTotal += (1 << huffWeight[n]) >> 1;\n    }\n    if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected;\n\n    /* get last non-null symbol weight (implied, total must be 2^n) */\n    maxBits = FSE_highbit32(weightTotal) + 1;\n    if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge;   /* DTable is too small */\n    DTable[0] = (U16)maxBits;\n    {\n        U32 total = 1 << maxBits;\n        U32 rest = total - weightTotal;\n        U32 verif = 1 << FSE_highbit32(rest);\n        U32 lastWeight = FSE_highbit32(rest) + 1;\n        if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected;    /* last value must be a clean power of 
2 */\n        huffWeight[oSize] = (BYTE)lastWeight;\n        rankVal[lastWeight]++;\n    }\n\n    /* check tree construction validity */\n    if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected;   /* by construction : at least 2 elts of rank 1, must be even */\n\n    /* Prepare ranks */\n    nextRankStart = 0;\n    for (n=1; n<=maxBits; n++)\n    {\n        U32 current = nextRankStart;\n        nextRankStart += (rankVal[n] << (n-1));\n        rankVal[n] = current;\n    }\n\n    /* fill DTable */\n    for (n=0; n<=oSize; n++)\n    {\n        const U32 w = huffWeight[n];\n        const U32 length = (1 << w) >> 1;\n        U32 i;\n        HUF_DElt D;\n        D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w);\n        for (i = rankVal[w]; i < rankVal[w] + length; i++)\n            dt[i] = D;\n        rankVal[w] += length;\n    }\n\n    return iSize+1;\n}\n\n\nstatic BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog)\n{\n        const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */\n        const BYTE c = dt[val].byte;\n        FSE_skipBits(Dstream, dt[val].nbBits);\n        return c;\n}\n\nstatic size_t HUF_decompress_usingDTable(   /* -3% slower when non static */\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U16* DTable)\n{\n    if (cSrcSize < 6) return (size_t)-FSE_ERROR_srcSize_wrong;\n    {\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* op = ostart;\n        BYTE* const omax = op + maxDstSize;\n        BYTE* const olimit = omax-15;\n\n        const void* ptr = DTable;\n        const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;\n        const U32 dtLog = DTable[0];\n        size_t errorCode;\n        U32 reloadStatus;\n\n        /* Init */\n\n        const U16* jumpTable = (const U16*)cSrc;\n        const size_t length1 = FSE_readLE16(jumpTable);\n        const size_t length2 = FSE_readLE16(jumpTable+1);\n        
const size_t length3 = FSE_readLE16(jumpTable+2);\n        const size_t length4 = cSrcSize - 6 - length1 - length2 - length3;   // check coherency !!\n        const char* const start1 = (const char*)(cSrc) + 6;\n        const char* const start2 = start1 + length1;\n        const char* const start3 = start2 + length2;\n        const char* const start4 = start3 + length3;\n        FSE_DStream_t bitD1, bitD2, bitD3, bitD4;\n\n        if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;\n\n        errorCode = FSE_initDStream(&bitD1, start1, length1);\n        if (FSE_isError(errorCode)) return errorCode;\n        errorCode = FSE_initDStream(&bitD2, start2, length2);\n        if (FSE_isError(errorCode)) return errorCode;\n        errorCode = FSE_initDStream(&bitD3, start3, length3);\n        if (FSE_isError(errorCode)) return errorCode;\n        errorCode = FSE_initDStream(&bitD4, start4, length4);\n        if (FSE_isError(errorCode)) return errorCode;\n\n        reloadStatus=FSE_reloadDStream(&bitD2);\n\n        /* 16 symbols per loop */\n        for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit);  /* D2-3-4 are supposed to be synchronized and finish together */\n            op+=16, reloadStatus = FSE_reloadDStream(&bitD2) | FSE_reloadDStream(&bitD3) | FSE_reloadDStream(&bitD4), FSE_reloadDStream(&bitD1))\n        {\n    #define HUF_DECODE_SYMBOL_0(n, Dstream) \\\n            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog);\n\n    #define HUF_DECODE_SYMBOL_1(n, Dstream) \\\n            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \\\n            if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream)\n\n    #define HUF_DECODE_SYMBOL_2(n, Dstream) \\\n            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \\\n            if (FSE_32bits()) FSE_reloadDStream(&Dstream)\n\n            HUF_DECODE_SYMBOL_1( 0, bitD1);\n            HUF_DECODE_SYMBOL_1( 1, bitD2);\n            HUF_DECODE_SYMBOL_1( 2, bitD3);\n           
 HUF_DECODE_SYMBOL_1( 3, bitD4);\n            HUF_DECODE_SYMBOL_2( 4, bitD1);\n            HUF_DECODE_SYMBOL_2( 5, bitD2);\n            HUF_DECODE_SYMBOL_2( 6, bitD3);\n            HUF_DECODE_SYMBOL_2( 7, bitD4);\n            HUF_DECODE_SYMBOL_1( 8, bitD1);\n            HUF_DECODE_SYMBOL_1( 9, bitD2);\n            HUF_DECODE_SYMBOL_1(10, bitD3);\n            HUF_DECODE_SYMBOL_1(11, bitD4);\n            HUF_DECODE_SYMBOL_0(12, bitD1);\n            HUF_DECODE_SYMBOL_0(13, bitD2);\n            HUF_DECODE_SYMBOL_0(14, bitD3);\n            HUF_DECODE_SYMBOL_0(15, bitD4);\n        }\n\n        if (reloadStatus!=FSE_DStream_completed)   /* not complete : some bitStream might be FSE_DStream_unfinished */\n            return (size_t)-FSE_ERROR_corruptionDetected;\n\n        /* tail */\n        {\n            // bitTail = bitD1;   // *much* slower : -20% !??!\n            FSE_DStream_t bitTail;\n            bitTail.ptr = bitD1.ptr;\n            bitTail.bitsConsumed = bitD1.bitsConsumed;\n            bitTail.bitContainer = bitD1.bitContainer;   // required in case of FSE_DStream_endOfBuffer\n            bitTail.start = start1;\n            for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++)\n            {\n                HUF_DECODE_SYMBOL_0(0, bitTail);\n            }\n\n            if (FSE_endOfDStream(&bitTail))\n                return op-ostart;\n        }\n\n        if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */\n\n        return (size_t)-FSE_ERROR_corruptionDetected;\n    }\n}\n\n\nstatic size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLE(DTable, HUF_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n    size_t errorCode;\n\n    errorCode = HUF_readDTable (DTable, cSrc, cSrcSize);\n    if (FSE_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;\n   
 ip += errorCode;\n    cSrcSize -= errorCode;\n\n    return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable);\n}\n\n\n#endif   /* FSE_COMMONDEFS_ONLY */\n\n/*\n    zstd - standard compression library\n    Copyright (C) 2014-2015, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n/****************************************************************\n*  Tuning parameters\n*****************************************************************/\n/* MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect */\n#define ZSTD_MEMORY_USAGE 17\n\n\n/**************************************\n   CPU Feature Detection\n**************************************/\n/*\n * Automated efficient unaligned memory access detection\n * Based on known hardware architectures\n * This list will be updated thanks to feedbacks\n */\n#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \\\n    || defined(__ARM_FEATURE_UNALIGNED) \\\n    || defined(__i386__) || defined(__x86_64__) \\\n    || defined(_M_IX86) || defined(_M_X64) \\\n    || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \\\n    || (defined(_M_ARM) && (_M_ARM >= 7))\n#  define ZSTD_UNALIGNED_ACCESS 1\n#else\n#  define ZSTD_UNALIGNED_ACCESS 0\n#endif\n\n\n/********************************************************\n*  Includes\n*********************************************************/\n#include <stdlib.h>      /* calloc 
*/\n#include <string.h>      /* memcpy, memmove */\n#include <stdio.h>       /* debug : printf */\n\n\n/********************************************************\n*  Compiler specifics\n*********************************************************/\n#ifdef __AVX2__\n#  include <immintrin.h>   /* AVX2 intrinsics */\n#endif\n\n#ifdef _MSC_VER    /* Visual Studio */\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */\n#endif\n\n\n#ifndef MEM_ACCESS_MODULE\n#define MEM_ACCESS_MODULE\n/********************************************************\n*  Basic Types\n*********************************************************/\n#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n# include <stdint.h>\ntypedef  uint8_t BYTE;\ntypedef uint16_t U16;\ntypedef  int16_t S16;\ntypedef uint32_t U32;\ntypedef  int32_t S32;\ntypedef uint64_t U64;\n#else\ntypedef unsigned char       BYTE;\ntypedef unsigned short      U16;\ntypedef   signed short      S16;\ntypedef unsigned int        U32;\ntypedef   signed int        S32;\ntypedef unsigned long long  U64;\n#endif\n\n#endif   /* MEM_ACCESS_MODULE */\n\n\n/********************************************************\n*  Constants\n*********************************************************/\nstatic const U32 ZSTD_magicNumber = 0xFD2FB51E;   /* 3rd version : seqNb header */\n\n#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)\n#define HASH_TABLESIZE (1 << HASH_LOG)\n#define HASH_MASK (HASH_TABLESIZE - 1)\n\n#define KNUTH 2654435761\n\n#define BIT7 128\n#define BIT6  64\n#define BIT5  32\n#define BIT4  16\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define BLOCKSIZE (128 KB)                 /* define, for static allocation */\n\n#define WORKPLACESIZE (BLOCKSIZE*3)\n#define MINMATCH 4\n#define MLbits   7\n#define LLbits   
6\n#define Offbits  5\n#define MaxML  ((1<<MLbits )-1)\n#define MaxLL  ((1<<LLbits )-1)\n#define MaxOff ((1<<Offbits)-1)\n#define LitFSELog  11\n#define MLFSELog   10\n#define LLFSELog   10\n#define OffFSELog   9\n#define MAX(a,b) ((a)<(b)?(b):(a))\n#define MaxSeq MAX(MaxLL, MaxML)\n\n#define LITERAL_NOENTROPY 63\n#define COMMAND_NOENTROPY 7   /* to remove */\n\n#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)\n\nstatic const size_t ZSTD_blockHeaderSize = 3;\nstatic const size_t ZSTD_frameHeaderSize = 4;\n\n\n/********************************************************\n*  Memory operations\n*********************************************************/\nstatic unsigned ZSTD_32bits(void) { return sizeof(void*)==4; }\n\nstatic unsigned ZSTD_isLittleEndian(void)\n{\n    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\nstatic U16    ZSTD_read16(const void* p) { U16 r; memcpy(&r, p, sizeof(r)); return r; }\n\nstatic void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }\n\nstatic void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }\n\n#define COPY8(d,s)    { ZSTD_copy8(d,s); d+=8; s+=8; }\n\nstatic void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)\n{\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + length;\n    while (op < oend) COPY8(op, ip);\n}\n\nstatic U16 ZSTD_readLE16(const void* memPtr)\n{\n    if (ZSTD_isLittleEndian()) return ZSTD_read16(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)((U16)p[0] + ((U16)p[1]<<8));\n    }\n}\n\nstatic U32 ZSTD_readLE24(const void* memPtr)\n{\n    return ZSTD_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);\n}\n\nstatic U32 ZSTD_readBE32(const void* memPtr)\n{\n    const BYTE* p = (const BYTE*)memPtr;\n    return (U32)(((U32)p[0]<<24) + ((U32)p[1]<<16) + ((U32)p[2]<<8) + 
((U32)p[3]<<0));\n}\n\n\n/**************************************\n*  Local structures\n***************************************/\ntypedef struct ZSTD_Cctx_s ZSTD_Cctx;\n\ntypedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;\n\ntypedef struct\n{\n    blockType_t blockType;\n    U32 origSize;\n} blockProperties_t;\n\ntypedef struct {\n    void* buffer;\n    U32*  offsetStart;\n    U32*  offset;\n    BYTE* offCodeStart;\n    BYTE* offCode;\n    BYTE* litStart;\n    BYTE* lit;\n    BYTE* litLengthStart;\n    BYTE* litLength;\n    BYTE* matchLengthStart;\n    BYTE* matchLength;\n    BYTE* dumpsStart;\n    BYTE* dumps;\n} seqStore_t;\n\n\ntypedef struct ZSTD_Cctx_s\n{\n    const BYTE* base;\n    U32 current;\n    U32 nextUpdate;\n    seqStore_t seqStore;\n#ifdef __AVX2__\n    __m256i hashTable[HASH_TABLESIZE>>3];\n#else\n    U32 hashTable[HASH_TABLESIZE];\n#endif\n    BYTE buffer[WORKPLACESIZE];\n} cctxi_t;\n\n\n\n\n/**************************************\n*  Error Management\n**************************************/\n/* published entry point */\nunsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); }\n\n\n/**************************************\n*  Tool functions\n**************************************/\n#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */\n#define ZSTD_VERSION_MINOR    1    /* for new (non-breaking) interface capabilities */\n#define ZSTD_VERSION_RELEASE  3    /* for tweaks, bug-fixes, or development */\n#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)\n\n/**************************************************************\n*   Decompression code\n**************************************************************/\n\nstatic size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)\n{\n    const BYTE* const in = (const BYTE* const)src;\n    BYTE headerFlags;\n    U32 cSize;\n\n    if (srcSize < 3) return ERROR(srcSize_wrong);\n\n  
  headerFlags = *in;\n    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);\n\n    bpPtr->blockType = (blockType_t)(headerFlags >> 6);\n    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;\n\n    if (bpPtr->blockType == bt_end) return 0;\n    if (bpPtr->blockType == bt_rle) return 1;\n    return cSize;\n}\n\n\nstatic size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);\n    memcpy(dst, src, srcSize);\n    return srcSize;\n}\n\n\nstatic size_t ZSTD_decompressLiterals(void* ctx,\n                                      void* dst, size_t maxDstSize,\n                                const void* src, size_t srcSize)\n{\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + maxDstSize;\n    const BYTE* ip = (const BYTE*)src;\n    size_t errorCode;\n    size_t litSize;\n\n    /* check : minimum 2, for litSize, +1, for content */\n    if (srcSize <= 3) return ERROR(corruption_detected);\n\n    litSize = ip[1] + (ip[0]<<8);\n    litSize += ((ip[-3] >> 3) & 7) << 16;   // mmmmh....\n    op = oend - litSize;\n\n    (void)ctx;\n    if (litSize > maxDstSize) return ERROR(dstSize_tooSmall);\n    errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2);\n    if (FSE_isError(errorCode)) return ERROR(GENERIC);\n    return litSize;\n}\n\n\nstatic size_t ZSTDv01_decodeLiteralsBlock(void* ctx,\n                                void* dst, size_t maxDstSize,\n                          const BYTE** litStart, size_t* litSize,\n                          const void* src, size_t srcSize)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* ip = istart;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* const oend = ostart + maxDstSize;\n    blockProperties_t litbp;\n\n    size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp);\n    if (ZSTDv01_isError(litcSize)) return litcSize;\n    if (litcSize > srcSize - ZSTD_blockHeaderSize) return 
ERROR(srcSize_wrong);\n    ip += ZSTD_blockHeaderSize;\n\n    switch(litbp.blockType)\n    {\n    case bt_raw:\n        *litStart = ip;\n        ip += litcSize;\n        *litSize = litcSize;\n        break;\n    case bt_rle:\n        {\n            size_t rleSize = litbp.origSize;\n            if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall);\n            if (!srcSize) return ERROR(srcSize_wrong);\n            memset(oend - rleSize, *ip, rleSize);\n            *litStart = oend - rleSize;\n            *litSize = rleSize;\n            ip++;\n            break;\n        }\n    case bt_compressed:\n        {\n            size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize);\n            if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize;\n            *litStart = oend - decodedLitSize;\n            *litSize = decodedLitSize;\n            ip += litcSize;\n            break;\n        }\n    case bt_end:\n    default:\n        return ERROR(GENERIC);\n    }\n\n    return ip-istart;\n}\n\n\nstatic size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,\n                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,\n                         const void* src, size_t srcSize)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* ip = istart;\n    const BYTE* const iend = istart + srcSize;\n    U32 LLtype, Offtype, MLtype;\n    U32 LLlog, Offlog, MLlog;\n    size_t dumpsLength;\n\n    /* check */\n    if (srcSize < 5) return ERROR(srcSize_wrong);\n\n    /* SeqHead */\n    *nbSeq = ZSTD_readLE16(ip); ip+=2;\n    LLtype  = *ip >> 6;\n    Offtype = (*ip >> 4) & 3;\n    MLtype  = (*ip >> 2) & 3;\n    if (*ip & 2)\n    {\n        dumpsLength  = ip[2];\n        dumpsLength += ip[1] << 8;\n        ip += 3;\n    }\n    else\n    {\n        dumpsLength  = ip[1];\n        dumpsLength += (ip[0] & 1) << 8;\n        ip += 2;\n    }\n    *dumpsPtr = ip;\n    ip 
+= dumpsLength;\n    *dumpsLengthPtr = dumpsLength;\n\n    /* check */\n    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are \"raw\", hence no header, but at least xxLog bits per type */\n\n    /* sequences */\n    {\n        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */\n        size_t headerSize;\n\n        /* Build DTables */\n        switch(LLtype)\n        {\n        case bt_rle :\n            LLlog = 0;\n            FSE_buildDTable_rle(DTableLL, *ip++); break;\n        case bt_raw :\n            LLlog = LLbits;\n            FSE_buildDTable_raw(DTableLL, LLbits); break;\n        default :\n            {   U32 max = MaxLL;\n                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (LLlog > LLFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableLL, norm, max, LLlog);\n        }   }\n\n        switch(Offtype)\n        {\n        case bt_rle :\n            Offlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSE_buildDTable_rle(DTableOffb, *ip++); break;\n        case bt_raw :\n            Offlog = Offbits;\n            FSE_buildDTable_raw(DTableOffb, Offbits); break;\n        default :\n            {   U32 max = MaxOff;\n                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (Offlog > OffFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableOffb, norm, max, Offlog);\n        }   }\n\n        switch(MLtype)\n        {\n        case bt_rle :\n            MLlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : \"raw\", hence no header, but at least xxLog bits */\n           
 FSE_buildDTable_rle(DTableML, *ip++); break;\n        case bt_raw :\n            MLlog = MLbits;\n            FSE_buildDTable_raw(DTableML, MLbits); break;\n        default :\n            {   U32 max = MaxML;\n                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (MLlog > MLFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableML, norm, max, MLlog);\n    }   }   }\n\n    return ip-istart;\n}\n\n\ntypedef struct {\n    size_t litLength;\n    size_t offset;\n    size_t matchLength;\n} seq_t;\n\ntypedef struct {\n    FSE_DStream_t DStream;\n    FSE_DState_t stateLL;\n    FSE_DState_t stateOffb;\n    FSE_DState_t stateML;\n    size_t prevOffset;\n    const BYTE* dumps;\n    const BYTE* dumpsEnd;\n} seqState_t;\n\n\nstatic void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)\n{\n    size_t litLength;\n    size_t prevOffset;\n    size_t offset;\n    size_t matchLength;\n    const BYTE* dumps = seqState->dumps;\n    const BYTE* const de = seqState->dumpsEnd;\n\n    /* Literal length */\n    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));\n    prevOffset = litLength ? seq->offset : seqState->prevOffset;\n    seqState->prevOffset = seq->offset;\n    if (litLength == MaxLL)\n    {\n        const U32 add = dumps<de ? 
*dumps++ : 0;\n        if (add < 255) litLength += add;\n        else\n        {\n            if (dumps<=(de-3))\n            {\n                litLength = ZSTD_readLE24(dumps);\n                dumps += 3;\n            }\n        }\n    }\n\n    /* Offset */\n    {\n        U32 offsetCode, nbBits;\n        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));\n        if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));\n        nbBits = offsetCode - 1;\n        if (offsetCode==0) nbBits = 0;   /* cmove */\n        offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits);\n        if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));\n        if (offsetCode==0) offset = prevOffset;\n    }\n\n    /* MatchLength */\n    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));\n    if (matchLength == MaxML)\n    {\n        const U32 add = dumps<de ? *dumps++ : 0;\n        if (add < 255) matchLength += add;\n        else\n        {\n            if (dumps<=(de-3))\n            {\n                matchLength = ZSTD_readLE24(dumps);\n                dumps += 3;\n            }\n        }\n    }\n    matchLength += MINMATCH;\n\n    /* save result */\n    seq->litLength = litLength;\n    seq->offset = offset;\n    seq->matchLength = matchLength;\n    seqState->dumps = dumps;\n}\n\n\nstatic size_t ZSTD_execSequence(BYTE* op,\n                                seq_t sequence,\n                                const BYTE** litPtr, const BYTE* const litLimit,\n                                BYTE* const base, BYTE* const oend)\n{\n    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */\n    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */\n    const BYTE* const ostart = op;\n    const size_t litLength = sequence.litLength;\n    BYTE* const endMatch = op + litLength + sequence.matchLength;    /* risk : address space overflow (32-bits) 
*/\n    const BYTE* const litEnd = *litPtr + litLength;\n\n    /* check */\n    if (endMatch > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */\n    if (litEnd > litLimit) return ERROR(corruption_detected);\n    if (sequence.matchLength > (size_t)(*litPtr-op))  return ERROR(dstSize_tooSmall);    /* overwrite literal segment */\n\n    /* copy Literals */\n    if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8))\n        memmove(op, *litPtr, litLength);   /* overwrite risk */\n    else\n        ZSTD_wildcopy(op, *litPtr, litLength);\n    op += litLength;\n    *litPtr = litEnd;   /* update for next sequence */\n\n    /* check : last match must be at a minimum distance of 8 from end of dest buffer */\n    if (oend-op < 8) return ERROR(dstSize_tooSmall);\n\n    /* copy Match */\n    {\n        const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12);\n        const BYTE* match = op - sequence.offset;            /* possible underflow at op - offset ? 
*/\n        size_t qutt = 12;\n        U64 saved[2];\n\n        /* check */\n        if (match < base) return ERROR(corruption_detected);\n        if (sequence.offset > (size_t)base) return ERROR(corruption_detected);\n\n        /* save beginning of literal sequence, in case of write overlap */\n        if (overlapRisk)\n        {\n            if ((endMatch + qutt) > oend) qutt = oend-endMatch;\n            memcpy(saved, endMatch, qutt);\n        }\n\n        if (sequence.offset < 8)\n        {\n            const int dec64 = dec64table[sequence.offset];\n            op[0] = match[0];\n            op[1] = match[1];\n            op[2] = match[2];\n            op[3] = match[3];\n            match += dec32table[sequence.offset];\n            ZSTD_copy4(op+4, match);\n            match -= dec64;\n        } else { ZSTD_copy8(op, match); }\n        op += 8; match += 8;\n\n        if (endMatch > oend-(16-MINMATCH))\n        {\n            if (op < oend-8)\n            {\n                ZSTD_wildcopy(op, match, (oend-8) - op);\n                match += (oend-8) - op;\n                op = oend-8;\n            }\n            while (op<endMatch) *op++ = *match++;\n        }\n        else\n            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */\n\n        /* restore, in case of overlap */\n        if (overlapRisk) memcpy(endMatch, saved, qutt);\n    }\n\n    return endMatch-ostart;\n}\n\ntypedef struct ZSTDv01_Dctx_s\n{\n    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];\n    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];\n    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];\n    void* previousDstEnd;\n    void* base;\n    size_t expected;\n    blockType_t bType;\n    U32 phase;\n} dctx_t;\n\n\nstatic size_t ZSTD_decompressSequences(\n                               void* ctx,\n                               void* dst, size_t maxDstSize,\n                         const void* seqStart, size_t seqSize,\n                         const 
BYTE* litStart, size_t litSize)\n{\n    dctx_t* dctx = (dctx_t*)ctx;\n    const BYTE* ip = (const BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t errorCode, dumpsLength;\n    const BYTE* litPtr = litStart;\n    const BYTE* const litEnd = litStart + litSize;\n    int nbSeq;\n    const BYTE* dumps;\n    U32* DTableLL = dctx->LLTable;\n    U32* DTableML = dctx->MLTable;\n    U32* DTableOffb = dctx->OffTable;\n    BYTE* const base = (BYTE*) (dctx->base);\n\n    /* Build Decoding Tables */\n    errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,\n                                      DTableLL, DTableML, DTableOffb,\n                                      ip, iend-ip);\n    if (ZSTDv01_isError(errorCode)) return errorCode;\n    ip += errorCode;\n\n    /* Regen sequences */\n    {\n        seq_t sequence;\n        seqState_t seqState;\n\n        memset(&sequence, 0, sizeof(sequence));\n        seqState.dumps = dumps;\n        seqState.dumpsEnd = dumps + dumpsLength;\n        seqState.prevOffset = 1;\n        errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip);\n        if (FSE_isError(errorCode)) return ERROR(corruption_detected);\n        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);\n        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);\n        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);\n\n        for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; )\n        {\n            size_t oneSeqSize;\n            nbSeq--;\n            ZSTD_decodeSequence(&sequence, &seqState);\n            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);\n            if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize;\n            op += oneSeqSize;\n        }\n\n        /* check if reached exact end 
*/\n        if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */\n        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */\n\n        /* last literal segment */\n        {\n            size_t lastLLSize = litEnd - litPtr;\n            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);\n            if (op != litPtr) memmove(op, litPtr, lastLLSize);\n            op += lastLLSize;\n        }\n    }\n\n    return op-ostart;\n}\n\n\nstatic size_t ZSTD_decompressBlock(\n                            void* ctx,\n                            void* dst, size_t maxDstSize,\n                      const void* src, size_t srcSize)\n{\n    /* blockType == blockCompressed, srcSize is trusted */\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* litPtr = NULL;\n    size_t litSize = 0;\n    size_t errorCode;\n\n    /* Decode literals sub-block */\n    errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize);\n    if (ZSTDv01_isError(errorCode)) return errorCode;\n    ip += errorCode;\n    srcSize -= errorCode;\n\n    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize);\n}\n\n\nsize_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* iend = ip + srcSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t remainingSize = srcSize;\n    U32 magicNumber;\n    size_t errorCode=0;\n    blockProperties_t blockProperties;\n\n    /* Frame Header */\n    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);\n    magicNumber = ZSTD_readBE32(src);\n    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);\n    ip += ZSTD_frameHeaderSize; remainingSize -= 
ZSTD_frameHeaderSize;\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties);\n        if (ZSTDv01_isError(blockSize)) return blockSize;\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSize -= ZSTD_blockHeaderSize;\n        if (blockSize > remainingSize) return ERROR(srcSize_wrong);\n\n        switch(blockProperties.blockType)\n        {\n        case bt_compressed:\n            errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize);\n            break;\n        case bt_raw :\n            errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet supported */\n            break;\n        case bt_end :\n            /* end of frame */\n            if (remainingSize) return ERROR(srcSize_wrong);\n            break;\n        default:\n            return ERROR(GENERIC);\n        }\n        if (blockSize == 0) break;   /* bt_end */\n\n        if (ZSTDv01_isError(errorCode)) return errorCode;\n        op += errorCode;\n        ip += blockSize;\n        remainingSize -= blockSize;\n    }\n\n    return op-ostart;\n}\n\nsize_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    dctx_t ctx;\n    ctx.base = dst;\n    return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);\n}\n\n/* ZSTD_errorFrameSizeInfoLegacy() :\n   assumes `cSize` and `dBound` are _not_ NULL */\nstatic void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)\n{\n    *cSize = ret;\n    *dBound = ZSTD_CONTENTSIZE_ERROR;\n}\n\nvoid ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)\n{\n    const BYTE* ip = (const BYTE*)src;\n    size_t remainingSize = srcSize;\n    size_t nbBlocks = 0;\n    U32 magicNumber;\n    blockProperties_t blockProperties;\n\n    /* Frame Header */\n   
 if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n        return;\n    }\n    magicNumber = ZSTD_readBE32(src);\n    if (magicNumber != ZSTD_magicNumber) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));\n        return;\n    }\n    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);\n        if (ZSTDv01_isError(blockSize)) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize);\n            return;\n        }\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSize -= ZSTD_blockHeaderSize;\n        if (blockSize > remainingSize) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n\n        if (blockSize == 0) break;   /* bt_end */\n\n        ip += blockSize;\n        remainingSize -= blockSize;\n        nbBlocks++;\n    }\n\n    *cSize = ip - (const BYTE*)src;\n    *dBound = nbBlocks * BLOCKSIZE;\n}\n\n/*******************************\n*  Streaming Decompression API\n*******************************/\n\nsize_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx)\n{\n    dctx->expected = ZSTD_frameHeaderSize;\n    dctx->phase = 0;\n    dctx->previousDstEnd = NULL;\n    dctx->base = NULL;\n    return 0;\n}\n\nZSTDv01_Dctx* ZSTDv01_createDCtx(void)\n{\n    ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx));\n    if (dctx==NULL) return NULL;\n    ZSTDv01_resetDCtx(dctx);\n    return dctx;\n}\n\nsize_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx)\n{\n    free(dctx);\n    return 0;\n}\n\nsize_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx)\n{\n    return ((dctx_t*)dctx)->expected;\n}\n\nsize_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    dctx_t* ctx 
= (dctx_t*)dctx;\n\n    /* Sanity check */\n    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);\n    if (dst != ctx->previousDstEnd)  /* not contiguous */\n        ctx->base = dst;\n\n    /* Decompress : frame header */\n    if (ctx->phase == 0)\n    {\n        /* Check frame magic header */\n        U32 magicNumber = ZSTD_readBE32(src);\n        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);\n        ctx->phase = 1;\n        ctx->expected = ZSTD_blockHeaderSize;\n        return 0;\n    }\n\n    /* Decompress : block header */\n    if (ctx->phase == 1)\n    {\n        blockProperties_t bp;\n        size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);\n        if (ZSTDv01_isError(blockSize)) return blockSize;\n        if (bp.blockType == bt_end)\n        {\n            ctx->expected = 0;\n            ctx->phase = 0;\n        }\n        else\n        {\n            ctx->expected = blockSize;\n            ctx->bType = bp.blockType;\n            ctx->phase = 2;\n        }\n\n        return 0;\n    }\n\n    /* Decompress : block content */\n    {\n        size_t rSize;\n        switch(ctx->bType)\n        {\n        case bt_compressed:\n            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);\n            break;\n        case bt_raw :\n            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet handled */\n            break;\n        case bt_end :   /* should never happen (filtered at phase 1) */\n            rSize = 0;\n            break;\n        default:\n            return ERROR(GENERIC);\n        }\n        ctx->phase = 1;\n        ctx->expected = ZSTD_blockHeaderSize;\n        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);\n        return rSize;\n    }\n\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v01.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_V01_H_28739879432\n#define ZSTD_V01_H_28739879432\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Includes\n***************************************/\n#include <stddef.h>   /* size_t */\n\n\n/* *************************************\n*  Simple one-step function\n***************************************/\n/**\nZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format\n    compressedSize : is the exact source size\n    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.\n                      It must be equal or larger than originalSize, otherwise decompression will fail.\n    return : the number of bytes decompressed into destination buffer (originalSize)\n             or an errorCode if it fails (which can be tested using ZSTDv01_isError())\n*/\nsize_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,\n                     const void* src, size_t compressedSize);\n\n /**\n ZSTDv01_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.1.x format\n     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'\n     cSize (output parameter)  : the number of bytes that would be read to decompress this frame\n                                 or an error code if it fails (which can be tested using ZSTDv01_isError())\n     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame\n                                 or 
ZSTD_CONTENTSIZE_ERROR if an error occurs\n\n     note : assumes `cSize` and `dBound` are _not_ NULL.\n */\nvoid ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize,\n                                     size_t* cSize, unsigned long long* dBound);\n\n/**\nZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error\n*/\nunsigned ZSTDv01_isError(size_t code);\n\n\n/* *************************************\n*  Advanced functions\n***************************************/\ntypedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx;\nZSTDv01_Dctx* ZSTDv01_createDCtx(void);\nsize_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx);\n\nsize_t ZSTDv01_decompressDCtx(void* ctx,\n                              void* dst, size_t maxOriginalSize,\n                        const void* src, size_t compressedSize);\n\n/* *************************************\n*  Streaming functions\n***************************************/\nsize_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx);\n\nsize_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx);\nsize_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);\n/**\n  Use above functions alternatively.\n  ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().\n  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.\n  Result is the number of bytes regenerated within 'dst'.\n  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.\n*/\n\n/* *************************************\n*  Prefix - version detection\n***************************************/\n#define ZSTDv01_magicNumber   0xFD2FB51E   /* Big Endian version */\n#define ZSTDv01_magicNumberLE 0x1EB52FFD   /* Little Endian version */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_V01_H_28739879432 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v02.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n#include <stddef.h>    /* size_t, ptrdiff_t */\n#include \"zstd_v02.h\"\n#include \"error_private.h\"\n\n\n/******************************************\n*  Compiler-specific\n******************************************/\n#if defined(_MSC_VER)   /* Visual Studio */\n#   include <stdlib.h>  /* _byteswap_ulong */\n#   include <intrin.h>  /* _byteswap_* */\n#endif\n\n\n/* ******************************************************************\n   mem.h\n   low-level memory access routines\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef MEM_H_MODULE\n#define MEM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/******************************************\n*  Includes\n******************************************/\n#include <stddef.h>    /* size_t, ptrdiff_t */\n#include <string.h>    /* memcpy */\n\n\n/******************************************\n*  Compiler-specific\n******************************************/\n#if defined(__GNUC__)\n#  define MEM_STATIC static __attribute__((unused))\n#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define MEM_STATIC static inline\n#elif defined(_MSC_VER)\n#  define MEM_STATIC static __inline\n#else\n#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/****************************************************************\n*  Basic Types\n*****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include <stdint.h>\n  typedef  uint8_t BYTE;\n  typedef uint16_t U16;\n  typedef  int16_t S16;\n  typedef uint32_t 
U32;\n  typedef  int32_t S32;\n  typedef uint64_t U64;\n  typedef  int64_t S64;\n#else\n  typedef unsigned char       BYTE;\n  typedef unsigned short      U16;\n  typedef   signed short      S16;\n  typedef unsigned int        U32;\n  typedef   signed int        S32;\n  typedef unsigned long long  U64;\n  typedef   signed long long  S64;\n#endif\n\n\n/****************************************************************\n*  Memory I/O\n*****************************************************************/\n/* MEM_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. 
This method is portable but violate C standard.\n *            It can generate buggy code on targets generating assembly depending on alignment.\n *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define MEM_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))\n#    define MEM_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\nMEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }\nMEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }\n\nMEM_STATIC unsigned MEM_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)\n\n/* violates C standard on structure alignment.\nOnly use if no other choice to achieve best performance on target platform */\nMEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }\nMEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }\nMEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\n\n#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler 
specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;\n\nMEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }\nMEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\nMEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }\n\n#else\n\n/* default method, safe and standard.\n   can sometimes prove slower */\n\nMEM_STATIC U16 MEM_read16(const void* memPtr)\n{\n    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U32 MEM_read32(const void* memPtr)\n{\n    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U64 MEM_read64(const void* memPtr)\n{\n    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif // MEM_FORCE_MEMORY_ACCESS\n\n\nMEM_STATIC U16 MEM_readLE16(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read16(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)(p[0] + (p[1]<<8));\n    }\n}\n\nMEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)\n{\n    if (MEM_isLittleEndian())\n    {\n        MEM_write16(memPtr, val);\n    }\n    else\n    {\n        BYTE* p = (BYTE*)memPtr;\n        p[0] = (BYTE)val;\n        p[1] = (BYTE)(val>>8);\n    }\n}\n\nMEM_STATIC U32 MEM_readLE24(const void* memPtr)\n{\n    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);\n}\n\nMEM_STATIC U32 MEM_readLE32(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read32(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + 
((U32)p[3]<<24));\n    }\n}\n\n\nMEM_STATIC U64 MEM_readLE64(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read64(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)\n                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));\n    }\n}\n\n\nMEM_STATIC size_t MEM_readLEST(const void* memPtr)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_readLE32(memPtr);\n    else\n        return (size_t)MEM_readLE64(memPtr);\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* MEM_H_MODULE */\n\n\n/* ******************************************************************\n   bitstream\n   Part of NewGen Entropy library\n   header file (to include)\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef BITSTREAM_H_MODULE\n#define BITSTREAM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/*\n*  This API consists of small unitary functions, which highly benefit from being inlined.\n*  Since link-time-optimization is not available for all compilers,\n*  these functions are defined into a .h to be included.\n*/\n\n\n/**********************************************\n*  bitStream decompression API (read backward)\n**********************************************/\ntypedef struct\n{\n    size_t   bitContainer;\n    unsigned bitsConsumed;\n    const char* ptr;\n    const char* start;\n} BIT_DStream_t;\n\ntypedef enum { BIT_DStream_unfinished = 0,\n               BIT_DStream_endOfBuffer = 1,\n               BIT_DStream_completed = 2,\n               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */\n               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... 
:( */\n\nMEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);\nMEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);\nMEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);\nMEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);\n\n\n/******************************************\n*  unsafe API\n******************************************/\nMEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);\n/* faster, but works only if nbBits >= 1 */\n\n\n\n/****************************************************************\n*  Helper functions\n****************************************************************/\nMEM_STATIC unsigned BIT_highbit32 (U32 val)\n{\n#   if defined(_MSC_VER)   /* Visual */\n    unsigned long r=0;\n    _BitScanReverse ( &r, val );\n    return (unsigned) r;\n#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */\n    return __builtin_clz (val) ^ 31;\n#   else   /* Software version */\n    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };\n    U32 v = val;\n    unsigned r;\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];\n    return r;\n#   endif\n}\n\n\n\n/**********************************************************\n* bitStream decoding\n**********************************************************/\n\n/*!BIT_initDStream\n*  Initialize a BIT_DStream_t.\n*  @bitD : a pointer to an already allocated BIT_DStream_t structure\n*  @srcBuffer must point at the beginning of a bitStream\n*  @srcSize must be the exact size of the bitStream\n*  @result : size of stream (== srcSize) or an errorCode if a problem is detected\n*/\nMEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)\n{\n    if (srcSize < 1) { 
memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }\n\n    if (srcSize >=  sizeof(size_t))   /* normal case */\n    {\n        U32 contain32;\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];\n        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */\n        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);\n    }\n    else\n    {\n        U32 contain32;\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = bitD->start;\n        bitD->bitContainer = *(const BYTE*)(bitD->start);\n        switch(srcSize)\n        {\n            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);\n                    /* fallthrough */\n            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);\n                    /* fallthrough */\n            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);\n                    /* fallthrough */\n            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;\n                    /* fallthrough */\n            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;\n                    /* fallthrough */\n            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;\n                    /* fallthrough */\n            default:;\n        }\n        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];\n        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */\n        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);\n        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;\n    }\n\n    return srcSize;\n}\n\nMEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 
nbBits)\n{\n    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);\n}\n\n/*! BIT_lookBitsFast :\n*   unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)\n{\n    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);\n}\n\nMEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)\n{\n    bitD->bitsConsumed += nbBits;\n}\n\nMEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)\n{\n    size_t value = BIT_lookBits(bitD, nbBits);\n    BIT_skipBits(bitD, nbBits);\n    return value;\n}\n\n/*!BIT_readBitsFast :\n*  unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)\n{\n    size_t value = BIT_lookBitsFast(bitD, nbBits);\n    BIT_skipBits(bitD, nbBits);\n    return value;\n}\n\nMEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)\n{\n    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */\n        return BIT_DStream_overflow;\n\n    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))\n    {\n        bitD->ptr -= bitD->bitsConsumed >> 3;\n        bitD->bitsConsumed &= 7;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        return BIT_DStream_unfinished;\n    }\n    if (bitD->ptr == bitD->start)\n    {\n        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;\n        return BIT_DStream_completed;\n    }\n    {\n        U32 nbBytes = bitD->bitsConsumed >> 3;\n        BIT_DStream_status result = BIT_DStream_unfinished;\n        if (bitD->ptr - nbBytes < bitD->start)\n        {\n            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */\n            result = BIT_DStream_endOfBuffer;\n        }\n        
bitD->ptr -= nbBytes;\n        bitD->bitsConsumed -= nbBytes*8;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */\n        return result;\n    }\n}\n\n/*! BIT_endOfDStream\n*   @return Tells if DStream has reached its exact end\n*/\nMEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)\n{\n    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* BITSTREAM_H_MODULE */\n/* ******************************************************************\n   Error codes and messages\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef ERROR_H_MODULE\n#define ERROR_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/******************************************\n*  Compiler-specific\n******************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define ERR_STATIC static inline\n#elif defined(_MSC_VER)\n#  define ERR_STATIC static __inline\n#elif defined(__GNUC__)\n#  define ERR_STATIC static __attribute__((unused))\n#else\n#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/******************************************\n*  Error Management\n******************************************/\n#define PREFIX(name) ZSTD_error_##name\n\n#define ERROR(name) (size_t)-PREFIX(name)\n\n#define ERROR_LIST(ITEM) \\\n        ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \\\n        ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \\\n        ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \\\n        ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \\\n        
ITEM(PREFIX(maxCode))\n\n#define ERROR_GENERATE_ENUM(ENUM) ENUM,\ntypedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */\n\n#define ERROR_CONVERTTOSTRING(STRING) #STRING,\n#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)\nstatic const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };\n\nERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }\n\nERR_STATIC const char* ERR_getErrorName(size_t code)\n{\n    static const char* codeError = \"Unspecified error code\";\n    if (ERR_isError(code)) return ERR_strings[-(int)(code)];\n    return codeError;\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ERROR_H_MODULE */\n/*\nConstructor and Destructor of type FSE_CTable\n    Note that its size depends on 'tableLog' and 'maxSymbolValue' */\ntypedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */\ntypedef unsigned FSE_DTable;   /* don't allocate that. 
It's just a way to be more restrictive than void* */\n\n\n/* ******************************************************************\n   FSE : Finite State Entropy coder\n   header file for static linking (only)\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/******************************************\n*  Static allocation\n******************************************/\n/* FSE buffer bounds */\n#define FSE_NCOUNTBOUND 512\n#define FSE_BLOCKBOUND(size) (size + (size>>7))\n#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */\n#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))\n#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))\n\n\n/******************************************\n*  FSE advanced API\n******************************************/\nstatic size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);\n/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */\n\nstatic size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);\n/* build a fake FSE_DTable, designed to always generate the same symbolValue */\n\n\n/******************************************\n*  FSE symbol 
decompression API\n******************************************/\ntypedef struct\n{\n    size_t      state;\n    const void* table;   /* precise table may vary, depending on U16 */\n} FSE_DState_t;\n\n\nstatic void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);\n\nstatic unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);\n\nstatic unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);\n\n\n/******************************************\n*  FSE unsafe API\n******************************************/\nstatic unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);\n/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */\n\n\n/******************************************\n*  Implementation of inline functions\n******************************************/\n\n/* decompression */\n\ntypedef struct {\n    U16 tableLog;\n    U16 fastMode;\n} FSE_DTableHeader;   /* sizeof U32 */\n\ntypedef struct\n{\n    unsigned short newState;\n    unsigned char  symbol;\n    unsigned char  nbBits;\n} FSE_decode_t;   /* size == U32 */\n\nMEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)\n{\n    FSE_DTableHeader DTableH;\n    memcpy(&DTableH, dt, sizeof(DTableH));\n    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);\n    BIT_reloadDStream(bitD);\n    DStatePtr->table = dt + 1;\n}\n\nMEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    const U32  nbBits = DInfo.nbBits;\n    BYTE symbol = DInfo.symbol;\n    size_t lowBits = BIT_readBits(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\nMEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    const FSE_decode_t DInfo = ((const 
FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    const U32 nbBits = DInfo.nbBits;\n    BYTE symbol = DInfo.symbol;\n    size_t lowBits = BIT_readBitsFast(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\nMEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)\n{\n    return DStatePtr->state == 0;\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n/* ******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   header file for static linking (only)\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/******************************************\n*  Static allocation macros\n******************************************/\n/* Huff0 buffer bounds */\n#define HUF_CTABLEBOUND 129\n#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */\n#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* static allocation of Huff0's DTable */\n#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */\n#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \\\n        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \\\n        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \\\n        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }\n\n\n/******************************************\n*  Advanced functions\n******************************************/\nstatic size_t 
HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\nstatic size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */\nstatic size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* quad-symbols decoder */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n/*\n    zstd - standard compression library\n    Header File\n    Copyright (C) 2014-2015, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Includes\n***************************************/\n#include <stddef.h>   /* size_t */\n\n\n/* *************************************\n*  Version\n***************************************/\n#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */\n#define ZSTD_VERSION_MINOR    2    /* for new (non-breaking) interface capabilities */\n#define ZSTD_VERSION_RELEASE  2    /* for tweaks, bug-fixes, or development */\n#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)\n\n\n/* *************************************\n*  Advanced functions\n***************************************/\ntypedef struct ZSTD_CCtx_s ZSTD_CCtx;   /* incomplete type */\n\n#if defined (__cplusplus)\n}\n#endif\n/*\n    zstd - standard compression library\n    Header File for static linking only\n    Copyright (C) 2014-2015, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must 
retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n/* The objects defined into this file should be considered experimental.\n * They are not labelled stable, as their prototype may change in the future.\n * You can use them for tests, provide feedback, or if you can endure risk of future changes.\n */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Streaming functions\n***************************************/\n\ntypedef struct ZSTD_DCtx_s ZSTD_DCtx;\n\n/*\n  Use above functions alternatively.\n  ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().\n  ZSTD_decompressContinue() will use previous data blocks to improve 
compression if they are located prior to current block.\n  Result is the number of bytes regenerated within 'dst'.\n  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.\n*/\n\n/* *************************************\n*  Prefix - version detection\n***************************************/\n#define ZSTD_magicNumber 0xFD2FB522   /* v0.2 (current)*/\n\n\n#if defined (__cplusplus)\n}\n#endif\n/* ******************************************************************\n   FSE : Finite State Entropy coder\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n#ifndef FSE_COMMONDEFS_ONLY\n\n/****************************************************************\n*  Tuning parameters\n****************************************************************/\n/* MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */\n#define FSE_MAX_MEMORY_USAGE 14\n#define FSE_DEFAULT_MEMORY_USAGE 13\n\n/* FSE_MAX_SYMBOL_VALUE :\n*  Maximum symbol value authorized.\n*  Required for proper stack allocation */\n#define FSE_MAX_SYMBOL_VALUE 255\n\n\n/****************************************************************\n*  template functions type & suffix\n****************************************************************/\n#define FSE_FUNCTION_TYPE BYTE\n#define FSE_FUNCTION_EXTENSION\n\n\n/****************************************************************\n*  Byte symbol type\n****************************************************************/\n#endif   /* !FSE_COMMONDEFS_ONLY 
*/\n\n\n/****************************************************************\n*  Compiler specifics\n****************************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  define FORCE_INLINE static __forceinline\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */\n#else\n#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#    ifdef __GNUC__\n#      define FORCE_INLINE static inline __attribute__((always_inline))\n#    else\n#      define FORCE_INLINE static inline\n#    endif\n#  else\n#    define FORCE_INLINE static\n#  endif /* __STDC_VERSION__ */\n#endif\n\n\n/****************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n/****************************************************************\n*  Constants\n*****************************************************************/\n#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)\n#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)\n#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)\n#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)\n#define FSE_MIN_TABLELOG 5\n\n#define FSE_TABLELOG_ABSOLUTE_MAX 15\n#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX\n#error \"FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported\"\n#endif\n\n\n/****************************************************************\n*  Error Management\n****************************************************************/\n#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations 
*/\n\n\n/****************************************************************\n*  Complex types\n****************************************************************/\ntypedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];\n\n\n/****************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef FSE_FUNCTION_EXTENSION\n#  error \"FSE_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSE_FUNCTION_TYPE\n#  error \"FSE_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSE_CAT(X,Y) X##Y\n#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)\n#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)\n\n\n/* Function templates */\n\n#define FSE_DECODE_TYPE FSE_decode_t\n\nstatic U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }\n\nstatic size_t FSE_buildDTable\n(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    void* ptr = dt+1;\n    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;\n    FSE_DTableHeader DTableH;\n    const U32 tableSize = 1 << tableLog;\n    const U32 tableMask = tableSize-1;\n    const U32 step = FSE_tableStep(tableSize);\n    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];\n    U32 position = 0;\n    U32 highThreshold = tableSize-1;\n    const S16 largeLimit= (S16)(1 << (tableLog-1));\n    U32 noLarge = 1;\n    U32 s;\n\n    /* Sanity Checks */\n    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);\n    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);\n\n    /* Init, lay down lowprob symbols */\n    DTableH.tableLog = (U16)tableLog;\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        if (normalizedCounter[s]==-1)\n        {\n            
tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;\n            symbolNext[s] = 1;\n        }\n        else\n        {\n            if (normalizedCounter[s] >= largeLimit) noLarge=0;\n            symbolNext[s] = normalizedCounter[s];\n        }\n    }\n\n    /* Spread symbols */\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        int i;\n        for (i=0; i<normalizedCounter[s]; i++)\n        {\n            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;\n            position = (position + step) & tableMask;\n            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n        }\n    }\n\n    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n\n    /* Build Decoding table */\n    {\n        U32 i;\n        for (i=0; i<tableSize; i++)\n        {\n            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);\n            U16 nextState = symbolNext[symbol]++;\n            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );\n            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);\n        }\n    }\n\n    DTableH.fastMode = (U16)noLarge;\n    memcpy(dt, &DTableH, sizeof(DTableH));   /* memcpy(), to avoid strict aliasing warnings */\n    return 0;\n}\n\n\n#ifndef FSE_COMMONDEFS_ONLY\n/******************************************\n*  FSE helper functions\n******************************************/\nstatic unsigned FSE_isError(size_t code) { return ERR_isError(code); }\n\n\n/****************************************************************\n*  FSE NCount encoding-decoding\n****************************************************************/\nstatic short FSE_abs(short a)\n{\n    return (short)(a<0 ? 
-a : a);\n}\n\nstatic size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,\n                 const void* headerBuffer, size_t hbSize)\n{\n    const BYTE* const istart = (const BYTE*) headerBuffer;\n    const BYTE* const iend = istart + hbSize;\n    const BYTE* ip = istart;\n    int nbBits;\n    int remaining;\n    int threshold;\n    U32 bitStream;\n    int bitCount;\n    unsigned charnum = 0;\n    int previous0 = 0;\n\n    if (hbSize < 4) return ERROR(srcSize_wrong);\n    bitStream = MEM_readLE32(ip);\n    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */\n    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);\n    bitStream >>= 4;\n    bitCount = 4;\n    *tableLogPtr = nbBits;\n    remaining = (1<<nbBits)+1;\n    threshold = 1<<nbBits;\n    nbBits++;\n\n    while ((remaining>1) && (charnum<=*maxSVPtr))\n    {\n        if (previous0)\n        {\n            unsigned n0 = charnum;\n            while ((bitStream & 0xFFFF) == 0xFFFF)\n            {\n                n0+=24;\n                if (ip < iend-5)\n                {\n                    ip+=2;\n                    bitStream = MEM_readLE32(ip) >> bitCount;\n                }\n                else\n                {\n                    bitStream >>= 16;\n                    bitCount+=16;\n                }\n            }\n            while ((bitStream & 3) == 3)\n            {\n                n0+=3;\n                bitStream>>=2;\n                bitCount+=2;\n            }\n            n0 += bitStream & 3;\n            bitCount += 2;\n            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);\n            while (charnum < n0) normalizedCounter[charnum++] = 0;\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))\n            {\n                ip += bitCount>>3;\n                bitCount &= 7;\n                bitStream = MEM_readLE32(ip) >> bitCount;\n            }\n            else\n                
bitStream >>= 2;\n        }\n        {\n            const short max = (short)((2*threshold-1)-remaining);\n            short count;\n\n            if ((bitStream & (threshold-1)) < (U32)max)\n            {\n                count = (short)(bitStream & (threshold-1));\n                bitCount   += nbBits-1;\n            }\n            else\n            {\n                count = (short)(bitStream & (2*threshold-1));\n                if (count >= threshold) count -= max;\n                bitCount   += nbBits;\n            }\n\n            count--;   /* extra accuracy */\n            remaining -= FSE_abs(count);\n            normalizedCounter[charnum++] = count;\n            previous0 = !count;\n            while (remaining < threshold)\n            {\n                nbBits--;\n                threshold >>= 1;\n            }\n\n            {\n                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))\n                {\n                    ip += bitCount>>3;\n                    bitCount &= 7;\n                }\n                else\n                {\n                    bitCount -= (int)(8 * (iend - 4 - ip));\n                    ip = iend - 4;\n                }\n                bitStream = MEM_readLE32(ip) >> (bitCount & 31);\n            }\n        }\n    }\n    if (remaining != 1) return ERROR(GENERIC);\n    *maxSVPtr = charnum-1;\n\n    ip += (bitCount+7)>>3;\n    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);\n    return ip-istart;\n}\n\n\n/*********************************************************\n*  Decompression (Byte symbols)\n*********************************************************/\nstatic size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->newState = 0;\n    cell->symbol 
= symbolValue;\n    cell->nbBits = 0;\n\n    return 0;\n}\n\n\nstatic size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSymbolValue = tableMask;\n    unsigned s;\n\n    /* Sanity checks */\n    if (nbBits < 1) return ERROR(GENERIC);         /* min size */\n\n    /* Build Decoding Table */\n    DTableH->tableLog = (U16)nbBits;\n    DTableH->fastMode = 1;\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        dinfo[s].newState = 0;\n        dinfo[s].symbol = (BYTE)s;\n        dinfo[s].nbBits = (BYTE)nbBits;\n    }\n\n    return 0;\n}\n\nFORCE_INLINE size_t FSE_decompress_usingDTable_generic(\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const FSE_DTable* dt, const unsigned fast)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const omax = op + maxDstSize;\n    BYTE* const olimit = omax-3;\n\n    BIT_DStream_t bitD;\n    FSE_DState_t state1;\n    FSE_DState_t state2;\n    size_t errorCode;\n\n    /* Init */\n    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */\n    if (FSE_isError(errorCode)) return errorCode;\n\n    FSE_initDState(&state1, &bitD, dt);\n    FSE_initDState(&state2, &bitD, dt);\n\n#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)\n\n    /* 4 symbols per loop */\n    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)\n    {\n        op[0] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BIT_reloadDStream(&bitD);\n\n        op[1] = FSE_GETSYMBOL(&state2);\n\n        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }\n\n        op[2] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BIT_reloadDStream(&bitD);\n\n        op[3] = FSE_GETSYMBOL(&state2);\n    }\n\n    /* tail */\n    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */\n    while (1)\n    {\n        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )\n            break;\n\n        *op++ = FSE_GETSYMBOL(&state1);\n\n        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )\n            break;\n\n        *op++ = FSE_GETSYMBOL(&state2);\n    }\n\n    /* end ? 
*/\n    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))\n        return op-ostart;\n\n    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */\n\n    return ERROR(corruption_detected);\n}\n\n\nstatic size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,\n                            const void* cSrc, size_t cSrcSize,\n                            const FSE_DTable* dt)\n{\n    FSE_DTableHeader DTableH;\n    memcpy(&DTableH, dt, sizeof(DTableH));\n\n    /* select fast mode (static) */\n    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);\n    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);\n}\n\n\nstatic size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)\n{\n    const BYTE* const istart = (const BYTE*)cSrc;\n    const BYTE* ip = istart;\n    short counting[FSE_MAX_SYMBOL_VALUE+1];\n    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */\n    unsigned tableLog;\n    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;\n    size_t errorCode;\n\n    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */\n\n    /* normal FSE decoding mode */\n    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);\n    if (FSE_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);\n    if (FSE_isError(errorCode)) return errorCode;\n\n    /* always return, even if it is an error code */\n    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);\n}\n\n\n\n#endif   /* FSE_COMMONDEFS_ONLY */\n/* 
******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/****************************************************************\n*  Compiler specifics\n****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n/* inline is defined */\n#elif defined(_MSC_VER)\n#  define inline __inline\n#else\n#  define inline /* disable inline */\n#endif\n\n\n#ifdef _MSC_VER    /* Visual Studio */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#endif\n\n\n/****************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n/****************************************************************\n*  Error Management\n****************************************************************/\n#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/******************************************\n*  Helper 
functions\n******************************************/\nstatic unsigned HUF_isError(size_t code) { return ERR_isError(code); }\n\n#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */\n#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */\n#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */\n#define HUF_MAX_SYMBOL_VALUE 255\n#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)\n#  error \"HUF_MAX_TABLELOG is too large !\"\n#endif\n\n\n\n/*********************************************************\n*  Huff0 : Huffman block decompression\n*********************************************************/\ntypedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */\n\ntypedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */\n\ntypedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;\n\n/*! HUF_readStats\n    Read compact Huffman tree, saved by HUF_writeCTable\n    @huffWeight : destination buffer\n    @return : size read from `src`\n*/\nstatic size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,\n                            U32* nbSymbolsPtr, U32* tableLogPtr,\n                            const void* src, size_t srcSize)\n{\n    U32 weightTotal;\n    U32 tableLog;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize;\n    size_t oSize;\n    U32 n;\n\n    if (!srcSize) return ERROR(srcSize_wrong);\n    iSize = ip[0];\n    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzer complain ... 
*/\n\n    if (iSize >= 128)  /* special header */\n    {\n        if (iSize >= (242))   /* RLE */\n        {\n            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };\n            oSize = l[iSize-242];\n            memset(huffWeight, 1, hwSize);\n            iSize = 0;\n        }\n        else   /* Incompressible */\n        {\n            oSize = iSize - 127;\n            iSize = ((oSize+1)/2);\n            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n            if (oSize >= hwSize) return ERROR(corruption_detected);\n            ip += 1;\n            for (n=0; n<oSize; n+=2)\n            {\n                huffWeight[n]   = ip[n/2] >> 4;\n                huffWeight[n+1] = ip[n/2] & 15;\n            }\n        }\n    }\n    else  /* header compressed with FSE (normal case) */\n    {\n        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */\n        if (FSE_isError(oSize)) return oSize;\n    }\n\n    /* collect weight stats */\n    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));\n    weightTotal = 0;\n    for (n=0; n<oSize; n++)\n    {\n        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);\n        rankStats[huffWeight[n]]++;\n        weightTotal += (1 << huffWeight[n]) >> 1;\n    }\n    if (weightTotal == 0) return ERROR(corruption_detected);\n\n    /* get last non-null symbol weight (implied, total must be 2^n) */\n    tableLog = BIT_highbit32(weightTotal) + 1;\n    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);\n    {\n        U32 total = 1 << tableLog;\n        U32 rest = total - weightTotal;\n        U32 verif = 1 << BIT_highbit32(rest);\n        U32 lastWeight = BIT_highbit32(rest) + 1;\n        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */\n        
huffWeight[oSize] = (BYTE)lastWeight;\n        rankStats[lastWeight]++;\n    }\n\n    /* check tree construction validity */\n    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */\n\n    /* results */\n    *nbSymbolsPtr = (U32)(oSize+1);\n    *tableLogPtr = tableLog;\n    return iSize+1;\n}\n\n\n/**************************/\n/* single-symbol decoding */\n/**************************/\n\nstatic size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)\n{\n    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */\n    U32 tableLog = 0;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize = ip[0];\n    U32 nbSymbols = 0;\n    U32 n;\n    U32 nextRankStart;\n    void* ptr = DTable+1;\n    HUF_DEltX2* const dt = (HUF_DEltX2*)ptr;\n\n    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */\n    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */\n    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */\n\n    /* Prepare ranks */\n    nextRankStart = 0;\n    for (n=1; n<=tableLog; n++)\n    {\n        U32 current = nextRankStart;\n        nextRankStart += (rankVal[n] << (n-1));\n        rankVal[n] = current;\n    }\n\n    /* fill DTable */\n    for (n=0; n<nbSymbols; n++)\n    {\n        const U32 w = huffWeight[n];\n        const U32 length = (1 << w) >> 1;\n        U32 i;\n        HUF_DEltX2 D;\n        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);\n        for (i = rankVal[w]; i < rankVal[w] + length; i++)\n            dt[i] = D;\n        rankVal[w] += length;\n    }\n\n    return iSize;\n}\n\nstatic BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)\n{\n        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */\n        const BYTE c = dt[val].byte;\n        BIT_skipBits(Dstream, dt[val].nbBits);\n        return c;\n}\n\n#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \\\n    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \\\n        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\n#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\nstatic inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 4 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && 
(p <= pEnd-4))\n    {\n        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    /* no more data to retrieve from bitstream, hence no need to reload */\n    while (p < pEnd)\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    return pEnd-pStart;\n}\n\n\nstatic size_t HUF_decompress4X2_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U16* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n\n        const void* ptr = DTable;\n        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;\n        const U32 dtLog = DTable[0];\n        size_t errorCode;\n\n        /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = 
opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BIT_initDStream(&bitD1, istart1, length1);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD2, istart2, length2);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD3, istart3, length3);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD4, istart4, length4);\n        if (HUF_isError(errorCode)) return errorCode;\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )\n        {\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);\n\n            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return 
ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);\n        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);\n        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);\n        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nstatic size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n    size_t errorCode;\n\n    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);\n    if (HUF_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/***************************/\n/* double-symbols decoding */\n/***************************/\n\nstatic void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,\n                           const U32* rankValOrigin, const int minWeight,\n                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,\n                           U32 nbBitsBaseline, U16 baseSeq)\n{\n    HUF_DEltX4 DElt;\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];\n    U32 s;\n\n    /* get pre-calculated rankVal */\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill skipped values */\n    if (minWeight>1)\n    {\n        
U32 i, skipSize = rankVal[minWeight];\n        MEM_writeLE16(&(DElt.sequence), baseSeq);\n        DElt.nbBits   = (BYTE)(consumed);\n        DElt.length   = 1;\n        for (i = 0; i < skipSize; i++)\n            DTable[i] = DElt;\n    }\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */\n    {\n        const U32 symbol = sortedSymbols[s].symbol;\n        const U32 weight = sortedSymbols[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 length = 1 << (sizeLog-nbBits);\n        const U32 start = rankVal[weight];\n        U32 i = start;\n        const U32 end = start + length;\n\n        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));\n        DElt.nbBits = (BYTE)(nbBits + consumed);\n        DElt.length = 2;\n        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */\n\n        rankVal[weight] += length;\n    }\n}\n\ntypedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];\n\nstatic void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,\n                           const sortedSymbol_t* sortedList, const U32 sortedListSize,\n                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,\n                           const U32 nbBitsBaseline)\n{\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];\n    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */\n    const U32 minBits  = nbBitsBaseline - maxWeight;\n    U32 s;\n\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++)\n    {\n        const U16 symbol = sortedList[s].symbol;\n        const U32 weight = sortedList[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 start = rankVal[weight];\n        const U32 length = 1 << (targetLog-nbBits);\n\n        if (targetLog-nbBits >= minBits)   /* 
enough room for a second symbol */\n        {\n            U32 sortedRank;\n            int minWeight = nbBits + scaleLog;\n            if (minWeight < 1) minWeight = 1;\n            sortedRank = rankStart[minWeight];\n            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,\n                           rankValOrigin[nbBits], minWeight,\n                           sortedList+sortedRank, sortedListSize-sortedRank,\n                           nbBitsBaseline, symbol);\n        }\n        else\n        {\n            U32 i;\n            const U32 end = start + length;\n            HUF_DEltX4 DElt;\n\n            MEM_writeLE16(&(DElt.sequence), symbol);\n            DElt.nbBits   = (BYTE)(nbBits);\n            DElt.length   = 1;\n            for (i = start; i < end; i++)\n                DTable[i] = DElt;\n        }\n        rankVal[weight] += length;\n    }\n}\n\nstatic size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)\n{\n    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];\n    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];\n    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };\n    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };\n    U32* const rankStart = rankStart0+1;\n    rankVal_t rankVal;\n    U32 tableLog, maxW, sizeOfSort, nbSymbols;\n    const U32 memLog = DTable[0];\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize = ip[0];\n    void* ptr = DTable;\n    HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;\n\n    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */\n    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);\n    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */\n\n    /* find maxWeight */\n    for (maxW = tableLog; rankStats[maxW]==0; maxW--)\n        {if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */\n\n    /* Get start index of each weight */\n    {\n        U32 w, nextRankStart = 0;\n        for (w=1; w<=maxW; w++)\n        {\n            U32 current = nextRankStart;\n            nextRankStart += rankStats[w];\n            rankStart[w] = current;\n        }\n        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/\n        sizeOfSort = nextRankStart;\n    }\n\n    /* sort symbols by weight */\n    {\n        U32 s;\n        for (s=0; s<nbSymbols; s++)\n        {\n            U32 w = weightList[s];\n            U32 r = rankStart[w]++;\n            sortedSymbol[r].symbol = (BYTE)s;\n            sortedSymbol[r].weight = (BYTE)w;\n        }\n        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */\n    }\n\n    /* Build rankVal */\n    {\n        const U32 minBits = tableLog+1 - maxW;\n        U32 nextRankVal = 0;\n        U32 w, consumed;\n        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */\n        U32* rankVal0 = rankVal[0];\n        for (w=1; w<=maxW; w++)\n        {\n            U32 current = nextRankVal;\n            nextRankVal += rankStats[w] << (w+rescale);\n            rankVal0[w] = current;\n        }\n        for (consumed = minBits; consumed <= memLog - minBits; consumed++)\n        {\n            U32* rankValPtr = rankVal[consumed];\n            for (w = 1; w <= maxW; w++)\n            {\n                rankValPtr[w] = rankVal0[w] >> consumed;\n            }\n        }\n    }\n\n    HUF_fillDTableX4(dt, 
memLog,\n                   sortedSymbol, sizeOfSort,\n                   rankStart0, rankVal, maxW,\n                   tableLog+1);\n\n    return iSize;\n}\n\n\nstatic U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 2);\n    BIT_skipBits(DStream, dt[val].nbBits);\n    return dt[val].length;\n}\n\nstatic U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 1);\n    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);\n    else\n    {\n        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))\n        {\n            BIT_skipBits(DStream, dt[val].nbBits);\n            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))\n                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */\n        }\n    }\n    return 1;\n}\n\n\n#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \\\n    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \\\n        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\nstatic inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 8 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))\n    {\n        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);\n\n    while (p <= pEnd-2)\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */\n\n    if (p < pEnd)\n        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);\n\n    return p-pStart;\n}\n\n\n\nstatic size_t HUF_decompress4X4_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U32* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n\n        const void* ptr = DTable;\n        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;\n        const U32 dtLog = DTable[0];\n        size_t errorCode;\n\n  
      /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BIT_initDStream(&bitD1, istart1, length1);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD2, istart2, length2);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD3, istart3, length3);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD4, istart4, length4);\n        if (HUF_isError(errorCode)) return errorCode;\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )\n        {\n            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);\n            
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);\n\n            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);\n        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);\n        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);\n        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nstatic size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return 
ERROR(srcSize_wrong);\n    ip += hSize;\n    cSrcSize -= hSize;\n\n    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/**********************************/\n/* quad-symbol decoding           */\n/**********************************/\ntypedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6;\ntypedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6;\n\n/* recursive, up to level 3; may benefit from <template>-like strategy to nest each level inline */\nstatic void HUF_fillDTableX6LevelN(HUF_DDescX6* DDescription, HUF_DSeqX6* DSequence, int sizeLog,\n                           const rankVal_t rankValOrigin, const U32 consumed, const int minWeight, const U32 maxWeight,\n                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, const U32* rankStart,\n                           const U32 nbBitsBaseline, HUF_DSeqX6 baseSeq, HUF_DDescX6 DDesc)\n{\n    const int scaleLog = nbBitsBaseline - sizeLog;   /* note : targetLog >= (nbBitsBaseline-1), hence scaleLog <= 1 */\n    const int minBits  = nbBitsBaseline - maxWeight;\n    const U32 level = DDesc.nbBytes;\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];\n    U32 symbolStartPos, s;\n\n    /* local rankVal, will be modified */\n    memcpy(rankVal, rankValOrigin[consumed], sizeof(rankVal));\n\n    /* fill skipped values */\n    if (minWeight>1)\n    {\n        U32 i;\n        const U32 skipSize = rankVal[minWeight];\n        for (i = 0; i < skipSize; i++)\n        {\n            DSequence[i] = baseSeq;\n            DDescription[i] = DDesc;\n        }\n    }\n\n    /* fill DTable */\n    DDesc.nbBytes++;\n    symbolStartPos = rankStart[minWeight];\n    for (s=symbolStartPos; s<sortedListSize; s++)\n    {\n        const BYTE symbol = sortedSymbols[s].symbol;\n        const U32  weight = sortedSymbols[s].weight;   /* >= 1 (sorted) */\n        const int  nbBits = nbBitsBaseline - weight;   /* >= 1 (by construction) */\n        const int  totalBits = 
consumed+nbBits;\n        const U32  start  = rankVal[weight];\n        const U32  length = 1 << (sizeLog-nbBits);\n        baseSeq.byte[level] = symbol;\n        DDesc.nbBits = (BYTE)totalBits;\n\n        if ((level<3) && (sizeLog-totalBits >= minBits))   /* enough room for another symbol */\n        {\n            int nextMinWeight = totalBits + scaleLog;\n            if (nextMinWeight < 1) nextMinWeight = 1;\n            HUF_fillDTableX6LevelN(DDescription+start, DSequence+start, sizeLog-nbBits,\n                           rankValOrigin, totalBits, nextMinWeight, maxWeight,\n                           sortedSymbols, sortedListSize, rankStart,\n                           nbBitsBaseline, baseSeq, DDesc);   /* recursive (max : level 3) */\n        }\n        else\n        {\n            U32 i;\n            const U32 end = start + length;\n            for (i = start; i < end; i++)\n            {\n                DDescription[i] = DDesc;\n                DSequence[i] = baseSeq;\n            }\n        }\n        rankVal[weight] += length;\n    }\n}\n\n\n/* note : same preparation as X4 */\nstatic size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)\n{\n    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];\n    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];\n    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };\n    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };\n    U32* const rankStart = rankStart0+1;\n    U32 tableLog, maxW, sizeOfSort, nbSymbols;\n    rankVal_t rankVal;\n    const U32 memLog = DTable[0];\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize = ip[0];\n\n    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);\n    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable is too small */\n\n    /* find maxWeight */\n    for (maxW = tableLog; rankStats[maxW]==0; maxW--)\n        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */\n\n\n    /* Get start index of each weight */\n    {\n        U32 w, nextRankStart = 0;\n        for (w=1; w<=maxW; w++)\n        {\n            U32 current = nextRankStart;\n            nextRankStart += rankStats[w];\n            rankStart[w] = current;\n        }\n        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/\n        sizeOfSort = nextRankStart;\n    }\n\n    /* sort symbols by weight */\n    {\n        U32 s;\n        for (s=0; s<nbSymbols; s++)\n        {\n            U32 w = weightList[s];\n            U32 r = rankStart[w]++;\n            sortedSymbol[r].symbol = (BYTE)s;\n            sortedSymbol[r].weight = (BYTE)w;\n        }\n        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */\n    }\n\n    /* Build rankVal */\n    {\n        const U32 minBits = tableLog+1 - maxW;\n        U32 nextRankVal = 0;\n        U32 w, consumed;\n        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */\n        U32* rankVal0 = rankVal[0];\n        for (w=1; w<=maxW; w++)\n        {\n            U32 current = nextRankVal;\n            nextRankVal += rankStats[w] << (w+rescale);\n            rankVal0[w] = current;\n        }\n        for (consumed = minBits; consumed <= memLog - minBits; consumed++)\n        {\n            U32* rankValPtr = rankVal[consumed];\n            for (w = 1; w <= maxW; w++)\n            {\n                rankValPtr[w] = rankVal0[w] >> consumed;\n            }\n        }\n    }\n\n\n    /* fill tables */\n    {\n   
     void* ptr = DTable+1;\n        HUF_DDescX6* DDescription = (HUF_DDescX6*)(ptr);\n        void* dSeqStart = DTable + 1 + ((size_t)1<<(memLog-1));\n        HUF_DSeqX6* DSequence = (HUF_DSeqX6*)(dSeqStart);\n        HUF_DSeqX6 DSeq;\n        HUF_DDescX6 DDesc;\n        DSeq.sequence = 0;\n        DDesc.nbBits = 0;\n        DDesc.nbBytes = 0;\n        HUF_fillDTableX6LevelN(DDescription, DSequence, memLog,\n                       (const U32 (*)[HUF_ABSOLUTEMAX_TABLELOG + 1])rankVal, 0, 1, maxW,\n                       sortedSymbol, sizeOfSort, rankStart0,\n                       tableLog+1, DSeq, DDesc);\n    }\n\n    return iSize;\n}\n\n\nstatic U32 HUF_decodeSymbolX6(void* op, BIT_DStream_t* DStream, const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)\n{\n    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, ds+val, sizeof(HUF_DSeqX6));\n    BIT_skipBits(DStream, dd[val].nbBits);\n    return dd[val].nbBytes;\n}\n\nstatic U32 HUF_decodeLastSymbolsX6(void* op, const U32 maxL, BIT_DStream_t* DStream,\n                                  const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)\n{\n    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    U32 length = dd[val].nbBytes;\n    if (length <= maxL)\n    {\n        memcpy(op, ds+val, length);\n        BIT_skipBits(DStream, dd[val].nbBits);\n        return length;\n    }\n    memcpy(op, ds+val, maxL);\n    if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))\n    {\n        BIT_skipBits(DStream, dd[val].nbBits);\n        if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))\n            DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */\n    }\n    return maxL;\n}\n\n\n#define HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) \\\n    ptr += HUF_decodeSymbolX6(ptr, DStreamPtr, dd, ds, dtLog)\n\n#define HUF_DECODE_SYMBOLX6_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \\\n        HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)\n\n#define HUF_DECODE_SYMBOLX6_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)\n\nstatic inline size_t HUF_decodeStreamX6(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const U32* DTable, const U32 dtLog)\n{\n    const void* ddPtr = DTable+1;\n    const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);\n    const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));\n    const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);\n    BYTE* const pStart = p;\n\n    /* up to 16 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-16))\n    {\n        HUF_DECODE_SYMBOLX6_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX6_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX6_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);\n    }\n\n    /* closer to the end, up to 4 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))\n        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);\n\n    while (p <= pEnd-4)\n        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */\n\n    while (p < pEnd)\n        p += HUF_decodeLastSymbolsX6(p, (U32)(pEnd-p), bitDPtr, dd, ds, dtLog);\n\n    return p-pStart;\n}\n\n\n\nstatic size_t HUF_decompress4X6_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U32* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) 
dst;\n        BYTE* const oend = ostart + dstSize;\n\n        const U32 dtLog = DTable[0];\n        const void* ddPtr = DTable+1;\n        const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);\n        const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));\n        const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);\n        size_t errorCode;\n\n        /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BIT_initDStream(&bitD1, istart1, length1);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD2, istart2, length2);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD3, istart3, length3);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD4, istart4, length4);\n        if (HUF_isError(errorCode)) return errorCode;\n\n        /* 16-64 symbols per loop (4-16 symbols per stream) */\n        endSignal = 
BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        for ( ; (op3 <= opStart4) && (endSignal==BIT_DStream_unfinished) && (op4<=(oend-16)) ; )\n        {\n            HUF_DECODE_SYMBOLX6_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX6_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX6_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX6_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX6_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX6_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX6_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX6_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX6_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX6_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX6_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX6_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX6_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX6_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX6_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX6_0(op4, &bitD4);\n\n            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX6(op1, &bitD1, opStart2, DTable, dtLog);\n        HUF_decodeStreamX6(op2, &bitD2, opStart3, DTable, dtLog);\n        HUF_decodeStreamX6(op3, &bitD3, opStart4, DTable, dtLog);\n        HUF_decodeStreamX6(op4, &bitD4, oend,     DTable, dtLog);\n\n        /* check */\n        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size 
*/\n        return dstSize;\n    }\n}\n\n\nstatic size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX6(DTable, HUF_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize;\n    cSrcSize -= hSize;\n\n    return HUF_decompress4X6_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/**********************************/\n/* Generic decompression selector */\n/**********************************/\n\ntypedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;\nstatic const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =\n{\n    /* single, double, quad */\n    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */\n    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */\n    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */\n    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */\n    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */\n    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */\n    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */\n    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */\n    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */\n    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */\n    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */\n    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */\n    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */\n    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */\n    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */\n    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */\n};\n\ntypedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t 
cSrcSize);\n\nstatic size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, HUF_decompress4X6 };\n    /* estimate decompression time */\n    U32 Q;\n    const U32 D256 = (U32)(dstSize >> 8);\n    U32 Dtime[3];\n    U32 algoNb = 0;\n    int n;\n\n    /* validation checks */\n    if (dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */\n    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */\n    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */\n\n    /* decoder timing evaluation */\n    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */\n    for (n=0; n<3; n++)\n        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);\n\n    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */\n\n    if (Dtime[1] < Dtime[0]) algoNb = 1;\n    if (Dtime[2] < Dtime[algoNb]) algoNb = 2;\n\n    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);\n\n    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */\n    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */\n    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */\n}\n/*\n    zstd - standard compression library\n    Copyright (C) 2014-2015, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list 
of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n/* ***************************************************************\n*  Tuning parameters\n*****************************************************************/\n/*!\n*  MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*/\n#define ZSTD_MEMORY_USAGE 17\n\n/*!\n * HEAPMODE :\n * Select how default compression functions will allocate memory for their hash table,\n * in memory stack (0, fastest), or in memory heap (1, requires malloc())\n * Note that compression context is fairly large, as a consequence heap memory is recommended.\n */\n#ifndef 
ZSTD_HEAPMODE\n#  define ZSTD_HEAPMODE 1\n#endif /* ZSTD_HEAPMODE */\n\n/*!\n*  LEGACY_SUPPORT :\n*  decompressor can decode older formats (starting from Zstd 0.1+)\n*/\n#ifndef ZSTD_LEGACY_SUPPORT\n#  define ZSTD_LEGACY_SUPPORT 1\n#endif\n\n\n/* *******************************************************\n*  Includes\n*********************************************************/\n#include <stdlib.h>      /* calloc */\n#include <string.h>      /* memcpy, memmove */\n#include <stdio.h>       /* debug : printf */\n\n\n/* *******************************************************\n*  Compiler specifics\n*********************************************************/\n#ifdef __AVX2__\n#  include <immintrin.h>   /* AVX2 intrinsics */\n#endif\n\n#ifdef _MSC_VER    /* Visual Studio */\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */\n#endif\n\n\n/* *******************************************************\n*  Constants\n*********************************************************/\n#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)\n#define HASH_TABLESIZE (1 << HASH_LOG)\n#define HASH_MASK (HASH_TABLESIZE - 1)\n\n#define KNUTH 2654435761\n\n#define BIT7 128\n#define BIT6  64\n#define BIT5  32\n#define BIT4  16\n#define BIT1   2\n#define BIT0   1\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define BLOCKSIZE (128 KB)                 /* define, for static allocation */\n#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)\n#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)\n#define IS_RAW BIT0\n#define IS_RLE BIT1\n\n#define WORKPLACESIZE (BLOCKSIZE*3)\n#define MINMATCH 4\n#define MLbits   7\n#define LLbits   6\n#define Offbits  5\n#define MaxML  ((1<<MLbits )-1)\n#define MaxLL  ((1<<LLbits )-1)\n#define MaxOff   31\n#define LitFSELog  
11\n#define MLFSELog   10\n#define LLFSELog   10\n#define OffFSELog   9\n#define MAX(a,b) ((a)<(b)?(b):(a))\n#define MaxSeq MAX(MaxLL, MaxML)\n\n#define LITERAL_NOENTROPY 63\n#define COMMAND_NOENTROPY 7   /* to remove */\n\n#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)\n\nstatic const size_t ZSTD_blockHeaderSize = 3;\nstatic const size_t ZSTD_frameHeaderSize = 4;\n\n\n/* *******************************************************\n*  Memory operations\n**********************************************************/\nstatic void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }\n\nstatic void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }\n\n#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }\n\n/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */\nstatic void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)\n{\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + length;\n    do COPY8(op, ip) while (op < oend);\n}\n\n\n/* **************************************\n*  Local structures\n****************************************/\ntypedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;\n\ntypedef struct\n{\n    blockType_t blockType;\n    U32 origSize;\n} blockProperties_t;\n\ntypedef struct {\n    void* buffer;\n    U32*  offsetStart;\n    U32*  offset;\n    BYTE* offCodeStart;\n    BYTE* offCode;\n    BYTE* litStart;\n    BYTE* lit;\n    BYTE* litLengthStart;\n    BYTE* litLength;\n    BYTE* matchLengthStart;\n    BYTE* matchLength;\n    BYTE* dumpsStart;\n    BYTE* dumps;\n} seqStore_t;\n\n\n/* *************************************\n*  Error Management\n***************************************/\n/*! 
ZSTD_isError\n*   tells if a return value is an error code */\nstatic unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }\n\n\n\n/* *************************************************************\n*   Decompression section\n***************************************************************/\nstruct ZSTD_DCtx_s\n{\n    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];\n    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];\n    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];\n    void* previousDstEnd;\n    void* base;\n    size_t expected;\n    blockType_t bType;\n    U32 phase;\n    const BYTE* litPtr;\n    size_t litSize;\n    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];\n};   /* typedef'd to ZSTD_Dctx within \"zstd_static.h\" */\n\n\nstatic size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)\n{\n    const BYTE* const in = (const BYTE* const)src;\n    BYTE headerFlags;\n    U32 cSize;\n\n    if (srcSize < 3) return ERROR(srcSize_wrong);\n\n    headerFlags = *in;\n    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);\n\n    bpPtr->blockType = (blockType_t)(headerFlags >> 6);\n    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? 
cSize : 0;\n\n    if (bpPtr->blockType == bt_end) return 0;\n    if (bpPtr->blockType == bt_rle) return 1;\n    return cSize;\n}\n\nstatic size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);\n    memcpy(dst, src, srcSize);\n    return srcSize;\n}\n\n\n/** ZSTD_decompressLiterals\n    @return : nb of bytes read from src, or an error code*/\nstatic size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,\n                                const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n\n    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n\n    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);\n    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);\n\n    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);\n\n    *maxDstSizePtr = litSize;\n    return litCSize + 5;\n}\n\n\n/** ZSTD_decodeLiteralsBlock\n    @return : nb of bytes read from src (< srcSize )*/\nstatic size_t ZSTD_decodeLiteralsBlock(void* ctx,\n                          const void* src, size_t srcSize)\n{\n    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;\n    const BYTE* const istart = (const BYTE* const)src;\n\n    /* any compressed block with literals segment must be at least this size */\n    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);\n\n    switch(*istart & 3)\n    {\n    default:\n    case 0:\n        {\n            size_t litSize = BLOCKSIZE;\n            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            memset(dctx->litBuffer + dctx->litSize, 0, 8);\n     
       return readSize;   /* works if it's an error too */\n        }\n    case IS_RAW:\n        {\n            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */\n            {\n                if (litSize > BLOCKSIZE) return ERROR(corruption_detected);\n                if (litSize > srcSize-3) return ERROR(corruption_detected);\n                memcpy(dctx->litBuffer, istart, litSize);\n                dctx->litPtr = dctx->litBuffer;\n                dctx->litSize = litSize;\n                memset(dctx->litBuffer + dctx->litSize, 0, 8);\n                return litSize+3;\n            }\n            /* direct reference into compressed stream */\n            dctx->litPtr = istart+3;\n            dctx->litSize = litSize;\n            return litSize+3;\n        }\n    case IS_RLE:\n        {\n            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);\n            memset(dctx->litBuffer, istart[3], litSize + 8);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            return 4;\n        }\n    }\n}\n\n\nstatic size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,\n                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,\n                         const void* src, size_t srcSize)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* ip = istart;\n    const BYTE* const iend = istart + srcSize;\n    U32 LLtype, Offtype, MLtype;\n    U32 LLlog, Offlog, MLlog;\n    size_t dumpsLength;\n\n    /* check */\n    if (srcSize < 5) return ERROR(srcSize_wrong);\n\n    /* SeqHead */\n    *nbSeq = MEM_readLE16(ip); ip+=2;\n    LLtype  = *ip >> 6;\n    Offtype = (*ip 
>> 4) & 3;\n    MLtype  = (*ip >> 2) & 3;\n    if (*ip & 2)\n    {\n        dumpsLength  = ip[2];\n        dumpsLength += ip[1] << 8;\n        ip += 3;\n    }\n    else\n    {\n        dumpsLength  = ip[1];\n        dumpsLength += (ip[0] & 1) << 8;\n        ip += 2;\n    }\n    *dumpsPtr = ip;\n    ip += dumpsLength;\n    *dumpsLengthPtr = dumpsLength;\n\n    /* check */\n    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are \"raw\", hence no header, but at least xxLog bits per type */\n\n    /* sequences */\n    {\n        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */\n        size_t headerSize;\n\n        /* Build DTables */\n        switch(LLtype)\n        {\n        case bt_rle :\n            LLlog = 0;\n            FSE_buildDTable_rle(DTableLL, *ip++); break;\n        case bt_raw :\n            LLlog = LLbits;\n            FSE_buildDTable_raw(DTableLL, LLbits); break;\n        default :\n            {   U32 max = MaxLL;\n                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (LLlog > LLFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableLL, norm, max, LLlog);\n        }   }\n\n        switch(Offtype)\n        {\n        case bt_rle :\n            Offlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */\n            break;\n        case bt_raw :\n            Offlog = Offbits;\n            FSE_buildDTable_raw(DTableOffb, Offbits); break;\n        default :\n            {   U32 max = MaxOff;\n                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (Offlog > 
OffFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableOffb, norm, max, Offlog);\n        }   }\n\n        switch(MLtype)\n        {\n        case bt_rle :\n            MLlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSE_buildDTable_rle(DTableML, *ip++); break;\n        case bt_raw :\n            MLlog = MLbits;\n            FSE_buildDTable_raw(DTableML, MLbits); break;\n        default :\n            {   U32 max = MaxML;\n                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (MLlog > MLFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableML, norm, max, MLlog);\n    }   }   }\n\n    return ip-istart;\n}\n\n\ntypedef struct {\n    size_t litLength;\n    size_t offset;\n    size_t matchLength;\n} seq_t;\n\ntypedef struct {\n    BIT_DStream_t DStream;\n    FSE_DState_t stateLL;\n    FSE_DState_t stateOffb;\n    FSE_DState_t stateML;\n    size_t prevOffset;\n    const BYTE* dumps;\n    const BYTE* dumpsEnd;\n} seqState_t;\n\n\nstatic void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)\n{\n    size_t litLength;\n    size_t prevOffset;\n    size_t offset;\n    size_t matchLength;\n    const BYTE* dumps = seqState->dumps;\n    const BYTE* const de = seqState->dumpsEnd;\n\n    /* Literal length */\n    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));\n    prevOffset = litLength ? seq->offset : seqState->prevOffset;\n    seqState->prevOffset = seq->offset;\n    if (litLength == MaxLL)\n    {\n        const U32 add = dumps<de ? 
*dumps++ : 0;\n        if (add < 255) litLength += add;\n        else if (dumps + 3 <= de)\n        {\n            litLength = MEM_readLE24(dumps);\n            dumps += 3;\n        }\n        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */\n    }\n\n    /* Offset */\n    {\n        static const size_t offsetPrefix[MaxOff+1] = {  /* note : size_t faster than U32 */\n                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,\n                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,\n                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };\n        U32 offsetCode, nbBits;\n        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */\n        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));\n        nbBits = offsetCode - 1;\n        if (offsetCode==0) nbBits = 0;   /* cmove */\n        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);\n        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));\n        if (offsetCode==0) offset = prevOffset;   /* cmove */\n    }\n\n    /* MatchLength */\n    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));\n    if (matchLength == MaxML)\n    {\n        const U32 add = dumps<de ? 
*dumps++ : 0;\n        if (add < 255) matchLength += add;\n        else if (dumps + 3 <= de)\n        {\n            matchLength = MEM_readLE24(dumps);\n            dumps += 3;\n        }\n        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */\n    }\n    matchLength += MINMATCH;\n\n    /* save result */\n    seq->litLength = litLength;\n    seq->offset = offset;\n    seq->matchLength = matchLength;\n    seqState->dumps = dumps;\n}\n\n\nstatic size_t ZSTD_execSequence(BYTE* op,\n                                seq_t sequence,\n                                const BYTE** litPtr, const BYTE* const litLimit,\n                                BYTE* const base, BYTE* const oend)\n{\n    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */\n    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */\n    const BYTE* const ostart = op;\n    BYTE* const oLitEnd = op + sequence.litLength;\n    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */\n    BYTE* const oend_8 = oend-8;\n    const BYTE* const litEnd = *litPtr + sequence.litLength;\n\n    /* checks */\n    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */\n    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */\n    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */\n\n    /* copy Literals */\n    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */\n    op = oLitEnd;\n    *litPtr = litEnd;   /* update for next sequence */\n\n    /* copy Match */\n    {\n        const BYTE* match = op - sequence.offset;\n\n        /* check */\n        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test 
(this test seems kept by clang optimizer) */\n        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */\n        if (match < base) return ERROR(corruption_detected);\n\n        /* close range match, overlap */\n        if (sequence.offset < 8)\n        {\n            const int dec64 = dec64table[sequence.offset];\n            op[0] = match[0];\n            op[1] = match[1];\n            op[2] = match[2];\n            op[3] = match[3];\n            match += dec32table[sequence.offset];\n            ZSTD_copy4(op+4, match);\n            match -= dec64;\n        }\n        else\n        {\n            ZSTD_copy8(op, match);\n        }\n        op += 8; match += 8;\n\n        if (oMatchEnd > oend-(16-MINMATCH))\n        {\n            if (op < oend_8)\n            {\n                ZSTD_wildcopy(op, match, oend_8 - op);\n                match += oend_8 - op;\n                op = oend_8;\n            }\n            while (op < oMatchEnd) *op++ = *match++;\n        }\n        else\n        {\n            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */\n        }\n    }\n\n    return oMatchEnd - ostart;\n}\n\nstatic size_t ZSTD_decompressSequences(\n                               void* ctx,\n                               void* dst, size_t maxDstSize,\n                         const void* seqStart, size_t seqSize)\n{\n    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;\n    const BYTE* ip = (const BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t errorCode, dumpsLength;\n    const BYTE* litPtr = dctx->litPtr;\n    const BYTE* const litEnd = litPtr + dctx->litSize;\n    int nbSeq;\n    const BYTE* dumps;\n    U32* DTableLL = dctx->LLTable;\n    U32* DTableML = dctx->MLTable;\n    U32* DTableOffb = dctx->OffTable;\n    
BYTE* const base = (BYTE*) (dctx->base);\n\n    /* Build Decoding Tables */\n    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,\n                                      DTableLL, DTableML, DTableOffb,\n                                      ip, iend-ip);\n    if (ZSTD_isError(errorCode)) return errorCode;\n    ip += errorCode;\n\n    /* Regen sequences */\n    {\n        seq_t sequence;\n        seqState_t seqState;\n\n        memset(&sequence, 0, sizeof(sequence));\n        seqState.dumps = dumps;\n        seqState.dumpsEnd = dumps + dumpsLength;\n        seqState.prevOffset = 1;\n        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);\n        if (ERR_isError(errorCode)) return ERROR(corruption_detected);\n        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);\n        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);\n        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);\n\n        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )\n        {\n            size_t oneSeqSize;\n            nbSeq--;\n            ZSTD_decodeSequence(&sequence, &seqState);\n            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);\n            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;\n            op += oneSeqSize;\n        }\n\n        /* check if reached exact end */\n        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */\n        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */\n\n        /* last literal segment */\n        {\n            size_t lastLLSize = litEnd - litPtr;\n            if (litPtr > litEnd) return ERROR(corruption_detected);\n            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);\n            if (op != litPtr) memmove(op, litPtr, lastLLSize);\n     
       op += lastLLSize;\n        }\n    }\n\n    return op-ostart;\n}\n\n\nstatic size_t ZSTD_decompressBlock(\n                            void* ctx,\n                            void* dst, size_t maxDstSize,\n                      const void* src, size_t srcSize)\n{\n    /* blockType == blockCompressed */\n    const BYTE* ip = (const BYTE*)src;\n\n    /* Decode literals sub-block */\n    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);\n    if (ZSTD_isError(litCSize)) return litCSize;\n    ip += litCSize;\n    srcSize -= litCSize;\n\n    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);\n}\n\n\nstatic size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* iend = ip + srcSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t remainingSize = srcSize;\n    U32 magicNumber;\n    blockProperties_t blockProperties;\n\n    /* Frame Header */\n    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);\n    magicNumber = MEM_readLE32(src);\n    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);\n    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t decodedSize=0;\n        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);\n        if (ZSTD_isError(cBlockSize)) return cBlockSize;\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSize -= ZSTD_blockHeaderSize;\n        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);\n\n        switch(blockProperties.blockType)\n        {\n        case bt_compressed:\n            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);\n            break;\n        case bt_raw :\n            decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, 
cBlockSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet supported */\n            break;\n        case bt_end :\n            /* end of frame */\n            if (remainingSize) return ERROR(srcSize_wrong);\n            break;\n        default:\n            return ERROR(GENERIC);   /* impossible */\n        }\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        if (ZSTD_isError(decodedSize)) return decodedSize;\n        op += decodedSize;\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n    }\n\n    return op-ostart;\n}\n\nstatic size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    ZSTD_DCtx ctx;\n    ctx.base = dst;\n    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);\n}\n\n/* ZSTD_errorFrameSizeInfoLegacy() :\n   assumes `cSize` and `dBound` are _not_ NULL */\nstatic void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)\n{\n    *cSize = ret;\n    *dBound = ZSTD_CONTENTSIZE_ERROR;\n}\n\nvoid ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)\n{\n    const BYTE* ip = (const BYTE*)src;\n    size_t remainingSize = srcSize;\n    size_t nbBlocks = 0;\n    U32 magicNumber;\n    blockProperties_t blockProperties;\n\n    /* Frame Header */\n    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n        return;\n    }\n    magicNumber = MEM_readLE32(src);\n    if (magicNumber != ZSTD_magicNumber) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));\n        return;\n    }\n    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);\n        if (ZSTD_isError(cBlockSize)) {\n            
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);\n            return;\n        }\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSize -= ZSTD_blockHeaderSize;\n        if (cBlockSize > remainingSize) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n        nbBlocks++;\n    }\n\n    *cSize = ip - (const BYTE*)src;\n    *dBound = nbBlocks * BLOCKSIZE;\n}\n\n/*******************************\n*  Streaming Decompression API\n*******************************/\n\nstatic size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)\n{\n    dctx->expected = ZSTD_frameHeaderSize;\n    dctx->phase = 0;\n    dctx->previousDstEnd = NULL;\n    dctx->base = NULL;\n    return 0;\n}\n\nstatic ZSTD_DCtx* ZSTD_createDCtx(void)\n{\n    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));\n    if (dctx==NULL) return NULL;\n    ZSTD_resetDCtx(dctx);\n    return dctx;\n}\n\nstatic size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)\n{\n    free(dctx);\n    return 0;\n}\n\nstatic size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)\n{\n    return dctx->expected;\n}\n\nstatic size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    /* Sanity check */\n    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);\n    if (dst != ctx->previousDstEnd)  /* not contiguous */\n        ctx->base = dst;\n\n    /* Decompress : frame header */\n    if (ctx->phase == 0)\n    {\n        /* Check frame magic header */\n        U32 magicNumber = MEM_readLE32(src);\n        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);\n        ctx->phase = 1;\n        ctx->expected = ZSTD_blockHeaderSize;\n        return 0;\n    }\n\n    /* Decompress : block header */\n    if (ctx->phase == 1)\n    {\n        blockProperties_t bp;\n        size_t blockSize = 
ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);\n        if (ZSTD_isError(blockSize)) return blockSize;\n        if (bp.blockType == bt_end)\n        {\n            ctx->expected = 0;\n            ctx->phase = 0;\n        }\n        else\n        {\n            ctx->expected = blockSize;\n            ctx->bType = bp.blockType;\n            ctx->phase = 2;\n        }\n\n        return 0;\n    }\n\n    /* Decompress : block content */\n    {\n        size_t rSize;\n        switch(ctx->bType)\n        {\n        case bt_compressed:\n            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);\n            break;\n        case bt_raw :\n            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet handled */\n            break;\n        case bt_end :   /* should never happen (filtered at phase 1) */\n            rSize = 0;\n            break;\n        default:\n            return ERROR(GENERIC);\n        }\n        ctx->phase = 1;\n        ctx->expected = ZSTD_blockHeaderSize;\n        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);\n        return rSize;\n    }\n\n}\n\n\n/* wrapper layer */\n\nunsigned ZSTDv02_isError(size_t code)\n{\n    return ZSTD_isError(code);\n}\n\nsize_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,\n                     const void* src, size_t compressedSize)\n{\n    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);\n}\n\nZSTDv02_Dctx* ZSTDv02_createDCtx(void)\n{\n    return (ZSTDv02_Dctx*)ZSTD_createDCtx();\n}\n\nsize_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)\n{\n    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);\n}\n\nsize_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)\n{\n    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);\n}\n\nsize_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)\n{\n    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);\n}\n\nsize_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* 
dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v02.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_V02_H_4174539423\n#define ZSTD_V02_H_4174539423\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Includes\n***************************************/\n#include <stddef.h>   /* size_t */\n\n\n/* *************************************\n*  Simple one-step function\n***************************************/\n/**\nZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format\n    compressedSize : is the exact source size\n    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.\n                      It must be equal or larger than originalSize, otherwise decompression will fail.\n    return : the number of bytes decompressed into destination buffer (originalSize)\n             or an errorCode if it fails (which can be tested using ZSTDv01_isError())\n*/\nsize_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,\n                     const void* src, size_t compressedSize);\n\n /**\n ZSTDv02_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.2.x format\n     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'\n     cSize (output parameter)  : the number of bytes that would be read to decompress this frame\n                                 or an error code if it fails (which can be tested using ZSTDv01_isError())\n     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame\n                                 or 
ZSTD_CONTENTSIZE_ERROR if an error occurs\n\n    note : assumes `cSize` and `dBound` are _not_ NULL.\n */\nvoid ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize,\n                                     size_t* cSize, unsigned long long* dBound);\n\n/**\nZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error\n*/\nunsigned ZSTDv02_isError(size_t code);\n\n\n/* *************************************\n*  Advanced functions\n***************************************/\ntypedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx;\nZSTDv02_Dctx* ZSTDv02_createDCtx(void);\nsize_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx);\n\nsize_t ZSTDv02_decompressDCtx(void* ctx,\n                              void* dst, size_t maxOriginalSize,\n                        const void* src, size_t compressedSize);\n\n/* *************************************\n*  Streaming functions\n***************************************/\nsize_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx);\n\nsize_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx);\nsize_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);\n/**\n  Use above functions alternatively.\n  ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().\n  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.\n  Result is the number of bytes regenerated within 'dst'.\n  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.\n*/\n\n/* *************************************\n*  Prefix - version detection\n***************************************/\n#define ZSTDv02_magicNumber 0xFD2FB522   /* v0.2 */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_V02_H_4174539423 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v03.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n#include <stddef.h>    /* size_t, ptrdiff_t */\n#include \"zstd_v03.h\"\n#include \"error_private.h\"\n\n\n/******************************************\n*  Compiler-specific\n******************************************/\n#if defined(_MSC_VER)   /* Visual Studio */\n#   include <stdlib.h>  /* _byteswap_ulong */\n#   include <intrin.h>  /* _byteswap_* */\n#endif\n\n\n\n/* ******************************************************************\n   mem.h\n   low-level memory access routines\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef MEM_H_MODULE\n#define MEM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/******************************************\n*  Includes\n******************************************/\n#include <stddef.h>    /* size_t, ptrdiff_t */\n#include <string.h>    /* memcpy */\n\n\n/******************************************\n*  Compiler-specific\n******************************************/\n#if defined(__GNUC__)\n#  define MEM_STATIC static __attribute__((unused))\n#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define MEM_STATIC static inline\n#elif defined(_MSC_VER)\n#  define MEM_STATIC static __inline\n#else\n#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/****************************************************************\n*  Basic Types\n*****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include <stdint.h>\n  typedef  uint8_t BYTE;\n  typedef uint16_t U16;\n  typedef  int16_t S16;\n  typedef uint32_t 
U32;\n  typedef  int32_t S32;\n  typedef uint64_t U64;\n  typedef  int64_t S64;\n#else\n  typedef unsigned char       BYTE;\n  typedef unsigned short      U16;\n  typedef   signed short      S16;\n  typedef unsigned int        U32;\n  typedef   signed int        S32;\n  typedef unsigned long long  U64;\n  typedef   signed long long  S64;\n#endif\n\n\n/****************************************************************\n*  Memory I/O\n*****************************************************************/\n/* MEM_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. 
This method is portable but violate C standard.\n *            It can generate buggy code on targets generating assembly depending on alignment.\n *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define MEM_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))\n#    define MEM_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\nMEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }\nMEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }\n\nMEM_STATIC unsigned MEM_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)\n\n/* violates C standard on structure alignment.\nOnly use if no other choice to achieve best performance on target platform */\nMEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }\nMEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }\nMEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\n\n#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler 
specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;\n\nMEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }\nMEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\nMEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }\n\n#else\n\n/* default method, safe and standard.\n   can sometimes prove slower */\n\nMEM_STATIC U16 MEM_read16(const void* memPtr)\n{\n    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U32 MEM_read32(const void* memPtr)\n{\n    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U64 MEM_read64(const void* memPtr)\n{\n    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\n\n#endif // MEM_FORCE_MEMORY_ACCESS\n\n\nMEM_STATIC U16 MEM_readLE16(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read16(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)(p[0] + (p[1]<<8));\n    }\n}\n\nMEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)\n{\n    if (MEM_isLittleEndian())\n    {\n        MEM_write16(memPtr, val);\n    }\n    else\n    {\n        BYTE* p = (BYTE*)memPtr;\n        p[0] = (BYTE)val;\n        p[1] = (BYTE)(val>>8);\n    }\n}\n\nMEM_STATIC U32 MEM_readLE24(const void* memPtr)\n{\n    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);\n}\n\nMEM_STATIC U32 MEM_readLE32(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read32(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + 
((U32)p[3]<<24));\n    }\n}\n\nMEM_STATIC U64 MEM_readLE64(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read64(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)\n                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));\n    }\n}\n\n\nMEM_STATIC size_t MEM_readLEST(const void* memPtr)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_readLE32(memPtr);\n    else\n        return (size_t)MEM_readLE64(memPtr);\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* MEM_H_MODULE */\n\n\n/* ******************************************************************\n   bitstream\n   Part of NewGen Entropy library\n   header file (to include)\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef BITSTREAM_H_MODULE\n#define BITSTREAM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/*\n*  This API consists of small unitary functions, which highly benefit from being inlined.\n*  Since link-time-optimization is not available for all compilers,\n*  these functions are defined into a .h to be included.\n*/\n\n\n/**********************************************\n*  bitStream decompression API (read backward)\n**********************************************/\ntypedef struct\n{\n    size_t   bitContainer;\n    unsigned bitsConsumed;\n    const char* ptr;\n    const char* start;\n} BIT_DStream_t;\n\ntypedef enum { BIT_DStream_unfinished = 0,\n               BIT_DStream_endOfBuffer = 1,\n               BIT_DStream_completed = 2,\n               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */\n               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... 
:( */\n\nMEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);\nMEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);\nMEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);\nMEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);\n\n\n\n/******************************************\n*  unsafe API\n******************************************/\nMEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);\n/* faster, but works only if nbBits >= 1 */\n\n\n\n/****************************************************************\n*  Helper functions\n****************************************************************/\nMEM_STATIC unsigned BIT_highbit32 (U32 val)\n{\n#   if defined(_MSC_VER)   /* Visual */\n    unsigned long r=0;\n    _BitScanReverse ( &r, val );\n    return (unsigned) r;\n#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */\n    return __builtin_clz (val) ^ 31;\n#   else   /* Software version */\n    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };\n    U32 v = val;\n    unsigned r;\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];\n    return r;\n#   endif\n}\n\n\n\n/**********************************************************\n* bitStream decoding\n**********************************************************/\n\n/*!BIT_initDStream\n*  Initialize a BIT_DStream_t.\n*  @bitD : a pointer to an already allocated BIT_DStream_t structure\n*  @srcBuffer must point at the beginning of a bitStream\n*  @srcSize must be the exact size of the bitStream\n*  @result : size of stream (== srcSize) or an errorCode if a problem is detected\n*/\nMEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)\n{\n    if (srcSize < 1) { 
memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }\n\n    if (srcSize >=  sizeof(size_t))   /* normal case */\n    {\n        U32 contain32;\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];\n        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */\n        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);\n    }\n    else\n    {\n        U32 contain32;\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = bitD->start;\n        bitD->bitContainer = *(const BYTE*)(bitD->start);\n        switch(srcSize)\n        {\n            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);\n                    /* fallthrough */\n            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);\n                    /* fallthrough */\n            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);\n                    /* fallthrough */\n            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;\n                    /* fallthrough */\n            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;\n                    /* fallthrough */\n            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;\n                    /* fallthrough */\n            default:;\n        }\n        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];\n        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */\n        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);\n        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;\n    }\n\n    return srcSize;\n}\nMEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 
nbBits)\n{\n    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);\n}\n\n/*! BIT_lookBitsFast :\n*   unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)\n{\n    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);\n}\n\nMEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)\n{\n    bitD->bitsConsumed += nbBits;\n}\n\nMEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)\n{\n    size_t value = BIT_lookBits(bitD, nbBits);\n    BIT_skipBits(bitD, nbBits);\n    return value;\n}\n\n/*!BIT_readBitsFast :\n*  unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)\n{\n    size_t value = BIT_lookBitsFast(bitD, nbBits);\n    BIT_skipBits(bitD, nbBits);\n    return value;\n}\n\nMEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)\n{\n    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */\n        return BIT_DStream_overflow;\n\n    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))\n    {\n        bitD->ptr -= bitD->bitsConsumed >> 3;\n        bitD->bitsConsumed &= 7;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        return BIT_DStream_unfinished;\n    }\n    if (bitD->ptr == bitD->start)\n    {\n        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;\n        return BIT_DStream_completed;\n    }\n    {\n        U32 nbBytes = bitD->bitsConsumed >> 3;\n        BIT_DStream_status result = BIT_DStream_unfinished;\n        if (bitD->ptr - nbBytes < bitD->start)\n        {\n            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */\n            result = BIT_DStream_endOfBuffer;\n        }\n        
bitD->ptr -= nbBytes;\n        bitD->bitsConsumed -= nbBytes*8;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */\n        return result;\n    }\n}\n\n/*! BIT_endOfDStream\n*   @return Tells if DStream has reached its exact end\n*/\nMEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)\n{\n    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* BITSTREAM_H_MODULE */\n/* ******************************************************************\n   Error codes and messages\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef ERROR_H_MODULE\n#define ERROR_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/******************************************\n*  Compiler-specific\n******************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define ERR_STATIC static inline\n#elif defined(_MSC_VER)\n#  define ERR_STATIC static __inline\n#elif defined(__GNUC__)\n#  define ERR_STATIC static __attribute__((unused))\n#else\n#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/******************************************\n*  Error Management\n******************************************/\n#define PREFIX(name) ZSTD_error_##name\n\n#define ERROR(name) (size_t)-PREFIX(name)\n\n#define ERROR_LIST(ITEM) \\\n        ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \\\n        ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \\\n        ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \\\n        ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \\\n        
ITEM(PREFIX(maxCode))\n\n#define ERROR_GENERATE_ENUM(ENUM) ENUM,\ntypedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */\n\n#define ERROR_CONVERTTOSTRING(STRING) #STRING,\n#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)\nstatic const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };\n\nERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }\n\nERR_STATIC const char* ERR_getErrorName(size_t code)\n{\n    static const char* codeError = \"Unspecified error code\";\n    if (ERR_isError(code)) return ERR_strings[-(int)(code)];\n    return codeError;\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ERROR_H_MODULE */\n/*\nConstructor and Destructor of type FSE_CTable\n    Note that its size depends on 'tableLog' and 'maxSymbolValue' */\ntypedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */\ntypedef unsigned FSE_DTable;   /* don't allocate that. 
It's just a way to be more restrictive than void* */\n\n\n/* ******************************************************************\n   FSE : Finite State Entropy coder\n   header file for static linking (only)\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/******************************************\n*  Static allocation\n******************************************/\n/* FSE buffer bounds */\n#define FSE_NCOUNTBOUND 512\n#define FSE_BLOCKBOUND(size) (size + (size>>7))\n#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */\n#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))\n#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))\n\n\n/******************************************\n*  FSE advanced API\n******************************************/\nstatic size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);\n/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */\n\nstatic size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);\n/* build a fake FSE_DTable, designed to always generate the same symbolValue */\n\n\n/******************************************\n*  FSE symbol 
decompression API\n******************************************/\ntypedef struct\n{\n    size_t      state;\n    const void* table;   /* precise table may vary, depending on U16 */\n} FSE_DState_t;\n\n\nstatic void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);\n\nstatic unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);\n\nstatic unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);\n\n\n/******************************************\n*  FSE unsafe API\n******************************************/\nstatic unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);\n/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */\n\n\n/******************************************\n*  Implementation of inline functions\n******************************************/\n\n/* decompression */\n\ntypedef struct {\n    U16 tableLog;\n    U16 fastMode;\n} FSE_DTableHeader;   /* sizeof U32 */\n\ntypedef struct\n{\n    unsigned short newState;\n    unsigned char  symbol;\n    unsigned char  nbBits;\n} FSE_decode_t;   /* size == U32 */\n\nMEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)\n{\n    FSE_DTableHeader DTableH;\n    memcpy(&DTableH, dt, sizeof(DTableH));\n    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);\n    BIT_reloadDStream(bitD);\n    DStatePtr->table = dt + 1;\n}\n\nMEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    const U32  nbBits = DInfo.nbBits;\n    BYTE symbol = DInfo.symbol;\n    size_t lowBits = BIT_readBits(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\nMEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    const FSE_decode_t DInfo = ((const 
FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    const U32 nbBits = DInfo.nbBits;\n    BYTE symbol = DInfo.symbol;\n    size_t lowBits = BIT_readBitsFast(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\nMEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)\n{\n    return DStatePtr->state == 0;\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n/* ******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   header file for static linking (only)\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/******************************************\n*  Static allocation macros\n******************************************/\n/* Huff0 buffer bounds */\n#define HUF_CTABLEBOUND 129\n#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */\n#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* static allocation of Huff0's DTable */\n#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */\n#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \\\n        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \\\n        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \\\n        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }\n\n\n/******************************************\n*  Advanced functions\n******************************************/\nstatic size_t 
HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\nstatic size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n/*\n    zstd - standard compression library\n    Header File\n    Copyright (C) 2014-2015, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Includes\n***************************************/\n#include <stddef.h>   /* size_t */\n\n\n/* *************************************\n*  Version\n***************************************/\n#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */\n#define ZSTD_VERSION_MINOR    2    /* for new (non-breaking) interface capabilities */\n#define ZSTD_VERSION_RELEASE  2    /* for tweaks, bug-fixes, or development */\n#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)\n\n\n/* *************************************\n*  Advanced functions\n***************************************/\ntypedef struct ZSTD_CCtx_s ZSTD_CCtx;   /* incomplete type */\n\n#if defined (__cplusplus)\n}\n#endif\n/*\n    zstd - standard compression library\n    Header File for static linking only\n    Copyright (C) 2014-2015, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must 
retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n/* The objects defined into this file should be considered experimental.\n * They are not labelled stable, as their prototype may change in the future.\n * You can use them for tests, provide feedback, or if you can endure risk of future changes.\n */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Streaming functions\n***************************************/\n\ntypedef struct ZSTD_DCtx_s ZSTD_DCtx;\n\n/*\n  Use above functions alternatively.\n  ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().\n  ZSTD_decompressContinue() will use previous data blocks to improve 
compression if they are located prior to current block.\n  Result is the number of bytes regenerated within 'dst'.\n  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.\n*/\n\n/* *************************************\n*  Prefix - version detection\n***************************************/\n#define ZSTD_magicNumber 0xFD2FB523   /* v0.3 */\n\n\n#if defined (__cplusplus)\n}\n#endif\n/* ******************************************************************\n   FSE : Finite State Entropy coder\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n#ifndef FSE_COMMONDEFS_ONLY\n\n/****************************************************************\n*  Tuning parameters\n****************************************************************/\n/* MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */\n#define FSE_MAX_MEMORY_USAGE 14\n#define FSE_DEFAULT_MEMORY_USAGE 13\n\n/* FSE_MAX_SYMBOL_VALUE :\n*  Maximum symbol value authorized.\n*  Required for proper stack allocation */\n#define FSE_MAX_SYMBOL_VALUE 255\n\n\n/****************************************************************\n*  template functions type & suffix\n****************************************************************/\n#define FSE_FUNCTION_TYPE BYTE\n#define FSE_FUNCTION_EXTENSION\n\n\n/****************************************************************\n*  Byte symbol type\n****************************************************************/\n#endif   /* !FSE_COMMONDEFS_ONLY 
*/\n\n\n/****************************************************************\n*  Compiler specifics\n****************************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  define FORCE_INLINE static __forceinline\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */\n#else\n#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#    ifdef __GNUC__\n#      define FORCE_INLINE static inline __attribute__((always_inline))\n#    else\n#      define FORCE_INLINE static inline\n#    endif\n#  else\n#    define FORCE_INLINE static\n#  endif /* __STDC_VERSION__ */\n#endif\n\n\n/****************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n/****************************************************************\n*  Constants\n*****************************************************************/\n#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)\n#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)\n#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)\n#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)\n#define FSE_MIN_TABLELOG 5\n\n#define FSE_TABLELOG_ABSOLUTE_MAX 15\n#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX\n#error \"FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported\"\n#endif\n\n\n/****************************************************************\n*  Error Management\n****************************************************************/\n#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations 
*/\n\n\n/****************************************************************\n*  Complex types\n****************************************************************/\ntypedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];\n\n\n/****************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef FSE_FUNCTION_EXTENSION\n#  error \"FSE_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSE_FUNCTION_TYPE\n#  error \"FSE_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSE_CAT(X,Y) X##Y\n#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)\n#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)\n\n\n/* Function templates */\n\n#define FSE_DECODE_TYPE FSE_decode_t\n\nstatic U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }\n\nstatic size_t FSE_buildDTable\n(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    void* ptr = dt+1;\n    FSE_DTableHeader DTableH;\n    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;\n    const U32 tableSize = 1 << tableLog;\n    const U32 tableMask = tableSize-1;\n    const U32 step = FSE_tableStep(tableSize);\n    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];\n    U32 position = 0;\n    U32 highThreshold = tableSize-1;\n    const S16 largeLimit= (S16)(1 << (tableLog-1));\n    U32 noLarge = 1;\n    U32 s;\n\n    /* Sanity Checks */\n    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);\n    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);\n\n    /* Init, lay down lowprob symbols */\n    DTableH.tableLog = (U16)tableLog;\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        if (normalizedCounter[s]==-1)\n        {\n            
tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;\n            symbolNext[s] = 1;\n        }\n        else\n        {\n            if (normalizedCounter[s] >= largeLimit) noLarge=0;\n            symbolNext[s] = normalizedCounter[s];\n        }\n    }\n\n    /* Spread symbols */\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        int i;\n        for (i=0; i<normalizedCounter[s]; i++)\n        {\n            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;\n            position = (position + step) & tableMask;\n            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n        }\n    }\n\n    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n\n    /* Build Decoding table */\n    {\n        U32 i;\n        for (i=0; i<tableSize; i++)\n        {\n            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);\n            U16 nextState = symbolNext[symbol]++;\n            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );\n            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);\n        }\n    }\n\n    DTableH.fastMode = (U16)noLarge;\n    memcpy(dt, &DTableH, sizeof(DTableH));\n    return 0;\n}\n\n\n#ifndef FSE_COMMONDEFS_ONLY\n/******************************************\n*  FSE helper functions\n******************************************/\nstatic unsigned FSE_isError(size_t code) { return ERR_isError(code); }\n\n\n/****************************************************************\n*  FSE NCount encoding-decoding\n****************************************************************/\nstatic short FSE_abs(short a)\n{\n    return a<0 ? 
-a : a;\n}\n\nstatic size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,\n                 const void* headerBuffer, size_t hbSize)\n{\n    const BYTE* const istart = (const BYTE*) headerBuffer;\n    const BYTE* const iend = istart + hbSize;\n    const BYTE* ip = istart;\n    int nbBits;\n    int remaining;\n    int threshold;\n    U32 bitStream;\n    int bitCount;\n    unsigned charnum = 0;\n    int previous0 = 0;\n\n    if (hbSize < 4) return ERROR(srcSize_wrong);\n    bitStream = MEM_readLE32(ip);\n    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */\n    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);\n    bitStream >>= 4;\n    bitCount = 4;\n    *tableLogPtr = nbBits;\n    remaining = (1<<nbBits)+1;\n    threshold = 1<<nbBits;\n    nbBits++;\n\n    while ((remaining>1) && (charnum<=*maxSVPtr))\n    {\n        if (previous0)\n        {\n            unsigned n0 = charnum;\n            while ((bitStream & 0xFFFF) == 0xFFFF)\n            {\n                n0+=24;\n                if (ip < iend-5)\n                {\n                    ip+=2;\n                    bitStream = MEM_readLE32(ip) >> bitCount;\n                }\n                else\n                {\n                    bitStream >>= 16;\n                    bitCount+=16;\n                }\n            }\n            while ((bitStream & 3) == 3)\n            {\n                n0+=3;\n                bitStream>>=2;\n                bitCount+=2;\n            }\n            n0 += bitStream & 3;\n            bitCount += 2;\n            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);\n            while (charnum < n0) normalizedCounter[charnum++] = 0;\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))\n            {\n                ip += bitCount>>3;\n                bitCount &= 7;\n                bitStream = MEM_readLE32(ip) >> bitCount;\n            }\n            else\n                
bitStream >>= 2;\n        }\n        {\n            const short max = (short)((2*threshold-1)-remaining);\n            short count;\n\n            if ((bitStream & (threshold-1)) < (U32)max)\n            {\n                count = (short)(bitStream & (threshold-1));\n                bitCount   += nbBits-1;\n            }\n            else\n            {\n                count = (short)(bitStream & (2*threshold-1));\n                if (count >= threshold) count -= max;\n                bitCount   += nbBits;\n            }\n\n            count--;   /* extra accuracy */\n            remaining -= FSE_abs(count);\n            normalizedCounter[charnum++] = count;\n            previous0 = !count;\n            while (remaining < threshold)\n            {\n                nbBits--;\n                threshold >>= 1;\n            }\n\n            {\n                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))\n                {\n                    ip += bitCount>>3;\n                    bitCount &= 7;\n                }\n                else\n                {\n                    bitCount -= (int)(8 * (iend - 4 - ip));\n                    ip = iend - 4;\n                }\n                bitStream = MEM_readLE32(ip) >> (bitCount & 31);\n            }\n        }\n    }\n    if (remaining != 1) return ERROR(GENERIC);\n    *maxSVPtr = charnum-1;\n\n    ip += (bitCount+7)>>3;\n    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);\n    return ip-istart;\n}\n\n\n/*********************************************************\n*  Decompression (Byte symbols)\n*********************************************************/\nstatic size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->newState = 0;\n    cell->symbol = symbolValue;\n    
cell->nbBits = 0;\n\n    return 0;\n}\n\n\nstatic size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSymbolValue = tableMask;\n    unsigned s;\n\n    /* Sanity checks */\n    if (nbBits < 1) return ERROR(GENERIC);         /* min size */\n\n    /* Build Decoding Table */\n    DTableH->tableLog = (U16)nbBits;\n    DTableH->fastMode = 1;\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        dinfo[s].newState = 0;\n        dinfo[s].symbol = (BYTE)s;\n        dinfo[s].nbBits = (BYTE)nbBits;\n    }\n\n    return 0;\n}\n\nFORCE_INLINE size_t FSE_decompress_usingDTable_generic(\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const FSE_DTable* dt, const unsigned fast)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const omax = op + maxDstSize;\n    BYTE* const olimit = omax-3;\n\n    BIT_DStream_t bitD;\n    FSE_DState_t state1;\n    FSE_DState_t state2;\n    size_t errorCode;\n\n    /* Init */\n    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */\n    if (FSE_isError(errorCode)) return errorCode;\n\n    FSE_initDState(&state1, &bitD, dt);\n    FSE_initDState(&state2, &bitD, dt);\n\n#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)\n\n    /* 4 symbols per loop */\n    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)\n    {\n        op[0] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BIT_reloadDStream(&bitD);\n\n        op[1] = FSE_GETSYMBOL(&state2);\n\n        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }\n\n        op[2] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BIT_reloadDStream(&bitD);\n\n        op[3] = FSE_GETSYMBOL(&state2);\n    }\n\n    /* tail */\n    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */\n    while (1)\n    {\n        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )\n            break;\n\n        *op++ = FSE_GETSYMBOL(&state1);\n\n        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )\n            break;\n\n        *op++ = FSE_GETSYMBOL(&state2);\n    }\n\n    /* end ? 
*/\n    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))\n        return op-ostart;\n\n    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */\n\n    return ERROR(corruption_detected);\n}\n\n\nstatic size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,\n                            const void* cSrc, size_t cSrcSize,\n                            const FSE_DTable* dt)\n{\n    FSE_DTableHeader DTableH;\n    memcpy(&DTableH, dt, sizeof(DTableH));\n\n    /* select fast mode (static) */\n    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);\n    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);\n}\n\n\nstatic size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)\n{\n    const BYTE* const istart = (const BYTE*)cSrc;\n    const BYTE* ip = istart;\n    short counting[FSE_MAX_SYMBOL_VALUE+1];\n    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */\n    unsigned tableLog;\n    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;\n    size_t errorCode;\n\n    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */\n\n    /* normal FSE decoding mode */\n    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);\n    if (FSE_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);\n    if (FSE_isError(errorCode)) return errorCode;\n\n    /* always return, even if it is an error code */\n    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);\n}\n\n\n\n#endif   /* FSE_COMMONDEFS_ONLY */\n/* 
******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/****************************************************************\n*  Compiler specifics\n****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n/* inline is defined */\n#elif defined(_MSC_VER)\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  define inline __inline\n#else\n#  define inline /* disable inline */\n#endif\n\n\n/****************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n/****************************************************************\n*  Error Management\n****************************************************************/\n#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/******************************************\n*  Helper functions\n******************************************/\nstatic 
unsigned HUF_isError(size_t code) { return ERR_isError(code); }\n\n#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */\n#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */\n#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */\n#define HUF_MAX_SYMBOL_VALUE 255\n#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)\n#  error \"HUF_MAX_TABLELOG is too large !\"\n#endif\n\n\n\n/*********************************************************\n*  Huff0 : Huffman block decompression\n*********************************************************/\ntypedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */\n\ntypedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */\n\ntypedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;\n\n/*! HUF_readStats\n    Read compact Huffman tree, saved by HUF_writeCTable\n    @huffWeight : destination buffer\n    @return : size read from `src`\n*/\nstatic size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,\n                            U32* nbSymbolsPtr, U32* tableLogPtr,\n                            const void* src, size_t srcSize)\n{\n    U32 weightTotal;\n    U32 tableLog;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize;\n    size_t oSize;\n    U32 n;\n\n    if (!srcSize) return ERROR(srcSize_wrong);\n    iSize = ip[0];\n    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzer complain ... 
*/\n\n    if (iSize >= 128)  /* special header */\n    {\n        if (iSize >= (242))   /* RLE */\n        {\n            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };\n            oSize = l[iSize-242];\n            memset(huffWeight, 1, hwSize);\n            iSize = 0;\n        }\n        else   /* Incompressible */\n        {\n            oSize = iSize - 127;\n            iSize = ((oSize+1)/2);\n            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n            if (oSize >= hwSize) return ERROR(corruption_detected);\n            ip += 1;\n            for (n=0; n<oSize; n+=2)\n            {\n                huffWeight[n]   = ip[n/2] >> 4;\n                huffWeight[n+1] = ip[n/2] & 15;\n            }\n        }\n    }\n    else  /* header compressed with FSE (normal case) */\n    {\n        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */\n        if (FSE_isError(oSize)) return oSize;\n    }\n\n    /* collect weight stats */\n    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));\n    weightTotal = 0;\n    for (n=0; n<oSize; n++)\n    {\n        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);\n        rankStats[huffWeight[n]]++;\n        weightTotal += (1 << huffWeight[n]) >> 1;\n    }\n    if (weightTotal == 0) return ERROR(corruption_detected);\n\n    /* get last non-null symbol weight (implied, total must be 2^n) */\n    tableLog = BIT_highbit32(weightTotal) + 1;\n    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);\n    {\n        U32 total = 1 << tableLog;\n        U32 rest = total - weightTotal;\n        U32 verif = 1 << BIT_highbit32(rest);\n        U32 lastWeight = BIT_highbit32(rest) + 1;\n        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */\n        
huffWeight[oSize] = (BYTE)lastWeight;\n        rankStats[lastWeight]++;\n    }\n\n    /* check tree construction validity */\n    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */\n\n    /* results */\n    *nbSymbolsPtr = (U32)(oSize+1);\n    *tableLogPtr = tableLog;\n    return iSize+1;\n}\n\n\n/**************************/\n/* single-symbol decoding */\n/**************************/\n\nstatic size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)\n{\n    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */\n    U32 tableLog = 0;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize = ip[0];\n    U32 nbSymbols = 0;\n    U32 n;\n    U32 nextRankStart;\n    void* ptr = DTable+1;\n    HUF_DEltX2* const dt = (HUF_DEltX2*)(ptr);\n\n    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */\n    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */\n    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */\n\n    /* Prepare ranks */\n    nextRankStart = 0;\n    for (n=1; n<=tableLog; n++)\n    {\n        U32 current = nextRankStart;\n        nextRankStart += (rankVal[n] << (n-1));\n        rankVal[n] = current;\n    }\n\n    /* fill DTable */\n    for (n=0; n<nbSymbols; n++)\n    {\n        const U32 w = huffWeight[n];\n        const U32 length = (1 << w) >> 1;\n        U32 i;\n        HUF_DEltX2 D;\n        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);\n        for (i = rankVal[w]; i < rankVal[w] + length; i++)\n            dt[i] = D;\n        rankVal[w] += length;\n    }\n\n    return iSize;\n}\n\nstatic BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)\n{\n        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */\n        const BYTE c = dt[val].byte;\n        BIT_skipBits(Dstream, dt[val].nbBits);\n        return c;\n}\n\n#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \\\n    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \\\n        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\n#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\nstatic inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 4 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && 
(p <= pEnd-4))\n    {\n        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    /* no more data to retrieve from bitstream, hence no need to reload */\n    while (p < pEnd)\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    return pEnd-pStart;\n}\n\n\nstatic size_t HUF_decompress4X2_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U16* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n\n        const void* ptr = DTable;\n        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;\n        const U32 dtLog = DTable[0];\n        size_t errorCode;\n\n        /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = 
opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BIT_initDStream(&bitD1, istart1, length1);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD2, istart2, length2);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD3, istart3, length3);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD4, istart4, length4);\n        if (HUF_isError(errorCode)) return errorCode;\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )\n        {\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);\n\n            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return 
ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);\n        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);\n        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);\n        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nstatic size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n    size_t errorCode;\n\n    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);\n    if (HUF_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/***************************/\n/* double-symbols decoding */\n/***************************/\n\nstatic void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,\n                           const U32* rankValOrigin, const int minWeight,\n                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,\n                           U32 nbBitsBaseline, U16 baseSeq)\n{\n    HUF_DEltX4 DElt;\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];\n    U32 s;\n\n    /* get pre-calculated rankVal */\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill skipped values */\n    if (minWeight>1)\n    {\n        
U32 i, skipSize = rankVal[minWeight];\n        MEM_writeLE16(&(DElt.sequence), baseSeq);\n        DElt.nbBits   = (BYTE)(consumed);\n        DElt.length   = 1;\n        for (i = 0; i < skipSize; i++)\n            DTable[i] = DElt;\n    }\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */\n    {\n        const U32 symbol = sortedSymbols[s].symbol;\n        const U32 weight = sortedSymbols[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 length = 1 << (sizeLog-nbBits);\n        const U32 start = rankVal[weight];\n        U32 i = start;\n        const U32 end = start + length;\n\n        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));\n        DElt.nbBits = (BYTE)(nbBits + consumed);\n        DElt.length = 2;\n        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */\n\n        rankVal[weight] += length;\n    }\n}\n\ntypedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];\n\nstatic void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,\n                           const sortedSymbol_t* sortedList, const U32 sortedListSize,\n                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,\n                           const U32 nbBitsBaseline)\n{\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];\n    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */\n    const U32 minBits  = nbBitsBaseline - maxWeight;\n    U32 s;\n\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++)\n    {\n        const U16 symbol = sortedList[s].symbol;\n        const U32 weight = sortedList[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 start = rankVal[weight];\n        const U32 length = 1 << (targetLog-nbBits);\n\n        if (targetLog-nbBits >= minBits)   /* 
enough room for a second symbol */\n        {\n            U32 sortedRank;\n            int minWeight = nbBits + scaleLog;\n            if (minWeight < 1) minWeight = 1;\n            sortedRank = rankStart[minWeight];\n            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,\n                           rankValOrigin[nbBits], minWeight,\n                           sortedList+sortedRank, sortedListSize-sortedRank,\n                           nbBitsBaseline, symbol);\n        }\n        else\n        {\n            U32 i;\n            const U32 end = start + length;\n            HUF_DEltX4 DElt;\n\n            MEM_writeLE16(&(DElt.sequence), symbol);\n            DElt.nbBits   = (BYTE)(nbBits);\n            DElt.length   = 1;\n            for (i = start; i < end; i++)\n                DTable[i] = DElt;\n        }\n        rankVal[weight] += length;\n    }\n}\n\nstatic size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)\n{\n    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];\n    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];\n    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };\n    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };\n    U32* const rankStart = rankStart0+1;\n    rankVal_t rankVal;\n    U32 tableLog, maxW, sizeOfSort, nbSymbols;\n    const U32 memLog = DTable[0];\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize = ip[0];\n    void* ptr = DTable;\n    HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;\n\n    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */\n    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);\n    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */\n\n    /* find maxWeight */\n    for (maxW = tableLog; rankStats[maxW]==0; maxW--)\n        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */\n\n    /* Get start index of each weight */\n    {\n        U32 w, nextRankStart = 0;\n        for (w=1; w<=maxW; w++)\n        {\n            U32 current = nextRankStart;\n            nextRankStart += rankStats[w];\n            rankStart[w] = current;\n        }\n        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/\n        sizeOfSort = nextRankStart;\n    }\n\n    /* sort symbols by weight */\n    {\n        U32 s;\n        for (s=0; s<nbSymbols; s++)\n        {\n            U32 w = weightList[s];\n            U32 r = rankStart[w]++;\n            sortedSymbol[r].symbol = (BYTE)s;\n            sortedSymbol[r].weight = (BYTE)w;\n        }\n        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */\n    }\n\n    /* Build rankVal */\n    {\n        const U32 minBits = tableLog+1 - maxW;\n        U32 nextRankVal = 0;\n        U32 w, consumed;\n        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */\n        U32* rankVal0 = rankVal[0];\n        for (w=1; w<=maxW; w++)\n        {\n            U32 current = nextRankVal;\n            nextRankVal += rankStats[w] << (w+rescale);\n            rankVal0[w] = current;\n        }\n        for (consumed = minBits; consumed <= memLog - minBits; consumed++)\n        {\n            U32* rankValPtr = rankVal[consumed];\n            for (w = 1; w <= maxW; w++)\n            {\n                rankValPtr[w] = rankVal0[w] >> consumed;\n            }\n        }\n    }\n\n    HUF_fillDTableX4(dt, 
memLog,\n                   sortedSymbol, sizeOfSort,\n                   rankStart0, rankVal, maxW,\n                   tableLog+1);\n\n    return iSize;\n}\n\n\nstatic U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 2);\n    BIT_skipBits(DStream, dt[val].nbBits);\n    return dt[val].length;\n}\n\nstatic U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 1);\n    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);\n    else\n    {\n        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))\n        {\n            BIT_skipBits(DStream, dt[val].nbBits);\n            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))\n                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */\n        }\n    }\n    return 1;\n}\n\n\n#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \\\n    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \\\n        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\nstatic inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 8 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))\n    {\n        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);\n\n    while (p <= pEnd-2)\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */\n\n    if (p < pEnd)\n        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);\n\n    return p-pStart;\n}\n\n\n\nstatic size_t HUF_decompress4X4_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U32* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n\n        const void* ptr = DTable;\n        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;\n        const U32 dtLog = DTable[0];\n        size_t errorCode;\n\n  
      /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BIT_initDStream(&bitD1, istart1, length1);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD2, istart2, length2);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD3, istart3, length3);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD4, istart4, length4);\n        if (HUF_isError(errorCode)) return errorCode;\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )\n        {\n            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);\n            
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);\n\n            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);\n        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);\n        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);\n        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nstatic size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return 
ERROR(srcSize_wrong);\n    ip += hSize;\n    cSrcSize -= hSize;\n\n    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/**********************************/\n/* Generic decompression selector */\n/**********************************/\n\ntypedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;\nstatic const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =\n{\n    /* single, double, quad */\n    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */\n    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */\n    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */\n    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */\n    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */\n    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */\n    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */\n    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */\n    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */\n    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */\n    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */\n    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */\n    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */\n    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */\n    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */\n    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */\n};\n\ntypedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);\n\nstatic size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };\n    /* estimate decompression time */\n    U32 Q;\n    const U32 D256 = (U32)(dstSize >> 8);\n    U32 Dtime[3];\n    U32 algoNb = 0;\n    int n;\n\n    /* validation checks */\n    if 
(dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */\n    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */\n    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */\n\n    /* decoder timing evaluation */\n    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */\n    for (n=0; n<3; n++)\n        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);\n\n    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */\n\n    if (Dtime[1] < Dtime[0]) algoNb = 1;\n\n    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);\n\n    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */\n    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */\n    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */\n}\n/*\n    zstd - standard compression library\n    Copyright (C) 2014-2015, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES 
OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n/* ***************************************************************\n*  Tuning parameters\n*****************************************************************/\n/*!\n*  MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*/\n#define ZSTD_MEMORY_USAGE 17\n\n/*!\n * HEAPMODE :\n * Select how default compression functions will allocate memory for their hash table,\n * in memory stack (0, fastest), or in memory heap (1, requires malloc())\n * Note that compression context is fairly large, as a consequence heap memory is recommended.\n */\n#ifndef ZSTD_HEAPMODE\n#  define ZSTD_HEAPMODE 1\n#endif /* ZSTD_HEAPMODE */\n\n/*!\n*  LEGACY_SUPPORT :\n*  decompressor can decode older formats (starting from Zstd 0.1+)\n*/\n#ifndef ZSTD_LEGACY_SUPPORT\n#  define ZSTD_LEGACY_SUPPORT 1\n#endif\n\n\n/* *******************************************************\n*  Includes\n*********************************************************/\n#include <stdlib.h>      /* calloc */\n#include <string.h>      /* memcpy, 
memmove */\n#include <stdio.h>       /* debug : printf */\n\n\n/* *******************************************************\n*  Compiler specifics\n*********************************************************/\n#ifdef __AVX2__\n#  include <immintrin.h>   /* AVX2 intrinsics */\n#endif\n\n#ifdef _MSC_VER    /* Visual Studio */\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */\n#else\n#  define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)\n#endif\n\n\n/* *******************************************************\n*  Constants\n*********************************************************/\n#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)\n#define HASH_TABLESIZE (1 << HASH_LOG)\n#define HASH_MASK (HASH_TABLESIZE - 1)\n\n#define KNUTH 2654435761\n\n#define BIT7 128\n#define BIT6  64\n#define BIT5  32\n#define BIT4  16\n#define BIT1   2\n#define BIT0   1\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define BLOCKSIZE (128 KB)                 /* define, for static allocation */\n#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)\n#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)\n#define IS_RAW BIT0\n#define IS_RLE BIT1\n\n#define WORKPLACESIZE (BLOCKSIZE*3)\n#define MINMATCH 4\n#define MLbits   7\n#define LLbits   6\n#define Offbits  5\n#define MaxML  ((1<<MLbits )-1)\n#define MaxLL  ((1<<LLbits )-1)\n#define MaxOff   31\n#define LitFSELog  11\n#define MLFSELog   10\n#define LLFSELog   10\n#define OffFSELog   9\n#define MAX(a,b) ((a)<(b)?(b):(a))\n#define MaxSeq MAX(MaxLL, MaxML)\n\n#define LITERAL_NOENTROPY 63\n#define COMMAND_NOENTROPY 7   /* to remove */\n\n#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)\n\nstatic const size_t ZSTD_blockHeaderSize = 3;\nstatic const size_t ZSTD_frameHeaderSize = 4;\n\n\n/* 
*******************************************************\n*  Memory operations\n**********************************************************/\nstatic void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }\n\nstatic void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }\n\n#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }\n\n/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */\nstatic void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)\n{\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + length;\n    do COPY8(op, ip) while (op < oend);\n}\n\n\n/* **************************************\n*  Local structures\n****************************************/\ntypedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;\n\ntypedef struct\n{\n    blockType_t blockType;\n    U32 origSize;\n} blockProperties_t;\n\ntypedef struct {\n    void* buffer;\n    U32*  offsetStart;\n    U32*  offset;\n    BYTE* offCodeStart;\n    BYTE* offCode;\n    BYTE* litStart;\n    BYTE* lit;\n    BYTE* litLengthStart;\n    BYTE* litLength;\n    BYTE* matchLengthStart;\n    BYTE* matchLength;\n    BYTE* dumpsStart;\n    BYTE* dumps;\n} seqStore_t;\n\n\n/* *************************************\n*  Error Management\n***************************************/\n/*! 
ZSTD_isError\n*   tells if a return value is an error code */\nstatic unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }\n\n\n\n/* *************************************************************\n*   Decompression section\n***************************************************************/\nstruct ZSTD_DCtx_s\n{\n    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];\n    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];\n    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];\n    void* previousDstEnd;\n    void* base;\n    size_t expected;\n    blockType_t bType;\n    U32 phase;\n    const BYTE* litPtr;\n    size_t litSize;\n    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];\n};   /* typedef'd to ZSTD_Dctx within \"zstd_static.h\" */\n\n\nstatic size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)\n{\n    const BYTE* const in = (const BYTE* const)src;\n    BYTE headerFlags;\n    U32 cSize;\n\n    if (srcSize < 3) return ERROR(srcSize_wrong);\n\n    headerFlags = *in;\n    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);\n\n    bpPtr->blockType = (blockType_t)(headerFlags >> 6);\n    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? 
cSize : 0;\n\n    if (bpPtr->blockType == bt_end) return 0;\n    if (bpPtr->blockType == bt_rle) return 1;\n    return cSize;\n}\n\nstatic size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);\n    memcpy(dst, src, srcSize);\n    return srcSize;\n}\n\n\n/** ZSTD_decompressLiterals\n    @return : nb of bytes read from src, or an error code*/\nstatic size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,\n                                const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n\n    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n\n    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);\n    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);\n\n    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);\n\n    *maxDstSizePtr = litSize;\n    return litCSize + 5;\n}\n\n\n/** ZSTD_decodeLiteralsBlock\n    @return : nb of bytes read from src (< srcSize )*/\nstatic size_t ZSTD_decodeLiteralsBlock(void* ctx,\n                          const void* src, size_t srcSize)\n{\n    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;\n    const BYTE* const istart = (const BYTE* const)src;\n\n    /* any compressed block with literals segment must be at least this size */\n    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);\n\n    switch(*istart & 3)\n    {\n    default:\n    case 0:\n        {\n            size_t litSize = BLOCKSIZE;\n            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            memset(dctx->litBuffer + dctx->litSize, 0, 8);\n     
       return readSize;   /* works if it's an error too */\n        }\n    case IS_RAW:\n        {\n            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */\n            {\n                if (litSize > BLOCKSIZE) return ERROR(corruption_detected);\n                if (litSize > srcSize-3) return ERROR(corruption_detected);\n                memcpy(dctx->litBuffer, istart, litSize);\n                dctx->litPtr = dctx->litBuffer;\n                dctx->litSize = litSize;\n                memset(dctx->litBuffer + dctx->litSize, 0, 8);\n                return litSize+3;\n            }\n            /* direct reference into compressed stream */\n            dctx->litPtr = istart+3;\n            dctx->litSize = litSize;\n            return litSize+3;\n        }\n    case IS_RLE:\n        {\n            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);\n            memset(dctx->litBuffer, istart[3], litSize + 8);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            return 4;\n        }\n    }\n}\n\n\nstatic size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,\n                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,\n                         const void* src, size_t srcSize)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* ip = istart;\n    const BYTE* const iend = istart + srcSize;\n    U32 LLtype, Offtype, MLtype;\n    U32 LLlog, Offlog, MLlog;\n    size_t dumpsLength;\n\n    /* check */\n    if (srcSize < 5) return ERROR(srcSize_wrong);\n\n    /* SeqHead */\n    *nbSeq = MEM_readLE16(ip); ip+=2;\n    LLtype  = *ip >> 6;\n    Offtype = (*ip 
>> 4) & 3;\n    MLtype  = (*ip >> 2) & 3;\n    if (*ip & 2)\n    {\n        dumpsLength  = ip[2];\n        dumpsLength += ip[1] << 8;\n        ip += 3;\n    }\n    else\n    {\n        dumpsLength  = ip[1];\n        dumpsLength += (ip[0] & 1) << 8;\n        ip += 2;\n    }\n    *dumpsPtr = ip;\n    ip += dumpsLength;\n    *dumpsLengthPtr = dumpsLength;\n\n    /* check */\n    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are \"raw\", hence no header, but at least xxLog bits per type */\n\n    /* sequences */\n    {\n        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */\n        size_t headerSize;\n\n        /* Build DTables */\n        switch(LLtype)\n        {\n        case bt_rle :\n            LLlog = 0;\n            FSE_buildDTable_rle(DTableLL, *ip++); break;\n        case bt_raw :\n            LLlog = LLbits;\n            FSE_buildDTable_raw(DTableLL, LLbits); break;\n        default :\n            {   U32 max = MaxLL;\n                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (LLlog > LLFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableLL, norm, max, LLlog);\n        }   }\n\n        switch(Offtype)\n        {\n        case bt_rle :\n            Offlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */\n            break;\n        case bt_raw :\n            Offlog = Offbits;\n            FSE_buildDTable_raw(DTableOffb, Offbits); break;\n        default :\n            {   U32 max = MaxOff;\n                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (Offlog > 
OffFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableOffb, norm, max, Offlog);\n        }   }\n\n        switch(MLtype)\n        {\n        case bt_rle :\n            MLlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSE_buildDTable_rle(DTableML, *ip++); break;\n        case bt_raw :\n            MLlog = MLbits;\n            FSE_buildDTable_raw(DTableML, MLbits); break;\n        default :\n            {   U32 max = MaxML;\n                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (MLlog > MLFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableML, norm, max, MLlog);\n    }   }   }\n\n    return ip-istart;\n}\n\n\ntypedef struct {\n    size_t litLength;\n    size_t offset;\n    size_t matchLength;\n} seq_t;\n\ntypedef struct {\n    BIT_DStream_t DStream;\n    FSE_DState_t stateLL;\n    FSE_DState_t stateOffb;\n    FSE_DState_t stateML;\n    size_t prevOffset;\n    const BYTE* dumps;\n    const BYTE* dumpsEnd;\n} seqState_t;\n\n\nstatic void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)\n{\n    size_t litLength;\n    size_t prevOffset;\n    size_t offset;\n    size_t matchLength;\n    const BYTE* dumps = seqState->dumps;\n    const BYTE* const de = seqState->dumpsEnd;\n\n    /* Literal length */\n    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));\n    prevOffset = litLength ? seq->offset : seqState->prevOffset;\n    seqState->prevOffset = seq->offset;\n    if (litLength == MaxLL)\n    {\n        const U32 add = dumps<de ? 
*dumps++ : 0;\n        if (add < 255) litLength += add;\n        else if (dumps + 3 <= de)\n        {\n            litLength = MEM_readLE24(dumps);\n            dumps += 3;\n        }\n        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */\n    }\n\n    /* Offset */\n    {\n        static const size_t offsetPrefix[MaxOff+1] = {  /* note : size_t faster than U32 */\n                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,\n                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,\n                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };\n        U32 offsetCode, nbBits;\n        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */\n        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));\n        nbBits = offsetCode - 1;\n        if (offsetCode==0) nbBits = 0;   /* cmove */\n        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);\n        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));\n        if (offsetCode==0) offset = prevOffset;   /* cmove */\n    }\n\n    /* MatchLength */\n    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));\n    if (matchLength == MaxML)\n    {\n        const U32 add = dumps<de ? 
*dumps++ : 0;\n        if (add < 255) matchLength += add;\n        else if (dumps + 3 <= de)\n        {\n            matchLength = MEM_readLE24(dumps);\n            dumps += 3;\n        }\n        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */\n    }\n    matchLength += MINMATCH;\n\n    /* save result */\n    seq->litLength = litLength;\n    seq->offset = offset;\n    seq->matchLength = matchLength;\n    seqState->dumps = dumps;\n}\n\n\nstatic size_t ZSTD_execSequence(BYTE* op,\n                                seq_t sequence,\n                                const BYTE** litPtr, const BYTE* const litLimit,\n                                BYTE* const base, BYTE* const oend)\n{\n    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */\n    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */\n    const BYTE* const ostart = op;\n    BYTE* const oLitEnd = op + sequence.litLength;\n    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */\n    BYTE* const oend_8 = oend-8;\n    const BYTE* const litEnd = *litPtr + sequence.litLength;\n\n    /* checks */\n    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */\n    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */\n    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */\n\n    /* copy Literals */\n    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */\n    op = oLitEnd;\n    *litPtr = litEnd;   /* update for next sequence */\n\n    /* copy Match */\n    {\n        const BYTE* match = op - sequence.offset;\n\n        /* check */\n        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test 
(this test seems kept by clang optimizer) */\n        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */\n        if (match < base) return ERROR(corruption_detected);\n\n        /* close range match, overlap */\n        if (sequence.offset < 8)\n        {\n            const int dec64 = dec64table[sequence.offset];\n            op[0] = match[0];\n            op[1] = match[1];\n            op[2] = match[2];\n            op[3] = match[3];\n            match += dec32table[sequence.offset];\n            ZSTD_copy4(op+4, match);\n            match -= dec64;\n        }\n        else\n        {\n            ZSTD_copy8(op, match);\n        }\n        op += 8; match += 8;\n\n        if (oMatchEnd > oend-(16-MINMATCH))\n        {\n            if (op < oend_8)\n            {\n                ZSTD_wildcopy(op, match, oend_8 - op);\n                match += oend_8 - op;\n                op = oend_8;\n            }\n            while (op < oMatchEnd) *op++ = *match++;\n        }\n        else\n        {\n            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */\n        }\n    }\n\n    return oMatchEnd - ostart;\n}\n\nstatic size_t ZSTD_decompressSequences(\n                               void* ctx,\n                               void* dst, size_t maxDstSize,\n                         const void* seqStart, size_t seqSize)\n{\n    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;\n    const BYTE* ip = (const BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t errorCode, dumpsLength;\n    const BYTE* litPtr = dctx->litPtr;\n    const BYTE* const litEnd = litPtr + dctx->litSize;\n    int nbSeq;\n    const BYTE* dumps;\n    U32* DTableLL = dctx->LLTable;\n    U32* DTableML = dctx->MLTable;\n    U32* DTableOffb = dctx->OffTable;\n    
BYTE* const base = (BYTE*) (dctx->base);\n\n    /* Build Decoding Tables */\n    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,\n                                      DTableLL, DTableML, DTableOffb,\n                                      ip, iend-ip);\n    if (ZSTD_isError(errorCode)) return errorCode;\n    ip += errorCode;\n\n    /* Regen sequences */\n    {\n        seq_t sequence;\n        seqState_t seqState;\n\n        memset(&sequence, 0, sizeof(sequence));\n        seqState.dumps = dumps;\n        seqState.dumpsEnd = dumps + dumpsLength;\n        seqState.prevOffset = sequence.offset = 4;\n        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);\n        if (ERR_isError(errorCode)) return ERROR(corruption_detected);\n        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);\n        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);\n        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);\n\n        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )\n        {\n            size_t oneSeqSize;\n            nbSeq--;\n            ZSTD_decodeSequence(&sequence, &seqState);\n            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);\n            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;\n            op += oneSeqSize;\n        }\n\n        /* check if reached exact end */\n        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */\n        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */\n\n        /* last literal segment */\n        {\n            size_t lastLLSize = litEnd - litPtr;\n            if (litPtr > litEnd) return ERROR(corruption_detected);\n            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);\n            if (op != litPtr) memmove(op, litPtr, 
lastLLSize);\n            op += lastLLSize;\n        }\n    }\n\n    return op-ostart;\n}\n\n\nstatic size_t ZSTD_decompressBlock(\n                            void* ctx,\n                            void* dst, size_t maxDstSize,\n                      const void* src, size_t srcSize)\n{\n    /* blockType == blockCompressed */\n    const BYTE* ip = (const BYTE*)src;\n\n    /* Decode literals sub-block */\n    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);\n    if (ZSTD_isError(litCSize)) return litCSize;\n    ip += litCSize;\n    srcSize -= litCSize;\n\n    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);\n}\n\n\nstatic size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* iend = ip + srcSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t remainingSize = srcSize;\n    U32 magicNumber;\n    blockProperties_t blockProperties;\n\n    /* Frame Header */\n    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);\n    magicNumber = MEM_readLE32(src);\n    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);\n    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t decodedSize=0;\n        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);\n        if (ZSTD_isError(cBlockSize)) return cBlockSize;\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSize -= ZSTD_blockHeaderSize;\n        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);\n\n        switch(blockProperties.blockType)\n        {\n        case bt_compressed:\n            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);\n            break;\n        case bt_raw :\n            decodedSize = 
ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet supported */\n            break;\n        case bt_end :\n            /* end of frame */\n            if (remainingSize) return ERROR(srcSize_wrong);\n            break;\n        default:\n            return ERROR(GENERIC);   /* impossible */\n        }\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        if (ZSTD_isError(decodedSize)) return decodedSize;\n        op += decodedSize;\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n    }\n\n    return op-ostart;\n}\n\nstatic size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    ZSTD_DCtx ctx;\n    ctx.base = dst;\n    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);\n}\n\n/* ZSTD_errorFrameSizeInfoLegacy() :\n   assumes `cSize` and `dBound` are _not_ NULL */\nMEM_STATIC void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)\n{\n    *cSize = ret;\n    *dBound = ZSTD_CONTENTSIZE_ERROR;\n}\n\nvoid ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)\n{\n    const BYTE* ip = (const BYTE*)src;\n    size_t remainingSize = srcSize;\n    size_t nbBlocks = 0;\n    U32 magicNumber;\n    blockProperties_t blockProperties;\n\n    /* Frame Header */\n    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n        return;\n    }\n    magicNumber = MEM_readLE32(src);\n    if (magicNumber != ZSTD_magicNumber) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));\n        return;\n    }\n    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);\n        if 
(ZSTD_isError(cBlockSize)) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);\n            return;\n        }\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSize -= ZSTD_blockHeaderSize;\n        if (cBlockSize > remainingSize) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n        nbBlocks++;\n    }\n\n    *cSize = ip - (const BYTE*)src;\n    *dBound = nbBlocks * BLOCKSIZE;\n}\n\n\n/*******************************\n*  Streaming Decompression API\n*******************************/\n\nstatic size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)\n{\n    dctx->expected = ZSTD_frameHeaderSize;\n    dctx->phase = 0;\n    dctx->previousDstEnd = NULL;\n    dctx->base = NULL;\n    return 0;\n}\n\nstatic ZSTD_DCtx* ZSTD_createDCtx(void)\n{\n    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));\n    if (dctx==NULL) return NULL;\n    ZSTD_resetDCtx(dctx);\n    return dctx;\n}\n\nstatic size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)\n{\n    free(dctx);\n    return 0;\n}\n\nstatic size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)\n{\n    return dctx->expected;\n}\n\nstatic size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    /* Sanity check */\n    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);\n    if (dst != ctx->previousDstEnd)  /* not contiguous */\n        ctx->base = dst;\n\n    /* Decompress : frame header */\n    if (ctx->phase == 0)\n    {\n        /* Check frame magic header */\n        U32 magicNumber = MEM_readLE32(src);\n        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);\n        ctx->phase = 1;\n        ctx->expected = ZSTD_blockHeaderSize;\n        return 0;\n    }\n\n    /* Decompress : block header */\n    if (ctx->phase == 1)\n    {\n        
blockProperties_t bp;\n        size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);\n        if (ZSTD_isError(blockSize)) return blockSize;\n        if (bp.blockType == bt_end)\n        {\n            ctx->expected = 0;\n            ctx->phase = 0;\n        }\n        else\n        {\n            ctx->expected = blockSize;\n            ctx->bType = bp.blockType;\n            ctx->phase = 2;\n        }\n\n        return 0;\n    }\n\n    /* Decompress : block content */\n    {\n        size_t rSize;\n        switch(ctx->bType)\n        {\n        case bt_compressed:\n            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);\n            break;\n        case bt_raw :\n            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet handled */\n            break;\n        case bt_end :   /* should never happen (filtered at phase 1) */\n            rSize = 0;\n            break;\n        default:\n            return ERROR(GENERIC);\n        }\n        ctx->phase = 1;\n        ctx->expected = ZSTD_blockHeaderSize;\n        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);\n        return rSize;\n    }\n\n}\n\n\n/* wrapper layer */\n\nunsigned ZSTDv03_isError(size_t code)\n{\n    return ZSTD_isError(code);\n}\n\nsize_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,\n                     const void* src, size_t compressedSize)\n{\n    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);\n}\n\nZSTDv03_Dctx* ZSTDv03_createDCtx(void)\n{\n    return (ZSTDv03_Dctx*)ZSTD_createDCtx();\n}\n\nsize_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx)\n{\n    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);\n}\n\nsize_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx)\n{\n    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);\n}\n\nsize_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx)\n{\n    return 
ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);\n}\n\nsize_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);\n}\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v03.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_V03_H_298734209782\n#define ZSTD_V03_H_298734209782\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Includes\n***************************************/\n#include <stddef.h>   /* size_t */\n\n\n/* *************************************\n*  Simple one-step function\n***************************************/\n/**\nZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format\n    compressedSize : is the exact source size\n    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.\n                      It must be equal or larger than originalSize, otherwise decompression will fail.\n    return : the number of bytes decompressed into destination buffer (originalSize)\n             or an errorCode if it fails (which can be tested using ZSTDv01_isError())\n*/\nsize_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,\n                     const void* src, size_t compressedSize);\n\n /**\n ZSTDv03_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.3.x format\n     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'\n     cSize (output parameter)  : the number of bytes that would be read to decompress this frame\n                                 or an error code if it fails (which can be tested using ZSTDv01_isError())\n     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame\n                                 or 
ZSTD_CONTENTSIZE_ERROR if an error occurs\n\n    note : assumes `cSize` and `dBound` are _not_ NULL.\n */\n void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize,\n                                      size_t* cSize, unsigned long long* dBound);\n\n    /**\nZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error\n*/\nunsigned ZSTDv03_isError(size_t code);\n\n\n/* *************************************\n*  Advanced functions\n***************************************/\ntypedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx;\nZSTDv03_Dctx* ZSTDv03_createDCtx(void);\nsize_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx);\n\nsize_t ZSTDv03_decompressDCtx(void* ctx,\n                              void* dst, size_t maxOriginalSize,\n                        const void* src, size_t compressedSize);\n\n/* *************************************\n*  Streaming functions\n***************************************/\nsize_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx);\n\nsize_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx);\nsize_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);\n/**\n  Use above functions alternatively.\n  ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().\n  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.\n  Result is the number of bytes regenerated within 'dst'.\n  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.\n*/\n\n/* *************************************\n*  Prefix - version detection\n***************************************/\n#define ZSTDv03_magicNumber 0xFD2FB523   /* v0.3 */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_V03_H_298734209782 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v04.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n /******************************************\n *  Includes\n ******************************************/\n#include <stddef.h>    /* size_t, ptrdiff_t */\n#include <string.h>    /* memcpy */\n\n#include \"zstd_v04.h\"\n#include \"error_private.h\"\n\n\n/* ******************************************************************\n *   mem.h\n *******************************************************************/\n#ifndef MEM_H_MODULE\n#define MEM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/******************************************\n*  Compiler-specific\n******************************************/\n#if defined(_MSC_VER)   /* Visual Studio */\n#   include <stdlib.h>  /* _byteswap_ulong */\n#   include <intrin.h>  /* _byteswap_* */\n#endif\n#if defined(__GNUC__)\n#  define MEM_STATIC static __attribute__((unused))\n#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define MEM_STATIC static inline\n#elif defined(_MSC_VER)\n#  define MEM_STATIC static __inline\n#else\n#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/****************************************************************\n*  Basic Types\n*****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include <stdint.h>\n  typedef  uint8_t BYTE;\n  typedef uint16_t U16;\n  typedef  int16_t S16;\n  typedef uint32_t U32;\n  typedef  
int32_t S32;\n  typedef uint64_t U64;\n  typedef  int64_t S64;\n#else\n  typedef unsigned char       BYTE;\n  typedef unsigned short      U16;\n  typedef   signed short      S16;\n  typedef unsigned int        U32;\n  typedef   signed int        S32;\n  typedef unsigned long long  U64;\n  typedef   signed long long  S64;\n#endif\n\n\n/*-*************************************\n*  Debug\n***************************************/\n#include \"debug.h\"\n#ifndef assert\n#  define assert(condition) ((void)0)\n#endif\n\n\n/****************************************************************\n*  Memory I/O\n*****************************************************************/\n/* MEM_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. 
This method is portable but violate C standard.\n *            It can generate buggy code on targets generating assembly depending on alignment.\n *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define MEM_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))\n#    define MEM_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\nMEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }\nMEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }\n\nMEM_STATIC unsigned MEM_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)\n\n/* violates C standard on structure alignment.\nOnly use if no other choice to achieve best performance on target platform */\nMEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }\nMEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }\nMEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\n\n#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler 
specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;\n\nMEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }\nMEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\nMEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }\n\n#else\n\n/* default method, safe and standard.\n   can sometimes prove slower */\n\nMEM_STATIC U16 MEM_read16(const void* memPtr)\n{\n    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U32 MEM_read32(const void* memPtr)\n{\n    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U64 MEM_read64(const void* memPtr)\n{\n    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif // MEM_FORCE_MEMORY_ACCESS\n\n\nMEM_STATIC U16 MEM_readLE16(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read16(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)(p[0] + (p[1]<<8));\n    }\n}\n\nMEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)\n{\n    if (MEM_isLittleEndian())\n    {\n        MEM_write16(memPtr, val);\n    }\n    else\n    {\n        BYTE* p = (BYTE*)memPtr;\n        p[0] = (BYTE)val;\n        p[1] = (BYTE)(val>>8);\n    }\n}\n\nMEM_STATIC U32 MEM_readLE24(const void* memPtr)\n{\n    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);\n}\n\nMEM_STATIC U32 MEM_readLE32(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read32(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + 
((U32)p[3]<<24));\n    }\n}\n\n\nMEM_STATIC U64 MEM_readLE64(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read64(memPtr);\n    else\n    {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)\n                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));\n    }\n}\n\n\nMEM_STATIC size_t MEM_readLEST(const void* memPtr)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_readLE32(memPtr);\n    else\n        return (size_t)MEM_readLE64(memPtr);\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* MEM_H_MODULE */\n\n/*\n    zstd - standard compression library\n    Header File for static linking only\n*/\n#ifndef ZSTD_STATIC_H\n#define ZSTD_STATIC_H\n\n\n/* *************************************\n*  Types\n***************************************/\n#define ZSTD_WINDOWLOG_ABSOLUTEMIN 11\n\n/** from faster to stronger */\ntypedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy;\n\ntypedef struct\n{\n    U64 srcSize;       /* optional : tells how much bytes are present in the frame. Use 0 if not known. 
*/\n    U32 windowLog;     /* largest match distance : larger == more compression, more memory needed during decompression */\n    U32 contentLog;    /* full search segment : larger == more compression, slower, more memory (useless for fast) */\n    U32 hashLog;       /* dispatch table : larger == more memory, faster */\n    U32 searchLog;     /* nb of searches : larger == more compression, slower */\n    U32 searchLength;  /* size of matches : larger == faster decompression, sometimes less compression */\n    ZSTD_strategy strategy;\n} ZSTD_parameters;\n\ntypedef ZSTDv04_Dctx ZSTD_DCtx;\n\n/* *************************************\n*  Advanced functions\n***************************************/\n/** ZSTD_decompress_usingDict\n*   Same as ZSTD_decompressDCtx, using a Dictionary content as prefix\n*   Note : dict can be NULL, in which case, it's equivalent to ZSTD_decompressDCtx() */\nstatic size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,\n                                             void* dst, size_t maxDstSize,\n                                       const void* src, size_t srcSize,\n                                       const void* dict,size_t dictSize);\n\n\n/* **************************************\n*  Streaming functions (direct mode)\n****************************************/\nstatic size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx);\nstatic size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize);\nstatic void   ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* src, size_t srcSize);\n\nstatic size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);\nstatic size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);\n\n/**\n  Streaming decompression, bufferless mode\n\n  A ZSTD_DCtx object is required to track streaming operations.\n  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.\n  A ZSTD_DCtx object can be re-used multiple times. 
Use ZSTD_resetDCtx() to return to fresh status.\n\n  First operation is to retrieve frame parameters, using ZSTD_getFrameParams().\n  This function doesn't consume its input. It needs enough input data to properly decode the frame header.\n  Objective is to retrieve *params.windowlog, to know minimum amount of memory required during decoding.\n  Result : 0 when successful, it means the ZSTD_parameters structure has been filled.\n           >0 : means there is not enough data into src. Provides the expected size to successfully decode header.\n           errorCode, which can be tested using ZSTD_isError() (For example, if it's not a ZSTD header)\n\n  Then, you can optionally insert a dictionary.\n  This operation must mimic the compressor behavior, otherwise decompression will fail or be corrupted.\n\n  Then it's possible to start decompression.\n  Use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.\n  ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().\n  ZSTD_decompressContinue() requires this exact amount of bytes, or it will fail.\n  ZSTD_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).\n  They should preferably be located contiguously, prior to current block. 
Alternatively, a round buffer is also possible.\n\n  @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst'.\n  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.\n\n  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.\n  Context can then be reset to start a new decompression.\n*/\n\n\n\n\n#endif  /* ZSTD_STATIC_H */\n\n\n/*\n    zstd_internal - common functions to include\n    Header File for include\n*/\n#ifndef ZSTD_CCOMMON_H_MODULE\n#define ZSTD_CCOMMON_H_MODULE\n\n/* *************************************\n*  Common macros\n***************************************/\n#define MIN(a,b) ((a)<(b) ? (a) : (b))\n#define MAX(a,b) ((a)>(b) ? (a) : (b))\n\n\n/* *************************************\n*  Common constants\n***************************************/\n#define ZSTD_MAGICNUMBER 0xFD2FB524   /* v0.4 */\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define BLOCKSIZE (128 KB)                 /* define, for static allocation */\n\nstatic const size_t ZSTD_blockHeaderSize = 3;\nstatic const size_t ZSTD_frameHeaderSize_min = 5;\n#define ZSTD_frameHeaderSize_max 5         /* define, for static allocation */\n\n#define BIT7 128\n#define BIT6  64\n#define BIT5  32\n#define BIT4  16\n#define BIT1   2\n#define BIT0   1\n\n#define IS_RAW BIT0\n#define IS_RLE BIT1\n\n#define MINMATCH 4\n#define REPCODE_STARTVALUE 4\n\n#define MLbits   7\n#define LLbits   6\n#define Offbits  5\n#define MaxML  ((1<<MLbits) - 1)\n#define MaxLL  ((1<<LLbits) - 1)\n#define MaxOff ((1<<Offbits)- 1)\n#define MLFSELog   10\n#define LLFSELog   10\n#define OffFSELog   9\n#define MaxSeq MAX(MaxLL, MaxML)\n\n#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)\n#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)\n\n#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)\n\ntypedef enum { bt_compressed, bt_raw, bt_rle, bt_end } 
blockType_t;\n\n\n/* ******************************************\n*  Shared functions to include for inlining\n********************************************/\nstatic void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }\n\n#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }\n\n/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */\nstatic void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)\n{\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + length;\n    do\n        COPY8(op, ip)\n    while (op < oend);\n}\n\n\n\n/* ******************************************************************\n   FSE : Finite State Entropy coder\n   header file\n****************************************************************** */\n#ifndef FSE_H\n#define FSE_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* *****************************************\n*  Includes\n******************************************/\n#include <stddef.h>    /* size_t, ptrdiff_t */\n\n\n/* *****************************************\n*  FSE simple functions\n******************************************/\nstatic size_t FSE_decompress(void* dst,  size_t maxDstSize,\n                const void* cSrc, size_t cSrcSize);\n/*!\nFSE_decompress():\n    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated destination buffer 'dst', of size 'maxDstSize'.\n    return : size of regenerated data (<= maxDstSize)\n             or an error code, which can be tested using FSE_isError()\n\n    ** Important ** : FSE_decompress() doesn't decompress non-compressible nor RLE data !!!\n    Why ? 
: making this distinction requires a header.\n    Header management is intentionally delegated to the user layer, which can better manage special cases.\n*/\n\n\n/* *****************************************\n*  Tool functions\n******************************************/\n/* Error Management */\nstatic unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */\n\n\n\n/* *****************************************\n*  FSE detailed API\n******************************************/\n/*!\nFSE_compress() does the following:\n1. count symbol occurrence from source[] into table count[]\n2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)\n3. save normalized counters to memory buffer using writeNCount()\n4. build encoding table 'CTable' from normalized counters\n5. encode the data stream using encoding table 'CTable'\n\nFSE_decompress() does the following:\n1. read normalized counters with readNCount()\n2. build decoding table 'DTable' from normalized counters\n3. decode the data stream using decoding table 'DTable'\n\nThe following API allows targeting specific sub-functions for advanced tasks.\nFor example, it's possible to compress several blocks using the same 'CTable',\nor to save and provide normalized distribution using external method.\n*/\n\n\n/* *** DECOMPRESSION *** */\n\n/*!\nFSE_readNCount():\n   Read compactly saved 'normalizedCounter' from 'rBuffer'.\n   return : size read from 'rBuffer'\n            or an errorCode, which can be tested using FSE_isError()\n            maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */\nstatic  size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);\n\n/*!\nConstructor and Destructor of type FSE_DTable\n    Note that its size depends on 'tableLog' */\ntypedef unsigned FSE_DTable;   /* don't allocate that. 
It's just a way to be more restrictive than void* */\n\n/*!\nFSE_buildDTable():\n   Builds 'dt', which must be already allocated, using FSE_createDTable()\n   return : 0,\n            or an errorCode, which can be tested using FSE_isError() */\nstatic size_t FSE_buildDTable ( FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);\n\n/*!\nFSE_decompress_usingDTable():\n   Decompress compressed source 'cSrc' of size 'cSrcSize' using 'dt'\n   into 'dst' which must be already allocated.\n   return : size of regenerated data (necessarily <= maxDstSize)\n            or an errorCode, which can be tested using FSE_isError() */\nstatic  size_t FSE_decompress_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);\n\n/*!\nTutorial :\n----------\n(Note : these functions only decompress FSE-compressed blocks.\n If block is uncompressed, use memcpy() instead\n If block is a single repeated byte, use memset() instead )\n\nThe first step is to obtain the normalized frequencies of symbols.\nThis can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().\n'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.\nIn practice, that means it's necessary to know 'maxSymbolValue' beforehand,\nor size the table to handle worst case situations (typically 256).\nFSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.\nThe result of FSE_readNCount() is the number of bytes read from 'rBuffer'.\nNote that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.\nIf there is an error, the function will return an error code, which can be tested using FSE_isError().\n\nThe next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.\nThis is performed by the function FSE_buildDTable().\nThe space required by 'FSE_DTable' must be already allocated using FSE_createDTable().\nIf there 
is an error, the function will return an error code, which can be tested using FSE_isError().\n\n'FSE_DTable' can then be used to decompress 'cSrc', with FSE_decompress_usingDTable().\n'cSrcSize' must be strictly correct, otherwise decompression will fail.\nFSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=maxDstSize).\nIf there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)\n*/\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* FSE_H */\n\n\n/* ******************************************************************\n   bitstream\n   Part of NewGen Entropy library\n   header file (to include)\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef BITSTREAM_H_MODULE\n#define BITSTREAM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/*\n*  This API consists of small unitary functions, which highly benefit from being inlined.\n*  Since link-time-optimization is not available for all compilers,\n*  these functions are defined into a .h to be included.\n*/\n\n/**********************************************\n*  bitStream decompression API (read backward)\n**********************************************/\ntypedef struct\n{\n    size_t   bitContainer;\n    unsigned bitsConsumed;\n    const char* ptr;\n    const char* start;\n} BIT_DStream_t;\n\ntypedef enum { BIT_DStream_unfinished = 0,\n               BIT_DStream_endOfBuffer = 1,\n               BIT_DStream_completed = 2,\n               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */\n               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... 
:( */\n\nMEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);\nMEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);\nMEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);\nMEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);\n\n\n\n\n/******************************************\n*  unsafe API\n******************************************/\nMEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);\n/* faster, but works only if nbBits >= 1 */\n\n\n\n/****************************************************************\n*  Helper functions\n****************************************************************/\nMEM_STATIC unsigned BIT_highbit32 (U32 val)\n{\n#   if defined(_MSC_VER)   /* Visual */\n    unsigned long r=0;\n    _BitScanReverse ( &r, val );\n    return (unsigned) r;\n#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */\n    return __builtin_clz (val) ^ 31;\n#   else   /* Software version */\n    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };\n    U32 v = val;\n    unsigned r;\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];\n    return r;\n#   endif\n}\n\n\n/**********************************************************\n* bitStream decoding\n**********************************************************/\n\n/*!BIT_initDStream\n*  Initialize a BIT_DStream_t.\n*  @bitD : a pointer to an already allocated BIT_DStream_t structure\n*  @srcBuffer must point at the beginning of a bitStream\n*  @srcSize must be the exact size of the bitStream\n*  @result : size of stream (== srcSize) or an errorCode if a problem is detected\n*/\nMEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)\n{\n    if (srcSize < 1) { 
memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }\n\n    if (srcSize >=  sizeof(size_t))   /* normal case */\n    {\n        U32 contain32;\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];\n        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */\n        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);\n    }\n    else\n    {\n        U32 contain32;\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = bitD->start;\n        bitD->bitContainer = *(const BYTE*)(bitD->start);\n        switch(srcSize)\n        {\n            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */\n            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */\n            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */\n            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */\n            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */\n            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8; /* fall-through */\n            default: break;\n        }\n        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];\n        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */\n        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);\n        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;\n    }\n\n    return srcSize;\n}\n\nMEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)\n{\n    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return ((bitD->bitContainer << 
(bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);\n}\n\n/*! BIT_lookBitsFast :\n*   unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)\n{\n    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);\n}\n\nMEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)\n{\n    bitD->bitsConsumed += nbBits;\n}\n\nMEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)\n{\n    size_t value = BIT_lookBits(bitD, nbBits);\n    BIT_skipBits(bitD, nbBits);\n    return value;\n}\n\n/*!BIT_readBitsFast :\n*  unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)\n{\n    size_t value = BIT_lookBitsFast(bitD, nbBits);\n    BIT_skipBits(bitD, nbBits);\n    return value;\n}\n\nMEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)\n{\n    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */\n        return BIT_DStream_overflow;\n\n    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))\n    {\n        bitD->ptr -= bitD->bitsConsumed >> 3;\n        bitD->bitsConsumed &= 7;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        return BIT_DStream_unfinished;\n    }\n    if (bitD->ptr == bitD->start)\n    {\n        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;\n        return BIT_DStream_completed;\n    }\n    {\n        U32 nbBytes = bitD->bitsConsumed >> 3;\n        BIT_DStream_status result = BIT_DStream_unfinished;\n        if (bitD->ptr - nbBytes < bitD->start)\n        {\n            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */\n            result = BIT_DStream_endOfBuffer;\n        }\n        bitD->ptr -= nbBytes;\n        bitD->bitsConsumed -= nbBytes*8;\n        bitD->bitContainer = 
MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */\n        return result;\n    }\n}\n\n/*! BIT_endOfDStream\n*   @return Tells if DStream has reached its exact end\n*/\nMEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)\n{\n    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* BITSTREAM_H_MODULE */\n\n\n\n/* ******************************************************************\n   FSE : Finite State Entropy coder\n   header file for static linking (only)\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef FSE_STATIC_H\n#define FSE_STATIC_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* *****************************************\n*  Static allocation\n*******************************************/\n/* FSE buffer bounds */\n#define FSE_NCOUNTBOUND 512\n#define FSE_BLOCKBOUND(size) (size + (size>>7))\n#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */\n#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))\n#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))\n\n\n/* *****************************************\n*  FSE advanced API\n*******************************************/\nstatic size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);\n/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */\n\nstatic size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);\n/* build a fake FSE_DTable, designed to always generate the same symbolValue 
*/\n\n\n\n/* *****************************************\n*  FSE symbol decompression API\n*******************************************/\ntypedef struct\n{\n    size_t      state;\n    const void* table;   /* precise table may vary, depending on U16 */\n} FSE_DState_t;\n\n\nstatic void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);\n\nstatic unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);\n\nstatic unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);\n\n\n/* *****************************************\n*  FSE unsafe API\n*******************************************/\nstatic unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);\n/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */\n\n\n/* *****************************************\n*  Implementation of inlined functions\n*******************************************/\n/* decompression */\n\ntypedef struct {\n    U16 tableLog;\n    U16 fastMode;\n} FSE_DTableHeader;   /* sizeof U32 */\n\ntypedef struct\n{\n    unsigned short newState;\n    unsigned char  symbol;\n    unsigned char  nbBits;\n} FSE_decode_t;   /* size == U32 */\n\nMEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)\n{\n    FSE_DTableHeader DTableH;\n    memcpy(&DTableH, dt, sizeof(DTableH));\n    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);\n    BIT_reloadDStream(bitD);\n    DStatePtr->table = dt + 1;\n}\n\nMEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    const U32  nbBits = DInfo.nbBits;\n    BYTE symbol = DInfo.symbol;\n    size_t lowBits = BIT_readBits(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\nMEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)\n{\n    
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    const U32 nbBits = DInfo.nbBits;\n    BYTE symbol = DInfo.symbol;\n    size_t lowBits = BIT_readBitsFast(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\nMEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)\n{\n    return DStatePtr->state == 0;\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* FSE_STATIC_H */\n\n/* ******************************************************************\n   FSE : Finite State Entropy coder\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n#ifndef FSE_COMMONDEFS_ONLY\n\n/* **************************************************************\n*  Tuning parameters\n****************************************************************/\n/*!MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */\n#define FSE_MAX_MEMORY_USAGE 14\n#define FSE_DEFAULT_MEMORY_USAGE 13\n\n/*!FSE_MAX_SYMBOL_VALUE :\n*  Maximum symbol value authorized.\n*  Required for proper stack allocation */\n#define FSE_MAX_SYMBOL_VALUE 255\n\n\n/* **************************************************************\n*  template functions type & suffix\n****************************************************************/\n#define FSE_FUNCTION_TYPE BYTE\n#define FSE_FUNCTION_EXTENSION\n#define FSE_DECODE_TYPE FSE_decode_t\n\n\n#endif   /* !FSE_COMMONDEFS_ONLY */\n\n/* **************************************************************\n*  Compiler 
specifics\n****************************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  define FORCE_INLINE static __forceinline\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */\n#else\n#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#    ifdef __GNUC__\n#      define FORCE_INLINE static inline __attribute__((always_inline))\n#    else\n#      define FORCE_INLINE static inline\n#    endif\n#  else\n#    define FORCE_INLINE static\n#  endif /* __STDC_VERSION__ */\n#endif\n\n\n/* **************************************************************\n*  Dependencies\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n\n/* ***************************************************************\n*  Constants\n*****************************************************************/\n#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)\n#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)\n#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)\n#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)\n#define FSE_MIN_TABLELOG 5\n\n#define FSE_TABLELOG_ABSOLUTE_MAX 15\n#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX\n#error \"FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported\"\n#endif\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\n#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/* **************************************************************\n*  Complex 
types\n****************************************************************/\ntypedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];\n\n\n/*-**************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef FSE_FUNCTION_EXTENSION\n#  error \"FSE_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSE_FUNCTION_TYPE\n#  error \"FSE_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSE_CAT(X,Y) X##Y\n#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)\n#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)\n\nstatic U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }\n\n\nstatic size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    FSE_DTableHeader DTableH;\n    void* const tdPtr = dt+1;   /* because dt is unsigned, 32-bits aligned on 32-bits */\n    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);\n    const U32 tableSize = 1 << tableLog;\n    const U32 tableMask = tableSize-1;\n    const U32 step = FSE_tableStep(tableSize);\n    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];\n    U32 position = 0;\n    U32 highThreshold = tableSize-1;\n    const S16 largeLimit= (S16)(1 << (tableLog-1));\n    U32 noLarge = 1;\n    U32 s;\n\n    /* Sanity Checks */\n    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);\n    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);\n\n    /* Init, lay down lowprob symbols */\n    memset(tableDecode, 0, sizeof(FSE_DECODE_TYPE) * (maxSymbolValue+1) );   /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */\n    DTableH.tableLog = (U16)tableLog;\n    for 
(s=0; s<=maxSymbolValue; s++)\n    {\n        if (normalizedCounter[s]==-1)\n        {\n            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;\n            symbolNext[s] = 1;\n        }\n        else\n        {\n            if (normalizedCounter[s] >= largeLimit) noLarge=0;\n            symbolNext[s] = normalizedCounter[s];\n        }\n    }\n\n    /* Spread symbols */\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        int i;\n        for (i=0; i<normalizedCounter[s]; i++)\n        {\n            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;\n            position = (position + step) & tableMask;\n            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n        }\n    }\n\n    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n\n    /* Build Decoding table */\n    {\n        U32 i;\n        for (i=0; i<tableSize; i++)\n        {\n            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);\n            U16 nextState = symbolNext[symbol]++;\n            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );\n            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);\n        }\n    }\n\n    DTableH.fastMode = (U16)noLarge;\n    memcpy(dt, &DTableH, sizeof(DTableH));\n    return 0;\n}\n\n\n#ifndef FSE_COMMONDEFS_ONLY\n/******************************************\n*  FSE helper functions\n******************************************/\nstatic unsigned FSE_isError(size_t code) { return ERR_isError(code); }\n\n\n/****************************************************************\n*  FSE NCount encoding-decoding\n****************************************************************/\nstatic short FSE_abs(short a)\n{\n    return a<0 ? 
-a : a;\n}\n\nstatic size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,\n                 const void* headerBuffer, size_t hbSize)\n{\n    const BYTE* const istart = (const BYTE*) headerBuffer;\n    const BYTE* const iend = istart + hbSize;\n    const BYTE* ip = istart;\n    int nbBits;\n    int remaining;\n    int threshold;\n    U32 bitStream;\n    int bitCount;\n    unsigned charnum = 0;\n    int previous0 = 0;\n\n    if (hbSize < 4) return ERROR(srcSize_wrong);\n    bitStream = MEM_readLE32(ip);\n    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */\n    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);\n    bitStream >>= 4;\n    bitCount = 4;\n    *tableLogPtr = nbBits;\n    remaining = (1<<nbBits)+1;\n    threshold = 1<<nbBits;\n    nbBits++;\n\n    while ((remaining>1) && (charnum<=*maxSVPtr))\n    {\n        if (previous0)\n        {\n            unsigned n0 = charnum;\n            while ((bitStream & 0xFFFF) == 0xFFFF)\n            {\n                n0+=24;\n                if (ip < iend-5)\n                {\n                    ip+=2;\n                    bitStream = MEM_readLE32(ip) >> bitCount;\n                }\n                else\n                {\n                    bitStream >>= 16;\n                    bitCount+=16;\n                }\n            }\n            while ((bitStream & 3) == 3)\n            {\n                n0+=3;\n                bitStream>>=2;\n                bitCount+=2;\n            }\n            n0 += bitStream & 3;\n            bitCount += 2;\n            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);\n            while (charnum < n0) normalizedCounter[charnum++] = 0;\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))\n            {\n                ip += bitCount>>3;\n                bitCount &= 7;\n                bitStream = MEM_readLE32(ip) >> bitCount;\n            }\n            else\n                
bitStream >>= 2;\n        }\n        {\n            const short max = (short)((2*threshold-1)-remaining);\n            short count;\n\n            if ((bitStream & (threshold-1)) < (U32)max)\n            {\n                count = (short)(bitStream & (threshold-1));\n                bitCount   += nbBits-1;\n            }\n            else\n            {\n                count = (short)(bitStream & (2*threshold-1));\n                if (count >= threshold) count -= max;\n                bitCount   += nbBits;\n            }\n\n            count--;   /* extra accuracy */\n            remaining -= FSE_abs(count);\n            normalizedCounter[charnum++] = count;\n            previous0 = !count;\n            while (remaining < threshold)\n            {\n                nbBits--;\n                threshold >>= 1;\n            }\n\n            {\n                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))\n                {\n                    ip += bitCount>>3;\n                    bitCount &= 7;\n                }\n                else\n                {\n                    bitCount -= (int)(8 * (iend - 4 - ip));\n                    ip = iend - 4;\n                }\n                bitStream = MEM_readLE32(ip) >> (bitCount & 31);\n            }\n        }\n    }\n    if (remaining != 1) return ERROR(GENERIC);\n    *maxSVPtr = charnum-1;\n\n    ip += (bitCount+7)>>3;\n    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);\n    return ip-istart;\n}\n\n\n/*********************************************************\n*  Decompression (Byte symbols)\n*********************************************************/\nstatic size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->newState = 0;\n    cell->symbol = 
symbolValue;\n    cell->nbBits = 0;\n\n    return 0;\n}\n\n\nstatic size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)\n{\n    void* ptr = dt;\n    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSymbolValue = tableMask;\n    unsigned s;\n\n    /* Sanity checks */\n    if (nbBits < 1) return ERROR(GENERIC);         /* min size */\n\n    /* Build Decoding Table */\n    DTableH->tableLog = (U16)nbBits;\n    DTableH->fastMode = 1;\n    for (s=0; s<=maxSymbolValue; s++)\n    {\n        dinfo[s].newState = 0;\n        dinfo[s].symbol = (BYTE)s;\n        dinfo[s].nbBits = (BYTE)nbBits;\n    }\n\n    return 0;\n}\n\nFORCE_INLINE size_t FSE_decompress_usingDTable_generic(\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const FSE_DTable* dt, const unsigned fast)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const omax = op + maxDstSize;\n    BYTE* const olimit = omax-3;\n\n    BIT_DStream_t bitD;\n    FSE_DState_t state1;\n    FSE_DState_t state2;\n    size_t errorCode;\n\n    /* Init */\n    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */\n    if (FSE_isError(errorCode)) return errorCode;\n\n    FSE_initDState(&state1, &bitD, dt);\n    FSE_initDState(&state2, &bitD, dt);\n\n#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)\n\n    /* 4 symbols per loop */\n    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)\n    {\n        op[0] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BIT_reloadDStream(&bitD);\n\n        op[1] = FSE_GETSYMBOL(&state2);\n\n        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }\n\n        op[2] = FSE_GETSYMBOL(&state1);\n\n        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BIT_reloadDStream(&bitD);\n\n        op[3] = FSE_GETSYMBOL(&state2);\n    }\n\n    /* tail */\n    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */\n    while (1)\n    {\n        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )\n            break;\n\n        *op++ = FSE_GETSYMBOL(&state1);\n\n        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )\n            break;\n\n        *op++ = FSE_GETSYMBOL(&state2);\n    }\n\n    /* end ? 
*/\n    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))\n        return op-ostart;\n\n    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */\n\n    return ERROR(corruption_detected);\n}\n\n\nstatic size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,\n                            const void* cSrc, size_t cSrcSize,\n                            const FSE_DTable* dt)\n{\n    FSE_DTableHeader DTableH;\n    U32 fastMode;\n\n    memcpy(&DTableH, dt, sizeof(DTableH));\n    fastMode = DTableH.fastMode;\n\n    /* select fast mode (static) */\n    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);\n    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);\n}\n\n\nstatic size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)\n{\n    const BYTE* const istart = (const BYTE*)cSrc;\n    const BYTE* ip = istart;\n    short counting[FSE_MAX_SYMBOL_VALUE+1];\n    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */\n    unsigned tableLog;\n    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;\n    size_t errorCode;\n\n    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */\n\n    /* normal FSE decoding mode */\n    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);\n    if (FSE_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);\n    if (FSE_isError(errorCode)) return errorCode;\n\n    /* always return, even if it is an error code */\n    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);\n}\n\n\n\n#endif   /* FSE_COMMONDEFS_ONLY */\n\n\n/* 
******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   header file\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef HUFF0_H\n#define HUFF0_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* ****************************************\n*  Dependency\n******************************************/\n#include <stddef.h>    /* size_t */\n\n\n/* ****************************************\n*  Huff0 simple functions\n******************************************/\nstatic size_t HUF_decompress(void* dst,  size_t dstSize,\n                const void* cSrc, size_t cSrcSize);\n/*!\nHUF_decompress():\n    Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated destination buffer 'dst', of size 'dstSize'.\n    'dstSize' must be the exact size of original (uncompressed) data.\n    Note : in contrast with FSE, HUF_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows size to regenerate.\n    @return : size of regenerated data (== dstSize)\n              or an error code, which can be tested using HUF_isError()\n*/\n\n\n/* ****************************************\n*  Tool functions\n******************************************/\n/* Error Management */\nstatic unsigned    HUF_isError(size_t code);        /* tells if a return value 
is an error code */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif   /* HUFF0_H */\n\n\n/* ******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   header file for static linking (only)\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef HUFF0_STATIC_H\n#define HUFF0_STATIC_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n\n/* ****************************************\n*  Static allocation macros\n******************************************/\n/* static allocation of Huff0's DTable */\n#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */\n#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \\\n        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \\\n        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \\\n        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }\n\n\n/* ****************************************\n*  Advanced decompression functions\n******************************************/\nstatic size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\nstatic size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder 
*/\n\n\n/* ****************************************\n*  Huff0 detailed API\n******************************************/\n/*!\nHUF_decompress() does the following:\n1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics\n2. build Huffman table from save, using HUF_readDTableXn()\n3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable\n\n*/\nstatic size_t HUF_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);\nstatic size_t HUF_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);\n\nstatic size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);\nstatic size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* HUFF0_STATIC_H */\n\n\n\n/* ******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n\n/* **************************************************************\n*  Compiler specifics\n****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n/* inline is defined */\n#elif defined(_MSC_VER)\n#  define inline __inline\n#else\n#  define inline /* disable inline */\n#endif\n\n\n#ifdef _MSC_VER    /* Visual Studio */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#endif\n\n\n/* **************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n\n/* **************************************************************\n*  Constants\n****************************************************************/\n#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. 
Beyond that value, code does not work */\n#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */\n#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */\n#define HUF_MAX_SYMBOL_VALUE 255\n#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)\n#  error \"HUF_MAX_TABLELOG is too large !\"\n#endif\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\nstatic unsigned HUF_isError(size_t code) { return ERR_isError(code); }\n#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n\n/*-*******************************************************\n*  Huff0 : Huffman block decompression\n*********************************************************/\ntypedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */\n\ntypedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */\n\ntypedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;\n\n/*! HUF_readStats\n    Read compact Huffman tree, saved by HUF_writeCTable\n    @huffWeight : destination buffer\n    @return : size read from `src`\n*/\nstatic size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,\n                            U32* nbSymbolsPtr, U32* tableLogPtr,\n                            const void* src, size_t srcSize)\n{\n    U32 weightTotal;\n    U32 tableLog;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize;\n    size_t oSize;\n    U32 n;\n\n    if (!srcSize) return ERROR(srcSize_wrong);\n    iSize = ip[0];\n    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzer complain ... 
*/\n\n    if (iSize >= 128)  /* special header */\n    {\n        if (iSize >= (242))   /* RLE */\n        {\n            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };\n            oSize = l[iSize-242];\n            memset(huffWeight, 1, hwSize);\n            iSize = 0;\n        }\n        else   /* Incompressible */\n        {\n            oSize = iSize - 127;\n            iSize = ((oSize+1)/2);\n            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n            if (oSize >= hwSize) return ERROR(corruption_detected);\n            ip += 1;\n            for (n=0; n<oSize; n+=2)\n            {\n                huffWeight[n]   = ip[n/2] >> 4;\n                huffWeight[n+1] = ip[n/2] & 15;\n            }\n        }\n    }\n    else  /* header compressed with FSE (normal case) */\n    {\n        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */\n        if (FSE_isError(oSize)) return oSize;\n    }\n\n    /* collect weight stats */\n    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));\n    weightTotal = 0;\n    for (n=0; n<oSize; n++)\n    {\n        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);\n        rankStats[huffWeight[n]]++;\n        weightTotal += (1 << huffWeight[n]) >> 1;\n    }\n    if (weightTotal == 0) return ERROR(corruption_detected);\n\n    /* get last non-null symbol weight (implied, total must be 2^n) */\n    tableLog = BIT_highbit32(weightTotal) + 1;\n    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);\n    {\n        U32 total = 1 << tableLog;\n        U32 rest = total - weightTotal;\n        U32 verif = 1 << BIT_highbit32(rest);\n        U32 lastWeight = BIT_highbit32(rest) + 1;\n        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */\n        
huffWeight[oSize] = (BYTE)lastWeight;\n        rankStats[lastWeight]++;\n    }\n\n    /* check tree construction validity */\n    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */\n\n    /* results */\n    *nbSymbolsPtr = (U32)(oSize+1);\n    *tableLogPtr = tableLog;\n    return iSize+1;\n}\n\n\n/**************************/\n/* single-symbol decoding */\n/**************************/\n\nstatic size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)\n{\n    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */\n    U32 tableLog = 0;\n    size_t iSize;\n    U32 nbSymbols = 0;\n    U32 n;\n    U32 nextRankStart;\n    void* const dtPtr = DTable + 1;\n    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;\n\n    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */\n    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */\n    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */\n\n    /* Prepare ranks */\n    nextRankStart = 0;\n    for (n=1; n<=tableLog; n++)\n    {\n        U32 current = nextRankStart;\n        nextRankStart += (rankVal[n] << (n-1));\n        rankVal[n] = current;\n    }\n\n    /* fill DTable */\n    for (n=0; n<nbSymbols; n++)\n    {\n        const U32 w = huffWeight[n];\n        const U32 length = (1 << w) >> 1;\n        U32 i;\n        HUF_DEltX2 D;\n        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);\n        for (i = rankVal[w]; i < rankVal[w] + length; i++)\n            dt[i] = D;\n        rankVal[w] += length;\n    }\n\n    return iSize;\n}\n\nstatic BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)\n{\n        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */\n        const BYTE c = dt[val].byte;\n        BIT_skipBits(Dstream, dt[val].nbBits);\n        return c;\n}\n\n#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \\\n    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \\\n        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\n#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\nstatic inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 4 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && 
(p <= pEnd-4))\n    {\n        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    /* no more data to retrieve from bitstream, hence no need to reload */\n    while (p < pEnd)\n        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    return pEnd-pStart;\n}\n\n\nstatic size_t HUF_decompress4X2_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U16* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n        const void* const dtPtr = DTable;\n        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)dtPtr) +1;\n        const U32 dtLog = DTable[0];\n        size_t errorCode;\n\n        /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* 
op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BIT_initDStream(&bitD1, istart1, length1);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD2, istart2, length2);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD3, istart3, length3);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD4, istart4, length4);\n        if (HUF_isError(errorCode)) return errorCode;\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )\n        {\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);\n\n            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return 
ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);\n        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);\n        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);\n        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nstatic size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n    size_t errorCode;\n\n    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);\n    if (HUF_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/***************************/\n/* double-symbols decoding */\n/***************************/\n\nstatic void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,\n                           const U32* rankValOrigin, const int minWeight,\n                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,\n                           U32 nbBitsBaseline, U16 baseSeq)\n{\n    HUF_DEltX4 DElt;\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];\n    U32 s;\n\n    /* get pre-calculated rankVal */\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill skipped values */\n    if (minWeight>1)\n    {\n        
U32 i, skipSize = rankVal[minWeight];\n        MEM_writeLE16(&(DElt.sequence), baseSeq);\n        DElt.nbBits   = (BYTE)(consumed);\n        DElt.length   = 1;\n        for (i = 0; i < skipSize; i++)\n            DTable[i] = DElt;\n    }\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */\n    {\n        const U32 symbol = sortedSymbols[s].symbol;\n        const U32 weight = sortedSymbols[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 length = 1 << (sizeLog-nbBits);\n        const U32 start = rankVal[weight];\n        U32 i = start;\n        const U32 end = start + length;\n\n        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));\n        DElt.nbBits = (BYTE)(nbBits + consumed);\n        DElt.length = 2;\n        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */\n\n        rankVal[weight] += length;\n    }\n}\n\ntypedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];\n\nstatic void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,\n                           const sortedSymbol_t* sortedList, const U32 sortedListSize,\n                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,\n                           const U32 nbBitsBaseline)\n{\n    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];\n    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */\n    const U32 minBits  = nbBitsBaseline - maxWeight;\n    U32 s;\n\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++)\n    {\n        const U16 symbol = sortedList[s].symbol;\n        const U32 weight = sortedList[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 start = rankVal[weight];\n        const U32 length = 1 << (targetLog-nbBits);\n\n        if (targetLog-nbBits >= minBits)   /* 
enough room for a second symbol */\n        {\n            U32 sortedRank;\n            int minWeight = nbBits + scaleLog;\n            if (minWeight < 1) minWeight = 1;\n            sortedRank = rankStart[minWeight];\n            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,\n                           rankValOrigin[nbBits], minWeight,\n                           sortedList+sortedRank, sortedListSize-sortedRank,\n                           nbBitsBaseline, symbol);\n        }\n        else\n        {\n            U32 i;\n            const U32 end = start + length;\n            HUF_DEltX4 DElt;\n\n            MEM_writeLE16(&(DElt.sequence), symbol);\n            DElt.nbBits   = (BYTE)(nbBits);\n            DElt.length   = 1;\n            for (i = start; i < end; i++)\n                DTable[i] = DElt;\n        }\n        rankVal[weight] += length;\n    }\n}\n\nstatic size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)\n{\n    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];\n    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];\n    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };\n    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };\n    U32* const rankStart = rankStart0+1;\n    rankVal_t rankVal;\n    U32 tableLog, maxW, sizeOfSort, nbSymbols;\n    const U32 memLog = DTable[0];\n    size_t iSize;\n    void* dtPtr = DTable;\n    HUF_DEltX4* const dt = ((HUF_DEltX4*)dtPtr) + 1;\n\n    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */\n    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);\n    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);\n    if (HUF_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */\n\n    /* find maxWeight */\n    for (maxW = tableLog; rankStats[maxW]==0; maxW--)\n        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */\n\n    /* Get start index of each weight */\n    {\n        U32 w, nextRankStart = 0;\n        for (w=1; w<=maxW; w++)\n        {\n            U32 current = nextRankStart;\n            nextRankStart += rankStats[w];\n            rankStart[w] = current;\n        }\n        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/\n        sizeOfSort = nextRankStart;\n    }\n\n    /* sort symbols by weight */\n    {\n        U32 s;\n        for (s=0; s<nbSymbols; s++)\n        {\n            U32 w = weightList[s];\n            U32 r = rankStart[w]++;\n            sortedSymbol[r].symbol = (BYTE)s;\n            sortedSymbol[r].weight = (BYTE)w;\n        }\n        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */\n    }\n\n    /* Build rankVal */\n    {\n        const U32 minBits = tableLog+1 - maxW;\n        U32 nextRankVal = 0;\n        U32 w, consumed;\n        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */\n        U32* rankVal0 = rankVal[0];\n        for (w=1; w<=maxW; w++)\n        {\n            U32 current = nextRankVal;\n            nextRankVal += rankStats[w] << (w+rescale);\n            rankVal0[w] = current;\n        }\n        for (consumed = minBits; consumed <= memLog - minBits; consumed++)\n        {\n            U32* rankValPtr = rankVal[consumed];\n            for (w = 1; w <= maxW; w++)\n            {\n                rankValPtr[w] = rankVal0[w] >> consumed;\n            }\n        }\n    }\n\n    HUF_fillDTableX4(dt, 
memLog,\n                   sortedSymbol, sizeOfSort,\n                   rankStart0, rankVal, maxW,\n                   tableLog+1);\n\n    return iSize;\n}\n\n\nstatic U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 2);\n    BIT_skipBits(DStream, dt[val].nbBits);\n    return dt[val].length;\n}\n\nstatic U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 1);\n    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);\n    else\n    {\n        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))\n        {\n            BIT_skipBits(DStream, dt[val].nbBits);\n            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))\n                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */\n        }\n    }\n    return 1;\n}\n\n\n#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \\\n    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \\\n        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\nstatic inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 8 symbols at a time */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))\n    {\n        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);\n\n    while (p <= pEnd-2)\n        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */\n\n    if (p < pEnd)\n        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);\n\n    return p-pStart;\n}\n\nstatic size_t HUF_decompress4X4_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U32* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n        const void* const dtPtr = DTable;\n        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)dtPtr) +1;\n        const U32 dtLog = DTable[0];\n        size_t 
errorCode;\n\n        /* Init */\n        BIT_DStream_t bitD1;\n        BIT_DStream_t bitD2;\n        BIT_DStream_t bitD3;\n        BIT_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BIT_initDStream(&bitD1, istart1, length1);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD2, istart2, length2);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD3, istart3, length3);\n        if (HUF_isError(errorCode)) return errorCode;\n        errorCode = BIT_initDStream(&bitD4, istart4, length4);\n        if (HUF_isError(errorCode)) return errorCode;\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )\n        {\n            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_2(op3, 
&bitD3);\n            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);\n            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);\n            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);\n            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);\n            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);\n\n            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);\n        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);\n        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);\n        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nstatic size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);\n    if (HUF_isError(hSize)) return hSize;\n    if (hSize >= 
cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize;\n    cSrcSize -= hSize;\n\n    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/**********************************/\n/* Generic decompression selector */\n/**********************************/\n\ntypedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;\nstatic const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =\n{\n    /* single, double, quad */\n    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */\n    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */\n    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */\n    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */\n    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */\n    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */\n    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */\n    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */\n    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */\n    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */\n    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */\n    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */\n    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */\n    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */\n    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */\n    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */\n};\n\ntypedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);\n\nstatic size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };\n    /* estimate decompression time */\n    U32 Q;\n    const U32 D256 = (U32)(dstSize >> 8);\n    U32 Dtime[3];\n    U32 algoNb = 0;\n    int n;\n\n    /* validation checks 
*/\n    if (dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */\n    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */\n    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */\n\n    /* decoder timing evaluation */\n    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */\n    for (n=0; n<3; n++)\n        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);\n\n    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */\n\n    if (Dtime[1] < Dtime[0]) algoNb = 1;\n\n    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);\n\n    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */\n    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */\n    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */\n}\n\n\n\n#endif   /* ZSTD_CCOMMON_H_MODULE */\n\n\n/*\n    zstd - decompression module fo v0.4 legacy format\n    Copyright (C) 2015-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR 
IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n/* ***************************************************************\n*  Tuning parameters\n*****************************************************************/\n/*!\n * HEAPMODE :\n * Select how default decompression function ZSTD_decompress() will allocate memory,\n * in memory stack (0), or in memory heap (1, requires malloc())\n */\n#ifndef ZSTD_HEAPMODE\n#  define ZSTD_HEAPMODE 1\n#endif\n\n\n/* *******************************************************\n*  Includes\n*********************************************************/\n#include <stdlib.h>      /* calloc */\n#include <string.h>      /* memcpy, memmove */\n#include <stdio.h>       /* debug : printf */\n\n\n/* *******************************************************\n*  Compiler specifics\n*********************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */\n#endif\n\n\n/* 
*************************************\n*  Local types\n***************************************/\ntypedef struct\n{\n    blockType_t blockType;\n    U32 origSize;\n} blockProperties_t;\n\n\n/* *******************************************************\n*  Memory operations\n**********************************************************/\nstatic void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }\n\n\n/* *************************************\n*  Error Management\n***************************************/\n\n/*! ZSTD_isError\n*   tells if a return value is an error code */\nstatic unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }\n\n\n/* *************************************************************\n*   Context management\n***************************************************************/\ntypedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,\n               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTD_dStage;\n\nstruct ZSTDv04_Dctx_s\n{\n    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];\n    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];\n    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];\n    const void* previousDstEnd;\n    const void* base;\n    const void* vBase;\n    const void* dictEnd;\n    size_t expected;\n    size_t headerSize;\n    ZSTD_parameters params;\n    blockType_t bType;\n    ZSTD_dStage stage;\n    const BYTE* litPtr;\n    size_t litSize;\n    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];\n    BYTE headerBuffer[ZSTD_frameHeaderSize_max];\n};  /* typedef'd to ZSTD_DCtx within \"zstd_static.h\" */\n\nstatic size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)\n{\n    dctx->expected = ZSTD_frameHeaderSize_min;\n    dctx->stage = ZSTDds_getFrameHeaderSize;\n    dctx->previousDstEnd = NULL;\n    dctx->base = NULL;\n    dctx->vBase = NULL;\n    dctx->dictEnd = NULL;\n    return 0;\n}\n\nstatic ZSTD_DCtx* ZSTD_createDCtx(void)\n{\n    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));\n    if (dctx==NULL) 
return NULL;\n    ZSTD_resetDCtx(dctx);\n    return dctx;\n}\n\nstatic size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)\n{\n    free(dctx);\n    return 0;\n}\n\n\n/* *************************************************************\n*   Decompression section\n***************************************************************/\n/** ZSTD_decodeFrameHeader_Part1\n*   decode the 1st part of the Frame Header, which tells Frame Header size.\n*   srcSize must be == ZSTD_frameHeaderSize_min\n*   @return : the full size of the Frame Header */\nstatic size_t ZSTD_decodeFrameHeader_Part1(ZSTD_DCtx* zc, const void* src, size_t srcSize)\n{\n    U32 magicNumber;\n    if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);\n    magicNumber = MEM_readLE32(src);\n    if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);\n    zc->headerSize = ZSTD_frameHeaderSize_min;\n    return zc->headerSize;\n}\n\n\nstatic size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize)\n{\n    U32 magicNumber;\n    if (srcSize < ZSTD_frameHeaderSize_min) return ZSTD_frameHeaderSize_max;\n    magicNumber = MEM_readLE32(src);\n    if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);\n    memset(params, 0, sizeof(*params));\n    params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTD_WINDOWLOG_ABSOLUTEMIN;\n    if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits */\n    return 0;\n}\n\n/** ZSTD_decodeFrameHeader_Part2\n*   decode the full Frame Header\n*   srcSize must be the size provided by ZSTD_decodeFrameHeader_Part1\n*   @return : 0, or an error code, which can be tested using ZSTD_isError() */\nstatic size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_t srcSize)\n{\n    size_t result;\n    if (srcSize != zc->headerSize) return ERROR(srcSize_wrong);\n    result = ZSTD_getFrameParams(&(zc->params), src, srcSize);\n    if ((MEM_32bits()) && (zc->params.windowLog > 25)) return 
ERROR(frameParameter_unsupported);\n    return result;\n}\n\n\nstatic size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)\n{\n    const BYTE* const in = (const BYTE* const)src;\n    BYTE headerFlags;\n    U32 cSize;\n\n    if (srcSize < 3) return ERROR(srcSize_wrong);\n\n    headerFlags = *in;\n    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);\n\n    bpPtr->blockType = (blockType_t)(headerFlags >> 6);\n    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;\n\n    if (bpPtr->blockType == bt_end) return 0;\n    if (bpPtr->blockType == bt_rle) return 1;\n    return cSize;\n}\n\nstatic size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);\n    memcpy(dst, src, srcSize);\n    return srcSize;\n}\n\n\n/** ZSTD_decompressLiterals\n    @return : nb of bytes read from src, or an error code*/\nstatic size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,\n                                const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n\n    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n\n    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);\n    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);\n\n    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);\n\n    *maxDstSizePtr = litSize;\n    return litCSize + 5;\n}\n\n\n/** ZSTD_decodeLiteralsBlock\n    @return : nb of bytes read from src (< srcSize ) */\nstatic size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,\n                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */\n{\n    const BYTE* const istart = (const BYTE*) src;\n\n    /* any compressed block with 
literals segment must be at least this size */\n    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);\n\n    switch(*istart & 3)\n    {\n    /* compressed */\n    case 0:\n        {\n            size_t litSize = BLOCKSIZE;\n            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            memset(dctx->litBuffer + dctx->litSize, 0, 8);\n            return readSize;   /* works if it's an error too */\n        }\n    case IS_RAW:\n        {\n            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */\n            {\n                if (litSize > BLOCKSIZE) return ERROR(corruption_detected);\n                if (litSize > srcSize-3) return ERROR(corruption_detected);\n                memcpy(dctx->litBuffer, istart, litSize);\n                dctx->litPtr = dctx->litBuffer;\n                dctx->litSize = litSize;\n                memset(dctx->litBuffer + dctx->litSize, 0, 8);\n                return litSize+3;\n            }\n            /* direct reference into compressed stream */\n            dctx->litPtr = istart+3;\n            dctx->litSize = litSize;\n            return litSize+3;        }\n    case IS_RLE:\n        {\n            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */\n            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);\n            memset(dctx->litBuffer, istart[3], litSize + 8);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            return 4;\n        }\n    default:\n        return ERROR(corruption_detected);   /* forbidden nominal case */\n    }\n}\n\n\nstatic size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, 
size_t* dumpsLengthPtr,\n                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,\n                         const void* src, size_t srcSize)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* ip = istart;\n    const BYTE* const iend = istart + srcSize;\n    U32 LLtype, Offtype, MLtype;\n    U32 LLlog, Offlog, MLlog;\n    size_t dumpsLength;\n\n    /* check */\n    if (srcSize < 5) return ERROR(srcSize_wrong);\n\n    /* SeqHead */\n    *nbSeq = MEM_readLE16(ip); ip+=2;\n    LLtype  = *ip >> 6;\n    Offtype = (*ip >> 4) & 3;\n    MLtype  = (*ip >> 2) & 3;\n    if (*ip & 2)\n    {\n        dumpsLength  = ip[2];\n        dumpsLength += ip[1] << 8;\n        ip += 3;\n    }\n    else\n    {\n        dumpsLength  = ip[1];\n        dumpsLength += (ip[0] & 1) << 8;\n        ip += 2;\n    }\n    *dumpsPtr = ip;\n    ip += dumpsLength;\n    *dumpsLengthPtr = dumpsLength;\n\n    /* check */\n    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are \"raw\", hence no header, but at least xxLog bits per type */\n\n    /* sequences */\n    {\n        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL >= MaxOff */\n        size_t headerSize;\n\n        /* Build DTables */\n        switch(LLtype)\n        {\n        case bt_rle :\n            LLlog = 0;\n            FSE_buildDTable_rle(DTableLL, *ip++); break;\n        case bt_raw :\n            LLlog = LLbits;\n            FSE_buildDTable_raw(DTableLL, LLbits); break;\n        default :\n            {   U32 max = MaxLL;\n                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (LLlog > LLFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableLL, norm, max, LLlog);\n        }   }\n\n        switch(Offtype)\n        {\n        case bt_rle :\n            Offlog = 0;\n            if (ip > 
iend-2) return ERROR(srcSize_wrong);   /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */\n            break;\n        case bt_raw :\n            Offlog = Offbits;\n            FSE_buildDTable_raw(DTableOffb, Offbits); break;\n        default :\n            {   U32 max = MaxOff;\n                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (Offlog > OffFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableOffb, norm, max, Offlog);\n        }   }\n\n        switch(MLtype)\n        {\n        case bt_rle :\n            MLlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSE_buildDTable_rle(DTableML, *ip++); break;\n        case bt_raw :\n            MLlog = MLbits;\n            FSE_buildDTable_raw(DTableML, MLbits); break;\n        default :\n            {   U32 max = MaxML;\n                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);\n                if (FSE_isError(headerSize)) return ERROR(GENERIC);\n                if (MLlog > MLFSELog) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSE_buildDTable(DTableML, norm, max, MLlog);\n    }   }   }\n\n    return ip-istart;\n}\n\n\ntypedef struct {\n    size_t litLength;\n    size_t offset;\n    size_t matchLength;\n} seq_t;\n\ntypedef struct {\n    BIT_DStream_t DStream;\n    FSE_DState_t stateLL;\n    FSE_DState_t stateOffb;\n    FSE_DState_t stateML;\n    size_t prevOffset;\n    const BYTE* dumps;\n    const BYTE* dumpsEnd;\n} seqState_t;\n\n\nstatic void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)\n{\n    size_t litLength;\n    size_t prevOffset;\n    size_t offset;\n    size_t 
matchLength;\n    const BYTE* dumps = seqState->dumps;\n    const BYTE* const de = seqState->dumpsEnd;\n\n    /* Literal length */\n    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));\n    prevOffset = litLength ? seq->offset : seqState->prevOffset;\n    if (litLength == MaxLL) {\n        const U32 add = dumps<de ? *dumps++ : 0;\n        if (add < 255) litLength += add;\n        else if (dumps + 3 <= de) {\n            litLength = MEM_readLE24(dumps);\n            dumps += 3;\n        }\n        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */\n    }\n\n    /* Offset */\n    {   static const U32 offsetPrefix[MaxOff+1] = {\n                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,\n                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,\n                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };\n        U32 offsetCode, nbBits;\n        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */\n        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));\n        nbBits = offsetCode - 1;\n        if (offsetCode==0) nbBits = 0;   /* cmove */\n        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);\n        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));\n        if (offsetCode==0) offset = prevOffset;   /* cmove */\n        if (offsetCode | !litLength) seqState->prevOffset = seq->offset;   /* cmove */\n    }\n\n    /* MatchLength */\n    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));\n    if (matchLength == MaxML) {\n        const U32 add = dumps<de ? 
*dumps++ : 0;\n        if (add < 255) matchLength += add;\n        else if (dumps + 3 <= de){\n            matchLength = MEM_readLE24(dumps);\n            dumps += 3;\n        }\n        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */\n    }\n    matchLength += MINMATCH;\n\n    /* save result */\n    seq->litLength = litLength;\n    seq->offset = offset;\n    seq->matchLength = matchLength;\n    seqState->dumps = dumps;\n}\n\n\nstatic size_t ZSTD_execSequence(BYTE* op,\n                                BYTE* const oend, seq_t sequence,\n                                const BYTE** litPtr, const BYTE* const litLimit,\n                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)\n{\n    static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */\n    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */\n    BYTE* const oLitEnd = op + sequence.litLength;\n    const size_t sequenceLength = sequence.litLength + sequence.matchLength;\n    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */\n    BYTE* const oend_8 = oend-8;\n    const BYTE* const litEnd = *litPtr + sequence.litLength;\n    const BYTE* match = oLitEnd - sequence.offset;\n\n    /* check */\n    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */\n    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */\n    if (litEnd > litLimit) return ERROR(corruption_detected);   /* risk read beyond lit buffer */\n\n    /* copy Literals */\n    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */\n    op = oLitEnd;\n    *litPtr = litEnd;   /* update for next sequence */\n\n    /* copy Match */\n    if (sequence.offset > (size_t)(oLitEnd - base))\n    {\n        /* 
offset beyond prefix */\n        if (sequence.offset > (size_t)(oLitEnd - vBase))\n            return ERROR(corruption_detected);\n        match = dictEnd - (base-match);\n        if (match + sequence.matchLength <= dictEnd)\n        {\n            memmove(oLitEnd, match, sequence.matchLength);\n            return sequenceLength;\n        }\n        /* span extDict & currentPrefixSegment */\n        {\n            size_t length1 = dictEnd - match;\n            memmove(oLitEnd, match, length1);\n            op = oLitEnd + length1;\n            sequence.matchLength -= length1;\n            match = base;\n            if (op > oend_8 || sequence.matchLength < MINMATCH) {\n              while (op < oMatchEnd) *op++ = *match++;\n              return sequenceLength;\n            }\n        }\n    }\n    /* Requirement: op <= oend_8 */\n\n    /* match within prefix */\n    if (sequence.offset < 8) {\n        /* close range match, overlap */\n        const int sub2 = dec64table[sequence.offset];\n        op[0] = match[0];\n        op[1] = match[1];\n        op[2] = match[2];\n        op[3] = match[3];\n        match += dec32table[sequence.offset];\n        ZSTD_copy4(op+4, match);\n        match -= sub2;\n    } else {\n        ZSTD_copy8(op, match);\n    }\n    op += 8; match += 8;\n\n    if (oMatchEnd > oend-(16-MINMATCH))\n    {\n        if (op < oend_8)\n        {\n            ZSTD_wildcopy(op, match, oend_8 - op);\n            match += oend_8 - op;\n            op = oend_8;\n        }\n        while (op < oMatchEnd) *op++ = *match++;\n    }\n    else\n    {\n        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8, but must be signed */\n    }\n    return sequenceLength;\n}\n\n\nstatic size_t ZSTD_decompressSequences(\n                               ZSTD_DCtx* dctx,\n                               void* dst, size_t maxDstSize,\n                         const void* seqStart, size_t seqSize)\n{\n    const BYTE* ip = (const 
BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t errorCode, dumpsLength;\n    const BYTE* litPtr = dctx->litPtr;\n    const BYTE* const litEnd = litPtr + dctx->litSize;\n    int nbSeq;\n    const BYTE* dumps;\n    U32* DTableLL = dctx->LLTable;\n    U32* DTableML = dctx->MLTable;\n    U32* DTableOffb = dctx->OffTable;\n    const BYTE* const base = (const BYTE*) (dctx->base);\n    const BYTE* const vBase = (const BYTE*) (dctx->vBase);\n    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);\n\n    /* Build Decoding Tables */\n    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,\n                                      DTableLL, DTableML, DTableOffb,\n                                      ip, iend-ip);\n    if (ZSTD_isError(errorCode)) return errorCode;\n    ip += errorCode;\n\n    /* Regen sequences */\n    {\n        seq_t sequence;\n        seqState_t seqState;\n\n        memset(&sequence, 0, sizeof(sequence));\n        sequence.offset = 4;\n        seqState.dumps = dumps;\n        seqState.dumpsEnd = dumps + dumpsLength;\n        seqState.prevOffset = 4;\n        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);\n        if (ERR_isError(errorCode)) return ERROR(corruption_detected);\n        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);\n        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);\n        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);\n\n        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; )\n        {\n            size_t oneSeqSize;\n            nbSeq--;\n            ZSTD_decodeSequence(&sequence, &seqState);\n            oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);\n            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;\n      
      op += oneSeqSize;\n        }\n\n        /* check if reached exact end */\n        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* DStream should be entirely and exactly consumed; otherwise data is corrupted */\n\n        /* last literal segment */\n        {\n            size_t lastLLSize = litEnd - litPtr;\n            if (litPtr > litEnd) return ERROR(corruption_detected);\n            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);\n            if (op != litPtr) memcpy(op, litPtr, lastLLSize);\n            op += lastLLSize;\n        }\n    }\n\n    return op-ostart;\n}\n\n\nstatic void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)\n{\n    if (dst != dctx->previousDstEnd)   /* not contiguous */\n    {\n        dctx->dictEnd = dctx->previousDstEnd;\n        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));\n        dctx->base = dst;\n        dctx->previousDstEnd = dst;\n    }\n}\n\n\nstatic size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,\n                            void* dst, size_t maxDstSize,\n                      const void* src, size_t srcSize)\n{\n    /* blockType == blockCompressed */\n    const BYTE* ip = (const BYTE*)src;\n    size_t litCSize;\n\n    if (srcSize > BLOCKSIZE) return ERROR(corruption_detected);\n\n    /* Decode literals sub-block */\n    litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);\n    if (ZSTD_isError(litCSize)) return litCSize;\n    ip += litCSize;\n    srcSize -= litCSize;\n\n    return ZSTD_decompressSequences(dctx, dst, maxDstSize, ip, srcSize);\n}\n\n\nstatic size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,\n                                 void* dst, size_t maxDstSize,\n                                 const void* src, size_t srcSize,\n                                 const void* dict, size_t dictSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* iend = ip + srcSize;\n    BYTE* const 
ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t remainingSize = srcSize;\n    blockProperties_t blockProperties;\n\n    /* init */\n    ZSTD_resetDCtx(ctx);\n    if (dict)\n    {\n        ZSTD_decompress_insertDictionary(ctx, dict, dictSize);\n        ctx->dictEnd = ctx->previousDstEnd;\n        ctx->vBase = (const char*)dst - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));\n        ctx->base = dst;\n    }\n    else\n    {\n        ctx->vBase = ctx->base = ctx->dictEnd = dst;\n    }\n\n    /* Frame Header */\n    {\n        size_t frameHeaderSize;\n        if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);\n        frameHeaderSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);\n        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;\n        if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);\n        ip += frameHeaderSize; remainingSize -= frameHeaderSize;\n        frameHeaderSize = ZSTD_decodeFrameHeader_Part2(ctx, src, frameHeaderSize);\n        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;\n    }\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t decodedSize=0;\n        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);\n        if (ZSTD_isError(cBlockSize)) return cBlockSize;\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSize -= ZSTD_blockHeaderSize;\n        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);\n\n        switch(blockProperties.blockType)\n        {\n        case bt_compressed:\n            decodedSize = ZSTD_decompressBlock_internal(ctx, op, oend-op, ip, cBlockSize);\n            break;\n        case bt_raw :\n            decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet supported */\n            
break;\n        case bt_end :\n            /* end of frame */\n            if (remainingSize) return ERROR(srcSize_wrong);\n            break;\n        default:\n            return ERROR(GENERIC);   /* impossible */\n        }\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        if (ZSTD_isError(decodedSize)) return decodedSize;\n        op += decodedSize;\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n    }\n\n    return op-ostart;\n}\n\n/* ZSTD_errorFrameSizeInfoLegacy() :\n   assumes `cSize` and `dBound` are _not_ NULL */\nstatic void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)\n{\n    *cSize = ret;\n    *dBound = ZSTD_CONTENTSIZE_ERROR;\n}\n\nvoid ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)\n{\n    const BYTE* ip = (const BYTE*)src;\n    size_t remainingSize = srcSize;\n    size_t nbBlocks = 0;\n    blockProperties_t blockProperties;\n\n    /* Frame Header */\n    if (srcSize < ZSTD_frameHeaderSize_min) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n        return;\n    }\n    if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));\n        return;\n    }\n    ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);\n        if (ZSTD_isError(cBlockSize)) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);\n            return;\n        }\n\n        ip += ZSTD_blockHeaderSize;\n        remainingSize -= ZSTD_blockHeaderSize;\n        if (cBlockSize > remainingSize) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        ip += 
cBlockSize;\n        remainingSize -= cBlockSize;\n        nbBlocks++;\n    }\n\n    *cSize = ip - (const BYTE*)src;\n    *dBound = nbBlocks * BLOCKSIZE;\n}\n\n/* ******************************\n*  Streaming Decompression API\n********************************/\nstatic size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)\n{\n    return dctx->expected;\n}\n\nstatic size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    /* Sanity check */\n    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);\n    ZSTD_checkContinuity(ctx, dst);\n\n    /* Decompress : frame header; part 1 */\n    switch (ctx->stage)\n    {\n    case ZSTDds_getFrameHeaderSize :\n        /* get frame header size */\n        if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */\n        ctx->headerSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);\n        if (ZSTD_isError(ctx->headerSize)) return ctx->headerSize;\n        memcpy(ctx->headerBuffer, src, ZSTD_frameHeaderSize_min);\n        if (ctx->headerSize > ZSTD_frameHeaderSize_min) return ERROR(GENERIC);   /* impossible */\n        ctx->expected = 0;   /* not necessary to copy more */\n        /* fallthrough */\n    case ZSTDds_decodeFrameHeader:\n        /* get frame header */\n        {   size_t const result = ZSTD_decodeFrameHeader_Part2(ctx, ctx->headerBuffer, ctx->headerSize);\n            if (ZSTD_isError(result)) return result;\n            ctx->expected = ZSTD_blockHeaderSize;\n            ctx->stage = ZSTDds_decodeBlockHeader;\n            return 0;\n        }\n    case ZSTDds_decodeBlockHeader:\n        /* Decode block header */\n        {   blockProperties_t bp;\n            size_t const blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);\n            if (ZSTD_isError(blockSize)) return blockSize;\n            if (bp.blockType == bt_end)\n            {\n                ctx->expected = 0;\n             
   ctx->stage = ZSTDds_getFrameHeaderSize;\n            }\n            else\n            {\n                ctx->expected = blockSize;\n                ctx->bType = bp.blockType;\n                ctx->stage = ZSTDds_decompressBlock;\n            }\n            return 0;\n        }\n    case ZSTDds_decompressBlock:\n        {\n            /* Decompress : block content */\n            size_t rSize;\n            switch(ctx->bType)\n            {\n            case bt_compressed:\n                rSize = ZSTD_decompressBlock_internal(ctx, dst, maxDstSize, src, srcSize);\n                break;\n            case bt_raw :\n                rSize = ZSTD_copyRawBlock(dst, maxDstSize, src, srcSize);\n                break;\n            case bt_rle :\n                return ERROR(GENERIC);   /* not yet handled */\n                break;\n            case bt_end :   /* should never happen (filtered at phase 1) */\n                rSize = 0;\n                break;\n            default:\n                return ERROR(GENERIC);\n            }\n            ctx->stage = ZSTDds_decodeBlockHeader;\n            ctx->expected = ZSTD_blockHeaderSize;\n            ctx->previousDstEnd = (char*)dst + rSize;\n            return rSize;\n        }\n    default:\n        return ERROR(GENERIC);   /* impossible */\n    }\n}\n\n\nstatic void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, size_t dictSize)\n{\n    ctx->dictEnd = ctx->previousDstEnd;\n    ctx->vBase = (const char*)dict - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));\n    ctx->base = dict;\n    ctx->previousDstEnd = (const char*)dict + dictSize;\n}\n\n\n\n/*\n    Buffered version of Zstd compression library\n    Copyright (C) 2015, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * 
Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n/* The objects defined into this file should be considered experimental.\n * They are not labelled stable, as their prototype may change in the future.\n * You can use them for tests, provide feedback, or if you can endure risk of future changes.\n */\n\n/* *************************************\n*  Includes\n***************************************/\n#include <stdlib.h>\n\n\n/** ************************************************\n*  Streaming decompression\n*\n*  A ZBUFF_DCtx object is required to track streaming operation.\n*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.\n*  Use ZBUFF_decompressInit() to start a new 
decompression operation.\n*  ZBUFF_DCtx objects can be reused multiple times.\n*\n*  Use ZBUFF_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *maxDstSizePtr can be any size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input.\n*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst .\n*  return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)\n*            or 0 when a frame is completely decoded\n*            or an error code, which can be tested using ZBUFF_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory)\n*  output : 128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded.\n*  input : just follow indications from ZBUFF_decompressContinue() to minimize latency. 
It should always be <= 128 KB + 3 .\n* **************************************************/\n\ntypedef enum { ZBUFFds_init, ZBUFFds_readHeader, ZBUFFds_loadHeader, ZBUFFds_decodeHeader,\n               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFF_dStage;\n\n/* *** Resource management *** */\n\n#define ZSTD_frameHeaderSize_max 5   /* too magical, should come from reference */\nstruct ZBUFFv04_DCtx_s {\n    ZSTD_DCtx* zc;\n    ZSTD_parameters params;\n    char* inBuff;\n    size_t inBuffSize;\n    size_t inPos;\n    char* outBuff;\n    size_t outBuffSize;\n    size_t outStart;\n    size_t outEnd;\n    size_t hPos;\n    const char* dict;\n    size_t dictSize;\n    ZBUFF_dStage stage;\n    unsigned char headerBuffer[ZSTD_frameHeaderSize_max];\n};   /* typedef'd to ZBUFF_DCtx within \"zstd_buffered.h\" */\n\ntypedef ZBUFFv04_DCtx ZBUFF_DCtx;\n\n\nstatic ZBUFF_DCtx* ZBUFF_createDCtx(void)\n{\n    ZBUFF_DCtx* zbc = (ZBUFF_DCtx*)malloc(sizeof(ZBUFF_DCtx));\n    if (zbc==NULL) return NULL;\n    memset(zbc, 0, sizeof(*zbc));\n    zbc->zc = ZSTD_createDCtx();\n    zbc->stage = ZBUFFds_init;\n    return zbc;\n}\n\nstatic size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbc)\n{\n    if (zbc==NULL) return 0;   /* support free on null */\n    ZSTD_freeDCtx(zbc->zc);\n    free(zbc->inBuff);\n    free(zbc->outBuff);\n    free(zbc);\n    return 0;\n}\n\n\n/* *** Initialization *** */\n\nstatic size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbc)\n{\n    zbc->stage = ZBUFFds_readHeader;\n    zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = zbc->dictSize = 0;\n    return ZSTD_resetDCtx(zbc->zc);\n}\n\n\nstatic size_t ZBUFF_decompressWithDictionary(ZBUFF_DCtx* zbc, const void* src, size_t srcSize)\n{\n    zbc->dict = (const char*)src;\n    zbc->dictSize = srcSize;\n    return 0;\n}\n\nstatic size_t ZBUFF_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    size_t length = MIN(maxDstSize, srcSize);\n    if (length > 0) {\n        memcpy(dst, src, length);\n    }\n    
return length;\n}\n\n/* *** Decompression *** */\n\nstatic size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)\n{\n    const char* const istart = (const char*)src;\n    const char* ip = istart;\n    const char* const iend = istart + *srcSizePtr;\n    char* const ostart = (char*)dst;\n    char* op = ostart;\n    char* const oend = ostart + *maxDstSizePtr;\n    U32 notDone = 1;\n\n    DEBUGLOG(5, \"ZBUFF_decompressContinue\");\n    while (notDone)\n    {\n        switch(zbc->stage)\n        {\n\n        case ZBUFFds_init :\n            DEBUGLOG(5, \"ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)\");\n            return ERROR(init_missing);\n\n        case ZBUFFds_readHeader :\n            /* read header from src */\n            {   size_t const headerSize = ZSTD_getFrameParams(&(zbc->params), src, *srcSizePtr);\n                if (ZSTD_isError(headerSize)) return headerSize;\n                if (headerSize) {\n                    /* not enough input to decode header : tell how many bytes would be necessary */\n                    memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);\n                    zbc->hPos += *srcSizePtr;\n                    *maxDstSizePtr = 0;\n                    zbc->stage = ZBUFFds_loadHeader;\n                    return headerSize - zbc->hPos;\n                }\n                zbc->stage = ZBUFFds_decodeHeader;\n                break;\n            }\n\n        case ZBUFFds_loadHeader:\n            /* complete header from src */\n            {   size_t headerSize = ZBUFF_limitCopy(\n                    zbc->headerBuffer + zbc->hPos, ZSTD_frameHeaderSize_max - zbc->hPos,\n                    src, *srcSizePtr);\n                zbc->hPos += headerSize;\n                ip += headerSize;\n                headerSize = ZSTD_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);\n                if (ZSTD_isError(headerSize)) return headerSize;\n 
               if (headerSize) {\n                    /* not enough input to decode header : tell how many bytes would be necessary */\n                    *maxDstSizePtr = 0;\n                    return headerSize - zbc->hPos;\n            }   }\n            /* intentional fallthrough */\n\n        case ZBUFFds_decodeHeader:\n                /* apply header to create / resize buffers */\n                {   size_t const neededOutSize = (size_t)1 << zbc->params.windowLog;\n                    size_t const neededInSize = BLOCKSIZE;   /* a block is never > BLOCKSIZE */\n                    if (zbc->inBuffSize < neededInSize) {\n                        free(zbc->inBuff);\n                        zbc->inBuffSize = neededInSize;\n                        zbc->inBuff = (char*)malloc(neededInSize);\n                        if (zbc->inBuff == NULL) return ERROR(memory_allocation);\n                    }\n                    if (zbc->outBuffSize < neededOutSize) {\n                        free(zbc->outBuff);\n                        zbc->outBuffSize = neededOutSize;\n                        zbc->outBuff = (char*)malloc(neededOutSize);\n                        if (zbc->outBuff == NULL) return ERROR(memory_allocation);\n                }   }\n                if (zbc->dictSize)\n                    ZSTD_decompress_insertDictionary(zbc->zc, zbc->dict, zbc->dictSize);\n                if (zbc->hPos) {\n                    /* some data already loaded into headerBuffer : transfer into inBuff */\n                    memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);\n                    zbc->inPos = zbc->hPos;\n                    zbc->hPos = 0;\n                    zbc->stage = ZBUFFds_load;\n                    break;\n                }\n                zbc->stage = ZBUFFds_read;\n\t\t/* fall-through */\n        case ZBUFFds_read:\n            {\n                size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);\n                if (neededInSize==0)   /* end of frame 
*/\n                {\n                    zbc->stage = ZBUFFds_init;\n                    notDone = 0;\n                    break;\n                }\n                if ((size_t)(iend-ip) >= neededInSize)\n                {\n                    /* directly decode from src */\n                    size_t decodedSize = ZSTD_decompressContinue(zbc->zc,\n                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,\n                        ip, neededInSize);\n                    if (ZSTD_isError(decodedSize)) return decodedSize;\n                    ip += neededInSize;\n                    if (!decodedSize) break;   /* this was just a header */\n                    zbc->outEnd = zbc->outStart +  decodedSize;\n                    zbc->stage = ZBUFFds_flush;\n                    break;\n                }\n                if (ip==iend) { notDone = 0; break; }   /* no more input */\n                zbc->stage = ZBUFFds_load;\n            }\n\t    /* fall-through */\n        case ZBUFFds_load:\n            {\n                size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);\n                size_t toLoad = neededInSize - zbc->inPos;   /* should always be <= remaining space within inBuff */\n                size_t loadedSize;\n                if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected);   /* should never happen */\n                loadedSize = ZBUFF_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);\n                ip += loadedSize;\n                zbc->inPos += loadedSize;\n                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */\n                {\n                    size_t decodedSize = ZSTD_decompressContinue(zbc->zc,\n                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,\n                        zbc->inBuff, neededInSize);\n                    if (ZSTD_isError(decodedSize)) return decodedSize;\n                
    zbc->inPos = 0;   /* input is consumed */\n                    if (!decodedSize) { zbc->stage = ZBUFFds_read; break; }   /* this was just a header */\n                    zbc->outEnd = zbc->outStart +  decodedSize;\n                    zbc->stage = ZBUFFds_flush;\n                    /* ZBUFFds_flush follows */\n                }\n            }\n\t    /* fall-through */\n        case ZBUFFds_flush:\n            {\n                size_t toFlushSize = zbc->outEnd - zbc->outStart;\n                size_t flushedSize = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);\n                op += flushedSize;\n                zbc->outStart += flushedSize;\n                if (flushedSize == toFlushSize)\n                {\n                    zbc->stage = ZBUFFds_read;\n                    if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)\n                        zbc->outStart = zbc->outEnd = 0;\n                    break;\n                }\n                /* cannot flush everything */\n                notDone = 0;\n                break;\n            }\n        default: return ERROR(GENERIC);   /* impossible */\n        }\n    }\n\n    *srcSizePtr = ip-istart;\n    *maxDstSizePtr = op-ostart;\n\n    {\n        size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zbc->zc);\n        if (nextSrcSizeHint > 3) nextSrcSizeHint+= 3;   /* get the next block header while at it */\n        nextSrcSizeHint -= zbc->inPos;   /* already loaded*/\n        return nextSrcSizeHint;\n    }\n}\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nunsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); }\nconst char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }\n\nsize_t ZBUFFv04_recommendedDInSize()  { return BLOCKSIZE + 3; }\nsize_t ZBUFFv04_recommendedDOutSize() { return BLOCKSIZE; }\n\n\n\n/*- 
========================================================================= -*/\n\n/* final wrapping stage */\n\nsize_t ZSTDv04_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    return ZSTD_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);\n}\n\nsize_t ZSTDv04_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1)\n    size_t regenSize;\n    ZSTD_DCtx* dctx = ZSTD_createDCtx();\n    if (dctx==NULL) return ERROR(memory_allocation);\n    regenSize = ZSTDv04_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);\n    ZSTD_freeDCtx(dctx);\n    return regenSize;\n#else\n    ZSTD_DCtx dctx;\n    return ZSTDv04_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);\n#endif\n}\n\nsize_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); }\n\nsize_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx)\n{\n    return ZSTD_nextSrcSizeToDecompress(dctx);\n}\n\nsize_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    return ZSTD_decompressContinue(dctx, dst, maxDstSize, src, srcSize);\n}\n\n\n\nZBUFFv04_DCtx* ZBUFFv04_createDCtx(void) { return ZBUFF_createDCtx(); }\nsize_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); }\n\nsize_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx) { return ZBUFF_decompressInit(dctx); }\nsize_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, size_t srcSize)\n{ return ZBUFF_decompressWithDictionary(dctx, src, srcSize); }\n\nsize_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)\n{\n    DEBUGLOG(5, \"ZBUFFv04_decompressContinue\");\n    return ZBUFF_decompressContinue(dctx, dst, maxDstSizePtr, src, srcSizePtr);\n}\n\nZSTD_DCtx* ZSTDv04_createDCtx(void) { return ZSTD_createDCtx(); }\nsize_t ZSTDv04_freeDCtx(ZSTD_DCtx* 
dctx) { return ZSTD_freeDCtx(dctx); }\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v04.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTD_V04_H_91868324769238\n#define ZSTD_V04_H_91868324769238\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/* *************************************\n*  Includes\n***************************************/\n#include <stddef.h>   /* size_t */\n\n\n/* *************************************\n*  Simple one-step function\n***************************************/\n/**\nZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format\n    compressedSize : is the exact source size\n    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.\n                      It must be equal or larger than originalSize, otherwise decompression will fail.\n    return : the number of bytes decompressed into destination buffer (originalSize)\n             or an errorCode if it fails (which can be tested using ZSTDv01_isError())\n*/\nsize_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,\n                     const void* src, size_t compressedSize);\n\n /**\n ZSTDv04_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.4.x format\n     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'\n     cSize (output parameter)  : the number of bytes that would be read to decompress this frame\n                                 or an error code if it fails (which can be tested using ZSTDv01_isError())\n     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame\n                                 or 
ZSTD_CONTENTSIZE_ERROR if an error occurs\n\n    note : assumes `cSize` and `dBound` are _not_ NULL.\n */\n void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize,\n                                      size_t* cSize, unsigned long long* dBound);\n\n/**\nZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error\n*/\nunsigned ZSTDv04_isError(size_t code);\n\n\n/* *************************************\n*  Advanced functions\n***************************************/\ntypedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx;\nZSTDv04_Dctx* ZSTDv04_createDCtx(void);\nsize_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx);\n\nsize_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx,\n                              void* dst, size_t maxOriginalSize,\n                        const void* src, size_t compressedSize);\n\n\n/* *************************************\n*  Direct Streaming\n***************************************/\nsize_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx);\n\nsize_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx);\nsize_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);\n/**\n  Use above functions alternatively.\n  ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().\n  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.\n  Result is the number of bytes regenerated within 'dst'.\n  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.\n*/\n\n\n/* *************************************\n*  Buffered Streaming\n***************************************/\ntypedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx;\nZBUFFv04_DCtx* ZBUFFv04_createDCtx(void);\nsize_t         ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx);\n\nsize_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx);\nsize_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, 
size_t dictSize);\n\nsize_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr);\n\n/** ************************************************\n*  Streaming decompression\n*\n*  A ZBUFF_DCtx object is required to track streaming operation.\n*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.\n*  Use ZBUFF_decompressInit() to start a new decompression operation.\n*  ZBUFF_DCtx objects can be reused multiple times.\n*\n*  Optionally, a reference to a static dictionary can be set, using ZBUFF_decompressWithDictionary()\n*  It must be the same content as the one set during compression phase.\n*  Dictionary content must remain accessible during the decompression process.\n*\n*  Use ZBUFF_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *maxDstSizePtr can be any size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.\n*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst.\n*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)\n*            or 0 when a frame is completely decoded\n*            or an error code, which can be tested using ZBUFF_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize / ZBUFF_recommendedDOutSize\n*  output : ZBUFF_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded.\n*  input : ZBUFF_recommendedDInSize==128Kb+3; just follow indications from ZBUFF_decompressContinue() to minimize latency. 
It should always be <= 128 KB + 3 .\n* **************************************************/\nunsigned ZBUFFv04_isError(size_t errorCode);\nconst char* ZBUFFv04_getErrorName(size_t errorCode);\n\n\n/** The below functions provide recommended buffer sizes for Compression or Decompression operations.\n*   These sizes are not compulsory, they just tend to offer better latency */\nsize_t ZBUFFv04_recommendedDInSize(void);\nsize_t ZBUFFv04_recommendedDOutSize(void);\n\n\n/* *************************************\n*  Prefix - version detection\n***************************************/\n#define ZSTDv04_magicNumber 0xFD2FB524   /* v0.4 */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* ZSTD_V04_H_91868324769238 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v05.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/*- Dependencies -*/\n#include \"zstd_v05.h\"\n#include \"error_private.h\"\n\n\n/* ******************************************************************\n   mem.h\n   low-level memory access routines\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef MEM_H_MODULE\n#define MEM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*-****************************************\n*  Dependencies\n******************************************/\n#include <stddef.h>    /* size_t, ptrdiff_t */\n#include <string.h>    /* memcpy */\n\n\n/*-****************************************\n*  Compiler specifics\n******************************************/\n#if defined(__GNUC__)\n#  define MEM_STATIC static __attribute__((unused))\n#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define MEM_STATIC static inline\n#elif defined(_MSC_VER)\n#  define MEM_STATIC static __inline\n#else\n#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/*-**************************************************************\n*  Basic Types\n*****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include <stdint.h>\n  typedef  uint8_t BYTE;\n  typedef uint16_t U16;\n  typedef  int16_t S16;\n  typedef 
uint32_t U32;\n  typedef  int32_t S32;\n  typedef uint64_t U64;\n  typedef  int64_t S64;\n#else\n  typedef unsigned char       BYTE;\n  typedef unsigned short      U16;\n  typedef   signed short      S16;\n  typedef unsigned int        U32;\n  typedef   signed int        S32;\n  typedef unsigned long long  U64;\n  typedef   signed long long  S64;\n#endif\n\n\n/*-**************************************************************\n*  Memory I/O\n*****************************************************************/\n/* MEM_FORCE_MEMORY_ACCESS :\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. 
This method is portable but violate C standard.\n *            It can generate buggy code on targets depending on alignment.\n *            In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define MEM_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))\n#    define MEM_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\nMEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }\nMEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }\n\nMEM_STATIC unsigned MEM_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)\n\n/* violates C standard, by lying on structure alignment.\nOnly use if no other choice to achieve best performance on target platform */\nMEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }\nMEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }\nMEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\nMEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }\nMEM_STATIC void MEM_write64(void* memPtr, U64 value) { 
*(U64*)memPtr = value; }\n\n#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;\n\nMEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }\nMEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\nMEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }\nMEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }\nMEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; }\n\n#else\n\n/* default method, safe and standard.\n   can sometimes prove slower */\n\nMEM_STATIC U16 MEM_read16(const void* memPtr)\n{\n    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U32 MEM_read32(const void* memPtr)\n{\n    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U64 MEM_read64(const void* memPtr)\n{\n    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\nMEM_STATIC void MEM_write32(void* memPtr, U32 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\nMEM_STATIC void MEM_write64(void* memPtr, U64 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif /* MEM_FORCE_MEMORY_ACCESS */\n\n\nMEM_STATIC U16 MEM_readLE16(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read16(memPtr);\n    else {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)(p[0] + (p[1]<<8));\n    }\n}\n\nMEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)\n{\n    if (MEM_isLittleEndian()) {\n        
MEM_write16(memPtr, val);\n    } else {\n        BYTE* p = (BYTE*)memPtr;\n        p[0] = (BYTE)val;\n        p[1] = (BYTE)(val>>8);\n    }\n}\n\nMEM_STATIC U32 MEM_readLE32(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read32(memPtr);\n    else {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));\n    }\n}\n\n\nMEM_STATIC U64 MEM_readLE64(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read64(memPtr);\n    else {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)\n                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));\n    }\n}\n\n\nMEM_STATIC size_t MEM_readLEST(const void* memPtr)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_readLE32(memPtr);\n    else\n        return (size_t)MEM_readLE64(memPtr);\n}\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* MEM_H_MODULE */\n\n/*\n    zstd - standard compression library\n    Header File for static linking only\n    Copyright (C) 2014-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : http://www.zstd.net\n*/\n#ifndef ZSTD_STATIC_H\n#define ZSTD_STATIC_H\n\n/* The prototypes defined within this file are considered experimental.\n * They should not be used in the context DLL as they may change in the future.\n * Prefer static linking if you need them, to control breaking version changes issues.\n */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n\n/*-*************************************\n*  Types\n***************************************/\n#define ZSTDv05_WINDOWLOG_ABSOLUTEMIN 11\n\n\n/*-*************************************\n*  Advanced functions\n***************************************/\n/*- Advanced Decompression functions -*/\n\n/*! 
ZSTDv05_decompress_usingPreparedDCtx() :\n*   Same as ZSTDv05_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded.\n*   It avoids reloading the dictionary each time.\n*   `preparedDCtx` must have been properly initialized using ZSTDv05_decompressBegin_usingDict().\n*   Requires 2 contexts : 1 for reference, which will not be modified, and 1 to run the decompression operation */\nsize_t ZSTDv05_decompress_usingPreparedDCtx(\n                                             ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* preparedDCtx,\n                                             void* dst, size_t dstCapacity,\n                                       const void* src, size_t srcSize);\n\n\n/* **************************************\n*  Streaming functions (direct mode)\n****************************************/\nsize_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx);\n\n/*\n  Streaming decompression, direct mode (bufferless)\n\n  A ZSTDv05_DCtx object is required to track streaming operations.\n  Use ZSTDv05_createDCtx() / ZSTDv05_freeDCtx() to manage it.\n  A ZSTDv05_DCtx object can be re-used multiple times.\n\n  First typical operation is to retrieve frame parameters, using ZSTDv05_getFrameParams().\n  This operation is independent, and just needs enough input data to properly decode the frame header.\n  Objective is to retrieve *params.windowlog, to know minimum amount of memory required during decoding.\n  Result : 0 when successful, it means the ZSTDv05_parameters structure has been filled.\n           >0 : means there is not enough data into src. 
Provides the expected size to successfully decode header.\n           errorCode, which can be tested using ZSTDv05_isError()\n\n  Start decompression, with ZSTDv05_decompressBegin() or ZSTDv05_decompressBegin_usingDict()\n  Alternatively, you can copy a prepared context, using ZSTDv05_copyDCtx()\n\n  Then use ZSTDv05_nextSrcSizeToDecompress() and ZSTDv05_decompressContinue() alternatively.\n  ZSTDv05_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTDv05_decompressContinue().\n  ZSTDv05_decompressContinue() requires this exact amount of bytes, or it will fail.\n  ZSTDv05_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).\n  They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.\n\n  @result of ZSTDv05_decompressContinue() is the number of bytes regenerated within 'dst'.\n  It can be zero, which is not an error; it just means ZSTDv05_decompressContinue() has decoded some header.\n\n  A frame is fully decoded when ZSTDv05_nextSrcSizeToDecompress() returns zero.\n  Context can then be reset to start a new decompression.\n*/\n\n\n/* **************************************\n*  Block functions\n****************************************/\n/*! 
Block functions produce and decode raw zstd blocks, without frame metadata.\n    User will have to take in charge required information to regenerate data, such as block sizes.\n\n    A few rules to respect :\n    - Uncompressed block size must be <= 128 KB\n    - Compressing or decompressing requires a context structure\n      + Use ZSTDv05_createCCtx() and ZSTDv05_createDCtx()\n    - It is necessary to init context before starting\n      + compression : ZSTDv05_compressBegin()\n      + decompression : ZSTDv05_decompressBegin()\n      + variants _usingDict() are also allowed\n      + copyCCtx() and copyDCtx() work too\n    - When a block is considered not compressible enough, ZSTDv05_compressBlock() result will be zero.\n      In which case, nothing is produced into `dst`.\n      + User must test for such outcome and deal directly with uncompressed data\n      + ZSTDv05_decompressBlock() doesn't accept uncompressed data as input !!\n*/\n\nsize_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* ZSTDv05_STATIC_H */\n\n\n/*\n    zstd_internal - common functions to include\n    Header File for include\n    Copyright (C) 2014-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED 
WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    You can contact the author at :
    - zstd source repository : https://github.com/Cyan4973/zstd
*/
#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE



/*-*************************************
*  Common macros
***************************************/
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))


/*-*************************************
*  Common constants
***************************************/
#define ZSTDv05_DICT_MAGIC  0xEC30A435

/* Postfix unit macros : written after a number, e.g. "128 KB" expands to 128 *(1 <<10) */
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define BLOCKSIZE (128 KB)                 /* define, for static allocation */

static const size_t ZSTDv05_blockHeaderSize = 3;
static const size_t ZSTDv05_frameHeaderSize_min = 5;
#define ZSTDv05_frameHeaderSize_max 5         /* define, for static allocation */

/* Single-bit flag constants : BITv05N == 1<<N (note : bits 2 and 3 have no named constant here) */
#define BITv057 128
#define BITv056  64
#define BITv055  32
#define BITv054  16
#define BITv051   2
#define BITv050   1

/* Literals-section block types */
#define IS_HUFv05 0
#define IS_PCH 1
#define IS_RAW 2
#define IS_RLE 3

#define MINMATCH 4
#define REPCODE_STARTVALUE 1

/* Code-value bit widths and the maximum code values they allow,
   plus FSE table-log limits for each sequence component */
#define Litbits  8
#define MLbits   7
#define LLbits   6
#define Offbits  5
#define MaxLit ((1<<Litbits) - 1)
#define MaxML  ((1<<MLbits) - 1)
#define MaxLL  ((1<<LLbits) - 1)
#define MaxOff ((1<<Offbits)- 1)
#define MLFSEv05Log   10
#define LLFSEv05Log   10
#define OffFSEv05Log   9
#define MaxSeq MAX(MaxLL, MaxML)

/* How a sequence component's FSE distribution is stored in the compressed stream */
#define FSEv05_ENCODING_RAW     0
#define FSEv05_ENCODING_RLE     1
#define FSEv05_ENCODING_STATIC  2
#define FSEv05_ENCODING_DYNAMIC 3


#define HufLog 12

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */

#define WILDCOPY_OVERLENGTH 8

#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)   /* error sentinel == 2^64 - 2, distinct from any real content size */

typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;


/*-*******************************************
*  Shared functions to include for inlining
*********************************************/
/* Copies exactly 8 bytes from src to dst (building block for wildcopy below) */
static void ZSTDv05_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }

/* Copies 8 bytes and advances both pointers by 8 */
#define COPY8(d,s) { ZSTDv05_copy8(d,s); d+=8; s+=8; }

/*!
ZSTDv05_wildcopy() :
*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0).
*   Copies in 8-byte chunks until at least `length` bytes are done, so the destination
*   buffer must have slack past dst+length (see WILDCOPY_OVERLENGTH). */
MEM_STATIC void ZSTDv05_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;
    /* do-while : always copies at least one 8-byte chunk, even when length==0 */
    do
        COPY8(op, ip)
    while (op < oend);
}


/*-*******************************************
*  Private interfaces
*********************************************/
/* Sequence storage shared between compression stages.
   Pointer pairs appear to be buffer-origin (*Start) plus a matching cursor — TODO confirm against users.
   Fields after the "opt" marker are per-symbol frequency statistics. */
typedef struct {
    void* buffer;
    U32*  offsetStart;
    U32*  offset;
    BYTE* offCodeStart;
    BYTE* offCode;
    BYTE* litStart;
    BYTE* lit;
    BYTE* litLengthStart;
    BYTE* litLength;
    BYTE* matchLengthStart;
    BYTE* matchLength;
    BYTE* dumpsStart;
    BYTE* dumps;
    /* opt */
    U32* matchLengthFreq;
    U32* litLengthFreq;
    U32* litFreq;
    U32* offCodeFreq;
    U32  matchLengthSum;
    U32  litLengthSum;
    U32  litSum;
    U32  offCodeSum;
} seqStore_t;



#endif   /* ZSTD_CCOMMON_H_MODULE */
/* ******************************************************************
   FSEv05 : Finite State Entropy coder
   header file
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED 
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef FSEv05_H\n#define FSEv05_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* *****************************************\n*  Includes\n******************************************/\n#include <stddef.h>    /* size_t, ptrdiff_t */\n\n\n/*-****************************************\n*  FSEv05 simple functions\n******************************************/\nsize_t FSEv05_decompress(void* dst,  size_t maxDstSize,\n                const void* cSrc, size_t cSrcSize);\n/*!\nFSEv05_decompress():\n    Decompress FSEv05 data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated destination buffer 'dst', of size 'maxDstSize'.\n    return : size of regenerated data (<= maxDstSize)\n             or an error code, which can be tested using FSEv05_isError()\n\n    ** Important ** : FSEv05_decompress() doesn't decompress non-compressible nor RLE data !!!\n    Why ? 
: making this distinction requires a header.\n    Header management is intentionally delegated to the user layer, which can better manage special cases.\n*/\n\n\n/* *****************************************\n*  Tool functions\n******************************************/\n/* Error Management */\nunsigned    FSEv05_isError(size_t code);        /* tells if a return value is an error code */\nconst char* FSEv05_getErrorName(size_t code);   /* provides error code string (useful for debugging) */\n\n\n\n\n/* *****************************************\n*  FSEv05 detailed API\n******************************************/\n/* *** DECOMPRESSION *** */\n\n/*!\nFSEv05_readNCount():\n   Read compactly saved 'normalizedCounter' from 'rBuffer'.\n   return : size read from 'rBuffer'\n            or an errorCode, which can be tested using FSEv05_isError()\n            maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */\nsize_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);\n\n/*!\nConstructor and Destructor of type FSEv05_DTable\n    Note that its size depends on 'tableLog' */\ntypedef unsigned FSEv05_DTable;   /* don't allocate that. 
It's just a way to be more restrictive than void* */\nFSEv05_DTable* FSEv05_createDTable(unsigned tableLog);\nvoid        FSEv05_freeDTable(FSEv05_DTable* dt);\n\n/*!\nFSEv05_buildDTable():\n   Builds 'dt', which must be already allocated, using FSEv05_createDTable()\n   @return : 0,\n             or an errorCode, which can be tested using FSEv05_isError() */\nsize_t FSEv05_buildDTable (FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);\n\n/*!\nFSEv05_decompress_usingDTable():\n   Decompress compressed source @cSrc of size @cSrcSize using `dt`\n   into `dst` which must be already allocated.\n   @return : size of regenerated data (necessarily <= @dstCapacity)\n             or an errorCode, which can be tested using FSEv05_isError() */\nsize_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv05_DTable* dt);\n\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* FSEv05_H */\n/* ******************************************************************\n   bitstream\n   Part of FSEv05 library\n   header file (to include)\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef BITv05STREAM_H_MODULE
#define BITv05STREAM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/*
*  This API consists of small unitary functions, which highly benefit from being inlined.
*  Since link-time-optimization is not available for all compilers,
*  these functions are defined into a .h to be included.
*/



/*-********************************************
*  bitStream decoding API (read backward)
**********************************************/
/* Decoding bit-stream state. The stream is consumed from its end toward its start
   (see BITv05_initDStream / BITv05_reloadDStream below). */
typedef struct
{
    size_t   bitContainer;    /* register holding the currently loaded bits */
    unsigned bitsConsumed;    /* nb of bits already consumed from bitContainer */
    const char* ptr;          /* current read position within the source buffer */
    const char* start;        /* beginning of the source buffer (lower bound for ptr) */
} BITv05_DStream_t;

typedef enum { BITv05_DStream_unfinished = 0,
               BITv05_DStream_endOfBuffer = 1,
               BITv05_DStream_completed = 2,
               BITv05_DStream_overflow = 3 } BITv05_DStream_status;  /* result of BITv05_reloadDStream() */
               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ...
:( */

MEM_STATIC size_t   BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t   BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD);
MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* bitD);


/*-****************************************
*  unsafe API
******************************************/
MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */



/*-**************************************************************
*  Helper functions
****************************************************************/
/*! BITv05_highbit32 :
*   @return position of the highest set bit of @val (i.e. floor(log2(val))).
*   val is expected to be non-zero (both intrinsic branches are undefined for 0). */
MEM_STATIC unsigned BITv05_highbit32 (U32 val)
{
#   if defined(_MSC_VER)   /* Visual */
    unsigned long r=0;
    _BitScanReverse ( &r, val );
    return (unsigned) r;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
    return __builtin_clz (val) ^ 31;
#   else   /* Software version */
    /* De Bruijn multiplication : smear the top bit downward, then use the
       multiply-shift as a perfect hash into the 32-entry position table */
    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
    U32 v = val;
    unsigned r;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
    return r;
#   endif
}



/*-********************************************************
* bitStream decoding
**********************************************************/
/*!BITv05_initDStream
*  Initialize a BITv05_DStream_t.
*  @bitD : a pointer to an already allocated BITv05_DStream_t structure
*  @srcBuffer must point at the beginning of a bitStream
*  @srcSize must be the exact size of the bitStream
*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
*  The last byte of the stream must contain an end-mark bit; its position
*  determines how many bits of the final byte are padding.
*/
MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

    if (srcSize >=  sizeof(size_t)) {  /* normal case */
        U32 contain32;
        bitD->start = (const char*)srcBuffer;
        /* reading starts from the LAST register-sized word of the buffer */
        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
        bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
    } else {
        /* stream shorter than a register : assemble the container byte by byte */
        U32 contain32;
        bitD->start = (const char*)srcBuffer;
        bitD->ptr   = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8; /* fall-through */
            default: break;
        }
        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
        bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
        /* account for the container bytes that do not exist in the short stream */
        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
    }

    return srcSize;
}

/*! BITv05_lookBits :
*   Reads the next nbBits without consuming them.
*   The two-step shift (">> 1" then ">> (bitMask-nbBits)") keeps each shift
*   count <= bitMask, so nbBits==0 does not trigger an undefined full-width shift. */
MEM_STATIC size_t BITv05_lookBits(BITv05_DStream_t* bitD, U32 nbBits)
{
    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}

/*! BITv05_lookBitsFast :
*   unsafe version; only works if nbBits >= 1 (single shift would be full register width at nbBits==0) */
MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
{
    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}

/* Marks nbBits as consumed; actual refill happens in BITv05_reloadDStream() */
MEM_STATIC void BITv05_skipBits(BITv05_DStream_t* bitD, U32 nbBits)
{
    bitD->bitsConsumed += nbBits;
}

/*! BITv05_readBits :
*   Reads and consumes the next nbBits */
MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits)
{
    size_t value = BITv05_lookBits(bitD, nbBits);
    BITv05_skipBits(bitD, nbBits);
    return value;
}

/*!BITv05_readBitsFast :
*  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits)
{
    size_t value = BITv05_lookBitsFast(bitD, nbBits);
    BITv05_skipBits(bitD, nbBits);
    return value;
}

/*! BITv05_reloadDStream :
*   Refills bitContainer from the buffer, moving ptr backward toward start.
*   @return : status telling whether more data remains, the buffer edge was
*             reached, the stream is fully consumed, or an overflow occurred. */
MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD)
{
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
        return BITv05_DStream_overflow;

    /* far from buffer start : step back whole consumed bytes and reload a full word */
    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
        bitD->ptr -= bitD->bitsConsumed >> 3;
        bitD->bitsConsumed &= 7;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        return BITv05_DStream_unfinished;
    }
    if (bitD->ptr == bitD->start) {
        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv05_DStream_endOfBuffer;
        return BITv05_DStream_completed;
    }
    {   /* near buffer start : clamp the step so ptr never underruns start */
        U32 nbBytes = bitD->bitsConsumed >> 3;
        BITv05_DStream_status result = BITv05_DStream_unfinished;
        if (bitD->ptr - nbBytes < bitD->start) {
            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
            result = BITv05_DStream_endOfBuffer;
        }
        bitD->ptr -= nbBytes;
        bitD->bitsConsumed -= nbBytes*8;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
        return result;
    }
}

/*! BITv05_endOfDStream
*   @return Tells if DStream has reached its exact end
*/
MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream)
{
    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}

#if defined (__cplusplus)
}
#endif

#endif /* BITv05STREAM_H_MODULE */
/* ******************************************************************
   FSEv05 : Finite State Entropy coder
   header file for static linking (only)
   Copyright (C) 2013-2015, Yann Collet

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSEv05_STATIC_H
#define FSEv05_STATIC_H

#if defined (__cplusplus)
extern "C" {
#endif



/* *****************************************
*  Static allocation
*******************************************/
/* It is possible to statically allocate FSEv05 CTable/DTable as a table of unsigned using below macros */
#define FSEv05_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))


/* *****************************************
*  FSEv05 advanced API
*******************************************/
size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits);
/* build a fake FSEv05_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */

size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, unsigned char symbolValue);
/* build a fake FSEv05_DTable, designed to always generate the same symbolValue */



/* *****************************************
*  FSEv05 symbol decompression API
*******************************************/
/* Per-stream decoding state : current FSE state plus the decode table it indexes */
typedef struct
{
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSEv05_DState_t;


static void     FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt);

static unsigned char FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);

static unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr);



/* *****************************************
*  FSEv05 unsafe API
*******************************************/
static unsigned char FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */


/* *****************************************
*  Implementation of inlined functions
*******************************************/
/* decompression */

/* Header stored in the first U32 of an FSEv05_DTable */
typedef struct {
    U16 tableLog;
    U16 fastMode;
} FSEv05_DTableHeader;   /* sizeof U32 */

/* One decode-table cell : next-state base, decoded symbol, and nb of bits to read */
typedef struct
{
    unsigned short newState;
    unsigned char  symbol;
    unsigned char  nbBits;
} FSEv05_decode_t;   /* size == U32 */

/*! FSEv05_initDState :
*   Reads tableLog bits from the stream as the initial state.
*   `dt + 1` skips the one-U32 FSEv05_DTableHeader to reach the cell array. */
MEM_STATIC void FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt)
{
    const void* ptr = dt;
    const FSEv05_DTableHeader* const DTableH = (const FSEv05_DTableHeader*)ptr;
    DStatePtr->state = BITv05_readBits(bitD, DTableH->tableLog);
    BITv05_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

/*! FSEv05_peakSymbol :
*   Returns the symbol for the current state without consuming any bits
*   ("peak" spelling kept from upstream; this is a peek operation). */
MEM_STATIC BYTE FSEv05_peakSymbol(FSEv05_DState_t* DStatePtr)
{
    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
    return DInfo.symbol;
}

/*! FSEv05_decodeSymbol :
*   Decodes one symbol, then advances the state : newState base from the table
*   plus nbBits freshly read from the stream. */
MEM_STATIC BYTE FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
{
    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
    const U32  nbBits = DInfo.nbBits;
    BYTE symbol = DInfo.symbol;
    size_t lowBits = BITv05_readBits(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

/*! FSEv05_decodeSymbolFast :
*   Same as FSEv05_decodeSymbol but uses the unsafe bit reader;
*   only valid when every cell's nbBits >= 1 (fastMode tables). */
MEM_STATIC BYTE FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
{
    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
    const U32 nbBits = DInfo.nbBits;
    BYTE symbol = DInfo.symbol;
    size_t lowBits = BITv05_readBitsFast(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

/* State 0 marks the end of a properly terminated FSE stream */
MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}


#if defined (__cplusplus)
}
#endif

#endif  /* FSEv05_STATIC_H */
/* ******************************************************************
   FSEv05 : Finite State Entropy coder
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n#ifndef FSEv05_COMMONDEFS_ONLY\n\n/* **************************************************************\n*  Tuning parameters\n****************************************************************/\n/*!MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */\n#define FSEv05_MAX_MEMORY_USAGE 14\n#define FSEv05_DEFAULT_MEMORY_USAGE 13\n\n/*!FSEv05_MAX_SYMBOL_VALUE :\n*  Maximum symbol value authorized.\n*  Required for proper stack allocation */\n#define FSEv05_MAX_SYMBOL_VALUE 255\n\n\n/* **************************************************************\n*  template functions type & suffix\n****************************************************************/\n#define FSEv05_FUNCTION_TYPE BYTE\n#define FSEv05_FUNCTION_EXTENSION\n#define FSEv05_DECODE_TYPE FSEv05_decode_t\n\n\n#endif   /* !FSEv05_COMMONDEFS_ONLY */\n\n/* **************************************************************\n*  Compiler 
specifics\n****************************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  define FORCE_INLINE static __forceinline\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */\n#else\n#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#    ifdef __GNUC__\n#      define FORCE_INLINE static inline __attribute__((always_inline))\n#    else\n#      define FORCE_INLINE static inline\n#    endif\n#  else\n#    define FORCE_INLINE static\n#  endif /* __STDC_VERSION__ */\n#endif\n\n\n/* **************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort */\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n\n\n/* ***************************************************************\n*  Constants\n*****************************************************************/\n#define FSEv05_MAX_TABLELOG  (FSEv05_MAX_MEMORY_USAGE-2)\n#define FSEv05_MAX_TABLESIZE (1U<<FSEv05_MAX_TABLELOG)\n#define FSEv05_MAXTABLESIZE_MASK (FSEv05_MAX_TABLESIZE-1)\n#define FSEv05_DEFAULT_TABLELOG (FSEv05_DEFAULT_MEMORY_USAGE-2)\n#define FSEv05_MIN_TABLELOG 5\n\n#define FSEv05_TABLELOG_ABSOLUTE_MAX 15\n#if FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX\n#error \"FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX is not supported\"\n#endif\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\n#define FSEv05_STATIC_ASSERT(c) { enum { FSEv05_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/* 
**************************************************************\n*  Complex types\n****************************************************************/\ntypedef unsigned DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];\n\n\n/* **************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef FSEv05_FUNCTION_EXTENSION\n#  error \"FSEv05_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSEv05_FUNCTION_TYPE\n#  error \"FSEv05_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSEv05_CAT(X,Y) X##Y\n#define FSEv05_FUNCTION_NAME(X,Y) FSEv05_CAT(X,Y)\n#define FSEv05_TYPE_NAME(X,Y) FSEv05_CAT(X,Y)\n\n\n/* Function templates */\nstatic U32 FSEv05_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }\n\n\n\nFSEv05_DTable* FSEv05_createDTable (unsigned tableLog)\n{\n    if (tableLog > FSEv05_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv05_TABLELOG_ABSOLUTE_MAX;\n    return (FSEv05_DTable*)malloc( FSEv05_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );\n}\n\nvoid FSEv05_freeDTable (FSEv05_DTable* dt)\n{\n    free(dt);\n}\n\nsize_t FSEv05_buildDTable(FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    FSEv05_DTableHeader DTableH;\n    void* const tdPtr = dt+1;   /* because dt is unsigned, 32-bits aligned on 32-bits */\n    FSEv05_DECODE_TYPE* const tableDecode = (FSEv05_DECODE_TYPE*) (tdPtr);\n    const U32 tableSize = 1 << tableLog;\n    const U32 tableMask = tableSize-1;\n    const U32 step = FSEv05_tableStep(tableSize);\n    U16 symbolNext[FSEv05_MAX_SYMBOL_VALUE+1];\n    U32 position = 0;\n    U32 highThreshold = tableSize-1;\n    const S16 largeLimit= (S16)(1 << (tableLog-1));\n    U32 noLarge = 1;\n    U32 
s;\n\n    /* Sanity Checks */\n    if (maxSymbolValue > FSEv05_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);\n    if (tableLog > FSEv05_MAX_TABLELOG) return ERROR(tableLog_tooLarge);\n\n    /* Init, lay down lowprob symbols */\n    memset(tableDecode, 0, sizeof(FSEv05_FUNCTION_TYPE) * (maxSymbolValue+1) );   /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */\n    DTableH.tableLog = (U16)tableLog;\n    for (s=0; s<=maxSymbolValue; s++) {\n        if (normalizedCounter[s]==-1) {\n            tableDecode[highThreshold--].symbol = (FSEv05_FUNCTION_TYPE)s;\n            symbolNext[s] = 1;\n        } else {\n            if (normalizedCounter[s] >= largeLimit) noLarge=0;\n            symbolNext[s] = normalizedCounter[s];\n    }   }\n\n    /* Spread symbols */\n    for (s=0; s<=maxSymbolValue; s++) {\n        int i;\n        for (i=0; i<normalizedCounter[s]; i++) {\n            tableDecode[position].symbol = (FSEv05_FUNCTION_TYPE)s;\n            position = (position + step) & tableMask;\n            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n    }   }\n\n    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n\n    /* Build Decoding table */\n    {\n        U32 i;\n        for (i=0; i<tableSize; i++) {\n            FSEv05_FUNCTION_TYPE symbol = (FSEv05_FUNCTION_TYPE)(tableDecode[i].symbol);\n            U16 nextState = symbolNext[symbol]++;\n            tableDecode[i].nbBits = (BYTE) (tableLog - BITv05_highbit32 ((U32)nextState) );\n            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);\n    }   }\n\n    DTableH.fastMode = (U16)noLarge;\n    memcpy(dt, &DTableH, sizeof(DTableH));\n    return 0;\n}\n\n\n#ifndef FSEv05_COMMONDEFS_ONLY\n/*-****************************************\n*  FSEv05 helper 
functions\n******************************************/\nunsigned FSEv05_isError(size_t code) { return ERR_isError(code); }\n\nconst char* FSEv05_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\n\n/*-**************************************************************\n*  FSEv05 NCount encoding-decoding\n****************************************************************/\nstatic short FSEv05_abs(short a) { return a<0 ? -a : a; }\n\n\nsize_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,\n                 const void* headerBuffer, size_t hbSize)\n{\n    const BYTE* const istart = (const BYTE*) headerBuffer;\n    const BYTE* const iend = istart + hbSize;\n    const BYTE* ip = istart;\n    int nbBits;\n    int remaining;\n    int threshold;\n    U32 bitStream;\n    int bitCount;\n    unsigned charnum = 0;\n    int previous0 = 0;\n\n    if (hbSize < 4) return ERROR(srcSize_wrong);\n    bitStream = MEM_readLE32(ip);\n    nbBits = (bitStream & 0xF) + FSEv05_MIN_TABLELOG;   /* extract tableLog */\n    if (nbBits > FSEv05_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);\n    bitStream >>= 4;\n    bitCount = 4;\n    *tableLogPtr = nbBits;\n    remaining = (1<<nbBits)+1;\n    threshold = 1<<nbBits;\n    nbBits++;\n\n    while ((remaining>1) && (charnum<=*maxSVPtr)) {\n        if (previous0) {\n            unsigned n0 = charnum;\n            while ((bitStream & 0xFFFF) == 0xFFFF) {\n                n0+=24;\n                if (ip < iend-5) {\n                    ip+=2;\n                    bitStream = MEM_readLE32(ip) >> bitCount;\n                } else {\n                    bitStream >>= 16;\n                    bitCount+=16;\n            }   }\n            while ((bitStream & 3) == 3) {\n                n0+=3;\n                bitStream>>=2;\n                bitCount+=2;\n            }\n            n0 += bitStream & 3;\n            bitCount += 2;\n            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);\n 
           while (charnum < n0) normalizedCounter[charnum++] = 0;\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {\n                ip += bitCount>>3;\n                bitCount &= 7;\n                bitStream = MEM_readLE32(ip) >> bitCount;\n            }\n            else\n                bitStream >>= 2;\n        }\n        {\n            const short max = (short)((2*threshold-1)-remaining);\n            short count;\n\n            if ((bitStream & (threshold-1)) < (U32)max) {\n                count = (short)(bitStream & (threshold-1));\n                bitCount   += nbBits-1;\n            } else {\n                count = (short)(bitStream & (2*threshold-1));\n                if (count >= threshold) count -= max;\n                bitCount   += nbBits;\n            }\n\n            count--;   /* extra accuracy */\n            remaining -= FSEv05_abs(count);\n            normalizedCounter[charnum++] = count;\n            previous0 = !count;\n            while (remaining < threshold) {\n                nbBits--;\n                threshold >>= 1;\n            }\n\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {\n                ip += bitCount>>3;\n                bitCount &= 7;\n            } else {\n                bitCount -= (int)(8 * (iend - 4 - ip));\n                ip = iend - 4;\n            }\n            bitStream = MEM_readLE32(ip) >> (bitCount & 31);\n    }   }\n    if (remaining != 1) return ERROR(GENERIC);\n    *maxSVPtr = charnum-1;\n\n    ip += (bitCount+7)>>3;\n    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);\n    return ip-istart;\n}\n\n\n\n/*-*******************************************************\n*  Decompression (Byte symbols)\n*********************************************************/\nsize_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, BYTE symbolValue)\n{\n    void* ptr = dt;\n    FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    
FSEv05_decode_t* const cell = (FSEv05_decode_t*)dPtr;\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->newState = 0;\n    cell->symbol = symbolValue;\n    cell->nbBits = 0;\n\n    return 0;\n}\n\n\nsize_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits)\n{\n    void* ptr = dt;\n    FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSEv05_decode_t* const dinfo = (FSEv05_decode_t*)dPtr;\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSymbolValue = tableMask;\n    unsigned s;\n\n    /* Sanity checks */\n    if (nbBits < 1) return ERROR(GENERIC);         /* min size */\n\n    /* Build Decoding Table */\n    DTableH->tableLog = (U16)nbBits;\n    DTableH->fastMode = 1;\n    for (s=0; s<=maxSymbolValue; s++) {\n        dinfo[s].newState = 0;\n        dinfo[s].symbol = (BYTE)s;\n        dinfo[s].nbBits = (BYTE)nbBits;\n    }\n\n    return 0;\n}\n\nFORCE_INLINE size_t FSEv05_decompress_usingDTable_generic(\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const FSEv05_DTable* dt, const unsigned fast)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const omax = op + maxDstSize;\n    BYTE* const olimit = omax-3;\n\n    BITv05_DStream_t bitD;\n    FSEv05_DState_t state1;\n    FSEv05_DState_t state2;\n    size_t errorCode;\n\n    /* Init */\n    errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */\n    if (FSEv05_isError(errorCode)) return errorCode;\n\n    FSEv05_initDState(&state1, &bitD, dt);\n    FSEv05_initDState(&state2, &bitD, dt);\n\n#define FSEv05_GETSYMBOL(statePtr) fast ? 
FSEv05_decodeSymbolFast(statePtr, &bitD) : FSEv05_decodeSymbol(statePtr, &bitD)\n\n    /* 4 symbols per loop */\n    for ( ; (BITv05_reloadDStream(&bitD)==BITv05_DStream_unfinished) && (op<olimit) ; op+=4) {\n        op[0] = FSEv05_GETSYMBOL(&state1);\n\n        if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BITv05_reloadDStream(&bitD);\n\n        op[1] = FSEv05_GETSYMBOL(&state2);\n\n        if (FSEv05_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            { if (BITv05_reloadDStream(&bitD) > BITv05_DStream_unfinished) { op+=2; break; } }\n\n        op[2] = FSEv05_GETSYMBOL(&state1);\n\n        if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BITv05_reloadDStream(&bitD);\n\n        op[3] = FSEv05_GETSYMBOL(&state2);\n    }\n\n    /* tail */\n    /* note : BITv05_reloadDStream(&bitD) >= FSEv05_DStream_partiallyFilled; Ends at exactly BITv05_DStream_completed */\n    while (1) {\n        if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state1))) )\n            break;\n\n        *op++ = FSEv05_GETSYMBOL(&state1);\n\n        if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state2))) )\n            break;\n\n        *op++ = FSEv05_GETSYMBOL(&state2);\n    }\n\n    /* end ? 
*/\n    if (BITv05_endOfDStream(&bitD) && FSEv05_endOfDState(&state1) && FSEv05_endOfDState(&state2))\n        return op-ostart;\n\n    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */\n\n    return ERROR(corruption_detected);\n}\n\n\nsize_t FSEv05_decompress_usingDTable(void* dst, size_t originalSize,\n                            const void* cSrc, size_t cSrcSize,\n                            const FSEv05_DTable* dt)\n{\n    const void* ptr = dt;\n    const FSEv05_DTableHeader* DTableH = (const FSEv05_DTableHeader*)ptr;\n    const U32 fastMode = DTableH->fastMode;\n\n    /* select fast mode (static) */\n    if (fastMode) return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);\n    return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);\n}\n\n\nsize_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)\n{\n    const BYTE* const istart = (const BYTE*)cSrc;\n    const BYTE* ip = istart;\n    short counting[FSEv05_MAX_SYMBOL_VALUE+1];\n    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */\n    unsigned tableLog;\n    unsigned maxSymbolValue = FSEv05_MAX_SYMBOL_VALUE;\n    size_t errorCode;\n\n    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */\n\n    /* normal FSEv05 decoding mode */\n    errorCode = FSEv05_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);\n    if (FSEv05_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    errorCode = FSEv05_buildDTable (dt, counting, maxSymbolValue, tableLog);\n    if (FSEv05_isError(errorCode)) return errorCode;\n\n    /* always return, even if it is an error code */\n    return FSEv05_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, 
dt);\n}\n\n\n\n#endif   /* FSEv05_COMMONDEFS_ONLY */\n/* ******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   header file\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef HUFF0_H\n#define HUFF0_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n\n/* ****************************************\n*  Huff0 simple functions\n******************************************/\nsize_t HUFv05_decompress(void* dst,  size_t dstSize,\n                const void* cSrc, size_t cSrcSize);\n/*!\nHUFv05_decompress():\n    Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated destination buffer 'dst', of size 'dstSize'.\n    @dstSize : must be the **exact** size of original (uncompressed) data.\n    Note : in contrast with FSEv05, HUFv05_decompress can regenerate\n           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,\n           because it knows size to regenerate.\n    @return : size of regenerated data (== dstSize)\n              or an error code, which can be tested using HUFv05_isError()\n*/\n\n\n/* ****************************************\n*  Tool functions\n******************************************/\n/* Error Management */\nunsigned    HUFv05_isError(size_t code);        /* tells if a return value is an error code */\nconst char* HUFv05_getErrorName(size_t code);   /* provides error code string (useful for debugging) */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  
 /* HUF0_H */\n/* ******************************************************************\n   Huff0 : Huffman codec, part of New Generation Entropy library\n   header file, for static linking only\n   Copyright (C) 2013-2016, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef HUF0_STATIC_H\n#define HUF0_STATIC_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n\n/* ****************************************\n*  Static allocation\n******************************************/\n/* static allocation of Huff0's DTable */\n#define HUFv05_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))\n#define HUFv05_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \\\n        unsigned short DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUFv05_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \\\n        unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUFv05_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \\\n        unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }\n\n\n/* ****************************************\n*  Advanced decompression functions\n******************************************/\nsize_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\nsize_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */\n\n\n/* ****************************************\n*  Huff0 detailed 
API\n******************************************/\n/*!\nHUFv05_decompress() does the following:\n1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics\n2. build Huffman table from save, using HUFv05_readDTableXn()\n3. decode 1 or 4 segments in parallel using HUFv05_decompressSXn_usingDTable\n*/\nsize_t HUFv05_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);\nsize_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);\n\nsize_t HUFv05_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);\nsize_t HUFv05_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);\n\n\n/* single stream variants */\n\nsize_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\nsize_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */\n\nsize_t HUFv05_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);\nsize_t HUFv05_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);\n\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* HUF0_STATIC_H */\n/* ******************************************************************\n   Huff0 : Huffman coder, part of New Generation Entropy library\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must 
reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSEv05+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/* **************************************************************\n*  Compiler specifics\n****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n/* inline is defined */\n#elif defined(_MSC_VER)\n#  define inline __inline\n#else\n#  define inline /* disable inline */\n#endif\n\n\n#ifdef _MSC_VER    /* Visual Studio */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#endif\n\n\n/* **************************************************************\n*  Includes\n****************************************************************/\n#include <stdlib.h>     /* malloc, free, qsort 
*/\n#include <string.h>     /* memcpy, memset */\n#include <stdio.h>      /* printf (debug) */\n\n\n/* **************************************************************\n*  Constants\n****************************************************************/\n#define HUFv05_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUFv05_MAX_TABLELOG. Beyond that value, code does not work */\n#define HUFv05_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUFv05_ABSOLUTEMAX_TABLELOG */\n#define HUFv05_DEFAULT_TABLELOG  HUFv05_MAX_TABLELOG   /* tableLog by default, when not specified */\n#define HUFv05_MAX_SYMBOL_VALUE 255\n#if (HUFv05_MAX_TABLELOG > HUFv05_ABSOLUTEMAX_TABLELOG)\n#  error \"HUFv05_MAX_TABLELOG is too large !\"\n#endif\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\nunsigned HUFv05_isError(size_t code) { return ERR_isError(code); }\nconst char* HUFv05_getErrorName(size_t code) { return ERR_getErrorName(code); }\n#define HUFv05_STATIC_ASSERT(c) { enum { HUFv05_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/* *******************************************************\n*  Huff0 : Huffman block decompression\n*********************************************************/\ntypedef struct { BYTE byte; BYTE nbBits; } HUFv05_DEltX2;   /* single-symbol decoding */\n\ntypedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv05_DEltX4;  /* double-symbols decoding */\n\ntypedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;\n\n/*! 
HUFv05_readStats\n    Read compact Huffman tree, saved by HUFv05_writeCTable\n    @huffWeight : destination buffer\n    @return : size read from `src`\n*/\nstatic size_t HUFv05_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,\n                            U32* nbSymbolsPtr, U32* tableLogPtr,\n                            const void* src, size_t srcSize)\n{\n    U32 weightTotal;\n    U32 tableLog;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize;\n    size_t oSize;\n    U32 n;\n\n    if (!srcSize) return ERROR(srcSize_wrong);\n    iSize = ip[0];\n    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzer complain ... */\n\n    if (iSize >= 128)  { /* special header */\n        if (iSize >= (242)) {  /* RLE */\n            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };\n            oSize = l[iSize-242];\n            memset(huffWeight, 1, hwSize);\n            iSize = 0;\n        }\n        else {   /* Incompressible */\n            oSize = iSize - 127;\n            iSize = ((oSize+1)/2);\n            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n            if (oSize >= hwSize) return ERROR(corruption_detected);\n            ip += 1;\n            for (n=0; n<oSize; n+=2) {\n                huffWeight[n]   = ip[n/2] >> 4;\n                huffWeight[n+1] = ip[n/2] & 15;\n    }   }   }\n    else  {   /* header compressed with FSEv05 (normal case) */\n        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n        oSize = FSEv05_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */\n        if (FSEv05_isError(oSize)) return oSize;\n    }\n\n    /* collect weight stats */\n    memset(rankStats, 0, (HUFv05_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));\n    weightTotal = 0;\n    for (n=0; n<oSize; n++) {\n        if (huffWeight[n] >= HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);\n        rankStats[huffWeight[n]]++;\n      
  weightTotal += (1 << huffWeight[n]) >> 1;\n    }\n    if (weightTotal == 0) return ERROR(corruption_detected);\n\n    /* get last non-null symbol weight (implied, total must be 2^n) */\n    tableLog = BITv05_highbit32(weightTotal) + 1;\n    if (tableLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);\n    {   /* determine last weight */\n        U32 total = 1 << tableLog;\n        U32 rest = total - weightTotal;\n        U32 verif = 1 << BITv05_highbit32(rest);\n        U32 lastWeight = BITv05_highbit32(rest) + 1;\n        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */\n        huffWeight[oSize] = (BYTE)lastWeight;\n        rankStats[lastWeight]++;\n    }\n\n    /* check tree construction validity */\n    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */\n\n    /* results */\n    *nbSymbolsPtr = (U32)(oSize+1);\n    *tableLogPtr = tableLog;\n    return iSize+1;\n}\n\n\n/*-***************************/\n/*  single-symbol decoding   */\n/*-***************************/\n\nsize_t HUFv05_readDTableX2 (U16* DTable, const void* src, size_t srcSize)\n{\n    BYTE huffWeight[HUFv05_MAX_SYMBOL_VALUE + 1];\n    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */\n    U32 tableLog = 0;\n    size_t iSize;\n    U32 nbSymbols = 0;\n    U32 n;\n    U32 nextRankStart;\n    void* const dtPtr = DTable + 1;\n    HUFv05_DEltX2* const dt = (HUFv05_DEltX2*)dtPtr;\n\n    HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */\n    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUFv05_readStats(huffWeight, HUFv05_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);\n    if (HUFv05_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */\n    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */\n\n    /* Prepare ranks */\n    nextRankStart = 0;\n    for (n=1; n<=tableLog; n++) {\n        U32 current = nextRankStart;\n        nextRankStart += (rankVal[n] << (n-1));\n        rankVal[n] = current;\n    }\n\n    /* fill DTable */\n    for (n=0; n<nbSymbols; n++) {\n        const U32 w = huffWeight[n];\n        const U32 length = (1 << w) >> 1;\n        U32 i;\n        HUFv05_DEltX2 D;\n        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);\n        for (i = rankVal[w]; i < rankVal[w] + length; i++)\n            dt[i] = D;\n        rankVal[w] += length;\n    }\n\n    return iSize;\n}\n\nstatic BYTE HUFv05_decodeSymbolX2(BITv05_DStream_t* Dstream, const HUFv05_DEltX2* dt, const U32 dtLog)\n{\n        const size_t val = BITv05_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */\n        const BYTE c = dt[val].byte;\n        BITv05_skipBits(Dstream, dt[val].nbBits);\n        return c;\n}\n\n#define HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \\\n    *ptr++ = HUFv05_decodeSymbolX2(DStreamPtr, dt, dtLog)\n\n#define HUFv05_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \\\n        HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\n#define HUFv05_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\nstatic inline size_t HUFv05_decodeStreamX2(BYTE* p, BITv05_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv05_DEltX2* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 4 symbols at a time */\n    while 
((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-4)) {\n        HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUFv05_DECODE_SYMBOLX2_1(p, bitDPtr);\n        HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd))\n        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    /* no more data to retrieve from bitstream, hence no need to reload */\n    while (p < pEnd)\n        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    return pEnd-pStart;\n}\n\nsize_t HUFv05_decompress1X2_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U16* DTable)\n{\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + dstSize;\n    const U32 dtLog = DTable[0];\n    const void* dtPtr = DTable;\n    const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr)+1;\n    BITv05_DStream_t bitD;\n\n    if (dstSize <= cSrcSize) return ERROR(dstSize_tooSmall);\n    { size_t const errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize);\n      if (HUFv05_isError(errorCode)) return errorCode; }\n\n    HUFv05_decodeStreamX2(op, &bitD, oend, dt, dtLog);\n\n    /* check */\n    if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);\n\n    return dstSize;\n}\n\nsize_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n    size_t errorCode;\n\n    errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);\n    if (HUFv05_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    return HUFv05_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\nsize_t HUFv05_decompress4X2_usingDTable(\n          void* dst,  size_t 
dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const U16* DTable)\n{\n    /* Check */\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n        const void* const dtPtr = DTable;\n        const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;\n        const U32 dtLog = DTable[0];\n        size_t errorCode;\n\n        /* Init */\n        BITv05_DStream_t bitD1;\n        BITv05_DStream_t bitD2;\n        BITv05_DStream_t bitD3;\n        BITv05_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BITv05_initDStream(&bitD1, istart1, length1);\n        if (HUFv05_isError(errorCode)) return errorCode;\n        errorCode = BITv05_initDStream(&bitD2, istart2, length2);\n        if (HUFv05_isError(errorCode)) return errorCode;\n        errorCode = BITv05_initDStream(&bitD3, istart3, length3);\n        if (HUFv05_isError(errorCode)) 
return errorCode;\n        errorCode = BITv05_initDStream(&bitD4, istart4, length4);\n        if (HUFv05_isError(errorCode)) return errorCode;\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);\n        for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {\n            HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);\n            HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);\n            HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);\n            HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);\n            HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);\n            HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);\n            HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);\n            HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);\n            endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);\n        HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);\n        HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);\n        HUFv05_decodeStreamX2(op4, 
&bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nsize_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n    size_t errorCode;\n\n    errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);\n    if (HUFv05_isError(errorCode)) return errorCode;\n    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += errorCode;\n    cSrcSize -= errorCode;\n\n    return HUFv05_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/* *************************/\n/* double-symbols decoding */\n/* *************************/\n\nstatic void HUFv05_fillDTableX4Level2(HUFv05_DEltX4* DTable, U32 sizeLog, const U32 consumed,\n                           const U32* rankValOrigin, const int minWeight,\n                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,\n                           U32 nbBitsBaseline, U16 baseSeq)\n{\n    HUFv05_DEltX4 DElt;\n    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];\n    U32 s;\n\n    /* get pre-calculated rankVal */\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill skipped values */\n    if (minWeight>1) {\n        U32 i, skipSize = rankVal[minWeight];\n        MEM_writeLE16(&(DElt.sequence), baseSeq);\n        DElt.nbBits   = (BYTE)(consumed);\n        DElt.length   = 1;\n        for (i = 0; i < skipSize; i++)\n            DTable[i] = DElt;\n    }\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */\n        const U32 symbol = sortedSymbols[s].symbol;\n        const U32 weight = sortedSymbols[s].weight;\n        
const U32 nbBits = nbBitsBaseline - weight;\n        const U32 length = 1 << (sizeLog-nbBits);\n        const U32 start = rankVal[weight];\n        U32 i = start;\n        const U32 end = start + length;\n\n        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));\n        DElt.nbBits = (BYTE)(nbBits + consumed);\n        DElt.length = 2;\n        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */\n\n        rankVal[weight] += length;\n    }\n}\n\ntypedef U32 rankVal_t[HUFv05_ABSOLUTEMAX_TABLELOG][HUFv05_ABSOLUTEMAX_TABLELOG + 1];\n\nstatic void HUFv05_fillDTableX4(HUFv05_DEltX4* DTable, const U32 targetLog,\n                           const sortedSymbol_t* sortedList, const U32 sortedListSize,\n                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,\n                           const U32 nbBitsBaseline)\n{\n    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];\n    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */\n    const U32 minBits  = nbBitsBaseline - maxWeight;\n    U32 s;\n\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++) {\n        const U16 symbol = sortedList[s].symbol;\n        const U32 weight = sortedList[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 start = rankVal[weight];\n        const U32 length = 1 << (targetLog-nbBits);\n\n        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */\n            U32 sortedRank;\n            int minWeight = nbBits + scaleLog;\n            if (minWeight < 1) minWeight = 1;\n            sortedRank = rankStart[minWeight];\n            HUFv05_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,\n                           rankValOrigin[nbBits], minWeight,\n                           sortedList+sortedRank, sortedListSize-sortedRank,\n                           
nbBitsBaseline, symbol);\n        } else {\n            U32 i;\n            const U32 end = start + length;\n            HUFv05_DEltX4 DElt;\n\n            MEM_writeLE16(&(DElt.sequence), symbol);\n            DElt.nbBits   = (BYTE)(nbBits);\n            DElt.length   = 1;\n            for (i = start; i < end; i++)\n                DTable[i] = DElt;\n        }\n        rankVal[weight] += length;\n    }\n}\n\nsize_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize)\n{\n    BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1];\n    sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1];\n    U32 rankStats[HUFv05_ABSOLUTEMAX_TABLELOG + 1] = { 0 };\n    U32 rankStart0[HUFv05_ABSOLUTEMAX_TABLELOG + 2] = { 0 };\n    U32* const rankStart = rankStart0+1;\n    rankVal_t rankVal;\n    U32 tableLog, maxW, sizeOfSort, nbSymbols;\n    const U32 memLog = DTable[0];\n    size_t iSize;\n    void* dtPtr = DTable;\n    HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1;\n\n    HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned));   /* if compilation fails here, assertion is false */\n    if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);\n    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUFv05_readStats(weightList, HUFv05_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);\n    if (HUFv05_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */\n\n    /* find maxWeight */\n    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */\n\n    /* Get start index of each weight */\n    {\n        U32 w, nextRankStart = 0;\n        for (w=1; w<=maxW; w++) {\n            U32 current = nextRankStart;\n            nextRankStart += rankStats[w];\n            rankStart[w] = current;\n        }\n        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/\n        sizeOfSort = nextRankStart;\n    }\n\n    /* sort symbols by weight */\n    {\n        U32 s;\n        for (s=0; s<nbSymbols; s++) {\n            U32 w = weightList[s];\n            U32 r = rankStart[w]++;\n            sortedSymbol[r].symbol = (BYTE)s;\n            sortedSymbol[r].weight = (BYTE)w;\n        }\n        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */\n    }\n\n    /* Build rankVal */\n    {\n        const U32 minBits = tableLog+1 - maxW;\n        U32 nextRankVal = 0;\n        U32 w, consumed;\n        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */\n        U32* rankVal0 = rankVal[0];\n        for (w=1; w<=maxW; w++) {\n            U32 current = nextRankVal;\n            nextRankVal += rankStats[w] << (w+rescale);\n            rankVal0[w] = current;\n        }\n        for (consumed = minBits; consumed <= memLog - minBits; consumed++) {\n            U32* rankValPtr = rankVal[consumed];\n            for (w = 1; w <= maxW; w++) {\n                rankValPtr[w] = rankVal0[w] >> consumed;\n    }   }   }\n\n    HUFv05_fillDTableX4(dt, memLog,\n                   sortedSymbol, sizeOfSort,\n                   rankStart0, rankVal, maxW,\n       
            tableLog+1);\n\n    return iSize;\n}\n\n\nstatic U32 HUFv05_decodeSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BITv05_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 2);\n    BITv05_skipBits(DStream, dt[val].nbBits);\n    return dt[val].length;\n}\n\nstatic U32 HUFv05_decodeLastSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BITv05_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 1);\n    if (dt[val].length==1) BITv05_skipBits(DStream, dt[val].nbBits);\n    else {\n        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {\n            BITv05_skipBits(DStream, dt[val].nbBits);\n            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))\n                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */\n    }   }\n    return 1;\n}\n\n\n#define HUFv05_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \\\n    ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUFv05_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \\\n        ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUFv05_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\nstatic inline size_t HUFv05_decodeStreamX4(BYTE* p, BITv05_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv05_DEltX4* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 8 symbols at a time */\n    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd-7)) {\n        HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUFv05_DECODE_SYMBOLX4_1(p, bitDPtr);\n        HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-2))\n        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);\n\n    while (p <= pEnd-2)\n        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */\n\n    if (p < pEnd)\n        p += HUFv05_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);\n\n    return p-pStart;\n}\n\n\nsize_t HUFv05_decompress1X4_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const unsigned* DTable)\n{\n    const BYTE* const istart = (const BYTE*) cSrc;\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* const oend = ostart + dstSize;\n\n    const U32 dtLog = DTable[0];\n    const void* const dtPtr = DTable;\n    const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;\n    size_t errorCode;\n\n    /* Init */\n    BITv05_DStream_t bitD;\n    errorCode = BITv05_initDStream(&bitD, 
istart, cSrcSize);\n    if (HUFv05_isError(errorCode)) return errorCode;\n\n    /* finish bitStreams one by one */\n    HUFv05_decodeStreamX4(ostart, &bitD, oend,     dt, dtLog);\n\n    /* check */\n    if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);\n\n    /* decoded size */\n    return dstSize;\n}\n\nsize_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);\n    if (HUFv05_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize;\n    cSrcSize -= hSize;\n\n    return HUFv05_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\nsize_t HUFv05_decompress4X4_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const unsigned* DTable)\n{\n    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */\n\n    {\n        const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n        const void* const dtPtr = DTable;\n        const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;\n        const U32 dtLog = DTable[0];\n        size_t errorCode;\n\n        /* Init */\n        BITv05_DStream_t bitD1;\n        BITv05_DStream_t bitD2;\n        BITv05_DStream_t bitD3;\n        BITv05_DStream_t bitD4;\n        const size_t length1 = MEM_readLE16(istart);\n        const size_t length2 = MEM_readLE16(istart+2);\n        const size_t length3 = MEM_readLE16(istart+4);\n        size_t length4;\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = 
istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n\n        length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        errorCode = BITv05_initDStream(&bitD1, istart1, length1);\n        if (HUFv05_isError(errorCode)) return errorCode;\n        errorCode = BITv05_initDStream(&bitD2, istart2, length2);\n        if (HUFv05_isError(errorCode)) return errorCode;\n        errorCode = BITv05_initDStream(&bitD3, istart3, length3);\n        if (HUFv05_isError(errorCode)) return errorCode;\n        errorCode = BITv05_initDStream(&bitD4, istart4, length4);\n        if (HUFv05_isError(errorCode)) return errorCode;\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);\n        for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {\n            HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);\n            HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);\n            HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);\n            HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);\n            HUFv05_DECODE_SYMBOLX4_1(op1, &bitD1);\n            HUFv05_DECODE_SYMBOLX4_1(op2, &bitD2);\n            HUFv05_DECODE_SYMBOLX4_1(op3, &bitD3);\n            HUFv05_DECODE_SYMBOLX4_1(op4, &bitD4);\n            HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);\n            HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);\n            HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);\n            HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);\n            HUFv05_DECODE_SYMBOLX4_0(op1, 
&bitD1);\n            HUFv05_DECODE_SYMBOLX4_0(op2, &bitD2);\n            HUFv05_DECODE_SYMBOLX4_0(op3, &bitD3);\n            HUFv05_DECODE_SYMBOLX4_0(op4, &bitD4);\n\n            endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUFv05_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);\n        HUFv05_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);\n        HUFv05_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);\n        HUFv05_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nsize_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);\n    if (HUFv05_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize;\n    cSrcSize -= hSize;\n\n    return HUFv05_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);\n}\n\n\n/* ********************************/\n/* Generic decompression selector */\n/* ********************************/\n\ntypedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;\nstatic const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad 
*/] =\n{\n    /* single, double, quad */\n    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */\n    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */\n    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */\n    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */\n    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */\n    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */\n    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */\n    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */\n    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */\n    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */\n    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */\n    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */\n    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */\n    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */\n    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */\n    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */\n};\n\ntypedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);\n\nsize_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    static const decompressionAlgo decompress[3] = { HUFv05_decompress4X2, HUFv05_decompress4X4, NULL };\n    /* estimate decompression time */\n    U32 Q;\n    const U32 D256 = (U32)(dstSize >> 8);\n    U32 Dtime[3];\n    U32 algoNb = 0;\n    int n;\n\n    /* validation checks */\n    if (dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize >= dstSize) return ERROR(corruption_detected);   /* invalid, or not compressed, but not compressed already dealt with */\n    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */\n\n    /* decoder timing evaluation */\n    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */\n    for (n=0; n<3; 
n++)\n        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);\n\n    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */\n\n    if (Dtime[1] < Dtime[0]) algoNb = 1;\n\n    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);\n\n    //return HUFv05_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */\n    //return HUFv05_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */\n    //return HUFv05_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */\n}\n/*\n    zstd - standard compression library\n    Copyright (C) 2014-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n*/\n\n/* ***************************************************************\n*  Tuning parameters\n*****************************************************************/\n/*!\n * HEAPMODE :\n * Select how default decompression function ZSTDv05_decompress() will allocate memory,\n * in memory stack (0), or in memory heap (1, requires malloc())\n */\n#ifndef ZSTDv05_HEAPMODE\n#  define ZSTDv05_HEAPMODE 1\n#endif\n\n\n/*-*******************************************************\n*  Dependencies\n*********************************************************/\n#include <stdlib.h>      /* calloc */\n#include <string.h>      /* memcpy, memmove */\n#include <stdio.h>       /* debug only : printf */\n\n\n/*-*******************************************************\n*  Compiler specifics\n*********************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */\n#endif\n\n\n/*-*************************************\n*  Local types\n***************************************/\ntypedef struct\n{\n    blockType_t blockType;\n    U32 origSize;\n} blockProperties_t;\n\n\n/* 
*******************************************************\n*  Memory operations\n**********************************************************/\nstatic void ZSTDv05_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }\n\n\n/* *************************************\n*  Error Management\n***************************************/\n/*! ZSTDv05_isError() :\n*   tells if a return value is an error code */\nunsigned ZSTDv05_isError(size_t code) { return ERR_isError(code); }\n\n\n/*! ZSTDv05_getErrorName() :\n*   provides error code string (useful for debugging) */\nconst char* ZSTDv05_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\n\n/* *************************************************************\n*   Context management\n***************************************************************/\ntypedef enum { ZSTDv05ds_getFrameHeaderSize, ZSTDv05ds_decodeFrameHeader,\n               ZSTDv05ds_decodeBlockHeader, ZSTDv05ds_decompressBlock } ZSTDv05_dStage;\n\nstruct ZSTDv05_DCtx_s\n{\n    FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)];\n    FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)];\n    FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)];\n    unsigned   hufTableX4[HUFv05_DTABLE_SIZE(HufLog)];\n    const void* previousDstEnd;\n    const void* base;\n    const void* vBase;\n    const void* dictEnd;\n    size_t expected;\n    size_t headerSize;\n    ZSTDv05_parameters params;\n    blockType_t bType;   /* used in ZSTDv05_decompressContinue(), to transfer blockType between header decoding and block decoding stages */\n    ZSTDv05_dStage stage;\n    U32 flagStaticTables;\n    const BYTE* litPtr;\n    size_t litSize;\n    BYTE litBuffer[BLOCKSIZE + WILDCOPY_OVERLENGTH];\n    BYTE headerBuffer[ZSTDv05_frameHeaderSize_max];\n};  /* typedef'd to ZSTDv05_DCtx within \"zstd_static.h\" */\n\nsize_t ZSTDv05_sizeofDCtx (void); /* Hidden declaration */\nsize_t ZSTDv05_sizeofDCtx (void) { return sizeof(ZSTDv05_DCtx); }\n\nsize_t 
ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx)\n{\n    dctx->expected = ZSTDv05_frameHeaderSize_min;\n    dctx->stage = ZSTDv05ds_getFrameHeaderSize;\n    dctx->previousDstEnd = NULL;\n    dctx->base = NULL;\n    dctx->vBase = NULL;\n    dctx->dictEnd = NULL;\n    dctx->hufTableX4[0] = HufLog;\n    dctx->flagStaticTables = 0;\n    return 0;\n}\n\nZSTDv05_DCtx* ZSTDv05_createDCtx(void)\n{\n    ZSTDv05_DCtx* dctx = (ZSTDv05_DCtx*)malloc(sizeof(ZSTDv05_DCtx));\n    if (dctx==NULL) return NULL;\n    ZSTDv05_decompressBegin(dctx);\n    return dctx;\n}\n\nsize_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx)\n{\n    free(dctx);\n    return 0;   /* reserved as a potential error code in the future */\n}\n\nvoid ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx)\n{\n    memcpy(dstDCtx, srcDCtx,\n           sizeof(ZSTDv05_DCtx) - (BLOCKSIZE+WILDCOPY_OVERLENGTH + ZSTDv05_frameHeaderSize_max));  /* no need to copy workspace */\n}\n\n\n/* *************************************************************\n*   Decompression section\n***************************************************************/\n\n/* Frame format description\n   Frame Header -  [ Block Header - Block ] - Frame End\n   1) Frame Header\n      - 4 bytes - Magic Number : ZSTDv05_MAGICNUMBER (defined within zstd_internal.h)\n      - 1 byte  - Window Descriptor\n   2) Block Header\n      - 3 bytes, starting with a 2-bits descriptor\n                 Uncompressed, Compressed, Frame End, unused\n   3) Block\n      See Block Format Description\n   4) Frame End\n      - 3 bytes, compatible with Block Header\n*/\n\n/* Block format description\n\n   Block = Literal Section - Sequences Section\n   Prerequisite : size of (compressed) block, maximum size of regenerated data\n\n   1) Literal Section\n\n   1.1) Header : 1-5 bytes\n        flags: 2 bits\n            00 compressed by Huff0\n            01 unused\n            10 is Raw (uncompressed)\n            11 is Rle\n            Note : using 01 => Huff0 with precomputed 
table ?\n            Note : delta map ? => compressed ?\n\n   1.1.1) Huff0-compressed literal block : 3-5 bytes\n            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream\n            srcSize < 1 KB => 3 bytes (2-2-10-10)\n            srcSize < 16KB => 4 bytes (2-2-14-14)\n            else           => 5 bytes (2-2-18-18)\n            big endian convention\n\n   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes\n        size :  5 bits: (IS_RAW<<6) + (0<<4) + size\n               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)\n                        size&255\n               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)\n                        size>>8&255\n                        size&255\n\n   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes\n        size :  5 bits: (IS_RLE<<6) + (0<<4) + size\n               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)\n                        size&255\n               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)\n                        size>>8&255\n                        size&255\n\n   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes\n            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream\n            srcSize < 1 KB => 3 bytes (2-2-10-10)\n            srcSize < 16KB => 4 bytes (2-2-14-14)\n            else           => 5 bytes (2-2-18-18)\n            big endian convention\n\n        1- CTable available (stored into workspace ?)\n        2- Small input (fast heuristic ? Full comparison ? 
depend on clevel ?)\n\n\n   1.2) Literal block content\n\n   1.2.1) Huff0 block, using sizes from header\n        See Huff0 format\n\n   1.2.2) Huff0 block, using prepared table\n\n   1.2.3) Raw content\n\n   1.2.4) single byte\n\n\n   2) Sequences section\n      TO DO\n*/\n\n\n/** ZSTDv05_decodeFrameHeader_Part1() :\n*   decode the 1st part of the Frame Header, which tells Frame Header size.\n*   srcSize must be == ZSTDv05_frameHeaderSize_min.\n*   @return : the full size of the Frame Header */\nstatic size_t ZSTDv05_decodeFrameHeader_Part1(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)\n{\n    U32 magicNumber;\n    if (srcSize != ZSTDv05_frameHeaderSize_min)\n        return ERROR(srcSize_wrong);\n    magicNumber = MEM_readLE32(src);\n    if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);\n    zc->headerSize = ZSTDv05_frameHeaderSize_min;\n    return zc->headerSize;\n}\n\n\nsize_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize)\n{\n    U32 magicNumber;\n    if (srcSize < ZSTDv05_frameHeaderSize_min) return ZSTDv05_frameHeaderSize_max;\n    magicNumber = MEM_readLE32(src);\n    if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);\n    memset(params, 0, sizeof(*params));\n    params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTDv05_WINDOWLOG_ABSOLUTEMIN;\n    if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits */\n    return 0;\n}\n\n/** ZSTDv05_decodeFrameHeader_Part2() :\n*   decode the full Frame Header.\n*   srcSize must be the size provided by ZSTDv05_decodeFrameHeader_Part1().\n*   @return : 0, or an error code, which can be tested using ZSTDv05_isError() */\nstatic size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)\n{\n    size_t result;\n    if (srcSize != zc->headerSize)\n        return ERROR(srcSize_wrong);\n    result = ZSTDv05_getFrameParams(&(zc->params), src, srcSize);\n    if 
((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);\n    return result;\n}\n\n\nstatic size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)\n{\n    const BYTE* const in = (const BYTE* const)src;\n    BYTE headerFlags;\n    U32 cSize;\n\n    if (srcSize < 3)\n        return ERROR(srcSize_wrong);\n\n    headerFlags = *in;\n    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);\n\n    bpPtr->blockType = (blockType_t)(headerFlags >> 6);\n    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;\n\n    if (bpPtr->blockType == bt_end) return 0;\n    if (bpPtr->blockType == bt_rle) return 1;\n    return cSize;\n}\n\n\nstatic size_t ZSTDv05_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    if (dst==NULL) return ERROR(dstSize_tooSmall);\n    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);\n    memcpy(dst, src, srcSize);\n    return srcSize;\n}\n\n\n/*! ZSTDv05_decodeLiteralsBlock() :\n    @return : nb of bytes read from src (< srcSize ) */\nstatic size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,\n                                    const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */\n{\n    const BYTE* const istart = (const BYTE*) src;\n\n    /* any compressed block with literals segment must be at least this size */\n    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);\n\n    switch(istart[0]>> 6)\n    {\n    case IS_HUFv05:\n        {\n            size_t litSize, litCSize, singleStream=0;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */\n                /* 2 - 2 - 10 - 10 */\n                lhSize=3;\n                singleStream = 
istart[0] & 16;\n                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);\n                litCSize = ((istart[1] &  3) << 8) + istart[2];\n                break;\n            case 2:\n                /* 2 - 2 - 14 - 14 */\n                lhSize=4;\n                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);\n                litCSize = ((istart[2] & 63) <<  8) + istart[3];\n                break;\n            case 3:\n                /* 2 - 2 - 18 - 18 */\n                lhSize=5;\n                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);\n                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];\n                break;\n            }\n            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);\n            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);\n\n            if (HUFv05_isError(singleStream ?\n                            HUFv05_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :\n                            HUFv05_decompress   (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))\n                return ERROR(corruption_detected);\n\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n            return litCSize + lhSize;\n        }\n    case IS_PCH:\n        {\n            size_t errorCode;\n            size_t litSize, litCSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */\n                return ERROR(corruption_detected);\n            if (!dctx->flagStaticTables)\n                return ERROR(dictionary_corrupted);\n\n            /* 2 - 2 - 10 - 10 */\n            lhSize=3;\n            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);\n            litCSize = ((istart[1] &  3) << 8) + 
istart[2];\n            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);\n\n            errorCode = HUFv05_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);\n            if (HUFv05_isError(errorCode)) return ERROR(corruption_detected);\n\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n            return litCSize + lhSize;\n        }\n    case IS_RAW:\n        {\n            size_t litSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */\n                lhSize=1;\n                litSize = istart[0] & 31;\n                break;\n            case 2:\n                litSize = ((istart[0] & 15) << 8) + istart[1];\n                break;\n            case 3:\n                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];\n                break;\n            }\n\n            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */\n                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);\n                memcpy(dctx->litBuffer, istart+lhSize, litSize);\n                dctx->litPtr = dctx->litBuffer;\n                dctx->litSize = litSize;\n                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n                return lhSize+litSize;\n            }\n            /* direct reference into compressed stream */\n            dctx->litPtr = istart+lhSize;\n            dctx->litSize = litSize;\n            return lhSize+litSize;\n        }\n    case IS_RLE:\n        {\n            size_t litSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : 
default is impossible, since lhSize into [0..3] */\n                lhSize = 1;\n                litSize = istart[0] & 31;\n                break;\n            case 2:\n                litSize = ((istart[0] & 15) << 8) + istart[1];\n                break;\n            case 3:\n                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];\n                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */\n                break;\n            }\n            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);\n            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            return lhSize+1;\n        }\n    default:\n        return ERROR(corruption_detected);   /* impossible */\n    }\n}\n\n\nstatic size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,\n                         FSEv05_DTable* DTableLL, FSEv05_DTable* DTableML, FSEv05_DTable* DTableOffb,\n                         const void* src, size_t srcSize, U32 flagStaticTable)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* ip = istart;\n    const BYTE* const iend = istart + srcSize;\n    U32 LLtype, Offtype, MLtype;\n    unsigned LLlog, Offlog, MLlog;\n    size_t dumpsLength;\n\n    /* check */\n    if (srcSize < MIN_SEQUENCES_SIZE)\n        return ERROR(srcSize_wrong);\n\n    /* SeqHead */\n    *nbSeq = *ip++;\n    if (*nbSeq==0) return 1;\n    if (*nbSeq >= 128) {\n        if (ip >= iend) return ERROR(srcSize_wrong);\n        *nbSeq = ((nbSeq[0]-128)<<8) + *ip++;\n    }\n\n    if (ip >= iend) return ERROR(srcSize_wrong);\n    LLtype  = *ip >> 6;\n    Offtype = (*ip >> 4) & 3;\n    MLtype  = (*ip >> 2) & 3;\n    if (*ip & 2) {\n        if (ip+3 > iend) return ERROR(srcSize_wrong);\n        dumpsLength  = ip[2];\n        dumpsLength += ip[1] 
<< 8;\n        ip += 3;\n    } else {\n        if (ip+2 > iend) return ERROR(srcSize_wrong);\n        dumpsLength  = ip[1];\n        dumpsLength += (ip[0] & 1) << 8;\n        ip += 2;\n    }\n    *dumpsPtr = ip;\n    ip += dumpsLength;\n    *dumpsLengthPtr = dumpsLength;\n\n    /* check */\n    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are \"raw\", hence no header, but at least xxLog bits per type */\n\n    /* sequences */\n    {\n        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL >= MaxOff */\n        size_t headerSize;\n\n        /* Build DTables */\n        switch(LLtype)\n        {\n        case FSEv05_ENCODING_RLE :\n            LLlog = 0;\n            FSEv05_buildDTable_rle(DTableLL, *ip++);\n            break;\n        case FSEv05_ENCODING_RAW :\n            LLlog = LLbits;\n            FSEv05_buildDTable_raw(DTableLL, LLbits);\n            break;\n        case FSEv05_ENCODING_STATIC:\n            if (!flagStaticTable) return ERROR(corruption_detected);\n            break;\n        case FSEv05_ENCODING_DYNAMIC :\n        default :   /* impossible */\n            {   unsigned max = MaxLL;\n                headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip);\n                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);\n                if (LLlog > LLFSEv05Log) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSEv05_buildDTable(DTableLL, norm, max, LLlog);\n        }   }\n\n        switch(Offtype)\n        {\n        case FSEv05_ENCODING_RLE :\n            Offlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSEv05_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */\n            break;\n        case FSEv05_ENCODING_RAW :\n            Offlog = Offbits;\n            FSEv05_buildDTable_raw(DTableOffb, Offbits);\n            break;\n        case 
FSEv05_ENCODING_STATIC:\n            if (!flagStaticTable) return ERROR(corruption_detected);\n            break;\n        case FSEv05_ENCODING_DYNAMIC :\n        default :   /* impossible */\n            {   unsigned max = MaxOff;\n                headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip);\n                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);\n                if (Offlog > OffFSEv05Log) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSEv05_buildDTable(DTableOffb, norm, max, Offlog);\n        }   }\n\n        switch(MLtype)\n        {\n        case FSEv05_ENCODING_RLE :\n            MLlog = 0;\n            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : \"raw\", hence no header, but at least xxLog bits */\n            FSEv05_buildDTable_rle(DTableML, *ip++);\n            break;\n        case FSEv05_ENCODING_RAW :\n            MLlog = MLbits;\n            FSEv05_buildDTable_raw(DTableML, MLbits);\n            break;\n        case FSEv05_ENCODING_STATIC:\n            if (!flagStaticTable) return ERROR(corruption_detected);\n            break;\n        case FSEv05_ENCODING_DYNAMIC :\n        default :   /* impossible */\n            {   unsigned max = MaxML;\n                headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip);\n                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);\n                if (MLlog > MLFSEv05Log) return ERROR(corruption_detected);\n                ip += headerSize;\n                FSEv05_buildDTable(DTableML, norm, max, MLlog);\n    }   }   }\n\n    return ip-istart;\n}\n\n\ntypedef struct {\n    size_t litLength;\n    size_t matchLength;\n    size_t offset;\n} seq_t;\n\ntypedef struct {\n    BITv05_DStream_t DStream;\n    FSEv05_DState_t stateLL;\n    FSEv05_DState_t stateOffb;\n    FSEv05_DState_t stateML;\n    size_t prevOffset;\n    const BYTE* dumps;\n    const BYTE* dumpsEnd;\n} seqState_t;\n\n\n\nstatic void 
ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)\n{\n    size_t litLength;\n    size_t prevOffset;\n    size_t offset;\n    size_t matchLength;\n    const BYTE* dumps = seqState->dumps;\n    const BYTE* const de = seqState->dumpsEnd;\n\n    /* Literal length */\n    litLength = FSEv05_peakSymbol(&(seqState->stateLL));\n    prevOffset = litLength ? seq->offset : seqState->prevOffset;\n    if (litLength == MaxLL) {\n        const U32 add = *dumps++;\n        if (add < 255) litLength += add;\n        else if (dumps + 2 <= de) {\n            litLength = MEM_readLE16(dumps);\n            dumps += 2;\n            if ((litLength & 1) && dumps < de) {\n                litLength += *dumps << 16;\n                dumps += 1;\n            }\n            litLength>>=1;\n        }\n        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */\n    }\n\n    /* Offset */\n    {\n        static const U32 offsetPrefix[MaxOff+1] = {\n                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,\n                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,\n                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };\n        U32 offsetCode = FSEv05_peakSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */\n        U32 nbBits = offsetCode - 1;\n        if (offsetCode==0) nbBits = 0;   /* cmove */\n        offset = offsetPrefix[offsetCode] + BITv05_readBits(&(seqState->DStream), nbBits);\n        if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));\n        if (offsetCode==0) offset = prevOffset;   /* repcode, cmove */\n        if (offsetCode | !litLength) seqState->prevOffset = seq->offset;   /* cmove */\n        FSEv05_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));    /* update */\n    }\n\n    /* Literal length update */\n    FSEv05_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));   /* update */\n    
if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));\n\n    /* MatchLength */\n    matchLength = FSEv05_decodeSymbol(&(seqState->stateML), &(seqState->DStream));\n    if (matchLength == MaxML) {\n        const U32 add = dumps<de ? *dumps++ : 0;\n        if (add < 255) matchLength += add;\n        else if (dumps + 2 <= de) {\n            matchLength = MEM_readLE16(dumps);\n            dumps += 2;\n            if ((matchLength & 1) && dumps < de) {\n                matchLength += *dumps << 16;\n                dumps += 1;\n            }\n            matchLength >>= 1;\n        }\n        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */\n    }\n    matchLength += MINMATCH;\n\n    /* save result */\n    seq->litLength = litLength;\n    seq->offset = offset;\n    seq->matchLength = matchLength;\n    seqState->dumps = dumps;\n\n#if 0   /* debug */\n    {\n        static U64 totalDecoded = 0;\n        printf(\"pos %6u : %3u literals & match %3u bytes at distance %6u \\n\",\n           (U32)(totalDecoded), (U32)litLength, (U32)matchLength, (U32)offset);\n        totalDecoded += litLength + matchLength;\n    }\n#endif\n}\n\n\nstatic size_t ZSTDv05_execSequence(BYTE* op,\n                                BYTE* const oend, seq_t sequence,\n                                const BYTE** litPtr, const BYTE* const litLimit,\n                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)\n{\n    static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */\n    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */\n    BYTE* const oLitEnd = op + sequence.litLength;\n    const size_t sequenceLength = sequence.litLength + sequence.matchLength;\n    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */\n    BYTE* const oend_8 = oend-8;\n    const BYTE* const litEnd = *litPtr + 
sequence.litLength;\n    const BYTE* match = oLitEnd - sequence.offset;\n\n    /* check */\n    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */\n    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */\n    if (litEnd > litLimit) return ERROR(corruption_detected);   /* risk read beyond lit buffer */\n\n    /* copy Literals */\n    ZSTDv05_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */\n    op = oLitEnd;\n    *litPtr = litEnd;   /* update for next sequence */\n\n    /* copy Match */\n    if (sequence.offset > (size_t)(oLitEnd - base)) {\n        /* offset beyond prefix */\n        if (sequence.offset > (size_t)(oLitEnd - vBase))\n            return ERROR(corruption_detected);\n        match = dictEnd - (base-match);\n        if (match + sequence.matchLength <= dictEnd) {\n            memmove(oLitEnd, match, sequence.matchLength);\n            return sequenceLength;\n        }\n        /* span extDict & currentPrefixSegment */\n        {\n            size_t length1 = dictEnd - match;\n            memmove(oLitEnd, match, length1);\n            op = oLitEnd + length1;\n            sequence.matchLength -= length1;\n            match = base;\n            if (op > oend_8 || sequence.matchLength < MINMATCH) {\n              while (op < oMatchEnd) *op++ = *match++;\n              return sequenceLength;\n            }\n    }   }\n    /* Requirement: op <= oend_8 */\n\n    /* match within prefix */\n    if (sequence.offset < 8) {\n        /* close range match, overlap */\n        const int sub2 = dec64table[sequence.offset];\n        op[0] = match[0];\n        op[1] = match[1];\n        op[2] = match[2];\n        op[3] = match[3];\n        match += dec32table[sequence.offset];\n        ZSTDv05_copy4(op+4, match);\n        match -= sub2;\n    } else {\n        ZSTDv05_copy8(op, match);\n    }\n    op += 8; 
match += 8;\n\n    if (oMatchEnd > oend-(16-MINMATCH)) {\n        if (op < oend_8) {\n            ZSTDv05_wildcopy(op, match, oend_8 - op);\n            match += oend_8 - op;\n            op = oend_8;\n        }\n        while (op < oMatchEnd)\n            *op++ = *match++;\n    } else {\n        ZSTDv05_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */\n    }\n    return sequenceLength;\n}\n\n\nstatic size_t ZSTDv05_decompressSequences(\n                               ZSTDv05_DCtx* dctx,\n                               void* dst, size_t maxDstSize,\n                         const void* seqStart, size_t seqSize)\n{\n    const BYTE* ip = (const BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t errorCode, dumpsLength=0;\n    const BYTE* litPtr = dctx->litPtr;\n    const BYTE* const litEnd = litPtr + dctx->litSize;\n    int nbSeq=0;\n    const BYTE* dumps = NULL;\n    unsigned* DTableLL = dctx->LLTable;\n    unsigned* DTableML = dctx->MLTable;\n    unsigned* DTableOffb = dctx->OffTable;\n    const BYTE* const base = (const BYTE*) (dctx->base);\n    const BYTE* const vBase = (const BYTE*) (dctx->vBase);\n    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);\n\n    /* Build Decoding Tables */\n    errorCode = ZSTDv05_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,\n                                      DTableLL, DTableML, DTableOffb,\n                                      ip, seqSize, dctx->flagStaticTables);\n    if (ZSTDv05_isError(errorCode)) return errorCode;\n    ip += errorCode;\n\n    /* Regen sequences */\n    if (nbSeq) {\n        seq_t sequence;\n        seqState_t seqState;\n\n        memset(&sequence, 0, sizeof(sequence));\n        sequence.offset = REPCODE_STARTVALUE;\n        seqState.dumps = dumps;\n        seqState.dumpsEnd = dumps + dumpsLength;\n        
seqState.prevOffset = REPCODE_STARTVALUE;\n        errorCode = BITv05_initDStream(&(seqState.DStream), ip, iend-ip);\n        if (ERR_isError(errorCode)) return ERROR(corruption_detected);\n        FSEv05_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);\n        FSEv05_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);\n        FSEv05_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);\n\n        for ( ; (BITv05_reloadDStream(&(seqState.DStream)) <= BITv05_DStream_completed) && nbSeq ; ) {\n            size_t oneSeqSize;\n            nbSeq--;\n            ZSTDv05_decodeSequence(&sequence, &seqState);\n            oneSeqSize = ZSTDv05_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);\n            if (ZSTDv05_isError(oneSeqSize)) return oneSeqSize;\n            op += oneSeqSize;\n        }\n\n        /* check if reached exact end */\n        if (nbSeq) return ERROR(corruption_detected);\n    }\n\n    /* last literal segment */\n    {\n        size_t lastLLSize = litEnd - litPtr;\n        if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */\n        if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);\n        memcpy(op, litPtr, lastLLSize);\n        op += lastLLSize;\n    }\n\n    return op-ostart;\n}\n\n\nstatic void ZSTDv05_checkContinuity(ZSTDv05_DCtx* dctx, const void* dst)\n{\n    if (dst != dctx->previousDstEnd) {   /* not contiguous */\n        dctx->dictEnd = dctx->previousDstEnd;\n        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));\n        dctx->base = dst;\n        dctx->previousDstEnd = dst;\n    }\n}\n\n\nstatic size_t ZSTDv05_decompressBlock_internal(ZSTDv05_DCtx* dctx,\n                            void* dst, size_t dstCapacity,\n                      const void* src, size_t srcSize)\n{   /* blockType == blockCompressed */\n    const BYTE* ip = (const BYTE*)src;\n    size_t 
litCSize;\n\n    if (srcSize >= BLOCKSIZE) return ERROR(srcSize_wrong);\n\n    /* Decode literals sub-block */\n    litCSize = ZSTDv05_decodeLiteralsBlock(dctx, src, srcSize);\n    if (ZSTDv05_isError(litCSize)) return litCSize;\n    ip += litCSize;\n    srcSize -= litCSize;\n\n    return ZSTDv05_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);\n}\n\n\nsize_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx,\n                            void* dst, size_t dstCapacity,\n                      const void* src, size_t srcSize)\n{\n    ZSTDv05_checkContinuity(dctx, dst);\n    return ZSTDv05_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);\n}\n\n\n/*! ZSTDv05_decompress_continueDCtx\n*   dctx must have been properly initialized */\nstatic size_t ZSTDv05_decompress_continueDCtx(ZSTDv05_DCtx* dctx,\n                                 void* dst, size_t maxDstSize,\n                                 const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* iend = ip + srcSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + maxDstSize;\n    size_t remainingSize = srcSize;\n    blockProperties_t blockProperties;\n    memset(&blockProperties, 0, sizeof(blockProperties));\n\n    /* Frame Header */\n    {   size_t frameHeaderSize;\n        if (srcSize < ZSTDv05_frameHeaderSize_min+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);\n        frameHeaderSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);\n        if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;\n        if (srcSize < frameHeaderSize+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);\n        ip += frameHeaderSize; remainingSize -= frameHeaderSize;\n        frameHeaderSize = ZSTDv05_decodeFrameHeader_Part2(dctx, src, frameHeaderSize);\n        if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;\n    }\n\n    /* Loop on each block */\n    while (1)\n    {\n     
   size_t decodedSize=0;\n        size_t cBlockSize = ZSTDv05_getcBlockSize(ip, iend-ip, &blockProperties);\n        if (ZSTDv05_isError(cBlockSize)) return cBlockSize;\n\n        ip += ZSTDv05_blockHeaderSize;\n        remainingSize -= ZSTDv05_blockHeaderSize;\n        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);\n\n        switch(blockProperties.blockType)\n        {\n        case bt_compressed:\n            decodedSize = ZSTDv05_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);\n            break;\n        case bt_raw :\n            decodedSize = ZSTDv05_copyRawBlock(op, oend-op, ip, cBlockSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet supported */\n            break;\n        case bt_end :\n            /* end of frame */\n            if (remainingSize) return ERROR(srcSize_wrong);\n            break;\n        default:\n            return ERROR(GENERIC);   /* impossible */\n        }\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        if (ZSTDv05_isError(decodedSize)) return decodedSize;\n        op += decodedSize;\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n    }\n\n    return op-ostart;\n}\n\n\nsize_t ZSTDv05_decompress_usingPreparedDCtx(ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* refDCtx,\n                                         void* dst, size_t maxDstSize,\n                                   const void* src, size_t srcSize)\n{\n    ZSTDv05_copyDCtx(dctx, refDCtx);\n    ZSTDv05_checkContinuity(dctx, dst);\n    return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);\n}\n\n\nsize_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,\n                                 void* dst, size_t maxDstSize,\n                                 const void* src, size_t srcSize,\n                                 const void* dict, size_t dictSize)\n{\n    ZSTDv05_decompressBegin_usingDict(dctx, dict, dictSize);\n    ZSTDv05_checkContinuity(dctx, dst);\n  
  return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);\n}\n\n\nsize_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    return ZSTDv05_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);\n}\n\nsize_t ZSTDv05_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n#if defined(ZSTDv05_HEAPMODE) && (ZSTDv05_HEAPMODE==1)\n    size_t regenSize;\n    ZSTDv05_DCtx* dctx = ZSTDv05_createDCtx();\n    if (dctx==NULL) return ERROR(memory_allocation);\n    regenSize = ZSTDv05_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);\n    ZSTDv05_freeDCtx(dctx);\n    return regenSize;\n#else\n    ZSTDv05_DCtx dctx;\n    return ZSTDv05_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);\n#endif\n}\n\n/* ZSTD_errorFrameSizeInfoLegacy() :\n   assumes `cSize` and `dBound` are _not_ NULL */\nstatic void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)\n{\n    *cSize = ret;\n    *dBound = ZSTD_CONTENTSIZE_ERROR;\n}\n\nvoid ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)\n{\n    const BYTE* ip = (const BYTE*)src;\n    size_t remainingSize = srcSize;\n    size_t nbBlocks = 0;\n    blockProperties_t blockProperties;\n\n    /* Frame Header */\n    if (srcSize < ZSTDv05_frameHeaderSize_min) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n        return;\n    }\n    if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));\n        return;\n    }\n    ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;\n\n    /* Loop on each block */\n    while (1)\n    {\n        size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);\n        if (ZSTDv05_isError(cBlockSize)) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, 
cBlockSize);\n            return;\n        }\n\n        ip += ZSTDv05_blockHeaderSize;\n        remainingSize -= ZSTDv05_blockHeaderSize;\n        if (cBlockSize > remainingSize) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n        nbBlocks++;\n    }\n\n    *cSize = ip - (const BYTE*)src;\n    *dBound = nbBlocks * BLOCKSIZE;\n}\n\n/* ******************************\n*  Streaming Decompression API\n********************************/\nsize_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx)\n{\n    return dctx->expected;\n}\n\nsize_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    /* Sanity check */\n    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);\n    ZSTDv05_checkContinuity(dctx, dst);\n\n    /* Decompress : frame header; part 1 */\n    switch (dctx->stage)\n    {\n    case ZSTDv05ds_getFrameHeaderSize :\n        /* get frame header size */\n        if (srcSize != ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */\n        dctx->headerSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);\n        if (ZSTDv05_isError(dctx->headerSize)) return dctx->headerSize;\n        memcpy(dctx->headerBuffer, src, ZSTDv05_frameHeaderSize_min);\n        if (dctx->headerSize > ZSTDv05_frameHeaderSize_min) return ERROR(GENERIC); /* should never happen */\n        dctx->expected = 0;   /* not necessary to copy more */\n        /* fallthrough */\n    case ZSTDv05ds_decodeFrameHeader:\n        /* get frame header */\n        {   size_t const result = ZSTDv05_decodeFrameHeader_Part2(dctx, dctx->headerBuffer, dctx->headerSize);\n            if (ZSTDv05_isError(result)) return result;\n            dctx->expected = ZSTDv05_blockHeaderSize;\n            dctx->stage = 
ZSTDv05ds_decodeBlockHeader;\n            return 0;\n        }\n    case ZSTDv05ds_decodeBlockHeader:\n        {\n            /* Decode block header */\n            blockProperties_t bp;\n            size_t blockSize = ZSTDv05_getcBlockSize(src, ZSTDv05_blockHeaderSize, &bp);\n            if (ZSTDv05_isError(blockSize)) return blockSize;\n            if (bp.blockType == bt_end) {\n                dctx->expected = 0;\n                dctx->stage = ZSTDv05ds_getFrameHeaderSize;\n            }\n            else {\n                dctx->expected = blockSize;\n                dctx->bType = bp.blockType;\n                dctx->stage = ZSTDv05ds_decompressBlock;\n            }\n            return 0;\n        }\n    case ZSTDv05ds_decompressBlock:\n        {\n            /* Decompress : block content */\n            size_t rSize;\n            switch(dctx->bType)\n            {\n            case bt_compressed:\n                rSize = ZSTDv05_decompressBlock_internal(dctx, dst, maxDstSize, src, srcSize);\n                break;\n            case bt_raw :\n                rSize = ZSTDv05_copyRawBlock(dst, maxDstSize, src, srcSize);\n                break;\n            case bt_rle :\n                return ERROR(GENERIC);   /* not yet handled */\n                break;\n            case bt_end :   /* should never happen (filtered at phase 1) */\n                rSize = 0;\n                break;\n            default:\n                return ERROR(GENERIC);   /* impossible */\n            }\n            dctx->stage = ZSTDv05ds_decodeBlockHeader;\n            dctx->expected = ZSTDv05_blockHeaderSize;\n            dctx->previousDstEnd = (char*)dst + rSize;\n            return rSize;\n        }\n    default:\n        return ERROR(GENERIC);   /* impossible */\n    }\n}\n\n\nstatic void ZSTDv05_refDictContent(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    dctx->dictEnd = dctx->previousDstEnd;\n    dctx->vBase = (const char*)dict - ((const 
char*)(dctx->previousDstEnd) - (const char*)(dctx->base));\n    dctx->base = dict;\n    dctx->previousDstEnd = (const char*)dict + dictSize;\n}\n\nstatic size_t ZSTDv05_loadEntropy(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize;\n    short offcodeNCount[MaxOff+1];\n    unsigned offcodeMaxValue=MaxOff, offcodeLog;\n    short matchlengthNCount[MaxML+1];\n    unsigned matchlengthMaxValue = MaxML, matchlengthLog;\n    short litlengthNCount[MaxLL+1];\n    unsigned litlengthMaxValue = MaxLL, litlengthLog;\n\n    hSize = HUFv05_readDTableX4(dctx->hufTableX4, dict, dictSize);\n    if (HUFv05_isError(hSize)) return ERROR(dictionary_corrupted);\n    dict = (const char*)dict + hSize;\n    dictSize -= hSize;\n\n    offcodeHeaderSize = FSEv05_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);\n    if (FSEv05_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);\n    if (offcodeLog > OffFSEv05Log) return ERROR(dictionary_corrupted);\n    errorCode = FSEv05_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);\n    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);\n    dict = (const char*)dict + offcodeHeaderSize;\n    dictSize -= offcodeHeaderSize;\n\n    matchlengthHeaderSize = FSEv05_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);\n    if (FSEv05_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);\n    if (matchlengthLog > MLFSEv05Log) return ERROR(dictionary_corrupted);\n    errorCode = FSEv05_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);\n    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);\n    dict = (const char*)dict + matchlengthHeaderSize;\n    dictSize -= matchlengthHeaderSize;\n\n    litlengthHeaderSize = FSEv05_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, 
dictSize);\n    if (litlengthLog > LLFSEv05Log) return ERROR(dictionary_corrupted);\n    if (FSEv05_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);\n    errorCode = FSEv05_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);\n    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);\n\n    dctx->flagStaticTables = 1;\n    return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;\n}\n\nstatic size_t ZSTDv05_decompress_insertDictionary(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    size_t eSize;\n    U32 magic = MEM_readLE32(dict);\n    if (magic != ZSTDv05_DICT_MAGIC) {\n        /* pure content mode */\n        ZSTDv05_refDictContent(dctx, dict, dictSize);\n        return 0;\n    }\n    /* load entropy tables */\n    dict = (const char*)dict + 4;\n    dictSize -= 4;\n    eSize = ZSTDv05_loadEntropy(dctx, dict, dictSize);\n    if (ZSTDv05_isError(eSize)) return ERROR(dictionary_corrupted);\n\n    /* reference dictionary content */\n    dict = (const char*)dict + eSize;\n    dictSize -= eSize;\n    ZSTDv05_refDictContent(dctx, dict, dictSize);\n\n    return 0;\n}\n\n\nsize_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    size_t errorCode;\n    errorCode = ZSTDv05_decompressBegin(dctx);\n    if (ZSTDv05_isError(errorCode)) return errorCode;\n\n    if (dict && dictSize) {\n        errorCode = ZSTDv05_decompress_insertDictionary(dctx, dict, dictSize);\n        if (ZSTDv05_isError(errorCode)) return ERROR(dictionary_corrupted);\n    }\n\n    return 0;\n}\n\n/*\n    Buffered version of Zstd compression library\n    Copyright (C) 2015-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must 
retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd source repository : https://github.com/Cyan4973/zstd\n    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c\n*/\n\n/* The objects defined into this file should be considered experimental.\n * They are not labelled stable, as their prototype may change in the future.\n * You can use them for tests, provide feedback, or if you can endure risk of future changes.\n */\n\n\n\n/* *************************************\n*  Constants\n***************************************/\nstatic size_t ZBUFFv05_blockHeaderSize = 3;\n\n\n\n/* *** Compression *** */\n\nstatic size_t ZBUFFv05_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)\n{\n    size_t length = MIN(maxDstSize, srcSize);\n    if (length > 0) {\n        memcpy(dst, src, length);\n    }\n    return length;\n}\n\n\n\n\n/** 
************************************************\n*  Streaming decompression\n*\n*  A ZBUFFv05_DCtx object is required to track streaming operation.\n*  Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.\n*  Use ZBUFFv05_decompressInit() to start a new decompression operation.\n*  ZBUFFv05_DCtx objects can be reused multiple times.\n*\n*  Use ZBUFFv05_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *maxDstSizePtr can be any size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input.\n*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst .\n*  return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)\n*            or 0 when a frame is completely decoded\n*            or an error code, which can be tested using ZBUFFv05_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory)\n*  output : 128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded.\n*  input : just follow indications from ZBUFFv05_decompressContinue() to minimize latency. 
It should always be <= 128 KB + 3 .\n* **************************************************/\n\ntypedef enum { ZBUFFv05ds_init, ZBUFFv05ds_readHeader, ZBUFFv05ds_loadHeader, ZBUFFv05ds_decodeHeader,\n               ZBUFFv05ds_read, ZBUFFv05ds_load, ZBUFFv05ds_flush } ZBUFFv05_dStage;\n\n/* *** Resource management *** */\n\n#define ZSTDv05_frameHeaderSize_max 5   /* too magical, should come from reference */\nstruct ZBUFFv05_DCtx_s {\n    ZSTDv05_DCtx* zc;\n    ZSTDv05_parameters params;\n    char* inBuff;\n    size_t inBuffSize;\n    size_t inPos;\n    char* outBuff;\n    size_t outBuffSize;\n    size_t outStart;\n    size_t outEnd;\n    size_t hPos;\n    ZBUFFv05_dStage stage;\n    unsigned char headerBuffer[ZSTDv05_frameHeaderSize_max];\n};   /* typedef'd to ZBUFFv05_DCtx within \"zstd_buffered.h\" */\n\n\nZBUFFv05_DCtx* ZBUFFv05_createDCtx(void)\n{\n    ZBUFFv05_DCtx* zbc = (ZBUFFv05_DCtx*)malloc(sizeof(ZBUFFv05_DCtx));\n    if (zbc==NULL) return NULL;\n    memset(zbc, 0, sizeof(*zbc));\n    zbc->zc = ZSTDv05_createDCtx();\n    zbc->stage = ZBUFFv05ds_init;\n    return zbc;\n}\n\nsize_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* zbc)\n{\n    if (zbc==NULL) return 0;   /* support free on null */\n    ZSTDv05_freeDCtx(zbc->zc);\n    free(zbc->inBuff);\n    free(zbc->outBuff);\n    free(zbc);\n    return 0;\n}\n\n\n/* *** Initialization *** */\n\nsize_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* zbc, const void* dict, size_t dictSize)\n{\n    zbc->stage = ZBUFFv05ds_readHeader;\n    zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = 0;\n    return ZSTDv05_decompressBegin_usingDict(zbc->zc, dict, dictSize);\n}\n\nsize_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* zbc)\n{\n    return ZBUFFv05_decompressInitDictionary(zbc, NULL, 0);\n}\n\n\n/* *** Decompression *** */\n\nsize_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)\n{\n    const char* const istart = (const char*)src;\n    const char* ip = 
istart;\n    const char* const iend = istart + *srcSizePtr;\n    char* const ostart = (char*)dst;\n    char* op = ostart;\n    char* const oend = ostart + *maxDstSizePtr;\n    U32 notDone = 1;\n\n    while (notDone) {\n        switch(zbc->stage)\n        {\n        case ZBUFFv05ds_init :\n            return ERROR(init_missing);\n\n        case ZBUFFv05ds_readHeader :\n            /* read header from src */\n            {\n                size_t headerSize = ZSTDv05_getFrameParams(&(zbc->params), src, *srcSizePtr);\n                if (ZSTDv05_isError(headerSize)) return headerSize;\n                if (headerSize) {\n                    /* not enough input to decode header : tell how many bytes would be necessary */\n                    memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);\n                    zbc->hPos += *srcSizePtr;\n                    *maxDstSizePtr = 0;\n                    zbc->stage = ZBUFFv05ds_loadHeader;\n                    return headerSize - zbc->hPos;\n                }\n                zbc->stage = ZBUFFv05ds_decodeHeader;\n                break;\n            }\n\t    /* fall-through */\n        case ZBUFFv05ds_loadHeader:\n            /* complete header from src */\n            {\n                size_t headerSize = ZBUFFv05_limitCopy(\n                    zbc->headerBuffer + zbc->hPos, ZSTDv05_frameHeaderSize_max - zbc->hPos,\n                    src, *srcSizePtr);\n                zbc->hPos += headerSize;\n                ip += headerSize;\n                headerSize = ZSTDv05_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);\n                if (ZSTDv05_isError(headerSize)) return headerSize;\n                if (headerSize) {\n                    /* not enough input to decode header : tell how many bytes would be necessary */\n                    *maxDstSizePtr = 0;\n                    return headerSize - zbc->hPos;\n                }\n                // zbc->stage = ZBUFFv05ds_decodeHeader; break;   /* useless : 
stage follows */\n            }\n\t    /* fall-through */\n        case ZBUFFv05ds_decodeHeader:\n                /* apply header to create / resize buffers */\n                {\n                    size_t neededOutSize = (size_t)1 << zbc->params.windowLog;\n                    size_t neededInSize = BLOCKSIZE;   /* a block is never > BLOCKSIZE */\n                    if (zbc->inBuffSize < neededInSize) {\n                        free(zbc->inBuff);\n                        zbc->inBuffSize = neededInSize;\n                        zbc->inBuff = (char*)malloc(neededInSize);\n                        if (zbc->inBuff == NULL) return ERROR(memory_allocation);\n                    }\n                    if (zbc->outBuffSize < neededOutSize) {\n                        free(zbc->outBuff);\n                        zbc->outBuffSize = neededOutSize;\n                        zbc->outBuff = (char*)malloc(neededOutSize);\n                        if (zbc->outBuff == NULL) return ERROR(memory_allocation);\n                }   }\n                if (zbc->hPos) {\n                    /* some data already loaded into headerBuffer : transfer into inBuff */\n                    memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);\n                    zbc->inPos = zbc->hPos;\n                    zbc->hPos = 0;\n                    zbc->stage = ZBUFFv05ds_load;\n                    break;\n                }\n                zbc->stage = ZBUFFv05ds_read;\n\t\t/* fall-through */\n        case ZBUFFv05ds_read:\n            {\n                size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);\n                if (neededInSize==0) {  /* end of frame */\n                    zbc->stage = ZBUFFv05ds_init;\n                    notDone = 0;\n                    break;\n                }\n                if ((size_t)(iend-ip) >= neededInSize) {\n                    /* directly decode from src */\n                    size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,\n                 
       zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,\n                        ip, neededInSize);\n                    if (ZSTDv05_isError(decodedSize)) return decodedSize;\n                    ip += neededInSize;\n                    if (!decodedSize) break;   /* this was just a header */\n                    zbc->outEnd = zbc->outStart +  decodedSize;\n                    zbc->stage = ZBUFFv05ds_flush;\n                    break;\n                }\n                if (ip==iend) { notDone = 0; break; }   /* no more input */\n                zbc->stage = ZBUFFv05ds_load;\n            }\n\t    /* fall-through */\n        case ZBUFFv05ds_load:\n            {\n                size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);\n                size_t toLoad = neededInSize - zbc->inPos;   /* should always be <= remaining space within inBuff */\n                size_t loadedSize;\n                if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected);   /* should never happen */\n                loadedSize = ZBUFFv05_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);\n                ip += loadedSize;\n                zbc->inPos += loadedSize;\n                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */\n                {\n                    size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,\n                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,\n                        zbc->inBuff, neededInSize);\n                    if (ZSTDv05_isError(decodedSize)) return decodedSize;\n                    zbc->inPos = 0;   /* input is consumed */\n                    if (!decodedSize) { zbc->stage = ZBUFFv05ds_read; break; }   /* this was just a header */\n                    zbc->outEnd = zbc->outStart +  decodedSize;\n                    zbc->stage = ZBUFFv05ds_flush;\n                    // break; /* ZBUFFv05ds_flush follows */\n          
      }\n\t    }\n\t    /* fall-through */\n        case ZBUFFv05ds_flush:\n            {\n                size_t toFlushSize = zbc->outEnd - zbc->outStart;\n                size_t flushedSize = ZBUFFv05_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);\n                op += flushedSize;\n                zbc->outStart += flushedSize;\n                if (flushedSize == toFlushSize) {\n                    zbc->stage = ZBUFFv05ds_read;\n                    if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)\n                        zbc->outStart = zbc->outEnd = 0;\n                    break;\n                }\n                /* cannot flush everything */\n                notDone = 0;\n                break;\n            }\n        default: return ERROR(GENERIC);   /* impossible */\n    }   }\n\n    *srcSizePtr = ip-istart;\n    *maxDstSizePtr = op-ostart;\n\n    {   size_t nextSrcSizeHint = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);\n        if (nextSrcSizeHint > ZBUFFv05_blockHeaderSize) nextSrcSizeHint+= ZBUFFv05_blockHeaderSize;   /* get next block header too */\n        nextSrcSizeHint -= zbc->inPos;   /* already loaded*/\n        return nextSrcSizeHint;\n    }\n}\n\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nunsigned ZBUFFv05_isError(size_t errorCode) { return ERR_isError(errorCode); }\nconst char* ZBUFFv05_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }\n\nsize_t ZBUFFv05_recommendedDInSize(void)  { return BLOCKSIZE + ZBUFFv05_blockHeaderSize /* block header size*/ ; }\nsize_t ZBUFFv05_recommendedDOutSize(void) { return BLOCKSIZE; }\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v05.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTDv05_H\n#define ZSTDv05_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*-*************************************\n*  Dependencies\n***************************************/\n#include <stddef.h>   /* size_t */\n#include \"mem.h\"      /* U64, U32 */\n\n\n/* *************************************\n*  Simple functions\n***************************************/\n/*! ZSTDv05_decompress() :\n    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.\n    `dstCapacity` must be large enough, equal or larger than originalSize.\n    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\n              or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */\nsize_t ZSTDv05_decompress( void* dst, size_t dstCapacity,\n                     const void* src, size_t compressedSize);\n\n /**\n ZSTDv05_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.5.x format\n     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'\n     cSize (output parameter)  : the number of bytes that would be read to decompress this frame\n                                 or an error code if it fails (which can be tested using ZSTDv01_isError())\n     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame\n                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs\n\n    note : assumes `cSize` and `dBound` are _not_ NULL.\n */\nvoid 
ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize,\n                                     size_t* cSize, unsigned long long* dBound);\n\n/* *************************************\n*  Helper functions\n***************************************/\n/* Error Management */\nunsigned    ZSTDv05_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */\nconst char* ZSTDv05_getErrorName(size_t code);     /*!< provides readable string for an error code */\n\n\n/* *************************************\n*  Explicit memory management\n***************************************/\n/** Decompression context */\ntypedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx;\nZSTDv05_DCtx* ZSTDv05_createDCtx(void);\nsize_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx);      /*!< @return : errorCode */\n\n/** ZSTDv05_decompressDCtx() :\n*   Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */\nsize_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n\n/*-***********************\n*  Simple Dictionary API\n*************************/\n/*! 
ZSTDv05_decompress_usingDict() :\n*   Decompression using a pre-defined Dictionary content (see dictBuilder).\n*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.\n*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv05_decompressDCtx() */\nsize_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,\n                                            void* dst, size_t dstCapacity,\n                                      const void* src, size_t srcSize,\n                                      const void* dict,size_t dictSize);\n\n/*-************************\n*  Advanced Streaming API\n***************************/\ntypedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy;\ntypedef struct {\n    U64 srcSize;\n    U32 windowLog;     /* the only useful information to retrieve */\n    U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy;\n} ZSTDv05_parameters;\nsize_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize);\n\nsize_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize);\nvoid   ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx);\nsize_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx);\nsize_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n\n/*-***********************\n*  ZBUFF API\n*************************/\ntypedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx;\nZBUFFv05_DCtx* ZBUFFv05_createDCtx(void);\nsize_t         ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx);\n\nsize_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx);\nsize_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize);\n\nsize_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx,\n                                            
void* dst, size_t* dstCapacityPtr,\n                                      const void* src, size_t* srcSizePtr);\n\n/*-***************************************************************************\n*  Streaming decompression\n*\n*  A ZBUFFv05_DCtx object is required to track streaming operations.\n*  Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.\n*  Use ZBUFFv05_decompressInit() to start a new decompression operation,\n*   or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary.\n*  Note that ZBUFFv05_DCtx objects can be reused multiple times.\n*\n*  Use ZBUFFv05_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *dstCapacityPtr can be any size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.\n*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change @dst.\n*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency)\n*            or 0 when a frame is completely decoded\n*            or an error code, which can be tested using ZBUFFv05_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize()\n*  output : ZBUFFv05_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.\n*  input  : ZBUFFv05_recommendedDInSize==128Kb+3; just follow indications from ZBUFFv05_decompressContinue() to minimize latency. 
It should always be <= 128 KB + 3 .\n* *******************************************************************************/\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nunsigned ZBUFFv05_isError(size_t errorCode);\nconst char* ZBUFFv05_getErrorName(size_t errorCode);\n\n/** Functions below provide recommended buffer sizes for Compression or Decompression operations.\n*   These sizes are just hints, and tend to offer better latency */\nsize_t ZBUFFv05_recommendedDInSize(void);\nsize_t ZBUFFv05_recommendedDOutSize(void);\n\n\n\n/*-*************************************\n*  Constants\n***************************************/\n#define ZSTDv05_MAGICNUMBER 0xFD2FB525   /* v0.5 */\n\n\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* ZSTDv0505_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v06.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/*- Dependencies -*/\n#include \"zstd_v06.h\"\n#include <stddef.h>    /* size_t, ptrdiff_t */\n#include <string.h>    /* memcpy */\n#include <stdlib.h>    /* malloc, free, qsort */\n#include \"error_private.h\"\n\n\n\n/* ******************************************************************\n   mem.h\n   low-level memory access routines\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef MEM_H_MODULE\n#define MEM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/*-****************************************\n*  Compiler specifics\n******************************************/\n#if defined(_MSC_VER)   /* Visual Studio */\n#   include <stdlib.h>  /* _byteswap_ulong */\n#   include <intrin.h>  /* _byteswap_* */\n#endif\n#if defined(__GNUC__)\n#  define MEM_STATIC static __attribute__((unused))\n#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define MEM_STATIC static inline\n#elif defined(_MSC_VER)\n#  define MEM_STATIC static __inline\n#else\n#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/*-**************************************************************\n*  Basic Types\n*****************************************************************/\n#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )\n# include <stdint.h>\n  typedef  uint8_t BYTE;\n  typedef uint16_t U16;\n  typedef  int16_t S16;\n  typedef uint32_t U32;\n  typedef  
int32_t S32;\n  typedef uint64_t U64;\n  typedef  int64_t S64;\n#else\n  typedef unsigned char       BYTE;\n  typedef unsigned short      U16;\n  typedef   signed short      S16;\n  typedef unsigned int        U32;\n  typedef   signed int        S32;\n  typedef unsigned long long  U64;\n  typedef   signed long long  S64;\n#endif\n\n\n/*-**************************************************************\n*  Memory I/O\n*****************************************************************/\n/* MEM_FORCE_MEMORY_ACCESS :\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. 
This method is portable but violate C standard.\n *            It can generate buggy code on targets depending on alignment.\n *            In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define MEM_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))\n#    define MEM_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\nMEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }\nMEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }\n\nMEM_STATIC unsigned MEM_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)\n\n/* violates C standard, by lying on structure alignment.\nOnly use if no other choice to achieve best performance on target platform */\nMEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }\nMEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }\nMEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\n\n#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence 
potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;\n\nMEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }\nMEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\nMEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }\n\n#else\n\n/* default method, safe and standard.\n   can sometimes prove slower */\n\nMEM_STATIC U16 MEM_read16(const void* memPtr)\n{\n    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U32 MEM_read32(const void* memPtr)\n{\n    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U64 MEM_read64(const void* memPtr)\n{\n    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\n\n#endif /* MEM_FORCE_MEMORY_ACCESS */\n\nMEM_STATIC U32 MEM_swap32(U32 in)\n{\n#if defined(_MSC_VER)     /* Visual Studio */\n    return _byteswap_ulong(in);\n#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)\n    return __builtin_bswap32(in);\n#else\n    return  ((in << 24) & 0xff000000 ) |\n            ((in <<  8) & 0x00ff0000 ) |\n            ((in >>  8) & 0x0000ff00 ) |\n            ((in >> 24) & 0x000000ff );\n#endif\n}\n\nMEM_STATIC U64 MEM_swap64(U64 in)\n{\n#if defined(_MSC_VER)     /* Visual Studio */\n    return _byteswap_uint64(in);\n#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)\n    return __builtin_bswap64(in);\n#else\n    return  ((in << 56) & 0xff00000000000000ULL) |\n            ((in << 40) & 0x00ff000000000000ULL) |\n            ((in << 24) & 0x0000ff0000000000ULL) |\n            ((in << 8)  & 0x000000ff00000000ULL) |\n            ((in >> 8)  & 
0x00000000ff000000ULL) |\n            ((in >> 24) & 0x0000000000ff0000ULL) |\n            ((in >> 40) & 0x000000000000ff00ULL) |\n            ((in >> 56) & 0x00000000000000ffULL);\n#endif\n}\n\n\n/*=== Little endian r/w ===*/\n\nMEM_STATIC U16 MEM_readLE16(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read16(memPtr);\n    else {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)(p[0] + (p[1]<<8));\n    }\n}\n\nMEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)\n{\n    if (MEM_isLittleEndian()) {\n        MEM_write16(memPtr, val);\n    } else {\n        BYTE* p = (BYTE*)memPtr;\n        p[0] = (BYTE)val;\n        p[1] = (BYTE)(val>>8);\n    }\n}\n\nMEM_STATIC U32 MEM_readLE32(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read32(memPtr);\n    else\n        return MEM_swap32(MEM_read32(memPtr));\n}\n\n\nMEM_STATIC U64 MEM_readLE64(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read64(memPtr);\n    else\n        return MEM_swap64(MEM_read64(memPtr));\n}\n\n\nMEM_STATIC size_t MEM_readLEST(const void* memPtr)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_readLE32(memPtr);\n    else\n        return (size_t)MEM_readLE64(memPtr);\n}\n\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* MEM_H_MODULE */\n\n/*\n    zstd - standard compression library\n    Header File for static linking only\n    Copyright (C) 2014-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation 
and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : http://www.zstd.net\n*/\n#ifndef ZSTDv06_STATIC_H\n#define ZSTDv06_STATIC_H\n\n/* The prototypes defined within this file are considered experimental.\n * They should not be used in the context DLL as they may change in the future.\n * Prefer static linking if you need them, to control breaking version changes issues.\n */\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n\n/*- Advanced Decompression functions -*/\n\n/*! 
ZSTDv06_decompress_usingPreparedDCtx() :\n*   Same as ZSTDv06_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded.\n*   It avoids reloading the dictionary each time.\n*   `preparedDCtx` must have been properly initialized using ZSTDv06_decompressBegin_usingDict().\n*   Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */\nZSTDLIBv06_API size_t ZSTDv06_decompress_usingPreparedDCtx(\n                                           ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx,\n                                           void* dst, size_t dstCapacity,\n                                     const void* src, size_t srcSize);\n\n\n\n#define ZSTDv06_FRAMEHEADERSIZE_MAX 13    /* for static allocation */\nstatic const size_t ZSTDv06_frameHeaderSize_min = 5;\nstatic const size_t ZSTDv06_frameHeaderSize_max = ZSTDv06_FRAMEHEADERSIZE_MAX;\n\nZSTDLIBv06_API size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx);\n\n/*\n  Streaming decompression, direct mode (bufferless)\n\n  A ZSTDv06_DCtx object is required to track streaming operations.\n  Use ZSTDv06_createDCtx() / ZSTDv06_freeDCtx() to manage it.\n  A ZSTDv06_DCtx object can be re-used multiple times.\n\n  First optional operation is to retrieve frame parameters, using ZSTDv06_getFrameParams(), which doesn't consume the input.\n  It can provide the minimum size of rolling buffer required to properly decompress data,\n  and optionally the final size of uncompressed content.\n  (Note : content size is an optional info that may not be present. 
0 means : content size unknown)\n  Frame parameters are extracted from the beginning of compressed frame.\n  The amount of data to read is variable, from ZSTDv06_frameHeaderSize_min to ZSTDv06_frameHeaderSize_max (so if `srcSize` >= ZSTDv06_frameHeaderSize_max, it will always work)\n  If `srcSize` is too small for operation to succeed, function will return the minimum size it requires to produce a result.\n  Result : 0 when successful, it means the ZSTDv06_frameParams structure has been filled.\n          >0 : means there is not enough data into `src`. Provides the expected size to successfully decode header.\n           errorCode, which can be tested using ZSTDv06_isError()\n\n  Start decompression, with ZSTDv06_decompressBegin() or ZSTDv06_decompressBegin_usingDict().\n  Alternatively, you can copy a prepared context, using ZSTDv06_copyDCtx().\n\n  Then use ZSTDv06_nextSrcSizeToDecompress() and ZSTDv06_decompressContinue() alternatively.\n  ZSTDv06_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTDv06_decompressContinue().\n  ZSTDv06_decompressContinue() requires this exact amount of bytes, or it will fail.\n  ZSTDv06_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).\n  They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.\n\n  @result of ZSTDv06_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity)\n  It can be zero, which is not an error; it just means ZSTDv06_decompressContinue() has decoded some header.\n\n  A frame is fully decoded when ZSTDv06_nextSrcSizeToDecompress() returns zero.\n  Context can then be reset to start a new decompression.\n*/\n\n\n/* **************************************\n*  Block functions\n****************************************/\n/*! 
Block functions produce and decode raw zstd blocks, without frame metadata.\n    User will have to take in charge required information to regenerate data, such as compressed and content sizes.\n\n    A few rules to respect :\n    - Uncompressed block size must be <= ZSTDv06_BLOCKSIZE_MAX (128 KB)\n    - Compressing or decompressing requires a context structure\n      + Use ZSTDv06_createCCtx() and ZSTDv06_createDCtx()\n    - It is necessary to init context before starting\n      + compression : ZSTDv06_compressBegin()\n      + decompression : ZSTDv06_decompressBegin()\n      + variants _usingDict() are also allowed\n      + copyCCtx() and copyDCtx() work too\n    - When a block is considered not compressible enough, ZSTDv06_compressBlock() result will be zero.\n      In which case, nothing is produced into `dst`.\n      + User must test for such outcome and deal directly with uncompressed data\n      + ZSTDv06_decompressBlock() doesn't accept uncompressed data as input !!\n*/\n\n#define ZSTDv06_BLOCKSIZE_MAX (128 * 1024)   /* define, for static allocation */\nZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* ZSTDv06_STATIC_H */\n/*\n    zstd_internal - common functions to include\n    Header File for include\n    Copyright (C) 2014-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    
distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : https://www.zstd.net\n*/\n#ifndef ZSTDv06_CCOMMON_H_MODULE\n#define ZSTDv06_CCOMMON_H_MODULE\n\n\n/*-*************************************\n*  Common macros\n***************************************/\n#define MIN(a,b) ((a)<(b) ? (a) : (b))\n#define MAX(a,b) ((a)>(b) ? (a) : (b))\n\n\n/*-*************************************\n*  Common constants\n***************************************/\n#define ZSTDv06_DICT_MAGIC  0xEC30A436\n\n#define ZSTDv06_REP_NUM    3\n#define ZSTDv06_REP_INIT   ZSTDv06_REP_NUM\n#define ZSTDv06_REP_MOVE   (ZSTDv06_REP_NUM-1)\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define BIT7 128\n#define BIT6  64\n#define BIT5  32\n#define BIT4  16\n#define BIT1   2\n#define BIT0   1\n\n#define ZSTDv06_WINDOWLOG_ABSOLUTEMIN 12\nstatic const size_t ZSTDv06_fcs_fieldSize[4] = { 0, 1, 2, 8 };\n\n#define ZSTDv06_BLOCKHEADERSIZE 3   /* because C standard does not allow a static const value to be defined using another static const value .... 
:( */\nstatic const size_t ZSTDv06_blockHeaderSize = ZSTDv06_BLOCKHEADERSIZE;\ntypedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;\n\n#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */\n#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */\n\n#define HufLog 12\n\n#define IS_HUF 0\n#define IS_PCH 1\n#define IS_RAW 2\n#define IS_RLE 3\n\n#define LONGNBSEQ 0x7F00\n\n#define MINMATCH 3\n#define EQUAL_READ32 4\n#define REPCODE_STARTVALUE 1\n\n#define Litbits  8\n#define MaxLit ((1<<Litbits) - 1)\n#define MaxML  52\n#define MaxLL  35\n#define MaxOff 28\n#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */\n#define MLFSELog    9\n#define LLFSELog    9\n#define OffFSELog   8\n\n#define FSEv06_ENCODING_RAW     0\n#define FSEv06_ENCODING_RLE     1\n#define FSEv06_ENCODING_STATIC  2\n#define FSEv06_ENCODING_DYNAMIC 3\n\n#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)\n\nstatic const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,\n                                     13,14,15,16 };\nstatic const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,\n                                             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,\n                                            -1,-1,-1,-1 };\nstatic const U32 LL_defaultNormLog = 6;\n\nstatic const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,\n                                     12,13,14,15,16 };\nstatic const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,\n                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n     
                                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,\n                                            -1,-1,-1,-1,-1 };\nstatic const U32 ML_defaultNormLog = 6;\n\nstatic const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,\n                                              1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };\nstatic const U32 OF_defaultNormLog = 5;\n\n\n/*-*******************************************\n*  Shared functions to include for inlining\n*********************************************/\nstatic void ZSTDv06_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }\n#define COPY8(d,s) { ZSTDv06_copy8(d,s); d+=8; s+=8; }\n\n/*! ZSTDv06_wildcopy() :\n*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */\n#define WILDCOPY_OVERLENGTH 8\nMEM_STATIC void ZSTDv06_wildcopy(void* dst, const void* src, ptrdiff_t length)\n{\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + length;\n    do\n        COPY8(op, ip)\n    while (op < oend);\n}\n\n\n\n/*-*******************************************\n*  Private interfaces\n*********************************************/\ntypedef struct {\n    U32 off;\n    U32 len;\n} ZSTDv06_match_t;\n\ntypedef struct {\n    U32 price;\n    U32 off;\n    U32 mlen;\n    U32 litlen;\n    U32 rep[ZSTDv06_REP_INIT];\n} ZSTDv06_optimal_t;\n\ntypedef struct { U32  unused; } ZSTDv06_stats_t;\n\ntypedef struct {\n    void* buffer;\n    U32*  offsetStart;\n    U32*  offset;\n    BYTE* offCodeStart;\n    BYTE* litStart;\n    BYTE* lit;\n    U16*  litLengthStart;\n    U16*  litLength;\n    BYTE* llCodeStart;\n    U16*  matchLengthStart;\n    U16*  matchLength;\n    BYTE* mlCodeStart;\n    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */\n    U32   longLengthPos;\n    /* opt */\n    ZSTDv06_optimal_t* priceTable;\n    ZSTDv06_match_t* matchTable;\n    U32* 
matchLengthFreq;\n    U32* litLengthFreq;\n    U32* litFreq;\n    U32* offCodeFreq;\n    U32  matchLengthSum;\n    U32  matchSum;\n    U32  litLengthSum;\n    U32  litSum;\n    U32  offCodeSum;\n    U32  log2matchLengthSum;\n    U32  log2matchSum;\n    U32  log2litLengthSum;\n    U32  log2litSum;\n    U32  log2offCodeSum;\n    U32  factor;\n    U32  cachedPrice;\n    U32  cachedLitLength;\n    const BYTE* cachedLiterals;\n    ZSTDv06_stats_t stats;\n} seqStore_t;\n\nvoid ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);\n\n\n#endif   /* ZSTDv06_CCOMMON_H_MODULE */\n/* ******************************************************************\n   FSE : Finite State Entropy codec\n   Public Prototypes declaration\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef FSEv06_H\n#define FSEv06_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n\n/*-****************************************\n*  FSE simple functions\n******************************************/\n/*! FSEv06_decompress():\n    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated destination buffer 'dst', of size 'dstCapacity'.\n    @return : size of regenerated data (<= maxDstSize),\n              or an error code, which can be tested using FSEv06_isError() .\n\n    ** Important ** : FSEv06_decompress() does not decompress non-compressible nor RLE data !!!\n    Why ? 
: making this distinction requires a header.\n    Header management is intentionally delegated to the user layer, which can better manage special cases.\n*/\nsize_t FSEv06_decompress(void* dst,  size_t dstCapacity,\n                const void* cSrc, size_t cSrcSize);\n\n\n/*-*****************************************\n*  Tool functions\n******************************************/\nsize_t FSEv06_compressBound(size_t size);       /* maximum compressed size */\n\n/* Error Management */\nunsigned    FSEv06_isError(size_t code);        /* tells if a return value is an error code */\nconst char* FSEv06_getErrorName(size_t code);   /* provides error code string (useful for debugging) */\n\n\n\n/*-*****************************************\n*  FSE detailed API\n******************************************/\n/*!\n\nFSEv06_decompress() does the following:\n1. read normalized counters with readNCount()\n2. build decoding table 'DTable' from normalized counters\n3. decode the data stream using decoding table 'DTable'\n\nThe following API allows targeting specific sub-functions for advanced tasks.\nFor example, it's possible to compress several blocks using the same 'CTable',\nor to save and provide normalized distribution using external method.\n*/\n\n\n/* *** DECOMPRESSION *** */\n\n/*! FSEv06_readNCount():\n    Read compactly saved 'normalizedCounter' from 'rBuffer'.\n    @return : size read from 'rBuffer',\n              or an errorCode, which can be tested using FSEv06_isError().\n              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */\nsize_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);\n\n/*! Constructor and Destructor of FSEv06_DTable.\n    Note that its size depends on 'tableLog' */\ntypedef unsigned FSEv06_DTable;   /* don't allocate that. 
It's just a way to be more restrictive than void* */\nFSEv06_DTable* FSEv06_createDTable(unsigned tableLog);\nvoid        FSEv06_freeDTable(FSEv06_DTable* dt);\n\n/*! FSEv06_buildDTable():\n    Builds 'dt', which must be already allocated, using FSEv06_createDTable().\n    return : 0, or an errorCode, which can be tested using FSEv06_isError() */\nsize_t FSEv06_buildDTable (FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);\n\n/*! FSEv06_decompress_usingDTable():\n    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`\n    into `dst` which must be already allocated.\n    @return : size of regenerated data (necessarily <= `dstCapacity`),\n              or an errorCode, which can be tested using FSEv06_isError() */\nsize_t FSEv06_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv06_DTable* dt);\n\n/*!\nTutorial :\n----------\n(Note : these functions only decompress FSE-compressed blocks.\n If block is uncompressed, use memcpy() instead\n If block is a single repeated byte, use memset() instead )\n\nThe first step is to obtain the normalized frequencies of symbols.\nThis can be performed by FSEv06_readNCount() if it was saved using FSEv06_writeNCount().\n'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.\nIn practice, that means it's necessary to know 'maxSymbolValue' beforehand,\nor size the table to handle worst case situations (typically 256).\nFSEv06_readNCount() will provide 'tableLog' and 'maxSymbolValue'.\nThe result of FSEv06_readNCount() is the number of bytes read from 'rBuffer'.\nNote that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.\nIf there is an error, the function will return an error code, which can be tested using FSEv06_isError().\n\nThe next step is to build the decompression tables 'FSEv06_DTable' from 'normalizedCounter'.\nThis is 
performed by the function FSEv06_buildDTable().\nThe space required by 'FSEv06_DTable' must be already allocated using FSEv06_createDTable().\nIf there is an error, the function will return an error code, which can be tested using FSEv06_isError().\n\n`FSEv06_DTable` can then be used to decompress `cSrc`, with FSEv06_decompress_usingDTable().\n`cSrcSize` must be strictly correct, otherwise decompression will fail.\nFSEv06_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).\nIf there is an error, the function will return an error code, which can be tested using FSEv06_isError(). (ex: dst buffer too small)\n*/\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* FSEv06_H */\n/* ******************************************************************\n   bitstream\n   Part of FSE library\n   header file (to include)\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef BITSTREAM_H_MODULE\n#define BITSTREAM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/*\n*  This API consists of small unitary functions, which must be inlined for best performance.\n*  Since link-time-optimization is not available for all compilers,\n*  these functions are defined into a .h to be included.\n*/\n\n\n/*=========================================\n*  Target specific\n=========================================*/\n#if defined(__BMI__) && defined(__GNUC__)\n#  include <immintrin.h>   /* support for bextr (experimental) */\n#endif\n\n\n\n/*-********************************************\n*  bitStream decoding API (read backward)\n**********************************************/\ntypedef struct\n{\n    size_t   bitContainer;\n    unsigned bitsConsumed;\n    const char* ptr;\n    const char* start;\n} BITv06_DStream_t;\n\ntypedef enum { BITv06_DStream_unfinished = 0,\n               BITv06_DStream_endOfBuffer = 1,\n               BITv06_DStream_completed = 2,\n               BITv06_DStream_overflow = 3 } BITv06_DStream_status;  /* result of BITv06_reloadDStream() */\n               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... 
:( */\n\nMEM_STATIC size_t   BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize);\nMEM_STATIC size_t   BITv06_readBits(BITv06_DStream_t* bitD, unsigned nbBits);\nMEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD);\nMEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* bitD);\n\n\n\n/*-****************************************\n*  unsafe API\n******************************************/\nMEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits);\n/* faster, but works only if nbBits >= 1 */\n\n\n\n/*-**************************************************************\n*  Internal functions\n****************************************************************/\nMEM_STATIC unsigned BITv06_highbit32 ( U32 val)\n{\n#   if defined(_MSC_VER)   /* Visual */\n    unsigned long r=0;\n    _BitScanReverse ( &r, val );\n    return (unsigned) r;\n#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */\n    return __builtin_clz (val) ^ 31;\n#   else   /* Software version */\n    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };\n    U32 v = val;\n    unsigned r;\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];\n    return r;\n#   endif\n}\n\n\n\n/*-********************************************************\n* bitStream decoding\n**********************************************************/\n/*! 
BITv06_initDStream() :\n*   Initialize a BITv06_DStream_t.\n*   `bitD` : a pointer to an already allocated BITv06_DStream_t structure.\n*   `srcSize` must be the *exact* size of the bitStream, in bytes.\n*   @return : size of stream (== srcSize) or an errorCode if a problem is detected\n*/\nMEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize)\n{\n    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }\n\n    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];\n          if (lastByte == 0) return ERROR(GENERIC);   /* endMark not present */\n          bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }\n    } else {\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = bitD->start;\n        bitD->bitContainer = *(const BYTE*)(bitD->start);\n        switch(srcSize)\n        {\n            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */\n            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */\n            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */\n            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */\n            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */\n            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8; /* fall-through */\n            default: break;\n        }\n        { BYTE const lastByte = ((const 
BYTE*)srcBuffer)[srcSize-1];\n          if (lastByte == 0) return ERROR(GENERIC);   /* endMark not present */\n          bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }\n        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;\n    }\n\n    return srcSize;\n}\n\n\n MEM_STATIC size_t BITv06_lookBits(const BITv06_DStream_t* bitD, U32 nbBits)\n{\n    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);\n}\n\n/*! BITv06_lookBitsFast() :\n*   unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits)\n{\n    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);\n}\n\nMEM_STATIC void BITv06_skipBits(BITv06_DStream_t* bitD, U32 nbBits)\n{\n    bitD->bitsConsumed += nbBits;\n}\n\nMEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, U32 nbBits)\n{\n    size_t const value = BITv06_lookBits(bitD, nbBits);\n    BITv06_skipBits(bitD, nbBits);\n    return value;\n}\n\n/*! 
BITv06_readBitsFast() :\n*   unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits)\n{\n    size_t const value = BITv06_lookBitsFast(bitD, nbBits);\n    BITv06_skipBits(bitD, nbBits);\n    return value;\n}\n\nMEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD)\n{\n    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */\n        return BITv06_DStream_overflow;\n\n    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {\n        bitD->ptr -= bitD->bitsConsumed >> 3;\n        bitD->bitsConsumed &= 7;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        return BITv06_DStream_unfinished;\n    }\n    if (bitD->ptr == bitD->start) {\n        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv06_DStream_endOfBuffer;\n        return BITv06_DStream_completed;\n    }\n    {   U32 nbBytes = bitD->bitsConsumed >> 3;\n        BITv06_DStream_status result = BITv06_DStream_unfinished;\n        if (bitD->ptr - nbBytes < bitD->start) {\n            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */\n            result = BITv06_DStream_endOfBuffer;\n        }\n        bitD->ptr -= nbBytes;\n        bitD->bitsConsumed -= nbBytes*8;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */\n        return result;\n    }\n}\n\n/*! 
BITv06_endOfDStream() :\n*   @return Tells if DStream has exactly reached its end (all bits consumed).\n*/\nMEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream)\n{\n    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* BITSTREAM_H_MODULE */\n/* ******************************************************************\n   FSE : Finite State Entropy coder\n   header file for static linking (only)\n   Copyright (C) 2013-2015, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n   - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef FSEv06_STATIC_H\n#define FSEv06_STATIC_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* *****************************************\n*  Static allocation\n*******************************************/\n/* FSE buffer bounds */\n#define FSEv06_NCOUNTBOUND 512\n#define FSEv06_BLOCKBOUND(size) (size + (size>>7))\n#define FSEv06_COMPRESSBOUND(size) (FSEv06_NCOUNTBOUND + FSEv06_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */\n#define FSEv06_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))\n\n\n/* *****************************************\n*  FSE advanced API\n*******************************************/\nsize_t FSEv06_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);\n/* same as FSEv06_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr  */\n\nsize_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits);\n/* build a fake FSEv06_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */\n\nsize_t FSEv06_buildDTable_rle 
(FSEv06_DTable* dt, unsigned char symbolValue);\n/* build a fake FSEv06_DTable, designed to always generate the same symbolValue */\n\n\n/* *****************************************\n*  FSE symbol decompression API\n*******************************************/\ntypedef struct\n{\n    size_t      state;\n    const void* table;   /* precise table may vary, depending on U16 */\n} FSEv06_DState_t;\n\n\nstatic void     FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt);\n\nstatic unsigned char FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);\n\n\n/* *****************************************\n*  FSE unsafe API\n*******************************************/\nstatic unsigned char FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);\n/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */\n\n\n/* *****************************************\n*  Implementation of inlined functions\n*******************************************/\n\n\n/* ======    Decompression    ====== */\n\ntypedef struct {\n    U16 tableLog;\n    U16 fastMode;\n} FSEv06_DTableHeader;   /* sizeof U32 */\n\ntypedef struct\n{\n    unsigned short newState;\n    unsigned char  symbol;\n    unsigned char  nbBits;\n} FSEv06_decode_t;   /* size == U32 */\n\nMEM_STATIC void FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt)\n{\n    const void* ptr = dt;\n    const FSEv06_DTableHeader* const DTableH = (const FSEv06_DTableHeader*)ptr;\n    DStatePtr->state = BITv06_readBits(bitD, DTableH->tableLog);\n    BITv06_reloadDStream(bitD);\n    DStatePtr->table = dt + 1;\n}\n\nMEM_STATIC BYTE FSEv06_peekSymbol(const FSEv06_DState_t* DStatePtr)\n{\n    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    return DInfo.symbol;\n}\n\nMEM_STATIC void FSEv06_updateState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* 
bitD)\n{\n    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    size_t const lowBits = BITv06_readBits(bitD, nbBits);\n    DStatePtr->state = DInfo.newState + lowBits;\n}\n\nMEM_STATIC BYTE FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)\n{\n    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    BYTE const symbol = DInfo.symbol;\n    size_t const lowBits = BITv06_readBits(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\n/*! FSEv06_decodeSymbolFast() :\n    unsafe, only works if no symbol has a probability > 50% */\nMEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)\n{\n    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    BYTE const symbol = DInfo.symbol;\n    size_t const lowBits = BITv06_readBitsFast(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\n\n\n#ifndef FSEv06_COMMONDEFS_ONLY\n\n/* **************************************************************\n*  Tuning parameters\n****************************************************************/\n/*!MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */\n#define FSEv06_MAX_MEMORY_USAGE 14\n#define FSEv06_DEFAULT_MEMORY_USAGE 13\n\n/*!FSEv06_MAX_SYMBOL_VALUE :\n*  Maximum symbol value authorized.\n*  Required for proper stack allocation */\n#define FSEv06_MAX_SYMBOL_VALUE 255\n\n\n/* **************************************************************\n*  
template functions type & suffix\n****************************************************************/\n#define FSEv06_FUNCTION_TYPE BYTE\n#define FSEv06_FUNCTION_EXTENSION\n#define FSEv06_DECODE_TYPE FSEv06_decode_t\n\n\n#endif   /* !FSEv06_COMMONDEFS_ONLY */\n\n\n/* ***************************************************************\n*  Constants\n*****************************************************************/\n#define FSEv06_MAX_TABLELOG  (FSEv06_MAX_MEMORY_USAGE-2)\n#define FSEv06_MAX_TABLESIZE (1U<<FSEv06_MAX_TABLELOG)\n#define FSEv06_MAXTABLESIZE_MASK (FSEv06_MAX_TABLESIZE-1)\n#define FSEv06_DEFAULT_TABLELOG (FSEv06_DEFAULT_MEMORY_USAGE-2)\n#define FSEv06_MIN_TABLELOG 5\n\n#define FSEv06_TABLELOG_ABSOLUTE_MAX 15\n#if FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX\n#error \"FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX is not supported\"\n#endif\n\n#define FSEv06_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* FSEv06_STATIC_H */\n/*\n   Common functions of New Generation Entropy library\n   Copyright (C) 2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n*************************************************************************** */\n\n\n/*-****************************************\n*  FSE Error Management\n******************************************/\nunsigned FSEv06_isError(size_t code) { return ERR_isError(code); }\n\nconst char* FSEv06_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\n\n/* **************************************************************\n*  HUF Error Management\n****************************************************************/\nstatic unsigned HUFv06_isError(size_t code) { return ERR_isError(code); }\n\n\n/*-**************************************************************\n*  FSE NCount encoding-decoding\n****************************************************************/\nstatic short FSEv06_abs(short a) { return a<0 ? 
-a : a; }\n\nsize_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,\n                 const void* headerBuffer, size_t hbSize)\n{\n    const BYTE* const istart = (const BYTE*) headerBuffer;\n    const BYTE* const iend = istart + hbSize;\n    const BYTE* ip = istart;\n    int nbBits;\n    int remaining;\n    int threshold;\n    U32 bitStream;\n    int bitCount;\n    unsigned charnum = 0;\n    int previous0 = 0;\n\n    if (hbSize < 4) return ERROR(srcSize_wrong);\n    bitStream = MEM_readLE32(ip);\n    nbBits = (bitStream & 0xF) + FSEv06_MIN_TABLELOG;   /* extract tableLog */\n    if (nbBits > FSEv06_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);\n    bitStream >>= 4;\n    bitCount = 4;\n    *tableLogPtr = nbBits;\n    remaining = (1<<nbBits)+1;\n    threshold = 1<<nbBits;\n    nbBits++;\n\n    while ((remaining>1) && (charnum<=*maxSVPtr)) {\n        if (previous0) {\n            unsigned n0 = charnum;\n            while ((bitStream & 0xFFFF) == 0xFFFF) {\n                n0+=24;\n                if (ip < iend-5) {\n                    ip+=2;\n                    bitStream = MEM_readLE32(ip) >> bitCount;\n                } else {\n                    bitStream >>= 16;\n                    bitCount+=16;\n            }   }\n            while ((bitStream & 3) == 3) {\n                n0+=3;\n                bitStream>>=2;\n                bitCount+=2;\n            }\n            n0 += bitStream & 3;\n            bitCount += 2;\n            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);\n            while (charnum < n0) normalizedCounter[charnum++] = 0;\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {\n                ip += bitCount>>3;\n                bitCount &= 7;\n                bitStream = MEM_readLE32(ip) >> bitCount;\n            }\n            else\n                bitStream >>= 2;\n        }\n        {   short const max = (short)((2*threshold-1)-remaining);\n            short 
count;\n\n            if ((bitStream & (threshold-1)) < (U32)max) {\n                count = (short)(bitStream & (threshold-1));\n                bitCount   += nbBits-1;\n            } else {\n                count = (short)(bitStream & (2*threshold-1));\n                if (count >= threshold) count -= max;\n                bitCount   += nbBits;\n            }\n\n            count--;   /* extra accuracy */\n            remaining -= FSEv06_abs(count);\n            normalizedCounter[charnum++] = count;\n            previous0 = !count;\n            while (remaining < threshold) {\n                nbBits--;\n                threshold >>= 1;\n            }\n\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {\n                ip += bitCount>>3;\n                bitCount &= 7;\n            } else {\n                bitCount -= (int)(8 * (iend - 4 - ip));\n                ip = iend - 4;\n            }\n            bitStream = MEM_readLE32(ip) >> (bitCount & 31);\n    }   }   /* while ((remaining>1) && (charnum<=*maxSVPtr)) */\n    if (remaining != 1) return ERROR(GENERIC);\n    *maxSVPtr = charnum-1;\n\n    ip += (bitCount+7)>>3;\n    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);\n    return ip-istart;\n}\n/* ******************************************************************\n   FSE : Finite State Entropy decoder\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   
distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n\n/* **************************************************************\n*  Compiler specifics\n****************************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  define FORCE_INLINE static __forceinline\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */\n#else\n#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#    ifdef __GNUC__\n#      define FORCE_INLINE static inline __attribute__((always_inline))\n#    else\n#      define FORCE_INLINE static inline\n#    endif\n#  else\n#    define FORCE_INLINE static\n#  endif /* __STDC_VERSION__ */\n#endif\n\n\n/* **************************************************************\n*  Error 
Management\n****************************************************************/\n#define FSEv06_isError ERR_isError\n#define FSEv06_STATIC_ASSERT(c) { enum { FSEv06_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/* **************************************************************\n*  Complex types\n****************************************************************/\ntypedef U32 DTable_max_t[FSEv06_DTABLE_SIZE_U32(FSEv06_MAX_TABLELOG)];\n\n\n/* **************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef FSEv06_FUNCTION_EXTENSION\n#  error \"FSEv06_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSEv06_FUNCTION_TYPE\n#  error \"FSEv06_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSEv06_CAT(X,Y) X##Y\n#define FSEv06_FUNCTION_NAME(X,Y) FSEv06_CAT(X,Y)\n#define FSEv06_TYPE_NAME(X,Y) FSEv06_CAT(X,Y)\n\n\n/* Function templates */\nFSEv06_DTable* FSEv06_createDTable (unsigned tableLog)\n{\n    if (tableLog > FSEv06_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv06_TABLELOG_ABSOLUTE_MAX;\n    return (FSEv06_DTable*)malloc( FSEv06_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );\n}\n\nvoid FSEv06_freeDTable (FSEv06_DTable* dt)\n{\n    free(dt);\n}\n\nsize_t FSEv06_buildDTable(FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */\n    FSEv06_DECODE_TYPE* const tableDecode = (FSEv06_DECODE_TYPE*) (tdPtr);\n    U16 symbolNext[FSEv06_MAX_SYMBOL_VALUE+1];\n\n    U32 const maxSV1 = maxSymbolValue + 1;\n    U32 const tableSize = 1 << tableLog;\n    U32 highThreshold = tableSize-1;\n\n    /* Sanity Checks */\n    if 
(maxSymbolValue > FSEv06_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);\n    if (tableLog > FSEv06_MAX_TABLELOG) return ERROR(tableLog_tooLarge);\n\n    /* Init, lay down lowprob symbols */\n    {   FSEv06_DTableHeader DTableH;\n        DTableH.tableLog = (U16)tableLog;\n        DTableH.fastMode = 1;\n        {   S16 const largeLimit= (S16)(1 << (tableLog-1));\n            U32 s;\n            for (s=0; s<maxSV1; s++) {\n                if (normalizedCounter[s]==-1) {\n                    tableDecode[highThreshold--].symbol = (FSEv06_FUNCTION_TYPE)s;\n                    symbolNext[s] = 1;\n                } else {\n                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;\n                    symbolNext[s] = normalizedCounter[s];\n        }   }   }\n        memcpy(dt, &DTableH, sizeof(DTableH));\n    }\n\n    /* Spread symbols */\n    {   U32 const tableMask = tableSize-1;\n        U32 const step = FSEv06_TABLESTEP(tableSize);\n        U32 s, position = 0;\n        for (s=0; s<maxSV1; s++) {\n            int i;\n            for (i=0; i<normalizedCounter[s]; i++) {\n                tableDecode[position].symbol = (FSEv06_FUNCTION_TYPE)s;\n                position = (position + step) & tableMask;\n                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n        }   }\n\n        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n    }\n\n    /* Build Decoding table */\n    {   U32 u;\n        for (u=0; u<tableSize; u++) {\n            FSEv06_FUNCTION_TYPE const symbol = (FSEv06_FUNCTION_TYPE)(tableDecode[u].symbol);\n            U16 nextState = symbolNext[symbol]++;\n            tableDecode[u].nbBits = (BYTE) (tableLog - BITv06_highbit32 ((U32)nextState) );\n            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);\n    }   }\n\n    return 0;\n}\n\n\n\n#ifndef 
FSEv06_COMMONDEFS_ONLY\n\n/*-*******************************************************\n*  Decompression (Byte symbols)\n*********************************************************/\nsize_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, BYTE symbolValue)\n{\n    void* ptr = dt;\n    FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSEv06_decode_t* const cell = (FSEv06_decode_t*)dPtr;\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->newState = 0;\n    cell->symbol = symbolValue;\n    cell->nbBits = 0;\n\n    return 0;\n}\n\n\nsize_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits)\n{\n    void* ptr = dt;\n    FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSEv06_decode_t* const dinfo = (FSEv06_decode_t*)dPtr;\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSV1 = tableMask+1;\n    unsigned s;\n\n    /* Sanity checks */\n    if (nbBits < 1) return ERROR(GENERIC);         /* min size */\n\n    /* Build Decoding Table */\n    DTableH->tableLog = (U16)nbBits;\n    DTableH->fastMode = 1;\n    for (s=0; s<maxSV1; s++) {\n        dinfo[s].newState = 0;\n        dinfo[s].symbol = (BYTE)s;\n        dinfo[s].nbBits = (BYTE)nbBits;\n    }\n\n    return 0;\n}\n\nFORCE_INLINE size_t FSEv06_decompress_usingDTable_generic(\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const FSEv06_DTable* dt, const unsigned fast)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const omax = op + maxDstSize;\n    BYTE* const olimit = omax-3;\n\n    BITv06_DStream_t bitD;\n    FSEv06_DState_t state1;\n    FSEv06_DState_t state2;\n\n    /* Init */\n    { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */\n      if (FSEv06_isError(errorCode)) return errorCode; }\n\n    
FSEv06_initDState(&state1, &bitD, dt);\n    FSEv06_initDState(&state2, &bitD, dt);\n\n#define FSEv06_GETSYMBOL(statePtr) fast ? FSEv06_decodeSymbolFast(statePtr, &bitD) : FSEv06_decodeSymbol(statePtr, &bitD)\n\n    /* 4 symbols per loop */\n    for ( ; (BITv06_reloadDStream(&bitD)==BITv06_DStream_unfinished) && (op<olimit) ; op+=4) {\n        op[0] = FSEv06_GETSYMBOL(&state1);\n\n        if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BITv06_reloadDStream(&bitD);\n\n        op[1] = FSEv06_GETSYMBOL(&state2);\n\n        if (FSEv06_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            { if (BITv06_reloadDStream(&bitD) > BITv06_DStream_unfinished) { op+=2; break; } }\n\n        op[2] = FSEv06_GETSYMBOL(&state1);\n\n        if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BITv06_reloadDStream(&bitD);\n\n        op[3] = FSEv06_GETSYMBOL(&state2);\n    }\n\n    /* tail */\n    /* note : BITv06_reloadDStream(&bitD) >= FSEv06_DStream_partiallyFilled; Ends at exactly BITv06_DStream_completed */\n    while (1) {\n        if (op>(omax-2)) return ERROR(dstSize_tooSmall);\n\n        *op++ = FSEv06_GETSYMBOL(&state1);\n\n        if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {\n            *op++ = FSEv06_GETSYMBOL(&state2);\n            break;\n        }\n\n        if (op>(omax-2)) return ERROR(dstSize_tooSmall);\n\n        *op++ = FSEv06_GETSYMBOL(&state2);\n\n        if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {\n            *op++ = FSEv06_GETSYMBOL(&state1);\n            break;\n    }   }\n\n    return op-ostart;\n}\n\n\nsize_t FSEv06_decompress_usingDTable(void* dst, size_t originalSize,\n                            const void* cSrc, size_t cSrcSize,\n                            const FSEv06_DTable* dt)\n{\n    const void* ptr = dt;\n    const FSEv06_DTableHeader* DTableH = (const 
FSEv06_DTableHeader*)ptr;\n    const U32 fastMode = DTableH->fastMode;\n\n    /* select fast mode (static) */\n    if (fastMode) return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);\n    return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);\n}\n\n\nsize_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)\n{\n    const BYTE* const istart = (const BYTE*)cSrc;\n    const BYTE* ip = istart;\n    short counting[FSEv06_MAX_SYMBOL_VALUE+1];\n    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */\n    unsigned tableLog;\n    unsigned maxSymbolValue = FSEv06_MAX_SYMBOL_VALUE;\n\n    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */\n\n    /* normal FSE decoding mode */\n    {   size_t const NCountLength = FSEv06_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);\n        if (FSEv06_isError(NCountLength)) return NCountLength;\n        if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */\n        ip += NCountLength;\n        cSrcSize -= NCountLength;\n    }\n\n    { size_t const errorCode = FSEv06_buildDTable (dt, counting, maxSymbolValue, tableLog);\n      if (FSEv06_isError(errorCode)) return errorCode; }\n\n    return FSEv06_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);   /* always return, even if it is an error code */\n}\n\n\n\n#endif   /* FSEv06_COMMONDEFS_ONLY */\n/* ******************************************************************\n   Huffman coder, part of New Generation Entropy library\n   header file\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source 
code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef HUFv06_H\n#define HUFv06_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* ****************************************\n*  HUF simple functions\n******************************************/\nsize_t HUFv06_decompress(void* dst,  size_t dstSize,\n                const void* cSrc, size_t cSrcSize);\n/*\nHUFv06_decompress() :\n    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated destination buffer 'dst', of size 'dstSize'.\n    `dstSize` : must be the **exact** size of original (uncompressed) data.\n    Note : in contrast with FSE, HUFv06_decompress can regenerate\n           RLE (cSrcSize==1) and uncompressed 
(cSrcSize==dstSize) data,\n           because it knows size to regenerate.\n    @return : size of regenerated data (== dstSize)\n              or an error code, which can be tested using HUFv06_isError()\n*/\n\n\n/* ****************************************\n*  Tool functions\n******************************************/\nsize_t HUFv06_compressBound(size_t size);       /**< maximum compressed size */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif   /* HUFv06_H */\n/* ******************************************************************\n   Huffman codec, part of New Generation Entropy library\n   header file, for static linking only\n   Copyright (C) 2013-2016, Yann Collet\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef HUFv06_STATIC_H\n#define HUFv06_STATIC_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* ****************************************\n*  Static allocation\n******************************************/\n/* HUF buffer bounds */\n#define HUFv06_CTABLEBOUND 129\n#define HUFv06_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */\n#define HUFv06_COMPRESSBOUND(size) (HUFv06_CTABLEBOUND + HUFv06_BLOCKBOUND(size))   /* Macro version, useful for static allocation */\n\n/* static allocation of HUF's DTable */\n#define HUFv06_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))\n#define HUFv06_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \\\n        unsigned short DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUFv06_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \\\n        unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }\n#define HUFv06_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \\\n        unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }\n\n\n/* ****************************************\n*  Advanced decompression functions\n******************************************/\nsize_t HUFv06_decompress4X2 (void* dst, size_t dstSize, 
const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\nsize_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */\n\n\n\n/*!\nHUFv06_decompress() does the following:\n1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics\n2. build Huffman table from save, using HUFv06_readDTableXn()\n3. decode 1 or 4 segments in parallel using HUFv06_decompressSXn_usingDTable\n*/\nsize_t HUFv06_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);\nsize_t HUFv06_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);\n\nsize_t HUFv06_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);\nsize_t HUFv06_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);\n\n\n/* single stream variants */\nsize_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\nsize_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */\n\nsize_t HUFv06_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);\nsize_t HUFv06_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);\n\n\n\n/* **************************************************************\n*  Constants\n****************************************************************/\n#define HUFv06_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUFv06_MAX_TABLELOG. 
Beyond that value, code does not work */
#define HUFv06_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUFv06_ABSOLUTEMAX_TABLELOG */
#define HUFv06_DEFAULT_TABLELOG  HUFv06_MAX_TABLELOG   /* tableLog by default, when not specified */
#define HUFv06_MAX_SYMBOL_VALUE 255
#if (HUFv06_MAX_TABLELOG > HUFv06_ABSOLUTEMAX_TABLELOG)
#  error "HUFv06_MAX_TABLELOG is too large !"
#endif



/*! HUFv06_readStats() :
    Read compact Huffman tree, saved by HUFv06_writeCTable().
    `huffWeight` is destination buffer.
    Side effects : fills `huffWeight[0..oSize]` with per-symbol weights,
    fills `rankStats[0..HUFv06_ABSOLUTEMAX_TABLELOG]` with per-weight counts,
    and stores the symbol count and table log through the out pointers.
    @return : size read from `src`
*/
MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                            U32* nbSymbolsPtr, U32* tableLogPtr,
                            const void* src, size_t srcSize)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    /* first byte selects the header representation : >=242 RLE, >=128 raw 4-bit
       weights, otherwise an FSE-compressed weight stream of that many bytes */
    iSize = ip[0];
    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzer complain ... 
*/

    if (iSize >= 128)  { /* special header */
        if (iSize >= (242)) {  /* RLE */
            static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
            oSize = l[iSize-242];
            memset(huffWeight, 1, hwSize);
            iSize = 0;
        }
        else {   /* Incompressible */
            oSize = iSize - 127;
            iSize = ((oSize+1)/2);
            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
            if (oSize >= hwSize) return ERROR(corruption_detected);
            ip += 1;
            /* two 4-bit weights packed per input byte, high nibble first */
            {   U32 n;
                for (n=0; n<oSize; n+=2) {
                    huffWeight[n]   = ip[n/2] >> 4;
                    huffWeight[n+1] = ip[n/2] & 15;
    }   }   }   }
    else  {   /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        oSize = FSEv06_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
        if (FSEv06_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    memset(rankStats, 0, (HUFv06_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] >= HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            /* weight w contributes 2^(w-1) table slots; weight 0 contributes none */
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = BITv06_highbit32(weightTotal) + 1;
        if (tableLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << BITv06_highbit32(rest);
            U32 const lastWeight = BITv06_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}



#if defined (__cplusplus)
}
#endif

#endif /* HUFv06_STATIC_H */
/* ******************************************************************
   Huffman decoder, part of New Generation Entropy library
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/* **************************************************************\n*  Compiler specifics\n****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n/* inline is defined */\n#elif defined(_MSC_VER)\n#  define inline __inline\n#else\n#  define inline /* disable inline */\n#endif\n\n\n#ifdef _MSC_VER    /* Visual Studio */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#endif\n\n\n\n/* **************************************************************\n*  Error Management\n****************************************************************/\n#define HUFv06_STATIC_ASSERT(c) { enum { HUFv06_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n\n/* *******************************************************\n*  HUF : Huffman block decompression\n*********************************************************/\ntypedef struct { BYTE byte; BYTE nbBits; } HUFv06_DEltX2;   /* single-symbol decoding */\n\ntypedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv06_DEltX4;  /* double-symbols decoding 
*/

typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;



/*-***************************/
/*  single-symbol decoding   */
/*-***************************/

/*! HUFv06_readDTableX2() :
    Build the single-symbol decoding table from the compact weight header
    at `src` (parsed via HUFv06_readStats()).  DTable[0] holds the table's
    log size on entry (capacity check) and the actual tableLog on exit.
    @return : header size consumed from `src`, or an error code */
size_t HUFv06_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
{
    BYTE huffWeight[HUFv06_MAX_SYMBOL_VALUE + 1];
    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
    U32 tableLog = 0;
    size_t iSize;
    U32 nbSymbols = 0;
    U32 n;
    U32 nextRankStart;
    void* const dtPtr = DTable + 1;
    HUFv06_DEltX2* const dt = (HUFv06_DEltX2*)dtPtr;

    HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzer complain ... */

    iSize = HUFv06_readStats(huffWeight, HUFv06_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
    if (HUFv06_isError(iSize)) return iSize;

    /* check result */
    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */

    /* Prepare ranks */
    /* prefix-sum the per-weight counts into start offsets (weight w spans 2^(w-1) cells per symbol) */
    nextRankStart = 0;
    for (n=1; n<tableLog+1; n++) {
        U32 current = nextRankStart;
        nextRankStart += (rankVal[n] << (n-1));
        rankVal[n] = current;
    }

    /* fill DTable */
    for (n=0; n<nbSymbols; n++) {
        const U32 w = huffWeight[n];
        const U32 length = (1 << w) >> 1;
        U32 i;
        HUFv06_DEltX2 D;
        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
        for (i = rankVal[w]; i < rankVal[w] + length; i++)
            dt[i] = D;
        rankVal[w] += length;
    }

    return iSize;
}


/* Decode one symbol : peek dtLog bits, emit the table's byte, consume its nbBits. */
static BYTE HUFv06_decodeSymbolX2(BITv06_DStream_t* Dstream, const HUFv06_DEltX2* dt, const U32 dtLog)
{
    const size_t val = BITv06_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
    const BYTE c = dt[val].byte;
    BITv06_skipBits(Dstream, dt[val].nbBits);
    return c;
}

#define HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
    *ptr++ = HUFv06_decodeSymbolX2(DStreamPtr, dt, dtLog)

#define HUFv06_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
        HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

#define HUFv06_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

/* Decode one bitstream into [p, pEnd); unrolled 4-at-a-time while the stream
   still has bits to reload, then one-at-a-time to the end.  Returns bytes written. */
static inline size_t HUFv06_decodeStreamX2(BYTE* p, BITv06_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv06_DEltX2* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 4 symbols at a time */
    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-4)) {
        HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUFv06_DECODE_SYMBOLX2_1(p, bitDPtr);
        HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
    }

    /* closer to the end */
    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd))
        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);

    /* no more data to retrieve from bitstream, hence no need to reload */
    while (p < pEnd)
        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);

    return pEnd-pStart;
}

/* Single-stream, single-symbol decompression with a pre-built DTable. */
size_t HUFv06_decompress1X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const U16* DTable)
{
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + dstSize;
    const U32 dtLog = DTable[0];
    const void* dtPtr = DTable;
    const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr)+1;
    BITv06_DStream_t bitD;

    { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize);
      if (HUFv06_isError(errorCode)) return errorCode; }

    HUFv06_decodeStreamX2(op, &bitD, oend, dt, dtLog);

    /* check */
    if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);

    return dstSize;
}

/* Convenience wrapper : read the table header, then decode the single stream. */
size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
    if (HUFv06_isError(errorCode)) return errorCode;
    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
    ip += errorCode;
    cSrcSize -= errorCode;

    return HUFv06_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}


/*! HUFv06_decompress4X2_usingDTable() :
    4-stream variant : a 6-byte jump table (three little-endian U16 lengths)
    prefixes four independent bitstreams, each decoding one quarter of `dst`;
    the four are interleaved in the hot loop to hide latency. */
size_t HUFv06_decompress4X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const U16* DTable)
{
    /* Check */
    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */

    {   const BYTE* const istart = (const BYTE*) cSrc;
        BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable;
        const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr) +1;
        const U32 dtLog = DTable[0];
        size_t errorCode;

        /* Init */
        BITv06_DStream_t bitD1;
        BITv06_DStream_t bitD2;
        BITv06_DStream_t bitD3;
        BITv06_DStream_t bitD4;
        const size_t length1 = MEM_readLE16(istart);
        const size_t length2 = MEM_readLE16(istart+2);
        const size_t length3 = MEM_readLE16(istart+4);
        size_t length4;
        const BYTE* const istart1 = istart + 6;  /* jumpTable */
        const BYTE* const istart2 = istart1 + length1;
        const BYTE* const istart3 = istart2 + length2;
        const BYTE* const istart4 = istart3 + length3;
        const size_t segmentSize = (dstSize+3) / 4;
        BYTE* const opStart2 = ostart + segmentSize;
        BYTE* const opStart3 = opStart2 + segmentSize;
        BYTE* const opStart4 = opStart3 + segmentSize;
        BYTE* op1 = ostart;
        BYTE* op2 = opStart2;
        BYTE* op3 = opStart3;
        BYTE* op4 = opStart4;
        U32 endSignal;

        length4 = cSrcSize - (length1 + length2 + length3 + 6);
        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
        errorCode = BITv06_initDStream(&bitD1, istart1, length1);
        if (HUFv06_isError(errorCode)) return errorCode;
        errorCode = BITv06_initDStream(&bitD2, istart2, length2);
        if (HUFv06_isError(errorCode)) return errorCode;
        errorCode = BITv06_initDStream(&bitD3, istart3, length3);
        if (HUFv06_isError(errorCode)) return errorCode;
        errorCode = BITv06_initDStream(&bitD4, istart4, length4);
        if (HUFv06_isError(errorCode)) return errorCode;

        /* 16-32 symbols per loop (4-8 symbols per stream) */
        endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
        for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
            HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
            HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
            HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
            HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
            HUFv06_DECODE_SYMBOLX2_1(op1, &bitD1);
            HUFv06_DECODE_SYMBOLX2_1(op2, &bitD2);
            HUFv06_DECODE_SYMBOLX2_1(op3, &bitD3);
            HUFv06_DECODE_SYMBOLX2_1(op4, &bitD4);
            HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
            HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
            HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
            HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
            HUFv06_DECODE_SYMBOLX2_0(op1, &bitD1);
            HUFv06_DECODE_SYMBOLX2_0(op2, &bitD2);
            HUFv06_DECODE_SYMBOLX2_0(op3, &bitD3);
            HUFv06_DECODE_SYMBOLX2_0(op4, &bitD4);
            endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
        }

        /* check corruption */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 supposed already verified within main loop */

        /* finish bitStreams one by one */
        HUFv06_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
        HUFv06_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
        HUFv06_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
        HUFv06_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
        if (!endSignal) return ERROR(corruption_detected);

        /* decoded size */
        return dstSize;
    }
}


/* Convenience wrapper : read the table header, then decode the 4 streams. */
size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
    if (HUFv06_isError(errorCode)) return errorCode;
    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
    ip += errorCode;
    cSrcSize -= errorCode;

    return HUFv06_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}


/* *************************/
/* double-symbols decoding */
/* *************************/

static void HUFv06_fillDTableX4Level2(HUFv06_DEltX4* DTable, U32 sizeLog, const U32 consumed,
                           const U32* rankValOrigin, const int minWeight,
                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
                           U32 
nbBitsBaseline, U16 baseSeq)
{
    HUFv06_DEltX4 DElt;
    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];

    /* get pre-calculated rankVal */
    memcpy(rankVal, rankValOrigin, sizeof(rankVal));

    /* fill skipped values */
    /* cells below minWeight decode only the first symbol (baseSeq), length 1 */
    if (minWeight>1) {
        U32 i, skipSize = rankVal[minWeight];
        MEM_writeLE16(&(DElt.sequence), baseSeq);
        DElt.nbBits   = (BYTE)(consumed);
        DElt.length   = 1;
        for (i = 0; i < skipSize; i++)
            DTable[i] = DElt;
    }

    /* fill DTable */
    { U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
        const U32 symbol = sortedSymbols[s].symbol;
        const U32 weight = sortedSymbols[s].weight;
        const U32 nbBits = nbBitsBaseline - weight;
        const U32 length = 1 << (sizeLog-nbBits);
        const U32 start = rankVal[weight];
        U32 i = start;
        const U32 end = start + length;

        /* second-level cells decode a 2-byte sequence : baseSeq then this symbol */
        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
        DElt.nbBits = (BYTE)(nbBits + consumed);
        DElt.length = 2;
        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */

        rankVal[weight] += length;
    }}
}

typedef U32 rankVal_t[HUFv06_ABSOLUTEMAX_TABLELOG][HUFv06_ABSOLUTEMAX_TABLELOG + 1];

/* Fill the double-symbol table : short codes get a second-level fill
   (two symbols per cell) via HUFv06_fillDTableX4Level2; long codes fill
   single-symbol cells directly. */
static void HUFv06_fillDTableX4(HUFv06_DEltX4* DTable, const U32 targetLog,
                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
                           const U32 nbBitsBaseline)
{
    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
    const U32 minBits  = nbBitsBaseline - maxWeight;
    U32 s;

    memcpy(rankVal, rankValOrigin, sizeof(rankVal));

    /* fill DTable */
    for (s=0; s<sortedListSize; s++) {
        const U16 symbol = sortedList[s].symbol;
        const U32 weight = sortedList[s].weight;
        const U32 nbBits = nbBitsBaseline - weight;
        const U32 start = rankVal[weight];
        const U32 length = 1 << (targetLog-nbBits);

        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
            U32 sortedRank;
            int minWeight = nbBits + scaleLog;
            if (minWeight < 1) minWeight = 1;
            sortedRank = rankStart[minWeight];
            HUFv06_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
                           rankValOrigin[nbBits], minWeight,
                           sortedList+sortedRank, sortedListSize-sortedRank,
                           nbBitsBaseline, symbol);
        } else {
            HUFv06_DEltX4 DElt;
            MEM_writeLE16(&(DElt.sequence), symbol);
            DElt.nbBits = (BYTE)(nbBits);
            DElt.length = 1;
            {   U32 u;
                const U32 end = start + length;
                for (u = start; u < end; u++) DTable[u] = DElt;
        }   }
        rankVal[weight] += length;
    }
}

/*! HUFv06_readDTableX4() :
    Build the double-symbol decoding table from the compact weight header at
    `src` : read weights, sort symbols by weight, pre-compute per-consumed-bits
    rank offsets, then fill the table with HUFv06_fillDTableX4().
    @return : header size consumed from `src`, or an error code */
size_t HUFv06_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
{
    BYTE weightList[HUFv06_MAX_SYMBOL_VALUE + 1];
    sortedSymbol_t sortedSymbol[HUFv06_MAX_SYMBOL_VALUE + 1];
    U32 rankStats[HUFv06_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
    U32 rankStart0[HUFv06_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
    U32* const rankStart = rankStart0+1;
    rankVal_t rankVal;
    U32 tableLog, maxW, sizeOfSort, nbSymbols;
    const U32 memLog = DTable[0];
    size_t iSize;
    void* dtPtr = DTable;
    HUFv06_DEltX4* const dt = ((HUFv06_DEltX4*)dtPtr) + 1;

    HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
    if (memLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzer complain ... */

    iSize = HUFv06_readStats(weightList, HUFv06_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
    if (HUFv06_isError(iSize)) return iSize;

    /* check result */
    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */

    /* find maxWeight */
    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */

    /* Get start index of each weight */
    {   U32 w, nextRankStart = 0;
        for (w=1; w<maxW+1; w++) {
            U32 current = nextRankStart;
            nextRankStart += rankStats[w];
            rankStart[w] = current;
        }
        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
        sizeOfSort = nextRankStart;
    }

    /* sort symbols by weight */
    {   U32 s;
        for (s=0; s<nbSymbols; s++) {
            U32 const w = weightList[s];
            U32 const r = rankStart[w]++;
            sortedSymbol[r].symbol = (BYTE)s;
            sortedSymbol[r].weight = (BYTE)w;
        }
        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
    }

    /* Build rankVal */
    {   U32* const rankVal0 = rankVal[0];
        {   int const rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
            U32 nextRankVal = 0;
            U32 w;
            for (w=1; w<maxW+1; w++) {
                U32 current = nextRankVal;
                nextRankVal += rankStats[w] << (w+rescale);
                rankVal0[w] = current;
        }   }
        {   U32 const minBits = tableLog+1 - maxW;
            U32 consumed;
            for (consumed = minBits; consumed < memLog - minBits + 1; consumed++) {
                U32* const rankValPtr = rankVal[consumed];
                U32 w;
                for (w = 1; w < maxW+1; w++) {
                    rankValPtr[w] = rankVal0[w] >> consumed;
    }   }   }   }

    HUFv06_fillDTableX4(dt, memLog,
                   sortedSymbol, sizeOfSort,
                   rankStart0, rankVal, maxW,
                   tableLog+1);

    return iSize;
}


/* Decode one 1- or 2-byte sequence; returns number of bytes written (1 or 2). */
static U32 HUFv06_decodeSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
{
    const size_t val = BITv06_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
    memcpy(op, dt+val, 2);
    BITv06_skipBits(DStream, dt[val].nbBits);
    return dt[val].length;
}

/* Decode the final symbol when only one output byte remains; always writes 1 byte. */
static U32 HUFv06_decodeLastSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
{
    const size_t val = BITv06_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
    memcpy(op, dt+val, 1);
    if (dt[val].length==1) BITv06_skipBits(DStream, dt[val].nbBits);
    else {
        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
            BITv06_skipBits(DStream, dt[val].nbBits);
            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */
    }   }
    return 1;
}


#define HUFv06_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
    ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

#define HUFv06_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
        ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

#define HUFv06_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

/* Decode one bitstream into [p, pEnd) using the double-symbol table;
   unrolled while reloads succeed, then symbol-by-symbol, with a special
   last-byte path via HUFv06_decodeLastSymbolX4().  Returns bytes written. */
static inline size_t HUFv06_decodeStreamX4(BYTE* p, BITv06_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv06_DEltX4* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 8 symbols at a time */
    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd-7)) {
        HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
        HUFv06_DECODE_SYMBOLX4_1(p, bitDPtr);
        HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
    }

    /* closer to the end */
    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-2))
        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);

    while (p <= pEnd-2)
        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */

    if (p < pEnd)
        p += HUFv06_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);

    return p-pStart;
}


/* Single-stream, double-symbol decompression with a pre-built DTable. */
size_t HUFv06_decompress1X4_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const U32* DTable)
{
    const BYTE* const istart = (const BYTE*) cSrc;
    BYTE* const ostart = (BYTE*) dst;
    BYTE* const oend = ostart + dstSize;

    const U32 dtLog = DTable[0];
    const void* const dtPtr = DTable;
    const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;

    /* Init */
    BITv06_DStream_t bitD;
    { size_t const errorCode = BITv06_initDStream(&bitD, istart, cSrcSize);
      if (HUFv06_isError(errorCode)) return errorCode; }

    /* decode */
    HUFv06_decodeStreamX4(ostart, &bitD, oend, dt, dtLog);

    /* check */
    if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);

    /* decoded size */
    return dstSize;
}

/* Convenience wrapper : read the table header, then decode the single stream. */
size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
    if (HUFv06_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize;
    cSrcSize -= hSize;

    return HUFv06_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}

/*! HUFv06_decompress4X4_usingDTable() :
    4-stream, double-symbol variant : same 6-byte jump-table framing as the
    X2 version, four interleaved bitstreams each covering a quarter of `dst`. */
size_t HUFv06_decompress4X4_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const U32* DTable)
{
    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */

    {   const BYTE* const istart = (const BYTE*) cSrc;
        BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable;
        const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
        const U32 dtLog = DTable[0];
        size_t errorCode;

        /* Init */
        BITv06_DStream_t bitD1;
        BITv06_DStream_t bitD2;
        BITv06_DStream_t bitD3;
        BITv06_DStream_t bitD4;
        const size_t length1 = MEM_readLE16(istart);
        const size_t length2 = MEM_readLE16(istart+2);
        const size_t length3 = MEM_readLE16(istart+4);
        size_t length4;
        const BYTE* const istart1 = istart + 6;  /* jumpTable */
        const BYTE* const istart2 = istart1 + length1;
        const BYTE* const istart3 = istart2 + length2;
        const BYTE* const istart4 = istart3 + length3;
        const size_t segmentSize = (dstSize+3) / 4;
        BYTE* const opStart2 = ostart + segmentSize;
        BYTE* const opStart3 = opStart2 + segmentSize;
        BYTE* const opStart4 = opStart3 + segmentSize;
        BYTE* op1 = ostart;
        BYTE* op2 = opStart2;
        BYTE* op3 = opStart3;
        BYTE* op4 = opStart4;
        U32 endSignal;

        length4 = cSrcSize - (length1 + length2 + length3 + 6);
        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
        errorCode = BITv06_initDStream(&bitD1, istart1, length1);
        if (HUFv06_isError(errorCode)) return errorCode;
        errorCode = BITv06_initDStream(&bitD2, istart2, length2);
        if (HUFv06_isError(errorCode)) return errorCode;
        errorCode = BITv06_initDStream(&bitD3, istart3, length3);
        if (HUFv06_isError(errorCode)) return errorCode;
        errorCode = BITv06_initDStream(&bitD4, istart4, length4);
        if (HUFv06_isError(errorCode)) return errorCode;

        /* 16-32 symbols per loop (4-8 symbols per stream) */
        endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
        for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
            HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUFv06_DECODE_SYMBOLX4_1(op1, &bitD1);
            HUFv06_DECODE_SYMBOLX4_1(op2, &bitD2);
            HUFv06_DECODE_SYMBOLX4_1(op3, &bitD3);
            HUFv06_DECODE_SYMBOLX4_1(op4, &bitD4);
            HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUFv06_DECODE_SYMBOLX4_0(op1, &bitD1);
            HUFv06_DECODE_SYMBOLX4_0(op2, &bitD2);
            HUFv06_DECODE_SYMBOLX4_0(op3, &bitD3);
            HUFv06_DECODE_SYMBOLX4_0(op4, &bitD4);

            endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
        }

        /* check corruption */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 supposed already verified within main loop */

        /* finish bitStreams one by one */
        HUFv06_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
        HUFv06_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
        HUFv06_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
        HUFv06_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
        if (!endSignal) return ERROR(corruption_detected);

        /* decoded size */
        return dstSize;
    }
}


/* Convenience wrapper : read the table header, then decode the 4 streams. */
size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
    const BYTE* ip = (const BYTE*) cSrc;

    size_t hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
    if (HUFv06_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize;
    cSrcSize -= hSize;

    return HUFv06_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}




/* ********************************/
/* Generic decompression selector */
/* ********************************/

typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
    /* 
single, double, quad */\n    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */\n    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */\n    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */\n    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */\n    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */\n    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */\n    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */\n    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */\n    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */\n    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */\n    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */\n    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */\n    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */\n    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */\n    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */\n    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */\n};\n\ntypedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);\n\nsize_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    static const decompressionAlgo decompress[3] = { HUFv06_decompress4X2, HUFv06_decompress4X4, NULL };\n    U32 Dtime[3];   /* decompression time estimation */\n\n    /* validation checks */\n    if (dstSize == 0) return ERROR(dstSize_tooSmall);\n    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */\n    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */\n    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */\n\n    /* decoder timing evaluation */\n    {   U32 const Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */\n        U32 const D256 = (U32)(dstSize >> 8);\n        U32 n; for 
(n=0; n<3; n++)\n            Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);\n    }\n\n    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */\n\n    {   U32 algoNb = 0;\n        if (Dtime[1] < Dtime[0]) algoNb = 1;\n        // if (Dtime[2] < Dtime[algoNb]) algoNb = 2;   /* current speed of HUFv06_decompress4X6 is not good */\n        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);\n    }\n\n    //return HUFv06_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */\n    //return HUFv06_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */\n    //return HUFv06_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */\n}\n/*\n    Common functions of Zstd compression library\n    Copyright (C) 2015-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : http://www.zstd.net/\n*/\n\n\n/*-****************************************\n*  Version\n******************************************/\n\n/*-****************************************\n*  ZSTD Error Management\n******************************************/\n/*! ZSTDv06_isError() :\n*   tells if a return value is an error code */\nunsigned ZSTDv06_isError(size_t code) { return ERR_isError(code); }\n\n/*! 
ZSTDv06_getErrorName() :\n*   provides error code string from function result (useful for debugging) */\nconst char* ZSTDv06_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\n\n/* **************************************************************\n*  ZBUFF Error Management\n****************************************************************/\nunsigned ZBUFFv06_isError(size_t errorCode) { return ERR_isError(errorCode); }\n\nconst char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }\n/*\n    zstd - standard compression library\n    Copyright (C) 2014-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : http://www.zstd.net\n*/\n\n/* ***************************************************************\n*  Tuning parameters\n*****************************************************************/\n/*!\n * HEAPMODE :\n * Select how default decompression function ZSTDv06_decompress() will allocate memory,\n * in memory stack (0), or in memory heap (1, requires malloc())\n */\n#ifndef ZSTDv06_HEAPMODE\n#  define ZSTDv06_HEAPMODE 1\n#endif\n\n\n\n/*-*******************************************************\n*  Compiler specifics\n*********************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */\n#endif\n\n\n/*-*************************************\n*  Macros\n***************************************/\n#define ZSTDv06_isError ERR_isError   /* for inlining */\n#define FSEv06_isError  ERR_isError\n#define HUFv06_isError  ERR_isError\n\n\n/*_*******************************************************\n*  Memory operations\n**********************************************************/\nstatic void ZSTDv06_copy4(void* dst, const void* src) { memcpy(dst, src, 4); 
}\n\n\n/*-*************************************************************\n*   Context management\n***************************************************************/\ntypedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,\n               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTDv06_dStage;\n\nstruct ZSTDv06_DCtx_s\n{\n    FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)];\n    FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)];\n    FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)];\n    unsigned   hufTableX4[HUFv06_DTABLE_SIZE(HufLog)];\n    const void* previousDstEnd;\n    const void* base;\n    const void* vBase;\n    const void* dictEnd;\n    size_t expected;\n    size_t headerSize;\n    ZSTDv06_frameParams fParams;\n    blockType_t bType;   /* used in ZSTDv06_decompressContinue(), to transfer blockType between header decoding and block decoding stages */\n    ZSTDv06_dStage stage;\n    U32 flagRepeatTable;\n    const BYTE* litPtr;\n    size_t litSize;\n    BYTE litBuffer[ZSTDv06_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];\n    BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];\n};  /* typedef'd to ZSTDv06_DCtx within \"zstd_static.h\" */\n\nsize_t ZSTDv06_sizeofDCtx (void); /* Hidden declaration */\nsize_t ZSTDv06_sizeofDCtx (void) { return sizeof(ZSTDv06_DCtx); }\n\nsize_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx)\n{\n    dctx->expected = ZSTDv06_frameHeaderSize_min;\n    dctx->stage = ZSTDds_getFrameHeaderSize;\n    dctx->previousDstEnd = NULL;\n    dctx->base = NULL;\n    dctx->vBase = NULL;\n    dctx->dictEnd = NULL;\n    dctx->hufTableX4[0] = HufLog;\n    dctx->flagRepeatTable = 0;\n    return 0;\n}\n\nZSTDv06_DCtx* ZSTDv06_createDCtx(void)\n{\n    ZSTDv06_DCtx* dctx = (ZSTDv06_DCtx*)malloc(sizeof(ZSTDv06_DCtx));\n    if (dctx==NULL) return NULL;\n    ZSTDv06_decompressBegin(dctx);\n    return dctx;\n}\n\nsize_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx)\n{\n    free(dctx);\n    return 0;   /* reserved as a potential error 
code in the future */\n}\n\nvoid ZSTDv06_copyDCtx(ZSTDv06_DCtx* dstDCtx, const ZSTDv06_DCtx* srcDCtx)\n{\n    memcpy(dstDCtx, srcDCtx,\n           sizeof(ZSTDv06_DCtx) - (ZSTDv06_BLOCKSIZE_MAX+WILDCOPY_OVERLENGTH + ZSTDv06_frameHeaderSize_max));  /* no need to copy workspace */\n}\n\n\n/*-*************************************************************\n*   Decompression section\n***************************************************************/\n\n/* Frame format description\n   Frame Header -  [ Block Header - Block ] - Frame End\n   1) Frame Header\n      - 4 bytes - Magic Number : ZSTDv06_MAGICNUMBER (defined within zstd_static.h)\n      - 1 byte  - Frame Descriptor\n   2) Block Header\n      - 3 bytes, starting with a 2-bits descriptor\n                 Uncompressed, Compressed, Frame End, unused\n   3) Block\n      See Block Format Description\n   4) Frame End\n      - 3 bytes, compatible with Block Header\n*/\n\n\n/* Frame descriptor\n\n   1 byte, using :\n   bit 0-3 : windowLog - ZSTDv06_WINDOWLOG_ABSOLUTEMIN   (see zstd_internal.h)\n   bit 4   : minmatch 4(0) or 3(1)\n   bit 5   : reserved (must be zero)\n   bit 6-7 : Frame content size : unknown, 1 byte, 2 bytes, 8 bytes\n\n   Optional : content size (0, 1, 2 or 8 bytes)\n   0 : unknown\n   1 : 0-255 bytes\n   2 : 256 - 65535+256\n   8 : up to 16 exa\n*/\n\n\n/* Compressed Block, format description\n\n   Block = Literal Section - Sequences Section\n   Prerequisite : size of (compressed) block, maximum size of regenerated data\n\n   1) Literal Section\n\n   1.1) Header : 1-5 bytes\n        flags: 2 bits\n            00 compressed by Huff0\n            01 unused\n            10 is Raw (uncompressed)\n            11 is Rle\n            Note : using 01 => Huff0 with precomputed table ?\n            Note : delta map ? 
=> compressed ?\n\n   1.1.1) Huff0-compressed literal block : 3-5 bytes\n            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream\n            srcSize < 1 KB => 3 bytes (2-2-10-10)\n            srcSize < 16KB => 4 bytes (2-2-14-14)\n            else           => 5 bytes (2-2-18-18)\n            big endian convention\n\n   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes\n        size :  5 bits: (IS_RAW<<6) + (0<<4) + size\n               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)\n                        size&255\n               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)\n                        size>>8&255\n                        size&255\n\n   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes\n        size :  5 bits: (IS_RLE<<6) + (0<<4) + size\n               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)\n                        size&255\n               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)\n                        size>>8&255\n                        size&255\n\n   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes\n            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream\n            srcSize < 1 KB => 3 bytes (2-2-10-10)\n            srcSize < 16KB => 4 bytes (2-2-14-14)\n            else           => 5 bytes (2-2-18-18)\n            big endian convention\n\n        1- CTable available (stored into workspace ?)\n        2- Small input (fast heuristic ? Full comparison ? 
depend on clevel ?)\n\n\n   1.2) Literal block content\n\n   1.2.1) Huff0 block, using sizes from header\n        See Huff0 format\n\n   1.2.2) Huff0 block, using prepared table\n\n   1.2.3) Raw content\n\n   1.2.4) single byte\n\n\n   2) Sequences section\n      TO DO\n*/\n\n/** ZSTDv06_frameHeaderSize() :\n*   srcSize must be >= ZSTDv06_frameHeaderSize_min.\n*   @return : size of the Frame Header */\nstatic size_t ZSTDv06_frameHeaderSize(const void* src, size_t srcSize)\n{\n    if (srcSize < ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong);\n    { U32 const fcsId = (((const BYTE*)src)[4]) >> 6;\n      return ZSTDv06_frameHeaderSize_min + ZSTDv06_fcs_fieldSize[fcsId]; }\n}\n\n\n/** ZSTDv06_getFrameParams() :\n*   decode Frame Header, or provide expected `srcSize`.\n*   @return : 0, `fparamsPtr` is correctly filled,\n*            >0, `srcSize` is too small, result is expected `srcSize`,\n*             or an error code, which can be tested using ZSTDv06_isError() */\nsize_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n\n    if (srcSize < ZSTDv06_frameHeaderSize_min) return ZSTDv06_frameHeaderSize_min;\n    if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown);\n\n    /* ensure there is enough `srcSize` to fully read/decode frame header */\n    { size_t const fhsize = ZSTDv06_frameHeaderSize(src, srcSize);\n      if (srcSize < fhsize) return fhsize; }\n\n    memset(fparamsPtr, 0, sizeof(*fparamsPtr));\n    {   BYTE const frameDesc = ip[4];\n        fparamsPtr->windowLog = (frameDesc & 0xF) + ZSTDv06_WINDOWLOG_ABSOLUTEMIN;\n        if ((frameDesc & 0x20) != 0) return ERROR(frameParameter_unsupported);   /* reserved 1 bit */\n        switch(frameDesc >> 6)  /* fcsId */\n        {\n            default:   /* impossible */\n            case 0 : fparamsPtr->frameContentSize = 0; break;\n            case 1 : fparamsPtr->frameContentSize = ip[5]; break;\n      
      case 2 : fparamsPtr->frameContentSize = MEM_readLE16(ip+5)+256; break;\n            case 3 : fparamsPtr->frameContentSize = MEM_readLE64(ip+5); break;\n    }   }\n    return 0;\n}\n\n\n/** ZSTDv06_decodeFrameHeader() :\n*   `srcSize` must be the size provided by ZSTDv06_frameHeaderSize().\n*   @return : 0 if success, or an error code, which can be tested using ZSTDv06_isError() */\nstatic size_t ZSTDv06_decodeFrameHeader(ZSTDv06_DCtx* zc, const void* src, size_t srcSize)\n{\n    size_t const result = ZSTDv06_getFrameParams(&(zc->fParams), src, srcSize);\n    if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported);\n    return result;\n}\n\n\ntypedef struct\n{\n    blockType_t blockType;\n    U32 origSize;\n} blockProperties_t;\n\n/*! ZSTDv06_getcBlockSize() :\n*   Provides the size of compressed block from block header `src` */\nstatic size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)\n{\n    const BYTE* const in = (const BYTE* const)src;\n    U32 cSize;\n\n    if (srcSize < ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);\n\n    bpPtr->blockType = (blockType_t)((*in) >> 6);\n    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);\n    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;\n\n    if (bpPtr->blockType == bt_end) return 0;\n    if (bpPtr->blockType == bt_rle) return 1;\n    return cSize;\n}\n\n\nstatic size_t ZSTDv06_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    if (dst==NULL) return ERROR(dstSize_tooSmall);\n    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);\n    memcpy(dst, src, srcSize);\n    return srcSize;\n}\n\n\n/*! 
ZSTDv06_decodeLiteralsBlock() :\n    @return : nb of bytes read from src (< srcSize ) */\nstatic size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,\n                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */\n{\n    const BYTE* const istart = (const BYTE*) src;\n\n    /* any compressed block with literals segment must be at least this size */\n    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);\n\n    switch(istart[0]>> 6)\n    {\n    case IS_HUF:\n        {   size_t litSize, litCSize, singleStream=0;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */\n                /* 2 - 2 - 10 - 10 */\n                lhSize=3;\n                singleStream = istart[0] & 16;\n                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);\n                litCSize = ((istart[1] &  3) << 8) + istart[2];\n                break;\n            case 2:\n                /* 2 - 2 - 14 - 14 */\n                lhSize=4;\n                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);\n                litCSize = ((istart[2] & 63) <<  8) + istart[3];\n                break;\n            case 3:\n                /* 2 - 2 - 18 - 18 */\n                lhSize=5;\n                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);\n                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];\n                break;\n            }\n            if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);\n            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);\n\n            if (HUFv06_isError(singleStream ?\n                       
     HUFv06_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :\n                            HUFv06_decompress   (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))\n                return ERROR(corruption_detected);\n\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n            return litCSize + lhSize;\n        }\n    case IS_PCH:\n        {   size_t litSize, litCSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */\n                return ERROR(corruption_detected);\n            if (!dctx->flagRepeatTable)\n                return ERROR(dictionary_corrupted);\n\n            /* 2 - 2 - 10 - 10 */\n            lhSize=3;\n            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);\n            litCSize = ((istart[1] &  3) << 8) + istart[2];\n            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);\n\n            {   size_t const errorCode = HUFv06_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);\n                if (HUFv06_isError(errorCode)) return ERROR(corruption_detected);\n            }\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n            return litCSize + lhSize;\n        }\n    case IS_RAW:\n        {   size_t litSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */\n                lhSize=1;\n                litSize = istart[0] & 31;\n                break;\n            case 2:\n                litSize = ((istart[0] & 15) << 8) + istart[1];\n                break;\n            case 3:\n           
     litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];\n                break;\n            }\n\n            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */\n                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);\n                memcpy(dctx->litBuffer, istart+lhSize, litSize);\n                dctx->litPtr = dctx->litBuffer;\n                dctx->litSize = litSize;\n                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n                return lhSize+litSize;\n            }\n            /* direct reference into compressed stream */\n            dctx->litPtr = istart+lhSize;\n            dctx->litSize = litSize;\n            return lhSize+litSize;\n        }\n    case IS_RLE:\n        {   size_t litSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */\n                lhSize = 1;\n                litSize = istart[0] & 31;\n                break;\n            case 2:\n                litSize = ((istart[0] & 15) << 8) + istart[1];\n                break;\n            case 3:\n                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];\n                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */\n                break;\n            }\n            if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);\n            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            return lhSize+1;\n        }\n    default:\n        return ERROR(corruption_detected);   /* impossible */\n    }\n}\n\n\n/*! 
ZSTDv06_buildSeqTable() :\n    @return : nb bytes read from src,\n              or an error code if it fails, testable with ZSTDv06_isError()\n*/\nstatic size_t ZSTDv06_buildSeqTable(FSEv06_DTable* DTable, U32 type, U32 max, U32 maxLog,\n                                 const void* src, size_t srcSize,\n                                 const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)\n{\n    switch(type)\n    {\n    case FSEv06_ENCODING_RLE :\n        if (!srcSize) return ERROR(srcSize_wrong);\n        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);\n        FSEv06_buildDTable_rle(DTable, *(const BYTE*)src);   /* if *src > max, data is corrupted */\n        return 1;\n    case FSEv06_ENCODING_RAW :\n        FSEv06_buildDTable(DTable, defaultNorm, max, defaultLog);\n        return 0;\n    case FSEv06_ENCODING_STATIC:\n        if (!flagRepeatTable) return ERROR(corruption_detected);\n        return 0;\n    default :   /* impossible */\n    case FSEv06_ENCODING_DYNAMIC :\n        {   U32 tableLog;\n            S16 norm[MaxSeq+1];\n            size_t const headerSize = FSEv06_readNCount(norm, &max, &tableLog, src, srcSize);\n            if (FSEv06_isError(headerSize)) return ERROR(corruption_detected);\n            if (tableLog > maxLog) return ERROR(corruption_detected);\n            FSEv06_buildDTable(DTable, norm, max, tableLog);\n            return headerSize;\n    }   }\n}\n\n\nstatic size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr,\n                             FSEv06_DTable* DTableLL, FSEv06_DTable* DTableML, FSEv06_DTable* DTableOffb, U32 flagRepeatTable,\n                             const void* src, size_t srcSize)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* ip = istart;\n\n    /* check */\n    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);\n\n    /* SeqHead */\n    {   int nbSeq = *ip++;\n        if (!nbSeq) { *nbSeqPtr=0; return 1; 
}\n        if (nbSeq > 0x7F) {\n            if (nbSeq == 0xFF) {\n                if (ip+2 > iend) return ERROR(srcSize_wrong);\n                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;\n            } else {\n                if (ip >= iend) return ERROR(srcSize_wrong);\n                nbSeq = ((nbSeq-0x80)<<8) + *ip++;\n            }\n        }\n        *nbSeqPtr = nbSeq;\n    }\n\n    /* FSE table descriptors */\n    if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are \"raw\", hence no header, but at least xxLog bits per type */\n    {   U32 const LLtype  = *ip >> 6;\n        U32 const Offtype = (*ip >> 4) & 3;\n        U32 const MLtype  = (*ip >> 2) & 3;\n        ip++;\n\n        /* Build DTables */\n        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);\n            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);\n            ip += bhSize;\n        }\n        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableOffb, Offtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);\n            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);\n            ip += bhSize;\n        }\n        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);\n            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);\n            ip += bhSize;\n    }   }\n\n    return ip-istart;\n}\n\n\ntypedef struct {\n    size_t litLength;\n    size_t matchLength;\n    size_t offset;\n} seq_t;\n\ntypedef struct {\n    BITv06_DStream_t DStream;\n    FSEv06_DState_t stateLL;\n    FSEv06_DState_t stateOffb;\n    FSEv06_DState_t stateML;\n    size_t prevOffset[ZSTDv06_REP_INIT];\n} seqState_t;\n\n\n\nstatic void ZSTDv06_decodeSequence(seq_t* seq, seqState_t* seqState)\n{\n    /* Literal length 
*/\n    U32 const llCode = FSEv06_peekSymbol(&(seqState->stateLL));\n    U32 const mlCode = FSEv06_peekSymbol(&(seqState->stateML));\n    U32 const ofCode = FSEv06_peekSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */\n\n    U32 const llBits = LL_bits[llCode];\n    U32 const mlBits = ML_bits[mlCode];\n    U32 const ofBits = ofCode;\n    U32 const totalBits = llBits+mlBits+ofBits;\n\n    static const U32 LL_base[MaxLL+1] = {\n                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,\n                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,\n                            0x2000, 0x4000, 0x8000, 0x10000 };\n\n    static const U32 ML_base[MaxML+1] = {\n                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,   11,    12,    13,    14,    15,\n                            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,   27,    28,    29,    30,    31,\n                            32, 34, 36, 38, 40, 44, 48, 56, 64, 80, 96, 0x80, 0x100, 0x200, 0x400, 0x800,\n                            0x1000, 0x2000, 0x4000, 0x8000, 0x10000 };\n\n    static const U32 OF_base[MaxOff+1] = {\n                 0,        1,       3,       7,     0xF,     0x1F,     0x3F,     0x7F,\n                 0xFF,   0x1FF,   0x3FF,   0x7FF,   0xFFF,   0x1FFF,   0x3FFF,   0x7FFF,\n                 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,\n                 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, /*fake*/ 1, 1 };\n\n    /* sequence */\n    {   size_t offset;\n        if (!ofCode)\n            offset = 0;\n        else {\n            offset = OF_base[ofCode] + BITv06_readBits(&(seqState->DStream), ofBits);   /* <=  26 bits */\n            if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream));\n        }\n\n        if (offset < ZSTDv06_REP_NUM) {\n            if (llCode == 0 && offset <= 1) offset = 1-offset;\n\n            if (offset != 0) 
{\n                size_t temp = seqState->prevOffset[offset];\n                if (offset != 1) {\n                    seqState->prevOffset[2] = seqState->prevOffset[1];\n                }\n                seqState->prevOffset[1] = seqState->prevOffset[0];\n                seqState->prevOffset[0] = offset = temp;\n\n            } else {\n                offset = seqState->prevOffset[0];\n            }\n        } else {\n            offset -= ZSTDv06_REP_MOVE;\n            seqState->prevOffset[2] = seqState->prevOffset[1];\n            seqState->prevOffset[1] = seqState->prevOffset[0];\n            seqState->prevOffset[0] = offset;\n        }\n        seq->offset = offset;\n    }\n\n    seq->matchLength = ML_base[mlCode] + MINMATCH + ((mlCode>31) ? BITv06_readBits(&(seqState->DStream), mlBits) : 0);   /* <=  16 bits */\n    if (MEM_32bits() && (mlBits+llBits>24)) BITv06_reloadDStream(&(seqState->DStream));\n\n    seq->litLength = LL_base[llCode] + ((llCode>15) ? BITv06_readBits(&(seqState->DStream), llBits) : 0);   /* <=  16 bits */\n    if (MEM_32bits() ||\n       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv06_reloadDStream(&(seqState->DStream));\n\n    /* ANS state update */\n    FSEv06_updateState(&(seqState->stateLL), &(seqState->DStream));   /* <=  9 bits */\n    FSEv06_updateState(&(seqState->stateML), &(seqState->DStream));   /* <=  9 bits */\n    if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream));     /* <= 18 bits */\n    FSEv06_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <=  8 bits */\n}\n\n\nstatic size_t ZSTDv06_execSequence(BYTE* op,\n                                BYTE* const oend, seq_t sequence,\n                                const BYTE** litPtr, const BYTE* const litLimit,\n                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)\n{\n    BYTE* const oLitEnd = op + sequence.litLength;\n    size_t const sequenceLength = sequence.litLength + 
sequence.matchLength;\n    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */\n    BYTE* const oend_8 = oend-8;\n    const BYTE* const iLitEnd = *litPtr + sequence.litLength;\n    const BYTE* match = oLitEnd - sequence.offset;\n\n    /* check */\n    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */\n    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */\n    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */\n\n    /* copy Literals */\n    ZSTDv06_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */\n    op = oLitEnd;\n    *litPtr = iLitEnd;   /* update for next sequence */\n\n    /* copy Match */\n    if (sequence.offset > (size_t)(oLitEnd - base)) {\n        /* offset beyond prefix */\n        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);\n        match = dictEnd - (base-match);\n        if (match + sequence.matchLength <= dictEnd) {\n            memmove(oLitEnd, match, sequence.matchLength);\n            return sequenceLength;\n        }\n        /* span extDict & currentPrefixSegment */\n        {   size_t const length1 = dictEnd - match;\n            memmove(oLitEnd, match, length1);\n            op = oLitEnd + length1;\n            sequence.matchLength -= length1;\n            match = base;\n            if (op > oend_8 || sequence.matchLength < MINMATCH) {\n              while (op < oMatchEnd) *op++ = *match++;\n              return sequenceLength;\n            }\n    }   }\n    /* Requirement: op <= oend_8 */\n\n    /* match within prefix */\n    if (sequence.offset < 8) {\n        /* close range match, overlap */\n        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */\n        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* 
subtracted */\n        int const sub2 = dec64table[sequence.offset];\n        op[0] = match[0];\n        op[1] = match[1];\n        op[2] = match[2];\n        op[3] = match[3];\n        match += dec32table[sequence.offset];\n        ZSTDv06_copy4(op+4, match);\n        match -= sub2;\n    } else {\n        ZSTDv06_copy8(op, match);\n    }\n    op += 8; match += 8;\n\n    if (oMatchEnd > oend-(16-MINMATCH)) {\n        if (op < oend_8) {\n            ZSTDv06_wildcopy(op, match, oend_8 - op);\n            match += oend_8 - op;\n            op = oend_8;\n        }\n        while (op < oMatchEnd) *op++ = *match++;\n    } else {\n        ZSTDv06_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */\n    }\n    return sequenceLength;\n}\n\n\nstatic size_t ZSTDv06_decompressSequences(\n                               ZSTDv06_DCtx* dctx,\n                               void* dst, size_t maxDstSize,\n                         const void* seqStart, size_t seqSize)\n{\n    const BYTE* ip = (const BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* const oend = ostart + maxDstSize;\n    BYTE* op = ostart;\n    const BYTE* litPtr = dctx->litPtr;\n    const BYTE* const litEnd = litPtr + dctx->litSize;\n    FSEv06_DTable* DTableLL = dctx->LLTable;\n    FSEv06_DTable* DTableML = dctx->MLTable;\n    FSEv06_DTable* DTableOffb = dctx->OffTable;\n    const BYTE* const base = (const BYTE*) (dctx->base);\n    const BYTE* const vBase = (const BYTE*) (dctx->vBase);\n    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);\n    int nbSeq;\n\n    /* Build Decoding Tables */\n    {   size_t const seqHSize = ZSTDv06_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->flagRepeatTable, ip, seqSize);\n        if (ZSTDv06_isError(seqHSize)) return seqHSize;\n        ip += seqHSize;\n        dctx->flagRepeatTable = 0;\n    }\n\n    /* Regen sequences */\n    if (nbSeq) {\n        
seq_t sequence;\n        seqState_t seqState;\n\n        memset(&sequence, 0, sizeof(sequence));\n        sequence.offset = REPCODE_STARTVALUE;\n        { U32 i; for (i=0; i<ZSTDv06_REP_INIT; i++) seqState.prevOffset[i] = REPCODE_STARTVALUE; }\n        { size_t const errorCode = BITv06_initDStream(&(seqState.DStream), ip, iend-ip);\n          if (ERR_isError(errorCode)) return ERROR(corruption_detected); }\n        FSEv06_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);\n        FSEv06_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);\n        FSEv06_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);\n\n        for ( ; (BITv06_reloadDStream(&(seqState.DStream)) <= BITv06_DStream_completed) && nbSeq ; ) {\n            nbSeq--;\n            ZSTDv06_decodeSequence(&sequence, &seqState);\n\n#if 0  /* debug */\n            static BYTE* start = NULL;\n            if (start==NULL) start = op;\n            size_t pos = (size_t)(op-start);\n            if ((pos >= 5810037) && (pos < 5810400))\n                printf(\"Dpos %6u :%5u literals & match %3u bytes at distance %6u \\n\",\n                       pos, (U32)sequence.litLength, (U32)sequence.matchLength, (U32)sequence.offset);\n#endif\n\n            {   size_t const oneSeqSize = ZSTDv06_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);\n                if (ZSTDv06_isError(oneSeqSize)) return oneSeqSize;\n                op += oneSeqSize;\n        }   }\n\n        /* check if reached exact end */\n        if (nbSeq) return ERROR(corruption_detected);\n    }\n\n    /* last literal segment */\n    {   size_t const lastLLSize = litEnd - litPtr;\n        if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */\n        if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);\n        memcpy(op, litPtr, lastLLSize);\n        op += lastLLSize;\n    }\n\n    return op-ostart;\n}\n\n\nstatic void 
ZSTDv06_checkContinuity(ZSTDv06_DCtx* dctx, const void* dst)\n{\n    if (dst != dctx->previousDstEnd) {   /* not contiguous */\n        dctx->dictEnd = dctx->previousDstEnd;\n        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));\n        dctx->base = dst;\n        dctx->previousDstEnd = dst;\n    }\n}\n\n\nstatic size_t ZSTDv06_decompressBlock_internal(ZSTDv06_DCtx* dctx,\n                            void* dst, size_t dstCapacity,\n                      const void* src, size_t srcSize)\n{   /* blockType == blockCompressed */\n    const BYTE* ip = (const BYTE*)src;\n\n    if (srcSize >= ZSTDv06_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);\n\n    /* Decode literals sub-block */\n    {   size_t const litCSize = ZSTDv06_decodeLiteralsBlock(dctx, src, srcSize);\n        if (ZSTDv06_isError(litCSize)) return litCSize;\n        ip += litCSize;\n        srcSize -= litCSize;\n    }\n    return ZSTDv06_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);\n}\n\n\nsize_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx,\n                            void* dst, size_t dstCapacity,\n                      const void* src, size_t srcSize)\n{\n    ZSTDv06_checkContinuity(dctx, dst);\n    return ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);\n}\n\n\n/*! 
ZSTDv06_decompressFrame() :\n*   `dctx` must be properly initialized */\nstatic size_t ZSTDv06_decompressFrame(ZSTDv06_DCtx* dctx,\n                                 void* dst, size_t dstCapacity,\n                                 const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* const iend = ip + srcSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* op = ostart;\n    BYTE* const oend = ostart + dstCapacity;\n    size_t remainingSize = srcSize;\n    blockProperties_t blockProperties = { bt_compressed, 0 };\n\n    /* check */\n    if (srcSize < ZSTDv06_frameHeaderSize_min+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);\n\n    /* Frame Header */\n    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);\n        if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize;\n        if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);\n        if (ZSTDv06_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);\n        ip += frameHeaderSize; remainingSize -= frameHeaderSize;\n    }\n\n    /* Loop on each block */\n    while (1) {\n        size_t decodedSize=0;\n        size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, iend-ip, &blockProperties);\n        if (ZSTDv06_isError(cBlockSize)) return cBlockSize;\n\n        ip += ZSTDv06_blockHeaderSize;\n        remainingSize -= ZSTDv06_blockHeaderSize;\n        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);\n\n        switch(blockProperties.blockType)\n        {\n        case bt_compressed:\n            decodedSize = ZSTDv06_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);\n            break;\n        case bt_raw :\n            decodedSize = ZSTDv06_copyRawBlock(op, oend-op, ip, cBlockSize);\n            break;\n        case bt_rle :\n            return ERROR(GENERIC);   /* not yet supported */\n            break;\n        case bt_end :\n       
     /* end of frame */\n            if (remainingSize) return ERROR(srcSize_wrong);\n            break;\n        default:\n            return ERROR(GENERIC);   /* impossible */\n        }\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        if (ZSTDv06_isError(decodedSize)) return decodedSize;\n        op += decodedSize;\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n    }\n\n    return op-ostart;\n}\n\n\nsize_t ZSTDv06_decompress_usingPreparedDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* refDCtx,\n                                         void* dst, size_t dstCapacity,\n                                   const void* src, size_t srcSize)\n{\n    ZSTDv06_copyDCtx(dctx, refDCtx);\n    ZSTDv06_checkContinuity(dctx, dst);\n    return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);\n}\n\n\nsize_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,\n                                 void* dst, size_t dstCapacity,\n                                 const void* src, size_t srcSize,\n                                 const void* dict, size_t dictSize)\n{\n    ZSTDv06_decompressBegin_usingDict(dctx, dict, dictSize);\n    ZSTDv06_checkContinuity(dctx, dst);\n    return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);\n}\n\n\nsize_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    return ZSTDv06_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);\n}\n\n\nsize_t ZSTDv06_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n#if defined(ZSTDv06_HEAPMODE) && (ZSTDv06_HEAPMODE==1)\n    size_t regenSize;\n    ZSTDv06_DCtx* dctx = ZSTDv06_createDCtx();\n    if (dctx==NULL) return ERROR(memory_allocation);\n    regenSize = ZSTDv06_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);\n    ZSTDv06_freeDCtx(dctx);\n    return regenSize;\n#else   /* stack mode */\n    ZSTDv06_DCtx dctx;\n    return ZSTDv06_decompressDCtx(&dctx, 
dst, dstCapacity, src, srcSize);\n#endif\n}\n\n/* ZSTD_errorFrameSizeInfoLegacy() :\n   assumes `cSize` and `dBound` are _not_ NULL */\nstatic void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)\n{\n    *cSize = ret;\n    *dBound = ZSTD_CONTENTSIZE_ERROR;\n}\n\nvoid ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)\n{\n    const BYTE* ip = (const BYTE*)src;\n    size_t remainingSize = srcSize;\n    size_t nbBlocks = 0;\n    blockProperties_t blockProperties = { bt_compressed, 0 };\n\n    /* Frame Header */\n    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, srcSize);\n        if (ZSTDv06_isError(frameHeaderSize)) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);\n            return;\n        }\n        if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));\n            return;\n        }\n        if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n        ip += frameHeaderSize; remainingSize -= frameHeaderSize;\n    }\n\n    /* Loop on each block */\n    while (1) {\n        size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);\n        if (ZSTDv06_isError(cBlockSize)) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);\n            return;\n        }\n\n        ip += ZSTDv06_blockHeaderSize;\n        remainingSize -= ZSTDv06_blockHeaderSize;\n        if (cBlockSize > remainingSize) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n\n        if (cBlockSize == 0) break;   /* bt_end */\n\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n        nbBlocks++;\n    }\n\n    *cSize = ip - (const BYTE*)src;\n    
*dBound = nbBlocks * ZSTDv06_BLOCKSIZE_MAX;\n}\n\n/*_******************************\n*  Streaming Decompression API\n********************************/\nsize_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx)\n{\n    return dctx->expected;\n}\n\nsize_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    /* Sanity check */\n    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);\n    if (dstCapacity) ZSTDv06_checkContinuity(dctx, dst);\n\n    /* Decompress : frame header; part 1 */\n    switch (dctx->stage)\n    {\n    case ZSTDds_getFrameHeaderSize :\n        if (srcSize != ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */\n        dctx->headerSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);\n        if (ZSTDv06_isError(dctx->headerSize)) return dctx->headerSize;\n        memcpy(dctx->headerBuffer, src, ZSTDv06_frameHeaderSize_min);\n        if (dctx->headerSize > ZSTDv06_frameHeaderSize_min) {\n            dctx->expected = dctx->headerSize - ZSTDv06_frameHeaderSize_min;\n            dctx->stage = ZSTDds_decodeFrameHeader;\n            return 0;\n        }\n        dctx->expected = 0;   /* not necessary to copy more */\n\t/* fall-through */\n    case ZSTDds_decodeFrameHeader:\n        {   size_t result;\n            memcpy(dctx->headerBuffer + ZSTDv06_frameHeaderSize_min, src, dctx->expected);\n            result = ZSTDv06_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);\n            if (ZSTDv06_isError(result)) return result;\n            dctx->expected = ZSTDv06_blockHeaderSize;\n            dctx->stage = ZSTDds_decodeBlockHeader;\n            return 0;\n        }\n    case ZSTDds_decodeBlockHeader:\n        {   blockProperties_t bp;\n            size_t const cBlockSize = ZSTDv06_getcBlockSize(src, ZSTDv06_blockHeaderSize, &bp);\n            if (ZSTDv06_isError(cBlockSize)) return cBlockSize;\n            if (bp.blockType == 
bt_end) {\n                dctx->expected = 0;\n                dctx->stage = ZSTDds_getFrameHeaderSize;\n            } else {\n                dctx->expected = cBlockSize;\n                dctx->bType = bp.blockType;\n                dctx->stage = ZSTDds_decompressBlock;\n            }\n            return 0;\n        }\n    case ZSTDds_decompressBlock:\n        {   size_t rSize;\n            switch(dctx->bType)\n            {\n            case bt_compressed:\n                rSize = ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);\n                break;\n            case bt_raw :\n                rSize = ZSTDv06_copyRawBlock(dst, dstCapacity, src, srcSize);\n                break;\n            case bt_rle :\n                return ERROR(GENERIC);   /* not yet handled */\n                break;\n            case bt_end :   /* should never happen (filtered at phase 1) */\n                rSize = 0;\n                break;\n            default:\n                return ERROR(GENERIC);   /* impossible */\n            }\n            dctx->stage = ZSTDds_decodeBlockHeader;\n            dctx->expected = ZSTDv06_blockHeaderSize;\n            dctx->previousDstEnd = (char*)dst + rSize;\n            return rSize;\n        }\n    default:\n        return ERROR(GENERIC);   /* impossible */\n    }\n}\n\n\nstatic void ZSTDv06_refDictContent(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    dctx->dictEnd = dctx->previousDstEnd;\n    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));\n    dctx->base = dict;\n    dctx->previousDstEnd = (const char*)dict + dictSize;\n}\n\nstatic size_t ZSTDv06_loadEntropy(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, litlengthHeaderSize;\n\n    hSize = HUFv06_readDTableX4(dctx->hufTableX4, dict, dictSize);\n    if (HUFv06_isError(hSize)) return ERROR(dictionary_corrupted);\n    dict = (const 
char*)dict + hSize;\n    dictSize -= hSize;\n\n    {   short offcodeNCount[MaxOff+1];\n        U32 offcodeMaxValue=MaxOff, offcodeLog;\n        offcodeHeaderSize = FSEv06_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);\n        if (FSEv06_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);\n        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);\n        { size_t const errorCode = FSEv06_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);\n          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }\n        dict = (const char*)dict + offcodeHeaderSize;\n        dictSize -= offcodeHeaderSize;\n    }\n\n    {   short matchlengthNCount[MaxML+1];\n        unsigned matchlengthMaxValue = MaxML, matchlengthLog;\n        matchlengthHeaderSize = FSEv06_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);\n        if (FSEv06_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);\n        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);\n        { size_t const errorCode = FSEv06_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);\n          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }\n        dict = (const char*)dict + matchlengthHeaderSize;\n        dictSize -= matchlengthHeaderSize;\n    }\n\n    {   short litlengthNCount[MaxLL+1];\n        unsigned litlengthMaxValue = MaxLL, litlengthLog;\n        litlengthHeaderSize = FSEv06_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);\n        if (FSEv06_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);\n        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);\n        { size_t const errorCode = FSEv06_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);\n          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }\n   
 }\n\n    dctx->flagRepeatTable = 1;\n    return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;\n}\n\nstatic size_t ZSTDv06_decompress_insertDictionary(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    size_t eSize;\n    U32 const magic = MEM_readLE32(dict);\n    if (magic != ZSTDv06_DICT_MAGIC) {\n        /* pure content mode */\n        ZSTDv06_refDictContent(dctx, dict, dictSize);\n        return 0;\n    }\n    /* load entropy tables */\n    dict = (const char*)dict + 4;\n    dictSize -= 4;\n    eSize = ZSTDv06_loadEntropy(dctx, dict, dictSize);\n    if (ZSTDv06_isError(eSize)) return ERROR(dictionary_corrupted);\n\n    /* reference dictionary content */\n    dict = (const char*)dict + eSize;\n    dictSize -= eSize;\n    ZSTDv06_refDictContent(dctx, dict, dictSize);\n\n    return 0;\n}\n\n\nsize_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    { size_t const errorCode = ZSTDv06_decompressBegin(dctx);\n      if (ZSTDv06_isError(errorCode)) return errorCode; }\n\n    if (dict && dictSize) {\n        size_t const errorCode = ZSTDv06_decompress_insertDictionary(dctx, dict, dictSize);\n        if (ZSTDv06_isError(errorCode)) return ERROR(dictionary_corrupted);\n    }\n\n    return 0;\n}\n\n/*\n    Buffered version of Zstd compression library\n    Copyright (C) 2015-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : http://www.zstd.net/\n*/\n\n\n/*-***************************************************************************\n*  Streaming decompression howto\n*\n*  A ZBUFFv06_DCtx object is required to track streaming operations.\n*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.\n*  Use ZBUFFv06_decompressInit() to start a new decompression operation,\n*   or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.\n*  Note that ZBUFFv06_DCtx objects can be re-init multiple times.\n*\n*  Use ZBUFFv06_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *dstCapacityPtr can be any size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.\n*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.\n*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a 
hint, to help latency),\n*            or 0 when a frame is completely decoded,\n*            or an error code, which can be tested using ZBUFFv06_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()\n*  output : ZBUFFv06_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.\n*  input  : ZBUFFv06_recommendedDInSize == 128KB + 3;\n*           just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .\n* *******************************************************************************/\n\ntypedef enum { ZBUFFds_init, ZBUFFds_loadHeader,\n               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv06_dStage;\n\n/* *** Resource management *** */\nstruct ZBUFFv06_DCtx_s {\n    ZSTDv06_DCtx* zd;\n    ZSTDv06_frameParams fParams;\n    ZBUFFv06_dStage stage;\n    char*  inBuff;\n    size_t inBuffSize;\n    size_t inPos;\n    char*  outBuff;\n    size_t outBuffSize;\n    size_t outStart;\n    size_t outEnd;\n    size_t blockSize;\n    BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];\n    size_t lhSize;\n};   /* typedef'd to ZBUFFv06_DCtx within \"zstd_buffered.h\" */\n\n\nZBUFFv06_DCtx* ZBUFFv06_createDCtx(void)\n{\n    ZBUFFv06_DCtx* zbd = (ZBUFFv06_DCtx*)malloc(sizeof(ZBUFFv06_DCtx));\n    if (zbd==NULL) return NULL;\n    memset(zbd, 0, sizeof(*zbd));\n    zbd->zd = ZSTDv06_createDCtx();\n    zbd->stage = ZBUFFds_init;\n    return zbd;\n}\n\nsize_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* zbd)\n{\n    if (zbd==NULL) return 0;   /* support free on null */\n    ZSTDv06_freeDCtx(zbd->zd);\n    free(zbd->inBuff);\n    free(zbd->outBuff);\n    free(zbd);\n    return 0;\n}\n\n\n/* *** Initialization *** */\n\nsize_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* zbd, const void* dict, size_t dictSize)\n{\n    zbd->stage = ZBUFFds_loadHeader;\n    zbd->lhSize = zbd->inPos = 
zbd->outStart = zbd->outEnd = 0;\n    return ZSTDv06_decompressBegin_usingDict(zbd->zd, dict, dictSize);\n}\n\nsize_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* zbd)\n{\n    return ZBUFFv06_decompressInitDictionary(zbd, NULL, 0);\n}\n\n\n\nMEM_STATIC size_t ZBUFFv06_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    size_t length = MIN(dstCapacity, srcSize);\n    if (length > 0) {\n        memcpy(dst, src, length);\n    }\n    return length;\n}\n\n\n/* *** Decompression *** */\n\nsize_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,\n                                void* dst, size_t* dstCapacityPtr,\n                          const void* src, size_t* srcSizePtr)\n{\n    const char* const istart = (const char*)src;\n    const char* const iend = istart + *srcSizePtr;\n    const char* ip = istart;\n    char* const ostart = (char*)dst;\n    char* const oend = ostart + *dstCapacityPtr;\n    char* op = ostart;\n    U32 notDone = 1;\n\n    while (notDone) {\n        switch(zbd->stage)\n        {\n        case ZBUFFds_init :\n            return ERROR(init_missing);\n\n        case ZBUFFds_loadHeader :\n            {   size_t const hSize = ZSTDv06_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);\n                if (hSize != 0) {\n                    size_t const toLoad = hSize - zbd->lhSize;   /* if hSize!=0, hSize > zbd->lhSize */\n                    if (ZSTDv06_isError(hSize)) return hSize;\n                    if (toLoad > (size_t)(iend-ip)) {   /* not enough input to load full header */\n                        memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);\n                        zbd->lhSize += iend-ip;\n                        *dstCapacityPtr = 0;\n                        return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize;   /* remaining header bytes + next block header */\n                    }\n                    memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;\n        
            break;\n            }   }\n\n            /* Consume header */\n            {   size_t const h1Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);  /* == ZSTDv06_frameHeaderSize_min */\n                size_t const h1Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);\n                if (ZSTDv06_isError(h1Result)) return h1Result;\n                if (h1Size < zbd->lhSize) {   /* long header */\n                    size_t const h2Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);\n                    size_t const h2Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);\n                    if (ZSTDv06_isError(h2Result)) return h2Result;\n            }   }\n\n            /* Frame header instruct buffer sizes */\n            {   size_t const blockSize = MIN(1 << zbd->fParams.windowLog, ZSTDv06_BLOCKSIZE_MAX);\n                zbd->blockSize = blockSize;\n                if (zbd->inBuffSize < blockSize) {\n                    free(zbd->inBuff);\n                    zbd->inBuffSize = blockSize;\n                    zbd->inBuff = (char*)malloc(blockSize);\n                    if (zbd->inBuff == NULL) return ERROR(memory_allocation);\n                }\n                {   size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2;\n                    if (zbd->outBuffSize < neededOutSize) {\n                        free(zbd->outBuff);\n                        zbd->outBuffSize = neededOutSize;\n                        zbd->outBuff = (char*)malloc(neededOutSize);\n                        if (zbd->outBuff == NULL) return ERROR(memory_allocation);\n            }   }   }\n            zbd->stage = ZBUFFds_read;\n\t    /* fall-through */\n        case ZBUFFds_read:\n            {   size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);\n                if (neededInSize==0) {  /* end of frame */\n                    zbd->stage = ZBUFFds_init;\n     
               notDone = 0;\n                    break;\n                }\n                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */\n                    size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,\n                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,\n                        ip, neededInSize);\n                    if (ZSTDv06_isError(decodedSize)) return decodedSize;\n                    ip += neededInSize;\n                    if (!decodedSize) break;   /* this was just a header */\n                    zbd->outEnd = zbd->outStart +  decodedSize;\n                    zbd->stage = ZBUFFds_flush;\n                    break;\n                }\n                if (ip==iend) { notDone = 0; break; }   /* no more input */\n                zbd->stage = ZBUFFds_load;\n            }\n\t    /* fall-through */\n        case ZBUFFds_load:\n            {   size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);\n                size_t const toLoad = neededInSize - zbd->inPos;   /* should always be <= remaining space within inBuff */\n                size_t loadedSize;\n                if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected);   /* should never happen */\n                loadedSize = ZBUFFv06_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);\n                ip += loadedSize;\n                zbd->inPos += loadedSize;\n                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */\n\n                /* decode loaded input */\n                {   size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,\n                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,\n                        zbd->inBuff, neededInSize);\n                    if (ZSTDv06_isError(decodedSize)) return decodedSize;\n                    zbd->inPos = 0;   /* input is consumed */\n             
       if (!decodedSize) { zbd->stage = ZBUFFds_read; break; }   /* this was just a header */\n                    zbd->outEnd = zbd->outStart +  decodedSize;\n                    zbd->stage = ZBUFFds_flush;\n                    // break; /* ZBUFFds_flush follows */\n                }\n\t    }\n\t    /* fall-through */\n        case ZBUFFds_flush:\n            {   size_t const toFlushSize = zbd->outEnd - zbd->outStart;\n                size_t const flushedSize = ZBUFFv06_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);\n                op += flushedSize;\n                zbd->outStart += flushedSize;\n                if (flushedSize == toFlushSize) {\n                    zbd->stage = ZBUFFds_read;\n                    if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)\n                        zbd->outStart = zbd->outEnd = 0;\n                    break;\n                }\n                /* cannot flush everything */\n                notDone = 0;\n                break;\n            }\n        default: return ERROR(GENERIC);   /* impossible */\n    }   }\n\n    /* result */\n    *srcSizePtr = ip-istart;\n    *dstCapacityPtr = op-ostart;\n    {   size_t nextSrcSizeHint = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);\n        if (nextSrcSizeHint > ZSTDv06_blockHeaderSize) nextSrcSizeHint+= ZSTDv06_blockHeaderSize;   /* get following block header too */\n        nextSrcSizeHint -= zbd->inPos;   /* already loaded*/\n        return nextSrcSizeHint;\n    }\n}\n\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nsize_t ZBUFFv06_recommendedDInSize(void)  { return ZSTDv06_BLOCKSIZE_MAX + ZSTDv06_blockHeaderSize /* block header size*/ ; }\nsize_t ZBUFFv06_recommendedDOutSize(void) { return ZSTDv06_BLOCKSIZE_MAX; }\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v06.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTDv06_H\n#define ZSTDv06_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*======  Dependency  ======*/\n#include <stddef.h>   /* size_t */\n\n\n/*======  Export for Windows  ======*/\n/*!\n*  ZSTDv06_DLL_EXPORT :\n*  Enable exporting of functions when building a Windows DLL\n*/\n#if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1)\n#  define ZSTDLIBv06_API __declspec(dllexport)\n#else\n#  define ZSTDLIBv06_API\n#endif\n\n\n/* *************************************\n*  Simple functions\n***************************************/\n/*! ZSTDv06_decompress() :\n    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.\n    `dstCapacity` must be large enough, equal or larger than originalSize.\n    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\n              or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */\nZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,\n                                    const void* src, size_t compressedSize);\n\n/**\nZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.6.x format\n    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'\n    cSize (output parameter)  : the number of bytes that would be read to decompress this frame\n                                or an error code if it fails (which can be tested using ZSTDv01_isError())\n    dBound (output parameter) : an 
upper-bound for the decompressed size of the data in the frame\n                                or ZSTD_CONTENTSIZE_ERROR if an error occurs\n\n    note : assumes `cSize` and `dBound` are _not_ NULL.\n*/\nvoid ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize,\n                                     size_t* cSize, unsigned long long* dBound);\n\n/* *************************************\n*  Helper functions\n***************************************/\nZSTDLIBv06_API size_t      ZSTDv06_compressBound(size_t srcSize); /*!< maximum compressed size (worst case scenario) */\n\n/* Error Management */\nZSTDLIBv06_API unsigned    ZSTDv06_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */\nZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code);     /*!< provides readable string for an error code */\n\n\n/* *************************************\n*  Explicit memory management\n***************************************/\n/** Decompression context */\ntypedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx;\nZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void);\nZSTDLIBv06_API size_t     ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx);      /*!< @return : errorCode */\n\n/** ZSTDv06_decompressDCtx() :\n*   Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */\nZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n\n/*-***********************\n*  Dictionary API\n*************************/\n/*! 
ZSTDv06_decompress_usingDict() :\n*   Decompression using a pre-defined Dictionary content (see dictBuilder).\n*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.\n*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv06_decompressDCtx() */\nZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,\n                                                   void* dst, size_t dstCapacity,\n                                             const void* src, size_t srcSize,\n                                             const void* dict,size_t dictSize);\n\n\n/*-************************\n*  Advanced Streaming API\n***************************/\nstruct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; };\ntypedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams;\n\nZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */\nZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize);\nZSTDLIBv06_API void   ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx);\n\nZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx);\nZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n\n\n/* *************************************\n*  ZBUFF API\n***************************************/\n\ntypedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx;\nZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void);\nZSTDLIBv06_API size_t         ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx);\n\nZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx);\nZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize);\n\nZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx,\n                
                                  void* dst, size_t* dstCapacityPtr,\n                                            const void* src, size_t* srcSizePtr);\n\n/*-***************************************************************************\n*  Streaming decompression howto\n*\n*  A ZBUFFv06_DCtx object is required to track streaming operations.\n*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.\n*  Use ZBUFFv06_decompressInit() to start a new decompression operation,\n*   or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.\n*  Note that ZBUFFv06_DCtx objects can be re-init multiple times.\n*\n*  Use ZBUFFv06_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *dstCapacityPtr can be any size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.\n*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.\n*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),\n*            or 0 when a frame is completely decoded,\n*            or an error code, which can be tested using ZBUFFv06_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()\n*  output : ZBUFFv06_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.\n*  input  : ZBUFFv06_recommendedDInSize == 128KB + 3;\n*           just follow indications from ZBUFFv06_decompressContinue() to minimize latency. 
It should always be <= 128 KB + 3 .\n* *******************************************************************************/\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode);\nZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode);\n\n/** Functions below provide recommended buffer sizes for Compression or Decompression operations.\n*   These sizes are just hints, they tend to offer better latency */\nZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void);\nZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void);\n\n\n/*-*************************************\n*  Constants\n***************************************/\n#define ZSTDv06_MAGICNUMBER 0xFD2FB526   /* v0.6 */\n\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* ZSTDv06_BUFFERED_H */\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v07.c",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n\n/*- Dependencies -*/\n#include <stddef.h>     /* size_t, ptrdiff_t */\n#include <string.h>     /* memcpy */\n#include <stdlib.h>     /* malloc, free, qsort */\n\n#ifndef XXH_STATIC_LINKING_ONLY\n#  define XXH_STATIC_LINKING_ONLY    /* XXH64_state_t */\n#endif\n#include \"xxhash.h\"                  /* XXH64_* */\n#include \"zstd_v07.h\"\n\n#define FSEv07_STATIC_LINKING_ONLY   /* FSEv07_MIN_TABLELOG */\n#define HUFv07_STATIC_LINKING_ONLY   /* HUFv07_TABLELOG_ABSOLUTEMAX */\n#define ZSTDv07_STATIC_LINKING_ONLY\n\n#include \"error_private.h\"\n\n\n#ifdef ZSTDv07_STATIC_LINKING_ONLY\n\n/* ====================================================================================\n * The definitions in this section are considered experimental.\n * They should never be used with a dynamic library, as they may change in the future.\n * They are provided for advanced usages.\n * Use them only in association with static linking.\n * ==================================================================================== */\n\n/*--- Constants ---*/\n#define ZSTDv07_MAGIC_SKIPPABLE_START  0x184D2A50U\n\n#define ZSTDv07_WINDOWLOG_MAX_32  25\n#define ZSTDv07_WINDOWLOG_MAX_64  27\n#define ZSTDv07_WINDOWLOG_MAX    ((U32)(MEM_32bits() ? 
ZSTDv07_WINDOWLOG_MAX_32 : ZSTDv07_WINDOWLOG_MAX_64))\n#define ZSTDv07_WINDOWLOG_MIN     18\n#define ZSTDv07_CHAINLOG_MAX     (ZSTDv07_WINDOWLOG_MAX+1)\n#define ZSTDv07_CHAINLOG_MIN       4\n#define ZSTDv07_HASHLOG_MAX       ZSTDv07_WINDOWLOG_MAX\n#define ZSTDv07_HASHLOG_MIN       12\n#define ZSTDv07_HASHLOG3_MAX      17\n#define ZSTDv07_SEARCHLOG_MAX    (ZSTDv07_WINDOWLOG_MAX-1)\n#define ZSTDv07_SEARCHLOG_MIN      1\n#define ZSTDv07_SEARCHLENGTH_MAX   7\n#define ZSTDv07_SEARCHLENGTH_MIN   3\n#define ZSTDv07_TARGETLENGTH_MIN   4\n#define ZSTDv07_TARGETLENGTH_MAX 999\n\n#define ZSTDv07_FRAMEHEADERSIZE_MAX 18    /* for static allocation */\nstatic const size_t ZSTDv07_frameHeaderSize_min = 5;\nstatic const size_t ZSTDv07_frameHeaderSize_max = ZSTDv07_FRAMEHEADERSIZE_MAX;\nstatic const size_t ZSTDv07_skippableHeaderSize = 8;  /* magic number + skippable frame length */\n\n\n/* custom memory allocation functions */\ntypedef void* (*ZSTDv07_allocFunction) (void* opaque, size_t size);\ntypedef void  (*ZSTDv07_freeFunction) (void* opaque, void* address);\ntypedef struct { ZSTDv07_allocFunction customAlloc; ZSTDv07_freeFunction customFree; void* opaque; } ZSTDv07_customMem;\n\n\n/*--- Advanced Decompression functions ---*/\n\n/*! ZSTDv07_estimateDCtxSize() :\n *  Gives the potential amount of memory allocated to create a ZSTDv07_DCtx */\nZSTDLIBv07_API size_t ZSTDv07_estimateDCtxSize(void);\n\n/*! ZSTDv07_createDCtx_advanced() :\n *  Create a ZSTD decompression context using external alloc and free functions */\nZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem);\n\n/*! 
ZSTDv07_sizeofDCtx() :\n *  Gives the amount of memory used by a given ZSTDv07_DCtx */\nZSTDLIBv07_API size_t ZSTDv07_sizeofDCtx(const ZSTDv07_DCtx* dctx);\n\n\n/* ******************************************************************\n*  Buffer-less streaming functions (synchronous mode)\n********************************************************************/\n\nZSTDLIBv07_API size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx);\nZSTDLIBv07_API size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize);\nZSTDLIBv07_API void   ZSTDv07_copyDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* preparedDCtx);\n\nZSTDLIBv07_API size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx);\nZSTDLIBv07_API size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n/*\n  Buffer-less streaming decompression (synchronous mode)\n\n  A ZSTDv07_DCtx object is required to track streaming operations.\n  Use ZSTDv07_createDCtx() / ZSTDv07_freeDCtx() to manage it.\n  A ZSTDv07_DCtx object can be re-used multiple times.\n\n  First optional operation is to retrieve frame parameters, using ZSTDv07_getFrameParams(), which doesn't consume the input.\n  It can provide the minimum size of rolling buffer required to properly decompress data (`windowSize`),\n  and optionally the final size of uncompressed content.\n  (Note : content size is an optional info that may not be present. 
0 means : content size unknown)\n  Frame parameters are extracted from the beginning of compressed frame.\n  The amount of data to read is variable, from ZSTDv07_frameHeaderSize_min to ZSTDv07_frameHeaderSize_max (so if `srcSize` >= ZSTDv07_frameHeaderSize_max, it will always work)\n  If `srcSize` is too small for operation to succeed, function will return the minimum size it requires to produce a result.\n  Result : 0 when successful, it means the ZSTDv07_frameParams structure has been filled.\n          >0 : means there is not enough data into `src`. Provides the expected size to successfully decode header.\n           errorCode, which can be tested using ZSTDv07_isError()\n\n  Start decompression, with ZSTDv07_decompressBegin() or ZSTDv07_decompressBegin_usingDict().\n  Alternatively, you can copy a prepared context, using ZSTDv07_copyDCtx().\n\n  Then use ZSTDv07_nextSrcSizeToDecompress() and ZSTDv07_decompressContinue() alternatively.\n  ZSTDv07_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTDv07_decompressContinue().\n  ZSTDv07_decompressContinue() requires this exact amount of bytes, or it will fail.\n\n  @result of ZSTDv07_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).\n  It can be zero, which is not an error; it just means ZSTDv07_decompressContinue() has decoded some header.\n\n  ZSTDv07_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.\n  They should preferably be located contiguously, prior to current block.\n  Alternatively, a round buffer of sufficient size is also possible. 
Sufficient size is determined by frame parameters.\n  ZSTDv07_decompressContinue() is very sensitive to contiguity,\n  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,\n    or that previous contiguous segment is large enough to properly handle maximum back-reference.\n\n  A frame is fully decoded when ZSTDv07_nextSrcSizeToDecompress() returns zero.\n  Context can then be reset to start a new decompression.\n\n\n  == Special case : skippable frames ==\n\n  Skippable frames allow the integration of user-defined data into a flow of concatenated frames.\n  Skippable frames will be ignored (skipped) by a decompressor. The format of skippable frame is following:\n  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F\n  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits\n  c) Frame Content - any content (User Data) of length equal to Frame Size\n  For skippable frames ZSTDv07_decompressContinue() always returns 0.\n  For skippable frames ZSTDv07_getFrameParams() returns fparamsPtr->windowLog==0 what means that a frame is skippable.\n  It also returns Frame Size as fparamsPtr->frameContentSize.\n*/\n\n\n/* **************************************\n*  Block functions\n****************************************/\n/*! 
Block functions produce and decode raw zstd blocks, without frame metadata.\n    Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).\n    User will have to take in charge required information to regenerate data, such as compressed and content sizes.\n\n    A few rules to respect :\n    - Compressing and decompressing require a context structure\n      + Use ZSTDv07_createCCtx() and ZSTDv07_createDCtx()\n    - It is necessary to init context before starting\n      + compression : ZSTDv07_compressBegin()\n      + decompression : ZSTDv07_decompressBegin()\n      + variants _usingDict() are also allowed\n      + copyCCtx() and copyDCtx() work too\n    - Block size is limited, it must be <= ZSTDv07_getBlockSizeMax()\n      + If you need to compress more, cut data into multiple blocks\n      + Consider using the regular ZSTDv07_compress() instead, as frame metadata costs become negligible when source size is large.\n    - When a block is considered not compressible enough, ZSTDv07_compressBlock() result will be zero.\n      In which case, nothing is produced into `dst`.\n      + User must test for such outcome and deal directly with uncompressed data\n      + ZSTDv07_decompressBlock() doesn't accept uncompressed data as input !!!\n      + In case of multiple successive blocks, decoder must be informed of uncompressed block existence to follow proper history.\n        Use ZSTDv07_insertBlock() in such a case.\n*/\n\n#define ZSTDv07_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)   /* define, for static allocation */\nZSTDLIBv07_API size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\nZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert block into `dctx` history. 
Useful for uncompressed blocks */\n\n\n#endif   /* ZSTDv07_STATIC_LINKING_ONLY */\n\n\n/* ******************************************************************\n   mem.h\n   low-level memory access routines\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n#ifndef MEM_H_MODULE\n#define MEM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*-****************************************\n*  Compiler specifics\n******************************************/\n#if defined(_MSC_VER)   /* Visual Studio */\n#   include <stdlib.h>  /* _byteswap_ulong */\n#   include <intrin.h>  /* _byteswap_* */\n#endif\n#if defined(__GNUC__)\n#  define MEM_STATIC static __attribute__((unused))\n#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#  define MEM_STATIC static inline\n#elif defined(_MSC_VER)\n#  define MEM_STATIC static __inline\n#else\n#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */\n#endif\n\n\n/*-**************************************************************\n*  Basic Types\n*****************************************************************/\n#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )\n# include <stdint.h>\n  typedef  uint8_t BYTE;\n  typedef uint16_t U16;\n  typedef  int16_t S16;\n  typedef uint32_t U32;\n  typedef  int32_t 
S32;\n  typedef uint64_t U64;\n  typedef  int64_t S64;\n#else\n  typedef unsigned char       BYTE;\n  typedef unsigned short      U16;\n  typedef   signed short      S16;\n  typedef unsigned int        U32;\n  typedef   signed int        S32;\n  typedef unsigned long long  U64;\n  typedef   signed long long  S64;\n#endif\n\n\n/*-**************************************************************\n*  Memory I/O\n*****************************************************************/\n/* MEM_FORCE_MEMORY_ACCESS :\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. 
This method is portable but violate C standard.\n *            It can generate buggy code on targets depending on alignment.\n *            In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define MEM_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))\n#    define MEM_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\nMEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }\nMEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }\n\nMEM_STATIC unsigned MEM_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)\n\n/* violates C standard, by lying on structure alignment.\nOnly use if no other choice to achieve best performance on target platform */\nMEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }\nMEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }\nMEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\n\n#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence 
potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;\n\nMEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }\nMEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\nMEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }\n\n#else\n\n/* default method, safe and standard.\n   can sometimes prove slower */\n\nMEM_STATIC U16 MEM_read16(const void* memPtr)\n{\n    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U32 MEM_read32(const void* memPtr)\n{\n    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC U64 MEM_read64(const void* memPtr)\n{\n    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nMEM_STATIC void MEM_write16(void* memPtr, U16 value)\n{\n    memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif /* MEM_FORCE_MEMORY_ACCESS */\n\nMEM_STATIC U32 MEM_swap32(U32 in)\n{\n#if defined(_MSC_VER)     /* Visual Studio */\n    return _byteswap_ulong(in);\n#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)\n    return __builtin_bswap32(in);\n#else\n    return  ((in << 24) & 0xff000000 ) |\n            ((in <<  8) & 0x00ff0000 ) |\n            ((in >>  8) & 0x0000ff00 ) |\n            ((in >> 24) & 0x000000ff );\n#endif\n}\n\nMEM_STATIC U64 MEM_swap64(U64 in)\n{\n#if defined(_MSC_VER)     /* Visual Studio */\n    return _byteswap_uint64(in);\n#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)\n    return __builtin_bswap64(in);\n#else\n    return  ((in << 56) & 0xff00000000000000ULL) |\n            ((in << 40) & 0x00ff000000000000ULL) |\n            ((in << 24) & 0x0000ff0000000000ULL) |\n            ((in << 8)  & 0x000000ff00000000ULL) |\n            ((in >> 8)  & 
0x00000000ff000000ULL) |\n            ((in >> 24) & 0x0000000000ff0000ULL) |\n            ((in >> 40) & 0x000000000000ff00ULL) |\n            ((in >> 56) & 0x00000000000000ffULL);\n#endif\n}\n\n\n/*=== Little endian r/w ===*/\n\nMEM_STATIC U16 MEM_readLE16(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read16(memPtr);\n    else {\n        const BYTE* p = (const BYTE*)memPtr;\n        return (U16)(p[0] + (p[1]<<8));\n    }\n}\n\nMEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)\n{\n    if (MEM_isLittleEndian()) {\n        MEM_write16(memPtr, val);\n    } else {\n        BYTE* p = (BYTE*)memPtr;\n        p[0] = (BYTE)val;\n        p[1] = (BYTE)(val>>8);\n    }\n}\n\nMEM_STATIC U32 MEM_readLE32(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read32(memPtr);\n    else\n        return MEM_swap32(MEM_read32(memPtr));\n}\n\n\nMEM_STATIC U64 MEM_readLE64(const void* memPtr)\n{\n    if (MEM_isLittleEndian())\n        return MEM_read64(memPtr);\n    else\n        return MEM_swap64(MEM_read64(memPtr));\n}\n\nMEM_STATIC size_t MEM_readLEST(const void* memPtr)\n{\n    if (MEM_32bits())\n        return (size_t)MEM_readLE32(memPtr);\n    else\n        return (size_t)MEM_readLE64(memPtr);\n}\n\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* MEM_H_MODULE */\n/* ******************************************************************\n   bitstream\n   Part of FSE library\n   header file (to include)\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and 
the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef BITSTREAM_H_MODULE\n#define BITSTREAM_H_MODULE\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/*\n*  This API consists of small unitary functions, which must be inlined for best performance.\n*  Since link-time-optimization is not available for all compilers,\n*  these functions are defined into a .h to be included.\n*/\n\n\n/*=========================================\n*  Target specific\n=========================================*/\n#if defined(__BMI__) && defined(__GNUC__)\n#  include <immintrin.h>   /* support for bextr (experimental) */\n#endif\n\n/*-********************************************\n*  bitStream decoding API (read backward)\n**********************************************/\ntypedef struct\n{\n    size_t   bitContainer;\n    unsigned bitsConsumed;\n    const char* ptr;\n    const char* start;\n} BITv07_DStream_t;\n\ntypedef enum { BITv07_DStream_unfinished = 
0,\n               BITv07_DStream_endOfBuffer = 1,\n               BITv07_DStream_completed = 2,\n               BITv07_DStream_overflow = 3 } BITv07_DStream_status;  /* result of BITv07_reloadDStream() */\n               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */\n\nMEM_STATIC size_t   BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize);\nMEM_STATIC size_t   BITv07_readBits(BITv07_DStream_t* bitD, unsigned nbBits);\nMEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD);\nMEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* bitD);\n\n\n\n/*-****************************************\n*  unsafe API\n******************************************/\nMEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits);\n/* faster, but works only if nbBits >= 1 */\n\n\n\n/*-**************************************************************\n*  Internal functions\n****************************************************************/\nMEM_STATIC unsigned BITv07_highbit32 (U32 val)\n{\n#   if defined(_MSC_VER)   /* Visual */\n    unsigned long r=0;\n    _BitScanReverse ( &r, val );\n    return (unsigned) r;\n#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */\n    return __builtin_clz (val) ^ 31;\n#   else   /* Software version */\n    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };\n    U32 v = val;\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];\n#   endif\n}\n\n\n\n/*-********************************************************\n* bitStream decoding\n**********************************************************/\n/*! 
BITv07_initDStream() :\n*   Initialize a BITv07_DStream_t.\n*   `bitD` : a pointer to an already allocated BITv07_DStream_t structure.\n*   `srcSize` must be the *exact* size of the bitStream, in bytes.\n*   @return : size of stream (== srcSize) or an errorCode if a problem is detected\n*/\nMEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize)\n{\n    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }\n\n    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];\n          bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;\n          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }\n    } else {\n        bitD->start = (const char*)srcBuffer;\n        bitD->ptr   = bitD->start;\n        bitD->bitContainer = *(const BYTE*)(bitD->start);\n        switch(srcSize)\n        {\n            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */\n            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */\n            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */\n            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */\n            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */\n            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8; /* fall-through */\n            default: break;\n        }\n        { BYTE const lastByte = ((const 
BYTE*)srcBuffer)[srcSize-1];\n          bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;\n          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }\n        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;\n    }\n\n    return srcSize;\n}\n\n\n MEM_STATIC size_t BITv07_lookBits(const BITv07_DStream_t* bitD, U32 nbBits)\n{\n    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);\n}\n\n/*! BITv07_lookBitsFast() :\n*   unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits)\n{\n    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;\n    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);\n}\n\nMEM_STATIC void BITv07_skipBits(BITv07_DStream_t* bitD, U32 nbBits)\n{\n    bitD->bitsConsumed += nbBits;\n}\n\nMEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, U32 nbBits)\n{\n    size_t const value = BITv07_lookBits(bitD, nbBits);\n    BITv07_skipBits(bitD, nbBits);\n    return value;\n}\n\n/*! 
BITv07_readBitsFast() :\n*   unsafe version; only works only if nbBits >= 1 */\nMEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits)\n{\n    size_t const value = BITv07_lookBitsFast(bitD, nbBits);\n    BITv07_skipBits(bitD, nbBits);\n    return value;\n}\n\nMEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD)\n{\n    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should not happen => corruption detected */\n        return BITv07_DStream_overflow;\n\n    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {\n        bitD->ptr -= bitD->bitsConsumed >> 3;\n        bitD->bitsConsumed &= 7;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);\n        return BITv07_DStream_unfinished;\n    }\n    if (bitD->ptr == bitD->start) {\n        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv07_DStream_endOfBuffer;\n        return BITv07_DStream_completed;\n    }\n    {   U32 nbBytes = bitD->bitsConsumed >> 3;\n        BITv07_DStream_status result = BITv07_DStream_unfinished;\n        if (bitD->ptr - nbBytes < bitD->start) {\n            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */\n            result = BITv07_DStream_endOfBuffer;\n        }\n        bitD->ptr -= nbBytes;\n        bitD->bitsConsumed -= nbBytes*8;\n        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */\n        return result;\n    }\n}\n\n/*! 
BITv07_endOfDStream() :\n*   @return Tells if DStream has exactly reached its end (all bits consumed).\n*/\nMEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream)\n{\n    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));\n}\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* BITSTREAM_H_MODULE */\n/* ******************************************************************\n   FSE : Finite State Entropy codec\n   Public Prototypes declaration\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef FSEv07_H\n#define FSEv07_H\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n\n/*-****************************************\n*  FSE simple functions\n******************************************/\n\n/*! FSEv07_decompress():\n    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated destination buffer 'dst', of size 'dstCapacity'.\n    @return : size of regenerated data (<= maxDstSize),\n              or an error code, which can be tested using FSEv07_isError() .\n\n    ** Important ** : FSEv07_decompress() does not decompress non-compressible nor RLE data !!!\n    Why ? 
: making this distinction requires a header.\n    Header management is intentionally delegated to the user layer, which can better manage special cases.\n*/\nsize_t FSEv07_decompress(void* dst,  size_t dstCapacity,\n                const void* cSrc, size_t cSrcSize);\n\n\n/* Error Management */\nunsigned    FSEv07_isError(size_t code);        /* tells if a return value is an error code */\nconst char* FSEv07_getErrorName(size_t code);   /* provides error code string (useful for debugging) */\n\n\n/*-*****************************************\n*  FSE detailed API\n******************************************/\n/*!\nFSEv07_decompress() does the following:\n1. read normalized counters with readNCount()\n2. build decoding table 'DTable' from normalized counters\n3. decode the data stream using decoding table 'DTable'\n\nThe following API allows targeting specific sub-functions for advanced tasks.\nFor example, it's possible to compress several blocks using the same 'CTable',\nor to save and provide normalized distribution using external method.\n*/\n\n\n/* *** DECOMPRESSION *** */\n\n/*! FSEv07_readNCount():\n    Read compactly saved 'normalizedCounter' from 'rBuffer'.\n    @return : size read from 'rBuffer',\n              or an errorCode, which can be tested using FSEv07_isError().\n              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */\nsize_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);\n\n/*! Constructor and Destructor of FSEv07_DTable.\n    Note that its size depends on 'tableLog' */\ntypedef unsigned FSEv07_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */\nFSEv07_DTable* FSEv07_createDTable(unsigned tableLog);\nvoid        FSEv07_freeDTable(FSEv07_DTable* dt);\n\n/*! 
FSEv07_buildDTable():\n    Builds 'dt', which must be already allocated, using FSEv07_createDTable().\n    return : 0, or an errorCode, which can be tested using FSEv07_isError() */\nsize_t FSEv07_buildDTable (FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);\n\n/*! FSEv07_decompress_usingDTable():\n    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`\n    into `dst` which must be already allocated.\n    @return : size of regenerated data (necessarily <= `dstCapacity`),\n              or an errorCode, which can be tested using FSEv07_isError() */\nsize_t FSEv07_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv07_DTable* dt);\n\n/*!\nTutorial :\n----------\n(Note : these functions only decompress FSE-compressed blocks.\n If block is uncompressed, use memcpy() instead\n If block is a single repeated byte, use memset() instead )\n\nThe first step is to obtain the normalized frequencies of symbols.\nThis can be performed by FSEv07_readNCount() if it was saved using FSEv07_writeNCount().\n'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.\nIn practice, that means it's necessary to know 'maxSymbolValue' beforehand,\nor size the table to handle worst case situations (typically 256).\nFSEv07_readNCount() will provide 'tableLog' and 'maxSymbolValue'.\nThe result of FSEv07_readNCount() is the number of bytes read from 'rBuffer'.\nNote that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.\nIf there is an error, the function will return an error code, which can be tested using FSEv07_isError().\n\nThe next step is to build the decompression tables 'FSEv07_DTable' from 'normalizedCounter'.\nThis is performed by the function FSEv07_buildDTable().\nThe space required by 'FSEv07_DTable' must be already allocated using FSEv07_createDTable().\nIf there is an error, the 
function will return an error code, which can be tested using FSEv07_isError().\n\n`FSEv07_DTable` can then be used to decompress `cSrc`, with FSEv07_decompress_usingDTable().\n`cSrcSize` must be strictly correct, otherwise decompression will fail.\nFSEv07_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).\nIf there is an error, the function will return an error code, which can be tested using FSEv07_isError(). (ex: dst buffer too small)\n*/\n\n\n#ifdef FSEv07_STATIC_LINKING_ONLY\n\n\n/* *****************************************\n*  Static allocation\n*******************************************/\n/* FSE buffer bounds */\n#define FSEv07_NCOUNTBOUND 512\n#define FSEv07_BLOCKBOUND(size) (size + (size>>7))\n\n/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */\n#define FSEv07_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))\n\n\n/* *****************************************\n*  FSE advanced API\n*******************************************/\nsize_t FSEv07_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);\n/**< same as FSEv07_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr  */\n\nunsigned FSEv07_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);\n/**< same as FSEv07_optimalTableLog(), which used `minus==2` */\n\nsize_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits);\n/**< build a fake FSEv07_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */\n\nsize_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, unsigned char symbolValue);\n/**< build a fake FSEv07_DTable, designed to always generate the same symbolValue */\n\n\n\n/* *****************************************\n*  FSE symbol decompression API\n*******************************************/\ntypedef struct\n{\n    size_t      state;\n    
const void* table;   /* precise table may vary, depending on U16 */\n} FSEv07_DState_t;\n\n\nstatic void     FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt);\n\nstatic unsigned char FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);\n\n\n\n/* *****************************************\n*  FSE unsafe API\n*******************************************/\nstatic unsigned char FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);\n/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */\n\n\n/* ======    Decompression    ====== */\n\ntypedef struct {\n    U16 tableLog;\n    U16 fastMode;\n} FSEv07_DTableHeader;   /* sizeof U32 */\n\ntypedef struct\n{\n    unsigned short newState;\n    unsigned char  symbol;\n    unsigned char  nbBits;\n} FSEv07_decode_t;   /* size == U32 */\n\nMEM_STATIC void FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt)\n{\n    const void* ptr = dt;\n    const FSEv07_DTableHeader* const DTableH = (const FSEv07_DTableHeader*)ptr;\n    DStatePtr->state = BITv07_readBits(bitD, DTableH->tableLog);\n    BITv07_reloadDStream(bitD);\n    DStatePtr->table = dt + 1;\n}\n\nMEM_STATIC BYTE FSEv07_peekSymbol(const FSEv07_DState_t* DStatePtr)\n{\n    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    return DInfo.symbol;\n}\n\nMEM_STATIC void FSEv07_updateState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)\n{\n    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    size_t const lowBits = BITv07_readBits(bitD, nbBits);\n    DStatePtr->state = DInfo.newState + lowBits;\n}\n\nMEM_STATIC BYTE FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)\n{\n    FSEv07_decode_t const DInfo = ((const 
FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    BYTE const symbol = DInfo.symbol;\n    size_t const lowBits = BITv07_readBits(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\n/*! FSEv07_decodeSymbolFast() :\n    unsafe, only works if no symbol has a probability > 50% */\nMEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)\n{\n    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];\n    U32 const nbBits = DInfo.nbBits;\n    BYTE const symbol = DInfo.symbol;\n    size_t const lowBits = BITv07_readBitsFast(bitD, nbBits);\n\n    DStatePtr->state = DInfo.newState + lowBits;\n    return symbol;\n}\n\n\n\n#ifndef FSEv07_COMMONDEFS_ONLY\n\n/* **************************************************************\n*  Tuning parameters\n****************************************************************/\n/*!MEMORY_USAGE :\n*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)\n*  Increasing memory usage improves compression ratio\n*  Reduced memory usage can improve speed, due to cache effect\n*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */\n#define FSEv07_MAX_MEMORY_USAGE 14\n#define FSEv07_DEFAULT_MEMORY_USAGE 13\n\n/*!FSEv07_MAX_SYMBOL_VALUE :\n*  Maximum symbol value authorized.\n*  Required for proper stack allocation */\n#define FSEv07_MAX_SYMBOL_VALUE 255\n\n\n/* **************************************************************\n*  template functions type & suffix\n****************************************************************/\n#define FSEv07_FUNCTION_TYPE BYTE\n#define FSEv07_FUNCTION_EXTENSION\n#define FSEv07_DECODE_TYPE FSEv07_decode_t\n\n\n#endif   /* !FSEv07_COMMONDEFS_ONLY */\n\n\n/* ***************************************************************\n*  
Constants\n*****************************************************************/\n#define FSEv07_MAX_TABLELOG  (FSEv07_MAX_MEMORY_USAGE-2)\n#define FSEv07_MAX_TABLESIZE (1U<<FSEv07_MAX_TABLELOG)\n#define FSEv07_MAXTABLESIZE_MASK (FSEv07_MAX_TABLESIZE-1)\n#define FSEv07_DEFAULT_TABLELOG (FSEv07_DEFAULT_MEMORY_USAGE-2)\n#define FSEv07_MIN_TABLELOG 5\n\n#define FSEv07_TABLELOG_ABSOLUTE_MAX 15\n#if FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX\n#  error \"FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX is not supported\"\n#endif\n\n#define FSEv07_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)\n\n\n#endif /* FSEv07_STATIC_LINKING_ONLY */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* FSEv07_H */\n/* ******************************************************************\n   Huffman coder, part of New Generation Entropy library\n   header file\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy\n****************************************************************** */\n#ifndef HUFv07_H_298734234\n#define HUFv07_H_298734234\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n\n/* *** simple functions *** */\n/**\nHUFv07_decompress() :\n    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',\n    into already allocated buffer 'dst', of minimum size 'dstSize'.\n    `dstSize` : **must** be the ***exact*** size of original (uncompressed) data.\n    Note : in contrast with FSE, HUFv07_decompress can regenerate\n           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,\n           because it knows size to regenerate.\n    @return : size of regenerated data (== dstSize),\n              or an error code, which can be tested using HUFv07_isError()\n*/\nsize_t HUFv07_decompress(void* dst,  size_t dstSize,\n                const void* cSrc, size_t cSrcSize);\n\n\n/* ****************************************\n*  Tool functions\n******************************************/\n#define HUFv07_BLOCKSIZE_MAX (128 * 1024)\n\n/* Error Management */\nunsigned    HUFv07_isError(size_t code);        /**< tells if a return value is an error code */\nconst char* HUFv07_getErrorName(size_t code);   /**< provides error code string (useful for debugging) */\n\n\n/* *** Advanced function *** */\n\n\n#ifdef 
HUFv07_STATIC_LINKING_ONLY\n\n\n/* *** Constants *** */\n#define HUFv07_TABLELOG_ABSOLUTEMAX  16   /* absolute limit of HUFv07_MAX_TABLELOG. Beyond that value, code does not work */\n#define HUFv07_TABLELOG_MAX  12           /* max configured tableLog (for static allocation); can be modified up to HUFv07_ABSOLUTEMAX_TABLELOG */\n#define HUFv07_TABLELOG_DEFAULT  11       /* tableLog by default, when not specified */\n#define HUFv07_SYMBOLVALUE_MAX 255\n#if (HUFv07_TABLELOG_MAX > HUFv07_TABLELOG_ABSOLUTEMAX)\n#  error \"HUFv07_TABLELOG_MAX is too large !\"\n#endif\n\n\n/* ****************************************\n*  Static allocation\n******************************************/\n/* HUF buffer bounds */\n#define HUFv07_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */\n\n/* static allocation of HUF's DTable */\ntypedef U32 HUFv07_DTable;\n#define HUFv07_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))\n#define HUFv07_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \\\n        HUFv07_DTable DTable[HUFv07_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1)*0x1000001) }\n#define HUFv07_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \\\n        HUFv07_DTable DTable[HUFv07_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog)*0x1000001) }\n\n\n/* ****************************************\n*  Advanced decompression functions\n******************************************/\nsize_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */\nsize_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */\n\nsize_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */\nsize_t HUFv07_decompress4X_hufOnly(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and 
uncompressed as errors */\nsize_t HUFv07_decompress4X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */\nsize_t HUFv07_decompress4X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */\n\nsize_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);\nsize_t HUFv07_decompress1X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */\nsize_t HUFv07_decompress1X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */\n\n\n/* ****************************************\n*  HUF detailed API\n******************************************/\n/*!\nThe following API allows targeting specific sub-functions for advanced tasks.\nFor example, it's possible to compress several blocks using the same 'CTable',\nor to save and regenerate 'CTable' using external methods.\n*/\n/* FSEv07_count() : find it within \"fse.h\" */\n\n/*! HUFv07_readStats() :\n    Read compact Huffman tree, saved by HUFv07_writeCTable().\n    `huffWeight` is destination buffer.\n    @return : size read from `src` , or an error Code .\n    Note : Needed by HUFv07_readCTable() and HUFv07_readDTableXn() . */\nsize_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,\n                     U32* nbSymbolsPtr, U32* tableLogPtr,\n                     const void* src, size_t srcSize);\n\n\n/*\nHUFv07_decompress() does the following:\n1. select the decompression algorithm (X2, X4) based on pre-computed heuristics\n2. build Huffman table from save, using HUFv07_readDTableXn()\n3. 
decode 1 or 4 segments in parallel using HUFv07_decompressSXn_usingDTable\n*/\n\n/** HUFv07_selectDecoder() :\n*   Tells which decoder is likely to decode faster,\n*   based on a set of pre-determined metrics.\n*   @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .\n*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */\nU32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize);\n\nsize_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize);\nsize_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize);\n\nsize_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);\nsize_t HUFv07_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);\nsize_t HUFv07_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);\n\n\n/* single stream variants */\nsize_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */\nsize_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */\n\nsize_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);\nsize_t HUFv07_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);\nsize_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);\n\n\n#endif /* HUFv07_STATIC_LINKING_ONLY */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif   /* HUFv07_H_298734234 */\n/*\n   Common functions of New Generation Entropy library\n   Copyright (C) 2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in 
source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n*************************************************************************** */\n\n\n\n/*-****************************************\n*  FSE Error Management\n******************************************/\nunsigned FSEv07_isError(size_t code) { return ERR_isError(code); }\n\nconst char* FSEv07_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\n\n/* **************************************************************\n*  HUF Error 
Management\n****************************************************************/\nunsigned HUFv07_isError(size_t code) { return ERR_isError(code); }\n\nconst char* HUFv07_getErrorName(size_t code) { return ERR_getErrorName(code); }\n\n\n/*-**************************************************************\n*  FSE NCount encoding-decoding\n****************************************************************/\nstatic short FSEv07_abs(short a) { return (short)(a<0 ? -a : a); }\n\nsize_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,\n                 const void* headerBuffer, size_t hbSize)\n{\n    const BYTE* const istart = (const BYTE*) headerBuffer;\n    const BYTE* const iend = istart + hbSize;\n    const BYTE* ip = istart;\n    int nbBits;\n    int remaining;\n    int threshold;\n    U32 bitStream;\n    int bitCount;\n    unsigned charnum = 0;\n    int previous0 = 0;\n\n    if (hbSize < 4) return ERROR(srcSize_wrong);\n    bitStream = MEM_readLE32(ip);\n    nbBits = (bitStream & 0xF) + FSEv07_MIN_TABLELOG;   /* extract tableLog */\n    if (nbBits > FSEv07_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);\n    bitStream >>= 4;\n    bitCount = 4;\n    *tableLogPtr = nbBits;\n    remaining = (1<<nbBits)+1;\n    threshold = 1<<nbBits;\n    nbBits++;\n\n    while ((remaining>1) && (charnum<=*maxSVPtr)) {\n        if (previous0) {\n            unsigned n0 = charnum;\n            while ((bitStream & 0xFFFF) == 0xFFFF) {\n                n0+=24;\n                if (ip < iend-5) {\n                    ip+=2;\n                    bitStream = MEM_readLE32(ip) >> bitCount;\n                } else {\n                    bitStream >>= 16;\n                    bitCount+=16;\n            }   }\n            while ((bitStream & 3) == 3) {\n                n0+=3;\n                bitStream>>=2;\n                bitCount+=2;\n            }\n            n0 += bitStream & 3;\n            bitCount += 2;\n            if (n0 > *maxSVPtr) return 
ERROR(maxSymbolValue_tooSmall);\n            while (charnum < n0) normalizedCounter[charnum++] = 0;\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {\n                ip += bitCount>>3;\n                bitCount &= 7;\n                bitStream = MEM_readLE32(ip) >> bitCount;\n            }\n            else\n                bitStream >>= 2;\n        }\n        {   short const max = (short)((2*threshold-1)-remaining);\n            short count;\n\n            if ((bitStream & (threshold-1)) < (U32)max) {\n                count = (short)(bitStream & (threshold-1));\n                bitCount   += nbBits-1;\n            } else {\n                count = (short)(bitStream & (2*threshold-1));\n                if (count >= threshold) count -= max;\n                bitCount   += nbBits;\n            }\n\n            count--;   /* extra accuracy */\n            remaining -= FSEv07_abs(count);\n            normalizedCounter[charnum++] = count;\n            previous0 = !count;\n            while (remaining < threshold) {\n                nbBits--;\n                threshold >>= 1;\n            }\n\n            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {\n                ip += bitCount>>3;\n                bitCount &= 7;\n            } else {\n                bitCount -= (int)(8 * (iend - 4 - ip));\n                ip = iend - 4;\n            }\n            bitStream = MEM_readLE32(ip) >> (bitCount & 31);\n    }   }   /* while ((remaining>1) && (charnum<=*maxSVPtr)) */\n    if (remaining != 1) return ERROR(GENERIC);\n    *maxSVPtr = charnum-1;\n\n    ip += (bitCount+7)>>3;\n    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);\n    return ip-istart;\n}\n\n\n/*! 
HUFv07_readStats() :\n    Read compact Huffman tree, saved by HUFv07_writeCTable().\n    `huffWeight` is destination buffer.\n    @return : size read from `src` , or an error Code .\n    Note : Needed by HUFv07_readCTable() and HUFv07_readDTableXn() .\n*/\nsize_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,\n                     U32* nbSymbolsPtr, U32* tableLogPtr,\n                     const void* src, size_t srcSize)\n{\n    U32 weightTotal;\n    const BYTE* ip = (const BYTE*) src;\n    size_t iSize;\n    size_t oSize;\n\n    if (!srcSize) return ERROR(srcSize_wrong);\n    iSize = ip[0];\n    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzer complain ... */\n\n    if (iSize >= 128)  { /* special header */\n        if (iSize >= (242)) {  /* RLE */\n            static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };\n            oSize = l[iSize-242];\n            memset(huffWeight, 1, hwSize);\n            iSize = 0;\n        }\n        else {   /* Incompressible */\n            oSize = iSize - 127;\n            iSize = ((oSize+1)/2);\n            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n            if (oSize >= hwSize) return ERROR(corruption_detected);\n            ip += 1;\n            {   U32 n;\n                for (n=0; n<oSize; n+=2) {\n                    huffWeight[n]   = ip[n/2] >> 4;\n                    huffWeight[n+1] = ip[n/2] & 15;\n    }   }   }   }\n    else  {   /* header compressed with FSE (normal case) */\n        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);\n        oSize = FSEv07_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */\n        if (FSEv07_isError(oSize)) return oSize;\n    }\n\n    /* collect weight stats */\n    memset(rankStats, 0, (HUFv07_TABLELOG_ABSOLUTEMAX + 1) * sizeof(U32));\n    weightTotal = 0;\n    {   U32 n; for (n=0; n<oSize; n++) {\n            if (huffWeight[n] >= 
HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);\n            rankStats[huffWeight[n]]++;\n            weightTotal += (1 << huffWeight[n]) >> 1;\n    }   }\n    if (weightTotal == 0) return ERROR(corruption_detected);\n\n    /* get last non-null symbol weight (implied, total must be 2^n) */\n    {   U32 const tableLog = BITv07_highbit32(weightTotal) + 1;\n        if (tableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);\n        *tableLogPtr = tableLog;\n        /* determine last weight */\n        {   U32 const total = 1 << tableLog;\n            U32 const rest = total - weightTotal;\n            U32 const verif = 1 << BITv07_highbit32(rest);\n            U32 const lastWeight = BITv07_highbit32(rest) + 1;\n            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */\n            huffWeight[oSize] = (BYTE)lastWeight;\n            rankStats[lastWeight]++;\n    }   }\n\n    /* check tree construction validity */\n    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */\n\n    /* results */\n    *nbSymbolsPtr = (U32)(oSize+1);\n    return iSize+1;\n}\n/* ******************************************************************\n   FSE : Finite State Entropy decoder\n   Copyright (C) 2013-2015, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   
distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n\n/* **************************************************************\n*  Compiler specifics\n****************************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  define FORCE_INLINE static __forceinline\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */\n#else\n#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#    ifdef __GNUC__\n#      define FORCE_INLINE static inline __attribute__((always_inline))\n#    else\n#      define FORCE_INLINE static inline\n#    endif\n#  else\n#    define FORCE_INLINE static\n#  endif /* __STDC_VERSION__ */\n#endif\n\n\n/* **************************************************************\n*  Error 
Management\n****************************************************************/\n#define FSEv07_isError ERR_isError\n#define FSEv07_STATIC_ASSERT(c) { enum { FSEv07_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/* **************************************************************\n*  Complex types\n****************************************************************/\ntypedef U32 DTable_max_t[FSEv07_DTABLE_SIZE_U32(FSEv07_MAX_TABLELOG)];\n\n\n/* **************************************************************\n*  Templates\n****************************************************************/\n/*\n  designed to be included\n  for type-specific functions (template emulation in C)\n  Objective is to write these functions only once, for improved maintenance\n*/\n\n/* safety checks */\n#ifndef FSEv07_FUNCTION_EXTENSION\n#  error \"FSEv07_FUNCTION_EXTENSION must be defined\"\n#endif\n#ifndef FSEv07_FUNCTION_TYPE\n#  error \"FSEv07_FUNCTION_TYPE must be defined\"\n#endif\n\n/* Function names */\n#define FSEv07_CAT(X,Y) X##Y\n#define FSEv07_FUNCTION_NAME(X,Y) FSEv07_CAT(X,Y)\n#define FSEv07_TYPE_NAME(X,Y) FSEv07_CAT(X,Y)\n\n\n/* Function templates */\nFSEv07_DTable* FSEv07_createDTable (unsigned tableLog)\n{\n    if (tableLog > FSEv07_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv07_TABLELOG_ABSOLUTE_MAX;\n    return (FSEv07_DTable*)malloc( FSEv07_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );\n}\n\nvoid FSEv07_freeDTable (FSEv07_DTable* dt)\n{\n    free(dt);\n}\n\nsize_t FSEv07_buildDTable(FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)\n{\n    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */\n    FSEv07_DECODE_TYPE* const tableDecode = (FSEv07_DECODE_TYPE*) (tdPtr);\n    U16 symbolNext[FSEv07_MAX_SYMBOL_VALUE+1];\n\n    U32 const maxSV1 = maxSymbolValue + 1;\n    U32 const tableSize = 1 << tableLog;\n    U32 highThreshold = tableSize-1;\n\n    /* Sanity Checks */\n    if 
(maxSymbolValue > FSEv07_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);\n    if (tableLog > FSEv07_MAX_TABLELOG) return ERROR(tableLog_tooLarge);\n\n    /* Init, lay down lowprob symbols */\n    {   FSEv07_DTableHeader DTableH;\n        DTableH.tableLog = (U16)tableLog;\n        DTableH.fastMode = 1;\n        {   S16 const largeLimit= (S16)(1 << (tableLog-1));\n            U32 s;\n            for (s=0; s<maxSV1; s++) {\n                if (normalizedCounter[s]==-1) {\n                    tableDecode[highThreshold--].symbol = (FSEv07_FUNCTION_TYPE)s;\n                    symbolNext[s] = 1;\n                } else {\n                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;\n                    symbolNext[s] = normalizedCounter[s];\n        }   }   }\n        memcpy(dt, &DTableH, sizeof(DTableH));\n    }\n\n    /* Spread symbols */\n    {   U32 const tableMask = tableSize-1;\n        U32 const step = FSEv07_TABLESTEP(tableSize);\n        U32 s, position = 0;\n        for (s=0; s<maxSV1; s++) {\n            int i;\n            for (i=0; i<normalizedCounter[s]; i++) {\n                tableDecode[position].symbol = (FSEv07_FUNCTION_TYPE)s;\n                position = (position + step) & tableMask;\n                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */\n        }   }\n\n        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */\n    }\n\n    /* Build Decoding table */\n    {   U32 u;\n        for (u=0; u<tableSize; u++) {\n            FSEv07_FUNCTION_TYPE const symbol = (FSEv07_FUNCTION_TYPE)(tableDecode[u].symbol);\n            U16 nextState = symbolNext[symbol]++;\n            tableDecode[u].nbBits = (BYTE) (tableLog - BITv07_highbit32 ((U32)nextState) );\n            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);\n    }   }\n\n    return 0;\n}\n\n\n\n#ifndef 
FSEv07_COMMONDEFS_ONLY\n\n/*-*******************************************************\n*  Decompression (Byte symbols)\n*********************************************************/\nsize_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, BYTE symbolValue)\n{\n    void* ptr = dt;\n    FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSEv07_decode_t* const cell = (FSEv07_decode_t*)dPtr;\n\n    DTableH->tableLog = 0;\n    DTableH->fastMode = 0;\n\n    cell->newState = 0;\n    cell->symbol = symbolValue;\n    cell->nbBits = 0;\n\n    return 0;\n}\n\n\nsize_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits)\n{\n    void* ptr = dt;\n    FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;\n    void* dPtr = dt + 1;\n    FSEv07_decode_t* const dinfo = (FSEv07_decode_t*)dPtr;\n    const unsigned tableSize = 1 << nbBits;\n    const unsigned tableMask = tableSize - 1;\n    const unsigned maxSV1 = tableMask+1;\n    unsigned s;\n\n    /* Sanity checks */\n    if (nbBits < 1) return ERROR(GENERIC);         /* min size */\n\n    /* Build Decoding Table */\n    DTableH->tableLog = (U16)nbBits;\n    DTableH->fastMode = 1;\n    for (s=0; s<maxSV1; s++) {\n        dinfo[s].newState = 0;\n        dinfo[s].symbol = (BYTE)s;\n        dinfo[s].nbBits = (BYTE)nbBits;\n    }\n\n    return 0;\n}\n\nFORCE_INLINE size_t FSEv07_decompress_usingDTable_generic(\n          void* dst, size_t maxDstSize,\n    const void* cSrc, size_t cSrcSize,\n    const FSEv07_DTable* dt, const unsigned fast)\n{\n    BYTE* const ostart = (BYTE*) dst;\n    BYTE* op = ostart;\n    BYTE* const omax = op + maxDstSize;\n    BYTE* const olimit = omax-3;\n\n    BITv07_DStream_t bitD;\n    FSEv07_DState_t state1;\n    FSEv07_DState_t state2;\n\n    /* Init */\n    { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */\n      if (FSEv07_isError(errorCode)) return errorCode; }\n\n    
FSEv07_initDState(&state1, &bitD, dt);\n    FSEv07_initDState(&state2, &bitD, dt);\n\n#define FSEv07_GETSYMBOL(statePtr) fast ? FSEv07_decodeSymbolFast(statePtr, &bitD) : FSEv07_decodeSymbol(statePtr, &bitD)\n\n    /* 4 symbols per loop */\n    for ( ; (BITv07_reloadDStream(&bitD)==BITv07_DStream_unfinished) && (op<olimit) ; op+=4) {\n        op[0] = FSEv07_GETSYMBOL(&state1);\n\n        if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BITv07_reloadDStream(&bitD);\n\n        op[1] = FSEv07_GETSYMBOL(&state2);\n\n        if (FSEv07_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            { if (BITv07_reloadDStream(&bitD) > BITv07_DStream_unfinished) { op+=2; break; } }\n\n        op[2] = FSEv07_GETSYMBOL(&state1);\n\n        if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */\n            BITv07_reloadDStream(&bitD);\n\n        op[3] = FSEv07_GETSYMBOL(&state2);\n    }\n\n    /* tail */\n    /* note : BITv07_reloadDStream(&bitD) >= FSEv07_DStream_partiallyFilled; Ends at exactly BITv07_DStream_completed */\n    while (1) {\n        if (op>(omax-2)) return ERROR(dstSize_tooSmall);\n\n        *op++ = FSEv07_GETSYMBOL(&state1);\n\n        if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {\n            *op++ = FSEv07_GETSYMBOL(&state2);\n            break;\n        }\n\n        if (op>(omax-2)) return ERROR(dstSize_tooSmall);\n\n        *op++ = FSEv07_GETSYMBOL(&state2);\n\n        if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {\n            *op++ = FSEv07_GETSYMBOL(&state1);\n            break;\n    }   }\n\n    return op-ostart;\n}\n\n\nsize_t FSEv07_decompress_usingDTable(void* dst, size_t originalSize,\n                            const void* cSrc, size_t cSrcSize,\n                            const FSEv07_DTable* dt)\n{\n    const void* ptr = dt;\n    const FSEv07_DTableHeader* DTableH = (const 
FSEv07_DTableHeader*)ptr;\n    const U32 fastMode = DTableH->fastMode;\n\n    /* select fast mode (static) */\n    if (fastMode) return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);\n    return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);\n}\n\n\nsize_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)\n{\n    const BYTE* const istart = (const BYTE*)cSrc;\n    const BYTE* ip = istart;\n    short counting[FSEv07_MAX_SYMBOL_VALUE+1];\n    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */\n    unsigned tableLog;\n    unsigned maxSymbolValue = FSEv07_MAX_SYMBOL_VALUE;\n\n    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */\n\n    /* normal FSE decoding mode */\n    {   size_t const NCountLength = FSEv07_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);\n        if (FSEv07_isError(NCountLength)) return NCountLength;\n        if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */\n        ip += NCountLength;\n        cSrcSize -= NCountLength;\n    }\n\n    { size_t const errorCode = FSEv07_buildDTable (dt, counting, maxSymbolValue, tableLog);\n      if (FSEv07_isError(errorCode)) return errorCode; }\n\n    return FSEv07_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);   /* always return, even if it is an error code */\n}\n\n\n\n#endif   /* FSEv07_COMMONDEFS_ONLY */\n\n/* ******************************************************************\n   Huffman decoder, part of New Generation Entropy library\n   Copyright (C) 2013-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must 
retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy\n    - Public forum : https://groups.google.com/forum/#!forum/lz4c\n****************************************************************** */\n\n/* **************************************************************\n*  Compiler specifics\n****************************************************************/\n#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n/* inline is defined */\n#elif defined(_MSC_VER)\n#  define inline __inline\n#else\n#  define inline /* disable inline */\n#endif\n\n\n#ifdef _MSC_VER    /* Visual Studio */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#endif\n\n\n\n/* **************************************************************\n*  
Error Management\n****************************************************************/\n#define HUFv07_STATIC_ASSERT(c) { enum { HUFv07_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */\n\n\n/*-***************************/\n/*  generic DTableDesc       */\n/*-***************************/\n\ntypedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;\n\nstatic DTableDesc HUFv07_getDTableDesc(const HUFv07_DTable* table)\n{\n    DTableDesc dtd;\n    memcpy(&dtd, table, sizeof(dtd));\n    return dtd;\n}\n\n\n/*-***************************/\n/*  single-symbol decoding   */\n/*-***************************/\n\ntypedef struct { BYTE byte; BYTE nbBits; } HUFv07_DEltX2;   /* single-symbol decoding */\n\nsize_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize)\n{\n    BYTE huffWeight[HUFv07_SYMBOLVALUE_MAX + 1];\n    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */\n    U32 tableLog = 0;\n    U32 nbSymbols = 0;\n    size_t iSize;\n    void* const dtPtr = DTable + 1;\n    HUFv07_DEltX2* const dt = (HUFv07_DEltX2*)dtPtr;\n\n    HUFv07_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUFv07_DTable));\n    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzer complain ... 
*/\n\n    iSize = HUFv07_readStats(huffWeight, HUFv07_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);\n    if (HUFv07_isError(iSize)) return iSize;\n\n    /* Table header */\n    {   DTableDesc dtd = HUFv07_getDTableDesc(DTable);\n        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, huffman tree cannot fit in */\n        dtd.tableType = 0;\n        dtd.tableLog = (BYTE)tableLog;\n        memcpy(DTable, &dtd, sizeof(dtd));\n    }\n\n    /* Prepare ranks */\n    {   U32 n, nextRankStart = 0;\n        for (n=1; n<tableLog+1; n++) {\n            U32 current = nextRankStart;\n            nextRankStart += (rankVal[n] << (n-1));\n            rankVal[n] = current;\n    }   }\n\n    /* fill DTable */\n    {   U32 n;\n        for (n=0; n<nbSymbols; n++) {\n            U32 const w = huffWeight[n];\n            U32 const length = (1 << w) >> 1;\n            U32 i;\n            HUFv07_DEltX2 D;\n            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);\n            for (i = rankVal[w]; i < rankVal[w] + length; i++)\n                dt[i] = D;\n            rankVal[w] += length;\n    }   }\n\n    return iSize;\n}\n\n\nstatic BYTE HUFv07_decodeSymbolX2(BITv07_DStream_t* Dstream, const HUFv07_DEltX2* dt, const U32 dtLog)\n{\n    size_t const val = BITv07_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */\n    BYTE const c = dt[val].byte;\n    BITv07_skipBits(Dstream, dt[val].nbBits);\n    return c;\n}\n\n#define HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \\\n    *ptr++ = HUFv07_decodeSymbolX2(DStreamPtr, dt, dtLog)\n\n#define HUFv07_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \\\n        HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\n#define HUFv07_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)\n\nstatic inline size_t HUFv07_decodeStreamX2(BYTE* p, BITv07_DStream_t* const bitDPtr, BYTE* 
const pEnd, const HUFv07_DEltX2* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 4 symbols at a time */\n    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-4)) {\n        HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUFv07_DECODE_SYMBOLX2_1(p, bitDPtr);\n        HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);\n        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);\n    }\n\n    /* closer to the end */\n    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd))\n        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    /* no more data to retrieve from bitstream, hence no need to reload */\n    while (p < pEnd)\n        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);\n\n    return pEnd-pStart;\n}\n\nstatic size_t HUFv07_decompress1X2_usingDTable_internal(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUFv07_DTable* DTable)\n{\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + dstSize;\n    const void* dtPtr = DTable + 1;\n    const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;\n    BITv07_DStream_t bitD;\n    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);\n    U32 const dtLog = dtd.tableLog;\n\n    { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);\n      if (HUFv07_isError(errorCode)) return errorCode; }\n\n    HUFv07_decodeStreamX2(op, &bitD, oend, dt, dtLog);\n\n    /* check */\n    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);\n\n    return dstSize;\n}\n\nsize_t HUFv07_decompress1X2_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUFv07_DTable* DTable)\n{\n    DTableDesc dtd = HUFv07_getDTableDesc(DTable);\n    if (dtd.tableType != 0) return ERROR(GENERIC);\n    return HUFv07_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);\n}\n\nsize_t HUFv07_decompress1X2_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* 
cSrc, size_t cSrcSize)\n{\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t const hSize = HUFv07_readDTableX2 (DCtx, cSrc, cSrcSize);\n    if (HUFv07_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize; cSrcSize -= hSize;\n\n    return HUFv07_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);\n}\n\nsize_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);\n    return HUFv07_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);\n}\n\n\nstatic size_t HUFv07_decompress4X2_usingDTable_internal(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUFv07_DTable* DTable)\n{\n    /* Check */\n    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */\n\n    {   const BYTE* const istart = (const BYTE*) cSrc;\n        BYTE* const ostart = (BYTE*) dst;\n        BYTE* const oend = ostart + dstSize;\n        const void* const dtPtr = DTable + 1;\n        const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;\n\n        /* Init */\n        BITv07_DStream_t bitD1;\n        BITv07_DStream_t bitD2;\n        BITv07_DStream_t bitD3;\n        BITv07_DStream_t bitD4;\n        size_t const length1 = MEM_readLE16(istart);\n        size_t const length2 = MEM_readLE16(istart+2);\n        size_t const length3 = MEM_readLE16(istart+4);\n        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);\n        const BYTE* const istart1 = istart + 6;  /* jumpTable */\n        const BYTE* const istart2 = istart1 + length1;\n        const BYTE* const istart3 = istart2 + length2;\n        const BYTE* const istart4 = istart3 + length3;\n        const size_t segmentSize = (dstSize+3) / 4;\n        BYTE* const opStart2 = ostart + segmentSize;\n        BYTE* const opStart3 = opStart2 + segmentSize;\n        
BYTE* const opStart4 = opStart3 + segmentSize;\n        BYTE* op1 = ostart;\n        BYTE* op2 = opStart2;\n        BYTE* op3 = opStart3;\n        BYTE* op4 = opStart4;\n        U32 endSignal;\n        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);\n        U32 const dtLog = dtd.tableLog;\n\n        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */\n        { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);\n          if (HUFv07_isError(errorCode)) return errorCode; }\n        { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);\n          if (HUFv07_isError(errorCode)) return errorCode; }\n        { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);\n          if (HUFv07_isError(errorCode)) return errorCode; }\n        { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);\n          if (HUFv07_isError(errorCode)) return errorCode; }\n\n        /* 16-32 symbols per loop (4-8 symbols per stream) */\n        endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);\n        for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {\n            HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUFv07_DECODE_SYMBOLX2_1(op1, &bitD1);\n            HUFv07_DECODE_SYMBOLX2_1(op2, &bitD2);\n            HUFv07_DECODE_SYMBOLX2_1(op3, &bitD3);\n            HUFv07_DECODE_SYMBOLX2_1(op4, &bitD4);\n            HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);\n            HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);\n            HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);\n            HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);\n            HUFv07_DECODE_SYMBOLX2_0(op1, &bitD1);\n            HUFv07_DECODE_SYMBOLX2_0(op2, &bitD2);\n            
HUFv07_DECODE_SYMBOLX2_0(op3, &bitD3);\n            HUFv07_DECODE_SYMBOLX2_0(op4, &bitD4);\n            endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);\n        }\n\n        /* check corruption */\n        if (op1 > opStart2) return ERROR(corruption_detected);\n        if (op2 > opStart3) return ERROR(corruption_detected);\n        if (op3 > opStart4) return ERROR(corruption_detected);\n        /* note : op4 supposed already verified within main loop */\n\n        /* finish bitStreams one by one */\n        HUFv07_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);\n        HUFv07_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);\n        HUFv07_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);\n        HUFv07_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);\n\n        /* check */\n        endSignal = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);\n        if (!endSignal) return ERROR(corruption_detected);\n\n        /* decoded size */\n        return dstSize;\n    }\n}\n\n\nsize_t HUFv07_decompress4X2_usingDTable(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUFv07_DTable* DTable)\n{\n    DTableDesc dtd = HUFv07_getDTableDesc(DTable);\n    if (dtd.tableType != 0) return ERROR(GENERIC);\n    return HUFv07_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);\n}\n\n\nsize_t HUFv07_decompress4X2_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    const BYTE* ip = (const BYTE*) cSrc;\n\n    size_t const hSize = HUFv07_readDTableX2 (dctx, cSrc, cSrcSize);\n    if (HUFv07_isError(hSize)) return hSize;\n    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);\n    ip += hSize; cSrcSize -= hSize;\n\n    return HUFv07_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);\n}\n\nsize_t 
HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)\n{\n    HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);\n    return HUFv07_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);\n}\n\n\n/* *************************/\n/* double-symbols decoding */\n/* *************************/\ntypedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv07_DEltX4;  /* double-symbols decoding */\n\ntypedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;\n\nstatic void HUFv07_fillDTableX4Level2(HUFv07_DEltX4* DTable, U32 sizeLog, const U32 consumed,\n                           const U32* rankValOrigin, const int minWeight,\n                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,\n                           U32 nbBitsBaseline, U16 baseSeq)\n{\n    HUFv07_DEltX4 DElt;\n    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];\n\n    /* get pre-calculated rankVal */\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill skipped values */\n    if (minWeight>1) {\n        U32 i, skipSize = rankVal[minWeight];\n        MEM_writeLE16(&(DElt.sequence), baseSeq);\n        DElt.nbBits   = (BYTE)(consumed);\n        DElt.length   = 1;\n        for (i = 0; i < skipSize; i++)\n            DTable[i] = DElt;\n    }\n\n    /* fill DTable */\n    { U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */\n        const U32 symbol = sortedSymbols[s].symbol;\n        const U32 weight = sortedSymbols[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 length = 1 << (sizeLog-nbBits);\n        const U32 start = rankVal[weight];\n        U32 i = start;\n        const U32 end = start + length;\n\n        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));\n        DElt.nbBits = (BYTE)(nbBits + consumed);\n        DElt.length = 2;\n        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */\n\n        rankVal[weight] += 
length;\n    }}\n}\n\ntypedef U32 rankVal_t[HUFv07_TABLELOG_ABSOLUTEMAX][HUFv07_TABLELOG_ABSOLUTEMAX + 1];\n\nstatic void HUFv07_fillDTableX4(HUFv07_DEltX4* DTable, const U32 targetLog,\n                           const sortedSymbol_t* sortedList, const U32 sortedListSize,\n                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,\n                           const U32 nbBitsBaseline)\n{\n    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];\n    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */\n    const U32 minBits  = nbBitsBaseline - maxWeight;\n    U32 s;\n\n    memcpy(rankVal, rankValOrigin, sizeof(rankVal));\n\n    /* fill DTable */\n    for (s=0; s<sortedListSize; s++) {\n        const U16 symbol = sortedList[s].symbol;\n        const U32 weight = sortedList[s].weight;\n        const U32 nbBits = nbBitsBaseline - weight;\n        const U32 start = rankVal[weight];\n        const U32 length = 1 << (targetLog-nbBits);\n\n        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */\n            U32 sortedRank;\n            int minWeight = nbBits + scaleLog;\n            if (minWeight < 1) minWeight = 1;\n            sortedRank = rankStart[minWeight];\n            HUFv07_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,\n                           rankValOrigin[nbBits], minWeight,\n                           sortedList+sortedRank, sortedListSize-sortedRank,\n                           nbBitsBaseline, symbol);\n        } else {\n            HUFv07_DEltX4 DElt;\n            MEM_writeLE16(&(DElt.sequence), symbol);\n            DElt.nbBits = (BYTE)(nbBits);\n            DElt.length = 1;\n            {   U32 u;\n                const U32 end = start + length;\n                for (u = start; u < end; u++) DTable[u] = DElt;\n        }   }\n        rankVal[weight] += length;\n    }\n}\n\nsize_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* 
src, size_t srcSize)\n{\n    BYTE weightList[HUFv07_SYMBOLVALUE_MAX + 1];\n    sortedSymbol_t sortedSymbol[HUFv07_SYMBOLVALUE_MAX + 1];\n    U32 rankStats[HUFv07_TABLELOG_ABSOLUTEMAX + 1] = { 0 };\n    U32 rankStart0[HUFv07_TABLELOG_ABSOLUTEMAX + 2] = { 0 };\n    U32* const rankStart = rankStart0+1;\n    rankVal_t rankVal;\n    U32 tableLog, maxW, sizeOfSort, nbSymbols;\n    DTableDesc dtd = HUFv07_getDTableDesc(DTable);\n    U32 const maxTableLog = dtd.maxTableLog;\n    size_t iSize;\n    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */\n    HUFv07_DEltX4* const dt = (HUFv07_DEltX4*)dtPtr;\n\n    HUFv07_STATIC_ASSERT(sizeof(HUFv07_DEltX4) == sizeof(HUFv07_DTable));   /* if compilation fails here, assertion is false */\n    if (maxTableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(tableLog_tooLarge);\n    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzer complain ... */\n\n    iSize = HUFv07_readStats(weightList, HUFv07_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);\n    if (HUFv07_isError(iSize)) return iSize;\n\n    /* check result */\n    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */\n\n    /* find maxWeight */\n    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */\n\n    /* Get start index of each weight */\n    {   U32 w, nextRankStart = 0;\n        for (w=1; w<maxW+1; w++) {\n            U32 current = nextRankStart;\n            nextRankStart += rankStats[w];\n            rankStart[w] = current;\n        }\n        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/\n        sizeOfSort = nextRankStart;\n    }\n\n    /* sort symbols by weight */\n    {   U32 s;\n        for (s=0; s<nbSymbols; s++) {\n            U32 const w = weightList[s];\n            U32 const r = rankStart[w]++;\n            sortedSymbol[r].symbol = (BYTE)s;\n            
sortedSymbol[r].weight = (BYTE)w;\n        }\n        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */\n    }\n\n    /* Build rankVal */\n    {   U32* const rankVal0 = rankVal[0];\n        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */\n            U32 nextRankVal = 0;\n            U32 w;\n            for (w=1; w<maxW+1; w++) {\n                U32 current = nextRankVal;\n                nextRankVal += rankStats[w] << (w+rescale);\n                rankVal0[w] = current;\n        }   }\n        {   U32 const minBits = tableLog+1 - maxW;\n            U32 consumed;\n            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {\n                U32* const rankValPtr = rankVal[consumed];\n                U32 w;\n                for (w = 1; w < maxW+1; w++) {\n                    rankValPtr[w] = rankVal0[w] >> consumed;\n    }   }   }   }\n\n    HUFv07_fillDTableX4(dt, maxTableLog,\n                   sortedSymbol, sizeOfSort,\n                   rankStart0, rankVal, maxW,\n                   tableLog+1);\n\n    dtd.tableLog = (BYTE)maxTableLog;\n    dtd.tableType = 1;\n    memcpy(DTable, &dtd, sizeof(dtd));\n    return iSize;\n}\n\n\nstatic U32 HUFv07_decodeSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BITv07_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 2);\n    BITv07_skipBits(DStream, dt[val].nbBits);\n    return dt[val].length;\n}\n\nstatic U32 HUFv07_decodeLastSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)\n{\n    const size_t val = BITv07_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */\n    memcpy(op, dt+val, 1);\n    if (dt[val].length==1) BITv07_skipBits(DStream, dt[val].nbBits);\n    else {\n        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {\n            BITv07_skipBits(DStream, dt[val].nbBits);\n     
       if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))\n                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */\n    }   }\n    return 1;\n}\n\n\n#define HUFv07_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \\\n    ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUFv07_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \\\n    if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \\\n        ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\n#define HUFv07_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \\\n    if (MEM_64bits()) \\\n        ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)\n\nstatic inline size_t HUFv07_decodeStreamX4(BYTE* p, BITv07_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv07_DEltX4* const dt, const U32 dtLog)\n{\n    BYTE* const pStart = p;\n\n    /* up to 8 symbols at a time */\n    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd-7)) {\n        HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUFv07_DECODE_SYMBOLX4_1(p, bitDPtr);\n        HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);\n        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);\n    }\n\n    /* closer to end : up to 2 symbols at a time */\n    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-2))\n        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);\n\n    while (p <= pEnd-2)\n        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */\n\n    if (p < pEnd)\n        p += HUFv07_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);\n\n    return p-pStart;\n}\n\n\nstatic size_t HUFv07_decompress1X4_usingDTable_internal(\n          void* dst,  size_t dstSize,\n    const void* cSrc, size_t cSrcSize,\n    const HUFv07_DTable* DTable)\n{\n    BITv07_DStream_t bitD;\n\n    /* Init */\n    {   size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);\n        if 
(HUFv07_isError(errorCode)) return errorCode;
    }

    /* decode */
    {   BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
        const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
        /* single bitstream : decode the whole destination in one pass */
        HUFv07_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
    }

    /* check : the bitstream must be exactly consumed, otherwise input was corrupted */
    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);

    /* decoded size */
    return dstSize;
}

/*! HUFv07_decompress1X4_usingDTable() :
*   Single-stream decompression using a pre-built double-symbol (X4) table.
*   Rejects tables whose descriptor is not of the X4 kind (tableType != 1). */
size_t HUFv07_decompress1X4_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUFv07_DTable* DTable)
{
    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
    if (dtd.tableType != 1) return ERROR(GENERIC);
    return HUFv07_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}

/*! HUFv07_decompress1X4_DCtx() :
*   Reads the X4 table description from the beginning of cSrc,
*   then decodes the remaining payload with the single-stream decoder. */
size_t HUFv07_decompress1X4_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUFv07_readDTableX4 (DCtx, cSrc, cSrcSize);
    if (HUFv07_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);   /* header must leave at least 1 byte of payload */
    ip += hSize; cSrcSize -= hSize;

    return HUFv07_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}

/*! HUFv07_decompress1X4() :
*   Convenience wrapper : builds the X4 table on the stack, then decodes. */
size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
    return HUFv07_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}

/*! HUFv07_decompress4X4_usingDTable_internal() :
*   4-stream decompression core using a double-symbol (X4) table.
*   cSrc begins with a 6-byte jump table (3 little-endian 16-bit stream sizes). */
static size_t HUFv07_decompress4X4_usingDTable_internal(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUFv07_DTable* DTable)
{
    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */

    {   const BYTE* 
const istart = (const BYTE*) cSrc;
        BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable+1;
        const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;

        /* Init */
        BITv07_DStream_t bitD1;
        BITv07_DStream_t bitD2;
        BITv07_DStream_t bitD3;
        BITv07_DStream_t bitD4;
        /* jump table : explicit sizes of streams 1-3; stream 4 gets whatever remains */
        size_t const length1 = MEM_readLE16(istart);
        size_t const length2 = MEM_readLE16(istart+2);
        size_t const length3 = MEM_readLE16(istart+4);
        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
        const BYTE* const istart1 = istart + 6;  /* jumpTable */
        const BYTE* const istart2 = istart1 + length1;
        const BYTE* const istart3 = istart2 + length2;
        const BYTE* const istart4 = istart3 + length3;
        /* destination is split into 4 equal segments (last may be shorter) */
        size_t const segmentSize = (dstSize+3) / 4;
        BYTE* const opStart2 = ostart + segmentSize;
        BYTE* const opStart3 = opStart2 + segmentSize;
        BYTE* const opStart4 = opStart3 + segmentSize;
        BYTE* op1 = ostart;
        BYTE* op2 = opStart2;
        BYTE* op3 = opStart3;
        BYTE* op4 = opStart4;
        U32 endSignal;
        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
        U32 const dtLog = dtd.tableLog;

        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
        { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
          if (HUFv07_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
          if (HUFv07_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
          if (HUFv07_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
          if (HUFv07_isError(errorCode)) return errorCode; }

        /* 16-32 symbols per loop (4-8 symbols per stream) */
        endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
        for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
            HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUFv07_DECODE_SYMBOLX4_1(op1, &bitD1);
            HUFv07_DECODE_SYMBOLX4_1(op2, &bitD2);
            HUFv07_DECODE_SYMBOLX4_1(op3, &bitD3);
            HUFv07_DECODE_SYMBOLX4_1(op4, &bitD4);
            HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUFv07_DECODE_SYMBOLX4_0(op1, &bitD1);
            HUFv07_DECODE_SYMBOLX4_0(op2, &bitD2);
            HUFv07_DECODE_SYMBOLX4_0(op3, &bitD3);
            HUFv07_DECODE_SYMBOLX4_0(op4, &bitD4);

            endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
        }

        /* check corruption : each stream pointer must still be inside its own segment */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 supposed already verified within main loop */

        /* finish bitStreams one by one */
        HUFv07_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
        HUFv07_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
        HUFv07_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
        HUFv07_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        { U32 const endCheck = BITv07_endOfDStream(&bitD1) & 
BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
          if (!endCheck) return ERROR(corruption_detected); }

        /* decoded size */
        return dstSize;
    }
}


/*! HUFv07_decompress4X4_usingDTable() :
*   4-stream decompression with a pre-built X4 table; rejects non-X4 tables. */
size_t HUFv07_decompress4X4_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUFv07_DTable* DTable)
{
    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
    if (dtd.tableType != 1) return ERROR(GENERIC);
    return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}


/*! HUFv07_decompress4X4_DCtx() :
*   Reads the X4 table from the start of cSrc,
*   then runs the 4-stream decoder on the remaining payload. */
size_t HUFv07_decompress4X4_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t hSize = HUFv07_readDTableX4 (dctx, cSrc, cSrcSize);
    if (HUFv07_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
}

/*! HUFv07_decompress4X4() :
*   Convenience wrapper : stack-allocated X4 table + 4-stream decode. */
size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
    return HUFv07_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}


/* ********************************/
/* Generic decompression selector */
/* ********************************/

/*! HUFv07_decompress1X_usingDTable() :
*   Dispatches on the table descriptor : X4 (double-symbol) decoder when
*   tableType is set, X2 (single-symbol) decoder otherwise. */
size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const HUFv07_DTable* DTable)
{
    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
    return dtd.tableType ? 
HUFv07_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
                           HUFv07_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}

/*! HUFv07_decompress4X_usingDTable() :
*   4-stream variant of the generic dispatcher : X4 decoder when the
*   table descriptor's tableType is set, X2 decoder otherwise. */
size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const HUFv07_DTable* DTable)
{
    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
    return dtd.tableType ? HUFv07_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
                           HUFv07_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}


/* Timing model consumed by HUFv07_selectDecoder() :
*  tableTime is a fixed per-call cost, decode256Time a cost per 256 output bytes.
*  Rows are indexed by the compression-ratio quantizer Q (0..15). */
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
    /* single, double, quad */
    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
};

/** HUFv07_selectDecoder() :
*   Tells which decoder 
is likely to decode faster,
*   based on a set of pre-determined metrics.
*   @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .
*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize)
{
    /* decoder timing evaluation */
    U32 const Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
    U32 const D256 = (U32)(dstSize >> 8);   /* output size in 256-byte units */
    U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
    U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
    DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, for cache eviction */

    return DTime1 < DTime0;
}


typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);

/*! HUFv07_decompress() :
*   Generic entry point : handles the uncompressed (cSrcSize==dstSize) and
*   RLE (cSrcSize==1) special cases, then runs the 4-stream decoder
*   predicted fastest by HUFv07_selectDecoder(). */
size_t HUFv07_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    static const decompressionAlgo decompress[2] = { HUFv07_decompress4X2, HUFv07_decompress4X4 };

    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
    }

    //return HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
    //return HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
}

/*! HUFv07_decompress4X_DCtx() :
*   Same special-case handling as HUFv07_decompress(), but reads the table
*   into the caller-provided dctx before decoding. */
size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > 
dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
        return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
                        HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
    }
}

/*! HUFv07_decompress4X_hufOnly() :
*   Like HUFv07_decompress4X_DCtx(), but the input must be genuinely
*   Huffman-compressed : the not-compressed and RLE sizes are rejected
*   as corruption instead of being handled. */
size_t HUFv07_decompress4X_hufOnly (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected);   /* invalid */

    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
        return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
                        HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
    }
}

/*! HUFv07_decompress1X_DCtx() :
*   Single-stream counterpart of HUFv07_decompress4X_DCtx() :
*   same special cases, then dispatch to the 1X4 or 1X2 decoder. */
size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
        return algoNb ? 
HUFv07_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :\n                        HUFv07_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;\n    }\n}\n/*\n    Common functions of Zstd compression library\n    Copyright (C) 2015-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : http://www.zstd.net/\n*/\n\n\n\n/*-****************************************\n*  ZSTD Error Management\n******************************************/\n/*! 
ZSTDv07_isError() :
*   tells if a return value is an error code */
unsigned ZSTDv07_isError(size_t code) { return ERR_isError(code); }

/*! ZSTDv07_getErrorName() :
*   provides error code string from function result (useful for debugging) */
const char* ZSTDv07_getErrorName(size_t code) { return ERR_getErrorName(code); }



/* **************************************************************
*  ZBUFF Error Management
****************************************************************/
/* ZBUFF error helpers : thin forwards to the shared ERR_* primitives */
unsigned ZBUFFv07_isError(size_t errorCode) { return ERR_isError(errorCode); }

const char* ZBUFFv07_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }



/* Default allocator : plain malloc; the opaque handle is unused */
static void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size)
{
    void* address = malloc(size);
    (void)opaque;
    /* printf("alloc %p, %d opaque=%p \n", address, (int)size, opaque); */
    return address;
}

/* Default deallocator : plain free; the opaque handle is unused */
static void ZSTDv07_defaultFreeFunction(void* opaque, void* address)
{
    (void)opaque;
    /* if (address) printf("free %p opaque=%p \n", address, opaque); */
    free(address);
}
/*
    zstd_internal - common functions to include
    Header File for include
    Copyright (C) 2014-2016, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:
    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE 
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : https://www.zstd.net\n*/\n#ifndef ZSTDv07_CCOMMON_H_MODULE\n#define ZSTDv07_CCOMMON_H_MODULE\n\n\n/*-*************************************\n*  Common macros\n***************************************/\n#define MIN(a,b) ((a)<(b) ? (a) : (b))\n#define MAX(a,b) ((a)>(b) ? (a) : (b))\n\n\n/*-*************************************\n*  Common constants\n***************************************/\n#define ZSTDv07_OPT_NUM    (1<<12)\n#define ZSTDv07_DICT_MAGIC  0xEC30A437   /* v0.7 */\n\n#define ZSTDv07_REP_NUM    3\n#define ZSTDv07_REP_INIT   ZSTDv07_REP_NUM\n#define ZSTDv07_REP_MOVE   (ZSTDv07_REP_NUM-1)\nstatic const U32 repStartValue[ZSTDv07_REP_NUM] = { 1, 4, 8 };\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define BIT7 128\n#define BIT6  64\n#define BIT5  32\n#define BIT4  16\n#define BIT1   2\n#define BIT0   1\n\n#define ZSTDv07_WINDOWLOG_ABSOLUTEMIN 10\nstatic const size_t ZSTDv07_fcs_fieldSize[4] = { 0, 2, 4, 8 };\nstatic const size_t ZSTDv07_did_fieldSize[4] = { 0, 1, 2, 4 };\n\n#define ZSTDv07_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */\nstatic const size_t ZSTDv07_blockHeaderSize = ZSTDv07_BLOCKHEADERSIZE;\ntypedef enum { bt_compressed, bt_raw, bt_rle, bt_end } 
blockType_t;\n\n#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */\n#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */\n\n#define HufLog 12\ntypedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t;\n\n#define LONGNBSEQ 0x7F00\n\n#define MINMATCH 3\n#define EQUAL_READ32 4\n\n#define Litbits  8\n#define MaxLit ((1<<Litbits) - 1)\n#define MaxML  52\n#define MaxLL  35\n#define MaxOff 28\n#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */\n#define MLFSELog    9\n#define LLFSELog    9\n#define OffFSELog   8\n\n#define FSEv07_ENCODING_RAW     0\n#define FSEv07_ENCODING_RLE     1\n#define FSEv07_ENCODING_STATIC  2\n#define FSEv07_ENCODING_DYNAMIC 3\n\n#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)\n\nstatic const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,\n                                     13,14,15,16 };\nstatic const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,\n                                             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,\n                                            -1,-1,-1,-1 };\nstatic const U32 LL_defaultNormLog = 6;\n\nstatic const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,\n                                     12,13,14,15,16 };\nstatic const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,\n                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,\n                                            -1,-1,-1,-1,-1 };\nstatic 
const U32 ML_defaultNormLog = 6;\n\nstatic const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,\n                                              1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };\nstatic const U32 OF_defaultNormLog = 5;\n\n\n/*-*******************************************\n*  Shared functions to include for inlining\n*********************************************/\nstatic void ZSTDv07_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }\n#define COPY8(d,s) { ZSTDv07_copy8(d,s); d+=8; s+=8; }\n\n/*! ZSTDv07_wildcopy() :\n*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */\n#define WILDCOPY_OVERLENGTH 8\nMEM_STATIC void ZSTDv07_wildcopy(void* dst, const void* src, ptrdiff_t length)\n{\n    const BYTE* ip = (const BYTE*)src;\n    BYTE* op = (BYTE*)dst;\n    BYTE* const oend = op + length;\n    do\n        COPY8(op, ip)\n    while (op < oend);\n}\n\n\n/*-*******************************************\n*  Private interfaces\n*********************************************/\ntypedef struct ZSTDv07_stats_s ZSTDv07_stats_t;\n\ntypedef struct {\n    U32 off;\n    U32 len;\n} ZSTDv07_match_t;\n\ntypedef struct {\n    U32 price;\n    U32 off;\n    U32 mlen;\n    U32 litlen;\n    U32 rep[ZSTDv07_REP_INIT];\n} ZSTDv07_optimal_t;\n\nstruct ZSTDv07_stats_s { U32 unused; };\n\ntypedef struct {\n    void* buffer;\n    U32*  offsetStart;\n    U32*  offset;\n    BYTE* offCodeStart;\n    BYTE* litStart;\n    BYTE* lit;\n    U16*  litLengthStart;\n    U16*  litLength;\n    BYTE* llCodeStart;\n    U16*  matchLengthStart;\n    U16*  matchLength;\n    BYTE* mlCodeStart;\n    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */\n    U32   longLengthPos;\n    /* opt */\n    ZSTDv07_optimal_t* priceTable;\n    ZSTDv07_match_t* matchTable;\n    U32* matchLengthFreq;\n    U32* litLengthFreq;\n    U32* litFreq;\n    U32* offCodeFreq;\n    U32  matchLengthSum;\n    U32  matchSum;\n  
  U32  litLengthSum;\n    U32  litSum;\n    U32  offCodeSum;\n    U32  log2matchLengthSum;\n    U32  log2matchSum;\n    U32  log2litLengthSum;\n    U32  log2litSum;\n    U32  log2offCodeSum;\n    U32  factor;\n    U32  cachedPrice;\n    U32  cachedLitLength;\n    const BYTE* cachedLiterals;\n    ZSTDv07_stats_t stats;\n} seqStore_t;\n\nvoid ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);\n\n/* custom memory allocation functions */\nstatic const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL };\n\n#endif   /* ZSTDv07_CCOMMON_H_MODULE */\n/*\n    zstd - standard compression library\n    Copyright (C) 2014-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : http://www.zstd.net\n*/\n\n/* ***************************************************************\n*  Tuning parameters\n*****************************************************************/\n/*!\n * HEAPMODE :\n * Select how default decompression function ZSTDv07_decompress() will allocate memory,\n * in memory stack (0), or in memory heap (1, requires malloc())\n */\n#ifndef ZSTDv07_HEAPMODE\n#  define ZSTDv07_HEAPMODE 1\n#endif\n\n\n/*-*******************************************************\n*  Compiler specifics\n*********************************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  include <intrin.h>                    /* For Visual 2005 */\n#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */\n#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */\n#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */\n#endif\n\n\n/*-*************************************\n*  Macros\n***************************************/\n#define ZSTDv07_isError ERR_isError   /* for inlining */\n#define FSEv07_isError  ERR_isError\n#define HUFv07_isError  ERR_isError\n\n\n/*_*******************************************************\n*  Memory operations\n**********************************************************/\nstatic void ZSTDv07_copy4(void* dst, const void* 
src) { memcpy(dst, src, 4); }\n\n\n/*-*************************************************************\n*   Context management\n***************************************************************/\ntypedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,\n               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,\n               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTDv07_dStage;\n\nstruct ZSTDv07_DCtx_s\n{\n    FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)];\n    FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)];\n    FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)];\n    HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)];  /* can accommodate HUFv07_decompress4X */\n    const void* previousDstEnd;\n    const void* base;\n    const void* vBase;\n    const void* dictEnd;\n    size_t expected;\n    U32 rep[3];\n    ZSTDv07_frameParams fParams;\n    blockType_t bType;   /* used in ZSTDv07_decompressContinue(), to transfer blockType between header decoding and block decoding stages */\n    ZSTDv07_dStage stage;\n    U32 litEntropy;\n    U32 fseEntropy;\n    XXH64_state_t xxhState;\n    size_t headerSize;\n    U32 dictID;\n    const BYTE* litPtr;\n    ZSTDv07_customMem customMem;\n    size_t litSize;\n    BYTE litBuffer[ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];\n    BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];\n};  /* typedef'd to ZSTDv07_DCtx within \"zstd_static.h\" */\n\nint ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx);\n\nsize_t ZSTDv07_sizeofDCtx (const ZSTDv07_DCtx* dctx) { return sizeof(*dctx); }\n\nsize_t ZSTDv07_estimateDCtxSize(void) { return sizeof(ZSTDv07_DCtx); }\n\nsize_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx)\n{\n    dctx->expected = ZSTDv07_frameHeaderSize_min;\n    dctx->stage = ZSTDds_getFrameHeaderSize;\n    dctx->previousDstEnd = NULL;\n    dctx->base = NULL;\n    dctx->vBase = NULL;\n    dctx->dictEnd = NULL;\n    dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001);\n    
dctx->litEntropy = dctx->fseEntropy = 0;\n    dctx->dictID = 0;\n    { int i; for (i=0; i<ZSTDv07_REP_NUM; i++) dctx->rep[i] = repStartValue[i]; }\n    return 0;\n}\n\nZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem)\n{\n    ZSTDv07_DCtx* dctx;\n\n    if (!customMem.customAlloc && !customMem.customFree)\n        customMem = defaultCustomMem;\n\n    if (!customMem.customAlloc || !customMem.customFree)\n        return NULL;\n\n    dctx = (ZSTDv07_DCtx*) customMem.customAlloc(customMem.opaque, sizeof(ZSTDv07_DCtx));\n    if (!dctx) return NULL;\n    memcpy(&dctx->customMem, &customMem, sizeof(ZSTDv07_customMem));\n    ZSTDv07_decompressBegin(dctx);\n    return dctx;\n}\n\nZSTDv07_DCtx* ZSTDv07_createDCtx(void)\n{\n    return ZSTDv07_createDCtx_advanced(defaultCustomMem);\n}\n\nsize_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx)\n{\n    if (dctx==NULL) return 0;   /* support free on NULL */\n    dctx->customMem.customFree(dctx->customMem.opaque, dctx);\n    return 0;   /* reserved as a potential error code in the future */\n}\n\nvoid ZSTDv07_copyDCtx(ZSTDv07_DCtx* dstDCtx, const ZSTDv07_DCtx* srcDCtx)\n{\n    memcpy(dstDCtx, srcDCtx,\n           sizeof(ZSTDv07_DCtx) - (ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH + ZSTDv07_frameHeaderSize_max));  /* no need to copy workspace */\n}\n\n\n/*-*************************************************************\n*   Decompression section\n***************************************************************/\n\n/* Frame format description\n   Frame Header -  [ Block Header - Block ] - Frame End\n   1) Frame Header\n      - 4 bytes - Magic Number : ZSTDv07_MAGICNUMBER (defined within zstd.h)\n      - 1 byte  - Frame Descriptor\n   2) Block Header\n      - 3 bytes, starting with a 2-bits descriptor\n                 Uncompressed, Compressed, Frame End, unused\n   3) Block\n      See Block Format Description\n   4) Frame End\n      - 3 bytes, compatible with Block Header\n*/\n\n\n/* Frame Header :\n\n   1 byte - 
FrameHeaderDescription :\n   bit 0-1 : dictID (0, 1, 2 or 4 bytes)\n   bit 2   : checksumFlag\n   bit 3   : reserved (must be zero)\n   bit 4   : reserved (unused, can be any value)\n   bit 5   : Single Segment (if 1, WindowLog byte is not present)\n   bit 6-7 : FrameContentFieldSize (0, 2, 4, or 8)\n             if (SkippedWindowLog && !FrameContentFieldsize) FrameContentFieldsize=1;\n\n   Optional : WindowLog (0 or 1 byte)\n   bit 0-2 : octal Fractional (1/8th)\n   bit 3-7 : Power of 2, with 0 = 1 KB (up to 2 TB)\n\n   Optional : dictID (0, 1, 2 or 4 bytes)\n   Automatic adaptation\n   0 : no dictID\n   1 : 1 - 255\n   2 : 256 - 65535\n   4 : all other values\n\n   Optional : content size (0, 1, 2, 4 or 8 bytes)\n   0 : unknown          (fcfs==0 and swl==0)\n   1 : 0-255 bytes      (fcfs==0 and swl==1)\n   2 : 256 - 65535+256  (fcfs==1)\n   4 : 0 - 4GB-1        (fcfs==2)\n   8 : 0 - 16EB-1       (fcfs==3)\n*/\n\n\n/* Compressed Block, format description\n\n   Block = Literal Section - Sequences Section\n   Prerequisite : size of (compressed) block, maximum size of regenerated data\n\n   1) Literal Section\n\n   1.1) Header : 1-5 bytes\n        flags: 2 bits\n            00 compressed by Huff0\n            01 unused\n            10 is Raw (uncompressed)\n            11 is Rle\n            Note : using 01 => Huff0 with precomputed table ?\n            Note : delta map ? 
=> compressed ?\n\n   1.1.1) Huff0-compressed literal block : 3-5 bytes\n            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream\n            srcSize < 1 KB => 3 bytes (2-2-10-10)\n            srcSize < 16KB => 4 bytes (2-2-14-14)\n            else           => 5 bytes (2-2-18-18)\n            big endian convention\n\n   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes\n        size :  5 bits: (IS_RAW<<6) + (0<<4) + size\n               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)\n                        size&255\n               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)\n                        size>>8&255\n                        size&255\n\n   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes\n        size :  5 bits: (IS_RLE<<6) + (0<<4) + size\n               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)\n                        size&255\n               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)\n                        size>>8&255\n                        size&255\n\n   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes\n            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream\n            srcSize < 1 KB => 3 bytes (2-2-10-10)\n            srcSize < 16KB => 4 bytes (2-2-14-14)\n            else           => 5 bytes (2-2-18-18)\n            big endian convention\n\n        1- CTable available (stored into workspace ?)\n        2- Small input (fast heuristic ? Full comparison ? 
depend on clevel ?)\n\n\n   1.2) Literal block content\n\n   1.2.1) Huff0 block, using sizes from header\n        See Huff0 format\n\n   1.2.2) Huff0 block, using prepared table\n\n   1.2.3) Raw content\n\n   1.2.4) single byte\n\n\n   2) Sequences section\n      TO DO\n*/\n\n/** ZSTDv07_frameHeaderSize() :\n*   srcSize must be >= ZSTDv07_frameHeaderSize_min.\n*   @return : size of the Frame Header */\nstatic size_t ZSTDv07_frameHeaderSize(const void* src, size_t srcSize)\n{\n    if (srcSize < ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong);\n    {   BYTE const fhd = ((const BYTE*)src)[4];\n        U32 const dictID= fhd & 3;\n        U32 const directMode = (fhd >> 5) & 1;\n        U32 const fcsId = fhd >> 6;\n        return ZSTDv07_frameHeaderSize_min + !directMode + ZSTDv07_did_fieldSize[dictID] + ZSTDv07_fcs_fieldSize[fcsId]\n                + (directMode && !ZSTDv07_fcs_fieldSize[fcsId]);\n    }\n}\n\n\n/** ZSTDv07_getFrameParams() :\n*   decode Frame Header, or require larger `srcSize`.\n*   @return : 0, `fparamsPtr` is correctly filled,\n*            >0, `srcSize` is too small, result is expected `srcSize`,\n*             or an error code, which can be tested using ZSTDv07_isError() */\nsize_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n\n    if (srcSize < ZSTDv07_frameHeaderSize_min) return ZSTDv07_frameHeaderSize_min;\n    memset(fparamsPtr, 0, sizeof(*fparamsPtr));\n    if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {\n        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {\n            if (srcSize < ZSTDv07_skippableHeaderSize) return ZSTDv07_skippableHeaderSize; /* magic number + skippable frame length */\n            fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4);\n            fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */\n            return 0;\n        }\n        return 
ERROR(prefix_unknown);\n    }\n\n    /* ensure there is enough `srcSize` to fully read/decode frame header */\n    { size_t const fhsize = ZSTDv07_frameHeaderSize(src, srcSize);\n      if (srcSize < fhsize) return fhsize; }\n\n    {   BYTE const fhdByte = ip[4];\n        size_t pos = 5;\n        U32 const dictIDSizeCode = fhdByte&3;\n        U32 const checksumFlag = (fhdByte>>2)&1;\n        U32 const directMode = (fhdByte>>5)&1;\n        U32 const fcsID = fhdByte>>6;\n        U32 const windowSizeMax = 1U << ZSTDv07_WINDOWLOG_MAX;\n        U32 windowSize = 0;\n        U32 dictID = 0;\n        U64 frameContentSize = 0;\n        if ((fhdByte & 0x08) != 0)   /* reserved bits, which must be zero */\n            return ERROR(frameParameter_unsupported);\n        if (!directMode) {\n            BYTE const wlByte = ip[pos++];\n            U32 const windowLog = (wlByte >> 3) + ZSTDv07_WINDOWLOG_ABSOLUTEMIN;\n            if (windowLog > ZSTDv07_WINDOWLOG_MAX)\n                return ERROR(frameParameter_unsupported);\n            windowSize = (1U << windowLog);\n            windowSize += (windowSize >> 3) * (wlByte&7);\n        }\n\n        switch(dictIDSizeCode)\n        {\n            default:   /* impossible */\n            case 0 : break;\n            case 1 : dictID = ip[pos]; pos++; break;\n            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;\n            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;\n        }\n        switch(fcsID)\n        {\n            default:   /* impossible */\n            case 0 : if (directMode) frameContentSize = ip[pos]; break;\n            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;\n            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;\n            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;\n        }\n        if (!windowSize) windowSize = (U32)frameContentSize;\n        if (windowSize > windowSizeMax)\n            return ERROR(frameParameter_unsupported);\n        
fparamsPtr->frameContentSize = frameContentSize;\n        fparamsPtr->windowSize = windowSize;\n        fparamsPtr->dictID = dictID;\n        fparamsPtr->checksumFlag = checksumFlag;\n    }\n    return 0;\n}\n\n\n/** ZSTDv07_getDecompressedSize() :\n*   compatible with legacy mode\n*   @return : decompressed size if known, 0 otherwise\n              note : 0 can mean any of the following :\n                   - decompressed size is not provided within frame header\n                   - frame header unknown / not supported\n                   - frame header not completely provided (`srcSize` too small) */\nunsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize)\n{\n    ZSTDv07_frameParams fparams;\n    size_t const frResult = ZSTDv07_getFrameParams(&fparams, src, srcSize);\n    if (frResult!=0) return 0;\n    return fparams.frameContentSize;\n}\n\n\n/** ZSTDv07_decodeFrameHeader() :\n*   `srcSize` must be the size provided by ZSTDv07_frameHeaderSize().\n*   @return : 0 if success, or an error code, which can be tested using ZSTDv07_isError() */\nstatic size_t ZSTDv07_decodeFrameHeader(ZSTDv07_DCtx* dctx, const void* src, size_t srcSize)\n{\n    size_t const result = ZSTDv07_getFrameParams(&(dctx->fParams), src, srcSize);\n    if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong);\n    if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);\n    return result;\n}\n\n\ntypedef struct\n{\n    blockType_t blockType;\n    U32 origSize;\n} blockProperties_t;\n\n/*! 
ZSTDv07_getcBlockSize() :\n*   Provides the size of compressed block from block header `src` */\nstatic size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)\n{\n    const BYTE* const in = (const BYTE* const)src;\n    U32 cSize;\n\n    if (srcSize < ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);\n\n    bpPtr->blockType = (blockType_t)((*in) >> 6);\n    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);\n    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;\n\n    if (bpPtr->blockType == bt_end) return 0;\n    if (bpPtr->blockType == bt_rle) return 1;\n    return cSize;\n}\n\n\nstatic size_t ZSTDv07_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);\n    memcpy(dst, src, srcSize);\n    return srcSize;\n}\n\n\n/*! ZSTDv07_decodeLiteralsBlock() :\n    @return : nb of bytes read from src (< srcSize ) */\nstatic size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,\n                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */\n{\n    const BYTE* const istart = (const BYTE*) src;\n\n    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);\n\n    switch((litBlockType_t)(istart[0]>> 6))\n    {\n    case lbt_huffman:\n        {   size_t litSize, litCSize, singleStream=0;\n            U32 lhSize = (istart[0] >> 4) & 3;\n            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */\n                /* 2 - 2 - 10 - 10 */\n                lhSize=3;\n                singleStream = istart[0] & 16;\n                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);\n                litCSize = ((istart[1] &  3) << 8) + istart[2];\n                break;\n       
     case 2:\n                /* 2 - 2 - 14 - 14 */\n                lhSize=4;\n                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);\n                litCSize = ((istart[2] & 63) <<  8) + istart[3];\n                break;\n            case 3:\n                /* 2 - 2 - 18 - 18 */\n                lhSize=5;\n                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);\n                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];\n                break;\n            }\n            if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);\n            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);\n\n            if (HUFv07_isError(singleStream ?\n                            HUFv07_decompress1X2_DCtx(dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :\n                            HUFv07_decompress4X_hufOnly (dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) ))\n                return ERROR(corruption_detected);\n\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            dctx->litEntropy = 1;\n            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n            return litCSize + lhSize;\n        }\n    case lbt_repeat:\n        {   size_t litSize, litCSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */\n                return ERROR(corruption_detected);\n            if (dctx->litEntropy==0)\n                return ERROR(dictionary_corrupted);\n\n            /* 2 - 2 - 10 - 10 */\n            lhSize=3;\n            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);\n            litCSize = ((istart[1] &  3) << 8) + istart[2];\n            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);\n\n            {   size_t const 
errorCode = HUFv07_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTable);\n                if (HUFv07_isError(errorCode)) return ERROR(corruption_detected);\n            }\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n            return litCSize + lhSize;\n        }\n    case lbt_raw:\n        {   size_t litSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */\n                lhSize=1;\n                litSize = istart[0] & 31;\n                break;\n            case 2:\n                litSize = ((istart[0] & 15) << 8) + istart[1];\n                break;\n            case 3:\n                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];\n                break;\n            }\n\n            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */\n                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);\n                memcpy(dctx->litBuffer, istart+lhSize, litSize);\n                dctx->litPtr = dctx->litBuffer;\n                dctx->litSize = litSize;\n                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);\n                return lhSize+litSize;\n            }\n            /* direct reference into compressed stream */\n            dctx->litPtr = istart+lhSize;\n            dctx->litSize = litSize;\n            return lhSize+litSize;\n        }\n    case lbt_rle:\n        {   size_t litSize;\n            U32 lhSize = ((istart[0]) >> 4) & 3;\n            switch(lhSize)\n            {\n            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */\n                lhSize = 1;\n                litSize = 
istart[0] & 31;\n                break;\n            case 2:\n                litSize = ((istart[0] & 15) << 8) + istart[1];\n                break;\n            case 3:\n                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];\n                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */\n                break;\n            }\n            if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);\n            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);\n            dctx->litPtr = dctx->litBuffer;\n            dctx->litSize = litSize;\n            return lhSize+1;\n        }\n    default:\n        return ERROR(corruption_detected);   /* impossible */\n    }\n}\n\n\n/*! ZSTDv07_buildSeqTable() :\n    @return : nb bytes read from src,\n              or an error code if it fails, testable with ZSTDv07_isError()\n*/\nstatic size_t ZSTDv07_buildSeqTable(FSEv07_DTable* DTable, U32 type, U32 max, U32 maxLog,\n                                 const void* src, size_t srcSize,\n                                 const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)\n{\n    switch(type)\n    {\n    case FSEv07_ENCODING_RLE :\n        if (!srcSize) return ERROR(srcSize_wrong);\n        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);\n        FSEv07_buildDTable_rle(DTable, *(const BYTE*)src);   /* if *src > max, data is corrupted */\n        return 1;\n    case FSEv07_ENCODING_RAW :\n        FSEv07_buildDTable(DTable, defaultNorm, max, defaultLog);\n        return 0;\n    case FSEv07_ENCODING_STATIC:\n        if (!flagRepeatTable) return ERROR(corruption_detected);\n        return 0;\n    default :   /* impossible */\n    case FSEv07_ENCODING_DYNAMIC :\n        {   U32 tableLog;\n            S16 norm[MaxSeq+1];\n            size_t const headerSize = FSEv07_readNCount(norm, &max, &tableLog, src, srcSize);\n     
       if (FSEv07_isError(headerSize)) return ERROR(corruption_detected);\n            if (tableLog > maxLog) return ERROR(corruption_detected);\n            FSEv07_buildDTable(DTable, norm, max, tableLog);\n            return headerSize;\n    }   }\n}\n\n\nstatic size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr,\n                             FSEv07_DTable* DTableLL, FSEv07_DTable* DTableML, FSEv07_DTable* DTableOffb, U32 flagRepeatTable,\n                             const void* src, size_t srcSize)\n{\n    const BYTE* const istart = (const BYTE* const)src;\n    const BYTE* const iend = istart + srcSize;\n    const BYTE* ip = istart;\n\n    /* check */\n    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);\n\n    /* SeqHead */\n    {   int nbSeq = *ip++;\n        if (!nbSeq) { *nbSeqPtr=0; return 1; }\n        if (nbSeq > 0x7F) {\n            if (nbSeq == 0xFF) {\n                if (ip+2 > iend) return ERROR(srcSize_wrong);\n                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;\n            } else {\n                if (ip >= iend) return ERROR(srcSize_wrong);\n                nbSeq = ((nbSeq-0x80)<<8) + *ip++;\n            }\n        }\n        *nbSeqPtr = nbSeq;\n    }\n\n    /* FSE table descriptors */\n    if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are \"raw\", hence no header, but at least xxLog bits per type */\n    {   U32 const LLtype  = *ip >> 6;\n        U32 const OFtype = (*ip >> 4) & 3;\n        U32 const MLtype  = (*ip >> 2) & 3;\n        ip++;\n\n        /* Build DTables */\n        {   size_t const llhSize = ZSTDv07_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);\n            if (ZSTDv07_isError(llhSize)) return ERROR(corruption_detected);\n            ip += llhSize;\n        }\n        {   size_t const ofhSize = ZSTDv07_buildSeqTable(DTableOffb, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, 
flagRepeatTable);\n            if (ZSTDv07_isError(ofhSize)) return ERROR(corruption_detected);\n            ip += ofhSize;\n        }\n        {   size_t const mlhSize = ZSTDv07_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);\n            if (ZSTDv07_isError(mlhSize)) return ERROR(corruption_detected);\n            ip += mlhSize;\n    }   }\n\n    return ip-istart;\n}\n\n\ntypedef struct {\n    size_t litLength;\n    size_t matchLength;\n    size_t offset;\n} seq_t;\n\ntypedef struct {\n    BITv07_DStream_t DStream;\n    FSEv07_DState_t stateLL;\n    FSEv07_DState_t stateOffb;\n    FSEv07_DState_t stateML;\n    size_t prevOffset[ZSTDv07_REP_INIT];\n} seqState_t;\n\n\nstatic seq_t ZSTDv07_decodeSequence(seqState_t* seqState)\n{\n    seq_t seq;\n\n    U32 const llCode = FSEv07_peekSymbol(&(seqState->stateLL));\n    U32 const mlCode = FSEv07_peekSymbol(&(seqState->stateML));\n    U32 const ofCode = FSEv07_peekSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */\n\n    U32 const llBits = LL_bits[llCode];\n    U32 const mlBits = ML_bits[mlCode];\n    U32 const ofBits = ofCode;\n    U32 const totalBits = llBits+mlBits+ofBits;\n\n    static const U32 LL_base[MaxLL+1] = {\n                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,\n                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,\n                            0x2000, 0x4000, 0x8000, 0x10000 };\n\n    static const U32 ML_base[MaxML+1] = {\n                             3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,   14,    15,    16,    17,    18,\n                            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,   30,    31,    32,    33,    34,\n                            35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,\n                            0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };\n\n  
  static const U32 OF_base[MaxOff+1] = {\n                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,\n                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,\n                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,\n                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };\n\n    /* sequence */\n    {   size_t offset;\n        if (!ofCode)\n            offset = 0;\n        else {\n            offset = OF_base[ofCode] + BITv07_readBits(&(seqState->DStream), ofBits);   /* <=  (ZSTDv07_WINDOWLOG_MAX-1) bits */\n            if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream));\n        }\n\n        if (ofCode <= 1) {\n            if ((llCode == 0) & (offset <= 1)) offset = 1-offset;\n            if (offset) {\n                size_t const temp = seqState->prevOffset[offset];\n                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];\n                seqState->prevOffset[1] = seqState->prevOffset[0];\n                seqState->prevOffset[0] = offset = temp;\n            } else {\n                offset = seqState->prevOffset[0];\n            }\n        } else {\n            seqState->prevOffset[2] = seqState->prevOffset[1];\n            seqState->prevOffset[1] = seqState->prevOffset[0];\n            seqState->prevOffset[0] = offset;\n        }\n        seq.offset = offset;\n    }\n\n    seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BITv07_readBits(&(seqState->DStream), mlBits) : 0);   /* <=  16 bits */\n    if (MEM_32bits() && (mlBits+llBits>24)) BITv07_reloadDStream(&(seqState->DStream));\n\n    seq.litLength = LL_base[llCode] + ((llCode>15) ? 
BITv07_readBits(&(seqState->DStream), llBits) : 0);   /* <=  16 bits */\n    if (MEM_32bits() ||\n       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv07_reloadDStream(&(seqState->DStream));\n\n    /* ANS state update */\n    FSEv07_updateState(&(seqState->stateLL), &(seqState->DStream));   /* <=  9 bits */\n    FSEv07_updateState(&(seqState->stateML), &(seqState->DStream));   /* <=  9 bits */\n    if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream));     /* <= 18 bits */\n    FSEv07_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <=  8 bits */\n\n    return seq;\n}\n\n\nstatic\nsize_t ZSTDv07_execSequence(BYTE* op,\n                                BYTE* const oend, seq_t sequence,\n                                const BYTE** litPtr, const BYTE* const litLimit,\n                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)\n{\n    BYTE* const oLitEnd = op + sequence.litLength;\n    size_t const sequenceLength = sequence.litLength + sequence.matchLength;\n    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */\n    BYTE* const oend_w = oend-WILDCOPY_OVERLENGTH;\n    const BYTE* const iLitEnd = *litPtr + sequence.litLength;\n    const BYTE* match = oLitEnd - sequence.offset;\n\n    /* check */\n    if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */\n    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */\n\n    /* copy Literals */\n    ZSTDv07_wildcopy(op, *litPtr, sequence.litLength);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */\n    op = oLitEnd;\n    *litPtr = iLitEnd;   /* update for next sequence */\n\n    /* copy Match */\n    if (sequence.offset > (size_t)(oLitEnd - base)) {\n        /* offset beyond prefix */\n        if (sequence.offset > 
(size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);\n        match = dictEnd - (base-match);\n        if (match + sequence.matchLength <= dictEnd) {\n            memmove(oLitEnd, match, sequence.matchLength);\n            return sequenceLength;\n        }\n        /* span extDict & currentPrefixSegment */\n        {   size_t const length1 = dictEnd - match;\n            memmove(oLitEnd, match, length1);\n            op = oLitEnd + length1;\n            sequence.matchLength -= length1;\n            match = base;\n            if (op > oend_w || sequence.matchLength < MINMATCH) {\n              while (op < oMatchEnd) *op++ = *match++;\n              return sequenceLength;\n            }\n    }   }\n    /* Requirement: op <= oend_w */\n\n    /* match within prefix */\n    if (sequence.offset < 8) {\n        /* close range match, overlap */\n        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */\n        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */\n        int const sub2 = dec64table[sequence.offset];\n        op[0] = match[0];\n        op[1] = match[1];\n        op[2] = match[2];\n        op[3] = match[3];\n        match += dec32table[sequence.offset];\n        ZSTDv07_copy4(op+4, match);\n        match -= sub2;\n    } else {\n        ZSTDv07_copy8(op, match);\n    }\n    op += 8; match += 8;\n\n    if (oMatchEnd > oend-(16-MINMATCH)) {\n        if (op < oend_w) {\n            ZSTDv07_wildcopy(op, match, oend_w - op);\n            match += oend_w - op;\n            op = oend_w;\n        }\n        while (op < oMatchEnd) *op++ = *match++;\n    } else {\n        ZSTDv07_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */\n    }\n    return sequenceLength;\n}\n\n\nstatic size_t ZSTDv07_decompressSequences(\n                               ZSTDv07_DCtx* dctx,\n                               void* dst, size_t maxDstSize,\n                         const void* 
seqStart, size_t seqSize)\n{\n    const BYTE* ip = (const BYTE*)seqStart;\n    const BYTE* const iend = ip + seqSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* const oend = ostart + maxDstSize;\n    BYTE* op = ostart;\n    const BYTE* litPtr = dctx->litPtr;\n    const BYTE* const litEnd = litPtr + dctx->litSize;\n    FSEv07_DTable* DTableLL = dctx->LLTable;\n    FSEv07_DTable* DTableML = dctx->MLTable;\n    FSEv07_DTable* DTableOffb = dctx->OffTable;\n    const BYTE* const base = (const BYTE*) (dctx->base);\n    const BYTE* const vBase = (const BYTE*) (dctx->vBase);\n    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);\n    int nbSeq;\n\n    /* Build Decoding Tables */\n    {   size_t const seqHSize = ZSTDv07_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->fseEntropy, ip, seqSize);\n        if (ZSTDv07_isError(seqHSize)) return seqHSize;\n        ip += seqHSize;\n    }\n\n    /* Regen sequences */\n    if (nbSeq) {\n        seqState_t seqState;\n        dctx->fseEntropy = 1;\n        { U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) seqState.prevOffset[i] = dctx->rep[i]; }\n        { size_t const errorCode = BITv07_initDStream(&(seqState.DStream), ip, iend-ip);\n          if (ERR_isError(errorCode)) return ERROR(corruption_detected); }\n        FSEv07_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);\n        FSEv07_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);\n        FSEv07_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);\n\n        for ( ; (BITv07_reloadDStream(&(seqState.DStream)) <= BITv07_DStream_completed) && nbSeq ; ) {\n            nbSeq--;\n            {   seq_t const sequence = ZSTDv07_decodeSequence(&seqState);\n                size_t const oneSeqSize = ZSTDv07_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);\n                if (ZSTDv07_isError(oneSeqSize)) return oneSeqSize;\n                op += oneSeqSize;\n        }   }\n\n        /* check 
if reached exact end */\n        if (nbSeq) return ERROR(corruption_detected);\n        /* save reps for next block */\n        { U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }\n    }\n\n    /* last literal segment */\n    {   size_t const lastLLSize = litEnd - litPtr;\n        //if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */\n        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);\n        memcpy(op, litPtr, lastLLSize);\n        op += lastLLSize;\n    }\n\n    return op-ostart;\n}\n\n\nstatic void ZSTDv07_checkContinuity(ZSTDv07_DCtx* dctx, const void* dst)\n{\n    if (dst != dctx->previousDstEnd) {   /* not contiguous */\n        dctx->dictEnd = dctx->previousDstEnd;\n        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));\n        dctx->base = dst;\n        dctx->previousDstEnd = dst;\n    }\n}\n\n\nstatic size_t ZSTDv07_decompressBlock_internal(ZSTDv07_DCtx* dctx,\n                            void* dst, size_t dstCapacity,\n                      const void* src, size_t srcSize)\n{   /* blockType == blockCompressed */\n    const BYTE* ip = (const BYTE*)src;\n\n    if (srcSize >= ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong);\n\n    /* Decode literals sub-block */\n    {   size_t const litCSize = ZSTDv07_decodeLiteralsBlock(dctx, src, srcSize);\n        if (ZSTDv07_isError(litCSize)) return litCSize;\n        ip += litCSize;\n        srcSize -= litCSize;\n    }\n    return ZSTDv07_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);\n}\n\n\nsize_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx,\n                            void* dst, size_t dstCapacity,\n                      const void* src, size_t srcSize)\n{\n    size_t dSize;\n    ZSTDv07_checkContinuity(dctx, dst);\n    dSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);\n    dctx->previousDstEnd = 
(char*)dst + dSize;\n    return dSize;\n}\n\n\n/** ZSTDv07_insertBlock() :\n    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */\nZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize)\n{\n    ZSTDv07_checkContinuity(dctx, blockStart);\n    dctx->previousDstEnd = (const char*)blockStart + blockSize;\n    return blockSize;\n}\n\n\nstatic size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)\n{\n    if (length > dstCapacity) return ERROR(dstSize_tooSmall);\n    memset(dst, byte, length);\n    return length;\n}\n\n\n/*! ZSTDv07_decompressFrame() :\n*   `dctx` must be properly initialized */\nstatic size_t ZSTDv07_decompressFrame(ZSTDv07_DCtx* dctx,\n                                 void* dst, size_t dstCapacity,\n                                 const void* src, size_t srcSize)\n{\n    const BYTE* ip = (const BYTE*)src;\n    const BYTE* const iend = ip + srcSize;\n    BYTE* const ostart = (BYTE* const)dst;\n    BYTE* const oend = ostart + dstCapacity;\n    BYTE* op = ostart;\n    size_t remainingSize = srcSize;\n\n    /* check */\n    if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);\n\n    /* Frame Header */\n    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);\n        if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize;\n        if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);\n        if (ZSTDv07_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);\n        ip += frameHeaderSize; remainingSize -= frameHeaderSize;\n    }\n\n    /* Loop on each block */\n    while (1) {\n        size_t decodedSize;\n        blockProperties_t blockProperties;\n        size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, iend-ip, &blockProperties);\n        if (ZSTDv07_isError(cBlockSize)) return cBlockSize;\n\n  
      ip += ZSTDv07_blockHeaderSize;\n        remainingSize -= ZSTDv07_blockHeaderSize;\n        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);\n\n        switch(blockProperties.blockType)\n        {\n        case bt_compressed:\n            decodedSize = ZSTDv07_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);\n            break;\n        case bt_raw :\n            decodedSize = ZSTDv07_copyRawBlock(op, oend-op, ip, cBlockSize);\n            break;\n        case bt_rle :\n            decodedSize = ZSTDv07_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);\n            break;\n        case bt_end :\n            /* end of frame */\n            if (remainingSize) return ERROR(srcSize_wrong);\n            decodedSize = 0;\n            break;\n        default:\n            return ERROR(GENERIC);   /* impossible */\n        }\n        if (blockProperties.blockType == bt_end) break;   /* bt_end */\n\n        if (ZSTDv07_isError(decodedSize)) return decodedSize;\n        if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize);\n        op += decodedSize;\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n    }\n\n    return op-ostart;\n}\n\n\n/*! 
ZSTDv07_decompress_usingPreparedDCtx() :\n*   Same as ZSTDv07_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded.\n*   It avoids reloading the dictionary each time.\n*   `preparedDCtx` must have been properly initialized using ZSTDv07_decompressBegin_usingDict().\n*   Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */\nstatic size_t ZSTDv07_decompress_usingPreparedDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* refDCtx,\n                                         void* dst, size_t dstCapacity,\n                                   const void* src, size_t srcSize)\n{\n    ZSTDv07_copyDCtx(dctx, refDCtx);\n    ZSTDv07_checkContinuity(dctx, dst);\n    return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);\n}\n\n\nsize_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,\n                                 void* dst, size_t dstCapacity,\n                                 const void* src, size_t srcSize,\n                                 const void* dict, size_t dictSize)\n{\n    ZSTDv07_decompressBegin_usingDict(dctx, dict, dictSize);\n    ZSTDv07_checkContinuity(dctx, dst);\n    return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);\n}\n\n\nsize_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    return ZSTDv07_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);\n}\n\n\nsize_t ZSTDv07_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n#if defined(ZSTDv07_HEAPMODE) && (ZSTDv07_HEAPMODE==1)\n    size_t regenSize;\n    ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();\n    if (dctx==NULL) return ERROR(memory_allocation);\n    regenSize = ZSTDv07_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);\n    ZSTDv07_freeDCtx(dctx);\n    return regenSize;\n#else   /* stack mode */\n    ZSTDv07_DCtx dctx;\n    return 
ZSTDv07_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);\n#endif\n}\n\n/* ZSTD_errorFrameSizeInfoLegacy() :\n   assumes `cSize` and `dBound` are _not_ NULL */\nstatic void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)\n{\n    *cSize = ret;\n    *dBound = ZSTD_CONTENTSIZE_ERROR;\n}\n\nvoid ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)\n{\n    const BYTE* ip = (const BYTE*)src;\n    size_t remainingSize = srcSize;\n    size_t nbBlocks = 0;\n\n    /* check */\n    if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) {\n        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n        return;\n    }\n\n    /* Frame Header */\n    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, srcSize);\n        if (ZSTDv07_isError(frameHeaderSize)) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);\n            return;\n        }\n        if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));\n            return;\n        }\n        if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n        ip += frameHeaderSize; remainingSize -= frameHeaderSize;\n    }\n\n    /* Loop on each block */\n    while (1) {\n        blockProperties_t blockProperties;\n        size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);\n        if (ZSTDv07_isError(cBlockSize)) {\n            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);\n            return;\n        }\n\n        ip += ZSTDv07_blockHeaderSize;\n        remainingSize -= ZSTDv07_blockHeaderSize;\n\n        if (blockProperties.blockType == bt_end) break;\n\n        if (cBlockSize > remainingSize) {\n            
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));\n            return;\n        }\n\n        ip += cBlockSize;\n        remainingSize -= cBlockSize;\n        nbBlocks++;\n    }\n\n    *cSize = ip - (const BYTE*)src;\n    *dBound = nbBlocks * ZSTDv07_BLOCKSIZE_ABSOLUTEMAX;\n}\n\n/*_******************************\n*  Streaming Decompression API\n********************************/\nsize_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx)\n{\n    return dctx->expected;\n}\n\nint ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx)\n{\n    return dctx->stage == ZSTDds_skipFrame;\n}\n\n/** ZSTDv07_decompressContinue() :\n*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity)\n*             or an error code, which can be tested using ZSTDv07_isError() */\nsize_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    /* Sanity check */\n    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);\n    if (dstCapacity) ZSTDv07_checkContinuity(dctx, dst);\n\n    switch (dctx->stage)\n    {\n    case ZSTDds_getFrameHeaderSize :\n        if (srcSize != ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */\n        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {\n            memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);\n            dctx->expected = ZSTDv07_skippableHeaderSize - ZSTDv07_frameHeaderSize_min; /* magic number + skippable frame length */\n            dctx->stage = ZSTDds_decodeSkippableHeader;\n            return 0;\n        }\n        dctx->headerSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);\n        if (ZSTDv07_isError(dctx->headerSize)) return dctx->headerSize;\n        memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);\n        if (dctx->headerSize > ZSTDv07_frameHeaderSize_min) {\n            dctx->expected = dctx->headerSize - ZSTDv07_frameHeaderSize_min;\n            
dctx->stage = ZSTDds_decodeFrameHeader;\n            return 0;\n        }\n        dctx->expected = 0;   /* not necessary to copy more */\n\t/* fall-through */\n    case ZSTDds_decodeFrameHeader:\n        {   size_t result;\n            memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);\n            result = ZSTDv07_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);\n            if (ZSTDv07_isError(result)) return result;\n            dctx->expected = ZSTDv07_blockHeaderSize;\n            dctx->stage = ZSTDds_decodeBlockHeader;\n            return 0;\n        }\n    case ZSTDds_decodeBlockHeader:\n        {   blockProperties_t bp;\n            size_t const cBlockSize = ZSTDv07_getcBlockSize(src, ZSTDv07_blockHeaderSize, &bp);\n            if (ZSTDv07_isError(cBlockSize)) return cBlockSize;\n            if (bp.blockType == bt_end) {\n                if (dctx->fParams.checksumFlag) {\n                    U64 const h64 = XXH64_digest(&dctx->xxhState);\n                    U32 const h32 = (U32)(h64>>11) & ((1<<22)-1);\n                    const BYTE* const ip = (const BYTE*)src;\n                    U32 const check32 = ip[2] + (ip[1] << 8) + ((ip[0] & 0x3F) << 16);\n                    if (check32 != h32) return ERROR(checksum_wrong);\n                }\n                dctx->expected = 0;\n                dctx->stage = ZSTDds_getFrameHeaderSize;\n            } else {\n                dctx->expected = cBlockSize;\n                dctx->bType = bp.blockType;\n                dctx->stage = ZSTDds_decompressBlock;\n            }\n            return 0;\n        }\n    case ZSTDds_decompressBlock:\n        {   size_t rSize;\n            switch(dctx->bType)\n            {\n            case bt_compressed:\n                rSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);\n                break;\n            case bt_raw :\n                rSize = ZSTDv07_copyRawBlock(dst, dstCapacity, src, srcSize);\n        
        break;\n            case bt_rle :\n                return ERROR(GENERIC);   /* not yet handled */\n                break;\n            case bt_end :   /* should never happen (filtered at phase 1) */\n                rSize = 0;\n                break;\n            default:\n                return ERROR(GENERIC);   /* impossible */\n            }\n            dctx->stage = ZSTDds_decodeBlockHeader;\n            dctx->expected = ZSTDv07_blockHeaderSize;\n            dctx->previousDstEnd = (char*)dst + rSize;\n            if (ZSTDv07_isError(rSize)) return rSize;\n            if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);\n            return rSize;\n        }\n    case ZSTDds_decodeSkippableHeader:\n        {   memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);\n            dctx->expected = MEM_readLE32(dctx->headerBuffer + 4);\n            dctx->stage = ZSTDds_skipFrame;\n            return 0;\n        }\n    case ZSTDds_skipFrame:\n        {   dctx->expected = 0;\n            dctx->stage = ZSTDds_getFrameHeaderSize;\n            return 0;\n        }\n    default:\n        return ERROR(GENERIC);   /* impossible */\n    }\n}\n\n\nstatic size_t ZSTDv07_refDictContent(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    dctx->dictEnd = dctx->previousDstEnd;\n    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));\n    dctx->base = dict;\n    dctx->previousDstEnd = (const char*)dict + dictSize;\n    return 0;\n}\n\nstatic size_t ZSTDv07_loadEntropy(ZSTDv07_DCtx* dctx, const void* const dict, size_t const dictSize)\n{\n    const BYTE* dictPtr = (const BYTE*)dict;\n    const BYTE* const dictEnd = dictPtr + dictSize;\n\n    {   size_t const hSize = HUFv07_readDTableX4(dctx->hufTable, dict, dictSize);\n        if (HUFv07_isError(hSize)) return ERROR(dictionary_corrupted);\n        dictPtr += hSize;\n    }\n\n    {   short offcodeNCount[MaxOff+1];\n        
U32 offcodeMaxValue=MaxOff, offcodeLog;\n        size_t const offcodeHeaderSize = FSEv07_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);\n        if (FSEv07_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);\n        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);\n        { size_t const errorCode = FSEv07_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);\n          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }\n        dictPtr += offcodeHeaderSize;\n    }\n\n    {   short matchlengthNCount[MaxML+1];\n        unsigned matchlengthMaxValue = MaxML, matchlengthLog;\n        size_t const matchlengthHeaderSize = FSEv07_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);\n        if (FSEv07_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);\n        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);\n        { size_t const errorCode = FSEv07_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);\n          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }\n        dictPtr += matchlengthHeaderSize;\n    }\n\n    {   short litlengthNCount[MaxLL+1];\n        unsigned litlengthMaxValue = MaxLL, litlengthLog;\n        size_t const litlengthHeaderSize = FSEv07_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);\n        if (FSEv07_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);\n        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);\n        { size_t const errorCode = FSEv07_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);\n          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }\n        dictPtr += litlengthHeaderSize;\n    }\n\n    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);\n    dctx->rep[0] = 
MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);\n    dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);\n    dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);\n    dictPtr += 12;\n\n    dctx->litEntropy = dctx->fseEntropy = 1;\n    return dictPtr - (const BYTE*)dict;\n}\n\nstatic size_t ZSTDv07_decompress_insertDictionary(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    if (dictSize < 8) return ZSTDv07_refDictContent(dctx, dict, dictSize);\n    {   U32 const magic = MEM_readLE32(dict);\n        if (magic != ZSTDv07_DICT_MAGIC) {\n            return ZSTDv07_refDictContent(dctx, dict, dictSize);   /* pure content mode */\n    }   }\n    dctx->dictID = MEM_readLE32((const char*)dict + 4);\n\n    /* load entropy tables */\n    dict = (const char*)dict + 8;\n    dictSize -= 8;\n    {   size_t const eSize = ZSTDv07_loadEntropy(dctx, dict, dictSize);\n        if (ZSTDv07_isError(eSize)) return ERROR(dictionary_corrupted);\n        dict = (const char*)dict + eSize;\n        dictSize -= eSize;\n    }\n\n    /* reference dictionary content */\n    return ZSTDv07_refDictContent(dctx, dict, dictSize);\n}\n\n\nsize_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)\n{\n    { size_t const errorCode = ZSTDv07_decompressBegin(dctx);\n      if (ZSTDv07_isError(errorCode)) return errorCode; }\n\n    if (dict && dictSize) {\n        size_t const errorCode = ZSTDv07_decompress_insertDictionary(dctx, dict, dictSize);\n        if (ZSTDv07_isError(errorCode)) return ERROR(dictionary_corrupted);\n    }\n\n    return 0;\n}\n\n\nstruct ZSTDv07_DDict_s {\n    void* dict;\n    size_t dictSize;\n    ZSTDv07_DCtx* refContext;\n};  /* typedef'd tp ZSTDv07_CDict within zstd.h */\n\nstatic ZSTDv07_DDict* 
ZSTDv07_createDDict_advanced(const void* dict, size_t dictSize, ZSTDv07_customMem customMem)\n{\n    if (!customMem.customAlloc && !customMem.customFree)\n        customMem = defaultCustomMem;\n\n    if (!customMem.customAlloc || !customMem.customFree)\n        return NULL;\n\n    {   ZSTDv07_DDict* const ddict = (ZSTDv07_DDict*) customMem.customAlloc(customMem.opaque, sizeof(*ddict));\n        void* const dictContent = customMem.customAlloc(customMem.opaque, dictSize);\n        ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx_advanced(customMem);\n\n        if (!dictContent || !ddict || !dctx) {\n            customMem.customFree(customMem.opaque, dictContent);\n            customMem.customFree(customMem.opaque, ddict);\n            customMem.customFree(customMem.opaque, dctx);\n            return NULL;\n        }\n\n        memcpy(dictContent, dict, dictSize);\n        {   size_t const errorCode = ZSTDv07_decompressBegin_usingDict(dctx, dictContent, dictSize);\n            if (ZSTDv07_isError(errorCode)) {\n                customMem.customFree(customMem.opaque, dictContent);\n                customMem.customFree(customMem.opaque, ddict);\n                customMem.customFree(customMem.opaque, dctx);\n                return NULL;\n        }   }\n\n        ddict->dict = dictContent;\n        ddict->dictSize = dictSize;\n        ddict->refContext = dctx;\n        return ddict;\n    }\n}\n\n/*! 
ZSTDv07_createDDict() :\n*   Create a digested dictionary, ready to start decompression without startup delay.\n*   `dict` can be released after `ZSTDv07_DDict` creation */\nZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize)\n{\n    ZSTDv07_customMem const allocator = { NULL, NULL, NULL };\n    return ZSTDv07_createDDict_advanced(dict, dictSize, allocator);\n}\n\nsize_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict)\n{\n    ZSTDv07_freeFunction const cFree = ddict->refContext->customMem.customFree;\n    void* const opaque = ddict->refContext->customMem.opaque;\n    ZSTDv07_freeDCtx(ddict->refContext);\n    cFree(opaque, ddict->dict);\n    cFree(opaque, ddict);\n    return 0;\n}\n\n/*! ZSTDv07_decompress_usingDDict() :\n*   Decompression using a pre-digested Dictionary\n*   Use dictionary without significant overhead. */\nZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,\n                                           void* dst, size_t dstCapacity,\n                                     const void* src, size_t srcSize,\n                                     const ZSTDv07_DDict* ddict)\n{\n    return ZSTDv07_decompress_usingPreparedDCtx(dctx, ddict->refContext,\n                                           dst, dstCapacity,\n                                           src, srcSize);\n}\n/*\n    Buffered version of Zstd compression library\n    Copyright (C) 2015-2016, Yann Collet.\n\n    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are\n    met:\n    * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\n    copyright notice, this list of conditions and the following disclaimer\n    in the documentation and/or other materials 
provided with the\n    distribution.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    You can contact the author at :\n    - zstd homepage : http://www.zstd.net/\n*/\n\n\n\n/*-***************************************************************************\n*  Streaming decompression howto\n*\n*  A ZBUFFv07_DCtx object is required to track streaming operations.\n*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.\n*  Use ZBUFFv07_decompressInit() to start a new decompression operation,\n*   or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.\n*  Note that ZBUFFv07_DCtx objects can be re-init multiple times.\n*\n*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *dstCapacityPtr can be any size.\n*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.\n*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.\n*  @return : a hint to preferred nb of bytes to use as 
input for next function call (it's only a hint, to help latency),\n*            or 0 when a frame is completely decoded,\n*            or an error code, which can be tested using ZBUFFv07_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()\n*  output : ZBUFFv07_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.\n*  input  : ZBUFFv07_recommendedDInSize == 128KB + 3;\n*           just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .\n* *******************************************************************************/\n\ntypedef enum { ZBUFFds_init, ZBUFFds_loadHeader,\n               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv07_dStage;\n\n/* *** Resource management *** */\nstruct ZBUFFv07_DCtx_s {\n    ZSTDv07_DCtx* zd;\n    ZSTDv07_frameParams fParams;\n    ZBUFFv07_dStage stage;\n    char*  inBuff;\n    size_t inBuffSize;\n    size_t inPos;\n    char*  outBuff;\n    size_t outBuffSize;\n    size_t outStart;\n    size_t outEnd;\n    size_t blockSize;\n    BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];\n    size_t lhSize;\n    ZSTDv07_customMem customMem;\n};   /* typedef'd to ZBUFFv07_DCtx within \"zstd_buffered.h\" */\n\nZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem);\n\nZBUFFv07_DCtx* ZBUFFv07_createDCtx(void)\n{\n    return ZBUFFv07_createDCtx_advanced(defaultCustomMem);\n}\n\nZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem)\n{\n    ZBUFFv07_DCtx* zbd;\n\n    if (!customMem.customAlloc && !customMem.customFree)\n        customMem = defaultCustomMem;\n\n    if (!customMem.customAlloc || !customMem.customFree)\n        return NULL;\n\n    zbd = (ZBUFFv07_DCtx*)customMem.customAlloc(customMem.opaque, sizeof(ZBUFFv07_DCtx));\n    if (zbd==NULL) return NULL;\n    memset(zbd, 0, 
sizeof(ZBUFFv07_DCtx));\n    memcpy(&zbd->customMem, &customMem, sizeof(ZSTDv07_customMem));\n    zbd->zd = ZSTDv07_createDCtx_advanced(customMem);\n    if (zbd->zd == NULL) { ZBUFFv07_freeDCtx(zbd); return NULL; }\n    zbd->stage = ZBUFFds_init;\n    return zbd;\n}\n\nsize_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* zbd)\n{\n    if (zbd==NULL) return 0;   /* support free on null */\n    ZSTDv07_freeDCtx(zbd->zd);\n    if (zbd->inBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);\n    if (zbd->outBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);\n    zbd->customMem.customFree(zbd->customMem.opaque, zbd);\n    return 0;\n}\n\n\n/* *** Initialization *** */\n\nsize_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* zbd, const void* dict, size_t dictSize)\n{\n    zbd->stage = ZBUFFds_loadHeader;\n    zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;\n    return ZSTDv07_decompressBegin_usingDict(zbd->zd, dict, dictSize);\n}\n\nsize_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* zbd)\n{\n    return ZBUFFv07_decompressInitDictionary(zbd, NULL, 0);\n}\n\n\n/* internal util function */\nMEM_STATIC size_t ZBUFFv07_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)\n{\n    size_t const length = MIN(dstCapacity, srcSize);\n    if (length > 0) {\n        memcpy(dst, src, length);\n    }\n    return length;\n}\n\n\n/* *** Decompression *** */\n\nsize_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,\n                                void* dst, size_t* dstCapacityPtr,\n                          const void* src, size_t* srcSizePtr)\n{\n    const char* const istart = (const char*)src;\n    const char* const iend = istart + *srcSizePtr;\n    const char* ip = istart;\n    char* const ostart = (char*)dst;\n    char* const oend = ostart + *dstCapacityPtr;\n    char* op = ostart;\n    U32 notDone = 1;\n\n    while (notDone) {\n        switch(zbd->stage)\n        {\n        case ZBUFFds_init :\n            return 
ERROR(init_missing);\n\n        case ZBUFFds_loadHeader :\n            {   size_t const hSize = ZSTDv07_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);\n                if (ZSTDv07_isError(hSize)) return hSize;\n                if (hSize != 0) {\n                    size_t const toLoad = hSize - zbd->lhSize;   /* if hSize!=0, hSize > zbd->lhSize */\n                    if (toLoad > (size_t)(iend-ip)) {   /* not enough input to load full header */\n                        memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);\n                        zbd->lhSize += iend-ip;\n                        *dstCapacityPtr = 0;\n                        return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize;   /* remaining header bytes + next block header */\n                    }\n                    memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;\n                    break;\n            }   }\n\n            /* Consume header */\n            {   size_t const h1Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);  /* == ZSTDv07_frameHeaderSize_min */\n                size_t const h1Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);\n                if (ZSTDv07_isError(h1Result)) return h1Result;\n                if (h1Size < zbd->lhSize) {   /* long header */\n                    size_t const h2Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);\n                    size_t const h2Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);\n                    if (ZSTDv07_isError(h2Result)) return h2Result;\n            }   }\n\n            zbd->fParams.windowSize = MAX(zbd->fParams.windowSize, 1U << ZSTDv07_WINDOWLOG_ABSOLUTEMIN);\n\n            /* Frame header instruct buffer sizes */\n            {   size_t const blockSize = MIN(zbd->fParams.windowSize, ZSTDv07_BLOCKSIZE_ABSOLUTEMAX);\n                zbd->blockSize = blockSize;\n                if (zbd->inBuffSize 
< blockSize) {\n                    zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);\n                    zbd->inBuffSize = blockSize;\n                    zbd->inBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, blockSize);\n                    if (zbd->inBuff == NULL) return ERROR(memory_allocation);\n                }\n                {   size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;\n                    if (zbd->outBuffSize < neededOutSize) {\n                        zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);\n                        zbd->outBuffSize = neededOutSize;\n                        zbd->outBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, neededOutSize);\n                        if (zbd->outBuff == NULL) return ERROR(memory_allocation);\n            }   }   }\n            zbd->stage = ZBUFFds_read;\n            /* pass-through */\n\t    /* fall-through */\n        case ZBUFFds_read:\n            {   size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);\n                if (neededInSize==0) {  /* end of frame */\n                    zbd->stage = ZBUFFds_init;\n                    notDone = 0;\n                    break;\n                }\n                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */\n                    const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);\n                    size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,\n                        zbd->outBuff + zbd->outStart, (isSkipFrame ? 
0 : zbd->outBuffSize - zbd->outStart),\n                        ip, neededInSize);\n                    if (ZSTDv07_isError(decodedSize)) return decodedSize;\n                    ip += neededInSize;\n                    if (!decodedSize && !isSkipFrame) break;   /* this was just a header */\n                    zbd->outEnd = zbd->outStart +  decodedSize;\n                    zbd->stage = ZBUFFds_flush;\n                    break;\n                }\n                if (ip==iend) { notDone = 0; break; }   /* no more input */\n                zbd->stage = ZBUFFds_load;\n            }\n\t    /* fall-through */\n        case ZBUFFds_load:\n            {   size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);\n                size_t const toLoad = neededInSize - zbd->inPos;   /* should always be <= remaining space within inBuff */\n                size_t loadedSize;\n                if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected);   /* should never happen */\n                loadedSize = ZBUFFv07_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);\n                ip += loadedSize;\n                zbd->inPos += loadedSize;\n                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */\n\n                /* decode loaded input */\n                {  const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);\n                   size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,\n                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,\n                        zbd->inBuff, neededInSize);\n                    if (ZSTDv07_isError(decodedSize)) return decodedSize;\n                    zbd->inPos = 0;   /* input is consumed */\n                    if (!decodedSize && !isSkipFrame) { zbd->stage = ZBUFFds_read; break; }   /* this was just a header */\n                    zbd->outEnd = zbd->outStart +  decodedSize;\n                    zbd->stage = 
ZBUFFds_flush;\n                    /* break; */\n                    /* pass-through */\n                }\n\t    }\n\t    /* fall-through */\n        case ZBUFFds_flush:\n            {   size_t const toFlushSize = zbd->outEnd - zbd->outStart;\n                size_t const flushedSize = ZBUFFv07_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);\n                op += flushedSize;\n                zbd->outStart += flushedSize;\n                if (flushedSize == toFlushSize) {\n                    zbd->stage = ZBUFFds_read;\n                    if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)\n                        zbd->outStart = zbd->outEnd = 0;\n                    break;\n                }\n                /* cannot flush everything */\n                notDone = 0;\n                break;\n            }\n        default: return ERROR(GENERIC);   /* impossible */\n    }   }\n\n    /* result */\n    *srcSizePtr = ip-istart;\n    *dstCapacityPtr = op-ostart;\n    {   size_t nextSrcSizeHint = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);\n        nextSrcSizeHint -= zbd->inPos;   /* already loaded*/\n        return nextSrcSizeHint;\n    }\n}\n\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nsize_t ZBUFFv07_recommendedDInSize(void)  { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + ZSTDv07_blockHeaderSize /* block header size*/ ; }\nsize_t ZBUFFv07_recommendedDOutSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX; }\n"
  },
  {
    "path": "src/third_party/zstd/lib/legacy/zstd_v07.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n\n#ifndef ZSTDv07_H_235446\n#define ZSTDv07_H_235446\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n/*======  Dependency  ======*/\n#include <stddef.h>   /* size_t */\n\n\n/*======  Export for Windows  ======*/\n/*!\n*  ZSTDv07_DLL_EXPORT :\n*  Enable exporting of functions when building a Windows DLL\n*/\n#if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1)\n#  define ZSTDLIBv07_API __declspec(dllexport)\n#else\n#  define ZSTDLIBv07_API\n#endif\n\n\n/* *************************************\n*  Simple API\n***************************************/\n/*! ZSTDv07_getDecompressedSize() :\n*   @return : decompressed size if known, 0 otherwise.\n       note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to know precise failure cause.\n       note 2 : decompressed size could be wrong or intentionally modified !\n                always ensure results fit within application's authorized limits */\nunsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);\n\n/*! 
ZSTDv07_decompress() :\n    `compressedSize` : must be _exact_ size of compressed input, otherwise decompression will fail.\n    `dstCapacity` must be equal or larger than originalSize.\n    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\n              or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */\nZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,\n                                    const void* src, size_t compressedSize);\n\n/**\nZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.7.x format\n    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'\n    cSize (output parameter)  : the number of bytes that would be read to decompress this frame\n                                or an error code if it fails (which can be tested using ZSTDv01_isError())\n    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame\n                                or ZSTD_CONTENTSIZE_ERROR if an error occurs\n\n    note : assumes `cSize` and `dBound` are _not_ NULL.\n*/\nvoid ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize,\n                                     size_t* cSize, unsigned long long* dBound);\n\n/*======  Helper functions  ======*/\nZSTDLIBv07_API unsigned    ZSTDv07_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */\nZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code);     /*!< provides readable string from an error code */\n\n\n/*-*************************************\n*  Explicit memory management\n***************************************/\n/** Decompression context */\ntypedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx;\nZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void);\nZSTDLIBv07_API size_t     ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx);      /*!< @return : errorCode */\n\n/** 
ZSTDv07_decompressDCtx() :\n*   Same as ZSTDv07_decompress(), requires an allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */\nZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n\n/*-************************\n*  Simple dictionary API\n***************************/\n/*! ZSTDv07_decompress_usingDict() :\n*   Decompression using a pre-defined Dictionary content (see dictBuilder).\n*   Dictionary must be identical to the one used during compression.\n*   Note : This function load the dictionary, resulting in a significant startup time */\nZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,\n                                                   void* dst, size_t dstCapacity,\n                                             const void* src, size_t srcSize,\n                                             const void* dict,size_t dictSize);\n\n\n/*-**************************\n*  Advanced Dictionary API\n****************************/\n/*! ZSTDv07_createDDict() :\n*   Create a digested dictionary, ready to start decompression operation without startup delay.\n*   `dict` can be released after creation */\ntypedef struct ZSTDv07_DDict_s ZSTDv07_DDict;\nZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize);\nZSTDLIBv07_API size_t      ZSTDv07_freeDDict(ZSTDv07_DDict* ddict);\n\n/*! ZSTDv07_decompress_usingDDict() :\n*   Decompression using a pre-digested Dictionary\n*   Faster startup than ZSTDv07_decompress_usingDict(), recommended when same dictionary is used multiple times. 
*/\nZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,\n                                                    void* dst, size_t dstCapacity,\n                                              const void* src, size_t srcSize,\n                                              const ZSTDv07_DDict* ddict);\n\ntypedef struct {\n    unsigned long long frameContentSize;\n    unsigned windowSize;\n    unsigned dictID;\n    unsigned checksumFlag;\n} ZSTDv07_frameParams;\n\nZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */\n\n\n\n\n/* *************************************\n*  Streaming functions\n***************************************/\ntypedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx;\nZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void);\nZSTDLIBv07_API size_t      ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx);\n\nZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx);\nZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize);\n\nZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx,\n                                            void* dst, size_t* dstCapacityPtr,\n                                      const void* src, size_t* srcSizePtr);\n\n/*-***************************************************************************\n*  Streaming decompression howto\n*\n*  A ZBUFFv07_DCtx object is required to track streaming operations.\n*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.\n*  Use ZBUFFv07_decompressInit() to start a new decompression operation,\n*   or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.\n*  Note that ZBUFFv07_DCtx objects can be re-init multiple times.\n*\n*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.\n*  *srcSizePtr and *dstCapacityPtr can be any size.\n*  The function will report how many 
bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.\n*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.\n*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.\n*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),\n*            or 0 when a frame is completely decoded,\n*            or an error code, which can be tested using ZBUFFv07_isError().\n*\n*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()\n*  output : ZBUFFv07_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.\n*  input  : ZBUFFv07_recommendedDInSize == 128KB + 3;\n*           just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .\n* *******************************************************************************/\n\n\n/* *************************************\n*  Tool functions\n***************************************/\nZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode);\nZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode);\n\n/** Functions below provide recommended buffer sizes for Compression or Decompression operations.\n*   These sizes are just hints, they tend to offer better latency */\nZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void);\nZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void);\n\n\n/*-*************************************\n*  Constants\n***************************************/\n#define ZSTDv07_MAGICNUMBER            0xFD2FB527   /* v0.7 */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif  /* ZSTDv07_H_235446 */\n"
  },
  {
    "path": "src/third_party/zstd/lib/libzstd.pc.in",
    "content": "#   ZSTD - standard compression algorithm\n#   Copyright (C) 2014-2016, Yann Collet, Facebook\n#   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\nprefix=@PREFIX@\nexec_prefix=${prefix}\nincludedir=${prefix}/include\nlibdir=${exec_prefix}/lib\n\nName: zstd\nDescription: fast lossless compression algorithm library\nURL: http://www.zstd.net/\nVersion: @VERSION@\nLibs: -L${libdir} -lzstd\nCflags: -I${includedir}\n"
  },
  {
    "path": "src/third_party/zstd/lib/zstd.h",
    "content": "/*\n * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under both the BSD-style license (found in the\n * LICENSE file in the root directory of this source tree) and the GPLv2 (found\n * in the COPYING file in the root directory of this source tree).\n * You may select, at your option, one of the above-listed licenses.\n */\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n#ifndef ZSTD_H_235446\n#define ZSTD_H_235446\n\n/* ======   Dependency   ======*/\n#include <limits.h>   /* INT_MAX */\n#include <stddef.h>   /* size_t */\n\n\n/* =====   ZSTDLIB_API : control library symbols visibility   ===== */\n#ifndef ZSTDLIB_VISIBILITY\n#  if defined(__GNUC__) && (__GNUC__ >= 4)\n#    define ZSTDLIB_VISIBILITY __attribute__ ((visibility (\"default\")))\n#  else\n#    define ZSTDLIB_VISIBILITY\n#  endif\n#endif\n#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)\n#  define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY\n#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)\n#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/\n#else\n#  define ZSTDLIB_API ZSTDLIB_VISIBILITY\n#endif\n\n\n/*******************************************************************************\n  Introduction\n\n  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting\n  real-time compression scenarios at zlib-level and better compression ratios.\n  The zstd compression library provides in-memory compression and decompression\n  functions.\n\n  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),\n  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with\n  caution, as they require more memory. The library also offers negative\n  compression levels, which extend the range of speed vs. 
ratio preferences.\n  The lower the level, the faster the speed (at the cost of compression).\n\n  Compression can be done in:\n    - a single step (described as Simple API)\n    - a single step, reusing a context (described as Explicit context)\n    - unbounded multiple steps (described as Streaming compression)\n\n  The compression ratio achievable on small data can be highly improved using\n  a dictionary. Dictionary compression can be performed in:\n    - a single step (described as Simple dictionary API)\n    - a single step, reusing a dictionary (described as Bulk-processing\n      dictionary API)\n\n  Advanced experimental functions can be accessed using\n  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.\n\n  Advanced experimental APIs should never be used with a dynamically-linked\n  library. They are not \"stable\"; their definitions or signatures may change in\n  the future. Only static linking is allowed.\n*******************************************************************************/\n\n/*------   Version   ------*/\n#define ZSTD_VERSION_MAJOR    1\n#define ZSTD_VERSION_MINOR    4\n#define ZSTD_VERSION_RELEASE  5\n\n#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)\nZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< to check runtime library version */\n\n#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE\n#define ZSTD_QUOTE(str) #str\n#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)\n#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)\nZSTDLIB_API const char* ZSTD_versionString(void);   /* requires v1.3.0+ */\n\n/* *************************************\n *  Default constant\n ***************************************/\n#ifndef ZSTD_CLEVEL_DEFAULT\n#  define ZSTD_CLEVEL_DEFAULT 3\n#endif\n\n/* *************************************\n *  Constants\n ***************************************/\n\n/* All magic numbers are supposed 
read/written to/from files/memory using little-endian convention */\n#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */\n#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */\n#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */\n#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0\n\n#define ZSTD_BLOCKSIZELOG_MAX  17\n#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)\n\n\n\n/***************************************\n*  Simple API\n***************************************/\n/*! ZSTD_compress() :\n *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.\n *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.\n *  @return : compressed size written into `dst` (<= `dstCapacity),\n *            or an error code if it fails (which can be tested using ZSTD_isError()). */\nZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,\n                            const void* src, size_t srcSize,\n                                  int compressionLevel);\n\n/*! ZSTD_decompress() :\n *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.\n *  `dstCapacity` is an upper bound of originalSize to regenerate.\n *  If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.\n *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\n *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */\nZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,\n                              const void* src, size_t compressedSize);\n\n/*! 
ZSTD_getFrameContentSize() : requires v1.3.0+\n *  `src` should point to the start of a ZSTD encoded frame.\n *  `srcSize` must be at least as large as the frame header.\n *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.\n *  @return : - decompressed size of `src` frame content, if known\n *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined\n *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)\n *   note 1 : a 0 return value means the frame is valid but \"empty\".\n *   note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.\n *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.\n *            In which case, it's necessary to use streaming mode to decompress data.\n *            Optionally, application can rely on some implicit limit,\n *            as ZSTD_decompress() only needs an upper bound of decompressed size.\n *            (For example, data could be necessarily cut into blocks <= 16 KB).\n *   note 3 : decompressed size is always present when compression is completed using single-pass functions,\n *            such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().\n *   note 4 : decompressed size can be very large (64-bits value),\n *            potentially larger than what local system can handle as a single memory segment.\n *            In which case, it's necessary to use streaming mode to decompress data.\n *   note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.\n *            Always ensure return value fits within application's authorized limits.\n *            Each application can set its own limits.\n *   note 6 : This function replaces ZSTD_getDecompressedSize() */\n#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)\n#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)\nZSTDLIB_API unsigned 
long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);\n\n/*! ZSTD_getDecompressedSize() :\n *  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().\n *  Both functions work the same way, but ZSTD_getDecompressedSize() blends\n *  \"empty\", \"unknown\" and \"error\" results to the same return value (0),\n *  while ZSTD_getFrameContentSize() gives them separate return values.\n * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */\nZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);\n\n/*! ZSTD_findFrameCompressedSize() :\n * `src` should point to the start of a ZSTD frame or skippable frame.\n * `srcSize` must be >= first frame size\n * @return : the compressed size of the first frame starting at `src`,\n *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,\n *        or an error code if input is invalid */\nZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);\n\n\n/*======  Helper functions  ======*/\n#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? 
(((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */\nZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */\nZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */\nZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */\nZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */\nZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */\n\n\n/***************************************\n*  Explicit context\n***************************************/\n/*= Compression context\n *  When compressing many times,\n *  it is recommended to allocate a context just once,\n *  and re-use it for each successive compression operation.\n *  This will make workload friendlier for system's memory.\n *  Note : re-using context is just a speed / resource optimization.\n *         It doesn't change the compression ratio, which remains identical.\n *  Note 2 : In multi-threaded environments,\n *         use one different context per thread for parallel execution.\n */\ntypedef struct ZSTD_CCtx_s ZSTD_CCtx;\nZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);\nZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);\n\n/*! ZSTD_compressCCtx() :\n *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\n *  Important : in order to behave similarly to `ZSTD_compress()`,\n *  this function compresses at requested compression level,\n *  __ignoring any other parameter__ .\n *  If any advanced parameter was set using the advanced API,\n *  they will all be reset. 
Only `compressionLevel` remains.\n */\nZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,\n                                     void* dst, size_t dstCapacity,\n                               const void* src, size_t srcSize,\n                                     int compressionLevel);\n\n/*= Decompression context\n *  When decompressing many times,\n *  it is recommended to allocate a context only once,\n *  and re-use it for each successive compression operation.\n *  This will make workload friendlier for system's memory.\n *  Use one context per thread for parallel execution. */\ntypedef struct ZSTD_DCtx_s ZSTD_DCtx;\nZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);\nZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);\n\n/*! ZSTD_decompressDCtx() :\n *  Same as ZSTD_decompress(),\n *  requires an allocated ZSTD_DCtx.\n *  Compatible with sticky parameters.\n */\nZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,\n                                       void* dst, size_t dstCapacity,\n                                 const void* src, size_t srcSize);\n\n\n/***************************************\n*  Advanced compression API\n***************************************/\n\n/* API design :\n *   Parameters are pushed one by one into an existing context,\n *   using ZSTD_CCtx_set*() functions.\n *   Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.\n *   \"sticky\" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !\n *   __They do not apply to \"simple\" one-shot variants such as ZSTD_compressCCtx()__ .\n *\n *   It's possible to reset all parameters to \"default\" using ZSTD_CCtx_reset().\n *\n *   This API supercedes all other \"advanced\" API entry points in the experimental section.\n *   In the future, we expect to remove from experimental API entry points which are redundant with this API.\n */\n\n\n/* Compression strategies, listed from fastest to strongest */\ntypedef enum { 
ZSTD_fast=1,\n               ZSTD_dfast=2,\n               ZSTD_greedy=3,\n               ZSTD_lazy=4,\n               ZSTD_lazy2=5,\n               ZSTD_btlazy2=6,\n               ZSTD_btopt=7,\n               ZSTD_btultra=8,\n               ZSTD_btultra2=9\n               /* note : new strategies _might_ be added in the future.\n                         Only the order (from fast to strong) is guaranteed */\n} ZSTD_strategy;\n\n\ntypedef enum {\n\n    /* compression parameters\n     * Note: When compressing with a ZSTD_CDict these parameters are superseded\n     * by the parameters used to construct the ZSTD_CDict.\n     * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */\n    ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.\n                              * Note that exact compression parameters are dynamically determined,\n                              * depending on both compression level and srcSize (when known).\n                              * Default level is ZSTD_CLEVEL_DEFAULT==3.\n                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.\n                              * Note 1 : it's possible to pass a negative compression level.\n                              * Note 2 : setting a level resets all other compression parameters to default */\n    /* Advanced compression parameters :\n     * It's possible to pin down compression parameters to some specific values.\n     * In which case, these values are no longer dynamically selected by the compressor */\n    ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.\n                              * This will set a memory budget for streaming decompression,\n                              * with larger values requiring more memory\n                              * and typically compressing more.\n                              * Must be clamped between 
ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.\n                              * Special: value 0 means \"use default windowLog\".\n                              * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT\n                              *       requires explicitly allowing such size at streaming decompression stage. */\n    ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.\n                              * Resulting memory usage is (1 << (hashLog+2)).\n                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.\n                              * Larger tables improve compression ratio of strategies <= dFast,\n                              * and improve speed of strategies > dFast.\n                              * Special: value 0 means \"use default hashLog\". */\n    ZSTD_c_chainLog=103,     /* Size of the multi-probe search table, as a power of 2.\n                              * Resulting memory usage is (1 << (chainLog+2)).\n                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.\n                              * Larger tables result in better and slower compression.\n                              * This parameter is useless for \"fast\" strategy.\n                              * It's still useful when using \"dfast\" strategy,\n                              * in which case it defines a secondary probe table.\n                              * Special: value 0 means \"use default chainLog\". */\n    ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.\n                              * More attempts result in better and slower compression.\n                              * This parameter is useless for \"fast\" and \"dFast\" strategies.\n                              * Special: value 0 means \"use default searchLog\". 
*/\n    ZSTD_c_minMatch=105,     /* Minimum size of searched matches.\n                              * Note that Zstandard can still find matches of smaller size,\n                              * it just tweaks its search algorithm to look for this size and larger.\n                              * Larger values increase compression and decompression speed, but decrease ratio.\n                              * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.\n                              * Note that currently, for all strategies < btopt, effective minimum is 4.\n                              *                    , for all strategies > fast, effective maximum is 6.\n                              * Special: value 0 means \"use default minMatchLength\". */\n    ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.\n                              * For strategies btopt, btultra & btultra2:\n                              *     Length of Match considered \"good enough\" to stop search.\n                              *     Larger values make compression stronger, and slower.\n                              * For strategy fast:\n                              *     Distance between match sampling.\n                              *     Larger values make compression faster, and weaker.\n                              * Special: value 0 means \"use default targetLength\". */\n    ZSTD_c_strategy=107,     /* See ZSTD_strategy enum definition.\n                              * The higher the value of selected strategy, the more complex it is,\n                              * resulting in stronger and slower compression.\n                              * Special: value 0 means \"use default strategy\". 
*/\n\n    /* LDM mode parameters */\n    ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.\n                                     * This parameter is designed to improve compression ratio\n                                     * for large inputs, by finding large matches at long distance.\n                                     * It increases memory usage and window size.\n                                     * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB\n                                     * except when expressly set to a different value. */\n    ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.\n                              * Larger values increase memory usage and compression ratio,\n                              * but decrease compression speed.\n                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX\n                              * default: windowlog - 7.\n                              * Special: value 0 means \"automatically determine hashlog\". */\n    ZSTD_c_ldmMinMatch=162,  /* Minimum match size for long distance matcher.\n                              * Larger/too small values usually decrease compression ratio.\n                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.\n                              * Special: value 0 means \"use default value\" (default: 64). */\n    ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.\n                              * Larger values improve collision resolution but decrease compression speed.\n                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.\n                              * Special: value 0 means \"use default value\" (default: 3). 
*/\n    ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.\n                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).\n                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.\n                              * Larger values improve compression speed.\n                              * Deviating far from default value will likely result in a compression ratio decrease.\n                              * Special: value 0 means \"automatically determine hashRateLog\". */\n\n    /* frame parameters */\n    ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)\n                              * Content size must be known at the beginning of compression.\n                              * This is automatically the case when using ZSTD_compress2(),\n                              * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */\n    ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */\n    ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */\n\n    /* multi-threading parameters */\n    /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).\n     * They return an error otherwise. 
*/\n    ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.\n                              * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() :\n                              * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,\n                              * while compression work is performed in parallel, within worker threads.\n                              * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :\n                              *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).\n                              * More workers improve speed, but also increase memory usage.\n                              * Default value is `0`, aka \"single-threaded mode\" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */\n    ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.\n                              * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.\n                              * 0 means default, which is dynamically determined based on compression parameters.\n                              * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.\n                              * The minimum size is automatically and transparently enforced. 
*/\n    ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.\n                              * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.\n                              * It helps preserve compression ratio, while each job is compressed in parallel.\n                              * This value is enforced only when nbWorkers >= 1.\n                              * Larger values increase compression ratio, but decrease speed.\n                              * Possible values range from 0 to 9 :\n                              * - 0 means \"default\" : value will be determined by the library, depending on strategy\n                              * - 1 means \"no overlap\"\n                              * - 9 means \"full overlap\", using a full window size.\n                              * Each intermediate rank increases/decreases load size by a factor 2 :\n                              * 9: full window;  8: w/2;  7: w/4;  6: w/8;  5:w/16;  4: w/32;  3:w/64;  2:w/128;  1:no overlap;  0:default\n                              * default value varies between 6 and 9, depending on strategy */\n\n    /* note : additional experimental parameters are also available\n     * within the experimental section of the API.\n     * At the time of this writing, they include :\n     * ZSTD_c_rsyncable\n     * ZSTD_c_format\n     * ZSTD_c_forceMaxWindow\n     * ZSTD_c_forceAttachDict\n     * ZSTD_c_literalCompressionMode\n     * ZSTD_c_targetCBlockSize\n     * ZSTD_c_srcSizeHint\n     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.\n     * note : never ever use experimentalParam? 
names directly;\n     *        also, the enums values themselves are unstable and can still change.\n     */\n     ZSTD_c_experimentalParam1=500,\n     ZSTD_c_experimentalParam2=10,\n     ZSTD_c_experimentalParam3=1000,\n     ZSTD_c_experimentalParam4=1001,\n     ZSTD_c_experimentalParam5=1002,\n     ZSTD_c_experimentalParam6=1003,\n     ZSTD_c_experimentalParam7=1004\n} ZSTD_cParameter;\n\ntypedef struct {\n    size_t error;\n    int lowerBound;\n    int upperBound;\n} ZSTD_bounds;\n\n/*! ZSTD_cParam_getBounds() :\n *  All parameters must belong to an interval with lower and upper bounds,\n *  otherwise they will either trigger an error or be automatically clamped.\n * @return : a structure, ZSTD_bounds, which contains\n *         - an error status field, which must be tested using ZSTD_isError()\n *         - lower and upper bounds, both inclusive\n */\nZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);\n\n/*! ZSTD_CCtx_setParameter() :\n *  Set one compression parameter, selected by enum ZSTD_cParameter.\n *  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().\n *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\n *  Setting a parameter is generally only possible during frame initialization (before starting compression).\n *  Exception : when using multi-threading mode (nbWorkers >= 1),\n *              the following parameters can be updated _during_ compression (within same frame):\n *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.\n *              new parameters will be active for next job only (after a flush()).\n * @return : an error code (which can be tested using ZSTD_isError()).\n */\nZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);\n\n/*! 
ZSTD_CCtx_setPledgedSrcSize() :\n *  Total input data size to be compressed as a single frame.\n *  Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.\n *  This value will also be controlled at end of frame, and trigger an error if not respected.\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n *  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.\n *           In order to mean \"unknown content size\", pass constant ZSTD_CONTENTSIZE_UNKNOWN.\n *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.\n *  Note 2 : pledgedSrcSize is only valid once, for the next frame.\n *           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.\n *  Note 3 : Whenever all input data is provided and consumed in a single round,\n *           for example with ZSTD_compress2(),\n *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),\n *           this value is automatically overridden by srcSize instead.\n */\nZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);\n\ntypedef enum {\n    ZSTD_reset_session_only = 1,\n    ZSTD_reset_parameters = 2,\n    ZSTD_reset_session_and_parameters = 3\n} ZSTD_ResetDirective;\n\n/*! 
ZSTD_CCtx_reset() :\n *  There are 2 different things that can be reset, independently or jointly :\n *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.\n *                  Useful after an error, or to interrupt any ongoing compression.\n *                  Any internal data not yet flushed is cancelled.\n *                  Compression parameters and dictionary remain unchanged.\n *                  They will be used to compress next frame.\n *                  Resetting session never fails.\n *  - The parameters : changes all parameters back to \"default\".\n *                  This removes any reference to any dictionary too.\n *                  Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)\n *                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())\n *  - Both : similar to resetting the session, followed by resetting parameters.\n */\nZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);\n\n/*! 
ZSTD_compress2() :\n *  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\n *  ZSTD_compress2() always starts a new frame.\n *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\n *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n *  - The function is always blocking, returns when compression is completed.\n *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.\n * @return : compressed size written into `dst` (<= `dstCapacity),\n *           or an error code if it fails (which can be tested using ZSTD_isError()).\n */\nZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,\n                                   void* dst, size_t dstCapacity,\n                             const void* src, size_t srcSize);\n\n\n/***************************************\n*  Advanced decompression API\n***************************************/\n\n/* The advanced API pushes parameters one by one into an existing DCtx context.\n * Parameters are sticky, and remain valid for all following frames\n * using the same DCtx context.\n * It's possible to reset parameters to default values using ZSTD_DCtx_reset().\n * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().\n *        Therefore, no new decompression function is necessary.\n */\n\ntypedef enum {\n\n    ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which\n                              * the streaming API will refuse to allocate memory buffer\n                              * in order to protect the host from unreasonable memory requirements.\n                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.\n                              * By default, a decompression context accepts window sizes <= (1 << 
ZSTD_WINDOWLOG_LIMIT_DEFAULT).\n                              * Special: value 0 means \"use default maximum windowLog\". */\n\n    /* note : additional experimental parameters are also available\n     * within the experimental section of the API.\n     * At the time of this writing, they include :\n     * ZSTD_c_format\n     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.\n     * note : never ever use experimentalParam? names directly\n     */\n     ZSTD_d_experimentalParam1=1000\n\n} ZSTD_dParameter;\n\n/*! ZSTD_dParam_getBounds() :\n *  All parameters must belong to an interval with lower and upper bounds,\n *  otherwise they will either trigger an error or be automatically clamped.\n * @return : a structure, ZSTD_bounds, which contains\n *         - an error status field, which must be tested using ZSTD_isError()\n *         - both lower and upper bounds, inclusive\n */\nZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);\n\n/*! ZSTD_DCtx_setParameter() :\n *  Set one compression parameter, selected by enum ZSTD_dParameter.\n *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().\n *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\n *  Setting a parameter is only possible during frame initialization (before starting decompression).\n * @return : 0, or an error code (which can be tested using ZSTD_isError()).\n */\nZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);\n\n/*! 
ZSTD_DCtx_reset() :\n *  Return a DCtx to clean state.\n *  Session and parameters can be reset jointly or separately.\n *  Parameters can only be reset when no active frame is being decompressed.\n * @return : 0, or an error code, which can be tested with ZSTD_isError()\n */\nZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);\n\n\n/****************************\n*  Streaming\n****************************/\n\ntypedef struct ZSTD_inBuffer_s {\n  const void* src;    /**< start of input buffer */\n  size_t size;        /**< size of input buffer */\n  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */\n} ZSTD_inBuffer;\n\ntypedef struct ZSTD_outBuffer_s {\n  void*  dst;         /**< start of output buffer */\n  size_t size;        /**< size of output buffer */\n  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */\n} ZSTD_outBuffer;\n\n\n\n/*-***********************************************************************\n*  Streaming compression - HowTo\n*\n*  A ZSTD_CStream object is required to track streaming operation.\n*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.\n*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.\n*  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.\n*\n*  For parallel execution, use one separate ZSTD_CStream per thread.\n*\n*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.\n*\n*  Parameters are sticky : when starting a new compression on the same context,\n*  it will re-use the same sticky parameters as previous compression session.\n*  When in doubt, it's recommended to fully initialize the context before usage.\n*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),\n*  ZSTD_CCtx_setPledgedSrcSize(), or 
ZSTD_CCtx_loadDictionary() and friends to\n*  set more specific parameters, the pledged source size, or load a dictionary.\n*\n*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to\n*  consume input stream. The function will automatically update both `pos`\n*  fields within `input` and `output`.\n*  Note that the function may not consume the entire input, for example, because\n*  the output buffer is already full, in which case `input.pos < input.size`.\n*  The caller must check if input has been entirely consumed.\n*  If not, the caller must make some room to receive more compressed data,\n*  and then present again remaining input data.\n*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,\n*        but doesn't guarantee maximal forward progress. This is especially relevant\n*        when compressing with multiple threads. The call won't block if it can\n*        consume some input, but if it can't it will wait for some, but not all,\n*        output to be flushed.\n* @return : provides a minimum amount of data remaining to be flushed from internal buffers\n*           or an error code, which can be tested using ZSTD_isError().\n*\n*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,\n*  using ZSTD_compressStream2() with ZSTD_e_flush. 
`output->pos` will be updated.\n*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).\n*  In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.\n*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the\n*  operation.\n*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will\n*        block until the flush is complete or the output buffer is full.\n*  @return : 0 if internal buffers are entirely flushed,\n*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),\n*            or an error code, which can be tested using ZSTD_isError().\n*\n*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.\n*  It will perform a flush and write frame epilogue.\n*  The epilogue is required for decoders to consider a frame completed.\n*  flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.\n*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to\n*  start a new frame.\n*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will\n*        block until the flush is complete or the output buffer is full.\n*  @return : 0 if frame fully completed and fully flushed,\n*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),\n*            or an error code, which can be tested using ZSTD_isError().\n*\n* *******************************************************************/\n\ntypedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */\n                                 /* Continue to 
distinguish them for compatibility with older versions <= v1.2.0 */\n/*===== ZSTD_CStream management functions =====*/\nZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);\nZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);\n\n/*===== Streaming compression functions =====*/\ntypedef enum {\n    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */\n    ZSTD_e_flush=1,    /* flush any data provided so far,\n                        * it creates (at least) one new block, that can be decoded immediately on reception;\n                        * frame will continue: any future data can still reference previously compressed data, improving compression.\n                        * note : multithreaded compression will block to flush as much output as possible. */\n    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.\n                        * note that frame is only closed after compressed data is fully flushed (return value == 0).\n                        * After that point, any additional data starts a new frame.\n                        * note : each frame is independent (does not reference any content from previous frame).\n                        : note : multithreaded compression will block to flush as much output as possible. */\n} ZSTD_EndDirective;\n\n/*! ZSTD_compressStream2() :\n *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.\n *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n *  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\n *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize\n *  - output->pos and input->pos will be updated. 
They are guaranteed to remain below their respective limit.\n *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\n *  - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available,\n *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.\n *                                                  The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\n *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\n *  - @return provides a minimum amount of data remaining to be flushed from internal buffers\n *            or an error code, which can be tested using ZSTD_isError().\n *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\n *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\n *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\n *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\n *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.\n *            Before starting a new compression job, or changing compression parameters,\n *            it is required to fully flush internal buffers.\n */\nZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,\n                                         ZSTD_outBuffer* output,\n                                         ZSTD_inBuffer* input,\n                                         ZSTD_EndDirective endOp);\n\n\n/* These buffer sizes are softly recommended.\n * They are not required : 
ZSTD_compressStream*() happily accepts any buffer size, for both input and output.\n * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),\n * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.\n *\n * However, note that these recommendations are from the perspective of a C caller program.\n * If the streaming interface is invoked from some other language,\n * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,\n * a major performance rule is to reduce crossing such interface to an absolute minimum.\n * It's not rare that performance ends being spent more into the interface, rather than compression itself.\n * In which cases, prefer using large buffers, as large as practical,\n * for both input and output, to reduce the nb of roundtrips.\n */\nZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */\nZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. 
*/\n\n\n/* *****************************************************************************\n * This following is a legacy streaming API.\n * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().\n * It is redundant, but remains fully supported.\n * Advanced parameters and dictionary compression can only be used through the\n * new API.\n ******************************************************************************/\n\n/*!\n * Equivalent to:\n *\n *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\n *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n */\nZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);\n/*!\n * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).\n * NOTE: The return value is different. ZSTD_compressStream() returns a hint for\n * the next read size (if non-zero and not an error). ZSTD_compressStream2()\n * returns the minimum nb of bytes left to flush (if non-zero and not an error).\n */\nZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);\n/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */\nZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);\n/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). 
*/\nZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);\n\n\n/*-***************************************************************************\n*  Streaming decompression - HowTo\n*\n*  A ZSTD_DStream object is required to track streaming operations.\n*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.\n*  ZSTD_DStream objects can be re-used multiple times.\n*\n*  Use ZSTD_initDStream() to start a new decompression operation.\n* @return : recommended first input size\n*  Alternatively, use advanced API to set specific properties.\n*\n*  Use ZSTD_decompressStream() repetitively to consume your input.\n*  The function will update both `pos` fields.\n*  If `input.pos < input.size`, some input has not been consumed.\n*  It's up to the caller to present again remaining data.\n*  The function tries to flush all data decoded immediately, respecting output buffer size.\n*  If `output.pos < output.size`, decoder has flushed everything it could.\n*  But if `output.pos == output.size`, there might be some data left within internal buffers.,\n*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.\n*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.\n* @return : 0 when a frame is completely decoded and fully flushed,\n*        or an error code, which can be tested using ZSTD_isError(),\n*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :\n*                                the return value is a suggested next input size (just a hint for better latency)\n*                                that will never request more than the remaining frame size.\n* *******************************************************************************/\n\ntypedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */\n                                 /* For 
compatibility with versions <= v1.2.0, prefer differentiating them. */\n/*===== ZSTD_DStream management functions =====*/\nZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);\nZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);\n\n/*===== Streaming decompression functions =====*/\n\n/* This function is redundant with the advanced API and equivalent to:\n *\n *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n *     ZSTD_DCtx_refDDict(zds, NULL);\n */\nZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);\n\nZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);\n\nZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */\nZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */\n\n\n/**************************\n*  Simple dictionary API\n***************************/\n/*! ZSTD_compress_usingDict() :\n *  Compression at an explicit compression level using a Dictionary.\n *  A dictionary can be any arbitrary data segment (also called a prefix),\n *  or a buffer with specified information (see dictBuilder/zdict.h).\n *  Note : This function loads the dictionary, resulting in significant startup delay.\n *         It's intended for a dictionary used only once.\n *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */\nZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,\n                                           void* dst, size_t dstCapacity,\n                                     const void* src, size_t srcSize,\n                                     const void* dict,size_t dictSize,\n                                           int compressionLevel);\n\n/*! 
ZSTD_decompress_usingDict() :\n *  Decompression using a known Dictionary.\n *  Dictionary must be identical to the one used during compression.\n *  Note : This function loads the dictionary, resulting in significant startup delay.\n *         It's intended for a dictionary used only once.\n *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */\nZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,\n                                             void* dst, size_t dstCapacity,\n                                       const void* src, size_t srcSize,\n                                       const void* dict,size_t dictSize);\n\n\n/***********************************\n *  Bulk processing dictionary API\n **********************************/\ntypedef struct ZSTD_CDict_s ZSTD_CDict;\n\n/*! ZSTD_createCDict() :\n *  When compressing multiple messages or blocks using the same dictionary,\n *  it's recommended to digest the dictionary only once, since it's a costly operation.\n *  ZSTD_createCDict() will create a state from digesting a dictionary.\n *  The resulting state can be used for future compression operations with very limited startup cost.\n *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.\n * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.\n *  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.\n *  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,\n *      in which case the only thing that it transports is the @compressionLevel.\n *      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,\n *      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. 
*/\nZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,\n                                         int compressionLevel);\n\n/*! ZSTD_freeCDict() :\n *  Function frees memory allocated by ZSTD_createCDict(). */\nZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);\n\n/*! ZSTD_compress_usingCDict() :\n *  Compression using a digested Dictionary.\n *  Recommended when same dictionary is used multiple times.\n *  Note : compression level is _decided at dictionary creation time_,\n *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */\nZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,\n                                            void* dst, size_t dstCapacity,\n                                      const void* src, size_t srcSize,\n                                      const ZSTD_CDict* cdict);\n\n\ntypedef struct ZSTD_DDict_s ZSTD_DDict;\n\n/*! ZSTD_createDDict() :\n *  Create a digested dictionary, ready to start decompression operation without startup delay.\n *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */\nZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);\n\n/*! ZSTD_freeDDict() :\n *  Function frees memory allocated with ZSTD_createDDict() */\nZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);\n\n/*! ZSTD_decompress_usingDDict() :\n *  Decompression using a digested Dictionary.\n *  Recommended when same dictionary is used multiple times. */\nZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,\n                                              void* dst, size_t dstCapacity,\n                                        const void* src, size_t srcSize,\n                                        const ZSTD_DDict* ddict);\n\n\n/********************************\n *  Dictionary helper functions\n *******************************/\n\n/*! 
ZSTD_getDictID_fromDict() :\n *  Provides the dictID stored within dictionary.\n *  if @return == 0, the dictionary is not conformant with Zstandard specification.\n *  It can still be loaded, but as a content-only dictionary. */\nZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);\n\n/*! ZSTD_getDictID_fromDDict() :\n *  Provides the dictID of the dictionary loaded into `ddict`.\n *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\n *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */\nZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);\n\n/*! ZSTD_getDictID_fromFrame() :\n *  Provides the dictID required to decompressed the frame stored within `src`.\n *  If @return == 0, the dictID could not be decoded.\n *  This could for one of the following reasons :\n *  - The frame does not require a dictionary to be decoded (most common case).\n *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.\n *    Note : this use case also happens when using a non-conformant dictionary.\n *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).\n *  - This is not a Zstandard frame.\n *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */\nZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);\n\n\n/*******************************************************************************\n * Advanced dictionary and prefix API\n *\n * This API allows dictionaries to be used with ZSTD_compress2(),\n * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and\n * only reset with the context is reset with ZSTD_reset_parameters or\n * ZSTD_reset_session_and_parameters. 
Prefixes are single-use.\n ******************************************************************************/\n\n\n/*! ZSTD_CCtx_loadDictionary() :\n *  Create an internal CDict from `dict` buffer.\n *  Decompression will have to use same dictionary.\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n *  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,\n *           meaning \"return to no-dictionary mode\".\n *  Note 1 : Dictionary is sticky, it will be used for all future compressed frames.\n *           To return to \"no-dictionary\" situation, load a NULL dictionary (or reset parameters).\n *  Note 2 : Loading a dictionary involves building tables.\n *           It's also a CPU consuming operation, with non-negligible impact on latency.\n *           Tables are dependent on compression parameters, and for this reason,\n *           compression parameters can no longer be changed after loading a dictionary.\n *  Note 3 :`dict` content will be copied internally.\n *           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.\n *           In such a case, dictionary buffer must outlive its users.\n *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()\n *           to precisely select how dictionary content must be interpreted. */\nZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);\n\n/*! 
ZSTD_CCtx_refCDict() :\n *  Reference a prepared dictionary, to be used for all next compressed frames.\n *  Note that compression parameters are enforced from within CDict,\n *  and supersede any compression parameter previously set within CCtx.\n *  The parameters ignored are labled as \"superseded-by-cdict\" in the ZSTD_cParameter enum docs.\n *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.\n *  The dictionary will remain valid for future compressed frames using same CCtx.\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n *  Special : Referencing a NULL CDict means \"return to no-dictionary mode\".\n *  Note 1 : Currently, only one dictionary can be managed.\n *           Referencing a new dictionary effectively \"discards\" any previous one.\n *  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */\nZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);\n\n/*! ZSTD_CCtx_refPrefix() :\n *  Reference a prefix (single-usage dictionary) for next compressed frame.\n *  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).\n *  Decompression will need same prefix to properly regenerate data.\n *  Compressing with a prefix is similar in outcome as performing a diff and compressing it,\n *  but performs much faster, especially during decompression (compression speed is tunable with compression level).\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary\n *  Note 1 : Prefix buffer is referenced. 
It **must** outlive compression.\n *           Its content must remain unmodified during compression.\n *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,\n *           ensure that the window size is large enough to contain the entire source.\n *           See ZSTD_c_windowLog.\n *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.\n *           It's a CPU consuming operation, with non-negligible impact on latency.\n *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.\n *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).\n *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */\nZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,\n                                 const void* prefix, size_t prefixSize);\n\n/*! ZSTD_DCtx_loadDictionary() :\n *  Create an internal DDict from dict buffer,\n *  to be used to decompress next frames.\n *  The dictionary remains valid for all future frames, until explicitly invalidated.\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,\n *            meaning \"return to no-dictionary mode\".\n *  Note 1 : Loading a dictionary involves building tables,\n *           which has a non-negligible impact on CPU usage and latency.\n *           It's recommended to \"load once, use many times\", to amortize the cost\n *  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.\n *           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.\n *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of\n *           how dictionary content is loaded and interpreted.\n */\nZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* 
dctx, const void* dict, size_t dictSize);\n\n/*! ZSTD_DCtx_refDDict() :\n *  Reference a prepared dictionary, to be used to decompress next frames.\n *  The dictionary remains active for decompression of future frames using same DCtx.\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n *  Note 1 : Currently, only one dictionary can be managed.\n *           Referencing a new dictionary effectively \"discards\" any previous one.\n *  Special: referencing a NULL DDict means \"return to no-dictionary mode\".\n *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.\n */\nZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);\n\n/*! ZSTD_DCtx_refPrefix() :\n *  Reference a prefix (single-usage dictionary) to decompress next frame.\n *  This is the reverse operation of ZSTD_CCtx_refPrefix(),\n *  and must use the same prefix as the one used during compression.\n *  Prefix is **only used once**. Reference is discarded at end of frame.\n *  End of frame is reached when ZSTD_decompressStream() returns 0.\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary\n *  Note 2 : Prefix buffer is referenced. 
It **must** outlive decompression.\n *           Prefix buffer must remain unmodified up to the end of frame,\n *           reached when ZSTD_decompressStream() returns 0.\n *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).\n *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)\n *  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.\n *           A full dictionary is more costly, as it requires building tables.\n */\nZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,\n                                 const void* prefix, size_t prefixSize);\n\n/* ===   Memory management   === */\n\n/*! ZSTD_sizeof_*() :\n *  These functions give the _current_ memory usage of selected object.\n *  Note that object memory usage can evolve (increase or decrease) over time. */\nZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);\nZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);\nZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);\nZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);\nZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);\nZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);\n\n#endif  /* ZSTD_H_235446 */\n\n\n/* **************************************************************************************\n *   ADVANCED AND EXPERIMENTAL FUNCTIONS\n ****************************************************************************************\n * The definitions in the following section are considered experimental.\n * They are provided for advanced scenarios.\n * They should never be used with a dynamic library, as prototypes may change in the future.\n * Use them only in association with static linking.\n * ***************************************************************************************/\n\n#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)\n#define 
ZSTD_H_ZSTD_STATIC_LINKING_ONLY\n\n/****************************************************************************************\n *   experimental API (static linking only)\n ****************************************************************************************\n * The following symbols and constants\n * are not planned to join \"stable API\" status in the near future.\n * They can still change in future versions.\n * Some of them are planned to remain in the static_only section indefinitely.\n * Some of them might be removed in the future (especially when redundant with existing stable functions)\n * ***************************************************************************************/\n\n#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1)   /* minimum input size required to query frame header size */\n#define ZSTD_FRAMEHEADERSIZE_MIN(format)    ((format) == ZSTD_f_zstd1 ? 6 : 2)\n#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */\n#define ZSTD_SKIPPABLEHEADERSIZE    8\n\n/* compression parameter bounds */\n#define ZSTD_WINDOWLOG_MAX_32    30\n#define ZSTD_WINDOWLOG_MAX_64    31\n#define ZSTD_WINDOWLOG_MAX     ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))\n#define ZSTD_WINDOWLOG_MIN       10\n#define ZSTD_HASHLOG_MAX       ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)\n#define ZSTD_HASHLOG_MIN          6\n#define ZSTD_CHAINLOG_MAX_32     29\n#define ZSTD_CHAINLOG_MAX_64     30\n#define ZSTD_CHAINLOG_MAX      ((int)(sizeof(size_t) == 4 ? 
ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))\n#define ZSTD_CHAINLOG_MIN        ZSTD_HASHLOG_MIN\n#define ZSTD_SEARCHLOG_MAX      (ZSTD_WINDOWLOG_MAX-1)\n#define ZSTD_SEARCHLOG_MIN        1\n#define ZSTD_MINMATCH_MAX         7   /* only for ZSTD_fast, other strategies are limited to 6 */\n#define ZSTD_MINMATCH_MIN         3   /* only for ZSTD_btopt+, faster strategies are limited to 4 */\n#define ZSTD_TARGETLENGTH_MAX    ZSTD_BLOCKSIZE_MAX\n#define ZSTD_TARGETLENGTH_MIN     0   /* note : comparing this constant to an unsigned results in a tautological test */\n#define ZSTD_STRATEGY_MIN        ZSTD_fast\n#define ZSTD_STRATEGY_MAX        ZSTD_btultra2\n\n\n#define ZSTD_OVERLAPLOG_MIN       0\n#define ZSTD_OVERLAPLOG_MAX       9\n\n#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame\n                                           * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,\n                                           * to preserve host's memory from unreasonable requirements.\n                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).\n                                           * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */\n\n\n/* LDM parameter bounds */\n#define ZSTD_LDM_HASHLOG_MIN      ZSTD_HASHLOG_MIN\n#define ZSTD_LDM_HASHLOG_MAX      ZSTD_HASHLOG_MAX\n#define ZSTD_LDM_MINMATCH_MIN        4\n#define ZSTD_LDM_MINMATCH_MAX     4096\n#define ZSTD_LDM_BUCKETSIZELOG_MIN   1\n#define ZSTD_LDM_BUCKETSIZELOG_MAX   8\n#define ZSTD_LDM_HASHRATELOG_MIN     0\n#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)\n\n/* Advanced parameter bounds */\n#define ZSTD_TARGETCBLOCKSIZE_MIN   64\n#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX\n#define ZSTD_SRCSIZEHINT_MIN        0\n#define ZSTD_SRCSIZEHINT_MAX        INT_MAX\n\n/* internal 
*/\n#define ZSTD_HASHLOG3_MAX           17\n\n\n/* ---  Advanced types  --- */\n\ntypedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;\n\ntypedef struct {\n    unsigned int matchPos; /* Match pos in dst */\n    /* If seqDef.offset > 3, then this is seqDef.offset - 3\n     * If seqDef.offset < 3, then this is the corresponding repeat offset\n     * But if seqDef.offset < 3 and litLength == 0, this is the\n     *   repeat offset before the corresponding repeat offset\n     * And if seqDef.offset == 3 and litLength == 0, this is the\n     *   most recent repeat offset - 1\n     */\n    unsigned int offset;\n    unsigned int litLength; /* Literal length */\n    unsigned int matchLength; /* Match length */\n    /* 0 when seq not rep and seqDef.offset otherwise\n     * when litLength == 0 this will be <= 4, otherwise <= 3 like normal\n     */\n    unsigned int rep;\n} ZSTD_Sequence;\n\ntypedef struct {\n    unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */\n    unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */\n    unsigned hashLog;         /**< dispatch table : larger == faster, more memory */\n    unsigned searchLog;       /**< nb of searches : larger == more compression, slower */\n    unsigned minMatch;        /**< match length searched : larger == faster decompression, sometimes less compression */\n    unsigned targetLength;    /**< acceptable match size for optimal parser (only) : larger == more compression, slower */\n    ZSTD_strategy strategy;   /**< see ZSTD_strategy definition above */\n} ZSTD_compressionParameters;\n\ntypedef struct {\n    int contentSizeFlag; /**< 1: content size will be in frame header (when known) */\n    int checksumFlag;    /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */\n    int noDictIDFlag;    /**< 1: no dictID will be saved into frame 
header (dictID is only useful for dictionary compression) */\n} ZSTD_frameParameters;\n\ntypedef struct {\n    ZSTD_compressionParameters cParams;\n    ZSTD_frameParameters fParams;\n} ZSTD_parameters;\n\ntypedef enum {\n    ZSTD_dct_auto = 0,       /* dictionary is \"full\" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is \"rawContent\" */\n    ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */\n    ZSTD_dct_fullDict = 2    /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */\n} ZSTD_dictContentType_e;\n\ntypedef enum {\n    ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */\n    ZSTD_dlm_byRef = 1    /**< Reference dictionary content -- the dictionary buffer must outlive its users. */\n} ZSTD_dictLoadMethod_e;\n\ntypedef enum {\n    ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */\n    ZSTD_f_zstd1_magicless = 1  /* Variant of zstd frame format, without initial 4-bytes magic number.\n                                 * Useful to save 4 bytes per generated frame.\n                                 * Decoder cannot recognise automatically this format, requiring this instruction. */\n} ZSTD_format_e;\n\ntypedef enum {\n    /* Note: this enum and the behavior it controls are effectively internal\n     * implementation details of the compressor. They are expected to continue\n     * to evolve and should be considered only in the context of extremely\n     * advanced performance tuning.\n     *\n     * Zstd currently supports the use of a CDict in three ways:\n     *\n     * - The contents of the CDict can be copied into the working context. This\n     *   means that the compression can search both the dictionary and input\n     *   while operating on a single set of internal tables. This makes\n     *   the compression faster per-byte of input. 
However, the initial copy of\n     *   the CDict's tables incurs a fixed cost at the beginning of the\n     *   compression. For small compressions (< 8 KB), that copy can dominate\n     *   the cost of the compression.\n     *\n     * - The CDict's tables can be used in-place. In this model, compression is\n     *   slower per input byte, because the compressor has to search two sets of\n     *   tables. However, this model incurs no start-up cost (as long as the\n     *   working context's tables can be reused). For small inputs, this can be\n     *   faster than copying the CDict's tables.\n     *\n     * - The CDict's tables are not used at all, and instead we use the working\n     *   context alone to reload the dictionary and use params based on the source\n     *   size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().\n     *   This method is effective when the dictionary sizes are very small relative\n     *   to the input size, and the input size is fairly large to begin with.\n     *\n     * Zstd has a simple internal heuristic that selects which strategy to use\n     * at the beginning of a compression. However, if experimentation shows that\n     * Zstd is making poor choices, it is possible to override that choice with\n     * this enum.\n     */\n    ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */\n    ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */\n    ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */\n    ZSTD_dictForceLoad     = 3  /* Always reload the dictionary */\n} ZSTD_dictAttachPref_e;\n\ntypedef enum {\n  ZSTD_lcm_auto = 0,          /**< Automatically determine the compression mode based on the compression level.\n                               *   Negative compression levels will be uncompressed, and positive compression\n                               *   levels will be compressed. */\n  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. 
Uncompressed literals will still be\n                               *   emitted if Huffman compression is not profitable. */\n  ZSTD_lcm_uncompressed = 2   /**< Always emit uncompressed literals. */\n} ZSTD_literalCompressionMode_e;\n\n\n/***************************************\n*  Frame size functions\n***************************************/\n\n/*! ZSTD_findDecompressedSize() :\n *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames\n *  `srcSize` must be the _exact_ size of this series\n *       (i.e. there should be a frame boundary at `src + srcSize`)\n *  @return : - decompressed size of all data in all successive frames\n *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN\n *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR\n *\n *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.\n *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.\n *            In which case, it's necessary to use streaming mode to decompress data.\n *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()\n *   note 3 : decompressed size can be very large (64-bits value),\n *            potentially larger than what local system can handle as a single memory segment.\n *            In which case, it's necessary to use streaming mode to decompress data.\n *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.\n *            Always ensure result fits within application's authorized limits.\n *            Each application can set its own limits.\n *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to\n *            read each contained frame header.  This is fast as most of the data is skipped,\n *            however it does mean that all frame data must be present and valid. 
*/\nZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);\n\n/*! ZSTD_decompressBound() :\n *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames\n *  `srcSize` must be the _exact_ size of this series\n *       (i.e. there should be a frame boundary at `src + srcSize`)\n *  @return : - upper-bound for the decompressed size of all data in all successive frames\n *            - if an error occured: ZSTD_CONTENTSIZE_ERROR\n *\n *  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.\n *  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.\n *            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.\n *  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:\n *              upper-bound = # blocks * min(128 KB, Window_Size)\n */\nZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);\n\n/*! ZSTD_frameHeaderSize() :\n *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.\n * @return : size of the Frame Header,\n *           or an error code (if srcSize is too small) */\nZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);\n\n/*! ZSTD_getSequences() :\n * Extract sequences from the sequence store\n * zc can be used to insert custom compression params.\n * This function invokes ZSTD_compress2\n * @return : number of sequences extracted\n */\nZSTDLIB_API size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,\n    size_t outSeqsSize, const void* src, size_t srcSize);\n\n\n/***************************************\n*  Memory management\n***************************************/\n\n/*! 
ZSTD_estimate*() :\n *  These functions make it possible to estimate memory usage of a future\n *  {D,C}Ctx, before its creation.\n *\n *  ZSTD_estimateCCtxSize() will provide a budget large enough for any\n *  compression level up to selected one. Unlike ZSTD_estimateCStreamSize*(),\n *  this estimate does not include space for a window buffer, so this estimate\n *  is guaranteed to be enough for single-shot compressions, but not streaming\n *  compressions. It will however assume the input may be arbitrarily large,\n *  which is the worst case. If srcSize is known to always be small,\n *  ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.\n *  ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with\n *  ZSTD_getCParams() to create cParams from compressionLevel.\n *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with\n *  ZSTD_CCtxParams_setParameter().\n *\n *  Note: only single-threaded compression is supported. This function will\n *  return an error code if ZSTD_c_nbWorkers is >= 1. */\nZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);\nZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);\nZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);\nZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);\n\n/*! ZSTD_estimateCStreamSize() :\n *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.\n *  It will also consider src size to be arbitrarily \"large\", which is worst case.\n *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.\n *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.\n *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. 
This function will return an error code if ZSTD_c_nbWorkers is >= 1.\n *  Note : CStream size estimation is only correct for single-threaded compression.\n *  ZSTD_DStream memory budget depends on window Size.\n *  This information can be passed manually, using ZSTD_estimateDStreamSize,\n *  or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();\n *  Note : if streaming is init with function ZSTD_init?Stream_usingDict(),\n *         an internal ?Dict will be created, which additional size is not estimated here.\n *         In this case, get total size by adding ZSTD_estimate?DictSize */\nZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);\nZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);\nZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);\nZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);\nZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);\n\n/*! ZSTD_estimate?DictSize() :\n *  ZSTD_estimateCDictSize() will bet that src size is relatively \"small\", and content is copied, like ZSTD_createCDict().\n *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().\n *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.\n */\nZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);\nZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);\nZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);\n\n/*! 
ZSTD_initStatic*() :\n *  Initialize an object using a pre-allocated fixed-size buffer.\n *  workspace: The memory area to emplace the object into.\n *             Provided pointer *must be 8-bytes aligned*.\n *             Buffer must outlive object.\n *  workspaceSize: Use ZSTD_estimate*Size() to determine\n *                 how large workspace must be to support target scenario.\n * @return : pointer to object (same address as workspace, just different type),\n *           or NULL if error (size too small, incorrect alignment, etc.)\n *  Note : zstd will never resize nor malloc() when using a static buffer.\n *         If the object requires more memory than available,\n *         zstd will just error out (typically ZSTD_error_memory_allocation).\n *  Note 2 : there is no corresponding \"free\" function.\n *           Since workspace is allocated externally, it must be freed externally too.\n *  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level\n *           into its associated cParams.\n *  Limitation 1 : currently not compatible with internal dictionary creation, triggered by\n *                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().\n *  Limitation 2 : static cctx currently not compatible with multi-threading.\n *  Limitation 3 : static dctx is incompatible with legacy support.\n */\nZSTDLIB_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);\nZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */\n\nZSTDLIB_API ZSTD_DCtx*    ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);\nZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */\n\nZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(\n                                        void* workspace, size_t workspaceSize,\n                                        
const void* dict, size_t dictSize,\n                                        ZSTD_dictLoadMethod_e dictLoadMethod,\n                                        ZSTD_dictContentType_e dictContentType,\n                                        ZSTD_compressionParameters cParams);\n\nZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(\n                                        void* workspace, size_t workspaceSize,\n                                        const void* dict, size_t dictSize,\n                                        ZSTD_dictLoadMethod_e dictLoadMethod,\n                                        ZSTD_dictContentType_e dictContentType);\n\n\n/*! Custom memory allocation :\n *  These prototypes make it possible to pass your own allocation/free functions.\n *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.\n *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.\n */\ntypedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);\ntypedef void  (*ZSTD_freeFunction) (void* opaque, void* address);\ntypedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;\nstatic ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */\n\nZSTDLIB_API ZSTD_CCtx*    ZSTD_createCCtx_advanced(ZSTD_customMem customMem);\nZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);\nZSTDLIB_API ZSTD_DCtx*    ZSTD_createDCtx_advanced(ZSTD_customMem customMem);\nZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);\n\nZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,\n                                                  ZSTD_dictLoadMethod_e dictLoadMethod,\n                                                  ZSTD_dictContentType_e dictContentType,\n                                                  
ZSTD_compressionParameters cParams,\n                                                  ZSTD_customMem customMem);\n\nZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,\n                                                  ZSTD_dictLoadMethod_e dictLoadMethod,\n                                                  ZSTD_dictContentType_e dictContentType,\n                                                  ZSTD_customMem customMem);\n\n\n\n/***************************************\n*  Advanced compression functions\n***************************************/\n\n/*! ZSTD_createCDict_byReference() :\n *  Create a digested dictionary for compression\n *  Dictionary content is just referenced, not duplicated.\n *  As a consequence, `dictBuffer` **must** outlive CDict,\n *  and its content must remain unmodified throughout the lifetime of CDict.\n *  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */\nZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);\n\n/*! ZSTD_getCParams() :\n * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.\n * `estimatedSrcSize` value is optional, select 0 if not known */\nZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);\n\n/*! ZSTD_getParams() :\n *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.\n *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */\nZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);\n\n/*! 
ZSTD_checkCParams() :\n *  Ensure param values remain within authorized range.\n * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */\nZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);\n\n/*! ZSTD_adjustCParams() :\n *  optimize params for a given `srcSize` and `dictSize`.\n * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.\n * `dictSize` must be `0` when there is no dictionary.\n *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.\n *  This function never fails (wide contract) */\nZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);\n\n/*! ZSTD_compress_advanced() :\n *  Note : this function is now DEPRECATED.\n *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.\n *  This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */\nZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,\n                                          void* dst, size_t dstCapacity,\n                                    const void* src, size_t srcSize,\n                                    const void* dict,size_t dictSize,\n                                          ZSTD_parameters params);\n\n/*! 
ZSTD_compress_usingCDict_advanced() :\n *  Note : this function is now REDUNDANT.\n *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.\n *  This prototype will be marked as deprecated and generate compilation warning in some future version */\nZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,\n                                              void* dst, size_t dstCapacity,\n                                        const void* src, size_t srcSize,\n                                        const ZSTD_CDict* cdict,\n                                              ZSTD_frameParameters fParams);\n\n\n/*! ZSTD_CCtx_loadDictionary_byReference() :\n *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.\n *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */\nZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);\n\n/*! ZSTD_CCtx_loadDictionary_advanced() :\n *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over\n *  how to load the dictionary (by copy ? by reference ?)\n *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */\nZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);\n\n/*! ZSTD_CCtx_refPrefix_advanced() :\n *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over\n *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) 
*/\nZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);\n\n/* ===   experimental parameters   === */\n/* these parameters can be used with ZSTD_setParameter()\n * they are not guaranteed to remain supported in the future */\n\n /* Enables rsyncable mode,\n  * which makes compressed files more rsync friendly\n  * by adding periodic synchronization points to the compressed data.\n  * The target average block size is ZSTD_c_jobSize / 2.\n  * It's possible to modify the job size to increase or decrease\n  * the granularity of the synchronization point.\n  * Once the jobSize is smaller than the window size,\n  * it will result in compression ratio degradation.\n  * NOTE 1: rsyncable mode only works when multithreading is enabled.\n  * NOTE 2: rsyncable performs poorly in combination with long range mode,\n  * since it will decrease the effectiveness of synchronization points,\n  * though mileage may vary.\n  * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.\n  * If the selected compression level is already running significantly slower,\n  * the overall speed won't be significantly impacted.\n  */\n #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1\n\n/* Select a compression format.\n * The value must be of type ZSTD_format_e.\n * See ZSTD_format_e enum definition for details */\n#define ZSTD_c_format ZSTD_c_experimentalParam2\n\n/* Force back-reference distances to remain < windowSize,\n * even when referencing into Dictionary content (default:0) */\n#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3\n\n/* Controls whether the contents of a CDict\n * are used in place, or copied into the working context.\n * Accepts values from the ZSTD_dictAttachPref_e enum.\n * See the comments on that enum for an explanation of the feature. 
*/\n#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4\n\n/* Controls how the literals are compressed (default is auto).\n * The value must be of type ZSTD_literalCompressionMode_e.\n * See ZSTD_literalCompressionMode_t enum definition for details.\n */\n#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5\n\n/* Tries to fit compressed block size to be around targetCBlockSize.\n * No target when targetCBlockSize == 0.\n * There is no guarantee on compressed block size (default:0) */\n#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6\n\n/* User's best guess of source size.\n * Hint is not valid when srcSizeHint == 0.\n * There is no guarantee that hint is close to actual source size,\n * but compression ratio may regress significantly if guess considerably underestimates */\n#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7\n\n/*! ZSTD_CCtx_getParameter() :\n *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,\n *  and store it into int* value.\n * @return : 0, or an error code (which can be tested with ZSTD_isError()).\n */\nZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);\n\n\n/*! 
ZSTD_CCtx_params :\n *  Quick howto :\n *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure\n *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into\n *                                     an existing ZSTD_CCtx_params structure.\n *                                     This is similar to\n *                                     ZSTD_CCtx_setParameter().\n *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to\n *                                    an existing CCtx.\n *                                    These parameters will be applied to\n *                                    all subsequent frames.\n *  - ZSTD_compressStream2() : Do compression using the CCtx.\n *  - ZSTD_freeCCtxParams() : Free the memory.\n *\n *  This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()\n *  for static allocation of CCtx for single-threaded compression.\n */\nZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);\nZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);\n\n/*! ZSTD_CCtxParams_reset() :\n *  Reset params to default values.\n */\nZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);\n\n/*! ZSTD_CCtxParams_init() :\n *  Initializes the compression parameters of cctxParams according to\n *  compression level. All other parameters are reset to their default values.\n */\nZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);\n\n/*! ZSTD_CCtxParams_init_advanced() :\n *  Initializes the compression and frame parameters of cctxParams according to\n *  params. All other parameters are reset to their default values.\n */\nZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);\n\n/*! 
ZSTD_CCtxParams_setParameter() :\n *  Similar to ZSTD_CCtx_setParameter.\n *  Set one compression parameter, selected by enum ZSTD_cParameter.\n *  Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n */\nZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);\n\n/*! ZSTD_CCtxParams_getParameter() :\n * Similar to ZSTD_CCtx_getParameter.\n * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.\n * @result : 0, or an error code (which can be tested with ZSTD_isError()).\n */\nZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);\n\n/*! ZSTD_CCtx_setParametersUsingCCtxParams() :\n *  Apply a set of ZSTD_CCtx_params to the compression context.\n *  This can be done even after compression is started,\n *    if nbWorkers==0, this will have no impact until a new compression is started.\n *    if nbWorkers>=1, new parameters will be picked up at next job,\n *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).\n */\nZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(\n        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);\n\n/*! 
ZSTD_compressStream2_simpleArgs() :\n *  Same as ZSTD_compressStream2(),\n *  but using only integral types as arguments.\n *  This variant might be helpful for binders from dynamic languages\n *  which have troubles handling structures containing memory pointers.\n */\nZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (\n                            ZSTD_CCtx* cctx,\n                            void* dst, size_t dstCapacity, size_t* dstPos,\n                      const void* src, size_t srcSize, size_t* srcPos,\n                            ZSTD_EndDirective endOp);\n\n\n/***************************************\n*  Advanced decompression functions\n***************************************/\n\n/*! ZSTD_isFrame() :\n *  Tells if the content of `buffer` starts with a valid Frame Identifier.\n *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.\n *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.\n *  Note 3 : Skippable Frame Identifiers are considered valid. */\nZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);\n\n/*! ZSTD_createDDict_byReference() :\n *  Create a digested dictionary, ready to start decompression operation without startup delay.\n *  Dictionary content is referenced, and therefore stays in dictBuffer.\n *  It is important that dictBuffer outlives DDict,\n *  it must remain read accessible throughout the lifetime of DDict */\nZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);\n\n/*! ZSTD_DCtx_loadDictionary_byReference() :\n *  Same as ZSTD_DCtx_loadDictionary(),\n *  but references `dict` content instead of copying it into `dctx`.\n *  This saves memory if `dict` remains around.,\n *  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. 
*/\nZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);\n\n/*! ZSTD_DCtx_loadDictionary_advanced() :\n *  Same as ZSTD_DCtx_loadDictionary(),\n *  but gives direct control over\n *  how to load the dictionary (by copy ? by reference ?)\n *  and how to interpret it (automatic ? force raw mode ? full mode only ?). */\nZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);\n\n/*! ZSTD_DCtx_refPrefix_advanced() :\n *  Same as ZSTD_DCtx_refPrefix(), but gives finer control over\n *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */\nZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);\n\n/*! ZSTD_DCtx_setMaxWindowSize() :\n *  Refuses allocating internal buffers for frames requiring a window size larger than provided limit.\n *  This protects a decoder context from reserving too much memory for itself (potential attack scenario).\n *  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.\n *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)\n * @return : 0, or an error code (which can be tested using ZSTD_isError()).\n */\nZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);\n\n/* ZSTD_d_format\n * experimental parameter,\n * allowing selection between ZSTD_format_e input compression formats\n */\n#define ZSTD_d_format ZSTD_d_experimentalParam1\n\n/*! 
ZSTD_DCtx_setFormat() :\n *  Instruct the decoder context about what kind of data to decode next.\n *  This instruction is mandatory to decode data without a fully-formed header,\n *  such ZSTD_f_zstd1_magicless for example.\n * @return : 0, or an error code (which can be tested using ZSTD_isError()). */\nZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);\n\n/*! ZSTD_decompressStream_simpleArgs() :\n *  Same as ZSTD_decompressStream(),\n *  but using only integral types as arguments.\n *  This can be helpful for binders from dynamic languages\n *  which have troubles handling structures containing memory pointers.\n */\nZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (\n                            ZSTD_DCtx* dctx,\n                            void* dst, size_t dstCapacity, size_t* dstPos,\n                      const void* src, size_t srcSize, size_t* srcPos);\n\n\n/********************************************************************\n*  Advanced streaming functions\n*  Warning : most of these functions are now redundant with the Advanced API.\n*  Once Advanced API reaches \"stable\" status,\n*  redundant functions will be deprecated, and then at some point removed.\n********************************************************************/\n\n/*=====   Advanced Streaming compression functions  =====*/\n/**! ZSTD_initCStream_srcSize() :\n * This function is deprecated, and equivalent to:\n *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\n *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n *\n * pledgedSrcSize must be correct. If it is not known at init time, use\n * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,\n * \"0\" also disables frame content size field. 
It may be enabled in the future.\n * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t\nZSTD_initCStream_srcSize(ZSTD_CStream* zcs,\n                         int compressionLevel,\n                         unsigned long long pledgedSrcSize);\n\n/**! ZSTD_initCStream_usingDict() :\n * This function is deprecated, and is equivalent to:\n *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\n *\n * Creates of an internal CDict (incompatible with static CCtx), except if\n * dict == NULL or dictSize < 8, in which case no dict is used.\n * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if\n * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.\n * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t\nZSTD_initCStream_usingDict(ZSTD_CStream* zcs,\n                     const void* dict, size_t dictSize,\n                           int compressionLevel);\n\n/**! 
ZSTD_initCStream_advanced() :\n * This function is deprecated, and is approximately equivalent to:\n *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n *     // Pseudocode: Set each zstd parameter and leave the rest as-is.\n *     for ((param, value) : params) {\n *         ZSTD_CCtx_setParameter(zcs, param, value);\n *     }\n *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\n *\n * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.\n * pledgedSrcSize must be correct.\n * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.\n * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t\nZSTD_initCStream_advanced(ZSTD_CStream* zcs,\n                    const void* dict, size_t dictSize,\n                          ZSTD_parameters params,\n                          unsigned long long pledgedSrcSize);\n\n/**! ZSTD_initCStream_usingCDict() :\n * This function is deprecated, and equivalent to:\n *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n *     ZSTD_CCtx_refCDict(zcs, cdict);\n *\n * note : cdict will just be referenced, and must outlive compression session\n * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);\n\n/**! 
ZSTD_initCStream_usingCDict_advanced() :\n *   This function is DEPRECATED, and is approximately equivalent to:\n *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n *     // Pseudocode: Set each zstd frame parameter and leave the rest as-is.\n *     for ((fParam, value) : fParams) {\n *         ZSTD_CCtx_setParameter(zcs, fParam, value);\n *     }\n *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n *     ZSTD_CCtx_refCDict(zcs, cdict);\n *\n * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.\n * pledgedSrcSize must be correct. If srcSize is not known at init time, use\n * value ZSTD_CONTENTSIZE_UNKNOWN.\n * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t\nZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,\n                               const ZSTD_CDict* cdict,\n                                     ZSTD_frameParameters fParams,\n                                     unsigned long long pledgedSrcSize);\n\n/*! 
ZSTD_resetCStream() :\n * This function is deprecated, and is equivalent to:\n *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n *\n *  start a new frame, using same parameters from previous frame.\n *  This is typically useful to skip dictionary loading stage, since it will re-use it in-place.\n *  Note that zcs must be init at least once before using ZSTD_resetCStream().\n *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.\n *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.\n *  For the time being, pledgedSrcSize==0 is interpreted as \"srcSize unknown\" for compatibility with older programs,\n *  but it will change to mean \"empty\" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.\n * @return : 0, or an error code (which can be tested using ZSTD_isError())\n *  Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);\n\n\ntypedef struct {\n    unsigned long long ingested;   /* nb input bytes read and buffered */\n    unsigned long long consumed;   /* nb input bytes actually compressed */\n    unsigned long long produced;   /* nb of compressed bytes generated and buffered */\n    unsigned long long flushed;    /* nb of compressed bytes flushed : not provided; can be tracked from caller side */\n    unsigned currentJobID;         /* MT only : latest started job nb */\n    unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */\n} ZSTD_frameProgression;\n\n/* ZSTD_getFrameProgression() :\n * tells how much data has been ingested (read from input)\n * consumed (input actually compressed) and produced (output) for current frame.\n * Note : (ingested - consumed) is amount of input data buffered internally, not yet 
compressed.\n * Aggregates progression inside active worker threads.\n */\nZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);\n\n/*! ZSTD_toFlushNow() :\n *  Tell how many bytes are ready to be flushed immediately.\n *  Useful for multithreading scenarios (nbWorkers >= 1).\n *  Probe the oldest active job, defined as oldest job not yet entirely flushed,\n *  and check its output buffer.\n * @return : amount of data stored in oldest job and ready to be flushed immediately.\n *  if @return == 0, it means either :\n *  + there is no active job (could be checked with ZSTD_frameProgression()), or\n *  + oldest job is still actively compressing data,\n *    but everything it has produced has also been flushed so far,\n *    therefore flush speed is limited by production speed of oldest job\n *    irrespective of the speed of concurrent (and newer) jobs.\n */\nZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);\n\n\n/*=====   Advanced Streaming decompression functions  =====*/\n/**\n * This function is deprecated, and is equivalent to:\n *\n *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);\n *\n * note: no dictionary will be used if dict == NULL or dictSize < 8\n * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);\n\n/**\n * This function is deprecated, and is equivalent to:\n *\n *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n *     ZSTD_DCtx_refDDict(zds, ddict);\n *\n * note : ddict is referenced, it must outlive decompression session\n * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);\n\n/**\n * This function is deprecated, and is equivalent to:\n *\n *     
ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n *\n * re-use decompression parameters from previous init; saves dictionary loading\n * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x\n */\nZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);\n\n\n/*********************************************************************\n*  Buffer-less and synchronous inner streaming functions\n*\n*  This is an advanced API, giving full control over buffer management, for users which need direct control over memory.\n*  But it's also a complex one, with several restrictions, documented below.\n*  Prefer normal streaming API for an easier experience.\n********************************************************************* */\n\n/**\n  Buffer-less streaming compression (synchronous mode)\n\n  A ZSTD_CCtx object is required to track streaming operations.\n  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.\n  ZSTD_CCtx object can be re-used multiple times within successive compression operations.\n\n  Start by initializing a context.\n  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,\n  or ZSTD_compressBegin_advanced(), for finer parameter control.\n  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()\n\n  Then, consume your input using ZSTD_compressContinue().\n  There are some important considerations to keep in mind when using this advanced function :\n  - ZSTD_compressContinue() has no internal buffer. 
It uses externally provided buffers only.\n  - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.\n  - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.\n    Worst case evaluation is provided by ZSTD_compressBound().\n    ZSTD_compressContinue() doesn't guarantee recover after a failed compression.\n  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).\n    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)\n  - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.\n    In which case, it will \"discard\" the relevant memory section from its history.\n\n  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.\n  It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.\n  Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.\n\n  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.\n*/\n\n/*=====   Buffer-less streaming compression functions  =====*/\nZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);\nZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);\nZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */\nZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */\nZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* 
const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */\nZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */\n\nZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\nZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n\n/*-\n  Buffer-less streaming decompression (synchronous mode)\n\n  A ZSTD_DCtx object is required to track streaming operations.\n  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.\n  A ZSTD_DCtx object can be re-used multiple times.\n\n  First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().\n  Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.\n  Data fragment must be large enough to ensure successful decoding.\n `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.\n  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.\n           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.\n           errorCode, which can be tested using ZSTD_isError().\n\n  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,\n  such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).\n  Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.\n  As a consequence, check that values remain within valid application range.\n  For example, do not allocate memory blindly, 
check that `windowSize` is within expectation.\n  Each application can set its own limits, depending on local restrictions.\n  For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.\n\n  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.\n  ZSTD_decompressContinue() is very sensitive to contiguity,\n  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,\n  or that previous contiguous segment is large enough to properly handle maximum back-reference distance.\n  There are multiple ways to guarantee this condition.\n\n  The most memory efficient way is to use a round buffer of sufficient size.\n  Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),\n  which can @return an error code if required value is too large for current system (in 32-bits mode).\n  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,\n  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,\n  which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.\n  At which point, decoding can resume from the beginning of the buffer.\n  Note that already decoded data stored in the buffer should be flushed before being overwritten.\n\n  There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.\n\n  Finally, if you control the compression process, you can also ignore all buffer size rules,\n  as long as the encoder and decoder progress in \"lock-step\",\n  aka use exactly the same buffer sizes, break contiguity at the same place, etc.\n\n  Once buffers are setup, start decompression, with ZSTD_decompressBegin().\n  If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().\n\n  Then use 
ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.\n  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().\n  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.\n\n @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).\n  It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.\n  It can also be an error code, which can be tested with ZSTD_isError().\n\n  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.\n  Context can then be reset to start a new decompression.\n\n  Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().\n  This information is not required to properly decode a frame.\n\n  == Special case : skippable frames ==\n\n  Skippable frames allow integration of user-defined data into a flow of concatenated frames.\n  Skippable frames will be ignored (skipped) by decompressor.\n  The format of skippable frames is as follows :\n  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F\n  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits\n  c) Frame Content - any content (User Data) of length equal to Frame Size\n  For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.\n  For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.\n*/\n\n/*=====   Buffer-less streaming decompression functions  =====*/\ntypedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;\ntypedef struct {\n    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 
0 means \"empty\" */\n    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */\n    unsigned blockSizeMax;\n    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */\n    unsigned headerSize;\n    unsigned dictID;\n    unsigned checksumFlag;\n} ZSTD_frameHeader;\n\n/*! ZSTD_getFrameHeader() :\n *  decode Frame Header, or requires larger `srcSize`.\n * @return : 0, `zfhPtr` is correctly filled,\n *          >0, `srcSize` is too small, value is wanted `srcSize` amount,\n *           or an error code, which can be tested using ZSTD_isError() */\nZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */\n/*! ZSTD_getFrameHeader_advanced() :\n *  same as ZSTD_getFrameHeader(),\n *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */\nZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);\nZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */\n\nZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);\nZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);\nZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);\n\nZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);\nZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\n\n/* misc */\nZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);\ntypedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } 
ZSTD_nextInputType_e;\nZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);\n\n\n\n\n/* ============================ */\n/**       Block level API       */\n/* ============================ */\n\n/*!\n    Block functions produce and decode raw zstd blocks, without frame metadata.\n    Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).\n    But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.\n\n    A few rules to respect :\n    - Compressing and decompressing require a context structure\n      + Use ZSTD_createCCtx() and ZSTD_createDCtx()\n    - It is necessary to init context before starting\n      + compression : any ZSTD_compressBegin*() variant, including with dictionary\n      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary\n      + copyCCtx() and copyDCtx() can be used too\n    - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB\n      + If input is larger than a block size, it's necessary to split input data into multiple blocks\n      + For inputs larger than a single block, consider using regular ZSTD_compress() instead.\n        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.\n    - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !\n      ===> In which case, nothing is produced into `dst` !\n      + User __must__ test for such outcome and deal directly with uncompressed data\n      + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.\n        Doing so would mess up with statistics history, leading to potential data corruption.\n      + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!\n      + In case of multiple successive blocks, should some of them be uncompressed,\n        decoder must 
be informed of their existence in order to follow proper history.\n        Use ZSTD_insertBlock() for such a case.\n*/\n\n/*=====   Raw zstd block functions  =====*/\nZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);\nZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\nZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);\nZSTDLIB_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */\n\n\n#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */\n\n#if defined (__cplusplus)\n}\n#endif\n"
  },
  {
    "path": "src/tube/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.0 FATAL_ERROR)\n\n# set(CMAKE_PREFIX_PATH\n#     /private/home/hengyuan/miniconda3/envs/pytorch-c/lib/python3.7/site-packages/torch)\n# find_package(Torch REQUIRED)\n\nexecute_process(\n    COMMAND python -c \"import torch; import os; print(os.path.dirname(torch.__file__), end='')\"\n    OUTPUT_VARIABLE TorchPath\n)\n\nfind_package(PythonInterp 3.7 REQUIRED)\nfind_package(PythonLibs 3.7 REQUIRED)\n\n# lib for other c++ programs\nadd_library(_tube\n  src_cpp/data_channel.cc\n)\ntarget_include_directories(_tube PUBLIC ${TORCH_INCLUDE_DIRS})\ntarget_include_directories(_tube PUBLIC ${PYTHON_INCLUDE_DIRS})\ntarget_link_libraries(_tube PUBLIC ${TORCH_LIBRARIES} ${TorchPath}/lib/libtorch_python.so fmt zmq)\n\n#target_include_directories(_tube PUBLIC ../third_party ../third_party/zstd/lib ../third_party/zstd/lib/common)\n\n# tests\n#add_executable(test_data_channel src_cpp/test/test_data_channel.cc)\n#target_link_libraries(test_data_channel _tube)\n\n# pybind lib\npybind11_add_module(tube src_cpp/pybind.cc)\ntarget_link_libraries(tube PUBLIC libpolygames)\n"
  },
  {
    "path": "src/tube/README.md",
    "content": "# tube\n\n### build\nwe need to build pytorch from source first. To do that, follow the instruction here \nhttps://github.com/pytorch/pytorch#from-source. \n\nInstruction for building PyTorch on devfair:\n```\nmodule purge\nmodule load cudnn/v7.4-cuda.10.0\nmodule load cuda/10.0\n\n# create a fresh conda environment with python3\nconda create --name [your env name] python=3.7\n\nconda activate [your env name] # Or source activate [your env name], depending on conda version.\n\nconda install numpy pyyaml mkl mkl-include setuptools cmake cffi typing\nconda install -c pytorch magma-cuda100\n\n# clone the repo\n# Note: put the repo onto /scratch partition for MUCH FASTER building speed. \ngit clone --recursive https://github.com/pytorch/pytorch\ncd pytorch\n\nexport CMAKE_PREFIX_PATH=${CONDA_PREFIX:-\"$(dirname $(which conda))/../\"}\n# set cuda arch list so that the built binary can be run on both pascal and volta\nTORCH_CUDA_ARCH_LIST=\"6.0;7.0\" python setup.py install\n```\n\nTo build this repo:\n\n```\nmkdir build\ncd build\ncmake ..\nmake\n```\n\n### run\nNote that we need to set the following before running any multi-threading \nprogram that uses torch::Tensor. Otherwise a simple tensor operation will\nuse all cores by default.\n```\nexport OMP_NUM_THREADS=1\n```\n\n\nTodo:\n\n1. coding convention & clang-format file\n2. split into .h and .cc properly\n\nTodo++:\n1. improve performance by changing to lock-free structure\n2. multi-buffer\n3. we should be able to test environment in C++ alone\n\n\nTo explain:\n\n1. pybind shared_ptr\n2. pybind keep_alive\n3. torch accessor can still go out of bound, i.e. extremely weird segfault, we need to think about it\n"
  },
  {
    "path": "src/tube/pytube/__init__.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n"
  },
  {
    "path": "src/tube/pytube/data_channel_manager.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport concurrent.futures\nfrom . import utils\n\n\nclass DataChannelManager:\n    def __init__(self, channels, *, num_thread=None):\n        self.channels = {}\n        for c in channels:\n            assert c.name not in self.channels\n            self.channels[c.name] = c\n\n        self.num_thread = num_thread if num_thread is not None else len(channels)\n        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.num_thread)\n\n        self.channels_waiting_reply = set()\n        self.futures = []\n        for _, c in self.channels.items():\n            self.futures.append(self.executor.submit(self._channel_get_input, c))\n\n    def __contains__(self, name):\n        return name in self.channels\n\n    def _channel_get_input(self, channel):\n        \"\"\"a helper wrapper function for channel.get_input.\"\"\"\n        data = channel.get_input()\n        return channel.name, data\n\n    def get_input(self, *, max_timeout_s=None):\n        \"\"\"\n        max_timeout_s: the max amount of time (in second) before this\n            function returns. 
if no data arrives within this\n            max_timeout, this function will return empty dict {}\n        \"\"\"\n        # print('@@@ remaining futures:', len(self.futures))\n        utils.assert_eq(len(self.futures), len(self.channels))\n        done, pending = concurrent.futures.wait(\n            self.futures,\n            timeout=max_timeout_s,\n            return_when=concurrent.futures.FIRST_COMPLETED)\n        done = list(done)\n        pending = list(pending)\n        # utils.assert_eq(len(done) + len(pending), len(self.futures))\n        self.futures = pending\n\n        ready = {}\n        for f in done:\n            name, data = f.result()\n            # assert name not in self.channels_waiting_reply\n            # self.channels_waiting_reply.add(name)\n            ready[name] = data\n        return ready\n\n    def set_reply(self, name, reply):\n        reply = {key: reply[key].detach().cpu() for key in reply}\n        # breakpoint()\n        # assert name in self.channels_waiting_reply\n        # self.channels_waiting_reply.remove(name)\n\n        channel = self.channels[name]\n        channel.set_reply(reply)\n        self.futures.append(self.executor.submit(self._channel_get_input, channel))\n\n    def terminate(self):\n        for name, c in self.channels.items():\n            c.terminate()\n\n        self.executor.shutdown(wait=True)\n"
  },
  {
    "path": "src/tube/pytube/test_dc_manager.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport sys\n\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(os.path.join(root, 'build'))\n\nfrom collections import defaultdict\nimport torch\nimport xrl\n\nfrom . import data_channel_manager\n\n\ndef create_env(batchsize, num_env, timeout_ms):\n    dc_fast = xrl.DataChannel('fast', batchsize, timeout_ms)\n    dc_slow = xrl.DataChannel('slow', batchsize, timeout_ms)\n    dc_manager = data_channel_manager.DataChannelManager([dc_fast, dc_slow])\n\n    context = xrl.Context()\n    for i in range(num_env):\n        p = xrl.DualDispatchThread(i, 10, dc_fast, dc_slow)\n        context.push_env_thread(p)\n\n    return context, dc_manager\n\n\nif __name__ == '__main__':\n    context, dc_manager = create_env(5, 8, 10)\n    context.start()\n\n    count = defaultdict(int)\n    bcount = defaultdict(int)\n\n    while not context.terminated():\n        print('get input')\n        batches = dc_manager.get_input(max_timeout_s=1)\n        for key, batch in batches.items():\n            batchsize = batch['s'].size(0)\n            print('@@@ receive:', key, ', batchsize:', batchsize)\n            count[key] += 1\n            bcount[batchsize] += 1\n            reply = {'a': batch['s']}\n            dc_manager.set_reply(key, reply)\n\n        print(count)\n        print(bcount)\n\n    print('end of the story')\n    dc_manager.terminate()\n"
  },
  {
    "path": "src/tube/pytube/utils.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"utils\"\"\"\n\n\ndef assert_eq(real, expected, msg='assert_eq fails'):\n    assert real == expected, '%s: %s (real) vs %s (expected)' % (msg, real, expected)\n"
  },
  {
    "path": "src/tube/src_cpp/context.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <atomic>\n#include <memory>\n#include <thread>\n#include <vector>\n\n#include \"env_thread.h\"\n\nnamespace tube {\n\nclass Context {\n public:\n  Context()\n      : started_(false)\n      , numTerminatedThread_(0) {\n  }\n\n  Context(const Context&) = delete;\n  Context& operator=(const Context&) = delete;\n\n  ~Context() {\n    for (auto& v : envs_) {\n      v->terminate();\n    }\n    for (auto& v : threads_) {\n      v.join();\n    }\n  }\n\n  int pushEnvThread(std::shared_ptr<EnvThread> env) {\n    assert(!started_);\n    envs_.push_back(std::move(env));\n    return (int)envs_.size();\n  }\n\n  void start() {\n    for (int i = 0; i < (int)envs_.size(); ++i) {\n      threads_.emplace_back([this, i]() {\n        envs_[i]->mainLoop();\n        ++numTerminatedThread_;\n      });\n    }\n  }\n\n  bool terminated() {\n    // std::cout << \">>> \" << numTerminatedThread_ << std::endl;\n    return numTerminatedThread_ == (int)envs_.size();\n  }\n\n  std::string getStatsStr() const {\n    EnvThread::Stats cum_stats;\n    for (const auto& env : envs_) {\n      const auto& stats = env->get_stats();\n      for (const auto& key2stat : stats) {\n        auto& cum_stats_val = cum_stats[key2stat.first];\n        const auto& stat_val = key2stat.second;\n        std::get<0>(cum_stats_val) += std::get<0>(stat_val);\n        std::get<1>(cum_stats_val) += std::get<1>(stat_val);\n        std::get<2>(cum_stats_val) += std::get<2>(stat_val);\n      }\n    }\n    std::ostringstream oss;\n    for (const auto& key2stat : cum_stats) {\n      const auto f0 = std::get<0>(key2stat.second);\n      const auto f1 = std::get<1>(key2stat.second);\n      const auto f2 = std::get<2>(key2stat.second);\n      const auto mean = (f0 > 0 ? 
f1 / f0 : 0);\n      const auto stddev = (f0 > 0 ? std::sqrt(f2 / f0 - mean * mean) : 0);\n      oss << key2stat.first << \": N=\" << f0 << \", avg=\" << mean\n          << \", std=\" << stddev << std::endl;\n    }\n    return oss.str();\n  }\n\n private:\n  bool started_;\n  std::atomic<int> numTerminatedThread_;\n  std::vector<std::shared_ptr<EnvThread>> envs_;\n  std::vector<std::thread> threads_;\n};\n}  // namespace tube\n"
  },
  {
    "path": "src/tube/src_cpp/data_block.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <torch/extension.h>\n#include <vector>\n\n#include \"utils.h\"\n\nnamespace tube {\n\nclass DataBlock {\n public:\n  DataBlock(const std::string& name,\n            const std::vector<int64_t>& sizes,\n            torch::ScalarType dtype)\n      : name(std::move(name))\n      , data(torch::zeros(sizes, dtype)) {\n  }\n\n  torch::Tensor& getBuffer() {\n    return data;\n  }\n\n  std::vector<int64_t> sizes() {\n    return data.sizes().vec();\n  }\n\n  torch::ScalarType dtype() {\n    return data.scalar_type();\n  }\n\n  const std::string name;\n  torch::Tensor data;\n};\n}  // namespace tube\n\n#include \"episodic_trajectory.h\"\n#include \"fixed_len_trajectory.h\"\n#include \"indefinite_trajectory.h\"\n"
  },
  {
    "path": "src/tube/src_cpp/data_channel.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include \"data_channel.h\"\n\nusing tube::DataBlock;\nusing tube::DataChannel;\n\nvoid createBuffers(int batchsize,\n                   const std::vector<std::shared_ptr<DataBlock>>& blocks,\n                   std::unordered_map<std::string, torch::Tensor>& buffer) {\n  for (const auto& b : blocks) {\n    std::vector<int64_t> sizes =\n        tube::utils::pushLeft((int64_t)batchsize, b->sizes());\n    auto ret = buffer.insert({b->name, torch::zeros(sizes, b->dtype())});\n    assert(ret.second);\n  }\n}\n\nvoid checkBuffers(\n    int batchsize,\n    const std::vector<std::shared_ptr<DataBlock>>& blocks,\n    const std::unordered_map<std::string, torch::Tensor>& buffer) {\n  int numBuffer = 0;\n  for (const auto& b : blocks) {\n    std::vector<int64_t> sizes =\n        tube::utils::pushLeft((int64_t)batchsize, b->sizes());\n    auto dtype = b->dtype();\n\n    std::unordered_map<std::string, torch::Tensor>::const_iterator it;\n    it = buffer.find(b->name);\n    assert(it != buffer.end());\n    ++numBuffer;\n\n    auto refSizes = it->second.sizes().vec();\n    auto refDtype = it->second.dtype();\n\n    // utils::printVector(refSizes);\n    // utils::printVector(sizes);\n    assert(sizes == refSizes);\n    assert(dtype == refDtype);\n  }\n  assert(numBuffer == (int)buffer.size());\n}\n\nvoid DataChannel::createOrCheckBuffers(\n    const std::vector<std::shared_ptr<DataBlock>>& send,\n    const std::vector<std::shared_ptr<DataBlock>>& reply) {\n  assert(!send.empty() || !reply.empty());\n  if (sendName2Buffer_.empty() && replyName2Buffer_.empty()) {\n    // only the first call will create the buffer\n    createBuffers(batchsize, send, sendName2Buffer_);\n    createBuffers(batchsize, reply, replyName2Buffer_);\n  } else {\n    
checkBuffers(batchsize, send, sendName2Buffer_);\n    checkBuffers(batchsize, reply, replyName2Buffer_);\n  }\n}\n\nvoid DataChannel::terminate() {\n  // called by python, once called, unblock getInput and returns so\n  // that the final python future waiting for getting input can end\n  // if called, the return value from getInput is undefined\n  std::unique_lock<std::mutex> lk1(mFilled_);\n  std::unique_lock<std::mutex> lk2(mReplied_);\n  std::unique_lock<std::mutex> lk3(mAvailSlots_);\n  terminated_ = true;\n  cvFilled_.notify_all();\n  cvReplied_.notify_all();\n  cvAvailSlots_.notify_all();\n}\n\n// for consumer (python) to get input\n// timeout < 0, wait until full batch\n// timeout == 0, return immediately\n// timeout > 0, wait until full batch or (timeout && batch not empty)\nconst std::unordered_map<std::string, torch::Tensor> DataChannel::getInput() {\n  std::unique_lock<std::mutex> lk(mFilled_);\n  if (timeoutMs < 0) {\n    cvFilled_.wait(\n        lk, [this] { return terminated_ || numFilledSlot_ == batchsize; });\n    return sendName2Buffer_;\n  }\n\n  bool returnAll = false;\n  do {\n    returnAll = cvFilled_.wait_for(\n        lk, std::chrono::milliseconds(timeoutMs),\n        [this] { return terminated_ || numFilledSlot_ == batchsize; });\n  } while (numFilledSlot_ == 0 && !terminated_);\n\n  if (returnAll) {\n    return sendName2Buffer_;\n  }\n\n  // hold the lock to prevent new \"mark-as-filled\"\n  lkFilled_ = std::move(lk);\n  return sliceTensorsForSend();\n}\n\n// for consumer (python) to set reply\nvoid DataChannel::setReply(\n    const std::unordered_map<std::string, torch::Tensor>& reply) {\n  if (sentSlots_.empty()) {\n    if (numFilledSlot_ != batchsize) {\n      std::cout << name << \", setReply: numFilledSlots: \" << numFilledSlot_\n                << \" != batchsize: \" << batchsize << std::endl;\n      assert(false);\n    }\n    tube::utils::copyTensors(reply, replyName2Buffer_);\n  } else {\n    if (numFilledSlot_ >= batchsize) {\n     
 std::cout << name << \", setReply: numFilledSlots: \" << numFilledSlot_\n                << \" >= batchsize: \" << batchsize << std::endl;\n      assert(false);\n    }\n    tube::utils::copyTensors(reply, replyName2Buffer_, sentSlots_);\n  }\n\n  // lock free, other thread is waiting on cvAvailSlots_ or cvReplied_,\n  // or, when timeout >= 0, blocked by mFilled_\n  numFilledSlot_ = 0;\n\n  {\n    std::lock_guard<std::mutex> lk(mReplied_);\n    for (int i = 0; i < (int)slotStatus_.size(); ++i) {\n      if (slotStatus_[i] == SlotStatus::filled) {\n        slotStatus_[i] = SlotStatus::replied;\n      } else if (slotStatus_[i] == SlotStatus::filledAutoRelease) {\n        slotStatus_[i] = SlotStatus::replied;\n        releaseSlot(i);\n      }\n    }\n  }\n\n  if (!sentSlots_.empty()) {\n    lkFilled_.unlock();\n    sentSlots_.clear();\n  }\n\n  cvReplied_.notify_all();\n}\n\nstd::unordered_map<std::string, torch::Tensor> DataChannel::getSlot(\n    int* pSlot) {\n  std::unique_lock<std::mutex> lk(mAvailSlots_);\n  cvAvailSlots_.wait(\n      lk, [this] { return availSlots_.size() > 0 || terminated_; });\n  if (terminated_) {\n    return {};\n  }\n  int slot = availSlots_.back();\n  availSlots_.pop_back();\n  assert(slotStatus_[slot] == SlotStatus::avail);\n  lk.unlock();\n\n  *pSlot = slot;\n  std::unordered_map<std::string, torch::Tensor> buffers;\n  for (auto& name2tensor : sendName2Buffer_) {\n    auto tensor = name2tensor.second.slice(0, slot, slot + 1).squeeze(0);\n    buffers[name2tensor.first] = tensor;\n  }\n  return buffers;\n}\n\nvoid DataChannel::markSlotFilled(int slot) {\n  std::unique_lock<std::mutex> lk(mFilled_);\n\n  assert(slotStatus_.at(slot) == SlotStatus::avail);\n  slotStatus_.at(slot) = SlotStatus::filled;\n\n  numFilledSlot_ += 1;\n  assert(numFilledSlot_ <= batchsize);\n  if (numFilledSlot_ == batchsize) {\n    lk.unlock();\n    cvFilled_.notify_all();  // there should be only one waiting\n  }\n}\n\nvoid 
DataChannel::markSlotFilledAutoRelease(int slot) {\n  std::unique_lock<std::mutex> lk(mFilled_);\n\n  assert(slotStatus_.at(slot) == SlotStatus::avail);\n  slotStatus_.at(slot) = SlotStatus::filledAutoRelease;\n\n  numFilledSlot_ += 1;\n  assert(numFilledSlot_ <= batchsize);\n  if (numFilledSlot_ == batchsize) {\n    lk.unlock();\n    cvFilled_.notify_all();  // there should be only one waiting\n  }\n}\n\nstd::unordered_map<std::string, torch::Tensor> DataChannel::getReply(int slot) {\n  std::unique_lock<std::mutex> lk(mReplied_);\n  cvReplied_.wait(lk, [this, slot] {\n    return slotStatus_[slot] == SlotStatus::replied || terminated_;\n  });\n  lk.unlock();\n\n  std::unordered_map<std::string, torch::Tensor> buffers;\n  for (auto& name2tensor : replyName2Buffer_) {\n    auto tensor = name2tensor.second.slice(0, slot, slot + 1).squeeze(0);\n    buffers[name2tensor.first] = tensor;\n  }\n  return buffers;\n}\n\nvoid DataChannel::releaseSlot(int slot) {\n  // assert(slotStatus_[slot] == SlotStatus::replied);\n  slotStatus_[slot] = SlotStatus::avail;\n\n  std::unique_lock<std::mutex> lk(mAvailSlots_);\n  availSlots_.push_back(slot);\n  lk.unlock();\n  cvAvailSlots_.notify_one();\n}\n\nconst std::unordered_map<std::string, torch::Tensor>\nDataChannel::sliceTensorsForSend() {\n  assert(sentSlots_.empty());\n  for (int i = 0; i < (int)slotStatus_.size(); ++i) {\n    if (slotStatus_[i] == SlotStatus::filled ||\n        slotStatus_[i] == SlotStatus::filledAutoRelease) {\n      sentSlots_.push_back(i);\n    }\n  }\n  assert((int)sentSlots_.size() < batchsize);\n\n  torch::Tensor indices = torch::from_blob(\n      sentSlots_.data(), {(int64_t)sentSlots_.size()}, torch::kInt64);\n  std::unordered_map<std::string, torch::Tensor> sliced;\n  for (const auto& name2tensor : sendName2Buffer_) {\n    const std::string& name = name2tensor.first;\n    const torch::Tensor& tensor = name2tensor.second.index_select(0, indices);\n    sliced.insert({name, tensor});\n  }\n  return 
sliced;\n}\n"
  },
  {
    "path": "src/tube/src_cpp/data_channel.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <chrono>\n#include <condition_variable>\n#include <iostream>\n#include <mutex>\n#include <vector>\n\n#include \"data_block.h\"\n#include \"utils.h\"\n\nnamespace tube {\n\nclass DataChannel {\n public:\n  DataChannel(std::string name, int batchsize, int timeoutMs)\n      : name(name)\n      , batchsize(batchsize)\n      , timeoutMs(timeoutMs) {\n    for (int i = 0; i < batchsize; ++i) {\n      availSlots_.push_back(i);\n      slotStatus_.push_back(SlotStatus::avail);\n    }\n  }\n\n  void createOrCheckBuffers(\n      const std::vector<std::shared_ptr<DataBlock>>& send,\n      const std::vector<std::shared_ptr<DataBlock>>& reply);\n\n  void terminate();\n  bool terminated() {\n    return terminated_;\n  }\n\n  // for consumer (python) to get input\n  // timeout < 0, wait until full batch\n  // timeout == 0, return immediately\n  // timeout > 0, wait until full batch or (timeout && batch not empty)\n  const std::unordered_map<std::string, torch::Tensor> getInput();\n\n  // for consumer (python) to set reply\n  void setReply(const std::unordered_map<std::string, torch::Tensor>& reply);\n\n  // for dispatchers\n  std::unordered_map<std::string, torch::Tensor> getSlot(int* pSlot);\n\n  void markSlotFilled(int slot);\n  void markSlotFilledAutoRelease(int slot);\n\n  std::unordered_map<std::string, torch::Tensor> getReply(int slot);\n\n  void releaseSlot(int slot);\n\n  const std::string name;\n  const int batchsize;\n  const int timeoutMs;\n\n private:\n  const std::unordered_map<std::string, torch::Tensor> sliceTensorsForSend();\n\n  // for slot management\n  enum class SlotStatus {\n    avail,\n    filled,\n    filledAutoRelease,\n    replied,\n  };\n\n  std::unordered_map<std::string, torch::Tensor> 
sendName2Buffer_;\n  std::unordered_map<std::string, torch::Tensor> replyName2Buffer_;\n\n  std::vector<SlotStatus> slotStatus_;\n  std::vector<int64_t> sentSlots_;\n\n  std::mutex mAvailSlots_;\n  std::condition_variable cvAvailSlots_;\n  std::vector<int> availSlots_;\n\n  std::unique_lock<std::mutex> lkFilled_;\n  std::mutex mFilled_;\n  std::condition_variable cvFilled_;\n  int numFilledSlot_ = 0;\n\n  std::mutex mReplied_;\n  std::condition_variable cvReplied_;\n\n  bool terminated_ = false;\n};\n}  // namespace tube\n"
  },
  {
    "path": "src/tube/src_cpp/dispatcher.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"data_block.h\"\n#include \"data_channel.h\"\n\nnamespace tube {\n\nclass Dispatcher {\n public:\n  static constexpr int DISPATCH_ERR_DC_TERM = -2;\n  static constexpr int DISPATCH_ERR_NO_SLOT = -1;\n  static constexpr int DISPATCH_NOERR = 0;\n\n  Dispatcher(std::shared_ptr<DataChannel> dc)\n      : dc_(std::move(dc)) {\n  }\n\n  void addDataBlocks(const std::vector<std::shared_ptr<DataBlock>>& send,\n                     const std::vector<std::shared_ptr<DataBlock>>& reply) {\n    for (auto b : send) {\n      auto ret = sendTensors_.insert({b->name, b->data});\n      if (!ret.second) {\n        std::cout << \"Error: duplicated sendkey for dispatcher, \"\n                  << \"key=\" << b->name << \", DataChannel=\" << dc_->name;\n        assert(false);\n      }\n    }\n\n    for (auto b : reply) {\n      auto ret = replyTensors_.insert({b->name, b->data});\n      if (!ret.second) {\n        std::cout << \"Error: duplicated replykey for dispatcher, \"\n                  << \"key=\" << b->name << \", DataChannel=\" << dc_->name;\n        assert(false);\n      }\n    }\n    dc_->createOrCheckBuffers(send, reply);\n  }\n\n  // send data and get reply\n  int dispatch() {\n    int slot = -1;\n    if (dc_->terminated()) {\n      return DISPATCH_ERR_DC_TERM;\n    }\n    std::unordered_map<std::string, torch::Tensor> sendBuffers =\n        dc_->getSlot(&slot);\n    if (slot == -1) {\n      return DISPATCH_ERR_NO_SLOT;\n    }\n    assert(slot >= 0 && slot < dc_->batchsize);\n    utils::copyTensors(sendTensors_, sendBuffers);\n\n    dc_->markSlotFilled(slot);\n\n    std::unordered_map<std::string, torch::Tensor> replyBuffers =\n        dc_->getReply(slot);\n    utils::copyTensors(replyBuffers, replyTensors_);\n\n    
dc_->releaseSlot(slot);\n    return DISPATCH_NOERR;\n  }\n\n  // send data and discard the reply without waiting for it\n  int dispatchNoReply() {\n    int slot = -1;\n    if (dc_->terminated()) {\n      return DISPATCH_ERR_DC_TERM;\n    }\n    std::unordered_map<std::string, torch::Tensor> sendBuffers =\n        dc_->getSlot(&slot);\n    if (slot == -1) {\n      return DISPATCH_ERR_NO_SLOT;\n    }\n    assert(slot >= 0 && slot < dc_->batchsize);\n    utils::copyTensors(sendTensors_, sendBuffers);\n\n    dc_->markSlotFilledAutoRelease(slot);\n    return DISPATCH_NOERR;\n  }\n\n  void terminate() {\n    if (dc_) {\n      dc_->terminate();\n    }\n  }\n\n private:\n  std::shared_ptr<DataChannel> dc_;\n  std::unordered_map<std::string, torch::Tensor> sendTensors_;\n  std::unordered_map<std::string, torch::Tensor> replyTensors_;\n};\n}  // namespace tube\n"
  },
  {
    "path": "src/tube/src_cpp/env_thread.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <atomic>\n#include <tuple>\n#include <unordered_map>\n\nnamespace tube {\n\nclass EnvThread {\n public:\n  using StatsValue = std::tuple<double, double, double>;\n  using Stats = std::unordered_map<std::string, StatsValue>;\n\n  EnvThread() = default;\n  EnvThread(EnvThread&& n) {\n    terminate_ = n.terminate_.load();\n  }\n\n  virtual ~EnvThread() {\n  }\n\n  virtual void mainLoop() = 0;\n\n  virtual void terminate() {\n    terminate_ = true;\n  }\n\n  /// Get various statistics associated with this thread\n  virtual Stats get_stats() {\n    return Stats();\n  }\n\n  std::atomic_bool terminate_{false};\n};\n}  // namespace tube\n"
  },
  {
    "path": "src/tube/src_cpp/episodic_trajectory.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"data_block.h\"\n\nnamespace tube {\n\nclass EpisodicTrajectory {\n public:\n  EpisodicTrajectory(const std::string& name,\n                     // int blockLen,\n                     const std::vector<int64_t>& sizes,\n                     torch::ScalarType dtype)\n      : name(name)\n      // , blockLen(blockLen)\n      , dtype(dtype)\n      , sizes(sizes)\n      , buffer(std::make_shared<DataBlock>(name, sizes, dtype)) {\n  }\n\n  int pushBack(torch::Tensor t) {\n    assert(t.dtype() == dtype);\n    assert(t.sizes() == sizes);\n    trajectory_.push_back(t);\n    return (int)trajectory_.size();\n  }\n\n  bool prepareForSend() {\n    if (trajectory_.empty()) {\n      return false;\n    }\n\n    buffer->data.copy_(trajectory_.back());\n    // buffer->data = std::move(trajectory_.back());\n    trajectory_.pop_back();\n    return true;\n  }\n\n  int len() {\n    return (int)trajectory_.size();\n  }\n\n  const std::string name;\n  // const int blockLen;\n  const torch::ScalarType dtype;\n  const std::vector<int64_t> sizes;\n\n  std::shared_ptr<DataBlock> buffer;\n\n private:\n  std::vector<torch::Tensor> trajectory_;\n};\n}  // namespace tube\n"
  },
  {
    "path": "src/tube/src_cpp/fixed_len_trajectory.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"data_block.h\"\n\nnamespace tube {\n\nclass FixedLengthTrajectory {\n public:\n  FixedLengthTrajectory(const std::string& name,\n                        int len,\n                        const std::vector<int64_t>& sizes,\n                        torch::ScalarType dtype)\n      : name(name)\n      , len(len)\n      , sizes(sizes)\n      , dtype(dtype)\n      , buffer(std::make_shared<DataBlock>(name, sizes, dtype))\n      , trajectory(std::make_shared<DataBlock>(\n            name, utils::pushLeft(len, sizes), dtype))\n      , nextSlot_(0) {\n  }\n\n  torch::Tensor& getBuffer() {\n    return buffer->data;\n  }\n\n  const torch::Tensor& getBuffer() const {\n    return buffer->data;\n  }\n\n  int pushBufferToTrajectory() {\n    int pushedSlot = nextSlot_;\n\n    // user might accidentally change the tensor\n    // TODO: better ways to prevent it?\n    assert(buffer->dtype() == dtype);\n    assert(buffer->sizes() == sizes);\n    trajectory->data[pushedSlot].copy_(buffer->data);\n\n    nextSlot_ = (nextSlot_ + 1) % len;\n    return pushedSlot;\n  }\n\n  const std::string name;\n  const int len;\n  const std::vector<int64_t> sizes;\n  const torch::ScalarType dtype;\n\n  // TODO: not good to be public, but need to be shared with dispatcher anyway\n  std::shared_ptr<DataBlock> buffer;\n  std::shared_ptr<DataBlock> trajectory;\n\n private:\n  int nextSlot_;\n};\n}  // namespace tube\n"
  },
  {
    "path": "src/tube/src_cpp/indefinite_trajectory.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include \"data_block.h\"\n#include <deque>\n#include <torch/extension.h>\n\nnamespace tube {\n\nclass IndefiniteTrajectory {\n public:\n  IndefiniteTrajectory(const std::string& name,\n                       int blockLen,\n                       const std::vector<int64_t>& sizes,\n                       torch::ScalarType dtype)\n      : name(name)\n      , blockLen(blockLen)\n      , dtype(dtype)\n      , sizes(sizes)\n      , buffer(std::make_shared<DataBlock>(name, sizes, dtype))\n      , trajectory(std::make_shared<DataBlock>(\n            name, utils::pushLeft(blockLen, sizes), dtype)) {\n  }\n\n  torch::Tensor& getBuffer() {\n    return buffer->data;\n  }\n\n  const torch::Tensor& getBuffer() const {\n    return buffer->data;\n  }\n\n  int pushBufferToTrajectory() {\n    // user might accidentally change the tensor\n    // TODO: better ways to prevent it?\n    assert(buffer->dtype() == dtype);\n    assert(buffer->sizes() == sizes);\n    trajectory_.push_back(buffer->data.clone());\n\n    return len();\n  }\n\n  bool prepareForSend() {\n    if ((int)trajectory_.size() < blockLen) {\n      return false;\n    }\n    for (int i = 0; i < blockLen; ++i) {\n      trajectory->data[i].copy_(trajectory_.front());\n      trajectory_.pop_front();\n    }\n    return true;\n  }\n\n  int len() {\n    return (int)trajectory_.size();\n  }\n\n  const std::string name;\n  const int blockLen;\n  const torch::ScalarType dtype;\n  const std::vector<int64_t> sizes;\n\n  std::shared_ptr<DataBlock> buffer;\n  std::shared_ptr<DataBlock> trajectory;\n\n private:\n  std::deque<torch::Tensor> trajectory_;\n};\n}  // namespace tube\n"
  },
  {
    "path": "src/tube/src_cpp/pybind.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <pybind11/pybind11.h>\n#include <torch/extension.h>\n\n#include \"context.h\"\n#include \"data_channel.h\"\n\n// for testing\n#include \"test/test_producer.h\"\n\nnamespace py = pybind11;\nusing namespace tube;\n\nPYBIND11_MODULE(tube, m) {\n  py::class_<DataChannel, std::shared_ptr<DataChannel>>(m, \"DataChannel\")\n      .def(py::init<std::string, int, int>())\n      .def_readonly(\"name\", &DataChannel::name)\n      .def(\"terminate\", &DataChannel::terminate)\n      .def(\"get_input\", &DataChannel::getInput,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"set_reply\", &DataChannel::setReply,\n           py::call_guard<py::gil_scoped_release>());\n\n  py::class_<EnvThread, std::shared_ptr<EnvThread>>(m, \"EnvThread\");\n\n  py::class_<Context>(m, \"Context\")\n      .def(py::init<>())\n      .def(\"push_env_thread\", &Context::pushEnvThread, py::keep_alive<1, 2>())\n      .def(\"start\", &Context::start)\n      .def(\"terminated\", &Context::terminated)\n      .def(\"get_stats_str\", &Context::getStatsStr);\n\n  // for testing\n  py::class_<ProducerThread, EnvThread, std::shared_ptr<ProducerThread>>(\n      m, \"ProducerThread\")\n      .def(py::init<int, std::shared_ptr<DataChannel>>());\n\n  py::class_<DualDispatchThread, EnvThread,\n             std::shared_ptr<DualDispatchThread>>(m, \"DualDispatchThread\")\n      .def(py::init<int, int, std::shared_ptr<DataChannel>,\n                    std::shared_ptr<DataChannel>>());\n}\n"
  },
  {
    "path": "src/tube/src_cpp/test/test_data_channel.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <iostream>\n#include <vector>\n\n#include \"../data_channel.h\"\n#include \"test_producer.h\"\n\nusing tube::DataChannel;\nusing tube::ProducerThread;\n\nclass ConsumerThread {\n public:\n  ConsumerThread(std::shared_ptr<DataChannel>& dc)\n      : dc_(dc) {\n  }\n\n  void mainLoop() {\n    int numBatch = 0;\n    while (true) {\n      const auto& input = dc_->getInput();\n\n      std::cout << \">>>In Consumer mainLoop, batch: \" << numBatch << std::endl;\n      std::cout << input.at(\"s\") << std::endl;\n      std::cout << \"========================\" << std::endl;\n\n      std::unordered_map<std::string, torch::Tensor> reply;\n      for (const auto& name2tensor : input) {\n        reply[\"a\"] = name2tensor.second.clone();\n      }\n      dc_->setReply(reply);\n      ++numBatch;\n    }\n  }\n\n private:\n  std::shared_ptr<DataChannel> dc_;\n};\n\nint main() {\n  int batchsize = 10;\n  int numThread = 10;\n\n  auto dc = std::make_shared<DataChannel>(\"default_channel\", batchsize, -1);\n\n  std::vector<ProducerThread> producers;\n  producers.reserve(numThread);\n  std::vector<std::thread> tProducers;\n  for (int i = 0; i < numThread; ++i) {\n    producers.push_back(ProducerThread(i, dc));\n    std::thread t(&ProducerThread::mainLoop, std::ref(producers[i]));\n    tProducers.push_back(std::move(t));\n    std::cout << \"add producer: \" << i << std::endl;\n  }\n\n  ConsumerThread consumer(dc);\n  std::thread tConsumer(&ConsumerThread::mainLoop, std::ref(consumer));\n\n  for (int i = 0; i < numThread; ++i) {\n    tProducers[i].join();\n  }\n\n  tConsumer.join();\n\n  return 0;\n}\n"
  },
  {
    "path": "src/tube/src_cpp/test/test_producer.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <iostream>\n#include <memory>\n#include <vector>\n\n#include \"../data_block.h\"\n#include \"../data_channel.h\"\n#include \"../dispatcher.h\"\n#include \"../env_thread.h\"\n\nnamespace tube {\n\nclass ProducerThread : public EnvThread {\n public:\n  ProducerThread(int threadIdx, std::shared_ptr<DataChannel> dc)\n      : threadIdx(threadIdx)\n      , dispatcher_(std::make_unique<Dispatcher>(std::move(dc))) {\n    s_ = std::make_shared<DataBlock>(\n        \"s\", std::initializer_list<int64_t>{1}, torch::kInt32);\n    a_ = std::make_shared<DataBlock>(\n        \"a\", std::initializer_list<int64_t>{1}, torch::kInt32);\n\n    dispatcher_->addDataBlocks({s_}, {a_});\n    std::cout << \"create thread: \" << threadIdx << std::endl;\n  }\n\n  virtual void mainLoop() override {\n    std::cout << \"in mainloop, thread: \" << threadIdx << std::endl;\n    for (int i = 0; i < 10; ++i) {\n      s_->data[0] = threadIdx;\n      dispatcher_->dispatch();\n\n      std::cout << \"thread: \" << threadIdx << \", reply: \" << a_->data[0]\n                << std::endl;\n    }\n  }\n\n  const int threadIdx;\n\n private:\n  std::unique_ptr<Dispatcher> dispatcher_;\n  std::shared_ptr<DataBlock> s_;\n  std::shared_ptr<DataBlock> a_;\n  ;\n};\n\nclass DualDispatchThread : public EnvThread {\n public:\n  DualDispatchThread(int threadIdx,\n                     int maxStep,\n                     std::shared_ptr<DataChannel> dcFast,\n                     std::shared_ptr<DataChannel> dcSlow)\n      : threadIdx(threadIdx)\n      , maxStep(maxStep)\n      , dispatcherFast_(std::make_unique<Dispatcher>(std::move(dcFast)))\n      , dispatcherSlow_(std::make_unique<Dispatcher>(std::move(dcSlow))) {\n    auto sf = std::make_shared<DataBlock>(\n        \"s\", 
std::initializer_list<int64_t>{1}, torch::kInt32);\n    auto af = std::make_shared<DataBlock>(\n        \"a\", std::initializer_list<int64_t>{1}, torch::kInt32);\n\n    blocksFastSend_.push_back(sf);\n    blocksFastReply_.push_back(af);\n    dispatcherFast_->addDataBlocks(blocksFastSend_, blocksFastReply_);\n\n    auto ss = std::make_shared<DataBlock>(\n        \"s\", std::initializer_list<int64_t>{1}, torch::kInt32);\n    auto as = std::make_shared<DataBlock>(\n        \"a\", std::initializer_list<int64_t>{1}, torch::kInt32);\n\n    blocksSlowSend_.push_back(ss);\n    blocksSlowReply_.push_back(as);\n    dispatcherSlow_->addDataBlocks(blocksSlowSend_, blocksSlowReply_);\n\n    std::cout << \"create thread: \" << threadIdx << std::endl;\n  }\n\n  virtual void mainLoop() override {\n    std::cout << \"in mainloop, thread: \" << threadIdx << std::endl;\n    int i = 0;\n    while (i < 3) {\n      blocksFastSend_[0]->data[0] = threadIdx;\n      dispatcherFast_->dispatch();\n      // std::cout << \"thread: \" << threadIdx << \", stepIdx: \" << stepIdx_ << \",\n      // reply(fast): \"\n      //           << blocksFastReply_[0]->data[0] << std::endl;\n      ++stepIdx_;\n      if (stepIdx_ == maxStep) {\n        // std::cout << \">>>thread Slow dispatch\" << std::endl;\n        blocksSlowSend_[0]->data[0] = threadIdx * threadIdx;\n        dispatcherSlow_->dispatch();\n        // std::cout << \"thread: \" << threadIdx << \", reply(slow): \"\n        //           << blocksSlowReply[0]->data[0] << std::endl;\n        stepIdx_ = 0;\n        ++i;\n      }\n    }\n    std::cout << \"thread: \" << threadIdx << \" done\" << std::endl;\n  }\n\n  const int threadIdx;\n  const int maxStep;\n\n private:\n  int stepIdx_ = 0;\n  std::unique_ptr<Dispatcher> dispatcherFast_;\n  std::unique_ptr<Dispatcher> dispatcherSlow_;\n  std::vector<std::shared_ptr<DataBlock>> blocksFastSend_, blocksFastReply_;\n  std::vector<std::shared_ptr<DataBlock>> blocksSlowSend_, blocksSlowReply_;\n};\n\n}  // 
namespace tube\n"
  },
  {
    "path": "src/tube/src_cpp/utils.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#pragma once\n\n#include <iostream>\n#include <torch/torch.h>\n#include <unordered_map>\n#include <vector>\n\nnamespace tube {\n\nnamespace utils {\n\ninline int getProduct(const std::vector<int64_t>& nums) {\n  int prod = 1;\n  for (auto v : nums) {\n    prod *= v;\n  }\n  return prod;\n}\n\ninline std::vector<int64_t> pushLeft(int64_t left,\n                                     const std::vector<int64_t>& nums) {\n  std::vector<int64_t> vec;\n  vec.push_back(left);\n  for (auto v : nums) {\n    vec.push_back(v);\n  }\n  return vec;\n}\n\ntemplate <typename T> inline void printVector(const std::vector<T>& vec) {\n  for (const auto& v : vec) {\n    std::cout << v << \", \";\n  }\n  std::cout << std::endl;\n}\n\ntemplate <typename T> inline void printMapKey(const T& map) {\n  for (const auto& name2sth : map) {\n    std::cout << name2sth.first << \", \";\n  }\n  std::cout << std::endl;\n}\n\ninline void verifyTensors(\n    const std::unordered_map<std::string, torch::Tensor>& src,\n    const std::unordered_map<std::string, torch::Tensor>& dest) {\n  if (src.size() != dest.size()) {\n    std::cout << \"src.size()[\" << src.size() << \"] != dest.size()[\"\n              << dest.size() << \"]\" << std::endl;\n    std::cout << \"src keys: \";\n    for (const auto& p : src)\n      std::cout << p.first << \" \";\n    std::cout << \"dest keys: \";\n    for (const auto& p : dest)\n      std::cout << p.first << \" \";\n    std::cout << std::endl;\n    assert(false);\n  }\n\n  for (const auto& name2tensor : src) {\n    const auto& name = name2tensor.first;\n    const auto& srcTensor = name2tensor.second;\n    // std::cout << \"in copy: trying to get: \" << name << std::endl;\n    // std::cout << \"dest map keys\" << std::endl;\n    // 
printMapKey(dest);\n    const auto& destTensor = dest.at(name);\n    // if (destTensor.sizes() != srcTensor.sizes()) {\n    //   std::cout << \"copy size-mismatch: \"\n    //             << destTensor.sizes() << \", \" << srcTensor.sizes() <<\n    //             std::endl;\n    // }\n    if (destTensor.sizes() != srcTensor.sizes()) {\n      std::cout << name << \", dstSize: \" << destTensor.sizes()\n                << \", srcSize: \" << srcTensor.sizes() << std::endl;\n      assert(false);\n    }\n\n    if (destTensor.dtype() != srcTensor.dtype()) {\n      std::cout << name << \", dstType: \" << destTensor.dtype()\n                << \", srcType: \" << srcTensor.dtype() << std::endl;\n      assert(false);\n    }\n  }\n}\n\ninline void copyTensors(\n    const std::unordered_map<std::string, torch::Tensor>& src,\n    std::unordered_map<std::string, torch::Tensor>& dest) {\n  verifyTensors(src, dest);\n  for (const auto& name2tensor : src) {\n    const auto& name = name2tensor.first;\n    const auto& srcTensor = name2tensor.second;\n    // std::cout << \"in copy: trying to get: \" << name << std::endl;\n    // std::cout << \"dest map keys\" << std::endl;\n    // printMapKey(dest);\n    auto& destTensor = dest.at(name);\n    // if (destTensor.sizes() != srcTensor.sizes()) {\n    //   std::cout << \"copy size-mismatch: \"\n    //             << destTensor.sizes() << \", \" << srcTensor.sizes() <<\n    //             std::endl;\n    // }\n    destTensor.copy_(srcTensor);\n  }\n}\n\n// TODO: maybe merge these two functions?\ninline void copyTensors(\n    const std::unordered_map<std::string, torch::Tensor>& src,\n    std::unordered_map<std::string, torch::Tensor>& dest,\n    std::vector<int64_t>& index) {\n  assert(src.size() == dest.size());\n  assert(!index.empty());\n  torch::Tensor indexTensor =\n      torch::from_blob(index.data(), {(int64_t)index.size()}, torch::kInt64);\n\n  for (const auto& name2tensor : src) {\n    const auto& name = name2tensor.first;\n    const 
auto& srcTensor = name2tensor.second;\n    auto& destTensor = dest.at(name);\n    // assert(destTensor.sizes() == srcTensor.sizes());\n    assert(destTensor.dtype() == srcTensor.dtype());\n    destTensor.index_copy_(0, indexTensor, srcTensor);\n  }\n}\n\n}  // namespace utils\n}  // namespace tube\n"
  },
  {
    "path": "tests/CMakeLists.txt",
    "content": "cmake_minimum_required( VERSION 3.3 )\nproject( polygames-tests )\nset(CMAKE_CXX_STANDARD 17)\n\nexecute_process(\n    COMMAND python -c \"import torch; import os; print(os.path.dirname(torch.__file__), end='')\"\n    OUTPUT_VARIABLE TorchPath\n)\nset(CMAKE_PREFIX_PATH ${TorchPath})\nfind_package(Torch REQUIRED)\ninclude_directories(${TORCH_INCLUDE_DIRS})\n\nfind_package( PythonInterp 3.7 REQUIRED )\nfind_package( PythonLibs 3.7 REQUIRED )\ninclude_directories( ${PYTHON_INCLUDE_DIRS} )\n\nfind_package (Threads)\n\nfind_package( GTest REQUIRED )\ninclude_directories( ${GTEST_INCLUDE_DIRS} )\n\nfind_package(JNI REQUIRED)\ninclude_directories( ${JNI_INCLUDE_DIRS})\n\ninclude_directories(\n ../games\n ../torchRL\n ../torchRL/third_party/fmt/include\n ../torchRL/tube/src_cpp\n )\n\nadd_executable( polygames-tests\n ../core/game.cc\n ../core/state.cc\n ../torchRL/mcts/mcts.cc\n ../torchRL/mcts/node.cc\n ../torchRL/tube/src_cpp/data_channel.cc\n ../torchRL/tube/src_cpp/replay_buffer.cc\n tests.cc\n\n # Include your tests here.\n connectfour-tests.cc\n havannah-state-tests.cc\n havannah-tests.cc\n hex-state-tests.cc\n hex-tests.cc\n\n ludii-game-tests.cc\n ../games/ludii/jni_utils.cc\n ../games/ludii/ludii_game_wrapper.cc\n ../games/ludii/ludii_state_wrapper.cc\n )\n\ntarget_link_libraries( polygames-tests \n    ${CMAKE_THREAD_LIBS_INIT}\n    ${GTEST_LIBRARIES}\n    ${JNI_LIBRARIES}\n    ${TORCH_LIBRARIES}\n    )\n\ninstall (TARGETS polygames-tests DESTINATION bin)\n\n"
  },
  {
    "path": "tests/README.md",
    "content": "Unit tests for games\n\n```\nconda activate pypg\n# or: source activate pypg\n\nconda install gtest\n# if necessary\n\nmkdir build\ncd build\ncmake ..\nmake -j\n./polygames-tests\n```\n\n"
  },
  {
    "path": "tests/connectfour-tests.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Just a few tests for understanding the public methods of Action and State,\n// with an already implemented game. It may be interesting to write \"real\" unit\n// tests.\n\n#include <gtest/gtest.h>\n#include \"utils.h\"\n#include <connectfour.h>\n\nTEST(Connectfour, init_1) {\n\n StateForConnectFour state(0);\n state.Initialize();\n\n ASSERT_EQ((std::vector<int64_t>{3, 6, 7}), state.GetFeatureSize());\n ASSERT_EQ((std::vector<int64_t>{7, 1, 1}), state.GetActionSize());\n ASSERT_EQ(GameStatus::player0Turn, GameStatus(state.getCurrentPlayer()));\n\n for (int i=0; i<7; ++i) {\n  auto a_i = std::dynamic_pointer_cast<ActionForConnectFour>(state.GetLegalActions()[i]);\n  ASSERT_EQ(i, a_i->GetX());\n  ASSERT_EQ(0, a_i->GetY());\n  ASSERT_EQ(0, a_i->GetZ());\n  ASSERT_EQ(i, a_i->GetHash());\n  ASSERT_EQ(i, a_i->GetIndex());\n }\n\n std::vector<float> expectedFeatures {\n\n  // history - 0, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, player 1\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n\n };\n\n // DEBUG\n // 
printPlanes<std::vector<float>>(state.GetFeatures(), 3, 6, 7);\n // printPlanes<std::vector<float>>(expectedFeatures, 3, 6, 7);\n // printData<std::vector<float>>(state.GetFeatures());\n // printData<std::vector<float>>(expectedFeatures);\n\n ASSERT_EQ(expectedFeatures.size(), 3*6*7);\n ASSERT_EQ(state.GetFeatures().size(), 3*6*7);\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n}\n\n\nTEST(Connectfour, play_1) {\n\n StateForConnectFour state(0);\n state.Initialize();\n\n ActionForConnectFour action(1, 7);\n state.ApplyAction(action);\n\n ASSERT_EQ((std::vector<int64_t>{3, 6, 7}), state.GetFeatureSize());\n ASSERT_EQ((std::vector<int64_t>{7, 1, 1}), state.GetActionSize());\n ASSERT_EQ(GameStatus::player1Turn, GameStatus(state.getCurrentPlayer()));\n\n for (int i=0; i<7; ++i) {\n  auto a_i = std::dynamic_pointer_cast<ActionForConnectFour>(state.GetLegalActions()[i]);\n  ASSERT_EQ(i, a_i->GetX());\n  ASSERT_EQ(0, a_i->GetY());\n  ASSERT_EQ(0, a_i->GetZ());\n  ASSERT_EQ(i, a_i->GetHash());\n  ASSERT_EQ(i, a_i->GetIndex());\n }\n\n std::vector<float> expectedFeatures {\n\n  // history - 0, player 0\n     0.f, 1.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, player 1\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,\n\n };\n\n // DEBUG\n // printPlanes<std::vector<float>>(state.GetFeatures(), 3, 
6, 7);\n // printPlanes<std::vector<float>>(expectedFeatures, 3, 6, 7);\n\n ASSERT_EQ(expectedFeatures.size(), 3*6*7);\n ASSERT_EQ(state.GetFeatures().size(), 3*6*7);\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n}\n\n\n"
  },
  {
    "path": "tests/havannah-state-tests.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Unit tests for Havannah Action/State.\n\n#include <havannah_state.h>\n#include <gtest/gtest.h>\n#include \"utils.h\"\n\n///////////////////////////////////////////////////////////////////////////////\n// helpers\n///////////////////////////////////////////////////////////////////////////////\n\nnamespace Havannah {\n\n template <int SIZE, bool PIE, bool EXTENDED> class StateTest :\n   public Havannah::State<SIZE, PIE, EXTENDED> {\n  public:\n   StateTest<SIZE, PIE, EXTENDED>(int seed, int history, bool turnFeatures) :\n    Havannah::State<SIZE, PIE, EXTENDED>(seed, history, turnFeatures) {}\n   GameStatus GetStatus() { return ::State::_status; };\n };\n\n};\n\n\n///////////////////////////////////////////////////////////////////////////////\n// unit tests\n///////////////////////////////////////////////////////////////////////////////\n\nTEST(HavannahStateGroup, init_0) {\n\n const int size = 5;\n const int history = 0;\n const bool turnFeatures = true;\n const int fullsize = 2*size - 1;\n const int nbChannels = 3*(1+history) + (turnFeatures ? 
1 : 0);\n const int nbActions = fullsize*fullsize - size*(size-1);\n\n Havannah::StateTest<size, true, false> state(0, history, turnFeatures);\n\n ASSERT_EQ(GameStatus::player0Turn, state.GetStatus());\n\n // features\n std::vector<float> expectedFeatures(nbChannels*fullsize*fullsize, 0.f);\n const std::vector<float> boardFeatures = {\n  0, 0, 0, 0, 1, 1, 1, 1, 1, \n  0, 0, 0, 1, 1, 1, 1, 1, 1, \n  0, 0, 1, 1, 1, 1, 1, 1, 1, \n  0, 1, 1, 1, 1, 1, 1, 1, 1, \n  1, 1, 1, 1, 1, 1, 1, 1, 1, \n  1, 1, 1, 1, 1, 1, 1, 1, 0, \n  1, 1, 1, 1, 1, 1, 1, 0, 0, \n  1, 1, 1, 1, 1, 1, 0, 0, 0, \n  1, 1, 1, 1, 1, 0, 0, 0, 0\n };\n const int f2 = fullsize*fullsize;\n std::copy(boardFeatures.begin(), boardFeatures.end(), expectedFeatures.begin() + 2*f2);\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(expectedFeatures, nbChannels, fullsize, fullsize);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(state.GetFeatures(), nbChannels, fullsize, fullsize);\n\n ASSERT_EQ((std::vector<int64_t>{nbChannels, fullsize, fullsize}), state.GetFeatureSize());\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n // actions\n ASSERT_EQ((std::vector<int64_t>{1, fullsize, fullsize}), state.GetActionSize());\n ASSERT_EQ(nbActions, state.GetLegalActions().size());\n\n}\n\n\nTEST(HavannahStateGroup, init_1) {\n\n const int size = 8;\n const int history = 2;\n const bool turnFeatures = true;\n const int fullsize = 2*size - 1;\n const int nbChannels = 3*(1+history) + (turnFeatures ? 
1 : 0);\n const int nbActions = fullsize*fullsize - size*(size-1);\n\n Havannah::StateTest<size, true, false> state(0, history, turnFeatures);\n\n ASSERT_EQ(GameStatus::player0Turn, state.GetStatus());\n\n // features\n std::vector<float> expectedFeatures(nbChannels*fullsize*fullsize, 0.f);\n const std::vector<float> boardFeatures = {\n  0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, \n  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n  0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n  0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n  0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n  0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n  0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, \n  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, \n  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, \n  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, \n  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, \n  1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, \n  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0\n };\n const int f2 = fullsize*fullsize;\n std::copy(boardFeatures.begin(), boardFeatures.end(), expectedFeatures.begin() + 2*f2);\n std::copy(boardFeatures.begin(), boardFeatures.end(), expectedFeatures.begin() + 5*f2);\n std::copy(boardFeatures.begin(), boardFeatures.end(), expectedFeatures.begin() + 8*f2);\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(expectedFeatures, nbChannels, fullsize, fullsize);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(state.GetFeatures(), nbChannels, fullsize, fullsize);\n\n ASSERT_EQ((std::vector<int64_t>{nbChannels, fullsize, fullsize}), state.GetFeatureSize());\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n // actions\n ASSERT_EQ((std::vector<int64_t>{1, fullsize, fullsize}), state.GetActionSize());\n ASSERT_EQ(nbActions, 
state.GetLegalActions().size());\n\n}\n\n\nTEST(HavannahStateGroup, init_2) {\n\n const int size = 3;\n const int history = 0;\n const bool turnFeatures = false;\n const int fullsize = 2*size - 1;\n const int nbChannels = 3*(1+history) + (turnFeatures ? 1 : 0);\n const int nbActions = fullsize*fullsize - size*(size-1);\n\n Havannah::StateTest<size, true, false> state(0, history, turnFeatures);\n\n ASSERT_EQ(GameStatus::player0Turn, state.GetStatus());\n\n // features\n std::vector<float> expectedFeatures(nbChannels*fullsize*fullsize, 0.f);\n const std::vector<float> boardFeatures = {\n  0, 0, 1, 1, 1, \n  0, 1, 1, 1, 1, \n  1, 1, 1, 1, 1, \n  1, 1, 1, 1, 0, \n  1, 1, 1, 0, 0\n };\n const int f2 = fullsize*fullsize;\n std::copy(boardFeatures.begin(), boardFeatures.end(), expectedFeatures.begin() + 2*f2);\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(expectedFeatures, nbChannels, fullsize, fullsize);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(state.GetFeatures(), nbChannels, fullsize, fullsize);\n\n ASSERT_EQ((std::vector<int64_t>{nbChannels, fullsize, fullsize}), state.GetFeatureSize());\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n // actions\n ASSERT_EQ((std::vector<int64_t>{1, fullsize, fullsize}), state.GetActionSize());\n ASSERT_EQ(nbActions, state.GetLegalActions().size());\n\n std::vector<std::pair<int,int>> actions {{\n            {0,2}, {0,3}, {0,4},\n\n        {1,1}, {1,2}, {1,3}, {1,4},\n\n     {2,0}, {2,1}, {2,2}, {2,3}, {2,4},\n\n        {3,0}, {3,1}, {3,2}, {3,3},\n\n            {4,0}, {4,1}, {4,2}\n }};\n\n for (int k=0; k<nbActions; k++) {\n  auto expectedAction = actions[k];\n  auto action = state.GetLegalActions()[k];\n  int i = expectedAction.first;\n  int j = expectedAction.second;\n  int h = i*fullsize + j;\n  ASSERT_EQ(0, action->GetX());\n  ASSERT_EQ(i, action->GetY());\n  ASSERT_EQ(j, action->GetZ());\n  ASSERT_EQ(h, 
action->GetHash());\n  ASSERT_EQ(k, action->GetIndex());\n }\n\n}\n\n\nTEST(HavannahStateGroup, clone_1) {\n\n try {\n  Havannah::State<4, true, false> state(0);\n  auto clone = state.clone();\n  auto ptrClone = dynamic_cast<Havannah::State<4, true, false> *>(clone.get());\n\n  ASSERT_NE(&state, ptrClone);\n  ASSERT_EQ(37, state.GetLegalActions().size());\n  ASSERT_EQ(37, ptrClone->GetLegalActions().size());\n\n  Havannah::Action<4> a(2, 3, 11);\n  // 0 0 0 1 1 1 1 \n  // 0 0 1 1 1 1 1 \n  // 0 1 1 A 1 1 1 \n  // 1 1 1 1 1 1 1 \n  // 1 1 1 1 1 1 0 \n  // 1 1 1 1 1 0 0 \n  // 1 1 1 1 0 0 0 \n  // DEBUG\n  // std::cout << state.actionDescription(a) << std::endl;\n  // auto pa11 = state.GetLegalActions()[11];\n  // std::cout << state.actionDescription(*pa11) << std::endl;\n  // for (auto pa : state.GetLegalActions())\n  //     std::cout << state.actionDescription(*pa) << \" \";\n  // std::cout << std::endl;\n\n  state.ApplyAction(a);\n\n  ASSERT_EQ(37, state.GetLegalActions().size());\n  // still 37 actions because of swap\n\n  ASSERT_EQ(37, ptrClone->GetLegalActions().size());\n }\n catch (std::bad_cast) {\n  FAIL() << \"not a Havannah::State<4, true, false>\"; \n }\n\n}\n\n\nTEST(HavannahStateGroup, clone_2) {\n\n try {\n  Havannah::State<4, true, false> state(0);\n  auto clone = state.clone();\n  auto ptrClone = dynamic_cast<Havannah::State<4, true, false> *>(clone.get());\n\n  ASSERT_NE(&state, ptrClone);\n  ASSERT_EQ(37, state.GetLegalActions().size());\n  ASSERT_EQ(37, ptrClone->GetLegalActions().size());\n\n  Havannah::Action<4> a(2, 3, -1);\n  state.ApplyAction(a);\n  state.ApplyAction(a);  // swap\n  ASSERT_EQ(36, state.GetLegalActions().size());\n  ASSERT_EQ(37, ptrClone->GetLegalActions().size());\n }\n catch (std::bad_cast) {\n  FAIL() << \"not a Havannah::State<4, true, false>\"; \n }\n\n}\n\n\nTEST(HavannahStateGroup, features_1_pie) {\n\n const int size = 3;\n const int history = 2;\n const bool turnFeatures = true;\n const int fullsize = 2*size - 1;\n 
const int nbChannels = 3*(1+history) + (turnFeatures ? 1 : 0);\n const int nbActions = fullsize*fullsize - size*(size-1);\n\n Havannah::StateTest<size, true, false> state(0, history, turnFeatures);\n\n // apply actions\n\n ASSERT_EQ((std::vector<int64_t>{1, fullsize, fullsize}), state.GetActionSize());\n\n auto currentPlayer = GameStatus::player0Turn;\n auto nextPlayer = GameStatus::player1Turn;\n int k = nbActions;\n ASSERT_EQ(currentPlayer, state.GetStatus());\n ASSERT_EQ(k, state.GetLegalActions().size());\n\n // first action\n const Havannah::Action<fullsize> a0 {1,2,-1};\n state.ApplyAction(a0);\n std::swap(currentPlayer, nextPlayer);\n ASSERT_EQ(currentPlayer, state.GetStatus());\n ASSERT_EQ(k, state.GetLegalActions().size());\n\n // second action\n const Havannah::Action<fullsize> a1 {2,2,-1};\n state.ApplyAction(a1);\n std::swap(currentPlayer, nextPlayer);\n k -= 2;\n ASSERT_EQ(currentPlayer, state.GetStatus());\n ASSERT_EQ(k, state.GetLegalActions().size());\n\n // next actions\n const std::vector<Havannah::Action<fullsize>> actions {{\n     {3,0,-1}\n }};\n for (const auto & a : actions) {\n     state.ApplyAction(a);\n     std::swap(currentPlayer, nextPlayer);\n     k--;\n     ASSERT_EQ(currentPlayer, state.GetStatus());\n     ASSERT_EQ(k, state.GetLegalActions().size());\n }\n ASSERT_EQ(GameStatus::player1Turn, state.GetStatus());\n\n // check features\n\n ASSERT_EQ((std::vector<int64_t>{nbChannels, fullsize, fullsize}), state.GetFeatureSize());\n\n std::vector<float> expectedFeatures {\n  // history - 2, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 2, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 2, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 
1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // history - 1, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 1, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 1, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // history - 0, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     1.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // turn\n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f\n\n };\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(expectedFeatures, nbChannels, fullsize, fullsize);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(state.GetFeatures(), nbChannels, fullsize, fullsize);\n\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n}\n\n\nTEST(HavannahStateGroup, features_1_nopie) {\n\n const int size = 3;\n const int history = 2;\n const bool turnFeatures = true;\n const int fullsize = 2*size - 1;\n const int nbChannels = 3*(1+history) + (turnFeatures ? 
1 : 0);\n const int nbActions = fullsize*fullsize - size*(size-1);\n\n Havannah::StateTest<size, false, false> state(0, history, turnFeatures);\n\n // apply actions\n\n ASSERT_EQ((std::vector<int64_t>{1, fullsize, fullsize}), state.GetActionSize());\n\n std::vector<Havannah::Action<fullsize>> actions {{\n     {1,2,-1}, {2,2,-1},\n     {3,0,-1}\n }};\n\n auto currentPlayer = GameStatus::player0Turn;\n auto nextPlayer = GameStatus::player1Turn;\n int k = nbActions;\n for (const auto & a : actions) {\n     ASSERT_EQ(currentPlayer, state.GetStatus());\n     ASSERT_EQ(k, state.GetLegalActions().size());\n     state.ApplyAction(a);\n     std::swap(currentPlayer, nextPlayer);\n     k--;\n     ASSERT_EQ(k, state.GetLegalActions().size());\n }\n ASSERT_EQ(GameStatus::player1Turn, state.GetStatus());\n\n // check features\n\n ASSERT_EQ((std::vector<int64_t>{nbChannels, fullsize, fullsize}), state.GetFeatureSize());\n\n std::vector<float> expectedFeatures {\n  // history - 2, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 2, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 2, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // history - 1, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 1, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 1, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 
1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // history - 0, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     1.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // turn\n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f\n\n };\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(expectedFeatures, nbChannels, fullsize, fullsize);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(state.GetFeatures(), nbChannels, fullsize, fullsize);\n\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n}\n\n\nTEST(HavannahStateGroup, features_2_nopie) {\n\n const int size = 3;\n const int history = 2;\n const bool turnFeatures = true;\n const int fullsize = 2*size - 1;\n const int nbChannels = 3*(1+history) + (turnFeatures ? 
1 : 0);\n const int nbActions = fullsize*fullsize - size*(size-1);\n\n Havannah::StateTest<size, false, false> state(0, history, turnFeatures);\n\n // apply actions\n\n ASSERT_EQ((std::vector<int64_t>{1, fullsize, fullsize}), state.GetActionSize());\n\n std::vector<Havannah::Action<fullsize>> actions {{\n     {1,2,-1}, {2,2,-1}\n }};\n\n auto currentPlayer = GameStatus::player0Turn;\n auto nextPlayer = GameStatus::player1Turn;\n int k = nbActions;\n for (const auto & a : actions) {\n     ASSERT_EQ(currentPlayer, state.GetStatus());\n     ASSERT_EQ(k, state.GetLegalActions().size());\n     state.ApplyAction(a);\n     std::swap(currentPlayer, nextPlayer);\n     k--;\n     ASSERT_EQ(k, state.GetLegalActions().size());\n }\n ASSERT_EQ(GameStatus::player0Turn, state.GetStatus());\n\n // check features\n\n ASSERT_EQ((std::vector<int64_t>{nbChannels, fullsize, fullsize}), state.GetFeatureSize());\n\n std::vector<float> expectedFeatures {\n  // history - 2, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 2, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 2, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // history - 1, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 1, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 1, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 
1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // history - 0, player 0\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n  // history - 0, board cells\n     0.f, 0.f, 1.f, 1.f, 1.f, \n     0.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 1.f, \n     1.f, 1.f, 1.f, 1.f, 0.f, \n     1.f, 1.f, 1.f, 0.f, 0.f, \n\n  // turn\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f,\n\n };\n\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(expectedFeatures, nbChannels, fullsize, fullsize);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(state.GetFeatures(), nbChannels, fullsize, fullsize);\n\n // std::cout << \"*** expected ***\" << std::endl;\n // printData<const std::vector<float>&>(expectedFeatures);\n // std::cout << \"*** actual ***\" << std::endl;\n // printData<const std::vector<float>&>(state.GetFeatures());\n\n}\n\n\nTEST(HavannahStateGroup, features_3_nopie) {\n\n const int size = 4;\n const int history = 2;\n const bool turnFeatures = true;\n const int fullsize = 2*size - 1;\n const int nbChannels = 3*(1+history) + (turnFeatures ? 
1 : 0);\n const int nbActions = fullsize*fullsize - size*(size-1);\n\n Havannah::StateTest<size, false, false> state(0, history, turnFeatures);\n\n // apply actions\n\n ASSERT_EQ((std::vector<int64_t>{1, fullsize, fullsize}), state.GetActionSize());\n\n std::vector<Havannah::Action<fullsize>> actions {{\n     {2,2,-1}, {5,3,-1}, \n     {1,4,-1}, {2,3,-1}, \n     {3,3,-1}, {3,5,-1}, \n     {2,4,-1}, {6,2,-1}, \n     {3,2,-1}, {4,4,-1}, \n     {0,4,-1}, {2,6,-1}, \n     {1,3,-1}\n }};\n\n auto currentPlayer = GameStatus::player0Turn;\n auto nextPlayer = GameStatus::player1Turn;\n int k = nbActions;\n for (const auto & a : actions) {\n     ASSERT_EQ(currentPlayer, state.GetStatus());\n     ASSERT_EQ(k, state.GetLegalActions().size());\n     state.ApplyAction(a);\n     std::swap(currentPlayer, nextPlayer);\n     k--;\n     ASSERT_EQ(k, state.GetLegalActions().size());\n }\n ASSERT_EQ(GameStatus::player0Win, state.GetStatus());\n\n // check features\n\n ASSERT_EQ((std::vector<int64_t>{nbChannels, fullsize, fullsize}), state.GetFeatureSize());\n\n std::vector<float> expectedFeatures {\n   // history - 2, player 0\n   0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 1.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n\n   // history - 2, player 1\n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, \n\n   // history - 2, board cells\n   0.f, 0.f, 0.f, 1.f, 1.f, 1.f, 1.f, \n   0.f, 0.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   0.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, \n   1.f, 
1.f, 1.f, 1.f, 0.f, 0.f, 0.f, \n\n   // history - 1, player 0\n   0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 1.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n\n   // history - 1, player 1\n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 1.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, \n\n   // history - 1, board cells\n   0.f, 0.f, 0.f, 1.f, 1.f, 1.f, 1.f, \n   0.f, 0.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   0.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, \n   1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, \n\n   // history - 0, player 0\n   0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 1.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 1.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n\n   // history - 0, player 1\n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 1.f, \n   0.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, \n   0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, \n   0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, \n   0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, \n\n   // history - 0, board cells\n   0.f, 0.f, 0.f, 1.f, 1.f, 1.f, 1.f, \n   0.f, 0.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   0.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, \n   1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, \n\n   // turn\n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n  
 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, \n   1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f\n\n };\n\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(expectedFeatures, nbChannels, fullsize, fullsize);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<const std::vector<float>&>(state.GetFeatures(), nbChannels, fullsize, fullsize);\n\n // std::cout << \"*** expected ***\" << std::endl;\n // printData<const std::vector<float>&>(expectedFeatures);\n // std::cout << \"*** actual ***\" << std::endl;\n // printData<const std::vector<float>&>(state.GetFeatures());\n\n // Just a hack for converting to actions some indices \n // (obtained using the GUI: https://gitlab.com/juliendehos/hex_hav).\n // Havannah::Board<4> b;\n // b.reset();\n // std::vector<int> gameIndices = {\n //  16, 38,\n //  11, 17,\n //  24, 26,\n //  18, 44,\n //  23, 32,\n //   4, 20,\n //  10,\n // };\n // for (int i : gameIndices) {\n //  auto c = b.convertIndexToCell(i);\n //  std::cout << \"{\" << c.first << \",\" << c.second << \"}, \" << std::endl;\n // }\n\n}\n\n\n"
  },
  {
    "path": "tests/havannah-tests.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Unit tests for the Havannah game.\n\n#include <havannah.h>\n#include <gtest/gtest.h>\n#include \"utils.h\"\n\n///////////////////////////////////////////////////////////////////////////////\n// helpers\n///////////////////////////////////////////////////////////////////////////////\n\nstatic void CheckHavannahPathInfo(const Havannah::PathInfo & path, Havannah::Color color,\n  unsigned borders, unsigned corners, int m) {\n ASSERT_EQ(path._color, color);\n ASSERT_EQ(path._borders, borders);\n ASSERT_EQ(path._corners, corners);\n ASSERT_EQ(path._mainPathIndex, m);\n}\n\nnamespace Havannah {\n\n template<int SIZE, bool PIE>\n  class BoardTest : public Board<SIZE, PIE> {\n\n   public:\n\n    BoardTest() : Board<SIZE, PIE>() {}\n\n    int getNbIndices() const {\n        return Board<SIZE, PIE>::_nbIndices;\n    }\n\n    int getNbEmptyIndices() const {\n        return Board<SIZE, PIE>::_nbEmptyIndices;\n    }\n\n    void getPathIndexAndColorAtIndex(int index, int& pathIndex, Color& color) const {\n     return Board<SIZE, PIE>::getPathIndexAndColorAtIndex(index, pathIndex, color);\n    }\n\n    const std::array<std::array<int,7>,fullsize(SIZE)*fullsize(SIZE)> & getNeighboursBoard() const {\n     return Board<SIZE, PIE>::_neighboursBoard;\n    }\n\n    std::array<PathInfo, fullsize(SIZE)*fullsize(SIZE)> & getPaths() {\n     return Board<SIZE, PIE>::_paths;\n    }\n\n    int & getPathsEnd() {\n     return Board<SIZE, PIE>::_pathsEnd;\n    }\n\n    std::array<int, fullsize(SIZE)*fullsize(SIZE)> & getPathBoard() {\n     return Board<SIZE, PIE>::_pathBoard;\n    }\n\n    unsigned computeBorders(int index) const { return Board<SIZE, PIE>::computeBorders(index); }\n    unsigned computeCorners(int index) const { return Board<SIZE, 
PIE>::computeCorners(index); }\n\n  };\n\n}  // namespace Havannah\n\n///////////////////////////////////////////////////////////////////////////////\n// unit tests\n///////////////////////////////////////////////////////////////////////////////\n\nTEST(HavannahGroup, fullsize) {\n ASSERT_EQ(Havannah::fullsize(5), 9);\n ASSERT_EQ(Havannah::fullsize(6), 11);\n ASSERT_EQ(Havannah::fullsize(7), 13);\n ASSERT_EQ(Havannah::fullsize(8), 15);\n ASSERT_EQ(Havannah::fullsize(9), 17);\n ASSERT_EQ(Havannah::fullsize(10), 19);\n}\n\nTEST(HavannahGroup, reset_8) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n ASSERT_EQ(b.getNbIndices(), 15*15-7*8);\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_0);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n ASSERT_EQ(b.getLastIndex(), std::optional<int>());\n ASSERT_EQ(b.getNbEmptyIndices(), 15*15-7*8);\n}\n\nTEST(HavannahGroup, reset_7) {\n Havannah::BoardTest<7, true> b;\n b.reset();\n ASSERT_EQ(b.getNbIndices(), 13*13-6*7);\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_0);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n ASSERT_EQ(b.getLastIndex(), std::optional<int>());\n ASSERT_EQ(b.getNbEmptyIndices(), 13*13-6*7);\n}\n\nTEST(HavannahGroup, reset_5) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n ASSERT_EQ(b.getNbIndices(), 9*9-4*5);\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_0);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n ASSERT_EQ(b.getLastIndex(), std::optional<int>());\n ASSERT_EQ(b.getNbEmptyIndices(), 9*9-4*5);\n}\n\nTEST(HavannahGroup, copyConstructor) {\n Havannah::BoardTest<5, true> b0;\n b0.reset();\n Havannah::BoardTest<5, true> b(b0);\n ASSERT_EQ(b.getNbIndices(), 61);\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_0);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n ASSERT_EQ(b.getLastIndex(), std::optional<int>());\n ASSERT_EQ(b.getNbEmptyIndices(), 61);\n}\n\nTEST(HavannahGroup, resetNeighboursTopLeft) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(0, 4));\n const 
std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(0,5)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(1,3)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(1,4)));\n ASSERT_EQ(refNeighbourIndices[3], -1);\n}\n\nTEST(HavannahGroup, resetNeighboursTopCenter) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(0, 6));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(0,5)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(0,7)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(1,5)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Havannah::Cell(1,6)));\n ASSERT_EQ(refNeighbourIndices[4], -1);\n}\n\nTEST(HavannahGroup, resetNeighboursTopRight) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(0, 8));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(0,7)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(1,7)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(1,8)));\n ASSERT_EQ(refNeighbourIndices[3], -1);\n}\n\nTEST(HavannahGroup, resetNeighboursMiddleRight) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(4, 8));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(3,8)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(4,7)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(5,7)));\n ASSERT_EQ(refNeighbourIndices[3], 
-1);\n}\n\nTEST(HavannahGroup, resetNeighboursMiddleRight2) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(6, 6));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(5,6)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(5,7)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(6,5)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Havannah::Cell(7,5)));\n ASSERT_EQ(refNeighbourIndices[4], -1);\n}\n\nTEST(HavannahGroup, resetNeighboursBottomRight) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(8, 4));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(7,4)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(7,5)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(8,3)));\n ASSERT_EQ(refNeighbourIndices[3], -1);\n}\n\nTEST(HavannahGroup, resetNeighboursBottomCenter) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(8, 2));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(7,2)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(7,3)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(8,1)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Havannah::Cell(8,3)));\n ASSERT_EQ(refNeighbourIndices[4], -1);\n}\n\nTEST(HavannahGroup, resetNeighboursBottomLeft) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(8, 0));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n 
ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(7,0)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(7,1)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(8,1)));\n ASSERT_EQ(refNeighbourIndices[3], -1);\n}\n\nTEST(HavannahGroup, resetNeighboursMiddleLeft) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(4, 0));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(3,1)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(4,1)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(5,0)));\n ASSERT_EQ(refNeighbourIndices[3], -1);\n}\n\nTEST(HavannahGroup, resetNeighboursMiddleCenter) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index = b.convertCellToIndex(Havannah::Cell(4, 4));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Havannah::Cell(3,4)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Havannah::Cell(3,5)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Havannah::Cell(4,3)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Havannah::Cell(4,5)));\n ASSERT_EQ(refNeighbourIndices[4], b.convertCellToIndex(Havannah::Cell(5,3)));\n ASSERT_EQ(refNeighbourIndices[5], b.convertCellToIndex(Havannah::Cell(5,4)));\n ASSERT_EQ(refNeighbourIndices[6], -1);\n}\n\nTEST(HavannahGroup, resetPaths) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n ASSERT_EQ(b.getPathsEnd(), 1);\n const Havannah::PathInfo & p0 = b.getPaths()[0];\n ASSERT_EQ(p0._color, Havannah::Color::COLOR_NONE);\n ASSERT_EQ(p0._borders, 0);\n ASSERT_EQ(p0._corners, 0);\n ASSERT_EQ(p0._mainPathIndex, 0);\n}\n\nTEST(HavannahGroup, resetPathBoard) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n for (int i=0; i<61; i++) 
{\n  ASSERT_EQ(b.getPathBoard()[i], 0); \n  ASSERT_EQ(b.getColorAtIndex(i), Havannah::Color::COLOR_NONE);\n }\n}\n\nTEST(HavannahGroup, resetGetters1) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n ASSERT_EQ(b.getNbIndices(), 61);\n ASSERT_EQ(b.getNbEmptyIndices(), 61);\n ASSERT_EQ(b.getLastIndex(), std::optional<int>());\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_0);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n}\n\nTEST(HavannahGroup, isValid1) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n\n ASSERT_EQ(b.isValidIndex(12), true);\n ASSERT_EQ(b.isValidIndex(4), true);\n ASSERT_EQ(b.isValidIndex(0), false);\n ASSERT_EQ(b.isValidIndex(-1), false);\n ASSERT_EQ(b.isValidIndex(60), true);\n ASSERT_EQ(b.isValidIndex(61), false);\n\n ASSERT_EQ(b.isValidCell(Havannah::Cell(2, 3)), true);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(0, 4)), true);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(0, 3)), false);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(-1, 1)), false);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(2, 0)), false);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(2, 5)), true);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(2, -1)), false);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(3, 9)), false);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(3, 8)), true);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(7, 5)), true);\n ASSERT_EQ(b.isValidCell(Havannah::Cell(7, 6)), false);\n}\n\nTEST(HavannahGroup, convertIndexToCell) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n ASSERT_EQ(b.convertIndexToCell(3), Havannah::Cell(0, 3));\n ASSERT_EQ(b.convertIndexToCell(19), Havannah::Cell(1, 4));\n ASSERT_EQ(b.convertIndexToCell(61), Havannah::Cell(4, 1));\n}\n\nTEST(HavannahGroup, convertCellToIndex) {\n Havannah::BoardTest<10, true> b;\n b.reset();\n ASSERT_EQ(b.convertCellToIndex(Havannah::Cell(0, 2)), 2);\n ASSERT_EQ(b.convertCellToIndex(Havannah::Cell(1, 0)), 19);\n ASSERT_EQ(b.convertCellToIndex(Havannah::Cell(7, 5)), 138);\n}\n\nTEST(HavannahGroup, computeBorderCorner_center) {\n 
Havannah::BoardTest<8, true> b;\n b.reset();\n int index;\n index = b.convertCellToIndex(Havannah::Cell(7, 7));\n ASSERT_EQ(b.computeBorders(index), 0);\n ASSERT_EQ(b.computeCorners(index), 0);\n}\n\nTEST(HavannahGroup, computeBorderCorner_topleft) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n int index;\n index = b.convertCellToIndex(Havannah::Cell(0, 7));\n ASSERT_EQ(b.computeBorders(index), 0);\n ASSERT_EQ(b.computeCorners(index), 1);\n index = b.convertCellToIndex(Havannah::Cell(0, 8));\n ASSERT_EQ(b.computeBorders(index), 1);\n ASSERT_EQ(b.computeCorners(index), 0);\n index = b.convertCellToIndex(Havannah::Cell(1, 6));\n ASSERT_EQ(b.computeBorders(index), 32);\n ASSERT_EQ(b.computeCorners(index), 0);\n}\n\nTEST(HavannahGroup, computeBorderCorner_topright) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n int index;\n index = b.convertCellToIndex(Havannah::Cell(0, 14));\n ASSERT_EQ(b.computeBorders(index), 0);\n ASSERT_EQ(b.computeCorners(index), 2);\n index = b.convertCellToIndex(Havannah::Cell(0, 13));\n ASSERT_EQ(b.computeBorders(index), 1);\n ASSERT_EQ(b.computeCorners(index), 0);\n index = b.convertCellToIndex(Havannah::Cell(1, 14));\n ASSERT_EQ(b.computeBorders(index), 2);\n ASSERT_EQ(b.computeCorners(index), 0);\n}\n\nTEST(HavannahGroup, computeBorderCorner_middleright) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n int index;\n index = b.convertCellToIndex(Havannah::Cell(7, 14));\n ASSERT_EQ(b.computeBorders(index), 0);\n ASSERT_EQ(b.computeCorners(index), 4);\n index = b.convertCellToIndex(Havannah::Cell(6, 14));\n ASSERT_EQ(b.computeBorders(index), 2);\n ASSERT_EQ(b.computeCorners(index), 0);\n index = b.convertCellToIndex(Havannah::Cell(8, 13));\n ASSERT_EQ(b.computeBorders(index), 4);\n ASSERT_EQ(b.computeCorners(index), 0);\n}\n\nTEST(HavannahGroup, computeBorderCorner_bottomright) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n int index;\n index = b.convertCellToIndex(Havannah::Cell(14, 7));\n ASSERT_EQ(b.computeBorders(index), 0);\n 
ASSERT_EQ(b.computeCorners(index), 8);\n index = b.convertCellToIndex(Havannah::Cell(13, 8));\n ASSERT_EQ(b.computeBorders(index), 4);\n ASSERT_EQ(b.computeCorners(index), 0);\n index = b.convertCellToIndex(Havannah::Cell(14, 6));\n ASSERT_EQ(b.computeBorders(index), 8);\n ASSERT_EQ(b.computeCorners(index), 0);\n}\n\nTEST(HavannahGroup, computeBorderCorner_bottomleft) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n int index;\n index = b.convertCellToIndex(Havannah::Cell(14, 0));\n ASSERT_EQ(b.computeBorders(index), 0);\n ASSERT_EQ(b.computeCorners(index), 16);\n index = b.convertCellToIndex(Havannah::Cell(13, 0));\n ASSERT_EQ(b.computeBorders(index), 16);\n ASSERT_EQ(b.computeCorners(index), 0);\n index = b.convertCellToIndex(Havannah::Cell(14, 1));\n ASSERT_EQ(b.computeBorders(index), 8);\n ASSERT_EQ(b.computeCorners(index), 0);\n}\n\nTEST(HavannahGroup, computeBorderCorner_middleleft) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n int index;\n index = b.convertCellToIndex(Havannah::Cell(7, 0));\n ASSERT_EQ(b.computeBorders(index), 0);\n ASSERT_EQ(b.computeCorners(index), 32);\n index = b.convertCellToIndex(Havannah::Cell(6, 1));\n ASSERT_EQ(b.computeBorders(index), 32);\n ASSERT_EQ(b.computeCorners(index), 0);\n index = b.convertCellToIndex(Havannah::Cell(8, 0));\n ASSERT_EQ(b.computeBorders(index), 16);\n ASSERT_EQ(b.computeCorners(index), 0);\n}\n\n\nTEST(HavannahGroup, getPathIndexAndPlayerAtIndex) {\n Havannah::BoardTest<7, true> b;\n b.reset();\n b.getPathsEnd() = 3;\n b.getPaths()[1] = Havannah::PathInfo(1, Havannah::Color::COLOR_BLACK, 0, 0);\n b.getPaths()[2] = Havannah::PathInfo(2, Havannah::Color::COLOR_WHITE, 0, 0);\n\n int index_1_1 = b.convertCellToIndex(Havannah::Cell(1, 1));\n int index_3_1 = b.convertCellToIndex(Havannah::Cell(3, 1));\n int index_2_3 = b.convertCellToIndex(Havannah::Cell(2, 3));\n\n b.getPathBoard()[index_3_1] = 1;\n b.getPathBoard()[index_2_3] = 2;\n\n int pathIndex;\n Havannah::Color color;\n\n 
b.getPathIndexAndColorAtIndex(index_1_1, pathIndex, color);\n ASSERT_EQ(pathIndex, 0);\n ASSERT_EQ(color, Havannah::Color::COLOR_NONE);\n\n b.getPathIndexAndColorAtIndex(index_3_1, pathIndex, color);\n ASSERT_EQ(pathIndex, 1);\n ASSERT_EQ(color, Havannah::Color::COLOR_BLACK);\n\n b.getPathIndexAndColorAtIndex(index_2_3, pathIndex, color);\n ASSERT_EQ(pathIndex, 2);\n ASSERT_EQ(color, Havannah::Color::COLOR_WHITE);\n}\n\nTEST(HavannahGroup, play1) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n int index;\n\n index = b.convertCellToIndex(Havannah::Cell(2, 8));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 2);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n ASSERT_EQ(b.getPaths()[1]._color, Havannah::Color::COLOR_BLACK);\n ASSERT_EQ(b.getPaths()[1]._borders, 0);\n ASSERT_EQ(b.getPaths()[1]._corners, 0);\n ASSERT_EQ(b.getPaths()[1]._mainPathIndex, 1);\n\n index = b.convertCellToIndex(Havannah::Cell(3, 8));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 3);\n ASSERT_EQ(b.getPathBoard()[index], 2);\n ASSERT_EQ(b.getPaths()[2]._color, Havannah::Color::COLOR_WHITE);\n ASSERT_EQ(b.getPaths()[2]._borders, 0);\n ASSERT_EQ(b.getPaths()[2]._corners, 0);\n ASSERT_EQ(b.getPaths()[2]._mainPathIndex, 2);\n\n index = b.convertCellToIndex(Havannah::Cell(2, 9));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 3);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n ASSERT_EQ(b.getPaths()[1]._color, Havannah::Color::COLOR_BLACK);\n ASSERT_EQ(b.getPaths()[1]._borders, 0);\n ASSERT_EQ(b.getPaths()[1]._corners, 0);\n ASSERT_EQ(b.getPaths()[1]._mainPathIndex, 1);\n}\n\nTEST(HavannahGroup, play2) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index;\n\n index = b.convertCellToIndex(Havannah::Cell(2, 5));\n b.play(index);\n index = b.convertCellToIndex(Havannah::Cell(3, 5));\n b.play(index);\n index = b.convertCellToIndex(Havannah::Cell(2, 6));\n b.play(index);\n\n ASSERT_EQ(b.getPathsEnd(), 3);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n ASSERT_EQ(b.getPaths()[1]._color, 
Havannah::Color::COLOR_BLACK);\n ASSERT_EQ(b.getPaths()[1]._borders, 0);\n ASSERT_EQ(b.getPaths()[1]._corners, 0);\n ASSERT_EQ(b.getPaths()[1]._mainPathIndex, 1);\n}\n\nTEST(HavannahGroup, play3) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n int index;\n\n ASSERT_EQ(b.getNbIndices(), 61);\n\n ASSERT_EQ(b.getNbEmptyIndices(), 61);\n ASSERT_EQ(b.getLastIndex(), std::optional<int>());\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_0);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n\n index = b.convertCellToIndex(Havannah::Cell(2, 6));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 60);\n ASSERT_EQ(b.getLastIndex(), index);\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_1);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n\n index = b.convertCellToIndex(Havannah::Cell(3, 6));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 59);\n ASSERT_EQ(b.getLastIndex(), index);\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_0);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n\n index = b.convertCellToIndex(Havannah::Cell(2, 7));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 58);\n ASSERT_EQ(b.getLastIndex(), index);\n ASSERT_EQ(b.getCurrentPlayer(), PLAYER_1);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_NULL);\n}\n\nTEST(HavannahGroup, findWinnerPath) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n\n // play cells\n std::vector<Havannah::Cell> gameCells = {\n  Havannah::Cell(4, 1), Havannah::Cell(0, 4), \n  Havannah::Cell(4, 3), Havannah::Cell(1, 4), \n  Havannah::Cell(4, 5), Havannah::Cell(2, 4), \n  Havannah::Cell(4, 7), Havannah::Cell(3, 4), \n  Havannah::Cell(4, 2), Havannah::Cell(5, 0), \n  Havannah::Cell(4, 4), Havannah::Cell(5, 1), \n  Havannah::Cell(4, 0), Havannah::Cell(5, 2), \n  Havannah::Cell(4, 6), Havannah::Cell(5, 3), \n  Havannah::Cell(4, 8)\n };\n for (const Havannah::Cell & c : gameCells) {\n  int index = b.convertCellToIndex(c);\n  b.play(index);\n }\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_0);\n\n // test cells in winner path\n std::vector<int> winIndices = 
b.findWinnerPath();\n ASSERT_EQ(int(winIndices.size()), 9);\n std::vector<Havannah::Cell> winCells { \n  Havannah::Cell(4, 0),\n   Havannah::Cell(4, 1),\n   Havannah::Cell(4, 2),\n   Havannah::Cell(4, 3),\n   Havannah::Cell(4, 4),\n   Havannah::Cell(4, 5),\n   Havannah::Cell(4, 6),\n   Havannah::Cell(4, 7),\n   Havannah::Cell(4, 8)\n };\n for (const Havannah::Cell & c : winCells) {\n  int index = b.convertCellToIndex(c);\n  ASSERT_NE(winIndices.end(), std::find(winIndices.begin(), \n     winIndices.end(), index));\n }\n\n}\n\nTEST(HavannahGroup, winner_white_fork) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n std::vector<int> gameIndices = {\n  28, 21,\n  38, 13,\n  31, 22,\n  23, 57,\n  6, 41,\n  55, 16,\n  24, 42,\n  25, 58,\n  17, 56,\n  14, 49,\n  39, 67,\n  37\n };\n for (int i : gameIndices)\n  b.play(i);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_0);\n}\n\nTEST(HavannahGroup, winner_white_ring) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n std::vector<int> gameIndices = {\n  37, 22,\n  30, 49,\n  47, 55,\n  46, 38,\n  29, 31,\n  39\n };\n for (int i : gameIndices)\n  b.play(i);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_0);\n}\n\nTEST(HavannahGroup, winner_white_bridge) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n std::vector<int> gameIndices = {\n  72, 47,\n  65, 8,\n  75, 26,\n  76, 58,\n  66, 55,\n  64\n };\n for (int i : gameIndices)\n  b.play(i);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_0);\n}\n\nTEST(HavannahGroup, winner_black_fork) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n std::vector<int> gameIndices = {\n  39, 6,\n  23, 16,\n  31, 33,\n  72, 43,\n  50, 52,\n  26, 17,\n  35, 15,\n  49, 34,\n  41, 24\n };\n for (int i : gameIndices)\n  b.play(i);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_1);\n}\n\nTEST(HavannahGroup, winner_black_bridge) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n std::vector<int> gameIndices = {\n  21, 44,\n  40, 59,\n  60, 51,\n  68, 67,\n  56, 52,\n  34, 76\n };\n for (int i : gameIndices)\n  b.play(i);\n 
ASSERT_EQ(b.getWinnerPlayer(), PLAYER_1);\n}\n\nTEST(HavannahGroup, winner_black_ring) {\n Havannah::BoardTest<5, true> b;\n b.reset();\n std::vector<int> gameIndices = {\n  29, 32,\n  38, 42,\n  64, 58,\n  14, 48,\n  25, 40,\n  68, 41,\n  34, 50,\n  74, 57\n };\n for (int i : gameIndices)\n  b.play(i);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_1);\n}\n\n// https://github.com/facebookexternal/polygames/commit/fdf094d1ab9a0bcc60422e03c44b59d1c2df9f3e#diff-9367f7a48f6ff9d5d578838ec92885baR406\n// https://www.littlegolem.net/jsp/game/game.jsp?gid=2130971\nTEST(HavannahGroup, littlegolem_lorentz_ttrttr) {\n Havannah::BoardTest<8, true> b;\n b.reset();\n std::vector<Havannah::Cell> gameCells = {\n     {7,7}, {7,7},\n     {3,6}, {3,11}, {2,8}, {7,2},\n     {9,3}, {4,10}, {5,5}, {8,3},\n     {8,5}, {7,5}, {6,6}, {7,6},\n     {5,8}, {3,9}, {6,9}, {3,8},\n     {5,11}, {8,4}, {3,7}, {9,2},\n     {4,7}, {6,5}, {6,4}, {6,1},\n     {9,4}, {10,2}, {12,3}, {5,6},\n     {5,7}, {13,1}, {12,1}, {10,3},\n     {13,4}, {10,4}, {10,5}, {9,5},\n     {8,6}, {9,6}, {8,7}, {12,2},\n     {14,1}, {11,5}, {11,4}, {14,0},\n     {7,0}, {13,2}, {14,2}, {13,3},\n     {14,3}, {9,1}, {12,4}, {8,1},\n     {7,3}, {10,6}, {12,6}, {8,0},\n     {10,7}, {12,5}, {13,5}, {7,8},\n     {9,9}, {6,8}, {5,9}, {7,11},\n     {7,10}, {7,9}, {5,10}, {6,10},\n     {5,12}, {9,7}, {8,8}, {4,5},\n     {8,9}, {6,11}, {2,6}, {1,7},\n     {1,8}, {7,13}, {0,8}, {8,13},\n     {1,6}, {7,12}, {0,7}\n };\n std::vector<int> gameIndices(gameCells.size());\n std::transform(gameCells.begin(), gameCells.end(), gameIndices.begin(),\n         [&](const Havannah::Cell & c) { return b.convertCellToIndex(c); });\n for (int i : gameIndices)\n  b.play(i);\n ASSERT_EQ(b.getWinnerPlayer(), PLAYER_0);\n}\n\n"
  },
  {
    "path": "tests/hex-state-tests.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Unit tests for Hex Action/State.\n\n#include <hex_state.h>\n#include <gtest/gtest.h>\n#include \"utils.h\"\n\n///////////////////////////////////////////////////////////////////////////////\n// helpers\n///////////////////////////////////////////////////////////////////////////////\n\nnamespace Hex {\n\n template <int SIZE, bool PIE> class StateTest : public Hex::State<SIZE, PIE> {\n  public:\n   StateTest<SIZE, PIE>(int seed, int history, bool turnFeatures) : \n    Hex::State<SIZE, PIE>(seed, history, turnFeatures) {}\n   GameStatus GetStatus() { return ::State::_status; };\n };\n\n};\n\n\n///////////////////////////////////////////////////////////////////////////////\n// unit tests\n///////////////////////////////////////////////////////////////////////////////\n\nTEST(HexStateGroup, init_1) {\n\n Hex::StateTest<7,true> state(0, 0, false);\n\n ASSERT_EQ(GameStatus::player0Turn, state.GetStatus());\n\n // features\n ASSERT_EQ((std::vector<int64_t>{2, 7, 7}), state.GetFeatureSize());\n ASSERT_EQ((std::vector<float>(2*7*7, 0.f)), state.GetFeatures());\n // ASSERT_EQ((std::vector<int64_t>{7, 7, 7}), state.GetFeatureSize());\n // ASSERT_EQ((std::vector<float>(7*7*7, 0.f)), state.GetFeatures());\n\n // actions\n ASSERT_EQ((std::vector<int64_t>{1, 7, 7}), state.GetActionSize());\n ASSERT_EQ(7*7, state.GetLegalActions().size());\n for (int k=0; k<state.GetLegalActions().size(); ++k) {\n  int i = k / 7;\n  int j = k % 7;\n  auto a = state.GetLegalActions()[k];\n  ASSERT_EQ(0, a->GetX());\n  ASSERT_EQ(i, a->GetY());\n  ASSERT_EQ(j, a->GetZ());\n  ASSERT_EQ(k, a->GetHash());\n  ASSERT_EQ(k, a->GetIndex());\n }\n}\n\n\nTEST(HexStateGroup, play_1) {\n\n Hex::StateTest<7,true> state(0, 0, false);\n\n Hex::Action<7> a(2, 3, 2*7+3);\n 
state.ApplyAction(a);\n\n ASSERT_EQ(GameStatus::player1Turn, state.GetStatus());\n\n // features\n ASSERT_EQ((std::vector<int64_t>{2, 7, 7}), state.GetFeatureSize());\n for (int p=0; p<2; ++p) {\n  for (int i=0; i<2; ++i) {\n   for (int j=0; j<2; ++j) {\n    int k = (p*2+i)*7+j;\n    auto f_k = state.GetFeatures()[k];\n    if (p==0 and i==2 and j==3)\n     ASSERT_EQ(1, f_k);\n    else \n     ASSERT_EQ(0, f_k);\n   }\n  }\n }\n\n // actions\n ASSERT_EQ((std::vector<int64_t>{1, 7, 7}), state.GetActionSize());\n ASSERT_EQ(7*7, state.GetLegalActions().size());\n for (int i=0; i<2; ++i) {\n  for (int j=0; j<2; ++j) {\n   int k = i*7+j;\n   if (k<2*7+3) {\n   auto a = state.GetLegalActions()[k];\n    ASSERT_EQ(0, a->GetX());\n    ASSERT_EQ(i, a->GetY());\n    ASSERT_EQ(j, a->GetZ());\n    ASSERT_EQ(k, a->GetHash());\n    ASSERT_EQ(k, a->GetIndex());\n   }\n   else if (k>2*7+3) {\n    int k2 = k-1;\n    auto a = state.GetLegalActions()[k2];\n    ASSERT_EQ(0, a->GetX());\n    ASSERT_EQ(i, a->GetY());\n    ASSERT_EQ(j, a->GetZ());\n    ASSERT_EQ(k2, a->GetHash());\n    ASSERT_EQ(k2, a->GetIndex());\n   }\n  }\n }\n\n}\n\n\nTEST(HexStateGroup, clone_1) {\n\n try {\n  Hex::State<7,true> state(0);\n  auto clone = state.clone();\n  auto ptrClone = dynamic_cast<Hex::State<7,true> *>(clone.get());\n\n  ASSERT_NE(&state, ptrClone);\n  ASSERT_EQ(49, state.GetLegalActions().size());\n  ASSERT_EQ(49, ptrClone->GetLegalActions().size());\n\n  Hex::Action<7> a(2, 3, -1);\n  state.ApplyAction(a);\n\n  ASSERT_EQ(49, state.GetLegalActions().size());\n  ASSERT_EQ(49, ptrClone->GetLegalActions().size());\n }\n catch (std::bad_cast) {\n  FAIL() << \"not a Hex::State<7,true>\"; \n }\n\n}\n\n\nTEST(HexStateGroup, features_1) {\n\n Hex::StateTest<3,true> state(0, 2, true);\n\n // apply actions\n\n ASSERT_EQ((std::vector<int64_t>{1, 3, 3}), state.GetActionSize());\n\n std::vector<Hex::Action<3>> actions {{\n     {1,0,-1},\n     {0,0,-1}\n }};\n\n auto currentPlayer = GameStatus::player0Turn;\n 
auto nextPlayer = GameStatus::player1Turn;\n\n ASSERT_EQ(GameStatus::player0Turn, state.GetStatus());\n ASSERT_EQ(9, state.GetLegalActions().size());\n state.ApplyAction(actions[0]);\n\n ASSERT_EQ(GameStatus::player1Turn, state.GetStatus());\n ASSERT_EQ(9, state.GetLegalActions().size());\n state.ApplyAction(actions[1]);\n\n ASSERT_EQ(7, state.GetLegalActions().size());\n ASSERT_EQ(GameStatus::player0Turn, state.GetStatus());\n\n // check features\n\n ASSERT_EQ((std::vector<int64_t>{7, 3, 3}), state.GetFeatureSize());\n\n std::vector<float> expectedFeatures {\n\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n\n     0.f, 0.f, 0.f,\n     1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n\n     0.f, 0.f, 0.f,\n     1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n\n     1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f\n\n };\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<std::vector<float>>(expectedFeatures, 7, 3, 3);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<std::vector<float>>(state.GetFeatures(), 7, 3, 3);\n\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n}\n\n\nTEST(HexStateGroup, features_2) {\n\n Hex::StateTest<3,false> state(0, 2, true);\n\n // apply actions\n\n ASSERT_EQ((std::vector<int64_t>{1, 3, 3}), state.GetActionSize());\n\n std::vector<Hex::Action<3>> actions {{\n     {1,1,-1}, {0,0,-1},\n     {2,2,-1}, {2,0,-1},\n     {1,0,-1}\n }};\n\n auto currentPlayer = GameStatus::player0Turn;\n auto nextPlayer = GameStatus::player1Turn;\n int k = 9;\n for (const auto & a : actions) {\n     ASSERT_EQ(currentPlayer, state.GetStatus());\n     ASSERT_EQ(k, state.GetLegalActions().size());\n     state.ApplyAction(a);\n     std::swap(currentPlayer, nextPlayer);\n     k--;\n     ASSERT_EQ(k, 
state.GetLegalActions().size());\n }\n ASSERT_EQ(GameStatus::player1Turn, state.GetStatus());\n\n // check features\n\n ASSERT_EQ((std::vector<int64_t>{7, 3, 3}), state.GetFeatureSize());\n\n std::vector<float> expectedFeatures {\n\n     // history - 2, player 0\n     0.f, 0.f, 0.f,\n     0.f, 1.f, 0.f,\n     0.f, 0.f, 1.f,\n\n     // history - 2, player 1\n     1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n\n     // history - 1, player 0\n     0.f, 0.f, 0.f,\n     0.f, 1.f, 0.f,\n     0.f, 0.f, 1.f,\n\n     // history - 1, player 1\n     1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n     1.f, 0.f, 0.f,\n\n     // history - 1, player 0\n     0.f, 0.f, 0.f,\n     1.f, 1.f, 0.f,\n     0.f, 0.f, 1.f,\n\n     // history - 0, player 1\n     1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f,\n     1.f, 0.f, 0.f,\n\n     // turn\n     1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f,\n     1.f, 1.f, 1.f\n\n };\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<std::vector<float>>(expectedFeatures, 7, 3, 3);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<std::vector<float>>(state.GetFeatures(), 7, 3, 3);\n\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n}\n\n\nTEST(HexStateGroup, features_3) {\n const int history = 2;\n const int size = 9;\n const bool turnFeatures = true;\n const int nbChannels = 2*(1 + history) + (turnFeatures ? 
1 : 0);\n\n Hex::StateTest<size,false> state(0, history, turnFeatures);\n\n // apply actions\n\n ASSERT_EQ((std::vector<int64_t>{1, size, size}), state.GetActionSize());\n\n std::vector<Hex::Action<size>> actions {{\n   {0,0,-1}, {4,1,-1},\n   {2,3,-1}, {5,2,-1},\n   {2,5,-1}, {4,4,-1},\n   {2,6,-1}, {5,5,-1},\n   {7,4,-1}, {4,7,-1},\n   {7,6,-1}, {3,8,-1},\n   {5,6,-1}, {4,6,-1},\n   {4,5,-1}, {5,4,-1},\n   {5,3,-1}, {4,3,-1},\n   {4,2,-1}, {5,1,-1},\n   {5,0,-1}, {4,0,-1}\n }};\n\n auto currentPlayer = GameStatus::player0Turn;\n auto nextPlayer = GameStatus::player1Turn;\n int k = size*size;\n for (const auto & a : actions) {\n     ASSERT_EQ(currentPlayer, state.GetStatus());\n     ASSERT_EQ(k, state.GetLegalActions().size());\n     state.ApplyAction(a);\n     std::swap(currentPlayer, nextPlayer);\n     k--;\n     ASSERT_EQ(k, state.GetLegalActions().size());\n     // std::cout << a.to_string() << std::endl;\n }\n ASSERT_EQ(GameStatus::player1Win, state.GetStatus());\n\n // check features\n\n ASSERT_EQ((std::vector<int64_t>{nbChannels, size, size}), state.GetFeatureSize());\n\n std::vector<float> expectedFeatures {\n\n     // history - 2, player 0\n     1.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n     // history - 2, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 1.f,\n     0.f, 1.f, 0.f, 1.f, 1.f, 0.f, 1.f, 1.f, 0.f,\n     0.f, 1.f, 1.f, 0.f, 1.f, 1.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 
0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n     // history - 1, player 0\n     1.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f,\n     1.f, 0.f, 0.f, 1.f, 0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n     // history - 1, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 1.f,\n     0.f, 1.f, 0.f, 1.f, 1.f, 0.f, 1.f, 1.f, 0.f,\n     0.f, 1.f, 1.f, 0.f, 1.f, 1.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n     // history - 0, player 0\n     1.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 1.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f,\n     1.f, 0.f, 0.f, 1.f, 0.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n     // history - 0, player 1\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 1.f,\n     1.f, 1.f, 0.f, 1.f, 1.f, 0.f, 1.f, 1.f, 0.f,\n     0.f, 1.f, 1.f, 0.f, 1.f, 1.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 
0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n\n     // turn\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,\n     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f\n\n\n };\n\n // DEBUG\n // std::cout << \"*** expected ***\" << std::endl;\n // printPlanes<std::vector<float>>(expectedFeatures, nbChannels, size, size);\n // std::cout << \"*** actual ***\" << std::endl;\n // printPlanes<std::vector<float>>(state.GetFeatures(), nbChannels, size, size);\n\n ASSERT_EQ(expectedFeatures, state.GetFeatures());\n\n // // Just a hack for converting to actions some indices \n // // (obtained using the GUI: https://gitlab.com/juliendehos/hex_hav).\n // Hex::Board<9> b;\n // b.reset();\n // std::vector<int> gameIndices = {\n //  37, 21,\n //  47, 23,\n //  40, 24,\n //  50, 67,\n //  43, 69,\n //  35, 51,\n //  42, 41,\n //  49, 48,\n //  39, 38,\n //  46, 45,\n //  36\n // };\n // for (int i : gameIndices) {\n //  auto c = b.convertIndexToCell(i);\n //  std::cout << \"{\" << c.first << \",\" << c.second << \"}, \" << std::endl;\n // }\n\n}\n\n"
  },
  {
    "path": "tests/hex-tests.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Unit tests for the Hex game.\n\n#include <hex.h>\n#include <gtest/gtest.h>\n#include \"utils.h\"\n\n///////////////////////////////////////////////////////////////////////////////\n// helpers\n///////////////////////////////////////////////////////////////////////////////\n\nstatic void CheckHexPathInfo(const Hex::PathInfo & path, Hex::Color color, bool b1, bool b2, int m) {\n ASSERT_EQ(path._color, color);\n ASSERT_EQ(path._isConnectedBorder1, b1);\n ASSERT_EQ(path._isConnectedBorder2, b2);\n ASSERT_EQ(path._mainPathIndex, m);\n}\n\nnamespace Hex {\n\n template<int SIZE, bool PIE>\n  class BoardTest : public Board<SIZE, PIE> {\n\n   public:\n\n    BoardTest() : Board<SIZE, PIE>() {}\n\n    int getNbIndices() const {\n        return Board<SIZE, PIE>::_nbIndices;\n    }\n\n    int getNbEmptyIndices() const {\n        return Board<SIZE, PIE>::_nbEmptyIndices;\n    }\n\n    void computeBorderConnection(int index, Color color, \n      bool & isConnectedBorder1, bool & isConnectedBorder2) const {\n     return Board<SIZE, PIE>::computeBorderConnection(index, color, \n       isConnectedBorder1, isConnectedBorder2);\n    }\n\n    void getPathIndexAndColorAtIndex(int index, int & pathIndex, \n      Color & color) const {\n     return Board<SIZE, PIE>::getPathIndexAndColorAtIndex(index, pathIndex, color);\n    }\n\n    const std::array<std::array<int,7>,SIZE*SIZE> & getNeighboursBoard() const {\n     return Board<SIZE, PIE>::_neighboursBoard;\n    }\n\n    std::array<PathInfo, SIZE*SIZE> & getPaths() {\n     return Board<SIZE, PIE>::_paths;\n    }\n\n    int & getPathsEnd() {\n     return Board<SIZE, PIE>::_pathsEnd;\n    }\n\n    std::array<int, SIZE*SIZE> & getPathBoard() {\n     return Board<SIZE, PIE>::_pathBoard;\n    }\n\n    int 
findNthEmptyIndex(int n) const {\n     assert(n < (Board<SIZE,PIE>::_nbEmptyIndices));\n     assert(n >= 0);\n   \n     int nbEmpty = 0;\n     int i = 0;\n     while (true) {\n       if (Board<SIZE, PIE>::_pathBoard[i] == 0) {\n         if (nbEmpty == n)\n           break;\n         else\n           nbEmpty++;\n       }\n       i++;\n     }\n     return i;\n   }\n\n   std::vector<int> findEmptyIndices() const {\n     std::vector<int> emptyIndices;\n     emptyIndices.reserve(Board<SIZE, PIE>::_nbEmptyIndices);\n     for (int k = 0; k < Board<SIZE, PIE>::_nbFullIndices; k++)\n       if (Board<SIZE, PIE>::_pathBoard[k] == 0)\n         emptyIndices.push_back(k);\n     if (Board<SIZE, PIE>::canPie() and\n         Board<SIZE, PIE>::_nbEmptyIndices == Board<SIZE, PIE>::_nbIndices - 1)\n       emptyIndices.push_back(*(Board<SIZE, PIE>::_lastIndex));\n     return emptyIndices;\n   }\n\n  };\n\n}  // namespace Hex\n\n///////////////////////////////////////////////////////////////////////////////\n// unit tests\n///////////////////////////////////////////////////////////////////////////////\n\nTEST(HexGroup, reset) {\n Hex::BoardTest<7,true> b;\n b.reset();\n ASSERT_EQ(b.getNbIndices(), 49);\n ASSERT_EQ(b.getCurrentColor(), Hex::COLOR_BLACK);\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_NONE);\n ASSERT_FALSE(b.getLastIndex());\n ASSERT_EQ(b.getNbEmptyIndices(), 49);\n}\n\nTEST(HexGroup, copyConstructor) {\n Hex::BoardTest<7,true> b0;\n b0.reset();\n Hex::BoardTest<7,true> b(b0);\n ASSERT_EQ(b.getNbIndices(), 49);\n ASSERT_EQ(b.getCurrentColor(), Hex::COLOR_BLACK);\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_NONE);\n ASSERT_FALSE(b.getLastIndex());\n ASSERT_EQ(b.getNbEmptyIndices(), 49);\n}\n\nTEST(HexGroup, resetNeighboursTopLeft) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(0, 0));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Hex::Cell(0,1)));\n 
ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(1,0)));\n ASSERT_EQ(refNeighbourIndices[2], -1);\n}\n\nTEST(HexGroup, resetNeighboursTopCenter) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(0, 3));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Hex::Cell(0,2)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(0,4)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Hex::Cell(1,2)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Hex::Cell(1,3)));\n ASSERT_EQ(refNeighbourIndices[4], -1);\n}\n\nTEST(HexGroup, resetNeighboursTopRight) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(0, 6));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Hex::Cell(0,5)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(1,5)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Hex::Cell(1,6)));\n ASSERT_EQ(refNeighbourIndices[3], -1);\n}\n\nTEST(HexGroup, resetNeighboursMiddleRight) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(3, 6));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Hex::Cell(2,6)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(3,5)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Hex::Cell(4,5)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Hex::Cell(4,6)));\n ASSERT_EQ(refNeighbourIndices[4], -1);\n}\n\nTEST(HexGroup, resetNeighboursBottomRight) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(6, 6));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], 
b.convertCellToIndex(Hex::Cell(5,6)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(6,5)));\n ASSERT_EQ(refNeighbourIndices[2], -1);\n}\n\nTEST(HexGroup, resetNeighboursBottomCenter) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(6, 3));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Hex::Cell(5,3)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(5,4)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Hex::Cell(6,2)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Hex::Cell(6,4)));\n ASSERT_EQ(refNeighbourIndices[4], -1);\n}\n\nTEST(HexGroup, resetNeighboursBottomLeft) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(6, 0));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Hex::Cell(5,0)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(5,1)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Hex::Cell(6,1)));\n ASSERT_EQ(refNeighbourIndices[3], -1);\n}\n\nTEST(HexGroup, resetNeighboursMiddleLeft) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(3, 0));\n const std::array<int, 7> refNeighbourIndices = b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Hex::Cell(2,0)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(2,1)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Hex::Cell(3,1)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Hex::Cell(4,0)));\n ASSERT_EQ(refNeighbourIndices[4], -1);\n}\n\nTEST(HexGroup, resetNeighboursMiddleCenter) {\n Hex::BoardTest<7,true> b;\n b.reset();\n int index = b.convertCellToIndex(Hex::Cell(3, 3));\n const std::array<int, 7> refNeighbourIndices = 
b.getNeighboursBoard()[index];\n ASSERT_EQ(refNeighbourIndices[0], b.convertCellToIndex(Hex::Cell(2,3)));\n ASSERT_EQ(refNeighbourIndices[1], b.convertCellToIndex(Hex::Cell(2,4)));\n ASSERT_EQ(refNeighbourIndices[2], b.convertCellToIndex(Hex::Cell(3,2)));\n ASSERT_EQ(refNeighbourIndices[3], b.convertCellToIndex(Hex::Cell(3,4)));\n ASSERT_EQ(refNeighbourIndices[4], b.convertCellToIndex(Hex::Cell(4,2)));\n ASSERT_EQ(refNeighbourIndices[5], b.convertCellToIndex(Hex::Cell(4,3)));\n ASSERT_EQ(refNeighbourIndices[6], -1);\n}\n\nTEST(HexGroup, resetPaths) {\n Hex::BoardTest<5,true> b;\n b.reset();\n ASSERT_EQ(b.getPathsEnd(), 1);\n const Hex::PathInfo & p0 = b.getPaths()[0];\n ASSERT_EQ(p0._color, Hex::COLOR_NONE);\n ASSERT_EQ(p0._isConnectedBorder1, false);\n ASSERT_EQ(p0._isConnectedBorder2, false);\n ASSERT_EQ(p0._mainPathIndex, 0);\n}\n\nTEST(HexGroup, resetPathBoard) {\n Hex::BoardTest<5,true> b;\n b.reset();\n for (int i=0; i<25; i++) {\n  ASSERT_EQ(b.getPathBoard()[i], 0); \n  int pathIndex;\n  Hex::Color color;\n  b.getPathIndexAndColorAtIndex(i, pathIndex, color);\n  ASSERT_EQ(pathIndex, 0);\n  ASSERT_EQ(color, Hex::COLOR_NONE);\n }\n}\n\nTEST(HexGroup, resetGetters1) {\n Hex::BoardTest<11,true> b;\n b.reset();\n ASSERT_EQ(b.getNbIndices(), 121);\n ASSERT_EQ(b.getNbEmptyIndices(), 121);\n ASSERT_FALSE(b.getLastIndex());\n ASSERT_EQ(b.getCurrentColor(), Hex::COLOR_BLACK);\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_NONE);\n}\n\nTEST(HexGroup, isValid1) {\n Hex::BoardTest<8,true> b;\n b.reset();\n\n ASSERT_EQ(b.isValidIndex(12), true);\n ASSERT_EQ(b.isValidIndex(0), true);\n ASSERT_EQ(b.isValidIndex(-1), false);\n ASSERT_EQ(b.isValidIndex(63), true);\n ASSERT_EQ(b.isValidIndex(64), false);\n\n ASSERT_EQ(b.isValidCell(Hex::Cell(3, 4)), true);\n ASSERT_EQ(b.isValidCell(Hex::Cell(0, 2)), true);\n ASSERT_EQ(b.isValidCell(Hex::Cell(-1, 1)), false);\n ASSERT_EQ(b.isValidCell(Hex::Cell(2, 0)), true);\n ASSERT_EQ(b.isValidCell(Hex::Cell(2, -1)), false);\n 
ASSERT_EQ(b.isValidCell(Hex::Cell(3, 7)), true);\n ASSERT_EQ(b.isValidCell(Hex::Cell(3, 8)), false);\n ASSERT_EQ(b.isValidCell(Hex::Cell(7, 2)), true);\n ASSERT_EQ(b.isValidCell(Hex::Cell(8, 1)), false);\n}\n\n\nTEST(HexGroup, convertIndexToCell) {\n Hex::BoardTest<9,true> b;\n b.reset();\n ASSERT_EQ(b.convertIndexToCell(3), Hex::Cell(0, 3));\n ASSERT_EQ(b.convertIndexToCell(13), Hex::Cell(1, 4));\n ASSERT_EQ(b.convertIndexToCell(37), Hex::Cell(4, 1));\n}\n\nTEST(HexGroup, convertCellToIndex) {\n Hex::BoardTest<10,true> b;\n b.reset();\n ASSERT_EQ(b.convertCellToIndex(Hex::Cell(0, 2)), 2);\n ASSERT_EQ(b.convertCellToIndex(Hex::Cell(1, 0)), 10);\n ASSERT_EQ(b.convertCellToIndex(Hex::Cell(7, 5)), 75);\n}\n\nTEST(HexGroup, computeBorderConnection0) {\n Hex::BoardTest<12,true> b;\n b.reset();\n\n int index;\n bool isConnectedBorder1;\n bool isConnectedBorder2;\n\n // left\n index = b.convertCellToIndex(Hex::Cell(7, 0));\n b.computeBorderConnection(index, Hex::COLOR_WHITE, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, true);\n ASSERT_EQ(isConnectedBorder2, false);\n\n // right\n index = b.convertCellToIndex(Hex::Cell(7, 11));\n b.computeBorderConnection(index, Hex::COLOR_WHITE, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, true);\n\n // top\n index = b.convertCellToIndex(Hex::Cell(0, 7));\n b.computeBorderConnection(index, Hex::COLOR_WHITE, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, false);\n\n // bottom\n index = b.convertCellToIndex(Hex::Cell(11, 7));\n b.computeBorderConnection(index, Hex::COLOR_WHITE, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, false);\n}\n\nTEST(HexGroup, computeBorderConnection1) {\n Hex::BoardTest<12,true> b;\n b.reset();\n\n int index;\n bool isConnectedBorder1;\n bool isConnectedBorder2;\n\n // 
left\n index = b.convertCellToIndex(Hex::Cell(7, 0));\n b.computeBorderConnection(index, Hex::COLOR_BLACK, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, false);\n\n // right\n index = b.convertCellToIndex(Hex::Cell(7, 11));\n b.computeBorderConnection(index, Hex::COLOR_BLACK, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, false);\n\n // top\n index = b.convertCellToIndex(Hex::Cell(0, 7));\n b.computeBorderConnection(index, Hex::COLOR_BLACK, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, true);\n ASSERT_EQ(isConnectedBorder2, false);\n\n // bottom\n index = b.convertCellToIndex(Hex::Cell(11, 7));\n b.computeBorderConnection(index, Hex::COLOR_BLACK, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, true);\n}\n\nTEST(HexGroup, computeBorderConnectionNull) {\n Hex::BoardTest<12,true> b;\n b.reset();\n\n int index;\n bool isConnectedBorder1;\n bool isConnectedBorder2;\n\n // left\n index = b.convertCellToIndex(Hex::Cell(7, 0));\n b.computeBorderConnection(index, Hex::COLOR_NONE, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, false);\n\n // right\n index = b.convertCellToIndex(Hex::Cell(7, 11));\n b.computeBorderConnection(index, Hex::COLOR_NONE, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, false);\n\n // top\n index = b.convertCellToIndex(Hex::Cell(0, 7));\n b.computeBorderConnection(index, Hex::COLOR_NONE, \n   isConnectedBorder1, isConnectedBorder2);\n ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, false);\n\n // bottom\n index = b.convertCellToIndex(Hex::Cell(11, 7));\n b.computeBorderConnection(index, Hex::COLOR_NONE, \n   isConnectedBorder1, isConnectedBorder2);\n 
ASSERT_EQ(isConnectedBorder1, false);\n ASSERT_EQ(isConnectedBorder2, false);\n}\n\nTEST(HexGroup, getPathIndexAndColorAtIndex) {\n Hex::BoardTest<7,true> b;\n b.reset();\n b.getPathsEnd() = 3;\n b.getPaths()[1] = Hex::PathInfo(1, Hex::COLOR_BLACK, false, false);\n b.getPaths()[2] = Hex::PathInfo(2, Hex::COLOR_WHITE, false, false);\n\n int index_1_1 = b.convertCellToIndex(Hex::Cell(1, 1));\n int index_3_1 = b.convertCellToIndex(Hex::Cell(3, 1));\n int index_2_3 = b.convertCellToIndex(Hex::Cell(2, 3));\n\n b.getPathBoard()[index_3_1] = 1;\n b.getPathBoard()[index_2_3] = 2;\n\n int pathIndex;\n Hex::Color color;\n\n b.getPathIndexAndColorAtIndex(index_1_1, pathIndex, color);\n ASSERT_EQ(pathIndex, 0);\n ASSERT_EQ(color, Hex::COLOR_NONE);\n\n b.getPathIndexAndColorAtIndex(index_3_1, pathIndex, color);\n ASSERT_EQ(pathIndex, 1);\n ASSERT_EQ(color, Hex::COLOR_BLACK);\n\n b.getPathIndexAndColorAtIndex(index_2_3, pathIndex, color);\n ASSERT_EQ(pathIndex, 2);\n ASSERT_EQ(color, Hex::COLOR_WHITE);\n}\n\nTEST(HexGroup, play0) {\n Hex::BoardTest<5,true> b;\n b.reset();\n int index;\n\n index = b.convertCellToIndex(Hex::Cell(0, 0));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 2);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n CheckHexPathInfo(b.getPaths()[1], Hex::COLOR_BLACK, true, false, 1);\n\n index = b.convertCellToIndex(Hex::Cell(0, 2));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 3);\n ASSERT_EQ(b.getPathBoard()[index], 2);\n CheckHexPathInfo(b.getPaths()[2], Hex::COLOR_WHITE, false, false, 2);\n\n index = b.convertCellToIndex(Hex::Cell(3, 0));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 4);\n ASSERT_EQ(b.getPathBoard()[index], 3);\n CheckHexPathInfo(b.getPaths()[3], Hex::COLOR_BLACK, false, false, 3);\n\n index = b.convertCellToIndex(Hex::Cell(2, 1));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 5);\n ASSERT_EQ(b.getPathBoard()[index], 4);\n CheckHexPathInfo(b.getPaths()[4], Hex::COLOR_WHITE, false, false, 4);\n\n index = b.convertCellToIndex(Hex::Cell(0, 1));\n 
b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 5);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n CheckHexPathInfo(b.getPaths()[1], Hex::COLOR_BLACK, true, false, 1);\n\n index = b.convertCellToIndex(Hex::Cell(2, 2));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 5);\n ASSERT_EQ(b.getPathBoard()[index], 4);\n CheckHexPathInfo(b.getPaths()[4], Hex::COLOR_WHITE, false, false, 4);\n\n index = b.convertCellToIndex(Hex::Cell(2, 4));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 6);\n ASSERT_EQ(b.getPathBoard()[index], 5);\n CheckHexPathInfo(b.getPaths()[5], Hex::COLOR_BLACK, false, false, 5);\n\n index = b.convertCellToIndex(Hex::Cell(4, 2));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 7);\n ASSERT_EQ(b.getPathBoard()[index], 6);\n CheckHexPathInfo(b.getPaths()[6], Hex::COLOR_WHITE, false, false, 6);\n\n index = b.convertCellToIndex(Hex::Cell(1, 0));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 7);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n CheckHexPathInfo(b.getPaths()[1], Hex::COLOR_BLACK, true, false, 1);\n\n index = b.convertCellToIndex(Hex::Cell(4, 0));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 8);\n ASSERT_EQ(b.getPathBoard()[index], 7);\n CheckHexPathInfo(b.getPaths()[7], Hex::COLOR_WHITE, true, false, 7);\n\n index = b.convertCellToIndex(Hex::Cell(1, 3));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 9);\n ASSERT_EQ(b.getPathBoard()[index], 8);\n CheckHexPathInfo(b.getPaths()[8], Hex::COLOR_BLACK, false, false, 8);\n\n index = b.convertCellToIndex(Hex::Cell(2, 0));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 9);\n ASSERT_EQ(b.getPathBoard()[index], 4);\n CheckHexPathInfo(b.getPaths()[4], Hex::COLOR_WHITE, true, false, 4);\n\n index = b.convertCellToIndex(Hex::Cell(3, 2));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 10);\n ASSERT_EQ(b.getPathBoard()[index], 9);\n CheckHexPathInfo(b.getPaths()[9], Hex::COLOR_BLACK, false, false, 9);\n\n index = b.convertCellToIndex(Hex::Cell(4, 4));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 11);\n 
ASSERT_EQ(b.getPathBoard()[index], 10);\n CheckHexPathInfo(b.getPaths()[10], Hex::COLOR_WHITE, false, true, 10);\n\n index = b.convertCellToIndex(Hex::Cell(3, 3));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 11);\n ASSERT_EQ(b.getPathBoard()[index], 5);\n CheckHexPathInfo(b.getPaths()[5], Hex::COLOR_BLACK, false, false, 5);\n ASSERT_EQ(b.getPaths()[9]._mainPathIndex, 5);\n\n index = b.convertCellToIndex(Hex::Cell(1, 4));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 11);\n CheckHexPathInfo(b.getPaths()[11], Hex::COLOR_WHITE, false, true, 11);\n\n index = b.convertCellToIndex(Hex::Cell(2, 3));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 5);\n CheckHexPathInfo(b.getPaths()[5], Hex::COLOR_BLACK, false, false, 5);\n ASSERT_EQ(b.getPaths()[8]._mainPathIndex, 5);\n ASSERT_EQ(b.getPaths()[9]._mainPathIndex, 5);\n\n index = b.convertCellToIndex(Hex::Cell(0, 4));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 11);\n CheckHexPathInfo(b.getPaths()[11], Hex::COLOR_WHITE, false, true, 11);\n\n index = b.convertCellToIndex(Hex::Cell(1, 1));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n CheckHexPathInfo(b.getPaths()[1], Hex::COLOR_BLACK, true, false, 1);\n\n index = b.convertCellToIndex(Hex::Cell(3, 4));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 10);\n CheckHexPathInfo(b.getPaths()[10], Hex::COLOR_WHITE, false, true, 10);\n\n index = b.convertCellToIndex(Hex::Cell(4, 3));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 5);\n CheckHexPathInfo(b.getPaths()[5], Hex::COLOR_BLACK, false, true, 5);\n\n index = b.convertCellToIndex(Hex::Cell(0, 3));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 2);\n CheckHexPathInfo(b.getPaths()[2], Hex::COLOR_WHITE, false, true, 2);\n 
ASSERT_EQ(b.getPaths()[11]._mainPathIndex, 2);\n\n index = b.convertCellToIndex(Hex::Cell(3, 1));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 3);\n CheckHexPathInfo(b.getPaths()[3], Hex::COLOR_BLACK, false, true, 3);\n ASSERT_EQ(b.getPaths()[5]._mainPathIndex, 3);\n ASSERT_EQ(b.getPaths()[8]._mainPathIndex, 3);\n ASSERT_EQ(b.getPaths()[9]._mainPathIndex, 3);\n\n index = b.convertCellToIndex(Hex::Cell(4, 1));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 6);\n CheckHexPathInfo(b.getPaths()[6], Hex::COLOR_WHITE, true, false, 6);\n ASSERT_EQ(b.getPaths()[7]._mainPathIndex, 6);\n\n index = b.convertCellToIndex(Hex::Cell(1, 2));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 12);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n CheckHexPathInfo(b.getPaths()[1], Hex::COLOR_BLACK, true, true, 1);\n ASSERT_EQ(b.getPaths()[3]._mainPathIndex, 1);\n ASSERT_EQ(b.getPaths()[5]._mainPathIndex, 1);\n ASSERT_EQ(b.getPaths()[8]._mainPathIndex, 1);\n ASSERT_EQ(b.getPaths()[9]._mainPathIndex, 1);\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_BLACK);\n}\n\nTEST(HexGroup, play1) {\n Hex::BoardTest<5,true> b;\n b.reset();\n int index;\n\n index = b.convertCellToIndex(Hex::Cell(2, 1));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 2);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n ASSERT_EQ(b.getPaths()[1]._color, Hex::COLOR_BLACK);\n ASSERT_EQ(b.getPaths()[1]._isConnectedBorder1, false);\n ASSERT_EQ(b.getPaths()[1]._isConnectedBorder2, false);\n ASSERT_EQ(b.getPaths()[1]._mainPathIndex, 1);\n\n index = b.convertCellToIndex(Hex::Cell(3, 1));\n b.play(index);\n ASSERT_EQ(b.getPathsEnd(), 3);\n ASSERT_EQ(b.getPathBoard()[index], 2);\n ASSERT_EQ(b.getPaths()[2]._color, Hex::COLOR_WHITE);\n ASSERT_EQ(b.getPaths()[2]._isConnectedBorder1, false);\n ASSERT_EQ(b.getPaths()[2]._isConnectedBorder2, false);\n ASSERT_EQ(b.getPaths()[2]._mainPathIndex, 2);\n\n index = b.convertCellToIndex(Hex::Cell(2, 2));\n b.play(index);\n 
ASSERT_EQ(b.getPathsEnd(), 3);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n ASSERT_EQ(b.getPaths()[1]._color, Hex::COLOR_BLACK);\n ASSERT_EQ(b.getPaths()[1]._isConnectedBorder1, false);\n ASSERT_EQ(b.getPaths()[1]._isConnectedBorder2, false);\n ASSERT_EQ(b.getPaths()[1]._mainPathIndex, 1);\n}\n\nTEST(HexGroup, play2) {\n Hex::BoardTest<5,true> b;\n b.reset();\n int index;\n\n index = b.convertCellToIndex(Hex::Cell(2, 1));\n b.play(index);\n index = b.convertCellToIndex(Hex::Cell(3, 1));\n b.play(index);\n index = b.convertCellToIndex(Hex::Cell(2, 2));\n b.play(index);\n\n ASSERT_EQ(b.getPathsEnd(), 3);\n ASSERT_EQ(b.getPathBoard()[index], 1);\n ASSERT_EQ(b.getPaths()[1]._color, Hex::COLOR_BLACK);\n ASSERT_EQ(b.getPaths()[1]._isConnectedBorder1, false);\n ASSERT_EQ(b.getPaths()[1]._isConnectedBorder2, false);\n ASSERT_EQ(b.getPaths()[1]._mainPathIndex, 1);\n}\n\nTEST(HexGroup, play3) {\n Hex::BoardTest<5,true> b;\n b.reset();\n int index;\n\n ASSERT_EQ(b.getNbIndices(), 25);\n\n ASSERT_EQ(b.getNbEmptyIndices(), 25);\n ASSERT_FALSE(b.getLastIndex());\n ASSERT_EQ(b.getCurrentColor(), Hex::COLOR_BLACK);\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_NONE);\n\n index = b.convertCellToIndex(Hex::Cell(2, 1));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 24);\n ASSERT_EQ(*b.getLastIndex(), index);\n ASSERT_EQ(b.getCurrentColor(), Hex::COLOR_WHITE);\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_NONE);\n\n index = b.convertCellToIndex(Hex::Cell(3, 1));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 23);\n ASSERT_EQ(*b.getLastIndex(), index);\n ASSERT_EQ(b.getCurrentColor(), Hex::COLOR_BLACK);\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_NONE);\n\n index = b.convertCellToIndex(Hex::Cell(2, 2));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 22);\n ASSERT_EQ(*b.getLastIndex(), index);\n ASSERT_EQ(b.getCurrentColor(), Hex::COLOR_WHITE);\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_NONE);\n}\n\nTEST(HexGroup, findWinnerPath) {\n Hex::BoardTest<5,true> b;\n b.reset();\n\n // 
play cells\n std::vector<Hex::Cell> gameCells = {\n  Hex::Cell(4, 4),\n  Hex::Cell(1, 1), Hex::Cell(2, 0), Hex::Cell(2, 3), Hex::Cell(3, 2), Hex::Cell(4, 2), \n  Hex::Cell(1, 2), Hex::Cell(0, 4), Hex::Cell(3, 0), Hex::Cell(1, 0), Hex::Cell(3, 3), \n  Hex::Cell(1, 3), Hex::Cell(0, 0), Hex::Cell(2, 1), Hex::Cell(1, 4), Hex::Cell(2, 2)\n };\n for (const Hex::Cell & c : gameCells) {\n  int index = b.convertCellToIndex(c);\n  b.play(index);\n }\n ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_WHITE);\n\n // test cells in winner path\n std::vector<int> winIndices = b.findWinnerPath();\n ASSERT_EQ(int(winIndices.size()), 7);\n std::vector<Hex::Cell> winCells { Hex::Cell(1, 1), Hex::Cell(2, 3), Hex::Cell(0, 4), \n  Hex::Cell(1, 0), Hex::Cell(1, 3), Hex::Cell(2, 1), Hex::Cell(2, 2) };\n for (const Hex::Cell & c : winCells) {\n  int index = b.convertCellToIndex(c);\n  ASSERT_NE(winIndices.end(), std::find(winIndices.begin(), \n     winIndices.end(), index));\n }\n}\n\nTEST(HexGroup, findEmptyIndices) {\n Hex::BoardTest<5,true> b;\n b.reset();\n\n // play cells\n std::vector<Hex::Cell> gameCells = {\n  Hex::Cell(1, 1), Hex::Cell(2, 0), Hex::Cell(2, 3), Hex::Cell(3, 2), Hex::Cell(4, 2), \n  Hex::Cell(1, 2), Hex::Cell(0, 4), Hex::Cell(3, 0), Hex::Cell(1, 0), Hex::Cell(3, 3), \n  Hex::Cell(1, 3), Hex::Cell(0, 0), Hex::Cell(2, 1), Hex::Cell(1, 4), Hex::Cell(2, 2)\n };\n for (const Hex::Cell & c : gameCells) {\n  int index = b.convertCellToIndex(c);\n  b.play(index);\n }\n\n // test cells in winner path\n std::vector<Hex::Cell> expectedEmptyCells = { \n  Hex::Cell(0, 1), Hex::Cell(0, 2), Hex::Cell(0, 3), Hex::Cell(2, 4), Hex::Cell(3, 1), \n  Hex::Cell(3, 4), Hex::Cell(4, 0), Hex::Cell(4, 1), Hex::Cell(4, 3), Hex::Cell(4, 4) \n };\n\n std::vector<int> emptyIndices = b.findEmptyIndices();\n ASSERT_EQ(emptyIndices.size(), expectedEmptyCells.size());\n ASSERT_EQ(unsigned(b.getNbEmptyIndices()), \n   expectedEmptyCells.size());\n\n for (unsigned i=0; i<emptyIndices.size(); i++) {\n  int 
index = b.convertCellToIndex(expectedEmptyCells[i]);\n  ASSERT_EQ(index, emptyIndices[i]);\n }\n}\n\nTEST(HexGroup, findNthEmptyIndex) {\n Hex::BoardTest<5,true> b;\n b.reset();\n\n // play cells\n std::vector<Hex::Cell> gameCells = {\n  Hex::Cell(1, 1), Hex::Cell(2, 0), Hex::Cell(2, 3), Hex::Cell(3, 2), Hex::Cell(4, 2), \n  Hex::Cell(1, 2), Hex::Cell(0, 4), Hex::Cell(3, 0), Hex::Cell(1, 0), Hex::Cell(3, 3), \n  Hex::Cell(1, 3), Hex::Cell(0, 0), Hex::Cell(2, 1), Hex::Cell(1, 4), Hex::Cell(2, 2)\n };\n for (const Hex::Cell & c : gameCells) {\n  int index = b.convertCellToIndex(c);\n  b.play(index);\n }\n\n // test cells in winner path\n std::vector<Hex::Cell> expectedEmptyCells = { \n  Hex::Cell(0, 1), Hex::Cell(0, 2), Hex::Cell(0, 3), Hex::Cell(2, 4), Hex::Cell(3, 1), \n  Hex::Cell(3, 4), Hex::Cell(4, 0), Hex::Cell(4, 1), Hex::Cell(4, 3), Hex::Cell(4, 4) \n };\n\n ASSERT_EQ(unsigned(b.getNbEmptyIndices()), \n   expectedEmptyCells.size());\n\n for (unsigned i=0; i<expectedEmptyCells.size(); i++) {\n  int indexExpected = b.convertCellToIndex(expectedEmptyCells[i]);\n  int indexFound = b.findNthEmptyIndex(i);\n  ASSERT_EQ(indexFound, indexExpected);\n }\n}\n\nTEST(HexGroup, winner_white) {\n    Hex::BoardTest<9,true> b;\n    b.reset();\n    std::vector<int> gameIndices = {\n        0, 37, 21,\n        47, 23,\n        40, 24,\n        50, 67,\n        43, 69,\n        35, 51,\n        42, 41,\n        49, 48,\n        39, 38,\n        46, 45,\n        36\n    };\n    for (int i : gameIndices)\n        b.play(i);\n    ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_WHITE);\n    ASSERT_EQ(b.isGameFinished(), true);\n}\n\n\nTEST(HexGroup, playPie1) {\n Hex::BoardTest<5,true> b;\n b.reset();\n\n ASSERT_EQ(b.getNbEmptyIndices(), 25);\n\n int index;\n\n index = b.convertCellToIndex(Hex::Cell(2, 1));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 24);\n ASSERT_EQ(b.canPie(), true);\n ASSERT_EQ(b.findEmptyIndices().size(), 25);\n\n index = b.convertCellToIndex(Hex::Cell(1, 
1));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 23);\n ASSERT_EQ(b.canPie(), false);\n ASSERT_EQ(b.findEmptyIndices().size(), 23);\n}\n\n\nTEST(HexGroup, playPie2) {\n Hex::BoardTest<5,true> b;\n b.reset();\n\n ASSERT_EQ(b.getNbEmptyIndices(), 25);\n\n int index;\n\n index = b.convertCellToIndex(Hex::Cell(2, 1));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 24);\n ASSERT_EQ(b.canPie(), true);\n ASSERT_EQ(b.findEmptyIndices().size(), 25);\n\n index = b.convertCellToIndex(Hex::Cell(2, 1));\n b.play(index);\n ASSERT_EQ(b.getNbEmptyIndices(), 24);\n ASSERT_EQ(b.canPie(), false);\n ASSERT_EQ(b.findEmptyIndices().size(), 24);\n}\n\nTEST(HexGroup, winner_pie) {\n    Hex::BoardTest<9,true> b;\n    b.reset();\n    std::vector<int> gameIndices = {\n      13, \n      13, 29,\n      23, 47,\n      40, 56,\n      50, 43,\n      67, 61,\n      75, 59,\n      58, 49,\n      41, 32,\n      31, 22,\n      14, 5,\n      4\n    };\n\n    for (int i : gameIndices)\n        b.play(i);\n    ASSERT_EQ(b.getWinnerColor(), Hex::COLOR_BLACK);\n    ASSERT_EQ(b.isGameFinished(), true);\n\n    /*\n    for (int i : gameIndices) {\n        auto c = b.convertIndexToCell(i);\n        std::swap(c.first, c.second);\n        std::cout << b.convertCellToIndex(c) << std::endl;\n    }\n    */\n\n}\n\n"
  },
  {
    "path": "tests/ludii-game-tests.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include <ludii/jni_utils.h>\n#include <ludii/ludii_state_wrapper.h>\n\n#include \"utils.h\"\n#include <gtest/gtest.h>\n\n///////////////////////////////////////////////////////////////////////////////\n// unit tests\n///////////////////////////////////////////////////////////////////////////////\n\nTEST(LudiiGameGroup, ludii_yavalath_0) {\n  Ludii::JNIUtils::InitJVM(\"\");  // Use default /ludii/Ludii.jar path\n  JNIEnv* jni_env = Ludii::JNIUtils::GetEnv();\n  EXPECT_TRUE(jni_env);\n\n  Ludii::LudiiGameWrapper game_wrapper(\"Yavalath.lud\");\n  Ludii::LudiiStateWrapper state =\n      Ludii::LudiiStateWrapper(0, std::move(game_wrapper));\n  state.Initialize();\n\n  ASSERT_EQ((std::vector<int64_t>{10, 9, 17}), state.GetFeatureSize());\n  ASSERT_EQ((std::vector<int64_t>{3, 9, 17}), state.GetActionSize());\n  ASSERT_EQ(GameStatus::player0Turn, GameStatus(state.getCurrentPlayer()));\n\n  // We expect the following meanings for Yavalath state tensor channels:\n  // 0: Piece Type 1 (Ball1)\n  // 1: Piece Type 2 (Ball2)\n  // 2: Is Player 1 the current mover?\n  // 3: Is Player 2 the current mover?\n  // 4: Did Swap Occur?\n  // 5: Does position exist in container 0 (Board)?\n  // 6: Last move's from-position\n  // 7: Last move's to-position\n  // 8: Second-to-last move's from-position\n  // 9: Second-to-last move's to-position\n\n  // TODO guess we really need a channel to indicate that swap happened\n  const std::vector<float> features = state.GetFeatures();\n\n  // We expect empty board initial state, so first two channels\n  // should be all-zero\n  size_t i = 0;\n  while (i < 2 * 9 * 17) {\n    ASSERT_EQ(0, features[i]);\n    ++i;\n  }\n\n  // Player 1 should be mover, so expect channel filled with 1s next\n  while (i < 3 * 9 * 17) {\n   
 ASSERT_EQ(1, features[i]);\n    ++i;\n  }\n\n  // Player 2 not current mover, so full channel of 0s\n  while (i < 4 * 9 * 17) {\n    ASSERT_EQ(0, features[i]);\n    ++i;\n  }\n\n  // No swap occured yet, so expect full channel of 0s\n  while (i < 5 * 9 * 17) {\n    ASSERT_EQ(0, features[i]);\n    ++i;\n  }\n\n  // Channel: Does position exist in container 0 (Board)?\n  // First and last column have 5 cells each,\n  // expected pattern: 0,0,0,0,1,0,1,0,1,0,1,0,1,0,0,0,0\n  const float _5_cells_pattern[17] = {\n      0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0};\n  for (size_t j = 0; j < 17; ++j) {\n    ASSERT_EQ(features[i + 0 * 17 + j], _5_cells_pattern[j]);\n    ASSERT_EQ(features[i + 8 * 17 + j], _5_cells_pattern[j]);\n  }\n\n  // Second and second-to-last column have 6 cells each,\n  // expected pattern: 0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0\n  const float _6_cells_pattern[17] = {\n      0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0};\n  for (size_t j = 0; j < 17; ++j) {\n    ASSERT_EQ(features[i + 1 * 17 + j], _6_cells_pattern[j]);\n    ASSERT_EQ(features[i + 7 * 17 + j], _6_cells_pattern[j]);\n  }\n\n  // Third and third-to-last column have 7 cells each,\n  // expected pattern: 0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0\n  const float _7_cells_pattern[17] = {\n      0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0};\n  for (size_t j = 0; j < 17; ++j) {\n    ASSERT_EQ(features[i + 2 * 17 + j], _7_cells_pattern[j]);\n    ASSERT_EQ(features[i + 6 * 17 + j], _7_cells_pattern[j]);\n  }\n\n  // Fourth and fourth-to-last column have 8 cells each,\n  // expected pattern: 0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0\n  const float _8_cells_pattern[17] = {\n      0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0};\n  for (size_t j = 0; j < 17; ++j) {\n    ASSERT_EQ(features[i + 3 * 17 + j], _8_cells_pattern[j]);\n    ASSERT_EQ(features[i + 5 * 17 + j], _8_cells_pattern[j]);\n  }\n\n  // Middle column has 9 cells,\n  // expected pattern: 1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1\n  const 
float _9_cells_pattern[17] = {\n      1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1};\n  for (size_t j = 0; j < 17; ++j) {\n    ASSERT_EQ(features[i + 4 * 17 + j], _9_cells_pattern[j]);\n  }\n  i += 9 * 17;\n\n  // All remaining channels should be all-zero; no moves played\n  while (i < 10 * 9 * 17) {\n    ASSERT_EQ(0, features[i]);\n    ++i;\n  }\n}\n"
  },
  {
    "path": "tests/python/test_replay_buffer.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport unittest\nimport tempfile\nfrom contextlib import contextmanager\nimport pypolygames as ppg\nimport polygames\nimport tube\n\nclass TrainingEnvironment(object):\n    model_fpath = None\n    assembler = None\n    context = None\n    optim = None\n    game = None\n\nDEFAULT_CAPACITY = 1000\n\n@contextmanager\ndef create_training_env(game_name, act_batchsize=50,\n    n_act_channels=1, n_games=100, n_actors=1,\n    model_device=\"cuda:0\", act_devices=[\"cuda:0\"],\n    lr=6.25e-5, eps=1.5e-4,\n    replay_capacity=DEFAULT_CAPACITY, seed=1):\n    model_fname = game_name + \"_model_latest.pt\"\n    training_env = TrainingEnvironment()\n    with tempfile.TemporaryDirectory() as save_dir:\n        train_option, eval_option = ppg.workflow.set_up_options(\n            seed=seed, save_dir=save_dir, game_name=game_name)\n\n        model_fpath = os.path.join(save_dir, model_fname)\n        model = ppg.workflow.create_model(game_name).to(model_device)\n        model.save(model_fpath)\n        optim = ppg.workflow.create_optimizer(model=model, lr=lr, eps=eps)\n\n        context, assembler, get_train_reward = ppg.workflow.create_train_envs(\n            act_batchsize=act_batchsize,\n            replay_capacity=replay_capacity,\n            game_name=game_name,\n            num_game=n_games,\n            num_actor=n_actors,\n            model_path=model_fpath,\n            seed=seed,\n            train_option=train_option,\n        )\n\n        training_env.train_option = train_option\n        training_env.eval_option = eval_option\n        training_env.model_fpath = model_fpath\n        training_env.assembler = assembler\n        training_env.context = context\n        training_env.optim = optim\n        training_env.get_train_reward = 
get_train_reward\n        try:\n            yield training_env\n        finally:\n            pass\n\nclass TestReplayBuffer(unittest.TestCase):\n\n    def test_init(self):\n        game_name = \"Connect4\"\n        with create_training_env(game_name) as training_env:\n            replay_buffer = training_env.assembler.buffer\n            self.assertTrue(hasattr(replay_buffer, 'size'))\n            self.assertEqual(replay_buffer.size, 0)\n            self.assertTrue(hasattr(replay_buffer, 'capacity'))\n            self.assertEqual(replay_buffer.capacity, DEFAULT_CAPACITY)\n            self.assertTrue(hasattr(replay_buffer, 'is_full'))\n            self.assertFalse(replay_buffer.is_full)\n\n    def test_init_one_game(self):\n        game_name = \"Connect4\"\n        with create_training_env(game_name, n_games=1) as training_env:\n            training_env.context.start()\n            training_env.assembler.start()\n\n            ppg.workflow.evaluate_before_training(\n                game_name=game_name,\n                num_game=1,\n                seed=1,\n                model=training_env.model,\n                device=\"cuda:0\",\n                eval_option=training_env.eval_option,\n                num_actor=1,\n            )\n\n            time.sleep(2)\n            print(\"replay buffer size: \", training_env.assembler.buffer_size())\n            time.sleep(2)\n            print(\"replay buffer size: \", training_env.assembler.buffer_size())\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "tests/tests.cc",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Unit tests runner.\n\n#include <gtest/gtest.h>\n\nint main(int argc, char** argv) {\n  ::testing::InitGoogleTest(&argc, argv);\n  return RUN_ALL_TESTS();\n}\n"
  },
  {
    "path": "tests/utils.h",
    "content": "/**\n * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n// Some utility functions for writing unit tests.\n\n#pragma once\n\n#include <iomanip>\n#include <iostream>\n\n// Print a feature plane:\n// printPlanes<const std::vector<float>&>(state.GetFeatures(), indexChannels, nbRows, nbCols);\ntemplate <typename T>\nvoid printPlane(T data, int c, int ni, int nj) {\n \n for (int i=0; i<ni; i++) {\n  for (int j=0; j<nj; j++) {\n   std::cout << data[(c*ni + i)*nj +j] << \" \";\n  }\n std::cout << std::endl;\n }\n std::cout << std::endl;\n}\n\n// Print several feature planes:\n// printPlanes<const std::vector<float>&>(state.GetFeatures(), nbChannels, nbRows, nbCols);\ntemplate <typename T>\nvoid printPlanes(T data, int nc, int ni, int nj) {\n for (int c=0; c<nc; c++)\n  printPlane<T>(data, c, ni, nj);\n}\n\n// Print raw data:\n// printData<const std::vector<float>&>(state.GetFeatures());\ntemplate <typename T>\nvoid printData(T data) {\n for (const auto & x : data)\n  std::cout << x << \" \";\n std::cout << std::endl;\n}\n\n"
  }
]